diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/__init__.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d4fc13237f4a7938b6aeaff8d6ef9cb351138f40 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/__init__.py @@ -0,0 +1,4 @@ +from . import context +from . import op + +__version__ = "1.16.4" diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/__main__.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/__main__.py new file mode 100644 index 0000000000000000000000000000000000000000..af1b8e8702f4986b4e17ca129dab18f338803da4 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/__main__.py @@ -0,0 +1,4 @@ +from .config import main + +if __name__ == "__main__": + main(prog="alembic") diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/autogenerate/__init__.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/autogenerate/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..445ddb25125aa63994052dd4ecea1362dc91656d --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/autogenerate/__init__.py @@ -0,0 +1,10 @@ +from .api import _render_migration_diffs as _render_migration_diffs +from .api import compare_metadata as compare_metadata +from .api import produce_migrations as produce_migrations +from .api import render_python_code as render_python_code +from .api import RevisionContext as RevisionContext +from .compare import _produce_net_changes as _produce_net_changes +from .compare import comparators as comparators +from .render import render_op_text as render_op_text +from .render import renderers as renderers +from .rewriter import Rewriter as Rewriter diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/autogenerate/api.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/autogenerate/api.py new file mode 100644 index 0000000000000000000000000000000000000000..811462e8288f3465364cd9c6f86920e9c7b8d31a --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/autogenerate/api.py @@ -0,0 +1,650 @@ +from __future__ import annotations + +import contextlib +from typing import Any +from typing import Dict +from typing import Iterator +from typing import List +from typing import Optional +from typing import Sequence +from typing import Set +from typing import TYPE_CHECKING +from typing import Union + +from sqlalchemy import inspect + +from . import compare +from . import render +from .. 
import util +from ..operations import ops +from ..util import sqla_compat + +"""Provide the 'autogenerate' feature which can produce migration operations +automatically.""" + +if TYPE_CHECKING: + from sqlalchemy.engine import Connection + from sqlalchemy.engine import Dialect + from sqlalchemy.engine import Inspector + from sqlalchemy.sql.schema import MetaData + from sqlalchemy.sql.schema import SchemaItem + from sqlalchemy.sql.schema import Table + + from ..config import Config + from ..operations.ops import DowngradeOps + from ..operations.ops import MigrationScript + from ..operations.ops import UpgradeOps + from ..runtime.environment import NameFilterParentNames + from ..runtime.environment import NameFilterType + from ..runtime.environment import ProcessRevisionDirectiveFn + from ..runtime.environment import RenderItemFn + from ..runtime.migration import MigrationContext + from ..script.base import Script + from ..script.base import ScriptDirectory + from ..script.revision import _GetRevArg + + +def compare_metadata(context: MigrationContext, metadata: MetaData) -> Any: + """Compare a database schema to that given in a + :class:`~sqlalchemy.schema.MetaData` instance. + + The database connection is presented in the context + of a :class:`.MigrationContext` object, which + provides database connectivity as well as optional + comparison functions to use for datatypes and + server defaults - see the "autogenerate" arguments + at :meth:`.EnvironmentContext.configure` + for details on these. + + The return format is a list of "diff" directives, + each representing individual differences:: + + from alembic.migration import MigrationContext + from alembic.autogenerate import compare_metadata + from sqlalchemy import ( + create_engine, + MetaData, + Column, + Integer, + String, + Table, + text, + ) + import pprint + + engine = create_engine("sqlite://") + + with engine.begin() as conn: + conn.execute( + text( + ''' + create table foo ( + id integer not null primary key, + old_data varchar, + x integer + ) + ''' + ) + ) + conn.execute(text("create table bar (data varchar)")) + + metadata = MetaData() + Table( + "foo", + metadata, + Column("id", Integer, primary_key=True), + Column("data", Integer), + Column("x", Integer, nullable=False), + ) + Table("bat", metadata, Column("info", String)) + + mc = MigrationContext.configure(engine.connect()) + + diff = compare_metadata(mc, metadata) + pprint.pprint(diff, indent=2, width=20) + + Output:: + + [ + ( + "add_table", + Table( + "bat", + MetaData(), + Column("info", String(), table=), + schema=None, + ), + ), + ( + "remove_table", + Table( + "bar", + MetaData(), + Column("data", VARCHAR(), table=), + schema=None, + ), + ), + ( + "add_column", + None, + "foo", + Column("data", Integer(), table=), + ), + [ + ( + "modify_nullable", + None, + "foo", + "x", + { + "existing_comment": None, + "existing_server_default": False, + "existing_type": INTEGER(), + }, + True, + False, + ) + ], + ( + "remove_column", + None, + "foo", + Column("old_data", VARCHAR(), table=), + ), + ] + + :param context: a :class:`.MigrationContext` + instance. + :param metadata: a :class:`~sqlalchemy.schema.MetaData` + instance. + + .. seealso:: + + :func:`.produce_migrations` - produces a :class:`.MigrationScript` + structure based on metadata comparison. 
+ + """ + + migration_script = produce_migrations(context, metadata) + assert migration_script.upgrade_ops is not None + return migration_script.upgrade_ops.as_diffs() + + +def produce_migrations( + context: MigrationContext, metadata: MetaData +) -> MigrationScript: + """Produce a :class:`.MigrationScript` structure based on schema + comparison. + + This function does essentially what :func:`.compare_metadata` does, + but then runs the resulting list of diffs to produce the full + :class:`.MigrationScript` object. For an example of what this looks like, + see the example in :ref:`customizing_revision`. + + .. seealso:: + + :func:`.compare_metadata` - returns more fundamental "diff" + data from comparing a schema. + + """ + + autogen_context = AutogenContext(context, metadata=metadata) + + migration_script = ops.MigrationScript( + rev_id=None, + upgrade_ops=ops.UpgradeOps([]), + downgrade_ops=ops.DowngradeOps([]), + ) + + compare._populate_migration_script(autogen_context, migration_script) + + return migration_script + + +def render_python_code( + up_or_down_op: Union[UpgradeOps, DowngradeOps], + sqlalchemy_module_prefix: str = "sa.", + alembic_module_prefix: str = "op.", + render_as_batch: bool = False, + imports: Sequence[str] = (), + render_item: Optional[RenderItemFn] = None, + migration_context: Optional[MigrationContext] = None, + user_module_prefix: Optional[str] = None, +) -> str: + """Render Python code given an :class:`.UpgradeOps` or + :class:`.DowngradeOps` object. + + This is a convenience function that can be used to test the + autogenerate output of a user-defined :class:`.MigrationScript` structure. + + :param up_or_down_op: :class:`.UpgradeOps` or :class:`.DowngradeOps` object + :param sqlalchemy_module_prefix: module prefix for SQLAlchemy objects + :param alembic_module_prefix: module prefix for Alembic constructs + :param render_as_batch: use "batch operations" style for rendering + :param imports: sequence of import symbols to add + :param render_item: callable to render items + :param migration_context: optional :class:`.MigrationContext` + :param user_module_prefix: optional string prefix for user-defined types + + .. 
versionadded:: 1.11.0 + + """ + opts = { + "sqlalchemy_module_prefix": sqlalchemy_module_prefix, + "alembic_module_prefix": alembic_module_prefix, + "render_item": render_item, + "render_as_batch": render_as_batch, + "user_module_prefix": user_module_prefix, + } + + if migration_context is None: + from ..runtime.migration import MigrationContext + from sqlalchemy.engine.default import DefaultDialect + + migration_context = MigrationContext.configure( + dialect=DefaultDialect() + ) + + autogen_context = AutogenContext(migration_context, opts=opts) + autogen_context.imports = set(imports) + return render._indent( + render._render_cmd_body(up_or_down_op, autogen_context) + ) + + +def _render_migration_diffs( + context: MigrationContext, template_args: Dict[Any, Any] +) -> None: + """legacy, used by test_autogen_composition at the moment""" + + autogen_context = AutogenContext(context) + + upgrade_ops = ops.UpgradeOps([]) + compare._produce_net_changes(autogen_context, upgrade_ops) + + migration_script = ops.MigrationScript( + rev_id=None, + upgrade_ops=upgrade_ops, + downgrade_ops=upgrade_ops.reverse(), + ) + + render._render_python_into_templatevars( + autogen_context, migration_script, template_args + ) + + +class AutogenContext: + """Maintains configuration and state that's specific to an + autogenerate operation.""" + + metadata: Union[MetaData, Sequence[MetaData], None] = None + """The :class:`~sqlalchemy.schema.MetaData` object + representing the destination. + + This object is the one that is passed within ``env.py`` + to the :paramref:`.EnvironmentContext.configure.target_metadata` + parameter. It represents the structure of :class:`.Table` and other + objects as stated in the current database model, and represents the + destination structure for the database being examined. + + While the :class:`~sqlalchemy.schema.MetaData` object is primarily + known as a collection of :class:`~sqlalchemy.schema.Table` objects, + it also has an :attr:`~sqlalchemy.schema.MetaData.info` dictionary + that may be used by end-user schemes to store additional schema-level + objects that are to be compared in custom autogeneration schemes. + + """ + + connection: Optional[Connection] = None + """The :class:`~sqlalchemy.engine.base.Connection` object currently + connected to the database backend being compared. + + This is obtained from the :attr:`.MigrationContext.bind` and is + ultimately set up in the ``env.py`` script. + + """ + + dialect: Optional[Dialect] = None + """The :class:`~sqlalchemy.engine.Dialect` object currently in use. + + This is normally obtained from the + :attr:`~sqlalchemy.engine.base.Connection.dialect` attribute. + + """ + + imports: Set[str] = None # type: ignore[assignment] + """A ``set()`` which contains string Python import directives. + + The directives are to be rendered into the ``${imports}`` section + of a script template. The set is normally empty and can be modified + within hooks such as the + :paramref:`.EnvironmentContext.configure.render_item` hook. + + .. 
seealso:: + + :ref:`autogen_render_types` + + """ + + migration_context: MigrationContext = None # type: ignore[assignment] + """The :class:`.MigrationContext` established by the ``env.py`` script.""" + + def __init__( + self, + migration_context: MigrationContext, + metadata: Union[MetaData, Sequence[MetaData], None] = None, + opts: Optional[Dict[str, Any]] = None, + autogenerate: bool = True, + ) -> None: + if ( + autogenerate + and migration_context is not None + and migration_context.as_sql + ): + raise util.CommandError( + "autogenerate can't use as_sql=True as it prevents querying " + "the database for schema information" + ) + + if opts is None: + opts = migration_context.opts + + self.metadata = metadata = ( + opts.get("target_metadata", None) if metadata is None else metadata + ) + + if ( + autogenerate + and metadata is None + and migration_context is not None + and migration_context.script is not None + ): + raise util.CommandError( + "Can't proceed with --autogenerate option; environment " + "script %s does not provide " + "a MetaData object or sequence of objects to the context." + % (migration_context.script.env_py_location) + ) + + include_object = opts.get("include_object", None) + include_name = opts.get("include_name", None) + + object_filters = [] + name_filters = [] + if include_object: + object_filters.append(include_object) + if include_name: + name_filters.append(include_name) + + self._object_filters = object_filters + self._name_filters = name_filters + + self.migration_context = migration_context + if self.migration_context is not None: + self.connection = self.migration_context.bind + self.dialect = self.migration_context.dialect + + self.imports = set() + self.opts: Dict[str, Any] = opts + self._has_batch: bool = False + + @util.memoized_property + def inspector(self) -> Inspector: + if self.connection is None: + raise TypeError( + "can't return inspector as this " + "AutogenContext has no database connection" + ) + return inspect(self.connection) + + @contextlib.contextmanager + def _within_batch(self) -> Iterator[None]: + self._has_batch = True + yield + self._has_batch = False + + def run_name_filters( + self, + name: Optional[str], + type_: NameFilterType, + parent_names: NameFilterParentNames, + ) -> bool: + """Run the context's name filters and return True if the targets + should be part of the autogenerate operation. + + This method should be run for every kind of name encountered within the + reflection side of an autogenerate operation, giving the environment + the chance to filter what names should be reflected as database + objects. The filters here are produced directly via the + :paramref:`.EnvironmentContext.configure.include_name` parameter. + + """ + if "schema_name" in parent_names: + if type_ == "table": + table_name = name + else: + table_name = parent_names.get("table_name", None) + if table_name: + schema_name = parent_names["schema_name"] + if schema_name: + parent_names["schema_qualified_table_name"] = "%s.%s" % ( + schema_name, + table_name, + ) + else: + parent_names["schema_qualified_table_name"] = table_name + + for fn in self._name_filters: + if not fn(name, type_, parent_names): + return False + else: + return True + + def run_object_filters( + self, + object_: SchemaItem, + name: sqla_compat._ConstraintName, + type_: NameFilterType, + reflected: bool, + compare_to: Optional[SchemaItem], + ) -> bool: + """Run the context's object filters and return True if the targets + should be part of the autogenerate operation. 
+ + This method should be run for every kind of object encountered within + an autogenerate operation, giving the environment the chance + to filter what objects should be included in the comparison. + The filters here are produced directly via the + :paramref:`.EnvironmentContext.configure.include_object` parameter. + + """ + for fn in self._object_filters: + if not fn(object_, name, type_, reflected, compare_to): + return False + else: + return True + + run_filters = run_object_filters + + @util.memoized_property + def sorted_tables(self) -> List[Table]: + """Return an aggregate of the :attr:`.MetaData.sorted_tables` + collection(s). + + For a sequence of :class:`.MetaData` objects, this + concatenates the :attr:`.MetaData.sorted_tables` collection + for each individual :class:`.MetaData` in the order of the + sequence. It does **not** collate the sorted tables collections. + + """ + result = [] + for m in util.to_list(self.metadata): + result.extend(m.sorted_tables) + return result + + @util.memoized_property + def table_key_to_table(self) -> Dict[str, Table]: + """Return an aggregate of the :attr:`.MetaData.tables` dictionaries. + + The :attr:`.MetaData.tables` collection is a dictionary of table key + to :class:`.Table`; this method aggregates the dictionary across + multiple :class:`.MetaData` objects into one dictionary. + + Duplicate table keys are **not** supported; if two :class:`.MetaData` + objects contain the same table key, an exception is raised. + + """ + result: Dict[str, Table] = {} + for m in util.to_list(self.metadata): + intersect = set(result).intersection(set(m.tables)) + if intersect: + raise ValueError( + "Duplicate table keys across multiple " + "MetaData objects: %s" + % (", ".join('"%s"' % key for key in sorted(intersect))) + ) + + result.update(m.tables) + return result + + +class RevisionContext: + """Maintains configuration and state that's specific to a revision + file generation operation.""" + + generated_revisions: List[MigrationScript] + process_revision_directives: Optional[ProcessRevisionDirectiveFn] + + def __init__( + self, + config: Config, + script_directory: ScriptDirectory, + command_args: Dict[str, Any], + process_revision_directives: Optional[ + ProcessRevisionDirectiveFn + ] = None, + ) -> None: + self.config = config + self.script_directory = script_directory + self.command_args = command_args + self.process_revision_directives = process_revision_directives + self.template_args = { + "config": config # Let templates use config for + # e.g. 
multiple databases + } + self.generated_revisions = [self._default_revision()] + + def _to_script( + self, migration_script: MigrationScript + ) -> Optional[Script]: + template_args: Dict[str, Any] = self.template_args.copy() + + if getattr(migration_script, "_needs_render", False): + autogen_context = self._last_autogen_context + + # clear out existing imports if we are doing multiple + # renders + autogen_context.imports = set() + if migration_script.imports: + autogen_context.imports.update(migration_script.imports) + render._render_python_into_templatevars( + autogen_context, migration_script, template_args + ) + + assert migration_script.rev_id is not None + return self.script_directory.generate_revision( + migration_script.rev_id, + migration_script.message, + refresh=True, + head=migration_script.head, + splice=migration_script.splice, + branch_labels=migration_script.branch_label, + version_path=migration_script.version_path, + depends_on=migration_script.depends_on, + **template_args, + ) + + def run_autogenerate( + self, rev: _GetRevArg, migration_context: MigrationContext + ) -> None: + self._run_environment(rev, migration_context, True) + + def run_no_autogenerate( + self, rev: _GetRevArg, migration_context: MigrationContext + ) -> None: + self._run_environment(rev, migration_context, False) + + def _run_environment( + self, + rev: _GetRevArg, + migration_context: MigrationContext, + autogenerate: bool, + ) -> None: + if autogenerate: + if self.command_args["sql"]: + raise util.CommandError( + "Using --sql with --autogenerate does not make any sense" + ) + if set(self.script_directory.get_revisions(rev)) != set( + self.script_directory.get_revisions("heads") + ): + raise util.CommandError("Target database is not up to date.") + + upgrade_token = migration_context.opts["upgrade_token"] + downgrade_token = migration_context.opts["downgrade_token"] + + migration_script = self.generated_revisions[-1] + if not getattr(migration_script, "_needs_render", False): + migration_script.upgrade_ops_list[-1].upgrade_token = upgrade_token + migration_script.downgrade_ops_list[-1].downgrade_token = ( + downgrade_token + ) + migration_script._needs_render = True + else: + migration_script._upgrade_ops.append( + ops.UpgradeOps([], upgrade_token=upgrade_token) + ) + migration_script._downgrade_ops.append( + ops.DowngradeOps([], downgrade_token=downgrade_token) + ) + + autogen_context = AutogenContext( + migration_context, autogenerate=autogenerate + ) + self._last_autogen_context: AutogenContext = autogen_context + + if autogenerate: + compare._populate_migration_script( + autogen_context, migration_script + ) + + if self.process_revision_directives: + self.process_revision_directives( + migration_context, rev, self.generated_revisions + ) + + hook = migration_context.opts["process_revision_directives"] + if hook: + hook(migration_context, rev, self.generated_revisions) + + for migration_script in self.generated_revisions: + migration_script._needs_render = True + + def _default_revision(self) -> MigrationScript: + command_args: Dict[str, Any] = self.command_args + op = ops.MigrationScript( + rev_id=command_args["rev_id"] or util.rev_id(), + message=command_args["message"], + upgrade_ops=ops.UpgradeOps([]), + downgrade_ops=ops.DowngradeOps([]), + head=command_args["head"], + splice=command_args["splice"], + branch_label=command_args["branch_label"], + version_path=command_args["version_path"], + depends_on=command_args["depends_on"], + ) + return op + + def generate_scripts(self) -> 
Iterator[Optional[Script]]: + for generated_revision in self.generated_revisions: + yield self._to_script(generated_revision) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/autogenerate/compare.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/autogenerate/compare.py new file mode 100644 index 0000000000000000000000000000000000000000..a9adda1cd5b43ae581c73054bb9670b2dfff0b7d --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/autogenerate/compare.py @@ -0,0 +1,1370 @@ +# mypy: allow-untyped-defs, allow-incomplete-defs, allow-untyped-calls +# mypy: no-warn-return-any, allow-any-generics + +from __future__ import annotations + +import contextlib +import logging +import re +from typing import Any +from typing import cast +from typing import Dict +from typing import Iterator +from typing import Mapping +from typing import Optional +from typing import Set +from typing import Tuple +from typing import TYPE_CHECKING +from typing import TypeVar +from typing import Union + +from sqlalchemy import event +from sqlalchemy import inspect +from sqlalchemy import schema as sa_schema +from sqlalchemy import text +from sqlalchemy import types as sqltypes +from sqlalchemy.sql import expression +from sqlalchemy.sql.elements import conv +from sqlalchemy.sql.schema import ForeignKeyConstraint +from sqlalchemy.sql.schema import Index +from sqlalchemy.sql.schema import UniqueConstraint +from sqlalchemy.util import OrderedSet + +from .. import util +from ..ddl._autogen import is_index_sig +from ..ddl._autogen import is_uq_sig +from ..operations import ops +from ..util import sqla_compat + +if TYPE_CHECKING: + from typing import Literal + + from sqlalchemy.engine.reflection import Inspector + from sqlalchemy.sql.elements import quoted_name + from sqlalchemy.sql.elements import TextClause + from sqlalchemy.sql.schema import Column + from sqlalchemy.sql.schema import Table + + from alembic.autogenerate.api import AutogenContext + from alembic.ddl.impl import DefaultImpl + from alembic.operations.ops import AlterColumnOp + from alembic.operations.ops import MigrationScript + from alembic.operations.ops import ModifyTableOps + from alembic.operations.ops import UpgradeOps + from ..ddl._autogen import _constraint_sig + + +log = logging.getLogger(__name__) + + +def _populate_migration_script( + autogen_context: AutogenContext, migration_script: MigrationScript +) -> None: + upgrade_ops = migration_script.upgrade_ops_list[-1] + downgrade_ops = migration_script.downgrade_ops_list[-1] + + _produce_net_changes(autogen_context, upgrade_ops) + upgrade_ops.reverse_into(downgrade_ops) + + +comparators = util.Dispatcher(uselist=True) + + +def _produce_net_changes( + autogen_context: AutogenContext, upgrade_ops: UpgradeOps +) -> None: + connection = autogen_context.connection + assert connection is not None + include_schemas = autogen_context.opts.get("include_schemas", False) + + inspector: Inspector = inspect(connection) + + default_schema = connection.dialect.default_schema_name + schemas: Set[Optional[str]] + if include_schemas: + schemas = set(inspector.get_schema_names()) + # replace default schema name with None + schemas.discard("information_schema") + # replace the "default" schema with None + schemas.discard(default_schema) + schemas.add(None) + else: + schemas = {None} + + schemas = { + s for s in schemas if autogen_context.run_name_filters(s, "schema", {}) + } + + assert 
autogen_context.dialect is not None + comparators.dispatch("schema", autogen_context.dialect.name)( + autogen_context, upgrade_ops, schemas + ) + + +@comparators.dispatch_for("schema") +def _autogen_for_tables( + autogen_context: AutogenContext, + upgrade_ops: UpgradeOps, + schemas: Union[Set[None], Set[Optional[str]]], +) -> None: + inspector = autogen_context.inspector + + conn_table_names: Set[Tuple[Optional[str], str]] = set() + + version_table_schema = ( + autogen_context.migration_context.version_table_schema + ) + version_table = autogen_context.migration_context.version_table + + for schema_name in schemas: + tables = set(inspector.get_table_names(schema=schema_name)) + if schema_name == version_table_schema: + tables = tables.difference( + [autogen_context.migration_context.version_table] + ) + + conn_table_names.update( + (schema_name, tname) + for tname in tables + if autogen_context.run_name_filters( + tname, "table", {"schema_name": schema_name} + ) + ) + + metadata_table_names = OrderedSet( + [(table.schema, table.name) for table in autogen_context.sorted_tables] + ).difference([(version_table_schema, version_table)]) + + _compare_tables( + conn_table_names, + metadata_table_names, + inspector, + upgrade_ops, + autogen_context, + ) + + +def _compare_tables( + conn_table_names: set, + metadata_table_names: set, + inspector: Inspector, + upgrade_ops: UpgradeOps, + autogen_context: AutogenContext, +) -> None: + default_schema = inspector.bind.dialect.default_schema_name + + # tables coming from the connection will not have "schema" + # set if it matches default_schema_name; so we need a list + # of table names from local metadata that also have "None" if schema + # == default_schema_name. Most setups will be like this anyway but + # some are not (see #170) + metadata_table_names_no_dflt_schema = OrderedSet( + [ + (schema if schema != default_schema else None, tname) + for schema, tname in metadata_table_names + ] + ) + + # to adjust for the MetaData collection storing the tables either + # as "schemaname.tablename" or just "tablename", create a new lookup + # which will match the "non-default-schema" keys to the Table object. + tname_to_table = { + no_dflt_schema: autogen_context.table_key_to_table[ + sa_schema._get_table_key(tname, schema) + ] + for no_dflt_schema, (schema, tname) in zip( + metadata_table_names_no_dflt_schema, metadata_table_names + ) + } + metadata_table_names = metadata_table_names_no_dflt_schema + + for s, tname in metadata_table_names.difference(conn_table_names): + name = "%s.%s" % (s, tname) if s else tname + metadata_table = tname_to_table[(s, tname)] + if autogen_context.run_object_filters( + metadata_table, tname, "table", False, None + ): + upgrade_ops.ops.append( + ops.CreateTableOp.from_table(metadata_table) + ) + log.info("Detected added table %r", name) + modify_table_ops = ops.ModifyTableOps(tname, [], schema=s) + + comparators.dispatch("table")( + autogen_context, + modify_table_ops, + s, + tname, + None, + metadata_table, + ) + if not modify_table_ops.is_empty(): + upgrade_ops.ops.append(modify_table_ops) + + removal_metadata = sa_schema.MetaData() + for s, tname in conn_table_names.difference(metadata_table_names): + name = sa_schema._get_table_key(tname, s) + exists = name in removal_metadata.tables + t = sa_schema.Table(tname, removal_metadata, schema=s) + + if not exists: + event.listen( + t, + "column_reflect", + # fmt: off + autogen_context.migration_context.impl. 
+ _compat_autogen_column_reflect + (inspector), + # fmt: on + ) + _InspectorConv(inspector).reflect_table(t, include_columns=None) + if autogen_context.run_object_filters(t, tname, "table", True, None): + modify_table_ops = ops.ModifyTableOps(tname, [], schema=s) + + comparators.dispatch("table")( + autogen_context, modify_table_ops, s, tname, t, None + ) + if not modify_table_ops.is_empty(): + upgrade_ops.ops.append(modify_table_ops) + + upgrade_ops.ops.append(ops.DropTableOp.from_table(t)) + log.info("Detected removed table %r", name) + + existing_tables = conn_table_names.intersection(metadata_table_names) + + existing_metadata = sa_schema.MetaData() + conn_column_info = {} + for s, tname in existing_tables: + name = sa_schema._get_table_key(tname, s) + exists = name in existing_metadata.tables + t = sa_schema.Table(tname, existing_metadata, schema=s) + if not exists: + event.listen( + t, + "column_reflect", + # fmt: off + autogen_context.migration_context.impl. + _compat_autogen_column_reflect(inspector), + # fmt: on + ) + _InspectorConv(inspector).reflect_table(t, include_columns=None) + + conn_column_info[(s, tname)] = t + + for s, tname in sorted(existing_tables, key=lambda x: (x[0] or "", x[1])): + s = s or None + name = "%s.%s" % (s, tname) if s else tname + metadata_table = tname_to_table[(s, tname)] + conn_table = existing_metadata.tables[name] + + if autogen_context.run_object_filters( + metadata_table, tname, "table", False, conn_table + ): + modify_table_ops = ops.ModifyTableOps(tname, [], schema=s) + with _compare_columns( + s, + tname, + conn_table, + metadata_table, + modify_table_ops, + autogen_context, + inspector, + ): + comparators.dispatch("table")( + autogen_context, + modify_table_ops, + s, + tname, + conn_table, + metadata_table, + ) + + if not modify_table_ops.is_empty(): + upgrade_ops.ops.append(modify_table_ops) + + +_IndexColumnSortingOps: Mapping[str, Any] = util.immutabledict( + { + "asc": expression.asc, + "desc": expression.desc, + "nulls_first": expression.nullsfirst, + "nulls_last": expression.nullslast, + "nullsfirst": expression.nullsfirst, # 1_3 name + "nullslast": expression.nullslast, # 1_3 name + } +) + + +def _make_index( + impl: DefaultImpl, params: Dict[str, Any], conn_table: Table +) -> Optional[Index]: + exprs: list[Union[Column[Any], TextClause]] = [] + sorting = params.get("column_sorting") + + for num, col_name in enumerate(params["column_names"]): + item: Union[Column[Any], TextClause] + if col_name is None: + assert "expressions" in params + name = params["expressions"][num] + item = text(name) + else: + name = col_name + item = conn_table.c[col_name] + if sorting and name in sorting: + for operator in sorting[name]: + if operator in _IndexColumnSortingOps: + item = _IndexColumnSortingOps[operator](item) + exprs.append(item) + ix = sa_schema.Index( + params["name"], + *exprs, + unique=params["unique"], + _table=conn_table, + **impl.adjust_reflected_dialect_options(params, "index"), + ) + if "duplicates_constraint" in params: + ix.info["duplicates_constraint"] = params["duplicates_constraint"] + return ix + + +def _make_unique_constraint( + impl: DefaultImpl, params: Dict[str, Any], conn_table: Table +) -> UniqueConstraint: + uq = sa_schema.UniqueConstraint( + *[conn_table.c[cname] for cname in params["column_names"]], + name=params["name"], + **impl.adjust_reflected_dialect_options(params, "unique_constraint"), + ) + if "duplicates_index" in params: + uq.info["duplicates_index"] = params["duplicates_index"] + + return uq + + +def 
_make_foreign_key( + params: Dict[str, Any], conn_table: Table +) -> ForeignKeyConstraint: + tname = params["referred_table"] + if params["referred_schema"]: + tname = "%s.%s" % (params["referred_schema"], tname) + + options = params.get("options", {}) + + const = sa_schema.ForeignKeyConstraint( + [conn_table.c[cname] for cname in params["constrained_columns"]], + ["%s.%s" % (tname, n) for n in params["referred_columns"]], + onupdate=options.get("onupdate"), + ondelete=options.get("ondelete"), + deferrable=options.get("deferrable"), + initially=options.get("initially"), + name=params["name"], + ) + # needed by 0.7 + conn_table.append_constraint(const) + return const + + +@contextlib.contextmanager +def _compare_columns( + schema: Optional[str], + tname: Union[quoted_name, str], + conn_table: Table, + metadata_table: Table, + modify_table_ops: ModifyTableOps, + autogen_context: AutogenContext, + inspector: Inspector, +) -> Iterator[None]: + name = "%s.%s" % (schema, tname) if schema else tname + metadata_col_names = OrderedSet( + c.name for c in metadata_table.c if not c.system + ) + metadata_cols_by_name = { + c.name: c for c in metadata_table.c if not c.system + } + + conn_col_names = { + c.name: c + for c in conn_table.c + if autogen_context.run_name_filters( + c.name, "column", {"table_name": tname, "schema_name": schema} + ) + } + + for cname in metadata_col_names.difference(conn_col_names): + if autogen_context.run_object_filters( + metadata_cols_by_name[cname], cname, "column", False, None + ): + modify_table_ops.ops.append( + ops.AddColumnOp.from_column_and_tablename( + schema, tname, metadata_cols_by_name[cname] + ) + ) + log.info("Detected added column '%s.%s'", name, cname) + + for colname in metadata_col_names.intersection(conn_col_names): + metadata_col = metadata_cols_by_name[colname] + conn_col = conn_table.c[colname] + if not autogen_context.run_object_filters( + metadata_col, colname, "column", False, conn_col + ): + continue + alter_column_op = ops.AlterColumnOp(tname, colname, schema=schema) + + comparators.dispatch("column")( + autogen_context, + alter_column_op, + schema, + tname, + colname, + conn_col, + metadata_col, + ) + + if alter_column_op.has_changes(): + modify_table_ops.ops.append(alter_column_op) + + yield + + for cname in set(conn_col_names).difference(metadata_col_names): + if autogen_context.run_object_filters( + conn_table.c[cname], cname, "column", True, None + ): + modify_table_ops.ops.append( + ops.DropColumnOp.from_column_and_tablename( + schema, tname, conn_table.c[cname] + ) + ) + log.info("Detected removed column '%s.%s'", name, cname) + + +_C = TypeVar("_C", bound=Union[UniqueConstraint, ForeignKeyConstraint, Index]) + + +class _InspectorConv: + __slots__ = ("inspector",) + + def __init__(self, inspector): + self.inspector = inspector + + def _apply_reflectinfo_conv(self, consts): + if not consts: + return consts + for const in consts: + if const["name"] is not None and not isinstance( + const["name"], conv + ): + const["name"] = conv(const["name"]) + return consts + + def _apply_constraint_conv(self, consts): + if not consts: + return consts + for const in consts: + if const.name is not None and not isinstance(const.name, conv): + const.name = conv(const.name) + return consts + + def get_indexes(self, *args, **kw): + return self._apply_reflectinfo_conv( + self.inspector.get_indexes(*args, **kw) + ) + + def get_unique_constraints(self, *args, **kw): + return self._apply_reflectinfo_conv( + self.inspector.get_unique_constraints(*args, **kw) + ) + + 
def get_foreign_keys(self, *args, **kw): + return self._apply_reflectinfo_conv( + self.inspector.get_foreign_keys(*args, **kw) + ) + + def reflect_table(self, table, *, include_columns): + self.inspector.reflect_table(table, include_columns=include_columns) + + # I had a cool version of this using _ReflectInfo, however that doesn't + # work in 1.4 and it's not public API in 2.x. Then this is just a two + # liner. So there's no competition... + self._apply_constraint_conv(table.constraints) + self._apply_constraint_conv(table.indexes) + + +@comparators.dispatch_for("table") +def _compare_indexes_and_uniques( + autogen_context: AutogenContext, + modify_ops: ModifyTableOps, + schema: Optional[str], + tname: Union[quoted_name, str], + conn_table: Optional[Table], + metadata_table: Optional[Table], +) -> None: + inspector = autogen_context.inspector + is_create_table = conn_table is None + is_drop_table = metadata_table is None + impl = autogen_context.migration_context.impl + + # 1a. get raw indexes and unique constraints from metadata ... + if metadata_table is not None: + metadata_unique_constraints = { + uq + for uq in metadata_table.constraints + if isinstance(uq, sa_schema.UniqueConstraint) + } + metadata_indexes = set(metadata_table.indexes) + else: + metadata_unique_constraints = set() + metadata_indexes = set() + + conn_uniques = conn_indexes = frozenset() # type:ignore[var-annotated] + + supports_unique_constraints = False + + unique_constraints_duplicate_unique_indexes = False + + if conn_table is not None: + # 1b. ... and from connection, if the table exists + try: + conn_uniques = _InspectorConv(inspector).get_unique_constraints( + tname, schema=schema + ) + + supports_unique_constraints = True + except NotImplementedError: + pass + except TypeError: + # number of arguments is off for the base + # method in SQLAlchemy due to the cache decorator + # not being present + pass + else: + conn_uniques = [ # type:ignore[assignment] + uq + for uq in conn_uniques + if autogen_context.run_name_filters( + uq["name"], + "unique_constraint", + {"table_name": tname, "schema_name": schema}, + ) + ] + for uq in conn_uniques: + if uq.get("duplicates_index"): + unique_constraints_duplicate_unique_indexes = True + try: + conn_indexes = _InspectorConv(inspector).get_indexes( + tname, schema=schema + ) + except NotImplementedError: + pass + else: + conn_indexes = [ # type:ignore[assignment] + ix + for ix in conn_indexes + if autogen_context.run_name_filters( + ix["name"], + "index", + {"table_name": tname, "schema_name": schema}, + ) + ] + + # 2. convert conn-level objects from raw inspector records + # into schema objects + if is_drop_table: + # for DROP TABLE uniques are inline, don't need them + conn_uniques = set() # type:ignore[assignment] + else: + conn_uniques = { # type:ignore[assignment] + _make_unique_constraint(impl, uq_def, conn_table) + for uq_def in conn_uniques + } + + conn_indexes = { # type:ignore[assignment] + index + for index in ( + _make_index(impl, ix, conn_table) for ix in conn_indexes + ) + if index is not None + } + + # 2a. if the dialect dupes unique indexes as unique constraints + # (mysql and oracle), correct for that + + if unique_constraints_duplicate_unique_indexes: + _correct_for_uq_duplicates_uix( + conn_uniques, + conn_indexes, + metadata_unique_constraints, + metadata_indexes, + autogen_context.dialect, + impl, + ) + + # 3. 
give the dialect a chance to omit indexes and constraints that + # we know are either added implicitly by the DB or that the DB + # can't accurately report on + impl.correct_for_autogen_constraints( + conn_uniques, # type: ignore[arg-type] + conn_indexes, # type: ignore[arg-type] + metadata_unique_constraints, + metadata_indexes, + ) + + # 4. organize the constraints into "signature" collections, the + # _constraint_sig() objects provide a consistent facade over both + # Index and UniqueConstraint so we can easily work with them + # interchangeably + metadata_unique_constraints_sig = { + impl._create_metadata_constraint_sig(uq) + for uq in metadata_unique_constraints + } + + metadata_indexes_sig = { + impl._create_metadata_constraint_sig(ix) for ix in metadata_indexes + } + + conn_unique_constraints = { + impl._create_reflected_constraint_sig(uq) for uq in conn_uniques + } + + conn_indexes_sig = { + impl._create_reflected_constraint_sig(ix) for ix in conn_indexes + } + + # 5. index things by name, for those objects that have names + metadata_names = { + cast(str, c.md_name_to_sql_name(autogen_context)): c + for c in metadata_unique_constraints_sig.union(metadata_indexes_sig) + if c.is_named + } + + conn_uniques_by_name: Dict[sqla_compat._ConstraintName, _constraint_sig] + conn_indexes_by_name: Dict[sqla_compat._ConstraintName, _constraint_sig] + + conn_uniques_by_name = {c.name: c for c in conn_unique_constraints} + conn_indexes_by_name = {c.name: c for c in conn_indexes_sig} + conn_names = { + c.name: c + for c in conn_unique_constraints.union(conn_indexes_sig) + if sqla_compat.constraint_name_string(c.name) + } + + doubled_constraints = { + name: (conn_uniques_by_name[name], conn_indexes_by_name[name]) + for name in set(conn_uniques_by_name).intersection( + conn_indexes_by_name + ) + } + + # 6. index things by "column signature", to help with unnamed unique + # constraints. + conn_uniques_by_sig = {uq.unnamed: uq for uq in conn_unique_constraints} + metadata_uniques_by_sig = { + uq.unnamed: uq for uq in metadata_unique_constraints_sig + } + unnamed_metadata_uniques = { + uq.unnamed: uq + for uq in metadata_unique_constraints_sig + if not sqla_compat._constraint_is_named( + uq.const, autogen_context.dialect + ) + } + + # assumptions: + # 1. a unique constraint or an index from the connection *always* + # has a name. + # 2. an index on the metadata side *always* has a name. + # 3. a unique constraint on the metadata side *might* have a name. + # 4. The backend may double up indexes as unique constraints and + # vice versa (e.g. 
MySQL, Postgresql) + + def obj_added(obj: _constraint_sig): + if is_index_sig(obj): + if autogen_context.run_object_filters( + obj.const, obj.name, "index", False, None + ): + modify_ops.ops.append(ops.CreateIndexOp.from_index(obj.const)) + log.info( + "Detected added index %r on '%s'", + obj.name, + obj.column_names, + ) + elif is_uq_sig(obj): + if not supports_unique_constraints: + # can't report unique indexes as added if we don't + # detect them + return + if is_create_table or is_drop_table: + # unique constraints are created inline with table defs + return + if autogen_context.run_object_filters( + obj.const, obj.name, "unique_constraint", False, None + ): + modify_ops.ops.append( + ops.AddConstraintOp.from_constraint(obj.const) + ) + log.info( + "Detected added unique constraint %r on '%s'", + obj.name, + obj.column_names, + ) + else: + assert False + + def obj_removed(obj: _constraint_sig): + if is_index_sig(obj): + if obj.is_unique and not supports_unique_constraints: + # many databases double up unique constraints + # as unique indexes. without that list we can't + # be sure what we're doing here + return + + if autogen_context.run_object_filters( + obj.const, obj.name, "index", True, None + ): + modify_ops.ops.append(ops.DropIndexOp.from_index(obj.const)) + log.info("Detected removed index %r on %r", obj.name, tname) + elif is_uq_sig(obj): + if is_create_table or is_drop_table: + # if the whole table is being dropped, we don't need to + # consider unique constraint separately + return + if autogen_context.run_object_filters( + obj.const, obj.name, "unique_constraint", True, None + ): + modify_ops.ops.append( + ops.DropConstraintOp.from_constraint(obj.const) + ) + log.info( + "Detected removed unique constraint %r on %r", + obj.name, + tname, + ) + else: + assert False + + def obj_changed( + old: _constraint_sig, + new: _constraint_sig, + msg: str, + ): + if is_index_sig(old): + assert is_index_sig(new) + + if autogen_context.run_object_filters( + new.const, new.name, "index", False, old.const + ): + log.info( + "Detected changed index %r on %r: %s", old.name, tname, msg + ) + modify_ops.ops.append(ops.DropIndexOp.from_index(old.const)) + modify_ops.ops.append(ops.CreateIndexOp.from_index(new.const)) + elif is_uq_sig(old): + assert is_uq_sig(new) + + if autogen_context.run_object_filters( + new.const, new.name, "unique_constraint", False, old.const + ): + log.info( + "Detected changed unique constraint %r on %r: %s", + old.name, + tname, + msg, + ) + modify_ops.ops.append( + ops.DropConstraintOp.from_constraint(old.const) + ) + modify_ops.ops.append( + ops.AddConstraintOp.from_constraint(new.const) + ) + else: + assert False + + for removed_name in sorted(set(conn_names).difference(metadata_names)): + conn_obj = conn_names[removed_name] + if ( + is_uq_sig(conn_obj) + and conn_obj.unnamed in unnamed_metadata_uniques + ): + continue + elif removed_name in doubled_constraints: + conn_uq, conn_idx = doubled_constraints[removed_name] + if ( + all( + conn_idx.unnamed != meta_idx.unnamed + for meta_idx in metadata_indexes_sig + ) + and conn_uq.unnamed not in metadata_uniques_by_sig + ): + obj_removed(conn_uq) + obj_removed(conn_idx) + else: + obj_removed(conn_obj) + + for existing_name in sorted(set(metadata_names).intersection(conn_names)): + metadata_obj = metadata_names[existing_name] + + if existing_name in doubled_constraints: + conn_uq, conn_idx = doubled_constraints[existing_name] + if is_index_sig(metadata_obj): + conn_obj = conn_idx + else: + conn_obj = conn_uq + else: + 
conn_obj = conn_names[existing_name] + + if type(conn_obj) != type(metadata_obj): + obj_removed(conn_obj) + obj_added(metadata_obj) + else: + comparison = metadata_obj.compare_to_reflected(conn_obj) + + if comparison.is_different: + # constraint are different + obj_changed(conn_obj, metadata_obj, comparison.message) + elif comparison.is_skip: + # constraint cannot be compared, skip them + thing = ( + "index" if is_index_sig(conn_obj) else "unique constraint" + ) + log.info( + "Cannot compare %s %r, assuming equal and skipping. %s", + thing, + conn_obj.name, + comparison.message, + ) + else: + # constraint are equal + assert comparison.is_equal + + for added_name in sorted(set(metadata_names).difference(conn_names)): + obj = metadata_names[added_name] + obj_added(obj) + + for uq_sig in unnamed_metadata_uniques: + if uq_sig not in conn_uniques_by_sig: + obj_added(unnamed_metadata_uniques[uq_sig]) + + +def _correct_for_uq_duplicates_uix( + conn_unique_constraints, + conn_indexes, + metadata_unique_constraints, + metadata_indexes, + dialect, + impl, +): + # dedupe unique indexes vs. constraints, since MySQL / Oracle + # doesn't really have unique constraints as a separate construct. + # but look in the metadata and try to maintain constructs + # that already seem to be defined one way or the other + # on that side. This logic was formerly local to MySQL dialect, + # generalized to Oracle and others. See #276 + + # resolve final rendered name for unique constraints defined in the + # metadata. this includes truncation of long names. naming convention + # names currently should already be set as cons.name, however leave this + # to the sqla_compat to decide. + metadata_cons_names = [ + (sqla_compat._get_constraint_final_name(cons, dialect), cons) + for cons in metadata_unique_constraints + ] + + metadata_uq_names = { + name for name, cons in metadata_cons_names if name is not None + } + + unnamed_metadata_uqs = { + impl._create_metadata_constraint_sig(cons).unnamed + for name, cons in metadata_cons_names + if name is None + } + + metadata_ix_names = { + sqla_compat._get_constraint_final_name(cons, dialect) + for cons in metadata_indexes + if cons.unique + } + + # for reflection side, names are in their final database form + # already since they're from the database + conn_ix_names = {cons.name: cons for cons in conn_indexes if cons.unique} + + uqs_dupe_indexes = { + cons.name: cons + for cons in conn_unique_constraints + if cons.info["duplicates_index"] + } + + for overlap in uqs_dupe_indexes: + if overlap not in metadata_uq_names: + if ( + impl._create_reflected_constraint_sig( + uqs_dupe_indexes[overlap] + ).unnamed + not in unnamed_metadata_uqs + ): + conn_unique_constraints.discard(uqs_dupe_indexes[overlap]) + elif overlap not in metadata_ix_names: + conn_indexes.discard(conn_ix_names[overlap]) + + +@comparators.dispatch_for("column") +def _compare_nullable( + autogen_context: AutogenContext, + alter_column_op: AlterColumnOp, + schema: Optional[str], + tname: Union[quoted_name, str], + cname: Union[quoted_name, str], + conn_col: Column[Any], + metadata_col: Column[Any], +) -> None: + metadata_col_nullable = metadata_col.nullable + conn_col_nullable = conn_col.nullable + alter_column_op.existing_nullable = conn_col_nullable + + if conn_col_nullable is not metadata_col_nullable: + if ( + sqla_compat._server_default_is_computed( + metadata_col.server_default, conn_col.server_default + ) + and sqla_compat._nullability_might_be_unset(metadata_col) + or ( + sqla_compat._server_default_is_identity( 
+ metadata_col.server_default, conn_col.server_default + ) + ) + ): + log.info( + "Ignoring nullable change on identity column '%s.%s'", + tname, + cname, + ) + else: + alter_column_op.modify_nullable = metadata_col_nullable + log.info( + "Detected %s on column '%s.%s'", + "NULL" if metadata_col_nullable else "NOT NULL", + tname, + cname, + ) + + +@comparators.dispatch_for("column") +def _setup_autoincrement( + autogen_context: AutogenContext, + alter_column_op: AlterColumnOp, + schema: Optional[str], + tname: Union[quoted_name, str], + cname: quoted_name, + conn_col: Column[Any], + metadata_col: Column[Any], +) -> None: + if metadata_col.table._autoincrement_column is metadata_col: + alter_column_op.kw["autoincrement"] = True + elif metadata_col.autoincrement is True: + alter_column_op.kw["autoincrement"] = True + elif metadata_col.autoincrement is False: + alter_column_op.kw["autoincrement"] = False + + +@comparators.dispatch_for("column") +def _compare_type( + autogen_context: AutogenContext, + alter_column_op: AlterColumnOp, + schema: Optional[str], + tname: Union[quoted_name, str], + cname: Union[quoted_name, str], + conn_col: Column[Any], + metadata_col: Column[Any], +) -> None: + conn_type = conn_col.type + alter_column_op.existing_type = conn_type + metadata_type = metadata_col.type + if conn_type._type_affinity is sqltypes.NullType: + log.info( + "Couldn't determine database type " "for column '%s.%s'", + tname, + cname, + ) + return + if metadata_type._type_affinity is sqltypes.NullType: + log.info( + "Column '%s.%s' has no type within " "the model; can't compare", + tname, + cname, + ) + return + + isdiff = autogen_context.migration_context._compare_type( + conn_col, metadata_col + ) + + if isdiff: + alter_column_op.modify_type = metadata_type + log.info( + "Detected type change from %r to %r on '%s.%s'", + conn_type, + metadata_type, + tname, + cname, + ) + + +def _render_server_default_for_compare( + metadata_default: Optional[Any], autogen_context: AutogenContext +) -> Optional[str]: + if isinstance(metadata_default, sa_schema.DefaultClause): + if isinstance(metadata_default.arg, str): + metadata_default = metadata_default.arg + else: + metadata_default = str( + metadata_default.arg.compile( + dialect=autogen_context.dialect, + compile_kwargs={"literal_binds": True}, + ) + ) + if isinstance(metadata_default, str): + return metadata_default + else: + return None + + +def _normalize_computed_default(sqltext: str) -> str: + """we want to warn if a computed sql expression has changed. however + we don't want false positives and the warning is not that critical. + so filter out most forms of variability from the SQL text. + + """ + + return re.sub(r"[ \(\)'\"`\[\]\t\r\n]", "", sqltext).lower() + + +def _compare_computed_default( + autogen_context: AutogenContext, + alter_column_op: AlterColumnOp, + schema: Optional[str], + tname: str, + cname: str, + conn_col: Column[Any], + metadata_col: Column[Any], +) -> None: + rendered_metadata_default = str( + cast(sa_schema.Computed, metadata_col.server_default).sqltext.compile( + dialect=autogen_context.dialect, + compile_kwargs={"literal_binds": True}, + ) + ) + + # since we cannot change computed columns, we do only a crude comparison + # here where we try to eliminate syntactical differences in order to + # get a minimal comparison just to emit a warning. 
+ + rendered_metadata_default = _normalize_computed_default( + rendered_metadata_default + ) + + if isinstance(conn_col.server_default, sa_schema.Computed): + rendered_conn_default = str( + conn_col.server_default.sqltext.compile( + dialect=autogen_context.dialect, + compile_kwargs={"literal_binds": True}, + ) + ) + if rendered_conn_default is None: + rendered_conn_default = "" + else: + rendered_conn_default = _normalize_computed_default( + rendered_conn_default + ) + else: + rendered_conn_default = "" + + if rendered_metadata_default != rendered_conn_default: + _warn_computed_not_supported(tname, cname) + + +def _warn_computed_not_supported(tname: str, cname: str) -> None: + util.warn("Computed default on %s.%s cannot be modified" % (tname, cname)) + + +def _compare_identity_default( + autogen_context, + alter_column_op, + schema, + tname, + cname, + conn_col, + metadata_col, +): + impl = autogen_context.migration_context.impl + diff, ignored_attr, is_alter = impl._compare_identity_default( + metadata_col.server_default, conn_col.server_default + ) + + return diff, is_alter + + +@comparators.dispatch_for("column") +def _compare_server_default( + autogen_context: AutogenContext, + alter_column_op: AlterColumnOp, + schema: Optional[str], + tname: Union[quoted_name, str], + cname: Union[quoted_name, str], + conn_col: Column[Any], + metadata_col: Column[Any], +) -> Optional[bool]: + metadata_default = metadata_col.server_default + conn_col_default = conn_col.server_default + if conn_col_default is None and metadata_default is None: + return False + + if sqla_compat._server_default_is_computed(metadata_default): + return _compare_computed_default( # type:ignore[func-returns-value] + autogen_context, + alter_column_op, + schema, + tname, + cname, + conn_col, + metadata_col, + ) + if sqla_compat._server_default_is_computed(conn_col_default): + _warn_computed_not_supported(tname, cname) + return False + + if sqla_compat._server_default_is_identity( + metadata_default, conn_col_default + ): + alter_column_op.existing_server_default = conn_col_default + diff, is_alter = _compare_identity_default( + autogen_context, + alter_column_op, + schema, + tname, + cname, + conn_col, + metadata_col, + ) + if is_alter: + alter_column_op.modify_server_default = metadata_default + if diff: + log.info( + "Detected server default on column '%s.%s': " + "identity options attributes %s", + tname, + cname, + sorted(diff), + ) + else: + rendered_metadata_default = _render_server_default_for_compare( + metadata_default, autogen_context + ) + + rendered_conn_default = ( + cast(Any, conn_col_default).arg.text if conn_col_default else None + ) + + alter_column_op.existing_server_default = conn_col_default + + is_diff = autogen_context.migration_context._compare_server_default( + conn_col, + metadata_col, + rendered_metadata_default, + rendered_conn_default, + ) + if is_diff: + alter_column_op.modify_server_default = metadata_default + log.info("Detected server default on column '%s.%s'", tname, cname) + + return None + + +@comparators.dispatch_for("column") +def _compare_column_comment( + autogen_context: AutogenContext, + alter_column_op: AlterColumnOp, + schema: Optional[str], + tname: Union[quoted_name, str], + cname: quoted_name, + conn_col: Column[Any], + metadata_col: Column[Any], +) -> Optional[Literal[False]]: + assert autogen_context.dialect is not None + if not autogen_context.dialect.supports_comments: + return None + + metadata_comment = metadata_col.comment + conn_col_comment = conn_col.comment + if 
conn_col_comment is None and metadata_comment is None: + return False + + alter_column_op.existing_comment = conn_col_comment + + if conn_col_comment != metadata_comment: + alter_column_op.modify_comment = metadata_comment + log.info("Detected column comment '%s.%s'", tname, cname) + + return None + + +@comparators.dispatch_for("table") +def _compare_foreign_keys( + autogen_context: AutogenContext, + modify_table_ops: ModifyTableOps, + schema: Optional[str], + tname: Union[quoted_name, str], + conn_table: Table, + metadata_table: Table, +) -> None: + # if we're doing CREATE TABLE, all FKs are created + # inline within the table def + if conn_table is None or metadata_table is None: + return + + inspector = autogen_context.inspector + metadata_fks = { + fk + for fk in metadata_table.constraints + if isinstance(fk, sa_schema.ForeignKeyConstraint) + } + + conn_fks_list = [ + fk + for fk in _InspectorConv(inspector).get_foreign_keys( + tname, schema=schema + ) + if autogen_context.run_name_filters( + fk["name"], + "foreign_key_constraint", + {"table_name": tname, "schema_name": schema}, + ) + ] + + conn_fks = { + _make_foreign_key(const, conn_table) for const in conn_fks_list + } + + impl = autogen_context.migration_context.impl + + # give the dialect a chance to correct the FKs to match more + # closely + autogen_context.migration_context.impl.correct_for_autogen_foreignkeys( + conn_fks, metadata_fks + ) + + metadata_fks_sig = { + impl._create_metadata_constraint_sig(fk) for fk in metadata_fks + } + + conn_fks_sig = { + impl._create_reflected_constraint_sig(fk) for fk in conn_fks + } + + # check if reflected FKs include options, indicating the backend + # can reflect FK options + if conn_fks_list and "options" in conn_fks_list[0]: + conn_fks_by_sig = {c.unnamed: c for c in conn_fks_sig} + metadata_fks_by_sig = {c.unnamed: c for c in metadata_fks_sig} + else: + # otherwise compare by sig without options added + conn_fks_by_sig = {c.unnamed_no_options: c for c in conn_fks_sig} + metadata_fks_by_sig = { + c.unnamed_no_options: c for c in metadata_fks_sig + } + + metadata_fks_by_name = { + c.name: c for c in metadata_fks_sig if c.name is not None + } + conn_fks_by_name = {c.name: c for c in conn_fks_sig if c.name is not None} + + def _add_fk(obj, compare_to): + if autogen_context.run_object_filters( + obj.const, obj.name, "foreign_key_constraint", False, compare_to + ): + modify_table_ops.ops.append( + ops.CreateForeignKeyOp.from_constraint(const.const) + ) + + log.info( + "Detected added foreign key (%s)(%s) on table %s%s", + ", ".join(obj.source_columns), + ", ".join(obj.target_columns), + "%s." % obj.source_schema if obj.source_schema else "", + obj.source_table, + ) + + def _remove_fk(obj, compare_to): + if autogen_context.run_object_filters( + obj.const, obj.name, "foreign_key_constraint", True, compare_to + ): + modify_table_ops.ops.append( + ops.DropConstraintOp.from_constraint(obj.const) + ) + log.info( + "Detected removed foreign key (%s)(%s) on table %s%s", + ", ".join(obj.source_columns), + ", ".join(obj.target_columns), + "%s." % obj.source_schema if obj.source_schema else "", + obj.source_table, + ) + + # so far it appears we don't need to do this by name at all. 
+ # SQLite doesn't preserve constraint names anyway + + for removed_sig in set(conn_fks_by_sig).difference(metadata_fks_by_sig): + const = conn_fks_by_sig[removed_sig] + if removed_sig not in metadata_fks_by_sig: + compare_to = ( + metadata_fks_by_name[const.name].const + if const.name in metadata_fks_by_name + else None + ) + _remove_fk(const, compare_to) + + for added_sig in set(metadata_fks_by_sig).difference(conn_fks_by_sig): + const = metadata_fks_by_sig[added_sig] + if added_sig not in conn_fks_by_sig: + compare_to = ( + conn_fks_by_name[const.name].const + if const.name in conn_fks_by_name + else None + ) + _add_fk(const, compare_to) + + +@comparators.dispatch_for("table") +def _compare_table_comment( + autogen_context: AutogenContext, + modify_table_ops: ModifyTableOps, + schema: Optional[str], + tname: Union[quoted_name, str], + conn_table: Optional[Table], + metadata_table: Optional[Table], +) -> None: + assert autogen_context.dialect is not None + if not autogen_context.dialect.supports_comments: + return + + # if we're doing CREATE TABLE, comments will be created inline + # with the create_table op. + if conn_table is None or metadata_table is None: + return + + if conn_table.comment is None and metadata_table.comment is None: + return + + if metadata_table.comment is None and conn_table.comment is not None: + modify_table_ops.ops.append( + ops.DropTableCommentOp( + tname, existing_comment=conn_table.comment, schema=schema + ) + ) + elif metadata_table.comment != conn_table.comment: + modify_table_ops.ops.append( + ops.CreateTableCommentOp( + tname, + metadata_table.comment, + existing_comment=conn_table.comment, + schema=schema, + ) + ) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/autogenerate/render.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/autogenerate/render.py new file mode 100644 index 0000000000000000000000000000000000000000..7f32838df7b292a86c1fd60d23a2926046653020 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/autogenerate/render.py @@ -0,0 +1,1172 @@ +# mypy: allow-untyped-defs, allow-incomplete-defs, allow-untyped-calls +# mypy: no-warn-return-any, allow-any-generics + +from __future__ import annotations + +from io import StringIO +import re +from typing import Any +from typing import cast +from typing import Dict +from typing import List +from typing import Optional +from typing import Tuple +from typing import TYPE_CHECKING +from typing import Union + +from mako.pygen import PythonPrinter +from sqlalchemy import schema as sa_schema +from sqlalchemy import sql +from sqlalchemy import types as sqltypes +from sqlalchemy.sql.base import _DialectArgView +from sqlalchemy.sql.elements import conv +from sqlalchemy.sql.elements import Label +from sqlalchemy.sql.elements import quoted_name + +from .. 
import util +from ..operations import ops +from ..util import sqla_compat + +if TYPE_CHECKING: + from typing import Literal + + from sqlalchemy import Computed + from sqlalchemy import Identity + from sqlalchemy.sql.elements import ColumnElement + from sqlalchemy.sql.elements import TextClause + from sqlalchemy.sql.schema import CheckConstraint + from sqlalchemy.sql.schema import Column + from sqlalchemy.sql.schema import Constraint + from sqlalchemy.sql.schema import FetchedValue + from sqlalchemy.sql.schema import ForeignKey + from sqlalchemy.sql.schema import ForeignKeyConstraint + from sqlalchemy.sql.schema import Index + from sqlalchemy.sql.schema import MetaData + from sqlalchemy.sql.schema import PrimaryKeyConstraint + from sqlalchemy.sql.schema import UniqueConstraint + from sqlalchemy.sql.sqltypes import ARRAY + from sqlalchemy.sql.type_api import TypeEngine + + from alembic.autogenerate.api import AutogenContext + from alembic.config import Config + from alembic.operations.ops import MigrationScript + from alembic.operations.ops import ModifyTableOps + + +MAX_PYTHON_ARGS = 255 + + +def _render_gen_name( + autogen_context: AutogenContext, + name: sqla_compat._ConstraintName, +) -> Optional[Union[quoted_name, str, _f_name]]: + if isinstance(name, conv): + return _f_name(_alembic_autogenerate_prefix(autogen_context), name) + else: + return sqla_compat.constraint_name_or_none(name) + + +def _indent(text: str) -> str: + text = re.compile(r"^", re.M).sub(" ", text).strip() + text = re.compile(r" +$", re.M).sub("", text) + return text + + +def _render_python_into_templatevars( + autogen_context: AutogenContext, + migration_script: MigrationScript, + template_args: Dict[str, Union[str, Config]], +) -> None: + imports = autogen_context.imports + + for upgrade_ops, downgrade_ops in zip( + migration_script.upgrade_ops_list, migration_script.downgrade_ops_list + ): + template_args[upgrade_ops.upgrade_token] = _indent( + _render_cmd_body(upgrade_ops, autogen_context) + ) + template_args[downgrade_ops.downgrade_token] = _indent( + _render_cmd_body(downgrade_ops, autogen_context) + ) + template_args["imports"] = "\n".join(sorted(imports)) + + +default_renderers = renderers = util.Dispatcher() + + +def _render_cmd_body( + op_container: ops.OpContainer, + autogen_context: AutogenContext, +) -> str: + buf = StringIO() + printer = PythonPrinter(buf) + + printer.writeline( + "# ### commands auto generated by Alembic - please adjust! 
###" + ) + + has_lines = False + for op in op_container.ops: + lines = render_op(autogen_context, op) + has_lines = has_lines or bool(lines) + + for line in lines: + printer.writeline(line) + + if not has_lines: + printer.writeline("pass") + + printer.writeline("# ### end Alembic commands ###") + + return buf.getvalue() + + +def render_op( + autogen_context: AutogenContext, op: ops.MigrateOperation +) -> List[str]: + renderer = renderers.dispatch(op) + lines = util.to_list(renderer(autogen_context, op)) + return lines + + +def render_op_text( + autogen_context: AutogenContext, op: ops.MigrateOperation +) -> str: + return "\n".join(render_op(autogen_context, op)) + + +@renderers.dispatch_for(ops.ModifyTableOps) +def _render_modify_table( + autogen_context: AutogenContext, op: ModifyTableOps +) -> List[str]: + opts = autogen_context.opts + render_as_batch = opts.get("render_as_batch", False) + + if op.ops: + lines = [] + if render_as_batch: + with autogen_context._within_batch(): + lines.append( + "with op.batch_alter_table(%r, schema=%r) as batch_op:" + % (op.table_name, op.schema) + ) + for t_op in op.ops: + t_lines = render_op(autogen_context, t_op) + lines.extend(t_lines) + lines.append("") + else: + for t_op in op.ops: + t_lines = render_op(autogen_context, t_op) + lines.extend(t_lines) + + return lines + else: + return [] + + +@renderers.dispatch_for(ops.CreateTableCommentOp) +def _render_create_table_comment( + autogen_context: AutogenContext, op: ops.CreateTableCommentOp +) -> str: + if autogen_context._has_batch: + templ = ( + "{prefix}create_table_comment(\n" + "{indent}{comment},\n" + "{indent}existing_comment={existing}\n" + ")" + ) + else: + templ = ( + "{prefix}create_table_comment(\n" + "{indent}'{tname}',\n" + "{indent}{comment},\n" + "{indent}existing_comment={existing},\n" + "{indent}schema={schema}\n" + ")" + ) + return templ.format( + prefix=_alembic_autogenerate_prefix(autogen_context), + tname=op.table_name, + comment="%r" % op.comment if op.comment is not None else None, + existing=( + "%r" % op.existing_comment + if op.existing_comment is not None + else None + ), + schema="'%s'" % op.schema if op.schema is not None else None, + indent=" ", + ) + + +@renderers.dispatch_for(ops.DropTableCommentOp) +def _render_drop_table_comment( + autogen_context: AutogenContext, op: ops.DropTableCommentOp +) -> str: + if autogen_context._has_batch: + templ = ( + "{prefix}drop_table_comment(\n" + "{indent}existing_comment={existing}\n" + ")" + ) + else: + templ = ( + "{prefix}drop_table_comment(\n" + "{indent}'{tname}',\n" + "{indent}existing_comment={existing},\n" + "{indent}schema={schema}\n" + ")" + ) + return templ.format( + prefix=_alembic_autogenerate_prefix(autogen_context), + tname=op.table_name, + existing=( + "%r" % op.existing_comment + if op.existing_comment is not None + else None + ), + schema="'%s'" % op.schema if op.schema is not None else None, + indent=" ", + ) + + +@renderers.dispatch_for(ops.CreateTableOp) +def _add_table(autogen_context: AutogenContext, op: ops.CreateTableOp) -> str: + table = op.to_table() + + args = [ + col + for col in [ + _render_column(col, autogen_context) for col in table.columns + ] + if col + ] + sorted( + [ + rcons + for rcons in [ + _render_constraint( + cons, autogen_context, op._namespace_metadata + ) + for cons in table.constraints + ] + if rcons is not None + ] + ) + + if len(args) > MAX_PYTHON_ARGS: + args_str = "*[" + ",\n".join(args) + "]" + else: + args_str = ",\n".join(args) + + text = 
"%(prefix)screate_table(%(tablename)r,\n%(args)s" % { + "tablename": _ident(op.table_name), + "prefix": _alembic_autogenerate_prefix(autogen_context), + "args": args_str, + } + if op.schema: + text += ",\nschema=%r" % _ident(op.schema) + + comment = table.comment + if comment: + text += ",\ncomment=%r" % _ident(comment) + + info = table.info + if info: + text += f",\ninfo={info!r}" + + for k in sorted(op.kw): + text += ",\n%s=%r" % (k.replace(" ", "_"), op.kw[k]) + + if table._prefixes: + prefixes = ", ".join("'%s'" % p for p in table._prefixes) + text += ",\nprefixes=[%s]" % prefixes + + if op.if_not_exists is not None: + text += ",\nif_not_exists=%r" % bool(op.if_not_exists) + + text += "\n)" + return text + + +@renderers.dispatch_for(ops.DropTableOp) +def _drop_table(autogen_context: AutogenContext, op: ops.DropTableOp) -> str: + text = "%(prefix)sdrop_table(%(tname)r" % { + "prefix": _alembic_autogenerate_prefix(autogen_context), + "tname": _ident(op.table_name), + } + if op.schema: + text += ", schema=%r" % _ident(op.schema) + + if op.if_exists is not None: + text += ", if_exists=%r" % bool(op.if_exists) + + text += ")" + return text + + +def _render_dialect_kwargs_items( + autogen_context: AutogenContext, dialect_kwargs: _DialectArgView +) -> list[str]: + return [ + f"{key}={_render_potential_expr(val, autogen_context)}" + for key, val in dialect_kwargs.items() + ] + + +@renderers.dispatch_for(ops.CreateIndexOp) +def _add_index(autogen_context: AutogenContext, op: ops.CreateIndexOp) -> str: + index = op.to_index() + + has_batch = autogen_context._has_batch + + if has_batch: + tmpl = ( + "%(prefix)screate_index(%(name)r, [%(columns)s], " + "unique=%(unique)r%(kwargs)s)" + ) + else: + tmpl = ( + "%(prefix)screate_index(%(name)r, %(table)r, [%(columns)s], " + "unique=%(unique)r%(schema)s%(kwargs)s)" + ) + + assert index.table is not None + + opts = _render_dialect_kwargs_items(autogen_context, index.dialect_kwargs) + if op.if_not_exists is not None: + opts.append("if_not_exists=%r" % bool(op.if_not_exists)) + text = tmpl % { + "prefix": _alembic_autogenerate_prefix(autogen_context), + "name": _render_gen_name(autogen_context, index.name), + "table": _ident(index.table.name), + "columns": ", ".join( + _get_index_rendered_expressions(index, autogen_context) + ), + "unique": index.unique or False, + "schema": ( + (", schema=%r" % _ident(index.table.schema)) + if index.table.schema + else "" + ), + "kwargs": ", " + ", ".join(opts) if opts else "", + } + return text + + +@renderers.dispatch_for(ops.DropIndexOp) +def _drop_index(autogen_context: AutogenContext, op: ops.DropIndexOp) -> str: + index = op.to_index() + + has_batch = autogen_context._has_batch + + if has_batch: + tmpl = "%(prefix)sdrop_index(%(name)r%(kwargs)s)" + else: + tmpl = ( + "%(prefix)sdrop_index(%(name)r, " + "table_name=%(table_name)r%(schema)s%(kwargs)s)" + ) + opts = _render_dialect_kwargs_items(autogen_context, index.dialect_kwargs) + if op.if_exists is not None: + opts.append("if_exists=%r" % bool(op.if_exists)) + text = tmpl % { + "prefix": _alembic_autogenerate_prefix(autogen_context), + "name": _render_gen_name(autogen_context, op.index_name), + "table_name": _ident(op.table_name), + "schema": ((", schema=%r" % _ident(op.schema)) if op.schema else ""), + "kwargs": ", " + ", ".join(opts) if opts else "", + } + return text + + +@renderers.dispatch_for(ops.CreateUniqueConstraintOp) +def _add_unique_constraint( + autogen_context: AutogenContext, op: ops.CreateUniqueConstraintOp +) -> List[str]: + return 
[_uq_constraint(op.to_constraint(), autogen_context, True)] + + +@renderers.dispatch_for(ops.CreateForeignKeyOp) +def _add_fk_constraint( + autogen_context: AutogenContext, op: ops.CreateForeignKeyOp +) -> str: + constraint = op.to_constraint() + args = [repr(_render_gen_name(autogen_context, op.constraint_name))] + if not autogen_context._has_batch: + args.append(repr(_ident(op.source_table))) + + args.extend( + [ + repr(_ident(op.referent_table)), + repr([_ident(col) for col in op.local_cols]), + repr([_ident(col) for col in op.remote_cols]), + ] + ) + kwargs = [ + "referent_schema", + "onupdate", + "ondelete", + "initially", + "deferrable", + "use_alter", + "match", + ] + if not autogen_context._has_batch: + kwargs.insert(0, "source_schema") + + for k in kwargs: + if k in op.kw: + value = op.kw[k] + if value is not None: + args.append("%s=%r" % (k, value)) + + dialect_kwargs = _render_dialect_kwargs_items( + autogen_context, constraint.dialect_kwargs + ) + + return "%(prefix)screate_foreign_key(%(args)s%(dialect_kwargs)s)" % { + "prefix": _alembic_autogenerate_prefix(autogen_context), + "args": ", ".join(args), + "dialect_kwargs": ( + ", " + ", ".join(dialect_kwargs) if dialect_kwargs else "" + ), + } + + +@renderers.dispatch_for(ops.CreatePrimaryKeyOp) +def _add_pk_constraint(constraint, autogen_context): + raise NotImplementedError() + + +@renderers.dispatch_for(ops.CreateCheckConstraintOp) +def _add_check_constraint(constraint, autogen_context): + raise NotImplementedError() + + +@renderers.dispatch_for(ops.DropConstraintOp) +def _drop_constraint( + autogen_context: AutogenContext, op: ops.DropConstraintOp +) -> str: + prefix = _alembic_autogenerate_prefix(autogen_context) + name = _render_gen_name(autogen_context, op.constraint_name) + schema = _ident(op.schema) if op.schema else None + type_ = _ident(op.constraint_type) if op.constraint_type else None + if_exists = op.if_exists + params_strs = [] + params_strs.append(repr(name)) + if not autogen_context._has_batch: + params_strs.append(repr(_ident(op.table_name))) + if schema is not None: + params_strs.append(f"schema={schema!r}") + if type_ is not None: + params_strs.append(f"type_={type_!r}") + if if_exists is not None: + params_strs.append(f"if_exists={if_exists}") + + return f"{prefix}drop_constraint({', '.join(params_strs)})" + + +@renderers.dispatch_for(ops.AddColumnOp) +def _add_column(autogen_context: AutogenContext, op: ops.AddColumnOp) -> str: + schema, tname, column, if_not_exists = ( + op.schema, + op.table_name, + op.column, + op.if_not_exists, + ) + if autogen_context._has_batch: + template = "%(prefix)sadd_column(%(column)s)" + else: + template = "%(prefix)sadd_column(%(tname)r, %(column)s" + if schema: + template += ", schema=%(schema)r" + if if_not_exists is not None: + template += ", if_not_exists=%(if_not_exists)r" + template += ")" + text = template % { + "prefix": _alembic_autogenerate_prefix(autogen_context), + "tname": tname, + "column": _render_column(column, autogen_context), + "schema": schema, + "if_not_exists": if_not_exists, + } + return text + + +@renderers.dispatch_for(ops.DropColumnOp) +def _drop_column(autogen_context: AutogenContext, op: ops.DropColumnOp) -> str: + schema, tname, column_name, if_exists = ( + op.schema, + op.table_name, + op.column_name, + op.if_exists, + ) + + if autogen_context._has_batch: + template = "%(prefix)sdrop_column(%(cname)r)" + else: + template = "%(prefix)sdrop_column(%(tname)r, %(cname)r" + if schema: + template += ", schema=%(schema)r" + if if_exists is not None: + 
template += ", if_exists=%(if_exists)r" + template += ")" + + text = template % { + "prefix": _alembic_autogenerate_prefix(autogen_context), + "tname": _ident(tname), + "cname": _ident(column_name), + "schema": _ident(schema), + "if_exists": if_exists, + } + return text + + +@renderers.dispatch_for(ops.AlterColumnOp) +def _alter_column( + autogen_context: AutogenContext, op: ops.AlterColumnOp +) -> str: + tname = op.table_name + cname = op.column_name + server_default = op.modify_server_default + type_ = op.modify_type + nullable = op.modify_nullable + comment = op.modify_comment + newname = op.modify_name + autoincrement = op.kw.get("autoincrement", None) + existing_type = op.existing_type + existing_nullable = op.existing_nullable + existing_comment = op.existing_comment + existing_server_default = op.existing_server_default + schema = op.schema + + indent = " " * 11 + + if autogen_context._has_batch: + template = "%(prefix)salter_column(%(cname)r" + else: + template = "%(prefix)salter_column(%(tname)r, %(cname)r" + + text = template % { + "prefix": _alembic_autogenerate_prefix(autogen_context), + "tname": tname, + "cname": cname, + } + if existing_type is not None: + text += ",\n%sexisting_type=%s" % ( + indent, + _repr_type(existing_type, autogen_context), + ) + if server_default is not False: + rendered = _render_server_default(server_default, autogen_context) + text += ",\n%sserver_default=%s" % (indent, rendered) + + if newname is not None: + text += ",\n%snew_column_name=%r" % (indent, newname) + if type_ is not None: + text += ",\n%stype_=%s" % (indent, _repr_type(type_, autogen_context)) + if nullable is not None: + text += ",\n%snullable=%r" % (indent, nullable) + if comment is not False: + text += ",\n%scomment=%r" % (indent, comment) + if existing_comment is not None: + text += ",\n%sexisting_comment=%r" % (indent, existing_comment) + if nullable is None and existing_nullable is not None: + text += ",\n%sexisting_nullable=%r" % (indent, existing_nullable) + if autoincrement is not None: + text += ",\n%sautoincrement=%r" % (indent, autoincrement) + if server_default is False and existing_server_default: + rendered = _render_server_default( + existing_server_default, autogen_context + ) + text += ",\n%sexisting_server_default=%s" % (indent, rendered) + if schema and not autogen_context._has_batch: + text += ",\n%sschema=%r" % (indent, schema) + text += ")" + return text + + +class _f_name: + def __init__(self, prefix: str, name: conv) -> None: + self.prefix = prefix + self.name = name + + def __repr__(self) -> str: + return "%sf(%r)" % (self.prefix, _ident(self.name)) + + +def _ident(name: Optional[Union[quoted_name, str]]) -> Optional[str]: + """produce a __repr__() object for a string identifier that may + use quoted_name() in SQLAlchemy 0.9 and greater. + + The issue worked around here is that quoted_name() doesn't have + very good repr() behavior by itself when unicode is involved. 
+ + """ + if name is None: + return name + elif isinstance(name, quoted_name): + return str(name) + elif isinstance(name, str): + return name + + +def _render_potential_expr( + value: Any, + autogen_context: AutogenContext, + *, + wrap_in_element: bool = True, + is_server_default: bool = False, + is_index: bool = False, +) -> str: + if isinstance(value, sql.ClauseElement): + sql_text = autogen_context.migration_context.impl.render_ddl_sql_expr( + value, is_server_default=is_server_default, is_index=is_index + ) + if wrap_in_element: + prefix = _sqlalchemy_autogenerate_prefix(autogen_context) + element = "literal_column" if is_index else "text" + value_str = f"{prefix}{element}({sql_text!r})" + if ( + is_index + and isinstance(value, Label) + and type(value.name) is str + ): + return value_str + f".label({value.name!r})" + else: + return value_str + else: + return repr(sql_text) + else: + return repr(value) + + +def _get_index_rendered_expressions( + idx: Index, autogen_context: AutogenContext +) -> List[str]: + return [ + ( + repr(_ident(getattr(exp, "name", None))) + if isinstance(exp, sa_schema.Column) + else _render_potential_expr(exp, autogen_context, is_index=True) + ) + for exp in idx.expressions + ] + + +def _uq_constraint( + constraint: UniqueConstraint, + autogen_context: AutogenContext, + alter: bool, +) -> str: + opts: List[Tuple[str, Any]] = [] + + has_batch = autogen_context._has_batch + + if constraint.deferrable: + opts.append(("deferrable", constraint.deferrable)) + if constraint.initially: + opts.append(("initially", constraint.initially)) + if not has_batch and alter and constraint.table.schema: + opts.append(("schema", _ident(constraint.table.schema))) + if not alter and constraint.name: + opts.append( + ("name", _render_gen_name(autogen_context, constraint.name)) + ) + dialect_options = _render_dialect_kwargs_items( + autogen_context, constraint.dialect_kwargs + ) + + if alter: + args = [repr(_render_gen_name(autogen_context, constraint.name))] + if not has_batch: + args += [repr(_ident(constraint.table.name))] + args.append(repr([_ident(col.name) for col in constraint.columns])) + args.extend(["%s=%r" % (k, v) for k, v in opts]) + args.extend(dialect_options) + return "%(prefix)screate_unique_constraint(%(args)s)" % { + "prefix": _alembic_autogenerate_prefix(autogen_context), + "args": ", ".join(args), + } + else: + args = [repr(_ident(col.name)) for col in constraint.columns] + args.extend(["%s=%r" % (k, v) for k, v in opts]) + args.extend(dialect_options) + return "%(prefix)sUniqueConstraint(%(args)s)" % { + "prefix": _sqlalchemy_autogenerate_prefix(autogen_context), + "args": ", ".join(args), + } + + +def _user_autogenerate_prefix(autogen_context, target): + prefix = autogen_context.opts["user_module_prefix"] + if prefix is None: + return "%s." % target.__module__ + else: + return prefix + + +def _sqlalchemy_autogenerate_prefix(autogen_context: AutogenContext) -> str: + return autogen_context.opts["sqlalchemy_module_prefix"] or "" + + +def _alembic_autogenerate_prefix(autogen_context: AutogenContext) -> str: + if autogen_context._has_batch: + return "batch_op." 
+ else: + return autogen_context.opts["alembic_module_prefix"] or "" + + +def _user_defined_render( + type_: str, object_: Any, autogen_context: AutogenContext +) -> Union[str, Literal[False]]: + if "render_item" in autogen_context.opts: + render = autogen_context.opts["render_item"] + if render: + rendered = render(type_, object_, autogen_context) + if rendered is not False: + return rendered + return False + + +def _render_column( + column: Column[Any], autogen_context: AutogenContext +) -> str: + rendered = _user_defined_render("column", column, autogen_context) + if rendered is not False: + return rendered + + args: List[str] = [] + opts: List[Tuple[str, Any]] = [] + + if column.server_default: + rendered = _render_server_default( # type:ignore[assignment] + column.server_default, autogen_context + ) + if rendered: + if _should_render_server_default_positionally( + column.server_default + ): + args.append(rendered) + else: + opts.append(("server_default", rendered)) + + if ( + column.autoincrement is not None + and column.autoincrement != sqla_compat.AUTOINCREMENT_DEFAULT + ): + opts.append(("autoincrement", column.autoincrement)) + + if column.nullable is not None: + opts.append(("nullable", column.nullable)) + + if column.system: + opts.append(("system", column.system)) + + comment = column.comment + if comment: + opts.append(("comment", "%r" % comment)) + + # TODO: for non-ascii colname, assign a "key" + return "%(prefix)sColumn(%(name)r, %(type)s, %(args)s%(kwargs)s)" % { + "prefix": _sqlalchemy_autogenerate_prefix(autogen_context), + "name": _ident(column.name), + "type": _repr_type(column.type, autogen_context), + "args": ", ".join([str(arg) for arg in args]) + ", " if args else "", + "kwargs": ( + ", ".join( + ["%s=%s" % (kwname, val) for kwname, val in opts] + + [ + "%s=%s" + % (key, _render_potential_expr(val, autogen_context)) + for key, val in column.kwargs.items() + ] + ) + ), + } + + +def _should_render_server_default_positionally(server_default: Any) -> bool: + return sqla_compat._server_default_is_computed( + server_default + ) or sqla_compat._server_default_is_identity(server_default) + + +def _render_server_default( + default: Optional[ + Union[FetchedValue, str, TextClause, ColumnElement[Any]] + ], + autogen_context: AutogenContext, + repr_: bool = True, +) -> Optional[str]: + rendered = _user_defined_render("server_default", default, autogen_context) + if rendered is not False: + return rendered + + if sqla_compat._server_default_is_computed(default): + return _render_computed(cast("Computed", default), autogen_context) + elif sqla_compat._server_default_is_identity(default): + return _render_identity(cast("Identity", default), autogen_context) + elif isinstance(default, sa_schema.DefaultClause): + if isinstance(default.arg, str): + default = default.arg + else: + return _render_potential_expr( + default.arg, autogen_context, is_server_default=True + ) + elif isinstance(default, sa_schema.FetchedValue): + return _render_fetched_value(autogen_context) + + if isinstance(default, str) and repr_: + default = repr(re.sub(r"^'|'$", "", default)) + + return cast(str, default) + + +def _render_computed( + computed: Computed, autogen_context: AutogenContext +) -> str: + text = _render_potential_expr( + computed.sqltext, autogen_context, wrap_in_element=False + ) + + kwargs = {} + if computed.persisted is not None: + kwargs["persisted"] = computed.persisted + return "%(prefix)sComputed(%(text)s, %(kwargs)s)" % { + "prefix": _sqlalchemy_autogenerate_prefix(autogen_context), + 
"text": text, + "kwargs": (", ".join("%s=%s" % pair for pair in kwargs.items())), + } + + +def _render_identity( + identity: Identity, autogen_context: AutogenContext +) -> str: + kwargs = sqla_compat._get_identity_options_dict( + identity, dialect_kwargs=True + ) + + return "%(prefix)sIdentity(%(kwargs)s)" % { + "prefix": _sqlalchemy_autogenerate_prefix(autogen_context), + "kwargs": (", ".join("%s=%s" % pair for pair in kwargs.items())), + } + + +def _render_fetched_value(autogen_context: AutogenContext) -> str: + return "%(prefix)sFetchedValue()" % { + "prefix": _sqlalchemy_autogenerate_prefix(autogen_context), + } + + +def _repr_type( + type_: TypeEngine, + autogen_context: AutogenContext, + _skip_variants: bool = False, +) -> str: + rendered = _user_defined_render("type", type_, autogen_context) + if rendered is not False: + return rendered + + if hasattr(autogen_context.migration_context, "impl"): + impl_rt = autogen_context.migration_context.impl.render_type( + type_, autogen_context + ) + else: + impl_rt = None + + mod = type(type_).__module__ + imports = autogen_context.imports + + if not _skip_variants and sqla_compat._type_has_variants(type_): + return _render_Variant_type(type_, autogen_context) + elif mod.startswith("sqlalchemy.dialects"): + match = re.match(r"sqlalchemy\.dialects\.(\w+)", mod) + assert match is not None + dname = match.group(1) + if imports is not None: + imports.add("from sqlalchemy.dialects import %s" % dname) + if impl_rt: + return impl_rt + else: + return "%s.%r" % (dname, type_) + elif impl_rt: + return impl_rt + elif mod.startswith("sqlalchemy."): + if "_render_%s_type" % type_.__visit_name__ in globals(): + fn = globals()["_render_%s_type" % type_.__visit_name__] + return fn(type_, autogen_context) + else: + prefix = _sqlalchemy_autogenerate_prefix(autogen_context) + return "%s%r" % (prefix, type_) + else: + prefix = _user_autogenerate_prefix(autogen_context, type_) + return "%s%r" % (prefix, type_) + + +def _render_ARRAY_type(type_: ARRAY, autogen_context: AutogenContext) -> str: + return cast( + str, + _render_type_w_subtype( + type_, autogen_context, "item_type", r"(.+?\()" + ), + ) + + +def _render_Variant_type( + type_: TypeEngine, autogen_context: AutogenContext +) -> str: + base_type, variant_mapping = sqla_compat._get_variant_mapping(type_) + base = _repr_type(base_type, autogen_context, _skip_variants=True) + assert base is not None and base is not False # type: ignore[comparison-overlap] # noqa:E501 + for dialect in sorted(variant_mapping): + typ = variant_mapping[dialect] + base += ".with_variant(%s, %r)" % ( + _repr_type(typ, autogen_context, _skip_variants=True), + dialect, + ) + return base + + +def _render_type_w_subtype( + type_: TypeEngine, + autogen_context: AutogenContext, + attrname: str, + regexp: str, + prefix: Optional[str] = None, +) -> Union[Optional[str], Literal[False]]: + outer_repr = repr(type_) + inner_type = getattr(type_, attrname, None) + if inner_type is None: + return False + + inner_repr = repr(inner_type) + + inner_repr = re.sub(r"([\(\)])", r"\\\1", inner_repr) + sub_type = _repr_type(getattr(type_, attrname), autogen_context) + outer_type = re.sub(regexp + inner_repr, r"\1%s" % sub_type, outer_repr) + + if prefix: + return "%s%s" % (prefix, outer_type) + + mod = type(type_).__module__ + if mod.startswith("sqlalchemy.dialects"): + match = re.match(r"sqlalchemy\.dialects\.(\w+)", mod) + assert match is not None + dname = match.group(1) + return "%s.%s" % (dname, outer_type) + elif mod.startswith("sqlalchemy"): + 
prefix = _sqlalchemy_autogenerate_prefix(autogen_context) + return "%s%s" % (prefix, outer_type) + else: + return None + + +_constraint_renderers = util.Dispatcher() + + +def _render_constraint( + constraint: Constraint, + autogen_context: AutogenContext, + namespace_metadata: Optional[MetaData], +) -> Optional[str]: + try: + renderer = _constraint_renderers.dispatch(constraint) + except ValueError: + util.warn("No renderer is established for object %r" % constraint) + return "[Unknown Python object %r]" % constraint + else: + return renderer(constraint, autogen_context, namespace_metadata) + + +@_constraint_renderers.dispatch_for(sa_schema.PrimaryKeyConstraint) +def _render_primary_key( + constraint: PrimaryKeyConstraint, + autogen_context: AutogenContext, + namespace_metadata: Optional[MetaData], +) -> Optional[str]: + rendered = _user_defined_render("primary_key", constraint, autogen_context) + if rendered is not False: + return rendered + + if not constraint.columns: + return None + + opts = [] + if constraint.name: + opts.append( + ("name", repr(_render_gen_name(autogen_context, constraint.name))) + ) + return "%(prefix)sPrimaryKeyConstraint(%(args)s)" % { + "prefix": _sqlalchemy_autogenerate_prefix(autogen_context), + "args": ", ".join( + [repr(c.name) for c in constraint.columns] + + ["%s=%s" % (kwname, val) for kwname, val in opts] + ), + } + + +def _fk_colspec( + fk: ForeignKey, + metadata_schema: Optional[str], + namespace_metadata: Optional[MetaData], +) -> str: + """Implement a 'safe' version of ForeignKey._get_colspec() that + won't fail if the remote table can't be resolved. + + """ + colspec = fk._get_colspec() + tokens = colspec.split(".") + tname, colname = tokens[-2:] + + if metadata_schema is not None and len(tokens) == 2: + table_fullname = "%s.%s" % (metadata_schema, tname) + else: + table_fullname = ".".join(tokens[0:-1]) + + if ( + not fk.link_to_name + and fk.parent is not None + and fk.parent.table is not None + ): + # try to resolve the remote table in order to adjust for column.key. + # the FK constraint needs to be rendered in terms of the column + # name. 
+ + if ( + namespace_metadata is not None + and table_fullname in namespace_metadata.tables + ): + col = namespace_metadata.tables[table_fullname].c.get(colname) + if col is not None: + colname = _ident(col.name) # type: ignore[assignment] + + colspec = "%s.%s" % (table_fullname, colname) + + return colspec + + +def _populate_render_fk_opts( + constraint: ForeignKeyConstraint, opts: List[Tuple[str, str]] +) -> None: + if constraint.onupdate: + opts.append(("onupdate", repr(constraint.onupdate))) + if constraint.ondelete: + opts.append(("ondelete", repr(constraint.ondelete))) + if constraint.initially: + opts.append(("initially", repr(constraint.initially))) + if constraint.deferrable: + opts.append(("deferrable", repr(constraint.deferrable))) + if constraint.use_alter: + opts.append(("use_alter", repr(constraint.use_alter))) + if constraint.match: + opts.append(("match", repr(constraint.match))) + + +@_constraint_renderers.dispatch_for(sa_schema.ForeignKeyConstraint) +def _render_foreign_key( + constraint: ForeignKeyConstraint, + autogen_context: AutogenContext, + namespace_metadata: Optional[MetaData], +) -> Optional[str]: + rendered = _user_defined_render("foreign_key", constraint, autogen_context) + if rendered is not False: + return rendered + + opts = [] + if constraint.name: + opts.append( + ("name", repr(_render_gen_name(autogen_context, constraint.name))) + ) + + _populate_render_fk_opts(constraint, opts) + + apply_metadata_schema = ( + namespace_metadata.schema if namespace_metadata is not None else None + ) + return ( + "%(prefix)sForeignKeyConstraint([%(cols)s], " + "[%(refcols)s], %(args)s)" + % { + "prefix": _sqlalchemy_autogenerate_prefix(autogen_context), + "cols": ", ".join( + repr(_ident(f.parent.name)) for f in constraint.elements + ), + "refcols": ", ".join( + repr(_fk_colspec(f, apply_metadata_schema, namespace_metadata)) + for f in constraint.elements + ), + "args": ", ".join( + ["%s=%s" % (kwname, val) for kwname, val in opts] + ), + } + ) + + +@_constraint_renderers.dispatch_for(sa_schema.UniqueConstraint) +def _render_unique_constraint( + constraint: UniqueConstraint, + autogen_context: AutogenContext, + namespace_metadata: Optional[MetaData], +) -> str: + rendered = _user_defined_render("unique", constraint, autogen_context) + if rendered is not False: + return rendered + + return _uq_constraint(constraint, autogen_context, False) + + +@_constraint_renderers.dispatch_for(sa_schema.CheckConstraint) +def _render_check_constraint( + constraint: CheckConstraint, + autogen_context: AutogenContext, + namespace_metadata: Optional[MetaData], +) -> Optional[str]: + rendered = _user_defined_render("check", constraint, autogen_context) + if rendered is not False: + return rendered + + # detect the constraint being part of + # a parent type which is probably in the Table already. + # ideally SQLAlchemy would give us more of a first class + # way to detect this. 
+ if ( + constraint._create_rule + and hasattr(constraint._create_rule, "target") + and isinstance( + constraint._create_rule.target, + sqltypes.TypeEngine, + ) + ): + return None + opts = [] + if constraint.name: + opts.append( + ("name", repr(_render_gen_name(autogen_context, constraint.name))) + ) + return "%(prefix)sCheckConstraint(%(sqltext)s%(opts)s)" % { + "prefix": _sqlalchemy_autogenerate_prefix(autogen_context), + "opts": ( + ", " + (", ".join("%s=%s" % (k, v) for k, v in opts)) + if opts + else "" + ), + "sqltext": _render_potential_expr( + constraint.sqltext, autogen_context, wrap_in_element=False + ), + } + + +@renderers.dispatch_for(ops.ExecuteSQLOp) +def _execute_sql(autogen_context: AutogenContext, op: ops.ExecuteSQLOp) -> str: + if not isinstance(op.sqltext, str): + raise NotImplementedError( + "Autogenerate rendering of SQL Expression language constructs " + "not supported here; please use a plain SQL string" + ) + return "{prefix}execute({sqltext!r})".format( + prefix=_alembic_autogenerate_prefix(autogen_context), + sqltext=op.sqltext, + ) + + +renderers = default_renderers.branch() diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/autogenerate/rewriter.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/autogenerate/rewriter.py new file mode 100644 index 0000000000000000000000000000000000000000..1d44b5c340b7efc63302e7a78e4ac84d6fe1d4fe --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/autogenerate/rewriter.py @@ -0,0 +1,240 @@ +from __future__ import annotations + +from typing import Any +from typing import Callable +from typing import Iterator +from typing import List +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING +from typing import Union + +from .. import util +from ..operations import ops + +if TYPE_CHECKING: + from ..operations.ops import AddColumnOp + from ..operations.ops import AlterColumnOp + from ..operations.ops import CreateTableOp + from ..operations.ops import DowngradeOps + from ..operations.ops import MigrateOperation + from ..operations.ops import MigrationScript + from ..operations.ops import ModifyTableOps + from ..operations.ops import OpContainer + from ..operations.ops import UpgradeOps + from ..runtime.migration import MigrationContext + from ..script.revision import _GetRevArg + +ProcessRevisionDirectiveFn = Callable[ + ["MigrationContext", "_GetRevArg", List["MigrationScript"]], None +] + + +class Rewriter: + """A helper object that allows easy 'rewriting' of ops streams. + + The :class:`.Rewriter` object is intended to be passed along + to the + :paramref:`.EnvironmentContext.configure.process_revision_directives` + parameter in an ``env.py`` script. Once constructed, any number + of "rewrites" functions can be associated with it, which will be given + the opportunity to modify the structure without having to have explicit + knowledge of the overall structure. + + The function is passed the :class:`.MigrationContext` object and + ``revision`` tuple that are passed to the :paramref:`.Environment + Context.configure.process_revision_directives` function normally, + and the third argument is an individual directive of the type + noted in the decorator. The function has the choice of returning + a single op directive, which normally can be the directive that + was actually passed, or a new directive to replace it, or a list + of zero or more directives to replace it. 
+ + .. seealso:: + + :ref:`autogen_rewriter` - usage example + + """ + + _traverse = util.Dispatcher() + + _chained: Tuple[Union[ProcessRevisionDirectiveFn, Rewriter], ...] = () + + def __init__(self) -> None: + self.dispatch = util.Dispatcher() + + def chain( + self, + other: Union[ + ProcessRevisionDirectiveFn, + Rewriter, + ], + ) -> Rewriter: + """Produce a "chain" of this :class:`.Rewriter` to another. + + This allows two or more rewriters to operate serially on a stream, + e.g.:: + + writer1 = autogenerate.Rewriter() + writer2 = autogenerate.Rewriter() + + + @writer1.rewrites(ops.AddColumnOp) + def add_column_nullable(context, revision, op): + op.column.nullable = True + return op + + + @writer2.rewrites(ops.AddColumnOp) + def add_column_idx(context, revision, op): + idx_op = ops.CreateIndexOp( + "ixc", op.table_name, [op.column.name] + ) + return [op, idx_op] + + writer = writer1.chain(writer2) + + :param other: a :class:`.Rewriter` instance + :return: a new :class:`.Rewriter` that will run the operations + of this writer, then the "other" writer, in succession. + + """ + wr = self.__class__.__new__(self.__class__) + wr.__dict__.update(self.__dict__) + wr._chained += (other,) + return wr + + def rewrites( + self, + operator: Union[ + Type[AddColumnOp], + Type[MigrateOperation], + Type[AlterColumnOp], + Type[CreateTableOp], + Type[ModifyTableOps], + ], + ) -> Callable[..., Any]: + """Register a function as rewriter for a given type. + + The function should receive three arguments, which are + the :class:`.MigrationContext`, a ``revision`` tuple, and + an op directive of the type indicated. E.g.:: + + @writer1.rewrites(ops.AddColumnOp) + def add_column_nullable(context, revision, op): + op.column.nullable = True + return op + + """ + return self.dispatch.dispatch_for(operator) + + def _rewrite( + self, + context: MigrationContext, + revision: _GetRevArg, + directive: MigrateOperation, + ) -> Iterator[MigrateOperation]: + try: + _rewriter = self.dispatch.dispatch(directive) + except ValueError: + _rewriter = None + yield directive + else: + if self in directive._mutations: + yield directive + else: + for r_directive in util.to_list( + _rewriter(context, revision, directive), [] + ): + r_directive._mutations = r_directive._mutations.union( + [self] + ) + yield r_directive + + def __call__( + self, + context: MigrationContext, + revision: _GetRevArg, + directives: List[MigrationScript], + ) -> None: + self.process_revision_directives(context, revision, directives) + for process_revision_directives in self._chained: + process_revision_directives(context, revision, directives) + + @_traverse.dispatch_for(ops.MigrationScript) + def _traverse_script( + self, + context: MigrationContext, + revision: _GetRevArg, + directive: MigrationScript, + ) -> None: + upgrade_ops_list: List[UpgradeOps] = [] + for upgrade_ops in directive.upgrade_ops_list: + ret = self._traverse_for(context, revision, upgrade_ops) + if len(ret) != 1: + raise ValueError( + "Can only return single object for UpgradeOps traverse" + ) + upgrade_ops_list.append(ret[0]) + + directive.upgrade_ops = upgrade_ops_list + + downgrade_ops_list: List[DowngradeOps] = [] + for downgrade_ops in directive.downgrade_ops_list: + ret = self._traverse_for(context, revision, downgrade_ops) + if len(ret) != 1: + raise ValueError( + "Can only return single object for DowngradeOps traverse" + ) + downgrade_ops_list.append(ret[0]) + directive.downgrade_ops = downgrade_ops_list + + @_traverse.dispatch_for(ops.OpContainer) + def 
_traverse_op_container( + self, + context: MigrationContext, + revision: _GetRevArg, + directive: OpContainer, + ) -> None: + self._traverse_list(context, revision, directive.ops) + + @_traverse.dispatch_for(ops.MigrateOperation) + def _traverse_any_directive( + self, + context: MigrationContext, + revision: _GetRevArg, + directive: MigrateOperation, + ) -> None: + pass + + def _traverse_for( + self, + context: MigrationContext, + revision: _GetRevArg, + directive: MigrateOperation, + ) -> Any: + directives = list(self._rewrite(context, revision, directive)) + for directive in directives: + traverser = self._traverse.dispatch(directive) + traverser(self, context, revision, directive) + return directives + + def _traverse_list( + self, + context: MigrationContext, + revision: _GetRevArg, + directives: Any, + ) -> None: + dest = [] + for directive in directives: + dest.extend(self._traverse_for(context, revision, directive)) + + directives[:] = dest + + def process_revision_directives( + self, + context: MigrationContext, + revision: _GetRevArg, + directives: List[MigrationScript], + ) -> None: + self._traverse_list(context, revision, directives) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/command.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/command.py new file mode 100644 index 0000000000000000000000000000000000000000..8e4854744ab32cd89f06c870a62a2cc711543c15 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/command.py @@ -0,0 +1,835 @@ +# mypy: allow-untyped-defs, allow-untyped-calls + +from __future__ import annotations + +import os +import pathlib +from typing import List +from typing import Optional +from typing import TYPE_CHECKING +from typing import Union + +from . import autogenerate as autogen +from . import util +from .runtime.environment import EnvironmentContext +from .script import ScriptDirectory +from .util import compat + +if TYPE_CHECKING: + from alembic.config import Config + from alembic.script.base import Script + from alembic.script.revision import _RevIdType + from .runtime.environment import ProcessRevisionDirectiveFn + + +def list_templates(config: Config) -> None: + """List available templates. + + :param config: a :class:`.Config` object. + + """ + + config.print_stdout("Available templates:\n") + for tempname in config._get_template_path().iterdir(): + with (tempname / "README").open() as readme: + synopsis = next(readme).rstrip() + config.print_stdout("%s - %s", tempname.name, synopsis) + + config.print_stdout("\nTemplates are used via the 'init' command, e.g.:") + config.print_stdout("\n alembic init --template generic ./scripts") + + +def init( + config: Config, + directory: str, + template: str = "generic", + package: bool = False, +) -> None: + """Initialize a new scripts directory. + + :param config: a :class:`.Config` object. + + :param directory: string path of the target directory. + + :param template: string name of the migration environment template to + use. + + :param package: when True, write ``__init__.py`` files into the + environment location as well as the versions/ location. 
+ + """ + + directory_path = pathlib.Path(directory) + if directory_path.exists() and list(directory_path.iterdir()): + raise util.CommandError( + "Directory %s already exists and is not empty" % directory_path + ) + + template_path = config._get_template_path() / template + + if not template_path.exists(): + raise util.CommandError(f"No such template {template_path}") + + # left as os.access() to suit unit test mocking + if not os.access(directory_path, os.F_OK): + with util.status( + f"Creating directory {directory_path.absolute()}", + **config.messaging_opts, + ): + os.makedirs(directory_path) + + versions = directory_path / "versions" + with util.status( + f"Creating directory {versions.absolute()}", + **config.messaging_opts, + ): + os.makedirs(versions) + + if not directory_path.is_absolute(): + # for non-absolute path, state config file in .ini / pyproject + # as relative to the %(here)s token, which is where the config + # file itself would be + + if config._config_file_path is not None: + rel_dir = compat.path_relative_to( + directory_path.absolute(), + config._config_file_path.absolute().parent, + walk_up=True, + ) + ini_script_location_directory = ("%(here)s" / rel_dir).as_posix() + if config._toml_file_path is not None: + rel_dir = compat.path_relative_to( + directory_path.absolute(), + config._toml_file_path.absolute().parent, + walk_up=True, + ) + toml_script_location_directory = ("%(here)s" / rel_dir).as_posix() + + else: + ini_script_location_directory = directory_path.as_posix() + toml_script_location_directory = directory_path.as_posix() + + script = ScriptDirectory(directory_path) + + has_toml = False + + config_file: pathlib.Path | None = None + + for file_path in template_path.iterdir(): + file_ = file_path.name + if file_ == "alembic.ini.mako": + assert config.config_file_name is not None + config_file = pathlib.Path(config.config_file_name).absolute() + if config_file.exists(): + util.msg( + f"File {config_file} already exists, skipping", + **config.messaging_opts, + ) + else: + script._generate_template( + file_path, + config_file, + script_location=ini_script_location_directory, + ) + elif file_ == "pyproject.toml.mako": + has_toml = True + assert config._toml_file_path is not None + toml_path = config._toml_file_path.absolute() + + if toml_path.exists(): + # left as open() to suit unit test mocking + with open(toml_path, "rb") as f: + toml_data = compat.tomllib.load(f) + if "tool" in toml_data and "alembic" in toml_data["tool"]: + + util.msg( + f"File {toml_path} already exists " + "and already has a [tool.alembic] section, " + "skipping", + ) + continue + script._append_template( + file_path, + toml_path, + script_location=toml_script_location_directory, + ) + else: + script._generate_template( + file_path, + toml_path, + script_location=toml_script_location_directory, + ) + + elif file_path.is_file(): + output_file = directory_path / file_ + script._copy_file(file_path, output_file) + + if package: + for path in [ + directory_path.absolute() / "__init__.py", + versions.absolute() / "__init__.py", + ]: + with util.status(f"Adding {path!s}", **config.messaging_opts): + # left as open() to suit unit test mocking + with open(path, "w"): + pass + + assert config_file is not None + + if has_toml: + util.msg( + f"Please edit configuration settings in {toml_path} and " + "configuration/connection/logging " + f"settings in {config_file} before proceeding.", + **config.messaging_opts, + ) + else: + util.msg( + "Please edit configuration/connection/logging " + f"settings 
in {config_file} before proceeding.", + **config.messaging_opts, + ) + + +def revision( + config: Config, + message: Optional[str] = None, + autogenerate: bool = False, + sql: bool = False, + head: str = "head", + splice: bool = False, + branch_label: Optional[_RevIdType] = None, + version_path: Union[str, os.PathLike[str], None] = None, + rev_id: Optional[str] = None, + depends_on: Optional[str] = None, + process_revision_directives: Optional[ProcessRevisionDirectiveFn] = None, +) -> Union[Optional[Script], List[Optional[Script]]]: + """Create a new revision file. + + :param config: a :class:`.Config` object. + + :param message: string message to apply to the revision; this is the + ``-m`` option to ``alembic revision``. + + :param autogenerate: whether or not to autogenerate the script from + the database; this is the ``--autogenerate`` option to + ``alembic revision``. + + :param sql: whether to dump the script out as a SQL string; when specified, + the script is dumped to stdout. This is the ``--sql`` option to + ``alembic revision``. + + :param head: head revision to build the new revision upon as a parent; + this is the ``--head`` option to ``alembic revision``. + + :param splice: whether or not the new revision should be made into a + new head of its own; is required when the given ``head`` is not itself + a head. This is the ``--splice`` option to ``alembic revision``. + + :param branch_label: string label to apply to the branch; this is the + ``--branch-label`` option to ``alembic revision``. + + :param version_path: string symbol identifying a specific version path + from the configuration; this is the ``--version-path`` option to + ``alembic revision``. + + :param rev_id: optional revision identifier to use instead of having + one generated; this is the ``--rev-id`` option to ``alembic revision``. + + :param depends_on: optional list of "depends on" identifiers; this is the + ``--depends-on`` option to ``alembic revision``. + + :param process_revision_directives: this is a callable that takes the + same form as the callable described at + :paramref:`.EnvironmentContext.configure.process_revision_directives`; + will be applied to the structure generated by the revision process + where it can be altered programmatically. Note that unlike all + the other parameters, this option is only available via programmatic + use of :func:`.command.revision`. 
+ + """ + + script_directory = ScriptDirectory.from_config(config) + + command_args = dict( + message=message, + autogenerate=autogenerate, + sql=sql, + head=head, + splice=splice, + branch_label=branch_label, + version_path=version_path, + rev_id=rev_id, + depends_on=depends_on, + ) + revision_context = autogen.RevisionContext( + config, + script_directory, + command_args, + process_revision_directives=process_revision_directives, + ) + + environment = util.asbool( + config.get_alembic_option("revision_environment") + ) + + if autogenerate: + environment = True + + if sql: + raise util.CommandError( + "Using --sql with --autogenerate does not make any sense" + ) + + def retrieve_migrations(rev, context): + revision_context.run_autogenerate(rev, context) + return [] + + elif environment: + + def retrieve_migrations(rev, context): + revision_context.run_no_autogenerate(rev, context) + return [] + + elif sql: + raise util.CommandError( + "Using --sql with the revision command when " + "revision_environment is not configured does not make any sense" + ) + + if environment: + with EnvironmentContext( + config, + script_directory, + fn=retrieve_migrations, + as_sql=sql, + template_args=revision_context.template_args, + revision_context=revision_context, + ): + script_directory.run_env() + + # the revision_context now has MigrationScript structure(s) present. + # these could theoretically be further processed / rewritten *here*, + # in addition to the hooks present within each run_migrations() call, + # or at the end of env.py run_migrations_online(). + + scripts = [script for script in revision_context.generate_scripts()] + if len(scripts) == 1: + return scripts[0] + else: + return scripts + + +def check(config: "Config") -> None: + """Check if revision command with autogenerate has pending upgrade ops. + + :param config: a :class:`.Config` object. + + .. versionadded:: 1.9.0 + + """ + + script_directory = ScriptDirectory.from_config(config) + + command_args = dict( + message=None, + autogenerate=True, + sql=False, + head="head", + splice=False, + branch_label=None, + version_path=None, + rev_id=None, + depends_on=None, + ) + revision_context = autogen.RevisionContext( + config, + script_directory, + command_args, + ) + + def retrieve_migrations(rev, context): + revision_context.run_autogenerate(rev, context) + return [] + + with EnvironmentContext( + config, + script_directory, + fn=retrieve_migrations, + as_sql=False, + template_args=revision_context.template_args, + revision_context=revision_context, + ): + script_directory.run_env() + + # the revision_context now has MigrationScript structure(s) present. + + migration_script = revision_context.generated_revisions[-1] + diffs = [] + for upgrade_ops in migration_script.upgrade_ops_list: + diffs.extend(upgrade_ops.as_diffs()) + + if diffs: + raise util.AutogenerateDiffsDetected( + f"New upgrade operations detected: {diffs}", + revision_context=revision_context, + diffs=diffs, + ) + else: + config.print_stdout("No new upgrade operations detected.") + + +def merge( + config: Config, + revisions: _RevIdType, + message: Optional[str] = None, + branch_label: Optional[_RevIdType] = None, + rev_id: Optional[str] = None, +) -> Optional[Script]: + """Merge two revisions together. Creates a new migration file. + + :param config: a :class:`.Config` instance + + :param revisions: The revisions to merge. + + :param message: string message to apply to the revision. + + :param branch_label: string label name to apply to the new revision. 
+ + :param rev_id: hardcoded revision identifier instead of generating a new + one. + + .. seealso:: + + :ref:`branches` + + """ + + script = ScriptDirectory.from_config(config) + template_args = { + "config": config # Let templates use config for + # e.g. multiple databases + } + + environment = util.asbool( + config.get_alembic_option("revision_environment") + ) + + if environment: + + def nothing(rev, context): + return [] + + with EnvironmentContext( + config, + script, + fn=nothing, + as_sql=False, + template_args=template_args, + ): + script.run_env() + + return script.generate_revision( + rev_id or util.rev_id(), + message, + refresh=True, + head=revisions, + branch_labels=branch_label, + **template_args, # type:ignore[arg-type] + ) + + +def upgrade( + config: Config, + revision: str, + sql: bool = False, + tag: Optional[str] = None, +) -> None: + """Upgrade to a later version. + + :param config: a :class:`.Config` instance. + + :param revision: string revision target or range for --sql mode. May be + ``"heads"`` to target the most recent revision(s). + + :param sql: if True, use ``--sql`` mode. + + :param tag: an arbitrary "tag" that can be intercepted by custom + ``env.py`` scripts via the :meth:`.EnvironmentContext.get_tag_argument` + method. + + """ + + script = ScriptDirectory.from_config(config) + + starting_rev = None + if ":" in revision: + if not sql: + raise util.CommandError("Range revision not allowed") + starting_rev, revision = revision.split(":", 2) + + def upgrade(rev, context): + return script._upgrade_revs(revision, rev) + + with EnvironmentContext( + config, + script, + fn=upgrade, + as_sql=sql, + starting_rev=starting_rev, + destination_rev=revision, + tag=tag, + ): + script.run_env() + + +def downgrade( + config: Config, + revision: str, + sql: bool = False, + tag: Optional[str] = None, +) -> None: + """Revert to a previous version. + + :param config: a :class:`.Config` instance. + + :param revision: string revision target or range for --sql mode. May + be ``"base"`` to target the first revision. + + :param sql: if True, use ``--sql`` mode. + + :param tag: an arbitrary "tag" that can be intercepted by custom + ``env.py`` scripts via the :meth:`.EnvironmentContext.get_tag_argument` + method. + + """ + + script = ScriptDirectory.from_config(config) + starting_rev = None + if ":" in revision: + if not sql: + raise util.CommandError("Range revision not allowed") + starting_rev, revision = revision.split(":", 2) + elif sql: + raise util.CommandError( + "downgrade with --sql requires :" + ) + + def downgrade(rev, context): + return script._downgrade_revs(revision, rev) + + with EnvironmentContext( + config, + script, + fn=downgrade, + as_sql=sql, + starting_rev=starting_rev, + destination_rev=revision, + tag=tag, + ): + script.run_env() + + +def show(config: Config, rev: str) -> None: + """Show the revision(s) denoted by the given symbol. + + :param config: a :class:`.Config` instance. + + :param rev: string revision target. May be ``"current"`` to show the + revision(s) currently applied in the database. 
+ + """ + + script = ScriptDirectory.from_config(config) + + if rev == "current": + + def show_current(rev, context): + for sc in script.get_revisions(rev): + config.print_stdout(sc.log_entry) + return [] + + with EnvironmentContext(config, script, fn=show_current): + script.run_env() + else: + for sc in script.get_revisions(rev): + config.print_stdout(sc.log_entry) + + +def history( + config: Config, + rev_range: Optional[str] = None, + verbose: bool = False, + indicate_current: bool = False, +) -> None: + """List changeset scripts in chronological order. + + :param config: a :class:`.Config` instance. + + :param rev_range: string revision range. + + :param verbose: output in verbose mode. + + :param indicate_current: indicate current revision. + + """ + base: Optional[str] + head: Optional[str] + script = ScriptDirectory.from_config(config) + if rev_range is not None: + if ":" not in rev_range: + raise util.CommandError( + "History range requires [start]:[end], " "[start]:, or :[end]" + ) + base, head = rev_range.strip().split(":") + else: + base = head = None + + environment = ( + util.asbool(config.get_alembic_option("revision_environment")) + or indicate_current + ) + + def _display_history(config, script, base, head, currents=()): + for sc in script.walk_revisions( + base=base or "base", head=head or "heads" + ): + if indicate_current: + sc._db_current_indicator = sc.revision in currents + + config.print_stdout( + sc.cmd_format( + verbose=verbose, + include_branches=True, + include_doc=True, + include_parents=True, + ) + ) + + def _display_history_w_current(config, script, base, head): + def _display_current_history(rev, context): + if head == "current": + _display_history(config, script, base, rev, rev) + elif base == "current": + _display_history(config, script, rev, head, rev) + else: + _display_history(config, script, base, head, rev) + return [] + + with EnvironmentContext(config, script, fn=_display_current_history): + script.run_env() + + if base == "current" or head == "current" or environment: + _display_history_w_current(config, script, base, head) + else: + _display_history(config, script, base, head) + + +def heads( + config: Config, verbose: bool = False, resolve_dependencies: bool = False +) -> None: + """Show current available heads in the script directory. + + :param config: a :class:`.Config` instance. + + :param verbose: output in verbose mode. + + :param resolve_dependencies: treat dependency version as down revisions. + + """ + + script = ScriptDirectory.from_config(config) + if resolve_dependencies: + heads = script.get_revisions("heads") + else: + heads = script.get_revisions(script.get_heads()) + + for rev in heads: + config.print_stdout( + rev.cmd_format( + verbose, include_branches=True, tree_indicators=False + ) + ) + + +def branches(config: Config, verbose: bool = False) -> None: + """Show current branch points. + + :param config: a :class:`.Config` instance. + + :param verbose: output in verbose mode. + + """ + script = ScriptDirectory.from_config(config) + for sc in script.walk_revisions(): + if sc.is_branch_point: + config.print_stdout( + "%s\n%s\n", + sc.cmd_format(verbose, include_branches=True), + "\n".join( + "%s -> %s" + % ( + " " * len(str(sc.revision)), + rev_obj.cmd_format( + False, include_branches=True, include_doc=verbose + ), + ) + for rev_obj in ( + script.get_revision(rev) for rev in sc.nextrev + ) + ), + ) + + +def current(config: Config, verbose: bool = False) -> None: + """Display the current revision for a database. 
+ + :param config: a :class:`.Config` instance. + + :param verbose: output in verbose mode. + + """ + + script = ScriptDirectory.from_config(config) + + def display_version(rev, context): + if verbose: + config.print_stdout( + "Current revision(s) for %s:", + util.obfuscate_url_pw(context.connection.engine.url), + ) + for rev in script.get_all_current(rev): + config.print_stdout(rev.cmd_format(verbose)) + + return [] + + with EnvironmentContext( + config, script, fn=display_version, dont_mutate=True + ): + script.run_env() + + +def stamp( + config: Config, + revision: _RevIdType, + sql: bool = False, + tag: Optional[str] = None, + purge: bool = False, +) -> None: + """'stamp' the revision table with the given revision; don't + run any migrations. + + :param config: a :class:`.Config` instance. + + :param revision: target revision or list of revisions. May be a list + to indicate stamping of multiple branch heads; may be ``"base"`` + to remove all revisions from the table or ``"heads"`` to stamp the + most recent revision(s). + + .. note:: this parameter is called "revisions" in the command line + interface. + + :param sql: use ``--sql`` mode + + :param tag: an arbitrary "tag" that can be intercepted by custom + ``env.py`` scripts via the :class:`.EnvironmentContext.get_tag_argument` + method. + + :param purge: delete all entries in the version table before stamping. + + """ + + script = ScriptDirectory.from_config(config) + + if sql: + destination_revs = [] + starting_rev = None + for _revision in util.to_list(revision): + if ":" in _revision: + srev, _revision = _revision.split(":", 2) + + if starting_rev != srev: + if starting_rev is None: + starting_rev = srev + else: + raise util.CommandError( + "Stamp operation with --sql only supports a " + "single starting revision at a time" + ) + destination_revs.append(_revision) + else: + destination_revs = util.to_list(revision) + + def do_stamp(rev, context): + return script._stamp_revs(util.to_tuple(destination_revs), rev) + + with EnvironmentContext( + config, + script, + fn=do_stamp, + as_sql=sql, + starting_rev=starting_rev if sql else None, + destination_rev=util.to_tuple(destination_revs), + tag=tag, + purge=purge, + ): + script.run_env() + + +def edit(config: Config, rev: str) -> None: + """Edit revision script(s) using $EDITOR. + + :param config: a :class:`.Config` instance. + + :param rev: target revision. + + """ + + script = ScriptDirectory.from_config(config) + + if rev == "current": + + def edit_current(rev, context): + if not rev: + raise util.CommandError("No current revisions") + for sc in script.get_revisions(rev): + util.open_in_editor(sc.path) + return [] + + with EnvironmentContext(config, script, fn=edit_current): + script.run_env() + else: + revs = script.get_revisions(rev) + if not revs: + raise util.CommandError( + "No revision files indicated by symbol '%s'" % rev + ) + for sc in revs: + assert sc + util.open_in_editor(sc.path) + + +def ensure_version(config: Config, sql: bool = False) -> None: + """Create the alembic version table if it doesn't exist already . + + :param config: a :class:`.Config` instance. + + :param sql: use ``--sql`` mode. + + .. 
versionadded:: 1.7.6 + + """ + + script = ScriptDirectory.from_config(config) + + def do_ensure_version(rev, context): + context._ensure_version_table() + return [] + + with EnvironmentContext( + config, + script, + fn=do_ensure_version, + as_sql=sql, + ): + script.run_env() diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/config.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/config.py new file mode 100644 index 0000000000000000000000000000000000000000..dcc64a4cfd9f0543d55a13b985ca16e1b1b132ba --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/config.py @@ -0,0 +1,1020 @@ +from __future__ import annotations + +from argparse import ArgumentParser +from argparse import Namespace +from configparser import ConfigParser +import inspect +import os +from pathlib import Path +import re +import sys +from typing import Any +from typing import cast +from typing import Dict +from typing import Mapping +from typing import Optional +from typing import overload +from typing import Protocol +from typing import Sequence +from typing import TextIO +from typing import Union + +from typing_extensions import TypedDict + +from . import __version__ +from . import command +from . import util +from .util import compat +from .util.pyfiles import _preserving_path_as_str + + +class Config: + r"""Represent an Alembic configuration. + + Within an ``env.py`` script, this is available + via the :attr:`.EnvironmentContext.config` attribute, + which in turn is available at ``alembic.context``:: + + from alembic import context + + some_param = context.config.get_main_option("my option") + + When invoking Alembic programmatically, a new + :class:`.Config` can be created by passing + the name of an .ini file to the constructor:: + + from alembic.config import Config + alembic_cfg = Config("/path/to/yourapp/alembic.ini") + + With a :class:`.Config` object, you can then + run Alembic commands programmatically using the directives + in :mod:`alembic.command`. + + The :class:`.Config` object can also be constructed without + a filename. Values can be set programmatically, and + new sections will be created as needed:: + + from alembic.config import Config + alembic_cfg = Config() + alembic_cfg.set_main_option("script_location", "myapp:migrations") + alembic_cfg.set_main_option("sqlalchemy.url", "postgresql://foo/bar") + alembic_cfg.set_section_option("mysection", "foo", "bar") + + .. warning:: + + When using programmatic configuration, make sure the + ``env.py`` file in use is compatible with the target configuration; + including that the call to Python ``logging.fileConfig()`` is + omitted if the programmatic configuration doesn't actually include + logging directives. + + For passing non-string values to environments, such as connections and + engines, use the :attr:`.Config.attributes` dictionary:: + + with engine.begin() as connection: + alembic_cfg.attributes['connection'] = connection + command.upgrade(alembic_cfg, "head") + + :param file\_: name of the .ini file to open if an ``alembic.ini`` is + to be used. This should refer to the ``alembic.ini`` file, either as + a filename or a full path to the file. This filename if passed must refer + to an **ini file in ConfigParser format** only. + + :param toml\_file: name of the pyproject.toml file to open if a + ``pyproject.toml`` file is to be used. 
This should refer to the + ``pyproject.toml`` file, either as a filename or a full path to the file. + This file must be in toml format. Both :paramref:`.Config.file\_` and + :paramref:`.Config.toml\_file` may be passed simultaneously, or + exclusively. + + .. versionadded:: 1.16.0 + + :param ini_section: name of the main Alembic section within the + .ini file + :param output_buffer: optional file-like input buffer which + will be passed to the :class:`.MigrationContext` - used to redirect + the output of "offline generation" when using Alembic programmatically. + :param stdout: buffer where the "print" output of commands will be sent. + Defaults to ``sys.stdout``. + + :param config_args: A dictionary of keys and values that will be used + for substitution in the alembic config file, as well as the pyproject.toml + file, depending on which / both are used. The dictionary as given is + **copied** to two new, independent dictionaries, stored locally under the + attributes ``.config_args`` and ``.toml_args``. Both of these + dictionaries will also be populated with the replacement variable + ``%(here)s``, which refers to the location of the .ini and/or .toml file + as appropriate. + + :param attributes: optional dictionary of arbitrary Python keys/values, + which will be populated into the :attr:`.Config.attributes` dictionary. + + .. seealso:: + + :ref:`connection_sharing` + + """ + + def __init__( + self, + file_: Union[str, os.PathLike[str], None] = None, + toml_file: Union[str, os.PathLike[str], None] = None, + ini_section: str = "alembic", + output_buffer: Optional[TextIO] = None, + stdout: TextIO = sys.stdout, + cmd_opts: Optional[Namespace] = None, + config_args: Mapping[str, Any] = util.immutabledict(), + attributes: Optional[Dict[str, Any]] = None, + ) -> None: + """Construct a new :class:`.Config`""" + self.config_file_name = ( + _preserving_path_as_str(file_) if file_ else None + ) + self.toml_file_name = ( + _preserving_path_as_str(toml_file) if toml_file else None + ) + self.config_ini_section = ini_section + self.output_buffer = output_buffer + self.stdout = stdout + self.cmd_opts = cmd_opts + self.config_args = dict(config_args) + self.toml_args = dict(config_args) + if attributes: + self.attributes.update(attributes) + + cmd_opts: Optional[Namespace] = None + """The command-line options passed to the ``alembic`` script. + + Within an ``env.py`` script this can be accessed via the + :attr:`.EnvironmentContext.config` attribute. + + .. seealso:: + + :meth:`.EnvironmentContext.get_x_argument` + + """ + + config_file_name: Optional[str] = None + """Filesystem path to the .ini file in use.""" + + toml_file_name: Optional[str] = None + """Filesystem path to the pyproject.toml file in use. + + .. versionadded:: 1.16.0 + + """ + + @property + def _config_file_path(self) -> Optional[Path]: + if self.config_file_name is None: + return None + return Path(self.config_file_name) + + @property + def _toml_file_path(self) -> Optional[Path]: + if self.toml_file_name is None: + return None + return Path(self.toml_file_name) + + config_ini_section: str = None # type:ignore[assignment] + """Name of the config file section to read basic configuration + from. Defaults to ``alembic``, that is the ``[alembic]`` section + of the .ini file. This value is modified using the ``-n/--name`` + option to the Alembic runner. + + """ + + @util.memoized_property + def attributes(self) -> Dict[str, Any]: + """A Python dictionary for storage of additional state. 
+ + + This is a utility dictionary which can include not just strings but + engines, connections, schema objects, or anything else. + Use this to pass objects into an env.py script, such as passing + a :class:`sqlalchemy.engine.base.Connection` when calling + commands from :mod:`alembic.command` programmatically. + + .. seealso:: + + :ref:`connection_sharing` + + :paramref:`.Config.attributes` + + """ + return {} + + def print_stdout(self, text: str, *arg: Any) -> None: + """Render a message to standard out. + + When :meth:`.Config.print_stdout` is called with additional args + those arguments will be formatted against the provided text, + otherwise we simply output the provided text verbatim. + + This is a no-op when the ``quiet`` messaging option is enabled. + + e.g.:: + + >>> config.print_stdout('Some text %s', 'arg') + Some text arg + + """ + + if arg: + output = str(text) % arg + else: + output = str(text) + + util.write_outstream(self.stdout, output, "\n", **self.messaging_opts) + + @util.memoized_property + def file_config(self) -> ConfigParser: + """Return the underlying ``ConfigParser`` object. + + Direct access to the .ini file is available here, + though the :meth:`.Config.get_section` and + :meth:`.Config.get_main_option` + methods provide a possibly simpler interface. + + """ + + if self._config_file_path: + here = self._config_file_path.absolute().parent + else: + here = Path() + self.config_args["here"] = here.as_posix() + file_config = ConfigParser(self.config_args) + if self._config_file_path: + compat.read_config_parser(file_config, [self._config_file_path]) + else: + file_config.add_section(self.config_ini_section) + return file_config + + @util.memoized_property + def toml_alembic_config(self) -> Mapping[str, Any]: + """Return a dictionary of the [tool.alembic] section from + pyproject.toml""" + + if self._toml_file_path and self._toml_file_path.exists(): + + here = self._toml_file_path.absolute().parent + self.toml_args["here"] = here.as_posix() + + with open(self._toml_file_path, "rb") as f: + toml_data = compat.tomllib.load(f) + data = toml_data.get("tool", {}).get("alembic", {}) + if not isinstance(data, dict): + raise util.CommandError("Incorrect TOML format") + return data + + else: + return {} + + def get_template_directory(self) -> str: + """Return the directory where Alembic setup templates are found. + + This method is used by the alembic ``init`` and ``list_templates`` + commands. + + """ + import alembic + + package_dir = Path(alembic.__file__).absolute().parent + return str(package_dir / "templates") + + def _get_template_path(self) -> Path: + """Return the directory where Alembic setup templates are found. + + This method is used by the alembic ``init`` and ``list_templates`` + commands. + + .. versionadded:: 1.16.0 + + """ + return Path(self.get_template_directory()) + + @overload + def get_section( + self, name: str, default: None = ... + ) -> Optional[Dict[str, str]]: ... + + # "default" here could also be a TypeVar + # _MT = TypeVar("_MT", bound=Mapping[str, str]), + # however mypy wasn't handling that correctly (pyright was) + @overload + def get_section( + self, name: str, default: Dict[str, str] + ) -> Dict[str, str]: ... + + @overload + def get_section( + self, name: str, default: Mapping[str, str] + ) -> Union[Dict[str, str], Mapping[str, str]]: ...
+ + def get_section( + self, name: str, default: Optional[Mapping[str, str]] = None + ) -> Optional[Mapping[str, str]]: + """Return all the configuration options from a given .ini file section + as a dictionary. + + If the given section does not exist, the value of ``default`` + is returned, which is expected to be a dictionary or other mapping. + + """ + if not self.file_config.has_section(name): + return default + + return dict(self.file_config.items(name)) + + def set_main_option(self, name: str, value: str) -> None: + """Set an option programmatically within the 'main' section. + + This overrides whatever was in the .ini file. + + :param name: name of the value + + :param value: the value. Note that this value is passed to + ``ConfigParser.set``, which supports variable interpolation using + pyformat (e.g. ``%(some_value)s``). A raw percent sign not part of + an interpolation symbol must therefore be escaped, e.g. ``%%``. + The given value may refer to another value already in the file + using the interpolation format. + + """ + self.set_section_option(self.config_ini_section, name, value) + + def remove_main_option(self, name: str) -> None: + self.file_config.remove_option(self.config_ini_section, name) + + def set_section_option(self, section: str, name: str, value: str) -> None: + """Set an option programmatically within the given section. + + The section is created if it doesn't exist already. + The value here will override whatever was in the .ini + file. + + Does **NOT** consume from the pyproject.toml file. + + .. seealso:: + + :meth:`.Config.get_alembic_option` - includes pyproject support + + :param section: name of the section + + :param name: name of the value + + :param value: the value. Note that this value is passed to + ``ConfigParser.set``, which supports variable interpolation using + pyformat (e.g. ``%(some_value)s``). A raw percent sign not part of + an interpolation symbol must therefore be escaped, e.g. ``%%``. + The given value may refer to another value already in the file + using the interpolation format. + + """ + + if not self.file_config.has_section(section): + self.file_config.add_section(section) + self.file_config.set(section, name, value) + + def get_section_option( + self, section: str, name: str, default: Optional[str] = None + ) -> Optional[str]: + """Return an option from the given section of the .ini file.""" + if not self.file_config.has_section(section): + raise util.CommandError( + "No config file %r found, or file has no " + "'[%s]' section" % (self.config_file_name, section) + ) + if self.file_config.has_option(section, name): + return self.file_config.get(section, name) + else: + return default + + @overload + def get_main_option(self, name: str, default: str) -> str: ... + + @overload + def get_main_option( + self, name: str, default: Optional[str] = None + ) -> Optional[str]: ... + + def get_main_option( + self, name: str, default: Optional[str] = None + ) -> Optional[str]: + """Return an option from the 'main' section of the .ini file. + + This defaults to being a key from the ``[alembic]`` + section, unless the ``-n/--name`` flag were used to + indicate a different section. + + Does **NOT** consume from the pyproject.toml file. + + .. seealso:: + + :meth:`.Config.get_alembic_option` - includes pyproject support + + """ + return self.get_section_option(self.config_ini_section, name, default) + + @overload + def get_alembic_option(self, name: str, default: str) -> str: ... 
+ + @overload + def get_alembic_option( + self, name: str, default: Optional[str] = None + ) -> Optional[str]: ... + + def get_alembic_option( + self, name: str, default: Optional[str] = None + ) -> Union[None, str, list[str], dict[str, str], list[dict[str, str]]]: + """Return an option from the "[alembic]" or "[tool.alembic]" section + of the configparser-parsed .ini file (e.g. ``alembic.ini``) or + toml-parsed ``pyproject.toml`` file. + + The value returned is expected to be None, string, list of strings, + or dictionary of strings. Within each type of string value, the + ``%(here)s`` token is substituted out with the absolute path of the + ``pyproject.toml`` file, as are other tokens which are extracted from + the :paramref:`.Config.config_args` dictionary. + + Searches always prioritize the configparser namespace first, before + searching in the toml namespace. + + If Alembic was run using the ``-n/--name`` flag to indicate an + alternate main section name, this is taken into account **only** for + the configparser-parsed .ini file. The section name in toml is always + ``[tool.alembic]``. + + + .. versionadded:: 1.16.0 + + """ + + if self.file_config.has_option(self.config_ini_section, name): + return self.file_config.get(self.config_ini_section, name) + else: + return self._get_toml_config_value(name, default=default) + + def get_alembic_boolean_option(self, name: str) -> bool: + if self.file_config.has_option(self.config_ini_section, name): + return ( + self.file_config.get(self.config_ini_section, name) == "true" + ) + else: + value = self.toml_alembic_config.get(name, False) + if not isinstance(value, bool): + raise util.CommandError( + f"boolean value expected for TOML parameter {name!r}" + ) + return value + + def _get_toml_config_value( + self, name: str, default: Optional[Any] = None + ) -> Union[None, str, list[str], dict[str, str], list[dict[str, str]]]: + USE_DEFAULT = object() + value: Union[None, str, list[str], dict[str, str]] = ( + self.toml_alembic_config.get(name, USE_DEFAULT) + ) + if value is USE_DEFAULT: + return default + if value is not None: + if isinstance(value, str): + value = value % (self.toml_args) + elif isinstance(value, list): + if value and isinstance(value[0], dict): + value = [ + {k: v % (self.toml_args) for k, v in dv.items()} + for dv in value + ] + else: + value = cast( + "list[str]", [v % (self.toml_args) for v in value] + ) + elif isinstance(value, dict): + value = cast( + "dict[str, str]", + {k: v % (self.toml_args) for k, v in value.items()}, + ) + else: + raise util.CommandError( + f"unsupported TOML value type for key: {name!r}" + ) + return value + + @util.memoized_property + def messaging_opts(self) -> MessagingOptions: + """The messaging options.""" + return cast( + MessagingOptions, + util.immutabledict( + {"quiet": getattr(self.cmd_opts, "quiet", False)} + ), + ) + + def _get_file_separator_char(self, *names: str) -> Optional[str]: + for name in names: + separator = self.get_main_option(name) + if separator is not None: + break + else: + return None + + split_on_path = { + "space": " ", + "newline": "\n", + "os": os.pathsep, + ":": ":", + ";": ";", + } + + try: + sep = split_on_path[separator] + except KeyError as ke: + raise ValueError( + "'%s' is not a valid value for %s; " + "expected 'space', 'newline', 'os', ':', ';'" + % (separator, name) + ) from ke + else: + if name == "version_path_separator": + util.warn_deprecated( + "The version_path_separator configuration parameter " + "is deprecated; please use path_separator" + ) + return 
sep + + def get_version_locations_list(self) -> Optional[list[str]]: + + version_locations_str = self.file_config.get( + self.config_ini_section, "version_locations", fallback=None + ) + + if version_locations_str: + split_char = self._get_file_separator_char( + "path_separator", "version_path_separator" + ) + + if split_char is None: + + # legacy behaviour for backwards compatibility + util.warn_deprecated( + "No path_separator found in configuration; " + "falling back to legacy splitting on spaces/commas " + "for version_locations. Consider adding " + "path_separator=os to Alembic config." + ) + + _split_on_space_comma = re.compile(r", *|(?: +)") + return _split_on_space_comma.split(version_locations_str) + else: + return [ + x.strip() + for x in version_locations_str.split(split_char) + if x + ] + else: + return cast( + "list[str]", + self._get_toml_config_value("version_locations", None), + ) + + def get_prepend_sys_paths_list(self) -> Optional[list[str]]: + prepend_sys_path_str = self.file_config.get( + self.config_ini_section, "prepend_sys_path", fallback=None + ) + + if prepend_sys_path_str: + split_char = self._get_file_separator_char("path_separator") + + if split_char is None: + + # legacy behaviour for backwards compatibility + util.warn_deprecated( + "No path_separator found in configuration; " + "falling back to legacy splitting on spaces, commas, " + "and colons for prepend_sys_path. Consider adding " + "path_separator=os to Alembic config." + ) + + _split_on_space_comma_colon = re.compile(r", *|(?: +)|\:") + return _split_on_space_comma_colon.split(prepend_sys_path_str) + else: + return [ + x.strip() + for x in prepend_sys_path_str.split(split_char) + if x + ] + else: + return cast( + "list[str]", + self._get_toml_config_value("prepend_sys_path", None), + ) + + def get_hooks_list(self) -> list[PostWriteHookConfig]: + + hooks: list[PostWriteHookConfig] = [] + + if not self.file_config.has_section("post_write_hooks"): + toml_hook_config = cast( + "list[dict[str, str]]", + self._get_toml_config_value("post_write_hooks", []), + ) + for cfg in toml_hook_config: + opts = dict(cfg) + opts["_hook_name"] = opts.pop("name") + hooks.append(opts) + + else: + _split_on_space_comma = re.compile(r", *|(?: +)") + ini_hook_config = self.get_section("post_write_hooks", {}) + names = _split_on_space_comma.split( + ini_hook_config.get("hooks", "") + ) + + for name in names: + if not name: + continue + opts = { + key[len(name) + 1 :]: ini_hook_config[key] + for key in ini_hook_config + if key.startswith(name + ".") + } + + opts["_hook_name"] = name + hooks.append(opts) + + return hooks + + +PostWriteHookConfig = Mapping[str, str] + + +class MessagingOptions(TypedDict, total=False): + quiet: bool + + +class CommandFunction(Protocol): + """A function that may be registered in the CLI as an alembic command. + It must be a named function and it must accept a :class:`.Config` object + as the first argument. + + .. versionadded:: 1.15.3 + + """ + + __name__: str + + def __call__(self, config: Config, *args: Any, **kwargs: Any) -> Any: ... 
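# Illustrative sketch (editor's example, not part of the vendored module):
# the CommandFunction protocol above, combined with
# CommandLine.register_command() defined further down in this file, allows a
# named function whose first parameter is a Config to be exposed as an
# ``alembic`` subcommand.  The "greet" command below is hypothetical; only
# CommandLine, Config, register_command and print_stdout come from this
# module, and "revision" is used because it is one of the recognized
# positional argument names.

from alembic.config import CommandLine, Config

def greet(config: Config, revision: str) -> None:
    """Print a greeting for the given revision symbol."""
    config.print_stdout("greeting revision %s", revision)

cli = CommandLine(prog="alembic")
cli.register_command(greet)  # then usable as: alembic greet somerev
cli.main()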
+ + +class CommandLine: + """Provides the command line interface to Alembic.""" + + def __init__(self, prog: Optional[str] = None) -> None: + self._generate_args(prog) + + _KWARGS_OPTS = { + "template": ( + "-t", + "--template", + dict( + default="generic", + type=str, + help="Setup template for use with 'init'", + ), + ), + "message": ( + "-m", + "--message", + dict(type=str, help="Message string to use with 'revision'"), + ), + "sql": ( + "--sql", + dict( + action="store_true", + help="Don't emit SQL to database - dump to " + "standard output/file instead. See docs on " + "offline mode.", + ), + ), + "tag": ( + "--tag", + dict( + type=str, + help="Arbitrary 'tag' name - can be used by " + "custom env.py scripts.", + ), + ), + "head": ( + "--head", + dict( + type=str, + help="Specify head revision or @head " + "to base new revision on.", + ), + ), + "splice": ( + "--splice", + dict( + action="store_true", + help="Allow a non-head revision as the 'head' to splice onto", + ), + ), + "depends_on": ( + "--depends-on", + dict( + action="append", + help="Specify one or more revision identifiers " + "which this revision should depend on.", + ), + ), + "rev_id": ( + "--rev-id", + dict( + type=str, + help="Specify a hardcoded revision id instead of " + "generating one", + ), + ), + "version_path": ( + "--version-path", + dict( + type=str, + help="Specify specific path from config for version file", + ), + ), + "branch_label": ( + "--branch-label", + dict( + type=str, + help="Specify a branch label to apply to the new revision", + ), + ), + "verbose": ( + "-v", + "--verbose", + dict(action="store_true", help="Use more verbose output"), + ), + "resolve_dependencies": ( + "--resolve-dependencies", + dict( + action="store_true", + help="Treat dependency versions as down revisions", + ), + ), + "autogenerate": ( + "--autogenerate", + dict( + action="store_true", + help="Populate revision script with candidate " + "migration operations, based on comparison " + "of database to model.", + ), + ), + "rev_range": ( + "-r", + "--rev-range", + dict( + action="store", + help="Specify a revision range; format is [start]:[end]", + ), + ), + "indicate_current": ( + "-i", + "--indicate-current", + dict( + action="store_true", + help="Indicate the current revision", + ), + ), + "purge": ( + "--purge", + dict( + action="store_true", + help="Unconditionally erase the version table before stamping", + ), + ), + "package": ( + "--package", + dict( + action="store_true", + help="Write empty __init__.py files to the " + "environment and version locations", + ), + ), + } + _POSITIONAL_OPTS = { + "directory": dict(help="location of scripts directory"), + "revision": dict( + help="revision identifier", + ), + "revisions": dict( + nargs="+", + help="one or more revisions, or 'heads' for all heads", + ), + } + _POSITIONAL_TRANSLATIONS: dict[Any, dict[str, str]] = { + command.stamp: {"revision": "revisions"} + } + + def _generate_args(self, prog: Optional[str]) -> None: + parser = ArgumentParser(prog=prog) + + parser.add_argument( + "--version", action="version", version="%%(prog)s %s" % __version__ + ) + parser.add_argument( + "-c", + "--config", + action="append", + help="Alternate config file; defaults to value of " + 'ALEMBIC_CONFIG environment variable, or "alembic.ini". ' + "May also refer to pyproject.toml file. 
May be specified twice " + "to reference both files separately", + ) + parser.add_argument( + "-n", + "--name", + type=str, + default="alembic", + help="Name of section in .ini file to use for Alembic config " + "(only applies to configparser config, not toml)", + ) + parser.add_argument( + "-x", + action="append", + help="Additional arguments consumed by " + "custom env.py scripts, e.g. -x " + "setting1=somesetting -x setting2=somesetting", + ) + parser.add_argument( + "--raiseerr", + action="store_true", + help="Raise a full stack trace on error", + ) + parser.add_argument( + "-q", + "--quiet", + action="store_true", + help="Do not log to std output.", + ) + + self.subparsers = parser.add_subparsers() + alembic_commands = ( + cast(CommandFunction, fn) + for fn in (getattr(command, name) for name in dir(command)) + if ( + inspect.isfunction(fn) + and fn.__name__[0] != "_" + and fn.__module__ == "alembic.command" + ) + ) + + for fn in alembic_commands: + self.register_command(fn) + + self.parser = parser + + def register_command(self, fn: CommandFunction) -> None: + """Registers a function as a CLI subcommand. The subcommand name + matches the function name, the arguments are extracted from the + signature and the help text is read from the docstring. + + .. versionadded:: 1.15.3 + + .. seealso:: + + :ref:`custom_commandline` + """ + + positional, kwarg, help_text = self._inspect_function(fn) + + subparser = self.subparsers.add_parser(fn.__name__, help=help_text) + subparser.set_defaults(cmd=(fn, positional, kwarg)) + + for arg in kwarg: + if arg in self._KWARGS_OPTS: + kwarg_opt = self._KWARGS_OPTS[arg] + args, opts = kwarg_opt[0:-1], kwarg_opt[-1] + subparser.add_argument(*args, **opts) # type:ignore + + for arg in positional: + opts = self._POSITIONAL_OPTS.get(arg, {}) + subparser.add_argument(arg, **opts) # type:ignore + + def _inspect_function(self, fn: CommandFunction) -> tuple[Any, Any, str]: + spec = compat.inspect_getfullargspec(fn) + if spec[3] is not None: + positional = spec[0][1 : -len(spec[3])] + kwarg = spec[0][-len(spec[3]) :] + else: + positional = spec[0][1:] + kwarg = [] + + if fn in self._POSITIONAL_TRANSLATIONS: + positional = [ + self._POSITIONAL_TRANSLATIONS[fn].get(name, name) + for name in positional + ] + + # parse first line(s) of helptext without a line break + help_ = fn.__doc__ + if help_: + help_lines = [] + for line in help_.split("\n"): + if not line.strip(): + break + else: + help_lines.append(line.strip()) + else: + help_lines = [] + + help_text = " ".join(help_lines) + + return positional, kwarg, help_text + + def run_cmd(self, config: Config, options: Namespace) -> None: + fn, positional, kwarg = options.cmd + + try: + fn( + config, + *[getattr(options, k, None) for k in positional], + **{k: getattr(options, k, None) for k in kwarg}, + ) + except util.CommandError as e: + if options.raiseerr: + raise + else: + util.err(str(e), **config.messaging_opts) + + def _inis_from_config(self, options: Namespace) -> tuple[str, str]: + names = options.config + + alembic_config_env = os.environ.get("ALEMBIC_CONFIG") + if ( + alembic_config_env + and os.path.basename(alembic_config_env) == "pyproject.toml" + ): + default_pyproject_toml = alembic_config_env + default_alembic_config = "alembic.ini" + elif alembic_config_env: + default_pyproject_toml = "pyproject.toml" + default_alembic_config = alembic_config_env + else: + default_alembic_config = "alembic.ini" + default_pyproject_toml = "pyproject.toml" + + if not names: + return default_pyproject_toml, 
default_alembic_config + + toml = ini = None + + for name in names: + if os.path.basename(name) == "pyproject.toml": + if toml is not None: + raise util.CommandError( + "pyproject.toml indicated more than once" + ) + toml = name + else: + if ini is not None: + raise util.CommandError( + "only one ini file may be indicated" + ) + ini = name + + return toml if toml else default_pyproject_toml, ( + ini if ini else default_alembic_config + ) + + def main(self, argv: Optional[Sequence[str]] = None) -> None: + """Executes the command line with the provided arguments.""" + options = self.parser.parse_args(argv) + if not hasattr(options, "cmd"): + # see http://bugs.python.org/issue9253, argparse + # behavior changed incompatibly in py3.3 + self.parser.error("too few arguments") + else: + toml, ini = self._inis_from_config(options) + cfg = Config( + file_=ini, + toml_file=toml, + ini_section=options.name, + cmd_opts=options, + ) + self.run_cmd(cfg, options) + + +def main( + argv: Optional[Sequence[str]] = None, + prog: Optional[str] = None, + **kwargs: Any, +) -> None: + """The console runner function for Alembic.""" + + CommandLine(prog=prog).main(argv=argv) + + +if __name__ == "__main__": + main() diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/context.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/context.py new file mode 100644 index 0000000000000000000000000000000000000000..758fca8756c8bac18ea91888b6de484a11618018 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/context.py @@ -0,0 +1,5 @@ +from .runtime.environment import EnvironmentContext + +# create proxy functions for +# each method on the EnvironmentContext class. 
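# A typical project env.py consumes these module-level proxies; a minimal
# illustrative sketch only (the sqlite URL and the "connection" attribute
# are placeholders supplied by the calling application, not part of this
# file):
#
#     from alembic import context
#
#     if context.is_offline_mode():
#         context.configure(url="sqlite:///example.db", literal_binds=True)
#     else:
#         context.configure(
#             connection=context.config.attributes["connection"]
#         )
#     with context.begin_transaction():
#         context.run_migrations()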
+EnvironmentContext.create_module_class_proxy(globals(), locals()) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/context.pyi b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/context.pyi new file mode 100644 index 0000000000000000000000000000000000000000..9117c31e8e3d7b4f2ee1badcc537b8346290edfe --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/context.pyi @@ -0,0 +1,856 @@ +# ### this file stubs are generated by tools/write_pyi.py - do not edit ### +# ### imports are manually managed +from __future__ import annotations + +from typing import Any +from typing import Callable +from typing import Collection +from typing import Dict +from typing import Iterable +from typing import List +from typing import Literal +from typing import Mapping +from typing import MutableMapping +from typing import Optional +from typing import overload +from typing import Sequence +from typing import TextIO +from typing import Tuple +from typing import TYPE_CHECKING +from typing import Union + +from typing_extensions import ContextManager + +if TYPE_CHECKING: + from sqlalchemy.engine.base import Connection + from sqlalchemy.engine.url import URL + from sqlalchemy.sql import Executable + from sqlalchemy.sql.schema import Column + from sqlalchemy.sql.schema import FetchedValue + from sqlalchemy.sql.schema import MetaData + from sqlalchemy.sql.schema import SchemaItem + from sqlalchemy.sql.type_api import TypeEngine + + from .autogenerate.api import AutogenContext + from .config import Config + from .operations.ops import MigrationScript + from .runtime.migration import _ProxyTransaction + from .runtime.migration import MigrationContext + from .runtime.migration import MigrationInfo + from .script import ScriptDirectory + +### end imports ### + +def begin_transaction() -> ( + Union[_ProxyTransaction, ContextManager[None, Optional[bool]]] +): + """Return a context manager that will + enclose an operation within a "transaction", + as defined by the environment's offline + and transactional DDL settings. + + e.g.:: + + with context.begin_transaction(): + context.run_migrations() + + :meth:`.begin_transaction` is intended to + "do the right thing" regardless of + calling context: + + * If :meth:`.is_transactional_ddl` is ``False``, + returns a "do nothing" context manager + which otherwise produces no transactional + state or directives. + * If :meth:`.is_offline_mode` is ``True``, + returns a context manager that will + invoke the :meth:`.DefaultImpl.emit_begin` + and :meth:`.DefaultImpl.emit_commit` + methods, which will produce the string + directives ``BEGIN`` and ``COMMIT`` on + the output stream, as rendered by the + target backend (e.g. SQL Server would + emit ``BEGIN TRANSACTION``). + * Otherwise, calls :meth:`sqlalchemy.engine.Connection.begin` + on the current online connection, which + returns a :class:`sqlalchemy.engine.Transaction` + object. This object demarcates a real + transaction and is itself a context manager, + which will roll back if an exception + is raised. + + Note that a custom ``env.py`` script which + has more specific transactional needs can of course + manipulate the :class:`~sqlalchemy.engine.Connection` + directly to produce transactional state in "online" + mode. 
+ + """ + +config: Config + +def configure( + connection: Optional[Connection] = None, + url: Union[str, URL, None] = None, + dialect_name: Optional[str] = None, + dialect_opts: Optional[Dict[str, Any]] = None, + transactional_ddl: Optional[bool] = None, + transaction_per_migration: bool = False, + output_buffer: Optional[TextIO] = None, + starting_rev: Optional[str] = None, + tag: Optional[str] = None, + template_args: Optional[Dict[str, Any]] = None, + render_as_batch: bool = False, + target_metadata: Union[MetaData, Sequence[MetaData], None] = None, + include_name: Optional[ + Callable[ + [ + Optional[str], + Literal[ + "schema", + "table", + "column", + "index", + "unique_constraint", + "foreign_key_constraint", + ], + MutableMapping[ + Literal[ + "schema_name", + "table_name", + "schema_qualified_table_name", + ], + Optional[str], + ], + ], + bool, + ] + ] = None, + include_object: Optional[ + Callable[ + [ + SchemaItem, + Optional[str], + Literal[ + "schema", + "table", + "column", + "index", + "unique_constraint", + "foreign_key_constraint", + ], + bool, + Optional[SchemaItem], + ], + bool, + ] + ] = None, + include_schemas: bool = False, + process_revision_directives: Optional[ + Callable[ + [ + MigrationContext, + Union[str, Iterable[Optional[str]], Iterable[str]], + List[MigrationScript], + ], + None, + ] + ] = None, + compare_type: Union[ + bool, + Callable[ + [ + MigrationContext, + Column[Any], + Column[Any], + TypeEngine[Any], + TypeEngine[Any], + ], + Optional[bool], + ], + ] = True, + compare_server_default: Union[ + bool, + Callable[ + [ + MigrationContext, + Column[Any], + Column[Any], + Optional[str], + Optional[FetchedValue], + Optional[str], + ], + Optional[bool], + ], + ] = False, + render_item: Optional[ + Callable[[str, Any, AutogenContext], Union[str, Literal[False]]] + ] = None, + literal_binds: bool = False, + upgrade_token: str = "upgrades", + downgrade_token: str = "downgrades", + alembic_module_prefix: str = "op.", + sqlalchemy_module_prefix: str = "sa.", + user_module_prefix: Optional[str] = None, + on_version_apply: Optional[ + Callable[ + [ + MigrationContext, + MigrationInfo, + Collection[Any], + Mapping[str, Any], + ], + None, + ] + ] = None, + **kw: Any, +) -> None: + """Configure a :class:`.MigrationContext` within this + :class:`.EnvironmentContext` which will provide database + connectivity and other configuration to a series of + migration scripts. + + Many methods on :class:`.EnvironmentContext` require that + this method has been called in order to function, as they + ultimately need to have database access or at least access + to the dialect in use. Those which do are documented as such. + + The important thing needed by :meth:`.configure` is a + means to determine what kind of database dialect is in use. + An actual connection to that database is needed only if + the :class:`.MigrationContext` is to be used in + "online" mode. + + If the :meth:`.is_offline_mode` function returns ``True``, + then no connection is needed here. Otherwise, the + ``connection`` parameter should be present as an + instance of :class:`sqlalchemy.engine.Connection`. + + This function is typically called from the ``env.py`` + script within a migration environment. It can be called + multiple times for an invocation. The most recent + :class:`~sqlalchemy.engine.Connection` + for which it was called is the one that will be operated upon + by the next call to :meth:`.run_migrations`. 
+ + General parameters: + + :param connection: a :class:`~sqlalchemy.engine.Connection` + to use + for SQL execution in "online" mode. When present, is also + used to determine the type of dialect in use. + :param url: a string database url, or a + :class:`sqlalchemy.engine.url.URL` object. + The type of dialect to be used will be derived from this if + ``connection`` is not passed. + :param dialect_name: string name of a dialect, such as + "postgresql", "mssql", etc. + The type of dialect to be used will be derived from this if + ``connection`` and ``url`` are not passed. + :param dialect_opts: dictionary of options to be passed to dialect + constructor. + :param transactional_ddl: Force the usage of "transactional" + DDL on or off; + this otherwise defaults to whether or not the dialect in + use supports it. + :param transaction_per_migration: if True, nest each migration script + in a transaction rather than the full series of migrations to + run. + :param output_buffer: a file-like object that will be used + for textual output + when the ``--sql`` option is used to generate SQL scripts. + Defaults to + ``sys.stdout`` if not passed here and also not present on + the :class:`.Config` + object. The value here overrides that of the :class:`.Config` + object. + :param output_encoding: when using ``--sql`` to generate SQL + scripts, apply this encoding to the string output. + :param literal_binds: when using ``--sql`` to generate SQL + scripts, pass through the ``literal_binds`` flag to the compiler + so that any literal values that would ordinarily be bound + parameters are converted to plain strings. + + .. warning:: Dialects can typically only handle simple datatypes + like strings and numbers for auto-literal generation. Datatypes + like dates, intervals, and others may still require manual + formatting, typically using :meth:`.Operations.inline_literal`. + + .. note:: the ``literal_binds`` flag is ignored on SQLAlchemy + versions prior to 0.8 where this feature is not supported. + + .. seealso:: + + :meth:`.Operations.inline_literal` + + :param starting_rev: Override the "starting revision" argument + when using ``--sql`` mode. + :param tag: a string tag for usage by custom ``env.py`` scripts. + Set via the ``--tag`` option, can be overridden here. + :param template_args: dictionary of template arguments which + will be added to the template argument environment when + running the "revision" command. Note that the script environment + is only run within the "revision" command if the --autogenerate + option is used, or if the option "revision_environment=true" + is present in the alembic.ini file. + + :param version_table: The name of the Alembic version table. + The default is ``'alembic_version'``. + :param version_table_schema: Optional schema to place version + table within. + :param version_table_pk: boolean, whether the Alembic version table + should use a primary key constraint for the "value" column; this + only takes effect when the table is first created. + Defaults to True; setting to False should not be necessary and is + here for backwards compatibility reasons. + :param on_version_apply: a callable or collection of callables to be + run for each migration step. + The callables will be run in the order they are given, once for + each migration step, after the respective operation has been + applied but before its transaction is finalized. 
+ Each callable accepts no positional arguments and the following + keyword arguments: + + * ``ctx``: the :class:`.MigrationContext` running the migration, + * ``step``: a :class:`.MigrationInfo` representing the + step currently being applied, + * ``heads``: a collection of version strings representing the + current heads, + * ``run_args``: the ``**kwargs`` passed to :meth:`.run_migrations`. + + Parameters specific to the autogenerate feature, when + ``alembic revision`` is run with the ``--autogenerate`` feature: + + :param target_metadata: a :class:`sqlalchemy.schema.MetaData` + object, or a sequence of :class:`~sqlalchemy.schema.MetaData` + objects, that will be consulted during autogeneration. + The tables present in each :class:`~sqlalchemy.schema.MetaData` + will be compared against + what is locally available on the target + :class:`~sqlalchemy.engine.Connection` + to produce candidate upgrade/downgrade operations. + :param compare_type: Indicates type comparison behavior during + an autogenerate + operation. Defaults to ``True`` turning on type comparison, which + has good accuracy on most backends. See :ref:`compare_types` + for an example as well as information on other type + comparison options. Set to ``False`` which disables type + comparison. A callable can also be passed to provide custom type + comparison, see :ref:`compare_types` for additional details. + + .. versionchanged:: 1.12.0 The default value of + :paramref:`.EnvironmentContext.configure.compare_type` has been + changed to ``True``. + + .. seealso:: + + :ref:`compare_types` + + :paramref:`.EnvironmentContext.configure.compare_server_default` + + :param compare_server_default: Indicates server default comparison + behavior during + an autogenerate operation. Defaults to ``False`` which disables + server default + comparison. Set to ``True`` to turn on server default comparison, + which has + varied accuracy depending on backend. + + To customize server default comparison behavior, a callable may + be specified + which can filter server default comparisons during an + autogenerate operation. + defaults during an autogenerate operation. The format of this + callable is:: + + def my_compare_server_default(context, inspected_column, + metadata_column, inspected_default, metadata_default, + rendered_metadata_default): + # return True if the defaults are different, + # False if not, or None to allow the default implementation + # to compare these defaults + return None + + context.configure( + # ... + compare_server_default = my_compare_server_default + ) + + ``inspected_column`` is a dictionary structure as returned by + :meth:`sqlalchemy.engine.reflection.Inspector.get_columns`, whereas + ``metadata_column`` is a :class:`sqlalchemy.schema.Column` from + the local model environment. + + A return value of ``None`` indicates to allow default server default + comparison + to proceed. Note that some backends such as Postgresql actually + execute + the two defaults on the database side to compare for equivalence. + + .. seealso:: + + :paramref:`.EnvironmentContext.configure.compare_type` + + :param include_name: A callable function which is given + the chance to return ``True`` or ``False`` for any database reflected + object based on its name, including database schema names when + the :paramref:`.EnvironmentContext.configure.include_schemas` flag + is set to ``True``. + + The function accepts the following positional arguments: + + * ``name``: the name of the object, such as schema name or table name. 
+ Will be ``None`` when indicating the default schema name of the + database connection. + * ``type``: a string describing the type of object; currently + ``"schema"``, ``"table"``, ``"column"``, ``"index"``, + ``"unique_constraint"``, or ``"foreign_key_constraint"`` + * ``parent_names``: a dictionary of "parent" object names, that are + relative to the name being given. Keys in this dictionary may + include: ``"schema_name"``, ``"table_name"`` or + ``"schema_qualified_table_name"``. + + E.g.:: + + def include_name(name, type_, parent_names): + if type_ == "schema": + return name in ["schema_one", "schema_two"] + else: + return True + + context.configure( + # ... + include_schemas = True, + include_name = include_name + ) + + .. seealso:: + + :ref:`autogenerate_include_hooks` + + :paramref:`.EnvironmentContext.configure.include_object` + + :paramref:`.EnvironmentContext.configure.include_schemas` + + + :param include_object: A callable function which is given + the chance to return ``True`` or ``False`` for any object, + indicating if the given object should be considered in the + autogenerate sweep. + + The function accepts the following positional arguments: + + * ``object``: a :class:`~sqlalchemy.schema.SchemaItem` object such + as a :class:`~sqlalchemy.schema.Table`, + :class:`~sqlalchemy.schema.Column`, + :class:`~sqlalchemy.schema.Index` + :class:`~sqlalchemy.schema.UniqueConstraint`, + or :class:`~sqlalchemy.schema.ForeignKeyConstraint` object + * ``name``: the name of the object. This is typically available + via ``object.name``. + * ``type``: a string describing the type of object; currently + ``"table"``, ``"column"``, ``"index"``, ``"unique_constraint"``, + or ``"foreign_key_constraint"`` + * ``reflected``: ``True`` if the given object was produced based on + table reflection, ``False`` if it's from a local :class:`.MetaData` + object. + * ``compare_to``: the object being compared against, if available, + else ``None``. + + E.g.:: + + def include_object(object, name, type_, reflected, compare_to): + if (type_ == "column" and + not reflected and + object.info.get("skip_autogenerate", False)): + return False + else: + return True + + context.configure( + # ... + include_object = include_object + ) + + For the use case of omitting specific schemas from a target database + when :paramref:`.EnvironmentContext.configure.include_schemas` is + set to ``True``, the :attr:`~sqlalchemy.schema.Table.schema` + attribute can be checked for each :class:`~sqlalchemy.schema.Table` + object passed to the hook, however it is much more efficient + to filter on schemas before reflection of objects takes place + using the :paramref:`.EnvironmentContext.configure.include_name` + hook. + + .. seealso:: + + :ref:`autogenerate_include_hooks` + + :paramref:`.EnvironmentContext.configure.include_name` + + :paramref:`.EnvironmentContext.configure.include_schemas` + + :param render_as_batch: if True, commands which alter elements + within a table will be placed under a ``with batch_alter_table():`` + directive, so that batch migrations will take place. + + .. seealso:: + + :ref:`batch_migrations` + + :param include_schemas: If True, autogenerate will scan across + all schemas located by the SQLAlchemy + :meth:`~sqlalchemy.engine.reflection.Inspector.get_schema_names` + method, and include all differences in tables found across all + those schemas. 
When using this option, you may want to also + use the :paramref:`.EnvironmentContext.configure.include_name` + parameter to specify a callable which + can filter the tables/schemas that get included. + + .. seealso:: + + :ref:`autogenerate_include_hooks` + + :paramref:`.EnvironmentContext.configure.include_name` + + :paramref:`.EnvironmentContext.configure.include_object` + + :param render_item: Callable that can be used to override how + any schema item, i.e. column, constraint, type, + etc., is rendered for autogenerate. The callable receives a + string describing the type of object, the object, and + the autogen context. If it returns False, the + default rendering method will be used. If it returns None, + the item will not be rendered in the context of a Table + construct, that is, can be used to skip columns or constraints + within op.create_table():: + + def my_render_column(type_, col, autogen_context): + if type_ == "column" and isinstance(col, MySpecialCol): + return repr(col) + else: + return False + + context.configure( + # ... + render_item = my_render_column + ) + + Available values for the type string include: ``"column"``, + ``"primary_key"``, ``"foreign_key"``, ``"unique"``, ``"check"``, + ``"type"``, ``"server_default"``. + + .. seealso:: + + :ref:`autogen_render_types` + + :param upgrade_token: When autogenerate completes, the text of the + candidate upgrade operations will be present in this template + variable when ``script.py.mako`` is rendered. Defaults to + ``upgrades``. + :param downgrade_token: When autogenerate completes, the text of the + candidate downgrade operations will be present in this + template variable when ``script.py.mako`` is rendered. Defaults to + ``downgrades``. + + :param alembic_module_prefix: When autogenerate refers to Alembic + :mod:`alembic.operations` constructs, this prefix will be used + (i.e. ``op.create_table``) Defaults to "``op.``". + Can be ``None`` to indicate no prefix. + + :param sqlalchemy_module_prefix: When autogenerate refers to + SQLAlchemy + :class:`~sqlalchemy.schema.Column` or type classes, this prefix + will be used + (i.e. ``sa.Column("somename", sa.Integer)``) Defaults to "``sa.``". + Can be ``None`` to indicate no prefix. + Note that when dialect-specific types are rendered, autogenerate + will render them using the dialect module name, i.e. ``mssql.BIT()``, + ``postgresql.UUID()``. + + :param user_module_prefix: When autogenerate refers to a SQLAlchemy + type (e.g. :class:`.TypeEngine`) where the module name is not + under the ``sqlalchemy`` namespace, this prefix will be used + within autogenerate. If left at its default of + ``None``, the ``__module__`` attribute of the type is used to + render the import module. It's a good practice to set this + and to have all custom types be available from a fixed module space, + in order to future-proof migration files against reorganizations + in modules. + + .. seealso:: + + :ref:`autogen_module_prefix` + + :param process_revision_directives: a callable function that will + be passed a structure representing the end result of an autogenerate + or plain "revision" operation, which can be manipulated to affect + how the ``alembic revision`` command ultimately outputs new + revision scripts. The structure of the callable is:: + + def process_revision_directives(context, revision, directives): + pass + + The ``directives`` parameter is a Python list containing + a single :class:`.MigrationScript` directive, which represents + the revision file to be generated. 
This list as well as its + contents may be freely modified to produce any set of commands. + The section :ref:`customizing_revision` shows an example of + doing this. The ``context`` parameter is the + :class:`.MigrationContext` in use, + and ``revision`` is a tuple of revision identifiers representing the + current revision of the database. + + The callable is invoked at all times when the ``--autogenerate`` + option is passed to ``alembic revision``. If ``--autogenerate`` + is not passed, the callable is invoked only if the + ``revision_environment`` variable is set to True in the Alembic + configuration, in which case the given ``directives`` collection + will contain empty :class:`.UpgradeOps` and :class:`.DowngradeOps` + collections for ``.upgrade_ops`` and ``.downgrade_ops``. The + ``--autogenerate`` option itself can be inferred by inspecting + ``context.config.cmd_opts.autogenerate``. + + The callable function may optionally be an instance of + a :class:`.Rewriter` object. This is a helper object that + assists in the production of autogenerate-stream rewriter functions. + + .. seealso:: + + :ref:`customizing_revision` + + :ref:`autogen_rewriter` + + :paramref:`.command.revision.process_revision_directives` + + Parameters specific to individual backends: + + :param mssql_batch_separator: The "batch separator" which will + be placed between each statement when generating offline SQL Server + migrations. Defaults to ``GO``. Note this is in addition to the + customary semicolon ``;`` at the end of each statement; SQL Server + considers the "batch separator" to denote the end of an + individual statement execution, and cannot group certain + dependent operations in one step. + :param oracle_batch_separator: The "batch separator" which will + be placed between each statement when generating offline + Oracle migrations. Defaults to ``/``. Oracle doesn't add a + semicolon between statements like most other backends. + + """ + +def execute( + sql: Union[Executable, str], + execution_options: Optional[Dict[str, Any]] = None, +) -> None: + """Execute the given SQL using the current change context. + + The behavior of :meth:`.execute` is the same + as that of :meth:`.Operations.execute`. Please see that + function's documentation for full detail including + caveats and limitations. + + This function requires that a :class:`.MigrationContext` has + first been made available via :meth:`.configure`. + + """ + +def get_bind() -> Connection: + """Return the current 'bind'. + + In "online" mode, this is the + :class:`sqlalchemy.engine.Connection` currently being used + to emit SQL to the database. + + This function requires that a :class:`.MigrationContext` + has first been made available via :meth:`.configure`. + + """ + +def get_context() -> MigrationContext: + """Return the current :class:`.MigrationContext` object. + + If :meth:`.EnvironmentContext.configure` has not been + called yet, raises an exception. + + """ + +def get_head_revision() -> Union[str, Tuple[str, ...], None]: + """Return the hex identifier of the 'head' script revision. + + If the script directory has multiple heads, this + method raises a :class:`.CommandError`; + :meth:`.EnvironmentContext.get_head_revisions` should be preferred. + + This function does not require that the :class:`.MigrationContext` + has been configured. + + .. seealso:: :meth:`.EnvironmentContext.get_head_revisions` + + """ + +def get_head_revisions() -> Union[str, Tuple[str, ...], None]: + """Return the hex identifier of the 'heads' script revision(s). 
+ + This returns a tuple containing the version number of all + heads in the script directory. + + This function does not require that the :class:`.MigrationContext` + has been configured. + + """ + +def get_revision_argument() -> Union[str, Tuple[str, ...], None]: + """Get the 'destination' revision argument. + + This is typically the argument passed to the + ``upgrade`` or ``downgrade`` command. + + If it was specified as ``head``, the actual + version number is returned; if specified + as ``base``, ``None`` is returned. + + This function does not require that the :class:`.MigrationContext` + has been configured. + + """ + +def get_starting_revision_argument() -> Union[str, Tuple[str, ...], None]: + """Return the 'starting revision' argument, + if the revision was passed using ``start:end``. + + This is only meaningful in "offline" mode. + Returns ``None`` if no value is available + or was configured. + + This function does not require that the :class:`.MigrationContext` + has been configured. + + """ + +def get_tag_argument() -> Optional[str]: + """Return the value passed for the ``--tag`` argument, if any. + + The ``--tag`` argument is not used directly by Alembic, + but is available for custom ``env.py`` configurations that + wish to use it; particularly for offline generation scripts + that wish to generate tagged filenames. + + This function does not require that the :class:`.MigrationContext` + has been configured. + + .. seealso:: + + :meth:`.EnvironmentContext.get_x_argument` - a newer and more + open ended system of extending ``env.py`` scripts via the command + line. + + """ + +@overload +def get_x_argument(as_dictionary: Literal[False]) -> List[str]: ... +@overload +def get_x_argument(as_dictionary: Literal[True]) -> Dict[str, str]: ... +@overload +def get_x_argument( + as_dictionary: bool = ..., +) -> Union[List[str], Dict[str, str]]: + """Return the value(s) passed for the ``-x`` argument, if any. + + The ``-x`` argument is an open ended flag that allows any user-defined + value or values to be passed on the command line, then available + here for consumption by a custom ``env.py`` script. + + The return value is a list, returned directly from the ``argparse`` + structure. If ``as_dictionary=True`` is passed, the ``x`` arguments + are parsed using ``key=value`` format into a dictionary that is + then returned. If there is no ``=`` in the argument, value is an empty + string. + + .. versionchanged:: 1.13.1 Support ``as_dictionary=True`` when + arguments are passed without the ``=`` symbol. + + For example, to support passing a database URL on the command line, + the standard ``env.py`` script can be modified like this:: + + cmd_line_url = context.get_x_argument( + as_dictionary=True).get('dbname') + if cmd_line_url: + engine = create_engine(cmd_line_url) + else: + engine = engine_from_config( + config.get_section(config.config_ini_section), + prefix='sqlalchemy.', + poolclass=pool.NullPool) + + This then takes effect by running the ``alembic`` script as:: + + alembic -x dbname=postgresql://user:pass@host/dbname upgrade head + + This function does not require that the :class:`.MigrationContext` + has been configured. + + .. seealso:: + + :meth:`.EnvironmentContext.get_tag_argument` + + :attr:`.Config.cmd_opts` + + """ + +def is_offline_mode() -> bool: + """Return True if the current migrations environment + is running in "offline mode". + + This is ``True`` or ``False`` depending + on the ``--sql`` flag passed. 
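+
+    For example, the standard ``env.py`` produced by the templates branches on
+    this flag (a minimal sketch; ``run_migrations_offline`` and
+    ``run_migrations_online`` are names defined by the user's ``env.py``, not
+    by this module)::
+
+        if context.is_offline_mode():
+            run_migrations_offline()
+        else:
+            run_migrations_online()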
+ + This function does not require that the :class:`.MigrationContext` + has been configured. + + """ + +def is_transactional_ddl() -> bool: + """Return True if the context is configured to expect a + transactional DDL capable backend. + + This defaults to the type of database in use, and + can be overridden by the ``transactional_ddl`` argument + to :meth:`.configure` + + This function requires that a :class:`.MigrationContext` + has first been made available via :meth:`.configure`. + + """ + +def run_migrations(**kw: Any) -> None: + """Run migrations as determined by the current command line + configuration + as well as versioning information present (or not) in the current + database connection (if one is present). + + The function accepts optional ``**kw`` arguments. If these are + passed, they are sent directly to the ``upgrade()`` and + ``downgrade()`` + functions within each target revision file. By modifying the + ``script.py.mako`` file so that the ``upgrade()`` and ``downgrade()`` + functions accept arguments, parameters can be passed here so that + contextual information, usually information to identify a particular + database in use, can be passed from a custom ``env.py`` script + to the migration functions. + + This function requires that a :class:`.MigrationContext` has + first been made available via :meth:`.configure`. + + """ + +script: ScriptDirectory + +def static_output(text: str) -> None: + """Emit text directly to the "offline" SQL stream. + + Typically this is for emitting comments that + start with --. The statement is not treated + as a SQL execution, no ; or batch separator + is added, etc. + + """ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/ddl/__init__.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/ddl/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f2f72b3dd8d3748b36cb7acfcda7abf8468b6926 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/ddl/__init__.py @@ -0,0 +1,6 @@ +from . import mssql +from . import mysql +from . import oracle +from . import postgresql +from . import sqlite +from .impl import DefaultImpl as DefaultImpl diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/ddl/_autogen.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/ddl/_autogen.py new file mode 100644 index 0000000000000000000000000000000000000000..74715b18a8bfd8b727ee14e8ed3d290de7169d7b --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/ddl/_autogen.py @@ -0,0 +1,329 @@ +# mypy: allow-untyped-defs, allow-incomplete-defs, allow-untyped-calls +# mypy: no-warn-return-any, allow-any-generics + +from __future__ import annotations + +from typing import Any +from typing import ClassVar +from typing import Dict +from typing import Generic +from typing import NamedTuple +from typing import Optional +from typing import Sequence +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING +from typing import TypeVar +from typing import Union + +from sqlalchemy.sql.schema import Constraint +from sqlalchemy.sql.schema import ForeignKeyConstraint +from sqlalchemy.sql.schema import Index +from sqlalchemy.sql.schema import UniqueConstraint +from typing_extensions import TypeGuard + +from .. 
import util +from ..util import sqla_compat + +if TYPE_CHECKING: + from typing import Literal + + from alembic.autogenerate.api import AutogenContext + from alembic.ddl.impl import DefaultImpl + +CompareConstraintType = Union[Constraint, Index] + +_C = TypeVar("_C", bound=CompareConstraintType) + +_clsreg: Dict[str, Type[_constraint_sig]] = {} + + +class ComparisonResult(NamedTuple): + status: Literal["equal", "different", "skip"] + message: str + + @property + def is_equal(self) -> bool: + return self.status == "equal" + + @property + def is_different(self) -> bool: + return self.status == "different" + + @property + def is_skip(self) -> bool: + return self.status == "skip" + + @classmethod + def Equal(cls) -> ComparisonResult: + """the constraints are equal.""" + return cls("equal", "The two constraints are equal") + + @classmethod + def Different(cls, reason: Union[str, Sequence[str]]) -> ComparisonResult: + """the constraints are different for the provided reason(s).""" + return cls("different", ", ".join(util.to_list(reason))) + + @classmethod + def Skip(cls, reason: Union[str, Sequence[str]]) -> ComparisonResult: + """the constraint cannot be compared for the provided reason(s). + + The message is logged, but the constraints will be otherwise + considered equal, meaning that no migration command will be + generated. + """ + return cls("skip", ", ".join(util.to_list(reason))) + + +class _constraint_sig(Generic[_C]): + const: _C + + _sig: Tuple[Any, ...] + name: Optional[sqla_compat._ConstraintNameDefined] + + impl: DefaultImpl + + _is_index: ClassVar[bool] = False + _is_fk: ClassVar[bool] = False + _is_uq: ClassVar[bool] = False + + _is_metadata: bool + + def __init_subclass__(cls) -> None: + cls._register() + + @classmethod + def _register(cls): + raise NotImplementedError() + + def __init__( + self, is_metadata: bool, impl: DefaultImpl, const: _C + ) -> None: + raise NotImplementedError() + + def compare_to_reflected( + self, other: _constraint_sig[Any] + ) -> ComparisonResult: + assert self.impl is other.impl + assert self._is_metadata + assert not other._is_metadata + + return self._compare_to_reflected(other) + + def _compare_to_reflected( + self, other: _constraint_sig[_C] + ) -> ComparisonResult: + raise NotImplementedError() + + @classmethod + def from_constraint( + cls, is_metadata: bool, impl: DefaultImpl, constraint: _C + ) -> _constraint_sig[_C]: + # these could be cached by constraint/impl, however, if the + # constraint is modified in place, then the sig is wrong. the mysql + # impl currently does this, and if we fixed that we can't be sure + # someone else might do it too, so play it safe. 
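+        # dispatch on ``__visit_name__`` ("unique_constraint", "index" or
+        # "foreign_key_constraint"); each subclass maps its visit name to
+        # itself in _clsreg via _register().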
+ sig = _clsreg[constraint.__visit_name__](is_metadata, impl, constraint) + return sig + + def md_name_to_sql_name(self, context: AutogenContext) -> Optional[str]: + return sqla_compat._get_constraint_final_name( + self.const, context.dialect + ) + + @util.memoized_property + def is_named(self): + return sqla_compat._constraint_is_named(self.const, self.impl.dialect) + + @util.memoized_property + def unnamed(self) -> Tuple[Any, ...]: + return self._sig + + @util.memoized_property + def unnamed_no_options(self) -> Tuple[Any, ...]: + raise NotImplementedError() + + @util.memoized_property + def _full_sig(self) -> Tuple[Any, ...]: + return (self.name,) + self.unnamed + + def __eq__(self, other) -> bool: + return self._full_sig == other._full_sig + + def __ne__(self, other) -> bool: + return self._full_sig != other._full_sig + + def __hash__(self) -> int: + return hash(self._full_sig) + + +class _uq_constraint_sig(_constraint_sig[UniqueConstraint]): + _is_uq = True + + @classmethod + def _register(cls) -> None: + _clsreg["unique_constraint"] = cls + + is_unique = True + + def __init__( + self, + is_metadata: bool, + impl: DefaultImpl, + const: UniqueConstraint, + ) -> None: + self.impl = impl + self.const = const + self.name = sqla_compat.constraint_name_or_none(const.name) + self._sig = tuple(sorted([col.name for col in const.columns])) + self._is_metadata = is_metadata + + @property + def column_names(self) -> Tuple[str, ...]: + return tuple([col.name for col in self.const.columns]) + + def _compare_to_reflected( + self, other: _constraint_sig[_C] + ) -> ComparisonResult: + assert self._is_metadata + metadata_obj = self + conn_obj = other + + assert is_uq_sig(conn_obj) + return self.impl.compare_unique_constraint( + metadata_obj.const, conn_obj.const + ) + + +class _ix_constraint_sig(_constraint_sig[Index]): + _is_index = True + + name: sqla_compat._ConstraintName + + @classmethod + def _register(cls) -> None: + _clsreg["index"] = cls + + def __init__( + self, is_metadata: bool, impl: DefaultImpl, const: Index + ) -> None: + self.impl = impl + self.const = const + self.name = const.name + self.is_unique = bool(const.unique) + self._is_metadata = is_metadata + + def _compare_to_reflected( + self, other: _constraint_sig[_C] + ) -> ComparisonResult: + assert self._is_metadata + metadata_obj = self + conn_obj = other + + assert is_index_sig(conn_obj) + return self.impl.compare_indexes(metadata_obj.const, conn_obj.const) + + @util.memoized_property + def has_expressions(self): + return sqla_compat.is_expression_index(self.const) + + @util.memoized_property + def column_names(self) -> Tuple[str, ...]: + return tuple([col.name for col in self.const.columns]) + + @util.memoized_property + def column_names_optional(self) -> Tuple[Optional[str], ...]: + return tuple( + [getattr(col, "name", None) for col in self.const.expressions] + ) + + @util.memoized_property + def is_named(self): + return True + + @util.memoized_property + def unnamed(self): + return (self.is_unique,) + self.column_names_optional + + +class _fk_constraint_sig(_constraint_sig[ForeignKeyConstraint]): + _is_fk = True + + @classmethod + def _register(cls) -> None: + _clsreg["foreign_key_constraint"] = cls + + def __init__( + self, + is_metadata: bool, + impl: DefaultImpl, + const: ForeignKeyConstraint, + ) -> None: + self._is_metadata = is_metadata + + self.impl = impl + self.const = const + + self.name = sqla_compat.constraint_name_or_none(const.name) + + ( + self.source_schema, + self.source_table, + self.source_columns, + 
self.target_schema, + self.target_table, + self.target_columns, + onupdate, + ondelete, + deferrable, + initially, + ) = sqla_compat._fk_spec(const) + + self._sig: Tuple[Any, ...] = ( + self.source_schema, + self.source_table, + tuple(self.source_columns), + self.target_schema, + self.target_table, + tuple(self.target_columns), + ) + ( + ( + (None if onupdate.lower() == "no action" else onupdate.lower()) + if onupdate + else None + ), + ( + (None if ondelete.lower() == "no action" else ondelete.lower()) + if ondelete + else None + ), + # convert initially + deferrable into one three-state value + ( + "initially_deferrable" + if initially and initially.lower() == "deferred" + else "deferrable" if deferrable else "not deferrable" + ), + ) + + @util.memoized_property + def unnamed_no_options(self): + return ( + self.source_schema, + self.source_table, + tuple(self.source_columns), + self.target_schema, + self.target_table, + tuple(self.target_columns), + ) + + +def is_index_sig(sig: _constraint_sig) -> TypeGuard[_ix_constraint_sig]: + return sig._is_index + + +def is_uq_sig(sig: _constraint_sig) -> TypeGuard[_uq_constraint_sig]: + return sig._is_uq + + +def is_fk_sig(sig: _constraint_sig) -> TypeGuard[_fk_constraint_sig]: + return sig._is_fk diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/ddl/base.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/ddl/base.py new file mode 100644 index 0000000000000000000000000000000000000000..ad2847eb2f76066264f2218ede2e173032082f92 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/ddl/base.py @@ -0,0 +1,364 @@ +# mypy: allow-untyped-defs, allow-incomplete-defs, allow-untyped-calls +# mypy: no-warn-return-any, allow-any-generics + +from __future__ import annotations + +import functools +from typing import Optional +from typing import TYPE_CHECKING +from typing import Union + +from sqlalchemy import exc +from sqlalchemy import Integer +from sqlalchemy import types as sqltypes +from sqlalchemy.ext.compiler import compiles +from sqlalchemy.schema import Column +from sqlalchemy.schema import DDLElement +from sqlalchemy.sql.elements import quoted_name + +from ..util.sqla_compat import _columns_for_constraint # noqa +from ..util.sqla_compat import _find_columns # noqa +from ..util.sqla_compat import _fk_spec # noqa +from ..util.sqla_compat import _is_type_bound # noqa +from ..util.sqla_compat import _table_for_constraint # noqa + +if TYPE_CHECKING: + from typing import Any + + from sqlalchemy import Computed + from sqlalchemy import Identity + from sqlalchemy.sql.compiler import Compiled + from sqlalchemy.sql.compiler import DDLCompiler + from sqlalchemy.sql.elements import TextClause + from sqlalchemy.sql.functions import Function + from sqlalchemy.sql.schema import FetchedValue + from sqlalchemy.sql.type_api import TypeEngine + + from .impl import DefaultImpl + +_ServerDefault = Union["TextClause", "FetchedValue", "Function[Any]", str] + + +class AlterTable(DDLElement): + """Represent an ALTER TABLE statement. + + Only the string name and optional schema name of the table + is required, not a full Table object. 
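+
+    As an illustrative sketch (not part of this module's API), the
+    :class:`.DefaultImpl` methods construct and execute these elements rather
+    than full :class:`~sqlalchemy.schema.Table` objects, e.g.::
+
+        impl._exec(RenameTable("old_name", "new_name", schema=None))
+
+    where ``"old_name"`` and ``"new_name"`` are placeholder table names.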
+ + """ + + def __init__( + self, + table_name: str, + schema: Optional[Union[quoted_name, str]] = None, + ) -> None: + self.table_name = table_name + self.schema = schema + + +class RenameTable(AlterTable): + def __init__( + self, + old_table_name: str, + new_table_name: Union[quoted_name, str], + schema: Optional[Union[quoted_name, str]] = None, + ) -> None: + super().__init__(old_table_name, schema=schema) + self.new_table_name = new_table_name + + +class AlterColumn(AlterTable): + def __init__( + self, + name: str, + column_name: str, + schema: Optional[str] = None, + existing_type: Optional[TypeEngine] = None, + existing_nullable: Optional[bool] = None, + existing_server_default: Optional[_ServerDefault] = None, + existing_comment: Optional[str] = None, + ) -> None: + super().__init__(name, schema=schema) + self.column_name = column_name + self.existing_type = ( + sqltypes.to_instance(existing_type) + if existing_type is not None + else None + ) + self.existing_nullable = existing_nullable + self.existing_server_default = existing_server_default + self.existing_comment = existing_comment + + +class ColumnNullable(AlterColumn): + def __init__( + self, name: str, column_name: str, nullable: bool, **kw + ) -> None: + super().__init__(name, column_name, **kw) + self.nullable = nullable + + +class ColumnType(AlterColumn): + def __init__( + self, name: str, column_name: str, type_: TypeEngine, **kw + ) -> None: + super().__init__(name, column_name, **kw) + self.type_ = sqltypes.to_instance(type_) + + +class ColumnName(AlterColumn): + def __init__( + self, name: str, column_name: str, newname: str, **kw + ) -> None: + super().__init__(name, column_name, **kw) + self.newname = newname + + +class ColumnDefault(AlterColumn): + def __init__( + self, + name: str, + column_name: str, + default: Optional[_ServerDefault], + **kw, + ) -> None: + super().__init__(name, column_name, **kw) + self.default = default + + +class ComputedColumnDefault(AlterColumn): + def __init__( + self, name: str, column_name: str, default: Optional[Computed], **kw + ) -> None: + super().__init__(name, column_name, **kw) + self.default = default + + +class IdentityColumnDefault(AlterColumn): + def __init__( + self, + name: str, + column_name: str, + default: Optional[Identity], + impl: DefaultImpl, + **kw, + ) -> None: + super().__init__(name, column_name, **kw) + self.default = default + self.impl = impl + + +class AddColumn(AlterTable): + def __init__( + self, + name: str, + column: Column[Any], + schema: Optional[Union[quoted_name, str]] = None, + if_not_exists: Optional[bool] = None, + ) -> None: + super().__init__(name, schema=schema) + self.column = column + self.if_not_exists = if_not_exists + + +class DropColumn(AlterTable): + def __init__( + self, + name: str, + column: Column[Any], + schema: Optional[str] = None, + if_exists: Optional[bool] = None, + ) -> None: + super().__init__(name, schema=schema) + self.column = column + self.if_exists = if_exists + + +class ColumnComment(AlterColumn): + def __init__( + self, name: str, column_name: str, comment: Optional[str], **kw + ) -> None: + super().__init__(name, column_name, **kw) + self.comment = comment + + +@compiles(RenameTable) +def visit_rename_table( + element: RenameTable, compiler: DDLCompiler, **kw +) -> str: + return "%s RENAME TO %s" % ( + alter_table(compiler, element.table_name, element.schema), + format_table_name(compiler, element.new_table_name, element.schema), + ) + + +@compiles(AddColumn) +def visit_add_column(element: AddColumn, compiler: 
DDLCompiler, **kw) -> str: + return "%s %s" % ( + alter_table(compiler, element.table_name, element.schema), + add_column( + compiler, element.column, if_not_exists=element.if_not_exists, **kw + ), + ) + + +@compiles(DropColumn) +def visit_drop_column(element: DropColumn, compiler: DDLCompiler, **kw) -> str: + return "%s %s" % ( + alter_table(compiler, element.table_name, element.schema), + drop_column( + compiler, element.column.name, if_exists=element.if_exists, **kw + ), + ) + + +@compiles(ColumnNullable) +def visit_column_nullable( + element: ColumnNullable, compiler: DDLCompiler, **kw +) -> str: + return "%s %s %s" % ( + alter_table(compiler, element.table_name, element.schema), + alter_column(compiler, element.column_name), + "DROP NOT NULL" if element.nullable else "SET NOT NULL", + ) + + +@compiles(ColumnType) +def visit_column_type(element: ColumnType, compiler: DDLCompiler, **kw) -> str: + return "%s %s %s" % ( + alter_table(compiler, element.table_name, element.schema), + alter_column(compiler, element.column_name), + "TYPE %s" % format_type(compiler, element.type_), + ) + + +@compiles(ColumnName) +def visit_column_name(element: ColumnName, compiler: DDLCompiler, **kw) -> str: + return "%s RENAME %s TO %s" % ( + alter_table(compiler, element.table_name, element.schema), + format_column_name(compiler, element.column_name), + format_column_name(compiler, element.newname), + ) + + +@compiles(ColumnDefault) +def visit_column_default( + element: ColumnDefault, compiler: DDLCompiler, **kw +) -> str: + return "%s %s %s" % ( + alter_table(compiler, element.table_name, element.schema), + alter_column(compiler, element.column_name), + ( + "SET DEFAULT %s" % format_server_default(compiler, element.default) + if element.default is not None + else "DROP DEFAULT" + ), + ) + + +@compiles(ComputedColumnDefault) +def visit_computed_column( + element: ComputedColumnDefault, compiler: DDLCompiler, **kw +): + raise exc.CompileError( + 'Adding or removing a "computed" construct, e.g. GENERATED ' + "ALWAYS AS, to or from an existing column is not supported." + ) + + +@compiles(IdentityColumnDefault) +def visit_identity_column( + element: IdentityColumnDefault, compiler: DDLCompiler, **kw +): + raise exc.CompileError( + 'Adding, removing or modifying an "identity" construct, ' + "e.g. GENERATED AS IDENTITY, to or from an existing " + "column is not supported in this dialect." + ) + + +def quote_dotted( + name: Union[quoted_name, str], quote: functools.partial +) -> Union[quoted_name, str]: + """quote the elements of a dotted name""" + + if isinstance(name, quoted_name): + return quote(name) + result = ".".join([quote(x) for x in name.split(".")]) + return result + + +def format_table_name( + compiler: Compiled, + name: Union[quoted_name, str], + schema: Optional[Union[quoted_name, str]], +) -> Union[quoted_name, str]: + quote = functools.partial(compiler.preparer.quote) + if schema: + return quote_dotted(schema, quote) + "." 
+ quote(name) + else: + return quote(name) + + +def format_column_name( + compiler: DDLCompiler, name: Optional[Union[quoted_name, str]] +) -> Union[quoted_name, str]: + return compiler.preparer.quote(name) # type: ignore[arg-type] + + +def format_server_default( + compiler: DDLCompiler, + default: Optional[_ServerDefault], +) -> str: + # this can be updated to use compiler.render_default_string + # for SQLAlchemy 2.0 and above; not in 1.4 + default_str = compiler.get_column_default_string( + Column("x", Integer, server_default=default) + ) + assert default_str is not None + return default_str + + +def format_type(compiler: DDLCompiler, type_: TypeEngine) -> str: + return compiler.dialect.type_compiler.process(type_) + + +def alter_table( + compiler: DDLCompiler, + name: str, + schema: Optional[str], +) -> str: + return "ALTER TABLE %s" % format_table_name(compiler, name, schema) + + +def drop_column( + compiler: DDLCompiler, name: str, if_exists: Optional[bool] = None, **kw +) -> str: + return "DROP COLUMN %s%s" % ( + "IF EXISTS " if if_exists else "", + format_column_name(compiler, name), + ) + + +def alter_column(compiler: DDLCompiler, name: str) -> str: + return "ALTER COLUMN %s" % format_column_name(compiler, name) + + +def add_column( + compiler: DDLCompiler, + column: Column[Any], + if_not_exists: Optional[bool] = None, + **kw, +) -> str: + text = "ADD COLUMN %s%s" % ( + "IF NOT EXISTS " if if_not_exists else "", + compiler.get_column_specification(column, **kw), + ) + + const = " ".join( + compiler.process(constraint) for constraint in column.constraints + ) + if const: + text += " " + const + + return text diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/ddl/impl.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/ddl/impl.py new file mode 100644 index 0000000000000000000000000000000000000000..d352f12ee7dd90ce5121d81908e0f915b03d2e23 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/ddl/impl.py @@ -0,0 +1,902 @@ +# mypy: allow-untyped-defs, allow-incomplete-defs, allow-untyped-calls +# mypy: no-warn-return-any, allow-any-generics + +from __future__ import annotations + +import logging +import re +from typing import Any +from typing import Callable +from typing import Dict +from typing import Iterable +from typing import List +from typing import Mapping +from typing import NamedTuple +from typing import Optional +from typing import Sequence +from typing import Set +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING +from typing import Union + +from sqlalchemy import cast +from sqlalchemy import Column +from sqlalchemy import MetaData +from sqlalchemy import PrimaryKeyConstraint +from sqlalchemy import schema +from sqlalchemy import String +from sqlalchemy import Table +from sqlalchemy import text + +from . import _autogen +from . import base +from ._autogen import _constraint_sig as _constraint_sig +from ._autogen import ComparisonResult as ComparisonResult +from .. 
import util +from ..util import sqla_compat + +if TYPE_CHECKING: + from typing import Literal + from typing import TextIO + + from sqlalchemy.engine import Connection + from sqlalchemy.engine import Dialect + from sqlalchemy.engine.cursor import CursorResult + from sqlalchemy.engine.reflection import Inspector + from sqlalchemy.sql import ClauseElement + from sqlalchemy.sql import Executable + from sqlalchemy.sql.elements import quoted_name + from sqlalchemy.sql.schema import Constraint + from sqlalchemy.sql.schema import ForeignKeyConstraint + from sqlalchemy.sql.schema import Index + from sqlalchemy.sql.schema import UniqueConstraint + from sqlalchemy.sql.selectable import TableClause + from sqlalchemy.sql.type_api import TypeEngine + + from .base import _ServerDefault + from ..autogenerate.api import AutogenContext + from ..operations.batch import ApplyBatchImpl + from ..operations.batch import BatchOperationsImpl + +log = logging.getLogger(__name__) + + +class ImplMeta(type): + def __init__( + cls, + classname: str, + bases: Tuple[Type[DefaultImpl]], + dict_: Dict[str, Any], + ): + newtype = type.__init__(cls, classname, bases, dict_) + if "__dialect__" in dict_: + _impls[dict_["__dialect__"]] = cls # type: ignore[assignment] + return newtype + + +_impls: Dict[str, Type[DefaultImpl]] = {} + + +class DefaultImpl(metaclass=ImplMeta): + """Provide the entrypoint for major migration operations, + including database-specific behavioral variances. + + While individual SQL/DDL constructs already provide + for database-specific implementations, variances here + allow for entirely different sequences of operations + to take place for a particular migration, such as + SQL Server's special 'IDENTITY INSERT' step for + bulk inserts. + + """ + + __dialect__ = "default" + + transactional_ddl = False + command_terminator = ";" + type_synonyms: Tuple[Set[str], ...] = ({"NUMERIC", "DECIMAL"},) + type_arg_extract: Sequence[str] = () + # These attributes are deprecated in SQLAlchemy via #10247. They need to + # be ignored to support older version that did not use dialect kwargs. + # They only apply to Oracle and are replaced by oracle_order, + # oracle_on_null + identity_attrs_ignore: Tuple[str, ...] = ("order", "on_null") + + def __init__( + self, + dialect: Dialect, + connection: Optional[Connection], + as_sql: bool, + transactional_ddl: Optional[bool], + output_buffer: Optional[TextIO], + context_opts: Dict[str, Any], + ) -> None: + self.dialect = dialect + self.connection = connection + self.as_sql = as_sql + self.literal_binds = context_opts.get("literal_binds", False) + + self.output_buffer = output_buffer + self.memo: dict = {} + self.context_opts = context_opts + if transactional_ddl is not None: + self.transactional_ddl = transactional_ddl + + if self.literal_binds: + if not self.as_sql: + raise util.CommandError( + "Can't use literal_binds setting without as_sql mode" + ) + + @classmethod + def get_by_dialect(cls, dialect: Dialect) -> Type[DefaultImpl]: + return _impls[dialect.name] + + def static_output(self, text: str) -> None: + assert self.output_buffer is not None + self.output_buffer.write(text + "\n\n") + self.output_buffer.flush() + + def version_table_impl( + self, + *, + version_table: str, + version_table_schema: Optional[str], + version_table_pk: bool, + **kw: Any, + ) -> Table: + """Generate a :class:`.Table` object which will be used as the + structure for the Alembic version table. 
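+
+        With default arguments the result is roughly equivalent to the
+        following (an illustrative sketch; ``alembic_version`` is the usual
+        default for ``version_table``)::
+
+            Table(
+                "alembic_version",
+                MetaData(),
+                Column("version_num", String(32), nullable=False),
+            )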
+ + Third party dialects may override this hook to provide an alternate + structure for this :class:`.Table`; requirements are only that it + be named based on the ``version_table`` parameter and contains + at least a single string-holding column named ``version_num``. + + .. versionadded:: 1.14 + + """ + vt = Table( + version_table, + MetaData(), + Column("version_num", String(32), nullable=False), + schema=version_table_schema, + ) + if version_table_pk: + vt.append_constraint( + PrimaryKeyConstraint( + "version_num", name=f"{version_table}_pkc" + ) + ) + + return vt + + def requires_recreate_in_batch( + self, batch_op: BatchOperationsImpl + ) -> bool: + """Return True if the given :class:`.BatchOperationsImpl` + would need the table to be recreated and copied in order to + proceed. + + Normally, only returns True on SQLite when operations other + than add_column are present. + + """ + return False + + def prep_table_for_batch( + self, batch_impl: ApplyBatchImpl, table: Table + ) -> None: + """perform any operations needed on a table before a new + one is created to replace it in batch mode. + + the PG dialect uses this to drop constraints on the table + before the new one uses those same names. + + """ + + @property + def bind(self) -> Optional[Connection]: + return self.connection + + def _exec( + self, + construct: Union[Executable, str], + execution_options: Optional[Mapping[str, Any]] = None, + multiparams: Optional[Sequence[Mapping[str, Any]]] = None, + params: Mapping[str, Any] = util.immutabledict(), + ) -> Optional[CursorResult]: + if isinstance(construct, str): + construct = text(construct) + if self.as_sql: + if multiparams is not None or params: + raise TypeError("SQL parameters not allowed with as_sql") + + compile_kw: dict[str, Any] + if self.literal_binds and not isinstance( + construct, schema.DDLElement + ): + compile_kw = dict(compile_kwargs={"literal_binds": True}) + else: + compile_kw = {} + + if TYPE_CHECKING: + assert isinstance(construct, ClauseElement) + compiled = construct.compile(dialect=self.dialect, **compile_kw) + self.static_output( + str(compiled).replace("\t", " ").strip() + + self.command_terminator + ) + return None + else: + conn = self.connection + assert conn is not None + if execution_options: + conn = conn.execution_options(**execution_options) + + if params and multiparams is not None: + raise TypeError( + "Can't send params and multiparams at the same time" + ) + + if multiparams: + return conn.execute(construct, multiparams) + else: + return conn.execute(construct, params) + + def execute( + self, + sql: Union[Executable, str], + execution_options: Optional[dict[str, Any]] = None, + ) -> None: + self._exec(sql, execution_options) + + def alter_column( + self, + table_name: str, + column_name: str, + *, + nullable: Optional[bool] = None, + server_default: Optional[ + Union[_ServerDefault, Literal[False]] + ] = False, + name: Optional[str] = None, + type_: Optional[TypeEngine] = None, + schema: Optional[str] = None, + autoincrement: Optional[bool] = None, + comment: Optional[Union[str, Literal[False]]] = False, + existing_comment: Optional[str] = None, + existing_type: Optional[TypeEngine] = None, + existing_server_default: Optional[_ServerDefault] = None, + existing_nullable: Optional[bool] = None, + existing_autoincrement: Optional[bool] = None, + **kw: Any, + ) -> None: + if autoincrement is not None or existing_autoincrement is not None: + util.warn( + "autoincrement and existing_autoincrement " + "only make sense for MySQL", + stacklevel=3, + 
) + if nullable is not None: + self._exec( + base.ColumnNullable( + table_name, + column_name, + nullable, + schema=schema, + existing_type=existing_type, + existing_server_default=existing_server_default, + existing_nullable=existing_nullable, + existing_comment=existing_comment, + ) + ) + if server_default is not False: + kw = {} + cls_: Type[ + Union[ + base.ComputedColumnDefault, + base.IdentityColumnDefault, + base.ColumnDefault, + ] + ] + if sqla_compat._server_default_is_computed( + server_default, existing_server_default + ): + cls_ = base.ComputedColumnDefault + elif sqla_compat._server_default_is_identity( + server_default, existing_server_default + ): + cls_ = base.IdentityColumnDefault + kw["impl"] = self + else: + cls_ = base.ColumnDefault + self._exec( + cls_( + table_name, + column_name, + server_default, # type:ignore[arg-type] + schema=schema, + existing_type=existing_type, + existing_server_default=existing_server_default, + existing_nullable=existing_nullable, + existing_comment=existing_comment, + **kw, + ) + ) + if type_ is not None: + self._exec( + base.ColumnType( + table_name, + column_name, + type_, + schema=schema, + existing_type=existing_type, + existing_server_default=existing_server_default, + existing_nullable=existing_nullable, + existing_comment=existing_comment, + ) + ) + + if comment is not False: + self._exec( + base.ColumnComment( + table_name, + column_name, + comment, + schema=schema, + existing_type=existing_type, + existing_server_default=existing_server_default, + existing_nullable=existing_nullable, + existing_comment=existing_comment, + ) + ) + + # do the new name last ;) + if name is not None: + self._exec( + base.ColumnName( + table_name, + column_name, + name, + schema=schema, + existing_type=existing_type, + existing_server_default=existing_server_default, + existing_nullable=existing_nullable, + ) + ) + + def add_column( + self, + table_name: str, + column: Column[Any], + *, + schema: Optional[Union[str, quoted_name]] = None, + if_not_exists: Optional[bool] = None, + ) -> None: + self._exec( + base.AddColumn( + table_name, + column, + schema=schema, + if_not_exists=if_not_exists, + ) + ) + + def drop_column( + self, + table_name: str, + column: Column[Any], + *, + schema: Optional[str] = None, + if_exists: Optional[bool] = None, + **kw, + ) -> None: + self._exec( + base.DropColumn( + table_name, column, schema=schema, if_exists=if_exists + ) + ) + + def add_constraint(self, const: Any) -> None: + if const._create_rule is None or const._create_rule(self): + self._exec(schema.AddConstraint(const)) + + def drop_constraint(self, const: Constraint, **kw: Any) -> None: + self._exec(schema.DropConstraint(const, **kw)) + + def rename_table( + self, + old_table_name: str, + new_table_name: Union[str, quoted_name], + schema: Optional[Union[str, quoted_name]] = None, + ) -> None: + self._exec( + base.RenameTable(old_table_name, new_table_name, schema=schema) + ) + + def create_table(self, table: Table, **kw: Any) -> None: + table.dispatch.before_create( + table, self.connection, checkfirst=False, _ddl_runner=self + ) + self._exec(schema.CreateTable(table, **kw)) + table.dispatch.after_create( + table, self.connection, checkfirst=False, _ddl_runner=self + ) + for index in table.indexes: + self._exec(schema.CreateIndex(index)) + + with_comment = ( + self.dialect.supports_comments and not self.dialect.inline_comments + ) + comment = table.comment + if comment and with_comment: + self.create_table_comment(table) + + for column in table.columns: + comment = 
column.comment + if comment and with_comment: + self.create_column_comment(column) + + def drop_table(self, table: Table, **kw: Any) -> None: + table.dispatch.before_drop( + table, self.connection, checkfirst=False, _ddl_runner=self + ) + self._exec(schema.DropTable(table, **kw)) + table.dispatch.after_drop( + table, self.connection, checkfirst=False, _ddl_runner=self + ) + + def create_index(self, index: Index, **kw: Any) -> None: + self._exec(schema.CreateIndex(index, **kw)) + + def create_table_comment(self, table: Table) -> None: + self._exec(schema.SetTableComment(table)) + + def drop_table_comment(self, table: Table) -> None: + self._exec(schema.DropTableComment(table)) + + def create_column_comment(self, column: Column[Any]) -> None: + self._exec(schema.SetColumnComment(column)) + + def drop_index(self, index: Index, **kw: Any) -> None: + self._exec(schema.DropIndex(index, **kw)) + + def bulk_insert( + self, + table: Union[TableClause, Table], + rows: List[dict], + multiinsert: bool = True, + ) -> None: + if not isinstance(rows, list): + raise TypeError("List expected") + elif rows and not isinstance(rows[0], dict): + raise TypeError("List of dictionaries expected") + if self.as_sql: + for row in rows: + self._exec( + table.insert() + .inline() + .values( + **{ + k: ( + sqla_compat._literal_bindparam( + k, v, type_=table.c[k].type + ) + if not isinstance( + v, sqla_compat._literal_bindparam + ) + else v + ) + for k, v in row.items() + } + ) + ) + else: + if rows: + if multiinsert: + self._exec(table.insert().inline(), multiparams=rows) + else: + for row in rows: + self._exec(table.insert().inline().values(**row)) + + def _tokenize_column_type(self, column: Column) -> Params: + definition: str + definition = self.dialect.type_compiler.process(column.type).lower() + + # tokenize the SQLAlchemy-generated version of a type, so that + # the two can be compared. + # + # examples: + # NUMERIC(10, 5) + # TIMESTAMP WITH TIMEZONE + # INTEGER UNSIGNED + # INTEGER (10) UNSIGNED + # INTEGER(10) UNSIGNED + # varchar character set utf8 + # + + tokens: List[str] = re.findall(r"[\w\-_]+|\(.+?\)", definition) + + term_tokens: List[str] = [] + paren_term = None + + for token in tokens: + if re.match(r"^\(.*\)$", token): + paren_term = token + else: + term_tokens.append(token) + + params = Params(term_tokens[0], term_tokens[1:], [], {}) + + if paren_term: + term: str + for term in re.findall("[^(),]+", paren_term): + if "=" in term: + key, val = term.split("=") + params.kwargs[key.strip()] = val.strip() + else: + params.args.append(term.strip()) + + return params + + def _column_types_match( + self, inspector_params: Params, metadata_params: Params + ) -> bool: + if inspector_params.token0 == metadata_params.token0: + return True + + synonyms = [{t.lower() for t in batch} for batch in self.type_synonyms] + inspector_all_terms = " ".join( + [inspector_params.token0] + inspector_params.tokens + ) + metadata_all_terms = " ".join( + [metadata_params.token0] + metadata_params.tokens + ) + + for batch in synonyms: + if {inspector_all_terms, metadata_all_terms}.issubset(batch) or { + inspector_params.token0, + metadata_params.token0, + }.issubset(batch): + return True + return False + + def _column_args_match( + self, inspected_params: Params, meta_params: Params + ) -> bool: + """We want to compare column parameters. However, we only want + to compare parameters that are set. If they both have `collation`, + we want to make sure they are the same. 
However, if only one + specifies it, dont flag it for being less specific + """ + + if ( + len(meta_params.tokens) == len(inspected_params.tokens) + and meta_params.tokens != inspected_params.tokens + ): + return False + + if ( + len(meta_params.args) == len(inspected_params.args) + and meta_params.args != inspected_params.args + ): + return False + + insp = " ".join(inspected_params.tokens).lower() + meta = " ".join(meta_params.tokens).lower() + + for reg in self.type_arg_extract: + mi = re.search(reg, insp) + mm = re.search(reg, meta) + + if mi and mm and mi.group(1) != mm.group(1): + return False + + return True + + def compare_type( + self, inspector_column: Column[Any], metadata_column: Column + ) -> bool: + """Returns True if there ARE differences between the types of the two + columns. Takes impl.type_synonyms into account between retrospected + and metadata types + """ + inspector_params = self._tokenize_column_type(inspector_column) + metadata_params = self._tokenize_column_type(metadata_column) + + if not self._column_types_match(inspector_params, metadata_params): + return True + if not self._column_args_match(inspector_params, metadata_params): + return True + return False + + def compare_server_default( + self, + inspector_column, + metadata_column, + rendered_metadata_default, + rendered_inspector_default, + ): + return rendered_inspector_default != rendered_metadata_default + + def correct_for_autogen_constraints( + self, + conn_uniques: Set[UniqueConstraint], + conn_indexes: Set[Index], + metadata_unique_constraints: Set[UniqueConstraint], + metadata_indexes: Set[Index], + ) -> None: + pass + + def cast_for_batch_migrate(self, existing, existing_transfer, new_type): + if existing.type._type_affinity is not new_type._type_affinity: + existing_transfer["expr"] = cast( + existing_transfer["expr"], new_type + ) + + def render_ddl_sql_expr( + self, expr: ClauseElement, is_server_default: bool = False, **kw: Any + ) -> str: + """Render a SQL expression that is typically a server default, + index expression, etc. + + """ + + compile_kw = {"literal_binds": True, "include_table": False} + + return str( + expr.compile(dialect=self.dialect, compile_kwargs=compile_kw) + ) + + def _compat_autogen_column_reflect(self, inspector: Inspector) -> Callable: + return self.autogen_column_reflect + + def correct_for_autogen_foreignkeys( + self, + conn_fks: Set[ForeignKeyConstraint], + metadata_fks: Set[ForeignKeyConstraint], + ) -> None: + pass + + def autogen_column_reflect(self, inspector, table, column_info): + """A hook that is attached to the 'column_reflect' event for when + a Table is reflected from the database during the autogenerate + process. + + Dialects can elect to modify the information gathered here. + + """ + + def start_migrations(self) -> None: + """A hook called when :meth:`.EnvironmentContext.run_migrations` + is called. + + Implementations can set up per-migration-run state here. + + """ + + def emit_begin(self) -> None: + """Emit the string ``BEGIN``, or the backend-specific + equivalent, on the current connection context. + + This is used in offline mode and typically + via :meth:`.EnvironmentContext.begin_transaction`. + + """ + self.static_output("BEGIN" + self.command_terminator) + + def emit_commit(self) -> None: + """Emit the string ``COMMIT``, or the backend-specific + equivalent, on the current connection context. + + This is used in offline mode and typically + via :meth:`.EnvironmentContext.begin_transaction`. 
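+
+        A dialect needing different wording can override this hook, in the
+        same spirit as the MSSQL implementation's ``emit_begin`` later in this
+        package (a hypothetical sketch; ``COMMIT WORK`` is illustrative
+        only)::
+
+            def emit_commit(self) -> None:
+                self.static_output("COMMIT WORK" + self.command_terminator)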
+ + """ + self.static_output("COMMIT" + self.command_terminator) + + def render_type( + self, type_obj: TypeEngine, autogen_context: AutogenContext + ) -> Union[str, Literal[False]]: + return False + + def _compare_identity_default(self, metadata_identity, inspector_identity): + # ignored contains the attributes that were not considered + # because assumed to their default values in the db. + diff, ignored = _compare_identity_options( + metadata_identity, + inspector_identity, + schema.Identity(), + skip={"always"}, + ) + + meta_always = getattr(metadata_identity, "always", None) + inspector_always = getattr(inspector_identity, "always", None) + # None and False are the same in this comparison + if bool(meta_always) != bool(inspector_always): + diff.add("always") + + diff.difference_update(self.identity_attrs_ignore) + + # returns 3 values: + return ( + # different identity attributes + diff, + # ignored identity attributes + ignored, + # if the two identity should be considered different + bool(diff) or bool(metadata_identity) != bool(inspector_identity), + ) + + def _compare_index_unique( + self, metadata_index: Index, reflected_index: Index + ) -> Optional[str]: + conn_unique = bool(reflected_index.unique) + meta_unique = bool(metadata_index.unique) + if conn_unique != meta_unique: + return f"unique={conn_unique} to unique={meta_unique}" + else: + return None + + def _create_metadata_constraint_sig( + self, constraint: _autogen._C, **opts: Any + ) -> _constraint_sig[_autogen._C]: + return _constraint_sig.from_constraint(True, self, constraint, **opts) + + def _create_reflected_constraint_sig( + self, constraint: _autogen._C, **opts: Any + ) -> _constraint_sig[_autogen._C]: + return _constraint_sig.from_constraint(False, self, constraint, **opts) + + def compare_indexes( + self, + metadata_index: Index, + reflected_index: Index, + ) -> ComparisonResult: + """Compare two indexes by comparing the signature generated by + ``create_index_sig``. + + This method returns a ``ComparisonResult``. + """ + msg: List[str] = [] + unique_msg = self._compare_index_unique( + metadata_index, reflected_index + ) + if unique_msg: + msg.append(unique_msg) + m_sig = self._create_metadata_constraint_sig(metadata_index) + r_sig = self._create_reflected_constraint_sig(reflected_index) + + assert _autogen.is_index_sig(m_sig) + assert _autogen.is_index_sig(r_sig) + + # The assumption is that the index have no expression + for sig in m_sig, r_sig: + if sig.has_expressions: + log.warning( + "Generating approximate signature for index %s. " + "The dialect " + "implementation should either skip expression indexes " + "or provide a custom implementation.", + sig.const, + ) + + if m_sig.column_names != r_sig.column_names: + msg.append( + f"expression {r_sig.column_names} to {m_sig.column_names}" + ) + + if msg: + return ComparisonResult.Different(msg) + else: + return ComparisonResult.Equal() + + def compare_unique_constraint( + self, + metadata_constraint: UniqueConstraint, + reflected_constraint: UniqueConstraint, + ) -> ComparisonResult: + """Compare two unique constraints by comparing the two signatures. + + The arguments are two tuples that contain the unique constraint and + the signatures generated by ``create_unique_constraint_sig``. + + This method returns a ``ComparisonResult``. 
+ """ + metadata_tup = self._create_metadata_constraint_sig( + metadata_constraint + ) + reflected_tup = self._create_reflected_constraint_sig( + reflected_constraint + ) + + meta_sig = metadata_tup.unnamed + conn_sig = reflected_tup.unnamed + if conn_sig != meta_sig: + return ComparisonResult.Different( + f"expression {conn_sig} to {meta_sig}" + ) + else: + return ComparisonResult.Equal() + + def _skip_functional_indexes(self, metadata_indexes, conn_indexes): + conn_indexes_by_name = {c.name: c for c in conn_indexes} + + for idx in list(metadata_indexes): + if idx.name in conn_indexes_by_name: + continue + iex = sqla_compat.is_expression_index(idx) + if iex: + util.warn( + "autogenerate skipping metadata-specified " + "expression-based index " + f"{idx.name!r}; dialect {self.__dialect__!r} under " + f"SQLAlchemy {sqla_compat.sqlalchemy_version} can't " + "reflect these indexes so they can't be compared" + ) + metadata_indexes.discard(idx) + + def adjust_reflected_dialect_options( + self, reflected_object: Dict[str, Any], kind: str + ) -> Dict[str, Any]: + return reflected_object.get("dialect_options", {}) + + +class Params(NamedTuple): + token0: str + tokens: List[str] + args: List[str] + kwargs: Dict[str, str] + + +def _compare_identity_options( + metadata_io: Union[schema.Identity, schema.Sequence, None], + inspector_io: Union[schema.Identity, schema.Sequence, None], + default_io: Union[schema.Identity, schema.Sequence], + skip: Set[str], +): + # this can be used for identity or sequence compare. + # default_io is an instance of IdentityOption with all attributes to the + # default value. + meta_d = sqla_compat._get_identity_options_dict(metadata_io) + insp_d = sqla_compat._get_identity_options_dict(inspector_io) + + diff = set() + ignored_attr = set() + + def check_dicts( + meta_dict: Mapping[str, Any], + insp_dict: Mapping[str, Any], + default_dict: Mapping[str, Any], + attrs: Iterable[str], + ): + for attr in set(attrs).difference(skip): + meta_value = meta_dict.get(attr) + insp_value = insp_dict.get(attr) + if insp_value != meta_value: + default_value = default_dict.get(attr) + if meta_value == default_value: + ignored_attr.add(attr) + else: + diff.add(attr) + + check_dicts( + meta_d, + insp_d, + sqla_compat._get_identity_options_dict(default_io), + set(meta_d).union(insp_d), + ) + if sqla_compat.identity_has_dialect_kwargs: + assert hasattr(default_io, "dialect_kwargs") + # use only the dialect kwargs in inspector_io since metadata_io + # can have options for many backends + check_dicts( + getattr(metadata_io, "dialect_kwargs", {}), + getattr(inspector_io, "dialect_kwargs", {}), + default_io.dialect_kwargs, + getattr(inspector_io, "dialect_kwargs", {}), + ) + + return diff, ignored_attr diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/ddl/mssql.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/ddl/mssql.py new file mode 100644 index 0000000000000000000000000000000000000000..5376da5adece8799051c35126bbddcf938acef0f --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/ddl/mssql.py @@ -0,0 +1,421 @@ +# mypy: allow-untyped-defs, allow-incomplete-defs, allow-untyped-calls +# mypy: no-warn-return-any, allow-any-generics + +from __future__ import annotations + +import re +from typing import Any +from typing import Dict +from typing import List +from typing import Optional +from typing import TYPE_CHECKING +from typing import Union + +from 
sqlalchemy import types as sqltypes +from sqlalchemy.schema import Column +from sqlalchemy.schema import CreateIndex +from sqlalchemy.sql.base import Executable +from sqlalchemy.sql.elements import ClauseElement + +from .base import AddColumn +from .base import alter_column +from .base import alter_table +from .base import ColumnDefault +from .base import ColumnName +from .base import ColumnNullable +from .base import ColumnType +from .base import format_column_name +from .base import format_server_default +from .base import format_table_name +from .base import format_type +from .base import RenameTable +from .impl import DefaultImpl +from .. import util +from ..util import sqla_compat +from ..util.sqla_compat import compiles + +if TYPE_CHECKING: + from typing import Literal + + from sqlalchemy.dialects.mssql.base import MSDDLCompiler + from sqlalchemy.dialects.mssql.base import MSSQLCompiler + from sqlalchemy.engine.cursor import CursorResult + from sqlalchemy.sql.schema import Index + from sqlalchemy.sql.schema import Table + from sqlalchemy.sql.selectable import TableClause + from sqlalchemy.sql.type_api import TypeEngine + + from .base import _ServerDefault + + +class MSSQLImpl(DefaultImpl): + __dialect__ = "mssql" + transactional_ddl = True + batch_separator = "GO" + + type_synonyms = DefaultImpl.type_synonyms + ({"VARCHAR", "NVARCHAR"},) + identity_attrs_ignore = DefaultImpl.identity_attrs_ignore + ( + "minvalue", + "maxvalue", + "nominvalue", + "nomaxvalue", + "cycle", + "cache", + ) + + def __init__(self, *arg, **kw) -> None: + super().__init__(*arg, **kw) + self.batch_separator = self.context_opts.get( + "mssql_batch_separator", self.batch_separator + ) + + def _exec(self, construct: Any, *args, **kw) -> Optional[CursorResult]: + result = super()._exec(construct, *args, **kw) + if self.as_sql and self.batch_separator: + self.static_output(self.batch_separator) + return result + + def emit_begin(self) -> None: + self.static_output("BEGIN TRANSACTION" + self.command_terminator) + + def emit_commit(self) -> None: + super().emit_commit() + if self.as_sql and self.batch_separator: + self.static_output(self.batch_separator) + + def alter_column( + self, + table_name: str, + column_name: str, + *, + nullable: Optional[bool] = None, + server_default: Optional[ + Union[_ServerDefault, Literal[False]] + ] = False, + name: Optional[str] = None, + type_: Optional[TypeEngine] = None, + schema: Optional[str] = None, + existing_type: Optional[TypeEngine] = None, + existing_server_default: Optional[_ServerDefault] = None, + existing_nullable: Optional[bool] = None, + **kw: Any, + ) -> None: + if nullable is not None: + if type_ is not None: + # the NULL/NOT NULL alter will handle + # the type alteration + existing_type = type_ + type_ = None + elif existing_type is None: + raise util.CommandError( + "MS-SQL ALTER COLUMN operations " + "with NULL or NOT NULL require the " + "existing_type or a new type_ be passed." + ) + elif existing_nullable is not None and type_ is not None: + nullable = existing_nullable + + # the NULL/NOT NULL alter will handle + # the type alteration + existing_type = type_ + type_ = None + + elif type_ is not None: + util.warn( + "MS-SQL ALTER COLUMN operations that specify type_= " + "should also specify a nullable= or " + "existing_nullable= argument to avoid implicit conversion " + "of NOT NULL columns to NULL." 
+ ) + + used_default = False + if sqla_compat._server_default_is_identity( + server_default, existing_server_default + ) or sqla_compat._server_default_is_computed( + server_default, existing_server_default + ): + used_default = True + kw["server_default"] = server_default + kw["existing_server_default"] = existing_server_default + + super().alter_column( + table_name, + column_name, + nullable=nullable, + type_=type_, + schema=schema, + existing_type=existing_type, + existing_nullable=existing_nullable, + **kw, + ) + + if server_default is not False and used_default is False: + if existing_server_default is not False or server_default is None: + self._exec( + _ExecDropConstraint( + table_name, + column_name, + "sys.default_constraints", + schema, + ) + ) + if server_default is not None: + super().alter_column( + table_name, + column_name, + schema=schema, + server_default=server_default, + ) + + if name is not None: + super().alter_column( + table_name, column_name, schema=schema, name=name + ) + + def create_index(self, index: Index, **kw: Any) -> None: + # this likely defaults to None if not present, so get() + # should normally not return the default value. being + # defensive in any case + mssql_include = index.kwargs.get("mssql_include", None) or () + assert index.table is not None + for col in mssql_include: + if col not in index.table.c: + index.table.append_column(Column(col, sqltypes.NullType)) + self._exec(CreateIndex(index, **kw)) + + def bulk_insert( # type:ignore[override] + self, table: Union[TableClause, Table], rows: List[dict], **kw: Any + ) -> None: + if self.as_sql: + self._exec( + "SET IDENTITY_INSERT %s ON" + % self.dialect.identifier_preparer.format_table(table) + ) + super().bulk_insert(table, rows, **kw) + self._exec( + "SET IDENTITY_INSERT %s OFF" + % self.dialect.identifier_preparer.format_table(table) + ) + else: + super().bulk_insert(table, rows, **kw) + + def drop_column( + self, + table_name: str, + column: Column[Any], + *, + schema: Optional[str] = None, + **kw, + ) -> None: + drop_default = kw.pop("mssql_drop_default", False) + if drop_default: + self._exec( + _ExecDropConstraint( + table_name, column, "sys.default_constraints", schema + ) + ) + drop_check = kw.pop("mssql_drop_check", False) + if drop_check: + self._exec( + _ExecDropConstraint( + table_name, column, "sys.check_constraints", schema + ) + ) + drop_fks = kw.pop("mssql_drop_foreign_key", False) + if drop_fks: + self._exec(_ExecDropFKConstraint(table_name, column, schema)) + super().drop_column(table_name, column, schema=schema, **kw) + + def compare_server_default( + self, + inspector_column, + metadata_column, + rendered_metadata_default, + rendered_inspector_default, + ): + if rendered_metadata_default is not None: + rendered_metadata_default = re.sub( + r"[\(\) \"\']", "", rendered_metadata_default + ) + + if rendered_inspector_default is not None: + # SQL Server collapses whitespace and adds arbitrary parenthesis + # within expressions. 
our only option is collapse all of it + + rendered_inspector_default = re.sub( + r"[\(\) \"\']", "", rendered_inspector_default + ) + + return rendered_inspector_default != rendered_metadata_default + + def _compare_identity_default(self, metadata_identity, inspector_identity): + diff, ignored, is_alter = super()._compare_identity_default( + metadata_identity, inspector_identity + ) + + if ( + metadata_identity is None + and inspector_identity is not None + and not diff + and inspector_identity.column is not None + and inspector_identity.column.primary_key + ): + # mssql reflect primary keys with autoincrement as identity + # columns. if no different attributes are present ignore them + is_alter = False + + return diff, ignored, is_alter + + def adjust_reflected_dialect_options( + self, reflected_object: Dict[str, Any], kind: str + ) -> Dict[str, Any]: + options: Dict[str, Any] + options = reflected_object.get("dialect_options", {}).copy() + if not options.get("mssql_include"): + options.pop("mssql_include", None) + if not options.get("mssql_clustered"): + options.pop("mssql_clustered", None) + return options + + +class _ExecDropConstraint(Executable, ClauseElement): + inherit_cache = False + + def __init__( + self, + tname: str, + colname: Union[Column[Any], str], + type_: str, + schema: Optional[str], + ) -> None: + self.tname = tname + self.colname = colname + self.type_ = type_ + self.schema = schema + + +class _ExecDropFKConstraint(Executable, ClauseElement): + inherit_cache = False + + def __init__( + self, tname: str, colname: Column[Any], schema: Optional[str] + ) -> None: + self.tname = tname + self.colname = colname + self.schema = schema + + +@compiles(_ExecDropConstraint, "mssql") +def _exec_drop_col_constraint( + element: _ExecDropConstraint, compiler: MSSQLCompiler, **kw +) -> str: + schema, tname, colname, type_ = ( + element.schema, + element.tname, + element.colname, + element.type_, + ) + # from http://www.mssqltips.com/sqlservertip/1425/\ + # working-with-default-constraints-in-sql-server/ + return """declare @const_name varchar(256) +select @const_name = QUOTENAME([name]) from %(type)s +where parent_object_id = object_id('%(schema_dot)s%(tname)s') +and col_name(parent_object_id, parent_column_id) = '%(colname)s' +exec('alter table %(tname_quoted)s drop constraint ' + @const_name)""" % { + "type": type_, + "tname": tname, + "colname": colname, + "tname_quoted": format_table_name(compiler, tname, schema), + "schema_dot": schema + "." if schema else "", + } + + +@compiles(_ExecDropFKConstraint, "mssql") +def _exec_drop_col_fk_constraint( + element: _ExecDropFKConstraint, compiler: MSSQLCompiler, **kw +) -> str: + schema, tname, colname = element.schema, element.tname, element.colname + + return """declare @const_name varchar(256) +select @const_name = QUOTENAME([name]) from +sys.foreign_keys fk join sys.foreign_key_columns fkc +on fk.object_id=fkc.constraint_object_id +where fkc.parent_object_id = object_id('%(schema_dot)s%(tname)s') +and col_name(fkc.parent_object_id, fkc.parent_column_id) = '%(colname)s' +exec('alter table %(tname_quoted)s drop constraint ' + @const_name)""" % { + "tname": tname, + "colname": colname, + "tname_quoted": format_table_name(compiler, tname, schema), + "schema_dot": schema + "." 
if schema else "", + } + + +@compiles(AddColumn, "mssql") +def visit_add_column(element: AddColumn, compiler: MSDDLCompiler, **kw) -> str: + return "%s %s" % ( + alter_table(compiler, element.table_name, element.schema), + mssql_add_column(compiler, element.column, **kw), + ) + + +def mssql_add_column( + compiler: MSDDLCompiler, column: Column[Any], **kw +) -> str: + return "ADD %s" % compiler.get_column_specification(column, **kw) + + +@compiles(ColumnNullable, "mssql") +def visit_column_nullable( + element: ColumnNullable, compiler: MSDDLCompiler, **kw +) -> str: + return "%s %s %s %s" % ( + alter_table(compiler, element.table_name, element.schema), + alter_column(compiler, element.column_name), + format_type(compiler, element.existing_type), # type: ignore[arg-type] + "NULL" if element.nullable else "NOT NULL", + ) + + +@compiles(ColumnDefault, "mssql") +def visit_column_default( + element: ColumnDefault, compiler: MSDDLCompiler, **kw +) -> str: + # TODO: there can also be a named constraint + # with ADD CONSTRAINT here + return "%s ADD DEFAULT %s FOR %s" % ( + alter_table(compiler, element.table_name, element.schema), + format_server_default(compiler, element.default), + format_column_name(compiler, element.column_name), + ) + + +@compiles(ColumnName, "mssql") +def visit_rename_column( + element: ColumnName, compiler: MSDDLCompiler, **kw +) -> str: + return "EXEC sp_rename '%s.%s', %s, 'COLUMN'" % ( + format_table_name(compiler, element.table_name, element.schema), + format_column_name(compiler, element.column_name), + format_column_name(compiler, element.newname), + ) + + +@compiles(ColumnType, "mssql") +def visit_column_type( + element: ColumnType, compiler: MSDDLCompiler, **kw +) -> str: + return "%s %s %s" % ( + alter_table(compiler, element.table_name, element.schema), + alter_column(compiler, element.column_name), + format_type(compiler, element.type_), + ) + + +@compiles(RenameTable, "mssql") +def visit_rename_table( + element: RenameTable, compiler: MSDDLCompiler, **kw +) -> str: + return "EXEC sp_rename '%s', %s" % ( + format_table_name(compiler, element.table_name, element.schema), + format_table_name(compiler, element.new_table_name, None), + ) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/ddl/mysql.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/ddl/mysql.py new file mode 100644 index 0000000000000000000000000000000000000000..3f8c0628e557ef244b06c5350ba23083c4af185d --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/ddl/mysql.py @@ -0,0 +1,495 @@ +# mypy: allow-untyped-defs, allow-incomplete-defs, allow-untyped-calls +# mypy: no-warn-return-any, allow-any-generics + +from __future__ import annotations + +import re +from typing import Any +from typing import Optional +from typing import TYPE_CHECKING +from typing import Union + +from sqlalchemy import schema +from sqlalchemy import types as sqltypes + +from .base import alter_table +from .base import AlterColumn +from .base import ColumnDefault +from .base import ColumnName +from .base import ColumnNullable +from .base import ColumnType +from .base import format_column_name +from .base import format_server_default +from .impl import DefaultImpl +from .. 
import util +from ..util import sqla_compat +from ..util.sqla_compat import _is_type_bound +from ..util.sqla_compat import compiles + +if TYPE_CHECKING: + from typing import Literal + + from sqlalchemy.dialects.mysql.base import MySQLDDLCompiler + from sqlalchemy.sql.ddl import DropConstraint + from sqlalchemy.sql.schema import Constraint + from sqlalchemy.sql.type_api import TypeEngine + + from .base import _ServerDefault + + +class MySQLImpl(DefaultImpl): + __dialect__ = "mysql" + + transactional_ddl = False + type_synonyms = DefaultImpl.type_synonyms + ( + {"BOOL", "TINYINT"}, + {"JSON", "LONGTEXT"}, + ) + type_arg_extract = [r"character set ([\w\-_]+)", r"collate ([\w\-_]+)"] + + def alter_column( + self, + table_name: str, + column_name: str, + *, + nullable: Optional[bool] = None, + server_default: Optional[ + Union[_ServerDefault, Literal[False]] + ] = False, + name: Optional[str] = None, + type_: Optional[TypeEngine] = None, + schema: Optional[str] = None, + existing_type: Optional[TypeEngine] = None, + existing_server_default: Optional[_ServerDefault] = None, + existing_nullable: Optional[bool] = None, + autoincrement: Optional[bool] = None, + existing_autoincrement: Optional[bool] = None, + comment: Optional[Union[str, Literal[False]]] = False, + existing_comment: Optional[str] = None, + **kw: Any, + ) -> None: + if sqla_compat._server_default_is_identity( + server_default, existing_server_default + ) or sqla_compat._server_default_is_computed( + server_default, existing_server_default + ): + # modifying computed or identity columns is not supported + # the default will raise + super().alter_column( + table_name, + column_name, + nullable=nullable, + type_=type_, + schema=schema, + existing_type=existing_type, + existing_nullable=existing_nullable, + server_default=server_default, + existing_server_default=existing_server_default, + **kw, + ) + if name is not None or self._is_mysql_allowed_functional_default( + type_ if type_ is not None else existing_type, server_default + ): + self._exec( + MySQLChangeColumn( + table_name, + column_name, + schema=schema, + newname=name if name is not None else column_name, + nullable=( + nullable + if nullable is not None + else ( + existing_nullable + if existing_nullable is not None + else True + ) + ), + type_=type_ if type_ is not None else existing_type, + default=( + server_default + if server_default is not False + else existing_server_default + ), + autoincrement=( + autoincrement + if autoincrement is not None + else existing_autoincrement + ), + comment=( + comment if comment is not False else existing_comment + ), + ) + ) + elif ( + nullable is not None + or type_ is not None + or autoincrement is not None + or comment is not False + ): + self._exec( + MySQLModifyColumn( + table_name, + column_name, + schema=schema, + newname=name if name is not None else column_name, + nullable=( + nullable + if nullable is not None + else ( + existing_nullable + if existing_nullable is not None + else True + ) + ), + type_=type_ if type_ is not None else existing_type, + default=( + server_default + if server_default is not False + else existing_server_default + ), + autoincrement=( + autoincrement + if autoincrement is not None + else existing_autoincrement + ), + comment=( + comment if comment is not False else existing_comment + ), + ) + ) + elif server_default is not False: + self._exec( + MySQLAlterDefault( + table_name, column_name, server_default, schema=schema + ) + ) + + def drop_constraint( + self, + const: Constraint, + **kw: Any, + ) -> 
None: + if isinstance(const, schema.CheckConstraint) and _is_type_bound(const): + return + + super().drop_constraint(const) + + def _is_mysql_allowed_functional_default( + self, + type_: Optional[TypeEngine], + server_default: Optional[Union[_ServerDefault, Literal[False]]], + ) -> bool: + return ( + type_ is not None + and type_._type_affinity is sqltypes.DateTime + and server_default is not None + ) + + def compare_server_default( + self, + inspector_column, + metadata_column, + rendered_metadata_default, + rendered_inspector_default, + ): + # partially a workaround for SQLAlchemy issue #3023; if the + # column were created without "NOT NULL", MySQL may have added + # an implicit default of '0' which we need to skip + # TODO: this is not really covered anymore ? + if ( + metadata_column.type._type_affinity is sqltypes.Integer + and inspector_column.primary_key + and not inspector_column.autoincrement + and not rendered_metadata_default + and rendered_inspector_default == "'0'" + ): + return False + elif ( + rendered_inspector_default + and inspector_column.type._type_affinity is sqltypes.Integer + ): + rendered_inspector_default = ( + re.sub(r"^'|'$", "", rendered_inspector_default) + if rendered_inspector_default is not None + else None + ) + return rendered_inspector_default != rendered_metadata_default + elif ( + rendered_metadata_default + and metadata_column.type._type_affinity is sqltypes.String + ): + metadata_default = re.sub(r"^'|'$", "", rendered_metadata_default) + return rendered_inspector_default != f"'{metadata_default}'" + elif rendered_inspector_default and rendered_metadata_default: + # adjust for "function()" vs. "FUNCTION" as can occur particularly + # for the CURRENT_TIMESTAMP function on newer MariaDB versions + + # SQLAlchemy MySQL dialect bundles ON UPDATE into the server + # default; adjust for this possibly being present. + onupdate_ins = re.match( + r"(.*) (on update.*?)(?:\(\))?$", + rendered_inspector_default.lower(), + ) + onupdate_met = re.match( + r"(.*) (on update.*?)(?:\(\))?$", + rendered_metadata_default.lower(), + ) + + if onupdate_ins: + if not onupdate_met: + return True + elif onupdate_ins.group(2) != onupdate_met.group(2): + return True + + rendered_inspector_default = onupdate_ins.group(1) + rendered_metadata_default = onupdate_met.group(1) + + return re.sub( + r"(.*?)(?:\(\))?$", r"\1", rendered_inspector_default.lower() + ) != re.sub( + r"(.*?)(?:\(\))?$", r"\1", rendered_metadata_default.lower() + ) + else: + return rendered_inspector_default != rendered_metadata_default + + def correct_for_autogen_constraints( + self, + conn_unique_constraints, + conn_indexes, + metadata_unique_constraints, + metadata_indexes, + ): + # TODO: if SQLA 1.0, make use of "duplicates_index" + # metadata + removed = set() + for idx in list(conn_indexes): + if idx.unique: + continue + # MySQL puts implicit indexes on FK columns, even if + # composite and even if MyISAM, so can't check this too easily. + # the name of the index may be the column name or it may + # be the name of the FK constraint. 
+ for col in idx.columns: + if idx.name == col.name: + conn_indexes.remove(idx) + removed.add(idx.name) + break + for fk in col.foreign_keys: + if fk.name == idx.name: + conn_indexes.remove(idx) + removed.add(idx.name) + break + if idx.name in removed: + break + + # then remove indexes from the "metadata_indexes" + # that we've removed from reflected, otherwise they come out + # as adds (see #202) + for idx in list(metadata_indexes): + if idx.name in removed: + metadata_indexes.remove(idx) + + def correct_for_autogen_foreignkeys(self, conn_fks, metadata_fks): + conn_fk_by_sig = { + self._create_reflected_constraint_sig(fk).unnamed_no_options: fk + for fk in conn_fks + } + metadata_fk_by_sig = { + self._create_metadata_constraint_sig(fk).unnamed_no_options: fk + for fk in metadata_fks + } + + for sig in set(conn_fk_by_sig).intersection(metadata_fk_by_sig): + mdfk = metadata_fk_by_sig[sig] + cnfk = conn_fk_by_sig[sig] + # MySQL considers RESTRICT to be the default and doesn't + # report on it. if the model has explicit RESTRICT and + # the conn FK has None, set it to RESTRICT + if ( + mdfk.ondelete is not None + and mdfk.ondelete.lower() == "restrict" + and cnfk.ondelete is None + ): + cnfk.ondelete = "RESTRICT" + if ( + mdfk.onupdate is not None + and mdfk.onupdate.lower() == "restrict" + and cnfk.onupdate is None + ): + cnfk.onupdate = "RESTRICT" + + +class MariaDBImpl(MySQLImpl): + __dialect__ = "mariadb" + + +class MySQLAlterDefault(AlterColumn): + def __init__( + self, + name: str, + column_name: str, + default: Optional[_ServerDefault], + schema: Optional[str] = None, + ) -> None: + super(AlterColumn, self).__init__(name, schema=schema) + self.column_name = column_name + self.default = default + + +class MySQLChangeColumn(AlterColumn): + def __init__( + self, + name: str, + column_name: str, + schema: Optional[str] = None, + newname: Optional[str] = None, + type_: Optional[TypeEngine] = None, + nullable: Optional[bool] = None, + default: Optional[Union[_ServerDefault, Literal[False]]] = False, + autoincrement: Optional[bool] = None, + comment: Optional[Union[str, Literal[False]]] = False, + ) -> None: + super(AlterColumn, self).__init__(name, schema=schema) + self.column_name = column_name + self.nullable = nullable + self.newname = newname + self.default = default + self.autoincrement = autoincrement + self.comment = comment + if type_ is None: + raise util.CommandError( + "All MySQL CHANGE/MODIFY COLUMN operations " + "require the existing type." 
+ ) + + self.type_ = sqltypes.to_instance(type_) + + +class MySQLModifyColumn(MySQLChangeColumn): + pass + + +@compiles(ColumnNullable, "mysql", "mariadb") +@compiles(ColumnName, "mysql", "mariadb") +@compiles(ColumnDefault, "mysql", "mariadb") +@compiles(ColumnType, "mysql", "mariadb") +def _mysql_doesnt_support_individual(element, compiler, **kw): + raise NotImplementedError( + "Individual alter column constructs not supported by MySQL" + ) + + +@compiles(MySQLAlterDefault, "mysql", "mariadb") +def _mysql_alter_default( + element: MySQLAlterDefault, compiler: MySQLDDLCompiler, **kw +) -> str: + return "%s ALTER COLUMN %s %s" % ( + alter_table(compiler, element.table_name, element.schema), + format_column_name(compiler, element.column_name), + ( + "SET DEFAULT %s" % format_server_default(compiler, element.default) + if element.default is not None + else "DROP DEFAULT" + ), + ) + + +@compiles(MySQLModifyColumn, "mysql", "mariadb") +def _mysql_modify_column( + element: MySQLModifyColumn, compiler: MySQLDDLCompiler, **kw +) -> str: + return "%s MODIFY %s %s" % ( + alter_table(compiler, element.table_name, element.schema), + format_column_name(compiler, element.column_name), + _mysql_colspec( + compiler, + nullable=element.nullable, + server_default=element.default, + type_=element.type_, + autoincrement=element.autoincrement, + comment=element.comment, + ), + ) + + +@compiles(MySQLChangeColumn, "mysql", "mariadb") +def _mysql_change_column( + element: MySQLChangeColumn, compiler: MySQLDDLCompiler, **kw +) -> str: + return "%s CHANGE %s %s %s" % ( + alter_table(compiler, element.table_name, element.schema), + format_column_name(compiler, element.column_name), + format_column_name(compiler, element.newname), + _mysql_colspec( + compiler, + nullable=element.nullable, + server_default=element.default, + type_=element.type_, + autoincrement=element.autoincrement, + comment=element.comment, + ), + ) + + +def _mysql_colspec( + compiler: MySQLDDLCompiler, + nullable: Optional[bool], + server_default: Optional[Union[_ServerDefault, Literal[False]]], + type_: TypeEngine, + autoincrement: Optional[bool], + comment: Optional[Union[str, Literal[False]]], +) -> str: + spec = "%s %s" % ( + compiler.dialect.type_compiler.process(type_), + "NULL" if nullable else "NOT NULL", + ) + if autoincrement: + spec += " AUTO_INCREMENT" + if server_default is not False and server_default is not None: + spec += " DEFAULT %s" % format_server_default(compiler, server_default) + if comment: + spec += " COMMENT %s" % compiler.sql_compiler.render_literal_value( + comment, sqltypes.String() + ) + + return spec + + +@compiles(schema.DropConstraint, "mysql", "mariadb") +def _mysql_drop_constraint( + element: DropConstraint, compiler: MySQLDDLCompiler, **kw +) -> str: + """Redefine SQLAlchemy's drop constraint to + raise errors for invalid constraint type.""" + + constraint = element.element + if isinstance( + constraint, + ( + schema.ForeignKeyConstraint, + schema.PrimaryKeyConstraint, + schema.UniqueConstraint, + ), + ): + assert not kw + return compiler.visit_drop_constraint(element) + elif isinstance(constraint, schema.CheckConstraint): + # note that SQLAlchemy as of 1.2 does not yet support + # DROP CONSTRAINT for MySQL/MariaDB, so we implement fully + # here. 
+ if compiler.dialect.is_mariadb: # type: ignore[attr-defined] + return "ALTER TABLE %s DROP CONSTRAINT %s" % ( + compiler.preparer.format_table(constraint.table), + compiler.preparer.format_constraint(constraint), + ) + else: + return "ALTER TABLE %s DROP CHECK %s" % ( + compiler.preparer.format_table(constraint.table), + compiler.preparer.format_constraint(constraint), + ) + else: + raise NotImplementedError( + "No generic 'DROP CONSTRAINT' in MySQL - " + "please specify constraint type" + ) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/ddl/oracle.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/ddl/oracle.py new file mode 100644 index 0000000000000000000000000000000000000000..eac99124f42290163b402765b0a94e7d4f75f820 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/ddl/oracle.py @@ -0,0 +1,202 @@ +# mypy: allow-untyped-defs, allow-incomplete-defs, allow-untyped-calls +# mypy: no-warn-return-any, allow-any-generics + +from __future__ import annotations + +import re +from typing import Any +from typing import Optional +from typing import TYPE_CHECKING + +from sqlalchemy.sql import sqltypes + +from .base import AddColumn +from .base import alter_table +from .base import ColumnComment +from .base import ColumnDefault +from .base import ColumnName +from .base import ColumnNullable +from .base import ColumnType +from .base import format_column_name +from .base import format_server_default +from .base import format_table_name +from .base import format_type +from .base import IdentityColumnDefault +from .base import RenameTable +from .impl import DefaultImpl +from ..util.sqla_compat import compiles + +if TYPE_CHECKING: + from sqlalchemy.dialects.oracle.base import OracleDDLCompiler + from sqlalchemy.engine.cursor import CursorResult + from sqlalchemy.sql.schema import Column + + +class OracleImpl(DefaultImpl): + __dialect__ = "oracle" + transactional_ddl = False + batch_separator = "/" + command_terminator = "" + type_synonyms = DefaultImpl.type_synonyms + ( + {"VARCHAR", "VARCHAR2"}, + {"BIGINT", "INTEGER", "SMALLINT", "DECIMAL", "NUMERIC", "NUMBER"}, + {"DOUBLE", "FLOAT", "DOUBLE_PRECISION"}, + ) + identity_attrs_ignore = () + + def __init__(self, *arg, **kw) -> None: + super().__init__(*arg, **kw) + self.batch_separator = self.context_opts.get( + "oracle_batch_separator", self.batch_separator + ) + + def _exec(self, construct: Any, *args, **kw) -> Optional[CursorResult]: + result = super()._exec(construct, *args, **kw) + if self.as_sql and self.batch_separator: + self.static_output(self.batch_separator) + return result + + def compare_server_default( + self, + inspector_column, + metadata_column, + rendered_metadata_default, + rendered_inspector_default, + ): + if rendered_metadata_default is not None: + rendered_metadata_default = re.sub( + r"^\((.+)\)$", r"\1", rendered_metadata_default + ) + + rendered_metadata_default = re.sub( + r"^\"?'(.+)'\"?$", r"\1", rendered_metadata_default + ) + + if rendered_inspector_default is not None: + rendered_inspector_default = re.sub( + r"^\((.+)\)$", r"\1", rendered_inspector_default + ) + + rendered_inspector_default = re.sub( + r"^\"?'(.+)'\"?$", r"\1", rendered_inspector_default + ) + + rendered_inspector_default = rendered_inspector_default.strip() + return rendered_inspector_default != rendered_metadata_default + + def emit_begin(self) -> None: + self._exec("SET TRANSACTION READ WRITE") + + def 
emit_commit(self) -> None: + self._exec("COMMIT") + + +@compiles(AddColumn, "oracle") +def visit_add_column( + element: AddColumn, compiler: OracleDDLCompiler, **kw +) -> str: + return "%s %s" % ( + alter_table(compiler, element.table_name, element.schema), + add_column(compiler, element.column, **kw), + ) + + +@compiles(ColumnNullable, "oracle") +def visit_column_nullable( + element: ColumnNullable, compiler: OracleDDLCompiler, **kw +) -> str: + return "%s %s %s" % ( + alter_table(compiler, element.table_name, element.schema), + alter_column(compiler, element.column_name), + "NULL" if element.nullable else "NOT NULL", + ) + + +@compiles(ColumnType, "oracle") +def visit_column_type( + element: ColumnType, compiler: OracleDDLCompiler, **kw +) -> str: + return "%s %s %s" % ( + alter_table(compiler, element.table_name, element.schema), + alter_column(compiler, element.column_name), + "%s" % format_type(compiler, element.type_), + ) + + +@compiles(ColumnName, "oracle") +def visit_column_name( + element: ColumnName, compiler: OracleDDLCompiler, **kw +) -> str: + return "%s RENAME COLUMN %s TO %s" % ( + alter_table(compiler, element.table_name, element.schema), + format_column_name(compiler, element.column_name), + format_column_name(compiler, element.newname), + ) + + +@compiles(ColumnDefault, "oracle") +def visit_column_default( + element: ColumnDefault, compiler: OracleDDLCompiler, **kw +) -> str: + return "%s %s %s" % ( + alter_table(compiler, element.table_name, element.schema), + alter_column(compiler, element.column_name), + ( + "DEFAULT %s" % format_server_default(compiler, element.default) + if element.default is not None + else "DEFAULT NULL" + ), + ) + + +@compiles(ColumnComment, "oracle") +def visit_column_comment( + element: ColumnComment, compiler: OracleDDLCompiler, **kw +) -> str: + ddl = "COMMENT ON COLUMN {table_name}.{column_name} IS {comment}" + + comment = compiler.sql_compiler.render_literal_value( + (element.comment if element.comment is not None else ""), + sqltypes.String(), + ) + + return ddl.format( + table_name=element.table_name, + column_name=element.column_name, + comment=comment, + ) + + +@compiles(RenameTable, "oracle") +def visit_rename_table( + element: RenameTable, compiler: OracleDDLCompiler, **kw +) -> str: + return "%s RENAME TO %s" % ( + alter_table(compiler, element.table_name, element.schema), + format_table_name(compiler, element.new_table_name, None), + ) + + +def alter_column(compiler: OracleDDLCompiler, name: str) -> str: + return "MODIFY %s" % format_column_name(compiler, name) + + +def add_column(compiler: OracleDDLCompiler, column: Column[Any], **kw) -> str: + return "ADD %s" % compiler.get_column_specification(column, **kw) + + +@compiles(IdentityColumnDefault, "oracle") +def visit_identity_column( + element: IdentityColumnDefault, compiler: OracleDDLCompiler, **kw +): + text = "%s %s " % ( + alter_table(compiler, element.table_name, element.schema), + alter_column(compiler, element.column_name), + ) + if element.default is None: + # drop identity + text += "DROP IDENTITY" + return text + else: + text += compiler.visit_identity_column(element.default) + return text diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/ddl/postgresql.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/ddl/postgresql.py new file mode 100644 index 0000000000000000000000000000000000000000..90ecf70c19eaf3240ed9722786c91efb3129cc66 --- /dev/null +++ 
b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/ddl/postgresql.py @@ -0,0 +1,854 @@ +# mypy: allow-untyped-defs, allow-incomplete-defs, allow-untyped-calls +# mypy: no-warn-return-any, allow-any-generics + +from __future__ import annotations + +import logging +import re +from typing import Any +from typing import cast +from typing import Dict +from typing import List +from typing import Optional +from typing import Sequence +from typing import Tuple +from typing import TYPE_CHECKING +from typing import Union + +from sqlalchemy import Column +from sqlalchemy import Float +from sqlalchemy import Identity +from sqlalchemy import literal_column +from sqlalchemy import Numeric +from sqlalchemy import select +from sqlalchemy import text +from sqlalchemy import types as sqltypes +from sqlalchemy.dialects.postgresql import BIGINT +from sqlalchemy.dialects.postgresql import ExcludeConstraint +from sqlalchemy.dialects.postgresql import INTEGER +from sqlalchemy.schema import CreateIndex +from sqlalchemy.sql.elements import ColumnClause +from sqlalchemy.sql.elements import TextClause +from sqlalchemy.sql.functions import FunctionElement +from sqlalchemy.types import NULLTYPE + +from .base import alter_column +from .base import alter_table +from .base import AlterColumn +from .base import ColumnComment +from .base import format_column_name +from .base import format_table_name +from .base import format_type +from .base import IdentityColumnDefault +from .base import RenameTable +from .impl import ComparisonResult +from .impl import DefaultImpl +from .. import util +from ..autogenerate import render +from ..operations import ops +from ..operations import schemaobj +from ..operations.base import BatchOperations +from ..operations.base import Operations +from ..util import sqla_compat +from ..util.sqla_compat import compiles + + +if TYPE_CHECKING: + from typing import Literal + + from sqlalchemy import Index + from sqlalchemy import UniqueConstraint + from sqlalchemy.dialects.postgresql.array import ARRAY + from sqlalchemy.dialects.postgresql.base import PGDDLCompiler + from sqlalchemy.dialects.postgresql.hstore import HSTORE + from sqlalchemy.dialects.postgresql.json import JSON + from sqlalchemy.dialects.postgresql.json import JSONB + from sqlalchemy.sql.elements import ClauseElement + from sqlalchemy.sql.elements import ColumnElement + from sqlalchemy.sql.elements import quoted_name + from sqlalchemy.sql.schema import MetaData + from sqlalchemy.sql.schema import Table + from sqlalchemy.sql.type_api import TypeEngine + + from .base import _ServerDefault + from ..autogenerate.api import AutogenContext + from ..autogenerate.render import _f_name + from ..runtime.migration import MigrationContext + + +log = logging.getLogger(__name__) + + +class PostgresqlImpl(DefaultImpl): + __dialect__ = "postgresql" + transactional_ddl = True + type_synonyms = DefaultImpl.type_synonyms + ( + {"FLOAT", "DOUBLE PRECISION"}, + ) + + def create_index(self, index: Index, **kw: Any) -> None: + # this likely defaults to None if not present, so get() + # should normally not return the default value. 
being + # defensive in any case + postgresql_include = index.kwargs.get("postgresql_include", None) or () + for col in postgresql_include: + if col not in index.table.c: # type: ignore[union-attr] + index.table.append_column( # type: ignore[union-attr] + Column(col, sqltypes.NullType) + ) + self._exec(CreateIndex(index, **kw)) + + def prep_table_for_batch(self, batch_impl, table): + for constraint in table.constraints: + if ( + constraint.name is not None + and constraint.name in batch_impl.named_constraints + ): + self.drop_constraint(constraint) + + def compare_server_default( + self, + inspector_column, + metadata_column, + rendered_metadata_default, + rendered_inspector_default, + ): + # don't do defaults for SERIAL columns + if ( + metadata_column.primary_key + and metadata_column is metadata_column.table._autoincrement_column + ): + return False + + conn_col_default = rendered_inspector_default + + defaults_equal = conn_col_default == rendered_metadata_default + if defaults_equal: + return False + + if None in ( + conn_col_default, + rendered_metadata_default, + metadata_column.server_default, + ): + return not defaults_equal + + metadata_default = metadata_column.server_default.arg + + if isinstance(metadata_default, str): + if not isinstance(inspector_column.type, (Numeric, Float)): + metadata_default = re.sub(r"^'|'$", "", metadata_default) + metadata_default = f"'{metadata_default}'" + + metadata_default = literal_column(metadata_default) + + # run a real compare against the server + conn = self.connection + assert conn is not None + return not conn.scalar( + select(literal_column(conn_col_default) == metadata_default) + ) + + def alter_column( + self, + table_name: str, + column_name: str, + *, + nullable: Optional[bool] = None, + server_default: Optional[ + Union[_ServerDefault, Literal[False]] + ] = False, + name: Optional[str] = None, + type_: Optional[TypeEngine] = None, + schema: Optional[str] = None, + autoincrement: Optional[bool] = None, + existing_type: Optional[TypeEngine] = None, + existing_server_default: Optional[_ServerDefault] = None, + existing_nullable: Optional[bool] = None, + existing_autoincrement: Optional[bool] = None, + **kw: Any, + ) -> None: + using = kw.pop("postgresql_using", None) + + if using is not None and type_ is None: + raise util.CommandError( + "postgresql_using must be used with the type_ parameter" + ) + + if type_ is not None: + self._exec( + PostgresqlColumnType( + table_name, + column_name, + type_, + schema=schema, + using=using, + existing_type=existing_type, + existing_server_default=existing_server_default, + existing_nullable=existing_nullable, + ) + ) + + super().alter_column( + table_name, + column_name, + nullable=nullable, + server_default=server_default, + name=name, + schema=schema, + autoincrement=autoincrement, + existing_type=existing_type, + existing_server_default=existing_server_default, + existing_nullable=existing_nullable, + existing_autoincrement=existing_autoincrement, + **kw, + ) + + def autogen_column_reflect(self, inspector, table, column_info): + if column_info.get("default") and isinstance( + column_info["type"], (INTEGER, BIGINT) + ): + seq_match = re.match( + r"nextval\('(.+?)'::regclass\)", column_info["default"] + ) + if seq_match: + info = sqla_compat._exec_on_inspector( + inspector, + text( + "select c.relname, a.attname " + "from pg_class as c join " + "pg_depend d on d.objid=c.oid and " + "d.classid='pg_class'::regclass and " + "d.refclassid='pg_class'::regclass " + "join pg_class t on t.oid=d.refobjid " 
+ "join pg_attribute a on a.attrelid=t.oid and " + "a.attnum=d.refobjsubid " + "where c.relkind='S' and " + "c.oid=cast(:seqname as regclass)" + ), + seqname=seq_match.group(1), + ).first() + if info: + seqname, colname = info + if colname == column_info["name"]: + log.info( + "Detected sequence named '%s' as " + "owned by integer column '%s(%s)', " + "assuming SERIAL and omitting", + seqname, + table.name, + colname, + ) + # sequence, and the owner is this column, + # its a SERIAL - whack it! + del column_info["default"] + + def correct_for_autogen_constraints( + self, + conn_unique_constraints, + conn_indexes, + metadata_unique_constraints, + metadata_indexes, + ): + doubled_constraints = { + index + for index in conn_indexes + if index.info.get("duplicates_constraint") + } + + for ix in doubled_constraints: + conn_indexes.remove(ix) + + if not sqla_compat.sqla_2: + self._skip_functional_indexes(metadata_indexes, conn_indexes) + + # pg behavior regarding modifiers + # | # | compiled sql | returned sql | regexp. group is removed | + # | - | ---------------- | -----------------| ------------------------ | + # | 1 | nulls first | nulls first | - | + # | 2 | nulls last | | (? str: + expr = expr.lower().replace('"', "").replace("'", "") + if index.table is not None: + # should not be needed, since include_table=False is in compile + expr = expr.replace(f"{index.table.name.lower()}.", "") + + if "::" in expr: + # strip :: cast. types can have spaces in them + expr = re.sub(r"(::[\w ]+\w)", "", expr) + + while expr and expr[0] == "(" and expr[-1] == ")": + expr = expr[1:-1] + + # NOTE: when parsing the connection expression this cleanup could + # be skipped + for rs in self._default_modifiers_re: + if match := rs.search(expr): + start, end = match.span(1) + expr = expr[:start] + expr[end:] + break + + while expr and expr[0] == "(" and expr[-1] == ")": + expr = expr[1:-1] + + # strip casts + cast_re = re.compile(r"cast\s*\(") + if cast_re.match(expr): + expr = cast_re.sub("", expr) + # remove the as type + expr = re.sub(r"as\s+[^)]+\)", "", expr) + # remove spaces + expr = expr.replace(" ", "") + return expr + + def _dialect_options( + self, item: Union[Index, UniqueConstraint] + ) -> Tuple[Any, ...]: + # only the positive case is returned by sqlalchemy reflection so + # None and False are threated the same + if item.dialect_kwargs.get("postgresql_nulls_not_distinct"): + return ("nulls_not_distinct",) + return () + + def compare_indexes( + self, + metadata_index: Index, + reflected_index: Index, + ) -> ComparisonResult: + msg = [] + unique_msg = self._compare_index_unique( + metadata_index, reflected_index + ) + if unique_msg: + msg.append(unique_msg) + m_exprs = metadata_index.expressions + r_exprs = reflected_index.expressions + if len(m_exprs) != len(r_exprs): + msg.append(f"expression number {len(r_exprs)} to {len(m_exprs)}") + if msg: + # no point going further, return early + return ComparisonResult.Different(msg) + skip = [] + for pos, (m_e, r_e) in enumerate(zip(m_exprs, r_exprs), 1): + m_compile = self._compile_element(m_e) + m_text = self._cleanup_index_expr(metadata_index, m_compile) + # print(f"META ORIG: {m_compile!r} CLEANUP: {m_text!r}") + r_compile = self._compile_element(r_e) + r_text = self._cleanup_index_expr(metadata_index, r_compile) + # print(f"CONN ORIG: {r_compile!r} CLEANUP: {r_text!r}") + if m_text == r_text: + continue # expressions these are equal + elif m_compile.strip().endswith("_ops") and ( + " " in m_compile or ")" in m_compile # is an expression + ): + 
skip.append( + f"expression #{pos} {m_compile!r} detected " + "as including operator clause." + ) + util.warn( + f"Expression #{pos} {m_compile!r} in index " + f"{reflected_index.name!r} detected to include " + "an operator clause. Expression compare cannot proceed. " + "Please move the operator clause to the " + "``postgresql_ops`` dict to enable proper compare " + "of the index expressions: " + "https://docs.sqlalchemy.org/en/latest/dialects/postgresql.html#operator-classes", # noqa: E501 + ) + else: + msg.append(f"expression #{pos} {r_compile!r} to {m_compile!r}") + + m_options = self._dialect_options(metadata_index) + r_options = self._dialect_options(reflected_index) + if m_options != r_options: + msg.extend(f"options {r_options} to {m_options}") + + if msg: + return ComparisonResult.Different(msg) + elif skip: + # if there are other changes detected don't skip the index + return ComparisonResult.Skip(skip) + else: + return ComparisonResult.Equal() + + def compare_unique_constraint( + self, + metadata_constraint: UniqueConstraint, + reflected_constraint: UniqueConstraint, + ) -> ComparisonResult: + metadata_tup = self._create_metadata_constraint_sig( + metadata_constraint + ) + reflected_tup = self._create_reflected_constraint_sig( + reflected_constraint + ) + + meta_sig = metadata_tup.unnamed + conn_sig = reflected_tup.unnamed + if conn_sig != meta_sig: + return ComparisonResult.Different( + f"expression {conn_sig} to {meta_sig}" + ) + + metadata_do = self._dialect_options(metadata_tup.const) + conn_do = self._dialect_options(reflected_tup.const) + if metadata_do != conn_do: + return ComparisonResult.Different( + f"expression {conn_do} to {metadata_do}" + ) + + return ComparisonResult.Equal() + + def adjust_reflected_dialect_options( + self, reflected_options: Dict[str, Any], kind: str + ) -> Dict[str, Any]: + options: Dict[str, Any] + options = reflected_options.get("dialect_options", {}).copy() + if not options.get("postgresql_include"): + options.pop("postgresql_include", None) + return options + + def _compile_element(self, element: Union[ClauseElement, str]) -> str: + if isinstance(element, str): + return element + return element.compile( + dialect=self.dialect, + compile_kwargs={"literal_binds": True, "include_table": False}, + ).string + + def render_ddl_sql_expr( + self, + expr: ClauseElement, + is_server_default: bool = False, + is_index: bool = False, + **kw: Any, + ) -> str: + """Render a SQL expression that is typically a server default, + index expression, etc. 
+ + """ + + # apply self_group to index expressions; + # see https://github.com/sqlalchemy/sqlalchemy/blob/ + # 82fa95cfce070fab401d020c6e6e4a6a96cc2578/ + # lib/sqlalchemy/dialects/postgresql/base.py#L2261 + if is_index and not isinstance(expr, ColumnClause): + expr = expr.self_group() + + return super().render_ddl_sql_expr( + expr, is_server_default=is_server_default, is_index=is_index, **kw + ) + + def render_type( + self, type_: TypeEngine, autogen_context: AutogenContext + ) -> Union[str, Literal[False]]: + mod = type(type_).__module__ + if not mod.startswith("sqlalchemy.dialects.postgresql"): + return False + + if hasattr(self, "_render_%s_type" % type_.__visit_name__): + meth = getattr(self, "_render_%s_type" % type_.__visit_name__) + return meth(type_, autogen_context) + + return False + + def _render_HSTORE_type( + self, type_: HSTORE, autogen_context: AutogenContext + ) -> str: + return cast( + str, + render._render_type_w_subtype( + type_, autogen_context, "text_type", r"(.+?\(.*text_type=)" + ), + ) + + def _render_ARRAY_type( + self, type_: ARRAY, autogen_context: AutogenContext + ) -> str: + return cast( + str, + render._render_type_w_subtype( + type_, autogen_context, "item_type", r"(.+?\()" + ), + ) + + def _render_JSON_type( + self, type_: JSON, autogen_context: AutogenContext + ) -> str: + return cast( + str, + render._render_type_w_subtype( + type_, autogen_context, "astext_type", r"(.+?\(.*astext_type=)" + ), + ) + + def _render_JSONB_type( + self, type_: JSONB, autogen_context: AutogenContext + ) -> str: + return cast( + str, + render._render_type_w_subtype( + type_, autogen_context, "astext_type", r"(.+?\(.*astext_type=)" + ), + ) + + +class PostgresqlColumnType(AlterColumn): + def __init__( + self, name: str, column_name: str, type_: TypeEngine, **kw + ) -> None: + using = kw.pop("using", None) + super().__init__(name, column_name, **kw) + self.type_ = sqltypes.to_instance(type_) + self.using = using + + +@compiles(RenameTable, "postgresql") +def visit_rename_table( + element: RenameTable, compiler: PGDDLCompiler, **kw +) -> str: + return "%s RENAME TO %s" % ( + alter_table(compiler, element.table_name, element.schema), + format_table_name(compiler, element.new_table_name, None), + ) + + +@compiles(PostgresqlColumnType, "postgresql") +def visit_column_type( + element: PostgresqlColumnType, compiler: PGDDLCompiler, **kw +) -> str: + return "%s %s %s %s" % ( + alter_table(compiler, element.table_name, element.schema), + alter_column(compiler, element.column_name), + "TYPE %s" % format_type(compiler, element.type_), + "USING %s" % element.using if element.using else "", + ) + + +@compiles(ColumnComment, "postgresql") +def visit_column_comment( + element: ColumnComment, compiler: PGDDLCompiler, **kw +) -> str: + ddl = "COMMENT ON COLUMN {table_name}.{column_name} IS {comment}" + comment = ( + compiler.sql_compiler.render_literal_value( + element.comment, sqltypes.String() + ) + if element.comment is not None + else "NULL" + ) + + return ddl.format( + table_name=format_table_name( + compiler, element.table_name, element.schema + ), + column_name=format_column_name(compiler, element.column_name), + comment=comment, + ) + + +@compiles(IdentityColumnDefault, "postgresql") +def visit_identity_column( + element: IdentityColumnDefault, compiler: PGDDLCompiler, **kw +): + text = "%s %s " % ( + alter_table(compiler, element.table_name, element.schema), + alter_column(compiler, element.column_name), + ) + if element.default is None: + # drop identity + text += "DROP IDENTITY" + 
return text + elif element.existing_server_default is None: + # add identity options + text += "ADD " + text += compiler.visit_identity_column(element.default) + return text + else: + # alter identity + diff, _, _ = element.impl._compare_identity_default( + element.default, element.existing_server_default + ) + identity = element.default + for attr in sorted(diff): + if attr == "always": + text += "SET GENERATED %s " % ( + "ALWAYS" if identity.always else "BY DEFAULT" + ) + else: + text += "SET %s " % compiler.get_identity_options( + Identity(**{attr: getattr(identity, attr)}) + ) + return text + + +@Operations.register_operation("create_exclude_constraint") +@BatchOperations.register_operation( + "create_exclude_constraint", "batch_create_exclude_constraint" +) +@ops.AddConstraintOp.register_add_constraint("exclude_constraint") +class CreateExcludeConstraintOp(ops.AddConstraintOp): + """Represent a create exclude constraint operation.""" + + constraint_type = "exclude" + + def __init__( + self, + constraint_name: sqla_compat._ConstraintName, + table_name: Union[str, quoted_name], + elements: Union[ + Sequence[Tuple[str, str]], + Sequence[Tuple[ColumnClause[Any], str]], + ], + where: Optional[Union[ColumnElement[bool], str]] = None, + schema: Optional[str] = None, + _orig_constraint: Optional[ExcludeConstraint] = None, + **kw, + ) -> None: + self.constraint_name = constraint_name + self.table_name = table_name + self.elements = elements + self.where = where + self.schema = schema + self._orig_constraint = _orig_constraint + self.kw = kw + + @classmethod + def from_constraint( # type:ignore[override] + cls, constraint: ExcludeConstraint + ) -> CreateExcludeConstraintOp: + constraint_table = sqla_compat._table_for_constraint(constraint) + return cls( + constraint.name, + constraint_table.name, + [ # type: ignore + (expr, op) for expr, name, op in constraint._render_exprs + ], + where=cast("ColumnElement[bool] | None", constraint.where), + schema=constraint_table.schema, + _orig_constraint=constraint, + deferrable=constraint.deferrable, + initially=constraint.initially, + using=constraint.using, + ) + + def to_constraint( + self, migration_context: Optional[MigrationContext] = None + ) -> ExcludeConstraint: + if self._orig_constraint is not None: + return self._orig_constraint + schema_obj = schemaobj.SchemaObjects(migration_context) + t = schema_obj.table(self.table_name, schema=self.schema) + excl = ExcludeConstraint( + *self.elements, + name=self.constraint_name, + where=self.where, + **self.kw, + ) + for ( + expr, + name, + oper, + ) in excl._render_exprs: + t.append_column(Column(name, NULLTYPE)) + t.append_constraint(excl) + return excl + + @classmethod + def create_exclude_constraint( + cls, + operations: Operations, + constraint_name: str, + table_name: str, + *elements: Any, + **kw: Any, + ) -> Optional[Table]: + """Issue an alter to create an EXCLUDE constraint using the + current migration context. + + .. note:: This method is Postgresql specific, and additionally + requires at least SQLAlchemy 1.0. + + e.g.:: + + from alembic import op + + op.create_exclude_constraint( + "user_excl", + "user", + ("period", "&&"), + ("group", "="), + where=("group != 'some group'"), + ) + + Note that the expressions work the same way as that of + the ``ExcludeConstraint`` object itself; if plain strings are + passed, quoting rules must be applied manually. + + :param name: Name of the constraint. + :param table_name: String name of the source table. + :param elements: exclude conditions. 
+ :param where: SQL expression or SQL string with optional WHERE + clause. + :param deferrable: optional bool. If set, emit DEFERRABLE or + NOT DEFERRABLE when issuing DDL for this constraint. + :param initially: optional string. If set, emit INITIALLY + when issuing DDL for this constraint. + :param schema: Optional schema name to operate within. + + """ + op = cls(constraint_name, table_name, elements, **kw) + return operations.invoke(op) + + @classmethod + def batch_create_exclude_constraint( + cls, + operations: BatchOperations, + constraint_name: str, + *elements: Any, + **kw: Any, + ) -> Optional[Table]: + """Issue a "create exclude constraint" instruction using the + current batch migration context. + + .. note:: This method is Postgresql specific, and additionally + requires at least SQLAlchemy 1.0. + + .. seealso:: + + :meth:`.Operations.create_exclude_constraint` + + """ + kw["schema"] = operations.impl.schema + op = cls(constraint_name, operations.impl.table_name, elements, **kw) + return operations.invoke(op) + + +@render.renderers.dispatch_for(CreateExcludeConstraintOp) +def _add_exclude_constraint( + autogen_context: AutogenContext, op: CreateExcludeConstraintOp +) -> str: + return _exclude_constraint(op.to_constraint(), autogen_context, alter=True) + + +@render._constraint_renderers.dispatch_for(ExcludeConstraint) +def _render_inline_exclude_constraint( + constraint: ExcludeConstraint, + autogen_context: AutogenContext, + namespace_metadata: MetaData, +) -> str: + rendered = render._user_defined_render( + "exclude", constraint, autogen_context + ) + if rendered is not False: + return rendered + + return _exclude_constraint(constraint, autogen_context, False) + + +def _postgresql_autogenerate_prefix(autogen_context: AutogenContext) -> str: + imports = autogen_context.imports + if imports is not None: + imports.add("from sqlalchemy.dialects import postgresql") + return "postgresql." 
+ + +def _exclude_constraint( + constraint: ExcludeConstraint, + autogen_context: AutogenContext, + alter: bool, +) -> str: + opts: List[Tuple[str, Union[quoted_name, str, _f_name, None]]] = [] + + has_batch = autogen_context._has_batch + + if constraint.deferrable: + opts.append(("deferrable", str(constraint.deferrable))) + if constraint.initially: + opts.append(("initially", str(constraint.initially))) + if constraint.using: + opts.append(("using", str(constraint.using))) + if not has_batch and alter and constraint.table.schema: + opts.append(("schema", render._ident(constraint.table.schema))) + if not alter and constraint.name: + opts.append( + ("name", render._render_gen_name(autogen_context, constraint.name)) + ) + + def do_expr_where_opts(): + args = [ + "(%s, %r)" + % ( + _render_potential_column( + sqltext, # type:ignore[arg-type] + autogen_context, + ), + opstring, + ) + for sqltext, name, opstring in constraint._render_exprs + ] + if constraint.where is not None: + args.append( + "where=%s" + % render._render_potential_expr( + constraint.where, autogen_context + ) + ) + args.extend(["%s=%r" % (k, v) for k, v in opts]) + return args + + if alter: + args = [ + repr(render._render_gen_name(autogen_context, constraint.name)) + ] + if not has_batch: + args += [repr(render._ident(constraint.table.name))] + args.extend(do_expr_where_opts()) + return "%(prefix)screate_exclude_constraint(%(args)s)" % { + "prefix": render._alembic_autogenerate_prefix(autogen_context), + "args": ", ".join(args), + } + else: + args = do_expr_where_opts() + return "%(prefix)sExcludeConstraint(%(args)s)" % { + "prefix": _postgresql_autogenerate_prefix(autogen_context), + "args": ", ".join(args), + } + + +def _render_potential_column( + value: Union[ + ColumnClause[Any], Column[Any], TextClause, FunctionElement[Any] + ], + autogen_context: AutogenContext, +) -> str: + if isinstance(value, ColumnClause): + if value.is_literal: + # like literal_column("int8range(from, to)") in ExcludeConstraint + template = "%(prefix)sliteral_column(%(name)r)" + else: + template = "%(prefix)scolumn(%(name)r)" + + return template % { + "prefix": render._sqlalchemy_autogenerate_prefix(autogen_context), + "name": value.name, + } + else: + return render._render_potential_expr( + value, + autogen_context, + wrap_in_element=isinstance(value, (TextClause, FunctionElement)), + ) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/ddl/sqlite.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/ddl/sqlite.py new file mode 100644 index 0000000000000000000000000000000000000000..5f141330fb8bb32d5c2c01996824ccaa2151a39a --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/ddl/sqlite.py @@ -0,0 +1,237 @@ +# mypy: allow-untyped-defs, allow-incomplete-defs, allow-untyped-calls +# mypy: no-warn-return-any, allow-any-generics + +from __future__ import annotations + +import re +from typing import Any +from typing import Dict +from typing import Optional +from typing import TYPE_CHECKING +from typing import Union + +from sqlalchemy import cast +from sqlalchemy import Computed +from sqlalchemy import JSON +from sqlalchemy import schema +from sqlalchemy import sql + +from .base import alter_table +from .base import ColumnName +from .base import format_column_name +from .base import format_table_name +from .base import RenameTable +from .impl import DefaultImpl +from .. 
import util +from ..util.sqla_compat import compiles + +if TYPE_CHECKING: + from sqlalchemy.engine.reflection import Inspector + from sqlalchemy.sql.compiler import DDLCompiler + from sqlalchemy.sql.elements import Cast + from sqlalchemy.sql.elements import ClauseElement + from sqlalchemy.sql.schema import Column + from sqlalchemy.sql.schema import Constraint + from sqlalchemy.sql.schema import Table + from sqlalchemy.sql.type_api import TypeEngine + + from ..operations.batch import BatchOperationsImpl + + +class SQLiteImpl(DefaultImpl): + __dialect__ = "sqlite" + + transactional_ddl = False + """SQLite supports transactional DDL, but pysqlite does not: + see: http://bugs.python.org/issue10740 + """ + + def requires_recreate_in_batch( + self, batch_op: BatchOperationsImpl + ) -> bool: + """Return True if the given :class:`.BatchOperationsImpl` + would need the table to be recreated and copied in order to + proceed. + + Normally, only returns True on SQLite when operations other + than add_column are present. + + """ + for op in batch_op.batch: + if op[0] == "add_column": + col = op[1][1] + if isinstance( + col.server_default, schema.DefaultClause + ) and isinstance(col.server_default.arg, sql.ClauseElement): + return True + elif ( + isinstance(col.server_default, Computed) + and col.server_default.persisted + ): + return True + elif op[0] not in ("create_index", "drop_index"): + return True + else: + return False + + def add_constraint(self, const: Constraint): + # attempt to distinguish between an + # auto-gen constraint and an explicit one + if const._create_rule is None: + raise NotImplementedError( + "No support for ALTER of constraints in SQLite dialect. " + "Please refer to the batch mode feature which allows for " + "SQLite migrations using a copy-and-move strategy." + ) + elif const._create_rule(self): + util.warn( + "Skipping unsupported ALTER for " + "creation of implicit constraint. " + "Please refer to the batch mode feature which allows for " + "SQLite migrations using a copy-and-move strategy." + ) + + def drop_constraint(self, const: Constraint, **kw: Any): + if const._create_rule is None: + raise NotImplementedError( + "No support for ALTER of constraints in SQLite dialect. " + "Please refer to the batch mode feature which allows for " + "SQLite migrations using a copy-and-move strategy." + ) + + def compare_server_default( + self, + inspector_column: Column[Any], + metadata_column: Column[Any], + rendered_metadata_default: Optional[str], + rendered_inspector_default: Optional[str], + ) -> bool: + if rendered_metadata_default is not None: + rendered_metadata_default = re.sub( + r"^\((.+)\)$", r"\1", rendered_metadata_default + ) + + rendered_metadata_default = re.sub( + r"^\"?'(.+)'\"?$", r"\1", rendered_metadata_default + ) + + if rendered_inspector_default is not None: + rendered_inspector_default = re.sub( + r"^\((.+)\)$", r"\1", rendered_inspector_default + ) + + rendered_inspector_default = re.sub( + r"^\"?'(.+)'\"?$", r"\1", rendered_inspector_default + ) + + return rendered_inspector_default != rendered_metadata_default + + def _guess_if_default_is_unparenthesized_sql_expr( + self, expr: Optional[str] + ) -> bool: + """Determine if a server default is a SQL expression or a constant. + + There are too many assertions that expect server defaults to round-trip + identically without parenthesis added so we will add parens only in + very specific cases. 
+ + """ + if not expr: + return False + elif re.match(r"^[0-9\.]$", expr): + return False + elif re.match(r"^'.+'$", expr): + return False + elif re.match(r"^\(.+\)$", expr): + return False + else: + return True + + def autogen_column_reflect( + self, + inspector: Inspector, + table: Table, + column_info: Dict[str, Any], + ) -> None: + # SQLite expression defaults require parenthesis when sent + # as DDL + if self._guess_if_default_is_unparenthesized_sql_expr( + column_info.get("default", None) + ): + column_info["default"] = "(%s)" % (column_info["default"],) + + def render_ddl_sql_expr( + self, expr: ClauseElement, is_server_default: bool = False, **kw + ) -> str: + # SQLite expression defaults require parenthesis when sent + # as DDL + str_expr = super().render_ddl_sql_expr( + expr, is_server_default=is_server_default, **kw + ) + + if ( + is_server_default + and self._guess_if_default_is_unparenthesized_sql_expr(str_expr) + ): + str_expr = "(%s)" % (str_expr,) + return str_expr + + def cast_for_batch_migrate( + self, + existing: Column[Any], + existing_transfer: Dict[str, Union[TypeEngine, Cast]], + new_type: TypeEngine, + ) -> None: + if ( + existing.type._type_affinity is not new_type._type_affinity + and not isinstance(new_type, JSON) + ): + existing_transfer["expr"] = cast( + existing_transfer["expr"], new_type + ) + + def correct_for_autogen_constraints( + self, + conn_unique_constraints, + conn_indexes, + metadata_unique_constraints, + metadata_indexes, + ): + self._skip_functional_indexes(metadata_indexes, conn_indexes) + + +@compiles(RenameTable, "sqlite") +def visit_rename_table( + element: RenameTable, compiler: DDLCompiler, **kw +) -> str: + return "%s RENAME TO %s" % ( + alter_table(compiler, element.table_name, element.schema), + format_table_name(compiler, element.new_table_name, None), + ) + + +@compiles(ColumnName, "sqlite") +def visit_column_name(element: ColumnName, compiler: DDLCompiler, **kw) -> str: + return "%s RENAME COLUMN %s TO %s" % ( + alter_table(compiler, element.table_name, element.schema), + format_column_name(compiler, element.column_name), + format_column_name(compiler, element.newname), + ) + + +# @compiles(AddColumn, 'sqlite') +# def visit_add_column(element, compiler, **kw): +# return "%s %s" % ( +# alter_table(compiler, element.table_name, element.schema), +# add_column(compiler, element.column, **kw) +# ) + + +# def add_column(compiler, column, **kw): +# text = "ADD COLUMN %s" % compiler.get_column_specification(column, **kw) +# need to modify SQLAlchemy so that the CHECK associated with a Boolean +# or Enum gets placed as part of the column constraints, not the Table +# see ticket 98 +# for const in column.constraints: +# text += compiler.process(AddConstraint(const)) +# return text diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/environment.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/environment.py new file mode 100644 index 0000000000000000000000000000000000000000..adfc93eb0c2fdf4e8104faab95bdb4bdd210fbaa --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/environment.py @@ -0,0 +1 @@ +from .runtime.environment import * # noqa diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/migration.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/migration.py new file mode 100644 index 
0000000000000000000000000000000000000000..02626e2cf6d4cbe7f57dc95fce2399ea93df0dbc --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/migration.py @@ -0,0 +1 @@ +from .runtime.migration import * # noqa diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/op.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/op.py new file mode 100644 index 0000000000000000000000000000000000000000..f3f5fac0cf5c1e56d44f42051b6d829f7026c86d --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/op.py @@ -0,0 +1,5 @@ +from .operations.base import Operations + +# create proxy functions for +# each method on the Operations class. +Operations.create_module_class_proxy(globals(), locals()) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/op.pyi b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/op.pyi new file mode 100644 index 0000000000000000000000000000000000000000..8cdf7590756286685cedd2a82678c39b7ca55dc6 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/op.pyi @@ -0,0 +1,1356 @@ +# ### this file stubs are generated by tools/write_pyi.py - do not edit ### +# ### imports are manually managed +from __future__ import annotations + +from contextlib import contextmanager +from typing import Any +from typing import Awaitable +from typing import Callable +from typing import Dict +from typing import Iterator +from typing import List +from typing import Literal +from typing import Mapping +from typing import Optional +from typing import overload +from typing import Sequence +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING +from typing import TypeVar +from typing import Union + +if TYPE_CHECKING: + from sqlalchemy.engine import Connection + from sqlalchemy.sql import Executable + from sqlalchemy.sql.elements import ColumnElement + from sqlalchemy.sql.elements import conv + from sqlalchemy.sql.elements import TextClause + from sqlalchemy.sql.expression import TableClause + from sqlalchemy.sql.schema import Column + from sqlalchemy.sql.schema import Computed + from sqlalchemy.sql.schema import Identity + from sqlalchemy.sql.schema import SchemaItem + from sqlalchemy.sql.schema import Table + from sqlalchemy.sql.type_api import TypeEngine + from sqlalchemy.util import immutabledict + + from .operations.base import BatchOperations + from .operations.ops import AddColumnOp + from .operations.ops import AddConstraintOp + from .operations.ops import AlterColumnOp + from .operations.ops import AlterTableOp + from .operations.ops import BulkInsertOp + from .operations.ops import CreateIndexOp + from .operations.ops import CreateTableCommentOp + from .operations.ops import CreateTableOp + from .operations.ops import DropColumnOp + from .operations.ops import DropConstraintOp + from .operations.ops import DropIndexOp + from .operations.ops import DropTableCommentOp + from .operations.ops import DropTableOp + from .operations.ops import ExecuteSQLOp + from .operations.ops import MigrateOperation + from .runtime.migration import MigrationContext + from .util.sqla_compat import _literal_bindparam + +_T = TypeVar("_T") +_C = TypeVar("_C", bound=Callable[..., Any]) + +### end imports ### + +def add_column( + table_name: str, + column: Column[Any], + *, + schema: 
Optional[str] = None, + if_not_exists: Optional[bool] = None, +) -> None: + """Issue an "add column" instruction using the current + migration context. + + e.g.:: + + from alembic import op + from sqlalchemy import Column, String + + op.add_column("organization", Column("name", String())) + + The :meth:`.Operations.add_column` method typically corresponds + to the SQL command "ALTER TABLE... ADD COLUMN". Within the scope + of this command, the column's name, datatype, nullability, + and optional server-generated defaults may be indicated. + + .. note:: + + With the exception of NOT NULL constraints or single-column FOREIGN + KEY constraints, other kinds of constraints such as PRIMARY KEY, + UNIQUE or CHECK constraints **cannot** be generated using this + method; for these constraints, refer to operations such as + :meth:`.Operations.create_primary_key` and + :meth:`.Operations.create_check_constraint`. In particular, the + following :class:`~sqlalchemy.schema.Column` parameters are + **ignored**: + + * :paramref:`~sqlalchemy.schema.Column.primary_key` - SQL databases + typically do not support an ALTER operation that can add + individual columns one at a time to an existing primary key + constraint, therefore it's less ambiguous to use the + :meth:`.Operations.create_primary_key` method, which assumes no + existing primary key constraint is present. + * :paramref:`~sqlalchemy.schema.Column.unique` - use the + :meth:`.Operations.create_unique_constraint` method + * :paramref:`~sqlalchemy.schema.Column.index` - use the + :meth:`.Operations.create_index` method + + + The provided :class:`~sqlalchemy.schema.Column` object may include a + :class:`~sqlalchemy.schema.ForeignKey` constraint directive, + referencing a remote table name. For this specific type of constraint, + Alembic will automatically emit a second ALTER statement in order to + add the single-column FOREIGN KEY constraint separately:: + + from alembic import op + from sqlalchemy import Column, INTEGER, ForeignKey + + op.add_column( + "organization", + Column("account_id", INTEGER, ForeignKey("accounts.id")), + ) + + The column argument passed to :meth:`.Operations.add_column` is a + :class:`~sqlalchemy.schema.Column` construct, used in the same way it's + used in SQLAlchemy. In particular, values or functions to be indicated + as producing the column's default value on the database side are + specified using the ``server_default`` parameter, and not ``default`` + which only specifies Python-side defaults:: + + from alembic import op + from sqlalchemy import Column, TIMESTAMP, func + + # specify "DEFAULT NOW" along with the column add + op.add_column( + "account", + Column("timestamp", TIMESTAMP, server_default=func.now()), + ) + + :param table_name: String name of the parent table. + :param column: a :class:`sqlalchemy.schema.Column` object + representing the new column. + :param schema: Optional schema name to operate within. To control + quoting of the schema outside of the default behavior, use + the SQLAlchemy construct + :class:`~sqlalchemy.sql.elements.quoted_name`. + :param if_not_exists: If True, adds IF NOT EXISTS operator + when creating the new column for compatible dialects + + .. 
versionadded:: 1.16.0 + + """ + +def alter_column( + table_name: str, + column_name: str, + *, + nullable: Optional[bool] = None, + comment: Union[str, Literal[False], None] = False, + server_default: Union[ + str, bool, Identity, Computed, TextClause, None + ] = False, + new_column_name: Optional[str] = None, + type_: Union[TypeEngine[Any], Type[TypeEngine[Any]], None] = None, + existing_type: Union[TypeEngine[Any], Type[TypeEngine[Any]], None] = None, + existing_server_default: Union[ + str, bool, Identity, Computed, TextClause, None + ] = False, + existing_nullable: Optional[bool] = None, + existing_comment: Optional[str] = None, + schema: Optional[str] = None, + **kw: Any, +) -> None: + r"""Issue an "alter column" instruction using the + current migration context. + + Generally, only that aspect of the column which + is being changed, i.e. name, type, nullability, + default, needs to be specified. Multiple changes + can also be specified at once and the backend should + "do the right thing", emitting each change either + separately or together as the backend allows. + + MySQL has special requirements here, since MySQL + cannot ALTER a column without a full specification. + When producing MySQL-compatible migration files, + it is recommended that the ``existing_type``, + ``existing_server_default``, and ``existing_nullable`` + parameters be present, if not being altered. + + Type changes which are against the SQLAlchemy + "schema" types :class:`~sqlalchemy.types.Boolean` + and :class:`~sqlalchemy.types.Enum` may also + add or drop constraints which accompany those + types on backends that don't support them natively. + The ``existing_type`` argument is + used in this case to identify and remove a previous + constraint that was bound to the type object. + + :param table_name: string name of the target table. + :param column_name: string name of the target column, + as it exists before the operation begins. + :param nullable: Optional; specify ``True`` or ``False`` + to alter the column's nullability. + :param server_default: Optional; specify a string + SQL expression, :func:`~sqlalchemy.sql.expression.text`, + or :class:`~sqlalchemy.schema.DefaultClause` to indicate + an alteration to the column's default value. + Set to ``None`` to have the default removed. + :param comment: optional string text of a new comment to add to the + column. + :param new_column_name: Optional; specify a string name here to + indicate the new name within a column rename operation. + :param type\_: Optional; a :class:`~sqlalchemy.types.TypeEngine` + type object to specify a change to the column's type. + For SQLAlchemy types that also indicate a constraint (i.e. + :class:`~sqlalchemy.types.Boolean`, :class:`~sqlalchemy.types.Enum`), + the constraint is also generated. + :param autoincrement: set the ``AUTO_INCREMENT`` flag of the column; + currently understood by the MySQL dialect. + :param existing_type: Optional; a + :class:`~sqlalchemy.types.TypeEngine` + type object to specify the previous type. This + is required for all MySQL column alter operations that + don't otherwise specify a new type, as well as for + when nullability is being changed on a SQL Server + column. It is also used if the type is a so-called + SQLAlchemy "schema" type which may define a constraint (i.e. + :class:`~sqlalchemy.types.Boolean`, + :class:`~sqlalchemy.types.Enum`), + so that the constraint can be dropped. + :param existing_server_default: Optional; The existing + default value of the column. 
Required on MySQL if + an existing default is not being changed; else MySQL + removes the default. + :param existing_nullable: Optional; the existing nullability + of the column. Required on MySQL if the existing nullability + is not being changed; else MySQL sets this to NULL. + :param existing_autoincrement: Optional; the existing autoincrement + of the column. Used for MySQL's system of altering a column + that specifies ``AUTO_INCREMENT``. + :param existing_comment: string text of the existing comment on the + column to be maintained. Required on MySQL if the existing comment + on the column is not being changed. + :param schema: Optional schema name to operate within. To control + quoting of the schema outside of the default behavior, use + the SQLAlchemy construct + :class:`~sqlalchemy.sql.elements.quoted_name`. + :param postgresql_using: String argument which will indicate a + SQL expression to render within the Postgresql-specific USING clause + within ALTER COLUMN. This string is taken directly as raw SQL which + must explicitly include any necessary quoting or escaping of tokens + within the expression. + + """ + +@contextmanager +def batch_alter_table( + table_name: str, + schema: Optional[str] = None, + recreate: Literal["auto", "always", "never"] = "auto", + partial_reordering: Optional[Tuple[Any, ...]] = None, + copy_from: Optional[Table] = None, + table_args: Tuple[Any, ...] = (), + table_kwargs: Mapping[str, Any] = immutabledict({}), + reflect_args: Tuple[Any, ...] = (), + reflect_kwargs: Mapping[str, Any] = immutabledict({}), + naming_convention: Optional[Dict[str, str]] = None, +) -> Iterator[BatchOperations]: + """Invoke a series of per-table migrations in batch. + + Batch mode allows a series of operations specific to a table + to be syntactically grouped together, and allows for alternate + modes of table migration, in particular the "recreate" style of + migration required by SQLite. + + "recreate" style is as follows: + + 1. A new table is created with the new specification, based on the + migration directives within the batch, using a temporary name. + + 2. the data copied from the existing table to the new table. + + 3. the existing table is dropped. + + 4. the new table is renamed to the existing table name. + + The directive by default will only use "recreate" style on the + SQLite backend, and only if directives are present which require + this form, e.g. anything other than ``add_column()``. The batch + operation on other backends will proceed using standard ALTER TABLE + operations. + + The method is used as a context manager, which returns an instance + of :class:`.BatchOperations`; this object is the same as + :class:`.Operations` except that table names and schema names + are omitted. E.g.:: + + with op.batch_alter_table("some_table") as batch_op: + batch_op.add_column(Column("foo", Integer)) + batch_op.drop_column("bar") + + The operations within the context manager are invoked at once + when the context is ended. When run against SQLite, if the + migrations include operations not supported by SQLite's ALTER TABLE, + the entire table will be copied to a new one with the new + specification, moving all data across as well. + + The copy operation by default uses reflection to retrieve the current + structure of the table, and therefore :meth:`.batch_alter_table` + in this mode requires that the migration is run in "online" mode. 
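For illustration, a minimal sketch of the ``copy_from`` pattern described just below; the table name and columns are hypothetical and would normally mirror the real table being migrated. Supplying a pre-built Table skips reflection, so the batch can also run in --sql (offline) mode::

    from alembic import op
    from sqlalchemy import Column, Integer, MetaData, String, Table

    # hypothetical stand-in for the table as it exists before this batch
    copy_source = Table(
        "some_table",
        MetaData(),
        Column("id", Integer, primary_key=True),
        Column("bar", String(50)),
    )

    with op.batch_alter_table("some_table", copy_from=copy_source) as batch_op:
        batch_op.drop_column("bar")
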
+ The ``copy_from`` parameter may be passed which refers to an existing + :class:`.Table` object, which will bypass this reflection step. + + .. note:: The table copy operation will currently not copy + CHECK constraints, and may not copy UNIQUE constraints that are + unnamed, as is possible on SQLite. See the section + :ref:`sqlite_batch_constraints` for workarounds. + + :param table_name: name of table + :param schema: optional schema name. + :param recreate: under what circumstances the table should be + recreated. At its default of ``"auto"``, the SQLite dialect will + recreate the table if any operations other than ``add_column()``, + ``create_index()``, or ``drop_index()`` are + present. Other options include ``"always"`` and ``"never"``. + :param copy_from: optional :class:`~sqlalchemy.schema.Table` object + that will act as the structure of the table being copied. If omitted, + table reflection is used to retrieve the structure of the table. + + .. seealso:: + + :ref:`batch_offline_mode` + + :paramref:`~.Operations.batch_alter_table.reflect_args` + + :paramref:`~.Operations.batch_alter_table.reflect_kwargs` + + :param reflect_args: a sequence of additional positional arguments that + will be applied to the table structure being reflected / copied; + this may be used to pass column and constraint overrides to the + table that will be reflected, in lieu of passing the whole + :class:`~sqlalchemy.schema.Table` using + :paramref:`~.Operations.batch_alter_table.copy_from`. + :param reflect_kwargs: a dictionary of additional keyword arguments + that will be applied to the table structure being copied; this may be + used to pass additional table and reflection options to the table that + will be reflected, in lieu of passing the whole + :class:`~sqlalchemy.schema.Table` using + :paramref:`~.Operations.batch_alter_table.copy_from`. + :param table_args: a sequence of additional positional arguments that + will be applied to the new :class:`~sqlalchemy.schema.Table` when + created, in addition to those copied from the source table. + This may be used to provide additional constraints such as CHECK + constraints that may not be reflected. + :param table_kwargs: a dictionary of additional keyword arguments + that will be applied to the new :class:`~sqlalchemy.schema.Table` + when created, in addition to those copied from the source table. + This may be used to provide for additional table options that may + not be reflected. + :param naming_convention: a naming convention dictionary of the form + described at :ref:`autogen_naming_conventions` which will be applied + to the :class:`~sqlalchemy.schema.MetaData` during the reflection + process. This is typically required if one wants to drop SQLite + constraints, as these constraints will not have names when + reflected on this backend. Requires SQLAlchemy **0.9.4** or greater. + + .. seealso:: + + :ref:`dropping_sqlite_foreign_keys` + + :param partial_reordering: a list of tuples, each suggesting a desired + ordering of two or more columns in the newly created table. Requires + that :paramref:`.batch_alter_table.recreate` is set to ``"always"``. 
+ Examples, given a table with columns "a", "b", "c", and "d": + + Specify the order of all columns:: + + with op.batch_alter_table( + "some_table", + recreate="always", + partial_reordering=[("c", "d", "a", "b")], + ) as batch_op: + pass + + Ensure "d" appears before "c", and "b", appears before "a":: + + with op.batch_alter_table( + "some_table", + recreate="always", + partial_reordering=[("d", "c"), ("b", "a")], + ) as batch_op: + pass + + The ordering of columns not included in the partial_reordering + set is undefined. Therefore it is best to specify the complete + ordering of all columns for best results. + + .. note:: batch mode requires SQLAlchemy 0.8 or above. + + .. seealso:: + + :ref:`batch_migrations` + + """ + +def bulk_insert( + table: Union[Table, TableClause], + rows: List[Dict[str, Any]], + *, + multiinsert: bool = True, +) -> None: + """Issue a "bulk insert" operation using the current + migration context. + + This provides a means of representing an INSERT of multiple rows + which works equally well in the context of executing on a live + connection as well as that of generating a SQL script. In the + case of a SQL script, the values are rendered inline into the + statement. + + e.g.:: + + from alembic import op + from datetime import date + from sqlalchemy.sql import table, column + from sqlalchemy import String, Integer, Date + + # Create an ad-hoc table to use for the insert statement. + accounts_table = table( + "account", + column("id", Integer), + column("name", String), + column("create_date", Date), + ) + + op.bulk_insert( + accounts_table, + [ + { + "id": 1, + "name": "John Smith", + "create_date": date(2010, 10, 5), + }, + { + "id": 2, + "name": "Ed Williams", + "create_date": date(2007, 5, 27), + }, + { + "id": 3, + "name": "Wendy Jones", + "create_date": date(2008, 8, 15), + }, + ], + ) + + When using --sql mode, some datatypes may not render inline + automatically, such as dates and other special types. When this + issue is present, :meth:`.Operations.inline_literal` may be used:: + + op.bulk_insert( + accounts_table, + [ + { + "id": 1, + "name": "John Smith", + "create_date": op.inline_literal("2010-10-05"), + }, + { + "id": 2, + "name": "Ed Williams", + "create_date": op.inline_literal("2007-05-27"), + }, + { + "id": 3, + "name": "Wendy Jones", + "create_date": op.inline_literal("2008-08-15"), + }, + ], + multiinsert=False, + ) + + When using :meth:`.Operations.inline_literal` in conjunction with + :meth:`.Operations.bulk_insert`, in order for the statement to work + in "online" (e.g. non --sql) mode, the + :paramref:`~.Operations.bulk_insert.multiinsert` + flag should be set to ``False``, which will have the effect of + individual INSERT statements being emitted to the database, each + with a distinct VALUES clause, so that the "inline" values can + still be rendered, rather than attempting to pass the values + as bound parameters. + + :param table: a table object which represents the target of the INSERT. + + :param rows: a list of dictionaries indicating rows. + + :param multiinsert: when at its default of True and --sql mode is not + enabled, the INSERT statement will be executed using + "executemany()" style, where all elements in the list of + dictionaries are passed as bound parameters in a single + list. Setting this to False results in individual INSERT + statements being emitted per parameter set, and is needed + in those cases where non-literal values are present in the + parameter sets. 
+ + """ + +def create_check_constraint( + constraint_name: Optional[str], + table_name: str, + condition: Union[str, ColumnElement[bool], TextClause], + *, + schema: Optional[str] = None, + **kw: Any, +) -> None: + """Issue a "create check constraint" instruction using the + current migration context. + + e.g.:: + + from alembic import op + from sqlalchemy.sql import column, func + + op.create_check_constraint( + "ck_user_name_len", + "user", + func.len(column("name")) > 5, + ) + + CHECK constraints are usually against a SQL expression, so ad-hoc + table metadata is usually needed. The function will convert the given + arguments into a :class:`sqlalchemy.schema.CheckConstraint` bound + to an anonymous table in order to emit the CREATE statement. + + :param name: Name of the check constraint. The name is necessary + so that an ALTER statement can be emitted. For setups that + use an automated naming scheme such as that described at + :ref:`sqla:constraint_naming_conventions`, + ``name`` here can be ``None``, as the event listener will + apply the name to the constraint object when it is associated + with the table. + :param table_name: String name of the source table. + :param condition: SQL expression that's the condition of the + constraint. Can be a string or SQLAlchemy expression language + structure. + :param deferrable: optional bool. If set, emit DEFERRABLE or + NOT DEFERRABLE when issuing DDL for this constraint. + :param initially: optional string. If set, emit INITIALLY + when issuing DDL for this constraint. + :param schema: Optional schema name to operate within. To control + quoting of the schema outside of the default behavior, use + the SQLAlchemy construct + :class:`~sqlalchemy.sql.elements.quoted_name`. + + """ + +def create_exclude_constraint( + constraint_name: str, table_name: str, *elements: Any, **kw: Any +) -> Optional[Table]: + """Issue an alter to create an EXCLUDE constraint using the + current migration context. + + .. note:: This method is Postgresql specific, and additionally + requires at least SQLAlchemy 1.0. + + e.g.:: + + from alembic import op + + op.create_exclude_constraint( + "user_excl", + "user", + ("period", "&&"), + ("group", "="), + where=("group != 'some group'"), + ) + + Note that the expressions work the same way as that of + the ``ExcludeConstraint`` object itself; if plain strings are + passed, quoting rules must be applied manually. + + :param name: Name of the constraint. + :param table_name: String name of the source table. + :param elements: exclude conditions. + :param where: SQL expression or SQL string with optional WHERE + clause. + :param deferrable: optional bool. If set, emit DEFERRABLE or + NOT DEFERRABLE when issuing DDL for this constraint. + :param initially: optional string. If set, emit INITIALLY + when issuing DDL for this constraint. + :param schema: Optional schema name to operate within. + + """ + +def create_foreign_key( + constraint_name: Optional[str], + source_table: str, + referent_table: str, + local_cols: List[str], + remote_cols: List[str], + *, + onupdate: Optional[str] = None, + ondelete: Optional[str] = None, + deferrable: Optional[bool] = None, + initially: Optional[str] = None, + match: Optional[str] = None, + source_schema: Optional[str] = None, + referent_schema: Optional[str] = None, + **dialect_kw: Any, +) -> None: + """Issue a "create foreign key" instruction using the + current migration context. 
+ + e.g.:: + + from alembic import op + + op.create_foreign_key( + "fk_user_address", + "address", + "user", + ["user_id"], + ["id"], + ) + + This internally generates a :class:`~sqlalchemy.schema.Table` object + containing the necessary columns, then generates a new + :class:`~sqlalchemy.schema.ForeignKeyConstraint` + object which it then associates with the + :class:`~sqlalchemy.schema.Table`. + Any event listeners associated with this action will be fired + off normally. The :class:`~sqlalchemy.schema.AddConstraint` + construct is ultimately used to generate the ALTER statement. + + :param constraint_name: Name of the foreign key constraint. The name + is necessary so that an ALTER statement can be emitted. For setups + that use an automated naming scheme such as that described at + :ref:`sqla:constraint_naming_conventions`, + ``name`` here can be ``None``, as the event listener will + apply the name to the constraint object when it is associated + with the table. + :param source_table: String name of the source table. + :param referent_table: String name of the destination table. + :param local_cols: a list of string column names in the + source table. + :param remote_cols: a list of string column names in the + remote table. + :param onupdate: Optional string. If set, emit ON UPDATE when + issuing DDL for this constraint. Typical values include CASCADE, + DELETE and RESTRICT. + :param ondelete: Optional string. If set, emit ON DELETE when + issuing DDL for this constraint. Typical values include CASCADE, + DELETE and RESTRICT. + :param deferrable: optional bool. If set, emit DEFERRABLE or NOT + DEFERRABLE when issuing DDL for this constraint. + :param source_schema: Optional schema name of the source table. + :param referent_schema: Optional schema name of the destination table. + + """ + +def create_index( + index_name: Optional[str], + table_name: str, + columns: Sequence[Union[str, TextClause, ColumnElement[Any]]], + *, + schema: Optional[str] = None, + unique: bool = False, + if_not_exists: Optional[bool] = None, + **kw: Any, +) -> None: + r"""Issue a "create index" instruction using the current + migration context. + + e.g.:: + + from alembic import op + + op.create_index("ik_test", "t1", ["foo", "bar"]) + + Functional indexes can be produced by using the + :func:`sqlalchemy.sql.expression.text` construct:: + + from alembic import op + from sqlalchemy import text + + op.create_index("ik_test", "t1", [text("lower(foo)")]) + + :param index_name: name of the index. + :param table_name: name of the owning table. + :param columns: a list consisting of string column names and/or + :func:`~sqlalchemy.sql.expression.text` constructs. + :param schema: Optional schema name to operate within. To control + quoting of the schema outside of the default behavior, use + the SQLAlchemy construct + :class:`~sqlalchemy.sql.elements.quoted_name`. + :param unique: If True, create a unique index. + + :param quote: Force quoting of this column's name on or off, + corresponding to ``True`` or ``False``. When left at its default + of ``None``, the column identifier will be quoted according to + whether the name is case sensitive (identifiers with at least one + upper case character are treated as case sensitive), or if it's a + reserved word. This flag is only needed to force quoting of a + reserved word which is not known by the SQLAlchemy dialect. + + :param if_not_exists: If True, adds IF NOT EXISTS operator when + creating the new index. + + .. 
versionadded:: 1.12.0 + + :param \**kw: Additional keyword arguments not mentioned above are + dialect specific, and passed in the form + ``_``. + See the documentation regarding an individual dialect at + :ref:`dialect_toplevel` for detail on documented arguments. + + """ + +def create_primary_key( + constraint_name: Optional[str], + table_name: str, + columns: List[str], + *, + schema: Optional[str] = None, +) -> None: + """Issue a "create primary key" instruction using the current + migration context. + + e.g.:: + + from alembic import op + + op.create_primary_key("pk_my_table", "my_table", ["id", "version"]) + + This internally generates a :class:`~sqlalchemy.schema.Table` object + containing the necessary columns, then generates a new + :class:`~sqlalchemy.schema.PrimaryKeyConstraint` + object which it then associates with the + :class:`~sqlalchemy.schema.Table`. + Any event listeners associated with this action will be fired + off normally. The :class:`~sqlalchemy.schema.AddConstraint` + construct is ultimately used to generate the ALTER statement. + + :param constraint_name: Name of the primary key constraint. The name + is necessary so that an ALTER statement can be emitted. For setups + that use an automated naming scheme such as that described at + :ref:`sqla:constraint_naming_conventions` + ``name`` here can be ``None``, as the event listener will + apply the name to the constraint object when it is associated + with the table. + :param table_name: String name of the target table. + :param columns: a list of string column names to be applied to the + primary key constraint. + :param schema: Optional schema name to operate within. To control + quoting of the schema outside of the default behavior, use + the SQLAlchemy construct + :class:`~sqlalchemy.sql.elements.quoted_name`. + + """ + +def create_table( + table_name: str, + *columns: SchemaItem, + if_not_exists: Optional[bool] = None, + **kw: Any, +) -> Table: + r"""Issue a "create table" instruction using the current migration + context. + + This directive receives an argument list similar to that of the + traditional :class:`sqlalchemy.schema.Table` construct, but without the + metadata:: + + from sqlalchemy import INTEGER, VARCHAR, NVARCHAR, Column + from alembic import op + + op.create_table( + "account", + Column("id", INTEGER, primary_key=True), + Column("name", VARCHAR(50), nullable=False), + Column("description", NVARCHAR(200)), + Column("timestamp", TIMESTAMP, server_default=func.now()), + ) + + Note that :meth:`.create_table` accepts + :class:`~sqlalchemy.schema.Column` + constructs directly from the SQLAlchemy library. 
In particular, + default values to be created on the database side are + specified using the ``server_default`` parameter, and not + ``default`` which only specifies Python-side defaults:: + + from alembic import op + from sqlalchemy import Column, TIMESTAMP, func + + # specify "DEFAULT NOW" along with the "timestamp" column + op.create_table( + "account", + Column("id", INTEGER, primary_key=True), + Column("timestamp", TIMESTAMP, server_default=func.now()), + ) + + The function also returns a newly created + :class:`~sqlalchemy.schema.Table` object, corresponding to the table + specification given, which is suitable for + immediate SQL operations, in particular + :meth:`.Operations.bulk_insert`:: + + from sqlalchemy import INTEGER, VARCHAR, NVARCHAR, Column + from alembic import op + + account_table = op.create_table( + "account", + Column("id", INTEGER, primary_key=True), + Column("name", VARCHAR(50), nullable=False), + Column("description", NVARCHAR(200)), + Column("timestamp", TIMESTAMP, server_default=func.now()), + ) + + op.bulk_insert( + account_table, + [ + {"name": "A1", "description": "account 1"}, + {"name": "A2", "description": "account 2"}, + ], + ) + + :param table_name: Name of the table + :param \*columns: collection of :class:`~sqlalchemy.schema.Column` + objects within + the table, as well as optional :class:`~sqlalchemy.schema.Constraint` + objects + and :class:`~.sqlalchemy.schema.Index` objects. + :param schema: Optional schema name to operate within. To control + quoting of the schema outside of the default behavior, use + the SQLAlchemy construct + :class:`~sqlalchemy.sql.elements.quoted_name`. + :param if_not_exists: If True, adds IF NOT EXISTS operator when + creating the new table. + + .. versionadded:: 1.13.3 + :param \**kw: Other keyword arguments are passed to the underlying + :class:`sqlalchemy.schema.Table` object created for the command. + + :return: the :class:`~sqlalchemy.schema.Table` object corresponding + to the parameters given. + + """ + +def create_table_comment( + table_name: str, + comment: Optional[str], + *, + existing_comment: Optional[str] = None, + schema: Optional[str] = None, +) -> None: + """Emit a COMMENT ON operation to set the comment for a table. + + :param table_name: string name of the target table. + :param comment: string value of the comment being registered against + the specified table. + :param existing_comment: String value of a comment + already registered on the specified table, used within autogenerate + so that the operation is reversible, but not required for direct + use. + + .. seealso:: + + :meth:`.Operations.drop_table_comment` + + :paramref:`.Operations.alter_column.comment` + + """ + +def create_unique_constraint( + constraint_name: Optional[str], + table_name: str, + columns: Sequence[str], + *, + schema: Optional[str] = None, + **kw: Any, +) -> Any: + """Issue a "create unique constraint" instruction using the + current migration context. + + e.g.:: + + from alembic import op + op.create_unique_constraint("uq_user_name", "user", ["name"]) + + This internally generates a :class:`~sqlalchemy.schema.Table` object + containing the necessary columns, then generates a new + :class:`~sqlalchemy.schema.UniqueConstraint` + object which it then associates with the + :class:`~sqlalchemy.schema.Table`. + Any event listeners associated with this action will be fired + off normally. The :class:`~sqlalchemy.schema.AddConstraint` + construct is ultimately used to generate the ALTER statement. 
+ + :param name: Name of the unique constraint. The name is necessary + so that an ALTER statement can be emitted. For setups that + use an automated naming scheme such as that described at + :ref:`sqla:constraint_naming_conventions`, + ``name`` here can be ``None``, as the event listener will + apply the name to the constraint object when it is associated + with the table. + :param table_name: String name of the source table. + :param columns: a list of string column names in the + source table. + :param deferrable: optional bool. If set, emit DEFERRABLE or + NOT DEFERRABLE when issuing DDL for this constraint. + :param initially: optional string. If set, emit INITIALLY + when issuing DDL for this constraint. + :param schema: Optional schema name to operate within. To control + quoting of the schema outside of the default behavior, use + the SQLAlchemy construct + :class:`~sqlalchemy.sql.elements.quoted_name`. + + """ + +def drop_column( + table_name: str, + column_name: str, + *, + schema: Optional[str] = None, + **kw: Any, +) -> None: + """Issue a "drop column" instruction using the current + migration context. + + e.g.:: + + drop_column("organization", "account_id") + + :param table_name: name of table + :param column_name: name of column + :param schema: Optional schema name to operate within. To control + quoting of the schema outside of the default behavior, use + the SQLAlchemy construct + :class:`~sqlalchemy.sql.elements.quoted_name`. + :param if_exists: If True, adds IF EXISTS operator when + dropping the new column for compatible dialects + + .. versionadded:: 1.16.0 + + :param mssql_drop_check: Optional boolean. When ``True``, on + Microsoft SQL Server only, first + drop the CHECK constraint on the column using a + SQL-script-compatible + block that selects into a @variable from sys.check_constraints, + then exec's a separate DROP CONSTRAINT for that constraint. + :param mssql_drop_default: Optional boolean. When ``True``, on + Microsoft SQL Server only, first + drop the DEFAULT constraint on the column using a + SQL-script-compatible + block that selects into a @variable from sys.default_constraints, + then exec's a separate DROP CONSTRAINT for that default. + :param mssql_drop_foreign_key: Optional boolean. When ``True``, on + Microsoft SQL Server only, first + drop a single FOREIGN KEY constraint on the column using a + SQL-script-compatible + block that selects into a @variable from + sys.foreign_keys/sys.foreign_key_columns, + then exec's a separate DROP CONSTRAINT for that default. Only + works if the column has exactly one FK constraint which refers to + it, at the moment. + """ + +def drop_constraint( + constraint_name: str, + table_name: str, + type_: Optional[str] = None, + *, + schema: Optional[str] = None, + if_exists: Optional[bool] = None, +) -> None: + r"""Drop a constraint of the given name, typically via DROP CONSTRAINT. + + :param constraint_name: name of the constraint. + :param table_name: table name. + :param type\_: optional, required on MySQL. can be + 'foreignkey', 'primary', 'unique', or 'check'. + :param schema: Optional schema name to operate within. To control + quoting of the schema outside of the default behavior, use + the SQLAlchemy construct + :class:`~sqlalchemy.sql.elements.quoted_name`. + :param if_exists: If True, adds IF EXISTS operator when + dropping the constraint + + .. 
versionadded:: 1.16.0 + + """ + +def drop_index( + index_name: str, + table_name: Optional[str] = None, + *, + schema: Optional[str] = None, + if_exists: Optional[bool] = None, + **kw: Any, +) -> None: + r"""Issue a "drop index" instruction using the current + migration context. + + e.g.:: + + drop_index("accounts") + + :param index_name: name of the index. + :param table_name: name of the owning table. Some + backends such as Microsoft SQL Server require this. + :param schema: Optional schema name to operate within. To control + quoting of the schema outside of the default behavior, use + the SQLAlchemy construct + :class:`~sqlalchemy.sql.elements.quoted_name`. + + :param if_exists: If True, adds IF EXISTS operator when + dropping the index. + + .. versionadded:: 1.12.0 + + :param \**kw: Additional keyword arguments not mentioned above are + dialect specific, and passed in the form + ``_``. + See the documentation regarding an individual dialect at + :ref:`dialect_toplevel` for detail on documented arguments. + + """ + +def drop_table( + table_name: str, + *, + schema: Optional[str] = None, + if_exists: Optional[bool] = None, + **kw: Any, +) -> None: + r"""Issue a "drop table" instruction using the current + migration context. + + + e.g.:: + + drop_table("accounts") + + :param table_name: Name of the table + :param schema: Optional schema name to operate within. To control + quoting of the schema outside of the default behavior, use + the SQLAlchemy construct + :class:`~sqlalchemy.sql.elements.quoted_name`. + :param if_exists: If True, adds IF EXISTS operator when + dropping the table. + + .. versionadded:: 1.13.3 + :param \**kw: Other keyword arguments are passed to the underlying + :class:`sqlalchemy.schema.Table` object created for the command. + + """ + +def drop_table_comment( + table_name: str, + *, + existing_comment: Optional[str] = None, + schema: Optional[str] = None, +) -> None: + """Issue a "drop table comment" operation to + remove an existing comment set on a table. + + :param table_name: string name of the target table. + :param existing_comment: An optional string value of a comment already + registered on the specified table. + + .. seealso:: + + :meth:`.Operations.create_table_comment` + + :paramref:`.Operations.alter_column.comment` + + """ + +def execute( + sqltext: Union[Executable, str], + *, + execution_options: Optional[dict[str, Any]] = None, +) -> None: + r"""Execute the given SQL using the current migration context. + + The given SQL can be a plain string, e.g.:: + + op.execute("INSERT INTO table (foo) VALUES ('some value')") + + Or it can be any kind of Core SQL Expression construct, such as + below where we use an update construct:: + + from sqlalchemy.sql import table, column + from sqlalchemy import String + from alembic import op + + account = table("account", column("name", String)) + op.execute( + account.update() + .where(account.c.name == op.inline_literal("account 1")) + .values({"name": op.inline_literal("account 2")}) + ) + + Above, we made use of the SQLAlchemy + :func:`sqlalchemy.sql.expression.table` and + :func:`sqlalchemy.sql.expression.column` constructs to make a brief, + ad-hoc table construct just for our UPDATE statement. A full + :class:`~sqlalchemy.schema.Table` construct of course works perfectly + fine as well, though note it's a recommended practice to at least + ensure the definition of a table is self-contained within the migration + script, rather than imported from a module that may break compatibility + with older migrations. 
+ + In a SQL script context, the statement is emitted directly to the + output stream. There is *no* return result, however, as this + function is oriented towards generating a change script + that can run in "offline" mode. Additionally, parameterized + statements are discouraged here, as they *will not work* in offline + mode. Above, we use :meth:`.inline_literal` where parameters are + to be used. + + For full interaction with a connected database where parameters can + also be used normally, use the "bind" available from the context:: + + from alembic import op + + connection = op.get_bind() + + connection.execute( + account.update() + .where(account.c.name == "account 1") + .values({"name": "account 2"}) + ) + + Additionally, when passing the statement as a plain string, it is first + coerced into a :func:`sqlalchemy.sql.expression.text` construct + before being passed along. In the less likely case that the + literal SQL string contains a colon, it must be escaped with a + backslash, as:: + + op.execute(r"INSERT INTO table (foo) VALUES ('\:colon_value')") + + + :param sqltext: Any legal SQLAlchemy expression, including: + + * a string + * a :func:`sqlalchemy.sql.expression.text` construct. + * a :func:`sqlalchemy.sql.expression.insert` construct. + * a :func:`sqlalchemy.sql.expression.update` construct. + * a :func:`sqlalchemy.sql.expression.delete` construct. + * Any "executable" described in SQLAlchemy Core documentation, + noting that no result set is returned. + + .. note:: when passing a plain string, the statement is coerced into + a :func:`sqlalchemy.sql.expression.text` construct. This construct + considers symbols with colons, e.g. ``:foo`` to be bound parameters. + To avoid this, ensure that colon symbols are escaped, e.g. + ``\:foo``. + + :param execution_options: Optional dictionary of + execution options, will be passed to + :meth:`sqlalchemy.engine.Connection.execution_options`. + """ + +def f(name: str) -> conv: + """Indicate a string name that has already had a naming convention + applied to it. + + This feature combines with the SQLAlchemy ``naming_convention`` feature + to disambiguate constraint names that have already had naming + conventions applied to them, versus those that have not. This is + necessary in the case that the ``"%(constraint_name)s"`` token + is used within a naming convention, so that it can be identified + that this particular name should remain fixed. + + If the :meth:`.Operations.f` is used on a constraint, the naming + convention will not take effect:: + + op.add_column("t", "x", Boolean(name=op.f("ck_bool_t_x"))) + + Above, the CHECK constraint generated will have the name + ``ck_bool_t_x`` regardless of whether or not a naming convention is + in use. + + Alternatively, if a naming convention is in use, and 'f' is not used, + names will be converted along conventions. If the ``target_metadata`` + contains the naming convention + ``{"ck": "ck_bool_%(table_name)s_%(constraint_name)s"}``, then the + output of the following:: + + op.add_column("t", "x", Boolean(name="x")) + + will be:: + + CONSTRAINT ck_bool_t_x CHECK (x in (1, 0))) + + The function is rendered in the output of autogenerate when + a particular constraint name is already converted. + + """ + +def get_bind() -> Connection: + """Return the current 'bind'. + + Under normal circumstances, this is the + :class:`~sqlalchemy.engine.Connection` currently being used + to emit SQL to the database. + + In a SQL script context, this value is ``None``. 
[TODO: verify this] + + """ + +def get_context() -> MigrationContext: + """Return the :class:`.MigrationContext` object that's + currently in use. + + """ + +def implementation_for(op_cls: Any) -> Callable[[_C], _C]: + """Register an implementation for a given :class:`.MigrateOperation`. + + This is part of the operation extensibility API. + + .. seealso:: + + :ref:`operation_plugins` - example of use + + """ + +def inline_literal( + value: Union[str, int], type_: Optional[TypeEngine[Any]] = None +) -> _literal_bindparam: + r"""Produce an 'inline literal' expression, suitable for + using in an INSERT, UPDATE, or DELETE statement. + + When using Alembic in "offline" mode, CRUD operations + aren't compatible with SQLAlchemy's default behavior surrounding + literal values, + which is that they are converted into bound values and passed + separately into the ``execute()`` method of the DBAPI cursor. + An offline SQL + script needs to have these rendered inline. While it should + always be noted that inline literal values are an **enormous** + security hole in an application that handles untrusted input, + a schema migration is not run in this context, so + literals are safe to render inline, with the caveat that + advanced types like dates may not be supported directly + by SQLAlchemy. + + See :meth:`.Operations.execute` for an example usage of + :meth:`.Operations.inline_literal`. + + The environment can also be configured to attempt to render + "literal" values inline automatically, for those simple types + that are supported by the dialect; see + :paramref:`.EnvironmentContext.configure.literal_binds` for this + more recently added feature. + + :param value: The value to render. Strings, integers, and simple + numerics should be supported. Other types like boolean, + dates, etc. may or may not be supported yet by various + backends. + :param type\_: optional - a :class:`sqlalchemy.types.TypeEngine` + subclass stating the type of this value. In SQLAlchemy + expressions, this is usually derived automatically + from the Python type of the value itself, as well as + based on the context in which the value is used. + + .. seealso:: + + :paramref:`.EnvironmentContext.configure.literal_binds` + + """ + +@overload +def invoke(operation: CreateTableOp) -> Table: ... +@overload +def invoke( + operation: Union[ + AddConstraintOp, + DropConstraintOp, + CreateIndexOp, + DropIndexOp, + AddColumnOp, + AlterColumnOp, + AlterTableOp, + CreateTableCommentOp, + DropTableCommentOp, + DropColumnOp, + BulkInsertOp, + DropTableOp, + ExecuteSQLOp, + ], +) -> None: ... +@overload +def invoke(operation: MigrateOperation) -> Any: + """Given a :class:`.MigrateOperation`, invoke it in terms of + this :class:`.Operations` instance. + + """ + +def register_operation( + name: str, sourcename: Optional[str] = None +) -> Callable[[Type[_T]], Type[_T]]: + """Register a new operation for this class. + + This method is normally used to add new operations + to the :class:`.Operations` class, and possibly the + :class:`.BatchOperations` class as well. All Alembic migration + operations are implemented via this system, however the system + is also available as a public API to facilitate adding custom + operations. + + .. seealso:: + + :ref:`operation_plugins` + + + """ + +def rename_table( + old_table_name: str, new_table_name: str, *, schema: Optional[str] = None +) -> None: + """Emit an ALTER TABLE to rename a table. + + :param old_table_name: old name. + :param new_table_name: new name. 
+ :param schema: Optional schema name to operate within. To control + quoting of the schema outside of the default behavior, use + the SQLAlchemy construct + :class:`~sqlalchemy.sql.elements.quoted_name`. + + """ + +def run_async( + async_function: Callable[..., Awaitable[_T]], *args: Any, **kw_args: Any +) -> _T: + """Invoke the given asynchronous callable, passing an asynchronous + :class:`~sqlalchemy.ext.asyncio.AsyncConnection` as the first + argument. + + This method allows calling async functions from within the + synchronous ``upgrade()`` or ``downgrade()`` alembic migration + method. + + The async connection passed to the callable shares the same + transaction as the connection running in the migration context. + + Any additional arg or kw_arg passed to this function are passed + to the provided async function. + + .. versionadded: 1.11 + + .. note:: + + This method can be called only when alembic is called using + an async dialect. + """ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/operations/__init__.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/operations/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..26197cbe8205decca224757d329e634a6a23d2e2 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/operations/__init__.py @@ -0,0 +1,15 @@ +from . import toimpl +from .base import AbstractOperations +from .base import BatchOperations +from .base import Operations +from .ops import MigrateOperation +from .ops import MigrationScript + + +__all__ = [ + "AbstractOperations", + "Operations", + "BatchOperations", + "MigrateOperation", + "MigrationScript", +] diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/operations/base.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/operations/base.py new file mode 100644 index 0000000000000000000000000000000000000000..26c3272427200fc8eb4b67b4e92f6573b6e3fbe2 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/operations/base.py @@ -0,0 +1,1923 @@ +# mypy: allow-untyped-calls + +from __future__ import annotations + +from contextlib import contextmanager +import re +import textwrap +from typing import Any +from typing import Awaitable +from typing import Callable +from typing import Dict +from typing import Iterator +from typing import List # noqa +from typing import Mapping +from typing import NoReturn +from typing import Optional +from typing import overload +from typing import Sequence # noqa +from typing import Tuple +from typing import Type # noqa +from typing import TYPE_CHECKING +from typing import TypeVar +from typing import Union + +from sqlalchemy.sql.elements import conv + +from . import batch +from . import schemaobj +from .. 
import util +from ..util import sqla_compat +from ..util.compat import formatannotation_fwdref +from ..util.compat import inspect_formatargspec +from ..util.compat import inspect_getfullargspec +from ..util.sqla_compat import _literal_bindparam + + +if TYPE_CHECKING: + from typing import Literal + + from sqlalchemy import Table + from sqlalchemy.engine import Connection + from sqlalchemy.sql import Executable + from sqlalchemy.sql.expression import ColumnElement + from sqlalchemy.sql.expression import TableClause + from sqlalchemy.sql.expression import TextClause + from sqlalchemy.sql.schema import Column + from sqlalchemy.sql.schema import Computed + from sqlalchemy.sql.schema import Identity + from sqlalchemy.sql.schema import SchemaItem + from sqlalchemy.types import TypeEngine + + from .batch import BatchOperationsImpl + from .ops import AddColumnOp + from .ops import AddConstraintOp + from .ops import AlterColumnOp + from .ops import AlterTableOp + from .ops import BulkInsertOp + from .ops import CreateIndexOp + from .ops import CreateTableCommentOp + from .ops import CreateTableOp + from .ops import DropColumnOp + from .ops import DropConstraintOp + from .ops import DropIndexOp + from .ops import DropTableCommentOp + from .ops import DropTableOp + from .ops import ExecuteSQLOp + from .ops import MigrateOperation + from ..ddl import DefaultImpl + from ..runtime.migration import MigrationContext +__all__ = ("Operations", "BatchOperations") +_T = TypeVar("_T") + +_C = TypeVar("_C", bound=Callable[..., Any]) + + +class AbstractOperations(util.ModuleClsProxy): + """Base class for Operations and BatchOperations. + + .. versionadded:: 1.11.0 + + """ + + impl: Union[DefaultImpl, BatchOperationsImpl] + _to_impl = util.Dispatcher() + + def __init__( + self, + migration_context: MigrationContext, + impl: Optional[BatchOperationsImpl] = None, + ) -> None: + """Construct a new :class:`.Operations` + + :param migration_context: a :class:`.MigrationContext` + instance. + + """ + self.migration_context = migration_context + if impl is None: + self.impl = migration_context.impl + else: + self.impl = impl + + self.schema_obj = schemaobj.SchemaObjects(migration_context) + + @classmethod + def register_operation( + cls, name: str, sourcename: Optional[str] = None + ) -> Callable[[Type[_T]], Type[_T]]: + """Register a new operation for this class. + + This method is normally used to add new operations + to the :class:`.Operations` class, and possibly the + :class:`.BatchOperations` class as well. All Alembic migration + operations are implemented via this system, however the system + is also available as a public API to facilitate adding custom + operations. + + .. seealso:: + + :ref:`operation_plugins` + + + """ + + def register(op_cls: Type[_T]) -> Type[_T]: + if sourcename is None: + fn = getattr(op_cls, name) + source_name = fn.__name__ + else: + fn = getattr(op_cls, sourcename) + source_name = fn.__name__ + + spec = inspect_getfullargspec(fn) + + name_args = spec[0] + assert name_args[0:2] == ["cls", "operations"] + + name_args[0:2] = ["self"] + + args = inspect_formatargspec( + *spec, formatannotation=formatannotation_fwdref + ) + num_defaults = len(spec[3]) if spec[3] else 0 + + defaulted_vals: Tuple[Any, ...] 
+ + if num_defaults: + defaulted_vals = tuple(name_args[0 - num_defaults :]) + else: + defaulted_vals = () + + defaulted_vals += tuple(spec[4]) + # here, we are using formatargspec in a different way in order + # to get a string that will re-apply incoming arguments to a new + # function call + + apply_kw = inspect_formatargspec( + name_args + spec[4], + spec[1], + spec[2], + defaulted_vals, + formatvalue=lambda x: "=" + x, + formatannotation=formatannotation_fwdref, + ) + + args = re.sub( + r'[_]?ForwardRef\(([\'"].+?[\'"])\)', + lambda m: m.group(1), + args, + ) + + func_text = textwrap.dedent( + """\ + def %(name)s%(args)s: + %(doc)r + return op_cls.%(source_name)s%(apply_kw)s + """ + % { + "name": name, + "source_name": source_name, + "args": args, + "apply_kw": apply_kw, + "doc": fn.__doc__, + } + ) + + globals_ = dict(globals()) + globals_.update({"op_cls": op_cls}) + lcl: Dict[str, Any] = {} + + exec(func_text, globals_, lcl) + setattr(cls, name, lcl[name]) + fn.__func__.__doc__ = ( + "This method is proxied on " + "the :class:`.%s` class, via the :meth:`.%s.%s` method." + % (cls.__name__, cls.__name__, name) + ) + if hasattr(fn, "_legacy_translations"): + lcl[name]._legacy_translations = fn._legacy_translations + return op_cls + + return register + + @classmethod + def implementation_for(cls, op_cls: Any) -> Callable[[_C], _C]: + """Register an implementation for a given :class:`.MigrateOperation`. + + This is part of the operation extensibility API. + + .. seealso:: + + :ref:`operation_plugins` - example of use + + """ + + def decorate(fn: _C) -> _C: + cls._to_impl.dispatch_for(op_cls)(fn) + return fn + + return decorate + + @classmethod + @contextmanager + def context( + cls, migration_context: MigrationContext + ) -> Iterator[Operations]: + op = Operations(migration_context) + op._install_proxy() + yield op + op._remove_proxy() + + @contextmanager + def batch_alter_table( + self, + table_name: str, + schema: Optional[str] = None, + recreate: Literal["auto", "always", "never"] = "auto", + partial_reordering: Optional[Tuple[Any, ...]] = None, + copy_from: Optional[Table] = None, + table_args: Tuple[Any, ...] = (), + table_kwargs: Mapping[str, Any] = util.immutabledict(), + reflect_args: Tuple[Any, ...] = (), + reflect_kwargs: Mapping[str, Any] = util.immutabledict(), + naming_convention: Optional[Dict[str, str]] = None, + ) -> Iterator[BatchOperations]: + """Invoke a series of per-table migrations in batch. + + Batch mode allows a series of operations specific to a table + to be syntactically grouped together, and allows for alternate + modes of table migration, in particular the "recreate" style of + migration required by SQLite. + + "recreate" style is as follows: + + 1. A new table is created with the new specification, based on the + migration directives within the batch, using a temporary name. + + 2. the data copied from the existing table to the new table. + + 3. the existing table is dropped. + + 4. the new table is renamed to the existing table name. + + The directive by default will only use "recreate" style on the + SQLite backend, and only if directives are present which require + this form, e.g. anything other than ``add_column()``. The batch + operation on other backends will proceed using standard ALTER TABLE + operations. + + The method is used as a context manager, which returns an instance + of :class:`.BatchOperations`; this object is the same as + :class:`.Operations` except that table names and schema names + are omitted. 
E.g.:: + + with op.batch_alter_table("some_table") as batch_op: + batch_op.add_column(Column("foo", Integer)) + batch_op.drop_column("bar") + + The operations within the context manager are invoked at once + when the context is ended. When run against SQLite, if the + migrations include operations not supported by SQLite's ALTER TABLE, + the entire table will be copied to a new one with the new + specification, moving all data across as well. + + The copy operation by default uses reflection to retrieve the current + structure of the table, and therefore :meth:`.batch_alter_table` + in this mode requires that the migration is run in "online" mode. + The ``copy_from`` parameter may be passed which refers to an existing + :class:`.Table` object, which will bypass this reflection step. + + .. note:: The table copy operation will currently not copy + CHECK constraints, and may not copy UNIQUE constraints that are + unnamed, as is possible on SQLite. See the section + :ref:`sqlite_batch_constraints` for workarounds. + + :param table_name: name of table + :param schema: optional schema name. + :param recreate: under what circumstances the table should be + recreated. At its default of ``"auto"``, the SQLite dialect will + recreate the table if any operations other than ``add_column()``, + ``create_index()``, or ``drop_index()`` are + present. Other options include ``"always"`` and ``"never"``. + :param copy_from: optional :class:`~sqlalchemy.schema.Table` object + that will act as the structure of the table being copied. If omitted, + table reflection is used to retrieve the structure of the table. + + .. seealso:: + + :ref:`batch_offline_mode` + + :paramref:`~.Operations.batch_alter_table.reflect_args` + + :paramref:`~.Operations.batch_alter_table.reflect_kwargs` + + :param reflect_args: a sequence of additional positional arguments that + will be applied to the table structure being reflected / copied; + this may be used to pass column and constraint overrides to the + table that will be reflected, in lieu of passing the whole + :class:`~sqlalchemy.schema.Table` using + :paramref:`~.Operations.batch_alter_table.copy_from`. + :param reflect_kwargs: a dictionary of additional keyword arguments + that will be applied to the table structure being copied; this may be + used to pass additional table and reflection options to the table that + will be reflected, in lieu of passing the whole + :class:`~sqlalchemy.schema.Table` using + :paramref:`~.Operations.batch_alter_table.copy_from`. + :param table_args: a sequence of additional positional arguments that + will be applied to the new :class:`~sqlalchemy.schema.Table` when + created, in addition to those copied from the source table. + This may be used to provide additional constraints such as CHECK + constraints that may not be reflected. + :param table_kwargs: a dictionary of additional keyword arguments + that will be applied to the new :class:`~sqlalchemy.schema.Table` + when created, in addition to those copied from the source table. + This may be used to provide for additional table options that may + not be reflected. + :param naming_convention: a naming convention dictionary of the form + described at :ref:`autogen_naming_conventions` which will be applied + to the :class:`~sqlalchemy.schema.MetaData` during the reflection + process. This is typically required if one wants to drop SQLite + constraints, as these constraints will not have names when + reflected on this backend. Requires SQLAlchemy **0.9.4** or greater. + + .. 
seealso:: + + :ref:`dropping_sqlite_foreign_keys` + + :param partial_reordering: a list of tuples, each suggesting a desired + ordering of two or more columns in the newly created table. Requires + that :paramref:`.batch_alter_table.recreate` is set to ``"always"``. + Examples, given a table with columns "a", "b", "c", and "d": + + Specify the order of all columns:: + + with op.batch_alter_table( + "some_table", + recreate="always", + partial_reordering=[("c", "d", "a", "b")], + ) as batch_op: + pass + + Ensure "d" appears before "c", and "b", appears before "a":: + + with op.batch_alter_table( + "some_table", + recreate="always", + partial_reordering=[("d", "c"), ("b", "a")], + ) as batch_op: + pass + + The ordering of columns not included in the partial_reordering + set is undefined. Therefore it is best to specify the complete + ordering of all columns for best results. + + .. note:: batch mode requires SQLAlchemy 0.8 or above. + + .. seealso:: + + :ref:`batch_migrations` + + """ + impl = batch.BatchOperationsImpl( + self, + table_name, + schema, + recreate, + copy_from, + table_args, + table_kwargs, + reflect_args, + reflect_kwargs, + naming_convention, + partial_reordering, + ) + batch_op = BatchOperations(self.migration_context, impl=impl) + yield batch_op + impl.flush() + + def get_context(self) -> MigrationContext: + """Return the :class:`.MigrationContext` object that's + currently in use. + + """ + + return self.migration_context + + @overload + def invoke(self, operation: CreateTableOp) -> Table: ... + + @overload + def invoke( + self, + operation: Union[ + AddConstraintOp, + DropConstraintOp, + CreateIndexOp, + DropIndexOp, + AddColumnOp, + AlterColumnOp, + AlterTableOp, + CreateTableCommentOp, + DropTableCommentOp, + DropColumnOp, + BulkInsertOp, + DropTableOp, + ExecuteSQLOp, + ], + ) -> None: ... + + @overload + def invoke(self, operation: MigrateOperation) -> Any: ... + + def invoke(self, operation: MigrateOperation) -> Any: + """Given a :class:`.MigrateOperation`, invoke it in terms of + this :class:`.Operations` instance. + + """ + fn = self._to_impl.dispatch( + operation, self.migration_context.impl.__dialect__ + ) + return fn(self, operation) + + def f(self, name: str) -> conv: + """Indicate a string name that has already had a naming convention + applied to it. + + This feature combines with the SQLAlchemy ``naming_convention`` feature + to disambiguate constraint names that have already had naming + conventions applied to them, versus those that have not. This is + necessary in the case that the ``"%(constraint_name)s"`` token + is used within a naming convention, so that it can be identified + that this particular name should remain fixed. + + If the :meth:`.Operations.f` is used on a constraint, the naming + convention will not take effect:: + + op.add_column("t", "x", Boolean(name=op.f("ck_bool_t_x"))) + + Above, the CHECK constraint generated will have the name + ``ck_bool_t_x`` regardless of whether or not a naming convention is + in use. + + Alternatively, if a naming convention is in use, and 'f' is not used, + names will be converted along conventions. If the ``target_metadata`` + contains the naming convention + ``{"ck": "ck_bool_%(table_name)s_%(constraint_name)s"}``, then the + output of the following:: + + op.add_column("t", "x", Boolean(name="x")) + + will be:: + + CONSTRAINT ck_bool_t_x CHECK (x in (1, 0))) + + The function is rendered in the output of autogenerate when + a particular constraint name is already converted. 
+ + """ + return conv(name) + + def inline_literal( + self, value: Union[str, int], type_: Optional[TypeEngine[Any]] = None + ) -> _literal_bindparam: + r"""Produce an 'inline literal' expression, suitable for + using in an INSERT, UPDATE, or DELETE statement. + + When using Alembic in "offline" mode, CRUD operations + aren't compatible with SQLAlchemy's default behavior surrounding + literal values, + which is that they are converted into bound values and passed + separately into the ``execute()`` method of the DBAPI cursor. + An offline SQL + script needs to have these rendered inline. While it should + always be noted that inline literal values are an **enormous** + security hole in an application that handles untrusted input, + a schema migration is not run in this context, so + literals are safe to render inline, with the caveat that + advanced types like dates may not be supported directly + by SQLAlchemy. + + See :meth:`.Operations.execute` for an example usage of + :meth:`.Operations.inline_literal`. + + The environment can also be configured to attempt to render + "literal" values inline automatically, for those simple types + that are supported by the dialect; see + :paramref:`.EnvironmentContext.configure.literal_binds` for this + more recently added feature. + + :param value: The value to render. Strings, integers, and simple + numerics should be supported. Other types like boolean, + dates, etc. may or may not be supported yet by various + backends. + :param type\_: optional - a :class:`sqlalchemy.types.TypeEngine` + subclass stating the type of this value. In SQLAlchemy + expressions, this is usually derived automatically + from the Python type of the value itself, as well as + based on the context in which the value is used. + + .. seealso:: + + :paramref:`.EnvironmentContext.configure.literal_binds` + + """ + return sqla_compat._literal_bindparam(None, value, type_=type_) + + def get_bind(self) -> Connection: + """Return the current 'bind'. + + Under normal circumstances, this is the + :class:`~sqlalchemy.engine.Connection` currently being used + to emit SQL to the database. + + In a SQL script context, this value is ``None``. [TODO: verify this] + + """ + return self.migration_context.impl.bind # type: ignore[return-value] + + def run_async( + self, + async_function: Callable[..., Awaitable[_T]], + *args: Any, + **kw_args: Any, + ) -> _T: + """Invoke the given asynchronous callable, passing an asynchronous + :class:`~sqlalchemy.ext.asyncio.AsyncConnection` as the first + argument. + + This method allows calling async functions from within the + synchronous ``upgrade()`` or ``downgrade()`` alembic migration + method. + + The async connection passed to the callable shares the same + transaction as the connection running in the migration context. + + Any additional arg or kw_arg passed to this function are passed + to the provided async function. + + .. versionadded: 1.11 + + .. note:: + + This method can be called only when alembic is called using + an async dialect. 
+ """ + if not sqla_compat.sqla_14_18: + raise NotImplementedError("SQLAlchemy 1.4.18+ required") + sync_conn = self.get_bind() + if sync_conn is None: + raise NotImplementedError("Cannot call run_async in SQL mode") + if not sync_conn.dialect.is_async: + raise ValueError("Cannot call run_async with a sync engine") + from sqlalchemy.ext.asyncio import AsyncConnection + from sqlalchemy.util import await_only + + async_conn = AsyncConnection._retrieve_proxy_for_target(sync_conn) + return await_only(async_function(async_conn, *args, **kw_args)) + + +class Operations(AbstractOperations): + """Define high level migration operations. + + Each operation corresponds to some schema migration operation, + executed against a particular :class:`.MigrationContext` + which in turn represents connectivity to a database, + or a file output stream. + + While :class:`.Operations` is normally configured as + part of the :meth:`.EnvironmentContext.run_migrations` + method called from an ``env.py`` script, a standalone + :class:`.Operations` instance can be + made for use cases external to regular Alembic + migrations by passing in a :class:`.MigrationContext`:: + + from alembic.migration import MigrationContext + from alembic.operations import Operations + + conn = myengine.connect() + ctx = MigrationContext.configure(conn) + op = Operations(ctx) + + op.alter_column("t", "c", nullable=True) + + Note that as of 0.8, most of the methods on this class are produced + dynamically using the :meth:`.Operations.register_operation` + method. + + """ + + if TYPE_CHECKING: + # START STUB FUNCTIONS: op_cls + # ### the following stubs are generated by tools/write_pyi.py ### + # ### do not edit ### + + def add_column( + self, + table_name: str, + column: Column[Any], + *, + schema: Optional[str] = None, + if_not_exists: Optional[bool] = None, + ) -> None: + """Issue an "add column" instruction using the current + migration context. + + e.g.:: + + from alembic import op + from sqlalchemy import Column, String + + op.add_column("organization", Column("name", String())) + + The :meth:`.Operations.add_column` method typically corresponds + to the SQL command "ALTER TABLE... ADD COLUMN". Within the scope + of this command, the column's name, datatype, nullability, + and optional server-generated defaults may be indicated. + + .. note:: + + With the exception of NOT NULL constraints or single-column FOREIGN + KEY constraints, other kinds of constraints such as PRIMARY KEY, + UNIQUE or CHECK constraints **cannot** be generated using this + method; for these constraints, refer to operations such as + :meth:`.Operations.create_primary_key` and + :meth:`.Operations.create_check_constraint`. In particular, the + following :class:`~sqlalchemy.schema.Column` parameters are + **ignored**: + + * :paramref:`~sqlalchemy.schema.Column.primary_key` - SQL databases + typically do not support an ALTER operation that can add + individual columns one at a time to an existing primary key + constraint, therefore it's less ambiguous to use the + :meth:`.Operations.create_primary_key` method, which assumes no + existing primary key constraint is present. + * :paramref:`~sqlalchemy.schema.Column.unique` - use the + :meth:`.Operations.create_unique_constraint` method + * :paramref:`~sqlalchemy.schema.Column.index` - use the + :meth:`.Operations.create_index` method + + + The provided :class:`~sqlalchemy.schema.Column` object may include a + :class:`~sqlalchemy.schema.ForeignKey` constraint directive, + referencing a remote table name. 
For this specific type of constraint, + Alembic will automatically emit a second ALTER statement in order to + add the single-column FOREIGN KEY constraint separately:: + + from alembic import op + from sqlalchemy import Column, INTEGER, ForeignKey + + op.add_column( + "organization", + Column("account_id", INTEGER, ForeignKey("accounts.id")), + ) + + The column argument passed to :meth:`.Operations.add_column` is a + :class:`~sqlalchemy.schema.Column` construct, used in the same way it's + used in SQLAlchemy. In particular, values or functions to be indicated + as producing the column's default value on the database side are + specified using the ``server_default`` parameter, and not ``default`` + which only specifies Python-side defaults:: + + from alembic import op + from sqlalchemy import Column, TIMESTAMP, func + + # specify "DEFAULT NOW" along with the column add + op.add_column( + "account", + Column("timestamp", TIMESTAMP, server_default=func.now()), + ) + + :param table_name: String name of the parent table. + :param column: a :class:`sqlalchemy.schema.Column` object + representing the new column. + :param schema: Optional schema name to operate within. To control + quoting of the schema outside of the default behavior, use + the SQLAlchemy construct + :class:`~sqlalchemy.sql.elements.quoted_name`. + :param if_not_exists: If True, adds IF NOT EXISTS operator + when creating the new column for compatible dialects + + .. versionadded:: 1.16.0 + + """ # noqa: E501 + ... + + def alter_column( + self, + table_name: str, + column_name: str, + *, + nullable: Optional[bool] = None, + comment: Union[str, Literal[False], None] = False, + server_default: Union[ + str, bool, Identity, Computed, TextClause, None + ] = False, + new_column_name: Optional[str] = None, + type_: Union[TypeEngine[Any], Type[TypeEngine[Any]], None] = None, + existing_type: Union[ + TypeEngine[Any], Type[TypeEngine[Any]], None + ] = None, + existing_server_default: Union[ + str, bool, Identity, Computed, TextClause, None + ] = False, + existing_nullable: Optional[bool] = None, + existing_comment: Optional[str] = None, + schema: Optional[str] = None, + **kw: Any, + ) -> None: + r"""Issue an "alter column" instruction using the + current migration context. + + Generally, only that aspect of the column which + is being changed, i.e. name, type, nullability, + default, needs to be specified. Multiple changes + can also be specified at once and the backend should + "do the right thing", emitting each change either + separately or together as the backend allows. + + MySQL has special requirements here, since MySQL + cannot ALTER a column without a full specification. + When producing MySQL-compatible migration files, + it is recommended that the ``existing_type``, + ``existing_server_default``, and ``existing_nullable`` + parameters be present, if not being altered. + + Type changes which are against the SQLAlchemy + "schema" types :class:`~sqlalchemy.types.Boolean` + and :class:`~sqlalchemy.types.Enum` may also + add or drop constraints which accompany those + types on backends that don't support them natively. + The ``existing_type`` argument is + used in this case to identify and remove a previous + constraint that was bound to the type object. + + :param table_name: string name of the target table. + :param column_name: string name of the target column, + as it exists before the operation begins. + :param nullable: Optional; specify ``True`` or ``False`` + to alter the column's nullability. 
+ :param server_default: Optional; specify a string + SQL expression, :func:`~sqlalchemy.sql.expression.text`, + or :class:`~sqlalchemy.schema.DefaultClause` to indicate + an alteration to the column's default value. + Set to ``None`` to have the default removed. + :param comment: optional string text of a new comment to add to the + column. + :param new_column_name: Optional; specify a string name here to + indicate the new name within a column rename operation. + :param type\_: Optional; a :class:`~sqlalchemy.types.TypeEngine` + type object to specify a change to the column's type. + For SQLAlchemy types that also indicate a constraint (i.e. + :class:`~sqlalchemy.types.Boolean`, :class:`~sqlalchemy.types.Enum`), + the constraint is also generated. + :param autoincrement: set the ``AUTO_INCREMENT`` flag of the column; + currently understood by the MySQL dialect. + :param existing_type: Optional; a + :class:`~sqlalchemy.types.TypeEngine` + type object to specify the previous type. This + is required for all MySQL column alter operations that + don't otherwise specify a new type, as well as for + when nullability is being changed on a SQL Server + column. It is also used if the type is a so-called + SQLAlchemy "schema" type which may define a constraint (i.e. + :class:`~sqlalchemy.types.Boolean`, + :class:`~sqlalchemy.types.Enum`), + so that the constraint can be dropped. + :param existing_server_default: Optional; The existing + default value of the column. Required on MySQL if + an existing default is not being changed; else MySQL + removes the default. + :param existing_nullable: Optional; the existing nullability + of the column. Required on MySQL if the existing nullability + is not being changed; else MySQL sets this to NULL. + :param existing_autoincrement: Optional; the existing autoincrement + of the column. Used for MySQL's system of altering a column + that specifies ``AUTO_INCREMENT``. + :param existing_comment: string text of the existing comment on the + column to be maintained. Required on MySQL if the existing comment + on the column is not being changed. + :param schema: Optional schema name to operate within. To control + quoting of the schema outside of the default behavior, use + the SQLAlchemy construct + :class:`~sqlalchemy.sql.elements.quoted_name`. + :param postgresql_using: String argument which will indicate a + SQL expression to render within the Postgresql-specific USING clause + within ALTER COLUMN. This string is taken directly as raw SQL which + must explicitly include any necessary quoting or escaping of tokens + within the expression. + + """ # noqa: E501 + ... + + def bulk_insert( + self, + table: Union[Table, TableClause], + rows: List[Dict[str, Any]], + *, + multiinsert: bool = True, + ) -> None: + """Issue a "bulk insert" operation using the current + migration context. + + This provides a means of representing an INSERT of multiple rows + which works equally well in the context of executing on a live + connection as well as that of generating a SQL script. In the + case of a SQL script, the values are rendered inline into the + statement. + + e.g.:: + + from alembic import op + from datetime import date + from sqlalchemy.sql import table, column + from sqlalchemy import String, Integer, Date + + # Create an ad-hoc table to use for the insert statement. 
+ accounts_table = table( + "account", + column("id", Integer), + column("name", String), + column("create_date", Date), + ) + + op.bulk_insert( + accounts_table, + [ + { + "id": 1, + "name": "John Smith", + "create_date": date(2010, 10, 5), + }, + { + "id": 2, + "name": "Ed Williams", + "create_date": date(2007, 5, 27), + }, + { + "id": 3, + "name": "Wendy Jones", + "create_date": date(2008, 8, 15), + }, + ], + ) + + When using --sql mode, some datatypes may not render inline + automatically, such as dates and other special types. When this + issue is present, :meth:`.Operations.inline_literal` may be used:: + + op.bulk_insert( + accounts_table, + [ + { + "id": 1, + "name": "John Smith", + "create_date": op.inline_literal("2010-10-05"), + }, + { + "id": 2, + "name": "Ed Williams", + "create_date": op.inline_literal("2007-05-27"), + }, + { + "id": 3, + "name": "Wendy Jones", + "create_date": op.inline_literal("2008-08-15"), + }, + ], + multiinsert=False, + ) + + When using :meth:`.Operations.inline_literal` in conjunction with + :meth:`.Operations.bulk_insert`, in order for the statement to work + in "online" (e.g. non --sql) mode, the + :paramref:`~.Operations.bulk_insert.multiinsert` + flag should be set to ``False``, which will have the effect of + individual INSERT statements being emitted to the database, each + with a distinct VALUES clause, so that the "inline" values can + still be rendered, rather than attempting to pass the values + as bound parameters. + + :param table: a table object which represents the target of the INSERT. + + :param rows: a list of dictionaries indicating rows. + + :param multiinsert: when at its default of True and --sql mode is not + enabled, the INSERT statement will be executed using + "executemany()" style, where all elements in the list of + dictionaries are passed as bound parameters in a single + list. Setting this to False results in individual INSERT + statements being emitted per parameter set, and is needed + in those cases where non-literal values are present in the + parameter sets. + + """ # noqa: E501 + ... + + def create_check_constraint( + self, + constraint_name: Optional[str], + table_name: str, + condition: Union[str, ColumnElement[bool], TextClause], + *, + schema: Optional[str] = None, + **kw: Any, + ) -> None: + """Issue a "create check constraint" instruction using the + current migration context. + + e.g.:: + + from alembic import op + from sqlalchemy.sql import column, func + + op.create_check_constraint( + "ck_user_name_len", + "user", + func.len(column("name")) > 5, + ) + + CHECK constraints are usually against a SQL expression, so ad-hoc + table metadata is usually needed. The function will convert the given + arguments into a :class:`sqlalchemy.schema.CheckConstraint` bound + to an anonymous table in order to emit the CREATE statement. + + :param name: Name of the check constraint. The name is necessary + so that an ALTER statement can be emitted. For setups that + use an automated naming scheme such as that described at + :ref:`sqla:constraint_naming_conventions`, + ``name`` here can be ``None``, as the event listener will + apply the name to the constraint object when it is associated + with the table. + :param table_name: String name of the source table. + :param condition: SQL expression that's the condition of the + constraint. Can be a string or SQLAlchemy expression language + structure. + :param deferrable: optional bool. If set, emit DEFERRABLE or + NOT DEFERRABLE when issuing DDL for this constraint. 
+ :param initially: optional string. If set, emit INITIALLY + when issuing DDL for this constraint. + :param schema: Optional schema name to operate within. To control + quoting of the schema outside of the default behavior, use + the SQLAlchemy construct + :class:`~sqlalchemy.sql.elements.quoted_name`. + + """ # noqa: E501 + ... + + def create_exclude_constraint( + self, + constraint_name: str, + table_name: str, + *elements: Any, + **kw: Any, + ) -> Optional[Table]: + """Issue an alter to create an EXCLUDE constraint using the + current migration context. + + .. note:: This method is Postgresql specific, and additionally + requires at least SQLAlchemy 1.0. + + e.g.:: + + from alembic import op + + op.create_exclude_constraint( + "user_excl", + "user", + ("period", "&&"), + ("group", "="), + where=("group != 'some group'"), + ) + + Note that the expressions work the same way as that of + the ``ExcludeConstraint`` object itself; if plain strings are + passed, quoting rules must be applied manually. + + :param name: Name of the constraint. + :param table_name: String name of the source table. + :param elements: exclude conditions. + :param where: SQL expression or SQL string with optional WHERE + clause. + :param deferrable: optional bool. If set, emit DEFERRABLE or + NOT DEFERRABLE when issuing DDL for this constraint. + :param initially: optional string. If set, emit INITIALLY + when issuing DDL for this constraint. + :param schema: Optional schema name to operate within. + + """ # noqa: E501 + ... + + def create_foreign_key( + self, + constraint_name: Optional[str], + source_table: str, + referent_table: str, + local_cols: List[str], + remote_cols: List[str], + *, + onupdate: Optional[str] = None, + ondelete: Optional[str] = None, + deferrable: Optional[bool] = None, + initially: Optional[str] = None, + match: Optional[str] = None, + source_schema: Optional[str] = None, + referent_schema: Optional[str] = None, + **dialect_kw: Any, + ) -> None: + """Issue a "create foreign key" instruction using the + current migration context. + + e.g.:: + + from alembic import op + + op.create_foreign_key( + "fk_user_address", + "address", + "user", + ["user_id"], + ["id"], + ) + + This internally generates a :class:`~sqlalchemy.schema.Table` object + containing the necessary columns, then generates a new + :class:`~sqlalchemy.schema.ForeignKeyConstraint` + object which it then associates with the + :class:`~sqlalchemy.schema.Table`. + Any event listeners associated with this action will be fired + off normally. The :class:`~sqlalchemy.schema.AddConstraint` + construct is ultimately used to generate the ALTER statement. + + :param constraint_name: Name of the foreign key constraint. The name + is necessary so that an ALTER statement can be emitted. For setups + that use an automated naming scheme such as that described at + :ref:`sqla:constraint_naming_conventions`, + ``name`` here can be ``None``, as the event listener will + apply the name to the constraint object when it is associated + with the table. + :param source_table: String name of the source table. + :param referent_table: String name of the destination table. + :param local_cols: a list of string column names in the + source table. + :param remote_cols: a list of string column names in the + remote table. + :param onupdate: Optional string. If set, emit ON UPDATE when + issuing DDL for this constraint. Typical values include CASCADE, + DELETE and RESTRICT. + :param ondelete: Optional string. 
If set, emit ON DELETE when + issuing DDL for this constraint. Typical values include CASCADE, + DELETE and RESTRICT. + :param deferrable: optional bool. If set, emit DEFERRABLE or NOT + DEFERRABLE when issuing DDL for this constraint. + :param source_schema: Optional schema name of the source table. + :param referent_schema: Optional schema name of the destination table. + + """ # noqa: E501 + ... + + def create_index( + self, + index_name: Optional[str], + table_name: str, + columns: Sequence[Union[str, TextClause, ColumnElement[Any]]], + *, + schema: Optional[str] = None, + unique: bool = False, + if_not_exists: Optional[bool] = None, + **kw: Any, + ) -> None: + r"""Issue a "create index" instruction using the current + migration context. + + e.g.:: + + from alembic import op + + op.create_index("ik_test", "t1", ["foo", "bar"]) + + Functional indexes can be produced by using the + :func:`sqlalchemy.sql.expression.text` construct:: + + from alembic import op + from sqlalchemy import text + + op.create_index("ik_test", "t1", [text("lower(foo)")]) + + :param index_name: name of the index. + :param table_name: name of the owning table. + :param columns: a list consisting of string column names and/or + :func:`~sqlalchemy.sql.expression.text` constructs. + :param schema: Optional schema name to operate within. To control + quoting of the schema outside of the default behavior, use + the SQLAlchemy construct + :class:`~sqlalchemy.sql.elements.quoted_name`. + :param unique: If True, create a unique index. + + :param quote: Force quoting of this column's name on or off, + corresponding to ``True`` or ``False``. When left at its default + of ``None``, the column identifier will be quoted according to + whether the name is case sensitive (identifiers with at least one + upper case character are treated as case sensitive), or if it's a + reserved word. This flag is only needed to force quoting of a + reserved word which is not known by the SQLAlchemy dialect. + + :param if_not_exists: If True, adds IF NOT EXISTS operator when + creating the new index. + + .. versionadded:: 1.12.0 + + :param \**kw: Additional keyword arguments not mentioned above are + dialect specific, and passed in the form + ``_``. + See the documentation regarding an individual dialect at + :ref:`dialect_toplevel` for detail on documented arguments. + + """ # noqa: E501 + ... + + def create_primary_key( + self, + constraint_name: Optional[str], + table_name: str, + columns: List[str], + *, + schema: Optional[str] = None, + ) -> None: + """Issue a "create primary key" instruction using the current + migration context. + + e.g.:: + + from alembic import op + + op.create_primary_key("pk_my_table", "my_table", ["id", "version"]) + + This internally generates a :class:`~sqlalchemy.schema.Table` object + containing the necessary columns, then generates a new + :class:`~sqlalchemy.schema.PrimaryKeyConstraint` + object which it then associates with the + :class:`~sqlalchemy.schema.Table`. + Any event listeners associated with this action will be fired + off normally. The :class:`~sqlalchemy.schema.AddConstraint` + construct is ultimately used to generate the ALTER statement. + + :param constraint_name: Name of the primary key constraint. The name + is necessary so that an ALTER statement can be emitted. 
For setups + that use an automated naming scheme such as that described at + :ref:`sqla:constraint_naming_conventions` + ``name`` here can be ``None``, as the event listener will + apply the name to the constraint object when it is associated + with the table. + :param table_name: String name of the target table. + :param columns: a list of string column names to be applied to the + primary key constraint. + :param schema: Optional schema name to operate within. To control + quoting of the schema outside of the default behavior, use + the SQLAlchemy construct + :class:`~sqlalchemy.sql.elements.quoted_name`. + + """ # noqa: E501 + ... + + def create_table( + self, + table_name: str, + *columns: SchemaItem, + if_not_exists: Optional[bool] = None, + **kw: Any, + ) -> Table: + r"""Issue a "create table" instruction using the current migration + context. + + This directive receives an argument list similar to that of the + traditional :class:`sqlalchemy.schema.Table` construct, but without the + metadata:: + + from sqlalchemy import INTEGER, VARCHAR, NVARCHAR, Column + from alembic import op + + op.create_table( + "account", + Column("id", INTEGER, primary_key=True), + Column("name", VARCHAR(50), nullable=False), + Column("description", NVARCHAR(200)), + Column("timestamp", TIMESTAMP, server_default=func.now()), + ) + + Note that :meth:`.create_table` accepts + :class:`~sqlalchemy.schema.Column` + constructs directly from the SQLAlchemy library. In particular, + default values to be created on the database side are + specified using the ``server_default`` parameter, and not + ``default`` which only specifies Python-side defaults:: + + from alembic import op + from sqlalchemy import Column, TIMESTAMP, func + + # specify "DEFAULT NOW" along with the "timestamp" column + op.create_table( + "account", + Column("id", INTEGER, primary_key=True), + Column("timestamp", TIMESTAMP, server_default=func.now()), + ) + + The function also returns a newly created + :class:`~sqlalchemy.schema.Table` object, corresponding to the table + specification given, which is suitable for + immediate SQL operations, in particular + :meth:`.Operations.bulk_insert`:: + + from sqlalchemy import INTEGER, VARCHAR, NVARCHAR, Column + from alembic import op + + account_table = op.create_table( + "account", + Column("id", INTEGER, primary_key=True), + Column("name", VARCHAR(50), nullable=False), + Column("description", NVARCHAR(200)), + Column("timestamp", TIMESTAMP, server_default=func.now()), + ) + + op.bulk_insert( + account_table, + [ + {"name": "A1", "description": "account 1"}, + {"name": "A2", "description": "account 2"}, + ], + ) + + :param table_name: Name of the table + :param \*columns: collection of :class:`~sqlalchemy.schema.Column` + objects within + the table, as well as optional :class:`~sqlalchemy.schema.Constraint` + objects + and :class:`~.sqlalchemy.schema.Index` objects. + :param schema: Optional schema name to operate within. To control + quoting of the schema outside of the default behavior, use + the SQLAlchemy construct + :class:`~sqlalchemy.sql.elements.quoted_name`. + :param if_not_exists: If True, adds IF NOT EXISTS operator when + creating the new table. + + .. versionadded:: 1.13.3 + :param \**kw: Other keyword arguments are passed to the underlying + :class:`sqlalchemy.schema.Table` object created for the command. + + :return: the :class:`~sqlalchemy.schema.Table` object corresponding + to the parameters given. + + """ # noqa: E501 + ... 
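Taken together, the table-level directives stubbed above compose into an ordinary ``upgrade()`` function. A minimal sketch, using hypothetical ``account`` / ``account_entry`` tables and index names rather than anything from this module::

    from alembic import op
    from sqlalchemy import Column, ForeignKey, Integer, String


    def upgrade():
        # create_table() returns the Table object it created
        account = op.create_table(
            "account",
            Column("id", Integer, primary_key=True),
            Column("name", String(50), nullable=False),
        )

        # the returned Table is immediately usable with bulk_insert()
        op.bulk_insert(account, [{"id": 1, "name": "default"}])

        # a single-column FK can ride along on the Column itself
        op.create_table(
            "account_entry",
            Column("id", Integer, primary_key=True),
            Column("account_id", Integer, ForeignKey("account.id")),
            Column("label", String(100)),
        )

        # name the index explicitly so downgrade() can drop it by name
        op.create_index("ix_account_entry_label", "account_entry", ["label"])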
+ + def create_table_comment( + self, + table_name: str, + comment: Optional[str], + *, + existing_comment: Optional[str] = None, + schema: Optional[str] = None, + ) -> None: + """Emit a COMMENT ON operation to set the comment for a table. + + :param table_name: string name of the target table. + :param comment: string value of the comment being registered against + the specified table. + :param existing_comment: String value of a comment + already registered on the specified table, used within autogenerate + so that the operation is reversible, but not required for direct + use. + + .. seealso:: + + :meth:`.Operations.drop_table_comment` + + :paramref:`.Operations.alter_column.comment` + + """ # noqa: E501 + ... + + def create_unique_constraint( + self, + constraint_name: Optional[str], + table_name: str, + columns: Sequence[str], + *, + schema: Optional[str] = None, + **kw: Any, + ) -> Any: + """Issue a "create unique constraint" instruction using the + current migration context. + + e.g.:: + + from alembic import op + op.create_unique_constraint("uq_user_name", "user", ["name"]) + + This internally generates a :class:`~sqlalchemy.schema.Table` object + containing the necessary columns, then generates a new + :class:`~sqlalchemy.schema.UniqueConstraint` + object which it then associates with the + :class:`~sqlalchemy.schema.Table`. + Any event listeners associated with this action will be fired + off normally. The :class:`~sqlalchemy.schema.AddConstraint` + construct is ultimately used to generate the ALTER statement. + + :param name: Name of the unique constraint. The name is necessary + so that an ALTER statement can be emitted. For setups that + use an automated naming scheme such as that described at + :ref:`sqla:constraint_naming_conventions`, + ``name`` here can be ``None``, as the event listener will + apply the name to the constraint object when it is associated + with the table. + :param table_name: String name of the source table. + :param columns: a list of string column names in the + source table. + :param deferrable: optional bool. If set, emit DEFERRABLE or + NOT DEFERRABLE when issuing DDL for this constraint. + :param initially: optional string. If set, emit INITIALLY + when issuing DDL for this constraint. + :param schema: Optional schema name to operate within. To control + quoting of the schema outside of the default behavior, use + the SQLAlchemy construct + :class:`~sqlalchemy.sql.elements.quoted_name`. + + """ # noqa: E501 + ... + + def drop_column( + self, + table_name: str, + column_name: str, + *, + schema: Optional[str] = None, + **kw: Any, + ) -> None: + """Issue a "drop column" instruction using the current + migration context. + + e.g.:: + + drop_column("organization", "account_id") + + :param table_name: name of table + :param column_name: name of column + :param schema: Optional schema name to operate within. To control + quoting of the schema outside of the default behavior, use + the SQLAlchemy construct + :class:`~sqlalchemy.sql.elements.quoted_name`. + :param if_exists: If True, adds IF EXISTS operator when + dropping the new column for compatible dialects + + .. versionadded:: 1.16.0 + + :param mssql_drop_check: Optional boolean. When ``True``, on + Microsoft SQL Server only, first + drop the CHECK constraint on the column using a + SQL-script-compatible + block that selects into a @variable from sys.check_constraints, + then exec's a separate DROP CONSTRAINT for that constraint. + :param mssql_drop_default: Optional boolean. 
When ``True``, on + Microsoft SQL Server only, first + drop the DEFAULT constraint on the column using a + SQL-script-compatible + block that selects into a @variable from sys.default_constraints, + then exec's a separate DROP CONSTRAINT for that default. + :param mssql_drop_foreign_key: Optional boolean. When ``True``, on + Microsoft SQL Server only, first + drop a single FOREIGN KEY constraint on the column using a + SQL-script-compatible + block that selects into a @variable from + sys.foreign_keys/sys.foreign_key_columns, + then exec's a separate DROP CONSTRAINT for that default. Only + works if the column has exactly one FK constraint which refers to + it, at the moment. + """ # noqa: E501 + ... + + def drop_constraint( + self, + constraint_name: str, + table_name: str, + type_: Optional[str] = None, + *, + schema: Optional[str] = None, + if_exists: Optional[bool] = None, + ) -> None: + r"""Drop a constraint of the given name, typically via DROP CONSTRAINT. + + :param constraint_name: name of the constraint. + :param table_name: table name. + :param type\_: optional, required on MySQL. can be + 'foreignkey', 'primary', 'unique', or 'check'. + :param schema: Optional schema name to operate within. To control + quoting of the schema outside of the default behavior, use + the SQLAlchemy construct + :class:`~sqlalchemy.sql.elements.quoted_name`. + :param if_exists: If True, adds IF EXISTS operator when + dropping the constraint + + .. versionadded:: 1.16.0 + + """ # noqa: E501 + ... + + def drop_index( + self, + index_name: str, + table_name: Optional[str] = None, + *, + schema: Optional[str] = None, + if_exists: Optional[bool] = None, + **kw: Any, + ) -> None: + r"""Issue a "drop index" instruction using the current + migration context. + + e.g.:: + + drop_index("accounts") + + :param index_name: name of the index. + :param table_name: name of the owning table. Some + backends such as Microsoft SQL Server require this. + :param schema: Optional schema name to operate within. To control + quoting of the schema outside of the default behavior, use + the SQLAlchemy construct + :class:`~sqlalchemy.sql.elements.quoted_name`. + + :param if_exists: If True, adds IF EXISTS operator when + dropping the index. + + .. versionadded:: 1.12.0 + + :param \**kw: Additional keyword arguments not mentioned above are + dialect specific, and passed in the form + ``_``. + See the documentation regarding an individual dialect at + :ref:`dialect_toplevel` for detail on documented arguments. + + """ # noqa: E501 + ... + + def drop_table( + self, + table_name: str, + *, + schema: Optional[str] = None, + if_exists: Optional[bool] = None, + **kw: Any, + ) -> None: + r"""Issue a "drop table" instruction using the current + migration context. + + + e.g.:: + + drop_table("accounts") + + :param table_name: Name of the table + :param schema: Optional schema name to operate within. To control + quoting of the schema outside of the default behavior, use + the SQLAlchemy construct + :class:`~sqlalchemy.sql.elements.quoted_name`. + :param if_exists: If True, adds IF EXISTS operator when + dropping the table. + + .. versionadded:: 1.13.3 + :param \**kw: Other keyword arguments are passed to the underlying + :class:`sqlalchemy.schema.Table` object created for the command. + + """ # noqa: E501 + ... 
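The ``drop_*`` directives above supply the reverse path. A sketch of the matching ``downgrade()`` for the hypothetical tables from the previous example; ``legacy_scratch`` is likewise invented, only to show ``if_exists``::

    from alembic import op


    def downgrade():
        # drop the child table before the parent so the FK never dangles
        op.drop_index("ix_account_entry_label", table_name="account_entry")
        op.drop_table("account_entry")
        op.drop_table("account")

        # tolerated even if the table was never created on this database
        op.drop_table("legacy_scratch", if_exists=True)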
+ + def drop_table_comment( + self, + table_name: str, + *, + existing_comment: Optional[str] = None, + schema: Optional[str] = None, + ) -> None: + """Issue a "drop table comment" operation to + remove an existing comment set on a table. + + :param table_name: string name of the target table. + :param existing_comment: An optional string value of a comment already + registered on the specified table. + + .. seealso:: + + :meth:`.Operations.create_table_comment` + + :paramref:`.Operations.alter_column.comment` + + """ # noqa: E501 + ... + + def execute( + self, + sqltext: Union[Executable, str], + *, + execution_options: Optional[dict[str, Any]] = None, + ) -> None: + r"""Execute the given SQL using the current migration context. + + The given SQL can be a plain string, e.g.:: + + op.execute("INSERT INTO table (foo) VALUES ('some value')") + + Or it can be any kind of Core SQL Expression construct, such as + below where we use an update construct:: + + from sqlalchemy.sql import table, column + from sqlalchemy import String + from alembic import op + + account = table("account", column("name", String)) + op.execute( + account.update() + .where(account.c.name == op.inline_literal("account 1")) + .values({"name": op.inline_literal("account 2")}) + ) + + Above, we made use of the SQLAlchemy + :func:`sqlalchemy.sql.expression.table` and + :func:`sqlalchemy.sql.expression.column` constructs to make a brief, + ad-hoc table construct just for our UPDATE statement. A full + :class:`~sqlalchemy.schema.Table` construct of course works perfectly + fine as well, though note it's a recommended practice to at least + ensure the definition of a table is self-contained within the migration + script, rather than imported from a module that may break compatibility + with older migrations. + + In a SQL script context, the statement is emitted directly to the + output stream. There is *no* return result, however, as this + function is oriented towards generating a change script + that can run in "offline" mode. Additionally, parameterized + statements are discouraged here, as they *will not work* in offline + mode. Above, we use :meth:`.inline_literal` where parameters are + to be used. + + For full interaction with a connected database where parameters can + also be used normally, use the "bind" available from the context:: + + from alembic import op + + connection = op.get_bind() + + connection.execute( + account.update() + .where(account.c.name == "account 1") + .values({"name": "account 2"}) + ) + + Additionally, when passing the statement as a plain string, it is first + coerced into a :func:`sqlalchemy.sql.expression.text` construct + before being passed along. In the less likely case that the + literal SQL string contains a colon, it must be escaped with a + backslash, as:: + + op.execute(r"INSERT INTO table (foo) VALUES ('\:colon_value')") + + + :param sqltext: Any legal SQLAlchemy expression, including: + + * a string + * a :func:`sqlalchemy.sql.expression.text` construct. + * a :func:`sqlalchemy.sql.expression.insert` construct. + * a :func:`sqlalchemy.sql.expression.update` construct. + * a :func:`sqlalchemy.sql.expression.delete` construct. + * Any "executable" described in SQLAlchemy Core documentation, + noting that no result set is returned. + + .. note:: when passing a plain string, the statement is coerced into + a :func:`sqlalchemy.sql.expression.text` construct. This construct + considers symbols with colons, e.g. ``:foo`` to be bound parameters. 
+ To avoid this, ensure that colon symbols are escaped, e.g. + ``\:foo``. + + :param execution_options: Optional dictionary of + execution options, will be passed to + :meth:`sqlalchemy.engine.Connection.execution_options`. + """ # noqa: E501 + ... + + def rename_table( + self, + old_table_name: str, + new_table_name: str, + *, + schema: Optional[str] = None, + ) -> None: + """Emit an ALTER TABLE to rename a table. + + :param old_table_name: old name. + :param new_table_name: new name. + :param schema: Optional schema name to operate within. To control + quoting of the schema outside of the default behavior, use + the SQLAlchemy construct + :class:`~sqlalchemy.sql.elements.quoted_name`. + + """ # noqa: E501 + ... + + # END STUB FUNCTIONS: op_cls + + +class BatchOperations(AbstractOperations): + """Modifies the interface :class:`.Operations` for batch mode. + + This basically omits the ``table_name`` and ``schema`` parameters + from associated methods, as these are a given when running under batch + mode. + + .. seealso:: + + :meth:`.Operations.batch_alter_table` + + Note that as of 0.8, most of the methods on this class are produced + dynamically using the :meth:`.Operations.register_operation` + method. + + """ + + impl: BatchOperationsImpl + + def _noop(self, operation: Any) -> NoReturn: + raise NotImplementedError( + "The %s method does not apply to a batch table alter operation." + % operation + ) + + if TYPE_CHECKING: + # START STUB FUNCTIONS: batch_op + # ### the following stubs are generated by tools/write_pyi.py ### + # ### do not edit ### + + def add_column( + self, + column: Column[Any], + *, + insert_before: Optional[str] = None, + insert_after: Optional[str] = None, + if_not_exists: Optional[bool] = None, + ) -> None: + """Issue an "add column" instruction using the current + batch migration context. + + .. seealso:: + + :meth:`.Operations.add_column` + + """ # noqa: E501 + ... + + def alter_column( + self, + column_name: str, + *, + nullable: Optional[bool] = None, + comment: Union[str, Literal[False], None] = False, + server_default: Any = False, + new_column_name: Optional[str] = None, + type_: Union[TypeEngine[Any], Type[TypeEngine[Any]], None] = None, + existing_type: Union[ + TypeEngine[Any], Type[TypeEngine[Any]], None + ] = None, + existing_server_default: Union[ + str, bool, Identity, Computed, None + ] = False, + existing_nullable: Optional[bool] = None, + existing_comment: Optional[str] = None, + insert_before: Optional[str] = None, + insert_after: Optional[str] = None, + **kw: Any, + ) -> None: + """Issue an "alter column" instruction using the current + batch migration context. + + Parameters are the same as that of :meth:`.Operations.alter_column`, + as well as the following option(s): + + :param insert_before: String name of an existing column which this + column should be placed before, when creating the new table. + + :param insert_after: String name of an existing column which this + column should be placed after, when creating the new table. If + both :paramref:`.BatchOperations.alter_column.insert_before` + and :paramref:`.BatchOperations.alter_column.insert_after` are + omitted, the column is inserted after the last existing column + in the table. + + .. seealso:: + + :meth:`.Operations.alter_column` + + + """ # noqa: E501 + ... 
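For the batch form, column placement only applies when the table is rebuilt. A sketch against a hypothetical ``account`` table; ``recreate="always"`` is used because ``insert_after`` requires the "move and copy" path rather than plain ALTER::

    from alembic import op
    from sqlalchemy import Column, String


    def upgrade():
        with op.batch_alter_table("account", recreate="always") as batch_op:
            # place the new column directly after "name" in the rebuilt table
            batch_op.add_column(
                Column("nickname", String(50)), insert_after="name"
            )
            # other per-table changes are collected and applied at exit
            batch_op.alter_column(
                "name", existing_type=String(50), nullable=True
            )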
+ + def create_check_constraint( + self, + constraint_name: str, + condition: Union[str, ColumnElement[bool], TextClause], + **kw: Any, + ) -> None: + """Issue a "create check constraint" instruction using the + current batch migration context. + + The batch form of this call omits the ``source`` and ``schema`` + arguments from the call. + + .. seealso:: + + :meth:`.Operations.create_check_constraint` + + """ # noqa: E501 + ... + + def create_exclude_constraint( + self, constraint_name: str, *elements: Any, **kw: Any + ) -> Optional[Table]: + """Issue a "create exclude constraint" instruction using the + current batch migration context. + + .. note:: This method is Postgresql specific, and additionally + requires at least SQLAlchemy 1.0. + + .. seealso:: + + :meth:`.Operations.create_exclude_constraint` + + """ # noqa: E501 + ... + + def create_foreign_key( + self, + constraint_name: Optional[str], + referent_table: str, + local_cols: List[str], + remote_cols: List[str], + *, + referent_schema: Optional[str] = None, + onupdate: Optional[str] = None, + ondelete: Optional[str] = None, + deferrable: Optional[bool] = None, + initially: Optional[str] = None, + match: Optional[str] = None, + **dialect_kw: Any, + ) -> None: + """Issue a "create foreign key" instruction using the + current batch migration context. + + The batch form of this call omits the ``source`` and ``source_schema`` + arguments from the call. + + e.g.:: + + with batch_alter_table("address") as batch_op: + batch_op.create_foreign_key( + "fk_user_address", + "user", + ["user_id"], + ["id"], + ) + + .. seealso:: + + :meth:`.Operations.create_foreign_key` + + """ # noqa: E501 + ... + + def create_index( + self, index_name: str, columns: List[str], **kw: Any + ) -> None: + """Issue a "create index" instruction using the + current batch migration context. + + .. seealso:: + + :meth:`.Operations.create_index` + + """ # noqa: E501 + ... + + def create_primary_key( + self, constraint_name: Optional[str], columns: List[str] + ) -> None: + """Issue a "create primary key" instruction using the + current batch migration context. + + The batch form of this call omits the ``table_name`` and ``schema`` + arguments from the call. + + .. seealso:: + + :meth:`.Operations.create_primary_key` + + """ # noqa: E501 + ... + + def create_table_comment( + self, + comment: Optional[str], + *, + existing_comment: Optional[str] = None, + ) -> None: + """Emit a COMMENT ON operation to set the comment for a table + using the current batch migration context. + + :param comment: string value of the comment being registered against + the specified table. + :param existing_comment: String value of a comment + already registered on the specified table, used within autogenerate + so that the operation is reversible, but not required for direct + use. + + """ # noqa: E501 + ... + + def create_unique_constraint( + self, constraint_name: str, columns: Sequence[str], **kw: Any + ) -> Any: + """Issue a "create unique constraint" instruction using the + current batch migration context. + + The batch form of this call omits the ``source`` and ``schema`` + arguments from the call. + + .. seealso:: + + :meth:`.Operations.create_unique_constraint` + + """ # noqa: E501 + ... + + def drop_column(self, column_name: str, **kw: Any) -> None: + """Issue a "drop column" instruction using the current + batch migration context. + + .. seealso:: + + :meth:`.Operations.drop_column` + + """ # noqa: E501 + ... 
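Dropping an unnamed reflected constraint on SQLite is the case the ``naming_convention`` argument of ``batch_alter_table()`` is aimed at. A sketch that assumes the application metadata uses the (hypothetical) convention shown, so the reflected constraint acquires a predictable name that ``drop_constraint()`` can address::

    from alembic import op

    # assumed to match the naming_convention on the models' MetaData
    NAMING = {
        "fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s"
    }


    def upgrade():
        with op.batch_alter_table(
            "account_entry", naming_convention=NAMING
        ) as batch_op:
            # name derived from the convention above
            batch_op.drop_constraint(
                "fk_account_entry_account_id_account", type_="foreignkey"
            )
            batch_op.drop_column("account_id")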
+ + def drop_constraint( + self, constraint_name: str, type_: Optional[str] = None + ) -> None: + """Issue a "drop constraint" instruction using the + current batch migration context. + + The batch form of this call omits the ``table_name`` and ``schema`` + arguments from the call. + + .. seealso:: + + :meth:`.Operations.drop_constraint` + + """ # noqa: E501 + ... + + def drop_index(self, index_name: str, **kw: Any) -> None: + """Issue a "drop index" instruction using the + current batch migration context. + + .. seealso:: + + :meth:`.Operations.drop_index` + + """ # noqa: E501 + ... + + def drop_table_comment( + self, *, existing_comment: Optional[str] = None + ) -> None: + """Issue a "drop table comment" operation to + remove an existing comment set on a table using the current + batch operations context. + + :param existing_comment: An optional string value of a comment already + registered on the specified table. + + """ # noqa: E501 + ... + + def execute( + self, + sqltext: Union[Executable, str], + *, + execution_options: Optional[dict[str, Any]] = None, + ) -> None: + """Execute the given SQL using the current migration context. + + .. seealso:: + + :meth:`.Operations.execute` + + """ # noqa: E501 + ... + + # END STUB FUNCTIONS: batch_op diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/operations/batch.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/operations/batch.py new file mode 100644 index 0000000000000000000000000000000000000000..fe183e9c8815b950d10a2280c9167969923e53b9 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/operations/batch.py @@ -0,0 +1,718 @@ +# mypy: allow-untyped-defs, allow-incomplete-defs, allow-untyped-calls +# mypy: no-warn-return-any, allow-any-generics + +from __future__ import annotations + +from typing import Any +from typing import Dict +from typing import List +from typing import Optional +from typing import Tuple +from typing import TYPE_CHECKING +from typing import Union + +from sqlalchemy import CheckConstraint +from sqlalchemy import Column +from sqlalchemy import ForeignKeyConstraint +from sqlalchemy import Index +from sqlalchemy import MetaData +from sqlalchemy import PrimaryKeyConstraint +from sqlalchemy import schema as sql_schema +from sqlalchemy import select +from sqlalchemy import Table +from sqlalchemy import types as sqltypes +from sqlalchemy.sql.schema import SchemaEventTarget +from sqlalchemy.util import OrderedDict +from sqlalchemy.util import topological + +from ..util import exc +from ..util.sqla_compat import _columns_for_constraint +from ..util.sqla_compat import _copy +from ..util.sqla_compat import _copy_expression +from ..util.sqla_compat import _ensure_scope_for_ddl +from ..util.sqla_compat import _fk_is_self_referential +from ..util.sqla_compat import _idx_table_bound_expressions +from ..util.sqla_compat import _is_type_bound +from ..util.sqla_compat import _remove_column_from_collection +from ..util.sqla_compat import _resolve_for_variant +from ..util.sqla_compat import constraint_name_defined +from ..util.sqla_compat import constraint_name_string + +if TYPE_CHECKING: + from typing import Literal + + from sqlalchemy.engine import Dialect + from sqlalchemy.sql.elements import ColumnClause + from sqlalchemy.sql.elements import quoted_name + from sqlalchemy.sql.functions import Function + from sqlalchemy.sql.schema import Constraint + from sqlalchemy.sql.type_api import TypeEngine 
+ + from ..ddl.impl import DefaultImpl + + +class BatchOperationsImpl: + def __init__( + self, + operations, + table_name, + schema, + recreate, + copy_from, + table_args, + table_kwargs, + reflect_args, + reflect_kwargs, + naming_convention, + partial_reordering, + ): + self.operations = operations + self.table_name = table_name + self.schema = schema + if recreate not in ("auto", "always", "never"): + raise ValueError( + "recreate may be one of 'auto', 'always', or 'never'." + ) + self.recreate = recreate + self.copy_from = copy_from + self.table_args = table_args + self.table_kwargs = dict(table_kwargs) + self.reflect_args = reflect_args + self.reflect_kwargs = dict(reflect_kwargs) + self.reflect_kwargs.setdefault( + "listeners", list(self.reflect_kwargs.get("listeners", ())) + ) + self.reflect_kwargs["listeners"].append( + ("column_reflect", operations.impl.autogen_column_reflect) + ) + self.naming_convention = naming_convention + self.partial_reordering = partial_reordering + self.batch = [] + + @property + def dialect(self) -> Dialect: + return self.operations.impl.dialect + + @property + def impl(self) -> DefaultImpl: + return self.operations.impl + + def _should_recreate(self) -> bool: + if self.recreate == "auto": + return self.operations.impl.requires_recreate_in_batch(self) + elif self.recreate == "always": + return True + else: + return False + + def flush(self) -> None: + should_recreate = self._should_recreate() + + with _ensure_scope_for_ddl(self.impl.connection): + if not should_recreate: + for opname, arg, kw in self.batch: + fn = getattr(self.operations.impl, opname) + fn(*arg, **kw) + else: + if self.naming_convention: + m1 = MetaData(naming_convention=self.naming_convention) + else: + m1 = MetaData() + + if self.copy_from is not None: + existing_table = self.copy_from + reflected = False + else: + if self.operations.migration_context.as_sql: + raise exc.CommandError( + f"This operation cannot proceed in --sql mode; " + f"batch mode with dialect " + f"{self.operations.migration_context.dialect.name} " # noqa: E501 + f"requires a live database connection with which " + f'to reflect the table "{self.table_name}". ' + f"To generate a batch SQL migration script using " + "table " + '"move and copy", a complete Table object ' + f'should be passed to the "copy_from" argument ' + "of the batch_alter_table() method so that table " + "reflection can be skipped." 
+ ) + + existing_table = Table( + self.table_name, + m1, + schema=self.schema, + autoload_with=self.operations.get_bind(), + *self.reflect_args, + **self.reflect_kwargs, + ) + reflected = True + + batch_impl = ApplyBatchImpl( + self.impl, + existing_table, + self.table_args, + self.table_kwargs, + reflected, + partial_reordering=self.partial_reordering, + ) + for opname, arg, kw in self.batch: + fn = getattr(batch_impl, opname) + fn(*arg, **kw) + + batch_impl._create(self.impl) + + def alter_column(self, *arg, **kw) -> None: + self.batch.append(("alter_column", arg, kw)) + + def add_column(self, *arg, **kw) -> None: + if ( + "insert_before" in kw or "insert_after" in kw + ) and not self._should_recreate(): + raise exc.CommandError( + "Can't specify insert_before or insert_after when using " + "ALTER; please specify recreate='always'" + ) + self.batch.append(("add_column", arg, kw)) + + def drop_column(self, *arg, **kw) -> None: + self.batch.append(("drop_column", arg, kw)) + + def add_constraint(self, const: Constraint) -> None: + self.batch.append(("add_constraint", (const,), {})) + + def drop_constraint(self, const: Constraint) -> None: + self.batch.append(("drop_constraint", (const,), {})) + + def rename_table(self, *arg, **kw): + self.batch.append(("rename_table", arg, kw)) + + def create_index(self, idx: Index, **kw: Any) -> None: + self.batch.append(("create_index", (idx,), kw)) + + def drop_index(self, idx: Index, **kw: Any) -> None: + self.batch.append(("drop_index", (idx,), kw)) + + def create_table_comment(self, table): + self.batch.append(("create_table_comment", (table,), {})) + + def drop_table_comment(self, table): + self.batch.append(("drop_table_comment", (table,), {})) + + def create_table(self, table): + raise NotImplementedError("Can't create table in batch mode") + + def drop_table(self, table): + raise NotImplementedError("Can't drop table in batch mode") + + def create_column_comment(self, column): + self.batch.append(("create_column_comment", (column,), {})) + + +class ApplyBatchImpl: + def __init__( + self, + impl: DefaultImpl, + table: Table, + table_args: tuple, + table_kwargs: Dict[str, Any], + reflected: bool, + partial_reordering: tuple = (), + ) -> None: + self.impl = impl + self.table = table # this is a Table object + self.table_args = table_args + self.table_kwargs = table_kwargs + self.temp_table_name = self._calc_temp_name(table.name) + self.new_table: Optional[Table] = None + + self.partial_reordering = partial_reordering # tuple of tuples + self.add_col_ordering: Tuple[ + Tuple[str, str], ... 
+ ] = () # tuple of tuples + + self.column_transfers = OrderedDict( + (c.name, {"expr": c}) for c in self.table.c + ) + self.existing_ordering = list(self.column_transfers) + + self.reflected = reflected + self._grab_table_elements() + + @classmethod + def _calc_temp_name(cls, tablename: Union[quoted_name, str]) -> str: + return ("_alembic_tmp_%s" % tablename)[0:50] + + def _grab_table_elements(self) -> None: + schema = self.table.schema + self.columns: Dict[str, Column[Any]] = OrderedDict() + for c in self.table.c: + c_copy = _copy(c, schema=schema) + c_copy.unique = c_copy.index = False + # ensure that the type object was copied, + # as we may need to modify it in-place + if isinstance(c.type, SchemaEventTarget): + assert c_copy.type is not c.type + self.columns[c.name] = c_copy + self.named_constraints: Dict[str, Constraint] = {} + self.unnamed_constraints = [] + self.col_named_constraints = {} + self.indexes: Dict[str, Index] = {} + self.new_indexes: Dict[str, Index] = {} + + for const in self.table.constraints: + if _is_type_bound(const): + continue + elif ( + self.reflected + and isinstance(const, CheckConstraint) + and not const.name + ): + # TODO: we are skipping unnamed reflected CheckConstraint + # because + # we have no way to determine _is_type_bound() for these. + pass + elif constraint_name_string(const.name): + self.named_constraints[const.name] = const + else: + self.unnamed_constraints.append(const) + + if not self.reflected: + for col in self.table.c: + for const in col.constraints: + if const.name: + self.col_named_constraints[const.name] = (col, const) + + for idx in self.table.indexes: + self.indexes[idx.name] = idx # type: ignore[index] + + for k in self.table.kwargs: + self.table_kwargs.setdefault(k, self.table.kwargs[k]) + + def _adjust_self_columns_for_partial_reordering(self) -> None: + pairs = set() + + col_by_idx = list(self.columns) + + if self.partial_reordering: + for tuple_ in self.partial_reordering: + for index, elem in enumerate(tuple_): + if index > 0: + pairs.add((tuple_[index - 1], elem)) + else: + for index, elem in enumerate(self.existing_ordering): + if index > 0: + pairs.add((col_by_idx[index - 1], elem)) + + pairs.update(self.add_col_ordering) + + # this can happen if some columns were dropped and not removed + # from existing_ordering. 
this should be prevented already, but + # conservatively making sure this didn't happen + pairs_list = [p for p in pairs if p[0] != p[1]] + + sorted_ = list( + topological.sort(pairs_list, col_by_idx, deterministic_order=True) + ) + self.columns = OrderedDict((k, self.columns[k]) for k in sorted_) + self.column_transfers = OrderedDict( + (k, self.column_transfers[k]) for k in sorted_ + ) + + def _transfer_elements_to_new_table(self) -> None: + assert self.new_table is None, "Can only create new table once" + + m = MetaData() + schema = self.table.schema + + if self.partial_reordering or self.add_col_ordering: + self._adjust_self_columns_for_partial_reordering() + + self.new_table = new_table = Table( + self.temp_table_name, + m, + *(list(self.columns.values()) + list(self.table_args)), + schema=schema, + **self.table_kwargs, + ) + + for const in ( + list(self.named_constraints.values()) + self.unnamed_constraints + ): + const_columns = {c.key for c in _columns_for_constraint(const)} + + if not const_columns.issubset(self.column_transfers): + continue + + const_copy: Constraint + if isinstance(const, ForeignKeyConstraint): + if _fk_is_self_referential(const): + # for self-referential constraint, refer to the + # *original* table name, and not _alembic_batch_temp. + # This is consistent with how we're handling + # FK constraints from other tables; we assume SQLite + # no foreign keys just keeps the names unchanged, so + # when we rename back, they match again. + const_copy = _copy( + const, schema=schema, target_table=self.table + ) + else: + # "target_table" for ForeignKeyConstraint.copy() is + # only used if the FK is detected as being + # self-referential, which we are handling above. + const_copy = _copy(const, schema=schema) + else: + const_copy = _copy( + const, schema=schema, target_table=new_table + ) + if isinstance(const, ForeignKeyConstraint): + self._setup_referent(m, const) + new_table.append_constraint(const_copy) + + def _gather_indexes_from_both_tables(self) -> List[Index]: + assert self.new_table is not None + idx: List[Index] = [] + + for idx_existing in self.indexes.values(): + # this is a lift-and-move from Table.to_metadata + + if idx_existing._column_flag: + continue + + idx_copy = Index( + idx_existing.name, + unique=idx_existing.unique, + *[ + _copy_expression(expr, self.new_table) + for expr in _idx_table_bound_expressions(idx_existing) + ], + _table=self.new_table, + **idx_existing.kwargs, + ) + idx.append(idx_copy) + + for index in self.new_indexes.values(): + idx.append( + Index( + index.name, + unique=index.unique, + *[self.new_table.c[col] for col in index.columns.keys()], + **index.kwargs, + ) + ) + return idx + + def _setup_referent( + self, metadata: MetaData, constraint: ForeignKeyConstraint + ) -> None: + spec = constraint.elements[0]._get_colspec() + parts = spec.split(".") + tname = parts[-2] + if len(parts) == 3: + referent_schema = parts[0] + else: + referent_schema = None + + if tname != self.temp_table_name: + key = sql_schema._get_table_key(tname, referent_schema) + + def colspec(elem: Any): + return elem._get_colspec() + + if key in metadata.tables: + t = metadata.tables[key] + for elem in constraint.elements: + colname = colspec(elem).split(".")[-1] + if colname not in t.c: + t.append_column(Column(colname, sqltypes.NULLTYPE)) + else: + Table( + tname, + metadata, + *[ + Column(n, sqltypes.NULLTYPE) + for n in [ + colspec(elem).split(".")[-1] + for elem in constraint.elements + ] + ], + schema=referent_schema, + ) + + def _create(self, op_impl: 
DefaultImpl) -> None: + self._transfer_elements_to_new_table() + + op_impl.prep_table_for_batch(self, self.table) + assert self.new_table is not None + op_impl.create_table(self.new_table) + + try: + op_impl._exec( + self.new_table.insert() + .inline() + .from_select( + list( + k + for k, transfer in self.column_transfers.items() + if "expr" in transfer + ), + select( + *[ + transfer["expr"] + for transfer in self.column_transfers.values() + if "expr" in transfer + ] + ), + ) + ) + op_impl.drop_table(self.table) + except: + op_impl.drop_table(self.new_table) + raise + else: + op_impl.rename_table( + self.temp_table_name, self.table.name, schema=self.table.schema + ) + self.new_table.name = self.table.name + try: + for idx in self._gather_indexes_from_both_tables(): + op_impl.create_index(idx) + finally: + self.new_table.name = self.temp_table_name + + def alter_column( + self, + table_name: str, + column_name: str, + nullable: Optional[bool] = None, + server_default: Optional[Union[Function[Any], str, bool]] = False, + name: Optional[str] = None, + type_: Optional[TypeEngine] = None, + autoincrement: Optional[Union[bool, Literal["auto"]]] = None, + comment: Union[str, Literal[False]] = False, + **kw, + ) -> None: + existing = self.columns[column_name] + existing_transfer: Dict[str, Any] = self.column_transfers[column_name] + if name is not None and name != column_name: + # note that we don't change '.key' - we keep referring + # to the renamed column by its old key in _create(). neat! + existing.name = name + existing_transfer["name"] = name + + existing_type = kw.get("existing_type", None) + if existing_type: + resolved_existing_type = _resolve_for_variant( + kw["existing_type"], self.impl.dialect + ) + + # pop named constraints for Boolean/Enum for rename + if ( + isinstance(resolved_existing_type, SchemaEventTarget) + and resolved_existing_type.name # type:ignore[attr-defined] # noqa E501 + ): + self.named_constraints.pop( + resolved_existing_type.name, # type:ignore[attr-defined] # noqa E501 + None, + ) + + if type_ is not None: + type_ = sqltypes.to_instance(type_) + # old type is being discarded so turn off eventing + # rules. Alternatively we can + # erase the events set up by this type, but this is simpler. 
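+            # i.e. for SchemaEventTarget types such as Boolean or Enum, the
+            # create_constraint / _create_events flags toggled below would
+            # otherwise emit a CHECK constraint for the discarded type when
+            # the copied table is created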
+ # we also ignore the drop_constraint that will come here from + # Operations.implementation_for(alter_column) + + if isinstance(existing.type, SchemaEventTarget): + existing.type._create_events = ( # type:ignore[attr-defined] + existing.type.create_constraint # type:ignore[attr-defined] # noqa + ) = False + + self.impl.cast_for_batch_migrate( + existing, existing_transfer, type_ + ) + + existing.type = type_ + + # we *dont* however set events for the new type, because + # alter_column is invoked from + # Operations.implementation_for(alter_column) which already + # will emit an add_constraint() + + if nullable is not None: + existing.nullable = nullable + if server_default is not False: + if server_default is None: + existing.server_default = None + else: + sql_schema.DefaultClause( + server_default # type: ignore[arg-type] + )._set_parent(existing) + if autoincrement is not None: + existing.autoincrement = bool(autoincrement) + + if comment is not False: + existing.comment = comment + + def _setup_dependencies_for_add_column( + self, + colname: str, + insert_before: Optional[str], + insert_after: Optional[str], + ) -> None: + index_cols = self.existing_ordering + col_indexes = {name: i for i, name in enumerate(index_cols)} + + if not self.partial_reordering: + if insert_after: + if not insert_before: + if insert_after in col_indexes: + # insert after an existing column + idx = col_indexes[insert_after] + 1 + if idx < len(index_cols): + insert_before = index_cols[idx] + else: + # insert after a column that is also new + insert_before = dict(self.add_col_ordering)[ + insert_after + ] + if insert_before: + if not insert_after: + if insert_before in col_indexes: + # insert before an existing column + idx = col_indexes[insert_before] - 1 + if idx >= 0: + insert_after = index_cols[idx] + else: + # insert before a column that is also new + insert_after = { + b: a for a, b in self.add_col_ordering + }[insert_before] + + if insert_before: + self.add_col_ordering += ((colname, insert_before),) + if insert_after: + self.add_col_ordering += ((insert_after, colname),) + + if ( + not self.partial_reordering + and not insert_before + and not insert_after + and col_indexes + ): + self.add_col_ordering += ((index_cols[-1], colname),) + + def add_column( + self, + table_name: str, + column: Column[Any], + insert_before: Optional[str] = None, + insert_after: Optional[str] = None, + **kw, + ) -> None: + self._setup_dependencies_for_add_column( + column.name, insert_before, insert_after + ) + # we copy the column because operations.add_column() + # gives us a Column that is part of a Table already. 
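+        # the copy is keyed by column name so that later operations in the
+        # same batch (alter_column, drop_column) can find it; the empty
+        # transfer entry marks a brand-new column, which _create() then
+        # leaves out of the INSERT..SELECT that copies data from the old
+        # table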
+ self.columns[column.name] = _copy(column, schema=self.table.schema) + self.column_transfers[column.name] = {} + + def drop_column( + self, + table_name: str, + column: Union[ColumnClause[Any], Column[Any]], + **kw, + ) -> None: + if column.name in self.table.primary_key.columns: + _remove_column_from_collection( + self.table.primary_key.columns, column + ) + del self.columns[column.name] + del self.column_transfers[column.name] + self.existing_ordering.remove(column.name) + + # pop named constraints for Boolean/Enum for rename + if ( + "existing_type" in kw + and isinstance(kw["existing_type"], SchemaEventTarget) + and kw["existing_type"].name # type:ignore[attr-defined] + ): + self.named_constraints.pop( + kw["existing_type"].name, None # type:ignore[attr-defined] + ) + + def create_column_comment(self, column): + """the batch table creation function will issue create_column_comment + on the real "impl" as part of the create table process. + + That is, the Column object will have the comment on it already, + so when it is received by add_column() it will be a normal part of + the CREATE TABLE and doesn't need an extra step here. + + """ + + def create_table_comment(self, table): + """the batch table creation function will issue create_table_comment + on the real "impl" as part of the create table process. + + """ + + def drop_table_comment(self, table): + """the batch table creation function will issue drop_table_comment + on the real "impl" as part of the create table process. + + """ + + def add_constraint(self, const: Constraint) -> None: + if not constraint_name_defined(const.name): + raise ValueError("Constraint must have a name") + if isinstance(const, sql_schema.PrimaryKeyConstraint): + if self.table.primary_key in self.unnamed_constraints: + self.unnamed_constraints.remove(self.table.primary_key) + + if constraint_name_string(const.name): + self.named_constraints[const.name] = const + else: + self.unnamed_constraints.append(const) + + def drop_constraint(self, const: Constraint) -> None: + if not const.name: + raise ValueError("Constraint must have a name") + try: + if const.name in self.col_named_constraints: + col, const = self.col_named_constraints.pop(const.name) + + for col_const in list(self.columns[col.name].constraints): + if col_const.name == const.name: + self.columns[col.name].constraints.remove(col_const) + elif constraint_name_string(const.name): + const = self.named_constraints.pop(const.name) + elif const in self.unnamed_constraints: + self.unnamed_constraints.remove(const) + + except KeyError: + if _is_type_bound(const): + # type-bound constraints are only included in the new + # table via their type object in any case, so ignore the + # drop_constraint() that comes here via the + # Operations.implementation_for(alter_column) + return + raise ValueError("No such constraint: '%s'" % const.name) + else: + if isinstance(const, PrimaryKeyConstraint): + for col in const.columns: + self.columns[col.name].primary_key = False + + def create_index(self, idx: Index) -> None: + self.new_indexes[idx.name] = idx # type: ignore[index] + + def drop_index(self, idx: Index) -> None: + try: + del self.indexes[idx.name] # type: ignore[arg-type] + except KeyError: + raise ValueError("No such index: '%s'" % idx.name) + + def rename_table(self, *arg, **kw): + raise NotImplementedError("TODO") diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/operations/ops.py 
b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/operations/ops.py new file mode 100644 index 0000000000000000000000000000000000000000..c9b1526b61ffcb99770eeadd61d5e1e09c4e4066 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/operations/ops.py @@ -0,0 +1,2842 @@ +from __future__ import annotations + +from abc import abstractmethod +import os +import pathlib +import re +from typing import Any +from typing import Callable +from typing import cast +from typing import Dict +from typing import FrozenSet +from typing import Iterator +from typing import List +from typing import MutableMapping +from typing import Optional +from typing import Sequence +from typing import Set +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING +from typing import TypeVar +from typing import Union + +from sqlalchemy.types import NULLTYPE + +from . import schemaobj +from .base import BatchOperations +from .base import Operations +from .. import util +from ..util import sqla_compat + +if TYPE_CHECKING: + from typing import Literal + + from sqlalchemy.sql import Executable + from sqlalchemy.sql.elements import ColumnElement + from sqlalchemy.sql.elements import conv + from sqlalchemy.sql.elements import quoted_name + from sqlalchemy.sql.elements import TextClause + from sqlalchemy.sql.schema import CheckConstraint + from sqlalchemy.sql.schema import Column + from sqlalchemy.sql.schema import Computed + from sqlalchemy.sql.schema import Constraint + from sqlalchemy.sql.schema import ForeignKeyConstraint + from sqlalchemy.sql.schema import Identity + from sqlalchemy.sql.schema import Index + from sqlalchemy.sql.schema import MetaData + from sqlalchemy.sql.schema import PrimaryKeyConstraint + from sqlalchemy.sql.schema import SchemaItem + from sqlalchemy.sql.schema import Table + from sqlalchemy.sql.schema import UniqueConstraint + from sqlalchemy.sql.selectable import TableClause + from sqlalchemy.sql.type_api import TypeEngine + + from ..autogenerate.rewriter import Rewriter + from ..runtime.migration import MigrationContext + from ..script.revision import _RevIdType + +_T = TypeVar("_T", bound=Any) +_AC = TypeVar("_AC", bound="AddConstraintOp") + + +class MigrateOperation: + """base class for migration command and organization objects. + + This system is part of the operation extensibility API. + + .. seealso:: + + :ref:`operation_objects` + + :ref:`operation_plugins` + + :ref:`customizing_revision` + + """ + + @util.memoized_property + def info(self) -> Dict[Any, Any]: + """A dictionary that may be used to store arbitrary information + along with this :class:`.MigrateOperation` object. 
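+
+        For instance, a hook that receives migration directives (such as a
+        ``process_revision_directives`` callable) might stash ad-hoc data
+        here; the key and value below are purely illustrative::
+
+            directive.info["reviewed"] = False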
+ + """ + return {} + + _mutations: FrozenSet[Rewriter] = frozenset() + + def reverse(self) -> MigrateOperation: + raise NotImplementedError + + def to_diff_tuple(self) -> Tuple[Any, ...]: + raise NotImplementedError + + +class AddConstraintOp(MigrateOperation): + """Represent an add constraint operation.""" + + add_constraint_ops = util.Dispatcher() + + @property + def constraint_type(self) -> str: + raise NotImplementedError() + + @classmethod + def register_add_constraint( + cls, type_: str + ) -> Callable[[Type[_AC]], Type[_AC]]: + def go(klass: Type[_AC]) -> Type[_AC]: + cls.add_constraint_ops.dispatch_for(type_)(klass.from_constraint) + return klass + + return go + + @classmethod + def from_constraint(cls, constraint: Constraint) -> AddConstraintOp: + return cls.add_constraint_ops.dispatch(constraint.__visit_name__)( # type: ignore[no-any-return] # noqa: E501 + constraint + ) + + @abstractmethod + def to_constraint( + self, migration_context: Optional[MigrationContext] = None + ) -> Constraint: + pass + + def reverse(self) -> DropConstraintOp: + return DropConstraintOp.from_constraint(self.to_constraint()) + + def to_diff_tuple(self) -> Tuple[str, Constraint]: + return ("add_constraint", self.to_constraint()) + + +@Operations.register_operation("drop_constraint") +@BatchOperations.register_operation("drop_constraint", "batch_drop_constraint") +class DropConstraintOp(MigrateOperation): + """Represent a drop constraint operation.""" + + def __init__( + self, + constraint_name: Optional[sqla_compat._ConstraintNameDefined], + table_name: str, + type_: Optional[str] = None, + *, + schema: Optional[str] = None, + if_exists: Optional[bool] = None, + _reverse: Optional[AddConstraintOp] = None, + ) -> None: + self.constraint_name = constraint_name + self.table_name = table_name + self.constraint_type = type_ + self.schema = schema + self.if_exists = if_exists + self._reverse = _reverse + + def reverse(self) -> AddConstraintOp: + return AddConstraintOp.from_constraint(self.to_constraint()) + + def to_diff_tuple( + self, + ) -> Tuple[str, SchemaItem]: + if self.constraint_type == "foreignkey": + return ("remove_fk", self.to_constraint()) + else: + return ("remove_constraint", self.to_constraint()) + + @classmethod + def from_constraint(cls, constraint: Constraint) -> DropConstraintOp: + types = { + "unique_constraint": "unique", + "foreign_key_constraint": "foreignkey", + "primary_key_constraint": "primary", + "check_constraint": "check", + "column_check_constraint": "check", + "table_or_column_check_constraint": "check", + } + + constraint_table = sqla_compat._table_for_constraint(constraint) + return cls( + sqla_compat.constraint_name_or_none(constraint.name), + constraint_table.name, + schema=constraint_table.schema, + type_=types.get(constraint.__visit_name__), + _reverse=AddConstraintOp.from_constraint(constraint), + ) + + def to_constraint(self) -> Constraint: + if self._reverse is not None: + constraint = self._reverse.to_constraint() + constraint.name = self.constraint_name + constraint_table = sqla_compat._table_for_constraint(constraint) + constraint_table.name = self.table_name + constraint_table.schema = self.schema + + return constraint + else: + raise ValueError( + "constraint cannot be produced; " + "original constraint is not present" + ) + + @classmethod + def drop_constraint( + cls, + operations: Operations, + constraint_name: str, + table_name: str, + type_: Optional[str] = None, + *, + schema: Optional[str] = None, + if_exists: Optional[bool] = None, + ) -> None: + r"""Drop 
a constraint of the given name, typically via DROP CONSTRAINT. + + :param constraint_name: name of the constraint. + :param table_name: table name. + :param type\_: optional, required on MySQL. can be + 'foreignkey', 'primary', 'unique', or 'check'. + :param schema: Optional schema name to operate within. To control + quoting of the schema outside of the default behavior, use + the SQLAlchemy construct + :class:`~sqlalchemy.sql.elements.quoted_name`. + :param if_exists: If True, adds IF EXISTS operator when + dropping the constraint + + .. versionadded:: 1.16.0 + + """ + + op = cls( + constraint_name, + table_name, + type_=type_, + schema=schema, + if_exists=if_exists, + ) + return operations.invoke(op) + + @classmethod + def batch_drop_constraint( + cls, + operations: BatchOperations, + constraint_name: str, + type_: Optional[str] = None, + ) -> None: + """Issue a "drop constraint" instruction using the + current batch migration context. + + The batch form of this call omits the ``table_name`` and ``schema`` + arguments from the call. + + .. seealso:: + + :meth:`.Operations.drop_constraint` + + """ + op = cls( + constraint_name, + operations.impl.table_name, + type_=type_, + schema=operations.impl.schema, + ) + return operations.invoke(op) + + +@Operations.register_operation("create_primary_key") +@BatchOperations.register_operation( + "create_primary_key", "batch_create_primary_key" +) +@AddConstraintOp.register_add_constraint("primary_key_constraint") +class CreatePrimaryKeyOp(AddConstraintOp): + """Represent a create primary key operation.""" + + constraint_type = "primarykey" + + def __init__( + self, + constraint_name: Optional[sqla_compat._ConstraintNameDefined], + table_name: str, + columns: Sequence[str], + *, + schema: Optional[str] = None, + **kw: Any, + ) -> None: + self.constraint_name = constraint_name + self.table_name = table_name + self.columns = columns + self.schema = schema + self.kw = kw + + @classmethod + def from_constraint(cls, constraint: Constraint) -> CreatePrimaryKeyOp: + constraint_table = sqla_compat._table_for_constraint(constraint) + pk_constraint = cast("PrimaryKeyConstraint", constraint) + return cls( + sqla_compat.constraint_name_or_none(pk_constraint.name), + constraint_table.name, + pk_constraint.columns.keys(), + schema=constraint_table.schema, + **pk_constraint.dialect_kwargs, + ) + + def to_constraint( + self, migration_context: Optional[MigrationContext] = None + ) -> PrimaryKeyConstraint: + schema_obj = schemaobj.SchemaObjects(migration_context) + + return schema_obj.primary_key_constraint( + self.constraint_name, + self.table_name, + self.columns, + schema=self.schema, + **self.kw, + ) + + @classmethod + def create_primary_key( + cls, + operations: Operations, + constraint_name: Optional[str], + table_name: str, + columns: List[str], + *, + schema: Optional[str] = None, + ) -> None: + """Issue a "create primary key" instruction using the current + migration context. + + e.g.:: + + from alembic import op + + op.create_primary_key("pk_my_table", "my_table", ["id", "version"]) + + This internally generates a :class:`~sqlalchemy.schema.Table` object + containing the necessary columns, then generates a new + :class:`~sqlalchemy.schema.PrimaryKeyConstraint` + object which it then associates with the + :class:`~sqlalchemy.schema.Table`. + Any event listeners associated with this action will be fired + off normally. The :class:`~sqlalchemy.schema.AddConstraint` + construct is ultimately used to generate the ALTER statement. 
+ + :param constraint_name: Name of the primary key constraint. The name + is necessary so that an ALTER statement can be emitted. For setups + that use an automated naming scheme such as that described at + :ref:`sqla:constraint_naming_conventions` + ``name`` here can be ``None``, as the event listener will + apply the name to the constraint object when it is associated + with the table. + :param table_name: String name of the target table. + :param columns: a list of string column names to be applied to the + primary key constraint. + :param schema: Optional schema name to operate within. To control + quoting of the schema outside of the default behavior, use + the SQLAlchemy construct + :class:`~sqlalchemy.sql.elements.quoted_name`. + + """ + op = cls(constraint_name, table_name, columns, schema=schema) + return operations.invoke(op) + + @classmethod + def batch_create_primary_key( + cls, + operations: BatchOperations, + constraint_name: Optional[str], + columns: List[str], + ) -> None: + """Issue a "create primary key" instruction using the + current batch migration context. + + The batch form of this call omits the ``table_name`` and ``schema`` + arguments from the call. + + .. seealso:: + + :meth:`.Operations.create_primary_key` + + """ + op = cls( + constraint_name, + operations.impl.table_name, + columns, + schema=operations.impl.schema, + ) + return operations.invoke(op) + + +@Operations.register_operation("create_unique_constraint") +@BatchOperations.register_operation( + "create_unique_constraint", "batch_create_unique_constraint" +) +@AddConstraintOp.register_add_constraint("unique_constraint") +class CreateUniqueConstraintOp(AddConstraintOp): + """Represent a create unique constraint operation.""" + + constraint_type = "unique" + + def __init__( + self, + constraint_name: Optional[sqla_compat._ConstraintNameDefined], + table_name: str, + columns: Sequence[str], + *, + schema: Optional[str] = None, + **kw: Any, + ) -> None: + self.constraint_name = constraint_name + self.table_name = table_name + self.columns = columns + self.schema = schema + self.kw = kw + + @classmethod + def from_constraint( + cls, constraint: Constraint + ) -> CreateUniqueConstraintOp: + constraint_table = sqla_compat._table_for_constraint(constraint) + + uq_constraint = cast("UniqueConstraint", constraint) + + kw: Dict[str, Any] = {} + if uq_constraint.deferrable: + kw["deferrable"] = uq_constraint.deferrable + if uq_constraint.initially: + kw["initially"] = uq_constraint.initially + kw.update(uq_constraint.dialect_kwargs) + return cls( + sqla_compat.constraint_name_or_none(uq_constraint.name), + constraint_table.name, + [c.name for c in uq_constraint.columns], + schema=constraint_table.schema, + **kw, + ) + + def to_constraint( + self, migration_context: Optional[MigrationContext] = None + ) -> UniqueConstraint: + schema_obj = schemaobj.SchemaObjects(migration_context) + return schema_obj.unique_constraint( + self.constraint_name, + self.table_name, + self.columns, + schema=self.schema, + **self.kw, + ) + + @classmethod + def create_unique_constraint( + cls, + operations: Operations, + constraint_name: Optional[str], + table_name: str, + columns: Sequence[str], + *, + schema: Optional[str] = None, + **kw: Any, + ) -> Any: + """Issue a "create unique constraint" instruction using the + current migration context. 
+ + e.g.:: + + from alembic import op + op.create_unique_constraint("uq_user_name", "user", ["name"]) + + This internally generates a :class:`~sqlalchemy.schema.Table` object + containing the necessary columns, then generates a new + :class:`~sqlalchemy.schema.UniqueConstraint` + object which it then associates with the + :class:`~sqlalchemy.schema.Table`. + Any event listeners associated with this action will be fired + off normally. The :class:`~sqlalchemy.schema.AddConstraint` + construct is ultimately used to generate the ALTER statement. + + :param name: Name of the unique constraint. The name is necessary + so that an ALTER statement can be emitted. For setups that + use an automated naming scheme such as that described at + :ref:`sqla:constraint_naming_conventions`, + ``name`` here can be ``None``, as the event listener will + apply the name to the constraint object when it is associated + with the table. + :param table_name: String name of the source table. + :param columns: a list of string column names in the + source table. + :param deferrable: optional bool. If set, emit DEFERRABLE or + NOT DEFERRABLE when issuing DDL for this constraint. + :param initially: optional string. If set, emit INITIALLY + when issuing DDL for this constraint. + :param schema: Optional schema name to operate within. To control + quoting of the schema outside of the default behavior, use + the SQLAlchemy construct + :class:`~sqlalchemy.sql.elements.quoted_name`. + + """ + + op = cls(constraint_name, table_name, columns, schema=schema, **kw) + return operations.invoke(op) + + @classmethod + def batch_create_unique_constraint( + cls, + operations: BatchOperations, + constraint_name: str, + columns: Sequence[str], + **kw: Any, + ) -> Any: + """Issue a "create unique constraint" instruction using the + current batch migration context. + + The batch form of this call omits the ``source`` and ``schema`` + arguments from the call. + + .. 
seealso:: + + :meth:`.Operations.create_unique_constraint` + + """ + kw["schema"] = operations.impl.schema + op = cls(constraint_name, operations.impl.table_name, columns, **kw) + return operations.invoke(op) + + +@Operations.register_operation("create_foreign_key") +@BatchOperations.register_operation( + "create_foreign_key", "batch_create_foreign_key" +) +@AddConstraintOp.register_add_constraint("foreign_key_constraint") +class CreateForeignKeyOp(AddConstraintOp): + """Represent a create foreign key constraint operation.""" + + constraint_type = "foreignkey" + + def __init__( + self, + constraint_name: Optional[sqla_compat._ConstraintNameDefined], + source_table: str, + referent_table: str, + local_cols: List[str], + remote_cols: List[str], + **kw: Any, + ) -> None: + self.constraint_name = constraint_name + self.source_table = source_table + self.referent_table = referent_table + self.local_cols = local_cols + self.remote_cols = remote_cols + self.kw = kw + + def to_diff_tuple(self) -> Tuple[str, ForeignKeyConstraint]: + return ("add_fk", self.to_constraint()) + + @classmethod + def from_constraint(cls, constraint: Constraint) -> CreateForeignKeyOp: + fk_constraint = cast("ForeignKeyConstraint", constraint) + kw: Dict[str, Any] = {} + if fk_constraint.onupdate: + kw["onupdate"] = fk_constraint.onupdate + if fk_constraint.ondelete: + kw["ondelete"] = fk_constraint.ondelete + if fk_constraint.initially: + kw["initially"] = fk_constraint.initially + if fk_constraint.deferrable: + kw["deferrable"] = fk_constraint.deferrable + if fk_constraint.use_alter: + kw["use_alter"] = fk_constraint.use_alter + if fk_constraint.match: + kw["match"] = fk_constraint.match + + ( + source_schema, + source_table, + source_columns, + target_schema, + target_table, + target_columns, + onupdate, + ondelete, + deferrable, + initially, + ) = sqla_compat._fk_spec(fk_constraint) + + kw["source_schema"] = source_schema + kw["referent_schema"] = target_schema + kw.update(fk_constraint.dialect_kwargs) + return cls( + sqla_compat.constraint_name_or_none(fk_constraint.name), + source_table, + target_table, + source_columns, + target_columns, + **kw, + ) + + def to_constraint( + self, migration_context: Optional[MigrationContext] = None + ) -> ForeignKeyConstraint: + schema_obj = schemaobj.SchemaObjects(migration_context) + return schema_obj.foreign_key_constraint( + self.constraint_name, + self.source_table, + self.referent_table, + self.local_cols, + self.remote_cols, + **self.kw, + ) + + @classmethod + def create_foreign_key( + cls, + operations: Operations, + constraint_name: Optional[str], + source_table: str, + referent_table: str, + local_cols: List[str], + remote_cols: List[str], + *, + onupdate: Optional[str] = None, + ondelete: Optional[str] = None, + deferrable: Optional[bool] = None, + initially: Optional[str] = None, + match: Optional[str] = None, + source_schema: Optional[str] = None, + referent_schema: Optional[str] = None, + **dialect_kw: Any, + ) -> None: + """Issue a "create foreign key" instruction using the + current migration context. + + e.g.:: + + from alembic import op + + op.create_foreign_key( + "fk_user_address", + "address", + "user", + ["user_id"], + ["id"], + ) + + This internally generates a :class:`~sqlalchemy.schema.Table` object + containing the necessary columns, then generates a new + :class:`~sqlalchemy.schema.ForeignKeyConstraint` + object which it then associates with the + :class:`~sqlalchemy.schema.Table`. 
+ Any event listeners associated with this action will be fired + off normally. The :class:`~sqlalchemy.schema.AddConstraint` + construct is ultimately used to generate the ALTER statement. + + :param constraint_name: Name of the foreign key constraint. The name + is necessary so that an ALTER statement can be emitted. For setups + that use an automated naming scheme such as that described at + :ref:`sqla:constraint_naming_conventions`, + ``name`` here can be ``None``, as the event listener will + apply the name to the constraint object when it is associated + with the table. + :param source_table: String name of the source table. + :param referent_table: String name of the destination table. + :param local_cols: a list of string column names in the + source table. + :param remote_cols: a list of string column names in the + remote table. + :param onupdate: Optional string. If set, emit ON UPDATE when + issuing DDL for this constraint. Typical values include CASCADE, + DELETE and RESTRICT. + :param ondelete: Optional string. If set, emit ON DELETE when + issuing DDL for this constraint. Typical values include CASCADE, + DELETE and RESTRICT. + :param deferrable: optional bool. If set, emit DEFERRABLE or NOT + DEFERRABLE when issuing DDL for this constraint. + :param source_schema: Optional schema name of the source table. + :param referent_schema: Optional schema name of the destination table. + + """ + + op = cls( + constraint_name, + source_table, + referent_table, + local_cols, + remote_cols, + onupdate=onupdate, + ondelete=ondelete, + deferrable=deferrable, + source_schema=source_schema, + referent_schema=referent_schema, + initially=initially, + match=match, + **dialect_kw, + ) + return operations.invoke(op) + + @classmethod + def batch_create_foreign_key( + cls, + operations: BatchOperations, + constraint_name: Optional[str], + referent_table: str, + local_cols: List[str], + remote_cols: List[str], + *, + referent_schema: Optional[str] = None, + onupdate: Optional[str] = None, + ondelete: Optional[str] = None, + deferrable: Optional[bool] = None, + initially: Optional[str] = None, + match: Optional[str] = None, + **dialect_kw: Any, + ) -> None: + """Issue a "create foreign key" instruction using the + current batch migration context. + + The batch form of this call omits the ``source`` and ``source_schema`` + arguments from the call. + + e.g.:: + + with batch_alter_table("address") as batch_op: + batch_op.create_foreign_key( + "fk_user_address", + "user", + ["user_id"], + ["id"], + ) + + .. 
seealso:: + + :meth:`.Operations.create_foreign_key` + + """ + op = cls( + constraint_name, + operations.impl.table_name, + referent_table, + local_cols, + remote_cols, + onupdate=onupdate, + ondelete=ondelete, + deferrable=deferrable, + source_schema=operations.impl.schema, + referent_schema=referent_schema, + initially=initially, + match=match, + **dialect_kw, + ) + return operations.invoke(op) + + +@Operations.register_operation("create_check_constraint") +@BatchOperations.register_operation( + "create_check_constraint", "batch_create_check_constraint" +) +@AddConstraintOp.register_add_constraint("check_constraint") +@AddConstraintOp.register_add_constraint("table_or_column_check_constraint") +@AddConstraintOp.register_add_constraint("column_check_constraint") +class CreateCheckConstraintOp(AddConstraintOp): + """Represent a create check constraint operation.""" + + constraint_type = "check" + + def __init__( + self, + constraint_name: Optional[sqla_compat._ConstraintNameDefined], + table_name: str, + condition: Union[str, TextClause, ColumnElement[Any]], + *, + schema: Optional[str] = None, + **kw: Any, + ) -> None: + self.constraint_name = constraint_name + self.table_name = table_name + self.condition = condition + self.schema = schema + self.kw = kw + + @classmethod + def from_constraint( + cls, constraint: Constraint + ) -> CreateCheckConstraintOp: + constraint_table = sqla_compat._table_for_constraint(constraint) + + ck_constraint = cast("CheckConstraint", constraint) + return cls( + sqla_compat.constraint_name_or_none(ck_constraint.name), + constraint_table.name, + cast("ColumnElement[Any]", ck_constraint.sqltext), + schema=constraint_table.schema, + **ck_constraint.dialect_kwargs, + ) + + def to_constraint( + self, migration_context: Optional[MigrationContext] = None + ) -> CheckConstraint: + schema_obj = schemaobj.SchemaObjects(migration_context) + return schema_obj.check_constraint( + self.constraint_name, + self.table_name, + self.condition, + schema=self.schema, + **self.kw, + ) + + @classmethod + def create_check_constraint( + cls, + operations: Operations, + constraint_name: Optional[str], + table_name: str, + condition: Union[str, ColumnElement[bool], TextClause], + *, + schema: Optional[str] = None, + **kw: Any, + ) -> None: + """Issue a "create check constraint" instruction using the + current migration context. + + e.g.:: + + from alembic import op + from sqlalchemy.sql import column, func + + op.create_check_constraint( + "ck_user_name_len", + "user", + func.len(column("name")) > 5, + ) + + CHECK constraints are usually against a SQL expression, so ad-hoc + table metadata is usually needed. The function will convert the given + arguments into a :class:`sqlalchemy.schema.CheckConstraint` bound + to an anonymous table in order to emit the CREATE statement. + + :param name: Name of the check constraint. The name is necessary + so that an ALTER statement can be emitted. For setups that + use an automated naming scheme such as that described at + :ref:`sqla:constraint_naming_conventions`, + ``name`` here can be ``None``, as the event listener will + apply the name to the constraint object when it is associated + with the table. + :param table_name: String name of the source table. + :param condition: SQL expression that's the condition of the + constraint. Can be a string or SQLAlchemy expression language + structure. + :param deferrable: optional bool. If set, emit DEFERRABLE or + NOT DEFERRABLE when issuing DDL for this constraint. + :param initially: optional string. 
If set, emit INITIALLY + when issuing DDL for this constraint. + :param schema: Optional schema name to operate within. To control + quoting of the schema outside of the default behavior, use + the SQLAlchemy construct + :class:`~sqlalchemy.sql.elements.quoted_name`. + + """ + op = cls(constraint_name, table_name, condition, schema=schema, **kw) + return operations.invoke(op) + + @classmethod + def batch_create_check_constraint( + cls, + operations: BatchOperations, + constraint_name: str, + condition: Union[str, ColumnElement[bool], TextClause], + **kw: Any, + ) -> None: + """Issue a "create check constraint" instruction using the + current batch migration context. + + The batch form of this call omits the ``source`` and ``schema`` + arguments from the call. + + .. seealso:: + + :meth:`.Operations.create_check_constraint` + + """ + op = cls( + constraint_name, + operations.impl.table_name, + condition, + schema=operations.impl.schema, + **kw, + ) + return operations.invoke(op) + + +@Operations.register_operation("create_index") +@BatchOperations.register_operation("create_index", "batch_create_index") +class CreateIndexOp(MigrateOperation): + """Represent a create index operation.""" + + def __init__( + self, + index_name: Optional[str], + table_name: str, + columns: Sequence[Union[str, TextClause, ColumnElement[Any]]], + *, + schema: Optional[str] = None, + unique: bool = False, + if_not_exists: Optional[bool] = None, + **kw: Any, + ) -> None: + self.index_name = index_name + self.table_name = table_name + self.columns = columns + self.schema = schema + self.unique = unique + self.if_not_exists = if_not_exists + self.kw = kw + + def reverse(self) -> DropIndexOp: + return DropIndexOp.from_index(self.to_index()) + + def to_diff_tuple(self) -> Tuple[str, Index]: + return ("add_index", self.to_index()) + + @classmethod + def from_index(cls, index: Index) -> CreateIndexOp: + assert index.table is not None + return cls( + index.name, + index.table.name, + index.expressions, + schema=index.table.schema, + unique=index.unique, + **index.kwargs, + ) + + def to_index( + self, migration_context: Optional[MigrationContext] = None + ) -> Index: + schema_obj = schemaobj.SchemaObjects(migration_context) + + idx = schema_obj.index( + self.index_name, + self.table_name, + self.columns, + schema=self.schema, + unique=self.unique, + **self.kw, + ) + return idx + + @classmethod + def create_index( + cls, + operations: Operations, + index_name: Optional[str], + table_name: str, + columns: Sequence[Union[str, TextClause, ColumnElement[Any]]], + *, + schema: Optional[str] = None, + unique: bool = False, + if_not_exists: Optional[bool] = None, + **kw: Any, + ) -> None: + r"""Issue a "create index" instruction using the current + migration context. + + e.g.:: + + from alembic import op + + op.create_index("ik_test", "t1", ["foo", "bar"]) + + Functional indexes can be produced by using the + :func:`sqlalchemy.sql.expression.text` construct:: + + from alembic import op + from sqlalchemy import text + + op.create_index("ik_test", "t1", [text("lower(foo)")]) + + :param index_name: name of the index. + :param table_name: name of the owning table. + :param columns: a list consisting of string column names and/or + :func:`~sqlalchemy.sql.expression.text` constructs. + :param schema: Optional schema name to operate within. To control + quoting of the schema outside of the default behavior, use + the SQLAlchemy construct + :class:`~sqlalchemy.sql.elements.quoted_name`. + :param unique: If True, create a unique index. 
+ + :param quote: Force quoting of this column's name on or off, + corresponding to ``True`` or ``False``. When left at its default + of ``None``, the column identifier will be quoted according to + whether the name is case sensitive (identifiers with at least one + upper case character are treated as case sensitive), or if it's a + reserved word. This flag is only needed to force quoting of a + reserved word which is not known by the SQLAlchemy dialect. + + :param if_not_exists: If True, adds IF NOT EXISTS operator when + creating the new index. + + .. versionadded:: 1.12.0 + + :param \**kw: Additional keyword arguments not mentioned above are + dialect specific, and passed in the form + ``_``. + See the documentation regarding an individual dialect at + :ref:`dialect_toplevel` for detail on documented arguments. + + """ + op = cls( + index_name, + table_name, + columns, + schema=schema, + unique=unique, + if_not_exists=if_not_exists, + **kw, + ) + return operations.invoke(op) + + @classmethod + def batch_create_index( + cls, + operations: BatchOperations, + index_name: str, + columns: List[str], + **kw: Any, + ) -> None: + """Issue a "create index" instruction using the + current batch migration context. + + .. seealso:: + + :meth:`.Operations.create_index` + + """ + + op = cls( + index_name, + operations.impl.table_name, + columns, + schema=operations.impl.schema, + **kw, + ) + return operations.invoke(op) + + +@Operations.register_operation("drop_index") +@BatchOperations.register_operation("drop_index", "batch_drop_index") +class DropIndexOp(MigrateOperation): + """Represent a drop index operation.""" + + def __init__( + self, + index_name: Union[quoted_name, str, conv], + table_name: Optional[str] = None, + *, + schema: Optional[str] = None, + if_exists: Optional[bool] = None, + _reverse: Optional[CreateIndexOp] = None, + **kw: Any, + ) -> None: + self.index_name = index_name + self.table_name = table_name + self.schema = schema + self.if_exists = if_exists + self._reverse = _reverse + self.kw = kw + + def to_diff_tuple(self) -> Tuple[str, Index]: + return ("remove_index", self.to_index()) + + def reverse(self) -> CreateIndexOp: + return CreateIndexOp.from_index(self.to_index()) + + @classmethod + def from_index(cls, index: Index) -> DropIndexOp: + assert index.table is not None + return cls( + index.name, # type: ignore[arg-type] + table_name=index.table.name, + schema=index.table.schema, + _reverse=CreateIndexOp.from_index(index), + unique=index.unique, + **index.kwargs, + ) + + def to_index( + self, migration_context: Optional[MigrationContext] = None + ) -> Index: + schema_obj = schemaobj.SchemaObjects(migration_context) + + # need a dummy column name here since SQLAlchemy + # 0.7.6 and further raises on Index with no columns + return schema_obj.index( + self.index_name, + self.table_name, + self._reverse.columns if self._reverse else ["x"], + schema=self.schema, + **self.kw, + ) + + @classmethod + def drop_index( + cls, + operations: Operations, + index_name: str, + table_name: Optional[str] = None, + *, + schema: Optional[str] = None, + if_exists: Optional[bool] = None, + **kw: Any, + ) -> None: + r"""Issue a "drop index" instruction using the current + migration context. + + e.g.:: + + drop_index("accounts") + + :param index_name: name of the index. + :param table_name: name of the owning table. Some + backends such as Microsoft SQL Server require this. + :param schema: Optional schema name to operate within. 
To control + quoting of the schema outside of the default behavior, use + the SQLAlchemy construct + :class:`~sqlalchemy.sql.elements.quoted_name`. + + :param if_exists: If True, adds IF EXISTS operator when + dropping the index. + + .. versionadded:: 1.12.0 + + :param \**kw: Additional keyword arguments not mentioned above are + dialect specific, and passed in the form + ``_``. + See the documentation regarding an individual dialect at + :ref:`dialect_toplevel` for detail on documented arguments. + + """ + op = cls( + index_name, + table_name=table_name, + schema=schema, + if_exists=if_exists, + **kw, + ) + return operations.invoke(op) + + @classmethod + def batch_drop_index( + cls, operations: BatchOperations, index_name: str, **kw: Any + ) -> None: + """Issue a "drop index" instruction using the + current batch migration context. + + .. seealso:: + + :meth:`.Operations.drop_index` + + """ + + op = cls( + index_name, + table_name=operations.impl.table_name, + schema=operations.impl.schema, + **kw, + ) + return operations.invoke(op) + + +@Operations.register_operation("create_table") +class CreateTableOp(MigrateOperation): + """Represent a create table operation.""" + + def __init__( + self, + table_name: str, + columns: Sequence[SchemaItem], + *, + schema: Optional[str] = None, + if_not_exists: Optional[bool] = None, + _namespace_metadata: Optional[MetaData] = None, + _constraints_included: bool = False, + **kw: Any, + ) -> None: + self.table_name = table_name + self.columns = columns + self.schema = schema + self.if_not_exists = if_not_exists + self.info = kw.pop("info", {}) + self.comment = kw.pop("comment", None) + self.prefixes = kw.pop("prefixes", None) + self.kw = kw + self._namespace_metadata = _namespace_metadata + self._constraints_included = _constraints_included + + def reverse(self) -> DropTableOp: + return DropTableOp.from_table( + self.to_table(), _namespace_metadata=self._namespace_metadata + ) + + def to_diff_tuple(self) -> Tuple[str, Table]: + return ("add_table", self.to_table()) + + @classmethod + def from_table( + cls, table: Table, *, _namespace_metadata: Optional[MetaData] = None + ) -> CreateTableOp: + if _namespace_metadata is None: + _namespace_metadata = table.metadata + + return cls( + table.name, + list(table.c) + list(table.constraints), + schema=table.schema, + _namespace_metadata=_namespace_metadata, + # given a Table() object, this Table will contain full Index() + # and UniqueConstraint objects already constructed in response to + # each unique=True / index=True flag on a Column. Carry this + # state along so that when we re-convert back into a Table, we + # skip unique=True/index=True so that these constraints are + # not doubled up. 
see #844 #848 + _constraints_included=True, + comment=table.comment, + info=dict(table.info), + prefixes=list(table._prefixes), + **table.kwargs, + ) + + def to_table( + self, migration_context: Optional[MigrationContext] = None + ) -> Table: + schema_obj = schemaobj.SchemaObjects(migration_context) + + return schema_obj.table( + self.table_name, + *self.columns, + schema=self.schema, + prefixes=list(self.prefixes) if self.prefixes else [], + comment=self.comment, + info=self.info.copy() if self.info else {}, + _constraints_included=self._constraints_included, + **self.kw, + ) + + @classmethod + def create_table( + cls, + operations: Operations, + table_name: str, + *columns: SchemaItem, + if_not_exists: Optional[bool] = None, + **kw: Any, + ) -> Table: + r"""Issue a "create table" instruction using the current migration + context. + + This directive receives an argument list similar to that of the + traditional :class:`sqlalchemy.schema.Table` construct, but without the + metadata:: + + from sqlalchemy import INTEGER, VARCHAR, NVARCHAR, Column + from alembic import op + + op.create_table( + "account", + Column("id", INTEGER, primary_key=True), + Column("name", VARCHAR(50), nullable=False), + Column("description", NVARCHAR(200)), + Column("timestamp", TIMESTAMP, server_default=func.now()), + ) + + Note that :meth:`.create_table` accepts + :class:`~sqlalchemy.schema.Column` + constructs directly from the SQLAlchemy library. In particular, + default values to be created on the database side are + specified using the ``server_default`` parameter, and not + ``default`` which only specifies Python-side defaults:: + + from alembic import op + from sqlalchemy import Column, TIMESTAMP, func + + # specify "DEFAULT NOW" along with the "timestamp" column + op.create_table( + "account", + Column("id", INTEGER, primary_key=True), + Column("timestamp", TIMESTAMP, server_default=func.now()), + ) + + The function also returns a newly created + :class:`~sqlalchemy.schema.Table` object, corresponding to the table + specification given, which is suitable for + immediate SQL operations, in particular + :meth:`.Operations.bulk_insert`:: + + from sqlalchemy import INTEGER, VARCHAR, NVARCHAR, Column + from alembic import op + + account_table = op.create_table( + "account", + Column("id", INTEGER, primary_key=True), + Column("name", VARCHAR(50), nullable=False), + Column("description", NVARCHAR(200)), + Column("timestamp", TIMESTAMP, server_default=func.now()), + ) + + op.bulk_insert( + account_table, + [ + {"name": "A1", "description": "account 1"}, + {"name": "A2", "description": "account 2"}, + ], + ) + + :param table_name: Name of the table + :param \*columns: collection of :class:`~sqlalchemy.schema.Column` + objects within + the table, as well as optional :class:`~sqlalchemy.schema.Constraint` + objects + and :class:`~.sqlalchemy.schema.Index` objects. + :param schema: Optional schema name to operate within. To control + quoting of the schema outside of the default behavior, use + the SQLAlchemy construct + :class:`~sqlalchemy.sql.elements.quoted_name`. + :param if_not_exists: If True, adds IF NOT EXISTS operator when + creating the new table. + + .. versionadded:: 1.13.3 + :param \**kw: Other keyword arguments are passed to the underlying + :class:`sqlalchemy.schema.Table` object created for the command. + + :return: the :class:`~sqlalchemy.schema.Table` object corresponding + to the parameters given. 
+ + """ + op = cls(table_name, columns, if_not_exists=if_not_exists, **kw) + return operations.invoke(op) + + +@Operations.register_operation("drop_table") +class DropTableOp(MigrateOperation): + """Represent a drop table operation.""" + + def __init__( + self, + table_name: str, + *, + schema: Optional[str] = None, + if_exists: Optional[bool] = None, + table_kw: Optional[MutableMapping[Any, Any]] = None, + _reverse: Optional[CreateTableOp] = None, + ) -> None: + self.table_name = table_name + self.schema = schema + self.if_exists = if_exists + self.table_kw = table_kw or {} + self.comment = self.table_kw.pop("comment", None) + self.info = self.table_kw.pop("info", None) + self.prefixes = self.table_kw.pop("prefixes", None) + self._reverse = _reverse + + def to_diff_tuple(self) -> Tuple[str, Table]: + return ("remove_table", self.to_table()) + + def reverse(self) -> CreateTableOp: + return CreateTableOp.from_table(self.to_table()) + + @classmethod + def from_table( + cls, table: Table, *, _namespace_metadata: Optional[MetaData] = None + ) -> DropTableOp: + return cls( + table.name, + schema=table.schema, + table_kw={ + "comment": table.comment, + "info": dict(table.info), + "prefixes": list(table._prefixes), + **table.kwargs, + }, + _reverse=CreateTableOp.from_table( + table, _namespace_metadata=_namespace_metadata + ), + ) + + def to_table( + self, migration_context: Optional[MigrationContext] = None + ) -> Table: + if self._reverse: + cols_and_constraints = self._reverse.columns + else: + cols_and_constraints = [] + + schema_obj = schemaobj.SchemaObjects(migration_context) + t = schema_obj.table( + self.table_name, + *cols_and_constraints, + comment=self.comment, + info=self.info.copy() if self.info else {}, + prefixes=list(self.prefixes) if self.prefixes else [], + schema=self.schema, + _constraints_included=( + self._reverse._constraints_included if self._reverse else False + ), + **self.table_kw, + ) + return t + + @classmethod + def drop_table( + cls, + operations: Operations, + table_name: str, + *, + schema: Optional[str] = None, + if_exists: Optional[bool] = None, + **kw: Any, + ) -> None: + r"""Issue a "drop table" instruction using the current + migration context. + + + e.g.:: + + drop_table("accounts") + + :param table_name: Name of the table + :param schema: Optional schema name to operate within. To control + quoting of the schema outside of the default behavior, use + the SQLAlchemy construct + :class:`~sqlalchemy.sql.elements.quoted_name`. + :param if_exists: If True, adds IF EXISTS operator when + dropping the table. + + .. versionadded:: 1.13.3 + :param \**kw: Other keyword arguments are passed to the underlying + :class:`sqlalchemy.schema.Table` object created for the command. 
+ + """ + op = cls(table_name, schema=schema, if_exists=if_exists, table_kw=kw) + operations.invoke(op) + + +class AlterTableOp(MigrateOperation): + """Represent an alter table operation.""" + + def __init__( + self, + table_name: str, + *, + schema: Optional[str] = None, + ) -> None: + self.table_name = table_name + self.schema = schema + + +@Operations.register_operation("rename_table") +class RenameTableOp(AlterTableOp): + """Represent a rename table operation.""" + + def __init__( + self, + old_table_name: str, + new_table_name: str, + *, + schema: Optional[str] = None, + ) -> None: + super().__init__(old_table_name, schema=schema) + self.new_table_name = new_table_name + + @classmethod + def rename_table( + cls, + operations: Operations, + old_table_name: str, + new_table_name: str, + *, + schema: Optional[str] = None, + ) -> None: + """Emit an ALTER TABLE to rename a table. + + :param old_table_name: old name. + :param new_table_name: new name. + :param schema: Optional schema name to operate within. To control + quoting of the schema outside of the default behavior, use + the SQLAlchemy construct + :class:`~sqlalchemy.sql.elements.quoted_name`. + + """ + op = cls(old_table_name, new_table_name, schema=schema) + return operations.invoke(op) + + +@Operations.register_operation("create_table_comment") +@BatchOperations.register_operation( + "create_table_comment", "batch_create_table_comment" +) +class CreateTableCommentOp(AlterTableOp): + """Represent a COMMENT ON `table` operation.""" + + def __init__( + self, + table_name: str, + comment: Optional[str], + *, + schema: Optional[str] = None, + existing_comment: Optional[str] = None, + ) -> None: + self.table_name = table_name + self.comment = comment + self.existing_comment = existing_comment + self.schema = schema + + @classmethod + def create_table_comment( + cls, + operations: Operations, + table_name: str, + comment: Optional[str], + *, + existing_comment: Optional[str] = None, + schema: Optional[str] = None, + ) -> None: + """Emit a COMMENT ON operation to set the comment for a table. + + :param table_name: string name of the target table. + :param comment: string value of the comment being registered against + the specified table. + :param existing_comment: String value of a comment + already registered on the specified table, used within autogenerate + so that the operation is reversible, but not required for direct + use. + + .. seealso:: + + :meth:`.Operations.drop_table_comment` + + :paramref:`.Operations.alter_column.comment` + + """ + + op = cls( + table_name, + comment, + existing_comment=existing_comment, + schema=schema, + ) + return operations.invoke(op) + + @classmethod + def batch_create_table_comment( + cls, + operations: BatchOperations, + comment: Optional[str], + *, + existing_comment: Optional[str] = None, + ) -> None: + """Emit a COMMENT ON operation to set the comment for a table + using the current batch migration context. + + :param comment: string value of the comment being registered against + the specified table. + :param existing_comment: String value of a comment + already registered on the specified table, used within autogenerate + so that the operation is reversible, but not required for direct + use. 
+ + """ + + op = cls( + operations.impl.table_name, + comment, + existing_comment=existing_comment, + schema=operations.impl.schema, + ) + return operations.invoke(op) + + def reverse(self) -> Union[CreateTableCommentOp, DropTableCommentOp]: + """Reverses the COMMENT ON operation against a table.""" + if self.existing_comment is None: + return DropTableCommentOp( + self.table_name, + existing_comment=self.comment, + schema=self.schema, + ) + else: + return CreateTableCommentOp( + self.table_name, + self.existing_comment, + existing_comment=self.comment, + schema=self.schema, + ) + + def to_table( + self, migration_context: Optional[MigrationContext] = None + ) -> Table: + schema_obj = schemaobj.SchemaObjects(migration_context) + + return schema_obj.table( + self.table_name, schema=self.schema, comment=self.comment + ) + + def to_diff_tuple(self) -> Tuple[Any, ...]: + return ("add_table_comment", self.to_table(), self.existing_comment) + + +@Operations.register_operation("drop_table_comment") +@BatchOperations.register_operation( + "drop_table_comment", "batch_drop_table_comment" +) +class DropTableCommentOp(AlterTableOp): + """Represent an operation to remove the comment from a table.""" + + def __init__( + self, + table_name: str, + *, + schema: Optional[str] = None, + existing_comment: Optional[str] = None, + ) -> None: + self.table_name = table_name + self.existing_comment = existing_comment + self.schema = schema + + @classmethod + def drop_table_comment( + cls, + operations: Operations, + table_name: str, + *, + existing_comment: Optional[str] = None, + schema: Optional[str] = None, + ) -> None: + """Issue a "drop table comment" operation to + remove an existing comment set on a table. + + :param table_name: string name of the target table. + :param existing_comment: An optional string value of a comment already + registered on the specified table. + + .. seealso:: + + :meth:`.Operations.create_table_comment` + + :paramref:`.Operations.alter_column.comment` + + """ + + op = cls(table_name, existing_comment=existing_comment, schema=schema) + return operations.invoke(op) + + @classmethod + def batch_drop_table_comment( + cls, + operations: BatchOperations, + *, + existing_comment: Optional[str] = None, + ) -> None: + """Issue a "drop table comment" operation to + remove an existing comment set on a table using the current + batch operations context. + + :param existing_comment: An optional string value of a comment already + registered on the specified table. 
+ + """ + + op = cls( + operations.impl.table_name, + existing_comment=existing_comment, + schema=operations.impl.schema, + ) + return operations.invoke(op) + + def reverse(self) -> CreateTableCommentOp: + """Reverses the COMMENT ON operation against a table.""" + return CreateTableCommentOp( + self.table_name, self.existing_comment, schema=self.schema + ) + + def to_table( + self, migration_context: Optional[MigrationContext] = None + ) -> Table: + schema_obj = schemaobj.SchemaObjects(migration_context) + + return schema_obj.table(self.table_name, schema=self.schema) + + def to_diff_tuple(self) -> Tuple[Any, ...]: + return ("remove_table_comment", self.to_table()) + + +@Operations.register_operation("alter_column") +@BatchOperations.register_operation("alter_column", "batch_alter_column") +class AlterColumnOp(AlterTableOp): + """Represent an alter column operation.""" + + def __init__( + self, + table_name: str, + column_name: str, + *, + schema: Optional[str] = None, + existing_type: Optional[Any] = None, + existing_server_default: Any = False, + existing_nullable: Optional[bool] = None, + existing_comment: Optional[str] = None, + modify_nullable: Optional[bool] = None, + modify_comment: Optional[Union[str, Literal[False]]] = False, + modify_server_default: Any = False, + modify_name: Optional[str] = None, + modify_type: Optional[Any] = None, + **kw: Any, + ) -> None: + super().__init__(table_name, schema=schema) + self.column_name = column_name + self.existing_type = existing_type + self.existing_server_default = existing_server_default + self.existing_nullable = existing_nullable + self.existing_comment = existing_comment + self.modify_nullable = modify_nullable + self.modify_comment = modify_comment + self.modify_server_default = modify_server_default + self.modify_name = modify_name + self.modify_type = modify_type + self.kw = kw + + def to_diff_tuple(self) -> Any: + col_diff = [] + schema, tname, cname = self.schema, self.table_name, self.column_name + + if self.modify_type is not None: + col_diff.append( + ( + "modify_type", + schema, + tname, + cname, + { + "existing_nullable": self.existing_nullable, + "existing_server_default": ( + self.existing_server_default + ), + "existing_comment": self.existing_comment, + }, + self.existing_type, + self.modify_type, + ) + ) + + if self.modify_nullable is not None: + col_diff.append( + ( + "modify_nullable", + schema, + tname, + cname, + { + "existing_type": self.existing_type, + "existing_server_default": ( + self.existing_server_default + ), + "existing_comment": self.existing_comment, + }, + self.existing_nullable, + self.modify_nullable, + ) + ) + + if self.modify_server_default is not False: + col_diff.append( + ( + "modify_default", + schema, + tname, + cname, + { + "existing_nullable": self.existing_nullable, + "existing_type": self.existing_type, + "existing_comment": self.existing_comment, + }, + self.existing_server_default, + self.modify_server_default, + ) + ) + + if self.modify_comment is not False: + col_diff.append( + ( + "modify_comment", + schema, + tname, + cname, + { + "existing_nullable": self.existing_nullable, + "existing_type": self.existing_type, + "existing_server_default": ( + self.existing_server_default + ), + }, + self.existing_comment, + self.modify_comment, + ) + ) + + return col_diff + + def has_changes(self) -> bool: + hc1 = ( + self.modify_nullable is not None + or self.modify_server_default is not False + or self.modify_type is not None + or self.modify_comment is not False + ) + if hc1: + return True + 
for kw in self.kw: + if kw.startswith("modify_"): + return True + else: + return False + + def reverse(self) -> AlterColumnOp: + kw = self.kw.copy() + kw["existing_type"] = self.existing_type + kw["existing_nullable"] = self.existing_nullable + kw["existing_server_default"] = self.existing_server_default + kw["existing_comment"] = self.existing_comment + if self.modify_type is not None: + kw["modify_type"] = self.modify_type + if self.modify_nullable is not None: + kw["modify_nullable"] = self.modify_nullable + if self.modify_server_default is not False: + kw["modify_server_default"] = self.modify_server_default + if self.modify_comment is not False: + kw["modify_comment"] = self.modify_comment + + # TODO: make this a little simpler + all_keys = { + m.group(1) + for m in [re.match(r"^(?:existing_|modify_)(.+)$", k) for k in kw] + if m + } + + for k in all_keys: + if "modify_%s" % k in kw: + swap = kw["existing_%s" % k] + kw["existing_%s" % k] = kw["modify_%s" % k] + kw["modify_%s" % k] = swap + + return self.__class__( + self.table_name, self.column_name, schema=self.schema, **kw + ) + + @classmethod + def alter_column( + cls, + operations: Operations, + table_name: str, + column_name: str, + *, + nullable: Optional[bool] = None, + comment: Optional[Union[str, Literal[False]]] = False, + server_default: Union[ + str, bool, Identity, Computed, TextClause, None + ] = False, + new_column_name: Optional[str] = None, + type_: Optional[Union[TypeEngine[Any], Type[TypeEngine[Any]]]] = None, + existing_type: Optional[ + Union[TypeEngine[Any], Type[TypeEngine[Any]]] + ] = None, + existing_server_default: Union[ + str, bool, Identity, Computed, TextClause, None + ] = False, + existing_nullable: Optional[bool] = None, + existing_comment: Optional[str] = None, + schema: Optional[str] = None, + **kw: Any, + ) -> None: + r"""Issue an "alter column" instruction using the + current migration context. + + Generally, only that aspect of the column which + is being changed, i.e. name, type, nullability, + default, needs to be specified. Multiple changes + can also be specified at once and the backend should + "do the right thing", emitting each change either + separately or together as the backend allows. + + MySQL has special requirements here, since MySQL + cannot ALTER a column without a full specification. + When producing MySQL-compatible migration files, + it is recommended that the ``existing_type``, + ``existing_server_default``, and ``existing_nullable`` + parameters be present, if not being altered. + + Type changes which are against the SQLAlchemy + "schema" types :class:`~sqlalchemy.types.Boolean` + and :class:`~sqlalchemy.types.Enum` may also + add or drop constraints which accompany those + types on backends that don't support them natively. + The ``existing_type`` argument is + used in this case to identify and remove a previous + constraint that was bound to the type object. + + :param table_name: string name of the target table. + :param column_name: string name of the target column, + as it exists before the operation begins. + :param nullable: Optional; specify ``True`` or ``False`` + to alter the column's nullability. + :param server_default: Optional; specify a string + SQL expression, :func:`~sqlalchemy.sql.expression.text`, + or :class:`~sqlalchemy.schema.DefaultClause` to indicate + an alteration to the column's default value. + Set to ``None`` to have the default removed. + :param comment: optional string text of a new comment to add to the + column. 
+ :param new_column_name: Optional; specify a string name here to + indicate the new name within a column rename operation. + :param type\_: Optional; a :class:`~sqlalchemy.types.TypeEngine` + type object to specify a change to the column's type. + For SQLAlchemy types that also indicate a constraint (i.e. + :class:`~sqlalchemy.types.Boolean`, :class:`~sqlalchemy.types.Enum`), + the constraint is also generated. + :param autoincrement: set the ``AUTO_INCREMENT`` flag of the column; + currently understood by the MySQL dialect. + :param existing_type: Optional; a + :class:`~sqlalchemy.types.TypeEngine` + type object to specify the previous type. This + is required for all MySQL column alter operations that + don't otherwise specify a new type, as well as for + when nullability is being changed on a SQL Server + column. It is also used if the type is a so-called + SQLAlchemy "schema" type which may define a constraint (i.e. + :class:`~sqlalchemy.types.Boolean`, + :class:`~sqlalchemy.types.Enum`), + so that the constraint can be dropped. + :param existing_server_default: Optional; The existing + default value of the column. Required on MySQL if + an existing default is not being changed; else MySQL + removes the default. + :param existing_nullable: Optional; the existing nullability + of the column. Required on MySQL if the existing nullability + is not being changed; else MySQL sets this to NULL. + :param existing_autoincrement: Optional; the existing autoincrement + of the column. Used for MySQL's system of altering a column + that specifies ``AUTO_INCREMENT``. + :param existing_comment: string text of the existing comment on the + column to be maintained. Required on MySQL if the existing comment + on the column is not being changed. + :param schema: Optional schema name to operate within. To control + quoting of the schema outside of the default behavior, use + the SQLAlchemy construct + :class:`~sqlalchemy.sql.elements.quoted_name`. + :param postgresql_using: String argument which will indicate a + SQL expression to render within the Postgresql-specific USING clause + within ALTER COLUMN. This string is taken directly as raw SQL which + must explicitly include any necessary quoting or escaping of tokens + within the expression. + + """ + + alt = cls( + table_name, + column_name, + schema=schema, + existing_type=existing_type, + existing_server_default=existing_server_default, + existing_nullable=existing_nullable, + existing_comment=existing_comment, + modify_name=new_column_name, + modify_type=type_, + modify_server_default=server_default, + modify_nullable=nullable, + modify_comment=comment, + **kw, + ) + + return operations.invoke(alt) + + @classmethod + def batch_alter_column( + cls, + operations: BatchOperations, + column_name: str, + *, + nullable: Optional[bool] = None, + comment: Optional[Union[str, Literal[False]]] = False, + server_default: Any = False, + new_column_name: Optional[str] = None, + type_: Optional[Union[TypeEngine[Any], Type[TypeEngine[Any]]]] = None, + existing_type: Optional[ + Union[TypeEngine[Any], Type[TypeEngine[Any]]] + ] = None, + existing_server_default: Optional[ + Union[str, bool, Identity, Computed] + ] = False, + existing_nullable: Optional[bool] = None, + existing_comment: Optional[str] = None, + insert_before: Optional[str] = None, + insert_after: Optional[str] = None, + **kw: Any, + ) -> None: + """Issue an "alter column" instruction using the current + batch migration context. 
+ + Parameters are the same as that of :meth:`.Operations.alter_column`, + as well as the following option(s): + + :param insert_before: String name of an existing column which this + column should be placed before, when creating the new table. + + :param insert_after: String name of an existing column which this + column should be placed after, when creating the new table. If + both :paramref:`.BatchOperations.alter_column.insert_before` + and :paramref:`.BatchOperations.alter_column.insert_after` are + omitted, the column is inserted after the last existing column + in the table. + + .. seealso:: + + :meth:`.Operations.alter_column` + + + """ + alt = cls( + operations.impl.table_name, + column_name, + schema=operations.impl.schema, + existing_type=existing_type, + existing_server_default=existing_server_default, + existing_nullable=existing_nullable, + existing_comment=existing_comment, + modify_name=new_column_name, + modify_type=type_, + modify_server_default=server_default, + modify_nullable=nullable, + modify_comment=comment, + insert_before=insert_before, + insert_after=insert_after, + **kw, + ) + + return operations.invoke(alt) + + +@Operations.register_operation("add_column") +@BatchOperations.register_operation("add_column", "batch_add_column") +class AddColumnOp(AlterTableOp): + """Represent an add column operation.""" + + def __init__( + self, + table_name: str, + column: Column[Any], + *, + schema: Optional[str] = None, + if_not_exists: Optional[bool] = None, + **kw: Any, + ) -> None: + super().__init__(table_name, schema=schema) + self.column = column + self.if_not_exists = if_not_exists + self.kw = kw + + def reverse(self) -> DropColumnOp: + op = DropColumnOp.from_column_and_tablename( + self.schema, self.table_name, self.column + ) + op.if_exists = self.if_not_exists + return op + + def to_diff_tuple( + self, + ) -> Tuple[str, Optional[str], str, Column[Any]]: + return ("add_column", self.schema, self.table_name, self.column) + + def to_column(self) -> Column[Any]: + return self.column + + @classmethod + def from_column(cls, col: Column[Any]) -> AddColumnOp: + return cls(col.table.name, col, schema=col.table.schema) + + @classmethod + def from_column_and_tablename( + cls, + schema: Optional[str], + tname: str, + col: Column[Any], + ) -> AddColumnOp: + return cls(tname, col, schema=schema) + + @classmethod + def add_column( + cls, + operations: Operations, + table_name: str, + column: Column[Any], + *, + schema: Optional[str] = None, + if_not_exists: Optional[bool] = None, + ) -> None: + """Issue an "add column" instruction using the current + migration context. + + e.g.:: + + from alembic import op + from sqlalchemy import Column, String + + op.add_column("organization", Column("name", String())) + + The :meth:`.Operations.add_column` method typically corresponds + to the SQL command "ALTER TABLE... ADD COLUMN". Within the scope + of this command, the column's name, datatype, nullability, + and optional server-generated defaults may be indicated. + + .. note:: + + With the exception of NOT NULL constraints or single-column FOREIGN + KEY constraints, other kinds of constraints such as PRIMARY KEY, + UNIQUE or CHECK constraints **cannot** be generated using this + method; for these constraints, refer to operations such as + :meth:`.Operations.create_primary_key` and + :meth:`.Operations.create_check_constraint`. 
In particular, the + following :class:`~sqlalchemy.schema.Column` parameters are + **ignored**: + + * :paramref:`~sqlalchemy.schema.Column.primary_key` - SQL databases + typically do not support an ALTER operation that can add + individual columns one at a time to an existing primary key + constraint, therefore it's less ambiguous to use the + :meth:`.Operations.create_primary_key` method, which assumes no + existing primary key constraint is present. + * :paramref:`~sqlalchemy.schema.Column.unique` - use the + :meth:`.Operations.create_unique_constraint` method + * :paramref:`~sqlalchemy.schema.Column.index` - use the + :meth:`.Operations.create_index` method + + + The provided :class:`~sqlalchemy.schema.Column` object may include a + :class:`~sqlalchemy.schema.ForeignKey` constraint directive, + referencing a remote table name. For this specific type of constraint, + Alembic will automatically emit a second ALTER statement in order to + add the single-column FOREIGN KEY constraint separately:: + + from alembic import op + from sqlalchemy import Column, INTEGER, ForeignKey + + op.add_column( + "organization", + Column("account_id", INTEGER, ForeignKey("accounts.id")), + ) + + The column argument passed to :meth:`.Operations.add_column` is a + :class:`~sqlalchemy.schema.Column` construct, used in the same way it's + used in SQLAlchemy. In particular, values or functions to be indicated + as producing the column's default value on the database side are + specified using the ``server_default`` parameter, and not ``default`` + which only specifies Python-side defaults:: + + from alembic import op + from sqlalchemy import Column, TIMESTAMP, func + + # specify "DEFAULT NOW" along with the column add + op.add_column( + "account", + Column("timestamp", TIMESTAMP, server_default=func.now()), + ) + + :param table_name: String name of the parent table. + :param column: a :class:`sqlalchemy.schema.Column` object + representing the new column. + :param schema: Optional schema name to operate within. To control + quoting of the schema outside of the default behavior, use + the SQLAlchemy construct + :class:`~sqlalchemy.sql.elements.quoted_name`. + :param if_not_exists: If True, adds IF NOT EXISTS operator + when creating the new column for compatible dialects + + .. versionadded:: 1.16.0 + + """ + + op = cls( + table_name, + column, + schema=schema, + if_not_exists=if_not_exists, + ) + return operations.invoke(op) + + @classmethod + def batch_add_column( + cls, + operations: BatchOperations, + column: Column[Any], + *, + insert_before: Optional[str] = None, + insert_after: Optional[str] = None, + if_not_exists: Optional[bool] = None, + ) -> None: + """Issue an "add column" instruction using the current + batch migration context. + + .. 
seealso:: + + :meth:`.Operations.add_column` + + """ + + kw = {} + if insert_before: + kw["insert_before"] = insert_before + if insert_after: + kw["insert_after"] = insert_after + + op = cls( + operations.impl.table_name, + column, + schema=operations.impl.schema, + if_not_exists=if_not_exists, + **kw, + ) + return operations.invoke(op) + + +@Operations.register_operation("drop_column") +@BatchOperations.register_operation("drop_column", "batch_drop_column") +class DropColumnOp(AlterTableOp): + """Represent a drop column operation.""" + + def __init__( + self, + table_name: str, + column_name: str, + *, + schema: Optional[str] = None, + if_exists: Optional[bool] = None, + _reverse: Optional[AddColumnOp] = None, + **kw: Any, + ) -> None: + super().__init__(table_name, schema=schema) + self.column_name = column_name + self.kw = kw + self.if_exists = if_exists + self._reverse = _reverse + + def to_diff_tuple( + self, + ) -> Tuple[str, Optional[str], str, Column[Any]]: + return ( + "remove_column", + self.schema, + self.table_name, + self.to_column(), + ) + + def reverse(self) -> AddColumnOp: + if self._reverse is None: + raise ValueError( + "operation is not reversible; " + "original column is not present" + ) + + op = AddColumnOp.from_column_and_tablename( + self.schema, self.table_name, self._reverse.column + ) + op.if_not_exists = self.if_exists + return op + + @classmethod + def from_column_and_tablename( + cls, + schema: Optional[str], + tname: str, + col: Column[Any], + ) -> DropColumnOp: + return cls( + tname, + col.name, + schema=schema, + _reverse=AddColumnOp.from_column_and_tablename(schema, tname, col), + ) + + def to_column( + self, migration_context: Optional[MigrationContext] = None + ) -> Column[Any]: + if self._reverse is not None: + return self._reverse.column + schema_obj = schemaobj.SchemaObjects(migration_context) + return schema_obj.column(self.column_name, NULLTYPE) + + @classmethod + def drop_column( + cls, + operations: Operations, + table_name: str, + column_name: str, + *, + schema: Optional[str] = None, + **kw: Any, + ) -> None: + """Issue a "drop column" instruction using the current + migration context. + + e.g.:: + + drop_column("organization", "account_id") + + :param table_name: name of table + :param column_name: name of column + :param schema: Optional schema name to operate within. To control + quoting of the schema outside of the default behavior, use + the SQLAlchemy construct + :class:`~sqlalchemy.sql.elements.quoted_name`. + :param if_exists: If True, adds IF EXISTS operator when + dropping the new column for compatible dialects + + .. versionadded:: 1.16.0 + + :param mssql_drop_check: Optional boolean. When ``True``, on + Microsoft SQL Server only, first + drop the CHECK constraint on the column using a + SQL-script-compatible + block that selects into a @variable from sys.check_constraints, + then exec's a separate DROP CONSTRAINT for that constraint. + :param mssql_drop_default: Optional boolean. When ``True``, on + Microsoft SQL Server only, first + drop the DEFAULT constraint on the column using a + SQL-script-compatible + block that selects into a @variable from sys.default_constraints, + then exec's a separate DROP CONSTRAINT for that default. + :param mssql_drop_foreign_key: Optional boolean. 
When ``True``, on + Microsoft SQL Server only, first + drop a single FOREIGN KEY constraint on the column using a + SQL-script-compatible + block that selects into a @variable from + sys.foreign_keys/sys.foreign_key_columns, + then exec's a separate DROP CONSTRAINT for that default. Only + works if the column has exactly one FK constraint which refers to + it, at the moment. + """ + + op = cls(table_name, column_name, schema=schema, **kw) + return operations.invoke(op) + + @classmethod + def batch_drop_column( + cls, operations: BatchOperations, column_name: str, **kw: Any + ) -> None: + """Issue a "drop column" instruction using the current + batch migration context. + + .. seealso:: + + :meth:`.Operations.drop_column` + + """ + op = cls( + operations.impl.table_name, + column_name, + schema=operations.impl.schema, + **kw, + ) + return operations.invoke(op) + + +@Operations.register_operation("bulk_insert") +class BulkInsertOp(MigrateOperation): + """Represent a bulk insert operation.""" + + def __init__( + self, + table: Union[Table, TableClause], + rows: List[Dict[str, Any]], + *, + multiinsert: bool = True, + ) -> None: + self.table = table + self.rows = rows + self.multiinsert = multiinsert + + @classmethod + def bulk_insert( + cls, + operations: Operations, + table: Union[Table, TableClause], + rows: List[Dict[str, Any]], + *, + multiinsert: bool = True, + ) -> None: + """Issue a "bulk insert" operation using the current + migration context. + + This provides a means of representing an INSERT of multiple rows + which works equally well in the context of executing on a live + connection as well as that of generating a SQL script. In the + case of a SQL script, the values are rendered inline into the + statement. + + e.g.:: + + from alembic import op + from datetime import date + from sqlalchemy.sql import table, column + from sqlalchemy import String, Integer, Date + + # Create an ad-hoc table to use for the insert statement. + accounts_table = table( + "account", + column("id", Integer), + column("name", String), + column("create_date", Date), + ) + + op.bulk_insert( + accounts_table, + [ + { + "id": 1, + "name": "John Smith", + "create_date": date(2010, 10, 5), + }, + { + "id": 2, + "name": "Ed Williams", + "create_date": date(2007, 5, 27), + }, + { + "id": 3, + "name": "Wendy Jones", + "create_date": date(2008, 8, 15), + }, + ], + ) + + When using --sql mode, some datatypes may not render inline + automatically, such as dates and other special types. When this + issue is present, :meth:`.Operations.inline_literal` may be used:: + + op.bulk_insert( + accounts_table, + [ + { + "id": 1, + "name": "John Smith", + "create_date": op.inline_literal("2010-10-05"), + }, + { + "id": 2, + "name": "Ed Williams", + "create_date": op.inline_literal("2007-05-27"), + }, + { + "id": 3, + "name": "Wendy Jones", + "create_date": op.inline_literal("2008-08-15"), + }, + ], + multiinsert=False, + ) + + When using :meth:`.Operations.inline_literal` in conjunction with + :meth:`.Operations.bulk_insert`, in order for the statement to work + in "online" (e.g. non --sql) mode, the + :paramref:`~.Operations.bulk_insert.multiinsert` + flag should be set to ``False``, which will have the effect of + individual INSERT statements being emitted to the database, each + with a distinct VALUES clause, so that the "inline" values can + still be rendered, rather than attempting to pass the values + as bound parameters. + + :param table: a table object which represents the target of the INSERT. 
+ + :param rows: a list of dictionaries indicating rows. + + :param multiinsert: when at its default of True and --sql mode is not + enabled, the INSERT statement will be executed using + "executemany()" style, where all elements in the list of + dictionaries are passed as bound parameters in a single + list. Setting this to False results in individual INSERT + statements being emitted per parameter set, and is needed + in those cases where non-literal values are present in the + parameter sets. + + """ + + op = cls(table, rows, multiinsert=multiinsert) + operations.invoke(op) + + +@Operations.register_operation("execute") +@BatchOperations.register_operation("execute", "batch_execute") +class ExecuteSQLOp(MigrateOperation): + """Represent an execute SQL operation.""" + + def __init__( + self, + sqltext: Union[Executable, str], + *, + execution_options: Optional[dict[str, Any]] = None, + ) -> None: + self.sqltext = sqltext + self.execution_options = execution_options + + @classmethod + def execute( + cls, + operations: Operations, + sqltext: Union[Executable, str], + *, + execution_options: Optional[dict[str, Any]] = None, + ) -> None: + r"""Execute the given SQL using the current migration context. + + The given SQL can be a plain string, e.g.:: + + op.execute("INSERT INTO table (foo) VALUES ('some value')") + + Or it can be any kind of Core SQL Expression construct, such as + below where we use an update construct:: + + from sqlalchemy.sql import table, column + from sqlalchemy import String + from alembic import op + + account = table("account", column("name", String)) + op.execute( + account.update() + .where(account.c.name == op.inline_literal("account 1")) + .values({"name": op.inline_literal("account 2")}) + ) + + Above, we made use of the SQLAlchemy + :func:`sqlalchemy.sql.expression.table` and + :func:`sqlalchemy.sql.expression.column` constructs to make a brief, + ad-hoc table construct just for our UPDATE statement. A full + :class:`~sqlalchemy.schema.Table` construct of course works perfectly + fine as well, though note it's a recommended practice to at least + ensure the definition of a table is self-contained within the migration + script, rather than imported from a module that may break compatibility + with older migrations. + + In a SQL script context, the statement is emitted directly to the + output stream. There is *no* return result, however, as this + function is oriented towards generating a change script + that can run in "offline" mode. Additionally, parameterized + statements are discouraged here, as they *will not work* in offline + mode. Above, we use :meth:`.inline_literal` where parameters are + to be used. + + For full interaction with a connected database where parameters can + also be used normally, use the "bind" available from the context:: + + from alembic import op + + connection = op.get_bind() + + connection.execute( + account.update() + .where(account.c.name == "account 1") + .values({"name": "account 2"}) + ) + + Additionally, when passing the statement as a plain string, it is first + coerced into a :func:`sqlalchemy.sql.expression.text` construct + before being passed along. In the less likely case that the + literal SQL string contains a colon, it must be escaped with a + backslash, as:: + + op.execute(r"INSERT INTO table (foo) VALUES ('\:colon_value')") + + + :param sqltext: Any legal SQLAlchemy expression, including: + + * a string + * a :func:`sqlalchemy.sql.expression.text` construct. + * a :func:`sqlalchemy.sql.expression.insert` construct. 
+ * a :func:`sqlalchemy.sql.expression.update` construct. + * a :func:`sqlalchemy.sql.expression.delete` construct. + * Any "executable" described in SQLAlchemy Core documentation, + noting that no result set is returned. + + .. note:: when passing a plain string, the statement is coerced into + a :func:`sqlalchemy.sql.expression.text` construct. This construct + considers symbols with colons, e.g. ``:foo`` to be bound parameters. + To avoid this, ensure that colon symbols are escaped, e.g. + ``\:foo``. + + :param execution_options: Optional dictionary of + execution options, will be passed to + :meth:`sqlalchemy.engine.Connection.execution_options`. + """ + op = cls(sqltext, execution_options=execution_options) + return operations.invoke(op) + + @classmethod + def batch_execute( + cls, + operations: Operations, + sqltext: Union[Executable, str], + *, + execution_options: Optional[dict[str, Any]] = None, + ) -> None: + """Execute the given SQL using the current migration context. + + .. seealso:: + + :meth:`.Operations.execute` + + """ + return cls.execute( + operations, sqltext, execution_options=execution_options + ) + + def to_diff_tuple(self) -> Tuple[str, Union[Executable, str]]: + return ("execute", self.sqltext) + + +class OpContainer(MigrateOperation): + """Represent a sequence of operations operation.""" + + def __init__(self, ops: Sequence[MigrateOperation] = ()) -> None: + self.ops = list(ops) + + def is_empty(self) -> bool: + return not self.ops + + def as_diffs(self) -> Any: + return list(OpContainer._ops_as_diffs(self)) + + @classmethod + def _ops_as_diffs( + cls, migrations: OpContainer + ) -> Iterator[Tuple[Any, ...]]: + for op in migrations.ops: + if hasattr(op, "ops"): + yield from cls._ops_as_diffs(cast("OpContainer", op)) + else: + yield op.to_diff_tuple() + + +class ModifyTableOps(OpContainer): + """Contains a sequence of operations that all apply to a single Table.""" + + def __init__( + self, + table_name: str, + ops: Sequence[MigrateOperation], + *, + schema: Optional[str] = None, + ) -> None: + super().__init__(ops) + self.table_name = table_name + self.schema = schema + + def reverse(self) -> ModifyTableOps: + return ModifyTableOps( + self.table_name, + ops=list(reversed([op.reverse() for op in self.ops])), + schema=self.schema, + ) + + +class UpgradeOps(OpContainer): + """contains a sequence of operations that would apply to the + 'upgrade' stream of a script. + + .. seealso:: + + :ref:`customizing_revision` + + """ + + def __init__( + self, + ops: Sequence[MigrateOperation] = (), + upgrade_token: str = "upgrades", + ) -> None: + super().__init__(ops=ops) + self.upgrade_token = upgrade_token + + def reverse_into(self, downgrade_ops: DowngradeOps) -> DowngradeOps: + downgrade_ops.ops[:] = list( + reversed([op.reverse() for op in self.ops]) + ) + return downgrade_ops + + def reverse(self) -> DowngradeOps: + return self.reverse_into(DowngradeOps(ops=[])) + + +class DowngradeOps(OpContainer): + """contains a sequence of operations that would apply to the + 'downgrade' stream of a script. + + .. seealso:: + + :ref:`customizing_revision` + + """ + + def __init__( + self, + ops: Sequence[MigrateOperation] = (), + downgrade_token: str = "downgrades", + ) -> None: + super().__init__(ops=ops) + self.downgrade_token = downgrade_token + + def reverse(self) -> UpgradeOps: + return UpgradeOps( + ops=list(reversed([op.reverse() for op in self.ops])) + ) + + +class MigrationScript(MigrateOperation): + """represents a migration script. + + E.g. 
when autogenerate encounters this object, this corresponds to the + production of an actual script file. + + A normal :class:`.MigrationScript` object would contain a single + :class:`.UpgradeOps` and a single :class:`.DowngradeOps` directive. + These are accessible via the ``.upgrade_ops`` and ``.downgrade_ops`` + attributes. + + In the case of an autogenerate operation that runs multiple times, + such as the multiple database example in the "multidb" template, + the ``.upgrade_ops`` and ``.downgrade_ops`` attributes are disabled, + and instead these objects should be accessed via the ``.upgrade_ops_list`` + and ``.downgrade_ops_list`` list-based attributes. These latter + attributes are always available at the very least as single-element lists. + + .. seealso:: + + :ref:`customizing_revision` + + """ + + _needs_render: Optional[bool] + _upgrade_ops: List[UpgradeOps] + _downgrade_ops: List[DowngradeOps] + + def __init__( + self, + rev_id: Optional[str], + upgrade_ops: UpgradeOps, + downgrade_ops: DowngradeOps, + *, + message: Optional[str] = None, + imports: Set[str] = set(), + head: Optional[str] = None, + splice: Optional[bool] = None, + branch_label: Optional[_RevIdType] = None, + version_path: Union[str, os.PathLike[str], None] = None, + depends_on: Optional[_RevIdType] = None, + ) -> None: + self.rev_id = rev_id + self.message = message + self.imports = imports + self.head = head + self.splice = splice + self.branch_label = branch_label + self.version_path = ( + pathlib.Path(version_path).as_posix() if version_path else None + ) + self.depends_on = depends_on + self.upgrade_ops = upgrade_ops + self.downgrade_ops = downgrade_ops + + @property + def upgrade_ops(self) -> Optional[UpgradeOps]: + """An instance of :class:`.UpgradeOps`. + + .. seealso:: + + :attr:`.MigrationScript.upgrade_ops_list` + """ + if len(self._upgrade_ops) > 1: + raise ValueError( + "This MigrationScript instance has a multiple-entry " + "list for UpgradeOps; please use the " + "upgrade_ops_list attribute." + ) + elif not self._upgrade_ops: + return None + else: + return self._upgrade_ops[0] + + @upgrade_ops.setter + def upgrade_ops( + self, upgrade_ops: Union[UpgradeOps, List[UpgradeOps]] + ) -> None: + self._upgrade_ops = util.to_list(upgrade_ops) + for elem in self._upgrade_ops: + assert isinstance(elem, UpgradeOps) + + @property + def downgrade_ops(self) -> Optional[DowngradeOps]: + """An instance of :class:`.DowngradeOps`. + + .. seealso:: + + :attr:`.MigrationScript.downgrade_ops_list` + """ + if len(self._downgrade_ops) > 1: + raise ValueError( + "This MigrationScript instance has a multiple-entry " + "list for DowngradeOps; please use the " + "downgrade_ops_list attribute." + ) + elif not self._downgrade_ops: + return None + else: + return self._downgrade_ops[0] + + @downgrade_ops.setter + def downgrade_ops( + self, downgrade_ops: Union[DowngradeOps, List[DowngradeOps]] + ) -> None: + self._downgrade_ops = util.to_list(downgrade_ops) + for elem in self._downgrade_ops: + assert isinstance(elem, DowngradeOps) + + @property + def upgrade_ops_list(self) -> List[UpgradeOps]: + """A list of :class:`.UpgradeOps` instances. + + This is used in place of the :attr:`.MigrationScript.upgrade_ops` + attribute when dealing with a revision operation that does + multiple autogenerate passes. + + """ + return self._upgrade_ops + + @property + def downgrade_ops_list(self) -> List[DowngradeOps]: + """A list of :class:`.DowngradeOps` instances. 
+ + This is used in place of the :attr:`.MigrationScript.downgrade_ops` + attribute when dealing with a revision operation that does + multiple autogenerate passes. + + """ + return self._downgrade_ops diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/operations/schemaobj.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/operations/schemaobj.py new file mode 100644 index 0000000000000000000000000000000000000000..59c1002f109c6fcde6b76e1f2910921f349ec13d --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/operations/schemaobj.py @@ -0,0 +1,290 @@ +# mypy: allow-untyped-defs, allow-incomplete-defs, allow-untyped-calls +# mypy: no-warn-return-any, allow-any-generics + +from __future__ import annotations + +from typing import Any +from typing import Dict +from typing import List +from typing import Optional +from typing import Sequence +from typing import Tuple +from typing import TYPE_CHECKING +from typing import Union + +from sqlalchemy import schema as sa_schema +from sqlalchemy.sql.schema import Column +from sqlalchemy.sql.schema import Constraint +from sqlalchemy.sql.schema import Index +from sqlalchemy.types import Integer +from sqlalchemy.types import NULLTYPE + +from .. import util +from ..util import sqla_compat + +if TYPE_CHECKING: + from sqlalchemy.sql.elements import ColumnElement + from sqlalchemy.sql.elements import TextClause + from sqlalchemy.sql.schema import CheckConstraint + from sqlalchemy.sql.schema import ForeignKey + from sqlalchemy.sql.schema import ForeignKeyConstraint + from sqlalchemy.sql.schema import MetaData + from sqlalchemy.sql.schema import PrimaryKeyConstraint + from sqlalchemy.sql.schema import Table + from sqlalchemy.sql.schema import UniqueConstraint + from sqlalchemy.sql.type_api import TypeEngine + + from ..runtime.migration import MigrationContext + + +class SchemaObjects: + def __init__( + self, migration_context: Optional[MigrationContext] = None + ) -> None: + self.migration_context = migration_context + + def primary_key_constraint( + self, + name: Optional[sqla_compat._ConstraintNameDefined], + table_name: str, + cols: Sequence[str], + schema: Optional[str] = None, + **dialect_kw, + ) -> PrimaryKeyConstraint: + m = self.metadata() + columns = [sa_schema.Column(n, NULLTYPE) for n in cols] + t = sa_schema.Table(table_name, m, *columns, schema=schema) + # SQLAlchemy primary key constraint name arg is wrongly typed on + # the SQLAlchemy side through 2.0.5 at least + p = sa_schema.PrimaryKeyConstraint( + *[t.c[n] for n in cols], name=name, **dialect_kw # type: ignore + ) + return p + + def foreign_key_constraint( + self, + name: Optional[sqla_compat._ConstraintNameDefined], + source: str, + referent: str, + local_cols: List[str], + remote_cols: List[str], + onupdate: Optional[str] = None, + ondelete: Optional[str] = None, + deferrable: Optional[bool] = None, + source_schema: Optional[str] = None, + referent_schema: Optional[str] = None, + initially: Optional[str] = None, + match: Optional[str] = None, + **dialect_kw, + ) -> ForeignKeyConstraint: + m = self.metadata() + if source == referent and source_schema == referent_schema: + t1_cols = local_cols + remote_cols + else: + t1_cols = local_cols + sa_schema.Table( + referent, + m, + *[sa_schema.Column(n, NULLTYPE) for n in remote_cols], + schema=referent_schema, + ) + + t1 = sa_schema.Table( + source, + m, + *[ + sa_schema.Column(n, NULLTYPE) + for n in 
util.unique_list(t1_cols) + ], + schema=source_schema, + ) + + tname = ( + "%s.%s" % (referent_schema, referent) + if referent_schema + else referent + ) + + dialect_kw["match"] = match + + f = sa_schema.ForeignKeyConstraint( + local_cols, + ["%s.%s" % (tname, n) for n in remote_cols], + name=name, + onupdate=onupdate, + ondelete=ondelete, + deferrable=deferrable, + initially=initially, + **dialect_kw, + ) + t1.append_constraint(f) + + return f + + def unique_constraint( + self, + name: Optional[sqla_compat._ConstraintNameDefined], + source: str, + local_cols: Sequence[str], + schema: Optional[str] = None, + **kw, + ) -> UniqueConstraint: + t = sa_schema.Table( + source, + self.metadata(), + *[sa_schema.Column(n, NULLTYPE) for n in local_cols], + schema=schema, + ) + kw["name"] = name + uq = sa_schema.UniqueConstraint(*[t.c[n] for n in local_cols], **kw) + # TODO: need event tests to ensure the event + # is fired off here + t.append_constraint(uq) + return uq + + def check_constraint( + self, + name: Optional[sqla_compat._ConstraintNameDefined], + source: str, + condition: Union[str, TextClause, ColumnElement[Any]], + schema: Optional[str] = None, + **kw, + ) -> Union[CheckConstraint]: + t = sa_schema.Table( + source, + self.metadata(), + sa_schema.Column("x", Integer), + schema=schema, + ) + ck = sa_schema.CheckConstraint(condition, name=name, **kw) + t.append_constraint(ck) + return ck + + def generic_constraint( + self, + name: Optional[sqla_compat._ConstraintNameDefined], + table_name: str, + type_: Optional[str], + schema: Optional[str] = None, + **kw, + ) -> Any: + t = self.table(table_name, schema=schema) + types: Dict[Optional[str], Any] = { + "foreignkey": lambda name: sa_schema.ForeignKeyConstraint( + [], [], name=name + ), + "primary": sa_schema.PrimaryKeyConstraint, + "unique": sa_schema.UniqueConstraint, + "check": lambda name: sa_schema.CheckConstraint("", name=name), + None: sa_schema.Constraint, + } + try: + const = types[type_] + except KeyError as ke: + raise TypeError( + "'type' can be one of %s" + % ", ".join(sorted(repr(x) for x in types)) + ) from ke + else: + const = const(name=name) + t.append_constraint(const) + return const + + def metadata(self) -> MetaData: + kw = {} + if ( + self.migration_context is not None + and "target_metadata" in self.migration_context.opts + ): + mt = self.migration_context.opts["target_metadata"] + if hasattr(mt, "naming_convention"): + kw["naming_convention"] = mt.naming_convention + return sa_schema.MetaData(**kw) + + def table(self, name: str, *columns, **kw) -> Table: + m = self.metadata() + + cols = [ + sqla_compat._copy(c) if c.table is not None else c + for c in columns + if isinstance(c, Column) + ] + # these flags have already added their UniqueConstraint / + # Index objects to the table, so flip them off here. + # SQLAlchemy tometadata() avoids this instead by preserving the + # flags and skipping the constraints that have _type_bound on them, + # but for a migration we'd rather list out the constraints + # explicitly. 
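+        # "_constraints_included" is popped so it is not forwarded to
+        # sa_schema.Table() below; when set, the unique/index flags on the
+        # copied columns are cleared, since the corresponding constraints
+        # are expected to appear in the explicit constraint list instead.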
+ _constraints_included = kw.pop("_constraints_included", False) + if _constraints_included: + for c in cols: + c.unique = c.index = False + + t = sa_schema.Table(name, m, *cols, **kw) + + constraints = [ + ( + sqla_compat._copy(elem, target_table=t) + if getattr(elem, "parent", None) is not t + and getattr(elem, "parent", None) is not None + else elem + ) + for elem in columns + if isinstance(elem, (Constraint, Index)) + ] + + for const in constraints: + t.append_constraint(const) + + for f in t.foreign_keys: + self._ensure_table_for_fk(m, f) + return t + + def column(self, name: str, type_: TypeEngine, **kw) -> Column: + return sa_schema.Column(name, type_, **kw) + + def index( + self, + name: Optional[str], + tablename: Optional[str], + columns: Sequence[Union[str, TextClause, ColumnElement[Any]]], + schema: Optional[str] = None, + **kw, + ) -> Index: + t = sa_schema.Table( + tablename or "no_table", + self.metadata(), + schema=schema, + ) + kw["_table"] = t + idx = sa_schema.Index( + name, + *[util.sqla_compat._textual_index_column(t, n) for n in columns], + **kw, + ) + return idx + + def _parse_table_key(self, table_key: str) -> Tuple[Optional[str], str]: + if "." in table_key: + tokens = table_key.split(".") + sname: Optional[str] = ".".join(tokens[0:-1]) + tname = tokens[-1] + else: + tname = table_key + sname = None + return (sname, tname) + + def _ensure_table_for_fk(self, metadata: MetaData, fk: ForeignKey) -> None: + """create a placeholder Table object for the referent of a + ForeignKey. + + """ + if isinstance(fk._colspec, str): + table_key, cname = fk._colspec.rsplit(".", 1) + sname, tname = self._parse_table_key(table_key) + if table_key not in metadata.tables: + rel_t = sa_schema.Table(tname, metadata, schema=sname) + else: + rel_t = metadata.tables[table_key] + if cname not in rel_t.c: + rel_t.append_column(sa_schema.Column(cname, NULLTYPE)) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/operations/toimpl.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/operations/toimpl.py new file mode 100644 index 0000000000000000000000000000000000000000..c18ec790176d6db1a848e962f190202bbed47162 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/operations/toimpl.py @@ -0,0 +1,242 @@ +# mypy: allow-untyped-defs, allow-incomplete-defs, allow-untyped-calls +# mypy: no-warn-return-any, allow-any-generics + +from typing import TYPE_CHECKING + +from sqlalchemy import schema as sa_schema + +from . 
import ops +from .base import Operations +from ..util.sqla_compat import _copy +from ..util.sqla_compat import sqla_2 + +if TYPE_CHECKING: + from sqlalchemy.sql.schema import Table + + +@Operations.implementation_for(ops.AlterColumnOp) +def alter_column( + operations: "Operations", operation: "ops.AlterColumnOp" +) -> None: + compiler = operations.impl.dialect.statement_compiler( + operations.impl.dialect, None + ) + + existing_type = operation.existing_type + existing_nullable = operation.existing_nullable + existing_server_default = operation.existing_server_default + type_ = operation.modify_type + column_name = operation.column_name + table_name = operation.table_name + schema = operation.schema + server_default = operation.modify_server_default + new_column_name = operation.modify_name + nullable = operation.modify_nullable + comment = operation.modify_comment + existing_comment = operation.existing_comment + + def _count_constraint(constraint): + return not isinstance(constraint, sa_schema.PrimaryKeyConstraint) and ( + not constraint._create_rule or constraint._create_rule(compiler) + ) + + if existing_type and type_: + t = operations.schema_obj.table( + table_name, + sa_schema.Column(column_name, existing_type), + schema=schema, + ) + for constraint in t.constraints: + if _count_constraint(constraint): + operations.impl.drop_constraint(constraint) + + operations.impl.alter_column( + table_name, + column_name, + nullable=nullable, + server_default=server_default, + name=new_column_name, + type_=type_, + schema=schema, + existing_type=existing_type, + existing_server_default=existing_server_default, + existing_nullable=existing_nullable, + comment=comment, + existing_comment=existing_comment, + **operation.kw, + ) + + if type_: + t = operations.schema_obj.table( + table_name, + operations.schema_obj.column(column_name, type_), + schema=schema, + ) + for constraint in t.constraints: + if _count_constraint(constraint): + operations.impl.add_constraint(constraint) + + +@Operations.implementation_for(ops.DropTableOp) +def drop_table(operations: "Operations", operation: "ops.DropTableOp") -> None: + kw = {} + if operation.if_exists is not None: + kw["if_exists"] = operation.if_exists + operations.impl.drop_table( + operation.to_table(operations.migration_context), **kw + ) + + +@Operations.implementation_for(ops.DropColumnOp) +def drop_column( + operations: "Operations", operation: "ops.DropColumnOp" +) -> None: + column = operation.to_column(operations.migration_context) + operations.impl.drop_column( + operation.table_name, + column, + schema=operation.schema, + if_exists=operation.if_exists, + **operation.kw, + ) + + +@Operations.implementation_for(ops.CreateIndexOp) +def create_index( + operations: "Operations", operation: "ops.CreateIndexOp" +) -> None: + idx = operation.to_index(operations.migration_context) + kw = {} + if operation.if_not_exists is not None: + kw["if_not_exists"] = operation.if_not_exists + operations.impl.create_index(idx, **kw) + + +@Operations.implementation_for(ops.DropIndexOp) +def drop_index(operations: "Operations", operation: "ops.DropIndexOp") -> None: + kw = {} + if operation.if_exists is not None: + kw["if_exists"] = operation.if_exists + + operations.impl.drop_index( + operation.to_index(operations.migration_context), + **kw, + ) + + +@Operations.implementation_for(ops.CreateTableOp) +def create_table( + operations: "Operations", operation: "ops.CreateTableOp" +) -> "Table": + kw = {} + if operation.if_not_exists is not None: + kw["if_not_exists"] = 
operation.if_not_exists + table = operation.to_table(operations.migration_context) + operations.impl.create_table(table, **kw) + return table + + +@Operations.implementation_for(ops.RenameTableOp) +def rename_table( + operations: "Operations", operation: "ops.RenameTableOp" +) -> None: + operations.impl.rename_table( + operation.table_name, operation.new_table_name, schema=operation.schema + ) + + +@Operations.implementation_for(ops.CreateTableCommentOp) +def create_table_comment( + operations: "Operations", operation: "ops.CreateTableCommentOp" +) -> None: + table = operation.to_table(operations.migration_context) + operations.impl.create_table_comment(table) + + +@Operations.implementation_for(ops.DropTableCommentOp) +def drop_table_comment( + operations: "Operations", operation: "ops.DropTableCommentOp" +) -> None: + table = operation.to_table(operations.migration_context) + operations.impl.drop_table_comment(table) + + +@Operations.implementation_for(ops.AddColumnOp) +def add_column(operations: "Operations", operation: "ops.AddColumnOp") -> None: + table_name = operation.table_name + column = operation.column + schema = operation.schema + kw = operation.kw + + if column.table is not None: + column = _copy(column) + + t = operations.schema_obj.table(table_name, column, schema=schema) + operations.impl.add_column( + table_name, + column, + schema=schema, + if_not_exists=operation.if_not_exists, + **kw, + ) + + for constraint in t.constraints: + if not isinstance(constraint, sa_schema.PrimaryKeyConstraint): + operations.impl.add_constraint(constraint) + for index in t.indexes: + operations.impl.create_index(index) + + with_comment = ( + operations.impl.dialect.supports_comments + and not operations.impl.dialect.inline_comments + ) + comment = column.comment + if comment and with_comment: + operations.impl.create_column_comment(column) + + +@Operations.implementation_for(ops.AddConstraintOp) +def create_constraint( + operations: "Operations", operation: "ops.AddConstraintOp" +) -> None: + operations.impl.add_constraint( + operation.to_constraint(operations.migration_context) + ) + + +@Operations.implementation_for(ops.DropConstraintOp) +def drop_constraint( + operations: "Operations", operation: "ops.DropConstraintOp" +) -> None: + kw = {} + if operation.if_exists is not None: + if not sqla_2: + raise NotImplementedError("SQLAlchemy 2.0 required") + kw["if_exists"] = operation.if_exists + operations.impl.drop_constraint( + operations.schema_obj.generic_constraint( + operation.constraint_name, + operation.table_name, + operation.constraint_type, + schema=operation.schema, + ), + **kw, + ) + + +@Operations.implementation_for(ops.BulkInsertOp) +def bulk_insert( + operations: "Operations", operation: "ops.BulkInsertOp" +) -> None: + operations.impl.bulk_insert( # type: ignore[union-attr] + operation.table, operation.rows, multiinsert=operation.multiinsert + ) + + +@Operations.implementation_for(ops.ExecuteSQLOp) +def execute_sql( + operations: "Operations", operation: "ops.ExecuteSQLOp" +) -> None: + operations.migration_context.impl.execute( + operation.sqltext, execution_options=operation.execution_options + ) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/py.typed b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git 
a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/runtime/__init__.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/runtime/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/runtime/environment.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/runtime/environment.py new file mode 100644 index 0000000000000000000000000000000000000000..80ca2b6ca3188bb4f0c8c854da14470618ef5518 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/runtime/environment.py @@ -0,0 +1,1051 @@ +from __future__ import annotations + +from typing import Any +from typing import Callable +from typing import Collection +from typing import Dict +from typing import List +from typing import Mapping +from typing import MutableMapping +from typing import Optional +from typing import overload +from typing import Sequence +from typing import TextIO +from typing import Tuple +from typing import TYPE_CHECKING +from typing import Union + +from sqlalchemy.sql.schema import Column +from sqlalchemy.sql.schema import FetchedValue +from typing_extensions import ContextManager +from typing_extensions import Literal + +from .migration import _ProxyTransaction +from .migration import MigrationContext +from .. import util +from ..operations import Operations +from ..script.revision import _GetRevArg + +if TYPE_CHECKING: + from sqlalchemy.engine import URL + from sqlalchemy.engine.base import Connection + from sqlalchemy.sql import Executable + from sqlalchemy.sql.schema import MetaData + from sqlalchemy.sql.schema import SchemaItem + from sqlalchemy.sql.type_api import TypeEngine + + from .migration import MigrationInfo + from ..autogenerate.api import AutogenContext + from ..config import Config + from ..ddl import DefaultImpl + from ..operations.ops import MigrationScript + from ..script.base import ScriptDirectory + +_RevNumber = Optional[Union[str, Tuple[str, ...]]] + +ProcessRevisionDirectiveFn = Callable[ + [MigrationContext, _GetRevArg, List["MigrationScript"]], None +] + +RenderItemFn = Callable[ + [str, Any, "AutogenContext"], Union[str, Literal[False]] +] + +NameFilterType = Literal[ + "schema", + "table", + "column", + "index", + "unique_constraint", + "foreign_key_constraint", +] +NameFilterParentNames = MutableMapping[ + Literal["schema_name", "table_name", "schema_qualified_table_name"], + Optional[str], +] +IncludeNameFn = Callable[ + [Optional[str], NameFilterType, NameFilterParentNames], bool +] + +IncludeObjectFn = Callable[ + [ + "SchemaItem", + Optional[str], + NameFilterType, + bool, + Optional["SchemaItem"], + ], + bool, +] + +OnVersionApplyFn = Callable[ + [MigrationContext, "MigrationInfo", Collection[Any], Mapping[str, Any]], + None, +] + +CompareServerDefault = Callable[ + [ + MigrationContext, + "Column[Any]", + "Column[Any]", + Optional[str], + Optional[FetchedValue], + Optional[str], + ], + Optional[bool], +] + +CompareType = Callable[ + [ + MigrationContext, + "Column[Any]", + "Column[Any]", + "TypeEngine[Any]", + "TypeEngine[Any]", + ], + Optional[bool], +] + + +class EnvironmentContext(util.ModuleClsProxy): + """A configurational facade made available in an ``env.py`` script. 
+ + The :class:`.EnvironmentContext` acts as a *facade* to the more + nuts-and-bolts objects of :class:`.MigrationContext` as well as certain + aspects of :class:`.Config`, + within the context of the ``env.py`` script that is invoked by + most Alembic commands. + + :class:`.EnvironmentContext` is normally instantiated + when a command in :mod:`alembic.command` is run. It then makes + itself available in the ``alembic.context`` module for the scope + of the command. From within an ``env.py`` script, the current + :class:`.EnvironmentContext` is available by importing this module. + + :class:`.EnvironmentContext` also supports programmatic usage. + At this level, it acts as a Python context manager, that is, is + intended to be used using the + ``with:`` statement. A typical use of :class:`.EnvironmentContext`:: + + from alembic.config import Config + from alembic.script import ScriptDirectory + + config = Config() + config.set_main_option("script_location", "myapp:migrations") + script = ScriptDirectory.from_config(config) + + + def my_function(rev, context): + '''do something with revision "rev", which + will be the current database revision, + and "context", which is the MigrationContext + that the env.py will create''' + + + with EnvironmentContext( + config, + script, + fn=my_function, + as_sql=False, + starting_rev="base", + destination_rev="head", + tag="sometag", + ): + script.run_env() + + The above script will invoke the ``env.py`` script + within the migration environment. If and when ``env.py`` + calls :meth:`.MigrationContext.run_migrations`, the + ``my_function()`` function above will be called + by the :class:`.MigrationContext`, given the context + itself as well as the current revision in the database. + + .. note:: + + For most API usages other than full blown + invocation of migration scripts, the :class:`.MigrationContext` + and :class:`.ScriptDirectory` objects can be created and + used directly. The :class:`.EnvironmentContext` object + is *only* needed when you need to actually invoke the + ``env.py`` module present in the migration environment. + + """ + + _migration_context: Optional[MigrationContext] = None + + config: Config = None # type:ignore[assignment] + """An instance of :class:`.Config` representing the + configuration file contents as well as other variables + set programmatically within it.""" + + script: ScriptDirectory = None # type:ignore[assignment] + """An instance of :class:`.ScriptDirectory` which provides + programmatic access to version files within the ``versions/`` + directory. + + """ + + def __init__( + self, config: Config, script: ScriptDirectory, **kw: Any + ) -> None: + r"""Construct a new :class:`.EnvironmentContext`. + + :param config: a :class:`.Config` instance. + :param script: a :class:`.ScriptDirectory` instance. + :param \**kw: keyword options that will be ultimately + passed along to the :class:`.MigrationContext` when + :meth:`.EnvironmentContext.configure` is called. + + """ + self.config = config + self.script = script + self.context_opts = kw + + def __enter__(self) -> EnvironmentContext: + """Establish a context which provides a + :class:`.EnvironmentContext` object to + env.py scripts. + + The :class:`.EnvironmentContext` will + be made available as ``from alembic import context``. 
+ + """ + self._install_proxy() + return self + + def __exit__(self, *arg: Any, **kw: Any) -> None: + self._remove_proxy() + + def is_offline_mode(self) -> bool: + """Return True if the current migrations environment + is running in "offline mode". + + This is ``True`` or ``False`` depending + on the ``--sql`` flag passed. + + This function does not require that the :class:`.MigrationContext` + has been configured. + + """ + return self.context_opts.get("as_sql", False) # type: ignore[no-any-return] # noqa: E501 + + def is_transactional_ddl(self) -> bool: + """Return True if the context is configured to expect a + transactional DDL capable backend. + + This defaults to the type of database in use, and + can be overridden by the ``transactional_ddl`` argument + to :meth:`.configure` + + This function requires that a :class:`.MigrationContext` + has first been made available via :meth:`.configure`. + + """ + return self.get_context().impl.transactional_ddl + + def requires_connection(self) -> bool: + return not self.is_offline_mode() + + def get_head_revision(self) -> _RevNumber: + """Return the hex identifier of the 'head' script revision. + + If the script directory has multiple heads, this + method raises a :class:`.CommandError`; + :meth:`.EnvironmentContext.get_head_revisions` should be preferred. + + This function does not require that the :class:`.MigrationContext` + has been configured. + + .. seealso:: :meth:`.EnvironmentContext.get_head_revisions` + + """ + return self.script.as_revision_number("head") + + def get_head_revisions(self) -> _RevNumber: + """Return the hex identifier of the 'heads' script revision(s). + + This returns a tuple containing the version number of all + heads in the script directory. + + This function does not require that the :class:`.MigrationContext` + has been configured. + + """ + return self.script.as_revision_number("heads") + + def get_starting_revision_argument(self) -> _RevNumber: + """Return the 'starting revision' argument, + if the revision was passed using ``start:end``. + + This is only meaningful in "offline" mode. + Returns ``None`` if no value is available + or was configured. + + This function does not require that the :class:`.MigrationContext` + has been configured. + + """ + if self._migration_context is not None: + return self.script.as_revision_number( + self.get_context()._start_from_rev + ) + elif "starting_rev" in self.context_opts: + return self.script.as_revision_number( + self.context_opts["starting_rev"] + ) + else: + # this should raise only in the case that a command + # is being run where the "starting rev" is never applicable; + # this is to catch scripts which rely upon this in + # non-sql mode or similar + raise util.CommandError( + "No starting revision argument is available." + ) + + def get_revision_argument(self) -> _RevNumber: + """Get the 'destination' revision argument. + + This is typically the argument passed to the + ``upgrade`` or ``downgrade`` command. + + If it was specified as ``head``, the actual + version number is returned; if specified + as ``base``, ``None`` is returned. + + This function does not require that the :class:`.MigrationContext` + has been configured. + + """ + return self.script.as_revision_number( + self.context_opts["destination_rev"] + ) + + def get_tag_argument(self) -> Optional[str]: + """Return the value passed for the ``--tag`` argument, if any. 
+ + The ``--tag`` argument is not used directly by Alembic, + but is available for custom ``env.py`` configurations that + wish to use it; particularly for offline generation scripts + that wish to generate tagged filenames. + + This function does not require that the :class:`.MigrationContext` + has been configured. + + .. seealso:: + + :meth:`.EnvironmentContext.get_x_argument` - a newer and more + open ended system of extending ``env.py`` scripts via the command + line. + + """ + return self.context_opts.get("tag", None) + + @overload + def get_x_argument(self, as_dictionary: Literal[False]) -> List[str]: ... + + @overload + def get_x_argument( + self, as_dictionary: Literal[True] + ) -> Dict[str, str]: ... + + @overload + def get_x_argument( + self, as_dictionary: bool = ... + ) -> Union[List[str], Dict[str, str]]: ... + + def get_x_argument( + self, as_dictionary: bool = False + ) -> Union[List[str], Dict[str, str]]: + """Return the value(s) passed for the ``-x`` argument, if any. + + The ``-x`` argument is an open ended flag that allows any user-defined + value or values to be passed on the command line, then available + here for consumption by a custom ``env.py`` script. + + The return value is a list, returned directly from the ``argparse`` + structure. If ``as_dictionary=True`` is passed, the ``x`` arguments + are parsed using ``key=value`` format into a dictionary that is + then returned. If there is no ``=`` in the argument, value is an empty + string. + + .. versionchanged:: 1.13.1 Support ``as_dictionary=True`` when + arguments are passed without the ``=`` symbol. + + For example, to support passing a database URL on the command line, + the standard ``env.py`` script can be modified like this:: + + cmd_line_url = context.get_x_argument( + as_dictionary=True).get('dbname') + if cmd_line_url: + engine = create_engine(cmd_line_url) + else: + engine = engine_from_config( + config.get_section(config.config_ini_section), + prefix='sqlalchemy.', + poolclass=pool.NullPool) + + This then takes effect by running the ``alembic`` script as:: + + alembic -x dbname=postgresql://user:pass@host/dbname upgrade head + + This function does not require that the :class:`.MigrationContext` + has been configured. + + .. 
seealso:: + + :meth:`.EnvironmentContext.get_tag_argument` + + :attr:`.Config.cmd_opts` + + """ + if self.config.cmd_opts is not None: + value = self.config.cmd_opts.x or [] + else: + value = [] + if as_dictionary: + dict_value = {} + for arg in value: + x_key, _, x_value = arg.partition("=") + dict_value[x_key] = x_value + value = dict_value + + return value + + def configure( + self, + connection: Optional[Connection] = None, + url: Optional[Union[str, URL]] = None, + dialect_name: Optional[str] = None, + dialect_opts: Optional[Dict[str, Any]] = None, + transactional_ddl: Optional[bool] = None, + transaction_per_migration: bool = False, + output_buffer: Optional[TextIO] = None, + starting_rev: Optional[str] = None, + tag: Optional[str] = None, + template_args: Optional[Dict[str, Any]] = None, + render_as_batch: bool = False, + target_metadata: Union[MetaData, Sequence[MetaData], None] = None, + include_name: Optional[IncludeNameFn] = None, + include_object: Optional[IncludeObjectFn] = None, + include_schemas: bool = False, + process_revision_directives: Optional[ + ProcessRevisionDirectiveFn + ] = None, + compare_type: Union[bool, CompareType] = True, + compare_server_default: Union[bool, CompareServerDefault] = False, + render_item: Optional[RenderItemFn] = None, + literal_binds: bool = False, + upgrade_token: str = "upgrades", + downgrade_token: str = "downgrades", + alembic_module_prefix: str = "op.", + sqlalchemy_module_prefix: str = "sa.", + user_module_prefix: Optional[str] = None, + on_version_apply: Optional[OnVersionApplyFn] = None, + **kw: Any, + ) -> None: + """Configure a :class:`.MigrationContext` within this + :class:`.EnvironmentContext` which will provide database + connectivity and other configuration to a series of + migration scripts. + + Many methods on :class:`.EnvironmentContext` require that + this method has been called in order to function, as they + ultimately need to have database access or at least access + to the dialect in use. Those which do are documented as such. + + The important thing needed by :meth:`.configure` is a + means to determine what kind of database dialect is in use. + An actual connection to that database is needed only if + the :class:`.MigrationContext` is to be used in + "online" mode. + + If the :meth:`.is_offline_mode` function returns ``True``, + then no connection is needed here. Otherwise, the + ``connection`` parameter should be present as an + instance of :class:`sqlalchemy.engine.Connection`. + + This function is typically called from the ``env.py`` + script within a migration environment. It can be called + multiple times for an invocation. The most recent + :class:`~sqlalchemy.engine.Connection` + for which it was called is the one that will be operated upon + by the next call to :meth:`.run_migrations`. + + General parameters: + + :param connection: a :class:`~sqlalchemy.engine.Connection` + to use + for SQL execution in "online" mode. When present, is also + used to determine the type of dialect in use. + :param url: a string database url, or a + :class:`sqlalchemy.engine.url.URL` object. + The type of dialect to be used will be derived from this if + ``connection`` is not passed. + :param dialect_name: string name of a dialect, such as + "postgresql", "mssql", etc. + The type of dialect to be used will be derived from this if + ``connection`` and ``url`` are not passed. + :param dialect_opts: dictionary of options to be passed to dialect + constructor. 
+ :param transactional_ddl: Force the usage of "transactional" + DDL on or off; + this otherwise defaults to whether or not the dialect in + use supports it. + :param transaction_per_migration: if True, nest each migration script + in a transaction rather than the full series of migrations to + run. + :param output_buffer: a file-like object that will be used + for textual output + when the ``--sql`` option is used to generate SQL scripts. + Defaults to + ``sys.stdout`` if not passed here and also not present on + the :class:`.Config` + object. The value here overrides that of the :class:`.Config` + object. + :param output_encoding: when using ``--sql`` to generate SQL + scripts, apply this encoding to the string output. + :param literal_binds: when using ``--sql`` to generate SQL + scripts, pass through the ``literal_binds`` flag to the compiler + so that any literal values that would ordinarily be bound + parameters are converted to plain strings. + + .. warning:: Dialects can typically only handle simple datatypes + like strings and numbers for auto-literal generation. Datatypes + like dates, intervals, and others may still require manual + formatting, typically using :meth:`.Operations.inline_literal`. + + .. note:: the ``literal_binds`` flag is ignored on SQLAlchemy + versions prior to 0.8 where this feature is not supported. + + .. seealso:: + + :meth:`.Operations.inline_literal` + + :param starting_rev: Override the "starting revision" argument + when using ``--sql`` mode. + :param tag: a string tag for usage by custom ``env.py`` scripts. + Set via the ``--tag`` option, can be overridden here. + :param template_args: dictionary of template arguments which + will be added to the template argument environment when + running the "revision" command. Note that the script environment + is only run within the "revision" command if the --autogenerate + option is used, or if the option "revision_environment=true" + is present in the alembic.ini file. + + :param version_table: The name of the Alembic version table. + The default is ``'alembic_version'``. + :param version_table_schema: Optional schema to place version + table within. + :param version_table_pk: boolean, whether the Alembic version table + should use a primary key constraint for the "value" column; this + only takes effect when the table is first created. + Defaults to True; setting to False should not be necessary and is + here for backwards compatibility reasons. + :param on_version_apply: a callable or collection of callables to be + run for each migration step. + The callables will be run in the order they are given, once for + each migration step, after the respective operation has been + applied but before its transaction is finalized. + Each callable accepts no positional arguments and the following + keyword arguments: + + * ``ctx``: the :class:`.MigrationContext` running the migration, + * ``step``: a :class:`.MigrationInfo` representing the + step currently being applied, + * ``heads``: a collection of version strings representing the + current heads, + * ``run_args``: the ``**kwargs`` passed to :meth:`.run_migrations`. + + Parameters specific to the autogenerate feature, when + ``alembic revision`` is run with the ``--autogenerate`` feature: + + :param target_metadata: a :class:`sqlalchemy.schema.MetaData` + object, or a sequence of :class:`~sqlalchemy.schema.MetaData` + objects, that will be consulted during autogeneration. 
+ The tables present in each :class:`~sqlalchemy.schema.MetaData` + will be compared against + what is locally available on the target + :class:`~sqlalchemy.engine.Connection` + to produce candidate upgrade/downgrade operations. + :param compare_type: Indicates type comparison behavior during + an autogenerate + operation. Defaults to ``True`` turning on type comparison, which + has good accuracy on most backends. See :ref:`compare_types` + for an example as well as information on other type + comparison options. Set to ``False`` which disables type + comparison. A callable can also be passed to provide custom type + comparison, see :ref:`compare_types` for additional details. + + .. versionchanged:: 1.12.0 The default value of + :paramref:`.EnvironmentContext.configure.compare_type` has been + changed to ``True``. + + .. seealso:: + + :ref:`compare_types` + + :paramref:`.EnvironmentContext.configure.compare_server_default` + + :param compare_server_default: Indicates server default comparison + behavior during + an autogenerate operation. Defaults to ``False`` which disables + server default + comparison. Set to ``True`` to turn on server default comparison, + which has + varied accuracy depending on backend. + + To customize server default comparison behavior, a callable may + be specified + which can filter server default comparisons during an + autogenerate operation. + defaults during an autogenerate operation. The format of this + callable is:: + + def my_compare_server_default(context, inspected_column, + metadata_column, inspected_default, metadata_default, + rendered_metadata_default): + # return True if the defaults are different, + # False if not, or None to allow the default implementation + # to compare these defaults + return None + + context.configure( + # ... + compare_server_default = my_compare_server_default + ) + + ``inspected_column`` is a dictionary structure as returned by + :meth:`sqlalchemy.engine.reflection.Inspector.get_columns`, whereas + ``metadata_column`` is a :class:`sqlalchemy.schema.Column` from + the local model environment. + + A return value of ``None`` indicates to allow default server default + comparison + to proceed. Note that some backends such as Postgresql actually + execute + the two defaults on the database side to compare for equivalence. + + .. seealso:: + + :paramref:`.EnvironmentContext.configure.compare_type` + + :param include_name: A callable function which is given + the chance to return ``True`` or ``False`` for any database reflected + object based on its name, including database schema names when + the :paramref:`.EnvironmentContext.configure.include_schemas` flag + is set to ``True``. + + The function accepts the following positional arguments: + + * ``name``: the name of the object, such as schema name or table name. + Will be ``None`` when indicating the default schema name of the + database connection. + * ``type``: a string describing the type of object; currently + ``"schema"``, ``"table"``, ``"column"``, ``"index"``, + ``"unique_constraint"``, or ``"foreign_key_constraint"`` + * ``parent_names``: a dictionary of "parent" object names, that are + relative to the name being given. Keys in this dictionary may + include: ``"schema_name"``, ``"table_name"`` or + ``"schema_qualified_table_name"``. + + E.g.:: + + def include_name(name, type_, parent_names): + if type_ == "schema": + return name in ["schema_one", "schema_two"] + else: + return True + + context.configure( + # ... 
+ include_schemas = True, + include_name = include_name + ) + + .. seealso:: + + :ref:`autogenerate_include_hooks` + + :paramref:`.EnvironmentContext.configure.include_object` + + :paramref:`.EnvironmentContext.configure.include_schemas` + + + :param include_object: A callable function which is given + the chance to return ``True`` or ``False`` for any object, + indicating if the given object should be considered in the + autogenerate sweep. + + The function accepts the following positional arguments: + + * ``object``: a :class:`~sqlalchemy.schema.SchemaItem` object such + as a :class:`~sqlalchemy.schema.Table`, + :class:`~sqlalchemy.schema.Column`, + :class:`~sqlalchemy.schema.Index` + :class:`~sqlalchemy.schema.UniqueConstraint`, + or :class:`~sqlalchemy.schema.ForeignKeyConstraint` object + * ``name``: the name of the object. This is typically available + via ``object.name``. + * ``type``: a string describing the type of object; currently + ``"table"``, ``"column"``, ``"index"``, ``"unique_constraint"``, + or ``"foreign_key_constraint"`` + * ``reflected``: ``True`` if the given object was produced based on + table reflection, ``False`` if it's from a local :class:`.MetaData` + object. + * ``compare_to``: the object being compared against, if available, + else ``None``. + + E.g.:: + + def include_object(object, name, type_, reflected, compare_to): + if (type_ == "column" and + not reflected and + object.info.get("skip_autogenerate", False)): + return False + else: + return True + + context.configure( + # ... + include_object = include_object + ) + + For the use case of omitting specific schemas from a target database + when :paramref:`.EnvironmentContext.configure.include_schemas` is + set to ``True``, the :attr:`~sqlalchemy.schema.Table.schema` + attribute can be checked for each :class:`~sqlalchemy.schema.Table` + object passed to the hook, however it is much more efficient + to filter on schemas before reflection of objects takes place + using the :paramref:`.EnvironmentContext.configure.include_name` + hook. + + .. seealso:: + + :ref:`autogenerate_include_hooks` + + :paramref:`.EnvironmentContext.configure.include_name` + + :paramref:`.EnvironmentContext.configure.include_schemas` + + :param render_as_batch: if True, commands which alter elements + within a table will be placed under a ``with batch_alter_table():`` + directive, so that batch migrations will take place. + + .. seealso:: + + :ref:`batch_migrations` + + :param include_schemas: If True, autogenerate will scan across + all schemas located by the SQLAlchemy + :meth:`~sqlalchemy.engine.reflection.Inspector.get_schema_names` + method, and include all differences in tables found across all + those schemas. When using this option, you may want to also + use the :paramref:`.EnvironmentContext.configure.include_name` + parameter to specify a callable which + can filter the tables/schemas that get included. + + .. seealso:: + + :ref:`autogenerate_include_hooks` + + :paramref:`.EnvironmentContext.configure.include_name` + + :paramref:`.EnvironmentContext.configure.include_object` + + :param render_item: Callable that can be used to override how + any schema item, i.e. column, constraint, type, + etc., is rendered for autogenerate. The callable receives a + string describing the type of object, the object, and + the autogen context. If it returns False, the + default rendering method will be used. 
If it returns None, + the item will not be rendered in the context of a Table + construct, that is, can be used to skip columns or constraints + within op.create_table():: + + def my_render_column(type_, col, autogen_context): + if type_ == "column" and isinstance(col, MySpecialCol): + return repr(col) + else: + return False + + context.configure( + # ... + render_item = my_render_column + ) + + Available values for the type string include: ``"column"``, + ``"primary_key"``, ``"foreign_key"``, ``"unique"``, ``"check"``, + ``"type"``, ``"server_default"``. + + .. seealso:: + + :ref:`autogen_render_types` + + :param upgrade_token: When autogenerate completes, the text of the + candidate upgrade operations will be present in this template + variable when ``script.py.mako`` is rendered. Defaults to + ``upgrades``. + :param downgrade_token: When autogenerate completes, the text of the + candidate downgrade operations will be present in this + template variable when ``script.py.mako`` is rendered. Defaults to + ``downgrades``. + + :param alembic_module_prefix: When autogenerate refers to Alembic + :mod:`alembic.operations` constructs, this prefix will be used + (i.e. ``op.create_table``) Defaults to "``op.``". + Can be ``None`` to indicate no prefix. + + :param sqlalchemy_module_prefix: When autogenerate refers to + SQLAlchemy + :class:`~sqlalchemy.schema.Column` or type classes, this prefix + will be used + (i.e. ``sa.Column("somename", sa.Integer)``) Defaults to "``sa.``". + Can be ``None`` to indicate no prefix. + Note that when dialect-specific types are rendered, autogenerate + will render them using the dialect module name, i.e. ``mssql.BIT()``, + ``postgresql.UUID()``. + + :param user_module_prefix: When autogenerate refers to a SQLAlchemy + type (e.g. :class:`.TypeEngine`) where the module name is not + under the ``sqlalchemy`` namespace, this prefix will be used + within autogenerate. If left at its default of + ``None``, the ``__module__`` attribute of the type is used to + render the import module. It's a good practice to set this + and to have all custom types be available from a fixed module space, + in order to future-proof migration files against reorganizations + in modules. + + .. seealso:: + + :ref:`autogen_module_prefix` + + :param process_revision_directives: a callable function that will + be passed a structure representing the end result of an autogenerate + or plain "revision" operation, which can be manipulated to affect + how the ``alembic revision`` command ultimately outputs new + revision scripts. The structure of the callable is:: + + def process_revision_directives(context, revision, directives): + pass + + The ``directives`` parameter is a Python list containing + a single :class:`.MigrationScript` directive, which represents + the revision file to be generated. This list as well as its + contents may be freely modified to produce any set of commands. + The section :ref:`customizing_revision` shows an example of + doing this. The ``context`` parameter is the + :class:`.MigrationContext` in use, + and ``revision`` is a tuple of revision identifiers representing the + current revision of the database. + + The callable is invoked at all times when the ``--autogenerate`` + option is passed to ``alembic revision``. 
If ``--autogenerate`` + is not passed, the callable is invoked only if the + ``revision_environment`` variable is set to True in the Alembic + configuration, in which case the given ``directives`` collection + will contain empty :class:`.UpgradeOps` and :class:`.DowngradeOps` + collections for ``.upgrade_ops`` and ``.downgrade_ops``. The + ``--autogenerate`` option itself can be inferred by inspecting + ``context.config.cmd_opts.autogenerate``. + + The callable function may optionally be an instance of + a :class:`.Rewriter` object. This is a helper object that + assists in the production of autogenerate-stream rewriter functions. + + .. seealso:: + + :ref:`customizing_revision` + + :ref:`autogen_rewriter` + + :paramref:`.command.revision.process_revision_directives` + + Parameters specific to individual backends: + + :param mssql_batch_separator: The "batch separator" which will + be placed between each statement when generating offline SQL Server + migrations. Defaults to ``GO``. Note this is in addition to the + customary semicolon ``;`` at the end of each statement; SQL Server + considers the "batch separator" to denote the end of an + individual statement execution, and cannot group certain + dependent operations in one step. + :param oracle_batch_separator: The "batch separator" which will + be placed between each statement when generating offline + Oracle migrations. Defaults to ``/``. Oracle doesn't add a + semicolon between statements like most other backends. + + """ + opts = self.context_opts + if transactional_ddl is not None: + opts["transactional_ddl"] = transactional_ddl + if output_buffer is not None: + opts["output_buffer"] = output_buffer + elif self.config.output_buffer is not None: + opts["output_buffer"] = self.config.output_buffer + if starting_rev: + opts["starting_rev"] = starting_rev + if tag: + opts["tag"] = tag + if template_args and "template_args" in opts: + opts["template_args"].update(template_args) + opts["transaction_per_migration"] = transaction_per_migration + opts["target_metadata"] = target_metadata + opts["include_name"] = include_name + opts["include_object"] = include_object + opts["include_schemas"] = include_schemas + opts["render_as_batch"] = render_as_batch + opts["upgrade_token"] = upgrade_token + opts["downgrade_token"] = downgrade_token + opts["sqlalchemy_module_prefix"] = sqlalchemy_module_prefix + opts["alembic_module_prefix"] = alembic_module_prefix + opts["user_module_prefix"] = user_module_prefix + opts["literal_binds"] = literal_binds + opts["process_revision_directives"] = process_revision_directives + opts["on_version_apply"] = util.to_tuple(on_version_apply, default=()) + + if render_item is not None: + opts["render_item"] = render_item + opts["compare_type"] = compare_type + if compare_server_default is not None: + opts["compare_server_default"] = compare_server_default + opts["script"] = self.script + + opts.update(kw) + + self._migration_context = MigrationContext.configure( + connection=connection, + url=url, + dialect_name=dialect_name, + environment_context=self, + dialect_opts=dialect_opts, + opts=opts, + ) + + def run_migrations(self, **kw: Any) -> None: + """Run migrations as determined by the current command line + configuration + as well as versioning information present (or not) in the current + database connection (if one is present). + + The function accepts optional ``**kw`` arguments. If these are + passed, they are sent directly to the ``upgrade()`` and + ``downgrade()`` + functions within each target revision file. 
By modifying the + ``script.py.mako`` file so that the ``upgrade()`` and ``downgrade()`` + functions accept arguments, parameters can be passed here so that + contextual information, usually information to identify a particular + database in use, can be passed from a custom ``env.py`` script + to the migration functions. + + This function requires that a :class:`.MigrationContext` has + first been made available via :meth:`.configure`. + + """ + assert self._migration_context is not None + with Operations.context(self._migration_context): + self.get_context().run_migrations(**kw) + + def execute( + self, + sql: Union[Executable, str], + execution_options: Optional[Dict[str, Any]] = None, + ) -> None: + """Execute the given SQL using the current change context. + + The behavior of :meth:`.execute` is the same + as that of :meth:`.Operations.execute`. Please see that + function's documentation for full detail including + caveats and limitations. + + This function requires that a :class:`.MigrationContext` has + first been made available via :meth:`.configure`. + + """ + self.get_context().execute(sql, execution_options=execution_options) + + def static_output(self, text: str) -> None: + """Emit text directly to the "offline" SQL stream. + + Typically this is for emitting comments that + start with --. The statement is not treated + as a SQL execution, no ; or batch separator + is added, etc. + + """ + self.get_context().impl.static_output(text) + + def begin_transaction( + self, + ) -> Union[_ProxyTransaction, ContextManager[None, Optional[bool]]]: + """Return a context manager that will + enclose an operation within a "transaction", + as defined by the environment's offline + and transactional DDL settings. + + e.g.:: + + with context.begin_transaction(): + context.run_migrations() + + :meth:`.begin_transaction` is intended to + "do the right thing" regardless of + calling context: + + * If :meth:`.is_transactional_ddl` is ``False``, + returns a "do nothing" context manager + which otherwise produces no transactional + state or directives. + * If :meth:`.is_offline_mode` is ``True``, + returns a context manager that will + invoke the :meth:`.DefaultImpl.emit_begin` + and :meth:`.DefaultImpl.emit_commit` + methods, which will produce the string + directives ``BEGIN`` and ``COMMIT`` on + the output stream, as rendered by the + target backend (e.g. SQL Server would + emit ``BEGIN TRANSACTION``). + * Otherwise, calls :meth:`sqlalchemy.engine.Connection.begin` + on the current online connection, which + returns a :class:`sqlalchemy.engine.Transaction` + object. This object demarcates a real + transaction and is itself a context manager, + which will roll back if an exception + is raised. + + Note that a custom ``env.py`` script which + has more specific transactional needs can of course + manipulate the :class:`~sqlalchemy.engine.Connection` + directly to produce transactional state in "online" + mode. + + """ + + return self.get_context().begin_transaction() + + def get_context(self) -> MigrationContext: + """Return the current :class:`.MigrationContext` object. + + If :meth:`.EnvironmentContext.configure` has not been + called yet, raises an exception. + + """ + + if self._migration_context is None: + raise Exception("No context has been configured yet.") + return self._migration_context + + def get_bind(self) -> Connection: + """Return the current 'bind'. + + In "online" mode, this is the + :class:`sqlalchemy.engine.Connection` currently being used + to emit SQL to the database. 
+ + This function requires that a :class:`.MigrationContext` + has first been made available via :meth:`.configure`. + + """ + return self.get_context().bind # type: ignore[return-value] + + def get_impl(self) -> DefaultImpl: + return self.get_context().impl diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/runtime/migration.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/runtime/migration.py new file mode 100644 index 0000000000000000000000000000000000000000..c1c7b0fc5da6efa920bdeac86bc48cf4803b627a --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/runtime/migration.py @@ -0,0 +1,1395 @@ +# mypy: allow-untyped-defs, allow-incomplete-defs, allow-untyped-calls +# mypy: no-warn-return-any, allow-any-generics + +from __future__ import annotations + +from contextlib import contextmanager +from contextlib import nullcontext +import logging +import sys +from typing import Any +from typing import Callable +from typing import cast +from typing import Collection +from typing import Dict +from typing import Iterable +from typing import Iterator +from typing import List +from typing import Optional +from typing import Set +from typing import Tuple +from typing import TYPE_CHECKING +from typing import Union + +from sqlalchemy import Column +from sqlalchemy import literal_column +from sqlalchemy import select +from sqlalchemy.engine import Engine +from sqlalchemy.engine import url as sqla_url +from sqlalchemy.engine.strategies import MockEngineStrategy +from typing_extensions import ContextManager + +from .. import ddl +from .. import util +from ..util import sqla_compat +from ..util.compat import EncodedIO + +if TYPE_CHECKING: + from sqlalchemy.engine import Dialect + from sqlalchemy.engine import URL + from sqlalchemy.engine.base import Connection + from sqlalchemy.engine.base import Transaction + from sqlalchemy.engine.mock import MockConnection + from sqlalchemy.sql import Executable + + from .environment import EnvironmentContext + from ..config import Config + from ..script.base import Script + from ..script.base import ScriptDirectory + from ..script.revision import _RevisionOrBase + from ..script.revision import Revision + from ..script.revision import RevisionMap + +log = logging.getLogger(__name__) + + +class _ProxyTransaction: + def __init__(self, migration_context: MigrationContext) -> None: + self.migration_context = migration_context + + @property + def _proxied_transaction(self) -> Optional[Transaction]: + return self.migration_context._transaction + + def rollback(self) -> None: + t = self._proxied_transaction + assert t is not None + t.rollback() + self.migration_context._transaction = None + + def commit(self) -> None: + t = self._proxied_transaction + assert t is not None + t.commit() + self.migration_context._transaction = None + + def __enter__(self) -> _ProxyTransaction: + return self + + def __exit__(self, type_: Any, value: Any, traceback: Any) -> None: + if self._proxied_transaction is not None: + self._proxied_transaction.__exit__(type_, value, traceback) + self.migration_context._transaction = None + + +class MigrationContext: + """Represent the database state made available to a migration + script. + + :class:`.MigrationContext` is the front end to an actual + database connection, or alternatively a string output + stream given a particular database dialect, + from an Alembic perspective. 
+ + When inside the ``env.py`` script, the :class:`.MigrationContext` + is available via the + :meth:`.EnvironmentContext.get_context` method, + which is available at ``alembic.context``:: + + # from within env.py script + from alembic import context + + migration_context = context.get_context() + + For usage outside of an ``env.py`` script, such as for + utility routines that want to check the current version + in the database, the :meth:`.MigrationContext.configure` + method to create new :class:`.MigrationContext` objects. + For example, to get at the current revision in the + database using :meth:`.MigrationContext.get_current_revision`:: + + # in any application, outside of an env.py script + from alembic.migration import MigrationContext + from sqlalchemy import create_engine + + engine = create_engine("postgresql://mydatabase") + conn = engine.connect() + + context = MigrationContext.configure(conn) + current_rev = context.get_current_revision() + + The above context can also be used to produce + Alembic migration operations with an :class:`.Operations` + instance:: + + # in any application, outside of the normal Alembic environment + from alembic.operations import Operations + + op = Operations(context) + op.alter_column("mytable", "somecolumn", nullable=True) + + """ + + def __init__( + self, + dialect: Dialect, + connection: Optional[Connection], + opts: Dict[str, Any], + environment_context: Optional[EnvironmentContext] = None, + ) -> None: + self.environment_context = environment_context + self.opts = opts + self.dialect = dialect + self.script: Optional[ScriptDirectory] = opts.get("script") + as_sql: bool = opts.get("as_sql", False) + transactional_ddl = opts.get("transactional_ddl") + self._transaction_per_migration = opts.get( + "transaction_per_migration", False + ) + self.on_version_apply_callbacks = opts.get("on_version_apply", ()) + self._transaction: Optional[Transaction] = None + + if as_sql: + self.connection = cast( + Optional["Connection"], self._stdout_connection(connection) + ) + assert self.connection is not None + self._in_external_transaction = False + else: + self.connection = connection + self._in_external_transaction = ( + sqla_compat._get_connection_in_transaction(connection) + ) + + self._migrations_fn: Optional[ + Callable[..., Iterable[RevisionStep]] + ] = opts.get("fn") + self.as_sql = as_sql + + self.purge = opts.get("purge", False) + + if "output_encoding" in opts: + self.output_buffer = EncodedIO( + opts.get("output_buffer") + or sys.stdout, # type:ignore[arg-type] + opts["output_encoding"], + ) + else: + self.output_buffer = opts.get( + "output_buffer", sys.stdout + ) # type:ignore[assignment] # noqa: E501 + + self.transactional_ddl = transactional_ddl + + self._user_compare_type = opts.get("compare_type", True) + self._user_compare_server_default = opts.get( + "compare_server_default", False + ) + self.version_table = version_table = opts.get( + "version_table", "alembic_version" + ) + self.version_table_schema = version_table_schema = opts.get( + "version_table_schema", None + ) + + self._start_from_rev: Optional[str] = opts.get("starting_rev") + self.impl = ddl.DefaultImpl.get_by_dialect(dialect)( + dialect, + self.connection, + self.as_sql, + transactional_ddl, + self.output_buffer, + opts, + ) + + self._version = self.impl.version_table_impl( + version_table=version_table, + version_table_schema=version_table_schema, + version_table_pk=opts.get("version_table_pk", True), + ) + + log.info("Context impl %s.", self.impl.__class__.__name__) + if 
self.as_sql: + log.info("Generating static SQL") + log.info( + "Will assume %s DDL.", + ( + "transactional" + if self.impl.transactional_ddl + else "non-transactional" + ), + ) + + @classmethod + def configure( + cls, + connection: Optional[Connection] = None, + url: Optional[Union[str, URL]] = None, + dialect_name: Optional[str] = None, + dialect: Optional[Dialect] = None, + environment_context: Optional[EnvironmentContext] = None, + dialect_opts: Optional[Dict[str, str]] = None, + opts: Optional[Any] = None, + ) -> MigrationContext: + """Create a new :class:`.MigrationContext`. + + This is a factory method usually called + by :meth:`.EnvironmentContext.configure`. + + :param connection: a :class:`~sqlalchemy.engine.Connection` + to use for SQL execution in "online" mode. When present, + is also used to determine the type of dialect in use. + :param url: a string database url, or a + :class:`sqlalchemy.engine.url.URL` object. + The type of dialect to be used will be derived from this if + ``connection`` is not passed. + :param dialect_name: string name of a dialect, such as + "postgresql", "mssql", etc. The type of dialect to be used will be + derived from this if ``connection`` and ``url`` are not passed. + :param opts: dictionary of options. Most other options + accepted by :meth:`.EnvironmentContext.configure` are passed via + this dictionary. + + """ + if opts is None: + opts = {} + if dialect_opts is None: + dialect_opts = {} + + if connection: + if isinstance(connection, Engine): + raise util.CommandError( + "'connection' argument to configure() is expected " + "to be a sqlalchemy.engine.Connection instance, " + "got %r" % connection, + ) + + dialect = connection.dialect + elif url: + url_obj = sqla_url.make_url(url) + dialect = url_obj.get_dialect()(**dialect_opts) + elif dialect_name: + url_obj = sqla_url.make_url("%s://" % dialect_name) + dialect = url_obj.get_dialect()(**dialect_opts) + elif not dialect: + raise Exception("Connection, url, or dialect_name is required.") + assert dialect is not None + return MigrationContext(dialect, connection, opts, environment_context) + + @contextmanager + def autocommit_block(self) -> Iterator[None]: + """Enter an "autocommit" block, for databases that support AUTOCOMMIT + isolation levels. + + This special directive is intended to support the occasional database + DDL or system operation that specifically has to be run outside of + any kind of transaction block. The PostgreSQL database platform + is the most common target for this style of operation, as many + of its DDL operations must be run outside of transaction blocks, even + though the database overall supports transactional DDL. + + The method is used as a context manager within a migration script, by + calling on :meth:`.Operations.get_context` to retrieve the + :class:`.MigrationContext`, then invoking + :meth:`.MigrationContext.autocommit_block` using the ``with:`` + statement:: + + def upgrade(): + with op.get_context().autocommit_block(): + op.execute("ALTER TYPE mood ADD VALUE 'soso'") + + Above, a PostgreSQL "ALTER TYPE..ADD VALUE" directive is emitted, + which must be run outside of a transaction block at the database level. + The :meth:`.MigrationContext.autocommit_block` method makes use of the + SQLAlchemy ``AUTOCOMMIT`` isolation level setting, which against the + psycogp2 DBAPI corresponds to the ``connection.autocommit`` setting, + to ensure that the database driver is not inside of a DBAPI level + transaction block. + + .. 
warning:: + + As is necessary, **the database transaction preceding the block is + unconditionally committed**. This means that the run of migrations + preceding the operation will be committed, before the overall + migration operation is complete. + + It is recommended that when an application includes migrations with + "autocommit" blocks, that + :paramref:`.EnvironmentContext.transaction_per_migration` be used + so that the calling environment is tuned to expect short per-file + migrations whether or not one of them has an autocommit block. + + + """ + _in_connection_transaction = self._in_connection_transaction() + + if self.impl.transactional_ddl and self.as_sql: + self.impl.emit_commit() + + elif _in_connection_transaction: + assert self._transaction is not None + + self._transaction.commit() + self._transaction = None + + if not self.as_sql: + assert self.connection is not None + current_level = self.connection.get_isolation_level() + base_connection = self.connection + + # in 1.3 and 1.4 non-future mode, the connection gets switched + # out. we can use the base connection with the new mode + # except that it will not know it's in "autocommit" and will + # emit deprecation warnings when an autocommit action takes + # place. + self.connection = self.impl.connection = ( + base_connection.execution_options(isolation_level="AUTOCOMMIT") + ) + + # sqlalchemy future mode will "autobegin" in any case, so take + # control of that "transaction" here + fake_trans: Optional[Transaction] = self.connection.begin() + else: + fake_trans = None + try: + yield + finally: + if not self.as_sql: + assert self.connection is not None + if fake_trans is not None: + fake_trans.commit() + self.connection.execution_options( + isolation_level=current_level + ) + self.connection = self.impl.connection = base_connection + + if self.impl.transactional_ddl and self.as_sql: + self.impl.emit_begin() + + elif _in_connection_transaction: + assert self.connection is not None + self._transaction = self.connection.begin() + + def begin_transaction( + self, _per_migration: bool = False + ) -> Union[_ProxyTransaction, ContextManager[None, Optional[bool]]]: + """Begin a logical transaction for migration operations. + + This method is used within an ``env.py`` script to demarcate where + the outer "transaction" for a series of migrations begins. Example:: + + def run_migrations_online(): + connectable = create_engine(...) + + with connectable.connect() as connection: + context.configure( + connection=connection, target_metadata=target_metadata + ) + + with context.begin_transaction(): + context.run_migrations() + + Above, :meth:`.MigrationContext.begin_transaction` is used to demarcate + where the outer logical transaction occurs around the + :meth:`.MigrationContext.run_migrations` operation. + + A "Logical" transaction means that the operation may or may not + correspond to a real database transaction. If the target database + supports transactional DDL (or + :paramref:`.EnvironmentContext.configure.transactional_ddl` is true), + the :paramref:`.EnvironmentContext.configure.transaction_per_migration` + flag is not set, and the migration is against a real database + connection (as opposed to using "offline" ``--sql`` mode), a real + transaction will be started. If ``--sql`` mode is in effect, the + operation would instead correspond to a string such as "BEGIN" being + emitted to the string output. 
+ + The returned object is a Python context manager that should only be + used in the context of a ``with:`` statement as indicated above. + The object has no other guaranteed API features present. + + .. seealso:: + + :meth:`.MigrationContext.autocommit_block` + + """ + + if self._in_external_transaction: + return nullcontext() + + if self.impl.transactional_ddl: + transaction_now = _per_migration == self._transaction_per_migration + else: + transaction_now = _per_migration is True + + if not transaction_now: + return nullcontext() + + elif not self.impl.transactional_ddl: + assert _per_migration + + if self.as_sql: + return nullcontext() + else: + # track our own notion of a "transaction block", which must be + # committed when complete. Don't rely upon whether or not the + # SQLAlchemy connection reports as "in transaction"; this + # because SQLAlchemy future connection features autobegin + # behavior, so it may already be in a transaction from our + # emitting of queries like "has_version_table", etc. While we + # could track these operations as well, that leaves open the + # possibility of new operations or other things happening in + # the user environment that still may be triggering + # "autobegin". + + in_transaction = self._transaction is not None + + if in_transaction: + return nullcontext() + else: + assert self.connection is not None + self._transaction = ( + sqla_compat._safe_begin_connection_transaction( + self.connection + ) + ) + return _ProxyTransaction(self) + elif self.as_sql: + + @contextmanager + def begin_commit(): + self.impl.emit_begin() + yield + self.impl.emit_commit() + + return begin_commit() + else: + assert self.connection is not None + self._transaction = sqla_compat._safe_begin_connection_transaction( + self.connection + ) + return _ProxyTransaction(self) + + def get_current_revision(self) -> Optional[str]: + """Return the current revision, usually that which is present + in the ``alembic_version`` table in the database. + + This method intends to be used only for a migration stream that + does not contain unmerged branches in the target database; + if there are multiple branches present, an exception is raised. + The :meth:`.MigrationContext.get_current_heads` should be preferred + over this method going forward in order to be compatible with + branch migration support. + + If this :class:`.MigrationContext` was configured in "offline" + mode, that is with ``as_sql=True``, the ``starting_rev`` + parameter is returned instead, if any. + + """ + heads = self.get_current_heads() + if len(heads) == 0: + return None + elif len(heads) > 1: + raise util.CommandError( + "Version table '%s' has more than one head present; " + "please use get_current_heads()" % self.version_table + ) + else: + return heads[0] + + def get_current_heads(self) -> Tuple[str, ...]: + """Return a tuple of the current 'head versions' that are represented + in the target database. + + For a migration stream without branches, this will be a single + value, synonymous with that of + :meth:`.MigrationContext.get_current_revision`. However when multiple + unmerged branches exist within the target database, the returned tuple + will contain a value for each head. + + If this :class:`.MigrationContext` was configured in "offline" + mode, that is with ``as_sql=True``, the ``starting_rev`` + parameter is returned in a one-length tuple. + + If no version table is present, or if there are no revisions + present, an empty tuple is returned. 
+ + """ + if self.as_sql: + start_from_rev: Any = self._start_from_rev + if start_from_rev == "base": + start_from_rev = None + elif start_from_rev is not None and self.script: + start_from_rev = [ + self.script.get_revision(sfr).revision + for sfr in util.to_list(start_from_rev) + if sfr not in (None, "base") + ] + return util.to_tuple(start_from_rev, default=()) + else: + if self._start_from_rev: + raise util.CommandError( + "Can't specify current_rev to context " + "when using a database connection" + ) + if not self._has_version_table(): + return () + assert self.connection is not None + return tuple( + row[0] + for row in self.connection.execute( + select(self._version.c.version_num) + ) + ) + + def _ensure_version_table(self, purge: bool = False) -> None: + with sqla_compat._ensure_scope_for_ddl(self.connection): + assert self.connection is not None + self._version.create(self.connection, checkfirst=True) + if purge: + assert self.connection is not None + self.connection.execute(self._version.delete()) + + def _has_version_table(self) -> bool: + assert self.connection is not None + return sqla_compat._connectable_has_table( + self.connection, self.version_table, self.version_table_schema + ) + + def stamp(self, script_directory: ScriptDirectory, revision: str) -> None: + """Stamp the version table with a specific revision. + + This method calculates those branches to which the given revision + can apply, and updates those branches as though they were migrated + towards that revision (either up or down). If no current branches + include the revision, it is added as a new branch head. + + """ + heads = self.get_current_heads() + if not self.as_sql and not heads: + self._ensure_version_table() + head_maintainer = HeadMaintainer(self, heads) + for step in script_directory._stamp_revs(revision, heads): + head_maintainer.update_to_step(step) + + def run_migrations(self, **kw: Any) -> None: + r"""Run the migration scripts established for this + :class:`.MigrationContext`, if any. + + The commands in :mod:`alembic.command` will set up a function + that is ultimately passed to the :class:`.MigrationContext` + as the ``fn`` argument. This function represents the "work" + that will be done when :meth:`.MigrationContext.run_migrations` + is called, typically from within the ``env.py`` script of the + migration environment. The "work function" then provides an iterable + of version callables and other version information which + in the case of the ``upgrade`` or ``downgrade`` commands are the + list of version scripts to invoke. Other commands yield nothing, + in the case that a command wants to run some other operation + against the database such as the ``current`` or ``stamp`` commands. + + :param \**kw: keyword arguments here will be passed to each + migration callable, that is the ``upgrade()`` or ``downgrade()`` + method within revision scripts. + + """ + self.impl.start_migrations() + + heads: Tuple[str, ...] 
+ if self.purge: + if self.as_sql: + raise util.CommandError("Can't use --purge with --sql mode") + self._ensure_version_table(purge=True) + heads = () + else: + heads = self.get_current_heads() + + dont_mutate = self.opts.get("dont_mutate", False) + + if not self.as_sql and not heads and not dont_mutate: + self._ensure_version_table() + + head_maintainer = HeadMaintainer(self, heads) + + assert self._migrations_fn is not None + for step in self._migrations_fn(heads, self): + with self.begin_transaction(_per_migration=True): + if self.as_sql and not head_maintainer.heads: + # for offline mode, include a CREATE TABLE from + # the base + assert self.connection is not None + self._version.create(self.connection) + log.info("Running %s", step) + if self.as_sql: + self.impl.static_output( + "-- Running %s" % (step.short_log,) + ) + step.migration_fn(**kw) + + # previously, we wouldn't stamp per migration + # if we were in a transaction, however given the more + # complex model that involves any number of inserts + # and row-targeted updates and deletes, it's simpler for now + # just to run the operations on every version + head_maintainer.update_to_step(step) + for callback in self.on_version_apply_callbacks: + callback( + ctx=self, + step=step.info, + heads=set(head_maintainer.heads), + run_args=kw, + ) + + if self.as_sql and not head_maintainer.heads: + assert self.connection is not None + self._version.drop(self.connection) + + def _in_connection_transaction(self) -> bool: + try: + meth = self.connection.in_transaction # type:ignore[union-attr] + except AttributeError: + return False + else: + return meth() + + def execute( + self, + sql: Union[Executable, str], + execution_options: Optional[Dict[str, Any]] = None, + ) -> None: + """Execute a SQL construct or string statement. + + The underlying execution mechanics are used, that is + if this is "offline mode" the SQL is written to the + output buffer, otherwise the SQL is emitted on + the current SQLAlchemy connection. + + """ + self.impl._exec(sql, execution_options) + + def _stdout_connection( + self, connection: Optional[Connection] + ) -> MockConnection: + def dump(construct, *multiparams, **params): + self.impl._exec(construct) + + return MockEngineStrategy.MockConnection(self.dialect, dump) + + @property + def bind(self) -> Optional[Connection]: + """Return the current "bind". + + In online mode, this is an instance of + :class:`sqlalchemy.engine.Connection`, and is suitable + for ad-hoc execution of any kind of usage described + in SQLAlchemy Core documentation as well as + for usage with the :meth:`sqlalchemy.schema.Table.create` + and :meth:`sqlalchemy.schema.MetaData.create_all` methods + of :class:`~sqlalchemy.schema.Table`, + :class:`~sqlalchemy.schema.MetaData`. + + Note that when "standard output" mode is enabled, + this bind will be a "mock" connection handler that cannot + return results and is only appropriate for a very limited + subset of commands. 
+ + """ + return self.connection + + @property + def config(self) -> Optional[Config]: + """Return the :class:`.Config` used by the current environment, + if any.""" + + if self.environment_context: + return self.environment_context.config + else: + return None + + def _compare_type( + self, inspector_column: Column[Any], metadata_column: Column + ) -> bool: + if self._user_compare_type is False: + return False + + if callable(self._user_compare_type): + user_value = self._user_compare_type( + self, + inspector_column, + metadata_column, + inspector_column.type, + metadata_column.type, + ) + if user_value is not None: + return user_value + + return self.impl.compare_type(inspector_column, metadata_column) + + def _compare_server_default( + self, + inspector_column: Column[Any], + metadata_column: Column[Any], + rendered_metadata_default: Optional[str], + rendered_column_default: Optional[str], + ) -> bool: + if self._user_compare_server_default is False: + return False + + if callable(self._user_compare_server_default): + user_value = self._user_compare_server_default( + self, + inspector_column, + metadata_column, + rendered_column_default, + metadata_column.server_default, + rendered_metadata_default, + ) + if user_value is not None: + return user_value + + return self.impl.compare_server_default( + inspector_column, + metadata_column, + rendered_metadata_default, + rendered_column_default, + ) + + +class HeadMaintainer: + def __init__(self, context: MigrationContext, heads: Any) -> None: + self.context = context + self.heads = set(heads) + + def _insert_version(self, version: str) -> None: + assert version not in self.heads + self.heads.add(version) + + self.context.impl._exec( + self.context._version.insert().values( + version_num=literal_column("'%s'" % version) + ) + ) + + def _delete_version(self, version: str) -> None: + self.heads.remove(version) + + ret = self.context.impl._exec( + self.context._version.delete().where( + self.context._version.c.version_num + == literal_column("'%s'" % version) + ) + ) + + if ( + not self.context.as_sql + and self.context.dialect.supports_sane_rowcount + and ret is not None + and ret.rowcount != 1 + ): + raise util.CommandError( + "Online migration expected to match one " + "row when deleting '%s' in '%s'; " + "%d found" + % (version, self.context.version_table, ret.rowcount) + ) + + def _update_version(self, from_: str, to_: str) -> None: + assert to_ not in self.heads + self.heads.remove(from_) + self.heads.add(to_) + + ret = self.context.impl._exec( + self.context._version.update() + .values(version_num=literal_column("'%s'" % to_)) + .where( + self.context._version.c.version_num + == literal_column("'%s'" % from_) + ) + ) + + if ( + not self.context.as_sql + and self.context.dialect.supports_sane_rowcount + and ret is not None + and ret.rowcount != 1 + ): + raise util.CommandError( + "Online migration expected to match one " + "row when updating '%s' to '%s' in '%s'; " + "%d found" + % (from_, to_, self.context.version_table, ret.rowcount) + ) + + def update_to_step(self, step: Union[RevisionStep, StampStep]) -> None: + if step.should_delete_branch(self.heads): + vers = step.delete_version_num + log.debug("branch delete %s", vers) + self._delete_version(vers) + elif step.should_create_branch(self.heads): + vers = step.insert_version_num + log.debug("new branch insert %s", vers) + self._insert_version(vers) + elif step.should_merge_branches(self.heads): + # delete revs, update from rev, update to rev + ( + delete_revs, + update_from_rev, + 
update_to_rev, + ) = step.merge_branch_idents(self.heads) + log.debug( + "merge, delete %s, update %s to %s", + delete_revs, + update_from_rev, + update_to_rev, + ) + for delrev in delete_revs: + self._delete_version(delrev) + self._update_version(update_from_rev, update_to_rev) + elif step.should_unmerge_branches(self.heads): + ( + update_from_rev, + update_to_rev, + insert_revs, + ) = step.unmerge_branch_idents(self.heads) + log.debug( + "unmerge, insert %s, update %s to %s", + insert_revs, + update_from_rev, + update_to_rev, + ) + for insrev in insert_revs: + self._insert_version(insrev) + self._update_version(update_from_rev, update_to_rev) + else: + from_, to_ = step.update_version_num(self.heads) + log.debug("update %s to %s", from_, to_) + self._update_version(from_, to_) + + +class MigrationInfo: + """Exposes information about a migration step to a callback listener. + + The :class:`.MigrationInfo` object is available exclusively for the + benefit of the :paramref:`.EnvironmentContext.on_version_apply` + callback hook. + + """ + + is_upgrade: bool + """True/False: indicates whether this operation ascends or descends the + version tree.""" + + is_stamp: bool + """True/False: indicates whether this operation is a stamp (i.e. whether + it results in any actual database operations).""" + + up_revision_id: Optional[str] + """Version string corresponding to :attr:`.Revision.revision`. + + In the case of a stamp operation, it is advised to use the + :attr:`.MigrationInfo.up_revision_ids` tuple as a stamp operation can + make a single movement from one or more branches down to a single + branchpoint, in which case there will be multiple "up" revisions. + + .. seealso:: + + :attr:`.MigrationInfo.up_revision_ids` + + """ + + up_revision_ids: Tuple[str, ...] + """Tuple of version strings corresponding to :attr:`.Revision.revision`. + + In the majority of cases, this tuple will be a single value, synonymous + with the scalar value of :attr:`.MigrationInfo.up_revision_id`. + It can be multiple revision identifiers only in the case of an + ``alembic stamp`` operation which is moving downwards from multiple + branches down to their common branch point. + + """ + + down_revision_ids: Tuple[str, ...] + """Tuple of strings representing the base revisions of this migration step. + + If empty, this represents a root revision; otherwise, the first item + corresponds to :attr:`.Revision.down_revision`, and the rest are inferred + from dependencies. + """ + + revision_map: RevisionMap + """The revision map inside of which this operation occurs.""" + + def __init__( + self, + revision_map: RevisionMap, + is_upgrade: bool, + is_stamp: bool, + up_revisions: Union[str, Tuple[str, ...]], + down_revisions: Union[str, Tuple[str, ...]], + ) -> None: + self.revision_map = revision_map + self.is_upgrade = is_upgrade + self.is_stamp = is_stamp + self.up_revision_ids = util.to_tuple(up_revisions, default=()) + if self.up_revision_ids: + self.up_revision_id = self.up_revision_ids[0] + else: + # this should never be the case with + # "upgrade", "downgrade", or "stamp" as we are always + # measuring movement in terms of at least one upgrade version + self.up_revision_id = None + self.down_revision_ids = util.to_tuple(down_revisions, default=()) + + @property + def is_migration(self) -> bool: + """True/False: indicates whether this operation is a migration. + + At present this is true if and only the migration is not a stamp. 
+ If other operation types are added in the future, both this attribute + and :attr:`~.MigrationInfo.is_stamp` will be false. + """ + return not self.is_stamp + + @property + def source_revision_ids(self) -> Tuple[str, ...]: + """Active revisions before this migration step is applied.""" + return ( + self.down_revision_ids if self.is_upgrade else self.up_revision_ids + ) + + @property + def destination_revision_ids(self) -> Tuple[str, ...]: + """Active revisions after this migration step is applied.""" + return ( + self.up_revision_ids if self.is_upgrade else self.down_revision_ids + ) + + @property + def up_revision(self) -> Optional[Revision]: + """Get :attr:`~.MigrationInfo.up_revision_id` as + a :class:`.Revision`. + + """ + return self.revision_map.get_revision(self.up_revision_id) + + @property + def up_revisions(self) -> Tuple[Optional[_RevisionOrBase], ...]: + """Get :attr:`~.MigrationInfo.up_revision_ids` as a + :class:`.Revision`.""" + return self.revision_map.get_revisions(self.up_revision_ids) + + @property + def down_revisions(self) -> Tuple[Optional[_RevisionOrBase], ...]: + """Get :attr:`~.MigrationInfo.down_revision_ids` as a tuple of + :class:`Revisions <.Revision>`.""" + return self.revision_map.get_revisions(self.down_revision_ids) + + @property + def source_revisions(self) -> Tuple[Optional[_RevisionOrBase], ...]: + """Get :attr:`~MigrationInfo.source_revision_ids` as a tuple of + :class:`Revisions <.Revision>`.""" + return self.revision_map.get_revisions(self.source_revision_ids) + + @property + def destination_revisions(self) -> Tuple[Optional[_RevisionOrBase], ...]: + """Get :attr:`~MigrationInfo.destination_revision_ids` as a tuple of + :class:`Revisions <.Revision>`.""" + return self.revision_map.get_revisions(self.destination_revision_ids) + + +class MigrationStep: + from_revisions_no_deps: Tuple[str, ...] + to_revisions_no_deps: Tuple[str, ...] + is_upgrade: bool + migration_fn: Any + + if TYPE_CHECKING: + + @property + def doc(self) -> Optional[str]: ... 
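[Illustrative aside, not part of the vendored Alembic source: the MigrationInfo object defined above is delivered to the on_version_apply hook configured through EnvironmentContext.configure(), and the callback receives the keyword arguments ctx, step, heads and run_args exactly as invoked in the migration loop earlier in this file. The sketch below shows one plausible env.py callback; the names context, connection, target_metadata and report_step are the usual env.py conventions and are assumptions here, not code from this diff.]

    # hypothetical excerpt from an env.py -- a sketch, not vendored code
    import logging

    log = logging.getLogger("alembic.env")

    def report_step(ctx, step, heads, run_args):
        # 'step' is a MigrationInfo instance; 'heads' is the set of head
        # revisions recorded in the version table after this step applied.
        direction = "upgrade" if step.is_upgrade else "downgrade"
        log.info(
            "%s %s -> %s (stamp=%s), heads now %s",
            direction,
            step.source_revision_ids,
            step.destination_revision_ids,
            step.is_stamp,
            sorted(heads),
        )

    context.configure(
        connection=connection,
        target_metadata=target_metadata,
        on_version_apply=report_step,
    )

[End of aside; the vendored diff continues below.]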
+ + @property + def name(self) -> str: + return self.migration_fn.__name__ + + @classmethod + def upgrade_from_script( + cls, revision_map: RevisionMap, script: Script + ) -> RevisionStep: + return RevisionStep(revision_map, script, True) + + @classmethod + def downgrade_from_script( + cls, revision_map: RevisionMap, script: Script + ) -> RevisionStep: + return RevisionStep(revision_map, script, False) + + @property + def is_downgrade(self) -> bool: + return not self.is_upgrade + + @property + def short_log(self) -> str: + return "%s %s -> %s" % ( + self.name, + util.format_as_comma(self.from_revisions_no_deps), + util.format_as_comma(self.to_revisions_no_deps), + ) + + def __str__(self): + if self.doc: + return "%s %s -> %s, %s" % ( + self.name, + util.format_as_comma(self.from_revisions_no_deps), + util.format_as_comma(self.to_revisions_no_deps), + self.doc, + ) + else: + return self.short_log + + +class RevisionStep(MigrationStep): + def __init__( + self, revision_map: RevisionMap, revision: Script, is_upgrade: bool + ) -> None: + self.revision_map = revision_map + self.revision = revision + self.is_upgrade = is_upgrade + if is_upgrade: + self.migration_fn = revision.module.upgrade + else: + self.migration_fn = revision.module.downgrade + + def __repr__(self): + return "RevisionStep(%r, is_upgrade=%r)" % ( + self.revision.revision, + self.is_upgrade, + ) + + def __eq__(self, other: object) -> bool: + return ( + isinstance(other, RevisionStep) + and other.revision == self.revision + and self.is_upgrade == other.is_upgrade + ) + + @property + def doc(self) -> Optional[str]: + return self.revision.doc + + @property + def from_revisions(self) -> Tuple[str, ...]: + if self.is_upgrade: + return self.revision._normalized_down_revisions + else: + return (self.revision.revision,) + + @property + def from_revisions_no_deps( # type:ignore[override] + self, + ) -> Tuple[str, ...]: + if self.is_upgrade: + return self.revision._versioned_down_revisions + else: + return (self.revision.revision,) + + @property + def to_revisions(self) -> Tuple[str, ...]: + if self.is_upgrade: + return (self.revision.revision,) + else: + return self.revision._normalized_down_revisions + + @property + def to_revisions_no_deps( # type:ignore[override] + self, + ) -> Tuple[str, ...]: + if self.is_upgrade: + return (self.revision.revision,) + else: + return self.revision._versioned_down_revisions + + @property + def _has_scalar_down_revision(self) -> bool: + return len(self.revision._normalized_down_revisions) == 1 + + def should_delete_branch(self, heads: Set[str]) -> bool: + """A delete is when we are a. in a downgrade and b. + we are going to the "base" or we are going to a version that + is implied as a dependency on another version that is remaining. + + """ + if not self.is_downgrade: + return False + + if self.revision.revision not in heads: + return False + + downrevs = self.revision._normalized_down_revisions + + if not downrevs: + # is a base + return True + else: + # determine what the ultimate "to_revisions" for an + # unmerge would be. If there are none, then we're a delete. 
+ to_revisions = self._unmerge_to_revisions(heads) + return not to_revisions + + def merge_branch_idents( + self, heads: Set[str] + ) -> Tuple[List[str], str, str]: + other_heads = set(heads).difference(self.from_revisions) + + if other_heads: + ancestors = { + r.revision + for r in self.revision_map._get_ancestor_nodes( + self.revision_map.get_revisions(other_heads), check=False + ) + } + from_revisions = list( + set(self.from_revisions).difference(ancestors) + ) + else: + from_revisions = list(self.from_revisions) + + return ( + # delete revs, update from rev, update to rev + list(from_revisions[0:-1]), + from_revisions[-1], + self.to_revisions[0], + ) + + def _unmerge_to_revisions(self, heads: Set[str]) -> Tuple[str, ...]: + other_heads = set(heads).difference([self.revision.revision]) + if other_heads: + ancestors = { + r.revision + for r in self.revision_map._get_ancestor_nodes( + self.revision_map.get_revisions(other_heads), check=False + ) + } + return tuple(set(self.to_revisions).difference(ancestors)) + else: + # for each revision we plan to return, compute its ancestors + # (excluding self), and remove those from the final output since + # they are already accounted for. + ancestors = { + r.revision + for to_revision in self.to_revisions + for r in self.revision_map._get_ancestor_nodes( + self.revision_map.get_revisions(to_revision), check=False + ) + if r.revision != to_revision + } + return tuple(set(self.to_revisions).difference(ancestors)) + + def unmerge_branch_idents( + self, heads: Set[str] + ) -> Tuple[str, str, Tuple[str, ...]]: + to_revisions = self._unmerge_to_revisions(heads) + + return ( + # update from rev, update to rev, insert revs + self.from_revisions[0], + to_revisions[-1], + to_revisions[0:-1], + ) + + def should_create_branch(self, heads: Set[str]) -> bool: + if not self.is_upgrade: + return False + + downrevs = self.revision._normalized_down_revisions + + if not downrevs: + # is a base + return True + else: + # none of our downrevs are present, so... + # we have to insert our version. This is true whether + # or not there is only one downrev, or multiple (in the latter + # case, we're a merge point.) 
+ if not heads.intersection(downrevs): + return True + else: + return False + + def should_merge_branches(self, heads: Set[str]) -> bool: + if not self.is_upgrade: + return False + + downrevs = self.revision._normalized_down_revisions + + if len(downrevs) > 1 and len(heads.intersection(downrevs)) > 1: + return True + + return False + + def should_unmerge_branches(self, heads: Set[str]) -> bool: + if not self.is_downgrade: + return False + + downrevs = self.revision._normalized_down_revisions + + if self.revision.revision in heads and len(downrevs) > 1: + return True + + return False + + def update_version_num(self, heads: Set[str]) -> Tuple[str, str]: + if not self._has_scalar_down_revision: + downrev = heads.intersection( + self.revision._normalized_down_revisions + ) + assert ( + len(downrev) == 1 + ), "Can't do an UPDATE because downrevision is ambiguous" + down_revision = list(downrev)[0] + else: + down_revision = self.revision._normalized_down_revisions[0] + + if self.is_upgrade: + return down_revision, self.revision.revision + else: + return self.revision.revision, down_revision + + @property + def delete_version_num(self) -> str: + return self.revision.revision + + @property + def insert_version_num(self) -> str: + return self.revision.revision + + @property + def info(self) -> MigrationInfo: + return MigrationInfo( + revision_map=self.revision_map, + up_revisions=self.revision.revision, + down_revisions=self.revision._normalized_down_revisions, + is_upgrade=self.is_upgrade, + is_stamp=False, + ) + + +class StampStep(MigrationStep): + def __init__( + self, + from_: Optional[Union[str, Collection[str]]], + to_: Optional[Union[str, Collection[str]]], + is_upgrade: bool, + branch_move: bool, + revision_map: Optional[RevisionMap] = None, + ) -> None: + self.from_: Tuple[str, ...] = util.to_tuple(from_, default=()) + self.to_: Tuple[str, ...] 
= util.to_tuple(to_, default=()) + self.is_upgrade = is_upgrade + self.branch_move = branch_move + self.migration_fn = self.stamp_revision + self.revision_map = revision_map + + doc: Optional[str] = None + + def stamp_revision(self, **kw: Any) -> None: + return None + + def __eq__(self, other): + return ( + isinstance(other, StampStep) + and other.from_revisions == self.from_revisions + and other.to_revisions == self.to_revisions + and other.branch_move == self.branch_move + and self.is_upgrade == other.is_upgrade + ) + + @property + def from_revisions(self): + return self.from_ + + @property + def to_revisions(self) -> Tuple[str, ...]: + return self.to_ + + @property + def from_revisions_no_deps( # type:ignore[override] + self, + ) -> Tuple[str, ...]: + return self.from_ + + @property + def to_revisions_no_deps( # type:ignore[override] + self, + ) -> Tuple[str, ...]: + return self.to_ + + @property + def delete_version_num(self) -> str: + assert len(self.from_) == 1 + return self.from_[0] + + @property + def insert_version_num(self) -> str: + assert len(self.to_) == 1 + return self.to_[0] + + def update_version_num(self, heads: Set[str]) -> Tuple[str, str]: + assert len(self.from_) == 1 + assert len(self.to_) == 1 + return self.from_[0], self.to_[0] + + def merge_branch_idents( + self, heads: Union[Set[str], List[str]] + ) -> Union[Tuple[List[Any], str, str], Tuple[List[str], str, str]]: + return ( + # delete revs, update from rev, update to rev + list(self.from_[0:-1]), + self.from_[-1], + self.to_[0], + ) + + def unmerge_branch_idents( + self, heads: Set[str] + ) -> Tuple[str, str, List[str]]: + return ( + # update from rev, update to rev, insert revs + self.from_[0], + self.to_[-1], + list(self.to_[0:-1]), + ) + + def should_delete_branch(self, heads: Set[str]) -> bool: + # TODO: we probably need to look for self.to_ inside of heads, + # in a similar manner as should_create_branch, however we have + # no tests for this yet (stamp downgrades w/ branches) + return self.is_downgrade and self.branch_move + + def should_create_branch(self, heads: Set[str]) -> Union[Set[str], bool]: + return ( + self.is_upgrade + and (self.branch_move or set(self.from_).difference(heads)) + and set(self.to_).difference(heads) + ) + + def should_merge_branches(self, heads: Set[str]) -> bool: + return len(self.from_) > 1 + + def should_unmerge_branches(self, heads: Set[str]) -> bool: + return len(self.to_) > 1 + + @property + def info(self) -> MigrationInfo: + up, down = ( + (self.to_, self.from_) + if self.is_upgrade + else (self.from_, self.to_) + ) + assert self.revision_map is not None + return MigrationInfo( + revision_map=self.revision_map, + up_revisions=up, + down_revisions=down, + is_upgrade=self.is_upgrade, + is_stamp=True, + ) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/script/__init__.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/script/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d78f3f1dc54c13a52b64e8d668c2baf708eb20bc --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/script/__init__.py @@ -0,0 +1,4 @@ +from .base import Script +from .base import ScriptDirectory + +__all__ = ["ScriptDirectory", "Script"] diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/script/base.py 
b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/script/base.py new file mode 100644 index 0000000000000000000000000000000000000000..94292316bc8e9fabc9160487239dcb93d84dbe98 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/script/base.py @@ -0,0 +1,1055 @@ +from __future__ import annotations + +from contextlib import contextmanager +import datetime +import os +from pathlib import Path +import re +import shutil +import sys +from types import ModuleType +from typing import Any +from typing import cast +from typing import Iterator +from typing import List +from typing import Optional +from typing import Sequence +from typing import Set +from typing import Tuple +from typing import TYPE_CHECKING +from typing import Union + +from . import revision +from . import write_hooks +from .. import util +from ..runtime import migration +from ..util import compat +from ..util import not_none +from ..util.pyfiles import _preserving_path_as_str + +if TYPE_CHECKING: + from .revision import _GetRevArg + from .revision import _RevIdType + from .revision import Revision + from ..config import Config + from ..config import MessagingOptions + from ..config import PostWriteHookConfig + from ..runtime.migration import RevisionStep + from ..runtime.migration import StampStep + +try: + if compat.py39: + from zoneinfo import ZoneInfo + from zoneinfo import ZoneInfoNotFoundError + else: + from backports.zoneinfo import ZoneInfo # type: ignore[import-not-found,no-redef] # noqa: E501 + from backports.zoneinfo import ZoneInfoNotFoundError # type: ignore[no-redef] # noqa: E501 +except ImportError: + ZoneInfo = None # type: ignore[assignment, misc] + +_sourceless_rev_file = re.compile(r"(?!\.\#|__init__)(.*\.py)(c|o)?$") +_only_source_rev_file = re.compile(r"(?!\.\#|__init__)(.*\.py)$") +_legacy_rev = re.compile(r"([a-f0-9]+)\.py$") +_slug_re = re.compile(r"\w+") +_default_file_template = "%(rev)s_%(slug)s" + + +class ScriptDirectory: + """Provides operations upon an Alembic script directory. 
+ + This object is useful to get information as to current revisions, + most notably being able to get at the "head" revision, for schemes + that want to test if the current revision in the database is the most + recent:: + + from alembic.script import ScriptDirectory + from alembic.config import Config + config = Config() + config.set_main_option("script_location", "myapp:migrations") + script = ScriptDirectory.from_config(config) + + head_revision = script.get_current_head() + + + + """ + + def __init__( + self, + dir: Union[str, os.PathLike[str]], # noqa: A002 + file_template: str = _default_file_template, + truncate_slug_length: Optional[int] = 40, + version_locations: Optional[ + Sequence[Union[str, os.PathLike[str]]] + ] = None, + sourceless: bool = False, + output_encoding: str = "utf-8", + timezone: Optional[str] = None, + hooks: list[PostWriteHookConfig] = [], + recursive_version_locations: bool = False, + messaging_opts: MessagingOptions = cast( + "MessagingOptions", util.EMPTY_DICT + ), + ) -> None: + self.dir = _preserving_path_as_str(dir) + self.version_locations = [ + _preserving_path_as_str(p) for p in version_locations or () + ] + self.file_template = file_template + self.truncate_slug_length = truncate_slug_length or 40 + self.sourceless = sourceless + self.output_encoding = output_encoding + self.revision_map = revision.RevisionMap(self._load_revisions) + self.timezone = timezone + self.hooks = hooks + self.recursive_version_locations = recursive_version_locations + self.messaging_opts = messaging_opts + + if not os.access(dir, os.F_OK): + raise util.CommandError( + f"Path doesn't exist: {dir}. Please use " + "the 'init' command to create a new " + "scripts folder." + ) + + @property + def versions(self) -> str: + """return a single version location based on the sole path passed + within version_locations. + + If multiple version locations are configured, an error is raised. + + + """ + return str(self._singular_version_location) + + @util.memoized_property + def _singular_version_location(self) -> Path: + loc = self._version_locations + if len(loc) > 1: + raise util.CommandError("Multiple version_locations present") + else: + return loc[0] + + @util.memoized_property + def _version_locations(self) -> Sequence[Path]: + if self.version_locations: + return [ + util.coerce_resource_to_filename(location).absolute() + for location in self.version_locations + ] + else: + return [Path(self.dir, "versions").absolute()] + + def _load_revisions(self) -> Iterator[Script]: + paths = [vers for vers in self._version_locations if vers.exists()] + + dupes = set() + for vers in paths: + for file_path in Script._list_py_dir(self, vers): + real_path = file_path.resolve() + if real_path in dupes: + util.warn( + f"File {real_path} loaded twice! ignoring. " + "Please ensure version_locations is unique." + ) + continue + dupes.add(real_path) + + script = Script._from_path(self, real_path) + if script is None: + continue + yield script + + @classmethod + def from_config(cls, config: Config) -> ScriptDirectory: + """Produce a new :class:`.ScriptDirectory` given a :class:`.Config` + instance. + + The :class:`.Config` need only have the ``script_location`` key + present. + + """ + script_location = config.get_alembic_option("script_location") + if script_location is None: + raise util.CommandError( + "No 'script_location' key found in configuration." 
+ ) + truncate_slug_length: Optional[int] + tsl = config.get_alembic_option("truncate_slug_length") + if tsl is not None: + truncate_slug_length = int(tsl) + else: + truncate_slug_length = None + + prepend_sys_path = config.get_prepend_sys_paths_list() + if prepend_sys_path: + sys.path[:0] = prepend_sys_path + + rvl = config.get_alembic_boolean_option("recursive_version_locations") + return ScriptDirectory( + util.coerce_resource_to_filename(script_location), + file_template=config.get_alembic_option( + "file_template", _default_file_template + ), + truncate_slug_length=truncate_slug_length, + sourceless=config.get_alembic_boolean_option("sourceless"), + output_encoding=config.get_alembic_option( + "output_encoding", "utf-8" + ), + version_locations=config.get_version_locations_list(), + timezone=config.get_alembic_option("timezone"), + hooks=config.get_hooks_list(), + recursive_version_locations=rvl, + messaging_opts=config.messaging_opts, + ) + + @contextmanager + def _catch_revision_errors( + self, + ancestor: Optional[str] = None, + multiple_heads: Optional[str] = None, + start: Optional[str] = None, + end: Optional[str] = None, + resolution: Optional[str] = None, + ) -> Iterator[None]: + try: + yield + except revision.RangeNotAncestorError as rna: + if start is None: + start = cast(Any, rna.lower) + if end is None: + end = cast(Any, rna.upper) + if not ancestor: + ancestor = ( + "Requested range %(start)s:%(end)s does not refer to " + "ancestor/descendant revisions along the same branch" + ) + ancestor = ancestor % {"start": start, "end": end} + raise util.CommandError(ancestor) from rna + except revision.MultipleHeads as mh: + if not multiple_heads: + multiple_heads = ( + "Multiple head revisions are present for given " + "argument '%(head_arg)s'; please " + "specify a specific target revision, " + "'@%(head_arg)s' to " + "narrow to a specific head, or 'heads' for all heads" + ) + multiple_heads = multiple_heads % { + "head_arg": end or mh.argument, + "heads": util.format_as_comma(mh.heads), + } + raise util.CommandError(multiple_heads) from mh + except revision.ResolutionError as re: + if resolution is None: + resolution = "Can't locate revision identified by '%s'" % ( + re.argument + ) + raise util.CommandError(resolution) from re + except revision.RevisionError as err: + raise util.CommandError(err.args[0]) from err + + def walk_revisions( + self, base: str = "base", head: str = "heads" + ) -> Iterator[Script]: + """Iterate through all revisions. + + :param base: the base revision, or "base" to start from the + empty revision. + + :param head: the head revision; defaults to "heads" to indicate + all head revisions. May also be "head" to indicate a single + head revision. + + """ + with self._catch_revision_errors(start=base, end=head): + for rev in self.revision_map.iterate_revisions( + head, base, inclusive=True, assert_relative_length=False + ): + yield cast(Script, rev) + + def get_revisions(self, id_: _GetRevArg) -> Tuple[Script, ...]: + """Return the :class:`.Script` instance with the given rev identifier, + symbolic name, or sequence of identifiers. + + """ + with self._catch_revision_errors(): + return cast( + Tuple[Script, ...], + self.revision_map.get_revisions(id_), + ) + + def get_all_current(self, id_: Tuple[str, ...]) -> Set[Script]: + with self._catch_revision_errors(): + return cast(Set[Script], self.revision_map._get_all_current(id_)) + + def get_revision(self, id_: str) -> Script: + """Return the :class:`.Script` instance with the given rev id. + + .. 
seealso:: + + :meth:`.ScriptDirectory.get_revisions` + + """ + + with self._catch_revision_errors(): + return cast(Script, self.revision_map.get_revision(id_)) + + def as_revision_number( + self, id_: Optional[str] + ) -> Optional[Union[str, Tuple[str, ...]]]: + """Convert a symbolic revision, i.e. 'head' or 'base', into + an actual revision number.""" + + with self._catch_revision_errors(): + rev, branch_name = self.revision_map._resolve_revision_number(id_) + + if not rev: + # convert () to None + return None + elif id_ == "heads": + return rev + else: + return rev[0] + + def iterate_revisions( + self, + upper: Union[str, Tuple[str, ...], None], + lower: Union[str, Tuple[str, ...], None], + **kw: Any, + ) -> Iterator[Script]: + """Iterate through script revisions, starting at the given + upper revision identifier and ending at the lower. + + The traversal uses strictly the `down_revision` + marker inside each migration script, so + it is a requirement that upper >= lower, + else you'll get nothing back. + + The iterator yields :class:`.Script` objects. + + .. seealso:: + + :meth:`.RevisionMap.iterate_revisions` + + """ + return cast( + Iterator[Script], + self.revision_map.iterate_revisions(upper, lower, **kw), + ) + + def get_current_head(self) -> Optional[str]: + """Return the current head revision. + + If the script directory has multiple heads + due to branching, an error is raised; + :meth:`.ScriptDirectory.get_heads` should be + preferred. + + :return: a string revision number. + + .. seealso:: + + :meth:`.ScriptDirectory.get_heads` + + """ + with self._catch_revision_errors( + multiple_heads=( + "The script directory has multiple heads (due to branching)." + "Please use get_heads(), or merge the branches using " + "alembic merge." + ) + ): + return self.revision_map.get_current_head() + + def get_heads(self) -> List[str]: + """Return all "versioned head" revisions as strings. + + This is normally a list of length one, + unless branches are present. The + :meth:`.ScriptDirectory.get_current_head()` method + can be used normally when a script directory + has only one head. + + :return: a tuple of string revision numbers. + """ + return list(self.revision_map.heads) + + def get_base(self) -> Optional[str]: + """Return the "base" revision as a string. + + This is the revision number of the script that + has a ``down_revision`` of None. + + If the script directory has multiple bases, an error is raised; + :meth:`.ScriptDirectory.get_bases` should be + preferred. + + """ + bases = self.get_bases() + if len(bases) > 1: + raise util.CommandError( + "The script directory has multiple bases. " + "Please use get_bases()." + ) + elif bases: + return bases[0] + else: + return None + + def get_bases(self) -> List[str]: + """return all "base" revisions as strings. + + This is the revision number of all scripts that + have a ``down_revision`` of None. 
+ + """ + return list(self.revision_map.bases) + + def _upgrade_revs( + self, destination: str, current_rev: str + ) -> List[RevisionStep]: + with self._catch_revision_errors( + ancestor="Destination %(end)s is not a valid upgrade " + "target from current head(s)", + end=destination, + ): + revs = self.iterate_revisions( + destination, current_rev, implicit_base=True + ) + return [ + migration.MigrationStep.upgrade_from_script( + self.revision_map, script + ) + for script in reversed(list(revs)) + ] + + def _downgrade_revs( + self, destination: str, current_rev: Optional[str] + ) -> List[RevisionStep]: + with self._catch_revision_errors( + ancestor="Destination %(end)s is not a valid downgrade " + "target from current head(s)", + end=destination, + ): + revs = self.iterate_revisions( + current_rev, destination, select_for_downgrade=True + ) + return [ + migration.MigrationStep.downgrade_from_script( + self.revision_map, script + ) + for script in revs + ] + + def _stamp_revs( + self, revision: _RevIdType, heads: _RevIdType + ) -> List[StampStep]: + with self._catch_revision_errors( + multiple_heads="Multiple heads are present; please specify a " + "single target revision" + ): + heads_revs = self.get_revisions(heads) + + steps = [] + + if not revision: + revision = "base" + + filtered_heads: List[Script] = [] + for rev in util.to_tuple(revision): + if rev: + filtered_heads.extend( + self.revision_map.filter_for_lineage( + cast(Sequence[Script], heads_revs), + rev, + include_dependencies=True, + ) + ) + filtered_heads = util.unique_list(filtered_heads) + + dests = self.get_revisions(revision) or [None] + + for dest in dests: + if dest is None: + # dest is 'base'. Return a "delete branch" migration + # for all applicable heads. + steps.extend( + [ + migration.StampStep( + head.revision, + None, + False, + True, + self.revision_map, + ) + for head in filtered_heads + ] + ) + continue + elif dest in filtered_heads: + # the dest is already in the version table, do nothing. + continue + + # figure out if the dest is a descendant or an + # ancestor of the selected nodes + descendants = set( + self.revision_map._get_descendant_nodes([dest]) + ) + ancestors = set(self.revision_map._get_ancestor_nodes([dest])) + + if descendants.intersection(filtered_heads): + # heads are above the target, so this is a downgrade. + # we can treat them as a "merge", single step. + assert not ancestors.intersection(filtered_heads) + todo_heads = [head.revision for head in filtered_heads] + step = migration.StampStep( + todo_heads, + dest.revision, + False, + False, + self.revision_map, + ) + steps.append(step) + continue + elif ancestors.intersection(filtered_heads): + # heads are below the target, so this is an upgrade. + # we can treat them as a "merge", single step. + todo_heads = [head.revision for head in filtered_heads] + step = migration.StampStep( + todo_heads, + dest.revision, + True, + False, + self.revision_map, + ) + steps.append(step) + continue + else: + # destination is in a branch not represented, + # treat it as new branch + step = migration.StampStep( + (), dest.revision, True, True, self.revision_map + ) + steps.append(step) + continue + + return steps + + def run_env(self) -> None: + """Run the script environment. + + This basically runs the ``env.py`` script present + in the migration environment. It is called exclusively + by the command functions in :mod:`alembic.command`. 
+ + + """ + util.load_python_file(self.dir, "env.py") + + @property + def env_py_location(self) -> str: + return str(Path(self.dir, "env.py")) + + def _append_template(self, src: Path, dest: Path, **kw: Any) -> None: + with util.status( + f"Appending to existing {dest.absolute()}", + **self.messaging_opts, + ): + util.template_to_file( + src, + dest, + self.output_encoding, + append_with_newlines=True, + **kw, + ) + + def _generate_template(self, src: Path, dest: Path, **kw: Any) -> None: + with util.status( + f"Generating {dest.absolute()}", **self.messaging_opts + ): + util.template_to_file(src, dest, self.output_encoding, **kw) + + def _copy_file(self, src: Path, dest: Path) -> None: + with util.status( + f"Generating {dest.absolute()}", **self.messaging_opts + ): + shutil.copy(src, dest) + + def _ensure_directory(self, path: Path) -> None: + path = path.absolute() + if not path.exists(): + with util.status( + f"Creating directory {path}", **self.messaging_opts + ): + os.makedirs(path) + + def _generate_create_date(self) -> datetime.datetime: + if self.timezone is not None: + if ZoneInfo is None: + raise util.CommandError( + "Python >= 3.9 is required for timezone support or " + "the 'backports.zoneinfo' package must be installed." + ) + # First, assume correct capitalization + try: + tzinfo = ZoneInfo(self.timezone) + except ZoneInfoNotFoundError: + tzinfo = None + if tzinfo is None: + try: + tzinfo = ZoneInfo(self.timezone.upper()) + except ZoneInfoNotFoundError: + raise util.CommandError( + "Can't locate timezone: %s" % self.timezone + ) from None + + create_date = datetime.datetime.now( + tz=datetime.timezone.utc + ).astimezone(tzinfo) + else: + create_date = datetime.datetime.now() + return create_date + + def generate_revision( + self, + revid: str, + message: Optional[str], + head: Optional[_RevIdType] = None, + splice: Optional[bool] = False, + branch_labels: Optional[_RevIdType] = None, + version_path: Union[str, os.PathLike[str], None] = None, + file_template: Optional[str] = None, + depends_on: Optional[_RevIdType] = None, + **kw: Any, + ) -> Optional[Script]: + """Generate a new revision file. + + This runs the ``script.py.mako`` template, given + template arguments, and creates a new file. + + :param revid: String revision id. Typically this + comes from ``alembic.util.rev_id()``. + :param message: the revision message, the one passed + by the -m argument to the ``revision`` command. + :param head: the head revision to generate against. Defaults + to the current "head" if no branches are present, else raises + an exception. + :param splice: if True, allow the "head" version to not be an + actual head; otherwise, the selected head must be a head + (e.g. endpoint) revision. + + """ + if head is None: + head = "head" + + try: + Script.verify_rev_id(revid) + except revision.RevisionError as err: + raise util.CommandError(err.args[0]) from err + + with self._catch_revision_errors( + multiple_heads=( + "Multiple heads are present; please specify the head " + "revision on which the new revision should be based, " + "or perform a merge." 
+ ) + ): + heads = cast( + Tuple[Optional["Revision"], ...], + self.revision_map.get_revisions(head), + ) + for h in heads: + assert h != "base" # type: ignore[comparison-overlap] + + if len(set(heads)) != len(heads): + raise util.CommandError("Duplicate head revisions specified") + + create_date = self._generate_create_date() + + if version_path is None: + if len(self._version_locations) > 1: + for head_ in heads: + if head_ is not None: + assert isinstance(head_, Script) + version_path = head_._script_path.parent + break + else: + raise util.CommandError( + "Multiple version locations present, " + "please specify --version-path" + ) + else: + version_path = self._singular_version_location + else: + version_path = Path(version_path) + + assert isinstance(version_path, Path) + norm_path = version_path.absolute() + for vers_path in self._version_locations: + if vers_path.absolute() == norm_path: + break + else: + raise util.CommandError( + f"Path {version_path} is not represented in current " + "version locations" + ) + + if self.version_locations: + self._ensure_directory(version_path) + + path = self._rev_path(version_path, revid, message, create_date) + + if not splice: + for head_ in heads: + if head_ is not None and not head_.is_head: + raise util.CommandError( + "Revision %s is not a head revision; please specify " + "--splice to create a new branch from this revision" + % head_.revision + ) + + resolved_depends_on: Optional[List[str]] + if depends_on: + with self._catch_revision_errors(): + resolved_depends_on = [ + ( + dep + if dep in rev.branch_labels # maintain branch labels + else rev.revision + ) # resolve partial revision identifiers + for rev, dep in [ + (not_none(self.revision_map.get_revision(dep)), dep) + for dep in util.to_list(depends_on) + ] + ] + else: + resolved_depends_on = None + + self._generate_template( + Path(self.dir, "script.py.mako"), + path, + up_revision=str(revid), + down_revision=revision.tuple_rev_as_scalar( + tuple(h.revision if h is not None else None for h in heads) + ), + branch_labels=util.to_tuple(branch_labels), + depends_on=revision.tuple_rev_as_scalar(resolved_depends_on), + create_date=create_date, + comma=util.format_as_comma, + message=message if message is not None else ("empty message"), + **kw, + ) + + post_write_hooks = self.hooks + if post_write_hooks: + write_hooks._run_hooks(path, post_write_hooks) + + try: + script = Script._from_path(self, path) + except revision.RevisionError as err: + raise util.CommandError(err.args[0]) from err + if script is None: + return None + if branch_labels and not script.branch_labels: + raise util.CommandError( + "Version %s specified branch_labels %s, however the " + "migration file %s does not have them; have you upgraded " + "your script.py.mako to include the " + "'branch_labels' section?" 
+ % (script.revision, branch_labels, script.path) + ) + self.revision_map.add_revision(script) + return script + + def _rev_path( + self, + path: Union[str, os.PathLike[str]], + rev_id: str, + message: Optional[str], + create_date: datetime.datetime, + ) -> Path: + epoch = int(create_date.timestamp()) + slug = "_".join(_slug_re.findall(message or "")).lower() + if len(slug) > self.truncate_slug_length: + slug = slug[: self.truncate_slug_length].rsplit("_", 1)[0] + "_" + filename = "%s.py" % ( + self.file_template + % { + "rev": rev_id, + "slug": slug, + "epoch": epoch, + "year": create_date.year, + "month": create_date.month, + "day": create_date.day, + "hour": create_date.hour, + "minute": create_date.minute, + "second": create_date.second, + } + ) + return Path(path) / filename + + +class Script(revision.Revision): + """Represent a single revision file in a ``versions/`` directory. + + The :class:`.Script` instance is returned by methods + such as :meth:`.ScriptDirectory.iterate_revisions`. + + """ + + def __init__( + self, + module: ModuleType, + rev_id: str, + path: Union[str, os.PathLike[str]], + ): + self.module = module + self.path = _preserving_path_as_str(path) + super().__init__( + rev_id, + module.down_revision, + branch_labels=util.to_tuple( + getattr(module, "branch_labels", None), default=() + ), + dependencies=util.to_tuple( + getattr(module, "depends_on", None), default=() + ), + ) + + module: ModuleType + """The Python module representing the actual script itself.""" + + path: str + """Filesystem path of the script.""" + + @property + def _script_path(self) -> Path: + return Path(self.path) + + _db_current_indicator: Optional[bool] = None + """Utility variable which when set will cause string output to indicate + this is a "current" version in some database""" + + @property + def doc(self) -> str: + """Return the docstring given in the script.""" + + return re.split("\n\n", self.longdoc)[0] + + @property + def longdoc(self) -> str: + """Return the docstring given in the script.""" + + doc = self.module.__doc__ + if doc: + if hasattr(self.module, "_alembic_source_encoding"): + doc = doc.decode( # type: ignore[attr-defined] + self.module._alembic_source_encoding + ) + return doc.strip() + else: + return "" + + @property + def log_entry(self) -> str: + entry = "Rev: %s%s%s%s%s\n" % ( + self.revision, + " (head)" if self.is_head else "", + " (branchpoint)" if self.is_branch_point else "", + " (mergepoint)" if self.is_merge_point else "", + " (current)" if self._db_current_indicator else "", + ) + if self.is_merge_point: + entry += "Merges: %s\n" % (self._format_down_revision(),) + else: + entry += "Parent: %s\n" % (self._format_down_revision(),) + + if self.dependencies: + entry += "Also depends on: %s\n" % ( + util.format_as_comma(self.dependencies) + ) + + if self.is_branch_point: + entry += "Branches into: %s\n" % ( + util.format_as_comma(self.nextrev) + ) + + if self.branch_labels: + entry += "Branch names: %s\n" % ( + util.format_as_comma(self.branch_labels), + ) + + entry += "Path: %s\n" % (self.path,) + + entry += "\n%s\n" % ( + "\n".join(" %s" % para for para in self.longdoc.splitlines()) + ) + return entry + + def __str__(self) -> str: + return "%s -> %s%s%s%s, %s" % ( + self._format_down_revision(), + self.revision, + " (head)" if self.is_head else "", + " (branchpoint)" if self.is_branch_point else "", + " (mergepoint)" if self.is_merge_point else "", + self.doc, + ) + + def _head_only( + self, + include_branches: bool = False, + include_doc: bool = False, + 
include_parents: bool = False, + tree_indicators: bool = True, + head_indicators: bool = True, + ) -> str: + text = self.revision + if include_parents: + if self.dependencies: + text = "%s (%s) -> %s" % ( + self._format_down_revision(), + util.format_as_comma(self.dependencies), + text, + ) + else: + text = "%s -> %s" % (self._format_down_revision(), text) + assert text is not None + if include_branches and self.branch_labels: + text += " (%s)" % util.format_as_comma(self.branch_labels) + if head_indicators or tree_indicators: + text += "%s%s%s" % ( + " (head)" if self._is_real_head else "", + ( + " (effective head)" + if self.is_head and not self._is_real_head + else "" + ), + " (current)" if self._db_current_indicator else "", + ) + if tree_indicators: + text += "%s%s" % ( + " (branchpoint)" if self.is_branch_point else "", + " (mergepoint)" if self.is_merge_point else "", + ) + if include_doc: + text += ", %s" % self.doc + return text + + def cmd_format( + self, + verbose: bool, + include_branches: bool = False, + include_doc: bool = False, + include_parents: bool = False, + tree_indicators: bool = True, + ) -> str: + if verbose: + return self.log_entry + else: + return self._head_only( + include_branches, include_doc, include_parents, tree_indicators + ) + + def _format_down_revision(self) -> str: + if not self.down_revision: + return "" + else: + return util.format_as_comma(self._versioned_down_revisions) + + @classmethod + def _list_py_dir( + cls, scriptdir: ScriptDirectory, path: Path + ) -> List[Path]: + paths = [] + for root, dirs, files in compat.path_walk(path, top_down=True): + if root.name.endswith("__pycache__"): + # a special case - we may include these files + # if a `sourceless` option is specified + continue + + for filename in sorted(files): + paths.append(root / filename) + + if scriptdir.sourceless: + # look for __pycache__ + py_cache_path = root / "__pycache__" + if py_cache_path.exists(): + # add all files from __pycache__ whose filename is not + # already in the names we got from the version directory. + # add as relative paths including __pycache__ token + names = { + Path(filename).name.split(".")[0] for filename in files + } + paths.extend( + py_cache_path / pyc + for pyc in py_cache_path.iterdir() + if pyc.name.split(".")[0] not in names + ) + + if not scriptdir.recursive_version_locations: + break + + # the real script order is defined by revision, + # but it may be undefined if there are many files with a same + # `down_revision`, for a better user experience (ex. 
debugging), + # we use a deterministic order + dirs.sort() + + return paths + + @classmethod + def _from_path( + cls, scriptdir: ScriptDirectory, path: Union[str, os.PathLike[str]] + ) -> Optional[Script]: + + path = Path(path) + dir_, filename = path.parent, path.name + + if scriptdir.sourceless: + py_match = _sourceless_rev_file.match(filename) + else: + py_match = _only_source_rev_file.match(filename) + + if not py_match: + return None + + py_filename = py_match.group(1) + + if scriptdir.sourceless: + is_c = py_match.group(2) == "c" + is_o = py_match.group(2) == "o" + else: + is_c = is_o = False + + if is_o or is_c: + py_exists = (dir_ / py_filename).exists() + pyc_exists = (dir_ / (py_filename + "c")).exists() + + # prefer .py over .pyc because we'd like to get the + # source encoding; prefer .pyc over .pyo because we'd like to + # have the docstrings which a -OO file would not have + if py_exists or is_o and pyc_exists: + return None + + module = util.load_python_file(dir_, filename) + + if not hasattr(module, "revision"): + # attempt to get the revision id from the script name, + # this for legacy only + m = _legacy_rev.match(filename) + if not m: + raise util.CommandError( + "Could not determine revision id from " + f"filename {filename}. " + "Be sure the 'revision' variable is " + "declared inside the script (please see 'Upgrading " + "from Alembic 0.1 to 0.2' in the documentation)." + ) + else: + revision = m.group(1) + else: + revision = module.revision + return Script(module, revision, dir_ / filename) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/script/revision.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/script/revision.py new file mode 100644 index 0000000000000000000000000000000000000000..587e90497ce0e4313452441b3611957453eb6349 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/script/revision.py @@ -0,0 +1,1728 @@ +from __future__ import annotations + +import collections +import re +from typing import Any +from typing import Callable +from typing import cast +from typing import Collection +from typing import Deque +from typing import Dict +from typing import FrozenSet +from typing import Iterable +from typing import Iterator +from typing import List +from typing import Optional +from typing import overload +from typing import Protocol +from typing import Sequence +from typing import Set +from typing import Tuple +from typing import TYPE_CHECKING +from typing import TypeVar +from typing import Union + +from sqlalchemy import util as sqlautil + +from .. 
import util +from ..util import not_none + +if TYPE_CHECKING: + from typing import Literal + +_RevIdType = Union[str, List[str], Tuple[str, ...]] +_GetRevArg = Union[ + str, + Iterable[Optional[str]], + Iterable[str], +] +_RevisionIdentifierType = Union[str, Tuple[str, ...], None] +_RevisionOrStr = Union["Revision", str] +_RevisionOrBase = Union["Revision", "Literal['base']"] +_InterimRevisionMapType = Dict[str, "Revision"] +_RevisionMapType = Dict[Union[None, str, Tuple[()]], Optional["Revision"]] +_T = TypeVar("_T") +_TR = TypeVar("_TR", bound=Optional[_RevisionOrStr]) + +_relative_destination = re.compile(r"(?:(.+?)@)?(\w+)?((?:\+|-)\d+)") +_revision_illegal_chars = ["@", "-", "+"] + + +class _CollectRevisionsProtocol(Protocol): + def __call__( + self, + upper: _RevisionIdentifierType, + lower: _RevisionIdentifierType, + inclusive: bool, + implicit_base: bool, + assert_relative_length: bool, + ) -> Tuple[Set[Revision], Tuple[Optional[_RevisionOrBase], ...]]: ... + + +class RevisionError(Exception): + pass + + +class RangeNotAncestorError(RevisionError): + def __init__( + self, lower: _RevisionIdentifierType, upper: _RevisionIdentifierType + ) -> None: + self.lower = lower + self.upper = upper + super().__init__( + "Revision %s is not an ancestor of revision %s" + % (lower or "base", upper or "base") + ) + + +class MultipleHeads(RevisionError): + def __init__(self, heads: Sequence[str], argument: Optional[str]) -> None: + self.heads = heads + self.argument = argument + super().__init__( + "Multiple heads are present for given argument '%s'; " + "%s" % (argument, ", ".join(heads)) + ) + + +class ResolutionError(RevisionError): + def __init__(self, message: str, argument: str) -> None: + super().__init__(message) + self.argument = argument + + +class CycleDetected(RevisionError): + kind = "Cycle" + + def __init__(self, revisions: Sequence[str]) -> None: + self.revisions = revisions + super().__init__( + "%s is detected in revisions (%s)" + % (self.kind, ", ".join(revisions)) + ) + + +class DependencyCycleDetected(CycleDetected): + kind = "Dependency cycle" + + def __init__(self, revisions: Sequence[str]) -> None: + super().__init__(revisions) + + +class LoopDetected(CycleDetected): + kind = "Self-loop" + + def __init__(self, revision: str) -> None: + super().__init__([revision]) + + +class DependencyLoopDetected(DependencyCycleDetected, LoopDetected): + kind = "Dependency self-loop" + + def __init__(self, revision: Sequence[str]) -> None: + super().__init__(revision) + + +class RevisionMap: + """Maintains a map of :class:`.Revision` objects. + + :class:`.RevisionMap` is used by :class:`.ScriptDirectory` to maintain + and traverse the collection of :class:`.Script` objects, which are + themselves instances of :class:`.Revision`. + + """ + + def __init__(self, generator: Callable[[], Iterable[Revision]]) -> None: + """Construct a new :class:`.RevisionMap`. + + :param generator: a zero-arg callable that will generate an iterable + of :class:`.Revision` instances to be used. These are typically + :class:`.Script` subclasses within regular Alembic use. + + """ + self._generator = generator + + @util.memoized_property + def heads(self) -> Tuple[str, ...]: + """All "head" revisions as strings. + + This is normally a tuple of length one, + unless unmerged branches are present. + + :return: a tuple of string revision numbers. + + """ + self._revision_map + return self.heads + + @util.memoized_property + def bases(self) -> Tuple[str, ...]: + """All "base" revisions as strings. 
+ + These are revisions that have a ``down_revision`` of None, + or empty tuple. + + :return: a tuple of string revision numbers. + + """ + self._revision_map + return self.bases + + @util.memoized_property + def _real_heads(self) -> Tuple[str, ...]: + """All "real" head revisions as strings. + + :return: a tuple of string revision numbers. + + """ + self._revision_map + return self._real_heads + + @util.memoized_property + def _real_bases(self) -> Tuple[str, ...]: + """All "real" base revisions as strings. + + :return: a tuple of string revision numbers. + + """ + self._revision_map + return self._real_bases + + @util.memoized_property + def _revision_map(self) -> _RevisionMapType: + """memoized attribute, initializes the revision map from the + initial collection. + + """ + # Ordering required for some tests to pass (but not required in + # general) + map_: _InterimRevisionMapType = sqlautil.OrderedDict() + + heads: Set[Revision] = sqlautil.OrderedSet() + _real_heads: Set[Revision] = sqlautil.OrderedSet() + bases: Tuple[Revision, ...] = () + _real_bases: Tuple[Revision, ...] = () + + has_branch_labels = set() + all_revisions = set() + + for revision in self._generator(): + all_revisions.add(revision) + + if revision.revision in map_: + util.warn( + "Revision %s is present more than once" % revision.revision + ) + map_[revision.revision] = revision + if revision.branch_labels: + has_branch_labels.add(revision) + + heads.add(revision) + _real_heads.add(revision) + if revision.is_base: + bases += (revision,) + if revision._is_real_base: + _real_bases += (revision,) + + # add the branch_labels to the map_. We'll need these + # to resolve the dependencies. + rev_map = map_.copy() + self._map_branch_labels( + has_branch_labels, cast(_RevisionMapType, map_) + ) + + # resolve dependency names from branch labels and symbolic + # names + self._add_depends_on(all_revisions, cast(_RevisionMapType, map_)) + + for rev in map_.values(): + for downrev in rev._all_down_revisions: + if downrev not in map_: + util.warn( + "Revision %s referenced from %s is not present" + % (downrev, rev) + ) + down_revision = map_[downrev] + down_revision.add_nextrev(rev) + if downrev in rev._versioned_down_revisions: + heads.discard(down_revision) + _real_heads.discard(down_revision) + + # once the map has downrevisions populated, the dependencies + # can be further refined to include only those which are not + # already ancestors + self._normalize_depends_on(all_revisions, cast(_RevisionMapType, map_)) + self._detect_cycles(rev_map, heads, bases, _real_heads, _real_bases) + + revision_map: _RevisionMapType = dict(map_.items()) + revision_map[None] = revision_map[()] = None + self.heads = tuple(rev.revision for rev in heads) + self._real_heads = tuple(rev.revision for rev in _real_heads) + self.bases = tuple(rev.revision for rev in bases) + self._real_bases = tuple(rev.revision for rev in _real_bases) + + self._add_branches(has_branch_labels, revision_map) + return revision_map + + def _detect_cycles( + self, + rev_map: _InterimRevisionMapType, + heads: Set[Revision], + bases: Tuple[Revision, ...], + _real_heads: Set[Revision], + _real_bases: Tuple[Revision, ...], + ) -> None: + if not rev_map: + return + if not heads or not bases: + raise CycleDetected(list(rev_map)) + total_space = { + rev.revision + for rev in self._iterate_related_revisions( + lambda r: r._versioned_down_revisions, + heads, + map_=cast(_RevisionMapType, rev_map), + ) + }.intersection( + rev.revision + for rev in self._iterate_related_revisions( + 
lambda r: r.nextrev, + bases, + map_=cast(_RevisionMapType, rev_map), + ) + ) + deleted_revs = set(rev_map.keys()) - total_space + if deleted_revs: + raise CycleDetected(sorted(deleted_revs)) + + if not _real_heads or not _real_bases: + raise DependencyCycleDetected(list(rev_map)) + total_space = { + rev.revision + for rev in self._iterate_related_revisions( + lambda r: r._all_down_revisions, + _real_heads, + map_=cast(_RevisionMapType, rev_map), + ) + }.intersection( + rev.revision + for rev in self._iterate_related_revisions( + lambda r: r._all_nextrev, + _real_bases, + map_=cast(_RevisionMapType, rev_map), + ) + ) + deleted_revs = set(rev_map.keys()) - total_space + if deleted_revs: + raise DependencyCycleDetected(sorted(deleted_revs)) + + def _map_branch_labels( + self, revisions: Collection[Revision], map_: _RevisionMapType + ) -> None: + for revision in revisions: + if revision.branch_labels: + assert revision._orig_branch_labels is not None + for branch_label in revision._orig_branch_labels: + if branch_label in map_: + map_rev = map_[branch_label] + assert map_rev is not None + raise RevisionError( + "Branch name '%s' in revision %s already " + "used by revision %s" + % ( + branch_label, + revision.revision, + map_rev.revision, + ) + ) + map_[branch_label] = revision + + def _add_branches( + self, revisions: Collection[Revision], map_: _RevisionMapType + ) -> None: + for revision in revisions: + if revision.branch_labels: + revision.branch_labels.update(revision.branch_labels) + for node in self._get_descendant_nodes( + [revision], map_, include_dependencies=False + ): + node.branch_labels.update(revision.branch_labels) + + parent = node + while ( + parent + and not parent._is_real_branch_point + and not parent.is_merge_point + ): + parent.branch_labels.update(revision.branch_labels) + if parent.down_revision: + parent = map_[parent.down_revision] + else: + break + + def _add_depends_on( + self, revisions: Collection[Revision], map_: _RevisionMapType + ) -> None: + """Resolve the 'dependencies' for each revision in a collection + in terms of actual revision ids, as opposed to branch labels or other + symbolic names. + + The collection is then assigned to the _resolved_dependencies + attribute on each revision object. + + """ + + for revision in revisions: + if revision.dependencies: + deps = [ + map_[dep] for dep in util.to_tuple(revision.dependencies) + ] + revision._resolved_dependencies = tuple( + [d.revision for d in deps if d is not None] + ) + else: + revision._resolved_dependencies = () + + def _normalize_depends_on( + self, revisions: Collection[Revision], map_: _RevisionMapType + ) -> None: + """Create a collection of "dependencies" that omits dependencies + that are already ancestor nodes for each revision in a given + collection. + + This builds upon the _resolved_dependencies collection created in the + _add_depends_on() method, looking in the fully populated revision map + for ancestors, and omitting them as the _resolved_dependencies + collection as it is copied to a new collection. The new collection is + then assigned to the _normalized_resolved_dependencies attribute on + each revision object. + + The collection is then used to determine the immediate "down revision" + identifiers for this revision. 
+ + """ + + for revision in revisions: + if revision._resolved_dependencies: + normalized_resolved = set(revision._resolved_dependencies) + for rev in self._get_ancestor_nodes( + [revision], + include_dependencies=False, + map_=map_, + ): + if rev is revision: + continue + elif rev._resolved_dependencies: + normalized_resolved.difference_update( + rev._resolved_dependencies + ) + + revision._normalized_resolved_dependencies = tuple( + normalized_resolved + ) + else: + revision._normalized_resolved_dependencies = () + + def add_revision(self, revision: Revision, _replace: bool = False) -> None: + """add a single revision to an existing map. + + This method is for single-revision use cases, it's not + appropriate for fully populating an entire revision map. + + """ + map_ = self._revision_map + if not _replace and revision.revision in map_: + util.warn( + "Revision %s is present more than once" % revision.revision + ) + elif _replace and revision.revision not in map_: + raise Exception("revision %s not in map" % revision.revision) + + map_[revision.revision] = revision + + revisions = [revision] + self._add_branches(revisions, map_) + self._map_branch_labels(revisions, map_) + self._add_depends_on(revisions, map_) + + if revision.is_base: + self.bases += (revision.revision,) + if revision._is_real_base: + self._real_bases += (revision.revision,) + + for downrev in revision._all_down_revisions: + if downrev not in map_: + util.warn( + "Revision %s referenced from %s is not present" + % (downrev, revision) + ) + not_none(map_[downrev]).add_nextrev(revision) + + self._normalize_depends_on(revisions, map_) + + if revision._is_real_head: + self._real_heads = tuple( + head + for head in self._real_heads + if head + not in set(revision._all_down_revisions).union( + [revision.revision] + ) + ) + (revision.revision,) + if revision.is_head: + self.heads = tuple( + head + for head in self.heads + if head + not in set(revision._versioned_down_revisions).union( + [revision.revision] + ) + ) + (revision.revision,) + + def get_current_head( + self, branch_label: Optional[str] = None + ) -> Optional[str]: + """Return the current head revision. + + If the script directory has multiple heads + due to branching, an error is raised; + :meth:`.ScriptDirectory.get_heads` should be + preferred. + + :param branch_label: optional branch name which will limit the + heads considered to those which include that branch_label. + + :return: a string revision number. + + .. seealso:: + + :meth:`.ScriptDirectory.get_heads` + + """ + current_heads: Sequence[str] = self.heads + if branch_label: + current_heads = self.filter_for_lineage( + current_heads, branch_label + ) + if len(current_heads) > 1: + raise MultipleHeads( + current_heads, + "%s@head" % branch_label if branch_label else "head", + ) + + if current_heads: + return current_heads[0] + else: + return None + + def _get_base_revisions(self, identifier: str) -> Tuple[str, ...]: + return self.filter_for_lineage(self.bases, identifier) + + def get_revisions( + self, id_: Optional[_GetRevArg] + ) -> Tuple[Optional[_RevisionOrBase], ...]: + """Return the :class:`.Revision` instances with the given rev id + or identifiers. + + May be given a single identifier, a sequence of identifiers, or the + special symbols "head" or "base". The result is a tuple of one + or more identifiers, or an empty tuple in the case of "base". + + In the cases where 'head', 'heads' is requested and the + revision map is empty, returns an empty tuple. 
+ + Supports partial identifiers, where the given identifier + is matched against all identifiers that start with the given + characters; if there is exactly one match, that determines the + full revision. + + """ + + if isinstance(id_, (list, tuple, set, frozenset)): + return sum([self.get_revisions(id_elem) for id_elem in id_], ()) + else: + resolved_id, branch_label = self._resolve_revision_number(id_) + if len(resolved_id) == 1: + try: + rint = int(resolved_id[0]) + if rint < 0: + # branch@-n -> walk down from heads + select_heads = self.get_revisions("heads") + if branch_label is not None: + select_heads = tuple( + head + for head in select_heads + if branch_label + in is_revision(head).branch_labels + ) + return tuple( + self._walk(head, steps=rint) + for head in select_heads + ) + except ValueError: + # couldn't resolve as integer + pass + return tuple( + self._revision_for_ident(rev_id, branch_label) + for rev_id in resolved_id + ) + + def get_revision(self, id_: Optional[str]) -> Optional[Revision]: + """Return the :class:`.Revision` instance with the given rev id. + + If a symbolic name such as "head" or "base" is given, resolves + the identifier into the current head or base revision. If the symbolic + name refers to multiples, :class:`.MultipleHeads` is raised. + + Supports partial identifiers, where the given identifier + is matched against all identifiers that start with the given + characters; if there is exactly one match, that determines the + full revision. + + """ + + resolved_id, branch_label = self._resolve_revision_number(id_) + if len(resolved_id) > 1: + raise MultipleHeads(resolved_id, id_) + + resolved: Union[str, Tuple[()]] = resolved_id[0] if resolved_id else () + return self._revision_for_ident(resolved, branch_label) + + def _resolve_branch(self, branch_label: str) -> Optional[Revision]: + try: + branch_rev = self._revision_map[branch_label] + except KeyError: + try: + nonbranch_rev = self._revision_for_ident(branch_label) + except ResolutionError as re: + raise ResolutionError( + "No such branch: '%s'" % branch_label, branch_label + ) from re + + else: + return nonbranch_rev + else: + return branch_rev + + def _revision_for_ident( + self, + resolved_id: Union[str, Tuple[()], None], + check_branch: Optional[str] = None, + ) -> Optional[Revision]: + branch_rev: Optional[Revision] + if check_branch: + branch_rev = self._resolve_branch(check_branch) + else: + branch_rev = None + + revision: Union[Optional[Revision], Literal[False]] + try: + revision = self._revision_map[resolved_id] + except KeyError: + # break out to avoid misleading py3k stack traces + revision = False + revs: Sequence[str] + if revision is False: + assert resolved_id + # do a partial lookup + revs = [ + x + for x in self._revision_map + if x and len(x) > 3 and x.startswith(resolved_id) + ] + + if branch_rev: + revs = self.filter_for_lineage(revs, check_branch) + if not revs: + raise ResolutionError( + "No such revision or branch '%s'%s" + % ( + resolved_id, + ( + "; please ensure at least four characters are " + "present for partial revision identifier matches" + if len(resolved_id) < 4 + else "" + ), + ), + resolved_id, + ) + elif len(revs) > 1: + raise ResolutionError( + "Multiple revisions start " + "with '%s': %s..." 
+ % (resolved_id, ", ".join("'%s'" % r for r in revs[0:3])), + resolved_id, + ) + else: + revision = self._revision_map[revs[0]] + + if check_branch and revision is not None: + assert branch_rev is not None + assert resolved_id + if not self._shares_lineage( + revision.revision, branch_rev.revision + ): + raise ResolutionError( + "Revision %s is not a member of branch '%s'" + % (revision.revision, check_branch), + resolved_id, + ) + return revision + + def _filter_into_branch_heads( + self, targets: Iterable[Optional[_RevisionOrBase]] + ) -> Set[Optional[_RevisionOrBase]]: + targets = set(targets) + + for rev in list(targets): + assert rev + if targets.intersection( + self._get_descendant_nodes([rev], include_dependencies=False) + ).difference([rev]): + targets.discard(rev) + return targets + + def filter_for_lineage( + self, + targets: Iterable[_TR], + check_against: Optional[str], + include_dependencies: bool = False, + ) -> Tuple[_TR, ...]: + id_, branch_label = self._resolve_revision_number(check_against) + + shares = [] + if branch_label: + shares.append(branch_label) + if id_: + shares.extend(id_) + + return tuple( + tg + for tg in targets + if self._shares_lineage( + tg, shares, include_dependencies=include_dependencies + ) + ) + + def _shares_lineage( + self, + target: Optional[_RevisionOrStr], + test_against_revs: Sequence[_RevisionOrStr], + include_dependencies: bool = False, + ) -> bool: + if not test_against_revs: + return True + if not isinstance(target, Revision): + resolved_target = not_none(self._revision_for_ident(target)) + else: + resolved_target = target + + resolved_test_against_revs = [ + ( + self._revision_for_ident(test_against_rev) + if not isinstance(test_against_rev, Revision) + else test_against_rev + ) + for test_against_rev in util.to_tuple( + test_against_revs, default=() + ) + ] + + return bool( + set( + self._get_descendant_nodes( + [resolved_target], + include_dependencies=include_dependencies, + ) + ) + .union( + self._get_ancestor_nodes( + [resolved_target], + include_dependencies=include_dependencies, + ) + ) + .intersection(resolved_test_against_revs) + ) + + def _resolve_revision_number( + self, id_: Optional[_GetRevArg] + ) -> Tuple[Tuple[str, ...], Optional[str]]: + branch_label: Optional[str] + if isinstance(id_, str) and "@" in id_: + branch_label, id_ = id_.split("@", 1) + + elif id_ is not None and ( + (isinstance(id_, tuple) and id_ and not isinstance(id_[0], str)) + or not isinstance(id_, (str, tuple)) + ): + raise RevisionError( + "revision identifier %r is not a string; ensure database " + "driver settings are correct" % (id_,) + ) + + else: + branch_label = None + + # ensure map is loaded + self._revision_map + if id_ == "heads": + if branch_label: + return ( + self.filter_for_lineage(self.heads, branch_label), + branch_label, + ) + else: + return self._real_heads, branch_label + elif id_ == "head": + current_head = self.get_current_head(branch_label) + if current_head: + return (current_head,), branch_label + else: + return (), branch_label + elif id_ == "base" or id_ is None: + return (), branch_label + else: + return util.to_tuple(id_, default=None), branch_label + + def iterate_revisions( + self, + upper: _RevisionIdentifierType, + lower: _RevisionIdentifierType, + implicit_base: bool = False, + inclusive: bool = False, + assert_relative_length: bool = True, + select_for_downgrade: bool = False, + ) -> Iterator[Revision]: + """Iterate through script revisions, starting at the given + upper revision identifier and ending at the lower. 
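For context, the same traversal is most commonly reached through the public ScriptDirectory helper; a rough sketch, continuing the cfg/script objects from the earlier example, which walks the full history from the heads down to base:

for sc in script.walk_revisions(base="base", head="heads"):
    print(sc.revision, "<-", sc.down_revision)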
+ + The traversal uses strictly the `down_revision` + marker inside each migration script, so + it is a requirement that upper >= lower, + else you'll get nothing back. + + The iterator yields :class:`.Revision` objects. + + """ + fn: _CollectRevisionsProtocol + if select_for_downgrade: + fn = self._collect_downgrade_revisions + else: + fn = self._collect_upgrade_revisions + + revisions, heads = fn( + upper, + lower, + inclusive=inclusive, + implicit_base=implicit_base, + assert_relative_length=assert_relative_length, + ) + + for node in self._topological_sort(revisions, heads): + yield not_none(self.get_revision(node)) + + def _get_descendant_nodes( + self, + targets: Collection[Optional[_RevisionOrBase]], + map_: Optional[_RevisionMapType] = None, + check: bool = False, + omit_immediate_dependencies: bool = False, + include_dependencies: bool = True, + ) -> Iterator[Any]: + if omit_immediate_dependencies: + + def fn(rev: Revision) -> Iterable[str]: + if rev not in targets: + return rev._all_nextrev + else: + return rev.nextrev + + elif include_dependencies: + + def fn(rev: Revision) -> Iterable[str]: + return rev._all_nextrev + + else: + + def fn(rev: Revision) -> Iterable[str]: + return rev.nextrev + + return self._iterate_related_revisions( + fn, targets, map_=map_, check=check + ) + + def _get_ancestor_nodes( + self, + targets: Collection[Optional[_RevisionOrBase]], + map_: Optional[_RevisionMapType] = None, + check: bool = False, + include_dependencies: bool = True, + ) -> Iterator[Revision]: + if include_dependencies: + + def fn(rev: Revision) -> Iterable[str]: + return rev._normalized_down_revisions + + else: + + def fn(rev: Revision) -> Iterable[str]: + return rev._versioned_down_revisions + + return self._iterate_related_revisions( + fn, targets, map_=map_, check=check + ) + + def _iterate_related_revisions( + self, + fn: Callable[[Revision], Iterable[str]], + targets: Collection[Optional[_RevisionOrBase]], + map_: Optional[_RevisionMapType], + check: bool = False, + ) -> Iterator[Revision]: + if map_ is None: + map_ = self._revision_map + + seen = set() + todo: Deque[Revision] = collections.deque() + for target_for in targets: + target = is_revision(target_for) + todo.append(target) + if check: + per_target = set() + + while todo: + rev = todo.pop() + if check: + per_target.add(rev) + + if rev in seen: + continue + seen.add(rev) + # Check for map errors before collecting. + for rev_id in fn(rev): + next_rev = map_[rev_id] + assert next_rev is not None + if next_rev.revision != rev_id: + raise RevisionError( + "Dependency resolution failed; broken map" + ) + todo.append(next_rev) + yield rev + if check: + overlaps = per_target.intersection(targets).difference( + [target] + ) + if overlaps: + raise RevisionError( + "Requested revision %s overlaps with " + "other requested revisions %s" + % ( + target.revision, + ", ".join(r.revision for r in overlaps), + ) + ) + + def _topological_sort( + self, + revisions: Collection[Revision], + heads: Any, + ) -> List[str]: + """Yield revision ids of a collection of Revision objects in + topological sorted order (i.e. revisions always come after their + down_revisions and dependencies). Uses the order of keys in + _revision_map to sort. + + """ + + id_to_rev = self._revision_map + + def get_ancestors(rev_id: str) -> Set[str]: + return { + r.revision + for r in self._get_ancestor_nodes([id_to_rev[rev_id]]) + } + + todo = {d.revision for d in revisions} + + # Use revision map (ordered dict) key order to pre-sort. 
+ inserted_order = list(self._revision_map) + + current_heads = list( + sorted( + {d.revision for d in heads if d.revision in todo}, + key=inserted_order.index, + ) + ) + ancestors_by_idx = [get_ancestors(rev_id) for rev_id in current_heads] + + output = [] + + current_candidate_idx = 0 + while current_heads: + candidate = current_heads[current_candidate_idx] + + for check_head_index, ancestors in enumerate(ancestors_by_idx): + # scan all the heads. see if we can continue walking + # down the current branch indicated by current_candidate_idx. + if ( + check_head_index != current_candidate_idx + and candidate in ancestors + ): + current_candidate_idx = check_head_index + # nope, another head is dependent on us, they have + # to be traversed first + break + else: + # yup, we can emit + if candidate in todo: + output.append(candidate) + todo.remove(candidate) + + # now update the heads with our ancestors. + + candidate_rev = id_to_rev[candidate] + assert candidate_rev is not None + + heads_to_add = [ + r + for r in candidate_rev._normalized_down_revisions + if r in todo and r not in current_heads + ] + + if not heads_to_add: + # no ancestors, so remove this head from the list + del current_heads[current_candidate_idx] + del ancestors_by_idx[current_candidate_idx] + current_candidate_idx = max(current_candidate_idx - 1, 0) + else: + if ( + not candidate_rev._normalized_resolved_dependencies + and len(candidate_rev._versioned_down_revisions) == 1 + ): + current_heads[current_candidate_idx] = heads_to_add[0] + + # for plain movement down a revision line without + # any mergepoints, branchpoints, or deps, we + # can update the ancestors collection directly + # by popping out the candidate we just emitted + ancestors_by_idx[current_candidate_idx].discard( + candidate + ) + + else: + # otherwise recalculate it again, things get + # complicated otherwise. This can possibly be + # improved to not run the whole ancestor thing + # each time but it was getting complicated + current_heads[current_candidate_idx] = heads_to_add[0] + current_heads.extend(heads_to_add[1:]) + ancestors_by_idx[current_candidate_idx] = ( + get_ancestors(heads_to_add[0]) + ) + ancestors_by_idx.extend( + get_ancestors(head) for head in heads_to_add[1:] + ) + + assert not todo + return output + + def _walk( + self, + start: Optional[Union[str, Revision]], + steps: int, + branch_label: Optional[str] = None, + no_overwalk: bool = True, + ) -> Optional[_RevisionOrBase]: + """ + Walk the requested number of :steps up (steps > 0) or down (steps < 0) + the revision tree. + + :branch_label is used to select branches only when walking up. + + If the walk goes past the boundaries of the tree and :no_overwalk is + True, None is returned, otherwise the walk terminates early. + + A RevisionError is raised if there is no unambiguous revision to + walk to. 
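The relative-step syntax that ultimately exercises _walk() is what users pass to the upgrade and downgrade commands. A hedged sketch via the command API, continuing the cfg object from above ("mybranch" is a placeholder branch label, and the database is assumed to be initialized and stamped):

from alembic import command

command.upgrade(cfg, "+2")             # walk two revisions up from the current position
command.downgrade(cfg, "-1")           # walk one revision down
command.upgrade(cfg, "mybranch@head")  # walk up along a labelled branch only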
+ """ + initial: Optional[_RevisionOrBase] + if isinstance(start, str): + initial = self.get_revision(start) + else: + initial = start + + children: Sequence[Optional[_RevisionOrBase]] + for _ in range(abs(steps)): + if steps > 0: + assert initial != "base" # type: ignore[comparison-overlap] + # Walk up + walk_up = [ + is_revision(rev) + for rev in self.get_revisions( + self.bases if initial is None else initial.nextrev + ) + ] + if branch_label: + children = self.filter_for_lineage(walk_up, branch_label) + else: + children = walk_up + else: + # Walk down + if initial == "base": # type: ignore[comparison-overlap] + children = () + else: + children = self.get_revisions( + self.heads + if initial is None + else initial.down_revision + ) + if not children: + children = ("base",) + if not children: + # This will return an invalid result if no_overwalk, otherwise + # further steps will stay where we are. + ret = None if no_overwalk else initial + return ret + elif len(children) > 1: + raise RevisionError("Ambiguous walk") + initial = children[0] + + return initial + + def _parse_downgrade_target( + self, + current_revisions: _RevisionIdentifierType, + target: _RevisionIdentifierType, + assert_relative_length: bool, + ) -> Tuple[Optional[str], Optional[_RevisionOrBase]]: + """ + Parse downgrade command syntax :target to retrieve the target revision + and branch label (if any) given the :current_revisions stamp of the + database. + + Returns a tuple (branch_label, target_revision) where branch_label + is a string from the command specifying the branch to consider (or + None if no branch given), and target_revision is a Revision object + which the command refers to. target_revisions is None if the command + refers to 'base'. The target may be specified in absolute form, or + relative to :current_revisions. + """ + if target is None: + return None, None + assert isinstance( + target, str + ), "Expected downgrade target in string form" + match = _relative_destination.match(target) + if match: + branch_label, symbol, relative = match.groups() + rel_int = int(relative) + if rel_int >= 0: + if symbol is None: + # Downgrading to current + n is not valid. + raise RevisionError( + "Relative revision %s didn't " + "produce %d migrations" % (relative, abs(rel_int)) + ) + # Find target revision relative to given symbol. + rev = self._walk( + symbol, + rel_int, + branch_label, + no_overwalk=assert_relative_length, + ) + if rev is None: + raise RevisionError("Walked too far") + return branch_label, rev + else: + relative_revision = symbol is None + if relative_revision: + # Find target revision relative to current state. + if branch_label: + cr_tuple = util.to_tuple(current_revisions) + symbol_list: Sequence[str] + symbol_list = self.filter_for_lineage( + cr_tuple, branch_label + ) + if not symbol_list: + # check the case where there are multiple branches + # but there is currently a single heads, since all + # other branch heads are dependent of the current + # single heads. 
+ all_current = cast( + Set[Revision], self._get_all_current(cr_tuple) + ) + sl_all_current = self.filter_for_lineage( + all_current, branch_label + ) + symbol_list = [ + r.revision if r else r # type: ignore[misc] + for r in sl_all_current + ] + + assert len(symbol_list) == 1 + symbol = symbol_list[0] + else: + current_revisions = util.to_tuple(current_revisions) + if not current_revisions: + raise RevisionError( + "Relative revision %s didn't " + "produce %d migrations" + % (relative, abs(rel_int)) + ) + # Have to check uniques here for duplicate rows test. + if len(set(current_revisions)) > 1: + util.warn( + "downgrade -1 from multiple heads is " + "ambiguous; " + "this usage will be disallowed in a future " + "release." + ) + symbol = current_revisions[0] + # Restrict iteration to just the selected branch when + # ambiguous branches are involved. + branch_label = symbol + # Walk down the tree to find downgrade target. + rev = self._walk( + start=( + self.get_revision(symbol) + if branch_label is None + else self.get_revision( + "%s@%s" % (branch_label, symbol) + ) + ), + steps=rel_int, + no_overwalk=assert_relative_length, + ) + if rev is None: + if relative_revision: + raise RevisionError( + "Relative revision %s didn't " + "produce %d migrations" % (relative, abs(rel_int)) + ) + else: + raise RevisionError("Walked too far") + return branch_label, rev + + # No relative destination given, revision specified is absolute. + branch_label, _, symbol = target.rpartition("@") + if not branch_label: + branch_label = None + return branch_label, self.get_revision(symbol) + + def _parse_upgrade_target( + self, + current_revisions: _RevisionIdentifierType, + target: _RevisionIdentifierType, + assert_relative_length: bool, + ) -> Tuple[Optional[_RevisionOrBase], ...]: + """ + Parse upgrade command syntax :target to retrieve the target revision + and given the :current_revisions stamp of the database. + + Returns a tuple of Revision objects which should be iterated/upgraded + to. The target may be specified in absolute form, or relative to + :current_revisions. + """ + if isinstance(target, str): + match = _relative_destination.match(target) + else: + match = None + + if not match: + # No relative destination, target is absolute. + return self.get_revisions(target) + + current_revisions_tup: Union[str, Tuple[Optional[str], ...], None] + current_revisions_tup = util.to_tuple(current_revisions) + + branch_label, symbol, relative_str = match.groups() + relative = int(relative_str) + if relative > 0: + if symbol is None: + if not current_revisions_tup: + current_revisions_tup = (None,) + # Try to filter to a single target (avoid ambiguous branches). + start_revs = current_revisions_tup + if branch_label: + start_revs = self.filter_for_lineage( + self.get_revisions(current_revisions_tup), # type: ignore[arg-type] # noqa: E501 + branch_label, + ) + if not start_revs: + # The requested branch is not a head, so we need to + # backtrack to find a branchpoint. + active_on_branch = self.filter_for_lineage( + self._get_ancestor_nodes( + self.get_revisions(current_revisions_tup) + ), + branch_label, + ) + # Find the tips of this set of revisions (revisions + # without children within the set). + start_revs = tuple( + {rev.revision for rev in active_on_branch} + - { + down + for rev in active_on_branch + for down in rev._normalized_down_revisions + } + ) + if not start_revs: + # We must need to go right back to base to find + # a starting point for this branch. 
+ start_revs = (None,) + if len(start_revs) > 1: + raise RevisionError( + "Ambiguous upgrade from multiple current revisions" + ) + # Walk up from unique target revision. + rev = self._walk( + start=start_revs[0], + steps=relative, + branch_label=branch_label, + no_overwalk=assert_relative_length, + ) + if rev is None: + raise RevisionError( + "Relative revision %s didn't " + "produce %d migrations" % (relative_str, abs(relative)) + ) + return (rev,) + else: + # Walk is relative to a given revision, not the current state. + return ( + self._walk( + start=self.get_revision(symbol), + steps=relative, + branch_label=branch_label, + no_overwalk=assert_relative_length, + ), + ) + else: + if symbol is None: + # Upgrading to current - n is not valid. + raise RevisionError( + "Relative revision %s didn't " + "produce %d migrations" % (relative, abs(relative)) + ) + return ( + self._walk( + start=( + self.get_revision(symbol) + if branch_label is None + else self.get_revision( + "%s@%s" % (branch_label, symbol) + ) + ), + steps=relative, + no_overwalk=assert_relative_length, + ), + ) + + def _collect_downgrade_revisions( + self, + upper: _RevisionIdentifierType, + lower: _RevisionIdentifierType, + inclusive: bool, + implicit_base: bool, + assert_relative_length: bool, + ) -> Tuple[Set[Revision], Tuple[Optional[_RevisionOrBase], ...]]: + """ + Compute the set of current revisions specified by :upper, and the + downgrade target specified by :target. Return all dependents of target + which are currently active. + + :inclusive=True includes the target revision in the set + """ + + branch_label, target_revision = self._parse_downgrade_target( + current_revisions=upper, + target=lower, + assert_relative_length=assert_relative_length, + ) + if target_revision == "base": + target_revision = None + assert target_revision is None or isinstance(target_revision, Revision) + + roots: List[Revision] + # Find candidates to drop. + if target_revision is None: + # Downgrading back to base: find all tree roots. + roots = [ + rev + for rev in self._revision_map.values() + if rev is not None and rev.down_revision is None + ] + elif inclusive: + # inclusive implies target revision should also be dropped + roots = [target_revision] + else: + # Downgrading to fixed target: find all direct children. + roots = [ + is_revision(rev) + for rev in self.get_revisions(target_revision.nextrev) + ] + + if branch_label and len(roots) > 1: + # Need to filter roots. + ancestors = { + rev.revision + for rev in self._get_ancestor_nodes( + [self._resolve_branch(branch_label)], + include_dependencies=False, + ) + } + # Intersection gives the root revisions we are trying to + # rollback with the downgrade. + roots = [ + is_revision(rev) + for rev in self.get_revisions( + {rev.revision for rev in roots}.intersection(ancestors) + ) + ] + + # Ensure we didn't throw everything away when filtering branches. + if len(roots) == 0: + raise RevisionError( + "Not a valid downgrade target from current heads" + ) + + heads = self.get_revisions(upper) + + # Aim is to drop :branch_revision; to do so we also need to drop its + # descendents and anything dependent on it. + downgrade_revisions = set( + self._get_descendant_nodes( + roots, + include_dependencies=True, + omit_immediate_dependencies=False, + ) + ) + active_revisions = set( + self._get_ancestor_nodes(heads, include_dependencies=True) + ) + + # Emit revisions to drop in reverse topological sorted order. 
+ downgrade_revisions.intersection_update(active_revisions) + + if implicit_base: + # Wind other branches back to base. + downgrade_revisions.update( + active_revisions.difference(self._get_ancestor_nodes(roots)) + ) + + if ( + target_revision is not None + and not downgrade_revisions + and target_revision not in heads + ): + # Empty intersection: target revs are not present. + + raise RangeNotAncestorError("Nothing to drop", upper) + + return downgrade_revisions, heads + + def _collect_upgrade_revisions( + self, + upper: _RevisionIdentifierType, + lower: _RevisionIdentifierType, + inclusive: bool, + implicit_base: bool, + assert_relative_length: bool, + ) -> Tuple[Set[Revision], Tuple[Revision, ...]]: + """ + Compute the set of required revisions specified by :upper, and the + current set of active revisions specified by :lower. Find the + difference between the two to compute the required upgrades. + + :inclusive=True includes the current/lower revisions in the set + + :implicit_base=False only returns revisions which are downstream + of the current/lower revisions. Dependencies from branches with + different bases will not be included. + """ + targets: Collection[Revision] = [ + is_revision(rev) + for rev in self._parse_upgrade_target( + current_revisions=lower, + target=upper, + assert_relative_length=assert_relative_length, + ) + ] + + # assert type(targets) is tuple, "targets should be a tuple" + + # Handled named bases (e.g. branch@... -> heads should only produce + # targets on the given branch) + if isinstance(lower, str) and "@" in lower: + branch, _, _ = lower.partition("@") + branch_rev = self.get_revision(branch) + if branch_rev is not None and branch_rev.revision == branch: + # A revision was used as a label; get its branch instead + assert len(branch_rev.branch_labels) == 1 + branch = next(iter(branch_rev.branch_labels)) + targets = { + need for need in targets if branch in need.branch_labels + } + + required_node_set = set( + self._get_ancestor_nodes( + targets, check=True, include_dependencies=True + ) + ).union(targets) + + current_revisions = self.get_revisions(lower) + if not implicit_base and any( + rev not in required_node_set + for rev in current_revisions + if rev is not None + ): + raise RangeNotAncestorError(lower, upper) + assert ( + type(current_revisions) is tuple + ), "current_revisions should be a tuple" + + # Special case where lower = a relative value (get_revisions can't + # find it) + if current_revisions and current_revisions[0] is None: + _, rev = self._parse_downgrade_target( + current_revisions=upper, + target=lower, + assert_relative_length=assert_relative_length, + ) + assert rev + if rev == "base": + current_revisions = tuple() + lower = None + else: + current_revisions = (rev,) + lower = rev.revision + + current_node_set = set( + self._get_ancestor_nodes( + current_revisions, check=True, include_dependencies=True + ) + ).union(current_revisions) + + needs = required_node_set.difference(current_node_set) + + # Include the lower revision (=current_revisions?) in the iteration + if inclusive: + needs.update(is_revision(rev) for rev in self.get_revisions(lower)) + # By default, base is implicit as we want all dependencies returned. 
+ # Base is also implicit if lower = base + # implicit_base=False -> only return direct downstreams of + # current_revisions + if current_revisions and not implicit_base: + lower_descendents = self._get_descendant_nodes( + [is_revision(rev) for rev in current_revisions], + check=True, + include_dependencies=False, + ) + needs.intersection_update(lower_descendents) + + return needs, tuple(targets) + + def _get_all_current( + self, id_: Tuple[str, ...] + ) -> Set[Optional[_RevisionOrBase]]: + top_revs: Set[Optional[_RevisionOrBase]] + top_revs = set(self.get_revisions(id_)) + top_revs.update( + self._get_ancestor_nodes(list(top_revs), include_dependencies=True) + ) + return self._filter_into_branch_heads(top_revs) + + +class Revision: + """Base class for revisioned objects. + + The :class:`.Revision` class is the base of the more public-facing + :class:`.Script` object, which represents a migration script. + The mechanics of revision management and traversal are encapsulated + within :class:`.Revision`, while :class:`.Script` applies this logic + to Python files in a version directory. + + """ + + nextrev: FrozenSet[str] = frozenset() + """following revisions, based on down_revision only.""" + + _all_nextrev: FrozenSet[str] = frozenset() + + revision: str = None # type: ignore[assignment] + """The string revision number.""" + + down_revision: Optional[_RevIdType] = None + """The ``down_revision`` identifier(s) within the migration script. + + Note that the total set of "down" revisions is + down_revision + dependencies. + + """ + + dependencies: Optional[_RevIdType] = None + """Additional revisions which this revision is dependent on. + + From a migration standpoint, these dependencies are added to the + down_revision to form the full iteration. However, the separation + of down_revision from "dependencies" is to assist in navigating + a history that contains many branches, typically a multi-root scenario. + + """ + + branch_labels: Set[str] = None # type: ignore[assignment] + """Optional string/tuple of symbolic names to apply to this + revision's branch""" + + _resolved_dependencies: Tuple[str, ...] + _normalized_resolved_dependencies: Tuple[str, ...] 
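A hedged sketch of these Revision fields, plus a tiny RevisionMap containing a branch point (all revision ids and the branch label are placeholders; in normal use these objects are built by ScriptDirectory rather than constructed directly):

from alembic.script.revision import Revision, RevisionMap

revs = [
    Revision("a1", None, branch_labels="feature_x"),
    Revision("b2", "a1"),
    Revision("c3", "a1"),
]
rmap = RevisionMap(lambda: revs)       # RevisionMap takes a callable yielding revisions

a1 = rmap.get_revision("a1")
print(a1.is_base, a1.is_branch_point)  # True True - two revisions point back at "a1"
print(sorted(a1.nextrev))              # ['b2', 'c3']
print(sorted(rmap.heads))              # ['b2', 'c3']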
+ + @classmethod + def verify_rev_id(cls, revision: str) -> None: + illegal_chars = set(revision).intersection(_revision_illegal_chars) + if illegal_chars: + raise RevisionError( + "Character(s) '%s' not allowed in revision identifier '%s'" + % (", ".join(sorted(illegal_chars)), revision) + ) + + def __init__( + self, + revision: str, + down_revision: Optional[Union[str, Tuple[str, ...]]], + dependencies: Optional[Union[str, Tuple[str, ...]]] = None, + branch_labels: Optional[Union[str, Tuple[str, ...]]] = None, + ) -> None: + if down_revision and revision in util.to_tuple(down_revision): + raise LoopDetected(revision) + elif dependencies is not None and revision in util.to_tuple( + dependencies + ): + raise DependencyLoopDetected(revision) + + self.verify_rev_id(revision) + self.revision = revision + self.down_revision = tuple_rev_as_scalar(util.to_tuple(down_revision)) + self.dependencies = tuple_rev_as_scalar(util.to_tuple(dependencies)) + self._orig_branch_labels = util.to_tuple(branch_labels, default=()) + self.branch_labels = set(self._orig_branch_labels) + + def __repr__(self) -> str: + args = [repr(self.revision), repr(self.down_revision)] + if self.dependencies: + args.append("dependencies=%r" % (self.dependencies,)) + if self.branch_labels: + args.append("branch_labels=%r" % (self.branch_labels,)) + return "%s(%s)" % (self.__class__.__name__, ", ".join(args)) + + def add_nextrev(self, revision: Revision) -> None: + self._all_nextrev = self._all_nextrev.union([revision.revision]) + if self.revision in revision._versioned_down_revisions: + self.nextrev = self.nextrev.union([revision.revision]) + + @property + def _all_down_revisions(self) -> Tuple[str, ...]: + return util.dedupe_tuple( + util.to_tuple(self.down_revision, default=()) + + self._resolved_dependencies + ) + + @property + def _normalized_down_revisions(self) -> Tuple[str, ...]: + """return immediate down revisions for a rev, omitting dependencies + that are still dependencies of ancestors. + + """ + return util.dedupe_tuple( + util.to_tuple(self.down_revision, default=()) + + self._normalized_resolved_dependencies + ) + + @property + def _versioned_down_revisions(self) -> Tuple[str, ...]: + return util.to_tuple(self.down_revision, default=()) + + @property + def is_head(self) -> bool: + """Return True if this :class:`.Revision` is a 'head' revision. + + This is determined based on whether any other :class:`.Script` + within the :class:`.ScriptDirectory` refers to this + :class:`.Script`. Multiple heads can be present. + + """ + return not bool(self.nextrev) + + @property + def _is_real_head(self) -> bool: + return not bool(self._all_nextrev) + + @property + def is_base(self) -> bool: + """Return True if this :class:`.Revision` is a 'base' revision.""" + + return self.down_revision is None + + @property + def _is_real_base(self) -> bool: + """Return True if this :class:`.Revision` is a "real" base revision, + e.g. that it has no dependencies either.""" + + # we use self.dependencies here because this is called up + # in initialization where _real_dependencies isn't set up + # yet + return self.down_revision is None and self.dependencies is None + + @property + def is_branch_point(self) -> bool: + """Return True if this :class:`.Script` is a branch point. + + A branchpoint is defined as a :class:`.Script` which is referred + to by more than one succeeding :class:`.Script`, that is more + than one :class:`.Script` has a `down_revision` identifier pointing + here. 
+ + """ + return len(self.nextrev) > 1 + + @property + def _is_real_branch_point(self) -> bool: + """Return True if this :class:`.Script` is a 'real' branch point, + taking into account dependencies as well. + + """ + return len(self._all_nextrev) > 1 + + @property + def is_merge_point(self) -> bool: + """Return True if this :class:`.Script` is a merge point.""" + + return len(self._versioned_down_revisions) > 1 + + +@overload +def tuple_rev_as_scalar(rev: None) -> None: ... + + +@overload +def tuple_rev_as_scalar( + rev: Union[Tuple[_T, ...], List[_T]], +) -> Union[_T, Tuple[_T, ...], List[_T]]: ... + + +def tuple_rev_as_scalar( + rev: Optional[Sequence[_T]], +) -> Union[_T, Sequence[_T], None]: + if not rev: + return None + elif len(rev) == 1: + return rev[0] + else: + return rev + + +def is_revision(rev: Any) -> Revision: + assert isinstance(rev, Revision) + return rev diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/script/write_hooks.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/script/write_hooks.py new file mode 100644 index 0000000000000000000000000000000000000000..f40bb35f6a8c8878fe4fe4181f235e9fc010b36b --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/script/write_hooks.py @@ -0,0 +1,176 @@ +# mypy: allow-untyped-defs, allow-incomplete-defs, allow-untyped-calls +# mypy: no-warn-return-any, allow-any-generics + +from __future__ import annotations + +import importlib.util +import os +import shlex +import subprocess +import sys +from typing import Any +from typing import Callable +from typing import Dict +from typing import List +from typing import Optional +from typing import TYPE_CHECKING +from typing import Union + +from .. import util +from ..util import compat +from ..util.pyfiles import _preserving_path_as_str + +if TYPE_CHECKING: + from ..config import PostWriteHookConfig + +REVISION_SCRIPT_TOKEN = "REVISION_SCRIPT_FILENAME" + +_registry: dict = {} + + +def register(name: str) -> Callable: + """A function decorator that will register that function as a write hook. + + See the documentation linked below for an example. + + .. seealso:: + + :ref:`post_write_hooks_custom` + + + """ + + def decorate(fn): + _registry[name] = fn + return fn + + return decorate + + +def _invoke( + name: str, + revision_path: Union[str, os.PathLike[str]], + options: PostWriteHookConfig, +) -> Any: + """Invokes the formatter registered for the given name. + + :param name: The name of a formatter in the registry + :param revision: string path to the revision file + :param options: A dict containing kwargs passed to the + specified formatter. 
+ :raises: :class:`alembic.util.CommandError` + """ + revision_path = _preserving_path_as_str(revision_path) + try: + hook = _registry[name] + except KeyError as ke: + raise util.CommandError( + f"No formatter with name '{name}' registered" + ) from ke + else: + return hook(revision_path, options) + + +def _run_hooks( + path: Union[str, os.PathLike[str]], hooks: list[PostWriteHookConfig] +) -> None: + """Invoke hooks for a generated revision.""" + + for hook in hooks: + name = hook["_hook_name"] + try: + type_ = hook["type"] + except KeyError as ke: + raise util.CommandError( + f"Key '{name}.type' (or 'type' in toml) is required " + f"for post write hook {name!r}" + ) from ke + else: + with util.status( + f"Running post write hook {name!r}", newline=True + ): + _invoke(type_, path, hook) + + +def _parse_cmdline_options(cmdline_options_str: str, path: str) -> List[str]: + """Parse options from a string into a list. + + Also substitutes the revision script token with the actual filename of + the revision script. + + If the revision script token doesn't occur in the options string, it is + automatically prepended. + """ + if REVISION_SCRIPT_TOKEN not in cmdline_options_str: + cmdline_options_str = REVISION_SCRIPT_TOKEN + " " + cmdline_options_str + cmdline_options_list = shlex.split( + cmdline_options_str, posix=compat.is_posix + ) + cmdline_options_list = [ + option.replace(REVISION_SCRIPT_TOKEN, path) + for option in cmdline_options_list + ] + return cmdline_options_list + + +def _get_required_option(options: dict, name: str) -> str: + try: + return options[name] + except KeyError as ke: + raise util.CommandError( + f"Key {options['_hook_name']}.{name} is required for post " + f"write hook {options['_hook_name']!r}" + ) from ke + + +def _run_hook( + path: str, options: dict, ignore_output: bool, command: List[str] +) -> None: + cwd: Optional[str] = options.get("cwd", None) + cmdline_options_str = options.get("options", "") + cmdline_options_list = _parse_cmdline_options(cmdline_options_str, path) + + kw: Dict[str, Any] = {} + if ignore_output: + kw["stdout"] = kw["stderr"] = subprocess.DEVNULL + + subprocess.run([*command, *cmdline_options_list], cwd=cwd, **kw) + + +@register("console_scripts") +def console_scripts( + path: str, options: dict, ignore_output: bool = False +) -> None: + entrypoint_name = _get_required_option(options, "entrypoint") + for entry in compat.importlib_metadata_get("console_scripts"): + if entry.name == entrypoint_name: + impl: Any = entry + break + else: + raise util.CommandError( + f"Could not find entrypoint console_scripts.{entrypoint_name}" + ) + + command = [ + sys.executable, + "-c", + f"import {impl.module}; {impl.module}.{impl.attr}()", + ] + _run_hook(path, options, ignore_output, command) + + +@register("exec") +def exec_(path: str, options: dict, ignore_output: bool = False) -> None: + executable = _get_required_option(options, "executable") + _run_hook(path, options, ignore_output, command=[executable]) + + +@register("module") +def module(path: str, options: dict, ignore_output: bool = False) -> None: + module_name = _get_required_option(options, "module") + + if importlib.util.find_spec(module_name) is None: + raise util.CommandError(f"Could not find module {module_name}") + + command = [sys.executable, "-m", module_name] + _run_hook(path, options, ignore_output, command) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/templates/async/README 
b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/templates/async/README new file mode 100644 index 0000000000000000000000000000000000000000..e0d0858f266ec27b243e8b92301fc7002e1f2745 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/templates/async/README @@ -0,0 +1 @@ +Generic single-database configuration with an async dbapi. \ No newline at end of file diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/templates/async/alembic.ini.mako b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/templates/async/alembic.ini.mako new file mode 100644 index 0000000000000000000000000000000000000000..67acc6d05426f6b03b8c650ff79c70ab2216b1b1 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/templates/async/alembic.ini.mako @@ -0,0 +1,147 @@ +# A generic, single database configuration. + +[alembic] +# path to migration scripts. +# this is typically a path given in POSIX (e.g. forward slashes) +# format, relative to the token %(here)s which refers to the location of this +# ini file +script_location = ${script_location} + +# template used to generate migration file names; The default value is %%(rev)s_%%(slug)s +# Uncomment the line below if you want the files to be prepended with date and time +# see https://alembic.sqlalchemy.org/en/latest/tutorial.html#editing-the-ini-file +# for all available tokens +# file_template = %%(year)d_%%(month).2d_%%(day).2d_%%(hour).2d%%(minute).2d-%%(rev)s_%%(slug)s + +# sys.path path, will be prepended to sys.path if present. +# defaults to the current working directory. for multiple paths, the path separator +# is defined by "path_separator" below. +prepend_sys_path = . + +# timezone to use when rendering the date within the migration file +# as well as the filename. +# If specified, requires the python>=3.9 or backports.zoneinfo library and tzdata library. +# Any required deps can installed by adding `alembic[tz]` to the pip requirements +# string value is passed to ZoneInfo() +# leave blank for localtime +# timezone = + +# max length of characters to apply to the "slug" field +# truncate_slug_length = 40 + +# set to 'true' to run the environment during +# the 'revision' command, regardless of autogenerate +# revision_environment = false + +# set to 'true' to allow .pyc and .pyo files without +# a source .py file to be detected as revisions in the +# versions/ directory +# sourceless = false + +# version location specification; This defaults +# to /versions. When using multiple version +# directories, initial revisions must be specified with --version-path. +# The path separator used here should be the separator specified by "path_separator" +# below. +# version_locations = %(here)s/bar:%(here)s/bat:%(here)s/alembic/versions + +# path_separator; This indicates what character is used to split lists of file +# paths, including version_locations and prepend_sys_path within configparser +# files such as alembic.ini. +# The default rendered in new alembic.ini files is "os", which uses os.pathsep +# to provide os-dependent path splitting. +# +# Note that in order to support legacy alembic.ini files, this default does NOT +# take place if path_separator is not present in alembic.ini. If this +# option is omitted entirely, fallback logic is as follows: +# +# 1. 
Parsing of the version_locations option falls back to using the legacy +# "version_path_separator" key, which if absent then falls back to the legacy +# behavior of splitting on spaces and/or commas. +# 2. Parsing of the prepend_sys_path option falls back to the legacy +# behavior of splitting on spaces, commas, or colons. +# +# Valid values for path_separator are: +# +# path_separator = : +# path_separator = ; +# path_separator = space +# path_separator = newline +# +# Use os.pathsep. Default configuration used for new projects. +path_separator = os + + +# set to 'true' to search source files recursively +# in each "version_locations" directory +# new in Alembic version 1.10 +# recursive_version_locations = false + +# the output encoding used when revision files +# are written from script.py.mako +# output_encoding = utf-8 + +# database URL. This is consumed by the user-maintained env.py script only. +# other means of configuring database URLs may be customized within the env.py +# file. +sqlalchemy.url = driver://user:pass@localhost/dbname + + +[post_write_hooks] +# post_write_hooks defines scripts or Python functions that are run +# on newly generated revision scripts. See the documentation for further +# detail and examples + +# format using "black" - use the console_scripts runner, against the "black" entrypoint +# hooks = black +# black.type = console_scripts +# black.entrypoint = black +# black.options = -l 79 REVISION_SCRIPT_FILENAME + +# lint with attempts to fix using "ruff" - use the module runner, against the "ruff" module +# hooks = ruff +# ruff.type = module +# ruff.module = ruff +# ruff.options = check --fix REVISION_SCRIPT_FILENAME + +# Alternatively, use the exec runner to execute a binary found on your PATH +# hooks = ruff +# ruff.type = exec +# ruff.executable = ruff +# ruff.options = check --fix REVISION_SCRIPT_FILENAME + +# Logging configuration. This is also consumed by the user-maintained +# env.py script only. +[loggers] +keys = root,sqlalchemy,alembic + +[handlers] +keys = console + +[formatters] +keys = generic + +[logger_root] +level = WARNING +handlers = console +qualname = + +[logger_sqlalchemy] +level = WARNING +handlers = +qualname = sqlalchemy.engine + +[logger_alembic] +level = INFO +handlers = +qualname = alembic + +[handler_console] +class = StreamHandler +args = (sys.stderr,) +level = NOTSET +formatter = generic + +[formatter_generic] +format = %(levelname)-5.5s [%(name)s] %(message)s +datefmt = %H:%M:%S diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/templates/async/env.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/templates/async/env.py new file mode 100644 index 0000000000000000000000000000000000000000..9f2d51940080a1a7c954a8916dab86f00d5a4aa5 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/templates/async/env.py @@ -0,0 +1,89 @@ +import asyncio +from logging.config import fileConfig + +from sqlalchemy import pool +from sqlalchemy.engine import Connection +from sqlalchemy.ext.asyncio import async_engine_from_config + +from alembic import context + +# this is the Alembic Config object, which provides +# access to the values within the .ini file in use. +config = context.config + +# Interpret the config file for Python logging. +# This line sets up loggers basically. 
+if config.config_file_name is not None: + fileConfig(config.config_file_name) + +# add your model's MetaData object here +# for 'autogenerate' support +# from myapp import mymodel +# target_metadata = mymodel.Base.metadata +target_metadata = None + +# other values from the config, defined by the needs of env.py, +# can be acquired: +# my_important_option = config.get_main_option("my_important_option") +# ... etc. + + +def run_migrations_offline() -> None: + """Run migrations in 'offline' mode. + + This configures the context with just a URL + and not an Engine, though an Engine is acceptable + here as well. By skipping the Engine creation + we don't even need a DBAPI to be available. + + Calls to context.execute() here emit the given string to the + script output. + + """ + url = config.get_main_option("sqlalchemy.url") + context.configure( + url=url, + target_metadata=target_metadata, + literal_binds=True, + dialect_opts={"paramstyle": "named"}, + ) + + with context.begin_transaction(): + context.run_migrations() + + +def do_run_migrations(connection: Connection) -> None: + context.configure(connection=connection, target_metadata=target_metadata) + + with context.begin_transaction(): + context.run_migrations() + + +async def run_async_migrations() -> None: + """In this scenario we need to create an Engine + and associate a connection with the context. + + """ + + connectable = async_engine_from_config( + config.get_section(config.config_ini_section, {}), + prefix="sqlalchemy.", + poolclass=pool.NullPool, + ) + + async with connectable.connect() as connection: + await connection.run_sync(do_run_migrations) + + await connectable.dispose() + + +def run_migrations_online() -> None: + """Run migrations in 'online' mode.""" + + asyncio.run(run_async_migrations()) + + +if context.is_offline_mode(): + run_migrations_offline() +else: + run_migrations_online() diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/templates/async/script.py.mako b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/templates/async/script.py.mako new file mode 100644 index 0000000000000000000000000000000000000000..11016301e749297acb67822efc7974ee53c905c6 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/templates/async/script.py.mako @@ -0,0 +1,28 @@ +"""${message} + +Revision ID: ${up_revision} +Revises: ${down_revision | comma,n} +Create Date: ${create_date} + +""" +from typing import Sequence, Union + +from alembic import op +import sqlalchemy as sa +${imports if imports else ""} + +# revision identifiers, used by Alembic. 
+revision: str = ${repr(up_revision)} +down_revision: Union[str, Sequence[str], None] = ${repr(down_revision)} +branch_labels: Union[str, Sequence[str], None] = ${repr(branch_labels)} +depends_on: Union[str, Sequence[str], None] = ${repr(depends_on)} + + +def upgrade() -> None: + """Upgrade schema.""" + ${upgrades if upgrades else "pass"} + + +def downgrade() -> None: + """Downgrade schema.""" + ${downgrades if downgrades else "pass"} diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/templates/generic/README b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/templates/generic/README new file mode 100644 index 0000000000000000000000000000000000000000..98e4f9c44effe479ed38c66ba922e7bcc672916f --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/templates/generic/README @@ -0,0 +1 @@ +Generic single-database configuration. \ No newline at end of file diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/templates/generic/alembic.ini.mako b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/templates/generic/alembic.ini.mako new file mode 100644 index 0000000000000000000000000000000000000000..bb93d0e3cf17447b5aa78da5e1d54ba4d966bae6 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/templates/generic/alembic.ini.mako @@ -0,0 +1,147 @@ +# A generic, single database configuration. + +[alembic] +# path to migration scripts. +# this is typically a path given in POSIX (e.g. forward slashes) +# format, relative to the token %(here)s which refers to the location of this +# ini file +script_location = ${script_location} + +# template used to generate migration file names; The default value is %%(rev)s_%%(slug)s +# Uncomment the line below if you want the files to be prepended with date and time +# see https://alembic.sqlalchemy.org/en/latest/tutorial.html#editing-the-ini-file +# for all available tokens +# file_template = %%(year)d_%%(month).2d_%%(day).2d_%%(hour).2d%%(minute).2d-%%(rev)s_%%(slug)s + +# sys.path path, will be prepended to sys.path if present. +# defaults to the current working directory. for multiple paths, the path separator +# is defined by "path_separator" below. +prepend_sys_path = . + + +# timezone to use when rendering the date within the migration file +# as well as the filename. +# If specified, requires the python>=3.9 or backports.zoneinfo library and tzdata library. +# Any required deps can installed by adding `alembic[tz]` to the pip requirements +# string value is passed to ZoneInfo() +# leave blank for localtime +# timezone = + +# max length of characters to apply to the "slug" field +# truncate_slug_length = 40 + +# set to 'true' to run the environment during +# the 'revision' command, regardless of autogenerate +# revision_environment = false + +# set to 'true' to allow .pyc and .pyo files without +# a source .py file to be detected as revisions in the +# versions/ directory +# sourceless = false + +# version location specification; This defaults +# to /versions. When using multiple version +# directories, initial revisions must be specified with --version-path. +# The path separator used here should be the separator specified by "path_separator" +# below. 
+# version_locations = %(here)s/bar:%(here)s/bat:%(here)s/alembic/versions + +# path_separator; This indicates what character is used to split lists of file +# paths, including version_locations and prepend_sys_path within configparser +# files such as alembic.ini. +# The default rendered in new alembic.ini files is "os", which uses os.pathsep +# to provide os-dependent path splitting. +# +# Note that in order to support legacy alembic.ini files, this default does NOT +# take place if path_separator is not present in alembic.ini. If this +# option is omitted entirely, fallback logic is as follows: +# +# 1. Parsing of the version_locations option falls back to using the legacy +# "version_path_separator" key, which if absent then falls back to the legacy +# behavior of splitting on spaces and/or commas. +# 2. Parsing of the prepend_sys_path option falls back to the legacy +# behavior of splitting on spaces, commas, or colons. +# +# Valid values for path_separator are: +# +# path_separator = : +# path_separator = ; +# path_separator = space +# path_separator = newline +# +# Use os.pathsep. Default configuration used for new projects. +path_separator = os + +# set to 'true' to search source files recursively +# in each "version_locations" directory +# new in Alembic version 1.10 +# recursive_version_locations = false + +# the output encoding used when revision files +# are written from script.py.mako +# output_encoding = utf-8 + +# database URL. This is consumed by the user-maintained env.py script only. +# other means of configuring database URLs may be customized within the env.py +# file. +sqlalchemy.url = driver://user:pass@localhost/dbname + + +[post_write_hooks] +# post_write_hooks defines scripts or Python functions that are run +# on newly generated revision scripts. See the documentation for further +# detail and examples + +# format using "black" - use the console_scripts runner, against the "black" entrypoint +# hooks = black +# black.type = console_scripts +# black.entrypoint = black +# black.options = -l 79 REVISION_SCRIPT_FILENAME + +# lint with attempts to fix using "ruff" - use the module runner, against the "ruff" module +# hooks = ruff +# ruff.type = module +# ruff.module = ruff +# ruff.options = check --fix REVISION_SCRIPT_FILENAME + +# Alternatively, use the exec runner to execute a binary found on your PATH +# hooks = ruff +# ruff.type = exec +# ruff.executable = ruff +# ruff.options = check --fix REVISION_SCRIPT_FILENAME + +# Logging configuration. This is also consumed by the user-maintained +# env.py script only. 
+[loggers] +keys = root,sqlalchemy,alembic + +[handlers] +keys = console + +[formatters] +keys = generic + +[logger_root] +level = WARNING +handlers = console +qualname = + +[logger_sqlalchemy] +level = WARNING +handlers = +qualname = sqlalchemy.engine + +[logger_alembic] +level = INFO +handlers = +qualname = alembic + +[handler_console] +class = StreamHandler +args = (sys.stderr,) +level = NOTSET +formatter = generic + +[formatter_generic] +format = %(levelname)-5.5s [%(name)s] %(message)s +datefmt = %H:%M:%S diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/templates/generic/env.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/templates/generic/env.py new file mode 100644 index 0000000000000000000000000000000000000000..36112a3c68590d6a8e07fea0ce70a5afb38c951a --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/templates/generic/env.py @@ -0,0 +1,78 @@ +from logging.config import fileConfig + +from sqlalchemy import engine_from_config +from sqlalchemy import pool + +from alembic import context + +# this is the Alembic Config object, which provides +# access to the values within the .ini file in use. +config = context.config + +# Interpret the config file for Python logging. +# This line sets up loggers basically. +if config.config_file_name is not None: + fileConfig(config.config_file_name) + +# add your model's MetaData object here +# for 'autogenerate' support +# from myapp import mymodel +# target_metadata = mymodel.Base.metadata +target_metadata = None + +# other values from the config, defined by the needs of env.py, +# can be acquired: +# my_important_option = config.get_main_option("my_important_option") +# ... etc. + + +def run_migrations_offline() -> None: + """Run migrations in 'offline' mode. + + This configures the context with just a URL + and not an Engine, though an Engine is acceptable + here as well. By skipping the Engine creation + we don't even need a DBAPI to be available. + + Calls to context.execute() here emit the given string to the + script output. + + """ + url = config.get_main_option("sqlalchemy.url") + context.configure( + url=url, + target_metadata=target_metadata, + literal_binds=True, + dialect_opts={"paramstyle": "named"}, + ) + + with context.begin_transaction(): + context.run_migrations() + + +def run_migrations_online() -> None: + """Run migrations in 'online' mode. + + In this scenario we need to create an Engine + and associate a connection with the context. 
+ + """ + connectable = engine_from_config( + config.get_section(config.config_ini_section, {}), + prefix="sqlalchemy.", + poolclass=pool.NullPool, + ) + + with connectable.connect() as connection: + context.configure( + connection=connection, target_metadata=target_metadata + ) + + with context.begin_transaction(): + context.run_migrations() + + +if context.is_offline_mode(): + run_migrations_offline() +else: + run_migrations_online() diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/templates/generic/script.py.mako b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/templates/generic/script.py.mako new file mode 100644 index 0000000000000000000000000000000000000000..11016301e749297acb67822efc7974ee53c905c6 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/templates/generic/script.py.mako @@ -0,0 +1,28 @@ +"""${message} + +Revision ID: ${up_revision} +Revises: ${down_revision | comma,n} +Create Date: ${create_date} + +""" +from typing import Sequence, Union + +from alembic import op +import sqlalchemy as sa +${imports if imports else ""} + +# revision identifiers, used by Alembic. +revision: str = ${repr(up_revision)} +down_revision: Union[str, Sequence[str], None] = ${repr(down_revision)} +branch_labels: Union[str, Sequence[str], None] = ${repr(branch_labels)} +depends_on: Union[str, Sequence[str], None] = ${repr(depends_on)} + + +def upgrade() -> None: + """Upgrade schema.""" + ${upgrades if upgrades else "pass"} + + +def downgrade() -> None: + """Downgrade schema.""" + ${downgrades if downgrades else "pass"} diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/templates/multidb/README b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/templates/multidb/README new file mode 100644 index 0000000000000000000000000000000000000000..f046ec91427e2f4edd53dcb5409b21955c8fd0af --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/templates/multidb/README @@ -0,0 +1,12 @@ +Rudimentary multi-database configuration. + +Multi-DB isn't vastly different from generic. The primary difference is that it +will run the migrations N times (depending on how many databases you have +configured), providing one engine name and associated context for each run. + +That engine name will then allow the migration to restrict what runs within it to +just the appropriate migrations for that engine. You can see this behavior within +the mako template. + +In the provided configuration, you'll need to have `databases` provided in +alembic's config, and an `sqlalchemy.url` provided for each engine name. diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/templates/multidb/alembic.ini.mako b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/templates/multidb/alembic.ini.mako new file mode 100644 index 0000000000000000000000000000000000000000..a6629839552197a124088e8e46a011446b5165ee --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/templates/multidb/alembic.ini.mako @@ -0,0 +1,155 @@ +# a multi-database configuration. + +[alembic] +# path to migration scripts. +# this is typically a path given in POSIX (e.g. 
forward slashes) +# format, relative to the token %(here)s which refers to the location of this +# ini file +script_location = ${script_location} + +# template used to generate migration file names; The default value is %%(rev)s_%%(slug)s +# Uncomment the line below if you want the files to be prepended with date and time +# see https://alembic.sqlalchemy.org/en/latest/tutorial.html#editing-the-ini-file +# for all available tokens +# file_template = %%(year)d_%%(month).2d_%%(day).2d_%%(hour).2d%%(minute).2d-%%(rev)s_%%(slug)s + +# sys.path path, will be prepended to sys.path if present. +# defaults to the current working directory. for multiple paths, the path separator +# is defined by "path_separator" below. +prepend_sys_path = . + +# timezone to use when rendering the date within the migration file +# as well as the filename. +# If specified, requires the python>=3.9 or backports.zoneinfo library and tzdata library. +# Any required deps can installed by adding `alembic[tz]` to the pip requirements +# string value is passed to ZoneInfo() +# leave blank for localtime +# timezone = + +# max length of characters to apply to the "slug" field +# truncate_slug_length = 40 + +# set to 'true' to run the environment during +# the 'revision' command, regardless of autogenerate +# revision_environment = false + +# set to 'true' to allow .pyc and .pyo files without +# a source .py file to be detected as revisions in the +# versions/ directory +# sourceless = false + +# version location specification; This defaults +# to /versions. When using multiple version +# directories, initial revisions must be specified with --version-path. +# The path separator used here should be the separator specified by "path_separator" +# below. +# version_locations = %(here)s/bar:%(here)s/bat:%(here)s/alembic/versions + +# path_separator; This indicates what character is used to split lists of file +# paths, including version_locations and prepend_sys_path within configparser +# files such as alembic.ini. +# The default rendered in new alembic.ini files is "os", which uses os.pathsep +# to provide os-dependent path splitting. +# +# Note that in order to support legacy alembic.ini files, this default does NOT +# take place if path_separator is not present in alembic.ini. If this +# option is omitted entirely, fallback logic is as follows: +# +# 1. Parsing of the version_locations option falls back to using the legacy +# "version_path_separator" key, which if absent then falls back to the legacy +# behavior of splitting on spaces and/or commas. +# 2. Parsing of the prepend_sys_path option falls back to the legacy +# behavior of splitting on spaces, commas, or colons. +# +# Valid values for path_separator are: +# +# path_separator = : +# path_separator = ; +# path_separator = space +# path_separator = newline +# +# Use os.pathsep. Default configuration used for new projects. +path_separator = os + +# set to 'true' to search source files recursively +# in each "version_locations" directory +# new in Alembic version 1.10 +# recursive_version_locations = false + +# the output encoding used when revision files +# are written from script.py.mako +# output_encoding = utf-8 + +# for multiple database configuration, new named sections are added +# which each include a distinct ``sqlalchemy.url`` entry. A custom value +# ``databases`` is added which indicates a listing of the per-database sections. 
+# The ``databases`` entry as well as the URLs present in the ``[engine1]`` +# and ``[engine2]`` sections continue to be consumed by the user-maintained env.py +# script only. + +databases = engine1, engine2 + +[engine1] +sqlalchemy.url = driver://user:pass@localhost/dbname + +[engine2] +sqlalchemy.url = driver://user:pass@localhost/dbname2 + +[post_write_hooks] +# post_write_hooks defines scripts or Python functions that are run +# on newly generated revision scripts. See the documentation for further +# detail and examples + +# format using "black" - use the console_scripts runner, against the "black" entrypoint +# hooks = black +# black.type = console_scripts +# black.entrypoint = black +# black.options = -l 79 REVISION_SCRIPT_FILENAME + +# lint with attempts to fix using "ruff" - use the module runner, against the "ruff" module +# hooks = ruff +# ruff.type = module +# ruff.module = ruff +# ruff.options = check --fix REVISION_SCRIPT_FILENAME + +# Alternatively, use the exec runner to execute a binary found on your PATH +# hooks = ruff +# ruff.type = exec +# ruff.executable = ruff +# ruff.options = check --fix REVISION_SCRIPT_FILENAME + +# Logging configuration. This is also consumed by the user-maintained +# env.py script only. +[loggers] +keys = root,sqlalchemy,alembic + +[handlers] +keys = console + +[formatters] +keys = generic + +[logger_root] +level = WARNING +handlers = console +qualname = + +[logger_sqlalchemy] +level = WARNING +handlers = +qualname = sqlalchemy.engine + +[logger_alembic] +level = INFO +handlers = +qualname = alembic + +[handler_console] +class = StreamHandler +args = (sys.stderr,) +level = NOTSET +formatter = generic + +[formatter_generic] +format = %(levelname)-5.5s [%(name)s] %(message)s +datefmt = %H:%M:%S diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/templates/multidb/env.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/templates/multidb/env.py new file mode 100644 index 0000000000000000000000000000000000000000..e937b64eeed2fd980c214ab87505d75041b581ad --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/templates/multidb/env.py @@ -0,0 +1,140 @@ +import logging +from logging.config import fileConfig +import re + +from sqlalchemy import engine_from_config +from sqlalchemy import pool + +from alembic import context + +USE_TWOPHASE = False + +# this is the Alembic Config object, which provides +# access to the values within the .ini file in use. +config = context.config + +# Interpret the config file for Python logging. +# This line sets up loggers basically. +if config.config_file_name is not None: + fileConfig(config.config_file_name) +logger = logging.getLogger("alembic.env") + +# gather section names referring to different +# databases. These are named "engine1", "engine2" +# in the sample .ini file. +db_names = config.get_main_option("databases", "") + +# add your model's MetaData objects here +# for 'autogenerate' support. These must be set +# up to hold just those tables targeting a +# particular database. table.tometadata() may be +# helpful here in case a "copy" of +# a MetaData is needed. +# from myapp import mymodel +# target_metadata = { +# 'engine1':mymodel.metadata1, +# 'engine2':mymodel.metadata2 +# } +target_metadata = {} + +# other values from the config, defined by the needs of env.py, +# can be acquired: +# my_important_option = config.get_main_option("my_important_option") +# ... etc. 
+ + +def run_migrations_offline() -> None: + """Run migrations in 'offline' mode. + + This configures the context with just a URL + and not an Engine, though an Engine is acceptable + here as well. By skipping the Engine creation + we don't even need a DBAPI to be available. + + Calls to context.execute() here emit the given string to the + script output. + + """ + # for the --sql use case, run migrations for each URL into + # individual files. + + engines = {} + for name in re.split(r",\s*", db_names): + engines[name] = rec = {} + rec["url"] = context.config.get_section_option(name, "sqlalchemy.url") + + for name, rec in engines.items(): + logger.info("Migrating database %s" % name) + file_ = "%s.sql" % name + logger.info("Writing output to %s" % file_) + with open(file_, "w") as buffer: + context.configure( + url=rec["url"], + output_buffer=buffer, + target_metadata=target_metadata.get(name), + literal_binds=True, + dialect_opts={"paramstyle": "named"}, + ) + with context.begin_transaction(): + context.run_migrations(engine_name=name) + + +def run_migrations_online() -> None: + """Run migrations in 'online' mode. + + In this scenario we need to create an Engine + and associate a connection with the context. + + """ + + # for the direct-to-DB use case, start a transaction on all + # engines, then run all migrations, then commit all transactions. + + engines = {} + for name in re.split(r",\s*", db_names): + engines[name] = rec = {} + rec["engine"] = engine_from_config( + context.config.get_section(name, {}), + prefix="sqlalchemy.", + poolclass=pool.NullPool, + ) + + for name, rec in engines.items(): + engine = rec["engine"] + rec["connection"] = conn = engine.connect() + + if USE_TWOPHASE: + rec["transaction"] = conn.begin_twophase() + else: + rec["transaction"] = conn.begin() + + try: + for name, rec in engines.items(): + logger.info("Migrating database %s" % name) + context.configure( + connection=rec["connection"], + upgrade_token="%s_upgrades" % name, + downgrade_token="%s_downgrades" % name, + target_metadata=target_metadata.get(name), + ) + context.run_migrations(engine_name=name) + + if USE_TWOPHASE: + for rec in engines.values(): + rec["transaction"].prepare() + + for rec in engines.values(): + rec["transaction"].commit() + except: + for rec in engines.values(): + rec["transaction"].rollback() + raise + finally: + for rec in engines.values(): + rec["connection"].close() + + +if context.is_offline_mode(): + run_migrations_offline() +else: + run_migrations_online() diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/templates/multidb/script.py.mako b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/templates/multidb/script.py.mako new file mode 100644 index 0000000000000000000000000000000000000000..8e667d84c8a46494a83c38d950e57e96abffd7b7 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/templates/multidb/script.py.mako @@ -0,0 +1,51 @@ +<%! +import re + +%>"""${message} + +Revision ID: ${up_revision} +Revises: ${down_revision | comma,n} +Create Date: ${create_date} + +""" +from typing import Sequence, Union + +from alembic import op +import sqlalchemy as sa +${imports if imports else ""} + +# revision identifiers, used by Alembic. 
+revision: str = ${repr(up_revision)} +down_revision: Union[str, Sequence[str], None] = ${repr(down_revision)} +branch_labels: Union[str, Sequence[str], None] = ${repr(branch_labels)} +depends_on: Union[str, Sequence[str], None] = ${repr(depends_on)} + + +def upgrade(engine_name: str) -> None: + """Upgrade schema.""" + globals()["upgrade_%s" % engine_name]() + + +def downgrade(engine_name: str) -> None: + """Downgrade schema.""" + globals()["downgrade_%s" % engine_name]() + +<% + db_names = config.get_main_option("databases") +%> + +## generate an "upgrade_() / downgrade_()" function +## for each database name in the ini file. + +% for db_name in re.split(r',\s*', db_names): + +def upgrade_${db_name}() -> None: + """Upgrade ${db_name} schema.""" + ${context.get("%s_upgrades" % db_name, "pass")} + + +def downgrade_${db_name}() -> None: + """Downgrade ${db_name} schema.""" + ${context.get("%s_downgrades" % db_name, "pass")} + +% endfor diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/templates/pyproject/README b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/templates/pyproject/README new file mode 100644 index 0000000000000000000000000000000000000000..fdacc05f68ff5abb9bd31a924ccaa48ba8d23de9 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/templates/pyproject/README @@ -0,0 +1 @@ +pyproject configuration, based on the generic configuration. \ No newline at end of file diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/templates/pyproject/alembic.ini.mako b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/templates/pyproject/alembic.ini.mako new file mode 100644 index 0000000000000000000000000000000000000000..3d10f0e46cb36643685750db6dd5084fcbb33c32 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/templates/pyproject/alembic.ini.mako @@ -0,0 +1,44 @@ +# A generic, single database configuration. + +[alembic] + +# database URL. This is consumed by the user-maintained env.py script only. +# other means of configuring database URLs may be customized within the env.py +# file. 
+sqlalchemy.url = driver://user:pass@localhost/dbname + + +# Logging configuration +[loggers] +keys = root,sqlalchemy,alembic + +[handlers] +keys = console + +[formatters] +keys = generic + +[logger_root] +level = WARNING +handlers = console +qualname = + +[logger_sqlalchemy] +level = WARNING +handlers = +qualname = sqlalchemy.engine + +[logger_alembic] +level = INFO +handlers = +qualname = alembic + +[handler_console] +class = StreamHandler +args = (sys.stderr,) +level = NOTSET +formatter = generic + +[formatter_generic] +format = %(levelname)-5.5s [%(name)s] %(message)s +datefmt = %H:%M:%S diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/templates/pyproject/env.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/templates/pyproject/env.py new file mode 100644 index 0000000000000000000000000000000000000000..36112a3c68590d6a8e07fea0ce70a5afb38c951a --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/templates/pyproject/env.py @@ -0,0 +1,78 @@ +from logging.config import fileConfig + +from sqlalchemy import engine_from_config +from sqlalchemy import pool + +from alembic import context + +# this is the Alembic Config object, which provides +# access to the values within the .ini file in use. +config = context.config + +# Interpret the config file for Python logging. +# This line sets up loggers basically. +if config.config_file_name is not None: + fileConfig(config.config_file_name) + +# add your model's MetaData object here +# for 'autogenerate' support +# from myapp import mymodel +# target_metadata = mymodel.Base.metadata +target_metadata = None + +# other values from the config, defined by the needs of env.py, +# can be acquired: +# my_important_option = config.get_main_option("my_important_option") +# ... etc. + + +def run_migrations_offline() -> None: + """Run migrations in 'offline' mode. + + This configures the context with just a URL + and not an Engine, though an Engine is acceptable + here as well. By skipping the Engine creation + we don't even need a DBAPI to be available. + + Calls to context.execute() here emit the given string to the + script output. + + """ + url = config.get_main_option("sqlalchemy.url") + context.configure( + url=url, + target_metadata=target_metadata, + literal_binds=True, + dialect_opts={"paramstyle": "named"}, + ) + + with context.begin_transaction(): + context.run_migrations() + + +def run_migrations_online() -> None: + """Run migrations in 'online' mode. + + In this scenario we need to create an Engine + and associate a connection with the context. 
+ + """ + connectable = engine_from_config( + config.get_section(config.config_ini_section, {}), + prefix="sqlalchemy.", + poolclass=pool.NullPool, + ) + + with connectable.connect() as connection: + context.configure( + connection=connection, target_metadata=target_metadata + ) + + with context.begin_transaction(): + context.run_migrations() + + +if context.is_offline_mode(): + run_migrations_offline() +else: + run_migrations_online() diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/templates/pyproject/pyproject.toml.mako b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/templates/pyproject/pyproject.toml.mako new file mode 100644 index 0000000000000000000000000000000000000000..e68cef331352ba8cd12bf1a4c9432c526c20ecec --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/templates/pyproject/pyproject.toml.mako @@ -0,0 +1,82 @@ +[tool.alembic] + +# path to migration scripts. +# this is typically a path given in POSIX (e.g. forward slashes) +# format, relative to the token %(here)s which refers to the location of this +# ini file +script_location = "${script_location}" + +# template used to generate migration file names; The default value is %%(rev)s_%%(slug)s +# Uncomment the line below if you want the files to be prepended with date and time +# see https://alembic.sqlalchemy.org/en/latest/tutorial.html#editing-the-ini-file +# for all available tokens +# file_template = "%%(year)d_%%(month).2d_%%(day).2d_%%(hour).2d%%(minute).2d-%%(rev)s_%%(slug)s" + +# additional paths to be prepended to sys.path. defaults to the current working directory. +prepend_sys_path = [ + "." +] + +# timezone to use when rendering the date within the migration file +# as well as the filename. +# If specified, requires the python>=3.9 or backports.zoneinfo library and tzdata library. +# Any required deps can installed by adding `alembic[tz]` to the pip requirements +# string value is passed to ZoneInfo() +# leave blank for localtime +# timezone = + +# max length of characters to apply to the "slug" field +# truncate_slug_length = 40 + +# set to 'true' to run the environment during +# the 'revision' command, regardless of autogenerate +# revision_environment = false + +# set to 'true' to allow .pyc and .pyo files without +# a source .py file to be detected as revisions in the +# versions/ directory +# sourceless = false + +# version location specification; This defaults +# to /versions. When using multiple version +# directories, initial revisions must be specified with --version-path. +# version_locations = [ +# "%(here)s/alembic/versions", +# "%(here)s/foo/bar" +# ] + + +# set to 'true' to search source files recursively +# in each "version_locations" directory +# new in Alembic version 1.10 +# recursive_version_locations = false + +# the output encoding used when revision files +# are written from script.py.mako +# output_encoding = "utf-8" + +# This section defines scripts or Python functions that are run +# on newly generated revision scripts. 
See the documentation for further +# detail and examples +# [[tool.alembic.post_write_hooks]] +# format using "black" - use the console_scripts runner, +# against the "black" entrypoint +# name = "black" +# type = "console_scripts" +# entrypoint = "black" +# options = "-l 79 REVISION_SCRIPT_FILENAME" +# +# [[tool.alembic.post_write_hooks]] +# lint with attempts to fix using "ruff" - use the module runner, against the "ruff" module +# name = "ruff" +# type = "module" +# module = "ruff" +# options = "check --fix REVISION_SCRIPT_FILENAME" +# +# [[tool.alembic.post_write_hooks]] +# Alternatively, use the exec runner to execute a binary found on your PATH +# name = "ruff" +# type = "exec" +# executable = "ruff" +# options = "check --fix REVISION_SCRIPT_FILENAME" + diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/templates/pyproject/script.py.mako b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/templates/pyproject/script.py.mako new file mode 100644 index 0000000000000000000000000000000000000000..11016301e749297acb67822efc7974ee53c905c6 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/templates/pyproject/script.py.mako @@ -0,0 +1,28 @@ +"""${message} + +Revision ID: ${up_revision} +Revises: ${down_revision | comma,n} +Create Date: ${create_date} + +""" +from typing import Sequence, Union + +from alembic import op +import sqlalchemy as sa +${imports if imports else ""} + +# revision identifiers, used by Alembic. +revision: str = ${repr(up_revision)} +down_revision: Union[str, Sequence[str], None] = ${repr(down_revision)} +branch_labels: Union[str, Sequence[str], None] = ${repr(branch_labels)} +depends_on: Union[str, Sequence[str], None] = ${repr(depends_on)} + + +def upgrade() -> None: + """Upgrade schema.""" + ${upgrades if upgrades else "pass"} + + +def downgrade() -> None: + """Downgrade schema.""" + ${downgrades if downgrades else "pass"} diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/templates/pyproject_async/README b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/templates/pyproject_async/README new file mode 100644 index 0000000000000000000000000000000000000000..dfd718d3b9ba72cdc7b5f14fc5802e2bd4fa0dfa --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/templates/pyproject_async/README @@ -0,0 +1 @@ +pyproject configuration, with an async dbapi. \ No newline at end of file diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/templates/pyproject_async/alembic.ini.mako b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/templates/pyproject_async/alembic.ini.mako new file mode 100644 index 0000000000000000000000000000000000000000..3d10f0e46cb36643685750db6dd5084fcbb33c32 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/templates/pyproject_async/alembic.ini.mako @@ -0,0 +1,44 @@ +# A generic, single database configuration. + +[alembic] + +# database URL. This is consumed by the user-maintained env.py script only. +# other means of configuring database URLs may be customized within the env.py +# file. 
+sqlalchemy.url = driver://user:pass@localhost/dbname + + +# Logging configuration +[loggers] +keys = root,sqlalchemy,alembic + +[handlers] +keys = console + +[formatters] +keys = generic + +[logger_root] +level = WARNING +handlers = console +qualname = + +[logger_sqlalchemy] +level = WARNING +handlers = +qualname = sqlalchemy.engine + +[logger_alembic] +level = INFO +handlers = +qualname = alembic + +[handler_console] +class = StreamHandler +args = (sys.stderr,) +level = NOTSET +formatter = generic + +[formatter_generic] +format = %(levelname)-5.5s [%(name)s] %(message)s +datefmt = %H:%M:%S diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/templates/pyproject_async/env.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/templates/pyproject_async/env.py new file mode 100644 index 0000000000000000000000000000000000000000..9f2d51940080a1a7c954a8916dab86f00d5a4aa5 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/templates/pyproject_async/env.py @@ -0,0 +1,89 @@ +import asyncio +from logging.config import fileConfig + +from sqlalchemy import pool +from sqlalchemy.engine import Connection +from sqlalchemy.ext.asyncio import async_engine_from_config + +from alembic import context + +# this is the Alembic Config object, which provides +# access to the values within the .ini file in use. +config = context.config + +# Interpret the config file for Python logging. +# This line sets up loggers basically. +if config.config_file_name is not None: + fileConfig(config.config_file_name) + +# add your model's MetaData object here +# for 'autogenerate' support +# from myapp import mymodel +# target_metadata = mymodel.Base.metadata +target_metadata = None + +# other values from the config, defined by the needs of env.py, +# can be acquired: +# my_important_option = config.get_main_option("my_important_option") +# ... etc. + + +def run_migrations_offline() -> None: + """Run migrations in 'offline' mode. + + This configures the context with just a URL + and not an Engine, though an Engine is acceptable + here as well. By skipping the Engine creation + we don't even need a DBAPI to be available. + + Calls to context.execute() here emit the given string to the + script output. + + """ + url = config.get_main_option("sqlalchemy.url") + context.configure( + url=url, + target_metadata=target_metadata, + literal_binds=True, + dialect_opts={"paramstyle": "named"}, + ) + + with context.begin_transaction(): + context.run_migrations() + + +def do_run_migrations(connection: Connection) -> None: + context.configure(connection=connection, target_metadata=target_metadata) + + with context.begin_transaction(): + context.run_migrations() + + +async def run_async_migrations() -> None: + """In this scenario we need to create an Engine + and associate a connection with the context. 
+ + """ + + connectable = async_engine_from_config( + config.get_section(config.config_ini_section, {}), + prefix="sqlalchemy.", + poolclass=pool.NullPool, + ) + + async with connectable.connect() as connection: + await connection.run_sync(do_run_migrations) + + await connectable.dispose() + + +def run_migrations_online() -> None: + """Run migrations in 'online' mode.""" + + asyncio.run(run_async_migrations()) + + +if context.is_offline_mode(): + run_migrations_offline() +else: + run_migrations_online() diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/templates/pyproject_async/pyproject.toml.mako b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/templates/pyproject_async/pyproject.toml.mako new file mode 100644 index 0000000000000000000000000000000000000000..e68cef331352ba8cd12bf1a4c9432c526c20ecec --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/templates/pyproject_async/pyproject.toml.mako @@ -0,0 +1,82 @@ +[tool.alembic] + +# path to migration scripts. +# this is typically a path given in POSIX (e.g. forward slashes) +# format, relative to the token %(here)s which refers to the location of this +# ini file +script_location = "${script_location}" + +# template used to generate migration file names; The default value is %%(rev)s_%%(slug)s +# Uncomment the line below if you want the files to be prepended with date and time +# see https://alembic.sqlalchemy.org/en/latest/tutorial.html#editing-the-ini-file +# for all available tokens +# file_template = "%%(year)d_%%(month).2d_%%(day).2d_%%(hour).2d%%(minute).2d-%%(rev)s_%%(slug)s" + +# additional paths to be prepended to sys.path. defaults to the current working directory. +prepend_sys_path = [ + "." +] + +# timezone to use when rendering the date within the migration file +# as well as the filename. +# If specified, requires the python>=3.9 or backports.zoneinfo library and tzdata library. +# Any required deps can installed by adding `alembic[tz]` to the pip requirements +# string value is passed to ZoneInfo() +# leave blank for localtime +# timezone = + +# max length of characters to apply to the "slug" field +# truncate_slug_length = 40 + +# set to 'true' to run the environment during +# the 'revision' command, regardless of autogenerate +# revision_environment = false + +# set to 'true' to allow .pyc and .pyo files without +# a source .py file to be detected as revisions in the +# versions/ directory +# sourceless = false + +# version location specification; This defaults +# to /versions. When using multiple version +# directories, initial revisions must be specified with --version-path. +# version_locations = [ +# "%(here)s/alembic/versions", +# "%(here)s/foo/bar" +# ] + + +# set to 'true' to search source files recursively +# in each "version_locations" directory +# new in Alembic version 1.10 +# recursive_version_locations = false + +# the output encoding used when revision files +# are written from script.py.mako +# output_encoding = "utf-8" + +# This section defines scripts or Python functions that are run +# on newly generated revision scripts. 
See the documentation for further +# detail and examples +# [[tool.alembic.post_write_hooks]] +# format using "black" - use the console_scripts runner, +# against the "black" entrypoint +# name = "black" +# type = "console_scripts" +# entrypoint = "black" +# options = "-l 79 REVISION_SCRIPT_FILENAME" +# +# [[tool.alembic.post_write_hooks]] +# lint with attempts to fix using "ruff" - use the module runner, against the "ruff" module +# name = "ruff" +# type = "module" +# module = "ruff" +# options = "check --fix REVISION_SCRIPT_FILENAME" +# +# [[tool.alembic.post_write_hooks]] +# Alternatively, use the exec runner to execute a binary found on your PATH +# name = "ruff" +# type = "exec" +# executable = "ruff" +# options = "check --fix REVISION_SCRIPT_FILENAME" + diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/templates/pyproject_async/script.py.mako b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/templates/pyproject_async/script.py.mako new file mode 100644 index 0000000000000000000000000000000000000000..11016301e749297acb67822efc7974ee53c905c6 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/templates/pyproject_async/script.py.mako @@ -0,0 +1,28 @@ +"""${message} + +Revision ID: ${up_revision} +Revises: ${down_revision | comma,n} +Create Date: ${create_date} + +""" +from typing import Sequence, Union + +from alembic import op +import sqlalchemy as sa +${imports if imports else ""} + +# revision identifiers, used by Alembic. +revision: str = ${repr(up_revision)} +down_revision: Union[str, Sequence[str], None] = ${repr(down_revision)} +branch_labels: Union[str, Sequence[str], None] = ${repr(branch_labels)} +depends_on: Union[str, Sequence[str], None] = ${repr(depends_on)} + + +def upgrade() -> None: + """Upgrade schema.""" + ${upgrades if upgrades else "pass"} + + +def downgrade() -> None: + """Downgrade schema.""" + ${downgrades if downgrades else "pass"} diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/testing/__init__.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/testing/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..32915081d9530a6c1239778a84f34479d21026b2 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/testing/__init__.py @@ -0,0 +1,32 @@ +from sqlalchemy.testing import config +from sqlalchemy.testing import emits_warning +from sqlalchemy.testing import engines +from sqlalchemy.testing import exclusions +from sqlalchemy.testing import mock +from sqlalchemy.testing import provide_metadata +from sqlalchemy.testing import skip_if +from sqlalchemy.testing import uses_deprecated +from sqlalchemy.testing.config import combinations +from sqlalchemy.testing.config import fixture +from sqlalchemy.testing.config import requirements as requires +from sqlalchemy.testing.config import Variation +from sqlalchemy.testing.config import variation + +from .assertions import assert_raises +from .assertions import assert_raises_message +from .assertions import emits_python_deprecation_warning +from .assertions import eq_ +from .assertions import eq_ignore_whitespace +from .assertions import expect_deprecated +from .assertions import expect_raises +from .assertions import expect_raises_message +from .assertions import expect_sqlalchemy_deprecated +from .assertions import 
expect_sqlalchemy_deprecated_20 +from .assertions import expect_warnings +from .assertions import is_ +from .assertions import is_false +from .assertions import is_not_ +from .assertions import is_true +from .assertions import ne_ +from .fixtures import TestBase +from .util import resolve_lambda diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/testing/assertions.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/testing/assertions.py new file mode 100644 index 0000000000000000000000000000000000000000..898fbd1677de6a161a8b993703c3cb42804a58ea --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/testing/assertions.py @@ -0,0 +1,179 @@ +from __future__ import annotations + +import contextlib +import re +import sys +from typing import Any +from typing import Dict + +from sqlalchemy import exc as sa_exc +from sqlalchemy.engine import default +from sqlalchemy.engine import URL +from sqlalchemy.testing.assertions import _expect_warnings +from sqlalchemy.testing.assertions import eq_ # noqa +from sqlalchemy.testing.assertions import is_ # noqa +from sqlalchemy.testing.assertions import is_false # noqa +from sqlalchemy.testing.assertions import is_not_ # noqa +from sqlalchemy.testing.assertions import is_true # noqa +from sqlalchemy.testing.assertions import ne_ # noqa +from sqlalchemy.util import decorator + + +def _assert_proper_exception_context(exception): + """assert that any exception we're catching does not have a __context__ + without a __cause__, and that __suppress_context__ is never set. + + Python 3 will report nested as exceptions as "during the handling of + error X, error Y occurred". That's not what we want to do. we want + these exceptions in a cause chain. + + """ + + if ( + exception.__context__ is not exception.__cause__ + and not exception.__suppress_context__ + ): + assert False, ( + "Exception %r was correctly raised but did not set a cause, " + "within context %r as its cause." 
+ % (exception, exception.__context__) + ) + + +def assert_raises(except_cls, callable_, *args, **kw): + return _assert_raises(except_cls, callable_, args, kw, check_context=True) + + +def assert_raises_context_ok(except_cls, callable_, *args, **kw): + return _assert_raises(except_cls, callable_, args, kw) + + +def assert_raises_message(except_cls, msg, callable_, *args, **kwargs): + return _assert_raises( + except_cls, callable_, args, kwargs, msg=msg, check_context=True + ) + + +def assert_raises_message_context_ok( + except_cls, msg, callable_, *args, **kwargs +): + return _assert_raises(except_cls, callable_, args, kwargs, msg=msg) + + +def _assert_raises( + except_cls, callable_, args, kwargs, msg=None, check_context=False +): + with _expect_raises(except_cls, msg, check_context) as ec: + callable_(*args, **kwargs) + return ec.error + + +class _ErrorContainer: + error: Any = None + + +@contextlib.contextmanager +def _expect_raises( + except_cls, msg=None, check_context=False, text_exact=False +): + ec = _ErrorContainer() + if check_context: + are_we_already_in_a_traceback = sys.exc_info()[0] + try: + yield ec + success = False + except except_cls as err: + ec.error = err + success = True + if msg is not None: + if text_exact: + assert str(err) == msg, f"{msg} != {err}" + else: + assert re.search(msg, str(err), re.UNICODE), f"{msg} !~ {err}" + if check_context and not are_we_already_in_a_traceback: + _assert_proper_exception_context(err) + print(str(err).encode("utf-8")) + + # assert outside the block so it works for AssertionError too ! + assert success, "Callable did not raise an exception" + + +def expect_raises(except_cls, check_context=True): + return _expect_raises(except_cls, check_context=check_context) + + +def expect_raises_message( + except_cls, msg, check_context=True, text_exact=False +): + return _expect_raises( + except_cls, msg=msg, check_context=check_context, text_exact=text_exact + ) + + +def eq_ignore_whitespace(a, b, msg=None): + a = re.sub(r"^\s+?|\n", "", a) + a = re.sub(r" {2,}", " ", a) + b = re.sub(r"^\s+?|\n", "", b) + b = re.sub(r" {2,}", " ", b) + + assert a == b, msg or "%r != %r" % (a, b) + + +_dialect_mods: Dict[Any, Any] = {} + + +def _get_dialect(name): + if name is None or name == "default": + return default.DefaultDialect() + else: + d = URL.create(name).get_dialect()() + + if name == "postgresql": + d.implicit_returning = True + elif name == "mssql": + d.legacy_schema_aliasing = False + return d + + +def expect_warnings(*messages, **kw): + """Context manager which expects one or more warnings. + + With no arguments, squelches all SAWarnings emitted via + sqlalchemy.util.warn and sqlalchemy.util.warn_limited. Otherwise + pass string expressions that will match selected warnings via regex; + all non-matching warnings are sent through. + + The expect version **asserts** that the warnings were in fact seen. + + Note that the test suite sets SAWarning warnings to raise exceptions. + + """ + return _expect_warnings(Warning, messages, **kw) + + +def emits_python_deprecation_warning(*messages): + """Decorator form of expect_warnings(). + + Note that emits_warning does **not** assert that the warnings + were in fact seen. 
+ + """ + + @decorator + def decorate(fn, *args, **kw): + with _expect_warnings(DeprecationWarning, assert_=False, *messages): + return fn(*args, **kw) + + return decorate + + +def expect_deprecated(*messages, **kw): + return _expect_warnings(DeprecationWarning, messages, **kw) + + +def expect_sqlalchemy_deprecated(*messages, **kw): + return _expect_warnings(sa_exc.SADeprecationWarning, messages, **kw) + + +def expect_sqlalchemy_deprecated_20(*messages, **kw): + return _expect_warnings(sa_exc.RemovedIn20Warning, messages, **kw) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/testing/env.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/testing/env.py new file mode 100644 index 0000000000000000000000000000000000000000..72a5e42451f11d15fd401f5cee878ddb8dcea04a --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/testing/env.py @@ -0,0 +1,557 @@ +import importlib.machinery +import os +from pathlib import Path +import shutil +import textwrap + +from sqlalchemy.testing import config +from sqlalchemy.testing import provision + +from . import util as testing_util +from .. import command +from .. import script +from .. import util +from ..script import Script +from ..script import ScriptDirectory + + +def _get_staging_directory(): + if provision.FOLLOWER_IDENT: + return f"scratch_{provision.FOLLOWER_IDENT}" + else: + return "scratch" + + +def staging_env(create=True, template="generic", sourceless=False): + cfg = _testing_config() + if create: + path = _join_path(_get_staging_directory(), "scripts") + assert not os.path.exists(path), ( + "staging directory %s already exists; poor cleanup?" % path + ) + + command.init(cfg, path, template=template) + if sourceless: + try: + # do an import so that a .pyc/.pyo is generated. + util.load_python_file(path, "env.py") + except AttributeError: + # we don't have the migration context set up yet + # so running the .env py throws this exception. + # theoretically we could be using py_compiler here to + # generate .pyc/.pyo without importing but not really + # worth it. 
+ pass + assert sourceless in ( + "pep3147_envonly", + "simple", + "pep3147_everything", + ), sourceless + make_sourceless( + _join_path(path, "env.py"), + "pep3147" if "pep3147" in sourceless else "simple", + ) + + sc = script.ScriptDirectory.from_config(cfg) + return sc + + +def clear_staging_env(): + from sqlalchemy.testing import engines + + engines.testing_reaper.close_all() + shutil.rmtree(_get_staging_directory(), True) + + +def script_file_fixture(txt): + dir_ = _join_path(_get_staging_directory(), "scripts") + path = _join_path(dir_, "script.py.mako") + with open(path, "w") as f: + f.write(txt) + + +def env_file_fixture(txt): + dir_ = _join_path(_get_staging_directory(), "scripts") + txt = ( + """ +from alembic import context + +config = context.config +""" + + txt + ) + + path = _join_path(dir_, "env.py") + pyc_path = util.pyc_file_from_path(path) + if pyc_path: + os.unlink(pyc_path) + + with open(path, "w") as f: + f.write(txt) + + +def _sqlite_file_db(tempname="foo.db", future=False, scope=None, **options): + dir_ = _join_path(_get_staging_directory(), "scripts") + url = "sqlite:///%s/%s" % (dir_, tempname) + if scope: + options["scope"] = scope + return testing_util.testing_engine(url=url, future=future, options=options) + + +def _sqlite_testing_config(sourceless=False, future=False): + dir_ = _join_path(_get_staging_directory(), "scripts") + url = f"sqlite:///{dir_}/foo.db" + + sqlalchemy_future = future or ("future" in config.db.__class__.__module__) + + return _write_config_file( + f""" +[alembic] +script_location = {dir_} +sqlalchemy.url = {url} +sourceless = {"true" if sourceless else "false"} +{"sqlalchemy.future = true" if sqlalchemy_future else ""} + +[loggers] +keys = root,sqlalchemy + +[handlers] +keys = console + +[logger_root] +level = WARNING +handlers = console +qualname = + +[logger_sqlalchemy] +level = DEBUG +handlers = +qualname = sqlalchemy.engine + +[handler_console] +class = StreamHandler +args = (sys.stderr,) +level = NOTSET +formatter = generic + +[formatters] +keys = generic + +[formatter_generic] +format = %%(levelname)-5.5s [%%(name)s] %%(message)s +datefmt = %%H:%%M:%%S + """ + ) + + +def _multi_dir_testing_config(sourceless=False, extra_version_location=""): + dir_ = _join_path(_get_staging_directory(), "scripts") + sqlalchemy_future = "future" in config.db.__class__.__module__ + + url = "sqlite:///%s/foo.db" % dir_ + + return _write_config_file( + f""" +[alembic] +script_location = {dir_} +sqlalchemy.url = {url} +sqlalchemy.future = {"true" if sqlalchemy_future else "false"} +sourceless = {"true" if sourceless else "false"} +path_separator = space +version_locations = %(here)s/model1/ %(here)s/model2/ %(here)s/model3/ \ +{extra_version_location} + +[loggers] +keys = root + +[handlers] +keys = console + +[logger_root] +level = WARNING +handlers = console +qualname = + +[handler_console] +class = StreamHandler +args = (sys.stderr,) +level = NOTSET +formatter = generic + +[formatters] +keys = generic + +[formatter_generic] +format = %%(levelname)-5.5s [%%(name)s] %%(message)s +datefmt = %%H:%%M:%%S + """ + ) + + +def _no_sql_pyproject_config(dialect="postgresql", directives=""): + """use a postgresql url with no host so that + connections guaranteed to fail""" + dir_ = _join_path(_get_staging_directory(), "scripts") + + return _write_toml_config( + f""" +[tool.alembic] +script_location ="{dir_}" +{textwrap.dedent(directives)} + + """, + f""" +[alembic] +sqlalchemy.url = {dialect}:// + +[loggers] +keys = root + +[handlers] +keys = console + 
+[logger_root] +level = WARNING +handlers = console +qualname = + +[handler_console] +class = StreamHandler +args = (sys.stderr,) +level = NOTSET +formatter = generic + +[formatters] +keys = generic + +[formatter_generic] +format = %%(levelname)-5.5s [%%(name)s] %%(message)s +datefmt = %%H:%%M:%%S + +""", + ) + + +def _no_sql_testing_config(dialect="postgresql", directives=""): + """use a postgresql url with no host so that + connections guaranteed to fail""" + dir_ = _join_path(_get_staging_directory(), "scripts") + return _write_config_file( + f""" +[alembic] +script_location ={dir_} +sqlalchemy.url = {dialect}:// +{directives} + +[loggers] +keys = root + +[handlers] +keys = console + +[logger_root] +level = WARNING +handlers = console +qualname = + +[handler_console] +class = StreamHandler +args = (sys.stderr,) +level = NOTSET +formatter = generic + +[formatters] +keys = generic + +[formatter_generic] +format = %%(levelname)-5.5s [%%(name)s] %%(message)s +datefmt = %%H:%%M:%%S + +""" + ) + + +def _write_toml_config(tomltext, initext): + cfg = _write_config_file(initext) + with open(cfg.toml_file_name, "w") as f: + f.write(tomltext) + return cfg + + +def _write_config_file(text): + cfg = _testing_config() + with open(cfg.config_file_name, "w") as f: + f.write(text) + return cfg + + +def _testing_config(): + from alembic.config import Config + + if not os.access(_get_staging_directory(), os.F_OK): + os.mkdir(_get_staging_directory()) + return Config( + _join_path(_get_staging_directory(), "test_alembic.ini"), + _join_path(_get_staging_directory(), "pyproject.toml"), + ) + + +def write_script( + scriptdir, rev_id, content, encoding="ascii", sourceless=False +): + old = scriptdir.revision_map.get_revision(rev_id) + path = old.path + + content = textwrap.dedent(content) + if encoding: + content = content.encode(encoding) + with open(path, "wb") as fp: + fp.write(content) + pyc_path = util.pyc_file_from_path(path) + if pyc_path: + os.unlink(pyc_path) + script = Script._from_path(scriptdir, path) + old = scriptdir.revision_map.get_revision(script.revision) + if old.down_revision != script.down_revision: + raise Exception("Can't change down_revision on a refresh operation.") + scriptdir.revision_map.add_revision(script, _replace=True) + + if sourceless: + make_sourceless( + path, "pep3147" if sourceless == "pep3147_everything" else "simple" + ) + + +def make_sourceless(path, style): + import py_compile + + py_compile.compile(path) + + if style == "simple": + pyc_path = util.pyc_file_from_path(path) + suffix = importlib.machinery.BYTECODE_SUFFIXES[0] + filepath, ext = os.path.splitext(path) + simple_pyc_path = filepath + suffix + shutil.move(pyc_path, simple_pyc_path) + pyc_path = simple_pyc_path + else: + assert style in ("pep3147", "simple") + pyc_path = util.pyc_file_from_path(path) + + assert os.access(pyc_path, os.F_OK) + + os.unlink(path) + + +def three_rev_fixture(cfg): + a = util.rev_id() + b = util.rev_id() + c = util.rev_id() + + script = ScriptDirectory.from_config(cfg) + script.generate_revision(a, "revision a", refresh=True, head="base") + write_script( + script, + a, + f"""\ +"Rev A" +revision = '{a}' +down_revision = None + +from alembic import op + + +def upgrade(): + op.execute("CREATE STEP 1") + + +def downgrade(): + op.execute("DROP STEP 1") + +""", + ) + + script.generate_revision(b, "revision b", refresh=True, head=a) + write_script( + script, + b, + f"""# coding: utf-8 +"Rev B, méil, %3" +revision = '{b}' +down_revision = '{a}' + +from alembic import op + + +def upgrade(): 
+ op.execute("CREATE STEP 2") + + +def downgrade(): + op.execute("DROP STEP 2") + +""", + encoding="utf-8", + ) + + script.generate_revision(c, "revision c", refresh=True, head=b) + write_script( + script, + c, + f"""\ +"Rev C" +revision = '{c}' +down_revision = '{b}' + +from alembic import op + + +def upgrade(): + op.execute("CREATE STEP 3") + + +def downgrade(): + op.execute("DROP STEP 3") + +""", + ) + return a, b, c + + +def multi_heads_fixture(cfg, a, b, c): + """Create a multiple head fixture from the three-revs fixture""" + + # a->b->c + # -> d -> e + # -> f + d = util.rev_id() + e = util.rev_id() + f = util.rev_id() + + script = ScriptDirectory.from_config(cfg) + script.generate_revision( + d, "revision d from b", head=b, splice=True, refresh=True + ) + write_script( + script, + d, + f"""\ +"Rev D" +revision = '{d}' +down_revision = '{b}' + +from alembic import op + + +def upgrade(): + op.execute("CREATE STEP 4") + + +def downgrade(): + op.execute("DROP STEP 4") + +""", + ) + + script.generate_revision( + e, "revision e from d", head=d, splice=True, refresh=True + ) + write_script( + script, + e, + f"""\ +"Rev E" +revision = '{e}' +down_revision = '{d}' + +from alembic import op + + +def upgrade(): + op.execute("CREATE STEP 5") + + +def downgrade(): + op.execute("DROP STEP 5") + +""", + ) + + script.generate_revision( + f, "revision f from b", head=b, splice=True, refresh=True + ) + write_script( + script, + f, + f"""\ +"Rev F" +revision = '{f}' +down_revision = '{b}' + +from alembic import op + + +def upgrade(): + op.execute("CREATE STEP 6") + + +def downgrade(): + op.execute("DROP STEP 6") + +""", + ) + + return d, e, f + + +def _multidb_testing_config(engines): + """alembic.ini fixture to work exactly with the 'multidb' template""" + + dir_ = _join_path(_get_staging_directory(), "scripts") + + sqlalchemy_future = "future" in config.db.__class__.__module__ + + databases = ", ".join(engines.keys()) + engines = "\n\n".join( + f"[{key}]\nsqlalchemy.url = {value.url}" + for key, value in engines.items() + ) + + return _write_config_file( + f""" +[alembic] +script_location = {dir_} +sourceless = false +sqlalchemy.future = {"true" if sqlalchemy_future else "false"} +databases = {databases} + +{engines} +[loggers] +keys = root + +[handlers] +keys = console + +[logger_root] +level = WARNING +handlers = console +qualname = + +[handler_console] +class = StreamHandler +args = (sys.stderr,) +level = NOTSET +formatter = generic + +[formatters] +keys = generic + +[formatter_generic] +format = %%(levelname)-5.5s [%%(name)s] %%(message)s +datefmt = %%H:%%M:%%S + """ + ) + + +def _join_path(base: str, *more: str): + return str(Path(base).joinpath(*more).as_posix()) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/testing/fixtures.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/testing/fixtures.py new file mode 100644 index 0000000000000000000000000000000000000000..61bcd7e938cce3d1916056eeeca91eb476bbcbbb --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/testing/fixtures.py @@ -0,0 +1,333 @@ +from __future__ import annotations + +import configparser +from contextlib import contextmanager +import io +import os +import re +import shutil +from typing import Any +from typing import Dict + +from sqlalchemy import Column +from sqlalchemy import create_mock_engine +from sqlalchemy import inspect +from sqlalchemy import MetaData +from sqlalchemy import String 
+from sqlalchemy import Table +from sqlalchemy import testing +from sqlalchemy import text +from sqlalchemy.testing import config +from sqlalchemy.testing import mock +from sqlalchemy.testing.assertions import eq_ +from sqlalchemy.testing.fixtures import FutureEngineMixin +from sqlalchemy.testing.fixtures import TablesTest as SQLAlchemyTablesTest +from sqlalchemy.testing.fixtures import TestBase as SQLAlchemyTestBase + +import alembic +from .assertions import _get_dialect +from .env import _get_staging_directory +from ..environment import EnvironmentContext +from ..migration import MigrationContext +from ..operations import Operations +from ..util import sqla_compat +from ..util.sqla_compat import sqla_2 + +testing_config = configparser.ConfigParser() +testing_config.read(["test.cfg"]) + + +class TestBase(SQLAlchemyTestBase): + is_sqlalchemy_future = sqla_2 + + @testing.fixture() + def clear_staging_dir(self): + yield + location = _get_staging_directory() + for filename in os.listdir(location): + file_path = os.path.join(location, filename) + if os.path.isfile(file_path) or os.path.islink(file_path): + os.unlink(file_path) + elif os.path.isdir(file_path): + shutil.rmtree(file_path) + + @contextmanager + def pushd(self, dirname): + current_dir = os.getcwd() + try: + os.chdir(dirname) + yield + finally: + os.chdir(current_dir) + + @testing.fixture() + def pop_alembic_config_env(self): + yield + os.environ.pop("ALEMBIC_CONFIG", None) + + @testing.fixture() + def ops_context(self, migration_context): + with migration_context.begin_transaction(_per_migration=True): + yield Operations(migration_context) + + @testing.fixture + def migration_context(self, connection): + return MigrationContext.configure( + connection, opts=dict(transaction_per_migration=True) + ) + + @testing.fixture + def as_sql_migration_context(self, connection): + return MigrationContext.configure( + connection, opts=dict(transaction_per_migration=True, as_sql=True) + ) + + @testing.fixture + def connection(self): + with config.db.connect() as conn: + yield conn + + +class TablesTest(TestBase, SQLAlchemyTablesTest): + pass + + +FutureEngineMixin.is_sqlalchemy_future = True + + +def capture_db(dialect="postgresql://"): + buf = [] + + def dump(sql, *multiparams, **params): + buf.append(str(sql.compile(dialect=engine.dialect))) + + engine = create_mock_engine(dialect, dump) + return engine, buf + + +_engs: Dict[Any, Any] = {} + + +@contextmanager +def capture_context_buffer(**kw): + if kw.pop("bytes_io", False): + buf = io.BytesIO() + else: + buf = io.StringIO() + + kw.update({"dialect_name": "sqlite", "output_buffer": buf}) + conf = EnvironmentContext.configure + + def configure(*arg, **opt): + opt.update(**kw) + return conf(*arg, **opt) + + with mock.patch.object(EnvironmentContext, "configure", configure): + yield buf + + +@contextmanager +def capture_engine_context_buffer(**kw): + from .env import _sqlite_file_db + from sqlalchemy import event + + buf = io.StringIO() + + eng = _sqlite_file_db() + + conn = eng.connect() + + @event.listens_for(conn, "before_cursor_execute") + def bce(conn, cursor, statement, parameters, context, executemany): + buf.write(statement + "\n") + + kw.update({"connection": conn}) + conf = EnvironmentContext.configure + + def configure(*arg, **opt): + opt.update(**kw) + return conf(*arg, **opt) + + with mock.patch.object(EnvironmentContext, "configure", configure): + yield buf + + +def op_fixture( + dialect="default", + as_sql=False, + naming_convention=None, + literal_binds=False, + 
native_boolean=None, +): + opts = {} + if naming_convention: + opts["target_metadata"] = MetaData(naming_convention=naming_convention) + + class buffer_: + def __init__(self): + self.lines = [] + + def write(self, msg): + msg = msg.strip() + msg = re.sub(r"[\n\t]", "", msg) + if as_sql: + # the impl produces soft tabs, + # so search for blocks of 4 spaces + msg = re.sub(r" ", "", msg) + msg = re.sub(r"\;\n*$", "", msg) + + self.lines.append(msg) + + def flush(self): + pass + + buf = buffer_() + + class ctx(MigrationContext): + def get_buf(self): + return buf + + def clear_assertions(self): + buf.lines[:] = [] + + def assert_(self, *sql): + # TODO: make this more flexible about + # whitespace and such + eq_(buf.lines, [re.sub(r"[\n\t]", "", s) for s in sql]) + + def assert_contains(self, sql): + for stmt in buf.lines: + if re.sub(r"[\n\t]", "", sql) in stmt: + return + else: + assert False, "Could not locate fragment %r in %r" % ( + sql, + buf.lines, + ) + + if as_sql: + opts["as_sql"] = as_sql + if literal_binds: + opts["literal_binds"] = literal_binds + + ctx_dialect = _get_dialect(dialect) + if native_boolean is not None: + ctx_dialect.supports_native_boolean = native_boolean + # this is new as of SQLAlchemy 1.2.7 and is used by SQL Server, + # which breaks assumptions in the alembic test suite + ctx_dialect.non_native_boolean_check_constraint = True + if not as_sql: + + def execute(stmt, *multiparam, **param): + if isinstance(stmt, str): + stmt = text(stmt) + assert stmt.supports_execution + sql = str(stmt.compile(dialect=ctx_dialect)) + + buf.write(sql) + + connection = mock.Mock(dialect=ctx_dialect, execute=execute) + else: + opts["output_buffer"] = buf + connection = None + context = ctx(ctx_dialect, connection, opts) + + alembic.op._proxy = Operations(context) + return context + + +class AlterColRoundTripFixture: + # since these tests are about syntax, use more recent SQLAlchemy as some of + # the type / server default compare logic might not work on older + # SQLAlchemy versions as seems to be the case for SQLAlchemy 1.1 on Oracle + + __requires__ = ("alter_column",) + + def setUp(self): + self.conn = config.db.connect() + self.ctx = MigrationContext.configure(self.conn) + self.op = Operations(self.ctx) + self.metadata = MetaData() + + def _compare_type(self, t1, t2): + c1 = Column("q", t1) + c2 = Column("q", t2) + assert not self.ctx.impl.compare_type( + c1, c2 + ), "Type objects %r and %r didn't compare as equivalent" % (t1, t2) + + def _compare_server_default(self, t1, s1, t2, s2): + c1 = Column("q", t1, server_default=s1) + c2 = Column("q", t2, server_default=s2) + assert not self.ctx.impl.compare_server_default( + c1, c2, s2, s1 + ), "server defaults %r and %r didn't compare as equivalent" % (s1, s2) + + def tearDown(self): + sqla_compat._safe_rollback_connection_transaction(self.conn) + with self.conn.begin(): + self.metadata.drop_all(self.conn) + self.conn.close() + + def _run_alter_col(self, from_, to_, compare=None): + column = Column( + from_.get("name", "colname"), + from_.get("type", String(10)), + nullable=from_.get("nullable", True), + server_default=from_.get("server_default", None), + # comment=from_.get("comment", None) + ) + t = Table("x", self.metadata, column) + + with sqla_compat._ensure_scope_for_ddl(self.conn): + t.create(self.conn) + insp = inspect(self.conn) + old_col = insp.get_columns("x")[0] + + # TODO: conditional comment support + self.op.alter_column( + "x", + column.name, + existing_type=column.type, + existing_server_default=( + 
column.server_default + if column.server_default is not None + else False + ), + existing_nullable=True if column.nullable else False, + # existing_comment=column.comment, + nullable=to_.get("nullable", None), + # modify_comment=False, + server_default=to_.get("server_default", False), + new_column_name=to_.get("name", None), + type_=to_.get("type", None), + ) + + insp = inspect(self.conn) + new_col = insp.get_columns("x")[0] + + if compare is None: + compare = to_ + + eq_( + new_col["name"], + compare["name"] if "name" in compare else column.name, + ) + self._compare_type( + new_col["type"], compare.get("type", old_col["type"]) + ) + eq_(new_col["nullable"], compare.get("nullable", column.nullable)) + self._compare_server_default( + new_col["type"], + new_col.get("default", None), + compare.get("type", old_col["type"]), + ( + compare["server_default"].text + if "server_default" in compare + else ( + column.server_default.arg.text + if column.server_default is not None + else None + ) + ), + ) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/testing/plugin/__init__.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/testing/plugin/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/testing/plugin/bootstrap.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/testing/plugin/bootstrap.py new file mode 100644 index 0000000000000000000000000000000000000000..d4a2c5521847f6d34003b49b2826ae49b1d84c29 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/testing/plugin/bootstrap.py @@ -0,0 +1,4 @@ +""" +Bootstrapper for test framework plugins. 
+ +""" diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/testing/requirements.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/testing/requirements.py new file mode 100644 index 0000000000000000000000000000000000000000..8b63c16ba387812357a556505c2a535f84021307 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/testing/requirements.py @@ -0,0 +1,176 @@ +from sqlalchemy.testing.requirements import Requirements + +from alembic import util +from ..testing import exclusions + + +class SuiteRequirements(Requirements): + @property + def schemas(self): + """Target database must support external schemas, and have one + named 'test_schema'.""" + + return exclusions.open() + + @property + def autocommit_isolation(self): + """target database should support 'AUTOCOMMIT' isolation level""" + + return exclusions.closed() + + @property + def materialized_views(self): + """needed for sqlalchemy compat""" + return exclusions.closed() + + @property + def unique_constraint_reflection(self): + def doesnt_have_check_uq_constraints(config): + from sqlalchemy import inspect + + insp = inspect(config.db) + try: + insp.get_unique_constraints("x") + except NotImplementedError: + return True + except TypeError: + return True + except Exception: + pass + return False + + return exclusions.skip_if(doesnt_have_check_uq_constraints) + + @property + def sequences(self): + """Target database must support SEQUENCEs.""" + + return exclusions.only_if( + [lambda config: config.db.dialect.supports_sequences], + "no sequence support", + ) + + @property + def foreign_key_match(self): + return exclusions.open() + + @property + def foreign_key_constraint_reflection(self): + return exclusions.open() + + @property + def check_constraints_w_enforcement(self): + """Target database must support check constraints + and also enforce them.""" + + return exclusions.open() + + @property + def reflects_pk_names(self): + return exclusions.closed() + + @property + def reflects_fk_options(self): + return exclusions.closed() + + @property + def sqlalchemy_1x(self): + return exclusions.skip_if( + lambda config: util.sqla_2, + "SQLAlchemy 1.x test", + ) + + @property + def sqlalchemy_2(self): + return exclusions.skip_if( + lambda config: not util.sqla_2, + "SQLAlchemy 2.x test", + ) + + @property + def asyncio(self): + def go(config): + try: + import greenlet # noqa: F401 + except ImportError: + return False + else: + return True + + return exclusions.only_if(go) + + @property + def comments(self): + return exclusions.only_if( + lambda config: config.db.dialect.supports_comments + ) + + @property + def alter_column(self): + return exclusions.open() + + @property + def computed_columns(self): + return exclusions.closed() + + @property + def autoincrement_on_composite_pk(self): + return exclusions.closed() + + @property + def fk_ondelete_is_reflected(self): + return exclusions.closed() + + @property + def fk_onupdate_is_reflected(self): + return exclusions.closed() + + @property + def fk_onupdate(self): + return exclusions.open() + + @property + def fk_ondelete_restrict(self): + return exclusions.open() + + @property + def fk_onupdate_restrict(self): + return exclusions.open() + + @property + def fk_ondelete_noaction(self): + return exclusions.open() + + @property + def fk_initially(self): + return exclusions.closed() + + @property + def fk_deferrable(self): + return exclusions.closed() + + @property + def 
fk_deferrable_is_reflected(self): + return exclusions.closed() + + @property + def fk_names(self): + return exclusions.open() + + @property + def integer_subtype_comparisons(self): + return exclusions.open() + + @property + def no_name_normalize(self): + return exclusions.skip_if( + lambda config: config.db.dialect.requires_name_normalize + ) + + @property + def identity_columns(self): + return exclusions.closed() + + @property + def identity_columns_alter(self): + return exclusions.closed() diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/testing/schemacompare.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/testing/schemacompare.py new file mode 100644 index 0000000000000000000000000000000000000000..204cc4ddc15b1457cdbacb2c238a625e19c49100 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/testing/schemacompare.py @@ -0,0 +1,169 @@ +from itertools import zip_longest + +from sqlalchemy import schema +from sqlalchemy.sql.elements import ClauseList + + +class CompareTable: + def __init__(self, table): + self.table = table + + def __eq__(self, other): + if self.table.name != other.name or self.table.schema != other.schema: + return False + + for c1, c2 in zip_longest(self.table.c, other.c): + if (c1 is None and c2 is not None) or ( + c2 is None and c1 is not None + ): + return False + if CompareColumn(c1) != c2: + return False + + return True + + # TODO: compare constraints, indexes + + def __ne__(self, other): + return not self.__eq__(other) + + +class CompareColumn: + def __init__(self, column): + self.column = column + + def __eq__(self, other): + return ( + self.column.name == other.name + and self.column.nullable == other.nullable + ) + # TODO: datatypes etc + + def __ne__(self, other): + return not self.__eq__(other) + + +class CompareIndex: + def __init__(self, index, name_only=False): + self.index = index + self.name_only = name_only + + def __eq__(self, other): + if self.name_only: + return self.index.name == other.name + else: + return ( + str(schema.CreateIndex(self.index)) + == str(schema.CreateIndex(other)) + and self.index.dialect_kwargs == other.dialect_kwargs + ) + + def __ne__(self, other): + return not self.__eq__(other) + + def __repr__(self): + expr = ClauseList(*self.index.expressions) + try: + expr_str = expr.compile().string + except Exception: + expr_str = str(expr) + return f"" + + +class CompareCheckConstraint: + def __init__(self, constraint): + self.constraint = constraint + + def __eq__(self, other): + return ( + isinstance(other, schema.CheckConstraint) + and self.constraint.name == other.name + and (str(self.constraint.sqltext) == str(other.sqltext)) + and (other.table.name == self.constraint.table.name) + and other.table.schema == self.constraint.table.schema + ) + + def __ne__(self, other): + return not self.__eq__(other) + + +class CompareForeignKey: + def __init__(self, constraint): + self.constraint = constraint + + def __eq__(self, other): + r1 = ( + isinstance(other, schema.ForeignKeyConstraint) + and self.constraint.name == other.name + and (other.table.name == self.constraint.table.name) + and other.table.schema == self.constraint.table.schema + ) + if not r1: + return False + for c1, c2 in zip_longest(self.constraint.columns, other.columns): + if (c1 is None and c2 is not None) or ( + c2 is None and c1 is not None + ): + return False + if CompareColumn(c1) != c2: + return False + return True + + def 
__ne__(self, other): + return not self.__eq__(other) + + +class ComparePrimaryKey: + def __init__(self, constraint): + self.constraint = constraint + + def __eq__(self, other): + r1 = ( + isinstance(other, schema.PrimaryKeyConstraint) + and self.constraint.name == other.name + and (other.table.name == self.constraint.table.name) + and other.table.schema == self.constraint.table.schema + ) + if not r1: + return False + + for c1, c2 in zip_longest(self.constraint.columns, other.columns): + if (c1 is None and c2 is not None) or ( + c2 is None and c1 is not None + ): + return False + if CompareColumn(c1) != c2: + return False + + return True + + def __ne__(self, other): + return not self.__eq__(other) + + +class CompareUniqueConstraint: + def __init__(self, constraint): + self.constraint = constraint + + def __eq__(self, other): + r1 = ( + isinstance(other, schema.UniqueConstraint) + and self.constraint.name == other.name + and (other.table.name == self.constraint.table.name) + and other.table.schema == self.constraint.table.schema + ) + if not r1: + return False + + for c1, c2 in zip_longest(self.constraint.columns, other.columns): + if (c1 is None and c2 is not None) or ( + c2 is None and c1 is not None + ): + return False + if CompareColumn(c1) != c2: + return False + + return True + + def __ne__(self, other): + return not self.__eq__(other) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/testing/suite/__init__.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/testing/suite/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3da498d289ed11e1eb140384db8d601bfcd524aa --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/testing/suite/__init__.py @@ -0,0 +1,7 @@ +from .test_autogen_comments import * # noqa +from .test_autogen_computed import * # noqa +from .test_autogen_diffs import * # noqa +from .test_autogen_fks import * # noqa +from .test_autogen_identity import * # noqa +from .test_environment import * # noqa +from .test_op import * # noqa diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/testing/suite/_autogen_fixtures.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/testing/suite/_autogen_fixtures.py new file mode 100644 index 0000000000000000000000000000000000000000..ed4acb26aec0344cff23d7df07c0a9294e6bc49c --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/testing/suite/_autogen_fixtures.py @@ -0,0 +1,448 @@ +from __future__ import annotations + +from typing import Any +from typing import Dict +from typing import Set + +from sqlalchemy import CHAR +from sqlalchemy import CheckConstraint +from sqlalchemy import Column +from sqlalchemy import event +from sqlalchemy import ForeignKey +from sqlalchemy import Index +from sqlalchemy import inspect +from sqlalchemy import Integer +from sqlalchemy import MetaData +from sqlalchemy import Numeric +from sqlalchemy import PrimaryKeyConstraint +from sqlalchemy import String +from sqlalchemy import Table +from sqlalchemy import Text +from sqlalchemy import text +from sqlalchemy import UniqueConstraint + +from ... import autogenerate +from ... 
import util +from ...autogenerate import api +from ...ddl.base import _fk_spec +from ...migration import MigrationContext +from ...operations import ops +from ...testing import config +from ...testing import eq_ +from ...testing.env import clear_staging_env +from ...testing.env import staging_env + +names_in_this_test: Set[Any] = set() + + +@event.listens_for(Table, "after_parent_attach") +def new_table(table, parent): + names_in_this_test.add(table.name) + + +def _default_include_object(obj, name, type_, reflected, compare_to): + if type_ == "table": + return name in names_in_this_test + else: + return True + + +_default_object_filters: Any = _default_include_object + +_default_name_filters: Any = None + + +class ModelOne: + __requires__ = ("unique_constraint_reflection",) + + schema: Any = None + + @classmethod + def _get_db_schema(cls): + schema = cls.schema + + m = MetaData(schema=schema) + + Table( + "user", + m, + Column("id", Integer, primary_key=True), + Column("name", String(50)), + Column("a1", Text), + Column("pw", String(50)), + Index("pw_idx", "pw"), + ) + + Table( + "address", + m, + Column("id", Integer, primary_key=True), + Column("email_address", String(100), nullable=False), + ) + + Table( + "order", + m, + Column("order_id", Integer, primary_key=True), + Column( + "amount", + Numeric(8, 2), + nullable=False, + server_default=text("0"), + ), + CheckConstraint("amount >= 0", name="ck_order_amount"), + ) + + Table( + "extra", + m, + Column("x", CHAR), + Column("uid", Integer, ForeignKey("user.id")), + ) + + return m + + @classmethod + def _get_model_schema(cls): + schema = cls.schema + + m = MetaData(schema=schema) + + Table( + "user", + m, + Column("id", Integer, primary_key=True), + Column("name", String(50), nullable=False), + Column("a1", Text, server_default="x"), + ) + + Table( + "address", + m, + Column("id", Integer, primary_key=True), + Column("email_address", String(100), nullable=False), + Column("street", String(50)), + UniqueConstraint("email_address", name="uq_email"), + ) + + Table( + "order", + m, + Column("order_id", Integer, primary_key=True), + Column( + "amount", + Numeric(10, 2), + nullable=True, + server_default=text("0"), + ), + Column("user_id", Integer, ForeignKey("user.id")), + CheckConstraint("amount > -1", name="ck_order_amount"), + ) + + Table( + "item", + m, + Column("id", Integer, primary_key=True), + Column("description", String(100)), + Column("order_id", Integer, ForeignKey("order.order_id")), + CheckConstraint("len(description) > 5"), + ) + return m + + +class NamingConvModel: + __requires__ = ("unique_constraint_reflection",) + configure_opts = {"conv_all_constraint_names": True} + naming_convention = { + "ix": "ix_%(column_0_label)s", + "uq": "uq_%(table_name)s_%(constraint_name)s", + "ck": "ck_%(table_name)s_%(constraint_name)s", + "fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s", + "pk": "pk_%(table_name)s", + } + + @classmethod + def _get_db_schema(cls): + # database side - assume all constraints have a name that + # we would assume here is a "db generated" name. need to make + # sure these all render with op.f(). 
+ m = MetaData() + Table( + "x1", + m, + Column("q", Integer), + Index("db_x1_index_q", "q"), + PrimaryKeyConstraint("q", name="db_x1_primary_q"), + ) + Table( + "x2", + m, + Column("q", Integer), + Column("p", ForeignKey("x1.q", name="db_x2_foreign_q")), + CheckConstraint("q > 5", name="db_x2_check_q"), + ) + Table( + "x3", + m, + Column("q", Integer), + Column("r", Integer), + Column("s", Integer), + UniqueConstraint("q", name="db_x3_unique_q"), + ) + Table( + "x4", + m, + Column("q", Integer), + PrimaryKeyConstraint("q", name="db_x4_primary_q"), + ) + Table( + "x5", + m, + Column("q", Integer), + Column("p", ForeignKey("x4.q", name="db_x5_foreign_q")), + Column("r", Integer), + Column("s", Integer), + PrimaryKeyConstraint("q", name="db_x5_primary_q"), + UniqueConstraint("r", name="db_x5_unique_r"), + CheckConstraint("s > 5", name="db_x5_check_s"), + ) + # SQLite and it's "no names needed" thing. bleh. + # we can't have a name for these so you'll see "None" for the name. + Table( + "unnamed_sqlite", + m, + Column("q", Integer), + Column("r", Integer), + PrimaryKeyConstraint("q"), + UniqueConstraint("r"), + ) + return m + + @classmethod + def _get_model_schema(cls): + from sqlalchemy.sql.naming import conv + + m = MetaData(naming_convention=cls.naming_convention) + Table( + "x1", m, Column("q", Integer, primary_key=True), Index(None, "q") + ) + Table( + "x2", + m, + Column("q", Integer), + Column("p", ForeignKey("x1.q")), + CheckConstraint("q > 5", name="token_x2check1"), + ) + Table( + "x3", + m, + Column("q", Integer), + Column("r", Integer), + Column("s", Integer), + UniqueConstraint("r", name="token_x3r"), + UniqueConstraint("s", name=conv("userdef_x3_unique_s")), + ) + Table( + "x4", + m, + Column("q", Integer, primary_key=True), + Index("userdef_x4_idx_q", "q"), + ) + Table( + "x6", + m, + Column("q", Integer, primary_key=True), + Column("p", ForeignKey("x4.q")), + Column("r", Integer), + Column("s", Integer), + UniqueConstraint("r", name="token_x6r"), + CheckConstraint("s > 5", "token_x6check1"), + CheckConstraint("s < 20", conv("userdef_x6_check_s")), + ) + return m + + +class _ComparesFKs: + def _assert_fk_diff( + self, + diff, + type_, + source_table, + source_columns, + target_table, + target_columns, + name=None, + conditional_name=None, + source_schema=None, + onupdate=None, + ondelete=None, + initially=None, + deferrable=None, + ): + # the public API for ForeignKeyConstraint was not very rich + # in 0.7, 0.8, so here we use the well-known but slightly + # private API to get at its elements + ( + fk_source_schema, + fk_source_table, + fk_source_columns, + fk_target_schema, + fk_target_table, + fk_target_columns, + fk_onupdate, + fk_ondelete, + fk_deferrable, + fk_initially, + ) = _fk_spec(diff[1]) + + eq_(diff[0], type_) + eq_(fk_source_table, source_table) + eq_(fk_source_columns, source_columns) + eq_(fk_target_table, target_table) + eq_(fk_source_schema, source_schema) + eq_(fk_onupdate, onupdate) + eq_(fk_ondelete, ondelete) + eq_(fk_initially, initially) + eq_(fk_deferrable, deferrable) + + eq_([elem.column.name for elem in diff[1].elements], target_columns) + if conditional_name is not None: + if conditional_name == "servergenerated": + fks = inspect(self.bind).get_foreign_keys(source_table) + server_fk_name = fks[0]["name"] + eq_(diff[1].name, server_fk_name) + else: + eq_(diff[1].name, conditional_name) + else: + eq_(diff[1].name, name) + + +class AutogenTest(_ComparesFKs): + def _flatten_diffs(self, diffs): + for d in diffs: + if isinstance(d, list): + yield from 
self._flatten_diffs(d) + else: + yield d + + @classmethod + def _get_bind(cls): + return config.db + + configure_opts: Dict[Any, Any] = {} + + @classmethod + def setup_class(cls): + staging_env() + cls.bind = cls._get_bind() + cls.m1 = cls._get_db_schema() + cls.m1.create_all(cls.bind) + cls.m2 = cls._get_model_schema() + + @classmethod + def teardown_class(cls): + cls.m1.drop_all(cls.bind) + clear_staging_env() + + def setUp(self): + self.conn = conn = self.bind.connect() + ctx_opts = { + "compare_type": True, + "compare_server_default": True, + "target_metadata": self.m2, + "upgrade_token": "upgrades", + "downgrade_token": "downgrades", + "alembic_module_prefix": "op.", + "sqlalchemy_module_prefix": "sa.", + "include_object": _default_object_filters, + "include_name": _default_name_filters, + } + if self.configure_opts: + ctx_opts.update(self.configure_opts) + self.context = context = MigrationContext.configure( + connection=conn, opts=ctx_opts + ) + + self.autogen_context = api.AutogenContext(context, self.m2) + + def tearDown(self): + self.conn.close() + + def _update_context( + self, object_filters=None, name_filters=None, include_schemas=None + ): + if include_schemas is not None: + self.autogen_context.opts["include_schemas"] = include_schemas + if object_filters is not None: + self.autogen_context._object_filters = [object_filters] + if name_filters is not None: + self.autogen_context._name_filters = [name_filters] + return self.autogen_context + + +class AutogenFixtureTest(_ComparesFKs): + def _fixture( + self, + m1, + m2, + include_schemas=False, + opts=None, + object_filters=_default_object_filters, + name_filters=_default_name_filters, + return_ops=False, + max_identifier_length=None, + ): + if max_identifier_length: + dialect = self.bind.dialect + existing_length = dialect.max_identifier_length + dialect.max_identifier_length = ( + dialect._user_defined_max_identifier_length + ) = max_identifier_length + try: + self._alembic_metadata, model_metadata = m1, m2 + for m in util.to_list(self._alembic_metadata): + m.create_all(self.bind) + + with self.bind.connect() as conn: + ctx_opts = { + "compare_type": True, + "compare_server_default": True, + "target_metadata": model_metadata, + "upgrade_token": "upgrades", + "downgrade_token": "downgrades", + "alembic_module_prefix": "op.", + "sqlalchemy_module_prefix": "sa.", + "include_object": object_filters, + "include_name": name_filters, + "include_schemas": include_schemas, + } + if opts: + ctx_opts.update(opts) + self.context = context = MigrationContext.configure( + connection=conn, opts=ctx_opts + ) + + autogen_context = api.AutogenContext(context, model_metadata) + uo = ops.UpgradeOps(ops=[]) + autogenerate._produce_net_changes(autogen_context, uo) + + if return_ops: + return uo + else: + return uo.as_diffs() + finally: + if max_identifier_length: + dialect = self.bind.dialect + dialect.max_identifier_length = ( + dialect._user_defined_max_identifier_length + ) = existing_length + + def setUp(self): + staging_env() + self.bind = config.db + + def tearDown(self): + if hasattr(self, "_alembic_metadata"): + for m in util.to_list(self._alembic_metadata): + m.drop_all(self.bind) + clear_staging_env() diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/testing/suite/test_autogen_comments.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/testing/suite/test_autogen_comments.py new file mode 100644 index 
0000000000000000000000000000000000000000..7ef074f57893180048c6193455b0dd1d507c0603 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/testing/suite/test_autogen_comments.py @@ -0,0 +1,242 @@ +from sqlalchemy import Column +from sqlalchemy import Float +from sqlalchemy import MetaData +from sqlalchemy import String +from sqlalchemy import Table + +from ._autogen_fixtures import AutogenFixtureTest +from ...testing import eq_ +from ...testing import mock +from ...testing import TestBase + + +class AutogenerateCommentsTest(AutogenFixtureTest, TestBase): + __backend__ = True + + __requires__ = ("comments",) + + def test_existing_table_comment_no_change(self): + m1 = MetaData() + m2 = MetaData() + + Table( + "some_table", + m1, + Column("test", String(10), primary_key=True), + comment="this is some table", + ) + + Table( + "some_table", + m2, + Column("test", String(10), primary_key=True), + comment="this is some table", + ) + + diffs = self._fixture(m1, m2) + + eq_(diffs, []) + + def test_add_table_comment(self): + m1 = MetaData() + m2 = MetaData() + + Table("some_table", m1, Column("test", String(10), primary_key=True)) + + Table( + "some_table", + m2, + Column("test", String(10), primary_key=True), + comment="this is some table", + ) + + diffs = self._fixture(m1, m2) + + eq_(diffs[0][0], "add_table_comment") + eq_(diffs[0][1].comment, "this is some table") + eq_(diffs[0][2], None) + + def test_remove_table_comment(self): + m1 = MetaData() + m2 = MetaData() + + Table( + "some_table", + m1, + Column("test", String(10), primary_key=True), + comment="this is some table", + ) + + Table("some_table", m2, Column("test", String(10), primary_key=True)) + + diffs = self._fixture(m1, m2) + + eq_(diffs[0][0], "remove_table_comment") + eq_(diffs[0][1].comment, None) + + def test_alter_table_comment(self): + m1 = MetaData() + m2 = MetaData() + + Table( + "some_table", + m1, + Column("test", String(10), primary_key=True), + comment="this is some table", + ) + + Table( + "some_table", + m2, + Column("test", String(10), primary_key=True), + comment="this is also some table", + ) + + diffs = self._fixture(m1, m2) + + eq_(diffs[0][0], "add_table_comment") + eq_(diffs[0][1].comment, "this is also some table") + eq_(diffs[0][2], "this is some table") + + def test_existing_column_comment_no_change(self): + m1 = MetaData() + m2 = MetaData() + + Table( + "some_table", + m1, + Column("test", String(10), primary_key=True), + Column("amount", Float, comment="the amount"), + ) + + Table( + "some_table", + m2, + Column("test", String(10), primary_key=True), + Column("amount", Float, comment="the amount"), + ) + + diffs = self._fixture(m1, m2) + + eq_(diffs, []) + + def test_add_column_comment(self): + m1 = MetaData() + m2 = MetaData() + + Table( + "some_table", + m1, + Column("test", String(10), primary_key=True), + Column("amount", Float), + ) + + Table( + "some_table", + m2, + Column("test", String(10), primary_key=True), + Column("amount", Float, comment="the amount"), + ) + + diffs = self._fixture(m1, m2) + eq_( + diffs, + [ + [ + ( + "modify_comment", + None, + "some_table", + "amount", + { + "existing_nullable": True, + "existing_type": mock.ANY, + "existing_server_default": False, + }, + None, + "the amount", + ) + ] + ], + ) + + def test_remove_column_comment(self): + m1 = MetaData() + m2 = MetaData() + + Table( + "some_table", + m1, + Column("test", String(10), primary_key=True), + Column("amount", Float, comment="the amount"), + ) + + Table( + "some_table", 
+ m2, + Column("test", String(10), primary_key=True), + Column("amount", Float), + ) + + diffs = self._fixture(m1, m2) + eq_( + diffs, + [ + [ + ( + "modify_comment", + None, + "some_table", + "amount", + { + "existing_nullable": True, + "existing_type": mock.ANY, + "existing_server_default": False, + }, + "the amount", + None, + ) + ] + ], + ) + + def test_alter_column_comment(self): + m1 = MetaData() + m2 = MetaData() + + Table( + "some_table", + m1, + Column("test", String(10), primary_key=True), + Column("amount", Float, comment="the amount"), + ) + + Table( + "some_table", + m2, + Column("test", String(10), primary_key=True), + Column("amount", Float, comment="the adjusted amount"), + ) + + diffs = self._fixture(m1, m2) + + eq_( + diffs, + [ + [ + ( + "modify_comment", + None, + "some_table", + "amount", + { + "existing_nullable": True, + "existing_type": mock.ANY, + "existing_server_default": False, + }, + "the amount", + "the adjusted amount", + ) + ] + ], + ) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/testing/suite/test_autogen_computed.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/testing/suite/test_autogen_computed.py new file mode 100644 index 0000000000000000000000000000000000000000..fe7eb7a568cdb8e4269c392e3c94d5ffdc78771e --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/testing/suite/test_autogen_computed.py @@ -0,0 +1,144 @@ +import sqlalchemy as sa +from sqlalchemy import Column +from sqlalchemy import Integer +from sqlalchemy import MetaData +from sqlalchemy import Table + +from ._autogen_fixtures import AutogenFixtureTest +from ... import testing +from ...testing import eq_ +from ...testing import is_ +from ...testing import is_true +from ...testing import mock +from ...testing import TestBase + + +class AutogenerateComputedTest(AutogenFixtureTest, TestBase): + __requires__ = ("computed_columns",) + __backend__ = True + + def test_add_computed_column(self): + m1 = MetaData() + m2 = MetaData() + + Table("user", m1, Column("id", Integer, primary_key=True)) + + Table( + "user", + m2, + Column("id", Integer, primary_key=True), + Column("foo", Integer, sa.Computed("5")), + ) + + diffs = self._fixture(m1, m2) + + eq_(diffs[0][0], "add_column") + eq_(diffs[0][2], "user") + eq_(diffs[0][3].name, "foo") + c = diffs[0][3].computed + + is_true(isinstance(c, sa.Computed)) + is_(c.persisted, None) + eq_(str(c.sqltext), "5") + + def test_remove_computed_column(self): + m1 = MetaData() + m2 = MetaData() + + Table( + "user", + m1, + Column("id", Integer, primary_key=True), + Column("foo", Integer, sa.Computed("5")), + ) + + Table("user", m2, Column("id", Integer, primary_key=True)) + + diffs = self._fixture(m1, m2) + + eq_(diffs[0][0], "remove_column") + eq_(diffs[0][2], "user") + c = diffs[0][3] + eq_(c.name, "foo") + + is_true(isinstance(c.computed, sa.Computed)) + is_true(isinstance(c.server_default, sa.Computed)) + + @testing.combinations( + lambda: (None, sa.Computed("bar*5")), + (lambda: (sa.Computed("bar*5"), None)), + lambda: ( + sa.Computed("bar*5"), + sa.Computed("bar * 42", persisted=True), + ), + lambda: (sa.Computed("bar*5"), sa.Computed("bar * 42")), + ) + def test_cant_change_computed_warning(self, test_case): + arg_before, arg_after = testing.resolve_lambda(test_case, **locals()) + m1 = MetaData() + m2 = MetaData() + + arg_before = [] if arg_before is None else [arg_before] + arg_after = [] if arg_after is None else 
[arg_after] + + Table( + "user", + m1, + Column("id", Integer, primary_key=True), + Column("bar", Integer), + Column("foo", Integer, *arg_before), + ) + + Table( + "user", + m2, + Column("id", Integer, primary_key=True), + Column("bar", Integer), + Column("foo", Integer, *arg_after), + ) + + with mock.patch("alembic.util.warn") as mock_warn: + diffs = self._fixture(m1, m2) + + eq_( + mock_warn.mock_calls, + [mock.call("Computed default on user.foo cannot be modified")], + ) + + eq_(list(diffs), []) + + @testing.combinations( + lambda: (None, None), + lambda: (sa.Computed("5"), sa.Computed("5")), + lambda: (sa.Computed("bar*5"), sa.Computed("bar*5")), + lambda: (sa.Computed("bar*5"), sa.Computed("bar * \r\n\t5")), + ) + def test_computed_unchanged(self, test_case): + arg_before, arg_after = testing.resolve_lambda(test_case, **locals()) + m1 = MetaData() + m2 = MetaData() + + arg_before = [] if arg_before is None else [arg_before] + arg_after = [] if arg_after is None else [arg_after] + + Table( + "user", + m1, + Column("id", Integer, primary_key=True), + Column("bar", Integer), + Column("foo", Integer, *arg_before), + ) + + Table( + "user", + m2, + Column("id", Integer, primary_key=True), + Column("bar", Integer), + Column("foo", Integer, *arg_after), + ) + + with mock.patch("alembic.util.warn") as mock_warn: + diffs = self._fixture(m1, m2) + eq_(mock_warn.mock_calls, []) + + eq_(list(diffs), []) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/testing/suite/test_autogen_diffs.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/testing/suite/test_autogen_diffs.py new file mode 100644 index 0000000000000000000000000000000000000000..75bcd37aeec53d4afb2447a0f7aaf8ab5ef4c160 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/testing/suite/test_autogen_diffs.py @@ -0,0 +1,273 @@ +from sqlalchemy import BigInteger +from sqlalchemy import Column +from sqlalchemy import Integer +from sqlalchemy import MetaData +from sqlalchemy import Table +from sqlalchemy.testing import in_ + +from ._autogen_fixtures import AutogenFixtureTest +from ... 
import testing +from ...testing import config +from ...testing import eq_ +from ...testing import is_ +from ...testing import TestBase + + +class AlterColumnTest(AutogenFixtureTest, TestBase): + __backend__ = True + + @testing.combinations((True,), (False,)) + @config.requirements.comments + def test_all_existings_filled(self, pk): + m1 = MetaData() + m2 = MetaData() + + Table("a", m1, Column("x", Integer, primary_key=pk)) + Table("a", m2, Column("x", Integer, comment="x", primary_key=pk)) + + alter_col = self._assert_alter_col(m1, m2, pk) + eq_(alter_col.modify_comment, "x") + + @testing.combinations((True,), (False,)) + @config.requirements.comments + def test_all_existings_filled_in_notnull(self, pk): + m1 = MetaData() + m2 = MetaData() + + Table("a", m1, Column("x", Integer, nullable=False, primary_key=pk)) + Table( + "a", + m2, + Column("x", Integer, nullable=False, comment="x", primary_key=pk), + ) + + self._assert_alter_col(m1, m2, pk, nullable=False) + + @testing.combinations((True,), (False,)) + @config.requirements.comments + def test_all_existings_filled_in_comment(self, pk): + m1 = MetaData() + m2 = MetaData() + + Table("a", m1, Column("x", Integer, comment="old", primary_key=pk)) + Table("a", m2, Column("x", Integer, comment="new", primary_key=pk)) + + alter_col = self._assert_alter_col(m1, m2, pk) + eq_(alter_col.existing_comment, "old") + + @testing.combinations((True,), (False,)) + @config.requirements.comments + def test_all_existings_filled_in_server_default(self, pk): + m1 = MetaData() + m2 = MetaData() + + Table( + "a", m1, Column("x", Integer, server_default="5", primary_key=pk) + ) + Table( + "a", + m2, + Column( + "x", Integer, server_default="5", comment="new", primary_key=pk + ), + ) + + alter_col = self._assert_alter_col(m1, m2, pk) + in_("5", alter_col.existing_server_default.arg.text) + + def _assert_alter_col(self, m1, m2, pk, nullable=None): + ops = self._fixture(m1, m2, return_ops=True) + modify_table = ops.ops[-1] + alter_col = modify_table.ops[0] + + if nullable is None: + eq_(alter_col.existing_nullable, not pk) + else: + eq_(alter_col.existing_nullable, nullable) + assert alter_col.existing_type._compare_type_affinity(Integer()) + return alter_col + + +class AutoincrementTest(AutogenFixtureTest, TestBase): + __backend__ = True + __requires__ = ("integer_subtype_comparisons",) + + def test_alter_column_autoincrement_none(self): + m1 = MetaData() + m2 = MetaData() + + Table("a", m1, Column("x", Integer, nullable=False)) + Table("a", m2, Column("x", Integer, nullable=True)) + + ops = self._fixture(m1, m2, return_ops=True) + assert "autoincrement" not in ops.ops[0].ops[0].kw + + def test_alter_column_autoincrement_pk_false(self): + m1 = MetaData() + m2 = MetaData() + + Table( + "a", + m1, + Column("x", Integer, primary_key=True, autoincrement=False), + ) + Table( + "a", + m2, + Column("x", BigInteger, primary_key=True, autoincrement=False), + ) + + ops = self._fixture(m1, m2, return_ops=True) + is_(ops.ops[0].ops[0].kw["autoincrement"], False) + + def test_alter_column_autoincrement_pk_implicit_true(self): + m1 = MetaData() + m2 = MetaData() + + Table("a", m1, Column("x", Integer, primary_key=True)) + Table("a", m2, Column("x", BigInteger, primary_key=True)) + + ops = self._fixture(m1, m2, return_ops=True) + is_(ops.ops[0].ops[0].kw["autoincrement"], True) + + def test_alter_column_autoincrement_pk_explicit_true(self): + m1 = MetaData() + m2 = MetaData() + + Table( + "a", m1, Column("x", Integer, primary_key=True, autoincrement=True) + ) + Table( + "a", + m2, 
+ Column("x", BigInteger, primary_key=True, autoincrement=True), + ) + + ops = self._fixture(m1, m2, return_ops=True) + is_(ops.ops[0].ops[0].kw["autoincrement"], True) + + def test_alter_column_autoincrement_nonpk_false(self): + m1 = MetaData() + m2 = MetaData() + + Table( + "a", + m1, + Column("id", Integer, primary_key=True), + Column("x", Integer, autoincrement=False), + ) + Table( + "a", + m2, + Column("id", Integer, primary_key=True), + Column("x", BigInteger, autoincrement=False), + ) + + ops = self._fixture(m1, m2, return_ops=True) + is_(ops.ops[0].ops[0].kw["autoincrement"], False) + + def test_alter_column_autoincrement_nonpk_implicit_false(self): + m1 = MetaData() + m2 = MetaData() + + Table( + "a", + m1, + Column("id", Integer, primary_key=True), + Column("x", Integer), + ) + Table( + "a", + m2, + Column("id", Integer, primary_key=True), + Column("x", BigInteger), + ) + + ops = self._fixture(m1, m2, return_ops=True) + assert "autoincrement" not in ops.ops[0].ops[0].kw + + def test_alter_column_autoincrement_nonpk_explicit_true(self): + m1 = MetaData() + m2 = MetaData() + + Table( + "a", + m1, + Column("id", Integer, primary_key=True, autoincrement=False), + Column("x", Integer, autoincrement=True), + ) + Table( + "a", + m2, + Column("id", Integer, primary_key=True, autoincrement=False), + Column("x", BigInteger, autoincrement=True), + ) + + ops = self._fixture(m1, m2, return_ops=True) + is_(ops.ops[0].ops[0].kw["autoincrement"], True) + + def test_alter_column_autoincrement_compositepk_false(self): + m1 = MetaData() + m2 = MetaData() + + Table( + "a", + m1, + Column("id", Integer, primary_key=True), + Column("x", Integer, primary_key=True, autoincrement=False), + ) + Table( + "a", + m2, + Column("id", Integer, primary_key=True), + Column("x", BigInteger, primary_key=True, autoincrement=False), + ) + + ops = self._fixture(m1, m2, return_ops=True) + is_(ops.ops[0].ops[0].kw["autoincrement"], False) + + def test_alter_column_autoincrement_compositepk_implicit_false(self): + m1 = MetaData() + m2 = MetaData() + + Table( + "a", + m1, + Column("id", Integer, primary_key=True), + Column("x", Integer, primary_key=True), + ) + Table( + "a", + m2, + Column("id", Integer, primary_key=True), + Column("x", BigInteger, primary_key=True), + ) + + ops = self._fixture(m1, m2, return_ops=True) + assert "autoincrement" not in ops.ops[0].ops[0].kw + + @config.requirements.autoincrement_on_composite_pk + def test_alter_column_autoincrement_compositepk_explicit_true(self): + m1 = MetaData() + m2 = MetaData() + + Table( + "a", + m1, + Column("id", Integer, primary_key=True, autoincrement=False), + Column("x", Integer, primary_key=True, autoincrement=True), + # on SQLA 1.0 and earlier, this being present + # trips the "add KEY for the primary key" so that the + # AUTO_INCREMENT keyword is accepted by MySQL. SQLA 1.1 and + # greater the columns are just reorganized. 
+ mysql_engine="InnoDB", + ) + Table( + "a", + m2, + Column("id", Integer, primary_key=True, autoincrement=False), + Column("x", BigInteger, primary_key=True, autoincrement=True), + ) + + ops = self._fixture(m1, m2, return_ops=True) + is_(ops.ops[0].ops[0].kw["autoincrement"], True) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/testing/suite/test_autogen_fks.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/testing/suite/test_autogen_fks.py new file mode 100644 index 0000000000000000000000000000000000000000..0240b98d3872bad0d123493a97ccb4b30dbbb709 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/testing/suite/test_autogen_fks.py @@ -0,0 +1,1190 @@ +from sqlalchemy import Column +from sqlalchemy import ForeignKeyConstraint +from sqlalchemy import Integer +from sqlalchemy import MetaData +from sqlalchemy import String +from sqlalchemy import Table + +from ._autogen_fixtures import AutogenFixtureTest +from ...testing import combinations +from ...testing import config +from ...testing import eq_ +from ...testing import mock +from ...testing import TestBase + + +class AutogenerateForeignKeysTest(AutogenFixtureTest, TestBase): + __backend__ = True + __requires__ = ("foreign_key_constraint_reflection",) + + def test_remove_fk(self): + m1 = MetaData() + m2 = MetaData() + + Table( + "some_table", + m1, + Column("test", String(10), primary_key=True), + ) + + Table( + "user", + m1, + Column("id", Integer, primary_key=True), + Column("name", String(50), nullable=False), + Column("a1", String(10), server_default="x"), + Column("test2", String(10)), + ForeignKeyConstraint(["test2"], ["some_table.test"]), + ) + + Table( + "some_table", + m2, + Column("test", String(10), primary_key=True), + ) + + Table( + "user", + m2, + Column("id", Integer, primary_key=True), + Column("name", String(50), nullable=False), + Column("a1", String(10), server_default="x"), + Column("test2", String(10)), + ) + + diffs = self._fixture(m1, m2) + + self._assert_fk_diff( + diffs[0], + "remove_fk", + "user", + ["test2"], + "some_table", + ["test"], + conditional_name="servergenerated", + ) + + def test_add_fk(self): + m1 = MetaData() + m2 = MetaData() + + Table( + "some_table", + m1, + Column("id", Integer, primary_key=True), + Column("test", String(10)), + ) + + Table( + "user", + m1, + Column("id", Integer, primary_key=True), + Column("name", String(50), nullable=False), + Column("a1", String(10), server_default="x"), + Column("test2", String(10)), + ) + + Table( + "some_table", + m2, + Column("id", Integer, primary_key=True), + Column("test", String(10)), + ) + + Table( + "user", + m2, + Column("id", Integer, primary_key=True), + Column("name", String(50), nullable=False), + Column("a1", String(10), server_default="x"), + Column("test2", String(10)), + ForeignKeyConstraint(["test2"], ["some_table.test"]), + ) + + diffs = self._fixture(m1, m2) + + self._assert_fk_diff( + diffs[0], "add_fk", "user", ["test2"], "some_table", ["test"] + ) + + def test_no_change(self): + m1 = MetaData() + m2 = MetaData() + + Table( + "some_table", + m1, + Column("id", Integer, primary_key=True), + Column("test", String(10)), + ) + + Table( + "user", + m1, + Column("id", Integer, primary_key=True), + Column("name", String(50), nullable=False), + Column("a1", String(10), server_default="x"), + Column("test2", Integer), + ForeignKeyConstraint(["test2"], ["some_table.id"]), + ) + + Table( + 
"some_table", + m2, + Column("id", Integer, primary_key=True), + Column("test", String(10)), + ) + + Table( + "user", + m2, + Column("id", Integer, primary_key=True), + Column("name", String(50), nullable=False), + Column("a1", String(10), server_default="x"), + Column("test2", Integer), + ForeignKeyConstraint(["test2"], ["some_table.id"]), + ) + + diffs = self._fixture(m1, m2) + + eq_(diffs, []) + + def test_no_change_composite_fk(self): + m1 = MetaData() + m2 = MetaData() + + Table( + "some_table", + m1, + Column("id_1", String(10), primary_key=True), + Column("id_2", String(10), primary_key=True), + ) + + Table( + "user", + m1, + Column("id", Integer, primary_key=True), + Column("name", String(50), nullable=False), + Column("a1", String(10), server_default="x"), + Column("other_id_1", String(10)), + Column("other_id_2", String(10)), + ForeignKeyConstraint( + ["other_id_1", "other_id_2"], + ["some_table.id_1", "some_table.id_2"], + ), + ) + + Table( + "some_table", + m2, + Column("id_1", String(10), primary_key=True), + Column("id_2", String(10), primary_key=True), + ) + + Table( + "user", + m2, + Column("id", Integer, primary_key=True), + Column("name", String(50), nullable=False), + Column("a1", String(10), server_default="x"), + Column("other_id_1", String(10)), + Column("other_id_2", String(10)), + ForeignKeyConstraint( + ["other_id_1", "other_id_2"], + ["some_table.id_1", "some_table.id_2"], + ), + ) + + diffs = self._fixture(m1, m2) + + eq_(diffs, []) + + def test_casing_convention_changed_so_put_drops_first(self): + m1 = MetaData() + m2 = MetaData() + + Table( + "some_table", + m1, + Column("test", String(10), primary_key=True), + ) + + Table( + "user", + m1, + Column("id", Integer, primary_key=True), + Column("name", String(50), nullable=False), + Column("a1", String(10), server_default="x"), + Column("test2", String(10)), + ForeignKeyConstraint(["test2"], ["some_table.test"], name="MyFK"), + ) + + Table( + "some_table", + m2, + Column("test", String(10), primary_key=True), + ) + + # foreign key autogen currently does not take "name" into account, + # so change the def just for the purposes of testing the + # add/drop order for now. 
+ Table( + "user", + m2, + Column("id", Integer, primary_key=True), + Column("name", String(50), nullable=False), + Column("a1", String(10), server_default="x"), + Column("test2", String(10)), + ForeignKeyConstraint(["a1"], ["some_table.test"], name="myfk"), + ) + + diffs = self._fixture(m1, m2) + + self._assert_fk_diff( + diffs[0], + "remove_fk", + "user", + ["test2"], + "some_table", + ["test"], + name="MyFK" if config.requirements.fk_names.enabled else None, + ) + + self._assert_fk_diff( + diffs[1], + "add_fk", + "user", + ["a1"], + "some_table", + ["test"], + name="myfk", + ) + + def test_add_composite_fk_with_name(self): + m1 = MetaData() + m2 = MetaData() + + Table( + "some_table", + m1, + Column("id_1", String(10), primary_key=True), + Column("id_2", String(10), primary_key=True), + ) + + Table( + "user", + m1, + Column("id", Integer, primary_key=True), + Column("name", String(50), nullable=False), + Column("a1", String(10), server_default="x"), + Column("other_id_1", String(10)), + Column("other_id_2", String(10)), + ) + + Table( + "some_table", + m2, + Column("id_1", String(10), primary_key=True), + Column("id_2", String(10), primary_key=True), + ) + + Table( + "user", + m2, + Column("id", Integer, primary_key=True), + Column("name", String(50), nullable=False), + Column("a1", String(10), server_default="x"), + Column("other_id_1", String(10)), + Column("other_id_2", String(10)), + ForeignKeyConstraint( + ["other_id_1", "other_id_2"], + ["some_table.id_1", "some_table.id_2"], + name="fk_test_name", + ), + ) + + diffs = self._fixture(m1, m2) + self._assert_fk_diff( + diffs[0], + "add_fk", + "user", + ["other_id_1", "other_id_2"], + "some_table", + ["id_1", "id_2"], + name="fk_test_name", + ) + + @config.requirements.no_name_normalize + def test_remove_composite_fk(self): + m1 = MetaData() + m2 = MetaData() + + Table( + "some_table", + m1, + Column("id_1", String(10), primary_key=True), + Column("id_2", String(10), primary_key=True), + ) + + Table( + "user", + m1, + Column("id", Integer, primary_key=True), + Column("name", String(50), nullable=False), + Column("a1", String(10), server_default="x"), + Column("other_id_1", String(10)), + Column("other_id_2", String(10)), + ForeignKeyConstraint( + ["other_id_1", "other_id_2"], + ["some_table.id_1", "some_table.id_2"], + name="fk_test_name", + ), + ) + + Table( + "some_table", + m2, + Column("id_1", String(10), primary_key=True), + Column("id_2", String(10), primary_key=True), + ) + + Table( + "user", + m2, + Column("id", Integer, primary_key=True), + Column("name", String(50), nullable=False), + Column("a1", String(10), server_default="x"), + Column("other_id_1", String(10)), + Column("other_id_2", String(10)), + ) + + diffs = self._fixture(m1, m2) + + self._assert_fk_diff( + diffs[0], + "remove_fk", + "user", + ["other_id_1", "other_id_2"], + "some_table", + ["id_1", "id_2"], + conditional_name="fk_test_name", + ) + + def test_add_fk_colkeys(self): + m1 = MetaData() + m2 = MetaData() + + Table( + "some_table", + m1, + Column("id_1", String(10), primary_key=True), + Column("id_2", String(10), primary_key=True), + ) + + Table( + "user", + m1, + Column("id", Integer, primary_key=True), + Column("other_id_1", String(10)), + Column("other_id_2", String(10)), + ) + + Table( + "some_table", + m2, + Column("id_1", String(10), key="tid1", primary_key=True), + Column("id_2", String(10), key="tid2", primary_key=True), + ) + + Table( + "user", + m2, + Column("id", Integer, primary_key=True), + Column("other_id_1", String(10), key="oid1"), + 
Column("other_id_2", String(10), key="oid2"), + ForeignKeyConstraint( + ["oid1", "oid2"], + ["some_table.tid1", "some_table.tid2"], + name="fk_test_name", + ), + ) + + diffs = self._fixture(m1, m2) + + self._assert_fk_diff( + diffs[0], + "add_fk", + "user", + ["other_id_1", "other_id_2"], + "some_table", + ["id_1", "id_2"], + name="fk_test_name", + ) + + def test_no_change_colkeys(self): + m1 = MetaData() + m2 = MetaData() + + Table( + "some_table", + m1, + Column("id_1", String(10), primary_key=True), + Column("id_2", String(10), primary_key=True), + ) + + Table( + "user", + m1, + Column("id", Integer, primary_key=True), + Column("other_id_1", String(10)), + Column("other_id_2", String(10)), + ForeignKeyConstraint( + ["other_id_1", "other_id_2"], + ["some_table.id_1", "some_table.id_2"], + ), + ) + + Table( + "some_table", + m2, + Column("id_1", String(10), key="tid1", primary_key=True), + Column("id_2", String(10), key="tid2", primary_key=True), + ) + + Table( + "user", + m2, + Column("id", Integer, primary_key=True), + Column("other_id_1", String(10), key="oid1"), + Column("other_id_2", String(10), key="oid2"), + ForeignKeyConstraint( + ["oid1", "oid2"], ["some_table.tid1", "some_table.tid2"] + ), + ) + + diffs = self._fixture(m1, m2) + + eq_(diffs, []) + + +class IncludeHooksTest(AutogenFixtureTest, TestBase): + __backend__ = True + __requires__ = ("fk_names",) + + @combinations(("object",), ("name",)) + @config.requirements.no_name_normalize + def test_remove_connection_fk(self, hook_type): + m1 = MetaData() + m2 = MetaData() + + ref = Table( + "ref", + m1, + Column("id", Integer, primary_key=True), + ) + t1 = Table( + "t", + m1, + Column("x", Integer), + Column("y", Integer), + ) + t1.append_constraint( + ForeignKeyConstraint([t1.c.x], [ref.c.id], name="fk1") + ) + t1.append_constraint( + ForeignKeyConstraint([t1.c.y], [ref.c.id], name="fk2") + ) + + ref = Table( + "ref", + m2, + Column("id", Integer, primary_key=True), + ) + Table( + "t", + m2, + Column("x", Integer), + Column("y", Integer), + ) + + if hook_type == "object": + + def include_object(object_, name, type_, reflected, compare_to): + return not ( + isinstance(object_, ForeignKeyConstraint) + and type_ == "foreign_key_constraint" + and reflected + and name == "fk1" + ) + + diffs = self._fixture(m1, m2, object_filters=include_object) + elif hook_type == "name": + + def include_name(name, type_, parent_names): + if name == "fk1": + if type_ == "index": # MariaDB thing + return True + eq_(type_, "foreign_key_constraint") + eq_( + parent_names, + { + "schema_name": None, + "table_name": "t", + "schema_qualified_table_name": "t", + }, + ) + return False + else: + return True + + diffs = self._fixture(m1, m2, name_filters=include_name) + + self._assert_fk_diff( + diffs[0], + "remove_fk", + "t", + ["y"], + "ref", + ["id"], + conditional_name="fk2", + ) + eq_(len(diffs), 1) + + def test_add_metadata_fk(self): + m1 = MetaData() + m2 = MetaData() + + Table( + "ref", + m1, + Column("id", Integer, primary_key=True), + ) + Table( + "t", + m1, + Column("x", Integer), + Column("y", Integer), + ) + + ref = Table( + "ref", + m2, + Column("id", Integer, primary_key=True), + ) + t2 = Table( + "t", + m2, + Column("x", Integer), + Column("y", Integer), + ) + t2.append_constraint( + ForeignKeyConstraint([t2.c.x], [ref.c.id], name="fk1") + ) + t2.append_constraint( + ForeignKeyConstraint([t2.c.y], [ref.c.id], name="fk2") + ) + + def include_object(object_, name, type_, reflected, compare_to): + return not ( + isinstance(object_, 
ForeignKeyConstraint) + and type_ == "foreign_key_constraint" + and not reflected + and name == "fk1" + ) + + diffs = self._fixture(m1, m2, object_filters=include_object) + + self._assert_fk_diff( + diffs[0], "add_fk", "t", ["y"], "ref", ["id"], name="fk2" + ) + eq_(len(diffs), 1) + + @combinations(("object",), ("name",)) + @config.requirements.no_name_normalize + def test_change_fk(self, hook_type): + m1 = MetaData() + m2 = MetaData() + + r1a = Table( + "ref_a", + m1, + Column("a", Integer, primary_key=True), + ) + Table( + "ref_b", + m1, + Column("a", Integer, primary_key=True), + Column("b", Integer, primary_key=True), + ) + t1 = Table( + "t", + m1, + Column("x", Integer), + Column("y", Integer), + Column("z", Integer), + ) + t1.append_constraint( + ForeignKeyConstraint([t1.c.x], [r1a.c.a], name="fk1") + ) + t1.append_constraint( + ForeignKeyConstraint([t1.c.y], [r1a.c.a], name="fk2") + ) + + Table( + "ref_a", + m2, + Column("a", Integer, primary_key=True), + ) + r2b = Table( + "ref_b", + m2, + Column("a", Integer, primary_key=True), + Column("b", Integer, primary_key=True), + ) + t2 = Table( + "t", + m2, + Column("x", Integer), + Column("y", Integer), + Column("z", Integer), + ) + t2.append_constraint( + ForeignKeyConstraint( + [t2.c.x, t2.c.z], [r2b.c.a, r2b.c.b], name="fk1" + ) + ) + t2.append_constraint( + ForeignKeyConstraint( + [t2.c.y, t2.c.z], [r2b.c.a, r2b.c.b], name="fk2" + ) + ) + + if hook_type == "object": + + def include_object(object_, name, type_, reflected, compare_to): + return not ( + isinstance(object_, ForeignKeyConstraint) + and type_ == "foreign_key_constraint" + and name == "fk1" + ) + + diffs = self._fixture(m1, m2, object_filters=include_object) + elif hook_type == "name": + + def include_name(name, type_, parent_names): + if type_ == "index": + return True # MariaDB thing + + if name == "fk1": + eq_(type_, "foreign_key_constraint") + eq_( + parent_names, + { + "schema_name": None, + "table_name": "t", + "schema_qualified_table_name": "t", + }, + ) + return False + else: + return True + + diffs = self._fixture(m1, m2, name_filters=include_name) + + if hook_type == "object": + self._assert_fk_diff( + diffs[0], "remove_fk", "t", ["y"], "ref_a", ["a"], name="fk2" + ) + self._assert_fk_diff( + diffs[1], + "add_fk", + "t", + ["y", "z"], + "ref_b", + ["a", "b"], + name="fk2", + ) + eq_(len(diffs), 2) + elif hook_type == "name": + eq_( + {(d[0], d[1].name) for d in diffs}, + {("add_fk", "fk2"), ("add_fk", "fk1"), ("remove_fk", "fk2")}, + ) + + +class AutogenerateFKOptionsTest(AutogenFixtureTest, TestBase): + __backend__ = True + + def _fk_opts_fixture(self, old_opts, new_opts): + m1 = MetaData() + m2 = MetaData() + + Table( + "some_table", + m1, + Column("id", Integer, primary_key=True), + Column("test", String(10)), + ) + + Table( + "user", + m1, + Column("id", Integer, primary_key=True), + Column("name", String(50), nullable=False), + Column("tid", Integer), + ForeignKeyConstraint(["tid"], ["some_table.id"], **old_opts), + ) + + Table( + "some_table", + m2, + Column("id", Integer, primary_key=True), + Column("test", String(10)), + ) + + Table( + "user", + m2, + Column("id", Integer, primary_key=True), + Column("name", String(50), nullable=False), + Column("tid", Integer), + ForeignKeyConstraint(["tid"], ["some_table.id"], **new_opts), + ) + + return self._fixture(m1, m2) + + @config.requirements.fk_ondelete_is_reflected + def test_add_ondelete(self): + diffs = self._fk_opts_fixture({}, {"ondelete": "cascade"}) + + self._assert_fk_diff( + diffs[0], + "remove_fk", + 
"user", + ["tid"], + "some_table", + ["id"], + ondelete=None, + conditional_name="servergenerated", + ) + + self._assert_fk_diff( + diffs[1], + "add_fk", + "user", + ["tid"], + "some_table", + ["id"], + ondelete="cascade", + ) + + @config.requirements.fk_ondelete_is_reflected + def test_remove_ondelete(self): + diffs = self._fk_opts_fixture({"ondelete": "CASCADE"}, {}) + + self._assert_fk_diff( + diffs[0], + "remove_fk", + "user", + ["tid"], + "some_table", + ["id"], + ondelete="CASCADE", + conditional_name="servergenerated", + ) + + self._assert_fk_diff( + diffs[1], + "add_fk", + "user", + ["tid"], + "some_table", + ["id"], + ondelete=None, + ) + + def test_nochange_ondelete(self): + """test case sensitivity""" + diffs = self._fk_opts_fixture( + {"ondelete": "caSCAde"}, {"ondelete": "CasCade"} + ) + eq_(diffs, []) + + @config.requirements.fk_onupdate_is_reflected + def test_add_onupdate(self): + diffs = self._fk_opts_fixture({}, {"onupdate": "cascade"}) + + self._assert_fk_diff( + diffs[0], + "remove_fk", + "user", + ["tid"], + "some_table", + ["id"], + onupdate=None, + conditional_name="servergenerated", + ) + + self._assert_fk_diff( + diffs[1], + "add_fk", + "user", + ["tid"], + "some_table", + ["id"], + onupdate="cascade", + ) + + @config.requirements.fk_onupdate_is_reflected + def test_remove_onupdate(self): + diffs = self._fk_opts_fixture({"onupdate": "CASCADE"}, {}) + + self._assert_fk_diff( + diffs[0], + "remove_fk", + "user", + ["tid"], + "some_table", + ["id"], + onupdate="CASCADE", + conditional_name="servergenerated", + ) + + self._assert_fk_diff( + diffs[1], + "add_fk", + "user", + ["tid"], + "some_table", + ["id"], + onupdate=None, + ) + + @config.requirements.fk_onupdate + def test_nochange_onupdate(self): + """test case sensitivity""" + diffs = self._fk_opts_fixture( + {"onupdate": "caSCAde"}, {"onupdate": "CasCade"} + ) + eq_(diffs, []) + + @config.requirements.fk_ondelete_restrict + def test_nochange_ondelete_restrict(self): + """test the RESTRICT option which MySQL doesn't report on""" + + diffs = self._fk_opts_fixture( + {"ondelete": "restrict"}, {"ondelete": "restrict"} + ) + eq_(diffs, []) + + @config.requirements.fk_onupdate_restrict + def test_nochange_onupdate_restrict(self): + """test the RESTRICT option which MySQL doesn't report on""" + + diffs = self._fk_opts_fixture( + {"onupdate": "restrict"}, {"onupdate": "restrict"} + ) + eq_(diffs, []) + + @config.requirements.fk_ondelete_noaction + def test_nochange_ondelete_noaction(self): + """test the NO ACTION option which generally comes back as None""" + + diffs = self._fk_opts_fixture( + {"ondelete": "no action"}, {"ondelete": "no action"} + ) + eq_(diffs, []) + + @config.requirements.fk_onupdate + def test_nochange_onupdate_noaction(self): + """test the NO ACTION option which generally comes back as None""" + + diffs = self._fk_opts_fixture( + {"onupdate": "no action"}, {"onupdate": "no action"} + ) + eq_(diffs, []) + + @config.requirements.fk_ondelete_restrict + def test_change_ondelete_from_restrict(self): + """test the RESTRICT option which MySQL doesn't report on""" + + # note that this is impossible to detect if we change + # from RESTRICT to NO ACTION on MySQL. 
+ diffs = self._fk_opts_fixture( + {"ondelete": "restrict"}, {"ondelete": "cascade"} + ) + self._assert_fk_diff( + diffs[0], + "remove_fk", + "user", + ["tid"], + "some_table", + ["id"], + onupdate=None, + ondelete=mock.ANY, # MySQL reports None, PG reports RESTRICT + conditional_name="servergenerated", + ) + + self._assert_fk_diff( + diffs[1], + "add_fk", + "user", + ["tid"], + "some_table", + ["id"], + onupdate=None, + ondelete="cascade", + ) + + @config.requirements.fk_ondelete_restrict + def test_change_onupdate_from_restrict(self): + """test the RESTRICT option which MySQL doesn't report on""" + + # note that this is impossible to detect if we change + # from RESTRICT to NO ACTION on MySQL. + diffs = self._fk_opts_fixture( + {"onupdate": "restrict"}, {"onupdate": "cascade"} + ) + self._assert_fk_diff( + diffs[0], + "remove_fk", + "user", + ["tid"], + "some_table", + ["id"], + onupdate=mock.ANY, # MySQL reports None, PG reports RESTRICT + ondelete=None, + conditional_name="servergenerated", + ) + + self._assert_fk_diff( + diffs[1], + "add_fk", + "user", + ["tid"], + "some_table", + ["id"], + onupdate="cascade", + ondelete=None, + ) + + @config.requirements.fk_ondelete_is_reflected + @config.requirements.fk_onupdate_is_reflected + def test_ondelete_onupdate_combo(self): + diffs = self._fk_opts_fixture( + {"onupdate": "CASCADE", "ondelete": "SET NULL"}, + {"onupdate": "RESTRICT", "ondelete": "RESTRICT"}, + ) + + self._assert_fk_diff( + diffs[0], + "remove_fk", + "user", + ["tid"], + "some_table", + ["id"], + onupdate="CASCADE", + ondelete="SET NULL", + conditional_name="servergenerated", + ) + + self._assert_fk_diff( + diffs[1], + "add_fk", + "user", + ["tid"], + "some_table", + ["id"], + onupdate="RESTRICT", + ondelete="RESTRICT", + ) + + @config.requirements.fk_initially + def test_add_initially_deferred(self): + diffs = self._fk_opts_fixture({}, {"initially": "deferred"}) + + self._assert_fk_diff( + diffs[0], + "remove_fk", + "user", + ["tid"], + "some_table", + ["id"], + initially=None, + conditional_name="servergenerated", + ) + + self._assert_fk_diff( + diffs[1], + "add_fk", + "user", + ["tid"], + "some_table", + ["id"], + initially="deferred", + ) + + @config.requirements.fk_initially + def test_remove_initially_deferred(self): + diffs = self._fk_opts_fixture({"initially": "deferred"}, {}) + + self._assert_fk_diff( + diffs[0], + "remove_fk", + "user", + ["tid"], + "some_table", + ["id"], + initially="DEFERRED", + deferrable=True, + conditional_name="servergenerated", + ) + + self._assert_fk_diff( + diffs[1], + "add_fk", + "user", + ["tid"], + "some_table", + ["id"], + initially=None, + ) + + @config.requirements.fk_deferrable + @config.requirements.fk_initially + def test_add_initially_immediate_plus_deferrable(self): + diffs = self._fk_opts_fixture( + {}, {"initially": "immediate", "deferrable": True} + ) + + self._assert_fk_diff( + diffs[0], + "remove_fk", + "user", + ["tid"], + "some_table", + ["id"], + initially=None, + conditional_name="servergenerated", + ) + + self._assert_fk_diff( + diffs[1], + "add_fk", + "user", + ["tid"], + "some_table", + ["id"], + initially="immediate", + deferrable=True, + ) + + @config.requirements.fk_deferrable + @config.requirements.fk_initially + def test_remove_initially_immediate_plus_deferrable(self): + diffs = self._fk_opts_fixture( + {"initially": "immediate", "deferrable": True}, {} + ) + + self._assert_fk_diff( + diffs[0], + "remove_fk", + "user", + ["tid"], + "some_table", + ["id"], + initially=None, # immediate is the default + 
deferrable=True, + conditional_name="servergenerated", + ) + + self._assert_fk_diff( + diffs[1], + "add_fk", + "user", + ["tid"], + "some_table", + ["id"], + initially=None, + deferrable=None, + ) + + @config.requirements.fk_initially + @config.requirements.fk_deferrable + def test_add_initially_deferrable_nochange_one(self): + diffs = self._fk_opts_fixture( + {"deferrable": True, "initially": "immediate"}, + {"deferrable": True, "initially": "immediate"}, + ) + + eq_(diffs, []) + + @config.requirements.fk_initially + @config.requirements.fk_deferrable + def test_add_initially_deferrable_nochange_two(self): + diffs = self._fk_opts_fixture( + {"deferrable": True, "initially": "deferred"}, + {"deferrable": True, "initially": "deferred"}, + ) + + eq_(diffs, []) + + @config.requirements.fk_initially + @config.requirements.fk_deferrable + def test_add_initially_deferrable_nochange_three(self): + diffs = self._fk_opts_fixture( + {"deferrable": None, "initially": "deferred"}, + {"deferrable": None, "initially": "deferred"}, + ) + + eq_(diffs, []) + + @config.requirements.fk_deferrable + def test_add_deferrable(self): + diffs = self._fk_opts_fixture({}, {"deferrable": True}) + + self._assert_fk_diff( + diffs[0], + "remove_fk", + "user", + ["tid"], + "some_table", + ["id"], + deferrable=None, + conditional_name="servergenerated", + ) + + self._assert_fk_diff( + diffs[1], + "add_fk", + "user", + ["tid"], + "some_table", + ["id"], + deferrable=True, + ) + + @config.requirements.fk_deferrable_is_reflected + def test_remove_deferrable(self): + diffs = self._fk_opts_fixture({"deferrable": True}, {}) + + self._assert_fk_diff( + diffs[0], + "remove_fk", + "user", + ["tid"], + "some_table", + ["id"], + deferrable=True, + conditional_name="servergenerated", + ) + + self._assert_fk_diff( + diffs[1], + "add_fk", + "user", + ["tid"], + "some_table", + ["id"], + deferrable=None, + ) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/testing/suite/test_autogen_identity.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/testing/suite/test_autogen_identity.py new file mode 100644 index 0000000000000000000000000000000000000000..3dee9fc9903f74fd06adfaa837f22af51ec6dcd1 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/testing/suite/test_autogen_identity.py @@ -0,0 +1,226 @@ +import sqlalchemy as sa +from sqlalchemy import Column +from sqlalchemy import Integer +from sqlalchemy import MetaData +from sqlalchemy import Table + +from alembic.util import sqla_compat +from ._autogen_fixtures import AutogenFixtureTest +from ... 
import testing +from ...testing import config +from ...testing import eq_ +from ...testing import is_true +from ...testing import TestBase + + +class AutogenerateIdentityTest(AutogenFixtureTest, TestBase): + __requires__ = ("identity_columns",) + __backend__ = True + + def test_add_identity_column(self): + m1 = MetaData() + m2 = MetaData() + + Table("user", m1, Column("other", sa.Text)) + + Table( + "user", + m2, + Column("other", sa.Text), + Column( + "id", + Integer, + sa.Identity(start=5, increment=7), + primary_key=True, + ), + ) + + diffs = self._fixture(m1, m2) + + eq_(diffs[0][0], "add_column") + eq_(diffs[0][2], "user") + eq_(diffs[0][3].name, "id") + i = diffs[0][3].identity + + is_true(isinstance(i, sa.Identity)) + eq_(i.start, 5) + eq_(i.increment, 7) + + def test_remove_identity_column(self): + m1 = MetaData() + m2 = MetaData() + + Table( + "user", + m1, + Column( + "id", + Integer, + sa.Identity(start=2, increment=3), + primary_key=True, + ), + ) + + Table("user", m2) + + diffs = self._fixture(m1, m2) + + eq_(diffs[0][0], "remove_column") + eq_(diffs[0][2], "user") + c = diffs[0][3] + eq_(c.name, "id") + + is_true(isinstance(c.identity, sa.Identity)) + eq_(c.identity.start, 2) + eq_(c.identity.increment, 3) + + def test_no_change_identity_column(self): + m1 = MetaData() + m2 = MetaData() + + for m in (m1, m2): + id_ = sa.Identity(start=2) + Table("user", m, Column("id", Integer, id_)) + + diffs = self._fixture(m1, m2) + + eq_(diffs, []) + + def test_dialect_kwargs_changes(self): + m1 = MetaData() + m2 = MetaData() + + if sqla_compat.identity_has_dialect_kwargs: + args = {"oracle_on_null": True, "oracle_order": True} + else: + args = {"on_null": True, "order": True} + + Table("user", m1, Column("id", Integer, sa.Identity(start=2))) + id_ = sa.Identity(start=2, **args) + Table("user", m2, Column("id", Integer, id_)) + + diffs = self._fixture(m1, m2) + if config.db.name == "oracle": + is_true(len(diffs), 1) + eq_(diffs[0][0][0], "modify_default") + else: + eq_(diffs, []) + + @testing.combinations( + (None, dict(start=2)), + (dict(start=2), None), + (dict(start=2), dict(start=2, increment=7)), + (dict(always=False), dict(always=True)), + ( + dict(start=1, minvalue=0, maxvalue=100, cycle=True), + dict(start=1, minvalue=0, maxvalue=100, cycle=False), + ), + ( + dict(start=10, increment=3, maxvalue=9999), + dict(start=10, increment=1, maxvalue=3333), + ), + ) + @config.requirements.identity_columns_alter + def test_change_identity(self, before, after): + arg_before = (sa.Identity(**before),) if before else () + arg_after = (sa.Identity(**after),) if after else () + + m1 = MetaData() + m2 = MetaData() + + Table( + "user", + m1, + Column("id", Integer, *arg_before), + Column("other", sa.Text), + ) + + Table( + "user", + m2, + Column("id", Integer, *arg_after), + Column("other", sa.Text), + ) + + diffs = self._fixture(m1, m2) + + eq_(len(diffs[0]), 1) + diffs = diffs[0][0] + eq_(diffs[0], "modify_default") + eq_(diffs[2], "user") + eq_(diffs[3], "id") + old = diffs[5] + new = diffs[6] + + def check(kw, idt): + if kw: + is_true(isinstance(idt, sa.Identity)) + for k, v in kw.items(): + eq_(getattr(idt, k), v) + else: + is_true(idt in (None, False)) + + check(before, old) + check(after, new) + + def test_add_identity_to_column(self): + m1 = MetaData() + m2 = MetaData() + + Table( + "user", + m1, + Column("id", Integer), + Column("other", sa.Text), + ) + + Table( + "user", + m2, + Column("id", Integer, sa.Identity(start=2, maxvalue=1000)), + Column("other", sa.Text), + ) + + diffs = 
self._fixture(m1, m2) + + eq_(len(diffs[0]), 1) + diffs = diffs[0][0] + eq_(diffs[0], "modify_default") + eq_(diffs[2], "user") + eq_(diffs[3], "id") + eq_(diffs[5], None) + added = diffs[6] + + is_true(isinstance(added, sa.Identity)) + eq_(added.start, 2) + eq_(added.maxvalue, 1000) + + def test_remove_identity_from_column(self): + m1 = MetaData() + m2 = MetaData() + + Table( + "user", + m1, + Column("id", Integer, sa.Identity(start=2, maxvalue=1000)), + Column("other", sa.Text), + ) + + Table( + "user", + m2, + Column("id", Integer), + Column("other", sa.Text), + ) + + diffs = self._fixture(m1, m2) + + eq_(len(diffs[0]), 1) + diffs = diffs[0][0] + eq_(diffs[0], "modify_default") + eq_(diffs[2], "user") + eq_(diffs[3], "id") + eq_(diffs[6], None) + removed = diffs[5] + + is_true(isinstance(removed, sa.Identity)) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/testing/suite/test_environment.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/testing/suite/test_environment.py new file mode 100644 index 0000000000000000000000000000000000000000..df2d9afbd490fb1cd3170de428eaf3190e4f2a48 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/testing/suite/test_environment.py @@ -0,0 +1,364 @@ +import io + +from ...migration import MigrationContext +from ...testing import assert_raises +from ...testing import config +from ...testing import eq_ +from ...testing import is_ +from ...testing import is_false +from ...testing import is_not_ +from ...testing import is_true +from ...testing import ne_ +from ...testing.fixtures import TestBase + + +class MigrationTransactionTest(TestBase): + __backend__ = True + + conn = None + + def _fixture(self, opts): + self.conn = conn = config.db.connect() + + if opts.get("as_sql", False): + self.context = MigrationContext.configure( + dialect=conn.dialect, opts=opts + ) + self.context.output_buffer = self.context.impl.output_buffer = ( + io.StringIO() + ) + else: + self.context = MigrationContext.configure( + connection=conn, opts=opts + ) + return self.context + + def teardown_method(self): + if self.conn: + self.conn.close() + + def test_proxy_transaction_rollback(self): + context = self._fixture( + {"transaction_per_migration": True, "transactional_ddl": True} + ) + + is_false(self.conn.in_transaction()) + proxy = context.begin_transaction(_per_migration=True) + is_true(self.conn.in_transaction()) + proxy.rollback() + is_false(self.conn.in_transaction()) + + def test_proxy_transaction_commit(self): + context = self._fixture( + {"transaction_per_migration": True, "transactional_ddl": True} + ) + proxy = context.begin_transaction(_per_migration=True) + is_true(self.conn.in_transaction()) + proxy.commit() + is_false(self.conn.in_transaction()) + + def test_proxy_transaction_contextmanager_commit(self): + context = self._fixture( + {"transaction_per_migration": True, "transactional_ddl": True} + ) + proxy = context.begin_transaction(_per_migration=True) + is_true(self.conn.in_transaction()) + with proxy: + pass + is_false(self.conn.in_transaction()) + + def test_proxy_transaction_contextmanager_rollback(self): + context = self._fixture( + {"transaction_per_migration": True, "transactional_ddl": True} + ) + proxy = context.begin_transaction(_per_migration=True) + is_true(self.conn.in_transaction()) + + def go(): + with proxy: + raise Exception("hi") + + assert_raises(Exception, go) + is_false(self.conn.in_transaction()) + + def 
test_proxy_transaction_contextmanager_explicit_rollback(self): + context = self._fixture( + {"transaction_per_migration": True, "transactional_ddl": True} + ) + proxy = context.begin_transaction(_per_migration=True) + is_true(self.conn.in_transaction()) + + with proxy: + is_true(self.conn.in_transaction()) + proxy.rollback() + is_false(self.conn.in_transaction()) + + is_false(self.conn.in_transaction()) + + def test_proxy_transaction_contextmanager_explicit_commit(self): + context = self._fixture( + {"transaction_per_migration": True, "transactional_ddl": True} + ) + proxy = context.begin_transaction(_per_migration=True) + is_true(self.conn.in_transaction()) + + with proxy: + is_true(self.conn.in_transaction()) + proxy.commit() + is_false(self.conn.in_transaction()) + + is_false(self.conn.in_transaction()) + + def test_transaction_per_migration_transactional_ddl(self): + context = self._fixture( + {"transaction_per_migration": True, "transactional_ddl": True} + ) + + is_false(self.conn.in_transaction()) + + with context.begin_transaction(): + is_false(self.conn.in_transaction()) + with context.begin_transaction(_per_migration=True): + is_true(self.conn.in_transaction()) + + is_false(self.conn.in_transaction()) + is_false(self.conn.in_transaction()) + + def test_transaction_per_migration_non_transactional_ddl(self): + context = self._fixture( + {"transaction_per_migration": True, "transactional_ddl": False} + ) + + is_false(self.conn.in_transaction()) + + with context.begin_transaction(): + is_false(self.conn.in_transaction()) + with context.begin_transaction(_per_migration=True): + is_true(self.conn.in_transaction()) + + is_false(self.conn.in_transaction()) + is_false(self.conn.in_transaction()) + + def test_transaction_per_all_transactional_ddl(self): + context = self._fixture({"transactional_ddl": True}) + + is_false(self.conn.in_transaction()) + + with context.begin_transaction(): + is_true(self.conn.in_transaction()) + with context.begin_transaction(_per_migration=True): + is_true(self.conn.in_transaction()) + + is_true(self.conn.in_transaction()) + is_false(self.conn.in_transaction()) + + def test_transaction_per_all_non_transactional_ddl(self): + context = self._fixture({"transactional_ddl": False}) + + is_false(self.conn.in_transaction()) + + with context.begin_transaction(): + is_false(self.conn.in_transaction()) + with context.begin_transaction(_per_migration=True): + is_true(self.conn.in_transaction()) + + is_false(self.conn.in_transaction()) + is_false(self.conn.in_transaction()) + + def test_transaction_per_all_sqlmode(self): + context = self._fixture({"as_sql": True}) + + context.execute("step 1") + with context.begin_transaction(): + context.execute("step 2") + with context.begin_transaction(_per_migration=True): + context.execute("step 3") + + context.execute("step 4") + context.execute("step 5") + + if context.impl.transactional_ddl: + self._assert_impl_steps( + "step 1", + "BEGIN", + "step 2", + "step 3", + "step 4", + "COMMIT", + "step 5", + ) + else: + self._assert_impl_steps( + "step 1", "step 2", "step 3", "step 4", "step 5" + ) + + def test_transaction_per_migration_sqlmode(self): + context = self._fixture( + {"as_sql": True, "transaction_per_migration": True} + ) + + context.execute("step 1") + with context.begin_transaction(): + context.execute("step 2") + with context.begin_transaction(_per_migration=True): + context.execute("step 3") + + context.execute("step 4") + context.execute("step 5") + + if context.impl.transactional_ddl: + self._assert_impl_steps( + "step 
1", + "step 2", + "BEGIN", + "step 3", + "COMMIT", + "step 4", + "step 5", + ) + else: + self._assert_impl_steps( + "step 1", "step 2", "step 3", "step 4", "step 5" + ) + + @config.requirements.autocommit_isolation + def test_autocommit_block(self): + context = self._fixture({"transaction_per_migration": True}) + + is_false(self.conn.in_transaction()) + + with context.begin_transaction(): + is_false(self.conn.in_transaction()) + with context.begin_transaction(_per_migration=True): + is_true(self.conn.in_transaction()) + + with context.autocommit_block(): + # in 1.x, self.conn is separate due to the + # execution_options call. however for future they are the + # same connection and there is a "transaction" block + # despite autocommit + if self.is_sqlalchemy_future: + is_(context.connection, self.conn) + else: + is_not_(context.connection, self.conn) + is_false(self.conn.in_transaction()) + + eq_( + context.connection._execution_options[ + "isolation_level" + ], + "AUTOCOMMIT", + ) + + ne_( + context.connection._execution_options.get( + "isolation_level", None + ), + "AUTOCOMMIT", + ) + is_true(self.conn.in_transaction()) + + is_false(self.conn.in_transaction()) + is_false(self.conn.in_transaction()) + + @config.requirements.autocommit_isolation + def test_autocommit_block_no_transaction(self): + context = self._fixture({"transaction_per_migration": True}) + + is_false(self.conn.in_transaction()) + + with context.autocommit_block(): + is_true(context.connection.in_transaction()) + + # in 1.x, self.conn is separate due to the execution_options + # call. however for future they are the same connection and there + # is a "transaction" block despite autocommit + if self.is_sqlalchemy_future: + is_(context.connection, self.conn) + else: + is_not_(context.connection, self.conn) + is_false(self.conn.in_transaction()) + + eq_( + context.connection._execution_options["isolation_level"], + "AUTOCOMMIT", + ) + + ne_( + context.connection._execution_options.get("isolation_level", None), + "AUTOCOMMIT", + ) + + is_false(self.conn.in_transaction()) + + def test_autocommit_block_transactional_ddl_sqlmode(self): + context = self._fixture( + { + "transaction_per_migration": True, + "transactional_ddl": True, + "as_sql": True, + } + ) + + with context.begin_transaction(): + context.execute("step 1") + with context.begin_transaction(_per_migration=True): + context.execute("step 2") + + with context.autocommit_block(): + context.execute("step 3") + + context.execute("step 4") + + context.execute("step 5") + + self._assert_impl_steps( + "step 1", + "BEGIN", + "step 2", + "COMMIT", + "step 3", + "BEGIN", + "step 4", + "COMMIT", + "step 5", + ) + + def test_autocommit_block_nontransactional_ddl_sqlmode(self): + context = self._fixture( + { + "transaction_per_migration": True, + "transactional_ddl": False, + "as_sql": True, + } + ) + + with context.begin_transaction(): + context.execute("step 1") + with context.begin_transaction(_per_migration=True): + context.execute("step 2") + + with context.autocommit_block(): + context.execute("step 3") + + context.execute("step 4") + + context.execute("step 5") + + self._assert_impl_steps( + "step 1", "step 2", "step 3", "step 4", "step 5" + ) + + def _assert_impl_steps(self, *steps): + to_check = self.context.output_buffer.getvalue() + + self.context.impl.output_buffer = buf = io.StringIO() + for step in steps: + if step == "BEGIN": + self.context.impl.emit_begin() + elif step == "COMMIT": + self.context.impl.emit_commit() + else: + self.context.impl._exec(step) + + 
eq_(to_check, buf.getvalue()) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/testing/suite/test_op.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/testing/suite/test_op.py new file mode 100644 index 0000000000000000000000000000000000000000..a63b3f2f9f2a7bfd879e5b06a05f0c6a64277b8f --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/testing/suite/test_op.py @@ -0,0 +1,42 @@ +"""Test against the builders in the op.* module.""" + +from sqlalchemy import Column +from sqlalchemy import event +from sqlalchemy import Integer +from sqlalchemy import String +from sqlalchemy import Table +from sqlalchemy.sql import text + +from ...testing.fixtures import AlterColRoundTripFixture +from ...testing.fixtures import TestBase + + +@event.listens_for(Table, "after_parent_attach") +def _add_cols(table, metadata): + if table.name == "tbl_with_auto_appended_column": + table.append_column(Column("bat", Integer)) + + +class BackendAlterColumnTest(AlterColRoundTripFixture, TestBase): + __backend__ = True + + def test_rename_column(self): + self._run_alter_col({}, {"name": "newname"}) + + def test_modify_type_int_str(self): + self._run_alter_col({"type": Integer()}, {"type": String(50)}) + + def test_add_server_default_int(self): + self._run_alter_col({"type": Integer}, {"server_default": text("5")}) + + def test_modify_server_default_int(self): + self._run_alter_col( + {"type": Integer, "server_default": text("2")}, + {"server_default": text("5")}, + ) + + def test_modify_nullable_to_non(self): + self._run_alter_col({}, {"nullable": False}) + + def test_modify_non_nullable_to_nullable(self): + self._run_alter_col({"nullable": False}, {"nullable": True}) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/testing/util.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/testing/util.py new file mode 100644 index 0000000000000000000000000000000000000000..4517a69f6b5c4ebdc34702005074e83178cc9d95 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/testing/util.py @@ -0,0 +1,126 @@ +# testing/util.py +# Copyright (C) 2005-2019 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php +from __future__ import annotations + +import types +from typing import Union + +from sqlalchemy.util import inspect_getfullargspec + +from ..util import sqla_2 + + +def flag_combinations(*combinations): + """A facade around @testing.combinations() oriented towards boolean + keyword-based arguments. + + Basically generates a nice looking identifier based on the keywords + and also sets up the argument names. 
+ + E.g.:: + + @testing.flag_combinations( + dict(lazy=False, passive=False), + dict(lazy=True, passive=False), + dict(lazy=False, passive=True), + dict(lazy=False, passive=True, raiseload=True), + ) + + + would result in:: + + @testing.combinations( + ('', False, False, False), + ('lazy', True, False, False), + ('lazy_passive', True, True, False), + ('lazy_passive', True, True, True), + id_='iaaa', + argnames='lazy,passive,raiseload' + ) + + """ + from sqlalchemy.testing import config + + keys = set() + + for d in combinations: + keys.update(d) + + keys = sorted(keys) + + return config.combinations( + *[ + ("_".join(k for k in keys if d.get(k, False)),) + + tuple(d.get(k, False) for k in keys) + for d in combinations + ], + id_="i" + ("a" * len(keys)), + argnames=",".join(keys), + ) + + +def resolve_lambda(__fn, **kw): + """Given a no-arg lambda and a namespace, return a new lambda that + has all the values filled in. + + This is used so that we can have module-level fixtures that + refer to instance-level variables using lambdas. + + """ + + pos_args = inspect_getfullargspec(__fn)[0] + pass_pos_args = {arg: kw.pop(arg) for arg in pos_args} + glb = dict(__fn.__globals__) + glb.update(kw) + new_fn = types.FunctionType(__fn.__code__, glb) + return new_fn(**pass_pos_args) + + +def metadata_fixture(ddl="function"): + """Provide MetaData for a pytest fixture.""" + + from sqlalchemy.testing import config + from . import fixture_functions + + def decorate(fn): + def run_ddl(self): + from sqlalchemy import schema + + metadata = self.metadata = schema.MetaData() + try: + result = fn(self, metadata) + metadata.create_all(config.db) + # TODO: + # somehow get a per-function dml erase fixture here + yield result + finally: + metadata.drop_all(config.db) + + return fixture_functions.fixture(scope=ddl)(run_ddl) + + return decorate + + +def _safe_int(value: str) -> Union[int, str]: + try: + return int(value) + except: + return value + + +def testing_engine(url=None, options=None, future=False): + from sqlalchemy.testing import config + from sqlalchemy.testing.engines import testing_engine + + if not future: + future = getattr(config._current.options, "future_engine", False) + + if not sqla_2: + kw = {"future": future} if future else {} + else: + kw = {} + return testing_engine(url, options, **kw) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/testing/warnings.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/testing/warnings.py new file mode 100644 index 0000000000000000000000000000000000000000..86d45a0dd558e2696ae34d277418eb43928cb8a7 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/testing/warnings.py @@ -0,0 +1,31 @@ +# testing/warnings.py +# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + + +import warnings + +from sqlalchemy import exc as sa_exc + + +def setup_filters(): + """Set global warning behavior for the test suite.""" + + warnings.resetwarnings() + + warnings.filterwarnings("error", category=sa_exc.SADeprecationWarning) + warnings.filterwarnings("error", category=sa_exc.SAWarning) + + # some selected deprecations... 
+ warnings.filterwarnings("error", category=DeprecationWarning) + try: + import pytest + except ImportError: + pass + else: + warnings.filterwarnings( + "once", category=pytest.PytestDeprecationWarning + ) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/util/__init__.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/util/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1d3a217968bdffa149f47528a502f3cf34b43be2 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/util/__init__.py @@ -0,0 +1,29 @@ +from .editor import open_in_editor as open_in_editor +from .exc import AutogenerateDiffsDetected as AutogenerateDiffsDetected +from .exc import CommandError as CommandError +from .langhelpers import _with_legacy_names as _with_legacy_names +from .langhelpers import asbool as asbool +from .langhelpers import dedupe_tuple as dedupe_tuple +from .langhelpers import Dispatcher as Dispatcher +from .langhelpers import EMPTY_DICT as EMPTY_DICT +from .langhelpers import immutabledict as immutabledict +from .langhelpers import memoized_property as memoized_property +from .langhelpers import ModuleClsProxy as ModuleClsProxy +from .langhelpers import not_none as not_none +from .langhelpers import rev_id as rev_id +from .langhelpers import to_list as to_list +from .langhelpers import to_tuple as to_tuple +from .langhelpers import unique_list as unique_list +from .messaging import err as err +from .messaging import format_as_comma as format_as_comma +from .messaging import msg as msg +from .messaging import obfuscate_url_pw as obfuscate_url_pw +from .messaging import status as status +from .messaging import warn as warn +from .messaging import warn_deprecated as warn_deprecated +from .messaging import write_outstream as write_outstream +from .pyfiles import coerce_resource_to_filename as coerce_resource_to_filename +from .pyfiles import load_python_file as load_python_file +from .pyfiles import pyc_file_from_path as pyc_file_from_path +from .pyfiles import template_to_file as template_to_file +from .sqla_compat import sqla_2 as sqla_2 diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/util/compat.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/util/compat.py new file mode 100644 index 0000000000000000000000000000000000000000..131f16a00296ed16950151a32f712fb6fdeaf9a5 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/util/compat.py @@ -0,0 +1,146 @@ +# mypy: no-warn-unused-ignores + +from __future__ import annotations + +from configparser import ConfigParser +import io +import os +from pathlib import Path +import sys +import typing +from typing import Any +from typing import Iterator +from typing import List +from typing import Optional +from typing import Sequence +from typing import Union + +if True: + # zimports hack for too-long names + from sqlalchemy.util import ( # noqa: F401 + inspect_getfullargspec as inspect_getfullargspec, + ) + from sqlalchemy.util.compat import ( # noqa: F401 + inspect_formatargspec as inspect_formatargspec, + ) + +is_posix = os.name == "posix" + +py314 = sys.version_info >= (3, 14) +py313 = sys.version_info >= (3, 13) +py312 = sys.version_info >= (3, 12) +py311 = sys.version_info >= (3, 11) +py310 = sys.version_info >= (3, 10) +py39 = sys.version_info >= (3, 9) + + 
+# produce a wrapper that allows encoded text to stream +# into a given buffer, but doesn't close it. +# not sure of a more idiomatic approach to this. +class EncodedIO(io.TextIOWrapper): + def close(self) -> None: + pass + + +if py39: + from importlib import resources as _resources + + importlib_resources = _resources + from importlib import metadata as _metadata + + importlib_metadata = _metadata + from importlib.metadata import EntryPoint as EntryPoint +else: + import importlib_resources # type:ignore # noqa + import importlib_metadata # type:ignore # noqa + from importlib_metadata import EntryPoint # type:ignore # noqa + +if py311: + import tomllib as tomllib +else: + import tomli as tomllib # type: ignore # noqa + + +if py312: + + def path_walk( + path: Path, *, top_down: bool = True + ) -> Iterator[tuple[Path, list[str], list[str]]]: + return Path.walk(path) + + def path_relative_to( + path: Path, other: Path, *, walk_up: bool = False + ) -> Path: + return path.relative_to(other, walk_up=walk_up) + +else: + + def path_walk( + path: Path, *, top_down: bool = True + ) -> Iterator[tuple[Path, list[str], list[str]]]: + for root, dirs, files in os.walk(path, topdown=top_down): + yield Path(root), dirs, files + + def path_relative_to( + path: Path, other: Path, *, walk_up: bool = False + ) -> Path: + """ + Calculate the relative path of 'path' with respect to 'other', + optionally allowing 'path' to be outside the subtree of 'other'. + + OK I used AI for this, sorry + + """ + try: + return path.relative_to(other) + except ValueError: + if walk_up: + other_ancestors = list(other.parents) + [other] + for ancestor in other_ancestors: + try: + return path.relative_to(ancestor) + except ValueError: + continue + raise ValueError( + f"{path} is not in the same subtree as {other}" + ) + else: + raise + + +def importlib_metadata_get(group: str) -> Sequence[EntryPoint]: + ep = importlib_metadata.entry_points() + if hasattr(ep, "select"): + return ep.select(group=group) + else: + return ep.get(group, ()) # type: ignore + + +def formatannotation_fwdref( + annotation: Any, base_module: Optional[Any] = None +) -> str: + """vendored from python 3.7""" + # copied over _formatannotation from sqlalchemy 2.0 + + if isinstance(annotation, str): + return annotation + + if getattr(annotation, "__module__", None) == "typing": + return repr(annotation).replace("typing.", "").replace("~", "") + if isinstance(annotation, type): + if annotation.__module__ in ("builtins", base_module): + return repr(annotation.__qualname__) + return annotation.__module__ + "." 
+ annotation.__qualname__ + elif isinstance(annotation, typing.TypeVar): + return repr(annotation).replace("~", "") + return repr(annotation).replace("~", "") + + +def read_config_parser( + file_config: ConfigParser, + file_argument: Sequence[Union[str, os.PathLike[str]]], +) -> List[str]: + if py310: + return file_config.read(file_argument, encoding="locale") + else: + return file_config.read(file_argument) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/util/editor.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/util/editor.py new file mode 100644 index 0000000000000000000000000000000000000000..f1d1557f74c8977efa0b22535f45f44a2c9e2564 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/util/editor.py @@ -0,0 +1,81 @@ +from __future__ import annotations + +import os +from os.path import exists +from os.path import join +from os.path import splitext +from subprocess import check_call +from typing import Dict +from typing import List +from typing import Mapping +from typing import Optional + +from .compat import is_posix +from .exc import CommandError + + +def open_in_editor( + filename: str, environ: Optional[Dict[str, str]] = None +) -> None: + """ + Opens the given file in a text editor. If the environment variable + ``EDITOR`` is set, this is taken as preference. + + Otherwise, a list of commonly installed editors is tried. + + If no editor matches, an :py:exc:`OSError` is raised. + + :param filename: The filename to open. Will be passed verbatim to the + editor command. + :param environ: An optional drop-in replacement for ``os.environ``. Used + mainly for testing. + """ + env = os.environ if environ is None else environ + try: + editor = _find_editor(env) + check_call([editor, filename]) + except Exception as exc: + raise CommandError("Error executing editor (%s)" % (exc,)) from exc + + +def _find_editor(environ: Mapping[str, str]) -> str: + candidates = _default_editors() + for i, var in enumerate(("EDITOR", "VISUAL")): + if var in environ: + user_choice = environ[var] + if exists(user_choice): + return user_choice + if os.sep not in user_choice: + candidates.insert(i, user_choice) + + for candidate in candidates: + path = _find_executable(candidate, environ) + if path is not None: + return path + raise OSError( + "No suitable editor found. Please set the " + '"EDITOR" or "VISUAL" environment variables' + ) + + +def _find_executable( + candidate: str, environ: Mapping[str, str] +) -> Optional[str]: + # Assuming this is on the PATH, we need to determine it's absolute + # location. Otherwise, ``check_call`` will fail + if not is_posix and splitext(candidate)[1] != ".exe": + candidate += ".exe" + for path in environ.get("PATH", "").split(os.pathsep): + value = join(path, candidate) + if exists(value): + return value + return None + + +def _default_editors() -> List[str]: + # Look for an editor. 
Prefer the user's choice by env-var, fall back to + # most commonly installed editor (nano/vim) + if is_posix: + return ["sensible-editor", "editor", "nano", "vim", "code"] + else: + return ["code.exe", "notepad++.exe", "notepad.exe"] diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/util/exc.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/util/exc.py new file mode 100644 index 0000000000000000000000000000000000000000..c790e18a7457516ce347d4a1db4313ad7d8a43ae --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/util/exc.py @@ -0,0 +1,25 @@ +from __future__ import annotations + +from typing import Any +from typing import List +from typing import Tuple +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from alembic.autogenerate import RevisionContext + + +class CommandError(Exception): + pass + + +class AutogenerateDiffsDetected(CommandError): + def __init__( + self, + message: str, + revision_context: RevisionContext, + diffs: List[Tuple[Any, ...]], + ) -> None: + super().__init__(message) + self.revision_context = revision_context + self.diffs = diffs diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/util/langhelpers.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/util/langhelpers.py new file mode 100644 index 0000000000000000000000000000000000000000..80d88cbcec56e280c55c395d6f00b3a72c5946f1 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/util/langhelpers.py @@ -0,0 +1,332 @@ +from __future__ import annotations + +import collections +from collections.abc import Iterable +import textwrap +from typing import Any +from typing import Callable +from typing import cast +from typing import Dict +from typing import List +from typing import Mapping +from typing import MutableMapping +from typing import NoReturn +from typing import Optional +from typing import overload +from typing import Sequence +from typing import Set +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING +from typing import TypeVar +from typing import Union +import uuid +import warnings + +from sqlalchemy.util import asbool as asbool # noqa: F401 +from sqlalchemy.util import immutabledict as immutabledict # noqa: F401 +from sqlalchemy.util import to_list as to_list # noqa: F401 +from sqlalchemy.util import unique_list as unique_list + +from .compat import inspect_getfullargspec + +if True: + # zimports workaround :( + from sqlalchemy.util import ( # noqa: F401 + memoized_property as memoized_property, + ) + + +EMPTY_DICT: Mapping[Any, Any] = immutabledict() +_T = TypeVar("_T", bound=Any) + +_C = TypeVar("_C", bound=Callable[..., Any]) + + +class _ModuleClsMeta(type): + def __setattr__(cls, key: str, value: Callable[..., Any]) -> None: + super().__setattr__(key, value) + cls._update_module_proxies(key) # type: ignore + + +class ModuleClsProxy(metaclass=_ModuleClsMeta): + """Create module level proxy functions for the + methods on a given class. + + The functions will have a compatible signature + as the methods. 
+ + """ + + _setups: Dict[ + Type[Any], + Tuple[ + Set[str], + List[Tuple[MutableMapping[str, Any], MutableMapping[str, Any]]], + ], + ] = collections.defaultdict(lambda: (set(), [])) + + @classmethod + def _update_module_proxies(cls, name: str) -> None: + attr_names, modules = cls._setups[cls] + for globals_, locals_ in modules: + cls._add_proxied_attribute(name, globals_, locals_, attr_names) + + def _install_proxy(self) -> None: + attr_names, modules = self._setups[self.__class__] + for globals_, locals_ in modules: + globals_["_proxy"] = self + for attr_name in attr_names: + globals_[attr_name] = getattr(self, attr_name) + + def _remove_proxy(self) -> None: + attr_names, modules = self._setups[self.__class__] + for globals_, locals_ in modules: + globals_["_proxy"] = None + for attr_name in attr_names: + del globals_[attr_name] + + @classmethod + def create_module_class_proxy( + cls, + globals_: MutableMapping[str, Any], + locals_: MutableMapping[str, Any], + ) -> None: + attr_names, modules = cls._setups[cls] + modules.append((globals_, locals_)) + cls._setup_proxy(globals_, locals_, attr_names) + + @classmethod + def _setup_proxy( + cls, + globals_: MutableMapping[str, Any], + locals_: MutableMapping[str, Any], + attr_names: Set[str], + ) -> None: + for methname in dir(cls): + cls._add_proxied_attribute(methname, globals_, locals_, attr_names) + + @classmethod + def _add_proxied_attribute( + cls, + methname: str, + globals_: MutableMapping[str, Any], + locals_: MutableMapping[str, Any], + attr_names: Set[str], + ) -> None: + if not methname.startswith("_"): + meth = getattr(cls, methname) + if callable(meth): + locals_[methname] = cls._create_method_proxy( + methname, globals_, locals_ + ) + else: + attr_names.add(methname) + + @classmethod + def _create_method_proxy( + cls, + name: str, + globals_: MutableMapping[str, Any], + locals_: MutableMapping[str, Any], + ) -> Callable[..., Any]: + fn = getattr(cls, name) + + def _name_error(name: str, from_: Exception) -> NoReturn: + raise NameError( + "Can't invoke function '%s', as the proxy object has " + "not yet been " + "established for the Alembic '%s' class. " + "Try placing this code inside a callable." + % (name, cls.__name__) + ) from from_ + + globals_["_name_error"] = _name_error + + translations = getattr(fn, "_legacy_translations", []) + if translations: + spec = inspect_getfullargspec(fn) + if spec[0] and spec[0][0] == "self": + spec[0].pop(0) + + outer_args = inner_args = "*args, **kw" + translate_str = "args, kw = _translate(%r, %r, %r, args, kw)" % ( + fn.__name__, + tuple(spec), + translations, + ) + + def translate( + fn_name: str, spec: Any, translations: Any, args: Any, kw: Any + ) -> Any: + return_kw = {} + return_args = [] + + for oldname, newname in translations: + if oldname in kw: + warnings.warn( + "Argument %r is now named %r " + "for method %s()." 
% (oldname, newname, fn_name) + ) + return_kw[newname] = kw.pop(oldname) + return_kw.update(kw) + + args = list(args) + if spec[3]: + pos_only = spec[0][: -len(spec[3])] + else: + pos_only = spec[0] + for arg in pos_only: + if arg not in return_kw: + try: + return_args.append(args.pop(0)) + except IndexError: + raise TypeError( + "missing required positional argument: %s" + % arg + ) + return_args.extend(args) + + return return_args, return_kw + + globals_["_translate"] = translate + else: + outer_args = "*args, **kw" + inner_args = "*args, **kw" + translate_str = "" + + func_text = textwrap.dedent( + """\ + def %(name)s(%(args)s): + %(doc)r + %(translate)s + try: + p = _proxy + except NameError as ne: + _name_error('%(name)s', ne) + return _proxy.%(name)s(%(apply_kw)s) + e + """ + % { + "name": name, + "translate": translate_str, + "args": outer_args, + "apply_kw": inner_args, + "doc": fn.__doc__, + } + ) + lcl: MutableMapping[str, Any] = {} + + exec(func_text, cast("Dict[str, Any]", globals_), lcl) + return cast("Callable[..., Any]", lcl[name]) + + +def _with_legacy_names(translations: Any) -> Any: + def decorate(fn: _C) -> _C: + fn._legacy_translations = translations # type: ignore[attr-defined] + return fn + + return decorate + + +def rev_id() -> str: + return uuid.uuid4().hex[-12:] + + +@overload +def to_tuple(x: Any, default: Tuple[Any, ...]) -> Tuple[Any, ...]: ... + + +@overload +def to_tuple(x: None, default: Optional[_T] = ...) -> _T: ... + + +@overload +def to_tuple( + x: Any, default: Optional[Tuple[Any, ...]] = None +) -> Tuple[Any, ...]: ... + + +def to_tuple( + x: Any, default: Optional[Tuple[Any, ...]] = None +) -> Optional[Tuple[Any, ...]]: + if x is None: + return default + elif isinstance(x, str): + return (x,) + elif isinstance(x, Iterable): + return tuple(x) + else: + return (x,) + + +def dedupe_tuple(tup: Tuple[str, ...]) -> Tuple[str, ...]: + return tuple(unique_list(tup)) + + +class Dispatcher: + def __init__(self, uselist: bool = False) -> None: + self._registry: Dict[Tuple[Any, ...], Any] = {} + self.uselist = uselist + + def dispatch_for( + self, target: Any, qualifier: str = "default" + ) -> Callable[[_C], _C]: + def decorate(fn: _C) -> _C: + if self.uselist: + self._registry.setdefault((target, qualifier), []).append(fn) + else: + assert (target, qualifier) not in self._registry + self._registry[(target, qualifier)] = fn + return fn + + return decorate + + def dispatch(self, obj: Any, qualifier: str = "default") -> Any: + if isinstance(obj, str): + targets: Sequence[Any] = [obj] + elif isinstance(obj, type): + targets = obj.__mro__ + else: + targets = type(obj).__mro__ + + for spcls in targets: + if qualifier != "default" and (spcls, qualifier) in self._registry: + return self._fn_or_list(self._registry[(spcls, qualifier)]) + elif (spcls, "default") in self._registry: + return self._fn_or_list(self._registry[(spcls, "default")]) + else: + raise ValueError("no dispatch function for object: %s" % obj) + + def _fn_or_list( + self, fn_or_list: Union[List[Callable[..., Any]], Callable[..., Any]] + ) -> Callable[..., Any]: + if self.uselist: + + def go(*arg: Any, **kw: Any) -> None: + if TYPE_CHECKING: + assert isinstance(fn_or_list, Sequence) + for fn in fn_or_list: + fn(*arg, **kw) + + return go + else: + return fn_or_list # type: ignore + + def branch(self) -> Dispatcher: + """Return a copy of this dispatcher that is independently + writable.""" + + d = Dispatcher() + if self.uselist: + d._registry.update( + (k, [fn for fn in self._registry[k]]) for k in 
self._registry + ) + else: + d._registry.update(self._registry) + return d + + +def not_none(value: Optional[_T]) -> _T: + assert value is not None + return value diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/util/messaging.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/util/messaging.py new file mode 100644 index 0000000000000000000000000000000000000000..4c08f16e7e180fbd69eb3505c4e1107fe54da0c5 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/util/messaging.py @@ -0,0 +1,122 @@ +from __future__ import annotations + +from collections.abc import Iterable +from contextlib import contextmanager +import logging +import sys +import textwrap +from typing import Iterator +from typing import Optional +from typing import TextIO +from typing import Union +import warnings + +from sqlalchemy.engine import url + +log = logging.getLogger(__name__) + +# disable "no handler found" errors +logging.getLogger("alembic").addHandler(logging.NullHandler()) + + +try: + import fcntl + import termios + import struct + + ioctl = fcntl.ioctl(0, termios.TIOCGWINSZ, struct.pack("HHHH", 0, 0, 0, 0)) + _h, TERMWIDTH, _hp, _wp = struct.unpack("HHHH", ioctl) + if TERMWIDTH <= 0: # can occur if running in emacs pseudo-tty + TERMWIDTH = None +except (ImportError, OSError): + TERMWIDTH = None + + +def write_outstream( + stream: TextIO, *text: Union[str, bytes], quiet: bool = False +) -> None: + if quiet: + return + encoding = getattr(stream, "encoding", "ascii") or "ascii" + for t in text: + if not isinstance(t, bytes): + t = t.encode(encoding, "replace") + t = t.decode(encoding) + try: + stream.write(t) + except OSError: + # suppress "broken pipe" errors. + # no known way to handle this on Python 3 however + # as the exception is "ignored" (noisily) in TextIOWrapper. 
+ break + + +@contextmanager +def status( + status_msg: str, newline: bool = False, quiet: bool = False +) -> Iterator[None]: + msg(status_msg + " ...", newline, flush=True, quiet=quiet) + try: + yield + except: + if not quiet: + write_outstream(sys.stdout, " FAILED\n") + raise + else: + if not quiet: + write_outstream(sys.stdout, " done\n") + + +def err(message: str, quiet: bool = False) -> None: + log.error(message) + msg(f"FAILED: {message}", quiet=quiet) + sys.exit(-1) + + +def obfuscate_url_pw(input_url: str) -> str: + return url.make_url(input_url).render_as_string(hide_password=True) + + +def warn(msg: str, stacklevel: int = 2) -> None: + warnings.warn(msg, UserWarning, stacklevel=stacklevel) + + +def warn_deprecated(msg: str, stacklevel: int = 2) -> None: + warnings.warn(msg, DeprecationWarning, stacklevel=stacklevel) + + +def msg( + msg: str, newline: bool = True, flush: bool = False, quiet: bool = False +) -> None: + if quiet: + return + if TERMWIDTH is None: + write_outstream(sys.stdout, msg) + if newline: + write_outstream(sys.stdout, "\n") + else: + # left indent output lines + indent = " " + lines = textwrap.wrap( + msg, + TERMWIDTH, + initial_indent=indent, + subsequent_indent=indent, + ) + if len(lines) > 1: + for line in lines[0:-1]: + write_outstream(sys.stdout, line, "\n") + write_outstream(sys.stdout, lines[-1], ("\n" if newline else "")) + if flush: + sys.stdout.flush() + + +def format_as_comma(value: Optional[Union[str, Iterable[str]]]) -> str: + if value is None: + return "" + elif isinstance(value, str): + return value + elif isinstance(value, Iterable): + return ", ".join(value) + else: + raise ValueError("Don't know how to comma-format %r" % value) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/util/pyfiles.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/util/pyfiles.py new file mode 100644 index 0000000000000000000000000000000000000000..6b75d57792384b890a44d4c096e9a617254e7344 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/util/pyfiles.py @@ -0,0 +1,153 @@ +from __future__ import annotations + +import atexit +from contextlib import ExitStack +import importlib +import importlib.machinery +import importlib.util +import os +import pathlib +import re +import tempfile +from types import ModuleType +from typing import Any +from typing import Optional +from typing import Union + +from mako import exceptions +from mako.template import Template + +from . import compat +from .exc import CommandError + + +def template_to_file( + template_file: Union[str, os.PathLike[str]], + dest: Union[str, os.PathLike[str]], + output_encoding: str, + *, + append_with_newlines: bool = False, + **kw: Any, +) -> None: + template = Template(filename=_preserving_path_as_str(template_file)) + try: + output = template.render_unicode(**kw).encode(output_encoding) + except: + with tempfile.NamedTemporaryFile(suffix=".txt", delete=False) as ntf: + ntf.write( + exceptions.text_error_template() + .render_unicode() + .encode(output_encoding) + ) + fname = ntf.name + raise CommandError( + "Template rendering failed; see %s for a " + "template-oriented traceback." 
% fname + ) + else: + with open(dest, "ab" if append_with_newlines else "wb") as f: + if append_with_newlines: + f.write("\n\n".encode(output_encoding)) + f.write(output) + + +def coerce_resource_to_filename(fname_or_resource: str) -> pathlib.Path: + """Interpret a filename as either a filesystem location or as a package + resource. + + Names that are non absolute paths and contain a colon + are interpreted as resources and coerced to a file location. + + """ + # TODO: there seem to be zero tests for the package resource codepath + if not os.path.isabs(fname_or_resource) and ":" in fname_or_resource: + tokens = fname_or_resource.split(":") + + # from https://importlib-resources.readthedocs.io/en/latest/migration.html#pkg-resources-resource-filename # noqa E501 + + file_manager = ExitStack() + atexit.register(file_manager.close) + + ref = compat.importlib_resources.files(tokens[0]) + for tok in tokens[1:]: + ref = ref / tok + fname_or_resource = file_manager.enter_context( # type: ignore[assignment] # noqa: E501 + compat.importlib_resources.as_file(ref) + ) + return pathlib.Path(fname_or_resource) + + +def pyc_file_from_path( + path: Union[str, os.PathLike[str]], +) -> Optional[pathlib.Path]: + """Given a python source path, locate the .pyc.""" + + pathpath = pathlib.Path(path) + candidate = pathlib.Path( + importlib.util.cache_from_source(pathpath.as_posix()) + ) + if candidate.exists(): + return candidate + + # even for pep3147, fall back to the old way of finding .pyc files, + # to support sourceless operation + ext = pathpath.suffix + for ext in importlib.machinery.BYTECODE_SUFFIXES: + if pathpath.with_suffix(ext).exists(): + return pathpath.with_suffix(ext) + else: + return None + + +def load_python_file( + dir_: Union[str, os.PathLike[str]], filename: Union[str, os.PathLike[str]] +) -> ModuleType: + """Load a file from the given path as a Python module.""" + + dir_ = pathlib.Path(dir_) + filename_as_path = pathlib.Path(filename) + filename = filename_as_path.name + + module_id = re.sub(r"\W", "_", filename) + path = dir_ / filename + ext = path.suffix + if ext == ".py": + if path.exists(): + module = load_module_py(module_id, path) + else: + pyc_path = pyc_file_from_path(path) + if pyc_path is None: + raise ImportError("Can't find Python file %s" % path) + else: + module = load_module_py(module_id, pyc_path) + elif ext in (".pyc", ".pyo"): + module = load_module_py(module_id, path) + else: + assert False + return module + + +def load_module_py( + module_id: str, path: Union[str, os.PathLike[str]] +) -> ModuleType: + spec = importlib.util.spec_from_file_location(module_id, path) + assert spec + module = importlib.util.module_from_spec(spec) + spec.loader.exec_module(module) # type: ignore + return module + + +def _preserving_path_as_str(path: Union[str, os.PathLike[str]]) -> str: + """receive str/pathlike and return a string. + + Does not convert an incoming string path to a Path first, to help with + unit tests that are doing string path round trips without OS-specific + processing if not necessary. 
+ + """ + if isinstance(path, str): + return path + elif isinstance(path, pathlib.PurePath): + return str(path) + else: + return str(pathlib.Path(path)) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/util/sqla_compat.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/util/sqla_compat.py new file mode 100644 index 0000000000000000000000000000000000000000..a909ead7f4a5a84728a355685fe417e6c304d5aa --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/alembic/util/sqla_compat.py @@ -0,0 +1,495 @@ +# mypy: allow-untyped-defs, allow-incomplete-defs, allow-untyped-calls +# mypy: no-warn-return-any, allow-any-generics + +from __future__ import annotations + +import contextlib +import re +from typing import Any +from typing import Callable +from typing import Dict +from typing import Iterable +from typing import Iterator +from typing import Optional +from typing import Protocol +from typing import Set +from typing import Type +from typing import TYPE_CHECKING +from typing import TypeVar +from typing import Union + +from sqlalchemy import __version__ +from sqlalchemy import schema +from sqlalchemy import sql +from sqlalchemy import types as sqltypes +from sqlalchemy.schema import CheckConstraint +from sqlalchemy.schema import Column +from sqlalchemy.schema import ForeignKeyConstraint +from sqlalchemy.sql import visitors +from sqlalchemy.sql.base import DialectKWArgs +from sqlalchemy.sql.elements import BindParameter +from sqlalchemy.sql.elements import ColumnClause +from sqlalchemy.sql.elements import TextClause +from sqlalchemy.sql.elements import UnaryExpression +from sqlalchemy.sql.naming import _NONE_NAME as _NONE_NAME # type: ignore[attr-defined] # noqa: E501 +from sqlalchemy.sql.visitors import traverse +from typing_extensions import TypeGuard + +if TYPE_CHECKING: + from sqlalchemy import ClauseElement + from sqlalchemy import Identity + from sqlalchemy import Index + from sqlalchemy import Table + from sqlalchemy.engine import Connection + from sqlalchemy.engine import Dialect + from sqlalchemy.engine import Transaction + from sqlalchemy.sql.base import ColumnCollection + from sqlalchemy.sql.compiler import SQLCompiler + from sqlalchemy.sql.elements import ColumnElement + from sqlalchemy.sql.schema import Constraint + from sqlalchemy.sql.schema import SchemaItem + +_CE = TypeVar("_CE", bound=Union["ColumnElement[Any]", "SchemaItem"]) + + +class _CompilerProtocol(Protocol): + def __call__(self, element: Any, compiler: Any, **kw: Any) -> str: ... + + +def _safe_int(value: str) -> Union[int, str]: + try: + return int(value) + except: + return value + + +_vers = tuple( + [_safe_int(x) for x in re.findall(r"(\d+|[abc]\d)", __version__)] +) +# https://docs.sqlalchemy.org/en/latest/changelog/changelog_14.html#change-0c6e0cc67dfe6fac5164720e57ef307d +sqla_14_18 = _vers >= (1, 4, 18) +sqla_14_26 = _vers >= (1, 4, 26) +sqla_2 = _vers >= (2,) +sqlalchemy_version = __version__ + +if TYPE_CHECKING: + + def compiles( + element: Type[ClauseElement], *dialects: str + ) -> Callable[[_CompilerProtocol], _CompilerProtocol]: ... 
+ +else: + from sqlalchemy.ext.compiler import compiles # noqa: I100,I202 + + +identity_has_dialect_kwargs = issubclass(schema.Identity, DialectKWArgs) + + +def _get_identity_options_dict( + identity: Union[Identity, schema.Sequence, None], + dialect_kwargs: bool = False, +) -> Dict[str, Any]: + if identity is None: + return {} + elif identity_has_dialect_kwargs: + assert hasattr(identity, "_as_dict") + as_dict = identity._as_dict() + if dialect_kwargs: + assert isinstance(identity, DialectKWArgs) + as_dict.update(identity.dialect_kwargs) + else: + as_dict = {} + if isinstance(identity, schema.Identity): + # always=None means something different than always=False + as_dict["always"] = identity.always + if identity.on_null is not None: + as_dict["on_null"] = identity.on_null + # attributes common to Identity and Sequence + attrs = ( + "start", + "increment", + "minvalue", + "maxvalue", + "nominvalue", + "nomaxvalue", + "cycle", + "cache", + "order", + ) + as_dict.update( + { + key: getattr(identity, key, None) + for key in attrs + if getattr(identity, key, None) is not None + } + ) + return as_dict + + +if sqla_2: + from sqlalchemy.sql.base import _NoneName +else: + from sqlalchemy.util import symbol as _NoneName # type: ignore[assignment] + + +_ConstraintName = Union[None, str, _NoneName] +_ConstraintNameDefined = Union[str, _NoneName] + + +def constraint_name_defined( + name: _ConstraintName, +) -> TypeGuard[_ConstraintNameDefined]: + return name is _NONE_NAME or isinstance(name, (str, _NoneName)) + + +def constraint_name_string(name: _ConstraintName) -> TypeGuard[str]: + return isinstance(name, str) + + +def constraint_name_or_none(name: _ConstraintName) -> Optional[str]: + return name if constraint_name_string(name) else None + + +AUTOINCREMENT_DEFAULT = "auto" + + +@contextlib.contextmanager +def _ensure_scope_for_ddl( + connection: Optional[Connection], +) -> Iterator[None]: + try: + in_transaction = connection.in_transaction # type: ignore[union-attr] + except AttributeError: + # catch for MockConnection, None + in_transaction = None + pass + + # yield outside the catch + if in_transaction is None: + yield + else: + if not in_transaction(): + assert connection is not None + with connection.begin(): + yield + else: + yield + + +def _safe_begin_connection_transaction( + connection: Connection, +) -> Transaction: + transaction = connection.get_transaction() + if transaction: + return transaction + else: + return connection.begin() + + +def _safe_commit_connection_transaction( + connection: Connection, +) -> None: + transaction = connection.get_transaction() + if transaction: + transaction.commit() + + +def _safe_rollback_connection_transaction( + connection: Connection, +) -> None: + transaction = connection.get_transaction() + if transaction: + transaction.rollback() + + +def _get_connection_in_transaction(connection: Optional[Connection]) -> bool: + try: + in_transaction = connection.in_transaction # type: ignore + except AttributeError: + # catch for MockConnection + return False + else: + return in_transaction() + + +def _idx_table_bound_expressions(idx: Index) -> Iterable[ColumnElement[Any]]: + return idx.expressions # type: ignore + + +def _copy(schema_item: _CE, **kw) -> _CE: + if hasattr(schema_item, "_copy"): + return schema_item._copy(**kw) + else: + return schema_item.copy(**kw) # type: ignore[union-attr] + + +def _connectable_has_table( + connectable: Connection, tablename: str, schemaname: Union[str, None] +) -> bool: + return connectable.dialect.has_table(connectable, 
tablename, schemaname) + + +def _exec_on_inspector(inspector, statement, **params): + with inspector._operation_context() as conn: + return conn.execute(statement, params) + + +def _nullability_might_be_unset(metadata_column): + from sqlalchemy.sql import schema + + return metadata_column._user_defined_nullable is schema.NULL_UNSPECIFIED + + +def _server_default_is_computed(*server_default) -> bool: + return any(isinstance(sd, schema.Computed) for sd in server_default) + + +def _server_default_is_identity(*server_default) -> bool: + return any(isinstance(sd, schema.Identity) for sd in server_default) + + +def _table_for_constraint(constraint: Constraint) -> Table: + if isinstance(constraint, ForeignKeyConstraint): + table = constraint.parent + assert table is not None + return table # type: ignore[return-value] + else: + return constraint.table + + +def _columns_for_constraint(constraint): + if isinstance(constraint, ForeignKeyConstraint): + return [fk.parent for fk in constraint.elements] + elif isinstance(constraint, CheckConstraint): + return _find_columns(constraint.sqltext) + else: + return list(constraint.columns) + + +def _resolve_for_variant(type_, dialect): + if _type_has_variants(type_): + base_type, mapping = _get_variant_mapping(type_) + return mapping.get(dialect.name, base_type) + else: + return type_ + + +if hasattr(sqltypes.TypeEngine, "_variant_mapping"): # 2.0 + + def _type_has_variants(type_): + return bool(type_._variant_mapping) + + def _get_variant_mapping(type_): + return type_, type_._variant_mapping + +else: + + def _type_has_variants(type_): + return type(type_) is sqltypes.Variant + + def _get_variant_mapping(type_): + return type_.impl, type_.mapping + + +def _fk_spec(constraint: ForeignKeyConstraint) -> Any: + if TYPE_CHECKING: + assert constraint.columns is not None + assert constraint.elements is not None + assert isinstance(constraint.parent, Table) + + source_columns = [ + constraint.columns[key].name for key in constraint.column_keys + ] + + source_table = constraint.parent.name + source_schema = constraint.parent.schema + target_schema = constraint.elements[0].column.table.schema + target_table = constraint.elements[0].column.table.name + target_columns = [element.column.name for element in constraint.elements] + ondelete = constraint.ondelete + onupdate = constraint.onupdate + deferrable = constraint.deferrable + initially = constraint.initially + return ( + source_schema, + source_table, + source_columns, + target_schema, + target_table, + target_columns, + onupdate, + ondelete, + deferrable, + initially, + ) + + +def _fk_is_self_referential(constraint: ForeignKeyConstraint) -> bool: + spec = constraint.elements[0]._get_colspec() + tokens = spec.split(".") + tokens.pop(-1) # colname + tablekey = ".".join(tokens) + assert constraint.parent is not None + return tablekey == constraint.parent.key + + +def _is_type_bound(constraint: Constraint) -> bool: + # this deals with SQLAlchemy #3260, don't copy CHECK constraints + # that will be generated by the type. 
+ # new feature added for #3260 + return constraint._type_bound + + +def _find_columns(clause): + """locate Column objects within the given expression.""" + + cols: Set[ColumnElement[Any]] = set() + traverse(clause, {}, {"column": cols.add}) + return cols + + +def _remove_column_from_collection( + collection: ColumnCollection, column: Union[Column[Any], ColumnClause[Any]] +) -> None: + """remove a column from a ColumnCollection.""" + + # workaround for older SQLAlchemy, remove the + # same object that's present + assert column.key is not None + to_remove = collection[column.key] + + # SQLAlchemy 2.0 will use more ReadOnlyColumnCollection + # (renamed from ImmutableColumnCollection) + if hasattr(collection, "_immutable") or hasattr(collection, "_readonly"): + collection._parent.remove(to_remove) + else: + collection.remove(to_remove) + + +def _textual_index_column( + table: Table, text_: Union[str, TextClause, ColumnElement[Any]] +) -> Union[ColumnElement[Any], Column[Any]]: + """a workaround for the Index construct's severe lack of flexibility""" + if isinstance(text_, str): + c = Column(text_, sqltypes.NULLTYPE) + table.append_column(c) + return c + elif isinstance(text_, TextClause): + return _textual_index_element(table, text_) + elif isinstance(text_, _textual_index_element): + return _textual_index_column(table, text_.text) + elif isinstance(text_, sql.ColumnElement): + return _copy_expression(text_, table) + else: + raise ValueError("String or text() construct expected") + + +def _copy_expression(expression: _CE, target_table: Table) -> _CE: + def replace(col): + if ( + isinstance(col, Column) + and col.table is not None + and col.table is not target_table + ): + if col.name in target_table.c: + return target_table.c[col.name] + else: + c = _copy(col) + target_table.append_column(c) + return c + else: + return None + + return visitors.replacement_traverse( # type: ignore[call-overload] + expression, {}, replace + ) + + +class _textual_index_element(sql.ColumnElement): + """Wrap around a sqlalchemy text() construct in such a way that + we appear like a column-oriented SQL expression to an Index + construct. + + The issue here is that currently the Postgresql dialect, the biggest + recipient of functional indexes, keys all the index expressions to + the corresponding column expressions when rendering CREATE INDEX, + so the Index we create here needs to have a .columns collection that + is the same length as the .expressions collection. Ultimately + SQLAlchemy should support text() expressions in indexes. + + See SQLAlchemy issue 3174. 
+ + """ + + __visit_name__ = "_textual_idx_element" + + def __init__(self, table: Table, text: TextClause) -> None: + self.table = table + self.text = text + self.key = text.text + self.fake_column = schema.Column(self.text.text, sqltypes.NULLTYPE) + table.append_column(self.fake_column) + + def get_children(self, **kw): + return [self.fake_column] + + +@compiles(_textual_index_element) +def _render_textual_index_column( + element: _textual_index_element, compiler: SQLCompiler, **kw +) -> str: + return compiler.process(element.text, **kw) + + +class _literal_bindparam(BindParameter): + pass + + +@compiles(_literal_bindparam) +def _render_literal_bindparam( + element: _literal_bindparam, compiler: SQLCompiler, **kw +) -> str: + return compiler.render_literal_bindparam(element, **kw) + + +def _get_constraint_final_name( + constraint: Union[Index, Constraint], dialect: Optional[Dialect] +) -> Optional[str]: + if constraint.name is None: + return None + assert dialect is not None + # for SQLAlchemy 1.4 we would like to have the option to expand + # the use of "deferred" names for constraints as well as to have + # some flexibility with "None" name and similar; make use of new + # SQLAlchemy API to return what would be the final compiled form of + # the name for this dialect. + return dialect.identifier_preparer.format_constraint( + constraint, _alembic_quote=False + ) + + +def _constraint_is_named( + constraint: Union[Constraint, Index], dialect: Optional[Dialect] +) -> bool: + if constraint.name is None: + return False + assert dialect is not None + name = dialect.identifier_preparer.format_constraint( + constraint, _alembic_quote=False + ) + return name is not None + + +def is_expression_index(index: Index) -> bool: + for expr in index.expressions: + if is_expression(expr): + return True + return False + + +def is_expression(expr: Any) -> bool: + while isinstance(expr, UnaryExpression): + expr = expr.element + if not isinstance(expr, ColumnClause) or expr.is_literal: + return True + return False diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/colorlog-6.9.0.dist-info/INSTALLER b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/colorlog-6.9.0.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..5c69047b2eb8235994febeeae1da4a82365a240a --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/colorlog-6.9.0.dist-info/INSTALLER @@ -0,0 +1 @@ +uv \ No newline at end of file diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/colorlog-6.9.0.dist-info/LICENSE b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/colorlog-6.9.0.dist-info/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..ef3747bad8bc502664a46c8971e1ac6962ac60a8 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/colorlog-6.9.0.dist-info/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2012-2021 Sam Clements + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following 
conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/colorlog-6.9.0.dist-info/METADATA b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/colorlog-6.9.0.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..2ea89044262cb4953178757597558a3ea001c6d0 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/colorlog-6.9.0.dist-info/METADATA @@ -0,0 +1,310 @@ +Metadata-Version: 2.1 +Name: colorlog +Version: 6.9.0 +Summary: Add colours to the output of Python's logging module. +Home-page: https://github.com/borntyping/python-colorlog +Author: Sam Clements +Author-email: sam@borntyping.co.uk +License: MIT License +Classifier: Development Status :: 5 - Production/Stable +Classifier: Environment :: Console +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: MIT License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: 3.13 +Classifier: Topic :: Terminals +Classifier: Topic :: Utilities +Requires-Python: >=3.6 +Description-Content-Type: text/markdown +License-File: LICENSE +Requires-Dist: colorama; sys_platform == "win32" +Provides-Extra: development +Requires-Dist: black; extra == "development" +Requires-Dist: flake8; extra == "development" +Requires-Dist: mypy; extra == "development" +Requires-Dist: pytest; extra == "development" +Requires-Dist: types-colorama; extra == "development" + +# Log formatting with colors! + +[![](https://img.shields.io/pypi/v/colorlog.svg)](https://pypi.org/project/colorlog/) +[![](https://img.shields.io/pypi/l/colorlog.svg)](https://pypi.org/project/colorlog/) + +Add colours to the output of Python's `logging` module. + +* [Source on GitHub](https://github.com/borntyping/python-colorlog) +* [Packages on PyPI](https://pypi.org/pypi/colorlog/) + +## Status + +colorlog currently requires Python 3.6 or higher. Older versions (below 5.x.x) +support Python 2.6 and above. + +* colorlog 6.x requires Python 3.6 or higher. +* colorlog 5.x is an interim version that will warn Python 2 users to downgrade. +* colorlog 4.x is the final version supporting Python 2. + +[colorama] is included as a required dependency and initialised when using +colorlog on Windows. 
+ +This library is over a decade old and supported a wide set of Python versions +for most of its life, which has made it a difficult library to add new features +to. colorlog 6 may break backwards compatibility so that newer features +can be added more easily, but may still not accept all changes or feature +requests. colorlog 4 might accept essential bugfixes but should not be +considered actively maintained and will not accept any major changes or new +features. + +## Installation + +Install from PyPI with: + +```bash +pip install colorlog +``` + +Several Linux distributions provide official packages ([Debian], [Arch], [Fedora], +[Gentoo], [OpenSuse] and [Ubuntu]), and others have user provided packages +([BSD ports], [Conda]). + +## Usage + +```python +import colorlog + +handler = colorlog.StreamHandler() +handler.setFormatter(colorlog.ColoredFormatter( + '%(log_color)s%(levelname)s:%(name)s:%(message)s')) + +logger = colorlog.getLogger('example') +logger.addHandler(handler) +``` + +The `ColoredFormatter` class takes several arguments: + +- `format`: The format string used to output the message (required). +- `datefmt`: An optional date format passed to the base class. See [`logging.Formatter`][Formatter]. +- `reset`: Implicitly adds a color reset code to the message output, unless the output already ends with one. Defaults to `True`. +- `log_colors`: A mapping of record level names to color names. The defaults can be found in `colorlog.default_log_colors`, or the below example. +- `secondary_log_colors`: A mapping of names to `log_colors` style mappings, defining additional colors that can be used in format strings. See below for an example. +- `style`: Available on Python 3.2 and above. See [`logging.Formatter`][Formatter]. + +Color escape codes can be selected based on the log records level, by adding +parameters to the format string: + +- `log_color`: Return the color associated with the records level. +- `_log_color`: Return another color based on the records level if the formatter has secondary colors configured (see `secondary_log_colors` below). + +Multiple escape codes can be used at once by joining them with commas when +configuring the color for a log level (but can't be used directly in the format +string). For example, `black,bg_white` would use the escape codes for black +text on a white background. + +The following escape codes are made available for use in the format string: + +- `{color}`, `fg_{color}`, `bg_{color}`: Foreground and background colors. +- `bold`, `bold_{color}`, `fg_bold_{color}`, `bg_bold_{color}`: Bold/bright colors. +- `thin`, `thin_{color}`, `fg_thin_{color}`: Thin colors (terminal dependent). +- `reset`: Clear all formatting (both foreground and background colors). + +The available color names are: + +- `black` +- `red` +- `green` +- `yellow` +- `blue`, +- `purple` +- `cyan` +- `white` + +You can also use "bright" colors. These aren't standard ANSI codes, and +support for these varies wildly across different terminals. + +- `light_black` +- `light_red` +- `light_green` +- `light_yellow` +- `light_blue` +- `light_purple` +- `light_cyan` +- `light_white` + +## Examples + +![Example output](docs/example.png) + +The following code creates a `ColoredFormatter` for use in a logging setup, +using the default values for each argument. 
+ +```python +from colorlog import ColoredFormatter + +formatter = ColoredFormatter( + "%(log_color)s%(levelname)-8s%(reset)s %(blue)s%(message)s", + datefmt=None, + reset=True, + log_colors={ + 'DEBUG': 'cyan', + 'INFO': 'green', + 'WARNING': 'yellow', + 'ERROR': 'red', + 'CRITICAL': 'red,bg_white', + }, + secondary_log_colors={}, + style='%' +) +``` + +### Using `secondary_log_colors` + +Secondary log colors are a way to have more than one color that is selected +based on the log level. Each key in `secondary_log_colors` adds an attribute +that can be used in format strings (`message` becomes `message_log_color`), and +has a corresponding value that is identical in format to the `log_colors` +argument. + +The following example highlights the level name using the default log colors, +and highlights the message in red for `error` and `critical` level log messages. + +```python +from colorlog import ColoredFormatter + +formatter = ColoredFormatter( + "%(log_color)s%(levelname)-8s%(reset)s %(message_log_color)s%(message)s", + secondary_log_colors={ + 'message': { + 'ERROR': 'red', + 'CRITICAL': 'red' + } + } +) +``` + +### With [`dictConfig`][dictConfig] + +```python +logging.config.dictConfig({ + 'formatters': { + 'colored': { + '()': 'colorlog.ColoredFormatter', + 'format': "%(log_color)s%(levelname)-8s%(reset)s %(blue)s%(message)s" + } + } +}) +``` + +A full example dictionary can be found in `tests/test_colorlog.py`. + +### With [`fileConfig`][fileConfig] + +```ini +... + +[formatters] +keys=color + +[formatter_color] +class=colorlog.ColoredFormatter +format=%(log_color)s%(levelname)-8s%(reset)s %(bg_blue)s[%(name)s]%(reset)s %(message)s from fileConfig +datefmt=%m-%d %H:%M:%S +``` + +An instance of ColoredFormatter created with those arguments will then be used +by any handlers that are configured to use the `color` formatter. + +A full example configuration can be found in `tests/test_config.ini`. + +### With custom log levels + +ColoredFormatter will work with custom log levels added with +[`logging.addLevelName`][addLevelName]: + +```python +import logging, colorlog +TRACE = 5 +logging.addLevelName(TRACE, 'TRACE') +formatter = colorlog.ColoredFormatter(log_colors={'TRACE': 'yellow'}) +handler = logging.StreamHandler() +handler.setFormatter(formatter) +logger = logging.getLogger('example') +logger.addHandler(handler) +logger.setLevel('TRACE') +logger.log(TRACE, 'a message using a custom level') +``` + +## Tests + +Tests similar to the above examples are found in `tests/test_colorlog.py`. + +## Status + +colorlog is in maintenance mode. I try and ensure bugfixes are published, +but compatibility with Python 2.6+ and Python 3+ makes this a difficult +codebase to add features to. Any changes that might break backwards +compatibility for existing users will not be considered. + +## Alternatives + +There are some more modern libraries for improving Python logging you may +find useful. + +- [structlog] +- [jsonlog] + +## Projects using colorlog + +GitHub provides [a list of projects that depend on colorlog][dependents]. + +Some early adopters included [Errbot], [Pythran], and [zenlog]. 
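The `dictConfig` snippet in the "With dictConfig" section above defines only the formatter; a working configuration also needs a `version` key, a handler, and a logger wired to it. Below is a minimal self-contained sketch of that wiring. The handler name `console`, the logger name `example`, and the chosen levels are illustrative assumptions, not values taken from the colorlog documentation or its tests.

```python
import logging
import logging.config

logging.config.dictConfig({
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'colored': {
            # '()' tells dictConfig to build the formatter with this factory
            '()': 'colorlog.ColoredFormatter',
            'format': "%(log_color)s%(levelname)-8s%(reset)s %(blue)s%(message)s",
        }
    },
    'handlers': {
        # 'console' is an assumed handler name for this sketch
        'console': {
            'class': 'logging.StreamHandler',
            'formatter': 'colored',
        }
    },
    'root': {
        'level': 'DEBUG',
        'handlers': ['console'],
    },
})

# 'example' is an assumed logger name; it propagates to the root handler above
logging.getLogger('example').warning('a colored warning via dictConfig')
```

Handlers configured this way pick up the `colored` formatter in the same manner as the `fileConfig` approach shown earlier.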
+ +## Licence + +Copyright (c) 2012-2021 Sam Clements + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +[dictConfig]: http://docs.python.org/3/library/logging.config.html#logging.config.dictConfig +[fileConfig]: http://docs.python.org/3/library/logging.config.html#logging.config.fileConfig +[addLevelName]: https://docs.python.org/3/library/logging.html#logging.addLevelName +[Formatter]: http://docs.python.org/3/library/logging.html#logging.Formatter +[tox]: http://tox.readthedocs.org/ +[Arch]: https://archlinux.org/packages/extra/any/python-colorlog/ +[BSD ports]: https://www.freshports.org/devel/py-colorlog/ +[colorama]: https://pypi.python.org/pypi/colorama +[Conda]: https://anaconda.org/conda-forge/colorlog +[Debian]: [https://packages.debian.org/buster/python3-colorlog](https://packages.debian.org/buster/python3-colorlog) +[Errbot]: http://errbot.io/ +[Fedora]: https://src.fedoraproject.org/rpms/python-colorlog +[Gentoo]: https://packages.gentoo.org/packages/dev-python/colorlog +[OpenSuse]: http://rpm.pbone.net/index.php3?stat=3&search=python-colorlog&srodzaj=3 +[Pythran]: https://github.com/serge-sans-paille/pythran +[Ubuntu]: https://launchpad.net/python-colorlog +[zenlog]: https://github.com/ManufacturaInd/python-zenlog +[structlog]: https://www.structlog.org/en/stable/ +[jsonlog]: https://github.com/borntyping/jsonlog +[dependents]: https://github.com/borntyping/python-colorlog/network/dependents?package_id=UGFja2FnZS01MDk3NDcyMQ%3D%3D diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/colorlog-6.9.0.dist-info/RECORD b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/colorlog-6.9.0.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..04d5fa9c5b3d6efcabdc52630c3a2f354208ed87 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/colorlog-6.9.0.dist-info/RECORD @@ -0,0 +1,12 @@ +colorlog-6.9.0.dist-info/INSTALLER,sha256=5hhM4Q4mYTT9z6QB6PGpUAW81PGNFrYrdXMj4oM_6ak,2 +colorlog-6.9.0.dist-info/LICENSE,sha256=sdkIK8SDYj_Vn8cnm0V_DkDZQqdkJs3iVyOeBN_kElo,1107 +colorlog-6.9.0.dist-info/METADATA,sha256=Pw74JDZRvwpYK4QaE1z-zBLvLLTqnw9_v7u1j1n0gFA,10965 +colorlog-6.9.0.dist-info/RECORD,, +colorlog-6.9.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +colorlog-6.9.0.dist-info/WHEEL,sha256=eOLhNAGa2EW3wWl_TU484h7q1UNgy0JXjjoqKoxAAQc,92 +colorlog-6.9.0.dist-info/top_level.txt,sha256=CzNs7OLwLxUhbQzCCT2ore3b_ZzAXusw0tWIX79iWow,9 
+colorlog/__init__.py,sha256=wzxah0vO2HpJheG0gXY4rMx_MyFYyfHsR8rTucg5PSI,1180 +colorlog/escape_codes.py,sha256=lFpcWJqCo3d8kcVZpJFkuKrYbwtwdqdCeNFH9QnKA1Y,2438 +colorlog/formatter.py,sha256=ptHZw4Ulq5G-XBlV9DO4B7PIONBAoHL-1YAjIdQbqyk,7741 +colorlog/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +colorlog/wrappers.py,sha256=onRQIxCBw8V1vA7zz_kavg4Mk25h8FH6vr8SQw4H6_Y,2399 diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/colorlog-6.9.0.dist-info/REQUESTED b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/colorlog-6.9.0.dist-info/REQUESTED new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/colorlog-6.9.0.dist-info/WHEEL b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/colorlog-6.9.0.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..08519a6603c2a9e5707d1c0cca7dc567c56ab5be --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/colorlog-6.9.0.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.44.0) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/colorlog-6.9.0.dist-info/top_level.txt b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/colorlog-6.9.0.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..3d8df46d14496059ec31bd06e8928ef5050f46f2 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/colorlog-6.9.0.dist-info/top_level.txt @@ -0,0 +1 @@ +colorlog diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/cusparselt/LICENSE.txt b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/cusparselt/LICENSE.txt new file mode 100644 index 0000000000000000000000000000000000000000..890839b670533e4bbd2d96c3ab36525be2aff773 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/cusparselt/LICENSE.txt @@ -0,0 +1,165 @@ +LICENSE AGREEMENT FOR NVIDIA SOFTWARE DEVELOPMENT KITS + +This license agreement, including exhibits attached ("Agreement”) is a legal agreement between you and NVIDIA Corporation ("NVIDIA") and governs your use of a NVIDIA software development kit (“SDK”). + +Each SDK has its own set of software and materials, but here is a description of the types of items that may be included in a SDK: source code, header files, APIs, data sets and assets (examples include images, textures, models, scenes, videos, native API input/output files), binary software, sample code, libraries, utility programs, programming code and documentation. + +This Agreement can be accepted only by an adult of legal age of majority in the country in which the SDK is used. + +If you are entering into this Agreement on behalf of a company or other legal entity, you represent that you have the legal authority to bind the entity to this Agreement, in which case “you” will mean the entity you represent. + +If you don’t have the required age or authority to accept this Agreement, or if you don’t accept all the terms and conditions of this Agreement, do not download, install or use the SDK. 
+ +You agree to use the SDK only for purposes that are permitted by (a) this Agreement, and (b) any applicable law, regulation or generally accepted practices or guidelines in the relevant jurisdictions. + +1. License. + +1.1 Grant + +Subject to the terms of this Agreement, NVIDIA hereby grants you a non-exclusive, non-transferable license, without the right to sublicense (except as expressly provided in this Agreement) to: + +(i) Install and use the SDK, + +(ii) Modify and create derivative works of sample source code delivered in the SDK, and + +(iii) Distribute those portions of the SDK that are identified in this Agreement as distributable, as incorporated in object code format into a software application that meets the distribution requirements indicated in this Agreement. + +1.2 Distribution Requirements + +These are the distribution requirements for you to exercise the distribution grant: + +(i) Your application must have material additional functionality, beyond the included portions of the SDK. + +(ii) The distributable portions of the SDK shall only be accessed by your application. + +(iii) The following notice shall be included in modifications and derivative works of sample source code distributed: “This software contains source code provided by NVIDIA Corporation.” + +(iv) Unless a developer tool is identified in this Agreement as distributable, it is delivered for your internal use only. + +(v) The terms under which you distribute your application must be consistent with the terms of this Agreement, including (without limitation) terms relating to the license grant and license restrictions and protection of NVIDIA’s intellectual property rights. Additionally, you agree that you will protect the privacy, security and legal rights of your application users. + +(vi) You agree to notify NVIDIA in writing of any known or suspected distribution or use of the SDK not in compliance with the requirements of this Agreement, and to enforce the terms of your agreements with respect to distributed SDK. + +1.3 Authorized Users + +You may allow employees and contractors of your entity or of your subsidiary(ies) to access and use the SDK from your secure network to perform work on your behalf. + +If you are an academic institution you may allow users enrolled or employed by the academic institution to access and use the SDK from your secure network. + +You are responsible for the compliance with the terms of this Agreement by your authorized users. If you become aware that your authorized users didn’t follow the terms of this Agreement, you agree to take reasonable steps to resolve the non-compliance and prevent new occurrences. + +1.4 Pre-Release SDK +The SDK versions identified as alpha, beta, preview or otherwise as pre-release, may not be fully functional, may contain errors or design flaws, and may have reduced or different security, privacy, accessibility, availability, and reliability standards relative to commercial versions of NVIDIA software and materials. Use of a pre-release SDK may result in unexpected results, loss of data, project delays or other unpredictable damage or loss. +You may use a pre-release SDK at your own risk, understanding that pre-release SDKs are not intended for use in production or business-critical systems. +NVIDIA may choose not to make available a commercial version of any pre-release SDK. NVIDIA may also choose to abandon development and terminate the availability of a pre-release SDK at any time without liability. 
+1.5 Updates + +NVIDIA may, at its option, make available patches, workarounds or other updates to this SDK. Unless the updates are provided with their separate governing terms, they are deemed part of the SDK licensed to you as provided in this Agreement. + +You agree that the form and content of the SDK that NVIDIA provides may change without prior notice to you. While NVIDIA generally maintains compatibility between versions, NVIDIA may in some cases make changes that introduce incompatibilities in future versions of the SDK. + +1.6 Third Party Licenses + +The SDK may come bundled with, or otherwise include or be distributed with, third-party software licensed by a NVIDIA supplier and/or open source software provided under an open source license. Use of third-party software is subject to the third-party license terms, or in the absence of third-party terms, the terms of this Agreement. Copyright to third party software is held by the copyright holders indicated in the third-party software or license. + +1.7 Reservation of Rights + +NVIDIA reserves all rights, title and interest in and to the SDK not expressly granted to you under this Agreement. + +2. Limitations. + +The following license limitations apply to your use of the SDK: + +2.1 You may not reverse engineer, decompile or disassemble, or remove copyright or other proprietary notices from any portion of the SDK or copies of the SDK. + +2.2 Except as expressly provided in this Agreement, you may not copy, sell, rent, sublicense, transfer, distribute, modify, or create derivative works of any portion of the SDK. For clarity, you may not distribute or sublicense the SDK as a stand-alone product. + +2.3 Unless you have an agreement with NVIDIA for this purpose, you may not indicate that an application created with the SDK is sponsored or endorsed by NVIDIA. + +2.4 You may not bypass, disable, or circumvent any encryption, security, digital rights management or authentication mechanism in the SDK. + +2.5 You may not use the SDK in any manner that would cause it to become subject to an open source software license. As examples, licenses that require as a condition of use, modification, and/or distribution that the SDK be (i) disclosed or distributed in source code form; (ii) licensed for the purpose of making derivative works; or (iii) redistributable at no charge. + +2.6 Unless you have an agreement with NVIDIA for this purpose, you may not use the SDK with any system or application where the use or failure of the system or application can reasonably be expected to threaten or result in personal injury, death, or catastrophic loss. Examples include use in avionics, navigation, military, medical, life support or other life critical applications. NVIDIA does not design, test or manufacture the SDK for these critical uses and NVIDIA shall not be liable to you or any third party, in whole or in part, for any claims or damages arising from such uses. + +2.7 You agree to defend, indemnify and hold harmless NVIDIA and its affiliates, and their respective employees, contractors, agents, officers and directors, from and against any and all claims, damages, obligations, losses, liabilities, costs or debt, fines, restitutions and expenses (including but not limited to attorney’s fees and costs incident to establishing the right of indemnification) arising out of or related to your use of the SDK outside of the scope of this Agreement, or not in compliance with its terms. + +3. Ownership. 
+ +3.1 NVIDIA or its licensors hold all rights, title and interest in and to the SDK and its modifications and derivative works, including their respective intellectual property rights, subject to your rights under Section 3.2. This SDK may include software and materials from NVIDIA’s licensors, and these licensors are intended third party beneficiaries that may enforce this Agreement with respect to their intellectual property rights. + +3.2 You hold all rights, title and interest in and to your applications and your derivative works of the sample source code delivered in the SDK, including their respective intellectual property rights, subject to NVIDIA’s rights under section 3.1. + +3.3 You may, but don’t have to, provide to NVIDIA suggestions, feature requests or other feedback regarding the SDK, including possible enhancements or modifications to the SDK. For any feedback that you voluntarily provide, you hereby grant NVIDIA and its affiliates a perpetual, non-exclusive, worldwide, irrevocable license to use, reproduce, modify, license, sublicense (through multiple tiers of sublicensees), and distribute (through multiple tiers of distributors) it without the payment of any royalties or fees to you. NVIDIA will use feedback at its choice. NVIDIA is constantly looking for ways to improve its products, so you may send feedback to NVIDIA through the developer portal at https://developer.nvidia.com. + +4. No Warranties. + +THE SDK IS PROVIDED BY NVIDIA “AS IS” AND “WITH ALL FAULTS.” TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA AND ITS AFFILIATES EXPRESSLY DISCLAIM ALL WARRANTIES OF ANY KIND OR NATURE, WHETHER EXPRESS, IMPLIED OR STATUTORY, INCLUDING, BUT NOT LIMITED TO, ANY WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE, NON-INFRINGEMENT, OR THE ABSENCE OF ANY DEFECTS THEREIN, WHETHER LATENT OR PATENT. NO WARRANTY IS MADE ON THE BASIS OF TRADE USAGE, COURSE OF DEALING OR COURSE OF TRADE. + +5. Limitations of Liability. + +TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA AND ITS AFFILIATES SHALL NOT BE LIABLE FOR ANY SPECIAL, INCIDENTAL, PUNITIVE OR CONSEQUENTIAL DAMAGES, OR ANY LOST PROFITS, LOSS OF USE, LOSS OF DATA OR LOSS OF GOODWILL, OR THE COSTS OF PROCURING SUBSTITUTE PRODUCTS, ARISING OUT OF OR IN CONNECTION WITH THIS AGREEMENT OR THE USE OR PERFORMANCE OF THE SDK, WHETHER SUCH LIABILITY ARISES FROM ANY CLAIM BASED UPON BREACH OF CONTRACT, BREACH OF WARRANTY, TORT (INCLUDING NEGLIGENCE), PRODUCT LIABILITY OR ANY OTHER CAUSE OF ACTION OR THEORY OF LIABILITY. IN NO EVENT WILL NVIDIA’S AND ITS AFFILIATES TOTAL CUMULATIVE LIABILITY UNDER OR ARISING OUT OF THIS AGREEMENT EXCEED US$10.00. THE NATURE OF THE LIABILITY OR THE NUMBER OF CLAIMS OR SUITS SHALL NOT ENLARGE OR EXTEND THIS LIMIT. + +These exclusions and limitations of liability shall apply regardless if NVIDIA or its affiliates have been advised of the possibility of such damages, and regardless of whether a remedy fails its essential purpose. These exclusions and limitations of liability form an essential basis of the bargain between the parties, and, absent any of these exclusions or limitations of liability, the provisions of this Agreement, including, without limitation, the economic terms, would be substantially different. + +6. Termination. + +6.1 This Agreement will continue to apply until terminated by either you or NVIDIA as described below. + +6.2 If you want to terminate this Agreement, you may do so by stopping to use the SDK. 
+ +6.3 NVIDIA may, at any time, terminate this Agreement if: (i) you fail to comply with any term of this Agreement and the non-compliance is not fixed within thirty (30) days following notice from NVIDIA (or immediately if you violate NVIDIA’s intellectual property rights); (ii) you commence or participate in any legal proceeding against NVIDIA with respect to the SDK; or (iii) NVIDIA decides to no longer provide the SDK in a country or, in NVIDIA’s sole discretion, the continued use of it is no longer commercially viable. + +6.4 Upon any termination of this Agreement, you agree to promptly discontinue use of the SDK and destroy all copies in your possession or control. Your prior distributions in accordance with this Agreement are not affected by the termination of this Agreement. Upon written request, you will certify in writing that you have complied with your commitments under this section. Upon any termination of this Agreement all provisions survive except for the licenses granted to you. + +7. General. + +If you wish to assign this Agreement or your rights and obligations, including by merger, consolidation, dissolution or operation of law, contact NVIDIA to ask for permission. Any attempted assignment not approved by NVIDIA in writing shall be void and of no effect. NVIDIA may assign, delegate or transfer this Agreement and its rights and obligations, and if to a non-affiliate you will be notified. + +You agree to cooperate with NVIDIA and provide reasonably requested information to verify your compliance with this Agreement. + +This Agreement will be governed in all respects by the laws of the United States and of the State of Delaware as those laws are applied to contracts entered into and performed entirely within Delaware by Delaware residents, without regard to the conflicts of laws principles. The United Nations Convention on Contracts for the International Sale of Goods is specifically disclaimed. You agree to all terms of this Agreement in the English language. + +The state or federal courts residing in Santa Clara County, California shall have exclusive jurisdiction over any dispute or claim arising out of this Agreement. Notwithstanding this, you agree that NVIDIA shall still be allowed to apply for injunctive remedies or an equivalent type of urgent legal relief in any jurisdiction. + +If any court of competent jurisdiction determines that any provision of this Agreement is illegal, invalid or unenforceable, such provision will be construed as limited to the extent necessary to be consistent with and fully enforceable under the law and the remaining provisions will remain in full force and effect. Unless otherwise specified, remedies are cumulative. + +Each party acknowledges and agrees that the other is an independent contractor in the performance of this Agreement. + +The SDK has been developed entirely at private expense and is “commercial items” consisting of “commercial computer software” and “commercial computer software documentation” provided with RESTRICTED RIGHTS. Use, duplication or disclosure by the U.S. Government or a U.S. Government subcontractor is subject to the restrictions in this Agreement pursuant to DFARS 227.7202-3(a) or as set forth in subparagraphs (b)(1) and (2) of the Commercial Computer Software - Restricted Rights clause at FAR 52.227-19, as applicable. Contractor/manufacturer is NVIDIA, 2788 San Tomas Expressway, Santa Clara, CA 95051. + +The SDK is subject to United States export laws and regulations. 
You agree that you will not ship, transfer or export the SDK into any country, or use the SDK in any manner, prohibited by the United States Bureau of Industry and Security or economic sanctions regulations administered by the U.S. Department of Treasury’s Office of Foreign Assets Control (OFAC), or any applicable export laws, restrictions or regulations. These laws include restrictions on destinations, end users and end use. By accepting this Agreement, you confirm that you are not a resident or citizen of any country currently embargoed by the U.S. and that you are not otherwise prohibited from receiving the SDK. + +Any notice delivered by NVIDIA to you under this Agreement will be delivered via mail, email or fax. You agree that any notices that NVIDIA sends you electronically will satisfy any legal communication requirements. Please direct your legal notices or other correspondence to NVIDIA Corporation, 2788 San Tomas Expressway, Santa Clara, California 95051, United States of America, Attention: Legal Department. + +This Agreement and any exhibits incorporated into this Agreement constitute the entire agreement of the parties with respect to the subject matter of this Agreement and supersede all prior negotiations or documentation exchanged between the parties relating to this subject matter. Any additional and/or conflicting terms on documents issued by you are null, void, and invalid. Any amendment or waiver under this Agreement shall be in writing and signed by representatives of both parties. + +(v. October 12, 2020) + + + + + + + + + + + + + + + +cuSPARSELt SUPPLEMENT TO SOFTWARE LICENSE AGREEMENT FOR NVIDIA SOFTWARE DEVELOPMENT KITS + +The terms in this supplement govern your use of the NVIDIA cuSPARSELt SDK under the terms of your license agreement (“Agreement”) as modified by this supplement. Capitalized terms used but not defined below have the meaning assigned to them in the Agreement. + +This supplement is an exhibit to the Agreement and is incorporated as an integral part of the Agreement. In the event of conflict between the terms in this supplement and the terms in the Agreement, the terms in this supplement govern. + +1. License Scope. The SDK is licensed for you to develop applications only for use in systems with NVIDIA GPUs. + +2. Distribution. The following portions of the SDK are distributable under the Agreement: the runtimes files ending with .so and .h as part of your application. + +3. Licensing. If the distribution terms in this Agreement are not suitable for your organization, or for any questions regarding this Agreement, please contact NVIDIA at nvidia-compute-license-questions@nvidia.com + +(v. October 12, 2020) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/cusparselt/include/cusparseLt.h b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/cusparselt/include/cusparseLt.h new file mode 100644 index 0000000000000000000000000000000000000000..c30c4b0a431be340b51d4ecd1662f7242e2b19b2 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/cusparselt/include/cusparseLt.h @@ -0,0 +1,371 @@ +/* + * Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + * + * NVIDIA CORPORATION and its licensors retain all intellectual property + * and proprietary rights in and to this software, related documentation + * and any modifications thereto. 
Any use, reproduction, disclosure or + * distribution of this software and related documentation without an express + * license agreement from NVIDIA CORPORATION is strictly prohibited. + */ +#if !defined(CUSPARSELT_HEADER_) +#define CUSPARSELT_HEADER_ + +#include "cusparse.h" // cusparseStatus_t + +#include <stddef.h> // size_t +#include <cuda_runtime_api.h> // cudaStream_t +#include <library_types.h> // cudaDataType +#include <stdint.h> // uint8_t + +//############################################################################## +//# CUSPARSELT VERSION INFORMATION +//############################################################################## + +#define CUSPARSELT_VER_MAJOR 0 +#define CUSPARSELT_VER_MINOR 6 +#define CUSPARSELT_VER_PATCH 3 +#define CUSPARSELT_VER_BUILD 2 +#define CUSPARSELT_VERSION (CUSPARSELT_VER_MAJOR * 1000 + \ + CUSPARSELT_VER_MINOR * 100 + \ + CUSPARSELT_VER_PATCH) + +// ############################################################################# +// # MACRO +// ############################################################################# + +#if !defined(CUSPARSELT_API) +# if defined(_WIN32) +# define CUSPARSELT_API __stdcall +# else +# define CUSPARSELT_API +# endif +#endif + +//------------------------------------------------------------------------------ + +#if defined(__cplusplus) +extern "C" { +#endif // defined(__cplusplus) + +//############################################################################## +//# OPAQUE DATA STRUCTURES +//############################################################################## + +typedef struct { uint8_t data[13072]; } cusparseLtHandle_t; + +typedef struct { uint8_t data[13072]; } cusparseLtMatDescriptor_t; + +typedef struct { uint8_t data[13072]; } cusparseLtMatmulDescriptor_t; + +typedef struct { uint8_t data[13072]; } cusparseLtMatmulAlgSelection_t; + +typedef struct { uint8_t data[13072]; } cusparseLtMatmulPlan_t; + +//############################################################################## +//# INITIALIZATION, DESTROY +//############################################################################## + +cusparseStatus_t CUSPARSELT_API +cusparseLtInit(cusparseLtHandle_t* handle); + +cusparseStatus_t CUSPARSELT_API +cusparseLtDestroy(const cusparseLtHandle_t* handle); + +cusparseStatus_t CUSPARSELT_API +cusparseLtGetVersion(const cusparseLtHandle_t* handle, + int* version); + +cusparseStatus_t CUSPARSELT_API +cusparseLtGetProperty(libraryPropertyType propertyType, + int* value); + +//############################################################################## +//# MATRIX DESCRIPTOR +//############################################################################## +// Dense Matrix + +cusparseStatus_t CUSPARSELT_API +cusparseLtDenseDescriptorInit(const cusparseLtHandle_t* handle, + cusparseLtMatDescriptor_t* matDescr, + int64_t rows, + int64_t cols, + int64_t ld, + uint32_t alignment, + cudaDataType valueType, + cusparseOrder_t order); + +//------------------------------------------------------------------------------ +// Structured Matrix + +typedef enum { + CUSPARSELT_SPARSITY_50_PERCENT +} cusparseLtSparsity_t; + +cusparseStatus_t CUSPARSELT_API +cusparseLtStructuredDescriptorInit(const cusparseLtHandle_t* handle, + cusparseLtMatDescriptor_t* matDescr, + int64_t rows, + int64_t cols, + int64_t ld, + uint32_t alignment, + cudaDataType valueType, + cusparseOrder_t order, + cusparseLtSparsity_t sparsity); + +cusparseStatus_t CUSPARSELT_API +cusparseLtMatDescriptorDestroy(const cusparseLtMatDescriptor_t* matDescr); +
+//------------------------------------------------------------------------------ + +typedef enum { + CUSPARSELT_MAT_NUM_BATCHES, // READ/WRITE + CUSPARSELT_MAT_BATCH_STRIDE // READ/WRITE +} cusparseLtMatDescAttribute_t; + +cusparseStatus_t CUSPARSELT_API +cusparseLtMatDescSetAttribute(const cusparseLtHandle_t* handle, + cusparseLtMatDescriptor_t* matmulDescr, + cusparseLtMatDescAttribute_t matAttribute, + const void* data, + size_t dataSize); + +cusparseStatus_t CUSPARSELT_API +cusparseLtMatDescGetAttribute(const cusparseLtHandle_t* handle, + const cusparseLtMatDescriptor_t* matmulDescr, + cusparseLtMatDescAttribute_t matAttribute, + void* data, + size_t dataSize); + +//############################################################################## +//# MATMUL DESCRIPTOR +//############################################################################## + +typedef enum { + CUSPARSE_COMPUTE_32I, + CUSPARSE_COMPUTE_16F, + CUSPARSE_COMPUTE_32F +} cusparseComputeType; + +cusparseStatus_t CUSPARSELT_API +cusparseLtMatmulDescriptorInit(const cusparseLtHandle_t* handle, + cusparseLtMatmulDescriptor_t* matmulDescr, + cusparseOperation_t opA, + cusparseOperation_t opB, + const cusparseLtMatDescriptor_t* matA, + const cusparseLtMatDescriptor_t* matB, + const cusparseLtMatDescriptor_t* matC, + const cusparseLtMatDescriptor_t* matD, + cusparseComputeType computeType); + +//------------------------------------------------------------------------------ + +typedef enum { + CUSPARSELT_MATMUL_ACTIVATION_RELU, // READ/WRITE + CUSPARSELT_MATMUL_ACTIVATION_RELU_UPPERBOUND, // READ/WRITE + CUSPARSELT_MATMUL_ACTIVATION_RELU_THRESHOLD, // READ/WRITE + CUSPARSELT_MATMUL_ACTIVATION_GELU, // READ/WRITE + CUSPARSELT_MATMUL_ACTIVATION_GELU_SCALING, // READ/WRITE + CUSPARSELT_MATMUL_ALPHA_VECTOR_SCALING, // READ/WRITE + CUSPARSELT_MATMUL_BETA_VECTOR_SCALING, // READ/WRITE + CUSPARSELT_MATMUL_BIAS_STRIDE, // READ/WRITE + CUSPARSELT_MATMUL_BIAS_POINTER, // READ/WRITE + CUSPARSELT_MATMUL_SPARSE_MAT_POINTER, // READ/WRITE +} cusparseLtMatmulDescAttribute_t; + +cusparseStatus_t CUSPARSELT_API +cusparseLtMatmulDescSetAttribute(const cusparseLtHandle_t* handle, + cusparseLtMatmulDescriptor_t* matmulDescr, + cusparseLtMatmulDescAttribute_t matmulAttribute, + const void* data, + size_t dataSize); + +cusparseStatus_t CUSPARSELT_API +cusparseLtMatmulDescGetAttribute( + const cusparseLtHandle_t* handle, + const cusparseLtMatmulDescriptor_t* matmulDescr, + cusparseLtMatmulDescAttribute_t matmulAttribute, + void* data, + size_t dataSize); + +//############################################################################## +//# ALGORITHM SELECTION +//############################################################################## + +typedef enum { + CUSPARSELT_MATMUL_ALG_DEFAULT +} cusparseLtMatmulAlg_t; + +cusparseStatus_t CUSPARSELT_API +cusparseLtMatmulAlgSelectionInit( + const cusparseLtHandle_t* handle, + cusparseLtMatmulAlgSelection_t* algSelection, + const cusparseLtMatmulDescriptor_t* matmulDescr, + cusparseLtMatmulAlg_t alg); + +typedef enum { + CUSPARSELT_MATMUL_ALG_CONFIG_ID, // READ/WRITE + CUSPARSELT_MATMUL_ALG_CONFIG_MAX_ID, // READ-ONLY + CUSPARSELT_MATMUL_SEARCH_ITERATIONS, // READ/WRITE + CUSPARSELT_MATMUL_SPLIT_K, // READ/WRITE + CUSPARSELT_MATMUL_SPLIT_K_MODE, // READ/WRITE + CUSPARSELT_MATMUL_SPLIT_K_BUFFERS // READ/WRITE +} cusparseLtMatmulAlgAttribute_t; + +typedef enum { + CUSPARSELT_INVALID_MODE = 0, + CUSPARSELT_SPLIT_K_MODE_ONE_KERNEL = 1, + CUSPARSELT_SPLIT_K_MODE_TWO_KERNELS = 2 +} cusparseLtSplitKMode_t; + 
+cusparseStatus_t CUSPARSELT_API +cusparseLtMatmulAlgSetAttribute(const cusparseLtHandle_t* handle, + cusparseLtMatmulAlgSelection_t* algSelection, + cusparseLtMatmulAlgAttribute_t attribute, + const void* data, + size_t dataSize); + +cusparseStatus_t CUSPARSELT_API +cusparseLtMatmulAlgGetAttribute( + const cusparseLtHandle_t* handle, + const cusparseLtMatmulAlgSelection_t* algSelection, + cusparseLtMatmulAlgAttribute_t attribute, + void* data, + size_t dataSize); + +//############################################################################## +//# MATMUL PLAN +//############################################################################## + +cusparseStatus_t CUSPARSELT_API +cusparseLtMatmulGetWorkspace( + const cusparseLtHandle_t* handle, + const cusparseLtMatmulPlan_t* plan, + size_t* workspaceSize); + +cusparseStatus_t CUSPARSELT_API +cusparseLtMatmulPlanInit(const cusparseLtHandle_t* handle, + cusparseLtMatmulPlan_t* plan, + const cusparseLtMatmulDescriptor_t* matmulDescr, + const cusparseLtMatmulAlgSelection_t* algSelection); + +cusparseStatus_t CUSPARSELT_API +cusparseLtMatmulPlanDestroy(const cusparseLtMatmulPlan_t* plan); + +//############################################################################## +//# MATMUL EXECUTION +//############################################################################## + +cusparseStatus_t CUSPARSELT_API +cusparseLtMatmul(const cusparseLtHandle_t* handle, + const cusparseLtMatmulPlan_t* plan, + const void* alpha, + const void* d_A, + const void* d_B, + const void* beta, + const void* d_C, + void* d_D, + void* workspace, + cudaStream_t* streams, + int32_t numStreams); + +cusparseStatus_t CUSPARSELT_API +cusparseLtMatmulSearch(const cusparseLtHandle_t* handle, + cusparseLtMatmulPlan_t* plan, + const void* alpha, + const void* d_A, + const void* d_B, + const void* beta, + const void* d_C, + void* d_D, + void* workspace, + // void* device_buf, + cudaStream_t* streams, + int32_t numStreams); + +//############################################################################## +//# HELPER ROUTINES +//############################################################################## +// PRUNING + +typedef enum { + CUSPARSELT_PRUNE_SPMMA_TILE = 0, + CUSPARSELT_PRUNE_SPMMA_STRIP = 1 +} cusparseLtPruneAlg_t; + +cusparseStatus_t CUSPARSELT_API +cusparseLtSpMMAPrune(const cusparseLtHandle_t* handle, + const cusparseLtMatmulDescriptor_t* matmulDescr, + const void* d_in, + void* d_out, + cusparseLtPruneAlg_t pruneAlg, + cudaStream_t stream); + +cusparseStatus_t CUSPARSELT_API +cusparseLtSpMMAPruneCheck(const cusparseLtHandle_t* handle, + const cusparseLtMatmulDescriptor_t* matmulDescr, + const void* d_in, + int* valid, + cudaStream_t stream); + +cusparseStatus_t CUSPARSELT_API +cusparseLtSpMMAPrune2(const cusparseLtHandle_t* handle, + const cusparseLtMatDescriptor_t* sparseMatDescr, + int isSparseA, + cusparseOperation_t op, + const void* d_in, + void* d_out, + cusparseLtPruneAlg_t pruneAlg, + cudaStream_t stream); + +cusparseStatus_t CUSPARSELT_API +cusparseLtSpMMAPruneCheck2(const cusparseLtHandle_t* handle, + const cusparseLtMatDescriptor_t* sparseMatDescr, + int isSparseA, + cusparseOperation_t op, + const void* d_in, + int* d_valid, + cudaStream_t stream); + +//------------------------------------------------------------------------------ +// COMPRESSION + +cusparseStatus_t CUSPARSELT_API +cusparseLtSpMMACompressedSize( + const cusparseLtHandle_t* handle, + const cusparseLtMatmulPlan_t* plan, + size_t* compressedSize, + size_t* compressedBufferSize); + 
+cusparseStatus_t CUSPARSELT_API +cusparseLtSpMMACompress(const cusparseLtHandle_t* handle, + const cusparseLtMatmulPlan_t* plan, + const void* d_dense, + void* d_compressed, + void* d_compressed_buffer, + cudaStream_t stream); + +cusparseStatus_t CUSPARSELT_API +cusparseLtSpMMACompressedSize2( + const cusparseLtHandle_t* handle, + const cusparseLtMatDescriptor_t* sparseMatDescr, + size_t* compressedSize, + size_t* compressedBufferSize); + +cusparseStatus_t CUSPARSELT_API +cusparseLtSpMMACompress2(const cusparseLtHandle_t* handle, + const cusparseLtMatDescriptor_t* sparseMatDescr, + int isSparseA, + cusparseOperation_t op, + const void* d_dense, + void* d_compressed, + void* d_compressed_buffer, + cudaStream_t stream); + +//============================================================================== +//============================================================================== + +#if defined(__cplusplus) +} +#endif // defined(__cplusplus) + +#endif // !defined(CUSPARSELT_HEADER_) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/cycler-0.12.1.dist-info/INSTALLER b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/cycler-0.12.1.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..5c69047b2eb8235994febeeae1da4a82365a240a --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/cycler-0.12.1.dist-info/INSTALLER @@ -0,0 +1 @@ +uv \ No newline at end of file diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/cycler-0.12.1.dist-info/LICENSE b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/cycler-0.12.1.dist-info/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..d41d808995af2d59db2496a3ae772ca3d849cab2 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/cycler-0.12.1.dist-info/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2015, matplotlib project +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the matplotlib project nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
\ No newline at end of file diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/cycler-0.12.1.dist-info/METADATA b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/cycler-0.12.1.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..e81ab4fa3c9649ef7bc6355d1042f0344c90d83b --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/cycler-0.12.1.dist-info/METADATA @@ -0,0 +1,78 @@ +Metadata-Version: 2.1 +Name: cycler +Version: 0.12.1 +Summary: Composable style cycles +Author-email: Thomas A Caswell +License: Copyright (c) 2015, matplotlib project + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + * Neither the name of the matplotlib project nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +Project-URL: homepage, https://matplotlib.org/cycler/ +Project-URL: repository, https://github.com/matplotlib/cycler +Keywords: cycle kwargs +Classifier: License :: OSI Approved :: BSD License +Classifier: Development Status :: 4 - Beta +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: 3 :: Only +Requires-Python: >=3.8 +Description-Content-Type: text/x-rst +License-File: LICENSE +Provides-Extra: docs +Requires-Dist: ipython ; extra == 'docs' +Requires-Dist: matplotlib ; extra == 'docs' +Requires-Dist: numpydoc ; extra == 'docs' +Requires-Dist: sphinx ; extra == 'docs' +Provides-Extra: tests +Requires-Dist: pytest ; extra == 'tests' +Requires-Dist: pytest-cov ; extra == 'tests' +Requires-Dist: pytest-xdist ; extra == 'tests' + +|PyPi|_ |Conda|_ |Supported Python versions|_ |GitHub Actions|_ |Codecov|_ + +.. |PyPi| image:: https://img.shields.io/pypi/v/cycler.svg?style=flat +.. _PyPi: https://pypi.python.org/pypi/cycler + +.. |Conda| image:: https://img.shields.io/conda/v/conda-forge/cycler +.. _Conda: https://anaconda.org/conda-forge/cycler + +.. 
|Supported Python versions| image:: https://img.shields.io/pypi/pyversions/cycler.svg +.. _Supported Python versions: https://pypi.python.org/pypi/cycler + +.. |GitHub Actions| image:: https://github.com/matplotlib/cycler/actions/workflows/tests.yml/badge.svg +.. _GitHub Actions: https://github.com/matplotlib/cycler/actions + +.. |Codecov| image:: https://codecov.io/github/matplotlib/cycler/badge.svg?branch=main&service=github +.. _Codecov: https://codecov.io/github/matplotlib/cycler?branch=main + +cycler: composable cycles +========================= + +Docs: https://matplotlib.org/cycler/ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/cycler-0.12.1.dist-info/RECORD b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/cycler-0.12.1.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..c4d2a934d278c9e13af9985461f1623f3537d266 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/cycler-0.12.1.dist-info/RECORD @@ -0,0 +1,9 @@ +cycler-0.12.1.dist-info/INSTALLER,sha256=5hhM4Q4mYTT9z6QB6PGpUAW81PGNFrYrdXMj4oM_6ak,2 +cycler-0.12.1.dist-info/LICENSE,sha256=8SGBQ9dm2j_qZvEzlrfxXfRqgzA_Kb-Wum6Y601C9Ag,1497 +cycler-0.12.1.dist-info/METADATA,sha256=IyieGbdvHgE5Qidpbmryts0c556JcxIJv5GVFIsY7TY,3779 +cycler-0.12.1.dist-info/RECORD,, +cycler-0.12.1.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +cycler-0.12.1.dist-info/WHEEL,sha256=yQN5g4mg4AybRjkgi-9yy4iQEFibGQmlz78Pik5Or-A,92 +cycler-0.12.1.dist-info/top_level.txt,sha256=D8BVVDdAAelLb2FOEz7lDpc6-AL21ylKPrMhtG6yzyE,7 +cycler/__init__.py,sha256=1JdRgv5Zzxo-W1ev7B_LWquysWP6LZH6CHk_COtIaXE,16709 +cycler/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/cycler-0.12.1.dist-info/REQUESTED b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/cycler-0.12.1.dist-info/REQUESTED new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/cycler-0.12.1.dist-info/WHEEL b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/cycler-0.12.1.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..7e688737d490be3643d705bc16b5a77f7bd567b7 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/cycler-0.12.1.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.41.2) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/cycler-0.12.1.dist-info/top_level.txt b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/cycler-0.12.1.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..22546440faf3e339c5fb7ec3956bd03cb602ac92 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/cycler-0.12.1.dist-info/top_level.txt @@ -0,0 +1 @@ +cycler diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/filelock/__init__.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/filelock/__init__.py new file mode 100644 index 
0000000000000000000000000000000000000000..c9d8c5b8ebe565a652b3671b3dfa066f7346af45 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/filelock/__init__.py @@ -0,0 +1,70 @@ +""" +A platform independent file lock that supports the with-statement. + +.. autodata:: filelock.__version__ + :no-value: + +""" + +from __future__ import annotations + +import sys +import warnings +from typing import TYPE_CHECKING + +from ._api import AcquireReturnProxy, BaseFileLock +from ._error import Timeout +from ._soft import SoftFileLock +from ._unix import UnixFileLock, has_fcntl +from ._windows import WindowsFileLock +from .asyncio import ( + AsyncAcquireReturnProxy, + AsyncSoftFileLock, + AsyncUnixFileLock, + AsyncWindowsFileLock, + BaseAsyncFileLock, +) +from .version import version + +#: version of the project as a string +__version__: str = version + + +if sys.platform == "win32": # pragma: win32 cover + _FileLock: type[BaseFileLock] = WindowsFileLock + _AsyncFileLock: type[BaseAsyncFileLock] = AsyncWindowsFileLock +else: # pragma: win32 no cover # noqa: PLR5501 + if has_fcntl: + _FileLock: type[BaseFileLock] = UnixFileLock + _AsyncFileLock: type[BaseAsyncFileLock] = AsyncUnixFileLock + else: + _FileLock = SoftFileLock + _AsyncFileLock = AsyncSoftFileLock + if warnings is not None: + warnings.warn("only soft file lock is available", stacklevel=2) + +if TYPE_CHECKING: + FileLock = SoftFileLock + AsyncFileLock = AsyncSoftFileLock +else: + #: Alias for the lock, which should be used for the current platform. + FileLock = _FileLock + AsyncFileLock = _AsyncFileLock + + +__all__ = [ + "AcquireReturnProxy", + "AsyncAcquireReturnProxy", + "AsyncFileLock", + "AsyncSoftFileLock", + "AsyncUnixFileLock", + "AsyncWindowsFileLock", + "BaseAsyncFileLock", + "BaseFileLock", + "FileLock", + "SoftFileLock", + "Timeout", + "UnixFileLock", + "WindowsFileLock", + "__version__", +] diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/filelock/__pycache__/__init__.cpython-310.pyc b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/filelock/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..366f1aeec0345854857e85df561fc15c19d40104 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/filelock/__pycache__/__init__.cpython-310.pyc differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/filelock/__pycache__/_api.cpython-310.pyc b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/filelock/__pycache__/_api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2be04e1edb3c1f1503b42748191f18894898f4b1 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/filelock/__pycache__/_api.cpython-310.pyc differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/filelock/__pycache__/_error.cpython-310.pyc b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/filelock/__pycache__/_error.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..885360b90a7beb81ba0d8ca714589c3ae71e7a62 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/filelock/__pycache__/_error.cpython-310.pyc differ diff --git 
a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/filelock/__pycache__/_soft.cpython-310.pyc b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/filelock/__pycache__/_soft.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..51e01470f4ff53ca1bebd95f09a7deb3c39a1a70 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/filelock/__pycache__/_soft.cpython-310.pyc differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/filelock/__pycache__/_unix.cpython-310.pyc b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/filelock/__pycache__/_unix.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0e3f6a5450de6580c76c09408c5916800dd0db21 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/filelock/__pycache__/_unix.cpython-310.pyc differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/filelock/__pycache__/_util.cpython-310.pyc b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/filelock/__pycache__/_util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ce0fdde0e22e83bc55c8a465de3c0f3b0441e1b6 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/filelock/__pycache__/_util.cpython-310.pyc differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/filelock/__pycache__/_windows.cpython-310.pyc b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/filelock/__pycache__/_windows.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a90823364c821409568fa98672a959505605c6dd Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/filelock/__pycache__/_windows.cpython-310.pyc differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/filelock/__pycache__/asyncio.cpython-310.pyc b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/filelock/__pycache__/asyncio.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e991c95465e3f17b96c322efc133a4011056acc5 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/filelock/__pycache__/asyncio.cpython-310.pyc differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/filelock/__pycache__/version.cpython-310.pyc b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/filelock/__pycache__/version.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5151cc6a5879fdf39ed19d566ca867a3fafedaad Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/filelock/__pycache__/version.cpython-310.pyc differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/filelock/_api.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/filelock/_api.py new file mode 100644 index 0000000000000000000000000000000000000000..8fde69a0fef7badcc123d17735cd784a99baed52 --- /dev/null +++ 
b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/filelock/_api.py
@@ -0,0 +1,403 @@
+from __future__ import annotations
+
+import contextlib
+import inspect
+import logging
+import os
+import time
+import warnings
+from abc import ABCMeta, abstractmethod
+from dataclasses import dataclass
+from threading import local
+from typing import TYPE_CHECKING, Any, cast
+from weakref import WeakValueDictionary
+
+from ._error import Timeout
+
+if TYPE_CHECKING:
+    import sys
+    from types import TracebackType
+
+    if sys.version_info >= (3, 11):  # pragma: no cover (py311+)
+        from typing import Self
+    else:  # pragma: no cover (<py311)
+        from typing_extensions import Self
+
+
+_LOGGER = logging.getLogger("filelock")
+
+
+class AcquireReturnProxy:
+    """A context-aware object that will release the lock file when exiting."""
+
+    def __init__(self, lock: BaseFileLock) -> None:
+        self.lock = lock
+
+    def __enter__(self) -> BaseFileLock:
+        return self.lock
+
+    def __exit__(
+        self,
+        exc_type: type[BaseException] | None,
+        exc_value: BaseException | None,
+        traceback: TracebackType | None,
+    ) -> None:
+        self.lock.release()
+
+
+@dataclass
+class FileLockContext:
+    """A dataclass which holds the context for a ``BaseFileLock`` object."""
+
+    # The context is held in a separate class to allow optional use of thread local storage via the
+    # ThreadLocalFileContext class.
+
+    #: The path to the lock file.
+    lock_file: str
+
+    #: The default timeout value.
+    timeout: float
+
+    #: The mode for the lock files
+    mode: int
+
+    #: Whether the lock should be blocking or not
+    blocking: bool
+
+    #: The file descriptor for the *_lock_file* as it is returned by the os.open() function, not None when lock held
+    lock_file_fd: int | None = None
+
+    #: The lock counter is used for implementing the nested locking mechanism.
+    lock_counter: int = 0  # When the lock is acquired is increased and the lock is only released, when this value is 0
+
+
+class ThreadLocalFileContext(FileLockContext, local):
+    """A thread local version of the ``FileLockContext`` class."""
+
+
+class FileLockMeta(ABCMeta):
+    def __call__(  # noqa: PLR0913
+        cls,
+        lock_file: str | os.PathLike[str],
+        timeout: float = -1,
+        mode: int = 0o644,
+        thread_local: bool = True,  # noqa: FBT001, FBT002
+        *,
+        blocking: bool = True,
+        is_singleton: bool = False,
+        **kwargs: Any,  # capture remaining kwargs for subclasses  # noqa: ANN401
+    ) -> BaseFileLock:
+        if is_singleton:
+            instance = cls._instances.get(str(lock_file))  # type: ignore[attr-defined]
+            if instance:
+                params_to_check = {
+                    "thread_local": (thread_local, instance.is_thread_local()),
+                    "timeout": (timeout, instance.timeout),
+                    "mode": (mode, instance.mode),
+                    "blocking": (blocking, instance.blocking),
+                }
+
+                non_matching_params = {
+                    name: (passed_param, set_param)
+                    for name, (passed_param, set_param) in params_to_check.items()
+                    if passed_param != set_param
+                }
+                if not non_matching_params:
+                    return cast("BaseFileLock", instance)
+
+                # parameters do not match; raise error
+                msg = "Singleton lock instances cannot be initialized with differing arguments"
+                msg += "\nNon-matching arguments: "
+                for param_name, (passed_param, set_param) in non_matching_params.items():
+                    msg += f"\n\t{param_name} (existing lock has {set_param} but {passed_param} was passed)"
+                raise ValueError(msg)
+
+        # Workaround to make `__init__`'s params optional in subclasses
+        # E.g.
virtualenv changes the signature of the `__init__` method in the `BaseFileLock` class descendant + # (https://github.com/tox-dev/filelock/pull/340) + + all_params = { + "timeout": timeout, + "mode": mode, + "thread_local": thread_local, + "blocking": blocking, + "is_singleton": is_singleton, + **kwargs, + } + + present_params = inspect.signature(cls.__init__).parameters # type: ignore[misc] + init_params = {key: value for key, value in all_params.items() if key in present_params} + + instance = super().__call__(lock_file, **init_params) + + if is_singleton: + cls._instances[str(lock_file)] = instance # type: ignore[attr-defined] + + return cast("BaseFileLock", instance) + + +class BaseFileLock(contextlib.ContextDecorator, metaclass=FileLockMeta): + """Abstract base class for a file lock object.""" + + _instances: WeakValueDictionary[str, BaseFileLock] + + def __init_subclass__(cls, **kwargs: dict[str, Any]) -> None: + """Setup unique state for lock subclasses.""" + super().__init_subclass__(**kwargs) + cls._instances = WeakValueDictionary() + + def __init__( # noqa: PLR0913 + self, + lock_file: str | os.PathLike[str], + timeout: float = -1, + mode: int = 0o644, + thread_local: bool = True, # noqa: FBT001, FBT002 + *, + blocking: bool = True, + is_singleton: bool = False, + ) -> None: + """ + Create a new lock object. + + :param lock_file: path to the file + :param timeout: default timeout when acquiring the lock, in seconds. It will be used as fallback value in \ + the acquire method, if no timeout value (``None``) is given. If you want to disable the timeout, set it \ + to a negative value. A timeout of 0 means that there is exactly one attempt to acquire the file lock. + :param mode: file permissions for the lockfile + :param thread_local: Whether this object's internal context should be thread local or not. If this is set to \ + ``False`` then the lock will be reentrant across threads. + :param blocking: whether the lock should be blocking or not + :param is_singleton: If this is set to ``True`` then only one instance of this class will be created \ + per lock file. This is useful if you want to use the lock object for reentrant locking without needing \ + to pass the same object around. + + """ + self._is_thread_local = thread_local + self._is_singleton = is_singleton + + # Create the context. Note that external code should not work with the context directly and should instead use + # properties of this class. + kwargs: dict[str, Any] = { + "lock_file": os.fspath(lock_file), + "timeout": timeout, + "mode": mode, + "blocking": blocking, + } + self._context: FileLockContext = (ThreadLocalFileContext if thread_local else FileLockContext)(**kwargs) + + def is_thread_local(self) -> bool: + """:return: a flag indicating if this lock is thread local or not""" + return self._is_thread_local + + @property + def is_singleton(self) -> bool: + """:return: a flag indicating if this lock is singleton or not""" + return self._is_singleton + + @property + def lock_file(self) -> str: + """:return: path to the lock file""" + return self._context.lock_file + + @property + def timeout(self) -> float: + """ + :return: the default timeout value, in seconds + + .. versionadded:: 2.0.0 + """ + return self._context.timeout + + @timeout.setter + def timeout(self, value: float | str) -> None: + """ + Change the default timeout value. 
+ + :param value: the new value, in seconds + + """ + self._context.timeout = float(value) + + @property + def blocking(self) -> bool: + """:return: whether the locking is blocking or not""" + return self._context.blocking + + @blocking.setter + def blocking(self, value: bool) -> None: + """ + Change the default blocking value. + + :param value: the new value as bool + + """ + self._context.blocking = value + + @property + def mode(self) -> int: + """:return: the file permissions for the lockfile""" + return self._context.mode + + @abstractmethod + def _acquire(self) -> None: + """If the file lock could be acquired, self._context.lock_file_fd holds the file descriptor of the lock file.""" + raise NotImplementedError + + @abstractmethod + def _release(self) -> None: + """Releases the lock and sets self._context.lock_file_fd to None.""" + raise NotImplementedError + + @property + def is_locked(self) -> bool: + """ + + :return: A boolean indicating if the lock file is holding the lock currently. + + .. versionchanged:: 2.0.0 + + This was previously a method and is now a property. + """ + return self._context.lock_file_fd is not None + + @property + def lock_counter(self) -> int: + """:return: The number of times this lock has been acquired (but not yet released).""" + return self._context.lock_counter + + def acquire( + self, + timeout: float | None = None, + poll_interval: float = 0.05, + *, + poll_intervall: float | None = None, + blocking: bool | None = None, + ) -> AcquireReturnProxy: + """ + Try to acquire the file lock. + + :param timeout: maximum wait time for acquiring the lock, ``None`` means use the default :attr:`~timeout` is and + if ``timeout < 0``, there is no timeout and this method will block until the lock could be acquired + :param poll_interval: interval of trying to acquire the lock file + :param poll_intervall: deprecated, kept for backwards compatibility, use ``poll_interval`` instead + :param blocking: defaults to True. If False, function will return immediately if it cannot obtain a lock on the + first attempt. Otherwise, this method will block until the timeout expires or the lock is acquired. + :raises Timeout: if fails to acquire lock within the timeout period + :return: a context object that will unlock the file when the context is exited + + .. code-block:: python + + # You can use this method in the context manager (recommended) + with lock.acquire(): + pass + + # Or use an equivalent try-finally construct: + lock.acquire() + try: + pass + finally: + lock.release() + + .. versionchanged:: 2.0.0 + + This method returns now a *proxy* object instead of *self*, + so that it can be used in a with statement without side effects. + + """ + # Use the default timeout, if no timeout is provided. + if timeout is None: + timeout = self._context.timeout + + if blocking is None: + blocking = self._context.blocking + + if poll_intervall is not None: + msg = "use poll_interval instead of poll_intervall" + warnings.warn(msg, DeprecationWarning, stacklevel=2) + poll_interval = poll_intervall + + # Increment the number right at the beginning. We can still undo it, if something fails. 
+ self._context.lock_counter += 1 + + lock_id = id(self) + lock_filename = self.lock_file + start_time = time.perf_counter() + try: + while True: + if not self.is_locked: + _LOGGER.debug("Attempting to acquire lock %s on %s", lock_id, lock_filename) + self._acquire() + if self.is_locked: + _LOGGER.debug("Lock %s acquired on %s", lock_id, lock_filename) + break + if blocking is False: + _LOGGER.debug("Failed to immediately acquire lock %s on %s", lock_id, lock_filename) + raise Timeout(lock_filename) # noqa: TRY301 + if 0 <= timeout < time.perf_counter() - start_time: + _LOGGER.debug("Timeout on acquiring lock %s on %s", lock_id, lock_filename) + raise Timeout(lock_filename) # noqa: TRY301 + msg = "Lock %s not acquired on %s, waiting %s seconds ..." + _LOGGER.debug(msg, lock_id, lock_filename, poll_interval) + time.sleep(poll_interval) + except BaseException: # Something did go wrong, so decrement the counter. + self._context.lock_counter = max(0, self._context.lock_counter - 1) + raise + return AcquireReturnProxy(lock=self) + + def release(self, force: bool = False) -> None: # noqa: FBT001, FBT002 + """ + Releases the file lock. Please note, that the lock is only completely released, if the lock counter is 0. + Also note, that the lock file itself is not automatically deleted. + + :param force: If true, the lock counter is ignored and the lock is released in every case/ + + """ + if self.is_locked: + self._context.lock_counter -= 1 + + if self._context.lock_counter == 0 or force: + lock_id, lock_filename = id(self), self.lock_file + + _LOGGER.debug("Attempting to release lock %s on %s", lock_id, lock_filename) + self._release() + self._context.lock_counter = 0 + _LOGGER.debug("Lock %s released on %s", lock_id, lock_filename) + + def __enter__(self) -> Self: + """ + Acquire the lock. + + :return: the lock object + + """ + self.acquire() + return self + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_value: BaseException | None, + traceback: TracebackType | None, + ) -> None: + """ + Release the lock. + + :param exc_type: the exception type if raised + :param exc_value: the exception value if raised + :param traceback: the exception traceback if raised + + """ + self.release() + + def __del__(self) -> None: + """Called when the lock object is deleted.""" + self.release(force=True) + + +__all__ = [ + "AcquireReturnProxy", + "BaseFileLock", +] diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/filelock/_error.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/filelock/_error.py new file mode 100644 index 0000000000000000000000000000000000000000..f7ff08c0f508ad7077eb6ed1990898840c952b3a --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/filelock/_error.py @@ -0,0 +1,30 @@ +from __future__ import annotations + +from typing import Any + + +class Timeout(TimeoutError): # noqa: N818 + """Raised when the lock could not be acquired in *timeout* seconds.""" + + def __init__(self, lock_file: str) -> None: + super().__init__() + self._lock_file = lock_file + + def __reduce__(self) -> str | tuple[Any, ...]: + return self.__class__, (self._lock_file,) # Properly pickle the exception + + def __str__(self) -> str: + return f"The file lock '{self._lock_file}' could not be acquired." 
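# ---------------------------------------------------------------------------
# Editorial aside (not part of the vendored filelock sources, which resume at
# the next "+" line): a minimal sketch of the locking API defined in the
# _api.py and _error.py hunks above, assuming the package is importable as
# ``filelock``. The lock path "example.txt.lock" is hypothetical.
from filelock import FileLock, Timeout

lock = FileLock("example.txt.lock", timeout=1)

# Recommended context-manager form from the acquire() docstring: the lock is
# acquired on entry and released again on exit.
with lock:
    pass

# A timeout of 0 performs exactly one acquisition attempt; a failed attempt
# raises the Timeout exception defined in _error.py.
try:
    with lock.acquire(timeout=0):
        pass
except Timeout:
    print("another process currently holds example.txt.lock")

# With is_singleton=True, FileLockMeta.__call__ returns the same instance for
# the same lock path, provided the constructor arguments match.
a = FileLock("example.txt.lock", is_singleton=True)
b = FileLock("example.txt.lock", is_singleton=True)
assert a is b
# --------------------------------------------------------------- end aside --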
+ + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self.lock_file!r})" + + @property + def lock_file(self) -> str: + """:return: The path of the file lock.""" + return self._lock_file + + +__all__ = [ + "Timeout", +] diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/filelock/_soft.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/filelock/_soft.py new file mode 100644 index 0000000000000000000000000000000000000000..28c67f74cc82b8f55e47afd6a71972cc1fb95eb6 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/filelock/_soft.py @@ -0,0 +1,47 @@ +from __future__ import annotations + +import os +import sys +from contextlib import suppress +from errno import EACCES, EEXIST +from pathlib import Path + +from ._api import BaseFileLock +from ._util import ensure_directory_exists, raise_on_not_writable_file + + +class SoftFileLock(BaseFileLock): + """Simply watches the existence of the lock file.""" + + def _acquire(self) -> None: + raise_on_not_writable_file(self.lock_file) + ensure_directory_exists(self.lock_file) + # first check for exists and read-only mode as the open will mask this case as EEXIST + flags = ( + os.O_WRONLY # open for writing only + | os.O_CREAT + | os.O_EXCL # together with above raise EEXIST if the file specified by filename exists + | os.O_TRUNC # truncate the file to zero byte + ) + try: + file_handler = os.open(self.lock_file, flags, self._context.mode) + except OSError as exception: # re-raise unless expected exception + if not ( + exception.errno == EEXIST # lock already exist + or (exception.errno == EACCES and sys.platform == "win32") # has no access to this lock + ): # pragma: win32 no cover + raise + else: + self._context.lock_file_fd = file_handler + + def _release(self) -> None: + assert self._context.lock_file_fd is not None # noqa: S101 + os.close(self._context.lock_file_fd) # the lock file is definitely not None + self._context.lock_file_fd = None + with suppress(OSError): # the file is already deleted and that's what we want + Path(self.lock_file).unlink() + + +__all__ = [ + "SoftFileLock", +] diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/filelock/_unix.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/filelock/_unix.py new file mode 100644 index 0000000000000000000000000000000000000000..b2fd0f33d25d2bdf4a2a883380154771b4a25f9b --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/filelock/_unix.py @@ -0,0 +1,70 @@ +from __future__ import annotations + +import os +import sys +from contextlib import suppress +from errno import ENOSYS +from pathlib import Path +from typing import cast + +from ._api import BaseFileLock +from ._util import ensure_directory_exists + +#: a flag to indicate if the fcntl API is available +has_fcntl = False +if sys.platform == "win32": # pragma: win32 cover + + class UnixFileLock(BaseFileLock): + """Uses the :func:`fcntl.flock` to hard lock the lock file on unix systems.""" + + def _acquire(self) -> None: + raise NotImplementedError + + def _release(self) -> None: + raise NotImplementedError + +else: # pragma: win32 no cover + try: + import fcntl + + _ = (fcntl.flock, fcntl.LOCK_EX, fcntl.LOCK_NB, fcntl.LOCK_UN) + except (ImportError, AttributeError): + pass + else: + has_fcntl = True + + class UnixFileLock(BaseFileLock): + """Uses the :func:`fcntl.flock` to hard lock the lock file on 
unix systems.""" + + def _acquire(self) -> None: + ensure_directory_exists(self.lock_file) + open_flags = os.O_RDWR | os.O_TRUNC + if not Path(self.lock_file).exists(): + open_flags |= os.O_CREAT + fd = os.open(self.lock_file, open_flags, self._context.mode) + with suppress(PermissionError): # This locked is not owned by this UID + os.fchmod(fd, self._context.mode) + try: + fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB) + except OSError as exception: + os.close(fd) + if exception.errno == ENOSYS: # NotImplemented error + msg = "FileSystem does not appear to support flock; use SoftFileLock instead" + raise NotImplementedError(msg) from exception + else: + self._context.lock_file_fd = fd + + def _release(self) -> None: + # Do not remove the lockfile: + # https://github.com/tox-dev/py-filelock/issues/31 + # https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition + fd = cast("int", self._context.lock_file_fd) + self._context.lock_file_fd = None + fcntl.flock(fd, fcntl.LOCK_UN) + os.close(fd) + + +__all__ = [ + "UnixFileLock", + "has_fcntl", +] diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/filelock/_util.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/filelock/_util.py new file mode 100644 index 0000000000000000000000000000000000000000..c671e8533873948f0e1b5575ff952c722019f067 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/filelock/_util.py @@ -0,0 +1,52 @@ +from __future__ import annotations + +import os +import stat +import sys +from errno import EACCES, EISDIR +from pathlib import Path + + +def raise_on_not_writable_file(filename: str) -> None: + """ + Raise an exception if attempting to open the file for writing would fail. + + This is done so files that will never be writable can be separated from files that are writable but currently + locked. + + :param filename: file to check + :raises OSError: as if the file was opened for writing. + + """ + try: # use stat to do exists + can write to check without race condition + file_stat = os.stat(filename) # noqa: PTH116 + except OSError: + return # swallow does not exist or other errors + + if file_stat.st_mtime != 0: # if os.stat returns but modification is zero that's an invalid os.stat - ignore it + if not (file_stat.st_mode & stat.S_IWUSR): + raise PermissionError(EACCES, "Permission denied", filename) + + if stat.S_ISDIR(file_stat.st_mode): + if sys.platform == "win32": # pragma: win32 cover + # On Windows, this is PermissionError + raise PermissionError(EACCES, "Permission denied", filename) + else: # pragma: win32 no cover # noqa: RET506 + # On linux / macOS, this is IsADirectoryError + raise IsADirectoryError(EISDIR, "Is a directory", filename) + + +def ensure_directory_exists(filename: Path | str) -> None: + """ + Ensure the directory containing the file exists (create it if necessary). + + :param filename: file. 
+ + """ + Path(filename).parent.mkdir(parents=True, exist_ok=True) + + +__all__ = [ + "ensure_directory_exists", + "raise_on_not_writable_file", +] diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/filelock/_windows.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/filelock/_windows.py new file mode 100644 index 0000000000000000000000000000000000000000..348251d1067c28c55a6a267f8d11337abfae837f --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/filelock/_windows.py @@ -0,0 +1,65 @@ +from __future__ import annotations + +import os +import sys +from contextlib import suppress +from errno import EACCES +from pathlib import Path +from typing import cast + +from ._api import BaseFileLock +from ._util import ensure_directory_exists, raise_on_not_writable_file + +if sys.platform == "win32": # pragma: win32 cover + import msvcrt + + class WindowsFileLock(BaseFileLock): + """Uses the :func:`msvcrt.locking` function to hard lock the lock file on Windows systems.""" + + def _acquire(self) -> None: + raise_on_not_writable_file(self.lock_file) + ensure_directory_exists(self.lock_file) + flags = ( + os.O_RDWR # open for read and write + | os.O_CREAT # create file if not exists + | os.O_TRUNC # truncate file if not empty + ) + try: + fd = os.open(self.lock_file, flags, self._context.mode) + except OSError as exception: + if exception.errno != EACCES: # has no access to this lock + raise + else: + try: + msvcrt.locking(fd, msvcrt.LK_NBLCK, 1) + except OSError as exception: + os.close(fd) # close file first + if exception.errno != EACCES: # file is already locked + raise + else: + self._context.lock_file_fd = fd + + def _release(self) -> None: + fd = cast("int", self._context.lock_file_fd) + self._context.lock_file_fd = None + msvcrt.locking(fd, msvcrt.LK_UNLCK, 1) + os.close(fd) + + with suppress(OSError): # Probably another instance of the application hat acquired the file lock. 
+ Path(self.lock_file).unlink() + +else: # pragma: win32 no cover + + class WindowsFileLock(BaseFileLock): + """Uses the :func:`msvcrt.locking` function to hard lock the lock file on Windows systems.""" + + def _acquire(self) -> None: + raise NotImplementedError + + def _release(self) -> None: + raise NotImplementedError + + +__all__ = [ + "WindowsFileLock", +] diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/filelock/asyncio.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/filelock/asyncio.py new file mode 100644 index 0000000000000000000000000000000000000000..1c9c9f05bdbc41e13a2c4c3094d43acab8207089 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/filelock/asyncio.py @@ -0,0 +1,342 @@ +"""An asyncio-based implementation of the file lock.""" + +from __future__ import annotations + +import asyncio +import contextlib +import logging +import os +import time +from dataclasses import dataclass +from threading import local +from typing import TYPE_CHECKING, Any, Callable, NoReturn, cast + +from ._api import BaseFileLock, FileLockContext, FileLockMeta +from ._error import Timeout +from ._soft import SoftFileLock +from ._unix import UnixFileLock +from ._windows import WindowsFileLock + +if TYPE_CHECKING: + import sys + from concurrent import futures + from types import TracebackType + + if sys.version_info >= (3, 11): # pragma: no cover (py311+) + from typing import Self + else: # pragma: no cover ( None: # noqa: D107 + self.lock = lock + + async def __aenter__(self) -> BaseAsyncFileLock: # noqa: D105 + return self.lock + + async def __aexit__( # noqa: D105 + self, + exc_type: type[BaseException] | None, + exc_value: BaseException | None, + traceback: TracebackType | None, + ) -> None: + await self.lock.release() + + +class AsyncFileLockMeta(FileLockMeta): + def __call__( # type: ignore[override] # noqa: PLR0913 + cls, # noqa: N805 + lock_file: str | os.PathLike[str], + timeout: float = -1, + mode: int = 0o644, + thread_local: bool = False, # noqa: FBT001, FBT002 + *, + blocking: bool = True, + is_singleton: bool = False, + loop: asyncio.AbstractEventLoop | None = None, + run_in_executor: bool = True, + executor: futures.Executor | None = None, + ) -> BaseAsyncFileLock: + if thread_local and run_in_executor: + msg = "run_in_executor is not supported when thread_local is True" + raise ValueError(msg) + instance = super().__call__( + lock_file=lock_file, + timeout=timeout, + mode=mode, + thread_local=thread_local, + blocking=blocking, + is_singleton=is_singleton, + loop=loop, + run_in_executor=run_in_executor, + executor=executor, + ) + return cast("BaseAsyncFileLock", instance) + + +class BaseAsyncFileLock(BaseFileLock, metaclass=AsyncFileLockMeta): + """Base class for asynchronous file locks.""" + + def __init__( # noqa: PLR0913 + self, + lock_file: str | os.PathLike[str], + timeout: float = -1, + mode: int = 0o644, + thread_local: bool = False, # noqa: FBT001, FBT002 + *, + blocking: bool = True, + is_singleton: bool = False, + loop: asyncio.AbstractEventLoop | None = None, + run_in_executor: bool = True, + executor: futures.Executor | None = None, + ) -> None: + """ + Create a new lock object. + + :param lock_file: path to the file + :param timeout: default timeout when acquiring the lock, in seconds. It will be used as fallback value in \ + the acquire method, if no timeout value (``None``) is given. If you want to disable the timeout, set it \ + to a negative value. 
A timeout of 0 means that there is exactly one attempt to acquire the file lock. + :param mode: file permissions for the lockfile + :param thread_local: Whether this object's internal context should be thread local or not. If this is set to \ + ``False`` then the lock will be reentrant across threads. + :param blocking: whether the lock should be blocking or not + :param is_singleton: If this is set to ``True`` then only one instance of this class will be created \ + per lock file. This is useful if you want to use the lock object for reentrant locking without needing \ + to pass the same object around. + :param loop: The event loop to use. If not specified, the running event loop will be used. + :param run_in_executor: If this is set to ``True`` then the lock will be acquired in an executor. + :param executor: The executor to use. If not specified, the default executor will be used. + + """ + self._is_thread_local = thread_local + self._is_singleton = is_singleton + + # Create the context. Note that external code should not work with the context directly and should instead use + # properties of this class. + kwargs: dict[str, Any] = { + "lock_file": os.fspath(lock_file), + "timeout": timeout, + "mode": mode, + "blocking": blocking, + "loop": loop, + "run_in_executor": run_in_executor, + "executor": executor, + } + self._context: AsyncFileLockContext = (AsyncThreadLocalFileContext if thread_local else AsyncFileLockContext)( + **kwargs + ) + + @property + def run_in_executor(self) -> bool: + """::return: whether run in executor.""" + return self._context.run_in_executor + + @property + def executor(self) -> futures.Executor | None: + """::return: the executor.""" + return self._context.executor + + @executor.setter + def executor(self, value: futures.Executor | None) -> None: # pragma: no cover + """ + Change the executor. + + :param value: the new executor or ``None`` + :type value: futures.Executor | None + + """ + self._context.executor = value + + @property + def loop(self) -> asyncio.AbstractEventLoop | None: + """::return: the event loop.""" + return self._context.loop + + async def acquire( # type: ignore[override] + self, + timeout: float | None = None, + poll_interval: float = 0.05, + *, + blocking: bool | None = None, + ) -> AsyncAcquireReturnProxy: + """ + Try to acquire the file lock. + + :param timeout: maximum wait time for acquiring the lock, ``None`` means use the default + :attr:`~BaseFileLock.timeout` is and if ``timeout < 0``, there is no timeout and + this method will block until the lock could be acquired + :param poll_interval: interval of trying to acquire the lock file + :param blocking: defaults to True. If False, function will return immediately if it cannot obtain a lock on the + first attempt. Otherwise, this method will block until the timeout expires or the lock is acquired. + :raises Timeout: if fails to acquire lock within the timeout period + :return: a context object that will unlock the file when the context is exited + + .. code-block:: python + + # You can use this method in the context manager (recommended) + with lock.acquire(): + pass + + # Or use an equivalent try-finally construct: + lock.acquire() + try: + pass + finally: + lock.release() + + """ + # Use the default timeout, if no timeout is provided. + if timeout is None: + timeout = self._context.timeout + + if blocking is None: + blocking = self._context.blocking + + # Increment the number right at the beginning. We can still undo it, if something fails. 
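# ---------------------------------------------------------------------------
# Editorial aside (not part of the vendored asyncio.py hunk, which resumes at
# the next "+" line): a minimal sketch of the async variant of the same API,
# assuming the package is importable as ``filelock``; the lock path
# "example.txt.lock" is hypothetical.
import asyncio

from filelock import AsyncFileLock


async def main() -> None:
    lock = AsyncFileLock("example.txt.lock", timeout=1)
    # __aenter__ awaits acquire() and __aexit__ awaits release(); a plain
    # ``with`` block is rejected by __enter__ for async locks.
    async with lock:
        ...  # work while holding the lock


asyncio.run(main())
# --------------------------------------------------------------- end aside --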
+ self._context.lock_counter += 1 + + lock_id = id(self) + lock_filename = self.lock_file + start_time = time.perf_counter() + try: + while True: + if not self.is_locked: + _LOGGER.debug("Attempting to acquire lock %s on %s", lock_id, lock_filename) + await self._run_internal_method(self._acquire) + if self.is_locked: + _LOGGER.debug("Lock %s acquired on %s", lock_id, lock_filename) + break + if blocking is False: + _LOGGER.debug("Failed to immediately acquire lock %s on %s", lock_id, lock_filename) + raise Timeout(lock_filename) # noqa: TRY301 + if 0 <= timeout < time.perf_counter() - start_time: + _LOGGER.debug("Timeout on acquiring lock %s on %s", lock_id, lock_filename) + raise Timeout(lock_filename) # noqa: TRY301 + msg = "Lock %s not acquired on %s, waiting %s seconds ..." + _LOGGER.debug(msg, lock_id, lock_filename, poll_interval) + await asyncio.sleep(poll_interval) + except BaseException: # Something did go wrong, so decrement the counter. + self._context.lock_counter = max(0, self._context.lock_counter - 1) + raise + return AsyncAcquireReturnProxy(lock=self) + + async def release(self, force: bool = False) -> None: # type: ignore[override] # noqa: FBT001, FBT002 + """ + Releases the file lock. Please note, that the lock is only completely released, if the lock counter is 0. + Also note, that the lock file itself is not automatically deleted. + + :param force: If true, the lock counter is ignored and the lock is released in every case/ + + """ + if self.is_locked: + self._context.lock_counter -= 1 + + if self._context.lock_counter == 0 or force: + lock_id, lock_filename = id(self), self.lock_file + + _LOGGER.debug("Attempting to release lock %s on %s", lock_id, lock_filename) + await self._run_internal_method(self._release) + self._context.lock_counter = 0 + _LOGGER.debug("Lock %s released on %s", lock_id, lock_filename) + + async def _run_internal_method(self, method: Callable[[], Any]) -> None: + if asyncio.iscoroutinefunction(method): + await method() + elif self.run_in_executor: + loop = self.loop or asyncio.get_running_loop() + await loop.run_in_executor(self.executor, method) + else: + method() + + def __enter__(self) -> NoReturn: + """ + Replace old __enter__ method to avoid using it. + + NOTE: DO NOT USE `with` FOR ASYNCIO LOCKS, USE `async with` INSTEAD. + + :return: none + :rtype: NoReturn + """ + msg = "Do not use `with` for asyncio locks, use `async with` instead." + raise NotImplementedError(msg) + + async def __aenter__(self) -> Self: + """ + Acquire the lock. + + :return: the lock object + + """ + await self.acquire() + return self + + async def __aexit__( + self, + exc_type: type[BaseException] | None, + exc_value: BaseException | None, + traceback: TracebackType | None, + ) -> None: + """ + Release the lock. 
+ + :param exc_type: the exception type if raised + :param exc_value: the exception value if raised + :param traceback: the exception traceback if raised + + """ + await self.release() + + def __del__(self) -> None: + """Called when the lock object is deleted.""" + with contextlib.suppress(RuntimeError): + loop = self.loop or asyncio.get_running_loop() + if not loop.is_running(): # pragma: no cover + loop.run_until_complete(self.release(force=True)) + else: + loop.create_task(self.release(force=True)) + + +class AsyncSoftFileLock(SoftFileLock, BaseAsyncFileLock): + """Simply watches the existence of the lock file.""" + + +class AsyncUnixFileLock(UnixFileLock, BaseAsyncFileLock): + """Uses the :func:`fcntl.flock` to hard lock the lock file on unix systems.""" + + +class AsyncWindowsFileLock(WindowsFileLock, BaseAsyncFileLock): + """Uses the :func:`msvcrt.locking` to hard lock the lock file on windows systems.""" + + +__all__ = [ + "AsyncAcquireReturnProxy", + "AsyncSoftFileLock", + "AsyncUnixFileLock", + "AsyncWindowsFileLock", + "BaseAsyncFileLock", +] diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/filelock/py.typed b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/filelock/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/filelock/version.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/filelock/version.py new file mode 100644 index 0000000000000000000000000000000000000000..68cfbf97cf730dd2cd88931283036e275c15db4f --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/filelock/version.py @@ -0,0 +1,21 @@ +# file generated by setuptools-scm +# don't change, don't track in version control + +__all__ = ["__version__", "__version_tuple__", "version", "version_tuple"] + +TYPE_CHECKING = False +if TYPE_CHECKING: + from typing import Tuple + from typing import Union + + VERSION_TUPLE = Tuple[Union[int, str], ...] +else: + VERSION_TUPLE = object + +version: str +__version__: str +__version_tuple__: VERSION_TUPLE +version_tuple: VERSION_TUPLE + +__version__ = version = '3.18.0' +__version_tuple__ = version_tuple = (3, 18, 0) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/INSTALLER b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..5c69047b2eb8235994febeeae1da4a82365a240a --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/INSTALLER @@ -0,0 +1 @@ +uv \ No newline at end of file diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/LICENSE b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..9ecdc7586d08805bc984539f6672476e86e538b6 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2005-2021 Fredrik Johansson and mpmath contributors + +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + a. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + b. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + c. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH +DAMAGE. diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/METADATA b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..994b48acdba5cd0fdfb28cd1fbb0a84ebf81cba5 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/METADATA @@ -0,0 +1,233 @@ +Metadata-Version: 2.1 +Name: mpmath +Version: 1.3.0 +Summary: Python library for arbitrary-precision floating-point arithmetic +Home-page: http://mpmath.org/ +Author: Fredrik Johansson +Author-email: fredrik.johansson@gmail.com +License: BSD +Project-URL: Source, https://github.com/fredrik-johansson/mpmath +Project-URL: Tracker, https://github.com/fredrik-johansson/mpmath/issues +Project-URL: Documentation, http://mpmath.org/doc/current/ +Classifier: License :: OSI Approved :: BSD License +Classifier: Topic :: Scientific/Engineering :: Mathematics +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +License-File: LICENSE +Provides-Extra: develop +Requires-Dist: pytest (>=4.6) ; extra == 'develop' +Requires-Dist: pycodestyle ; extra == 'develop' +Requires-Dist: pytest-cov ; extra == 'develop' +Requires-Dist: codecov ; extra == 'develop' +Requires-Dist: wheel ; extra == 'develop' +Provides-Extra: docs +Requires-Dist: sphinx ; extra == 'docs' +Provides-Extra: gmpy +Requires-Dist: gmpy2 
(>=2.1.0a4) ; (platform_python_implementation != "PyPy") and extra == 'gmpy' +Provides-Extra: tests +Requires-Dist: pytest (>=4.6) ; extra == 'tests' + +mpmath +====== + +|pypi version| |Build status| |Code coverage status| |Zenodo Badge| + +.. |pypi version| image:: https://img.shields.io/pypi/v/mpmath.svg + :target: https://pypi.python.org/pypi/mpmath +.. |Build status| image:: https://github.com/fredrik-johansson/mpmath/workflows/test/badge.svg + :target: https://github.com/fredrik-johansson/mpmath/actions?workflow=test +.. |Code coverage status| image:: https://codecov.io/gh/fredrik-johansson/mpmath/branch/master/graph/badge.svg + :target: https://codecov.io/gh/fredrik-johansson/mpmath +.. |Zenodo Badge| image:: https://zenodo.org/badge/2934512.svg + :target: https://zenodo.org/badge/latestdoi/2934512 + +A Python library for arbitrary-precision floating-point arithmetic. + +Website: http://mpmath.org/ +Main author: Fredrik Johansson + +Mpmath is free software released under the New BSD License (see the +LICENSE file for details) + +0. History and credits +---------------------- + +The following people (among others) have contributed major patches +or new features to mpmath: + +* Pearu Peterson +* Mario Pernici +* Ondrej Certik +* Vinzent Steinberg +* Nimish Telang +* Mike Taschuk +* Case Van Horsen +* Jorn Baayen +* Chris Smith +* Juan Arias de Reyna +* Ioannis Tziakos +* Aaron Meurer +* Stefan Krastanov +* Ken Allen +* Timo Hartmann +* Sergey B Kirpichev +* Kris Kuhlman +* Paul Masson +* Michael Kagalenko +* Jonathan Warner +* Max Gaukler +* Guillermo Navas-Palencia +* Nike Dattani + +Numerous other people have contributed by reporting bugs, +requesting new features, or suggesting improvements to the +documentation. + +For a detailed changelog, including individual contributions, +see the CHANGES file. + +Fredrik's work on mpmath during summer 2008 was sponsored by Google +as part of the Google Summer of Code program. + +Fredrik's work on mpmath during summer 2009 was sponsored by the +American Institute of Mathematics under the support of the National Science +Foundation Grant No. 0757627 (FRG: L-functions and Modular Forms). + +Any opinions, findings, and conclusions or recommendations expressed in this +material are those of the author(s) and do not necessarily reflect the +views of the sponsors. + +Credit also goes to: + +* The authors of the GMP library and the Python wrapper + gmpy, enabling mpmath to become much faster at + high precision +* The authors of MPFR, pari/gp, MPFUN, and other arbitrary- + precision libraries, whose documentation has been helpful + for implementing many of the algorithms in mpmath +* Wikipedia contributors; Abramowitz & Stegun; Gradshteyn & Ryzhik; + Wolfram Research for MathWorld and the Wolfram Functions site. + These are the main references used for special functions + implementations. 
+* George Brandl for developing the Sphinx documentation tool + used to build mpmath's documentation + +Release history: + +* Version 1.3.0 released on March 7, 2023 +* Version 1.2.0 released on February 1, 2021 +* Version 1.1.0 released on December 11, 2018 +* Version 1.0.0 released on September 27, 2017 +* Version 0.19 released on June 10, 2014 +* Version 0.18 released on December 31, 2013 +* Version 0.17 released on February 1, 2011 +* Version 0.16 released on September 24, 2010 +* Version 0.15 released on June 6, 2010 +* Version 0.14 released on February 5, 2010 +* Version 0.13 released on August 13, 2009 +* Version 0.12 released on June 9, 2009 +* Version 0.11 released on January 26, 2009 +* Version 0.10 released on October 15, 2008 +* Version 0.9 released on August 23, 2008 +* Version 0.8 released on April 20, 2008 +* Version 0.7 released on March 12, 2008 +* Version 0.6 released on January 13, 2008 +* Version 0.5 released on November 24, 2007 +* Version 0.4 released on November 3, 2007 +* Version 0.3 released on October 5, 2007 +* Version 0.2 released on October 2, 2007 +* Version 0.1 released on September 27, 2007 + +1. Download & installation +-------------------------- + +Mpmath requires Python 2.7 or 3.5 (or later versions). It has been tested +with CPython 2.7, 3.5 through 3.7 and for PyPy. + +The latest release of mpmath can be downloaded from the mpmath +website and from https://github.com/fredrik-johansson/mpmath/releases + +It should also be available in the Python Package Index at +https://pypi.python.org/pypi/mpmath + +To install latest release of Mpmath with pip, simply run + +``pip install mpmath`` + +Or unpack the mpmath archive and run + +``python setup.py install`` + +Mpmath can also be installed using + +``python -m easy_install mpmath`` + +The latest development code is available from +https://github.com/fredrik-johansson/mpmath + +See the main documentation for more detailed instructions. + +2. Running tests +---------------- + +The unit tests in mpmath/tests/ can be run via the script +runtests.py, but it is recommended to run them with py.test +(https://pytest.org/), especially +to generate more useful reports in case there are failures. + +You may also want to check out the demo scripts in the demo +directory. + +The master branch is automatically tested by Travis CI. + +3. Documentation +---------------- + +Documentation in reStructuredText format is available in the +doc directory included with the source package. These files +are human-readable, but can be compiled to prettier HTML using +the build.py script (requires Sphinx, http://sphinx.pocoo.org/). + +See setup.txt in the documentation for more information. + +The most recent documentation is also available in HTML format: + +http://mpmath.org/doc/current/ + +4. Known problems +----------------- + +Mpmath is a work in progress. Major issues include: + +* Some functions may return incorrect values when given extremely + large arguments or arguments very close to singularities. + +* Directed rounding works for arithmetic operations. It is implemented + heuristically for other operations, and their results may be off by one + or two units in the last place (even if otherwise accurate). + +* Some IEEE 754 features are not available. Inifinities and NaN are + partially supported; denormal rounding is currently not available + at all. + +* The interface for switching precision and rounding is not finalized. + The current method is not threadsafe. + +5. 
Help and bug reports +----------------------- + +General questions and comments can be sent to the mpmath mailinglist, +mpmath@googlegroups.com + +You can also report bugs and send patches to the mpmath issue tracker, +https://github.com/fredrik-johansson/mpmath/issues diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/RECORD b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..45e6fb5270fece6b354c40b8f57c1cfa617e890a --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/RECORD @@ -0,0 +1,94 @@ +mpmath-1.3.0.dist-info/INSTALLER,sha256=5hhM4Q4mYTT9z6QB6PGpUAW81PGNFrYrdXMj4oM_6ak,2 +mpmath-1.3.0.dist-info/LICENSE,sha256=wmyugdpFCOXiSZhXd6M4IfGDIj67dNf4z7-Q_n7vL7c,1537 +mpmath-1.3.0.dist-info/METADATA,sha256=RLZupES5wNGa6UgV01a_BHrmtoDBkmi1wmVofNaoFAY,8630 +mpmath-1.3.0.dist-info/RECORD,, +mpmath-1.3.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +mpmath-1.3.0.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92 +mpmath-1.3.0.dist-info/top_level.txt,sha256=BUVWrh8EVlkOhM1n3X9S8msTaVcC-3s6Sjt60avHYus,7 +mpmath/__init__.py,sha256=skFYTSwfwDBLChAV6pI3SdewgAQR3UBtyrfIK_Jdn-g,8765 +mpmath/calculus/__init__.py,sha256=UAgCIJ1YmaeyTqpNzjBlCZGeIzLtUZMEEpl99VWNjus,162 +mpmath/calculus/approximation.py,sha256=vyzu3YI6r63Oq1KFHrQz02mGXAcH23emqNYhJuUaFZ4,8817 +mpmath/calculus/calculus.py,sha256=A0gSp0hxSyEDfugJViY3CeWalF-vK701YftzrjSQzQ4,112 +mpmath/calculus/differentiation.py,sha256=2L6CBj8xtX9iip98NPbKsLtwtRjxi571wYmTMHFeL90,20226 +mpmath/calculus/extrapolation.py,sha256=xM0rvk2DFEF4iR1Jhl-Y3aS93iW9VVJX7y9IGpmzC-A,73306 +mpmath/calculus/inverselaplace.py,sha256=5-pn8N_t0PtgBTXixsXZ4xxrihK2J5gYsVfTKfDx4gA,36056 +mpmath/calculus/odes.py,sha256=gaHiw7IJjsONNTAa6izFPZpmcg9uyTp8MULnGdzTIGo,9908 +mpmath/calculus/optimization.py,sha256=bKnShXElBOmVOIOlFeksDsYCp9fYSmYwKmXDt0z26MM,32856 +mpmath/calculus/polynomials.py,sha256=D16BhU_SHbVi06IxNwABHR-H77IylndNsN3muPTuFYs,7877 +mpmath/calculus/quadrature.py,sha256=n-avtS8E43foV-5tr5lofgOBaiMUYE8AJjQcWI9QcKk,42432 +mpmath/ctx_base.py,sha256=rfjmfMyA55x8R_cWFINUwWVTElfZmyx5erKDdauSEVw,15985 +mpmath/ctx_fp.py,sha256=ctUjx_NoU0iFWk05cXDYCL2ZtLZOlWs1n6Zao3pbG2g,6572 +mpmath/ctx_iv.py,sha256=tqdMr-GDfkZk1EhoGeCAajy7pQv-RWtrVqhYjfI8r4g,17211 +mpmath/ctx_mp.py,sha256=d3r4t7xHNqSFtmqsA9Btq1Npy3WTM-pcM2_jeCyECxY,49452 +mpmath/ctx_mp_python.py,sha256=3olYWo4lk1SnQ0A_IaZ181qqG8u5pxGat_v-L4Qtn3Y,37815 +mpmath/function_docs.py,sha256=g4PP8n6ILXmHcLyA50sxK6Tmp_Z4_pRN-wDErU8D1i4,283512 +mpmath/functions/__init__.py,sha256=YXVdhqv-6LKm6cr5xxtTNTtuD9zDPKGQl8GmS0xz2xo,330 +mpmath/functions/bessel.py,sha256=dUPLu8frlK-vmf3-irX_7uvwyw4xccv6EIizmIZ88kM,37938 +mpmath/functions/elliptic.py,sha256=qz0yVMb4lWEeOTDL_DWz5u5awmGIPKAsuZFJXgwHJNU,42237 +mpmath/functions/expintegrals.py,sha256=75X_MRdYc1F_X73bgNiOJqwRlS2hqAzcFLl3RM2tCDc,11644 +mpmath/functions/factorials.py,sha256=8_6kCR7e4k1GwxiAOJu0NRadeF4jA28qx4hidhu4ILk,5273 +mpmath/functions/functions.py,sha256=ub2JExvqzCWLkm5yAm72Fr6fdWmZZUknq9_3w9MEigI,18100 +mpmath/functions/hypergeometric.py,sha256=Z0OMAMC4ylK42n_SnamyFVnUx6zHLyCLCoJDSZ1JrHY,51570 +mpmath/functions/orthogonal.py,sha256=FabkxKfBoSseA5flWu1a3re-2BYaew9augqIsT8LaLw,16097 +mpmath/functions/qfunctions.py,sha256=a3EHGKQt_jMd4x9I772Jz-TGFnGY-arWqPvZGz9QSe0,7633 
+mpmath/functions/rszeta.py,sha256=yuUVp4ilIyDmXyE3WTBxDDjwfEJNypJnbPS-xPH5How,46184 +mpmath/functions/signals.py,sha256=ELotwQaW1CDpv-eeJzOZ5c23NhfaZcj9_Gkb3psvS0Q,703 +mpmath/functions/theta.py,sha256=KggOocczoMG6_HMoal4oEP7iZ4SKOou9JFE-WzY2r3M,37320 +mpmath/functions/zeta.py,sha256=ue7JY7GXA0oX8q08sQJl2CSRrZ7kOt8HsftpVjnTwrE,36410 +mpmath/functions/zetazeros.py,sha256=uq6TVyZBcY2MLX7VSdVfn0TOkowBLM9fXtnySEwaNzw,30858 +mpmath/identification.py,sha256=7aMdngRAaeL_MafDUNbmEIlGQSklHDZ8pmPFt-OLgkw,29253 +mpmath/libmp/__init__.py,sha256=UCDjLZw4brbklaCmSixCcPdLdHkz8sF_-6F_wr0duAg,3790 +mpmath/libmp/backend.py,sha256=26A8pUkaGov26vrrFNQVyWJ5LDtK8sl3UHrYLecaTjA,3360 +mpmath/libmp/gammazeta.py,sha256=Xqdw6PMoswDaSca_sOs-IglRuk3fb8c9p43M_lbcrlc,71469 +mpmath/libmp/libelefun.py,sha256=joBZP4FOdxPfieWso1LPtSr6dHydpG_LQiF_bYQYWMg,43861 +mpmath/libmp/libhyper.py,sha256=J9fmdDF6u27EcssEWvBuVaAa3hFjPvPN1SgRgu1dEbc,36624 +mpmath/libmp/libintmath.py,sha256=aIRT0rkUZ_sdGQf3TNCLd-pBMvtQWjssbvFLfK7U0jc,16688 +mpmath/libmp/libmpc.py,sha256=KBndUjs5YVS32-Id3fflDfYgpdW1Prx6zfo8Ez5Qbrs,26875 +mpmath/libmp/libmpf.py,sha256=vpP0kNVkScbCVoZogJ4Watl4I7Ce0d4dzHVjfVe57so,45021 +mpmath/libmp/libmpi.py,sha256=u0I5Eiwkqa-4-dXETi5k7MuaxBeZbvCAPFtl93U9YF0,27622 +mpmath/math2.py,sha256=O5Dglg81SsW0wfHDUJcXOD8-cCaLvbVIvyw0sVmRbpI,18561 +mpmath/matrices/__init__.py,sha256=ETzGDciYbq9ftiKwaMbJ15EI-KNXHrzRb-ZHehhqFjs,94 +mpmath/matrices/calculus.py,sha256=PNRq-p2nxgT-fzC54K2depi8ddhdx6Q86G8qpUiHeUY,18609 +mpmath/matrices/eigen.py,sha256=GbDXI3CixzEdXxr1G86uUWkAngAvd-05MmSQ-Tsu_5k,24394 +mpmath/matrices/eigen_symmetric.py,sha256=FPKPeQr1cGYw6Y6ea32a1YdEWQDLP6JlQHEA2WfNLYg,58534 +mpmath/matrices/linalg.py,sha256=04C3ijzMFom7ob5fXBCDfyPPdo3BIboIeE8x2A6vqF0,26958 +mpmath/matrices/matrices.py,sha256=o78Eq62EHQnxcsR0LBoWDEGREOoN4L2iDM1q3dQrw0o,32331 +mpmath/rational.py,sha256=64d56fvZXngYZT7nOAHeFRUX77eJ1A0R3rpfWBU-mSo,5976 +mpmath/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +mpmath/tests/extratest_gamma.py,sha256=xidhXUelILcxtiPGoTBHjqUOKIJzEaZ_v3nntGQyWZQ,7228 +mpmath/tests/extratest_zeta.py,sha256=sg10j9RhjBpV2EdUqyYhGV2ERWvM--EvwwGIz6HTmlw,1003 +mpmath/tests/runtests.py,sha256=7NUV82F3K_5AhU8mCLUFf5OibtT7uloFCwPyM3l71wM,5189 +mpmath/tests/test_basic_ops.py,sha256=dsB8DRG-GrPzBaZ-bIauYabaeqXbfqBo9SIP9BqcTSs,15348 +mpmath/tests/test_bitwise.py,sha256=-nLYhgQbhDza3SQM63BhktYntACagqMYx9ib3dPnTKM,7686 +mpmath/tests/test_calculus.py,sha256=4oxtNfMpO4RLLoOzrv7r9-h8BcqfBsJIE6UpsHe7c4w,9187 +mpmath/tests/test_compatibility.py,sha256=_t3ASZ3jhfAMnN1voWX7PDNIDzn-3PokkJGIdT1x7y0,2306 +mpmath/tests/test_convert.py,sha256=JPcDcTJIWh5prIxjx5DM1aNWgqlUoF2KpHvAgK3uHi4,8834 +mpmath/tests/test_diff.py,sha256=qjiF8NxQ8vueuZ5ZHGPQ-kjcj_I7Jh_fEdFtaA8DzEI,2466 +mpmath/tests/test_division.py,sha256=6lUeZfmaBWvvszdqlWLMHgXPjVsxvW1WZpd4-jFWCpU,5340 +mpmath/tests/test_eigen.py,sha256=2mnqVATGbsJkvSVHPpitfAk881twFfb3LsO3XikV9Hs,3905 +mpmath/tests/test_eigen_symmetric.py,sha256=v0VimCicIU2owASDMBaP-t-30uq-pXcsglt95KBtNO4,8778 +mpmath/tests/test_elliptic.py,sha256=Kjiwq9Bb6N_OOzzWewGQ1M_PMa7vRs42V0t90gloZxo,26225 +mpmath/tests/test_fp.py,sha256=AJo0FTyH4BuUnUsv176LD956om308KGYndy-b54KGxM,89997 +mpmath/tests/test_functions.py,sha256=b47VywdomoOX6KmMmz9-iv2IqVIydwKSuUw2pWlFHrY,30955 +mpmath/tests/test_functions2.py,sha256=vlw2RWhL1oTcifnOMDx1a_YzN96UgNNIE5STeKRv1HY,96990 +mpmath/tests/test_gammazeta.py,sha256=AB34O0DV7AlEf9Z4brnCadeQU5-uAwhWRw5FZas65DA,27917 
+mpmath/tests/test_hp.py,sha256=6hcENu6Te2klPEiTSeLBIRPlH7PADlJwFKbx8xpnOhg,10461 +mpmath/tests/test_identify.py,sha256=lGUIPfrB2paTg0cFUo64GmMzF77F9gs9FQjX7gxGHV8,692 +mpmath/tests/test_interval.py,sha256=TjYd7a9ca6iRJiLjw06isLeZTuGoGAPmgleDZ0cYfJ0,17527 +mpmath/tests/test_levin.py,sha256=P8M11yV1dj_gdSNv5xuwCzFiF86QyRDtPMjURy6wJ28,5090 +mpmath/tests/test_linalg.py,sha256=miKEnwB8iwWV13hi1bF1cg3hgB4rTKOR0fvDVfWmXds,10440 +mpmath/tests/test_matrices.py,sha256=qyA4Ml2CvNvW034lzB01G6wVgNr7UrgZqh2wkMXtpzM,7944 +mpmath/tests/test_mpmath.py,sha256=LVyJUeofiaxW-zLKWVBCz59L9UQsjlW0Ts9_oBiEv_4,196 +mpmath/tests/test_ode.py,sha256=zAxexBH4fnmFNO4bvEHbug1NJWC5zqfFaVDlYijowkY,1822 +mpmath/tests/test_pickle.py,sha256=Y8CKmDLFsJHUqG8CDaBw5ilrPP4YT1xijVduLpQ7XFE,401 +mpmath/tests/test_power.py,sha256=sz_K02SmNxpa6Kb1uJLN_N4tXTJGdQ___vPRshEN7Gk,5227 +mpmath/tests/test_quad.py,sha256=49Ltft0vZ_kdKLL5s-Kj-BzAVoF5LPVEUeNUzdOkghI,3893 +mpmath/tests/test_rootfinding.py,sha256=umQegEaKHmYOEl5jEyoD-VLKDtXsTJJkepKEr4c0dC0,3132 +mpmath/tests/test_special.py,sha256=YbMIoMIkJEvvKYIzS0CXthJFG0--j6un7-tcE6b7FPM,2848 +mpmath/tests/test_str.py,sha256=0WsGD9hMPRi8zcuYMA9Cu2mOvQiCFskPwMsMf8lBDK4,544 +mpmath/tests/test_summation.py,sha256=fdNlsvRVOsbWxbhlyDLDaEO2S8kTJrRMKIvB5-aNci0,2035 +mpmath/tests/test_trig.py,sha256=zPtkIEnZaThxcWur4k7BX8-2Jmj-AhO191Svv7ANYUU,4799 +mpmath/tests/test_visualization.py,sha256=1PqtkoUx-WsKYgTRiu5o9pBc85kwhf1lzU2eobDQCJM,944 +mpmath/tests/torture.py,sha256=LD95oES7JY2KroELK-m-jhvtbvZaKChnt0Cq7kFMNCw,7868 +mpmath/usertools.py,sha256=a-TDw7XSRsPdBEffxOooDV4WDFfuXnO58P75dcAD87I,3029 +mpmath/visualization.py,sha256=pnnbjcd9AhFVRBZavYX5gjx4ytK_kXoDDisYR6EpXhs,10627 diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/REQUESTED b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/REQUESTED new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/WHEEL b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..57e3d840d59a650ac5bccbad5baeec47d155f0ad --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.38.4) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/top_level.txt b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..dda7c273a8dd1c6adffa9d2d9901e0ce6876f4ac --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/top_level.txt @@ -0,0 +1 @@ +mpmath diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/__init__.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b057eb43802b02d6a10d2c5a62046db1d271670c --- /dev/null +++ 
b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/__init__.py @@ -0,0 +1,57 @@ +from optuna import distributions +from optuna import exceptions +from optuna import integration +from optuna import logging +from optuna import pruners +from optuna import samplers +from optuna import search_space +from optuna import storages +from optuna import study +from optuna import trial +from optuna import version +from optuna._imports import _LazyImport +from optuna.exceptions import TrialPruned +from optuna.study import copy_study +from optuna.study import create_study +from optuna.study import delete_study +from optuna.study import get_all_study_names +from optuna.study import get_all_study_summaries +from optuna.study import load_study +from optuna.study import Study +from optuna.trial import create_trial +from optuna.trial import Trial +from optuna.version import __version__ + + +__all__ = [ + "Study", + "Trial", + "TrialPruned", + "__version__", + "artifacts", + "copy_study", + "create_study", + "create_trial", + "delete_study", + "distributions", + "exceptions", + "get_all_study_names", + "get_all_study_summaries", + "importance", + "integration", + "load_study", + "logging", + "pruners", + "samplers", + "search_space", + "storages", + "study", + "trial", + "version", + "visualization", +] + + +artifacts = _LazyImport("optuna.artifacts") +importance = _LazyImport("optuna.importance") +visualization = _LazyImport("optuna.visualization") diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/_callbacks.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/_callbacks.py new file mode 100644 index 0000000000000000000000000000000000000000..6eb1897eb4c65b8ab738289156ac7f12fb3c16c2 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/_callbacks.py @@ -0,0 +1,63 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +from optuna.trial import TrialState + + +if TYPE_CHECKING: + from collections.abc import Container + + from optuna.study import Study + from optuna.trial import FrozenTrial + + +class MaxTrialsCallback: + """Set a maximum number of trials before ending the study. + + While the ``n_trials`` argument of :meth:`optuna.study.Study.optimize` sets the number of + trials that will be run, you may want to continue running until you have a certain number of + successfully completed trials or stop the study when you have a certain number of trials that + fail. This ``MaxTrialsCallback`` class allows you to set a maximum number of trials for a + particular :class:`~optuna.trial.TrialState` before stopping the study. + + Example: + + .. testcode:: + + import optuna + from optuna.study import MaxTrialsCallback + from optuna.trial import TrialState + + + def objective(trial): + x = trial.suggest_float("x", -1, 1) + return x**2 + + + study = optuna.create_study() + study.optimize( + objective, + callbacks=[MaxTrialsCallback(10, states=(TrialState.COMPLETE,))], + ) + + Args: + n_trials: + The max number of trials. Must be set to an integer. + states: + Tuple of the :class:`~optuna.trial.TrialState` to be counted + towards the max trials limit. Default value is ``(TrialState.COMPLETE,)``. + If :obj:`None`, count all states. 
+ """ + + def __init__( + self, n_trials: int, states: Container[TrialState] | None = (TrialState.COMPLETE,) + ) -> None: + self._n_trials = n_trials + self._states = states + + def __call__(self, study: Study, trial: FrozenTrial) -> None: + trials = study.get_trials(deepcopy=False, states=self._states) + n_complete = len(trials) + if n_complete >= self._n_trials: + study.stop() diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/_convert_positional_args.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/_convert_positional_args.py new file mode 100644 index 0000000000000000000000000000000000000000..4865d9e6297cee46073e167d12ac04c0fca64ed7 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/_convert_positional_args.py @@ -0,0 +1,139 @@ +from __future__ import annotations + +from functools import wraps +from inspect import Parameter +from inspect import signature +from typing import Any +from typing import TYPE_CHECKING +from typing import TypeVar +import warnings + +from optuna._deprecated import _validate_two_version +from optuna._experimental import _validate_version + + +if TYPE_CHECKING: + from collections.abc import Callable + from collections.abc import Sequence + + from typing_extensions import ParamSpec + + _P = ParamSpec("_P") + _T = TypeVar("_T") + + +_DEPRECATION_WARNING_TEMPLATE = ( + "Positional arguments {deprecated_positional_arg_names} in {func_name}() " + "have been deprecated since v{d_ver}. " + "They will be replaced with the corresponding keyword arguments in v{r_ver}, " + "so please use the keyword specification instead. " + "See https://github.com/optuna/optuna/releases/tag/v{d_ver} for details." +) + + +def _get_positional_arg_names(func: "Callable[_P, _T]") -> list[str]: + params = signature(func).parameters + positional_arg_names = [ + name + for name, p in params.items() + if p.default == Parameter.empty and p.kind == p.POSITIONAL_OR_KEYWORD + ] + return positional_arg_names + + +def _infer_kwargs(previous_positional_arg_names: Sequence[str], *args: Any) -> dict[str, Any]: + inferred_kwargs = {arg_name: val for val, arg_name in zip(args, previous_positional_arg_names)} + return inferred_kwargs + + +def convert_positional_args( + *, + previous_positional_arg_names: Sequence[str], + deprecated_version: str, + removed_version: str, + warning_stacklevel: int = 2, +) -> "Callable[[Callable[_P, _T]], Callable[_P, _T]]": + """Convert positional arguments to keyword arguments. + + Args: + previous_positional_arg_names: + List of names previously given as positional arguments. + warning_stacklevel: + Level of the stack trace where decorated function locates. + deprecated_version: + The version in which the use of positional arguments is deprecated. + removed_version: + The version in which the use of positional arguments will be removed. + """ + + if deprecated_version is not None or removed_version is not None: + if deprecated_version is None: + raise ValueError( + "deprecated_version must not be None when removed_version is specified." + ) + if removed_version is None: + raise ValueError( + "removed_version must not be None when deprecated_version is specified." 
+ ) + + _validate_version(deprecated_version) + _validate_version(removed_version) + _validate_two_version(deprecated_version, removed_version) + + def converter_decorator(func: "Callable[_P, _T]") -> "Callable[_P, _T]": + + assert set(previous_positional_arg_names).issubset(set(signature(func).parameters)), ( + f"{set(previous_positional_arg_names)} is not a subset of" + f" {set(signature(func).parameters)}" + ) + + @wraps(func) + def converter_wrapper(*args: Any, **kwargs: Any) -> "_T": + warning_messages = [] + positional_arg_names = _get_positional_arg_names(func) + inferred_kwargs = _infer_kwargs(previous_positional_arg_names, *args) + + if len(inferred_kwargs) > len(positional_arg_names): + expected_kwds = set(inferred_kwargs) - set(positional_arg_names) + warning_messages.append( + f"{func.__name__}() got {expected_kwds} as positional arguments " + "but they were expected to be given as keyword arguments." + ) + + if deprecated_version or removed_version: + warning_messages.append( + _DEPRECATION_WARNING_TEMPLATE.format( + deprecated_positional_arg_names=previous_positional_arg_names, + func_name=func.__name__, + d_ver=deprecated_version, + r_ver=removed_version, + ) + ) + + if warning_messages: + warnings.warn( + "\n".join(warning_messages), FutureWarning, stacklevel=warning_stacklevel + ) + + if len(args) > len(previous_positional_arg_names): + raise TypeError( + f"{func.__name__}() takes {len(previous_positional_arg_names)} positional" + f" arguments but {len(args)} were given." + ) + + duplicated_kwds = set(kwargs).intersection(inferred_kwargs) + if len(duplicated_kwds): + # When specifying positional arguments that are not located at the end of args as + # keyword arguments, raise TypeError as follows by imitating the Python standard + # behavior + raise TypeError( + f"{func.__name__}() got multiple values for arguments {duplicated_kwds}." + ) + + kwargs.update(inferred_kwargs) + + return func(**kwargs) # type: ignore[call-arg] + + return converter_wrapper + + return converter_decorator diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/_deprecated.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/_deprecated.py new file mode 100644 index 0000000000000000000000000000000000000000..b5c621352933058afe207f205c56760f89a69d22 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/_deprecated.py @@ -0,0 +1,199 @@ +from __future__ import annotations + +import functools +import textwrap +from typing import Any +from typing import TYPE_CHECKING +from typing import TypeVar +import warnings + +from packaging import version + +from optuna._experimental import _get_docstring_indent +from optuna._experimental import _validate_version + + +if TYPE_CHECKING: + from collections.abc import Callable + + from typing_extensions import ParamSpec + + FT = TypeVar("FT") + FP = ParamSpec("FP") + CT = TypeVar("CT") + + +_DEPRECATION_NOTE_TEMPLATE = """ + +.. warning:: + Deprecated in v{d_ver}. This feature will be removed in the future. The removal of this + feature is currently scheduled for v{r_ver}, but this schedule is subject to change. + See https://github.com/optuna/optuna/releases/tag/v{d_ver}. +""" + + +_DEPRECATION_WARNING_TEMPLATE = ( + "{name} has been deprecated in v{d_ver}. " + "This feature will be removed in v{r_ver}. " + "See https://github.com/optuna/optuna/releases/tag/v{d_ver}." 
+) + + +def _validate_two_version(old_version: str, new_version: str) -> None: + if version.parse(old_version) > version.parse(new_version): + raise ValueError( + "Invalid version relationship. The deprecated version must be smaller than " + "the removed version, but (deprecated version, removed version) = ({}, {}) are " + "specified.".format(old_version, new_version) + ) + + +def _format_text(text: str) -> str: + return "\n\n" + textwrap.indent(text.strip(), " ") + "\n" + + +def deprecated_func( + deprecated_version: str, + removed_version: str, + name: str | None = None, + text: str | None = None, +) -> "Callable[[Callable[FP, FT]], Callable[FP, FT]]": + """Decorate function as deprecated. + + Args: + deprecated_version: + The version in which the target feature is deprecated. + removed_version: + The version in which the target feature will be removed. + name: + The name of the feature. Defaults to the function name. Optional. + text: + The additional text for the deprecation note. The default note is build using specified + ``deprecated_version`` and ``removed_version``. If you want to provide additional + information, please specify this argument yourself. + + .. note:: + The default deprecation note is as follows: "Deprecated in v{d_ver}. This feature + will be removed in the future. The removal of this feature is currently scheduled + for v{r_ver}, but this schedule is subject to change. See + https://github.com/optuna/optuna/releases/tag/v{d_ver}." + + .. note:: + The specified text is concatenated after the default deprecation note. + """ + + _validate_version(deprecated_version) + _validate_version(removed_version) + _validate_two_version(deprecated_version, removed_version) + + def decorator(func: "Callable[FP, FT]") -> "Callable[FP, FT]": + if func.__doc__ is None: + func.__doc__ = "" + + note = _DEPRECATION_NOTE_TEMPLATE.format(d_ver=deprecated_version, r_ver=removed_version) + if text is not None: + note += _format_text(text) + indent = _get_docstring_indent(func.__doc__) + func.__doc__ = func.__doc__.strip() + textwrap.indent(note, indent) + indent + + @functools.wraps(func) + def wrapper(*args: Any, **kwargs: Any) -> "FT": + """Decorates a function as deprecated. + + This decorator is supposed to be applied to the deprecated function. + """ + + message = _DEPRECATION_WARNING_TEMPLATE.format( + name=(name if name is not None else func.__name__), + d_ver=deprecated_version, + r_ver=removed_version, + ) + if text is not None: + message += " " + text + warnings.warn(message, FutureWarning, stacklevel=2) + + return func(*args, **kwargs) + + return wrapper + + return decorator + + +def deprecated_class( + deprecated_version: str, + removed_version: str, + name: str | None = None, + text: str | None = None, +) -> "Callable[[CT], CT]": + """Decorate class as deprecated. + + Args: + deprecated_version: + The version in which the target feature is deprecated. + removed_version: + The version in which the target feature will be removed. + name: + The name of the feature. Defaults to the class name. Optional. + text: + The additional text for the deprecation note. The default note is build using specified + ``deprecated_version`` and ``removed_version``. If you want to provide additional + information, please specify this argument yourself. + + .. note:: + The default deprecation note is as follows: "Deprecated in v{d_ver}. This feature + will be removed in the future. The removal of this feature is currently scheduled + for v{r_ver}, but this schedule is subject to change. 
See + https://github.com/optuna/optuna/releases/tag/v{d_ver}." + + .. note:: + The specified text is concatenated after the default deprecation note. + """ + + _validate_version(deprecated_version) + _validate_version(removed_version) + _validate_two_version(deprecated_version, removed_version) + + def decorator(cls: "CT") -> "CT": + def wrapper(cls: "CT") -> "CT": + """Decorates a class as deprecated. + + This decorator is supposed to be applied to the deprecated class. + """ + _original_init = getattr(cls, "__init__") + _original_name = getattr(cls, "__name__") + + @functools.wraps(_original_init) + def wrapped_init(self: Any, *args: Any, **kwargs: Any) -> None: + message = _DEPRECATION_WARNING_TEMPLATE.format( + name=(name if name is not None else _original_name), + d_ver=deprecated_version, + r_ver=removed_version, + ) + if text is not None: + message += " " + text + warnings.warn( + message, + FutureWarning, + stacklevel=2, + ) + + _original_init(self, *args, **kwargs) + + setattr(cls, "__init__", wrapped_init) + + if cls.__doc__ is None: + cls.__doc__ = "" + + note = _DEPRECATION_NOTE_TEMPLATE.format( + d_ver=deprecated_version, r_ver=removed_version + ) + if text is not None: + note += _format_text(text) + indent = _get_docstring_indent(cls.__doc__) + cls.__doc__ = cls.__doc__.strip() + textwrap.indent(note, indent) + indent + + return cls + + return wrapper(cls) + + return decorator diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/_experimental.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/_experimental.py new file mode 100644 index 0000000000000000000000000000000000000000..f3a4ff532de674789d4a67e2329ff1896e99fd4c --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/_experimental.py @@ -0,0 +1,139 @@ +from __future__ import annotations + +import functools +import textwrap +from typing import Any +from typing import TYPE_CHECKING +from typing import TypeVar +import warnings + +from optuna.exceptions import ExperimentalWarning + + +if TYPE_CHECKING: + from collections.abc import Callable + + from typing_extensions import ParamSpec + + FT = TypeVar("FT") + FP = ParamSpec("FP") + CT = TypeVar("CT") + + +_EXPERIMENTAL_NOTE_TEMPLATE = """ + +.. note:: + Added in v{ver} as an experimental feature. The interface may change in newer versions + without prior notice. See https://github.com/optuna/optuna/releases/tag/v{ver}. +""" + + +def warn_experimental_argument(option_name: str) -> None: + warnings.warn( + f"Argument ``{option_name}`` is an experimental feature." + " The interface can change in the future.", + ExperimentalWarning, + ) + + +def _validate_version(version: str) -> None: + if not isinstance(version, str) or len(version.split(".")) != 3: + raise ValueError( + "Invalid version specification. Must follow `x.y.z` format but `{}` is given".format( + version + ) + ) + + +def _get_docstring_indent(docstring: str) -> str: + return docstring.split("\n")[-1] if "\n" in docstring else "" + + +def experimental_func( + version: str, + name: str | None = None, +) -> Callable[[Callable[FP, FT]], Callable[FP, FT]]: + """Decorate function as experimental. + + Args: + version: The first version that supports the target feature. + name: The name of the feature. Defaults to the function name. Optional. 
+ """ + + _validate_version(version) + + def decorator(func: Callable[FP, FT]) -> Callable[FP, FT]: + if func.__doc__ is None: + func.__doc__ = "" + + note = _EXPERIMENTAL_NOTE_TEMPLATE.format(ver=version) + indent = _get_docstring_indent(func.__doc__) + func.__doc__ = func.__doc__.strip() + textwrap.indent(note, indent) + indent + + @functools.wraps(func) + def wrapper(*args: Any, **kwargs: Any) -> FT: + warnings.warn( + "{} is experimental (supported from v{}). " + "The interface can change in the future.".format( + name if name is not None else func.__name__, version + ), + ExperimentalWarning, + stacklevel=2, + ) + + return func(*args, **kwargs) + + return wrapper + + return decorator + + +def experimental_class( + version: str, + name: str | None = None, +) -> Callable[[CT], CT]: + """Decorate class as experimental. + + Args: + version: The first version that supports the target feature. + name: The name of the feature. Defaults to the class name. Optional. + """ + + _validate_version(version) + + def decorator(cls: CT) -> CT: + def wrapper(cls: CT) -> CT: + """Decorates a class as experimental. + + This decorator is supposed to be applied to the experimental class. + """ + _original_init = getattr(cls, "__init__") + _original_name = getattr(cls, "__name__") + + @functools.wraps(_original_init) + def wrapped_init(self: Any, *args: Any, **kwargs: Any) -> None: + warnings.warn( + "{} is experimental (supported from v{}). " + "The interface can change in the future.".format( + name if name is not None else _original_name, version + ), + ExperimentalWarning, + stacklevel=2, + ) + + _original_init(self, *args, **kwargs) + + setattr(cls, "__init__", wrapped_init) + + if cls.__doc__ is None: + cls.__doc__ = "" + + note = _EXPERIMENTAL_NOTE_TEMPLATE.format(ver=version) + indent = _get_docstring_indent(cls.__doc__) + cls.__doc__ = cls.__doc__.strip() + textwrap.indent(note, indent) + indent + + return cls + + return wrapper(cls) + + return decorator diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/_gp/__init__.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/_gp/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/_gp/acqf.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/_gp/acqf.py new file mode 100644 index 0000000000000000000000000000000000000000..968e9123b730f044bee44b4e938584d33814fa79 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/_gp/acqf.py @@ -0,0 +1,347 @@ +from __future__ import annotations + +from dataclasses import dataclass +from enum import IntEnum +import math +from typing import TYPE_CHECKING + +import numpy as np + +from optuna._gp.gp import kernel +from optuna._gp.gp import KernelParamsTensor +from optuna._gp.gp import posterior +from optuna._gp.search_space import ScaleType +from optuna._gp.search_space import SearchSpace +from optuna._hypervolume import get_non_dominated_box_bounds +from optuna.study._multi_objective import _is_pareto_front + + +if TYPE_CHECKING: + import torch +else: + from optuna._imports import _LazyImport + + torch = _LazyImport("torch") + + +def _sample_from_normal_sobol(dim: int, n_samples: int, seed: int | None) -> torch.Tensor: + # NOTE(nabenabe): Normal Sobol sampling based on BoTorch. 
+ # https://github.com/pytorch/botorch/blob/v0.13.0/botorch/sampling/qmc.py#L26-L97 + # https://github.com/pytorch/botorch/blob/v0.13.0/botorch/utils/sampling.py#L109-L138 + sobol_samples = torch.quasirandom.SobolEngine( # type: ignore[no-untyped-call] + dimension=dim, scramble=True, seed=seed + ).draw(n_samples, dtype=torch.float64) + samples = 2.0 * (sobol_samples - 0.5) # The Sobol sequence in [-1, 1]. + # Inverse transform to standard normal (values to close to -1 or 1 result in infinity). + return torch.erfinv(samples) * float(np.sqrt(2)) + + +def logehvi( + Y_post: torch.Tensor, # (..., n_qmc_samples, n_objectives) + non_dominated_box_lower_bounds: torch.Tensor, # (n_boxes, n_objectives) + non_dominated_box_upper_bounds: torch.Tensor, # (n_boxes, n_objectives) +) -> torch.Tensor: # (..., ) + log_n_qmc_samples = float(np.log(Y_post.shape[-2])) + # This function calculates Eq. (1) of https://arxiv.org/abs/2006.05078. + # TODO(nabenabe): Adapt to Eq. (3) when we support batch optimization. + # TODO(nabenabe): Make the calculation here more numerically stable. + # cf. https://arxiv.org/abs/2310.20708 + # Check the implementations here: + # https://github.com/pytorch/botorch/blob/v0.13.0/botorch/utils/safe_math.py + # https://github.com/pytorch/botorch/blob/v0.13.0/botorch/acquisition/multi_objective/logei.py#L146-L266 + _EPS = torch.tensor(1e-12, dtype=torch.float64) # NOTE(nabenabe): grad becomes nan when EPS=0. + diff = torch.maximum( + _EPS, + torch.minimum(Y_post[..., torch.newaxis, :], non_dominated_box_upper_bounds) + - non_dominated_box_lower_bounds, + ) + # NOTE(nabenabe): logsumexp with dim=-1 is for the HVI calculation and that with dim=-2 is for + # expectation of the HVIs over the fixed_samples. + return torch.special.logsumexp(diff.log().sum(dim=-1), dim=(-2, -1)) - log_n_qmc_samples + + +def standard_logei(z: torch.Tensor) -> torch.Tensor: + # Return E_{x ~ N(0, 1)}[max(0, x+z)] + + # We switch the implementation depending on the value of z to + # avoid numerical instability. + small = z < -25 + + vals = torch.empty_like(z) + # Eq. (9) in ref: https://arxiv.org/pdf/2310.20708.pdf + # NOTE: We do not use the third condition because ours is good enough. 
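+ # Concretely, both branches below evaluate log E_{x~N(0,1)}[max(0, x + z)]
+ # = log(z * Phi(z) + phi(z)), with Phi and phi the standard normal CDF and PDF.
+ # The first condition computes this directly; the second rewrites it via the
+ # scaled complementary error function, using Phi(z) / phi(z) = sqrt(pi/2) * erfcx(-z / sqrt(2)),
+ # which avoids underflow for z < -25.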
+ z_small = z[small] + z_normal = z[~small] + sqrt_2pi = math.sqrt(2 * math.pi) + # First condition + cdf = 0.5 * torch.special.erfc(-z_normal * math.sqrt(0.5)) + pdf = torch.exp(-0.5 * z_normal**2) * (1 / sqrt_2pi) + vals[~small] = torch.log(z_normal * cdf + pdf) + # Second condition + r = math.sqrt(0.5 * math.pi) * torch.special.erfcx(-z_small * math.sqrt(0.5)) + vals[small] = -0.5 * z_small**2 + torch.log((z_small * r + 1) * (1 / sqrt_2pi)) + return vals + + +def logei(mean: torch.Tensor, var: torch.Tensor, f0: float) -> torch.Tensor: + # Return E_{y ~ N(mean, var)}[max(0, y-f0)] + sigma = torch.sqrt(var) + st_val = standard_logei((mean - f0) / sigma) + val = torch.log(sigma) + st_val + return val + + +def logpi(mean: torch.Tensor, var: torch.Tensor, f0: float) -> torch.Tensor: + # Return the integral of N(mean, var) from -inf to f0 + # This is identical to the integral of N(0, 1) from -inf to (f0-mean)/sigma + # Return E_{y ~ N(mean, var)}[bool(y <= f0)] + sigma = torch.sqrt(var) + return torch.special.log_ndtr((f0 - mean) / sigma) + + +def ucb(mean: torch.Tensor, var: torch.Tensor, beta: float) -> torch.Tensor: + return mean + torch.sqrt(beta * var) + + +def lcb(mean: torch.Tensor, var: torch.Tensor, beta: float) -> torch.Tensor: + return mean - torch.sqrt(beta * var) + + +# TODO(contramundum53): consider abstraction for acquisition functions. +# NOTE: Acquisition function is not class on purpose to integrate numba in the future. +class AcquisitionFunctionType(IntEnum): + LOG_EI = 0 + UCB = 1 + LCB = 2 + LOG_PI = 3 + LOG_EHVI = 4 + + +@dataclass(frozen=True) +class AcquisitionFunctionParams: + acqf_type: AcquisitionFunctionType + kernel_params: KernelParamsTensor + X: np.ndarray + search_space: SearchSpace + cov_Y_Y_inv: np.ndarray + cov_Y_Y_inv_Y: np.ndarray + # TODO(kAIto47802): Want to change the name to a generic name like threshold, + # since it is not actually in operation as max_Y + max_Y: float + beta: float | None + acqf_stabilizing_noise: float + + +@dataclass(frozen=True) +class ConstrainedAcquisitionFunctionParams(AcquisitionFunctionParams): + acqf_params_for_constraints: list[AcquisitionFunctionParams] + + @classmethod + def from_acqf_params( + cls, + acqf_params: AcquisitionFunctionParams, + acqf_params_for_constraints: list[AcquisitionFunctionParams], + ) -> ConstrainedAcquisitionFunctionParams: + return cls( + acqf_type=acqf_params.acqf_type, + kernel_params=acqf_params.kernel_params, + X=acqf_params.X, + search_space=acqf_params.search_space, + cov_Y_Y_inv=acqf_params.cov_Y_Y_inv, + cov_Y_Y_inv_Y=acqf_params.cov_Y_Y_inv_Y, + max_Y=acqf_params.max_Y, + beta=acqf_params.beta, + acqf_stabilizing_noise=acqf_params.acqf_stabilizing_noise, + acqf_params_for_constraints=acqf_params_for_constraints, + ) + + +@dataclass(frozen=True) +class MultiObjectiveAcquisitionFunctionParams(AcquisitionFunctionParams): + acqf_params_for_objectives: list[AcquisitionFunctionParams] + non_dominated_box_lower_bounds: torch.Tensor + non_dominated_box_upper_bounds: torch.Tensor + fixed_samples: torch.Tensor + + @classmethod + def from_acqf_params( + cls, + acqf_params_for_objectives: list[AcquisitionFunctionParams], + Y: np.ndarray, + n_qmc_samples: int, + qmc_seed: int | None, + ) -> MultiObjectiveAcquisitionFunctionParams: + def _get_non_dominated_box_bounds() -> tuple[torch.Tensor, torch.Tensor]: + loss_vals = -Y # NOTE(nabenabe): Y is to be maximized, loss_vals is to be minimized. 
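+ # The reference point built below is the per-objective worst (largest) observed
+ # loss, pushed further from the Pareto front by 10% of its magnitude and then by
+ # one ulp via np.nextafter, so that every Pareto-optimal point strictly dominates
+ # it (the nextafter step also covers objectives whose worst value is exactly 0).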
+ pareto_sols = loss_vals[_is_pareto_front(loss_vals, assume_unique_lexsorted=False)] + ref_point = np.max(loss_vals, axis=0) + ref_point = np.nextafter(np.maximum(1.1 * ref_point, 0.9 * ref_point), np.inf) + lbs, ubs = get_non_dominated_box_bounds(pareto_sols, ref_point) + # NOTE(nabenabe): Flip back the sign to make them compatible with maximization. + return torch.from_numpy(-ubs), torch.from_numpy(-lbs) + + fixed_samples = _sample_from_normal_sobol( + dim=Y.shape[-1], n_samples=n_qmc_samples, seed=qmc_seed + ) + non_dominated_box_lower_bounds, non_dominated_box_upper_bounds = ( + _get_non_dominated_box_bounds() + ) + # Since all the objectives are equally important, we simply use the mean of + # inverse of squared mean lengthscales over all the objectives. + mean_lengthscales = np.mean( + [ + 1 + / np.sqrt(acqf_params.kernel_params.inverse_squared_lengthscales.detach().numpy()) + for acqf_params in acqf_params_for_objectives + ], + axis=0, + ) + dummy_kernel_params = KernelParamsTensor( + # inverse_squared_lengthscales is used in optim_mixed.py. + # cf. https://github.com/optuna/optuna/blob/v4.3.0/optuna/_gp/optim_mixed.py#L200-L209 + inverse_squared_lengthscales=torch.from_numpy(1.0 / mean_lengthscales**2), + # These parameters will not be used anywhere. + kernel_scale=torch.empty(0), + noise_var=torch.empty(0), + ) + repr_acqf_params = acqf_params_for_objectives[0] + return cls( + acqf_type=AcquisitionFunctionType.LOG_EHVI, + X=repr_acqf_params.X, + search_space=repr_acqf_params.search_space, + acqf_stabilizing_noise=repr_acqf_params.acqf_stabilizing_noise, + acqf_params_for_objectives=acqf_params_for_objectives, + non_dominated_box_lower_bounds=non_dominated_box_lower_bounds, + non_dominated_box_upper_bounds=non_dominated_box_upper_bounds, + fixed_samples=fixed_samples, + kernel_params=dummy_kernel_params, + # The variables below will not be used anywhere, so we simply set dummy values. 
+ cov_Y_Y_inv=np.empty(0), + cov_Y_Y_inv_Y=np.empty(0), + max_Y=np.nan, + beta=None, + ) + + +def create_acqf_params( + acqf_type: AcquisitionFunctionType, + kernel_params: KernelParamsTensor, + search_space: SearchSpace, + X: np.ndarray, + Y: np.ndarray, + max_Y: float | None = None, + beta: float | None = None, + acqf_stabilizing_noise: float = 1e-12, +) -> AcquisitionFunctionParams: + X_tensor = torch.from_numpy(X) + is_categorical = torch.from_numpy(search_space.scale_types == ScaleType.CATEGORICAL) + with torch.no_grad(): + cov_Y_Y = kernel(is_categorical, kernel_params, X_tensor, X_tensor).detach().numpy() + + cov_Y_Y[np.diag_indices(X.shape[0])] += kernel_params.noise_var.item() + cov_Y_Y_inv = np.linalg.inv(cov_Y_Y) + + return AcquisitionFunctionParams( + acqf_type=acqf_type, + kernel_params=kernel_params, + X=X, + search_space=search_space, + cov_Y_Y_inv=cov_Y_Y_inv, + cov_Y_Y_inv_Y=cov_Y_Y_inv @ Y, + max_Y=max_Y if max_Y is not None else np.max(Y), + beta=beta, + acqf_stabilizing_noise=acqf_stabilizing_noise, + ) + + +def _eval_ehvi( + ehvi_acqf_params: MultiObjectiveAcquisitionFunctionParams, x: torch.Tensor +) -> torch.Tensor: + X = torch.from_numpy(ehvi_acqf_params.X) + is_categorical = torch.from_numpy( + ehvi_acqf_params.search_space.scale_types == ScaleType.CATEGORICAL + ) + Y_post = [] + fixed_samples = ehvi_acqf_params.fixed_samples + for i, acqf_params in enumerate(ehvi_acqf_params.acqf_params_for_objectives): + mean, var = posterior( + kernel_params=acqf_params.kernel_params, + X=X, + is_categorical=is_categorical, + cov_Y_Y_inv=torch.from_numpy(acqf_params.cov_Y_Y_inv), + cov_Y_Y_inv_Y=torch.from_numpy(acqf_params.cov_Y_Y_inv_Y), + x=x, + ) + stdev = torch.sqrt(var + ehvi_acqf_params.acqf_stabilizing_noise) + # NOTE(nabenabe): By using fixed samples from the Sobol sequence, EHVI becomes + # deterministic, making it possible to optimize the acqf by l-BFGS. + # Sobol is better than the standard Monte-Carlo w.r.t. the approximation stability. + # cf. Appendix D of https://arxiv.org/pdf/2006.05078 + Y_post.append(mean[..., torch.newaxis] + stdev[..., torch.newaxis] * fixed_samples[..., i]) + + # NOTE(nabenabe): Use the following once multi-task GP is supported. + # L = torch.linalg.cholesky(cov) + # Y_post = means[..., torch.newaxis, :] + torch.einsum("...MM,SM->...SM", L, fixed_samples) + return logehvi( + Y_post=torch.stack(Y_post, dim=-1), + non_dominated_box_lower_bounds=ehvi_acqf_params.non_dominated_box_lower_bounds, + non_dominated_box_upper_bounds=ehvi_acqf_params.non_dominated_box_upper_bounds, + ) + + +def eval_acqf(acqf_params: AcquisitionFunctionParams, x: torch.Tensor) -> torch.Tensor: + if acqf_params.acqf_type == AcquisitionFunctionType.LOG_EHVI: + assert isinstance(acqf_params, MultiObjectiveAcquisitionFunctionParams) + return _eval_ehvi(ehvi_acqf_params=acqf_params, x=x) + + mean, var = posterior( + acqf_params.kernel_params, + torch.from_numpy(acqf_params.X), + torch.from_numpy(acqf_params.search_space.scale_types == ScaleType.CATEGORICAL), + torch.from_numpy(acqf_params.cov_Y_Y_inv), + torch.from_numpy(acqf_params.cov_Y_Y_inv_Y), + x, + ) + + if acqf_params.acqf_type == AcquisitionFunctionType.LOG_EI: + # If there are no feasible trials, max_Y is set to -np.inf. + # If max_Y is set to -np.inf, we set logEI to zero to ignore it. 
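+ # acqf_stabilizing_noise is added to the posterior variance below so that the
+ # standard deviation stays strictly positive; posterior() clamps the variance at
+ # 0.0, which would otherwise make the log-based acquisition values non-finite.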
+ f_val = ( + logei(mean=mean, var=var + acqf_params.acqf_stabilizing_noise, f0=acqf_params.max_Y) + if not np.isneginf(acqf_params.max_Y) + else torch.tensor(0.0, dtype=torch.float64) + ) + elif acqf_params.acqf_type == AcquisitionFunctionType.LOG_PI: + f_val = logpi( + mean=mean, var=var + acqf_params.acqf_stabilizing_noise, f0=acqf_params.max_Y + ) + elif acqf_params.acqf_type == AcquisitionFunctionType.UCB: + assert acqf_params.beta is not None, "beta must be given to UCB." + f_val = ucb(mean=mean, var=var, beta=acqf_params.beta) + elif acqf_params.acqf_type == AcquisitionFunctionType.LCB: + assert acqf_params.beta is not None, "beta must be given to LCB." + f_val = lcb(mean=mean, var=var, beta=acqf_params.beta) + else: + assert False, "Unknown acquisition function type." + + if isinstance(acqf_params, ConstrainedAcquisitionFunctionParams): + c_val = sum(eval_acqf(params, x) for params in acqf_params.acqf_params_for_constraints) + return f_val + c_val + else: + return f_val + + +def eval_acqf_no_grad(acqf_params: AcquisitionFunctionParams, x: np.ndarray) -> np.ndarray: + with torch.no_grad(): + return eval_acqf(acqf_params, torch.from_numpy(x)).detach().numpy() + + +def eval_acqf_with_grad( + acqf_params: AcquisitionFunctionParams, x: np.ndarray +) -> tuple[float, np.ndarray]: + assert x.ndim == 1 + x_tensor = torch.from_numpy(x) + x_tensor.requires_grad_(True) + val = eval_acqf(acqf_params, x_tensor) + val.backward() # type: ignore + return val.item(), x_tensor.grad.detach().numpy() # type: ignore diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/_gp/gp.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/_gp/gp.py new file mode 100644 index 0000000000000000000000000000000000000000..214f4e98062c176af7c90892e0d3b2f57726526e --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/_gp/gp.py @@ -0,0 +1,282 @@ +from __future__ import annotations + +from dataclasses import dataclass +import math +from typing import Any +from typing import TYPE_CHECKING +import warnings + +import numpy as np + +from optuna.logging import get_logger + + +if TYPE_CHECKING: + from collections.abc import Callable + + import scipy.optimize as so + import torch +else: + from optuna._imports import _LazyImport + + so = _LazyImport("scipy.optimize") + torch = _LazyImport("torch") + +logger = get_logger(__name__) + +# This GP implementation uses the following notation: +# X[len(trials), len(params)]: observed parameter values. +# Y[len(trials)]: observed objective values. +# x[(batch_len,) len(params)]: parameter value to evaluate. Possibly batched. +# cov_fX_fX[len(trials), len(trials)]: kernel matrix of X = V[f(X)] +# cov_fx_fX[(batch_len,) len(trials)]: kernel matrix of x and X = Cov[f(x), f(X)] +# cov_fx_fx: kernel value (scalar) of x = V[f(x)]. +# Since we use a Matern 5/2 kernel, we assume this value to be a constant. +# cov_Y_Y_inv[len(trials), len(trials)]: inv of the covariance matrix of Y = (V[f(X) + noise])^-1 +# cov_Y_Y_inv_Y[len(trials)]: cov_Y_Y_inv @ Y +# max_Y: maximum of Y (Note that we transform the objective values such that it is maximized.) 
+# d2: squared distance between two points + + +def warn_and_convert_inf(values: np.ndarray) -> np.ndarray: + is_values_finite = np.isfinite(values) + if np.all(is_values_finite): + return values + + warnings.warn("Clip non-finite values to the min/max finite values for GP fittings.") + is_any_finite = np.any(is_values_finite, axis=0) + # NOTE(nabenabe): values cannot include nan to apply np.clip properly, but Optuna anyways won't + # pass nan in values by design. + return np.clip( + values, + np.where(is_any_finite, np.min(np.where(is_values_finite, values, np.inf), axis=0), 0.0), + np.where(is_any_finite, np.max(np.where(is_values_finite, values, -np.inf), axis=0), 0.0), + ) + + +class Matern52Kernel(torch.autograd.Function): + @staticmethod + def forward(ctx: Any, squared_distance: torch.Tensor) -> torch.Tensor: + sqrt5d = torch.sqrt(5 * squared_distance) + exp_part = torch.exp(-sqrt5d) + val = exp_part * ((5 / 3) * squared_distance + sqrt5d + 1) + # Notice that the derivative is taken w.r.t. d^2, but not w.r.t. d. + deriv = (-5 / 6) * (sqrt5d + 1) * exp_part + ctx.save_for_backward(deriv) + return val + + @staticmethod + def backward(ctx: Any, grad: torch.Tensor) -> torch.Tensor: + # Let x be squared_distance, f(x) be forward(ctx, x), and g(f) be a provided function, + # then deriv := df/dx, grad := dg/df, and deriv * grad = df/dx * dg/df = dg/dx. + (deriv,) = ctx.saved_tensors + return deriv * grad + + +def matern52_kernel_from_squared_distance(squared_distance: torch.Tensor) -> torch.Tensor: + # sqrt5d = sqrt(5 * squared_distance) + # exp(sqrt5d) * (1/3 * sqrt5d ** 2 + sqrt5d + 1) + # + # We cannot let PyTorch differentiate the above expression because + # the gradient runs into 0/0 at squared_distance=0. + return Matern52Kernel.apply(squared_distance) # type: ignore + + +@dataclass(frozen=True) +class KernelParamsTensor: + # Kernel parameters to fit. + inverse_squared_lengthscales: torch.Tensor # [len(params)] + kernel_scale: torch.Tensor # Scalar + noise_var: torch.Tensor # Scalar + + +def kernel( + is_categorical: torch.Tensor, # [len(params)] + kernel_params: KernelParamsTensor, + X1: torch.Tensor, # [...batch_shape, n_A, len(params)] + X2: torch.Tensor, # [...batch_shape, n_B, len(params)] +) -> torch.Tensor: # [...batch_shape, n_A, n_B] + # kernel(x1, x2) = kernel_scale * matern52_kernel_from_squared_distance( + # d2(x1, x2) * inverse_squared_lengthscales) + # d2(x1, x2) = sum_i d2_i(x1_i, x2_i) + # d2_i(x1_i, x2_i) = (x1_i - x2_i) ** 2 # if x_i is continuous + # d2_i(x1_i, x2_i) = 1 if x1_i != x2_i else 0 # if x_i is categorical + + d2 = (X1[..., :, None, :] - X2[..., None, :, :]) ** 2 + + # Use the Hamming distance for categorical parameters. 
+ d2[..., is_categorical] = (d2[..., is_categorical] > 0.0).type(torch.float64) + d2 = (d2 * kernel_params.inverse_squared_lengthscales).sum(dim=-1) + return matern52_kernel_from_squared_distance(d2) * kernel_params.kernel_scale + + +def kernel_at_zero_distance( + kernel_params: KernelParamsTensor, +) -> torch.Tensor: # [...batch_shape, n_A, n_B] + # kernel(x, x) = kernel_scale + return kernel_params.kernel_scale + + +def posterior( + kernel_params: KernelParamsTensor, + X: torch.Tensor, # [len(trials), len(params)] + is_categorical: torch.Tensor, # bool[len(params)] + cov_Y_Y_inv: torch.Tensor, # [len(trials), len(trials)] + cov_Y_Y_inv_Y: torch.Tensor, # [len(trials)] + x: torch.Tensor, # [(batch,) len(params)] +) -> tuple[torch.Tensor, torch.Tensor]: # (mean: [(batch,)], var: [(batch,)]) + cov_fx_fX = kernel(is_categorical, kernel_params, x[..., None, :], X)[..., 0, :] + cov_fx_fx = kernel_at_zero_distance(kernel_params) + + # mean = cov_fx_fX @ inv(cov_fX_fX + noise * I) @ Y + # var = cov_fx_fx - cov_fx_fX @ inv(cov_fX_fX + noise * I) @ cov_fx_fX.T + mean = cov_fx_fX @ cov_Y_Y_inv_Y # [batch] + var = cov_fx_fx - (cov_fx_fX * (cov_fx_fX @ cov_Y_Y_inv)).sum(dim=-1) # [batch] + # We need to clamp the variance to avoid negative values due to numerical errors. + return (mean, torch.clamp(var, min=0.0)) + + +def marginal_log_likelihood( + X: torch.Tensor, # [len(trials), len(params)] + Y: torch.Tensor, # [len(trials)] + is_categorical: torch.Tensor, # [len(params)] + kernel_params: KernelParamsTensor, +) -> torch.Tensor: # Scalar + # -0.5 * log((2pi)^n |C|) - 0.5 * Y^T C^-1 Y, where C^-1 = cov_Y_Y_inv + # We apply the cholesky decomposition to efficiently compute log(|C|) and C^-1. + + cov_fX_fX = kernel(is_categorical, kernel_params, X, X) + + cov_Y_Y_chol = torch.linalg.cholesky( + cov_fX_fX + kernel_params.noise_var * torch.eye(X.shape[0], dtype=torch.float64) + ) + # log |L| = 0.5 * log|L^T L| = 0.5 * log|C| + logdet = 2 * torch.log(torch.diag(cov_Y_Y_chol)).sum() + # cov_Y_Y_chol @ cov_Y_Y_chol_inv_Y = Y --> cov_Y_Y_chol_inv_Y = inv(cov_Y_Y_chol) @ Y + cov_Y_Y_chol_inv_Y = torch.linalg.solve_triangular(cov_Y_Y_chol, Y[:, None], upper=False)[:, 0] + return -0.5 * ( + logdet + + X.shape[0] * math.log(2 * math.pi) + # Y^T C^-1 Y = Y^T inv(L^T L) Y --> cov_Y_Y_chol_inv_Y @ cov_Y_Y_chol_inv_Y + + (cov_Y_Y_chol_inv_Y @ cov_Y_Y_chol_inv_Y) + ) + + +def _fit_kernel_params( + X: np.ndarray, # [len(trials), len(params)] + Y: np.ndarray, # [len(trials)] + is_categorical: np.ndarray, # [len(params)] + log_prior: Callable[[KernelParamsTensor], torch.Tensor], + minimum_noise: float, + deterministic_objective: bool, + initial_kernel_params: KernelParamsTensor, + gtol: float, +) -> KernelParamsTensor: + n_params = X.shape[1] + + # We apply log transform to enforce the positivity of the kernel parameters. + # Note that we cannot just use the constraint because of the numerical unstability + # of the marginal log likelihood. + # We also enforce the noise parameter to be greater than `minimum_noise` to avoid + # pathological behavior of maximum likelihood estimation. + initial_raw_params = np.concatenate( + [ + np.log(initial_kernel_params.inverse_squared_lengthscales.detach().numpy()), + [ + np.log(initial_kernel_params.kernel_scale.item()), + # We add 0.01 * minimum_noise to initial noise_var to avoid instability. 
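+ # Equivalently, the raw noise entry is log(noise_var - minimum_noise + 0.01 * minimum_noise);
+ # loss_func below maps it back through exp(raw) + minimum_noise, so the
+ # optimization starts from a noise variance of noise_var + 0.01 * minimum_noise.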
+ np.log(initial_kernel_params.noise_var.item() - 0.99 * minimum_noise), + ], + ] + ) + + def loss_func(raw_params: np.ndarray) -> tuple[float, np.ndarray]: + raw_params_tensor = torch.from_numpy(raw_params) + raw_params_tensor.requires_grad_(True) + with torch.enable_grad(): # type: ignore[no-untyped-call] + params = KernelParamsTensor( + inverse_squared_lengthscales=torch.exp(raw_params_tensor[:n_params]), + kernel_scale=torch.exp(raw_params_tensor[n_params]), + noise_var=( + torch.tensor(minimum_noise, dtype=torch.float64) + if deterministic_objective + else torch.exp(raw_params_tensor[n_params + 1]) + minimum_noise + ), + ) + loss = -marginal_log_likelihood( + torch.from_numpy(X), torch.from_numpy(Y), torch.from_numpy(is_categorical), params + ) - log_prior(params) + loss.backward() # type: ignore + # scipy.minimize requires all the gradients to be zero for termination. + raw_noise_var_grad = raw_params_tensor.grad[n_params + 1] # type: ignore + assert not deterministic_objective or raw_noise_var_grad == 0 + return loss.item(), raw_params_tensor.grad.detach().numpy() # type: ignore + + # jac=True means loss_func returns the gradient for gradient descent. + res = so.minimize( + # Too small `gtol` causes instability in loss_func optimization. + loss_func, + initial_raw_params, + jac=True, + method="l-bfgs-b", + options={"gtol": gtol}, + ) + if not res.success: + raise RuntimeError(f"Optimization failed: {res.message}") + + raw_params_opt_tensor = torch.from_numpy(res.x) + + res = KernelParamsTensor( + inverse_squared_lengthscales=torch.exp(raw_params_opt_tensor[:n_params]), + kernel_scale=torch.exp(raw_params_opt_tensor[n_params]), + noise_var=( + torch.tensor(minimum_noise, dtype=torch.float64) + if deterministic_objective + else minimum_noise + torch.exp(raw_params_opt_tensor[n_params + 1]) + ), + ) + return res + + +def fit_kernel_params( + X: np.ndarray, + Y: np.ndarray, + is_categorical: np.ndarray, + log_prior: Callable[[KernelParamsTensor], torch.Tensor], + minimum_noise: float, + deterministic_objective: bool, + initial_kernel_params: KernelParamsTensor | None = None, + gtol: float = 1e-2, +) -> KernelParamsTensor: + default_initial_kernel_params = KernelParamsTensor( + inverse_squared_lengthscales=torch.ones(X.shape[1], dtype=torch.float64), + kernel_scale=torch.tensor(1.0, dtype=torch.float64), + noise_var=torch.tensor(1.0, dtype=torch.float64), + ) + if initial_kernel_params is None: + initial_kernel_params = default_initial_kernel_params + + error = None + # First try optimizing the kernel params with the provided initial_kernel_params, + # but if it fails, rerun the optimization with the default initial_kernel_params. + # This increases the robustness of the optimization. + for init_kernel_params in [initial_kernel_params, default_initial_kernel_params]: + try: + return _fit_kernel_params( + X=X, + Y=Y, + is_categorical=is_categorical, + log_prior=log_prior, + minimum_noise=minimum_noise, + initial_kernel_params=init_kernel_params, + deterministic_objective=deterministic_objective, + gtol=gtol, + ) + except RuntimeError as e: + error = e + + logger.warning( + f"The optimization of kernel_params failed: \n{error}\n" + "The default initial kernel params will be used instead." 
+ ) + return default_initial_kernel_params diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/_gp/optim_mixed.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/_gp/optim_mixed.py new file mode 100644 index 0000000000000000000000000000000000000000..7b115c1cedbd8b53fe2045f84e8ad82a913fcbd9 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/_gp/optim_mixed.py @@ -0,0 +1,330 @@ +from __future__ import annotations + +import math +from typing import TYPE_CHECKING + +import numpy as np + +from optuna._gp.acqf import AcquisitionFunctionParams +from optuna._gp.acqf import eval_acqf_no_grad +from optuna._gp.acqf import eval_acqf_with_grad +from optuna._gp.search_space import normalize_one_param +from optuna._gp.search_space import sample_normalized_params +from optuna._gp.search_space import ScaleType +from optuna.logging import get_logger + + +if TYPE_CHECKING: + import scipy.optimize as so +else: + from optuna import _LazyImport + + so = _LazyImport("scipy.optimize") + +_logger = get_logger(__name__) + + +def _gradient_ascent( + acqf_params: AcquisitionFunctionParams, + initial_params: np.ndarray, + initial_fval: float, + continuous_indices: np.ndarray, + lengthscales: np.ndarray, + tol: float, +) -> tuple[np.ndarray, float, bool]: + """ + This function optimizes the acquisition function using preconditioning. + Preconditioning equalizes the variances caused by each parameter and + speeds up the convergence. + + In Optuna, acquisition functions use Matern 5/2 kernel, which is a function of `x / l` + where `x` is `normalized_params` and `l` is the corresponding lengthscales. + Then acquisition functions are a function of `x / l`, i.e. `f(x / l)`. + As `l` has different values for each param, it makes the function ill-conditioned. + By transforming `x / l` to `zl / l = z`, the function becomes `f(z)` and has + equal variances w.r.t. `z`. + So optimization w.r.t. `z` instead of `x` is the preconditioning here and + speeds up the convergence. + As the domain of `x` is [0, 1], that of `z` becomes [0, 1/l]. + """ + if len(continuous_indices) == 0: + return initial_params, initial_fval, False + normalized_params = initial_params.copy() + + def negative_acqf_with_grad(scaled_x: np.ndarray) -> tuple[float, np.ndarray]: + # Scale back to the original domain, i.e. [0, 1], from [0, 1/s]. + normalized_params[continuous_indices] = scaled_x * lengthscales + (fval, grad) = eval_acqf_with_grad(acqf_params, normalized_params) + # Flip sign because scipy minimizes functions. + # Let the scaled acqf be g(x) and the acqf be f(sx), then dg/dx = df/dx * s. + return -fval, -grad[continuous_indices] * lengthscales + + scaled_cont_x_opt, neg_fval_opt, info = so.fmin_l_bfgs_b( + func=negative_acqf_with_grad, + x0=normalized_params[continuous_indices] / lengthscales, + bounds=[(0, 1 / s) for s in lengthscales], + pgtol=math.sqrt(tol), + maxiter=200, + ) + + if -neg_fval_opt > initial_fval and info["nit"] > 0: # Improved. + # `nit` is the number of iterations. + normalized_params[continuous_indices] = scaled_cont_x_opt * lengthscales + return normalized_params, -neg_fval_opt, True + + return initial_params, initial_fval, False # No improvement. 
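# Illustrative sketch, not part of the vendored optuna module: the docstring of
# _gradient_ascent above describes preconditioning, i.e. optimizing over
# z = x / lengthscale instead of x in [0, 1]^d so that every coordinate contributes
# at a comparable scale and L-BFGS-B converges faster. The toy example below applies
# the same substitution to a deliberately ill-conditioned quadratic; the function,
# bounds, and variable names are made up for illustration and do not come from Optuna.
import numpy as np
import scipy.optimize as so

lengthscales = np.array([0.05, 1.0])  # one short and one long lengthscale


def acqf_with_grad(x: np.ndarray) -> tuple[float, np.ndarray]:
    # A stand-in "acquisition function": f(x) = -sum((x / l)^2), maximal at x = 0.
    fval = -float(np.sum((x / lengthscales) ** 2))
    grad = -2.0 * x / lengthscales**2
    return fval, grad


def negative_acqf_in_z(z: np.ndarray) -> tuple[float, np.ndarray]:
    # Work in the preconditioned variable z = x / l; by the chain rule dg/dz = df/dx * l.
    x = z * lengthscales
    fval, grad = acqf_with_grad(x)
    return -fval, -grad * lengthscales  # flip signs because scipy minimizes


x0 = np.array([0.9, 0.9])
z_opt, neg_fval_opt, info = so.fmin_l_bfgs_b(
    func=negative_acqf_in_z,
    x0=x0 / lengthscales,
    bounds=[(0.0, 1.0 / s) for s in lengthscales],  # x in [0, 1] becomes z in [0, 1/l]
)
x_opt = z_opt * lengthscales  # map the optimum back to the original domain
print(x_opt, -neg_fval_opt, info["nit"])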
+ + +def _exhaustive_search( + acqf_params: AcquisitionFunctionParams, + initial_params: np.ndarray, + initial_fval: float, + param_idx: int, + choices: np.ndarray, +) -> tuple[np.ndarray, float, bool]: + choices_except_current = choices[choices != initial_params[param_idx]] + + all_params = np.repeat(initial_params[None, :], len(choices_except_current), axis=0) + all_params[:, param_idx] = choices_except_current + fvals = eval_acqf_no_grad(acqf_params, all_params) + best_idx = np.argmax(fvals) + + if fvals[best_idx] > initial_fval: # Improved. + return all_params[best_idx, :], fvals[best_idx], True + + return initial_params, initial_fval, False # No improvement. + + +def _discrete_line_search( + acqf_params: AcquisitionFunctionParams, + initial_params: np.ndarray, + initial_fval: float, + param_idx: int, + grids: np.ndarray, + xtol: float, +) -> tuple[np.ndarray, float, bool]: + if len(grids) == 1: + # Do not optimize anything when there's only one choice. + return initial_params, initial_fval, False + + def find_nearest_index(x: float) -> int: + i = int(np.clip(np.searchsorted(grids, x), 1, len(grids) - 1)) + return i - 1 if abs(x - grids[i - 1]) < abs(x - grids[i]) else i + + current_choice_i = find_nearest_index(initial_params[param_idx]) + assert np.isclose(initial_params[param_idx], grids[current_choice_i]) + + negative_fval_cache = {current_choice_i: -initial_fval} + + normalized_params = initial_params.copy() + + def negative_acqf_with_cache(i: int) -> float: + # Function value at choices[i]. + cache_val = negative_fval_cache.get(i) + if cache_val is not None: + return cache_val + normalized_params[param_idx] = grids[i] + + # Flip sign because scipy minimizes functions. + negval = -float(eval_acqf_no_grad(acqf_params, normalized_params)) + negative_fval_cache[i] = negval + return negval + + def interpolated_negative_acqf(x: float) -> float: + if x < grids[0] or x > grids[-1]: + return np.inf + right = int(np.clip(np.searchsorted(grids, x), 1, len(grids) - 1)) + left = right - 1 + neg_acqf_left, neg_acqf_right = negative_acqf_with_cache(left), negative_acqf_with_cache( + right + ) + w_left = (grids[right] - x) / (grids[right] - grids[left]) + w_right = 1.0 - w_left + return w_left * neg_acqf_left + w_right * neg_acqf_right + + EPS = 1e-12 + res = so.minimize_scalar( + interpolated_negative_acqf, + # The values of this bracket are (inf, -fval, inf). + # This trivially satisfies the bracket condition if fval is finite. + bracket=(grids[0] - EPS, grids[current_choice_i], grids[-1] + EPS), + method="brent", + tol=xtol, + ) + opt_idx = find_nearest_index(res.x) + fval_opt = -negative_acqf_with_cache(opt_idx) + + # We check both conditions because of numerical errors. + if opt_idx != current_choice_i and fval_opt > initial_fval: + normalized_params[param_idx] = grids[opt_idx] + return normalized_params, fval_opt, True + + return initial_params, initial_fval, False # No improvement. + + +def _local_search_discrete( + acqf_params: AcquisitionFunctionParams, + initial_params: np.ndarray, + initial_fval: float, + param_idx: int, + choices: np.ndarray, + xtol: float, +) -> tuple[np.ndarray, float, bool]: + + # If the number of possible parameter values is small, we just perform an exhaustive search. + # This is faster and better than the line search. 
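The line search in _discrete_line_search above bridges grid points with linear interpolation, lets Brent's method propose a continuous point, and then snaps back to the nearest grid value. A self-contained sketch of that pattern with a hypothetical grid and toy objective (the function and grid here are illustrative, not the acquisition function):

import numpy as np
import scipy.optimize as so

grids = np.linspace(0.0, 1.0, 101)        # hypothetical discrete choices for one parameter

def f(x: float) -> float:                 # toy objective, evaluated only at grid points
    return (x - 0.37) ** 2

def interpolated_f(x: float) -> float:
    # Linear interpolation between the two neighboring grid points; +inf outside the range.
    if x < grids[0] or x > grids[-1]:
        return np.inf
    right = int(np.clip(np.searchsorted(grids, x), 1, len(grids) - 1))
    left = right - 1
    w_left = (grids[right] - x) / (grids[right] - grids[left])
    return w_left * f(grids[left]) + (1.0 - w_left) * f(grids[right])

res = so.minimize_scalar(
    interpolated_f,
    bracket=(grids[0] - 1e-12, 0.5, grids[-1] + 1e-12),  # (inf, finite, inf) bracket
    method="brent",
    tol=1e-4,
)
best_choice = grids[np.argmin(np.abs(grids - res.x))]    # snap back to the nearest grid point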
+ MAX_INT_EXHAUSTIVE_SEARCH_PARAMS = 16 + + scale_type = acqf_params.search_space.scale_types[param_idx] + if scale_type == ScaleType.CATEGORICAL or len(choices) <= MAX_INT_EXHAUSTIVE_SEARCH_PARAMS: + return _exhaustive_search(acqf_params, initial_params, initial_fval, param_idx, choices) + else: + return _discrete_line_search( + acqf_params, initial_params, initial_fval, param_idx, choices, xtol + ) + + +def local_search_mixed( + acqf_params: AcquisitionFunctionParams, + initial_normalized_params: np.ndarray, + *, + tol: float = 1e-4, + max_iter: int = 100, +) -> tuple[np.ndarray, float]: + scale_types = acqf_params.search_space.scale_types + bounds = acqf_params.search_space.bounds + steps = acqf_params.search_space.steps + + continuous_indices = np.where(steps == 0.0)[0] + + inverse_squared_lengthscales = ( + acqf_params.kernel_params.inverse_squared_lengthscales.detach().numpy() + ) + # This is a technique for speeding up optimization. + # We use an isotropic kernel, so scaling the gradient will make + # the hessian better-conditioned. + # NOTE: Ideally, separating lengthscales should be used for the constraint functions, + # but for simplicity, the ones from the objective function are being reused. + # TODO(kAIto47802): Think of a better way to handle this. + lengthscales = 1 / np.sqrt(inverse_squared_lengthscales[continuous_indices]) + + # NOTE(nabenabe): MyPy Redefinition for NumPy v2.2.0. (Cast signed int to int) + discrete_indices = np.where(steps > 0)[0].astype(int) + choices_of_discrete_params = [ + ( + np.arange(bounds[i, 1]) + if scale_types[i] == ScaleType.CATEGORICAL + else normalize_one_param( + param_value=np.arange(bounds[i, 0], bounds[i, 1] + 0.5 * steps[i], steps[i]), + scale_type=ScaleType(scale_types[i]), + bounds=(bounds[i, 0], bounds[i, 1]), + step=steps[i], + ) + ) + for i in discrete_indices + ] + + discrete_xtols = [ + # Terminate discrete optimizations once the change in x becomes smaller than this. + # Basically, if the change is smaller than min(dx) / 4, it is useless to see more details. + np.min(np.diff(choices), initial=np.inf) / 4 + for choices in choices_of_discrete_params + ] + + best_normalized_params = initial_normalized_params.copy() + best_fval = float(eval_acqf_no_grad(acqf_params, best_normalized_params)) + + CONTINUOUS = -1 + last_changed_param: int | None = None + + for _ in range(max_iter): + if last_changed_param == CONTINUOUS: + # Parameters not changed since last time. + return best_normalized_params, best_fval + (best_normalized_params, best_fval, updated) = _gradient_ascent( + acqf_params, + best_normalized_params, + best_fval, + continuous_indices, + lengthscales, + tol, + ) + if updated: + last_changed_param = CONTINUOUS + + for i, choices, xtol in zip(discrete_indices, choices_of_discrete_params, discrete_xtols): + if last_changed_param == i: + # Parameters not changed since last time. + return best_normalized_params, best_fval + (best_normalized_params, best_fval, updated) = _local_search_discrete( + acqf_params, best_normalized_params, best_fval, i, choices, xtol + ) + if updated: + last_changed_param = i + + if last_changed_param is None: + # Parameters not changed from the beginning. 
+ return best_normalized_params, best_fval + + _logger.warning("local_search_mixed: Local search did not converge.") + return best_normalized_params, best_fval + + +def optimize_acqf_mixed( + acqf_params: AcquisitionFunctionParams, + *, + warmstart_normalized_params_array: np.ndarray | None = None, + n_preliminary_samples: int = 2048, + n_local_search: int = 10, + tol: float = 1e-4, + rng: np.random.RandomState | None = None, +) -> tuple[np.ndarray, float]: + + rng = rng or np.random.RandomState() + + dim = acqf_params.search_space.scale_types.shape[0] + if warmstart_normalized_params_array is None: + warmstart_normalized_params_array = np.empty((0, dim)) + + assert ( + len(warmstart_normalized_params_array) <= n_local_search - 1 + ), "We must choose at least 1 best sampled point + given_initial_xs as start points." + + sampled_xs = sample_normalized_params(n_preliminary_samples, acqf_params.search_space, rng=rng) + + # Evaluate all values at initial samples + f_vals = eval_acqf_no_grad(acqf_params, sampled_xs) + assert isinstance(f_vals, np.ndarray) + + max_i = np.argmax(f_vals) + + # TODO(nabenabe): Benchmark the BoTorch roulette selection as well. + # https://github.com/pytorch/botorch/blob/v0.14.0/botorch/optim/initializers.py#L942 + # We use a modified roulette wheel selection to pick the initial param for each local search. + probs = np.exp(f_vals - f_vals[max_i]) + probs[max_i] = 0.0 # We already picked the best param, so remove it from roulette. + probs /= probs.sum() + n_non_zero_probs_improvement = int(np.count_nonzero(probs > 0.0)) + # n_additional_warmstart becomes smaller when study starts to converge. + n_additional_warmstart = min( + n_local_search - len(warmstart_normalized_params_array) - 1, n_non_zero_probs_improvement + ) + if n_additional_warmstart == n_non_zero_probs_improvement: + _logger.warning("Study already converged, so the number of local search is reduced.") + chosen_idxs = np.array([max_i]) + if n_additional_warmstart > 0: + additional_idxs = rng.choice( + len(sampled_xs), size=n_additional_warmstart, replace=False, p=probs + ) + chosen_idxs = np.append(chosen_idxs, additional_idxs) + + best_x = sampled_xs[max_i, :] + best_f = float(f_vals[max_i]) + + for x_warmstart in np.vstack([sampled_xs[chosen_idxs, :], warmstart_normalized_params_array]): + x, f = local_search_mixed(acqf_params, x_warmstart, tol=tol) + if f > best_f: + best_x = x + best_f = f + + return best_x, best_f diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/_gp/optim_sample.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/_gp/optim_sample.py new file mode 100644 index 0000000000000000000000000000000000000000..72656d1631519b457c625b14f2ecea662a635406 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/_gp/optim_sample.py @@ -0,0 +1,20 @@ +from __future__ import annotations + +import numpy as np + +from optuna._gp import acqf +from optuna._gp.search_space import sample_normalized_params + + +def optimize_acqf_sample( + acqf_params: acqf.AcquisitionFunctionParams, + *, + n_samples: int = 2048, + rng: np.random.RandomState | None = None, +) -> tuple[np.ndarray, float]: + # Normalized parameter values are sampled. 
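The start-point selection in optimize_acqf_mixed above always keeps the best preliminary sample and fills the remaining local-search slots with a softmax-style roulette wheel over the other samples. A small sketch of that selection with made-up acquisition values (all numbers here are illustrative):

import numpy as np

rng = np.random.RandomState(0)
f_vals = np.array([0.1, 0.5, 2.0, 1.2, 0.9])   # hypothetical acquisition values
max_i = int(np.argmax(f_vals))                 # the best sample is always a start point
probs = np.exp(f_vals - f_vals[max_i])         # subtract the max for numerical stability
probs[max_i] = 0.0                             # never re-draw the best sample
probs /= probs.sum()
extra = rng.choice(len(f_vals), size=2, replace=False, p=probs)
start_indices = np.append([max_i], extra)      # indices used to seed local_search_mixed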
+ xs = sample_normalized_params(n_samples, acqf_params.search_space, rng=rng) + res = acqf.eval_acqf_no_grad(acqf_params, xs) + + best_i = np.argmax(res) + return xs[best_i, :], res[best_i] diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/_gp/prior.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/_gp/prior.py new file mode 100644 index 0000000000000000000000000000000000000000..232db797a6db5862d7c49340b04a029090fd3ede --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/_gp/prior.py @@ -0,0 +1,36 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + + +if TYPE_CHECKING: + import torch + + from optuna._gp import gp +else: + from optuna._imports import _LazyImport + + torch = _LazyImport("torch") + + +DEFAULT_MINIMUM_NOISE_VAR = 1e-6 + + +def default_log_prior(kernel_params: gp.KernelParamsTensor) -> torch.Tensor: + # Log of prior distribution of kernel parameters. + + def gamma_log_prior(x: torch.Tensor, concentration: float, rate: float) -> torch.Tensor: + # We omit the constant factor `rate ** concentration / Gamma(concentration)`. + return (concentration - 1) * torch.log(x) - rate * x + + # NOTE(contramundum53): The priors below (params and function + # shape for inverse_squared_lengthscales) were picked by heuristics. + # TODO(contramundum53): Check whether these priors are appropriate. + return ( + -( + 0.1 / kernel_params.inverse_squared_lengthscales + + 0.1 * kernel_params.inverse_squared_lengthscales + ).sum() + + gamma_log_prior(kernel_params.kernel_scale, 2, 1) + + gamma_log_prior(kernel_params.noise_var, 1.1, 30) + ) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/_gp/search_space.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/_gp/search_space.py new file mode 100644 index 0000000000000000000000000000000000000000..07a0fa8c54559584112bee9148aed50c90021e88 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/_gp/search_space.py @@ -0,0 +1,184 @@ +from __future__ import annotations + +from dataclasses import dataclass +from enum import IntEnum +import math +import threading +from typing import Any +from typing import TYPE_CHECKING + +import numpy as np + +from optuna.distributions import BaseDistribution +from optuna.distributions import CategoricalDistribution +from optuna.distributions import FloatDistribution +from optuna.distributions import IntDistribution + + +if TYPE_CHECKING: + import scipy.stats.qmc as qmc + + from optuna.trial import FrozenTrial +else: + from optuna._imports import _LazyImport + + qmc = _LazyImport("scipy.stats.qmc") + + +_threading_lock = threading.Lock() + + +class ScaleType(IntEnum): + LINEAR = 0 + LOG = 1 + CATEGORICAL = 2 + + +@dataclass(frozen=True) +class SearchSpace: + scale_types: np.ndarray + bounds: np.ndarray + steps: np.ndarray + + +def unnormalize_one_param( + param_value: np.ndarray, scale_type: ScaleType, bounds: tuple[float, float], step: float +) -> np.ndarray: + # param_value can be batched, or not. 
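The gamma_log_prior helper in prior.py above deliberately drops the normalizing constant rate**concentration / Gamma(concentration). A quick numeric check (the sample points are arbitrary) that it matches scipy's Gamma log-density up to that constant, using the noise_var prior's shape and rate:

import math
import numpy as np
from scipy import stats

def gamma_log_prior(x: np.ndarray, concentration: float, rate: float) -> np.ndarray:
    # Same unnormalized form as in prior.py.
    return (concentration - 1) * np.log(x) - rate * x

c, r = 1.1, 30.0                               # parameters used for noise_var above
xs = np.array([1e-4, 1e-2, 0.1])
full = stats.gamma.logpdf(xs, a=c, scale=1.0 / r)
unnorm = gamma_log_prior(xs, c, r)
# The gap is the dropped constant log(r**c / Gamma(c)), identical for every x.
assert np.allclose(full - unnorm, c * math.log(r) - math.lgamma(c))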
+ if scale_type == ScaleType.CATEGORICAL: + return param_value + low, high = (bounds[0] - 0.5 * step, bounds[1] + 0.5 * step) + if scale_type == ScaleType.LOG: + low, high = (math.log(low), math.log(high)) + param_value = param_value * (high - low) + low + if scale_type == ScaleType.LOG: + param_value = np.exp(param_value) + return param_value + + +def normalize_one_param( + param_value: np.ndarray, scale_type: ScaleType, bounds: tuple[float, float], step: float +) -> np.ndarray: + # param_value can be batched, or not. + if scale_type == ScaleType.CATEGORICAL: + return param_value + low, high = (bounds[0] - 0.5 * step, bounds[1] + 0.5 * step) + if scale_type == ScaleType.LOG: + low, high = (math.log(low), math.log(high)) + param_value = np.log(param_value) + if high == low: + return np.full_like(param_value, 0.5) + param_value = (param_value - low) / (high - low) + return param_value + + +def round_one_normalized_param( + param_value: np.ndarray, scale_type: ScaleType, bounds: tuple[float, float], step: float +) -> np.ndarray: + assert scale_type != ScaleType.CATEGORICAL + if step == 0.0: + return param_value + + param_value = unnormalize_one_param(param_value, scale_type, bounds, step) + param_value = np.clip( + (param_value - bounds[0] + 0.5 * step) // step * step + bounds[0], + bounds[0], + bounds[1], + ) + param_value = normalize_one_param(param_value, scale_type, bounds, step) + return param_value + + +def sample_normalized_params( + n: int, search_space: SearchSpace, rng: np.random.RandomState | None +) -> np.ndarray: + rng = rng or np.random.RandomState() + dim = search_space.scale_types.shape[0] + scale_types = search_space.scale_types + bounds = search_space.bounds + steps = search_space.steps + + # Sobol engine likely shares its internal state among threads. 
+ # Without threading.Lock, ValueError exceptions are raised in Sobol engine as discussed in + # https://github.com/optuna/optunahub-registry/pull/168#pullrequestreview-2404054969 + with _threading_lock: + qmc_engine = qmc.Sobol(dim, scramble=True, seed=rng.randint(np.iinfo(np.int32).max)) + param_values = qmc_engine.random(n) + + for i in range(dim): + if scale_types[i] == ScaleType.CATEGORICAL: + param_values[:, i] = np.floor(param_values[:, i] * bounds[i, 1]) + elif steps[i] != 0.0: + param_values[:, i] = round_one_normalized_param( + param_values[:, i], scale_types[i], (bounds[i, 0], bounds[i, 1]), steps[i] + ) + return param_values + + +def get_search_space_and_normalized_params( + trials: list[FrozenTrial], + optuna_search_space: dict[str, BaseDistribution], +) -> tuple[SearchSpace, np.ndarray]: + scale_types = np.zeros(len(optuna_search_space), dtype=np.int64) + bounds = np.zeros((len(optuna_search_space), 2), dtype=np.float64) + steps = np.zeros(len(optuna_search_space), dtype=np.float64) + values = np.zeros((len(trials), len(optuna_search_space)), dtype=np.float64) + for i, (param, distribution) in enumerate(optuna_search_space.items()): + if isinstance(distribution, CategoricalDistribution): + scale_types[i] = ScaleType.CATEGORICAL + bounds[i, :] = (0.0, len(distribution.choices)) + steps[i] = 1.0 + values[:, i] = np.array( + [distribution.to_internal_repr(trial.params[param]) for trial in trials] + ) + else: + assert isinstance( + distribution, + ( + FloatDistribution, + IntDistribution, + ), + ) + scale_types[i] = ScaleType.LOG if distribution.log else ScaleType.LINEAR + steps[i] = 0.0 if distribution.step is None else distribution.step + bounds[i, :] = (distribution.low, distribution.high) + + values[:, i] = normalize_one_param( + np.array([trial.params[param] for trial in trials]), + scale_types[i], + (bounds[i, 0], bounds[i, 1]), + steps[i], + ) + return SearchSpace(scale_types, bounds, steps), values + + +def get_unnormalized_param( + optuna_search_space: dict[str, BaseDistribution], + normalized_param: np.ndarray, +) -> dict[str, Any]: + ret = {} + for i, (param, distribution) in enumerate(optuna_search_space.items()): + if isinstance(distribution, CategoricalDistribution): + ret[param] = distribution.to_external_repr(normalized_param[i]) + else: + assert isinstance( + distribution, + ( + FloatDistribution, + IntDistribution, + ), + ) + scale_type = ScaleType.LOG if distribution.log else ScaleType.LINEAR + step = 0.0 if distribution.step is None else distribution.step + bounds = (distribution.low, distribution.high) + param_value = float( + np.clip( + unnormalize_one_param(normalized_param[i], scale_type, bounds, step), + distribution.low, + distribution.high, + ) + ) + if isinstance(distribution, IntDistribution): + param_value = round(param_value) + ret[param] = param_value + return ret diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/_hypervolume/__init__.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/_hypervolume/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..bdc68fbd8095d3b8d664cad352a94f6e56cc22e1 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/_hypervolume/__init__.py @@ -0,0 +1,6 @@ +from optuna._hypervolume.box_decomposition import get_non_dominated_box_bounds +from optuna._hypervolume.hssp import _solve_hssp +from optuna._hypervolume.wfg import compute_hypervolume + + +__all__ = 
["_solve_hssp", "compute_hypervolume", "get_non_dominated_box_bounds"] diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/_hypervolume/box_decomposition.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/_hypervolume/box_decomposition.py new file mode 100644 index 0000000000000000000000000000000000000000..3897755fc42c1122c54fb0666e987d4ed7161a10 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/_hypervolume/box_decomposition.py @@ -0,0 +1,158 @@ +""" +The functions in this file are mostly based on BoTorch v0.13.0, +but they are refactored significantly from the original version. + +For ``_get_upper_bound_set``, look at: + * https://github.com/pytorch/botorch/blob/v0.13.0/botorch/utils/multi_objective/box_decompositions/utils.py#L101-L160 + +For ``_get_box_bounds``, look at: + * https://github.com/pytorch/botorch/blob/v0.13.0/botorch/utils/multi_objective/box_decompositions/utils.py#L163-L193 + +For ``_get_non_dominated_box_bounds``, look at: + * https://github.com/pytorch/botorch/blob/v0.13.0/botorch/utils/multi_objective/box_decompositions/non_dominated.py#L395-L430 + +The preprocessing for four or fewer objectives, we use the algorithm proposed by: + Title: A Box Decomposition Algorithm to Compute the Hypervolume Indicator + Authors: Renaud Lacour, Kathrin Klamroth, and Carlos M. Fonseca + URL: https://arxiv.org/abs/1510.01963 +We refer this paper as Lacour17 in this file. + +""" # NOQA: E501 + +from __future__ import annotations + +import warnings + +import numpy as np + +from optuna.study._multi_objective import _is_pareto_front + + +def _get_upper_bound_set( + sorted_pareto_sols: np.ndarray, ref_point: np.ndarray +) -> tuple[np.ndarray, np.ndarray]: + """ + This function follows Algorithm 2 of Lacour17. + + Args: + sorted_pareto_sols: Pareto solutions sorted with respect to the first objective. + ref_point: The reference point. + + Returns: + upper_bound_set: The upper bound set, which is ``U(N)`` in the paper. The shape is + ``(n_bounds, n_objectives)``. + def_points: The defining points of each vector in ``U(N)``. The shape is + ``(n_bounds, n_objectives, n_objectives)``. + + NOTE: + ``pareto_sols`` corresponds to ``N`` and ``upper_bound_set`` corresponds to ``U(N)`` in the + paper. + ``def_points`` (the shape is ``(n_bounds, n_objectives, n_objectives)``) is not well + explained in the paper, but basically, ``def_points[i, j] = z[j]`` of + ``upper_bound_set[i]``. + """ + (_, n_objectives) = sorted_pareto_sols.shape + objective_indices = np.arange(n_objectives) + skip_ineq_judge = np.eye(n_objectives, dtype=bool) + # NOTE(nabenabe): True at 0 comes from Line 2 of Alg. 2. (loss_vals is sorted w.r.t. 0-th obj) + skip_ineq_judge[:, 0] = True + + def update(sol: np.ndarray, ubs: np.ndarray, dps: np.ndarray) -> tuple[np.ndarray, np.ndarray]: + # The update rule is written in Section 2.2 of Lacour17. + is_dominated = np.all(sol < ubs, axis=-1) + if not any(is_dominated): + return ubs, dps + + # The defining points `z(u)` for each `u in A` in Line 5 of Alg. 2. + dominated_dps = dps[is_dominated] + n_bounds = dominated_dps.shape[0] + # NOTE(nabenabe): `-inf` comes from Line 2 and `k!=j` in Line 3 of Alg. 2. + # NOTE(nabenabe): If `update[i,j]=True`, update `ubs[i,j]` to `(z_j, u_{-j})`, i.e., + # `np.where(objective_indices != j, ubs[i], sol[j])` in `new_ubs`. cf. Lines 2,3 of Alg. 2. 
+ update = sol >= np.max(np.where(skip_ineq_judge, -np.inf, dominated_dps), axis=-2) + # NOTE(nabenabe): The indices of `u` with `True` in update. Each `u` may yield `True` + # multiple times for different indices `j`. + ubs_indices_to_update = np.tile(np.arange(n_bounds)[:, np.newaxis], n_objectives)[update] + # The dimension `j` for each `u` s.t. `\hat{z}_j \geq \max_{k \neq j}{z_j^k(u)}`. + dimensions_to_update = np.tile(objective_indices, (n_bounds, 1))[update] + assert ubs_indices_to_update.size == dimensions_to_update.size + indices_for_sweeping = np.arange(dimensions_to_update.size) + # The last Eq in Page 5. + new_dps = dominated_dps[ubs_indices_to_update] + new_dps[indices_for_sweeping, dimensions_to_update] = sol + # Line 3 of Alg. 2. `sol[dimensions_to_update]` is equivalent to `\bar{z}_j`. + new_ubs = ubs[is_dominated][ubs_indices_to_update] + new_ubs[indices_for_sweeping, dimensions_to_update] = sol[dimensions_to_update] + return np.vstack([ubs[~is_dominated], new_ubs]), np.vstack([dps[~is_dominated], new_dps]) + + upper_bound_set = np.asarray([ref_point]) # Line 1 of Alg. 2. + def_points = np.full((1, n_objectives, n_objectives), -np.inf) # z^k(z^r) = \hat{z}^k + def_points[0, objective_indices, objective_indices] = ref_point # \hat{z}^k is a dummy point. + for solution in sorted_pareto_sols: # NOTE(nabenabe): Sorted must be fulfilled. + upper_bound_set, def_points = update(solution, upper_bound_set, def_points) + + return upper_bound_set, def_points + + +def _get_box_bounds( + upper_bound_set: np.ndarray, def_points: np.ndarray, ref_point: np.ndarray +) -> np.ndarray: + # Eq. (2) of Lacour17. + n_objectives = upper_bound_set.shape[-1] + assert n_objectives > 1, "This function is used only for multi-objective problems." + bounds = np.empty((2, *upper_bound_set.shape)) + bounds[0, :, 0] = def_points[:, 0, 0] + bounds[1, :, 0] = ref_point[0] + row, col = np.diag_indices(n_objectives - 1) + bounds[0, :, 1:] = np.maximum.accumulate(def_points, axis=-2)[:, row, col + 1] + bounds[1, :, 1:] = upper_bound_set[:, 1:] + not_empty = ~np.any(bounds[1] <= bounds[0], axis=-1) # Remove [inf, inf] or [-inf, -inf]. + return bounds[:, not_empty] + + +def _get_non_dominated_box_bounds( + sorted_pareto_sols: np.ndarray, ref_point: np.ndarray +) -> tuple[np.ndarray, np.ndarray]: # (n_bounds, n_objectives) and (n_bounds, n_objectives) + # The calculation of u[k] and l[k] in the paper: https://arxiv.org/abs/2006.05078 + # See below for the proof of this function's validity: + # cf. https://github.com/optuna/optuna/pull/6039#issuecomment-2831926573 + # NOTE(nabenabe): The paper handles maximization problems, but we consider minimization here. + neg_upper_bound_set = -_get_upper_bound_set(sorted_pareto_sols, ref_point)[0] + sorted_neg_upper_bound_set = np.unique(neg_upper_bound_set, axis=0) # lexsort by np.unique. + # Use the sign-flipped upper_bound_set as the Pareto solutions. Then we can calculate the + # lower bound set as well. + point_at_infinity = np.full_like(ref_point, np.inf) + # NOTE(nabenabe): Since our goal is to partition the non-dominated space, we only need + # the Pareto solutions in the `neg_upper_bound_set`. 
+ neg_lower_bound_set, neg_def_points = _get_upper_bound_set( + sorted_pareto_sols=sorted_neg_upper_bound_set[ + _is_pareto_front(sorted_neg_upper_bound_set, assume_unique_lexsorted=True) + ], + ref_point=point_at_infinity, + ) + box_upper_bounds, box_lower_bounds = -_get_box_bounds( + neg_lower_bound_set, neg_def_points, point_at_infinity + ) + return box_lower_bounds, box_upper_bounds + + +def get_non_dominated_box_bounds( + loss_vals: np.ndarray, ref_point: np.ndarray +) -> tuple[np.ndarray, np.ndarray]: # (n_bounds, n_objectives) and (n_bounds, n_objectives) + assert np.all(np.isfinite(loss_vals)), "loss_vals must be clipped before box decomposition." + # Remove duplications and lexsort the solutions by ``np.unique``. + unique_lexsorted_loss_vals = np.unique(loss_vals, axis=0) + sorted_pareto_sols = unique_lexsorted_loss_vals[ + _is_pareto_front(unique_lexsorted_loss_vals, assume_unique_lexsorted=True) + ] + n_objectives = loss_vals.shape[-1] + # The condition here follows BoTorch. + # https://github.com/pytorch/botorch/blob/v0.13.0/botorch/acquisition/multi_objective/utils.py#L55-L63 + assert n_objectives > 1, "This function is used only for multi-objective problems." + if n_objectives > 4: + warnings.warn( + "Box decomposition (typically used by `GPSampler`) might be significantly slow for " + "n_objectives > 4. Please consider using another sampler instead." + ) + + return _get_non_dominated_box_bounds(sorted_pareto_sols, ref_point) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/_hypervolume/hssp.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/_hypervolume/hssp.py new file mode 100644 index 0000000000000000000000000000000000000000..e758ae376280287d1ccbf798a554be8ad4a25e58 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/_hypervolume/hssp.py @@ -0,0 +1,154 @@ +from __future__ import annotations + +import numpy as np + +import optuna + + +def _solve_hssp_2d( + rank_i_loss_vals: np.ndarray, + rank_i_indices: np.ndarray, + subset_size: int, + reference_point: np.ndarray, +) -> np.ndarray: + # This function can be used for non-unique rank_i_loss_vals as well. + # The time complexity is O(subset_size * rank_i_loss_vals.shape[0]). + assert rank_i_loss_vals.shape[-1] == 2 and subset_size <= rank_i_loss_vals.shape[0] + n_trials = rank_i_loss_vals.shape[0] + # rank_i_loss_vals is unique-lexsorted in solve_hssp. + sorted_indices = np.arange(rank_i_loss_vals.shape[0]) + sorted_loss_vals = rank_i_loss_vals.copy() + # The diagonal points for each rectangular to calculate the hypervolume contributions. + rect_diags = np.repeat(reference_point[np.newaxis, :], n_trials, axis=0) + selected_indices = np.zeros(subset_size, dtype=int) + for i in range(subset_size): + contribs = np.prod(rect_diags - sorted_loss_vals, axis=-1) + max_index = np.argmax(contribs) + selected_indices[i] = rank_i_indices[sorted_indices[max_index]] + loss_vals = sorted_loss_vals[max_index].copy() + + keep = np.ones(n_trials - i, dtype=bool) + keep[max_index] = False + # Remove the chosen point. + sorted_indices = sorted_indices[keep] + rect_diags = rect_diags[keep] + sorted_loss_vals = sorted_loss_vals[keep] + # Update the diagonal points for each hypervolume contribution calculation. 
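The greedy step in _solve_hssp_2d picks the point whose rectangle up to the reference point has the largest area, i.e. the largest hypervolume contribution before anything else is selected. A tiny 2-objective illustration with hypothetical loss values:

import numpy as np

pareto = np.array([[1.0, 3.0], [2.0, 2.0], [3.0, 1.0]])   # sorted by the first objective
reference_point = np.array([4.0, 4.0])
contribs = np.prod(reference_point - pareto, axis=-1)     # [3., 4., 3.]
first_pick = int(np.argmax(contribs))                     # index 1, i.e. the point (2., 2.)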
+ rect_diags[:max_index, 0] = np.minimum(loss_vals[0], rect_diags[:max_index, 0]) + rect_diags[max_index:, 1] = np.minimum(loss_vals[1], rect_diags[max_index:, 1]) + + return selected_indices + + +def _lazy_contribs_update( + contribs: np.ndarray, + pareto_loss_values: np.ndarray, + selected_vecs: np.ndarray, + reference_point: np.ndarray, +) -> np.ndarray: + """Lazy update the hypervolume contributions. + + S=selected_indices - {indices[max_index]}, T=selected_indices, and S' is a subset of S. + As we would like to know argmax H(T v {i}) in the next iteration, we can skip HV + calculations for j if H(T v {i}) - H(T) > H(S' v {j}) - H(S') >= H(T v {j}) - H(T). + We used the submodularity for the inequality above. As the upper bound of contribs[i] is + H(S' v {j}) - H(S'), we start to update from i with a higher upper bound so that we can + skip more HV calculations. + """ + hv_selected = optuna._hypervolume.compute_hypervolume( + selected_vecs[:-1], reference_point, assume_pareto=True + ) + max_contrib = 0.0 + index_from_larger_upper_bound_contrib = np.argsort(-contribs) + for i in index_from_larger_upper_bound_contrib: + if contribs[i] < max_contrib: + # Lazy evaluation to reduce HV calculations. + # If contribs[i] will not be the maximum next, it is unnecessary to compute it. + continue + + selected_vecs[-1] = pareto_loss_values[i].copy() + hv_plus = optuna._hypervolume.compute_hypervolume( + selected_vecs, reference_point, assume_pareto=True + ) + # inf - inf in the contribution calculation is always inf. + contribs[i] = hv_plus - hv_selected if not np.isinf(hv_plus) else np.inf + max_contrib = max(contribs[i], max_contrib) + + return contribs + + +def _solve_hssp_on_unique_loss_vals( + rank_i_loss_vals: np.ndarray, + rank_i_indices: np.ndarray, + subset_size: int, + reference_point: np.ndarray, +) -> np.ndarray: + if not np.isfinite(reference_point).all(): + return rank_i_indices[:subset_size] + if rank_i_indices.size == subset_size: + return rank_i_indices + if rank_i_loss_vals.shape[-1] == 2: + return _solve_hssp_2d(rank_i_loss_vals, rank_i_indices, subset_size, reference_point) + + assert subset_size < rank_i_indices.size + # The following logic can be used for non-unique rank_i_loss_vals as well. + diff_of_loss_vals_and_ref_point = reference_point - rank_i_loss_vals + (n_solutions, n_objectives) = rank_i_loss_vals.shape + contribs = np.prod(diff_of_loss_vals_and_ref_point, axis=-1) + selected_indices = np.zeros(subset_size, dtype=int) + selected_vecs = np.empty((subset_size, n_objectives)) + indices = np.arange(n_solutions) + for k in range(subset_size): + max_index = int(np.argmax(contribs)) + selected_indices[k] = indices[max_index] + selected_vecs[k] = rank_i_loss_vals[max_index].copy() + keep = np.ones(contribs.size, dtype=bool) + keep[max_index] = False + contribs = contribs[keep] + indices = indices[keep] + rank_i_loss_vals = rank_i_loss_vals[keep] + if k == subset_size - 1: + # We do not need to update contribs at the last iteration. + break + + contribs = _lazy_contribs_update( + contribs, rank_i_loss_vals, selected_vecs[: k + 2], reference_point + ) + + return rank_i_indices[selected_indices] + + +def _solve_hssp( + rank_i_loss_vals: np.ndarray, + rank_i_indices: np.ndarray, + subset_size: int, + reference_point: np.ndarray, +) -> np.ndarray: + """Solve a hypervolume subset selection problem (HSSP) via a greedy algorithm. + + This method is a 1-1/e approximation algorithm to solve HSSP. 
+ + For further information about algorithms to solve HSSP, please refer to the following + paper: + + - `Greedy Hypervolume Subset Selection in Low Dimensions + `__ + """ + if subset_size == rank_i_indices.size: + return rank_i_indices + + rank_i_unique_loss_vals, indices_of_unique_loss_vals = np.unique( + rank_i_loss_vals, return_index=True, axis=0 + ) + n_unique = indices_of_unique_loss_vals.size + if n_unique < subset_size: + chosen = np.zeros(rank_i_indices.size, dtype=bool) + chosen[indices_of_unique_loss_vals] = True + duplicated_indices = np.arange(rank_i_indices.size)[~chosen] + chosen[duplicated_indices[: subset_size - n_unique]] = True + return rank_i_indices[chosen] + + selected_indices_of_unique_loss_vals = _solve_hssp_on_unique_loss_vals( + rank_i_unique_loss_vals, indices_of_unique_loss_vals, subset_size, reference_point + ) + return rank_i_indices[selected_indices_of_unique_loss_vals] diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/_hypervolume/wfg.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/_hypervolume/wfg.py new file mode 100644 index 0000000000000000000000000000000000000000..ac2e2dcebb2e3d965c2c80c36440feacc25ca3fa --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/_hypervolume/wfg.py @@ -0,0 +1,165 @@ +from __future__ import annotations + +import numpy as np + +from optuna.study._multi_objective import _is_pareto_front + + +def _compute_2d(sorted_pareto_sols: np.ndarray, reference_point: np.ndarray) -> float: + assert sorted_pareto_sols.shape[1] == reference_point.shape[0] == 2 + rect_diag_y = np.concatenate([reference_point[1:], sorted_pareto_sols[:-1, 1]]) + edge_length_x = reference_point[0] - sorted_pareto_sols[:, 0] + edge_length_y = rect_diag_y - sorted_pareto_sols[:, 1] + return edge_length_x @ edge_length_y + + +def _compute_3d(sorted_pareto_sols: np.ndarray, reference_point: np.ndarray) -> float: + """ + Compute hypervolume in 3D. Time complexity is O(N^2) where N is sorted_pareto_sols.shape[0]. + If X, Y, Z coordinates are permutations of 0, 1, ..., N-1 and reference_point is (N, N, N), the + hypervolume is calculated as the number of voxels (x, y, z) dominated by at least one point. + If we fix x and y, this number is equal to the minimum of z' over all points (x', y', z') + satisfying x' <= x and y' <= y. This can be efficiently computed using cumulative minimum + (`np.minimum.accumulate`). Non-permutation coordinates can be transformed into permutation + coordinates by using coordinate compression. + """ + assert sorted_pareto_sols.shape[1] == reference_point.shape[0] == 3 + n = sorted_pareto_sols.shape[0] + y_order = np.argsort(sorted_pareto_sols[:, 1]) + z_delta = np.zeros((n, n), dtype=float) + z_delta[y_order, np.arange(n)] = reference_point[2] - sorted_pareto_sols[y_order, 2] + z_delta = np.maximum.accumulate(np.maximum.accumulate(z_delta, axis=0), axis=1) + # The x axis is already sorted, so no need to compress this coordinate. + x_vals = sorted_pareto_sols[:, 0] + y_vals = sorted_pareto_sols[y_order, 1] + x_delta = np.concatenate([x_vals[1:], reference_point[:1]]) - x_vals + y_delta = np.concatenate([y_vals[1:], reference_point[1:2]]) - y_vals + # NOTE(nabenabe): Below is the faster alternative of `np.sum(dx[:, None] * dy * dz)`. 
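As a quick numeric check of the 2-D slab decomposition used in _compute_2d above (toy values, not taken from the library's tests): three mutually non-dominated points against reference point (4, 4) give a dominated area of 6.

import numpy as np

pareto = np.array([[1.0, 3.0], [2.0, 2.0], [3.0, 1.0]])   # sorted by the first objective
ref = np.array([4.0, 4.0])
rect_diag_y = np.concatenate([ref[1:], pareto[:-1, 1]])   # [4., 3., 2.]
hv = (ref[0] - pareto[:, 0]) @ (rect_diag_y - pareto[:, 1])
assert hv == 6.0   # 3*1 + 2*1 + 1*1: the union area of the three dominated rectangles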
+ return np.dot(np.dot(z_delta, y_delta), x_delta) + + +def _compute_hv(sorted_loss_vals: np.ndarray, reference_point: np.ndarray) -> float: + inclusive_hvs = np.prod(reference_point - sorted_loss_vals, axis=-1) + if inclusive_hvs.shape[0] == 1: + return float(inclusive_hvs[0]) + elif inclusive_hvs.shape[0] == 2: + # S(A v B) = S(A) + S(B) - S(A ^ B). + intersec = np.prod(reference_point - np.maximum(sorted_loss_vals[0], sorted_loss_vals[1])) + return np.sum(inclusive_hvs) - intersec + + # c.f. Eqs. (6) and (7) of ``A Fast Way of Calculating Exact Hypervolumes``. + limited_sols_array = np.maximum(sorted_loss_vals[:, np.newaxis], sorted_loss_vals) + return sum( + _compute_exclusive_hv(limited_sols_array[i, i + 1 :], inclusive_hv, reference_point) + for i, inclusive_hv in enumerate(inclusive_hvs) + ) + + +def _compute_exclusive_hv( + limited_sols: np.ndarray, inclusive_hv: float, reference_point: np.ndarray +) -> float: + if limited_sols.shape[0] == 0: + return inclusive_hv + + # NOTE(nabenabe): As the following line is a hack for speedup, I will describe several + # important points to note. Even if we do not run _is_pareto_front below or use + # assume_unique_lexsorted=False instead, the result of this function does not change, but this + # function simply becomes slower. + # + # For simplicity, I call an array ``quasi-lexsorted`` if it is sorted by the first objective. + # + # Reason why it will be faster with _is_pareto_front + # Hypervolume of a given solution set and a reference point does not change even when we + # remove non Pareto solutions from the solution set. However, the calculation becomes slower + # if the solution set contains many non Pareto solutions. By removing some obvious non Pareto + # solutions, the calculation becomes faster. + # + # Reason why assume_unique_lexsorted must be True for _is_pareto_front + # assume_unique_lexsorted=True actually checks weak dominance and solutions will be weakly + # dominated if there are duplications, so we can remove duplicated solutions by this option. + # In other words, assume_unique_lexsorted=False may significantly slow down when limited_sols + # has many duplicated Pareto solutions because this function becomes an exponential algorithm + # without duplication removal. + # + # NOTE(nabenabe): limited_sols can be non-unique and/or non-lexsorted, so I will describe why + # it is fine. + # + # Reason why we can specify assume_unique_lexsorted=True even when limited_sols is not + # All ``False`` in on_front will be correct (, but it may not be the case for ``True``) even + # if limited_sols is not unique or not lexsorted as long as limited_sols is quasi-lexsorted, + # which is guaranteed. As mentioned earlier, if all ``False`` in on_front is correct, the + # result of this function does not change. + on_front = _is_pareto_front(limited_sols, assume_unique_lexsorted=True) + return inclusive_hv - _compute_hv(limited_sols[on_front], reference_point) + + +def compute_hypervolume( + loss_vals: np.ndarray, reference_point: np.ndarray, assume_pareto: bool = False +) -> float: + """Hypervolume calculator for any dimension. + + This class exactly calculates the hypervolume for any dimension. + For 3 dimensions or higher, the WFG algorithm will be used. + Please refer to ``A Fast Way of Calculating Exact Hypervolumes`` for the WFG algorithm. + + .. note:: + This class is used for computing the hypervolumes of points in multi-objective space. + Each coordinate of each point represents a ``values`` of the multi-objective function. + + .. 
note:: + We check that each objective is to be minimized. Transform objective values that are + to be maximized before calling this class's ``compute`` method. + + Args: + loss_vals: + An array of loss value vectors to calculate the hypervolume. + reference_point: + The reference point used to calculate the hypervolume. + assume_pareto: + Whether to assume the Pareto optimality to ``loss_vals``. + In other words, if ``True``, none of loss vectors are dominated by another. + ``assume_pareto`` is used only for speedup and it does not change the result even if + this argument is wrongly given. If there are many non-Pareto solutions in + ``loss_vals``, ``assume_pareto=True`` will speed up the calculation. + + Returns: + The hypervolume of the given arguments. + + """ + + if not np.all(loss_vals <= reference_point): + raise ValueError( + "All points must dominate or equal the reference point. " + "That is, for all points in the loss_vals and the coordinate `i`, " + "`loss_vals[i] <= reference_point[i]`." + ) + if not np.all(np.isfinite(reference_point)): + # reference_point does not have nan, thanks to the verification above. + return float("inf") + + if not assume_pareto: + unique_lexsorted_loss_vals = np.unique(loss_vals, axis=0) + on_front = _is_pareto_front(unique_lexsorted_loss_vals, assume_unique_lexsorted=True) + sorted_pareto_sols = unique_lexsorted_loss_vals[on_front] + else: + # NOTE(nabenabe): The result of this function does not change both by + # np.argsort(loss_vals[:, 0]) and np.unique(loss_vals, axis=0). + # But many duplications in loss_vals significantly slows down the function. + # TODO(nabenabe): Make an option to use np.unique. + sorted_pareto_sols = loss_vals[loss_vals[:, 0].argsort()] + + if reference_point.shape[0] == 2: + hv = _compute_2d(sorted_pareto_sols, reference_point) + elif reference_point.shape[0] == 3: + # NOTE: For 3D points, we always prefer _compute_3d to _compute_hv because the time + # complexity of _compute_3d is O(N^2), while that of _compute_nd is \\Omega(N^3) + # - It calls _compute_exclusive_hv with i points for i = 0, 1, ..., N-1 + # - _compute_exclusive_hv calls _is_pareto_front, which is quadratic + # with the number of points + hv = _compute_3d(sorted_pareto_sols, reference_point) + else: + hv = _compute_hv(sorted_pareto_sols, reference_point) + + # NOTE(nabenabe): `nan` happens when inf - inf happens, but this is inf in hypervolume due to + # the submodularity. + return hv if np.isfinite(hv) else float("inf") diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/_imports.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/_imports.py new file mode 100644 index 0000000000000000000000000000000000000000..9cce3851a8a7ce1377f40cf21a1f629deafd724b --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/_imports.py @@ -0,0 +1,130 @@ +from __future__ import annotations + +import importlib +import types +from types import TracebackType +from typing import Any + + +_INTEGRATION_IMPORT_ERROR_TEMPLATE = ( + "\nCould not find `optuna-integration` for `{0}`.\n" + "Please run `pip install optuna-integration[{0}]`." +) + + +class _DeferredImportExceptionContextManager: + """Context manager to defer exceptions from imports. + + Catches :exc:`ImportError` and :exc:`SyntaxError`. + If any exception is caught, this class raises an :exc:`ImportError` when being checked. 
+ + """ + + def __init__(self) -> None: + self._deferred: tuple[Exception, str] | None = None + + def __enter__(self) -> "_DeferredImportExceptionContextManager": + """Enter the context manager. + + Returns: + Itself. + + """ + return self + + def __exit__( + self, + exc_type: type[Exception] | None, + exc_value: Exception | None, + traceback: TracebackType | None, + ) -> bool | None: + """Exit the context manager. + + Args: + exc_type: + Raised exception type. :obj:`None` if nothing is raised. + exc_value: + Raised exception object. :obj:`None` if nothing is raised. + traceback: + Associated traceback. :obj:`None` if nothing is raised. + + Returns: + :obj:`None` if nothing is deferred, otherwise :obj:`True`. + :obj:`True` will suppress any exceptions avoiding them from propagating. + + """ + if isinstance(exc_value, (ImportError, SyntaxError)): + if isinstance(exc_value, ImportError): + message = ( + "Tried to import '{}' but failed. Please make sure that the package is " + "installed correctly to use this feature. Actual error: {}." + ).format(exc_value.name, exc_value) + elif isinstance(exc_value, SyntaxError): + message = ( + "Tried to import a package but failed due to a syntax error in {}. Please " + "make sure that the Python version is correct to use this feature. Actual " + "error: {}." + ).format(exc_value.filename, exc_value) + else: + assert False + + self._deferred = (exc_value, message) + return True + return None + + def is_successful(self) -> bool: + """Return whether the context manager has caught any exceptions. + + Returns: + :obj:`True` if no exceptions are caught, :obj:`False` otherwise. + + """ + return self._deferred is None + + def check(self) -> None: + """Check whether the context manager has caught any exceptions. + + Raises: + :exc:`ImportError`: + If any exception was caught from the caught exception. + + """ + if self._deferred is not None: + exc_value, message = self._deferred + raise ImportError(message) from exc_value + + +def try_import() -> _DeferredImportExceptionContextManager: + """Create a context manager that can wrap imports of optional packages to defer exceptions. + + Returns: + Deferred import context manager. + + """ + return _DeferredImportExceptionContextManager() + + +class _LazyImport(types.ModuleType): + """Module wrapper for lazy import. + + This class wraps the specified modules and lazily imports them only when accessed. + Otherwise, `import optuna` is slowed down by importing all submodules and + dependencies even if not required. + Within this project's usage, importlib override this module's attribute on the first + access and the imported submodule is directly accessed from the second access. + + Args: + name: Name of module to apply lazy import. 
+ """ + + def __init__(self, name: str) -> None: + super().__init__(name) + self._name = name + + def _load(self) -> types.ModuleType: + module = importlib.import_module(self._name) + self.__dict__.update(module.__dict__) + return module + + def __getattr__(self, item: str) -> Any: + return getattr(self._load(), item) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/_transform.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/_transform.py new file mode 100644 index 0000000000000000000000000000000000000000..cb9ce7047419c203118dbbe1fd1fc833521f300a --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/_transform.py @@ -0,0 +1,301 @@ +from __future__ import annotations + +import math +from typing import Any + +import numpy as np + +from optuna.distributions import BaseDistribution +from optuna.distributions import CategoricalDistribution +from optuna.distributions import FloatDistribution +from optuna.distributions import IntDistribution + + +class _SearchSpaceTransform: + """Transform a search space and parameter configurations to continuous space. + + The search space bounds and parameter configurations are represented as ``numpy.ndarray``s and + transformed into continuous space. Bounds and parameters associated with categorical + distributions are one-hot encoded. Parameter configurations in this space can additionally be + untransformed, or mapped back to the original space. This type of + transformation/untransformation is useful for e.g. implementing samplers without having to + condition on distribution types before sampling parameter values. + + Args: + search_space: + The search space. If any transformations are to be applied, parameter configurations + are assumed to hold parameter values for all of the distributions defined in this + search space. Otherwise, assertion failures will be raised. + transform_log: + If :obj:`True`, apply log/exp operations to the bounds and parameters with + corresponding distributions in log space during transformation/untransformation. + Should always be :obj:`True` if any parameters are going to be sampled from the + transformed space. + transform_step: + If :obj:`True`, offset the lower and higher bounds by a half step each, increasing the + space by one step. This allows fair sampling for values close to the bounds. + Should always be :obj:`True` if any parameters are going to be sampled from the + transformed space. + transform_0_1: + If :obj:`True`, apply a linear transformation to the bounds and parameters so that + they are in the unit cube. + + Attributes: + bounds: + Constructed bounds from the given search space. + column_to_encoded_columns: + Constructed mapping from original parameter column index to encoded column indices. + encoded_column_to_column: + Constructed mapping from encoded column index to original parameter column index. + + Note: + Parameter values are not scaled to the unit cube. + + Note: + ``transform_log`` and ``transform_step`` are useful for constructing bounds and parameters + without any actual transformations by setting those arguments to :obj:`False`. This is + needed for e.g. the hyperparameter importance assessments. 
+ + """ + + def __init__( + self, + search_space: dict[str, BaseDistribution], + transform_log: bool = True, + transform_step: bool = True, + transform_0_1: bool = False, + ) -> None: + bounds, column_to_encoded_columns, encoded_column_to_column = _transform_search_space( + search_space, transform_log, transform_step + ) + self._raw_bounds = bounds + self._column_to_encoded_columns = column_to_encoded_columns + self._encoded_column_to_column = encoded_column_to_column + self._search_space = search_space + self._transform_log = transform_log + self._transform_0_1 = transform_0_1 + + @property + def bounds(self) -> np.ndarray: + if self._transform_0_1: + return np.array([[0.0, 1.0]] * self._raw_bounds.shape[0]) + else: + return self._raw_bounds + + @property + def column_to_encoded_columns(self) -> list[np.ndarray]: + return self._column_to_encoded_columns + + @property + def encoded_column_to_column(self) -> np.ndarray: + return self._encoded_column_to_column + + def transform(self, params: dict[str, Any]) -> np.ndarray: + """Transform a parameter configuration from actual values to continuous space. + + Args: + params: + A parameter configuration to transform. + + Returns: + A 1-dimensional ``numpy.ndarray`` holding the transformed parameters in the + configuration. + + """ + trans_params = np.zeros(self._raw_bounds.shape[0], dtype=np.float64) + + bound_idx = 0 + for name, distribution in self._search_space.items(): + assert name in params, "Parameter configuration must contain all distributions." + param = params[name] + + if isinstance(distribution, CategoricalDistribution): + choice_idx = int(distribution.to_internal_repr(param)) + trans_params[bound_idx + choice_idx] = 1 + bound_idx += len(distribution.choices) + else: + trans_params[bound_idx] = _transform_numerical_param( + param, distribution, self._transform_log + ) + bound_idx += 1 + + if self._transform_0_1: + single_mask = self._raw_bounds[:, 0] == self._raw_bounds[:, 1] + trans_params[single_mask] = 0.5 + trans_params[~single_mask] = ( + trans_params[~single_mask] - self._raw_bounds[~single_mask, 0] + ) / (self._raw_bounds[~single_mask, 1] - self._raw_bounds[~single_mask, 0]) + + return trans_params + + def untransform(self, trans_params: np.ndarray) -> dict[str, Any]: + """Untransform a parameter configuration from continuous space to actual values. + + Args: + trans_params: + A 1-dimensional ``numpy.ndarray`` in the transformed space corresponding to a + parameter configuration. + + Returns: + A dictionary of an untransformed parameter configuration. Keys are parameter names. + Values are untransformed parameter values. + + """ + assert trans_params.shape == (self._raw_bounds.shape[0],) + + if self._transform_0_1: + trans_params = self._raw_bounds[:, 0] + trans_params * ( + self._raw_bounds[:, 1] - self._raw_bounds[:, 0] + ) + + params = {} + + for (name, distribution), encoded_columns in zip( + self._search_space.items(), self.column_to_encoded_columns + ): + trans_param = trans_params[encoded_columns] + + if isinstance(distribution, CategoricalDistribution): + # Select the highest rated one-hot encoding. 
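To make the encoding performed by transform/untransform concrete, here is a hedged sketch with a hypothetical two-parameter space: the categorical occupies three one-hot columns and the log-scaled float occupies a single column holding its log value (parameter names are invented for illustration).

from optuna._transform import _SearchSpaceTransform
from optuna.distributions import CategoricalDistribution, FloatDistribution

space = {
    "optimizer": CategoricalDistribution(["sgd", "adam", "rmsprop"]),
    "lr": FloatDistribution(1e-5, 1e-1, log=True),
}
trans = _SearchSpaceTransform(space)
vec = trans.transform({"optimizer": "adam", "lr": 1e-3})
# vec[:3] is the one-hot block [0., 1., 0.]; vec[3] is log(1e-3) since transform_log=True.
params = trans.untransform(vec)       # back to {"optimizer": "adam", "lr": ~1e-3}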
+ param = distribution.to_external_repr(trans_param.argmax()) + else: + param = _untransform_numerical_param( + trans_param.item(), distribution, self._transform_log + ) + + params[name] = param + + return params + + +def _transform_search_space( + search_space: dict[str, BaseDistribution], transform_log: bool, transform_step: bool +) -> tuple[np.ndarray, list[np.ndarray], np.ndarray]: + assert len(search_space) > 0, "Cannot transform if no distributions are given." + + n_bounds = sum( + len(d.choices) if isinstance(d, CategoricalDistribution) else 1 + for d in search_space.values() + ) + + bounds = np.empty((n_bounds, 2), dtype=np.float64) + column_to_encoded_columns: list[np.ndarray] = [] + encoded_column_to_column = np.empty(n_bounds, dtype=np.int64) + + bound_idx = 0 + for distribution in search_space.values(): + d = distribution + if isinstance(d, CategoricalDistribution): + n_choices = len(d.choices) + bounds[bound_idx : bound_idx + n_choices] = (0, 1) # Broadcast across all choices. + encoded_columns = np.arange(bound_idx, bound_idx + n_choices) + encoded_column_to_column[encoded_columns] = len(column_to_encoded_columns) + column_to_encoded_columns.append(encoded_columns) + bound_idx += n_choices + elif isinstance( + d, + ( + FloatDistribution, + IntDistribution, + ), + ): + if isinstance(d, FloatDistribution): + if d.step is not None: + half_step = 0.5 * d.step if transform_step else 0.0 + bds = ( + _transform_numerical_param(d.low, d, transform_log) - half_step, + _transform_numerical_param(d.high, d, transform_log) + half_step, + ) + else: + bds = ( + _transform_numerical_param(d.low, d, transform_log), + _transform_numerical_param(d.high, d, transform_log), + ) + elif isinstance(d, IntDistribution): + half_step = 0.5 * d.step if transform_step else 0.0 + if d.log: + bds = ( + _transform_numerical_param(d.low - half_step, d, transform_log), + _transform_numerical_param(d.high + half_step, d, transform_log), + ) + else: + bds = ( + _transform_numerical_param(d.low, d, transform_log) - half_step, + _transform_numerical_param(d.high, d, transform_log) + half_step, + ) + else: + assert False, "Should not reach. Unexpected distribution." + + bounds[bound_idx] = bds + encoded_column = np.atleast_1d(bound_idx) + encoded_column_to_column[encoded_column] = len(column_to_encoded_columns) + column_to_encoded_columns.append(encoded_column) + bound_idx += 1 + else: + assert False, "Should not reach. Unexpected distribution." + + assert bound_idx == n_bounds + + return bounds, column_to_encoded_columns, encoded_column_to_column + + +def _transform_numerical_param( + param: int | float, distribution: BaseDistribution, transform_log: bool +) -> float: + d = distribution + + if isinstance(d, CategoricalDistribution): + assert False, "Should not reach. Should be one-hot encoded." + elif isinstance(d, FloatDistribution): + if d.log: + trans_param = math.log(param) if transform_log else float(param) + else: + trans_param = float(param) + elif isinstance(d, IntDistribution): + if d.log: + trans_param = math.log(param) if transform_log else float(param) + else: + trans_param = float(param) + else: + assert False, "Should not reach. Unexpected distribution." + + return trans_param + + +def _untransform_numerical_param( + trans_param: float, distribution: BaseDistribution, transform_log: bool +) -> int | float: + d = distribution + + if isinstance(d, CategoricalDistribution): + assert False, "Should not reach. Should be one-hot encoded." 
+ elif isinstance(d, FloatDistribution): + if d.log: + param = math.exp(trans_param) if transform_log else trans_param + if d.single(): + pass + else: + param = min(param, np.nextafter(d.high, d.high - 1)) + elif d.step is not None: + param = float( + np.clip(np.round((trans_param - d.low) / d.step) * d.step + d.low, d.low, d.high) + ) + else: + if d.single(): + param = trans_param + else: + param = min(trans_param, np.nextafter(d.high, d.high - 1)) + elif isinstance(d, IntDistribution): + if d.log: + if transform_log: + param = int(np.clip(np.round(math.exp(trans_param)), d.low, d.high)) + else: + param = int(trans_param) + else: + param = int( + np.clip(np.round((trans_param - d.low) / d.step) * d.step + d.low, d.low, d.high) + ) + else: + assert False, "Should not reach. Unexpected distribution." + + return param diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/_typing.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/_typing.py new file mode 100644 index 0000000000000000000000000000000000000000..fe8ebbb3642d685d88f4cd1d5a4490d8c7ea22ec --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/_typing.py @@ -0,0 +1,18 @@ +from __future__ import annotations + +from typing import Mapping +from typing import Sequence +from typing import Union + + +JSONSerializable = Union[ + Mapping[str, "JSONSerializable"], + Sequence["JSONSerializable"], + str, + int, + float, + bool, + None, +] + +__all__ = ["JSONSerializable"] diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/artifacts/__init__.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/artifacts/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ae52b244a0055f4ab2aabdf7d823787472e27940 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/artifacts/__init__.py @@ -0,0 +1,20 @@ +from optuna.artifacts._backoff import Backoff +from optuna.artifacts._boto3 import Boto3ArtifactStore +from optuna.artifacts._download import download_artifact +from optuna.artifacts._filesystem import FileSystemArtifactStore +from optuna.artifacts._gcs import GCSArtifactStore +from optuna.artifacts._list_artifact_meta import get_all_artifact_meta +from optuna.artifacts._upload import ArtifactMeta +from optuna.artifacts._upload import upload_artifact + + +__all__ = [ + "ArtifactMeta", + "FileSystemArtifactStore", + "Boto3ArtifactStore", + "GCSArtifactStore", + "Backoff", + "get_all_artifact_meta", + "upload_artifact", + "download_artifact", +] diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/artifacts/_backoff.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/artifacts/_backoff.py new file mode 100644 index 0000000000000000000000000000000000000000..823a9c62b22465f651d1b875c388876a7fc1c10f --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/artifacts/_backoff.py @@ -0,0 +1,117 @@ +from __future__ import annotations + +import logging +import time +from typing import TYPE_CHECKING + +from optuna.artifacts.exceptions import ArtifactNotFound + + +_logger = logging.getLogger(__name__) + + +if TYPE_CHECKING: + from typing import BinaryIO + + from optuna.artifacts._protocol import ArtifactStore + + +class Backoff: + """An artifact store's 
middleware for exponential backoff. + + Example: + .. code-block:: python + + import optuna + from optuna.artifacts import upload_artifact + from optuna.artifacts import Boto3ArtifactStore + from optuna.artifacts import Backoff + + + artifact_store = Backoff(Boto3ArtifactStore("my-bucket")) + + + def objective(trial: optuna.Trial) -> float: + ... = trial.suggest_float("x", -10, 10) + file_path = generate_example(...) + upload_artifact( + artifact_store=artifact_store, + file_path=file_path, + study_or_trial=trial, + ) + return ... + """ + + def __init__( + self, + backend: ArtifactStore, + *, + max_retries: int = 10, + multiplier: float = 2, + min_delay: float = 0.1, + max_delay: float = 30, + ) -> None: + # Default sleep seconds: + # 0.1, 0.2, 0.4, 0.8, 1.6, 3.2, 6.4, 12.8, 25.6, 30 + self._backend = backend + assert max_retries > 0 + assert multiplier > 0 + assert min_delay > 0 + assert max_delay > min_delay + self._max_retries = max_retries + self._multiplier = multiplier + self._min_delay = min_delay + self._max_delay = max_delay + + def _get_sleep_secs(self, n_retry: int) -> float: + return min(self._min_delay * self._multiplier**n_retry, self._max_delay) + + def open_reader(self, artifact_id: str) -> BinaryIO: + for i in range(self._max_retries): + try: + return self._backend.open_reader(artifact_id) + except ArtifactNotFound: + raise + except Exception as e: + if i == self._max_retries - 1: + raise + else: + _logger.error(f"Failed to open artifact={artifact_id} n_retry={i}", exc_info=e) + time.sleep(self._get_sleep_secs(i)) + assert False, "must not reach here" + + def write(self, artifact_id: str, content_body: BinaryIO) -> None: + for i in range(self._max_retries): + try: + self._backend.write(artifact_id, content_body) + break + except ArtifactNotFound: + raise + except Exception as e: + if i == self._max_retries - 1: + raise + else: + _logger.error(f"Failed to open artifact={artifact_id} n_retry={i}", exc_info=e) + content_body.seek(0) + time.sleep(self._get_sleep_secs(i)) + + def remove(self, artifact_id: str) -> None: + for i in range(self._max_retries): + try: + self._backend.remove(artifact_id) + except ArtifactNotFound: + raise + except Exception as e: + if i == self._max_retries - 1: + raise + else: + _logger.error(f"Failed to delete artifact={artifact_id}", exc_info=e) + time.sleep(self._get_sleep_secs(i)) + + +if TYPE_CHECKING: + # A mypy-runtime assertion to ensure that the Backoff middleware implements + # all abstract methods in ArtifactStore. 
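The retry delays follow the schedule `min(min_delay * multiplier**n, max_delay)` used by `_get_sleep_secs`. A standalone sketch (not Optuna code) that reproduces the default sequence noted in the constructor comment:

.. code-block:: python

    def sleep_schedule(max_retries=10, multiplier=2.0, min_delay=0.1, max_delay=30.0):
        # Same formula as Backoff._get_sleep_secs, reproduced for illustration.
        return [min(min_delay * multiplier**n, max_delay) for n in range(max_retries)]

    print([round(s, 1) for s in sleep_schedule()])
    # [0.1, 0.2, 0.4, 0.8, 1.6, 3.2, 6.4, 12.8, 25.6, 30.0]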
+ from optuna.artifacts import FileSystemArtifactStore + + _: ArtifactStore = Backoff(FileSystemArtifactStore(".")) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/artifacts/_boto3.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/artifacts/_boto3.py new file mode 100644 index 0000000000000000000000000000000000000000..d896cc951e14a6e3465d7691386a0a87f8f148a3 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/artifacts/_boto3.py @@ -0,0 +1,107 @@ +from __future__ import annotations + +import io +import shutil +from typing import TYPE_CHECKING + +from optuna._imports import try_import +from optuna.artifacts.exceptions import ArtifactNotFound + + +if TYPE_CHECKING: + from typing import BinaryIO + + from mypy_boto3_s3 import S3Client + +with try_import() as _imports: + import boto3 + from botocore.exceptions import ClientError + + +class Boto3ArtifactStore: + """An artifact backend for Boto3. + + Args: + bucket_name: + The name of the bucket to store artifacts. + + client: + A Boto3 client to use for storage operations. If not specified, a new client will + be created. + + avoid_buf_copy: + If True, skip procedure to copy the content of the source file object to a buffer + before uploading it to S3 ins. This is default to False because using + ``upload_fileobj()`` method of Boto3 client might close the source file object. + + Example: + .. code-block:: python + + import optuna + from optuna.artifacts import upload_artifact + from optuna.artifacts import Boto3ArtifactStore + + + artifact_store = Boto3ArtifactStore("my-bucket") + + + def objective(trial: optuna.Trial) -> float: + ... = trial.suggest_float("x", -10, 10) + file_path = generate_example(...) + upload_artifact( + artifact_store=artifact_store, + file_path=file_path, + study_or_trial=trial, + ) + return ... + """ + + def __init__( + self, bucket_name: str, client: S3Client | None = None, *, avoid_buf_copy: bool = False + ) -> None: + _imports.check() + self.bucket = bucket_name + self.client = client or boto3.client("s3") + # This flag is added to avoid that upload_fileobj() method of Boto3 client may close the + # source file object. See https://github.com/boto/boto3/issues/929. + self._avoid_buf_copy = avoid_buf_copy + + def open_reader(self, artifact_id: str) -> BinaryIO: + try: + obj = self.client.get_object(Bucket=self.bucket, Key=artifact_id) + except ClientError as e: + if _is_not_found_error(e): + raise ArtifactNotFound( + f"Artifact storage with bucket: {self.bucket}, artifact_id: {artifact_id} was" + " not found" + ) from e + raise + body = obj.get("Body") + assert body is not None + return body + + def write(self, artifact_id: str, content_body: BinaryIO) -> None: + fsrc: BinaryIO = content_body + if not self._avoid_buf_copy: + buf = io.BytesIO() + shutil.copyfileobj(content_body, buf) + buf.seek(0) + fsrc = buf + self.client.upload_fileobj(fsrc, self.bucket, artifact_id) + + def remove(self, artifact_id: str) -> None: + self.client.delete_object(Bucket=self.bucket, Key=artifact_id) + + +def _is_not_found_error(e: ClientError) -> bool: + error_code = e.response.get("Error", {}).get("Code") + http_status_code = e.response.get("ResponseMetadata", {}).get("HTTPStatusCode") + return error_code == "NoSuchKey" or http_status_code == 404 + + +if TYPE_CHECKING: + # A mypy-runtime assertion to ensure that Boto3ArtifactStore implements all abstract methods + # in ArtifactStore. 
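A hedged usage sketch combining the two stores above: wrapping ``Boto3ArtifactStore`` in the ``Backoff`` middleware so transient S3 errors are retried. It assumes ``boto3`` is installed, AWS credentials are configured, and the bucket name ``my-example-bucket`` is a placeholder.

.. code-block:: python

    import optuna
    from optuna.artifacts import Backoff, Boto3ArtifactStore, upload_artifact

    artifact_store = Backoff(Boto3ArtifactStore("my-example-bucket"), max_retries=5)


    def objective(trial: optuna.Trial) -> float:
        x = trial.suggest_float("x", -10, 10)
        # Write the suggested value to a file and attach it to the trial.
        path = f"x_{trial.number}.txt"
        with open(path, "w") as f:
            f.write(str(x))
        upload_artifact(artifact_store=artifact_store, file_path=path, study_or_trial=trial)
        return x**2


    study = optuna.create_study()
    study.optimize(objective, n_trials=3)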
+ from optuna.artifacts._protocol import ArtifactStore + + _: ArtifactStore = Boto3ArtifactStore("") diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/artifacts/_download.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/artifacts/_download.py new file mode 100644 index 0000000000000000000000000000000000000000..9295dd1167ec4387bab0ba14f1dd1c29262bf91e --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/artifacts/_download.py @@ -0,0 +1,24 @@ +from __future__ import annotations + +import os +import shutil + +from optuna.artifacts._protocol import ArtifactStore + + +def download_artifact(*, artifact_store: ArtifactStore, file_path: str, artifact_id: str) -> None: + """Download an artifact from the artifact store. + + Args: + artifact_store: + An artifact store. + file_path: + A path to save the downloaded artifact. + artifact_id: + The identifier of the artifact to download. + """ + if os.path.exists(file_path): + raise FileExistsError(f"File already exists: {file_path}") + + with artifact_store.open_reader(artifact_id) as reader, open(file_path, "wb") as writer: + shutil.copyfileobj(reader, writer) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/artifacts/_filesystem.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/artifacts/_filesystem.py new file mode 100644 index 0000000000000000000000000000000000000000..32318996bd9509375cca88ad6bd02740575fd17f --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/artifacts/_filesystem.py @@ -0,0 +1,80 @@ +from __future__ import annotations + +import os +from pathlib import Path +import shutil +from typing import TYPE_CHECKING + +from optuna.artifacts.exceptions import ArtifactNotFound + + +if TYPE_CHECKING: + from typing import BinaryIO + + +class FileSystemArtifactStore: + """An artifact store for file systems. + + Args: + base_path: + The base path to a directory to store artifacts. + + Example: + .. code-block:: python + + import os + + import optuna + from optuna.artifacts import FileSystemArtifactStore + from optuna.artifacts import upload_artifact + + + base_path = "./artifacts" + os.makedirs(base_path, exist_ok=True) + artifact_store = FileSystemArtifactStore(base_path=base_path) + + + def objective(trial: optuna.Trial) -> float: + ... = trial.suggest_float("x", -10, 10) + file_path = generate_example(...) + upload_artifact( + artifact_store=artifact_store, + file_path=file_path, + study_or_trial=trial, + ) + return ... + """ + + def __init__(self, base_path: str | Path) -> None: + if isinstance(base_path, str): + base_path = Path(base_path) + # TODO(Shinichi): Check if the base_path is valid directory. 
+ self._base_path = base_path + + def open_reader(self, artifact_id: str) -> BinaryIO: + filepath = os.path.join(self._base_path, artifact_id) + try: + f = open(filepath, "rb") + except FileNotFoundError as e: + raise ArtifactNotFound("not found") from e + return f + + def write(self, artifact_id: str, content_body: BinaryIO) -> None: + filepath = os.path.join(self._base_path, artifact_id) + with open(filepath, "wb") as f: + shutil.copyfileobj(content_body, f) + + def remove(self, artifact_id: str) -> None: + filepath = os.path.join(self._base_path, artifact_id) + try: + os.remove(filepath) + except FileNotFoundError as e: + raise ArtifactNotFound("not found") from e + + +if TYPE_CHECKING: + # A mypy-runtime assertion to ensure that LocalArtifactBackend + # implements all abstract methods in ArtifactBackendProtocol. + from optuna.artifacts._protocol import ArtifactStore + + _: ArtifactStore = FileSystemArtifactStore("") diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/artifacts/_gcs.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/artifacts/_gcs.py new file mode 100644 index 0000000000000000000000000000000000000000..63e469ce7f3ee20dc1fa9cdf5a0a3884feb322ef --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/artifacts/_gcs.py @@ -0,0 +1,95 @@ +from __future__ import annotations + +from io import BytesIO +from typing import TYPE_CHECKING + +from optuna._experimental import experimental_class +from optuna._imports import try_import +from optuna.artifacts.exceptions import ArtifactNotFound + + +if TYPE_CHECKING: + from typing import BinaryIO + +with try_import() as _imports: + import google.cloud.storage + + +@experimental_class("3.4.0") +class GCSArtifactStore: + """An artifact backend for Google Cloud Storage (GCS). + + Args: + bucket_name: + The name of the bucket to store artifacts. + + client: + A google-cloud-storage ``Client`` to use for storage operations. If not specified, a + new client will be created with default settings. + + Example: + .. code-block:: python + + import optuna + from optuna.artifacts import GCSArtifactStore, upload_artifact + + + artifact_backend = GCSArtifactStore("my-bucket") + + + def objective(trial: optuna.Trial) -> float: + ... = trial.suggest_float("x", -10, 10) + file_path = generate_example(...) + upload_artifact( + artifact_store=artifact_store, + file_path=file_path, + study_or_trial=trial, + ) + return ... + + Before running this code, you will have to install ``gcloud`` and run + + .. code-block:: bash + + gcloud auth application-default login + + so that the Cloud Storage library can automatically find the credential. 
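Putting the pieces above together: a local round trip with ``FileSystemArtifactStore``, ``upload_artifact`` and ``download_artifact``, including the ``ArtifactNotFound`` behaviour for a missing id. This is a minimal sketch; the temporary directory and file names are placeholders.

.. code-block:: python

    import os
    import tempfile

    import optuna
    from optuna.artifacts import FileSystemArtifactStore, download_artifact, upload_artifact
    from optuna.artifacts.exceptions import ArtifactNotFound

    base_path = tempfile.mkdtemp()
    artifact_store = FileSystemArtifactStore(base_path=base_path)

    study = optuna.create_study()
    trial = study.ask()

    # Upload a small file and attach it to the trial.
    src = os.path.join(base_path, "input.txt")
    with open(src, "w") as f:
        f.write("hello artifact")
    artifact_id = upload_artifact(
        artifact_store=artifact_store, file_path=src, study_or_trial=trial
    )

    # Download it back; download_artifact refuses to overwrite existing files.
    dst = os.path.join(base_path, "copy.txt")
    download_artifact(artifact_store=artifact_store, artifact_id=artifact_id, file_path=dst)

    # A missing id raises ArtifactNotFound.
    try:
        artifact_store.open_reader("no-such-id")
    except ArtifactNotFound:
        pass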
+ """ + + def __init__( + self, + bucket_name: str, + client: google.cloud.storage.Client | None = None, + ) -> None: + _imports.check() + self.bucket_name = bucket_name + self.client = client or google.cloud.storage.Client() + self.bucket_obj = self.client.bucket(bucket_name) + + def open_reader(self, artifact_id: str) -> "BinaryIO": + blob = self.bucket_obj.get_blob(artifact_id) + + if blob is None: + raise ArtifactNotFound( + f"Artifact storage with bucket: {self.bucket_name}, artifact_id: {artifact_id} was" + " not found" + ) + + body = blob.download_as_bytes() + return BytesIO(body) + + def write(self, artifact_id: str, content_body: "BinaryIO") -> None: + blob = self.bucket_obj.blob(artifact_id) + data = content_body.read() + blob.upload_from_string(data) + + def remove(self, artifact_id: str) -> None: + self.bucket_obj.delete_blob(artifact_id) + + +if TYPE_CHECKING: + # A mypy-runtime assertion to ensure that GCS3ArtifactStore implements all abstract methods + # in ArtifactStore. + from optuna.artifacts._protocol import ArtifactStore + + _: ArtifactStore = GCSArtifactStore("") diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/artifacts/_list_artifact_meta.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/artifacts/_list_artifact_meta.py new file mode 100644 index 0000000000000000000000000000000000000000..86200257049d3724e7ec8a982436f8cda96cbfbd --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/artifacts/_list_artifact_meta.py @@ -0,0 +1,98 @@ +from __future__ import annotations + +import json + +from optuna.artifacts._upload import ArtifactMeta +from optuna.artifacts._upload import ARTIFACTS_ATTR_PREFIX +from optuna.storages import BaseStorage +from optuna.study import Study +from optuna.trial import FrozenTrial +from optuna.trial import Trial + + +def get_all_artifact_meta( + study_or_trial: Trial | FrozenTrial | Study, *, storage: BaseStorage | None = None +) -> list[ArtifactMeta]: + """List the associated artifact information of the provided trial or study. + + Args: + study_or_trial: + A :class:`~optuna.trial.Trial` object, a :class:`~optuna.trial.FrozenTrial`, or + a :class:`~optuna.study.Study` object. + storage: + A storage object. This argument is required only if ``study_or_trial`` is + :class:`~optuna.trial.FrozenTrial`. + + Example: + An example where this function is useful: + + .. code:: + + import os + + import optuna + + + # Get the storage that contains the study of interest. + storage = optuna.storages.get_storage(storage=...) + + # Instantiate the artifact store used for the study. + # Optuna does not provide the API that stores the used artifact store information, so + # please manage the information in the user side. + artifact_store = ... + + # Load study that contains the artifacts of interest. + study = optuna.load_study(study_name=..., storage=storage) + + # Fetch the best trial. + best_trial = study.best_trial + + # Fetch all the artifact meta connected to the best trial. + artifact_metas = optuna.artifacts.get_all_artifact_meta(best_trial, storage=storage) + + download_dir_path = "./best_trial_artifacts/" + os.makedirs(download_dir_path, exist_ok=True) + + for artifact_meta in artifact_metas: + download_file_path = os.path.join(download_dir_path, artifact_meta.filename) + # Download the artifacts to ``download_file_path``. 
+ optuna.artifacts.download_artifact( + artifact_store=artifact_store, + artifact_id=artifact_meta.artifact_id, + file_path=download_file_path, + ) + + Returns: + The list of artifact meta in the trial or study. + Each artifact meta includes ``artifact_id``, ``filename``, ``mimetype``, and ``encoding``. + Note that if :class:`~optuna.study.Study` is provided, we return the information of the + artifacts uploaded to ``study``, but not to all the trials in the study. + """ + if isinstance(study_or_trial, Trial) and storage is None: + storage = study_or_trial.storage + elif isinstance(study_or_trial, Study) and storage is None: + storage = study_or_trial._storage + + if storage is None: + raise ValueError("storage is required for FrozenTrial.") + + if isinstance(study_or_trial, (Trial, FrozenTrial)): + system_attrs = storage.get_trial_system_attrs(study_or_trial._trial_id) + else: + system_attrs = storage.get_study_system_attrs(study_or_trial._study_id) + + artifact_meta_list: list[ArtifactMeta] = [] + for attr_key, attr_json_string in system_attrs.items(): + if not attr_key.startswith(ARTIFACTS_ATTR_PREFIX): + continue + + attr_content = json.loads(attr_json_string) + artifact_meta = ArtifactMeta( + artifact_id=attr_content["artifact_id"], + filename=attr_content["filename"], + mimetype=attr_content["mimetype"], + encoding=attr_content["encoding"], + ) + artifact_meta_list.append(artifact_meta) + + return artifact_meta_list diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/artifacts/_protocol.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/artifacts/_protocol.py new file mode 100644 index 0000000000000000000000000000000000000000..02cc08937ad2e132fd14199b64099afc42c59ea8 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/artifacts/_protocol.py @@ -0,0 +1,61 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + + +try: + from typing import Protocol +except ImportError: + from typing_extensions import Protocol # type: ignore + + +if TYPE_CHECKING: + from typing import BinaryIO + + +class ArtifactStore(Protocol): + """A protocol defining the interface for an artifact backend. + + The methods defined in this protocol are not supposed to be directly called by library users. + + An artifact backend is responsible for managing the storage and retrieval + of artifact data. The backend should provide methods for opening, writing + and removing artifacts. + """ + + def open_reader(self, artifact_id: str) -> BinaryIO: + """Open the artifact identified by the artifact_id. + + This method should return a binary file-like object in read mode, similar to + ``open(..., mode="rb")``. If the artifact does not exist, an + :exc:`~optuna.artifacts.exceptions.ArtifactNotFound` exception + should be raised. + + Args: + artifact_id: The identifier of the artifact to open. + + Returns: + BinaryIO: A binary file-like object that can be read from. + """ + ... + + def write(self, artifact_id: str, content_body: BinaryIO) -> None: + """Save the content to the backend. + + Args: + artifact_id: The identifier of the artifact to write to. + content_body: The content to write to the artifact. + """ + ... + + def remove(self, artifact_id: str) -> None: + """Remove the artifact identified by the artifact_id. + + This method should delete the artifact from the backend. 
If the artifact does not + exist, an :exc:`~optuna.artifacts.exceptions.ArtifactNotFound` exception + may be raised. + + Args: + artifact_id: The identifier of the artifact to remove. + """ + ... diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/artifacts/_upload.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/artifacts/_upload.py new file mode 100644 index 0000000000000000000000000000000000000000..04725ac082cde5e10f696c539786402d1aec4a67 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/artifacts/_upload.py @@ -0,0 +1,116 @@ +from __future__ import annotations + +from dataclasses import asdict +from dataclasses import dataclass +import json +import mimetypes +import os +import uuid + +from optuna._convert_positional_args import convert_positional_args +from optuna.artifacts._protocol import ArtifactStore +from optuna.storages import BaseStorage +from optuna.study import Study +from optuna.trial import FrozenTrial +from optuna.trial import Trial + + +ARTIFACTS_ATTR_PREFIX = "artifacts:" +DEFAULT_MIME_TYPE = "application/octet-stream" + + +@dataclass +class ArtifactMeta: + """Meta information for an artifact. + + .. note:: + All the artifact meta linked to a study or trial can be listed by + :func:`~optuna.artifacts.get_all_artifact_meta`. + The artifact meta can be used for :func:`~optuna.artifacts.download_artifact`. + + Args: + artifact_id: + The identifier of the artifact. + filename: + The artifact file name used for the upload. + mimetype: + A MIME type of the artifact. + If not specified, the MIME type is guessed from the file extension. + encoding: + An encoding of the artifact, which is suitable for use as a Content-Encoding header, + e.g., gzip. If not specified, the encoding is guessed from the file extension. + """ + + artifact_id: str + filename: str + mimetype: str + encoding: str | None + + +@convert_positional_args( + previous_positional_arg_names=["study_or_trial", "file_path", "artifact_store"], + deprecated_version="4.0.0", + removed_version="6.0.0", +) +def upload_artifact( + *, + artifact_store: ArtifactStore, + file_path: str, + study_or_trial: Trial | FrozenTrial | Study, + storage: BaseStorage | None = None, + mimetype: str | None = None, + encoding: str | None = None, +) -> str: + """Upload an artifact to the artifact store. + + Args: + artifact_store: + An artifact store. + file_path: + A path to the file to be uploaded. + study_or_trial: + A :class:`~optuna.trial.Trial` object, a :class:`~optuna.trial.FrozenTrial`, or + a :class:`~optuna.study.Study` object. + storage: + A storage object. This argument is required only if ``study_or_trial`` is + :class:`~optuna.trial.FrozenTrial`. + mimetype: + A MIME type of the artifact. If not specified, the MIME type is guessed from the file + extension. + encoding: + An encoding of the artifact, which is suitable for use as a ``Content-Encoding`` + header (e.g. gzip). If not specified, the encoding is guessed from the file extension. + + Returns: + An artifact ID. 
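Because ``ArtifactStore`` is a ``typing.Protocol``, any object with these three methods can act as a store. A minimal in-memory implementation as an illustration (``InMemoryArtifactStore`` is a hypothetical class, not shipped with Optuna):

.. code-block:: python

    import io
    from typing import BinaryIO

    from optuna.artifacts.exceptions import ArtifactNotFound


    class InMemoryArtifactStore:
        """Keeps artifact bytes in a dict; satisfies the ArtifactStore protocol."""

        def __init__(self) -> None:
            self._data: dict[str, bytes] = {}

        def open_reader(self, artifact_id: str) -> BinaryIO:
            if artifact_id not in self._data:
                raise ArtifactNotFound(artifact_id)
            return io.BytesIO(self._data[artifact_id])

        def write(self, artifact_id: str, content_body: BinaryIO) -> None:
            self._data[artifact_id] = content_body.read()

        def remove(self, artifact_id: str) -> None:
            if self._data.pop(artifact_id, None) is None:
                raise ArtifactNotFound(artifact_id)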
+ """ + + filename = os.path.basename(file_path) + + if isinstance(study_or_trial, Trial) and storage is None: + storage = study_or_trial.storage + elif isinstance(study_or_trial, Study) and storage is None: + storage = study_or_trial._storage + + if storage is None: + raise ValueError("storage is required for FrozenTrial.") + + artifact_id = str(uuid.uuid4()) + guess_mimetype, guess_encoding = mimetypes.guess_type(filename) + artifact = ArtifactMeta( + artifact_id=artifact_id, + filename=filename, + mimetype=mimetype or guess_mimetype or DEFAULT_MIME_TYPE, + encoding=encoding or guess_encoding, + ) + attr_key = ARTIFACTS_ATTR_PREFIX + artifact_id + if isinstance(study_or_trial, (Trial, FrozenTrial)): + trial_id = study_or_trial._trial_id + storage.set_trial_system_attr(trial_id, attr_key, json.dumps(asdict(artifact))) + else: + study_id = study_or_trial._study_id + storage.set_study_system_attr(study_id, attr_key, json.dumps(asdict(artifact))) + + with open(file_path, "rb") as f: + artifact_store.write(artifact_id, f) + return artifact_id diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/artifacts/exceptions.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/artifacts/exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..724afe74022414e985776c9eabb28e718c34e279 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/artifacts/exceptions.py @@ -0,0 +1,12 @@ +from optuna.exceptions import OptunaError + + +class ArtifactNotFound(OptunaError): + """Exception raised when an artifact is not found. + + It is typically raised while calling + :meth:`~optuna.artifacts._protocol.ArtifactStore.open_reader` or + :meth:`~optuna.artifacts._protocol.ArtifactStore.remove` methods. + """ + + ... diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/cli.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/cli.py new file mode 100644 index 0000000000000000000000000000000000000000..3b87f210c22a6648a47ab5760cea915cf869b51d --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/cli.py @@ -0,0 +1,1007 @@ +"""Optuna CLI module. 
+If you want to add a new command, you also need to update the constant `_COMMANDS` +""" + +from __future__ import annotations + +import argparse +from argparse import ArgumentParser +from argparse import Namespace +import datetime +from enum import Enum +import inspect +import json +import logging +import os +import sys +from typing import Any +import warnings + +import sqlalchemy.exc +import yaml + +import optuna +from optuna._imports import _LazyImport +from optuna.exceptions import CLIUsageError +from optuna.exceptions import ExperimentalWarning +from optuna.storages import BaseStorage +from optuna.storages import JournalFileStorage +from optuna.storages import JournalRedisStorage +from optuna.storages import JournalStorage +from optuna.storages import RDBStorage +from optuna.storages.journal import JournalFileBackend +from optuna.storages.journal import JournalRedisBackend +from optuna.trial import TrialState + + +_dataframe = _LazyImport("optuna.study._dataframe") + +_DATETIME_FORMAT = "%Y-%m-%d %H:%M:%S" + + +def _check_storage_url(storage_url: str | None) -> str: + if storage_url is not None: + return storage_url + + env_storage = os.environ.get("OPTUNA_STORAGE") + if env_storage is not None: + warnings.warn( + "Specifying the storage url via 'OPTUNA_STORAGE' environment variable" + " is an experimental feature. The interface can change in the future.", + ExperimentalWarning, + ) + return env_storage + raise CLIUsageError("Storage URL is not specified.") + + +def _get_storage(storage_url: str | None, storage_class: str | None) -> BaseStorage: + storage_url = _check_storage_url(storage_url) + if storage_class: + if storage_class == JournalRedisBackend.__name__: + return JournalStorage(JournalRedisBackend(storage_url)) + if storage_class == JournalRedisStorage.__name__: + return JournalStorage(JournalRedisStorage(storage_url)) + if storage_class == JournalFileBackend.__name__: + return JournalStorage(JournalFileBackend(storage_url)) + if storage_class == JournalFileStorage.__name__: + return JournalStorage(JournalFileStorage(storage_url)) + if storage_class == RDBStorage.__name__: + return RDBStorage(storage_url) + raise CLIUsageError("Unsupported storage class") + + if storage_url.startswith("redis"): + return JournalStorage(JournalRedisBackend(storage_url)) + if os.path.isfile(storage_url): + return JournalStorage(JournalFileBackend(storage_url)) + try: + return RDBStorage(storage_url) + except sqlalchemy.exc.ArgumentError: + raise CLIUsageError("Failed to guess storage class from storage_url") + + +def _format_value(value: Any) -> Any: + # Format value that can be serialized to JSON or YAML. 
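Per ``_check_storage_url`` and ``_get_storage`` above, the storage URL can come from ``--storage`` or (experimentally) from the ``OPTUNA_STORAGE`` environment variable, and the backend class is guessed from the URL unless ``--storage-class`` pins it. A hedged shell sketch with placeholder paths and study names:

.. code-block:: bash

    # Explicit RDB storage URL.
    optuna create-study --study-name demo --storage sqlite:///example.db

    # Equivalent, using the (experimental) environment variable instead of --storage.
    export OPTUNA_STORAGE=sqlite:///example.db
    optuna studies

    # Pin a specific backend when the URL alone is ambiguous.
    optuna studies --storage ./journal.log --storage-class JournalFileBackend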
+ if value is None or isinstance(value, (int, float)): + return value + elif isinstance(value, datetime.datetime): + return value.strftime(_DATETIME_FORMAT) + elif isinstance(value, list): + return list(_format_value(v) for v in value) + elif isinstance(value, tuple): + return tuple(_format_value(v) for v in value) + elif isinstance(value, dict): + return {_format_value(k): _format_value(v) for k, v in value.items()} + else: + return str(value) + + +def _convert_to_dict( + records: list[dict[tuple[str, str], Any]], columns: list[tuple[str, str]], flatten: bool +) -> tuple[list[dict[str, Any]], list[str]]: + header = [] + ret = [] + if flatten: + for column in columns: + if column[1] != "": + header.append(f"{column[0]}_{column[1]}") + elif any(isinstance(record.get(column), (list, tuple)) for record in records): + max_length = 0 + for record in records: + if column in record: + max_length = max(max_length, len(record[column])) + for i in range(max_length): + header.append(f"{column[0]}_{i}") + else: + header.append(column[0]) + for record in records: + row = {} + for column in columns: + if column not in record: + continue + value = _format_value(record[column]) + if column[1] != "": + row[f"{column[0]}_{column[1]}"] = value + elif any(isinstance(record.get(column), (list, tuple)) for record in records): + for i, v in enumerate(value): + row[f"{column[0]}_{i}"] = v + else: + row[f"{column[0]}"] = value + ret.append(row) + else: + for column in columns: + if column[0] not in header: + header.append(column[0]) + for record in records: + attrs: dict[str, Any] = {column_name: {} for column_name in header} + for column in columns: + if column not in record: + continue + value = _format_value(record[column]) + if isinstance(column[1], int): + # Reconstruct list of values. `_dataframe._create_records_and_aggregate_column` + # returns indices of list as the second key of column. + if attrs[column[0]] == {}: + attrs[column[0]] = [] + attrs[column[0]] += [None] * max(column[1] + 1 - len(attrs[column[0]]), 0) + attrs[column[0]][column[1]] = value + elif column[1] != "": + attrs[column[0]][column[1]] = value + else: + attrs[column[0]] = value + ret.append(attrs) + + return ret, header + + +class ValueType(Enum): + NONE = 0 + NUMERIC = 1 + STRING = 2 + + +class CellValue: + def __init__(self, value: Any) -> None: + self.value = value + if value is None: + self.value_type = ValueType.NONE + elif isinstance(value, (int, float)): + self.value_type = ValueType.NUMERIC + else: + self.value_type = ValueType.STRING + + def __str__(self) -> str: + if isinstance(self.value, datetime.datetime): + return self.value.strftime(_DATETIME_FORMAT) + else: + return str(self.value) + + def width(self) -> int: + return len(str(self.value)) + + def get_string(self, value_type: ValueType, width: int) -> str: + value = str(self.value) + if self.value is None: + return " " * width + elif value_type == ValueType.NUMERIC: + return f"{value:>{width}}" + else: + return f"{value:<{width}}" + + +def _dump_value(records: list[dict[str, Any]], header: list[str]) -> str: + values = [] + for record in records: + row = [] + for column_name in header: + # Below follows the table formatting convention where record[column_name] is treated as + # an empty string if record[column_name] is None. 
e.g., {"a": None} is replaced with + # {"a": ""} + row.append(str(record[column_name]) if record.get(column_name) is not None else "") + values.append(" ".join(row)) + return "\n".join(values) + + +def _dump_table(records: list[dict[str, Any]], header: list[str]) -> str: + rows = [] + for record in records: + row = [] + for column_name in header: + row.append(CellValue(record.get(column_name))) + rows.append(row) + + separator = "+" + header_string = "|" + rows_string = ["|" for _ in rows] + for column in range(len(header)): + value_types = [row[column].value_type for row in rows] + value_type = ValueType.NUMERIC + for t in value_types: + if t == ValueType.STRING: + value_type = ValueType.STRING + if len(rows) == 0: + max_width = len(header[column]) + else: + max_width = max(len(header[column]), max(row[column].width() for row in rows)) + separator += "-" * (max_width + 2) + "+" + if value_type == ValueType.NUMERIC: + header_string += f" {header[column]:>{max_width}} |" + else: + header_string += f" {header[column]:<{max_width}} |" + for i, row in enumerate(rows): + rows_string[i] += " " + row[column].get_string(value_type, max_width) + " |" + + ret = "" + ret += separator + "\n" + ret += header_string + "\n" + ret += separator + "\n" + for row_string in rows_string: + ret += row_string + "\n" + ret += separator + "\n" + + return ret + + +def _format_output( + records: list[dict[tuple[str, str], Any]] | dict[tuple[str, str], Any], + columns: list[tuple[str, str]], + output_format: str, + flatten: bool, +) -> str: + if isinstance(records, list): + values, header = _convert_to_dict(records, columns, flatten) + else: + values, header = _convert_to_dict([records], columns, flatten) + + if output_format == "value": + return _dump_value(values, header).strip() + elif output_format == "table": + return _dump_table(values, header).strip() + elif output_format == "json": + if isinstance(records, list): + return json.dumps(values).strip() + else: + return json.dumps(values[0]).strip() + elif output_format == "yaml": + if isinstance(records, list): + return yaml.safe_dump(values).strip() + else: + return yaml.safe_dump(values[0]).strip() + else: + raise CLIUsageError(f"Optuna CLI does not supported the {output_format} format.") + + +class _BaseCommand: + """Base class for commands. + + Note that command classes are not intended to be called by library users. + They are exclusively used within this file to manage Optuna CLI commands. + """ + + def __init__(self) -> None: + self.logger = optuna.logging.get_logger(__name__) + + def add_arguments(self, parser: ArgumentParser) -> None: + """Add arguments required for each command. + + Args: + parser: + `ArgumentParser` object to add arguments + """ + pass + + def take_action(self, parsed_args: Namespace) -> int: + """Define action if the command is called. + + Args: + parsed_args: + `Namespace` object including arguments specified by user. + + Returns: + Running status of the action. + 0 if this method finishes normally, otherwise 1. + """ + + raise NotImplementedError + + +class _CreateStudy(_BaseCommand): + """Create a new study.""" + + def add_arguments(self, parser: ArgumentParser) -> None: + parser.add_argument( + "--study-name", + default=None, + help="A human-readable name of a study to distinguish it from others.", + ) + parser.add_argument( + "--direction", + default=None, + type=str, + choices=("minimize", "maximize"), + help="Set direction of optimization to a new study. 
Set 'minimize' " + "for minimization and 'maximize' for maximization.", + ) + parser.add_argument( + "--skip-if-exists", + default=False, + action="store_true", + help="If specified, the creation of the study is skipped " + "without any error when the study name is duplicated.", + ) + parser.add_argument( + "--directions", + type=str, + default=None, + choices=("minimize", "maximize"), + help="Set directions of optimization to a new study." + " Put whitespace between directions. Each direction should be" + ' either "minimize" or "maximize".', + nargs="+", + ) + + def take_action(self, parsed_args: Namespace) -> int: + storage = _get_storage(parsed_args.storage, parsed_args.storage_class) + study_name = optuna.create_study( + storage=storage, + study_name=parsed_args.study_name, + direction=parsed_args.direction, + directions=parsed_args.directions, + load_if_exists=parsed_args.skip_if_exists, + ).study_name + print(study_name) + return 0 + + +class _DeleteStudy(_BaseCommand): + """Delete a specified study.""" + + def add_arguments(self, parser: ArgumentParser) -> None: + parser.add_argument("--study-name", default=None, help="The name of the study to delete.") + + def take_action(self, parsed_args: Namespace) -> int: + storage = _get_storage(parsed_args.storage, parsed_args.storage_class) + study_id = storage.get_study_id_from_name(parsed_args.study_name) + storage.delete_study(study_id) + return 0 + + +class _StudySetUserAttribute(_BaseCommand): + """Set a user attribute to a study.""" + + def add_arguments(self, parser: ArgumentParser) -> None: + parser.add_argument( + "--study-name", + required=True, + help="The name of the study to set the user attribute to.", + ) + parser.add_argument("--key", "-k", required=True, help="Key of the user attribute.") + parser.add_argument("--value", required=True, help="Value to be set.") + + def take_action(self, parsed_args: Namespace) -> int: + storage = _get_storage(parsed_args.storage, parsed_args.storage_class) + + study = optuna.load_study(storage=storage, study_name=parsed_args.study_name) + + study.set_user_attr(parsed_args.key, parsed_args.value) + + self.logger.info("Attribute successfully written.") + return 0 + + +class _StudyNames(_BaseCommand): + """Get all study names stored in a specified storage""" + + def add_arguments(self, parser: ArgumentParser) -> None: + parser.add_argument( + "-f", + "--format", + type=str, + choices=("value", "json", "table", "yaml"), + default="value", + help="Output format.", + ) + + def take_action(self, parsed_args: Namespace) -> int: + storage = _get_storage(parsed_args.storage, parsed_args.storage_class) + all_study_names = optuna.get_all_study_names(storage) + records = [] + record_key = ("name", "") + for study_name in all_study_names: + records.append({record_key: study_name}) + print(_format_output(records, [record_key], parsed_args.format, flatten=False)) + return 0 + + +class _Studies(_BaseCommand): + """Show a list of studies.""" + + _study_list_header = [ + ("name", ""), + ("direction", ""), + ("n_trials", ""), + ("datetime_start", ""), + ] + + def add_arguments(self, parser: ArgumentParser) -> None: + parser.add_argument( + "-f", + "--format", + type=str, + choices=("value", "json", "table", "yaml"), + default="table", + help="Output format.", + ) + parser.add_argument( + "--flatten", + default=False, + action="store_true", + help="Flatten nested columns such as directions.", + ) + + def take_action(self, parsed_args: Namespace) -> int: + storage = _get_storage(parsed_args.storage, 
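Per ``_CreateStudy`` and ``_StudySetUserAttribute`` above, multi-objective directions and user attributes can be set from the command line. A hedged sketch (study name and attribute values are placeholders):

.. code-block:: bash

    # Multi-objective study: one direction per objective.
    optuna create-study --study-name multi --directions minimize maximize \
        --storage sqlite:///example.db

    # Attach a user attribute to an existing study.
    optuna study set-user-attr --study-name multi --key owner --value alice \
        --storage sqlite:///example.db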
parsed_args.storage_class) + summaries = optuna.get_all_study_summaries(storage, include_best_trial=False) + + records = [] + for s in summaries: + start = ( + s.datetime_start.strftime(_DATETIME_FORMAT) + if s.datetime_start is not None + else None + ) + record: dict[tuple[str, str], Any] = {} + record[("name", "")] = s.study_name + record[("direction", "")] = tuple(d.name for d in s.directions) + record[("n_trials", "")] = s.n_trials + record[("datetime_start", "")] = start + record[("user_attrs", "")] = s.user_attrs + records.append(record) + + if any(r[("user_attrs", "")] != {} for r in records): + self._study_list_header.append(("user_attrs", "")) + print( + _format_output( + records, self._study_list_header, parsed_args.format, parsed_args.flatten + ) + ) + return 0 + + +class _Trials(_BaseCommand): + """Show a list of trials.""" + + def add_arguments(self, parser: ArgumentParser) -> None: + parser.add_argument( + "--study-name", + type=str, + required=True, + help="The name of the study which includes trials.", + ) + parser.add_argument( + "-f", + "--format", + type=str, + choices=("value", "json", "table", "yaml"), + default="table", + help="Output format.", + ) + parser.add_argument( + "--flatten", + default=False, + action="store_true", + help="Flatten nested columns such as params and user_attrs.", + ) + + def take_action(self, parsed_args: Namespace) -> int: + warnings.warn( + "'trials' is an experimental CLI command. The interface can change in the future.", + ExperimentalWarning, + ) + + storage = _get_storage(parsed_args.storage, parsed_args.storage_class) + study = optuna.load_study(storage=storage, study_name=parsed_args.study_name) + attrs = ( + "number", + "value" if not study._is_multi_objective() else "values", + "datetime_start", + "datetime_complete", + "duration", + "params", + "user_attrs", + "state", + ) + + records, columns = _dataframe._create_records_and_aggregate_column(study, attrs) + print(_format_output(records, columns, parsed_args.format, parsed_args.flatten)) + + return 0 + + +class _BestTrial(_BaseCommand): + """Show the best trial.""" + + def add_arguments(self, parser: ArgumentParser) -> None: + parser.add_argument( + "--study-name", + type=str, + required=True, + help="The name of the study to get the best trial.", + ) + parser.add_argument( + "-f", + "--format", + type=str, + choices=("value", "json", "table", "yaml"), + default="table", + help="Output format.", + ) + parser.add_argument( + "--flatten", + default=False, + action="store_true", + help="Flatten nested columns such as params and user_attrs.", + ) + + def take_action(self, parsed_args: Namespace) -> int: + warnings.warn( + "'best-trial' is an experimental CLI command. 
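``_Trials`` and ``_BestTrial`` build the same record/column structure and hand it to ``_format_output``, so the output format and flattening are controlled the same way for both. A hedged sketch:

.. code-block:: bash

    # Table of all trials (the default format).
    optuna trials --study-name demo --storage sqlite:///example.db

    # Same data as JSON with params/user_attrs flattened into top-level keys.
    optuna trials --study-name demo --storage sqlite:///example.db -f json --flatten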
The interface can change in the future.", + ExperimentalWarning, + ) + + storage = _get_storage(parsed_args.storage, parsed_args.storage_class) + study = optuna.load_study(storage=storage, study_name=parsed_args.study_name) + attrs = ( + "number", + "value" if not study._is_multi_objective() else "values", + "datetime_start", + "datetime_complete", + "duration", + "params", + "user_attrs", + "state", + ) + + records, columns = _dataframe._create_records_and_aggregate_column(study, attrs) + print( + _format_output( + records[study.best_trial.number], columns, parsed_args.format, parsed_args.flatten + ) + ) + return 0 + + +class _BestTrials(_BaseCommand): + """Show a list of trials located at the Pareto front.""" + + def add_arguments(self, parser: ArgumentParser) -> None: + parser.add_argument( + "--study-name", + type=str, + required=True, + help="The name of the study to get the best trials (trials at the Pareto front).", + ) + parser.add_argument( + "-f", + "--format", + type=str, + choices=("value", "json", "table", "yaml"), + default="table", + help="Output format.", + ) + parser.add_argument( + "--flatten", + default=False, + action="store_true", + help="Flatten nested columns such as params and user_attrs.", + ) + + def take_action(self, parsed_args: Namespace) -> int: + warnings.warn( + "'best-trials' is an experimental CLI command. The interface can change in the " + "future.", + ExperimentalWarning, + ) + + storage = _get_storage(parsed_args.storage, parsed_args.storage_class) + study = optuna.load_study(storage=storage, study_name=parsed_args.study_name) + best_trials = [trial.number for trial in study.best_trials] + attrs = ( + "number", + "value" if not study._is_multi_objective() else "values", + "datetime_start", + "datetime_complete", + "duration", + "params", + "user_attrs", + "state", + ) + + records, columns = _dataframe._create_records_and_aggregate_column(study, attrs) + best_records = list(filter(lambda record: record[("number", "")] in best_trials, records)) + print(_format_output(best_records, columns, parsed_args.format, parsed_args.flatten)) + return 0 + + +class _StorageUpgrade(_BaseCommand): + """Upgrade the schema of an RDB storage.""" + + def take_action(self, parsed_args: Namespace) -> int: + storage_url = _check_storage_url(parsed_args.storage) + try: + storage = RDBStorage( + storage_url, skip_compatibility_check=True, skip_table_creation=True + ) + except sqlalchemy.exc.ArgumentError: + self.logger.error("Invalid RDBStorage URL.") + return 1 + current_version = storage.get_current_version() + head_version = storage.get_head_version() + known_versions = storage.get_all_versions() + if current_version == head_version: + self.logger.info("This storage is up-to-date.") + elif current_version in known_versions: + self.logger.info("Upgrading the storage schema to the latest version.") + storage.upgrade() + self.logger.info("Completed to upgrade the storage.") + else: + warnings.warn( + "Your optuna version seems outdated against the storage version. " + "Please try updating optuna to the latest version by " + "`$ pip install -U optuna`." 
+ ) + return 0 + + +class _Ask(_BaseCommand): + """Create a new trial and suggest parameters.""" + + def add_arguments(self, parser: ArgumentParser) -> None: + parser.add_argument("--study-name", type=str, help="Name of study.") + parser.add_argument("--sampler", type=str, help="Class name of sampler object to create.") + parser.add_argument( + "--sampler-kwargs", + type=str, + help="Sampler object initialization keyword arguments as JSON.", + ) + parser.add_argument( + "--search-space", + type=str, + help=( + "Search space as JSON. Keys are names and values are outputs from " + ":func:`~optuna.distributions.distribution_to_json`." + ), + ) + parser.add_argument( + "-f", + "--format", + type=str, + choices=("value", "json", "table", "yaml"), + default="json", + help="Output format.", + ) + parser.add_argument( + "--flatten", + default=False, + action="store_true", + help="Flatten nested columns such as params.", + ) + + def take_action(self, parsed_args: Namespace) -> int: + warnings.warn( + "'ask' is an experimental CLI command. The interface can change in the future.", + ExperimentalWarning, + ) + + storage = _get_storage(parsed_args.storage, parsed_args.storage_class) + + create_study_kwargs = { + "storage": storage, + "study_name": parsed_args.study_name, + "load_if_exists": True, + } + + if parsed_args.sampler is not None: + if parsed_args.sampler_kwargs is not None: + sampler_kwargs = json.loads(parsed_args.sampler_kwargs) + else: + sampler_kwargs = {} + sampler_cls = getattr(optuna.samplers, parsed_args.sampler) + sampler = sampler_cls(**sampler_kwargs) + create_study_kwargs["sampler"] = sampler + else: + if parsed_args.sampler_kwargs is not None: + raise ValueError( + "`--sampler_kwargs` is set without `--sampler`. Please specify `--sampler` as" + " well or omit `--sampler-kwargs`." + ) + + if parsed_args.search_space is not None: + # The search space is expected to be a JSON serialized string, e.g. + # '{"x": {"name": "FloatDistribution", "attributes": {"low": 0.0, "high": 1.0}}, + # "y": ...}'. + search_space = { + name: optuna.distributions.json_to_distribution(json.dumps(dist)) + for name, dist in json.loads(parsed_args.search_space).items() + } + else: + search_space = {} + + try: + study = optuna.load_study( + study_name=create_study_kwargs["study_name"], + storage=create_study_kwargs["storage"], + sampler=create_study_kwargs.get("sampler"), + ) + + except KeyError: + raise KeyError( + "Implicit study creation within the 'ask' command was dropped in Optuna v4.0.0. " + "Please use the 'create-study' command beforehand." 
+ ) + trial = study.ask(fixed_distributions=search_space) + + self.logger.info(f"Asked trial {trial.number} with parameters {trial.params}.") + + record: dict[tuple[str, str], Any] = {("number", ""): trial.number} + columns = [("number", "")] + + if len(trial.params) == 0 and not parsed_args.flatten: + record[("params", "")] = {} + columns.append(("params", "")) + else: + for param_name, param_value in trial.params.items(): + record[("params", param_name)] = param_value + columns.append(("params", param_name)) + + print(_format_output(record, columns, parsed_args.format, parsed_args.flatten)) + return 0 + + +class _Tell(_BaseCommand): + """Finish a trial, which was created by the ask command.""" + + def add_arguments(self, parser: ArgumentParser) -> None: + parser.add_argument("--study-name", type=str, help="Name of study.") + parser.add_argument("--trial-number", type=int, help="Trial number.") + parser.add_argument("--values", type=float, nargs="+", help="Objective values.") + parser.add_argument( + "--state", + type=str, + help="Trial state.", + choices=("complete", "pruned", "fail"), + ) + parser.add_argument( + "--skip-if-finished", + default=False, + action="store_true", + help="If specified, tell is skipped without any error when the trial is already " + "finished.", + ) + + def take_action(self, parsed_args: Namespace) -> int: + warnings.warn( + "'tell' is an experimental CLI command. The interface can change in the future.", + ExperimentalWarning, + ) + + storage = _get_storage(parsed_args.storage, parsed_args.storage_class) + + study = optuna.load_study( + storage=storage, + study_name=parsed_args.study_name, + ) + + if parsed_args.state is not None: + state: TrialState | None = TrialState[parsed_args.state.upper()] + else: + state = None + + trial_number = parsed_args.trial_number + values = parsed_args.values + + study.tell( + trial=trial_number, + values=values, + state=state, + skip_if_finished=parsed_args.skip_if_finished, + ) + + self.logger.info(f"Told trial {trial_number} with values {values} and state {state}.") + + return 0 + + +_COMMANDS: dict[str, type[_BaseCommand]] = { + "create-study": _CreateStudy, + "delete-study": _DeleteStudy, + "study set-user-attr": _StudySetUserAttribute, + "study-names": _StudyNames, + "studies": _Studies, + "trials": _Trials, + "best-trial": _BestTrial, + "best-trials": _BestTrials, + "storage upgrade": _StorageUpgrade, + "ask": _Ask, + "tell": _Tell, +} + + +def _parse_storage_class_without_suggesting_deprecated_choices(value: str) -> str: + choices = [ + RDBStorage.__name__, + JournalFileBackend.__name__, + JournalRedisBackend.__name__, + ] + deprecated_choices = [ + JournalFileStorage.__name__, + JournalRedisStorage.__name__, + ] + if value in choices + deprecated_choices: + return value + raise argparse.ArgumentTypeError( + f"Invalid choice: {value} (choose from {str(choices)[1:-1]})" + ) + + +def _add_common_arguments(parser: ArgumentParser) -> ArgumentParser: + parser.add_argument( + "--storage", + default=None, + help=( + "DB URL. (e.g. sqlite:///example.db) " + "Also can be specified via OPTUNA_STORAGE environment variable." + ), + ) + parser.add_argument( + "--storage-class", + help="Storage class hint (e.g. JournalFileBackend)", + default=None, + type=_parse_storage_class_without_suggesting_deprecated_choices, + ) + verbose_group = parser.add_mutually_exclusive_group() + verbose_group.add_argument( + "-v", + "--verbose", + action="count", + dest="verbose_level", + default=1, + help="Increase verbosity of output. 
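The ask/tell pair supports driving the optimization from an external process: ``ask`` creates a trial from a JSON-encoded search space and ``tell`` reports its result by trial number. A hedged end-to-end sketch (the study must be created first, since implicit creation was dropped in 4.0.0; names and values are placeholders):

.. code-block:: bash

    optuna create-study --study-name ask-tell-demo --storage sqlite:///example.db

    # Ask: suggest parameters for one trial from a JSON search space.
    optuna ask --study-name ask-tell-demo --storage sqlite:///example.db \
        --sampler TPESampler \
        --search-space '{"x": {"name": "FloatDistribution", "attributes": {"low": 0.0, "high": 1.0}}}'

    # Tell: report the objective value for that trial (number taken from the ask output).
    optuna tell --study-name ask-tell-demo --storage sqlite:///example.db \
        --trial-number 0 --values 0.25 --state complete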
Can be repeated.", + ) + verbose_group.add_argument( + "-q", + "--quiet", + action="store_const", + dest="verbose_level", + const=0, + help="Suppress output except warnings and errors.", + ) + parser.add_argument( + "--log-file", + action="store", + default=None, + help="Specify a file to log output. Disabled by default.", + ) + parser.add_argument( + "--debug", + default=False, + action="store_true", + help="Show tracebacks on errors.", + ) + return parser + + +def _add_commands( + main_parser: ArgumentParser, parent_parser: ArgumentParser +) -> dict[str, ArgumentParser]: + subparsers = main_parser.add_subparsers() + command_name_to_subparser = {} + + for command_name, command_type in _COMMANDS.items(): + command = command_type() + subparser = subparsers.add_parser( + command_name, parents=[parent_parser], help=inspect.getdoc(command_type) + ) + command.add_arguments(subparser) + subparser.set_defaults(handler=command.take_action) + command_name_to_subparser[command_name] = subparser + + def _print_help(args: Namespace) -> None: + main_parser.print_help() + + subparsers.add_parser("help", help="Show help message and exit.").set_defaults( + handler=_print_help + ) + return command_name_to_subparser + + +def _get_parser(description: str = "") -> tuple[ArgumentParser, dict[str, ArgumentParser]]: + # Use `parent_parser` is necessary to avoid namespace conflict for -h/--help + # between `main_parser` and `subparser`. + parent_parser = ArgumentParser(add_help=False) + parent_parser = _add_common_arguments(parent_parser) + + main_parser = ArgumentParser(description=description, parents=[parent_parser]) + main_parser.add_argument( + "--version", action="version", version="{0} {1}".format("optuna", optuna.__version__) + ) + command_name_to_subparser = _add_commands(main_parser, parent_parser) + return main_parser, command_name_to_subparser + + +def _preprocess_argv(argv: list[str]) -> list[str]: + # Some preprocess is necessary for argv because some subcommand includes space + # (e.g. optuna storage upgrade). + argv = argv[1:] if len(argv) > 1 else ["help"] + + for i in range(len(argv)): + for j in range(i, i + 2): # Commands consist of one or two words. + command_candidate = " ".join(argv[i : j + 1]) + if command_candidate in _COMMANDS: + options = argv[:i] + argv[j + 1 :] + return [command_candidate] + options + + # No subcommand is found. 
+ return argv + + +def _set_verbosity(args: Namespace) -> None: + root_logger = logging.getLogger() + root_logger.setLevel(logging.DEBUG) + stream_handler = logging.StreamHandler(sys.stderr) + + logging_level = { + 0: logging.WARNING, + 1: logging.INFO, + 2: logging.DEBUG, + }.get(args.verbose_level, logging.DEBUG) + + stream_handler.setLevel(logging_level) + stream_handler.setFormatter(optuna.logging.create_default_formatter()) + root_logger.addHandler(stream_handler) + + optuna.logging.set_verbosity(logging_level) + + +def _set_log_file(args: Namespace) -> None: + if args.log_file is None: + return + + root_logger = logging.getLogger() + root_logger.setLevel(logging.DEBUG) + + file_handler = logging.FileHandler( + filename=args.log_file, + ) + file_handler.setFormatter(optuna.logging.create_default_formatter()) + root_logger.addHandler(file_handler) + + +def main() -> int: + main_parser, command_name_to_subparser = _get_parser() + + argv = sys.argv + preprocessed_argv = _preprocess_argv(argv) + args = main_parser.parse_args(preprocessed_argv) + + _set_verbosity(args) + _set_log_file(args) + + logger = logging.getLogger("optuna") + try: + return args.handler(args) + except CLIUsageError as e: + if args.debug: + logger.exception(e) + else: + logger.error(e) + # This code is required to show help for each subcommand. + # NOTE: the first element of `preprocessed_argv` is command name. + command_name_to_subparser[preprocessed_argv[0]].print_help() + return 1 + except AttributeError: + # Exception for the case -v/--verbose/-q/--quiet/--log-file/--debug + # without any subcommand. + argv_str = " ".join(argv[1:]) + logger.error(f"'{argv_str}' is not an optuna command. see 'optuna --help'") + main_parser.print_help() + return 1 diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/distributions.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/distributions.py new file mode 100644 index 0000000000000000000000000000000000000000..bf9984f20e61eec0fe63654a42745f71e535b118 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/distributions.py @@ -0,0 +1,792 @@ +from __future__ import annotations + +import abc +import copy +import decimal +import json +import math +from numbers import Real +from typing import Any +from typing import cast +from typing import TYPE_CHECKING +from typing import Union +import warnings + +from optuna._deprecated import deprecated_class + + +if TYPE_CHECKING: + from collections.abc import Sequence + + +CategoricalChoiceType = Union[None, bool, int, float, str] + + +_float_distribution_deprecated_msg = ( + "Use :class:`~optuna.distributions.FloatDistribution` instead." +) +_int_distribution_deprecated_msg = "Use :class:`~optuna.distributions.IntDistribution` instead." + + +class BaseDistribution(abc.ABC): + """Base class for distributions. + + Note that distribution classes are not supposed to be called by library users. + They are used by :class:`~optuna.trial.Trial` and :class:`~optuna.samplers` internally. + """ + + def to_external_repr(self, param_value_in_internal_repr: float) -> Any: + """Convert internal representation of a parameter value into external representation. + + Args: + param_value_in_internal_repr: + Optuna's internal representation of a parameter value. + + Returns: + Optuna's external representation of a parameter value. 
+ """ + + return param_value_in_internal_repr + + @abc.abstractmethod + def to_internal_repr(self, param_value_in_external_repr: Any) -> float: + """Convert external representation of a parameter value into internal representation. + + Args: + param_value_in_external_repr: + Optuna's external representation of a parameter value. + + Returns: + Optuna's internal representation of a parameter value. + """ + + raise NotImplementedError + + @abc.abstractmethod + def single(self) -> bool: + """Test whether the range of this distribution contains just a single value. + + Returns: + :obj:`True` if the range of this distribution contains just a single value, + otherwise :obj:`False`. + """ + + raise NotImplementedError + + @abc.abstractmethod + def _contains(self, param_value_in_internal_repr: float) -> bool: + """Test if a parameter value is contained in the range of this distribution. + + Args: + param_value_in_internal_repr: + Optuna's internal representation of a parameter value. + + Returns: + :obj:`True` if the parameter value is contained in the range of this distribution, + otherwise :obj:`False`. + """ + + raise NotImplementedError + + def _asdict(self) -> dict: + return self.__dict__ + + def __eq__(self, other: Any) -> bool: + if not isinstance(other, BaseDistribution): + return NotImplemented + if type(self) is not type(other): + return False + return self.__dict__ == other.__dict__ + + def __hash__(self) -> int: + return hash((self.__class__,) + tuple(sorted(self.__dict__.items()))) + + def __repr__(self) -> str: + kwargs = ", ".join("{}={}".format(k, v) for k, v in sorted(self._asdict().items())) + return "{}({})".format(self.__class__.__name__, kwargs) + + +class FloatDistribution(BaseDistribution): + """A distribution on floats. + + This object is instantiated by :func:`~optuna.trial.Trial.suggest_float`, and passed to + :mod:`~optuna.samplers` in general. + + .. note:: + When ``step`` is not :obj:`None`, if the range :math:`[\\mathsf{low}, \\mathsf{high}]` + is not divisible by :math:`\\mathsf{step}`, :math:`\\mathsf{high}` will be replaced + with the maximum of :math:`k \\times \\mathsf{step} + \\mathsf{low} < \\mathsf{high}`, + where :math:`k` is an integer. + + Attributes: + low: + Lower endpoint of the range of the distribution. ``low`` is included in the range. + ``low`` must be less than or equal to ``high``. If ``log`` is :obj:`True`, + ``low`` must be larger than 0. + high: + Upper endpoint of the range of the distribution. ``high`` is included in the range. + ``high`` must be greater than or equal to ``low``. + log: + If ``log`` is :obj:`True`, this distribution is in log-scaled domain. + In this case, all parameters enqueued to the distribution must be positive values. + This parameter must be :obj:`False` when the parameter ``step`` is not :obj:`None`. + step: + A discretization step. ``step`` must be larger than 0. + This parameter must be :obj:`None` when the parameter ``log`` is :obj:`True`. 
+ + """ + + def __init__( + self, low: float, high: float, log: bool = False, step: None | float = None + ) -> None: + if log and step is not None: + raise ValueError("The parameter `step` is not supported when `log` is true.") + + if low > high: + raise ValueError( + "The `low` value must be smaller than or equal to the `high` value " + "(low={}, high={}).".format(low, high) + ) + + if log and low <= 0.0: + raise ValueError( + "The `low` value must be larger than 0 for a log distribution " + "(low={}, high={}).".format(low, high) + ) + + if step is not None and step <= 0: + raise ValueError( + "The `step` value must be non-zero positive value, " "but step={}.".format(step) + ) + + self.step = None + if step is not None: + high = _adjust_discrete_uniform_high(low, high, step) + self.step = float(step) + + self.low = float(low) + self.high = float(high) + self.log = log + + def single(self) -> bool: + if self.step is None: + return self.low == self.high + else: + if self.low == self.high: + return True + high = decimal.Decimal(str(self.high)) + low = decimal.Decimal(str(self.low)) + step = decimal.Decimal(str(self.step)) + return (high - low) < step + + def _contains(self, param_value_in_internal_repr: float) -> bool: + value = param_value_in_internal_repr + if self.step is None: + return self.low <= value <= self.high + else: + k = (value - self.low) / self.step + return self.low <= value <= self.high and abs(k - round(k)) < 1.0e-8 + + def to_internal_repr(self, param_value_in_external_repr: float) -> float: + try: + internal_repr = float(param_value_in_external_repr) + except (ValueError, TypeError) as e: + raise ValueError( + f"'{param_value_in_external_repr}' is not a valid type. " + "float-castable value is expected." + ) from e + + if math.isnan(internal_repr): + raise ValueError(f"`{param_value_in_external_repr}` is invalid value.") + if self.log and internal_repr <= 0.0: + raise ValueError( + f"`{param_value_in_external_repr}` is invalid value for the case log=True." + ) + return internal_repr + + +@deprecated_class("3.0.0", "6.0.0", text=_float_distribution_deprecated_msg) +class UniformDistribution(FloatDistribution): + """A uniform distribution in the linear domain. + + This object is instantiated by :func:`~optuna.trial.Trial.suggest_float`, and passed to + :mod:`~optuna.samplers` in general. + + Attributes: + low: + Lower endpoint of the range of the distribution. ``low`` is included in the range. + ``low`` must be less than or equal to ``high``. + high: + Upper endpoint of the range of the distribution. ``high`` is included in the range. + ``high`` must be greater than or equal to ``low``. + + """ + + def __init__(self, low: float, high: float) -> None: + super().__init__(low=low, high=high, log=False, step=None) + + def _asdict(self) -> dict: + d = copy.deepcopy(self.__dict__) + d.pop("log") + d.pop("step") + return d + + +@deprecated_class("3.0.0", "6.0.0", text=_float_distribution_deprecated_msg) +class LogUniformDistribution(FloatDistribution): + """A uniform distribution in the log domain. + + This object is instantiated by :func:`~optuna.trial.Trial.suggest_float` with ``log=True``, + and passed to :mod:`~optuna.samplers` in general. + + Attributes: + low: + Lower endpoint of the range of the distribution. ``low`` is included in the range. + ``low`` must be larger than 0. ``low`` must be less than or equal to ``high``. + high: + Upper endpoint of the range of the distribution. ``high`` is included in the range. + ``high`` must be greater than or equal to ``low``. 
+ + """ + + def __init__(self, low: float, high: float) -> None: + super().__init__(low=low, high=high, log=True, step=None) + + def _asdict(self) -> dict: + d = copy.deepcopy(self.__dict__) + d.pop("log") + d.pop("step") + return d + + +@deprecated_class("3.0.0", "6.0.0", text=_float_distribution_deprecated_msg) +class DiscreteUniformDistribution(FloatDistribution): + """A discretized uniform distribution in the linear domain. + + This object is instantiated by :func:`~optuna.trial.Trial.suggest_float` with ``step`` + argument, and passed to :mod:`~optuna.samplers` in general. + + .. note:: + If the range :math:`[\\mathsf{low}, \\mathsf{high}]` is not divisible by :math:`q`, + :math:`\\mathsf{high}` will be replaced with the maximum of :math:`k q + \\mathsf{low} + < \\mathsf{high}`, where :math:`k` is an integer. + + Args: + low: + Lower endpoint of the range of the distribution. ``low`` is included in the range. + ``low`` must be less than or equal to ``high``. + high: + Upper endpoint of the range of the distribution. ``high`` is included in the range. + ``high`` must be greater than or equal to ``low``. + q: + A discretization step. ``q`` must be larger than 0. + + Attributes: + low: + Lower endpoint of the range of the distribution. ``low`` is included in the range. + high: + Upper endpoint of the range of the distribution. ``high`` is included in the range. + + """ + + def __init__(self, low: float, high: float, q: float) -> None: + super().__init__(low=low, high=high, step=q) + + def _asdict(self) -> dict: + d = copy.deepcopy(self.__dict__) + d.pop("log") + + step = d.pop("step") + d["q"] = step + return d + + @property + def q(self) -> float: + """Discretization step. + + :class:`~optuna.distributions.DiscreteUniformDistribution` is a subtype of + :class:`~optuna.distributions.FloatDistribution`. + This property is a proxy for its ``step`` attribute. + """ + return cast("float", self.step) + + @q.setter + def q(self, v: float) -> None: + self.step = v + + +class IntDistribution(BaseDistribution): + """A distribution on integers. + + This object is instantiated by :func:`~optuna.trial.Trial.suggest_int`, and passed to + :mod:`~optuna.samplers` in general. + + .. note:: + When ``step`` is not :obj:`None`, if the range :math:`[\\mathsf{low}, \\mathsf{high}]` + is not divisible by :math:`\\mathsf{step}`, :math:`\\mathsf{high}` will be replaced + with the maximum of :math:`k \\times \\mathsf{step} + \\mathsf{low} < \\mathsf{high}`, + where :math:`k` is an integer. + + Attributes: + low: + Lower endpoint of the range of the distribution. ``low`` is included in the range. + ``low`` must be less than or equal to ``high``. If ``log`` is :obj:`True`, + ``low`` must be larger than or equal to 1. + high: + Upper endpoint of the range of the distribution. ``high`` is included in the range. + ``high`` must be greater than or equal to ``low``. + log: + If ``log`` is :obj:`True`, this distribution is in log-scaled domain. + In this case, all parameters enqueued to the distribution must be positive values. + This parameter must be :obj:`False` when the parameter ``step`` is not 1. + step: + A discretization step. ``step`` must be a positive integer. This parameter must be 1 + when the parameter ``log`` is :obj:`True`. + + """ + + def __init__(self, low: int, high: int, log: bool = False, step: int = 1) -> None: + if log and step != 1: + raise ValueError( + "Samplers and other components in Optuna only accept step is 1 " + "when `log` argument is True." 
+ ) + + if low > high: + raise ValueError( + "The `low` value must be smaller than or equal to the `high` value " + "(low={}, high={}).".format(low, high) + ) + + if log and low < 1: + raise ValueError( + "The `low` value must be equal to or greater than 1 for a log distribution " + "(low={}, high={}).".format(low, high) + ) + + if step <= 0: + raise ValueError( + "The `step` value must be non-zero positive value, but step={}.".format(step) + ) + + self.log = log + self.step = int(step) + self.low = int(low) + high = int(high) + self.high = _adjust_int_uniform_high(self.low, high, self.step) + + def to_external_repr(self, param_value_in_internal_repr: float) -> int: + return int(param_value_in_internal_repr) + + def to_internal_repr(self, param_value_in_external_repr: int) -> float: + try: + internal_repr = float(param_value_in_external_repr) + except (ValueError, TypeError) as e: + raise ValueError( + f"'{param_value_in_external_repr}' is not a valid type. " + "float-castable value is expected." + ) from e + + if math.isnan(internal_repr): + raise ValueError(f"`{param_value_in_external_repr}` is invalid value.") + if self.log and internal_repr <= 0.0: + raise ValueError( + f"`{param_value_in_external_repr}` is invalid value for the case log=True." + ) + return internal_repr + + def single(self) -> bool: + if self.log: + return self.low == self.high + + if self.low == self.high: + return True + return (self.high - self.low) < self.step + + def _contains(self, param_value_in_internal_repr: float) -> bool: + value = param_value_in_internal_repr + return self.low <= value <= self.high and (value - self.low) % self.step == 0 + + +@deprecated_class("3.0.0", "6.0.0", text=_int_distribution_deprecated_msg) +class IntUniformDistribution(IntDistribution): + """A uniform distribution on integers. + + This object is instantiated by :func:`~optuna.trial.Trial.suggest_int`, and passed to + :mod:`~optuna.samplers` in general. + + .. note:: + If the range :math:`[\\mathsf{low}, \\mathsf{high}]` is not divisible by + :math:`\\mathsf{step}`, :math:`\\mathsf{high}` will be replaced with the maximum of + :math:`k \\times \\mathsf{step} + \\mathsf{low} < \\mathsf{high}`, where :math:`k` is + an integer. + + Attributes: + low: + Lower endpoint of the range of the distribution. ``low`` is included in the range. + ``low`` must be less than or equal to ``high``. + high: + Upper endpoint of the range of the distribution. ``high`` is included in the range. + ``high`` must be greater than or equal to ``low``. + step: + A discretization step. ``step`` must be a positive integer. + + """ + + def __init__(self, low: int, high: int, step: int = 1) -> None: + super().__init__(low=low, high=high, log=False, step=step) + + def _asdict(self) -> dict: + d = copy.deepcopy(self.__dict__) + d.pop("log") + return d + + +@deprecated_class("3.0.0", "6.0.0", text=_int_distribution_deprecated_msg) +class IntLogUniformDistribution(IntDistribution): + """A uniform distribution on integers in the log domain. + + This object is instantiated by :func:`~optuna.trial.Trial.suggest_int`, and passed to + :mod:`~optuna.samplers` in general. + + Attributes: + low: + Lower endpoint of the range of the distribution. ``low`` is included in the range + and must be larger than or equal to 1. ``low`` must be less than or equal to ``high``. + high: + Upper endpoint of the range of the distribution. ``high`` is included in the range. + ``high`` must be greater than or equal to ``low``. + step: + A discretization step. 
``step`` must be a positive integer. + + """ + + def __init__(self, low: int, high: int, step: int = 1) -> None: + super().__init__(low=low, high=high, log=True, step=step) + + def _asdict(self) -> dict: + d = copy.deepcopy(self.__dict__) + d.pop("log") + return d + + +def _categorical_choice_equal( + value1: CategoricalChoiceType, value2: CategoricalChoiceType +) -> bool: + """A function to check two choices equal considering NaN. + + This function can handle NaNs like np.float32("nan") other than float. + """ + + value1_is_nan = isinstance(value1, Real) and math.isnan(float(value1)) + value2_is_nan = isinstance(value2, Real) and math.isnan(float(value2)) + return (value1 == value2) or (value1_is_nan and value2_is_nan) + + +class CategoricalDistribution(BaseDistribution): + """A categorical distribution. + + This object is instantiated by :func:`~optuna.trial.Trial.suggest_categorical`, and + passed to :mod:`~optuna.samplers` in general. + + Args: + choices: + Parameter value candidates. ``choices`` must have one element at least. + + .. note:: + + Not all types are guaranteed to be compatible with all storages. It is recommended to + restrict the types of the choices to :obj:`None`, :class:`bool`, :class:`int`, + :class:`float` and :class:`str`. + + Attributes: + choices: + Parameter value candidates. + + """ + + def __init__(self, choices: Sequence[CategoricalChoiceType]) -> None: + if len(choices) == 0: + raise ValueError("The `choices` must contain one or more elements.") + for choice in choices: + if choice is not None and not isinstance(choice, (bool, int, float, str)): + message = ( + "Choices for a categorical distribution should be a tuple of None, bool, " + "int, float and str for persistent storage but contains {} which is of type " + "{}.".format(choice, type(choice).__name__) + ) + warnings.warn(message) + + self.choices = tuple(choices) + + def to_external_repr(self, param_value_in_internal_repr: float) -> CategoricalChoiceType: + return self.choices[int(param_value_in_internal_repr)] + + def to_internal_repr(self, param_value_in_external_repr: CategoricalChoiceType) -> float: + try: + # NOTE(nabenabe): With this implementation, we cannot distinguish some values + # such as True and 1, or 1.0 and 1. For example, if choices=[True, 1] and external_repr + # is 1, this method wrongly returns 0 instead of 1. However, we decided to accept this + # bug for such exceptional choices for less complexity and faster processing. + return self.choices.index(param_value_in_external_repr) + except ValueError: # ValueError: param_value_in_external_repr is not in choices. + # ValueError also happens if external_repr is nan or includes precision error in float. 
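+            # A NaN value, for instance, is typically not found by ``choices.index`` above since NaN != NaN,
+            # so the fallback below compares each choice with ``_categorical_choice_equal``, which treats NaNs as equal.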
+ for index, choice in enumerate(self.choices): + if _categorical_choice_equal(param_value_in_external_repr, choice): + return index + + raise ValueError(f"'{param_value_in_external_repr}' not in {self.choices}.") + + def single(self) -> bool: + return len(self.choices) == 1 + + def _contains(self, param_value_in_internal_repr: float) -> bool: + index = int(param_value_in_internal_repr) + return 0 <= index < len(self.choices) + + def __eq__(self, other: Any) -> bool: + if not isinstance(other, BaseDistribution): + return NotImplemented + if not isinstance(other, self.__class__): + return False + if self.__dict__.keys() != other.__dict__.keys(): + return False + for key, value in self.__dict__.items(): + if key == "choices": + if len(value) != len(getattr(other, key)): + return False + for choice, other_choice in zip(value, getattr(other, key)): + if not _categorical_choice_equal(choice, other_choice): + return False + else: + if value != getattr(other, key): + return False + return True + + __hash__ = BaseDistribution.__hash__ + + +DISTRIBUTION_CLASSES = ( + IntDistribution, + IntLogUniformDistribution, + IntUniformDistribution, + FloatDistribution, + UniformDistribution, + LogUniformDistribution, + DiscreteUniformDistribution, + CategoricalDistribution, +) + + +def json_to_distribution(json_str: str) -> BaseDistribution: + """Deserialize a distribution in JSON format. + + Args: + json_str: A JSON-serialized distribution. + + Returns: + A deserialized distribution. + + """ + + json_dict = json.loads(json_str) + + if "name" in json_dict: + if json_dict["name"] == CategoricalDistribution.__name__: + json_dict["attributes"]["choices"] = tuple(json_dict["attributes"]["choices"]) + + for cls in DISTRIBUTION_CLASSES: + if json_dict["name"] == cls.__name__: + return cls(**json_dict["attributes"]) + + raise ValueError("Unknown distribution class: {}".format(json_dict["name"])) + + else: + # Deserialize a distribution from an abbreviated format. + if json_dict["type"] == "categorical": + return CategoricalDistribution(json_dict["choices"]) + elif json_dict["type"] in ("float", "int"): + low = json_dict["low"] + high = json_dict["high"] + step = json_dict.get("step") + log = json_dict.get("log", False) + + if json_dict["type"] == "float": + return FloatDistribution(low, high, log=log, step=step) + + else: + if step is None: + step = 1 + return IntDistribution(low=low, high=high, log=log, step=step) + + raise ValueError("Unknown distribution type: {}".format(json_dict["type"])) + + +def distribution_to_json(dist: BaseDistribution) -> str: + """Serialize a distribution to JSON format. + + Args: + dist: A distribution to be serialized. + + Returns: + A JSON string of a given distribution. + + """ + + return json.dumps({"name": dist.__class__.__name__, "attributes": dist._asdict()}) + + +def check_distribution_compatibility( + dist_old: BaseDistribution, dist_new: BaseDistribution +) -> None: + """A function to check compatibility of two distributions. + + It checks whether ``dist_old`` and ``dist_new`` are the same kind of distributions. + If ``dist_old`` is :class:`~optuna.distributions.CategoricalDistribution`, + it further checks ``choices`` are the same between ``dist_old`` and ``dist_new``. + Note that this method is not supposed to be called by library users. + + Args: + dist_old: + A distribution previously recorded in storage. + dist_new: + A distribution newly added to storage. 
+ + """ + + if dist_old.__class__ != dist_new.__class__: + raise ValueError("Cannot set different distribution kind to the same parameter name.") + + if isinstance(dist_old, (FloatDistribution, IntDistribution)): + # For mypy. + assert isinstance(dist_new, (FloatDistribution, IntDistribution)) + + if dist_old.log != dist_new.log: + raise ValueError("Cannot set different log configuration to the same parameter name.") + + if not isinstance(dist_old, CategoricalDistribution): + return + if not isinstance(dist_new, CategoricalDistribution): + return + if dist_old != dist_new: + raise ValueError( + CategoricalDistribution.__name__ + " does not support dynamic value space." + ) + + +def _adjust_discrete_uniform_high(low: float, high: float, step: float) -> float: + d_high = decimal.Decimal(str(high)) + d_low = decimal.Decimal(str(low)) + d_step = decimal.Decimal(str(step)) + + d_r = d_high - d_low + + if d_r % d_step != decimal.Decimal("0"): + old_high = high + high = float((d_r // d_step) * d_step + d_low) + warnings.warn( + "The distribution is specified by [{low}, {old_high}] and step={step}, but the range " + "is not divisible by `step`. It will be replaced by [{low}, {high}].".format( + low=low, old_high=old_high, high=high, step=step + ) + ) + + return high + + +def _adjust_int_uniform_high(low: int, high: int, step: int) -> int: + r = high - low + if r % step != 0: + old_high = high + high = r // step * step + low + warnings.warn( + "The distribution is specified by [{low}, {old_high}] and step={step}, but the range " + "is not divisible by `step`. It will be replaced by [{low}, {high}].".format( + low=low, old_high=old_high, high=high, step=step + ) + ) + return high + + +def _get_single_value(distribution: BaseDistribution) -> int | float | CategoricalChoiceType: + assert distribution.single() + + if isinstance( + distribution, + ( + FloatDistribution, + IntDistribution, + ), + ): + return distribution.low + elif isinstance(distribution, CategoricalDistribution): + return distribution.choices[0] + assert False + + +# TODO(himkt): Remove this method with the deletion of deprecated distributions. +# https://github.com/optuna/optuna/issues/2941 +def _convert_old_distribution_to_new_distribution( + distribution: BaseDistribution, + suppress_warning: bool = False, +) -> BaseDistribution: + new_distribution: BaseDistribution + + # Float distributions. + if isinstance(distribution, UniformDistribution): + new_distribution = FloatDistribution( + low=distribution.low, + high=distribution.high, + log=False, + step=None, + ) + elif isinstance(distribution, LogUniformDistribution): + new_distribution = FloatDistribution( + low=distribution.low, + high=distribution.high, + log=True, + step=None, + ) + elif isinstance(distribution, DiscreteUniformDistribution): + new_distribution = FloatDistribution( + low=distribution.low, + high=distribution.high, + log=False, + step=distribution.q, + ) + + # Integer distributions. + elif isinstance(distribution, IntUniformDistribution): + new_distribution = IntDistribution( + low=distribution.low, + high=distribution.high, + log=False, + step=distribution.step, + ) + elif isinstance(distribution, IntLogUniformDistribution): + new_distribution = IntDistribution( + low=distribution.low, + high=distribution.high, + log=True, + step=distribution.step, + ) + + # Categorical distribution. 
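+    # (This branch also covers distributions that are already of the new classes; they are returned unchanged.)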
+ else: + new_distribution = distribution + + if new_distribution != distribution and not suppress_warning: + message = ( + f"{distribution} is deprecated and internally converted to" + f" {new_distribution}. See https://github.com/optuna/optuna/issues/2941." + ) + warnings.warn(message, FutureWarning) + + return new_distribution + + +def _is_distribution_log(distribution: BaseDistribution) -> bool: + if isinstance(distribution, (FloatDistribution, IntDistribution)): + return distribution.log + + return False diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/exceptions.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..d21204157ae66f6a04987d480a72958ff861034a --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/exceptions.py @@ -0,0 +1,101 @@ +class OptunaError(Exception): + """Base class for Optuna specific errors.""" + + pass + + +class TrialPruned(OptunaError): + """Exception for pruned trials. + + This error tells a trainer that the current :class:`~optuna.trial.Trial` was pruned. It is + supposed to be raised after :func:`optuna.trial.Trial.should_prune` as shown in the following + example. + + See also: + :class:`optuna.TrialPruned` is an alias of :class:`optuna.exceptions.TrialPruned`. + + Example: + + .. testcode:: + + import numpy as np + from sklearn.datasets import load_iris + from sklearn.linear_model import SGDClassifier + from sklearn.model_selection import train_test_split + + import optuna + + X, y = load_iris(return_X_y=True) + X_train, X_valid, y_train, y_valid = train_test_split(X, y) + classes = np.unique(y) + + + def objective(trial): + alpha = trial.suggest_float("alpha", 0.0, 1.0) + clf = SGDClassifier(alpha=alpha) + n_train_iter = 100 + + for step in range(n_train_iter): + clf.partial_fit(X_train, y_train, classes=classes) + + intermediate_value = clf.score(X_valid, y_valid) + trial.report(intermediate_value, step) + + if trial.should_prune(): + raise optuna.TrialPruned() + + return clf.score(X_valid, y_valid) + + + study = optuna.create_study(direction="maximize") + study.optimize(objective, n_trials=20) + """ + + pass + + +class CLIUsageError(OptunaError): + """Exception for CLI. + + CLI raises this exception when it receives invalid configuration. + """ + + pass + + +class StorageInternalError(OptunaError): + """Exception for storage operation. + + This error is raised when an operation failed in backend DB of storage. + """ + + pass + + +class DuplicatedStudyError(OptunaError): + """Exception for a duplicated study name. + + This error is raised when a specified study name already exists in the storage. + """ + + pass + + +class UpdateFinishedTrialError(OptunaError, RuntimeError): + """Exception for updating a finished trial. + + This error is raised when attempting to update a finished trial. + """ + + pass + + +class ExperimentalWarning(Warning): + """Experimental Warning class. + + This implementation exists here because the policy of `FutureWarning` has been changed + since Python 3.7 was released. See the details in + https://docs.python.org/3/library/warnings.html#warning-categories. 
+ """ + + pass diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/importance/__init__.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/importance/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3ccdedbb7d79d24c96819a637ce578454bd6944a --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/importance/__init__.py @@ -0,0 +1,121 @@ +from __future__ import annotations + +from collections.abc import Callable + +from optuna._experimental import warn_experimental_argument +from optuna.importance._base import BaseImportanceEvaluator +from optuna.importance._fanova import FanovaImportanceEvaluator +from optuna.importance._mean_decrease_impurity import MeanDecreaseImpurityImportanceEvaluator +from optuna.importance._ped_anova import PedAnovaImportanceEvaluator +from optuna.study import Study +from optuna.trial import FrozenTrial + + +__all__ = [ + "BaseImportanceEvaluator", + "FanovaImportanceEvaluator", + "MeanDecreaseImpurityImportanceEvaluator", + "PedAnovaImportanceEvaluator", + "get_param_importances", +] + + +def get_param_importances( + study: Study, + *, + evaluator: BaseImportanceEvaluator | None = None, + params: list[str] | None = None, + target: Callable[[FrozenTrial], float] | None = None, + normalize: bool = True, +) -> dict[str, float]: + """Evaluate parameter importances based on completed trials in the given study. + + The parameter importances are returned as a dictionary where the keys consist of parameter + names and their values importances. + The importances are represented by non-negative floating point numbers, where higher values + mean that the parameters are more important. + The returned dictionary is ordered by its values in a descending order. + By default, the sum of the importance values are normalized to 1.0. + + If ``params`` is :obj:`None`, all parameter that are present in all of the completed trials are + assessed. + This implies that conditional parameters will be excluded from the evaluation. + To assess the importances of conditional parameters, a :obj:`list` of parameter names can be + specified via ``params``. + If specified, only completed trials that contain all of the parameters will be considered. + If no such trials are found, an error will be raised. + + If the given study does not contain completed trials, an error will be raised. + + .. note:: + + If ``params`` is specified as an empty list, an empty dictionary is returned. + + .. seealso:: + + See :func:`~optuna.visualization.plot_param_importances` to plot importances. + + Args: + study: + An optimized study. + evaluator: + An importance evaluator object that specifies which algorithm to base the importance + assessment on. + Defaults to + :class:`~optuna.importance.FanovaImportanceEvaluator`. + + .. note:: + :class:`~optuna.importance.FanovaImportanceEvaluator` takes over 1 minute + when given a study that contains 1000+ trials. We published + `optuna-fast-fanova `__ library, + that is a Cython accelerated fANOVA implementation. + By using it, you can get hyperparameter importances within a few seconds. + If ``n_trials`` is more than 10000, the Cython implementation takes more than + a minute, so you can use :class:`~optuna.importance.PedAnovaImportanceEvaluator` + instead, enabling the evaluation to finish in a second. + + params: + A list of names of parameters to assess. 
+ If :obj:`None`, all parameters that are present in all of the completed trials are + assessed. + target: + A function to specify the value to evaluate importances. + If it is :obj:`None` and ``study`` is being used for single-objective optimization, + the objective values are used. ``target`` must be specified if ``study`` is being + used for multi-objective optimization. + + .. note:: + Specify this argument if ``study`` is being used for multi-objective + optimization. For example, to get the hyperparameter importance of the first + objective, use ``target=lambda t: t.values[0]`` for the target parameter. + normalize: + A boolean option to specify whether the sum of the importance values should be + normalized to 1.0. + Defaults to :obj:`True`. + + .. note:: + Added in v3.0.0 as an experimental feature. The interface may change in newer + versions without prior notice. See + https://github.com/optuna/optuna/releases/tag/v3.0.0. + + Returns: + A :obj:`dict` where the keys are parameter names and the values are assessed importances. + + """ + if evaluator is None: + evaluator = FanovaImportanceEvaluator() + + if not isinstance(evaluator, BaseImportanceEvaluator): + raise TypeError("Evaluator must be a subclass of BaseImportanceEvaluator.") + + res = evaluator.evaluate(study, params=params, target=target) + if normalize: + s = sum(res.values()) + if s == 0.0: + n_params = len(res) + return dict((param, 1.0 / n_params) for param in res.keys()) + else: + return dict((param, value / s) for (param, value) in res.items()) + else: + warn_experimental_argument("normalize") + return res diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/importance/_base.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/importance/_base.py new file mode 100644 index 0000000000000000000000000000000000000000..ab2a770553472a96312e6d4024871ad88f35b287 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/importance/_base.py @@ -0,0 +1,185 @@ +from __future__ import annotations + +import abc +from typing import cast +from typing import TYPE_CHECKING + +import numpy as np + +from optuna.search_space import intersection_search_space +from optuna.trial import TrialState + + +if TYPE_CHECKING: + from collections.abc import Callable + from collections.abc import Collection + + from optuna._transform import _SearchSpaceTransform + from optuna.distributions import BaseDistribution + from optuna.study import Study + from optuna.trial import FrozenTrial + + +class BaseImportanceEvaluator(abc.ABC): + """Abstract parameter importance evaluator.""" + + @abc.abstractmethod + def evaluate( + self, + study: Study, + params: list[str] | None = None, + *, + target: Callable[[FrozenTrial], float] | None = None, + ) -> dict[str, float]: + """Evaluate parameter importances based on completed trials in the given study. + + .. note:: + + This method is not meant to be called by library users. + + .. seealso:: + + Please refer to :func:`~optuna.importance.get_param_importances` for how a concrete + evaluator should implement this method. + + Args: + study: + An optimized study. + params: + A list of names of parameters to assess. + If :obj:`None`, all parameters that are present in all of the completed trials are + assessed. + target: + A function to specify the value to evaluate importances. 
+ If it is :obj:`None` and ``study`` is being used for single-objective optimization, + the objective values are used. Can also be used for other trial attributes, such as + the duration, like ``target=lambda t: t.duration.total_seconds()``. + + .. note:: + Specify this argument if ``study`` is being used for multi-objective + optimization. For example, to get the hyperparameter importance of the first + objective, use ``target=lambda t: t.values[0]`` for the target parameter. + + Returns: + A :obj:`dict` where the keys are parameter names and the values are assessed + importances. + + """ + # TODO(hvy): Reconsider the interface as logic might violate DRY among multiple evaluators. + raise NotImplementedError + + +def _get_distributions(study: Study, params: list[str] | None) -> dict[str, BaseDistribution]: + completed_trials = study.get_trials(deepcopy=False, states=(TrialState.COMPLETE,)) + _check_evaluate_args(completed_trials, params) + + if params is None: + return intersection_search_space(study.get_trials(deepcopy=False)) + + # New temporary required to pass mypy. Seems like a bug. + params_not_none = params + assert params_not_none is not None + + # Compute the search space based on the subset of trials containing all parameters. + distributions = None + for trial in completed_trials: + trial_distributions = trial.distributions + if not all(name in trial_distributions for name in params_not_none): + continue + + if distributions is None: + distributions = dict( + filter( + lambda name_and_distribution: name_and_distribution[0] in params_not_none, + trial_distributions.items(), + ) + ) + continue + + if any( + trial_distributions[name] != distribution + for name, distribution in distributions.items() + ): + raise ValueError( + "Parameters importances cannot be assessed with dynamic search spaces if " + "parameters are specified. Specified parameters: {}.".format(params) + ) + + assert distributions is not None # Required to pass mypy. + distributions = dict( + sorted(distributions.items(), key=lambda name_and_distribution: name_and_distribution[0]) + ) + return distributions + + +def _check_evaluate_args(completed_trials: list[FrozenTrial], params: list[str] | None) -> None: + if len(completed_trials) == 0: + raise ValueError("Cannot evaluate parameter importances without completed trials.") + if len(completed_trials) == 1: + raise ValueError("Cannot evaluate parameter importances with only a single trial.") + + if params is not None: + if not isinstance(params, (list, tuple)): + raise TypeError( + "Parameters must be specified as a list. Actual parameters: {}.".format(params) + ) + if any(not isinstance(p, str) for p in params): + raise TypeError( + "Parameters must be specified by their names with strings. Actual parameters: " + "{}.".format(params) + ) + + if len(params) > 0: + at_least_one_trial = False + for trial in completed_trials: + if all(p in trial.distributions for p in params): + at_least_one_trial = True + break + if not at_least_one_trial: + raise ValueError( + "Study must contain completed trials with all specified parameters. 
" + "Specified parameters: {}.".format(params) + ) + + +def _get_filtered_trials( + study: Study, params: Collection[str], target: Callable[[FrozenTrial], float] | None +) -> list[FrozenTrial]: + trials = study.get_trials(deepcopy=False, states=(TrialState.COMPLETE,)) + return [ + trial + for trial in trials + if set(params) <= set(trial.params) + and np.isfinite( + target(trial) if target is not None else cast("float", trial.value) + ) # TC006 + ] + + +def _param_importances_to_dict( + params: Collection[str], param_importances: np.ndarray | float +) -> dict[str, float]: + return { + name: value + for name, value in zip(params, np.broadcast_to(param_importances, (len(params),))) + } + + +def _get_trans_params(trials: list[FrozenTrial], trans: _SearchSpaceTransform) -> np.ndarray: + return np.array([trans.transform(trial.params) for trial in trials]) + + +def _get_target_values( + trials: list[FrozenTrial], target: Callable[[FrozenTrial], float] | None +) -> np.ndarray: + return np.array([target(trial) if target is not None else trial.value for trial in trials]) + + +def _sort_dict_by_importance(param_importances: dict[str, float]) -> dict[str, float]: + return dict( + reversed( + sorted( + param_importances.items(), key=lambda name_and_importance: name_and_importance[1] + ) + ) + ) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/importance/_fanova/__init__.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/importance/_fanova/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b3976c06ae2452b09fe628db9e339984906ac6d7 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/importance/_fanova/__init__.py @@ -0,0 +1,4 @@ +from optuna.importance._fanova._evaluator import FanovaImportanceEvaluator + + +__all__ = ["FanovaImportanceEvaluator"] diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/importance/_fanova/_evaluator.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/importance/_fanova/_evaluator.py new file mode 100644 index 0000000000000000000000000000000000000000..69c99b077ec824f77db63c11f628ba708053e819 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/importance/_fanova/_evaluator.py @@ -0,0 +1,132 @@ +from __future__ import annotations + +from collections.abc import Callable + +import numpy as np + +from optuna._transform import _SearchSpaceTransform +from optuna.importance._base import _get_distributions +from optuna.importance._base import _get_filtered_trials +from optuna.importance._base import _get_target_values +from optuna.importance._base import _get_trans_params +from optuna.importance._base import _param_importances_to_dict +from optuna.importance._base import _sort_dict_by_importance +from optuna.importance._base import BaseImportanceEvaluator +from optuna.importance._fanova._fanova import _Fanova +from optuna.study import Study +from optuna.trial import FrozenTrial + + +class FanovaImportanceEvaluator(BaseImportanceEvaluator): + """fANOVA importance evaluator. + + Implements the fANOVA hyperparameter importance evaluation algorithm in + `An Efficient Approach for Assessing Hyperparameter Importance + `__. 
+ + fANOVA fits a random forest regression model that predicts the objective values + of :class:`~optuna.trial.TrialState.COMPLETE` trials given their parameter configurations. + The more accurate this model is, the more reliable the importances assessed + by this class are. + + .. note:: + + This class takes over 1 minute when given a study that contains 1000+ trials. + We published `optuna-fast-fanova `__ library, + that is a Cython accelerated fANOVA implementation. By using it, you can get hyperparameter + importances within a few seconds. + + .. note:: + + Requires the `sklearn `__ Python package. + + .. note:: + + The performance of fANOVA depends on the prediction performance of the underlying + random forest model. In order to obtain high prediction performance, it is necessary to + cover a wide range of the hyperparameter search space. It is recommended to use an + exploration-oriented sampler such as :class:`~optuna.samplers.RandomSampler`. + + .. note:: + + For how to cite the original work, please refer to + https://automl.github.io/fanova/cite.html. + + Args: + n_trees: + The number of trees in the forest. + max_depth: + The maximum depth of the trees in the forest. + seed: + Controls the randomness of the forest. For deterministic behavior, specify a value + other than :obj:`None`. + + """ + + def __init__(self, *, n_trees: int = 64, max_depth: int = 64, seed: int | None = None) -> None: + self._evaluator = _Fanova( + n_trees=n_trees, + max_depth=max_depth, + min_samples_split=2, + min_samples_leaf=1, + seed=seed, + ) + + def evaluate( + self, + study: Study, + params: list[str] | None = None, + *, + target: Callable[[FrozenTrial], float] | None = None, + ) -> dict[str, float]: + if target is None and study._is_multi_objective(): + raise ValueError( + "If the `study` is being used for multi-objective optimization, " + "please specify the `target`. For example, use " + "`target=lambda t: t.values[0]` for the first objective value." + ) + + distributions = _get_distributions(study, params=params) + if params is None: + params = list(distributions.keys()) + assert params is not None + + # fANOVA does not support parameter distributions with a single value. + # However, there is no reason to calculate parameter importance in such case anyway, + # since it will always be 0 as the parameter is constant in the objective function. 
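+        # Single-valued (constant) parameters are therefore split off here and reported with an importance of 0.0 below.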
+ non_single_distributions = { + name: dist for name, dist in distributions.items() if not dist.single() + } + single_distributions = { + name: dist for name, dist in distributions.items() if dist.single() + } + + if len(non_single_distributions) == 0: + return {} + + trials: list[FrozenTrial] = _get_filtered_trials(study, params=params, target=target) + + trans = _SearchSpaceTransform( + non_single_distributions, transform_log=False, transform_step=False + ) + + trans_params: np.ndarray = _get_trans_params(trials, trans) + target_values: np.ndarray = _get_target_values(trials, target) + + evaluator = self._evaluator + evaluator.fit( + X=trans_params, + y=target_values, + search_spaces=trans.bounds, + column_to_encoded_columns=trans.column_to_encoded_columns, + ) + param_importances = np.array( + [evaluator.get_importance(i)[0] for i in range(len(non_single_distributions))] + ) + + return _sort_dict_by_importance( + { + **_param_importances_to_dict(non_single_distributions.keys(), param_importances), + **_param_importances_to_dict(single_distributions.keys(), 0.0), + } + ) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/importance/_fanova/_fanova.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/importance/_fanova/_fanova.py new file mode 100644 index 0000000000000000000000000000000000000000..122e12bbda75a7076b9e8f78314c83cec25b23b4 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/importance/_fanova/_fanova.py @@ -0,0 +1,108 @@ +"""An implementation of `An Efficient Approach for Assessing Hyperparameter Importance`. + +See http://proceedings.mlr.press/v32/hutter14.pdf and https://automl.github.io/fanova/cite.html +for how to cite the original work. + +This implementation is inspired by the efficient algorithm in +`fanova` (https://github.com/automl/fanova) and +`pyrfr` (https://github.com/automl/random_forest_run) by the original authors. + +Differences include relying on scikit-learn to fit random forests +(`sklearn.ensemble.RandomForestRegressor`) and that it is otherwise written entirely in Python. +This stands in contrast to the original implementation which is partially written in C++. +Since Python runtime overhead may become noticeable, included are instead several +optimizations, e.g. vectorized NumPy functions to compute the marginals, instead of keeping all +running statistics. Known cases include assessing categorical features with a larger +number of choices since each choice is given a unique one-hot encoded raw feature. 
+""" + +from __future__ import annotations + +import numpy as np + +from optuna._imports import try_import +from optuna.importance._fanova._tree import _FanovaTree + + +with try_import() as _imports: + from sklearn.ensemble import RandomForestRegressor + + +class _Fanova: + def __init__( + self, + n_trees: int, + max_depth: int, + min_samples_split: int | float, + min_samples_leaf: int | float, + seed: int | None, + ) -> None: + _imports.check() + + self._forest = RandomForestRegressor( + n_estimators=n_trees, + max_depth=max_depth, + min_samples_split=min_samples_split, + min_samples_leaf=min_samples_leaf, + random_state=seed, + ) + self._trees: list[_FanovaTree] | None = None + self._variances: dict[int, np.ndarray] | None = None + self._column_to_encoded_columns: list[np.ndarray] | None = None + + def fit( + self, + X: np.ndarray, + y: np.ndarray, + search_spaces: np.ndarray, + column_to_encoded_columns: list[np.ndarray], + ) -> None: + assert X.shape[0] == y.shape[0] + assert X.shape[1] == search_spaces.shape[0] + assert search_spaces.shape[1] == 2 + + self._forest.fit(X, y) + + self._trees = [_FanovaTree(e.tree_, search_spaces) for e in self._forest.estimators_] + self._column_to_encoded_columns = column_to_encoded_columns + self._variances = {} + + if all(tree.variance == 0 for tree in self._trees): + # If all trees have 0 variance, we cannot assess any importances. + # This could occur if for instance `X.shape[0] == 1`. + raise RuntimeError("Encountered zero total variance in all trees.") + + def get_importance(self, feature: int) -> tuple[float, float]: + # Assert that `fit` has been called. + assert self._trees is not None + assert self._variances is not None + + self._compute_variances(feature) + + fractions: list[float] | np.ndarray = [] + + for tree_index, tree in enumerate(self._trees): + tree_variance = tree.variance + if tree_variance > 0.0: + fraction = self._variances[feature][tree_index] / tree_variance + fractions = np.append(fractions, fraction) + + fractions = np.asarray(fractions) + + return float(fractions.mean()), float(fractions.std()) + + def _compute_variances(self, feature: int) -> None: + assert self._trees is not None + assert self._variances is not None + assert self._column_to_encoded_columns is not None + + if feature in self._variances: + return + + raw_features = self._column_to_encoded_columns[feature] + variances = np.empty(len(self._trees), dtype=np.float64) + + for tree_index, tree in enumerate(self._trees): + marginal_variance = tree.get_marginal_variance(raw_features) + variances[tree_index] = np.clip(marginal_variance, 0.0, None) + self._variances[feature] = variances diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/importance/_fanova/_tree.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/importance/_fanova/_tree.py new file mode 100644 index 0000000000000000000000000000000000000000..84fd915c9aaad99f9c780b6d15a93535d7f1940a --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/importance/_fanova/_tree.py @@ -0,0 +1,319 @@ +from __future__ import annotations + +from functools import lru_cache +import itertools +from typing import TYPE_CHECKING + +import numpy as np + + +if TYPE_CHECKING: + import sklearn.tree + + +class _FanovaTree: + def __init__(self, tree: "sklearn.tree._tree.Tree", search_spaces: np.ndarray) -> None: + assert search_spaces.shape[0] == tree.n_features + assert search_spaces.shape[1] == 2 + + 
self._tree = tree + self._search_spaces = search_spaces + + statistics = self._precompute_statistics() + split_midpoints, split_sizes = self._precompute_split_midpoints_and_sizes() + subtree_active_features = self._precompute_subtree_active_features() + + self._statistics = statistics + self._split_midpoints = split_midpoints + self._split_sizes = split_sizes + self._subtree_active_features = subtree_active_features + self._variance = None # Computed lazily and requires `self._statistics`. + + @property + def variance(self) -> float: + if self._variance is None: + leaf_node_indices = np.nonzero(np.array(self._tree.feature) < 0)[0] + statistics = self._statistics[leaf_node_indices] + values = statistics[:, 0] + weights = statistics[:, 1] + average_values = np.average(values, weights=weights) + variance = np.average((values - average_values) ** 2, weights=weights) + + self._variance = variance + + assert self._variance is not None + return self._variance + + def get_marginal_variance(self, features: np.ndarray) -> float: + assert features.size > 0 + + # For each midpoint along the given dimensions, traverse this tree to compute the + # marginal predictions. + selected_midpoints = [self._split_midpoints[f] for f in features] + selected_sizes = [self._split_sizes[f] for f in features] + + product_midpoints = itertools.product(*selected_midpoints) + product_sizes = itertools.product(*selected_sizes) + + sample = np.full(self._n_features, fill_value=np.nan, dtype=np.float64) + + values: list[float] | np.ndarray = [] + weights: list[float] | np.ndarray = [] + + for midpoints, sizes in zip(product_midpoints, product_sizes): + sample[features] = np.array(midpoints) + + value, weight = self._get_marginalized_statistics(sample) + weight *= float(np.prod(sizes)) + + values = np.append(values, value) + weights = np.append(weights, weight) + + weights = np.asarray(weights) + values = np.asarray(values) + average_values = np.average(values, weights=weights) + variance = np.average((values - average_values) ** 2, weights=weights) + + assert variance >= 0.0 + return variance + + def _get_marginalized_statistics(self, feature_vector: np.ndarray) -> tuple[float, float]: + assert feature_vector.size == self._n_features + + marginalized_features = np.isnan(feature_vector) + active_features = ~marginalized_features + + # Reduce search space cardinalities to 1 for non-active features. + search_spaces = self._search_spaces.copy() + search_spaces[marginalized_features] = [0.0, 1.0] + + # Start from the root and traverse towards the leafs. + active_nodes = [0] + active_search_spaces = [search_spaces] + + node_indices = [] + active_leaf_search_spaces = [] + + while len(active_nodes) > 0: + node_index = active_nodes.pop() + search_spaces = active_search_spaces.pop() + + feature = self._get_node_split_feature(node_index) + if feature >= 0: # Not leaf. Avoid unnecessary call to `_is_node_leaf`. + # If node splits on an active feature, push the child node that we end up in. 
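+                # A NaN response means the feature is marginalized, in which case we fall through to the subtree checks below.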
+ response = feature_vector[feature] + if not np.isnan(response): + if response <= self._get_node_split_threshold(node_index): + next_node_index = self._get_node_left_child(node_index) + next_subspace = self._get_node_left_child_subspaces( + node_index, search_spaces + ) + else: + next_node_index = self._get_node_right_child(node_index) + next_subspace = self._get_node_right_child_subspaces( + node_index, search_spaces + ) + + active_nodes.append(next_node_index) + active_search_spaces.append(next_subspace) + continue + + # If subtree starting from node splits on an active feature, push both child nodes. + # Here, we use `any` for list because `ndarray.any` is slow. + if any(self._subtree_active_features[node_index][active_features].tolist()): + for child_node_index in self._get_node_children(node_index): + active_nodes.append(child_node_index) + active_search_spaces.append(search_spaces) + continue + + # If node is a leaf or the subtree does not split on any of the active features. + node_indices.append(node_index) + active_leaf_search_spaces.append(search_spaces) + + statistics = self._statistics[node_indices] + values = statistics[:, 0] + weights = statistics[:, 1] + active_features_cardinalities = _get_cardinality_batched(active_leaf_search_spaces) + weights = weights / active_features_cardinalities + + value = np.average(values, weights=weights) + weight = weights.sum() + + return value, weight + + def _precompute_statistics(self) -> np.ndarray: + n_nodes = self._n_nodes + + # Holds for each node, its weighted average value and the sum of weights. + statistics = np.empty((n_nodes, 2), dtype=np.float64) + + subspaces = np.array([None for _ in range(n_nodes)]) + subspaces[0] = self._search_spaces + + # Compute marginals for leaf nodes. + for node_index in range(n_nodes): + subspace = subspaces[node_index] + + if self._is_node_leaf(node_index): + value = self._get_node_value(node_index) + weight = _get_cardinality(subspace) + statistics[node_index] = [value, weight] + else: + for child_node_index, child_subspace in zip( + self._get_node_children(node_index), + self._get_node_children_subspaces(node_index, subspace), + ): + assert subspaces[child_node_index] is None + subspaces[child_node_index] = child_subspace + + # Compute marginals for internal nodes. 
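+        # Nodes are visited in reverse index order; in scikit-learn trees children have larger indices than
+        # their parent, so child statistics are already available when the parent is processed.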
+ for node_index in reversed(range(n_nodes)): + if not self._is_node_leaf(node_index): + child_values = [] + child_weights = [] + for child_node_index in self._get_node_children(node_index): + child_values.append(statistics[child_node_index, 0]) + child_weights.append(statistics[child_node_index, 1]) + value = np.average(child_values, weights=child_weights) + weight = float(np.sum(child_weights)) + statistics[node_index] = [value, weight] + + return statistics + + def _precompute_split_midpoints_and_sizes( + self, + ) -> tuple[list[np.ndarray], list[np.ndarray]]: + midpoints = [] + sizes = [] + + search_spaces = self._search_spaces + for feature, feature_split_values in enumerate(self._compute_features_split_values()): + feature_split_values = np.concatenate( + ( + np.atleast_1d(search_spaces[feature, 0]), + feature_split_values, + np.atleast_1d(search_spaces[feature, 1]), + ) + ) + midpoint = 0.5 * (feature_split_values[1:] + feature_split_values[:-1]) + size = feature_split_values[1:] - feature_split_values[:-1] + + midpoints.append(midpoint) + sizes.append(size) + + return midpoints, sizes + + def _compute_features_split_values(self) -> list[np.ndarray]: + all_split_values: list[set[float]] = [set() for _ in range(self._n_features)] + + for node_index in range(self._n_nodes): + feature = self._get_node_split_feature(node_index) + if feature >= 0: # Not leaf. Avoid unnecessary call to `_is_node_leaf`. + threshold = self._get_node_split_threshold(node_index) + all_split_values[feature].add(threshold) + + sorted_all_split_values: list[np.ndarray] = [] + + for split_values in all_split_values: + split_values_array = np.array(list(split_values), dtype=np.float64) + split_values_array.sort() + sorted_all_split_values.append(split_values_array) + + return sorted_all_split_values + + def _precompute_subtree_active_features(self) -> np.ndarray: + subtree_active_features = np.full((self._n_nodes, self._n_features), fill_value=False) + + for node_index in reversed(range(self._n_nodes)): + feature = self._get_node_split_feature(node_index) + if feature >= 0: # Not leaf. Avoid unnecessary call to `_is_node_leaf`. 
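+                # Mark the node's own split feature as active, then OR in the children's masks so each row
+                # describes every feature used anywhere in the subtree rooted at this node.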
+ subtree_active_features[node_index, feature] = True + for child_node_index in self._get_node_children(node_index): + subtree_active_features[node_index] |= subtree_active_features[ + child_node_index + ] + + return subtree_active_features + + @property + def _n_features(self) -> int: + return len(self._search_spaces) + + @property + def _n_nodes(self) -> int: + return self._tree.node_count + + @lru_cache(maxsize=None) + def _is_node_leaf(self, node_index: int) -> bool: + return self._tree.feature[node_index] < 0 + + @lru_cache(maxsize=None) + def _get_node_left_child(self, node_index: int) -> int: + return self._tree.children_left[node_index] + + @lru_cache(maxsize=None) + def _get_node_right_child(self, node_index: int) -> int: + return self._tree.children_right[node_index] + + @lru_cache(maxsize=None) + def _get_node_children(self, node_index: int) -> tuple[int, int]: + return self._get_node_left_child(node_index), self._get_node_right_child(node_index) + + @lru_cache(maxsize=None) + def _get_node_value(self, node_index: int) -> float: + # self._tree.value: sklearn.tree._tree.Tree.value has + # the shape (node_count, n_outputs, max_n_classes) + return float(self._tree.value[node_index].reshape(-1)[0]) + + @lru_cache(maxsize=None) + def _get_node_split_threshold(self, node_index: int) -> float: + return self._tree.threshold[node_index] + + @lru_cache(maxsize=None) + def _get_node_split_feature(self, node_index: int) -> int: + return self._tree.feature[node_index] + + def _get_node_left_child_subspaces( + self, node_index: int, search_spaces: np.ndarray + ) -> np.ndarray: + return _get_subspaces( + search_spaces, + search_spaces_column=1, + feature=self._get_node_split_feature(node_index), + threshold=self._get_node_split_threshold(node_index), + ) + + def _get_node_right_child_subspaces( + self, node_index: int, search_spaces: np.ndarray + ) -> np.ndarray: + return _get_subspaces( + search_spaces, + search_spaces_column=0, + feature=self._get_node_split_feature(node_index), + threshold=self._get_node_split_threshold(node_index), + ) + + def _get_node_children_subspaces( + self, node_index: int, search_spaces: np.ndarray + ) -> tuple[np.ndarray, np.ndarray]: + return ( + self._get_node_left_child_subspaces(node_index, search_spaces), + self._get_node_right_child_subspaces(node_index, search_spaces), + ) + + +def _get_cardinality(search_spaces: np.ndarray) -> float: + return np.prod(search_spaces[:, 1] - search_spaces[:, 0]) + + +def _get_cardinality_batched(search_spaces_list: list[np.ndarray]) -> float: + search_spaces = np.asarray(search_spaces_list) + return np.prod(search_spaces[:, :, 1] - search_spaces[:, :, 0], axis=1) + + +def _get_subspaces( + search_spaces: np.ndarray, *, search_spaces_column: int, feature: int, threshold: float +) -> np.ndarray: + search_spaces_subspace = np.copy(search_spaces) + search_spaces_subspace[feature, search_spaces_column] = threshold + return search_spaces_subspace diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/importance/_mean_decrease_impurity.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/importance/_mean_decrease_impurity.py new file mode 100644 index 0000000000000000000000000000000000000000..deff21fbe8dc4eedc221515db4209676242ee99e --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/importance/_mean_decrease_impurity.py @@ -0,0 +1,95 @@ +from __future__ import annotations + +from collections.abc 
import Callable + +import numpy as np + +from optuna._imports import try_import +from optuna._transform import _SearchSpaceTransform +from optuna.importance._base import _get_distributions +from optuna.importance._base import _get_filtered_trials +from optuna.importance._base import _get_target_values +from optuna.importance._base import _get_trans_params +from optuna.importance._base import _param_importances_to_dict +from optuna.importance._base import _sort_dict_by_importance +from optuna.importance._base import BaseImportanceEvaluator +from optuna.study import Study +from optuna.trial import FrozenTrial + + +with try_import() as _imports: + from sklearn.ensemble import RandomForestRegressor + + +class MeanDecreaseImpurityImportanceEvaluator(BaseImportanceEvaluator): + """Mean Decrease Impurity (MDI) parameter importance evaluator. + + This evaluator fits fits a random forest regression model that predicts the objective values + of :class:`~optuna.trial.TrialState.COMPLETE` trials given their parameter configurations. + Feature importances are then computed using MDI. + + .. note:: + + This evaluator requires the `sklearn `__ Python package + and is based on `sklearn.ensemble.RandomForestClassifier.feature_importances_ + `__. + + Args: + n_trees: + Number of trees in the random forest. + max_depth: + The maximum depth of each tree in the random forest. + seed: + Seed for the random forest. + """ + + def __init__(self, *, n_trees: int = 64, max_depth: int = 64, seed: int | None = None) -> None: + _imports.check() + + self._forest = RandomForestRegressor( + n_estimators=n_trees, + max_depth=max_depth, + min_samples_split=2, + min_samples_leaf=1, + random_state=seed, + ) + self._trans_params = np.empty(0) + self._trans_values = np.empty(0) + self._param_names: list[str] = list() + + def evaluate( + self, + study: Study, + params: list[str] | None = None, + *, + target: Callable[[FrozenTrial], float] | None = None, + ) -> dict[str, float]: + if target is None and study._is_multi_objective(): + raise ValueError( + "If the `study` is being used for multi-objective optimization, " + "please specify the `target`. For example, use " + "`target=lambda t: t.values[0]` for the first objective value." + ) + + distributions = _get_distributions(study, params=params) + if params is None: + params = list(distributions.keys()) + assert params is not None + if len(params) == 0: + return {} + + trials: list[FrozenTrial] = _get_filtered_trials(study, params=params, target=target) + trans = _SearchSpaceTransform(distributions, transform_log=False, transform_step=False) + trans_params: np.ndarray = _get_trans_params(trials, trans) + target_values: np.ndarray = _get_target_values(trials, target) + + forest = self._forest + forest.fit(X=trans_params, y=target_values) + feature_importances = forest.feature_importances_ + + # Untransform feature importances to param importances + # by adding up relevant feature importances. 
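+        # ``trans.encoded_column_to_column`` maps each one-hot encoded column back to the index of its source
+        # parameter, so ``np.add.at`` accumulates the per-column importances into per-parameter totals.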
+ param_importances = np.zeros(len(params)) + np.add.at(param_importances, trans.encoded_column_to_column, feature_importances) + + return _sort_dict_by_importance(_param_importances_to_dict(params, param_importances)) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/importance/_ped_anova/__init__.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/importance/_ped_anova/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8b1e0c1c57844b80a76f1432d2f7bedeb540b457 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/importance/_ped_anova/__init__.py @@ -0,0 +1,4 @@ +from optuna.importance._ped_anova.evaluator import PedAnovaImportanceEvaluator + + +__all__ = ["PedAnovaImportanceEvaluator"] diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/importance/_ped_anova/evaluator.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/importance/_ped_anova/evaluator.py new file mode 100644 index 0000000000000000000000000000000000000000..22fce6593392a51bfc8f7883ffb16a47067751bb --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/importance/_ped_anova/evaluator.py @@ -0,0 +1,227 @@ +from __future__ import annotations + +from collections.abc import Callable +import warnings + +import numpy as np + +from optuna._experimental import experimental_class +from optuna.distributions import BaseDistribution +from optuna.importance._base import _get_distributions +from optuna.importance._base import _get_filtered_trials +from optuna.importance._base import _sort_dict_by_importance +from optuna.importance._base import BaseImportanceEvaluator +from optuna.importance._ped_anova.scott_parzen_estimator import _build_parzen_estimator +from optuna.logging import get_logger +from optuna.study import Study +from optuna.study import StudyDirection +from optuna.trial import FrozenTrial + + +_logger = get_logger(__name__) + + +class _QuantileFilter: + def __init__( + self, + quantile: float, + is_lower_better: bool, + min_n_top_trials: int, + target: Callable[[FrozenTrial], float] | None, + ): + assert 0 <= quantile <= 1, "quantile must be in [0, 1]." + assert min_n_top_trials > 0, "min_n_top_trials must be positive." + + self._quantile = quantile + self._is_lower_better = is_lower_better + self._min_n_top_trials = min_n_top_trials + self._target = target + + def filter(self, trials: list[FrozenTrial]) -> list[FrozenTrial]: + target, min_n_top_trials = self._target, self._min_n_top_trials + sign = 1.0 if self._is_lower_better else -1.0 + loss_values = sign * np.asarray([t.value if target is None else target(t) for t in trials]) + err_msg = "len(trials) must be larger than or equal to min_n_top_trials" + assert min_n_top_trials <= loss_values.size, err_msg + + def _quantile(v: np.ndarray, q: float) -> float: + cutoff_index = int(np.ceil(q * loss_values.size)) - 1 + return float(np.partition(loss_values, cutoff_index)[cutoff_index]) + + cutoff_val = max( + np.partition(loss_values, min_n_top_trials - 1)[min_n_top_trials - 1], + # TODO(nabenabe0928): After dropping Python3.10, replace below with + # np.quantile(loss_values, self._quantile, method="inverted_cdf"). 
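+            # Illustrative arithmetic: with 10 loss values and self._quantile = 0.1,
+            # `_quantile` uses cutoff_index = ceil(0.1 * 10) - 1 = 0, i.e. the single
+            # best loss value defines the quantile cutoff.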
+ _quantile(loss_values, self._quantile), + ) + should_keep_trials = loss_values <= cutoff_val + return [t for t, should_keep in zip(trials, should_keep_trials) if should_keep] + + +@experimental_class("3.6.0") +class PedAnovaImportanceEvaluator(BaseImportanceEvaluator): + """PED-ANOVA importance evaluator. + + Implements the PED-ANOVA hyperparameter importance evaluation algorithm. + + PED-ANOVA fits Parzen estimators of :class:`~optuna.trial.TrialState.COMPLETE` trials better + than a user-specified baseline. Users can specify the baseline by a quantile. + The importance can be interpreted as how important each hyperparameter is to get + the performance better than baseline. + + For further information about PED-ANOVA algorithm, please refer to the following paper: + + - `PED-ANOVA: Efficiently Quantifying Hyperparameter Importance in Arbitrary Subspaces + `__ + + .. note:: + + The performance of PED-ANOVA depends on how many trials to consider above baseline. + To stabilize the analysis, it is preferable to include at least 5 trials above baseline. + + .. note:: + + Please refer to `the original work `__. + + Args: + baseline_quantile: + Compute the importance of achieving top-``baseline_quantile`` quantile objective value. + For example, ``baseline_quantile=0.1`` means that the importances give the information + of which parameters were important to achieve the top-10% performance during + optimization. + evaluate_on_local: + Whether we measure the importance in the local or global space. + If :obj:`True`, the importances imply how importance each parameter is during + optimization. Meanwhile, ``evaluate_on_local=False`` gives the importances in the + specified search_space. ``evaluate_on_local=True`` is especially useful when users + modify search space during optimization. + + Example: + An example of using PED-ANOVA is as follows: + + .. testcode:: + + import optuna + from optuna.importance import PedAnovaImportanceEvaluator + + + def objective(trial): + x1 = trial.suggest_float("x1", -10, 10) + x2 = trial.suggest_float("x2", -10, 10) + return x1 + x2 / 1000 + + + study = optuna.create_study() + study.optimize(objective, n_trials=100) + evaluator = PedAnovaImportanceEvaluator() + importance = optuna.importance.get_param_importances(study, evaluator=evaluator) + + """ + + def __init__( + self, + *, + baseline_quantile: float = 0.1, + evaluate_on_local: bool = True, + ): + assert 0.0 <= baseline_quantile <= 1.0, "baseline_quantile must be in [0, 1]." + self._baseline_quantile = baseline_quantile + self._evaluate_on_local = evaluate_on_local + + # Advanced Setups. + # Discretize a domain [low, high] as `np.linspace(low, high, n_steps)`. + self._n_steps: int = 50 + # Control the regularization effect by prior. + self._prior_weight = 1.0 + # How many `trials` must be included in `top_trials`. + self._min_n_top_trials = 2 + + def _get_top_trials( + self, + study: Study, + trials: list[FrozenTrial], + params: list[str], + target: Callable[[FrozenTrial], float] | None, + ) -> list[FrozenTrial]: + is_lower_better = study.directions[0] == StudyDirection.MINIMIZE + if target is not None: + warnings.warn( + f"{self.__class__.__name__} computes the importances of params to achieve " + "low `target` values. If this is not what you want, " + "please modify target, e.g., by multiplying the output by -1." 
+ ) + is_lower_better = True + + top_trials = _QuantileFilter( + self._baseline_quantile, is_lower_better, self._min_n_top_trials, target + ).filter(trials) + + if len(trials) == len(top_trials): + _logger.warning("All trials are in top trials, which gives equal importances.") + + return top_trials + + def _compute_pearson_divergence( + self, + param_name: str, + dist: BaseDistribution, + top_trials: list[FrozenTrial], + all_trials: list[FrozenTrial], + ) -> float: + # When pdf_all == pdf_top, i.e. all_trials == top_trials, this method will give 0.0. + prior_weight = self._prior_weight + pe_top = _build_parzen_estimator(param_name, dist, top_trials, self._n_steps, prior_weight) + # NOTE: pe_top.n_steps could be different from self._n_steps. + grids = np.arange(pe_top.n_steps) + pdf_top = pe_top.pdf(grids) + 1e-12 + + if self._evaluate_on_local: # The importance of param during the study. + pe_local = _build_parzen_estimator( + param_name, dist, all_trials, self._n_steps, prior_weight + ) + pdf_local = pe_local.pdf(grids) + 1e-12 + else: # The importance of param in the search space. + pdf_local = np.full(pe_top.n_steps, 1.0 / pe_top.n_steps) + + return float(pdf_local @ ((pdf_top / pdf_local - 1) ** 2)) + + def evaluate( + self, + study: Study, + params: list[str] | None = None, + *, + target: Callable[[FrozenTrial], float] | None = None, + ) -> dict[str, float]: + dists = _get_distributions(study, params=params) + if params is None: + params = list(dists.keys()) + + assert params is not None + # PED-ANOVA does not support parameter distributions with a single value, + # because the importance of such params become zero. + non_single_dists = {name: dist for name, dist in dists.items() if not dist.single()} + single_dists = {name: dist for name, dist in dists.items() if dist.single()} + if len(non_single_dists) == 0: + return {} + + trials = _get_filtered_trials(study, params=params, target=target) + n_params = len(non_single_dists) + # The following should be tested at _get_filtered_trials. 
+ assert target is not None or max([len(t.values) for t in trials], default=1) == 1 + if len(trials) <= self._min_n_top_trials: + param_importances = {k: 1.0 / n_params for k in non_single_dists} + param_importances.update({k: 0.0 for k in single_dists}) + return {k: 0.0 for k in param_importances} + + top_trials = self._get_top_trials(study, trials, params, target) + quantile = len(top_trials) / len(trials) + importance_sum = 0.0 + param_importances = {} + for param_name, dist in non_single_dists.items(): + param_importances[param_name] = quantile * self._compute_pearson_divergence( + param_name, dist, top_trials=top_trials, all_trials=trials + ) + importance_sum += param_importances[param_name] + + param_importances.update({k: 0.0 for k in single_dists}) + return _sort_dict_by_importance(param_importances) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/importance/_ped_anova/scott_parzen_estimator.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/importance/_ped_anova/scott_parzen_estimator.py new file mode 100644 index 0000000000000000000000000000000000000000..51abf97f26643e7006983c57876d5098065b3d57 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/importance/_ped_anova/scott_parzen_estimator.py @@ -0,0 +1,157 @@ +from __future__ import annotations + +import numpy as np + +from optuna.distributions import BaseDistribution +from optuna.distributions import CategoricalDistribution +from optuna.distributions import FloatDistribution +from optuna.distributions import IntDistribution +from optuna.samplers._tpe.parzen_estimator import _ParzenEstimator +from optuna.samplers._tpe.parzen_estimator import _ParzenEstimatorParameters +from optuna.samplers._tpe.probability_distributions import _BatchedDiscreteTruncNormDistributions +from optuna.samplers._tpe.probability_distributions import _BatchedDistributions +from optuna.trial import FrozenTrial + + +class _ScottParzenEstimator(_ParzenEstimator): + """1D ParzenEstimator using the bandwidth selection by Scott's rule.""" + + def __init__( + self, + param_name: str, + dist: IntDistribution | CategoricalDistribution, + counts: np.ndarray, + prior_weight: float, + ): + assert isinstance(dist, (CategoricalDistribution, IntDistribution)) + assert not isinstance(dist, IntDistribution) or dist.low == 0 + n_choices = dist.high + 1 if isinstance(dist, IntDistribution) else len(dist.choices) + assert len(counts) == n_choices, counts + + self._n_steps = len(counts) + self._param_name = param_name + self._counts = counts.copy() + super().__init__( + observations={param_name: np.arange(self._n_steps)[counts > 0.0]}, + search_space={param_name: dist}, + parameters=_ParzenEstimatorParameters( + prior_weight=prior_weight, + consider_magic_clip=False, + consider_endpoints=False, + weights=lambda x: np.empty(0), + multivariate=True, + categorical_distance_func={}, + ), + predetermined_weights=counts[counts > 0.0], + ) + + def _calculate_numerical_distributions( + self, + observations: np.ndarray, + low: float, # The type is actually int, but typing follows the original. + high: float, # The type is actually int, but typing follows the original. + step: float | None, + parameters: _ParzenEstimatorParameters, + ) -> _BatchedDistributions: + # NOTE: The Optuna TPE bandwidth selection is too wide for this analysis. + # So use the Scott's rule by Scott, D.W. 
(1992), + # Multivariate Density Estimation: Theory, Practice, and Visualization. + assert step is not None and np.isclose(step, 1.0), "MyPy redefinition." + + n_trials = np.sum(self._counts) + counts_non_zero = self._counts[self._counts > 0] + weights = counts_non_zero / n_trials + mus = np.arange(self.n_steps)[self._counts > 0] + mean_est = mus @ weights + sigma_est = np.sqrt((mus - mean_est) ** 2 @ counts_non_zero / max(1, n_trials - 1)) + + count_cum = np.cumsum(counts_non_zero) + idx_q25 = np.searchsorted(count_cum, n_trials // 4, side="left") + idx_q75 = np.searchsorted(count_cum, n_trials * 3 // 4, side="right") + interquantile_range = mus[min(mus.size - 1, idx_q75)] - mus[idx_q25] + sigma_est = 1.059 * min(interquantile_range / 1.34, sigma_est) * n_trials ** (-0.2) + # To avoid numerical errors. 0.5/1.64 means 1.64sigma (=90%) will fit in the target grid. + sigma_min = 0.5 / 1.64 + sigmas = np.full_like(mus, max(sigma_est, sigma_min), dtype=np.float64) + mus = np.append(mus, [0.5 * (low + high)]) + sigmas = np.append(sigmas, [1.0 * (high - low + 1)]) + + return _BatchedDiscreteTruncNormDistributions( + mu=mus, sigma=sigmas, low=0, high=self.n_steps - 1, step=1 + ) + + @property + def n_steps(self) -> int: + return self._n_steps + + def pdf(self, samples: np.ndarray) -> np.ndarray: + return np.exp(self.log_pdf({self._param_name: samples})) + + +def _get_grids_and_grid_indices_of_trials( + param_name: str, + dist: IntDistribution | FloatDistribution, + trials: list[FrozenTrial], + n_steps: int, +) -> tuple[int, np.ndarray]: + assert isinstance(dist, (FloatDistribution, IntDistribution)), "Unexpected distribution." + if isinstance(dist, IntDistribution) and dist.log: + log2_domain_size = int(np.ceil(np.log(dist.high - dist.low + 1) / np.log(2))) + 1 + n_steps = min(log2_domain_size, n_steps) + elif dist.step is not None: + assert not dist.log, "log must be False when step is not None." 
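+        # Illustrative arithmetic: an IntDistribution(low=0, high=10, step=2) takes
+        # round((10 - 0) / 2) + 1 = 6 distinct values, so the grid is capped at
+        # min(6, n_steps) points.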
+ n_steps = min(round((dist.high - dist.low) / dist.step) + 1, n_steps) + + scaler = np.log if dist.log else np.asarray + grids = np.linspace(scaler(dist.low), scaler(dist.high), n_steps) + params = scaler([t.params[param_name] for t in trials]) + step_size = grids[1] - grids[0] + # grids[indices[n] - 1] < param - step_size / 2 <= grids[indices[n]] + indices = np.searchsorted(grids, params - step_size / 2) + return grids.size, indices + + +def _count_numerical_param_in_grid( + param_name: str, + dist: IntDistribution | FloatDistribution, + trials: list[FrozenTrial], + n_steps: int, +) -> np.ndarray: + n_grids, grid_indices_of_trials = _get_grids_and_grid_indices_of_trials( + param_name, dist, trials, n_steps + ) + unique_vals, counts_in_unique = np.unique(grid_indices_of_trials, return_counts=True) + counts = np.zeros(n_grids, dtype=np.int32) + counts[unique_vals] += counts_in_unique + return counts + + +def _count_categorical_param_in_grid( + param_name: str, dist: CategoricalDistribution, trials: list[FrozenTrial] +) -> np.ndarray: + cat_indices = [int(dist.to_internal_repr(t.params[param_name])) for t in trials] + unique_vals, counts_in_unique = np.unique(cat_indices, return_counts=True) + counts = np.zeros(len(dist.choices), dtype=np.int32) + counts[unique_vals] += counts_in_unique + return counts + + +def _build_parzen_estimator( + param_name: str, + dist: BaseDistribution, + trials: list[FrozenTrial], + n_steps: int, + prior_weight: float, +) -> _ScottParzenEstimator: + rounded_dist: IntDistribution | CategoricalDistribution + if isinstance(dist, (IntDistribution, FloatDistribution)): + counts = _count_numerical_param_in_grid(param_name, dist, trials, n_steps) + rounded_dist = IntDistribution(low=0, high=counts.size - 1) + elif isinstance(dist, CategoricalDistribution): + counts = _count_categorical_param_in_grid(param_name, dist, trials) + rounded_dist = dist + else: + assert False, f"Got an unknown dist with the type {type(dist)}." + + # counts.astype(float) is necessary for weight calculation in ParzenEstimator. 
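+    # Illustrative example: for a CategoricalDistribution with three choices where the
+    # trials picked the first choice twice and the third once, counts == [2, 0, 1]; only
+    # the non-zero indices (0 and 2) are passed to the estimator as observations.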
+ return _ScottParzenEstimator(param_name, rounded_dist, counts.astype(np.float64), prior_weight) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/integration/__init__.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/integration/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..29d74d848a4a731109f2d7ba7ffc2049cb9e952f --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/integration/__init__.py @@ -0,0 +1,136 @@ +import os +import sys +from types import ModuleType +from typing import Any +from typing import TYPE_CHECKING + +from optuna._imports import _INTEGRATION_IMPORT_ERROR_TEMPLATE + + +_import_structure = { + "allennlp": ["AllenNLPExecutor", "AllenNLPPruningCallback"], + "botorch": ["BoTorchSampler"], + "catboost": ["CatBoostPruningCallback"], + "chainer": ["ChainerPruningExtension"], + "chainermn": ["ChainerMNStudy"], + "cma": ["PyCmaSampler"], + "dask": ["DaskStorage"], + "mlflow": ["MLflowCallback"], + "wandb": ["WeightsAndBiasesCallback"], + "keras": ["KerasPruningCallback"], + "lightgbm": ["LightGBMPruningCallback", "LightGBMTuner", "LightGBMTunerCV"], + "pytorch_distributed": ["TorchDistributedTrial"], + "pytorch_ignite": ["PyTorchIgnitePruningHandler"], + "pytorch_lightning": ["PyTorchLightningPruningCallback"], + "sklearn": ["OptunaSearchCV"], + "shap": ["ShapleyImportanceEvaluator"], + "skorch": ["SkorchPruningCallback"], + "mxnet": ["MXNetPruningCallback"], + "tensorboard": ["TensorBoardCallback"], + "tensorflow": ["TensorFlowPruningHook"], + "tfkeras": ["TFKerasPruningCallback"], + "xgboost": ["XGBoostPruningCallback"], + "fastaiv2": ["FastAIV2PruningCallback", "FastAIPruningCallback"], +} + + +__all__ = [ + "AllenNLPExecutor", + "AllenNLPPruningCallback", + "BoTorchSampler", + "CatBoostPruningCallback", + "ChainerPruningExtension", + "ChainerMNStudy", + "PyCmaSampler", + "DaskStorage", + "MLflowCallback", + "WeightsAndBiasesCallback", + "KerasPruningCallback", + "LightGBMPruningCallback", + "LightGBMTuner", + "LightGBMTunerCV", + "TorchDistributedTrial", + "PyTorchIgnitePruningHandler", + "PyTorchLightningPruningCallback", + "OptunaSearchCV", + "ShapleyImportanceEvaluator", + "SkorchPruningCallback", + "MXNetPruningCallback", + "TensorBoardCallback", + "TensorFlowPruningHook", + "TFKerasPruningCallback", + "XGBoostPruningCallback", + "FastAIV2PruningCallback", + "FastAIPruningCallback", +] + + +if TYPE_CHECKING: + from optuna.integration.allennlp import AllenNLPExecutor + from optuna.integration.allennlp import AllenNLPPruningCallback + from optuna.integration.botorch import BoTorchSampler + from optuna.integration.catboost import CatBoostPruningCallback + from optuna.integration.chainer import ChainerPruningExtension + from optuna.integration.chainermn import ChainerMNStudy + from optuna.integration.cma import PyCmaSampler + from optuna.integration.dask import DaskStorage + from optuna.integration.fastaiv2 import FastAIPruningCallback + from optuna.integration.fastaiv2 import FastAIV2PruningCallback + from optuna.integration.keras import KerasPruningCallback + from optuna.integration.lightgbm import LightGBMPruningCallback + from optuna.integration.lightgbm import LightGBMTuner + from optuna.integration.lightgbm import LightGBMTunerCV + from optuna.integration.mlflow import MLflowCallback + from optuna.integration.mxnet import MXNetPruningCallback + from optuna.integration.pytorch_distributed import 
TorchDistributedTrial + from optuna.integration.pytorch_ignite import PyTorchIgnitePruningHandler + from optuna.integration.pytorch_lightning import PyTorchLightningPruningCallback + from optuna.integration.shap import ShapleyImportanceEvaluator + from optuna.integration.sklearn import OptunaSearchCV + from optuna.integration.skorch import SkorchPruningCallback + from optuna.integration.tensorboard import TensorBoardCallback + from optuna.integration.tensorflow import TensorFlowPruningHook + from optuna.integration.tfkeras import TFKerasPruningCallback + from optuna.integration.wandb import WeightsAndBiasesCallback + from optuna.integration.xgboost import XGBoostPruningCallback +else: + + class _IntegrationModule(ModuleType): + """Module class that implements `optuna.integration` package. + + This class applies lazy import under `optuna.integration`, where submodules are imported + when they are actually accessed. Otherwise, `import optuna` becomes much slower because it + imports all submodules and their dependencies (e.g., chainer, keras, lightgbm) all at once. + """ + + __all__ = __all__ + __file__ = globals()["__file__"] + __path__ = [os.path.dirname(__file__)] + + _modules = set(_import_structure.keys()) + _class_to_module = {} + for key, values in _import_structure.items(): + for value in values: + _class_to_module[value] = key + + def __getattr__(self, name: str) -> Any: + if name in self._modules: + value = self._get_module(name) + elif name in self._class_to_module.keys(): + module = self._get_module(self._class_to_module[name]) + value = getattr(module, name) + else: + raise AttributeError("module {} has no attribute {}".format(self.__name__, name)) + + setattr(self, name, value) + return value + + def _get_module(self, module_name: str) -> ModuleType: + import importlib + + try: + return importlib.import_module("." 
+ module_name, self.__name__) + except ModuleNotFoundError: + raise ModuleNotFoundError(_INTEGRATION_IMPORT_ERROR_TEMPLATE.format(module_name)) + + sys.modules[__name__] = _IntegrationModule(__name__) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/integration/allennlp/__init__.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/integration/allennlp/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ff7dee7092ce293e8e6d187663c1e6c1b9eba042 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/integration/allennlp/__init__.py @@ -0,0 +1,12 @@ +from optuna._imports import _INTEGRATION_IMPORT_ERROR_TEMPLATE + + +try: + from optuna_integration.allennlp._dump_best_config import dump_best_config + from optuna_integration.allennlp._executor import AllenNLPExecutor + from optuna_integration.allennlp._pruner import AllenNLPPruningCallback +except ModuleNotFoundError: + raise ModuleNotFoundError(_INTEGRATION_IMPORT_ERROR_TEMPLATE.format("allennlp")) + + +__all__ = ["dump_best_config", "AllenNLPExecutor", "AllenNLPPruningCallback"] diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/integration/botorch.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/integration/botorch.py new file mode 100644 index 0000000000000000000000000000000000000000..4cd4f035f8d38110bbdf96a325572eba5b5d8377 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/integration/botorch.py @@ -0,0 +1,10 @@ +from optuna._imports import _INTEGRATION_IMPORT_ERROR_TEMPLATE + + +try: + from optuna_integration import BoTorchSampler +except ModuleNotFoundError: + raise ModuleNotFoundError(_INTEGRATION_IMPORT_ERROR_TEMPLATE.format("botorch")) + + +__all__ = ["BoTorchSampler"] diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/integration/catboost.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/integration/catboost.py new file mode 100644 index 0000000000000000000000000000000000000000..3d3e37a7b72a4ac27e425f46f4224f31f81cb453 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/integration/catboost.py @@ -0,0 +1,10 @@ +from optuna._imports import _INTEGRATION_IMPORT_ERROR_TEMPLATE + + +try: + from optuna_integration.catboost import CatBoostPruningCallback +except ModuleNotFoundError: + raise ModuleNotFoundError(_INTEGRATION_IMPORT_ERROR_TEMPLATE.format("catboost")) + + +__all__ = ["CatBoostPruningCallback"] diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/integration/chainer.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/integration/chainer.py new file mode 100644 index 0000000000000000000000000000000000000000..523363c7e59a8101630dd412c060dc3e24f9bb28 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/integration/chainer.py @@ -0,0 +1,10 @@ +from optuna._imports import _INTEGRATION_IMPORT_ERROR_TEMPLATE + + +try: + from optuna_integration.chainer import ChainerPruningExtension +except ModuleNotFoundError: + raise ModuleNotFoundError(_INTEGRATION_IMPORT_ERROR_TEMPLATE.format("chainer")) + + +__all__ = ["ChainerPruningExtension"] diff --git 
a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/integration/chainermn.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/integration/chainermn.py new file mode 100644 index 0000000000000000000000000000000000000000..42c61345b1ad4f6238b9748bb88f1a65dfbb6481 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/integration/chainermn.py @@ -0,0 +1,10 @@ +from optuna._imports import _INTEGRATION_IMPORT_ERROR_TEMPLATE + + +try: + from optuna_integration.chainermn import ChainerMNStudy +except ModuleNotFoundError: + raise ModuleNotFoundError(_INTEGRATION_IMPORT_ERROR_TEMPLATE.format("chainermn")) + + +__all__ = ["ChainerMNStudy"] diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/integration/cma.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/integration/cma.py new file mode 100644 index 0000000000000000000000000000000000000000..b6a09c310e0ca2c110a9bbd0ee29ebd4e2d04509 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/integration/cma.py @@ -0,0 +1,10 @@ +from optuna._imports import _INTEGRATION_IMPORT_ERROR_TEMPLATE + + +try: + from optuna_integration.cma import PyCmaSampler +except ModuleNotFoundError: + raise ModuleNotFoundError(_INTEGRATION_IMPORT_ERROR_TEMPLATE.format("cma")) + + +__all__ = ["PyCmaSampler"] diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/integration/dask.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/integration/dask.py new file mode 100644 index 0000000000000000000000000000000000000000..d017547fe431a46aae8b1885ae4c90b04c5c0ff2 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/integration/dask.py @@ -0,0 +1,10 @@ +from optuna._imports import _INTEGRATION_IMPORT_ERROR_TEMPLATE + + +try: + from optuna_integration.dask import DaskStorage +except ModuleNotFoundError: + raise ModuleNotFoundError(_INTEGRATION_IMPORT_ERROR_TEMPLATE.format("dask")) + + +__all__ = ["DaskStorage"] diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/integration/fastaiv2.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/integration/fastaiv2.py new file mode 100644 index 0000000000000000000000000000000000000000..2142db65242bca77a6be6ae0348ffc9d641dfc88 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/integration/fastaiv2.py @@ -0,0 +1,11 @@ +from optuna._imports import _INTEGRATION_IMPORT_ERROR_TEMPLATE + + +try: + from optuna_integration.fastaiv2 import FastAIPruningCallback + from optuna_integration.fastaiv2 import FastAIV2PruningCallback +except ModuleNotFoundError: + raise ModuleNotFoundError(_INTEGRATION_IMPORT_ERROR_TEMPLATE.format("fastaiv2")) + + +__all__ = ["FastAIV2PruningCallback", "FastAIPruningCallback"] diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/integration/keras.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/integration/keras.py new file mode 100644 index 0000000000000000000000000000000000000000..e6e93e9dad5284467ca074784693321ec07df0e9 --- /dev/null +++ 
b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/integration/keras.py @@ -0,0 +1,10 @@ +from optuna._imports import _INTEGRATION_IMPORT_ERROR_TEMPLATE + + +try: + from optuna_integration.keras import KerasPruningCallback +except ModuleNotFoundError: + raise ModuleNotFoundError(_INTEGRATION_IMPORT_ERROR_TEMPLATE.format("keras")) + + +__all__ = ["KerasPruningCallback"] diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/integration/lightgbm.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/integration/lightgbm.py new file mode 100644 index 0000000000000000000000000000000000000000..7b2ed0764b9a078fc9c73940de403b0b66b8d3c2 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/integration/lightgbm.py @@ -0,0 +1,43 @@ +import os +import sys +from types import ModuleType +from typing import Any +from typing import TYPE_CHECKING + +from optuna._imports import _INTEGRATION_IMPORT_ERROR_TEMPLATE + + +try: + import optuna_integration.lightgbm as lgb +except ModuleNotFoundError: + raise ModuleNotFoundError(_INTEGRATION_IMPORT_ERROR_TEMPLATE.format("lightgbm")) + + +if TYPE_CHECKING: + # These modules are from optuna-integration. + from optuna.integration.lightgbm_tuner import LightGBMPruningCallback + from optuna.integration.lightgbm_tuner import LightGBMTuner + from optuna.integration.lightgbm_tuner import LightGBMTunerCV + from optuna.integration.lightgbm_tuner import train + + +__all__ = [ + "LightGBMPruningCallback", + "LightGBMTuner", + "LightGBMTunerCV", + "train", +] + + +class _LightGBMModule(ModuleType): + """Module class that implements `optuna.integration.lightgbm` package.""" + + __all__ = __all__ + __file__ = globals()["__file__"] + __path__ = [os.path.dirname(__file__)] + + def __getattr__(self, name: str) -> Any: + return lgb.__dict__[name] + + +sys.modules[__name__] = _LightGBMModule(__name__) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/integration/mlflow.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/integration/mlflow.py new file mode 100644 index 0000000000000000000000000000000000000000..67b9505b6b286dfffbf189359867c30d03d50fcc --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/integration/mlflow.py @@ -0,0 +1,10 @@ +from optuna._imports import _INTEGRATION_IMPORT_ERROR_TEMPLATE + + +try: + from optuna_integration.mlflow import MLflowCallback +except ModuleNotFoundError: + raise ModuleNotFoundError(_INTEGRATION_IMPORT_ERROR_TEMPLATE.format("mlflow")) + + +__all__ = ["MLflowCallback"] diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/integration/mxnet.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/integration/mxnet.py new file mode 100644 index 0000000000000000000000000000000000000000..14aa51aec782a4867d9496f320ac78c4ddba4a60 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/integration/mxnet.py @@ -0,0 +1,10 @@ +from optuna._imports import _INTEGRATION_IMPORT_ERROR_TEMPLATE + + +try: + from optuna_integration.mxnet import MXNetPruningCallback +except ModuleNotFoundError: + raise ModuleNotFoundError(_INTEGRATION_IMPORT_ERROR_TEMPLATE.format("mxnet")) + + +__all__ = ["MXNetPruningCallback"] diff --git 
a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/integration/pytorch_distributed.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/integration/pytorch_distributed.py new file mode 100644 index 0000000000000000000000000000000000000000..1325a3250b719abd6eb1dc1d24c674d05926d257 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/integration/pytorch_distributed.py @@ -0,0 +1,10 @@ +from optuna._imports import _INTEGRATION_IMPORT_ERROR_TEMPLATE + + +try: + from optuna_integration.pytorch_distributed import TorchDistributedTrial +except ModuleNotFoundError: + raise ModuleNotFoundError(_INTEGRATION_IMPORT_ERROR_TEMPLATE.format("pytorch_distributed")) + + +__all__ = ["TorchDistributedTrial"] diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/integration/pytorch_ignite.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/integration/pytorch_ignite.py new file mode 100644 index 0000000000000000000000000000000000000000..cbf38027b532f524fab2a5eab5413ebb54645f16 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/integration/pytorch_ignite.py @@ -0,0 +1,10 @@ +from optuna._imports import _INTEGRATION_IMPORT_ERROR_TEMPLATE + + +try: + from optuna_integration.pytorch_ignite import PyTorchIgnitePruningHandler +except ModuleNotFoundError: + raise ModuleNotFoundError(_INTEGRATION_IMPORT_ERROR_TEMPLATE.format("pytorch_ignite")) + + +__all__ = ["PyTorchIgnitePruningHandler"] diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/integration/pytorch_lightning.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/integration/pytorch_lightning.py new file mode 100644 index 0000000000000000000000000000000000000000..470146be9657b6de5da8b27afd1a2e97aad82f3c --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/integration/pytorch_lightning.py @@ -0,0 +1,10 @@ +from optuna._imports import _INTEGRATION_IMPORT_ERROR_TEMPLATE + + +try: + from optuna_integration.pytorch_lightning import PyTorchLightningPruningCallback +except ModuleNotFoundError: + raise ModuleNotFoundError(_INTEGRATION_IMPORT_ERROR_TEMPLATE.format("pytorch_lightning")) + + +__all__ = ["PyTorchLightningPruningCallback"] diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/integration/shap.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/integration/shap.py new file mode 100644 index 0000000000000000000000000000000000000000..c9882233b50d875d769e81e873f4a1e7a0e7b80b --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/integration/shap.py @@ -0,0 +1,10 @@ +from optuna._imports import _INTEGRATION_IMPORT_ERROR_TEMPLATE + + +try: + from optuna_integration.shap import ShapleyImportanceEvaluator +except ModuleNotFoundError: + raise ModuleNotFoundError(_INTEGRATION_IMPORT_ERROR_TEMPLATE.format("shap")) + + +__all__ = ["ShapleyImportanceEvaluator"] diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/integration/sklearn.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/integration/sklearn.py new file mode 100644 index 
0000000000000000000000000000000000000000..d7f30433b39334b5bb009871db89ced057581cc8 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/integration/sklearn.py @@ -0,0 +1,10 @@ +from optuna._imports import _INTEGRATION_IMPORT_ERROR_TEMPLATE + + +try: + from optuna_integration.sklearn import OptunaSearchCV +except ModuleNotFoundError: + raise ModuleNotFoundError(_INTEGRATION_IMPORT_ERROR_TEMPLATE.format("sklearn")) + + +__all__ = ["OptunaSearchCV"] diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/integration/skorch.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/integration/skorch.py new file mode 100644 index 0000000000000000000000000000000000000000..f147960401247caf7d891284f9efea7635817f41 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/integration/skorch.py @@ -0,0 +1,10 @@ +from optuna._imports import _INTEGRATION_IMPORT_ERROR_TEMPLATE + + +try: + from optuna_integration.skorch import SkorchPruningCallback +except ModuleNotFoundError: + raise ModuleNotFoundError(_INTEGRATION_IMPORT_ERROR_TEMPLATE.format("skorch")) + + +__all__ = ["SkorchPruningCallback"] diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/integration/tensorboard.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/integration/tensorboard.py new file mode 100644 index 0000000000000000000000000000000000000000..ad360d48861660917fd2b04516b631767c9ba822 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/integration/tensorboard.py @@ -0,0 +1,10 @@ +from optuna._imports import _INTEGRATION_IMPORT_ERROR_TEMPLATE + + +try: + from optuna_integration.tensorboard import TensorBoardCallback +except ModuleNotFoundError: + raise ModuleNotFoundError(_INTEGRATION_IMPORT_ERROR_TEMPLATE.format("tensorboard")) + + +__all__ = ["TensorBoardCallback"] diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/integration/tensorflow.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/integration/tensorflow.py new file mode 100644 index 0000000000000000000000000000000000000000..4e04699b38b1dbd98b240818213c49f039d98f37 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/integration/tensorflow.py @@ -0,0 +1,10 @@ +from optuna._imports import _INTEGRATION_IMPORT_ERROR_TEMPLATE + + +try: + from optuna_integration.tensorflow import TensorFlowPruningHook +except ModuleNotFoundError: + raise ModuleNotFoundError(_INTEGRATION_IMPORT_ERROR_TEMPLATE.format("tensorflow")) + + +__all__ = ["TensorFlowPruningHook"] diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/integration/tfkeras.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/integration/tfkeras.py new file mode 100644 index 0000000000000000000000000000000000000000..622b5a599f3c57428e538dcd379c7f4f00cfadb4 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/integration/tfkeras.py @@ -0,0 +1,10 @@ +from optuna._imports import _INTEGRATION_IMPORT_ERROR_TEMPLATE + + +try: + from optuna_integration.tfkeras import TFKerasPruningCallback +except ModuleNotFoundError: + raise 
ModuleNotFoundError(_INTEGRATION_IMPORT_ERROR_TEMPLATE.format("tfkeras")) + + +__all__ = ["TFKerasPruningCallback"] diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/integration/wandb.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/integration/wandb.py new file mode 100644 index 0000000000000000000000000000000000000000..19863e5da8eb3897b6e1c9004aa0571e5068c3a2 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/integration/wandb.py @@ -0,0 +1,10 @@ +from optuna._imports import _INTEGRATION_IMPORT_ERROR_TEMPLATE + + +try: + from optuna_integration.wandb import WeightsAndBiasesCallback +except ModuleNotFoundError: + raise ModuleNotFoundError(_INTEGRATION_IMPORT_ERROR_TEMPLATE.format("wandb")) + + +__all__ = ["WeightsAndBiasesCallback"] diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/integration/xgboost.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/integration/xgboost.py new file mode 100644 index 0000000000000000000000000000000000000000..acd9b4a2fc0554cbcf760ea21473b9bb2943434c --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/integration/xgboost.py @@ -0,0 +1,10 @@ +from optuna._imports import _INTEGRATION_IMPORT_ERROR_TEMPLATE + + +try: + from optuna_integration.xgboost import XGBoostPruningCallback +except ModuleNotFoundError: + raise ModuleNotFoundError(_INTEGRATION_IMPORT_ERROR_TEMPLATE.format("xgboost")) + + +__all__ = ["XGBoostPruningCallback"] diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/logging.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/logging.py new file mode 100644 index 0000000000000000000000000000000000000000..b5a07d67e34212414b7c8f87ed8e140fb2524576 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/logging.py @@ -0,0 +1,357 @@ +from __future__ import annotations + +import logging +from logging import CRITICAL +from logging import DEBUG +from logging import ERROR +from logging import FATAL +from logging import INFO +from logging import WARN +from logging import WARNING +import os +import sys +import threading + +import colorlog + + +__all__ = [ + "CRITICAL", + "DEBUG", + "ERROR", + "FATAL", + "INFO", + "WARN", + "WARNING", +] + +_lock: threading.Lock = threading.Lock() +_default_handler: logging.Handler | None = None + + +def create_default_formatter() -> logging.Formatter: + """Create a default formatter of log messages. + + This function is not supposed to be directly accessed by library users. 
+ """ + header = "[%(levelname)1.1s %(asctime)s]" + message = "%(message)s" + if _color_supported(): + return colorlog.ColoredFormatter( + f"%(log_color)s{header}%(reset)s {message}", + ) + return logging.Formatter(f"{header} {message}") + + +def _color_supported() -> bool: + """Detection of color support.""" + # NO_COLOR environment variable: + if os.environ.get("NO_COLOR", None): + return False + + if not hasattr(sys.stderr, "isatty") or not sys.stderr.isatty(): + return False + else: + return True + + +def _get_library_name() -> str: + return __name__.split(".")[0] + + +def _get_library_root_logger() -> logging.Logger: + return logging.getLogger(_get_library_name()) + + +def _configure_library_root_logger() -> None: + global _default_handler + + with _lock: + if _default_handler: + # This library has already configured the library root logger. + return + _default_handler = logging.StreamHandler() # Set sys.stderr as stream. + _default_handler.setFormatter(create_default_formatter()) + + # Apply our default configuration to the library root logger. + library_root_logger: logging.Logger = _get_library_root_logger() + library_root_logger.addHandler(_default_handler) + library_root_logger.setLevel(logging.INFO) + library_root_logger.propagate = False + + +def _reset_library_root_logger() -> None: + global _default_handler + + with _lock: + if not _default_handler: + return + + library_root_logger: logging.Logger = _get_library_root_logger() + library_root_logger.removeHandler(_default_handler) + library_root_logger.setLevel(logging.NOTSET) + _default_handler = None + + +def get_logger(name: str) -> logging.Logger: + """Return a logger with the specified name. + + This function is not supposed to be directly accessed by library users. + """ + + _configure_library_root_logger() + return logging.getLogger(name) + + +def get_verbosity() -> int: + """Return the current level for the Optuna's root logger. + + Example: + + Get the default verbosity level. + + .. testsetup:: + + def objective(trial): + x = trial.suggest_float("x", -100, 100) + y = trial.suggest_categorical("y", [-1, 0, 1]) + return x**2 + y + + .. testcode:: + + import optuna + + # The default verbosity level of Optuna is `optuna.logging.INFO`. + print(optuna.logging.get_verbosity()) + # 20 + print(optuna.logging.INFO) + # 20 + + # There are logs of the INFO level. + study = optuna.create_study() + study.optimize(objective, n_trials=5) + # [I 2021-10-31 05:35:17,232] A new study created ... + # [I 2021-10-31 05:35:17,238] Trial 0 finished with value: ... + # [I 2021-10-31 05:35:17,245] Trial 1 finished with value: ... + # ... + + .. testoutput:: + :hide: + + 20 + 20 + Returns: + Logging level, e.g., ``optuna.logging.DEBUG`` and ``optuna.logging.INFO``. + + .. note:: + Optuna has following logging levels: + + - ``optuna.logging.CRITICAL``, ``optuna.logging.FATAL`` + - ``optuna.logging.ERROR`` + - ``optuna.logging.WARNING``, ``optuna.logging.WARN`` + - ``optuna.logging.INFO`` + - ``optuna.logging.DEBUG`` + """ + + _configure_library_root_logger() + return _get_library_root_logger().getEffectiveLevel() + + +def set_verbosity(verbosity: int) -> None: + """Set the level for the Optuna's root logger. + + Example: + + Set the logging level ``optuna.logging.WARNING``. + + .. testsetup:: + + def objective(trial): + x = trial.suggest_int("x", -10, 10) + return x**2 + + .. testcode:: + + import optuna + + # There are INFO level logs. 
+ study = optuna.create_study() + study.optimize(objective, n_trials=10) + # [I 2021-10-31 02:59:35,088] Trial 0 finished with value: 16.0 ... + # [I 2021-10-31 02:59:35,091] Trial 1 finished with value: 1.0 ... + # [I 2021-10-31 02:59:35,096] Trial 2 finished with value: 1.0 ... + + # Setting the logging level WARNING, the INFO logs are suppressed. + optuna.logging.set_verbosity(optuna.logging.WARNING) + study.optimize(objective, n_trials=10) + + .. testcleanup:: + + optuna.logging.set_verbosity(optuna.logging.INFO) + + + Args: + verbosity: + Logging level, e.g., ``optuna.logging.DEBUG`` and ``optuna.logging.INFO``. + + .. note:: + Optuna has following logging levels: + + - ``optuna.logging.CRITICAL``, ``optuna.logging.FATAL`` + - ``optuna.logging.ERROR`` + - ``optuna.logging.WARNING``, ``optuna.logging.WARN`` + - ``optuna.logging.INFO`` + - ``optuna.logging.DEBUG`` + """ + + _configure_library_root_logger() + _get_library_root_logger().setLevel(verbosity) + + +def disable_default_handler() -> None: + """Disable the default handler of the Optuna's root logger. + + Example: + + Stop and then resume logging to :obj:`sys.stderr`. + + .. testsetup:: + + def objective(trial): + x = trial.suggest_float("x", -100, 100) + y = trial.suggest_categorical("y", [-1, 0, 1]) + return x**2 + y + + .. testcode:: + + import optuna + + study = optuna.create_study() + + # There are no logs in sys.stderr. + optuna.logging.disable_default_handler() + study.optimize(objective, n_trials=10) + + # There are logs in sys.stderr. + optuna.logging.enable_default_handler() + study.optimize(objective, n_trials=10) + # [I 2020-02-23 17:00:54,314] Trial 10 finished with value: ... + # [I 2020-02-23 17:00:54,356] Trial 11 finished with value: ... + # ... + + """ + + _configure_library_root_logger() + + assert _default_handler is not None + _get_library_root_logger().removeHandler(_default_handler) + + +def enable_default_handler() -> None: + """Enable the default handler of the Optuna's root logger. + + Please refer to the example shown in :func:`~optuna.logging.disable_default_handler()`. + """ + + _configure_library_root_logger() + + assert _default_handler is not None + _get_library_root_logger().addHandler(_default_handler) + + +def disable_propagation() -> None: + """Disable propagation of the library log outputs. + + Note that log propagation is disabled by default. You only need to use this function + to stop log propagation when you use :func:`~optuna.logging.enable_propagation()`. + + Example: + + Stop propagating logs to the root logger on the second optimize call. + + .. testsetup:: + + def objective(trial): + x = trial.suggest_float("x", -100, 100) + y = trial.suggest_categorical("y", [-1, 0, 1]) + return x**2 + y + + .. testcode:: + + import optuna + import logging + + optuna.logging.disable_default_handler() # Disable the default handler. + logger = logging.getLogger() + + logger.setLevel(logging.INFO) # Setup the root logger. + logger.addHandler(logging.FileHandler("foo.log", mode="w")) + + optuna.logging.enable_propagation() # Propagate logs to the root logger. + + study = optuna.create_study() + + logger.info("Logs from first optimize call") # The logs are saved in the logs file. + study.optimize(objective, n_trials=10) + + optuna.logging.disable_propagation() # Stop propogating logs to the root logger. + + logger.info("Logs from second optimize call") + # The new logs for second optimize call are not saved. 
+ study.optimize(objective, n_trials=10) + + with open("foo.log") as f: + assert f.readline().startswith("A new study created") + assert f.readline() == "Logs from first optimize call\\n" + # Check for logs after second optimize call. + assert f.read().split("Logs from second optimize call\\n")[-1] == "" + + """ + + _configure_library_root_logger() + _get_library_root_logger().propagate = False + + +def enable_propagation() -> None: + """Enable propagation of the library log outputs. + + Please disable the Optuna's default handler to prevent double logging if the root logger has + been configured. + + Example: + + Propagate all log output to the root logger in order to save them to the file. + + .. testsetup:: + + def objective(trial): + x = trial.suggest_float("x", -100, 100) + y = trial.suggest_categorical("y", [-1, 0, 1]) + return x**2 + y + + .. testcode:: + + import optuna + import logging + + logger = logging.getLogger() + + logger.setLevel(logging.INFO) # Setup the root logger. + logger.addHandler(logging.FileHandler("foo.log", mode="w")) + + optuna.logging.enable_propagation() # Propagate logs to the root logger. + optuna.logging.disable_default_handler() # Stop showing logs in sys.stderr. + + study = optuna.create_study() + + logger.info("Start optimization.") + study.optimize(objective, n_trials=10) + + with open("foo.log") as f: + assert f.readline().startswith("A new study created") + assert f.readline() == "Start optimization.\\n" + + """ + + _configure_library_root_logger() + _get_library_root_logger().propagate = True diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/multi_objective/__init__.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/multi_objective/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f9293782aa4abfee78cb469a1df07925945cd83e --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/multi_objective/__init__.py @@ -0,0 +1,10 @@ +# TODO(nabenabe0928): Come up with any ways to remove this file. +# NOTE(nabenabe0928): Discuss when to remove this class. +migration_url = "https://github.com/optuna/optuna/discussions/5573" +raise ModuleNotFoundError( + "\nThe features in `optuna.multi_objective` were integrated with the" + "\nsingle objective optimization API and `optuna.multi_objective` were" + "\ndeleted at v4.0.0. Please update your code based on the migration guide" + f"\nat {migration_url}" + "\nor downgrade your Optuna version." 
+) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/progress_bar.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/progress_bar.py new file mode 100644 index 0000000000000000000000000000000000000000..63e0e5b8147e6e5e24a854bbdd41ae2bf30053a6 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/progress_bar.py @@ -0,0 +1,125 @@ +from __future__ import annotations + +import logging +from typing import Any +from typing import TYPE_CHECKING +import warnings + +from tqdm.auto import tqdm + +from optuna import logging as optuna_logging + + +if TYPE_CHECKING: + from optuna.study import Study + +_tqdm_handler: _TqdmLoggingHandler | None = None + + +# Reference: https://gist.github.com/hvy/8b80c2cedf02b15c24f85d1fa17ebe02 +class _TqdmLoggingHandler(logging.StreamHandler): + def emit(self, record: Any) -> None: + try: + msg = self.format(record) + tqdm.write(msg) + self.flush() + except (KeyboardInterrupt, SystemExit): + raise + except Exception: + self.handleError(record) + + +class _ProgressBar: + """Progress Bar implementation for :func:`~optuna.study.Study.optimize` on the top of `tqdm`. + + Args: + is_valid: + Whether to show progress bars in :func:`~optuna.study.Study.optimize`. + n_trials: + The number of trials. + timeout: + Stop study after the given number of second(s). + """ + + def __init__( + self, + is_valid: bool, + n_trials: int | None = None, + timeout: float | None = None, + ) -> None: + if is_valid and n_trials is None and timeout is None: + warnings.warn("Progress bar won't be displayed because n_trials and timeout are None.") + + self._is_valid = is_valid and (n_trials or timeout) is not None + self._n_trials = n_trials + self._timeout = timeout + self._last_elapsed_seconds = 0.0 + + if self._is_valid: + if self._n_trials is not None: + self._progress_bar = tqdm(total=self._n_trials) + elif self._timeout is not None: + total = tqdm.format_interval(self._timeout) + fmt = "{desc} {percentage:3.0f}%|{bar}| {elapsed}/" + total + self._progress_bar = tqdm(total=self._timeout, bar_format=fmt) + else: + assert False + + global _tqdm_handler + + _tqdm_handler = _TqdmLoggingHandler() + _tqdm_handler.setLevel(logging.INFO) + _tqdm_handler.setFormatter(optuna_logging.create_default_formatter()) + optuna_logging.disable_default_handler() + optuna_logging._get_library_root_logger().addHandler(_tqdm_handler) + + def update(self, elapsed_seconds: float, study: Study) -> None: + """Update the progress bars if ``is_valid`` is :obj:`True`. + + Args: + elapsed_seconds: + The time past since :func:`~optuna.study.Study.optimize` started. + study: + The current study object. + """ + + if self._is_valid: + if not study._is_multi_objective(): + # Not updating the progress bar when there are no complete trial. + try: + msg = ( + f"Best trial: {study.best_trial.number}. " + f"Best value: {study.best_value:.6g}" + ) + + self._progress_bar.set_description(msg) + except ValueError: + pass + + if self._n_trials is not None: + self._progress_bar.update(1) + if self._timeout is not None: + self._progress_bar.set_postfix_str( + "{:.02f}/{} seconds".format(elapsed_seconds, self._timeout) + ) + + elif self._timeout is not None: + time_diff = elapsed_seconds - self._last_elapsed_seconds + if elapsed_seconds > self._timeout: + # Clip elapsed time to avoid tqdm warnings. 
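+                    # Illustrative arithmetic: with timeout=60 and elapsed_seconds=61.5,
+                    # the 1.5 s overshoot is subtracted from time_diff so the cumulative
+                    # updates never exceed the bar's total.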
+ time_diff -= elapsed_seconds - self._timeout + + self._progress_bar.update(time_diff) + self._last_elapsed_seconds = elapsed_seconds + + else: + assert False + + def close(self) -> None: + """Close progress bars.""" + + if self._is_valid: + self._progress_bar.close() + assert _tqdm_handler is not None + optuna_logging._get_library_root_logger().removeHandler(_tqdm_handler) + optuna_logging.enable_default_handler() diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/pruners/__init__.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/pruners/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ae189d185dfe4e39abbfd9cd87d0153736b8bfd6 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/pruners/__init__.py @@ -0,0 +1,38 @@ +from typing import TYPE_CHECKING + +from optuna.pruners._base import BasePruner +from optuna.pruners._hyperband import HyperbandPruner +from optuna.pruners._median import MedianPruner +from optuna.pruners._nop import NopPruner +from optuna.pruners._patient import PatientPruner +from optuna.pruners._percentile import PercentilePruner +from optuna.pruners._successive_halving import SuccessiveHalvingPruner +from optuna.pruners._threshold import ThresholdPruner +from optuna.pruners._wilcoxon import WilcoxonPruner + + +if TYPE_CHECKING: + from optuna.study import Study + from optuna.trial import FrozenTrial + + +__all__ = [ + "BasePruner", + "HyperbandPruner", + "MedianPruner", + "NopPruner", + "PatientPruner", + "PercentilePruner", + "SuccessiveHalvingPruner", + "ThresholdPruner", + "WilcoxonPruner", +] + + +def _filter_study(study: "Study", trial: "FrozenTrial") -> "Study": + if isinstance(study.pruner, HyperbandPruner): + # Create `_BracketStudy` to use trials that have the same bracket id. + pruner: HyperbandPruner = study.pruner + return pruner._create_bracket_study(study, pruner._get_bracket_id(study, trial)) + else: + return study diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/pruners/_base.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/pruners/_base.py new file mode 100644 index 0000000000000000000000000000000000000000..6230cf1b2f60daa3a4569b091029fd5a53242a1d --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/pruners/_base.py @@ -0,0 +1,28 @@ +import abc + +import optuna + + +class BasePruner(abc.ABC): + """Base class for pruners.""" + + @abc.abstractmethod + def prune(self, study: "optuna.study.Study", trial: "optuna.trial.FrozenTrial") -> bool: + """Judge whether the trial should be pruned based on the reported values. + + Note that this method is not supposed to be called by library users. Instead, + :func:`optuna.trial.Trial.report` and :func:`optuna.trial.Trial.should_prune` provide + user interfaces to implement pruning mechanism in an objective function. + + Args: + study: + Study object of the target study. + trial: + FrozenTrial object of the target trial. + Take a copy before modifying this object. + + Returns: + A boolean value representing whether the trial should be pruned. 
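+
+        Example:
+            A minimal sketch of a custom pruner built on this interface (the class
+            name and pruning rule below are illustrative only)::
+
+                import optuna
+
+
+                class EveryOtherStepPruner(optuna.pruners.BasePruner):
+                    def prune(self, study, trial):
+                        step = trial.last_step
+                        # Prune whenever the most recently reported step is odd.
+                        return step is not None and step % 2 == 1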
+ """ + + raise NotImplementedError diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/pruners/_hyperband.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/pruners/_hyperband.py new file mode 100644 index 0000000000000000000000000000000000000000..fcd26af285eb712b005be0c3efdb0e30a49337fd --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/pruners/_hyperband.py @@ -0,0 +1,324 @@ +from __future__ import annotations + +import binascii +from collections.abc import Container +import math + +import optuna +from optuna import logging +from optuna.pruners._base import BasePruner +from optuna.pruners._successive_halving import SuccessiveHalvingPruner +from optuna.trial._state import TrialState + + +_logger = logging.get_logger(__name__) + + +class HyperbandPruner(BasePruner): + """Pruner using Hyperband. + + As SuccessiveHalving (SHA) requires the number of configurations + :math:`n` as its hyperparameter. For a given finite budget :math:`B`, + all the configurations have the resources of :math:`B \\over n` on average. + As you can see, there will be a trade-off of :math:`B` and :math:`B \\over n`. + `Hyperband `__ attacks this trade-off + by trying different :math:`n` values for a fixed budget. + + .. note:: + * In the Hyperband paper, the counterpart of :class:`~optuna.samplers.RandomSampler` + is used. + * Optuna uses :class:`~optuna.samplers.TPESampler` by default. + * `The benchmark result + `__ + shows that :class:`optuna.pruners.HyperbandPruner` supports both samplers. + + .. note:: + If you use ``HyperbandPruner`` with :class:`~optuna.samplers.TPESampler`, + it's recommended to consider setting larger ``n_trials`` or ``timeout`` to make full use of + the characteristics of :class:`~optuna.samplers.TPESampler` + because :class:`~optuna.samplers.TPESampler` uses some (by default, :math:`10`) + :class:`~optuna.trial.Trial`\\ s for its startup. + + As Hyperband runs multiple :class:`~optuna.pruners.SuccessiveHalvingPruner` and collects + trials based on the current :class:`~optuna.trial.Trial`\\ 's bracket ID, each bracket + needs to observe more than :math:`10` :class:`~optuna.trial.Trial`\\ s + for :class:`~optuna.samplers.TPESampler` to adapt its search space. + + Thus, for example, if ``HyperbandPruner`` has :math:`4` pruners in it, + at least :math:`4 \\times 10` trials are consumed for startup. + + .. note:: + Hyperband has several :class:`~optuna.pruners.SuccessiveHalvingPruner`\\ s. Each + :class:`~optuna.pruners.SuccessiveHalvingPruner` is referred to as "bracket" in the + original paper. The number of brackets is an important factor to control the early + stopping behavior of Hyperband and is automatically determined by ``min_resource``, + ``max_resource`` and ``reduction_factor`` as + :math:`\\mathrm{The\\ number\\ of\\ brackets} = + \\mathrm{floor}(\\log_{\\texttt{reduction}\\_\\texttt{factor}} + (\\frac{\\texttt{max}\\_\\texttt{resource}}{\\texttt{min}\\_\\texttt{resource}})) + 1`. + Please set ``reduction_factor`` so that the number of brackets is not too large (about 4 – + 6 in most use cases). Please see Section 3.6 of the `original paper + `__ for the detail. + + .. note:: + ``HyperbandPruner`` computes bracket ID for each trial with a + function taking ``study_name`` of :class:`~optuna.study.Study` and + :attr:`~optuna.trial.Trial.number`. Please specify ``study_name`` + to make the pruning algorithm reproducible. 
+ + Example: + + We minimize an objective function with Hyperband pruning algorithm. + + .. testcode:: + + import numpy as np + from sklearn.datasets import load_iris + from sklearn.linear_model import SGDClassifier + from sklearn.model_selection import train_test_split + + import optuna + + X, y = load_iris(return_X_y=True) + X_train, X_valid, y_train, y_valid = train_test_split(X, y) + classes = np.unique(y) + n_train_iter = 100 + + + def objective(trial): + alpha = trial.suggest_float("alpha", 0.0, 1.0) + clf = SGDClassifier(alpha=alpha) + + for step in range(n_train_iter): + clf.partial_fit(X_train, y_train, classes=classes) + + intermediate_value = clf.score(X_valid, y_valid) + trial.report(intermediate_value, step) + + if trial.should_prune(): + raise optuna.TrialPruned() + + return clf.score(X_valid, y_valid) + + + study = optuna.create_study( + direction="maximize", + pruner=optuna.pruners.HyperbandPruner( + min_resource=1, max_resource=n_train_iter, reduction_factor=3 + ), + ) + study.optimize(objective, n_trials=20) + + Args: + min_resource: + A parameter for specifying the minimum resource allocated to a trial noted as :math:`r` + in the paper. A smaller :math:`r` will give a result faster, but a larger + :math:`r` will give a better guarantee of successful judging between configurations. + See the details for :class:`~optuna.pruners.SuccessiveHalvingPruner`. + max_resource: + A parameter for specifying the maximum resource allocated to a trial. :math:`R` in the + paper corresponds to ``max_resource / min_resource``. This value represents and should + match the maximum iteration steps (e.g., the number of epochs for neural networks). + When this argument is "auto", the maximum resource is estimated according to the + completed trials. The default value of this argument is "auto". + + .. note:: + With "auto", the maximum resource will be the largest step reported by + :meth:`~optuna.trial.Trial.report` in the first, or one of the first if trained in + parallel, completed trial. No trials will be pruned until the maximum resource is + determined. + + .. note:: + If the step of the last intermediate value may change with each trial, please + manually specify the maximum possible step to ``max_resource``. + reduction_factor: + A parameter for specifying reduction factor of promotable trials noted as + :math:`\\eta` in the paper. + See the details for :class:`~optuna.pruners.SuccessiveHalvingPruner`. + bootstrap_count: + Parameter specifying the number of trials required in a rung before any trial can be + promoted. Incompatible with ``max_resource`` is ``"auto"``. + See the details for :class:`~optuna.pruners.SuccessiveHalvingPruner`. + """ + + def __init__( + self, + min_resource: int = 1, + max_resource: str | int = "auto", + reduction_factor: int = 3, + bootstrap_count: int = 0, + ) -> None: + self._min_resource = min_resource + self._max_resource = max_resource + self._reduction_factor = reduction_factor + self._pruners: list[SuccessiveHalvingPruner] = [] + self._bootstrap_count = bootstrap_count + self._total_trial_allocation_budget = 0 + self._trial_allocation_budgets: list[int] = [] + self._n_brackets: int | None = None + + if not isinstance(self._max_resource, int) and self._max_resource != "auto": + raise ValueError( + "The 'max_resource' should be integer or 'auto'. 
" + "But max_resource = {}".format(self._max_resource) + ) + + if self._bootstrap_count > 0 and self._max_resource == "auto": + raise ValueError( + "bootstrap_count > 0 and max_resource == 'auto' " + "are mutually incompatible, bootstrap_count is {}".format(self._bootstrap_count) + ) + + def prune(self, study: "optuna.study.Study", trial: "optuna.trial.FrozenTrial") -> bool: + if len(self._pruners) == 0: + self._try_initialization(study) + if len(self._pruners) == 0: + return False + + bracket_id = self._get_bracket_id(study, trial) + _logger.debug("{}th bracket is selected".format(bracket_id)) + bracket_study = self._create_bracket_study(study, bracket_id) + return self._pruners[bracket_id].prune(bracket_study, trial) + + def _try_initialization(self, study: "optuna.study.Study") -> None: + if self._max_resource == "auto": + trials = study.get_trials(deepcopy=False, states=(TrialState.COMPLETE,)) + n_steps = [t.last_step for t in trials if t.last_step is not None] + + if not n_steps: + return + + self._max_resource = max(n_steps) + 1 + + assert isinstance(self._max_resource, int) + + if self._n_brackets is None: + # In the original paper http://www.jmlr.org/papers/volume18/16-558/16-558.pdf, the + # inputs of Hyperband are `R`: max resource and `\eta`: reduction factor. The + # number of brackets (this is referred as `s_{max} + 1` in the paper) is calculated + # by s_{max} + 1 = \floor{\log_{\eta} (R)} + 1 in Algorithm 1 of the original paper. + # In this implementation, we combine this formula and that of ASHA paper + # https://arxiv.org/abs/1502.07943 as + # `n_brackets = floor(log_{reduction_factor}(max_resource / min_resource)) + 1` + self._n_brackets = ( + math.floor( + math.log(self._max_resource / self._min_resource, self._reduction_factor) + ) + + 1 + ) + + _logger.debug("Hyperband has {} brackets".format(self._n_brackets)) + + for bracket_id in range(self._n_brackets): + trial_allocation_budget = self._calculate_trial_allocation_budget(bracket_id) + self._total_trial_allocation_budget += trial_allocation_budget + self._trial_allocation_budgets.append(trial_allocation_budget) + + pruner = SuccessiveHalvingPruner( + min_resource=self._min_resource, + reduction_factor=self._reduction_factor, + min_early_stopping_rate=bracket_id, + bootstrap_count=self._bootstrap_count, + ) + self._pruners.append(pruner) + + def _calculate_trial_allocation_budget(self, bracket_id: int) -> int: + """Compute the trial allocated budget for a bracket of ``bracket_id``. + + In the `original paper `, the + number of trials per one bracket is referred as ``n`` in Algorithm 1. Since we do not know + the total number of trials in the leaning scheme of Optuna, we calculate the ratio of the + number of trials here instead. + """ + + assert self._n_brackets is not None + s = self._n_brackets - 1 - bracket_id + return math.ceil(self._n_brackets * (self._reduction_factor**s) / (s + 1)) + + def _get_bracket_id( + self, study: "optuna.study.Study", trial: "optuna.trial.FrozenTrial" + ) -> int: + """Compute the index of bracket for a trial of ``trial_number``. + + The index of a bracket is noted as :math:`s` in + `Hyperband paper `__. 
+ """ + + if len(self._pruners) == 0: + return 0 + + assert self._n_brackets is not None + n = ( + binascii.crc32("{}_{}".format(study.study_name, trial.number).encode()) + % self._total_trial_allocation_budget + ) + for bracket_id in range(self._n_brackets): + n -= self._trial_allocation_budgets[bracket_id] + if n < 0: + return bracket_id + + assert False, "This line should be unreachable." + + def _create_bracket_study( + self, study: "optuna.study.Study", bracket_id: int + ) -> "optuna.study.Study": + # This class is assumed to be passed to + # `SuccessiveHalvingPruner.prune` in which `get_trials`, + # `direction`, and `storage` are used. + # But for safety, prohibit the other attributes explicitly. + class _BracketStudy(optuna.study.Study): + _VALID_ATTRS = ( + "get_trials", + "_get_trials", + "directions", + "direction", + "_directions", + "_storage", + "_study_id", + "pruner", + "study_name", + "_bracket_id", + "sampler", + "trials", + "_is_multi_objective", + "stop", + "_study", + "_thread_local", + ) + + def __init__( + self, study: "optuna.study.Study", pruner: HyperbandPruner, bracket_id: int + ) -> None: + super().__init__( + study_name=study.study_name, + storage=study._storage, + sampler=study.sampler, + pruner=pruner, + ) + self._study = study + self._bracket_id = bracket_id + + def get_trials( + self, + deepcopy: bool = True, + states: Container[TrialState] | None = None, + ) -> list["optuna.trial.FrozenTrial"]: + trials = super()._get_trials(deepcopy=deepcopy, states=states) + pruner = self.pruner + assert isinstance(pruner, HyperbandPruner) + return [t for t in trials if pruner._get_bracket_id(self, t) == self._bracket_id] + + def stop(self) -> None: + # `stop` should stop the original study's optimization loop instead of + # `_BracketStudy`. + self._study.stop() + + def __getattribute__(self, attr_name): # type: ignore + if attr_name not in _BracketStudy._VALID_ATTRS: + raise AttributeError( + "_BracketStudy does not have attribute of '{}'".format(attr_name) + ) + else: + return object.__getattribute__(self, attr_name) + + return _BracketStudy(study, self, bracket_id) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/pruners/_median.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/pruners/_median.py new file mode 100644 index 0000000000000000000000000000000000000000..8ebcd6e0bfa64f08cfa3f6e31091b73586f54965 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/pruners/_median.py @@ -0,0 +1,86 @@ +from optuna.pruners._percentile import PercentilePruner + + +class MedianPruner(PercentilePruner): + """Pruner using the median stopping rule. + + Prune if the trial's best intermediate result is worse than median of intermediate results of + previous trials at the same step. It stops unpromising trials early based on the + intermediate results compared against the median of previous completed trials. + + The pruner handles NaN values in the following manner: + 1. If all intermediate values of the current trial are NaN, the trial will be pruned. + 2. During the median calculation across completed trials, NaN values are ignored. + Only valid numeric values are considered. + + Example: + + We minimize an objective function with the median stopping rule. + + .. 
testcode:: + + import numpy as np + from sklearn.datasets import load_iris + from sklearn.linear_model import SGDClassifier + from sklearn.model_selection import train_test_split + + import optuna + + X, y = load_iris(return_X_y=True) + X_train, X_valid, y_train, y_valid = train_test_split(X, y) + classes = np.unique(y) + + + def objective(trial): + alpha = trial.suggest_float("alpha", 0.0, 1.0) + clf = SGDClassifier(alpha=alpha) + n_train_iter = 100 + + for step in range(n_train_iter): + clf.partial_fit(X_train, y_train, classes=classes) + + intermediate_value = clf.score(X_valid, y_valid) + trial.report(intermediate_value, step) + + if trial.should_prune(): + raise optuna.TrialPruned() + + return clf.score(X_valid, y_valid) + + + study = optuna.create_study( + direction="maximize", + pruner=optuna.pruners.MedianPruner( + n_startup_trials=5, n_warmup_steps=30, interval_steps=10 + ), + ) + study.optimize(objective, n_trials=20) + + Args: + n_startup_trials: + Pruning is disabled until the given number of trials finish in the same study. + n_warmup_steps: + Pruning is disabled until the trial exceeds the given number of step. Note that + this feature assumes that ``step`` starts at zero. + interval_steps: + Interval in number of steps between the pruning checks, offset by the warmup steps. + If no value has been reported at the time of a pruning check, that particular check + will be postponed until a value is reported. + n_min_trials: + Minimum number of reported trial results at a step to judge whether to prune. + If the number of reported intermediate values from all trials at the current step + is less than ``n_min_trials``, the trial will not be pruned. This can be used to ensure + that a minimum number of trials are run to completion without being pruned. + """ + + def __init__( + self, + n_startup_trials: int = 5, + n_warmup_steps: int = 0, + interval_steps: int = 1, + *, + n_min_trials: int = 1, + ) -> None: + super().__init__( + 50.0, n_startup_trials, n_warmup_steps, interval_steps, n_min_trials=n_min_trials + ) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/pruners/_nop.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/pruners/_nop.py new file mode 100644 index 0000000000000000000000000000000000000000..4d1c155887b15eb4779444d3446ddb06929953e3 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/pruners/_nop.py @@ -0,0 +1,47 @@ +import optuna +from optuna.pruners import BasePruner + + +class NopPruner(BasePruner): + """Pruner which never prunes trials. + + Example: + + .. testcode:: + + import numpy as np + from sklearn.datasets import load_iris + from sklearn.linear_model import SGDClassifier + from sklearn.model_selection import train_test_split + + import optuna + + X, y = load_iris(return_X_y=True) + X_train, X_valid, y_train, y_valid = train_test_split(X, y) + classes = np.unique(y) + + + def objective(trial): + alpha = trial.suggest_float("alpha", 0.0, 1.0) + clf = SGDClassifier(alpha=alpha) + n_train_iter = 100 + + for step in range(n_train_iter): + clf.partial_fit(X_train, y_train, classes=classes) + + intermediate_value = clf.score(X_valid, y_valid) + trial.report(intermediate_value, step) + + if trial.should_prune(): + assert False, "should_prune() should always return False with this pruner." 
+ raise optuna.TrialPruned() + + return clf.score(X_valid, y_valid) + + + study = optuna.create_study(direction="maximize", pruner=optuna.pruners.NopPruner()) + study.optimize(objective, n_trials=20) + """ + + def prune(self, study: "optuna.study.Study", trial: "optuna.trial.FrozenTrial") -> bool: + return False diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/pruners/_patient.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/pruners/_patient.py new file mode 100644 index 0000000000000000000000000000000000000000..51a160af8d83cc056fb4c22a2e387cff7617f18d --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/pruners/_patient.py @@ -0,0 +1,130 @@ +from __future__ import annotations + +import numpy as np + +import optuna +from optuna._experimental import experimental_class +from optuna.pruners import BasePruner +from optuna.study._study_direction import StudyDirection + + +@experimental_class("2.8.0") +class PatientPruner(BasePruner): + """Pruner which wraps another pruner with tolerance. + + This pruner monitors intermediate values in a trial and prunes the trial if the improvement in + the intermediate values after a patience period is less than a threshold. + + The pruner handles NaN values in the following manner: + 1. If all intermediate values before or during the patient period are NaN, the trial will + not be pruned + 2. During the pruning calculations, NaN values are ignored. Only valid numeric values are + considered. + + Example: + + .. testcode:: + + import numpy as np + from sklearn.datasets import load_iris + from sklearn.linear_model import SGDClassifier + from sklearn.model_selection import train_test_split + + import optuna + + X, y = load_iris(return_X_y=True) + X_train, X_valid, y_train, y_valid = train_test_split(X, y) + classes = np.unique(y) + + + def objective(trial): + alpha = trial.suggest_float("alpha", 0.0, 1.0) + clf = SGDClassifier(alpha=alpha) + n_train_iter = 100 + + for step in range(n_train_iter): + clf.partial_fit(X_train, y_train, classes=classes) + + intermediate_value = clf.score(X_valid, y_valid) + trial.report(intermediate_value, step) + + if trial.should_prune(): + raise optuna.TrialPruned() + + return clf.score(X_valid, y_valid) + + + study = optuna.create_study( + direction="maximize", + pruner=optuna.pruners.PatientPruner(optuna.pruners.MedianPruner(), patience=1), + ) + study.optimize(objective, n_trials=20) + + Args: + wrapped_pruner: + Wrapped pruner to perform pruning when :class:`~optuna.pruners.PatientPruner` allows a + trial to be pruned. If it is :obj:`None`, this pruner is equivalent to + early-stopping taken the intermediate values in the individual trial. + patience: + Pruning is disabled until the objective doesn't improve for + ``patience`` consecutive steps. + min_delta: + Tolerance value to check whether or not the objective improves. + This value should be non-negative. 
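The ``wrapped_pruner=None`` case described above amounts to plain per-trial early stopping; a minimal sketch of that configuration (the ``patience`` and ``min_delta`` values are chosen only for illustration)::

    import optuna

    # With no wrapped pruner, a trial is pruned when the best value among its last
    # ``patience + 1`` reports fails to improve on the best of its earlier reports
    # by more than ``min_delta``.
    pruner = optuna.pruners.PatientPruner(None, patience=3, min_delta=0.01)
    study = optuna.create_study(direction="maximize", pruner=pruner)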
+ + """ + + def __init__( + self, wrapped_pruner: BasePruner | None, patience: int, min_delta: float = 0.0 + ) -> None: + if patience < 0: + raise ValueError(f"patience cannot be negative but got {patience}.") + + if min_delta < 0: + raise ValueError(f"min_delta cannot be negative but got {min_delta}.") + + self._wrapped_pruner = wrapped_pruner + self._patience = patience + self._min_delta = min_delta + + def prune(self, study: "optuna.study.Study", trial: "optuna.trial.FrozenTrial") -> bool: + step = trial.last_step + if step is None: + return False + + intermediate_values = trial.intermediate_values + steps = np.asarray(list(intermediate_values.keys())) + + # Do not prune if number of step to determine are insufficient. + if steps.size <= self._patience + 1: + return False + + steps.sort() + # This is the score patience steps ago + steps_before_patience = steps[: -self._patience - 1] + scores_before_patience = np.asarray( + list(intermediate_values[step] for step in steps_before_patience) + ) + # And these are the scores after that + steps_after_patience = steps[-self._patience - 1 :] + scores_after_patience = np.asarray( + list(intermediate_values[step] for step in steps_after_patience) + ) + + direction = study.direction + if direction == StudyDirection.MINIMIZE: + maybe_prune = np.nanmin(scores_before_patience) + self._min_delta < np.nanmin( + scores_after_patience + ) + else: + maybe_prune = np.nanmax(scores_before_patience) - self._min_delta > np.nanmax( + scores_after_patience + ) + + if maybe_prune: + if self._wrapped_pruner is not None: + return self._wrapped_pruner.prune(study, trial) + else: + return True + else: + return False diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/pruners/_percentile.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/pruners/_percentile.py new file mode 100644 index 0000000000000000000000000000000000000000..805e65dee01164d8983830f6a7da1f17e29aaf17 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/pruners/_percentile.py @@ -0,0 +1,208 @@ +from __future__ import annotations + +from collections.abc import KeysView +import functools +import math + +import numpy as np + +import optuna +from optuna.pruners import BasePruner +from optuna.study._study_direction import StudyDirection +from optuna.trial._state import TrialState + + +def _get_best_intermediate_result_over_steps( + trial: "optuna.trial.FrozenTrial", direction: StudyDirection +) -> float: + values = np.asarray(list(trial.intermediate_values.values()), dtype=float) + if direction == StudyDirection.MAXIMIZE: + return np.nanmax(values) + return np.nanmin(values) + + +def _get_percentile_intermediate_result_over_trials( + completed_trials: list["optuna.trial.FrozenTrial"], + direction: StudyDirection, + step: int, + percentile: float, + n_min_trials: int, +) -> float: + if len(completed_trials) == 0: + raise ValueError("No trials have been completed.") + + intermediate_values = [ + t.intermediate_values[step] for t in completed_trials if step in t.intermediate_values + ] + + if len(intermediate_values) < n_min_trials: + return math.nan + + if direction == StudyDirection.MAXIMIZE: + percentile = 100 - percentile + + return float( + np.nanpercentile( + np.array(intermediate_values, dtype=float), + percentile, + ) + ) + + +def _is_first_in_interval_step( + step: int, intermediate_steps: KeysView[int], n_warmup_steps: int, interval_steps: int +) -> bool: + 
nearest_lower_pruning_step = ( + step - n_warmup_steps + ) // interval_steps * interval_steps + n_warmup_steps + assert nearest_lower_pruning_step >= 0 + + # `intermediate_steps` may not be sorted so we must go through all elements. + second_last_step = functools.reduce( + lambda second_last_step, s: s if s > second_last_step and s != step else second_last_step, + intermediate_steps, + -1, + ) + + return second_last_step < nearest_lower_pruning_step + + +class PercentilePruner(BasePruner): + """Pruner to keep the specified percentile of the trials. + + Prune if the best intermediate value is in the bottom percentile among trials at the same step. + + Example: + + .. testcode:: + + import numpy as np + from sklearn.datasets import load_iris + from sklearn.linear_model import SGDClassifier + from sklearn.model_selection import train_test_split + + import optuna + + X, y = load_iris(return_X_y=True) + X_train, X_valid, y_train, y_valid = train_test_split(X, y) + classes = np.unique(y) + + + def objective(trial): + alpha = trial.suggest_float("alpha", 0.0, 1.0) + clf = SGDClassifier(alpha=alpha) + n_train_iter = 100 + + for step in range(n_train_iter): + clf.partial_fit(X_train, y_train, classes=classes) + + intermediate_value = clf.score(X_valid, y_valid) + trial.report(intermediate_value, step) + + if trial.should_prune(): + raise optuna.TrialPruned() + + return clf.score(X_valid, y_valid) + + + study = optuna.create_study( + direction="maximize", + pruner=optuna.pruners.PercentilePruner( + 25.0, n_startup_trials=5, n_warmup_steps=30, interval_steps=10 + ), + ) + study.optimize(objective, n_trials=20) + + Args: + percentile: + Percentile which must be between 0 and 100 inclusive + (e.g., When given 25.0, top of 25th percentile trials are kept). + n_startup_trials: + Pruning is disabled until the given number of trials finish in the same study. + n_warmup_steps: + Pruning is disabled until the trial exceeds the given number of step. Note that + this feature assumes that ``step`` starts at zero. + interval_steps: + Interval in number of steps between the pruning checks, offset by the warmup steps. + If no value has been reported at the time of a pruning check, that particular check + will be postponed until a value is reported. Value must be at least 1. + n_min_trials: + Minimum number of reported trial results at a step to judge whether to prune. + If the number of reported intermediate values from all trials at the current step + is less than ``n_min_trials``, the trial will not be pruned. This can be used to ensure + that a minimum number of trials are run to completion without being pruned. 
+ """ + + def __init__( + self, + percentile: float, + n_startup_trials: int = 5, + n_warmup_steps: int = 0, + interval_steps: int = 1, + *, + n_min_trials: int = 1, + ) -> None: + if not 0.0 <= percentile <= 100: + raise ValueError( + "Percentile must be between 0 and 100 inclusive but got {}.".format(percentile) + ) + if n_startup_trials < 0: + raise ValueError( + "Number of startup trials cannot be negative but got {}.".format(n_startup_trials) + ) + if n_warmup_steps < 0: + raise ValueError( + "Number of warmup steps cannot be negative but got {}.".format(n_warmup_steps) + ) + if interval_steps < 1: + raise ValueError( + "Pruning interval steps must be at least 1 but got {}.".format(interval_steps) + ) + if n_min_trials < 1: + raise ValueError( + "Number of trials for pruning must be at least 1 but got {}.".format(n_min_trials) + ) + + self._percentile = percentile + self._n_startup_trials = n_startup_trials + self._n_warmup_steps = n_warmup_steps + self._interval_steps = interval_steps + self._n_min_trials = n_min_trials + + def prune(self, study: "optuna.study.Study", trial: "optuna.trial.FrozenTrial") -> bool: + completed_trials = study.get_trials(deepcopy=False, states=(TrialState.COMPLETE,)) + n_trials = len(completed_trials) + + if n_trials == 0: + return False + + if n_trials < self._n_startup_trials: + return False + + step = trial.last_step + if step is None: + return False + + n_warmup_steps = self._n_warmup_steps + if step < n_warmup_steps: + return False + + if not _is_first_in_interval_step( + step, trial.intermediate_values.keys(), n_warmup_steps, self._interval_steps + ): + return False + + direction = study.direction + best_intermediate_result = _get_best_intermediate_result_over_steps(trial, direction) + if math.isnan(best_intermediate_result): + return True + + p = _get_percentile_intermediate_result_over_trials( + completed_trials, direction, step, self._percentile, self._n_min_trials + ) + if math.isnan(p): + return False + + if direction == StudyDirection.MAXIMIZE: + return best_intermediate_result < p + return best_intermediate_result > p diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/pruners/_successive_halving.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/pruners/_successive_halving.py new file mode 100644 index 0000000000000000000000000000000000000000..6a7480cbe359315d6120eccb70df3275580a03ad --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/pruners/_successive_halving.py @@ -0,0 +1,267 @@ +from __future__ import annotations + +import math + +import optuna +from optuna.pruners._base import BasePruner +from optuna.study._study_direction import StudyDirection +from optuna.trial._state import TrialState + + +class SuccessiveHalvingPruner(BasePruner): + """Pruner using Asynchronous Successive Halving Algorithm. + + `Successive Halving `__ is a bandit-based + algorithm to identify the best one among multiple configurations. This class implements an + asynchronous version of Successive Halving. Please refer to the paper of + `Asynchronous Successive Halving `__ for detailed descriptions. + + Note that, this class does not take care of the parameter for the maximum + resource, referred to as :math:`R` in the paper. The maximum resource allocated to a trial is + typically limited inside the objective function (e.g., ``step`` number in `simple_pruning.py + `__, + ``EPOCH`` number in `chainer_integration.py + `__). + + .. 
seealso:: + Please refer to :meth:`~optuna.trial.Trial.report`. + + Example: + + We minimize an objective function with ``SuccessiveHalvingPruner``. + + .. testcode:: + + import numpy as np + from sklearn.datasets import load_iris + from sklearn.linear_model import SGDClassifier + from sklearn.model_selection import train_test_split + + import optuna + + X, y = load_iris(return_X_y=True) + X_train, X_valid, y_train, y_valid = train_test_split(X, y) + classes = np.unique(y) + + + def objective(trial): + alpha = trial.suggest_float("alpha", 0.0, 1.0) + clf = SGDClassifier(alpha=alpha) + n_train_iter = 100 + + for step in range(n_train_iter): + clf.partial_fit(X_train, y_train, classes=classes) + + intermediate_value = clf.score(X_valid, y_valid) + trial.report(intermediate_value, step) + + if trial.should_prune(): + raise optuna.TrialPruned() + + return clf.score(X_valid, y_valid) + + + study = optuna.create_study( + direction="maximize", pruner=optuna.pruners.SuccessiveHalvingPruner() + ) + study.optimize(objective, n_trials=20) + + Args: + min_resource: + A parameter for specifying the minimum resource allocated to a trial + (in the `paper `__ this parameter is referred to as + :math:`r`). + This parameter defaults to 'auto' where the value is determined based on a heuristic + that looks at the number of required steps for the first trial to complete. + + A trial is never pruned until it executes + :math:`\\mathsf{min}\\_\\mathsf{resource} \\times + \\mathsf{reduction}\\_\\mathsf{factor}^{ + \\mathsf{min}\\_\\mathsf{early}\\_\\mathsf{stopping}\\_\\mathsf{rate}}` + steps (i.e., the completion point of the first rung). When the trial completes + the first rung, it will be promoted to the next rung only + if the value of the trial is placed in the top + :math:`{1 \\over \\mathsf{reduction}\\_\\mathsf{factor}}` fraction of + the all trials that already have reached the point (otherwise it will be pruned there). + If the trial won the competition, it runs until the next completion point (i.e., + :math:`\\mathsf{min}\\_\\mathsf{resource} \\times + \\mathsf{reduction}\\_\\mathsf{factor}^{ + (\\mathsf{min}\\_\\mathsf{early}\\_\\mathsf{stopping}\\_\\mathsf{rate} + + \\mathsf{rung})}` steps) + and repeats the same procedure. + + .. note:: + If the step of the last intermediate value may change with each trial, please + manually specify the minimum possible step to ``min_resource``. + reduction_factor: + A parameter for specifying reduction factor of promotable trials + (in the `paper `__ this parameter is + referred to as :math:`\\eta`). At the completion point of each rung, + about :math:`{1 \\over \\mathsf{reduction}\\_\\mathsf{factor}}` + trials will be promoted. + min_early_stopping_rate: + A parameter for specifying the minimum early-stopping rate + (in the `paper `__ this parameter is + referred to as :math:`s`). + bootstrap_count: + Minimum number of trials that need to complete a rung before any trial + is considered for promotion into the next rung. 
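The promotion schedule spelled out above is easier to read with concrete numbers; a small sketch, assuming for illustration ``min_resource=5`` (the pruner's default is ``"auto"``), ``reduction_factor=4`` and ``min_early_stopping_rate=0``, prints the step at which each rung completes::

    # Rung ``r`` completes after
    #     min_resource * reduction_factor ** (min_early_stopping_rate + r)
    # steps, the same expression used for rung_promotion_step in
    # SuccessiveHalvingPruner.prune().
    min_resource = 5
    reduction_factor = 4
    min_early_stopping_rate = 0

    for rung in range(4):
        completion_step = min_resource * reduction_factor ** (min_early_stopping_rate + rung)
        print(f"rung {rung} completes at step {completion_step}")

    # Rungs 0..3 complete at steps 5, 20, 80 and 320; only roughly the top
    # 1/reduction_factor fraction of trials at each completion point continues on.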
+ """ + + def __init__( + self, + min_resource: str | int = "auto", + reduction_factor: int = 4, + min_early_stopping_rate: int = 0, + bootstrap_count: int = 0, + ) -> None: + if isinstance(min_resource, str) and min_resource != "auto": + raise ValueError( + "The value of `min_resource` is {}, " + "but must be either `min_resource` >= 1 or 'auto'".format(min_resource) + ) + + if isinstance(min_resource, int) and min_resource < 1: + raise ValueError( + "The value of `min_resource` is {}, " + "but must be either `min_resource >= 1` or 'auto'".format(min_resource) + ) + + if reduction_factor < 2: + raise ValueError( + "The value of `reduction_factor` is {}, " + "but must be `reduction_factor >= 2`".format(reduction_factor) + ) + + if min_early_stopping_rate < 0: + raise ValueError( + "The value of `min_early_stopping_rate` is {}, " + "but must be `min_early_stopping_rate >= 0`".format(min_early_stopping_rate) + ) + + if bootstrap_count < 0: + raise ValueError( + "The value of `bootstrap_count` is {}, " + "but must be `bootstrap_count >= 0`".format(bootstrap_count) + ) + + if bootstrap_count > 0 and min_resource == "auto": + raise ValueError( + "bootstrap_count > 0 and min_resource == 'auto' " + "are mutually incompatible, bootstrap_count is {}".format(bootstrap_count) + ) + + self._min_resource: int | None = None + if isinstance(min_resource, int): + self._min_resource = min_resource + self._reduction_factor = reduction_factor + self._min_early_stopping_rate = min_early_stopping_rate + self._bootstrap_count = bootstrap_count + + def prune(self, study: "optuna.study.Study", trial: "optuna.trial.FrozenTrial") -> bool: + step = trial.last_step + if step is None: + return False + + rung = _get_current_rung(trial) + value = trial.intermediate_values[step] + trials: list["optuna.trial.FrozenTrial"] | None = None + + while True: + if self._min_resource is None: + if trials is None: + trials = study.get_trials(deepcopy=False) + self._min_resource = _estimate_min_resource(trials) + if self._min_resource is None: + return False + + assert self._min_resource is not None + rung_promotion_step = self._min_resource * ( + self._reduction_factor ** (self._min_early_stopping_rate + rung) + ) + if step < rung_promotion_step: + return False + + if math.isnan(value): + return True + + if trials is None: + trials = study.get_trials(deepcopy=False) + + rung_key = _completed_rung_key(rung) + + study._storage.set_trial_system_attr(trial._trial_id, rung_key, value) + + competing = _get_competing_values(trials, value, rung_key) + + # 'competing' already includes the current trial + # Therefore, we need to use the '<=' operator here + if len(competing) <= self._bootstrap_count: + return True + + if not _is_trial_promotable_to_next_rung( + value, + competing, + self._reduction_factor, + study.direction, + ): + return True + + rung += 1 + + +def _estimate_min_resource(trials: list["optuna.trial.FrozenTrial"]) -> int | None: + n_steps = [ + t.last_step for t in trials if t.state == TrialState.COMPLETE and t.last_step is not None + ] + + if not n_steps: + return None + + # Get the maximum number of steps and divide it by 100. + last_step = max(n_steps) + return max(last_step // 100, 1) + + +def _get_current_rung(trial: "optuna.trial.FrozenTrial") -> int: + # The following loop takes `O(log step)` iterations. 
+ rung = 0 + while _completed_rung_key(rung) in trial.system_attrs: + rung += 1 + return rung + + +def _completed_rung_key(rung: int) -> str: + return "completed_rung_{}".format(rung) + + +def _get_competing_values( + trials: list["optuna.trial.FrozenTrial"], value: float, rung_key: str +) -> list[float]: + competing_values = [t.system_attrs[rung_key] for t in trials if rung_key in t.system_attrs] + competing_values.append(value) + return competing_values + + +def _is_trial_promotable_to_next_rung( + value: float, + competing_values: list[float], + reduction_factor: int, + study_direction: StudyDirection, +) -> bool: + promotable_idx = (len(competing_values) // reduction_factor) - 1 + + if promotable_idx == -1: + # Optuna does not support suspending or resuming ongoing trials. Therefore, for the first + # `eta - 1` trials, this implementation instead promotes the trial if its value is the + # smallest one among the competing values. + promotable_idx = 0 + + competing_values.sort() + if study_direction == StudyDirection.MAXIMIZE: + return value >= competing_values[-(promotable_idx + 1)] + return value <= competing_values[promotable_idx] diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/pruners/_threshold.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/pruners/_threshold.py new file mode 100644 index 0000000000000000000000000000000000000000..4d2c29cccf4435b1d9651edc0d939db72abced91 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/pruners/_threshold.py @@ -0,0 +1,138 @@ +from __future__ import annotations + +import math +from typing import Any + +import optuna +from optuna.pruners import BasePruner +from optuna.pruners._percentile import _is_first_in_interval_step + + +def _check_value(value: Any) -> float: + try: + # For convenience, we allow users to report a value that can be cast to `float`. + value = float(value) + except (TypeError, ValueError): + message = "The `value` argument is of type '{}' but supposed to be a float.".format( + type(value).__name__ + ) + raise TypeError(message) from None + + return value + + +class ThresholdPruner(BasePruner): + """Pruner to detect outlying metrics of the trials. + + Prune if a metric exceeds upper threshold, + falls behind lower threshold or reaches ``nan``. + + Example: + .. testcode:: + + from optuna import create_study + from optuna.pruners import ThresholdPruner + from optuna import TrialPruned + + + def objective_for_upper(trial): + for step, y in enumerate(ys_for_upper): + trial.report(y, step) + + if trial.should_prune(): + raise TrialPruned() + return ys_for_upper[-1] + + + def objective_for_lower(trial): + for step, y in enumerate(ys_for_lower): + trial.report(y, step) + + if trial.should_prune(): + raise TrialPruned() + return ys_for_lower[-1] + + + ys_for_upper = [0.0, 0.1, 0.2, 0.5, 1.2] + ys_for_lower = [100.0, 90.0, 0.1, 0.0, -1] + + study = create_study(pruner=ThresholdPruner(upper=1.0)) + study.optimize(objective_for_upper, n_trials=10) + + study = create_study(pruner=ThresholdPruner(lower=0.0)) + study.optimize(objective_for_lower, n_trials=10) + + Args: + lower: + A minimum value which determines whether pruner prunes or not. + If an intermediate value is smaller than lower, it prunes. + upper: + A maximum value which determines whether pruner prunes or not. + If an intermediate value is larger than upper, it prunes. 
+ n_warmup_steps: + Pruning is disabled if the step is less than the given number of warmup steps. + interval_steps: + Interval in number of steps between the pruning checks, offset by the warmup steps. + If no value has been reported at the time of a pruning check, that particular check + will be postponed until a value is reported. Value must be at least 1. + + """ + + def __init__( + self, + lower: float | None = None, + upper: float | None = None, + n_warmup_steps: int = 0, + interval_steps: int = 1, + ) -> None: + if lower is None and upper is None: + raise TypeError("Either lower or upper must be specified.") + if lower is not None: + lower = _check_value(lower) + if upper is not None: + upper = _check_value(upper) + + lower = lower if lower is not None else -float("inf") + upper = upper if upper is not None else float("inf") + + if lower > upper: + raise ValueError("lower should be smaller than upper.") + if n_warmup_steps < 0: + raise ValueError( + "Number of warmup steps cannot be negative but got {}.".format(n_warmup_steps) + ) + if interval_steps < 1: + raise ValueError( + "Pruning interval steps must be at least 1 but got {}.".format(interval_steps) + ) + + self._lower = lower + self._upper = upper + self._n_warmup_steps = n_warmup_steps + self._interval_steps = interval_steps + + def prune(self, study: "optuna.study.Study", trial: "optuna.trial.FrozenTrial") -> bool: + step = trial.last_step + if step is None: + return False + + n_warmup_steps = self._n_warmup_steps + if step < n_warmup_steps: + return False + + if not _is_first_in_interval_step( + step, trial.intermediate_values.keys(), n_warmup_steps, self._interval_steps + ): + return False + + latest_value = trial.intermediate_values[step] + if math.isnan(latest_value): + return True + + if latest_value < self._lower: + return True + + if latest_value > self._upper: + return True + + return False diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/pruners/_wilcoxon.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/pruners/_wilcoxon.py new file mode 100644 index 0000000000000000000000000000000000000000..fbdd0fde671b94095910b9dd8a86951c667274e5 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/pruners/_wilcoxon.py @@ -0,0 +1,220 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING +import warnings + +import numpy as np + +import optuna +from optuna._experimental import experimental_class +from optuna.pruners import BasePruner +from optuna.study._study_direction import StudyDirection +from optuna.trial import FrozenTrial + + +if TYPE_CHECKING: + import scipy.stats as ss +else: + from optuna._imports import _LazyImport + + ss = _LazyImport("scipy.stats") + + +@experimental_class("3.6.0") +class WilcoxonPruner(BasePruner): + """Pruner based on the `Wilcoxon signed-rank test `__. + + This pruner performs the Wilcoxon signed-rank test between the current trial and the current best trial, + and stops whenever the pruner is sure up to a given p-value that the current trial is worse than the best one. + + This pruner is effective for optimizing the mean/median of some (costly-to-evaluate) performance scores over a set of problem instances. + Example applications include the optimization of: + + * the mean performance of a heuristic method (simulated annealing, genetic algorithm, SAT solver, etc.) 
on a set of problem instances, + * the k-fold cross-validation score of a machine learning model, and + * the accuracy of outputs of a large language model (LLM) on a set of questions. + + There can be "easy" or "hard" instances (the pruner handles correspondence of the instances between different trials). + In each trial, it is recommended to shuffle the evaluation order, so that the optimization doesn't overfit to the instances in the beginning. + + When you use this pruner, you must call ``Trial.report(value, step)`` method for each step (instance id) with + the evaluated value. The instance id may not be in ascending order. + This is different from other pruners in that the reported value need not converge + to the real value. To use pruners such as :class:`~optuna.pruners.SuccessiveHalvingPruner` + in the same setting, you must provide e.g., the historical average of the evaluated values. + + .. seealso:: + Please refer to :meth:`~optuna.trial.Trial.report`. + + Example: + + .. testcode:: + + import optuna + import numpy as np + + + # We minimize the mean evaluation loss over all the problem instances. + def evaluate(param, instance): + # A toy loss function for demonstrative purpose. + return (param - instance) ** 2 + + + problem_instances = np.linspace(-1, 1, 100) + + + def objective(trial): + # Sample a parameter. + param = trial.suggest_float("param", -1, 1) + + # Evaluate performance of the parameter. + results = [] + + # For best results, shuffle the evaluation order in each trial. + instance_ids = np.random.permutation(len(problem_instances)) + for instance_id in instance_ids: + loss = evaluate(param, problem_instances[instance_id]) + results.append(loss) + + # Report loss together with the instance id. + # CAVEAT: You need to pass the same id for the same instance, + # otherwise WilcoxonPruner cannot correctly pair the losses across trials and + # the pruning performance will degrade. + trial.report(loss, instance_id) + + if trial.should_prune(): + # Return the current predicted value instead of raising `TrialPruned`. + # This is a workaround to tell the Optuna about the evaluation + # results in pruned trials. (See the note below.) + return sum(results) / len(results) + + return sum(results) / len(results) + + + study = optuna.create_study(pruner=optuna.pruners.WilcoxonPruner(p_threshold=0.1)) + study.optimize(objective, n_trials=100) + + + + .. note:: + This pruner cannot handle ``infinity`` or ``nan`` values. + Trials containing those values are never pruned. + + .. note:: + If :func:`~optuna.trial.FrozenTrial.should_prune` returns :obj:`True`, you can return an + estimation of the final value (e.g., the average of all evaluated + values) instead of ``raise optuna.TrialPruned()``. + This is a workaround for the problem that currently there is no way + to tell Optuna the predicted objective value for trials raising + :class:`optuna.TrialPruned`. + + Args: + p_threshold: + The p-value threshold for pruning. This value should be between 0 and 1. + A trial will be pruned whenever the pruner is sure up to the given p-value + that the current trial is worse than the best trial. + The larger this value is, the more aggressive pruning will be performed. + Defaults to 0.1. + + .. note:: + This pruner repeatedly performs statistical tests between the + current trial and the current best trial with increasing samples. + The false-positive rate of such a sequential test is different from + performing the test only once. 
To get the nominal false-positive rate, + please specify the Pocock-corrected p-value. + + n_startup_steps: + The number of steps before which no trials are pruned. + Pruning starts only after you have ``n_startup_steps`` steps of + available observations for comparison between the current trial + and the best trial. + Defaults to 2. Note that the trial is not pruned at the first and second steps even if + the `n_startup_steps` is set to 0 or 1 due to the lack of enough data for comparison. + """ # NOQA: E501 + + def __init__( + self, + *, + p_threshold: float = 0.1, + n_startup_steps: int = 2, + ) -> None: + if n_startup_steps < 0: # TODO: Consider changing the RHS to 2. + raise ValueError(f"n_startup_steps must be nonnegative but got {n_startup_steps}.") + if not 0.0 <= p_threshold <= 1.0: + raise ValueError(f"p_threshold must be between 0 and 1 but got {p_threshold}.") + + self._n_startup_steps = n_startup_steps + self._p_threshold = p_threshold + + def prune(self, study: "optuna.study.Study", trial: FrozenTrial) -> bool: + if len(trial.intermediate_values) == 0: + return False + + steps, step_values = np.array(list(trial.intermediate_values.items())).T + + if np.any(~np.isfinite(step_values)): + warnings.warn( + f"The intermediate values of the current trial (trial {trial.number}) " + f"contain infinity/NaNs. WilcoxonPruner will not prune this trial." + ) + return False + + try: + best_trial = study.best_trial + except ValueError: + return False + + if len(best_trial.intermediate_values) == 0: + warnings.warn( + "The best trial has no intermediate values so WilcoxonPruner cannot prune trials. " + "If you have added the best trial with Study.add_trial, please consider setting " + "intermediate_values argument." + ) + return False + + best_steps, best_step_values = np.array(list(best_trial.intermediate_values.items())).T + + if np.any(~np.isfinite(best_step_values)): + warnings.warn( + f"The intermediate values of the best trial (trial {best_trial.number}) " + f"contain infinity/NaNs. WilcoxonPruner will not prune the current trial." + ) + return False + + _, idx1, idx2 = np.intersect1d(steps, best_steps, return_indices=True) + + if len(idx1) < len(step_values): + # This if-statement is never satisfied if following "average_is_best" safety works, + # because the safety ensures that the best trial always has the all steps. + warnings.warn( + "WilcoxonPruner finds steps existing in the current trial " + "but does not exist in the best trial. " + "Those values are ignored." + ) + + diff_values = step_values[idx1] - best_step_values[idx2] + + if len(diff_values) < max(2, self._n_startup_steps): + return False + + if study.direction == StudyDirection.MAXIMIZE: + alt = "less" + average_is_best = sum(best_step_values) / len(best_step_values) <= sum( + step_values + ) / len(step_values) + else: + alt = "greater" + average_is_best = sum(best_step_values) / len(best_step_values) >= sum( + step_values + ) / len(step_values) + + # We use zsplit to avoid the problem when all values are zero. + p = ss.wilcoxon(diff_values, alternative=alt, zero_method="zsplit").pvalue + + if p < self._p_threshold and average_is_best: + # ss.wilcoxon found the current trial is probably worse than the best trial, + # but the value of the best trial was not better than + # the average of the current trial's intermediate values. + # For safety, WilcoxonPruner concludes not to prune it for now. 
+ return False + return p < self._p_threshold diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/py.typed b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/__init__.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c2713f03137dae008c8e49082f1270563642a5b3 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/__init__.py @@ -0,0 +1,30 @@ +from optuna.samplers import nsgaii +from optuna.samplers._base import BaseSampler +from optuna.samplers._brute_force import BruteForceSampler +from optuna.samplers._cmaes import CmaEsSampler +from optuna.samplers._ga import BaseGASampler +from optuna.samplers._gp.sampler import GPSampler +from optuna.samplers._grid import GridSampler +from optuna.samplers._nsgaiii._sampler import NSGAIIISampler +from optuna.samplers._partial_fixed import PartialFixedSampler +from optuna.samplers._qmc import QMCSampler +from optuna.samplers._random import RandomSampler +from optuna.samplers._tpe.sampler import TPESampler +from optuna.samplers.nsgaii._sampler import NSGAIISampler + + +__all__ = [ + "BaseSampler", + "BaseGASampler", + "BruteForceSampler", + "CmaEsSampler", + "GridSampler", + "NSGAIISampler", + "NSGAIIISampler", + "PartialFixedSampler", + "QMCSampler", + "RandomSampler", + "TPESampler", + "GPSampler", + "nsgaii", +] diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/_base.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/_base.py new file mode 100644 index 0000000000000000000000000000000000000000..62ee4b0d1cf9adbba45fd2dce1bd4e82f0a1d425 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/_base.py @@ -0,0 +1,257 @@ +from __future__ import annotations + +import abc +from collections.abc import Callable +from collections.abc import Sequence +from typing import Any +from typing import TYPE_CHECKING +import warnings + +import numpy as np + +from optuna.distributions import BaseDistribution +from optuna.trial import FrozenTrial +from optuna.trial import TrialState + + +if TYPE_CHECKING: + from optuna.study import Study + + +class BaseSampler(abc.ABC): + """Base class for samplers. + + Optuna combines two types of sampling strategies, which are called *relative sampling* and + *independent sampling*. + + *The relative sampling* determines values of multiple parameters simultaneously so that + sampling algorithms can use relationship between parameters (e.g., correlation). + Target parameters of the relative sampling are described in a relative search space, which + is determined by :func:`~optuna.samplers.BaseSampler.infer_relative_search_space`. + + *The independent sampling* determines a value of a single parameter without considering any + relationship between parameters. Target parameters of the independent sampling are the + parameters not described in the relative search space. + + More specifically, parameters are sampled by the following procedure. 
+ At the beginning of a trial, :meth:`~optuna.samplers.BaseSampler.infer_relative_search_space` + is called to determine the relative search space for the trial. + During the execution of the objective function, + :meth:`~optuna.samplers.BaseSampler.sample_relative` is called only once + when sampling the parameters belonging to the relative search space for the first time. + :meth:`~optuna.samplers.BaseSampler.sample_independent` is used to sample + parameters that don't belong to the relative search space. + + The following figure depicts the lifetime of a trial and how the above three methods are + called in the trial. + + .. image:: ../../../../image/sampling-sequence.png + + | + + """ + + def __str__(self) -> str: + return self.__class__.__name__ + + @abc.abstractmethod + def infer_relative_search_space( + self, study: Study, trial: FrozenTrial + ) -> dict[str, BaseDistribution]: + """Infer the search space that will be used by relative sampling in the target trial. + + This method is called right before :func:`~optuna.samplers.BaseSampler.sample_relative` + method, and the search space returned by this method is passed to it. The parameters not + contained in the search space will be sampled by using + :func:`~optuna.samplers.BaseSampler.sample_independent` method. + + Args: + study: + Target study object. + trial: + Target trial object. + Take a copy before modifying this object. + + Returns: + A dictionary containing the parameter names and parameter's distributions. + + .. seealso:: + Please refer to :func:`~optuna.search_space.intersection_search_space` as an + implementation of :func:`~optuna.samplers.BaseSampler.infer_relative_search_space`. + """ + + raise NotImplementedError + + @abc.abstractmethod + def sample_relative( + self, study: Study, trial: FrozenTrial, search_space: dict[str, BaseDistribution] + ) -> dict[str, Any]: + """Sample parameters in a given search space. + + This method is called once at the beginning of each trial, i.e., right before the + evaluation of the objective function. This method is suitable for sampling algorithms + that use relationship between parameters such as Gaussian Process and CMA-ES. + + .. note:: + The failed trials are ignored by any build-in samplers when they sample new + parameters. Thus, failed trials are regarded as deleted in the samplers' + perspective. + + Args: + study: + Target study object. + trial: + Target trial object. + Take a copy before modifying this object. + search_space: + The search space returned by + :func:`~optuna.samplers.BaseSampler.infer_relative_search_space`. + + Returns: + A dictionary containing the parameter names and the values. + + """ + + raise NotImplementedError + + @abc.abstractmethod + def sample_independent( + self, + study: Study, + trial: FrozenTrial, + param_name: str, + param_distribution: BaseDistribution, + ) -> Any: + """Sample a parameter for a given distribution. + + This method is called only for the parameters not contained in the search space returned + by :func:`~optuna.samplers.BaseSampler.sample_relative` method. This method is suitable + for sampling algorithms that do not use relationship between parameters such as random + sampling and TPE. + + .. note:: + The failed trials are ignored by any build-in samplers when they sample new + parameters. Thus, failed trials are regarded as deleted in the samplers' + perspective. + + Args: + study: + Target study object. + trial: + Target trial object. + Take a copy before modifying this object. 
+ param_name: + Name of the sampled parameter. + param_distribution: + Distribution object that specifies a prior and/or scale of the sampling algorithm. + + Returns: + A parameter value. + + """ + + raise NotImplementedError + + def before_trial(self, study: Study, trial: FrozenTrial) -> None: + """Trial pre-processing. + + This method is called before the objective function is called and right after the trial is + instantiated. More precisely, this method is called during trial initialization, just + before the :func:`~optuna.samplers.BaseSampler.infer_relative_search_space` call. In other + words, it is responsible for pre-processing that should be done before inferring the search + space. + + .. note:: + Added in v3.3.0 as an experimental feature. The interface may change in newer versions + without prior notice. See https://github.com/optuna/optuna/releases/tag/v3.3.0. + + Args: + study: + Target study object. + trial: + Target trial object. + """ + + pass + + def after_trial( + self, + study: Study, + trial: FrozenTrial, + state: TrialState, + values: Sequence[float] | None, + ) -> None: + """Trial post-processing. + + This method is called after the objective function returns and right before the trial is + finished and its state is stored. + + .. note:: + Added in v2.4.0 as an experimental feature. The interface may change in newer versions + without prior notice. See https://github.com/optuna/optuna/releases/tag/v2.4.0. + + Args: + study: + Target study object. + trial: + Target trial object. + Take a copy before modifying this object. + state: + Resulting trial state. + values: + Resulting trial values. Guaranteed to not be :obj:`None` if trial succeeded. + + """ + + pass + + def reseed_rng(self) -> None: + """Reseed sampler's random number generator. + + This method is called by the :class:`~optuna.study.Study` instance if trials are executed + in parallel with the option ``n_jobs>1``. In that case, the sampler instance will be + replicated including the state of the random number generator, and they may suggest the + same values. To prevent this issue, this method assigns a different seed to each random + number generator. + """ + + pass + + def _raise_error_if_multi_objective(self, study: Study) -> None: + if study._is_multi_objective(): + raise ValueError( + "If the study is being used for multi-objective optimization, " + f"{self.__class__.__name__} cannot be used." + ) + + +_CONSTRAINTS_KEY = "constraints" + + +def _process_constraints_after_trial( + constraints_func: Callable[[FrozenTrial], Sequence[float]], + study: Study, + trial: FrozenTrial, + state: TrialState, +) -> None: + if state not in [TrialState.COMPLETE, TrialState.PRUNED]: + return + + constraints = None + try: + con = constraints_func(trial) + if np.any(np.isnan(con)): + raise ValueError("Constraint values cannot be NaN.") + if not isinstance(con, (tuple, list)): + warnings.warn( + f"Constraints should be a sequence of floats but got {type(con).__name__}." 
+ ) + constraints = tuple(con) + finally: + assert constraints is None or isinstance(constraints, tuple) + + study._storage.set_trial_system_attr( + trial._trial_id, + _CONSTRAINTS_KEY, + constraints, + ) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/_brute_force.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/_brute_force.py new file mode 100644 index 0000000000000000000000000000000000000000..0e4b273f3dfc77cb77a9c60388fe372c9c1597e3 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/_brute_force.py @@ -0,0 +1,299 @@ +from __future__ import annotations + +from collections.abc import Iterable +from collections.abc import Sequence +from dataclasses import dataclass +import decimal +from typing import Any +from typing import TYPE_CHECKING + +import numpy as np + +from optuna._experimental import experimental_class +from optuna.distributions import BaseDistribution +from optuna.distributions import CategoricalDistribution +from optuna.distributions import FloatDistribution +from optuna.distributions import IntDistribution +from optuna.samplers import BaseSampler +from optuna.samplers._lazy_random_state import LazyRandomState +from optuna.trial import create_trial +from optuna.trial import FrozenTrial +from optuna.trial import TrialState + + +if TYPE_CHECKING: + from optuna.study import Study + + +@dataclass +class _TreeNode: + # This is a class to represent the tree of search space. + + # A tree node has three states: + # 1. Unexpanded. This is represented by children=None. + # 2. Leaf. This is represented by children={} and param_name=None. + # 3. Normal node. It has a param_name and non-empty children. + + param_name: str | None = None + children: dict[float, "_TreeNode"] | None = None + is_running: bool = False + + def expand(self, param_name: str | None, search_space: Iterable[float]) -> None: + # If the node is unexpanded, expand it. + # Otherwise, check if the node is compatible with the given search space. + if self.children is None: + # Expand the node + self.param_name = param_name + self.children = {value: _TreeNode() for value in search_space} + else: + if self.param_name != param_name: + raise ValueError(f"param_name mismatch: {self.param_name} != {param_name}") + if self.children.keys() != set(search_space): + raise ValueError( + f"search_space mismatch: {set(self.children.keys())} != {set(search_space)}" + ) + + def set_running(self) -> None: + self.is_running = True + + def set_leaf(self) -> None: + self.expand(None, []) + + def add_path( + self, params_and_search_spaces: Iterable[tuple[str, Iterable[float], float]] + ) -> "_TreeNode" | None: + # Add a path (i.e. a list of suggested parameters in one trial) to the tree. + current_node = self + for param_name, search_space, value in params_and_search_spaces: + current_node.expand(param_name, search_space) + assert current_node.children is not None + if value not in current_node.children: + return None + current_node = current_node.children[value] + return current_node + + def count_unexpanded(self, exclude_running: bool) -> int: + # Count the number of unexpanded nodes in the subtree. 
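+        # Illustrative (hypothetical) case: for a node whose children are
+        # {1.0: unexpanded, 2.0: leaf, 3.0: unexpanded-and-running}, this method
+        # returns 2 with exclude_running=False and 1 with exclude_running=True,
+        # because running-but-unexpanded children are then not counted.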
+ if self.children is None: + return 0 if exclude_running and self.is_running else 1 + else: + return sum(child.count_unexpanded(exclude_running) for child in self.children.values()) + + def sample_child(self, rng: np.random.RandomState, exclude_running: bool) -> float: + assert self.children is not None + # Sample an unexpanded node in the subtree uniformly, and return the first + # parameter value in the path to the node. + # Equivalently, we sample the child node with weights proportional to the number + # of unexpanded nodes in the subtree. + weights = np.array( + [child.count_unexpanded(exclude_running) for child in self.children.values()], + dtype=np.float64, + ) + if any( + not value.is_running and weights[i] > 0 + for i, value in enumerate(self.children.values()) + ): + # Prioritize picking non-running and unexpanded nodes. + for i, child in enumerate(self.children.values()): + if child.is_running: + weights[i] = 0.0 + weights /= weights.sum() + return rng.choice(list(self.children.keys()), p=weights) + + +@experimental_class("3.1.0") +class BruteForceSampler(BaseSampler): + """Sampler using brute force. + + This sampler performs exhaustive search on the defined search space. + + Example: + + .. testcode:: + + import optuna + + + def objective(trial): + c = trial.suggest_categorical("c", ["float", "int"]) + if c == "float": + return trial.suggest_float("x", 1, 3, step=0.5) + elif c == "int": + a = trial.suggest_int("a", 1, 3) + b = trial.suggest_int("b", a, 3) + return a + b + + + study = optuna.create_study(sampler=optuna.samplers.BruteForceSampler()) + study.optimize(objective) + + Note: + The defined search space must be finite. Therefore, when using + :class:`~optuna.distributions.FloatDistribution` or + :func:`~optuna.trial.Trial.suggest_float`, ``step=None`` is not allowed. + + Note: + The sampler may fail to try the entire search space in when the suggestion ranges or + parameters are changed in the same :class:`~optuna.study.Study`. + + Args: + seed: + A seed to fix the order of trials as the search order randomly shuffled. Please note + that it is not recommended using this option in distributed optimization settings since + this option cannot ensure the order of trials and may increase the number of duplicate + suggestions during distributed optimization. + avoid_premature_stop: + If :obj:`True`, the sampler performs a strict exhaustive search. Please note + that enabling this option may increase the likelihood of duplicate sampling. + When this option is not enabled (default), the sampler applies a looser criterion for + determining when to stop the search, which may result in incomplete coverage of the + search space. For more information, see https://github.com/optuna/optuna/issues/5780. + """ + + def __init__(self, seed: int | None = None, avoid_premature_stop: bool = False) -> None: + self._rng = LazyRandomState(seed) + self._avoid_premature_stop = avoid_premature_stop + + def infer_relative_search_space( + self, study: Study, trial: FrozenTrial + ) -> dict[str, BaseDistribution]: + return {} + + def sample_relative( + self, study: Study, trial: FrozenTrial, search_space: dict[str, BaseDistribution] + ) -> dict[str, Any]: + return {} + + @staticmethod + def _populate_tree( + tree: _TreeNode, trials: Iterable[FrozenTrial], params: dict[str, Any] + ) -> None: + # Populate tree under given params from the given trials. 
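+        # Only trials whose existing params agree with ``params`` contribute a path.
+        # Hypothetical illustration based on the example in the class docstring above:
+        # with params={"c": "int"}, a trial that suggested c="float" is skipped, while
+        # a trial with c="int", a=1, b=2 adds the path a=1 -> b=2 under this subtree.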
+ for trial in trials: + if not all(p in trial.params and trial.params[p] == v for p, v in params.items()): + continue + leaf = tree.add_path( + ( + ( + param_name, + _enumerate_candidates(param_distribution), + param_distribution.to_internal_repr(trial.params[param_name]), + ) + for param_name, param_distribution in trial.distributions.items() + if param_name not in params + ) + ) + if leaf is not None: + # The parameters are on the defined grid. + if trial.state.is_finished(): + leaf.set_leaf() + else: + leaf.set_running() + + def sample_independent( + self, + study: Study, + trial: FrozenTrial, + param_name: str, + param_distribution: BaseDistribution, + ) -> Any: + exclude_running = not self._avoid_premature_stop + + # We directly query the storage to get trials here instead of `study.get_trials`, + # since some pruners such as `HyperbandPruner` use the study transformed + # to filter trials. See https://github.com/optuna/optuna/issues/2327 for details. + trials = study._storage.get_all_trials( + study._study_id, + deepcopy=False, + states=( + TrialState.COMPLETE, + TrialState.PRUNED, + TrialState.RUNNING, + TrialState.FAIL, + ), + ) + tree = _TreeNode() + candidates = _enumerate_candidates(param_distribution) + tree.expand(param_name, candidates) + # Populating must happen after the initialization above to prevent `tree` from + # being initialized as an empty graph, which is created with n_jobs > 1 + # where we get trials[i].params = {} for some i. + self._populate_tree(tree, (t for t in trials if t.number != trial.number), trial.params) + if tree.count_unexpanded(exclude_running) == 0: + return param_distribution.to_external_repr(self._rng.rng.choice(candidates)) + else: + return param_distribution.to_external_repr( + tree.sample_child(self._rng.rng, exclude_running) + ) + + def after_trial( + self, + study: Study, + trial: FrozenTrial, + state: TrialState, + values: Sequence[float] | None, + ) -> None: + exclude_running = not self._avoid_premature_stop + + # We directly query the storage to get trials here instead of `study.get_trials`, + # since some pruners such as `HyperbandPruner` use the study transformed + # to filter trials. See https://github.com/optuna/optuna/issues/2327 for details. + trials = study._storage.get_all_trials( + study._study_id, + deepcopy=False, + states=( + TrialState.COMPLETE, + TrialState.PRUNED, + TrialState.RUNNING, + TrialState.FAIL, + ), + ) + tree = _TreeNode() + self._populate_tree( + tree, + ( + ( + t + if t.number != trial.number + else create_trial( + state=state, # Set current trial as complete. + values=values, + params=trial.params, + distributions=trial.distributions, + ) + ) + for t in trials + ), + {}, + ) + + if tree.count_unexpanded(exclude_running) == 0: + study.stop() + + +def _enumerate_candidates(param_distribution: BaseDistribution) -> Sequence[float]: + if isinstance(param_distribution, FloatDistribution): + if param_distribution.step is None: + raise ValueError( + "FloatDistribution.step must be given for BruteForceSampler" + " (otherwise, the search space will be infinite)." 
+ ) + low = decimal.Decimal(str(param_distribution.low)) + high = decimal.Decimal(str(param_distribution.high)) + step = decimal.Decimal(str(param_distribution.step)) + + ret = [] + value = low + while value <= high: + ret.append(float(value)) + value += step + + return ret + elif isinstance(param_distribution, IntDistribution): + return list( + range(param_distribution.low, param_distribution.high + 1, param_distribution.step) + ) + elif isinstance(param_distribution, CategoricalDistribution): + return list(range(len(param_distribution.choices))) # Internal representations. + else: + raise ValueError(f"Unknown distribution {param_distribution}.") diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/_cmaes.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/_cmaes.py new file mode 100644 index 0000000000000000000000000000000000000000..f2609e9693cb1220ae85a50428773eb46cd02f56 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/_cmaes.py @@ -0,0 +1,648 @@ +from __future__ import annotations + +from collections.abc import Sequence +import copy +import math +import pickle +from typing import Any +from typing import cast +from typing import TYPE_CHECKING +from typing import Union +import warnings + +import numpy as np + +import optuna +from optuna import _deprecated +from optuna import logging +from optuna._experimental import warn_experimental_argument +from optuna._imports import _LazyImport +from optuna._transform import _SearchSpaceTransform +from optuna.distributions import BaseDistribution +from optuna.distributions import FloatDistribution +from optuna.distributions import IntDistribution +from optuna.samplers import BaseSampler +from optuna.samplers._lazy_random_state import LazyRandomState +from optuna.search_space import IntersectionSearchSpace +from optuna.study._study_direction import StudyDirection +from optuna.trial import FrozenTrial +from optuna.trial import TrialState + + +if TYPE_CHECKING: + import cmaes + + CmaClass = Union[cmaes.CMA, cmaes.SepCMA, cmaes.CMAwM] +else: + cmaes = _LazyImport("cmaes") + +_logger = logging.get_logger(__name__) + +_EPS = 1e-10 +# The value of system_attrs must be less than 2046 characters on RDBStorage. +_SYSTEM_ATTR_MAX_LENGTH = 2045 + + +class CmaEsSampler(BaseSampler): + """A sampler using `cmaes `__ as the backend. + + Example: + + Optimize a simple quadratic function by using :class:`~optuna.samplers.CmaEsSampler`. + + .. code-block:: console + + $ pip install cmaes + + .. testcode:: + + import optuna + + + def objective(trial): + x = trial.suggest_float("x", -1, 1) + y = trial.suggest_int("y", -1, 1) + return x**2 + y + + + sampler = optuna.samplers.CmaEsSampler() + study = optuna.create_study(sampler=sampler) + study.optimize(objective, n_trials=20) + + Please note that this sampler does not support CategoricalDistribution. + However, :class:`~optuna.distributions.FloatDistribution` with ``step``, + (:func:`~optuna.trial.Trial.suggest_float`) and + :class:`~optuna.distributions.IntDistribution` (:func:`~optuna.trial.Trial.suggest_int`) + are supported. + + If your search space contains categorical parameters, I recommend you + to use :class:`~optuna.samplers.TPESampler` instead. + Furthermore, there is room for performance improvements in parallel + optimization settings. This sampler cannot use some trials for updating + the parameters of multivariate normal distribution. 
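+
+    As an illustrative sketch of the warm-starting option documented below (here
+    ``source_study`` is a hypothetical, already finished study on a similar task;
+    see the ``source_trials`` argument)::
+
+        source_trials = source_study.get_trials(deepcopy=False)
+        sampler = optuna.samplers.CmaEsSampler(source_trials=source_trials)
+        study = optuna.create_study(sampler=sampler)
+        study.optimize(objective, n_trials=20)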
+ + For further information about CMA-ES algorithm, please refer to the following papers: + + - `N. Hansen, The CMA Evolution Strategy: A Tutorial. arXiv:1604.00772, 2016. + `__ + - `A. Auger and N. Hansen. A restart CMA evolution strategy with increasing population + size. In Proceedings of the IEEE Congress on Evolutionary Computation (CEC 2005), + pages 1769–1776. IEEE Press, 2005. `__ + - `N. Hansen. Benchmarking a BI-Population CMA-ES on the BBOB-2009 Function Testbed. + GECCO Workshop, 2009. `__ + - `Raymond Ros, Nikolaus Hansen. A Simple Modification in CMA-ES Achieving Linear Time and + Space Complexity. 10th International Conference on Parallel Problem Solving From Nature, + Sep 2008, Dortmund, Germany. inria-00287367. `__ + - `Masahiro Nomura, Shuhei Watanabe, Youhei Akimoto, Yoshihiko Ozaki, Masaki Onishi. + Warm Starting CMA-ES for Hyperparameter Optimization, AAAI. 2021. + `__ + - `R. Hamano, S. Saito, M. Nomura, S. Shirakawa. CMA-ES with Margin: Lower-Bounding Marginal + Probability for Mixed-Integer Black-Box Optimization, GECCO. 2022. + `__ + - `M. Nomura, Y. Akimoto, I. Ono. CMA-ES with Learning Rate Adaptation: Can CMA-ES with + Default Population Size Solve Multimodal and Noisy Problems?, GECCO. 2023. + `__ + + .. seealso:: + You can also use `optuna_integration.PyCmaSampler `__ which is a sampler using cma + library as the backend. + + Args: + + x0: + A dictionary of an initial parameter values for CMA-ES. By default, the mean of ``low`` + and ``high`` for each distribution is used. Note that ``x0`` is sampled uniformly + within the search space domain for each restart if you specify ``restart_strategy`` + argument. + + sigma0: + Initial standard deviation of CMA-ES. By default, ``sigma0`` is set to + ``min_range / 6``, where ``min_range`` denotes the minimum range of the distributions + in the search space. + + seed: + A random seed for CMA-ES. + + n_startup_trials: + The independent sampling is used instead of the CMA-ES algorithm until the given number + of trials finish in the same study. + + independent_sampler: + A :class:`~optuna.samplers.BaseSampler` instance that is used for independent + sampling. The parameters not contained in the relative search space are sampled + by this sampler. + The search space for :class:`~optuna.samplers.CmaEsSampler` is determined by + :func:`~optuna.search_space.intersection_search_space()`. + + If :obj:`None` is specified, :class:`~optuna.samplers.RandomSampler` is used + as the default. + + .. seealso:: + :class:`optuna.samplers` module provides built-in independent samplers + such as :class:`~optuna.samplers.RandomSampler` and + :class:`~optuna.samplers.TPESampler`. + + warn_independent_sampling: + If this is :obj:`True`, a warning message is emitted when + the value of a parameter is sampled by using an independent sampler. + + Note that the parameters of the first trial in a study are always sampled + via an independent sampler, so no warning messages are emitted in this case. + + restart_strategy: + Strategy for restarting CMA-ES optimization when converges to a local minimum. + If :obj:`None` is given, CMA-ES will not restart (default). + If 'ipop' is given, CMA-ES will restart with increasing population size. + if 'bipop' is given, CMA-ES will restart with the population size + increased or decreased. + Please see also ``inc_popsize`` parameter. + + .. warning:: + Deprecated in v4.4.0. ``restart_strategy`` argument will be removed in the future. 
+ The removal of this feature is currently scheduled for v6.0.0, + but this schedule is subject to change. + From v4.4.0 onward, ``restart_strategy`` automatically falls back to ``None``, and + ``restart_strategy`` will be supported in OptunaHub. + See https://github.com/optuna/optuna/releases/tag/v4.4.0. + + popsize: + A population size of CMA-ES. + + inc_popsize: + Multiplier for increasing population size before each restart. + This argument will be used when ``restart_strategy = 'ipop'`` + or ``restart_strategy = 'bipop'`` is specified. + + .. warning:: + Deprecated in v4.4.0. ``inc_popsize`` argument will be removed in the future. + The removal of this feature is currently scheduled for v6.0.0, + but this schedule is subject to change. + From v4.4.0 onward, ``inc_popsize`` is no longer utilized within Optuna, and + ``inc_popsize`` will be supported in OptunaHub. + See https://github.com/optuna/optuna/releases/tag/v4.4.0. + + consider_pruned_trials: + If this is :obj:`True`, the PRUNED trials are considered for sampling. + + .. note:: + Added in v2.0.0 as an experimental feature. The interface may change in newer + versions without prior notice. See + https://github.com/optuna/optuna/releases/tag/v2.0.0. + + .. note:: + It is suggested to set this flag :obj:`False` when the + :class:`~optuna.pruners.MedianPruner` is used. On the other hand, it is suggested + to set this flag :obj:`True` when the :class:`~optuna.pruners.HyperbandPruner` is + used. Please see `the benchmark result + `__ for the details. + + use_separable_cma: + If this is :obj:`True`, the covariance matrix is constrained to be diagonal. + Due to reduce the model complexity, the learning rate for the covariance matrix + is increased. Consequently, this algorithm outperforms CMA-ES on separable functions. + + .. note:: + Added in v2.6.0 as an experimental feature. The interface may change in newer + versions without prior notice. See + https://github.com/optuna/optuna/releases/tag/v2.6.0. + + with_margin: + If this is :obj:`True`, CMA-ES with margin is used. This algorithm prevents samples in + each discrete distribution (:class:`~optuna.distributions.FloatDistribution` with + ``step`` and :class:`~optuna.distributions.IntDistribution`) from being fixed to a single + point. + Currently, this option cannot be used with ``use_separable_cma=True``. + + .. note:: + Added in v3.1.0 as an experimental feature. The interface may change in newer + versions without prior notice. See + https://github.com/optuna/optuna/releases/tag/v3.1.0. + + lr_adapt: + If this is :obj:`True`, CMA-ES with learning rate adaptation is used. + This algorithm focuses on working well on multimodal and/or noisy problems + with default settings. + Currently, this option cannot be used with ``use_separable_cma=True`` or + ``with_margin=True``. + + .. note:: + Added in v3.3.0 or later, as an experimental feature. + The interface may change in newer versions without prior notice. See + https://github.com/optuna/optuna/releases/tag/v3.3.0. + + source_trials: + This option is for Warm Starting CMA-ES, a method to transfer prior knowledge on + similar HPO tasks through the initialization of CMA-ES. This method estimates a + promising distribution from ``source_trials`` and generates the parameter of + multivariate gaussian distribution. Please note that it is prohibited to use + ``x0``, ``sigma0``, or ``use_separable_cma`` argument together. + + .. note:: + Added in v2.6.0 as an experimental feature. 
The interface may change in newer + versions without prior notice. See + https://github.com/optuna/optuna/releases/tag/v2.6.0. + + """ # NOQA: E501 + + def __init__( + self, + x0: dict[str, Any] | None = None, + sigma0: float | None = None, + n_startup_trials: int = 1, + independent_sampler: BaseSampler | None = None, + warn_independent_sampling: bool = True, + seed: int | None = None, + *, + consider_pruned_trials: bool = False, + restart_strategy: str | None = None, + popsize: int | None = None, + inc_popsize: int = -1, + use_separable_cma: bool = False, + with_margin: bool = False, + lr_adapt: bool = False, + source_trials: list[FrozenTrial] | None = None, + ) -> None: + if restart_strategy is not None or inc_popsize != -1: + msg = _deprecated._DEPRECATION_WARNING_TEMPLATE.format( + name="`restart_strategy`", d_ver="4.4.0", r_ver="6.0.0" + ) + warnings.warn( + f"{msg} From v4.4.0 onward, `restart_strategy` automatically falls back to " + "`None`. `restart_strategy` will be supported in OptunaHub.", + FutureWarning, + ) + + self._x0 = x0 + self._sigma0 = sigma0 + self._independent_sampler = independent_sampler or optuna.samplers.RandomSampler(seed=seed) + self._n_startup_trials = n_startup_trials + self._warn_independent_sampling = warn_independent_sampling + self._cma_rng = LazyRandomState(seed) + self._search_space = IntersectionSearchSpace() + self._consider_pruned_trials = consider_pruned_trials + self._popsize = popsize + self._use_separable_cma = use_separable_cma + self._with_margin = with_margin + self._lr_adapt = lr_adapt + self._source_trials = source_trials + + if self._use_separable_cma: + self._attr_prefix = "sepcma:" + elif self._with_margin: + self._attr_prefix = "cmawm:" + else: + self._attr_prefix = "cma:" + + if self._consider_pruned_trials: + warn_experimental_argument("consider_pruned_trials") + + if self._use_separable_cma: + warn_experimental_argument("use_separable_cma") + + if self._source_trials is not None: + warn_experimental_argument("source_trials") + + if self._with_margin: + warn_experimental_argument("with_margin") + + if self._lr_adapt: + warn_experimental_argument("lr_adapt") + + if source_trials is not None and (x0 is not None or sigma0 is not None): + raise ValueError( + "It is prohibited to pass `source_trials` argument when " + "x0 or sigma0 is specified." + ) + + # TODO(c-bata): Support WS-sep-CMA-ES. + if source_trials is not None and use_separable_cma: + raise ValueError( + "It is prohibited to pass `source_trials` argument when using separable CMA-ES." + ) + + if lr_adapt and (use_separable_cma or with_margin): + raise ValueError( + "It is prohibited to pass `use_separable_cma` or `with_margin` argument when " + "using `lr_adapt`." + ) + + # TODO(knshnb): Support sep-CMA-ES with margin. + if self._use_separable_cma and self._with_margin: + raise ValueError( + "Currently, we do not support `use_separable_cma=True` and `with_margin=True`." + ) + + def reseed_rng(self) -> None: + # _cma_rng doesn't require reseeding because the relative sampling reseeds in each trial. + self._independent_sampler.reseed_rng() + + def infer_relative_search_space( + self, study: "optuna.Study", trial: "optuna.trial.FrozenTrial" + ) -> dict[str, BaseDistribution]: + search_space: dict[str, BaseDistribution] = {} + for name, distribution in self._search_space.calculate(study).items(): + if distribution.single(): + # `cma` cannot handle distributions that contain just a single value, so we skip + # them. 
Note that the parameter values for such distributions are sampled in + # `Trial`. + continue + + if not isinstance(distribution, (FloatDistribution, IntDistribution)): + # Categorical distribution is unsupported. + continue + search_space[name] = distribution + + return search_space + + def sample_relative( + self, + study: "optuna.Study", + trial: "optuna.trial.FrozenTrial", + search_space: dict[str, BaseDistribution], + ) -> dict[str, Any]: + self._raise_error_if_multi_objective(study) + + if len(search_space) == 0: + return {} + + completed_trials = self._get_trials(study) + if len(completed_trials) < self._n_startup_trials: + return {} + + if len(search_space) == 1: + if self._warn_independent_sampling: + _logger.warning( + "`CmaEsSampler` only supports two or more dimensional continuous " + "search space. `{}` is used instead of `CmaEsSampler`.".format( + self._independent_sampler.__class__.__name__ + ) + ) + self._warn_independent_sampling = False + return {} + + # When `with_margin=True`, bounds in discrete dimensions are handled inside `CMAwM`. + trans = _SearchSpaceTransform( + search_space, transform_step=not self._with_margin, transform_0_1=True + ) + + optimizer = self._restore_optimizer(completed_trials) + if optimizer is None: + optimizer = self._init_optimizer(trans, study.direction) + + if optimizer.dim != len(trans.bounds): + if self._warn_independent_sampling: + _logger.warning( + "`CmaEsSampler` does not support dynamic search space. " + "`{}` is used instead of `CmaEsSampler`.".format( + self._independent_sampler.__class__.__name__ + ) + ) + self._warn_independent_sampling = False + return {} + + # TODO(c-bata): Reduce the number of wasted trials during parallel optimization. + # See https://github.com/optuna/optuna/pull/920#discussion_r385114002 for details. + solution_trials = self._get_solution_trials(completed_trials, optimizer.generation) + + if len(solution_trials) >= optimizer.population_size: + solutions: list[tuple[np.ndarray, float]] = [] + for t in solution_trials[: optimizer.population_size]: + assert t.value is not None, "completed trials must have a value" + if isinstance(optimizer, cmaes.CMAwM): + x = np.array(t.system_attrs["x_for_tell"]) + else: + x = trans.transform(t.params) + y = t.value if study.direction == StudyDirection.MINIMIZE else -t.value + solutions.append((x, y)) + + optimizer.tell(solutions) + + # Store optimizer. + optimizer_str = pickle.dumps(optimizer).hex() + optimizer_attrs = self._split_optimizer_str(optimizer_str) + for key in optimizer_attrs: + study._storage.set_trial_system_attr(trial._trial_id, key, optimizer_attrs[key]) + + # Caution: optimizer should update its seed value. 
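+        # (Descriptive note) The optimizer may have just been unpickled from trial
+        # system_attrs, so concurrent workers would otherwise share an identical RNG
+        # state; the trial-number-dependent reseeding below makes their ask() calls
+        # diverge instead of returning duplicate candidates.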
+ seed = self._cma_rng.rng.randint(1, 2**16) + trial.number + optimizer._rng.seed(seed) + if isinstance(optimizer, cmaes.CMAwM): + params, x_for_tell = optimizer.ask() + study._storage.set_trial_system_attr( + trial._trial_id, "x_for_tell", x_for_tell.tolist() + ) + else: + params = optimizer.ask() + + generation_attr_key = self._attr_key_generation + study._storage.set_trial_system_attr( + trial._trial_id, generation_attr_key, optimizer.generation + ) + + external_values = trans.untransform(params) + + return external_values + + @property + def _attr_key_generation(self) -> str: + return self._attr_prefix + "generation" + + @property + def _attr_key_optimizer(self) -> str: + return self._attr_prefix + "optimizer" + + def _concat_optimizer_attrs(self, optimizer_attrs: dict[str, str]) -> str: + return "".join( + optimizer_attrs["{}:{}".format(self._attr_key_optimizer, i)] + for i in range(len(optimizer_attrs)) + ) + + def _split_optimizer_str(self, optimizer_str: str) -> dict[str, str]: + optimizer_len = len(optimizer_str) + attrs = {} + for i in range(math.ceil(optimizer_len / _SYSTEM_ATTR_MAX_LENGTH)): + start = i * _SYSTEM_ATTR_MAX_LENGTH + end = min((i + 1) * _SYSTEM_ATTR_MAX_LENGTH, optimizer_len) + attrs["{}:{}".format(self._attr_key_optimizer, i)] = optimizer_str[start:end] + return attrs + + def _restore_optimizer( + self, + completed_trials: "list[optuna.trial.FrozenTrial]", + ) -> "CmaClass" | None: + # Restore a previous CMA object. + for trial in reversed(completed_trials): + optimizer_attrs = { + key: value + for key, value in trial.system_attrs.items() + if key.startswith(self._attr_key_optimizer) + } + if len(optimizer_attrs) == 0: + continue + + optimizer_str = self._concat_optimizer_attrs(optimizer_attrs) + return pickle.loads(bytes.fromhex(optimizer_str)) + return None + + def _init_optimizer( + self, + trans: _SearchSpaceTransform, + direction: StudyDirection, + ) -> "CmaClass": + lower_bounds = trans.bounds[:, 0] + upper_bounds = trans.bounds[:, 1] + n_dimension = len(trans.bounds) + + if self._source_trials is None: + if self._x0 is None: + mean = lower_bounds + (upper_bounds - lower_bounds) / 2 + else: + # `self._x0` is external representations. + mean = trans.transform(self._x0) + + if self._sigma0 is None: + sigma0 = np.min((upper_bounds - lower_bounds) / 6) + else: + sigma0 = self._sigma0 + + cov = None + else: + expected_states = [TrialState.COMPLETE] + if self._consider_pruned_trials: + expected_states.append(TrialState.PRUNED) + + # TODO(c-bata): Filter parameters by their values instead of checking search space. + sign = 1 if direction == StudyDirection.MINIMIZE else -1 + source_solutions = [ + (trans.transform(t.params), sign * cast(float, t.value)) + for t in self._source_trials + if t.state in expected_states + and _is_compatible_search_space(trans, t.distributions) + ] + if len(source_solutions) == 0: + raise ValueError("No compatible source_trials") + + # TODO(c-bata): Add options to change prior parameters (alpha and gamma). + mean, sigma0, cov = cmaes.get_warm_start_mgd(source_solutions) + + # Avoid ZeroDivisionError in cmaes. 
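+        # (Descriptive note) sigma0 may come out as 0.0 here, for example when the
+        # warm-start source solutions are all identical, so it is floored at the
+        # tiny positive constant _EPS before being handed to cmaes.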
+ sigma0 = max(sigma0, _EPS) + + if self._use_separable_cma: + return cmaes.SepCMA( + mean=mean, + sigma=sigma0, + bounds=trans.bounds, + seed=self._cma_rng.rng.randint(1, 2**31 - 2), + n_max_resampling=10 * n_dimension, + population_size=self._popsize, + ) + + if self._with_margin: + steps = np.empty(len(trans._search_space), dtype=float) + for i, dist in enumerate(trans._search_space.values()): + assert isinstance(dist, (IntDistribution, FloatDistribution)) + # Set step 0.0 for continuous search space. + if dist.step is None or dist.log: + steps[i] = 0.0 + elif dist.low == dist.high: + steps[i] = 1.0 + else: + steps[i] = dist.step / (dist.high - dist.low) + + return cmaes.CMAwM( + mean=mean, + sigma=sigma0, + bounds=trans.bounds, + steps=steps, + cov=cov, + seed=self._cma_rng.rng.randint(1, 2**31 - 2), + n_max_resampling=10 * n_dimension, + population_size=self._popsize, + ) + + return cmaes.CMA( + mean=mean, + sigma=sigma0, + cov=cov, + bounds=trans.bounds, + seed=self._cma_rng.rng.randint(1, 2**31 - 2), + n_max_resampling=10 * n_dimension, + population_size=self._popsize, + lr_adapt=self._lr_adapt, + ) + + def sample_independent( + self, + study: "optuna.Study", + trial: "optuna.trial.FrozenTrial", + param_name: str, + param_distribution: BaseDistribution, + ) -> Any: + self._raise_error_if_multi_objective(study) + + if self._warn_independent_sampling: + complete_trials = self._get_trials(study) + if len(complete_trials) >= self._n_startup_trials: + self._log_independent_sampling(trial, param_name) + + return self._independent_sampler.sample_independent( + study, trial, param_name, param_distribution + ) + + def _log_independent_sampling(self, trial: FrozenTrial, param_name: str) -> None: + _logger.warning( + "The parameter '{}' in trial#{} is sampled independently " + "by using `{}` instead of `CmaEsSampler` " + "(optimization performance may be degraded). " + "`CmaEsSampler` does not support dynamic search space or `CategoricalDistribution`. " + "You can suppress this warning by setting `warn_independent_sampling` " + "to `False` in the constructor of `CmaEsSampler`, " + "if this independent sampling is intended behavior.".format( + param_name, trial.number, self._independent_sampler.__class__.__name__ + ) + ) + + def _get_trials(self, study: "optuna.Study") -> list[FrozenTrial]: + complete_trials = [] + for t in study._get_trials(deepcopy=False, use_cache=True): + if t.state == TrialState.COMPLETE: + complete_trials.append(t) + elif ( + t.state == TrialState.PRUNED + and len(t.intermediate_values) > 0 + and self._consider_pruned_trials + ): + _, value = max(t.intermediate_values.items()) + if value is None: + continue + # We rewrite the value of the trial `t` for sampling, so we need a deepcopy. 
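+                # (Descriptive note) max() over intermediate_values.items() picks the
+                # entry with the largest step, i.e. the last reported value; it is set
+                # on a copy so the cached FrozenTrial in the study stays unchanged.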
+ copied_t = copy.deepcopy(t) + copied_t.value = value + complete_trials.append(copied_t) + return complete_trials + + def _get_solution_trials( + self, trials: list[FrozenTrial], generation: int + ) -> list[FrozenTrial]: + generation_attr_key = self._attr_key_generation + return [t for t in trials if generation == t.system_attrs.get(generation_attr_key, -1)] + + def before_trial(self, study: optuna.Study, trial: FrozenTrial) -> None: + self._independent_sampler.before_trial(study, trial) + + def after_trial( + self, + study: "optuna.Study", + trial: "optuna.trial.FrozenTrial", + state: TrialState, + values: Sequence[float] | None, + ) -> None: + self._independent_sampler.after_trial(study, trial, state, values) + + +def _is_compatible_search_space( + trans: _SearchSpaceTransform, search_space: dict[str, BaseDistribution] +) -> bool: + intersection_size = len(set(trans._search_space.keys()).intersection(search_space.keys())) + return intersection_size == len(trans._search_space) == len(search_space) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/_ga/__init__.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/_ga/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c76b3b0eaae4aab4ca0e9f40c94860acd3674cf5 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/_ga/__init__.py @@ -0,0 +1,4 @@ +from optuna.samplers._ga._base import BaseGASampler + + +__all__ = ["BaseGASampler"] diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/_ga/_base.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/_ga/_base.py new file mode 100644 index 0000000000000000000000000000000000000000..323d4735ba208471137f58cd6877336d673f5707 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/_ga/_base.py @@ -0,0 +1,183 @@ +from __future__ import annotations + +import abc +from typing import Any + +import optuna +from optuna.samplers._base import BaseSampler +from optuna.trial._frozen import FrozenTrial +from optuna.trial._state import TrialState + + +# TODO(gen740): Add the experimental decorator? +class BaseGASampler(BaseSampler, abc.ABC): + """Base class for Genetic Algorithm (GA) samplers. + + Genetic Algorithm samplers generate new trials by mimicking natural selection, using + generations and populations to iteratively improve solutions. This base class defines the + interface for GA samplers in Optuna and provides utility methods for managing generations and + populations. + + The selection process is handled by :meth:`~BaseGASampler.select_parent`, which must be + implemented by subclasses to define the parent selection strategy. + + Generation and population management is facilitated by methods like + :meth:`~BaseGASampler.get_generation` and :meth:`~BaseGASampler.get_population`, ensuring + consistent tracking and selection. + + Note: + This class should be extended by subclasses that define specific GA sampling strategies, + including parent selection and crossover operations. 
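+
+    A minimal, purely illustrative ``select_parent`` sketch (hypothetical truncation
+    selection for a single-objective, minimizing study)::
+
+        def select_parent(self, study, generation):
+            # Assumption: parents are drawn from the previous generation's
+            # completed population, ranked by objective value (lower is better).
+            population = self.get_population(study, generation - 1)
+            population.sort(key=lambda t: t.value)
+            return population[: max(2, len(population) // 2)]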
+ """ + + _GENERATION_KEY = "BaseGASampler:generation" + _PARENT_CACHE_KEY_PREFIX = "BaseGASampler:parent:" + + def __init_subclass__(cls, **kwargs: Any): + super().__init_subclass__(**kwargs) + cls._GENERATION_KEY = f"{cls.__name__}:generation" + cls._PARENT_CACHE_KEY_PREFIX = f"{cls.__name__}:parent:" + + @classmethod + def _get_generation_key(cls) -> str: + return cls._GENERATION_KEY + + @classmethod + def _get_parent_cache_key_prefix(cls) -> str: + return cls._PARENT_CACHE_KEY_PREFIX + + def __init__(self, population_size: int | None): + self._population_size = population_size + + @property + def population_size(self) -> int | None: + return self._population_size + + @population_size.setter + def population_size(self, value: int) -> None: + self._population_size = value + + @abc.abstractmethod + def select_parent(self, study: optuna.Study, generation: int) -> list[FrozenTrial]: + """Select parent trials from the population for the given generation. + + This method is called once per generation to select parents from + the population of the current generation. + + Output of this function is cached in the study system attributes. + + This method must be implemented in a subclass to define the specific selection strategy. + + Args: + study: + Target study object. + generation: + Target generation number. + + Returns: + List of parent frozen trials. + """ + raise NotImplementedError + + def get_trial_generation(self, study: optuna.Study, trial: FrozenTrial) -> int: + """Get the generation number of the given trial. + + This method returns the generation number of the specified trial. If the generation number + is not set in the trial's system attributes, it will calculate and set the generation + number. + + The current generation number depends on the maximum generation number of all completed + trials. + + Args: + study: + Study object which trial belongs to. + trial: + Trial object to get the generation number. + + Returns: + Generation number of the given trial. + """ + generation = trial.system_attrs.get(self._get_generation_key(), None) + if generation is not None: + return generation + + trials = study._get_trials(deepcopy=False, states=[TrialState.COMPLETE], use_cache=True) + + max_generation, max_generation_count = 0, 0 + + for t in reversed(trials): + generation = t.system_attrs.get(self._get_generation_key(), -1) + + if generation < max_generation: + continue + elif generation > max_generation: + max_generation = generation + max_generation_count = 1 + else: + max_generation_count += 1 + + assert self._population_size is not None, "Population size must be set." + if max_generation_count < self._population_size: + generation = max_generation + else: + generation = max_generation + 1 + study._storage.set_trial_system_attr( + trial._trial_id, self._get_generation_key(), generation + ) + return generation + + def get_population(self, study: optuna.Study, generation: int) -> list[FrozenTrial]: + """Get the population of the given generation. + + Args: + study: + Target study object. + generation: + Target generation number. + + Returns: + List of frozen trials in the given generation. + """ + return [ + trial + for trial in study._get_trials( + deepcopy=False, states=[TrialState.COMPLETE], use_cache=True + ) + if trial.system_attrs.get(self._get_generation_key(), None) == generation + ] + + def get_parent_population(self, study: optuna.Study, generation: int) -> list[FrozenTrial]: + """Get the parent population of the given generation. 
+ + This method caches the parent population in the study's system attributes. + + Args: + study: + Target study object. + generation: + Target generation number. + + Returns: + List of parent frozen trials. If `generation == 0`, returns an empty list. + """ + if generation == 0: + return [] + + study_system_attrs = study._storage.get_study_system_attrs(study._study_id) + cached_parent_population_ids = study_system_attrs.get( + self._get_parent_cache_key_prefix() + str(generation), None + ) + + if cached_parent_population_ids is not None: + trials = study._get_trials(deepcopy=False) + parent_population_ids = set(cached_parent_population_ids) + return [trial for trial in trials if trial._trial_id in parent_population_ids] + else: + parent_population = self.select_parent(study, generation) + study._storage.set_study_system_attr( + study._study_id, + self._get_parent_cache_key_prefix() + str(generation), + [trial._trial_id for trial in parent_population], + ) + return parent_population diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/_gp/__init__.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/_gp/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ff09f1235a93da0998f5eee1df2a7aab2e291140 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/_gp/__init__.py @@ -0,0 +1,4 @@ +from optuna.samplers._gp.sampler import GPSampler + + +__all__ = ["GPSampler"] diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/_gp/sampler.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/_gp/sampler.py new file mode 100644 index 0000000000000000000000000000000000000000..b2555bdbf201ebaaf88055d17d9e357005ccfe53 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/_gp/sampler.py @@ -0,0 +1,430 @@ +from __future__ import annotations + +from typing import Any +from typing import TYPE_CHECKING + +import numpy as np + +import optuna +from optuna._experimental import experimental_class +from optuna._experimental import warn_experimental_argument +from optuna.samplers._base import _CONSTRAINTS_KEY +from optuna.samplers._base import _process_constraints_after_trial +from optuna.samplers._base import BaseSampler +from optuna.samplers._lazy_random_state import LazyRandomState +from optuna.study import StudyDirection +from optuna.study._multi_objective import _is_pareto_front +from optuna.trial import FrozenTrial +from optuna.trial import TrialState + + +if TYPE_CHECKING: + from collections.abc import Callable + from collections.abc import Sequence + + import torch + + import optuna._gp.acqf as acqf + import optuna._gp.gp as gp + import optuna._gp.optim_mixed as optim_mixed + import optuna._gp.prior as prior + import optuna._gp.search_space as gp_search_space + from optuna.distributions import BaseDistribution + from optuna.study import Study +else: + from optuna._imports import _LazyImport + + torch = _LazyImport("torch") + gp_search_space = _LazyImport("optuna._gp.search_space") + gp = _LazyImport("optuna._gp.gp") + optim_mixed = _LazyImport("optuna._gp.optim_mixed") + acqf = _LazyImport("optuna._gp.acqf") + prior = _LazyImport("optuna._gp.prior") + + +EPS = 1e-10 + + +def _standardize_values(values: np.ndarray) -> tuple[np.ndarray, np.ndarray, np.ndarray]: + 
clipped_values = gp.warn_and_convert_inf(values) + means = np.mean(clipped_values, axis=0) + stds = np.std(clipped_values, axis=0) + standardized_values = (clipped_values - means) / np.maximum(EPS, stds) + return standardized_values, means, stds + + +@experimental_class("3.6.0") +class GPSampler(BaseSampler): + """Sampler using Gaussian process-based Bayesian optimization. + + This sampler fits a Gaussian process (GP) to the objective function and optimizes + the acquisition function to suggest the next parameters. + + The current implementation uses Matern kernel with nu=2.5 (twice differentiable) with automatic + relevance determination (ARD) for the length scale of each parameter. + The hyperparameters of the kernel are obtained by maximizing the marginal log-likelihood of the + hyperparameters given the past trials. + To prevent overfitting, Gamma prior is introduced for kernel scale and noise variance and + a hand-crafted prior is introduced for inverse squared lengthscales. + + As an acquisition function, we use: + + - log expected improvement (logEI) for single-objective optimization, + - log expected hypervolume improvement (logEHVI) for Multi-objective optimization, and + - the summation of logEI and the logarithm of the feasible probability with the independent + assumption of each constraint for (black-box inequality) constrained optimization. + + For further information about these acquisition functions, please refer to the following + papers: + + - `Unexpected Improvements to Expected Improvement for Bayesian Optimization + `__ + - `Differentiable Expected Hypervolume Improvement for Parallel Multi-Objective Bayesian + Optimization `__ + - `Bayesian Optimization with Inequality Constraints + `__ + + The optimization of the acquisition function is performed via: + + 1. Collect the best param from the past trials, + 2. Collect ``n_preliminary_samples`` points using Quasi-Monte Carlo (QMC) sampling, + 3. Choose the best point from the collected points, + 4. Choose ``n_local_search - 2`` points from the collected points using the roulette + selection, + 5. Perform a local search for each chosen point as an initial point, and + 6. Return the point with the best acquisition function value as the next parameter. + + Note that the procedures for non single-objective optimization setups are slightly different + from the single-objective version described above, but we omit the descriptions for the others + for brevity. + + The local search iteratively optimizes the acquisition function by repeating: + + 1. Gradient ascent using l-BFGS-B for continuous parameters, and + 2. Line search or exhaustive search for each discrete parameter independently. + + The local search is terminated if the routine stops updating the best parameter set or the + maximum number of iterations is reached. + + We use line search instead of rounding the results from the continuous optimization since EI + typically yields a high value between one grid and its adjacent grid. + + .. note:: + This sampler requires ``scipy`` and ``torch``. + You can install these dependencies with ``pip install scipy torch``. + + Args: + seed: + Random seed to initialize internal random number generator. + Defaults to :obj:`None` (a seed is picked randomly). + independent_sampler: + Sampler used for initial sampling (for the first ``n_startup_trials`` trials) + and for conditional parameters. Defaults to :obj:`None` + (a random sampler with the same ``seed`` is used). + n_startup_trials: + Number of initial trials. Defaults to 10. 
+ deterministic_objective: + Whether the objective function is deterministic or not. + If :obj:`True`, the sampler will fix the noise variance of the surrogate model to + the minimum value (slightly above 0 to ensure numerical stability). + Defaults to :obj:`False`. Currently, all the objectives will be assume to be + deterministic if :obj:`True`. + constraints_func: + An optional function that computes the objective constraints. It must take a + :class:`~optuna.trial.FrozenTrial` and return the constraints. The return value must + be a sequence of :obj:`float` s. A value strictly larger than 0 means that a + constraints is violated. A value equal to or smaller than 0 is considered feasible. + If ``constraints_func`` returns more than one value for a trial, that trial is + considered feasible if and only if all values are equal to 0 or smaller. + + The ``constraints_func`` will be evaluated after each successful trial. + The function won't be called when trials fail or are pruned, but this behavior is + subject to change in future releases. + Currently, the ``constraints_func`` option is not supported for multi-objective + optimization. + """ + + def __init__( + self, + *, + seed: int | None = None, + independent_sampler: BaseSampler | None = None, + n_startup_trials: int = 10, + deterministic_objective: bool = False, + constraints_func: Callable[[FrozenTrial], Sequence[float]] | None = None, + ) -> None: + self._rng = LazyRandomState(seed) + self._independent_sampler = independent_sampler or optuna.samplers.RandomSampler(seed=seed) + self._intersection_search_space = optuna.search_space.IntersectionSearchSpace() + self._n_startup_trials = n_startup_trials + self._log_prior: Callable[[gp.KernelParamsTensor], torch.Tensor] = prior.default_log_prior + self._minimum_noise: float = prior.DEFAULT_MINIMUM_NOISE_VAR + # We cache the kernel parameters for initial values of fitting the next time. + # TODO(nabenabe): Make the cache lists system_attrs to make GPSampler stateless. + self._kernel_params_cache_list: list[gp.KernelParamsTensor] | None = None + self._constraints_kernel_params_cache: list[gp.KernelParamsTensor] | None = None + self._deterministic = deterministic_objective + self._constraints_func = constraints_func + + if constraints_func is not None: + warn_experimental_argument("constraints_func") + + # Control parameters of the acquisition function optimization. + self._n_preliminary_samples: int = 2048 + # NOTE(nabenabe): ehvi in BoTorchSampler uses 20. + self._n_local_search = 10 + self._tol = 1e-4 + + def reseed_rng(self) -> None: + self._rng.rng.seed() + self._independent_sampler.reseed_rng() + + def infer_relative_search_space( + self, study: Study, trial: FrozenTrial + ) -> dict[str, BaseDistribution]: + search_space = {} + for name, distribution in self._intersection_search_space.calculate(study).items(): + if distribution.single(): + continue + search_space[name] = distribution + + return search_space + + def _optimize_acqf( + self, + acqf_params: acqf.AcquisitionFunctionParams, + best_params: np.ndarray | None, + ) -> np.ndarray: + # Advanced users can override this method to change the optimization algorithm. + # However, we do not make any effort to keep backward compatibility between versions. + # Particularly, we may remove this function in future refactoring. 
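+        # (Descriptive note) `best_params`, when given, is a 2-d array of already
+        # normalized parameter vectors (e.g. the incumbent or sampled Pareto
+        # solutions) used to warm-start the local searches below.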
+ assert best_params is None or len(best_params.shape) == 2 + normalized_params, _acqf_val = optim_mixed.optimize_acqf_mixed( + acqf_params, + warmstart_normalized_params_array=best_params, + n_preliminary_samples=self._n_preliminary_samples, + n_local_search=self._n_local_search, + tol=self._tol, + rng=self._rng.rng, + ) + return normalized_params + + def _get_constraints_acqf_params( + self, + constraint_vals: np.ndarray, + internal_search_space: gp_search_space.SearchSpace, + normalized_params: np.ndarray, + ) -> list[acqf.AcquisitionFunctionParams]: + standardized_constraint_vals, means, stds = _standardize_values(constraint_vals) + if self._kernel_params_cache_list is not None and len( + self._kernel_params_cache_list[0].inverse_squared_lengthscales + ) != len(internal_search_space.scale_types): + # Clear cache if the search space changes. + self._constraints_kernel_params_cache = None + + is_categorical = internal_search_space.scale_types == gp_search_space.ScaleType.CATEGORICAL + constraints_kernel_params = [] + constraints_acqf_params = [] + for i, (vals, mean, std) in enumerate(zip(standardized_constraint_vals.T, means, stds)): + cache = ( + self._constraints_kernel_params_cache[i] + if self._constraints_kernel_params_cache is not None + else None + ) + kernel_params = gp.fit_kernel_params( + X=normalized_params, + Y=vals, + is_categorical=is_categorical, + log_prior=self._log_prior, + minimum_noise=self._minimum_noise, + initial_kernel_params=cache, + deterministic_objective=self._deterministic, + ) + constraints_kernel_params.append(kernel_params) + + constraints_acqf_params.append( + acqf.create_acqf_params( + acqf_type=acqf.AcquisitionFunctionType.LOG_PI, + kernel_params=kernel_params, + search_space=internal_search_space, + X=normalized_params, + Y=vals, + # Since 0 is the threshold value, we use the normalized value of 0. + max_Y=-mean / max(EPS, std), + ) + ) + + self._constraints_kernel_params_cache = constraints_kernel_params + + return constraints_acqf_params + + def sample_relative( + self, study: Study, trial: FrozenTrial, search_space: dict[str, BaseDistribution] + ) -> dict[str, Any]: + if study._is_multi_objective() and self._constraints_func is not None: + raise ValueError( + "GPSampler does not support constrained multi-objective optimization." + ) + + if search_space == {}: + return {} + + states = (TrialState.COMPLETE,) + trials = study._get_trials(deepcopy=False, states=states, use_cache=True) + + if len(trials) < self._n_startup_trials: + return {} + + ( + internal_search_space, + normalized_params, + ) = gp_search_space.get_search_space_and_normalized_params(trials, search_space) + + _sign = np.array([-1.0 if d == StudyDirection.MINIMIZE else 1.0 for d in study.directions]) + standardized_score_vals, _, _ = _standardize_values( + _sign * np.array([trial.values for trial in trials]) + ) + + if self._kernel_params_cache_list is not None and len( + self._kernel_params_cache_list[0].inverse_squared_lengthscales + ) != len(internal_search_space.scale_types): + # Clear cache if the search space changes. 
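+            # (Descriptive note) The cache is keyed only by dimensionality, so when
+            # the intersection search space gains or loses a parameter, the GP fit
+            # below restarts from its default initialization.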
+ self._kernel_params_cache_list = None + + kernel_params_list = [] + n_objectives = standardized_score_vals.shape[-1] + is_categorical = internal_search_space.scale_types == gp_search_space.ScaleType.CATEGORICAL + for i in range(n_objectives): + cache = ( + self._kernel_params_cache_list[i] + if self._kernel_params_cache_list is not None + else None + ) + kernel_params_list.append( + gp.fit_kernel_params( + X=normalized_params, + Y=standardized_score_vals[:, i], + is_categorical=is_categorical, + log_prior=self._log_prior, + minimum_noise=self._minimum_noise, + initial_kernel_params=cache, + deterministic_objective=self._deterministic, + ) + ) + self._kernel_params_cache_list = kernel_params_list + + best_params: np.ndarray | None + if self._constraints_func is None: + if n_objectives == 1: + assert len(kernel_params_list) == 1 + acqf_params = acqf.create_acqf_params( + acqf_type=acqf.AcquisitionFunctionType.LOG_EI, + kernel_params=kernel_params_list[0], + search_space=internal_search_space, + X=normalized_params, + Y=standardized_score_vals[:, 0], + ) + best_params = normalized_params[np.argmax(standardized_score_vals), np.newaxis] + else: + acqf_params_for_objectives = [] + for i in range(n_objectives): + acqf_params_for_objectives.append( + acqf.create_acqf_params( + acqf_type=acqf.AcquisitionFunctionType.LOG_EHVI, + kernel_params=kernel_params_list[i], + search_space=internal_search_space, + X=normalized_params, + Y=standardized_score_vals[:, i], + ) + ) + acqf_params = acqf.MultiObjectiveAcquisitionFunctionParams.from_acqf_params( + acqf_params_for_objectives=acqf_params_for_objectives, + Y=standardized_score_vals, + n_qmc_samples=128, # NOTE(nabenabe): The BoTorch default value. + qmc_seed=self._rng.rng.randint(1 << 30), + ) + pareto_params = normalized_params[ + _is_pareto_front(-standardized_score_vals, assume_unique_lexsorted=False) + ] + n_pareto_sols = len(pareto_params) + # TODO(nabenabe): Verify the validity of this choice. + size = min(self._n_local_search // 2, n_pareto_sols) + chosen_indices = self._rng.rng.choice(n_pareto_sols, size=size, replace=False) + best_params = pareto_params[chosen_indices] + else: + assert ( + n_objectives == len(kernel_params_list) == 1 + ), "Multi-objective has not been supported." + constraint_vals, is_feasible = _get_constraint_vals_and_feasibility(study, trials) + is_all_infeasible = not np.any(is_feasible) + + # TODO(kAIto47802): If is_all_infeasible, the acquisition function for the objective + # function is ignored, so skipping the computation of kernel_params and acqf_params + # can improve speed. + # TODO(kAIto47802): Consider the case where all trials are feasible. We can ignore + # constraints in this case. 
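+            # (Descriptive note) max_Y is the incumbent used by log-EI: the best
+            # standardized objective value among feasible trials, or -inf when no
+            # trial is feasible so that any feasible candidate counts as an
+            # improvement.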
+ max_Y = -np.inf if is_all_infeasible else np.max(standardized_score_vals[is_feasible]) + acqf_params = acqf.create_acqf_params( + acqf_type=acqf.AcquisitionFunctionType.LOG_EI, + kernel_params=kernel_params_list[0], + search_space=internal_search_space, + X=normalized_params, + Y=standardized_score_vals[:, 0], + max_Y=max_Y, + ) + constraints_acqf_params = self._get_constraints_acqf_params( + constraint_vals, internal_search_space, normalized_params + ) + acqf_params = acqf.ConstrainedAcquisitionFunctionParams.from_acqf_params( + acqf_params, constraints_acqf_params + ) + best_params = ( + None + if is_all_infeasible + else normalized_params[np.argmax(standardized_score_vals[is_feasible]), np.newaxis] + ) + + normalized_param = self._optimize_acqf(acqf_params, best_params) + return gp_search_space.get_unnormalized_param(search_space, normalized_param) + + def sample_independent( + self, + study: Study, + trial: FrozenTrial, + param_name: str, + param_distribution: BaseDistribution, + ) -> Any: + return self._independent_sampler.sample_independent( + study, trial, param_name, param_distribution + ) + + def before_trial(self, study: Study, trial: FrozenTrial) -> None: + self._independent_sampler.before_trial(study, trial) + + def after_trial( + self, + study: Study, + trial: FrozenTrial, + state: TrialState, + values: Sequence[float] | None, + ) -> None: + if self._constraints_func is not None: + _process_constraints_after_trial(self._constraints_func, study, trial, state) + self._independent_sampler.after_trial(study, trial, state, values) + + +def _get_constraint_vals_and_feasibility( + study: Study, trials: list[FrozenTrial] +) -> tuple[np.ndarray, np.ndarray]: + _constraint_vals = [ + study._storage.get_trial_system_attrs(trial._trial_id).get(_CONSTRAINTS_KEY, ()) + for trial in trials + ] + if any(len(_constraint_vals[0]) != len(c) for c in _constraint_vals): + raise ValueError("The number of constraints must be the same for all trials.") + + constraint_vals = np.array(_constraint_vals) + assert len(constraint_vals.shape) == 2, "constraint_vals must be a 2d array." + is_feasible = np.all(constraint_vals <= 0, axis=1) + assert not isinstance(is_feasible, np.bool_), "MyPy Redefinition for NumPy v2.2.0." + return constraint_vals, is_feasible diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/_grid.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/_grid.py new file mode 100644 index 0000000000000000000000000000000000000000..7a87576ed8e237080c9aa2ddd03ebdb4486cd1d6 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/_grid.py @@ -0,0 +1,286 @@ +from __future__ import annotations + +from collections.abc import Mapping +from collections.abc import Sequence +import itertools +from numbers import Real +from typing import Any +from typing import TYPE_CHECKING +from typing import Union +import warnings + +import numpy as np + +from optuna.distributions import BaseDistribution +from optuna.logging import get_logger +from optuna.samplers import BaseSampler +from optuna.samplers._lazy_random_state import LazyRandomState +from optuna.trial import FrozenTrial +from optuna.trial import TrialState + + +if TYPE_CHECKING: + from optuna.study import Study + + +GridValueType = Union[str, float, int, bool, None] + + +_logger = get_logger(__name__) + + +class GridSampler(BaseSampler): + """Sampler using grid search. 
+ + With :class:`~optuna.samplers.GridSampler`, the trials suggest all combinations of parameters + in the given search space during the study. + + Example: + + .. testcode:: + + import optuna + + + def objective(trial): + x = trial.suggest_float("x", -100, 100) + y = trial.suggest_int("y", -100, 100) + return x**2 + y**2 + + + search_space = {"x": [-50, 0, 50], "y": [-99, 0, 99]} + study = optuna.create_study(sampler=optuna.samplers.GridSampler(search_space)) + study.optimize(objective) + + Note: + + This sampler with :ref:`ask_and_tell` raises :exc:`RuntimeError` just after evaluating + the final grid. This is because :class:`~optuna.samplers.GridSampler` automatically + stops the optimization if all combinations in the passed ``search_space`` have already + been evaluated, internally invoking the :func:`~optuna.study.Study.stop` method. + As a workaround, we need to handle the error manually as in + https://github.com/optuna/optuna/issues/4121#issuecomment-1305289910. + + Note: + + :class:`~optuna.samplers.GridSampler` does not take care of a parameter's quantization + specified by discrete suggest methods but just samples one of values specified in the + search space. E.g., in the following code snippet, either of ``-0.5`` or ``0.5`` is + sampled as ``x`` instead of an integer point. + + .. testcode:: + + import optuna + + + def objective(trial): + # The following suggest method specifies integer points between -5 and 5. + x = trial.suggest_float("x", -5, 5, step=1) + return x**2 + + + # Non-int points are specified in the grid. + search_space = {"x": [-0.5, 0.5]} + study = optuna.create_study(sampler=optuna.samplers.GridSampler(search_space)) + study.optimize(objective, n_trials=2) + + Note: + A parameter configuration in the grid is not considered finished until its trial is + finished. Therefore, during distributed optimization where trials run concurrently, + different workers will occasionally suggest the same parameter configuration. + The total number of actual trials may therefore exceed the size of the grid. + + Note: + All parameters must be specified when using :class:`~optuna.samplers.GridSampler` with + :meth:`~optuna.study.Study.enqueue_trial`. + + Args: + search_space: + A dictionary whose key and value are a parameter name and the corresponding candidates + of values, respectively. + seed: + A seed to fix the order of trials as the grid is randomly shuffled. This shuffle is + beneficial when the number of grids is larger than ``n_trials`` in + :meth:`~optuna.Study.optimize` to suppress suggesting similar grids. Please note + that fixing ``seed`` for each process is strongly recommended in distributed + optimization to avoid duplicated suggestions. 
+ """ + + def __init__( + self, search_space: Mapping[str, Sequence[GridValueType]], seed: int | None = None + ) -> None: + for param_name, param_values in search_space.items(): + for value in param_values: + self._check_value(param_name, value) + + self._search_space = {} + for param_name, param_values in sorted(search_space.items()): + self._search_space[param_name] = list(param_values) + + self._all_grids = list(itertools.product(*self._search_space.values())) + self._param_names = sorted(search_space.keys()) + self._n_min_trials = len(self._all_grids) + self._rng = LazyRandomState(seed or 0) + self._rng.rng.shuffle(self._all_grids) # type: ignore[arg-type] + + def reseed_rng(self) -> None: + self._rng.rng.seed() + + def before_trial(self, study: Study, trial: FrozenTrial) -> None: + # Instead of returning param values, GridSampler puts the target grid id as a system attr, + # and the values are returned from `sample_independent`. This is because the distribution + # object is hard to get at the beginning of trial, while we need the access to the object + # to validate the sampled value. + + # When the trial is created by RetryFailedTrialCallback or enqueue_trial, we should not + # assign a new grid_id. + if "grid_id" in trial.system_attrs or "fixed_params" in trial.system_attrs: + return + + if 0 <= trial.number and trial.number < self._n_min_trials: + study._storage.set_trial_system_attr( + trial._trial_id, "search_space", self._search_space + ) + study._storage.set_trial_system_attr(trial._trial_id, "grid_id", trial.number) + return + + target_grids = self._get_unvisited_grid_ids(study) + + if len(target_grids) == 0: + # This case may occur with distributed optimization or trial queue. If there is no + # target grid, `GridSampler` evaluates a visited, duplicated point with the current + # trial. After that, the optimization stops. + + _logger.warning( + "`GridSampler` is re-evaluating a configuration because the grid has been " + "exhausted. This may happen due to a timing issue during distributed optimization " + "or when re-running optimizations on already finished studies." + ) + + # One of all grids is randomly picked up in this case. + target_grids = list(range(len(self._all_grids))) + + # In distributed optimization, multiple workers may simultaneously pick up the same grid. + # To make the conflict less frequent, the grid is chosen randomly. + grid_id = int(self._rng.rng.choice(target_grids)) + + study._storage.set_trial_system_attr(trial._trial_id, "search_space", self._search_space) + study._storage.set_trial_system_attr(trial._trial_id, "grid_id", grid_id) + + def infer_relative_search_space( + self, study: Study, trial: FrozenTrial + ) -> dict[str, BaseDistribution]: + return {} + + def sample_relative( + self, study: Study, trial: FrozenTrial, search_space: dict[str, BaseDistribution] + ) -> dict[str, Any]: + return {} + + def sample_independent( + self, + study: Study, + trial: FrozenTrial, + param_name: str, + param_distribution: BaseDistribution, + ) -> Any: + if "grid_id" not in trial.system_attrs: + message = "All parameters must be specified when using GridSampler with enqueue_trial." 
+ raise ValueError(message) + + if param_name not in self._search_space: + message = "The parameter name, {}, is not found in the given grid.".format(param_name) + raise ValueError(message) + + grid_id = trial.system_attrs["grid_id"] + param_value = self._all_grids[grid_id][self._param_names.index(param_name)] + contains = param_distribution._contains(param_distribution.to_internal_repr(param_value)) + if not contains: + warnings.warn( + f"The value `{param_value}` is out of range of the parameter `{param_name}`. " + f"The value will be used but the actual distribution is: `{param_distribution}`." + ) + + return param_value + + def after_trial( + self, + study: Study, + trial: FrozenTrial, + state: TrialState, + values: Sequence[float] | None, + ) -> None: + target_grids = self._get_unvisited_grid_ids(study) + + if len(target_grids) == 0: + study.stop() + elif len(target_grids) == 1: + grid_id = study._storage.get_trial_system_attrs(trial._trial_id)["grid_id"] + if grid_id == target_grids[0]: + study.stop() + + @staticmethod + def _check_value(param_name: str, param_value: Any) -> None: + if param_value is None or isinstance(param_value, (str, int, float, bool)): + return + + message = ( + "{} contains a value with the type of {}, which is not supported by " + "`GridSampler`. Please make sure a value is `str`, `int`, `float`, `bool`" + " or `None` for persistent storage.".format(param_name, type(param_value)) + ) + warnings.warn(message) + + def _get_unvisited_grid_ids(self, study: Study) -> list[int]: + # List up unvisited grids based on already finished ones. + visited_grids = [] + running_grids = [] + + # We directly query the storage to get trials here instead of `study.get_trials`, + # since some pruners such as `HyperbandPruner` use the study transformed + # to filter trials. See https://github.com/optuna/optuna/issues/2327 for details. + trials = study._storage.get_all_trials(study._study_id, deepcopy=False) + + for t in trials: + if "grid_id" in t.system_attrs and self._same_search_space( + t.system_attrs["search_space"] + ): + if t.state.is_finished(): + visited_grids.append(t.system_attrs["grid_id"]) + elif t.state == TrialState.RUNNING: + running_grids.append(t.system_attrs["grid_id"]) + + unvisited_grids = set(range(self._n_min_trials)) - set(visited_grids) - set(running_grids) + + # If evaluations for all grids have been started, return grids that have not yet finished + # because all grids should be evaluated before stopping the optimization. 
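+ # In other words, if every grid is either finished or currently running, fall back to
+ # treating the running grids as unvisited so that the study is not stopped before they
+ # complete.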
+ if len(unvisited_grids) == 0: + unvisited_grids = set(range(self._n_min_trials)) - set(visited_grids) + + return list(unvisited_grids) + + @staticmethod + def _grid_value_equal(value1: GridValueType, value2: GridValueType) -> bool: + value1_is_nan = isinstance(value1, Real) and np.isnan(float(value1)) + value2_is_nan = isinstance(value2, Real) and np.isnan(float(value2)) + return (value1 == value2) or (value1_is_nan and value2_is_nan) + + def _same_search_space(self, search_space: Mapping[str, Sequence[GridValueType]]) -> bool: + if set(search_space.keys()) != set(self._search_space.keys()): + return False + + for param_name in search_space.keys(): + if len(search_space[param_name]) != len(self._search_space[param_name]): + return False + + for i, param_value in enumerate(search_space[param_name]): + if not self._grid_value_equal(param_value, self._search_space[param_name][i]): + return False + + return True + + def is_exhausted(self, study: Study) -> bool: + """ + Return True if all the possible params are evaluated, otherwise return False. + """ + return len(self._get_unvisited_grid_ids(study)) == 0 diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/_lazy_random_state.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/_lazy_random_state.py new file mode 100644 index 0000000000000000000000000000000000000000..3c2e23d0269adaad0adc03c88f5129927fbeff16 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/_lazy_random_state.py @@ -0,0 +1,27 @@ +from __future__ import annotations + +import numpy as np + + +class LazyRandomState: + """Lazy Random State class. + + + This is a class to initialize the random state just before use to prevent + duplication of the same random state when deepcopy is applied to the instance of sampler. 
+ """ + + def __init__(self, seed: int | None = None) -> None: + self._rng: np.random.RandomState | None = None + if seed is not None: + self.rng.seed(seed=seed) + + def _set_rng(self) -> None: + self._rng = np.random.RandomState() + + @property + def rng(self) -> np.random.RandomState: + if self._rng is None: + self._set_rng() + assert self._rng is not None + return self._rng diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/_nsgaiii/__init__.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/_nsgaiii/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/_nsgaiii/_elite_population_selection_strategy.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/_nsgaiii/_elite_population_selection_strategy.py new file mode 100644 index 0000000000000000000000000000000000000000..38154ffe6b329e0990ac84b49284f7f93d129dda --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/_nsgaiii/_elite_population_selection_strategy.py @@ -0,0 +1,305 @@ +from __future__ import annotations + +from collections import defaultdict +from collections.abc import Callable +from collections.abc import Sequence +from itertools import combinations_with_replacement +from typing import TYPE_CHECKING + +import numpy as np + +from optuna.samplers._lazy_random_state import LazyRandomState +from optuna.samplers.nsgaii._constraints_evaluation import _validate_constraints +from optuna.samplers.nsgaii._elite_population_selection_strategy import _rank_population +from optuna.trial import FrozenTrial + + +if TYPE_CHECKING: + from optuna.study import Study + + +# Define a coefficient for scaling intervals, used in _filter_inf() to replace +-inf. +_COEF = 3 + + +class NSGAIIIElitePopulationSelectionStrategy: + def __init__( + self, + *, + population_size: int, + constraints_func: Callable[[FrozenTrial], Sequence[float]] | None = None, + reference_points: np.ndarray | None = None, + dividing_parameter: int = 3, + rng: LazyRandomState, + ) -> None: + if population_size < 2: + raise ValueError("`population_size` must be greater than or equal to 2.") + + self._population_size = population_size + self._constraints_func = constraints_func + self._reference_points = reference_points + self._dividing_parameter = dividing_parameter + self._rng = rng + + def __call__(self, study: Study, population: list[FrozenTrial]) -> list[FrozenTrial]: + """Select elite population from the given trials by NSGA-III algorithm. + + Args: + study: + Target study object. + population: + Trials in the study. + + Returns: + A list of trials that are selected as elite population. + """ + _validate_constraints(population, is_constrained=self._constraints_func is not None) + population_per_rank = _rank_population( + population, study.directions, is_constrained=self._constraints_func is not None + ) + + elite_population: list[FrozenTrial] = [] + for population in population_per_rank: + if len(elite_population) + len(population) < self._population_size: + elite_population.extend(population) + else: + n_objectives = len(study.directions) + # Construct reference points in the first run. 
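+ # NOTE: Each default reference point generated below has non-negative integer coordinates
+ # summing to ``dividing_parameter``, so the grid contains
+ # C(n_objectives + dividing_parameter - 1, dividing_parameter) points in total
+ # (e.g. 10 points for 3 objectives with the default ``dividing_parameter=3``).
+ # For example, ``_generate_default_reference_point(3, 2)`` yields the 6 rows
+ # [2, 0, 0], [1, 1, 0], [1, 0, 1], [0, 2, 0], [0, 1, 1], [0, 0, 2].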
+ if self._reference_points is None: + self._reference_points = _generate_default_reference_point( + n_objectives, self._dividing_parameter + ) + elif np.shape(self._reference_points)[1] != n_objectives: + raise ValueError( + "The dimension of reference points vectors must be the same as the number " + "of objectives of the study." + ) + + # Normalize objective values after filtering +-inf. + objective_matrix = _normalize_objective_values( + _filter_inf(elite_population + population) + ) + ( + closest_reference_points, + distance_reference_points, + ) = _associate_individuals_with_reference_points( + objective_matrix, self._reference_points + ) + + elite_population_num = len(elite_population) + target_population_size = self._population_size - elite_population_num + additional_elite_population = _preserve_niche_individuals( + target_population_size, + elite_population_num, + population, + closest_reference_points, + distance_reference_points, + self._rng.rng, + ) + elite_population.extend(additional_elite_population) + break + return elite_population + + +def _generate_default_reference_point( + n_objectives: int, dividing_parameter: int = 3 +) -> np.ndarray: + """Generates default reference points which are `uniformly` spread on a hyperplane.""" + indices = np.array( + list(combinations_with_replacement(range(n_objectives), dividing_parameter)) + ) + row_indices = np.repeat(np.arange(len(indices)), dividing_parameter) + col_indices = indices.flatten() + reference_points = np.zeros((len(indices), n_objectives), dtype=float) + np.add.at(reference_points, (row_indices, col_indices), 1.0) + return reference_points + + +def _filter_inf(population: list[FrozenTrial]) -> np.ndarray: + objective_matrix = np.asarray([t.values for t in population]) + objective_matrix_with_nan = np.where(np.isfinite(objective_matrix), objective_matrix, np.nan) + max_objectives = np.nanmax(objective_matrix_with_nan, axis=0) + min_objectives = np.nanmin(objective_matrix_with_nan, axis=0) + margins = _COEF * (max_objectives - min_objectives) + return np.clip(objective_matrix, min_objectives - margins, max_objectives + margins) + + +def _normalize_objective_values(objective_matrix: np.ndarray) -> np.ndarray: + """Normalizes objective values of population. + + An ideal point z* consists of minimums in each axis. Each objective value of population is + then subtracted by the ideal point. + An extreme point of each axis is (originally) defined as a minimum solution of achievement + scalarizing function from the population. After that, intercepts are calculate as intercepts + of hyperplane which has all the extreme points on it and used to rescale objective values. + + We adopt weights and achievement scalarizing function(ASF) used in pre-print of the NSGA-III + paper (See https://www.egr.msu.edu/~kdeb/papers/k2012009.pdf). + """ + n_objectives = np.shape(objective_matrix)[1] + # Subtract ideal point from objective values. + objective_matrix -= np.min(objective_matrix, axis=0) + # Initialize weights. + weights = np.eye(n_objectives) + weights[weights == 0] = 1e6 + + # Calculate extreme points to normalize objective values. + # TODO(Shinichi) Reimplement to reduce time complexity. + asf_value = np.max( + np.einsum("nm,dm->dnm", objective_matrix, weights), + axis=2, + ) + extreme_points = objective_matrix[np.argmin(asf_value, axis=1), :] + + # Normalize objective_matrix with extreme points. + # Note that extreme_points can be degenerate, but no proper operation is remarked in the + # paper. 
Therefore, the maximum value of population in each axis is used in such cases. + if np.all(np.isfinite(extreme_points)) and np.linalg.matrix_rank(extreme_points) == len( + extreme_points + ): + intercepts_inv = np.linalg.solve(extreme_points, np.ones(n_objectives)) + else: + intercepts = np.max(objective_matrix, axis=0) + intercepts_inv = 1 / np.where(intercepts == 0, 1, intercepts) + objective_matrix *= np.where(np.isfinite(intercepts_inv), intercepts_inv, 1) + + return objective_matrix + + +def _associate_individuals_with_reference_points( + objective_matrix: np.ndarray, reference_points: np.ndarray +) -> tuple[np.ndarray, np.ndarray]: + """Associates each objective value to the closest reference point. + + Associate each normalized objective value to the closest reference point. The distance is + calculated by Euclidean norm. + + Args: + objective_matrix: + A 2 dimension ``numpy.ndarray`` with columns of objective dimension and rows of + generation size. Each row is the normalized objective value of the corresponding + individual. + + Returns: + closest_reference_points: + A ``numpy.ndarray`` with rows of generation size. Each row is the index of + the closest reference point to the corresponding individual. + distance_reference_points: + A ``numpy.ndarray`` with rows of generation size. Each row is the distance from + the corresponding individual to the closest reference point. + """ + # TODO(Shinichi) Implement faster assignment for the default reference points because it does + # not seem necessary to calculate distance from all reference points. + + # TODO(Shinichi) Normalize reference_points in constructor to remove reference_point_norms. + # In addition, the minimum distance from each reference point can be replaced with maximum + # inner product between the given individual and each normalized reference points. + + # distance_from_reference_lines is a ndarray of shape (n, p), where n is the size of the + # population and p is the number of reference points. Its (i,j) entry keeps distance between + # the i-th individual values and the j-th reference line. + reference_point_norm_squared = np.linalg.norm(reference_points, axis=1) ** 2 + perpendicular_vectors_to_reference_lines = np.einsum( + "ni,pi,p,pm->npm", + objective_matrix, + reference_points, + 1 / reference_point_norm_squared, + reference_points, + ) + distance_from_reference_lines = np.linalg.norm( + objective_matrix[:, np.newaxis, :] - perpendicular_vectors_to_reference_lines, + axis=2, + ) + closest_reference_points: np.ndarray = np.argmin(distance_from_reference_lines, axis=1) + distance_reference_points: np.ndarray = np.min(distance_from_reference_lines, axis=1) + + return closest_reference_points, distance_reference_points + + +def _preserve_niche_individuals( + target_population_size: int, + elite_population_num: int, + population: list[FrozenTrial], + closest_reference_points: np.ndarray, + distance_reference_points: np.ndarray, + rng: np.random.RandomState, +) -> list[FrozenTrial]: + """Determine who survives form the borderline front. + + Who survive form the borderline front is determined according to the sparsity of each closest + reference point. The algorithm picks a reference point from those who have the least neighbors + in elite population and adds one of borderline front member who has the same closest reference + point. + + Args: + target_population_size: + The number of individuals to select. + elite_population_num: + The number of individuals which are already selected as the elite population. 
+ population: + List of all the trials in the current surviving generation. + distance_reference_points: + A ``numpy.ndarray`` with rows of generation size. Each row is the distance from the + corresponding individual to the closest reference point. + closest_reference_points: + A ``numpy.ndarray`` with rows of generation size. Each row is the index of the closest + reference point to the corresponding individual. + rng: + Random number generator. + + Returns: + A list of trials which are selected as the next generation. + """ + if len(population) < target_population_size: + raise ValueError( + "The population size must be greater than or equal to the target population size." + ) + + # reference_point_to_borderline_population keeps pairs of a neighbor and the distance of + # each reference point from borderline front population. + reference_point_to_borderline_population = defaultdict(list) + for i, reference_point_idx in enumerate(closest_reference_points[elite_population_num:]): + population_idx = i + elite_population_num + reference_point_to_borderline_population[reference_point_idx].append( + (distance_reference_points[population_idx], i) + ) + + # reference_points_to_elite_population_count keeps how many elite neighbors each reference + # point has. + reference_point_to_elite_population_count: dict[int, int] = defaultdict(int) + for i, reference_point_idx in enumerate(closest_reference_points[:elite_population_num]): + reference_point_to_elite_population_count[reference_point_idx] += 1 + # nearest_points_count_to_reference_points classifies reference points which have at least one + # closest borderline population member by the number of elite neighbors they have. Each key + # corresponds to the number of elite neighbors and the value to the reference point indices. 
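+ # The niching loop below then repeatedly picks a reference point that currently has the
+ # fewest associated selected individuals (elite members plus those promoted so far) and
+ # promotes one of its borderline individuals: the closest one if the point has no such
+ # neighbor yet, otherwise a randomly chosen one, until ``target_population_size`` is reached.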
+ nearest_points_count_to_reference_points = defaultdict(list) + for reference_point_idx in reference_point_to_borderline_population: + elite_population_count = reference_point_to_elite_population_count[reference_point_idx] + nearest_points_count_to_reference_points[elite_population_count].append( + reference_point_idx + ) + + count = -1 + additional_elite_population: list[FrozenTrial] = [] + is_shuffled: defaultdict[int, bool] = defaultdict(bool) + while len(additional_elite_population) < target_population_size: + if len(nearest_points_count_to_reference_points[count]) == 0: + count += 1 + rng.shuffle(nearest_points_count_to_reference_points[count]) + continue + + reference_point_idx = nearest_points_count_to_reference_points[count].pop() + if count > 0 and not is_shuffled[reference_point_idx]: + rng.shuffle(reference_point_to_borderline_population[reference_point_idx]) + is_shuffled[reference_point_idx] = True + elif count == 0: + reference_point_to_borderline_population[reference_point_idx].sort(reverse=True) + + _, selected_individual_id = reference_point_to_borderline_population[ + reference_point_idx + ].pop() + additional_elite_population.append(population[selected_individual_id]) + if reference_point_to_borderline_population[reference_point_idx]: + nearest_points_count_to_reference_points[count + 1].append(reference_point_idx) + + return additional_elite_population diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/_nsgaiii/_sampler.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/_nsgaiii/_sampler.py new file mode 100644 index 0000000000000000000000000000000000000000..53c7c9c82b9c37c223c763446432d827e8482c85 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/_nsgaiii/_sampler.py @@ -0,0 +1,291 @@ +from __future__ import annotations + +from collections import defaultdict +from collections.abc import Callable +from collections.abc import Sequence +import hashlib +from typing import Any +from typing import TYPE_CHECKING + +import numpy as np + +import optuna +from optuna._experimental import experimental_class +from optuna.distributions import BaseDistribution +from optuna.samplers._base import BaseSampler +from optuna.samplers._lazy_random_state import LazyRandomState +from optuna.samplers._nsgaiii._elite_population_selection_strategy import ( + NSGAIIIElitePopulationSelectionStrategy, +) +from optuna.samplers._random import RandomSampler +from optuna.samplers.nsgaii._after_trial_strategy import NSGAIIAfterTrialStrategy +from optuna.samplers.nsgaii._child_generation_strategy import NSGAIIChildGenerationStrategy +from optuna.samplers.nsgaii._crossovers._base import BaseCrossover +from optuna.samplers.nsgaii._crossovers._uniform import UniformCrossover +from optuna.search_space import IntersectionSearchSpace +from optuna.trial import FrozenTrial +from optuna.trial import TrialState + + +if TYPE_CHECKING: + from optuna.study import Study + + +# Define key names of `Trial.system_attrs`. +_GENERATION_KEY = "nsga3:generation" +_POPULATION_CACHE_KEY_PREFIX = "nsga3:population" + + +@experimental_class("3.2.0") +class NSGAIIISampler(BaseSampler): + """Multi-objective sampler using the NSGA-III algorithm. + + NSGA-III stands for "Nondominated Sorting Genetic Algorithm III", + which is a modified version of NSGA-II for many objective optimization problem. 
+ + For further information about NSGA-III, please refer to the following papers: + + - `An Evolutionary Many-Objective Optimization Algorithm Using Reference-Point-Based + Nondominated Sorting Approach, Part I: Solving Problems With Box Constraints + `__ + - `An Evolutionary Many-Objective Optimization Algorithm Using Reference-Point-Based + Nondominated Sorting Approach, Part II: Handling Constraints and Extending to an Adaptive + Approach + `__ + + Args: + reference_points: + A 2 dimension ``numpy.ndarray`` with objective dimension columns. Represents + a list of reference points which is used to determine who to survive. + After non-dominated sort, who out of borderline front are going to survived is + determined according to how sparse the closest reference point of each individual is. + In the default setting the algorithm uses `uniformly` spread points to diversify the + result. It is also possible to reflect your `preferences` by giving an arbitrary set of + `target` points since the algorithm prioritizes individuals around reference points. + + dividing_parameter: + A parameter to determine the density of default reference points. This parameter + determines how many divisions are made between reference points on each axis. The + smaller this value is, the less reference points you have. The default value is 3. + Note that this parameter is not used when ``reference_points`` is not :obj:`None`. + + .. note:: + Other parameters than ``reference_points`` and ``dividing_parameter`` are the same as + :class:`~optuna.samplers.NSGAIISampler`. + + """ + + def __init__( + self, + *, + population_size: int = 50, + mutation_prob: float | None = None, + crossover: BaseCrossover | None = None, + crossover_prob: float = 0.9, + swapping_prob: float = 0.5, + seed: int | None = None, + constraints_func: Callable[[FrozenTrial], Sequence[float]] | None = None, + reference_points: np.ndarray | None = None, + dividing_parameter: int = 3, + elite_population_selection_strategy: ( + Callable[[Study, list[FrozenTrial]], list[FrozenTrial]] | None + ) = None, + child_generation_strategy: ( + Callable[[Study, dict[str, BaseDistribution], list[FrozenTrial]], dict[str, Any]] + | None + ) = None, + after_trial_strategy: ( + Callable[[Study, FrozenTrial, TrialState, Sequence[float] | None], None] | None + ) = None, + ) -> None: + # TODO(ohta): Reconsider the default value of each parameter. + + if population_size < 2: + raise ValueError("`population_size` must be greater than or equal to 2.") + + if crossover is None: + crossover = UniformCrossover(swapping_prob) + + if not isinstance(crossover, BaseCrossover): + raise ValueError( + f"'{crossover}' is not a valid crossover." + " For valid crossovers see" + " https://optuna.readthedocs.io/en/stable/reference/samplers.html." + ) + + if population_size < crossover.n_parents: + raise ValueError( + f"Using {crossover}," + f" the population size should be greater than or equal to {crossover.n_parents}." + f" The specified `population_size` is {population_size}." 
+ ) + + self._population_size = population_size + self._random_sampler = RandomSampler(seed=seed) + self._rng = LazyRandomState(seed) + self._constraints_func = constraints_func + self._search_space = IntersectionSearchSpace() + + self._elite_population_selection_strategy = ( + elite_population_selection_strategy + or NSGAIIIElitePopulationSelectionStrategy( + population_size=population_size, + constraints_func=constraints_func, + reference_points=reference_points, + dividing_parameter=dividing_parameter, + rng=self._rng, + ) + ) + self._child_generation_strategy = ( + child_generation_strategy + or NSGAIIChildGenerationStrategy( + crossover_prob=crossover_prob, + mutation_prob=mutation_prob, + swapping_prob=swapping_prob, + crossover=crossover, + constraints_func=constraints_func, + rng=self._rng, + ) + ) + self._after_trial_strategy = after_trial_strategy or NSGAIIAfterTrialStrategy( + constraints_func=constraints_func + ) + + def reseed_rng(self) -> None: + self._random_sampler.reseed_rng() + self._rng.rng.seed() + + def infer_relative_search_space( + self, study: Study, trial: FrozenTrial + ) -> dict[str, BaseDistribution]: + search_space: dict[str, BaseDistribution] = {} + for name, distribution in self._search_space.calculate(study).items(): + if distribution.single(): + # The `untransform` method of `optuna._transform._SearchSpaceTransform` + # does not assume a single value, + # so single value objects are not sampled with the `sample_relative` method, + # but with the `sample_independent` method. + continue + search_space[name] = distribution + return search_space + + def sample_relative( + self, + study: Study, + trial: FrozenTrial, + search_space: dict[str, BaseDistribution], + ) -> dict[str, Any]: + parent_generation, parent_population = self._collect_parent_population(study) + + generation = parent_generation + 1 + study._storage.set_trial_system_attr(trial._trial_id, _GENERATION_KEY, generation) + + if parent_generation < 0: + return {} + + return self._child_generation_strategy(study, search_space, parent_population) + + def sample_independent( + self, + study: Study, + trial: FrozenTrial, + param_name: str, + param_distribution: BaseDistribution, + ) -> Any: + # Following parameters are randomly sampled here. + # 1. A parameter in the initial population/first generation. + # 2. A parameter to mutate. + # 3. A parameter excluded from the intersection search space. + + return self._random_sampler.sample_independent( + study, trial, param_name, param_distribution + ) + + def _collect_parent_population(self, study: Study) -> tuple[int, list[FrozenTrial]]: + trials = study.get_trials(deepcopy=False) + + generation_to_runnings = defaultdict(list) + generation_to_population = defaultdict(list) + for trial in trials: + if _GENERATION_KEY not in trial.system_attrs: + continue + + generation = trial.system_attrs[_GENERATION_KEY] + if trial.state != optuna.trial.TrialState.COMPLETE: + if trial.state == optuna.trial.TrialState.RUNNING: + generation_to_runnings[generation].append(trial) + continue + + # Do not use trials whose states are not COMPLETE, or `constraint` will be unavailable. + generation_to_population[generation].append(trial) + + hasher = hashlib.sha256() + parent_population: list[FrozenTrial] = [] + parent_generation = -1 + while True: + generation = parent_generation + 1 + population = generation_to_population[generation] + + # Under multi-worker settings, the population size might become larger than + # `self._population_size`. 
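+ # A generation is considered complete once at least ``population_size`` of its trials have
+ # finished; otherwise the loop stops here and the latest complete generation found so far
+ # is used as the parent population.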
+ if len(population) < self._population_size: + break + + # [NOTE] + # It's generally safe to assume that once the above condition is satisfied, + # there are no additional individuals added to the generation (i.e., the members of + # the generation have been fixed). + # If the number of parallel workers is huge, this assumption can be broken, but + # this is a very rare case and doesn't significantly impact optimization performance. + # So we can ignore the case. + + # The cache key is calculated based on the key of the previous generation and + # the remaining running trials in the current population. + # If there are no running trials, the new cache key becomes exactly the same as + # the previous one, and the cached content will be overwritten. This allows us to + # skip redundant cache key calculations when this method is called for the subsequent + # trials. + for trial in generation_to_runnings[generation]: + hasher.update(bytes(str(trial.number), "utf-8")) + + cache_key = "{}:{}".format(_POPULATION_CACHE_KEY_PREFIX, hasher.hexdigest()) + study_system_attrs = study._storage.get_study_system_attrs(study._study_id) + cached_generation, cached_population_numbers = study_system_attrs.get( + cache_key, (-1, []) + ) + if cached_generation >= generation: + generation = cached_generation + population = [trials[n] for n in cached_population_numbers] + else: + population.extend(parent_population) + population = self._elite_population_selection_strategy(study, population) + + # To reduce the number of system attribute entries, + # we cache the population information only if there are no running trials + # (i.e., the information of the population has been fixed). + # Usually, if there are no too delayed running trials, the single entry + # will be used. 
+ if len(generation_to_runnings[generation]) == 0: + population_numbers = [t.number for t in population] + study._storage.set_study_system_attr( + study._study_id, cache_key, (generation, population_numbers) + ) + + parent_generation = generation + parent_population = population + + return parent_generation, parent_population + + def before_trial(self, study: Study, trial: FrozenTrial) -> None: + self._random_sampler.before_trial(study, trial) + + def after_trial( + self, + study: Study, + trial: FrozenTrial, + state: TrialState, + values: Sequence[float] | None, + ) -> None: + assert state in [TrialState.COMPLETE, TrialState.FAIL, TrialState.PRUNED] + self._after_trial_strategy(study, trial, state, values) + self._random_sampler.after_trial(study, trial, state, values) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/_partial_fixed.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/_partial_fixed.py new file mode 100644 index 0000000000000000000000000000000000000000..419860e89b86a1dd4e3835e4ba8423bbb298f760 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/_partial_fixed.py @@ -0,0 +1,123 @@ +from __future__ import annotations + +from collections.abc import Sequence +from typing import Any +from typing import TYPE_CHECKING +import warnings + +from optuna._experimental import experimental_class +from optuna.distributions import BaseDistribution +from optuna.samplers import BaseSampler +from optuna.trial import FrozenTrial +from optuna.trial import TrialState + + +if TYPE_CHECKING: + from optuna.study import Study + + +@experimental_class("2.4.0") +class PartialFixedSampler(BaseSampler): + """Sampler with partially fixed parameters. + + Example: + + After several steps of optimization, you can fix the value of ``y`` and re-optimize it. + + .. testcode:: + + import optuna + + + def objective(trial): + x = trial.suggest_float("x", -1, 1) + y = trial.suggest_int("y", -1, 1) + return x**2 + y + + + study = optuna.create_study() + study.optimize(objective, n_trials=10) + + best_params = study.best_params + fixed_params = {"y": best_params["y"]} + partial_sampler = optuna.samplers.PartialFixedSampler(fixed_params, study.sampler) + + study.sampler = partial_sampler + study.optimize(objective, n_trials=10) + + Args: + + fixed_params: + A dictionary of parameters to be fixed. + + base_sampler: + A sampler which samples unfixed parameters. + + """ + + def __init__(self, fixed_params: dict[str, Any], base_sampler: BaseSampler) -> None: + self._fixed_params = fixed_params + self._base_sampler = base_sampler + + def reseed_rng(self) -> None: + self._base_sampler.reseed_rng() + + def infer_relative_search_space( + self, study: Study, trial: FrozenTrial + ) -> dict[str, BaseDistribution]: + search_space = self._base_sampler.infer_relative_search_space(study, trial) + + # Remove fixed params from relative search space to return fixed values. + for param_name in self._fixed_params.keys(): + if param_name in search_space: + del search_space[param_name] + + return search_space + + def sample_relative( + self, + study: Study, + trial: FrozenTrial, + search_space: dict[str, BaseDistribution], + ) -> dict[str, Any]: + # Fixed params are never sampled here. 
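+ # They were already removed from the relative search space in
+ # ``infer_relative_search_space`` above and are returned by ``sample_independent`` instead.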
+ return self._base_sampler.sample_relative(study, trial, search_space) + + def sample_independent( + self, + study: Study, + trial: FrozenTrial, + param_name: str, + param_distribution: BaseDistribution, + ) -> Any: + if param_name not in self._fixed_params: + # Unfixed params are sampled here. + return self._base_sampler.sample_independent( + study, trial, param_name, param_distribution + ) + else: + # Fixed params are sampled here. + # Check if a parameter value is contained in the range of this distribution. + param_value = self._fixed_params[param_name] + + param_value_in_internal_repr = param_distribution.to_internal_repr(param_value) + contained = param_distribution._contains(param_value_in_internal_repr) + + if not contained: + warnings.warn( + f"Fixed parameter '{param_name}' with value {param_value} is out of range " + f"for distribution {param_distribution}." + ) + return param_value + + def before_trial(self, study: Study, trial: FrozenTrial) -> None: + self._base_sampler.before_trial(study, trial) + + def after_trial( + self, + study: Study, + trial: FrozenTrial, + state: TrialState, + values: Sequence[float] | None, + ) -> None: + self._base_sampler.after_trial(study, trial, state, values) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/_qmc.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/_qmc.py new file mode 100644 index 0000000000000000000000000000000000000000..f3dcc0cad07f14f8ac546e5d4a503ffb19263c6b --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/_qmc.py @@ -0,0 +1,318 @@ +from __future__ import annotations + +from collections.abc import Sequence +import threading +from typing import Any +from typing import TYPE_CHECKING + +import numpy as np + +import optuna +from optuna import logging +from optuna._experimental import experimental_class +from optuna._imports import _LazyImport +from optuna._transform import _SearchSpaceTransform +from optuna.distributions import BaseDistribution +from optuna.distributions import CategoricalDistribution +from optuna.samplers import BaseSampler +from optuna.trial import FrozenTrial +from optuna.trial import TrialState + + +if TYPE_CHECKING: + from optuna.study import Study + + +_logger = logging.get_logger(__name__) + +_SUGGESTED_STATES = (TrialState.COMPLETE, TrialState.PRUNED) +_threading_lock = threading.Lock() + + +@experimental_class("3.0.0") +class QMCSampler(BaseSampler): + """A Quasi Monte Carlo Sampler that generates low-discrepancy sequences. + + Quasi Monte Carlo (QMC) sequences are designed to have lower discrepancies than + standard random sequences. They are known to perform better than the standard + random sequences in hyperparameter optimization. + + For further information about the use of QMC sequences for hyperparameter optimization, + please refer to the following paper: + + - `Bergstra, James, and Yoshua Bengio. Random search for hyper-parameter optimization. + Journal of machine learning research 13.2, 2012. + `__ + + We use the QMC implementations in Scipy. For the details of the QMC algorithm, + see the Scipy API references on `scipy.stats.qmc + `__. + + .. note: + If your search space contains categorical parameters, it samples the categorical + parameters by its `independent_sampler` without using QMC algorithm. + + .. 
note:: + The search space of the sampler is determined by either previous trials in the study or + the first trial that this sampler samples. + + If there are previous trials in the study, :class:`~optuna.samplers.QMCSampler` infers its + search space using the trial which was created first in the study. + + Otherwise (if the study has no previous trials), :class:`~optuna.samplers.QMCSampler` + samples the first trial using its `independent_sampler` and then infers the search space + in the second trial. + + As mentioned above, the search space of the :class:`~optuna.samplers.QMCSampler` is + determined by the first trial of the study. Once the search space is determined, it cannot + be changed afterwards. + + Args: + qmc_type: + The type of QMC sequence to be sampled. This must be one of + `"halton"` and `"sobol"`. Default is `"sobol"`. + + .. note:: + Sobol' sequence is designed to have low-discrepancy property when the number of + samples is :math:`n=2^m` for each positive integer :math:`m`. When it is possible + to pre-specify the number of trials suggested by `QMCSampler`, it is recommended + that the number of trials should be set as power of two. + + scramble: + If this option is :obj:`True`, scrambling (randomization) is applied to the QMC + sequences. + + seed: + A seed for ``QMCSampler``. This argument is used only when ``scramble`` is :obj:`True`. + If this is :obj:`None`, the seed is initialized randomly. Default is :obj:`None`. + + .. note:: + When using multiple :class:`~optuna.samplers.QMCSampler`'s in parallel and/or + distributed optimization, all the samplers must share the same seed when the + `scrambling` is enabled. Otherwise, the low-discrepancy property of the samples + will be degraded. + + independent_sampler: + A :class:`~optuna.samplers.BaseSampler` instance that is used for independent + sampling. The first trial of the study and the parameters not contained in the + relative search space are sampled by this sampler. + + If :obj:`None` is specified, :class:`~optuna.samplers.RandomSampler` is used + as the default. + + .. seealso:: + :class:`~optuna.samplers` module provides built-in independent samplers + such as :class:`~optuna.samplers.RandomSampler` and + :class:`~optuna.samplers.TPESampler`. + + warn_independent_sampling: + If this is :obj:`True`, a warning message is emitted when + the value of a parameter is sampled by using an independent sampler. + + Note that the parameters of the first trial in a study are sampled via an + independent sampler in most cases, so no warning messages are emitted in such cases. + + warn_asynchronous_seeding: + If this is :obj:`True`, a warning message is emitted when the scrambling + (randomization) is applied to the QMC sequence and the random seed of the sampler is + not set manually. + + .. note:: + When using parallel and/or distributed optimization without manually + setting the seed, the seed is set randomly for each instances of + :class:`~optuna.samplers.QMCSampler` for different workers, which ends up + asynchronous seeding for multiple samplers used in the optimization. + + .. seealso:: + See parameter ``seed`` in :class:`~optuna.samplers.QMCSampler`. + + Example: + + Optimize a simple quadratic function by using :class:`~optuna.samplers.QMCSampler`. + + .. 
testcode:: + + import optuna + + + def objective(trial): + x = trial.suggest_float("x", -1, 1) + y = trial.suggest_int("y", -1, 1) + return x**2 + y + + + sampler = optuna.samplers.QMCSampler() + study = optuna.create_study(sampler=sampler) + study.optimize(objective, n_trials=8) + + """ + + def __init__( + self, + *, + qmc_type: str = "sobol", + scramble: bool = False, # default is False for simplicity in distributed environment. + seed: int | None = None, + independent_sampler: BaseSampler | None = None, + warn_asynchronous_seeding: bool = True, + warn_independent_sampling: bool = True, + ) -> None: + self._scramble = scramble + self._seed = np.random.PCG64().random_raw() if seed is None else seed + self._independent_sampler = independent_sampler or optuna.samplers.RandomSampler(seed=seed) + self._initial_search_space: dict[str, BaseDistribution] | None = None + self._warn_independent_sampling = warn_independent_sampling + + if qmc_type in ("halton", "sobol"): + self._qmc_type = qmc_type + else: + message = ( + f'The `qmc_type`, "{qmc_type}", is not a valid. ' + 'It must be one of "halton" and "sobol".' + ) + raise ValueError(message) + + if seed is None and scramble and warn_asynchronous_seeding: + # Sobol/Halton sequences without scrambling do not use seed. + self._log_asynchronous_seeding() + + def reseed_rng(self) -> None: + # We must not reseed the `self._seed` like below. Otherwise, workers will have different + # seed under parallel execution because `self.reseed_rng()` is called when starting each + # parallel executor. + # >>> self._seed = np.random.MT19937().random_raw() + + self._independent_sampler.reseed_rng() + + def infer_relative_search_space( + self, study: Study, trial: FrozenTrial + ) -> dict[str, BaseDistribution]: + if self._initial_search_space is not None: + return self._initial_search_space + + past_trials = study._get_trials(deepcopy=False, states=_SUGGESTED_STATES, use_cache=True) + # The initial trial is sampled by the independent sampler. + if len(past_trials) == 0: + return {} + # If an initial trial was already made, + # construct search_space of this sampler from the initial trial. + first_trial = min(past_trials, key=lambda t: t.number) + self._initial_search_space = self._infer_initial_search_space(first_trial) + return self._initial_search_space + + def _infer_initial_search_space(self, trial: FrozenTrial) -> dict[str, BaseDistribution]: + search_space: dict[str, BaseDistribution] = {} + for param_name, distribution in trial.distributions.items(): + if isinstance(distribution, CategoricalDistribution): + continue + search_space[param_name] = distribution + + return search_space + + @staticmethod + def _log_asynchronous_seeding() -> None: + _logger.warning( + "No seed is provided for `QMCSampler` and the seed is set randomly. " + "If you are running multiple `QMCSampler`s in parallel and/or distributed " + " environment, the same seed must be used in all samplers to ensure that resulting " + "samples are taken from the same QMC sequence. " + ) + + def _log_independent_sampling(self, trial: FrozenTrial, param_name: str) -> None: + _logger.warning( + f"The parameter '{param_name}' in trial#{trial.number} is sampled independently " + f"by using `{self._independent_sampler.__class__.__name__}` instead of `QMCSampler` " + "(optimization performance may be degraded). " + "`QMCSampler` does not support dynamic search space or `CategoricalDistribution`. 
" + "You can suppress this warning by setting `warn_independent_sampling` " + "to `False` in the constructor of `QMCSampler`, " + "if this independent sampling is intended behavior." + ) + + def sample_independent( + self, + study: Study, + trial: FrozenTrial, + param_name: str, + param_distribution: BaseDistribution, + ) -> Any: + if self._initial_search_space is not None: + if self._warn_independent_sampling: + self._log_independent_sampling(trial, param_name) + + return self._independent_sampler.sample_independent( + study, trial, param_name, param_distribution + ) + + def sample_relative( + self, study: Study, trial: FrozenTrial, search_space: dict[str, BaseDistribution] + ) -> dict[str, Any]: + if search_space == {}: + return {} + + sample = self._sample_qmc(study, search_space) + trans = _SearchSpaceTransform(search_space) + sample = trans.bounds[:, 0] + sample * (trans.bounds[:, 1] - trans.bounds[:, 0]) + return trans.untransform(sample[0, :]) + + def before_trial(self, study: Study, trial: FrozenTrial) -> None: + self._independent_sampler.before_trial(study, trial) + + def after_trial( + self, + study: Study, + trial: "optuna.trial.FrozenTrial", + state: TrialState, + values: Sequence[float] | None, + ) -> None: + self._independent_sampler.after_trial(study, trial, state, values) + + def _sample_qmc(self, study: Study, search_space: dict[str, BaseDistribution]) -> np.ndarray: + # Lazy import because the `scipy.stats.qmc` is slow to import. + qmc_module = _LazyImport("scipy.stats.qmc") + + sample_id = self._find_sample_id(study) + d = len(search_space) + + if self._qmc_type == "halton": + qmc_engine = qmc_module.Halton(d, seed=self._seed, scramble=self._scramble) + elif self._qmc_type == "sobol": + # Sobol engine likely shares its internal state among threads. + # Without threading.Lock, ValueError exceptions are raised in Sobol engine as discussed + # in https://github.com/optuna/optunahub-registry/pull/168#pullrequestreview-2404054969 + with _threading_lock: + qmc_engine = qmc_module.Sobol(d, seed=self._seed, scramble=self._scramble) + else: + raise ValueError("Invalid `qmc_type`") + + forward_size = sample_id # `sample_id` starts from 0. + # Skip fast_forward with forward_size==0 because Sobol doesn't support the case, + # and fast_forward(0) doesn't affect sampling. + if forward_size > 0: + qmc_engine.fast_forward(forward_size) + sample = qmc_engine.random(1) + + return sample + + def _find_sample_id(self, study: Study) -> int: + qmc_id = "" + qmc_id += self._qmc_type + # Sobol/Halton sequences without scrambling do not use seed. + if self._scramble: + qmc_id += f" (scramble=True, seed={self._seed})" + else: + qmc_id += " (scramble=False)" + key_qmc_id = qmc_id + "'s last sample id" + + # TODO(kstoneriv3): Here, we ideally assume that the following block is + # an atomic transaction. Without such an assumption, the current implementation + # only ensures that each `sample_id` is sampled at least once. 
+ system_attrs = study._storage.get_study_system_attrs(study._study_id) + if key_qmc_id in system_attrs.keys(): + sample_id = system_attrs[key_qmc_id] + sample_id += 1 + else: + sample_id = 0 + study._storage.set_study_system_attr(study._study_id, key_qmc_id, sample_id) + + return sample_id diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/_random.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/_random.py new file mode 100644 index 0000000000000000000000000000000000000000..f93a4ee2cee1acdd5a9cd9677b45d1d2311d8b49 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/_random.py @@ -0,0 +1,71 @@ +from __future__ import annotations + +from typing import Any +from typing import TYPE_CHECKING + +from optuna import distributions +from optuna._transform import _SearchSpaceTransform +from optuna.distributions import BaseDistribution +from optuna.samplers import BaseSampler +from optuna.samplers._lazy_random_state import LazyRandomState +from optuna.trial import FrozenTrial + + +if TYPE_CHECKING: + from optuna.study import Study + + +class RandomSampler(BaseSampler): + """Sampler using random sampling. + + This sampler is based on *independent sampling*. + See also :class:`~optuna.samplers.BaseSampler` for more details of 'independent sampling'. + + Example: + + .. testcode:: + + import optuna + from optuna.samplers import RandomSampler + + + def objective(trial): + x = trial.suggest_float("x", -5, 5) + return x**2 + + + study = optuna.create_study(sampler=RandomSampler()) + study.optimize(objective, n_trials=10) + + Args: + seed: Seed for random number generator. + """ + + def __init__(self, seed: int | None = None) -> None: + self._rng = LazyRandomState(seed) + + def reseed_rng(self) -> None: + self._rng.rng.seed() + + def infer_relative_search_space( + self, study: Study, trial: FrozenTrial + ) -> dict[str, BaseDistribution]: + return {} + + def sample_relative( + self, study: Study, trial: FrozenTrial, search_space: dict[str, BaseDistribution] + ) -> dict[str, Any]: + return {} + + def sample_independent( + self, + study: Study, + trial: FrozenTrial, + param_name: str, + param_distribution: distributions.BaseDistribution, + ) -> Any: + search_space = {param_name: param_distribution} + trans = _SearchSpaceTransform(search_space) + trans_params = self._rng.rng.uniform(trans.bounds[:, 0], trans.bounds[:, 1]) + + return trans.untransform(trans_params)[param_name] diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/_tpe/__init__.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/_tpe/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/_tpe/_erf.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/_tpe/_erf.py new file mode 100644 index 0000000000000000000000000000000000000000..c90c90450c0c44b37b6112cc63842bcdab065758 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/_tpe/_erf.py @@ -0,0 +1,185 @@ +# This code is the modified version of erf function in FreeBSD's standard C library. 
+# origin: FreeBSD /usr/src/lib/msun/src/s_erf.c +# https://github.com/freebsd/freebsd-src/blob/main/lib/msun/src/s_erf.c + +# /* @(#)s_erf.c 5.1 93/09/24 */ +# /* +# * ==================================================== +# * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved. +# * +# * Developed at SunPro, a Sun Microsystems, Inc. business. +# * Permission to use, copy, modify, and distribute this +# * software is freely granted, provided that this notice +# * is preserved. +# * ==================================================== +# */ + +import numpy as np +from numpy.polynomial import Polynomial + + +half = 0.5 +one = 1 +two = 2 + +erx = 8.45062911510467529297e-01 +# /* +# * In the domain [0, 2**-28], only the first term in the power series +# * expansion of erf(x) is used. The magnitude of the first neglected +# * terms is less than 2**-84. +# */ +efx = 1.28379167095512586316e-01 +efx8 = 1.02703333676410069053e00 + +# Coefficients for approximation to erf on [0,0.84375] + +pp0 = 1.28379167095512558561e-01 +pp1 = -3.25042107247001499370e-01 +pp2 = -2.84817495755985104766e-02 +pp3 = -5.77027029648944159157e-03 +pp4 = -2.37630166566501626084e-05 +pp = Polynomial([pp0, pp1, pp2, pp3, pp4]) +qq1 = 3.97917223959155352819e-01 +qq2 = 6.50222499887672944485e-02 +qq3 = 5.08130628187576562776e-03 +qq4 = 1.32494738004321644526e-04 +qq5 = -3.96022827877536812320e-06 +qq = Polynomial([one, qq1, qq2, qq3, qq4, qq5]) + +# Coefficients for approximation to erf in [0.84375,1.25] + +pa0 = -2.36211856075265944077e-03 +pa1 = 4.14856118683748331666e-01 +pa2 = -3.72207876035701323847e-01 +pa3 = 3.18346619901161753674e-01 +pa4 = -1.10894694282396677476e-01 +pa5 = 3.54783043256182359371e-02 +pa6 = -2.16637559486879084300e-03 +pa = Polynomial([pa0, pa1, pa2, pa3, pa4, pa5, pa6]) +qa1 = 1.06420880400844228286e-01 +qa2 = 5.40397917702171048937e-01 +qa3 = 7.18286544141962662868e-02 +qa4 = 1.26171219808761642112e-01 +qa5 = 1.36370839120290507362e-02 +qa6 = 1.19844998467991074170e-02 +qa = Polynomial([one, qa1, qa2, qa3, qa4, qa5, qa6]) + +# Coefficients for approximation to erfc in [1.25,1/0.35] + +ra0 = -9.86494403484714822705e-03 +ra1 = -6.93858572707181764372e-01 +ra2 = -1.05586262253232909814e01 +ra3 = -6.23753324503260060396e01 +ra4 = -1.62396669462573470355e02 +ra5 = -1.84605092906711035994e02 +ra6 = -8.12874355063065934246e01 +ra7 = -9.81432934416914548592e00 +ra = Polynomial([ra0, ra1, ra2, ra3, ra4, ra5, ra6, ra7]) +sa1 = 1.96512716674392571292e01 +sa2 = 1.37657754143519042600e02 +sa3 = 4.34565877475229228821e02 +sa4 = 6.45387271733267880336e02 +sa5 = 4.29008140027567833386e02 +sa6 = 1.08635005541779435134e02 +sa7 = 6.57024977031928170135e00 +sa8 = -6.04244152148580987438e-02 +sa = Polynomial([one, sa1, sa2, sa3, sa4, sa5, sa6, sa7, sa8]) + +# Coefficients for approximation to erfc in [1/.35,28] + +rb0 = -9.86494292470009928597e-03 +rb1 = -7.99283237680523006574e-01 +rb2 = -1.77579549177547519889e01 +rb3 = -1.60636384855821916062e02 +rb4 = -6.37566443368389627722e02 +rb5 = -1.02509513161107724954e03 +rb6 = -4.83519191608651397019e02 +rb = Polynomial([rb0, rb1, rb2, rb3, rb4, rb5, rb6]) +sb1 = 3.03380607434824582924e01 +sb2 = 3.25792512996573918826e02 +sb3 = 1.53672958608443695994e03 +sb4 = 3.19985821950859553908e03 +sb5 = 2.55305040643316442583e03 +sb6 = 4.74528541206955367215e02 +sb7 = -2.24409524465858183362e01 +sb = Polynomial([one, sb1, sb2, sb3, sb4, sb5, sb6, sb7]) + + +def erf(x: np.ndarray) -> np.ndarray: + a = np.abs(x) + + case_nan = np.isnan(x) + case_posinf = np.isposinf(x) + 
case_neginf = np.isneginf(x) + case_tiny = a < 2**-28 + case_small1 = (2**-28 <= a) & (a < 0.84375) + case_small2 = (0.84375 <= a) & (a < 1.25) + case_med1 = (1.25 <= a) & (a < 1 / 0.35) + case_med2 = (1 / 0.35 <= a) & (a < 6) + case_big = a >= 6 + + def calc_case_tiny(x: np.ndarray) -> np.ndarray: + return x + efx * x + + def calc_case_small1(x: np.ndarray) -> np.ndarray: + z = x * x + r = pp(z) + s = qq(z) + y = r / s + return x + x * y + + def calc_case_small2(x: np.ndarray) -> np.ndarray: + s = np.abs(x) - one + P = pa(s) + Q = qa(s) + absout = erx + P / Q + return absout * np.sign(x) + + def calc_case_med1(x: np.ndarray) -> np.ndarray: + sign = np.sign(x) + x = np.abs(x) + s = one / (x * x) + R = ra(s) + S = sa(s) + # the following 3 lines are omitted for the following reasons: + # (1) there are no easy way to implement SET_LOW_WORD equivalent method in NumPy + # (2) we don't need very high accuracy in our use case. + # z = x + # SET_LOW_WORD(z, 0) + # r = np.exp(-z * z - 0.5625) * np.exp((z - x) * (z + x) + R / S) + r = np.exp(-x * x - 0.5625) * np.exp(R / S) + return (one - r / x) * sign + + def calc_case_med2(x: np.ndarray) -> np.ndarray: + sign = np.sign(x) + x = np.abs(x) + s = one / (x * x) + R = rb(s) + S = sb(s) + # z = x + # SET_LOW_WORD(z, 0) + # r = np.exp(-z * z - 0.5625) * np.exp((z - x) * (z + x) + R / S) + r = np.exp(-x * x - 0.5625) * np.exp(R / S) + return (one - r / x) * sign + + def calc_case_big(x: np.ndarray) -> np.ndarray: + return np.sign(x) + + out = np.full_like(a, fill_value=np.nan, dtype=np.float64) + out[case_nan] = np.nan + out[case_posinf] = 1.0 + out[case_neginf] = -1.0 + if x[case_tiny].size: + out[case_tiny] = calc_case_tiny(x[case_tiny]) + if x[case_small1].size: + out[case_small1] = calc_case_small1(x[case_small1]) + if x[case_small2].size: + out[case_small2] = calc_case_small2(x[case_small2]) + if x[case_med1].size: + out[case_med1] = calc_case_med1(x[case_med1]) + if x[case_med2].size: + out[case_med2] = calc_case_med2(x[case_med2]) + if x[case_big].size: + out[case_big] = calc_case_big(x[case_big]) + + return out diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/_tpe/_truncnorm.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/_tpe/_truncnorm.py new file mode 100644 index 0000000000000000000000000000000000000000..dd02ccb32976a95a4a2c2910ca8c253421c6a7a4 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/_tpe/_truncnorm.py @@ -0,0 +1,237 @@ +# This file contains the codes from SciPy project. +# +# Copyright (c) 2001-2002 Enthought, Inc. 2003-2022, SciPy Developers. +# All rights reserved. + +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: + +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. + +# 2. Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials provided +# with the distribution. + +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. 
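Before the SciPy-derived truncated-normal helpers continue below, a quick illustrative check of the vectorized erf approximation defined above against math.erf (assumption: the agreement is close but not exact, since the comments above note that some precision is deliberately given up in the medium-argument branches):

import math
import numpy as np
from optuna.samplers._tpe._erf import erf

xs = np.array([-3.0, -0.8, 0.0, 1e-10, 0.5, 2.0, 10.0])
ref = np.array([math.erf(v) for v in xs])
print(np.abs(erf(xs) - ref).max())  # expected to be tiny, roughly below 1e-7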
+ +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from __future__ import annotations + +from collections.abc import Callable +import functools +import math +import sys + +import numpy as np + +from optuna.samplers._tpe._erf import erf + + +_norm_pdf_C = math.sqrt(2 * math.pi) +_norm_pdf_logC = math.log(_norm_pdf_C) + + +def _log_sum(log_p: np.ndarray, log_q: np.ndarray) -> np.ndarray: + return np.logaddexp(log_p, log_q) + + +def _log_diff(log_p: np.ndarray, log_q: np.ndarray) -> np.ndarray: + return log_p + np.log1p(-np.exp(log_q - log_p)) + + +@functools.lru_cache(1000) +def _ndtr_single(a: float) -> float: + x = a / 2**0.5 + + if x < -1 / 2**0.5: + y = 0.5 * math.erfc(-x) + elif x < 1 / 2**0.5: + y = 0.5 + 0.5 * math.erf(x) + else: + y = 1.0 - 0.5 * math.erfc(x) + + return y + + +def _ndtr(a: np.ndarray) -> np.ndarray: + # todo(amylase): implement erfc in _erf.py and use it for big |a| inputs. + return 0.5 + 0.5 * erf(a / 2**0.5) + + +@functools.lru_cache(1000) +def _log_ndtr_single(a: float) -> float: + if a > 6: + return -_ndtr_single(-a) + if a > -20: + return math.log(_ndtr_single(a)) + + log_LHS = -0.5 * a**2 - math.log(-a) - 0.5 * math.log(2 * math.pi) + last_total = 0.0 + right_hand_side = 1.0 + numerator = 1.0 + denom_factor = 1.0 + denom_cons = 1 / a**2 + sign = 1 + i = 0 + + while abs(last_total - right_hand_side) > sys.float_info.epsilon: + i += 1 + last_total = right_hand_side + sign = -sign + denom_factor *= denom_cons + numerator *= 2 * i - 1 + right_hand_side += sign * numerator * denom_factor + + return log_LHS + math.log(right_hand_side) + + +def _log_ndtr(a: np.ndarray) -> np.ndarray: + return np.frompyfunc(_log_ndtr_single, 1, 1)(a).astype(float) + + +def _norm_logpdf(x: np.ndarray) -> np.ndarray: + return -(x**2) / 2.0 - _norm_pdf_logC + + +def _log_gauss_mass(a: np.ndarray, b: np.ndarray) -> np.ndarray: + """Log of Gaussian probability mass within an interval""" + + # Calculations in right tail are inaccurate, so we'll exploit the + # symmetry and work only in the left tail + case_left = b <= 0 + case_right = a > 0 + case_central = ~(case_left | case_right) + + def mass_case_left(a: np.ndarray, b: np.ndarray) -> np.ndarray: + return _log_diff(_log_ndtr(b), _log_ndtr(a)) + + def mass_case_right(a: np.ndarray, b: np.ndarray) -> np.ndarray: + return mass_case_left(-b, -a) + + def mass_case_central(a: np.ndarray, b: np.ndarray) -> np.ndarray: + # Previously, this was implemented as: + # left_mass = mass_case_left(a, 0) + # right_mass = mass_case_right(0, b) + # return _log_sum(left_mass, right_mass) + # Catastrophic cancellation occurs as np.exp(log_mass) approaches 1. + # Correct for this with an alternative formulation. 
+ # We're not concerned with underflow here: if only one term + # underflows, it was insignificant; if both terms underflow, + # the result can't accurately be represented in logspace anyway + # because sc.log1p(x) ~ x for small x. + return np.log1p(-_ndtr(a) - _ndtr(-b)) + + # _lazyselect not working; don't care to debug it + out = np.full_like(a, fill_value=np.nan, dtype=np.complex128) + if a[case_left].size: + out[case_left] = mass_case_left(a[case_left], b[case_left]) + if a[case_right].size: + out[case_right] = mass_case_right(a[case_right], b[case_right]) + if a[case_central].size: + out[case_central] = mass_case_central(a[case_central], b[case_central]) + return np.real(out) # discard ~0j + + +def _bisect(f: Callable[[float], float], a: float, b: float, c: float) -> float: + if f(a) > c: + a, b = b, a + # In the algorithm, it is assumed that all of (a + b), (a * 2), and (b * 2) are finite. + for _ in range(100): + m = (a + b) / 2 + if a == m or b == m: + return m + if f(m) < c: + a = m + else: + b = m + return (a + b) / 2 + + +def _ndtri_exp_single(y: float) -> float: + # TODO(amylase): Justify this constant + return _bisect(_log_ndtr_single, -100, +100, y) + + +def _ndtri_exp(y: np.ndarray) -> np.ndarray: + return np.frompyfunc(_ndtri_exp_single, 1, 1)(y).astype(float) + + +def ppf(q: np.ndarray, a: np.ndarray | float, b: np.ndarray | float) -> np.ndarray: + q, a, b = np.atleast_1d(q, a, b) + q, a, b = np.broadcast_arrays(q, a, b) + + case_left = a < 0 + case_right = ~case_left + + def ppf_left(q: np.ndarray, a: np.ndarray, b: np.ndarray) -> np.ndarray: + log_Phi_x = _log_sum(_log_ndtr(a), np.log(q) + _log_gauss_mass(a, b)) + return _ndtri_exp(log_Phi_x) + + def ppf_right(q: np.ndarray, a: np.ndarray, b: np.ndarray) -> np.ndarray: + log_Phi_x = _log_sum(_log_ndtr(-b), np.log1p(-q) + _log_gauss_mass(a, b)) + return -_ndtri_exp(log_Phi_x) + + out = np.empty_like(q) + + q_left = q[case_left] + q_right = q[case_right] + + if q_left.size: + out[case_left] = ppf_left(q_left, a[case_left], b[case_left]) + if q_right.size: + out[case_right] = ppf_right(q_right, a[case_right], b[case_right]) + + out[q == 0] = a[q == 0] + out[q == 1] = b[q == 1] + out[a == b] = math.nan + + return out + + +def rvs( + a: np.ndarray, + b: np.ndarray, + loc: np.ndarray | float = 0, + scale: np.ndarray | float = 1, + random_state: np.random.RandomState | None = None, +) -> np.ndarray: + random_state = random_state or np.random.RandomState() + size = np.broadcast(a, b, loc, scale).shape + percentiles = random_state.uniform(low=0, high=1, size=size) + return ppf(percentiles, a, b) * scale + loc + + +def logpdf( + x: np.ndarray, + a: np.ndarray | float, + b: np.ndarray | float, + loc: np.ndarray | float = 0, + scale: np.ndarray | float = 1, +) -> np.ndarray: + x = (x - loc) / scale + + x, a, b = np.atleast_1d(x, a, b) + + out = _norm_logpdf(x) - _log_gauss_mass(a, b) - np.log(scale) + + x, a, b = np.broadcast_arrays(x, a, b) + out[(x < a) | (b < x)] = -np.inf + out[a == b] = math.nan + + return out diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/_tpe/parzen_estimator.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/_tpe/parzen_estimator.py new file mode 100644 index 0000000000000000000000000000000000000000..bf625caab12187067462cd1a7b688de933433bf1 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/_tpe/parzen_estimator.py @@ -0,0 +1,277 @@ +from 
__future__ import annotations + +from collections.abc import Callable +from typing import NamedTuple + +import numpy as np + +from optuna.distributions import BaseDistribution +from optuna.distributions import CategoricalChoiceType +from optuna.distributions import CategoricalDistribution +from optuna.distributions import FloatDistribution +from optuna.distributions import IntDistribution +from optuna.samplers._tpe.probability_distributions import _BatchedCategoricalDistributions +from optuna.samplers._tpe.probability_distributions import _BatchedDiscreteTruncNormDistributions +from optuna.samplers._tpe.probability_distributions import _BatchedDistributions +from optuna.samplers._tpe.probability_distributions import _BatchedTruncNormDistributions +from optuna.samplers._tpe.probability_distributions import _MixtureOfProductDistribution + + +EPS = 1e-12 + + +class _ParzenEstimatorParameters(NamedTuple): + prior_weight: float + consider_magic_clip: bool + consider_endpoints: bool + weights: Callable[[int], np.ndarray] + multivariate: bool + categorical_distance_func: dict[ + str, Callable[[CategoricalChoiceType, CategoricalChoiceType], float] + ] + + +class _ParzenEstimator: + def __init__( + self, + observations: dict[str, np.ndarray], + search_space: dict[str, BaseDistribution], + parameters: _ParzenEstimatorParameters, + predetermined_weights: np.ndarray | None = None, + ) -> None: + if parameters.prior_weight < 0: + raise ValueError( + "A non-negative value must be specified for prior_weight," + f" but got {parameters.prior_weight}." + ) + + self._search_space = search_space + + transformed_observations = self._transform(observations) + + assert predetermined_weights is None or len(transformed_observations) == len( + predetermined_weights + ) + weights = ( + predetermined_weights + if predetermined_weights is not None + else self._call_weights_func(parameters.weights, len(transformed_observations)) + ) + + if len(transformed_observations) == 0: + weights = np.array([1.0]) + else: + weights = np.append(weights, [parameters.prior_weight]) + weights /= weights.sum() + self._mixture_distribution = _MixtureOfProductDistribution( + weights=weights, + distributions=[ + self._calculate_distributions( + transformed_observations[:, i], param, search_space[param], parameters + ) + for i, param in enumerate(search_space) + ], + ) + + def sample(self, rng: np.random.RandomState, size: int) -> dict[str, np.ndarray]: + sampled = self._mixture_distribution.sample(rng, size) + return self._untransform(sampled) + + def log_pdf(self, samples_dict: dict[str, np.ndarray]) -> np.ndarray: + transformed_samples = self._transform(samples_dict) + return self._mixture_distribution.log_pdf(transformed_samples) + + @staticmethod + def _call_weights_func(weights_func: Callable[[int], np.ndarray], n: int) -> np.ndarray: + w = np.array(weights_func(n))[:n] + if np.any(w < 0): + raise ValueError( + f"The `weights` function is not allowed to return negative values {w}. " + + f"The argument of the `weights` function is {n}." + ) + if len(w) > 0 and np.sum(w) <= 0: + raise ValueError( + f"The `weight` function is not allowed to return all-zero values {w}." + + f" The argument of the `weights` function is {n}." + ) + if not np.all(np.isfinite(w)): + raise ValueError( + "The `weights`function is not allowed to return infinite or NaN values " + + f"{w}. The argument of the `weights` function is {n}." + ) + + # TODO(HideakiImamura) Raise `ValueError` if the weight function returns an ndarray of + # unexpected size. 
+ return w + + @staticmethod + def _is_log(dist: BaseDistribution) -> bool: + return isinstance(dist, (FloatDistribution, IntDistribution)) and dist.log + + def _transform(self, samples_dict: dict[str, np.ndarray]) -> np.ndarray: + return np.array( + [ + ( + np.log(samples_dict[param]) + if self._is_log(self._search_space[param]) + else samples_dict[param] + ) + for param in self._search_space + ] + ).T + + def _untransform(self, samples_array: np.ndarray) -> dict[str, np.ndarray]: + res = { + param: ( + np.exp(samples_array[:, i]) + if self._is_log(self._search_space[param]) + else samples_array[:, i] + ) + for i, param in enumerate(self._search_space) + } + # TODO(contramundum53): Remove this line after fixing log-Int hack. + return { + param: ( + np.clip( + dist.low + np.round((res[param] - dist.low) / dist.step) * dist.step, + dist.low, + dist.high, + ) + if isinstance(dist, IntDistribution) + else res[param] + ) + for (param, dist) in self._search_space.items() + } + + def _calculate_distributions( + self, + transformed_observations: np.ndarray, + param_name: str, + search_space: BaseDistribution, + parameters: _ParzenEstimatorParameters, + ) -> _BatchedDistributions: + if isinstance(search_space, CategoricalDistribution): + return self._calculate_categorical_distributions( + transformed_observations, param_name, search_space, parameters + ) + else: + assert isinstance(search_space, (FloatDistribution, IntDistribution)) + if search_space.log: + low = np.log(search_space.low) + high = np.log(search_space.high) + else: + low = search_space.low + high = search_space.high + step = search_space.step + + # TODO(contramundum53): This is a hack and should be fixed. + if step is not None and search_space.log: + low = np.log(search_space.low - step / 2) + high = np.log(search_space.high + step / 2) + step = None + + return self._calculate_numerical_distributions( + transformed_observations, low, high, step, parameters + ) + + def _calculate_categorical_distributions( + self, + observations: np.ndarray, + param_name: str, + search_space: CategoricalDistribution, + parameters: _ParzenEstimatorParameters, + ) -> _BatchedDistributions: + choices = search_space.choices + n_choices = len(choices) + if len(observations) == 0: + return _BatchedCategoricalDistributions( + weights=np.full((1, n_choices), fill_value=1.0 / n_choices) + ) + + n_kernels = len(observations) + 1 # NOTE(sawa3030): +1 for prior. + weights = np.full( + shape=(n_kernels, n_choices), + fill_value=parameters.prior_weight / n_kernels, + ) + observed_indices = observations.astype(int) + if param_name in parameters.categorical_distance_func: + # TODO(nabenabe0928): Think about how to handle combinatorial explosion. + # The time complexity is O(n_choices * used_indices.size), so n_choices cannot be huge. 
+ used_indices, rev_indices = np.unique(observed_indices, return_inverse=True) + dist_func = parameters.categorical_distance_func[param_name] + dists = np.array([[dist_func(choices[i], c) for c in choices] for i in used_indices]) + coef = np.log(n_kernels / parameters.prior_weight) * np.log(n_choices) / np.log(6) + cat_weights = np.exp(-((dists / np.max(dists, axis=1)[:, np.newaxis]) ** 2) * coef) + weights[: len(observed_indices)] = cat_weights[rev_indices] + else: + weights[np.arange(len(observed_indices)), observed_indices] += 1 + + row_sums = weights.sum(axis=1, keepdims=True) + weights /= np.where(row_sums == 0, 1, row_sums) + return _BatchedCategoricalDistributions(weights) + + def _calculate_numerical_distributions( + self, + observations: np.ndarray, + low: float, + high: float, + step: float | None, + parameters: _ParzenEstimatorParameters, + ) -> _BatchedDistributions: + step_or_0 = step or 0 + + mus = observations + + def compute_sigmas() -> np.ndarray: + if parameters.multivariate: + SIGMA0_MAGNITUDE = 0.2 + sigma = ( + SIGMA0_MAGNITUDE + * max(len(observations), 1) ** (-1.0 / (len(self._search_space) + 4)) + * (high - low + step_or_0) + ) + sigmas = np.full(shape=(len(observations),), fill_value=sigma) + else: + # TODO(contramundum53): Remove dependency on prior_mu + prior_mu = 0.5 * (low + high) + mus_with_prior = np.append(mus, prior_mu) + + sorted_indices = np.argsort(mus_with_prior) + sorted_mus = mus_with_prior[sorted_indices] + sorted_mus_with_endpoints = np.empty(len(mus_with_prior) + 2, dtype=float) + sorted_mus_with_endpoints[0] = low - step_or_0 / 2 + sorted_mus_with_endpoints[1:-1] = sorted_mus + sorted_mus_with_endpoints[-1] = high + step_or_0 / 2 + + sorted_sigmas = np.maximum( + sorted_mus_with_endpoints[1:-1] - sorted_mus_with_endpoints[0:-2], + sorted_mus_with_endpoints[2:] - sorted_mus_with_endpoints[1:-1], + ) + + if not parameters.consider_endpoints and sorted_mus_with_endpoints.shape[0] >= 4: + sorted_sigmas[0] = sorted_mus_with_endpoints[2] - sorted_mus_with_endpoints[1] + sorted_sigmas[-1] = ( + sorted_mus_with_endpoints[-2] - sorted_mus_with_endpoints[-3] + ) + + sigmas = sorted_sigmas[np.argsort(sorted_indices)][: len(observations)] + + # We adjust the range of the 'sigmas' according to the 'consider_magic_clip' flag. + maxsigma = 1.0 * (high - low + step_or_0) + if parameters.consider_magic_clip: + # TODO(contramundum53): Remove dependency of minsigma on consider_prior. + n_kernels = len(observations) + 1 # NOTE(sawa3030): +1 for prior. 
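                # The "magic clip" keeps every kernel bandwidth above a floor so that no
                # Gaussian collapses onto a single observation; the floor shrinks as the
                # number of kernels grows, but never below (high - low + step) / 100.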
+ minsigma = 1.0 * (high - low + step_or_0) / min(100.0, (1.0 + n_kernels)) + else: + minsigma = EPS + return np.asarray(np.clip(sigmas, minsigma, maxsigma)) + + sigmas = compute_sigmas() + + mus = np.append(mus, [0.5 * (low + high)]) + sigmas = np.append(sigmas, [1.0 * (high - low + step_or_0)]) + + if step is None: + return _BatchedTruncNormDistributions(mus, sigmas, low, high) + else: + return _BatchedDiscreteTruncNormDistributions(mus, sigmas, low, high, step) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/_tpe/probability_distributions.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/_tpe/probability_distributions.py new file mode 100644 index 0000000000000000000000000000000000000000..563307b728e62e58d9b0926af00f7791ba519779 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/_tpe/probability_distributions.py @@ -0,0 +1,122 @@ +from __future__ import annotations + +from typing import NamedTuple +from typing import Union + +import numpy as np + +from optuna.samplers._tpe import _truncnorm + + +class _BatchedCategoricalDistributions(NamedTuple): + weights: np.ndarray + + +class _BatchedTruncNormDistributions(NamedTuple): + mu: np.ndarray + sigma: np.ndarray + low: float # Currently, low and high do not change per trial. + high: float + + +class _BatchedDiscreteTruncNormDistributions(NamedTuple): + mu: np.ndarray + sigma: np.ndarray + low: float # Currently, low, high and step do not change per trial. + high: float + step: float + + +_BatchedDistributions = Union[ + _BatchedCategoricalDistributions, + _BatchedTruncNormDistributions, + _BatchedDiscreteTruncNormDistributions, +] + + +class _MixtureOfProductDistribution(NamedTuple): + weights: np.ndarray + distributions: list[_BatchedDistributions] + + def sample(self, rng: np.random.RandomState, batch_size: int) -> np.ndarray: + active_indices = rng.choice(len(self.weights), p=self.weights, size=batch_size) + + ret = np.empty((batch_size, len(self.distributions)), dtype=np.float64) + for i, d in enumerate(self.distributions): + if isinstance(d, _BatchedCategoricalDistributions): + active_weights = d.weights[active_indices, :] + rnd_quantile = rng.rand(batch_size) + cum_probs = np.cumsum(active_weights, axis=-1) + assert np.isclose(cum_probs[:, -1], 1).all() + cum_probs[:, -1] = 1 # Avoid numerical errors. 
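                # Inverse-CDF sampling: counting how many cumulative probabilities fall
                # below the uniform quantile gives the index of the sampled category.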
+ ret[:, i] = np.sum(cum_probs < rnd_quantile[:, None], axis=-1) + elif isinstance(d, _BatchedTruncNormDistributions): + active_mus = d.mu[active_indices] + active_sigmas = d.sigma[active_indices] + ret[:, i] = _truncnorm.rvs( + a=(d.low - active_mus) / active_sigmas, + b=(d.high - active_mus) / active_sigmas, + loc=active_mus, + scale=active_sigmas, + random_state=rng, + ) + elif isinstance(d, _BatchedDiscreteTruncNormDistributions): + active_mus = d.mu[active_indices] + active_sigmas = d.sigma[active_indices] + samples = _truncnorm.rvs( + a=(d.low - d.step / 2 - active_mus) / active_sigmas, + b=(d.high + d.step / 2 - active_mus) / active_sigmas, + loc=active_mus, + scale=active_sigmas, + random_state=rng, + ) + ret[:, i] = np.clip( + d.low + np.round((samples - d.low) / d.step) * d.step, d.low, d.high + ) + else: + assert False + + return ret + + def log_pdf(self, x: np.ndarray) -> np.ndarray: + batch_size, n_vars = x.shape + log_pdfs = np.empty((batch_size, len(self.weights), n_vars), dtype=np.float64) + for i, d in enumerate(self.distributions): + xi = x[:, i] + if isinstance(d, _BatchedCategoricalDistributions): + log_pdfs[:, :, i] = np.log( + np.take_along_axis( + d.weights[None, :, :], xi[:, None, None].astype(np.int64), axis=-1 + ) + )[:, :, 0] + elif isinstance(d, _BatchedTruncNormDistributions): + log_pdfs[:, :, i] = _truncnorm.logpdf( + x=xi[:, None], + a=(d.low - d.mu[None, :]) / d.sigma[None, :], + b=(d.high - d.mu[None, :]) / d.sigma[None, :], + loc=d.mu[None, :], + scale=d.sigma[None, :], + ) + elif isinstance(d, _BatchedDiscreteTruncNormDistributions): + lower_limit = d.low - d.step / 2 + upper_limit = d.high + d.step / 2 + x_lower = np.maximum(xi - d.step / 2, lower_limit) + x_upper = np.minimum(xi + d.step / 2, upper_limit) + log_gauss_mass = _truncnorm._log_gauss_mass( + (x_lower[:, None] - d.mu[None, :]) / d.sigma[None, :], + (x_upper[:, None] - d.mu[None, :]) / d.sigma[None, :], + ) + log_p_accept = _truncnorm._log_gauss_mass( + (d.low - d.step / 2 - d.mu[None, :]) / d.sigma[None, :], + (d.high + d.step / 2 - d.mu[None, :]) / d.sigma[None, :], + ) + log_pdfs[:, :, i] = log_gauss_mass - log_p_accept + + else: + assert False + weighted_log_pdf = np.sum(log_pdfs, axis=-1) + np.log(self.weights[None, :]) + max_ = weighted_log_pdf.max(axis=1) + # We need to avoid (-inf) - (-inf) when the probability is zero. + max_[np.isneginf(max_)] = 0 + with np.errstate(divide="ignore"): # Suppress warning in log(0). 
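            # Log-sum-exp over the mixture components: subtracting the per-row maximum
            # before exponentiating avoids overflow/underflow, and the maximum is added
            # back after taking the log.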
+ return np.log(np.exp(weighted_log_pdf - max_[:, None]).sum(axis=1)) + max_ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/_tpe/sampler.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/_tpe/sampler.py new file mode 100644 index 0000000000000000000000000000000000000000..2254a3c45f519fed2fdd8c956853aadac4a91b6c --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/_tpe/sampler.py @@ -0,0 +1,799 @@ +from __future__ import annotations + +from collections.abc import Callable +from collections.abc import Sequence +import math +from typing import Any +from typing import cast +from typing import TYPE_CHECKING +import warnings + +import numpy as np + +from optuna import _deprecated +from optuna._convert_positional_args import convert_positional_args +from optuna._experimental import warn_experimental_argument +from optuna._hypervolume import compute_hypervolume +from optuna._hypervolume.hssp import _solve_hssp +from optuna.distributions import BaseDistribution +from optuna.distributions import CategoricalChoiceType +from optuna.logging import get_logger +from optuna.samplers._base import _CONSTRAINTS_KEY +from optuna.samplers._base import _process_constraints_after_trial +from optuna.samplers._base import BaseSampler +from optuna.samplers._lazy_random_state import LazyRandomState +from optuna.samplers._random import RandomSampler +from optuna.samplers._tpe.parzen_estimator import _ParzenEstimator +from optuna.samplers._tpe.parzen_estimator import _ParzenEstimatorParameters +from optuna.search_space import IntersectionSearchSpace +from optuna.search_space.group_decomposed import _GroupDecomposedSearchSpace +from optuna.search_space.group_decomposed import _SearchSpaceGroup +from optuna.study._multi_objective import _fast_non_domination_rank +from optuna.study._multi_objective import _is_pareto_front +from optuna.study._study_direction import StudyDirection +from optuna.trial import FrozenTrial +from optuna.trial import TrialState + + +if TYPE_CHECKING: + from optuna.study import Study + + +EPS = 1e-12 +_logger = get_logger(__name__) + + +def default_gamma(x: int) -> int: + return min(int(np.ceil(0.1 * x)), 25) + + +def hyperopt_default_gamma(x: int) -> int: + return min(int(np.ceil(0.25 * np.sqrt(x))), 25) + + +def default_weights(x: int) -> np.ndarray: + if x == 0: + return np.asarray([]) + elif x < 25: + return np.ones(x) + else: + ramp = np.linspace(1.0 / x, 1.0, num=x - 25) + flat = np.ones(25) + return np.concatenate([ramp, flat], axis=0) + + +class TPESampler(BaseSampler): + """Sampler using TPE (Tree-structured Parzen Estimator) algorithm. + + On each trial, for each parameter, TPE fits one Gaussian Mixture Model (GMM) ``l(x)`` to + the set of parameter values associated with the best objective values, and another GMM + ``g(x)`` to the remaining parameter values. It chooses the parameter value ``x`` that + maximizes the ratio ``l(x)/g(x)``. 
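To make the l(x)/g(x) selection concrete, here is a minimal numeric sketch of the acquisition step with made-up log-densities (illustrative only; the sampler below does the same thing with Parzen-estimator log-densities via _compute_acquisition_func and _compare):

import numpy as np

candidates = np.array([0.1, 0.4, 0.7, 0.9])          # candidate values drawn from l(x)
log_l = np.array([-0.2, -0.1, -0.5, -1.0])           # log l(x): density under "good" trials
log_g = np.array([-1.5, -0.4, -0.3, -0.2])           # log g(x): density under "bad" trials
acq = log_l - log_g                                  # log(l(x) / g(x))
print(candidates[np.argmax(acq)])  # 0.1: dense among good trials, sparse among bad ones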
+ + For further information about TPE algorithm, please refer to the following papers: + + - `Algorithms for Hyper-Parameter Optimization + `__ + - `Making a Science of Model Search: Hyperparameter Optimization in Hundreds of + Dimensions for Vision Architectures `__ + - `Tree-Structured Parzen Estimator: Understanding Its Algorithm Components and Their Roles for + Better Empirical Performance `__ + + For multi-objective TPE (MOTPE), please refer to the following papers: + + - `Multiobjective Tree-Structured Parzen Estimator for Computationally Expensive Optimization + Problems `__ + - `Multiobjective Tree-Structured Parzen Estimator `__ + + Please also check our articles: + + - `Significant Speed Up of Multi-Objective TPESampler in Optuna v4.0.0 + `__ + - `Multivariate TPE Makes Optuna Even More Powerful + `__ + + Example: + An example of a single-objective optimization is as follows: + + .. testcode:: + + import optuna + from optuna.samplers import TPESampler + + + def objective(trial): + x = trial.suggest_float("x", -10, 10) + return x**2 + + + study = optuna.create_study(sampler=TPESampler()) + study.optimize(objective, n_trials=10) + + .. note:: + :class:`~optuna.samplers.TPESampler`, which became much faster in v4.0.0, c.f. `our article + `__, + can handle multi-objective optimization with many trials as well. + Please note that :class:`~optuna.samplers.NSGAIISampler` will be used by default for + multi-objective optimization, so if users would like to use + :class:`~optuna.samplers.TPESampler` for multi-objective optimization, ``sampler`` must be + explicitly specified when study is created. + + Args: + consider_prior: + Enhance the stability of Parzen estimator by imposing a Gaussian prior when + :obj:`True`. The prior is only effective if the sampling distribution is + either :class:`~optuna.distributions.FloatDistribution`, + or :class:`~optuna.distributions.IntDistribution`. + + .. warning:: + Deprecated in v4.3.0. ``consider_prior`` argument will be removed in the future. + The removal of this feature is currently scheduled for v6.0.0, + but this schedule is subject to change. + From v4.3.0 onward, ``consider_prior`` automatically falls back to ``True``. + See https://github.com/optuna/optuna/releases/tag/v4.3.0. + prior_weight: + The weight of the prior. This argument is used in + :class:`~optuna.distributions.FloatDistribution`, + :class:`~optuna.distributions.IntDistribution`, and + :class:`~optuna.distributions.CategoricalDistribution`. + consider_magic_clip: + Enable a heuristic to limit the smallest variances of Gaussians used in + the Parzen estimator. + consider_endpoints: + Take endpoints of domains into account when calculating variances of Gaussians + in Parzen estimator. See the original paper for details on the heuristics + to calculate the variances. + n_startup_trials: + The random sampling is used instead of the TPE algorithm until the given number + of trials finish in the same study. + n_ei_candidates: + Number of candidate samples used to calculate the expected improvement. + gamma: + A function that takes the number of finished trials and returns the number + of trials to form a density function for samples with low grains. + See the original paper for more details. + weights: + A function that takes the number of finished trials and returns a weight for them. + See `Making a Science of Model Search: Hyperparameter Optimization in Hundreds of + Dimensions for Vision Architectures + `__ for more details. + + .. 
note:: + In the multi-objective case, this argument is only used to compute the weights of + bad trials, i.e., trials to construct `g(x)` in the `paper + `__ + ). The weights of good trials, i.e., trials to construct `l(x)`, are computed by a + rule based on the hypervolume contribution proposed in the `paper of MOTPE + `__. + seed: + Seed for random number generator. + multivariate: + If this is :obj:`True`, the multivariate TPE is used when suggesting parameters. + The multivariate TPE is reported to outperform the independent TPE. See `BOHB: Robust + and Efficient Hyperparameter Optimization at Scale + `__ and `our article + `__ + for more details. + + .. note:: + Added in v2.2.0 as an experimental feature. The interface may change in newer + versions without prior notice. See + https://github.com/optuna/optuna/releases/tag/v2.2.0. + group: + If this and ``multivariate`` are :obj:`True`, the multivariate TPE with the group + decomposed search space is used when suggesting parameters. + The sampling algorithm decomposes the search space based on past trials and samples + from the joint distribution in each decomposed subspace. + The decomposed subspaces are a partition of the whole search space. Each subspace + is a maximal subset of the whole search space, which satisfies the following: + for a trial in completed trials, the intersection of the subspace and the search space + of the trial becomes subspace itself or an empty set. + Sampling from the joint distribution on the subspace is realized by multivariate TPE. + If ``group`` is :obj:`True`, ``multivariate`` must be :obj:`True` as well. + + .. note:: + Added in v2.8.0 as an experimental feature. The interface may change in newer + versions without prior notice. See + https://github.com/optuna/optuna/releases/tag/v2.8.0. + + Example: + + .. testcode:: + + import optuna + + + def objective(trial): + x = trial.suggest_categorical("x", ["A", "B"]) + if x == "A": + return trial.suggest_float("y", -10, 10) + else: + return trial.suggest_int("z", -10, 10) + + + sampler = optuna.samplers.TPESampler(multivariate=True, group=True) + study = optuna.create_study(sampler=sampler) + study.optimize(objective, n_trials=10) + warn_independent_sampling: + If this is :obj:`True` and ``multivariate=True``, a warning message is emitted when + the value of a parameter is sampled by using an independent sampler. + If ``multivariate=False``, this flag has no effect. + constant_liar: + If :obj:`True`, penalize running trials to avoid suggesting parameter configurations + nearby. + + .. note:: + Abnormally terminated trials often leave behind a record with a state of + ``RUNNING`` in the storage. + Such "zombie" trial parameters will be avoided by the constant liar algorithm + during subsequent sampling. + When using an :class:`~optuna.storages.RDBStorage`, it is possible to enable the + ``heartbeat_interval`` to change the records for abnormally terminated trials to + ``FAIL``. + + .. note:: + It is recommended to set this value to :obj:`True` during distributed + optimization to avoid having multiple workers evaluating similar parameter + configurations. In particular, if each objective function evaluation is costly + and the durations of the running states are significant, and/or the number of + workers is high. + + .. note:: + Added in v2.8.0 as an experimental feature. The interface may change in newer + versions without prior notice. See + https://github.com/optuna/optuna/releases/tag/v2.8.0. 
+ constraints_func: + An optional function that computes the objective constraints. It must take a + :class:`~optuna.trial.FrozenTrial` and return the constraints. The return value must + be a sequence of :obj:`float` s. A value strictly larger than 0 means that a + constraints is violated. A value equal to or smaller than 0 is considered feasible. + If ``constraints_func`` returns more than one value for a trial, that trial is + considered feasible if and only if all values are equal to 0 or smaller. + + The ``constraints_func`` will be evaluated after each successful trial. + The function won't be called when trials fail or they are pruned, but this behavior is + subject to change in the future releases. + + .. note:: + Added in v3.0.0 as an experimental feature. The interface may change in newer + versions without prior notice. + See https://github.com/optuna/optuna/releases/tag/v3.0.0. + categorical_distance_func: + A dictionary of distance functions for categorical parameters. The key is the name of + the categorical parameter and the value is a distance function that takes two + :class:`~optuna.distributions.CategoricalChoiceType` s and returns a :obj:`float` + value. The distance function must return a non-negative value. + + While categorical choices are handled equally by default, this option allows users to + specify prior knowledge on the structure of categorical parameters. When specified, + categorical choices closer to current best choices are more likely to be sampled. + + .. note:: + Added in v3.4.0 as an experimental feature. The interface may change in newer + versions without prior notice. + See https://github.com/optuna/optuna/releases/tag/v3.4.0. + """ + + @convert_positional_args( + previous_positional_arg_names=[ + "self", + "consider_prior", + "prior_weight", + "consider_magic_clip", + "consider_endpoints", + "n_startup_trials", + "n_ei_candidates", + "gamma", + "weights", + "seed", + ], + deprecated_version="4.4.0", + removed_version="6.0.0", + ) + def __init__( + self, + *, + consider_prior: bool = True, + prior_weight: float = 1.0, + consider_magic_clip: bool = True, + consider_endpoints: bool = False, + n_startup_trials: int = 10, + n_ei_candidates: int = 24, + gamma: Callable[[int], int] = default_gamma, + weights: Callable[[int], np.ndarray] = default_weights, + seed: int | None = None, + multivariate: bool = False, + group: bool = False, + warn_independent_sampling: bool = True, + constant_liar: bool = False, + constraints_func: Callable[[FrozenTrial], Sequence[float]] | None = None, + categorical_distance_func: ( + dict[str, Callable[[CategoricalChoiceType, CategoricalChoiceType], float]] | None + ) = None, + ) -> None: + if not consider_prior: + msg = _deprecated._DEPRECATION_WARNING_TEMPLATE.format( + name="`consider_prior`", d_ver="4.3.0", r_ver="6.0.0" + ) + warnings.warn( + f"{msg} From v4.3.0 onward, `consider_prior` automatically falls back to `True`.", + FutureWarning, + ) + + self._parzen_estimator_parameters = _ParzenEstimatorParameters( + prior_weight=prior_weight, + consider_magic_clip=consider_magic_clip, + consider_endpoints=consider_endpoints, + weights=weights, + multivariate=multivariate, + categorical_distance_func=categorical_distance_func or {}, + ) + self._n_startup_trials = n_startup_trials + self._n_ei_candidates = n_ei_candidates + self._gamma = gamma + + self._warn_independent_sampling = warn_independent_sampling + self._rng = LazyRandomState(seed) + self._random_sampler = RandomSampler(seed=seed) + + self._multivariate = 
multivariate + self._group = group + self._group_decomposed_search_space: _GroupDecomposedSearchSpace | None = None + self._search_space_group: _SearchSpaceGroup | None = None + self._search_space = IntersectionSearchSpace(include_pruned=True) + self._constant_liar = constant_liar + self._constraints_func = constraints_func + # NOTE(nabenabe0928): Users can overwrite _ParzenEstimator to customize the TPE behavior. + self._parzen_estimator_cls = _ParzenEstimator + + if multivariate: + warn_experimental_argument("multivariate") + + if group: + if not multivariate: + raise ValueError( + "``group`` option can only be enabled when ``multivariate`` is enabled." + ) + warn_experimental_argument("group") + self._group_decomposed_search_space = _GroupDecomposedSearchSpace(True) + + if constant_liar: + warn_experimental_argument("constant_liar") + + if constraints_func is not None: + warn_experimental_argument("constraints_func") + + if categorical_distance_func is not None: + warn_experimental_argument("categorical_distance_func") + + def reseed_rng(self) -> None: + self._rng.rng.seed() + self._random_sampler.reseed_rng() + + def infer_relative_search_space( + self, study: Study, trial: FrozenTrial + ) -> dict[str, BaseDistribution]: + if not self._multivariate: + return {} + + search_space: dict[str, BaseDistribution] = {} + + if self._group: + assert self._group_decomposed_search_space is not None + self._search_space_group = self._group_decomposed_search_space.calculate(study) + for sub_space in self._search_space_group.search_spaces: + # Sort keys because Python's string hashing is nondeterministic. + for name, distribution in sorted(sub_space.items()): + if distribution.single(): + continue + search_space[name] = distribution + return search_space + + for name, distribution in self._search_space.calculate(study).items(): + if distribution.single(): + continue + search_space[name] = distribution + + return search_space + + def sample_relative( + self, study: Study, trial: FrozenTrial, search_space: dict[str, BaseDistribution] + ) -> dict[str, Any]: + if self._group: + assert self._search_space_group is not None + params = {} + for sub_space in self._search_space_group.search_spaces: + search_space = {} + # Sort keys because Python's string hashing is nondeterministic. + for name, distribution in sorted(sub_space.items()): + if not distribution.single(): + search_space[name] = distribution + params.update(self._sample_relative(study, trial, search_space)) + return params + else: + return self._sample_relative(study, trial, search_space) + + def _sample_relative( + self, study: Study, trial: FrozenTrial, search_space: dict[str, BaseDistribution] + ) -> dict[str, Any]: + if search_space == {}: + return {} + + states = (TrialState.COMPLETE, TrialState.PRUNED) + trials = study._get_trials(deepcopy=False, states=states, use_cache=True) + # If the number of samples is insufficient, we run random trial. + if len(trials) < self._n_startup_trials: + return {} + + return self._sample(study, trial, search_space) + + def sample_independent( + self, + study: Study, + trial: FrozenTrial, + param_name: str, + param_distribution: BaseDistribution, + ) -> Any: + states = (TrialState.COMPLETE, TrialState.PRUNED) + trials = study._get_trials(deepcopy=False, states=states, use_cache=True) + + # If the number of samples is insufficient, we run random trial. 
+ if len(trials) < self._n_startup_trials: + return self._random_sampler.sample_independent( + study, trial, param_name, param_distribution + ) + + if self._warn_independent_sampling and self._multivariate: + # Avoid independent warning at the first sampling of `param_name`. + if any(param_name in trial.params for trial in trials): + _logger.warning( + f"The parameter '{param_name}' in trial#{trial.number} is sampled " + "independently instead of being sampled by multivariate TPE sampler. " + "(optimization performance may be degraded). " + "You can suppress this warning by setting `warn_independent_sampling` " + "to `False` in the constructor of `TPESampler`, " + "if this independent sampling is intended behavior." + ) + + return self._sample(study, trial, {param_name: param_distribution})[param_name] + + def _get_internal_repr( + self, trials: list[FrozenTrial], search_space: dict[str, BaseDistribution] + ) -> dict[str, np.ndarray]: + values: dict[str, list[float]] = {param_name: [] for param_name in search_space} + for trial in trials: + if all((param_name in trial.params) for param_name in search_space): + for param_name in search_space: + param = trial.params[param_name] + distribution = trial.distributions[param_name] + values[param_name].append(distribution.to_internal_repr(param)) + return {k: np.asarray(v) for k, v in values.items()} + + def _sample( + self, study: Study, trial: FrozenTrial, search_space: dict[str, BaseDistribution] + ) -> dict[str, Any]: + if self._constant_liar: + states = [TrialState.COMPLETE, TrialState.PRUNED, TrialState.RUNNING] + else: + states = [TrialState.COMPLETE, TrialState.PRUNED] + use_cache = not self._constant_liar + trials = study._get_trials(deepcopy=False, states=states, use_cache=use_cache) + + # We divide data into below and above. + n = sum(trial.state != TrialState.RUNNING for trial in trials) # Ignore running trials. 
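        # self._gamma(n) decides how many of the n finished trials form the "below"
        # (good) group used to build l(x); the remaining trials form the "above" group
        # used to build g(x).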
+ below_trials, above_trials = _split_trials( + study, + trials, + self._gamma(n), + self._constraints_func is not None, + ) + + mpe_below = self._build_parzen_estimator( + study, search_space, below_trials, handle_below=True + ) + mpe_above = self._build_parzen_estimator( + study, search_space, above_trials, handle_below=False + ) + + samples_below = mpe_below.sample(self._rng.rng, self._n_ei_candidates) + acq_func_vals = self._compute_acquisition_func(samples_below, mpe_below, mpe_above) + ret = TPESampler._compare(samples_below, acq_func_vals) + + for param_name, dist in search_space.items(): + ret[param_name] = dist.to_external_repr(ret[param_name]) + + return ret + + def _build_parzen_estimator( + self, + study: Study, + search_space: dict[str, BaseDistribution], + trials: list[FrozenTrial], + handle_below: bool, + ) -> _ParzenEstimator: + observations = self._get_internal_repr(trials, search_space) + if handle_below and study._is_multi_objective(): + param_mask_below = [] + for trial in trials: + param_mask_below.append( + all((param_name in trial.params) for param_name in search_space) + ) + weights_below = _calculate_weights_below_for_multi_objective( + study, trials, self._constraints_func + )[param_mask_below] + assert np.isfinite(weights_below).all() + mpe = self._parzen_estimator_cls( + observations, search_space, self._parzen_estimator_parameters, weights_below + ) + else: + mpe = self._parzen_estimator_cls( + observations, search_space, self._parzen_estimator_parameters + ) + + if not isinstance(mpe, _ParzenEstimator): + raise RuntimeError("_parzen_estimator_cls must override _ParzenEstimator.") + + return mpe + + def _compute_acquisition_func( + self, + samples: dict[str, np.ndarray], + mpe_below: _ParzenEstimator, + mpe_above: _ParzenEstimator, + ) -> np.ndarray: + log_likelihoods_below = mpe_below.log_pdf(samples) + log_likelihoods_above = mpe_above.log_pdf(samples) + acq_func_vals = log_likelihoods_below - log_likelihoods_above + return acq_func_vals + + @classmethod + def _compare( + cls, samples: dict[str, np.ndarray], acquisition_func_vals: np.ndarray + ) -> dict[str, int | float]: + sample_size = next(iter(samples.values())).size + if sample_size == 0: + raise ValueError(f"The size of `samples` must be positive, but got {sample_size}.") + + if sample_size != acquisition_func_vals.size: + raise ValueError( + "The sizes of `samples` and `acquisition_func_vals` must be same, but got " + "(samples.size, acquisition_func_vals.size) = " + f"({sample_size}, {acquisition_func_vals.size})." + ) + + best_idx = np.argmax(acquisition_func_vals) + return {k: v[best_idx].item() for k, v in samples.items()} + + @staticmethod + def hyperopt_parameters() -> dict[str, Any]: + """Return the the default parameters of hyperopt (v0.1.2). + + :class:`~optuna.samplers.TPESampler` can be instantiated with the parameters returned + by this method. + + Example: + + Create a :class:`~optuna.samplers.TPESampler` instance with the default + parameters of `hyperopt `__. + + .. testcode:: + + import optuna + from optuna.samplers import TPESampler + + + def objective(trial): + x = trial.suggest_float("x", -10, 10) + return x**2 + + + sampler = TPESampler(**TPESampler.hyperopt_parameters()) + study = optuna.create_study(sampler=sampler) + study.optimize(objective, n_trials=10) + + Returns: + A dictionary containing the default parameters of hyperopt. 
+ + """ + + return { + "consider_prior": True, + "prior_weight": 1.0, + "consider_magic_clip": True, + "consider_endpoints": False, + "n_startup_trials": 20, + "n_ei_candidates": 24, + "gamma": hyperopt_default_gamma, + "weights": default_weights, + } + + def before_trial(self, study: Study, trial: FrozenTrial) -> None: + self._random_sampler.before_trial(study, trial) + + def after_trial( + self, + study: Study, + trial: FrozenTrial, + state: TrialState, + values: Sequence[float] | None, + ) -> None: + assert state in [TrialState.COMPLETE, TrialState.FAIL, TrialState.PRUNED] + if self._constraints_func is not None: + _process_constraints_after_trial(self._constraints_func, study, trial, state) + self._random_sampler.after_trial(study, trial, state, values) + + +def _get_reference_point(loss_vals: np.ndarray) -> np.ndarray: + worst_point = np.max(loss_vals, axis=0) + reference_point = np.maximum(1.1 * worst_point, 0.9 * worst_point) + reference_point[reference_point == 0] = EPS + return reference_point + + +def _split_trials( + study: Study, trials: list[FrozenTrial], n_below: int, constraints_enabled: bool +) -> tuple[list[FrozenTrial], list[FrozenTrial]]: + complete_trials = [] + pruned_trials = [] + running_trials = [] + infeasible_trials = [] + + for trial in trials: + if trial.state == TrialState.RUNNING: + # We should check if the trial is RUNNING before the feasibility check + # because its constraint values have not yet been set. + running_trials.append(trial) + elif constraints_enabled and _get_infeasible_trial_score(trial) > 0: + infeasible_trials.append(trial) + elif trial.state == TrialState.COMPLETE: + complete_trials.append(trial) + elif trial.state == TrialState.PRUNED: + pruned_trials.append(trial) + else: + assert False + + # We divide data into below and above. + below_complete, above_complete = _split_complete_trials(complete_trials, study, n_below) + # This ensures `n_below` is non-negative to prevent unexpected trial splits. + n_below = max(0, n_below - len(below_complete)) + below_pruned, above_pruned = _split_pruned_trials(pruned_trials, study, n_below) + # This ensures `n_below` is non-negative to prevent unexpected trial splits. 
+ n_below = max(0, n_below - len(below_pruned)) + below_infeasible, above_infeasible = _split_infeasible_trials(infeasible_trials, n_below) + + below_trials = below_complete + below_pruned + below_infeasible + above_trials = above_complete + above_pruned + above_infeasible + running_trials + below_trials.sort(key=lambda trial: trial.number) + above_trials.sort(key=lambda trial: trial.number) + + return below_trials, above_trials + + +def _split_complete_trials( + trials: Sequence[FrozenTrial], study: Study, n_below: int +) -> tuple[list[FrozenTrial], list[FrozenTrial]]: + n_below = min(n_below, len(trials)) + if len(study.directions) <= 1: + return _split_complete_trials_single_objective(trials, study, n_below) + else: + return _split_complete_trials_multi_objective(trials, study, n_below) + + +def _split_complete_trials_single_objective( + trials: Sequence[FrozenTrial], study: Study, n_below: int +) -> tuple[list[FrozenTrial], list[FrozenTrial]]: + if study.direction == StudyDirection.MINIMIZE: + sorted_trials = sorted(trials, key=lambda trial: cast(float, trial.value)) + else: + sorted_trials = sorted(trials, key=lambda trial: cast(float, trial.value), reverse=True) + return sorted_trials[:n_below], sorted_trials[n_below:] + + +def _split_complete_trials_multi_objective( + trials: Sequence[FrozenTrial], study: Study, n_below: int +) -> tuple[list[FrozenTrial], list[FrozenTrial]]: + if n_below == 0: + return [], list(trials) + elif n_below == len(trials): + return list(trials), [] + + assert 0 < n_below < len(trials) + lvals = np.array([trial.values for trial in trials]) + lvals *= np.array([-1.0 if d == StudyDirection.MAXIMIZE else 1.0 for d in study.directions]) + nondomination_ranks = _fast_non_domination_rank(lvals, n_below=n_below) + ranks, rank_counts = np.unique(nondomination_ranks, return_counts=True) + last_rank_before_tiebreak = int(np.max(ranks[np.cumsum(rank_counts) <= n_below], initial=-1)) + assert all(ranks[: last_rank_before_tiebreak + 1] == np.arange(last_rank_before_tiebreak + 1)) + indices = np.arange(len(trials)) + indices_below = indices[nondomination_ranks <= last_rank_before_tiebreak] + + if indices_below.size < n_below: # Tie-break with Hypervolume subset selection problem (HSSP). 
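        # The last nondomination rank does not fit entirely into the "below" set, so the
        # remaining slots are filled with trials from that rank chosen to approximately
        # maximize the hypervolume of the selected subset (greedy HSSP).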
+ assert ranks[last_rank_before_tiebreak + 1] == last_rank_before_tiebreak + 1 + need_tiebreak = nondomination_ranks == last_rank_before_tiebreak + 1 + rank_i_lvals = lvals[need_tiebreak] + subset_size = n_below - indices_below.size + selected_indices = _solve_hssp( + rank_i_lvals, indices[need_tiebreak], subset_size, _get_reference_point(rank_i_lvals) + ) + indices_below = np.append(indices_below, selected_indices) + + below_indices_set = set(cast(list, indices_below.tolist())) + below_trials = [trials[i] for i in range(len(trials)) if i in below_indices_set] + above_trials = [trials[i] for i in range(len(trials)) if i not in below_indices_set] + return below_trials, above_trials + + +def _get_pruned_trial_score(trial: FrozenTrial, study: Study) -> tuple[float, float]: + if len(trial.intermediate_values) > 0: + step, intermediate_value = max(trial.intermediate_values.items()) + if math.isnan(intermediate_value): + return -step, float("inf") + elif study.direction == StudyDirection.MINIMIZE: + return -step, intermediate_value + else: + return -step, -intermediate_value + else: + return 1, 0.0 + + +def _split_pruned_trials( + trials: Sequence[FrozenTrial], study: Study, n_below: int +) -> tuple[list[FrozenTrial], list[FrozenTrial]]: + n_below = min(n_below, len(trials)) + sorted_trials = sorted(trials, key=lambda trial: _get_pruned_trial_score(trial, study)) + return sorted_trials[:n_below], sorted_trials[n_below:] + + +def _get_infeasible_trial_score(trial: FrozenTrial) -> float: + constraint = trial.system_attrs.get(_CONSTRAINTS_KEY) + if constraint is None: + warnings.warn( + f"Trial {trial.number} does not have constraint values." + " It will be treated as a lower priority than other trials." + ) + return float("inf") + else: + # Violation values of infeasible dimensions are summed up. + return sum(v for v in constraint if v > 0) + + +def _split_infeasible_trials( + trials: Sequence[FrozenTrial], n_below: int +) -> tuple[list[FrozenTrial], list[FrozenTrial]]: + n_below = min(n_below, len(trials)) + sorted_trials = sorted(trials, key=_get_infeasible_trial_score) + return sorted_trials[:n_below], sorted_trials[n_below:] + + +def _calculate_weights_below_for_multi_objective( + study: Study, + below_trials: list[FrozenTrial], + constraints_func: Callable[[FrozenTrial], Sequence[float]] | None, +) -> np.ndarray: + def _feasible(trial: FrozenTrial) -> bool: + return constraints_func is None or all(c <= 0 for c in constraints_func(trial)) + + is_feasible = np.asarray([_feasible(t) for t in below_trials]) + weights_below = np.where(is_feasible, 1.0, EPS) # Assign EPS to infeasible trials. + n_below_feasible = np.count_nonzero(is_feasible) + if n_below_feasible <= 1: + return weights_below + + lvals = np.asarray([t.values for t in below_trials])[is_feasible] + lvals *= np.array([-1.0 if d == StudyDirection.MAXIMIZE else 1.0 for d in study.directions]) + ref_point = _get_reference_point(lvals) + on_front = _is_pareto_front(lvals, assume_unique_lexsorted=False) + pareto_sols = lvals[on_front] + hv = compute_hypervolume(pareto_sols, ref_point, assume_pareto=True) + if np.isinf(hv): + # TODO(nabenabe): Assign EPS to non-Pareto solutions, and + # solutions with finite contrib if hv is inf. Ref: PR#5813. + return weights_below + + loo_mat = ~np.eye(pareto_sols.shape[0], dtype=bool) # Leave-one-out bool matrix. 
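    # Each Pareto-front trial is weighted by its hypervolume contribution, i.e. the drop
    # in hypervolume when that single solution is left out; feasible trials not on the
    # front keep a zero contribution and therefore receive the minimum weight EPS.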
+ contribs = np.zeros(n_below_feasible, dtype=float) + contribs[on_front] = hv - np.array( + [compute_hypervolume(pareto_sols[loo], ref_point, assume_pareto=True) for loo in loo_mat] + ) + weights_below[is_feasible] = np.maximum(contribs / max(np.max(contribs), EPS), EPS) + return weights_below diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/nsgaii/__init__.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/nsgaii/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..87247b277b4456c089d30869804ecfeed898e898 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/nsgaii/__init__.py @@ -0,0 +1,18 @@ +from optuna.samplers.nsgaii._crossovers._base import BaseCrossover +from optuna.samplers.nsgaii._crossovers._blxalpha import BLXAlphaCrossover +from optuna.samplers.nsgaii._crossovers._sbx import SBXCrossover +from optuna.samplers.nsgaii._crossovers._spx import SPXCrossover +from optuna.samplers.nsgaii._crossovers._undx import UNDXCrossover +from optuna.samplers.nsgaii._crossovers._uniform import UniformCrossover +from optuna.samplers.nsgaii._crossovers._vsbx import VSBXCrossover + + +__all__ = [ + "BaseCrossover", + "BLXAlphaCrossover", + "SBXCrossover", + "SPXCrossover", + "UNDXCrossover", + "UniformCrossover", + "VSBXCrossover", +] diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/nsgaii/_after_trial_strategy.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/nsgaii/_after_trial_strategy.py new file mode 100644 index 0000000000000000000000000000000000000000..34cb75c487ddcc37f4d4a053f1f565cbaa4c0266 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/nsgaii/_after_trial_strategy.py @@ -0,0 +1,35 @@ +from __future__ import annotations + +from collections.abc import Callable +from collections.abc import Sequence +from typing import TYPE_CHECKING + +from optuna.samplers._base import _process_constraints_after_trial +from optuna.trial import FrozenTrial +from optuna.trial import TrialState + + +if TYPE_CHECKING: + from optuna.study import Study + + +class NSGAIIAfterTrialStrategy: + def __init__( + self, *, constraints_func: Callable[[FrozenTrial], Sequence[float]] | None = None + ) -> None: + self._constraints_func = constraints_func + + def __call__( + self, + study: Study, + trial: FrozenTrial, + state: TrialState, + values: Sequence[float] | None = None, + ) -> None: + """Carry out the after trial process of default NSGA-II. + + This method is called after each trial of the study, examines whether the trial result is + valid in terms of constraints, and store the results in system_attrs of the study. 
+ """ + if self._constraints_func is not None: + _process_constraints_after_trial(self._constraints_func, study, trial, state) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/nsgaii/_child_generation_strategy.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/nsgaii/_child_generation_strategy.py new file mode 100644 index 0000000000000000000000000000000000000000..ef633a44a42367fabc5d3dce3d1086ac5ccd92b3 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/nsgaii/_child_generation_strategy.py @@ -0,0 +1,101 @@ +from __future__ import annotations + +from collections.abc import Callable +from collections.abc import Sequence +from typing import Any +from typing import TYPE_CHECKING + +from optuna.distributions import BaseDistribution +from optuna.samplers._lazy_random_state import LazyRandomState +from optuna.samplers.nsgaii._constraints_evaluation import _constrained_dominates +from optuna.samplers.nsgaii._crossover import perform_crossover +from optuna.samplers.nsgaii._crossovers._base import BaseCrossover +from optuna.study._multi_objective import _dominates +from optuna.trial import FrozenTrial + + +if TYPE_CHECKING: + from optuna.study import Study + + +class NSGAIIChildGenerationStrategy: + def __init__( + self, + *, + mutation_prob: float | None = None, + crossover: BaseCrossover, + crossover_prob: float, + swapping_prob: float, + constraints_func: Callable[[FrozenTrial], Sequence[float]] | None = None, + rng: LazyRandomState, + ) -> None: + if not (mutation_prob is None or 0.0 <= mutation_prob <= 1.0): + raise ValueError( + "`mutation_prob` must be None or a float value within the range [0.0, 1.0]." + ) + + if not (0.0 <= crossover_prob <= 1.0): + raise ValueError("`crossover_prob` must be a float value within the range [0.0, 1.0].") + + if not (0.0 <= swapping_prob <= 1.0): + raise ValueError("`swapping_prob` must be a float value within the range [0.0, 1.0].") + + if not isinstance(crossover, BaseCrossover): + raise ValueError( + f"'{crossover}' is not a valid crossover." + " For valid crossovers see" + " https://optuna.readthedocs.io/en/stable/reference/samplers.html." + ) + + self._crossover_prob = crossover_prob + self._mutation_prob = mutation_prob + self._swapping_prob = swapping_prob + self._crossover = crossover + self._constraints_func = constraints_func + self._rng = rng + + def __call__( + self, + study: Study, + search_space: dict[str, BaseDistribution], + parent_population: list[FrozenTrial], + ) -> dict[str, Any]: + """Generate a child parameter from the given parent population by NSGA-II algorithm. + Args: + study: + Target study object. + search_space: + A dictionary containing the parameter names and parameter's distributions. + parent_population: + A list of trials that are selected as parent population. + Returns: + A dictionary containing the parameter names and parameter's values. + """ + dominates = _dominates if self._constraints_func is None else _constrained_dominates + # We choose a child based on the specified crossover method. 
+ if self._rng.rng.rand() < self._crossover_prob: + child_params = perform_crossover( + self._crossover, + study, + parent_population, + search_space, + self._rng.rng, + self._swapping_prob, + dominates, + ) + else: + parent_population_size = len(parent_population) + parent_params = parent_population[self._rng.rng.choice(parent_population_size)].params + child_params = {name: parent_params[name] for name in search_space.keys()} + + n_params = len(child_params) + if self._mutation_prob is None: + mutation_prob = 1.0 / max(1.0, n_params) + else: + mutation_prob = self._mutation_prob + + params = {} + for param_name in child_params.keys(): + if self._rng.rng.rand() >= mutation_prob: + params[param_name] = child_params[param_name] + return params diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/nsgaii/_constraints_evaluation.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/nsgaii/_constraints_evaluation.py new file mode 100644 index 0000000000000000000000000000000000000000..f7e14638c9a6f8de698af5492d800175bfe9a969 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/nsgaii/_constraints_evaluation.py @@ -0,0 +1,127 @@ +from __future__ import annotations + +from collections.abc import Sequence +import warnings + +import numpy as np + +from optuna.samplers._base import _CONSTRAINTS_KEY +from optuna.study import StudyDirection +from optuna.study._multi_objective import _dominates +from optuna.trial import FrozenTrial +from optuna.trial import TrialState + + +def _constrained_dominates( + trial0: FrozenTrial, trial1: FrozenTrial, directions: Sequence[StudyDirection] +) -> bool: + """Checks constrained-domination. + + A trial x is said to constrained-dominate a trial y, if any of the following conditions is + true: + 1) Trial x is feasible and trial y is not. + 2) Trial x and y are both infeasible, but solution x has a smaller overall constraint + violation. + 3) Trial x and y are feasible and trial x dominates trial y. + """ + + constraints0 = trial0.system_attrs.get(_CONSTRAINTS_KEY) + constraints1 = trial1.system_attrs.get(_CONSTRAINTS_KEY) + + if constraints0 is None: + warnings.warn( + f"Trial {trial0.number} does not have constraint values." + " It will be dominated by the other trials." + ) + + if constraints1 is None: + warnings.warn( + f"Trial {trial1.number} does not have constraint values." + " It will be dominated by the other trials." + ) + + if constraints0 is None and constraints1 is None: + # Neither Trial x nor y has constraints values + return _dominates(trial0, trial1, directions) + + if constraints0 is not None and constraints1 is None: + # Trial x has constraint values, but y doesn't. + return True + + if constraints0 is None and constraints1 is not None: + # If Trial y has constraint values, but x doesn't. + return False + + assert isinstance(constraints0, (list, tuple)) + assert isinstance(constraints1, (list, tuple)) + + if len(constraints0) != len(constraints1): + raise ValueError("Trials with different numbers of constraints cannot be compared.") + + if trial0.state != TrialState.COMPLETE: + return False + + if trial1.state != TrialState.COMPLETE: + return True + + satisfy_constraints0 = all(v <= 0 for v in constraints0) + satisfy_constraints1 = all(v <= 0 for v in constraints1) + + if satisfy_constraints0 and satisfy_constraints1: + # Both trials satisfy the constraints. 
+ return _dominates(trial0, trial1, directions) + + if satisfy_constraints0: + # trial0 satisfies the constraints, but trial1 violates them. + return True + + if satisfy_constraints1: + # trial1 satisfies the constraints, but trial0 violates them. + return False + + # Both trials violate the constraints. + violation0 = sum(v for v in constraints0 if v > 0) + violation1 = sum(v for v in constraints1 if v > 0) + return violation0 < violation1 + + +def _evaluate_penalty(population: Sequence[FrozenTrial]) -> np.ndarray: + """Evaluate feasibility of trials in population. + Returns: + A list of feasibility status T/F/None of trials in population, where T/F means + feasible/infeasible and None means that the trial does not have constraint values. + """ + + penalty: list[float] = [] + for trial in population: + constraints = trial.system_attrs.get(_CONSTRAINTS_KEY) + if constraints is None: + penalty.append(np.nan) + else: + penalty.append(sum(v for v in constraints if v > 0)) + return np.array(penalty) + + +def _validate_constraints( + population: list[FrozenTrial], + *, + is_constrained: bool = False, +) -> None: + if not is_constrained: + return + + num_constraints = max( + [len(t.system_attrs.get(_CONSTRAINTS_KEY, [])) for t in population], default=0 + ) + for _trial in population: + _constraints = _trial.system_attrs.get(_CONSTRAINTS_KEY) + if _constraints is None: + warnings.warn( + f"Trial {_trial.number} does not have constraint values." + " It will be dominated by the other trials." + ) + continue + if np.any(np.isnan(np.array(_constraints))): + raise ValueError("NaN is not acceptable as constraint value.") + elif len(_constraints) != num_constraints: + raise ValueError("Trials with different numbers of constraints cannot be compared.") diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/nsgaii/_crossover.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/nsgaii/_crossover.py new file mode 100644 index 0000000000000000000000000000000000000000..ef27fd97109f1626662da62a143ed3acf7c76462 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/nsgaii/_crossover.py @@ -0,0 +1,178 @@ +from __future__ import annotations + +from collections.abc import Callable +from collections.abc import Sequence +from typing import Any +from typing import TYPE_CHECKING + +import numpy as np + +from optuna._transform import _SearchSpaceTransform +from optuna.distributions import BaseDistribution +from optuna.distributions import FloatDistribution +from optuna.distributions import IntDistribution +from optuna.samplers.nsgaii._crossovers._base import BaseCrossover +from optuna.study import StudyDirection +from optuna.trial import FrozenTrial + + +if TYPE_CHECKING: + from optuna.study import Study + + +_NUMERICAL_DISTRIBUTIONS = ( + FloatDistribution, + IntDistribution, +) + + +def _try_crossover( + parents: list[FrozenTrial], + crossover: BaseCrossover, + study: Study, + rng: np.random.RandomState, + swapping_prob: float, + categorical_search_space: dict[str, BaseDistribution], + numerical_search_space: dict[str, BaseDistribution], + numerical_transform: _SearchSpaceTransform | None, +) -> dict[str, Any]: + child_params: dict[str, Any] = {} + + if len(categorical_search_space) > 0: + parents_categorical_params = np.array( + [ + [parent.params[p] for p in categorical_search_space] + for parent in [parents[0], parents[-1]] + ], + dtype=object, + ) + + 
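# Categorical parameters are handled separately from the numerical crossover
# below: each categorical value is copied from one of the two endpoint parents
# (the first and the last selected parent), chosen per parameter according to
# `swapping_prob` (see _inlined_categorical_uniform_crossover in this module).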
child_categorical_array = _inlined_categorical_uniform_crossover( + parents_categorical_params, rng, swapping_prob, categorical_search_space + ) + child_categorical_params = { + param: value for param, value in zip(categorical_search_space, child_categorical_array) + } + child_params.update(child_categorical_params) + + if numerical_transform is None: + return child_params + + # The following is applied only for numerical parameters. + parents_numerical_params = np.stack( + [ + numerical_transform.transform( + { + param_key: parent.params[param_key] + for param_key in numerical_search_space.keys() + } + ) + for parent in parents + ] + ) # Parent individual with NUMERICAL_DISTRIBUTIONS parameter. + + child_numerical_array = crossover.crossover( + parents_numerical_params, rng, study, numerical_transform.bounds + ) + child_numerical_params = numerical_transform.untransform(child_numerical_array) + child_params.update(child_numerical_params) + + return child_params + + +def perform_crossover( + crossover: BaseCrossover, + study: Study, + parent_population: Sequence[FrozenTrial], + search_space: dict[str, BaseDistribution], + rng: np.random.RandomState, + swapping_prob: float, + dominates: Callable[[FrozenTrial, FrozenTrial, Sequence[StudyDirection]], bool], +) -> dict[str, Any]: + numerical_search_space: dict[str, BaseDistribution] = {} + categorical_search_space: dict[str, BaseDistribution] = {} + for key, value in search_space.items(): + if isinstance(value, _NUMERICAL_DISTRIBUTIONS): + numerical_search_space[key] = value + else: + categorical_search_space[key] = value + + numerical_transform: _SearchSpaceTransform | None = None + if len(numerical_search_space) != 0: + numerical_transform = _SearchSpaceTransform(numerical_search_space) + + while True: # Repeat while parameters lie outside search space boundaries. + parents = _select_parents(crossover, study, parent_population, rng, dominates) + child_params = _try_crossover( + parents, + crossover, + study, + rng, + swapping_prob, + categorical_search_space, + numerical_search_space, + numerical_transform, + ) + + if _is_contained(child_params, search_space): + break + + return child_params + + +def _select_parents( + crossover: BaseCrossover, + study: Study, + parent_population: Sequence[FrozenTrial], + rng: np.random.RandomState, + dominates: Callable[[FrozenTrial, FrozenTrial, Sequence[StudyDirection]], bool], +) -> list[FrozenTrial]: + parents: list[FrozenTrial] = [] + for _ in range(crossover.n_parents): + parent = _select_parent( + study, [t for t in parent_population if t not in parents], rng, dominates + ) + parents.append(parent) + + return parents + + +def _select_parent( + study: Study, + parent_population: Sequence[FrozenTrial], + rng: np.random.RandomState, + dominates: Callable[[FrozenTrial, FrozenTrial, Sequence[StudyDirection]], bool], +) -> FrozenTrial: + population_size = len(parent_population) + candidate0 = parent_population[rng.choice(population_size)] + candidate1 = parent_population[rng.choice(population_size)] + + # TODO(ohta): Consider crowding distance. 
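# Binary tournament selection: two candidates are drawn uniformly at random
# (possibly the same trial) and the first is returned only if it
# (constrained-)dominates the second; otherwise the second candidate wins.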
+ if dominates(candidate0, candidate1, study.directions): + return candidate0 + else: + return candidate1 + + +def _is_contained(params: dict[str, Any], search_space: dict[str, BaseDistribution]) -> bool: + for param_name in params.keys(): + param, param_distribution = params[param_name], search_space[param_name] + + if not param_distribution._contains(param_distribution.to_internal_repr(param)): + return False + return True + + +def _inlined_categorical_uniform_crossover( + parent_params: np.ndarray, + rng: np.random.RandomState, + swapping_prob: float, + search_space: dict[str, BaseDistribution], +) -> np.ndarray: + # We can't use uniform crossover implementation of `BaseCrossover` for + # parameters from `CategoricalDistribution`, since categorical params are + # passed to crossover untransformed, which is not what `BaseCrossover` + # implementations expect. + n_categorical_params = len(search_space) + masks = (rng.rand(n_categorical_params) >= swapping_prob).astype(int) + return parent_params[masks, range(n_categorical_params)] diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/nsgaii/_crossovers/__init__.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/nsgaii/_crossovers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/nsgaii/_crossovers/_base.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/nsgaii/_crossovers/_base.py new file mode 100644 index 0000000000000000000000000000000000000000..6b3af466e565156c184630227c7ce92cf62b20c1 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/nsgaii/_crossovers/_base.py @@ -0,0 +1,64 @@ +from __future__ import annotations + +import abc +from typing import TYPE_CHECKING + +import numpy as np + + +if TYPE_CHECKING: + from optuna.study import Study + + +class BaseCrossover(abc.ABC): + """Base class for crossovers. + + A crossover operation is used by :class:`~optuna.samplers.NSGAIISampler` + to create new parameter combination from parameters of ``n`` parent individuals. + + .. note:: + Concrete implementations of this class are expected to only accept parameters + from numerical distributions. At the moment, only crossover operation for categorical + parameters (uniform crossover) is built-in into :class:`~optuna.samplers.NSGAIISampler`. + """ + + def __str__(self) -> str: + return self.__class__.__name__ + + @property + @abc.abstractmethod + def n_parents(self) -> int: + """Number of parent individuals required to perform crossover.""" + + raise NotImplementedError + + @abc.abstractmethod + def crossover( + self, + parents_params: np.ndarray, + rng: np.random.RandomState, + study: Study, + search_space_bounds: np.ndarray, + ) -> np.ndarray: + """Perform crossover of selected parent individuals. + + This method is called in :func:`~optuna.samplers.NSGAIISampler.sample_relative`. + + Args: + parents_params: + A ``numpy.ndarray`` with dimensions ``num_parents x num_parameters``. + Represents a parameter space for each parent individual. This space is + continuous for numerical parameters. + rng: + An instance of ``numpy.random.RandomState``. + study: + Target study object. 
+ search_space_bounds: + A ``numpy.ndarray`` with dimensions ``len_search_space x 2`` representing + numerical distribution bounds constructed from transformed search space. + + Returns: + A 1-dimensional ``numpy.ndarray`` containing new parameter combination. + """ + + raise NotImplementedError diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/nsgaii/_crossovers/_blxalpha.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/nsgaii/_crossovers/_blxalpha.py new file mode 100644 index 0000000000000000000000000000000000000000..f226e0041f696f1568e0aac9abd3387f884397e7 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/nsgaii/_crossovers/_blxalpha.py @@ -0,0 +1,55 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +import numpy as np + +from optuna._experimental import experimental_class +from optuna.samplers.nsgaii._crossovers._base import BaseCrossover + + +if TYPE_CHECKING: + from optuna.study import Study + + +@experimental_class("3.0.0") +class BLXAlphaCrossover(BaseCrossover): + """Blend Crossover operation used by :class:`~optuna.samplers.NSGAIISampler`. + + Uniformly samples child individuals from the hyper-rectangles created + by the two parent individuals. For further information about BLX-alpha crossover, + please refer to the following paper: + + - `Eshelman, L. and J. D. Schaffer. + Real-Coded Genetic Algorithms and Interval-Schemata. FOGA (1992). + `__ + + Args: + alpha: + Parametrizes blend operation. + """ + + n_parents = 2 + + def __init__(self, alpha: float = 0.5) -> None: + self._alpha = alpha + + def crossover( + self, + parents_params: np.ndarray, + rng: np.random.RandomState, + study: Study, + search_space_bounds: np.ndarray, + ) -> np.ndarray: + # https://doi.org/10.1109/CEC.2001.934452 + # Section 2 Crossover Operators for RCGA 2.1 Blend Crossover + + parents_min = parents_params.min(axis=0) + parents_max = parents_params.max(axis=0) + diff = self._alpha * (parents_max - parents_min) # Equation (1). + low = parents_min - diff # Equation (1). + high = parents_max + diff # Equation (1). + r = rng.rand(len(search_space_bounds)) + child_params = (high - low) * r + low + + return child_params diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/nsgaii/_crossovers/_sbx.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/nsgaii/_crossovers/_sbx.py new file mode 100644 index 0000000000000000000000000000000000000000..fd4c6ae1520253bb99b52320ad1d2fc7bcebda74 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/nsgaii/_crossovers/_sbx.py @@ -0,0 +1,148 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +import numpy as np + +from optuna._experimental import experimental_class +from optuna.samplers.nsgaii._crossovers._base import BaseCrossover + + +if TYPE_CHECKING: + from optuna.study import Study + + +@experimental_class("3.0.0") +class SBXCrossover(BaseCrossover): + """Simulated Binary Crossover operation used by :class:`~optuna.samplers.NSGAIISampler`. + + Generates a child from two parent individuals + according to the polynomial probability distribution. + + In the paper, SBX has only one argument, ``eta``, + and generate two child individuals. 
+ However, Optuna can only return one child individual in one crossover operation, + so it uses the ``uniform_crossover_prob`` and ``use_child_gene_prob`` arguments + to make two individuals into one. + + - `Deb, K. and R. Agrawal. + “Simulated Binary Crossover for Continuous Search Space.” + Complex Syst. 9 (1995): n. pag. + `__ + + Args: + eta: + Distribution index. A small value of ``eta`` allows distant solutions + to be selected as children solutions. If not specified, takes default + value of ``2`` for single objective functions and ``20`` for multi objective. + uniform_crossover_prob: + ``uniform_crossover_prob`` is the probability of uniform crossover + between two individuals selected as candidate child individuals. + This argument is whether or not two individuals are + crossover to make one child individual. + If the ``uniform_crossover_prob`` exceeds 0.5, + the result is equivalent to ``1-uniform_crossover_prob``, + because it returns one of the two individuals of the crossover result. + If not specified, takes default value of ``0.5``. + The range of values is ``[0.0, 1.0]``. + use_child_gene_prob: + ``use_child_gene_prob`` is the probability of using the value of the generated + child variable rather than the value of the parent. + This probability is applied to each variable individually. + where ``1-use_chile_gene_prob`` is the probability of + using the parent's values as it is. + If not specified, takes default value of ``0.5``. + The range of values is ``(0.0, 1.0]``. + """ + + n_parents = 2 + + def __init__( + self, + eta: float | None = None, + uniform_crossover_prob: float = 0.5, + use_child_gene_prob: float = 0.5, + ) -> None: + if (eta is not None) and (eta < 0.0): + raise ValueError("The value of `eta` must be greater than or equal to 0.0.") + self._eta = eta + + if uniform_crossover_prob < 0.0 or uniform_crossover_prob > 1.0: + raise ValueError( + "The value of `uniform_crossover_prob` must be in the range [0.0, 1.0]." + ) + if use_child_gene_prob <= 0.0 or use_child_gene_prob > 1.0: + raise ValueError("The value of `use_child_gene_prob` must be in the range (0.0, 1.0].") + self._uniform_crossover_prob = uniform_crossover_prob + self._use_child_gene_prob = use_child_gene_prob + + def crossover( + self, + parents_params: np.ndarray, + rng: np.random.RandomState, + study: Study, + search_space_bounds: np.ndarray, + ) -> np.ndarray: + # https://www.researchgate.net/profile/M-M-Raghuwanshi/publication/267198495_Simulated_Binary_Crossover_with_Lognormal_Distribution/links/5576c78408ae7536375205d7/Simulated-Binary-Crossover-with-Lognormal-Distribution.pdf + # Section 2 Simulated Binary Crossover (SBX) + + # To avoid generating solutions that violate the box constraints, + # alpha1, alpha2, xls and xus are introduced, unlike the reference. + xls = search_space_bounds[..., 0] + xus = search_space_bounds[..., 1] + + xs_min = np.min(parents_params, axis=0) + xs_max = np.max(parents_params, axis=0) + if self._eta is None: + eta = 20.0 if study._is_multi_objective() else 2.0 + else: + eta = self._eta + + xs_diff = np.clip(xs_max - xs_min, 1e-10, None) + beta1 = 1 + 2 * (xs_min - xls) / xs_diff + beta2 = 1 + 2 * (xus - xs_max) / xs_diff + alpha1 = 2 - np.power(beta1, -(eta + 1)) + alpha2 = 2 - np.power(beta2, -(eta + 1)) + + us = rng.rand(len(search_space_bounds)) + mask1 = us > 1 / alpha1 # Equation (3). + betaq1 = np.power(us * alpha1, 1 / (eta + 1)) # Equation (3). + betaq1[mask1] = np.power((1 / (2 - us * alpha1)), 1 / (eta + 1))[mask1] # Equation (3). 
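# betaq1 and betaq2 are the SBX spread factors drawn from the polynomial
# distribution parametrised by eta; the alpha1/alpha2 terms rescale that
# distribution so the resulting children stay within the box constraints
# [xls, xus], unlike in the original, unbounded formulation.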
+ + mask2 = us > 1 / alpha2 # Equation (3). + betaq2 = np.power(us * alpha2, 1 / (eta + 1)) # Equation (3) + betaq2[mask2] = np.power((1 / (2 - us * alpha2)), 1 / (eta + 1))[mask2] # Equation (3). + + c1 = 0.5 * ((xs_min + xs_max) - betaq1 * xs_diff) # Equation (4). + c2 = 0.5 * ((xs_min + xs_max) + betaq2 * xs_diff) # Equation (5). + + # SBX applies crossover with use_child_gene_prob and uniform_crossover_prob. + # the gene of the parent individual is the gene of the child individual. + # The original SBX creates two child individuals, + # but optuna's implementation creates only one child individual. + # Therefore, when there is no crossover, + # the gene is selected with equal probability from the parent individuals x1 and x2. + + child1_params_list = [] + child2_params_list = [] + + for c1_i, c2_i, x1_i, x2_i in zip(c1, c2, parents_params[0], parents_params[1]): + if rng.rand() < self._use_child_gene_prob: + if rng.rand() >= self._uniform_crossover_prob: + child1_params_list.append(c1_i) + child2_params_list.append(c2_i) + else: + child1_params_list.append(c2_i) + child2_params_list.append(c1_i) + else: + if rng.rand() >= self._uniform_crossover_prob: + child1_params_list.append(x1_i) + child2_params_list.append(x2_i) + else: + child1_params_list.append(x2_i) + child2_params_list.append(x1_i) + + child_params_list = child1_params_list if rng.rand() < 0.5 else child2_params_list + child_params = np.array(child_params_list) + + return child_params diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/nsgaii/_crossovers/_spx.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/nsgaii/_crossovers/_spx.py new file mode 100644 index 0000000000000000000000000000000000000000..7e49ade4c7e6a5bd4e2af90ef5e4cc2bef92948b --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/nsgaii/_crossovers/_spx.py @@ -0,0 +1,62 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +import numpy as np + +from optuna._experimental import experimental_class +from optuna.samplers.nsgaii._crossovers._base import BaseCrossover + + +if TYPE_CHECKING: + from optuna.study import Study + + +@experimental_class("3.0.0") +class SPXCrossover(BaseCrossover): + """Simplex Crossover operation used by :class:`~optuna.samplers.NSGAIISampler`. + + Uniformly samples child individuals from within a single simplex + that is similar to the simplex produced by the parent individual. + For further information about SPX crossover, please refer to the following paper: + + - `Shigeyoshi Tsutsui and Shigeyoshi Tsutsui and David E. Goldberg and + David E. Goldberg and Kumara Sastry and Kumara Sastry + Progress Toward Linkage Learning in Real-Coded GAs with Simplex Crossover. + IlliGAL Report. 2000. + `__ + + Args: + epsilon: + Expansion rate. If not specified, defaults to ``sqrt(len(search_space) + 2)``. + """ + + n_parents = 3 + + def __init__(self, epsilon: float | None = None) -> None: + self._epsilon = epsilon + + def crossover( + self, + parents_params: np.ndarray, + rng: np.random.RandomState, + study: Study, + search_space_bounds: np.ndarray, + ) -> np.ndarray: + # https://www.researchgate.net/publication/2388486_Progress_Toward_Linkage_Learning_in_Real-Coded_GAs_with_Simplex_Crossover + # Section 2 A Brief Review of SPX + + n = self.n_parents - 1 + G = np.mean(parents_params, axis=0) # Equation (1). 
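# SPX samples the child uniformly from a simplex obtained by expanding the
# parent simplex about its centroid G by the factor epsilon; the rs terms
# below are the random coefficients that realise this uniform sampling.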
+ rs = np.power(rng.rand(n), 1 / (np.arange(n) + 1)) # Equation (2). + + epsilon = np.sqrt(len(search_space_bounds) + 2) if self._epsilon is None else self._epsilon + xks = [G + epsilon * (pk - G) for pk in parents_params] # Equation (3). + + ck = 0 # Equation (4). + for k in range(1, self.n_parents): + ck = rs[k - 1] * (xks[k - 1] - xks[k] + ck) + + child_params = xks[-1] + ck # Equation (5). + + return child_params diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/nsgaii/_crossovers/_undx.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/nsgaii/_crossovers/_undx.py new file mode 100644 index 0000000000000000000000000000000000000000..e46f98691d1db8c7484553faa06684d5ca802bb0 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/nsgaii/_crossovers/_undx.py @@ -0,0 +1,115 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +import numpy as np + +from optuna._experimental import experimental_class +from optuna.samplers.nsgaii._crossovers._base import BaseCrossover + + +if TYPE_CHECKING: + from optuna.study import Study + + +@experimental_class("3.0.0") +class UNDXCrossover(BaseCrossover): + """Unimodal Normal Distribution Crossover used by :class:`~optuna.samplers.NSGAIISampler`. + + Generates child individuals from the three parents + using a multivariate normal distribution. + + - `H. Kita, I. Ono and S. Kobayashi, + Multi-parental extension of the unimodal normal distribution crossover + for real-coded genetic algorithms, + Proceedings of the 1999 Congress on Evolutionary Computation-CEC99 + (Cat. No. 99TH8406), 1999, pp. 1581-1588 Vol. 2 + `__ + + Args: + sigma_xi: + Parametrizes normal distribution from which ``xi`` is drawn. + sigma_eta: + Parametrizes normal distribution from which ``etas`` are drawn. + If not specified, defaults to ``0.35 / sqrt(len(search_space))``. + """ + + n_parents = 3 + + def __init__(self, sigma_xi: float = 0.5, sigma_eta: float | None = None) -> None: + self._sigma_xi = sigma_xi + self._sigma_eta = sigma_eta + + def _distance_from_x_to_psl(self, parents_params: np.ndarray) -> np.floating: + # The line connecting x1 to x2 is called psl (primary search line). + # Compute the 2-norm of the vector orthogonal to psl from x3. + e_12 = UNDXCrossover._normalized_x1_to_x2( + parents_params + ) # Normalized vector from x1 to x2. + v_13 = parents_params[2] - parents_params[0] # Vector from x1 to x3. + v_12_3 = v_13 - np.dot(v_13, e_12) * e_12 # Vector orthogonal to v_12 through x3. + m_12_3 = np.linalg.norm(v_12_3, ord=2) # 2-norm of v_12_3. + + return m_12_3 + + def _orthonormal_basis_vector_to_psl(self, parents_params: np.ndarray, n: int) -> np.ndarray: + # Compute orthogonal basis vectors for the subspace orthogonal to psl. + e_12 = UNDXCrossover._normalized_x1_to_x2( + parents_params + ) # Normalized vector from x1 to x2. + basis_matrix = np.identity(n) + + if np.count_nonzero(e_12) != 0: + basis_matrix[0] = e_12 + + basis_matrix_t = basis_matrix.T + Q, _ = np.linalg.qr(basis_matrix_t) + + return Q.T[1:] + + def crossover( + self, + parents_params: np.ndarray, + rng: np.random.RandomState, + study: Study, + search_space_bounds: np.ndarray, + ) -> np.ndarray: + # https://doi.org/10.1109/CEC.1999.782672 + # Section 2 Unimodal Normal Distribution Crossover + n = len(search_space_bounds) + xp = (parents_params[0] + parents_params[1]) / 2 # Section 2 (2). 
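# UNDX: xp is the midpoint of the two primary parents and d their difference
# (the primary search line). The child is xp plus a normally distributed step
# along d (scaled by sigma_xi) plus normally distributed steps along the
# directions orthogonal to d, whose scale is set by the distance D of the
# third parent from that line (times sigma_eta).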
+ d = parents_params[0] - parents_params[1] # Section 2 (3). + if self._sigma_eta is None: + sigma_eta = 0.35 / np.sqrt(n) + else: + sigma_eta = self._sigma_eta + + etas = rng.normal(0, sigma_eta**2, size=n) + xi = rng.normal(0, self._sigma_xi**2) + es = self._orthonormal_basis_vector_to_psl( + parents_params, n + ) # Orthonormal basis vectors of the subspace orthogonal to the psl. + one = xp # Section 2 (5). + two = xi * d # Section 2 (5). + + if n > 1: # When n=1, there is no subsearch component. + three = np.zeros(n) # Section 2 (5). + D = self._distance_from_x_to_psl(parents_params) # Section 2 (4). + for i in range(n - 1): + three += etas[i] * es[i] + three *= D + child_params = one + two + three + + else: + child_params = one + two + + return child_params + + @staticmethod + def _normalized_x1_to_x2(parents_params: np.ndarray) -> np.ndarray: + # Compute the normalized vector from x1 to x2. + v_12 = parents_params[1] - parents_params[0] + m_12 = np.linalg.norm(v_12, ord=2) + e_12 = v_12 / np.clip(m_12, 1e-10, None) + + return e_12 diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/nsgaii/_crossovers/_uniform.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/nsgaii/_crossovers/_uniform.py new file mode 100644 index 0000000000000000000000000000000000000000..673ea80d031e0a77c9c441a56da4beaf717745a8 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/nsgaii/_crossovers/_uniform.py @@ -0,0 +1,51 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +import numpy as np + +from optuna.samplers.nsgaii._crossovers._base import BaseCrossover + + +if TYPE_CHECKING: + from optuna.study import Study + + +class UniformCrossover(BaseCrossover): + """Uniform Crossover operation used by :class:`~optuna.samplers.NSGAIISampler`. + + Select each parameter with equal probability from the two parent individuals. + For further information about uniform crossover, please refer to the following paper: + + - `Gilbert Syswerda. 1989. Uniform Crossover in Genetic Algorithms. + In Proceedings of the 3rd International Conference on Genetic Algorithms. + Morgan Kaufmann Publishers Inc., San Francisco, CA, USA, 2-9. + `__ + + Args: + swapping_prob: + Probability of swapping each parameter of the parents during crossover. 
+ """ + + n_parents = 2 + + def __init__(self, swapping_prob: float = 0.5) -> None: + if not (0.0 <= swapping_prob <= 1.0): + raise ValueError("`swapping_prob` must be a float value within the range [0.0, 1.0].") + self._swapping_prob = swapping_prob + + def crossover( + self, + parents_params: np.ndarray, + rng: np.random.RandomState, + study: Study, + search_space_bounds: np.ndarray, + ) -> np.ndarray: + # https://www.researchgate.net/publication/201976488_Uniform_Crossover_in_Genetic_Algorithms + # Section 1 Introduction + + n_params = len(search_space_bounds) + masks = (rng.rand(n_params) >= self._swapping_prob).astype(int) + child_params = parents_params[masks, range(n_params)] + + return child_params diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/nsgaii/_crossovers/_vsbx.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/nsgaii/_crossovers/_vsbx.py new file mode 100644 index 0000000000000000000000000000000000000000..8e49c68db2b6ff9ff8b05161b014b87d6a77951c --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/nsgaii/_crossovers/_vsbx.py @@ -0,0 +1,139 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +import numpy as np + +from optuna._experimental import experimental_class +from optuna.samplers.nsgaii._crossovers._base import BaseCrossover + + +if TYPE_CHECKING: + from optuna.study import Study + + +@experimental_class("3.0.0") +class VSBXCrossover(BaseCrossover): + """Modified Simulated Binary Crossover operation used by + :class:`~optuna.samplers.NSGAIISampler`. + + vSBX generates child individuals without excluding any region of the parameter space, + while maintaining the excellent properties of SBX. + + In the paper, vSBX has only one argument, ``eta``, + and generate two child individuals. + However, Optuna can only return one child individual in one crossover operation, + so it uses the ``uniform_crossover_prob`` and ``use_child_gene_prob`` arguments + to make two individuals into one. + + - `Pedro J. Ballester, Jonathan N. Carter. + Real-Parameter Genetic Algorithms for Finding Multiple Optimal Solutions + in Multi-modal Optimization. GECCO 2003: 706-717 + `__ + + Args: + eta: + Distribution index. A small value of ``eta`` allows distant solutions + to be selected as children solutions. If not specified, takes default + value of ``2`` for single objective functions and ``20`` for multi objective. + uniform_crossover_prob: + ``uniform_crossover_prob`` is the probability of uniform crossover + between two individuals selected as candidate child individuals. + This argument is whether or not two individuals are + crossover to make one child individual. + If the ``uniform_crossover_prob`` exceeds 0.5, + the result is equivalent to ``1-uniform_crossover_prob``, + because it returns one of the two individuals of the crossover result. + If not specified, takes default value of ``0.5``. + The range of values is ``[0.0, 1.0]``. + use_child_gene_prob: + ``use_child_gene_prob`` is the probability of using the value of the generated + child variable rather than the value of the parent. + This probability is applied to each variable individually. + where ``1-use_chile_gene_prob`` is the probability of + using the parent's values as it is. + If not specified, takes default value of ``0.5``. + The range of values is ``(0.0, 1.0]``. 
+ """ + + n_parents = 2 + + def __init__( + self, + eta: float | None = None, + uniform_crossover_prob: float = 0.5, + use_child_gene_prob: float = 0.5, + ) -> None: + if (eta is not None) and (eta < 0.0): + raise ValueError("The value of `eta` must be greater than or equal to 0.0.") + self._eta = eta + + if uniform_crossover_prob < 0.0 or uniform_crossover_prob > 1.0: + raise ValueError( + "The value of `uniform_crossover_prob` must be in the range [0.0, 1.0]." + ) + if use_child_gene_prob <= 0.0 or use_child_gene_prob > 1.0: + raise ValueError("The value of `use_child_gene_prob` must be in the range (0.0, 1.0].") + self._uniform_crossover_prob = uniform_crossover_prob + self._use_child_gene_prob = use_child_gene_prob + + def crossover( + self, + parents_params: np.ndarray, + rng: np.random.RandomState, + study: Study, + search_space_bounds: np.ndarray, + ) -> np.ndarray: + # https://doi.org/10.1007/3-540-45105-6_86 + # Section 3.2 Crossover Schemes (vSBX) + if self._eta is None: + eta = 20.0 if study._is_multi_objective() else 2.0 + else: + eta = self._eta + + eps = 1e-10 + us = rng.rand(len(search_space_bounds)) + beta_1 = np.power(1 / np.maximum((2 * us), eps), 1 / (eta + 1)) + beta_2 = np.power(1 / np.maximum((2 * (1 - us)), eps), 1 / (eta + 1)) + + u_1 = rng.rand() + if u_1 <= 0.5: + c1 = 0.5 * ((1 + beta_1) * parents_params[0] + (1 - beta_2) * parents_params[1]) + else: + c1 = 0.5 * ((1 - beta_1) * parents_params[0] + (1 + beta_2) * parents_params[1]) + u_2 = rng.rand() + if u_2 <= 0.5: + c2 = 0.5 * ((3 - beta_1) * parents_params[0] - (1 - beta_2) * parents_params[1]) + else: + c2 = 0.5 * (-(1 - beta_1) * parents_params[0] + (3 - beta_2) * parents_params[1]) + + # vSBX applies crossover with use_child_gene_prob and uniform_crossover_prob. + # the gene of the parent individual is the gene of the child individual. + # The original vSBX creates two child individuals, + # but optuna's implementation creates only one child individual. + # Therefore, when there is no crossover, + # the gene is selected with equal probability from the parent individuals x1 and x2. 
+ + child1_params_list = [] + child2_params_list = [] + + for c1_i, c2_i, x1_i, x2_i in zip(c1, c2, parents_params[0], parents_params[1]): + if rng.rand() < self._use_child_gene_prob: + if rng.rand() >= self._uniform_crossover_prob: + child1_params_list.append(c1_i) + child2_params_list.append(c2_i) + else: + child1_params_list.append(c2_i) + child2_params_list.append(c1_i) + else: + if rng.rand() >= self._uniform_crossover_prob: + child1_params_list.append(x1_i) + child2_params_list.append(x2_i) + else: + child1_params_list.append(x2_i) + child2_params_list.append(x1_i) + + child_params_list = child1_params_list if rng.rand() < 0.5 else child2_params_list + child_params = np.array(child_params_list) + + return child_params diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/nsgaii/_elite_population_selection_strategy.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/nsgaii/_elite_population_selection_strategy.py new file mode 100644 index 0000000000000000000000000000000000000000..bf416267aee9480768bae72845052c8b2e5c0663 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/nsgaii/_elite_population_selection_strategy.py @@ -0,0 +1,139 @@ +from __future__ import annotations + +from collections import defaultdict +from collections.abc import Callable +from collections.abc import Sequence +from typing import TYPE_CHECKING + +import numpy as np + +from optuna.samplers.nsgaii._constraints_evaluation import _evaluate_penalty +from optuna.samplers.nsgaii._constraints_evaluation import _validate_constraints +from optuna.study import StudyDirection +from optuna.study._multi_objective import _fast_non_domination_rank +from optuna.trial import FrozenTrial + + +if TYPE_CHECKING: + from optuna.study import Study + + +class NSGAIIElitePopulationSelectionStrategy: + def __init__( + self, + *, + population_size: int, + constraints_func: Callable[[FrozenTrial], Sequence[float]] | None = None, + ) -> None: + if population_size < 2: + raise ValueError("`population_size` must be greater than or equal to 2.") + + self._population_size = population_size + self._constraints_func = constraints_func + + def __call__(self, study: Study, population: list[FrozenTrial]) -> list[FrozenTrial]: + """Select elite population from the given trials by NSGA-II algorithm. + + Args: + study: + Target study object. + population: + Trials in the study. + + Returns: + A list of trials that are selected as elite population. + """ + _validate_constraints(population, is_constrained=self._constraints_func is not None) + population_per_rank = _rank_population( + population, study.directions, is_constrained=self._constraints_func is not None + ) + elite_population: list[FrozenTrial] = [] + for individuals in population_per_rank: + if len(elite_population) + len(individuals) < self._population_size: + elite_population.extend(individuals) + else: + n = self._population_size - len(elite_population) + _crowding_distance_sort(individuals) + elite_population.extend(individuals[:n]) + break + + return elite_population + + +def _calc_crowding_distance(population: list[FrozenTrial]) -> defaultdict[int, float]: + """Calculates the crowding distance of population. 
+ + We define the crowding distance as the summation of the crowding distance of each dimension + of value calculated as follows: + + * If all values in that dimension are the same, i.e., [1, 1, 1] or [inf, inf], + the crowding distances of all trials in that dimension are zero. + * Otherwise, the crowding distances of that dimension is the difference between + two nearest values besides that value, one above and one below, divided by the difference + between the maximal and minimal finite value of that dimension. Please note that: + * the nearest value below the minimum is considered to be -inf and the + nearest value above the maximum is considered to be inf, and + * inf - inf and (-inf) - (-inf) is considered to be zero. + """ + + manhattan_distances: defaultdict[int, float] = defaultdict(float) + if len(population) == 0: + return manhattan_distances + + for i in range(len(population[0].values)): + population.sort(key=lambda x: x.values[i]) + + # If all trials in population have the same value in the i-th dimension, ignore the + # objective dimension since it does not make difference. + if population[0].values[i] == population[-1].values[i]: + continue + + vs = [-float("inf")] + [trial.values[i] for trial in population] + [float("inf")] + + # Smallest finite value. + v_min = next(x for x in vs if x != -float("inf")) + + # Largest finite value. + v_max = next(x for x in reversed(vs) if x != float("inf")) + + width = v_max - v_min + if width <= 0: + # width == 0 or width == -inf + width = 1.0 + + for j in range(len(population)): + # inf - inf and (-inf) - (-inf) is considered to be zero. + gap = 0.0 if vs[j] == vs[j + 2] else vs[j + 2] - vs[j] + manhattan_distances[population[j].number] += gap / width + return manhattan_distances + + +def _crowding_distance_sort(population: list[FrozenTrial]) -> None: + manhattan_distances = _calc_crowding_distance(population) + population.sort(key=lambda x: manhattan_distances[x.number]) + population.reverse() + + +def _rank_population( + population: list[FrozenTrial], + directions: Sequence[StudyDirection], + *, + is_constrained: bool = False, +) -> list[list[FrozenTrial]]: + if len(population) == 0: + return [] + + objective_values = np.array([trial.values for trial in population], dtype=np.float64) + objective_values *= np.array( + [-1.0 if d == StudyDirection.MAXIMIZE else 1.0 for d in directions] + ) + penalty = _evaluate_penalty(population) if is_constrained else None + + domination_ranks = _fast_non_domination_rank(objective_values, penalty=penalty) + population_per_rank: list[list[FrozenTrial]] = [[] for _ in range(max(domination_ranks) + 1)] + for trial, rank in zip(population, domination_ranks): + if rank == -1: + continue + population_per_rank[rank].append(trial) + + return population_per_rank diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/nsgaii/_sampler.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/nsgaii/_sampler.py new file mode 100644 index 0000000000000000000000000000000000000000..66d6f2eba0eb58b09bfe61a5ac592e78082569ba --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/samplers/nsgaii/_sampler.py @@ -0,0 +1,302 @@ +from __future__ import annotations + +from collections.abc import Callable +from collections.abc import Sequence +from typing import Any +from typing import TYPE_CHECKING + +from optuna._experimental import warn_experimental_argument +from optuna.distributions 
import BaseDistribution +from optuna.samplers._ga import BaseGASampler +from optuna.samplers._lazy_random_state import LazyRandomState +from optuna.samplers._random import RandomSampler +from optuna.samplers.nsgaii._after_trial_strategy import NSGAIIAfterTrialStrategy +from optuna.samplers.nsgaii._child_generation_strategy import NSGAIIChildGenerationStrategy +from optuna.samplers.nsgaii._crossovers._base import BaseCrossover +from optuna.samplers.nsgaii._crossovers._uniform import UniformCrossover +from optuna.samplers.nsgaii._elite_population_selection_strategy import ( + NSGAIIElitePopulationSelectionStrategy, +) +from optuna.search_space import IntersectionSearchSpace +from optuna.trial import FrozenTrial +from optuna.trial import TrialState + + +if TYPE_CHECKING: + from optuna.study import Study + + +class NSGAIISampler(BaseGASampler): + """Multi-objective sampler using the NSGA-II algorithm. + + NSGA-II stands for "Nondominated Sorting Genetic Algorithm II", + which is a well known, fast and elitist multi-objective genetic algorithm. + + For further information about NSGA-II, please refer to the following paper: + + - `A fast and elitist multiobjective genetic algorithm: NSGA-II + `__ + + .. note:: + :class:`~optuna.samplers.TPESampler` became much faster in v4.0.0 and supports several + features not supported by ``NSGAIISampler`` such as handling of dynamic search + space and categorical distance. To use :class:`~optuna.samplers.TPESampler`, you need to + explicitly specify the sampler as follows: + + .. testcode:: + + import optuna + + + def objective(trial): + x = trial.suggest_float("x", -100, 100) + y = trial.suggest_categorical("y", [-1, 0, 1]) + f1 = x**2 + y + f2 = -((x - 2) ** 2 + y) + return f1, f2 + + + # We minimize the first objective and maximize the second objective. + sampler = optuna.samplers.TPESampler() + study = optuna.create_study(directions=["minimize", "maximize"], sampler=sampler) + study.optimize(objective, n_trials=100) + + Please also check `our article + `__ + for more details of the speedup in v4.0.0. + + Args: + population_size: + Number of individuals (trials) in a generation. + ``population_size`` must be greater than or equal to ``crossover.n_parents``. + For :class:`~optuna.samplers.nsgaii.UNDXCrossover` and + :class:`~optuna.samplers.nsgaii.SPXCrossover`, ``n_parents=3``, and for the other + algorithms, ``n_parents=2``. + + mutation_prob: + Probability of mutating each parameter when creating a new individual. + If :obj:`None` is specified, the value ``1.0 / len(parent_trial.params)`` is used + where ``parent_trial`` is the parent trial of the target individual. + + crossover: + Crossover to be applied when creating child individuals. + The available crossovers are listed here: + https://optuna.readthedocs.io/en/stable/reference/samplers/nsgaii.html. + + :class:`~optuna.samplers.nsgaii.UniformCrossover` is always applied to parameters + sampled from :class:`~optuna.distributions.CategoricalDistribution`, and by + default for parameters sampled from other distributions unless this argument + is specified. + + For more information on each of the crossover method, please refer to + specific crossover documentation. + + crossover_prob: + Probability that a crossover (parameters swapping between parents) will occur + when creating a new individual. + + swapping_prob: + Probability of swapping each parameter of the parents during crossover. + + seed: + Seed for random number generator. 
+ + constraints_func: + An optional function that computes the objective constraints. It must take a + :class:`~optuna.trial.FrozenTrial` and return the constraints. The return value must + be a sequence of :obj:`float` s. A value strictly larger than 0 means that a + constraints is violated. A value equal to or smaller than 0 is considered feasible. + If ``constraints_func`` returns more than one value for a trial, that trial is + considered feasible if and only if all values are equal to 0 or smaller. + + The ``constraints_func`` will be evaluated after each successful trial. + The function won't be called when trials fail or they are pruned, but this behavior is + subject to change in the future releases. + + The constraints are handled by the constrained domination. A trial x is said to + constrained-dominate a trial y, if any of the following conditions is true: + + 1. Trial x is feasible and trial y is not. + 2. Trial x and y are both infeasible, but trial x has a smaller overall violation. + 3. Trial x and y are feasible and trial x dominates trial y. + + .. note:: + Added in v2.5.0 as an experimental feature. The interface may change in newer + versions without prior notice. See + https://github.com/optuna/optuna/releases/tag/v2.5.0. + + elite_population_selection_strategy: + The selection strategy for determining the individuals to survive from the current + population pool. Default to :obj:`None`. + + .. note:: + The arguments ``elite_population_selection_strategy`` was added in v3.3.0 as an + experimental feature. The interface may change in newer versions without prior + notice. + See https://github.com/optuna/optuna/releases/tag/v3.3.0. + + child_generation_strategy: + The strategy for generating child parameters from parent trials. Defaults to + :obj:`None`. + + .. note:: + The arguments ``child_generation_strategy`` was added in v3.3.0 as an experimental + feature. The interface may change in newer versions without prior notice. + See https://github.com/optuna/optuna/releases/tag/v3.3.0. + + after_trial_strategy: + A set of procedure to be conducted after each trial. Defaults to :obj:`None`. + + .. note:: + The arguments ``after_trial_strategy`` was added in v3.3.0 as an experimental + feature. The interface may change in newer versions without prior notice. + See https://github.com/optuna/optuna/releases/tag/v3.3.0. + """ + + def __init__( + self, + *, + population_size: int = 50, + mutation_prob: float | None = None, + crossover: BaseCrossover | None = None, + crossover_prob: float = 0.9, + swapping_prob: float = 0.5, + seed: int | None = None, + constraints_func: Callable[[FrozenTrial], Sequence[float]] | None = None, + elite_population_selection_strategy: ( + Callable[[Study, list[FrozenTrial]], list[FrozenTrial]] | None + ) = None, + child_generation_strategy: ( + Callable[[Study, dict[str, BaseDistribution], list[FrozenTrial]], dict[str, Any]] + | None + ) = None, + after_trial_strategy: ( + Callable[[Study, FrozenTrial, TrialState, Sequence[float] | None], None] | None + ) = None, + ) -> None: + # TODO(ohta): Reconsider the default value of each parameter. 
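# The constructor below validates the probabilities and the crossover choice,
# then wires up the three pluggable strategies (elite population selection,
# child generation, after-trial constraint recording), falling back to the
# default NSGA-II implementations defined in this package when no override
# is supplied.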
+ + if population_size < 2: + raise ValueError("`population_size` must be greater than or equal to 2.") + + if constraints_func is not None: + warn_experimental_argument("constraints_func") + if after_trial_strategy is not None: + warn_experimental_argument("after_trial_strategy") + + if child_generation_strategy is not None: + warn_experimental_argument("child_generation_strategy") + + if elite_population_selection_strategy is not None: + warn_experimental_argument("elite_population_selection_strategy") + + if crossover is None: + crossover = UniformCrossover(swapping_prob) + + if not isinstance(crossover, BaseCrossover): + raise ValueError( + f"'{crossover}' is not a valid crossover." + " For valid crossovers see" + " https://optuna.readthedocs.io/en/stable/reference/samplers.html." + ) + + if population_size < crossover.n_parents: + raise ValueError( + f"Using {crossover}," + f" the population size should be greater than or equal to {crossover.n_parents}." + f" The specified `population_size` is {population_size}." + ) + + super().__init__(population_size=population_size) + self._random_sampler = RandomSampler(seed=seed) + self._rng = LazyRandomState(seed) + self._constraints_func = constraints_func + self._search_space = IntersectionSearchSpace() + + self._elite_population_selection_strategy = ( + elite_population_selection_strategy + or NSGAIIElitePopulationSelectionStrategy( + population_size=population_size, constraints_func=constraints_func + ) + ) + self._child_generation_strategy = ( + child_generation_strategy + or NSGAIIChildGenerationStrategy( + crossover_prob=crossover_prob, + mutation_prob=mutation_prob, + swapping_prob=swapping_prob, + crossover=crossover, + constraints_func=constraints_func, + rng=self._rng, + ) + ) + self._after_trial_strategy = after_trial_strategy or NSGAIIAfterTrialStrategy( + constraints_func=constraints_func + ) + + def reseed_rng(self) -> None: + self._random_sampler.reseed_rng() + self._rng.rng.seed() + + def infer_relative_search_space( + self, study: Study, trial: FrozenTrial + ) -> dict[str, BaseDistribution]: + search_space: dict[str, BaseDistribution] = {} + for name, distribution in self._search_space.calculate(study).items(): + if distribution.single(): + # The `untransform` method of `optuna._transform._SearchSpaceTransform` + # does not assume a single value, + # so single value objects are not sampled with the `sample_relative` method, + # but with the `sample_independent` method. + continue + search_space[name] = distribution + return search_space + + def select_parent(self, study: Study, generation: int) -> list[FrozenTrial]: + return self._elite_population_selection_strategy( + study, + self.get_population(study, generation - 1) + + self.get_parent_population(study, generation - 1), + ) + + def sample_relative( + self, + study: Study, + trial: FrozenTrial, + search_space: dict[str, BaseDistribution], + ) -> dict[str, Any]: + generation = self.get_trial_generation(study, trial) + parent_population = self.get_parent_population(study, generation) + if len(parent_population) == 0: + return {} + return self._child_generation_strategy(study, search_space, parent_population) + + def sample_independent( + self, + study: Study, + trial: FrozenTrial, + param_name: str, + param_distribution: BaseDistribution, + ) -> Any: + # Following parameters are randomly sampled here. + # 1. A parameter in the initial population/first generation. + # 2. A parameter to mutate. + # 3. A parameter excluded from the intersection search space. 
+ + return self._random_sampler.sample_independent( + study, trial, param_name, param_distribution + ) + + def before_trial(self, study: Study, trial: FrozenTrial) -> None: + self._random_sampler.before_trial(study, trial) + + def after_trial( + self, + study: Study, + trial: FrozenTrial, + state: TrialState, + values: Sequence[float] | None, + ) -> None: + assert state in [TrialState.COMPLETE, TrialState.FAIL, TrialState.PRUNED] + self._after_trial_strategy(study, trial, state, values) + self._random_sampler.after_trial(study, trial, state, values) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/search_space/__init__.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/search_space/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c7f7abb8361daeab07e76af475fc3dd44d0cc97b --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/search_space/__init__.py @@ -0,0 +1,12 @@ +from optuna.search_space.group_decomposed import _GroupDecomposedSearchSpace +from optuna.search_space.group_decomposed import _SearchSpaceGroup +from optuna.search_space.intersection import intersection_search_space +from optuna.search_space.intersection import IntersectionSearchSpace + + +__all__ = [ + "_GroupDecomposedSearchSpace", + "_SearchSpaceGroup", + "IntersectionSearchSpace", + "intersection_search_space", +] diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/search_space/group_decomposed.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/search_space/group_decomposed.py new file mode 100644 index 0000000000000000000000000000000000000000..ccddd28a1c2b43299a84144c1c772eace6998c73 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/search_space/group_decomposed.py @@ -0,0 +1,66 @@ +from __future__ import annotations + +import copy +from typing import TYPE_CHECKING + +from optuna.distributions import BaseDistribution +from optuna.trial import TrialState + + +if TYPE_CHECKING: + from optuna.study import Study + + +class _SearchSpaceGroup: + def __init__(self) -> None: + self._search_spaces: list[dict[str, BaseDistribution]] = [] + + @property + def search_spaces(self) -> list[dict[str, BaseDistribution]]: + return self._search_spaces + + def add_distributions(self, distributions: dict[str, BaseDistribution]) -> None: + dist_keys = set(distributions.keys()) + next_search_spaces = [] + + for search_space in self._search_spaces: + keys = set(search_space.keys()) + + next_search_spaces.append({name: search_space[name] for name in keys & dist_keys}) + next_search_spaces.append({name: search_space[name] for name in keys - dist_keys}) + + dist_keys -= keys + + next_search_spaces.append({name: distributions[name] for name in dist_keys}) + self._search_spaces = list( + filter(lambda search_space: len(search_space) > 0, next_search_spaces) + ) + + +class _GroupDecomposedSearchSpace: + def __init__(self, include_pruned: bool = False) -> None: + self._search_space = _SearchSpaceGroup() + self._study_id: int | None = None + self._include_pruned = include_pruned + + def calculate(self, study: Study) -> _SearchSpaceGroup: + if self._study_id is None: + self._study_id = study._study_id + else: + # Note that the check below is meaningless when + # :class:`~optuna.storages.InMemoryStorage` is used because + # 
:func:`~optuna.storages.InMemoryStorage.create_new_study` + # always returns the same study ID. + if self._study_id != study._study_id: + raise ValueError("`_GroupDecomposedSearchSpace` cannot handle multiple studies.") + + states_of_interest: tuple[TrialState, ...] + if self._include_pruned: + states_of_interest = (TrialState.COMPLETE, TrialState.PRUNED) + else: + states_of_interest = (TrialState.COMPLETE,) + + for trial in study._get_trials(deepcopy=False, states=states_of_interest, use_cache=False): + self._search_space.add_distributions(trial.distributions) + + return copy.deepcopy(self._search_space) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/search_space/intersection.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/search_space/intersection.py new file mode 100644 index 0000000000000000000000000000000000000000..a44d970d6e0f22fe1408c0c8286e90e2eef7b37a --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/search_space/intersection.py @@ -0,0 +1,149 @@ +from __future__ import annotations + +import copy +from typing import TYPE_CHECKING + +import optuna +from optuna.distributions import BaseDistribution + + +if TYPE_CHECKING: + from optuna.study import Study + + +def _calculate( + trials: list[optuna.trial.FrozenTrial], + include_pruned: bool = False, + search_space: dict[str, BaseDistribution] | None = None, + cached_trial_number: int = -1, +) -> tuple[dict[str, BaseDistribution] | None, int]: + states_of_interest = [ + optuna.trial.TrialState.COMPLETE, + optuna.trial.TrialState.WAITING, + optuna.trial.TrialState.RUNNING, + ] + + if include_pruned: + states_of_interest.append(optuna.trial.TrialState.PRUNED) + + next_cached_trial_number = -1 + + for trial in reversed(trials): + if trial.state not in states_of_interest: + continue + + if next_cached_trial_number == -1: + next_cached_trial_number = trial.number + 1 + + if cached_trial_number > trial.number: + break + + if not trial.state.is_finished(): + next_cached_trial_number = trial.number + continue + + if search_space is None: + search_space = copy.copy(trial.distributions) + continue + + search_space = { + name: distribution + for name, distribution in search_space.items() + if trial.distributions.get(name) == distribution + } + + return search_space, next_cached_trial_number + + +class IntersectionSearchSpace: + """A class to calculate the intersection search space of a :class:`~optuna.study.Study`. + + Intersection search space contains the intersection of parameter distributions that have been + suggested in the completed trials of the study so far. + If there are multiple parameters that have the same name but different distributions, + neither is included in the resulting search space + (i.e., the parameters with dynamic value ranges are excluded). + + Note that an instance of this class is supposed to be used for only one study. + If different studies are passed to + :func:`~optuna.search_space.IntersectionSearchSpace.calculate`, + a :obj:`ValueError` is raised. + + Args: + include_pruned: + Whether pruned trials should be included in the search space. 
+ """ + + def __init__(self, include_pruned: bool = False) -> None: + self._cached_trial_number: int = -1 + self._search_space: dict[str, BaseDistribution] | None = None + self._study_id: int | None = None + + self._include_pruned = include_pruned + + def calculate(self, study: Study) -> dict[str, BaseDistribution]: + """Returns the intersection search space of the :class:`~optuna.study.Study`. + + Args: + study: + A study with completed trials. The same study must be passed for one instance + of this class through its lifetime. + + Returns: + A dictionary containing the parameter names and parameter's distributions sorted by + parameter names. + """ + + if self._study_id is None: + self._study_id = study._study_id + else: + # Note that the check below is meaningless when + # :class:`~optuna.storages.InMemoryStorage` is used because + # :func:`~optuna.storages.InMemoryStorage.create_new_study` + # always returns the same study ID. + if self._study_id != study._study_id: + raise ValueError("`IntersectionSearchSpace` cannot handle multiple studies.") + + self._search_space, self._cached_trial_number = _calculate( + study.get_trials(deepcopy=False), + self._include_pruned, + self._search_space, + self._cached_trial_number, + ) + search_space = self._search_space or {} + search_space = dict(sorted(search_space.items(), key=lambda x: x[0])) + return copy.deepcopy(search_space) + + +def intersection_search_space( + trials: list[optuna.trial.FrozenTrial], + include_pruned: bool = False, +) -> dict[str, BaseDistribution]: + """Return the intersection search space of the given trials. + + Intersection search space contains the intersection of parameter distributions that have been + suggested in the completed trials of the study so far. + If there are multiple parameters that have the same name but different distributions, + neither is included in the resulting search space + (i.e., the parameters with dynamic value ranges are excluded). + + .. note:: + :class:`~optuna.search_space.IntersectionSearchSpace` provides the same functionality with + a much faster way. Please consider using it if you want to reduce execution time + as much as possible. + + Args: + trials: + A list of trials. + include_pruned: + Whether pruned trials should be included in the search space. + + Returns: + A dictionary containing the parameter names and parameter's distributions sorted by + parameter names. 
+ """ + + search_space, _ = _calculate(trials, include_pruned) + search_space = search_space or {} + search_space = dict(sorted(search_space.items(), key=lambda x: x[0])) + return search_space diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/__init__.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..31d788eb2f5591410531e6d5e23a27d9fadbc7fc --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/__init__.py @@ -0,0 +1,53 @@ +from __future__ import annotations + +from optuna.storages._base import BaseStorage +from optuna.storages._cached_storage import _CachedStorage +from optuna.storages._callbacks import RetryFailedTrialCallback +from optuna.storages._grpc import GrpcStorageProxy +from optuna.storages._grpc import run_grpc_proxy_server +from optuna.storages._heartbeat import fail_stale_trials +from optuna.storages._in_memory import InMemoryStorage +from optuna.storages._rdb.storage import RDBStorage +from optuna.storages.journal._base import BaseJournalLogStorage +from optuna.storages.journal._file import ( + DeprecatedJournalFileSymlinkLock as JournalFileSymlinkLock, +) +from optuna.storages.journal._file import DeprecatedJournalFileOpenLock as JournalFileOpenLock +from optuna.storages.journal._file import JournalFileStorage +from optuna.storages.journal._redis import JournalRedisStorage +from optuna.storages.journal._storage import JournalStorage + + +__all__ = [ + "BaseStorage", + "BaseJournalLogStorage", + "InMemoryStorage", + "RDBStorage", + "JournalStorage", + "JournalFileStorage", + "JournalRedisStorage", + "JournalFileSymlinkLock", + "JournalFileOpenLock", + "RetryFailedTrialCallback", + "_CachedStorage", + "fail_stale_trials", + "GrpcStorageProxy", + "run_grpc_proxy_server", +] + + +def get_storage(storage: None | str | BaseStorage) -> BaseStorage: + """Only for internal usage. It might be deprecated in the future.""" + + if storage is None: + return InMemoryStorage() + if isinstance(storage, str): + if storage.startswith("redis"): + raise ValueError( + "RedisStorage is removed at Optuna v3.1.0. Please use JournalRedisBackend instead." 
+ ) + return _CachedStorage(RDBStorage(storage)) + elif isinstance(storage, RDBStorage): + return _CachedStorage(storage) + else: + return storage diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/_base.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/_base.py new file mode 100644 index 0000000000000000000000000000000000000000..5acce3a24677bba39b5807aad02ac9985cf37e4c --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/_base.py @@ -0,0 +1,625 @@ +from __future__ import annotations + +import abc +from collections.abc import Container +from collections.abc import Sequence +from typing import Any +from typing import cast + +from optuna._typing import JSONSerializable +from optuna.distributions import BaseDistribution +from optuna.exceptions import UpdateFinishedTrialError +from optuna.study._frozen import FrozenStudy +from optuna.study._study_direction import StudyDirection +from optuna.trial import FrozenTrial +from optuna.trial import TrialState + + +DEFAULT_STUDY_NAME_PREFIX = "no-name-" + + +class BaseStorage(abc.ABC): + """Base class for storages. + + This class is not supposed to be directly accessed by library users. + + This class abstracts a backend database and provides internal interfaces to + read/write histories of studies and trials. + + A storage class implementing this class must meet the following requirements. + + **Thread safety** + + A storage class instance can be shared among multiple threads, and must therefore be + thread-safe. It must guarantee that a data instance read from the storage must not be modified + by subsequent writes. For example, `FrozenTrial` instance returned by `get_trial` + should not be updated by the subsequent `set_trial_xxx`. This is usually achieved by replacing + the old data with a copy on `set_trial_xxx`. + + A storage class can also assume that a data instance returned are never modified by its user. + When a user modifies a return value from a storage class, the internal state of the storage + may become inconsistent. Consequences are undefined. + + **Ownership of RUNNING trials** + + Trials in finished states are not allowed to be modified. + Trials in the WAITING state are not allowed to be modified except for the `state` field. + """ + + # Basic study manipulation + + @abc.abstractmethod + def create_new_study( + self, directions: Sequence[StudyDirection], study_name: str | None = None + ) -> int: + """Create a new study from a name. + + If no name is specified, the storage class generates a name. + The returned study ID is unique among all current and deleted studies. + + Args: + directions: + A sequence of direction whose element is either + :obj:`~optuna.study.StudyDirection.MAXIMIZE` or + :obj:`~optuna.study.StudyDirection.MINIMIZE`. + study_name: + Name of the new study to create. + + Returns: + ID of the created study. + + Raises: + :exc:`optuna.exceptions.DuplicatedStudyError`: + If a study with the same ``study_name`` already exists. + """ + raise NotImplementedError + + @abc.abstractmethod + def delete_study(self, study_id: int) -> None: + """Delete a study. + + Args: + study_id: + ID of the study. + + Raises: + :exc:`KeyError`: + If no study with the matching ``study_id`` exists. 
+ """ + raise NotImplementedError + + @abc.abstractmethod + def set_study_user_attr(self, study_id: int, key: str, value: Any) -> None: + """Register a user-defined attribute to a study. + + This method overwrites any existing attribute. + + Args: + study_id: + ID of the study. + key: + Attribute key. + value: + Attribute value. It should be JSON serializable. + + Raises: + :exc:`KeyError`: + If no study with the matching ``study_id`` exists. + """ + raise NotImplementedError + + @abc.abstractmethod + def set_study_system_attr(self, study_id: int, key: str, value: JSONSerializable) -> None: + """Register an optuna-internal attribute to a study. + + This method overwrites any existing attribute. + + Args: + study_id: + ID of the study. + key: + Attribute key. + value: + Attribute value. It should be JSON serializable. + + Raises: + :exc:`KeyError`: + If no study with the matching ``study_id`` exists. + """ + raise NotImplementedError + + # Basic study access + + @abc.abstractmethod + def get_study_id_from_name(self, study_name: str) -> int: + """Read the ID of a study. + + Args: + study_name: + Name of the study. + + Returns: + ID of the study. + + Raises: + :exc:`KeyError`: + If no study with the matching ``study_name`` exists. + """ + raise NotImplementedError + + @abc.abstractmethod + def get_study_name_from_id(self, study_id: int) -> str: + """Read the study name of a study. + + Args: + study_id: + ID of the study. + + Returns: + Name of the study. + + Raises: + :exc:`KeyError`: + If no study with the matching ``study_id`` exists. + """ + raise NotImplementedError + + @abc.abstractmethod + def get_study_directions(self, study_id: int) -> list[StudyDirection]: + """Read whether a study maximizes or minimizes an objective. + + Args: + study_id: + ID of a study. + + Returns: + Optimization directions list of the study. + + Raises: + :exc:`KeyError`: + If no study with the matching ``study_id`` exists. + """ + raise NotImplementedError + + @abc.abstractmethod + def get_study_user_attrs(self, study_id: int) -> dict[str, Any]: + """Read the user-defined attributes of a study. + + Args: + study_id: + ID of the study. + + Returns: + Dictionary with the user attributes of the study. + + Raises: + :exc:`KeyError`: + If no study with the matching ``study_id`` exists. + """ + raise NotImplementedError + + @abc.abstractmethod + def get_study_system_attrs(self, study_id: int) -> dict[str, Any]: + """Read the optuna-internal attributes of a study. + + Args: + study_id: + ID of the study. + + Returns: + Dictionary with the optuna-internal attributes of the study. + + Raises: + :exc:`KeyError`: + If no study with the matching ``study_id`` exists. + """ + raise NotImplementedError + + @abc.abstractmethod + def get_all_studies(self) -> list[FrozenStudy]: + """Read a list of :class:`~optuna.study.FrozenStudy` objects. + + Returns: + A list of :class:`~optuna.study.FrozenStudy` objects, sorted by ``study_id``. + + """ + raise NotImplementedError + + # Basic trial manipulation + + @abc.abstractmethod + def create_new_trial(self, study_id: int, template_trial: FrozenTrial | None = None) -> int: + """Create and add a new trial to a study. + + The returned trial ID is unique among all current and deleted trials. + + Args: + study_id: + ID of the study. + template_trial: + Template :class:`~optuna.trial.FrozenTrial` with default user-attributes, + system-attributes, intermediate-values, and a state. + + Returns: + ID of the created trial. 
+ + Raises: + :exc:`KeyError`: + If no study with the matching ``study_id`` exists. + """ + raise NotImplementedError + + @abc.abstractmethod + def set_trial_param( + self, + trial_id: int, + param_name: str, + param_value_internal: float, + distribution: BaseDistribution, + ) -> None: + """Set a parameter to a trial. + + Args: + trial_id: + ID of the trial. + param_name: + Name of the parameter. + param_value_internal: + Internal representation of the parameter value. + distribution: + Sampled distribution of the parameter. + + Raises: + :exc:`KeyError`: + If no trial with the matching ``trial_id`` exists. + :exc:`~optuna.exceptions.UpdateFinishedTrialError`: + If the trial is already finished. + """ + raise NotImplementedError + + def get_trial_id_from_study_id_trial_number(self, study_id: int, trial_number: int) -> int: + """Read the trial ID of a trial. + + Args: + study_id: + ID of the study. + trial_number: + Number of the trial. + + Returns: + ID of the trial. + + Raises: + :exc:`KeyError`: + If no trial with the matching ``study_id`` and ``trial_number`` exists. + """ + trials = self.get_all_trials(study_id, deepcopy=False) + if len(trials) <= trial_number: + raise KeyError( + "No trial with trial number {} exists in study with study_id {}.".format( + trial_number, study_id + ) + ) + return trials[trial_number]._trial_id + + def get_trial_number_from_id(self, trial_id: int) -> int: + """Read the trial number of a trial. + + .. note:: + + The trial number is only unique within a study, and is sequential. + + Args: + trial_id: + ID of the trial. + + Returns: + Number of the trial. + + Raises: + :exc:`KeyError`: + If no trial with the matching ``trial_id`` exists. + """ + return self.get_trial(trial_id).number + + def get_trial_param(self, trial_id: int, param_name: str) -> float: + """Read the parameter of a trial. + + Args: + trial_id: + ID of the trial. + param_name: + Name of the parameter. + + Returns: + Internal representation of the parameter. + + Raises: + :exc:`KeyError`: + If no trial with the matching ``trial_id`` exists. + If no such parameter exists. + """ + trial = self.get_trial(trial_id) + return trial.distributions[param_name].to_internal_repr(trial.params[param_name]) + + @abc.abstractmethod + def set_trial_state_values( + self, trial_id: int, state: TrialState, values: Sequence[float] | None = None + ) -> bool: + """Update the state and values of a trial. + + Set return values of an objective function to values argument. + If values argument is not :obj:`None`, this method overwrites any existing trial values. + + Args: + trial_id: + ID of the trial. + state: + New state of the trial. + values: + Values of the objective function. + + Returns: + :obj:`True` if the state is successfully updated. + :obj:`False` if the state is kept the same. + The latter happens when this method tries to update the state of + :obj:`~optuna.trial.TrialState.RUNNING` trial to + :obj:`~optuna.trial.TrialState.RUNNING`. + + Raises: + :exc:`KeyError`: + If no trial with the matching ``trial_id`` exists. + :exc:`~optuna.exceptions.UpdateFinishedTrialError`: + If the trial is already finished. + """ + raise NotImplementedError + + @abc.abstractmethod + def set_trial_intermediate_value( + self, trial_id: int, step: int, intermediate_value: float + ) -> None: + """Report an intermediate value of an objective function. + + This method overwrites any existing intermediate value associated with the given step. + + Args: + trial_id: + ID of the trial. 
+ step: + Step of the trial (e.g., the epoch when training a neural network). + intermediate_value: + Intermediate value corresponding to the step. + + Raises: + :exc:`KeyError`: + If no trial with the matching ``trial_id`` exists. + :exc:`~optuna.exceptions.UpdateFinishedTrialError`: + If the trial is already finished. + """ + raise NotImplementedError + + @abc.abstractmethod + def set_trial_user_attr(self, trial_id: int, key: str, value: Any) -> None: + """Set a user-defined attribute to a trial. + + This method overwrites any existing attribute. + + Args: + trial_id: + ID of the trial. + key: + Attribute key. + value: + Attribute value. It should be JSON serializable. + + Raises: + :exc:`KeyError`: + If no trial with the matching ``trial_id`` exists. + :exc:`~optuna.exceptions.UpdateFinishedTrialError`: + If the trial is already finished. + """ + raise NotImplementedError + + @abc.abstractmethod + def set_trial_system_attr(self, trial_id: int, key: str, value: JSONSerializable) -> None: + """Set an optuna-internal attribute to a trial. + + This method overwrites any existing attribute. + + Args: + trial_id: + ID of the trial. + key: + Attribute key. + value: + Attribute value. It should be JSON serializable. + + Raises: + :exc:`KeyError`: + If no trial with the matching ``trial_id`` exists. + :exc:`~optuna.exceptions.UpdateFinishedTrialError`: + If the trial is already finished. + """ + raise NotImplementedError + + # Basic trial access + + @abc.abstractmethod + def get_trial(self, trial_id: int) -> FrozenTrial: + """Read a trial. + + Args: + trial_id: + ID of the trial. + + Returns: + Trial with a matching trial ID. + + Raises: + :exc:`KeyError`: + If no trial with the matching ``trial_id`` exists. + """ + raise NotImplementedError + + @abc.abstractmethod + def get_all_trials( + self, + study_id: int, + deepcopy: bool = True, + states: Container[TrialState] | None = None, + ) -> list[FrozenTrial]: + """Read all trials in a study. + + Args: + study_id: + ID of the study. + deepcopy: + Whether to copy the list of trials before returning. + Set to :obj:`True` if you intend to update the list or elements of the list. + states: + Trial states to filter on. If :obj:`None`, include all states. + + Returns: + List of trials in the study, sorted by ``trial_id``. + + Raises: + :exc:`KeyError`: + If no study with the matching ``study_id`` exists. + """ + raise NotImplementedError + + def get_n_trials( + self, study_id: int, state: tuple[TrialState, ...] | TrialState | None = None + ) -> int: + """Count the number of trials in a study. + + Args: + study_id: + ID of the study. + state: + Trial states to filter on. If :obj:`None`, include all states. + + Returns: + Number of trials in the study. + + Raises: + :exc:`KeyError`: + If no study with the matching ``study_id`` exists. + """ + # TODO(hvy): Align the name and the behavior or the `state` parameter with + # `get_all_trials`'s `states`. + if isinstance(state, TrialState): + state = (state,) + return len(self.get_all_trials(study_id, deepcopy=False, states=state)) + + def get_best_trial(self, study_id: int) -> FrozenTrial: + """Return the trial with the best value in a study. + + This method is valid only during single-objective optimization. + + Args: + study_id: + ID of the study. + + Returns: + The trial with the best objective value among all finished trials in the study. + + Raises: + :exc:`KeyError`: + If no study with the matching ``study_id`` exists. + :exc:`RuntimeError`: + If the study has more than one direction. 
+ :exc:`ValueError`: + If no trials have been completed. + """ + all_trials = self.get_all_trials(study_id, deepcopy=False, states=[TrialState.COMPLETE]) + + if len(all_trials) == 0: + raise ValueError("No trials are completed yet.") + + directions = self.get_study_directions(study_id) + if len(directions) > 1: + raise RuntimeError( + "Best trial can be obtained only for single-objective optimization." + ) + direction = directions[0] + + if direction == StudyDirection.MAXIMIZE: + best_trial = max(all_trials, key=lambda t: cast(float, t.value)) + else: + best_trial = min(all_trials, key=lambda t: cast(float, t.value)) + + return best_trial + + def get_trial_params(self, trial_id: int) -> dict[str, Any]: + """Read the parameter dictionary of a trial. + + Args: + trial_id: + ID of the trial. + + Returns: + Dictionary of a parameters. Keys are parameter names and values are external + representations of the parameter values. + + Raises: + :exc:`KeyError`: + If no trial with the matching ``trial_id`` exists. + """ + return self.get_trial(trial_id).params + + def get_trial_user_attrs(self, trial_id: int) -> dict[str, Any]: + """Read the user-defined attributes of a trial. + + Args: + trial_id: + ID of the trial. + + Returns: + Dictionary with the user-defined attributes of the trial. + + Raises: + :exc:`KeyError`: + If no trial with the matching ``trial_id`` exists. + """ + return self.get_trial(trial_id).user_attrs + + def get_trial_system_attrs(self, trial_id: int) -> dict[str, Any]: + """Read the optuna-internal attributes of a trial. + + Args: + trial_id: + ID of the trial. + + Returns: + Dictionary with the optuna-internal attributes of the trial. + + Raises: + :exc:`KeyError`: + If no trial with the matching ``trial_id`` exists. + """ + return self.get_trial(trial_id).system_attrs + + def remove_session(self) -> None: + """Clean up all connections to a database.""" + pass + + def check_trial_is_updatable(self, trial_id: int, trial_state: TrialState) -> None: + """Check whether a trial state is updatable. + + Args: + trial_id: + ID of the trial. + Only used for an error message. + trial_state: + Trial state to check. + + Raises: + :exc:`~optuna.exceptions.UpdateFinishedTrialError`: + If the trial is already finished. 
+ """ + if trial_state.is_finished(): + trial = self.get_trial(trial_id) + raise UpdateFinishedTrialError( + "Trial#{} has already finished and can not be updated.".format(trial.number) + ) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/_cached_storage.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/_cached_storage.py new file mode 100644 index 0000000000000000000000000000000000000000..c90f2b53f2653609087561b46eeb4457c7a570e3 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/_cached_storage.py @@ -0,0 +1,279 @@ +from __future__ import annotations + +from collections.abc import Callable +from collections.abc import Container +from collections.abc import Sequence +import copy +import threading +from typing import Any + +import optuna +from optuna import distributions +from optuna._typing import JSONSerializable +from optuna.storages import BaseStorage +from optuna.storages._heartbeat import BaseHeartbeat +from optuna.storages._rdb.storage import RDBStorage +from optuna.study._frozen import FrozenStudy +from optuna.study._study_direction import StudyDirection +from optuna.trial import FrozenTrial +from optuna.trial import TrialState + + +class _StudyInfo: + def __init__(self) -> None: + # Trial number to corresponding FrozenTrial. + self.trials: dict[int, FrozenTrial] = {} + # A list of trials and the last trial number which require storage access to read latest + # attributes. + self.unfinished_trial_ids: set[int] = set() + self.last_finished_trial_id: int = -1 + self.directions: list[StudyDirection] | None = None + self.name: str | None = None + + +class _CachedStorage(BaseStorage, BaseHeartbeat): + """A wrapper class of storage backends. + + This class is used in :func:`~optuna.get_storage` function and automatically + wraps :class:`~optuna.storages.RDBStorage` class. + + :class:`~optuna.storages._CachedStorage` meets the following **Data persistence** requirements. + + **Data persistence** + + :class:`~optuna.storages._CachedStorage` does not guarantee that write operations are logged + into a persistent storage, even when write methods succeed. + Thus, when process failure occurs, some writes might be lost. + As exceptions, when a persistent storage is available, any writes on any attributes + of `Study` and writes on `state` of `Trial` are guaranteed to be persistent. + Additionally, any preceding writes on any attributes of `Trial` are guaranteed to + be written into a persistent storage before writes on `state` of `Trial` succeed. + The same applies for `param`, `user_attrs', 'system_attrs' and 'intermediate_values` + attributes. + + Args: + backend: + :class:`~optuna.storages.RDBStorage` class instance to wrap. 
+ """ + + def __init__(self, backend: RDBStorage) -> None: + self._backend = backend + self._studies: dict[int, _StudyInfo] = {} + self._trial_id_to_study_id_and_number: dict[int, tuple[int, int]] = {} + self._study_id_and_number_to_trial_id: dict[tuple[int, int], int] = {} + self._lock = threading.Lock() + + def __getstate__(self) -> dict[Any, Any]: + state = self.__dict__.copy() + del state["_lock"] + return state + + def __setstate__(self, state: dict[Any, Any]) -> None: + self.__dict__.update(state) + self._lock = threading.Lock() + + def create_new_study( + self, directions: Sequence[StudyDirection], study_name: str | None = None + ) -> int: + study_id = self._backend.create_new_study(directions=directions, study_name=study_name) + with self._lock: + study = _StudyInfo() + study.name = study_name + study.directions = list(directions) + self._studies[study_id] = study + + return study_id + + def delete_study(self, study_id: int) -> None: + with self._lock: + if study_id in self._studies: + for trial_number in self._studies[study_id].trials: + trial_id = self._study_id_and_number_to_trial_id.get((study_id, trial_number)) + if trial_id in self._trial_id_to_study_id_and_number: + del self._trial_id_to_study_id_and_number[trial_id] + if (study_id, trial_number) in self._study_id_and_number_to_trial_id: + del self._study_id_and_number_to_trial_id[(study_id, trial_number)] + del self._studies[study_id] + + self._backend.delete_study(study_id) + + def set_study_user_attr(self, study_id: int, key: str, value: Any) -> None: + self._backend.set_study_user_attr(study_id, key, value) + + def set_study_system_attr(self, study_id: int, key: str, value: JSONSerializable) -> None: + self._backend.set_study_system_attr(study_id, key, value) + + def get_study_id_from_name(self, study_name: str) -> int: + return self._backend.get_study_id_from_name(study_name) + + def get_study_name_from_id(self, study_id: int) -> str: + with self._lock: + if study_id in self._studies: + name = self._studies[study_id].name + if name is not None: + return name + + name = self._backend.get_study_name_from_id(study_id) + with self._lock: + if study_id not in self._studies: + self._studies[study_id] = _StudyInfo() + self._studies[study_id].name = name + return name + + def get_study_directions(self, study_id: int) -> list[StudyDirection]: + with self._lock: + if study_id in self._studies: + directions = self._studies[study_id].directions + if directions is not None: + return directions + + directions = self._backend.get_study_directions(study_id) + with self._lock: + if study_id not in self._studies: + self._studies[study_id] = _StudyInfo() + self._studies[study_id].directions = directions + return directions + + def get_study_user_attrs(self, study_id: int) -> dict[str, Any]: + return self._backend.get_study_user_attrs(study_id) + + def get_study_system_attrs(self, study_id: int) -> dict[str, Any]: + return self._backend.get_study_system_attrs(study_id) + + def get_all_studies(self) -> list[FrozenStudy]: + return self._backend.get_all_studies() + + def create_new_trial(self, study_id: int, template_trial: FrozenTrial | None = None) -> int: + frozen_trial = self._backend._create_new_trial(study_id, template_trial) + trial_id = frozen_trial._trial_id + with self._lock: + if study_id not in self._studies: + self._studies[study_id] = _StudyInfo() + study = self._studies[study_id] + self._add_trials_to_cache(study_id, [frozen_trial]) + # Since finished trials will not be modified by any worker, we do not + # need storage access 
for them. + if frozen_trial.state.is_finished(): + study.last_finished_trial_id = max(study.last_finished_trial_id, trial_id) + else: + study.unfinished_trial_ids.add(trial_id) + return trial_id + + def set_trial_param( + self, + trial_id: int, + param_name: str, + param_value_internal: float, + distribution: distributions.BaseDistribution, + ) -> None: + self._backend.set_trial_param(trial_id, param_name, param_value_internal, distribution) + + def get_trial_id_from_study_id_trial_number(self, study_id: int, trial_number: int) -> int: + key = (study_id, trial_number) + with self._lock: + if key in self._study_id_and_number_to_trial_id: + return self._study_id_and_number_to_trial_id[key] + + return self._backend.get_trial_id_from_study_id_trial_number(study_id, trial_number) + + def get_best_trial(self, study_id: int) -> FrozenTrial: + return self._backend.get_best_trial(study_id) + + def set_trial_state_values( + self, trial_id: int, state: TrialState, values: Sequence[float] | None = None + ) -> bool: + return self._backend.set_trial_state_values(trial_id, state=state, values=values) + + def set_trial_intermediate_value( + self, trial_id: int, step: int, intermediate_value: float + ) -> None: + self._backend.set_trial_intermediate_value(trial_id, step, intermediate_value) + + def set_trial_user_attr(self, trial_id: int, key: str, value: Any) -> None: + self._backend.set_trial_user_attr(trial_id, key=key, value=value) + + def set_trial_system_attr(self, trial_id: int, key: str, value: JSONSerializable) -> None: + self._backend.set_trial_system_attr(trial_id, key=key, value=value) + + def _get_cached_trial(self, trial_id: int) -> FrozenTrial | None: + if trial_id not in self._trial_id_to_study_id_and_number: + return None + study_id, number = self._trial_id_to_study_id_and_number[trial_id] + study = self._studies[study_id] + return study.trials[number] if trial_id not in study.unfinished_trial_ids else None + + def get_trial(self, trial_id: int) -> FrozenTrial: + with self._lock: + trial = self._get_cached_trial(trial_id) + if trial is not None: + return trial + + return self._backend.get_trial(trial_id) + + def get_all_trials( + self, + study_id: int, + deepcopy: bool = True, + states: Container[TrialState] | None = None, + ) -> list[FrozenTrial]: + self._read_trials_from_remote_storage(study_id) + + with self._lock: + study = self._studies[study_id] + # We need to sort trials by their number because some samplers assume this behavior. + # The following two lines are latency-sensitive. 
+ + trials: dict[int, FrozenTrial] | list[FrozenTrial] + + if states is not None: + trials = {number: t for number, t in study.trials.items() if t.state in states} + else: + trials = study.trials + trials = list(sorted(trials.values(), key=lambda t: t.number)) + return copy.deepcopy(trials) if deepcopy else trials + + def _read_trials_from_remote_storage(self, study_id: int) -> None: + with self._lock: + if study_id not in self._studies: + self._studies[study_id] = _StudyInfo() + study = self._studies[study_id] + trials = self._backend._get_trials( + study_id, + states=None, + included_trial_ids=study.unfinished_trial_ids, + trial_id_greater_than=study.last_finished_trial_id, + ) + if not trials: + return + + self._add_trials_to_cache(study_id, trials) + for trial in trials: + if not trial.state.is_finished(): + study.unfinished_trial_ids.add(trial._trial_id) + continue + + study.last_finished_trial_id = max(study.last_finished_trial_id, trial._trial_id) + if trial._trial_id in study.unfinished_trial_ids: + study.unfinished_trial_ids.remove(trial._trial_id) + + def _add_trials_to_cache(self, study_id: int, trials: list[FrozenTrial]) -> None: + study = self._studies[study_id] + for trial in trials: + self._trial_id_to_study_id_and_number[trial._trial_id] = ( + study_id, + trial.number, + ) + self._study_id_and_number_to_trial_id[(study_id, trial.number)] = trial._trial_id + study.trials[trial.number] = trial + + def record_heartbeat(self, trial_id: int) -> None: + self._backend.record_heartbeat(trial_id) + + def _get_stale_trial_ids(self, study_id: int) -> list[int]: + return self._backend._get_stale_trial_ids(study_id) + + def get_heartbeat_interval(self) -> int | None: + return self._backend.get_heartbeat_interval() + + def get_failed_trial_callback(self) -> Callable[["optuna.Study", FrozenTrial], None] | None: + return self._backend.get_failed_trial_callback() diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/_callbacks.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/_callbacks.py new file mode 100644 index 0000000000000000000000000000000000000000..2f60a10cb5353292077862934a8a6f79629c0690 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/_callbacks.py @@ -0,0 +1,121 @@ +from __future__ import annotations + +from typing import Any + +import optuna +from optuna._experimental import experimental_class +from optuna._experimental import experimental_func +from optuna.trial import FrozenTrial + + +@experimental_class("2.8.0") +class RetryFailedTrialCallback: + """Retry a failed trial up to a maximum number of times. + + When a trial fails, this callback can be used with a class in :mod:`optuna.storages` to + recreate the trial in ``TrialState.WAITING`` to queue up the trial to be run again. + + The failed trial can be identified by the + :func:`~optuna.storages.RetryFailedTrialCallback.retried_trial_number` function. + Even if repetitive failure occurs (a retried trial fails again), + this method returns the number of the original trial. + To get a full list including the numbers of the retried trials as well as their original trial, + call the :func:`~optuna.storages.RetryFailedTrialCallback.retry_history` function. + + This callback is helpful in environments where trials may fail due to external conditions, + such as being preempted by other processes. + + Usage: + + .. 
testcode:: + + import optuna + from optuna.storages import RetryFailedTrialCallback + + storage = optuna.storages.RDBStorage( + url="sqlite:///:memory:", + heartbeat_interval=60, + grace_period=120, + failed_trial_callback=RetryFailedTrialCallback(max_retry=3), + ) + + study = optuna.create_study( + storage=storage, + ) + + .. seealso:: + See :class:`~optuna.storages.RDBStorage`. + + Args: + max_retry: + The max number of times a trial can be retried. Must be set to :obj:`None` or an + integer. If set to the default value of :obj:`None` will retry indefinitely. + If set to an integer, will only retry that many times. + inherit_intermediate_values: + Option to inherit `trial.intermediate_values` reported by + :func:`optuna.trial.Trial.report` from the failed trial. Default is :obj:`False`. + """ + + def __init__( + self, max_retry: int | None = None, inherit_intermediate_values: bool = False + ) -> None: + self._max_retry = max_retry + self._inherit_intermediate_values = inherit_intermediate_values + + def __call__(self, study: "optuna.study.Study", trial: FrozenTrial) -> None: + system_attrs: dict[str, Any] = { + "failed_trial": trial.number, + "retry_history": [], + **trial.system_attrs, + } + system_attrs["retry_history"].append(trial.number) + if self._max_retry is not None: + if self._max_retry < len(system_attrs["retry_history"]): + return + + study.add_trial( + optuna.create_trial( + state=optuna.trial.TrialState.WAITING, + params=trial.params, + distributions=trial.distributions, + user_attrs=trial.user_attrs, + system_attrs=system_attrs, + intermediate_values=( + trial.intermediate_values if self._inherit_intermediate_values else None + ), + ) + ) + + @staticmethod + @experimental_func("2.8.0") + def retried_trial_number(trial: FrozenTrial) -> int | None: + """Return the number of the original trial being retried. + + Args: + trial: + The trial object. + + Returns: + The number of the first failed trial. If not retry of a previous trial, + returns :obj:`None`. + """ + + return trial.system_attrs.get("failed_trial", None) + + @staticmethod + @experimental_func("3.0.0") + def retry_history(trial: FrozenTrial) -> list[int]: + """Return the list of retried trial numbers with respect to the specified trial. + + Args: + trial: + The trial object. + + Returns: + A list of trial numbers in ascending order of the series of retried trials. + The first item of the list indicates the original trial which is identical + to the :func:`~optuna.storages.RetryFailedTrialCallback.retried_trial_number`, + and the last item is the one right before the specified trial in the retry series. + If the specified trial is not a retry of any trial, returns an empty list. 
+ """ + return trial.system_attrs.get("retry_history", []) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/_grpc/__init__.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/_grpc/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f35d9da974755ba56f291985fa13ce3b0954049c --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/_grpc/__init__.py @@ -0,0 +1,8 @@ +from optuna.storages._grpc.client import GrpcStorageProxy +from optuna.storages._grpc.server import run_grpc_proxy_server + + +__all__ = [ + "run_grpc_proxy_server", + "GrpcStorageProxy", +] diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/_grpc/auto_generated/api_pb2.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/_grpc/auto_generated/api_pb2.py new file mode 100644 index 0000000000000000000000000000000000000000..5a03e6861cb992df0237eaa736bde51a7d450134 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/_grpc/auto_generated/api_pb2.py @@ -0,0 +1,156 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# NO CHECKED-IN PROTOBUF GENCODE +# source: api.proto +# Protobuf Python Version: 5.28.1 +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import runtime_version as _runtime_version +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +_runtime_version.ValidateProtobufRuntimeVersion( + _runtime_version.Domain.PUBLIC, + 5, + 28, + 1, + '', + 'api.proto' +) +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\tapi.proto\x12\x06optuna\"W\n\x15\x43reateNewStudyRequest\x12*\n\ndirections\x18\x01 \x03(\x0e\x32\x16.optuna.StudyDirection\x12\x12\n\nstudy_name\x18\x02 \x01(\t\"\'\n\x13\x43reateNewStudyReply\x12\x10\n\x08study_id\x18\x01 \x01(\x03\"&\n\x12\x44\x65leteStudyRequest\x12\x10\n\x08study_id\x18\x01 \x01(\x03\"\x12\n\x10\x44\x65leteStudyReply\"L\n\x1cSetStudyUserAttributeRequest\x12\x10\n\x08study_id\x18\x01 \x01(\x03\x12\x0b\n\x03key\x18\x02 \x01(\t\x12\r\n\x05value\x18\x03 \x01(\t\"\x1c\n\x1aSetStudyUserAttributeReply\"N\n\x1eSetStudySystemAttributeRequest\x12\x10\n\x08study_id\x18\x01 \x01(\x03\x12\x0b\n\x03key\x18\x02 \x01(\t\x12\r\n\x05value\x18\x03 \x01(\t\"\x1e\n\x1cSetStudySystemAttributeReply\"/\n\x19GetStudyIdFromNameRequest\x12\x12\n\nstudy_name\x18\x01 \x01(\t\"+\n\x17GetStudyIdFromNameReply\x12\x10\n\x08study_id\x18\x01 \x01(\x03\"-\n\x19GetStudyNameFromIdRequest\x12\x10\n\x08study_id\x18\x01 \x01(\x03\"-\n\x17GetStudyNameFromIdReply\x12\x12\n\nstudy_name\x18\x01 \x01(\t\"-\n\x19GetStudyDirectionsRequest\x12\x10\n\x08study_id\x18\x01 \x01(\x03\"E\n\x17GetStudyDirectionsReply\x12*\n\ndirections\x18\x01 \x03(\x0e\x32\x16.optuna.StudyDirection\"1\n\x1dGetStudyUserAttributesRequest\x12\x10\n\x08study_id\x18\x01 \x01(\x03\"\xa6\x01\n\x1bGetStudyUserAttributesReply\x12P\n\x0fuser_attributes\x18\x01 \x03(\x0b\x32\x37.optuna.GetStudyUserAttributesReply.UserAttributesEntry\x1a\x35\n\x13UserAttributesEntry\x12\x0b\n\x03key\x18\x01 
\x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"3\n\x1fGetStudySystemAttributesRequest\x12\x10\n\x08study_id\x18\x01 \x01(\x03\"\xb0\x01\n\x1dGetStudySystemAttributesReply\x12V\n\x11system_attributes\x18\x01 \x03(\x0b\x32;.optuna.GetStudySystemAttributesReply.SystemAttributesEntry\x1a\x37\n\x15SystemAttributesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\x16\n\x14GetAllStudiesRequest\"4\n\x12GetAllStudiesReply\x12\x1e\n\x07studies\x18\x01 \x03(\x0b\x32\r.optuna.Study\"p\n\x15\x43reateNewTrialRequest\x12\x10\n\x08study_id\x18\x01 \x01(\x03\x12%\n\x0etemplate_trial\x18\x02 \x01(\x0b\x32\r.optuna.Trial\x12\x1e\n\x16template_trial_is_none\x18\x03 \x01(\x08\"\'\n\x13\x43reateNewTrialReply\x12\x10\n\x08trial_id\x18\x01 \x01(\x03\"t\n\x18SetTrialParameterRequest\x12\x10\n\x08trial_id\x18\x01 \x01(\x03\x12\x12\n\nparam_name\x18\x02 \x01(\t\x12\x1c\n\x14param_value_internal\x18\x03 \x01(\x01\x12\x14\n\x0c\x64istribution\x18\x04 \x01(\t\"\x18\n\x16SetTrialParameterReply\"Q\n\'GetTrialIdFromStudyIdTrialNumberRequest\x12\x10\n\x08study_id\x18\x01 \x01(\x03\x12\x14\n\x0ctrial_number\x18\x02 \x01(\x03\"9\n%GetTrialIdFromStudyIdTrialNumberReply\x12\x10\n\x08trial_id\x18\x01 \x01(\x03\"a\n\x1aSetTrialStateValuesRequest\x12\x10\n\x08trial_id\x18\x01 \x01(\x03\x12!\n\x05state\x18\x02 \x01(\x0e\x32\x12.optuna.TrialState\x12\x0e\n\x06values\x18\x03 \x03(\x01\"1\n\x18SetTrialStateValuesReply\x12\x15\n\rtrial_updated\x18\x01 \x01(\x08\"^\n SetTrialIntermediateValueRequest\x12\x10\n\x08trial_id\x18\x01 \x01(\x03\x12\x0c\n\x04step\x18\x02 \x01(\x03\x12\x1a\n\x12intermediate_value\x18\x03 \x01(\x01\" \n\x1eSetTrialIntermediateValueReply\"L\n\x1cSetTrialUserAttributeRequest\x12\x10\n\x08trial_id\x18\x01 \x01(\x03\x12\x0b\n\x03key\x18\x02 \x01(\t\x12\r\n\x05value\x18\x03 \x01(\t\"\x1c\n\x1aSetTrialUserAttributeReply\"N\n\x1eSetTrialSystemAttributeRequest\x12\x10\n\x08trial_id\x18\x01 \x01(\x03\x12\x0b\n\x03key\x18\x02 \x01(\t\x12\r\n\x05value\x18\x03 \x01(\t\"\x1e\n\x1cSetTrialSystemAttributeReply\"#\n\x0fGetTrialRequest\x12\x10\n\x08trial_id\x18\x01 \x01(\x03\"-\n\rGetTrialReply\x12\x1c\n\x05trial\x18\x01 \x01(\x0b\x32\r.optuna.Trial\"_\n\x10GetTrialsRequest\x12\x10\n\x08study_id\x18\x01 \x01(\x03\x12\x1a\n\x12included_trial_ids\x18\x02 \x03(\x03\x12\x1d\n\x15trial_id_greater_than\x18\x03 \x01(\x03\"/\n\x0eGetTrialsReply\x12\x1d\n\x06trials\x18\x01 \x03(\x0b\x32\r.optuna.Trial\"\xc5\x02\n\x05Study\x12\x10\n\x08study_id\x18\x01 \x01(\x03\x12\x12\n\nstudy_name\x18\x02 \x01(\t\x12*\n\ndirections\x18\x03 \x03(\x0e\x32\x16.optuna.StudyDirection\x12:\n\x0fuser_attributes\x18\x04 \x03(\x0b\x32!.optuna.Study.UserAttributesEntry\x12>\n\x11system_attributes\x18\x05 \x03(\x0b\x32#.optuna.Study.SystemAttributesEntry\x1a\x35\n\x13UserAttributesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a\x37\n\x15SystemAttributesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xc3\x05\n\x05Trial\x12\x10\n\x08trial_id\x18\x01 \x01(\x03\x12\x0e\n\x06number\x18\x02 \x01(\x03\x12!\n\x05state\x18\x03 \x01(\x0e\x32\x12.optuna.TrialState\x12\x0e\n\x06values\x18\x04 \x03(\x01\x12\x16\n\x0e\x64\x61tetime_start\x18\x05 \x01(\t\x12\x19\n\x11\x64\x61tetime_complete\x18\x06 \x01(\t\x12)\n\x06params\x18\x07 \x03(\x0b\x32\x19.optuna.Trial.ParamsEntry\x12\x37\n\rdistributions\x18\x08 \x03(\x0b\x32 .optuna.Trial.DistributionsEntry\x12:\n\x0fuser_attributes\x18\t 
\x03(\x0b\x32!.optuna.Trial.UserAttributesEntry\x12>\n\x11system_attributes\x18\n \x03(\x0b\x32#.optuna.Trial.SystemAttributesEntry\x12\x42\n\x13intermediate_values\x18\x0b \x03(\x0b\x32%.optuna.Trial.IntermediateValuesEntry\x1a-\n\x0bParamsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x01:\x02\x38\x01\x1a\x34\n\x12\x44istributionsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a\x35\n\x13UserAttributesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a\x37\n\x15SystemAttributesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a\x39\n\x17IntermediateValuesEntry\x12\x0b\n\x03key\x18\x01 \x01(\x03\x12\r\n\x05value\x18\x02 \x01(\x01:\x02\x38\x01*,\n\x0eStudyDirection\x12\x0c\n\x08MINIMIZE\x10\x00\x12\x0c\n\x08MAXIMIZE\x10\x01*J\n\nTrialState\x12\x0b\n\x07RUNNING\x10\x00\x12\x0c\n\x08\x43OMPLETE\x10\x01\x12\n\n\x06PRUNED\x10\x02\x12\x08\n\x04\x46\x41IL\x10\x03\x12\x0b\n\x07WAITING\x10\x04\x32\xd7\r\n\x0eStorageService\x12L\n\x0e\x43reateNewStudy\x12\x1d.optuna.CreateNewStudyRequest\x1a\x1b.optuna.CreateNewStudyReply\x12\x43\n\x0b\x44\x65leteStudy\x12\x1a.optuna.DeleteStudyRequest\x1a\x18.optuna.DeleteStudyReply\x12\x61\n\x15SetStudyUserAttribute\x12$.optuna.SetStudyUserAttributeRequest\x1a\".optuna.SetStudyUserAttributeReply\x12g\n\x17SetStudySystemAttribute\x12&.optuna.SetStudySystemAttributeRequest\x1a$.optuna.SetStudySystemAttributeReply\x12X\n\x12GetStudyIdFromName\x12!.optuna.GetStudyIdFromNameRequest\x1a\x1f.optuna.GetStudyIdFromNameReply\x12X\n\x12GetStudyNameFromId\x12!.optuna.GetStudyNameFromIdRequest\x1a\x1f.optuna.GetStudyNameFromIdReply\x12X\n\x12GetStudyDirections\x12!.optuna.GetStudyDirectionsRequest\x1a\x1f.optuna.GetStudyDirectionsReply\x12\x64\n\x16GetStudyUserAttributes\x12%.optuna.GetStudyUserAttributesRequest\x1a#.optuna.GetStudyUserAttributesReply\x12j\n\x18GetStudySystemAttributes\x12\'.optuna.GetStudySystemAttributesRequest\x1a%.optuna.GetStudySystemAttributesReply\x12I\n\rGetAllStudies\x12\x1c.optuna.GetAllStudiesRequest\x1a\x1a.optuna.GetAllStudiesReply\x12L\n\x0e\x43reateNewTrial\x12\x1d.optuna.CreateNewTrialRequest\x1a\x1b.optuna.CreateNewTrialReply\x12U\n\x11SetTrialParameter\x12 .optuna.SetTrialParameterRequest\x1a\x1e.optuna.SetTrialParameterReply\x12\x82\x01\n GetTrialIdFromStudyIdTrialNumber\x12/.optuna.GetTrialIdFromStudyIdTrialNumberRequest\x1a-.optuna.GetTrialIdFromStudyIdTrialNumberReply\x12[\n\x13SetTrialStateValues\x12\".optuna.SetTrialStateValuesRequest\x1a .optuna.SetTrialStateValuesReply\x12m\n\x19SetTrialIntermediateValue\x12(.optuna.SetTrialIntermediateValueRequest\x1a&.optuna.SetTrialIntermediateValueReply\x12\x61\n\x15SetTrialUserAttribute\x12$.optuna.SetTrialUserAttributeRequest\x1a\".optuna.SetTrialUserAttributeReply\x12g\n\x17SetTrialSystemAttribute\x12&.optuna.SetTrialSystemAttributeRequest\x1a$.optuna.SetTrialSystemAttributeReply\x12:\n\x08GetTrial\x12\x17.optuna.GetTrialRequest\x1a\x15.optuna.GetTrialReply\x12=\n\tGetTrials\x12\x18.optuna.GetTrialsRequest\x1a\x16.optuna.GetTrialsReplyb\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'api_pb2', _globals) +if not _descriptor._USE_C_DESCRIPTORS: + DESCRIPTOR._loaded_options = None + _globals['_GETSTUDYUSERATTRIBUTESREPLY_USERATTRIBUTESENTRY']._loaded_options = None + _globals['_GETSTUDYUSERATTRIBUTESREPLY_USERATTRIBUTESENTRY']._serialized_options = b'8\001' + 
_globals['_GETSTUDYSYSTEMATTRIBUTESREPLY_SYSTEMATTRIBUTESENTRY']._loaded_options = None + _globals['_GETSTUDYSYSTEMATTRIBUTESREPLY_SYSTEMATTRIBUTESENTRY']._serialized_options = b'8\001' + _globals['_STUDY_USERATTRIBUTESENTRY']._loaded_options = None + _globals['_STUDY_USERATTRIBUTESENTRY']._serialized_options = b'8\001' + _globals['_STUDY_SYSTEMATTRIBUTESENTRY']._loaded_options = None + _globals['_STUDY_SYSTEMATTRIBUTESENTRY']._serialized_options = b'8\001' + _globals['_TRIAL_PARAMSENTRY']._loaded_options = None + _globals['_TRIAL_PARAMSENTRY']._serialized_options = b'8\001' + _globals['_TRIAL_DISTRIBUTIONSENTRY']._loaded_options = None + _globals['_TRIAL_DISTRIBUTIONSENTRY']._serialized_options = b'8\001' + _globals['_TRIAL_USERATTRIBUTESENTRY']._loaded_options = None + _globals['_TRIAL_USERATTRIBUTESENTRY']._serialized_options = b'8\001' + _globals['_TRIAL_SYSTEMATTRIBUTESENTRY']._loaded_options = None + _globals['_TRIAL_SYSTEMATTRIBUTESENTRY']._serialized_options = b'8\001' + _globals['_TRIAL_INTERMEDIATEVALUESENTRY']._loaded_options = None + _globals['_TRIAL_INTERMEDIATEVALUESENTRY']._serialized_options = b'8\001' + _globals['_STUDYDIRECTION']._serialized_start=3476 + _globals['_STUDYDIRECTION']._serialized_end=3520 + _globals['_TRIALSTATE']._serialized_start=3522 + _globals['_TRIALSTATE']._serialized_end=3596 + _globals['_CREATENEWSTUDYREQUEST']._serialized_start=21 + _globals['_CREATENEWSTUDYREQUEST']._serialized_end=108 + _globals['_CREATENEWSTUDYREPLY']._serialized_start=110 + _globals['_CREATENEWSTUDYREPLY']._serialized_end=149 + _globals['_DELETESTUDYREQUEST']._serialized_start=151 + _globals['_DELETESTUDYREQUEST']._serialized_end=189 + _globals['_DELETESTUDYREPLY']._serialized_start=191 + _globals['_DELETESTUDYREPLY']._serialized_end=209 + _globals['_SETSTUDYUSERATTRIBUTEREQUEST']._serialized_start=211 + _globals['_SETSTUDYUSERATTRIBUTEREQUEST']._serialized_end=287 + _globals['_SETSTUDYUSERATTRIBUTEREPLY']._serialized_start=289 + _globals['_SETSTUDYUSERATTRIBUTEREPLY']._serialized_end=317 + _globals['_SETSTUDYSYSTEMATTRIBUTEREQUEST']._serialized_start=319 + _globals['_SETSTUDYSYSTEMATTRIBUTEREQUEST']._serialized_end=397 + _globals['_SETSTUDYSYSTEMATTRIBUTEREPLY']._serialized_start=399 + _globals['_SETSTUDYSYSTEMATTRIBUTEREPLY']._serialized_end=429 + _globals['_GETSTUDYIDFROMNAMEREQUEST']._serialized_start=431 + _globals['_GETSTUDYIDFROMNAMEREQUEST']._serialized_end=478 + _globals['_GETSTUDYIDFROMNAMEREPLY']._serialized_start=480 + _globals['_GETSTUDYIDFROMNAMEREPLY']._serialized_end=523 + _globals['_GETSTUDYNAMEFROMIDREQUEST']._serialized_start=525 + _globals['_GETSTUDYNAMEFROMIDREQUEST']._serialized_end=570 + _globals['_GETSTUDYNAMEFROMIDREPLY']._serialized_start=572 + _globals['_GETSTUDYNAMEFROMIDREPLY']._serialized_end=617 + _globals['_GETSTUDYDIRECTIONSREQUEST']._serialized_start=619 + _globals['_GETSTUDYDIRECTIONSREQUEST']._serialized_end=664 + _globals['_GETSTUDYDIRECTIONSREPLY']._serialized_start=666 + _globals['_GETSTUDYDIRECTIONSREPLY']._serialized_end=735 + _globals['_GETSTUDYUSERATTRIBUTESREQUEST']._serialized_start=737 + _globals['_GETSTUDYUSERATTRIBUTESREQUEST']._serialized_end=786 + _globals['_GETSTUDYUSERATTRIBUTESREPLY']._serialized_start=789 + _globals['_GETSTUDYUSERATTRIBUTESREPLY']._serialized_end=955 + _globals['_GETSTUDYUSERATTRIBUTESREPLY_USERATTRIBUTESENTRY']._serialized_start=902 + _globals['_GETSTUDYUSERATTRIBUTESREPLY_USERATTRIBUTESENTRY']._serialized_end=955 + _globals['_GETSTUDYSYSTEMATTRIBUTESREQUEST']._serialized_start=957 + 
_globals['_GETSTUDYSYSTEMATTRIBUTESREQUEST']._serialized_end=1008 + _globals['_GETSTUDYSYSTEMATTRIBUTESREPLY']._serialized_start=1011 + _globals['_GETSTUDYSYSTEMATTRIBUTESREPLY']._serialized_end=1187 + _globals['_GETSTUDYSYSTEMATTRIBUTESREPLY_SYSTEMATTRIBUTESENTRY']._serialized_start=1132 + _globals['_GETSTUDYSYSTEMATTRIBUTESREPLY_SYSTEMATTRIBUTESENTRY']._serialized_end=1187 + _globals['_GETALLSTUDIESREQUEST']._serialized_start=1189 + _globals['_GETALLSTUDIESREQUEST']._serialized_end=1211 + _globals['_GETALLSTUDIESREPLY']._serialized_start=1213 + _globals['_GETALLSTUDIESREPLY']._serialized_end=1265 + _globals['_CREATENEWTRIALREQUEST']._serialized_start=1267 + _globals['_CREATENEWTRIALREQUEST']._serialized_end=1379 + _globals['_CREATENEWTRIALREPLY']._serialized_start=1381 + _globals['_CREATENEWTRIALREPLY']._serialized_end=1420 + _globals['_SETTRIALPARAMETERREQUEST']._serialized_start=1422 + _globals['_SETTRIALPARAMETERREQUEST']._serialized_end=1538 + _globals['_SETTRIALPARAMETERREPLY']._serialized_start=1540 + _globals['_SETTRIALPARAMETERREPLY']._serialized_end=1564 + _globals['_GETTRIALIDFROMSTUDYIDTRIALNUMBERREQUEST']._serialized_start=1566 + _globals['_GETTRIALIDFROMSTUDYIDTRIALNUMBERREQUEST']._serialized_end=1647 + _globals['_GETTRIALIDFROMSTUDYIDTRIALNUMBERREPLY']._serialized_start=1649 + _globals['_GETTRIALIDFROMSTUDYIDTRIALNUMBERREPLY']._serialized_end=1706 + _globals['_SETTRIALSTATEVALUESREQUEST']._serialized_start=1708 + _globals['_SETTRIALSTATEVALUESREQUEST']._serialized_end=1805 + _globals['_SETTRIALSTATEVALUESREPLY']._serialized_start=1807 + _globals['_SETTRIALSTATEVALUESREPLY']._serialized_end=1856 + _globals['_SETTRIALINTERMEDIATEVALUEREQUEST']._serialized_start=1858 + _globals['_SETTRIALINTERMEDIATEVALUEREQUEST']._serialized_end=1952 + _globals['_SETTRIALINTERMEDIATEVALUEREPLY']._serialized_start=1954 + _globals['_SETTRIALINTERMEDIATEVALUEREPLY']._serialized_end=1986 + _globals['_SETTRIALUSERATTRIBUTEREQUEST']._serialized_start=1988 + _globals['_SETTRIALUSERATTRIBUTEREQUEST']._serialized_end=2064 + _globals['_SETTRIALUSERATTRIBUTEREPLY']._serialized_start=2066 + _globals['_SETTRIALUSERATTRIBUTEREPLY']._serialized_end=2094 + _globals['_SETTRIALSYSTEMATTRIBUTEREQUEST']._serialized_start=2096 + _globals['_SETTRIALSYSTEMATTRIBUTEREQUEST']._serialized_end=2174 + _globals['_SETTRIALSYSTEMATTRIBUTEREPLY']._serialized_start=2176 + _globals['_SETTRIALSYSTEMATTRIBUTEREPLY']._serialized_end=2206 + _globals['_GETTRIALREQUEST']._serialized_start=2208 + _globals['_GETTRIALREQUEST']._serialized_end=2243 + _globals['_GETTRIALREPLY']._serialized_start=2245 + _globals['_GETTRIALREPLY']._serialized_end=2290 + _globals['_GETTRIALSREQUEST']._serialized_start=2292 + _globals['_GETTRIALSREQUEST']._serialized_end=2387 + _globals['_GETTRIALSREPLY']._serialized_start=2389 + _globals['_GETTRIALSREPLY']._serialized_end=2436 + _globals['_STUDY']._serialized_start=2439 + _globals['_STUDY']._serialized_end=2764 + _globals['_STUDY_USERATTRIBUTESENTRY']._serialized_start=902 + _globals['_STUDY_USERATTRIBUTESENTRY']._serialized_end=955 + _globals['_STUDY_SYSTEMATTRIBUTESENTRY']._serialized_start=1132 + _globals['_STUDY_SYSTEMATTRIBUTESENTRY']._serialized_end=1187 + _globals['_TRIAL']._serialized_start=2767 + _globals['_TRIAL']._serialized_end=3474 + _globals['_TRIAL_PARAMSENTRY']._serialized_start=3204 + _globals['_TRIAL_PARAMSENTRY']._serialized_end=3249 + _globals['_TRIAL_DISTRIBUTIONSENTRY']._serialized_start=3251 + _globals['_TRIAL_DISTRIBUTIONSENTRY']._serialized_end=3303 + 
_globals['_TRIAL_USERATTRIBUTESENTRY']._serialized_start=902 + _globals['_TRIAL_USERATTRIBUTESENTRY']._serialized_end=955 + _globals['_TRIAL_SYSTEMATTRIBUTESENTRY']._serialized_start=1132 + _globals['_TRIAL_SYSTEMATTRIBUTESENTRY']._serialized_end=1187 + _globals['_TRIAL_INTERMEDIATEVALUESENTRY']._serialized_start=3417 + _globals['_TRIAL_INTERMEDIATEVALUESENTRY']._serialized_end=3474 + _globals['_STORAGESERVICE']._serialized_start=3599 + _globals['_STORAGESERVICE']._serialized_end=5350 +# @@protoc_insertion_point(module_scope) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/_grpc/auto_generated/api_pb2.pyi b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/_grpc/auto_generated/api_pb2.pyi new file mode 100644 index 0000000000000000000000000000000000000000..f0608671c070fbb1067fc2c766ef3a628dd3dbec --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/_grpc/auto_generated/api_pb2.pyi @@ -0,0 +1,1070 @@ +""" +@generated by mypy-protobuf. Do not edit manually! +isort:skip_file +* +Optuna GRPC API +The following command generates the Python code from this file: +$ pip install mypy-protobuf==3.6.0 protobuf==5.28.1 grpcio==1.68.1 grpcio-tools==1.68.1 +$ python -m grpc_tools.protoc \\ +--proto_path=optuna/storages/_grpc \\ +--grpc_python_out=optuna/storages/_grpc/auto_generated \\ +--python_out=optuna/storages/_grpc/auto_generated \\ +--mypy_out=optuna/storages/_grpc/auto_generated \\ +optuna/storages/_grpc/api.proto +$ sed -i -e \\ +"s/import api_pb2 as api__pb2/import optuna.storages._grpc.auto_generated.api_pb2 as api__pb2/g" \\ +optuna/storages/_grpc/auto_generated/api_pb2_grpc.py +""" + +import builtins +import collections.abc +import google.protobuf.descriptor +import google.protobuf.internal.containers +import google.protobuf.internal.enum_type_wrapper +import google.protobuf.message +import sys +import typing + +if sys.version_info >= (3, 10): + import typing as typing_extensions +else: + import typing_extensions + +DESCRIPTOR: google.protobuf.descriptor.FileDescriptor + +class _StudyDirection: + ValueType = typing.NewType("ValueType", builtins.int) + V: typing_extensions.TypeAlias = ValueType + +class _StudyDirectionEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[_StudyDirection.ValueType], builtins.type): + DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor + MINIMIZE: _StudyDirection.ValueType # 0 + MAXIMIZE: _StudyDirection.ValueType # 1 + +class StudyDirection(_StudyDirection, metaclass=_StudyDirectionEnumTypeWrapper): + """* + Study direction. + """ + +MINIMIZE: StudyDirection.ValueType # 0 +MAXIMIZE: StudyDirection.ValueType # 1 +global___StudyDirection = StudyDirection + +class _TrialState: + ValueType = typing.NewType("ValueType", builtins.int) + V: typing_extensions.TypeAlias = ValueType + +class _TrialStateEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[_TrialState.ValueType], builtins.type): + DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor + RUNNING: _TrialState.ValueType # 0 + COMPLETE: _TrialState.ValueType # 1 + PRUNED: _TrialState.ValueType # 2 + FAIL: _TrialState.ValueType # 3 + WAITING: _TrialState.ValueType # 4 + +class TrialState(_TrialState, metaclass=_TrialStateEnumTypeWrapper): + """* + Trial state. 
+ """ + +RUNNING: TrialState.ValueType # 0 +COMPLETE: TrialState.ValueType # 1 +PRUNED: TrialState.ValueType # 2 +FAIL: TrialState.ValueType # 3 +WAITING: TrialState.ValueType # 4 +global___TrialState = TrialState + +@typing.final +class CreateNewStudyRequest(google.protobuf.message.Message): + """* + ======================================== + Messages for Optuna storage service. + ======================================== + + * + Request to create a new study. + """ + + DESCRIPTOR: google.protobuf.descriptor.Descriptor + + DIRECTIONS_FIELD_NUMBER: builtins.int + STUDY_NAME_FIELD_NUMBER: builtins.int + study_name: builtins.str + @property + def directions(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[global___StudyDirection.ValueType]: ... + def __init__( + self, + *, + directions: collections.abc.Iterable[global___StudyDirection.ValueType] | None = ..., + study_name: builtins.str = ..., + ) -> None: ... + def ClearField(self, field_name: typing.Literal["directions", b"directions", "study_name", b"study_name"]) -> None: ... + +global___CreateNewStudyRequest = CreateNewStudyRequest + +@typing.final +class CreateNewStudyReply(google.protobuf.message.Message): + """* + Reply to create a new study. + """ + + DESCRIPTOR: google.protobuf.descriptor.Descriptor + + STUDY_ID_FIELD_NUMBER: builtins.int + study_id: builtins.int + def __init__( + self, + *, + study_id: builtins.int = ..., + ) -> None: ... + def ClearField(self, field_name: typing.Literal["study_id", b"study_id"]) -> None: ... + +global___CreateNewStudyReply = CreateNewStudyReply + +@typing.final +class DeleteStudyRequest(google.protobuf.message.Message): + """* + Request to delete a study. + """ + + DESCRIPTOR: google.protobuf.descriptor.Descriptor + + STUDY_ID_FIELD_NUMBER: builtins.int + study_id: builtins.int + def __init__( + self, + *, + study_id: builtins.int = ..., + ) -> None: ... + def ClearField(self, field_name: typing.Literal["study_id", b"study_id"]) -> None: ... + +global___DeleteStudyRequest = DeleteStudyRequest + +@typing.final +class DeleteStudyReply(google.protobuf.message.Message): + """* + Reply to delete a study. + """ + + DESCRIPTOR: google.protobuf.descriptor.Descriptor + + def __init__( + self, + ) -> None: ... + +global___DeleteStudyReply = DeleteStudyReply + +@typing.final +class SetStudyUserAttributeRequest(google.protobuf.message.Message): + """* + Request to set a study's user attribute. + """ + + DESCRIPTOR: google.protobuf.descriptor.Descriptor + + STUDY_ID_FIELD_NUMBER: builtins.int + KEY_FIELD_NUMBER: builtins.int + VALUE_FIELD_NUMBER: builtins.int + study_id: builtins.int + key: builtins.str + value: builtins.str + def __init__( + self, + *, + study_id: builtins.int = ..., + key: builtins.str = ..., + value: builtins.str = ..., + ) -> None: ... + def ClearField(self, field_name: typing.Literal["key", b"key", "study_id", b"study_id", "value", b"value"]) -> None: ... + +global___SetStudyUserAttributeRequest = SetStudyUserAttributeRequest + +@typing.final +class SetStudyUserAttributeReply(google.protobuf.message.Message): + """* + Reply to set a study's user attribute. + """ + + DESCRIPTOR: google.protobuf.descriptor.Descriptor + + def __init__( + self, + ) -> None: ... + +global___SetStudyUserAttributeReply = SetStudyUserAttributeReply + +@typing.final +class SetStudySystemAttributeRequest(google.protobuf.message.Message): + """* + Request to set a study's system attribute. 
+ """ + + DESCRIPTOR: google.protobuf.descriptor.Descriptor + + STUDY_ID_FIELD_NUMBER: builtins.int + KEY_FIELD_NUMBER: builtins.int + VALUE_FIELD_NUMBER: builtins.int + study_id: builtins.int + key: builtins.str + value: builtins.str + def __init__( + self, + *, + study_id: builtins.int = ..., + key: builtins.str = ..., + value: builtins.str = ..., + ) -> None: ... + def ClearField(self, field_name: typing.Literal["key", b"key", "study_id", b"study_id", "value", b"value"]) -> None: ... + +global___SetStudySystemAttributeRequest = SetStudySystemAttributeRequest + +@typing.final +class SetStudySystemAttributeReply(google.protobuf.message.Message): + """* + Reply to set a study's system attribute. + """ + + DESCRIPTOR: google.protobuf.descriptor.Descriptor + + def __init__( + self, + ) -> None: ... + +global___SetStudySystemAttributeReply = SetStudySystemAttributeReply + +@typing.final +class GetStudyIdFromNameRequest(google.protobuf.message.Message): + """* + Request to get a study id by its name. + """ + + DESCRIPTOR: google.protobuf.descriptor.Descriptor + + STUDY_NAME_FIELD_NUMBER: builtins.int + study_name: builtins.str + def __init__( + self, + *, + study_name: builtins.str = ..., + ) -> None: ... + def ClearField(self, field_name: typing.Literal["study_name", b"study_name"]) -> None: ... + +global___GetStudyIdFromNameRequest = GetStudyIdFromNameRequest + +@typing.final +class GetStudyIdFromNameReply(google.protobuf.message.Message): + """* + Reply to get a study id by its name. + """ + + DESCRIPTOR: google.protobuf.descriptor.Descriptor + + STUDY_ID_FIELD_NUMBER: builtins.int + study_id: builtins.int + def __init__( + self, + *, + study_id: builtins.int = ..., + ) -> None: ... + def ClearField(self, field_name: typing.Literal["study_id", b"study_id"]) -> None: ... + +global___GetStudyIdFromNameReply = GetStudyIdFromNameReply + +@typing.final +class GetStudyNameFromIdRequest(google.protobuf.message.Message): + """* + Request to get a study name by its id. + """ + + DESCRIPTOR: google.protobuf.descriptor.Descriptor + + STUDY_ID_FIELD_NUMBER: builtins.int + study_id: builtins.int + def __init__( + self, + *, + study_id: builtins.int = ..., + ) -> None: ... + def ClearField(self, field_name: typing.Literal["study_id", b"study_id"]) -> None: ... + +global___GetStudyNameFromIdRequest = GetStudyNameFromIdRequest + +@typing.final +class GetStudyNameFromIdReply(google.protobuf.message.Message): + """* + Reply to get a study name by its id. + """ + + DESCRIPTOR: google.protobuf.descriptor.Descriptor + + STUDY_NAME_FIELD_NUMBER: builtins.int + study_name: builtins.str + def __init__( + self, + *, + study_name: builtins.str = ..., + ) -> None: ... + def ClearField(self, field_name: typing.Literal["study_name", b"study_name"]) -> None: ... + +global___GetStudyNameFromIdReply = GetStudyNameFromIdReply + +@typing.final +class GetStudyDirectionsRequest(google.protobuf.message.Message): + """* + Request to get study directions. + """ + + DESCRIPTOR: google.protobuf.descriptor.Descriptor + + STUDY_ID_FIELD_NUMBER: builtins.int + study_id: builtins.int + def __init__( + self, + *, + study_id: builtins.int = ..., + ) -> None: ... + def ClearField(self, field_name: typing.Literal["study_id", b"study_id"]) -> None: ... + +global___GetStudyDirectionsRequest = GetStudyDirectionsRequest + +@typing.final +class GetStudyDirectionsReply(google.protobuf.message.Message): + """* + Reply to get study directions. 
+ """ + + DESCRIPTOR: google.protobuf.descriptor.Descriptor + + DIRECTIONS_FIELD_NUMBER: builtins.int + @property + def directions(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[global___StudyDirection.ValueType]: ... + def __init__( + self, + *, + directions: collections.abc.Iterable[global___StudyDirection.ValueType] | None = ..., + ) -> None: ... + def ClearField(self, field_name: typing.Literal["directions", b"directions"]) -> None: ... + +global___GetStudyDirectionsReply = GetStudyDirectionsReply + +@typing.final +class GetStudyUserAttributesRequest(google.protobuf.message.Message): + """* + Request to get study user attributes. + """ + + DESCRIPTOR: google.protobuf.descriptor.Descriptor + + STUDY_ID_FIELD_NUMBER: builtins.int + study_id: builtins.int + def __init__( + self, + *, + study_id: builtins.int = ..., + ) -> None: ... + def ClearField(self, field_name: typing.Literal["study_id", b"study_id"]) -> None: ... + +global___GetStudyUserAttributesRequest = GetStudyUserAttributesRequest + +@typing.final +class GetStudyUserAttributesReply(google.protobuf.message.Message): + """* + Reply to get study user attributes. + """ + + DESCRIPTOR: google.protobuf.descriptor.Descriptor + + @typing.final + class UserAttributesEntry(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + + KEY_FIELD_NUMBER: builtins.int + VALUE_FIELD_NUMBER: builtins.int + key: builtins.str + value: builtins.str + def __init__( + self, + *, + key: builtins.str = ..., + value: builtins.str = ..., + ) -> None: ... + def ClearField(self, field_name: typing.Literal["key", b"key", "value", b"value"]) -> None: ... + + USER_ATTRIBUTES_FIELD_NUMBER: builtins.int + @property + def user_attributes(self) -> google.protobuf.internal.containers.ScalarMap[builtins.str, builtins.str]: ... + def __init__( + self, + *, + user_attributes: collections.abc.Mapping[builtins.str, builtins.str] | None = ..., + ) -> None: ... + def ClearField(self, field_name: typing.Literal["user_attributes", b"user_attributes"]) -> None: ... + +global___GetStudyUserAttributesReply = GetStudyUserAttributesReply + +@typing.final +class GetStudySystemAttributesRequest(google.protobuf.message.Message): + """* + Request to get study system attributes. + """ + + DESCRIPTOR: google.protobuf.descriptor.Descriptor + + STUDY_ID_FIELD_NUMBER: builtins.int + study_id: builtins.int + def __init__( + self, + *, + study_id: builtins.int = ..., + ) -> None: ... + def ClearField(self, field_name: typing.Literal["study_id", b"study_id"]) -> None: ... + +global___GetStudySystemAttributesRequest = GetStudySystemAttributesRequest + +@typing.final +class GetStudySystemAttributesReply(google.protobuf.message.Message): + """* + Reply to get study system attributes. + """ + + DESCRIPTOR: google.protobuf.descriptor.Descriptor + + @typing.final + class SystemAttributesEntry(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + + KEY_FIELD_NUMBER: builtins.int + VALUE_FIELD_NUMBER: builtins.int + key: builtins.str + value: builtins.str + def __init__( + self, + *, + key: builtins.str = ..., + value: builtins.str = ..., + ) -> None: ... + def ClearField(self, field_name: typing.Literal["key", b"key", "value", b"value"]) -> None: ... + + SYSTEM_ATTRIBUTES_FIELD_NUMBER: builtins.int + @property + def system_attributes(self) -> google.protobuf.internal.containers.ScalarMap[builtins.str, builtins.str]: ... 
+ def __init__( + self, + *, + system_attributes: collections.abc.Mapping[builtins.str, builtins.str] | None = ..., + ) -> None: ... + def ClearField(self, field_name: typing.Literal["system_attributes", b"system_attributes"]) -> None: ... + +global___GetStudySystemAttributesReply = GetStudySystemAttributesReply + +@typing.final +class GetAllStudiesRequest(google.protobuf.message.Message): + """* + Request to get all studies. + """ + + DESCRIPTOR: google.protobuf.descriptor.Descriptor + + def __init__( + self, + ) -> None: ... + +global___GetAllStudiesRequest = GetAllStudiesRequest + +@typing.final +class GetAllStudiesReply(google.protobuf.message.Message): + """* + Reply to get all studies. + """ + + DESCRIPTOR: google.protobuf.descriptor.Descriptor + + STUDIES_FIELD_NUMBER: builtins.int + @property + def studies(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___Study]: ... + def __init__( + self, + *, + studies: collections.abc.Iterable[global___Study] | None = ..., + ) -> None: ... + def ClearField(self, field_name: typing.Literal["studies", b"studies"]) -> None: ... + +global___GetAllStudiesReply = GetAllStudiesReply + +@typing.final +class CreateNewTrialRequest(google.protobuf.message.Message): + """* + Request to create a new trial. + """ + + DESCRIPTOR: google.protobuf.descriptor.Descriptor + + STUDY_ID_FIELD_NUMBER: builtins.int + TEMPLATE_TRIAL_FIELD_NUMBER: builtins.int + TEMPLATE_TRIAL_IS_NONE_FIELD_NUMBER: builtins.int + study_id: builtins.int + template_trial_is_none: builtins.bool + @property + def template_trial(self) -> global___Trial: ... + def __init__( + self, + *, + study_id: builtins.int = ..., + template_trial: global___Trial | None = ..., + template_trial_is_none: builtins.bool = ..., + ) -> None: ... + def HasField(self, field_name: typing.Literal["template_trial", b"template_trial"]) -> builtins.bool: ... + def ClearField(self, field_name: typing.Literal["study_id", b"study_id", "template_trial", b"template_trial", "template_trial_is_none", b"template_trial_is_none"]) -> None: ... + +global___CreateNewTrialRequest = CreateNewTrialRequest + +@typing.final +class CreateNewTrialReply(google.protobuf.message.Message): + """* + Reply to create a new trial. + """ + + DESCRIPTOR: google.protobuf.descriptor.Descriptor + + TRIAL_ID_FIELD_NUMBER: builtins.int + trial_id: builtins.int + def __init__( + self, + *, + trial_id: builtins.int = ..., + ) -> None: ... + def ClearField(self, field_name: typing.Literal["trial_id", b"trial_id"]) -> None: ... + +global___CreateNewTrialReply = CreateNewTrialReply + +@typing.final +class SetTrialParameterRequest(google.protobuf.message.Message): + """* + Request to set a trial parameter. + """ + + DESCRIPTOR: google.protobuf.descriptor.Descriptor + + TRIAL_ID_FIELD_NUMBER: builtins.int + PARAM_NAME_FIELD_NUMBER: builtins.int + PARAM_VALUE_INTERNAL_FIELD_NUMBER: builtins.int + DISTRIBUTION_FIELD_NUMBER: builtins.int + trial_id: builtins.int + param_name: builtins.str + param_value_internal: builtins.float + distribution: builtins.str + def __init__( + self, + *, + trial_id: builtins.int = ..., + param_name: builtins.str = ..., + param_value_internal: builtins.float = ..., + distribution: builtins.str = ..., + ) -> None: ... + def ClearField(self, field_name: typing.Literal["distribution", b"distribution", "param_name", b"param_name", "param_value_internal", b"param_value_internal", "trial_id", b"trial_id"]) -> None: ... 
+ +global___SetTrialParameterRequest = SetTrialParameterRequest + +@typing.final +class SetTrialParameterReply(google.protobuf.message.Message): + """* + Reply to set a trial parameter. + """ + + DESCRIPTOR: google.protobuf.descriptor.Descriptor + + def __init__( + self, + ) -> None: ... + +global___SetTrialParameterReply = SetTrialParameterReply + +@typing.final +class GetTrialIdFromStudyIdTrialNumberRequest(google.protobuf.message.Message): + """* + Request to get a trial id from its study id and trial number. + """ + + DESCRIPTOR: google.protobuf.descriptor.Descriptor + + STUDY_ID_FIELD_NUMBER: builtins.int + TRIAL_NUMBER_FIELD_NUMBER: builtins.int + study_id: builtins.int + trial_number: builtins.int + def __init__( + self, + *, + study_id: builtins.int = ..., + trial_number: builtins.int = ..., + ) -> None: ... + def ClearField(self, field_name: typing.Literal["study_id", b"study_id", "trial_number", b"trial_number"]) -> None: ... + +global___GetTrialIdFromStudyIdTrialNumberRequest = GetTrialIdFromStudyIdTrialNumberRequest + +@typing.final +class GetTrialIdFromStudyIdTrialNumberReply(google.protobuf.message.Message): + """* + Reply to get a trial id from its study id and trial number. + """ + + DESCRIPTOR: google.protobuf.descriptor.Descriptor + + TRIAL_ID_FIELD_NUMBER: builtins.int + trial_id: builtins.int + def __init__( + self, + *, + trial_id: builtins.int = ..., + ) -> None: ... + def ClearField(self, field_name: typing.Literal["trial_id", b"trial_id"]) -> None: ... + +global___GetTrialIdFromStudyIdTrialNumberReply = GetTrialIdFromStudyIdTrialNumberReply + +@typing.final +class SetTrialStateValuesRequest(google.protobuf.message.Message): + """* + Request to set trial state and values. + """ + + DESCRIPTOR: google.protobuf.descriptor.Descriptor + + TRIAL_ID_FIELD_NUMBER: builtins.int + STATE_FIELD_NUMBER: builtins.int + VALUES_FIELD_NUMBER: builtins.int + trial_id: builtins.int + state: global___TrialState.ValueType + @property + def values(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.float]: ... + def __init__( + self, + *, + trial_id: builtins.int = ..., + state: global___TrialState.ValueType = ..., + values: collections.abc.Iterable[builtins.float] | None = ..., + ) -> None: ... + def ClearField(self, field_name: typing.Literal["state", b"state", "trial_id", b"trial_id", "values", b"values"]) -> None: ... + +global___SetTrialStateValuesRequest = SetTrialStateValuesRequest + +@typing.final +class SetTrialStateValuesReply(google.protobuf.message.Message): + """* + Reply to set trial state and values. + """ + + DESCRIPTOR: google.protobuf.descriptor.Descriptor + + TRIAL_UPDATED_FIELD_NUMBER: builtins.int + trial_updated: builtins.bool + def __init__( + self, + *, + trial_updated: builtins.bool = ..., + ) -> None: ... + def ClearField(self, field_name: typing.Literal["trial_updated", b"trial_updated"]) -> None: ... + +global___SetTrialStateValuesReply = SetTrialStateValuesReply + +@typing.final +class SetTrialIntermediateValueRequest(google.protobuf.message.Message): + """* + Request to set a trial intermediate value. + """ + + DESCRIPTOR: google.protobuf.descriptor.Descriptor + + TRIAL_ID_FIELD_NUMBER: builtins.int + STEP_FIELD_NUMBER: builtins.int + INTERMEDIATE_VALUE_FIELD_NUMBER: builtins.int + trial_id: builtins.int + step: builtins.int + intermediate_value: builtins.float + def __init__( + self, + *, + trial_id: builtins.int = ..., + step: builtins.int = ..., + intermediate_value: builtins.float = ..., + ) -> None: ... 
+ def ClearField(self, field_name: typing.Literal["intermediate_value", b"intermediate_value", "step", b"step", "trial_id", b"trial_id"]) -> None: ... + +global___SetTrialIntermediateValueRequest = SetTrialIntermediateValueRequest + +@typing.final +class SetTrialIntermediateValueReply(google.protobuf.message.Message): + """* + Reply to set a trial intermediate value. + """ + + DESCRIPTOR: google.protobuf.descriptor.Descriptor + + def __init__( + self, + ) -> None: ... + +global___SetTrialIntermediateValueReply = SetTrialIntermediateValueReply + +@typing.final +class SetTrialUserAttributeRequest(google.protobuf.message.Message): + """* + Request to set a trial user attribute. + """ + + DESCRIPTOR: google.protobuf.descriptor.Descriptor + + TRIAL_ID_FIELD_NUMBER: builtins.int + KEY_FIELD_NUMBER: builtins.int + VALUE_FIELD_NUMBER: builtins.int + trial_id: builtins.int + key: builtins.str + value: builtins.str + def __init__( + self, + *, + trial_id: builtins.int = ..., + key: builtins.str = ..., + value: builtins.str = ..., + ) -> None: ... + def ClearField(self, field_name: typing.Literal["key", b"key", "trial_id", b"trial_id", "value", b"value"]) -> None: ... + +global___SetTrialUserAttributeRequest = SetTrialUserAttributeRequest + +@typing.final +class SetTrialUserAttributeReply(google.protobuf.message.Message): + """* + Reply to set a trial user attribute. + """ + + DESCRIPTOR: google.protobuf.descriptor.Descriptor + + def __init__( + self, + ) -> None: ... + +global___SetTrialUserAttributeReply = SetTrialUserAttributeReply + +@typing.final +class SetTrialSystemAttributeRequest(google.protobuf.message.Message): + """* + Request to set a trial system attribute. + """ + + DESCRIPTOR: google.protobuf.descriptor.Descriptor + + TRIAL_ID_FIELD_NUMBER: builtins.int + KEY_FIELD_NUMBER: builtins.int + VALUE_FIELD_NUMBER: builtins.int + trial_id: builtins.int + key: builtins.str + value: builtins.str + def __init__( + self, + *, + trial_id: builtins.int = ..., + key: builtins.str = ..., + value: builtins.str = ..., + ) -> None: ... + def ClearField(self, field_name: typing.Literal["key", b"key", "trial_id", b"trial_id", "value", b"value"]) -> None: ... + +global___SetTrialSystemAttributeRequest = SetTrialSystemAttributeRequest + +@typing.final +class SetTrialSystemAttributeReply(google.protobuf.message.Message): + """* + Reply to set a trial system attribute. + """ + + DESCRIPTOR: google.protobuf.descriptor.Descriptor + + def __init__( + self, + ) -> None: ... + +global___SetTrialSystemAttributeReply = SetTrialSystemAttributeReply + +@typing.final +class GetTrialRequest(google.protobuf.message.Message): + """* + Request to get a trial by its ID. + """ + + DESCRIPTOR: google.protobuf.descriptor.Descriptor + + TRIAL_ID_FIELD_NUMBER: builtins.int + trial_id: builtins.int + def __init__( + self, + *, + trial_id: builtins.int = ..., + ) -> None: ... + def ClearField(self, field_name: typing.Literal["trial_id", b"trial_id"]) -> None: ... + +global___GetTrialRequest = GetTrialRequest + +@typing.final +class GetTrialReply(google.protobuf.message.Message): + """* + Reply to get a trial by its ID. + """ + + DESCRIPTOR: google.protobuf.descriptor.Descriptor + + TRIAL_FIELD_NUMBER: builtins.int + @property + def trial(self) -> global___Trial: ... + def __init__( + self, + *, + trial: global___Trial | None = ..., + ) -> None: ... + def HasField(self, field_name: typing.Literal["trial", b"trial"]) -> builtins.bool: ... + def ClearField(self, field_name: typing.Literal["trial", b"trial"]) -> None: ... 
+ +global___GetTrialReply = GetTrialReply + +@typing.final +class GetTrialsRequest(google.protobuf.message.Message): + """* + Request to get trials in a study. + """ + + DESCRIPTOR: google.protobuf.descriptor.Descriptor + + STUDY_ID_FIELD_NUMBER: builtins.int + INCLUDED_TRIAL_IDS_FIELD_NUMBER: builtins.int + TRIAL_ID_GREATER_THAN_FIELD_NUMBER: builtins.int + study_id: builtins.int + trial_id_greater_than: builtins.int + @property + def included_trial_ids(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.int]: ... + def __init__( + self, + *, + study_id: builtins.int = ..., + included_trial_ids: collections.abc.Iterable[builtins.int] | None = ..., + trial_id_greater_than: builtins.int = ..., + ) -> None: ... + def ClearField(self, field_name: typing.Literal["included_trial_ids", b"included_trial_ids", "study_id", b"study_id", "trial_id_greater_than", b"trial_id_greater_than"]) -> None: ... + +global___GetTrialsRequest = GetTrialsRequest + +@typing.final +class GetTrialsReply(google.protobuf.message.Message): + """* + Reply to get trials in a study. + """ + + DESCRIPTOR: google.protobuf.descriptor.Descriptor + + TRIALS_FIELD_NUMBER: builtins.int + @property + def trials(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___Trial]: ... + def __init__( + self, + *, + trials: collections.abc.Iterable[global___Trial] | None = ..., + ) -> None: ... + def ClearField(self, field_name: typing.Literal["trials", b"trials"]) -> None: ... + +global___GetTrialsReply = GetTrialsReply + +@typing.final +class Study(google.protobuf.message.Message): + """* + Study. + """ + + DESCRIPTOR: google.protobuf.descriptor.Descriptor + + @typing.final + class UserAttributesEntry(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + + KEY_FIELD_NUMBER: builtins.int + VALUE_FIELD_NUMBER: builtins.int + key: builtins.str + value: builtins.str + def __init__( + self, + *, + key: builtins.str = ..., + value: builtins.str = ..., + ) -> None: ... + def ClearField(self, field_name: typing.Literal["key", b"key", "value", b"value"]) -> None: ... + + @typing.final + class SystemAttributesEntry(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + + KEY_FIELD_NUMBER: builtins.int + VALUE_FIELD_NUMBER: builtins.int + key: builtins.str + value: builtins.str + def __init__( + self, + *, + key: builtins.str = ..., + value: builtins.str = ..., + ) -> None: ... + def ClearField(self, field_name: typing.Literal["key", b"key", "value", b"value"]) -> None: ... + + STUDY_ID_FIELD_NUMBER: builtins.int + STUDY_NAME_FIELD_NUMBER: builtins.int + DIRECTIONS_FIELD_NUMBER: builtins.int + USER_ATTRIBUTES_FIELD_NUMBER: builtins.int + SYSTEM_ATTRIBUTES_FIELD_NUMBER: builtins.int + study_id: builtins.int + study_name: builtins.str + @property + def directions(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[global___StudyDirection.ValueType]: ... + @property + def user_attributes(self) -> google.protobuf.internal.containers.ScalarMap[builtins.str, builtins.str]: ... + @property + def system_attributes(self) -> google.protobuf.internal.containers.ScalarMap[builtins.str, builtins.str]: ... 
+ def __init__( + self, + *, + study_id: builtins.int = ..., + study_name: builtins.str = ..., + directions: collections.abc.Iterable[global___StudyDirection.ValueType] | None = ..., + user_attributes: collections.abc.Mapping[builtins.str, builtins.str] | None = ..., + system_attributes: collections.abc.Mapping[builtins.str, builtins.str] | None = ..., + ) -> None: ... + def ClearField(self, field_name: typing.Literal["directions", b"directions", "study_id", b"study_id", "study_name", b"study_name", "system_attributes", b"system_attributes", "user_attributes", b"user_attributes"]) -> None: ... + +global___Study = Study + +@typing.final +class Trial(google.protobuf.message.Message): + """* + Trial. + """ + + DESCRIPTOR: google.protobuf.descriptor.Descriptor + + @typing.final + class ParamsEntry(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + + KEY_FIELD_NUMBER: builtins.int + VALUE_FIELD_NUMBER: builtins.int + key: builtins.str + value: builtins.float + def __init__( + self, + *, + key: builtins.str = ..., + value: builtins.float = ..., + ) -> None: ... + def ClearField(self, field_name: typing.Literal["key", b"key", "value", b"value"]) -> None: ... + + @typing.final + class DistributionsEntry(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + + KEY_FIELD_NUMBER: builtins.int + VALUE_FIELD_NUMBER: builtins.int + key: builtins.str + value: builtins.str + def __init__( + self, + *, + key: builtins.str = ..., + value: builtins.str = ..., + ) -> None: ... + def ClearField(self, field_name: typing.Literal["key", b"key", "value", b"value"]) -> None: ... + + @typing.final + class UserAttributesEntry(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + + KEY_FIELD_NUMBER: builtins.int + VALUE_FIELD_NUMBER: builtins.int + key: builtins.str + value: builtins.str + def __init__( + self, + *, + key: builtins.str = ..., + value: builtins.str = ..., + ) -> None: ... + def ClearField(self, field_name: typing.Literal["key", b"key", "value", b"value"]) -> None: ... + + @typing.final + class SystemAttributesEntry(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + + KEY_FIELD_NUMBER: builtins.int + VALUE_FIELD_NUMBER: builtins.int + key: builtins.str + value: builtins.str + def __init__( + self, + *, + key: builtins.str = ..., + value: builtins.str = ..., + ) -> None: ... + def ClearField(self, field_name: typing.Literal["key", b"key", "value", b"value"]) -> None: ... + + @typing.final + class IntermediateValuesEntry(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + + KEY_FIELD_NUMBER: builtins.int + VALUE_FIELD_NUMBER: builtins.int + key: builtins.int + value: builtins.float + def __init__( + self, + *, + key: builtins.int = ..., + value: builtins.float = ..., + ) -> None: ... + def ClearField(self, field_name: typing.Literal["key", b"key", "value", b"value"]) -> None: ... 
+ + TRIAL_ID_FIELD_NUMBER: builtins.int + NUMBER_FIELD_NUMBER: builtins.int + STATE_FIELD_NUMBER: builtins.int + VALUES_FIELD_NUMBER: builtins.int + DATETIME_START_FIELD_NUMBER: builtins.int + DATETIME_COMPLETE_FIELD_NUMBER: builtins.int + PARAMS_FIELD_NUMBER: builtins.int + DISTRIBUTIONS_FIELD_NUMBER: builtins.int + USER_ATTRIBUTES_FIELD_NUMBER: builtins.int + SYSTEM_ATTRIBUTES_FIELD_NUMBER: builtins.int + INTERMEDIATE_VALUES_FIELD_NUMBER: builtins.int + trial_id: builtins.int + number: builtins.int + state: global___TrialState.ValueType + datetime_start: builtins.str + datetime_complete: builtins.str + @property + def values(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.float]: ... + @property + def params(self) -> google.protobuf.internal.containers.ScalarMap[builtins.str, builtins.float]: ... + @property + def distributions(self) -> google.protobuf.internal.containers.ScalarMap[builtins.str, builtins.str]: ... + @property + def user_attributes(self) -> google.protobuf.internal.containers.ScalarMap[builtins.str, builtins.str]: ... + @property + def system_attributes(self) -> google.protobuf.internal.containers.ScalarMap[builtins.str, builtins.str]: ... + @property + def intermediate_values(self) -> google.protobuf.internal.containers.ScalarMap[builtins.int, builtins.float]: ... + def __init__( + self, + *, + trial_id: builtins.int = ..., + number: builtins.int = ..., + state: global___TrialState.ValueType = ..., + values: collections.abc.Iterable[builtins.float] | None = ..., + datetime_start: builtins.str = ..., + datetime_complete: builtins.str = ..., + params: collections.abc.Mapping[builtins.str, builtins.float] | None = ..., + distributions: collections.abc.Mapping[builtins.str, builtins.str] | None = ..., + user_attributes: collections.abc.Mapping[builtins.str, builtins.str] | None = ..., + system_attributes: collections.abc.Mapping[builtins.str, builtins.str] | None = ..., + intermediate_values: collections.abc.Mapping[builtins.int, builtins.float] | None = ..., + ) -> None: ... + def ClearField(self, field_name: typing.Literal["datetime_complete", b"datetime_complete", "datetime_start", b"datetime_start", "distributions", b"distributions", "intermediate_values", b"intermediate_values", "number", b"number", "params", b"params", "state", b"state", "system_attributes", b"system_attributes", "trial_id", b"trial_id", "user_attributes", b"user_attributes", "values", b"values"]) -> None: ... + +global___Trial = Trial diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/_grpc/auto_generated/api_pb2_grpc.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/_grpc/auto_generated/api_pb2_grpc.py new file mode 100644 index 0000000000000000000000000000000000000000..15a6bfe355b0c6d822976613b0d3d34b938caec3 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/_grpc/auto_generated/api_pb2_grpc.py @@ -0,0 +1,915 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! 
+"""Client and server classes corresponding to protobuf-defined services.""" +import grpc +import warnings + +import optuna.storages._grpc.auto_generated.api_pb2 as api__pb2 + +GRPC_GENERATED_VERSION = '1.68.1' +GRPC_VERSION = grpc.__version__ +_version_not_supported = False + +try: + from grpc._utilities import first_version_is_lower + _version_not_supported = first_version_is_lower(GRPC_VERSION, GRPC_GENERATED_VERSION) +except ImportError: + _version_not_supported = True + +if _version_not_supported: + raise RuntimeError( + f'The grpc package installed is at version {GRPC_VERSION},' + + f' but the generated code in api_pb2_grpc.py depends on' + + f' grpcio>={GRPC_GENERATED_VERSION}.' + + f' Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}' + + f' or downgrade your generated code using grpcio-tools<={GRPC_VERSION}.' + ) + + +class StorageServiceStub(object): + """* + Optuna storage service defines APIs to interact with the storage. + """ + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. + """ + self.CreateNewStudy = channel.unary_unary( + '/optuna.StorageService/CreateNewStudy', + request_serializer=api__pb2.CreateNewStudyRequest.SerializeToString, + response_deserializer=api__pb2.CreateNewStudyReply.FromString, + _registered_method=True) + self.DeleteStudy = channel.unary_unary( + '/optuna.StorageService/DeleteStudy', + request_serializer=api__pb2.DeleteStudyRequest.SerializeToString, + response_deserializer=api__pb2.DeleteStudyReply.FromString, + _registered_method=True) + self.SetStudyUserAttribute = channel.unary_unary( + '/optuna.StorageService/SetStudyUserAttribute', + request_serializer=api__pb2.SetStudyUserAttributeRequest.SerializeToString, + response_deserializer=api__pb2.SetStudyUserAttributeReply.FromString, + _registered_method=True) + self.SetStudySystemAttribute = channel.unary_unary( + '/optuna.StorageService/SetStudySystemAttribute', + request_serializer=api__pb2.SetStudySystemAttributeRequest.SerializeToString, + response_deserializer=api__pb2.SetStudySystemAttributeReply.FromString, + _registered_method=True) + self.GetStudyIdFromName = channel.unary_unary( + '/optuna.StorageService/GetStudyIdFromName', + request_serializer=api__pb2.GetStudyIdFromNameRequest.SerializeToString, + response_deserializer=api__pb2.GetStudyIdFromNameReply.FromString, + _registered_method=True) + self.GetStudyNameFromId = channel.unary_unary( + '/optuna.StorageService/GetStudyNameFromId', + request_serializer=api__pb2.GetStudyNameFromIdRequest.SerializeToString, + response_deserializer=api__pb2.GetStudyNameFromIdReply.FromString, + _registered_method=True) + self.GetStudyDirections = channel.unary_unary( + '/optuna.StorageService/GetStudyDirections', + request_serializer=api__pb2.GetStudyDirectionsRequest.SerializeToString, + response_deserializer=api__pb2.GetStudyDirectionsReply.FromString, + _registered_method=True) + self.GetStudyUserAttributes = channel.unary_unary( + '/optuna.StorageService/GetStudyUserAttributes', + request_serializer=api__pb2.GetStudyUserAttributesRequest.SerializeToString, + response_deserializer=api__pb2.GetStudyUserAttributesReply.FromString, + _registered_method=True) + self.GetStudySystemAttributes = channel.unary_unary( + '/optuna.StorageService/GetStudySystemAttributes', + request_serializer=api__pb2.GetStudySystemAttributesRequest.SerializeToString, + response_deserializer=api__pb2.GetStudySystemAttributesReply.FromString, + _registered_method=True) + self.GetAllStudies = channel.unary_unary( + 
'/optuna.StorageService/GetAllStudies', + request_serializer=api__pb2.GetAllStudiesRequest.SerializeToString, + response_deserializer=api__pb2.GetAllStudiesReply.FromString, + _registered_method=True) + self.CreateNewTrial = channel.unary_unary( + '/optuna.StorageService/CreateNewTrial', + request_serializer=api__pb2.CreateNewTrialRequest.SerializeToString, + response_deserializer=api__pb2.CreateNewTrialReply.FromString, + _registered_method=True) + self.SetTrialParameter = channel.unary_unary( + '/optuna.StorageService/SetTrialParameter', + request_serializer=api__pb2.SetTrialParameterRequest.SerializeToString, + response_deserializer=api__pb2.SetTrialParameterReply.FromString, + _registered_method=True) + self.GetTrialIdFromStudyIdTrialNumber = channel.unary_unary( + '/optuna.StorageService/GetTrialIdFromStudyIdTrialNumber', + request_serializer=api__pb2.GetTrialIdFromStudyIdTrialNumberRequest.SerializeToString, + response_deserializer=api__pb2.GetTrialIdFromStudyIdTrialNumberReply.FromString, + _registered_method=True) + self.SetTrialStateValues = channel.unary_unary( + '/optuna.StorageService/SetTrialStateValues', + request_serializer=api__pb2.SetTrialStateValuesRequest.SerializeToString, + response_deserializer=api__pb2.SetTrialStateValuesReply.FromString, + _registered_method=True) + self.SetTrialIntermediateValue = channel.unary_unary( + '/optuna.StorageService/SetTrialIntermediateValue', + request_serializer=api__pb2.SetTrialIntermediateValueRequest.SerializeToString, + response_deserializer=api__pb2.SetTrialIntermediateValueReply.FromString, + _registered_method=True) + self.SetTrialUserAttribute = channel.unary_unary( + '/optuna.StorageService/SetTrialUserAttribute', + request_serializer=api__pb2.SetTrialUserAttributeRequest.SerializeToString, + response_deserializer=api__pb2.SetTrialUserAttributeReply.FromString, + _registered_method=True) + self.SetTrialSystemAttribute = channel.unary_unary( + '/optuna.StorageService/SetTrialSystemAttribute', + request_serializer=api__pb2.SetTrialSystemAttributeRequest.SerializeToString, + response_deserializer=api__pb2.SetTrialSystemAttributeReply.FromString, + _registered_method=True) + self.GetTrial = channel.unary_unary( + '/optuna.StorageService/GetTrial', + request_serializer=api__pb2.GetTrialRequest.SerializeToString, + response_deserializer=api__pb2.GetTrialReply.FromString, + _registered_method=True) + self.GetTrials = channel.unary_unary( + '/optuna.StorageService/GetTrials', + request_serializer=api__pb2.GetTrialsRequest.SerializeToString, + response_deserializer=api__pb2.GetTrialsReply.FromString, + _registered_method=True) + + +class StorageServiceServicer(object): + """* + Optuna storage service defines APIs to interact with the storage. + """ + + def CreateNewStudy(self, request, context): + """* + Create a new study. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def DeleteStudy(self, request, context): + """* + Delete a study. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def SetStudyUserAttribute(self, request, context): + """* + Set a study's user attribute. 
+ """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def SetStudySystemAttribute(self, request, context): + """* + Set a study's system attribute. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def GetStudyIdFromName(self, request, context): + """* + Get a study id by its name. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def GetStudyNameFromId(self, request, context): + """* + Get a study name by its id. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def GetStudyDirections(self, request, context): + """* + Get study directions. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def GetStudyUserAttributes(self, request, context): + """* + Get study user attributes. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def GetStudySystemAttributes(self, request, context): + """* + Get study system attributes. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def GetAllStudies(self, request, context): + """* + Get all studies. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def CreateNewTrial(self, request, context): + """* + Create a new trial. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def SetTrialParameter(self, request, context): + """* + Set a trial parameter. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def GetTrialIdFromStudyIdTrialNumber(self, request, context): + """* + Get a trial id from its study id and trial number. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def SetTrialStateValues(self, request, context): + """* + Set trial state and values. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def SetTrialIntermediateValue(self, request, context): + """* + Set a trial intermediate value. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def SetTrialUserAttribute(self, request, context): + """* + Set a trial user attribute. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def SetTrialSystemAttribute(self, request, context): + """* + Set a trial system attribute. 
+ """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def GetTrial(self, request, context): + """* + Get a trial by its ID. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def GetTrials(self, request, context): + """* + Get trials in a study. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_StorageServiceServicer_to_server(servicer, server): + rpc_method_handlers = { + 'CreateNewStudy': grpc.unary_unary_rpc_method_handler( + servicer.CreateNewStudy, + request_deserializer=api__pb2.CreateNewStudyRequest.FromString, + response_serializer=api__pb2.CreateNewStudyReply.SerializeToString, + ), + 'DeleteStudy': grpc.unary_unary_rpc_method_handler( + servicer.DeleteStudy, + request_deserializer=api__pb2.DeleteStudyRequest.FromString, + response_serializer=api__pb2.DeleteStudyReply.SerializeToString, + ), + 'SetStudyUserAttribute': grpc.unary_unary_rpc_method_handler( + servicer.SetStudyUserAttribute, + request_deserializer=api__pb2.SetStudyUserAttributeRequest.FromString, + response_serializer=api__pb2.SetStudyUserAttributeReply.SerializeToString, + ), + 'SetStudySystemAttribute': grpc.unary_unary_rpc_method_handler( + servicer.SetStudySystemAttribute, + request_deserializer=api__pb2.SetStudySystemAttributeRequest.FromString, + response_serializer=api__pb2.SetStudySystemAttributeReply.SerializeToString, + ), + 'GetStudyIdFromName': grpc.unary_unary_rpc_method_handler( + servicer.GetStudyIdFromName, + request_deserializer=api__pb2.GetStudyIdFromNameRequest.FromString, + response_serializer=api__pb2.GetStudyIdFromNameReply.SerializeToString, + ), + 'GetStudyNameFromId': grpc.unary_unary_rpc_method_handler( + servicer.GetStudyNameFromId, + request_deserializer=api__pb2.GetStudyNameFromIdRequest.FromString, + response_serializer=api__pb2.GetStudyNameFromIdReply.SerializeToString, + ), + 'GetStudyDirections': grpc.unary_unary_rpc_method_handler( + servicer.GetStudyDirections, + request_deserializer=api__pb2.GetStudyDirectionsRequest.FromString, + response_serializer=api__pb2.GetStudyDirectionsReply.SerializeToString, + ), + 'GetStudyUserAttributes': grpc.unary_unary_rpc_method_handler( + servicer.GetStudyUserAttributes, + request_deserializer=api__pb2.GetStudyUserAttributesRequest.FromString, + response_serializer=api__pb2.GetStudyUserAttributesReply.SerializeToString, + ), + 'GetStudySystemAttributes': grpc.unary_unary_rpc_method_handler( + servicer.GetStudySystemAttributes, + request_deserializer=api__pb2.GetStudySystemAttributesRequest.FromString, + response_serializer=api__pb2.GetStudySystemAttributesReply.SerializeToString, + ), + 'GetAllStudies': grpc.unary_unary_rpc_method_handler( + servicer.GetAllStudies, + request_deserializer=api__pb2.GetAllStudiesRequest.FromString, + response_serializer=api__pb2.GetAllStudiesReply.SerializeToString, + ), + 'CreateNewTrial': grpc.unary_unary_rpc_method_handler( + servicer.CreateNewTrial, + request_deserializer=api__pb2.CreateNewTrialRequest.FromString, + response_serializer=api__pb2.CreateNewTrialReply.SerializeToString, + ), + 'SetTrialParameter': grpc.unary_unary_rpc_method_handler( + servicer.SetTrialParameter, + request_deserializer=api__pb2.SetTrialParameterRequest.FromString, + 
response_serializer=api__pb2.SetTrialParameterReply.SerializeToString, + ), + 'GetTrialIdFromStudyIdTrialNumber': grpc.unary_unary_rpc_method_handler( + servicer.GetTrialIdFromStudyIdTrialNumber, + request_deserializer=api__pb2.GetTrialIdFromStudyIdTrialNumberRequest.FromString, + response_serializer=api__pb2.GetTrialIdFromStudyIdTrialNumberReply.SerializeToString, + ), + 'SetTrialStateValues': grpc.unary_unary_rpc_method_handler( + servicer.SetTrialStateValues, + request_deserializer=api__pb2.SetTrialStateValuesRequest.FromString, + response_serializer=api__pb2.SetTrialStateValuesReply.SerializeToString, + ), + 'SetTrialIntermediateValue': grpc.unary_unary_rpc_method_handler( + servicer.SetTrialIntermediateValue, + request_deserializer=api__pb2.SetTrialIntermediateValueRequest.FromString, + response_serializer=api__pb2.SetTrialIntermediateValueReply.SerializeToString, + ), + 'SetTrialUserAttribute': grpc.unary_unary_rpc_method_handler( + servicer.SetTrialUserAttribute, + request_deserializer=api__pb2.SetTrialUserAttributeRequest.FromString, + response_serializer=api__pb2.SetTrialUserAttributeReply.SerializeToString, + ), + 'SetTrialSystemAttribute': grpc.unary_unary_rpc_method_handler( + servicer.SetTrialSystemAttribute, + request_deserializer=api__pb2.SetTrialSystemAttributeRequest.FromString, + response_serializer=api__pb2.SetTrialSystemAttributeReply.SerializeToString, + ), + 'GetTrial': grpc.unary_unary_rpc_method_handler( + servicer.GetTrial, + request_deserializer=api__pb2.GetTrialRequest.FromString, + response_serializer=api__pb2.GetTrialReply.SerializeToString, + ), + 'GetTrials': grpc.unary_unary_rpc_method_handler( + servicer.GetTrials, + request_deserializer=api__pb2.GetTrialsRequest.FromString, + response_serializer=api__pb2.GetTrialsReply.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'optuna.StorageService', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) + server.add_registered_method_handlers('optuna.StorageService', rpc_method_handlers) + + + # This class is part of an EXPERIMENTAL API. +class StorageService(object): + """* + Optuna storage service defines APIs to interact with the storage. 
+ """ + + @staticmethod + def CreateNewStudy(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/optuna.StorageService/CreateNewStudy', + api__pb2.CreateNewStudyRequest.SerializeToString, + api__pb2.CreateNewStudyReply.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def DeleteStudy(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/optuna.StorageService/DeleteStudy', + api__pb2.DeleteStudyRequest.SerializeToString, + api__pb2.DeleteStudyReply.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def SetStudyUserAttribute(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/optuna.StorageService/SetStudyUserAttribute', + api__pb2.SetStudyUserAttributeRequest.SerializeToString, + api__pb2.SetStudyUserAttributeReply.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def SetStudySystemAttribute(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/optuna.StorageService/SetStudySystemAttribute', + api__pb2.SetStudySystemAttributeRequest.SerializeToString, + api__pb2.SetStudySystemAttributeReply.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def GetStudyIdFromName(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/optuna.StorageService/GetStudyIdFromName', + api__pb2.GetStudyIdFromNameRequest.SerializeToString, + api__pb2.GetStudyIdFromNameReply.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def GetStudyNameFromId(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/optuna.StorageService/GetStudyNameFromId', + api__pb2.GetStudyNameFromIdRequest.SerializeToString, + api__pb2.GetStudyNameFromIdReply.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def GetStudyDirections(request, + 
target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/optuna.StorageService/GetStudyDirections', + api__pb2.GetStudyDirectionsRequest.SerializeToString, + api__pb2.GetStudyDirectionsReply.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def GetStudyUserAttributes(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/optuna.StorageService/GetStudyUserAttributes', + api__pb2.GetStudyUserAttributesRequest.SerializeToString, + api__pb2.GetStudyUserAttributesReply.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def GetStudySystemAttributes(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/optuna.StorageService/GetStudySystemAttributes', + api__pb2.GetStudySystemAttributesRequest.SerializeToString, + api__pb2.GetStudySystemAttributesReply.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def GetAllStudies(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/optuna.StorageService/GetAllStudies', + api__pb2.GetAllStudiesRequest.SerializeToString, + api__pb2.GetAllStudiesReply.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def CreateNewTrial(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/optuna.StorageService/CreateNewTrial', + api__pb2.CreateNewTrialRequest.SerializeToString, + api__pb2.CreateNewTrialReply.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def SetTrialParameter(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/optuna.StorageService/SetTrialParameter', + api__pb2.SetTrialParameterRequest.SerializeToString, + api__pb2.SetTrialParameterReply.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def GetTrialIdFromStudyIdTrialNumber(request, + target, + options=(), + 
channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/optuna.StorageService/GetTrialIdFromStudyIdTrialNumber', + api__pb2.GetTrialIdFromStudyIdTrialNumberRequest.SerializeToString, + api__pb2.GetTrialIdFromStudyIdTrialNumberReply.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def SetTrialStateValues(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/optuna.StorageService/SetTrialStateValues', + api__pb2.SetTrialStateValuesRequest.SerializeToString, + api__pb2.SetTrialStateValuesReply.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def SetTrialIntermediateValue(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/optuna.StorageService/SetTrialIntermediateValue', + api__pb2.SetTrialIntermediateValueRequest.SerializeToString, + api__pb2.SetTrialIntermediateValueReply.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def SetTrialUserAttribute(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/optuna.StorageService/SetTrialUserAttribute', + api__pb2.SetTrialUserAttributeRequest.SerializeToString, + api__pb2.SetTrialUserAttributeReply.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def SetTrialSystemAttribute(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/optuna.StorageService/SetTrialSystemAttribute', + api__pb2.SetTrialSystemAttributeRequest.SerializeToString, + api__pb2.SetTrialSystemAttributeReply.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def GetTrial(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/optuna.StorageService/GetTrial', + api__pb2.GetTrialRequest.SerializeToString, + api__pb2.GetTrialReply.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def GetTrials(request, + target, + 
options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/optuna.StorageService/GetTrials', + api__pb2.GetTrialsRequest.SerializeToString, + api__pb2.GetTrialsReply.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/_grpc/client.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/_grpc/client.py new file mode 100644 index 0000000000000000000000000000000000000000..3e53b4e6e92c023fe679b56b3d1c2ae7da6f9ff7 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/_grpc/client.py @@ -0,0 +1,442 @@ +from __future__ import annotations + +from collections.abc import Container +from collections.abc import Sequence +import copy +import json +import threading +from typing import Any +from typing import TYPE_CHECKING +import uuid + +from optuna._experimental import experimental_class +from optuna._imports import _LazyImport +from optuna.distributions import BaseDistribution +from optuna.distributions import distribution_to_json +from optuna.exceptions import DuplicatedStudyError +from optuna.exceptions import UpdateFinishedTrialError +from optuna.storages._base import BaseStorage +from optuna.storages._base import DEFAULT_STUDY_NAME_PREFIX +from optuna.study._frozen import FrozenStudy +from optuna.study._study_direction import StudyDirection +from optuna.trial._frozen import FrozenTrial +from optuna.trial._state import TrialState + + +if TYPE_CHECKING: + import grpc + + from optuna.storages._grpc import servicer as grpc_servicer + from optuna.storages._grpc.auto_generated import api_pb2 + from optuna.storages._grpc.auto_generated import api_pb2_grpc +else: + api_pb2 = _LazyImport("optuna.storages._grpc.auto_generated.api_pb2") + api_pb2_grpc = _LazyImport("optuna.storages._grpc.auto_generated.api_pb2_grpc") + grpc = _LazyImport("grpc") + grpc_servicer = _LazyImport("optuna.storages._grpc.servicer") + + +def create_insecure_channel(host: str, port: int) -> grpc.Channel: + return grpc.insecure_channel( + f"{host}:{port}", options=[("grpc.max_receive_message_length", -1)] + ) + + +@experimental_class("4.2.0") +class GrpcStorageProxy(BaseStorage): + """gRPC client for :func:`~optuna.storages.run_grpc_proxy_server`. + + Example: + + This is a simple example of using :class:`~optuna.storages.GrpcStorageProxy` with + :func:`~optuna.storages.run_grpc_proxy_server`. + + .. code:: + + import optuna + from optuna.storages import GrpcStorageProxy + + storage = GrpcStorageProxy(host="localhost", port=13000) + study = optuna.create_study(storage=storage) + + Please refer to the example in :func:`~optuna.storages.run_grpc_proxy_server` for the + server side code. + + Args: + host: The hostname of the gRPC server. + port: The port of the gRPC server. + + .. warning:: + + Currently, gRPC storage proxy in combination with an SQLite3 database may cause unexpected + behaviors when calling :func:`optuna.delete_study` due to non-invalidated cache. 
+ """ + + def __init__(self, *, host: str = "localhost", port: int = 13000) -> None: + self._host = host + self._port = port + self._setup() + + def _setup(self) -> None: + """Set up the gRPC channel and stub.""" + self._channel = create_insecure_channel(self._host, self._port) + self._stub = api_pb2_grpc.StorageServiceStub(self._channel) + self._cache = GrpcClientCache(self._stub) + + def wait_server_ready(self, timeout: float | None = None) -> None: + """Wait until the gRPC server is ready. + + Args: + timeout: The maximum time to wait in seconds. If :obj:`None`, wait indefinitely. + """ + try: + with create_insecure_channel(self._host, self._port) as channel: + grpc.channel_ready_future(channel).result(timeout=timeout) + except grpc.FutureTimeoutError as e: + raise ConnectionError("GRPC connection timeout") from e + + def close(self) -> None: + """Close the gRPC channel.""" + self._channel.close() + + def __getstate__(self) -> dict[Any, Any]: + state = self.__dict__.copy() + del state["_channel"] + del state["_stub"] + del state["_cache"] + return state + + def __setstate__(self, state: dict[Any, Any]) -> None: + self.__dict__.update(state) + self._setup() + + def create_new_study( + self, directions: Sequence[StudyDirection], study_name: str | None = None + ) -> int: + request = api_pb2.CreateNewStudyRequest( + directions=[ + api_pb2.MINIMIZE if d == StudyDirection.MINIMIZE else api_pb2.MAXIMIZE + for d in directions + ], + study_name=study_name or DEFAULT_STUDY_NAME_PREFIX + str(uuid.uuid4()), + ) + try: + response = self._stub.CreateNewStudy(request) + except grpc.RpcError as e: + if e.code() == grpc.StatusCode.ALREADY_EXISTS: + raise DuplicatedStudyError from e + raise + return response.study_id + + def delete_study(self, study_id: int) -> None: + request = api_pb2.DeleteStudyRequest(study_id=study_id) + try: + self._stub.DeleteStudy(request) + except grpc.RpcError as e: + if e.code() == grpc.StatusCode.NOT_FOUND: + raise KeyError from e + raise + # TODO(c-bata): Fix a cache invalidation issue when using SQLite3 + # Please see https://github.com/optuna/optuna/pull/5872/files#r1893708995 for details. 
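+        # Dropping the client-side cache entry for this study means the next
+        # get_all_trials() call refetches trials from the server instead of
+        # serving trials that belonged to the deleted study.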
+ self._cache.delete_study_cache(study_id) + + def set_study_user_attr(self, study_id: int, key: str, value: Any) -> None: + request = api_pb2.SetStudyUserAttributeRequest( + study_id=study_id, key=key, value=json.dumps(value) + ) + try: + self._stub.SetStudyUserAttribute(request) + except grpc.RpcError as e: + if e.code() == grpc.StatusCode.NOT_FOUND: + raise KeyError from e + raise + + def set_study_system_attr(self, study_id: int, key: str, value: Any) -> None: + request = api_pb2.SetStudySystemAttributeRequest( + study_id=study_id, key=key, value=json.dumps(value) + ) + try: + self._stub.SetStudySystemAttribute(request) + except grpc.RpcError as e: + if e.code() == grpc.StatusCode.NOT_FOUND: + raise KeyError from e + raise + + def get_study_id_from_name(self, study_name: str) -> int: + request = api_pb2.GetStudyIdFromNameRequest(study_name=study_name) + try: + response = self._stub.GetStudyIdFromName(request) + except grpc.RpcError as e: + if e.code() == grpc.StatusCode.NOT_FOUND: + raise KeyError from e + raise + return response.study_id + + def get_study_name_from_id(self, study_id: int) -> str: + request = api_pb2.GetStudyNameFromIdRequest(study_id=study_id) + try: + response = self._stub.GetStudyNameFromId(request) + except grpc.RpcError as e: + if e.code() == grpc.StatusCode.NOT_FOUND: + raise KeyError from e + raise + return response.study_name + + def get_study_directions(self, study_id: int) -> list[StudyDirection]: + request = api_pb2.GetStudyDirectionsRequest(study_id=study_id) + try: + response = self._stub.GetStudyDirections(request) + except grpc.RpcError as e: + if e.code() == grpc.StatusCode.NOT_FOUND: + raise KeyError from e + raise + return [ + StudyDirection.MINIMIZE if d == api_pb2.MINIMIZE else StudyDirection.MAXIMIZE + for d in response.directions + ] + + def get_study_user_attrs(self, study_id: int) -> dict[str, Any]: + request = api_pb2.GetStudyUserAttributesRequest(study_id=study_id) + try: + response = self._stub.GetStudyUserAttributes(request) + except grpc.RpcError as e: + if e.code() == grpc.StatusCode.NOT_FOUND: + raise KeyError from e + raise + return {key: json.loads(value) for key, value in response.user_attributes.items()} + + def get_study_system_attrs(self, study_id: int) -> dict[str, Any]: + request = api_pb2.GetStudySystemAttributesRequest(study_id=study_id) + try: + response = self._stub.GetStudySystemAttributes(request) + except grpc.RpcError as e: + if e.code() == grpc.StatusCode.NOT_FOUND: + raise KeyError from e + raise + return {key: json.loads(value) for key, value in response.system_attributes.items()} + + def get_all_studies(self) -> list[FrozenStudy]: + request = api_pb2.GetAllStudiesRequest() + response = self._stub.GetAllStudies(request) + return [ + FrozenStudy( + study_id=study.study_id, + study_name=study.study_name, + direction=None, + directions=[ + StudyDirection.MINIMIZE if d == api_pb2.MINIMIZE else StudyDirection.MAXIMIZE + for d in study.directions + ], + user_attrs={ + key: json.loads(value) for key, value in study.user_attributes.items() + }, + system_attrs={ + key: json.loads(value) for key, value in study.system_attributes.items() + }, + ) + for study in response.studies + ] + + def create_new_trial(self, study_id: int, template_trial: FrozenTrial | None = None) -> int: + if template_trial is None: + request = api_pb2.CreateNewTrialRequest(study_id=study_id, template_trial_is_none=True) + else: + request = api_pb2.CreateNewTrialRequest( + study_id=study_id, + template_trial=grpc_servicer._to_proto_trial(template_trial), + 
template_trial_is_none=False, + ) + try: + response = self._stub.CreateNewTrial(request) + except grpc.RpcError as e: + if e.code() == grpc.StatusCode.NOT_FOUND: + raise KeyError from e + raise + return response.trial_id + + def set_trial_param( + self, + trial_id: int, + param_name: str, + param_value_internal: float, + distribution: BaseDistribution, + ) -> None: + request = api_pb2.SetTrialParameterRequest( + trial_id=trial_id, + param_name=param_name, + param_value_internal=param_value_internal, + distribution=distribution_to_json(distribution), + ) + try: + self._stub.SetTrialParameter(request) + except grpc.RpcError as e: + if e.code() == grpc.StatusCode.NOT_FOUND: + raise KeyError from e + elif e.code() == grpc.StatusCode.FAILED_PRECONDITION: + raise UpdateFinishedTrialError from e + elif e.code() == grpc.StatusCode.INVALID_ARGUMENT: + raise ValueError from e + else: + raise + + def set_trial_state_values( + self, trial_id: int, state: TrialState, values: Sequence[float] | None = None + ) -> bool: + request = api_pb2.SetTrialStateValuesRequest( + trial_id=trial_id, + state=grpc_servicer._to_proto_trial_state(state), + values=values, + ) + try: + response = self._stub.SetTrialStateValues(request) + except grpc.RpcError as e: + if e.code() == grpc.StatusCode.NOT_FOUND: + raise KeyError from e + elif e.code() == grpc.StatusCode.FAILED_PRECONDITION: + raise UpdateFinishedTrialError from e + else: + raise + + return response.trial_updated + + def set_trial_intermediate_value( + self, trial_id: int, step: int, intermediate_value: float + ) -> None: + request = api_pb2.SetTrialIntermediateValueRequest( + trial_id=trial_id, step=step, intermediate_value=intermediate_value + ) + try: + self._stub.SetTrialIntermediateValue(request) + except grpc.RpcError as e: + if e.code() == grpc.StatusCode.NOT_FOUND: + raise KeyError from e + elif e.code() == grpc.StatusCode.FAILED_PRECONDITION: + raise UpdateFinishedTrialError from e + else: + raise + + def set_trial_user_attr(self, trial_id: int, key: str, value: Any) -> None: + request = api_pb2.SetTrialUserAttributeRequest( + trial_id=trial_id, key=key, value=json.dumps(value) + ) + try: + self._stub.SetTrialUserAttribute(request) + except grpc.RpcError as e: + if e.code() == grpc.StatusCode.NOT_FOUND: + raise KeyError from e + elif e.code() == grpc.StatusCode.FAILED_PRECONDITION: + raise UpdateFinishedTrialError from e + else: + raise + + def set_trial_system_attr(self, trial_id: int, key: str, value: Any) -> None: + request = api_pb2.SetTrialSystemAttributeRequest( + trial_id=trial_id, key=key, value=json.dumps(value) + ) + try: + self._stub.SetTrialSystemAttribute(request) + except grpc.RpcError as e: + if e.code() == grpc.StatusCode.NOT_FOUND: + raise KeyError from e + elif e.code() == grpc.StatusCode.FAILED_PRECONDITION: + raise UpdateFinishedTrialError from e + else: + raise + + def get_trial_id_from_study_id_trial_number(self, study_id: int, trial_number: int) -> int: + request = api_pb2.GetTrialIdFromStudyIdTrialNumberRequest( + study_id=study_id, trial_number=trial_number + ) + try: + response = self._stub.GetTrialIdFromStudyIdTrialNumber(request) + except grpc.RpcError as e: + if e.code() == grpc.StatusCode.NOT_FOUND: + raise KeyError from e + raise + return response.trial_id + + def get_trial(self, trial_id: int) -> FrozenTrial: + request = api_pb2.GetTrialRequest(trial_id=trial_id) + try: + response = self._stub.GetTrial(request) + except grpc.RpcError as e: + if e.code() == grpc.StatusCode.NOT_FOUND: + raise KeyError from e + raise + 
return grpc_servicer._from_proto_trial(response.trial) + + def get_all_trials( + self, + study_id: int, + deepcopy: bool = True, + states: Container[TrialState] | None = None, + ) -> list[FrozenTrial]: + trials = self._cache.get_all_trials(study_id, states) + return copy.deepcopy(trials) if deepcopy else trials + + +class GrpcClientCache: + def __init__(self, grpc_client: api_pb2_grpc.StorageServiceStub) -> None: + self.studies: dict[int, GrpcClientCacheEntry] = {} + self.grpc_client = grpc_client + self.lock = threading.Lock() + + def delete_study_cache(self, study_id: int) -> None: + with self.lock: + self.studies.pop(study_id, None) + + def get_all_trials( + self, study_id: int, states: Container[TrialState] | None + ) -> list[FrozenTrial]: + with self.lock: + self._read_trials_from_remote_storage(study_id) + study = self.studies[study_id] + trials: dict[int, FrozenTrial] | list[FrozenTrial] + if states is not None: + trials = {number: t for number, t in study.trials.items() if t.state in states} + else: + trials = study.trials + trials = list(sorted(trials.values(), key=lambda t: t.number)) + return trials + + def _read_trials_from_remote_storage(self, study_id: int) -> None: + if study_id not in self.studies: + self.studies[study_id] = GrpcClientCacheEntry() + study = self.studies[study_id] + + req = api_pb2.GetTrialsRequest( + study_id=study_id, + included_trial_ids=study.unfinished_trial_ids, + trial_id_greater_than=study.last_finished_trial_id, + ) + try: + res = self.grpc_client.GetTrials(req) + except grpc.RpcError as e: + if e.code() == grpc.StatusCode.NOT_FOUND: + self.studies.pop(study_id, None) + raise KeyError from e + raise + if not res.trials: + return + + for trial_proto in res.trials: + trial = grpc_servicer._from_proto_trial(trial_proto) + self._add_trial_to_cache(study_id, trial) + + def _add_trial_to_cache(self, study_id: int, trial: FrozenTrial) -> None: + study = self.studies[study_id] + study.trials[trial.number] = trial + + if not trial.state.is_finished(): + study.unfinished_trial_ids.add(trial._trial_id) + return + + study.last_finished_trial_id = max(study.last_finished_trial_id, trial._trial_id) + study.unfinished_trial_ids.discard(trial._trial_id) + + +class GrpcClientCacheEntry: + def __init__(self) -> None: + self.trials: dict[int, FrozenTrial] = {} + self.unfinished_trial_ids: set[int] = set() + self.last_finished_trial_id: int = -1 diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/_grpc/server.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/_grpc/server.py new file mode 100644 index 0000000000000000000000000000000000000000..ae108de053fe20481d3e643e6a8b3eca8ce9261b --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/_grpc/server.py @@ -0,0 +1,84 @@ +from __future__ import annotations + +from concurrent.futures import ThreadPoolExecutor +from typing import TYPE_CHECKING + +from optuna import logging +from optuna._experimental import experimental_func +from optuna._imports import _LazyImport +from optuna.storages import BaseStorage + + +if TYPE_CHECKING: + import grpc + + from optuna.storages._grpc import servicer as grpc_servicer + from optuna.storages._grpc.auto_generated import api_pb2_grpc +else: + grpc = _LazyImport("grpc") + grpc_servicer = _LazyImport("optuna.storages._grpc.servicer") + api_pb2_grpc = _LazyImport("optuna.storages._grpc.auto_generated.api_pb2_grpc") + + +_logger = 
logging.get_logger(__name__) +DATETIME_FORMAT = "%Y-%m-%d %H:%M:%S.%f" + + +def make_server( + storage: BaseStorage, host: str, port: int, thread_pool: ThreadPoolExecutor | None = None +) -> grpc.Server: + server = grpc.server(thread_pool or ThreadPoolExecutor(max_workers=10)) + api_pb2_grpc.add_StorageServiceServicer_to_server( + grpc_servicer.OptunaStorageProxyService(storage), server + ) + server.add_insecure_port(f"{host}:{port}") + return server + + +@experimental_func("4.2.0") +def run_grpc_proxy_server( + storage: BaseStorage, + *, + host: str = "localhost", + port: int = 13000, + thread_pool: ThreadPoolExecutor | None = None, +) -> None: + """Run a gRPC proxy server for the given storage object, host, and port. + + Example: + + Run the server as follows: + + .. code:: + + from optuna.storages import run_grpc_proxy_server + from optuna.storages import get_storage + + storage = get_storage("mysql+pymysql://<user>:<pass>@<host>/<dbname>[?<options>]") + run_grpc_proxy_server(storage, host="localhost", port=13000) + + Please refer to the client class :class:`~optuna.storages.GrpcStorageProxy` for + the client usage. Please use :func:`~optuna.storages.get_storage` instead of + :class:`~optuna.storages.RDBStorage`, since ``RDBStorage`` by itself does not use an + in-process cache and may cause a significant slowdown. + + Args: + storage: A storage object to proxy. + host: Hostname to listen on. + port: Port to listen on. + thread_pool: + Thread pool to use for the server. If :obj:`None`, a default thread pool + with 10 workers will be used. + + .. warning:: + + Currently, the gRPC storage proxy does not support + :class:`~optuna.storages.JournalStorage`. This issue is tracked in + https://github.com/optuna/optuna/issues/6084. Please use + :class:`~optuna.storages.RDBStorage` instead.
+ """ + server = make_server(storage, host, port, thread_pool) + server.start() + _logger.info(f"Server started at {host}:{port}") + _logger.info("Listening...") + server.wait_for_termination() diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/_grpc/servicer.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/_grpc/servicer.py new file mode 100644 index 0000000000000000000000000000000000000000..b23d7a136002a9a192fcd1fb37a7cd05df35205d --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/_grpc/servicer.py @@ -0,0 +1,429 @@ +from __future__ import annotations + +from datetime import datetime +import json +import threading +from typing import TYPE_CHECKING + +from optuna import logging +from optuna._imports import _LazyImport +from optuna.distributions import distribution_to_json +from optuna.distributions import json_to_distribution +from optuna.exceptions import DuplicatedStudyError +from optuna.exceptions import UpdateFinishedTrialError +from optuna.storages import BaseStorage +from optuna.study._study_direction import StudyDirection +from optuna.trial._frozen import FrozenTrial +from optuna.trial._state import TrialState + + +if TYPE_CHECKING: + import grpc + + from optuna.storages._grpc.auto_generated import api_pb2 + from optuna.storages._grpc.auto_generated import api_pb2_grpc +else: + api_pb2 = _LazyImport("optuna.storages._grpc.auto_generated.api_pb2") + api_pb2_grpc = _LazyImport("optuna.storages._grpc.auto_generated.api_pb2_grpc") + grpc = _LazyImport("grpc") + + +_logger = logging.get_logger(__name__) +DATETIME_FORMAT = "%Y-%m-%d %H:%M:%S.%f" + + +class OptunaStorageProxyService(api_pb2_grpc.StorageServiceServicer): + def __init__(self, storage: BaseStorage) -> None: + self._backend = storage + self._lock = threading.Lock() + + def CreateNewStudy( + self, + request: api_pb2.CreateNewStudyRequest, + context: grpc.ServicerContext, + ) -> api_pb2.CreateNewStudyReply: + directions = [ + StudyDirection.MINIMIZE if d == api_pb2.MINIMIZE else StudyDirection.MAXIMIZE + for d in request.directions + ] + study_name = request.study_name + + try: + study_id = self._backend.create_new_study(directions=directions, study_name=study_name) + except DuplicatedStudyError as e: + context.abort(code=grpc.StatusCode.ALREADY_EXISTS, details=str(e)) + return api_pb2.CreateNewStudyReply(study_id=study_id) + + def DeleteStudy( + self, + request: api_pb2.DeleteStudyRequest, + context: grpc.ServicerContext, + ) -> api_pb2.DeleteStudyReply: + study_id = request.study_id + try: + self._backend.delete_study(study_id) + except KeyError as e: + context.abort(code=grpc.StatusCode.NOT_FOUND, details=str(e)) + return api_pb2.DeleteStudyReply() + + def SetStudyUserAttribute( + self, + request: api_pb2.SetStudyUserAttributeRequest, + context: grpc.ServicerContext, + ) -> api_pb2.SetStudyUserAttributeReply: + try: + self._backend.set_study_user_attr( + request.study_id, request.key, json.loads(request.value) + ) + except KeyError as e: + context.abort(code=grpc.StatusCode.NOT_FOUND, details=str(e)) + return api_pb2.SetStudyUserAttributeReply() + + def SetStudySystemAttribute( + self, + request: api_pb2.SetStudySystemAttributeRequest, + context: grpc.ServicerContext, + ) -> api_pb2.SetStudySystemAttributeReply: + try: + self._backend.set_study_system_attr( + request.study_id, request.key, json.loads(request.value) + ) + except KeyError as e: + 
context.abort(code=grpc.StatusCode.NOT_FOUND, details=str(e)) + return api_pb2.SetStudySystemAttributeReply() + + def GetStudyIdFromName( + self, + request: api_pb2.GetStudyIdFromNameRequest, + context: grpc.ServicerContext, + ) -> api_pb2.GetStudyIdFromNameReply: + try: + study_id = self._backend.get_study_id_from_name(request.study_name) + except KeyError as e: + context.abort(code=grpc.StatusCode.NOT_FOUND, details=str(e)) + return api_pb2.GetStudyIdFromNameReply(study_id=study_id) + + def GetStudyNameFromId( + self, + request: api_pb2.GetStudyNameFromIdRequest, + context: grpc.ServicerContext, + ) -> api_pb2.GetStudyNameFromIdReply: + study_id = request.study_id + + try: + name = self._backend.get_study_name_from_id(study_id) + except KeyError as e: + context.abort(code=grpc.StatusCode.NOT_FOUND, details=str(e)) + assert name is not None + return api_pb2.GetStudyNameFromIdReply(study_name=name) + + def GetStudyDirections( + self, + request: api_pb2.GetStudyDirectionsRequest, + context: grpc.ServicerContext, + ) -> api_pb2.GetStudyDirectionsReply: + study_id = request.study_id + + try: + directions = self._backend.get_study_directions(study_id) + except KeyError as e: + context.abort(code=grpc.StatusCode.NOT_FOUND, details=str(e)) + + assert directions is not None + return api_pb2.GetStudyDirectionsReply( + directions=[ + api_pb2.MINIMIZE if d == StudyDirection.MINIMIZE else api_pb2.MAXIMIZE + for d in directions + ] + ) + + def GetStudyUserAttributes( + self, + request: api_pb2.GetStudyUserAttributesRequest, + context: grpc.ServicerContext, + ) -> api_pb2.GetStudyUserAttributesReply: + try: + attributes = self._backend.get_study_user_attrs(request.study_id) + except KeyError as e: + context.abort(code=grpc.StatusCode.NOT_FOUND, details=str(e)) + return api_pb2.GetStudyUserAttributesReply( + user_attributes={key: json.dumps(value) for key, value in attributes.items()} + ) + + def GetStudySystemAttributes( + self, + request: api_pb2.GetStudySystemAttributesRequest, + context: grpc.ServicerContext, + ) -> api_pb2.GetStudySystemAttributesReply: + try: + attributes = self._backend.get_study_system_attrs(request.study_id) + except KeyError as e: + context.abort(code=grpc.StatusCode.NOT_FOUND, details=str(e)) + return api_pb2.GetStudySystemAttributesReply( + system_attributes={key: json.dumps(value) for key, value in attributes.items()} + ) + + def GetAllStudies( + self, + request: api_pb2.GetAllStudiesRequest, + context: grpc.ServicerContext, + ) -> api_pb2.GetAllStudiesReply: + studies = self._backend.get_all_studies() + return api_pb2.GetAllStudiesReply( + studies=[ + api_pb2.Study( + study_id=study._study_id, + study_name=study.study_name, + directions=[ + api_pb2.MINIMIZE if d == StudyDirection.MINIMIZE else api_pb2.MAXIMIZE + for d in study.directions + ], + user_attributes={ + key: json.dumps(value) for key, value in study.user_attrs.items() + }, + system_attributes={ + key: json.dumps(value) for key, value in study.system_attrs.items() + }, + ) + for study in studies + ] + ) + + def CreateNewTrial( + self, + request: api_pb2.CreateNewTrialRequest, + context: grpc.ServicerContext, + ) -> api_pb2.CreateNewTrialReply: + study_id = request.study_id + + template_trial = None + if not request.template_trial_is_none: + template_trial = _from_proto_trial(request.template_trial) + + try: + trial_id = self._backend.create_new_trial(study_id, template_trial) + except KeyError as e: + context.abort(code=grpc.StatusCode.NOT_FOUND, details=str(e)) + + return 
api_pb2.CreateNewTrialReply(trial_id=trial_id) + + def SetTrialParameter( + self, + request: api_pb2.SetTrialParameterRequest, + context: grpc.ServicerContext, + ) -> api_pb2.SetTrialParameterReply: + trial_id = request.trial_id + param_name = request.param_name + param_value_internal = request.param_value_internal + distribution = json_to_distribution(request.distribution) + try: + self._backend.set_trial_param(trial_id, param_name, param_value_internal, distribution) + except KeyError as e: + context.abort(code=grpc.StatusCode.NOT_FOUND, details=str(e)) + except UpdateFinishedTrialError as e: + context.abort(code=grpc.StatusCode.FAILED_PRECONDITION, details=str(e)) + except ValueError as e: + context.abort(code=grpc.StatusCode.INVALID_ARGUMENT, details=str(e)) + return api_pb2.SetTrialParameterReply() + + def GetTrialIdFromStudyIdTrialNumber( + self, + request: api_pb2.GetTrialIdFromStudyIdTrialNumberRequest, + context: grpc.ServicerContext, + ) -> api_pb2.GetTrialIdFromStudyIdTrialNumberReply: + study_id = request.study_id + trial_number = request.trial_number + + try: + trial_id = self._backend.get_trial_id_from_study_id_trial_number( + study_id, trial_number + ) + except KeyError as e: + context.abort(code=grpc.StatusCode.NOT_FOUND, details=str(e)) + return api_pb2.GetTrialIdFromStudyIdTrialNumberReply(trial_id=trial_id) + + def SetTrialStateValues( + self, + request: api_pb2.SetTrialStateValuesRequest, + context: grpc.ServicerContext, + ) -> api_pb2.SetTrialStateValuesReply: + trial_id = request.trial_id + state = request.state + values = list(request.values) if request.values else None + try: + trial_updated = self._backend.set_trial_state_values( + trial_id, _from_proto_trial_state(state), values + ) + except KeyError as e: + context.abort(code=grpc.StatusCode.NOT_FOUND, details=str(e)) + except UpdateFinishedTrialError as e: + context.abort(code=grpc.StatusCode.FAILED_PRECONDITION, details=str(e)) + return api_pb2.SetTrialStateValuesReply(trial_updated=trial_updated) + + def SetTrialIntermediateValue( + self, + request: api_pb2.SetTrialIntermediateValueRequest, + context: grpc.ServicerContext, + ) -> api_pb2.SetTrialIntermediateValueReply: + trial_id = request.trial_id + step = request.step + intermediate_value = request.intermediate_value + try: + self._backend.set_trial_intermediate_value(trial_id, step, intermediate_value) + except KeyError as e: + context.abort(code=grpc.StatusCode.NOT_FOUND, details=str(e)) + except UpdateFinishedTrialError as e: + context.abort(code=grpc.StatusCode.FAILED_PRECONDITION, details=str(e)) + return api_pb2.SetTrialIntermediateValueReply() + + def SetTrialUserAttribute( + self, + request: api_pb2.SetTrialUserAttributeRequest, + context: grpc.ServicerContext, + ) -> api_pb2.SetTrialUserAttributeReply: + trial_id = request.trial_id + key = request.key + value = json.loads(request.value) + try: + self._backend.set_trial_user_attr(trial_id, key, value) + except KeyError as e: + context.abort(code=grpc.StatusCode.NOT_FOUND, details=str(e)) + except UpdateFinishedTrialError as e: + context.abort(code=grpc.StatusCode.FAILED_PRECONDITION, details=str(e)) + return api_pb2.SetTrialUserAttributeReply() + + def SetTrialSystemAttribute( + self, + request: api_pb2.SetTrialSystemAttributeRequest, + context: grpc.ServicerContext, + ) -> api_pb2.SetTrialSystemAttributeReply: + trial_id = request.trial_id + key = request.key + value = json.loads(request.value) + try: + self._backend.set_trial_system_attr(trial_id, key, value) + except KeyError as e: + 
context.abort(code=grpc.StatusCode.NOT_FOUND, details=str(e)) + except UpdateFinishedTrialError as e: + context.abort(code=grpc.StatusCode.FAILED_PRECONDITION, details=str(e)) + return api_pb2.SetTrialSystemAttributeReply() + + def GetTrial( + self, + request: api_pb2.GetTrialRequest, + context: grpc.ServicerContext, + ) -> api_pb2.GetTrialReply: + trial_id = request.trial_id + try: + trial = self._backend.get_trial(trial_id) + except KeyError as e: + context.abort(code=grpc.StatusCode.NOT_FOUND, details=str(e)) + + return api_pb2.GetTrialReply(trial=_to_proto_trial(trial)) + + def GetTrials( + self, + request: api_pb2.GetTrialsRequest, + context: grpc.ServicerContext, + ) -> api_pb2.GetTrialsReply: + study_id = request.study_id + included_trial_ids = set(request.included_trial_ids) + trial_id_greater_than = request.trial_id_greater_than + try: + trials = self._backend.get_all_trials(study_id, deepcopy=False) + except KeyError as e: + context.abort(code=grpc.StatusCode.NOT_FOUND, details=str(e)) + + filtered_trials = [ + _to_proto_trial(t) + for t in trials + if t._trial_id > trial_id_greater_than or t._trial_id in included_trial_ids + ] + return api_pb2.GetTrialsReply(trials=filtered_trials) + + +def _to_proto_trial_state(state: TrialState) -> api_pb2.TrialState.ValueType: + if state == TrialState.RUNNING: + return api_pb2.RUNNING + if state == TrialState.COMPLETE: + return api_pb2.COMPLETE + if state == TrialState.PRUNED: + return api_pb2.PRUNED + if state == TrialState.FAIL: + return api_pb2.FAIL + if state == TrialState.WAITING: + return api_pb2.WAITING + raise ValueError(f"Unknown TrialState: {state}") + + +def _from_proto_trial_state(state: api_pb2.TrialState.ValueType) -> TrialState: + if state == api_pb2.RUNNING: + return TrialState.RUNNING + if state == api_pb2.COMPLETE: + return TrialState.COMPLETE + if state == api_pb2.PRUNED: + return TrialState.PRUNED + if state == api_pb2.FAIL: + return TrialState.FAIL + if state == api_pb2.WAITING: + return TrialState.WAITING + raise ValueError(f"Unknown api_pb2.TrialState: {state}") + + +def _to_proto_trial(trial: FrozenTrial) -> api_pb2.Trial: + params = {} + for key, value in trial.params.items(): + params[key] = trial.distributions[key].to_internal_repr(value) + + return api_pb2.Trial( + trial_id=trial._trial_id, + number=trial.number, + state=_to_proto_trial_state(trial.state), + values=trial.values, + datetime_start=( + trial.datetime_start.strftime(DATETIME_FORMAT) if trial.datetime_start else "" + ), + datetime_complete=( + trial.datetime_complete.strftime(DATETIME_FORMAT) if trial.datetime_complete else "" + ), + distributions={ + key: distribution_to_json(distribution) + for key, distribution in trial.distributions.items() + }, + params=params, + user_attributes={key: json.dumps(value) for key, value in trial.user_attrs.items()}, + system_attributes={key: json.dumps(value) for key, value in trial.system_attrs.items()}, + intermediate_values={step: value for step, value in trial.intermediate_values.items()}, + ) + + +def _from_proto_trial(trial: api_pb2.Trial) -> FrozenTrial: + datetime_start = ( + datetime.strptime(trial.datetime_start, DATETIME_FORMAT) if trial.datetime_start else None + ) + datetime_complete = ( + datetime.strptime(trial.datetime_complete, DATETIME_FORMAT) + if trial.datetime_complete + else None + ) + distributions = { + key: json_to_distribution(value) for key, value in trial.distributions.items() + } + params = {} + for key, value in trial.params.items(): + params[key] = 
distributions[key].to_external_repr(value) + + return FrozenTrial( + trial_id=trial.trial_id, + number=trial.number, + state=_from_proto_trial_state(trial.state), + value=None, + values=trial.values if trial.values else None, + datetime_start=datetime_start, + datetime_complete=datetime_complete, + params=params, + distributions=distributions, + user_attrs={key: json.loads(value) for key, value in trial.user_attributes.items()}, + system_attrs={key: json.loads(value) for key, value in trial.system_attributes.items()}, + intermediate_values={step: value for step, value in trial.intermediate_values.items()}, + ) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/_heartbeat.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/_heartbeat.py new file mode 100644 index 0000000000000000000000000000000000000000..a933dbbcd0a1347e9630fa6b8e5cd26ded2ac875 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/_heartbeat.py @@ -0,0 +1,191 @@ +from __future__ import annotations + +import abc +from collections.abc import Callable +import copy +from threading import Event +from threading import Thread +from types import TracebackType + +import optuna +from optuna._experimental import experimental_func +from optuna.storages import BaseStorage +from optuna.trial import FrozenTrial +from optuna.trial import TrialState + + +class BaseHeartbeat(metaclass=abc.ABCMeta): + """Base class for heartbeat. + + This class is not supposed to be directly accessed by library users. + + The heartbeat mechanism periodically checks whether each trial process is alive during an + optimization loop. To support this mechanism, the methods of + :class:`~optuna.storages._heartbeat.BaseHeartbeat` is implemented for the target database + backend, typically with multiple inheritance of :class:`~optuna.storages._base.BaseStorage` + and :class:`~optuna.storages._heartbeat.BaseHeartbeat`. + + .. seealso:: + See :class:`~optuna.storages.RDBStorage`, where the backend supports heartbeat. + """ + + @abc.abstractmethod + def record_heartbeat(self, trial_id: int) -> None: + """Record the heartbeat of the trial. + + Args: + trial_id: + ID of the trial. + """ + raise NotImplementedError() + + @abc.abstractmethod + def _get_stale_trial_ids(self, study_id: int) -> list[int]: + """Get the stale trial ids of the study. + + Args: + study_id: + ID of the study. + Returns: + List of IDs of trials whose heartbeat has not been updated for a long time. + """ + raise NotImplementedError() + + @abc.abstractmethod + def get_heartbeat_interval(self) -> int | None: + """Get the heartbeat interval if it is set. + + Returns: + The heartbeat interval if it is set, otherwise :obj:`None`. + """ + raise NotImplementedError() + + @abc.abstractmethod + def get_failed_trial_callback(self) -> Callable[["optuna.Study", FrozenTrial], None] | None: + """Get the failed trial callback function. + + Returns: + The failed trial callback function if it is set, otherwise :obj:`None`. 
+ """ + raise NotImplementedError() + + +class BaseHeartbeatThread(metaclass=abc.ABCMeta): + def __enter__(self) -> None: + self.start() + + def __exit__( + self, + exc_type: type[Exception] | None, + exc_value: Exception | None, + traceback: TracebackType | None, + ) -> None: + self.join() + + @abc.abstractmethod + def start(self) -> None: + raise NotImplementedError() + + @abc.abstractmethod + def join(self) -> None: + raise NotImplementedError() + + +class NullHeartbeatThread(BaseHeartbeatThread): + def __init__(self) -> None: + pass + + def start(self) -> None: + pass + + def join(self) -> None: + pass + + +class HeartbeatThread(BaseHeartbeatThread): + def __init__(self, trial_id: int, heartbeat: BaseHeartbeat) -> None: + self._trial_id = trial_id + self._heartbeat = heartbeat + self._thread: Thread | None = None + self._stop_event: Event | None = None + + def start(self) -> None: + self._stop_event = Event() + self._thread = Thread( + target=self._record_heartbeat, args=(self._trial_id, self._heartbeat, self._stop_event) + ) + self._thread.start() + + def join(self) -> None: + assert self._stop_event is not None + assert self._thread is not None + self._stop_event.set() + self._thread.join() + + @staticmethod + def _record_heartbeat(trial_id: int, heartbeat: BaseHeartbeat, stop_event: Event) -> None: + heartbeat_interval = heartbeat.get_heartbeat_interval() + assert heartbeat_interval is not None + while True: + heartbeat.record_heartbeat(trial_id) + if stop_event.wait(timeout=heartbeat_interval): + return + + +def get_heartbeat_thread(trial_id: int, storage: BaseStorage) -> BaseHeartbeatThread: + if is_heartbeat_enabled(storage): + assert isinstance(storage, BaseHeartbeat) + return HeartbeatThread(trial_id, storage) + else: + return NullHeartbeatThread() + + +@experimental_func("2.9.0") +def fail_stale_trials(study: "optuna.Study") -> None: + """Fail stale trials and run their failure callbacks. + + The running trials whose heartbeat has not been updated for a long time will be failed, + that is, those states will be changed to :obj:`~optuna.trial.TrialState.FAIL`. + + .. seealso:: + + See :class:`~optuna.storages.RDBStorage`. + + Args: + study: + Study holding the trials to check. + """ + storage = study._storage + + if not isinstance(storage, BaseHeartbeat): + return + + if not is_heartbeat_enabled(storage): + return + + failed_trial_ids = [] + for trial_id in storage._get_stale_trial_ids(study._study_id): + try: + if storage.set_trial_state_values(trial_id, state=TrialState.FAIL): + failed_trial_ids.append(trial_id) + except optuna.exceptions.UpdateFinishedTrialError: + # If another process fails the trial, the storage raises + # optuna.exceptions.UpdateFinishedTrialError. + pass + + failed_trial_callback = storage.get_failed_trial_callback() + if failed_trial_callback is not None: + for trial_id in failed_trial_ids: + failed_trial = copy.deepcopy(storage.get_trial(trial_id)) + failed_trial_callback(study, failed_trial) + + +def is_heartbeat_enabled(storage: BaseStorage) -> bool: + """Check whether the storage enables the heartbeat. + + Returns: + :obj:`True` if the storage also inherits :class:`~optuna.storages._heartbeat.BaseHeartbeat` + and the return value of :meth:`~optuna.storages.BaseStorage.get_heartbeat_interval` is an + integer, otherwise :obj:`False`. 
+ """ + return isinstance(storage, BaseHeartbeat) and storage.get_heartbeat_interval() is not None diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/_in_memory.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/_in_memory.py new file mode 100644 index 0000000000000000000000000000000000000000..88085f9d9eb62da68dde2b8c2751cec2fe11674d --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/_in_memory.py @@ -0,0 +1,425 @@ +from __future__ import annotations + +from collections.abc import Container +from collections.abc import Sequence +import copy +from datetime import datetime +import threading +from typing import Any +import uuid + +import optuna +from optuna import distributions # NOQA +from optuna._typing import JSONSerializable +from optuna.exceptions import DuplicatedStudyError +from optuna.storages import BaseStorage +from optuna.storages._base import DEFAULT_STUDY_NAME_PREFIX +from optuna.study._frozen import FrozenStudy +from optuna.study._study_direction import StudyDirection +from optuna.trial import FrozenTrial +from optuna.trial import TrialState + + +_logger = optuna.logging.get_logger(__name__) + + +class InMemoryStorage(BaseStorage): + """Storage class that stores data in memory of the Python process. + + Example: + + Create an :class:`~optuna.storages.InMemoryStorage` instance. + + .. testcode:: + + import optuna + + + def objective(trial): + x = trial.suggest_float("x", -100, 100) + return x**2 + + + storage = optuna.storages.InMemoryStorage() + + study = optuna.create_study(storage=storage) + study.optimize(objective, n_trials=10) + """ + + def __init__(self) -> None: + self._trial_id_to_study_id_and_number: dict[int, tuple[int, int]] = {} + self._study_name_to_id: dict[str, int] = {} + self._studies: dict[int, _StudyInfo] = {} + + self._max_study_id = -1 + self._max_trial_id = -1 + + self._lock = threading.RLock() + self._prev_waiting_trial_number: dict[int, int] = {} + + def __getstate__(self) -> dict[Any, Any]: + state = self.__dict__.copy() + del state["_lock"] + return state + + def __setstate__(self, state: dict[Any, Any]) -> None: + self.__dict__.update(state) + self._lock = threading.RLock() + + def create_new_study( + self, directions: Sequence[StudyDirection], study_name: str | None = None + ) -> int: + with self._lock: + study_id = self._max_study_id + 1 + self._max_study_id += 1 + + if study_name is not None: + if study_name in self._study_name_to_id: + raise DuplicatedStudyError + else: + study_uuid = str(uuid.uuid4()) + study_name = DEFAULT_STUDY_NAME_PREFIX + study_uuid + + self._studies[study_id] = _StudyInfo(study_name, list(directions)) + self._study_name_to_id[study_name] = study_id + self._prev_waiting_trial_number[study_id] = 0 + + _logger.info("A new study created in memory with name: {}".format(study_name)) + + return study_id + + def delete_study(self, study_id: int) -> None: + with self._lock: + self._check_study_id(study_id) + + for trial in self._studies[study_id].trials: + del self._trial_id_to_study_id_and_number[trial._trial_id] + study_name = self._studies[study_id].name + del self._study_name_to_id[study_name] + del self._studies[study_id] + del self._prev_waiting_trial_number[study_id] + + def set_study_user_attr(self, study_id: int, key: str, value: Any) -> None: + with self._lock: + self._check_study_id(study_id) + + self._studies[study_id].user_attrs[key] = value + + def 
set_study_system_attr(self, study_id: int, key: str, value: JSONSerializable) -> None: + with self._lock: + self._check_study_id(study_id) + + self._studies[study_id].system_attrs[key] = value + + def get_study_id_from_name(self, study_name: str) -> int: + with self._lock: + if study_name not in self._study_name_to_id: + raise KeyError("No such study {}.".format(study_name)) + + return self._study_name_to_id[study_name] + + def get_study_name_from_id(self, study_id: int) -> str: + with self._lock: + self._check_study_id(study_id) + return self._studies[study_id].name + + def get_study_directions(self, study_id: int) -> list[StudyDirection]: + with self._lock: + self._check_study_id(study_id) + return self._studies[study_id].directions + + def get_study_user_attrs(self, study_id: int) -> dict[str, Any]: + with self._lock: + self._check_study_id(study_id) + return self._studies[study_id].user_attrs + + def get_study_system_attrs(self, study_id: int) -> dict[str, Any]: + with self._lock: + self._check_study_id(study_id) + return self._studies[study_id].system_attrs + + def get_all_studies(self) -> list[FrozenStudy]: + with self._lock: + return [self._build_frozen_study(study_id) for study_id in self._studies] + + def _build_frozen_study(self, study_id: int) -> FrozenStudy: + study = self._studies[study_id] + return FrozenStudy( + study_name=study.name, + direction=None, + directions=study.directions, + user_attrs=copy.deepcopy(study.user_attrs), + system_attrs=copy.deepcopy(study.system_attrs), + study_id=study_id, + ) + + def create_new_trial(self, study_id: int, template_trial: FrozenTrial | None = None) -> int: + with self._lock: + self._check_study_id(study_id) + + if template_trial is None: + trial = self._create_running_trial() + else: + trial = copy.deepcopy(template_trial) + + trial_id = self._max_trial_id + 1 + self._max_trial_id += 1 + trial.number = len(self._studies[study_id].trials) + trial._trial_id = trial_id + self._trial_id_to_study_id_and_number[trial_id] = (study_id, trial.number) + self._studies[study_id].trials.append(trial) + self._update_cache(trial_id, study_id) + return trial_id + + @staticmethod + def _create_running_trial() -> FrozenTrial: + return FrozenTrial( + trial_id=-1, # dummy value. + number=-1, # dummy value. + state=TrialState.RUNNING, + params={}, + distributions={}, + user_attrs={}, + system_attrs={}, + value=None, + intermediate_values={}, + datetime_start=datetime.now(), + datetime_complete=None, + ) + + def set_trial_param( + self, + trial_id: int, + param_name: str, + param_value_internal: float, + distribution: distributions.BaseDistribution, + ) -> None: + with self._lock: + trial = self._get_trial(trial_id) + + self.check_trial_is_updatable(trial_id, trial.state) + + study_id = self._trial_id_to_study_id_and_number[trial_id][0] + # Check param distribution compatibility with previous trial(s). + if param_name in self._studies[study_id].param_distribution: + distributions.check_distribution_compatibility( + self._studies[study_id].param_distribution[param_name], distribution + ) + + # Set param distribution. + self._studies[study_id].param_distribution[param_name] = distribution + + # Set param. 
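+            # Replace the stored FrozenTrial with an updated copy instead of mutating it
+            # in place, so trials already handed out via get_all_trials(deepcopy=False)
+            # keep an unmodified snapshot (the "replacing trick" noted in get_all_trials).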
+ trial = copy.copy(trial) + trial.params = copy.copy(trial.params) + trial.params[param_name] = distribution.to_external_repr(param_value_internal) + trial.distributions = copy.copy(trial.distributions) + trial.distributions[param_name] = distribution + self._set_trial(trial_id, trial) + + def get_trial_id_from_study_id_trial_number(self, study_id: int, trial_number: int) -> int: + with self._lock: + study = self._studies.get(study_id) + if study is None: + raise KeyError("No study with study_id {} exists.".format(study_id)) + + trials = study.trials + if len(trials) <= trial_number: + raise KeyError( + "No trial with trial number {} exists in study with study_id {}.".format( + trial_number, study_id + ) + ) + + trial = trials[trial_number] + assert trial.number == trial_number + + return trial._trial_id + + def get_trial_number_from_id(self, trial_id: int) -> int: + with self._lock: + self._check_trial_id(trial_id) + + return self._trial_id_to_study_id_and_number[trial_id][1] + + def get_best_trial(self, study_id: int) -> FrozenTrial: + with self._lock: + self._check_study_id(study_id) + + best_trial_id = self._studies[study_id].best_trial_id + + if best_trial_id is None: + raise ValueError("No trials are completed yet.") + elif len(self._studies[study_id].directions) > 1: + raise RuntimeError( + "Best trial can be obtained only for single-objective optimization." + ) + return self.get_trial(best_trial_id) + + def get_trial_param(self, trial_id: int, param_name: str) -> float: + with self._lock: + trial = self._get_trial(trial_id) + + distribution = trial.distributions[param_name] + return distribution.to_internal_repr(trial.params[param_name]) + + def set_trial_state_values( + self, trial_id: int, state: TrialState, values: Sequence[float] | None = None + ) -> bool: + with self._lock: + trial = copy.copy(self._get_trial(trial_id)) + self.check_trial_is_updatable(trial_id, trial.state) + + if state == TrialState.RUNNING and trial.state != TrialState.WAITING: + return False + + trial.state = state + if values is not None: + trial.values = values + + if state == TrialState.RUNNING: + trial.datetime_start = datetime.now() + + if state.is_finished(): + trial.datetime_complete = datetime.now() + self._set_trial(trial_id, trial) + study_id = self._trial_id_to_study_id_and_number[trial_id][0] + self._update_cache(trial_id, study_id) + else: + self._set_trial(trial_id, trial) + + return True + + def _update_cache(self, trial_id: int, study_id: int) -> None: + trial = self._get_trial(trial_id) + + if trial.state != TrialState.COMPLETE: + return + + best_trial_id = self._studies[study_id].best_trial_id + if best_trial_id is None: + self._studies[study_id].best_trial_id = trial_id + return + + _directions = self.get_study_directions(study_id) + if len(_directions) > 1: + return + direction = _directions[0] + + best_trial = self._get_trial(best_trial_id) + assert best_trial is not None + if best_trial.value is None: + self._studies[study_id].best_trial_id = trial_id + return + # Complete trials do not have `None` values. 
+ assert trial.value is not None + best_value = best_trial.value + new_value = trial.value + + if direction == StudyDirection.MAXIMIZE: + if best_value < new_value: + self._studies[study_id].best_trial_id = trial_id + else: + if best_value > new_value: + self._studies[study_id].best_trial_id = trial_id + + def set_trial_intermediate_value( + self, trial_id: int, step: int, intermediate_value: float + ) -> None: + with self._lock: + trial = self._get_trial(trial_id) + self.check_trial_is_updatable(trial_id, trial.state) + + trial = copy.copy(trial) + trial.intermediate_values = copy.copy(trial.intermediate_values) + trial.intermediate_values[step] = intermediate_value + self._set_trial(trial_id, trial) + + def set_trial_user_attr(self, trial_id: int, key: str, value: Any) -> None: + with self._lock: + self._check_trial_id(trial_id) + trial = self._get_trial(trial_id) + self.check_trial_is_updatable(trial_id, trial.state) + + trial = copy.copy(trial) + trial.user_attrs = copy.copy(trial.user_attrs) + trial.user_attrs[key] = value + self._set_trial(trial_id, trial) + + def set_trial_system_attr(self, trial_id: int, key: str, value: JSONSerializable) -> None: + with self._lock: + trial = self._get_trial(trial_id) + self.check_trial_is_updatable(trial_id, trial.state) + + trial = copy.copy(trial) + trial.system_attrs = copy.copy(trial.system_attrs) + trial.system_attrs[key] = value + self._set_trial(trial_id, trial) + + def get_trial(self, trial_id: int) -> FrozenTrial: + with self._lock: + return self._get_trial(trial_id) + + def _get_trial(self, trial_id: int) -> FrozenTrial: + self._check_trial_id(trial_id) + study_id, trial_number = self._trial_id_to_study_id_and_number[trial_id] + return self._studies[study_id].trials[trial_number] + + def _set_trial(self, trial_id: int, trial: FrozenTrial) -> None: + study_id, trial_number = self._trial_id_to_study_id_and_number[trial_id] + self._studies[study_id].trials[trial_number] = trial + + def get_all_trials( + self, + study_id: int, + deepcopy: bool = True, + states: Container[TrialState] | None = None, + ) -> list[FrozenTrial]: + with self._lock: + self._check_study_id(study_id) + + # Optimized retrieval of trials in the WAITING state to improve performance + # for the call, `get_all_trials(states=(TrialState.WAITING,))`. + if states == (TrialState.WAITING,): + trials: list[FrozenTrial] = [] + for trial in self._studies[study_id].trials[ + self._prev_waiting_trial_number[study_id] : + ]: + if trial.state == TrialState.WAITING: + if not trials: + self._prev_waiting_trial_number[study_id] = trial.number + trials.append(trial) + if not trials: + self._prev_waiting_trial_number[study_id] = len(self._studies[study_id].trials) + + else: + trials = self._studies[study_id].trials + if states is not None: + trials = [t for t in trials if t.state in states] + + if deepcopy: + trials = copy.deepcopy(trials) + else: + # This copy is required for the replacing trick in `set_trial_xxx`. 
+ trials = copy.copy(trials) + + return trials + + def _check_study_id(self, study_id: int) -> None: + if study_id not in self._studies: + raise KeyError("No study with study_id {} exists.".format(study_id)) + + def _check_trial_id(self, trial_id: int) -> None: + if trial_id not in self._trial_id_to_study_id_and_number: + raise KeyError("No trial with trial_id {} exists.".format(trial_id)) + + +class _StudyInfo: + def __init__(self, name: str, directions: list[StudyDirection]) -> None: + self.trials: list[FrozenTrial] = [] + self.param_distribution: dict[str, distributions.BaseDistribution] = {} + self.user_attrs: dict[str, Any] = {} + self.system_attrs: dict[str, Any] = {} + self.name: str = name + self.directions: list[StudyDirection] = directions + self.best_trial_id: int | None = None diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/_rdb/__init__.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/_rdb/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/_rdb/alembic.ini b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/_rdb/alembic.ini new file mode 100644 index 0000000000000000000000000000000000000000..52681f91aacbb835bef6d991229ba33ed8cdebc4 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/_rdb/alembic.ini @@ -0,0 +1,75 @@ +# A generic, single database configuration. + +[alembic] +# path to migration scripts +script_location = alembic + +# template used to generate migration files +# file_template = %%(rev)s_%%(slug)s + +# timezone to use when rendering the date +# within the migration file as well as the filename. +# string value is passed to dateutil.tz.gettz() +# leave blank for localtime +# timezone = + +# max length of characters to apply to the +# "slug" field +#truncate_slug_length = 40 + +# set to 'true' to run the environment during +# the 'revision' command, regardless of autogenerate +# revision_environment = false + +# set to 'true' to allow .pyc and .pyo files without +# a source .py file to be detected as revisions in the +# versions/ directory +# sourceless = false + +# version location specification; this defaults +# to alembic/versions. When using multiple version +# directories, initial revisions must be specified with --version-path +# version_locations = %(here)s/bar %(here)s/bat alembic/versions + +# the output encoding used when revision files +# are written from script.py.mako +# output_encoding = utf-8 + +# NOTE: This URL is only used when generating migration scripts. 
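+# (The placeholder below is not used to connect at runtime; the RDB storage supplies
+# the actual database URL to Alembic programmatically when running migrations.)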
+sqlalchemy.url = sqlite:///alembic.db + + +# Logging configuration +[loggers] +keys = root,sqlalchemy,alembic + +[handlers] +keys = console + +[formatters] +keys = generic + +[logger_root] +level = WARN +handlers = console +qualname = + +[logger_sqlalchemy] +level = WARN +handlers = +qualname = sqlalchemy.engine + +[logger_alembic] +level = INFO +handlers = +qualname = alembic + +[handler_console] +class = StreamHandler +args = (sys.stderr,) +level = NOTSET +formatter = generic + +[formatter_generic] +format = %(levelname)-5.5s [%(name)s] %(message)s +datefmt = %H:%M:%S diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/_rdb/alembic/env.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/_rdb/alembic/env.py new file mode 100644 index 0000000000000000000000000000000000000000..7b6c569981e85aa5a005417eb0d93351daf33391 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/_rdb/alembic/env.py @@ -0,0 +1,79 @@ +import logging +from logging.config import fileConfig + +from alembic import context +from sqlalchemy import engine_from_config +from sqlalchemy import pool + +import optuna.storages._rdb.models + + +# this is the Alembic Config object, which provides +# access to the values within the .ini file in use. +config = context.config + +# Interpret the config file for Python logging. +# This line sets up loggers basically. + +if len(logging.getLogger().handlers) == 0: + fileConfig(config.config_file_name) + +# add your model's MetaData object here +# for 'autogenerate' support +# from myapp import mymodel +# target_metadata = mymodel.Base.metadata +target_metadata = optuna.storages._rdb.models.BaseModel.metadata + +# other values from the config, defined by the needs of env.py, +# can be acquired: +# my_important_option = config.get_main_option("my_important_option") +# ... etc. + + +def run_migrations_offline(): + """Run migrations in 'offline' mode. + + This configures the context with just a URL + and not an Engine, though an Engine is acceptable + here as well. By skipping the Engine creation + we don't even need a DBAPI to be available. + + Calls to context.execute() here emit the given string to the + script output. + + """ + url = config.get_main_option("sqlalchemy.url") + context.configure( + url=url, target_metadata=target_metadata, literal_binds=True, render_as_batch=True + ) + + with context.begin_transaction(): + context.run_migrations() + + +def run_migrations_online(): + """Run migrations in 'online' mode. + + In this scenario we need to create an Engine + and associate a connection with the context. 
+ + """ + connectable = engine_from_config( + config.get_section(config.config_ini_section), + prefix="sqlalchemy.", + poolclass=pool.NullPool, + ) + + with connectable.connect() as connection: + context.configure( + connection=connection, target_metadata=target_metadata, render_as_batch=True + ) + + with context.begin_transaction(): + context.run_migrations() + + +if context.is_offline_mode(): + run_migrations_offline() +else: + run_migrations_online() diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/_rdb/alembic/script.py.mako b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/_rdb/alembic/script.py.mako new file mode 100644 index 0000000000000000000000000000000000000000..2c0156303a8df3ffdc9de87765bf801bf6bea4a5 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/_rdb/alembic/script.py.mako @@ -0,0 +1,24 @@ +"""${message} + +Revision ID: ${up_revision} +Revises: ${down_revision | comma,n} +Create Date: ${create_date} + +""" +from alembic import op +import sqlalchemy as sa +${imports if imports else ""} + +# revision identifiers, used by Alembic. +revision = ${repr(up_revision)} +down_revision = ${repr(down_revision)} +branch_labels = ${repr(branch_labels)} +depends_on = ${repr(depends_on)} + + +def upgrade(): + ${upgrades if upgrades else "pass"} + + +def downgrade(): + ${downgrades if downgrades else "pass"} diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/_rdb/alembic/versions/v0.9.0.a.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/_rdb/alembic/versions/v0.9.0.a.py new file mode 100644 index 0000000000000000000000000000000000000000..229757218eff73678433c48ea157cbf8c9393094 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/_rdb/alembic/versions/v0.9.0.a.py @@ -0,0 +1,132 @@ +"""empty message + +Revision ID: v0.9.0.a +Revises: +Create Date: 2019-03-12 12:30:31.178819 + +""" + +from alembic import op +import sqlalchemy as sa + +# revision identifiers, used by Alembic. +revision = "v0.9.0.a" +down_revision = None +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! 
### + op.create_table( + "studies", + sa.Column("study_id", sa.Integer(), nullable=False), + sa.Column("study_name", sa.String(length=512), nullable=False), + sa.Column( + "direction", + sa.Enum("NOT_SET", "MINIMIZE", "MAXIMIZE", name="studydirection"), + nullable=False, + ), + sa.PrimaryKeyConstraint("study_id"), + ) + op.create_index(op.f("ix_studies_study_name"), "studies", ["study_name"], unique=True) + op.create_table( + "version_info", + sa.Column("version_info_id", sa.Integer(), autoincrement=False, nullable=False), + sa.Column("schema_version", sa.Integer(), nullable=True), + sa.Column("library_version", sa.String(length=256), nullable=True), + sa.CheckConstraint("version_info_id=1"), + sa.PrimaryKeyConstraint("version_info_id"), + ) + op.create_table( + "study_system_attributes", + sa.Column("study_system_attribute_id", sa.Integer(), nullable=False), + sa.Column("study_id", sa.Integer(), nullable=True), + sa.Column("key", sa.String(length=512), nullable=True), + sa.Column("value_json", sa.String(length=2048), nullable=True), + sa.ForeignKeyConstraint(["study_id"], ["studies.study_id"]), + sa.PrimaryKeyConstraint("study_system_attribute_id"), + sa.UniqueConstraint("study_id", "key"), + ) + op.create_table( + "study_user_attributes", + sa.Column("study_user_attribute_id", sa.Integer(), nullable=False), + sa.Column("study_id", sa.Integer(), nullable=True), + sa.Column("key", sa.String(length=512), nullable=True), + sa.Column("value_json", sa.String(length=2048), nullable=True), + sa.ForeignKeyConstraint(["study_id"], ["studies.study_id"]), + sa.PrimaryKeyConstraint("study_user_attribute_id"), + sa.UniqueConstraint("study_id", "key"), + ) + op.create_table( + "trials", + sa.Column("trial_id", sa.Integer(), nullable=False), + sa.Column("study_id", sa.Integer(), nullable=True), + sa.Column( + "state", + sa.Enum("RUNNING", "COMPLETE", "PRUNED", "FAIL", name="trialstate"), + nullable=False, + ), + sa.Column("value", sa.Float(), nullable=True), + sa.Column("datetime_start", sa.DateTime(), nullable=True), + sa.Column("datetime_complete", sa.DateTime(), nullable=True), + sa.ForeignKeyConstraint(["study_id"], ["studies.study_id"]), + sa.PrimaryKeyConstraint("trial_id"), + ) + op.create_table( + "trial_params", + sa.Column("param_id", sa.Integer(), nullable=False), + sa.Column("trial_id", sa.Integer(), nullable=True), + sa.Column("param_name", sa.String(length=512), nullable=True), + sa.Column("param_value", sa.Float(), nullable=True), + sa.Column("distribution_json", sa.String(length=2048), nullable=True), + sa.ForeignKeyConstraint(["trial_id"], ["trials.trial_id"]), + sa.PrimaryKeyConstraint("param_id"), + sa.UniqueConstraint("trial_id", "param_name"), + ) + op.create_table( + "trial_system_attributes", + sa.Column("trial_system_attribute_id", sa.Integer(), nullable=False), + sa.Column("trial_id", sa.Integer(), nullable=True), + sa.Column("key", sa.String(length=512), nullable=True), + sa.Column("value_json", sa.String(length=2048), nullable=True), + sa.ForeignKeyConstraint(["trial_id"], ["trials.trial_id"]), + sa.PrimaryKeyConstraint("trial_system_attribute_id"), + sa.UniqueConstraint("trial_id", "key"), + ) + op.create_table( + "trial_user_attributes", + sa.Column("trial_user_attribute_id", sa.Integer(), nullable=False), + sa.Column("trial_id", sa.Integer(), nullable=True), + sa.Column("key", sa.String(length=512), nullable=True), + sa.Column("value_json", sa.String(length=2048), nullable=True), + sa.ForeignKeyConstraint(["trial_id"], ["trials.trial_id"]), + 
sa.PrimaryKeyConstraint("trial_user_attribute_id"), + sa.UniqueConstraint("trial_id", "key"), + ) + op.create_table( + "trial_values", + sa.Column("trial_value_id", sa.Integer(), nullable=False), + sa.Column("trial_id", sa.Integer(), nullable=True), + sa.Column("step", sa.Integer(), nullable=True), + sa.Column("value", sa.Float(), nullable=True), + sa.ForeignKeyConstraint(["trial_id"], ["trials.trial_id"]), + sa.PrimaryKeyConstraint("trial_value_id"), + sa.UniqueConstraint("trial_id", "step"), + ) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.drop_table("trial_values") + op.drop_table("trial_user_attributes") + op.drop_table("trial_system_attributes") + op.drop_table("trial_params") + op.drop_table("trials") + op.drop_table("study_user_attributes") + op.drop_table("study_system_attributes") + op.drop_table("version_info") + op.drop_index(op.f("ix_studies_study_name"), table_name="studies") + op.drop_table("studies") + # ### end Alembic commands ### diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/_rdb/alembic/versions/v1.2.0.a.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/_rdb/alembic/versions/v1.2.0.a.py new file mode 100644 index 0000000000000000000000000000000000000000..b868a45909eb7827b89305af29fcdebd419c440e --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/_rdb/alembic/versions/v1.2.0.a.py @@ -0,0 +1,37 @@ +"""empty message + +Revision ID: v1.2.0.a +Revises: v0.9.0.a +Create Date: 2020-02-05 15:17:41.458947 + +""" + +from alembic import op +import sqlalchemy as sa + + +# revision identifiers, used by Alembic. +revision = "v1.2.0.a" +down_revision = "v0.9.0.a" +branch_labels = None +depends_on = None + + +def upgrade(): + with op.batch_alter_table("trials") as batch_op: + batch_op.alter_column( + "state", + type_=sa.Enum("RUNNING", "COMPLETE", "PRUNED", "FAIL", "WAITING", name="trialstate"), + existing_type=sa.Enum("RUNNING", "COMPLETE", "PRUNED", "FAIL", name="trialstate"), + ) + + +def downgrade(): + with op.batch_alter_table("trials") as batch_op: + batch_op.alter_column( + "state", + type_=sa.Enum("RUNNING", "COMPLETE", "PRUNED", "FAIL", name="trialstate"), + existing_type=sa.Enum( + "RUNNING", "COMPLETE", "PRUNED", "FAIL", "WAITING", name="trialstate" + ), + ) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/_rdb/alembic/versions/v1.3.0.a.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/_rdb/alembic/versions/v1.3.0.a.py new file mode 100644 index 0000000000000000000000000000000000000000..a24f837a491dcd6e7d420d6694c1ffa4eb95f563 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/_rdb/alembic/versions/v1.3.0.a.py @@ -0,0 +1,103 @@ +"""empty message + +Revision ID: v1.3.0.a +Revises: v1.2.0.a +Create Date: 2020-02-14 16:23:04.800808 + +""" + +import json + +from alembic import op +import sqlalchemy as sa + +from sqlalchemy.exc import SQLAlchemyError +from sqlalchemy import orm + +try: + from sqlalchemy.orm import declarative_base +except ImportError: + # TODO(c-bata): Remove this after dropping support for SQLAlchemy v1.3 or prior. + from sqlalchemy.ext.declarative import declarative_base + +# revision identifiers, used by Alembic. 
+revision = "v1.3.0.a" +down_revision = "v1.2.0.a" +branch_labels = None +depends_on = None + +# Model definition +MAX_INDEXED_STRING_LENGTH = 512 +MAX_STRING_LENGTH = 2048 +BaseModel = declarative_base() + + +class TrialModel(BaseModel): + __tablename__ = "trials" + trial_id = sa.Column(sa.Integer, primary_key=True) + number = sa.Column(sa.Integer) + + +class TrialSystemAttributeModel(BaseModel): + __tablename__ = "trial_system_attributes" + trial_system_attribute_id = sa.Column(sa.Integer, primary_key=True) + trial_id = sa.Column(sa.Integer, sa.ForeignKey("trials.trial_id")) + key = sa.Column(sa.String(MAX_INDEXED_STRING_LENGTH)) + value_json = sa.Column(sa.String(MAX_STRING_LENGTH)) + + +def upgrade(): + bind = op.get_bind() + session = orm.Session(bind=bind) + + with op.batch_alter_table("trials") as batch_op: + batch_op.add_column(sa.Column("number", sa.Integer(), nullable=True, default=None)) + + try: + number_records = ( + session.query(TrialSystemAttributeModel) + .filter(TrialSystemAttributeModel.key == "_number") + .all() + ) + mapping = [ + {"trial_id": r.trial_id, "number": json.loads(r.value_json)} for r in number_records + ] + session.bulk_update_mappings(TrialModel, mapping) + + stmt = ( + sa.delete(TrialSystemAttributeModel) + .where(TrialSystemAttributeModel.key == "_number") + .execution_options(synchronize_session=False) + ) + session.execute(stmt) + session.commit() + except SQLAlchemyError as e: + session.rollback() + raise e + finally: + session.close() + + +def downgrade(): + bind = op.get_bind() + session = orm.Session(bind=bind) + + try: + number_attrs = [] + trials = session.query(TrialModel).all() + for trial in trials: + number_attrs.append( + TrialSystemAttributeModel( + trial_id=trial.trial_id, key="_number", value_json=json.dumps(trial.number) + ) + ) + session.bulk_save_objects(number_attrs) + session.commit() + except SQLAlchemyError as e: + session.rollback() + raise e + finally: + session.close() + + with op.batch_alter_table("trials") as batch_op: + batch_op.drop_column("number") diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/_rdb/alembic/versions/v2.4.0.a.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/_rdb/alembic/versions/v2.4.0.a.py new file mode 100644 index 0000000000000000000000000000000000000000..fdfc0390d292c974730b6e27158f862aaf8503b7 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/_rdb/alembic/versions/v2.4.0.a.py @@ -0,0 +1,187 @@ +"""empty message + +Revision ID: v2.4.0.a +Revises: v1.3.0.a +Create Date: 2020-11-17 02:16:16.536171 + +""" + +from alembic import op +import sqlalchemy as sa +from typing import Any + +from sqlalchemy import Column +from sqlalchemy import Enum +from sqlalchemy import Float +from sqlalchemy import ForeignKey +from sqlalchemy import Integer +from sqlalchemy import UniqueConstraint +from sqlalchemy.exc import SQLAlchemyError +from sqlalchemy import orm + +from optuna.study import StudyDirection + +try: + from sqlalchemy.orm import declarative_base +except ImportError: + # TODO(c-bata): Remove this after dropping support for SQLAlchemy v1.3 or prior. + from sqlalchemy.ext.declarative import declarative_base + + +# revision identifiers, used by Alembic. 
+revision = "v2.4.0.a" +down_revision = "v1.3.0.a" +branch_labels = None +depends_on = None + +# Model definition +BaseModel = declarative_base() + + +class StudyModel(BaseModel): + __tablename__ = "studies" + study_id = Column(Integer, primary_key=True) + direction = sa.Column(sa.Enum(StudyDirection)) + + +class StudyDirectionModel(BaseModel): + __tablename__ = "study_directions" + __table_args__: Any = (UniqueConstraint("study_id", "objective"),) + study_direction_id = Column(Integer, primary_key=True) + direction = Column(Enum(StudyDirection), nullable=False) + study_id = Column(Integer, ForeignKey("studies.study_id"), nullable=False) + objective = Column(Integer, nullable=False) + + +class TrialModel(BaseModel): + __tablename__ = "trials" + trial_id = Column(Integer, primary_key=True) + number = Column(Integer) + study_id = Column(Integer, ForeignKey("studies.study_id")) + value = sa.Column(sa.Float) + + +class TrialValueModel(BaseModel): + __tablename__ = "trial_values" + __table_args__: Any = (UniqueConstraint("trial_id", "objective"),) + trial_value_id = Column(Integer, primary_key=True) + trial_id = Column(Integer, ForeignKey("trials.trial_id"), nullable=False) + objective = Column(Integer, nullable=False) + value = Column(Float, nullable=False) + step = sa.Column(sa.Integer) + + +class TrialIntermediateValueModel(BaseModel): + __tablename__ = "trial_intermediate_values" + __table_args__: Any = (UniqueConstraint("trial_id", "step"),) + trial_intermediate_value_id = Column(Integer, primary_key=True) + trial_id = Column(Integer, ForeignKey("trials.trial_id"), nullable=False) + step = Column(Integer, nullable=False) + intermediate_value = Column(Float, nullable=False) + + +def upgrade(): + bind = op.get_bind() + inspector = sa.inspect(bind) + tables = inspector.get_table_names() + + if "study_directions" not in tables: + op.create_table( + "study_directions", + sa.Column("study_direction_id", sa.Integer(), nullable=False), + sa.Column( + "direction", + sa.Enum("NOT_SET", "MINIMIZE", "MAXIMIZE", name="studydirection"), + nullable=False, + ), + sa.Column("study_id", sa.Integer(), nullable=False), + sa.Column("objective", sa.Integer(), nullable=False), + sa.ForeignKeyConstraint( + ["study_id"], + ["studies.study_id"], + ), + sa.PrimaryKeyConstraint("study_direction_id"), + sa.UniqueConstraint("study_id", "objective"), + ) + + if "trial_intermediate_values" not in tables: + op.create_table( + "trial_intermediate_values", + sa.Column("trial_intermediate_value_id", sa.Integer(), nullable=False), + sa.Column("trial_id", sa.Integer(), nullable=False), + sa.Column("step", sa.Integer(), nullable=False), + sa.Column("intermediate_value", sa.Float(), nullable=False), + sa.ForeignKeyConstraint( + ["trial_id"], + ["trials.trial_id"], + ), + sa.PrimaryKeyConstraint("trial_intermediate_value_id"), + sa.UniqueConstraint("trial_id", "step"), + ) + + session = orm.Session(bind=bind) + try: + studies_records = session.query(StudyModel).all() + objects = [ + StudyDirectionModel(study_id=r.study_id, direction=r.direction, objective=0) + for r in studies_records + ] + session.bulk_save_objects(objects) + + intermediate_values_records = session.query( + TrialValueModel.trial_id, TrialValueModel.value, TrialValueModel.step + ).all() + objects = [ + TrialIntermediateValueModel( + trial_id=r.trial_id, intermediate_value=r.value, step=r.step + ) + for r in intermediate_values_records + ] + session.bulk_save_objects(objects) + + session.query(TrialValueModel).delete() + session.commit() + + with 
op.batch_alter_table("trial_values", schema=None) as batch_op: + batch_op.add_column(sa.Column("objective", sa.Integer(), nullable=False)) + # The name of this constraint is manually determined. + # In the future, the naming convention may be determined based on + # https://alembic.sqlalchemy.org/en/latest/naming.html + batch_op.create_unique_constraint( + "uq_trial_values_trial_id_objective", ["trial_id", "objective"] + ) + + trials_records = session.query(TrialModel).all() + objects = [ + TrialValueModel(trial_id=r.trial_id, value=r.value, objective=0) + for r in trials_records + ] + session.bulk_save_objects(objects) + + session.commit() + except SQLAlchemyError as e: + session.rollback() + raise e + finally: + session.close() + + with op.batch_alter_table("studies", schema=None) as batch_op: + batch_op.drop_column("direction") + + with op.batch_alter_table("trial_values", schema=None) as batch_op: + batch_op.drop_column("step") + + with op.batch_alter_table("trials", schema=None) as batch_op: + batch_op.drop_column("value") + + for c in inspector.get_unique_constraints("trial_values"): + # MySQL changes the uniq constraint of (trial_id, step) to that of trial_id. + if c["column_names"] == ["trial_id"]: + with op.batch_alter_table("trial_values", schema=None) as batch_op: + batch_op.drop_constraint(c["name"], type_="unique") + break + + +# TODO(imamura): Implement downgrade +def downgrade(): + pass diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/_rdb/alembic/versions/v2.6.0.a_.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/_rdb/alembic/versions/v2.6.0.a_.py new file mode 100644 index 0000000000000000000000000000000000000000..76f7873d4ba403505a62c913b0224b0617e82047 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/_rdb/alembic/versions/v2.6.0.a_.py @@ -0,0 +1,45 @@ +"""empty message + +Revision ID: v2.6.0.a +Revises: v2.4.0.a +Create Date: 2021-03-01 11:30:32.214196 + +""" + +from alembic import op +import sqlalchemy as sa + + +# revision identifiers, used by Alembic. 
+revision = "v2.6.0.a" +down_revision = "v2.4.0.a" +branch_labels = None +depends_on = None + +MAX_STRING_LENGTH = 2048 + + +def upgrade(): + with op.batch_alter_table("study_user_attributes") as batch_op: + batch_op.alter_column("value_json", type_=sa.TEXT) + with op.batch_alter_table("study_system_attributes") as batch_op: + batch_op.alter_column("value_json", type_=sa.TEXT) + with op.batch_alter_table("trial_user_attributes") as batch_op: + batch_op.alter_column("value_json", type_=sa.TEXT) + with op.batch_alter_table("trial_system_attributes") as batch_op: + batch_op.alter_column("value_json", type_=sa.TEXT) + with op.batch_alter_table("trial_params") as batch_op: + batch_op.alter_column("distribution_json", type_=sa.TEXT) + + +def downgrade(): + with op.batch_alter_table("study_user_attributes") as batch_op: + batch_op.alter_column("value_json", type_=sa.String(MAX_STRING_LENGTH)) + with op.batch_alter_table("study_system_attributes") as batch_op: + batch_op.alter_column("value_json", type_=sa.String(MAX_STRING_LENGTH)) + with op.batch_alter_table("trial_user_attributes") as batch_op: + batch_op.alter_column("value_json", type_=sa.String(MAX_STRING_LENGTH)) + with op.batch_alter_table("trial_system_attributes") as batch_op: + batch_op.alter_column("value_json", type_=sa.String(MAX_STRING_LENGTH)) + with op.batch_alter_table("trial_params") as batch_op: + batch_op.alter_column("distribution_json", type_=sa.String(MAX_STRING_LENGTH)) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/_rdb/alembic/versions/v3.0.0.a.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/_rdb/alembic/versions/v3.0.0.a.py new file mode 100644 index 0000000000000000000000000000000000000000..0e7b21f4d676fa8501d60da434ceeb6727af841f --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/_rdb/alembic/versions/v3.0.0.a.py @@ -0,0 +1,202 @@ +"""unify existing distributions to {int,float} distribution + +Revision ID: v3.0.0.a +Revises: v2.6.0.a +Create Date: 2021-11-21 23:48:42.424430 + +""" + +from __future__ import annotations + +from typing import Any + +import sqlalchemy as sa +from alembic import op +from sqlalchemy import Column +from sqlalchemy import DateTime +from sqlalchemy import Enum +from sqlalchemy import Float +from sqlalchemy import ForeignKey +from sqlalchemy import Integer +from sqlalchemy import orm +from sqlalchemy import String +from sqlalchemy import Text +from sqlalchemy import UniqueConstraint +from sqlalchemy.exc import SQLAlchemyError + +from optuna.distributions import _convert_old_distribution_to_new_distribution +from optuna.distributions import BaseDistribution +from optuna.distributions import DiscreteUniformDistribution +from optuna.distributions import distribution_to_json +from optuna.distributions import FloatDistribution +from optuna.distributions import IntDistribution +from optuna.distributions import IntLogUniformDistribution +from optuna.distributions import IntUniformDistribution +from optuna.distributions import json_to_distribution +from optuna.distributions import LogUniformDistribution +from optuna.distributions import UniformDistribution +from optuna.trial import TrialState + +try: + from sqlalchemy.orm import declarative_base +except ImportError: + # TODO(c-bata): Remove this after dropping support for SQLAlchemy v1.3 or prior. 
+ from sqlalchemy.ext.declarative import declarative_base + + +# revision identifiers, used by Alembic. +revision = "v3.0.0.a" +down_revision = "v2.6.0.a" +branch_labels = None +depends_on = None + +MAX_INDEXED_STRING_LENGTH = 512 +BATCH_SIZE = 5000 + +BaseModel = declarative_base() + + +class StudyModel(BaseModel): + __tablename__ = "studies" + study_id = Column(Integer, primary_key=True) + study_name = Column(String(MAX_INDEXED_STRING_LENGTH), index=True, unique=True, nullable=False) + + +class TrialModel(BaseModel): + __tablename__ = "trials" + trial_id = Column(Integer, primary_key=True) + number = Column(Integer) + study_id = Column(Integer, ForeignKey("studies.study_id")) + state = Column(Enum(TrialState), nullable=False) + datetime_start = Column(DateTime) + datetime_complete = Column(DateTime) + + +class TrialParamModel(BaseModel): + __tablename__ = "trial_params" + __table_args__: Any = (UniqueConstraint("trial_id", "param_name"),) + param_id = Column(Integer, primary_key=True) + trial_id = Column(Integer, ForeignKey("trials.trial_id")) + param_name = Column(String(MAX_INDEXED_STRING_LENGTH)) + param_value = Column(Float) + distribution_json = Column(Text()) + + +def migrate_new_distribution(distribution_json: str) -> str: + distribution = json_to_distribution(distribution_json) + new_distribution = _convert_old_distribution_to_new_distribution( + distribution, + suppress_warning=True, + ) + return distribution_to_json(new_distribution) + + +def restore_old_distribution(distribution_json: str) -> str: + distribution = json_to_distribution(distribution_json) + old_distribution: BaseDistribution + + # Float distributions. + if isinstance(distribution, FloatDistribution): + if distribution.log: + old_distribution = LogUniformDistribution( + low=distribution.low, + high=distribution.high, + ) + else: + if distribution.step is not None: + old_distribution = DiscreteUniformDistribution( + low=distribution.low, + high=distribution.high, + q=distribution.step, + ) + else: + old_distribution = UniformDistribution( + low=distribution.low, + high=distribution.high, + ) + + # Integer distributions. + elif isinstance(distribution, IntDistribution): + if distribution.log: + old_distribution = IntLogUniformDistribution( + low=distribution.low, + high=distribution.high, + step=distribution.step, + ) + else: + old_distribution = IntUniformDistribution( + low=distribution.low, + high=distribution.high, + step=distribution.step, + ) + + # Categorical distribution. 
+ else: + old_distribution = distribution + + return distribution_to_json(old_distribution) + + +def persist(session: orm.Session, distributions: list[BaseDistribution]) -> None: + if len(distributions) == 0: + return + session.bulk_save_objects(distributions) + session.commit() + + +def upgrade() -> None: + bind = op.get_bind() + inspector = sa.inspect(bind) + tables = inspector.get_table_names() + + assert "trial_params" in tables + + session = orm.Session(bind=bind) + try: + distributions: list[BaseDistribution] = [] + for distribution in session.query(TrialParamModel).yield_per(BATCH_SIZE): + distribution.distribution_json = migrate_new_distribution( + distribution.distribution_json, + ) + distributions.append(distribution) + + if len(distributions) == BATCH_SIZE: + persist(session, distributions) + distributions = [] + + persist(session, distributions) + + except SQLAlchemyError as e: + session.rollback() + raise e + finally: + session.close() + + +def downgrade() -> None: + bind = op.get_bind() + inspector = sa.inspect(bind) + tables = inspector.get_table_names() + + assert "trial_params" in tables + + session = orm.Session(bind=bind) + try: + distributions = [] + for distribution in session.query(TrialParamModel).yield_per(BATCH_SIZE): + distribution.distribution_json = restore_old_distribution( + distribution.distribution_json, + ) + distributions.append(distribution) + + if len(distributions) == BATCH_SIZE: + persist(session, distributions) + distributions = [] + + persist(session, distributions) + + except SQLAlchemyError as e: + session.rollback() + raise e + finally: + session.close() diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/_rdb/alembic/versions/v3.0.0.b.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/_rdb/alembic/versions/v3.0.0.b.py new file mode 100644 index 0000000000000000000000000000000000000000..e7d09c346df6deb8eb5de94fae49fb8819cb9bd7 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/_rdb/alembic/versions/v3.0.0.b.py @@ -0,0 +1,98 @@ +"""Change floating point precision and make intermediate_value nullable. + +Revision ID: v3.0.0.b +Revises: v3.0.0.a +Create Date: 2022-04-27 16:31:42.012666 + +""" + +import enum + +from alembic import op +from sqlalchemy import and_ +from sqlalchemy import Column +from sqlalchemy import Enum +from sqlalchemy import Float +from sqlalchemy import ForeignKey +from sqlalchemy import Integer +from sqlalchemy.orm import Session + +try: + from sqlalchemy.orm import declarative_base +except ImportError: + # TODO(c-bata): Remove this after dropping support for SQLAlchemy v1.3 or prior. + from sqlalchemy.ext.declarative import declarative_base + + +# revision identifiers, used by Alembic. 
+revision = "v3.0.0.b" +down_revision = "v3.0.0.a" +branch_labels = None +depends_on = None + +BaseModel = declarative_base() +FLOAT_PRECISION = 53 + + +class TrialState(enum.Enum): + RUNNING = 0 + COMPLETE = 1 + PRUNED = 2 + FAIL = 3 + WAITING = 4 + + +class TrialModel(BaseModel): + __tablename__ = "trials" + trial_id = Column(Integer, primary_key=True) + number = Column(Integer) + state = Column(Enum(TrialState), nullable=False) + + +class TrialValueModel(BaseModel): + __tablename__ = "trial_values" + trial_value_id = Column(Integer, primary_key=True) + trial_id = Column(Integer, ForeignKey("trials.trial_id"), nullable=False) + value = Column(Float, nullable=False) + + +def upgrade(): + bind = op.get_bind() + session = Session(bind=bind) + + if ( + session.query(TrialValueModel) + .join(TrialModel, TrialValueModel.trial_id == TrialModel.trial_id) + .filter(and_(TrialModel.state == TrialState.COMPLETE, TrialValueModel.value.is_(None))) + .count() + ) != 0: + raise ValueError("Found invalid trial_values records (value=None and state='COMPLETE')") + session.query(TrialValueModel).filter(TrialValueModel.value.is_(None)).delete() + + with op.batch_alter_table("trial_intermediate_values") as batch_op: + batch_op.alter_column( + "intermediate_value", + type_=Float(precision=FLOAT_PRECISION), + nullable=True, + ) + with op.batch_alter_table("trial_params") as batch_op: + batch_op.alter_column( + "param_value", + type_=Float(precision=FLOAT_PRECISION), + existing_nullable=True, + ) + with op.batch_alter_table("trial_values") as batch_op: + batch_op.alter_column( + "value", + type_=Float(precision=FLOAT_PRECISION), + nullable=False, + ) + + +def downgrade(): + with op.batch_alter_table("trial_intermediate_values") as batch_op: + batch_op.alter_column("intermediate_value", type_=Float, nullable=False) + with op.batch_alter_table("trial_params") as batch_op: + batch_op.alter_column("param_value", type_=Float, existing_nullable=True) + with op.batch_alter_table("trial_values") as batch_op: + batch_op.alter_column("value", type_=Float, existing_nullable=False) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/_rdb/alembic/versions/v3.0.0.c.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/_rdb/alembic/versions/v3.0.0.c.py new file mode 100644 index 0000000000000000000000000000000000000000..2dc0d0aef02c634b1c84111477a54a551ea4a5c8 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/_rdb/alembic/versions/v3.0.0.c.py @@ -0,0 +1,191 @@ +"""Add intermediate_value_type column to represent +inf and -inf + +Revision ID: v3.0.0.c +Revises: v3.0.0.b +Create Date: 2022-05-16 17:17:28.810792 + +""" + +from __future__ import annotations + +import enum + +import numpy as np +from alembic import op +import sqlalchemy as sa +from sqlalchemy.exc import SQLAlchemyError +from sqlalchemy import orm + +try: + from sqlalchemy.orm import declarative_base +except ImportError: + # TODO(c-bata): Remove this after dropping support for SQLAlchemy v1.3 or prior. + from sqlalchemy.ext.declarative import declarative_base + + +# revision identifiers, used by Alembic. 
+revision = "v3.0.0.c" +down_revision = "v3.0.0.b" +branch_labels = None +depends_on = None + + +BaseModel = declarative_base() +RDB_MAX_FLOAT = np.finfo(np.float32).max +RDB_MIN_FLOAT = np.finfo(np.float32).min + + +FLOAT_PRECISION = 53 + + +class IntermediateValueModel(BaseModel): + class TrialIntermediateValueType(enum.Enum): + FINITE = 1 + INF_POS = 2 + INF_NEG = 3 + NAN = 4 + + __tablename__ = "trial_intermediate_values" + trial_intermediate_value_id = sa.Column(sa.Integer, primary_key=True) + intermediate_value = sa.Column(sa.Float(precision=FLOAT_PRECISION), nullable=True) + intermediate_value_type = sa.Column(sa.Enum(TrialIntermediateValueType), nullable=False) + + @classmethod + def intermediate_value_to_stored_repr( + cls, + value: float, + ) -> tuple[float | None, TrialIntermediateValueType]: + if np.isnan(value): + return (None, cls.TrialIntermediateValueType.NAN) + elif value == float("inf"): + return (None, cls.TrialIntermediateValueType.INF_POS) + elif value == float("-inf"): + return (None, cls.TrialIntermediateValueType.INF_NEG) + else: + return (value, cls.TrialIntermediateValueType.FINITE) + + +def upgrade(): + bind = op.get_bind() + inspector = sa.inspect(bind) + column_names = [c["name"] for c in inspector.get_columns("trial_intermediate_values")] + + sa.Enum(IntermediateValueModel.TrialIntermediateValueType).create(bind, checkfirst=True) + + # MySQL and PostgreSQL supports DEFAULT clause like 'ALTER TABLE + # ADD COLUMN ... DEFAULT "FINITE_OR_NAN"', but seemingly Alembic + # does not support such a SQL statement. So first add a column with schema-level + # default value setting, then remove it by `batch_op.alter_column()`. + if "intermediate_value_type" not in column_names: + with op.batch_alter_table("trial_intermediate_values") as batch_op: + batch_op.add_column( + sa.Column( + "intermediate_value_type", + sa.Enum( + "FINITE", "INF_POS", "INF_NEG", "NAN", name="trialintermediatevaluetype" + ), + nullable=False, + server_default="FINITE", + ), + ) + with op.batch_alter_table("trial_intermediate_values") as batch_op: + batch_op.alter_column( + "intermediate_value_type", + existing_type=sa.Enum( + "FINITE", "INF_POS", "INF_NEG", "NAN", name="trialintermediatevaluetype" + ), + existing_nullable=False, + server_default=None, + ) + + session = orm.Session(bind=bind) + try: + records = ( + session.query(IntermediateValueModel) + .filter( + sa.or_( + IntermediateValueModel.intermediate_value > 1e16, + IntermediateValueModel.intermediate_value < -1e16, + IntermediateValueModel.intermediate_value.is_(None), + ) + ) + .all() + ) + mapping = [] + for r in records: + value: float + if r.intermediate_value is None or np.isnan(r.intermediate_value): + value = float("nan") + elif np.isclose(r.intermediate_value, RDB_MAX_FLOAT) or np.isposinf( + r.intermediate_value + ): + value = float("inf") + elif np.isclose(r.intermediate_value, RDB_MIN_FLOAT) or np.isneginf( + r.intermediate_value + ): + value = float("-inf") + else: + value = r.intermediate_value + ( + stored_value, + float_type, + ) = IntermediateValueModel.intermediate_value_to_stored_repr(value) + mapping.append( + { + "trial_intermediate_value_id": r.trial_intermediate_value_id, + "intermediate_value_type": float_type, + "intermediate_value": stored_value, + } + ) + session.bulk_update_mappings(IntermediateValueModel, mapping) + session.commit() + except SQLAlchemyError as e: + session.rollback() + raise e + finally: + session.close() + + +def downgrade(): + bind = op.get_bind() + session = orm.Session(bind=bind) + + try: 
+ records = session.query(IntermediateValueModel).all() + mapping = [] + for r in records: + if ( + r.intermediate_value_type + == IntermediateValueModel.TrialIntermediateValueType.FINITE + or r.intermediate_value_type + == IntermediateValueModel.TrialIntermediateValueType.NAN + ): + continue + + _intermediate_value = r.intermediate_value + if ( + r.intermediate_value_type + == IntermediateValueModel.TrialIntermediateValueType.INF_POS + ): + _intermediate_value = RDB_MAX_FLOAT + else: + _intermediate_value = RDB_MIN_FLOAT + + mapping.append( + { + "trial_intermediate_value_id": r.trial_intermediate_value_id, + "intermediate_value": _intermediate_value, + } + ) + session.bulk_update_mappings(IntermediateValueModel, mapping) + session.commit() + except SQLAlchemyError as e: + session.rollback() + raise e + finally: + session.close() + + with op.batch_alter_table("trial_intermediate_values", schema=None) as batch_op: + batch_op.drop_column("intermediate_value_type") + + sa.Enum(IntermediateValueModel.TrialIntermediateValueType).drop(bind, checkfirst=True) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/_rdb/alembic/versions/v3.0.0.d.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/_rdb/alembic/versions/v3.0.0.d.py new file mode 100644 index 0000000000000000000000000000000000000000..2b6a3588e50e4a72de1fafc1c9b7702123593bf6 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/_rdb/alembic/versions/v3.0.0.d.py @@ -0,0 +1,193 @@ +"""Handle inf/-inf for trial_values table. + +Revision ID: v3.0.0.d +Revises: v3.0.0.c +Create Date: 2022-06-02 09:57:22.818798 + +""" + +from __future__ import annotations + +import enum + +import numpy as np +from alembic import op +import sqlalchemy as sa +from sqlalchemy.exc import SQLAlchemyError +from sqlalchemy import orm + +try: + from sqlalchemy.orm import declarative_base +except ImportError: + # TODO(c-bata): Remove this after dropping support for SQLAlchemy v1.3 or prior. + from sqlalchemy.ext.declarative import declarative_base + + +# revision identifiers, used by Alembic.
+revision = "v3.0.0.d" +down_revision = "v3.0.0.c" +branch_labels = None +depends_on = None + + +BaseModel = declarative_base() +RDB_MAX_FLOAT = np.finfo(np.float32).max +RDB_MIN_FLOAT = np.finfo(np.float32).min + + +FLOAT_PRECISION = 53 + + +class TrialValueModel(BaseModel): + class TrialValueType(enum.Enum): + FINITE = 1 + INF_POS = 2 + INF_NEG = 3 + + __tablename__ = "trial_values" + trial_value_id = sa.Column(sa.Integer, primary_key=True) + value = sa.Column(sa.Float(precision=FLOAT_PRECISION), nullable=True) + value_type = sa.Column(sa.Enum(TrialValueType), nullable=False) + + @classmethod + def value_to_stored_repr( + cls, + value: float, + ) -> tuple[float | None, TrialValueType]: + if value == float("inf"): + return (None, cls.TrialValueType.INF_POS) + elif value == float("-inf"): + return (None, cls.TrialValueType.INF_NEG) + else: + return (value, cls.TrialValueType.FINITE) + + @classmethod + def stored_repr_to_value(cls, value: float | None, float_type: TrialValueType) -> float: + if float_type == cls.TrialValueType.INF_POS: + assert value is None + return float("inf") + elif float_type == cls.TrialValueType.INF_NEG: + assert value is None + return float("-inf") + else: + assert float_type == cls.TrialValueType.FINITE + assert value is not None + return value + + +def upgrade(): + bind = op.get_bind() + inspector = sa.inspect(bind) + column_names = [c["name"] for c in inspector.get_columns("trial_values")] + + sa.Enum(TrialValueModel.TrialValueType).create(bind, checkfirst=True) + + # MySQL and PostgreSQL supports DEFAULT clause like 'ALTER TABLE + # ADD COLUMN ... DEFAULT "FINITE"', but seemingly Alembic + # does not support such a SQL statement. So first add a column with schema-level + # default value setting, then remove it by `batch_op.alter_column()`. 
+ if "value_type" not in column_names: + with op.batch_alter_table("trial_values") as batch_op: + batch_op.add_column( + sa.Column( + "value_type", + sa.Enum("FINITE", "INF_POS", "INF_NEG", name="trialvaluetype"), + nullable=False, + server_default="FINITE", + ), + ) + with op.batch_alter_table("trial_values") as batch_op: + batch_op.alter_column( + "value_type", + existing_type=sa.Enum("FINITE", "INF_POS", "INF_NEG", name="trialvaluetype"), + existing_nullable=False, + server_default=None, + ) + batch_op.alter_column( + "value", + existing_type=sa.Float(precision=FLOAT_PRECISION), + nullable=True, + ) + + session = orm.Session(bind=bind) + try: + records = ( + session.query(TrialValueModel) + .filter( + sa.or_( + TrialValueModel.value > 1e16, + TrialValueModel.value < -1e16, + ) + ) + .all() + ) + mapping = [] + for r in records: + value: float + if np.isclose(r.value, RDB_MAX_FLOAT) or np.isposinf(r.value): + value = float("inf") + elif np.isclose(r.value, RDB_MIN_FLOAT) or np.isneginf(r.value): + value = float("-inf") + else: + value = r.value + + ( + stored_value, + float_type, + ) = TrialValueModel.value_to_stored_repr(value) + mapping.append( + { + "trial_value_id": r.trial_value_id, + "value_type": float_type, + "value": stored_value, + } + ) + session.bulk_update_mappings(TrialValueModel, mapping) + session.commit() + except SQLAlchemyError as e: + session.rollback() + raise e + finally: + session.close() + + +def downgrade(): + bind = op.get_bind() + session = orm.Session(bind=bind) + + try: + records = session.query(TrialValueModel).all() + mapping = [] + for r in records: + if r.value_type == TrialValueModel.TrialValueType.FINITE: + continue + + _value = r.value + if r.value_type == TrialValueModel.TrialValueType.INF_POS: + _value = RDB_MAX_FLOAT + else: + _value = RDB_MIN_FLOAT + + mapping.append( + { + "trial_value_id": r.trial_value_id, + "value": _value, + } + ) + session.bulk_update_mappings(TrialValueModel, mapping) + session.commit() + except SQLAlchemyError as e: + session.rollback() + raise e + finally: + session.close() + + with op.batch_alter_table("trial_values", schema=None) as batch_op: + batch_op.drop_column("value_type") + batch_op.alter_column( + "value", + existing_type=sa.Float(precision=FLOAT_PRECISION), + nullable=False, + ) + + sa.Enum(TrialValueModel.TrialValueType).drop(bind, checkfirst=True) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/_rdb/alembic/versions/v3.2.0.a_.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/_rdb/alembic/versions/v3.2.0.a_.py new file mode 100644 index 0000000000000000000000000000000000000000..3e73e20918156abd6181552a9e5bd59e5eb64958 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/_rdb/alembic/versions/v3.2.0.a_.py @@ -0,0 +1,28 @@ +"""Add index to study_id column in trials table + +Revision ID: v3.2.0.a +Revises: v3.0.0.d +Create Date: 2023-02-25 13:21:00.730272 + +""" + +from alembic import op + + +# revision identifiers, used by Alembic. +revision = "v3.2.0.a" +down_revision = "v3.0.0.d" +branch_labels = None +depends_on = None + + +def upgrade(): + op.create_index(op.f("trials_study_id_key"), "trials", ["study_id"], unique=False) + + +def downgrade(): + # The following operation doesn't work on MySQL due to a foreign key constraint. 
+ # + # mysql> DROP INDEX ix_trials_study_id ON trials; + # ERROR: Cannot drop index 'ix_trials_study_id': needed in a foreign key constraint. + op.drop_index(op.f("trials_study_id_key"), table_name="trials") diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/_rdb/models.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/_rdb/models.py new file mode 100644 index 0000000000000000000000000000000000000000..8f375526d07107a932f88a61e1b536f8e7670667 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/_rdb/models.py @@ -0,0 +1,572 @@ +from __future__ import annotations + +import enum +import math +from typing import Any + +from sqlalchemy import asc +from sqlalchemy import case +from sqlalchemy import CheckConstraint +from sqlalchemy import DateTime +from sqlalchemy import desc +from sqlalchemy import Enum +from sqlalchemy import Float +from sqlalchemy import ForeignKey +from sqlalchemy import func +from sqlalchemy import Integer +from sqlalchemy import orm +from sqlalchemy import String +from sqlalchemy import Text +from sqlalchemy import UniqueConstraint + +from optuna import distributions +from optuna.study._study_direction import StudyDirection +from optuna.trial import TrialState + + +try: + from sqlalchemy.orm import declarative_base +except ImportError: + # TODO(c-bata): Remove this after dropping support for SQLAlchemy v1.3 or prior. + from sqlalchemy.ext.declarative import declarative_base + +try: + from sqlalchemy.orm import mapped_column + + _Column = mapped_column +except ImportError: + # TODO(Shinichi): Remove this after dropping support for SQLAlchemy<2.0. + from sqlalchemy import Column as _Column # type: ignore[assignment] + +# Don't modify this version number anymore. +# The schema management functionality has been moved to alembic. +SCHEMA_VERSION = 12 + +MAX_INDEXED_STRING_LENGTH = 512 +MAX_VERSION_LENGTH = 256 + +NOT_FOUND_MSG = "Record does not exist." 
+ +FLOAT_PRECISION = 53 + +BaseModel: Any = declarative_base() + + +class StudyModel(BaseModel): + __tablename__ = "studies" + study_id = _Column(Integer, primary_key=True) + study_name = _Column( + String(MAX_INDEXED_STRING_LENGTH), index=True, unique=True, nullable=False + ) + + @classmethod + def find_or_raise_by_id( + cls, study_id: int, session: orm.Session, for_update: bool = False + ) -> "StudyModel": + query = session.query(cls).filter(cls.study_id == study_id) + + if for_update: + query = query.with_for_update() + + study = query.one_or_none() + if study is None: + raise KeyError(NOT_FOUND_MSG) + + return study + + @classmethod + def find_by_name(cls, study_name: str, session: orm.Session) -> "StudyModel" | None: + study = session.query(cls).filter(cls.study_name == study_name).one_or_none() + + return study + + @classmethod + def find_or_raise_by_name(cls, study_name: str, session: orm.Session) -> "StudyModel": + study = cls.find_by_name(study_name, session) + if study is None: + raise KeyError(NOT_FOUND_MSG) + + return study + + +class StudyDirectionModel(BaseModel): + __tablename__ = "study_directions" + __table_args__: Any = (UniqueConstraint("study_id", "objective"),) + study_direction_id = _Column(Integer, primary_key=True) + direction = _Column(Enum(StudyDirection), nullable=False) + study_id = _Column(Integer, ForeignKey("studies.study_id"), nullable=False) + objective = _Column(Integer, nullable=False) + + study = orm.relationship( + StudyModel, backref=orm.backref("directions", cascade="all, delete-orphan") + ) + + @classmethod + def where_study_id(cls, study_id: int, session: orm.Session) -> list["StudyDirectionModel"]: + return session.query(cls).filter(cls.study_id == study_id).all() + + +class StudyUserAttributeModel(BaseModel): + __tablename__ = "study_user_attributes" + __table_args__: Any = (UniqueConstraint("study_id", "key"),) + study_user_attribute_id = _Column(Integer, primary_key=True) + study_id = _Column(Integer, ForeignKey("studies.study_id")) + key = _Column(String(MAX_INDEXED_STRING_LENGTH)) + value_json = _Column(Text()) + + study = orm.relationship( + StudyModel, backref=orm.backref("user_attributes", cascade="all, delete-orphan") + ) + + @classmethod + def find_by_study_and_key( + cls, study: StudyModel, key: str, session: orm.Session + ) -> "StudyUserAttributeModel" | None: + attribute = ( + session.query(cls) + .filter(cls.study_id == study.study_id) + .filter(cls.key == key) + .one_or_none() + ) + + return attribute + + @classmethod + def where_study_id( + cls, study_id: int, session: orm.Session + ) -> list["StudyUserAttributeModel"]: + return session.query(cls).filter(cls.study_id == study_id).all() + + +class StudySystemAttributeModel(BaseModel): + __tablename__ = "study_system_attributes" + __table_args__: Any = (UniqueConstraint("study_id", "key"),) + study_system_attribute_id = _Column(Integer, primary_key=True) + study_id = _Column(Integer, ForeignKey("studies.study_id")) + key = _Column(String(MAX_INDEXED_STRING_LENGTH)) + value_json = _Column(Text()) + + study = orm.relationship( + StudyModel, backref=orm.backref("system_attributes", cascade="all, delete-orphan") + ) + + @classmethod + def find_by_study_and_key( + cls, study: StudyModel, key: str, session: orm.Session + ) -> "StudySystemAttributeModel" | None: + attribute = ( + session.query(cls) + .filter(cls.study_id == study.study_id) + .filter(cls.key == key) + .one_or_none() + ) + + return attribute + + @classmethod + def where_study_id( + cls, study_id: int, session: orm.Session + ) 
-> list["StudySystemAttributeModel"]: + return session.query(cls).filter(cls.study_id == study_id).all() + + +class TrialModel(BaseModel): + __tablename__ = "trials" + trial_id = _Column(Integer, primary_key=True) + # No `UniqueConstraint` is put on the `number` columns although it in practice is constrained + # to be unique. This is to reduce code complexity as table-level locking would be required + # otherwise. See https://github.com/optuna/optuna/pull/939#discussion_r387447632. + number = _Column(Integer) + study_id = _Column(Integer, ForeignKey("studies.study_id"), index=True) + state = _Column(Enum(TrialState), nullable=False) + datetime_start = _Column(DateTime) + datetime_complete = _Column(DateTime) + + study = orm.relationship( + StudyModel, backref=orm.backref("trials", cascade="all, delete-orphan") + ) + + @classmethod + def find_max_value_trial_id(cls, study_id: int, objective: int, session: orm.Session) -> int: + trial = ( + session.query(cls) + .with_entities(cls.trial_id) + .filter(cls.study_id == study_id) + .filter(cls.state == TrialState.COMPLETE) + .join(TrialValueModel) + .filter(TrialValueModel.objective == objective) + .order_by( + desc( + case( + {"INF_NEG": -1, "FINITE": 0, "INF_POS": 1}, + value=TrialValueModel.value_type, + ) + ), + desc(TrialValueModel.value), + ) + .limit(1) + .one_or_none() + ) + if trial is None: + raise ValueError(NOT_FOUND_MSG) + return trial[0] + + @classmethod + def find_min_value_trial_id(cls, study_id: int, objective: int, session: orm.Session) -> int: + trial = ( + session.query(cls) + .with_entities(cls.trial_id) + .filter(cls.study_id == study_id) + .filter(cls.state == TrialState.COMPLETE) + .join(TrialValueModel) + .filter(TrialValueModel.objective == objective) + .order_by( + asc( + case( + {"INF_NEG": -1, "FINITE": 0, "INF_POS": 1}, + value=TrialValueModel.value_type, + ) + ), + asc(TrialValueModel.value), + ) + .limit(1) + .one_or_none() + ) + if trial is None: + raise ValueError(NOT_FOUND_MSG) + return trial[0] + + @classmethod + def find_or_raise_by_id( + cls, trial_id: int, session: orm.Session, for_update: bool = False + ) -> "TrialModel": + query = session.query(cls).filter(cls.trial_id == trial_id) + + # "FOR UPDATE" clause is used for row-level locking. + # Please note that SQLite3 doesn't support this clause. 
+ if for_update: + query = query.with_for_update() + + trial = query.one_or_none() + if trial is None: + raise KeyError(NOT_FOUND_MSG) + + return trial + + @classmethod + def count( + cls, session: orm.Session, study: StudyModel | None = None, state: TrialState | None = None + ) -> int: + trial_count = session.query(func.count(cls.trial_id)) + if study is not None: + trial_count = trial_count.filter(cls.study_id == study.study_id) + if state is not None: + trial_count = trial_count.filter(cls.state == state) + + return trial_count.scalar() + + def count_past_trials(self, session: orm.Session) -> int: + trial_count = session.query(func.count(TrialModel.trial_id)).filter( + TrialModel.study_id == self.study_id, TrialModel.trial_id < self.trial_id + ) + return trial_count.scalar() + + +class TrialUserAttributeModel(BaseModel): + __tablename__ = "trial_user_attributes" + __table_args__: Any = (UniqueConstraint("trial_id", "key"),) + trial_user_attribute_id = _Column(Integer, primary_key=True) + trial_id = _Column(Integer, ForeignKey("trials.trial_id")) + key = _Column(String(MAX_INDEXED_STRING_LENGTH)) + value_json = _Column(Text()) + + trial = orm.relationship( + TrialModel, backref=orm.backref("user_attributes", cascade="all, delete-orphan") + ) + + @classmethod + def find_by_trial_and_key( + cls, trial: TrialModel, key: str, session: orm.Session + ) -> "TrialUserAttributeModel" | None: + attribute = ( + session.query(cls) + .filter(cls.trial_id == trial.trial_id) + .filter(cls.key == key) + .one_or_none() + ) + + return attribute + + @classmethod + def where_trial_id( + cls, trial_id: int, session: orm.Session + ) -> list["TrialUserAttributeModel"]: + return session.query(cls).filter(cls.trial_id == trial_id).all() + + +class TrialSystemAttributeModel(BaseModel): + __tablename__ = "trial_system_attributes" + __table_args__: Any = (UniqueConstraint("trial_id", "key"),) + trial_system_attribute_id = _Column(Integer, primary_key=True) + trial_id = _Column(Integer, ForeignKey("trials.trial_id")) + key = _Column(String(MAX_INDEXED_STRING_LENGTH)) + value_json = _Column(Text()) + + trial = orm.relationship( + TrialModel, backref=orm.backref("system_attributes", cascade="all, delete-orphan") + ) + + @classmethod + def find_by_trial_and_key( + cls, trial: TrialModel, key: str, session: orm.Session + ) -> "TrialSystemAttributeModel" | None: + attribute = ( + session.query(cls) + .filter(cls.trial_id == trial.trial_id) + .filter(cls.key == key) + .one_or_none() + ) + + return attribute + + @classmethod + def where_trial_id( + cls, trial_id: int, session: orm.Session + ) -> list["TrialSystemAttributeModel"]: + return session.query(cls).filter(cls.trial_id == trial_id).all() + + +class TrialParamModel(BaseModel): + __tablename__ = "trial_params" + __table_args__: Any = (UniqueConstraint("trial_id", "param_name"),) + param_id = _Column(Integer, primary_key=True) + trial_id = _Column(Integer, ForeignKey("trials.trial_id")) + param_name = _Column(String(MAX_INDEXED_STRING_LENGTH)) + param_value = _Column(Float(precision=FLOAT_PRECISION)) + distribution_json = _Column(Text()) + + trial = orm.relationship( + TrialModel, backref=orm.backref("params", cascade="all, delete-orphan") + ) + + def check_and_add(self, session: orm.Session, study_id: int) -> None: + self._check_compatibility_with_previous_trial_param_distributions(session, study_id) + session.add(self) + + def _check_compatibility_with_previous_trial_param_distributions( + self, session: orm.Session, study_id: int + ) -> None: + previous_record = ( 
+ session.query(TrialParamModel) + .join(TrialModel) + .filter(TrialModel.study_id == study_id) + .filter(TrialParamModel.param_name == self.param_name) + .first() + ) + if previous_record is not None: + distributions.check_distribution_compatibility( + distributions.json_to_distribution(previous_record.distribution_json), + distributions.json_to_distribution(self.distribution_json), + ) + + @classmethod + def find_by_trial_and_param_name( + cls, trial: TrialModel, param_name: str, session: orm.Session + ) -> "TrialParamModel" | None: + param_distribution = ( + session.query(cls) + .filter(cls.trial_id == trial.trial_id) + .filter(cls.param_name == param_name) + .one_or_none() + ) + + return param_distribution + + @classmethod + def find_or_raise_by_trial_and_param_name( + cls, trial: TrialModel, param_name: str, session: orm.Session + ) -> "TrialParamModel": + param_distribution = cls.find_by_trial_and_param_name(trial, param_name, session) + + if param_distribution is None: + raise KeyError(NOT_FOUND_MSG) + + return param_distribution + + @classmethod + def where_trial_id(cls, trial_id: int, session: orm.Session) -> list["TrialParamModel"]: + trial_params = session.query(cls).filter(cls.trial_id == trial_id).all() + + return trial_params + + +class TrialValueModel(BaseModel): + class TrialValueType(enum.Enum): + FINITE = 1 + INF_POS = 2 + INF_NEG = 3 + + __tablename__ = "trial_values" + __table_args__: Any = (UniqueConstraint("trial_id", "objective"),) + trial_value_id = _Column(Integer, primary_key=True) + trial_id = _Column(Integer, ForeignKey("trials.trial_id"), nullable=False) + objective = _Column(Integer, nullable=False) + value = _Column(Float(precision=FLOAT_PRECISION), nullable=True) + value_type = _Column(Enum(TrialValueType), nullable=False) + + trial = orm.relationship( + TrialModel, backref=orm.backref("values", cascade="all, delete-orphan") + ) + + @classmethod + def value_to_stored_repr(cls, value: float) -> tuple[float | None, TrialValueType]: + if value == float("inf"): + return None, cls.TrialValueType.INF_POS + elif value == float("-inf"): + return None, cls.TrialValueType.INF_NEG + else: + return value, cls.TrialValueType.FINITE + + @classmethod + def stored_repr_to_value(cls, value: float | None, float_type: TrialValueType) -> float: + if float_type == cls.TrialValueType.INF_POS: + assert value is None + return float("inf") + elif float_type == cls.TrialValueType.INF_NEG: + assert value is None + return float("-inf") + else: + assert float_type == cls.TrialValueType.FINITE + assert value is not None + return value + + @classmethod + def find_by_trial_and_objective( + cls, trial: TrialModel, objective: int, session: orm.Session + ) -> "TrialValueModel" | None: + trial_value = ( + session.query(cls) + .filter(cls.trial_id == trial.trial_id) + .filter(cls.objective == objective) + .one_or_none() + ) + + return trial_value + + @classmethod + def where_trial_id(cls, trial_id: int, session: orm.Session) -> list["TrialValueModel"]: + trial_values = ( + session.query(cls).filter(cls.trial_id == trial_id).order_by(asc(cls.objective)).all() + ) + + return trial_values + + +class TrialIntermediateValueModel(BaseModel): + class TrialIntermediateValueType(enum.Enum): + FINITE = 1 + INF_POS = 2 + INF_NEG = 3 + NAN = 4 + + __tablename__ = "trial_intermediate_values" + __table_args__: Any = (UniqueConstraint("trial_id", "step"),) + trial_intermediate_value_id = _Column(Integer, primary_key=True) + trial_id = _Column(Integer, ForeignKey("trials.trial_id"), nullable=False) + step = 
_Column(Integer, nullable=False) + intermediate_value = _Column(Float(precision=FLOAT_PRECISION), nullable=True) + intermediate_value_type = _Column(Enum(TrialIntermediateValueType), nullable=False) + + trial = orm.relationship( + TrialModel, backref=orm.backref("intermediate_values", cascade="all, delete-orphan") + ) + + @classmethod + def intermediate_value_to_stored_repr( + cls, value: float + ) -> tuple[float | None, TrialIntermediateValueType]: + if math.isnan(value): + return None, cls.TrialIntermediateValueType.NAN + elif value == float("inf"): + return None, cls.TrialIntermediateValueType.INF_POS + elif value == float("-inf"): + return None, cls.TrialIntermediateValueType.INF_NEG + else: + return value, cls.TrialIntermediateValueType.FINITE + + @classmethod + def stored_repr_to_intermediate_value( + cls, value: float | None, float_type: TrialIntermediateValueType + ) -> float: + if float_type == cls.TrialIntermediateValueType.NAN: + assert value is None + return float("nan") + elif float_type == cls.TrialIntermediateValueType.INF_POS: + assert value is None + return float("inf") + elif float_type == cls.TrialIntermediateValueType.INF_NEG: + assert value is None + return float("-inf") + else: + assert float_type == cls.TrialIntermediateValueType.FINITE + assert value is not None + return value + + @classmethod + def find_by_trial_and_step( + cls, trial: TrialModel, step: int, session: orm.Session + ) -> "TrialIntermediateValueModel" | None: + trial_intermediate_value = ( + session.query(cls) + .filter(cls.trial_id == trial.trial_id) + .filter(cls.step == step) + .one_or_none() + ) + + return trial_intermediate_value + + @classmethod + def where_trial_id( + cls, trial_id: int, session: orm.Session + ) -> list["TrialIntermediateValueModel"]: + trial_intermediate_values = session.query(cls).filter(cls.trial_id == trial_id).all() + + return trial_intermediate_values + + +class TrialHeartbeatModel(BaseModel): + __tablename__ = "trial_heartbeats" + __table_args__: Any = (UniqueConstraint("trial_id"),) + trial_heartbeat_id = _Column(Integer, primary_key=True) + trial_id = _Column(Integer, ForeignKey("trials.trial_id"), nullable=False) + heartbeat = _Column(DateTime, nullable=False, default=func.current_timestamp()) + + trial = orm.relationship( + TrialModel, backref=orm.backref("heartbeats", cascade="all, delete-orphan") + ) + + @classmethod + def where_trial_id( + cls, trial_id: int, session: orm.Session, for_update: bool = False + ) -> "TrialHeartbeatModel" | None: + + query = session.query(cls).filter(cls.trial_id == trial_id) + + if for_update: + query = query.with_for_update() + + return query.one_or_none() + + +class VersionInfoModel(BaseModel): + __tablename__ = "version_info" + # setting check constraint to ensure the number of rows is at most 1 + __table_args__: Any = (CheckConstraint("version_info_id=1"),) + version_info_id = _Column(Integer, primary_key=True, autoincrement=False, default=1) + schema_version = _Column(Integer) + library_version = _Column(String(MAX_VERSION_LENGTH)) + + @classmethod + def find(cls, session: orm.Session) -> "VersionInfoModel" | None: + version_info = session.query(cls).one_or_none() + return version_info diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/_rdb/storage.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/_rdb/storage.py new file mode 100644 index 0000000000000000000000000000000000000000..a8cb11855e3172d924f49ed97e93efb8e207f519 --- 
/dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/_rdb/storage.py @@ -0,0 +1,1175 @@ +from __future__ import annotations + +from collections import defaultdict +from collections.abc import Callable +from collections.abc import Container +from collections.abc import Generator +from collections.abc import Iterable +from collections.abc import Sequence +from contextlib import contextmanager +import copy +from datetime import datetime +from datetime import timedelta +import json +import logging +import os +import random +import sqlite3 +import time +from typing import Any +from typing import TYPE_CHECKING +import uuid + +import optuna +from optuna import distributions +from optuna import version +from optuna._experimental import warn_experimental_argument +from optuna._imports import _LazyImport +from optuna._typing import JSONSerializable +from optuna.storages._base import BaseStorage +from optuna.storages._base import DEFAULT_STUDY_NAME_PREFIX +from optuna.storages._heartbeat import BaseHeartbeat +from optuna.study._frozen import FrozenStudy +from optuna.study._study_direction import StudyDirection +from optuna.trial import FrozenTrial +from optuna.trial import TrialState + + +if TYPE_CHECKING: + import alembic.command as alembic_command + import alembic.config as alembic_config + import alembic.migration as alembic_migration + import alembic.script as alembic_script + import sqlalchemy + import sqlalchemy.dialects.mysql as sqlalchemy_dialects_mysql + import sqlalchemy.dialects.sqlite as sqlalchemy_dialects_sqlite + import sqlalchemy.exc as sqlalchemy_exc + import sqlalchemy.orm as sqlalchemy_orm + import sqlalchemy.sql.functions as sqlalchemy_sql_functions + + from optuna.storages._rdb import models +else: + alembic_command = _LazyImport("alembic.command") + alembic_config = _LazyImport("alembic.config") + alembic_migration = _LazyImport("alembic.migration") + alembic_script = _LazyImport("alembic.script") + + sqlalchemy = _LazyImport("sqlalchemy") + sqlalchemy_dialects_mysql = _LazyImport("sqlalchemy.dialects.mysql") + sqlalchemy_dialects_sqlite = _LazyImport("sqlalchemy.dialects.sqlite") + sqlalchemy_exc = _LazyImport("sqlalchemy.exc") + sqlalchemy_orm = _LazyImport("sqlalchemy.orm") + sqlalchemy_sql_functions = _LazyImport("sqlalchemy.sql.functions") + + models = _LazyImport("optuna.storages._rdb.models") + + +_logger = optuna.logging.get_logger(__name__) + + +@contextmanager +def _create_scoped_session( + scoped_session: "sqlalchemy_orm.scoped_session", + ignore_integrity_error: bool = False, +) -> Generator["sqlalchemy_orm.Session", None, None]: + session = scoped_session() + try: + yield session + session.commit() + except sqlalchemy_exc.IntegrityError as e: + session.rollback() + if ignore_integrity_error: + _logger.debug( + "Ignoring {}. This happens due to a timing issue among threads/processes/nodes. " + "Another one might have committed a record with the same key(s).".format(repr(e)) + ) + else: + raise + except sqlalchemy_exc.SQLAlchemyError as e: + session.rollback() + message = ( + "An exception is raised during the commit. " + "This typically happens due to invalid data in the commit, " + "e.g. exceeding max length. " + ) + raise optuna.exceptions.StorageInternalError(message) from e + except Exception: + session.rollback() + raise + finally: + session.close() + + +class RDBStorage(BaseStorage, BaseHeartbeat): + """Storage class for RDB backend. 
+ + Note that library users can instantiate this class, but the attributes + provided by this class are not supposed to be directly accessed by them. + + Example: + + Create an :class:`~optuna.storages.RDBStorage` instance with customized + ``pool_size`` and ``timeout`` settings. + + .. testcode:: + + import optuna + + + def objective(trial): + x = trial.suggest_float("x", -100, 100) + return x**2 + + + storage = optuna.storages.RDBStorage( + url="sqlite:///:memory:", + engine_kwargs={"pool_size": 20, "connect_args": {"timeout": 10}}, + ) + + study = optuna.create_study(storage=storage) + study.optimize(objective, n_trials=10) + + Args: + url: + URL of the storage. + engine_kwargs: + A dictionary of keyword arguments that is passed to + `sqlalchemy.engine.create_engine`_ function. + skip_compatibility_check: + Flag to skip schema compatibility check if set to :obj:`True`. + heartbeat_interval: + Interval to record the heartbeat. It is recorded every ``interval`` seconds. + ``heartbeat_interval`` must be :obj:`None` or a positive integer. + + .. note:: + Heartbeat mechanism is experimental. API would change in the future. + + .. note:: + The heartbeat is supposed to be used with :meth:`~optuna.study.Study.optimize`. + If you use :meth:`~optuna.study.Study.ask` and + :meth:`~optuna.study.Study.tell` instead, it will not work. + + grace_period: + Grace period before a running trial is failed from the last heartbeat. + ``grace_period`` must be :obj:`None` or a positive integer. + If it is :obj:`None`, the grace period will be `2 * heartbeat_interval`. + failed_trial_callback: + A callback function that is invoked after failing each stale trial. + The function must accept two parameters with the following types in this order: + :class:`~optuna.study.Study` and :class:`~optuna.trial.FrozenTrial`. + + .. note:: + The procedure to fail existing stale trials is called just before asking the + study for a new trial. + + skip_table_creation: + Flag to skip table creation if set to :obj:`True`. + + .. _sqlalchemy.engine.create_engine: + https://docs.sqlalchemy.org/en/latest/core/engines.html#sqlalchemy.create_engine + + .. note:: + If you use MySQL, `pool_pre_ping`_ will be set to :obj:`True` by default to prevent + connection timeout. You can turn it off with ``engine_kwargs['pool_pre_ping']=False``, but + it is recommended to keep the setting if execution time of your objective function is + longer than the `wait_timeout` of your MySQL configuration. + + .. _pool_pre_ping: + https://docs.sqlalchemy.org/en/13/core/engines.html#sqlalchemy.create_engine.params. + pool_pre_ping + + .. note:: + We would never recommend SQLite3 for parallel optimization. + Please see the FAQ :ref:`sqlite_concurrency` for details. + + .. note:: + Mainly in a cluster environment, running trials are often killed unexpectedly. + If you want to detect a failure of trials, please use the heartbeat + mechanism. Set ``heartbeat_interval``, ``grace_period``, and ``failed_trial_callback`` + appropriately according to your use case. For more details, please refer to the + :ref:`tutorial ` and `Example page + `__. + + .. seealso:: + You can use :class:`~optuna.storages.RetryFailedTrialCallback` to automatically retry + failed trials detected by heartbeat. 
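+
+    A minimal heartbeat configuration sketch (illustrative; the URL and the interval
+    values are placeholders): stale running trials are failed once no heartbeat
+    arrives within ``grace_period`` seconds, and ``RetryFailedTrialCallback``
+    re-enqueues them for another attempt::
+
+        storage = optuna.storages.RDBStorage(
+            url="sqlite:///example.db",
+            heartbeat_interval=30,
+            grace_period=60,
+            failed_trial_callback=optuna.storages.RetryFailedTrialCallback(),
+        )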
+ + """ + + def __init__( + self, + url: str, + engine_kwargs: dict[str, Any] | None = None, + skip_compatibility_check: bool = False, + *, + heartbeat_interval: int | None = None, + grace_period: int | None = None, + failed_trial_callback: Callable[["optuna.study.Study", FrozenTrial], None] | None = None, + skip_table_creation: bool = False, + ) -> None: + self.engine_kwargs = engine_kwargs or {} + self.url = self._fill_storage_url_template(url) + self.skip_compatibility_check = skip_compatibility_check + if heartbeat_interval is not None: + if heartbeat_interval <= 0: + raise ValueError("The value of `heartbeat_interval` should be a positive integer.") + else: + warn_experimental_argument("heartbeat_interval") + if grace_period is not None and grace_period <= 0: + raise ValueError("The value of `grace_period` should be a positive integer.") + self.heartbeat_interval = heartbeat_interval + self.grace_period = grace_period + self.failed_trial_callback = failed_trial_callback + + self._set_default_engine_kwargs_for_mysql(url, self.engine_kwargs) + + try: + self.engine = sqlalchemy.engine.create_engine(self.url, **self.engine_kwargs) + except ImportError as e: + raise ImportError( + "Failed to import DB access module for the specified storage URL. " + "Please install appropriate one." + ) from e + + self.scoped_session = sqlalchemy_orm.scoped_session( + sqlalchemy_orm.sessionmaker(bind=self.engine) + ) + if not skip_table_creation: + models.BaseModel.metadata.create_all(self.engine) + + self._version_manager = _VersionManager(self.url, self.engine, self.scoped_session) + if not skip_compatibility_check: + self._version_manager.check_table_schema_compatibility() + + def __getstate__(self) -> dict[Any, Any]: + state = self.__dict__.copy() + del state["scoped_session"] + del state["engine"] + del state["_version_manager"] + return state + + def __setstate__(self, state: dict[Any, Any]) -> None: + self.__dict__.update(state) + try: + self.engine = sqlalchemy.engine.create_engine(self.url, **self.engine_kwargs) + except ImportError as e: + raise ImportError( + "Failed to import DB access module for the specified storage URL. " + "Please install appropriate one." + ) from e + + self.scoped_session = sqlalchemy_orm.scoped_session( + sqlalchemy_orm.sessionmaker(bind=self.engine) + ) + models.BaseModel.metadata.create_all(self.engine) + self._version_manager = _VersionManager(self.url, self.engine, self.scoped_session) + if not self.skip_compatibility_check: + self._version_manager.check_table_schema_compatibility() + + def create_new_study( + self, directions: Sequence[StudyDirection], study_name: str | None = None + ) -> int: + try: + with _create_scoped_session(self.scoped_session) as session: + if study_name is None: + study_name = self._create_unique_study_name(session) + + direction_models = [ + models.StudyDirectionModel(objective=objective, direction=d) + for objective, d in enumerate(list(directions)) + ] + + session.add(models.StudyModel(study_name=study_name, directions=direction_models)) + + except sqlalchemy_exc.IntegrityError: + raise optuna.exceptions.DuplicatedStudyError( + "Another study with name '{}' already exists. 
" + "Please specify a different name, or reuse the existing one " + "by setting `load_if_exists` (for Python API) or " + "`--skip-if-exists` flag (for CLI).".format(study_name) + ) + + _logger.info("A new study created in RDB with name: {}".format(study_name)) + + return self.get_study_id_from_name(study_name) + + def delete_study(self, study_id: int) -> None: + with _create_scoped_session(self.scoped_session, True) as session: + study = models.StudyModel.find_or_raise_by_id(study_id, session) + session.delete(study) + + @staticmethod + def _create_unique_study_name(session: "sqlalchemy_orm.Session") -> str: + while True: + study_uuid = str(uuid.uuid4()) + study_name = DEFAULT_STUDY_NAME_PREFIX + study_uuid + study = models.StudyModel.find_by_name(study_name, session) + if study is None: + break + + return study_name + + def set_study_user_attr(self, study_id: int, key: str, value: Any) -> None: + with _create_scoped_session(self.scoped_session, True) as session: + study = models.StudyModel.find_or_raise_by_id(study_id, session) + attribute = models.StudyUserAttributeModel.find_by_study_and_key(study, key, session) + if attribute is None: + attribute = models.StudyUserAttributeModel( + study_id=study_id, key=key, value_json=json.dumps(value) + ) + session.add(attribute) + else: + attribute.value_json = json.dumps(value) + + def set_study_system_attr(self, study_id: int, key: str, value: JSONSerializable) -> None: + with _create_scoped_session(self.scoped_session, True) as session: + study = models.StudyModel.find_or_raise_by_id(study_id, session) + attribute = models.StudySystemAttributeModel.find_by_study_and_key(study, key, session) + if attribute is None: + attribute = models.StudySystemAttributeModel( + study_id=study_id, key=key, value_json=json.dumps(value) + ) + session.add(attribute) + else: + attribute.value_json = json.dumps(value) + + def get_study_id_from_name(self, study_name: str) -> int: + with _create_scoped_session(self.scoped_session) as session: + study = models.StudyModel.find_or_raise_by_name(study_name, session) + study_id = study.study_id + + return study_id + + def get_study_name_from_id(self, study_id: int) -> str: + with _create_scoped_session(self.scoped_session) as session: + study = models.StudyModel.find_or_raise_by_id(study_id, session) + study_name = study.study_name + + return study_name + + def get_study_directions(self, study_id: int) -> list[StudyDirection]: + with _create_scoped_session(self.scoped_session) as session: + study = models.StudyModel.find_or_raise_by_id(study_id, session) + directions = [d.direction for d in study.directions] + + return directions + + def get_study_user_attrs(self, study_id: int) -> dict[str, Any]: + with _create_scoped_session(self.scoped_session) as session: + # Ensure that that study exists. + models.StudyModel.find_or_raise_by_id(study_id, session) + attributes = models.StudyUserAttributeModel.where_study_id(study_id, session) + user_attrs = {attr.key: json.loads(attr.value_json) for attr in attributes} + + return user_attrs + + def get_study_system_attrs(self, study_id: int) -> dict[str, Any]: + with _create_scoped_session(self.scoped_session) as session: + # Ensure that that study exists. 
+ models.StudyModel.find_or_raise_by_id(study_id, session) + attributes = models.StudySystemAttributeModel.where_study_id(study_id, session) + system_attrs = {attr.key: json.loads(attr.value_json) for attr in attributes} + + return system_attrs + + def get_trial_user_attrs(self, trial_id: int) -> dict[str, Any]: + with _create_scoped_session(self.scoped_session) as session: + # Ensure trial exists. + models.TrialModel.find_or_raise_by_id(trial_id, session) + + attributes = models.TrialUserAttributeModel.where_trial_id(trial_id, session) + user_attrs = {attr.key: json.loads(attr.value_json) for attr in attributes} + + return user_attrs + + def get_trial_system_attrs(self, trial_id: int) -> dict[str, Any]: + with _create_scoped_session(self.scoped_session) as session: + # Ensure trial exists. + models.TrialModel.find_or_raise_by_id(trial_id, session) + + attributes = models.TrialSystemAttributeModel.where_trial_id(trial_id, session) + system_attrs = {attr.key: json.loads(attr.value_json) for attr in attributes} + + return system_attrs + + def get_all_studies(self) -> list[FrozenStudy]: + with _create_scoped_session(self.scoped_session) as session: + studies = ( + session.query( + models.StudyModel.study_id, + models.StudyModel.study_name, + ) + .order_by(models.StudyModel.study_id) + .all() + ) + + _directions = defaultdict(list) + for direction_model in session.query(models.StudyDirectionModel).all(): + _directions[direction_model.study_id].append(direction_model.direction) + + _user_attrs = defaultdict(list) + for attribute_model in session.query(models.StudyUserAttributeModel).all(): + _user_attrs[attribute_model.study_id].append(attribute_model) + + _system_attrs = defaultdict(list) + for attribute_model in session.query(models.StudySystemAttributeModel).all(): + _system_attrs[attribute_model.study_id].append(attribute_model) + + frozen_studies = [] + for study in studies: + directions = _directions[study.study_id] + user_attrs = _user_attrs.get(study.study_id, []) + system_attrs = _system_attrs.get(study.study_id, []) + frozen_studies.append( + FrozenStudy( + study_name=study.study_name, + direction=None, + directions=directions, + user_attrs={i.key: json.loads(i.value_json) for i in user_attrs}, + system_attrs={i.key: json.loads(i.value_json) for i in system_attrs}, + study_id=study.study_id, + ) + ) + + return frozen_studies + + def create_new_trial(self, study_id: int, template_trial: FrozenTrial | None = None) -> int: + return self._create_new_trial(study_id, template_trial)._trial_id + + def _create_new_trial( + self, study_id: int, template_trial: FrozenTrial | None = None + ) -> FrozenTrial: + """Create a new trial and returns a :class:`~optuna.trial.FrozenTrial`. + + Args: + study_id: + Study id. + template_trial: + A :class:`~optuna.trial.FrozenTrial` with default values for trial attributes. + + Returns: + A :class:`~optuna.trial.FrozenTrial` instance. + + """ + + def _create_frozen_trial( + trial: "models.TrialModel", template_trial: FrozenTrial | None + ) -> FrozenTrial: + if template_trial: + frozen = copy.deepcopy(template_trial) + frozen.number = trial.number + frozen.datetime_start = trial.datetime_start + frozen._trial_id = trial.trial_id + return frozen + return FrozenTrial( + number=trial.number, + state=trial.state, + value=None, + values=None, + datetime_start=trial.datetime_start, + datetime_complete=None, + params={}, + distributions={}, + user_attrs={}, + system_attrs={}, + intermediate_values={}, + trial_id=trial.trial_id, + ) + + # Retry maximum five times. 
Deadlocks may occur in distributed environments. + MAX_RETRIES = 5 + for n_retries in range(1, MAX_RETRIES + 1): + try: + with _create_scoped_session(self.scoped_session) as session: + # This lock is necessary because the trial creation is not an atomic operation + # and the calculation of trial.number is prone to race conditions. + models.StudyModel.find_or_raise_by_id(study_id, session, for_update=True) + trial = self._get_prepared_new_trial(study_id, template_trial, session) + return _create_frozen_trial(trial, template_trial) + # sqlalchemy_exc.OperationalError is converted to ``StorageInternalError``. + except optuna.exceptions.StorageInternalError as e: + # ``OperationalError`` happens either by (1) invalid inputs, e.g., too long string, + # or (2) timeout error, which relates to deadlock. Although Error (1) is not + # intended to be caught here, it must be fixed to use RDBStorage anyways. + if n_retries == MAX_RETRIES: + raise e + + # Optuna defers to the DB administrator to reduce DB server congestion, hence + # Optuna simply uses non-exponential backoff here for retries caused by deadlock. + time.sleep(random.random() * 2.0) + + assert False, "Should not be reached." + + def _get_prepared_new_trial( + self, + study_id: int, + template_trial: FrozenTrial | None, + session: "sqlalchemy_orm.Session", + ) -> "models.TrialModel": + if template_trial is None: + trial = models.TrialModel( + study_id=study_id, + number=None, + state=TrialState.RUNNING, + datetime_start=datetime.now(), + ) + else: + # Because only `RUNNING` trials can be updated, + # we temporarily set the state of the new trial to `RUNNING`. + # After all fields of the trial have been updated, + # the state is set to `template_trial.state`. + temp_state = TrialState.RUNNING + + trial = models.TrialModel( + study_id=study_id, + number=None, + state=temp_state, + datetime_start=template_trial.datetime_start, + datetime_complete=template_trial.datetime_complete, + ) + + session.add(trial) + + # Flush the session cache to reflect the above addition operation to + # the current RDB transaction. + # + # Without flushing, the following operations (e.g, `_set_trial_param_without_commit`) + # will fail because the target trial doesn't exist in the storage yet. 
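+        # Flushing also assigns the new row's ``trial_id`` primary key, which the
+        # ``_set_trial_*_without_commit`` helpers below need when inserting values,
+        # params, and attributes for this trial.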
+ session.flush() + + if template_trial is not None: + if template_trial.values is not None and len(template_trial.values) > 1: + for objective, value in enumerate(template_trial.values): + self._set_trial_value_without_commit(session, trial.trial_id, objective, value) + elif template_trial.value is not None: + self._set_trial_value_without_commit( + session, trial.trial_id, 0, template_trial.value + ) + + for param_name, param_value in template_trial.params.items(): + distribution = template_trial.distributions[param_name] + param_value_in_internal_repr = distribution.to_internal_repr(param_value) + self._set_trial_param_without_commit( + session, trial.trial_id, param_name, param_value_in_internal_repr, distribution + ) + + for key, value in template_trial.user_attrs.items(): + self._set_trial_attr_without_commit( + session, models.TrialUserAttributeModel, trial.trial_id, key, value + ) + + for key, value in template_trial.system_attrs.items(): + self._set_trial_attr_without_commit( + session, models.TrialSystemAttributeModel, trial.trial_id, key, value + ) + + for step, intermediate_value in template_trial.intermediate_values.items(): + self._set_trial_intermediate_value_without_commit( + session, trial.trial_id, step, intermediate_value + ) + + trial.state = template_trial.state + + trial.number = trial.count_past_trials(session) + session.add(trial) + + return trial + + def set_trial_param( + self, + trial_id: int, + param_name: str, + param_value_internal: float, + distribution: distributions.BaseDistribution, + ) -> None: + with _create_scoped_session(self.scoped_session, True) as session: + self._set_trial_param_without_commit( + session, trial_id, param_name, param_value_internal, distribution + ) + + def _set_trial_param_without_commit( + self, + session: "sqlalchemy_orm.Session", + trial_id: int, + param_name: str, + param_value_internal: float, + distribution: distributions.BaseDistribution, + ) -> None: + trial = models.TrialModel.find_or_raise_by_id(trial_id, session) + self.check_trial_is_updatable(trial_id, trial.state) + + trial_param = models.TrialParamModel( + trial_id=trial_id, + param_name=param_name, + param_value=param_value_internal, + distribution_json=distributions.distribution_to_json(distribution), + ) + + trial_param.check_and_add(session, trial.study_id) + + def get_trial_param(self, trial_id: int, param_name: str) -> float: + with _create_scoped_session(self.scoped_session) as session: + trial = models.TrialModel.find_or_raise_by_id(trial_id, session) + trial_param = models.TrialParamModel.find_or_raise_by_trial_and_param_name( + trial, param_name, session + ) + param_value = trial_param.param_value + + return param_value + + def set_trial_state_values( + self, trial_id: int, state: TrialState, values: Sequence[float] | None = None + ) -> bool: + try: + with _create_scoped_session(self.scoped_session) as session: + trial = models.TrialModel.find_or_raise_by_id(trial_id, session, for_update=True) + self.check_trial_is_updatable(trial_id, trial.state) + + if values is not None: + for objective, v in enumerate(values): + self._set_trial_value_without_commit(session, trial_id, objective, v) + + if state == TrialState.RUNNING and trial.state != TrialState.WAITING: + return False + + trial.state = state + + if state == TrialState.RUNNING: + trial.datetime_start = datetime.now() + + if state.is_finished(): + trial.datetime_complete = datetime.now() + except sqlalchemy_exc.IntegrityError: + return False + return True + + def _set_trial_value_without_commit( + self, 
session: "sqlalchemy_orm.Session", trial_id: int, objective: int, value: float + ) -> None: + trial = models.TrialModel.find_or_raise_by_id(trial_id, session) + self.check_trial_is_updatable(trial_id, trial.state) + stored_value, value_type = models.TrialValueModel.value_to_stored_repr(value) + + trial_value = models.TrialValueModel.find_by_trial_and_objective(trial, objective, session) + if trial_value is None: + trial_value = models.TrialValueModel( + trial_id=trial_id, objective=objective, value=stored_value, value_type=value_type + ) + session.add(trial_value) + else: + trial_value.value = stored_value + trial_value.value_type = value_type + + def set_trial_intermediate_value( + self, trial_id: int, step: int, intermediate_value: float + ) -> None: + with _create_scoped_session(self.scoped_session, True) as session: + self._set_trial_intermediate_value_without_commit( + session, trial_id, step, intermediate_value + ) + + def _set_trial_intermediate_value_without_commit( + self, + session: "sqlalchemy_orm.Session", + trial_id: int, + step: int, + intermediate_value: float, + ) -> None: + trial = models.TrialModel.find_or_raise_by_id(trial_id, session) + self.check_trial_is_updatable(trial_id, trial.state) + + ( + stored_value, + value_type, + ) = models.TrialIntermediateValueModel.intermediate_value_to_stored_repr( + intermediate_value + ) + trial_intermediate_value = models.TrialIntermediateValueModel.find_by_trial_and_step( + trial, step, session + ) + if trial_intermediate_value is None: + trial_intermediate_value = models.TrialIntermediateValueModel( + trial_id=trial_id, + step=step, + intermediate_value=stored_value, + intermediate_value_type=value_type, + ) + session.add(trial_intermediate_value) + else: + trial_intermediate_value.intermediate_value = stored_value + trial_intermediate_value.intermediate_value_type = value_type + + def set_trial_user_attr(self, trial_id: int, key: str, value: Any) -> None: + with _create_scoped_session(self.scoped_session, True) as session: + self._set_trial_attr_without_commit( + session, + models.TrialUserAttributeModel, + trial_id, + key, + value, + ) + + def set_trial_system_attr(self, trial_id: int, key: str, value: JSONSerializable) -> None: + with _create_scoped_session(self.scoped_session, True) as session: + self._set_trial_attr_without_commit( + session, + models.TrialSystemAttributeModel, + trial_id, + key, + value, + ) + + def _set_trial_attr_without_commit( + self, + session: "sqlalchemy_orm.Session", + model_cls: type[models.TrialUserAttributeModel | models.TrialSystemAttributeModel], + trial_id: int, + key: str, + value: Any, + ) -> None: + trial = models.TrialModel.find_or_raise_by_id(trial_id, session) + self.check_trial_is_updatable(trial_id, trial.state) + + if self.engine.name == "mysql": + mysql_insert_stmt = sqlalchemy_dialects_mysql.insert(model_cls).values( + trial_id=trial_id, key=key, value_json=json.dumps(value) + ) + mysql_upsert_stmt = mysql_insert_stmt.on_duplicate_key_update( + value_json=mysql_insert_stmt.inserted.value_json + ) + session.execute(mysql_upsert_stmt) + elif self.engine.name == "sqlite" and sqlite3.sqlite_version_info >= (3, 24, 0): + sqlite_insert_stmt = sqlalchemy_dialects_sqlite.insert(model_cls).values( + trial_id=trial_id, key=key, value_json=json.dumps(value) + ) + sqlite_upsert_stmt = sqlite_insert_stmt.on_conflict_do_update( + index_elements=[model_cls.trial_id, model_cls.key], + set_=dict(value_json=sqlite_insert_stmt.excluded.value_json), + ) + session.execute(sqlite_upsert_stmt) + else: + # 
TODO(porink0424): Add support for other databases, e.g., PostgreSQL. + attribute = model_cls.find_by_trial_and_key(trial, key, session) + if attribute is None: + attribute = model_cls(trial_id=trial_id, key=key, value_json=json.dumps(value)) + session.add(attribute) + else: + attribute.value_json = json.dumps(value) + + def get_trial_id_from_study_id_trial_number(self, study_id: int, trial_number: int) -> int: + with _create_scoped_session(self.scoped_session) as session: + trial_id = ( + session.query(models.TrialModel.trial_id) + .filter( + models.TrialModel.number == trial_number, + models.TrialModel.study_id == study_id, + ) + .one_or_none() + ) + if trial_id is None: + raise KeyError( + "No trial with trial number {} exists in study with study_id {}.".format( + trial_number, study_id + ) + ) + return trial_id[0] + + def get_trial(self, trial_id: int) -> FrozenTrial: + with _create_scoped_session(self.scoped_session) as session: + trial_model = models.TrialModel.find_or_raise_by_id(trial_id, session) + frozen_trial = self._build_frozen_trial_from_trial_model(trial_model) + + return frozen_trial + + def get_all_trials( + self, + study_id: int, + deepcopy: bool = True, + states: Container[TrialState] | None = None, + ) -> list[FrozenTrial]: + trials = self._get_trials(study_id, states, set(), -1) + + return copy.deepcopy(trials) if deepcopy else trials + + def _get_trials( + self, + study_id: int, + states: Container[TrialState] | None, + included_trial_ids: set[int], + trial_id_greater_than: int, + ) -> list[FrozenTrial]: + included_trial_ids = set( + trial_id for trial_id in included_trial_ids if trial_id <= trial_id_greater_than + ) + + with _create_scoped_session(self.scoped_session) as session: + # Ensure that the study exists. + models.StudyModel.find_or_raise_by_id(study_id, session) + query = ( + session.query(models.TrialModel) + .options(sqlalchemy_orm.selectinload(models.TrialModel.params)) + .options(sqlalchemy_orm.selectinload(models.TrialModel.values)) + .options(sqlalchemy_orm.selectinload(models.TrialModel.user_attributes)) + .options(sqlalchemy_orm.selectinload(models.TrialModel.system_attributes)) + .options(sqlalchemy_orm.selectinload(models.TrialModel.intermediate_values)) + .filter( + models.TrialModel.study_id == study_id, + ) + ) + + if states is not None: + # This assertion is for type checkers, since `states` is required to be Container + # in the base class while `models.TrialModel.state.in_` requires Iterable. + assert isinstance(states, Iterable) + query = query.filter(models.TrialModel.state.in_(states)) + + try: + if len(included_trial_ids) > 0 and trial_id_greater_than > -1: + _query = query.filter( + sqlalchemy.or_( + models.TrialModel.trial_id.in_(included_trial_ids), + models.TrialModel.trial_id > trial_id_greater_than, + ) + ) + elif trial_id_greater_than > -1: + _query = query.filter(models.TrialModel.trial_id > trial_id_greater_than) + else: + _query = query + trial_models = _query.order_by(models.TrialModel.trial_id).all() + except sqlalchemy_exc.OperationalError as e: + # Likely exceeding the number of maximum allowed variables using IN. + # This number differ between database dialects. For SQLite for instance, see + # https://www.sqlite.org/limits.html and the section describing + # SQLITE_MAX_VARIABLE_NUMBER. + + _logger.warning( + "Caught an error from sqlalchemy: {}. Falling back to a slower alternative. 
" + "".format(str(e)) + ) + + trial_models = query.order_by(models.TrialModel.trial_id).all() + trial_models = [ + t + for t in trial_models + if t.trial_id in included_trial_ids or t.trial_id > trial_id_greater_than + ] + + trials = [self._build_frozen_trial_from_trial_model(trial) for trial in trial_models] + + return trials + + def _build_frozen_trial_from_trial_model(self, trial: "models.TrialModel") -> FrozenTrial: + values: list[float] | None + if trial.values: + values = [0 for _ in trial.values] + for value_model in trial.values: + values[value_model.objective] = models.TrialValueModel.stored_repr_to_value( + value_model.value, value_model.value_type + ) + else: + values = None + + params = sorted(trial.params, key=lambda p: p.param_id) + + return FrozenTrial( + number=trial.number, + state=trial.state, + value=None, + values=values, + datetime_start=trial.datetime_start, + datetime_complete=trial.datetime_complete, + params={ + p.param_name: distributions.json_to_distribution( + p.distribution_json + ).to_external_repr(p.param_value) + for p in params + }, + distributions={ + p.param_name: distributions.json_to_distribution(p.distribution_json) + for p in params + }, + user_attrs={attr.key: json.loads(attr.value_json) for attr in trial.user_attributes}, + system_attrs={ + attr.key: json.loads(attr.value_json) for attr in trial.system_attributes + }, + intermediate_values={ + v.step: models.TrialIntermediateValueModel.stored_repr_to_intermediate_value( + v.intermediate_value, v.intermediate_value_type + ) + for v in trial.intermediate_values + }, + trial_id=trial.trial_id, + ) + + def get_best_trial(self, study_id: int) -> FrozenTrial: + with _create_scoped_session(self.scoped_session) as session: + _directions = self.get_study_directions(study_id) + if len(_directions) > 1: + raise RuntimeError( + "Best trial can be obtained only for single-objective optimization." + ) + direction = _directions[0] + + if direction == StudyDirection.MAXIMIZE: + trial_id = models.TrialModel.find_max_value_trial_id(study_id, 0, session) + else: + trial_id = models.TrialModel.find_min_value_trial_id(study_id, 0, session) + + return self.get_trial(trial_id) + + @staticmethod + def _set_default_engine_kwargs_for_mysql(url: str, engine_kwargs: dict[str, Any]) -> None: + # Skip if RDB is not MySQL. + if not url.startswith("mysql"): + return + + # Do not overwrite value. + if "pool_pre_ping" in engine_kwargs: + return + + # If True, the connection pool checks liveness of connections at every checkout. + # Without this option, trials that take longer than `wait_timeout` may cause connection + # errors. For further details, please refer to the following document: + # https://docs.sqlalchemy.org/en/13/core/pooling.html#pool-disconnects-pessimistic + engine_kwargs["pool_pre_ping"] = True + _logger.debug("pool_pre_ping=True was set to engine_kwargs to prevent connection timeout.") + + @staticmethod + def _fill_storage_url_template(template: str) -> str: + return template.format(SCHEMA_VERSION=models.SCHEMA_VERSION) + + def remove_session(self) -> None: + """Removes the current session. + + A session is stored in SQLAlchemy's ThreadLocalRegistry for each thread. This method + closes and removes the session which is associated to the current thread. Particularly, + under multi-thread use cases, it is important to call this method *from each thread*. + Otherwise, all sessions and their associated DB connections are destructed by a thread + that occasionally invoked the garbage collector. 
By default, it is not allowed to touch + a SQLite connection from threads other than the thread that created the connection. + Therefore, we need to explicitly close the connection from each thread. + + """ + + self.scoped_session.remove() + + def upgrade(self) -> None: + """Upgrade the storage schema.""" + + self._version_manager.upgrade() + + def get_current_version(self) -> str: + """Return the schema version currently used by this storage.""" + + return self._version_manager.get_current_version() + + def get_head_version(self) -> str: + """Return the latest schema version.""" + + return self._version_manager.get_head_version() + + def get_all_versions(self) -> list[str]: + """Return the schema version list.""" + + return self._version_manager.get_all_versions() + + def record_heartbeat(self, trial_id: int) -> None: + with _create_scoped_session(self.scoped_session, True) as session: + # Fetch heartbeat with read-only. + heartbeat = models.TrialHeartbeatModel.where_trial_id(trial_id, session) + if heartbeat is None: # heartbeat record does not exist. + heartbeat = models.TrialHeartbeatModel(trial_id=trial_id) + session.add(heartbeat) + else: + # Re-fetch the existing heartbeat with the write authorization. + heartbeat = models.TrialHeartbeatModel.where_trial_id(trial_id, session, True) + assert heartbeat is not None + heartbeat.heartbeat = session.execute(sqlalchemy.func.now()).scalar() + + def _get_stale_trial_ids(self, study_id: int) -> list[int]: + assert self.heartbeat_interval is not None + if self.grace_period is None: + grace_period = 2 * self.heartbeat_interval + else: + grace_period = self.grace_period + stale_trial_ids = [] + + with _create_scoped_session(self.scoped_session, True) as session: + current_heartbeat = session.execute(sqlalchemy.func.now()).scalar() + assert current_heartbeat is not None + # Added the following line to prevent mixing of timezone-aware and timezone-naive + # `datetime` in PostgreSQL. 
See + # https://github.com/optuna/optuna/pull/2190#issuecomment-766605088 for details + current_heartbeat = current_heartbeat.replace(tzinfo=None) + + running_trials = ( + session.query(models.TrialModel) + .options(sqlalchemy_orm.selectinload(models.TrialModel.heartbeats)) + .filter(models.TrialModel.state == TrialState.RUNNING) + .filter(models.TrialModel.study_id == study_id) + .all() + ) + for trial in running_trials: + if len(trial.heartbeats) == 0: + continue + assert len(trial.heartbeats) == 1 + heartbeat = trial.heartbeats[0].heartbeat + if current_heartbeat - heartbeat > timedelta(seconds=grace_period): + stale_trial_ids.append(trial.trial_id) + + return stale_trial_ids + + def get_heartbeat_interval(self) -> int | None: + return self.heartbeat_interval + + def get_failed_trial_callback( + self, + ) -> Callable[["optuna.study.Study", FrozenTrial], None] | None: + return self.failed_trial_callback + + +class _VersionManager: + def __init__( + self, + url: str, + engine: "sqlalchemy.engine.Engine", + scoped_session: "sqlalchemy_orm.scoped_session", + ) -> None: + self.url = url + self.engine = engine + self.scoped_session = scoped_session + self._init_version_info_model() + self._init_alembic() + + def _init_version_info_model(self) -> None: + with _create_scoped_session(self.scoped_session, True) as session: + version_info = models.VersionInfoModel.find(session) + if version_info is not None: + return + + version_info = models.VersionInfoModel( + schema_version=models.SCHEMA_VERSION, + library_version=version.__version__, + ) + session.add(version_info) + + def _init_alembic(self) -> None: + logging.getLogger("alembic").setLevel(logging.WARN) + + with self.engine.connect() as connection: + context = alembic_migration.MigrationContext.configure(connection) + is_initialized = context.get_current_revision() is not None + + if is_initialized: + # The `alembic_version` table already exists and is not empty. + return + + if self._is_alembic_supported(): + revision = self.get_head_version() + else: + # The storage has been created before alembic is introduced. + revision = self._get_base_version() + + self._set_alembic_revision(revision) + + def _set_alembic_revision(self, revision: str) -> None: + with self.engine.connect() as connection: + context = alembic_migration.MigrationContext.configure(connection) + with connection.begin(): + script = self._create_alembic_script() + context.stamp(script, revision) + + def check_table_schema_compatibility(self) -> None: + with _create_scoped_session(self.scoped_session) as session: + # NOTE: After invocation of `_init_version_info_model` method, + # it is ensured that a `VersionInfoModel` entry exists. + version_info = models.VersionInfoModel.find(session) + + assert version_info is not None + + current_version = self.get_current_version() + head_version = self.get_head_version() + if current_version == head_version: + return + + message = ( + "The runtime optuna version {} is no longer compatible with the table schema " + "(set up by optuna {}). ".format(version.__version__, version_info.library_version) + ) + known_versions = self.get_all_versions() + + if current_version in known_versions: + message += ( + "Please execute `$ optuna storage upgrade --storage $STORAGE_URL` " + "for upgrading the storage." + ) + else: + message += ( + "Please try updating optuna to the latest version by `$ pip install -U optuna`." 
+ ) + + raise RuntimeError(message) + + def get_current_version(self) -> str: + with self.engine.connect() as connection: + context = alembic_migration.MigrationContext.configure(connection) + version = context.get_current_revision() + assert version is not None + + return version + + def get_head_version(self) -> str: + script = self._create_alembic_script() + current_head = script.get_current_head() + assert current_head is not None + return current_head + + def _get_base_version(self) -> str: + script = self._create_alembic_script() + base = script.get_base() + assert base is not None, "There should be exactly one base, i.e. v0.9.0.a." + return base + + def get_all_versions(self) -> list[str]: + script = self._create_alembic_script() + return [r.revision for r in script.walk_revisions()] + + def upgrade(self) -> None: + config = self._create_alembic_config() + alembic_command.upgrade(config, "head") + + with _create_scoped_session(self.scoped_session, True) as session: + version_info = models.VersionInfoModel.find(session) + assert version_info is not None + version_info.schema_version = models.SCHEMA_VERSION + version_info.library_version = version.__version__ + + def _is_alembic_supported(self) -> bool: + with _create_scoped_session(self.scoped_session) as session: + version_info = models.VersionInfoModel.find(session) + + if version_info is None: + # `None` means this storage was created just now. + return True + + return version_info.schema_version == models.SCHEMA_VERSION + + def _create_alembic_script(self) -> "alembic_script.ScriptDirectory": + config = self._create_alembic_config() + script = alembic_script.ScriptDirectory.from_config(config) + return script + + def _create_alembic_config(self) -> "alembic_config.Config": + alembic_dir = os.path.join(os.path.dirname(__file__), "alembic") + + config = alembic_config.Config(os.path.join(os.path.dirname(__file__), "alembic.ini")) + config.set_main_option("script_location", escape_alembic_config_value(alembic_dir)) + config.set_main_option("sqlalchemy.url", escape_alembic_config_value(self.url)) + return config + + +def escape_alembic_config_value(value: str) -> str: + # We must escape '%' in a value string because the character + # is regarded as the trigger of variable expansion. + # Please see the documentation of `configparser.BasicInterpolation` for more details. + return value.replace("%", "%%") diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/journal/__init__.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/journal/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..61831b97b8875c325690886057e4c417d954e6b6 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/journal/__init__.py @@ -0,0 +1,19 @@ +from optuna.storages.journal._base import BaseJournalBackend +from optuna.storages.journal._file import JournalFileBackend +from optuna.storages.journal._file import JournalFileOpenLock +from optuna.storages.journal._file import JournalFileSymlinkLock +from optuna.storages.journal._redis import JournalRedisBackend +from optuna.storages.journal._storage import JournalStorage + + +# NOTE(nabenabe0928): Do not add objects deprecated at v4.0.0 here, e.g., JournalFileStorage +# because ``optuna/storages/journal`` was added at v4.0.0 and it will be confusing to keep them in +# the non-deprecated directory. 
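+# Illustrative sketch: on file systems where the default symlink-based lock is not
+# suitable, an explicit lock object can be passed to the file backend (both classes
+# are exported below). The path used here is a placeholder.
+#
+#     from optuna.storages.journal import JournalFileBackend, JournalFileOpenLock
+#
+#     backend = JournalFileBackend(
+#         "./optuna_journal_storage.log",
+#         lock_obj=JournalFileOpenLock("./optuna_journal_storage.log"),
+#     )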
+__all__ = [ + "JournalFileBackend", + "BaseJournalBackend", + "JournalFileOpenLock", + "JournalFileSymlinkLock", + "JournalRedisBackend", + "JournalStorage", +] diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/journal/_base.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/journal/_base.py new file mode 100644 index 0000000000000000000000000000000000000000..207326bf3454c771c479d5a8ef457f2e10c9fa8b --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/journal/_base.py @@ -0,0 +1,86 @@ +from __future__ import annotations + +import abc +from typing import Any + +from optuna._deprecated import deprecated_class + + +class BaseJournalBackend(abc.ABC): + """Base class for Journal storages. + + Storage classes implementing this base class must guarantee process safety. This means, + multiple processes might concurrently call ``read_logs`` and ``append_logs``. If the + backend storage does not internally support mutual exclusion mechanisms, such as locks, + you might want to use :class:`~optuna.storages.journal.JournalFileSymlinkLock` or + :class:`~optuna.storages.journal.JournalFileOpenLock` for creating a critical section. + + """ + + @abc.abstractmethod + def read_logs(self, log_number_from: int) -> list[dict[str, Any]]: + """Read logs with a log number greater than or equal to ``log_number_from``. + + If ``log_number_from`` is 0, read all the logs. + + Args: + log_number_from: + A non-negative integer value indicating which logs to read. + + Returns: + Logs with log number greater than or equal to ``log_number_from``. + """ + + raise NotImplementedError + + @abc.abstractmethod + def append_logs(self, logs: list[dict[str, Any]]) -> None: + """Append logs to the backend. + + Args: + logs: + A list that contains json-serializable logs. + """ + + raise NotImplementedError + + +class BaseJournalSnapshot(abc.ABC): + """Optional base class for Journal storages. + + Storage classes implementing this base class may work faster when + constructing the internal state from the large amount of logs. + """ + + @abc.abstractmethod + def save_snapshot(self, snapshot: bytes) -> None: + """Save snapshot to the backend. + + Args: + snapshot: A serialized snapshot (bytes) + """ + raise NotImplementedError + + @abc.abstractmethod + def load_snapshot(self) -> bytes | None: + """Load snapshot from the backend. + + Returns: + A serialized snapshot (bytes) if found, otherwise :obj:`None`. + """ + raise NotImplementedError + + +@deprecated_class( + "4.0.0", "6.0.0", text="Use :class:`~optuna.storages.journal.BaseJournalBackend` instead." +) +class BaseJournalLogStorage(BaseJournalBackend): + """Base class for Journal storages. + + Storage classes implementing this base class must guarantee process safety. This means, + multiple processes might concurrently call ``read_logs`` and ``append_logs``. If the + backend storage does not internally support mutual exclusion mechanisms, such as locks, + you might want to use :class:`~optuna.storages.journal.JournalFileSymlinkLock` or + :class:`~optuna.storages.journal.JournalFileOpenLock` for creating a critical section. 
+ + """ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/journal/_file.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/journal/_file.py new file mode 100644 index 0000000000000000000000000000000000000000..4449508e46e1c494b533e12c2d7d543049d6619a --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/journal/_file.py @@ -0,0 +1,319 @@ +from __future__ import annotations + +import abc +from collections.abc import Iterator +from contextlib import contextmanager +import errno +import json +import os +import time +from typing import Any +import uuid +import warnings + +from optuna._deprecated import deprecated_class +from optuna.storages.journal._base import BaseJournalBackend + + +LOCK_FILE_SUFFIX = ".lock" +RENAME_FILE_SUFFIX = ".rename" + + +class JournalFileBackend(BaseJournalBackend): + """File storage class for Journal log backend. + + Compared to SQLite3, the benefit of this backend is that it is more suitable for + environments where the file system does not support ``fcntl()`` file locking. + For example, as written in the `SQLite3 FAQ `__, + SQLite3 might not work on NFS (Network File System) since ``fcntl()`` file locking + is broken on many NFS implementations. In such scenarios, this backend provides + several workarounds for locking files. For more details, refer to the `Medium blog post`_. + + .. _Medium blog post: https://medium.com/optuna/distributed-optimization-via-nfs\ + -using-optunas-new-operation-based-logging-storage-9815f9c3f932 + + It's important to note that, similar to SQLite3, this class doesn't support a high + level of write concurrency, as outlined in the `SQLAlchemy documentation`_. However, + in typical situations where the objective function is computationally expensive, Optuna + users don't need to be concerned about this limitation. The reason being, the write + operations are not the bottleneck as long as the objective function doesn't invoke + :meth:`~optuna.trial.Trial.report` and :meth:`~optuna.trial.Trial.set_user_attr` excessively. + + .. _SQLAlchemy documentation: https://docs.sqlalchemy.org/en/20/dialects/sqlite.html\ + #database-locking-behavior-concurrency + + Args: + file_path: + Path of file to persist the log to. + + lock_obj: + Lock object for process exclusivity. An instance of + :class:`~optuna.storages.journal.JournalFileSymlinkLock` and + :class:`~optuna.storages.journal.JournalFileOpenLock` can be passed. + """ + + def __init__(self, file_path: str, lock_obj: BaseJournalFileLock | None = None) -> None: + self._file_path: str = file_path + self._lock = lock_obj or JournalFileSymlinkLock(self._file_path) + if not os.path.exists(self._file_path): + open(self._file_path, "ab").close() # Create a file if it does not exist. + self._log_number_offset: dict[int, int] = {0: 0} + + def read_logs(self, log_number_from: int) -> list[dict[str, Any]]: + logs = [] + with open(self._file_path, "rb") as f: + # Maintain remaining_log_size to allow writing by another process + # while reading the log. 
+ remaining_log_size = os.stat(self._file_path).st_size + log_number_start = 0 + if log_number_from in self._log_number_offset: + f.seek(self._log_number_offset[log_number_from]) + log_number_start = log_number_from + remaining_log_size -= self._log_number_offset[log_number_from] + + last_decode_error = None + for log_number, line in enumerate(f, start=log_number_start): + byte_len = len(line) + remaining_log_size -= byte_len + if remaining_log_size < 0: + break + if last_decode_error is not None: + raise last_decode_error + if log_number + 1 not in self._log_number_offset: + self._log_number_offset[log_number + 1] = ( + self._log_number_offset[log_number] + byte_len + ) + if log_number < log_number_from: + continue + + # Ensure that each line ends with line separators (\n, \r\n). + if not line.endswith(b"\n"): + last_decode_error = ValueError("Invalid log format.") + del self._log_number_offset[log_number + 1] + continue + try: + logs.append(json.loads(line)) + except json.JSONDecodeError as err: + last_decode_error = err + del self._log_number_offset[log_number + 1] + return logs + + def append_logs(self, logs: list[dict[str, Any]]) -> None: + with get_lock_file(self._lock): + what_to_write = ( + "\n".join([json.dumps(log, separators=(",", ":")) for log in logs]) + "\n" + ) + with open(self._file_path, "ab") as f: + f.write(what_to_write.encode("utf-8")) + f.flush() + os.fsync(f.fileno()) + + +class BaseJournalFileLock(abc.ABC): + @abc.abstractmethod + def acquire(self) -> bool: + raise NotImplementedError + + @abc.abstractmethod + def release(self) -> None: + raise NotImplementedError + + +class JournalFileSymlinkLock(BaseJournalFileLock): + """Lock class for synchronizing processes for NFSv2 or later. + + On acquiring the lock, link system call is called to create an exclusive file. The file is + deleted when the lock is released. In NFS environments prior to NFSv3, use this instead of + :class:`~optuna.storages.journal.JournalFileOpenLock`. + + Args: + filepath: + The path of the file whose race condition must be protected. + grace_period: + Grace period before an existing lock is forcibly released. + """ + + def __init__(self, filepath: str, grace_period: int | None = 30) -> None: + self._lock_target_file = filepath + self._lock_file = filepath + LOCK_FILE_SUFFIX + if grace_period is not None: + if grace_period <= 0: + raise ValueError("The value of `grace_period` should be a positive integer.") + if grace_period < 3: + warnings.warn("The value of `grace_period` might be too small. ") + self.grace_period = grace_period + + def acquire(self) -> bool: + """Acquire a lock in a blocking way by creating a symbolic link of a file. + + Returns: + :obj:`True` if it succeeded in creating a symbolic link of ``self._lock_target_file``. + """ + sleep_secs = 0.001 + last_update_monotonic_time = time.monotonic() + mtime = None + while True: + try: + os.symlink(self._lock_target_file, self._lock_file) + return True + except OSError as err: + if err.errno == errno.EEXIST: + if self.grace_period is not None: + try: + current_mtime = os.stat(self._lock_file).st_mtime + except OSError: + continue + if current_mtime != mtime: + mtime = current_mtime + last_update_monotonic_time = time.monotonic() + + if time.monotonic() - last_update_monotonic_time > self.grace_period: + warnings.warn( + "The existing lock file has not been released " + "for an extended period. Forcibly releasing the lock file." 
+ ) + try: + self.release() + sleep_secs = 0.001 + except RuntimeError: + continue + + time.sleep(sleep_secs) + sleep_secs = min(sleep_secs * 2, 1) + continue + raise err + except BaseException: + self.release() + raise + + def release(self) -> None: + """Release a lock by removing the symbolic link.""" + + lock_rename_file = self._lock_file + str(uuid.uuid4()) + RENAME_FILE_SUFFIX + try: + os.rename(self._lock_file, lock_rename_file) + os.unlink(lock_rename_file) + except OSError: + raise RuntimeError("Error: did not possess lock") + except BaseException: + os.unlink(lock_rename_file) + raise + + +class JournalFileOpenLock(BaseJournalFileLock): + """Lock class for synchronizing processes for NFSv3 or later. + + On acquiring the lock, open system call is called with the O_EXCL option to create an exclusive + file. The file is deleted when the lock is released. This class is only supported when using + NFSv3 or later on kernel 2.6 or later. In prior NFS environments, use + :class:`~optuna.storages.journal.JournalFileSymlinkLock`. + + Args: + filepath: + The path of the file whose race condition must be protected. + grace_period: + Grace period before an existing lock is forcibly released. + """ + + def __init__(self, filepath: str, grace_period: int | None = 30) -> None: + self._lock_file = filepath + LOCK_FILE_SUFFIX + if grace_period is not None: + if grace_period <= 0: + raise ValueError("The value of `grace_period` should be a positive integer.") + if grace_period < 3: + warnings.warn("The value of `grace_period` might be too small. ") + self.grace_period = grace_period + + def acquire(self) -> bool: + """Acquire a lock in a blocking way by creating a lock file. + + Returns: + :obj:`True` if it succeeded in creating a ``self._lock_file``. + + """ + sleep_secs = 0.001 + last_update_monotonic_time = time.monotonic() + mtime = None + while True: + try: + open_flags = os.O_CREAT | os.O_EXCL | os.O_WRONLY + os.close(os.open(self._lock_file, open_flags)) + return True + except OSError as err: + if err.errno == errno.EEXIST: + if self.grace_period is not None: + try: + current_mtime = os.stat(self._lock_file).st_mtime + except OSError: + continue + if current_mtime != mtime: + mtime = current_mtime + last_update_monotonic_time = time.monotonic() + + if time.monotonic() - last_update_monotonic_time > self.grace_period: + warnings.warn( + "The existing lock file has not been released " + "for an extended period. Forcibly releasing the lock file." + ) + try: + self.release() + sleep_secs = 0.001 + except RuntimeError: + continue + + time.sleep(sleep_secs) + sleep_secs = min(sleep_secs * 2, 1) + continue + raise err + except BaseException: + self.release() + raise + + def release(self) -> None: + """Release a lock by removing the created file.""" + + lock_rename_file = self._lock_file + str(uuid.uuid4()) + RENAME_FILE_SUFFIX + try: + os.rename(self._lock_file, lock_rename_file) + os.unlink(lock_rename_file) + except OSError: + raise RuntimeError("Error: did not possess lock") + except BaseException: + os.unlink(lock_rename_file) + raise + + +@contextmanager +def get_lock_file(lock_obj: BaseJournalFileLock) -> Iterator[None]: + lock_obj.acquire() + try: + yield + finally: + lock_obj.release() + + +@deprecated_class( + "4.0.0", "6.0.0", text="Use :class:`~optuna.storages.journal.JournalFileBackend` instead." 
+) +class JournalFileStorage(JournalFileBackend): + pass + + +@deprecated_class( + deprecated_version="4.0.0", + removed_version="6.0.0", + name="The import path :class:`~optuna.storages.JournalFileOpenLock`", + text="Use :class:`~optuna.storages.journal.JournalFileOpenLock` instead.", +) +class DeprecatedJournalFileOpenLock(JournalFileOpenLock): + pass + + +@deprecated_class( + deprecated_version="4.0.0", + removed_version="6.0.0", + name="The import path :class:`~optuna.storages.JournalFileSymlinkLock`", + text="Use :class:`~optuna.storages.journal.JournalFileSymlinkLock` instead.", +) +class DeprecatedJournalFileSymlinkLock(JournalFileSymlinkLock): + pass diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/journal/_redis.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/journal/_redis.py new file mode 100644 index 0000000000000000000000000000000000000000..811cdc1eb4cc9f2585402d8d83e7afd99be49bd7 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/journal/_redis.py @@ -0,0 +1,107 @@ +from __future__ import annotations + +import json +import time +from typing import Any + +from optuna._deprecated import deprecated_class +from optuna._experimental import experimental_class +from optuna._imports import try_import +from optuna.storages.journal._base import BaseJournalBackend +from optuna.storages.journal._base import BaseJournalSnapshot + + +with try_import() as _imports: + import redis + + +@experimental_class("3.1.0") +class JournalRedisBackend(BaseJournalBackend, BaseJournalSnapshot): + """Redis storage class for Journal log backend. + + Args: + url: + URL of the redis storage, password and db are optional. + (ie: ``redis://localhost:6379``) + use_cluster: + Flag whether you use the Redis cluster. If this is :obj:`False`, it is assumed that + you use the standalone Redis server and ensured that a write operation is atomic. This + provides the consistency of the preserved logs. If this is :obj:`True`, it is assumed + that you use the Redis cluster and not ensured that a write operation is atomic. This + means the preserved logs can be inconsistent due to network errors, and may + cause errors. + prefix: + Prefix of the preserved key of logs. This is useful when multiple users work on one + Redis server. 
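+
+    A construction sketch (illustrative; the URL is a placeholder)::
+
+        storage = optuna.storages.JournalStorage(
+            optuna.storages.journal.JournalRedisBackend("redis://localhost:6379")
+        )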
+ """ + + def __init__(self, url: str, use_cluster: bool = False, prefix: str = "") -> None: + _imports.check() + + self._url = url + self._redis = redis.Redis.from_url(url) + self._use_cluster = use_cluster + self._prefix = prefix + + def __getstate__(self) -> dict[Any, Any]: + state = self.__dict__.copy() + del state["_redis"] + return state + + def __setstate__(self, state: dict[Any, Any]) -> None: + self.__dict__.update(state) + self._redis = redis.Redis.from_url(self._url) + + def read_logs(self, log_number_from: int) -> list[dict[str, Any]]: + max_log_number_bytes = self._redis.get(f"{self._prefix}:log_number") + if max_log_number_bytes is None: + return [] + max_log_number = int(max_log_number_bytes) + + logs = [] + for log_number in range(log_number_from, max_log_number + 1): + sleep_secs = 0.1 + while True: + log = self._redis.get(self._key_log_id(log_number)) + if log is not None: + break + time.sleep(sleep_secs) + sleep_secs = min(sleep_secs * 2, 10) + try: + logs.append(json.loads(log)) + except json.JSONDecodeError as err: + if log_number != max_log_number: + raise err + return logs + + def append_logs(self, logs: list[dict[str, Any]]) -> None: + self._redis.setnx(f"{self._prefix}:log_number", -1) + for log in logs: + if not self._use_cluster: + self._redis.eval( # type: ignore + "local i = redis.call('incr', string.format('%s:log_number', ARGV[1])) " + "redis.call('set', string.format('%s:log:%d', ARGV[1], i), ARGV[2])", + 0, + self._prefix, + json.dumps(log), + ) + else: + log_number = self._redis.incr(f"{self._prefix}:log_number", 1) + self._redis.set(self._key_log_id(log_number), json.dumps(log)) + + def save_snapshot(self, snapshot: bytes) -> None: + self._redis.set(f"{self._prefix}:snapshot", snapshot) + + def load_snapshot(self) -> bytes | None: + snapshot_bytes = self._redis.get(f"{self._prefix}:snapshot") + return snapshot_bytes + + def _key_log_id(self, log_number: int) -> str: + return f"{self._prefix}:log:{log_number}" + + +@deprecated_class( + "4.0.0", "6.0.0", text="Use :class:`~optuna.storages.journal.JournalRedisBackend` instead." 
+) +class JournalRedisStorage(JournalRedisBackend): + pass diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/journal/_storage.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/journal/_storage.py new file mode 100644 index 0000000000000000000000000000000000000000..1a2ad783c1db4a1a6c76c2091a123790625a679e --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/storages/journal/_storage.py @@ -0,0 +1,661 @@ +from __future__ import annotations + +from collections.abc import Container +from collections.abc import Sequence +import copy +import datetime +import enum +import pickle +import threading +from typing import Any +import uuid + +import optuna +from optuna._typing import JSONSerializable +from optuna.distributions import BaseDistribution +from optuna.distributions import check_distribution_compatibility +from optuna.distributions import distribution_to_json +from optuna.distributions import json_to_distribution +from optuna.exceptions import DuplicatedStudyError +from optuna.exceptions import UpdateFinishedTrialError +from optuna.storages import BaseStorage +from optuna.storages._base import DEFAULT_STUDY_NAME_PREFIX +from optuna.storages.journal._base import BaseJournalBackend +from optuna.storages.journal._base import BaseJournalSnapshot +from optuna.study._frozen import FrozenStudy +from optuna.study._study_direction import StudyDirection +from optuna.trial import FrozenTrial +from optuna.trial import TrialState + + +_logger = optuna.logging.get_logger(__name__) + +NOT_FOUND_MSG = "Record does not exist." +# A heuristic interval number to dump snapshots +SNAPSHOT_INTERVAL = 100 + + +class JournalOperation(enum.IntEnum): + CREATE_STUDY = 0 + DELETE_STUDY = 1 + SET_STUDY_USER_ATTR = 2 + SET_STUDY_SYSTEM_ATTR = 3 + CREATE_TRIAL = 4 + SET_TRIAL_PARAM = 5 + SET_TRIAL_STATE_VALUES = 6 + SET_TRIAL_INTERMEDIATE_VALUE = 7 + SET_TRIAL_USER_ATTR = 8 + SET_TRIAL_SYSTEM_ATTR = 9 + + +class JournalStorage(BaseStorage): + """Storage class for Journal storage backend. + + Note that library users can instantiate this class, but the attributes + provided by this class are not supposed to be directly accessed by them. + + Journal storage writes a record of every operation to the database as it is executed and + at the same time, keeps a latest snapshot of the database in-memory. If the database crashes + for any reason, the storage can re-establish the contents in memory by replaying the + operations stored from the beginning. + + Journal storage has several benefits over the conventional value logging storages. + + 1. The number of IOs can be reduced because of larger granularity of logs. + 2. Journal storage has simpler backend API than value logging storage. + 3. Journal storage keeps a snapshot in-memory so no need to add more cache. + + Example: + + .. code:: + + import optuna + + + def objective(trial): ... + + + storage = optuna.storages.JournalStorage( + optuna.storages.journal.JournalFileBackend("./optuna_journal_storage.log") + ) + + study = optuna.create_study(storage=storage) + study.optimize(objective) + + In a Windows environment, an error message "A required privilege is not held by the + client" may appear. In this case, you can solve the problem with creating storage + by specifying :class:`~optuna.storages.journal.JournalFileOpenLock` as follows. + + .. 
code:: + + file_path = "./optuna_journal_storage.log" + lock_obj = optuna.storages.journal.JournalFileOpenLock(file_path) + + storage = optuna.storages.JournalStorage( + optuna.storages.journal.JournalFileBackend(file_path, lock_obj=lock_obj), + ) + """ + + def __init__(self, log_storage: BaseJournalBackend) -> None: + self._worker_id_prefix = str(uuid.uuid4()) + "-" + self._backend = log_storage + self._thread_lock = threading.Lock() + self._replay_result = JournalStorageReplayResult(self._worker_id_prefix) + + with self._thread_lock: + if isinstance(self._backend, BaseJournalSnapshot): + snapshot = self._backend.load_snapshot() + if snapshot is not None: + self.restore_replay_result(snapshot) + self._sync_with_backend() + + def __getstate__(self) -> dict[Any, Any]: + state = self.__dict__.copy() + del state["_worker_id_prefix"] + del state["_replay_result"] + del state["_thread_lock"] + return state + + def __setstate__(self, state: dict[Any, Any]) -> None: + self.__dict__.update(state) + self._worker_id_prefix = str(uuid.uuid4()) + "-" + self._replay_result = JournalStorageReplayResult(self._worker_id_prefix) + self._thread_lock = threading.Lock() + + def restore_replay_result(self, snapshot: bytes) -> None: + try: + r: JournalStorageReplayResult | None = pickle.loads(snapshot) + except (pickle.UnpicklingError, KeyError): + _logger.warning("Failed to restore `JournalStorageReplayResult`.") + return + if r is None: + return + if not isinstance(r, JournalStorageReplayResult): + _logger.warning("The restored object is not `JournalStorageReplayResult`.") + return + r._worker_id_prefix = self._worker_id_prefix + r._worker_id_to_owned_trial_id = {} + r._last_created_trial_id_by_this_process = -1 + self._replay_result = r + + def _write_log(self, op_code: int, extra_fields: dict[str, Any]) -> None: + worker_id = self._replay_result.worker_id + self._backend.append_logs([{"op_code": op_code, "worker_id": worker_id, **extra_fields}]) + + def _sync_with_backend(self) -> None: + logs = self._backend.read_logs(self._replay_result.log_number_read) + self._replay_result.apply_logs(logs) + + def create_new_study( + self, directions: Sequence[StudyDirection], study_name: str | None = None + ) -> int: + study_name = study_name or DEFAULT_STUDY_NAME_PREFIX + str(uuid.uuid4()) + + with self._thread_lock: + self._write_log( + JournalOperation.CREATE_STUDY, {"study_name": study_name, "directions": directions} + ) + self._sync_with_backend() + + for frozen_study in self._replay_result.get_all_studies(): + if frozen_study.study_name != study_name: + continue + + _logger.info("A new study created in Journal with name: {}".format(study_name)) + study_id = frozen_study._study_id + + # Dump snapshot here. + if ( + isinstance(self._backend, BaseJournalSnapshot) + and study_id != 0 + and study_id % SNAPSHOT_INTERVAL == 0 + ): + self._backend.save_snapshot(pickle.dumps(self._replay_result)) + + return study_id + assert False, "Should not reach." 
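An illustrative sketch of how the pieces above fit together with the Redis backend from _redis.py (rather than the file backend shown in the class docstring); it assumes a reachable Redis server at ``redis://localhost:6379``, the optional ``redis`` dependency installed, and a toy objective:

.. code::

    import optuna
    from optuna.storages import JournalStorage
    from optuna.storages.journal import JournalRedisBackend


    def objective(trial):
        x = trial.suggest_float("x", -10, 10)
        return (x - 2) ** 2


    # Every storage operation is appended to the Redis journal; workers pointed at
    # the same URL and prefix replay the same log and therefore share the study.
    backend = JournalRedisBackend("redis://localhost:6379", prefix="demo")
    storage = JournalStorage(backend)
    study = optuna.create_study(storage=storage, study_name="journal-redis-demo")
    study.optimize(objective, n_trials=20)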
+ + def delete_study(self, study_id: int) -> None: + with self._thread_lock: + self._write_log(JournalOperation.DELETE_STUDY, {"study_id": study_id}) + self._sync_with_backend() + + def set_study_user_attr(self, study_id: int, key: str, value: Any) -> None: + log: dict[str, Any] = {"study_id": study_id, "user_attr": {key: value}} + with self._thread_lock: + self._write_log(JournalOperation.SET_STUDY_USER_ATTR, log) + self._sync_with_backend() + + def set_study_system_attr(self, study_id: int, key: str, value: JSONSerializable) -> None: + log: dict[str, Any] = {"study_id": study_id, "system_attr": {key: value}} + with self._thread_lock: + self._write_log(JournalOperation.SET_STUDY_SYSTEM_ATTR, log) + self._sync_with_backend() + + def get_study_id_from_name(self, study_name: str) -> int: + with self._thread_lock: + self._sync_with_backend() + for study in self._replay_result.get_all_studies(): + if study.study_name == study_name: + return study._study_id + raise KeyError(NOT_FOUND_MSG) + + def get_study_name_from_id(self, study_id: int) -> str: + with self._thread_lock: + self._sync_with_backend() + return self._replay_result.get_study(study_id).study_name + + def get_study_directions(self, study_id: int) -> list[StudyDirection]: + with self._thread_lock: + self._sync_with_backend() + return self._replay_result.get_study(study_id).directions + + def get_study_user_attrs(self, study_id: int) -> dict[str, Any]: + with self._thread_lock: + self._sync_with_backend() + return self._replay_result.get_study(study_id).user_attrs + + def get_study_system_attrs(self, study_id: int) -> dict[str, Any]: + with self._thread_lock: + self._sync_with_backend() + return self._replay_result.get_study(study_id).system_attrs + + def get_all_studies(self) -> list[FrozenStudy]: + with self._thread_lock: + self._sync_with_backend() + return copy.deepcopy(self._replay_result.get_all_studies()) + + # Basic trial manipulation + def create_new_trial(self, study_id: int, template_trial: FrozenTrial | None = None) -> int: + log: dict[str, Any] = { + "study_id": study_id, + "datetime_start": datetime.datetime.now().isoformat(timespec="microseconds"), + } + + if template_trial: + log["state"] = template_trial.state + if template_trial.values is not None and len(template_trial.values) > 1: + log["value"] = None + log["values"] = template_trial.values + else: + log["value"] = template_trial.value + log["values"] = None + if template_trial.datetime_start: + log["datetime_start"] = template_trial.datetime_start.isoformat( + timespec="microseconds" + ) + else: + log["datetime_start"] = None + if template_trial.datetime_complete: + log["datetime_complete"] = template_trial.datetime_complete.isoformat( + timespec="microseconds" + ) + + log["distributions"] = { + k: distribution_to_json(dist) for k, dist in template_trial.distributions.items() + } + log["params"] = { + k: template_trial.distributions[k].to_internal_repr(param) + for k, param in template_trial.params.items() + } + log["user_attrs"] = template_trial.user_attrs + log["system_attrs"] = template_trial.system_attrs + log["intermediate_values"] = template_trial.intermediate_values + + with self._thread_lock: + self._write_log(JournalOperation.CREATE_TRIAL, log) + self._sync_with_backend() + trial_id = self._replay_result._last_created_trial_id_by_this_process + + # Dump snapshot here. 
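# A snapshot is persisted every SNAPSHOT_INTERVAL (100) trials so that a worker
# attaching later can restore the pickled replay state via restore_replay_result()
# and then replay only the journal entries appended after the snapshot, instead of
# the whole log from the beginning.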
+ if ( + isinstance(self._backend, BaseJournalSnapshot) + and trial_id != 0 + and trial_id % SNAPSHOT_INTERVAL == 0 + ): + self._backend.save_snapshot(pickle.dumps(self._replay_result)) + return trial_id + + def set_trial_param( + self, + trial_id: int, + param_name: str, + param_value_internal: float, + distribution: BaseDistribution, + ) -> None: + log: dict[str, Any] = { + "trial_id": trial_id, + "param_name": param_name, + "param_value_internal": param_value_internal, + "distribution": distribution_to_json(distribution), + } + + with self._thread_lock: + self._write_log(JournalOperation.SET_TRIAL_PARAM, log) + self._sync_with_backend() + + def get_trial_id_from_study_id_trial_number(self, study_id: int, trial_number: int) -> int: + with self._thread_lock: + self._sync_with_backend() + if len(self._replay_result._study_id_to_trial_ids[study_id]) <= trial_number: + raise KeyError( + "No trial with trial number {} exists in study with study_id {}.".format( + trial_number, study_id + ) + ) + return self._replay_result._study_id_to_trial_ids[study_id][trial_number] + + def set_trial_state_values( + self, trial_id: int, state: TrialState, values: Sequence[float] | None = None + ) -> bool: + log: dict[str, Any] = { + "trial_id": trial_id, + "state": state, + "values": values, + } + + if state == TrialState.RUNNING: + log["datetime_start"] = datetime.datetime.now().isoformat(timespec="microseconds") + elif state.is_finished(): + log["datetime_complete"] = datetime.datetime.now().isoformat(timespec="microseconds") + + with self._thread_lock: + self._write_log(JournalOperation.SET_TRIAL_STATE_VALUES, log) + self._sync_with_backend() + + if state == TrialState.RUNNING and trial_id != self._replay_result.owned_trial_id: + return False + else: + return True + + def set_trial_intermediate_value( + self, trial_id: int, step: int, intermediate_value: float + ) -> None: + log: dict[str, Any] = { + "trial_id": trial_id, + "step": step, + "intermediate_value": intermediate_value, + } + + with self._thread_lock: + self._write_log(JournalOperation.SET_TRIAL_INTERMEDIATE_VALUE, log) + self._sync_with_backend() + + def set_trial_user_attr(self, trial_id: int, key: str, value: Any) -> None: + log: dict[str, Any] = { + "trial_id": trial_id, + "user_attr": {key: value}, + } + + with self._thread_lock: + self._write_log(JournalOperation.SET_TRIAL_USER_ATTR, log) + self._sync_with_backend() + + def set_trial_system_attr(self, trial_id: int, key: str, value: JSONSerializable) -> None: + log: dict[str, Any] = { + "trial_id": trial_id, + "system_attr": {key: value}, + } + + with self._thread_lock: + self._write_log(JournalOperation.SET_TRIAL_SYSTEM_ATTR, log) + self._sync_with_backend() + + def get_trial(self, trial_id: int) -> FrozenTrial: + with self._thread_lock: + self._sync_with_backend() + return self._replay_result.get_trial(trial_id) + + def get_all_trials( + self, + study_id: int, + deepcopy: bool = True, + states: Container[TrialState] | None = None, + ) -> list[FrozenTrial]: + with self._thread_lock: + self._sync_with_backend() + frozen_trials = self._replay_result.get_all_trials(study_id, states) + if deepcopy: + return copy.deepcopy(frozen_trials) + return frozen_trials + + +class JournalStorageReplayResult: + def __init__(self, worker_id_prefix: str) -> None: + self.log_number_read = 0 + self._worker_id_prefix = worker_id_prefix + self._studies: dict[int, FrozenStudy] = {} + self._trials: dict[int, FrozenTrial] = {} + + self._study_id_to_trial_ids: dict[int, list[int]] = {} + 
self._trial_id_to_study_id: dict[int, int] = {} + self._next_study_id: int = 0 + self._worker_id_to_owned_trial_id: dict[str, int] = {} + + def apply_logs(self, logs: list[dict[str, Any]]) -> None: + for log in logs: + self.log_number_read += 1 + op = log["op_code"] + if op == JournalOperation.CREATE_STUDY: + self._apply_create_study(log) + elif op == JournalOperation.DELETE_STUDY: + self._apply_delete_study(log) + elif op == JournalOperation.SET_STUDY_USER_ATTR: + self._apply_set_study_user_attr(log) + elif op == JournalOperation.SET_STUDY_SYSTEM_ATTR: + self._apply_set_study_system_attr(log) + elif op == JournalOperation.CREATE_TRIAL: + self._apply_create_trial(log) + elif op == JournalOperation.SET_TRIAL_PARAM: + self._apply_set_trial_param(log) + elif op == JournalOperation.SET_TRIAL_STATE_VALUES: + self._apply_set_trial_state_values(log) + elif op == JournalOperation.SET_TRIAL_INTERMEDIATE_VALUE: + self._apply_set_trial_intermediate_value(log) + elif op == JournalOperation.SET_TRIAL_USER_ATTR: + self._apply_set_trial_user_attr(log) + elif op == JournalOperation.SET_TRIAL_SYSTEM_ATTR: + self._apply_set_trial_system_attr(log) + else: + assert False, "Should not reach." + + def get_study(self, study_id: int) -> FrozenStudy: + if study_id not in self._studies: + raise KeyError(NOT_FOUND_MSG) + return self._studies[study_id] + + def get_all_studies(self) -> list[FrozenStudy]: + return list(self._studies.values()) + + def get_trial(self, trial_id: int) -> FrozenTrial: + if trial_id not in self._trials: + raise KeyError(NOT_FOUND_MSG) + return self._trials[trial_id] + + def get_all_trials( + self, study_id: int, states: Container[TrialState] | None + ) -> list[FrozenTrial]: + if study_id not in self._studies: + raise KeyError(NOT_FOUND_MSG) + + frozen_trials: list[FrozenTrial] = [] + for trial_id in self._study_id_to_trial_ids[study_id]: + trial = self._trials[trial_id] + if states is None or trial.state in states: + frozen_trials.append(trial) + return frozen_trials + + @property + def worker_id(self) -> str: + return self._worker_id_prefix + str(threading.get_ident()) + + @property + def owned_trial_id(self) -> int | None: + return self._worker_id_to_owned_trial_id.get(self.worker_id) + + def _is_issued_by_this_worker(self, log: dict[str, Any]) -> bool: + return log["worker_id"] == self.worker_id + + def _study_exists(self, study_id: int, log: dict[str, Any]) -> bool: + if study_id in self._studies: + return True + if self._is_issued_by_this_worker(log): + raise KeyError(NOT_FOUND_MSG) + return False + + def _apply_create_study(self, log: dict[str, Any]) -> None: + study_name = log["study_name"] + directions = [StudyDirection(d) for d in log["directions"]] + + if study_name in [s.study_name for s in self._studies.values()]: + if self._is_issued_by_this_worker(log): + raise DuplicatedStudyError( + "Another study with name '{}' already exists. 
" + "Please specify a different name, or reuse the existing one " + "by setting `load_if_exists` (for Python API) or " + "`--skip-if-exists` flag (for CLI).".format(study_name) + ) + return + + study_id = self._next_study_id + self._next_study_id += 1 + + self._studies[study_id] = FrozenStudy( + study_name=study_name, + direction=None, + user_attrs={}, + system_attrs={}, + study_id=study_id, + directions=directions, + ) + self._study_id_to_trial_ids[study_id] = [] + + def _apply_delete_study(self, log: dict[str, Any]) -> None: + study_id = log["study_id"] + + if self._study_exists(study_id, log): + fs = self._studies.pop(study_id) + assert fs._study_id == study_id + + def _apply_set_study_user_attr(self, log: dict[str, Any]) -> None: + study_id = log["study_id"] + + if self._study_exists(study_id, log): + assert len(log["user_attr"]) == 1 + self._studies[study_id].user_attrs.update(log["user_attr"]) + + def _apply_set_study_system_attr(self, log: dict[str, Any]) -> None: + study_id = log["study_id"] + + if self._study_exists(study_id, log): + assert len(log["system_attr"]) == 1 + self._studies[study_id].system_attrs.update(log["system_attr"]) + + def _apply_create_trial(self, log: dict[str, Any]) -> None: + study_id = log["study_id"] + + if not self._study_exists(study_id, log): + return + + trial_id = len(self._trials) + distributions = {} + if "distributions" in log: + distributions = {k: json_to_distribution(v) for k, v in log["distributions"].items()} + params = {} + if "params" in log: + params = {k: distributions[k].to_external_repr(p) for k, p in log["params"].items()} + if log["datetime_start"] is not None: + datetime_start = datetime.datetime.fromisoformat(log["datetime_start"]) + else: + datetime_start = None + if "datetime_complete" in log: + datetime_complete = datetime.datetime.fromisoformat(log["datetime_complete"]) + else: + datetime_complete = None + + self._trials[trial_id] = FrozenTrial( + trial_id=trial_id, + number=len(self._study_id_to_trial_ids[study_id]), + state=TrialState(log.get("state", TrialState.RUNNING.value)), + params=params, + distributions=distributions, + user_attrs=log.get("user_attrs", {}), + system_attrs=log.get("system_attrs", {}), + value=log.get("value", None), + intermediate_values={int(k): v for k, v in log.get("intermediate_values", {}).items()}, + datetime_start=datetime_start, + datetime_complete=datetime_complete, + values=log.get("values", None), + ) + + self._study_id_to_trial_ids[study_id].append(trial_id) + self._trial_id_to_study_id[trial_id] = study_id + + if self._is_issued_by_this_worker(log): + self._last_created_trial_id_by_this_process = trial_id + if self._trials[trial_id].state == TrialState.RUNNING: + self._worker_id_to_owned_trial_id[self.worker_id] = trial_id + + def _apply_set_trial_param(self, log: dict[str, Any]) -> None: + trial_id = log["trial_id"] + + if not self._trial_exists_and_updatable(trial_id, log): + return + + param_name = log["param_name"] + param_value_internal = log["param_value_internal"] + distribution = json_to_distribution(log["distribution"]) + + study_id = self._trial_id_to_study_id[trial_id] + + for prev_trial_id in self._study_id_to_trial_ids[study_id]: + prev_trial = self._trials[prev_trial_id] + if param_name in prev_trial.params.keys(): + try: + check_distribution_compatibility( + prev_trial.distributions[param_name], distribution + ) + except Exception: + if self._is_issued_by_this_worker(log): + raise + return + break + + trial = copy.copy(self._trials[trial_id]) + trial.params = { + 
**copy.copy(trial.params), + param_name: distribution.to_external_repr(param_value_internal), + } + trial.distributions = {**copy.copy(trial.distributions), param_name: distribution} + self._trials[trial_id] = trial + + def _apply_set_trial_state_values(self, log: dict[str, Any]) -> None: + trial_id = log["trial_id"] + + if not self._trial_exists_and_updatable(trial_id, log): + return + + state = TrialState(log["state"]) + if state == self._trials[trial_id].state and state == TrialState.RUNNING: + return + + trial = copy.copy(self._trials[trial_id]) + if state == TrialState.RUNNING: + trial.datetime_start = datetime.datetime.fromisoformat(log["datetime_start"]) + if self._is_issued_by_this_worker(log): + self._worker_id_to_owned_trial_id[self.worker_id] = trial_id + if state.is_finished(): + trial.datetime_complete = datetime.datetime.fromisoformat(log["datetime_complete"]) + trial.state = state + if log["values"] is not None: + trial.values = log["values"] + + self._trials[trial_id] = trial + + def _apply_set_trial_intermediate_value(self, log: dict[str, Any]) -> None: + trial_id = log["trial_id"] + + if self._trial_exists_and_updatable(trial_id, log): + trial = copy.copy(self._trials[trial_id]) + trial.intermediate_values = { + **copy.copy(trial.intermediate_values), + log["step"]: log["intermediate_value"], + } + self._trials[trial_id] = trial + + def _apply_set_trial_user_attr(self, log: dict[str, Any]) -> None: + trial_id = log["trial_id"] + + if self._trial_exists_and_updatable(trial_id, log): + assert len(log["user_attr"]) == 1 + trial = copy.copy(self._trials[trial_id]) + trial.user_attrs = {**copy.copy(trial.user_attrs), **log["user_attr"]} + self._trials[trial_id] = trial + + def _apply_set_trial_system_attr(self, log: dict[str, Any]) -> None: + trial_id = log["trial_id"] + + if self._trial_exists_and_updatable(trial_id, log): + assert len(log["system_attr"]) == 1 + trial = copy.copy(self._trials[trial_id]) + trial.system_attrs = { + **copy.copy(trial.system_attrs), + **log["system_attr"], + } + self._trials[trial_id] = trial + + def _trial_exists_and_updatable(self, trial_id: int, log: dict[str, Any]) -> bool: + if trial_id not in self._trials: + if self._is_issued_by_this_worker(log): + raise KeyError(NOT_FOUND_MSG) + return False + elif self._trials[trial_id].state.is_finished(): + if self._is_issued_by_this_worker(log): + raise UpdateFinishedTrialError( + "Trial#{} has already finished and can not be updated.".format( + self._trials[trial_id].number + ) + ) + return False + else: + return True diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/study/__init__.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/study/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7b63b844d0c23b639ca0c87e46e86e6e83a8c1ff --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/study/__init__.py @@ -0,0 +1,24 @@ +from optuna._callbacks import MaxTrialsCallback +from optuna.study._study_direction import StudyDirection +from optuna.study._study_summary import StudySummary +from optuna.study.study import copy_study +from optuna.study.study import create_study +from optuna.study.study import delete_study +from optuna.study.study import get_all_study_names +from optuna.study.study import get_all_study_summaries +from optuna.study.study import load_study +from optuna.study.study import Study + + +__all__ = [ + "MaxTrialsCallback", + 
"StudyDirection", + "StudySummary", + "copy_study", + "create_study", + "delete_study", + "get_all_study_names", + "get_all_study_summaries", + "load_study", + "Study", +] diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/study/_constrained_optimization.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/study/_constrained_optimization.py new file mode 100644 index 0000000000000000000000000000000000000000..d289e13f55bb2b4459a082670a90211db18a3590 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/study/_constrained_optimization.py @@ -0,0 +1,27 @@ +from __future__ import annotations + +from collections.abc import Sequence + +from optuna.trial import FrozenTrial + + +_CONSTRAINTS_KEY = "constraints" + + +def _get_feasible_trials(trials: Sequence[FrozenTrial]) -> list[FrozenTrial]: + """Return feasible trials from given trials. + + This function assumes that the trials were created in constrained optimization. + Therefore, if there is no violation value in the trial, it is considered infeasible. + + + Returns: + A list of feasible trials. + """ + + feasible_trials = [] + for trial in trials: + constraints = trial.system_attrs.get(_CONSTRAINTS_KEY) + if constraints is not None and all(x <= 0.0 for x in constraints): + feasible_trials.append(trial) + return feasible_trials diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/study/_dataframe.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/study/_dataframe.py new file mode 100644 index 0000000000000000000000000000000000000000..e9f51858f9a0e0db96580af9f688417918314b7a --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/study/_dataframe.py @@ -0,0 +1,111 @@ +from __future__ import annotations + +import collections +from typing import Any + +import optuna +from optuna._imports import try_import +from optuna.trial._state import TrialState + + +with try_import() as _imports: + # `Study.trials_dataframe` is disabled if pandas is not available. + import pandas as pd + +# Required for type annotation in `Study.trials_dataframe`. +if not _imports.is_successful(): + pd = object # NOQA + +__all__ = ["pd"] + + +def _create_records_and_aggregate_column( + study: "optuna.Study", attrs: tuple[str, ...] +) -> tuple[list[dict[tuple[str, str], Any]], list[tuple[str, str]]]: + attrs_to_df_columns: dict[str, str] = {} + for attr in attrs: + if attr.startswith("_"): + # Python conventional underscores are omitted in the dataframe. + df_column = attr[1:] + else: + df_column = attr + attrs_to_df_columns[attr] = df_column + + # column_agg is an aggregator of column names. + # Keys of column agg are attributes of `FrozenTrial` such as 'trial_id' and 'params'. + # Values are dataframe columns such as ('trial_id', '') and ('params', 'n_layers'). 
+ column_agg: collections.defaultdict[str, set] = collections.defaultdict(set) + non_nested_attr = "" + + metric_names = study.metric_names + + records = [] + for trial in study.get_trials(deepcopy=False): + record = {} + for attr, df_column in attrs_to_df_columns.items(): + value = getattr(trial, attr) + if isinstance(value, TrialState): + value = value.name + if isinstance(value, dict): + for nested_attr, nested_value in value.items(): + record[(df_column, nested_attr)] = nested_value + column_agg[attr].add((df_column, nested_attr)) + elif attr == "values": + # Expand trial.values. + # trial.values should be None when the trial's state is FAIL or PRUNED. + trial_values = [None] * len(study.directions) if value is None else value + iterator = ( + enumerate(trial_values) + if metric_names is None + else zip(metric_names, trial_values) + ) + for nested_attr, nested_value in iterator: + record[(df_column, nested_attr)] = nested_value + column_agg[attr].add((df_column, nested_attr)) + elif isinstance(value, list): + for nested_attr, nested_value in enumerate(value): + record[(df_column, nested_attr)] = nested_value + column_agg[attr].add((df_column, nested_attr)) + elif attr == "value": + nested_attr = non_nested_attr if metric_names is None else metric_names[0] + record[(df_column, nested_attr)] = value + column_agg[attr].add((df_column, nested_attr)) + else: + record[(df_column, non_nested_attr)] = value + column_agg[attr].add((df_column, non_nested_attr)) + + records.append(record) + + columns: list[tuple[str, str]] = sum( + (sorted(column_agg[k]) for k in attrs if k in column_agg), [] + ) + + return records, columns + + +def _flatten_columns(columns: list[tuple[str, str]]) -> list[str]: + # Flatten the `MultiIndex` columns where names are concatenated with underscores. + # Filtering is required to omit non-nested columns avoiding unwanted trailing underscores. + return ["_".join(filter(lambda c: c, map(lambda c: str(c), col))) for col in columns] + + +def _trials_dataframe( + study: "optuna.Study", attrs: tuple[str, ...], multi_index: bool +) -> "pd.DataFrame": + _imports.check() + + # If no trials, return an empty dataframe. + if len(study.get_trials(deepcopy=False)) == 0: + return pd.DataFrame() + + if "value" in attrs and study._is_multi_objective(): + attrs = tuple("values" if attr == "value" else attr for attr in attrs) + + records, columns = _create_records_and_aggregate_column(study, attrs) + + df = pd.DataFrame(records, columns=pd.MultiIndex.from_tuples(columns)) + + if not multi_index: + df.columns = _flatten_columns(columns) + + return df diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/study/_frozen.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/study/_frozen.py new file mode 100644 index 0000000000000000000000000000000000000000..d74c4529f6d58043b425ba8bdb8f88c49b908626 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/study/_frozen.py @@ -0,0 +1,89 @@ +from __future__ import annotations + +from collections.abc import Sequence +from typing import Any + +from optuna import logging +from optuna.study._study_direction import StudyDirection + + +_logger = logging.get_logger(__name__) + + +class FrozenStudy: + """Basic attributes of a :class:`~optuna.study.Study`. + + This class is private and not referenced by Optuna users. + + Attributes: + study_name: + Name of the :class:`~optuna.study.Study`. 
+ direction: + :class:`~optuna.study.StudyDirection` of the :class:`~optuna.study.Study`. + + .. note:: + This attribute is only available during single-objective optimization. + directions: + A list of :class:`~optuna.study.StudyDirection` objects. + user_attrs: + Dictionary that contains the attributes of the :class:`~optuna.study.Study` set with + :func:`optuna.study.Study.set_user_attr`. + system_attrs: + Dictionary that contains the attributes of the :class:`~optuna.study.Study` internally + set by Optuna. + + """ + + def __init__( + self, + study_name: str, + direction: StudyDirection | None, + user_attrs: dict[str, Any], + system_attrs: dict[str, Any], + study_id: int, + *, + directions: Sequence[StudyDirection] | None = None, + ): + self.study_name = study_name + if direction is None and directions is None: + raise ValueError("Specify one of `direction` and `directions`.") + elif directions is not None: + self._directions = list(directions) + elif direction is not None: + self._directions = [direction] + else: + raise ValueError("Specify only one of `direction` and `directions`.") + self.user_attrs = user_attrs + self.system_attrs = system_attrs + self._study_id = study_id + + def __eq__(self, other: Any) -> bool: + if not isinstance(other, FrozenStudy): + return NotImplemented + + return other.__dict__ == self.__dict__ + + def __lt__(self, other: Any) -> bool: + if not isinstance(other, FrozenStudy): + return NotImplemented + + return self._study_id < other._study_id + + def __le__(self, other: Any) -> bool: + if not isinstance(other, FrozenStudy): + return NotImplemented + + return self._study_id <= other._study_id + + @property + def direction(self) -> StudyDirection: + if len(self._directions) > 1: + raise RuntimeError( + "This attribute is not available during multi-objective optimization." + ) + + return self._directions[0] + + @property + def directions(self) -> list[StudyDirection]: + return self._directions diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/study/_multi_objective.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/study/_multi_objective.py new file mode 100644 index 0000000000000000000000000000000000000000..6604d631d571e831db2746786966647e2e1da291 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/study/_multi_objective.py @@ -0,0 +1,258 @@ +from __future__ import annotations + +from collections.abc import Sequence + +import numpy as np + +import optuna +from optuna.study._constrained_optimization import _get_feasible_trials +from optuna.study._study_direction import StudyDirection +from optuna.trial import FrozenTrial +from optuna.trial import TrialState + + +def _get_pareto_front_trials_by_trials( + trials: Sequence[FrozenTrial], + directions: Sequence[StudyDirection], + consider_constraint: bool = False, +) -> list[FrozenTrial]: + # NOTE(nabenabe0928): Vectorization relies on all the trials being complete. + trials = [t for t in trials if t.state == TrialState.COMPLETE] + if consider_constraint: + trials = _get_feasible_trials(trials) + if len(trials) == 0: + return [] + + if any(len(t.values) != len(directions) for t in trials): + raise ValueError( + "The number of the values and the number of the objectives must be identical." 
+ ) + + loss_values = np.asarray( + [[_normalize_value(v, d) for v, d in zip(t.values, directions)] for t in trials] + ) + on_front = _is_pareto_front(loss_values, assume_unique_lexsorted=False) + return [t for t, is_pareto in zip(trials, on_front) if is_pareto] + + +def _get_pareto_front_trials( + study: "optuna.study.Study", consider_constraint: bool = False +) -> list[FrozenTrial]: + return _get_pareto_front_trials_by_trials(study.trials, study.directions, consider_constraint) + + +def _fast_non_domination_rank( + loss_values: np.ndarray, *, penalty: np.ndarray | None = None, n_below: int | None = None +) -> np.ndarray: + """Calculate non-domination rank based on the fast non-dominated sort algorithm. + + The fast non-dominated sort algorithm assigns a rank to each trial based on the dominance + relationship of the trials, determined by the objective values and the penalty values. The + algorithm is based on `the constrained NSGA-II algorithm + `__, but the handling of the case when penalty + values are None is different. The algorithm assigns the rank according to the following + rules: + + 1. Feasible trials: First, the algorithm assigns the rank to feasible trials, whose penalty + values are less than or equal to 0, according to unconstrained version of fast non- + dominated sort. + 2. Infeasible trials: Next, the algorithm assigns the rank from the minimum penalty value of to + the maximum penalty value. + 3. Trials with no penalty information (constraints value is None): Finally, The algorithm + assigns the rank to trials with no penalty information according to unconstrained version + of fast non-dominated sort. Note that only this step is different from the original + constrained NSGA-II algorithm. + Plus, the algorithm terminates whenever the number of sorted trials reaches n_below. + + Args: + loss_values: + Objective values, which is better when it is lower, of each trials. + penalty: + Constraints values of each trials. Defaults to None. + n_below: The minimum number of top trials required to be sorted. The algorithm will + terminate when the number of sorted trials reaches n_below. Defaults to None. + + Returns: + An ndarray in the shape of (n_trials,), where each element is the non-domination rank of + each trial. The rank is 0-indexed. This function guarantees the correctness of the ranks + only up to the top-``n_below`` solutions. If a solution's rank is worse than the + top-``n_below`` solution, its rank will be guaranteed to be greater than the rank of + the top-``n_below`` solution. + """ + if len(loss_values) == 0: + return np.array([], dtype=int) + + n_below = n_below or len(loss_values) + assert n_below > 0, "n_below must be a positive integer." + + if penalty is None: + return _calculate_nondomination_rank(loss_values, n_below=n_below) + + if len(penalty) != len(loss_values): + raise ValueError( + "The length of penalty and loss_values must be same, but got " + f"len(penalty)={len(penalty)} and len(loss_values)={len(loss_values)}." + ) + + ranks = np.full(len(loss_values), -1, dtype=int) + is_penalty_nan = np.isnan(penalty) + is_feasible = np.logical_and(~is_penalty_nan, penalty <= 0) + is_infeasible = np.logical_and(~is_penalty_nan, penalty > 0) + + # First, we calculate the domination rank for feasible trials. + ranks[is_feasible] = _calculate_nondomination_rank(loss_values[is_feasible], n_below=n_below) + n_below -= int(np.count_nonzero(is_feasible)) + + # Second, we calculate the domination rank for infeasible trials. 
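# Infeasible trials are ordered purely by their penalty value (non-domination on
# a single "objective" reduces to sorting by that value), and their ranks start
# one past the worst rank assigned to the feasible trials above.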
+ top_rank_infeasible = np.max(ranks[is_feasible], initial=-1) + 1 + ranks[is_infeasible] = top_rank_infeasible + _calculate_nondomination_rank( + penalty[is_infeasible][:, np.newaxis], n_below=n_below + ) + n_below -= int(np.count_nonzero(is_infeasible)) + + # Third, we calculate the domination rank for trials with no penalty information. + top_rank_penalty_nan = np.max(ranks[~is_penalty_nan], initial=-1) + 1 + ranks[is_penalty_nan] = top_rank_penalty_nan + _calculate_nondomination_rank( + loss_values[is_penalty_nan], n_below=n_below + ) + assert np.all(ranks != -1), "All the rank must be updated." + return ranks + + +def _is_pareto_front_nd(unique_lexsorted_loss_values: np.ndarray) -> np.ndarray: + # NOTE(nabenabe0928): I tried the Kung's algorithm below, but it was not really quick. + # https://github.com/optuna/optuna/pull/5302#issuecomment-1988665532 + # As unique_lexsorted_loss_values[:, 0] is sorted, we do not need it to judge dominance. + loss_values = unique_lexsorted_loss_values[:, 1:] + n_trials = loss_values.shape[0] + on_front = np.zeros(n_trials, dtype=bool) + # TODO(nabenabe): Replace with the following once Python 3.8 is dropped. + # nondominated_indices: np.ndarray[tuple[int], np.dtype[np.signedinteger]] = ... + nondominated_indices: np.ndarray[tuple[int, ...], np.dtype[np.signedinteger]] = np.arange( + n_trials + ) + while len(loss_values): + # The following judges `np.any(loss_values[i] < loss_values[0])` for each `i`. + nondominated_and_not_top = np.any(loss_values < loss_values[0], axis=1) + # NOTE: trials[j] cannot dominate trials[i] for i < j because of lexsort. + # Therefore, nondominated_indices[0] is always non-dominated. + on_front[nondominated_indices[0]] = True + loss_values = loss_values[nondominated_and_not_top] + # TODO(nabenabe): Replace with the following once Python 3.8 is dropped. + # ... = cast(np.ndarray[tuple[int], np.dtype[np.signedinteger]], ...) + nondominated_indices = nondominated_indices[nondominated_and_not_top] + + return on_front + + +def _is_pareto_front_2d(unique_lexsorted_loss_values: np.ndarray) -> np.ndarray: + n_trials = unique_lexsorted_loss_values.shape[0] + cummin_value1 = np.minimum.accumulate(unique_lexsorted_loss_values[:, 1]) + on_front = np.ones(n_trials, dtype=bool) + on_front[1:] = cummin_value1[1:] < cummin_value1[:-1] # True if cummin value1 is new minimum. + return on_front + + +def _is_pareto_front_for_unique_sorted(unique_lexsorted_loss_values: np.ndarray) -> np.ndarray: + (n_trials, n_objectives) = unique_lexsorted_loss_values.shape + if n_objectives == 1: + on_front = np.zeros(len(unique_lexsorted_loss_values), dtype=bool) + on_front[0] = True # Only the first element is Pareto optimal. + return on_front + elif n_objectives == 2: + return _is_pareto_front_2d(unique_lexsorted_loss_values) + else: + return _is_pareto_front_nd(unique_lexsorted_loss_values) + + +def _is_pareto_front(loss_values: np.ndarray, assume_unique_lexsorted: bool) -> np.ndarray: + # NOTE(nabenabe): If assume_unique_lexsorted=True, but loss_values is not a unique array, + # Duplicated Pareto solutions will be filtered out except for the earliest occurrences. + # If assume_unique_lexsorted=True and loss_values[:, 0] is not sorted, then the result will be + # incorrect. 
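# Worked example (minimization): for
#     loss_values = np.array([[1.0, 3.0], [2.0, 2.0], [3.0, 1.0], [2.0, 3.0]])
# _is_pareto_front(loss_values, assume_unique_lexsorted=False) returns
# [True, True, True, False]: the last point [2.0, 3.0] is dominated by
# [2.0, 2.0], while the first three points are mutually non-dominated.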
+ if assume_unique_lexsorted: + return _is_pareto_front_for_unique_sorted(loss_values) + + unique_lexsorted_loss_values, order_inv = np.unique(loss_values, axis=0, return_inverse=True) + on_front = _is_pareto_front_for_unique_sorted(unique_lexsorted_loss_values) + # NOTE(nabenabe): We can remove `.reshape(-1)` if ``numpy==2.0.0`` is not used. + # https://github.com/numpy/numpy/issues/26738 + # TODO: Remove `.reshape(-1)` once `numpy==2.0.0` is obsolete. + return on_front[order_inv.reshape(-1)] + + +def _calculate_nondomination_rank( + loss_values: np.ndarray, *, n_below: int | None = None +) -> np.ndarray: + if len(loss_values) == 0 or (n_below is not None and n_below <= 0): + return np.zeros(len(loss_values), dtype=int) + + (n_trials, n_objectives) = loss_values.shape + if n_objectives == 1: + _, ranks = np.unique(loss_values[:, 0], return_inverse=True) + return ranks + + # It ensures that trials[j] will not dominate trials[i] for i < j. + # np.unique does lexsort. + unique_lexsorted_loss_values, order_inv = np.unique(loss_values, return_inverse=True, axis=0) + n_unique = unique_lexsorted_loss_values.shape[0] + # Clip n_below. + n_below = min(n_below or len(unique_lexsorted_loss_values), len(unique_lexsorted_loss_values)) + ranks = np.zeros(n_unique, dtype=int) + rank = 0 + indices = np.arange(n_unique) + while n_unique - indices.size < n_below: + on_front = _is_pareto_front(unique_lexsorted_loss_values, assume_unique_lexsorted=True) + ranks[indices[on_front]] = rank + # Remove the recent Pareto solutions. + indices = indices[~on_front] + unique_lexsorted_loss_values = unique_lexsorted_loss_values[~on_front] + rank += 1 + + ranks[indices] = rank # Rank worse than the top n_below is defined as the worst rank. + # NOTE(nabenabe): We can remove `.reshape(-1)` if ``numpy==2.0.0`` is not used. + # https://github.com/numpy/numpy/issues/26738 + # TODO: Remove `.reshape(-1)` once `numpy==2.0.0` is obsolete. + return ranks[order_inv.reshape(-1)] + + +def _dominates( + trial0: FrozenTrial, trial1: FrozenTrial, directions: Sequence[StudyDirection] +) -> bool: + values0 = trial0.values + values1 = trial1.values + + if trial0.state != TrialState.COMPLETE: + return False + + if trial1.state != TrialState.COMPLETE: + return True + + assert values0 is not None + assert values1 is not None + + if len(values0) != len(values1): + raise ValueError("Trials with different numbers of objectives cannot be compared.") + + if len(values0) != len(directions): + raise ValueError( + "The number of the values and the number of the objectives are mismatched." 
+ ) + + normalized_values0 = [_normalize_value(v, d) for v, d in zip(values0, directions)] + normalized_values1 = [_normalize_value(v, d) for v, d in zip(values1, directions)] + + if normalized_values0 == normalized_values1: + return False + + return all(v0 <= v1 for v0, v1 in zip(normalized_values0, normalized_values1)) + + +def _normalize_value(value: float | None, direction: StudyDirection) -> float: + if value is None: + return float("inf") + + if direction is StudyDirection.MAXIMIZE: + value = -value + + return value diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/study/_optimize.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/study/_optimize.py new file mode 100644 index 0000000000000000000000000000000000000000..d6de2c7418092ce60bc2cc580d8e3aa37f64ed30 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/study/_optimize.py @@ -0,0 +1,270 @@ +from __future__ import annotations + +from collections.abc import Callable +from collections.abc import Iterable +from collections.abc import Sequence +from concurrent.futures import FIRST_COMPLETED +from concurrent.futures import Future +from concurrent.futures import ThreadPoolExecutor +from concurrent.futures import wait +import copy +import datetime +import gc +import itertools +import os +import sys +from typing import Any +import warnings + +import optuna +from optuna import exceptions +from optuna import logging +from optuna import progress_bar as pbar_module +from optuna import trial as trial_module +from optuna.exceptions import ExperimentalWarning +from optuna.storages._heartbeat import get_heartbeat_thread +from optuna.storages._heartbeat import is_heartbeat_enabled +from optuna.study._tell import _tell_with_warning +from optuna.trial import FrozenTrial +from optuna.trial import TrialState + + +_logger = logging.get_logger(__name__) + + +def _optimize( + study: "optuna.Study", + func: "optuna.study.study.ObjectiveFuncType", + n_trials: int | None = None, + timeout: float | None = None, + n_jobs: int = 1, + catch: tuple[type[Exception], ...] 
= (), + callbacks: Iterable[Callable[["optuna.Study", FrozenTrial], None]] | None = None, + gc_after_trial: bool = False, + show_progress_bar: bool = False, +) -> None: + if not isinstance(catch, tuple): + raise TypeError( + "The catch argument is of type '{}' but must be a tuple.".format(type(catch).__name__) + ) + + if study._thread_local.in_optimize_loop: + raise RuntimeError("Nested invocation of `Study.optimize` method isn't allowed.") + + if show_progress_bar and n_trials is None and timeout is not None and n_jobs != 1: + warnings.warn("The timeout-based progress bar is not supported with n_jobs != 1.") + show_progress_bar = False + + progress_bar = pbar_module._ProgressBar(show_progress_bar, n_trials, timeout) + + study._stop_flag = False + + try: + if n_jobs == 1: + _optimize_sequential( + study, + func, + n_trials, + timeout, + catch, + callbacks, + gc_after_trial, + reseed_sampler_rng=False, + time_start=None, + progress_bar=progress_bar, + ) + else: + if n_jobs == -1: + n_jobs = os.cpu_count() or 1 + + time_start = datetime.datetime.now() + futures: set[Future] = set() + + with ThreadPoolExecutor(max_workers=n_jobs) as executor: + for n_submitted_trials in itertools.count(): + if study._stop_flag: + break + + if ( + timeout is not None + and (datetime.datetime.now() - time_start).total_seconds() > timeout + ): + break + + if n_trials is not None and n_submitted_trials >= n_trials: + break + + if len(futures) >= n_jobs: + completed, futures = wait(futures, return_when=FIRST_COMPLETED) + # Raise if exception occurred in executing the completed futures. + for f in completed: + f.result() + + futures.add( + executor.submit( + _optimize_sequential, + study, + func, + 1, + timeout, + catch, + callbacks, + gc_after_trial, + True, + time_start, + progress_bar, + ) + ) + finally: + study._thread_local.in_optimize_loop = False + progress_bar.close() + + +def _optimize_sequential( + study: "optuna.Study", + func: "optuna.study.study.ObjectiveFuncType", + n_trials: int | None, + timeout: float | None, + catch: tuple[type[Exception], ...], + callbacks: Iterable[Callable[["optuna.Study", FrozenTrial], None]] | None, + gc_after_trial: bool, + reseed_sampler_rng: bool, + time_start: datetime.datetime | None, + progress_bar: pbar_module._ProgressBar | None, +) -> None: + # Here we set `in_optimize_loop = True`, not at the beginning of the `_optimize()` function. + # Because it is a thread-local object and `n_jobs` option spawns new threads. + study._thread_local.in_optimize_loop = True + if reseed_sampler_rng: + study.sampler.reseed_rng() + + i_trial = 0 + + if time_start is None: + time_start = datetime.datetime.now() + + while True: + if study._stop_flag: + break + + if n_trials is not None: + if i_trial >= n_trials: + break + i_trial += 1 + + if timeout is not None: + elapsed_seconds = (datetime.datetime.now() - time_start).total_seconds() + if elapsed_seconds >= timeout: + break + + try: + frozen_trial = _run_trial(study, func, catch) + finally: + # The following line mitigates memory problems that can be occurred in some + # environments (e.g., services that use computing containers such as GitHub Actions). + # Please refer to the following PR for further details: + # https://github.com/optuna/optuna/pull/325. 
+ if gc_after_trial: + gc.collect() + + if callbacks is not None: + for callback in callbacks: + callback(study, copy.deepcopy(frozen_trial)) + + if progress_bar is not None: + elapsed_seconds = (datetime.datetime.now() - time_start).total_seconds() + progress_bar.update(elapsed_seconds, study) + + study._storage.remove_session() + + +def _run_trial( + study: "optuna.Study", + func: "optuna.study.study.ObjectiveFuncType", + catch: tuple[type[Exception], ...], +) -> trial_module.FrozenTrial: + if is_heartbeat_enabled(study._storage): + with warnings.catch_warnings(): + # Ignore ExperimentalWarning when using fail_stale_trials internally. + warnings.simplefilter("ignore", ExperimentalWarning) + optuna.storages.fail_stale_trials(study) + + trial = study.ask() + + state: TrialState | None = None + value_or_values: float | Sequence[float] | None = None + func_err: Exception | KeyboardInterrupt | None = None + func_err_fail_exc_info: Any | None = None + + with get_heartbeat_thread(trial._trial_id, study._storage): + try: + value_or_values = func(trial) + except exceptions.TrialPruned as e: + # TODO(mamu): Handle multi-objective cases. + state = TrialState.PRUNED + func_err = e + except (Exception, KeyboardInterrupt) as e: + state = TrialState.FAIL + func_err = e + func_err_fail_exc_info = sys.exc_info() + + # `_tell_with_warning` may raise during trial post-processing. + try: + frozen_trial, warning_message = _tell_with_warning( + study=study, + trial=trial, + value_or_values=value_or_values, + state=state, + suppress_warning=True, + ) + except Exception: + frozen_trial = study._storage.get_trial(trial._trial_id) + warning_message = None + raise + finally: + if frozen_trial.state == TrialState.COMPLETE: + study._log_completed_trial(frozen_trial) + elif frozen_trial.state == TrialState.PRUNED: + _logger.info("Trial {} pruned. {}".format(frozen_trial.number, str(func_err))) + elif frozen_trial.state == TrialState.FAIL: + if func_err is not None: + _log_failed_trial( + frozen_trial, + repr(func_err), + exc_info=func_err_fail_exc_info, + value_or_values=value_or_values, + ) + elif warning_message is not None: + _log_failed_trial( + frozen_trial, + warning_message, + value_or_values=value_or_values, + ) + else: + assert False, "Should not reach." + else: + assert False, "Should not reach." + + if ( + frozen_trial.state == TrialState.FAIL + and func_err is not None + and not isinstance(func_err, catch) + ): + raise func_err + return frozen_trial + + +def _log_failed_trial( + trial: FrozenTrial, + message: str | Warning, + exc_info: Any = None, + value_or_values: Any = None, +) -> None: + _logger.warning( + "Trial {} failed with parameters: {} because of the following error: {}.".format( + trial.number, trial.params, message + ), + exc_info=exc_info, + ) + + _logger.warning("Trial {} failed with value {}.".format(trial.number, repr(value_or_values))) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/study/_study_direction.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/study/_study_direction.py new file mode 100644 index 0000000000000000000000000000000000000000..dd2d911953f6711c692f37017948d370200feb22 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/study/_study_direction.py @@ -0,0 +1,18 @@ +import enum + + +class StudyDirection(enum.IntEnum): + """Direction of a :class:`~optuna.study.Study`. + + Attributes: + NOT_SET: + Direction has not been set. 
+ MINIMIZE: + :class:`~optuna.study.Study` minimizes the objective function. + MAXIMIZE: + :class:`~optuna.study.Study` maximizes the objective function. + """ + + NOT_SET = 0 + MINIMIZE = 1 + MAXIMIZE = 2 diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/study/_study_summary.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/study/_study_summary.py new file mode 100644 index 0000000000000000000000000000000000000000..ad2a7af2a42e8a33d34c706c5a0f28e116cf4597 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/study/_study_summary.py @@ -0,0 +1,123 @@ +from __future__ import annotations + +from collections.abc import Sequence +import datetime +from typing import Any +import warnings + +from optuna import logging +from optuna import trial +from optuna.study._study_direction import StudyDirection + + +_logger = logging.get_logger(__name__) + + +class StudySummary: + """Basic attributes and aggregated results of a :class:`~optuna.study.Study`. + + See also :func:`optuna.study.get_all_study_summaries`. + + Attributes: + study_name: + Name of the :class:`~optuna.study.Study`. + direction: + :class:`~optuna.study.StudyDirection` of the :class:`~optuna.study.Study`. + + .. note:: + This attribute is only available during single-objective optimization. + directions: + A sequence of :class:`~optuna.study.StudyDirection` objects. + best_trial: + :class:`optuna.trial.FrozenTrial` with best objective value in the + :class:`~optuna.study.Study`. + user_attrs: + Dictionary that contains the attributes of the :class:`~optuna.study.Study` set with + :func:`optuna.study.Study.set_user_attr`. + system_attrs: + Dictionary that contains the attributes of the :class:`~optuna.study.Study` internally + set by Optuna. + + .. warning:: + Deprecated in v3.1.0. ``system_attrs`` argument will be removed in the future. + The removal of this feature is currently scheduled for v5.0.0, + but this schedule is subject to change. + See https://github.com/optuna/optuna/releases/tag/v3.1.0. + n_trials: + The number of trials ran in the :class:`~optuna.study.Study`. + datetime_start: + Datetime where the :class:`~optuna.study.Study` started. 
+ + """ + + def __init__( + self, + study_name: str, + direction: StudyDirection | None, + best_trial: trial.FrozenTrial | None, + user_attrs: dict[str, Any], + system_attrs: dict[str, Any], + n_trials: int, + datetime_start: datetime.datetime | None, + study_id: int, + *, + directions: Sequence[StudyDirection] | None = None, + ): + self.study_name = study_name + if direction is None and directions is None: + raise ValueError("Specify one of `direction` and `directions`.") + elif directions is not None: + self._directions = list(directions) + elif direction is not None: + self._directions = [direction] + else: + raise ValueError("Specify only one of `direction` and `directions`.") + self.best_trial = best_trial + self.user_attrs = user_attrs + self._system_attrs = system_attrs + self.n_trials = n_trials + self.datetime_start = datetime_start + self._study_id = study_id + + def __eq__(self, other: Any) -> bool: + if not isinstance(other, StudySummary): + return NotImplemented + + return other.__dict__ == self.__dict__ + + def __lt__(self, other: Any) -> bool: + if not isinstance(other, StudySummary): + return NotImplemented + + return self._study_id < other._study_id + + def __le__(self, other: Any) -> bool: + if not isinstance(other, StudySummary): + return NotImplemented + + return self._study_id <= other._study_id + + @property + def direction(self) -> StudyDirection: + if len(self._directions) > 1: + raise RuntimeError( + "This attribute is not available during multi-objective optimization." + ) + + return self._directions[0] + + @property + def directions(self) -> Sequence[StudyDirection]: + return self._directions + + @property + def system_attrs(self) -> dict[str, Any]: + warnings.warn( + "`system_attrs` has been deprecated in v3.1.0. " + "The removal of this feature is currently scheduled for v5.0.0, " + "but this schedule is subject to change. " + "See https://github.com/optuna/optuna/releases/tag/v3.1.0.", + FutureWarning, + ) + + return self._system_attrs diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/study/_tell.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/study/_tell.py new file mode 100644 index 0000000000000000000000000000000000000000..9335ea4ed9ed889f443e90a746a5294f752ae91c --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/study/_tell.py @@ -0,0 +1,179 @@ +from __future__ import annotations + +from collections.abc import Sequence +import math +from typing import TYPE_CHECKING +import warnings + +import optuna +from optuna import logging +from optuna import pruners +from optuna.trial import FrozenTrial +from optuna.trial import TrialState + + +if TYPE_CHECKING: + from optuna import Study + from optuna import Trial + + +_logger = logging.get_logger(__name__) + + +def _get_frozen_trial(study: Study, trial: Trial | int) -> FrozenTrial: + if isinstance(trial, optuna.Trial): + trial_id = trial._trial_id + elif isinstance(trial, int): + trial_number = trial + try: + trial_id = study._storage.get_trial_id_from_study_id_trial_number( + study._study_id, trial_number + ) + except KeyError as e: + raise ValueError( + f"Cannot tell for trial with number {trial_number} since it has not been " + "created." 
+ ) from e + else: + raise TypeError("Trial must be a trial object or trial number.") + + return study._storage.get_trial(trial_id) + + +def _check_state_and_values( + state: TrialState | None, values: float | Sequence[float] | None +) -> None: + if state == TrialState.COMPLETE: + if values is None: + raise ValueError( + "No values were told. Values are required when state is TrialState.COMPLETE." + ) + elif state in (TrialState.PRUNED, TrialState.FAIL): + if values is not None: + raise ValueError( + "Values were told. Values cannot be specified when state is " + "TrialState.PRUNED or TrialState.FAIL." + ) + elif state is not None: + raise ValueError(f"Cannot tell with state {state}.") + + +def _check_values_are_feasible(study: Study, values: Sequence[float]) -> str | None: + for v in values: + # TODO(Imamura): Construct error message taking into account all values and do not early + # return `value` is assumed to be ignored on failure so we can set it to any value. + try: + float(v) + except (ValueError, TypeError): + return f"The value {repr(v)} could not be cast to float" + + if math.isnan(v): + return f"The value {v} is not acceptable" + + if len(study.directions) != len(values): + return ( + f"The number of the values {len(values)} did not match the number of the objectives " + f"{len(study.directions)}" + ) + + return None + + +def _tell_with_warning( + study: Study, + trial: Trial | int, + value_or_values: float | Sequence[float] | None = None, + state: TrialState | None = None, + skip_if_finished: bool = False, + suppress_warning: bool = False, +) -> tuple[FrozenTrial, str | None]: + """Internal method of :func:`~optuna.study.Study.tell`. + + Refer to the document for :func:`~optuna.study.Study.tell` for the reference. + This method has one additional parameter ``suppress_warning``. + + Args: + suppress_warning: + If :obj:`True`, tell will not show warnings when tell receives an invalid + values. This flag is expected to be :obj:`True` only when it is invoked by + Study.optimize. + """ + + # We must invalidate all trials cache here as it is only valid within a trial. + study._thread_local.cached_all_trials = None + + # Validate the trial argument. + frozen_trial = _get_frozen_trial(study, trial) + if frozen_trial.state.is_finished() and skip_if_finished: + _logger.info( + f"Skipped telling trial {frozen_trial.number} with values " + f"{value_or_values} and state {state} since trial was already finished. " + f"Finished trial has values {frozen_trial.values} and state {frozen_trial.state}." + ) + return frozen_trial, None + elif frozen_trial.state != TrialState.RUNNING: + raise ValueError(f"Cannot tell a {frozen_trial.state.name} trial.") + + # Validate the state and values arguments. + values: Sequence[float] | None + if value_or_values is None: + values = None + elif isinstance(value_or_values, Sequence): + values = value_or_values + else: + values = [value_or_values] + + _check_state_and_values(state, values) + + values_conversion_failure_message = None + + if state == TrialState.COMPLETE: + assert values is not None + + values_conversion_failure_message = _check_values_are_feasible(study, values) + if values_conversion_failure_message is not None: + raise ValueError(values_conversion_failure_message) + elif state == TrialState.PRUNED: + # Register the last intermediate value if present as the value of the trial. + # TODO(hvy): Whether a pruned trials should have an actual value can be discussed. 
+ assert values is None + + last_step = frozen_trial.last_step + if last_step is not None: + last_intermediate_value = frozen_trial.intermediate_values[last_step] + # intermediate_values can be unacceptable value, i.e., NaN. + if _check_values_are_feasible(study, [last_intermediate_value]) is None: + values = [last_intermediate_value] + elif state is None: + if values is None: + values_conversion_failure_message = "The value None could not be cast to float." + else: + values_conversion_failure_message = _check_values_are_feasible(study, values) + + if values_conversion_failure_message is None: + state = TrialState.COMPLETE + else: + state = TrialState.FAIL + values = None + if not suppress_warning: + warnings.warn(values_conversion_failure_message) + values_conversion_failure_message = None + + assert state is not None + + # Cast values to list of floats. + if values is not None: + # values have been checked to be castable to floats in _check_values_are_feasible. + values = [float(value) for value in values] + + # Post-processing and storing the trial. + try: + # Sampler defined trial post-processing. + study = pruners._filter_study(study, frozen_trial) + study.sampler.after_trial(study, frozen_trial, state, values) + finally: + study._storage.set_trial_state_values(frozen_trial._trial_id, state, values) + + frozen_trial = study._storage.get_trial(frozen_trial._trial_id) + + return frozen_trial, values_conversion_failure_message diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/study/study.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/study/study.py new file mode 100644 index 0000000000000000000000000000000000000000..1857261bc419000332913b33d2230e44fc7df7fe --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/study/study.py @@ -0,0 +1,1726 @@ +from __future__ import annotations + +from collections.abc import Container +from collections.abc import Iterable +from collections.abc import Mapping +import copy +from numbers import Real +import threading +from typing import Any +from typing import Callable +from typing import cast +from typing import Sequence +from typing import TYPE_CHECKING +from typing import Union +import warnings + +import numpy as np + +import optuna +from optuna import exceptions +from optuna import logging +from optuna import pruners +from optuna import samplers +from optuna import storages +from optuna._convert_positional_args import convert_positional_args +from optuna._deprecated import deprecated_func +from optuna._experimental import experimental_func +from optuna._imports import _LazyImport +from optuna._typing import JSONSerializable +from optuna.distributions import _convert_old_distribution_to_new_distribution +from optuna.distributions import BaseDistribution +from optuna.storages._heartbeat import is_heartbeat_enabled +from optuna.study._constrained_optimization import _CONSTRAINTS_KEY +from optuna.study._constrained_optimization import _get_feasible_trials +from optuna.study._multi_objective import _get_pareto_front_trials +from optuna.study._optimize import _optimize +from optuna.study._study_direction import StudyDirection +from optuna.study._study_summary import StudySummary # NOQA +from optuna.study._tell import _tell_with_warning +from optuna.trial import create_trial +from optuna.trial import TrialState + + +_dataframe = _LazyImport("optuna.study._dataframe") + +if TYPE_CHECKING: + from optuna.study._dataframe 
import pd + from optuna.trial import FrozenTrial + from optuna.trial import Trial + + +ObjectiveFuncType = Callable[["Trial"], Union[float, Sequence[float]]] + + +_SYSTEM_ATTR_METRIC_NAMES = "study:metric_names" + + +_logger = logging.get_logger(__name__) + + +class _ThreadLocalStudyAttribute(threading.local): + in_optimize_loop: bool = False + cached_all_trials: list[FrozenTrial] | None = None + + +class Study: + """A study corresponds to an optimization task, i.e., a set of trials. + + This object provides interfaces to run a new :class:`~optuna.trial.Trial`, access trials' + history, set/get user-defined attributes of the study itself. + + Note that the direct use of this constructor is not recommended. + To create and load a study, please refer to the documentation of + :func:`~optuna.study.create_study` and :func:`~optuna.study.load_study` respectively. + + """ + + def __init__( + self, + study_name: str, + storage: str | storages.BaseStorage, + sampler: "samplers.BaseSampler" | None = None, + pruner: pruners.BasePruner | None = None, + ) -> None: + self.study_name = study_name + storage = storages.get_storage(storage) + study_id = storage.get_study_id_from_name(study_name) + self._study_id = study_id + self._storage = storage + self._directions = storage.get_study_directions(study_id) + + self.sampler = sampler or samplers.TPESampler() + self.pruner = pruner or pruners.MedianPruner() + + self._thread_local = _ThreadLocalStudyAttribute() + self._stop_flag = False + + def __getstate__(self) -> dict[Any, Any]: + state = self.__dict__.copy() + del state["_thread_local"] + return state + + def __setstate__(self, state: dict[Any, Any]) -> None: + self.__dict__.update(state) + self._thread_local = _ThreadLocalStudyAttribute() + + @property + def best_params(self) -> dict[str, Any]: + """Return parameters of the best trial in the study. + + .. note:: + This feature can only be used for single-objective optimization. + + Returns: + A dictionary containing parameters of the best trial. + + """ + + return self.best_trial.params + + @property + def best_value(self) -> float: + """Return the best objective value in the study. + + .. note:: + This feature can only be used for single-objective optimization. + + Returns: + A float representing the best objective value. + + """ + + best_value = self.best_trial.value + assert best_value is not None + + return best_value + + @property + def best_trial(self) -> FrozenTrial: + """Return the best trial in the study. + + .. note:: + This feature can only be used for single-objective optimization. + If your study is multi-objective, + use :attr:`~optuna.study.Study.best_trials` instead. + + Returns: + A :class:`~optuna.trial.FrozenTrial` object of the best trial. + + .. seealso:: + The :ref:`reuse_best_trial` tutorial provides a detailed example of how to use this + method. + + """ + return self._get_best_trial(deepcopy=True) + + @property + def best_trials(self) -> list[FrozenTrial]: + """Return trials located at the Pareto front in the study. + + A trial is located at the Pareto front if there are no trials that dominate the trial. + It's called that a trial ``t0`` dominates another trial ``t1`` if + ``all(v0 <= v1) for v0, v1 in zip(t0.values, t1.values)`` and + ``any(v0 < v1) for v0, v1 in zip(t0.values, t1.values)`` are held. + + Returns: + A list of :class:`~optuna.trial.FrozenTrial` objects. + """ + + # Check whether the study is constrained optimization. 
+ trials = self.get_trials(deepcopy=False) + is_constrained = any((_CONSTRAINTS_KEY in trial.system_attrs) for trial in trials) + + return _get_pareto_front_trials(self, consider_constraint=is_constrained) + + @property + def direction(self) -> StudyDirection: + """Return the direction of the study. + + .. note:: + This feature can only be used for single-objective optimization. + If your study is multi-objective, + use :attr:`~optuna.study.Study.directions` instead. + + Returns: + A :class:`~optuna.study.StudyDirection` object. + + """ + + if self._is_multi_objective(): + raise RuntimeError( + "A single direction cannot be retrieved from a multi-objective study. Consider " + "using Study.directions to retrieve a list containing all directions." + ) + + return self.directions[0] + + @property + def directions(self) -> list[StudyDirection]: + """Return the directions of the study. + + Returns: + A list of :class:`~optuna.study.StudyDirection` objects. + """ + + return self._directions + + @property + def trials(self) -> list[FrozenTrial]: + """Return all trials in the study. + + The returned trials are ordered by trial number. + + This is a short form of ``self.get_trials(deepcopy=True, states=None)``. + + Returns: + A list of :class:`~optuna.trial.FrozenTrial` objects. + + .. seealso:: + See :func:`~optuna.study.Study.get_trials` for related method. + + """ + + return self.get_trials(deepcopy=True, states=None) + + def get_trials( + self, + deepcopy: bool = True, + states: Container[TrialState] | None = None, + ) -> list[FrozenTrial]: + """Return all trials in the study. + + The returned trials are ordered by trial number. + + .. seealso:: + See :attr:`~optuna.study.Study.trials` for related property. + + Example: + .. testcode:: + + import optuna + + + def objective(trial): + x = trial.suggest_float("x", -1, 1) + return x**2 + + + study = optuna.create_study() + study.optimize(objective, n_trials=3) + + trials = study.get_trials() + assert len(trials) == 3 + Args: + deepcopy: + Flag to control whether to apply ``copy.deepcopy()`` to the trials. + Note that if you set the flag to :obj:`False`, you shouldn't mutate + any fields of the returned trial. Otherwise the internal state of + the study may corrupt and unexpected behavior may happen. + states: + Trial states to filter on. If :obj:`None`, include all states. + + Returns: + A list of :class:`~optuna.trial.FrozenTrial` objects. + """ + return self._get_trials(deepcopy, states, use_cache=False) + + def _get_trials( + self, + deepcopy: bool = True, + states: Container[TrialState] | None = None, + use_cache: bool = False, + ) -> list[FrozenTrial]: + if use_cache: + if self._thread_local.cached_all_trials is None: + self._thread_local.cached_all_trials = self._storage.get_all_trials( + self._study_id, deepcopy=False + ) + trials = self._thread_local.cached_all_trials + if states is not None: + filtered_trials = [t for t in trials if t.state in states] + else: + filtered_trials = trials + return copy.deepcopy(filtered_trials) if deepcopy else filtered_trials + + return self._storage.get_all_trials(self._study_id, deepcopy=deepcopy, states=states) + + def _get_best_trial(self, deepcopy: bool) -> FrozenTrial: + """Return the best trial in the study. + + Args: + deepcopy: + Flag to control whether to apply ``copy.deepcopy()`` to the trial. + If :obj:`False`, returns the trial without deep copying for better performance. + Note that if you set this to :obj:`False`, you shouldn't mutate any fields + of the returned trial. 
+ + Returns: + A :class:`~optuna.trial.FrozenTrial` object of the best trial. + """ + if self._is_multi_objective(): + raise RuntimeError( + "A single best trial cannot be retrieved from a multi-objective study. Consider " + "using Study.best_trials to retrieve a list containing the best trials." + ) + + best_trial = self._storage.get_best_trial(self._study_id) + + # If the trial with the best value is infeasible, select the best trial from all feasible + # trials. Note that the behavior is undefined when constrained optimization without the + # violation value in the best-valued trial. + constraints = best_trial.system_attrs.get(_CONSTRAINTS_KEY) + if constraints is not None and any([x > 0.0 for x in constraints]): + complete_trials = self.get_trials(deepcopy=False, states=[TrialState.COMPLETE]) + feasible_trials = _get_feasible_trials(complete_trials) + if len(feasible_trials) == 0: + raise ValueError("No feasible trials are completed yet.") + if self.direction == StudyDirection.MAXIMIZE: + best_trial = max(feasible_trials, key=lambda t: cast(float, t.value)) + else: + best_trial = min(feasible_trials, key=lambda t: cast(float, t.value)) + + return copy.deepcopy(best_trial) if deepcopy else best_trial + + @property + def user_attrs(self) -> dict[str, Any]: + """Return user attributes. + + .. seealso:: + + See :func:`~optuna.study.Study.set_user_attr` for related method. + + Example: + + .. testcode:: + + import optuna + + + def objective(trial): + x = trial.suggest_float("x", 0, 1) + y = trial.suggest_float("y", 0, 1) + return x**2 + y**2 + + + study = optuna.create_study() + + study.set_user_attr("objective function", "quadratic function") + study.set_user_attr("dimensions", 2) + study.set_user_attr("contributors", ["Akiba", "Sano"]) + + assert study.user_attrs == { + "objective function": "quadratic function", + "dimensions": 2, + "contributors": ["Akiba", "Sano"], + } + + Returns: + A dictionary containing all user attributes. + """ + + return copy.deepcopy(self._storage.get_study_user_attrs(self._study_id)) + + @property + @deprecated_func("3.1.0", "5.0.0") + def system_attrs(self) -> dict[str, Any]: + """Return system attributes. + + Returns: + A dictionary containing all system attributes. + """ + + return copy.deepcopy(self._storage.get_study_system_attrs(self._study_id)) + + @property + def metric_names(self) -> list[str] | None: + """Return metric names. + + .. note:: + Use :meth:`~optuna.study.Study.set_metric_names` to set the metric names first. + + Returns: + A list with names for each dimension of the returned values of the objective function. + """ + return self._storage.get_study_system_attrs(self._study_id).get(_SYSTEM_ATTR_METRIC_NAMES) + + def optimize( + self, + func: ObjectiveFuncType, + n_trials: int | None = None, + timeout: float | None = None, + n_jobs: int = 1, + catch: Iterable[type[Exception]] | type[Exception] = (), + callbacks: Iterable[Callable[[Study, FrozenTrial], None]] | None = None, + gc_after_trial: bool = False, + show_progress_bar: bool = False, + ) -> None: + """Optimize an objective function. + + Optimization is done by choosing a suitable set of hyperparameter values from a given + range. Uses a sampler which implements the task of value suggestion based on a specified + distribution. The sampler is specified in :func:`~optuna.study.create_study` and the + default choice for the sampler is TPE. + See also :class:`~optuna.samplers.TPESampler` for more details on 'TPE'. 
+ + Optimization will be stopped when receiving a termination signal such as SIGINT and + SIGTERM. Unlike other signals, a trial is automatically and cleanly failed when receiving + SIGINT (Ctrl+C). If ``n_jobs`` is greater than one or if another signal than SIGINT + is used, the interrupted trial state won't be properly updated. + + Example: + + .. testcode:: + + import optuna + + + def objective(trial): + x = trial.suggest_float("x", -1, 1) + return x**2 + + + study = optuna.create_study() + study.optimize(objective, n_trials=3) + + Args: + func: + A callable that implements objective function. + n_trials: + The number of trials for each process. :obj:`None` represents no limit in terms of + the number of trials. The study continues to create trials until the number of + trials reaches ``n_trials``, ``timeout`` period elapses, + :func:`~optuna.study.Study.stop` is called, or a termination signal such as + SIGTERM or Ctrl+C is received. + + .. seealso:: + :class:`optuna.study.MaxTrialsCallback` can ensure how many times trials + will be performed across all processes. + timeout: + Stop study after the given number of second(s). :obj:`None` represents no limit in + terms of elapsed time. The study continues to create trials until the number of + trials reaches ``n_trials``, ``timeout`` period elapses, + :func:`~optuna.study.Study.stop` is called or, a termination signal such as + SIGTERM or Ctrl+C is received. + n_jobs: + The number of parallel jobs. If this argument is set to ``-1``, the number is + set to CPU count. + + .. note:: + ``n_jobs`` allows parallelization using :obj:`threading` and may suffer from + `Python's GIL `__. + It is recommended to use :ref:`process-based parallelization` + if ``func`` is CPU bound. + + catch: + A study continues to run even when a trial raises one of the exceptions specified + in this argument. Default is an empty tuple, i.e. the study will stop for any + exception except for :class:`~optuna.exceptions.TrialPruned`. + callbacks: + List of callback functions that are invoked at the end of each trial. Each function + must accept two parameters with the following types in this order: + :class:`~optuna.study.Study` and :class:`~optuna.trial.FrozenTrial`. + + .. seealso:: + + See the tutorial of :ref:`optuna_callback` for how to use and implement + callback functions. + + gc_after_trial: + Flag to determine whether to automatically run garbage collection after each trial. + Set to :obj:`True` to run the garbage collection, :obj:`False` otherwise. + When it runs, it runs a full collection by internally calling :func:`gc.collect`. + If you see an increase in memory consumption over several trials, try setting this + flag to :obj:`True`. + + .. seealso:: + + :ref:`out-of-memory-gc-collect` + + show_progress_bar: + Flag to show progress bars or not. To show progress bar, set this :obj:`True`. + Note that it is disabled when ``n_trials`` is :obj:`None`, + ``timeout`` is not :obj:`None`, and ``n_jobs`` :math:`\\ne 1`. + + Raises: + RuntimeError: + If nested invocation of this method occurs. + """ + _optimize( + study=self, + func=func, + n_trials=n_trials, + timeout=timeout, + n_jobs=n_jobs, + catch=tuple(catch) if isinstance(catch, Iterable) else (catch,), + callbacks=callbacks, + gc_after_trial=gc_after_trial, + show_progress_bar=show_progress_bar, + ) + + def ask(self, fixed_distributions: dict[str, BaseDistribution] | None = None) -> Trial: + """Create a new trial from which hyperparameters can be suggested. 
+ + This method is part of an alternative to :func:`~optuna.study.Study.optimize` that allows + controlling the lifetime of a trial outside the scope of ``func``. Each call to this + method should be followed by a call to :func:`~optuna.study.Study.tell` to finish the + created trial. + + .. seealso:: + + The :ref:`ask_and_tell` tutorial provides use-cases with examples. + + Example: + + Getting the trial object with the :func:`~optuna.study.Study.ask` method. + + .. testcode:: + + import optuna + + + study = optuna.create_study() + + trial = study.ask() + + x = trial.suggest_float("x", -1, 1) + + study.tell(trial, x**2) + + Example: + + Passing previously defined distributions to the :func:`~optuna.study.Study.ask` + method. + + .. testcode:: + + import optuna + + + study = optuna.create_study() + + distributions = { + "optimizer": optuna.distributions.CategoricalDistribution(["adam", "sgd"]), + "lr": optuna.distributions.FloatDistribution(0.0001, 0.1, log=True), + } + + # You can pass the distributions previously defined. + trial = study.ask(fixed_distributions=distributions) + + # `optimizer` and `lr` are already suggested and accessible with `trial.params`. + assert "optimizer" in trial.params + assert "lr" in trial.params + + Args: + fixed_distributions: + A dictionary containing the parameter names and parameter's distributions. Each + parameter in this dictionary is automatically suggested for the returned trial, + even when the suggest method is not explicitly invoked by the user. If this + argument is set to :obj:`None`, no parameter is automatically suggested. + + Returns: + A :class:`~optuna.trial.Trial`. + """ + + if not self._thread_local.in_optimize_loop and is_heartbeat_enabled(self._storage): + warnings.warn("Heartbeat of storage is supposed to be used with Study.optimize.") + + fixed_distributions = fixed_distributions or {} + fixed_distributions = { + key: _convert_old_distribution_to_new_distribution(dist) + for key, dist in fixed_distributions.items() + } + + # Sync storage once every trial. + self._thread_local.cached_all_trials = None + + trial_id = self._pop_waiting_trial_id() + if trial_id is None: + trial_id = self._storage.create_new_trial(self._study_id) + trial = optuna.Trial(self, trial_id) + + for name, param in fixed_distributions.items(): + trial._suggest(name, param) + + return trial + + def tell( + self, + trial: Trial | int, + values: float | Sequence[float] | None = None, + state: TrialState | None = None, + skip_if_finished: bool = False, + ) -> FrozenTrial: + """Finish a trial created with :func:`~optuna.study.Study.ask`. + + .. seealso:: + + The :ref:`ask_and_tell` tutorial provides use-cases with examples. + + Example: + + .. testcode:: + + import optuna + from optuna.trial import TrialState + + + def f(x): + return (x - 2) ** 2 + + + def df(x): + return 2 * x - 4 + + + study = optuna.create_study() + + n_trials = 30 + + for _ in range(n_trials): + trial = study.ask() + + lr = trial.suggest_float("lr", 1e-5, 1e-1, log=True) + + # Iterative gradient descent objective function. + x = 3 # Initial value. + for step in range(128): + y = f(x) + + trial.report(y, step=step) + + if trial.should_prune(): + # Finish the trial with the pruned state. + study.tell(trial, state=TrialState.PRUNED) + break + + gy = df(x) + x -= gy * lr + else: + # Finish the trial with the final value after all iterations. + study.tell(trial, y) + + Args: + trial: + A :class:`~optuna.trial.Trial` object or a trial number. 
+ values: + Optional objective value or a sequence of such values in case the study is used + for multi-objective optimization. Argument must be provided if ``state`` is + :class:`~optuna.trial.TrialState.COMPLETE` and should be :obj:`None` if ``state`` + is :class:`~optuna.trial.TrialState.FAIL` or + :class:`~optuna.trial.TrialState.PRUNED`. + state: + State to be reported. Must be :obj:`None`, + :class:`~optuna.trial.TrialState.COMPLETE`, + :class:`~optuna.trial.TrialState.FAIL` or + :class:`~optuna.trial.TrialState.PRUNED`. + If ``state`` is :obj:`None`, + it will be updated to :class:`~optuna.trial.TrialState.COMPLETE` + or :class:`~optuna.trial.TrialState.FAIL` depending on whether + validation for ``values`` reported succeed or not. + skip_if_finished: + Flag to control whether exception should be raised when values for already + finished trial are told. If :obj:`True`, tell is skipped without any error + when the trial is already finished. + + Returns: + A :class:`~optuna.trial.FrozenTrial` representing the resulting trial. + A returned trial is deep copied thus user can modify it as needed. + """ + + frozen_trial, _ = _tell_with_warning( + study=self, + trial=trial, + value_or_values=values, + state=state, + skip_if_finished=skip_if_finished, + ) + return copy.deepcopy(frozen_trial) + + def set_user_attr(self, key: str, value: Any) -> None: + """Set a user attribute to the study. + + .. seealso:: + + See :attr:`~optuna.study.Study.user_attrs` for related attribute. + + .. seealso:: + + See the recipe on :ref:`attributes`. + + Example: + + .. testcode:: + + import optuna + + + def objective(trial): + x = trial.suggest_float("x", 0, 1) + y = trial.suggest_float("y", 0, 1) + return x**2 + y**2 + + + study = optuna.create_study() + + study.set_user_attr("objective function", "quadratic function") + study.set_user_attr("dimensions", 2) + study.set_user_attr("contributors", ["Akiba", "Sano"]) + + assert study.user_attrs == { + "objective function": "quadratic function", + "dimensions": 2, + "contributors": ["Akiba", "Sano"], + } + + Args: + key: A key string of the attribute. + value: A value of the attribute. The value should be JSON serializable. + + """ + + self._storage.set_study_user_attr(self._study_id, key, value) + + @deprecated_func("3.1.0", "5.0.0") + def set_system_attr(self, key: str, value: Any) -> None: + """Set a system attribute to the study. + + Note that Optuna internally uses this method to save system messages. Please use + :func:`~optuna.study.Study.set_user_attr` to set users' attributes. + + Args: + key: A key string of the attribute. + value: A value of the attribute. The value should be JSON serializable. + + """ + + self._storage.set_study_system_attr(self._study_id, key, value) + + def trials_dataframe( + self, + attrs: tuple[str, ...] = ( + "number", + "value", + "datetime_start", + "datetime_complete", + "duration", + "params", + "user_attrs", + "system_attrs", + "state", + ), + multi_index: bool = False, + ) -> "pd.DataFrame": + """Export trials as a pandas DataFrame_. + + The DataFrame_ provides various features to analyze studies. It is also useful to draw a + histogram of objective values and to export trials as a CSV file. + If there are no trials, an empty DataFrame_ is returned. + + Example: + + .. testcode:: + + import optuna + import pandas + + + def objective(trial): + x = trial.suggest_float("x", -1, 1) + return x**2 + + + study = optuna.create_study() + study.optimize(objective, n_trials=3) + + # Create a dataframe from the study. 
+ df = study.trials_dataframe() + assert isinstance(df, pandas.DataFrame) + assert df.shape[0] == 3 # n_trials. + + Args: + attrs: + Specifies field names of :class:`~optuna.trial.FrozenTrial` to include them to a + DataFrame of trials. + multi_index: + Specifies whether the returned DataFrame_ employs MultiIndex_ or not. Columns that + are hierarchical by nature such as ``(params, x)`` will be flattened to + ``params_x`` when set to :obj:`False`. + + Returns: + A pandas DataFrame_ of trials in the :class:`~optuna.study.Study`. + + .. _DataFrame: http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.html + .. _MultiIndex: https://pandas.pydata.org/pandas-docs/stable/advanced.html + + Note: + If ``value`` is in ``attrs`` during multi-objective optimization, it is implicitly + replaced with ``values``. + + Note: + If :meth:`~optuna.study.Study.set_metric_names` is called, the ``value`` or ``values`` + is implicitly replaced with the dictionary with the objective name as key and the + objective value as value. + """ + return _dataframe._trials_dataframe(self, attrs, multi_index) + + def stop(self) -> None: + """Exit from the current optimization loop after the running trials finish. + + This method lets the running :meth:`~optuna.study.Study.optimize` method return + immediately after all trials which the :meth:`~optuna.study.Study.optimize` method + spawned finishes. + This method does not affect any behaviors of parallel or successive study processes. + This method only works when it is called inside an objective function or callback. + + Example: + + .. testcode:: + + import optuna + + + def objective(trial): + if trial.number == 4: + trial.study.stop() + x = trial.suggest_float("x", 0, 10) + return x**2 + + + study = optuna.create_study() + study.optimize(objective, n_trials=10) + assert len(study.trials) == 5 + + """ + + if not self._thread_local.in_optimize_loop: + raise RuntimeError( + "`Study.stop` is supposed to be invoked inside an objective function or a " + "callback." + ) + + self._stop_flag = True + + def enqueue_trial( + self, + params: dict[str, Any], + user_attrs: dict[str, Any] | None = None, + skip_if_exists: bool = False, + ) -> None: + """Enqueue a trial with given parameter values. + + You can fix the next sampling parameters which will be evaluated in your + objective function. + + Example: + + .. testcode:: + + import optuna + + + def objective(trial): + x = trial.suggest_float("x", 0, 10) + return x**2 + + + study = optuna.create_study() + study.enqueue_trial({"x": 5}) + study.enqueue_trial({"x": 0}, user_attrs={"memo": "optimal"}) + study.optimize(objective, n_trials=2) + + assert study.trials[0].params == {"x": 5} + assert study.trials[1].params == {"x": 0} + assert study.trials[1].user_attrs == {"memo": "optimal"} + + Args: + params: + Parameter values to pass your objective function. + user_attrs: + A dictionary of user-specific attributes other than ``params``. + skip_if_exists: + When :obj:`True`, prevents duplicate trials from being enqueued again. + + .. note:: + This method might produce duplicated trials if called simultaneously + by multiple processes at the same time with same ``params`` dict. + + .. seealso:: + + Please refer to :ref:`enqueue_trial_tutorial` for the tutorial of specifying + hyperparameters manually. + """ + + if not isinstance(params, dict): + raise TypeError("params must be a dictionary.") + + if skip_if_exists and self._should_skip_enqueue(params): + _logger.info(f"Trial with params {params} already exists. 
Skipping enqueue.") + return + + self.add_trial( + create_trial( + state=TrialState.WAITING, + system_attrs={"fixed_params": params}, + user_attrs=user_attrs, + ) + ) + + def add_trial(self, trial: FrozenTrial) -> None: + """Add trial to study. + + The trial is validated before being added. + + Example: + + .. testcode:: + + import optuna + from optuna.distributions import FloatDistribution + + + def objective(trial): + x = trial.suggest_float("x", 0, 10) + return x**2 + + + study = optuna.create_study() + assert len(study.trials) == 0 + + trial = optuna.trial.create_trial( + params={"x": 2.0}, + distributions={"x": FloatDistribution(0, 10)}, + value=4.0, + ) + + study.add_trial(trial) + assert len(study.trials) == 1 + + study.optimize(objective, n_trials=3) + assert len(study.trials) == 4 + + other_study = optuna.create_study() + + for trial in study.trials: + other_study.add_trial(trial) + assert len(other_study.trials) == len(study.trials) + + other_study.optimize(objective, n_trials=2) + assert len(other_study.trials) == len(study.trials) + 2 + + .. seealso:: + + This method should in general be used to add already evaluated trials + (``trial.state.is_finished() == True``). To queue trials for evaluation, + please refer to :func:`~optuna.study.Study.enqueue_trial`. + + .. seealso:: + + See :func:`~optuna.trial.create_trial` for how to create trials. + + .. seealso:: + Please refer to :ref:`add_trial_tutorial` for the tutorial of specifying + hyperparameters with the evaluated value manually. + + Args: + trial: Trial to add. + + """ + + trial._validate() + + if trial.values is not None and len(self.directions) != len(trial.values): + raise ValueError( + f"The added trial has {len(trial.values)} values, which is different from the " + f"number of objectives {len(self.directions)} in the study (determined by " + "Study.directions)." + ) + + self._storage.create_new_trial(self._study_id, template_trial=trial) + + def add_trials(self, trials: Iterable[FrozenTrial]) -> None: + """Add trials to study. + + The trials are validated before being added. + + Example: + + .. testcode:: + + import optuna + + + def objective(trial): + x = trial.suggest_float("x", 0, 10) + return x**2 + + + study = optuna.create_study() + study.optimize(objective, n_trials=3) + assert len(study.trials) == 3 + + other_study = optuna.create_study() + other_study.add_trials(study.trials) + assert len(other_study.trials) == len(study.trials) + + other_study.optimize(objective, n_trials=2) + assert len(other_study.trials) == len(study.trials) + 2 + + .. seealso:: + + See :func:`~optuna.study.Study.add_trial` for addition of each trial. + + Args: + trials: Trials to add. + + """ + + for trial in trials: + self.add_trial(trial) + + @experimental_func("3.2.0") + def set_metric_names(self, metric_names: list[str]) -> None: + """Set metric names. + + This method names each dimension of the returned values of the objective function. + It is particularly useful in multi-objective optimization. The metric names are + mainly referenced by the visualization functions. + + Example: + + .. testcode:: + + import optuna + import pandas + + + def objective(trial): + x = trial.suggest_float("x", 0, 10) + return x**2, x + 1 + + + study = optuna.create_study(directions=["minimize", "minimize"]) + study.set_metric_names(["x**2", "x+1"]) + study.optimize(objective, n_trials=3) + + df = study.trials_dataframe(multi_index=True) + assert isinstance(df, pandas.DataFrame) + assert list(df.get("values").keys()) == ["x**2", "x+1"] + + .. 
seealso:: + The names set by this method are used in :meth:`~optuna.study.Study.trials_dataframe` + and :func:`~optuna.visualization.plot_pareto_front`. + + Args: + metric_names: A list of metric names for the objective function. + """ + if len(self.directions) != len(metric_names): + raise ValueError("The number of objectives must match the length of the metric names.") + + self._storage.set_study_system_attr( + self._study_id, _SYSTEM_ATTR_METRIC_NAMES, metric_names + ) + + def _is_multi_objective(self) -> bool: + """Return :obj:`True` if the study has multiple objectives. + + Returns: + A boolean value indicates if `self.directions` has more than 1 element or not. + """ + + return len(self.directions) > 1 + + def _pop_waiting_trial_id(self) -> int | None: + for trial in self._storage.get_all_trials( + self._study_id, deepcopy=False, states=(TrialState.WAITING,) + ): + # Attempt to set the state to RUNNING. + # - If another process or thread has already changed the state to RUNNING, + # set_trial_state_values returns False. + # - If another process or thread has already finished the trial, + # an UpdateFinishedTrialError is raised. + try: + if not self._storage.set_trial_state_values( + trial._trial_id, + state=TrialState.RUNNING, + ): + continue + except exceptions.UpdateFinishedTrialError: + continue + + _logger.debug("Trial {} popped from the trial queue.".format(trial.number)) + return trial._trial_id + + return None + + def _should_skip_enqueue(self, params: Mapping[str, JSONSerializable]) -> bool: + for trial in self.get_trials(deepcopy=False): + trial_params = trial.system_attrs.get("fixed_params", trial.params) + if trial_params.keys() != params.keys(): + # Can't have repeated trials if different params are suggested. + continue + + repeated_params: list[bool] = [] + for param_name, param_value in params.items(): + existing_param = trial_params[param_name] + if not isinstance(param_value, type(existing_param)): + # Enqueued param has distribution that does not match existing param + # (e.g. trying to enqueue categorical to float param). + # We are not doing anything about it here, since sanitization should + # be handled regardless if `skip_if_exists` is `True`. + repeated_params.append(False) + continue + + is_repeated = ( + np.isnan(float(param_value)) + or np.isclose(float(param_value), float(existing_param), atol=0.0) + if isinstance(param_value, Real) + else param_value == existing_param + ) + repeated_params.append(bool(is_repeated)) + + if all(repeated_params): + return True + + return False + + def _log_completed_trial(self, trial: FrozenTrial) -> None: + if not _logger.isEnabledFor(logging.INFO): + return + + metric_names = self.metric_names + + if len(trial.values) > 1: + trial_values: list[float] | dict[str, float] + if metric_names is None: + trial_values = trial.values + else: + trial_values = {name: value for name, value in zip(metric_names, trial.values)} + _logger.info( + "Trial {} finished with values: {} and parameters: {}.".format( + trial.number, trial_values, trial.params + ) + ) + elif len(trial.values) == 1: + trial_value: float | dict[str, float] + if metric_names is None: + trial_value = trial.values[0] + else: + trial_value = {metric_names[0]: trial.values[0]} + + message = ( + f"Trial {trial.number} finished with value: {trial_value} and parameters: " + f"{trial.params}." + ) + try: + best_trial = self._get_best_trial(deepcopy=False) + message += f" Best is trial {best_trial.number} with value: {best_trial.value}." 
+ except ValueError: + # If no feasible trials are completed yet, study.best_trial raises ValueError. + pass + _logger.info(message) + else: + assert False, "Should not reach." + + +@convert_positional_args( + previous_positional_arg_names=[ + "storage", + "sampler", + "pruner", + "study_name", + "direction", + "load_if_exists", + ], + deprecated_version="3.0.0", + removed_version="5.0.0", +) +def create_study( + *, + storage: str | storages.BaseStorage | None = None, + sampler: "samplers.BaseSampler" | None = None, + pruner: pruners.BasePruner | None = None, + study_name: str | None = None, + direction: str | StudyDirection | None = None, + load_if_exists: bool = False, + directions: Sequence[str | StudyDirection] | None = None, +) -> Study: + """Create a new :class:`~optuna.study.Study`. + + Example: + + .. testcode:: + + import optuna + + + def objective(trial): + x = trial.suggest_float("x", 0, 10) + return x**2 + + + study = optuna.create_study() + study.optimize(objective, n_trials=3) + + Args: + storage: + Database URL. If this argument is set to None, + :class:`~optuna.storages.InMemoryStorage` is used, and the + :class:`~optuna.study.Study` will not be persistent. + + .. note:: + When a database URL is passed, Optuna internally uses `SQLAlchemy`_ to handle + the database. Please refer to `SQLAlchemy's document`_ for further details. + If you want to specify non-default options to `SQLAlchemy Engine`_, you can + instantiate :class:`~optuna.storages.RDBStorage` with your desired options and + pass it to the ``storage`` argument instead of a URL. + + .. _SQLAlchemy: https://www.sqlalchemy.org/ + .. _SQLAlchemy's document: + https://docs.sqlalchemy.org/en/latest/core/engines.html#database-urls + .. _SQLAlchemy Engine: https://docs.sqlalchemy.org/en/latest/core/engines.html + + sampler: + A sampler object that implements background algorithm for value suggestion. + If :obj:`None` is specified, :class:`~optuna.samplers.TPESampler` is used during + single-objective optimization and :class:`~optuna.samplers.NSGAIISampler` during + multi-objective optimization. See also :class:`~optuna.samplers`. + pruner: + A pruner object that decides early stopping of unpromising trials. If :obj:`None` + is specified, :class:`~optuna.pruners.MedianPruner` is used as the default. See + also :class:`~optuna.pruners`. + study_name: + Study's name. If this argument is set to None, a unique name is generated + automatically. + direction: + Direction of optimization. Set ``minimize`` for minimization and ``maximize`` for + maximization. You can also pass the corresponding :class:`~optuna.study.StudyDirection` + object. ``direction`` and ``directions`` must not be specified at the same time. + + .. note:: + If none of `direction` and `directions` are specified, the direction of the study + is set to "minimize". + load_if_exists: + Flag to control the behavior to handle a conflict of study names. + In the case where a study named ``study_name`` already exists in the ``storage``, + a :class:`~optuna.exceptions.DuplicatedStudyError` is raised if ``load_if_exists`` is + set to :obj:`False`. + Otherwise, the creation of the study is skipped, and the existing one is returned. + directions: + A sequence of directions during multi-objective optimization. + ``direction`` and ``directions`` must not be specified at the same time. + + Returns: + A :class:`~optuna.study.Study` object. + + See also: + :func:`optuna.create_study` is an alias of :func:`optuna.study.create_study`. 
+ + See also: + The :ref:`rdb` tutorial provides concrete examples to save and resume optimization using + RDB. + + """ + + if direction is None and directions is None: + directions = ["minimize"] + elif direction is not None and directions is not None: + raise ValueError("Specify only one of `direction` and `directions`.") + elif direction is not None: + if isinstance(direction, Sequence) and not isinstance(direction, str): + raise ValueError( + "Use `directions` instead of `direction` for multi-objective optimization." + ) + directions = [direction] + elif directions is not None: + directions = list(directions) + else: + assert False + + if len(directions) < 1: + raise ValueError("The number of objectives must be greater than 0.") + elif any( + d not in ["minimize", "maximize", StudyDirection.MINIMIZE, StudyDirection.MAXIMIZE] + for d in directions + ): + raise ValueError( + f"`directions` must be a list of `minimize` or `maximize`, but got {directions}. " + "For single-objective optimization, please use `direction` instead of `directions`." + ) + + direction_objects = [ + d if isinstance(d, StudyDirection) else StudyDirection[d.upper()] for d in directions + ] + + storage = storages.get_storage(storage) + try: + study_id = storage.create_new_study(direction_objects, study_name) + except exceptions.DuplicatedStudyError: + if load_if_exists: + assert study_name is not None + + _logger.info( + "Using an existing study with name '{}' instead of " + "creating a new one.".format(study_name) + ) + study_id = storage.get_study_id_from_name(study_name) + else: + raise + + if sampler is None and len(direction_objects) > 1: + sampler = samplers.NSGAIISampler() + + study_name = storage.get_study_name_from_id(study_id) + study = Study(study_name=study_name, storage=storage, sampler=sampler, pruner=pruner) + + return study + + +@convert_positional_args( + previous_positional_arg_names=[ + "study_name", + "storage", + "sampler", + "pruner", + ], + deprecated_version="3.0.0", + removed_version="5.0.0", +) +def load_study( + *, + study_name: str | None, + storage: str | storages.BaseStorage, + sampler: "samplers.BaseSampler" | None = None, + pruner: pruners.BasePruner | None = None, +) -> Study: + """Load the existing :class:`~optuna.study.Study` that has the specified name. + + Example: + + .. testsetup:: + + import os + + if os.path.exists("example.db"): + raise RuntimeError("'example.db' already exists. Please remove it.") + + .. testcode:: + + import optuna + + + def objective(trial): + x = trial.suggest_float("x", 0, 10) + return x**2 + + + study = optuna.create_study(storage="sqlite:///example.db", study_name="my_study") + study.optimize(objective, n_trials=3) + + loaded_study = optuna.load_study(study_name="my_study", storage="sqlite:///example.db") + assert len(loaded_study.trials) == len(study.trials) + + .. testcleanup:: + + os.remove("example.db") + + Args: + study_name: + Study's name. Each study has a unique name as an identifier. If :obj:`None`, checks + whether the storage contains a single study, and if so loads that study. + ``study_name`` is required if there are multiple studies in the storage. + storage: + Database URL such as ``sqlite:///example.db``. Please see also the documentation of + :func:`~optuna.study.create_study` for further details. + sampler: + A sampler object that implements background algorithm for value suggestion. + If :obj:`None` is specified, :class:`~optuna.samplers.TPESampler` is used + as the default. See also :class:`~optuna.samplers`. 
+ pruner: + A pruner object that decides early stopping of unpromising trials. + If :obj:`None` is specified, :class:`~optuna.pruners.MedianPruner` is used + as the default. See also :class:`~optuna.pruners`. + + Returns: + A :class:`~optuna.study.Study` object. + + See also: + :func:`optuna.load_study` is an alias of :func:`optuna.study.load_study`. + + """ + if study_name is None: + study_names = get_all_study_names(storage) + if len(study_names) != 1: + raise ValueError( + f"Could not determine the study name since the storage {storage} does not " + "contain exactly 1 study. Specify `study_name`." + ) + study_name = study_names[0] + _logger.info( + f"Study name was omitted but trying to load '{study_name}' because that was the only " + "study found in the storage." + ) + + study = Study(study_name=study_name, storage=storage, sampler=sampler, pruner=pruner) + if sampler is None and len(study.directions) > 1: + study.sampler = samplers.NSGAIISampler() + return study + + +@convert_positional_args( + previous_positional_arg_names=[ + "study_name", + "storage", + ], + deprecated_version="3.0.0", + removed_version="5.0.0", +) +def delete_study( + *, + study_name: str, + storage: str | storages.BaseStorage, +) -> None: + """Delete a :class:`~optuna.study.Study` object. + + Example: + + .. testsetup:: + + import os + + if os.path.exists("example.db"): + raise RuntimeError("'example.db' already exists. Please remove it.") + + .. testcode:: + + import optuna + + + def objective(trial): + x = trial.suggest_float("x", -10, 10) + return (x - 2) ** 2 + + + study = optuna.create_study(study_name="example-study", storage="sqlite:///example.db") + study.optimize(objective, n_trials=3) + + optuna.delete_study(study_name="example-study", storage="sqlite:///example.db") + + .. testcleanup:: + + os.remove("example.db") + + Args: + study_name: + Study's name. + storage: + Database URL such as ``sqlite:///example.db``. Please see also the documentation of + :func:`~optuna.study.create_study` for further details. + + See also: + :func:`optuna.delete_study` is an alias of :func:`optuna.study.delete_study`. + + """ + + storage = storages.get_storage(storage) + study_id = storage.get_study_id_from_name(study_name) + storage.delete_study(study_id) + + +@convert_positional_args( + previous_positional_arg_names=[ + "from_study_name", + "from_storage", + "to_storage", + "to_study_name", + ], + warning_stacklevel=3, + deprecated_version="3.0.0", + removed_version="5.0.0", +) +def copy_study( + *, + from_study_name: str, + from_storage: str | storages.BaseStorage, + to_storage: str | storages.BaseStorage, + to_study_name: str | None = None, +) -> None: + """Copy study from one storage to another. + + The direction(s) of the objective(s) in the study, trials, user attributes and system + attributes are copied. + + .. note:: + :func:`~optuna.copy_study` copies a study even if the optimization is working on. + It means users will get a copied study that contains a trial that is not finished. + + Example: + + .. testsetup:: + + import os + + if os.path.exists("example.db"): + raise RuntimeError("'example.db' already exists. Please remove it.") + if os.path.exists("example_copy.db"): + raise RuntimeError("'example_copy.db' already exists. Please remove it.") + + .. 
testcode:: + + import optuna + + + def objective(trial): + x = trial.suggest_float("x", -10, 10) + return (x - 2) ** 2 + + + study = optuna.create_study( + study_name="example-study", + storage="sqlite:///example.db", + ) + study.optimize(objective, n_trials=3) + + optuna.copy_study( + from_study_name="example-study", + from_storage="sqlite:///example.db", + to_storage="sqlite:///example_copy.db", + ) + + study = optuna.load_study( + study_name=None, + storage="sqlite:///example_copy.db", + ) + + .. testcleanup:: + + os.remove("example.db") + os.remove("example_copy.db") + + Args: + from_study_name: + Name of study. + from_storage: + Source database URL such as ``sqlite:///example.db``. Please see also the + documentation of :func:`~optuna.study.create_study` for further details. + to_storage: + Destination database URL. + to_study_name: + Name of the created study. If omitted, ``from_study_name`` is used. + + Raises: + :class:`~optuna.exceptions.DuplicatedStudyError`: + If a study with a conflicting name already exists in the destination storage. + + """ + + from_study = load_study(study_name=from_study_name, storage=from_storage) + to_study = create_study( + study_name=to_study_name or from_study_name, + storage=to_storage, + directions=from_study.directions, + load_if_exists=False, + ) + + for key, value in from_study._storage.get_study_system_attrs(from_study._study_id).items(): + to_study._storage.set_study_system_attr(to_study._study_id, key, value) + + for key, value in from_study.user_attrs.items(): + to_study.set_user_attr(key, value) + + # Trials are deep copied on `add_trials`. + to_study.add_trials(from_study.get_trials(deepcopy=False)) + + +def get_all_study_summaries( + storage: str | storages.BaseStorage, include_best_trial: bool = True +) -> list[StudySummary]: + """Get all history of studies stored in a specified storage. + + Example: + + .. testsetup:: + + import os + + if os.path.exists("example.db"): + raise RuntimeError("'example.db' already exists. Please remove it.") + + .. testcode:: + + import optuna + + + def objective(trial): + x = trial.suggest_float("x", -10, 10) + return (x - 2) ** 2 + + + study = optuna.create_study(study_name="example-study", storage="sqlite:///example.db") + study.optimize(objective, n_trials=3) + + study_summaries = optuna.study.get_all_study_summaries(storage="sqlite:///example.db") + assert len(study_summaries) == 1 + + study_summary = study_summaries[0] + assert study_summary.study_name == "example-study" + + .. testcleanup:: + + os.remove("example.db") + + Args: + storage: + Database URL such as ``sqlite:///example.db``. Please see also the documentation of + :func:`~optuna.study.create_study` for further details. + include_best_trial: + Include the best trials if exist. It potentially increases the number of queries and + may take longer to fetch summaries depending on the storage. + + Returns: + List of study history summarized as :class:`~optuna.study.StudySummary` objects. + + See also: + :func:`optuna.get_all_study_summaries` is an alias of + :func:`optuna.study.get_all_study_summaries`. 
+ + """ + + storage = storages.get_storage(storage) + frozen_studies = storage.get_all_studies() + study_summaries = [] + + for s in frozen_studies: + all_trials = storage.get_all_trials(s._study_id) + completed_trials = [t for t in all_trials if t.state == TrialState.COMPLETE] + + n_trials = len(all_trials) + + if len(s.directions) == 1: + direction = s.direction + directions = None + if include_best_trial and len(completed_trials) != 0: + if direction == StudyDirection.MAXIMIZE: + best_trial = max(completed_trials, key=lambda t: cast(float, t.value)) + else: + best_trial = min(completed_trials, key=lambda t: cast(float, t.value)) + else: + best_trial = None + else: + direction = None + directions = s.directions + best_trial = None + + datetime_start = min( + [t.datetime_start for t in all_trials if t.datetime_start is not None], default=None + ) + + study_summaries.append( + StudySummary( + study_name=s.study_name, + direction=direction, + best_trial=best_trial, + user_attrs=s.user_attrs, + system_attrs=s.system_attrs, + n_trials=n_trials, + datetime_start=datetime_start, + study_id=s._study_id, + directions=directions, + ) + ) + + return study_summaries + + +def get_all_study_names(storage: str | storages.BaseStorage) -> list[str]: + """Get all study names stored in a specified storage. + + Example: + + .. testsetup:: + + import os + + if os.path.exists("example.db"): + raise RuntimeError("'example.db' already exists. Please remove it.") + + .. testcode:: + + import optuna + + + def objective(trial): + x = trial.suggest_float("x", -10, 10) + return (x - 2) ** 2 + + + study = optuna.create_study(study_name="example-study", storage="sqlite:///example.db") + study.optimize(objective, n_trials=3) + + study_names = optuna.study.get_all_study_names(storage="sqlite:///example.db") + assert len(study_names) == 1 + + assert study_names[0] == "example-study" + + .. testcleanup:: + + os.remove("example.db") + + Args: + storage: + Database URL such as ``sqlite:///example.db``. Please see also the documentation of + :func:`~optuna.study.create_study` for further details. + + Returns: + List of all study names in the storage. + + See also: + :func:`optuna.get_all_study_names` is an alias of + :func:`optuna.study.get_all_study_names`. 
+ + """ + + storage = storages.get_storage(storage) + study_names = [study.study_name for study in storage.get_all_studies()] + + return study_names diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/terminator/__init__.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/terminator/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..825d43afaf5b8d90459eebcc3e6528fb8c437e19 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/terminator/__init__.py @@ -0,0 +1,28 @@ +from optuna.terminator.callback import TerminatorCallback +from optuna.terminator.erroreval import BaseErrorEvaluator +from optuna.terminator.erroreval import CrossValidationErrorEvaluator +from optuna.terminator.erroreval import report_cross_validation_scores +from optuna.terminator.erroreval import StaticErrorEvaluator +from optuna.terminator.improvement.emmr import EMMREvaluator +from optuna.terminator.improvement.evaluator import BaseImprovementEvaluator +from optuna.terminator.improvement.evaluator import BestValueStagnationEvaluator +from optuna.terminator.improvement.evaluator import RegretBoundEvaluator +from optuna.terminator.median_erroreval import MedianErrorEvaluator +from optuna.terminator.terminator import BaseTerminator +from optuna.terminator.terminator import Terminator + + +__all__ = [ + "TerminatorCallback", + "BaseErrorEvaluator", + "CrossValidationErrorEvaluator", + "report_cross_validation_scores", + "StaticErrorEvaluator", + "MedianErrorEvaluator", + "BaseImprovementEvaluator", + "BestValueStagnationEvaluator", + "RegretBoundEvaluator", + "EMMREvaluator", + "BaseTerminator", + "Terminator", +] diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/terminator/callback.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/terminator/callback.py new file mode 100644 index 0000000000000000000000000000000000000000..ae9ec2ab64f6558df9ab827e2475329eb63ff4a7 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/terminator/callback.py @@ -0,0 +1,73 @@ +from __future__ import annotations + +from optuna._experimental import experimental_class +from optuna.logging import get_logger +from optuna.study.study import Study +from optuna.terminator.terminator import BaseTerminator +from optuna.terminator.terminator import Terminator +from optuna.trial import FrozenTrial + + +_logger = get_logger(__name__) + + +@experimental_class("3.2.0") +class TerminatorCallback: + """A callback that terminates the optimization using Terminator. + + This class implements a callback which wraps :class:`~optuna.terminator.Terminator` + so that it can be used with the :func:`~optuna.study.Study.optimize` method. + + Args: + terminator: + A terminator object which determines whether to terminate the optimization by + assessing the room for optimization and statistical error. Defaults to a + :class:`~optuna.terminator.Terminator` object with default + ``improvement_evaluator`` and ``error_evaluator``. + + Example: + + .. 
testcode:: + + from sklearn.datasets import load_wine + from sklearn.ensemble import RandomForestClassifier + from sklearn.model_selection import cross_val_score + from sklearn.model_selection import KFold + + import optuna + from optuna.terminator import TerminatorCallback + from optuna.terminator import report_cross_validation_scores + + + def objective(trial): + X, y = load_wine(return_X_y=True) + + clf = RandomForestClassifier( + max_depth=trial.suggest_int("max_depth", 2, 32), + min_samples_split=trial.suggest_float("min_samples_split", 0, 1), + criterion=trial.suggest_categorical("criterion", ("gini", "entropy")), + ) + + scores = cross_val_score(clf, X, y, cv=KFold(n_splits=5, shuffle=True)) + report_cross_validation_scores(trial, scores) + return scores.mean() + + + study = optuna.create_study(direction="maximize") + terminator = TerminatorCallback() + study.optimize(objective, n_trials=50, callbacks=[terminator]) + + .. seealso:: + Please refer to :class:`~optuna.terminator.Terminator` for the details of + the terminator mechanism. + """ + + def __init__(self, terminator: BaseTerminator | None = None) -> None: + self._terminator = terminator or Terminator() + + def __call__(self, study: Study, trial: FrozenTrial) -> None: + should_terminate = self._terminator.should_terminate(study=study) + + if should_terminate: + _logger.info("The study has been stopped by the terminator.") + study.stop() diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/terminator/erroreval.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/terminator/erroreval.py new file mode 100644 index 0000000000000000000000000000000000000000..c7f78f1b7b2ff0c59251d3bb47f659d9152cb3d4 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/terminator/erroreval.py @@ -0,0 +1,129 @@ +from __future__ import annotations + +import abc +from typing import cast + +import numpy as np + +from optuna._experimental import experimental_class +from optuna.study import StudyDirection +from optuna.trial import FrozenTrial +from optuna.trial import Trial +from optuna.trial._state import TrialState + + +_CROSS_VALIDATION_SCORES_KEY = "terminator:cv_scores" + + +class BaseErrorEvaluator(metaclass=abc.ABCMeta): + """Base class for error evaluators.""" + + @abc.abstractmethod + def evaluate( + self, + trials: list[FrozenTrial], + study_direction: StudyDirection, + ) -> float: + pass + + +@experimental_class("3.2.0") +class CrossValidationErrorEvaluator(BaseErrorEvaluator): + """An error evaluator for objective functions based on cross-validation. + + This evaluator evaluates the objective function's statistical error, which comes from the + randomness of dataset. This evaluator assumes that the objective function is the average of + the cross-validation and uses the scaled variance of the cross-validation scores in the best + trial at the moment as the statistical error. + + """ + + def evaluate( + self, + trials: list[FrozenTrial], + study_direction: StudyDirection, + ) -> float: + """Evaluate the statistical error of the objective function based on cross-validation. + + Args: + trials: + A list of trials to consider. The best trial in ``trials`` is used to compute the + statistical error. + + study_direction: + The direction of the study. + + Returns: + A float representing the statistical error of the objective function. 
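        A minimal sketch of the returned quantity, added here for illustration (the variable
        names below are hypothetical and not part of the implementation); it assumes the
        scaled-variance formula used in the method body::

            import numpy as np

            scores = [0.90, 0.85, 0.88, 0.92, 0.87]  # cv scores of the current best trial
            k = len(scores)
            scale = 1 / k + 1 / (k - 1)  # scaling factor, as in the implementation below
            error = float(np.sqrt(scale * np.var(scores)))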
+ + """ + trials = [trial for trial in trials if trial.state == TrialState.COMPLETE] + assert len(trials) > 0 + + if study_direction == StudyDirection.MAXIMIZE: + best_trial = max(trials, key=lambda t: cast(float, t.value)) + else: + best_trial = min(trials, key=lambda t: cast(float, t.value)) + + best_trial_attrs = best_trial.system_attrs + if _CROSS_VALIDATION_SCORES_KEY in best_trial_attrs: + cv_scores = best_trial_attrs[_CROSS_VALIDATION_SCORES_KEY] + else: + raise ValueError( + "Cross-validation scores have not been reported. Please call " + "`report_cross_validation_scores(trial, scores)` during a trial and pass the " + "list of scores as `scores`." + ) + + k = len(cv_scores) + assert k > 1, "Should be guaranteed by `report_cross_validation_scores`." + scale = 1 / k + 1 / (k - 1) + + var = scale * np.var(cv_scores) + std = np.sqrt(var) + + return float(std) + + +@experimental_class("3.2.0") +def report_cross_validation_scores(trial: Trial, scores: list[float]) -> None: + """A function to report cross-validation scores of a trial. + + This function should be called within the objective function to report the cross-validation + scores. The reported scores are used to evaluate the statistical error for termination + judgement. + + Args: + trial: + A :class:`~optuna.trial.Trial` object to report the cross-validation scores. + scores: + The cross-validation scores of the trial. + + """ + if len(scores) <= 1: + raise ValueError("The length of `scores` is expected to be greater than one.") + trial.storage.set_trial_system_attr(trial._trial_id, _CROSS_VALIDATION_SCORES_KEY, scores) + + +@experimental_class("3.2.0") +class StaticErrorEvaluator(BaseErrorEvaluator): + """An error evaluator that always returns a constant value. + + This evaluator can be used to terminate the optimization when the evaluated improvement + potential is below the fixed threshold. + + Args: + constant: + A user-specified constant value to always return as an error estimate. 
+ + """ + + def __init__(self, constant: float) -> None: + self._constant = constant + + def evaluate( + self, + trials: list[FrozenTrial], + study_direction: StudyDirection, + ) -> float: + return self._constant diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/terminator/improvement/__init__.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/terminator/improvement/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/terminator/improvement/emmr.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/terminator/improvement/emmr.py new file mode 100644 index 0000000000000000000000000000000000000000..5adc0b08e17e2b66fdf2ff1ba3dc232db83c4945 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/terminator/improvement/emmr.py @@ -0,0 +1,383 @@ +from __future__ import annotations + +import math +import sys +from typing import cast +from typing import TYPE_CHECKING +import warnings + +import numpy as np + +from optuna._experimental import experimental_class +from optuna.samplers._lazy_random_state import LazyRandomState +from optuna.search_space import intersection_search_space +from optuna.study import StudyDirection +from optuna.terminator.improvement.evaluator import _compute_standardized_regret_bound +from optuna.terminator.improvement.evaluator import BaseImprovementEvaluator +from optuna.trial import FrozenTrial +from optuna.trial import TrialState + + +if TYPE_CHECKING: + import scipy.stats as scipy_stats + import torch + + from optuna._gp import acqf + from optuna._gp import gp + from optuna._gp import prior + from optuna._gp import search_space as gp_search_space +else: + from optuna._imports import _LazyImport + + torch = _LazyImport("torch") + gp = _LazyImport("optuna._gp.gp") + acqf = _LazyImport("optuna._gp.acqf") + prior = _LazyImport("optuna._gp.prior") + gp_search_space = _LazyImport("optuna._gp.search_space") + scipy_stats = _LazyImport("scipy.stats") + +MARGIN_FOR_NUMARICAL_STABILITY = 0.1 + + +@experimental_class("4.0.0") +class EMMREvaluator(BaseImprovementEvaluator): + """Evaluates a kind of regrets, called the Expected Minimum Model Regret(EMMR). + + EMMR is an upper bound of "expected minimum simple regret" in the optimization process. + + Expected minimum simple regret is a quantity that converges to zero only if the + optimization process has found the global optima. + + For further information about expected minimum simple regret and the algorithm, + please refer to the following paper: + + - `A stopping criterion for Bayesian optimization by the gap of expected minimum simple + regrets `__ + + Also, there is our blog post explaining this evaluator: + + - `Introducing A New Terminator: Early Termination of Black-box Optimization Based on + Expected Minimum Model Regret + `__ + + Args: + deterministic_objective: + A boolean value which indicates whether the objective function is deterministic. + Default is :obj:`False`. + delta: + A float number related to the criterion for termination. Default to 0.1. + For further information about this parameter, please see the aforementioned paper. + min_n_trials: + A minimum number of complete trials to compute the criterion. Default to 2. + seed: + A random seed for EMMREvaluator. 
+ + Example: + + .. testcode:: + + import optuna + from optuna.terminator import EMMREvaluator + from optuna.terminator import MedianErrorEvaluator + from optuna.terminator import Terminator + + sampler = optuna.samplers.TPESampler(seed=0) + study = optuna.create_study(sampler=sampler, direction="minimize") + emmr_improvement_evaluator = EMMREvaluator() + median_error_evaluator = MedianErrorEvaluator(emmr_improvement_evaluator) + terminator = Terminator( + improvement_evaluator=emmr_improvement_evaluator, + error_evaluator=median_error_evaluator, + ) + + + for i in range(1000): + trial = study.ask() + + ys = [trial.suggest_float(f"x{i}", -10.0, 10.0) for i in range(5)] + value = sum(ys[i] ** 2 for i in range(5)) + + study.tell(trial, value) + + if terminator.should_terminate(study): + # Terminated by Optuna Terminator! + break + + """ + + def __init__( + self, + deterministic_objective: bool = False, + delta: float = 0.1, + min_n_trials: int = 2, + seed: int | None = None, + ) -> None: + if min_n_trials <= 1 or not np.isfinite(min_n_trials): + raise ValueError("`min_n_trials` is expected to be a finite integer more than one.") + + self._deterministic = deterministic_objective + self._delta = delta + self.min_n_trials = min_n_trials + self._rng = LazyRandomState(seed) + + def evaluate(self, trials: list[FrozenTrial], study_direction: StudyDirection) -> float: + + optuna_search_space = intersection_search_space(trials) + complete_trials = [t for t in trials if t.state == TrialState.COMPLETE] + + if len(complete_trials) < self.min_n_trials: + return sys.float_info.max * MARGIN_FOR_NUMARICAL_STABILITY # Do not terminate. + + search_space, normalized_params = gp_search_space.get_search_space_and_normalized_params( + complete_trials, optuna_search_space + ) + if len(search_space.scale_types) == 0: + warnings.warn( + f"{self.__class__.__name__} cannot consider any search space." + "Termination will never occur in this study." + ) + return sys.float_info.max * MARGIN_FOR_NUMARICAL_STABILITY # Do not terminate. 
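        # Added summary comments for the remainder of this method (see the code below):
        #   1. Sign-flip and standardize the observed objective values so that the _gp module
        #      can always treat the problem as maximization.
        #   2. Fit GP kernel hyperparameters twice: on the first t-1 completed trials and on
        #      all t completed trials.
        #   3. Evaluate posterior means/variances at the incumbents of step t-1 and step t,
        #      together with their posterior covariance.
        #   4. Combine these quantities into the EMMR upper bound following Theorem 1 and
        #      Eq. (4) of Ishibashi et al. (2023).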
+ + len_trials = len(complete_trials) + len_params = len(search_space.scale_types) + assert normalized_params.shape == (len_trials, len_params) + + # _gp module assumes that optimization direction is maximization + sign = -1 if study_direction == StudyDirection.MINIMIZE else 1 + score_vals = np.array([cast(float, t.value) for t in complete_trials]) * sign + score_vals = gp.warn_and_convert_inf(score_vals) + standarized_score_vals = (score_vals - score_vals.mean()) / max( + sys.float_info.min, score_vals.std() + ) + + assert len(standarized_score_vals) == len(normalized_params) + + kernel_params_t1 = gp.fit_kernel_params( # Fit kernel with up to (t-1)-th observation + X=normalized_params[..., :-1, :], + Y=standarized_score_vals[:-1], + is_categorical=(search_space.scale_types == gp_search_space.ScaleType.CATEGORICAL), + log_prior=prior.default_log_prior, + minimum_noise=prior.DEFAULT_MINIMUM_NOISE_VAR, + initial_kernel_params=None, + deterministic_objective=self._deterministic, + ) + + kernel_params_t = gp.fit_kernel_params( # Fit kernel with up to t-th observation + X=normalized_params, + Y=standarized_score_vals, + is_categorical=(search_space.scale_types == gp_search_space.ScaleType.CATEGORICAL), + log_prior=prior.default_log_prior, + minimum_noise=prior.DEFAULT_MINIMUM_NOISE_VAR, + initial_kernel_params=kernel_params_t1, + deterministic_objective=self._deterministic, + ) + + theta_t_star_index = int(np.argmax(standarized_score_vals)) + theta_t1_star_index = int(np.argmax(standarized_score_vals[:-1])) + theta_t_star = normalized_params[theta_t_star_index, :] + theta_t1_star = normalized_params[theta_t1_star_index, :] + + cov_t_between_theta_t_star_and_theta_t1_star = _compute_gp_posterior_cov_two_thetas( + search_space, + normalized_params, + standarized_score_vals, + kernel_params_t, + theta_t_star_index, + theta_t1_star_index, + ) + + mu_t1_theta_t_with_nu_t, variance_t1_theta_t_with_nu_t = _compute_gp_posterior( + search_space, + normalized_params[:-1, :], + standarized_score_vals[:-1], + normalized_params[-1, :], + kernel_params_t, + # Use kernel_params_t instead of kernel_params_t1. + # Use "t" under the assumption that "t" and "t1" are approximately the same. + # This is because kernel should same when computing KLD. 
+ # For detailed information, please see section 4.4 of the paper: + # https://proceedings.mlr.press/v206/ishibashi23a/ishibashi23a.pdf + ) + _, variance_t_theta_t1_star = _compute_gp_posterior( + search_space, + normalized_params, + standarized_score_vals, + theta_t1_star, + kernel_params_t, + ) + mu_t_theta_t_star, variance_t_theta_t_star = _compute_gp_posterior( + search_space, + normalized_params, + standarized_score_vals, + theta_t_star, + kernel_params_t, + ) + mu_t1_theta_t1_star, _ = _compute_gp_posterior( + search_space, + normalized_params[:-1, :], + standarized_score_vals[:-1], + theta_t1_star, + kernel_params_t1, + ) + + y_t = standarized_score_vals[-1] + kappa_t1 = _compute_standardized_regret_bound( + kernel_params_t1, + search_space, + normalized_params[:-1, :], + standarized_score_vals[:-1], + self._delta, + rng=self._rng.rng, + ) + + theorem1_delta_mu_t_star = mu_t1_theta_t1_star - mu_t_theta_t_star + + alg1_delta_r_tilde_t_term1 = theorem1_delta_mu_t_star + + theorem1_v = math.sqrt( + max( + 1e-10, + variance_t_theta_t_star + - 2.0 * cov_t_between_theta_t_star_and_theta_t1_star + + variance_t_theta_t1_star, + ) + ) + theorem1_g = (mu_t_theta_t_star - mu_t1_theta_t1_star) / theorem1_v + + alg1_delta_r_tilde_t_term2 = theorem1_v * scipy_stats.norm.pdf(theorem1_g) + alg1_delta_r_tilde_t_term3 = theorem1_v * theorem1_g * scipy_stats.norm.cdf(theorem1_g) + + _lambda = prior.DEFAULT_MINIMUM_NOISE_VAR**-1 + eq4_rhs_term1 = 0.5 * math.log(1.0 + _lambda * variance_t1_theta_t_with_nu_t) + eq4_rhs_term2 = ( + -0.5 * variance_t1_theta_t_with_nu_t / (variance_t1_theta_t_with_nu_t + _lambda**-1) + ) + eq4_rhs_term3 = ( + 0.5 + * variance_t1_theta_t_with_nu_t + * (y_t - mu_t1_theta_t_with_nu_t) ** 2 + / (variance_t1_theta_t_with_nu_t + _lambda**-1) ** 2 + ) + + alg1_delta_r_tilde_t_term4 = kappa_t1 * math.sqrt( + 0.5 * (eq4_rhs_term1 + eq4_rhs_term2 + eq4_rhs_term3) + ) + + return min( + sys.float_info.max * 0.5, + alg1_delta_r_tilde_t_term1 + + alg1_delta_r_tilde_t_term2 + + alg1_delta_r_tilde_t_term3 + + alg1_delta_r_tilde_t_term4, + ) + + +def _compute_gp_posterior( + search_space: gp_search_space.SearchSpace, + X: np.ndarray, + Y: np.ndarray, + x_params: np.ndarray, + kernel_params: gp.KernelParamsTensor, +) -> tuple[float, float]: # mean, var + + acqf_params = acqf.create_acqf_params( + acqf_type=acqf.AcquisitionFunctionType.LOG_EI, + kernel_params=kernel_params, + search_space=search_space, + X=X, # normalized_params[..., :-1, :], + Y=Y, # standarized_score_vals[:-1], + ) + mean_tensor, var_tensor = gp.posterior( + acqf_params.kernel_params, + torch.from_numpy(acqf_params.X), + torch.from_numpy( + acqf_params.search_space.scale_types == gp_search_space.ScaleType.CATEGORICAL + ), + torch.from_numpy(acqf_params.cov_Y_Y_inv), + torch.from_numpy(acqf_params.cov_Y_Y_inv_Y), + torch.from_numpy(x_params), # best_params or normalized_params[..., -1, :]), + ) + mean = mean_tensor.detach().numpy().flatten() + var = var_tensor.detach().numpy().flatten() + assert len(mean) == 1 and len(var) == 1 + return float(mean[0]), float(var[0]) + + +def _posterior_of_batched_theta( + kernel_params: gp.KernelParamsTensor, + X: torch.Tensor, # [len(trials), len(params)] + is_categorical: torch.Tensor, # bool[len(params)] + cov_Y_Y_inv: torch.Tensor, # [len(trials), len(trials)] + cov_Y_Y_inv_Y: torch.Tensor, # [len(trials)] + theta: torch.Tensor, # [batch, len(params)] +) -> tuple[torch.Tensor, torch.Tensor]: # (mean: [(batch,)], var: [(batch,batch)]) + + assert len(X.shape) == 2 + len_trials, len_params 
= X.shape + assert len(theta.shape) == 2 + len_batch = theta.shape[0] + assert theta.shape == (len_batch, len_params) + assert is_categorical.shape == (len_params,) + assert cov_Y_Y_inv.shape == (len_trials, len_trials) + assert cov_Y_Y_inv_Y.shape == (len_trials,) + + cov_ftheta_fX = gp.kernel(is_categorical, kernel_params, theta[..., None, :], X)[..., 0, :] + assert cov_ftheta_fX.shape == (len_batch, len_trials) + cov_ftheta_ftheta = gp.kernel(is_categorical, kernel_params, theta[..., None, :], theta)[ + ..., 0, : + ] + assert cov_ftheta_ftheta.shape == (len_batch, len_batch) + + assert torch.allclose(cov_ftheta_ftheta.diag(), gp.kernel_at_zero_distance(kernel_params)) + assert torch.allclose(cov_ftheta_ftheta, cov_ftheta_ftheta.T) + + mean = cov_ftheta_fX @ cov_Y_Y_inv_Y + assert mean.shape == (len_batch,) + var = cov_ftheta_ftheta - cov_ftheta_fX @ cov_Y_Y_inv @ cov_ftheta_fX.T + assert var.shape == (len_batch, len_batch) + + # We need to clamp the variance to avoid negative values due to numerical errors. + return mean, torch.clamp(var, min=0.0) + + +def _compute_gp_posterior_cov_two_thetas( + search_space: gp_search_space.SearchSpace, + normalized_params: np.ndarray, + standarized_score_vals: np.ndarray, + kernel_params: gp.KernelParamsTensor, + theta1_index: int, + theta2_index: int, +) -> float: # cov + + if theta1_index == theta2_index: + return _compute_gp_posterior( + search_space, + normalized_params, + standarized_score_vals, + normalized_params[theta1_index], + kernel_params, + )[1] + + assert normalized_params.shape[0] == standarized_score_vals.shape[0] + + acqf_params = acqf.create_acqf_params( + acqf_type=acqf.AcquisitionFunctionType.LOG_EI, + kernel_params=kernel_params, + search_space=search_space, + X=normalized_params, + Y=standarized_score_vals, + ) + + _, var = _posterior_of_batched_theta( + acqf_params.kernel_params, + torch.from_numpy(acqf_params.X), + torch.from_numpy( + acqf_params.search_space.scale_types == gp_search_space.ScaleType.CATEGORICAL + ), + torch.from_numpy(acqf_params.cov_Y_Y_inv), + torch.from_numpy(acqf_params.cov_Y_Y_inv_Y), + torch.from_numpy(normalized_params[[theta1_index, theta2_index]]), + ) + assert var.shape == (2, 2) + var = var.detach().numpy()[0, 1] + return float(var) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/terminator/improvement/evaluator.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/terminator/improvement/evaluator.py new file mode 100644 index 0000000000000000000000000000000000000000..c1b07a88f68b6a4686cd7e5d35294aea24e48641 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/terminator/improvement/evaluator.py @@ -0,0 +1,257 @@ +from __future__ import annotations + +import abc +from typing import TYPE_CHECKING + +import numpy as np + +from optuna._experimental import experimental_class +from optuna.distributions import BaseDistribution +from optuna.samplers._lazy_random_state import LazyRandomState +from optuna.search_space import intersection_search_space +from optuna.study import StudyDirection +from optuna.trial import FrozenTrial +from optuna.trial import TrialState + + +if TYPE_CHECKING: + + from optuna._gp import acqf + from optuna._gp import gp + from optuna._gp import optim_sample + from optuna._gp import prior + from optuna._gp import search_space as gp_search_space +else: + from optuna._imports import _LazyImport + + gp = _LazyImport("optuna._gp.gp") + optim_sample = 
_LazyImport("optuna._gp.optim_sample") + acqf = _LazyImport("optuna._gp.acqf") + prior = _LazyImport("optuna._gp.prior") + gp_search_space = _LazyImport("optuna._gp.search_space") + +DEFAULT_TOP_TRIALS_RATIO = 0.5 +DEFAULT_MIN_N_TRIALS = 20 + + +def _get_beta(n_params: int, n_trials: int, delta: float = 0.1) -> float: + # TODO(nabenabe0928): Check the original implementation to verify. + # Especially, |D| seems to be the domain size, but not the dimension based on Theorem 1. + beta = 2 * np.log(n_params * n_trials**2 * np.pi**2 / 6 / delta) + + # The following div is according to the original paper: "We then further scale it down + # by a factor of 5 as defined in the experiments in + # `Srinivas et al. (2010) `__" + beta /= 5 + + return beta + + +def _compute_standardized_regret_bound( + kernel_params: gp.KernelParamsTensor, + search_space: gp_search_space.SearchSpace, + normalized_top_n_params: np.ndarray, + standarized_top_n_values: np.ndarray, + delta: float = 0.1, + optimize_n_samples: int = 2048, + rng: np.random.RandomState | None = None, +) -> float: + """ + # In the original paper, f(x) was intended to be minimized, but here we would like to + # maximize f(x). Hence, the following changes happen: + # 1. min(ucb) over top trials becomes max(lcb) over top trials, and + # 2. min(lcb) over the search space becomes max(ucb) over the search space, and + # 3. Regret bound becomes max(ucb) over the search space minus max(lcb) over top trials. + """ + + n_trials, n_params = normalized_top_n_params.shape + + # calculate max_ucb + beta = _get_beta(n_params, n_trials, delta) + ucb_acqf_params = acqf.create_acqf_params( + acqf_type=acqf.AcquisitionFunctionType.UCB, + kernel_params=kernel_params, + search_space=search_space, + X=normalized_top_n_params, + Y=standarized_top_n_values, + beta=beta, + ) + # UCB over the search space. (Original: LCB over the search space. See Change 1 above.) + standardized_ucb_value = max( + acqf.eval_acqf_no_grad(ucb_acqf_params, normalized_top_n_params).max(), + optim_sample.optimize_acqf_sample(ucb_acqf_params, n_samples=optimize_n_samples, rng=rng)[ + 1 + ], + ) + + # calculate min_lcb + lcb_acqf_params = acqf.create_acqf_params( + acqf_type=acqf.AcquisitionFunctionType.LCB, + kernel_params=kernel_params, + search_space=search_space, + X=normalized_top_n_params, + Y=standarized_top_n_values, + beta=beta, + ) + # LCB over the top trials. (Original: UCB over the top trials. See Change 2 above.) + standardized_lcb_value = np.max( + acqf.eval_acqf_no_grad(lcb_acqf_params, normalized_top_n_params) + ) + + # max(UCB) - max(LCB). (Original: min(UCB) - min(LCB). See Change 3 above.) + return standardized_ucb_value - standardized_lcb_value # standardized regret bound + + +@experimental_class("3.2.0") +class BaseImprovementEvaluator(metaclass=abc.ABCMeta): + """Base class for improvement evaluators.""" + + @abc.abstractmethod + def evaluate(self, trials: list[FrozenTrial], study_direction: StudyDirection) -> float: + pass + + +@experimental_class("3.2.0") +class RegretBoundEvaluator(BaseImprovementEvaluator): + """An error evaluator for upper bound on the regret with high-probability confidence. + + This evaluator evaluates the regret of current best solution, which defined as the difference + between the objective value of the best solution and of the global optimum. 
To be specific, + this evaluator calculates the upper bound on the regret based on the fact that empirical + estimator of the objective function is bounded by lower and upper confidence bounds with + high probability under the Gaussian process model assumption. + + Args: + top_trials_ratio: + A ratio of top trials to be considered when estimating the regret. Default to 0.5. + min_n_trials: + A minimum number of complete trials to estimate the regret. Default to 20. + seed: + Seed for random number generator. + + For further information about this evaluator, please refer to the following paper: + + - `Automatic Termination for Hyperparameter Optimization `__ + """ # NOQA: E501 + + def __init__( + self, + top_trials_ratio: float = DEFAULT_TOP_TRIALS_RATIO, + min_n_trials: int = DEFAULT_MIN_N_TRIALS, + seed: int | None = None, + ) -> None: + self._top_trials_ratio = top_trials_ratio + self._min_n_trials = min_n_trials + self._log_prior = prior.default_log_prior + self._minimum_noise = prior.DEFAULT_MINIMUM_NOISE_VAR + self._optimize_n_samples = 2048 + self._rng = LazyRandomState(seed) + + def _get_top_n( + self, normalized_params: np.ndarray, values: np.ndarray + ) -> tuple[np.ndarray, np.ndarray]: + assert len(normalized_params) == len(values) + n_trials = len(normalized_params) + top_n = np.clip(int(n_trials * self._top_trials_ratio), self._min_n_trials, n_trials) + top_n_val = np.partition(values, n_trials - top_n)[n_trials - top_n] + top_n_mask = values >= top_n_val + return normalized_params[top_n_mask], values[top_n_mask] + + def evaluate(self, trials: list[FrozenTrial], study_direction: StudyDirection) -> float: + optuna_search_space = intersection_search_space(trials) + self._validate_input(trials, optuna_search_space) + + complete_trials = [t for t in trials if t.state == TrialState.COMPLETE] + + # _gp module assumes that optimization direction is maximization + sign = -1 if study_direction == StudyDirection.MINIMIZE else 1 + values = np.array([t.value for t in complete_trials]) * sign + search_space, normalized_params = gp_search_space.get_search_space_and_normalized_params( + complete_trials, optuna_search_space + ) + normalized_top_n_params, top_n_values = self._get_top_n(normalized_params, values) + top_n_values_mean = top_n_values.mean() + top_n_values_std = max(1e-10, top_n_values.std()) + standarized_top_n_values = (top_n_values - top_n_values_mean) / top_n_values_std + + kernel_params = gp.fit_kernel_params( + X=normalized_top_n_params, + Y=standarized_top_n_values, + is_categorical=(search_space.scale_types == gp_search_space.ScaleType.CATEGORICAL), + log_prior=self._log_prior, + minimum_noise=self._minimum_noise, + # TODO(contramundum53): Add option to specify this. + deterministic_objective=False, + # TODO(y0z): Add `kernel_params_cache` to speedup. + initial_kernel_params=None, + ) + + standardized_regret_bound = _compute_standardized_regret_bound( + kernel_params, + search_space, + normalized_top_n_params, + standarized_top_n_values, + rng=self._rng.rng, + ) + return standardized_regret_bound * top_n_values_std # regret bound + + @classmethod + def _validate_input( + cls, trials: list[FrozenTrial], search_space: dict[str, BaseDistribution] + ) -> None: + if len([t for t in trials if t.state == TrialState.COMPLETE]) == 0: + raise ValueError( + "Because no trial has been completed yet, the regret bound cannot be evaluated." + ) + + if len(search_space) == 0: + raise ValueError( + "The intersection search space is empty. 
This condition is not supported by " + f"{cls.__name__}." + ) + + +@experimental_class("3.4.0") +class BestValueStagnationEvaluator(BaseImprovementEvaluator): + """Evaluates the stagnation period of the best value in an optimization process. + + This class is initialized with a maximum stagnation period (``max_stagnation_trials``) + and is designed to evaluate the remaining trials before reaching this maximum period + of allowed stagnation. If this remaining trials reach zero, the trial terminates. + Therefore, the default error evaluator is instantiated by ``StaticErrorEvaluator(const=0)``. + + Args: + max_stagnation_trials: + The maximum number of trials allowed for stagnation. + """ + + def __init__(self, max_stagnation_trials: int = 30) -> None: + if max_stagnation_trials < 0: + raise ValueError("The maximum number of stagnant trials must not be negative.") + self._max_stagnation_trials = max_stagnation_trials + + def evaluate(self, trials: list[FrozenTrial], study_direction: StudyDirection) -> float: + self._validate_input(trials) + is_maximize_direction = True if (study_direction == StudyDirection.MAXIMIZE) else False + trials = [t for t in trials if t.state == TrialState.COMPLETE] + current_step = len(trials) - 1 + + best_step = 0 + for i, trial in enumerate(trials): + best_value = trials[best_step].value + current_value = trial.value + assert best_value is not None + assert current_value is not None + if is_maximize_direction and (best_value < current_value): + best_step = i + elif (not is_maximize_direction) and (best_value > current_value): + best_step = i + + return self._max_stagnation_trials - (current_step - best_step) + + @classmethod + def _validate_input(cls, trials: list[FrozenTrial]) -> None: + if len([t for t in trials if t.state == TrialState.COMPLETE]) == 0: + raise ValueError( + "Because no trial has been completed yet, the improvement cannot be evaluated." + ) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/terminator/median_erroreval.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/terminator/median_erroreval.py new file mode 100644 index 0000000000000000000000000000000000000000..d956945fe0129f047e38d5c9441ab393558b0434 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/terminator/median_erroreval.py @@ -0,0 +1,86 @@ +from __future__ import annotations + +import sys + +import numpy as np + +from optuna._experimental import experimental_class +from optuna.study import StudyDirection +from optuna.terminator.erroreval import BaseErrorEvaluator +from optuna.terminator.improvement.evaluator import BaseImprovementEvaluator +from optuna.trial import FrozenTrial +from optuna.trial._state import TrialState + + +@experimental_class("4.0.0") +class MedianErrorEvaluator(BaseErrorEvaluator): + """An error evaluator that returns the ratio to initial median. + + This error evaluator is introduced as a heuristics in the following paper: + + - `A stopping criterion for Bayesian optimization by the gap of expected minimum simple + regrets `__ + + Args: + paired_improvement_evaluator: + The ``improvement_evaluator`` instance which is set with this ``error_evaluator``. + warm_up_trials: + A parameter specifies the number of initial trials to be discarded before + the calculation of median. Default to 10. + In optuna, the first 10 trials are often random sampling. + The ``warm_up_trials`` can exclude them from the calculation. 
+ n_initial_trials: + A parameter specifies the number of initial trials considered in the calculation of + median after ``warm_up_trials``. Default to 20. + threshold_ratio: + A parameter specifies the ratio between the threshold and initial median. + Default to 0.01. + """ + + def __init__( + self, + paired_improvement_evaluator: BaseImprovementEvaluator, + warm_up_trials: int = 10, + n_initial_trials: int = 20, + threshold_ratio: float = 0.01, + ) -> None: + if warm_up_trials < 0: + raise ValueError("`warm_up_trials` is expected to be a non-negative integer.") + if n_initial_trials <= 0: + raise ValueError("`n_initial_trials` is expected to be a positive integer.") + if threshold_ratio <= 0.0 or not np.isfinite(threshold_ratio): + raise ValueError("`threshold_ratio_to_initial_median` is expected to be a positive.") + + self._paired_improvement_evaluator = paired_improvement_evaluator + self._warm_up_trials = warm_up_trials + self._n_initial_trials = n_initial_trials + self._threshold_ratio = threshold_ratio + self._threshold: float | None = None + + def evaluate( + self, + trials: list[FrozenTrial], + study_direction: StudyDirection, + ) -> float: + + if self._threshold is not None: + return self._threshold + + trials = [trial for trial in trials if trial.state == TrialState.COMPLETE] + if len(trials) < (self._warm_up_trials + self._n_initial_trials): + return ( + -sys.float_info.min + ) # Do not terminate. It assumes that improvement must non-negative. + trials.sort(key=lambda trial: trial.number) + criteria = [] + for i in range(1, self._n_initial_trials + 1): + criteria.append( + self._paired_improvement_evaluator.evaluate( + trials[self._warm_up_trials : self._warm_up_trials + i], study_direction + ) + ) + criteria.sort() + self._threshold = criteria[len(criteria) // 2] + assert self._threshold is not None + self._threshold = min(sys.float_info.max, self._threshold * self._threshold_ratio) + return self._threshold diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/terminator/terminator.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/terminator/terminator.py new file mode 100644 index 0000000000000000000000000000000000000000..ac145910e31768df9d59198ca0b74e1ca9d23b1b --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/terminator/terminator.py @@ -0,0 +1,136 @@ +from __future__ import annotations + +import abc + +from optuna._experimental import experimental_class +from optuna.study.study import Study +from optuna.terminator.erroreval import BaseErrorEvaluator +from optuna.terminator.erroreval import CrossValidationErrorEvaluator +from optuna.terminator.erroreval import StaticErrorEvaluator +from optuna.terminator.improvement.evaluator import BaseImprovementEvaluator +from optuna.terminator.improvement.evaluator import BestValueStagnationEvaluator +from optuna.terminator.improvement.evaluator import DEFAULT_MIN_N_TRIALS +from optuna.terminator.improvement.evaluator import RegretBoundEvaluator +from optuna.trial import TrialState + + +class BaseTerminator(metaclass=abc.ABCMeta): + """Base class for terminators.""" + + @abc.abstractmethod + def should_terminate(self, study: Study) -> bool: + pass + + +@experimental_class("3.2.0") +class Terminator(BaseTerminator): + """Automatic stopping mechanism for Optuna studies. + + This class implements an automatic stopping mechanism for Optuna studies, aiming to prevent + unnecessary computation. 
The study is terminated when the statistical error, e.g. + cross-validation error, exceeds the room left for optimization. + + For further information about the algorithm, please refer to the following paper: + + - `A. Makarova et al. Automatic termination for hyperparameter optimization. + `__ + + Args: + improvement_evaluator: + An evaluator object for assessing the room left for optimization. Defaults to a + :class:`~optuna.terminator.improvement.evaluator.RegretBoundEvaluator` object. + error_evaluator: + An evaluator for calculating the statistical error, e.g. cross-validation error. + Defaults to a :class:`~optuna.terminator.CrossValidationErrorEvaluator` + object. + min_n_trials: + The minimum number of trials before termination is considered. Defaults to ``20``. + + Raises: + ValueError: If ``min_n_trials`` is not a positive integer. + + Example: + + .. testcode:: + + import logging + import sys + + from sklearn.datasets import load_wine + from sklearn.ensemble import RandomForestClassifier + from sklearn.model_selection import cross_val_score + from sklearn.model_selection import KFold + + import optuna + from optuna.terminator import Terminator + from optuna.terminator import report_cross_validation_scores + + + study = optuna.create_study(direction="maximize") + terminator = Terminator() + min_n_trials = 20 + + while True: + trial = study.ask() + + X, y = load_wine(return_X_y=True) + + clf = RandomForestClassifier( + max_depth=trial.suggest_int("max_depth", 2, 32), + min_samples_split=trial.suggest_float("min_samples_split", 0, 1), + criterion=trial.suggest_categorical("criterion", ("gini", "entropy")), + ) + + scores = cross_val_score(clf, X, y, cv=KFold(n_splits=5, shuffle=True)) + report_cross_validation_scores(trial, scores) + + value = scores.mean() + logging.info(f"Trial #{trial.number} finished with value {value}.") + study.tell(trial, value) + + if trial.number > min_n_trials and terminator.should_terminate(study): + logging.info("Terminated by Optuna Terminator!") + break + + .. seealso:: + Please refer to :class:`~optuna.terminator.TerminatorCallback` for how to use + the terminator mechanism with the :func:`~optuna.study.Study.optimize` method. 
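    .. note::
        As an added summary of the rule implemented in ``should_terminate`` below: once at
        least ``min_n_trials`` trials are complete, the study is stopped when
        ``improvement_evaluator.evaluate(...) < error_evaluator.evaluate(...)``, i.e. when the
        estimated room for improvement drops below the estimated statistical error.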
+ + """ + + def __init__( + self, + improvement_evaluator: BaseImprovementEvaluator | None = None, + error_evaluator: BaseErrorEvaluator | None = None, + min_n_trials: int = DEFAULT_MIN_N_TRIALS, + ) -> None: + if min_n_trials <= 0: + raise ValueError("`min_n_trials` is expected to be a positive integer.") + + self._improvement_evaluator = improvement_evaluator or RegretBoundEvaluator() + self._error_evaluator = error_evaluator or self._initialize_error_evaluator() + self._min_n_trials = min_n_trials + + def _initialize_error_evaluator(self) -> BaseErrorEvaluator: + if isinstance(self._improvement_evaluator, BestValueStagnationEvaluator): + return StaticErrorEvaluator(constant=0) + return CrossValidationErrorEvaluator() + + def should_terminate(self, study: Study) -> bool: + """Judge whether the study should be terminated based on the reported values.""" + trials = study.get_trials(states=[TrialState.COMPLETE]) + + if len(trials) < self._min_n_trials: + return False + + improvement = self._improvement_evaluator.evaluate( + trials=study.trials, + study_direction=study.direction, + ) + + error = self._error_evaluator.evaluate( + trials=study.trials, study_direction=study.direction + ) + + should_terminate = improvement < error + return should_terminate diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/testing/__init__.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/testing/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/testing/objectives.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/testing/objectives.py new file mode 100644 index 0000000000000000000000000000000000000000..c98c1cf2bcfa0d74054c9be064e2ffbf4f393623 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/testing/objectives.py @@ -0,0 +1,10 @@ +from optuna import TrialPruned +from optuna.trial import Trial + + +def fail_objective(_: Trial) -> float: + raise ValueError() + + +def pruned_objective(trial: Trial) -> float: + raise TrialPruned() diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/testing/pruners.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/testing/pruners.py new file mode 100644 index 0000000000000000000000000000000000000000..ad78ab8d03083e9ea2a807bd8cbf88e5fea8ff17 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/testing/pruners.py @@ -0,0 +1,11 @@ +from __future__ import annotations + +import optuna + + +class DeterministicPruner(optuna.pruners.BasePruner): + def __init__(self, is_pruning: bool) -> None: + self.is_pruning = is_pruning + + def prune(self, study: "optuna.study.Study", trial: "optuna.trial.FrozenTrial") -> bool: + return self.is_pruning diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/testing/samplers.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/testing/samplers.py new file mode 100644 index 0000000000000000000000000000000000000000..c6f87875174350a188dcdfdb3132fdf6ff1e7d34 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/testing/samplers.py @@ -0,0 +1,35 @@ 
+from __future__ import annotations + +from typing import Any + +import optuna +from optuna.distributions import BaseDistribution + + +class DeterministicSampler(optuna.samplers.BaseSampler): + def __init__(self, params: dict[str, Any]) -> None: + self.params = params + + def infer_relative_search_space( + self, study: "optuna.study.Study", trial: "optuna.trial.FrozenTrial" + ) -> dict[str, BaseDistribution]: + return {} + + def sample_relative( + self, + study: "optuna.study.Study", + trial: "optuna.trial.FrozenTrial", + search_space: dict[str, BaseDistribution], + ) -> dict[str, Any]: + return {} + + def sample_independent( + self, + study: "optuna.study.Study", + trial: "optuna.trial.FrozenTrial", + param_name: str, + param_distribution: BaseDistribution, + ) -> Any: + param_value = self.params[param_name] + assert param_distribution._contains(param_distribution.to_internal_repr(param_value)) + return param_value diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/testing/storages.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/testing/storages.py new file mode 100644 index 0000000000000000000000000000000000000000..ff85f14cb1a4304ad58f2109c1f751b1a4d6e7f7 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/testing/storages.py @@ -0,0 +1,146 @@ +from __future__ import annotations + +import socket +import threading +from types import TracebackType +from typing import Any +from typing import IO +from typing import TYPE_CHECKING + +import fakeredis + +import optuna +from optuna.storages import BaseStorage +from optuna.storages import GrpcStorageProxy +from optuna.storages.journal import JournalFileBackend +from optuna.testing.tempfile_pool import NamedTemporaryFilePool + + +if TYPE_CHECKING: + import grpc +else: + from optuna._imports import _LazyImport + + grpc = _LazyImport("grpc") + + +STORAGE_MODES: list[Any] = [ + "inmemory", + "sqlite", + "cached_sqlite", + "journal", + "journal_redis", + "grpc_rdb", + "grpc_journal_file", +] + + +STORAGE_MODES_HEARTBEAT = [ + "sqlite", + "cached_sqlite", +] + +SQLITE3_TIMEOUT = 300 + + +class StorageSupplier: + def __init__(self, storage_specifier: str, **kwargs: Any) -> None: + self.storage_specifier = storage_specifier + self.extra_args = kwargs + self.tempfile: IO[Any] | None = None + self.server: grpc.Server | None = None + self.thread: threading.Thread | None = None + self.proxy: GrpcStorageProxy | None = None + + def __enter__( + self, + ) -> ( + optuna.storages.InMemoryStorage + | optuna.storages._CachedStorage + | optuna.storages.RDBStorage + | optuna.storages.JournalStorage + | optuna.storages.GrpcStorageProxy + ): + if self.storage_specifier == "inmemory": + if len(self.extra_args) > 0: + raise ValueError("InMemoryStorage does not accept any arguments!") + return optuna.storages.InMemoryStorage() + elif "sqlite" in self.storage_specifier: + self.tempfile = NamedTemporaryFilePool().tempfile() + url = "sqlite:///{}".format(self.tempfile.name) + rdb_storage = optuna.storages.RDBStorage( + url, + engine_kwargs={"connect_args": {"timeout": SQLITE3_TIMEOUT}}, + **self.extra_args, + ) + return ( + optuna.storages._CachedStorage(rdb_storage) + if "cached" in self.storage_specifier + else rdb_storage + ) + elif self.storage_specifier == "journal_redis": + journal_redis_storage = optuna.storages.journal.JournalRedisBackend( + "redis://localhost" + ) + journal_redis_storage._redis = self.extra_args.get( + 
"redis", fakeredis.FakeStrictRedis() + ) + return optuna.storages.JournalStorage(journal_redis_storage) + elif self.storage_specifier == "grpc_journal_file": + self.tempfile = self.extra_args.get("file", NamedTemporaryFilePool().tempfile()) + assert self.tempfile is not None + storage = optuna.storages.JournalStorage( + optuna.storages.journal.JournalFileBackend(self.tempfile.name) + ) + return self._create_proxy(storage) + elif "journal" in self.storage_specifier: + self.tempfile = self.extra_args.get("file", NamedTemporaryFilePool().tempfile()) + assert self.tempfile is not None + file_storage = JournalFileBackend(self.tempfile.name) + return optuna.storages.JournalStorage(file_storage) + elif self.storage_specifier == "grpc_rdb": + self.tempfile = NamedTemporaryFilePool().tempfile() + url = "sqlite:///{}".format(self.tempfile.name) + return self._create_proxy(optuna.storages.RDBStorage(url)) + elif self.storage_specifier == "grpc_proxy": + assert "base_storage" in self.extra_args + return self._create_proxy(self.extra_args["base_storage"]) + else: + assert False + + def _create_proxy(self, storage: BaseStorage) -> GrpcStorageProxy: + port = _find_free_port() + self.server = optuna.storages._grpc.server.make_server(storage, "localhost", port) + self.thread = threading.Thread(target=self.server.start) + self.thread.start() + self.proxy = GrpcStorageProxy(host="localhost", port=port) + self.proxy.wait_server_ready(timeout=60) + return self.proxy + + def __exit__( + self, exc_type: type[BaseException], exc_val: BaseException, exc_tb: TracebackType + ) -> None: + if self.tempfile: + self.tempfile.close() + + if self.proxy: + self.proxy.close() + self.proxy = None + + if self.server: + assert self.thread is not None + self.server.stop(5).wait() + self.thread.join() + self.server = None + self.thread = None + + +def _find_free_port() -> int: + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + for port in range(13000, 13100): + try: + sock.bind(("localhost", port)) + return port + except OSError: + continue + assert False, "must not reach here" diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/testing/tempfile_pool.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/testing/tempfile_pool.py new file mode 100644 index 0000000000000000000000000000000000000000..eec71279b2db9f7ae3230d254b99ef6c74409d97 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/testing/tempfile_pool.py @@ -0,0 +1,46 @@ +# On Windows, temporary file shold delete "after" storage was deleted +# NamedTemporaryFilePool ensures tempfile delete after tests. 
+ +from __future__ import annotations + +import atexit +import gc +import os +import tempfile +from types import TracebackType +from typing import Any +from typing import IO + + +class NamedTemporaryFilePool: + tempfile_pool: list[IO[Any]] = [] + + def __new__(cls, **kwargs: Any) -> "NamedTemporaryFilePool": + if not hasattr(cls, "_instance"): + cls._instance = super(NamedTemporaryFilePool, cls).__new__(cls) + atexit.register(cls._instance.cleanup) + return cls._instance + + def __init__(self, **kwargs: Any) -> None: + self.kwargs = kwargs + + def tempfile(self) -> IO[Any]: + self._tempfile = tempfile.NamedTemporaryFile(delete=False, **self.kwargs) + self.tempfile_pool.append(self._tempfile) + return self._tempfile + + def cleanup(self) -> None: + gc.collect() + for i in self.tempfile_pool: + os.unlink(i.name) + + def __enter__(self) -> IO[Any]: + return self.tempfile() + + def __exit__( + self, + exc_type: type[BaseException], + exc_val: BaseException, + exc_tb: TracebackType, + ) -> None: + self._tempfile.close() diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/testing/threading.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/testing/threading.py new file mode 100644 index 0000000000000000000000000000000000000000..1facbe97daeb54fa63b51615ce6794125cbddf74 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/testing/threading.py @@ -0,0 +1,22 @@ +from __future__ import annotations + +from collections.abc import Callable +import threading +from typing import Any + + +class _TestableThread(threading.Thread): + def __init__(self, target: Callable[..., Any], args: tuple): + threading.Thread.__init__(self, target=target, args=args) + self.exc: BaseException | None = None + + def run(self) -> None: + try: + threading.Thread.run(self) + except BaseException as e: + self.exc = e + + def join(self, timeout: float | None = None) -> None: + super(_TestableThread, self).join(timeout) + if self.exc: + raise self.exc diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/testing/trials.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/testing/trials.py new file mode 100644 index 0000000000000000000000000000000000000000..6f060192630e138e23d8cc01781f7d1d4aba8e03 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/testing/trials.py @@ -0,0 +1,34 @@ +from __future__ import annotations + +from collections.abc import Sequence +from typing import Any + +import optuna +from optuna.distributions import BaseDistribution +from optuna.samplers._base import _CONSTRAINTS_KEY +from optuna.trial import FrozenTrial +from optuna.trial import TrialState + + +def _create_frozen_trial( + number: int = 0, + values: Sequence[float] | None = None, + constraints: Sequence[float] | None = None, + params: dict[str, Any] | None = None, + param_distributions: dict[str, BaseDistribution] | None = None, + state: TrialState = TrialState.COMPLETE, +) -> optuna.trial.FrozenTrial: + return FrozenTrial( + number=number, + value=1.0 if values is None else None, + values=values, + state=state, + user_attrs={}, + system_attrs={} if constraints is None else {_CONSTRAINTS_KEY: list(constraints)}, + params=params or {}, + distributions=param_distributions or {}, + intermediate_values={}, + datetime_start=None, + datetime_complete=None, + trial_id=number, + ) diff --git 
a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/testing/visualization.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/testing/visualization.py new file mode 100644 index 0000000000000000000000000000000000000000..d462a31adce568495810115efb8e346515f20d7c --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/testing/visualization.py @@ -0,0 +1,67 @@ +from optuna import Study +from optuna.distributions import FloatDistribution +from optuna.study import create_study +from optuna.trial import create_trial + + +def prepare_study_with_trials( + n_objectives: int = 1, + direction: str = "minimize", + value_for_first_trial: float = 0.0, +) -> Study: + """Return a dummy study object for tests. + + This function is added to reduce the code to set up dummy study object in each test case. + However, you can only use this function for unit tests that are loosely coupled with the + dummy study object. Unit tests that are tightly coupled with the study become difficult to + read because of + `Mystery Guest `__ and/or + `Eager Test `__ anti-patterns. + + Args: + n_objectives: Number of objective values. + direction: Study's optimization direction. + value_for_first_trial: Objective value in first trial. This value will be broadcasted + to all objectives in multi-objective optimization. + + Returns: + :class:`~optuna.study.Study` + + """ + + study = create_study(directions=[direction] * n_objectives) + study.add_trial( + create_trial( + values=[value_for_first_trial] * n_objectives, + params={"param_a": 1.0, "param_b": 2.0, "param_c": 3.0, "param_d": 4.0}, + distributions={ + "param_a": FloatDistribution(0.0, 3.0), + "param_b": FloatDistribution(0.0, 3.0), + "param_c": FloatDistribution(2.0, 5.0), + "param_d": FloatDistribution(2.0, 5.0), + }, + ) + ) + study.add_trial( + create_trial( + values=[2.0] * n_objectives, + params={"param_b": 0.0, "param_d": 4.0}, + distributions={ + "param_b": FloatDistribution(0.0, 3.0), + "param_d": FloatDistribution(2.0, 5.0), + }, + ) + ) + study.add_trial( + create_trial( + values=[1.0] * n_objectives, + params={"param_a": 2.5, "param_b": 1.0, "param_c": 4.5, "param_d": 2.0}, + distributions={ + "param_a": FloatDistribution(0.0, 3.0), + "param_b": FloatDistribution(0.0, 3.0), + "param_c": FloatDistribution(2.0, 5.0), + "param_d": FloatDistribution(2.0, 5.0), + }, + ) + ) + return study diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/trial/__init__.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/trial/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ae8278c922130b7a5df0ab1721d315e8a2634407 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/trial/__init__.py @@ -0,0 +1,16 @@ +from optuna.trial._base import BaseTrial +from optuna.trial._fixed import FixedTrial +from optuna.trial._frozen import create_trial +from optuna.trial._frozen import FrozenTrial +from optuna.trial._state import TrialState +from optuna.trial._trial import Trial + + +__all__ = [ + "BaseTrial", + "FixedTrial", + "FrozenTrial", + "Trial", + "TrialState", + "create_trial", +] diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/trial/_base.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/trial/_base.py new file mode 
100644 index 0000000000000000000000000000000000000000..0f08ae3e2408f452addb61df95fb0c3ec7ba027c --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/trial/_base.py @@ -0,0 +1,132 @@ +from __future__ import annotations + +import abc +from collections.abc import Sequence +import datetime +from typing import Any +from typing import overload + +from optuna._deprecated import deprecated_func +from optuna.distributions import BaseDistribution +from optuna.distributions import CategoricalChoiceType + + +_SUGGEST_INT_POSITIONAL_ARGS = ["self", "name", "low", "high", "step", "log"] + + +class BaseTrial(abc.ABC): + """Base class for trials. + + Note that this class is not supposed to be directly accessed by library users. + """ + + @abc.abstractmethod + def suggest_float( + self, + name: str, + low: float, + high: float, + *, + step: float | None = None, + log: bool = False, + ) -> float: + raise NotImplementedError + + @deprecated_func("3.0.0", "6.0.0") + @abc.abstractmethod + def suggest_uniform(self, name: str, low: float, high: float) -> float: + raise NotImplementedError + + @deprecated_func("3.0.0", "6.0.0") + @abc.abstractmethod + def suggest_loguniform(self, name: str, low: float, high: float) -> float: + raise NotImplementedError + + @deprecated_func("3.0.0", "6.0.0") + @abc.abstractmethod + def suggest_discrete_uniform(self, name: str, low: float, high: float, q: float) -> float: + raise NotImplementedError + + @abc.abstractmethod + def suggest_int( + self, name: str, low: int, high: int, *, step: int = 1, log: bool = False + ) -> int: + raise NotImplementedError + + @overload + @abc.abstractmethod + def suggest_categorical(self, name: str, choices: Sequence[None]) -> None: ... + + @overload + @abc.abstractmethod + def suggest_categorical(self, name: str, choices: Sequence[bool]) -> bool: ... + + @overload + @abc.abstractmethod + def suggest_categorical(self, name: str, choices: Sequence[int]) -> int: ... + + @overload + @abc.abstractmethod + def suggest_categorical(self, name: str, choices: Sequence[float]) -> float: ... + + @overload + @abc.abstractmethod + def suggest_categorical(self, name: str, choices: Sequence[str]) -> str: ... + + @overload + @abc.abstractmethod + def suggest_categorical( + self, name: str, choices: Sequence[CategoricalChoiceType] + ) -> CategoricalChoiceType: ... 
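    # Added note: the typing overloads above only narrow the static return type of
    # suggest_categorical for each element type of ``choices``; the abstract definition
    # below is the single signature that subclasses actually implement.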
+ + @abc.abstractmethod + def suggest_categorical( + self, name: str, choices: Sequence[CategoricalChoiceType] + ) -> CategoricalChoiceType: + raise NotImplementedError + + @abc.abstractmethod + def report(self, value: float, step: int) -> None: + raise NotImplementedError + + @abc.abstractmethod + def should_prune(self) -> bool: + raise NotImplementedError + + @abc.abstractmethod + def set_user_attr(self, key: str, value: Any) -> None: + raise NotImplementedError + + @abc.abstractmethod + @deprecated_func("3.1.0", "5.0.0") + def set_system_attr(self, key: str, value: Any) -> None: + raise NotImplementedError + + @property + @abc.abstractmethod + def params(self) -> dict[str, Any]: + raise NotImplementedError + + @property + @abc.abstractmethod + def distributions(self) -> dict[str, BaseDistribution]: + raise NotImplementedError + + @property + @abc.abstractmethod + def user_attrs(self) -> dict[str, Any]: + raise NotImplementedError + + @property + @abc.abstractmethod + def system_attrs(self) -> dict[str, Any]: + raise NotImplementedError + + @property + @abc.abstractmethod + def datetime_start(self) -> datetime.datetime | None: + raise NotImplementedError + + @property + def number(self) -> int: + raise NotImplementedError diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/trial/_fixed.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/trial/_fixed.py new file mode 100644 index 0000000000000000000000000000000000000000..295254c272b5428879909d0d455b07b24649d4f4 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/trial/_fixed.py @@ -0,0 +1,187 @@ +from __future__ import annotations + +from collections.abc import Sequence +import datetime +from typing import Any +from typing import overload +import warnings + +from optuna import distributions +from optuna._convert_positional_args import convert_positional_args +from optuna._deprecated import deprecated_func +from optuna.distributions import BaseDistribution +from optuna.distributions import CategoricalChoiceType +from optuna.distributions import CategoricalDistribution +from optuna.distributions import FloatDistribution +from optuna.distributions import IntDistribution +from optuna.trial._base import _SUGGEST_INT_POSITIONAL_ARGS +from optuna.trial._base import BaseTrial + + +_suggest_deprecated_msg = "Use suggest_float{args} instead." + + +class FixedTrial(BaseTrial): + """A trial class which suggests a fixed value for each parameter. + + This object has the same methods as :class:`~optuna.trial.Trial`, and it suggests pre-defined + parameter values. The parameter values can be determined at the construction of the + :class:`~optuna.trial.FixedTrial` object. In contrast to :class:`~optuna.trial.Trial`, + :class:`~optuna.trial.FixedTrial` does not depend on :class:`~optuna.study.Study`, and it is + useful for deploying optimization results. + + Example: + + Evaluate an objective function with parameter values given by a user. + + .. testcode:: + + import optuna + + + def objective(trial): + x = trial.suggest_float("x", -100, 100) + y = trial.suggest_categorical("y", [-1, 0, 1]) + return x**2 + y + + + assert objective(optuna.trial.FixedTrial({"x": 1, "y": 0})) == 1 + + + .. note:: + Please refer to :class:`~optuna.trial.Trial` for details of methods and properties. + + Args: + params: + A dictionary containing all parameters. + number: + A trial number. Defaults to ``0``. 
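    .. note::
        Added for illustration (not part of the original docstring): suggesting a parameter
        name that was not supplied in ``params`` raises a :exc:`ValueError`, while a supplied
        value lying outside the suggested distribution's range only emits a warning and is
        returned as-is:

        .. testcode::

            import optuna

            trial = optuna.trial.FixedTrial({"x": 1.0})
            assert trial.suggest_float("x", -100, 100) == 1.0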
+ + """ + + def __init__(self, params: dict[str, Any], number: int = 0) -> None: + self._params = params + self._suggested_params: dict[str, Any] = {} + self._distributions: dict[str, BaseDistribution] = {} + self._user_attrs: dict[str, Any] = {} + self._system_attrs: dict[str, Any] = {} + self._datetime_start = datetime.datetime.now() + self._number = number + + def suggest_float( + self, + name: str, + low: float, + high: float, + *, + step: float | None = None, + log: bool = False, + ) -> float: + return self._suggest(name, FloatDistribution(low, high, log=log, step=step)) + + @deprecated_func("3.0.0", "6.0.0", text=_suggest_deprecated_msg.format(args="")) + def suggest_uniform(self, name: str, low: float, high: float) -> float: + return self.suggest_float(name, low, high) + + @deprecated_func("3.0.0", "6.0.0", text=_suggest_deprecated_msg.format(args="(..., log=True)")) + def suggest_loguniform(self, name: str, low: float, high: float) -> float: + return self.suggest_float(name, low, high, log=True) + + @deprecated_func("3.0.0", "6.0.0", text=_suggest_deprecated_msg.format(args="(..., step=...)")) + def suggest_discrete_uniform(self, name: str, low: float, high: float, q: float) -> float: + return self.suggest_float(name, low, high, step=q) + + @convert_positional_args( + previous_positional_arg_names=_SUGGEST_INT_POSITIONAL_ARGS, + deprecated_version="3.5.0", + removed_version="5.0.0", + ) + def suggest_int( + self, name: str, low: int, high: int, *, step: int = 1, log: bool = False + ) -> int: + return int(self._suggest(name, IntDistribution(low, high, log=log, step=step))) + + @overload + def suggest_categorical(self, name: str, choices: Sequence[None]) -> None: ... + + @overload + def suggest_categorical(self, name: str, choices: Sequence[bool]) -> bool: ... + + @overload + def suggest_categorical(self, name: str, choices: Sequence[int]) -> int: ... + + @overload + def suggest_categorical(self, name: str, choices: Sequence[float]) -> float: ... + + @overload + def suggest_categorical(self, name: str, choices: Sequence[str]) -> str: ... + + @overload + def suggest_categorical( + self, name: str, choices: Sequence[CategoricalChoiceType] + ) -> CategoricalChoiceType: ... + + def suggest_categorical( + self, name: str, choices: Sequence[CategoricalChoiceType] + ) -> CategoricalChoiceType: + return self._suggest(name, CategoricalDistribution(choices=choices)) + + def report(self, value: float, step: int) -> None: + pass + + def should_prune(self) -> bool: + return False + + def set_user_attr(self, key: str, value: Any) -> None: + self._user_attrs[key] = value + + @deprecated_func("3.1.0", "5.0.0") + def set_system_attr(self, key: str, value: Any) -> None: + self._system_attrs[key] = value + + def _suggest(self, name: str, distribution: BaseDistribution) -> Any: + if name not in self._params: + raise ValueError( + "The value of the parameter '{}' is not found. 
Please set it at " + "the construction of the FixedTrial object.".format(name) + ) + + value = self._params[name] + param_value_in_internal_repr = distribution.to_internal_repr(value) + if not distribution._contains(param_value_in_internal_repr): + warnings.warn( + "The value {} of the parameter '{}' is out of " + "the range of the distribution {}.".format(value, name, distribution) + ) + + if name in self._distributions: + distributions.check_distribution_compatibility(self._distributions[name], distribution) + + self._suggested_params[name] = value + self._distributions[name] = distribution + + return value + + @property + def params(self) -> dict[str, Any]: + return self._suggested_params + + @property + def distributions(self) -> dict[str, BaseDistribution]: + return self._distributions + + @property + def user_attrs(self) -> dict[str, Any]: + return self._user_attrs + + @property + def system_attrs(self) -> dict[str, Any]: + return self._system_attrs + + @property + def datetime_start(self) -> datetime.datetime | None: + return self._datetime_start + + @property + def number(self) -> int: + return self._number diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/trial/_frozen.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/trial/_frozen.py new file mode 100644 index 0000000000000000000000000000000000000000..8df49f2e96d1c4d3431e454864026e64a4a5d15d --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/trial/_frozen.py @@ -0,0 +1,597 @@ +from __future__ import annotations + +from collections.abc import Mapping +from collections.abc import Sequence +import datetime +import math +from typing import Any +from typing import cast +from typing import Dict +from typing import overload +import warnings + +from optuna import distributions +from optuna import logging +from optuna._convert_positional_args import convert_positional_args +from optuna._deprecated import deprecated_func +from optuna._typing import JSONSerializable +from optuna.distributions import _convert_old_distribution_to_new_distribution +from optuna.distributions import BaseDistribution +from optuna.distributions import CategoricalChoiceType +from optuna.distributions import CategoricalDistribution +from optuna.distributions import FloatDistribution +from optuna.distributions import IntDistribution +from optuna.trial._base import _SUGGEST_INT_POSITIONAL_ARGS +from optuna.trial._base import BaseTrial +from optuna.trial._state import TrialState + + +_logger = logging.get_logger(__name__) +_suggest_deprecated_msg = "Use suggest_float{args} instead." + + +class FrozenTrial(BaseTrial): + """Status and results of a :class:`~optuna.trial.Trial`. + + An object of this class has the same methods as :class:`~optuna.trial.Trial`, but is not + associated with, nor has any references to a :class:`~optuna.study.Study`. + + It is therefore not possible to make persistent changes to a storage from this object by + itself, for instance by using :func:`~optuna.trial.FrozenTrial.set_user_attr`. + + It will suggest the parameter values stored in :attr:`params` and will not sample values from + any distributions. + + It can be passed to objective functions (see :func:`~optuna.study.Study.optimize`) and is + useful for deploying optimization results. + + Example: + + Re-evaluate an objective function with parameter values optimized study. + + .. 
testcode:: + + import optuna + + + def objective(trial): + x = trial.suggest_float("x", -1, 1) + return x**2 + + + study = optuna.create_study() + study.optimize(objective, n_trials=3) + + assert objective(study.best_trial) == study.best_value + + .. note:: + Instances are mutable, despite the name. + For instance, :func:`~optuna.trial.FrozenTrial.set_user_attr` will update user attributes + of objects in-place. + + + Example: + + Overwritten attributes. + + .. testcode:: + + import copy + import datetime + + import optuna + + + def objective(trial): + x = trial.suggest_float("x", -1, 1) + + # this user attribute always differs + trial.set_user_attr("evaluation time", datetime.datetime.now()) + + return x**2 + + + study = optuna.create_study() + study.optimize(objective, n_trials=3) + + best_trial = study.best_trial + best_trial_copy = copy.deepcopy(best_trial) + + # re-evaluate + objective(best_trial) + + # the user attribute is overwritten by re-evaluation + assert best_trial.user_attrs != best_trial_copy.user_attrs + + .. note:: + Please refer to :class:`~optuna.trial.Trial` for details of methods and properties. + + + Attributes: + number: + Unique and consecutive number of :class:`~optuna.trial.Trial` for each + :class:`~optuna.study.Study`. Note that this field uses zero-based numbering. + state: + :class:`TrialState` of the :class:`~optuna.trial.Trial`. + value: + Objective value of the :class:`~optuna.trial.Trial`. + ``value`` and ``values`` must not be specified at the same time. + values: + Sequence of objective values of the :class:`~optuna.trial.Trial`. + The length is greater than 1 if the problem is multi-objective optimization. + ``value`` and ``values`` must not be specified at the same time. + datetime_start: + Datetime where the :class:`~optuna.trial.Trial` started. + datetime_complete: + Datetime where the :class:`~optuna.trial.Trial` finished. + params: + Dictionary that contains suggested parameters. + distributions: + Dictionary that contains the distributions of :attr:`params`. + user_attrs: + Dictionary that contains the attributes of the :class:`~optuna.trial.Trial` set with + :func:`optuna.trial.Trial.set_user_attr`. + system_attrs: + Dictionary that contains the attributes of the :class:`~optuna.trial.Trial` set with + :func:`optuna.trial.Trial.set_system_attr`. + intermediate_values: + Intermediate objective values set with :func:`optuna.trial.Trial.report`. 
+ """ + + def __init__( + self, + number: int, + state: TrialState, + value: float | None, + datetime_start: datetime.datetime | None, + datetime_complete: datetime.datetime | None, + params: dict[str, Any], + distributions: dict[str, BaseDistribution], + user_attrs: dict[str, Any], + system_attrs: dict[str, Any], + intermediate_values: dict[int, float], + trial_id: int, + *, + values: Sequence[float] | None = None, + ) -> None: + self._number = number + self.state = state + self._values: list[float] | None = None + if value is not None and values is not None: + raise ValueError("Specify only one of `value` and `values`.") + elif value is not None: + self._values = [value] + elif values is not None: + self._values = list(values) + self._datetime_start = datetime_start + self.datetime_complete = datetime_complete + self._params = params + self._user_attrs = user_attrs + self._system_attrs = system_attrs + self.intermediate_values = intermediate_values + self._distributions = distributions + self._trial_id = trial_id + + def __eq__(self, other: Any) -> bool: + if not isinstance(other, FrozenTrial): + return NotImplemented + return other.__dict__ == self.__dict__ + + def __lt__(self, other: Any) -> bool: + if not isinstance(other, FrozenTrial): + return NotImplemented + + return self.number < other.number + + def __le__(self, other: Any) -> bool: + if not isinstance(other, FrozenTrial): + return NotImplemented + + return self.number <= other.number + + def __hash__(self) -> int: + return hash(tuple(getattr(self, field) for field in self.__dict__)) + + def __repr__(self) -> str: + return "{cls}({kwargs})".format( + cls=self.__class__.__name__, + kwargs=", ".join( + "{field}={value}".format( + field=field if not field.startswith("_") else field[1:], + value=repr(getattr(self, field)), + ) + for field in self.__dict__ + ) + + ", value=None", + ) + + def suggest_float( + self, + name: str, + low: float, + high: float, + *, + step: float | None = None, + log: bool = False, + ) -> float: + return self._suggest(name, FloatDistribution(low, high, log=log, step=step)) + + @deprecated_func("3.0.0", "6.0.0", text=_suggest_deprecated_msg.format(args="")) + def suggest_uniform(self, name: str, low: float, high: float) -> float: + return self.suggest_float(name, low, high) + + @deprecated_func("3.0.0", "6.0.0", text=_suggest_deprecated_msg.format(args="(..., log=True)")) + def suggest_loguniform(self, name: str, low: float, high: float) -> float: + return self.suggest_float(name, low, high, log=True) + + @deprecated_func("3.0.0", "6.0.0", text=_suggest_deprecated_msg.format(args="(..., step=...)")) + def suggest_discrete_uniform(self, name: str, low: float, high: float, q: float) -> float: + return self.suggest_float(name, low, high, step=q) + + @convert_positional_args( + previous_positional_arg_names=_SUGGEST_INT_POSITIONAL_ARGS, + deprecated_version="3.5.0", + removed_version="5.0.0", + ) + def suggest_int( + self, name: str, low: int, high: int, *, step: int = 1, log: bool = False + ) -> int: + return int(self._suggest(name, IntDistribution(low, high, log=log, step=step))) + + @overload + def suggest_categorical(self, name: str, choices: Sequence[None]) -> None: ... + + @overload + def suggest_categorical(self, name: str, choices: Sequence[bool]) -> bool: ... + + @overload + def suggest_categorical(self, name: str, choices: Sequence[int]) -> int: ... + + @overload + def suggest_categorical(self, name: str, choices: Sequence[float]) -> float: ... 
+ + @overload + def suggest_categorical(self, name: str, choices: Sequence[str]) -> str: ... + + @overload + def suggest_categorical( + self, name: str, choices: Sequence[CategoricalChoiceType] + ) -> CategoricalChoiceType: ... + + def suggest_categorical( + self, name: str, choices: Sequence[CategoricalChoiceType] + ) -> CategoricalChoiceType: + return self._suggest(name, CategoricalDistribution(choices=choices)) + + def report(self, value: float, step: int) -> None: + """Interface of report function. + + Since :class:`~optuna.trial.FrozenTrial` is not pruned, + this report function does nothing. + + .. seealso:: + Please refer to :func:`~optuna.trial.FrozenTrial.should_prune`. + + Args: + value: + A value returned from the objective function. + step: + Step of the trial (e.g., Epoch of neural network training). Note that pruners + assume that ``step`` starts at zero. For example, + :class:`~optuna.pruners.MedianPruner` simply checks if ``step`` is less than + ``n_warmup_steps`` as the warmup mechanism. + """ + + pass + + def should_prune(self) -> bool: + """Suggest whether the trial should be pruned or not. + + The suggestion is always :obj:`False` regardless of a pruning algorithm. + + .. note:: + :class:`~optuna.trial.FrozenTrial` only samples one combination of parameters. + + Returns: + :obj:`False`. + """ + + return False + + def set_user_attr(self, key: str, value: Any) -> None: + self._user_attrs[key] = value + + @deprecated_func("3.1.0", "5.0.0") + def set_system_attr(self, key: str, value: Any) -> None: + self._system_attrs[key] = value + + def _validate(self) -> None: + if self.state != TrialState.WAITING and self.datetime_start is None: + raise ValueError( + "`datetime_start` is supposed to be set when the trial state is not waiting." + ) + + if self.state.is_finished(): + if self.datetime_complete is None: + raise ValueError("`datetime_complete` is supposed to be set for a finished trial.") + else: + if self.datetime_complete is not None: + raise ValueError( + "`datetime_complete` is supposed to be None for an unfinished trial." + ) + + if self.state == TrialState.FAIL and self._values is not None: + raise ValueError(f"values should be None for a failed trial, but got {self._values}.") + if self.state == TrialState.COMPLETE: + if self._values is None: + raise ValueError("values should be set for a complete trial.") + elif any(math.isnan(x) for x in self._values): + raise ValueError("values should not contain NaN.") + + if set(self.params.keys()) != set(self.distributions.keys()): + raise ValueError( + "Inconsistent parameters {} and distributions {}.".format( + set(self.params.keys()), set(self.distributions.keys()) + ) + ) + + for param_name, param_value in self.params.items(): + distribution = self.distributions[param_name] + + param_value_in_internal_repr = distribution.to_internal_repr(param_value) + if not distribution._contains(param_value_in_internal_repr): + raise ValueError( + "The value {} of parameter '{}' isn't contained in the distribution " + "{}.".format(param_value, param_name, distribution) + ) + + def _suggest(self, name: str, distribution: BaseDistribution) -> Any: + if name not in self._params: + raise ValueError( + "The value of the parameter '{}' is not found. 
Please set it at " + "the construction of the FrozenTrial object.".format(name) + ) + + value = self._params[name] + param_value_in_internal_repr = distribution.to_internal_repr(value) + if not distribution._contains(param_value_in_internal_repr): + warnings.warn( + "The value {} of the parameter '{}' is out of " + "the range of the distribution {}.".format(value, name, distribution) + ) + + if name in self._distributions: + distributions.check_distribution_compatibility(self._distributions[name], distribution) + + self._distributions[name] = distribution + + return value + + @property + def number(self) -> int: + return self._number + + @number.setter + def number(self, value: int) -> None: + self._number = value + + @property + def value(self) -> float | None: + if self._values is not None: + if len(self._values) > 1: + raise RuntimeError( + "This attribute is not available during multi-objective optimization." + ) + return self._values[0] + return None + + @value.setter + def value(self, v: float | None) -> None: + if self._values is not None: + if len(self._values) > 1: + raise RuntimeError( + "This attribute is not available during multi-objective optimization." + ) + + if v is not None: + self._values = [v] + else: + self._values = None + + # These `_get_values`, `_set_values`, and `values = property(_get_values, _set_values)` are + # defined to pass the mypy. + # See https://github.com/python/mypy/issues/3004#issuecomment-726022329. + def _get_values(self) -> list[float] | None: + return self._values + + def _set_values(self, v: Sequence[float] | None) -> None: + if v is not None: + self._values = list(v) + else: + self._values = None + + values = property(_get_values, _set_values) + + @property + def datetime_start(self) -> datetime.datetime | None: + return self._datetime_start + + @datetime_start.setter + def datetime_start(self, value: datetime.datetime | None) -> None: + self._datetime_start = value + + @property + def params(self) -> dict[str, Any]: + return self._params + + @params.setter + def params(self, params: dict[str, Any]) -> None: + self._params = params + + @property + def distributions(self) -> dict[str, BaseDistribution]: + return self._distributions + + @distributions.setter + def distributions(self, value: dict[str, BaseDistribution]) -> None: + self._distributions = value + + @property + def user_attrs(self) -> dict[str, Any]: + return self._user_attrs + + @user_attrs.setter + def user_attrs(self, value: dict[str, Any]) -> None: + self._user_attrs = value + + @property + def system_attrs(self) -> dict[str, Any]: + return self._system_attrs + + @system_attrs.setter + def system_attrs(self, value: Mapping[str, JSONSerializable]) -> None: + self._system_attrs = cast(Dict[str, Any], value) + + @property + def last_step(self) -> int | None: + """Return the maximum step of :attr:`intermediate_values` in the trial. + + Returns: + The maximum step of intermediates. + """ + + if len(self.intermediate_values) == 0: + return None + else: + return max(self.intermediate_values.keys()) + + @property + def duration(self) -> datetime.timedelta | None: + """Return the elapsed time taken to complete the trial. + + Returns: + The duration. 
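+
+        Example:
+
+            A minimal sketch; it assumes only a study with one completed
+            trial, so the best trial has both timestamps set.
+
+            .. testcode::
+
+                import optuna
+
+
+                def objective(trial):
+                    return trial.suggest_float("x", 0, 1)
+
+
+                study = optuna.create_study()
+                study.optimize(objective, n_trials=1)
+
+                elapsed = study.best_trial.duration
+                assert elapsed is not None
+                assert elapsed.total_seconds() >= 0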
+ """ + + if self.datetime_start and self.datetime_complete: + return self.datetime_complete - self.datetime_start + else: + return None + + +def create_trial( + *, + state: TrialState = TrialState.COMPLETE, + value: float | None = None, + values: Sequence[float] | None = None, + params: dict[str, Any] | None = None, + distributions: dict[str, BaseDistribution] | None = None, + user_attrs: dict[str, Any] | None = None, + system_attrs: dict[str, Any] | None = None, + intermediate_values: dict[int, float] | None = None, +) -> FrozenTrial: + """Create a new :class:`~optuna.trial.FrozenTrial`. + + Example: + + .. testcode:: + + import optuna + from optuna.distributions import CategoricalDistribution + from optuna.distributions import FloatDistribution + + trial = optuna.trial.create_trial( + params={"x": 1.0, "y": 0}, + distributions={ + "x": FloatDistribution(0, 10), + "y": CategoricalDistribution([-1, 0, 1]), + }, + value=5.0, + ) + + assert isinstance(trial, optuna.trial.FrozenTrial) + assert trial.value == 5.0 + assert trial.params == {"x": 1.0, "y": 0} + + .. seealso:: + + See :func:`~optuna.study.Study.add_trial` for how this function can be used to create a + study from existing trials. + + .. note:: + + Please note that this is a low-level API. In general, trials that are passed to objective + functions are created inside :func:`~optuna.study.Study.optimize`. + + .. note:: + When ``state`` is :class:`TrialState.COMPLETE`, the following parameters are + required: + + * ``params`` + * ``distributions`` + * ``value`` or ``values`` + + Args: + state: + Trial state. + value: + Trial objective value. Must be specified if ``state`` is :class:`TrialState.COMPLETE`. + ``value`` and ``values`` must not be specified at the same time. + values: + Sequence of the trial objective values. The length is greater than 1 if the problem is + multi-objective optimization. + Must be specified if ``state`` is :class:`TrialState.COMPLETE`. + ``value`` and ``values`` must not be specified at the same time. + params: + Dictionary with suggested parameters of the trial. + distributions: + Dictionary with parameter distributions of the trial. + user_attrs: + Dictionary with user attributes. + system_attrs: + Dictionary with system attributes. Should not have to be used for most users. + intermediate_values: + Dictionary with intermediate objective values of the trial. + + Returns: + Created trial. 
+ """ + + params = params or {} + distributions = distributions or {} + distributions = { + key: _convert_old_distribution_to_new_distribution(dist) + for key, dist in distributions.items() + } + user_attrs = user_attrs or {} + system_attrs = system_attrs or {} + intermediate_values = intermediate_values or {} + + if state == TrialState.WAITING: + datetime_start = None + else: + datetime_start = datetime.datetime.now() + + if state.is_finished(): + datetime_complete: datetime.datetime | None = datetime_start + else: + datetime_complete = None + + trial = FrozenTrial( + number=-1, + trial_id=-1, + state=state, + value=value, + values=values, + datetime_start=datetime_start, + datetime_complete=datetime_complete, + params=params, + distributions=distributions, + user_attrs=user_attrs, + system_attrs=system_attrs, + intermediate_values=intermediate_values, + ) + + trial._validate() + + return trial diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/trial/_state.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/trial/_state.py new file mode 100644 index 0000000000000000000000000000000000000000..f4a77131102af73c457596fed305d5125e069207 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/trial/_state.py @@ -0,0 +1,36 @@ +import enum + + +class TrialState(enum.IntEnum): + """State of a :class:`~optuna.trial.Trial`. + + Attributes: + RUNNING: + The :class:`~optuna.trial.Trial` is running. + WAITING: + The :class:`~optuna.trial.Trial` is waiting and unfinished. + COMPLETE: + The :class:`~optuna.trial.Trial` has been finished without any error. + PRUNED: + The :class:`~optuna.trial.Trial` has been pruned with + :class:`~optuna.exceptions.TrialPruned`. + FAIL: + The :class:`~optuna.trial.Trial` has failed due to an uncaught error. + """ + + RUNNING = 0 + COMPLETE = 1 + PRUNED = 2 + FAIL = 3 + WAITING = 4 + + def __repr__(self) -> str: + return str(self) + + def is_finished(self) -> bool: + """Return a bool value to represent whether the trial state is unfinished or not. + + The unfinished state is either ``RUNNING`` or ``WAITING``. 
+ """ + + return self != TrialState.RUNNING and self != TrialState.WAITING diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/trial/_trial.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/trial/_trial.py new file mode 100644 index 0000000000000000000000000000000000000000..f0bf752ea369d441f964e7636315cd8e6e63cad6 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/trial/_trial.py @@ -0,0 +1,772 @@ +from __future__ import annotations + +from collections import UserDict +from collections.abc import Sequence +import copy +import datetime +from typing import Any +from typing import overload +import warnings + +import optuna +from optuna import distributions +from optuna import logging +from optuna import pruners +from optuna._convert_positional_args import convert_positional_args +from optuna._deprecated import deprecated_func +from optuna.distributions import BaseDistribution +from optuna.distributions import CategoricalChoiceType +from optuna.distributions import CategoricalDistribution +from optuna.distributions import FloatDistribution +from optuna.distributions import IntDistribution +from optuna.trial import FrozenTrial +from optuna.trial._base import _SUGGEST_INT_POSITIONAL_ARGS +from optuna.trial._base import BaseTrial + + +_logger = logging.get_logger(__name__) +_suggest_deprecated_msg = "Use suggest_float{args} instead." + + +class Trial(BaseTrial): + """A trial is a process of evaluating an objective function. + + This object is passed to an objective function and provides interfaces to get parameter + suggestion, manage the trial's state, and set/get user-defined attributes of the trial. + + Note that the direct use of this constructor is not recommended. + This object is seamlessly instantiated and passed to the objective function behind + the :func:`optuna.study.Study.optimize()` method; hence library users do not care about + instantiation of this object. + + Args: + study: + A :class:`~optuna.study.Study` object. + trial_id: + A trial ID that is automatically generated. + + """ + + def __init__(self, study: "optuna.study.Study", trial_id: int) -> None: + self.study = study + self._trial_id = trial_id + + self.storage = self.study._storage + + self._cached_frozen_trial = self.storage.get_trial(self._trial_id) + study = pruners._filter_study(self.study, self._cached_frozen_trial) + + self.study.sampler.before_trial(study, self._cached_frozen_trial) + + self.relative_search_space = self.study.sampler.infer_relative_search_space( + study, self._cached_frozen_trial + ) + self._relative_params: dict[str, Any] | None = None + self._fixed_params = self._cached_frozen_trial.system_attrs.get("fixed_params", {}) + + @property + def relative_params(self) -> dict[str, Any]: + if self._relative_params is None: + study = pruners._filter_study(self.study, self._cached_frozen_trial) + self._relative_params = self.study.sampler.sample_relative( + study, self._cached_frozen_trial, self.relative_search_space + ) + return self._relative_params + + def suggest_float( + self, + name: str, + low: float, + high: float, + *, + step: float | None = None, + log: bool = False, + ) -> float: + """Suggest a value for the floating point parameter. + + Example: + + Suggest a momentum, learning rate and scaling factor of learning rate + for neural network training. + + .. 
testcode:: + + import numpy as np + from sklearn.datasets import load_iris + from sklearn.model_selection import train_test_split + from sklearn.neural_network import MLPClassifier + + import optuna + + X, y = load_iris(return_X_y=True) + X_train, X_valid, y_train, y_valid = train_test_split(X, y, random_state=0) + + + def objective(trial): + momentum = trial.suggest_float("momentum", 0.0, 1.0) + learning_rate_init = trial.suggest_float( + "learning_rate_init", 1e-5, 1e-3, log=True + ) + power_t = trial.suggest_float("power_t", 0.2, 0.8, step=0.1) + clf = MLPClassifier( + hidden_layer_sizes=(100, 50), + momentum=momentum, + learning_rate_init=learning_rate_init, + solver="sgd", + random_state=0, + power_t=power_t, + ) + clf.fit(X_train, y_train) + + return clf.score(X_valid, y_valid) + + + study = optuna.create_study(direction="maximize") + study.optimize(objective, n_trials=3) + + Args: + name: + A parameter name. + low: + Lower endpoint of the range of suggested values. ``low`` is included in the range. + ``low`` must be less than or equal to ``high``. If ``log`` is :obj:`True`, + ``low`` must be larger than 0. + high: + Upper endpoint of the range of suggested values. ``high`` is included in the range. + ``high`` must be greater than or equal to ``low``. + step: + A step of discretization. + + .. note:: + The ``step`` and ``log`` arguments cannot be used at the same time. To set + the ``step`` argument to a float number, set the ``log`` argument to + :obj:`False`. + log: + A flag to sample the value from the log domain or not. + If ``log`` is true, the value is sampled from the range in the log domain. + Otherwise, the value is sampled from the range in the linear domain. + + .. note:: + The ``step`` and ``log`` arguments cannot be used at the same time. To set + the ``log`` argument to :obj:`True`, set the ``step`` argument to :obj:`None`. + + Returns: + A suggested float value. + + .. seealso:: + :ref:`configurations` tutorial describes more details and flexible usages. + """ + + distribution = FloatDistribution(low, high, log=log, step=step) + suggested_value = self._suggest(name, distribution) + self._check_distribution(name, distribution) + return suggested_value + + @deprecated_func("3.0.0", "6.0.0", text=_suggest_deprecated_msg.format(args="")) + def suggest_uniform(self, name: str, low: float, high: float) -> float: + """Suggest a value for the continuous parameter. + + The value is sampled from the range :math:`[\\mathsf{low}, \\mathsf{high})` + in the linear domain. When :math:`\\mathsf{low} = \\mathsf{high}`, the value of + :math:`\\mathsf{low}` will be returned. + + Args: + name: + A parameter name. + low: + Lower endpoint of the range of suggested values. ``low`` is included in the range. + high: + Upper endpoint of the range of suggested values. ``high`` is included in the range. + + Returns: + A suggested float value. + """ + + return self.suggest_float(name, low, high) + + @deprecated_func("3.0.0", "6.0.0", text=_suggest_deprecated_msg.format(args="(..., log=True)")) + def suggest_loguniform(self, name: str, low: float, high: float) -> float: + """Suggest a value for the continuous parameter. + + The value is sampled from the range :math:`[\\mathsf{low}, \\mathsf{high})` + in the log domain. When :math:`\\mathsf{low} = \\mathsf{high}`, the value of + :math:`\\mathsf{low}` will be returned. + + Args: + name: + A parameter name. + low: + Lower endpoint of the range of suggested values. ``low`` is included in the range. 
+ high: + Upper endpoint of the range of suggested values. ``high`` is included in the range. + + Returns: + A suggested float value. + """ + + return self.suggest_float(name, low, high, log=True) + + @deprecated_func("3.0.0", "6.0.0", text=_suggest_deprecated_msg.format(args="(..., step=...)")) + def suggest_discrete_uniform(self, name: str, low: float, high: float, q: float) -> float: + """Suggest a value for the discrete parameter. + + The value is sampled from the range :math:`[\\mathsf{low}, \\mathsf{high}]`, + and the step of discretization is :math:`q`. More specifically, + this method returns one of the values in the sequence + :math:`\\mathsf{low}, \\mathsf{low} + q, \\mathsf{low} + 2 q, \\dots, + \\mathsf{low} + k q \\le \\mathsf{high}`, + where :math:`k` denotes an integer. Note that :math:`high` may be changed due to round-off + errors if :math:`q` is not an integer. Please check warning messages to find the changed + values. + + Args: + name: + A parameter name. + low: + Lower endpoint of the range of suggested values. ``low`` is included in the range. + high: + Upper endpoint of the range of suggested values. ``high`` is included in the range. + q: + A step of discretization. + + Returns: + A suggested float value. + """ + + return self.suggest_float(name, low, high, step=q) + + @convert_positional_args( + previous_positional_arg_names=_SUGGEST_INT_POSITIONAL_ARGS, + deprecated_version="3.5.0", + removed_version="5.0.0", + ) + def suggest_int( + self, name: str, low: int, high: int, *, step: int = 1, log: bool = False + ) -> int: + """Suggest a value for the integer parameter. + + The value is sampled from the integers in :math:`[\\mathsf{low}, \\mathsf{high}]`. + + Example: + + Suggest the number of trees in `RandomForestClassifier `__. + + .. testcode:: + + import numpy as np + from sklearn.datasets import load_iris + from sklearn.ensemble import RandomForestClassifier + from sklearn.model_selection import train_test_split + + import optuna + + X, y = load_iris(return_X_y=True) + X_train, X_valid, y_train, y_valid = train_test_split(X, y) + + + def objective(trial): + n_estimators = trial.suggest_int("n_estimators", 50, 400) + clf = RandomForestClassifier(n_estimators=n_estimators, random_state=0) + clf.fit(X_train, y_train) + return clf.score(X_valid, y_valid) + + + study = optuna.create_study(direction="maximize") + study.optimize(objective, n_trials=3) + + Args: + name: + A parameter name. + low: + Lower endpoint of the range of suggested values. ``low`` is included in the range. + ``low`` must be less than or equal to ``high``. If ``log`` is :obj:`True`, + ``low`` must be larger than 0. + high: + Upper endpoint of the range of suggested values. ``high`` is included in the range. + ``high`` must be greater than or equal to ``low``. + step: + A step of discretization. + + .. note:: + Note that :math:`\\mathsf{high}` is modified if the range is not divisible by + :math:`\\mathsf{step}`. Please check the warning messages to find the changed + values. + + .. note:: + The method returns one of the values in the sequence + :math:`\\mathsf{low}, \\mathsf{low} + \\mathsf{step}, \\mathsf{low} + 2 * + \\mathsf{step}, \\dots, \\mathsf{low} + k * \\mathsf{step} \\le + \\mathsf{high}`, where :math:`k` denotes an integer. + + .. note:: + The ``step != 1`` and ``log`` arguments cannot be used at the same time. + To set the ``step`` argument :math:`\\mathsf{step} \\ge 2`, set the + ``log`` argument to :obj:`False`. + log: + A flag to sample the value from the log domain or not. + + .. 
note:: + If ``log`` is true, at first, the range of suggested values is divided into + grid points of width 1. The range of suggested values is then converted to + a log domain, from which a value is sampled. The uniformly sampled + value is re-converted to the original domain and rounded to the nearest grid + point that we just split, and the suggested value is determined. + For example, if `low = 2` and `high = 8`, then the range of suggested values is + `[2, 3, 4, 5, 6, 7, 8]` and lower values tend to be more sampled than higher + values. + + .. note:: + The ``step != 1`` and ``log`` arguments cannot be used at the same time. + To set the ``log`` argument to :obj:`True`, set the ``step`` argument to 1. + + .. seealso:: + :ref:`configurations` tutorial describes more details and flexible usages. + """ + + distribution = IntDistribution(low=low, high=high, log=log, step=step) + suggested_value = int(self._suggest(name, distribution)) + self._check_distribution(name, distribution) + return suggested_value + + @overload + def suggest_categorical(self, name: str, choices: Sequence[None]) -> None: ... + + @overload + def suggest_categorical(self, name: str, choices: Sequence[bool]) -> bool: ... + + @overload + def suggest_categorical(self, name: str, choices: Sequence[int]) -> int: ... + + @overload + def suggest_categorical(self, name: str, choices: Sequence[float]) -> float: ... + + @overload + def suggest_categorical(self, name: str, choices: Sequence[str]) -> str: ... + + @overload + def suggest_categorical( + self, name: str, choices: Sequence[CategoricalChoiceType] + ) -> CategoricalChoiceType: ... + + def suggest_categorical( + self, name: str, choices: Sequence[CategoricalChoiceType] + ) -> CategoricalChoiceType: + """Suggest a value for the categorical parameter. + + The value is sampled from ``choices``. + + Example: + + Suggest a kernel function of `SVC `__. + + .. testcode:: + + import numpy as np + from sklearn.datasets import load_iris + from sklearn.model_selection import train_test_split + from sklearn.svm import SVC + + import optuna + + X, y = load_iris(return_X_y=True) + X_train, X_valid, y_train, y_valid = train_test_split(X, y) + + + def objective(trial): + kernel = trial.suggest_categorical("kernel", ["linear", "poly", "rbf"]) + clf = SVC(kernel=kernel, gamma="scale", random_state=0) + clf.fit(X_train, y_train) + return clf.score(X_valid, y_valid) + + + study = optuna.create_study(direction="maximize") + study.optimize(objective, n_trials=3) + + + Args: + name: + A parameter name. + choices: + Parameter value candidates. + + .. seealso:: + :class:`~optuna.distributions.CategoricalDistribution`. + + Returns: + A suggested value. + + .. seealso:: + :ref:`configurations` tutorial describes more details and flexible usages. + """ + # There is no need to call self._check_distribution because + # CategoricalDistribution does not support dynamic value space. + + return self._suggest(name, CategoricalDistribution(choices=choices)) + + def report(self, value: float, step: int) -> None: + """Report an objective function value for a given step. + + The reported values are used by the pruners to determine whether this trial should be + pruned. + + .. seealso:: + Please refer to :class:`~optuna.pruners.BasePruner`. + + .. note:: + The reported value is converted to ``float`` type by applying ``float()`` + function internally. Thus, it accepts all float-like types (e.g., ``numpy.float32``). + If the conversion fails, a ``TypeError`` is raised. + + .. 
note:: + If this method is called multiple times at the same ``step`` in a trial, + the reported ``value`` only the first time is stored and the reported values + from the second time are ignored. + + .. note:: + :func:`~optuna.trial.Trial.report` does not support multi-objective + optimization. + + Example: + + Report intermediate scores of `SGDClassifier `__ training. + + .. testcode:: + + import numpy as np + from sklearn.datasets import load_iris + from sklearn.linear_model import SGDClassifier + from sklearn.model_selection import train_test_split + + import optuna + + X, y = load_iris(return_X_y=True) + X_train, X_valid, y_train, y_valid = train_test_split(X, y) + + + def objective(trial): + clf = SGDClassifier(random_state=0) + for step in range(100): + clf.partial_fit(X_train, y_train, np.unique(y)) + intermediate_value = clf.score(X_valid, y_valid) + trial.report(intermediate_value, step=step) + if trial.should_prune(): + raise optuna.TrialPruned() + + return clf.score(X_valid, y_valid) + + + study = optuna.create_study(direction="maximize") + study.optimize(objective, n_trials=3) + + + Args: + value: + A value returned from the objective function. + step: + Step of the trial (e.g., Epoch of neural network training). Note that pruners + assume that ``step`` starts at zero. For example, + :class:`~optuna.pruners.MedianPruner` simply checks if ``step`` is less than + ``n_warmup_steps`` as the warmup mechanism. + ``step`` must be a positive integer. + """ + + if len(self.study.directions) > 1: + raise NotImplementedError( + "Trial.report is not supported for multi-objective optimization." + ) + + try: + # For convenience, we allow users to report a value that can be cast to `float`. + value = float(value) + except (TypeError, ValueError): + message = ( + f"The `value` argument is of type '{type(value)}' but supposed to be a float." + ) + raise TypeError(message) from None + + try: + step = int(step) + except (TypeError, ValueError): + message = f"The `step` argument is of type '{type(step)}' but supposed to be an int." + raise TypeError(message) from None + + if step < 0: + raise ValueError(f"The `step` argument is {step} but cannot be negative.") + + if step in self._cached_frozen_trial.intermediate_values: + # Do nothing if already reported. + warnings.warn( + f"The reported value is ignored because this `step` {step} is already reported." + ) + return + + self.storage.set_trial_intermediate_value(self._trial_id, step, value) + self._cached_frozen_trial.intermediate_values[step] = value + + def should_prune(self) -> bool: + """Suggest whether the trial should be pruned or not. + + The suggestion is made by a pruning algorithm associated with the trial and is based on + previously reported values. The algorithm can be specified when constructing a + :class:`~optuna.study.Study`. + + .. note:: + If no values have been reported, the algorithm cannot make meaningful suggestions. + Similarly, if this method is called multiple times with the exact same set of reported + values, the suggestions will be the same. + + .. seealso:: + Please refer to the example code in :func:`optuna.trial.Trial.report`. + + .. note:: + :func:`~optuna.trial.Trial.should_prune` does not support multi-objective + optimization. + + Returns: + A boolean value. If :obj:`True`, the trial should be pruned according to the + configured pruning algorithm. Otherwise, the trial should continue. 
+ """ + + if len(self.study.directions) > 1: + raise NotImplementedError( + "Trial.should_prune is not supported for multi-objective optimization." + ) + + trial = self._get_latest_trial() + return self.study.pruner.prune(self.study, trial) + + def set_user_attr(self, key: str, value: Any) -> None: + """Set user attributes to the trial. + + The user attributes in the trial can be access via :func:`optuna.trial.Trial.user_attrs`. + + .. seealso:: + + See the recipe on :ref:`attributes`. + + Example: + + Save fixed hyperparameters of neural network training. + + .. testcode:: + + import numpy as np + from sklearn.datasets import load_iris + from sklearn.model_selection import train_test_split + from sklearn.neural_network import MLPClassifier + + import optuna + + X, y = load_iris(return_X_y=True) + X_train, X_valid, y_train, y_valid = train_test_split(X, y, random_state=0) + + + def objective(trial): + trial.set_user_attr("BATCHSIZE", 128) + momentum = trial.suggest_float("momentum", 0, 1.0) + clf = MLPClassifier( + hidden_layer_sizes=(100, 50), + batch_size=trial.user_attrs["BATCHSIZE"], + momentum=momentum, + solver="sgd", + random_state=0, + ) + clf.fit(X_train, y_train) + + return clf.score(X_valid, y_valid) + + + study = optuna.create_study(direction="maximize") + study.optimize(objective, n_trials=3) + assert "BATCHSIZE" in study.best_trial.user_attrs.keys() + assert study.best_trial.user_attrs["BATCHSIZE"] == 128 + + + Args: + key: + A key string of the attribute. + value: + A value of the attribute. The value should be JSON serializable. + """ + + self.storage.set_trial_user_attr(self._trial_id, key, value) + self._cached_frozen_trial.user_attrs[key] = value + + @deprecated_func("3.1.0", "5.0.0") + def set_system_attr(self, key: str, value: Any) -> None: + """Set system attributes to the trial. + + Note that Optuna internally uses this method to save system messages such as failure + reason of trials. Please use :func:`~optuna.trial.Trial.set_user_attr` to set users' + attributes. + + Args: + key: + A key string of the attribute. + value: + A value of the attribute. The value should be JSON serializable. + """ + + self.storage.set_trial_system_attr(self._trial_id, key, value) + self._cached_frozen_trial.system_attrs[key] = value + + def _suggest(self, name: str, distribution: BaseDistribution) -> Any: + storage = self.storage + trial_id = self._trial_id + + trial = self._get_latest_trial() + + if name in trial.distributions: + # No need to sample if already suggested. + distributions.check_distribution_compatibility(trial.distributions[name], distribution) + param_value = trial.params[name] + else: + if self._is_fixed_param(name, distribution): + param_value = self._fixed_params[name] + elif distribution.single(): + param_value = distributions._get_single_value(distribution) + elif self._is_relative_param(name, distribution): + param_value = self.relative_params[name] + else: + study = pruners._filter_study(self.study, trial) + param_value = self.study.sampler.sample_independent( + study, trial, name, distribution + ) + + # `param_value` is validated here (invalid value like `np.nan` raises ValueError). 
+ param_value_in_internal_repr = distribution.to_internal_repr(param_value) + storage.set_trial_param(trial_id, name, param_value_in_internal_repr, distribution) + + self._cached_frozen_trial.distributions[name] = distribution + self._cached_frozen_trial.params[name] = param_value + return param_value + + def _is_fixed_param(self, name: str, distribution: BaseDistribution) -> bool: + if name not in self._fixed_params: + return False + + param_value = self._fixed_params[name] + param_value_in_internal_repr = distribution.to_internal_repr(param_value) + + contained = distribution._contains(param_value_in_internal_repr) + if not contained: + warnings.warn( + "Fixed parameter '{}' with value {} is out of range " + "for distribution {}.".format(name, param_value, distribution) + ) + return True + + def _is_relative_param(self, name: str, distribution: BaseDistribution) -> bool: + if name not in self.relative_params: + return False + + if name not in self.relative_search_space: + raise ValueError( + "The parameter '{}' was sampled by `sample_relative` method " + "but it is not contained in the relative search space.".format(name) + ) + + relative_distribution = self.relative_search_space[name] + distributions.check_distribution_compatibility(relative_distribution, distribution) + + param_value = self.relative_params[name] + param_value_in_internal_repr = distribution.to_internal_repr(param_value) + return distribution._contains(param_value_in_internal_repr) + + def _check_distribution(self, name: str, distribution: BaseDistribution) -> None: + old_distribution = self._cached_frozen_trial.distributions.get(name, distribution) + if old_distribution != distribution: + warnings.warn( + 'Inconsistent parameter values for distribution with name "{}"! ' + "This might be a configuration mistake. " + "Optuna allows to call the same distribution with the same " + "name more than once in a trial. " + "When the parameter values are inconsistent optuna only " + "uses the values of the first call and ignores all following. " + "Using these values: {}".format(name, old_distribution._asdict()), + RuntimeWarning, + ) + + def _get_latest_trial(self) -> FrozenTrial: + # TODO(eukaryo): Remove this method after `system_attrs` property is removed. + latest_trial = copy.copy(self._cached_frozen_trial) + latest_trial.system_attrs = _LazyTrialSystemAttrs(self._trial_id, self.storage) + return latest_trial + + @property + def params(self) -> dict[str, Any]: + """Return parameters to be optimized. + + Returns: + A dictionary containing all parameters. + """ + + return copy.deepcopy(self._cached_frozen_trial.params) + + @property + def distributions(self) -> dict[str, BaseDistribution]: + """Return distributions of parameters to be optimized. + + Returns: + A dictionary containing all distributions. + """ + + return copy.deepcopy(self._cached_frozen_trial.distributions) + + @property + def user_attrs(self) -> dict[str, Any]: + """Return user attributes. + + Returns: + A dictionary containing all user attributes. + """ + + return copy.deepcopy(self._cached_frozen_trial.user_attrs) + + @property + @deprecated_func("3.1.0", "5.0.0") + def system_attrs(self) -> dict[str, Any]: + """Return system attributes. + + Returns: + A dictionary containing all system attributes. + """ + + return copy.deepcopy(self.storage.get_trial_system_attrs(self._trial_id)) + + @property + def datetime_start(self) -> datetime.datetime | None: + """Return start datetime. + + Returns: + Datetime where the :class:`~optuna.trial.Trial` started. 
+ """ + return self._cached_frozen_trial.datetime_start + + @property + def number(self) -> int: + """Return trial's number which is consecutive and unique in a study. + + Returns: + A trial number. + """ + + return self._cached_frozen_trial.number + + +class _LazyTrialSystemAttrs(UserDict): + def __init__(self, trial_id: int, storage: optuna.storages.BaseStorage) -> None: + super().__init__() + self._trial_id = trial_id + self._storage = storage + self._initialized = False + + def __getattribute__(self, key: str) -> Any: + if key == "data": + if not self._initialized: + self._initialized = True + super().update(self._storage.get_trial_system_attrs(self._trial_id)) + return super().__getattribute__(key) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/version.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/version.py new file mode 100644 index 0000000000000000000000000000000000000000..ecdb1cef9e02d39efde3996e051ae0a1c65ad08a --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/version.py @@ -0,0 +1 @@ +__version__ = "4.4.0" diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/__init__.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2478c2e81956a477bca80041028fae2fc7814001 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/__init__.py @@ -0,0 +1,32 @@ +from optuna.visualization import matplotlib +from optuna.visualization._contour import plot_contour +from optuna.visualization._edf import plot_edf +from optuna.visualization._hypervolume_history import plot_hypervolume_history +from optuna.visualization._intermediate_values import plot_intermediate_values +from optuna.visualization._optimization_history import plot_optimization_history +from optuna.visualization._parallel_coordinate import plot_parallel_coordinate +from optuna.visualization._param_importances import plot_param_importances +from optuna.visualization._pareto_front import plot_pareto_front +from optuna.visualization._rank import plot_rank +from optuna.visualization._slice import plot_slice +from optuna.visualization._terminator_improvement import plot_terminator_improvement +from optuna.visualization._timeline import plot_timeline +from optuna.visualization._utils import is_available + + +__all__ = [ + "is_available", + "matplotlib", + "plot_contour", + "plot_edf", + "plot_hypervolume_history", + "plot_intermediate_values", + "plot_optimization_history", + "plot_parallel_coordinate", + "plot_param_importances", + "plot_pareto_front", + "plot_slice", + "plot_rank", + "plot_terminator_improvement", + "plot_timeline", +] diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/_contour.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/_contour.py new file mode 100644 index 0000000000000000000000000000000000000000..cfc18f19ca4d46ffe4f227a135738a7349f9c4af --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/_contour.py @@ -0,0 +1,432 @@ +from __future__ import annotations + +from collections.abc import Callable +import math +from typing import Any +from typing 
import NamedTuple +import warnings + +import numpy as np + +from optuna.logging import get_logger +from optuna.samplers._base import _CONSTRAINTS_KEY +from optuna.study import Study +from optuna.study import StudyDirection +from optuna.trial import FrozenTrial +from optuna.trial import TrialState +from optuna.visualization._plotly_imports import _imports +from optuna.visualization._utils import _check_plot_args +from optuna.visualization._utils import _filter_nonfinite +from optuna.visualization._utils import _is_log_scale +from optuna.visualization._utils import _is_numerical +from optuna.visualization._utils import _is_reverse_scale + + +if _imports.is_successful(): + from optuna.visualization._plotly_imports import Contour + from optuna.visualization._plotly_imports import go + from optuna.visualization._plotly_imports import make_subplots + from optuna.visualization._plotly_imports import Scatter + from optuna.visualization._utils import COLOR_SCALE + +_logger = get_logger(__name__) + + +PADDING_RATIO = 0.05 + + +class _AxisInfo(NamedTuple): + name: str + range: tuple[float, float] + is_log: bool + is_cat: bool + indices: list[str | int | float] + values: list[str | float | None] + + +class _SubContourInfo(NamedTuple): + xaxis: _AxisInfo + yaxis: _AxisInfo + z_values: dict[tuple[int, int], float] + constraints: list[bool] = [] + + +class _ContourInfo(NamedTuple): + sorted_params: list[str] + sub_plot_infos: list[list[_SubContourInfo]] + reverse_scale: bool + target_name: str + + +class _PlotValues(NamedTuple): + x: list[Any] + y: list[Any] + + +def plot_contour( + study: Study, + params: list[str] | None = None, + *, + target: Callable[[FrozenTrial], float] | None = None, + target_name: str = "Objective Value", +) -> "go.Figure": + """Plot the parameter relationship as contour plot in a study. + + Note that, if a parameter contains missing values, a trial with missing values is not plotted. + + Args: + study: + A :class:`~optuna.study.Study` object whose trials are plotted for their target values. + params: + Parameter list to visualize. The default is all parameters. + target: + A function to specify the value to display. If it is :obj:`None` and ``study`` is being + used for single-objective optimization, the objective values are plotted. + + .. note:: + Specify this argument if ``study`` is being used for multi-objective optimization. + target_name: + Target's name to display on the color bar. + + Returns: + A :class:`plotly.graph_objects.Figure` object. + + .. note:: + The colormap is reversed when the ``target`` argument isn't :obj:`None` or ``direction`` + of :class:`~optuna.study.Study` is ``minimize``. 
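+
+    Example:
+
+        A minimal sketch, assuming plotly is installed; the two parameter
+        names are arbitrary.
+
+        .. testcode::
+
+            import optuna
+
+
+            def objective(trial):
+                x = trial.suggest_float("x", -5, 5)
+                y = trial.suggest_float("y", -5, 5)
+                return x**2 + y**2
+
+
+            study = optuna.create_study()
+            study.optimize(objective, n_trials=30)
+
+            fig = optuna.visualization.plot_contour(study, params=["x", "y"])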
+ """ + + _imports.check() + info = _get_contour_info(study, params, target, target_name) + return _get_contour_plot(info) + + +def _get_contour_plot(info: _ContourInfo) -> "go.Figure": + layout = go.Layout(title="Contour Plot") + + sorted_params = info.sorted_params + sub_plot_infos = info.sub_plot_infos + reverse_scale = info.reverse_scale + target_name = info.target_name + + if len(sorted_params) <= 1: + return go.Figure(data=[], layout=layout) + + if len(sorted_params) == 2: + x_param = sorted_params[0] + y_param = sorted_params[1] + sub_plot_info = sub_plot_infos[0][0] + sub_plots = _get_contour_subplot(sub_plot_info, reverse_scale, target_name) + figure = go.Figure(data=sub_plots, layout=layout) + figure.update_xaxes(title_text=x_param, range=sub_plot_info.xaxis.range) + figure.update_yaxes(title_text=y_param, range=sub_plot_info.yaxis.range) + + if sub_plot_info.xaxis.is_cat: + figure.update_xaxes(type="category") + if sub_plot_info.yaxis.is_cat: + figure.update_yaxes(type="category") + + if sub_plot_info.xaxis.is_log: + log_range = [math.log10(p) for p in sub_plot_info.xaxis.range] + figure.update_xaxes(range=log_range, type="log") + if sub_plot_info.yaxis.is_log: + log_range = [math.log10(p) for p in sub_plot_info.yaxis.range] + figure.update_yaxes(range=log_range, type="log") + else: + figure = make_subplots( + rows=len(sorted_params), cols=len(sorted_params), shared_xaxes=True, shared_yaxes=True + ) + figure.update_layout(layout) + showscale = True # showscale option only needs to be specified once. + for x_i, x_param in enumerate(sorted_params): + for y_i, y_param in enumerate(sorted_params): + if x_param == y_param: + figure.add_trace(go.Scatter(), row=y_i + 1, col=x_i + 1) + else: + sub_plots = _get_contour_subplot( + sub_plot_infos[y_i][x_i], reverse_scale, target_name + ) + contour = sub_plots[0] + scatter = sub_plots[1] + contour.update(showscale=showscale) # showscale's default is True. + if showscale: + showscale = False + figure.add_trace(contour, row=y_i + 1, col=x_i + 1) + figure.add_trace(scatter, row=y_i + 1, col=x_i + 1) + + xaxis = sub_plot_infos[y_i][x_i].xaxis + yaxis = sub_plot_infos[y_i][x_i].yaxis + figure.update_xaxes(range=xaxis.range, row=y_i + 1, col=x_i + 1) + figure.update_yaxes(range=yaxis.range, row=y_i + 1, col=x_i + 1) + + if xaxis.is_cat: + figure.update_xaxes(type="category", row=y_i + 1, col=x_i + 1) + if yaxis.is_cat: + figure.update_yaxes(type="category", row=y_i + 1, col=x_i + 1) + + if xaxis.is_log: + log_range = [math.log10(p) for p in xaxis.range] + figure.update_xaxes(range=log_range, type="log", row=y_i + 1, col=x_i + 1) + if yaxis.is_log: + log_range = [math.log10(p) for p in yaxis.range] + figure.update_yaxes(range=log_range, type="log", row=y_i + 1, col=x_i + 1) + + if x_i == 0: + figure.update_yaxes(title_text=y_param, row=y_i + 1, col=x_i + 1) + if y_i == len(sorted_params) - 1: + figure.update_xaxes(title_text=x_param, row=y_i + 1, col=x_i + 1) + + return figure + + +def _get_contour_subplot( + info: _SubContourInfo, + reverse_scale: bool, + target_name: str = "Objective Value", +) -> tuple["Contour", "Scatter", "Scatter"]: + x_indices = info.xaxis.indices + y_indices = info.yaxis.indices + + if len(x_indices) < 2 or len(y_indices) < 2: + return go.Contour(), go.Scatter(), go.Scatter() + if len(info.z_values) == 0: + warnings.warn( + f"Contour plot will not be displayed because `{info.xaxis.name}` and " + f"`{info.yaxis.name}` cannot co-exist in `trial.params`." 
+ ) + return go.Contour(), go.Scatter(), go.Scatter() + + feasible = _PlotValues([], []) + infeasible = _PlotValues([], []) + + for x_value, y_value, c in zip(info.xaxis.values, info.yaxis.values, info.constraints): + if x_value is not None and y_value is not None: + if c: + feasible.x.append(x_value) + feasible.y.append(y_value) + else: + infeasible.x.append(x_value) + infeasible.y.append(y_value) + + z_values = np.full((len(y_indices), len(x_indices)), np.nan) + + xys = np.array(list(info.z_values.keys())) + zs = np.array(list(info.z_values.values())) + + z_values[xys[:, 1], xys[:, 0]] = zs + + contour = go.Contour( + x=x_indices, + y=y_indices, + z=z_values, + colorbar={"title": target_name}, + colorscale=COLOR_SCALE, + connectgaps=True, + contours_coloring="heatmap", + hoverinfo="none", + line_smoothing=1.3, + reversescale=reverse_scale, + ) + + return ( + contour, + _create_scatter(feasible.x, feasible.y, is_feasible=True), + _create_scatter(infeasible.x, infeasible.y, is_feasible=False), + ) + + +def _create_scatter(x: list[Any], y: list[Any], is_feasible: bool) -> Scatter: + edge_color = "Gray" + marker_color = "black" if is_feasible else "#cccccc" + name = "Feasible Trial" if is_feasible else "Infeasible Trial" + return go.Scatter( + x=x, + y=y, + marker={ + "line": {"width": 2.0, "color": edge_color}, + "color": marker_color, + }, + mode="markers", + name=name, + showlegend=False, + ) + + +def _get_contour_info( + study: Study, + params: list[str] | None = None, + target: Callable[[FrozenTrial], float] | None = None, + target_name: str = "Objective Value", +) -> _ContourInfo: + _check_plot_args(study, target, target_name) + + trials = _filter_nonfinite( + study.get_trials(deepcopy=False, states=(TrialState.COMPLETE,)), target=target + ) + + all_params = {p_name for t in trials for p_name in t.params.keys()} + if len(trials) == 0: + _logger.warning("Your study does not have any completed trials.") + sorted_params = [] + elif params is None: + sorted_params = sorted(all_params) + else: + if len(params) <= 1: + _logger.warning("The length of params must be greater than 1.") + + for input_p_name in params: + if input_p_name not in all_params: + raise ValueError("Parameter {} does not exist in your study.".format(input_p_name)) + sorted_params = sorted(set(params)) + + sub_plot_infos: list[list[_SubContourInfo]] + if len(sorted_params) == 2: + x_param = sorted_params[0] + y_param = sorted_params[1] + sub_plot_info = _get_contour_subplot_info(study, trials, x_param, y_param, target) + sub_plot_infos = [[sub_plot_info]] + else: + sub_plot_infos = [] + for i, y_param in enumerate(sorted_params): + sub_plot_infos.append([]) + for x_param in sorted_params: + sub_plot_info = _get_contour_subplot_info(study, trials, x_param, y_param, target) + sub_plot_infos[i].append(sub_plot_info) + + reverse_scale = _is_reverse_scale(study, target) + + return _ContourInfo( + sorted_params=sorted_params, + sub_plot_infos=sub_plot_infos, + reverse_scale=reverse_scale, + target_name=target_name, + ) + + +def _get_contour_subplot_info( + study: Study, + trials: list[FrozenTrial], + x_param: str, + y_param: str, + target: Callable[[FrozenTrial], float] | None, +) -> _SubContourInfo: + xaxis = _get_axis_info(trials, x_param) + yaxis = _get_axis_info(trials, y_param) + + if x_param == y_param: + return _SubContourInfo(xaxis=xaxis, yaxis=yaxis, z_values={}) + + if len(xaxis.indices) < 2: + _logger.warning("Param {} unique value length is less than 2.".format(x_param)) + return _SubContourInfo(xaxis=xaxis, 
yaxis=yaxis, z_values={}) + if len(yaxis.indices) < 2: + _logger.warning("Param {} unique value length is less than 2.".format(y_param)) + return _SubContourInfo(xaxis=xaxis, yaxis=yaxis, z_values={}) + + z_values: dict[tuple[int, int], float] = {} + for i, trial in enumerate(trials): + if x_param not in trial.params or y_param not in trial.params: + continue + x_value = xaxis.values[i] + y_value = yaxis.values[i] + assert x_value is not None + assert y_value is not None + x_i = xaxis.indices.index(x_value) + y_i = yaxis.indices.index(y_value) + + if target is None: + value = trial.value + else: + value = target(trial) + assert value is not None + + existing = z_values.get((x_i, y_i)) + if existing is None or target is not None: + # When target function is present, we can't be sure what the z-value + # represents and therefore we don't know how to select the best one. + z_values[(x_i, y_i)] = value + else: + z_values[(x_i, y_i)] = ( + min(existing, value) + if study.direction is StudyDirection.MINIMIZE + else max(existing, value) + ) + + return _SubContourInfo( + xaxis=xaxis, + yaxis=yaxis, + z_values=z_values, + constraints=[_satisfy_constraints(t) for t in trials], + ) + + +def _satisfy_constraints(trial: FrozenTrial) -> bool: + constraints = trial.system_attrs.get(_CONSTRAINTS_KEY) + return constraints is None or all([x <= 0.0 for x in constraints]) + + +def _get_axis_info(trials: list[FrozenTrial], param_name: str) -> _AxisInfo: + values: list[str | float | None] + if _is_numerical(trials, param_name): + values = [t.params.get(param_name) for t in trials] + else: + values = [ + str(t.params.get(param_name)) if param_name in t.params else None for t in trials + ] + + min_value = min([v for v in values if v is not None]) + max_value = max([v for v in values if v is not None]) + + if _is_log_scale(trials, param_name): + min_value = float(min_value) + max_value = float(max_value) + padding = (math.log10(max_value) - math.log10(min_value)) * PADDING_RATIO + min_value = math.pow(10, math.log10(min_value) - padding) + max_value = math.pow(10, math.log10(max_value) + padding) + is_log = True + is_cat = False + + elif _is_numerical(trials, param_name): + min_value = float(min_value) + max_value = float(max_value) + padding = (max_value - min_value) * PADDING_RATIO + min_value = min_value - padding + max_value = max_value + padding + is_log = False + is_cat = False + + else: + unique_values = set(values) + span = len(unique_values) - 1 + if None in unique_values: + span -= 1 + padding = span * PADDING_RATIO + min_value = -padding + max_value = span + padding + is_log = False + is_cat = True + + indices = sorted(set([v for v in values if v is not None])) + + if len(indices) < 2: + return _AxisInfo( + name=param_name, + range=(min_value, max_value), + is_log=is_log, + is_cat=is_cat, + indices=indices, + values=values, + ) + + if _is_numerical(trials, param_name): + indices.insert(0, min_value) + indices.append(max_value) + + return _AxisInfo( + name=param_name, + range=(min_value, max_value), + is_log=is_log, + is_cat=is_cat, + indices=indices, + values=values, + ) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/_edf.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/_edf.py new file mode 100644 index 0000000000000000000000000000000000000000..4b7da8d4fabf17f796ee539a693eeabbd7b0af10 --- /dev/null +++ 
b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/_edf.py @@ -0,0 +1,148 @@ +from __future__ import annotations + +from collections.abc import Callable +from collections.abc import Sequence +from typing import cast +from typing import NamedTuple + +import numpy as np + +from optuna.logging import get_logger +from optuna.study import Study +from optuna.trial import FrozenTrial +from optuna.trial import TrialState +from optuna.visualization._plotly_imports import _imports +from optuna.visualization._utils import _check_plot_args +from optuna.visualization._utils import _filter_nonfinite + + +if _imports.is_successful(): + from optuna.visualization._plotly_imports import go + +_logger = get_logger(__name__) + + +NUM_SAMPLES_X_AXIS = 100 + + +class _EDFLineInfo(NamedTuple): + study_name: str + y_values: np.ndarray + + +class _EDFInfo(NamedTuple): + lines: list[_EDFLineInfo] + x_values: np.ndarray + + +def plot_edf( + study: Study | Sequence[Study], + *, + target: Callable[[FrozenTrial], float] | None = None, + target_name: str = "Objective Value", +) -> "go.Figure": + """Plot the objective value EDF (empirical distribution function) of a study. + + Note that only the complete trials are considered when plotting the EDF. + + .. note:: + + EDF is useful to analyze and improve search spaces. + For instance, you can see a practical use case of EDF in the paper + `Designing Network Design Spaces + `__. + + .. note:: + + The plotted EDF assumes that the value of the objective function is in + accordance with the uniform distribution over the objective space. + + Args: + study: + A target :class:`~optuna.study.Study` object. + You can pass multiple studies if you want to compare those EDFs. + target: + A function to specify the value to display. If it is :obj:`None` and ``study`` is being + used for single-objective optimization, the objective values are plotted. + + .. note:: + Specify this argument if ``study`` is being used for multi-objective optimization. + target_name: + Target's name to display on the axis label. + + Returns: + A :class:`plotly.graph_objects.Figure` object. 
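+
+    Example:
+
+        An illustrative sketch comparing the EDFs of two studies; the
+        ``objective`` function and the study names are assumptions made
+        for this example only::
+
+            import optuna
+
+            def objective(trial):
+                x = trial.suggest_float("x", -5, 5)
+                return x ** 2
+
+            studies = []
+            for name in ["study-a", "study-b"]:
+                study = optuna.create_study(study_name=name)
+                study.optimize(objective, n_trials=50)
+                studies.append(study)
+
+            fig = optuna.visualization.plot_edf(studies)
+            fig.show()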
+ """ + + _imports.check() + + layout = go.Layout( + title="Empirical Distribution Function Plot", + xaxis={"title": target_name}, + yaxis={"title": "Cumulative Probability"}, + ) + + info = _get_edf_info(study, target, target_name) + edf_lines = info.lines + + if len(edf_lines) == 0: + return go.Figure(data=[], layout=layout) + + traces = [] + for study_name, y_values in edf_lines: + traces.append(go.Scatter(x=info.x_values, y=y_values, name=study_name, mode="lines")) + + figure = go.Figure(data=traces, layout=layout) + figure.update_yaxes(range=[0, 1]) + + return figure + + +def _get_edf_info( + study: Study | Sequence[Study], + target: Callable[[FrozenTrial], float] | None = None, + target_name: str = "Objective Value", +) -> _EDFInfo: + if isinstance(study, Study): + studies = [study] + else: + studies = list(study) + + _check_plot_args(studies, target, target_name) + + if len(studies) == 0: + _logger.warning("There are no studies.") + return _EDFInfo(lines=[], x_values=np.array([])) + + if target is None: + + def _target(t: FrozenTrial) -> float: + return cast(float, t.value) + + target = _target + + study_names = [] + all_values: list[np.ndarray] = [] + for study in studies: + trials = _filter_nonfinite( + study.get_trials(deepcopy=False, states=(TrialState.COMPLETE,)), target=target + ) + + values = np.array([target(trial) for trial in trials]) + all_values.append(values) + study_names.append(study.study_name) + + if all(len(values) == 0 for values in all_values): + _logger.warning("There are no complete trials.") + return _EDFInfo(lines=[], x_values=np.array([])) + + min_x_value = np.min(np.concatenate(all_values)) + max_x_value = np.max(np.concatenate(all_values)) + x_values = np.linspace(min_x_value, max_x_value, NUM_SAMPLES_X_AXIS) + + edf_line_info_list = [] + for study_name, values in zip(study_names, all_values): + y_values = np.sum(values[:, np.newaxis] <= x_values, axis=0) / values.size + edf_line_info_list.append(_EDFLineInfo(study_name=study_name, y_values=y_values)) + + return _EDFInfo(lines=edf_line_info_list, x_values=x_values) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/_hypervolume_history.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/_hypervolume_history.py new file mode 100644 index 0000000000000000000000000000000000000000..25b0cb42d0a44c9b6801a6ece77b9048b2bc7a72 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/_hypervolume_history.py @@ -0,0 +1,140 @@ +from __future__ import annotations + +from collections.abc import Sequence +from typing import NamedTuple + +import numpy as np + +from optuna._experimental import experimental_func +from optuna._hypervolume import compute_hypervolume +from optuna.logging import get_logger +from optuna.samplers._base import _CONSTRAINTS_KEY +from optuna.study import Study +from optuna.study._study_direction import StudyDirection +from optuna.trial import TrialState +from optuna.visualization._plotly_imports import _imports + + +if _imports.is_successful(): + from optuna.visualization._plotly_imports import go + +_logger = get_logger(__name__) + + +class _HypervolumeHistoryInfo(NamedTuple): + trial_numbers: list[int] + values: list[float] + + +@experimental_func("3.3.0") +def plot_hypervolume_history( + study: Study, + reference_point: Sequence[float], +) -> "go.Figure": + """Plot hypervolume history of all trials in a study. 
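+
+    Example:
+
+        An illustrative sketch with a two-objective study; the ``objective``
+        function and the reference point are assumptions chosen for this
+        example only::
+
+            import optuna
+
+            def objective(trial):
+                x = trial.suggest_float("x", 0, 5)
+                y = trial.suggest_float("y", 0, 3)
+                return x ** 2 + y, x + y ** 2
+
+            study = optuna.create_study(directions=["minimize", "minimize"])
+            study.optimize(objective, n_trials=50)
+
+            fig = optuna.visualization.plot_hypervolume_history(
+                study, reference_point=[100.0, 50.0]
+            )
+            fig.show()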
+ + Args: + study: + A :class:`~optuna.study.Study` object whose trials are plotted for their hypervolumes. + The number of objectives must be 2 or more. + + reference_point: + A reference point to use for hypervolume computation. + The dimension of the reference point must be the same as the number of objectives. + + Returns: + A :class:`plotly.graph_objects.Figure` object. + """ + + _imports.check() + + if not study._is_multi_objective(): + raise ValueError( + "Study must be multi-objective. For single-objective optimization, " + "please use plot_optimization_history instead." + ) + + if len(reference_point) != len(study.directions): + raise ValueError( + "The dimension of the reference point must be the same as the number of objectives." + ) + + info = _get_hypervolume_history_info(study, np.asarray(reference_point, dtype=np.float64)) + return _get_hypervolume_history_plot(info) + + +def _get_hypervolume_history_plot( + info: _HypervolumeHistoryInfo, +) -> "go.Figure": + layout = go.Layout( + title="Hypervolume History Plot", + xaxis={"title": "Trial"}, + yaxis={"title": "Hypervolume"}, + ) + + data = go.Scatter( + x=info.trial_numbers, + y=info.values, + mode="lines+markers", + ) + return go.Figure(data=data, layout=layout) + + +def _get_hypervolume_history_info( + study: Study, + reference_point: np.ndarray, +) -> _HypervolumeHistoryInfo: + completed_trials = study.get_trials(deepcopy=False, states=(TrialState.COMPLETE,)) + + if len(completed_trials) == 0: + _logger.warning("Your study does not have any completed trials.") + + # Our hypervolume computation module assumes that all objectives are minimized. + # Here we transform the objective values and the reference point. + signs = np.asarray([1 if d == StudyDirection.MINIMIZE else -1 for d in study.directions]) + minimization_reference_point = signs * reference_point + + # Only feasible trials are considered in hypervolume computation. + trial_numbers = [] + hypervolume_values = [] + best_trials_values_normalized: np.ndarray | None = None + hypervolume = 0.0 + for trial in completed_trials: + trial_numbers.append(trial.number) + + has_constraints = _CONSTRAINTS_KEY in trial.system_attrs + if has_constraints: + constraints_values = trial.system_attrs[_CONSTRAINTS_KEY] + if any(map(lambda x: x > 0.0, constraints_values)): + # The trial is infeasible. + hypervolume_values.append(hypervolume) + continue + + values_normalized = (signs * trial.values)[np.newaxis, :] + if best_trials_values_normalized is not None: + if (best_trials_values_normalized <= values_normalized).all(axis=1).any(axis=0): + # The trial is not on the Pareto front. 
+ hypervolume_values.append(hypervolume) + continue + + if best_trials_values_normalized is None: + best_trials_values_normalized = values_normalized + else: + is_kept = (best_trials_values_normalized < values_normalized).any(axis=1) + best_trials_values_normalized = np.concatenate( + [best_trials_values_normalized[is_kept, :], values_normalized], axis=0 + ) + + loss_vals = best_trials_values_normalized[ + (best_trials_values_normalized <= minimization_reference_point[np.newaxis, :]).all( + axis=1 + ) + ] + if loss_vals.size > 0: + hypervolume = compute_hypervolume(loss_vals, minimization_reference_point) + hypervolume_values.append(hypervolume) + + if best_trials_values_normalized is None: + _logger.warning("Your study does not have any feasible trials.") + + return _HypervolumeHistoryInfo(trial_numbers, hypervolume_values) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/_intermediate_values.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/_intermediate_values.py new file mode 100644 index 0000000000000000000000000000000000000000..d79cae6e6ec2ce58bf81486998e70b7941907179 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/_intermediate_values.py @@ -0,0 +1,102 @@ +from __future__ import annotations + +from typing import NamedTuple + +from optuna.logging import get_logger +from optuna.samplers._base import _CONSTRAINTS_KEY +from optuna.study import Study +from optuna.trial import FrozenTrial +from optuna.trial import TrialState +from optuna.visualization._plotly_imports import _imports + + +if _imports.is_successful(): + from optuna.visualization._plotly_imports import go + +_logger = get_logger(__name__) + + +class _TrialInfo(NamedTuple): + trial_number: int + sorted_intermediate_values: list[tuple[int, float]] + feasible: bool + + +class _IntermediatePlotInfo(NamedTuple): + trial_infos: list[_TrialInfo] + + +def _get_intermediate_plot_info(study: Study) -> _IntermediatePlotInfo: + trials = study.get_trials( + deepcopy=False, states=(TrialState.PRUNED, TrialState.COMPLETE, TrialState.RUNNING) + ) + + def _satisfies_constraints(trial: FrozenTrial) -> bool: + constraints = trial.system_attrs.get(_CONSTRAINTS_KEY) + return constraints is None or all([x <= 0.0 for x in constraints]) + + trial_infos = [ + _TrialInfo( + trial.number, sorted(trial.intermediate_values.items()), _satisfies_constraints(trial) + ) + for trial in trials + if len(trial.intermediate_values) > 0 + ] + + if len(trials) == 0: + _logger.warning("Study instance does not contain trials.") + elif len(trial_infos) == 0: + _logger.warning( + "You need to set up the pruning feature to utilize `plot_intermediate_values()`" + ) + + return _IntermediatePlotInfo(trial_infos) + + +def plot_intermediate_values(study: Study) -> "go.Figure": + """Plot intermediate values of all trials in a study. + + Args: + study: + A :class:`~optuna.study.Study` object whose trials are plotted for their intermediate + values. + + Returns: + A :class:`plotly.graph_objects.Figure` object. 
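+
+    Example:
+
+        An illustrative sketch in which each trial reports intermediate
+        values; the ``objective`` function below is an assumption for this
+        example and simply reports a shrinking value at every step::
+
+            import optuna
+
+            def objective(trial):
+                x = trial.suggest_float("x", 0, 10)
+                value = x
+                for step in range(10):
+                    value *= 0.9
+                    trial.report(value, step)
+                    if trial.should_prune():
+                        raise optuna.TrialPruned()
+                return value
+
+            study = optuna.create_study(pruner=optuna.pruners.MedianPruner())
+            study.optimize(objective, n_trials=20)
+
+            fig = optuna.visualization.plot_intermediate_values(study)
+            fig.show()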
+ """ + + _imports.check() + return _get_intermediate_plot(_get_intermediate_plot_info(study)) + + +def _get_intermediate_plot(info: _IntermediatePlotInfo) -> "go.Figure": + layout = go.Layout( + title="Intermediate Values Plot", + xaxis={"title": "Step"}, + yaxis={"title": "Intermediate Value"}, + showlegend=False, + ) + + trial_infos = info.trial_infos + + if len(trial_infos) == 0: + return go.Figure(data=[], layout=layout) + + default_marker = {"maxdisplayed": 10} + + traces = [ + go.Scatter( + x=tuple((x for x, _ in tinfo.sorted_intermediate_values)), + y=tuple((y for _, y in tinfo.sorted_intermediate_values)), + mode="lines+markers", + marker=( + default_marker + if tinfo.feasible + else {**default_marker, "color": "#CCCCCC"} # type: ignore[dict-item] + ), + name="Trial{}".format(tinfo.trial_number), + ) + for tinfo in trial_infos + ] + + return go.Figure(data=traces, layout=layout) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/_optimization_history.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/_optimization_history.py new file mode 100644 index 0000000000000000000000000000000000000000..10557668a7358397e9961a6aacf2ac8b98d43eae --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/_optimization_history.py @@ -0,0 +1,299 @@ +from __future__ import annotations + +from collections.abc import Callable +from collections.abc import Sequence +from enum import Enum +import math +from typing import cast +from typing import NamedTuple + +import numpy as np + +from optuna.logging import get_logger +from optuna.samplers._base import _CONSTRAINTS_KEY +from optuna.study import Study +from optuna.study._study_direction import StudyDirection +from optuna.trial import FrozenTrial +from optuna.trial import TrialState +from optuna.visualization._plotly_imports import _imports +from optuna.visualization._utils import _check_plot_args + + +if _imports.is_successful(): + from optuna.visualization._plotly_imports import go + +_logger = get_logger(__name__) + + +class _ValueState(Enum): + Feasible = 0 + Infeasible = 1 + Incomplete = 2 + + +class _ValuesInfo(NamedTuple): + values: list[float] + stds: list[float] | None + label_name: str + states: list[_ValueState] + + +class _OptimizationHistoryInfo(NamedTuple): + trial_numbers: list[int] + values_info: _ValuesInfo + best_values_info: _ValuesInfo | None + + +def _get_optimization_history_info_list( + study: Study | Sequence[Study], + target: Callable[[FrozenTrial], float] | None, + target_name: str, + error_bar: bool, +) -> list[_OptimizationHistoryInfo]: + _check_plot_args(study, target, target_name) + if isinstance(study, Study): + studies = [study] + else: + studies = list(study) + + info_list: list[_OptimizationHistoryInfo] = [] + for study in studies: + trials = study.get_trials() + label_name = target_name if len(studies) == 1 else f"{target_name} of {study.study_name}" + values = [] + value_states = [] + for trial in trials: + if trial.state != TrialState.COMPLETE: + values.append(float("nan")) + value_states.append(_ValueState.Incomplete) + continue + constraints = trial.system_attrs.get(_CONSTRAINTS_KEY) + if constraints is None or all([x <= 0.0 for x in constraints]): + value_states.append(_ValueState.Feasible) + else: + value_states.append(_ValueState.Infeasible) + if target is not None: + values.append(target(trial)) + else: + values.append(cast(float, 
trial.value)) + if target is not None: + # We don't calculate best for user-defined target function since we cannot tell + # which direction is better. + best_values_info: _ValuesInfo | None = None + else: + feasible_best_values = [] + if study.direction == StudyDirection.MINIMIZE: + feasible_best_values = [ + v if s == _ValueState.Feasible else float("inf") + for v, s in zip(values, value_states) + ] + best_values = list(np.minimum.accumulate(feasible_best_values)) + else: + feasible_best_values = [ + v if s == _ValueState.Feasible else -float("inf") + for v, s in zip(values, value_states) + ] + best_values = list(np.maximum.accumulate(feasible_best_values)) + best_label_name = ( + "Best Value" if len(studies) == 1 else f"Best Value of {study.study_name}" + ) + best_values_info = _ValuesInfo(best_values, None, best_label_name, value_states) + info_list.append( + _OptimizationHistoryInfo( + trial_numbers=[t.number for t in trials], + values_info=_ValuesInfo(values, None, label_name, value_states), + best_values_info=best_values_info, + ) + ) + + if len(info_list) == 0: + _logger.warning("There are no studies.") + + feasible_trial_count = sum( + info.values_info.states.count(_ValueState.Feasible) for info in info_list + ) + infeasible_trial_count = sum( + info.values_info.states.count(_ValueState.Infeasible) for info in info_list + ) + if feasible_trial_count + infeasible_trial_count == 0: + _logger.warning("There are no complete trials.") + info_list.clear() + + if not error_bar: + return info_list + + # When error_bar=True, a list of 0 or 1 element is returned. + if len(info_list) == 0: + return [] + if feasible_trial_count == 0: + _logger.warning("There are no feasible trials.") + return [] + + all_trial_numbers = [number for info in info_list for number in info.trial_numbers] + max_num_trial = max(all_trial_numbers) + 1 + + def _aggregate(label_name: str, use_best_value: bool) -> tuple[list[int], _ValuesInfo]: + # Calculate mean and std of values for each trial number. 
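+        # Across studies, values[n] gathers the values observed at trial number n.
+        # For raw values only feasible trials contribute, whereas for best values
+        # every finite entry is kept, since the running best computed above
+        # already accounts for feasibility.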
+ values: list[list[float]] = [[] for _ in range(max_num_trial)] + states: list[list[_ValueState]] = [[] for _ in range(max_num_trial)] + assert info_list is not None + for trial_numbers, values_info, best_values_info in info_list: + if use_best_value: + assert best_values_info is not None + values_info = best_values_info + for n, v, s in zip(trial_numbers, values_info.values, values_info.states): + if not math.isinf(v): + if not use_best_value and s == _ValueState.Feasible: + values[n].append(v) + elif use_best_value: + values[n].append(v) + states[n].append(s) + trial_numbers_union: list[int] = [] + value_states: list[_ValueState] = [] + value_means: list[float] = [] + value_stds: list[float] = [] + for i in range(max_num_trial): + if len(states[i]) > 0 and _ValueState.Feasible in states[i]: + value_states.append(_ValueState.Feasible) + trial_numbers_union.append(i) + value_means.append(np.mean(values[i]).item()) + value_stds.append(np.std(values[i]).item()) + else: + value_states.append(_ValueState.Infeasible) + return trial_numbers_union, _ValuesInfo(value_means, value_stds, label_name, value_states) + + eb_trial_numbers, eb_values_info = _aggregate(target_name, False) + eb_best_values_info: _ValuesInfo | None = None + if target is None: + _, eb_best_values_info = _aggregate("Best Value", True) + return [_OptimizationHistoryInfo(eb_trial_numbers, eb_values_info, eb_best_values_info)] + + +def plot_optimization_history( + study: Study | Sequence[Study], + *, + target: Callable[[FrozenTrial], float] | None = None, + target_name: str = "Objective Value", + error_bar: bool = False, +) -> "go.Figure": + """Plot optimization history of all trials in a study. + + Args: + study: + A :class:`~optuna.study.Study` object whose trials are plotted for their target values. + You can pass multiple studies if you want to compare those optimization histories. + target: + A function to specify the value to display. If it is :obj:`None` and ``study`` is being + used for single-objective optimization, the objective values are plotted. + + .. note:: + Specify this argument if ``study`` is being used for multi-objective optimization. + target_name: + Target's name to display on the axis label and the legend. + error_bar: + A flag to show the error bar. + + Returns: + A :class:`plotly.graph_objects.Figure` object. 
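+
+    Example:
+
+        An illustrative sketch; the ``objective`` function is an assumption
+        made for this example. Passing a list of studies together with
+        ``error_bar=True`` would instead plot the mean and standard
+        deviation across those studies::
+
+            import optuna
+
+            def objective(trial):
+                x = trial.suggest_float("x", -5, 5)
+                return (x - 2) ** 2
+
+            study = optuna.create_study(direction="minimize")
+            study.optimize(objective, n_trials=50)
+
+            fig = optuna.visualization.plot_optimization_history(study)
+            fig.show()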
+ """ + + _imports.check() + + info_list = _get_optimization_history_info_list(study, target, target_name, error_bar) + return _get_optimization_history_plot(info_list, target_name) + + +def _get_optimization_history_plot( + info_list: list[_OptimizationHistoryInfo], + target_name: str, +) -> "go.Figure": + layout = go.Layout( + title="Optimization History Plot", + xaxis={"title": "Trial"}, + yaxis={"title": target_name}, + ) + + traces = [] + for trial_numbers, values_info, best_values_info in info_list: + infeasible_trial_numbers = [ + n for n, s in zip(trial_numbers, values_info.states) if s == _ValueState.Infeasible + ] + if values_info.stds is None: + error_y = None + feasible_trial_numbers = [ + num + for num, s in zip(trial_numbers, values_info.states) + if s == _ValueState.Feasible + ] + feasible_trial_values = [] + for num in feasible_trial_numbers: + feasible_trial_values.append(values_info.values[num]) + infeasible_trial_values = [] + for num in infeasible_trial_numbers: + infeasible_trial_values.append(values_info.values[num]) + else: + if ( + _ValueState.Infeasible in values_info.states + or _ValueState.Incomplete in values_info.states + ): + _logger.warning( + "Your study contains infeasible trials. " + "In optimization history plot, " + "error bars are calculated for only feasible trial values." + ) + error_y = {"type": "data", "array": values_info.stds, "visible": True} + feasible_trial_numbers = trial_numbers + feasible_trial_values = values_info.values + infeasible_trial_values = [] + traces.append( + go.Scatter( + x=feasible_trial_numbers, + y=feasible_trial_values, + error_y=error_y, + mode="markers", + name=values_info.label_name, + ) + ) + if best_values_info is not None: + traces.append( + go.Scatter( + x=trial_numbers, + y=best_values_info.values, + name=best_values_info.label_name, + mode="lines", + ) + ) + if best_values_info.stds is not None: + upper = np.array(best_values_info.values) + np.array(best_values_info.stds) + traces.append( + go.Scatter( + x=trial_numbers, + y=upper, + mode="lines", + line=dict(width=0.01), + showlegend=False, + ) + ) + lower = np.array(best_values_info.values) - np.array(best_values_info.stds) + traces.append( + go.Scatter( + x=trial_numbers, + y=lower, + mode="none", + showlegend=False, + fill="tonexty", + fillcolor="rgba(255,0,0,0.2)", + ) + ) + traces.append( + go.Scatter( + x=infeasible_trial_numbers, + y=infeasible_trial_values, + error_y=error_y, + mode="markers", + name="Infeasible Trial", + marker={"color": "#cccccc"}, + showlegend=False, + ) + ) + return go.Figure(data=traces, layout=layout) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/_parallel_coordinate.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/_parallel_coordinate.py new file mode 100644 index 0000000000000000000000000000000000000000..3c0b97a45080c9a514d68ad62955231a9263cd84 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/_parallel_coordinate.py @@ -0,0 +1,303 @@ +from __future__ import annotations + +from collections import defaultdict +from collections.abc import Callable +import math +from typing import Any +from typing import cast +from typing import NamedTuple + +import numpy as np + +from optuna.distributions import CategoricalDistribution +from optuna.logging import get_logger +from optuna.study import Study +from optuna.trial import FrozenTrial +from optuna.trial 
import TrialState +from optuna.visualization._plotly_imports import _imports +from optuna.visualization._utils import _check_plot_args +from optuna.visualization._utils import _filter_nonfinite +from optuna.visualization._utils import _get_skipped_trial_numbers +from optuna.visualization._utils import _is_log_scale +from optuna.visualization._utils import _is_numerical +from optuna.visualization._utils import _is_reverse_scale + + +if _imports.is_successful(): + from optuna.visualization._plotly_imports import go + from optuna.visualization._utils import COLOR_SCALE + +_logger = get_logger(__name__) + + +class _DimensionInfo(NamedTuple): + label: str + values: tuple[float, ...] + range: tuple[float, float] + is_log: bool + is_cat: bool + tickvals: list[int | float] + ticktext: list[str] + + +class _ParallelCoordinateInfo(NamedTuple): + dim_objective: _DimensionInfo + dims_params: list[_DimensionInfo] + reverse_scale: bool + target_name: str + + +def plot_parallel_coordinate( + study: Study, + params: list[str] | None = None, + *, + target: Callable[[FrozenTrial], float] | None = None, + target_name: str = "Objective Value", +) -> "go.Figure": + """Plot the high-dimensional parameter relationships in a study. + + Note that, if a parameter contains missing values, a trial with missing values is not plotted. + + Args: + study: + A :class:`~optuna.study.Study` object whose trials are plotted for their target values. + params: + Parameter list to visualize. The default is all parameters. + target: + A function to specify the value to display. If it is :obj:`None` and ``study`` is being + used for single-objective optimization, the objective values are plotted. + + .. note:: + Specify this argument if ``study`` is being used for multi-objective optimization. + target_name: + Target's name to display on the axis label and the legend. + + Returns: + A :class:`plotly.graph_objects.Figure` object. + + .. note:: + The colormap is reversed when the ``target`` argument isn't :obj:`None` or ``direction`` + of :class:`~optuna.study.Study` is ``minimize``. 
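+
+    Example:
+
+        An illustrative sketch mixing numerical and categorical parameters;
+        the ``objective`` function and the parameter names are assumptions
+        made for this example only::
+
+            import optuna
+
+            def objective(trial):
+                lr = trial.suggest_float("lr", 1e-5, 1e-1, log=True)
+                n_layers = trial.suggest_int("n_layers", 1, 4)
+                activation = trial.suggest_categorical("activation", ["relu", "tanh"])
+                penalty = 0.1 if activation == "tanh" else 0.0
+                return lr * n_layers + penalty
+
+            study = optuna.create_study()
+            study.optimize(objective, n_trials=40)
+
+            fig = optuna.visualization.plot_parallel_coordinate(
+                study, params=["lr", "n_layers", "activation"]
+            )
+            fig.show()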
+ """ + + _imports.check() + info = _get_parallel_coordinate_info(study, params, target, target_name) + return _get_parallel_coordinate_plot(info) + + +def _get_parallel_coordinate_plot(info: _ParallelCoordinateInfo) -> "go.Figure": + layout = go.Layout(title="Parallel Coordinate Plot") + + if len(info.dims_params) == 0 or len(info.dim_objective.values) == 0: + return go.Figure(data=[], layout=layout) + + dims = _get_dims_from_info(info) + reverse_scale = info.reverse_scale + target_name = info.target_name + + traces = [ + go.Parcoords( + dimensions=dims, + labelangle=30, + labelside="bottom", + line={ + "color": dims[0]["values"], + "colorscale": COLOR_SCALE, + "colorbar": {"title": target_name}, + "showscale": True, + "reversescale": reverse_scale, + }, + ) + ] + + figure = go.Figure(data=traces, layout=layout) + + return figure + + +def _get_parallel_coordinate_info( + study: Study, + params: list[str] | None = None, + target: Callable[[FrozenTrial], float] | None = None, + target_name: str = "Objective Value", +) -> _ParallelCoordinateInfo: + _check_plot_args(study, target, target_name) + + reverse_scale = _is_reverse_scale(study, target) + + trials = _filter_nonfinite( + study.get_trials(deepcopy=False, states=(TrialState.COMPLETE,)), target=target + ) + + all_params = {p_name for t in trials for p_name in t.params.keys()} + if params is not None: + for input_p_name in params: + if input_p_name not in all_params: + raise ValueError("Parameter {} does not exist in your study.".format(input_p_name)) + all_params = set(params) + sorted_params = sorted(all_params) + + if target is None: + + def _target(t: FrozenTrial) -> float: + return cast(float, t.value) + + target = _target + + skipped_trial_numbers = _get_skipped_trial_numbers(trials, sorted_params) + + objectives = tuple([target(t) for t in trials if t.number not in skipped_trial_numbers]) + # The value of (0, 0) is a dummy range. It is ignored when we plot. 
+ objective_range = (min(objectives), max(objectives)) if len(objectives) > 0 else (0, 0) + dim_objective = _DimensionInfo( + label=target_name, + values=objectives, + range=objective_range, + is_log=False, + is_cat=False, + tickvals=[], + ticktext=[], + ) + + if len(trials) == 0: + _logger.warning("Your study does not have any completed trials.") + return _ParallelCoordinateInfo( + dim_objective=dim_objective, + dims_params=[], + reverse_scale=reverse_scale, + target_name=target_name, + ) + + if len(objectives) == 0: + _logger.warning("Your study has only completed trials with missing parameters.") + return _ParallelCoordinateInfo( + dim_objective=dim_objective, + dims_params=[], + reverse_scale=reverse_scale, + target_name=target_name, + ) + + numeric_cat_params_indices: list[int] = [] + dims = [] + for dim_index, p_name in enumerate(sorted_params, start=1): + values = [] + is_categorical = False + for t in trials: + if t.number in skipped_trial_numbers: + continue + if p_name in t.params: + values.append(t.params[p_name]) + is_categorical |= isinstance(t.distributions[p_name], CategoricalDistribution) + if _is_log_scale(trials, p_name): + values = [math.log10(v) for v in values] + min_value = min(values) + max_value = max(values) + tickvals: list[int | float] = list( + range(math.ceil(min_value), math.floor(max_value) + 1) + ) + if min_value not in tickvals: + tickvals = [min_value] + tickvals + if max_value not in tickvals: + tickvals = tickvals + [max_value] + dim = _DimensionInfo( + label=_truncate_label(p_name), + values=tuple(values), + range=(min_value, max_value), + is_log=True, + is_cat=False, + tickvals=tickvals, + ticktext=["{:.3g}".format(math.pow(10, x)) for x in tickvals], + ) + elif is_categorical: + vocab: defaultdict[int | str, int] = defaultdict(lambda: len(vocab)) + + ticktext: list[str] + if _is_numerical(trials, p_name): + _ = [vocab[v] for v in sorted(values)] + values = [vocab[v] for v in values] + ticktext = [str(v) for v in list(sorted(vocab.keys()))] + numeric_cat_params_indices.append(dim_index) + else: + values = [vocab[v] for v in values] + ticktext = [str(v) for v in list(sorted(vocab.keys(), key=lambda x: vocab[x]))] + dim = _DimensionInfo( + label=_truncate_label(p_name), + values=tuple(values), + range=(min(values), max(values)), + is_log=False, + is_cat=True, + tickvals=list(range(len(vocab))), + ticktext=ticktext, + ) + else: + dim = _DimensionInfo( + label=_truncate_label(p_name), + values=tuple(values), + range=(min(values), max(values)), + is_log=False, + is_cat=False, + tickvals=[], + ticktext=[], + ) + + dims.append(dim) + + if numeric_cat_params_indices: + dims.insert(0, dim_objective) + # np.lexsort consumes the sort keys the order from back to front. + # So the values of parameters have to be reversed the order. + idx = np.lexsort([dims[index].values for index in numeric_cat_params_indices][::-1]) + updated_dims = [] + for dim in dims: + # Since the values are mapped to other categories by the index, + # the index will be swapped according to the sorted index of numeric params. 
+ updated_dims.append( + _DimensionInfo( + label=dim.label, + values=tuple(np.array(dim.values)[idx]), + range=dim.range, + is_log=dim.is_log, + is_cat=dim.is_cat, + tickvals=dim.tickvals, + ticktext=dim.ticktext, + ) + ) + dim_objective = updated_dims[0] + dims = updated_dims[1:] + + return _ParallelCoordinateInfo( + dim_objective=dim_objective, + dims_params=dims, + reverse_scale=reverse_scale, + target_name=target_name, + ) + + +def _get_dims_from_info(info: _ParallelCoordinateInfo) -> list[dict[str, Any]]: + dims = [ + { + "label": info.dim_objective.label, + "values": info.dim_objective.values, + "range": info.dim_objective.range, + } + ] + + for dim in info.dims_params: + if dim.is_log or dim.is_cat: + dims.append( + { + "label": dim.label, + "values": dim.values, + "range": dim.range, + "tickvals": dim.tickvals, + "ticktext": dim.ticktext, + } + ) + else: + dims.append({"label": dim.label, "values": dim.values, "range": dim.range}) + + return dims + + +def _truncate_label(label: str) -> str: + return label if len(label) < 20 else "{}...".format(label[:17]) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/_param_importances.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/_param_importances.py new file mode 100644 index 0000000000000000000000000000000000000000..3351d87ac86a93ecf7183101201adb990cdad8ed --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/_param_importances.py @@ -0,0 +1,219 @@ +from __future__ import annotations + +from collections.abc import Callable +from typing import NamedTuple + +import optuna +from optuna.distributions import BaseDistribution +from optuna.importance._base import BaseImportanceEvaluator +from optuna.logging import get_logger +from optuna.study import Study +from optuna.trial import FrozenTrial +from optuna.trial import TrialState +from optuna.visualization._plotly_imports import _imports +from optuna.visualization._utils import _check_plot_args +from optuna.visualization._utils import _filter_nonfinite + + +if _imports.is_successful(): + from optuna.visualization._plotly_imports import go + + +logger = get_logger(__name__) + + +class _ImportancesInfo(NamedTuple): + importance_values: list[float] + param_names: list[str] + importance_labels: list[str] + target_name: str + + +def _get_importances_info( + study: Study, + evaluator: BaseImportanceEvaluator | None, + params: list[str] | None, + target: Callable[[FrozenTrial], float] | None, + target_name: str, +) -> _ImportancesInfo: + _check_plot_args(study, target, target_name) + + trials = _filter_nonfinite( + study.get_trials(deepcopy=False, states=(TrialState.COMPLETE,)), target=target + ) + + if len(trials) == 0: + logger.warning("Study instance does not contain completed trials.") + return _ImportancesInfo( + importance_values=[], + param_names=[], + importance_labels=[], + target_name=target_name, + ) + + importances = optuna.importance.get_param_importances( + study, evaluator=evaluator, params=params, target=target + ) + + importances = dict(reversed(list(importances.items()))) + importance_values = list(importances.values()) + param_names = list(importances.keys()) + importance_labels = [f"{val:.2f}" if val >= 0.01 else "<0.01" for val in importance_values] + + return _ImportancesInfo( + importance_values=importance_values, + param_names=param_names, + importance_labels=importance_labels, + 
target_name=target_name, + ) + + +def _get_importances_infos( + study: Study, + evaluator: BaseImportanceEvaluator | None, + params: list[str] | None, + target: Callable[[FrozenTrial], float] | None, + target_name: str, +) -> tuple[_ImportancesInfo, ...]: + metric_names = study.metric_names + if target or not study._is_multi_objective(): + target_name = metric_names[0] if metric_names is not None and not target else target_name + importances_infos: tuple[_ImportancesInfo, ...] = ( + _get_importances_info( + study, + evaluator, + params, + target=target, + target_name=target_name, + ), + ) + + else: + n_objectives = len(study.directions) + target_names = ( + metric_names + if metric_names is not None + else (f"{target_name} {objective_id}" for objective_id in range(n_objectives)) + ) + + importances_infos = tuple( + _get_importances_info( + study, + evaluator, + params, + target=lambda t: t.values[objective_id], + target_name=target_name, + ) + for objective_id, target_name in enumerate(target_names) + ) + + return importances_infos + + +def plot_param_importances( + study: Study, + evaluator: BaseImportanceEvaluator | None = None, + params: list[str] | None = None, + *, + target: Callable[[FrozenTrial], float] | None = None, + target_name: str = "Objective Value", +) -> "go.Figure": + """Plot hyperparameter importances. + + .. seealso:: + + This function visualizes the results of :func:`optuna.importance.get_param_importances`. + + Args: + study: + An optimized study. + evaluator: + An importance evaluator object that specifies which algorithm to base the importance + assessment on. + Defaults to + :class:`~optuna.importance.FanovaImportanceEvaluator`. + + .. note:: + :class:`~optuna.importance.FanovaImportanceEvaluator` takes over 1 minute + when given a study that contains 1000+ trials. We published + `optuna-fast-fanova `__ library, + that is a Cython accelerated fANOVA implementation. + By using it, you can get hyperparameter importances within a few seconds. + + params: + A list of names of parameters to assess. + If :obj:`None`, all parameters that are present in all of the completed trials are + assessed. + target: + A function to specify the value to display. If it is :obj:`None` and ``study`` is being + used for single-objective optimization, the objective values are plotted. + For multi-objective optimization, all objectives will be plotted if ``target`` + is :obj:`None`. + + .. note:: + This argument can be used to specify which objective to plot if ``study`` is being + used for multi-objective optimization. For example, to get only the hyperparameter + importance of the first objective, use ``target=lambda t: t.values[0]`` for the + target parameter. + target_name: + Target's name to display on the legend. Names set via + :meth:`~optuna.study.Study.set_metric_names` will be used if ``target`` is :obj:`None`, + overriding this argument. + + Returns: + A :class:`plotly.graph_objects.Figure` object. 
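+
+    Example:
+
+        An illustrative sketch; the ``objective`` function and parameter
+        names are assumptions made for this example. For a multi-objective
+        study, ``target=lambda t: t.values[0]`` would restrict the plot to
+        the first objective::
+
+            import optuna
+
+            def objective(trial):
+                x = trial.suggest_float("x", -5, 5)
+                y = trial.suggest_int("y", 0, 10)
+                return x ** 2 + y
+
+            study = optuna.create_study()
+            study.optimize(objective, n_trials=50)
+
+            fig = optuna.visualization.plot_param_importances(study)
+            fig.show()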
+ """ + + _imports.check() + importances_infos = _get_importances_infos(study, evaluator, params, target, target_name) + return _get_importances_plot(importances_infos, study) + + +def _get_importances_plot(infos: tuple[_ImportancesInfo, ...], study: Study) -> "go.Figure": + layout = go.Layout( + title="Hyperparameter Importances", + xaxis={"title": "Hyperparameter Importance"}, + yaxis={"title": "Hyperparameter"}, + ) + + data: list[go.Bar] = [] + for info in infos: + if not info.importance_values: + continue + + data.append( + go.Bar( + x=info.importance_values, + y=info.param_names, + name=info.target_name, + text=info.importance_labels, + textposition="outside", + cliponaxis=False, # Ensure text is not clipped. + hovertemplate=_get_hover_template(info, study), + orientation="h", + ) + ) + + return go.Figure(data, layout) + + +def _get_distribution(param_name: str, study: Study) -> BaseDistribution: + for trial in study.trials: + if param_name in trial.distributions: + return trial.distributions[param_name] + assert False + + +def _make_hovertext(param_name: str, importance: float, study: Study) -> str: + return "{} ({}): {}".format( + param_name, _get_distribution(param_name, study).__class__.__name__, importance + ) + + +def _get_hover_template(importances_info: _ImportancesInfo, study: Study) -> list[str]: + return [ + _make_hovertext(param_name, importance, study) + for param_name, importance in zip( + importances_info.param_names, importances_info.importance_values + ) + ] diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/_pareto_front.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/_pareto_front.py new file mode 100644 index 0000000000000000000000000000000000000000..da427f7919b21af1598fdcfe9bea2dc1ed7dc9d6 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/_pareto_front.py @@ -0,0 +1,419 @@ +from __future__ import annotations + +from collections.abc import Callable +from collections.abc import Sequence +from typing import Any +from typing import NamedTuple +import warnings + +import optuna +from optuna import _deprecated +from optuna.samplers._base import _CONSTRAINTS_KEY +from optuna.study import Study +from optuna.study._multi_objective import _get_pareto_front_trials_by_trials +from optuna.trial import FrozenTrial +from optuna.trial import TrialState +from optuna.visualization._plotly_imports import _imports +from optuna.visualization._utils import _make_hovertext + + +if _imports.is_successful(): + from optuna.visualization._plotly_imports import go + +_logger = optuna.logging.get_logger(__name__) + + +class _ParetoFrontInfo(NamedTuple): + n_targets: int + target_names: list[str] + best_trials_with_values: list[tuple[FrozenTrial, list[float]]] + non_best_trials_with_values: list[tuple[FrozenTrial, list[float]]] + infeasible_trials_with_values: list[tuple[FrozenTrial, list[float]]] + axis_order: list[int] + include_dominated_trials: bool + has_constraints: bool + + +def plot_pareto_front( + study: Study, + *, + target_names: list[str] | None = None, + include_dominated_trials: bool = True, + axis_order: list[int] | None = None, + constraints_func: Callable[[FrozenTrial], Sequence[float]] | None = None, + targets: Callable[[FrozenTrial], Sequence[float]] | None = None, +) -> "go.Figure": + """Plot the Pareto front of a study. + + .. 
seealso:: + Please refer to :ref:`multi_objective` for the tutorial of the Pareto front visualization. + + Args: + study: + A :class:`~optuna.study.Study` object whose trials are plotted for their objective + values. The number of objectives must be either 2 or 3 when ``targets`` is :obj:`None`. + target_names: + Objective name list used as the axis titles. If :obj:`None` is specified, + "Objective {objective_index}" is used instead. If ``targets`` is specified + for a study that does not contain any completed trial, + ``target_name`` must be specified. + include_dominated_trials: + A flag to include all dominated trial's objective values. + axis_order: + A list of indices indicating the axis order. If :obj:`None` is specified, + default order is used. ``axis_order`` and ``targets`` cannot be used at the same time. + + .. warning:: + Deprecated in v3.0.0. This feature will be removed in the future. The removal of + this feature is currently scheduled for v5.0.0, but this schedule is subject to + change. See https://github.com/optuna/optuna/releases/tag/v3.0.0. + constraints_func: + An optional function that computes the objective constraints. It must take a + :class:`~optuna.trial.FrozenTrial` and return the constraints. The return value must + be a sequence of :obj:`float` s. A value strictly larger than 0 means that a + constraint is violated. A value equal to or smaller than 0 is considered feasible. + This specification is the same as in, for example, + :class:`~optuna.samplers.NSGAIISampler`. + + If given, trials are classified into three categories: feasible and best, feasible but + non-best, and infeasible. Categories are shown in different colors. Here, whether a + trial is best (on Pareto front) or not is determined ignoring all infeasible trials. + + .. warning:: + Deprecated in v4.0.0. This feature will be removed in the future. The removal of + this feature is currently scheduled for v6.0.0, but this schedule is subject to + change. See https://github.com/optuna/optuna/releases/tag/v4.0.0. + targets: + A function that returns targets values to display. + The argument to this function is :class:`~optuna.trial.FrozenTrial`. + ``axis_order`` and ``targets`` cannot be used at the same time. + If ``study.n_objectives`` is neither 2 nor 3, ``targets`` must be specified. + + .. note:: + Added in v3.0.0 as an experimental feature. The interface may change in newer + versions without prior notice. + See https://github.com/optuna/optuna/releases/tag/v3.0.0. + + Returns: + A :class:`plotly.graph_objects.Figure` object. 
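+
+    Example:
+
+        An illustrative sketch with a two-objective study; the ``objective``
+        function and the axis titles are assumptions made for this example
+        only::
+
+            import optuna
+
+            def objective(trial):
+                x = trial.suggest_float("x", 0, 5)
+                y = trial.suggest_float("y", 0, 3)
+                return x ** 2 + y, x + y ** 2
+
+            study = optuna.create_study(directions=["minimize", "minimize"])
+            study.optimize(objective, n_trials=60)
+
+            fig = optuna.visualization.plot_pareto_front(
+                study, target_names=["f1", "f2"]
+            )
+            fig.show()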
+ """ + + _imports.check() + + info = _get_pareto_front_info( + study, target_names, include_dominated_trials, axis_order, constraints_func, targets + ) + return _get_pareto_front_plot(info) + + +def _get_pareto_front_plot(info: _ParetoFrontInfo) -> "go.Figure": + include_dominated_trials = info.include_dominated_trials + has_constraints = info.has_constraints + if not has_constraints: + data = [ + _make_scatter_object( + info.n_targets, + info.axis_order, + include_dominated_trials, + info.non_best_trials_with_values, + hovertemplate="%{text}Trial", + dominated_trials=True, + ), + _make_scatter_object( + info.n_targets, + info.axis_order, + include_dominated_trials, + info.best_trials_with_values, + hovertemplate="%{text}Best Trial", + dominated_trials=False, + ), + ] + else: + data = [ + _make_scatter_object( + info.n_targets, + info.axis_order, + include_dominated_trials, + info.infeasible_trials_with_values, + hovertemplate="%{text}Infeasible Trial", + infeasible=True, + ), + _make_scatter_object( + info.n_targets, + info.axis_order, + include_dominated_trials, + info.non_best_trials_with_values, + hovertemplate="%{text}Feasible Trial", + dominated_trials=True, + ), + _make_scatter_object( + info.n_targets, + info.axis_order, + include_dominated_trials, + info.best_trials_with_values, + hovertemplate="%{text}Best Trial", + dominated_trials=False, + ), + ] + + if info.n_targets == 2: + layout = go.Layout( + title="Pareto-front Plot", + xaxis_title=info.target_names[info.axis_order[0]], + yaxis_title=info.target_names[info.axis_order[1]], + ) + else: + layout = go.Layout( + title="Pareto-front Plot", + scene={ + "xaxis_title": info.target_names[info.axis_order[0]], + "yaxis_title": info.target_names[info.axis_order[1]], + "zaxis_title": info.target_names[info.axis_order[2]], + }, + ) + return go.Figure(data=data, layout=layout) + + +def _get_pareto_front_info( + study: Study, + target_names: list[str] | None = None, + include_dominated_trials: bool = True, + axis_order: list[int] | None = None, + constraints_func: Callable[[FrozenTrial], Sequence[float]] | None = None, + targets: Callable[[FrozenTrial], Sequence[float]] | None = None, +) -> _ParetoFrontInfo: + if axis_order is not None: + msg = _deprecated._DEPRECATION_WARNING_TEMPLATE.format( + name="`axis_order`", d_ver="3.0.0", r_ver="5.0.0" + ) + warnings.warn(msg, FutureWarning) + + if constraints_func is not None: + msg = _deprecated._DEPRECATION_WARNING_TEMPLATE.format( + name="`constraints_func`", d_ver="4.0.0", r_ver="6.0.0" + ) + warnings.warn(msg, FutureWarning) + + if targets is not None and axis_order is not None: + raise ValueError( + "Using both `targets` and `axis_order` is not supported. " + "Use either `targets` or `axis_order`." + ) + + feasible_trials = [] + infeasible_trials = [] + has_constraints = False + for trial in study.get_trials(deepcopy=False, states=(TrialState.COMPLETE,)): + if constraints_func is not None: + # NOTE(nabenabe0928): This part is deprecated. 
+ has_constraints = True + if all(map(lambda x: x <= 0.0, constraints_func(trial))): + feasible_trials.append(trial) + else: + infeasible_trials.append(trial) + continue + + constraints = trial.system_attrs.get(_CONSTRAINTS_KEY) + has_constraints |= constraints is not None + if constraints is None or all(x <= 0.0 for x in constraints): + feasible_trials.append(trial) + else: + infeasible_trials.append(trial) + + best_trials = _get_pareto_front_trials_by_trials(feasible_trials, study.directions) + if include_dominated_trials: + non_best_trials = _get_non_pareto_front_trials(feasible_trials, best_trials) + else: + non_best_trials = [] + + if len(best_trials) == 0: + what_trial = "completed" if has_constraints else "completed and feasible" + _logger.warning(f"Your study does not have any {what_trial} trials. ") + + _targets = targets + if _targets is None: + if len(study.directions) in (2, 3): + _targets = _targets_default + else: + raise ValueError( + "`plot_pareto_front` function only supports 2 or 3 objective" + " studies when using `targets` is `None`. Please use `targets`" + " if your objective studies have more than 3 objectives." + ) + + def _make_trials_with_values( + trials: list[FrozenTrial], + targets: Callable[[FrozenTrial], Sequence[float]], + ) -> list[tuple[FrozenTrial, list[float]]]: + target_values = [targets(trial) for trial in trials] + for v in target_values: + if not isinstance(v, Sequence): + raise ValueError( + "`targets` should return a sequence of target values." + " your `targets` returns {}".format(type(v)) + ) + return [(trial, list(v)) for trial, v in zip(trials, target_values)] + + best_trials_with_values = _make_trials_with_values(best_trials, _targets) + non_best_trials_with_values = _make_trials_with_values(non_best_trials, _targets) + infeasible_trials_with_values = _make_trials_with_values(infeasible_trials, _targets) + + def _infer_n_targets( + trials_with_values: Sequence[tuple[FrozenTrial, Sequence[float]]], + ) -> int | None: + if len(trials_with_values) > 0: + return len(trials_with_values[0][1]) + return None + + # Check for `non_best_trials_with_values` can be skipped, because if `best_trials_with_values` + # is empty, then `non_best_trials_with_values` will also be empty. + n_targets = _infer_n_targets(best_trials_with_values) or _infer_n_targets( + infeasible_trials_with_values + ) + if n_targets is None: + if target_names is not None: + n_targets = len(target_names) + elif targets is None: + n_targets = len(study.directions) + else: + raise ValueError( + "If `targets` is specified for empty studies, `target_names` must be specified." + ) + + if n_targets not in (2, 3): + raise ValueError( + "`plot_pareto_front` function only supports 2 or 3 targets." + " you used {} targets now.".format(n_targets) + ) + + if target_names is None: + metric_names = study.metric_names + if metric_names is None: + target_names = [f"Objective {i}" for i in range(n_targets)] + else: + target_names = metric_names + elif len(target_names) != n_targets: + raise ValueError(f"The length of `target_names` is supposed to be {n_targets}.") + + if axis_order is None: + axis_order = list(range(n_targets)) + else: + if len(axis_order) != n_targets: + raise ValueError( + f"Size of `axis_order` {axis_order}. Expect: {n_targets}, " + f"Actual: {len(axis_order)}." 
+ ) + if len(set(axis_order)) != n_targets: + raise ValueError(f"Elements of given `axis_order` {axis_order} are not unique!.") + if max(axis_order) > n_targets - 1: + raise ValueError( + f"Given `axis_order` {axis_order} contains invalid index {max(axis_order)} " + f"higher than {n_targets - 1}." + ) + if min(axis_order) < 0: + raise ValueError( + f"Given `axis_order` {axis_order} contains invalid index {min(axis_order)} " + "lower than 0." + ) + + return _ParetoFrontInfo( + n_targets=n_targets, + target_names=target_names, + best_trials_with_values=best_trials_with_values, + non_best_trials_with_values=non_best_trials_with_values, + infeasible_trials_with_values=infeasible_trials_with_values, + axis_order=axis_order, + include_dominated_trials=include_dominated_trials, + has_constraints=has_constraints, + ) + + +def _targets_default(trial: FrozenTrial) -> Sequence[float]: + return trial.values + + +def _get_non_pareto_front_trials( + trials: list[FrozenTrial], pareto_trials: list[FrozenTrial] +) -> list[FrozenTrial]: + non_pareto_trials = [] + for trial in trials: + if trial not in pareto_trials: + non_pareto_trials.append(trial) + return non_pareto_trials + + +def _make_scatter_object( + n_targets: int, + axis_order: Sequence[int], + include_dominated_trials: bool, + trials_with_values: Sequence[tuple[FrozenTrial, Sequence[float]]], + hovertemplate: str, + infeasible: bool = False, + dominated_trials: bool = False, +) -> "go.Scatter" | "go.Scatter3d": + trials_with_values = trials_with_values or [] + + marker = _make_marker( + [trial for trial, _ in trials_with_values], + include_dominated_trials, + dominated_trials=dominated_trials, + infeasible=infeasible, + ) + if n_targets == 2: + return go.Scatter( + x=[values[axis_order[0]] for _, values in trials_with_values], + y=[values[axis_order[1]] for _, values in trials_with_values], + text=[_make_hovertext(trial) for trial, _ in trials_with_values], + mode="markers", + hovertemplate=hovertemplate, + marker=marker, + showlegend=False, + ) + elif n_targets == 3: + return go.Scatter3d( + x=[values[axis_order[0]] for _, values in trials_with_values], + y=[values[axis_order[1]] for _, values in trials_with_values], + z=[values[axis_order[2]] for _, values in trials_with_values], + text=[_make_hovertext(trial) for trial, _ in trials_with_values], + mode="markers", + hovertemplate=hovertemplate, + marker=marker, + showlegend=False, + ) + else: + assert False, "Must not reach here" + + +def _make_marker( + trials: Sequence[FrozenTrial], + include_dominated_trials: bool, + dominated_trials: bool = False, + infeasible: bool = False, +) -> dict[str, Any]: + if dominated_trials and not include_dominated_trials: + assert len(trials) == 0 + + if infeasible: + return { + "color": "#cccccc", + } + elif dominated_trials: + return { + "line": {"width": 0.5, "color": "Grey"}, + "color": [t.number for t in trials], + "colorscale": "Blues", + "colorbar": { + "title": "Trial", + }, + } + else: + return { + "line": {"width": 0.5, "color": "Grey"}, + "color": [t.number for t in trials], + "colorscale": "Reds", + "colorbar": { + "title": "Best Trial", + "x": 1.1 if include_dominated_trials else 1, + "xpad": 40, + }, + } diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/_plotly_imports.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/_plotly_imports.py new file mode 100644 index 
0000000000000000000000000000000000000000..32256b29642b7c14107b119487ed3d65028e4fb5 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/_plotly_imports.py @@ -0,0 +1,23 @@ +from packaging import version + +from optuna._imports import try_import + + +with try_import() as _imports: + import plotly + from plotly import __version__ as plotly_version + import plotly.graph_objects as go + from plotly.graph_objects import Contour + from plotly.graph_objects import Scatter + from plotly.subplots import make_subplots + + if version.parse(plotly_version) < version.parse("4.0.0"): + raise ImportError( + "Your version of Plotly is " + plotly_version + " . " + "Please install plotly version 4.0.0 or higher. " + "Plotly can be installed by executing `$ pip install -U plotly>=4.0.0`. " + "For further information, please refer to the installation guide of plotly. ", + name="plotly", + ) + +__all__ = ["_imports", "plotly", "go", "Contour", "Scatter", "make_subplots"] diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/_rank.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/_rank.py new file mode 100644 index 0000000000000000000000000000000000000000..15f74ea6ae616a1322a86e7a173a263a1486452d --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/_rank.py @@ -0,0 +1,406 @@ +from __future__ import annotations + +from collections.abc import Callable +import math +import typing +from typing import Any +from typing import NamedTuple + +import numpy as np + +from optuna.logging import get_logger +from optuna.samplers._base import _CONSTRAINTS_KEY +from optuna.study import Study +from optuna.trial import FrozenTrial +from optuna.trial import TrialState +from optuna.visualization._plotly_imports import _imports +from optuna.visualization._utils import _check_plot_args +from optuna.visualization._utils import _is_log_scale +from optuna.visualization._utils import _is_numerical +from optuna.visualization.matplotlib._matplotlib_imports import _imports as matplotlib_imports + + +plotly_is_available = _imports.is_successful() +if plotly_is_available: + from optuna.visualization._plotly_imports import go + from optuna.visualization._plotly_imports import make_subplots + from optuna.visualization._plotly_imports import plotly + from optuna.visualization._plotly_imports import Scatter +if matplotlib_imports.is_successful(): + # TODO(c-bata): Refactor to remove matplotlib and plotly dependencies in `_get_rank_info()`. + # See https://github.com/optuna/optuna/pull/5133#discussion_r1414761672 for the discussion. 
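# A caller-side sketch (hypothetical usage, not part of this module): the `try_import` guard in
# `_plotly_imports.py` above is what `optuna.visualization.is_available()` reports, so user code
# can pick the plotly backend when it is usable and fall back to the matplotlib backend otherwise.
#
#   import optuna
#
#   study = optuna.create_study(direction="minimize")
#   study.optimize(lambda t: (t.suggest_float("x", -5.0, 5.0) - 2.0) ** 2, n_trials=40)
#
#   if optuna.visualization.is_available():
#       optuna.visualization.plot_rank(study).show()      # plotly Figure
#   else:
#       optuna.visualization.matplotlib.plot_rank(study)  # matplotlib Axes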
+ from optuna.visualization.matplotlib._matplotlib_imports import plt as matplotlib_plt + +_logger = get_logger(__name__) + + +PADDING_RATIO = 0.05 + + +class _AxisInfo(NamedTuple): + name: str + range: tuple[float, float] + is_log: bool + is_cat: bool + + +class _RankSubplotInfo(NamedTuple): + xaxis: _AxisInfo + yaxis: _AxisInfo + xs: list[Any] + ys: list[Any] + trials: list[FrozenTrial] + zs: np.ndarray + colors: np.ndarray + + +class _RankPlotInfo(NamedTuple): + params: list[str] + sub_plot_infos: list[list[_RankSubplotInfo]] + target_name: str + zs: np.ndarray + colors: np.ndarray + has_custom_target: bool + + +def plot_rank( + study: Study, + params: list[str] | None = None, + *, + target: Callable[[FrozenTrial], float] | None = None, + target_name: str = "Objective Value", +) -> "go.Figure": + """Plot parameter relations as scatter plots with colors indicating ranks of target value. + + Note that trials missing the specified parameters will not be plotted. + + Args: + study: + A :class:`~optuna.study.Study` object whose trials are plotted for their target values. + params: + Parameter list to visualize. The default is all parameters. + target: + A function to specify the value to display. If it is :obj:`None` and ``study`` is being + used for single-objective optimization, the objective values are plotted. + + .. note:: + Specify this argument if ``study`` is being used for multi-objective optimization. + target_name: + Target's name to display on the color bar. + + Returns: + A :class:`plotly.graph_objects.Figure` object. + + .. note:: + This function requires plotly >= 5.0.0. + """ + + _imports.check() + info = _get_rank_info(study, params, target, target_name) + return _get_rank_plot(info) + + +def _get_order_with_same_order_averaging(data: np.ndarray) -> np.ndarray: + order = np.zeros_like(data, dtype=float) + data_sorted = np.sort(data) + for i, d in enumerate(data): + indices = np.where(data_sorted == d)[0] + order[i] = sum(indices) / len(indices) + return order + + +def _get_rank_info( + study: Study, + params: list[str] | None, + target: Callable[[FrozenTrial], float] | None, + target_name: str, +) -> _RankPlotInfo: + _check_plot_args(study, target, target_name) + + trials = study.get_trials(deepcopy=False, states=(TrialState.COMPLETE,)) + + all_params = {p_name for t in trials for p_name in t.params.keys()} + if len(trials) == 0: + _logger.warning("Your study does not have any completed trials.") + params = [] + elif params is None: + params = sorted(all_params) + else: + for input_p_name in params: + if input_p_name not in all_params: + raise ValueError("Parameter {} does not exist in your study.".format(input_p_name)) + + if len(params) == 0: + _logger.warning("params is an empty list.") + + has_custom_target = True + if target is None: + + def target(trial: FrozenTrial) -> float: + return typing.cast(float, trial.value) + + has_custom_target = False + target_values = np.array([target(trial) for trial in trials]) + raw_ranks = _get_order_with_same_order_averaging(target_values) + color_idxs = raw_ranks / (len(trials) - 1) if len(trials) >= 2 else np.array([0.5]) + colors = _convert_color_idxs_to_scaled_rgb_colors(color_idxs) + + sub_plot_infos: list[list[_RankSubplotInfo]] + if len(params) == 2: + x_param = params[0] + y_param = params[1] + sub_plot_info = _get_rank_subplot_info(trials, target_values, colors, x_param, y_param) + sub_plot_infos = [[sub_plot_info]] + else: + sub_plot_infos = [ + [ + _get_rank_subplot_info(trials, target_values, colors, x_param, y_param) + 
for x_param in params + ] + for y_param in params + ] + + return _RankPlotInfo( + params=params, + sub_plot_infos=sub_plot_infos, + target_name=target_name, + zs=target_values, + colors=colors, + has_custom_target=has_custom_target, + ) + + +def _get_rank_subplot_info( + trials: list[FrozenTrial], + target_values: np.ndarray, + colors: np.ndarray, + x_param: str, + y_param: str, +) -> _RankSubplotInfo: + xaxis = _get_axis_info(trials, x_param) + yaxis = _get_axis_info(trials, y_param) + + infeasible_trial_ids = [] + filtered_ids = [] + for idx, trial in enumerate(trials): + constraints = trial.system_attrs.get(_CONSTRAINTS_KEY) + if constraints is not None and any([x > 0.0 for x in constraints]): + infeasible_trial_ids.append(idx) + if x_param in trial.params and y_param in trial.params: + filtered_ids.append(idx) + + filtered_trials = [trials[i] for i in filtered_ids] + xs = [trial.params[x_param] for trial in filtered_trials] + ys = [trial.params[y_param] for trial in filtered_trials] + zs = target_values[filtered_ids] + + colors[infeasible_trial_ids] = (204, 204, 204) + colors = colors[filtered_ids] + return _RankSubplotInfo( + xaxis=xaxis, + yaxis=yaxis, + xs=xs, + ys=ys, + trials=filtered_trials, + zs=np.array(zs), + colors=colors, + ) + + +def _get_axis_info(trials: list[FrozenTrial], param_name: str) -> _AxisInfo: + values: list[str | float | None] + is_numerical = _is_numerical(trials, param_name) + if is_numerical: + values = [t.params.get(param_name) for t in trials] + else: + values = [ + str(t.params.get(param_name)) if param_name in t.params else None for t in trials + ] + + min_value = min([v for v in values if v is not None]) + max_value = max([v for v in values if v is not None]) + + if _is_log_scale(trials, param_name): + min_value = float(min_value) + max_value = float(max_value) + padding = (math.log10(max_value) - math.log10(min_value)) * PADDING_RATIO + min_value = math.pow(10, math.log10(min_value) - padding) + max_value = math.pow(10, math.log10(max_value) + padding) + is_log = True + is_cat = False + + elif is_numerical: + min_value = float(min_value) + max_value = float(max_value) + padding = (max_value - min_value) * PADDING_RATIO + min_value = min_value - padding + max_value = max_value + padding + is_log = False + is_cat = False + + else: + unique_values = set(values) + span = len(unique_values) - 1 + if None in unique_values: + span -= 1 + padding = span * PADDING_RATIO + min_value = -padding + max_value = span + padding + is_log = False + is_cat = True + + return _AxisInfo( + name=param_name, + range=(min_value, max_value), + is_log=is_log, + is_cat=is_cat, + ) + + +def _get_rank_subplot( + info: _RankSubplotInfo, target_name: str, print_raw_objectives: bool +) -> "Scatter": + def get_hover_text(trial: FrozenTrial, target_value: float) -> str: + lines = [f"Trial #{trial.number}"] + lines += [f"{k}: {v}" for k, v in trial.params.items()] + lines += [f"{target_name}: {target_value}"] + if print_raw_objectives: + lines += [f"Objective #{i}: {v}" for i, v in enumerate(trial.values)] + return "
".join(lines) + + scatter = go.Scatter( + x=[str(x) for x in info.xs] if info.xaxis.is_cat else info.xs, + y=[str(y) for y in info.ys] if info.yaxis.is_cat else info.ys, + marker={ + "color": list(map(plotly.colors.label_rgb, info.colors)), + "line": {"width": 0.5, "color": "Grey"}, + }, + mode="markers", + showlegend=False, + hovertemplate="%{hovertext}", + hovertext=[ + get_hover_text(trial, target_value) + for trial, target_value in zip(info.trials, info.zs) + ], + ) + return scatter + + +class _TickInfo(NamedTuple): + coloridxs: list[float] + text: list[str] + + +def _get_tick_info(target_values: np.ndarray) -> _TickInfo: + sorted_target_values = np.sort(target_values) + coloridxs = [0, 0.25, 0.5, 0.75, 1] + values = np.quantile(sorted_target_values, coloridxs) + rank_text = ["min.", "25%", "50%", "75%", "max."] + text = [f"{rank_text[i]} ({values[i]:3g})" for i in range(len(values))] + return _TickInfo(coloridxs=coloridxs, text=text) + + +def _get_rank_plot( + info: _RankPlotInfo, +) -> "go.Figure": + params = info.params + sub_plot_infos = info.sub_plot_infos + + layout = go.Layout(title=f"Rank ({info.target_name})") + + if len(params) == 0: + return go.Figure(data=[], layout=layout) + if len(params) == 2: + x_param = params[0] + y_param = params[1] + sub_plot_info = sub_plot_infos[0][0] + sub_plots = _get_rank_subplot(sub_plot_info, info.target_name, info.has_custom_target) + + figure = go.Figure(data=sub_plots, layout=layout) + figure.update_xaxes(title_text=x_param, range=sub_plot_info.xaxis.range) + figure.update_yaxes(title_text=y_param, range=sub_plot_info.yaxis.range) + + if sub_plot_info.xaxis.is_cat: + figure.update_xaxes(type="category") + if sub_plot_info.yaxis.is_cat: + figure.update_yaxes(type="category") + + if sub_plot_info.xaxis.is_log: + log_range = [math.log10(p) for p in sub_plot_info.xaxis.range] + figure.update_xaxes(range=log_range, type="log") + if sub_plot_info.yaxis.is_log: + log_range = [math.log10(p) for p in sub_plot_info.yaxis.range] + figure.update_yaxes(range=log_range, type="log") + else: + figure = make_subplots( + rows=len(params), + cols=len(params), + shared_xaxes=True, + shared_yaxes=True, + horizontal_spacing=0.08 / len(params), + vertical_spacing=0.08 / len(params), + ) + + figure.update_layout(layout) + for x_i, x_param in enumerate(params): + for y_i, y_param in enumerate(params): + scatter = _get_rank_subplot( + sub_plot_infos[y_i][x_i], info.target_name, info.has_custom_target + ) + figure.add_trace(scatter, row=y_i + 1, col=x_i + 1) + + xaxis = sub_plot_infos[y_i][x_i].xaxis + yaxis = sub_plot_infos[y_i][x_i].yaxis + figure.update_xaxes(range=xaxis.range, row=y_i + 1, col=x_i + 1) + figure.update_yaxes(range=yaxis.range, row=y_i + 1, col=x_i + 1) + + if xaxis.is_cat: + figure.update_xaxes(type="category", row=y_i + 1, col=x_i + 1) + if yaxis.is_cat: + figure.update_yaxes(type="category", row=y_i + 1, col=x_i + 1) + + if xaxis.is_log: + log_range = [math.log10(p) for p in xaxis.range] + figure.update_xaxes(range=log_range, type="log", row=y_i + 1, col=x_i + 1) + if yaxis.is_log: + log_range = [math.log10(p) for p in yaxis.range] + figure.update_yaxes(range=log_range, type="log", row=y_i + 1, col=x_i + 1) + + if x_i == 0: + figure.update_yaxes(title_text=y_param, row=y_i + 1, col=x_i + 1) + if y_i == len(params) - 1: + figure.update_xaxes(title_text=x_param, row=y_i + 1, col=x_i + 1) + + tick_info = _get_tick_info(info.zs) + + colormap = "RdYlBu_r" + colorbar_trace = go.Scatter( + x=[None], + y=[None], + mode="markers", + marker=dict( + 
colorscale=colormap, + showscale=True, + cmin=0, + cmax=1, + colorbar=dict(thickness=10, tickvals=tick_info.coloridxs, ticktext=tick_info.text), + ), + hoverinfo="none", + showlegend=False, + ) + figure.add_trace(colorbar_trace) + return figure + + +def _convert_color_idxs_to_scaled_rgb_colors(color_idxs: np.ndarray) -> np.ndarray: + colormap = "RdYlBu_r" + if plotly_is_available: + # sample_colorscale requires plotly >= 5.0.0. + labeled_colors = plotly.colors.sample_colorscale(colormap, color_idxs) + scaled_rgb_colors = np.array([plotly.colors.unlabel_rgb(cl) for cl in labeled_colors]) + return scaled_rgb_colors + else: + cmap = matplotlib_plt.get_cmap(colormap) + colors = cmap(color_idxs)[:, :3] # Drop alpha values. + rgb_colors = np.asarray(colors * 255, dtype=int) + return rgb_colors diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/_slice.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/_slice.py new file mode 100644 index 0000000000000000000000000000000000000000..38f014d208d3675242df271a1ce00b65c6c72032 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/_slice.py @@ -0,0 +1,272 @@ +from __future__ import annotations + +from collections.abc import Callable +from typing import Any +from typing import cast +from typing import NamedTuple + +from optuna.distributions import CategoricalChoiceType +from optuna.distributions import CategoricalDistribution +from optuna.logging import get_logger +from optuna.samplers._base import _CONSTRAINTS_KEY +from optuna.study import Study +from optuna.trial import FrozenTrial +from optuna.trial import TrialState +from optuna.visualization._plotly_imports import _imports +from optuna.visualization._utils import _check_plot_args +from optuna.visualization._utils import _filter_nonfinite +from optuna.visualization._utils import _is_log_scale + + +if _imports.is_successful(): + from optuna.visualization._plotly_imports import go + from optuna.visualization._plotly_imports import make_subplots + from optuna.visualization._plotly_imports import Scatter + from optuna.visualization._utils import COLOR_SCALE + +_logger = get_logger(__name__) + + +class _SliceSubplotInfo(NamedTuple): + param_name: str + x: list[Any] + y: list[float] + trial_numbers: list[int] + is_log: bool + is_numerical: bool + constraints: list[bool] + x_labels: tuple[CategoricalChoiceType, ...] | None + + +class _SlicePlotInfo(NamedTuple): + target_name: str + subplots: list[_SliceSubplotInfo] + + +class _PlotValues(NamedTuple): + x: list[Any] + y: list[float] + trial_numbers: list[int] + + +def _get_slice_subplot_info( + trials: list[FrozenTrial], + param: str, + target: Callable[[FrozenTrial], float] | None, + log_scale: bool, + numerical: bool, + x_labels: tuple[CategoricalChoiceType, ...] 
| None, +) -> _SliceSubplotInfo: + if target is None: + + def _target(t: FrozenTrial) -> float: + return cast(float, t.value) + + target = _target + + plot_info = _SliceSubplotInfo( + param_name=param, + x=[], + y=[], + trial_numbers=[], + is_log=log_scale, + is_numerical=numerical, + x_labels=x_labels, + constraints=[], + ) + + for t in trials: + if param not in t.params: + continue + plot_info.x.append(t.params[param]) + plot_info.y.append(target(t)) + plot_info.trial_numbers.append(t.number) + constraints = t.system_attrs.get(_CONSTRAINTS_KEY) + plot_info.constraints.append(constraints is None or all([x <= 0.0 for x in constraints])) + + return plot_info + + +def _get_slice_plot_info( + study: Study, + params: list[str] | None, + target: Callable[[FrozenTrial], float] | None, + target_name: str, +) -> _SlicePlotInfo: + _check_plot_args(study, target, target_name) + + trials = _filter_nonfinite( + study.get_trials(deepcopy=False, states=(TrialState.COMPLETE,)), target=target + ) + + if len(trials) == 0: + _logger.warning("Your study does not have any completed trials.") + return _SlicePlotInfo(target_name, []) + + all_params = {p_name for t in trials for p_name in t.params.keys()} + + distributions = {} + for trial in trials: + for param_name, distribution in trial.distributions.items(): + if param_name not in distributions: + distributions[param_name] = distribution + + x_labels = {} + for param_name, distribution in distributions.items(): + if isinstance(distribution, CategoricalDistribution): + x_labels[param_name] = distribution.choices + + if params is None: + sorted_params = sorted(all_params) + else: + for input_p_name in params: + if input_p_name not in all_params: + raise ValueError(f"Parameter {input_p_name} does not exist in your study.") + sorted_params = sorted(set(params)) + + return _SlicePlotInfo( + target_name=target_name, + subplots=[ + _get_slice_subplot_info( + trials=trials, + param=param, + target=target, + log_scale=_is_log_scale(trials, param), + numerical=not isinstance(distributions[param], CategoricalDistribution), + x_labels=x_labels.get(param), + ) + for param in sorted_params + ], + ) + + +def plot_slice( + study: Study, + params: list[str] | None = None, + *, + target: Callable[[FrozenTrial], float] | None = None, + target_name: str = "Objective Value", +) -> "go.Figure": + """Plot the parameter relationship as slice plot in a study. + + Note that, if a parameter contains missing values, a trial with missing values is not plotted. + + Args: + study: + A :class:`~optuna.study.Study` object whose trials are plotted for their target values. + params: + Parameter list to visualize. The default is all parameters. + target: + A function to specify the value to display. If it is :obj:`None` and ``study`` is being + used for single-objective optimization, the objective values are plotted. + + .. note:: + Specify this argument if ``study`` is being used for multi-objective optimization. + target_name: + Target's name to display on the axis label. + + Returns: + A :class:`plotly.graph_objects.Figure` object. 
+ """ + + _imports.check() + return _get_slice_plot(_get_slice_plot_info(study, params, target, target_name)) + + +def _get_slice_plot(info: _SlicePlotInfo) -> "go.Figure": + layout = go.Layout(title="Slice Plot") + + if len(info.subplots) == 0: + return go.Figure(data=[], layout=layout) + elif len(info.subplots) == 1: + figure = go.Figure(data=_generate_slice_subplot(info.subplots[0]), layout=layout) + figure.update_xaxes(title_text=info.subplots[0].param_name) + figure.update_yaxes(title_text=info.target_name) + if not info.subplots[0].is_numerical: + figure.update_xaxes( + type="category", categoryorder="array", categoryarray=info.subplots[0].x_labels + ) + elif info.subplots[0].is_log: + figure.update_xaxes(type="log") + else: + figure = make_subplots(rows=1, cols=len(info.subplots), shared_yaxes=True) + figure.update_layout(layout) + showscale = True # showscale option only needs to be specified once. + for column_index, subplot_info in enumerate(info.subplots, start=1): + trace = _generate_slice_subplot(subplot_info) + trace[0].update(marker={"showscale": showscale}) # showscale's default is True. + if showscale: + showscale = False + for t in trace: + figure.add_trace(t, row=1, col=column_index) + figure.update_xaxes(title_text=subplot_info.param_name, row=1, col=column_index) + if column_index == 1: + figure.update_yaxes(title_text=info.target_name, row=1, col=column_index) + if not subplot_info.is_numerical: + figure.update_xaxes( + type="category", + categoryorder="array", + categoryarray=subplot_info.x_labels, + row=1, + col=column_index, + ) + elif subplot_info.is_log: + figure.update_xaxes(type="log", row=1, col=column_index) + if len(info.subplots) > 3: + # Ensure that each subplot has a minimum width without relying on autusizing. + figure.update_layout(width=300 * len(info.subplots)) + + return figure + + +def _generate_slice_subplot(subplot_info: _SliceSubplotInfo) -> list[Scatter]: + trace = [] + + feasible = _PlotValues([], [], []) + infeasible = _PlotValues([], [], []) + + for x, y, num, c in zip( + subplot_info.x, subplot_info.y, subplot_info.trial_numbers, subplot_info.constraints + ): + if x is not None or x != "None" or y is not None or y != "None": + if c: + feasible.x.append(x) + feasible.y.append(y) + feasible.trial_numbers.append(num) + else: + infeasible.x.append(x) + infeasible.y.append(y) + trace.append( + go.Scatter( + x=feasible.x, + y=feasible.y, + mode="markers", + name="Feasible Trial", + marker={ + "line": {"width": 0.5, "color": "Grey"}, + "color": feasible.trial_numbers, + "colorscale": COLOR_SCALE, + "colorbar": { + "title": "Trial", + "x": 1.0, # Offset the colorbar position with a fixed width `xpad`. 
+ "xpad": 40, + }, + }, + showlegend=False, + ) + ) + if len(infeasible.x) > 0: + trace.append( + go.Scatter( + x=infeasible.x, + y=infeasible.y, + mode="markers", + name="Infeasible Trial", + marker={ + "color": "#cccccc", + }, + showlegend=False, + ) + ) + + return trace diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/_terminator_improvement.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/_terminator_improvement.py new file mode 100644 index 0000000000000000000000000000000000000000..e2aeb37ab826ff6c1bce8bf63d0ff65e945d6529 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/_terminator_improvement.py @@ -0,0 +1,226 @@ +from __future__ import annotations + +from typing import NamedTuple + +import tqdm + +import optuna +from optuna._experimental import experimental_func +from optuna.logging import get_logger +from optuna.study.study import Study +from optuna.terminator import BaseErrorEvaluator +from optuna.terminator import BaseImprovementEvaluator +from optuna.terminator import CrossValidationErrorEvaluator +from optuna.terminator import RegretBoundEvaluator +from optuna.terminator.erroreval import StaticErrorEvaluator +from optuna.terminator.improvement.evaluator import BestValueStagnationEvaluator +from optuna.terminator.improvement.evaluator import DEFAULT_MIN_N_TRIALS +from optuna.visualization._plotly_imports import _imports + + +if _imports.is_successful(): + from optuna.visualization._plotly_imports import go + +_logger = get_logger(__name__) + + +PADDING_RATIO_Y = 0.05 +OPACITY = 0.25 + + +class _ImprovementInfo(NamedTuple): + trial_numbers: list[int] + improvements: list[float] + errors: list[float] | None + + +@experimental_func("3.2.0") +def plot_terminator_improvement( + study: Study, + plot_error: bool = False, + improvement_evaluator: BaseImprovementEvaluator | None = None, + error_evaluator: BaseErrorEvaluator | None = None, + min_n_trials: int = DEFAULT_MIN_N_TRIALS, +) -> "go.Figure": + """Plot the potentials for future objective improvement. + + This function visualizes the objective improvement potentials, evaluated + with ``improvement_evaluator``. + It helps to determine whether we should continue the optimization or not. + You can also plot the error evaluated with + ``error_evaluator`` if the ``plot_error`` argument is set to :obj:`True`. + Note that this function may take some time to compute + the improvement potentials. + + Args: + study: + A :class:`~optuna.study.Study` object whose trials are plotted + for their improvement. + plot_error: + A flag to show the error. If it is set to :obj:`True`, errors + evaluated by ``error_evaluator`` are also plotted as line graph. + Defaults to :obj:`False`. + improvement_evaluator: + An object that evaluates the improvement of the objective function. + Defaults to :class:`~optuna.terminator.RegretBoundEvaluator`. + error_evaluator: + An object that evaluates the error inherent in the objective function. + Defaults to :class:`~optuna.terminator.CrossValidationErrorEvaluator`. + min_n_trials: + The minimum number of trials before termination is considered. + Terminator improvements for trials below this value are + shown in a lighter color. Defaults to ``20``. + + Returns: + A :class:`plotly.graph_objects.Figure` object. 
+ """ + _imports.check() + + info = _get_improvement_info(study, plot_error, improvement_evaluator, error_evaluator) + return _get_improvement_plot(info, min_n_trials) + + +def _get_improvement_info( + study: Study, + get_error: bool = False, + improvement_evaluator: BaseImprovementEvaluator | None = None, + error_evaluator: BaseErrorEvaluator | None = None, +) -> _ImprovementInfo: + if study._is_multi_objective(): + raise ValueError("This function does not support multi-objective optimization study.") + + if improvement_evaluator is None: + improvement_evaluator = RegretBoundEvaluator() + if error_evaluator is None: + if isinstance(improvement_evaluator, BestValueStagnationEvaluator): + error_evaluator = StaticErrorEvaluator(constant=0) + else: + error_evaluator = CrossValidationErrorEvaluator() + + trial_numbers = [] + completed_trials = [] + improvements = [] + errors = [] + + for trial in tqdm.tqdm(study.trials): + if trial.state == optuna.trial.TrialState.COMPLETE: + completed_trials.append(trial) + + if len(completed_trials) == 0: + continue + + trial_numbers.append(trial.number) + + improvement = improvement_evaluator.evaluate( + trials=completed_trials, study_direction=study.direction + ) + improvements.append(improvement) + + if get_error: + error = error_evaluator.evaluate( + trials=completed_trials, study_direction=study.direction + ) + errors.append(error) + + if len(errors) == 0: + return _ImprovementInfo( + trial_numbers=trial_numbers, improvements=improvements, errors=None + ) + else: + return _ImprovementInfo( + trial_numbers=trial_numbers, improvements=improvements, errors=errors + ) + + +def _get_improvement_scatter( + trial_numbers: list[int], + improvements: list[float], + opacity: float = 1.0, + showlegend: bool = True, +) -> "go.Scatter": + plotly_blue_with_opacity = f"rgba(99, 110, 250, {opacity})" + return go.Scatter( + x=trial_numbers, + y=improvements, + mode="markers+lines", + marker=dict(color=plotly_blue_with_opacity), + line=dict(color=plotly_blue_with_opacity), + name="Terminator Improvement", + showlegend=showlegend, + legendgroup="improvement", + ) + + +def _get_error_scatter( + trial_numbers: list[int], + errors: list[float] | None, +) -> "go.Scatter": + if errors is None: + return go.Scatter() + + plotly_red = "rgb(239, 85, 59)" + return go.Scatter( + x=trial_numbers, + y=errors, + mode="markers+lines", + name="Error", + marker=dict(color=plotly_red), + line=dict(color=plotly_red), + ) + + +def _get_y_range(info: _ImprovementInfo, min_n_trials: int) -> tuple[float, float]: + min_value = min(info.improvements) + if info.errors is not None: + min_value = min(min_value, min(info.errors)) + + # Determine the display range based on trials after min_n_trials. + if len(info.trial_numbers) > min_n_trials: + max_value = max(info.improvements[min_n_trials:]) + # If there are no trials after min_trials, determine the display range based on all trials. 
+ else: + max_value = max(info.improvements) + + if info.errors is not None: + max_value = max(max_value, max(info.errors)) + + padding = (max_value - min_value) * PADDING_RATIO_Y + return min_value - padding, max_value + padding + + +def _get_improvement_plot(info: _ImprovementInfo, min_n_trials: int) -> "go.Figure": + n_trials = len(info.trial_numbers) + + fig = go.Figure( + layout=go.Layout( + title="Terminator Improvement Plot", + xaxis=dict(title="Trial"), + yaxis=dict(title="Terminator Improvement"), + ) + ) + if n_trials == 0: + _logger.warning("There are no complete trials.") + return fig + + fig.add_trace( + _get_improvement_scatter( + info.trial_numbers[: min_n_trials + 1], + info.improvements[: min_n_trials + 1], + # Plot line with a lighter color until the number of trials reaches min_n_trials. + OPACITY, + n_trials <= min_n_trials, # Avoid showing legend twice. + ) + ) + + if n_trials > min_n_trials: + fig.add_trace( + _get_improvement_scatter( + info.trial_numbers[min_n_trials:], + info.improvements[min_n_trials:], + ) + ) + + fig.add_trace(_get_error_scatter(info.trial_numbers, info.errors)) + + fig.update_yaxes(range=_get_y_range(info, min_n_trials)) + return fig diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/_timeline.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/_timeline.py new file mode 100644 index 0000000000000000000000000000000000000000..361de7f57306519b81e7a3811f2e995ba647f243 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/_timeline.py @@ -0,0 +1,187 @@ +from __future__ import annotations + +import datetime +from typing import NamedTuple + +from optuna.logging import get_logger +from optuna.samplers._base import _CONSTRAINTS_KEY +from optuna.study import Study +from optuna.trial import TrialState +from optuna.visualization._plotly_imports import _imports +from optuna.visualization._utils import _make_hovertext + + +if _imports.is_successful(): + from optuna.visualization._plotly_imports import go + +_logger = get_logger(__name__) + + +class _TimelineBarInfo(NamedTuple): + number: int + start: datetime.datetime + complete: datetime.datetime + state: TrialState + hovertext: str + infeasible: bool + + +class _TimelineInfo(NamedTuple): + bars: list[_TimelineBarInfo] + + +def plot_timeline(study: Study, n_recent_trials: int | None = None) -> "go.Figure": + """Plot the timeline of a study. + + Args: + study: + A :class:`~optuna.study.Study` object whose trials are plotted with + their lifetime. + n_recent_trials: + The number of recent trials to plot. If :obj:`None`, all trials are plotted. + If specified, only the most recent ``n_recent_trials`` will be displayed. + Must be a positive integer. + + Returns: + A :class:`plotly.graph_objects.Figure` object. + + Raises: + ValueError: if ``n_recent_trials`` is 0 or negative. 
+ """ + + if n_recent_trials is not None and n_recent_trials <= 0: + raise ValueError("n_recent_trials must be a positive integer or None.") + + _imports.check() + info = _get_timeline_info(study, n_recent_trials=n_recent_trials) + return _get_timeline_plot(info) + + +def _get_max_datetime_complete(study: Study) -> datetime.datetime: + max_run_duration = max( + [ + t.datetime_complete - t.datetime_start + for t in study.trials + if t.datetime_complete is not None and t.datetime_start is not None + ], + default=None, + ) + if _is_running_trials_in_study(study, max_run_duration): + return datetime.datetime.now() + + return max( + [t.datetime_complete for t in study.trials if t.datetime_complete is not None], + default=datetime.datetime.now(), + ) + + +def _is_running_trials_in_study(study: Study, max_run_duration: datetime.timedelta | None) -> bool: + running_trials = study.get_trials(states=(TrialState.RUNNING,), deepcopy=False) + if max_run_duration is None: + return len(running_trials) > 0 + + now = datetime.datetime.now() + # This heuristic is to check whether we have trials that were somehow killed, + # still remain as `RUNNING` in `study`. + return any( + now - t.datetime_start < 5 * max_run_duration + for t in running_trials + # MyPy redefinition: Running trial should have datetime_start. + if t.datetime_start is not None + ) + + +def _get_timeline_info(study: Study, n_recent_trials: int | None = None) -> _TimelineInfo: + bars = [] + + max_datetime = _get_max_datetime_complete(study) + timedelta_for_small_bar = datetime.timedelta(seconds=1) + + trials = study.get_trials(deepcopy=False) + if n_recent_trials is not None: + trials = trials[-n_recent_trials:] + + for trial in trials: + datetime_start = trial.datetime_start or max_datetime + datetime_complete = ( + max_datetime + timedelta_for_small_bar + if trial.state == TrialState.RUNNING + else trial.datetime_complete or datetime_start + timedelta_for_small_bar + ) + infeasible = ( + False + if _CONSTRAINTS_KEY not in trial.system_attrs + else any([x > 0 for x in trial.system_attrs[_CONSTRAINTS_KEY]]) + ) + if datetime_complete < datetime_start: + _logger.warning( + ( + f"The start and end times for Trial {trial.number} seem to be reversed. " + f"The start time is {datetime_start} and the end time is {datetime_complete}." 
+ ) + ) + bars.append( + _TimelineBarInfo( + number=trial.number, + start=datetime_start, + complete=datetime_complete, + state=trial.state, + hovertext=_make_hovertext(trial), + infeasible=infeasible, + ) + ) + + if len(bars) == 0: + _logger.warning("Your study does not have any trials.") + + return _TimelineInfo(bars) + + +def _get_timeline_plot(info: _TimelineInfo) -> "go.Figure": + _cm = { + "COMPLETE": "blue", + "FAIL": "red", + "PRUNED": "orange", + "RUNNING": "green", + "WAITING": "gray", + } + + fig = go.Figure() + for state in sorted(TrialState, key=lambda x: x.name): + if state.name == "COMPLETE": + infeasible_bars = [b for b in info.bars if b.state == state and b.infeasible] + feasible_bars = [b for b in info.bars if b.state == state and not b.infeasible] + _plot_bars(infeasible_bars, "#cccccc", "INFEASIBLE", fig) + _plot_bars(feasible_bars, _cm[state.name], state.name, fig) + else: + bars = [b for b in info.bars if b.state == state] + _plot_bars(bars, _cm[state.name], state.name, fig) + fig.update_xaxes(type="date") + fig.update_layout( + go.Layout( + title="Timeline Plot", + xaxis={"title": "Datetime"}, + yaxis={"title": "Trial"}, + ) + ) + fig.update_layout(showlegend=True) # Draw a legend even if all TrialStates are the same. + return fig + + +def _plot_bars(bars: list[_TimelineBarInfo], color: str, name: str, fig: go.Figure) -> None: + if len(bars) == 0: + return + + fig.add_trace( + go.Bar( + name=name, + x=[(b.complete - b.start).total_seconds() * 1000 for b in bars], + y=[b.number for b in bars], + base=[b.start.isoformat() for b in bars], + text=[b.hovertext for b in bars], + hovertemplate="%{text}" + name + "", + orientation="h", + marker=dict(color=color), + textposition="none", # Avoid drawing hovertext in a bar. + ) + ) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/_utils.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..75864f3781f66092b09ea0ac44df3047975a43ee --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/_utils.py @@ -0,0 +1,201 @@ +from __future__ import annotations + +from collections.abc import Callable +from collections.abc import Sequence +import json +from typing import Any +from typing import cast +import warnings + +import numpy as np + +import optuna +from optuna.distributions import CategoricalDistribution +from optuna.distributions import FloatDistribution +from optuna.distributions import IntDistribution +from optuna.study import Study +from optuna.study._study_direction import StudyDirection +from optuna.trial import FrozenTrial +from optuna.visualization import _plotly_imports + + +__all__ = ["is_available"] +_logger = optuna.logging.get_logger(__name__) + + +def is_available() -> bool: + """Returns whether visualization with plotly is available or not. + + .. note:: + + :mod:`~optuna.visualization` module depends on plotly version 4.0.0 or higher. If a + supported version of plotly isn't installed in your environment, this function will return + :obj:`False`. In such case, please execute ``$ pip install -U plotly>=4.0.0`` to install + plotly. + + Returns: + :obj:`True` if visualization with plotly is available, :obj:`False` otherwise. 
+ """ + + return _plotly_imports._imports.is_successful() + + +if is_available(): + import plotly.colors + + COLOR_SCALE = plotly.colors.sequential.Blues + + +def _check_plot_args( + study: Study | Sequence[Study], + target: Callable[[FrozenTrial], float] | None, + target_name: str, +) -> None: + studies: Sequence[Study] + if isinstance(study, Study): + studies = [study] + else: + studies = study + + if target is None and any(study._is_multi_objective() for study in studies): + raise ValueError( + "If the `study` is being used for multi-objective optimization, " + "please specify the `target`." + ) + + if target is not None and target_name == "Objective Value": + warnings.warn( + "`target` is specified, but `target_name` is the default value, 'Objective Value'." + ) + + +def _is_log_scale(trials: list[FrozenTrial], param: str) -> bool: + for trial in trials: + if param not in trial.params: + continue + dist = trial.distributions[param] + return isinstance(dist, (FloatDistribution, IntDistribution)) and dist.log + return False + + +def _is_numerical(trials: list[FrozenTrial], param: str) -> bool: + for trial in trials: + if param not in trial.params: + continue + dist = trial.distributions[param] + if isinstance(dist, (IntDistribution, FloatDistribution)): + return True + elif isinstance(dist, CategoricalDistribution): + # NOTE: Although it is a bit odd to do so, we keep it as is only for visualization. + return all( + isinstance(v, (int, float)) and not isinstance(v, bool) for v in dist.choices + ) + else: + assert False, "Should not reach." + return True + + +def _get_param_values(trials: list[FrozenTrial], p_name: str) -> list[Any]: + values = [t.params[p_name] for t in trials if p_name in t.params] + if _is_numerical(trials, p_name): + return values + return list(map(str, values)) + + +def _get_skipped_trial_numbers( + trials: list[FrozenTrial], used_param_names: Sequence[str] +) -> set[int]: + """Utility function for ``plot_parallel_coordinate``. + + If trial's parameters do not contain a parameter in ``used_param_names``, + ``plot_parallel_coordinate`` methods do not use such trials. + + Args: + trials: + List of ``FrozenTrial``s. + used_param_names: + The parameter names used in ``plot_parallel_coordinate``. + + Returns: + A set of invalid trial numbers. + """ + + skipped_trial_numbers = set() + for trial in trials: + for used_param in used_param_names: + if used_param not in trial.params.keys(): + skipped_trial_numbers.add(trial.number) + break + return skipped_trial_numbers + + +def _filter_nonfinite( + trials: list[FrozenTrial], + target: Callable[[FrozenTrial], float] | None = None, + with_message: bool = True, +) -> list[FrozenTrial]: + # For multi-objective optimization target must be specified to select + # one of objective values to filter trials by (and plot by later on). + # This function is not raising when target is missing, since we're + # assuming plot args have been sanitized before. + if target is None: + + def _target(t: FrozenTrial) -> float: + return cast(float, t.value) + + target = _target + + filtered_trials: list[FrozenTrial] = [] + for trial in trials: + value = target(trial) + + try: + value = float(value) + except ( + ValueError, + TypeError, + ): + warnings.warn( + f"Trial{trial.number}'s target value {repr(value)} could not be cast to float." + ) + raise + + # Not a Number, positive infinity and negative infinity are considered to be non-finite. 
+ if not np.isfinite(value): + if with_message: + _logger.warning( + f"Trial {trial.number} is omitted in visualization " + "because its objective value is inf or nan." + ) + else: + filtered_trials.append(trial) + + return filtered_trials + + +def _is_reverse_scale(study: Study, target: Callable[[FrozenTrial], float] | None) -> bool: + return target is not None or study.direction == StudyDirection.MINIMIZE + + +def _make_json_compatible(value: Any) -> Any: + try: + json.dumps(value) + return value + except TypeError: + # The value can't be converted to JSON directly, so return a string representation. + return str(value) + + +def _make_hovertext(trial: FrozenTrial) -> str: + user_attrs = {key: _make_json_compatible(value) for key, value in trial.user_attrs.items()} + user_attrs_dict = {"user_attrs": user_attrs} if user_attrs else {} + text = json.dumps( + { + "number": trial.number, + "values": trial.values, + "params": trial.params, + **user_attrs_dict, + }, + indent=2, + ) + return text.replace("\n", "
") diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/matplotlib/__init__.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/matplotlib/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..54368c8e5de73114ac9a311070369042164f56cd --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/matplotlib/__init__.py @@ -0,0 +1,30 @@ +from optuna.visualization.matplotlib._contour import plot_contour +from optuna.visualization.matplotlib._edf import plot_edf +from optuna.visualization.matplotlib._hypervolume_history import plot_hypervolume_history +from optuna.visualization.matplotlib._intermediate_values import plot_intermediate_values +from optuna.visualization.matplotlib._optimization_history import plot_optimization_history +from optuna.visualization.matplotlib._parallel_coordinate import plot_parallel_coordinate +from optuna.visualization.matplotlib._param_importances import plot_param_importances +from optuna.visualization.matplotlib._pareto_front import plot_pareto_front +from optuna.visualization.matplotlib._rank import plot_rank +from optuna.visualization.matplotlib._slice import plot_slice +from optuna.visualization.matplotlib._terminator_improvement import plot_terminator_improvement +from optuna.visualization.matplotlib._timeline import plot_timeline +from optuna.visualization.matplotlib._utils import is_available + + +__all__ = [ + "is_available", + "plot_contour", + "plot_edf", + "plot_intermediate_values", + "plot_hypervolume_history", + "plot_optimization_history", + "plot_parallel_coordinate", + "plot_param_importances", + "plot_pareto_front", + "plot_rank", + "plot_slice", + "plot_terminator_improvement", + "plot_timeline", +] diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/matplotlib/_contour.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/matplotlib/_contour.py new file mode 100644 index 0000000000000000000000000000000000000000..91849ad93d7c9a213f551a7989ecc61c65a8659e --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/matplotlib/_contour.py @@ -0,0 +1,364 @@ +from __future__ import annotations + +from collections.abc import Callable +from collections.abc import Sequence + +import numpy as np + +from optuna._experimental import experimental_func +from optuna._imports import try_import +from optuna.study import Study +from optuna.trial import FrozenTrial +from optuna.visualization._contour import _AxisInfo +from optuna.visualization._contour import _ContourInfo +from optuna.visualization._contour import _get_contour_info +from optuna.visualization._contour import _PlotValues +from optuna.visualization._contour import _SubContourInfo +from optuna.visualization.matplotlib._matplotlib_imports import _imports + + +with try_import() as _optuna_imports: + import scipy + +if _imports.is_successful(): + from optuna.visualization.matplotlib._matplotlib_imports import Axes + from optuna.visualization.matplotlib._matplotlib_imports import Colormap + from optuna.visualization.matplotlib._matplotlib_imports import ContourSet + from optuna.visualization.matplotlib._matplotlib_imports import plt + + +CONTOUR_POINT_NUM = 100 + + +@experimental_func("2.2.0") +def plot_contour( + study: Study, + params: list[str] 
| None = None, + *, + target: Callable[[FrozenTrial], float] | None = None, + target_name: str = "Objective Value", +) -> "Axes": + """Plot the parameter relationship as contour plot in a study with Matplotlib. + + Note that, if a parameter contains missing values, a trial with missing values is not plotted. + + .. seealso:: + Please refer to :func:`optuna.visualization.plot_contour` for an example. + + Args: + study: + A :class:`~optuna.study.Study` object whose trials are plotted for their target values. + params: + Parameter list to visualize. The default is all parameters. + target: + A function to specify the value to display. If it is :obj:`None` and ``study`` is being + used for single-objective optimization, the objective values are plotted. + + .. note:: + Specify this argument if ``study`` is being used for multi-objective optimization. + target_name: + Target's name to display on the color bar. + + Returns: + A :class:`matplotlib.axes.Axes` object. + + .. note:: + The colormap is reversed when the ``target`` argument isn't :obj:`None` or ``direction`` + of :class:`~optuna.study.Study` is ``minimize``. + """ + + _imports.check() + info = _get_contour_info(study, params, target, target_name) + return _get_contour_plot(info) + + +def _get_contour_plot(info: _ContourInfo) -> "Axes": + sorted_params = info.sorted_params + sub_plot_infos = info.sub_plot_infos + reverse_scale = info.reverse_scale + target_name = info.target_name + + if len(sorted_params) <= 1: + _, ax = plt.subplots() + return ax + n_params = len(sorted_params) + + plt.style.use("ggplot") # Use ggplot style sheet for similar outputs to plotly. + if n_params == 2: + # Set up the graph style. + fig, axs = plt.subplots() + axs.set_title("Contour Plot") + cmap = _set_cmap(reverse_scale) + + cs = _generate_contour_subplot(sub_plot_infos[0][0], axs, cmap) + if isinstance(cs, ContourSet): + axcb = fig.colorbar(cs) + axcb.set_label(target_name) + else: + # Set up the graph style. + fig, axs = plt.subplots(n_params, n_params) + assert isinstance(axs, np.ndarray) + fig.suptitle("Contour Plot") + cmap = _set_cmap(reverse_scale) + + # Prepare data and draw contour plots. 
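# Each cell axs[y_i, x_i] in the loop below shows sorted_params[x_i] on the x axis against
# sorted_params[y_i] on the y axis; the first ContourSet that is actually drawn supplies the
# single colorbar shared by the whole grid.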
+ cs_list = [] + for x_i in range(len(sorted_params)): + for y_i in range(len(sorted_params)): + ax = axs[y_i, x_i] + cs = _generate_contour_subplot(sub_plot_infos[y_i][x_i], ax, cmap) + if isinstance(cs, ContourSet): + cs_list.append(cs) + if cs_list: + axcb = fig.colorbar(cs_list[0], ax=axs) + axcb.set_label(target_name) + + return axs + + +def _set_cmap(reverse_scale: bool) -> "Colormap": + cmap = "Blues_r" if not reverse_scale else "Blues" + return plt.get_cmap(cmap) + + +class _LabelEncoder: + def __init__(self) -> None: + self.labels: list[str] = [] + + def fit(self, labels: list[str]) -> "_LabelEncoder": + self.labels = sorted(set(labels)) + return self + + def transform(self, labels: list[str]) -> list[int]: + return [self.labels.index(label) for label in labels] + + def fit_transform(self, labels: list[str]) -> list[int]: + return self.fit(labels).transform(labels) + + def get_labels(self) -> list[str]: + return self.labels + + def get_indices(self) -> list[int]: + return list(range(len(self.labels))) + + +def _filter_missing_values( + xaxis: _AxisInfo, yaxis: _AxisInfo +) -> tuple[list[str | float], list[str | float]]: + x_values = [] + y_values = [] + for x_value, y_value in zip(xaxis.values, yaxis.values): + if x_value is not None and y_value is not None: + x_values.append(x_value) + y_values.append(y_value) + return x_values, y_values + + +def _calculate_axis_data( + axis: _AxisInfo, + values: Sequence[str | float], +) -> tuple[np.ndarray, list[str], list[int], list[int | float]]: + # Convert categorical values to int. + cat_param_labels: list[str] = [] + cat_param_pos: list[int] = [] + returned_values: Sequence[int | float] + if axis.is_cat: + enc = _LabelEncoder() + # Fit LabelEncoder with all the categories in categorical distribution. + enc.fit(list(map(str, filter(lambda value: value is not None, axis.values)))) + # Then transform the values using the fitted label encoder. + # Note that `values` may not include all the categories, + # so we use `axis.values` for fitting. + returned_values = enc.transform(list(map(str, values))) + cat_param_labels = enc.get_labels() + cat_param_pos = enc.get_indices() + else: + returned_values = list(map(lambda x: float(x), values)) + + # For x and y, create 1-D array of evenly spaced coordinates on linear or log scale. + if axis.is_log: + ci = np.logspace(np.log10(axis.range[0]), np.log10(axis.range[1]), CONTOUR_POINT_NUM) + else: + ci = np.linspace(axis.range[0], axis.range[1], CONTOUR_POINT_NUM) + + return ci, cat_param_labels, cat_param_pos, list(returned_values) + + +def _calculate_griddata(info: _SubContourInfo) -> tuple[np.ndarray, _PlotValues, _PlotValues]: + xaxis = info.xaxis + yaxis = info.yaxis + z_values_dict = info.z_values + + x_values = [] + y_values = [] + z_values = [] + for x_value, y_value in zip(xaxis.values, yaxis.values): + if x_value is not None and y_value is not None: + x_values.append(x_value) + y_values.append(y_value) + x_i = xaxis.indices.index(x_value) + y_i = yaxis.indices.index(y_value) + z_values.append(z_values_dict[(x_i, y_i)]) + + # Return empty values when x or y has no value. + if len(x_values) == 0 or len(y_values) == 0: + return np.array([]), _PlotValues([], []), _PlotValues([], []) + + xi, cat_param_labels_x, cat_param_pos_x, transformed_x_values = _calculate_axis_data( + xaxis, + x_values, + ) + yi, cat_param_labels_y, cat_param_pos_y, transformed_y_values = _calculate_axis_data( + yaxis, + y_values, + ) + + # Calculate grid data points. 
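# zi stays an empty array when the two axes refer to the same parameter; callers only draw
# contours after checking len(zi) > 0.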
+ zi: np.ndarray = np.array([]) + # Create irregularly spaced map of trial values + # and interpolate it with Plotly's interpolation formulation. + if xaxis.name != yaxis.name: + zmap = _create_zmap(transformed_x_values, transformed_y_values, z_values, xi, yi) + zi = _interpolate_zmap(zmap, CONTOUR_POINT_NUM) + + # categorize by constraints + feasible = _PlotValues([], []) + infeasible = _PlotValues([], []) + + for x_value, y_value, c in zip(transformed_x_values, transformed_y_values, info.constraints): + if c: + feasible.x.append(x_value) + feasible.y.append(y_value) + else: + infeasible.x.append(x_value) + infeasible.y.append(y_value) + + return zi, feasible, infeasible + + +def _generate_contour_subplot( + info: _SubContourInfo, ax: "Axes", cmap: "Colormap" +) -> "ContourSet" | None: + ax.label_outer() + + if len(info.xaxis.indices) < 2 or len(info.yaxis.indices) < 2: + return None + + ax.set(xlabel=info.xaxis.name, ylabel=info.yaxis.name) + ax.set_xlim(info.xaxis.range[0], info.xaxis.range[1]) + ax.set_ylim(info.yaxis.range[0], info.yaxis.range[1]) + x_values, y_values = _filter_missing_values(info.xaxis, info.yaxis) + xi, x_cat_param_label, x_cat_param_pos, _ = _calculate_axis_data(info.xaxis, x_values) + yi, y_cat_param_label, y_cat_param_pos, _ = _calculate_axis_data(info.yaxis, y_values) + if info.xaxis.is_cat: + ax.set_xticks(x_cat_param_pos) + ax.set_xticklabels(x_cat_param_label) + else: + ax.set_xscale("log" if info.xaxis.is_log else "linear") + if info.yaxis.is_cat: + ax.set_yticks(y_cat_param_pos) + ax.set_yticklabels(y_cat_param_label) + else: + ax.set_yscale("log" if info.yaxis.is_log else "linear") + + if info.xaxis.name == info.yaxis.name: + return None + + zi, feasible_plot_values, infeasible_plot_values = _calculate_griddata(info) + cs = None + if len(zi) > 0: + # Contour the gridded data. + ax.contour(xi, yi, zi, 15, linewidths=0.5, colors="k") + cs = ax.contourf(xi, yi, zi, 15, cmap=cmap.reversed()) + assert isinstance(cs, ContourSet) + # Plot data points. + ax.scatter( + feasible_plot_values.x, + feasible_plot_values.y, + marker="o", + c="black", + s=20, + edgecolors="grey", + linewidth=2.0, + ) + ax.scatter( + infeasible_plot_values.x, + infeasible_plot_values.y, + marker="o", + c="#cccccc", + s=20, + edgecolors="grey", + linewidth=2.0, + ) + + return cs + + +def _create_zmap( + x_values: Sequence[int | float], + y_values: Sequence[int | float], + z_values: Sequence[float], + xi: np.ndarray, + yi: np.ndarray, +) -> dict[tuple[int, int], float]: + # Creates z-map from trial values and params. + # z-map is represented by hashmap of coordinate and trial value pairs. + # + # Coordinates are represented by tuple of integers, where the first item + # indicates x-axis index and the second item indicates y-axis index + # and refer to a position of trial value on irregular param grid. + # + # Since params were resampled either with linspace or logspace + # original params might not be on the x and y axes anymore + # so we are going with close approximations of trial value positions. 
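# For instance, with xi = np.linspace(0.0, 1.0, 100), a trial whose transformed x value is 0.503
# is snapped to column int(np.argmin(np.abs(xi - 0.503))) == 50, so its objective value is stored
# at that approximate grid coordinate rather than at the exact parameter value.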
+ zmap = dict() + for x, y, z in zip(x_values, y_values, z_values): + xindex = int(np.argmin(np.abs(xi - x))) + yindex = int(np.argmin(np.abs(yi - y))) + zmap[(xindex, yindex)] = z + + return zmap + + +def _interpolate_zmap(zmap: dict[tuple[int, int], float], contour_plot_num: int) -> np.ndarray: + # Implements interpolation formulation used in Plotly + # to interpolate heatmaps and contour plots + # https://github.com/plotly/plotly.js/blob/95b3bd1bb19d8dc226627442f8f66bce9576def8/src/traces/heatmap/interp2d.js#L15-L20 + # citing their doc: + # + # > Fill in missing data from a 2D array using an iterative + # > poisson equation solver with zero-derivative BC at edges. + # > Amazingly, this just amounts to repeatedly averaging all the existing + # > nearest neighbors + # + # Plotly's algorithm is equivalent to solve the following linear simultaneous equation. + # It is discretization form of the Poisson equation. + # + # z[x, y] = zmap[(x, y)] (if zmap[(x, y)] is given) + # 4 * z[x, y] = z[x-1, y] + z[x+1, y] + z[x, y-1] + z[x, y+1] (if zmap[(x, y)] is not given) + + a_data = [] + a_row = [] + a_col = [] + b = np.zeros(contour_plot_num**2) + for x in range(contour_plot_num): + for y in range(contour_plot_num): + grid_index = y * contour_plot_num + x + if (x, y) in zmap: + a_data.append(1) + a_row.append(grid_index) + a_col.append(grid_index) + b[grid_index] = zmap[(x, y)] + else: + for dx, dy in ((-1, 0), (1, 0), (0, -1), (0, 1)): + if 0 <= x + dx < contour_plot_num and 0 <= y + dy < contour_plot_num: + a_data.append(1) + a_row.append(grid_index) + a_col.append(grid_index) + a_data.append(-1) + a_row.append(grid_index) + a_col.append(grid_index + dy * contour_plot_num + dx) + + z = scipy.sparse.linalg.spsolve(scipy.sparse.csc_matrix((a_data, (a_row, a_col))), b) + + return z.reshape((contour_plot_num, contour_plot_num)) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/matplotlib/_edf.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/matplotlib/_edf.py new file mode 100644 index 0000000000000000000000000000000000000000..741d0a0fc1594eca312cbc516d1a14806c4a61f2 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/matplotlib/_edf.py @@ -0,0 +1,82 @@ +from __future__ import annotations + +from collections.abc import Callable +from collections.abc import Sequence + +from optuna._experimental import experimental_func +from optuna.logging import get_logger +from optuna.study import Study +from optuna.trial import FrozenTrial +from optuna.visualization._edf import _get_edf_info +from optuna.visualization.matplotlib._matplotlib_imports import _imports + + +if _imports.is_successful(): + from optuna.visualization.matplotlib._matplotlib_imports import Axes + from optuna.visualization.matplotlib._matplotlib_imports import plt + +_logger = get_logger(__name__) + + +@experimental_func("2.2.0") +def plot_edf( + study: Study | Sequence[Study], + *, + target: Callable[[FrozenTrial], float] | None = None, + target_name: str = "Objective Value", +) -> "Axes": + """Plot the objective value EDF (empirical distribution function) of a study with Matplotlib. + + Note that only the complete trials are considered when plotting the EDF. + + .. seealso:: + Please refer to :func:`optuna.visualization.plot_edf` for an example, + where this function can be replaced with it. + + .. 
note:: + + Please refer to `matplotlib.pyplot.legend + `_ + to adjust the style of the generated legend. + + Args: + study: + A target :class:`~optuna.study.Study` object. + You can pass multiple studies if you want to compare those EDFs. + target: + A function to specify the value to display. If it is :obj:`None` and ``study`` is being + used for single-objective optimization, the objective values are plotted. + + .. note:: + Specify this argument if ``study`` is being used for multi-objective optimization. + target_name: + Target's name to display on the axis label. + + Returns: + A :class:`matplotlib.axes.Axes` object. + """ + + _imports.check() + + # Set up the graph style. + plt.style.use("ggplot") # Use ggplot style sheet for similar outputs to plotly. + _, ax = plt.subplots() + ax.set_title("Empirical Distribution Function Plot") + ax.set_xlabel(target_name) + ax.set_ylabel("Cumulative Probability") + ax.set_ylim(0, 1) + cmap = plt.get_cmap("tab20") # Use tab20 colormap for multiple line plots. + + info = _get_edf_info(study, target, target_name) + edf_lines = info.lines + + if len(edf_lines) == 0: + return ax + + for i, (study_name, y_values) in enumerate(edf_lines): + ax.plot(info.x_values, y_values, color=cmap(i), alpha=0.7, label=study_name) + + if len(edf_lines) >= 2: + ax.legend() + + return ax diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/matplotlib/_hypervolume_history.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/matplotlib/_hypervolume_history.py new file mode 100644 index 0000000000000000000000000000000000000000..d8c230cc9b0405e924b9a9d3b92955c8c118e7cf --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/matplotlib/_hypervolume_history.py @@ -0,0 +1,78 @@ +from __future__ import annotations + +from collections.abc import Sequence + +import numpy as np + +from optuna._experimental import experimental_func +from optuna.study import Study +from optuna.visualization._hypervolume_history import _get_hypervolume_history_info +from optuna.visualization._hypervolume_history import _HypervolumeHistoryInfo +from optuna.visualization.matplotlib._matplotlib_imports import _imports + + +if _imports.is_successful(): + from optuna.visualization.matplotlib._matplotlib_imports import Axes + from optuna.visualization.matplotlib._matplotlib_imports import plt + + +@experimental_func("3.3.0") +def plot_hypervolume_history( + study: Study, + reference_point: Sequence[float], +) -> "Axes": + """Plot hypervolume history of all trials in a study with Matplotlib. + + .. note:: + You need to adjust the size of the plot by yourself using ``plt.tight_layout()`` or + ``plt.savefig(IMAGE_NAME, bbox_inches='tight')``. + + Args: + study: + A :class:`~optuna.study.Study` object whose trials are plotted for their hypervolumes. + The number of objectives must be 2 or more. + + reference_point: + A reference point to use for hypervolume computation. + The dimension of the reference point must be the same as the number of objectives. + + Returns: + A :class:`matplotlib.axes.Axes` object. + """ + + _imports.check() + + if not study._is_multi_objective(): + raise ValueError( + "Study must be multi-objective. For single-objective optimization, " + "please use plot_optimization_history instead." 
+ ) + + if len(reference_point) != len(study.directions): + raise ValueError( + "The dimension of the reference point must be the same as the number of objectives." + ) + + info = _get_hypervolume_history_info(study, np.asarray(reference_point, dtype=np.float64)) + return _get_hypervolume_history_plot(info) + + +def _get_hypervolume_history_plot( + info: _HypervolumeHistoryInfo, +) -> "Axes": + # Set up the graph style. + plt.style.use("ggplot") # Use ggplot style sheet for similar outputs to plotly. + _, ax = plt.subplots() + ax.set_title("Hypervolume History Plot") + ax.set_xlabel("Trial") + ax.set_ylabel("Hypervolume") + cmap = plt.get_cmap("tab10") # Use tab10 colormap for similar outputs to plotly. + + ax.plot( + info.trial_numbers, + info.values, + marker="o", + color=cmap(0), + alpha=0.5, + ) + return ax diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/matplotlib/_intermediate_values.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/matplotlib/_intermediate_values.py new file mode 100644 index 0000000000000000000000000000000000000000..803e10a9df789fa56f06b71a7f6b605780cb95b6 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/matplotlib/_intermediate_values.py @@ -0,0 +1,65 @@ +from optuna._experimental import experimental_func +from optuna.logging import get_logger +from optuna.study import Study +from optuna.visualization._intermediate_values import _get_intermediate_plot_info +from optuna.visualization._intermediate_values import _IntermediatePlotInfo +from optuna.visualization.matplotlib._matplotlib_imports import _imports + + +if _imports.is_successful(): + from optuna.visualization.matplotlib._matplotlib_imports import Axes + from optuna.visualization.matplotlib._matplotlib_imports import plt + +_logger = get_logger(__name__) + + +@experimental_func("2.2.0") +def plot_intermediate_values(study: Study) -> "Axes": + """Plot intermediate values of all trials in a study with Matplotlib. + + .. seealso:: + Please refer to :func:`optuna.visualization.plot_intermediate_values` for an example. + + .. note:: + Please refer to `matplotlib.pyplot.legend + `__ + to adjust the style of the generated legend. + + Args: + study: + A :class:`~optuna.study.Study` object whose trials are plotted for their intermediate + values. + + Returns: + A :class:`matplotlib.axes.Axes` object. + """ + + _imports.check() + return _get_intermediate_plot(_get_intermediate_plot_info(study)) + + +def _get_intermediate_plot(info: _IntermediatePlotInfo) -> "Axes": + # Set up the graph style. + plt.style.use("ggplot") # Use ggplot style sheet for similar outputs to plotly. + _, ax = plt.subplots(tight_layout=True) + ax.set_title("Intermediate Values Plot") + ax.set_xlabel("Step") + ax.set_ylabel("Intermediate Value") + cmap = plt.get_cmap("tab20") # Use tab20 colormap for multiple line plots. 
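+ # Usage sketch (assumes a study created elsewhere as `study` whose objective
+ # reports intermediate values via trial.report(value, step)):
+ #     ax = plot_intermediate_values(study)
+ #     plt.show()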
+ + trial_infos = info.trial_infos + + for i, tinfo in enumerate(trial_infos): + ax.plot( + tuple((x for x, _ in tinfo.sorted_intermediate_values)), + tuple((y for _, y in tinfo.sorted_intermediate_values)), + color=cmap(i) if tinfo.feasible else "#CCCCCC", + marker=".", + alpha=0.7, + label="Trial{}".format(tinfo.trial_number), + ) + + if len(trial_infos) >= 2: + ax.legend(bbox_to_anchor=(1.05, 1), loc="upper left", borderaxespad=0.0) + + return ax diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/matplotlib/_matplotlib_imports.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/matplotlib/_matplotlib_imports.py new file mode 100644 index 0000000000000000000000000000000000000000..1ecece11e5ff1bcbedf7aa140f5a7494d71e8363 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/matplotlib/_matplotlib_imports.py @@ -0,0 +1,43 @@ +from packaging import version + +from optuna._imports import try_import + + +with try_import() as _imports: + # TODO(ytknzw): Add specific imports. + import matplotlib + from matplotlib import __version__ as matplotlib_version + from matplotlib import pyplot as plt + from matplotlib.axes._axes import Axes + from matplotlib.collections import LineCollection + from matplotlib.collections import PathCollection + from matplotlib.colors import Colormap + from matplotlib.contour import ContourSet + from matplotlib.dates import DateFormatter + from matplotlib.figure import Figure + from mpl_toolkits.mplot3d.axes3d import Axes3D + + # TODO(ytknzw): Set precise version. + if version.parse(matplotlib_version) < version.parse("3.0.0"): + raise ImportError( + "Your version of Matplotlib is " + matplotlib_version + " . " + "Please install Matplotlib version 3.0.0 or higher. " + "Matplotlib can be installed by executing `$ pip install -U matplotlib>=3.0.0`. " + "For further information, please refer to the installation guide of Matplotlib. 
", + name="matplotlib", + ) + +__all__ = [ + "_imports", + "matplotlib", + "matplotlib_version", + "plt", + "Axes", + "Axes3D", + "Colormap", + "ContourSet", + "DateFormatter", + "Figure", + "LineCollection", + "PathCollection", +] diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/matplotlib/_optimization_history.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/matplotlib/_optimization_history.py new file mode 100644 index 0000000000000000000000000000000000000000..f34aaca00b525599c0b971229efbc38d824f7f58 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/matplotlib/_optimization_history.py @@ -0,0 +1,150 @@ +from __future__ import annotations + +from collections.abc import Callable +from collections.abc import Sequence + +import numpy as np + +from optuna._experimental import experimental_func +from optuna.logging import get_logger +from optuna.study import Study +from optuna.trial import FrozenTrial +from optuna.visualization._optimization_history import _get_optimization_history_info_list +from optuna.visualization._optimization_history import _OptimizationHistoryInfo +from optuna.visualization._optimization_history import _ValueState +from optuna.visualization.matplotlib._matplotlib_imports import _imports + + +if _imports.is_successful(): + from optuna.visualization.matplotlib._matplotlib_imports import Axes + from optuna.visualization.matplotlib._matplotlib_imports import plt + +_logger = get_logger(__name__) + + +@experimental_func("2.2.0") +def plot_optimization_history( + study: Study | Sequence[Study], + *, + target: Callable[[FrozenTrial], float] | None = None, + target_name: str = "Objective Value", + error_bar: bool = False, +) -> "Axes": + """Plot optimization history of all trials in a study with Matplotlib. + + .. seealso:: + Please refer to :func:`optuna.visualization.plot_optimization_history` for an example. + + .. note:: + You need to adjust the size of the plot by yourself using ``plt.tight_layout()`` or + ``plt.savefig(IMAGE_NAME, bbox_inches='tight')``. + + Args: + study: + A :class:`~optuna.study.Study` object whose trials are plotted for their target values. + You can pass multiple studies if you want to compare those optimization histories. + + target: + A function to specify the value to display. If it is :obj:`None` and ``study`` is being + used for single-objective optimization, the objective values are plotted. + + .. note:: + Specify this argument if ``study`` is being used for multi-objective optimization. + target_name: + Target's name to display on the axis label and the legend. + + error_bar: + A flag to show the error bar. + + Returns: + A :class:`matplotlib.axes.Axes` object. + """ + + _imports.check() + + info_list = _get_optimization_history_info_list(study, target, target_name, error_bar) + return _get_optimization_history_plot(info_list, target_name) + + +def _get_optimization_history_plot( + info_list: list[_OptimizationHistoryInfo], + target_name: str, +) -> "Axes": + # Set up the graph style. + plt.style.use("ggplot") # Use ggplot style sheet for similar outputs to plotly. + _, ax = plt.subplots() + ax.set_title("Optimization History Plot") + ax.set_xlabel("Trial") + ax.set_ylabel(target_name) + cmap = plt.get_cmap("tab10") # Use tab10 colormap for similar outputs to plotly. 
+ + for i, (trial_numbers, values_info, best_values_info) in enumerate(info_list): + if values_info.stds is not None: + if ( + _ValueState.Infeasible in values_info.states + or _ValueState.Incomplete in values_info.states + ): + _logger.warning( + "Your study contains infeasible trials. " + "In optimization history plot, " + "error bars are calculated for only feasible trial values." + ) + feasible_trial_numbers = trial_numbers + feasible_trial_values = values_info.values + plt.errorbar( + x=feasible_trial_numbers, + y=feasible_trial_values, + yerr=values_info.stds, + capsize=5, + fmt="o", + color="tab:blue", + ) + infeasible_trial_numbers: list[int] = [] + infeasible_trial_values: list[float] = [] + else: + feasible_trial_numbers = [ + n for n, s in zip(trial_numbers, values_info.states) if s == _ValueState.Feasible + ] + infeasible_trial_numbers = [ + n for n, s in zip(trial_numbers, values_info.states) if s == _ValueState.Infeasible + ] + feasible_trial_values = [] + for num in feasible_trial_numbers: + feasible_trial_values.append(values_info.values[num]) + infeasible_trial_values = [] + for num in infeasible_trial_numbers: + infeasible_trial_values.append(values_info.values[num]) + ax.scatter( + x=feasible_trial_numbers, + y=feasible_trial_values, + color=cmap(0) if len(info_list) == 1 else cmap(2 * i), + alpha=1, + label=values_info.label_name, + ) + + if best_values_info is not None: + ax.plot( + trial_numbers, + best_values_info.values, + color=cmap(3) if len(info_list) == 1 else cmap(2 * i + 1), + alpha=0.5, + label=best_values_info.label_name, + ) + if best_values_info.stds is not None: + lower = np.array(best_values_info.values) - np.array(best_values_info.stds) + upper = np.array(best_values_info.values) + np.array(best_values_info.stds) + ax.fill_between( + x=trial_numbers, + y1=lower, + y2=upper, + color="tab:red", + alpha=0.4, + ) + ax.legend() + ax.scatter( + x=infeasible_trial_numbers, + y=infeasible_trial_values, + color="#cccccc", + ) + plt.legend(bbox_to_anchor=(1.05, 1.0), loc="upper left") + return ax diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/matplotlib/_parallel_coordinate.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/matplotlib/_parallel_coordinate.py new file mode 100644 index 0000000000000000000000000000000000000000..9497364a7469dc9f82122f0d06da21726223a936 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/matplotlib/_parallel_coordinate.py @@ -0,0 +1,126 @@ +from __future__ import annotations + +from collections.abc import Callable + +import numpy as np + +from optuna._experimental import experimental_func +from optuna.study import Study +from optuna.trial import FrozenTrial +from optuna.visualization._parallel_coordinate import _get_parallel_coordinate_info +from optuna.visualization._parallel_coordinate import _ParallelCoordinateInfo +from optuna.visualization.matplotlib._matplotlib_imports import _imports + + +if _imports.is_successful(): + from optuna.visualization.matplotlib._matplotlib_imports import Axes + from optuna.visualization.matplotlib._matplotlib_imports import LineCollection + from optuna.visualization.matplotlib._matplotlib_imports import plt + + +@experimental_func("2.2.0") +def plot_parallel_coordinate( + study: Study, + params: list[str] | None = None, + *, + target: Callable[[FrozenTrial], float] | None = None, + target_name: str = "Objective 
Value", +) -> "Axes": + """Plot the high-dimensional parameter relationships in a study with Matplotlib. + + Note that, if a parameter contains missing values, a trial with missing values is not plotted. + + .. seealso:: + Please refer to :func:`optuna.visualization.plot_parallel_coordinate` for an example. + + Args: + study: + A :class:`~optuna.study.Study` object whose trials are plotted for their target values. + params: + Parameter list to visualize. The default is all parameters. + target: + A function to specify the value to display. If it is :obj:`None` and ``study`` is being + used for single-objective optimization, the objective values are plotted. + + .. note:: + Specify this argument if ``study`` is being used for multi-objective optimization. + target_name: + Target's name to display on the axis label and the legend. + + Returns: + A :class:`matplotlib.axes.Axes` object. + + .. note:: + The colormap is reversed when the ``target`` argument isn't :obj:`None` or ``direction`` + of :class:`~optuna.study.Study` is ``minimize``. + """ + + _imports.check() + info = _get_parallel_coordinate_info(study, params, target, target_name) + return _get_parallel_coordinate_plot(info) + + +def _get_parallel_coordinate_plot(info: _ParallelCoordinateInfo) -> "Axes": + reversescale = info.reverse_scale + target_name = info.target_name + + # Set up the graph style. + fig, ax = plt.subplots() + cmap = plt.get_cmap("Blues_r" if reversescale else "Blues") + ax.set_title("Parallel Coordinate Plot") + ax.spines["top"].set_visible(False) + ax.spines["bottom"].set_visible(False) + + # Prepare data for plotting. + if len(info.dims_params) == 0 or len(info.dim_objective.values) == 0: + return ax + + obj_min = info.dim_objective.range[0] + obj_max = info.dim_objective.range[1] + obj_w = obj_max - obj_min + dims_obj_base = [[o] for o in info.dim_objective.values] + for dim in info.dims_params: + p_min = dim.range[0] + p_max = dim.range[1] + p_w = p_max - p_min + + if p_w == 0.0: + center = obj_w / 2 + obj_min + for i in range(len(dim.values)): + dims_obj_base[i].append(center) + else: + for i, v in enumerate(dim.values): + dims_obj_base[i].append((v - p_min) / p_w * obj_w + obj_min) + + # Draw multiple line plots and axes. 
+ # Ref: https://stackoverflow.com/a/50029441 + n_params = len(info.dims_params) + ax.set_xlim(0, n_params) + ax.set_ylim(info.dim_objective.range[0], info.dim_objective.range[1]) + xs = [range(n_params + 1) for _ in range(len(dims_obj_base))] + segments = [np.column_stack([x, y]) for x, y in zip(xs, dims_obj_base)] + lc = LineCollection(segments, cmap=cmap) + lc.set_array(np.asarray(info.dim_objective.values)) + axcb = fig.colorbar(lc, pad=0.1, ax=ax) + axcb.set_label(target_name) + var_names = [info.dim_objective.label] + [dim.label for dim in info.dims_params] + plt.xticks(range(n_params + 1), var_names, rotation=330) + + for i, dim in enumerate(info.dims_params): + ax2 = ax.twinx() + if dim.is_log: + ax2.set_ylim(np.power(10, dim.range[0]), np.power(10, dim.range[1])) + ax2.set_yscale("log") + else: + ax2.set_ylim(dim.range[0], dim.range[1]) + ax2.spines["top"].set_visible(False) + ax2.spines["bottom"].set_visible(False) + ax2.xaxis.set_visible(False) + ax2.spines["right"].set_position(("axes", (i + 1) / n_params)) + if dim.is_cat: + ax2.set_yticks(dim.tickvals) + ax2.set_yticklabels(dim.ticktext) + + ax.add_collection(lc) + + return ax diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/matplotlib/_param_importances.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/matplotlib/_param_importances.py new file mode 100644 index 0000000000000000000000000000000000000000..e879a77bff0bcde1a6a5daa06d2f1c3fe639621f --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/matplotlib/_param_importances.py @@ -0,0 +1,130 @@ +from __future__ import annotations + +from collections.abc import Callable + +import numpy as np + +from optuna._experimental import experimental_func +from optuna.importance._base import BaseImportanceEvaluator +from optuna.logging import get_logger +from optuna.study import Study +from optuna.trial import FrozenTrial +from optuna.visualization._param_importances import _get_importances_infos +from optuna.visualization._param_importances import _ImportancesInfo +from optuna.visualization.matplotlib._matplotlib_imports import _imports + + +if _imports.is_successful(): + from optuna.visualization.matplotlib._matplotlib_imports import Axes + from optuna.visualization.matplotlib._matplotlib_imports import Figure + from optuna.visualization.matplotlib._matplotlib_imports import plt + + +_logger = get_logger(__name__) + + +AXES_PADDING_RATIO = 1.05 + + +@experimental_func("2.2.0") +def plot_param_importances( + study: Study, + evaluator: BaseImportanceEvaluator | None = None, + params: list[str] | None = None, + *, + target: Callable[[FrozenTrial], float] | None = None, + target_name: str = "Objective Value", +) -> "Axes": + """Plot hyperparameter importances with Matplotlib. + + .. seealso:: + Please refer to :func:`optuna.visualization.plot_param_importances` for an example. + + Args: + study: + An optimized study. + evaluator: + An importance evaluator object that specifies which algorithm to base the importance + assessment on. + Defaults to + :class:`~optuna.importance.FanovaImportanceEvaluator`. + params: + A list of names of parameters to assess. + If :obj:`None`, all parameters that are present in all of the completed trials are + assessed. + target: + A function to specify the value to display. 
If it is :obj:`None` and ``study`` is being + used for single-objective optimization, the objective values are plotted. + For multi-objective optimization, all objectives will be plotted if ``target`` + is :obj:`None`. + + .. note:: + This argument can be used to specify which objective to plot if ``study`` is being + used for multi-objective optimization. For example, to get only the hyperparameter + importance of the first objective, use ``target=lambda t: t.values[0]`` for the + target parameter. + target_name: + Target's name to display on the axis label. Names set via + :meth:`~optuna.study.Study.set_metric_names` will be used if ``target`` is :obj:`None`, + overriding this argument. + + Returns: + A :class:`matplotlib.axes.Axes` object. + """ + + _imports.check() + importances_infos = _get_importances_infos(study, evaluator, params, target, target_name) + return _get_importances_plot(importances_infos) + + +def _get_importances_plot(infos: tuple[_ImportancesInfo, ...]) -> "Axes": + # Set up the graph style. + plt.style.use("ggplot") # Use ggplot style sheet for similar outputs to plotly. + fig, ax = plt.subplots() + ax.set_title("Hyperparameter Importances", loc="left") + ax.set_xlabel("Hyperparameter Importance") + ax.set_ylabel("Hyperparameter") + height = 0.8 / len(infos) # Default height split between objectives. + + for objective_id, info in enumerate(infos): + param_names = info.param_names + pos = np.arange(len(param_names)) + offset = height * objective_id + importance_values = info.importance_values + + if not importance_values: + continue + + # Draw horizontal bars. + ax.barh( + pos + offset, + importance_values, + height=height, + align="center", + label=info.target_name, + color=plt.get_cmap("tab20c")(objective_id), + ) + + _set_bar_labels(info, fig, ax, offset) + ax.set_yticks(pos + offset / 2, param_names) + + ax.legend(loc="best") + return ax + + +def _set_bar_labels(info: _ImportancesInfo, fig: "Figure", ax: "Axes", offset: float) -> None: + # Figure canvas does not necessarily have a get_renderer. + assert hasattr(fig.canvas, "get_renderer") + renderer = fig.canvas.get_renderer() + for idx, (val, label) in enumerate(zip(info.importance_values, info.importance_labels)): + text = ax.text(val, idx + offset, label, va="center") + + # Sometimes horizontal axis needs to be re-scaled + # to avoid text going over plot area. 
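+ # For illustration (hypothetical numbers): if the rightmost label ends at
+ # x = 0.95 in data coordinates while the current xlim is (0, 0.9), the limit
+ # below is widened to AXES_PADDING_RATIO * 0.95 = 1.05 * 0.95, roughly 1.0,
+ # so the text stays inside the axes.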
+ bbox = text.get_window_extent(renderer) + bbox = bbox.transformed(ax.transData.inverted()) + _, plot_xmax = ax.get_xlim() + bbox_xmax = bbox.xmax + + if bbox_xmax > plot_xmax: + ax.set_xlim(xmax=AXES_PADDING_RATIO * bbox_xmax) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/matplotlib/_pareto_front.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/matplotlib/_pareto_front.py new file mode 100644 index 0000000000000000000000000000000000000000..20bcc4a41e5f6142ffebe536fbf52994d602ce2b --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/matplotlib/_pareto_front.py @@ -0,0 +1,190 @@ +from __future__ import annotations + +from collections.abc import Callable +from collections.abc import Sequence + +from optuna._experimental import experimental_func +from optuna.study import Study +from optuna.trial import FrozenTrial +from optuna.visualization._pareto_front import _get_pareto_front_info +from optuna.visualization._pareto_front import _ParetoFrontInfo +from optuna.visualization.matplotlib._matplotlib_imports import _imports + + +if _imports.is_successful(): + from optuna.visualization.matplotlib._matplotlib_imports import Axes + from optuna.visualization.matplotlib._matplotlib_imports import Axes3D + from optuna.visualization.matplotlib._matplotlib_imports import plt + + +@experimental_func("2.8.0") +def plot_pareto_front( + study: Study, + *, + target_names: list[str] | None = None, + include_dominated_trials: bool = True, + axis_order: list[int] | None = None, + constraints_func: Callable[[FrozenTrial], Sequence[float]] | None = None, + targets: Callable[[FrozenTrial], Sequence[float]] | None = None, +) -> "Axes": + """Plot the Pareto front of a study. + + .. seealso:: + Please refer to :func:`optuna.visualization.plot_pareto_front` for an example. + + Args: + study: + A :class:`~optuna.study.Study` object whose trials are plotted for their objective + values. ``study.n_objectives`` must be either 2 or 3 when ``targets`` is :obj:`None`. + target_names: + Objective name list used as the axis titles. If :obj:`None` is specified, + "Objective {objective_index}" is used instead. If ``targets`` is specified + for a study that does not contain any completed trial, + ``target_name`` must be specified. + include_dominated_trials: + A flag to include all dominated trial's objective values. + axis_order: + A list of indices indicating the axis order. If :obj:`None` is specified, + default order is used. ``axis_order`` and ``targets`` cannot be used at the same time. + + .. warning:: + Deprecated in v3.0.0. This feature will be removed in the future. The removal of + this feature is currently scheduled for v5.0.0, but this schedule is subject to + change. See https://github.com/optuna/optuna/releases/tag/v3.0.0. + constraints_func: + An optional function that computes the objective constraints. It must take a + :class:`~optuna.trial.FrozenTrial` and return the constraints. The return value must + be a sequence of :obj:`float` s. A value strictly larger than 0 means that a + constraint is violated. A value equal to or smaller than 0 is considered feasible. + This specification is the same as in, for example, + :class:`~optuna.samplers.NSGAIISampler`. + + If given, trials are classified into three categories: feasible and best, feasible but + non-best, and infeasible. Categories are shown in different colors. 
Here, whether a + trial is best (on Pareto front) or not is determined ignoring all infeasible trials. + + .. warning:: + Deprecated in v4.0.0. This feature will be removed in the future. The removal of + this feature is currently scheduled for v6.0.0, but this schedule is subject to + change. See https://github.com/optuna/optuna/releases/tag/v4.0.0. + targets: + A function that returns a tuple of target values to display. + The argument to this function is :class:`~optuna.trial.FrozenTrial`. + ``targets`` must be :obj:`None` or return 2 or 3 values. + ``axis_order`` and ``targets`` cannot be used at the same time. + If the number of objectives is neither 2 nor 3, ``targets`` must be specified. + + .. note:: + Added in v3.0.0 as an experimental feature. The interface may change in newer + versions without prior notice. + See https://github.com/optuna/optuna/releases/tag/v3.0.0. + + Returns: + A :class:`matplotlib.axes.Axes` object. + """ + + _imports.check() + + info = _get_pareto_front_info( + study, target_names, include_dominated_trials, axis_order, constraints_func, targets + ) + return _get_pareto_front_plot(info) + + +def _get_pareto_front_plot(info: _ParetoFrontInfo) -> "Axes": + if info.n_targets == 2: + return _get_pareto_front_2d(info) + elif info.n_targets == 3: + return _get_pareto_front_3d(info) + else: + assert False, "Must not reach here" + + +def _get_pareto_front_2d(info: _ParetoFrontInfo) -> "Axes": + # Set up the graph style. + plt.style.use("ggplot") # Use ggplot style sheet for similar outputs to plotly. + _, ax = plt.subplots() + ax.set_title("Pareto-front Plot") + cmap = plt.get_cmap("tab10") # Use tab10 colormap for similar outputs to plotly. + + ax.set_xlabel(info.target_names[info.axis_order[0]]) + ax.set_ylabel(info.target_names[info.axis_order[1]]) + + trial_label: str = "Trial" + if len(info.infeasible_trials_with_values) > 0: + ax.scatter( + x=[values[info.axis_order[0]] for _, values in info.infeasible_trials_with_values], + y=[values[info.axis_order[1]] for _, values in info.infeasible_trials_with_values], + color="#cccccc", + label="Infeasible Trial", + ) + trial_label = "Feasible Trial" + if len(info.non_best_trials_with_values) > 0: + ax.scatter( + x=[values[info.axis_order[0]] for _, values in info.non_best_trials_with_values], + y=[values[info.axis_order[1]] for _, values in info.non_best_trials_with_values], + color=cmap(0), + label=trial_label, + ) + if len(info.best_trials_with_values) > 0: + ax.scatter( + x=[values[info.axis_order[0]] for _, values in info.best_trials_with_values], + y=[values[info.axis_order[1]] for _, values in info.best_trials_with_values], + color=cmap(3), + label="Best Trial", + ) + + if info.non_best_trials_with_values is not None and ax.has_data(): + ax.legend() + + return ax + + +def _get_pareto_front_3d(info: _ParetoFrontInfo) -> "Axes": + # Set up the graph style. + plt.style.use("ggplot") # Use ggplot style sheet for similar outputs to plotly. + fig = plt.figure() + ax: Axes3D = fig.add_subplot(projection="3d") + ax.set_title("Pareto-front Plot") + cmap = plt.get_cmap("tab10") # Use tab10 colormap for similar outputs to plotly. 
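+ # Usage sketch (hypothetical three-objective study `study`; a 3D axes like the
+ # one set up here is returned in that case):
+ #     ax = plot_pareto_front(study, target_names=["latency", "size", "error"])
+ #     plt.show()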
+ + ax.set_xlabel(info.target_names[info.axis_order[0]]) + ax.set_ylabel(info.target_names[info.axis_order[1]]) + ax.set_zlabel(info.target_names[info.axis_order[2]]) + + trial_label: str = "Trial" + if ( + info.infeasible_trials_with_values is not None + and len(info.infeasible_trials_with_values) > 0 + ): + ax.scatter( + xs=[values[info.axis_order[0]] for _, values in info.infeasible_trials_with_values], + ys=[values[info.axis_order[1]] for _, values in info.infeasible_trials_with_values], + zs=[values[info.axis_order[2]] for _, values in info.infeasible_trials_with_values], + color="#cccccc", + label="Infeasible Trial", + ) + trial_label = "Feasible Trial" + + if info.non_best_trials_with_values is not None and len(info.non_best_trials_with_values) > 0: + ax.scatter( + xs=[values[info.axis_order[0]] for _, values in info.non_best_trials_with_values], + ys=[values[info.axis_order[1]] for _, values in info.non_best_trials_with_values], + zs=[values[info.axis_order[2]] for _, values in info.non_best_trials_with_values], + color=cmap(0), + label=trial_label, + ) + + if info.best_trials_with_values is not None and len(info.best_trials_with_values): + ax.scatter( + xs=[values[info.axis_order[0]] for _, values in info.best_trials_with_values], + ys=[values[info.axis_order[1]] for _, values in info.best_trials_with_values], + zs=[values[info.axis_order[2]] for _, values in info.best_trials_with_values], + color=cmap(3), + label="Best Trial", + ) + + if info.non_best_trials_with_values is not None and ax.has_data(): + ax.legend() + + return ax diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/matplotlib/_rank.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/matplotlib/_rank.py new file mode 100644 index 0000000000000000000000000000000000000000..b651f50f164b794b03128209c3301b9fecd9e3d3 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/matplotlib/_rank.py @@ -0,0 +1,128 @@ +from __future__ import annotations + +from collections.abc import Callable + +from optuna._experimental import experimental_func +from optuna.study import Study +from optuna.trial import FrozenTrial +from optuna.visualization._rank import _get_rank_info +from optuna.visualization._rank import _get_tick_info +from optuna.visualization._rank import _RankPlotInfo +from optuna.visualization._rank import _RankSubplotInfo +from optuna.visualization.matplotlib._matplotlib_imports import _imports + + +if _imports.is_successful(): + from optuna.visualization.matplotlib._matplotlib_imports import Axes + from optuna.visualization.matplotlib._matplotlib_imports import PathCollection + from optuna.visualization.matplotlib._matplotlib_imports import plt + + +@experimental_func("3.2.0") +def plot_rank( + study: Study, + params: list[str] | None = None, + *, + target: Callable[[FrozenTrial], float] | None = None, + target_name: str = "Objective Value", +) -> "Axes": + """Plot parameter relations as scatter plots with colors indicating ranks of target value. + + Note that trials missing the specified parameters will not be plotted. + + .. seealso:: + Please refer to :func:`optuna.visualization.plot_rank` for an example. + + Args: + study: + A :class:`~optuna.study.Study` object whose trials are plotted for their target values. + params: + Parameter list to visualize. The default is all parameters. + target: + A function to specify the value to display. 
If it is :obj:`None` and ``study`` is being + used for single-objective optimization, the objective values are plotted. + + .. note:: + Specify this argument if ``study`` is being used for multi-objective optimization. + target_name: + Target's name to display on the color bar. + + Returns: + A :class:`matplotlib.axes.Axes` object. + """ + + _imports.check() + info = _get_rank_info(study, params, target, target_name) + return _get_rank_plot(info) + + +def _get_rank_plot( + info: _RankPlotInfo, +) -> "Axes": + params = info.params + sub_plot_infos = info.sub_plot_infos + + plt.style.use("ggplot") # Use ggplot style sheet for similar outputs to plotly. + + title = f"Rank ({info.target_name})" + + n_params = len(params) + if n_params == 0: + _, ax = plt.subplots() + ax.set_title(title) + return ax + if n_params == 1 or n_params == 2: + fig, axs = plt.subplots() + axs.set_title(title) + pc = _add_rank_subplot(axs, sub_plot_infos[0][0]) + else: + fig, axs = plt.subplots(n_params, n_params) + fig.suptitle(title) + + for x_i in range(n_params): + for y_i in range(n_params): + ax = axs[x_i, y_i] + # Set the x or y label only if the subplot is in the edge of the overall figure. + pc = _add_rank_subplot( + ax, + sub_plot_infos[x_i][y_i], + set_x_label=x_i == (n_params - 1), + set_y_label=y_i == 0, + ) + + tick_info = _get_tick_info(info.zs) + + pc.set_cmap(plt.get_cmap("RdYlBu_r")) + cbar = fig.colorbar(pc, ax=axs, ticks=tick_info.coloridxs) + cbar.ax.set_yticklabels(tick_info.text) + # NOTE(Alnusjaponica): The class of cbar.outline inherits matplotlib.patches.Patch, + # which has set_edgecolor method. However, mypy does not recognize it. + cbar.outline.set_edgecolor("gray") # type: ignore[operator] + return axs + + +def _add_rank_subplot( + ax: "Axes", info: _RankSubplotInfo, set_x_label: bool = True, set_y_label: bool = True +) -> "PathCollection": + if set_x_label: + ax.set_xlabel(info.xaxis.name) + if set_y_label: + ax.set_ylabel(info.yaxis.name) + + if not info.xaxis.is_cat: + ax.set_xlim(info.xaxis.range[0], info.xaxis.range[1]) + if not info.yaxis.is_cat: + ax.set_ylim(info.yaxis.range[0], info.yaxis.range[1]) + + if info.xaxis.is_log: + ax.set_xscale("log") + + if info.yaxis.is_log: + ax.set_yscale("log") + + return ax.scatter( + x=[str(x) for x in info.xs] if info.xaxis.is_cat else info.xs, + y=[str(y) for y in info.ys] if info.yaxis.is_cat else info.ys, + c=info.colors / 255, + edgecolors="grey", + ) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/matplotlib/_slice.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/matplotlib/_slice.py new file mode 100644 index 0000000000000000000000000000000000000000..0747b062f674d213d711fa9eb5d5d100a194b881 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/matplotlib/_slice.py @@ -0,0 +1,184 @@ +from __future__ import annotations + +from collections import defaultdict +from collections.abc import Callable +import math +from typing import Any + +from optuna._experimental import experimental_func +from optuna.study import Study +from optuna.trial import FrozenTrial +from optuna.visualization._slice import _get_slice_plot_info +from optuna.visualization._slice import _PlotValues +from optuna.visualization._slice import _SlicePlotInfo +from optuna.visualization._slice import _SliceSubplotInfo +from optuna.visualization.matplotlib._matplotlib_imports import _imports + + +if 
_imports.is_successful(): + from optuna.visualization.matplotlib._matplotlib_imports import Axes + from optuna.visualization.matplotlib._matplotlib_imports import Colormap + from optuna.visualization.matplotlib._matplotlib_imports import matplotlib + from optuna.visualization.matplotlib._matplotlib_imports import PathCollection + from optuna.visualization.matplotlib._matplotlib_imports import plt + + +@experimental_func("2.2.0") +def plot_slice( + study: Study, + params: list[str] | None = None, + *, + target: Callable[[FrozenTrial], float] | None = None, + target_name: str = "Objective Value", +) -> "Axes": + """Plot the parameter relationship as slice plot in a study with Matplotlib. + + .. seealso:: + Please refer to :func:`optuna.visualization.plot_slice` for an example. + + Args: + study: + A :class:`~optuna.study.Study` object whose trials are plotted for their target values. + params: + Parameter list to visualize. The default is all parameters. + target: + A function to specify the value to display. If it is :obj:`None` and ``study`` is being + used for single-objective optimization, the objective values are plotted. + + .. note:: + Specify this argument if ``study`` is being used for multi-objective optimization. + target_name: + Target's name to display on the axis label. + + + Returns: + A :class:`matplotlib.axes.Axes` object. + """ + + _imports.check() + return _get_slice_plot(_get_slice_plot_info(study, params, target, target_name)) + + +def _get_slice_plot(info: _SlicePlotInfo) -> "Axes": + if len(info.subplots) == 0: + _, ax = plt.subplots() + return ax + + # Set up the graph style. + cmap = plt.get_cmap("Blues") + padding_ratio = 0.05 + plt.style.use("ggplot") # Use ggplot style sheet for similar outputs to plotly. + + if len(info.subplots) == 1: + # Set up the graph style. + fig, axs = plt.subplots() + axs.set_title("Slice Plot") + + # Draw a scatter plot. + sc = _generate_slice_subplot(info.subplots[0], axs, cmap, padding_ratio, info.target_name) + else: + # Set up the graph style. + min_figwidth = matplotlib.rcParams["figure.figsize"][0] / 2 + fighight = matplotlib.rcParams["figure.figsize"][1] + # Ensure that each subplot has a minimum width without relying on auto-sizing. + fig, axs = plt.subplots( + 1, + len(info.subplots), + sharey=True, + figsize=(min_figwidth * len(info.subplots), fighight), + ) + fig.suptitle("Slice Plot") + + # Draw scatter plots. 
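+ # For illustration (assuming matplotlib's default figure.figsize of 6.4 x 4.8):
+ # min_figwidth above is 3.2, so a study with four plotted parameters gets a
+ # 12.8-inch-wide figure, one subplot per parameter, each drawn by the loop below.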
+ for i, subplot in enumerate(info.subplots): + ax = axs[i] + sc = _generate_slice_subplot(subplot, ax, cmap, padding_ratio, info.target_name) + + axcb = fig.colorbar(sc, ax=axs) + axcb.set_label("Trial") + + return axs + + +def _generate_slice_subplot( + subplot_info: _SliceSubplotInfo, + ax: "Axes", + cmap: "Colormap", + padding_ratio: float, + target_name: str, +) -> "PathCollection": + ax.set(xlabel=subplot_info.param_name, ylabel=target_name) + scale = None + + feasible = _PlotValues([], [], []) + infeasible = _PlotValues([], [], []) + for x, y, num, c in zip( + subplot_info.x, subplot_info.y, subplot_info.trial_numbers, subplot_info.constraints + ): + if x is not None or x != "None" or y is not None or y != "None": + if c: + feasible.x.append(x) + feasible.y.append(y) + feasible.trial_numbers.append(num) + else: + infeasible.x.append(x) + infeasible.y.append(y) + infeasible.trial_numbers.append(num) + if subplot_info.is_log: + ax.set_xscale("log") + scale = "log" + if subplot_info.is_numerical: + feasible_x = feasible.x + feasible_y = feasible.y + feasible_c = feasible.trial_numbers + infeasible_x = infeasible.x + infeasible_y = infeasible.y + else: + feasible_x, feasible_y, feasible_c = _get_categorical_plot_values(subplot_info, feasible) + infeasible_x, infeasible_y, _ = _get_categorical_plot_values(subplot_info, infeasible) + scale = "categorical" + xlim = _calc_lim_with_padding(feasible_x + infeasible_x, padding_ratio, scale) + ax.set_xlim(xlim[0], xlim[1]) + sc = ax.scatter(feasible_x, feasible_y, c=feasible_c, cmap=cmap, edgecolors="grey") + ax.scatter(infeasible_x, infeasible_y, c="#cccccc", label="Infeasible Trial") + ax.label_outer() + + return sc + + +def _get_categorical_plot_values( + subplot_info: _SliceSubplotInfo, values: _PlotValues +) -> tuple[list[Any], list[float], list[int]]: + assert subplot_info.x_labels is not None + value_x = [] + value_y = [] + value_c = [] + points_dict = defaultdict(list) + for x, y, number in zip(values.x, values.y, values.trial_numbers): + points_dict[x].append((y, number)) + for x_label in subplot_info.x_labels: + for y, number in points_dict[x_label]: + value_x.append(str(x_label)) + value_y.append(y) + value_c.append(number) + return value_x, value_y, value_c + + +def _calc_lim_with_padding( + values: list[Any], padding_ratio: float, scale: str | None +) -> tuple[float, float]: + value_max = max(values) + value_min = min(values) + if scale == "log": + padding = (math.log10(value_max) - math.log10(value_min)) * padding_ratio + return ( + math.pow(10, math.log10(value_min) - padding), + math.pow(10, math.log10(value_max) + padding), + ) + elif scale == "categorical": + width = len(set(values)) - 1 + padding = width * padding_ratio + return -padding, width + padding + else: + padding = (value_max - value_min) * padding_ratio + return value_min - padding, value_max + padding diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/matplotlib/_terminator_improvement.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/matplotlib/_terminator_improvement.py new file mode 100644 index 0000000000000000000000000000000000000000..9e2939d304d7ca0af21ae7939ba2e1484d1dc3a7 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/matplotlib/_terminator_improvement.py @@ -0,0 +1,117 @@ +from __future__ import annotations + +from optuna._experimental import experimental_func +from 
optuna.logging import get_logger +from optuna.study.study import Study +from optuna.terminator import BaseErrorEvaluator +from optuna.terminator import BaseImprovementEvaluator +from optuna.terminator.improvement.evaluator import DEFAULT_MIN_N_TRIALS +from optuna.visualization._terminator_improvement import _get_improvement_info +from optuna.visualization._terminator_improvement import _get_y_range +from optuna.visualization._terminator_improvement import _ImprovementInfo +from optuna.visualization.matplotlib._matplotlib_imports import _imports + + +if _imports.is_successful(): + from optuna.visualization.matplotlib._matplotlib_imports import Axes + from optuna.visualization.matplotlib._matplotlib_imports import plt + +_logger = get_logger(__name__) + + +PADDING_RATIO_Y = 0.05 +ALPHA = 0.25 + + +@experimental_func("3.2.0") +def plot_terminator_improvement( + study: Study, + plot_error: bool = False, + improvement_evaluator: BaseImprovementEvaluator | None = None, + error_evaluator: BaseErrorEvaluator | None = None, + min_n_trials: int = DEFAULT_MIN_N_TRIALS, +) -> "Axes": + """Plot the potentials for future objective improvement. + + This function visualizes the objective improvement potentials, evaluated + with ``improvement_evaluator``. + It helps to determine whether we should continue the optimization or not. + You can also plot the error evaluated with + ``error_evaluator`` if the ``plot_error`` argument is set to :obj:`True`. + Note that this function may take some time to compute + the improvement potentials. + + .. seealso:: + Please refer to :func:`optuna.visualization.plot_terminator_improvement`. + + Args: + study: + A :class:`~optuna.study.Study` object whose trials are plotted for their improvement. + plot_error: + A flag to show the error. If it is set to :obj:`True`, errors + evaluated by ``error_evaluator`` are also plotted as line graph. + Defaults to :obj:`False`. + improvement_evaluator: + An object that evaluates the improvement of the objective function. + Default to :class:`~optuna.terminator.RegretBoundEvaluator`. + error_evaluator: + An object that evaluates the error inherent in the objective function. + Default to :class:`~optuna.terminator.CrossValidationErrorEvaluator`. + min_n_trials: + The minimum number of trials before termination is considered. + Terminator improvements for trials below this value are + shown in a lighter color. Defaults to ``20``. + + Returns: + A :class:`matplotlib.axes.Axes` object. + """ + _imports.check() + + info = _get_improvement_info(study, plot_error, improvement_evaluator, error_evaluator) + return _get_improvement_plot(info, min_n_trials) + + +def _get_improvement_plot(info: _ImprovementInfo, min_n_trials: int) -> "Axes": + n_trials = len(info.trial_numbers) + + # Set up the graph style. + plt.style.use("ggplot") # Use ggplot style sheet for similar outputs to plotly. + _, ax = plt.subplots() + ax.set_title("Terminator Improvement Plot") + ax.set_xlabel("Trial") + ax.set_ylabel("Terminator Improvement") + cmap = plt.get_cmap("tab10") # Use tab10 colormap for similar outputs to plotly. 
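+ # Usage sketch (hypothetical study `study` whose trials carry the
+ # cross-validation scores required by the default error evaluator):
+ #     ax = plot_terminator_improvement(study, plot_error=True)
+ #     plt.show()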
+ + if n_trials == 0: + _logger.warning("There are no complete trials.") + return ax + + ax.plot( + info.trial_numbers[: min_n_trials + 1], + info.improvements[: min_n_trials + 1], + marker="o", + color=cmap(0), + alpha=ALPHA, + label="Terminator Improvement" if n_trials <= min_n_trials else None, + ) + + if n_trials > min_n_trials: + ax.plot( + info.trial_numbers[min_n_trials:], + info.improvements[min_n_trials:], + marker="o", + color=cmap(0), + label="Terminator Improvement", + ) + + if info.errors is not None: + ax.plot( + info.trial_numbers, + info.errors, + marker="o", + color=cmap(3), + label="Error", + ) + ax.legend() + ax.set_ylim(_get_y_range(info, min_n_trials)) + return ax diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/matplotlib/_timeline.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/matplotlib/_timeline.py new file mode 100644 index 0000000000000000000000000000000000000000..6969a34c129192c4b0088c3a01ea1f19f2047ac4 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/matplotlib/_timeline.py @@ -0,0 +1,113 @@ +from __future__ import annotations + +from optuna._experimental import experimental_func +from optuna.study import Study +from optuna.trial import TrialState +from optuna.visualization._timeline import _get_timeline_info +from optuna.visualization._timeline import _TimelineBarInfo +from optuna.visualization._timeline import _TimelineInfo +from optuna.visualization.matplotlib._matplotlib_imports import _imports + + +if _imports.is_successful(): + from optuna.visualization.matplotlib._matplotlib_imports import Axes + from optuna.visualization.matplotlib._matplotlib_imports import DateFormatter + from optuna.visualization.matplotlib._matplotlib_imports import matplotlib + from optuna.visualization.matplotlib._matplotlib_imports import plt + + +_INFEASIBLE_KEY = "INFEASIBLE" + + +@experimental_func("3.2.0") +def plot_timeline(study: Study, n_recent_trials: int | None = None) -> "Axes": + """Plot the timeline of a study. + + .. seealso:: + Please refer to :func:`optuna.visualization.plot_timeline` for an example. + + Args: + study: + A :class:`~optuna.study.Study` object whose trials are plotted with + their lifetime. + n_recent_trials: + The number of recent trials to plot. If :obj:`None`, all trials are plotted. + If specified, only the most recent ``n_recent_trials`` will be displayed. + Must be a positive integer. + + Returns: + A :class:`plotly.graph_objects.Figure` object. + + Raises: + ValueError: if ``n_recent_trials`` is 0 or negative. + """ + + if n_recent_trials is not None and n_recent_trials <= 0: + raise ValueError("n_recent_trials must be a positive integer or None.") + + _imports.check() + info = _get_timeline_info(study, n_recent_trials) + return _get_timeline_plot(info) + + +def _get_state_name(bar_info: _TimelineBarInfo) -> str: + if bar_info.state == TrialState.COMPLETE and bar_info.infeasible: + return _INFEASIBLE_KEY + else: + return bar_info.state.name + + +def _get_timeline_plot(info: _TimelineInfo) -> "Axes": + _cm = { + TrialState.COMPLETE.name: "tab:blue", + TrialState.FAIL.name: "tab:red", + TrialState.PRUNED.name: "tab:orange", + _INFEASIBLE_KEY: "#CCCCCC", + TrialState.RUNNING.name: "tab:green", + TrialState.WAITING.name: "tab:gray", + } + # Set up the graph style. + plt.style.use("ggplot") # Use ggplot style sheet for similar outputs to plotly. 
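+ # For illustration: a completed trial whose constraints were violated is keyed
+ # as "INFEASIBLE" by _get_state_name above and therefore drawn in light grey
+ # (#CCCCCC) rather than the COMPLETE color (tab:blue).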
+ fig, ax = plt.subplots() + ax.set_title("Timeline Plot") + ax.set_xlabel("Datetime") + ax.set_ylabel("Trial") + + if len(info.bars) == 0: + return ax + + # According to the `ax.barh` docstring, using list[timedelta] as width and list[datetime] as + # left is supported, but mypy does not recognize it. Please refer to the following link for + # more details: + # https://github.com/matplotlib/matplotlib/blob/v3.10.1/lib/matplotlib/axes/_axes.py#L2701-L2836 + ax.barh( + y=[b.number for b in info.bars], + width=[b.complete - b.start for b in info.bars], # type: ignore[arg-type] + left=[b.start for b in info.bars], # type: ignore[arg-type] + color=[_cm[_get_state_name(b)] for b in info.bars], + ) + + # There are 5 types of TrialState in total. + # However, the legend depicts only types present in the arguments. + legend_handles = [] + for state_name, color in _cm.items(): + if any(_get_state_name(b) == state_name for b in info.bars): + legend_handles.append(matplotlib.patches.Patch(color=color, label=state_name)) + ax.legend(handles=legend_handles, loc="upper left", bbox_to_anchor=(1.05, 1.0)) + fig.tight_layout() + + assert len(info.bars) > 0 + first_start_time = min([b.start for b in info.bars]) + last_complete_time = max([b.complete for b in info.bars]) + margin = (last_complete_time - first_start_time) * 0.05 + + # Officially, ax.set_xlim expects arguments right and left to be float, + # but ax.barh() accepts datetime, so we leave the type as datetime. + ax.set_xlim( + right=last_complete_time + margin, # type: ignore[arg-type] + left=first_start_time - margin, # type: ignore[arg-type] + ) + ax.yaxis.set_major_locator(matplotlib.ticker.MaxNLocator(integer=True)) + ax.xaxis.set_major_formatter(DateFormatter("%H:%M:%S")) # type: ignore[no-untyped-call] + plt.gcf().autofmt_xdate() + return ax diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/matplotlib/_utils.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/matplotlib/_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..e52775e842b479cc84059084e2c5cefe72994873 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/optuna/visualization/matplotlib/_utils.py @@ -0,0 +1,58 @@ +from __future__ import annotations + +from optuna._experimental import experimental_func +from optuna.distributions import CategoricalDistribution +from optuna.distributions import FloatDistribution +from optuna.distributions import IntDistribution +from optuna.trial import FrozenTrial +from optuna.visualization.matplotlib import _matplotlib_imports + + +__all__ = ["is_available"] + + +@experimental_func("2.2.0") +def is_available() -> bool: + """Returns whether visualization with Matplotlib is available or not. + + .. note:: + + :mod:`~optuna.visualization.matplotlib` module depends on Matplotlib version 3.0.0 or + higher. If a supported version of Matplotlib isn't installed in your environment, this + function will return :obj:`False`. In such a case, please execute ``$ pip install -U + matplotlib>=3.0.0`` to install Matplotlib. + + Returns: + :obj:`True` if visualization with Matplotlib is available, :obj:`False` otherwise. 
+ """ + + return _matplotlib_imports._imports.is_successful() + + +def _is_log_scale(trials: list[FrozenTrial], param: str) -> bool: + for trial in trials: + if param in trial.params: + dist = trial.distributions[param] + + if isinstance(dist, (FloatDistribution, IntDistribution)): + if dist.log: + return True + + return False + + +def _is_categorical(trials: list[FrozenTrial], param: str) -> bool: + return any( + isinstance(t.distributions[param], CategoricalDistribution) + for t in trials + if param in t.params + ) + + +def _is_numerical(trials: list[FrozenTrial], param: str) -> bool: + return all( + (isinstance(t.params[param], int) or isinstance(t.params[param], float)) + and not isinstance(t.params[param], bool) + for t in trials + if param in t.params + ) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/__config__.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/__config__.py new file mode 100644 index 0000000000000000000000000000000000000000..a4ad659481025be30917e8ff9f9f1c86553b7d81 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/__config__.py @@ -0,0 +1,161 @@ +# This file is generated by SciPy's build process +# It contains system_info results at the time of building this package. +from enum import Enum + +__all__ = ["show"] +_built_with_meson = True + + +class DisplayModes(Enum): + stdout = "stdout" + dicts = "dicts" + + +def _cleanup(d): + """ + Removes empty values in a `dict` recursively + This ensures we remove values that Meson could not provide to CONFIG + """ + if isinstance(d, dict): + return { k: _cleanup(v) for k, v in d.items() if v != '' and _cleanup(v) != '' } + else: + return d + + +CONFIG = _cleanup( + { + "Compilers": { + "c": { + "name": "gcc", + "linker": r"ld.bfd", + "version": "10.2.1", + "commands": r"cc", + "args": r"", + "linker args": r"", + }, + "cython": { + "name": r"cython", + "linker": r"cython", + "version": r"3.0.12", + "commands": r"cython", + "args": r"", + "linker args": r"", + }, + "c++": { + "name": "gcc", + "linker": r"ld.bfd", + "version": "10.2.1", + "commands": r"c++", + "args": r"", + "linker args": r"", + }, + "fortran": { + "name": "gcc", + "linker": r"ld.bfd", + "version": "10.2.1", + "commands": r"gfortran", + "args": r"", + "linker args": r"", + }, + "pythran": { + "version": r"0.17.0", + "include directory": r"../../tmp/pip-build-env-jujiy7q_/overlay/lib/python3.10/site-packages/pythran" + }, + }, + "Machine Information": { + "host": { + "cpu": r"x86_64", + "family": r"x86_64", + "endian": r"little", + "system": r"linux", + }, + "build": { + "cpu": r"x86_64", + "family": r"x86_64", + "endian": r"little", + "system": r"linux", + }, + "cross-compiled": bool("False".lower().replace('false', '')), + }, + "Build Dependencies": { + "blas": { + "name": "scipy-openblas", + "found": bool("True".lower().replace('false', '')), + "version": "0.3.28", + "detection method": "pkgconfig", + "include directory": r"/opt/_internal/cpython-3.10.15/lib/python3.10/site-packages/scipy_openblas32/include", + "lib directory": r"/opt/_internal/cpython-3.10.15/lib/python3.10/site-packages/scipy_openblas32/lib", + "openblas configuration": r"OpenBLAS 0.3.28 DYNAMIC_ARCH NO_AFFINITY Haswell MAX_THREADS=64", + "pc file directory": r"/project", + }, + "lapack": { + "name": "scipy-openblas", + "found": bool("True".lower().replace('false', '')), + "version": "0.3.28", + "detection method": "pkgconfig", + "include directory": 
r"/opt/_internal/cpython-3.10.15/lib/python3.10/site-packages/scipy_openblas32/include", + "lib directory": r"/opt/_internal/cpython-3.10.15/lib/python3.10/site-packages/scipy_openblas32/lib", + "openblas configuration": r"OpenBLAS 0.3.28 DYNAMIC_ARCH NO_AFFINITY Haswell MAX_THREADS=64", + "pc file directory": r"/project", + }, + "pybind11": { + "name": "pybind11", + "version": "2.13.6", + "detection method": "config-tool", + "include directory": r"unknown", + }, + }, + "Python Information": { + "path": r"/opt/python/cp310-cp310/bin/python", + "version": "3.10", + }, + } +) + + +def _check_pyyaml(): + import yaml + + return yaml + + +def show(mode=DisplayModes.stdout.value): + """ + Show libraries and system information on which SciPy was built + and is being used + + Parameters + ---------- + mode : {`'stdout'`, `'dicts'`}, optional. + Indicates how to display the config information. + `'stdout'` prints to console, `'dicts'` returns a dictionary + of the configuration. + + Returns + ------- + out : {`dict`, `None`} + If mode is `'dicts'`, a dict is returned, else None + + Notes + ----- + 1. The `'stdout'` mode will give more readable + output if ``pyyaml`` is installed + + """ + if mode == DisplayModes.stdout.value: + try: # Non-standard library, check import + yaml = _check_pyyaml() + + print(yaml.dump(CONFIG)) + except ModuleNotFoundError: + import warnings + import json + + warnings.warn("Install `pyyaml` for better output", stacklevel=1) + print(json.dumps(CONFIG, indent=2)) + elif mode == DisplayModes.dicts.value: + return CONFIG + else: + raise AttributeError( + f"Invalid `mode`, use one of: {', '.join([e.value for e in DisplayModes])}" + ) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/__init__.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d17c59d80a8e4ad7c8972a26b75cd3fb43a64ab5 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/__init__.py @@ -0,0 +1,141 @@ +""" +SciPy: A scientific computing package for Python +================================================ + +Documentation is available in the docstrings and +online at https://docs.scipy.org. + +Subpackages +----------- +Using any of these subpackages requires an explicit import. For example, +``import scipy.cluster``. 
+ +:: + + cluster --- Vector Quantization / Kmeans + constants --- Physical and mathematical constants and units + datasets --- Dataset methods + differentiate --- Finite difference differentiation tools + fft --- Discrete Fourier transforms + fftpack --- Legacy discrete Fourier transforms + integrate --- Integration routines + interpolate --- Interpolation Tools + io --- Data input and output + linalg --- Linear algebra routines + ndimage --- N-D image package + odr --- Orthogonal Distance Regression + optimize --- Optimization Tools + signal --- Signal Processing Tools + sparse --- Sparse Matrices + spatial --- Spatial data structures and algorithms + special --- Special functions + stats --- Statistical Functions + +Public API in the main SciPy namespace +-------------------------------------- +:: + + __version__ --- SciPy version string + LowLevelCallable --- Low-level callback function + show_config --- Show scipy build configuration + test --- Run scipy unittests + +""" + +import importlib as _importlib + +from numpy import __version__ as __numpy_version__ + + +try: + from scipy.__config__ import show as show_config +except ImportError as e: + msg = """Error importing SciPy: you cannot import SciPy while + being in scipy source directory; please exit the SciPy source + tree first and relaunch your Python interpreter.""" + raise ImportError(msg) from e + + +from scipy.version import version as __version__ + + +# Allow distributors to run custom init code +from . import _distributor_init +del _distributor_init + + +from scipy._lib import _pep440 +# In maintenance branch, change to np_maxversion N+3 if numpy is at N +np_minversion = '1.23.5' +np_maxversion = '2.5.0' +if (_pep440.parse(__numpy_version__) < _pep440.Version(np_minversion) or + _pep440.parse(__numpy_version__) >= _pep440.Version(np_maxversion)): + import warnings + warnings.warn(f"A NumPy version >={np_minversion} and <{np_maxversion}" + f" is required for this version of SciPy (detected " + f"version {__numpy_version__})", + UserWarning, stacklevel=2) +del _pep440 + + +# This is the first import of an extension module within SciPy. If there's +# a general issue with the install, such that extension modules are missing +# or cannot be imported, this is where we'll get a failure - so give an +# informative error message. +try: + from scipy._lib._ccallback import LowLevelCallable +except ImportError as e: + msg = "The `scipy` install you are using seems to be broken, " + \ + "(extension modules cannot be imported), " + \ + "please try reinstalling." 
+ raise ImportError(msg) from e + + +from scipy._lib._testutils import PytestTester +test = PytestTester(__name__) +del PytestTester + + +submodules = [ + 'cluster', + 'constants', + 'datasets', + 'differentiate', + 'fft', + 'fftpack', + 'integrate', + 'interpolate', + 'io', + 'linalg', + 'ndimage', + 'odr', + 'optimize', + 'signal', + 'sparse', + 'spatial', + 'special', + 'stats' +] + +__all__ = submodules + [ + 'LowLevelCallable', + 'test', + 'show_config', + '__version__', +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + if name in submodules: + return _importlib.import_module(f'scipy.{name}') + else: + try: + return globals()[name] + except KeyError: + raise AttributeError( + f"Module 'scipy' has no attribute '{name}'" + ) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/_distributor_init.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/_distributor_init.py new file mode 100644 index 0000000000000000000000000000000000000000..5df134975aa27d31beaff74c3cbfd2d3fb0a55dd --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/_distributor_init.py @@ -0,0 +1,18 @@ +""" Distributor init file + +Distributors: you can replace the contents of this file with your own custom +code to support particular distributions of SciPy. + +For example, this is a good place to put any checks for hardware requirements +or BLAS/LAPACK library initialization. + +The SciPy standard source distribution will not put code in this file beyond +the try-except import of `_distributor_init_local` (which is not part of a +standard source distribution), so you can safely replace this file with your +own version. +""" + +try: + from . import _distributor_init_local # noqa: F401 +except ImportError: + pass diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/cluster/__init__.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/cluster/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7b7784969a573d05cc6b98ee9066c42720156d1d --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/cluster/__init__.py @@ -0,0 +1,31 @@ +""" +========================================= +Clustering package (:mod:`scipy.cluster`) +========================================= + +.. currentmodule:: scipy.cluster + +Clustering algorithms are useful in information theory, target detection, +communications, compression, and other areas. The `vq` module only +supports vector quantization and the k-means algorithms. + +The `hierarchy` module provides functions for hierarchical and +agglomerative clustering. Its features include generating hierarchical +clusters from distance matrices, +calculating statistics on clusters, cutting linkages +to generate flat clusters, and visualizing clusters with dendrograms. + +.. toctree:: + :maxdepth: 1 + + cluster.vq + cluster.hierarchy + +""" +__all__ = ['vq', 'hierarchy'] + +from . 
import vq, hierarchy + +from scipy._lib._testutils import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/cluster/hierarchy.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/cluster/hierarchy.py new file mode 100644 index 0000000000000000000000000000000000000000..522e63d8e6f2904c73c704897f715721509c819c --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/cluster/hierarchy.py @@ -0,0 +1,4178 @@ +""" +Hierarchical clustering (:mod:`scipy.cluster.hierarchy`) +======================================================== + +.. currentmodule:: scipy.cluster.hierarchy + +These functions cut hierarchical clusterings into flat clusterings +or find the roots of the forest formed by a cut by providing the flat +cluster ids of each observation. + +.. autosummary:: + :toctree: generated/ + + fcluster + fclusterdata + leaders + +These are routines for agglomerative clustering. + +.. autosummary:: + :toctree: generated/ + + linkage + single + complete + average + weighted + centroid + median + ward + +These routines compute statistics on hierarchies. + +.. autosummary:: + :toctree: generated/ + + cophenet + from_mlab_linkage + inconsistent + maxinconsts + maxdists + maxRstat + to_mlab_linkage + +Routines for visualizing flat clusters. + +.. autosummary:: + :toctree: generated/ + + dendrogram + +These are data structures and routines for representing hierarchies as +tree objects. + +.. autosummary:: + :toctree: generated/ + + ClusterNode + leaves_list + to_tree + cut_tree + optimal_leaf_ordering + +These are predicates for checking the validity of linkage and +inconsistency matrices as well as for checking isomorphism of two +flat cluster assignments. + +.. autosummary:: + :toctree: generated/ + + is_valid_im + is_valid_linkage + is_isomorphic + is_monotonic + correspond + num_obs_linkage + +Utility routines for plotting: + +.. autosummary:: + :toctree: generated/ + + set_link_color_palette + +Utility classes: + +.. autosummary:: + :toctree: generated/ + + DisjointSet -- data structure for incremental connectivity queries + +""" +# Copyright (C) Damian Eads, 2007-2008. New BSD License. + +# hierarchy.py (derived from cluster.py, http://scipy-cluster.googlecode.com) +# +# Author: Damian Eads +# Date: September 22, 2007 +# +# Copyright (c) 2007, 2008, Damian Eads +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# - Redistributions of source code must retain the above +# copyright notice, this list of conditions and the +# following disclaimer. +# - Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# - Neither the name of the author nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import warnings +import bisect +from collections import deque + +import numpy as np +from . import _hierarchy, _optimal_leaf_ordering +import scipy.spatial.distance as distance +from scipy._lib._array_api import array_namespace, _asarray, xp_copy, is_jax +from scipy._lib._disjoint_set import DisjointSet + + +_LINKAGE_METHODS = {'single': 0, 'complete': 1, 'average': 2, 'centroid': 3, + 'median': 4, 'ward': 5, 'weighted': 6} +_EUCLIDEAN_METHODS = ('centroid', 'median', 'ward') + +__all__ = ['ClusterNode', 'DisjointSet', 'average', 'centroid', 'complete', + 'cophenet', 'correspond', 'cut_tree', 'dendrogram', 'fcluster', + 'fclusterdata', 'from_mlab_linkage', 'inconsistent', + 'is_isomorphic', 'is_monotonic', 'is_valid_im', 'is_valid_linkage', + 'leaders', 'leaves_list', 'linkage', 'maxRstat', 'maxdists', + 'maxinconsts', 'median', 'num_obs_linkage', 'optimal_leaf_ordering', + 'set_link_color_palette', 'single', 'to_mlab_linkage', 'to_tree', + 'ward', 'weighted'] + + +class ClusterWarning(UserWarning): + pass + + +def _warning(s): + warnings.warn(f'scipy.cluster: {s}', ClusterWarning, stacklevel=3) + + +def int_floor(arr, xp): + # array_api_strict is strict about not allowing `int()` on a float array. + # That's typically not needed, here it is - so explicitly convert + return int(xp.astype(xp.asarray(arr), xp.int64)) + + +def single(y): + """ + Perform single/min/nearest linkage on the condensed distance matrix ``y``. + + Parameters + ---------- + y : ndarray + The upper triangular of the distance matrix. The result of + ``pdist`` is returned in this form. + + Returns + ------- + Z : ndarray + The linkage matrix. + + See Also + -------- + linkage : for advanced creation of hierarchical clusterings. + scipy.spatial.distance.pdist : pairwise distance metrics + + Examples + -------- + >>> from scipy.cluster.hierarchy import single, fcluster + >>> from scipy.spatial.distance import pdist + + First, we need a toy dataset to play with:: + + x x x x + x x + + x x + x x x x + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ... [4, 4], [3, 4], [4, 3]] + + Then, we get a condensed distance matrix from this dataset: + + >>> y = pdist(X) + + Finally, we can perform the clustering: + + >>> Z = single(y) + >>> Z + array([[ 0., 1., 1., 2.], + [ 2., 12., 1., 3.], + [ 3., 4., 1., 2.], + [ 5., 14., 1., 3.], + [ 6., 7., 1., 2.], + [ 8., 16., 1., 3.], + [ 9., 10., 1., 2.], + [11., 18., 1., 3.], + [13., 15., 2., 6.], + [17., 20., 2., 9.], + [19., 21., 2., 12.]]) + + The linkage matrix ``Z`` represents a dendrogram - see + `scipy.cluster.hierarchy.linkage` for a detailed explanation of its + contents. 
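# --- Editor's note: illustrative sketch, not part of the vendored SciPy file ---
# How to read one row of the linkage matrix Z shown above: each row is
# [cluster_i, cluster_j, distance, size]. With the same 12-point toy dataset,
# Z[0] = [0., 1., 1., 2.] says points 0 and 1 merge at distance 1.0 into a
# 2-point cluster, which is assigned id len(X) + 0 == 12 (used again in Z[1]).
from scipy.cluster.hierarchy import single
from scipy.spatial.distance import pdist

X = [[0, 0], [0, 1], [1, 0],
     [0, 4], [0, 3], [1, 4],
     [4, 0], [3, 0], [4, 1],
     [4, 4], [3, 4], [4, 3]]
Z = single(pdist(X))
i, j, dist, size = Z[0]
print(int(i), int(j), dist, int(size))   # -> 0 1 1.0 2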
+ + We can use `scipy.cluster.hierarchy.fcluster` to see to which cluster + each initial point would belong given a distance threshold: + + >>> fcluster(Z, 0.9, criterion='distance') + array([ 7, 8, 9, 10, 11, 12, 4, 5, 6, 1, 2, 3], dtype=int32) + >>> fcluster(Z, 1, criterion='distance') + array([3, 3, 3, 4, 4, 4, 2, 2, 2, 1, 1, 1], dtype=int32) + >>> fcluster(Z, 2, criterion='distance') + array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32) + + Also, `scipy.cluster.hierarchy.dendrogram` can be used to generate a + plot of the dendrogram. + """ + return linkage(y, method='single', metric='euclidean') + + +def complete(y): + """ + Perform complete/max/farthest point linkage on a condensed distance matrix. + + Parameters + ---------- + y : ndarray + The upper triangular of the distance matrix. The result of + ``pdist`` is returned in this form. + + Returns + ------- + Z : ndarray + A linkage matrix containing the hierarchical clustering. See + the `linkage` function documentation for more information + on its structure. + + See Also + -------- + linkage : for advanced creation of hierarchical clusterings. + scipy.spatial.distance.pdist : pairwise distance metrics + + Examples + -------- + >>> from scipy.cluster.hierarchy import complete, fcluster + >>> from scipy.spatial.distance import pdist + + First, we need a toy dataset to play with:: + + x x x x + x x + + x x + x x x x + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ... [4, 4], [3, 4], [4, 3]] + + Then, we get a condensed distance matrix from this dataset: + + >>> y = pdist(X) + + Finally, we can perform the clustering: + + >>> Z = complete(y) + >>> Z + array([[ 0. , 1. , 1. , 2. ], + [ 3. , 4. , 1. , 2. ], + [ 6. , 7. , 1. , 2. ], + [ 9. , 10. , 1. , 2. ], + [ 2. , 12. , 1.41421356, 3. ], + [ 5. , 13. , 1.41421356, 3. ], + [ 8. , 14. , 1.41421356, 3. ], + [11. , 15. , 1.41421356, 3. ], + [16. , 17. , 4.12310563, 6. ], + [18. , 19. , 4.12310563, 6. ], + [20. , 21. , 5.65685425, 12. ]]) + + The linkage matrix ``Z`` represents a dendrogram - see + `scipy.cluster.hierarchy.linkage` for a detailed explanation of its + contents. + + We can use `scipy.cluster.hierarchy.fcluster` to see to which cluster + each initial point would belong given a distance threshold: + + >>> fcluster(Z, 0.9, criterion='distance') + array([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], dtype=int32) + >>> fcluster(Z, 1.5, criterion='distance') + array([1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4], dtype=int32) + >>> fcluster(Z, 4.5, criterion='distance') + array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2], dtype=int32) + >>> fcluster(Z, 6, criterion='distance') + array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32) + + Also, `scipy.cluster.hierarchy.dendrogram` can be used to generate a + plot of the dendrogram. + """ + return linkage(y, method='complete', metric='euclidean') + + +def average(y): + """ + Perform average/UPGMA linkage on a condensed distance matrix. + + Parameters + ---------- + y : ndarray + The upper triangular of the distance matrix. The result of + ``pdist`` is returned in this form. + + Returns + ------- + Z : ndarray + A linkage matrix containing the hierarchical clustering. See + `linkage` for more information on its structure. + + See Also + -------- + linkage : for advanced creation of hierarchical clusterings. 
+ scipy.spatial.distance.pdist : pairwise distance metrics + + Examples + -------- + >>> from scipy.cluster.hierarchy import average, fcluster + >>> from scipy.spatial.distance import pdist + + First, we need a toy dataset to play with:: + + x x x x + x x + + x x + x x x x + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ... [4, 4], [3, 4], [4, 3]] + + Then, we get a condensed distance matrix from this dataset: + + >>> y = pdist(X) + + Finally, we can perform the clustering: + + >>> Z = average(y) + >>> Z + array([[ 0. , 1. , 1. , 2. ], + [ 3. , 4. , 1. , 2. ], + [ 6. , 7. , 1. , 2. ], + [ 9. , 10. , 1. , 2. ], + [ 2. , 12. , 1.20710678, 3. ], + [ 5. , 13. , 1.20710678, 3. ], + [ 8. , 14. , 1.20710678, 3. ], + [11. , 15. , 1.20710678, 3. ], + [16. , 17. , 3.39675184, 6. ], + [18. , 19. , 3.39675184, 6. ], + [20. , 21. , 4.09206523, 12. ]]) + + The linkage matrix ``Z`` represents a dendrogram - see + `scipy.cluster.hierarchy.linkage` for a detailed explanation of its + contents. + + We can use `scipy.cluster.hierarchy.fcluster` to see to which cluster + each initial point would belong given a distance threshold: + + >>> fcluster(Z, 0.9, criterion='distance') + array([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], dtype=int32) + >>> fcluster(Z, 1.5, criterion='distance') + array([1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4], dtype=int32) + >>> fcluster(Z, 4, criterion='distance') + array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2], dtype=int32) + >>> fcluster(Z, 6, criterion='distance') + array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32) + + Also, `scipy.cluster.hierarchy.dendrogram` can be used to generate a + plot of the dendrogram. + + """ + return linkage(y, method='average', metric='euclidean') + + +def weighted(y): + """ + Perform weighted/WPGMA linkage on the condensed distance matrix. + + See `linkage` for more information on the return + structure and algorithm. + + Parameters + ---------- + y : ndarray + The upper triangular of the distance matrix. The result of + ``pdist`` is returned in this form. + + Returns + ------- + Z : ndarray + A linkage matrix containing the hierarchical clustering. See + `linkage` for more information on its structure. + + See Also + -------- + linkage : for advanced creation of hierarchical clusterings. + scipy.spatial.distance.pdist : pairwise distance metrics + + Examples + -------- + >>> from scipy.cluster.hierarchy import weighted, fcluster + >>> from scipy.spatial.distance import pdist + + First, we need a toy dataset to play with:: + + x x x x + x x + + x x + x x x x + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ... [4, 4], [3, 4], [4, 3]] + + Then, we get a condensed distance matrix from this dataset: + + >>> y = pdist(X) + + Finally, we can perform the clustering: + + >>> Z = weighted(y) + >>> Z + array([[ 0. , 1. , 1. , 2. ], + [ 6. , 7. , 1. , 2. ], + [ 3. , 4. , 1. , 2. ], + [ 9. , 11. , 1. , 2. ], + [ 2. , 12. , 1.20710678, 3. ], + [ 8. , 13. , 1.20710678, 3. ], + [ 5. , 14. , 1.20710678, 3. ], + [10. , 15. , 1.20710678, 3. ], + [18. , 19. , 3.05595762, 6. ], + [16. , 17. , 3.32379407, 6. ], + [20. , 21. , 4.06357713, 12. ]]) + + The linkage matrix ``Z`` represents a dendrogram - see + `scipy.cluster.hierarchy.linkage` for a detailed explanation of its + contents. 
+ + We can use `scipy.cluster.hierarchy.fcluster` to see to which cluster + each initial point would belong given a distance threshold: + + >>> fcluster(Z, 0.9, criterion='distance') + array([ 7, 8, 9, 1, 2, 3, 10, 11, 12, 4, 6, 5], dtype=int32) + >>> fcluster(Z, 1.5, criterion='distance') + array([3, 3, 3, 1, 1, 1, 4, 4, 4, 2, 2, 2], dtype=int32) + >>> fcluster(Z, 4, criterion='distance') + array([2, 2, 2, 1, 1, 1, 2, 2, 2, 1, 1, 1], dtype=int32) + >>> fcluster(Z, 6, criterion='distance') + array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32) + + Also, `scipy.cluster.hierarchy.dendrogram` can be used to generate a + plot of the dendrogram. + + """ + return linkage(y, method='weighted', metric='euclidean') + + +def centroid(y): + """ + Perform centroid/UPGMC linkage. + + See `linkage` for more information on the input matrix, + return structure, and algorithm. + + The following are common calling conventions: + + 1. ``Z = centroid(y)`` + + Performs centroid/UPGMC linkage on the condensed distance + matrix ``y``. + + 2. ``Z = centroid(X)`` + + Performs centroid/UPGMC linkage on the observation matrix ``X`` + using Euclidean distance as the distance metric. + + Parameters + ---------- + y : ndarray + A condensed distance matrix. A condensed + distance matrix is a flat array containing the upper + triangular of the distance matrix. This is the form that + ``pdist`` returns. Alternatively, a collection of + m observation vectors in n dimensions may be passed as + an m by n array. + + Returns + ------- + Z : ndarray + A linkage matrix containing the hierarchical clustering. See + the `linkage` function documentation for more information + on its structure. + + See Also + -------- + linkage : for advanced creation of hierarchical clusterings. + scipy.spatial.distance.pdist : pairwise distance metrics + + Examples + -------- + >>> from scipy.cluster.hierarchy import centroid, fcluster + >>> from scipy.spatial.distance import pdist + + First, we need a toy dataset to play with:: + + x x x x + x x + + x x + x x x x + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ... [4, 4], [3, 4], [4, 3]] + + Then, we get a condensed distance matrix from this dataset: + + >>> y = pdist(X) + + Finally, we can perform the clustering: + + >>> Z = centroid(y) + >>> Z + array([[ 0. , 1. , 1. , 2. ], + [ 3. , 4. , 1. , 2. ], + [ 9. , 10. , 1. , 2. ], + [ 6. , 7. , 1. , 2. ], + [ 2. , 12. , 1.11803399, 3. ], + [ 5. , 13. , 1.11803399, 3. ], + [ 8. , 15. , 1.11803399, 3. ], + [11. , 14. , 1.11803399, 3. ], + [18. , 19. , 3.33333333, 6. ], + [16. , 17. , 3.33333333, 6. ], + [20. , 21. , 3.33333333, 12. ]]) # may vary + + The linkage matrix ``Z`` represents a dendrogram - see + `scipy.cluster.hierarchy.linkage` for a detailed explanation of its + contents. + + We can use `scipy.cluster.hierarchy.fcluster` to see to which cluster + each initial point would belong given a distance threshold: + + >>> fcluster(Z, 0.9, criterion='distance') + array([ 7, 8, 9, 10, 11, 12, 1, 2, 3, 4, 5, 6], dtype=int32) # may vary + >>> fcluster(Z, 1.1, criterion='distance') + array([5, 5, 6, 7, 7, 8, 1, 1, 2, 3, 3, 4], dtype=int32) # may vary + >>> fcluster(Z, 2, criterion='distance') + array([3, 3, 3, 4, 4, 4, 1, 1, 1, 2, 2, 2], dtype=int32) # may vary + >>> fcluster(Z, 4, criterion='distance') + array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32) + + Also, `scipy.cluster.hierarchy.dendrogram` can be used to generate a + plot of the dendrogram. 
+ + """ + return linkage(y, method='centroid', metric='euclidean') + + +def median(y): + """ + Perform median/WPGMC linkage. + + See `linkage` for more information on the return structure + and algorithm. + + The following are common calling conventions: + + 1. ``Z = median(y)`` + + Performs median/WPGMC linkage on the condensed distance matrix + ``y``. See ``linkage`` for more information on the return + structure and algorithm. + + 2. ``Z = median(X)`` + + Performs median/WPGMC linkage on the observation matrix ``X`` + using Euclidean distance as the distance metric. See `linkage` + for more information on the return structure and algorithm. + + Parameters + ---------- + y : ndarray + A condensed distance matrix. A condensed + distance matrix is a flat array containing the upper + triangular of the distance matrix. This is the form that + ``pdist`` returns. Alternatively, a collection of + m observation vectors in n dimensions may be passed as + an m by n array. + + Returns + ------- + Z : ndarray + The hierarchical clustering encoded as a linkage matrix. + + See Also + -------- + linkage : for advanced creation of hierarchical clusterings. + scipy.spatial.distance.pdist : pairwise distance metrics + + Examples + -------- + >>> from scipy.cluster.hierarchy import median, fcluster + >>> from scipy.spatial.distance import pdist + + First, we need a toy dataset to play with:: + + x x x x + x x + + x x + x x x x + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ... [4, 4], [3, 4], [4, 3]] + + Then, we get a condensed distance matrix from this dataset: + + >>> y = pdist(X) + + Finally, we can perform the clustering: + + >>> Z = median(y) + >>> Z + array([[ 0. , 1. , 1. , 2. ], + [ 3. , 4. , 1. , 2. ], + [ 9. , 10. , 1. , 2. ], + [ 6. , 7. , 1. , 2. ], + [ 2. , 12. , 1.11803399, 3. ], + [ 5. , 13. , 1.11803399, 3. ], + [ 8. , 15. , 1.11803399, 3. ], + [11. , 14. , 1.11803399, 3. ], + [18. , 19. , 3. , 6. ], + [16. , 17. , 3.5 , 6. ], + [20. , 21. , 3.25 , 12. ]]) + + The linkage matrix ``Z`` represents a dendrogram - see + `scipy.cluster.hierarchy.linkage` for a detailed explanation of its + contents. + + We can use `scipy.cluster.hierarchy.fcluster` to see to which cluster + each initial point would belong given a distance threshold: + + >>> fcluster(Z, 0.9, criterion='distance') + array([ 7, 8, 9, 10, 11, 12, 1, 2, 3, 4, 5, 6], dtype=int32) + >>> fcluster(Z, 1.1, criterion='distance') + array([5, 5, 6, 7, 7, 8, 1, 1, 2, 3, 3, 4], dtype=int32) + >>> fcluster(Z, 2, criterion='distance') + array([3, 3, 3, 4, 4, 4, 1, 1, 1, 2, 2, 2], dtype=int32) + >>> fcluster(Z, 4, criterion='distance') + array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32) + + Also, `scipy.cluster.hierarchy.dendrogram` can be used to generate a + plot of the dendrogram. + + """ + return linkage(y, method='median', metric='euclidean') + + +def ward(y): + """ + Perform Ward's linkage on a condensed distance matrix. + + See `linkage` for more information on the return structure + and algorithm. + + The following are common calling conventions: + + 1. ``Z = ward(y)`` + Performs Ward's linkage on the condensed distance matrix ``y``. + + 2. ``Z = ward(X)`` + Performs Ward's linkage on the observation matrix ``X`` using + Euclidean distance as the distance metric. + + Parameters + ---------- + y : ndarray + A condensed distance matrix. A condensed + distance matrix is a flat array containing the upper + triangular of the distance matrix. This is the form that + ``pdist`` returns. 
Alternatively, a collection of + m observation vectors in n dimensions may be passed as + an m by n array. + + Returns + ------- + Z : ndarray + The hierarchical clustering encoded as a linkage matrix. See + `linkage` for more information on the return structure and + algorithm. + + See Also + -------- + linkage : for advanced creation of hierarchical clusterings. + scipy.spatial.distance.pdist : pairwise distance metrics + + Examples + -------- + >>> from scipy.cluster.hierarchy import ward, fcluster + >>> from scipy.spatial.distance import pdist + + First, we need a toy dataset to play with:: + + x x x x + x x + + x x + x x x x + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ... [4, 4], [3, 4], [4, 3]] + + Then, we get a condensed distance matrix from this dataset: + + >>> y = pdist(X) + + Finally, we can perform the clustering: + + >>> Z = ward(y) + >>> Z + array([[ 0. , 1. , 1. , 2. ], + [ 3. , 4. , 1. , 2. ], + [ 6. , 7. , 1. , 2. ], + [ 9. , 10. , 1. , 2. ], + [ 2. , 12. , 1.29099445, 3. ], + [ 5. , 13. , 1.29099445, 3. ], + [ 8. , 14. , 1.29099445, 3. ], + [11. , 15. , 1.29099445, 3. ], + [16. , 17. , 5.77350269, 6. ], + [18. , 19. , 5.77350269, 6. ], + [20. , 21. , 8.16496581, 12. ]]) + + The linkage matrix ``Z`` represents a dendrogram - see + `scipy.cluster.hierarchy.linkage` for a detailed explanation of its + contents. + + We can use `scipy.cluster.hierarchy.fcluster` to see to which cluster + each initial point would belong given a distance threshold: + + >>> fcluster(Z, 0.9, criterion='distance') + array([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], dtype=int32) + >>> fcluster(Z, 1.1, criterion='distance') + array([1, 1, 2, 3, 3, 4, 5, 5, 6, 7, 7, 8], dtype=int32) + >>> fcluster(Z, 3, criterion='distance') + array([1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4], dtype=int32) + >>> fcluster(Z, 9, criterion='distance') + array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32) + + Also, `scipy.cluster.hierarchy.dendrogram` can be used to generate a + plot of the dendrogram. + + """ + return linkage(y, method='ward', metric='euclidean') + + +def linkage(y, method='single', metric='euclidean', optimal_ordering=False): + """ + Perform hierarchical/agglomerative clustering. + + The input y may be either a 1-D condensed distance matrix + or a 2-D array of observation vectors. + + If y is a 1-D condensed distance matrix, + then y must be a :math:`\\binom{n}{2}` sized + vector, where n is the number of original observations paired + in the distance matrix. The behavior of this function is very + similar to the MATLAB linkage function. + + A :math:`(n-1)` by 4 matrix ``Z`` is returned. At the + :math:`i`-th iteration, clusters with indices ``Z[i, 0]`` and + ``Z[i, 1]`` are combined to form cluster :math:`n + i`. A + cluster with an index less than :math:`n` corresponds to one of + the :math:`n` original observations. The distance between + clusters ``Z[i, 0]`` and ``Z[i, 1]`` is given by ``Z[i, 2]``. The + fourth value ``Z[i, 3]`` represents the number of original + observations in the newly formed cluster. + + The following linkage methods are used to compute the distance + :math:`d(s, t)` between two clusters :math:`s` and + :math:`t`. The algorithm begins with a forest of clusters that + have yet to be used in the hierarchy being formed. When two + clusters :math:`s` and :math:`t` from this forest are combined + into a single cluster :math:`u`, :math:`s` and :math:`t` are + removed from the forest, and :math:`u` is added to the + forest. 
When only one cluster remains in the forest, the algorithm + stops, and this cluster becomes the root. + + A distance matrix is maintained at each iteration. The ``d[i,j]`` + entry corresponds to the distance between cluster :math:`i` and + :math:`j` in the original forest. + + At each iteration, the algorithm must update the distance matrix + to reflect the distance of the newly formed cluster u with the + remaining clusters in the forest. + + Suppose there are :math:`|u|` original observations + :math:`u[0], \\ldots, u[|u|-1]` in cluster :math:`u` and + :math:`|v|` original objects :math:`v[0], \\ldots, v[|v|-1]` in + cluster :math:`v`. Recall, :math:`s` and :math:`t` are + combined to form cluster :math:`u`. Let :math:`v` be any + remaining cluster in the forest that is not :math:`u`. + + The following are methods for calculating the distance between the + newly formed cluster :math:`u` and each :math:`v`. + + * method='single' assigns + + .. math:: + d(u,v) = \\min(dist(u[i],v[j])) + + for all points :math:`i` in cluster :math:`u` and + :math:`j` in cluster :math:`v`. This is also known as the + Nearest Point Algorithm. + + * method='complete' assigns + + .. math:: + d(u, v) = \\max(dist(u[i],v[j])) + + for all points :math:`i` in cluster u and :math:`j` in + cluster :math:`v`. This is also known by the Farthest Point + Algorithm or Voor Hees Algorithm. + + * method='average' assigns + + .. math:: + d(u,v) = \\sum_{ij} \\frac{d(u[i], v[j])} + {(|u|*|v|)} + + for all points :math:`i` and :math:`j` where :math:`|u|` + and :math:`|v|` are the cardinalities of clusters :math:`u` + and :math:`v`, respectively. This is also called the UPGMA + algorithm. + + * method='weighted' assigns + + .. math:: + d(u,v) = (dist(s,v) + dist(t,v))/2 + + where cluster u was formed with cluster s and t and v + is a remaining cluster in the forest (also called WPGMA). + + * method='centroid' assigns + + .. math:: + dist(s,t) = ||c_s-c_t||_2 + + where :math:`c_s` and :math:`c_t` are the centroids of + clusters :math:`s` and :math:`t`, respectively. When two + clusters :math:`s` and :math:`t` are combined into a new + cluster :math:`u`, the new centroid is computed over all the + original objects in clusters :math:`s` and :math:`t`. The + distance then becomes the Euclidean distance between the + centroid of :math:`u` and the centroid of a remaining cluster + :math:`v` in the forest. This is also known as the UPGMC + algorithm. + + * method='median' assigns :math:`d(s,t)` like the ``centroid`` + method. When two clusters :math:`s` and :math:`t` are combined + into a new cluster :math:`u`, the average of centroids s and t + give the new centroid :math:`u`. This is also known as the + WPGMC algorithm. + + * method='ward' uses the Ward variance minimization algorithm. + The new entry :math:`d(u,v)` is computed as follows, + + .. math:: + + d(u,v) = \\sqrt{\\frac{|v|+|s|} + {T}d(v,s)^2 + + \\frac{|v|+|t|} + {T}d(v,t)^2 + - \\frac{|v|} + {T}d(s,t)^2} + + where :math:`u` is the newly joined cluster consisting of + clusters :math:`s` and :math:`t`, :math:`v` is an unused + cluster in the forest, :math:`T=|v|+|s|+|t|`, and + :math:`|*|` is the cardinality of its argument. This is also + known as the incremental algorithm. + + Warning: When the minimum distance pair in the forest is chosen, there + may be two or more pairs with the same minimum distance. This + implementation may choose a different minimum than the MATLAB + version. + + Parameters + ---------- + y : ndarray + A condensed distance matrix. 
A condensed distance matrix + is a flat array containing the upper triangular of the distance matrix. + This is the form that ``pdist`` returns. Alternatively, a collection of + :math:`m` observation vectors in :math:`n` dimensions may be passed as + an :math:`m` by :math:`n` array. All elements of the condensed distance + matrix must be finite, i.e., no NaNs or infs. + method : str, optional + The linkage algorithm to use. See the ``Linkage Methods`` section below + for full descriptions. + metric : str or function, optional + The distance metric to use in the case that y is a collection of + observation vectors; ignored otherwise. See the ``pdist`` + function for a list of valid distance metrics. A custom distance + function can also be used. + optimal_ordering : bool, optional + If True, the linkage matrix will be reordered so that the distance + between successive leaves is minimal. This results in a more intuitive + tree structure when the data are visualized. defaults to False, because + this algorithm can be slow, particularly on large datasets [2]_. See + also the `optimal_leaf_ordering` function. + + .. versionadded:: 1.0.0 + + Returns + ------- + Z : ndarray + The hierarchical clustering encoded as a linkage matrix. + + Notes + ----- + 1. For method 'single', an optimized algorithm based on minimum spanning + tree is implemented. It has time complexity :math:`O(n^2)`. + For methods 'complete', 'average', 'weighted' and 'ward', an algorithm + called nearest-neighbors chain is implemented. It also has time + complexity :math:`O(n^2)`. + For other methods, a naive algorithm is implemented with :math:`O(n^3)` + time complexity. + All algorithms use :math:`O(n^2)` memory. + Refer to [1]_ for details about the algorithms. + 2. Methods 'centroid', 'median', and 'ward' are correctly defined only if + Euclidean pairwise metric is used. If `y` is passed as precomputed + pairwise distances, then it is the user's responsibility to assure that + these distances are in fact Euclidean, otherwise the produced result + will be incorrect. + + See Also + -------- + scipy.spatial.distance.pdist : pairwise distance metrics + + References + ---------- + .. [1] Daniel Mullner, "Modern hierarchical, agglomerative clustering + algorithms", :arXiv:`1109.2378v1`. + .. [2] Ziv Bar-Joseph, David K. Gifford, Tommi S. Jaakkola, "Fast optimal + leaf ordering for hierarchical clustering", 2001. 
Bioinformatics + :doi:`10.1093/bioinformatics/17.suppl_1.S22` + + Examples + -------- + >>> from scipy.cluster.hierarchy import dendrogram, linkage + >>> from matplotlib import pyplot as plt + >>> X = [[i] for i in [2, 8, 0, 4, 1, 9, 9, 0]] + + >>> Z = linkage(X, 'ward') + >>> fig = plt.figure(figsize=(25, 10)) + >>> dn = dendrogram(Z) + + >>> Z = linkage(X, 'single') + >>> fig = plt.figure(figsize=(25, 10)) + >>> dn = dendrogram(Z) + >>> plt.show() + """ + xp = array_namespace(y) + y = _asarray(y, order='C', dtype=xp.float64, xp=xp) + + if method not in _LINKAGE_METHODS: + raise ValueError(f"Invalid method: {method}") + + if method in _EUCLIDEAN_METHODS and metric != 'euclidean' and y.ndim == 2: + msg = f"`method={method}` requires the distance metric to be Euclidean" + raise ValueError(msg) + + if y.ndim == 1: + distance.is_valid_y(y, throw=True, name='y') + elif y.ndim == 2: + if (y.shape[0] == y.shape[1] and np.allclose(np.diag(y), 0) and + xp.all(y >= 0) and np.allclose(y, y.T)): + warnings.warn('The symmetric non-negative hollow observation ' + 'matrix looks suspiciously like an uncondensed ' + 'distance matrix', + ClusterWarning, stacklevel=2) + y = distance.pdist(y, metric) + y = xp.asarray(y) + else: + raise ValueError("`y` must be 1 or 2 dimensional.") + + if not xp.all(xp.isfinite(y)): + raise ValueError("The condensed distance matrix must contain only " + "finite values.") + + n = int(distance.num_obs_y(y)) + method_code = _LINKAGE_METHODS[method] + + y = np.asarray(y) + if method == 'single': + result = _hierarchy.mst_single_linkage(y, n) + elif method in ['complete', 'average', 'weighted', 'ward']: + result = _hierarchy.nn_chain(y, n, method_code) + else: + result = _hierarchy.fast_linkage(y, n, method_code) + result = xp.asarray(result) + + if optimal_ordering: + y = xp.asarray(y) + return optimal_leaf_ordering(result, y) + else: + return result + + +class ClusterNode: + """ + A tree node class for representing a cluster. + + Leaf nodes correspond to original observations, while non-leaf nodes + correspond to non-singleton clusters. + + The `to_tree` function converts a matrix returned by the linkage + function into an easy-to-use tree representation. + + All parameter names are also attributes. + + Parameters + ---------- + id : int + The node id. + left : ClusterNode instance, optional + The left child tree node. + right : ClusterNode instance, optional + The right child tree node. + dist : float, optional + Distance for this cluster in the linkage matrix. + count : int, optional + The number of samples in this cluster. + + See Also + -------- + to_tree : for converting a linkage matrix ``Z`` into a tree object. + + """ + + def __init__(self, id, left=None, right=None, dist=0.0, count=1): + if id < 0: + raise ValueError('The id must be non-negative.') + if dist < 0: + raise ValueError('The distance must be non-negative.') + if (left is None and right is not None) or \ + (left is not None and right is None): + raise ValueError('Only full or proper binary trees are permitted.' 
+ ' This node has one child.') + if count < 1: + raise ValueError('A cluster must contain at least one original ' + 'observation.') + self.id = id + self.left = left + self.right = right + self.dist = dist + if self.left is None: + self.count = count + else: + self.count = left.count + right.count + + def __lt__(self, node): + if not isinstance(node, ClusterNode): + raise ValueError("Can't compare ClusterNode " + f"to type {type(node)}") + return self.dist < node.dist + + def __gt__(self, node): + if not isinstance(node, ClusterNode): + raise ValueError("Can't compare ClusterNode " + f"to type {type(node)}") + return self.dist > node.dist + + def __eq__(self, node): + if not isinstance(node, ClusterNode): + raise ValueError("Can't compare ClusterNode " + f"to type {type(node)}") + return self.dist == node.dist + + def get_id(self): + """ + The identifier of the target node. + + For ``0 <= i < n``, `i` corresponds to original observation i. + For ``n <= i < 2n-1``, `i` corresponds to non-singleton cluster formed + at iteration ``i-n``. + + Returns + ------- + id : int + The identifier of the target node. + + """ + return self.id + + def get_count(self): + """ + The number of leaf nodes (original observations) belonging to + the cluster node nd. If the target node is a leaf, 1 is + returned. + + Returns + ------- + get_count : int + The number of leaf nodes below the target node. + + """ + return self.count + + def get_left(self): + """ + Return a reference to the left child tree object. + + Returns + ------- + left : ClusterNode + The left child of the target node. If the node is a leaf, + None is returned. + + """ + return self.left + + def get_right(self): + """ + Return a reference to the right child tree object. + + Returns + ------- + right : ClusterNode + The left child of the target node. If the node is a leaf, + None is returned. + + """ + return self.right + + def is_leaf(self): + """ + Return True if the target node is a leaf. + + Returns + ------- + leafness : bool + True if the target node is a leaf node. + + """ + return self.left is None + + def pre_order(self, func=(lambda x: x.id)): + """ + Perform pre-order traversal without recursive function calls. + + When a leaf node is first encountered, ``func`` is called with + the leaf node as its argument, and its result is appended to + the list. + + For example, the statement:: + + ids = root.pre_order(lambda x: x.id) + + returns a list of the node ids corresponding to the leaf nodes + of the tree as they appear from left to right. + + Parameters + ---------- + func : function + Applied to each leaf ClusterNode object in the pre-order traversal. + Given the ``i``-th leaf node in the pre-order traversal ``n[i]``, + the result of ``func(n[i])`` is stored in ``L[i]``. If not + provided, the index of the original observation to which the node + corresponds is used. + + Returns + ------- + L : list + The pre-order traversal. + + """ + # Do a preorder traversal, caching the result. To avoid having to do + # recursion, we'll store the previous index we've visited in a vector. 
+ n = self.count + + curNode = [None] * (2 * n) + lvisited = set() + rvisited = set() + curNode[0] = self + k = 0 + preorder = [] + while k >= 0: + nd = curNode[k] + ndid = nd.id + if nd.is_leaf(): + preorder.append(func(nd)) + k = k - 1 + else: + if ndid not in lvisited: + curNode[k + 1] = nd.left + lvisited.add(ndid) + k = k + 1 + elif ndid not in rvisited: + curNode[k + 1] = nd.right + rvisited.add(ndid) + k = k + 1 + # If we've visited the left and right of this non-leaf + # node already, go up in the tree. + else: + k = k - 1 + + return preorder + + +_cnode_bare = ClusterNode(0) +_cnode_type = type(ClusterNode) + + +def _order_cluster_tree(Z): + """ + Return clustering nodes in bottom-up order by distance. + + Parameters + ---------- + Z : scipy.cluster.linkage array + The linkage matrix. + + Returns + ------- + nodes : list + A list of ClusterNode objects. + """ + q = deque() + tree = to_tree(Z) + q.append(tree) + nodes = [] + + while q: + node = q.popleft() + if not node.is_leaf(): + bisect.insort_left(nodes, node) + q.append(node.get_right()) + q.append(node.get_left()) + return nodes + + +def cut_tree(Z, n_clusters=None, height=None): + """ + Given a linkage matrix Z, return the cut tree. + + Parameters + ---------- + Z : scipy.cluster.linkage array + The linkage matrix. + n_clusters : array_like, optional + Number of clusters in the tree at the cut point. + height : array_like, optional + The height at which to cut the tree. Only possible for ultrametric + trees. + + Returns + ------- + cutree : array + An array indicating group membership at each agglomeration step. I.e., + for a full cut tree, in the first column each data point is in its own + cluster. At the next step, two nodes are merged. Finally, all + singleton and non-singleton clusters are in one group. If `n_clusters` + or `height` are given, the columns correspond to the columns of + `n_clusters` or `height`. 
+ + Examples + -------- + >>> from scipy import cluster + >>> import numpy as np + >>> from numpy.random import default_rng + >>> rng = default_rng() + >>> X = rng.random((50, 4)) + >>> Z = cluster.hierarchy.ward(X) + >>> cutree = cluster.hierarchy.cut_tree(Z, n_clusters=[5, 10]) + >>> cutree[:10] + array([[0, 0], + [1, 1], + [2, 2], + [3, 3], + [3, 4], + [2, 2], + [0, 0], + [1, 5], + [3, 6], + [4, 7]]) # random + + """ + xp = array_namespace(Z) + nobs = num_obs_linkage(Z) + nodes = _order_cluster_tree(Z) + + if height is not None and n_clusters is not None: + raise ValueError("At least one of either height or n_clusters " + "must be None") + elif height is None and n_clusters is None: # return the full cut tree + cols_idx = xp.arange(nobs) + elif height is not None: + height = xp.asarray(height) + heights = xp.asarray([x.dist for x in nodes]) + cols_idx = xp.searchsorted(heights, height) + else: + n_clusters = xp.asarray(n_clusters) + cols_idx = nobs - xp.searchsorted(xp.arange(nobs), n_clusters) + + try: + n_cols = len(cols_idx) + except TypeError: # scalar + n_cols = 1 + cols_idx = xp.asarray([cols_idx]) + + groups = xp.zeros((n_cols, nobs), dtype=xp.int64) + last_group = xp.arange(nobs) + if 0 in cols_idx: + groups[0] = last_group + + for i, node in enumerate(nodes): + idx = node.pre_order() + this_group = xp_copy(last_group, xp=xp) + # TODO ARRAY_API complex indexing not supported + this_group[idx] = xp.min(last_group[idx]) + this_group[this_group > xp.max(last_group[idx])] -= 1 + if i + 1 in cols_idx: + groups[np.nonzero(i + 1 == cols_idx)[0]] = this_group + last_group = this_group + + return groups.T + + +def to_tree(Z, rd=False): + """ + Convert a linkage matrix into an easy-to-use tree object. + + The reference to the root `ClusterNode` object is returned (by default). + + Each `ClusterNode` object has a ``left``, ``right``, ``dist``, ``id``, + and ``count`` attribute. The left and right attributes point to + ClusterNode objects that were combined to generate the cluster. + If both are None then the `ClusterNode` object is a leaf node, its count + must be 1, and its distance is meaningless but set to 0. + + *Note: This function is provided for the convenience of the library + user. ClusterNodes are not used as input to any of the functions in this + library.* + + Parameters + ---------- + Z : ndarray + The linkage matrix in proper form (see the `linkage` + function documentation). + rd : bool, optional + When False (default), a reference to the root `ClusterNode` object is + returned. Otherwise, a tuple ``(r, d)`` is returned. ``r`` is a + reference to the root node while ``d`` is a list of `ClusterNode` + objects - one per original entry in the linkage matrix plus entries + for all clustering steps. If a cluster id is + less than the number of samples ``n`` in the data that the linkage + matrix describes, then it corresponds to a singleton cluster (leaf + node). + See `linkage` for more information on the assignment of cluster ids + to clusters. + + Returns + ------- + tree : ClusterNode or tuple (ClusterNode, list of ClusterNode) + If ``rd`` is False, a `ClusterNode`. + If ``rd`` is True, a list of length ``2*n - 1``, with ``n`` the number + of samples. See the description of `rd` above for more details. 
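# --- Editor's note: illustrative sketch, not part of the vendored SciPy file ---
# to_tree() combined with ClusterNode.pre_order() (defined earlier in this file)
# recovers the left-to-right leaf order of the dendrogram; the toy data below
# mirrors the docstring example.
import numpy as np
from scipy.cluster.hierarchy import linkage, to_tree

rng = np.random.default_rng(0)
x = rng.random((5, 2))
Z = linkage(x, method='single')
root = to_tree(Z)
print(root.pre_order())          # leaf ids (original observations), left to right
print(root.get_count())          # -> 5, one count per original observation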
+ + See Also + -------- + linkage, is_valid_linkage, ClusterNode + + Examples + -------- + >>> import numpy as np + >>> from scipy.cluster import hierarchy + >>> rng = np.random.default_rng() + >>> x = rng.random((5, 2)) + >>> Z = hierarchy.linkage(x) + >>> hierarchy.to_tree(Z) + >> rootnode, nodelist = hierarchy.to_tree(Z, rd=True) + >>> rootnode + >> len(nodelist) + 9 + + """ + xp = array_namespace(Z) + Z = _asarray(Z, order='c', xp=xp) + is_valid_linkage(Z, throw=True, name='Z') + + # Number of original objects is equal to the number of rows plus 1. + n = Z.shape[0] + 1 + + # Create a list full of None's to store the node objects + d = [None] * (n * 2 - 1) + + # Create the nodes corresponding to the n original objects. + for i in range(0, n): + d[i] = ClusterNode(i) + + nd = None + + for i in range(Z.shape[0]): + row = Z[i, :] + + fi = int_floor(row[0], xp) + fj = int_floor(row[1], xp) + if fi > i + n: + raise ValueError(('Corrupt matrix Z. Index to derivative cluster ' + 'is used before it is formed. See row %d, ' + 'column 0') % fi) + if fj > i + n: + raise ValueError(('Corrupt matrix Z. Index to derivative cluster ' + 'is used before it is formed. See row %d, ' + 'column 1') % fj) + + nd = ClusterNode(i + n, d[fi], d[fj], row[2]) + # ^ id ^ left ^ right ^ dist + if row[3] != nd.count: + raise ValueError(('Corrupt matrix Z. The count Z[%d,3] is ' + 'incorrect.') % i) + d[n + i] = nd + + if rd: + return (nd, d) + else: + return nd + + +def optimal_leaf_ordering(Z, y, metric='euclidean'): + """ + Given a linkage matrix Z and distance, reorder the cut tree. + + Parameters + ---------- + Z : ndarray + The hierarchical clustering encoded as a linkage matrix. See + `linkage` for more information on the return structure and + algorithm. + y : ndarray + The condensed distance matrix from which Z was generated. + Alternatively, a collection of m observation vectors in n + dimensions may be passed as an m by n array. + metric : str or function, optional + The distance metric to use in the case that y is a collection of + observation vectors; ignored otherwise. See the ``pdist`` + function for a list of valid distance metrics. A custom distance + function can also be used. + + Returns + ------- + Z_ordered : ndarray + A copy of the linkage matrix Z, reordered to minimize the distance + between adjacent leaves. 
+ + Examples + -------- + >>> import numpy as np + >>> from scipy.cluster import hierarchy + >>> rng = np.random.default_rng() + >>> X = rng.standard_normal((10, 10)) + >>> Z = hierarchy.ward(X) + >>> hierarchy.leaves_list(Z) + array([0, 3, 1, 9, 2, 5, 7, 4, 6, 8], dtype=int32) + >>> hierarchy.leaves_list(hierarchy.optimal_leaf_ordering(Z, X)) + array([3, 0, 2, 5, 7, 4, 8, 6, 9, 1], dtype=int32) + + """ + xp = array_namespace(Z, y) + Z = _asarray(Z, order='C', xp=xp) + is_valid_linkage(Z, throw=True, name='Z') + + y = _asarray(y, order='C', dtype=xp.float64, xp=xp) + + if y.ndim == 1: + distance.is_valid_y(y, throw=True, name='y') + elif y.ndim == 2: + if (y.shape[0] == y.shape[1] and np.allclose(np.diag(y), 0) and + np.all(y >= 0) and np.allclose(y, y.T)): + warnings.warn('The symmetric non-negative hollow observation ' + 'matrix looks suspiciously like an uncondensed ' + 'distance matrix', + ClusterWarning, stacklevel=2) + y = distance.pdist(y, metric) + y = xp.asarray(y) + else: + raise ValueError("`y` must be 1 or 2 dimensional.") + + if not xp.all(xp.isfinite(y)): + raise ValueError("The condensed distance matrix must contain only " + "finite values.") + + Z = np.asarray(Z) + y = np.asarray(y) + return xp.asarray(_optimal_leaf_ordering.optimal_leaf_ordering(Z, y)) + + +def cophenet(Z, Y=None): + """ + Calculate the cophenetic distances between each observation in + the hierarchical clustering defined by the linkage ``Z``. + + Suppose ``p`` and ``q`` are original observations in + disjoint clusters ``s`` and ``t``, respectively and + ``s`` and ``t`` are joined by a direct parent cluster + ``u``. The cophenetic distance between observations + ``i`` and ``j`` is simply the distance between + clusters ``s`` and ``t``. + + Parameters + ---------- + Z : ndarray + The hierarchical clustering encoded as an array + (see `linkage` function). + Y : ndarray (optional) + Calculates the cophenetic correlation coefficient ``c`` of a + hierarchical clustering defined by the linkage matrix `Z` + of a set of :math:`n` observations in :math:`m` + dimensions. `Y` is the condensed distance matrix from which + `Z` was generated. + + Returns + ------- + c : ndarray + The cophentic correlation distance (if ``Y`` is passed). + d : ndarray + The cophenetic distance matrix in condensed form. The + :math:`ij` th entry is the cophenetic distance between + original observations :math:`i` and :math:`j`. + + See Also + -------- + linkage : + for a description of what a linkage matrix is. + scipy.spatial.distance.squareform : + transforming condensed matrices into square ones. + + Examples + -------- + >>> from scipy.cluster.hierarchy import single, cophenet + >>> from scipy.spatial.distance import pdist, squareform + + Given a dataset ``X`` and a linkage matrix ``Z``, the cophenetic distance + between two points of ``X`` is the distance between the largest two + distinct clusters that each of the points: + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ... 
[4, 4], [3, 4], [4, 3]] + + ``X`` corresponds to this dataset :: + + x x x x + x x + + x x + x x x x + + >>> Z = single(pdist(X)) + >>> Z + array([[ 0., 1., 1., 2.], + [ 2., 12., 1., 3.], + [ 3., 4., 1., 2.], + [ 5., 14., 1., 3.], + [ 6., 7., 1., 2.], + [ 8., 16., 1., 3.], + [ 9., 10., 1., 2.], + [11., 18., 1., 3.], + [13., 15., 2., 6.], + [17., 20., 2., 9.], + [19., 21., 2., 12.]]) + >>> cophenet(Z) + array([1., 1., 2., 2., 2., 2., 2., 2., 2., 2., 2., 1., 2., 2., 2., 2., 2., + 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 1., 1., 2., 2., + 2., 2., 2., 2., 1., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., + 1., 1., 2., 2., 2., 1., 2., 2., 2., 2., 2., 2., 1., 1., 1.]) + + The output of the `scipy.cluster.hierarchy.cophenet` method is + represented in condensed form. We can use + `scipy.spatial.distance.squareform` to see the output as a + regular matrix (where each element ``ij`` denotes the cophenetic distance + between each ``i``, ``j`` pair of points in ``X``): + + >>> squareform(cophenet(Z)) + array([[0., 1., 1., 2., 2., 2., 2., 2., 2., 2., 2., 2.], + [1., 0., 1., 2., 2., 2., 2., 2., 2., 2., 2., 2.], + [1., 1., 0., 2., 2., 2., 2., 2., 2., 2., 2., 2.], + [2., 2., 2., 0., 1., 1., 2., 2., 2., 2., 2., 2.], + [2., 2., 2., 1., 0., 1., 2., 2., 2., 2., 2., 2.], + [2., 2., 2., 1., 1., 0., 2., 2., 2., 2., 2., 2.], + [2., 2., 2., 2., 2., 2., 0., 1., 1., 2., 2., 2.], + [2., 2., 2., 2., 2., 2., 1., 0., 1., 2., 2., 2.], + [2., 2., 2., 2., 2., 2., 1., 1., 0., 2., 2., 2.], + [2., 2., 2., 2., 2., 2., 2., 2., 2., 0., 1., 1.], + [2., 2., 2., 2., 2., 2., 2., 2., 2., 1., 0., 1.], + [2., 2., 2., 2., 2., 2., 2., 2., 2., 1., 1., 0.]]) + + In this example, the cophenetic distance between points on ``X`` that are + very close (i.e., in the same corner) is 1. For other pairs of points is 2, + because the points will be located in clusters at different + corners - thus, the distance between these clusters will be larger. + + """ + xp = array_namespace(Z, Y) + # Ensure float64 C-contiguous array. Cython code doesn't deal with striding. + Z = _asarray(Z, order='C', dtype=xp.float64, xp=xp) + is_valid_linkage(Z, throw=True, name='Z') + n = Z.shape[0] + 1 + zz = np.zeros((n * (n-1)) // 2, dtype=np.float64) + + Z = np.asarray(Z) + _hierarchy.cophenetic_distances(Z, zz, int(n)) + zz = xp.asarray(zz) + if Y is None: + return zz + + Y = _asarray(Y, order='C', xp=xp) + distance.is_valid_y(Y, throw=True, name='Y') + + z = xp.mean(zz) + y = xp.mean(Y) + Yy = Y - y + Zz = zz - z + numerator = (Yy * Zz) + denomA = Yy**2 + denomB = Zz**2 + c = xp.sum(numerator) / xp.sqrt(xp.sum(denomA) * xp.sum(denomB)) + return (c, zz) + + +def inconsistent(Z, d=2): + r""" + Calculate inconsistency statistics on a linkage matrix. + + Parameters + ---------- + Z : ndarray + The :math:`(n-1)` by 4 matrix encoding the linkage (hierarchical + clustering). See `linkage` documentation for more information on its + form. + d : int, optional + The number of links up to `d` levels below each non-singleton cluster. + + Returns + ------- + R : ndarray + A :math:`(n-1)` by 4 matrix where the ``i``'th row contains the link + statistics for the non-singleton cluster ``i``. The link statistics are + computed over the link heights for links :math:`d` levels below the + cluster ``i``. ``R[i,0]`` and ``R[i,1]`` are the mean and standard + deviation of the link heights, respectively; ``R[i,2]`` is the number + of links included in the calculation; and ``R[i,3]`` is the + inconsistency coefficient, + + .. 
math:: \frac{\mathtt{Z[i,2]} - \mathtt{R[i,0]}} {R[i,1]} + + Notes + ----- + This function behaves similarly to the MATLAB(TM) ``inconsistent`` + function. + + Examples + -------- + >>> from scipy.cluster.hierarchy import inconsistent, linkage + >>> from matplotlib import pyplot as plt + >>> X = [[i] for i in [2, 8, 0, 4, 1, 9, 9, 0]] + >>> Z = linkage(X, 'ward') + >>> print(Z) + [[ 5. 6. 0. 2. ] + [ 2. 7. 0. 2. ] + [ 0. 4. 1. 2. ] + [ 1. 8. 1.15470054 3. ] + [ 9. 10. 2.12132034 4. ] + [ 3. 12. 4.11096096 5. ] + [11. 13. 14.07183949 8. ]] + >>> inconsistent(Z) + array([[ 0. , 0. , 1. , 0. ], + [ 0. , 0. , 1. , 0. ], + [ 1. , 0. , 1. , 0. ], + [ 0.57735027, 0.81649658, 2. , 0.70710678], + [ 1.04044011, 1.06123822, 3. , 1.01850858], + [ 3.11614065, 1.40688837, 2. , 0.70710678], + [ 6.44583366, 6.76770586, 3. , 1.12682288]]) + + """ + xp = array_namespace(Z) + Z = _asarray(Z, order='C', dtype=xp.float64, xp=xp) + is_valid_linkage(Z, throw=True, name='Z') + + if (not d == np.floor(d)) or d < 0: + raise ValueError('The second argument d must be a nonnegative ' + 'integer value.') + + n = Z.shape[0] + 1 + R = np.zeros((n - 1, 4), dtype=np.float64) + + Z = np.asarray(Z) + _hierarchy.inconsistent(Z, R, int(n), int(d)) + R = xp.asarray(R) + return R + + +def from_mlab_linkage(Z): + """ + Convert a linkage matrix generated by MATLAB(TM) to a new + linkage matrix compatible with this module. + + The conversion does two things: + + * the indices are converted from ``1..N`` to ``0..(N-1)`` form, + and + + * a fourth column ``Z[:,3]`` is added where ``Z[i,3]`` represents the + number of original observations (leaves) in the non-singleton + cluster ``i``. + + This function is useful when loading in linkages from legacy data + files generated by MATLAB. + + Parameters + ---------- + Z : ndarray + A linkage matrix generated by MATLAB(TM). + + Returns + ------- + ZS : ndarray + A linkage matrix compatible with ``scipy.cluster.hierarchy``. + + See Also + -------- + linkage : for a description of what a linkage matrix is. + to_mlab_linkage : transform from SciPy to MATLAB format. + + Examples + -------- + >>> import numpy as np + >>> from scipy.cluster.hierarchy import ward, from_mlab_linkage + + Given a linkage matrix in MATLAB format ``mZ``, we can use + `scipy.cluster.hierarchy.from_mlab_linkage` to import + it into SciPy format: + + >>> mZ = np.array([[1, 2, 1], [4, 5, 1], [7, 8, 1], + ... [10, 11, 1], [3, 13, 1.29099445], + ... [6, 14, 1.29099445], + ... [9, 15, 1.29099445], + ... [12, 16, 1.29099445], + ... [17, 18, 5.77350269], + ... [19, 20, 5.77350269], + ... [21, 22, 8.16496581]]) + + >>> Z = from_mlab_linkage(mZ) + >>> Z + array([[ 0. , 1. , 1. , 2. ], + [ 3. , 4. , 1. , 2. ], + [ 6. , 7. , 1. , 2. ], + [ 9. , 10. , 1. , 2. ], + [ 2. , 12. , 1.29099445, 3. ], + [ 5. , 13. , 1.29099445, 3. ], + [ 8. , 14. , 1.29099445, 3. ], + [ 11. , 15. , 1.29099445, 3. ], + [ 16. , 17. , 5.77350269, 6. ], + [ 18. , 19. , 5.77350269, 6. ], + [ 20. , 21. , 8.16496581, 12. ]]) + + As expected, the linkage matrix ``Z`` returned includes an + additional column counting the number of original samples in + each cluster. Also, all cluster indices are reduced by 1 + (MATLAB format uses 1-indexing, whereas SciPy uses 0-indexing). + + """ + xp = array_namespace(Z) + Z = _asarray(Z, dtype=xp.float64, order='C', xp=xp) + Zs = Z.shape + + # If it's empty, return it. 
+ if len(Zs) == 0 or (len(Zs) == 1 and Zs[0] == 0): + return xp_copy(Z, xp=xp) + + if len(Zs) != 2: + raise ValueError("The linkage array must be rectangular.") + + # If it contains no rows, return it. + if Zs[0] == 0: + return xp_copy(Z, xp=xp) + + if xp.min(Z[:, 0:2]) != 1.0 and xp.max(Z[:, 0:2]) != 2 * Zs[0]: + raise ValueError('The format of the indices is not 1..N') + + Zpart = xp.concat((Z[:, 0:2] - 1.0, Z[:, 2:]), axis=1) + CS = np.zeros((Zs[0],), dtype=np.float64) + if is_jax(xp): + # calculate_cluster_sizes doesn't accept read-only arrays + Zpart = np.array(Zpart, copy=True) + else: + Zpart = np.asarray(Zpart) + _hierarchy.calculate_cluster_sizes(Zpart, CS, int(Zs[0]) + 1) + res = np.hstack([Zpart, CS.reshape(Zs[0], 1)]) + return xp.asarray(res) + + +def to_mlab_linkage(Z): + """ + Convert a linkage matrix to a MATLAB(TM) compatible one. + + Converts a linkage matrix ``Z`` generated by the linkage function + of this module to a MATLAB(TM) compatible one. The return linkage + matrix has the last column removed and the cluster indices are + converted to ``1..N`` indexing. + + Parameters + ---------- + Z : ndarray + A linkage matrix generated by ``scipy.cluster.hierarchy``. + + Returns + ------- + to_mlab_linkage : ndarray + A linkage matrix compatible with MATLAB(TM)'s hierarchical + clustering functions. + + The return linkage matrix has the last column removed + and the cluster indices are converted to ``1..N`` indexing. + + See Also + -------- + linkage : for a description of what a linkage matrix is. + from_mlab_linkage : transform from Matlab to SciPy format. + + Examples + -------- + >>> from scipy.cluster.hierarchy import ward, to_mlab_linkage + >>> from scipy.spatial.distance import pdist + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ... [4, 4], [3, 4], [4, 3]] + + >>> Z = ward(pdist(X)) + >>> Z + array([[ 0. , 1. , 1. , 2. ], + [ 3. , 4. , 1. , 2. ], + [ 6. , 7. , 1. , 2. ], + [ 9. , 10. , 1. , 2. ], + [ 2. , 12. , 1.29099445, 3. ], + [ 5. , 13. , 1.29099445, 3. ], + [ 8. , 14. , 1.29099445, 3. ], + [11. , 15. , 1.29099445, 3. ], + [16. , 17. , 5.77350269, 6. ], + [18. , 19. , 5.77350269, 6. ], + [20. , 21. , 8.16496581, 12. ]]) + + After a linkage matrix ``Z`` has been created, we can use + `scipy.cluster.hierarchy.to_mlab_linkage` to convert it + into MATLAB format: + + >>> mZ = to_mlab_linkage(Z) + >>> mZ + array([[ 1. , 2. , 1. ], + [ 4. , 5. , 1. ], + [ 7. , 8. , 1. ], + [ 10. , 11. , 1. ], + [ 3. , 13. , 1.29099445], + [ 6. , 14. , 1.29099445], + [ 9. , 15. , 1.29099445], + [ 12. , 16. , 1.29099445], + [ 17. , 18. , 5.77350269], + [ 19. , 20. , 5.77350269], + [ 21. , 22. , 8.16496581]]) + + The new linkage matrix ``mZ`` uses 1-indexing for all the + clusters (instead of 0-indexing). Also, the last column of + the original linkage matrix has been dropped. + + """ + xp = array_namespace(Z) + Z = _asarray(Z, order='C', dtype=xp.float64, xp=xp) + Zs = Z.shape + if len(Zs) == 0 or (len(Zs) == 1 and Zs[0] == 0): + return xp_copy(Z, xp=xp) + is_valid_linkage(Z, throw=True, name='Z') + + return xp.concat((Z[:, :2] + 1.0, Z[:, 2:3]), axis=1) + + +def is_monotonic(Z): + """ + Return True if the linkage passed is monotonic. + + The linkage is monotonic if for every cluster :math:`s` and :math:`t` + joined, the distance between them is no less than the distance + between any previously joined clusters. + + Parameters + ---------- + Z : ndarray + The linkage matrix to check for monotonicity. 
+ + Returns + ------- + b : bool + A boolean indicating whether the linkage is monotonic. + + See Also + -------- + linkage : for a description of what a linkage matrix is. + + Examples + -------- + >>> from scipy.cluster.hierarchy import median, ward, is_monotonic + >>> from scipy.spatial.distance import pdist + + By definition, some hierarchical clustering algorithms - such as + `scipy.cluster.hierarchy.ward` - produce monotonic assignments of + samples to clusters; however, this is not always true for other + hierarchical methods - e.g. `scipy.cluster.hierarchy.median`. + + Given a linkage matrix ``Z`` (as the result of a hierarchical clustering + method) we can test programmatically whether it has the monotonicity + property or not, using `scipy.cluster.hierarchy.is_monotonic`: + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ... [4, 4], [3, 4], [4, 3]] + + >>> Z = ward(pdist(X)) + >>> Z + array([[ 0. , 1. , 1. , 2. ], + [ 3. , 4. , 1. , 2. ], + [ 6. , 7. , 1. , 2. ], + [ 9. , 10. , 1. , 2. ], + [ 2. , 12. , 1.29099445, 3. ], + [ 5. , 13. , 1.29099445, 3. ], + [ 8. , 14. , 1.29099445, 3. ], + [11. , 15. , 1.29099445, 3. ], + [16. , 17. , 5.77350269, 6. ], + [18. , 19. , 5.77350269, 6. ], + [20. , 21. , 8.16496581, 12. ]]) + >>> is_monotonic(Z) + True + + >>> Z = median(pdist(X)) + >>> Z + array([[ 0. , 1. , 1. , 2. ], + [ 3. , 4. , 1. , 2. ], + [ 9. , 10. , 1. , 2. ], + [ 6. , 7. , 1. , 2. ], + [ 2. , 12. , 1.11803399, 3. ], + [ 5. , 13. , 1.11803399, 3. ], + [ 8. , 15. , 1.11803399, 3. ], + [11. , 14. , 1.11803399, 3. ], + [18. , 19. , 3. , 6. ], + [16. , 17. , 3.5 , 6. ], + [20. , 21. , 3.25 , 12. ]]) + >>> is_monotonic(Z) + False + + Note that this method is equivalent to just verifying that the distances + in the third column of the linkage matrix appear in a monotonically + increasing order. + + """ + xp = array_namespace(Z) + Z = _asarray(Z, order='c', xp=xp) + is_valid_linkage(Z, throw=True, name='Z') + + # We expect the i'th value to be greater than its successor. + return xp.all(Z[1:, 2] >= Z[:-1, 2]) + + +def is_valid_im(R, warning=False, throw=False, name=None): + """Return True if the inconsistency matrix passed is valid. + + It must be a :math:`n` by 4 array of doubles. The standard + deviations ``R[:,1]`` must be nonnegative. The link counts + ``R[:,2]`` must be positive and no greater than :math:`n-1`. + + Parameters + ---------- + R : ndarray + The inconsistency matrix to check for validity. + warning : bool, optional + When True, issues a Python warning if the linkage + matrix passed is invalid. + throw : bool, optional + When True, throws a Python exception if the linkage + matrix passed is invalid. + name : str, optional + This string refers to the variable name of the invalid + linkage matrix. + + Returns + ------- + b : bool + True if the inconsistency matrix is valid. + + See Also + -------- + linkage : for a description of what a linkage matrix is. + inconsistent : for the creation of a inconsistency matrix. + + Examples + -------- + >>> from scipy.cluster.hierarchy import ward, inconsistent, is_valid_im + >>> from scipy.spatial.distance import pdist + + Given a data set ``X``, we can apply a clustering method to obtain a + linkage matrix ``Z``. `scipy.cluster.hierarchy.inconsistent` can + be also used to obtain the inconsistency matrix ``R`` associated to + this clustering process: + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ... 
[4, 4], [3, 4], [4, 3]] + + >>> Z = ward(pdist(X)) + >>> R = inconsistent(Z) + >>> Z + array([[ 0. , 1. , 1. , 2. ], + [ 3. , 4. , 1. , 2. ], + [ 6. , 7. , 1. , 2. ], + [ 9. , 10. , 1. , 2. ], + [ 2. , 12. , 1.29099445, 3. ], + [ 5. , 13. , 1.29099445, 3. ], + [ 8. , 14. , 1.29099445, 3. ], + [11. , 15. , 1.29099445, 3. ], + [16. , 17. , 5.77350269, 6. ], + [18. , 19. , 5.77350269, 6. ], + [20. , 21. , 8.16496581, 12. ]]) + >>> R + array([[1. , 0. , 1. , 0. ], + [1. , 0. , 1. , 0. ], + [1. , 0. , 1. , 0. ], + [1. , 0. , 1. , 0. ], + [1.14549722, 0.20576415, 2. , 0.70710678], + [1.14549722, 0.20576415, 2. , 0.70710678], + [1.14549722, 0.20576415, 2. , 0.70710678], + [1.14549722, 0.20576415, 2. , 0.70710678], + [2.78516386, 2.58797734, 3. , 1.15470054], + [2.78516386, 2.58797734, 3. , 1.15470054], + [6.57065706, 1.38071187, 3. , 1.15470054]]) + + Now we can use `scipy.cluster.hierarchy.is_valid_im` to verify that + ``R`` is correct: + + >>> is_valid_im(R) + True + + However, if ``R`` is wrongly constructed (e.g., one of the standard + deviations is set to a negative value), then the check will fail: + + >>> R[-1,1] = R[-1,1] * -1 + >>> is_valid_im(R) + False + + """ + xp = array_namespace(R) + R = _asarray(R, order='c', xp=xp) + valid = True + name_str = f"{name!r} " if name else '' + try: + if R.dtype != xp.float64: + raise TypeError(f'Inconsistency matrix {name_str}must contain doubles ' + '(double).') + if len(R.shape) != 2: + raise ValueError(f'Inconsistency matrix {name_str}must have shape=2 (i.e. ' + 'be two-dimensional).') + if R.shape[1] != 4: + raise ValueError(f'Inconsistency matrix {name_str}' + 'must have 4 columns.') + if R.shape[0] < 1: + raise ValueError(f'Inconsistency matrix {name_str}' + 'must have at least one row.') + if xp.any(R[:, 0] < 0): + raise ValueError(f'Inconsistency matrix {name_str}' + 'contains negative link height means.') + if xp.any(R[:, 1] < 0): + raise ValueError(f'Inconsistency matrix {name_str}' + 'contains negative link height standard deviations.') + if xp.any(R[:, 2] < 0): + raise ValueError(f'Inconsistency matrix {name_str}' + 'contains negative link counts.') + except Exception as e: + if throw: + raise + if warning: + _warning(str(e)) + valid = False + + return valid + + + def is_valid_linkage(Z, warning=False, throw=False, name=None): + """ + Check the validity of a linkage matrix. + + A linkage matrix is valid if it is a 2-D array (type double) + with :math:`n` rows and 4 columns. The first two columns must contain + indices between 0 and :math:`2n-1`. For a given row ``i``, the following + two expressions have to hold: + + .. math:: + + 0 \\leq \\mathtt{Z[i,0]} \\leq i+n-1 + 0 \\leq \\mathtt{Z[i,1]} \\leq i+n-1 + + I.e., a cluster cannot join another cluster unless the cluster being joined + has been generated. + + The fourth column of `Z` represents the number of original observations + in a cluster, so a valid ``Z[i, 3]`` value may not exceed the number of + original observations. + + Parameters + ---------- + Z : array_like + Linkage matrix. + warning : bool, optional + When True, issues a Python warning if the linkage + matrix passed is invalid. + throw : bool, optional + When True, throws a Python exception if the linkage + matrix passed is invalid. + name : str, optional + This string refers to the variable name of the invalid + linkage matrix. + + Returns + ------- + b : bool + True if the linkage matrix is valid. + + See Also + -------- + linkage : for a description of what a linkage matrix is. 
+ + Examples + -------- + >>> from scipy.cluster.hierarchy import ward, is_valid_linkage + >>> from scipy.spatial.distance import pdist + + All linkage matrices generated by the clustering methods in this module + will be valid (i.e., they will have the appropriate dimensions and the two + required expressions will hold for all the rows). + + We can check this using `scipy.cluster.hierarchy.is_valid_linkage`: + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ... [4, 4], [3, 4], [4, 3]] + + >>> Z = ward(pdist(X)) + >>> Z + array([[ 0. , 1. , 1. , 2. ], + [ 3. , 4. , 1. , 2. ], + [ 6. , 7. , 1. , 2. ], + [ 9. , 10. , 1. , 2. ], + [ 2. , 12. , 1.29099445, 3. ], + [ 5. , 13. , 1.29099445, 3. ], + [ 8. , 14. , 1.29099445, 3. ], + [11. , 15. , 1.29099445, 3. ], + [16. , 17. , 5.77350269, 6. ], + [18. , 19. , 5.77350269, 6. ], + [20. , 21. , 8.16496581, 12. ]]) + >>> is_valid_linkage(Z) + True + + However, if we create a linkage matrix in a wrong way - or if we modify + a valid one in a way that any of the required expressions don't hold + anymore, then the check will fail: + + >>> Z[3][1] = 20 # the cluster number 20 is not defined at this point + >>> is_valid_linkage(Z) + False + + """ + xp = array_namespace(Z) + Z = _asarray(Z, order='c', xp=xp) + valid = True + name_str = f"{name!r} " if name else '' + try: + if Z.dtype != xp.float64: + raise TypeError(f'Linkage matrix {name_str}must contain doubles.') + if len(Z.shape) != 2: + raise ValueError(f'Linkage matrix {name_str}must have shape=2 (i.e. be' + ' two-dimensional).') + if Z.shape[1] != 4: + raise ValueError(f'Linkage matrix {name_str}must have 4 columns.') + if Z.shape[0] == 0: + raise ValueError('Linkage must be computed on at least two ' + 'observations.') + n = Z.shape[0] + if n > 1: + if (xp.any(Z[:, 0] < 0) or xp.any(Z[:, 1] < 0)): + raise ValueError(f'Linkage {name_str}contains negative indices.') + if xp.any(Z[:, 2] < 0): + raise ValueError(f'Linkage {name_str}contains negative distances.') + if xp.any(Z[:, 3] < 0): + raise ValueError(f'Linkage {name_str}contains negative counts.') + if xp.any(Z[:, 3] > (Z.shape[0] + 1)): + raise ValueError('Linkage matrix contains excessive observations' + 'in a cluster') + if _check_hierarchy_uses_cluster_before_formed(Z): + raise ValueError(f'Linkage {name_str}uses non-singleton cluster before' + ' it is formed.') + if _check_hierarchy_uses_cluster_more_than_once(Z): + raise ValueError(f'Linkage {name_str}uses the same cluster more than once.') + except Exception as e: + if throw: + raise + if warning: + _warning(str(e)) + valid = False + + return valid + + +def _check_hierarchy_uses_cluster_before_formed(Z): + n = Z.shape[0] + 1 + for i in range(0, n - 1): + if Z[i, 0] >= n + i or Z[i, 1] >= n + i: + return True + return False + + +def _check_hierarchy_uses_cluster_more_than_once(Z): + n = Z.shape[0] + 1 + chosen = set() + for i in range(0, n - 1): + used_more_than_once = ( + (float(Z[i, 0]) in chosen) + or (float(Z[i, 1]) in chosen) + or Z[i, 0] == Z[i, 1] + ) + if used_more_than_once: + return True + chosen.add(float(Z[i, 0])) + chosen.add(float(Z[i, 1])) + return False + + +def _check_hierarchy_not_all_clusters_used(Z): + n = Z.shape[0] + 1 + chosen = set() + for i in range(0, n - 1): + chosen.add(int(Z[i, 0])) + chosen.add(int(Z[i, 1])) + must_chosen = set(range(0, 2 * n - 2)) + return len(must_chosen.difference(chosen)) > 0 + + +def num_obs_linkage(Z): + """ + Return the number of original observations of the linkage matrix passed. 
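+ + For a linkage built from ``n`` original observations, ``Z`` has ``n - 1`` rows, so the returned count is simply ``Z.shape[0] + 1``.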
+ + Parameters + ---------- + Z : ndarray + The linkage matrix on which to perform the operation. + + Returns + ------- + n : int + The number of original observations in the linkage. + + Examples + -------- + >>> from scipy.cluster.hierarchy import ward, num_obs_linkage + >>> from scipy.spatial.distance import pdist + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ... [4, 4], [3, 4], [4, 3]] + + >>> Z = ward(pdist(X)) + + ``Z`` is a linkage matrix obtained after using the Ward clustering method + with ``X``, a dataset with 12 data points. + + >>> num_obs_linkage(Z) + 12 + + """ + xp = array_namespace(Z) + Z = _asarray(Z, order='c', xp=xp) + is_valid_linkage(Z, throw=True, name='Z') + return (Z.shape[0] + 1) + + +def correspond(Z, Y): + """ + Check for correspondence between linkage and condensed distance matrices. + + They must have the same number of original observations for + the check to succeed. + + This function is useful as a sanity check in algorithms that make + extensive use of linkage and distance matrices that must + correspond to the same set of original observations. + + Parameters + ---------- + Z : array_like + The linkage matrix to check for correspondence. + Y : array_like + The condensed distance matrix to check for correspondence. + + Returns + ------- + b : bool + A boolean indicating whether the linkage matrix and distance + matrix could possibly correspond to one another. + + See Also + -------- + linkage : for a description of what a linkage matrix is. + + Examples + -------- + >>> from scipy.cluster.hierarchy import ward, correspond + >>> from scipy.spatial.distance import pdist + + This method can be used to check if a given linkage matrix ``Z`` has been + obtained from the application of a cluster method over a dataset ``X``: + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ... [4, 4], [3, 4], [4, 3]] + >>> X_condensed = pdist(X) + >>> Z = ward(X_condensed) + + Here, we can compare ``Z`` and ``X`` (in condensed form): + + >>> correspond(Z, X_condensed) + True + + """ + is_valid_linkage(Z, throw=True) + distance.is_valid_y(Y, throw=True) + xp = array_namespace(Z, Y) + Z = _asarray(Z, order='c', xp=xp) + Y = _asarray(Y, order='c', xp=xp) + return distance.num_obs_y(Y) == num_obs_linkage(Z) + + +def fcluster(Z, t, criterion='inconsistent', depth=2, R=None, monocrit=None): + """ + Form flat clusters from the hierarchical clustering defined by + the given linkage matrix. + + Parameters + ---------- + Z : ndarray + The hierarchical clustering encoded with the matrix returned + by the `linkage` function. + t : scalar + For criteria 'inconsistent', 'distance' or 'monocrit', + this is the threshold to apply when forming flat clusters. + For 'maxclust' or 'maxclust_monocrit' criteria, + this would be max number of clusters requested. + criterion : str, optional + The criterion to use in forming flat clusters. This can + be any of the following values: + + ``inconsistent`` : + If a cluster node and all its + descendants have an inconsistent value less than or equal + to `t`, then all its leaf descendants belong to the + same flat cluster. When no non-singleton cluster meets + this criterion, every node is assigned to its own + cluster. (Default) + + ``distance`` : + Forms flat clusters so that the original + observations in each flat cluster have no greater a + cophenetic distance than `t`. 
+ + ``maxclust`` : + Finds a minimum threshold ``r`` so that + the cophenetic distance between any two original + observations in the same flat cluster is no more than + ``r`` and no more than `t` flat clusters are formed. + + ``monocrit`` : + Forms a flat cluster from a cluster node c + with index i when ``monocrit[j] <= t``. + + For example, to threshold on the maximum mean distance + as computed in the inconsistency matrix R with a + threshold of 0.8 do:: + + MR = maxRstat(Z, R, 3) + fcluster(Z, t=0.8, criterion='monocrit', monocrit=MR) + + ``maxclust_monocrit`` : + Forms a flat cluster from a + non-singleton cluster node ``c`` when ``monocrit[i] <= + r`` for all cluster indices ``i`` below and including + ``c``. ``r`` is minimized such that no more than ``t`` + flat clusters are formed. monocrit must be + monotonic. For example, to minimize the threshold t on + maximum inconsistency values so that no more than 3 flat + clusters are formed, do:: + + MI = maxinconsts(Z, R) + fcluster(Z, t=3, criterion='maxclust_monocrit', monocrit=MI) + depth : int, optional + The maximum depth to perform the inconsistency calculation. + It has no meaning for the other criteria. Default is 2. + R : ndarray, optional + The inconsistency matrix to use for the ``'inconsistent'`` + criterion. This matrix is computed if not provided. + monocrit : ndarray, optional + An array of length n-1. `monocrit[i]` is the + statistics upon which non-singleton i is thresholded. The + monocrit vector must be monotonic, i.e., given a node c with + index i, for all node indices j corresponding to nodes + below c, ``monocrit[i] >= monocrit[j]``. + + Returns + ------- + fcluster : ndarray + An array of length ``n``. ``T[i]`` is the flat cluster number to + which original observation ``i`` belongs. + + See Also + -------- + linkage : for information about hierarchical clustering methods work. + + Examples + -------- + >>> from scipy.cluster.hierarchy import ward, fcluster + >>> from scipy.spatial.distance import pdist + + All cluster linkage methods - e.g., `scipy.cluster.hierarchy.ward` + generate a linkage matrix ``Z`` as their output: + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ... [4, 4], [3, 4], [4, 3]] + + >>> Z = ward(pdist(X)) + + >>> Z + array([[ 0. , 1. , 1. , 2. ], + [ 3. , 4. , 1. , 2. ], + [ 6. , 7. , 1. , 2. ], + [ 9. , 10. , 1. , 2. ], + [ 2. , 12. , 1.29099445, 3. ], + [ 5. , 13. , 1.29099445, 3. ], + [ 8. , 14. , 1.29099445, 3. ], + [11. , 15. , 1.29099445, 3. ], + [16. , 17. , 5.77350269, 6. ], + [18. , 19. , 5.77350269, 6. ], + [20. , 21. , 8.16496581, 12. ]]) + + This matrix represents a dendrogram, where the first and second elements + are the two clusters merged at each step, the third element is the + distance between these clusters, and the fourth element is the size of + the new cluster - the number of original data points included. + + `scipy.cluster.hierarchy.fcluster` can be used to flatten the + dendrogram, obtaining as a result an assignation of the original data + points to single clusters. 
+ + This assignation mostly depends on a distance threshold ``t`` - the maximum + inter-cluster distance allowed: + + >>> fcluster(Z, t=0.9, criterion='distance') + array([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], dtype=int32) + + >>> fcluster(Z, t=1.1, criterion='distance') + array([1, 1, 2, 3, 3, 4, 5, 5, 6, 7, 7, 8], dtype=int32) + + >>> fcluster(Z, t=3, criterion='distance') + array([1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4], dtype=int32) + + >>> fcluster(Z, t=9, criterion='distance') + array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32) + + In the first case, the threshold ``t`` is too small to allow any two + samples in the data to form a cluster, so 12 different clusters are + returned. + + In the second case, the threshold is large enough to allow the first + 4 points to be merged with their nearest neighbors. So, here, only 8 + clusters are returned. + + The third case, with a much higher threshold, allows for up to 8 data + points to be connected - so 4 clusters are returned here. + + Lastly, the threshold of the fourth case is large enough to allow for + all data points to be merged together - so a single cluster is returned. + + """ + xp = array_namespace(Z) + Z = _asarray(Z, order='C', dtype=xp.float64, xp=xp) + is_valid_linkage(Z, throw=True, name='Z') + + n = Z.shape[0] + 1 + T = np.zeros((n,), dtype='i') + + if monocrit is not None: + monocrit = np.asarray(monocrit, order='C', dtype=np.float64) + + Z = np.asarray(Z) + monocrit = np.asarray(monocrit) + if criterion == 'inconsistent': + if R is None: + R = inconsistent(Z, depth) + else: + R = _asarray(R, order='C', dtype=xp.float64, xp=xp) + is_valid_im(R, throw=True, name='R') + # Since the C code does not support striding using strides. + # The dimensions are used instead. + R = np.asarray(R) + _hierarchy.cluster_in(Z, R, T, float(t), int(n)) + elif criterion == 'distance': + _hierarchy.cluster_dist(Z, T, float(t), int(n)) + elif criterion == 'maxclust': + _hierarchy.cluster_maxclust_dist(Z, T, int(n), t) + elif criterion == 'monocrit': + _hierarchy.cluster_monocrit(Z, monocrit, T, float(t), int(n)) + elif criterion == 'maxclust_monocrit': + _hierarchy.cluster_maxclust_monocrit(Z, monocrit, T, int(n), int(t)) + else: + raise ValueError(f'Invalid cluster formation criterion: {str(criterion)}') + return xp.asarray(T) + + +def fclusterdata(X, t, criterion='inconsistent', + metric='euclidean', depth=2, method='single', R=None): + """ + Cluster observation data using a given metric. + + Clusters the original observations in the n-by-m data + matrix X (n observations in m dimensions), using the euclidean + distance metric to calculate distances between original observations, + performs hierarchical clustering using the single linkage algorithm, + and forms flat clusters using the inconsistency method with `t` as the + cut-off threshold. + + A 1-D array ``T`` of length ``n`` is returned. ``T[i]`` is + the index of the flat cluster to which the original observation ``i`` + belongs. + + Parameters + ---------- + X : (N, M) ndarray + N by M data matrix with N observations in M dimensions. + t : scalar + For criteria 'inconsistent', 'distance' or 'monocrit', + this is the threshold to apply when forming flat clusters. + For 'maxclust' or 'maxclust_monocrit' criteria, + this would be max number of clusters requested. + criterion : str, optional + Specifies the criterion for forming flat clusters. Valid + values are 'inconsistent' (default), 'distance', or 'maxclust' + cluster formation algorithms. See `fcluster` for descriptions. 
+ metric : str or function, optional + The distance metric for calculating pairwise distances. See + ``distance.pdist`` for descriptions and linkage to verify + compatibility with the linkage method. + depth : int, optional + The maximum depth for the inconsistency calculation. See + `inconsistent` for more information. + method : str, optional + The linkage method to use (single, complete, average, + weighted, median centroid, ward). See `linkage` for more + information. Default is "single". + R : ndarray, optional + The inconsistency matrix. It will be computed if necessary + if it is not passed. + + Returns + ------- + fclusterdata : ndarray + A vector of length n. T[i] is the flat cluster number to + which original observation i belongs. + + See Also + -------- + scipy.spatial.distance.pdist : pairwise distance metrics + + Notes + ----- + This function is similar to the MATLAB function ``clusterdata``. + + Examples + -------- + >>> from scipy.cluster.hierarchy import fclusterdata + + This is a convenience method that abstracts all the steps to perform in a + typical SciPy's hierarchical clustering workflow. + + * Transform the input data into a condensed matrix with + `scipy.spatial.distance.pdist`. + + * Apply a clustering method. + + * Obtain flat clusters at a user defined distance threshold ``t`` using + `scipy.cluster.hierarchy.fcluster`. + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ... [4, 4], [3, 4], [4, 3]] + + >>> fclusterdata(X, t=1) + array([3, 3, 3, 4, 4, 4, 2, 2, 2, 1, 1, 1], dtype=int32) + + The output here (for the dataset ``X``, distance threshold ``t``, and the + default settings) is four clusters with three data points each. + + """ + xp = array_namespace(X) + X = _asarray(X, order='C', dtype=xp.float64, xp=xp) + + if X.ndim != 2: + raise TypeError('The observation matrix X must be an n by m ' + 'array.') + + Y = distance.pdist(X, metric=metric) + Y = xp.asarray(Y) + Z = linkage(Y, method=method) + if R is None: + R = inconsistent(Z, d=depth) + else: + R = _asarray(R, order='c', xp=xp) + T = fcluster(Z, criterion=criterion, depth=depth, R=R, t=t) + return T + + +def leaves_list(Z): + """ + Return a list of leaf node ids. + + The return corresponds to the observation vector index as it appears + in the tree from left to right. Z is a linkage matrix. + + Parameters + ---------- + Z : ndarray + The hierarchical clustering encoded as a matrix. `Z` is + a linkage matrix. See `linkage` for more information. + + Returns + ------- + leaves_list : ndarray + The list of leaf node ids. + + See Also + -------- + dendrogram : for information about dendrogram structure. + + Examples + -------- + >>> from scipy.cluster.hierarchy import ward, dendrogram, leaves_list + >>> from scipy.spatial.distance import pdist + >>> from matplotlib import pyplot as plt + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ... [4, 4], [3, 4], [4, 3]] + + >>> Z = ward(pdist(X)) + + The linkage matrix ``Z`` represents a dendrogram, that is, a tree that + encodes the structure of the clustering performed. 
+ `scipy.cluster.hierarchy.leaves_list` shows the mapping between + indices in the ``X`` dataset and leaves in the dendrogram: + + >>> leaves_list(Z) + array([ 2, 0, 1, 5, 3, 4, 8, 6, 7, 11, 9, 10], dtype=int32) + + >>> fig = plt.figure(figsize=(25, 10)) + >>> dn = dendrogram(Z) + >>> plt.show() + + """ + xp = array_namespace(Z) + Z = _asarray(Z, order='C', xp=xp) + is_valid_linkage(Z, throw=True, name='Z') + n = Z.shape[0] + 1 + ML = np.zeros((n,), dtype='i') + Z = np.asarray(Z) + _hierarchy.prelist(Z, ML, n) + return xp.asarray(ML) + + +# Maps number of leaves to text size. +# +# p <= 20, size="12" +# 20 < p <= 30, size="10" +# 30 < p <= 50, size="8" +# 50 < p <= np.inf, size="6" + +_dtextsizes = {20: 12, 30: 10, 50: 8, 85: 6, np.inf: 5} +_drotation = {20: 0, 40: 45, np.inf: 90} +_dtextsortedkeys = list(_dtextsizes.keys()) +_dtextsortedkeys.sort() +_drotationsortedkeys = list(_drotation.keys()) +_drotationsortedkeys.sort() + + +def _remove_dups(L): + """ + Remove duplicates AND preserve the original order of the elements. + + The set class is not guaranteed to do this. + """ + seen_before = set() + L2 = [] + for i in L: + if i not in seen_before: + seen_before.add(i) + L2.append(i) + return L2 + + +def _get_tick_text_size(p): + for k in _dtextsortedkeys: + if p <= k: + return _dtextsizes[k] + + +def _get_tick_rotation(p): + for k in _drotationsortedkeys: + if p <= k: + return _drotation[k] + + +def _plot_dendrogram(icoords, dcoords, ivl, p, n, mh, orientation, + no_labels, color_list, leaf_font_size=None, + leaf_rotation=None, contraction_marks=None, + ax=None, above_threshold_color='C0'): + # Import matplotlib here so that it's not imported unless dendrograms + # are plotted. Raise an informative error if importing fails. + try: + # if an axis is provided, don't use pylab at all + if ax is None: + import matplotlib.pylab + import matplotlib.patches + import matplotlib.collections + except ImportError as e: + raise ImportError("You must install the matplotlib library to plot " + "the dendrogram. 
Use no_plot=True to calculate the " + "dendrogram without plotting.") from e + + if ax is None: + ax = matplotlib.pylab.gca() + # if we're using pylab, we want to trigger a draw at the end + trigger_redraw = True + else: + trigger_redraw = False + + # Independent variable plot width + ivw = len(ivl) * 10 + # Dependent variable plot height + dvw = mh + mh * 0.05 + + iv_ticks = np.arange(5, len(ivl) * 10 + 5, 10) + if orientation in ('top', 'bottom'): + if orientation == 'top': + ax.set_ylim([0, dvw]) + ax.set_xlim([0, ivw]) + else: + ax.set_ylim([dvw, 0]) + ax.set_xlim([0, ivw]) + + xlines = icoords + ylines = dcoords + if no_labels: + ax.set_xticks([]) + ax.set_xticklabels([]) + else: + ax.set_xticks(iv_ticks) + + if orientation == 'top': + ax.xaxis.set_ticks_position('bottom') + else: + ax.xaxis.set_ticks_position('top') + + # Make the tick marks invisible because they cover up the links + for line in ax.get_xticklines(): + line.set_visible(False) + + leaf_rot = (float(_get_tick_rotation(len(ivl))) + if (leaf_rotation is None) else leaf_rotation) + leaf_font = (float(_get_tick_text_size(len(ivl))) + if (leaf_font_size is None) else leaf_font_size) + ax.set_xticklabels(ivl, rotation=leaf_rot, size=leaf_font) + + elif orientation in ('left', 'right'): + if orientation == 'left': + ax.set_xlim([dvw, 0]) + ax.set_ylim([0, ivw]) + else: + ax.set_xlim([0, dvw]) + ax.set_ylim([0, ivw]) + + xlines = dcoords + ylines = icoords + if no_labels: + ax.set_yticks([]) + ax.set_yticklabels([]) + else: + ax.set_yticks(iv_ticks) + + if orientation == 'left': + ax.yaxis.set_ticks_position('right') + else: + ax.yaxis.set_ticks_position('left') + + # Make the tick marks invisible because they cover up the links + for line in ax.get_yticklines(): + line.set_visible(False) + + leaf_font = (float(_get_tick_text_size(len(ivl))) + if (leaf_font_size is None) else leaf_font_size) + + if leaf_rotation is not None: + ax.set_yticklabels(ivl, rotation=leaf_rotation, size=leaf_font) + else: + ax.set_yticklabels(ivl, size=leaf_font) + + # Let's use collections instead. This way there is a separate legend item + # for each tree grouping, rather than stupidly one for each line segment. + colors_used = _remove_dups(color_list) + color_to_lines = {} + for color in colors_used: + color_to_lines[color] = [] + for (xline, yline, color) in zip(xlines, ylines, color_list): + color_to_lines[color].append(list(zip(xline, yline))) + + colors_to_collections = {} + # Construct the collections. + for color in colors_used: + coll = matplotlib.collections.LineCollection(color_to_lines[color], + colors=(color,)) + colors_to_collections[color] = coll + + # Add all the groupings below the color threshold. + for color in colors_used: + if color != above_threshold_color: + ax.add_collection(colors_to_collections[color]) + # If there's a grouping of links above the color threshold, it goes last. 
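+ # (Collections added to the Axes later are drawn after those added earlier at the same zorder, so the above-threshold links end up on top where they overlap the colored groupings.)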
+ if above_threshold_color in colors_to_collections: + ax.add_collection(colors_to_collections[above_threshold_color]) + + if contraction_marks is not None: + Ellipse = matplotlib.patches.Ellipse + for (x, y) in contraction_marks: + if orientation in ('left', 'right'): + e = Ellipse((y, x), width=dvw / 100, height=1.0) + else: + e = Ellipse((x, y), width=1.0, height=dvw / 100) + ax.add_artist(e) + e.set_clip_box(ax.bbox) + e.set_alpha(0.5) + e.set_facecolor('k') + + if trigger_redraw: + matplotlib.pylab.draw_if_interactive() + + +# C0 is used for above threshold color +_link_line_colors_default = ('C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9') +_link_line_colors = list(_link_line_colors_default) + + +def set_link_color_palette(palette): + """ + Set list of matplotlib color codes for use by dendrogram. + + Note that this palette is global (i.e., setting it once changes the colors + for all subsequent calls to `dendrogram`) and that it affects only the + the colors below ``color_threshold``. + + Note that `dendrogram` also accepts a custom coloring function through its + ``link_color_func`` keyword, which is more flexible and non-global. + + Parameters + ---------- + palette : list of str or None + A list of matplotlib color codes. The order of the color codes is the + order in which the colors are cycled through when color thresholding in + the dendrogram. + + If ``None``, resets the palette to its default (which are matplotlib + default colors C1 to C9). + + Returns + ------- + None + + See Also + -------- + dendrogram + + Notes + ----- + Ability to reset the palette with ``None`` added in SciPy 0.17.0. + + Thread safety: using this function in a multi-threaded fashion may + result in `dendrogram` producing plots with unexpected colors. + + Examples + -------- + >>> import numpy as np + >>> from scipy.cluster import hierarchy + >>> ytdist = np.array([662., 877., 255., 412., 996., 295., 468., 268., + ... 400., 754., 564., 138., 219., 869., 669.]) + >>> Z = hierarchy.linkage(ytdist, 'single') + >>> dn = hierarchy.dendrogram(Z, no_plot=True) + >>> dn['color_list'] + ['C1', 'C0', 'C0', 'C0', 'C0'] + >>> hierarchy.set_link_color_palette(['c', 'm', 'y', 'k']) + >>> dn = hierarchy.dendrogram(Z, no_plot=True, above_threshold_color='b') + >>> dn['color_list'] + ['c', 'b', 'b', 'b', 'b'] + >>> dn = hierarchy.dendrogram(Z, no_plot=True, color_threshold=267, + ... above_threshold_color='k') + >>> dn['color_list'] + ['c', 'm', 'm', 'k', 'k'] + + Now, reset the color palette to its default: + + >>> hierarchy.set_link_color_palette(None) + + """ + if palette is None: + # reset to its default + palette = _link_line_colors_default + elif not isinstance(palette, (list, tuple)): + raise TypeError("palette must be a list or tuple") + _ptypes = [isinstance(p, str) for p in palette] + + if False in _ptypes: + raise TypeError("all palette list elements must be color strings") + + global _link_line_colors + _link_line_colors = palette + + +def dendrogram(Z, p=30, truncate_mode=None, color_threshold=None, + get_leaves=True, orientation='top', labels=None, + count_sort=False, distance_sort=False, show_leaf_counts=True, + no_plot=False, no_labels=False, leaf_font_size=None, + leaf_rotation=None, leaf_label_func=None, + show_contracted=False, link_color_func=None, ax=None, + above_threshold_color='C0'): + """ + Plot the hierarchical clustering as a dendrogram. + + The dendrogram illustrates how each cluster is + composed by drawing a U-shaped link between a non-singleton + cluster and its children. 
The top of the U-link indicates a + cluster merge. The two legs of the U-link indicate which clusters + were merged. The length of the two legs of the U-link represents + the distance between the child clusters. It is also the + cophenetic distance between original observations in the two + children clusters. + + Parameters + ---------- + Z : ndarray + The linkage matrix encoding the hierarchical clustering to + render as a dendrogram. See the ``linkage`` function for more + information on the format of ``Z``. + p : int, optional + The ``p`` parameter for ``truncate_mode``. + truncate_mode : str, optional + The dendrogram can be hard to read when the original + observation matrix from which the linkage is derived is + large. Truncation is used to condense the dendrogram. There + are several modes: + + ``None`` + No truncation is performed (default). + Note: ``'none'`` is an alias for ``None`` that's kept for + backward compatibility. + + ``'lastp'`` + The last ``p`` non-singleton clusters formed in the linkage are the + only non-leaf nodes in the linkage; they correspond to rows + ``Z[n-p-2:end]`` in ``Z``. All other non-singleton clusters are + contracted into leaf nodes. + + ``'level'`` + No more than ``p`` levels of the dendrogram tree are displayed. + A "level" includes all nodes with ``p`` merges from the final merge. + + Note: ``'mtica'`` is an alias for ``'level'`` that's kept for + backward compatibility. + + color_threshold : double, optional + For brevity, let :math:`t` be the ``color_threshold``. + Colors all the descendent links below a cluster node + :math:`k` the same color if :math:`k` is the first node below + the cut threshold :math:`t`. All links connecting nodes with + distances greater than or equal to the threshold are colored + with the default matplotlib color ``'C0'``. If :math:`t` is less + than or equal to zero, all nodes are colored ``'C0'``. + If ``color_threshold`` is None or 'default', + corresponding with MATLAB(TM) behavior, the threshold is set to + ``0.7*max(Z[:,2])``. + + get_leaves : bool, optional + Includes a list ``R['leaves']=H`` in the result + dictionary. For each :math:`i`, ``H[i] == j``, cluster node + ``j`` appears in position ``i`` in the left-to-right traversal + of the leaves, where :math:`j < 2n-1` and :math:`i < n`. + orientation : str, optional + The direction to plot the dendrogram, which can be any + of the following strings: + + ``'top'`` + Plots the root at the top, and plots descendent links going downwards. + (default). + + ``'bottom'`` + Plots the root at the bottom, and plots descendent links going + upwards. + + ``'left'`` + Plots the root at the left, and plots descendent links going right. + + ``'right'`` + Plots the root at the right, and plots descendent links going left. + + labels : ndarray, optional + By default, ``labels`` is None so the index of the original observation + is used to label the leaf nodes. Otherwise, this is an :math:`n`-sized + sequence, with ``n == Z.shape[0] + 1``. The ``labels[i]`` value is the + text to put under the :math:`i` th leaf node only if it corresponds to + an original observation and not a non-singleton cluster. + count_sort : str or bool, optional + For each node n, the order (visually, from left-to-right) in which n's + two descendent links are plotted is determined by this + parameter, which can be any of the following values: + + ``False`` + Nothing is done. + + ``'ascending'`` or ``True`` + The child with the minimum number of original objects in its cluster + is plotted first. 
+ + ``'descending'`` + The child with the maximum number of original objects in its cluster + is plotted first. + + Note, ``distance_sort`` and ``count_sort`` cannot both be True. + distance_sort : str or bool, optional + For each node n, the order (visually, from left-to-right) in which n's + two descendent links are plotted is determined by this + parameter, which can be any of the following values: + + ``False`` + Nothing is done. + + ``'ascending'`` or ``True`` + The child with the minimum distance between its direct descendents is + plotted first. + + ``'descending'`` + The child with the maximum distance between its direct descendents is + plotted first. + + Note ``distance_sort`` and ``count_sort`` cannot both be True. + show_leaf_counts : bool, optional + When True, leaf nodes representing :math:`k>1` original + observations are labeled with the number of observations they + contain in parentheses. + no_plot : bool, optional + When True, the final rendering is not performed. This is + useful if only the data structures computed for the rendering + are needed or if matplotlib is not available. + no_labels : bool, optional + When True, no labels appear next to the leaf nodes in the + rendering of the dendrogram. + leaf_rotation : double, optional + Specifies the angle (in degrees) to rotate the leaf + labels. When unspecified, the rotation is based on the number of + nodes in the dendrogram (default is 0). + leaf_font_size : int, optional + Specifies the font size (in points) of the leaf labels. When + unspecified, the size is based on the number of nodes in the + dendrogram. + leaf_label_func : lambda or function, optional + When ``leaf_label_func`` is a callable function, it is called with each + leaf's cluster index :math:`k < 2n-1` and is expected to return a string + with the label for that leaf. + + Indices :math:`k < n` correspond to original observations + while indices :math:`k \\geq n` correspond to non-singleton + clusters. + + For example, to label singletons with their node id and + non-singletons with their id, count, and inconsistency + coefficient, simply do:: + + # First define the leaf label function. + def llf(id): + if id < n: + return str(id) + else: + return '[%d %d %1.2f]' % (id, count, R[n-id,3]) + + # The text for the leaf nodes is going to be big so force + # a rotation of 90 degrees. + dendrogram(Z, leaf_label_func=llf, leaf_rotation=90) + + # leaf_label_func can also be used together with ``truncate_mode``, + # in which case you will get your leaves labeled after truncation: + dendrogram(Z, leaf_label_func=llf, leaf_rotation=90, + truncate_mode='level', p=2) + + show_contracted : bool, optional + When True, the heights of non-singleton nodes contracted + into a leaf node are plotted as crosses along the link + connecting that leaf node. This really is only useful when + truncation is used (see ``truncate_mode`` parameter). + link_color_func : callable, optional + If given, `link_color_func` is called with each non-singleton id + corresponding to each U-shaped link it will paint. The function is + expected to return the color to paint the link, encoded as a matplotlib + color string code. For example:: + + dendrogram(Z, link_color_func=lambda k: colors[k]) + + colors the direct links below each untruncated non-singleton node + ``k`` using ``colors[k]``. + ax : matplotlib Axes instance, optional + If None and `no_plot` is not True, the dendrogram will be plotted + on the current axes. 
Otherwise if `no_plot` is not True the + dendrogram will be plotted on the given ``Axes`` instance. This can be + useful if the dendrogram is part of a more complex figure. + above_threshold_color : str, optional + This matplotlib color string sets the color of the links above the + color_threshold. The default is ``'C0'``. + + Returns + ------- + R : dict + A dictionary of data structures computed to render the + dendrogram. It has the following keys: + + ``'color_list'`` + A list of color names. The k'th element represents the color of the + k'th link. + + ``'icoord'`` and ``'dcoord'`` + Each of them is a list of lists. Let ``icoord = [I1, I2, ..., Ip]`` + where ``Ik = [xk1, xk2, xk3, xk4]`` and ``dcoord = [D1, D2, ..., Dp]`` + where ``Dk = [yk1, yk2, yk3, yk4]``, then the k'th link painted is + ``(xk1, yk1)`` - ``(xk2, yk2)`` - ``(xk3, yk3)`` - ``(xk4, yk4)``. + + ``'ivl'`` + A list of labels corresponding to the leaf nodes. + + ``'leaves'`` + For each i, ``H[i] == j``, cluster node ``j`` appears in position + ``i`` in the left-to-right traversal of the leaves, where + :math:`j < 2n-1` and :math:`i < n`. If ``j`` is less than ``n``, the + ``i``-th leaf node corresponds to an original observation. + Otherwise, it corresponds to a non-singleton cluster. + + ``'leaves_color_list'`` + A list of color names. The k'th element represents the color of the + k'th leaf. + + See Also + -------- + linkage, set_link_color_palette + + Notes + ----- + It is expected that the distances in ``Z[:,2]`` be monotonic, otherwise + crossings appear in the dendrogram. + + Examples + -------- + >>> import numpy as np + >>> from scipy.cluster import hierarchy + >>> import matplotlib.pyplot as plt + + A very basic example: + + >>> ytdist = np.array([662., 877., 255., 412., 996., 295., 468., 268., + ... 400., 754., 564., 138., 219., 869., 669.]) + >>> Z = hierarchy.linkage(ytdist, 'single') + >>> plt.figure() + >>> dn = hierarchy.dendrogram(Z) + + Now, plot in given axes, improve the color scheme and use both vertical and + horizontal orientations: + + >>> hierarchy.set_link_color_palette(['m', 'c', 'y', 'k']) + >>> fig, axes = plt.subplots(1, 2, figsize=(8, 3)) + >>> dn1 = hierarchy.dendrogram(Z, ax=axes[0], above_threshold_color='y', + ... orientation='top') + >>> dn2 = hierarchy.dendrogram(Z, ax=axes[1], + ... above_threshold_color='#bcbddc', + ... orientation='right') + >>> hierarchy.set_link_color_palette(None) # reset to default after use + >>> plt.show() + + """ + # This feature was thought about but never implemented (still useful?): + # + # ... = dendrogram(..., leaves_order=None) + # + # Plots the leaves in the order specified by a vector of + # original observation indices. If the vector contains duplicates + # or results in a crossing, an exception will be thrown. Passing + # None orders leaf nodes based on the order they appear in the + # pre-order traversal. 
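+ # The structures assembled below can also be consumed directly when
+ # ``no_plot=True``; a minimal sketch (assuming ``plt`` is matplotlib.pyplot):
+ #
+ #     R = dendrogram(Z, no_plot=True)
+ #     for xs, ys, c in zip(R['icoord'], R['dcoord'], R['color_list']):
+ #         plt.plot(xs, ys, color=c)  # each U-shaped link as one polyline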
+ xp = array_namespace(Z) + Z = _asarray(Z, order='c', xp=xp) + + if orientation not in ["top", "left", "bottom", "right"]: + raise ValueError("orientation must be one of 'top', 'left', " + "'bottom', or 'right'") + + if labels is not None: + try: + len_labels = len(labels) + except (TypeError, AttributeError): + len_labels = labels.shape[0] + if Z.shape[0] + 1 != len_labels: + raise ValueError("Dimensions of Z and labels must be consistent.") + + is_valid_linkage(Z, throw=True, name='Z') + Zs = Z.shape + n = Zs[0] + 1 + if isinstance(p, (int, float)): + p = int(p) + else: + raise TypeError('The second argument must be a number') + + if truncate_mode not in ('lastp', 'mtica', 'level', 'none', None): + # 'mtica' is kept working for backwards compat. + raise ValueError('Invalid truncation mode.') + + if truncate_mode == 'lastp': + if p > n or p == 0: + p = n + + if truncate_mode == 'mtica': + # 'mtica' is an alias + truncate_mode = 'level' + + if truncate_mode == 'level': + if p <= 0: + p = np.inf + + if get_leaves: + lvs = [] + else: + lvs = None + + icoord_list = [] + dcoord_list = [] + color_list = [] + current_color = [0] + currently_below_threshold = [False] + ivl = [] # list of leaves + + if color_threshold is None or (isinstance(color_threshold, str) and + color_threshold == 'default'): + color_threshold = xp.max(Z[:, 2]) * 0.7 + + R = {'icoord': icoord_list, 'dcoord': dcoord_list, 'ivl': ivl, + 'leaves': lvs, 'color_list': color_list} + + # Empty list will be filled in _dendrogram_calculate_info + contraction_marks = [] if show_contracted else None + + _dendrogram_calculate_info( + Z=Z, p=p, + truncate_mode=truncate_mode, + color_threshold=color_threshold, + get_leaves=get_leaves, + orientation=orientation, + labels=labels, + count_sort=count_sort, + distance_sort=distance_sort, + show_leaf_counts=show_leaf_counts, + i=2*n - 2, + iv=0.0, + ivl=ivl, + n=n, + icoord_list=icoord_list, + dcoord_list=dcoord_list, + lvs=lvs, + current_color=current_color, + color_list=color_list, + currently_below_threshold=currently_below_threshold, + leaf_label_func=leaf_label_func, + contraction_marks=contraction_marks, + link_color_func=link_color_func, + above_threshold_color=above_threshold_color) + + if not no_plot: + mh = xp.max(Z[:, 2]) + _plot_dendrogram(icoord_list, dcoord_list, ivl, p, n, mh, orientation, + no_labels, color_list, + leaf_font_size=leaf_font_size, + leaf_rotation=leaf_rotation, + contraction_marks=contraction_marks, + ax=ax, + above_threshold_color=above_threshold_color) + + R["leaves_color_list"] = _get_leaves_color_list(R) + + return R + + +def _get_leaves_color_list(R): + leaves_color_list = [None] * len(R['leaves']) + for link_x, link_y, link_color in zip(R['icoord'], + R['dcoord'], + R['color_list']): + for (xi, yi) in zip(link_x, link_y): + if yi == 0.0 and (xi % 5 == 0 and xi % 2 == 1): + # if yi is 0.0 and xi is divisible by 5 and odd, + # the point is a leaf + # xi of leaves are 5, 15, 25, 35, ... (see `iv_ticks`) + # index of leaves are 0, 1, 2, 3, ... as below + leaf_index = (int(xi) - 5) // 10 + # each leaf has a same color of its link. + leaves_color_list[leaf_index] = link_color + return leaves_color_list + + +def _append_singleton_leaf_node(Z, p, n, level, lvs, ivl, leaf_label_func, + i, labels): + # If the leaf id structure is not None and is a list then the caller + # to dendrogram has indicated that cluster id's corresponding to the + # leaf nodes should be recorded. + + if lvs is not None: + lvs.append(int(i)) + + # If leaf node labels are to be displayed... 
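+ # Label precedence below: a caller-supplied leaf_label_func wins, then an
+ # explicit ``labels`` sequence, and finally the observation id as a string.
+ # ``labels[int(i - n)]`` relies on ``len(labels) == n`` (checked in
+ # ``dendrogram``), so the negative index resolves to ``labels[i]``.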
+ if ivl is not None: + # If a leaf_label_func has been provided, the label comes from the + # string returned from the leaf_label_func, which is a function + # passed to dendrogram. + if leaf_label_func: + ivl.append(leaf_label_func(int(i))) + else: + # Otherwise, if the dendrogram caller has passed a labels list + # for the leaf nodes, use it. + if labels is not None: + ivl.append(labels[int(i - n)]) + else: + # Otherwise, use the id as the label for the leaf.x + ivl.append(str(int(i))) + + +def _append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl, leaf_label_func, + i, labels, show_leaf_counts): + # If the leaf id structure is not None and is a list then the caller + # to dendrogram has indicated that cluster id's corresponding to the + # leaf nodes should be recorded. + + if lvs is not None: + lvs.append(int(i)) + if ivl is not None: + if leaf_label_func: + ivl.append(leaf_label_func(int(i))) + else: + if show_leaf_counts: + ivl.append("(" + str(np.asarray(Z[i - n, 3], dtype=np.int64)) + ")") + else: + ivl.append("") + + +def _append_contraction_marks(Z, iv, i, n, contraction_marks, xp): + _append_contraction_marks_sub(Z, iv, int_floor(Z[i - n, 0], xp), + n, contraction_marks, xp) + _append_contraction_marks_sub(Z, iv, int_floor(Z[i - n, 1], xp), + n, contraction_marks, xp) + + +def _append_contraction_marks_sub(Z, iv, i, n, contraction_marks, xp): + if i >= n: + contraction_marks.append((iv, Z[i - n, 2])) + _append_contraction_marks_sub(Z, iv, int_floor(Z[i - n, 0], xp), + n, contraction_marks, xp) + _append_contraction_marks_sub(Z, iv, int_floor(Z[i - n, 1], xp), + n, contraction_marks, xp) + + +def _dendrogram_calculate_info(Z, p, truncate_mode, + color_threshold=np.inf, get_leaves=True, + orientation='top', labels=None, + count_sort=False, distance_sort=False, + show_leaf_counts=False, i=-1, iv=0.0, + ivl=None, n=0, icoord_list=None, dcoord_list=None, + lvs=None, mhr=False, + current_color=None, color_list=None, + currently_below_threshold=None, + leaf_label_func=None, level=0, + contraction_marks=None, + link_color_func=None, + above_threshold_color='C0'): + """ + Calculate the endpoints of the links as well as the labels for the + the dendrogram rooted at the node with index i. iv is the independent + variable value to plot the left-most leaf node below the root node i + (if orientation='top', this would be the left-most x value where the + plotting of this root node i and its descendents should begin). + + ivl is a list to store the labels of the leaf nodes. The leaf_label_func + is called whenever ivl != None, labels == None, and + leaf_label_func != None. When ivl != None and labels != None, the + labels list is used only for labeling the leaf nodes. When + ivl == None, no labels are generated for leaf nodes. + + When get_leaves==True, a list of leaves is built as they are visited + in the dendrogram. + + Returns a tuple with l being the independent variable coordinate that + corresponds to the midpoint of cluster to the left of cluster i if + i is non-singleton, otherwise the independent coordinate of the leaf + node if i is a leaf node. + + Returns + ------- + A tuple (left, w, h, md), where: + * left is the independent variable coordinate of the center of the + the U of the subtree + + * w is the amount of space used for the subtree (in independent + variable units) + + * h is the height of the subtree in dependent variable units + + * md is the ``max(Z[*,2]``) for all nodes ``*`` below and including + the target node. 
+ + """ + xp = array_namespace(Z) + if n == 0: + raise ValueError("Invalid singleton cluster count n.") + + if i == -1: + raise ValueError("Invalid root cluster index i.") + + if truncate_mode == 'lastp': + # If the node is a leaf node but corresponds to a non-singleton + # cluster, its label is either the empty string or the number of + # original observations belonging to cluster i. + if 2*n - p > i >= n: + d = Z[i - n, 2] + _append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl, + leaf_label_func, i, labels, + show_leaf_counts) + if contraction_marks is not None: + _append_contraction_marks(Z, iv + 5.0, i, n, contraction_marks, xp) + return (iv + 5.0, 10.0, 0.0, d) + elif i < n: + _append_singleton_leaf_node(Z, p, n, level, lvs, ivl, + leaf_label_func, i, labels) + return (iv + 5.0, 10.0, 0.0, 0.0) + elif truncate_mode == 'level': + if i > n and level > p: + d = Z[i - n, 2] + _append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl, + leaf_label_func, i, labels, + show_leaf_counts) + if contraction_marks is not None: + _append_contraction_marks(Z, iv + 5.0, i, n, contraction_marks, xp) + return (iv + 5.0, 10.0, 0.0, d) + elif i < n: + _append_singleton_leaf_node(Z, p, n, level, lvs, ivl, + leaf_label_func, i, labels) + return (iv + 5.0, 10.0, 0.0, 0.0) + + # Otherwise, only truncate if we have a leaf node. + # + # Only place leaves if they correspond to original observations. + if i < n: + _append_singleton_leaf_node(Z, p, n, level, lvs, ivl, + leaf_label_func, i, labels) + return (iv + 5.0, 10.0, 0.0, 0.0) + + # !!! Otherwise, we don't have a leaf node, so work on plotting a + # non-leaf node. + # Actual indices of a and b + aa = int_floor(Z[i - n, 0], xp) + ab = int_floor(Z[i - n, 1], xp) + if aa >= n: + # The number of singletons below cluster a + na = Z[aa - n, 3] + # The distance between a's two direct children. + da = Z[aa - n, 2] + else: + na = 1 + da = 0.0 + if ab >= n: + nb = Z[ab - n, 3] + db = Z[ab - n, 2] + else: + nb = 1 + db = 0.0 + + if count_sort == 'ascending' or count_sort is True: + # If a has a count greater than b, it and its descendents should + # be drawn to the right. Otherwise, to the left. + if na > nb: + # The cluster index to draw to the left (ua) will be ab + # and the one to draw to the right (ub) will be aa + ua = ab + ub = aa + else: + ua = aa + ub = ab + elif count_sort == 'descending': + # If a has a count less than or equal to b, it and its + # descendents should be drawn to the left. Otherwise, to + # the right. + if na > nb: + ua = aa + ub = ab + else: + ua = ab + ub = aa + elif distance_sort == 'ascending' or distance_sort is True: + # If a has a distance greater than b, it and its descendents should + # be drawn to the right. Otherwise, to the left. + if da > db: + ua = ab + ub = aa + else: + ua = aa + ub = ab + elif distance_sort == 'descending': + # If a has a distance less than or equal to b, it and its + # descendents should be drawn to the left. Otherwise, to + # the right. + if da > db: + ua = aa + ub = ab + else: + ua = ab + ub = aa + else: + ua = aa + ub = ab + + # Updated iv variable and the amount of space used. 
+ (uiva, uwa, uah, uamd) = \ + _dendrogram_calculate_info( + Z=Z, p=p, + truncate_mode=truncate_mode, + color_threshold=color_threshold, + get_leaves=get_leaves, + orientation=orientation, + labels=labels, + count_sort=count_sort, + distance_sort=distance_sort, + show_leaf_counts=show_leaf_counts, + i=ua, iv=iv, ivl=ivl, n=n, + icoord_list=icoord_list, + dcoord_list=dcoord_list, lvs=lvs, + current_color=current_color, + color_list=color_list, + currently_below_threshold=currently_below_threshold, + leaf_label_func=leaf_label_func, + level=level + 1, contraction_marks=contraction_marks, + link_color_func=link_color_func, + above_threshold_color=above_threshold_color) + + h = Z[i - n, 2] + if h >= color_threshold or color_threshold <= 0: + c = above_threshold_color + + if currently_below_threshold[0]: + current_color[0] = (current_color[0] + 1) % len(_link_line_colors) + currently_below_threshold[0] = False + else: + currently_below_threshold[0] = True + c = _link_line_colors[current_color[0]] + + (uivb, uwb, ubh, ubmd) = \ + _dendrogram_calculate_info( + Z=Z, p=p, + truncate_mode=truncate_mode, + color_threshold=color_threshold, + get_leaves=get_leaves, + orientation=orientation, + labels=labels, + count_sort=count_sort, + distance_sort=distance_sort, + show_leaf_counts=show_leaf_counts, + i=ub, iv=iv + uwa, ivl=ivl, n=n, + icoord_list=icoord_list, + dcoord_list=dcoord_list, lvs=lvs, + current_color=current_color, + color_list=color_list, + currently_below_threshold=currently_below_threshold, + leaf_label_func=leaf_label_func, + level=level + 1, contraction_marks=contraction_marks, + link_color_func=link_color_func, + above_threshold_color=above_threshold_color) + + max_dist = max(uamd, ubmd, h) + + icoord_list.append([uiva, uiva, uivb, uivb]) + dcoord_list.append([uah, h, h, ubh]) + if link_color_func is not None: + v = link_color_func(int(i)) + if not isinstance(v, str): + raise TypeError("link_color_func must return a matplotlib " + "color string!") + color_list.append(v) + else: + color_list.append(c) + + return (((uiva + uivb) / 2), uwa + uwb, h, max_dist) + + +def is_isomorphic(T1, T2): + """ + Determine if two different cluster assignments are equivalent. + + Parameters + ---------- + T1 : array_like + An assignment of singleton cluster ids to flat cluster ids. + T2 : array_like + An assignment of singleton cluster ids to flat cluster ids. + + Returns + ------- + b : bool + Whether the flat cluster assignments `T1` and `T2` are + equivalent. + + See Also + -------- + linkage : for a description of what a linkage matrix is. + fcluster : for the creation of flat cluster assignments. + + Examples + -------- + >>> from scipy.cluster.hierarchy import fcluster, is_isomorphic + >>> from scipy.cluster.hierarchy import single, complete + >>> from scipy.spatial.distance import pdist + + Two flat cluster assignments can be isomorphic if they represent the same + cluster assignment, with different labels. + + For example, we can use the `scipy.cluster.hierarchy.single`: method + and flatten the output to four clusters: + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ... 
[4, 4], [3, 4], [4, 3]] + + >>> Z = single(pdist(X)) + >>> T = fcluster(Z, 1, criterion='distance') + >>> T + array([3, 3, 3, 4, 4, 4, 2, 2, 2, 1, 1, 1], dtype=int32) + + We can then do the same using the + `scipy.cluster.hierarchy.complete`: method: + + >>> Z = complete(pdist(X)) + >>> T_ = fcluster(Z, 1.5, criterion='distance') + >>> T_ + array([1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4], dtype=int32) + + As we can see, in both cases we obtain four clusters and all the data + points are distributed in the same way - the only thing that changes + are the flat cluster labels (3 => 1, 4 =>2, 2 =>3 and 4 =>1), so both + cluster assignments are isomorphic: + + >>> is_isomorphic(T, T_) + True + + """ + T1 = np.asarray(T1, order='c') + T2 = np.asarray(T2, order='c') + + T1S = T1.shape + T2S = T2.shape + + if len(T1S) != 1: + raise ValueError('T1 must be one-dimensional.') + if len(T2S) != 1: + raise ValueError('T2 must be one-dimensional.') + if T1S[0] != T2S[0]: + raise ValueError('T1 and T2 must have the same number of elements.') + n = T1S[0] + d1 = {} + d2 = {} + for i in range(0, n): + if T1[i] in d1: + if T2[i] not in d2: + return False + if d1[T1[i]] != T2[i] or d2[T2[i]] != T1[i]: + return False + elif T2[i] in d2: + return False + else: + d1[T1[i]] = T2[i] + d2[T2[i]] = T1[i] + return True + + +def maxdists(Z): + """ + Return the maximum distance between any non-singleton cluster. + + Parameters + ---------- + Z : ndarray + The hierarchical clustering encoded as a matrix. See + ``linkage`` for more information. + + Returns + ------- + maxdists : ndarray + A ``(n-1)`` sized numpy array of doubles; ``MD[i]`` represents + the maximum distance between any cluster (including + singletons) below and including the node with index i. More + specifically, ``MD[i] = Z[Q(i)-n, 2].max()`` where ``Q(i)`` is the + set of all node indices below and including node i. + + See Also + -------- + linkage : for a description of what a linkage matrix is. + is_monotonic : for testing for monotonicity of a linkage matrix. + + Examples + -------- + >>> from scipy.cluster.hierarchy import median, maxdists + >>> from scipy.spatial.distance import pdist + + Given a linkage matrix ``Z``, `scipy.cluster.hierarchy.maxdists` + computes for each new cluster generated (i.e., for each row of the linkage + matrix) what is the maximum distance between any two child clusters. + + Due to the nature of hierarchical clustering, in many cases this is going + to be just the distance between the two child clusters that were merged + to form the current one - that is, Z[:,2]. + + However, for non-monotonic cluster assignments such as + `scipy.cluster.hierarchy.median` clustering this is not always the + case: There may be cluster formations were the distance between the two + clusters merged is smaller than the distance between their children. + + We can see this in an example: + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ... [4, 4], [3, 4], [4, 3]] + + >>> Z = median(pdist(X)) + >>> Z + array([[ 0. , 1. , 1. , 2. ], + [ 3. , 4. , 1. , 2. ], + [ 9. , 10. , 1. , 2. ], + [ 6. , 7. , 1. , 2. ], + [ 2. , 12. , 1.11803399, 3. ], + [ 5. , 13. , 1.11803399, 3. ], + [ 8. , 15. , 1.11803399, 3. ], + [11. , 14. , 1.11803399, 3. ], + [18. , 19. , 3. , 6. ], + [16. , 17. , 3.5 , 6. ], + [20. , 21. , 3.25 , 12. ]]) + >>> maxdists(Z) + array([1. , 1. , 1. , 1. , 1.11803399, + 1.11803399, 1.11803399, 1.11803399, 3. 
, 3.5 , + 3.5 ]) + + Note that while the distance between the two clusters merged when creating the + last cluster is 3.25, there are two children (clusters 16 and 17) whose distance + is larger (3.5). Thus, `scipy.cluster.hierarchy.maxdists` returns 3.5 in + this case. + + """ + xp = array_namespace(Z) + Z = _asarray(Z, order='C', dtype=xp.float64, xp=xp) + is_valid_linkage(Z, throw=True, name='Z') + + n = Z.shape[0] + 1 + MD = np.zeros((n - 1,)) + Z = np.asarray(Z) + _hierarchy.get_max_dist_for_each_cluster(Z, MD, int(n)) + MD = xp.asarray(MD) + return MD + + +def maxinconsts(Z, R): + """ + Return the maximum inconsistency coefficient for each + non-singleton cluster and its children. + + Parameters + ---------- + Z : ndarray + The hierarchical clustering encoded as a matrix. See + `linkage` for more information. + R : ndarray + The inconsistency matrix. + + Returns + ------- + MI : ndarray + A monotonic ``(n-1)``-sized numpy array of doubles. + + See Also + -------- + linkage : for a description of what a linkage matrix is. + inconsistent : for the creation of a inconsistency matrix. + + Examples + -------- + >>> from scipy.cluster.hierarchy import median, inconsistent, maxinconsts + >>> from scipy.spatial.distance import pdist + + Given a data set ``X``, we can apply a clustering method to obtain a + linkage matrix ``Z``. `scipy.cluster.hierarchy.inconsistent` can + be also used to obtain the inconsistency matrix ``R`` associated to + this clustering process: + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ... [4, 4], [3, 4], [4, 3]] + + >>> Z = median(pdist(X)) + >>> R = inconsistent(Z) + >>> Z + array([[ 0. , 1. , 1. , 2. ], + [ 3. , 4. , 1. , 2. ], + [ 9. , 10. , 1. , 2. ], + [ 6. , 7. , 1. , 2. ], + [ 2. , 12. , 1.11803399, 3. ], + [ 5. , 13. , 1.11803399, 3. ], + [ 8. , 15. , 1.11803399, 3. ], + [11. , 14. , 1.11803399, 3. ], + [18. , 19. , 3. , 6. ], + [16. , 17. , 3.5 , 6. ], + [20. , 21. , 3.25 , 12. ]]) + >>> R + array([[1. , 0. , 1. , 0. ], + [1. , 0. , 1. , 0. ], + [1. , 0. , 1. , 0. ], + [1. , 0. , 1. , 0. ], + [1.05901699, 0.08346263, 2. , 0.70710678], + [1.05901699, 0.08346263, 2. , 0.70710678], + [1.05901699, 0.08346263, 2. , 0.70710678], + [1.05901699, 0.08346263, 2. , 0.70710678], + [1.74535599, 1.08655358, 3. , 1.15470054], + [1.91202266, 1.37522872, 3. , 1.15470054], + [3.25 , 0.25 , 3. , 0. ]]) + + Here, `scipy.cluster.hierarchy.maxinconsts` can be used to compute + the maximum value of the inconsistency statistic (the last column of + ``R``) for each non-singleton cluster and its children: + + >>> maxinconsts(Z, R) + array([0. , 0. , 0. , 0. , 0.70710678, + 0.70710678, 0.70710678, 0.70710678, 1.15470054, 1.15470054, + 1.15470054]) + + """ + xp = array_namespace(Z, R) + Z = _asarray(Z, order='C', dtype=xp.float64, xp=xp) + R = _asarray(R, order='C', dtype=xp.float64, xp=xp) + is_valid_linkage(Z, throw=True, name='Z') + is_valid_im(R, throw=True, name='R') + + n = Z.shape[0] + 1 + if Z.shape[0] != R.shape[0]: + raise ValueError("The inconsistency matrix and linkage matrix each " + "have a different number of rows.") + MI = np.zeros((n - 1,)) + Z = np.asarray(Z) + R = np.asarray(R) + _hierarchy.get_max_Rfield_for_each_cluster(Z, R, MI, int(n), 3) + MI = xp.asarray(MI) + return MI + + +def maxRstat(Z, R, i): + """ + Return the maximum statistic for each non-singleton cluster and its + children. + + Parameters + ---------- + Z : array_like + The hierarchical clustering encoded as a matrix. 
See `linkage` for more + information. + R : array_like + The inconsistency matrix. + i : int + The column of `R` to use as the statistic. + + Returns + ------- + MR : ndarray + Calculates the maximum statistic for the i'th column of the + inconsistency matrix `R` for each non-singleton cluster + node. ``MR[j]`` is the maximum over ``R[Q(j)-n, i]``, where + ``Q(j)`` the set of all node ids corresponding to nodes below + and including ``j``. + + See Also + -------- + linkage : for a description of what a linkage matrix is. + inconsistent : for the creation of a inconsistency matrix. + + Examples + -------- + >>> from scipy.cluster.hierarchy import median, inconsistent, maxRstat + >>> from scipy.spatial.distance import pdist + + Given a data set ``X``, we can apply a clustering method to obtain a + linkage matrix ``Z``. `scipy.cluster.hierarchy.inconsistent` can + be also used to obtain the inconsistency matrix ``R`` associated to + this clustering process: + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ... [4, 4], [3, 4], [4, 3]] + + >>> Z = median(pdist(X)) + >>> R = inconsistent(Z) + >>> R + array([[1. , 0. , 1. , 0. ], + [1. , 0. , 1. , 0. ], + [1. , 0. , 1. , 0. ], + [1. , 0. , 1. , 0. ], + [1.05901699, 0.08346263, 2. , 0.70710678], + [1.05901699, 0.08346263, 2. , 0.70710678], + [1.05901699, 0.08346263, 2. , 0.70710678], + [1.05901699, 0.08346263, 2. , 0.70710678], + [1.74535599, 1.08655358, 3. , 1.15470054], + [1.91202266, 1.37522872, 3. , 1.15470054], + [3.25 , 0.25 , 3. , 0. ]]) + + `scipy.cluster.hierarchy.maxRstat` can be used to compute + the maximum value of each column of ``R``, for each non-singleton + cluster and its children: + + >>> maxRstat(Z, R, 0) + array([1. , 1. , 1. , 1. , 1.05901699, + 1.05901699, 1.05901699, 1.05901699, 1.74535599, 1.91202266, + 3.25 ]) + >>> maxRstat(Z, R, 1) + array([0. , 0. , 0. , 0. , 0.08346263, + 0.08346263, 0.08346263, 0.08346263, 1.08655358, 1.37522872, + 1.37522872]) + >>> maxRstat(Z, R, 3) + array([0. , 0. , 0. , 0. , 0.70710678, + 0.70710678, 0.70710678, 0.70710678, 1.15470054, 1.15470054, + 1.15470054]) + + """ + xp = array_namespace(Z, R) + Z = _asarray(Z, order='C', dtype=xp.float64, xp=xp) + R = _asarray(R, order='C', dtype=xp.float64, xp=xp) + is_valid_linkage(Z, throw=True, name='Z') + is_valid_im(R, throw=True, name='R') + + if not isinstance(i, int): + raise TypeError('The third argument must be an integer.') + + if i < 0 or i > 3: + raise ValueError('i must be an integer between 0 and 3 inclusive.') + + if Z.shape[0] != R.shape[0]: + raise ValueError("The inconsistency matrix and linkage matrix each " + "have a different number of rows.") + + n = Z.shape[0] + 1 + MR = np.zeros((n - 1,)) + Z = np.asarray(Z) + R = np.asarray(R) + _hierarchy.get_max_Rfield_for_each_cluster(Z, R, MR, int(n), i) + MR = xp.asarray(MR) + return MR + + +def leaders(Z, T): + """ + Return the root nodes in a hierarchical clustering. + + Returns the root nodes in a hierarchical clustering corresponding + to a cut defined by a flat cluster assignment vector ``T``. See + the ``fcluster`` function for more information on the format of ``T``. 
+ + For each flat cluster :math:`j` of the :math:`k` flat clusters + represented in the n-sized flat cluster assignment vector ``T``, + this function finds the lowest cluster node :math:`i` in the linkage + tree Z, such that: + + * leaf descendants belong only to flat cluster j + (i.e., ``T[p]==j`` for all :math:`p` in :math:`S(i)`, where + :math:`S(i)` is the set of leaf ids of descendant leaf nodes + with cluster node :math:`i`) + + * there does not exist a leaf that is not a descendant with + :math:`i` that also belongs to cluster :math:`j` + (i.e., ``T[q]!=j`` for all :math:`q` not in :math:`S(i)`). If + this condition is violated, ``T`` is not a valid cluster + assignment vector, and an exception will be thrown. + + Parameters + ---------- + Z : ndarray + The hierarchical clustering encoded as a matrix. See + `linkage` for more information. + T : ndarray + The flat cluster assignment vector. + + Returns + ------- + L : ndarray + The leader linkage node id's stored as a k-element 1-D array, + where ``k`` is the number of flat clusters found in ``T``. + + ``L[j]=i`` is the linkage cluster node id that is the + leader of flat cluster with id M[j]. If ``i < n``, ``i`` + corresponds to an original observation, otherwise it + corresponds to a non-singleton cluster. + M : ndarray + The leader linkage node id's stored as a k-element 1-D array, where + ``k`` is the number of flat clusters found in ``T``. This allows the + set of flat cluster ids to be any arbitrary set of ``k`` integers. + + For example: if ``L[3]=2`` and ``M[3]=8``, the flat cluster with + id 8's leader is linkage node 2. + + See Also + -------- + fcluster : for the creation of flat cluster assignments. + + Examples + -------- + >>> from scipy.cluster.hierarchy import ward, fcluster, leaders + >>> from scipy.spatial.distance import pdist + + Given a linkage matrix ``Z`` - obtained after apply a clustering method + to a dataset ``X`` - and a flat cluster assignment array ``T``: + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ... [4, 4], [3, 4], [4, 3]] + + >>> Z = ward(pdist(X)) + >>> Z + array([[ 0. , 1. , 1. , 2. ], + [ 3. , 4. , 1. , 2. ], + [ 6. , 7. , 1. , 2. ], + [ 9. , 10. , 1. , 2. ], + [ 2. , 12. , 1.29099445, 3. ], + [ 5. , 13. , 1.29099445, 3. ], + [ 8. , 14. , 1.29099445, 3. ], + [11. , 15. , 1.29099445, 3. ], + [16. , 17. , 5.77350269, 6. ], + [18. , 19. , 5.77350269, 6. ], + [20. , 21. , 8.16496581, 12. 
]]) + + >>> T = fcluster(Z, 3, criterion='distance') + >>> T + array([1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4], dtype=int32) + + `scipy.cluster.hierarchy.leaders` returns the indices of the nodes + in the dendrogram that are the leaders of each flat cluster: + + >>> L, M = leaders(Z, T) + >>> L + array([16, 17, 18, 19], dtype=int32) + + (remember that indices 0-11 point to the 12 data points in ``X``, + whereas indices 12-22 point to the 11 rows of ``Z``) + + `scipy.cluster.hierarchy.leaders` also returns the indices of + the flat clusters in ``T``: + + >>> M + array([1, 2, 3, 4], dtype=int32) + + """ + xp = array_namespace(Z, T) + Z = _asarray(Z, order='C', dtype=xp.float64, xp=xp) + T = _asarray(T, order='C', xp=xp) + is_valid_linkage(Z, throw=True, name='Z') + + if T.dtype != xp.int32: + raise TypeError('T must be a 1-D array of dtype int32.') + + if T.shape[0] != Z.shape[0] + 1: + raise ValueError('Mismatch: len(T)!=Z.shape[0] + 1.') + + n_clusters = int(xp.unique_values(T).shape[0]) + n_obs = int(Z.shape[0] + 1) + L = np.zeros(n_clusters, dtype=np.int32) + M = np.zeros(n_clusters, dtype=np.int32) + Z = np.asarray(Z) + T = np.asarray(T, dtype=np.int32) + s = _hierarchy.leaders(Z, T, L, M, n_clusters, n_obs) + if s >= 0: + raise ValueError(('T is not a valid assignment vector. Error found ' + 'when examining linkage node %d (< 2n-1).') % s) + L, M = xp.asarray(L), xp.asarray(M) + return (L, M) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/cluster/tests/__init__.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/cluster/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/cluster/tests/hierarchy_test_data.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/cluster/tests/hierarchy_test_data.py new file mode 100644 index 0000000000000000000000000000000000000000..7d874ca5eb7141a44559307d1c28dd412171396f --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/cluster/tests/hierarchy_test_data.py @@ -0,0 +1,145 @@ +from numpy import array + + +Q_X = array([[5.26563660e-01, 3.14160190e-01, 8.00656370e-02], + [7.50205180e-01, 4.60299830e-01, 8.98696460e-01], + [6.65461230e-01, 6.94011420e-01, 9.10465700e-01], + [9.64047590e-01, 1.43082200e-03, 7.39874220e-01], + [1.08159060e-01, 5.53028790e-01, 6.63804780e-02], + [9.31359130e-01, 8.25424910e-01, 9.52315440e-01], + [6.78086960e-01, 3.41903970e-01, 5.61481950e-01], + [9.82730940e-01, 7.04605210e-01, 8.70978630e-02], + [6.14691610e-01, 4.69989230e-02, 6.02406450e-01], + [5.80161260e-01, 9.17354970e-01, 5.88163850e-01], + [1.38246310e+00, 1.96358160e+00, 1.94437880e+00], + [2.10675860e+00, 1.67148730e+00, 1.34854480e+00], + [1.39880070e+00, 1.66142050e+00, 1.32224550e+00], + [1.71410460e+00, 1.49176380e+00, 1.45432170e+00], + [1.54102340e+00, 1.84374950e+00, 1.64658950e+00], + [2.08512480e+00, 1.84524350e+00, 2.17340850e+00], + [1.30748740e+00, 1.53801650e+00, 2.16007740e+00], + [1.41447700e+00, 1.99329070e+00, 1.99107420e+00], + [1.61943490e+00, 1.47703280e+00, 1.89788160e+00], + [1.59880600e+00, 1.54988980e+00, 1.57563350e+00], + [3.37247380e+00, 2.69635310e+00, 3.39981700e+00], + [3.13705120e+00, 3.36528090e+00, 3.06089070e+00], + [3.29413250e+00, 3.19619500e+00, 2.90700170e+00], + [2.65510510e+00, 
3.06785900e+00, 2.97198540e+00], + [3.30941040e+00, 2.59283970e+00, 2.57714110e+00], + [2.59557220e+00, 3.33477370e+00, 3.08793190e+00], + [2.58206180e+00, 3.41615670e+00, 3.26441990e+00], + [2.71127000e+00, 2.77032450e+00, 2.63466500e+00], + [2.79617850e+00, 3.25473720e+00, 3.41801560e+00], + [2.64741750e+00, 2.54538040e+00, 3.25354110e+00]]) + +ytdist = array([662., 877., 255., 412., 996., 295., 468., 268., 400., 754., + 564., 138., 219., 869., 669.]) + +linkage_ytdist_single = array([[2., 5., 138., 2.], + [3., 4., 219., 2.], + [0., 7., 255., 3.], + [1., 8., 268., 4.], + [6., 9., 295., 6.]]) + +linkage_ytdist_complete = array([[2., 5., 138., 2.], + [3., 4., 219., 2.], + [1., 6., 400., 3.], + [0., 7., 412., 3.], + [8., 9., 996., 6.]]) + +linkage_ytdist_average = array([[2., 5., 138., 2.], + [3., 4., 219., 2.], + [0., 7., 333.5, 3.], + [1., 6., 347.5, 3.], + [8., 9., 680.77777778, 6.]]) + +linkage_ytdist_weighted = array([[2., 5., 138., 2.], + [3., 4., 219., 2.], + [0., 7., 333.5, 3.], + [1., 6., 347.5, 3.], + [8., 9., 670.125, 6.]]) + +# the optimal leaf ordering of linkage_ytdist_single +linkage_ytdist_single_olo = array([[5., 2., 138., 2.], + [4., 3., 219., 2.], + [7., 0., 255., 3.], + [1., 8., 268., 4.], + [6., 9., 295., 6.]]) + +X = array([[1.43054825, -7.5693489], + [6.95887839, 6.82293382], + [2.87137846, -9.68248579], + [7.87974764, -6.05485803], + [8.24018364, -6.09495602], + [7.39020262, 8.54004355]]) + +linkage_X_centroid = array([[3., 4., 0.36265956, 2.], + [1., 5., 1.77045373, 2.], + [0., 2., 2.55760419, 2.], + [6., 8., 6.43614494, 4.], + [7., 9., 15.17363237, 6.]]) + +linkage_X_median = array([[3., 4., 0.36265956, 2.], + [1., 5., 1.77045373, 2.], + [0., 2., 2.55760419, 2.], + [6., 8., 6.43614494, 4.], + [7., 9., 15.17363237, 6.]]) + +linkage_X_ward = array([[3., 4., 0.36265956, 2.], + [1., 5., 1.77045373, 2.], + [0., 2., 2.55760419, 2.], + [6., 8., 9.10208346, 4.], + [7., 9., 24.7784379, 6.]]) + +# the optimal leaf ordering of linkage_X_ward +linkage_X_ward_olo = array([[4., 3., 0.36265956, 2.], + [5., 1., 1.77045373, 2.], + [2., 0., 2.55760419, 2.], + [6., 8., 9.10208346, 4.], + [7., 9., 24.7784379, 6.]]) + +inconsistent_ytdist = { + 1: array([[138., 0., 1., 0.], + [219., 0., 1., 0.], + [255., 0., 1., 0.], + [268., 0., 1., 0.], + [295., 0., 1., 0.]]), + 2: array([[138., 0., 1., 0.], + [219., 0., 1., 0.], + [237., 25.45584412, 2., 0.70710678], + [261.5, 9.19238816, 2., 0.70710678], + [233.66666667, 83.9424406, 3., 0.7306594]]), + 3: array([[138., 0., 1., 0.], + [219., 0., 1., 0.], + [237., 25.45584412, 2., 0.70710678], + [247.33333333, 25.38372182, 3., 0.81417007], + [239., 69.36377537, 4., 0.80733783]]), + 4: array([[138., 0., 1., 0.], + [219., 0., 1., 0.], + [237., 25.45584412, 2., 0.70710678], + [247.33333333, 25.38372182, 3., 0.81417007], + [235., 60.73302232, 5., 0.98793042]])} + +fcluster_inconsistent = { + 0.8: array([6, 2, 2, 4, 6, 2, 3, 7, 3, 5, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1]), + 1.0: array([6, 2, 2, 4, 6, 2, 3, 7, 3, 5, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1]), + 2.0: array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1])} + +fcluster_distance = { + 0.6: array([4, 4, 4, 4, 4, 4, 4, 5, 4, 4, 6, 6, 6, 6, 6, 7, 6, 6, 6, 6, 3, + 1, 1, 1, 2, 1, 1, 1, 1, 1]), + 1.0: array([2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1]), + 2.0: array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1])} + 
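+# The linkage_* arrays in this module are the linkage matrices that
+# test_hierarchy.py expects scipy.cluster.hierarchy.linkage to return for
+# ``ytdist`` and ``X``, and the fcluster_* dicts map a threshold ``t`` to the
+# flat-cluster labels expected (up to relabelling) for single-linkage
+# clustering of ``Q_X`` with the corresponding criterion.
+# Minimal self-check sketch (illustrative only; not used by the test suite):
+if __name__ == "__main__":
+    from numpy.testing import assert_allclose
+    from scipy.cluster.hierarchy import linkage
+
+    # ytdist is already a condensed distance matrix, so it can be passed
+    # straight to linkage().
+    assert_allclose(linkage(ytdist, 'single'), linkage_ytdist_single, atol=1e-10)
+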
+fcluster_maxclust = { + 8.0: array([5, 5, 5, 5, 5, 5, 5, 6, 5, 5, 7, 7, 7, 7, 7, 8, 7, 7, 7, 7, 4, + 1, 1, 1, 3, 1, 1, 1, 1, 2]), + 4.0: array([3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 2, + 1, 1, 1, 1, 1, 1, 1, 1, 1]), + 1.0: array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1])} diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/cluster/tests/test_disjoint_set.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/cluster/tests/test_disjoint_set.py new file mode 100644 index 0000000000000000000000000000000000000000..a73512d35eef168f625a1942a87d248e73a71aa2 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/cluster/tests/test_disjoint_set.py @@ -0,0 +1,202 @@ +import pytest +from pytest import raises as assert_raises +import numpy as np +from scipy.cluster.hierarchy import DisjointSet +import string + + +def generate_random_token(): + k = len(string.ascii_letters) + tokens = list(np.arange(k, dtype=int)) + tokens += list(np.arange(k, dtype=float)) + tokens += list(string.ascii_letters) + tokens += [None for i in range(k)] + tokens = np.array(tokens, dtype=object) + rng = np.random.RandomState(seed=0) + + while 1: + size = rng.randint(1, 3) + element = rng.choice(tokens, size) + if size == 1: + yield element[0] + else: + yield tuple(element) + + +def get_elements(n): + # dict is deterministic without difficulty of comparing numpy ints + elements = {} + for element in generate_random_token(): + if element not in elements: + elements[element] = len(elements) + if len(elements) >= n: + break + return list(elements.keys()) + + +def test_init(): + n = 10 + elements = get_elements(n) + dis = DisjointSet(elements) + assert dis.n_subsets == n + assert list(dis) == elements + + +def test_len(): + n = 10 + elements = get_elements(n) + dis = DisjointSet(elements) + assert len(dis) == n + + dis.add("dummy") + assert len(dis) == n + 1 + + +@pytest.mark.parametrize("n", [10, 100]) +def test_contains(n): + elements = get_elements(n) + dis = DisjointSet(elements) + for x in elements: + assert x in dis + + assert "dummy" not in dis + + +@pytest.mark.parametrize("n", [10, 100]) +def test_add(n): + elements = get_elements(n) + dis1 = DisjointSet(elements) + + dis2 = DisjointSet() + for i, x in enumerate(elements): + dis2.add(x) + assert len(dis2) == i + 1 + + # test idempotency by adding element again + dis2.add(x) + assert len(dis2) == i + 1 + + assert list(dis1) == list(dis2) + + +def test_element_not_present(): + elements = get_elements(n=10) + dis = DisjointSet(elements) + + with assert_raises(KeyError): + dis["dummy"] + + with assert_raises(KeyError): + dis.merge(elements[0], "dummy") + + with assert_raises(KeyError): + dis.connected(elements[0], "dummy") + + +@pytest.mark.parametrize("direction", ["forwards", "backwards"]) +@pytest.mark.parametrize("n", [10, 100]) +def test_linear_union_sequence(n, direction): + elements = get_elements(n) + dis = DisjointSet(elements) + assert elements == list(dis) + + indices = list(range(n - 1)) + if direction == "backwards": + indices = indices[::-1] + + for it, i in enumerate(indices): + assert not dis.connected(elements[i], elements[i + 1]) + assert dis.merge(elements[i], elements[i + 1]) + assert dis.connected(elements[i], elements[i + 1]) + assert dis.n_subsets == n - 1 - it + + roots = [dis[i] for i in elements] + if direction == "forwards": + assert all(elements[0] 
== r for r in roots) + else: + assert all(elements[-2] == r for r in roots) + assert not dis.merge(elements[0], elements[-1]) + + +@pytest.mark.parametrize("n", [10, 100]) +def test_self_unions(n): + elements = get_elements(n) + dis = DisjointSet(elements) + + for x in elements: + assert dis.connected(x, x) + assert not dis.merge(x, x) + assert dis.connected(x, x) + assert dis.n_subsets == len(elements) + + assert elements == list(dis) + roots = [dis[x] for x in elements] + assert elements == roots + + +@pytest.mark.parametrize("order", ["ab", "ba"]) +@pytest.mark.parametrize("n", [10, 100]) +def test_equal_size_ordering(n, order): + elements = get_elements(n) + dis = DisjointSet(elements) + + rng = np.random.RandomState(seed=0) + indices = np.arange(n) + rng.shuffle(indices) + + for i in range(0, len(indices), 2): + a, b = elements[indices[i]], elements[indices[i + 1]] + if order == "ab": + assert dis.merge(a, b) + else: + assert dis.merge(b, a) + + expected = elements[min(indices[i], indices[i + 1])] + assert dis[a] == expected + assert dis[b] == expected + + +@pytest.mark.parametrize("kmax", [5, 10]) +def test_binary_tree(kmax): + n = 2**kmax + elements = get_elements(n) + dis = DisjointSet(elements) + rng = np.random.RandomState(seed=0) + + for k in 2**np.arange(kmax): + for i in range(0, n, 2 * k): + r1, r2 = rng.randint(0, k, size=2) + a, b = elements[i + r1], elements[i + k + r2] + assert not dis.connected(a, b) + assert dis.merge(a, b) + assert dis.connected(a, b) + + assert elements == list(dis) + roots = [dis[i] for i in elements] + expected_indices = np.arange(n) - np.arange(n) % (2 * k) + expected = [elements[i] for i in expected_indices] + assert roots == expected + + +@pytest.mark.parametrize("n", [10, 100]) +def test_subsets(n): + elements = get_elements(n) + dis = DisjointSet(elements) + + rng = np.random.RandomState(seed=0) + for i, j in rng.randint(0, n, (n, 2)): + x = elements[i] + y = elements[j] + + expected = {element for element in dis if {dis[element]} == {dis[x]}} + assert dis.subset_size(x) == len(dis.subset(x)) + assert expected == dis.subset(x) + + expected = {dis[element]: set() for element in dis} + for element in dis: + expected[dis[element]].add(element) + expected = list(expected.values()) + assert expected == dis.subsets() + + dis.merge(x, y) + assert dis.subset(x) == dis.subset(y) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/cluster/tests/test_hierarchy.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/cluster/tests/test_hierarchy.py new file mode 100644 index 0000000000000000000000000000000000000000..2dd0e37c59f3b004b09ce1662a98b53505398453 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/cluster/tests/test_hierarchy.py @@ -0,0 +1,1300 @@ +# +# Author: Damian Eads +# Date: April 17, 2008 +# +# Copyright (C) 2008 Damian Eads +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials provided +# with the distribution. +# +# 3. 
The name of the author may not be used to endorse or promote +# products derived from this software without specific prior +# written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS +# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE +# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +import numpy as np +from numpy.testing import (assert_allclose, assert_equal, assert_array_equal, assert_, + assert_warns) +import pytest +from pytest import raises as assert_raises + +import scipy.cluster.hierarchy +from scipy.cluster.hierarchy import ( + ClusterWarning, linkage, from_mlab_linkage, to_mlab_linkage, + num_obs_linkage, inconsistent, cophenet, fclusterdata, fcluster, + is_isomorphic, single, leaders, + correspond, is_monotonic, maxdists, maxinconsts, maxRstat, + is_valid_linkage, is_valid_im, to_tree, leaves_list, dendrogram, + set_link_color_palette, cut_tree, optimal_leaf_ordering, + _order_cluster_tree, _hierarchy, _LINKAGE_METHODS) +from scipy.spatial.distance import pdist +from scipy.cluster._hierarchy import Heap +from scipy.conftest import array_api_compatible +from scipy._lib._array_api import xp_assert_close, xp_assert_equal + +from threading import Lock + +from . import hierarchy_test_data + + +# Matplotlib is not a scipy dependency but is optionally used in dendrogram, so +# check if it's available +try: + import matplotlib + # and set the backend to be Agg (no gui) + matplotlib.use('Agg') + # before importing pyplot + import matplotlib.pyplot as plt + have_matplotlib = True +except Exception: + have_matplotlib = False + + +pytestmark = [array_api_compatible, pytest.mark.usefixtures("skip_xp_backends")] +skip_xp_backends = pytest.mark.skip_xp_backends + + +class TestLinkage: + + @skip_xp_backends(cpu_only=True) + def test_linkage_non_finite_elements_in_distance_matrix(self, xp): + # Tests linkage(Y) where Y contains a non-finite element (e.g. NaN or Inf). + # Exception expected. + y = xp.asarray([xp.nan] + [0.0]*5) + assert_raises(ValueError, linkage, y) + + @skip_xp_backends(cpu_only=True) + def test_linkage_empty_distance_matrix(self, xp): + # Tests linkage(Y) where Y is a 0x4 linkage matrix. Exception expected. + y = xp.zeros((0,)) + assert_raises(ValueError, linkage, y) + + @skip_xp_backends(cpu_only=True) + def test_linkage_tdist(self, xp): + for method in ['single', 'complete', 'average', 'weighted']: + self.check_linkage_tdist(method, xp) + + def check_linkage_tdist(self, method, xp): + # Tests linkage(Y, method) on the tdist data set. + Z = linkage(xp.asarray(hierarchy_test_data.ytdist), method) + expectedZ = getattr(hierarchy_test_data, 'linkage_ytdist_' + method) + xp_assert_close(Z, xp.asarray(expectedZ), atol=1e-10) + + @skip_xp_backends(cpu_only=True) + def test_linkage_X(self, xp): + for method in ['centroid', 'median', 'ward']: + self.check_linkage_q(method, xp) + + def check_linkage_q(self, method, xp): + # Tests linkage(Y, method) on the Q data set. 
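+        # linkage() accepts either the raw observation matrix X or the
+        # condensed distance matrix pdist(X); both inputs are checked against
+        # the same expected linkage matrix below.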
+ Z = linkage(xp.asarray(hierarchy_test_data.X), method) + expectedZ = getattr(hierarchy_test_data, 'linkage_X_' + method) + xp_assert_close(Z, xp.asarray(expectedZ), atol=1e-06) + + y = scipy.spatial.distance.pdist(hierarchy_test_data.X, + metric="euclidean") + Z = linkage(xp.asarray(y), method) + xp_assert_close(Z, xp.asarray(expectedZ), atol=1e-06) + + @skip_xp_backends(cpu_only=True) + def test_compare_with_trivial(self, xp): + rng = np.random.RandomState(0) + n = 20 + X = rng.rand(n, 2) + d = pdist(X) + + for method, code in _LINKAGE_METHODS.items(): + Z_trivial = _hierarchy.linkage(d, n, code) + Z = linkage(xp.asarray(d), method) + xp_assert_close(Z, xp.asarray(Z_trivial), rtol=1e-14, atol=1e-15) + + @skip_xp_backends(cpu_only=True) + def test_optimal_leaf_ordering(self, xp): + Z = linkage(xp.asarray(hierarchy_test_data.ytdist), optimal_ordering=True) + expectedZ = getattr(hierarchy_test_data, 'linkage_ytdist_single_olo') + xp_assert_close(Z, xp.asarray(expectedZ), atol=1e-10) + + +@skip_xp_backends(cpu_only=True) +class TestLinkageTies: + + _expectations = { + 'single': np.array([[0, 1, 1.41421356, 2], + [2, 3, 1.41421356, 3]]), + 'complete': np.array([[0, 1, 1.41421356, 2], + [2, 3, 2.82842712, 3]]), + 'average': np.array([[0, 1, 1.41421356, 2], + [2, 3, 2.12132034, 3]]), + 'weighted': np.array([[0, 1, 1.41421356, 2], + [2, 3, 2.12132034, 3]]), + 'centroid': np.array([[0, 1, 1.41421356, 2], + [2, 3, 2.12132034, 3]]), + 'median': np.array([[0, 1, 1.41421356, 2], + [2, 3, 2.12132034, 3]]), + 'ward': np.array([[0, 1, 1.41421356, 2], + [2, 3, 2.44948974, 3]]), + } + + def test_linkage_ties(self, xp): + for method in ['single', 'complete', 'average', 'weighted', + 'centroid', 'median', 'ward']: + self.check_linkage_ties(method, xp) + + def check_linkage_ties(self, method, xp): + X = xp.asarray([[-1, -1], [0, 0], [1, 1]]) + Z = linkage(X, method=method) + expectedZ = self._expectations[method] + xp_assert_close(Z, xp.asarray(expectedZ), atol=1e-06) + + +@skip_xp_backends(cpu_only=True) +class TestInconsistent: + + def test_inconsistent_tdist(self, xp): + for depth in hierarchy_test_data.inconsistent_ytdist: + self.check_inconsistent_tdist(depth, xp) + + def check_inconsistent_tdist(self, depth, xp): + Z = xp.asarray(hierarchy_test_data.linkage_ytdist_single) + xp_assert_close(inconsistent(Z, depth), + xp.asarray(hierarchy_test_data.inconsistent_ytdist[depth])) + + +@skip_xp_backends(cpu_only=True) +class TestCopheneticDistance: + + def test_linkage_cophenet_tdist_Z(self, xp): + # Tests cophenet(Z) on tdist data set. + expectedM = xp.asarray([268, 295, 255, 255, 295, 295, 268, 268, 295, 295, + 295, 138, 219, 295, 295]) + Z = xp.asarray(hierarchy_test_data.linkage_ytdist_single) + M = cophenet(Z) + xp_assert_close(M, xp.asarray(expectedM, dtype=xp.float64), atol=1e-10) + + def test_linkage_cophenet_tdist_Z_Y(self, xp): + # Tests cophenet(Z, Y) on tdist data set. 
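+        # When given the original condensed distances Y, cophenet(Z, Y)
+        # returns both the cophenetic correlation coefficient c and the
+        # condensed matrix of cophenetic distances M.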
+ Z = xp.asarray(hierarchy_test_data.linkage_ytdist_single) + (c, M) = cophenet(Z, xp.asarray(hierarchy_test_data.ytdist)) + expectedM = xp.asarray([268, 295, 255, 255, 295, 295, 268, 268, 295, 295, + 295, 138, 219, 295, 295], dtype=xp.float64) + expectedc = xp.asarray(0.639931296433393415057366837573, dtype=xp.float64)[()] + xp_assert_close(c, expectedc, atol=1e-10) + xp_assert_close(M, expectedM, atol=1e-10) + + def test_gh_22183(self, xp): + # check for lack of segfault + # (out of bounds memory access) + # and correct interception of + # invalid linkage matrix + arr=[[0.0, 1.0, 1.0, 2.0], + [2.0, 12.0, 1.0, 3.0], + [3.0, 4.0, 1.0, 2.0], + [5.0, 14.0, 1.0, 3.0], + [6.0, 7.0, 1.0, 2.0], + [8.0, 16.0, 1.0, 3.0], + [9.0, 10.0, 1.0, 2.0], + [11.0, 18.0, 1.0, 3.0], + [13.0, 15.0, 2.0, 6.0], + [17.0, 20.0, 2.0, 32.0], + [19.0, 21.0, 2.0, 12.0]] + with pytest.raises(ValueError, match="excessive observations"): + cophenet(xp.asarray(arr)) + + +class TestMLabLinkageConversion: + + def test_mlab_linkage_conversion_empty(self, xp): + # Tests from/to_mlab_linkage on empty linkage array. + X = xp.asarray([], dtype=xp.float64) + xp_assert_equal(from_mlab_linkage(X), X) + xp_assert_equal(to_mlab_linkage(X), X) + + @skip_xp_backends(cpu_only=True) + def test_mlab_linkage_conversion_single_row(self, xp): + # Tests from/to_mlab_linkage on linkage array with single row. + Z = xp.asarray([[0., 1., 3., 2.]]) + Zm = xp.asarray([[1, 2, 3]]) + xp_assert_close(from_mlab_linkage(Zm), xp.asarray(Z, dtype=xp.float64), + rtol=1e-15) + xp_assert_close(to_mlab_linkage(Z), xp.asarray(Zm, dtype=xp.float64), + rtol=1e-15) + + @skip_xp_backends(cpu_only=True) + def test_mlab_linkage_conversion_multiple_rows(self, xp): + # Tests from/to_mlab_linkage on linkage array with multiple rows. + Zm = xp.asarray([[3, 6, 138], [4, 5, 219], + [1, 8, 255], [2, 9, 268], [7, 10, 295]]) + Z = xp.asarray([[2., 5., 138., 2.], + [3., 4., 219., 2.], + [0., 7., 255., 3.], + [1., 8., 268., 4.], + [6., 9., 295., 6.]], + dtype=xp.float64) + xp_assert_close(from_mlab_linkage(Zm), Z, rtol=1e-15) + xp_assert_close(to_mlab_linkage(Z), xp.asarray(Zm, dtype=xp.float64), + rtol=1e-15) + + +@skip_xp_backends(cpu_only=True) +class TestFcluster: + + def test_fclusterdata(self, xp): + for t in hierarchy_test_data.fcluster_inconsistent: + self.check_fclusterdata(t, 'inconsistent', xp) + for t in hierarchy_test_data.fcluster_distance: + self.check_fclusterdata(t, 'distance', xp) + for t in hierarchy_test_data.fcluster_maxclust: + self.check_fclusterdata(t, 'maxclust', xp) + + def check_fclusterdata(self, t, criterion, xp): + # Tests fclusterdata(X, criterion=criterion, t=t) on a random 3-cluster data set + expectedT = xp.asarray(getattr(hierarchy_test_data, 'fcluster_' + criterion)[t]) + X = xp.asarray(hierarchy_test_data.Q_X) + T = fclusterdata(X, criterion=criterion, t=t) + assert_(is_isomorphic(T, expectedT)) + + def test_fcluster(self, xp): + for t in hierarchy_test_data.fcluster_inconsistent: + self.check_fcluster(t, 'inconsistent', xp) + for t in hierarchy_test_data.fcluster_distance: + self.check_fcluster(t, 'distance', xp) + for t in hierarchy_test_data.fcluster_maxclust: + self.check_fcluster(t, 'maxclust', xp) + + def check_fcluster(self, t, criterion, xp): + # Tests fcluster(Z, criterion=criterion, t=t) on a random 3-cluster data set. 
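+        # Flat-cluster ids returned by fcluster() are arbitrary, so the result
+        # is compared to the expected labels with is_isomorphic(), which only
+        # checks that the two labelings induce the same partition.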
+ expectedT = xp.asarray(getattr(hierarchy_test_data, 'fcluster_' + criterion)[t]) + Z = single(xp.asarray(hierarchy_test_data.Q_X)) + T = fcluster(Z, criterion=criterion, t=t) + assert_(is_isomorphic(T, expectedT)) + + def test_fcluster_monocrit(self, xp): + for t in hierarchy_test_data.fcluster_distance: + self.check_fcluster_monocrit(t, xp) + for t in hierarchy_test_data.fcluster_maxclust: + self.check_fcluster_maxclust_monocrit(t, xp) + + def check_fcluster_monocrit(self, t, xp): + expectedT = xp.asarray(hierarchy_test_data.fcluster_distance[t]) + Z = single(xp.asarray(hierarchy_test_data.Q_X)) + T = fcluster(Z, t, criterion='monocrit', monocrit=maxdists(Z)) + assert_(is_isomorphic(T, expectedT)) + + def check_fcluster_maxclust_monocrit(self, t, xp): + expectedT = xp.asarray(hierarchy_test_data.fcluster_maxclust[t]) + Z = single(xp.asarray(hierarchy_test_data.Q_X)) + T = fcluster(Z, t, criterion='maxclust_monocrit', monocrit=maxdists(Z)) + assert_(is_isomorphic(T, expectedT)) + + def test_fcluster_maxclust_gh_12651(self, xp): + y = xp.asarray([[1], [4], [5]]) + Z = single(y) + assert_array_equal(fcluster(Z, t=1, criterion="maxclust"), + xp.asarray([1, 1, 1])) + assert_array_equal(fcluster(Z, t=2, criterion="maxclust"), + xp.asarray([2, 1, 1])) + assert_array_equal(fcluster(Z, t=3, criterion="maxclust"), + xp.asarray([1, 2, 3])) + assert_array_equal(fcluster(Z, t=5, criterion="maxclust"), + xp.asarray([1, 2, 3])) + + +@skip_xp_backends(cpu_only=True) +class TestLeaders: + + def test_leaders_single(self, xp): + # Tests leaders using a flat clustering generated by single linkage. + X = hierarchy_test_data.Q_X + Y = pdist(X) + Y = xp.asarray(Y) + Z = linkage(Y) + T = fcluster(Z, criterion='maxclust', t=3) + Lright = (xp.asarray([53, 55, 56]), xp.asarray([2, 3, 1])) + T = xp.asarray(T, dtype=xp.int32) + L = leaders(Z, T) + assert_allclose(np.concatenate(L), np.concatenate(Lright), rtol=1e-15) + + +@skip_xp_backends(np_only=True, + reason='`is_isomorphic` only supports NumPy backend') +class TestIsIsomorphic: + + @skip_xp_backends(np_only=True, + reason='array-likes only supported for NumPy backend') + def test_array_like(self, xp): + assert is_isomorphic([1, 1, 1], [2, 2, 2]) + assert is_isomorphic([], []) + + def test_is_isomorphic_1(self, xp): + # Tests is_isomorphic on test case #1 (one flat cluster, different labellings) + a = xp.asarray([1, 1, 1]) + b = xp.asarray([2, 2, 2]) + assert is_isomorphic(a, b) + assert is_isomorphic(b, a) + + def test_is_isomorphic_2(self, xp): + # Tests is_isomorphic on test case #2 (two flat clusters, different labelings) + a = xp.asarray([1, 7, 1]) + b = xp.asarray([2, 3, 2]) + assert is_isomorphic(a, b) + assert is_isomorphic(b, a) + + def test_is_isomorphic_3(self, xp): + # Tests is_isomorphic on test case #3 (no flat clusters) + a = xp.asarray([]) + b = xp.asarray([]) + assert is_isomorphic(a, b) + + def test_is_isomorphic_4A(self, xp): + # Tests is_isomorphic on test case #4A + # (3 flat clusters, different labelings, isomorphic) + a = xp.asarray([1, 2, 3]) + b = xp.asarray([1, 3, 2]) + assert is_isomorphic(a, b) + assert is_isomorphic(b, a) + + def test_is_isomorphic_4B(self, xp): + # Tests is_isomorphic on test case #4B + # (3 flat clusters, different labelings, nonisomorphic) + a = xp.asarray([1, 2, 3, 3]) + b = xp.asarray([1, 3, 2, 3]) + assert is_isomorphic(a, b) is False + assert is_isomorphic(b, a) is False + + def test_is_isomorphic_4C(self, xp): + # Tests is_isomorphic on test case #4C + # (3 flat clusters, different labelings, isomorphic) + a 
= xp.asarray([7, 2, 3]) + b = xp.asarray([6, 3, 2]) + assert is_isomorphic(a, b) + assert is_isomorphic(b, a) + + def test_is_isomorphic_5(self, xp): + # Tests is_isomorphic on test case #5 (1000 observations, 2/3/5 random + # clusters, random permutation of the labeling). + for nc in [2, 3, 5]: + self.help_is_isomorphic_randperm(1000, nc, xp=xp) + + def test_is_isomorphic_6(self, xp): + # Tests is_isomorphic on test case #5A (1000 observations, 2/3/5 random + # clusters, random permutation of the labeling, slightly + # nonisomorphic.) + for nc in [2, 3, 5]: + self.help_is_isomorphic_randperm(1000, nc, True, 5, xp=xp) + + def test_is_isomorphic_7(self, xp): + # Regression test for gh-6271 + a = xp.asarray([1, 2, 3]) + b = xp.asarray([1, 1, 1]) + assert not is_isomorphic(a, b) + + def help_is_isomorphic_randperm(self, nobs, nclusters, noniso=False, nerrors=0, + *, xp): + for k in range(3): + a = (np.random.rand(nobs) * nclusters).astype(int) + b = np.zeros(a.size, dtype=int) + P = np.random.permutation(nclusters) + for i in range(0, a.shape[0]): + b[i] = P[a[i]] + if noniso: + Q = np.random.permutation(nobs) + b[Q[0:nerrors]] += 1 + b[Q[0:nerrors]] %= nclusters + a = xp.asarray(a) + b = xp.asarray(b) + assert is_isomorphic(a, b) == (not noniso) + assert is_isomorphic(b, a) == (not noniso) + + +@skip_xp_backends(cpu_only=True) +class TestIsValidLinkage: + + def test_is_valid_linkage_various_size(self, xp): + for nrow, ncol, valid in [(2, 5, False), (2, 3, False), + (1, 4, True), (2, 4, True)]: + self.check_is_valid_linkage_various_size(nrow, ncol, valid, xp) + + def check_is_valid_linkage_various_size(self, nrow, ncol, valid, xp): + # Tests is_valid_linkage(Z) with linkage matrices of various sizes + Z = xp.asarray([[0, 1, 3.0, 2, 5], + [3, 2, 4.0, 3, 3]], dtype=xp.float64) + Z = Z[:nrow, :ncol] + assert_(is_valid_linkage(Z) == valid) + if not valid: + assert_raises(ValueError, is_valid_linkage, Z, throw=True) + + def test_is_valid_linkage_int_type(self, xp): + # Tests is_valid_linkage(Z) with integer type. + Z = xp.asarray([[0, 1, 3.0, 2], + [3, 2, 4.0, 3]], dtype=xp.int64) + assert_(is_valid_linkage(Z) is False) + assert_raises(TypeError, is_valid_linkage, Z, throw=True) + + def test_is_valid_linkage_empty(self, xp): + # Tests is_valid_linkage(Z) with empty linkage. + Z = xp.zeros((0, 4), dtype=xp.float64) + assert_(is_valid_linkage(Z) is False) + assert_raises(ValueError, is_valid_linkage, Z, throw=True) + + def test_is_valid_linkage_4_and_up(self, xp): + # Tests is_valid_linkage(Z) on linkage on observation sets between + # sizes 4 and 15 (step size 3). + for i in range(4, 15, 3): + y = np.random.rand(i*(i-1)//2) + y = xp.asarray(y) + Z = linkage(y) + assert_(is_valid_linkage(Z) is True) + + @skip_xp_backends('jax.numpy', + reason='jax arrays do not support item assignment') + def test_is_valid_linkage_4_and_up_neg_index_left(self, xp): + # Tests is_valid_linkage(Z) on linkage on observation sets between + # sizes 4 and 15 (step size 3) with negative indices (left). + for i in range(4, 15, 3): + y = np.random.rand(i*(i-1)//2) + y = xp.asarray(y) + Z = linkage(y) + Z[i//2,0] = -2 + assert_(is_valid_linkage(Z) is False) + assert_raises(ValueError, is_valid_linkage, Z, throw=True) + + @skip_xp_backends('jax.numpy', + reason='jax arrays do not support item assignment') + def test_is_valid_linkage_4_and_up_neg_index_right(self, xp): + # Tests is_valid_linkage(Z) on linkage on observation sets between + # sizes 4 and 15 (step size 3) with negative indices (right). 
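+        # Column 1 of a linkage matrix holds the index of the second merged
+        # cluster; forcing it negative must make is_valid_linkage() fail.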
+ for i in range(4, 15, 3): + y = np.random.rand(i*(i-1)//2) + y = xp.asarray(y) + Z = linkage(y) + Z[i//2,1] = -2 + assert_(is_valid_linkage(Z) is False) + assert_raises(ValueError, is_valid_linkage, Z, throw=True) + + @skip_xp_backends('jax.numpy', + reason='jax arrays do not support item assignment') + def test_is_valid_linkage_4_and_up_neg_dist(self, xp): + # Tests is_valid_linkage(Z) on linkage on observation sets between + # sizes 4 and 15 (step size 3) with negative distances. + for i in range(4, 15, 3): + y = np.random.rand(i*(i-1)//2) + y = xp.asarray(y) + Z = linkage(y) + Z[i//2,2] = -0.5 + assert_(is_valid_linkage(Z) is False) + assert_raises(ValueError, is_valid_linkage, Z, throw=True) + + @skip_xp_backends('jax.numpy', + reason='jax arrays do not support item assignment') + def test_is_valid_linkage_4_and_up_neg_counts(self, xp): + # Tests is_valid_linkage(Z) on linkage on observation sets between + # sizes 4 and 15 (step size 3) with negative counts. + for i in range(4, 15, 3): + y = np.random.rand(i*(i-1)//2) + y = xp.asarray(y) + Z = linkage(y) + Z[i//2,3] = -2 + assert_(is_valid_linkage(Z) is False) + assert_raises(ValueError, is_valid_linkage, Z, throw=True) + + +@skip_xp_backends(cpu_only=True) +class TestIsValidInconsistent: + + def test_is_valid_im_int_type(self, xp): + # Tests is_valid_im(R) with integer type. + R = xp.asarray([[0, 1, 3.0, 2], + [3, 2, 4.0, 3]], dtype=xp.int64) + assert_(is_valid_im(R) is False) + assert_raises(TypeError, is_valid_im, R, throw=True) + + def test_is_valid_im_various_size(self, xp): + for nrow, ncol, valid in [(2, 5, False), (2, 3, False), + (1, 4, True), (2, 4, True)]: + self.check_is_valid_im_various_size(nrow, ncol, valid, xp) + + def check_is_valid_im_various_size(self, nrow, ncol, valid, xp): + # Tests is_valid_im(R) with linkage matrices of various sizes + R = xp.asarray([[0, 1, 3.0, 2, 5], + [3, 2, 4.0, 3, 3]], dtype=xp.float64) + R = R[:nrow, :ncol] + assert_(is_valid_im(R) == valid) + if not valid: + assert_raises(ValueError, is_valid_im, R, throw=True) + + def test_is_valid_im_empty(self, xp): + # Tests is_valid_im(R) with empty inconsistency matrix. + R = xp.zeros((0, 4), dtype=xp.float64) + assert_(is_valid_im(R) is False) + assert_raises(ValueError, is_valid_im, R, throw=True) + + def test_is_valid_im_4_and_up(self, xp): + # Tests is_valid_im(R) on im on observation sets between sizes 4 and 15 + # (step size 3). + for i in range(4, 15, 3): + y = np.random.rand(i*(i-1)//2) + y = xp.asarray(y) + Z = linkage(y) + R = inconsistent(Z) + assert_(is_valid_im(R) is True) + + @skip_xp_backends('jax.numpy', reason='jax arrays do not support item assignment') + def test_is_valid_im_4_and_up_neg_index_left(self, xp): + # Tests is_valid_im(R) on im on observation sets between sizes 4 and 15 + # (step size 3) with negative link height means. + for i in range(4, 15, 3): + y = np.random.rand(i*(i-1)//2) + y = xp.asarray(y) + Z = linkage(y) + R = inconsistent(Z) + R[i//2,0] = -2.0 + assert_(is_valid_im(R) is False) + assert_raises(ValueError, is_valid_im, R, throw=True) + + @skip_xp_backends('jax.numpy', reason='jax arrays do not support item assignment') + def test_is_valid_im_4_and_up_neg_index_right(self, xp): + # Tests is_valid_im(R) on im on observation sets between sizes 4 and 15 + # (step size 3) with negative link height standard deviations. 
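+        # Column 1 of an inconsistency matrix holds the standard deviation of
+        # the link heights; a negative value must make is_valid_im() fail.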
+ for i in range(4, 15, 3): + y = np.random.rand(i*(i-1)//2) + y = xp.asarray(y) + Z = linkage(y) + R = inconsistent(Z) + R[i//2,1] = -2.0 + assert_(is_valid_im(R) is False) + assert_raises(ValueError, is_valid_im, R, throw=True) + + @skip_xp_backends('jax.numpy', reason='jax arrays do not support item assignment') + def test_is_valid_im_4_and_up_neg_dist(self, xp): + # Tests is_valid_im(R) on im on observation sets between sizes 4 and 15 + # (step size 3) with negative link counts. + for i in range(4, 15, 3): + y = np.random.rand(i*(i-1)//2) + y = xp.asarray(y) + Z = linkage(y) + R = inconsistent(Z) + R[i//2,2] = -0.5 + assert_(is_valid_im(R) is False) + assert_raises(ValueError, is_valid_im, R, throw=True) + + +class TestNumObsLinkage: + + @skip_xp_backends(cpu_only=True) + def test_num_obs_linkage_empty(self, xp): + # Tests num_obs_linkage(Z) with empty linkage. + Z = xp.zeros((0, 4), dtype=xp.float64) + assert_raises(ValueError, num_obs_linkage, Z) + + def test_num_obs_linkage_1x4(self, xp): + # Tests num_obs_linkage(Z) on linkage over 2 observations. + Z = xp.asarray([[0, 1, 3.0, 2]], dtype=xp.float64) + assert_equal(num_obs_linkage(Z), 2) + + def test_num_obs_linkage_2x4(self, xp): + # Tests num_obs_linkage(Z) on linkage over 3 observations. + Z = xp.asarray([[0, 1, 3.0, 2], + [3, 2, 4.0, 3]], dtype=xp.float64) + assert_equal(num_obs_linkage(Z), 3) + + @skip_xp_backends(cpu_only=True) + def test_num_obs_linkage_4_and_up(self, xp): + # Tests num_obs_linkage(Z) on linkage on observation sets between sizes + # 4 and 15 (step size 3). + for i in range(4, 15, 3): + y = np.random.rand(i*(i-1)//2) + y = xp.asarray(y) + Z = linkage(y) + assert_equal(num_obs_linkage(Z), i) + + +@skip_xp_backends(cpu_only=True) +class TestLeavesList: + + def test_leaves_list_1x4(self, xp): + # Tests leaves_list(Z) on a 1x4 linkage. + Z = xp.asarray([[0, 1, 3.0, 2]], dtype=xp.float64) + to_tree(Z) + assert_allclose(leaves_list(Z), [0, 1], rtol=1e-15) + + def test_leaves_list_2x4(self, xp): + # Tests leaves_list(Z) on a 2x4 linkage. + Z = xp.asarray([[0, 1, 3.0, 2], + [3, 2, 4.0, 3]], dtype=xp.float64) + to_tree(Z) + assert_allclose(leaves_list(Z), [0, 1, 2], rtol=1e-15) + + def test_leaves_list_Q(self, xp): + for method in ['single', 'complete', 'average', 'weighted', 'centroid', + 'median', 'ward']: + self.check_leaves_list_Q(method, xp) + + def check_leaves_list_Q(self, method, xp): + # Tests leaves_list(Z) on the Q data set + X = xp.asarray(hierarchy_test_data.Q_X) + Z = linkage(X, method) + node = to_tree(Z) + assert_allclose(node.pre_order(), leaves_list(Z), rtol=1e-15) + + def test_Q_subtree_pre_order(self, xp): + # Tests that pre_order() works when called on sub-trees. + X = xp.asarray(hierarchy_test_data.Q_X) + Z = linkage(X, 'single') + node = to_tree(Z) + assert_allclose(node.pre_order(), (node.get_left().pre_order() + + node.get_right().pre_order()), + rtol=1e-15) + + +@skip_xp_backends(cpu_only=True) +class TestCorrespond: + + def test_correspond_empty(self, xp): + # Tests correspond(Z, y) with empty linkage and condensed distance matrix. + y = xp.zeros((0,), dtype=xp.float64) + Z = xp.zeros((0,4), dtype=xp.float64) + assert_raises(ValueError, correspond, Z, y) + + def test_correspond_2_and_up(self, xp): + # Tests correspond(Z, y) on linkage and CDMs over observation sets of + # different sizes. 
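+        # correspond(Z, y) is True when the number of observations implied by
+        # the linkage matrix matches the size of the condensed distance matrix.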
+ for i in range(2, 4): + y = np.random.rand(i*(i-1)//2) + y = xp.asarray(y) + Z = linkage(y) + assert_(correspond(Z, y)) + for i in range(4, 15, 3): + y = np.random.rand(i*(i-1)//2) + y = xp.asarray(y) + Z = linkage(y) + assert_(correspond(Z, y)) + + def test_correspond_4_and_up(self, xp): + # Tests correspond(Z, y) on linkage and CDMs over observation sets of + # different sizes. Correspondence should be false. + for (i, j) in (list(zip(list(range(2, 4)), list(range(3, 5)))) + + list(zip(list(range(3, 5)), list(range(2, 4))))): + y = np.random.rand(i*(i-1)//2) + y2 = np.random.rand(j*(j-1)//2) + y = xp.asarray(y) + y2 = xp.asarray(y2) + Z = linkage(y) + Z2 = linkage(y2) + assert not correspond(Z, y2) + assert not correspond(Z2, y) + + def test_correspond_4_and_up_2(self, xp): + # Tests correspond(Z, y) on linkage and CDMs over observation sets of + # different sizes. Correspondence should be false. + for (i, j) in (list(zip(list(range(2, 7)), list(range(16, 21)))) + + list(zip(list(range(2, 7)), list(range(16, 21))))): + y = np.random.rand(i*(i-1)//2) + y2 = np.random.rand(j*(j-1)//2) + y = xp.asarray(y) + y2 = xp.asarray(y2) + Z = linkage(y) + Z2 = linkage(y2) + assert not correspond(Z, y2) + assert not correspond(Z2, y) + + def test_num_obs_linkage_multi_matrix(self, xp): + # Tests num_obs_linkage with observation matrices of multiple sizes. + for n in range(2, 10): + X = np.random.rand(n, 4) + Y = pdist(X) + Y = xp.asarray(Y) + Z = linkage(Y) + assert_equal(num_obs_linkage(Z), n) + + +@skip_xp_backends(cpu_only=True) +class TestIsMonotonic: + + def test_is_monotonic_empty(self, xp): + # Tests is_monotonic(Z) on an empty linkage. + Z = xp.zeros((0, 4), dtype=xp.float64) + assert_raises(ValueError, is_monotonic, Z) + + def test_is_monotonic_1x4(self, xp): + # Tests is_monotonic(Z) on 1x4 linkage. Expecting True. + Z = xp.asarray([[0, 1, 0.3, 2]], dtype=xp.float64) + assert is_monotonic(Z) + + def test_is_monotonic_2x4_T(self, xp): + # Tests is_monotonic(Z) on 2x4 linkage. Expecting True. + Z = xp.asarray([[0, 1, 0.3, 2], + [2, 3, 0.4, 3]], dtype=xp.float64) + assert is_monotonic(Z) + + def test_is_monotonic_2x4_F(self, xp): + # Tests is_monotonic(Z) on 2x4 linkage. Expecting False. + Z = xp.asarray([[0, 1, 0.4, 2], + [2, 3, 0.3, 3]], dtype=xp.float64) + assert not is_monotonic(Z) + + def test_is_monotonic_3x4_T(self, xp): + # Tests is_monotonic(Z) on 3x4 linkage. Expecting True. + Z = xp.asarray([[0, 1, 0.3, 2], + [2, 3, 0.4, 2], + [4, 5, 0.6, 4]], dtype=xp.float64) + assert is_monotonic(Z) + + def test_is_monotonic_3x4_F1(self, xp): + # Tests is_monotonic(Z) on 3x4 linkage (case 1). Expecting False. + Z = xp.asarray([[0, 1, 0.3, 2], + [2, 3, 0.2, 2], + [4, 5, 0.6, 4]], dtype=xp.float64) + assert not is_monotonic(Z) + + def test_is_monotonic_3x4_F2(self, xp): + # Tests is_monotonic(Z) on 3x4 linkage (case 2). Expecting False. + Z = xp.asarray([[0, 1, 0.8, 2], + [2, 3, 0.4, 2], + [4, 5, 0.6, 4]], dtype=xp.float64) + assert not is_monotonic(Z) + + def test_is_monotonic_3x4_F3(self, xp): + # Tests is_monotonic(Z) on 3x4 linkage (case 3). Expecting False + Z = xp.asarray([[0, 1, 0.3, 2], + [2, 3, 0.4, 2], + [4, 5, 0.2, 4]], dtype=xp.float64) + assert not is_monotonic(Z) + + def test_is_monotonic_tdist_linkage1(self, xp): + # Tests is_monotonic(Z) on clustering generated by single linkage on + # tdist data set. Expecting True. 
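+        # Single linkage produces monotonically non-decreasing merge heights,
+        # so is_monotonic() should always return True here.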
+ Z = linkage(xp.asarray(hierarchy_test_data.ytdist), 'single') + assert is_monotonic(Z) + + @skip_xp_backends('jax.numpy', reason='jax arrays do not support item assignment') + def test_is_monotonic_tdist_linkage2(self, xp): + # Tests is_monotonic(Z) on clustering generated by single linkage on + # tdist data set. Perturbing. Expecting False. + Z = linkage(xp.asarray(hierarchy_test_data.ytdist), 'single') + Z[2,2] = 0.0 + assert not is_monotonic(Z) + + def test_is_monotonic_Q_linkage(self, xp): + # Tests is_monotonic(Z) on clustering generated by single linkage on + # Q data set. Expecting True. + X = xp.asarray(hierarchy_test_data.Q_X) + Z = linkage(X, 'single') + assert is_monotonic(Z) + + +@skip_xp_backends(cpu_only=True) +class TestMaxDists: + + def test_maxdists_empty_linkage(self, xp): + # Tests maxdists(Z) on empty linkage. Expecting exception. + Z = xp.zeros((0, 4), dtype=xp.float64) + assert_raises(ValueError, maxdists, Z) + + @skip_xp_backends('jax.numpy', reason='jax arrays do not support item assignment') + def test_maxdists_one_cluster_linkage(self, xp): + # Tests maxdists(Z) on linkage with one cluster. + Z = xp.asarray([[0, 1, 0.3, 4]], dtype=xp.float64) + MD = maxdists(Z) + expectedMD = calculate_maximum_distances(Z, xp) + xp_assert_close(MD, expectedMD, atol=1e-15) + + @skip_xp_backends('jax.numpy', reason='jax arrays do not support item assignment') + def test_maxdists_Q_linkage(self, xp): + for method in ['single', 'complete', 'ward', 'centroid', 'median']: + self.check_maxdists_Q_linkage(method, xp) + + def check_maxdists_Q_linkage(self, method, xp): + # Tests maxdists(Z) on the Q data set + X = xp.asarray(hierarchy_test_data.Q_X) + Z = linkage(X, method) + MD = maxdists(Z) + expectedMD = calculate_maximum_distances(Z, xp) + xp_assert_close(MD, expectedMD, atol=1e-15) + + +class TestMaxInconsts: + + @skip_xp_backends(cpu_only=True) + def test_maxinconsts_empty_linkage(self, xp): + # Tests maxinconsts(Z, R) on empty linkage. Expecting exception. + Z = xp.zeros((0, 4), dtype=xp.float64) + R = xp.zeros((0, 4), dtype=xp.float64) + assert_raises(ValueError, maxinconsts, Z, R) + + def test_maxinconsts_difrow_linkage(self, xp): + # Tests maxinconsts(Z, R) on linkage and inconsistency matrices with + # different numbers of clusters. Expecting exception. + Z = xp.asarray([[0, 1, 0.3, 4]], dtype=xp.float64) + R = np.random.rand(2, 4) + R = xp.asarray(R) + assert_raises(ValueError, maxinconsts, Z, R) + + @skip_xp_backends('jax.numpy', reason='jax arrays do not support item assignment', + cpu_only=True) + def test_maxinconsts_one_cluster_linkage(self, xp): + # Tests maxinconsts(Z, R) on linkage with one cluster. 
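+        # With a single merge, the maximum inconsistency for the root is just
+        # the value in the only row of R; the expected result is recomputed by
+        # the calculate_maximum_inconsistencies() helper in this module.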
+ Z = xp.asarray([[0, 1, 0.3, 4]], dtype=xp.float64) + R = xp.asarray([[0, 0, 0, 0.3]], dtype=xp.float64) + MD = maxinconsts(Z, R) + expectedMD = calculate_maximum_inconsistencies(Z, R, xp=xp) + xp_assert_close(MD, expectedMD, atol=1e-15) + + @skip_xp_backends('jax.numpy', reason='jax arrays do not support item assignment', + cpu_only=True) + def test_maxinconsts_Q_linkage(self, xp): + for method in ['single', 'complete', 'ward', 'centroid', 'median']: + self.check_maxinconsts_Q_linkage(method, xp) + + def check_maxinconsts_Q_linkage(self, method, xp): + # Tests maxinconsts(Z, R) on the Q data set + X = xp.asarray(hierarchy_test_data.Q_X) + Z = linkage(X, method) + R = inconsistent(Z) + MD = maxinconsts(Z, R) + expectedMD = calculate_maximum_inconsistencies(Z, R, xp=xp) + xp_assert_close(MD, expectedMD, atol=1e-15) + + +class TestMaxRStat: + + def test_maxRstat_invalid_index(self, xp): + for i in [3.3, -1, 4]: + self.check_maxRstat_invalid_index(i, xp) + + def check_maxRstat_invalid_index(self, i, xp): + # Tests maxRstat(Z, R, i). Expecting exception. + Z = xp.asarray([[0, 1, 0.3, 4]], dtype=xp.float64) + R = xp.asarray([[0, 0, 0, 0.3]], dtype=xp.float64) + if isinstance(i, int): + assert_raises(ValueError, maxRstat, Z, R, i) + else: + assert_raises(TypeError, maxRstat, Z, R, i) + + @skip_xp_backends(cpu_only=True) + def test_maxRstat_empty_linkage(self, xp): + for i in range(4): + self.check_maxRstat_empty_linkage(i, xp) + + def check_maxRstat_empty_linkage(self, i, xp): + # Tests maxRstat(Z, R, i) on empty linkage. Expecting exception. + Z = xp.zeros((0, 4), dtype=xp.float64) + R = xp.zeros((0, 4), dtype=xp.float64) + assert_raises(ValueError, maxRstat, Z, R, i) + + def test_maxRstat_difrow_linkage(self, xp): + for i in range(4): + self.check_maxRstat_difrow_linkage(i, xp) + + def check_maxRstat_difrow_linkage(self, i, xp): + # Tests maxRstat(Z, R, i) on linkage and inconsistency matrices with + # different numbers of clusters. Expecting exception. + Z = xp.asarray([[0, 1, 0.3, 4]], dtype=xp.float64) + R = np.random.rand(2, 4) + R = xp.asarray(R) + assert_raises(ValueError, maxRstat, Z, R, i) + + @skip_xp_backends('jax.numpy', reason='jax arrays do not support item assignment', + cpu_only=True) + def test_maxRstat_one_cluster_linkage(self, xp): + for i in range(4): + self.check_maxRstat_one_cluster_linkage(i, xp) + + def check_maxRstat_one_cluster_linkage(self, i, xp): + # Tests maxRstat(Z, R, i) on linkage with one cluster. + Z = xp.asarray([[0, 1, 0.3, 4]], dtype=xp.float64) + R = xp.asarray([[0, 0, 0, 0.3]], dtype=xp.float64) + MD = maxRstat(Z, R, 1) + expectedMD = calculate_maximum_inconsistencies(Z, R, 1, xp) + xp_assert_close(MD, expectedMD, atol=1e-15) + + @skip_xp_backends('jax.numpy', reason='jax arrays do not support item assignment', + cpu_only=True) + def test_maxRstat_Q_linkage(self, xp): + for method in ['single', 'complete', 'ward', 'centroid', 'median']: + for i in range(4): + self.check_maxRstat_Q_linkage(method, i, xp) + + def check_maxRstat_Q_linkage(self, method, i, xp): + # Tests maxRstat(Z, R, i) on the Q data set + X = xp.asarray(hierarchy_test_data.Q_X) + Z = linkage(X, method) + R = inconsistent(Z) + MD = maxRstat(Z, R, 1) + expectedMD = calculate_maximum_inconsistencies(Z, R, 1, xp) + xp_assert_close(MD, expectedMD, atol=1e-15) + + +@skip_xp_backends(cpu_only=True) +class TestDendrogram: + + def test_dendrogram_single_linkage_tdist(self, xp): + # Tests dendrogram calculation on single linkage of the tdist data set. 
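+        # no_plot=True computes the dendrogram layout without drawing, so this
+        # test does not require matplotlib; only the leaf ordering is checked.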
+ Z = linkage(xp.asarray(hierarchy_test_data.ytdist), 'single') + R = dendrogram(Z, no_plot=True) + leaves = R["leaves"] + assert_equal(leaves, [2, 5, 1, 0, 3, 4]) + + def test_valid_orientation(self, xp): + Z = linkage(xp.asarray(hierarchy_test_data.ytdist), 'single') + assert_raises(ValueError, dendrogram, Z, orientation="foo") + + def test_labels_as_array_or_list(self, xp): + # test for gh-12418 + Z = linkage(xp.asarray(hierarchy_test_data.ytdist), 'single') + labels = [1, 3, 2, 6, 4, 5] + result1 = dendrogram(Z, labels=xp.asarray(labels), no_plot=True) + result2 = dendrogram(Z, labels=labels, no_plot=True) + assert result1 == result2 + + @pytest.mark.skipif(not have_matplotlib, reason="no matplotlib") + def test_valid_label_size(self, xp): + link = xp.asarray([ + [0, 1, 1.0, 4], + [2, 3, 1.0, 5], + [4, 5, 2.0, 6], + ]) + plt.figure() + with pytest.raises(ValueError) as exc_info: + dendrogram(link, labels=list(range(100))) + assert "Dimensions of Z and labels must be consistent."\ + in str(exc_info.value) + + with pytest.raises( + ValueError, + match="Dimensions of Z and labels must be consistent."): + dendrogram(link, labels=[]) + + plt.close() + + @skip_xp_backends('torch', + reason='MPL 3.9.2 & torch DeprecationWarning from __array_wrap__' + ' and NumPy 2.0' + ) + @pytest.mark.skipif(not have_matplotlib, reason="no matplotlib") + def test_dendrogram_plot(self, xp): + for orientation in ['top', 'bottom', 'left', 'right']: + self.check_dendrogram_plot(orientation, xp) + + def check_dendrogram_plot(self, orientation, xp): + # Tests dendrogram plotting. + Z = linkage(xp.asarray(hierarchy_test_data.ytdist), 'single') + expected = {'color_list': ['C1', 'C0', 'C0', 'C0', 'C0'], + 'dcoord': [[0.0, 138.0, 138.0, 0.0], + [0.0, 219.0, 219.0, 0.0], + [0.0, 255.0, 255.0, 219.0], + [0.0, 268.0, 268.0, 255.0], + [138.0, 295.0, 295.0, 268.0]], + 'icoord': [[5.0, 5.0, 15.0, 15.0], + [45.0, 45.0, 55.0, 55.0], + [35.0, 35.0, 50.0, 50.0], + [25.0, 25.0, 42.5, 42.5], + [10.0, 10.0, 33.75, 33.75]], + 'ivl': ['2', '5', '1', '0', '3', '4'], + 'leaves': [2, 5, 1, 0, 3, 4], + 'leaves_color_list': ['C1', 'C1', 'C0', 'C0', 'C0', 'C0'], + } + + fig = plt.figure() + ax = fig.add_subplot(221) + + # test that dendrogram accepts ax keyword + R1 = dendrogram(Z, ax=ax, orientation=orientation) + R1['dcoord'] = np.asarray(R1['dcoord']) + assert_equal(R1, expected) + + # test that dendrogram accepts and handle the leaf_font_size and + # leaf_rotation keywords + dendrogram(Z, ax=ax, orientation=orientation, + leaf_font_size=20, leaf_rotation=90) + testlabel = ( + ax.get_xticklabels()[0] + if orientation in ['top', 'bottom'] + else ax.get_yticklabels()[0] + ) + assert_equal(testlabel.get_rotation(), 90) + assert_equal(testlabel.get_size(), 20) + dendrogram(Z, ax=ax, orientation=orientation, + leaf_rotation=90) + testlabel = ( + ax.get_xticklabels()[0] + if orientation in ['top', 'bottom'] + else ax.get_yticklabels()[0] + ) + assert_equal(testlabel.get_rotation(), 90) + dendrogram(Z, ax=ax, orientation=orientation, + leaf_font_size=20) + testlabel = ( + ax.get_xticklabels()[0] + if orientation in ['top', 'bottom'] + else ax.get_yticklabels()[0] + ) + assert_equal(testlabel.get_size(), 20) + plt.close() + + # test plotting to gca (will import pylab) + R2 = dendrogram(Z, orientation=orientation) + plt.close() + R2['dcoord'] = np.asarray(R2['dcoord']) + assert_equal(R2, expected) + + @skip_xp_backends('torch', + reason='MPL 3.9.2 & torch DeprecationWarning from __array_wrap__' + ' and NumPy 2.0' + ) + @pytest.mark.skipif(not 
have_matplotlib, reason="no matplotlib") + def test_dendrogram_truncate_mode(self, xp): + Z = linkage(xp.asarray(hierarchy_test_data.ytdist), 'single') + + R = dendrogram(Z, 2, 'lastp', show_contracted=True) + plt.close() + R['dcoord'] = np.asarray(R['dcoord']) + assert_equal(R, {'color_list': ['C0'], + 'dcoord': [[0.0, 295.0, 295.0, 0.0]], + 'icoord': [[5.0, 5.0, 15.0, 15.0]], + 'ivl': ['(2)', '(4)'], + 'leaves': [6, 9], + 'leaves_color_list': ['C0', 'C0'], + }) + + R = dendrogram(Z, 2, 'mtica', show_contracted=True) + plt.close() + R['dcoord'] = np.asarray(R['dcoord']) + assert_equal(R, {'color_list': ['C1', 'C0', 'C0', 'C0'], + 'dcoord': [[0.0, 138.0, 138.0, 0.0], + [0.0, 255.0, 255.0, 0.0], + [0.0, 268.0, 268.0, 255.0], + [138.0, 295.0, 295.0, 268.0]], + 'icoord': [[5.0, 5.0, 15.0, 15.0], + [35.0, 35.0, 45.0, 45.0], + [25.0, 25.0, 40.0, 40.0], + [10.0, 10.0, 32.5, 32.5]], + 'ivl': ['2', '5', '1', '0', '(2)'], + 'leaves': [2, 5, 1, 0, 7], + 'leaves_color_list': ['C1', 'C1', 'C0', 'C0', 'C0'], + }) + + @pytest.fixture + def dendrogram_lock(self): + return Lock() + + def test_dendrogram_colors(self, xp, dendrogram_lock): + # Tests dendrogram plots with alternate colors + Z = linkage(xp.asarray(hierarchy_test_data.ytdist), 'single') + + with dendrogram_lock: + # Global color palette might be changed concurrently + set_link_color_palette(['c', 'm', 'y', 'k']) + R = dendrogram(Z, no_plot=True, + above_threshold_color='g', color_threshold=250) + set_link_color_palette(['g', 'r', 'c', 'm', 'y', 'k']) + + color_list = R['color_list'] + assert_equal(color_list, ['c', 'm', 'g', 'g', 'g']) + + # reset color palette (global list) + set_link_color_palette(None) + + def test_dendrogram_leaf_colors_zero_dist(self, xp): + # tests that the colors of leafs are correct for tree + # with two identical points + x = xp.asarray([[1, 0, 0], + [0, 0, 1], + [0, 2, 0], + [0, 0, 1], + [0, 1, 0], + [0, 1, 0]]) + z = linkage(x, "single") + d = dendrogram(z, no_plot=True) + exp_colors = ['C0', 'C1', 'C1', 'C0', 'C2', 'C2'] + colors = d["leaves_color_list"] + assert_equal(colors, exp_colors) + + def test_dendrogram_leaf_colors(self, xp): + # tests that the colors are correct for a tree + # with two near points ((0, 0, 1.1) and (0, 0, 1)) + x = xp.asarray([[1, 0, 0], + [0, 0, 1.1], + [0, 2, 0], + [0, 0, 1], + [0, 1, 0], + [0, 1, 0]]) + z = linkage(x, "single") + d = dendrogram(z, no_plot=True) + exp_colors = ['C0', 'C1', 'C1', 'C0', 'C2', 'C2'] + colors = d["leaves_color_list"] + assert_equal(colors, exp_colors) + + +def calculate_maximum_distances(Z, xp): + # Used for testing correctness of maxdists. + n = Z.shape[0] + 1 + B = xp.zeros((n-1,), dtype=Z.dtype) + q = xp.zeros((3,)) + for i in range(0, n - 1): + q[:] = 0.0 + left = Z[i, 0] + right = Z[i, 1] + if left >= n: + q[0] = B[xp.asarray(left, dtype=xp.int64) - n] + if right >= n: + q[1] = B[xp.asarray(right, dtype=xp.int64) - n] + q[2] = Z[i, 2] + B[i] = xp.max(q) + return B + + +def calculate_maximum_inconsistencies(Z, R, k=3, xp=np): + # Used for testing correctness of maxinconsts. 
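+ # Editor's comment: walks the linkage bottom-up; for each merge i, B[i] is the
+ # maximum of the inconsistency value R[i, k] and the values already stored for
+ # any non-leaf children (cluster ids >= n refer to previously formed clusters).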
+ n = Z.shape[0] + 1 + dtype = xp.result_type(Z, R) + B = xp.zeros((n-1,), dtype=dtype) + q = xp.zeros((3,)) + for i in range(0, n - 1): + q[:] = 0.0 + left = Z[i, 0] + right = Z[i, 1] + if left >= n: + q[0] = B[xp.asarray(left, dtype=xp.int64) - n] + if right >= n: + q[1] = B[xp.asarray(right, dtype=xp.int64) - n] + q[2] = R[i, k] + B[i] = xp.max(q) + return B + + +@pytest.mark.thread_unsafe +@skip_xp_backends(cpu_only=True) +def test_unsupported_uncondensed_distance_matrix_linkage_warning(xp): + assert_warns(ClusterWarning, linkage, xp.asarray([[0, 1], [1, 0]])) + + +def test_euclidean_linkage_value_error(xp): + for method in scipy.cluster.hierarchy._EUCLIDEAN_METHODS: + assert_raises(ValueError, linkage, xp.asarray([[1, 1], [1, 1]]), + method=method, metric='cityblock') + + +@skip_xp_backends(cpu_only=True) +def test_2x2_linkage(xp): + Z1 = linkage(xp.asarray([1]), method='single', metric='euclidean') + Z2 = linkage(xp.asarray([[0, 1], [0, 0]]), method='single', metric='euclidean') + xp_assert_close(Z1, Z2, rtol=1e-15) + + +@skip_xp_backends(cpu_only=True) +def test_node_compare(xp): + np.random.seed(23) + nobs = 50 + X = np.random.randn(nobs, 4) + X = xp.asarray(X) + Z = scipy.cluster.hierarchy.ward(X) + tree = to_tree(Z) + assert_(tree > tree.get_left()) + assert_(tree.get_right() > tree.get_left()) + assert_(tree.get_right() == tree.get_right()) + assert_(tree.get_right() != tree.get_left()) + + +@skip_xp_backends(np_only=True, reason='`cut_tree` uses non-standard indexing') +def test_cut_tree(xp): + np.random.seed(23) + nobs = 50 + X = np.random.randn(nobs, 4) + X = xp.asarray(X) + Z = scipy.cluster.hierarchy.ward(X) + cutree = cut_tree(Z) + + # cutree.dtype varies between int32 and int64 over platforms + xp_assert_close(cutree[:, 0], xp.arange(nobs), rtol=1e-15, check_dtype=False) + xp_assert_close(cutree[:, -1], xp.zeros(nobs), rtol=1e-15, check_dtype=False) + assert_equal(np.asarray(cutree).max(0), np.arange(nobs - 1, -1, -1)) + + xp_assert_close(cutree[:, [-5]], cut_tree(Z, n_clusters=5), rtol=1e-15) + xp_assert_close(cutree[:, [-5, -10]], cut_tree(Z, n_clusters=[5, 10]), rtol=1e-15) + xp_assert_close(cutree[:, [-10, -5]], cut_tree(Z, n_clusters=[10, 5]), rtol=1e-15) + + nodes = _order_cluster_tree(Z) + heights = xp.asarray([node.dist for node in nodes]) + + xp_assert_close(cutree[:, np.searchsorted(heights, [5])], + cut_tree(Z, height=5), rtol=1e-15) + xp_assert_close(cutree[:, np.searchsorted(heights, [5, 10])], + cut_tree(Z, height=[5, 10]), rtol=1e-15) + xp_assert_close(cutree[:, np.searchsorted(heights, [10, 5])], + cut_tree(Z, height=[10, 5]), rtol=1e-15) + + +@skip_xp_backends(cpu_only=True) +def test_optimal_leaf_ordering(xp): + # test with the distance vector y + Z = optimal_leaf_ordering(linkage(xp.asarray(hierarchy_test_data.ytdist)), + xp.asarray(hierarchy_test_data.ytdist)) + expectedZ = hierarchy_test_data.linkage_ytdist_single_olo + xp_assert_close(Z, xp.asarray(expectedZ), atol=1e-10) + + # test with the observation matrix X + Z = optimal_leaf_ordering(linkage(xp.asarray(hierarchy_test_data.X), 'ward'), + xp.asarray(hierarchy_test_data.X)) + expectedZ = hierarchy_test_data.linkage_X_ward_olo + xp_assert_close(Z, xp.asarray(expectedZ), atol=1e-06) + + +@skip_xp_backends(np_only=True, reason='`Heap` only supports NumPy backend') +def test_Heap(xp): + values = xp.asarray([2, -1, 0, -1.5, 3]) + heap = Heap(values) + + pair = heap.get_min() + assert_equal(pair['key'], 3) + assert_equal(pair['value'], -1.5) + + heap.remove_min() + pair = heap.get_min() + 
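+ # Editor's comment: after remove_min(), the smallest remaining value is -1
+ # (stored at key 1), which the next two assertions check.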
assert_equal(pair['key'], 1) + assert_equal(pair['value'], -1) + + heap.change_value(1, 2.5) + pair = heap.get_min() + assert_equal(pair['key'], 2) + assert_equal(pair['value'], 0) + + heap.remove_min() + heap.remove_min() + + heap.change_value(1, 10) + pair = heap.get_min() + assert_equal(pair['key'], 4) + assert_equal(pair['value'], 3) + + heap.remove_min() + pair = heap.get_min() + assert_equal(pair['key'], 1) + assert_equal(pair['value'], 10) + + +@skip_xp_backends(cpu_only=True) +def test_centroid_neg_distance(xp): + # gh-21011 + values = xp.asarray([0, 0, -1]) + with pytest.raises(ValueError): + # This is just checking that this doesn't crash + linkage(values, method='centroid') diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/cluster/tests/test_vq.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/cluster/tests/test_vq.py new file mode 100644 index 0000000000000000000000000000000000000000..d0321e7d81d79472ffc773baeee806aad14212fc --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/cluster/tests/test_vq.py @@ -0,0 +1,450 @@ +import warnings +import sys +from copy import deepcopy +from threading import Lock + +import numpy as np +from numpy.testing import ( + assert_array_equal, assert_equal, assert_, suppress_warnings +) +import pytest +from pytest import raises as assert_raises + +from scipy.cluster.vq import (kmeans, kmeans2, py_vq, vq, whiten, + ClusterError, _krandinit) +from scipy.cluster import _vq +from scipy.conftest import array_api_compatible +from scipy.sparse._sputils import matrix + +from scipy._lib import array_api_extra as xpx +from scipy._lib._array_api import ( + SCIPY_ARRAY_API, array_namespace, xp_copy, xp_assert_close, xp_assert_equal +) + +pytestmark = [array_api_compatible, pytest.mark.usefixtures("skip_xp_backends")] +skip_xp_backends = pytest.mark.skip_xp_backends + +TESTDATA_2D = np.array([ + -2.2, 1.17, -1.63, 1.69, -2.04, 4.38, -3.09, 0.95, -1.7, 4.79, -1.68, 0.68, + -2.26, 3.34, -2.29, 2.55, -1.72, -0.72, -1.99, 2.34, -2.75, 3.43, -2.45, + 2.41, -4.26, 3.65, -1.57, 1.87, -1.96, 4.03, -3.01, 3.86, -2.53, 1.28, + -4.0, 3.95, -1.62, 1.25, -3.42, 3.17, -1.17, 0.12, -3.03, -0.27, -2.07, + -0.55, -1.17, 1.34, -2.82, 3.08, -2.44, 0.24, -1.71, 2.48, -5.23, 4.29, + -2.08, 3.69, -1.89, 3.62, -2.09, 0.26, -0.92, 1.07, -2.25, 0.88, -2.25, + 2.02, -4.31, 3.86, -2.03, 3.42, -2.76, 0.3, -2.48, -0.29, -3.42, 3.21, + -2.3, 1.73, -2.84, 0.69, -1.81, 2.48, -5.24, 4.52, -2.8, 1.31, -1.67, + -2.34, -1.18, 2.17, -2.17, 2.82, -1.85, 2.25, -2.45, 1.86, -6.79, 3.94, + -2.33, 1.89, -1.55, 2.08, -1.36, 0.93, -2.51, 2.74, -2.39, 3.92, -3.33, + 2.99, -2.06, -0.9, -2.83, 3.35, -2.59, 3.05, -2.36, 1.85, -1.69, 1.8, + -1.39, 0.66, -2.06, 0.38, -1.47, 0.44, -4.68, 3.77, -5.58, 3.44, -2.29, + 2.24, -1.04, -0.38, -1.85, 4.23, -2.88, 0.73, -2.59, 1.39, -1.34, 1.75, + -1.95, 1.3, -2.45, 3.09, -1.99, 3.41, -5.55, 5.21, -1.73, 2.52, -2.17, + 0.85, -2.06, 0.49, -2.54, 2.07, -2.03, 1.3, -3.23, 3.09, -1.55, 1.44, + -0.81, 1.1, -2.99, 2.92, -1.59, 2.18, -2.45, -0.73, -3.12, -1.3, -2.83, + 0.2, -2.77, 3.24, -1.98, 1.6, -4.59, 3.39, -4.85, 3.75, -2.25, 1.71, -3.28, + 3.38, -1.74, 0.88, -2.41, 1.92, -2.24, 1.19, -2.48, 1.06, -1.68, -0.62, + -1.3, 0.39, -1.78, 2.35, -3.54, 2.44, -1.32, 0.66, -2.38, 2.76, -2.35, + 3.95, -1.86, 4.32, -2.01, -1.23, -1.79, 2.76, -2.13, -0.13, -5.25, 3.84, + -2.24, 1.59, -4.85, 2.96, -2.41, 0.01, -0.43, 0.13, -3.92, 2.91, -1.75, + -0.53, 
-1.69, 1.69, -1.09, 0.15, -2.11, 2.17, -1.53, 1.22, -2.1, -0.86, + -2.56, 2.28, -3.02, 3.33, -1.12, 3.86, -2.18, -1.19, -3.03, 0.79, -0.83, + 0.97, -3.19, 1.45, -1.34, 1.28, -2.52, 4.22, -4.53, 3.22, -1.97, 1.75, + -2.36, 3.19, -0.83, 1.53, -1.59, 1.86, -2.17, 2.3, -1.63, 2.71, -2.03, + 3.75, -2.57, -0.6, -1.47, 1.33, -1.95, 0.7, -1.65, 1.27, -1.42, 1.09, -3.0, + 3.87, -2.51, 3.06, -2.6, 0.74, -1.08, -0.03, -2.44, 1.31, -2.65, 2.99, + -1.84, 1.65, -4.76, 3.75, -2.07, 3.98, -2.4, 2.67, -2.21, 1.49, -1.21, + 1.22, -5.29, 2.38, -2.85, 2.28, -5.6, 3.78, -2.7, 0.8, -1.81, 3.5, -3.75, + 4.17, -1.29, 2.99, -5.92, 3.43, -1.83, 1.23, -1.24, -1.04, -2.56, 2.37, + -3.26, 0.39, -4.63, 2.51, -4.52, 3.04, -1.7, 0.36, -1.41, 0.04, -2.1, 1.0, + -1.87, 3.78, -4.32, 3.59, -2.24, 1.38, -1.99, -0.22, -1.87, 1.95, -0.84, + 2.17, -5.38, 3.56, -1.27, 2.9, -1.79, 3.31, -5.47, 3.85, -1.44, 3.69, + -2.02, 0.37, -1.29, 0.33, -2.34, 2.56, -1.74, -1.27, -1.97, 1.22, -2.51, + -0.16, -1.64, -0.96, -2.99, 1.4, -1.53, 3.31, -2.24, 0.45, -2.46, 1.71, + -2.88, 1.56, -1.63, 1.46, -1.41, 0.68, -1.96, 2.76, -1.61, + 2.11]).reshape((200, 2)) + + +# Global data +X = np.array([[3.0, 3], [4, 3], [4, 2], + [9, 2], [5, 1], [6, 2], [9, 4], + [5, 2], [5, 4], [7, 4], [6, 5]]) + +CODET1 = np.array([[3.0000, 3.0000], + [6.2000, 4.0000], + [5.8000, 1.8000]]) + +CODET2 = np.array([[11.0/3, 8.0/3], + [6.7500, 4.2500], + [6.2500, 1.7500]]) + +LABEL1 = np.array([0, 1, 2, 2, 2, 2, 1, 2, 1, 1, 1]) + + +class TestWhiten: + + def test_whiten(self, xp): + desired = xp.asarray([[5.08738849, 2.97091878], + [3.19909255, 0.69660580], + [4.51041982, 0.02640918], + [4.38567074, 0.95120889], + [2.32191480, 1.63195503]]) + + obs = xp.asarray([[0.98744510, 0.82766775], + [0.62093317, 0.19406729], + [0.87545741, 0.00735733], + [0.85124403, 0.26499712], + [0.45067590, 0.45464607]]) + xp_assert_close(whiten(obs), desired, rtol=1e-5) + + @pytest.fixture + def whiten_lock(self): + return Lock() + + @skip_xp_backends('jax.numpy', + reason='jax arrays do not support item assignment') + def test_whiten_zero_std(self, xp, whiten_lock): + desired = xp.asarray([[0., 1.0, 2.86666544], + [0., 1.0, 1.32460034], + [0., 1.0, 3.74382172]]) + + obs = xp.asarray([[0., 1., 0.74109533], + [0., 1., 0.34243798], + [0., 1., 0.96785929]]) + + with whiten_lock: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + + xp_assert_close(whiten(obs), desired, rtol=1e-5) + + assert_equal(len(w), 1) + assert_(issubclass(w[-1].category, RuntimeWarning)) + + def test_whiten_not_finite(self, xp): + for bad_value in xp.nan, xp.inf, -xp.inf: + obs = xp.asarray([[0.98744510, bad_value], + [0.62093317, 0.19406729], + [0.87545741, 0.00735733], + [0.85124403, 0.26499712], + [0.45067590, 0.45464607]]) + assert_raises(ValueError, whiten, obs) + + @pytest.mark.skipif(SCIPY_ARRAY_API, + reason='`np.matrix` unsupported in array API mode') + def test_whiten_not_finite_matrix(self, xp): + for bad_value in np.nan, np.inf, -np.inf: + obs = matrix([[0.98744510, bad_value], + [0.62093317, 0.19406729], + [0.87545741, 0.00735733], + [0.85124403, 0.26499712], + [0.45067590, 0.45464607]]) + assert_raises(ValueError, whiten, obs) + + +class TestVq: + + @skip_xp_backends(cpu_only=True) + def test_py_vq(self, xp): + initc = np.concatenate([[X[0]], [X[1]], [X[2]]]) + # label1.dtype varies between int32 and int64 over platforms + label1 = py_vq(xp.asarray(X), xp.asarray(initc))[0] + xp_assert_equal(label1, xp.asarray(LABEL1, dtype=xp.int64), + check_dtype=False) + + 
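+ # Editor's sketch (not part of the upstream test suite): for the module-level
+ # data above, the high-level `vq` wrapper yields the same assignments as
+ # `_vq.vq`, e.g.
+ #
+ #     initc = np.concatenate([[X[0]], [X[1]], [X[2]]])
+ #     codes, dists = vq(X, initc)
+ #     assert_array_equal(codes, LABEL1)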
@pytest.mark.skipif(SCIPY_ARRAY_API, + reason='`np.matrix` unsupported in array API mode') + def test_py_vq_matrix(self, xp): + initc = np.concatenate([[X[0]], [X[1]], [X[2]]]) + # label1.dtype varies between int32 and int64 over platforms + label1 = py_vq(matrix(X), matrix(initc))[0] + assert_array_equal(label1, LABEL1) + + @skip_xp_backends(np_only=True, reason='`_vq` only supports NumPy backend') + def test_vq(self, xp): + initc = np.concatenate([[X[0]], [X[1]], [X[2]]]) + label1, _ = _vq.vq(xp.asarray(X), xp.asarray(initc)) + assert_array_equal(label1, LABEL1) + _, _ = vq(xp.asarray(X), xp.asarray(initc)) + + @pytest.mark.skipif(SCIPY_ARRAY_API, + reason='`np.matrix` unsupported in array API mode') + def test_vq_matrix(self, xp): + initc = np.concatenate([[X[0]], [X[1]], [X[2]]]) + label1, _ = _vq.vq(matrix(X), matrix(initc)) + assert_array_equal(label1, LABEL1) + _, _ = vq(matrix(X), matrix(initc)) + + @skip_xp_backends(cpu_only=True) + def test_vq_1d(self, xp): + # Test special rank 1 vq algo, python implementation. + data = X[:, 0] + initc = data[:3] + a, b = _vq.vq(data, initc) + data = xp.asarray(data) + initc = xp.asarray(initc) + ta, tb = py_vq(data[:, np.newaxis], initc[:, np.newaxis]) + # ta.dtype varies between int32 and int64 over platforms + xp_assert_equal(ta, xp.asarray(a, dtype=xp.int64), check_dtype=False) + xp_assert_equal(tb, xp.asarray(b)) + + @skip_xp_backends(np_only=True, reason='`_vq` only supports NumPy backend') + def test__vq_sametype(self, xp): + a = xp.asarray([1.0, 2.0], dtype=xp.float64) + b = a.astype(xp.float32) + assert_raises(TypeError, _vq.vq, a, b) + + @skip_xp_backends(np_only=True, reason='`_vq` only supports NumPy backend') + def test__vq_invalid_type(self, xp): + a = xp.asarray([1, 2], dtype=int) + assert_raises(TypeError, _vq.vq, a, a) + + @skip_xp_backends(cpu_only=True) + def test_vq_large_nfeat(self, xp): + X = np.random.rand(20, 20) + code_book = np.random.rand(3, 20) + + codes0, dis0 = _vq.vq(X, code_book) + codes1, dis1 = py_vq( + xp.asarray(X), xp.asarray(code_book) + ) + xp_assert_close(dis1, xp.asarray(dis0), rtol=1e-5) + # codes1.dtype varies between int32 and int64 over platforms + xp_assert_equal(codes1, xp.asarray(codes0, dtype=xp.int64), check_dtype=False) + + X = X.astype(np.float32) + code_book = code_book.astype(np.float32) + + codes0, dis0 = _vq.vq(X, code_book) + codes1, dis1 = py_vq( + xp.asarray(X), xp.asarray(code_book) + ) + xp_assert_close(dis1, xp.asarray(dis0, dtype=xp.float64), rtol=1e-5) + # codes1.dtype varies between int32 and int64 over platforms + xp_assert_equal(codes1, xp.asarray(codes0, dtype=xp.int64), check_dtype=False) + + @skip_xp_backends(cpu_only=True) + def test_vq_large_features(self, xp): + X = np.random.rand(10, 5) * 1000000 + code_book = np.random.rand(2, 5) * 1000000 + + codes0, dis0 = _vq.vq(X, code_book) + codes1, dis1 = py_vq( + xp.asarray(X), xp.asarray(code_book) + ) + xp_assert_close(dis1, xp.asarray(dis0), rtol=1e-5) + # codes1.dtype varies between int32 and int64 over platforms + xp_assert_equal(codes1, xp.asarray(codes0, dtype=xp.int64), check_dtype=False) + + +# Whole class skipped on GPU for now; +# once pdist/cdist are hooked up for CuPy, more tests will work +@skip_xp_backends(cpu_only=True) +class TestKMean: + + def test_large_features(self, xp): + # Generate a data set with large values, and run kmeans on it to + # (regression for 1077). 
+ d = 300 + n = 100 + + m1 = np.random.randn(d) + m2 = np.random.randn(d) + x = 10000 * np.random.randn(n, d) - 20000 * m1 + y = 10000 * np.random.randn(n, d) + 20000 * m2 + + data = np.empty((x.shape[0] + y.shape[0], d), np.float64) + data[:x.shape[0]] = x + data[x.shape[0]:] = y + + # use `seed` to ensure backwards compatibility after SPEC7 + kmeans(xp.asarray(data), 2, seed=1) + + def test_kmeans_simple(self, xp): + rng = np.random.default_rng(54321) + initc = np.concatenate([[X[0]], [X[1]], [X[2]]]) + code1 = kmeans(xp.asarray(X), xp.asarray(initc), iter=1, rng=rng)[0] + xp_assert_close(code1, xp.asarray(CODET2)) + + @pytest.mark.skipif(SCIPY_ARRAY_API, + reason='`np.matrix` unsupported in array API mode') + def test_kmeans_simple_matrix(self, xp): + rng = np.random.default_rng(54321) + initc = np.concatenate([[X[0]], [X[1]], [X[2]]]) + code1 = kmeans(matrix(X), matrix(initc), iter=1, rng=rng)[0] + xp_assert_close(code1, CODET2) + + def test_kmeans_lost_cluster(self, xp): + # This will cause kmeans to have a cluster with no points. + data = xp.asarray(TESTDATA_2D) + initk = xp.asarray([[-1.8127404, -0.67128041], + [2.04621601, 0.07401111], + [-2.31149087, -0.05160469]]) + + kmeans(data, initk) + with suppress_warnings() as sup: + sup.filter(UserWarning, + "One of the clusters is empty. Re-run kmeans with a " + "different initialization") + kmeans2(data, initk, missing='warn') + + assert_raises(ClusterError, kmeans2, data, initk, missing='raise') + + def test_kmeans2_simple(self, xp): + rng = np.random.default_rng(12345678) + initc = xp.asarray(np.concatenate([[X[0]], [X[1]], [X[2]]])) + arrays = [xp.asarray] if SCIPY_ARRAY_API else [np.asarray, matrix] + for tp in arrays: + code1 = kmeans2(tp(X), tp(initc), iter=1, rng=rng)[0] + code2 = kmeans2(tp(X), tp(initc), iter=2, rng=rng)[0] + + xp_assert_close(code1, xp.asarray(CODET1)) + xp_assert_close(code2, xp.asarray(CODET2)) + + @pytest.mark.skipif(SCIPY_ARRAY_API, + reason='`np.matrix` unsupported in array API mode') + def test_kmeans2_simple_matrix(self, xp): + rng = np.random.default_rng(12345678) + initc = xp.asarray(np.concatenate([[X[0]], [X[1]], [X[2]]])) + code1 = kmeans2(matrix(X), matrix(initc), iter=1, rng=rng)[0] + code2 = kmeans2(matrix(X), matrix(initc), iter=2, rng=rng)[0] + + xp_assert_close(code1, CODET1) + xp_assert_close(code2, CODET2) + + def test_kmeans2_rank1(self, xp): + data = xp.asarray(TESTDATA_2D) + data1 = data[:, 0] + + initc = data1[:3] + code = xp_copy(initc, xp=xp) + + # use `seed` to ensure backwards compatibility after SPEC7 + kmeans2(data1, code, iter=1, seed=1)[0] + kmeans2(data1, code, iter=2)[0] + + def test_kmeans2_rank1_2(self, xp): + data = xp.asarray(TESTDATA_2D) + data1 = data[:, 0] + kmeans2(data1, 2, iter=1) + + def test_kmeans2_high_dim(self, xp): + # test kmeans2 when the number of dimensions exceeds the number + # of input points + data = xp.asarray(TESTDATA_2D) + data = xp.reshape(data, (20, 20))[:10, :] + kmeans2(data, 2) + + @skip_xp_backends('jax.numpy', + reason='jax arrays do not support item assignment') + def test_kmeans2_init(self, xp): + rng = np.random.default_rng(12345678) + data = xp.asarray(TESTDATA_2D) + k = 3 + + kmeans2(data, k, minit='points', rng=rng) + kmeans2(data[:, 1], k, minit='points', rng=rng) # special case (1-D) + + kmeans2(data, k, minit='++', rng=rng) + kmeans2(data[:, 1], k, minit='++', rng=rng) # special case (1-D) + + # minit='random' can give warnings, filter those + with suppress_warnings() as sup: + sup.filter(message="One of the clusters is empty. 
Re-run.") + kmeans2(data, k, minit='random', rng=rng) + kmeans2(data[:, 1], k, minit='random', rng=rng) # special case (1-D) + + @pytest.fixture + def krand_lock(self): + return Lock() + + @pytest.mark.skipif(sys.platform == 'win32', + reason='Fails with MemoryError in Wine.') + def test_krandinit(self, xp, krand_lock): + data = xp.asarray(TESTDATA_2D) + datas = [xp.reshape(data, (200, 2)), + xp.reshape(data, (20, 20))[:10, :]] + k = int(1e6) + xp_test = array_namespace(data) + with krand_lock: + for data in datas: + rng = np.random.default_rng(1234) + init = _krandinit(data, k, rng, xp_test) + orig_cov = xpx.cov(data.T, xp=xp_test) + init_cov = xpx.cov(init.T, xp=xp_test) + xp_assert_close(orig_cov, init_cov, atol=1.1e-2) + + def test_kmeans2_empty(self, xp): + # Regression test for gh-1032. + assert_raises(ValueError, kmeans2, xp.asarray([]), 2) + + def test_kmeans_0k(self, xp): + # Regression test for gh-1073: fail when k arg is 0. + assert_raises(ValueError, kmeans, xp.asarray(X), 0) + assert_raises(ValueError, kmeans2, xp.asarray(X), 0) + assert_raises(ValueError, kmeans2, xp.asarray(X), xp.asarray([])) + + def test_kmeans_large_thres(self, xp): + # Regression test for gh-1774 + x = xp.asarray([1, 2, 3, 4, 10], dtype=xp.float64) + res = kmeans(x, 1, thresh=1e16) + xp_assert_close(res[0], xp.asarray([4.], dtype=xp.float64)) + xp_assert_close(res[1], xp.asarray(2.3999999999999999, dtype=xp.float64)[()]) + + @skip_xp_backends('jax.numpy', + reason='jax arrays do not support item assignment') + def test_kmeans2_kpp_low_dim(self, xp): + # Regression test for gh-11462 + rng = np.random.default_rng(2358792345678234568) + prev_res = xp.asarray([[-1.95266667, 0.898], + [-3.153375, 3.3945]], dtype=xp.float64) + res, _ = kmeans2(xp.asarray(TESTDATA_2D), 2, minit='++', rng=rng) + xp_assert_close(res, prev_res) + + @pytest.mark.thread_unsafe + @skip_xp_backends('jax.numpy', + reason='jax arrays do not support item assignment') + def test_kmeans2_kpp_high_dim(self, xp): + # Regression test for gh-11462 + rng = np.random.default_rng(23587923456834568) + n_dim = 100 + size = 10 + centers = np.vstack([5 * np.ones(n_dim), + -5 * np.ones(n_dim)]) + + data = np.vstack([ + rng.multivariate_normal(centers[0], np.eye(n_dim), size=size), + rng.multivariate_normal(centers[1], np.eye(n_dim), size=size) + ]) + + data = xp.asarray(data) + res, _ = kmeans2(data, 2, minit='++', rng=rng) + xp_assert_equal(xp.sign(res), xp.sign(xp.asarray(centers))) + + def test_kmeans_diff_convergence(self, xp): + # Regression test for gh-8727 + obs = xp.asarray([-3, -1, 0, 1, 1, 8], dtype=xp.float64) + res = kmeans(obs, xp.asarray([-3., 0.99])) + xp_assert_close(res[0], xp.asarray([-0.4, 8.], dtype=xp.float64)) + xp_assert_close(res[1], xp.asarray(1.0666666666666667, dtype=xp.float64)[()]) + + @skip_xp_backends('jax.numpy', + reason='jax arrays do not support item assignment') + def test_kmeans_and_kmeans2_random_seed(self, xp): + + seed_list = [ + 1234, np.random.RandomState(1234), np.random.default_rng(1234) + ] + + for seed in seed_list: + seed1 = deepcopy(seed) + seed2 = deepcopy(seed) + data = xp.asarray(TESTDATA_2D) + # test for kmeans + res1, _ = kmeans(data, 2, seed=seed1) + res2, _ = kmeans(data, 2, seed=seed2) + xp_assert_close(res1, res2) # should be same results + # test for kmeans2 + for minit in ["random", "points", "++"]: + res1, _ = kmeans2(data, 2, minit=minit, seed=seed1) + res2, _ = kmeans2(data, 2, minit=minit, seed=seed2) + xp_assert_close(res1, res2) # should be same results diff --git 
a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/cluster/vq.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/cluster/vq.py new file mode 100644 index 0000000000000000000000000000000000000000..a791e2956070d0785556dd9c4f877004ae50cd9f --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/cluster/vq.py @@ -0,0 +1,828 @@ +""" +K-means clustering and vector quantization (:mod:`scipy.cluster.vq`) +==================================================================== + +Provides routines for k-means clustering, generating code books +from k-means models and quantizing vectors by comparing them with +centroids in a code book. + +.. autosummary:: + :toctree: generated/ + + whiten -- Normalize a group of observations so each feature has unit variance + vq -- Calculate code book membership of a set of observation vectors + kmeans -- Perform k-means on a set of observation vectors forming k clusters + kmeans2 -- A different implementation of k-means with more methods + -- for initializing centroids + +Background information +---------------------- +The k-means algorithm takes as input the number of clusters to +generate, k, and a set of observation vectors to cluster. It +returns a set of centroids, one for each of the k clusters. An +observation vector is classified with the cluster number or +centroid index of the centroid closest to it. + +A vector v belongs to cluster i if it is closer to centroid i than +any other centroid. If v belongs to i, we say centroid i is the +dominating centroid of v. The k-means algorithm tries to +minimize distortion, which is defined as the sum of the squared distances +between each observation vector and its dominating centroid. +The minimization is achieved by iteratively reclassifying +the observations into clusters and recalculating the centroids until +a configuration is reached in which the centroids are stable. One can +also define a maximum number of iterations. + +Since vector quantization is a natural application for k-means, +information theory terminology is often used. The centroid index +or cluster index is also referred to as a "code" and the table +mapping codes to centroids and, vice versa, is often referred to as a +"code book". The result of k-means, a set of centroids, can be +used to quantize vectors. Quantization aims to find an encoding of +vectors that reduces the expected distortion. + +All routines expect obs to be an M by N array, where the rows are +the observation vectors. The codebook is a k by N array, where the +ith row is the centroid of code word i. The observation vectors +and centroids have the same feature dimension. + +As an example, suppose we wish to compress a 24-bit color image +(each pixel is represented by one byte for red, one for blue, and +one for green) before sending it over the web. By using a smaller +8-bit encoding, we can reduce the amount of data by two +thirds. Ideally, the colors for each of the 256 possible 8-bit +encoding values should be chosen to minimize distortion of the +color. Running k-means with k=256 generates a code book of 256 +codes, which fills up all possible 8-bit sequences. Instead of +sending a 3-byte value for each pixel, the 8-bit centroid index +(or code word) of the dominating centroid is transmitted. The code +book is also sent over the wire so each 8-bit code can be +translated back to a 24-bit pixel value representation. 
If the +image of interest was of an ocean, we would expect many 24-bit +blues to be represented by 8-bit codes. If it was an image of a +human face, more flesh-tone colors would be represented in the +code book. + +""" +import warnings +import numpy as np +from collections import deque +from scipy._lib._array_api import ( + _asarray, array_namespace, xp_size, xp_copy +) +from scipy._lib._util import (check_random_state, rng_integers, + _transition_to_rng) +from scipy._lib import array_api_extra as xpx +from scipy.spatial.distance import cdist + +from . import _vq + +__docformat__ = 'restructuredtext' + +__all__ = ['whiten', 'vq', 'kmeans', 'kmeans2'] + + +class ClusterError(Exception): + pass + + +def whiten(obs, check_finite=True): + """ + Normalize a group of observations on a per feature basis. + + Before running k-means, it is beneficial to rescale each feature + dimension of the observation set by its standard deviation (i.e. "whiten" + it - as in "white noise" where each frequency has equal power). + Each feature is divided by its standard deviation across all observations + to give it unit variance. + + Parameters + ---------- + obs : ndarray + Each row of the array is an observation. The + columns are the features seen during each observation. + + >>> # f0 f1 f2 + >>> obs = [[ 1., 1., 1.], #o0 + ... [ 2., 2., 2.], #o1 + ... [ 3., 3., 3.], #o2 + ... [ 4., 4., 4.]] #o3 + + check_finite : bool, optional + Whether to check that the input matrices contain only finite numbers. + Disabling may give a performance gain, but may result in problems + (crashes, non-termination) if the inputs do contain infinities or NaNs. + Default: True + + Returns + ------- + result : ndarray + Contains the values in `obs` scaled by the standard deviation + of each column. + + Examples + -------- + >>> import numpy as np + >>> from scipy.cluster.vq import whiten + >>> features = np.array([[1.9, 2.3, 1.7], + ... [1.5, 2.5, 2.2], + ... [0.8, 0.6, 1.7,]]) + >>> whiten(features) + array([[ 4.17944278, 2.69811351, 7.21248917], + [ 3.29956009, 2.93273208, 9.33380951], + [ 1.75976538, 0.7038557 , 7.21248917]]) + + """ + xp = array_namespace(obs) + obs = _asarray(obs, check_finite=check_finite, xp=xp) + std_dev = xp.std(obs, axis=0) + zero_std_mask = std_dev == 0 + if xp.any(zero_std_mask): + std_dev[zero_std_mask] = 1.0 + warnings.warn("Some columns have standard deviation zero. " + "The values of these columns will not change.", + RuntimeWarning, stacklevel=2) + return obs / std_dev + + +def vq(obs, code_book, check_finite=True): + """ + Assign codes from a code book to observations. + + Assigns a code from a code book to each observation. Each + observation vector in the 'M' by 'N' `obs` array is compared with the + centroids in the code book and assigned the code of the closest + centroid. + + The features in `obs` should have unit variance, which can be + achieved by passing them through the whiten function. The code + book can be created with the k-means algorithm or a different + encoding algorithm. + + Parameters + ---------- + obs : ndarray + Each row of the 'M' x 'N' array is an observation. The columns are + the "features" seen during each observation. The features must be + whitened first using the whiten function or something equivalent. + code_book : ndarray + The code book is usually generated using the k-means algorithm. + Each row of the array holds a different code, and the columns are + the features of the code. + + >>> # f0 f1 f2 f3 + >>> code_book = [ + ... [ 1., 2., 3., 4.], #c0 + ... 
[ 1., 2., 3., 4.], #c1 + ... [ 1., 2., 3., 4.]] #c2 + + check_finite : bool, optional + Whether to check that the input matrices contain only finite numbers. + Disabling may give a performance gain, but may result in problems + (crashes, non-termination) if the inputs do contain infinities or NaNs. + Default: True + + Returns + ------- + code : ndarray + A length M array holding the code book index for each observation. + dist : ndarray + The distortion (distance) between the observation and its nearest + code. + + Examples + -------- + >>> import numpy as np + >>> from scipy.cluster.vq import vq + >>> code_book = np.array([[1., 1., 1.], + ... [2., 2., 2.]]) + >>> features = np.array([[1.9, 2.3, 1.7], + ... [1.5, 2.5, 2.2], + ... [0.8, 0.6, 1.7]]) + >>> vq(features, code_book) + (array([1, 1, 0], dtype=int32), array([0.43588989, 0.73484692, 0.83066239])) + + """ + xp = array_namespace(obs, code_book) + obs = _asarray(obs, xp=xp, check_finite=check_finite) + code_book = _asarray(code_book, xp=xp, check_finite=check_finite) + ct = xp.result_type(obs, code_book) + + c_obs = xp.astype(obs, ct, copy=False) + c_code_book = xp.astype(code_book, ct, copy=False) + + if xp.isdtype(ct, kind='real floating'): + c_obs = np.asarray(c_obs) + c_code_book = np.asarray(c_code_book) + result = _vq.vq(c_obs, c_code_book) + return xp.asarray(result[0]), xp.asarray(result[1]) + return py_vq(obs, code_book, check_finite=False) + + +def py_vq(obs, code_book, check_finite=True): + """ Python version of vq algorithm. + + The algorithm computes the Euclidean distance between each + observation and every frame in the code_book. + + Parameters + ---------- + obs : ndarray + Expects a rank 2 array. Each row is one observation. + code_book : ndarray + Code book to use. Same format as obs. Should have the same number of + features (e.g., columns) as obs. + check_finite : bool, optional + Whether to check that the input matrices contain only finite numbers. + Disabling may give a performance gain, but may result in problems + (crashes, non-termination) if the inputs do contain infinities or NaNs. + Default: True + + Returns + ------- + code : ndarray + code[i] gives the label of the ith observation; its code is + code_book[code[i]]. + min_dist : ndarray + min_dist[i] gives the distance between the ith observation and its + corresponding code. + + Notes + ----- + This function is slower than the C version but works for + all input types. If the inputs have the wrong types for the + C versions of the function, this one is called as a last resort. + + It is about 20 times slower than the C version. + + """ + xp = array_namespace(obs, code_book) + obs = _asarray(obs, xp=xp, check_finite=check_finite) + code_book = _asarray(code_book, xp=xp, check_finite=check_finite) + + if obs.ndim != code_book.ndim: + raise ValueError("Observation and code_book should have the same rank") + + if obs.ndim == 1: + obs = obs[:, xp.newaxis] + code_book = code_book[:, xp.newaxis] + + # Once `cdist` has array API support, this `xp.asarray` call can be removed + dist = xp.asarray(cdist(obs, code_book)) + code = xp.argmin(dist, axis=1) + min_dist = xp.min(dist, axis=1) + return code, min_dist + + +def _kmeans(obs, guess, thresh=1e-5, xp=None): + """ "raw" version of k-means. + + Returns + ------- + code_book + The lowest distortion codebook found. + avg_dist + The average distance an observation is from a code in the book. + Lower means the code_book matches the data better. 
+ + See Also + -------- + kmeans : wrapper around k-means + + Examples + -------- + Note: not whitened in this example. + + >>> import numpy as np + >>> from scipy.cluster.vq import _kmeans + >>> features = np.array([[ 1.9,2.3], + ... [ 1.5,2.5], + ... [ 0.8,0.6], + ... [ 0.4,1.8], + ... [ 1.0,1.0]]) + >>> book = np.array((features[0],features[2])) + >>> _kmeans(features,book) + (array([[ 1.7 , 2.4 ], + [ 0.73333333, 1.13333333]]), 0.40563916697728591) + + """ + xp = np if xp is None else xp + code_book = guess + diff = xp.inf + prev_avg_dists = deque([diff], maxlen=2) + while diff > thresh: + # compute membership and distances between obs and code_book + obs_code, distort = vq(obs, code_book, check_finite=False) + prev_avg_dists.append(xp.mean(distort, axis=-1)) + # recalc code_book as centroids of associated obs + obs = np.asarray(obs) + obs_code = np.asarray(obs_code) + code_book, has_members = _vq.update_cluster_means(obs, obs_code, + code_book.shape[0]) + obs = xp.asarray(obs) + obs_code = xp.asarray(obs_code) + code_book = xp.asarray(code_book) + has_members = xp.asarray(has_members) + code_book = code_book[has_members] + diff = xp.abs(prev_avg_dists[0] - prev_avg_dists[1]) + + return code_book, prev_avg_dists[1] + + +@_transition_to_rng("seed") +def kmeans(obs, k_or_guess, iter=20, thresh=1e-5, check_finite=True, + *, rng=None): + """ + Performs k-means on a set of observation vectors forming k clusters. + + The k-means algorithm adjusts the classification of the observations + into clusters and updates the cluster centroids until the position of + the centroids is stable over successive iterations. In this + implementation of the algorithm, the stability of the centroids is + determined by comparing the absolute value of the change in the average + Euclidean distance between the observations and their corresponding + centroids against a threshold. This yields + a code book mapping centroids to codes and vice versa. + + Parameters + ---------- + obs : ndarray + Each row of the M by N array is an observation vector. The + columns are the features seen during each observation. + The features must be whitened first with the `whiten` function. + + k_or_guess : int or ndarray + The number of centroids to generate. A code is assigned to + each centroid, which is also the row index of the centroid + in the code_book matrix generated. + + The initial k centroids are chosen by randomly selecting + observations from the observation matrix. Alternatively, + passing a k by N array specifies the initial k centroids. + + iter : int, optional + The number of times to run k-means, returning the codebook + with the lowest distortion. This argument is ignored if + initial centroids are specified with an array for the + ``k_or_guess`` parameter. This parameter does not represent the + number of iterations of the k-means algorithm. + + thresh : float, optional + Terminates the k-means algorithm if the change in + distortion since the last k-means iteration is less than + or equal to threshold. + + check_finite : bool, optional + Whether to check that the input matrices contain only finite numbers. + Disabling may give a performance gain, but may result in problems + (crashes, non-termination) if the inputs do contain infinities or NaNs. + Default: True + rng : `numpy.random.Generator`, optional + Pseudorandom number generator state. When `rng` is None, a new + `numpy.random.Generator` is created using entropy from the + operating system. 
Types other than `numpy.random.Generator` are + passed to `numpy.random.default_rng` to instantiate a ``Generator``. + + Returns + ------- + codebook : ndarray + A k by N array of k centroids. The ith centroid + codebook[i] is represented with the code i. The centroids + and codes generated represent the lowest distortion seen, + not necessarily the globally minimal distortion. + Note that the number of centroids is not necessarily the same as the + ``k_or_guess`` parameter, because centroids assigned to no observations + are removed during iterations. + + distortion : float + The mean (non-squared) Euclidean distance between the observations + passed and the centroids generated. Note the difference to the standard + definition of distortion in the context of the k-means algorithm, which + is the sum of the squared distances. + + See Also + -------- + kmeans2 : a different implementation of k-means clustering + with more methods for generating initial centroids but without + using a distortion change threshold as a stopping criterion. + + whiten : must be called prior to passing an observation matrix + to kmeans. + + Notes + ----- + For more functionalities or optimal performance, you can use + `sklearn.cluster.KMeans `_. + `This `_ + is a benchmark result of several implementations. + + Examples + -------- + >>> import numpy as np + >>> from scipy.cluster.vq import vq, kmeans, whiten + >>> import matplotlib.pyplot as plt + >>> features = np.array([[ 1.9,2.3], + ... [ 1.5,2.5], + ... [ 0.8,0.6], + ... [ 0.4,1.8], + ... [ 0.1,0.1], + ... [ 0.2,1.8], + ... [ 2.0,0.5], + ... [ 0.3,1.5], + ... [ 1.0,1.0]]) + >>> whitened = whiten(features) + >>> book = np.array((whitened[0],whitened[2])) + >>> kmeans(whitened,book) + (array([[ 2.3110306 , 2.86287398], # random + [ 0.93218041, 1.24398691]]), 0.85684700941625547) + + >>> codes = 3 + >>> kmeans(whitened,codes) + (array([[ 2.3110306 , 2.86287398], # random + [ 1.32544402, 0.65607529], + [ 0.40782893, 2.02786907]]), 0.5196582527686241) + + >>> # Create 50 datapoints in two clusters a and b + >>> pts = 50 + >>> rng = np.random.default_rng() + >>> a = rng.multivariate_normal([0, 0], [[4, 1], [1, 4]], size=pts) + >>> b = rng.multivariate_normal([30, 10], + ... [[10, 2], [2, 1]], + ... size=pts) + >>> features = np.concatenate((a, b)) + >>> # Whiten data + >>> whitened = whiten(features) + >>> # Find 2 clusters in the data + >>> codebook, distortion = kmeans(whitened, 2) + >>> # Plot whitened data and cluster centers in red + >>> plt.scatter(whitened[:, 0], whitened[:, 1]) + >>> plt.scatter(codebook[:, 0], codebook[:, 1], c='r') + >>> plt.show() + + """ + if isinstance(k_or_guess, int): + xp = array_namespace(obs) + else: + xp = array_namespace(obs, k_or_guess) + obs = _asarray(obs, xp=xp, check_finite=check_finite) + guess = _asarray(k_or_guess, xp=xp, check_finite=check_finite) + if iter < 1: + raise ValueError(f"iter must be at least 1, got {iter}") + + # Determine whether a count (scalar) or an initial guess (array) was passed. + if xp_size(guess) != 1: + if xp_size(guess) < 1: + raise ValueError(f"Asked for 0 clusters. Initial book was {guess}") + return _kmeans(obs, guess, thresh=thresh, xp=xp) + + # k_or_guess is a scalar, now verify that it's an integer + k = int(guess) + if k != guess: + raise ValueError("If k_or_guess is a scalar, it must be an integer.") + if k < 1: + raise ValueError("Asked for %d clusters." 
% k) + + rng = check_random_state(rng) + + # initialize best distance value to a large value + best_dist = xp.inf + for i in range(iter): + # the initial code book is randomly selected from observations + guess = _kpoints(obs, k, rng, xp) + book, dist = _kmeans(obs, guess, thresh=thresh, xp=xp) + if dist < best_dist: + best_book = book + best_dist = dist + return best_book, best_dist + + +def _kpoints(data, k, rng, xp): + """Pick k points at random in data (one row = one observation). + + Parameters + ---------- + data : ndarray + Expect a rank 1 or 2 array. Rank 1 are assumed to describe one + dimensional data, rank 2 multidimensional data, in which case one + row is one observation. + k : int + Number of samples to generate. + rng : `numpy.random.Generator` or `numpy.random.RandomState` + Random number generator. + + Returns + ------- + x : ndarray + A 'k' by 'N' containing the initial centroids + + """ + idx = rng.choice(data.shape[0], size=int(k), replace=False) + # convert to array with default integer dtype (avoids numpy#25607) + idx = xp.asarray(idx, dtype=xp.asarray([1]).dtype) + return xp.take(data, idx, axis=0) + + +def _krandinit(data, k, rng, xp): + """Returns k samples of a random variable whose parameters depend on data. + + More precisely, it returns k observations sampled from a Gaussian random + variable whose mean and covariances are the ones estimated from the data. + + Parameters + ---------- + data : ndarray + Expect a rank 1 or 2 array. Rank 1 is assumed to describe 1-D + data, rank 2 multidimensional data, in which case one + row is one observation. + k : int + Number of samples to generate. + rng : `numpy.random.Generator` or `numpy.random.RandomState` + Random number generator. + + Returns + ------- + x : ndarray + A 'k' by 'N' containing the initial centroids + + """ + mu = xp.mean(data, axis=0) + k = np.asarray(k) + + if data.ndim == 1: + _cov = xpx.cov(data, xp=xp) + x = rng.standard_normal(size=k) + x = xp.asarray(x) + x *= xp.sqrt(_cov) + elif data.shape[1] > data.shape[0]: + # initialize when the covariance matrix is rank deficient + _, s, vh = xp.linalg.svd(data - mu, full_matrices=False) + x = rng.standard_normal(size=(k, xp_size(s))) + x = xp.asarray(x) + sVh = s[:, None] * vh / xp.sqrt(data.shape[0] - xp.asarray(1.)) + x = x @ sVh + else: + _cov = xpx.atleast_nd(xpx.cov(data.T, xp=xp), ndim=2, xp=xp) + + # k rows, d cols (one row = one obs) + # Generate k sample of a random variable ~ Gaussian(mu, cov) + x = rng.standard_normal(size=(k, xp_size(mu))) + x = xp.asarray(x) + x = x @ xp.linalg.cholesky(_cov).T + + x += mu + return x + + +def _kpp(data, k, rng, xp): + """ Picks k points in the data based on the kmeans++ method. + + Parameters + ---------- + data : ndarray + Expect a rank 1 or 2 array. Rank 1 is assumed to describe 1-D + data, rank 2 multidimensional data, in which case one + row is one observation. + k : int + Number of samples to generate. + rng : `numpy.random.Generator` or `numpy.random.RandomState` + Random number generator. + + Returns + ------- + init : ndarray + A 'k' by 'N' containing the initial centroids. + + References + ---------- + .. [1] D. Arthur and S. Vassilvitskii, "k-means++: the advantages of + careful seeding", Proceedings of the Eighteenth Annual ACM-SIAM Symposium + on Discrete Algorithms, 2007. 
+ """ + + ndim = len(data.shape) + if ndim == 1: + data = data[:, None] + + dims = data.shape[1] + + init = xp.empty((int(k), dims)) + + for i in range(k): + if i == 0: + init[i, :] = data[rng_integers(rng, data.shape[0]), :] + + else: + D2 = cdist(init[:i,:], data, metric='sqeuclidean').min(axis=0) + probs = D2/D2.sum() + cumprobs = probs.cumsum() + r = rng.uniform() + cumprobs = np.asarray(cumprobs) + init[i, :] = data[np.searchsorted(cumprobs, r), :] + + if ndim == 1: + init = init[:, 0] + return init + + +_valid_init_meth = {'random': _krandinit, 'points': _kpoints, '++': _kpp} + + +def _missing_warn(): + """Print a warning when called.""" + warnings.warn("One of the clusters is empty. " + "Re-run kmeans with a different initialization.", + stacklevel=3) + + +def _missing_raise(): + """Raise a ClusterError when called.""" + raise ClusterError("One of the clusters is empty. " + "Re-run kmeans with a different initialization.") + + +_valid_miss_meth = {'warn': _missing_warn, 'raise': _missing_raise} + + +@_transition_to_rng("seed") +def kmeans2(data, k, iter=10, thresh=1e-5, minit='random', + missing='warn', check_finite=True, *, rng=None): + """ + Classify a set of observations into k clusters using the k-means algorithm. + + The algorithm attempts to minimize the Euclidean distance between + observations and centroids. Several initialization methods are + included. + + Parameters + ---------- + data : ndarray + A 'M' by 'N' array of 'M' observations in 'N' dimensions or a length + 'M' array of 'M' 1-D observations. + k : int or ndarray + The number of clusters to form as well as the number of + centroids to generate. If `minit` initialization string is + 'matrix', or if a ndarray is given instead, it is + interpreted as initial cluster to use instead. + iter : int, optional + Number of iterations of the k-means algorithm to run. Note + that this differs in meaning from the iters parameter to + the kmeans function. + thresh : float, optional + (not used yet) + minit : str, optional + Method for initialization. Available methods are 'random', + 'points', '++' and 'matrix': + + 'random': generate k centroids from a Gaussian with mean and + variance estimated from the data. + + 'points': choose k observations (rows) at random from data for + the initial centroids. + + '++': choose k observations accordingly to the kmeans++ method + (careful seeding) + + 'matrix': interpret the k parameter as a k by M (or length k + array for 1-D data) array of initial centroids. + missing : str, optional + Method to deal with empty clusters. Available methods are + 'warn' and 'raise': + + 'warn': give a warning and continue. + + 'raise': raise an ClusterError and terminate the algorithm. + check_finite : bool, optional + Whether to check that the input matrices contain only finite numbers. + Disabling may give a performance gain, but may result in problems + (crashes, non-termination) if the inputs do contain infinities or NaNs. + Default: True + rng : `numpy.random.Generator`, optional + Pseudorandom number generator state. When `rng` is None, a new + `numpy.random.Generator` is created using entropy from the + operating system. Types other than `numpy.random.Generator` are + passed to `numpy.random.default_rng` to instantiate a ``Generator``. + + Returns + ------- + centroid : ndarray + A 'k' by 'N' array of centroids found at the last iteration of + k-means. + label : ndarray + label[i] is the code or index of the centroid the + ith observation is closest to. 
+ + See Also + -------- + kmeans + + References + ---------- + .. [1] D. Arthur and S. Vassilvitskii, "k-means++: the advantages of + careful seeding", Proceedings of the Eighteenth Annual ACM-SIAM Symposium + on Discrete Algorithms, 2007. + + Examples + -------- + >>> from scipy.cluster.vq import kmeans2 + >>> import matplotlib.pyplot as plt + >>> import numpy as np + + Create z, an array with shape (100, 2) containing a mixture of samples + from three multivariate normal distributions. + + >>> rng = np.random.default_rng() + >>> a = rng.multivariate_normal([0, 6], [[2, 1], [1, 1.5]], size=45) + >>> b = rng.multivariate_normal([2, 0], [[1, -1], [-1, 3]], size=30) + >>> c = rng.multivariate_normal([6, 4], [[5, 0], [0, 1.2]], size=25) + >>> z = np.concatenate((a, b, c)) + >>> rng.shuffle(z) + + Compute three clusters. + + >>> centroid, label = kmeans2(z, 3, minit='points') + >>> centroid + array([[ 2.22274463, -0.61666946], # may vary + [ 0.54069047, 5.86541444], + [ 6.73846769, 4.01991898]]) + + How many points are in each cluster? + + >>> counts = np.bincount(label) + >>> counts + array([29, 51, 20]) # may vary + + Plot the clusters. + + >>> w0 = z[label == 0] + >>> w1 = z[label == 1] + >>> w2 = z[label == 2] + >>> plt.plot(w0[:, 0], w0[:, 1], 'o', alpha=0.5, label='cluster 0') + >>> plt.plot(w1[:, 0], w1[:, 1], 'd', alpha=0.5, label='cluster 1') + >>> plt.plot(w2[:, 0], w2[:, 1], 's', alpha=0.5, label='cluster 2') + >>> plt.plot(centroid[:, 0], centroid[:, 1], 'k*', label='centroids') + >>> plt.axis('equal') + >>> plt.legend(shadow=True) + >>> plt.show() + + """ + if int(iter) < 1: + raise ValueError(f"Invalid iter ({iter}), must be a positive integer.") + try: + miss_meth = _valid_miss_meth[missing] + except KeyError as e: + raise ValueError(f"Unknown missing method {missing!r}") from e + + if isinstance(k, int): + xp = array_namespace(data) + else: + xp = array_namespace(data, k) + data = _asarray(data, xp=xp, check_finite=check_finite) + code_book = xp_copy(k, xp=xp) + if data.ndim == 1: + d = 1 + elif data.ndim == 2: + d = data.shape[1] + else: + raise ValueError("Input of rank > 2 is not supported.") + + if xp_size(data) < 1 or xp_size(code_book) < 1: + raise ValueError("Empty input is not supported.") + + # If k is not a single value, it should be compatible with data's shape + if minit == 'matrix' or xp_size(code_book) > 1: + if data.ndim != code_book.ndim: + raise ValueError("k array doesn't match data rank") + nc = code_book.shape[0] + if data.ndim > 1 and code_book.shape[1] != d: + raise ValueError("k array doesn't match data dimension") + else: + nc = int(code_book) + + if nc < 1: + raise ValueError("Cannot ask kmeans2 for %d clusters" + " (k was %s)" % (nc, code_book)) + elif nc != code_book: + warnings.warn("k was not an integer, was converted.", stacklevel=2) + + try: + init_meth = _valid_init_meth[minit] + except KeyError as e: + raise ValueError(f"Unknown init method {minit!r}") from e + else: + rng = check_random_state(rng) + code_book = init_meth(data, code_book, rng, xp) + + data = np.asarray(data) + code_book = np.asarray(code_book) + for i in range(iter): + # Compute the nearest neighbor for each obs using the current code book + label = vq(data, code_book, check_finite=check_finite)[0] + # Update the code book by computing centroids + new_code_book, has_members = _vq.update_cluster_means(data, label, nc) + if not has_members.all(): + miss_meth() + # Set the empty clusters to their previous positions + new_code_book[~has_members] = code_book[~has_members] + code_book 
= new_code_book + + return xp.asarray(code_book), xp.asarray(label) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/conftest.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/conftest.py new file mode 100644 index 0000000000000000000000000000000000000000..05129585b8f7041c202f1ae8a1d47d56336b4b67 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/conftest.py @@ -0,0 +1,552 @@ +# Pytest customization +import json +import os +import warnings +import tempfile +from contextlib import contextmanager + +import numpy as np +import numpy.testing as npt +import pytest +import hypothesis + +from scipy._lib._fpumode import get_fpu_mode +from scipy._lib._testutils import FPUModeChangeWarning +from scipy._lib._array_api import SCIPY_ARRAY_API, SCIPY_DEVICE +from scipy._lib import _pep440 + +try: + from scipy_doctest.conftest import dt_config + HAVE_SCPDT = True +except ModuleNotFoundError: + HAVE_SCPDT = False + +try: + import pytest_run_parallel # noqa:F401 + PARALLEL_RUN_AVAILABLE = True +except Exception: + PARALLEL_RUN_AVAILABLE = False + + +def pytest_configure(config): + config.addinivalue_line("markers", + "slow: Tests that are very slow.") + config.addinivalue_line("markers", + "xslow: mark test as extremely slow (not run unless explicitly requested)") + config.addinivalue_line("markers", + "xfail_on_32bit: mark test as failing on 32-bit platforms") + try: + import pytest_timeout # noqa:F401 + except Exception: + config.addinivalue_line( + "markers", 'timeout: mark a test for a non-default timeout') + try: + # This is a more reliable test of whether pytest_fail_slow is installed + # When I uninstalled it, `import pytest_fail_slow` didn't fail! 
+ from pytest_fail_slow import parse_duration # type: ignore[import-not-found] # noqa:F401,E501 + except Exception: + config.addinivalue_line( + "markers", 'fail_slow: mark a test for a non-default timeout failure') + config.addinivalue_line("markers", + "skip_xp_backends(backends, reason=None, np_only=False, cpu_only=False, " + "exceptions=None): " + "mark the desired skip configuration for the `skip_xp_backends` fixture.") + config.addinivalue_line("markers", + "xfail_xp_backends(backends, reason=None, np_only=False, cpu_only=False, " + "exceptions=None): " + "mark the desired xfail configuration for the `xfail_xp_backends` fixture.") + if not PARALLEL_RUN_AVAILABLE: + config.addinivalue_line( + 'markers', + 'parallel_threads(n): run the given test function in parallel ' + 'using `n` threads.') + config.addinivalue_line( + "markers", + "thread_unsafe: mark the test function as single-threaded", + ) + config.addinivalue_line( + "markers", + "iterations(n): run the given test function `n` times in each thread", + ) + + +def pytest_runtest_setup(item): + mark = item.get_closest_marker("xslow") + if mark is not None: + try: + v = int(os.environ.get('SCIPY_XSLOW', '0')) + except ValueError: + v = False + if not v: + pytest.skip("very slow test; " + "set environment variable SCIPY_XSLOW=1 to run it") + mark = item.get_closest_marker("xfail_on_32bit") + if mark is not None and np.intp(0).itemsize < 8: + pytest.xfail(f'Fails on our 32-bit test platform(s): {mark.args[0]}') + + # Older versions of threadpoolctl have an issue that may lead to this + # warning being emitted, see gh-14441 + with npt.suppress_warnings() as sup: + sup.filter(pytest.PytestUnraisableExceptionWarning) + + try: + from threadpoolctl import threadpool_limits + + HAS_THREADPOOLCTL = True + except Exception: # observed in gh-14441: (ImportError, AttributeError) + # Optional dependency only. All exceptions are caught, for robustness + HAS_THREADPOOLCTL = False + + if HAS_THREADPOOLCTL: + # Set the number of openmp threads based on the number of workers + # xdist is using to prevent oversubscription. Simplified version of what + # sklearn does (it can rely on threadpoolctl and its builtin OpenMP helper + # functions) + try: + xdist_worker_count = int(os.environ['PYTEST_XDIST_WORKER_COUNT']) + except KeyError: + # raises when pytest-xdist is not installed + return + + if not os.getenv('OMP_NUM_THREADS'): + max_openmp_threads = os.cpu_count() // 2 # use nr of physical cores + threads_per_worker = max(max_openmp_threads // xdist_worker_count, 1) + try: + threadpool_limits(threads_per_worker, user_api='blas') + except Exception: + # May raise AttributeError for older versions of OpenBLAS. + # Catch any error for robustness. + return + + +@pytest.fixture(scope="function", autouse=True) +def check_fpu_mode(request): + """ + Check FPU mode was not changed during the test. 
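+
+    A ``FPUModeChangeWarning`` is emitted when the mode observed after the
+    test differs from the mode observed before it.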
+ """ + old_mode = get_fpu_mode() + yield + new_mode = get_fpu_mode() + + if old_mode != new_mode: + warnings.warn(f"FPU mode changed from {old_mode:#x} to {new_mode:#x} during " + "the test", + category=FPUModeChangeWarning, stacklevel=0) + + +if not PARALLEL_RUN_AVAILABLE: + @pytest.fixture + def num_parallel_threads(): + return 1 + + +# Array API backend handling +xp_available_backends = {'numpy': np} + +if SCIPY_ARRAY_API and isinstance(SCIPY_ARRAY_API, str): + # fill the dict of backends with available libraries + try: + import array_api_strict + xp_available_backends.update({'array_api_strict': array_api_strict}) + if _pep440.parse(array_api_strict.__version__) < _pep440.Version('2.0'): + raise ImportError("array-api-strict must be >= version 2.0") + array_api_strict.set_array_api_strict_flags( + api_version='2023.12' + ) + except ImportError: + pass + + try: + import torch # type: ignore[import-not-found] + xp_available_backends.update({'torch': torch}) + # can use `mps` or `cpu` + torch.set_default_device(SCIPY_DEVICE) + except ImportError: + pass + + try: + import cupy # type: ignore[import-not-found] + xp_available_backends.update({'cupy': cupy}) + except ImportError: + pass + + try: + import jax.numpy # type: ignore[import-not-found] + xp_available_backends.update({'jax.numpy': jax.numpy}) + jax.config.update("jax_enable_x64", True) + jax.config.update("jax_default_device", jax.devices(SCIPY_DEVICE)[0]) + except ImportError: + pass + + # by default, use all available backends + if SCIPY_ARRAY_API.lower() not in ("1", "true"): + SCIPY_ARRAY_API_ = json.loads(SCIPY_ARRAY_API) + + if 'all' in SCIPY_ARRAY_API_: + pass # same as True + else: + # only select a subset of backend by filtering out the dict + try: + xp_available_backends = { + backend: xp_available_backends[backend] + for backend in SCIPY_ARRAY_API_ + } + except KeyError: + msg = f"'--array-api-backend' must be in {xp_available_backends.keys()}" + raise ValueError(msg) + +if 'cupy' in xp_available_backends: + SCIPY_DEVICE = 'cuda' + +array_api_compatible = pytest.mark.parametrize("xp", xp_available_backends.values()) + +skip_xp_invalid_arg = pytest.mark.skipif(SCIPY_ARRAY_API, + reason = ('Test involves masked arrays, object arrays, or other types ' + 'that are not valid input when `SCIPY_ARRAY_API` is used.')) + + +def _backends_kwargs_from_request(request, skip_or_xfail): + """A helper for {skip,xfail}_xp_backends""" + # do not allow multiple backends + args_ = request.keywords[f'{skip_or_xfail}_xp_backends'].args + if len(args_) > 1: + # np_only / cpu_only has args=(), otherwise it's ('numpy',) + # and we do not allow ('numpy', 'cupy') + raise ValueError(f"multiple backends: {args_}") + + markers = list(request.node.iter_markers(f'{skip_or_xfail}_xp_backends')) + backends = [] + kwargs = {} + for marker in markers: + if marker.kwargs.get('np_only'): + kwargs['np_only'] = True + kwargs['exceptions'] = marker.kwargs.get('exceptions', []) + elif marker.kwargs.get('cpu_only'): + if not kwargs.get('np_only'): + # if np_only is given, it is certainly cpu only + kwargs['cpu_only'] = True + kwargs['exceptions'] = marker.kwargs.get('exceptions', []) + + # add backends, if any + if len(marker.args) > 0: + backend = marker.args[0] # was a tuple, ('numpy',) etc + backends.append(backend) + kwargs.update(**{backend: marker.kwargs}) + + return backends, kwargs + + +@pytest.fixture +def skip_xp_backends(xp, request): + """skip_xp_backends(backend=None, reason=None, np_only=False, cpu_only=False, exceptions=None) + + Skip a decorated 
test for the provided backend, or skip a category of backends. + + See ``skip_or_xfail_backends`` docstring for details. Note that, contrary to + ``skip_or_xfail_backends``, the ``backend`` and ``reason`` arguments are optional + single strings: this function only skips a single backend at a time. + To skip multiple backends, provide multiple decorators. + """ # noqa: E501 + if "skip_xp_backends" not in request.keywords: + return + + backends, kwargs = _backends_kwargs_from_request(request, skip_or_xfail='skip') + skip_or_xfail_xp_backends(xp, backends, kwargs, skip_or_xfail='skip') + + +@pytest.fixture +def xfail_xp_backends(xp, request): + """xfail_xp_backends(backend=None, reason=None, np_only=False, cpu_only=False, exceptions=None) + + xfail a decorated test for the provided backend, or xfail a category of backends. + + See ``skip_or_xfail_backends`` docstring for details. Note that, contrary to + ``skip_or_xfail_backends``, the ``backend`` and ``reason`` arguments are optional + single strings: this function only xfails a single backend at a time. + To xfail multiple backends, provide multiple decorators. + """ # noqa: E501 + if "xfail_xp_backends" not in request.keywords: + return + backends, kwargs = _backends_kwargs_from_request(request, skip_or_xfail='xfail') + skip_or_xfail_xp_backends(xp, backends, kwargs, skip_or_xfail='xfail') + + +def skip_or_xfail_xp_backends(xp, backends, kwargs, skip_or_xfail='skip'): + """ + Skip based on the ``skip_xp_backends`` or ``xfail_xp_backends`` marker. + + See the "Support for the array API standard" docs page for usage examples. + + Parameters + ---------- + backends : tuple + Backends to skip/xfail, e.g. ``("array_api_strict", "torch")``. + These are overriden when ``np_only`` is ``True``, and are not + necessary to provide for non-CPU backends when ``cpu_only`` is ``True``. + For a custom reason to apply, you should pass a dict ``{'reason': '...'}`` + to a keyword matching the name of the backend. + reason : str, optional + A reason for the skip/xfail in the case of ``np_only=True``. + If unprovided, a default reason is used. Note that it is not possible + to specify a custom reason with ``cpu_only``. + np_only : bool, optional + When ``True``, the test is skipped/xfailed for all backends other + than the default NumPy backend. There is no need to provide + any ``backends`` in this case. To specify a reason, pass a + value to ``reason``. Default: ``False``. + cpu_only : bool, optional + When ``True``, the test is skipped/xfailed on non-CPU devices. + There is no need to provide any ``backends`` in this case, + but any ``backends`` will also be skipped on the CPU. + Default: ``False``. + exceptions : list, optional + A list of exceptions for use with ``cpu_only`` or ``np_only``. + This should be provided when delegation is implemented for some, + but not all, non-CPU/non-NumPy backends. + skip_or_xfail : str + ``'skip'`` to skip, ``'xfail'`` to xfail. 
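+
+    Notes
+    -----
+    A minimal usage sketch (illustrative only; the marker signature is the
+    one registered in ``pytest_configure`` above, and the test must also use
+    the ``skip_xp_backends`` fixture for the marker to take effect)::
+
+        @pytest.mark.usefixtures("skip_xp_backends")
+        @pytest.mark.skip_xp_backends('torch', reason='temporary skip')
+        def test_something(xp):
+            ...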
+ """ + skip_or_xfail = getattr(pytest, skip_or_xfail) + np_only = kwargs.get("np_only", False) + cpu_only = kwargs.get("cpu_only", False) + exceptions = kwargs.get("exceptions", []) + + if reasons := kwargs.get("reasons"): + raise ValueError(f"provide a single `reason=` kwarg; got {reasons=} instead") + + # input validation + if np_only and cpu_only: + # np_only is a stricter subset of cpu_only + cpu_only = False + if exceptions and not (cpu_only or np_only): + raise ValueError("`exceptions` is only valid alongside `cpu_only` or `np_only`") + + if np_only: + reason = kwargs.get("reason", "do not run with non-NumPy backends.") + if not isinstance(reason, str) and len(reason) > 1: + raise ValueError("please provide a singleton `reason` " + "when using `np_only`") + if xp.__name__ != 'numpy' and xp.__name__ not in exceptions: + skip_or_xfail(reason=reason) + return + if cpu_only: + reason = ("no array-agnostic implementation or delegation available " + "for this backend and device") + exceptions = [] if exceptions is None else exceptions + if SCIPY_ARRAY_API and SCIPY_DEVICE != 'cpu': + if xp.__name__ == 'cupy' and 'cupy' not in exceptions: + skip_or_xfail(reason=reason) + elif xp.__name__ == 'torch' and 'torch' not in exceptions: + if 'cpu' not in xp.empty(0).device.type: + skip_or_xfail(reason=reason) + elif xp.__name__ == 'jax.numpy' and 'jax.numpy' not in exceptions: + for d in xp.empty(0).devices(): + if 'cpu' not in d.device_kind: + skip_or_xfail(reason=reason) + + if backends is not None: + for i, backend in enumerate(backends): + if xp.__name__ == backend: + reason = kwargs[backend].get('reason') + if not reason: + reason = f"do not run with array API backend: {backend}" + + skip_or_xfail(reason=reason) + + +# Following the approach of NumPy's conftest.py... +# Use a known and persistent tmpdir for hypothesis' caches, which +# can be automatically cleared by the OS or user. +hypothesis.configuration.set_hypothesis_home_dir( + os.path.join(tempfile.gettempdir(), ".hypothesis") +) + +# We register two custom profiles for SciPy - for details see +# https://hypothesis.readthedocs.io/en/latest/settings.html +# The first is designed for our own CI runs; the latter also +# forces determinism and is designed for use via scipy.test() +hypothesis.settings.register_profile( + name="nondeterministic", deadline=None, print_blob=True, +) +hypothesis.settings.register_profile( + name="deterministic", + deadline=None, print_blob=True, database=None, derandomize=True, + suppress_health_check=list(hypothesis.HealthCheck), +) + +# Profile is currently set by environment variable `SCIPY_HYPOTHESIS_PROFILE` +# In the future, it would be good to work the choice into dev.py. +SCIPY_HYPOTHESIS_PROFILE = os.environ.get("SCIPY_HYPOTHESIS_PROFILE", + "deterministic") +hypothesis.settings.load_profile(SCIPY_HYPOTHESIS_PROFILE) + + +############################################################################ +# doctesting stuff + +if HAVE_SCPDT: + + # FIXME: populate the dict once + @contextmanager + def warnings_errors_and_rng(test=None): + """Temporarily turn (almost) all warnings to errors. + + Filter out known warnings which we allow. 
+ """ + known_warnings = dict() + + # these functions are known to emit "divide by zero" RuntimeWarnings + divide_by_zero = [ + 'scipy.linalg.norm', 'scipy.ndimage.center_of_mass', + ] + for name in divide_by_zero: + known_warnings[name] = dict(category=RuntimeWarning, + message='divide by zero') + + # Deprecated stuff in scipy.signal and elsewhere + deprecated = [ + 'scipy.signal.cwt', 'scipy.signal.morlet', 'scipy.signal.morlet2', + 'scipy.signal.ricker', + 'scipy.integrate.simpson', + 'scipy.interpolate.interp2d', + 'scipy.linalg.kron', + ] + for name in deprecated: + known_warnings[name] = dict(category=DeprecationWarning) + + from scipy import integrate + # the functions are known to emit IntegrationWarnings + integration_w = ['scipy.special.ellip_normal', + 'scipy.special.ellip_harm_2', + ] + for name in integration_w: + known_warnings[name] = dict(category=integrate.IntegrationWarning, + message='The occurrence of roundoff') + + # scipy.stats deliberately emits UserWarnings sometimes + user_w = ['scipy.stats.anderson_ksamp', 'scipy.stats.kurtosistest', + 'scipy.stats.normaltest', 'scipy.sparse.linalg.norm'] + for name in user_w: + known_warnings[name] = dict(category=UserWarning) + + # additional one-off warnings to filter + dct = { + 'scipy.sparse.linalg.norm': + dict(category=UserWarning, message="Exited at iteration"), + # tutorials + 'linalg.rst': + dict(message='the matrix subclass is not', + category=PendingDeprecationWarning), + 'stats.rst': + dict(message='The maximum number of subdivisions', + category=integrate.IntegrationWarning), + } + known_warnings.update(dct) + + # these legitimately emit warnings in examples + legit = set('scipy.signal.normalize') + + # Now, the meat of the matter: filter warnings, + # also control the random seed for each doctest. + + # XXX: this matches the refguide-check behavior, but is a tad strange: + # makes sure that the seed the old-fashioned np.random* methods is + # *NOT* reproducible but the new-style `default_rng()` *IS* repoducible. + # Should these two be either both repro or both not repro? 
+ + from scipy._lib._util import _fixed_default_rng + import numpy as np + with _fixed_default_rng(): + np.random.seed(None) + with warnings.catch_warnings(): + if test and test.name in known_warnings: + warnings.filterwarnings('ignore', + **known_warnings[test.name]) + yield + elif test and test.name in legit: + yield + else: + warnings.simplefilter('error', Warning) + yield + + dt_config.user_context_mgr = warnings_errors_and_rng + dt_config.skiplist = set([ + 'scipy.linalg.LinAlgError', # comes from numpy + 'scipy.fftpack.fftshift', # fftpack stuff is also from numpy + 'scipy.fftpack.ifftshift', + 'scipy.fftpack.fftfreq', + 'scipy.special.sinc', # sinc is from numpy + 'scipy.optimize.show_options', # does not have much to doctest + 'scipy.signal.normalize', # manipulates warnings (XXX temp skip) + 'scipy.sparse.linalg.norm', # XXX temp skip + # these below test things which inherit from np.ndarray + # cross-ref https://github.com/numpy/numpy/issues/28019 + 'scipy.io.matlab.MatlabObject.strides', + 'scipy.io.matlab.MatlabObject.dtype', + 'scipy.io.matlab.MatlabOpaque.dtype', + 'scipy.io.matlab.MatlabOpaque.strides', + 'scipy.io.matlab.MatlabFunction.strides', + 'scipy.io.matlab.MatlabFunction.dtype' + ]) + + # these are affected by NumPy 2.0 scalar repr: rely on string comparison + if np.__version__ < "2": + dt_config.skiplist.update(set([ + 'scipy.io.hb_read', + 'scipy.io.hb_write', + 'scipy.sparse.csgraph.connected_components', + 'scipy.sparse.csgraph.depth_first_order', + 'scipy.sparse.csgraph.shortest_path', + 'scipy.sparse.csgraph.floyd_warshall', + 'scipy.sparse.csgraph.dijkstra', + 'scipy.sparse.csgraph.bellman_ford', + 'scipy.sparse.csgraph.johnson', + 'scipy.sparse.csgraph.yen', + 'scipy.sparse.csgraph.breadth_first_order', + 'scipy.sparse.csgraph.reverse_cuthill_mckee', + 'scipy.sparse.csgraph.structural_rank', + 'scipy.sparse.csgraph.construct_dist_matrix', + 'scipy.sparse.csgraph.reconstruct_path', + 'scipy.ndimage.value_indices', + 'scipy.stats.mstats.describe', + ])) + + # help pytest collection a bit: these names are either private + # (distributions), or just do not need doctesting. 
+ dt_config.pytest_extra_ignore = [ + "scipy.stats.distributions", + "scipy.optimize.cython_optimize", + "scipy.test", + "scipy.show_config", + # equivalent to "pytest --ignore=path/to/file" + "scipy/special/_precompute", + "scipy/interpolate/_interpnd_info.py", + "scipy/_lib/array_api_compat", + "scipy/_lib/highs", + "scipy/_lib/unuran", + "scipy/_lib/_gcutils.py", + "scipy/_lib/doccer.py", + "scipy/_lib/_uarray", + ] + + dt_config.pytest_extra_xfail = { + # name: reason + "ND_regular_grid.rst": "ReST parser limitation", + "extrapolation_examples.rst": "ReST parser limitation", + "sampling_pinv.rst": "__cinit__ unexpected argument", + "sampling_srou.rst": "nan in scalar_power", + "probability_distributions.rst": "integration warning", + } + + # tutorials + dt_config.pseudocode = set(['integrate.nquad(func,']) + dt_config.local_resources = { + 'io.rst': [ + "octave_a.mat", + "octave_cells.mat", + "octave_struct.mat" + ] + } + + dt_config.strict_check = True +############################################################################ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/__init__.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a1417abe144142078b38c45794f73178cec486b8 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/__init__.py @@ -0,0 +1,116 @@ +""" +================================== +Input and output (:mod:`scipy.io`) +================================== + +.. currentmodule:: scipy.io + +SciPy has many modules, classes, and functions available to read data +from and write data to a variety of file formats. + +.. seealso:: `NumPy IO routines `__ + +MATLAB® files +============= + +.. autosummary:: + :toctree: generated/ + + loadmat - Read a MATLAB style mat file (version 4 through 7.1) + savemat - Write a MATLAB style mat file (version 4 through 7.1) + whosmat - List contents of a MATLAB style mat file (version 4 through 7.1) + +For low-level MATLAB reading and writing utilities, see `scipy.io.matlab`. + +IDL® files +========== + +.. autosummary:: + :toctree: generated/ + + readsav - Read an IDL 'save' file + +Matrix Market files +=================== + +.. autosummary:: + :toctree: generated/ + + mminfo - Query matrix info from Matrix Market formatted file + mmread - Read matrix from Matrix Market formatted file + mmwrite - Write matrix to Matrix Market formatted file + +Unformatted Fortran files +=============================== + +.. autosummary:: + :toctree: generated/ + + FortranFile - A file object for unformatted sequential Fortran files + FortranEOFError - Exception indicating the end of a well-formed file + FortranFormattingError - Exception indicating an inappropriate end + +Netcdf +====== + +.. autosummary:: + :toctree: generated/ + + netcdf_file - A file object for NetCDF data + netcdf_variable - A data object for the netcdf module + +Harwell-Boeing files +==================== + +.. autosummary:: + :toctree: generated/ + + hb_read -- read H-B file + hb_write -- write H-B file + +Wav sound files (:mod:`scipy.io.wavfile`) +========================================= + +.. module:: scipy.io.wavfile + +.. autosummary:: + :toctree: generated/ + + read + write + WavFileWarning + +Arff files (:mod:`scipy.io.arff`) +================================= + +.. module:: scipy.io.arff + +.. 
autosummary:: + :toctree: generated/ + + loadarff + MetaData + ArffError + ParseArffError +""" +# matfile read and write +from .matlab import loadmat, savemat, whosmat + +# netCDF file support +from ._netcdf import netcdf_file, netcdf_variable + +# Fortran file support +from ._fortran import FortranFile, FortranEOFError, FortranFormattingError + +from ._fast_matrix_market import mminfo, mmread, mmwrite +from ._idl import readsav +from ._harwell_boeing import hb_read, hb_write + +# Deprecated namespaces, to be removed in v2.0.0 +from . import arff, harwell_boeing, idl, mmio, netcdf, wavfile + +__all__ = [s for s in dir() if not s.startswith('_')] + +from scipy._lib._testutils import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/_fast_matrix_market/__init__.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/_fast_matrix_market/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..cfd6f8fb30ea8dcc2a9de7b1be23a9538c130718 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/_fast_matrix_market/__init__.py @@ -0,0 +1,598 @@ +# Copyright (C) 2022-2023 Adam Lugowski. All rights reserved. +# Use of this source code is governed by the BSD 2-clause license found in +# the LICENSE.txt file. +# SPDX-License-Identifier: BSD-2-Clause +""" +Matrix Market I/O with a C++ backend. +See http://math.nist.gov/MatrixMarket/formats.html +for information about the Matrix Market format. + +.. versionadded:: 1.12.0 +""" +import io +import os + +import numpy as np +from scipy.sparse import coo_array, issparse, coo_matrix +from scipy.io import _mmio + +__all__ = ['mminfo', 'mmread', 'mmwrite'] + +PARALLELISM = 0 +""" +Number of threads that `mmread()` and `mmwrite()` use. +0 means number of CPUs in the system. +Use `threadpoolctl` to set this value. +""" + +ALWAYS_FIND_SYMMETRY = False +""" +Whether mmwrite() with symmetry='AUTO' will always search for symmetry +inside the matrix. This is scipy.io._mmio.mmwrite()'s default behavior, +but has a significant performance cost on large matrices. +""" + +_field_to_dtype = { + "integer": "int64", + "unsigned-integer": "uint64", + "real": "float64", + "complex": "complex", + "pattern": "float64", +} + + +def _fmm_version(): + from . import _fmm_core + return _fmm_core.__version__ + + +# Register with threadpoolctl, if available +try: + import threadpoolctl + + class _FMMThreadPoolCtlController(threadpoolctl.LibController): + user_api = "scipy" + internal_api = "scipy_mmio" + + filename_prefixes = ("_fmm_core",) + + def get_num_threads(self): + global PARALLELISM + return PARALLELISM + + def set_num_threads(self, num_threads): + global PARALLELISM + PARALLELISM = num_threads + + def get_version(self): + return _fmm_version + + def set_additional_attributes(self): + pass + + threadpoolctl.register(_FMMThreadPoolCtlController) +except (ImportError, AttributeError): + # threadpoolctl not installed or version too old + pass + + +class _TextToBytesWrapper(io.BufferedReader): + """ + Convert a TextIOBase string stream to a byte stream. 
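+
+    The C++ reader consumes bytes, so text-mode sources (for example
+    ``io.StringIO``) are wrapped with this class before being handed to
+    ``_fmm_core.open_read_stream``.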
+ """ + + def __init__(self, text_io_buffer, encoding=None, errors=None, **kwargs): + super().__init__(text_io_buffer, **kwargs) + self.encoding = encoding or text_io_buffer.encoding or 'utf-8' + self.errors = errors or text_io_buffer.errors or 'strict' + + def __del__(self): + # do not close the wrapped stream + self.detach() + + def _encoding_call(self, method_name, *args, **kwargs): + raw_method = getattr(self.raw, method_name) + val = raw_method(*args, **kwargs) + return val.encode(self.encoding, errors=self.errors) + + def read(self, size=-1): + return self._encoding_call('read', size) + + def read1(self, size=-1): + return self._encoding_call('read1', size) + + def peek(self, size=-1): + return self._encoding_call('peek', size) + + def seek(self, offset, whence=0): + # Random seeks are not allowed because of non-trivial conversion + # between byte and character offsets, + # with the possibility of a byte offset landing within a character. + if offset == 0 and whence == 0 or \ + offset == 0 and whence == 2: + # seek to start or end is ok + super().seek(offset, whence) + else: + # Drop any other seek + # In this application this may happen when pystreambuf seeks during sync(), + # which can happen when closing a partially-read stream. + # Ex. when mminfo() only reads the header then exits. + pass + + +def _read_body_array(cursor): + """ + Read MatrixMarket array body + """ + from . import _fmm_core + + vals = np.zeros(cursor.header.shape, dtype=_field_to_dtype.get(cursor.header.field)) + _fmm_core.read_body_array(cursor, vals) + return vals + + +def _read_body_coo(cursor, generalize_symmetry=True): + """ + Read MatrixMarket coordinate body + """ + from . import _fmm_core + + index_dtype = "int32" + if cursor.header.nrows >= 2**31 or cursor.header.ncols >= 2**31: + # Dimensions are too large to fit in int32 + index_dtype = "int64" + + i = np.zeros(cursor.header.nnz, dtype=index_dtype) + j = np.zeros(cursor.header.nnz, dtype=index_dtype) + data = np.zeros(cursor.header.nnz, dtype=_field_to_dtype.get(cursor.header.field)) + + _fmm_core.read_body_coo(cursor, i, j, data) + + if generalize_symmetry and cursor.header.symmetry != "general": + off_diagonal_mask = (i != j) + off_diagonal_rows = i[off_diagonal_mask] + off_diagonal_cols = j[off_diagonal_mask] + off_diagonal_data = data[off_diagonal_mask] + + if cursor.header.symmetry == "skew-symmetric": + off_diagonal_data *= -1 + elif cursor.header.symmetry == "hermitian": + off_diagonal_data = off_diagonal_data.conjugate() + + i = np.concatenate((i, off_diagonal_cols)) + j = np.concatenate((j, off_diagonal_rows)) + data = np.concatenate((data, off_diagonal_data)) + + return (data, (i, j)), cursor.header.shape + + +def _get_read_cursor(source, parallelism=None): + """ + Open file for reading. + """ + from . import _fmm_core + + ret_stream_to_close = None + if parallelism is None: + parallelism = PARALLELISM + + try: + source = os.fspath(source) + # It's a file path + is_path = True + except TypeError: + is_path = False + + if is_path: + path = str(source) + if path.endswith('.gz'): + import gzip + source = gzip.GzipFile(path, 'r') + ret_stream_to_close = source + elif path.endswith('.bz2'): + import bz2 + source = bz2.BZ2File(path, 'rb') + ret_stream_to_close = source + else: + return _fmm_core.open_read_file(path, parallelism), ret_stream_to_close + + # Stream object. 
+ if hasattr(source, "read"): + if isinstance(source, io.TextIOBase): + source = _TextToBytesWrapper(source) + return _fmm_core.open_read_stream(source, parallelism), ret_stream_to_close + else: + raise TypeError("Unknown source type") + + +def _get_write_cursor(target, h=None, comment=None, parallelism=None, + symmetry="general", precision=None): + """ + Open file for writing. + """ + from . import _fmm_core + + if parallelism is None: + parallelism = PARALLELISM + if comment is None: + comment = '' + if symmetry is None: + symmetry = "general" + if precision is None: + precision = -1 + + if not h: + h = _fmm_core.header(comment=comment, symmetry=symmetry) + + try: + target = os.fspath(target) + # It's a file path + if target[-4:] != '.mtx': + target += '.mtx' + return _fmm_core.open_write_file(str(target), h, parallelism, precision) + except TypeError: + pass + + if hasattr(target, "write"): + # Stream object. + if isinstance(target, io.TextIOBase): + raise TypeError("target stream must be open in binary mode.") + return _fmm_core.open_write_stream(target, h, parallelism, precision) + else: + raise TypeError("Unknown source object") + + +def _apply_field(data, field, no_pattern=False): + """ + Ensure that ``data.dtype`` is compatible with the specified MatrixMarket field type. + + Parameters + ---------- + data : ndarray + Input array. + + field : str + Matrix Market field, such as 'real', 'complex', 'integer', 'pattern'. + + no_pattern : bool, optional + Whether an empty array may be returned for a 'pattern' field. + + Returns + ------- + data : ndarray + Input data if no conversion necessary, or a converted version + """ + + if field is None: + return data + if field == "pattern": + if no_pattern: + return data + else: + return np.zeros(0) + + dtype = _field_to_dtype.get(field, None) + if dtype is None: + raise ValueError("Invalid field.") + + return np.asarray(data, dtype=dtype) + + +def _validate_symmetry(symmetry): + """ + Check that the symmetry parameter is one that MatrixMarket allows.. + """ + if symmetry is None: + return "general" + + symmetry = str(symmetry).lower() + symmetries = ["general", "symmetric", "skew-symmetric", "hermitian"] + if symmetry not in symmetries: + raise ValueError("Invalid symmetry. Must be one of: " + ", ".join(symmetries)) + + return symmetry + + +def mmread(source, *, spmatrix=True): + """ + Reads the contents of a Matrix Market file-like 'source' into a matrix. + + Parameters + ---------- + source : str or file-like + Matrix Market filename (extensions .mtx, .mtz.gz) + or open file-like object. + spmatrix : bool, optional (default: True) + If ``True``, return sparse ``coo_matrix``. Otherwise return ``coo_array``. + + Returns + ------- + a : ndarray or coo_array + Dense or sparse array depending on the matrix format in the + Matrix Market file. + + Notes + ----- + .. versionchanged:: 1.12.0 + C++ implementation. + + Examples + -------- + >>> from io import StringIO + >>> from scipy.io import mmread + + >>> text = '''%%MatrixMarket matrix coordinate real general + ... 5 5 7 + ... 2 3 1.0 + ... 3 4 2.0 + ... 3 5 3.0 + ... 4 1 4.0 + ... 4 2 5.0 + ... 4 3 6.0 + ... 4 4 7.0 + ... ''' + + ``mmread(source)`` returns the data as sparse array in COO format. + + >>> m = mmread(StringIO(text), spmatrix=False) + >>> m + + >>> m.toarray() + array([[0., 0., 0., 0., 0.], + [0., 0., 1., 0., 0.], + [0., 0., 0., 2., 3.], + [4., 5., 6., 7., 0.], + [0., 0., 0., 0., 0.]]) + + This method is threaded. 
+ The default number of threads is equal to the number of CPUs in the system. + Use `threadpoolctl `_ to override: + + >>> import threadpoolctl + >>> + >>> with threadpoolctl.threadpool_limits(limits=2): + ... m = mmread(StringIO(text), spmatrix=False) + + """ + cursor, stream_to_close = _get_read_cursor(source) + + if cursor.header.format == "array": + mat = _read_body_array(cursor) + if stream_to_close: + stream_to_close.close() + return mat + else: + triplet, shape = _read_body_coo(cursor, generalize_symmetry=True) + if stream_to_close: + stream_to_close.close() + if spmatrix: + return coo_matrix(triplet, shape=shape) + return coo_array(triplet, shape=shape) + + +def mmwrite(target, a, comment=None, field=None, precision=None, symmetry="AUTO"): + r""" + Writes the sparse or dense array `a` to Matrix Market file-like `target`. + + Parameters + ---------- + target : str or file-like + Matrix Market filename (extension .mtx) or open file-like object. + a : array like + Sparse or dense 2-D array. + comment : str, optional + Comments to be prepended to the Matrix Market file. + field : None or str, optional + Either 'real', 'complex', 'pattern', or 'integer'. + precision : None or int, optional + Number of digits to display for real or complex values. + symmetry : None or str, optional + Either 'AUTO', 'general', 'symmetric', 'skew-symmetric', or 'hermitian'. + If symmetry is None the symmetry type of 'a' is determined by its + values. If symmetry is 'AUTO' the symmetry type of 'a' is either + determined or set to 'general', at mmwrite's discretion. + + Returns + ------- + None + + Notes + ----- + .. versionchanged:: 1.12.0 + C++ implementation. + + Examples + -------- + >>> from io import BytesIO + >>> import numpy as np + >>> from scipy.sparse import coo_array + >>> from scipy.io import mmwrite + + Write a small NumPy array to a matrix market file. The file will be + written in the ``'array'`` format. + + >>> a = np.array([[1.0, 0, 0, 0], [0, 2.5, 0, 6.25]]) + >>> target = BytesIO() + >>> mmwrite(target, a) + >>> print(target.getvalue().decode('latin1')) + %%MatrixMarket matrix array real general + % + 2 4 + 1 + 0 + 0 + 2.5 + 0 + 0 + 0 + 6.25 + + Add a comment to the output file, and set the precision to 3. + + >>> target = BytesIO() + >>> mmwrite(target, a, comment='\n Some test data.\n', precision=3) + >>> print(target.getvalue().decode('latin1')) + %%MatrixMarket matrix array real general + % + % Some test data. + % + 2 4 + 1.00e+00 + 0.00e+00 + 0.00e+00 + 2.50e+00 + 0.00e+00 + 0.00e+00 + 0.00e+00 + 6.25e+00 + + Convert to a sparse matrix before calling ``mmwrite``. This will + result in the output format being ``'coordinate'`` rather than + ``'array'``. + + >>> target = BytesIO() + >>> mmwrite(target, coo_array(a), precision=3) + >>> print(target.getvalue().decode('latin1')) + %%MatrixMarket matrix coordinate real general + % + 2 4 3 + 1 1 1.00e+00 + 2 2 2.50e+00 + 2 4 6.25e+00 + + Write a complex Hermitian array to a matrix market file. Note that + only six values are actually written to the file; the other values + are implied by the symmetry. + + >>> z = np.array([[3, 1+2j, 4-3j], [1-2j, 1, -5j], [4+3j, 5j, 2.5]]) + >>> z + array([[ 3. +0.j, 1. +2.j, 4. -3.j], + [ 1. -2.j, 1. +0.j, -0. -5.j], + [ 4. +3.j, 0. 
+5.j, 2.5+0.j]]) + + >>> target = BytesIO() + >>> mmwrite(target, z, precision=2) + >>> print(target.getvalue().decode('latin1')) + %%MatrixMarket matrix array complex hermitian + % + 3 3 + 3.0e+00 0.0e+00 + 1.0e+00 -2.0e+00 + 4.0e+00 3.0e+00 + 1.0e+00 0.0e+00 + 0.0e+00 5.0e+00 + 2.5e+00 0.0e+00 + + This method is threaded. + The default number of threads is equal to the number of CPUs in the system. + Use `threadpoolctl `_ to override: + + >>> import threadpoolctl + >>> + >>> target = BytesIO() + >>> with threadpoolctl.threadpool_limits(limits=2): + ... mmwrite(target, a) + + """ + from . import _fmm_core + + if isinstance(a, list) or isinstance(a, tuple) or hasattr(a, "__array__"): + a = np.asarray(a) + + if symmetry == "AUTO": + if ALWAYS_FIND_SYMMETRY or (hasattr(a, "shape") and max(a.shape) < 100): + symmetry = None + else: + symmetry = "general" + + if symmetry is None: + symmetry = _mmio.MMFile()._get_symmetry(a) + + symmetry = _validate_symmetry(symmetry) + cursor = _get_write_cursor(target, comment=comment, + precision=precision, symmetry=symmetry) + + if isinstance(a, np.ndarray): + # Write dense numpy arrays + a = _apply_field(a, field, no_pattern=True) + _fmm_core.write_body_array(cursor, a) + + elif issparse(a): + # Write sparse scipy matrices + a = a.tocoo() + + if symmetry is not None and symmetry != "general": + # A symmetric matrix only specifies the elements below the diagonal. + # Ensure that the matrix satisfies this requirement. + lower_triangle_mask = a.row >= a.col + a = coo_array((a.data[lower_triangle_mask], + (a.row[lower_triangle_mask], + a.col[lower_triangle_mask])), shape=a.shape) + + data = _apply_field(a.data, field) + _fmm_core.write_body_coo(cursor, a.shape, a.row, a.col, data) + + else: + raise ValueError(f"unknown matrix type: {type(a)}") + + +def mminfo(source): + """ + Return size and storage parameters from Matrix Market file-like 'source'. + + Parameters + ---------- + source : str or file-like + Matrix Market filename (extension .mtx) or open file-like object + + Returns + ------- + rows : int + Number of matrix rows. + cols : int + Number of matrix columns. + entries : int + Number of non-zero entries of a sparse matrix + or rows*cols for a dense matrix. + format : str + Either 'coordinate' or 'array'. + field : str + Either 'real', 'complex', 'pattern', or 'integer'. + symmetry : str + Either 'general', 'symmetric', 'skew-symmetric', or 'hermitian'. + + Notes + ----- + .. versionchanged:: 1.12.0 + C++ implementation. + + Examples + -------- + >>> from io import StringIO + >>> from scipy.io import mminfo + + >>> text = '''%%MatrixMarket matrix coordinate real general + ... 5 5 7 + ... 2 3 1.0 + ... 3 4 2.0 + ... 3 5 3.0 + ... 4 1 4.0 + ... 4 2 5.0 + ... 4 3 6.0 + ... 4 4 7.0 + ... ''' + + + ``mminfo(source)`` returns the number of rows, number of columns, + format, field type and symmetry attribute of the source file. 
+ + >>> mminfo(StringIO(text)) + (5, 5, 7, 'coordinate', 'real', 'general') + """ + cursor, stream_to_close = _get_read_cursor(source, 1) + h = cursor.header + cursor.close() + if stream_to_close: + stream_to_close.close() + return h.nrows, h.ncols, h.nnz, h.format, h.field, h.symmetry diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/_fortran.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/_fortran.py new file mode 100644 index 0000000000000000000000000000000000000000..ac491dce68fe2f2f171dcee5a3097b0f4c4ea10c --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/_fortran.py @@ -0,0 +1,354 @@ +""" +Module to read / write Fortran unformatted sequential files. + +This is in the spirit of code written by Neil Martinsen-Burrell and Joe Zuntz. + +""" +import warnings +import numpy as np + +__all__ = ['FortranFile', 'FortranEOFError', 'FortranFormattingError'] + + +class FortranEOFError(TypeError, OSError): + """Indicates that the file ended properly. + + This error descends from TypeError because the code used to raise + TypeError (and this was the only way to know that the file had + ended) so users might have ``except TypeError:``. + + """ + pass + + +class FortranFormattingError(TypeError, OSError): + """Indicates that the file ended mid-record. + + Descends from TypeError for backward compatibility. + + """ + pass + + +class FortranFile: + """ + A file object for unformatted sequential files from Fortran code. + + Parameters + ---------- + filename : file or str + Open file object or filename. + mode : {'r', 'w'}, optional + Read-write mode, default is 'r'. + header_dtype : dtype, optional + Data type of the header. Size and endianness must match the input/output file. + + Notes + ----- + These files are broken up into records of unspecified types. The size of + each record is given at the start (although the size of this header is not + standard) and the data is written onto disk without any formatting. Fortran + compilers supporting the BACKSPACE statement will write a second copy of + the size to facilitate backwards seeking. + + This class only supports files written with both sizes for the record. + It also does not support the subrecords used in Intel and gfortran compilers + for records which are greater than 2GB with a 4-byte header. + + An example of an unformatted sequential file in Fortran would be written as:: + + OPEN(1, FILE=myfilename, FORM='unformatted') + + WRITE(1) myvariable + + Since this is a non-standard file format, whose contents depend on the + compiler and the endianness of the machine, caution is advised. Files from + gfortran 4.8.0 and gfortran 4.1.2 on x86_64 are known to work. + + Consider using Fortran direct-access files or files from the newer Stream + I/O, which can be easily read by `numpy.fromfile`. + + Examples + -------- + To create an unformatted sequential Fortran file: + + >>> from scipy.io import FortranFile + >>> import numpy as np + >>> f = FortranFile('test.unf', 'w') + >>> f.write_record(np.array([1,2,3,4,5], dtype=np.int32)) + >>> f.write_record(np.linspace(0,1,20).reshape((5,4)).T) + >>> f.close() + + To read this file: + + >>> f = FortranFile('test.unf', 'r') + >>> print(f.read_ints(np.int32)) + [1 2 3 4 5] + >>> print(f.read_reals(float).reshape((5,4), order="F")) + [[0. 
0.05263158 0.10526316 0.15789474] + [0.21052632 0.26315789 0.31578947 0.36842105] + [0.42105263 0.47368421 0.52631579 0.57894737] + [0.63157895 0.68421053 0.73684211 0.78947368] + [0.84210526 0.89473684 0.94736842 1. ]] + >>> f.close() + + Or, in Fortran:: + + integer :: a(5), i + double precision :: b(5,4) + open(1, file='test.unf', form='unformatted') + read(1) a + read(1) b + close(1) + write(*,*) a + do i = 1, 5 + write(*,*) b(i,:) + end do + + """ + def __init__(self, filename, mode='r', header_dtype=np.uint32): + if header_dtype is None: + raise ValueError('Must specify dtype') + + header_dtype = np.dtype(header_dtype) + if header_dtype.kind != 'u': + warnings.warn("Given a dtype which is not unsigned.", stacklevel=2) + + if mode not in 'rw' or len(mode) != 1: + raise ValueError('mode must be either r or w') + + if hasattr(filename, 'seek'): + self._fp = filename + else: + self._fp = open(filename, f'{mode}b') + + self._header_dtype = header_dtype + + def _read_size(self, eof_ok=False): + n = self._header_dtype.itemsize + b = self._fp.read(n) + if (not b) and eof_ok: + raise FortranEOFError("End of file occurred at end of record") + elif len(b) < n: + raise FortranFormattingError( + "End of file in the middle of the record size") + return int(np.frombuffer(b, dtype=self._header_dtype, count=1)[0]) + + def write_record(self, *items): + """ + Write a record (including sizes) to the file. + + Parameters + ---------- + *items : array_like + The data arrays to write. + + Notes + ----- + Writes data items to a file:: + + write_record(a.T, b.T, c.T, ...) + + write(1) a, b, c, ... + + Note that data in multidimensional arrays is written in + row-major order --- to make them read correctly by Fortran + programs, you need to transpose the arrays yourself when + writing them. + + """ + items = tuple(np.asarray(item) for item in items) + total_size = sum(item.nbytes for item in items) + + nb = np.array([total_size], dtype=self._header_dtype) + + nb.tofile(self._fp) + for item in items: + item.tofile(self._fp) + nb.tofile(self._fp) + + def read_record(self, *dtypes, **kwargs): + """ + Reads a record of a given type from the file. + + Parameters + ---------- + *dtypes : dtypes, optional + Data type(s) specifying the size and endianness of the data. + + Returns + ------- + data : ndarray + A 1-D array object. + + Raises + ------ + FortranEOFError + To signal that no further records are available + FortranFormattingError + To signal that the end of the file was encountered + part-way through a record + + Notes + ----- + If the record contains a multidimensional array, you can specify + the size in the dtype. For example:: + + INTEGER var(5,4) + + can be read with:: + + read_record('(4,5)i4').T + + Note that this function does **not** assume the file data is in Fortran + column major order, so you need to (i) swap the order of dimensions + when reading and (ii) transpose the resulting array. + + Alternatively, you can read the data as a 1-D array and handle the + ordering yourself. 
For example:: + + read_record('i4').reshape(5, 4, order='F') + + For records that contain several variables or mixed types (as opposed + to single scalar or array types), give them as separate arguments:: + + double precision :: a + integer :: b + write(1) a, b + + record = f.read_record(' 0, -n and n if n < 0 + + Parameters + ---------- + n : int + max number one wants to be able to represent + min : int + minimum number of characters to use for the format + + Returns + ------- + res : IntFormat + IntFormat instance with reasonable (see Notes) computed width + + Notes + ----- + Reasonable should be understood as the minimal string length necessary + without losing precision. For example, IntFormat.from_number(1) will + return an IntFormat instance of width 2, so that any 0 and 1 may be + represented as 1-character strings without loss of information. + """ + width = number_digits(n) + 1 + if n < 0: + width += 1 + repeat = 80 // width + return cls(width, min, repeat=repeat) + + def __init__(self, width, min=None, repeat=None): + self.width = width + self.repeat = repeat + self.min = min + + def __repr__(self): + r = "IntFormat(" + if self.repeat: + r += "%d" % self.repeat + r += "I%d" % self.width + if self.min: + r += ".%d" % self.min + return r + ")" + + @property + def fortran_format(self): + r = "(" + if self.repeat: + r += "%d" % self.repeat + r += "I%d" % self.width + if self.min: + r += ".%d" % self.min + return r + ")" + + @property + def python_format(self): + return "%" + str(self.width) + "d" + + +class ExpFormat: + @classmethod + def from_number(cls, n, min=None): + """Given a float number, returns a "reasonable" ExpFormat instance to + represent any number between -n and n. + + Parameters + ---------- + n : float + max number one wants to be able to represent + min : int + minimum number of characters to use for the format + + Returns + ------- + res : ExpFormat + ExpFormat instance with reasonable (see Notes) computed width + + Notes + ----- + Reasonable should be understood as the minimal string length necessary + to avoid losing precision. + """ + # len of one number in exp format: sign + 1|0 + "." + + # number of digit for fractional part + 'E' + sign of exponent + + # len of exponent + finfo = np.finfo(n.dtype) + # Number of digits for fractional part + n_prec = finfo.precision + 1 + # Number of digits for exponential part + n_exp = number_digits(np.max(np.abs([finfo.maxexp, finfo.minexp]))) + width = 1 + 1 + n_prec + 1 + n_exp + 1 + if n < 0: + width += 1 + repeat = int(np.floor(80 / width)) + return cls(width, n_prec, min, repeat=repeat) + + def __init__(self, width, significand, min=None, repeat=None): + """\ + Parameters + ---------- + width : int + number of characters taken by the string (includes space). + """ + self.width = width + self.significand = significand + self.repeat = repeat + self.min = min + + def __repr__(self): + r = "ExpFormat(" + if self.repeat: + r += "%d" % self.repeat + r += "E%d.%d" % (self.width, self.significand) + if self.min: + r += "E%d" % self.min + return r + ")" + + @property + def fortran_format(self): + r = "(" + if self.repeat: + r += "%d" % self.repeat + r += "E%d.%d" % (self.width, self.significand) + if self.min: + r += "E%d" % self.min + return r + ")" + + @property + def python_format(self): + return "%" + str(self.width-1) + "." 
+ str(self.significand) + "E" + + +class Token: + def __init__(self, type, value, pos): + self.type = type + self.value = value + self.pos = pos + + def __str__(self): + return f"""Token('{self.type}', "{self.value}")""" + + def __repr__(self): + return self.__str__() + + +class Tokenizer: + def __init__(self): + self.tokens = list(TOKENS.keys()) + self.res = [re.compile(TOKENS[i]) for i in self.tokens] + + def input(self, s): + self.data = s + self.curpos = 0 + self.len = len(s) + + def next_token(self): + curpos = self.curpos + + while curpos < self.len: + for i, r in enumerate(self.res): + m = r.match(self.data, curpos) + if m is None: + continue + else: + self.curpos = m.end() + return Token(self.tokens[i], m.group(), self.curpos) + raise SyntaxError("Unknown character at position %d (%s)" + % (self.curpos, self.data[curpos])) + + +# Grammar for fortran format: +# format : LPAR format_string RPAR +# format_string : repeated | simple +# repeated : repeat simple +# simple : int_fmt | exp_fmt +# int_fmt : INT_ID width +# exp_fmt : simple_exp_fmt +# simple_exp_fmt : EXP_ID width DOT significand +# extended_exp_fmt : EXP_ID width DOT significand EXP_ID ndigits +# repeat : INT +# width : INT +# significand : INT +# ndigits : INT + +# Naive fortran formatter - parser is hand-made +class FortranFormatParser: + """Parser for Fortran format strings. The parse method returns a *Format + instance. + + Notes + ----- + Only ExpFormat (exponential format for floating values) and IntFormat + (integer format) for now. + """ + def __init__(self): + self.tokenizer = threading.local() + + def parse(self, s): + if not hasattr(self.tokenizer, 't'): + self.tokenizer.t = Tokenizer() + + self.tokenizer.t.input(s) + + tokens = [] + + try: + while True: + t = self.tokenizer.t.next_token() + if t is None: + break + else: + tokens.append(t) + return self._parse_format(tokens) + except SyntaxError as e: + raise BadFortranFormat(str(e)) from e + + def _get_min(self, tokens): + next = tokens.pop(0) + if not next.type == "DOT": + raise SyntaxError() + next = tokens.pop(0) + return next.value + + def _expect(self, token, tp): + if not token.type == tp: + raise SyntaxError() + + def _parse_format(self, tokens): + if not tokens[0].type == "LPAR": + raise SyntaxError("Expected left parenthesis at position " + "%d (got '%s')" % (0, tokens[0].value)) + elif not tokens[-1].type == "RPAR": + raise SyntaxError("Expected right parenthesis at position " + f"{len(tokens)} (got '{tokens[-1].value}')") + + tokens = tokens[1:-1] + types = [t.type for t in tokens] + if types[0] == "INT": + repeat = int(tokens.pop(0).value) + else: + repeat = None + + next = tokens.pop(0) + if next.type == "INT_ID": + next = self._next(tokens, "INT") + width = int(next.value) + if tokens: + min = int(self._get_min(tokens)) + else: + min = None + return IntFormat(width, min, repeat) + elif next.type == "EXP_ID": + next = self._next(tokens, "INT") + width = int(next.value) + + next = self._next(tokens, "DOT") + + next = self._next(tokens, "INT") + significand = int(next.value) + + if tokens: + next = self._next(tokens, "EXP_ID") + + next = self._next(tokens, "INT") + min = int(next.value) + else: + min = None + return ExpFormat(width, significand, min, repeat) + else: + raise SyntaxError(f"Invalid formatter type {next.value}") + + def _next(self, tokens, tp): + if not len(tokens) > 0: + raise SyntaxError() + next = tokens.pop(0) + self._expect(next, tp) + return next diff --git 
a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/_harwell_boeing/hb.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/_harwell_boeing/hb.py new file mode 100644 index 0000000000000000000000000000000000000000..96fef89ac35a271f3a5501beaefd06da13ede685 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/_harwell_boeing/hb.py @@ -0,0 +1,575 @@ +""" +Implementation of Harwell-Boeing read/write. + +At the moment not the full Harwell-Boeing format is supported. Supported +features are: + + - assembled, non-symmetric, real matrices + - integer for pointer/indices + - exponential format for float values, and int format + +""" +# TODO: +# - Add more support (symmetric/complex matrices, non-assembled matrices ?) + +# XXX: reading is reasonably efficient (>= 85 % is in numpy.fromstring), but +# takes a lot of memory. Being faster would require compiled code. +# write is not efficient. Although not a terribly exciting task, +# having reusable facilities to efficiently read/write fortran-formatted files +# would be useful outside this module. + +import warnings + +import numpy as np +from scipy.sparse import csc_array, csc_matrix +from ._fortran_format_parser import FortranFormatParser, IntFormat, ExpFormat + +__all__ = ["hb_read", "hb_write"] + + +class MalformedHeader(Exception): + pass + + +class LineOverflow(Warning): + pass + + +def _nbytes_full(fmt, nlines): + """Return the number of bytes to read to get every full lines for the + given parsed fortran format.""" + return (fmt.repeat * fmt.width + 1) * (nlines - 1) + + +class HBInfo: + @classmethod + def from_data(cls, m, title="Default title", key="0", mxtype=None, fmt=None): + """Create a HBInfo instance from an existing sparse matrix. 
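+
+        The matrix is converted to CSC form, and the Fortran formats for the
+        pointer, index and value arrays are derived from its contents.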
+ + Parameters + ---------- + m : sparse array or matrix + the HBInfo instance will derive its parameters from m + title : str + Title to put in the HB header + key : str + Key + mxtype : HBMatrixType + type of the input matrix + fmt : dict + not implemented + + Returns + ------- + hb_info : HBInfo instance + """ + m = m.tocsc(copy=False) + + pointer = m.indptr + indices = m.indices + values = m.data + + nrows, ncols = m.shape + nnon_zeros = m.nnz + + if fmt is None: + # +1 because HB use one-based indexing (Fortran), and we will write + # the indices /pointer as such + pointer_fmt = IntFormat.from_number(np.max(pointer+1)) + indices_fmt = IntFormat.from_number(np.max(indices+1)) + + if values.dtype.kind in np.typecodes["AllFloat"]: + values_fmt = ExpFormat.from_number(-np.max(np.abs(values))) + elif values.dtype.kind in np.typecodes["AllInteger"]: + values_fmt = IntFormat.from_number(-np.max(np.abs(values))) + else: + message = f"type {values.dtype.kind} not implemented yet" + raise NotImplementedError(message) + else: + raise NotImplementedError("fmt argument not supported yet.") + + if mxtype is None: + if not np.isrealobj(values): + raise ValueError("Complex values not supported yet") + if values.dtype.kind in np.typecodes["AllInteger"]: + tp = "integer" + elif values.dtype.kind in np.typecodes["AllFloat"]: + tp = "real" + else: + raise NotImplementedError( + f"type {values.dtype} for values not implemented") + mxtype = HBMatrixType(tp, "unsymmetric", "assembled") + else: + raise ValueError("mxtype argument not handled yet.") + + def _nlines(fmt, size): + nlines = size // fmt.repeat + if nlines * fmt.repeat != size: + nlines += 1 + return nlines + + pointer_nlines = _nlines(pointer_fmt, pointer.size) + indices_nlines = _nlines(indices_fmt, indices.size) + values_nlines = _nlines(values_fmt, values.size) + + total_nlines = pointer_nlines + indices_nlines + values_nlines + + return cls(title, key, + total_nlines, pointer_nlines, indices_nlines, values_nlines, + mxtype, nrows, ncols, nnon_zeros, + pointer_fmt.fortran_format, indices_fmt.fortran_format, + values_fmt.fortran_format) + + @classmethod + def from_file(cls, fid): + """Create a HBInfo instance from a file object containing a matrix in the + HB format. + + Parameters + ---------- + fid : file-like matrix + File or file-like object containing a matrix in the HB format. 
+ + Returns + ------- + hb_info : HBInfo instance + """ + # First line + line = fid.readline().strip("\n") + if not len(line) > 72: + raise ValueError("Expected at least 72 characters for first line, " + f"got: \n{line}") + title = line[:72] + key = line[72:] + + # Second line + line = fid.readline().strip("\n") + if not len(line.rstrip()) >= 56: + raise ValueError("Expected at least 56 characters for second line, " + f"got: \n{line}") + total_nlines = _expect_int(line[:14]) + pointer_nlines = _expect_int(line[14:28]) + indices_nlines = _expect_int(line[28:42]) + values_nlines = _expect_int(line[42:56]) + + rhs_nlines = line[56:72].strip() + if rhs_nlines == '': + rhs_nlines = 0 + else: + rhs_nlines = _expect_int(rhs_nlines) + if not rhs_nlines == 0: + raise ValueError("Only files without right hand side supported for " + "now.") + + # Third line + line = fid.readline().strip("\n") + if not len(line) >= 70: + raise ValueError(f"Expected at least 72 character for third line, " + f"got:\n{line}") + + mxtype_s = line[:3].upper() + if not len(mxtype_s) == 3: + raise ValueError("mxtype expected to be 3 characters long") + + mxtype = HBMatrixType.from_fortran(mxtype_s) + if mxtype.value_type not in ["real", "integer"]: + raise ValueError("Only real or integer matrices supported for " + f"now (detected {mxtype})") + if not mxtype.structure == "unsymmetric": + raise ValueError("Only unsymmetric matrices supported for " + f"now (detected {mxtype})") + if not mxtype.storage == "assembled": + raise ValueError("Only assembled matrices supported for now") + + if not line[3:14] == " " * 11: + raise ValueError(f"Malformed data for third line: {line}") + + nrows = _expect_int(line[14:28]) + ncols = _expect_int(line[28:42]) + nnon_zeros = _expect_int(line[42:56]) + nelementals = _expect_int(line[56:70]) + if not nelementals == 0: + raise ValueError("Unexpected value %d for nltvl (last entry of line 3)" + % nelementals) + + # Fourth line + line = fid.readline().strip("\n") + + ct = line.split() + if not len(ct) == 3: + raise ValueError(f"Expected 3 formats, got {ct}") + + return cls(title, key, + total_nlines, pointer_nlines, indices_nlines, values_nlines, + mxtype, nrows, ncols, nnon_zeros, + ct[0], ct[1], ct[2], + rhs_nlines, nelementals) + + def __init__(self, title, key, + total_nlines, pointer_nlines, indices_nlines, values_nlines, + mxtype, nrows, ncols, nnon_zeros, + pointer_format_str, indices_format_str, values_format_str, + right_hand_sides_nlines=0, nelementals=0): + """Do not use this directly, but the class ctrs (from_* functions).""" + if title is None: + title = "No Title" + if len(title) > 72: + raise ValueError("title cannot be > 72 characters") + + if key is None: + key = "|No Key" + if len(key) > 8: + warnings.warn(f"key is > 8 characters (key is {key})", + LineOverflow, stacklevel=3) + self.title = title + self.key = key + + self.total_nlines = total_nlines + self.pointer_nlines = pointer_nlines + self.indices_nlines = indices_nlines + self.values_nlines = values_nlines + + parser = FortranFormatParser() + pointer_format = parser.parse(pointer_format_str) + if not isinstance(pointer_format, IntFormat): + raise ValueError("Expected int format for pointer format, got " + f"{pointer_format}") + + indices_format = parser.parse(indices_format_str) + if not isinstance(indices_format, IntFormat): + raise ValueError("Expected int format for indices format, got " + f"{indices_format}") + + values_format = parser.parse(values_format_str) + if isinstance(values_format, ExpFormat): + if 
mxtype.value_type not in ["real", "complex"]: + raise ValueError(f"Inconsistency between matrix type {mxtype} and " + f"value type {values_format}") + values_dtype = np.float64 + elif isinstance(values_format, IntFormat): + if mxtype.value_type not in ["integer"]: + raise ValueError(f"Inconsistency between matrix type {mxtype} and " + f"value type {values_format}") + # XXX: fortran int -> dtype association ? + values_dtype = int + else: + raise ValueError(f"Unsupported format for values {values_format!r}") + + self.pointer_format = pointer_format + self.indices_format = indices_format + self.values_format = values_format + + self.pointer_dtype = np.int32 + self.indices_dtype = np.int32 + self.values_dtype = values_dtype + + self.pointer_nlines = pointer_nlines + self.pointer_nbytes_full = _nbytes_full(pointer_format, pointer_nlines) + + self.indices_nlines = indices_nlines + self.indices_nbytes_full = _nbytes_full(indices_format, indices_nlines) + + self.values_nlines = values_nlines + self.values_nbytes_full = _nbytes_full(values_format, values_nlines) + + self.nrows = nrows + self.ncols = ncols + self.nnon_zeros = nnon_zeros + self.nelementals = nelementals + self.mxtype = mxtype + + def dump(self): + """Gives the header corresponding to this instance as a string.""" + header = [self.title.ljust(72) + self.key.ljust(8)] + + header.append("%14d%14d%14d%14d" % + (self.total_nlines, self.pointer_nlines, + self.indices_nlines, self.values_nlines)) + header.append("%14s%14d%14d%14d%14d" % + (self.mxtype.fortran_format.ljust(14), self.nrows, + self.ncols, self.nnon_zeros, 0)) + + pffmt = self.pointer_format.fortran_format + iffmt = self.indices_format.fortran_format + vffmt = self.values_format.fortran_format + header.append("%16s%16s%20s" % + (pffmt.ljust(16), iffmt.ljust(16), vffmt.ljust(20))) + return "\n".join(header) + + +def _expect_int(value, msg=None): + try: + return int(value) + except ValueError as e: + if msg is None: + msg = "Expected an int, got %s" + raise ValueError(msg % value) from e + + +def _read_hb_data(content, header): + # XXX: look at a way to reduce memory here (big string creation) + ptr_string = "".join([content.read(header.pointer_nbytes_full), + content.readline()]) + ptr = np.fromstring(ptr_string, + dtype=int, sep=' ') + + ind_string = "".join([content.read(header.indices_nbytes_full), + content.readline()]) + ind = np.fromstring(ind_string, + dtype=int, sep=' ') + + val_string = "".join([content.read(header.values_nbytes_full), + content.readline()]) + val = np.fromstring(val_string, + dtype=header.values_dtype, sep=' ') + + return csc_array((val, ind-1, ptr-1), shape=(header.nrows, header.ncols)) + + +def _write_data(m, fid, header): + m = m.tocsc(copy=False) + + def write_array(f, ar, nlines, fmt): + # ar_nlines is the number of full lines, n is the number of items per + # line, ffmt the fortran format + pyfmt = fmt.python_format + pyfmt_full = pyfmt * fmt.repeat + + # for each array to write, we first write the full lines, and special + # case for partial line + full = ar[:(nlines - 1) * fmt.repeat] + for row in full.reshape((nlines-1, fmt.repeat)): + f.write(pyfmt_full % tuple(row) + "\n") + nremain = ar.size - full.size + if nremain > 0: + f.write((pyfmt * nremain) % tuple(ar[ar.size - nremain:]) + "\n") + + fid.write(header.dump()) + fid.write("\n") + # +1 is for Fortran one-based indexing + write_array(fid, m.indptr+1, header.pointer_nlines, + header.pointer_format) + write_array(fid, m.indices+1, header.indices_nlines, + header.indices_format) + 
write_array(fid, m.data, header.values_nlines, + header.values_format) + + +class HBMatrixType: + """Class to hold the matrix type.""" + # q2f* translates qualified names to Fortran character + _q2f_type = { + "real": "R", + "complex": "C", + "pattern": "P", + "integer": "I", + } + _q2f_structure = { + "symmetric": "S", + "unsymmetric": "U", + "hermitian": "H", + "skewsymmetric": "Z", + "rectangular": "R" + } + _q2f_storage = { + "assembled": "A", + "elemental": "E", + } + + _f2q_type = {j: i for i, j in _q2f_type.items()} + _f2q_structure = {j: i for i, j in _q2f_structure.items()} + _f2q_storage = {j: i for i, j in _q2f_storage.items()} + + @classmethod + def from_fortran(cls, fmt): + if not len(fmt) == 3: + raise ValueError("Fortran format for matrix type should be 3 " + "characters long") + try: + value_type = cls._f2q_type[fmt[0]] + structure = cls._f2q_structure[fmt[1]] + storage = cls._f2q_storage[fmt[2]] + return cls(value_type, structure, storage) + except KeyError as e: + raise ValueError(f"Unrecognized format {fmt}") from e + + def __init__(self, value_type, structure, storage="assembled"): + self.value_type = value_type + self.structure = structure + self.storage = storage + + if value_type not in self._q2f_type: + raise ValueError(f"Unrecognized type {value_type}") + if structure not in self._q2f_structure: + raise ValueError(f"Unrecognized structure {structure}") + if storage not in self._q2f_storage: + raise ValueError(f"Unrecognized storage {storage}") + + @property + def fortran_format(self): + return self._q2f_type[self.value_type] + \ + self._q2f_structure[self.structure] + \ + self._q2f_storage[self.storage] + + def __repr__(self): + return f"HBMatrixType({self.value_type}, {self.structure}, {self.storage})" + + +class HBFile: + def __init__(self, file, hb_info=None): + """Create a HBFile instance. + + Parameters + ---------- + file : file-object + StringIO work as well + hb_info : HBInfo, optional + Should be given as an argument for writing, in which case the file + should be writable. + """ + self._fid = file + if hb_info is None: + self._hb_info = HBInfo.from_file(file) + else: + #raise OSError("file %s is not writable, and hb_info " + # "was given." % file) + self._hb_info = hb_info + + @property + def title(self): + return self._hb_info.title + + @property + def key(self): + return self._hb_info.key + + @property + def type(self): + return self._hb_info.mxtype.value_type + + @property + def structure(self): + return self._hb_info.mxtype.structure + + @property + def storage(self): + return self._hb_info.mxtype.storage + + def read_matrix(self): + return _read_hb_data(self._fid, self._hb_info) + + def write_matrix(self, m): + return _write_data(m, self._fid, self._hb_info) + + +def hb_read(path_or_open_file, *, spmatrix=True): + """Read HB-format file. + + Parameters + ---------- + path_or_open_file : path-like or file-like + If a file-like object, it is used as-is. Otherwise, it is opened + before reading. + spmatrix : bool, optional (default: True) + If ``True``, return sparse ``coo_matrix``. Otherwise return ``coo_array``. + + Returns + ------- + data : csc_array or csc_matrix + The data read from the HB file as a sparse array. + + Notes + ----- + At the moment not the full Harwell-Boeing format is supported. 
Supported + features are: + + - assembled, non-symmetric, real matrices + - integer for pointer/indices + - exponential format for float values, and int format + + Examples + -------- + We can read and write a harwell-boeing format file: + + >>> from scipy.io import hb_read, hb_write + >>> from scipy.sparse import csr_array, eye + >>> data = csr_array(eye(3)) # create a sparse array + >>> hb_write("data.hb", data) # write a hb file + >>> print(hb_read("data.hb", spmatrix=False)) # read a hb file + + Coords Values + (0, 0) 1.0 + (1, 1) 1.0 + (2, 2) 1.0 + """ + def _get_matrix(fid): + hb = HBFile(fid) + return hb.read_matrix() + + if hasattr(path_or_open_file, 'read'): + data = _get_matrix(path_or_open_file) + else: + with open(path_or_open_file) as f: + data = _get_matrix(f) + if spmatrix: + return csc_matrix(data) + return data + + +def hb_write(path_or_open_file, m, hb_info=None): + """Write HB-format file. + + Parameters + ---------- + path_or_open_file : path-like or file-like + If a file-like object, it is used as-is. Otherwise, it is opened + before writing. + m : sparse array or matrix + the sparse array to write + hb_info : HBInfo + contains the meta-data for write + + Returns + ------- + None + + Notes + ----- + At the moment not the full Harwell-Boeing format is supported. Supported + features are: + + - assembled, non-symmetric, real matrices + - integer for pointer/indices + - exponential format for float values, and int format + + Examples + -------- + We can read and write a harwell-boeing format file: + + >>> from scipy.io import hb_read, hb_write + >>> from scipy.sparse import csr_array, eye + >>> data = csr_array(eye(3)) # create a sparse array + >>> hb_write("data.hb", data) # write a hb file + >>> print(hb_read("data.hb", spmatrix=False)) # read a hb file + + Coords Values + (0, 0) 1.0 + (1, 1) 1.0 + (2, 2) 1.0 + """ + m = m.tocsc(copy=False) + + if hb_info is None: + hb_info = HBInfo.from_data(m) + + def _set_matrix(fid): + hb = HBFile(fid, hb_info) + return hb.write_matrix(m) + + if hasattr(path_or_open_file, 'write'): + return _set_matrix(path_or_open_file) + else: + with open(path_or_open_file, 'w') as f: + return _set_matrix(f) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/_harwell_boeing/tests/__init__.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/_harwell_boeing/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/_harwell_boeing/tests/test_fortran_format.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/_harwell_boeing/tests/test_fortran_format.py new file mode 100644 index 0000000000000000000000000000000000000000..dae040c523d6a6d618e89402d39a0cb05bad927a --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/_harwell_boeing/tests/test_fortran_format.py @@ -0,0 +1,74 @@ +import numpy as np + +from numpy.testing import assert_equal +from pytest import raises as assert_raises + +from scipy.io._harwell_boeing._fortran_format_parser import ( + FortranFormatParser, IntFormat, ExpFormat, BadFortranFormat) + + +class TestFortranFormatParser: + def setup_method(self): + self.parser = FortranFormatParser() + + def _test_equal(self, format, ref): + ret = self.parser.parse(format) + assert_equal(ret.__dict__, 
ref.__dict__) + + def test_simple_int(self): + self._test_equal("(I4)", IntFormat(4)) + + def test_simple_repeated_int(self): + self._test_equal("(3I4)", IntFormat(4, repeat=3)) + + def test_simple_exp(self): + self._test_equal("(E4.3)", ExpFormat(4, 3)) + + def test_exp_exp(self): + self._test_equal("(E8.3E3)", ExpFormat(8, 3, 3)) + + def test_repeat_exp(self): + self._test_equal("(2E4.3)", ExpFormat(4, 3, repeat=2)) + + def test_repeat_exp_exp(self): + self._test_equal("(2E8.3E3)", ExpFormat(8, 3, 3, repeat=2)) + + def test_wrong_formats(self): + def _test_invalid(bad_format): + assert_raises(BadFortranFormat, lambda: self.parser.parse(bad_format)) + _test_invalid("I4") + _test_invalid("(E4)") + _test_invalid("(E4.)") + _test_invalid("(E4.E3)") + + +class TestIntFormat: + def test_to_fortran(self): + f = [IntFormat(10), IntFormat(12, 10), IntFormat(12, 10, 3)] + res = ["(I10)", "(I12.10)", "(3I12.10)"] + + for i, j in zip(f, res): + assert_equal(i.fortran_format, j) + + def test_from_number(self): + f = [10, -12, 123456789] + r_f = [IntFormat(3, repeat=26), IntFormat(4, repeat=20), + IntFormat(10, repeat=8)] + for i, j in zip(f, r_f): + assert_equal(IntFormat.from_number(i).__dict__, j.__dict__) + + +class TestExpFormat: + def test_to_fortran(self): + f = [ExpFormat(10, 5), ExpFormat(12, 10), ExpFormat(12, 10, min=3), + ExpFormat(10, 5, repeat=3)] + res = ["(E10.5)", "(E12.10)", "(E12.10E3)", "(3E10.5)"] + + for i, j in zip(f, res): + assert_equal(i.fortran_format, j) + + def test_from_number(self): + f = np.array([1.0, -1.2]) + r_f = [ExpFormat(24, 16, repeat=3), ExpFormat(25, 16, repeat=3)] + for i, j in zip(f, r_f): + assert_equal(ExpFormat.from_number(i).__dict__, j.__dict__) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/_harwell_boeing/tests/test_hb.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/_harwell_boeing/tests/test_hb.py new file mode 100644 index 0000000000000000000000000000000000000000..d0c9ee5635fdd45c0f6560e686c9e40176ef95e4 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/_harwell_boeing/tests/test_hb.py @@ -0,0 +1,70 @@ +from io import StringIO +import tempfile + +import numpy as np + +from numpy.testing import assert_equal, \ + assert_array_almost_equal_nulp + +from scipy.sparse import coo_array, csc_array, random_array, isspmatrix + +from scipy.io import hb_read, hb_write + + +SIMPLE = """\ +No Title |No Key + 9 4 1 4 +RUA 100 100 10 0 +(26I3) (26I3) (3E23.15) +1 2 2 2 2 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 +3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 +3 3 3 3 3 3 3 4 4 4 6 6 6 6 6 6 6 6 6 6 6 8 9 9 9 9 +9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 11 +37 71 89 18 30 45 70 19 25 52 +2.971243799687726e-01 3.662366682877375e-01 4.786962174699534e-01 +6.490068647991184e-01 6.617490424831662e-02 8.870370343191623e-01 +4.196478590163001e-01 5.649603072111251e-01 9.934423887087086e-01 +6.912334991524289e-01 +""" + +SIMPLE_MATRIX = coo_array( + ((0.297124379969, 0.366236668288, 0.47869621747, 0.649006864799, + 0.0661749042483, 0.887037034319, 0.419647859016, + 0.564960307211, 0.993442388709, 0.691233499152,), + (np.array([[36, 70, 88, 17, 29, 44, 69, 18, 24, 51], + [0, 4, 58, 61, 61, 72, 72, 73, 99, 99]])))) + + +def assert_csc_almost_equal(r, l): + r = csc_array(r) + l = csc_array(l) + assert_equal(r.indptr, l.indptr) + assert_equal(r.indices, l.indices) + assert_array_almost_equal_nulp(r.data, l.data, 
10000) + + +class TestHBReader: + def test_simple(self): + m = hb_read(StringIO(SIMPLE), spmatrix=False) + assert_csc_almost_equal(m, SIMPLE_MATRIX) + assert not isspmatrix(m) + m = hb_read(StringIO(SIMPLE), spmatrix=True) + assert isspmatrix(m) + m = hb_read(StringIO(SIMPLE)) # default + assert isspmatrix(m) + + +class TestHBReadWrite: + + def check_save_load(self, value): + with tempfile.NamedTemporaryFile(mode='w+t') as file: + hb_write(file, value) + file.file.seek(0) + value_loaded = hb_read(file, spmatrix=False) + assert_csc_almost_equal(value, value_loaded) + + def test_simple(self): + random_arr = random_array((10, 100), density=0.1) + for format in ('coo', 'csc', 'csr', 'bsr', 'dia', 'dok', 'lil'): + arr = random_arr.asformat(format, copy=False) + self.check_save_load(arr) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/_idl.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/_idl.py new file mode 100644 index 0000000000000000000000000000000000000000..5730a9d4fe1beda5a71aa668bda13d17f2fc2436 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/_idl.py @@ -0,0 +1,919 @@ +# IDLSave - a python module to read IDL 'save' files +# Copyright (c) 2010 Thomas P. Robitaille + +# Many thanks to Craig Markwardt for publishing the Unofficial Format +# Specification for IDL .sav files, without which this Python module would not +# exist (http://cow.physics.wisc.edu/~craigm/idl/savefmt). + +# This code was developed by with permission from ITT Visual Information +# Systems. IDL(r) is a registered trademark of ITT Visual Information Systems, +# Inc. for their Interactive Data Language software. + +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. 
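+# A minimal usage sketch (illustrative only; the file name is hypothetical,
+# and the complete, tested example lives in the `readsav` docstring below):
+#
+#     from scipy.io import readsav
+#     data = readsav('myfile.sav')                     # case-insensitive AttrDict
+#     plain = readsav('myfile.sav', python_dict=True)  # plain dict, lowercase keys
+#     print(plain.keys())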
+ +__all__ = ['readsav'] + +import struct +import numpy as np +import tempfile +import zlib +import warnings + +# Define the different data types that can be found in an IDL save file +DTYPE_DICT = {1: '>u1', + 2: '>i2', + 3: '>i4', + 4: '>f4', + 5: '>f8', + 6: '>c8', + 7: '|O', + 8: '|O', + 9: '>c16', + 10: '|O', + 11: '|O', + 12: '>u2', + 13: '>u4', + 14: '>i8', + 15: '>u8'} + +# Define the different record types that can be found in an IDL save file +RECTYPE_DICT = {0: "START_MARKER", + 1: "COMMON_VARIABLE", + 2: "VARIABLE", + 3: "SYSTEM_VARIABLE", + 6: "END_MARKER", + 10: "TIMESTAMP", + 12: "COMPILED", + 13: "IDENTIFICATION", + 14: "VERSION", + 15: "HEAP_HEADER", + 16: "HEAP_DATA", + 17: "PROMOTE64", + 19: "NOTICE", + 20: "DESCRIPTION"} + +# Define a dictionary to contain structure definitions +STRUCT_DICT = {} + + +def _align_32(f): + '''Align to the next 32-bit position in a file''' + + pos = f.tell() + if pos % 4 != 0: + f.seek(pos + 4 - pos % 4) + return + + +def _skip_bytes(f, n): + '''Skip `n` bytes''' + f.read(n) + return + + +def _read_bytes(f, n): + '''Read the next `n` bytes''' + return f.read(n) + + +def _read_byte(f): + '''Read a single byte''' + return np.uint8(struct.unpack('>B', f.read(4)[:1])[0]) + + +def _read_long(f): + '''Read a signed 32-bit integer''' + return np.int32(struct.unpack('>l', f.read(4))[0]) + + +def _read_int16(f): + '''Read a signed 16-bit integer''' + return np.int16(struct.unpack('>h', f.read(4)[2:4])[0]) + + +def _read_int32(f): + '''Read a signed 32-bit integer''' + return np.int32(struct.unpack('>i', f.read(4))[0]) + + +def _read_int64(f): + '''Read a signed 64-bit integer''' + return np.int64(struct.unpack('>q', f.read(8))[0]) + + +def _read_uint16(f): + '''Read an unsigned 16-bit integer''' + return np.uint16(struct.unpack('>H', f.read(4)[2:4])[0]) + + +def _read_uint32(f): + '''Read an unsigned 32-bit integer''' + return np.uint32(struct.unpack('>I', f.read(4))[0]) + + +def _read_uint64(f): + '''Read an unsigned 64-bit integer''' + return np.uint64(struct.unpack('>Q', f.read(8))[0]) + + +def _read_float32(f): + '''Read a 32-bit float''' + return np.float32(struct.unpack('>f', f.read(4))[0]) + + +def _read_float64(f): + '''Read a 64-bit float''' + return np.float64(struct.unpack('>d', f.read(8))[0]) + + +class Pointer: + '''Class used to define pointers''' + + def __init__(self, index): + self.index = index + return + + +class ObjectPointer(Pointer): + '''Class used to define object pointers''' + pass + + +def _read_string(f): + '''Read a string''' + length = _read_long(f) + if length > 0: + chars = _read_bytes(f, length).decode('latin1') + _align_32(f) + else: + chars = '' + return chars + + +def _read_string_data(f): + '''Read a data string (length is specified twice)''' + length = _read_long(f) + if length > 0: + length = _read_long(f) + string_data = _read_bytes(f, length) + _align_32(f) + else: + string_data = '' + return string_data + + +def _read_data(f, dtype): + '''Read a variable with a specified data type''' + if dtype == 1: + if _read_int32(f) != 1: + raise Exception("Error occurred while reading byte variable") + return _read_byte(f) + elif dtype == 2: + return _read_int16(f) + elif dtype == 3: + return _read_int32(f) + elif dtype == 4: + return _read_float32(f) + elif dtype == 5: + return _read_float64(f) + elif dtype == 6: + real = _read_float32(f) + imag = _read_float32(f) + return np.complex64(real + imag * 1j) + elif dtype == 7: + return _read_string_data(f) + elif dtype == 8: + raise Exception("Should not be here - please 
report this") + elif dtype == 9: + real = _read_float64(f) + imag = _read_float64(f) + return np.complex128(real + imag * 1j) + elif dtype == 10: + return Pointer(_read_int32(f)) + elif dtype == 11: + return ObjectPointer(_read_int32(f)) + elif dtype == 12: + return _read_uint16(f) + elif dtype == 13: + return _read_uint32(f) + elif dtype == 14: + return _read_int64(f) + elif dtype == 15: + return _read_uint64(f) + else: + raise Exception("Unknown IDL type: %i - please report this" % dtype) + + +def _read_structure(f, array_desc, struct_desc): + ''' + Read a structure, with the array and structure descriptors given as + `array_desc` and `structure_desc` respectively. + ''' + + nrows = array_desc['nelements'] + columns = struct_desc['tagtable'] + + dtype = [] + for col in columns: + if col['structure'] or col['array']: + dtype.append(((col['name'].lower(), col['name']), np.object_)) + else: + if col['typecode'] in DTYPE_DICT: + dtype.append(((col['name'].lower(), col['name']), + DTYPE_DICT[col['typecode']])) + else: + raise Exception("Variable type %i not implemented" % + col['typecode']) + + structure = np.rec.recarray((nrows, ), dtype=dtype) + + for i in range(nrows): + for col in columns: + dtype = col['typecode'] + if col['structure']: + structure[col['name']][i] = _read_structure(f, + struct_desc['arrtable'][col['name']], + struct_desc['structtable'][col['name']]) + elif col['array']: + structure[col['name']][i] = _read_array(f, dtype, + struct_desc['arrtable'][col['name']]) + else: + structure[col['name']][i] = _read_data(f, dtype) + + # Reshape structure if needed + if array_desc['ndims'] > 1: + dims = array_desc['dims'][:int(array_desc['ndims'])] + dims.reverse() + structure = structure.reshape(dims) + + return structure + + +def _read_array(f, typecode, array_desc): + ''' + Read an array of type `typecode`, with the array descriptor given as + `array_desc`. 
+ ''' + + if typecode in [1, 3, 4, 5, 6, 9, 13, 14, 15]: + + if typecode == 1: + nbytes = _read_int32(f) + if nbytes != array_desc['nbytes']: + warnings.warn("Not able to verify number of bytes from header", + stacklevel=3) + + # Read bytes as numpy array + array = np.frombuffer(f.read(array_desc['nbytes']), + dtype=DTYPE_DICT[typecode]) + + elif typecode in [2, 12]: + + # These are 2 byte types, need to skip every two as they are not packed + + array = np.frombuffer(f.read(array_desc['nbytes']*2), + dtype=DTYPE_DICT[typecode])[1::2] + + else: + + # Read bytes into list + array = [] + for i in range(array_desc['nelements']): + dtype = typecode + data = _read_data(f, dtype) + array.append(data) + + array = np.array(array, dtype=np.object_) + + # Reshape array if needed + if array_desc['ndims'] > 1: + dims = array_desc['dims'][:int(array_desc['ndims'])] + dims.reverse() + array = array.reshape(dims) + + # Go to next alignment position + _align_32(f) + + return array + + +def _read_record(f): + '''Function to read in a full record''' + + record = {'rectype': _read_long(f)} + + nextrec = _read_uint32(f) + nextrec += _read_uint32(f).astype(np.int64) * 2**32 + + _skip_bytes(f, 4) + + if record['rectype'] not in RECTYPE_DICT: + raise Exception("Unknown RECTYPE: %i" % record['rectype']) + + record['rectype'] = RECTYPE_DICT[record['rectype']] + + if record['rectype'] in ["VARIABLE", "HEAP_DATA"]: + + if record['rectype'] == "VARIABLE": + record['varname'] = _read_string(f) + else: + record['heap_index'] = _read_long(f) + _skip_bytes(f, 4) + + rectypedesc = _read_typedesc(f) + + if rectypedesc['typecode'] == 0: + + if nextrec == f.tell(): + record['data'] = None # Indicates NULL value + else: + raise ValueError("Unexpected type code: 0") + + else: + + varstart = _read_long(f) + if varstart != 7: + raise Exception("VARSTART is not 7") + + if rectypedesc['structure']: + record['data'] = _read_structure(f, rectypedesc['array_desc'], + rectypedesc['struct_desc']) + elif rectypedesc['array']: + record['data'] = _read_array(f, rectypedesc['typecode'], + rectypedesc['array_desc']) + else: + dtype = rectypedesc['typecode'] + record['data'] = _read_data(f, dtype) + + elif record['rectype'] == "TIMESTAMP": + + _skip_bytes(f, 4*256) + record['date'] = _read_string(f) + record['user'] = _read_string(f) + record['host'] = _read_string(f) + + elif record['rectype'] == "VERSION": + + record['format'] = _read_long(f) + record['arch'] = _read_string(f) + record['os'] = _read_string(f) + record['release'] = _read_string(f) + + elif record['rectype'] == "IDENTIFICATON": + + record['author'] = _read_string(f) + record['title'] = _read_string(f) + record['idcode'] = _read_string(f) + + elif record['rectype'] == "NOTICE": + + record['notice'] = _read_string(f) + + elif record['rectype'] == "DESCRIPTION": + + record['description'] = _read_string_data(f) + + elif record['rectype'] == "HEAP_HEADER": + + record['nvalues'] = _read_long(f) + record['indices'] = [_read_long(f) for _ in range(record['nvalues'])] + + elif record['rectype'] == "COMMONBLOCK": + + record['nvars'] = _read_long(f) + record['name'] = _read_string(f) + record['varnames'] = [_read_string(f) for _ in range(record['nvars'])] + + elif record['rectype'] == "END_MARKER": + + record['end'] = True + + elif record['rectype'] == "UNKNOWN": + + warnings.warn("Skipping UNKNOWN record", stacklevel=3) + + elif record['rectype'] == "SYSTEM_VARIABLE": + + warnings.warn("Skipping SYSTEM_VARIABLE record", stacklevel=3) + + else: + + raise 
Exception(f"record['rectype']={record['rectype']} not implemented") + + f.seek(nextrec) + + return record + + +def _read_typedesc(f): + '''Function to read in a type descriptor''' + + typedesc = {'typecode': _read_long(f), 'varflags': _read_long(f)} + + if typedesc['varflags'] & 2 == 2: + raise Exception("System variables not implemented") + + typedesc['array'] = typedesc['varflags'] & 4 == 4 + typedesc['structure'] = typedesc['varflags'] & 32 == 32 + + if typedesc['structure']: + typedesc['array_desc'] = _read_arraydesc(f) + typedesc['struct_desc'] = _read_structdesc(f) + elif typedesc['array']: + typedesc['array_desc'] = _read_arraydesc(f) + + return typedesc + + +def _read_arraydesc(f): + '''Function to read in an array descriptor''' + + arraydesc = {'arrstart': _read_long(f)} + + if arraydesc['arrstart'] == 8: + + _skip_bytes(f, 4) + + arraydesc['nbytes'] = _read_long(f) + arraydesc['nelements'] = _read_long(f) + arraydesc['ndims'] = _read_long(f) + + _skip_bytes(f, 8) + + arraydesc['nmax'] = _read_long(f) + + arraydesc['dims'] = [_read_long(f) for _ in range(arraydesc['nmax'])] + + elif arraydesc['arrstart'] == 18: + + warnings.warn("Using experimental 64-bit array read", stacklevel=3) + + _skip_bytes(f, 8) + + arraydesc['nbytes'] = _read_uint64(f) + arraydesc['nelements'] = _read_uint64(f) + arraydesc['ndims'] = _read_long(f) + + _skip_bytes(f, 8) + + arraydesc['nmax'] = 8 + + arraydesc['dims'] = [] + for d in range(arraydesc['nmax']): + v = _read_long(f) + if v != 0: + raise Exception("Expected a zero in ARRAY_DESC") + arraydesc['dims'].append(_read_long(f)) + + else: + + raise Exception("Unknown ARRSTART: %i" % arraydesc['arrstart']) + + return arraydesc + + +def _read_structdesc(f): + '''Function to read in a structure descriptor''' + + structdesc = {} + + structstart = _read_long(f) + if structstart != 9: + raise Exception("STRUCTSTART should be 9") + + structdesc['name'] = _read_string(f) + predef = _read_long(f) + structdesc['ntags'] = _read_long(f) + structdesc['nbytes'] = _read_long(f) + + structdesc['predef'] = predef & 1 + structdesc['inherits'] = predef & 2 + structdesc['is_super'] = predef & 4 + + if not structdesc['predef']: + + structdesc['tagtable'] = [_read_tagdesc(f) + for _ in range(structdesc['ntags'])] + + for tag in structdesc['tagtable']: + tag['name'] = _read_string(f) + + structdesc['arrtable'] = {tag['name']: _read_arraydesc(f) + for tag in structdesc['tagtable'] + if tag['array']} + + structdesc['structtable'] = {tag['name']: _read_structdesc(f) + for tag in structdesc['tagtable'] + if tag['structure']} + + if structdesc['inherits'] or structdesc['is_super']: + structdesc['classname'] = _read_string(f) + structdesc['nsupclasses'] = _read_long(f) + structdesc['supclassnames'] = [ + _read_string(f) for _ in range(structdesc['nsupclasses'])] + structdesc['supclasstable'] = [ + _read_structdesc(f) for _ in range(structdesc['nsupclasses'])] + + STRUCT_DICT[structdesc['name']] = structdesc + + else: + + if structdesc['name'] not in STRUCT_DICT: + raise Exception("PREDEF=1 but can't find definition") + + structdesc = STRUCT_DICT[structdesc['name']] + + return structdesc + + +def _read_tagdesc(f): + '''Function to read in a tag descriptor''' + + tagdesc = {'offset': _read_long(f)} + + if tagdesc['offset'] == -1: + tagdesc['offset'] = _read_uint64(f) + + tagdesc['typecode'] = _read_long(f) + tagflags = _read_long(f) + + tagdesc['array'] = tagflags & 4 == 4 + tagdesc['structure'] = tagflags & 32 == 32 + tagdesc['scalar'] = tagdesc['typecode'] in DTYPE_DICT + # Assume 
'10'x is scalar + + return tagdesc + + +def _replace_heap(variable, heap): + + if isinstance(variable, Pointer): + + while isinstance(variable, Pointer): + + if variable.index == 0: + variable = None + else: + if variable.index in heap: + variable = heap[variable.index] + else: + warnings.warn("Variable referenced by pointer not found " + "in heap: variable will be set to None", + stacklevel=3) + variable = None + + replace, new = _replace_heap(variable, heap) + + if replace: + variable = new + + return True, variable + + elif isinstance(variable, np.rec.recarray): + + # Loop over records + for ir, record in enumerate(variable): + + replace, new = _replace_heap(record, heap) + + if replace: + variable[ir] = new + + return False, variable + + elif isinstance(variable, np.record): + + # Loop over values + for iv, value in enumerate(variable): + + replace, new = _replace_heap(value, heap) + + if replace: + variable[iv] = new + + return False, variable + + elif isinstance(variable, np.ndarray): + + # Loop over values if type is np.object_ + if variable.dtype.type is np.object_: + + for iv in range(variable.size): + + replace, new = _replace_heap(variable.item(iv), heap) + + if replace: + variable.reshape(-1)[iv] = new + + return False, variable + + else: + + return False, variable + + +class AttrDict(dict): + ''' + A case-insensitive dictionary with access via item, attribute, and call + notations: + + >>> from scipy.io._idl import AttrDict + >>> d = AttrDict() + >>> d['Variable'] = 123 + >>> d['Variable'] + 123 + >>> d.Variable + 123 + >>> d.variable + 123 + >>> d('VARIABLE') + 123 + >>> d['missing'] + Traceback (most recent error last): + ... + KeyError: 'missing' + >>> d.missing + Traceback (most recent error last): + ... + AttributeError: 'AttrDict' object has no attribute 'missing' + ''' + + def __init__(self, init=None): + if init is None: + init = {} + dict.__init__(self, init) + + def __getitem__(self, name): + return super().__getitem__(name.lower()) + + def __setitem__(self, key, value): + return super().__setitem__(key.lower(), value) + + def __getattr__(self, name): + try: + return self.__getitem__(name) + except KeyError: + raise AttributeError( + f"'{type(self)}' object has no attribute '{name}'") from None + + __setattr__ = __setitem__ + __call__ = __getitem__ + + +def readsav(file_name, idict=None, python_dict=False, + uncompressed_file_name=None, verbose=False): + """ + Read an IDL .sav file. + + Parameters + ---------- + file_name : str + Name of the IDL save file. + idict : dict, optional + Dictionary in which to insert .sav file variables. + python_dict : bool, optional + By default, the object return is not a Python dictionary, but a + case-insensitive dictionary with item, attribute, and call access + to variables. To get a standard Python dictionary, set this option + to True. + uncompressed_file_name : str, optional + This option only has an effect for .sav files written with the + /compress option. If a file name is specified, compressed .sav + files are uncompressed to this file. Otherwise, readsav will use + the `tempfile` module to determine a temporary filename + automatically, and will remove the temporary file upon successfully + reading it in. + verbose : bool, optional + Whether to print out information about the save file, including + the records read, and available variables. 
+ + Returns + ------- + idl_dict : AttrDict or dict + If `python_dict` is set to False (default), this function returns a + case-insensitive dictionary with item, attribute, and call access + to variables. If `python_dict` is set to True, this function + returns a Python dictionary with all variable names in lowercase. + If `idict` was specified, then variables are written to the + dictionary specified, and the updated dictionary is returned. + + Examples + -------- + >>> from os.path import dirname, join as pjoin + >>> import scipy.io as sio + >>> from scipy.io import readsav + + Get the filename for an example .sav file from the tests/data directory. + + >>> data_dir = pjoin(dirname(sio.__file__), 'tests', 'data') + >>> sav_fname = pjoin(data_dir, 'array_float32_1d.sav') + + Load the .sav file contents. + + >>> sav_data = readsav(sav_fname) + + Get keys of the .sav file contents. + + >>> print(sav_data.keys()) + dict_keys(['array1d']) + + Access a content with a key. + + >>> print(sav_data['array1d']) + [0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. + 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. + 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. + 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. + 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. + 0. 0. 0.] + + """ + + # Initialize record and variable holders + records = [] + if python_dict or idict: + variables = {} + else: + variables = AttrDict() + + # Open the IDL file + f = open(file_name, 'rb') + + # Read the signature, which should be 'SR' + signature = _read_bytes(f, 2) + if signature != b'SR': + raise Exception(f"Invalid SIGNATURE: {signature}") + + # Next, the record format, which is '\x00\x04' for normal .sav + # files, and '\x00\x06' for compressed .sav files. 
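+    # (Illustrative note: a normal-format file therefore begins with the four
+    # bytes b'SR\x00\x04', while a compressed one begins with b'SR\x00\x06';
+    # the expansion branch below rewrites a compressed file into the normal
+    # layout in a temporary file before parsing.)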
+ recfmt = _read_bytes(f, 2) + + if recfmt == b'\x00\x04': + pass + + elif recfmt == b'\x00\x06': + + if verbose: + print("IDL Save file is compressed") + + if uncompressed_file_name: + fout = open(uncompressed_file_name, 'w+b') + else: + fout = tempfile.NamedTemporaryFile(suffix='.sav') + + if verbose: + print(f" -> expanding to {fout.name}") + + # Write header + fout.write(b'SR\x00\x04') + + # Cycle through records + while True: + + # Read record type + rectype = _read_long(f) + fout.write(struct.pack('>l', int(rectype))) + + # Read position of next record and return as int + nextrec = _read_uint32(f) + nextrec += _read_uint32(f).astype(np.int64) * 2**32 + + # Read the unknown 4 bytes + unknown = f.read(4) + + # Check if the end of the file has been reached + if RECTYPE_DICT[rectype] == 'END_MARKER': + modval = np.int64(2**32) + fout.write(struct.pack('>I', int(nextrec) % modval)) + fout.write( + struct.pack('>I', int((nextrec - (nextrec % modval)) / modval)) + ) + fout.write(unknown) + break + + # Find current position + pos = f.tell() + + # Decompress record + rec_string = zlib.decompress(f.read(nextrec-pos)) + + # Find new position of next record + nextrec = fout.tell() + len(rec_string) + 12 + + # Write out record + fout.write(struct.pack('>I', int(nextrec % 2**32))) + fout.write(struct.pack('>I', int((nextrec - (nextrec % 2**32)) / 2**32))) + fout.write(unknown) + fout.write(rec_string) + + # Close the original compressed file + f.close() + + # Set f to be the decompressed file, and skip the first four bytes + f = fout + f.seek(4) + + else: + raise Exception(f"Invalid RECFMT: {recfmt}") + + # Loop through records, and add them to the list + while True: + r = _read_record(f) + records.append(r) + if 'end' in r: + if r['end']: + break + + # Close the file + f.close() + + # Find heap data variables + heap = {} + for r in records: + if r['rectype'] == "HEAP_DATA": + heap[r['heap_index']] = r['data'] + + # Find all variables + for r in records: + if r['rectype'] == "VARIABLE": + replace, new = _replace_heap(r['data'], heap) + if replace: + r['data'] = new + variables[r['varname'].lower()] = r['data'] + + if verbose: + + # Print out timestamp info about the file + for record in records: + if record['rectype'] == "TIMESTAMP": + print("-"*50) + print(f"Date: {record['date']}") + print(f"User: {record['user']}") + print(f"Host: {record['host']}") + break + + # Print out version info about the file + for record in records: + if record['rectype'] == "VERSION": + print("-"*50) + print(f"Format: {record['format']}") + print(f"Architecture: {record['arch']}") + print(f"Operating System: {record['os']}") + print(f"IDL Version: {record['release']}") + break + + # Print out identification info about the file + for record in records: + if record['rectype'] == "IDENTIFICATON": + print("-"*50) + print(f"Author: {record['author']}") + print(f"Title: {record['title']}") + print(f"ID Code: {record['idcode']}") + break + + # Print out descriptions saved with the file + for record in records: + if record['rectype'] == "DESCRIPTION": + print("-"*50) + print(f"Description: {record['description']}") + break + + print("-"*50) + print(f"Successfully read {len(records)} records of which:") + + # Create convenience list of record types + rectypes = [r['rectype'] for r in records] + + for rt in set(rectypes): + if rt != 'END_MARKER': + print(" - %i are of type %s" % (rectypes.count(rt), rt)) + print("-"*50) + + if 'VARIABLE' in rectypes: + print("Available variables:") + for var in variables: + print(f" - {var} 
[{type(variables[var])}]") + print("-"*50) + + if idict: + for var in variables: + idict[var] = variables[var] + return idict + else: + return variables diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/_mmio.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/_mmio.py new file mode 100644 index 0000000000000000000000000000000000000000..32db20065d632d582f04addcf766daa4e6b5fd8e --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/_mmio.py @@ -0,0 +1,968 @@ +""" + Matrix Market I/O in Python. + See http://math.nist.gov/MatrixMarket/formats.html + for information about the Matrix Market format. +""" +# +# Author: Pearu Peterson +# Created: October, 2004 +# +# References: +# http://math.nist.gov/MatrixMarket/ +# +import os + +import numpy as np +from numpy import (asarray, real, imag, conj, zeros, ndarray, concatenate, + ones, can_cast) + +from scipy.sparse import coo_array, issparse, coo_matrix + +__all__ = ['mminfo', 'mmread', 'mmwrite', 'MMFile'] + + +# ----------------------------------------------------------------------------- +def asstr(s): + if isinstance(s, bytes): + return s.decode('latin1') + return str(s) + + +def mminfo(source): + """ + Return size and storage parameters from Matrix Market file-like 'source'. + + Parameters + ---------- + source : str or file-like + Matrix Market filename (extension .mtx) or open file-like object + + Returns + ------- + rows : int + Number of matrix rows. + cols : int + Number of matrix columns. + entries : int + Number of non-zero entries of a sparse matrix + or rows*cols for a dense matrix. + format : str + Either 'coordinate' or 'array'. + field : str + Either 'real', 'complex', 'pattern', or 'integer'. + symmetry : str + Either 'general', 'symmetric', 'skew-symmetric', or 'hermitian'. + + Examples + -------- + >>> from io import StringIO + >>> from scipy.io import mminfo + + >>> text = '''%%MatrixMarket matrix coordinate real general + ... 5 5 7 + ... 2 3 1.0 + ... 3 4 2.0 + ... 3 5 3.0 + ... 4 1 4.0 + ... 4 2 5.0 + ... 4 3 6.0 + ... 4 4 7.0 + ... ''' + + + ``mminfo(source)`` returns the number of rows, number of columns, + format, field type and symmetry attribute of the source file. + + >>> mminfo(StringIO(text)) + (5, 5, 7, 'coordinate', 'real', 'general') + """ + return MMFile.info(source) + +# ----------------------------------------------------------------------------- + + +def mmread(source, *, spmatrix=True): + """ + Reads the contents of a Matrix Market file-like 'source' into a matrix. + + Parameters + ---------- + source : str or file-like + Matrix Market filename (extensions .mtx, .mtz.gz) + or open file-like object. + spmatrix : bool, optional (default: True) + If ``True``, return sparse ``coo_matrix``. Otherwise return ``coo_array``. + + Returns + ------- + a : ndarray or coo_array or coo_matrix + Dense or sparse array depending on the matrix format in the + Matrix Market file. + + Examples + -------- + >>> from io import StringIO + >>> from scipy.io import mmread + + >>> text = '''%%MatrixMarket matrix coordinate real general + ... 5 5 7 + ... 2 3 1.0 + ... 3 4 2.0 + ... 3 5 3.0 + ... 4 1 4.0 + ... 4 2 5.0 + ... 4 3 6.0 + ... 4 4 7.0 + ... ''' + + ``mmread(source)`` returns the data as sparse matrix in COO format. 
+ + >>> m = mmread(StringIO(text), spmatrix=False) + >>> m + + >>> m.toarray() + array([[0., 0., 0., 0., 0.], + [0., 0., 1., 0., 0.], + [0., 0., 0., 2., 3.], + [4., 5., 6., 7., 0.], + [0., 0., 0., 0., 0.]]) + """ + return MMFile().read(source, spmatrix=spmatrix) + +# ----------------------------------------------------------------------------- + + +def mmwrite(target, a, comment='', field=None, precision=None, symmetry=None): + r""" + Writes the sparse or dense array `a` to Matrix Market file-like `target`. + + Parameters + ---------- + target : str or file-like + Matrix Market filename (extension .mtx) or open file-like object. + a : array like + Sparse or dense 2-D array. + comment : str, optional + Comments to be prepended to the Matrix Market file. + field : None or str, optional + Either 'real', 'complex', 'pattern', or 'integer'. + precision : None or int, optional + Number of digits to display for real or complex values. + symmetry : None or str, optional + Either 'general', 'symmetric', 'skew-symmetric', or 'hermitian'. + If symmetry is None the symmetry type of 'a' is determined by its + values. + + Returns + ------- + None + + Examples + -------- + >>> from io import BytesIO + >>> import numpy as np + >>> from scipy.sparse import coo_array + >>> from scipy.io import mmwrite + + Write a small NumPy array to a matrix market file. The file will be + written in the ``'array'`` format. + + >>> a = np.array([[1.0, 0, 0, 0], [0, 2.5, 0, 6.25]]) + >>> target = BytesIO() + >>> mmwrite(target, a) + >>> print(target.getvalue().decode('latin1')) + %%MatrixMarket matrix array real general + % + 2 4 + 1 + 0 + 0 + 2.5 + 0 + 0 + 0 + 6.25 + + Add a comment to the output file, and set the precision to 3. + + >>> target = BytesIO() + >>> mmwrite(target, a, comment='\n Some test data.\n', precision=3) + >>> print(target.getvalue().decode('latin1')) + %%MatrixMarket matrix array real general + % + % Some test data. + % + 2 4 + 1.00e+00 + 0.00e+00 + 0.00e+00 + 2.50e+00 + 0.00e+00 + 0.00e+00 + 0.00e+00 + 6.25e+00 + + Convert to a sparse matrix before calling ``mmwrite``. This will + result in the output format being ``'coordinate'`` rather than + ``'array'``. + + >>> target = BytesIO() + >>> mmwrite(target, coo_array(a), precision=3) + >>> print(target.getvalue().decode('latin1')) + %%MatrixMarket matrix coordinate real general + % + 2 4 3 + 1 1 1.00e+00 + 2 2 2.50e+00 + 2 4 6.25e+00 + + Write a complex Hermitian array to a matrix market file. Note that + only six values are actually written to the file; the other values + are implied by the symmetry. + + >>> z = np.array([[3, 1+2j, 4-3j], [1-2j, 1, -5j], [4+3j, 5j, 2.5]]) + >>> z + array([[ 3. +0.j, 1. +2.j, 4. -3.j], + [ 1. -2.j, 1. +0.j, -0. -5.j], + [ 4. +3.j, 0. 
+5.j, 2.5+0.j]]) + + >>> target = BytesIO() + >>> mmwrite(target, z, precision=2) + >>> print(target.getvalue().decode('latin1')) + %%MatrixMarket matrix array complex hermitian + % + 3 3 + 3.0e+00 0.0e+00 + 1.0e+00 -2.0e+00 + 4.0e+00 3.0e+00 + 1.0e+00 0.0e+00 + 0.0e+00 5.0e+00 + 2.5e+00 0.0e+00 + + """ + MMFile().write(target, a, comment, field, precision, symmetry) + + +############################################################################### +class MMFile: + __slots__ = ('_rows', + '_cols', + '_entries', + '_format', + '_field', + '_symmetry') + + @property + def rows(self): + return self._rows + + @property + def cols(self): + return self._cols + + @property + def entries(self): + return self._entries + + @property + def format(self): + return self._format + + @property + def field(self): + return self._field + + @property + def symmetry(self): + return self._symmetry + + @property + def has_symmetry(self): + return self._symmetry in (self.SYMMETRY_SYMMETRIC, + self.SYMMETRY_SKEW_SYMMETRIC, + self.SYMMETRY_HERMITIAN) + + # format values + FORMAT_COORDINATE = 'coordinate' + FORMAT_ARRAY = 'array' + FORMAT_VALUES = (FORMAT_COORDINATE, FORMAT_ARRAY) + + @classmethod + def _validate_format(self, format): + if format not in self.FORMAT_VALUES: + msg = f'unknown format type {format}, must be one of {self.FORMAT_VALUES}' + raise ValueError(msg) + + # field values + FIELD_INTEGER = 'integer' + FIELD_UNSIGNED = 'unsigned-integer' + FIELD_REAL = 'real' + FIELD_COMPLEX = 'complex' + FIELD_PATTERN = 'pattern' + FIELD_VALUES = (FIELD_INTEGER, FIELD_UNSIGNED, FIELD_REAL, FIELD_COMPLEX, + FIELD_PATTERN) + + @classmethod + def _validate_field(self, field): + if field not in self.FIELD_VALUES: + msg = f'unknown field type {field}, must be one of {self.FIELD_VALUES}' + raise ValueError(msg) + + # symmetry values + SYMMETRY_GENERAL = 'general' + SYMMETRY_SYMMETRIC = 'symmetric' + SYMMETRY_SKEW_SYMMETRIC = 'skew-symmetric' + SYMMETRY_HERMITIAN = 'hermitian' + SYMMETRY_VALUES = (SYMMETRY_GENERAL, SYMMETRY_SYMMETRIC, + SYMMETRY_SKEW_SYMMETRIC, SYMMETRY_HERMITIAN) + + @classmethod + def _validate_symmetry(self, symmetry): + if symmetry not in self.SYMMETRY_VALUES: + raise ValueError(f'unknown symmetry type {symmetry}, ' + f'must be one of {self.SYMMETRY_VALUES}') + + DTYPES_BY_FIELD = {FIELD_INTEGER: 'intp', + FIELD_UNSIGNED: 'uint64', + FIELD_REAL: 'd', + FIELD_COMPLEX: 'D', + FIELD_PATTERN: 'd'} + + # ------------------------------------------------------------------------- + @staticmethod + def reader(): + pass + + # ------------------------------------------------------------------------- + @staticmethod + def writer(): + pass + + # ------------------------------------------------------------------------- + @classmethod + def info(self, source): + """ + Return size, storage parameters from Matrix Market file-like 'source'. + + Parameters + ---------- + source : str or file-like + Matrix Market filename (extension .mtx) or open file-like object + + Returns + ------- + rows : int + Number of matrix rows. + cols : int + Number of matrix columns. + entries : int + Number of non-zero entries of a sparse matrix + or rows*cols for a dense matrix. + format : str + Either 'coordinate' or 'array'. + field : str + Either 'real', 'complex', 'pattern', or 'integer'. + symmetry : str + Either 'general', 'symmetric', 'skew-symmetric', or 'hermitian'. 
+ """ + + stream, close_it = self._open(source) + + try: + + # read and validate header line + line = stream.readline() + mmid, matrix, format, field, symmetry = \ + (asstr(part.strip()) for part in line.split()) + if not mmid.startswith('%%MatrixMarket'): + raise ValueError('source is not in Matrix Market format') + if not matrix.lower() == 'matrix': + raise ValueError("Problem reading file header: " + line) + + # http://math.nist.gov/MatrixMarket/formats.html + if format.lower() == 'array': + format = self.FORMAT_ARRAY + elif format.lower() == 'coordinate': + format = self.FORMAT_COORDINATE + + # skip comments + # line.startswith('%') + while line: + if line.lstrip() and line.lstrip()[0] in ['%', 37]: + line = stream.readline() + else: + break + + # skip empty lines + while not line.strip(): + line = stream.readline() + + split_line = line.split() + if format == self.FORMAT_ARRAY: + if not len(split_line) == 2: + raise ValueError("Header line not of length 2: " + + line.decode('ascii')) + rows, cols = map(int, split_line) + entries = rows * cols + else: + if not len(split_line) == 3: + raise ValueError("Header line not of length 3: " + + line.decode('ascii')) + rows, cols, entries = map(int, split_line) + + return (rows, cols, entries, format, field.lower(), + symmetry.lower()) + + finally: + if close_it: + stream.close() + + # ------------------------------------------------------------------------- + @staticmethod + def _open(filespec, mode='rb'): + """ Return an open file stream for reading based on source. + + If source is a file name, open it (after trying to find it with mtx and + gzipped mtx extensions). Otherwise, just return source. + + Parameters + ---------- + filespec : str or file-like + String giving file name or file-like object + mode : str, optional + Mode with which to open file, if `filespec` is a file name. + + Returns + ------- + fobj : file-like + Open file-like object. + close_it : bool + True if the calling function should close this file when done, + false otherwise. + """ + # If 'filespec' is path-like (str, pathlib.Path, os.DirEntry, other class + # implementing a '__fspath__' method), try to convert it to str. If this + # fails by throwing a 'TypeError', assume it's an open file handle and + # return it as-is. 
+ try: + filespec = os.fspath(filespec) + except TypeError: + return filespec, False + + # 'filespec' is definitely a str now + + # open for reading + if mode[0] == 'r': + + # determine filename plus extension + if not os.path.isfile(filespec): + if os.path.isfile(filespec+'.mtx'): + filespec = filespec + '.mtx' + elif os.path.isfile(filespec+'.mtx.gz'): + filespec = filespec + '.mtx.gz' + elif os.path.isfile(filespec+'.mtx.bz2'): + filespec = filespec + '.mtx.bz2' + # open filename + if filespec.endswith('.gz'): + import gzip + stream = gzip.open(filespec, mode) + elif filespec.endswith('.bz2'): + import bz2 + stream = bz2.BZ2File(filespec, 'rb') + else: + stream = open(filespec, mode) + + # open for writing + else: + if filespec[-4:] != '.mtx': + filespec = filespec + '.mtx' + stream = open(filespec, mode) + + return stream, True + + # ------------------------------------------------------------------------- + @staticmethod + def _get_symmetry(a): + m, n = a.shape + if m != n: + return MMFile.SYMMETRY_GENERAL + issymm = True + isskew = True + isherm = a.dtype.char in 'FD' + + # sparse input + if issparse(a): + # check if number of nonzero entries of lower and upper triangle + # matrix are equal + a = a.tocoo() + (row, col) = a.nonzero() + if (row < col).sum() != (row > col).sum(): + return MMFile.SYMMETRY_GENERAL + + # define iterator over symmetric pair entries + a = a.todok() + + def symm_iterator(): + for ((i, j), aij) in a.items(): + if i > j: + aji = a[j, i] + yield (aij, aji, False) + elif i == j: + yield (aij, aij, True) + + # non-sparse input + else: + # define iterator over symmetric pair entries + def symm_iterator(): + for j in range(n): + for i in range(j, n): + aij, aji = a[i][j], a[j][i] + yield (aij, aji, i == j) + + # check for symmetry + # yields aij, aji, is_diagonal + for (aij, aji, is_diagonal) in symm_iterator(): + if isskew and is_diagonal and aij != 0: + isskew = False + else: + if issymm and aij != aji: + issymm = False + with np.errstate(over="ignore"): + # This can give a warning for uint dtypes, so silence that + if isskew and aij != -aji: + isskew = False + if isherm and aij != conj(aji): + isherm = False + if not (issymm or isskew or isherm): + break + + # return symmetry value + if issymm: + return MMFile.SYMMETRY_SYMMETRIC + if isskew: + return MMFile.SYMMETRY_SKEW_SYMMETRIC + if isherm: + return MMFile.SYMMETRY_HERMITIAN + return MMFile.SYMMETRY_GENERAL + + # ------------------------------------------------------------------------- + @staticmethod + def _field_template(field, precision): + return {MMFile.FIELD_REAL: '%%.%ie\n' % precision, + MMFile.FIELD_INTEGER: '%i\n', + MMFile.FIELD_UNSIGNED: '%u\n', + MMFile.FIELD_COMPLEX: '%%.%ie %%.%ie\n' % + (precision, precision) + }.get(field, None) + + # ------------------------------------------------------------------------- + def __init__(self, **kwargs): + self._init_attrs(**kwargs) + + # ------------------------------------------------------------------------- + def read(self, source, *, spmatrix=True): + """ + Reads the contents of a Matrix Market file-like 'source' into a matrix. + + Parameters + ---------- + source : str or file-like + Matrix Market filename (extensions .mtx, .mtz.gz) + or open file object. + spmatrix : bool, optional (default: True) + If ``True``, return sparse ``coo_matrix``. Otherwise return ``coo_array``. + + Returns + ------- + a : ndarray or coo_array or coo_matrix + Dense or sparse array depending on the matrix format in the + Matrix Market file. 
+ """ + stream, close_it = self._open(source) + + try: + self._parse_header(stream) + data = self._parse_body(stream) + + finally: + if close_it: + stream.close() + if spmatrix and isinstance(data, coo_array): + data = coo_matrix(data) + return data + + + # ------------------------------------------------------------------------- + def write(self, target, a, comment='', field=None, precision=None, + symmetry=None): + """ + Writes sparse or dense array `a` to Matrix Market file-like `target`. + + Parameters + ---------- + target : str or file-like + Matrix Market filename (extension .mtx) or open file-like object. + a : array like + Sparse or dense 2-D array. + comment : str, optional + Comments to be prepended to the Matrix Market file. + field : None or str, optional + Either 'real', 'complex', 'pattern', or 'integer'. + precision : None or int, optional + Number of digits to display for real or complex values. + symmetry : None or str, optional + Either 'general', 'symmetric', 'skew-symmetric', or 'hermitian'. + If symmetry is None the symmetry type of 'a' is determined by its + values. + """ + + stream, close_it = self._open(target, 'wb') + + try: + self._write(stream, a, comment, field, precision, symmetry) + + finally: + if close_it: + stream.close() + else: + stream.flush() + + # ------------------------------------------------------------------------- + def _init_attrs(self, **kwargs): + """ + Initialize each attributes with the corresponding keyword arg value + or a default of None + """ + + attrs = self.__class__.__slots__ + public_attrs = [attr[1:] for attr in attrs] + invalid_keys = set(kwargs.keys()) - set(public_attrs) + + if invalid_keys: + raise ValueError(f"found {tuple(invalid_keys)} invalid keyword " + f"arguments, please only use {public_attrs}") + + for attr in attrs: + setattr(self, attr, kwargs.get(attr[1:], None)) + + # ------------------------------------------------------------------------- + def _parse_header(self, stream): + rows, cols, entries, format, field, symmetry = \ + self.__class__.info(stream) + self._init_attrs(rows=rows, cols=cols, entries=entries, format=format, + field=field, symmetry=symmetry) + + # ------------------------------------------------------------------------- + def _parse_body(self, stream): + rows, cols, entries, format, field, symm = (self.rows, self.cols, + self.entries, self.format, + self.field, self.symmetry) + + dtype = self.DTYPES_BY_FIELD.get(field, None) + + has_symmetry = self.has_symmetry + is_integer = field == self.FIELD_INTEGER + is_unsigned_integer = field == self.FIELD_UNSIGNED + is_complex = field == self.FIELD_COMPLEX + is_skew = symm == self.SYMMETRY_SKEW_SYMMETRIC + is_herm = symm == self.SYMMETRY_HERMITIAN + is_pattern = field == self.FIELD_PATTERN + + if format == self.FORMAT_ARRAY: + a = zeros((rows, cols), dtype=dtype) + line = 1 + i, j = 0, 0 + if is_skew: + a[i, j] = 0 + if i < rows - 1: + i += 1 + while line: + line = stream.readline() + # line.startswith('%') + if not line or line[0] in ['%', 37] or not line.strip(): + continue + if is_integer: + aij = int(line) + elif is_unsigned_integer: + aij = int(line) + elif is_complex: + aij = complex(*map(float, line.split())) + else: + aij = float(line) + a[i, j] = aij + if has_symmetry and i != j: + if is_skew: + a[j, i] = -aij + elif is_herm: + a[j, i] = conj(aij) + else: + a[j, i] = aij + if i < rows-1: + i = i + 1 + else: + j = j + 1 + if not has_symmetry: + i = 0 + else: + i = j + if is_skew: + a[i, j] = 0 + if i < rows-1: + i += 1 + + if is_skew: + if not (i 
in [0, j] and j == cols - 1): + raise ValueError("Parse error, did not read all lines.") + else: + if not (i in [0, j] and j == cols): + raise ValueError("Parse error, did not read all lines.") + + elif format == self.FORMAT_COORDINATE: + # Read sparse COOrdinate format + + if entries == 0: + # empty matrix + return coo_array((rows, cols), dtype=dtype) + + I = zeros(entries, dtype='intc') + J = zeros(entries, dtype='intc') + if is_pattern: + V = ones(entries, dtype='int8') + elif is_integer: + V = zeros(entries, dtype='intp') + elif is_unsigned_integer: + V = zeros(entries, dtype='uint64') + elif is_complex: + V = zeros(entries, dtype='complex') + else: + V = zeros(entries, dtype='float') + + entry_number = 0 + for line in stream: + # line.startswith('%') + if not line or line[0] in ['%', 37] or not line.strip(): + continue + + if entry_number+1 > entries: + raise ValueError("'entries' in header is smaller than " + "number of entries") + l = line.split() + I[entry_number], J[entry_number] = map(int, l[:2]) + + if not is_pattern: + if is_integer: + V[entry_number] = int(l[2]) + elif is_unsigned_integer: + V[entry_number] = int(l[2]) + elif is_complex: + V[entry_number] = complex(*map(float, l[2:])) + else: + V[entry_number] = float(l[2]) + entry_number += 1 + if entry_number < entries: + raise ValueError("'entries' in header is larger than " + "number of entries") + + I -= 1 # adjust indices (base 1 -> base 0) + J -= 1 + + if has_symmetry: + mask = (I != J) # off diagonal mask + od_I = I[mask] + od_J = J[mask] + od_V = V[mask] + + I = concatenate((I, od_J)) + J = concatenate((J, od_I)) + + if is_skew: + od_V *= -1 + elif is_herm: + od_V = od_V.conjugate() + + V = concatenate((V, od_V)) + + a = coo_array((V, (I, J)), shape=(rows, cols), dtype=dtype) + else: + raise NotImplementedError(format) + + return a + + # ------------------------------------------------------------------------ + def _write(self, stream, a, comment='', field=None, precision=None, + symmetry=None): + if isinstance(a, list) or isinstance(a, ndarray) or \ + isinstance(a, tuple) or hasattr(a, '__array__'): + rep = self.FORMAT_ARRAY + a = asarray(a) + if len(a.shape) != 2: + raise ValueError('Expected 2 dimensional array') + rows, cols = a.shape + + if field is not None: + + if field == self.FIELD_INTEGER: + if not can_cast(a.dtype, 'intp'): + raise OverflowError("mmwrite does not support integer " + "dtypes larger than native 'intp'.") + a = a.astype('intp') + elif field == self.FIELD_REAL: + if a.dtype.char not in 'fd': + a = a.astype('d') + elif field == self.FIELD_COMPLEX: + if a.dtype.char not in 'FD': + a = a.astype('D') + + else: + if not issparse(a): + raise ValueError(f'unknown matrix type: {type(a)}') + + rep = 'coordinate' + rows, cols = a.shape + + typecode = a.dtype.char + + if precision is None: + if typecode in 'fF': + precision = 8 + else: + precision = 16 + if field is None: + kind = a.dtype.kind + if kind == 'i': + if not can_cast(a.dtype, 'intp'): + raise OverflowError("mmwrite does not support integer " + "dtypes larger than native 'intp'.") + field = 'integer' + elif kind == 'f': + field = 'real' + elif kind == 'c': + field = 'complex' + elif kind == 'u': + field = 'unsigned-integer' + else: + raise TypeError('unexpected dtype kind ' + kind) + + if symmetry is None: + symmetry = self._get_symmetry(a) + + # validate rep, field, and symmetry + self.__class__._validate_format(rep) + self.__class__._validate_field(field) + self.__class__._validate_symmetry(symmetry) + + # write initial header line + data = 
f'%%MatrixMarket matrix {rep} {field} {symmetry}\n' + stream.write(data.encode('latin1')) + + # write comments + for line in comment.split('\n'): + data = f'%{line}\n' + stream.write(data.encode('latin1')) + + template = self._field_template(field, precision) + # write dense format + if rep == self.FORMAT_ARRAY: + # write shape spec + data = '%i %i\n' % (rows, cols) + stream.write(data.encode('latin1')) + + if field in (self.FIELD_INTEGER, self.FIELD_REAL, + self.FIELD_UNSIGNED): + if symmetry == self.SYMMETRY_GENERAL: + for j in range(cols): + for i in range(rows): + data = template % a[i, j] + stream.write(data.encode('latin1')) + + elif symmetry == self.SYMMETRY_SKEW_SYMMETRIC: + for j in range(cols): + for i in range(j + 1, rows): + data = template % a[i, j] + stream.write(data.encode('latin1')) + + else: + for j in range(cols): + for i in range(j, rows): + data = template % a[i, j] + stream.write(data.encode('latin1')) + + elif field == self.FIELD_COMPLEX: + + if symmetry == self.SYMMETRY_GENERAL: + for j in range(cols): + for i in range(rows): + aij = a[i, j] + data = template % (real(aij), imag(aij)) + stream.write(data.encode('latin1')) + else: + for j in range(cols): + for i in range(j, rows): + aij = a[i, j] + data = template % (real(aij), imag(aij)) + stream.write(data.encode('latin1')) + + elif field == self.FIELD_PATTERN: + raise ValueError('pattern type inconsisted with dense format') + + else: + raise TypeError(f'Unknown field type {field}') + + # write sparse format + else: + coo = a.tocoo() # convert to COOrdinate format + + # if symmetry format used, remove values above main diagonal + if symmetry != self.SYMMETRY_GENERAL: + lower_triangle_mask = coo.row >= coo.col + coo = coo_array((coo.data[lower_triangle_mask], + (coo.row[lower_triangle_mask], + coo.col[lower_triangle_mask])), + shape=coo.shape) + + # write shape spec + data = '%i %i %i\n' % (rows, cols, coo.nnz) + stream.write(data.encode('latin1')) + + template = self._field_template(field, precision-1) + + if field == self.FIELD_PATTERN: + for r, c in zip(coo.row+1, coo.col+1): + data = "%i %i\n" % (r, c) + stream.write(data.encode('latin1')) + elif field in (self.FIELD_INTEGER, self.FIELD_REAL, + self.FIELD_UNSIGNED): + for r, c, d in zip(coo.row+1, coo.col+1, coo.data): + data = ("%i %i " % (r, c)) + (template % d) + stream.write(data.encode('latin1')) + elif field == self.FIELD_COMPLEX: + for r, c, d in zip(coo.row+1, coo.col+1, coo.data): + data = ("%i %i " % (r, c)) + (template % (d.real, d.imag)) + stream.write(data.encode('latin1')) + else: + raise TypeError(f'Unknown field type {field}') + + +def _is_fromfile_compatible(stream): + """ + Check whether `stream` is compatible with numpy.fromfile. + + Passing a gzipped file object to ``fromfile/fromstring`` doesn't work with + Python 3. 
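Editorial note, not part of the patched file: the MMFile.read/write machinery above is what backs the public scipy.io.mmread/mmwrite helpers. A minimal round-trip sketch follows; the file name 'example.mtx' is arbitrary and the snippet assumes write access to the working directory.

import numpy as np
from scipy.io import mmread, mmwrite
from scipy.sparse import coo_matrix

# Write a tiny sparse matrix; field and symmetry are inferred from the data,
# and the '.mtx' extension is appended automatically if missing.
a = coo_matrix(np.array([[0, 1], [2, 0]]))
mmwrite('example.mtx', a, comment='tiny test matrix')

# Read it back; coordinate-format files come back as a sparse COO object.
b = mmread('example.mtx')
assert (b.toarray() == a.toarray()).all()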
+ """ + + bad_cls = [] + try: + import gzip + bad_cls.append(gzip.GzipFile) + except ImportError: + pass + try: + import bz2 + bad_cls.append(bz2.BZ2File) + except ImportError: + pass + + bad_cls = tuple(bad_cls) + return not isinstance(stream, bad_cls) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/_netcdf.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/_netcdf.py new file mode 100644 index 0000000000000000000000000000000000000000..3f4bddd0126facebd39c7ae996eb885a630cf550 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/_netcdf.py @@ -0,0 +1,1094 @@ +""" +NetCDF reader/writer module. + +This module is used to read and create NetCDF files. NetCDF files are +accessed through the `netcdf_file` object. Data written to and from NetCDF +files are contained in `netcdf_variable` objects. Attributes are given +as member variables of the `netcdf_file` and `netcdf_variable` objects. + +This module implements the Scientific.IO.NetCDF API to read and create +NetCDF files. The same API is also used in the PyNIO and pynetcdf +modules, allowing these modules to be used interchangeably when working +with NetCDF files. + +Only NetCDF3 is supported here; for NetCDF4 see +`netCDF4-python `__, +which has a similar API. + +""" + +# TODO: +# * properly implement ``_FillValue``. +# * fix character variables. +# * implement PAGESIZE for Python 2.6? + +# The Scientific.IO.NetCDF API allows attributes to be added directly to +# instances of ``netcdf_file`` and ``netcdf_variable``. To differentiate +# between user-set attributes and instance attributes, user-set attributes +# are automatically stored in the ``_attributes`` attribute by overloading +#``__setattr__``. This is the reason why the code sometimes uses +#``obj.__dict__['key'] = value``, instead of simply ``obj.key = value``; +# otherwise the key would be inserted into userspace attributes. + + +__all__ = ['netcdf_file', 'netcdf_variable'] + + +import warnings +import weakref +from operator import mul +from platform import python_implementation + +import mmap as mm + +import numpy as np +from numpy import frombuffer, dtype, empty, array, asarray +from numpy import little_endian as LITTLE_ENDIAN +from functools import reduce + + +IS_PYPY = python_implementation() == 'PyPy' + +ABSENT = b'\x00\x00\x00\x00\x00\x00\x00\x00' +ZERO = b'\x00\x00\x00\x00' +NC_BYTE = b'\x00\x00\x00\x01' +NC_CHAR = b'\x00\x00\x00\x02' +NC_SHORT = b'\x00\x00\x00\x03' +NC_INT = b'\x00\x00\x00\x04' +NC_FLOAT = b'\x00\x00\x00\x05' +NC_DOUBLE = b'\x00\x00\x00\x06' +NC_DIMENSION = b'\x00\x00\x00\n' +NC_VARIABLE = b'\x00\x00\x00\x0b' +NC_ATTRIBUTE = b'\x00\x00\x00\x0c' +FILL_BYTE = b'\x81' +FILL_CHAR = b'\x00' +FILL_SHORT = b'\x80\x01' +FILL_INT = b'\x80\x00\x00\x01' +FILL_FLOAT = b'\x7C\xF0\x00\x00' +FILL_DOUBLE = b'\x47\x9E\x00\x00\x00\x00\x00\x00' + +TYPEMAP = {NC_BYTE: ('b', 1), + NC_CHAR: ('c', 1), + NC_SHORT: ('h', 2), + NC_INT: ('i', 4), + NC_FLOAT: ('f', 4), + NC_DOUBLE: ('d', 8)} + +FILLMAP = {NC_BYTE: FILL_BYTE, + NC_CHAR: FILL_CHAR, + NC_SHORT: FILL_SHORT, + NC_INT: FILL_INT, + NC_FLOAT: FILL_FLOAT, + NC_DOUBLE: FILL_DOUBLE} + +REVERSE = {('b', 1): NC_BYTE, + ('B', 1): NC_CHAR, + ('c', 1): NC_CHAR, + ('h', 2): NC_SHORT, + ('i', 4): NC_INT, + ('f', 4): NC_FLOAT, + ('d', 8): NC_DOUBLE, + + # these come from asarray(1).dtype.char and asarray('foo').dtype.char, + # used when getting the types from generic attributes. 
+ ('l', 4): NC_INT, + ('S', 1): NC_CHAR} + + +class netcdf_file: + """ + A file object for NetCDF data. + + A `netcdf_file` object has two standard attributes: `dimensions` and + `variables`. The values of both are dictionaries, mapping dimension + names to their associated lengths and variable names to variables, + respectively. Application programs should never modify these + dictionaries. + + All other attributes correspond to global attributes defined in the + NetCDF file. Global file attributes are created by assigning to an + attribute of the `netcdf_file` object. + + Parameters + ---------- + filename : string or file-like + string -> filename + mode : {'r', 'w', 'a'}, optional + read-write-append mode, default is 'r' + mmap : None or bool, optional + Whether to mmap `filename` when reading. Default is True + when `filename` is a file name, False when `filename` is a + file-like object. Note that when mmap is in use, data arrays + returned refer directly to the mmapped data on disk, and the + file cannot be closed as long as references to it exist. + version : {1, 2}, optional + version of netcdf to read / write, where 1 means *Classic + format* and 2 means *64-bit offset format*. Default is 1. See + `here `__ + for more info. + maskandscale : bool, optional + Whether to automatically scale and/or mask data based on attributes. + Default is False. + + Notes + ----- + The major advantage of this module over other modules is that it doesn't + require the code to be linked to the NetCDF libraries. This module is + derived from `pupynere `_. + + NetCDF files are a self-describing binary data format. The file contains + metadata that describes the dimensions and variables in the file. More + details about NetCDF files can be found `here + `__. There + are three main sections to a NetCDF data structure: + + 1. Dimensions + 2. Variables + 3. Attributes + + The dimensions section records the name and length of each dimension used + by the variables. The variables would then indicate which dimensions it + uses and any attributes such as data units, along with containing the data + values for the variable. It is good practice to include a + variable that is the same name as a dimension to provide the values for + that axes. Lastly, the attributes section would contain additional + information such as the name of the file creator or the instrument used to + collect the data. + + When writing data to a NetCDF file, there is often the need to indicate the + 'record dimension'. A record dimension is the unbounded dimension for a + variable. For example, a temperature variable may have dimensions of + latitude, longitude and time. If one wants to add more temperature data to + the NetCDF file as time progresses, then the temperature variable should + have the time dimension flagged as the record dimension. + + In addition, the NetCDF file header contains the position of the data in + the file, so access can be done in an efficient manner without loading + unnecessary data into memory. It uses the ``mmap`` module to create + Numpy arrays mapped to the data on disk, for the same purpose. + + Note that when `netcdf_file` is used to open a file with mmap=True + (default for read-only), arrays returned by it refer to data + directly on the disk. The file should not be closed, and cannot be cleanly + closed when asked, if such arrays are alive. You may want to copy data arrays + obtained from mmapped Netcdf file if they are to be processed after the file + is closed, see the example below. 
+ + Examples + -------- + To create a NetCDF file: + + >>> from scipy.io import netcdf_file + >>> import numpy as np + >>> f = netcdf_file('simple.nc', 'w') + >>> f.history = 'Created for a test' + >>> f.createDimension('time', 10) + >>> time = f.createVariable('time', 'i', ('time',)) + >>> time[:] = np.arange(10) + >>> time.units = 'days since 2008-01-01' + >>> f.close() + + Note the assignment of ``arange(10)`` to ``time[:]``. Exposing the slice + of the time variable allows for the data to be set in the object, rather + than letting ``arange(10)`` overwrite the ``time`` variable. + + To read the NetCDF file we just created: + + >>> from scipy.io import netcdf_file + >>> f = netcdf_file('simple.nc', 'r') + >>> print(f.history) + b'Created for a test' + >>> time = f.variables['time'] + >>> print(time.units) + b'days since 2008-01-01' + >>> print(time.shape) + (10,) + >>> print(time[-1]) + 9 + + NetCDF files, when opened read-only, return arrays that refer + directly to memory-mapped data on disk: + + >>> data = time[:] + + If the data is to be processed after the file is closed, it needs + to be copied to main memory: + + >>> data = time[:].copy() + >>> del time + >>> f.close() + >>> data.mean() + 4.5 + + A NetCDF file can also be used as context manager: + + >>> from scipy.io import netcdf_file + >>> with netcdf_file('simple.nc', 'r') as f: + ... print(f.history) + b'Created for a test' + + """ + def __init__(self, filename, mode='r', mmap=None, version=1, + maskandscale=False): + """Initialize netcdf_file from fileobj (str or file-like).""" + if mode not in 'rwa': + raise ValueError("Mode must be either 'r', 'w' or 'a'.") + + if hasattr(filename, 'seek'): # file-like + self.fp = filename + self.filename = 'None' + if mmap is None: + mmap = False + elif mmap and not hasattr(filename, 'fileno'): + raise ValueError('Cannot use file object for mmap') + else: # maybe it's a string + self.filename = filename + omode = 'r+' if mode == 'a' else mode + self.fp = open(self.filename, f'{omode}b') + if mmap is None: + # Mmapped files on PyPy cannot be usually closed + # before the GC runs, so it's better to use mmap=False + # as the default. + mmap = (not IS_PYPY) + + if mode != 'r': + # Cannot read write-only files + mmap = False + + self.use_mmap = mmap + self.mode = mode + self.version_byte = version + self.maskandscale = maskandscale + + self.dimensions = {} + self.variables = {} + + self._dims = [] + self._recs = 0 + self._recsize = 0 + + self._mm = None + self._mm_buf = None + if self.use_mmap: + self._mm = mm.mmap(self.fp.fileno(), 0, access=mm.ACCESS_READ) + self._mm_buf = np.frombuffer(self._mm, dtype=np.int8) + + self._attributes = {} + + if mode in 'ra': + self._read() + + def __setattr__(self, attr, value): + # Store user defined attributes in a separate dict, + # so we can save them to file later. + try: + self._attributes[attr] = value + except AttributeError: + pass + self.__dict__[attr] = value + + def close(self): + """Closes the NetCDF file.""" + if hasattr(self, 'fp') and not self.fp.closed: + try: + self.flush() + finally: + self.variables = {} + if self._mm_buf is not None: + ref = weakref.ref(self._mm_buf) + self._mm_buf = None + if ref() is None: + # self._mm_buf is gc'd, and we can close the mmap + self._mm.close() + else: + # we cannot close self._mm, since self._mm_buf is + # alive and there may still be arrays referring to it + warnings.warn( + "Cannot close a netcdf_file opened with mmap=True, when " + "netcdf_variables or arrays referring to its data still " + "exist. 
All data arrays obtained from such files refer " + "directly to data on disk, and must be copied before the " + "file can be cleanly closed. " + "(See netcdf_file docstring for more information on mmap.)", + category=RuntimeWarning, stacklevel=2, + ) + self._mm = None + self.fp.close() + __del__ = close + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + self.close() + + def createDimension(self, name, length): + """ + Adds a dimension to the Dimension section of the NetCDF data structure. + + Note that this function merely adds a new dimension that the variables can + reference. The values for the dimension, if desired, should be added as + a variable using `createVariable`, referring to this dimension. + + Parameters + ---------- + name : str + Name of the dimension (Eg, 'lat' or 'time'). + length : int + Length of the dimension. + + See Also + -------- + createVariable + + """ + if length is None and self._dims: + raise ValueError("Only first dimension may be unlimited!") + + self.dimensions[name] = length + self._dims.append(name) + + def createVariable(self, name, type, dimensions): + """ + Create an empty variable for the `netcdf_file` object, specifying its data + type and the dimensions it uses. + + Parameters + ---------- + name : str + Name of the new variable. + type : dtype or str + Data type of the variable. + dimensions : sequence of str + List of the dimension names used by the variable, in the desired order. + + Returns + ------- + variable : netcdf_variable + The newly created ``netcdf_variable`` object. + This object has also been added to the `netcdf_file` object as well. + + See Also + -------- + createDimension + + Notes + ----- + Any dimensions to be used by the variable should already exist in the + NetCDF data structure or should be created by `createDimension` prior to + creating the NetCDF variable. + + """ + shape = tuple([self.dimensions[dim] for dim in dimensions]) + shape_ = tuple([dim or 0 for dim in shape]) # replace None with 0 for NumPy + + type = dtype(type) + typecode, size = type.char, type.itemsize + if (typecode, size) not in REVERSE: + raise ValueError(f"NetCDF 3 does not support type {type}") + + # convert to big endian always for NetCDF 3 + data = empty(shape_, dtype=type.newbyteorder("B")) + self.variables[name] = netcdf_variable( + data, typecode, size, shape, dimensions, + maskandscale=self.maskandscale) + return self.variables[name] + + def flush(self): + """ + Perform a sync-to-disk flush if the `netcdf_file` object is in write mode. + + See Also + -------- + sync : Identical function + + """ + if hasattr(self, 'mode') and self.mode in 'wa': + self._write() + sync = flush + + def _write(self): + self.fp.seek(0) + self.fp.write(b'CDF') + self.fp.write(array(self.version_byte, '>b').tobytes()) + + # Write headers and data. + self._write_numrecs() + self._write_dim_array() + self._write_gatt_array() + self._write_var_array() + + def _write_numrecs(self): + # Get highest record count from all record variables. 
+ for var in self.variables.values(): + if var.isrec and len(var.data) > self._recs: + self.__dict__['_recs'] = len(var.data) + self._pack_int(self._recs) + + def _write_dim_array(self): + if self.dimensions: + self.fp.write(NC_DIMENSION) + self._pack_int(len(self.dimensions)) + for name in self._dims: + self._pack_string(name) + length = self.dimensions[name] + self._pack_int(length or 0) # replace None with 0 for record dimension + else: + self.fp.write(ABSENT) + + def _write_gatt_array(self): + self._write_att_array(self._attributes) + + def _write_att_array(self, attributes): + if attributes: + self.fp.write(NC_ATTRIBUTE) + self._pack_int(len(attributes)) + for name, values in attributes.items(): + self._pack_string(name) + self._write_att_values(values) + else: + self.fp.write(ABSENT) + + def _write_var_array(self): + if self.variables: + self.fp.write(NC_VARIABLE) + self._pack_int(len(self.variables)) + + # Sort variable names non-recs first, then recs. + def sortkey(n): + v = self.variables[n] + if v.isrec: + return (-1,) + return v._shape + variables = sorted(self.variables, key=sortkey, reverse=True) + + # Set the metadata for all variables. + for name in variables: + self._write_var_metadata(name) + # Now that we have the metadata, we know the vsize of + # each record variable, so we can calculate recsize. + self.__dict__['_recsize'] = sum([ + var._vsize for var in self.variables.values() + if var.isrec]) + # Set the data for all variables. + for name in variables: + self._write_var_data(name) + else: + self.fp.write(ABSENT) + + def _write_var_metadata(self, name): + var = self.variables[name] + + self._pack_string(name) + self._pack_int(len(var.dimensions)) + for dimname in var.dimensions: + dimid = self._dims.index(dimname) + self._pack_int(dimid) + + self._write_att_array(var._attributes) + + nc_type = REVERSE[var.typecode(), var.itemsize()] + self.fp.write(nc_type) + + if not var.isrec: + vsize = var.data.size * var.data.itemsize + vsize += -vsize % 4 + else: # record variable + try: + vsize = var.data[0].size * var.data.itemsize + except IndexError: + vsize = 0 + rec_vars = len([v for v in self.variables.values() + if v.isrec]) + if rec_vars > 1: + vsize += -vsize % 4 + self.variables[name].__dict__['_vsize'] = vsize + self._pack_int(vsize) + + # Pack a bogus begin, and set the real value later. + self.variables[name].__dict__['_begin'] = self.fp.tell() + self._pack_begin(0) + + def _write_var_data(self, name): + var = self.variables[name] + + # Set begin in file header. + the_beguine = self.fp.tell() + self.fp.seek(var._begin) + self._pack_begin(the_beguine) + self.fp.seek(the_beguine) + + # Write data. + if not var.isrec: + self.fp.write(var.data.tobytes()) + count = var.data.size * var.data.itemsize + self._write_var_padding(var, var._vsize - count) + else: # record variable + # Handle rec vars with shape[0] < nrecs. + if self._recs > len(var.data): + shape = (self._recs,) + var.data.shape[1:] + # Resize in-place does not always work since + # the array might not be single-segment + try: + var.data.resize(shape) + except ValueError: + dtype = var.data.dtype + var.__dict__['data'] = np.resize(var.data, shape).astype(dtype) + + pos0 = pos = self.fp.tell() + for rec in var.data: + # Apparently scalars cannot be converted to big endian. If we + # try to convert a ``=i4`` scalar to, say, '>i4' the dtype + # will remain as ``=i4``. 
+ if not rec.shape and (rec.dtype.byteorder == '<' or + (rec.dtype.byteorder == '=' and LITTLE_ENDIAN)): + rec = rec.byteswap() + self.fp.write(rec.tobytes()) + # Padding + count = rec.size * rec.itemsize + self._write_var_padding(var, var._vsize - count) + pos += self._recsize + self.fp.seek(pos) + self.fp.seek(pos0 + var._vsize) + + def _write_var_padding(self, var, size): + encoded_fill_value = var._get_encoded_fill_value() + num_fills = size // len(encoded_fill_value) + self.fp.write(encoded_fill_value * num_fills) + + def _write_att_values(self, values): + if hasattr(values, 'dtype'): + nc_type = REVERSE[values.dtype.char, values.dtype.itemsize] + else: + types = [(int, NC_INT), (float, NC_FLOAT), (str, NC_CHAR)] + + # bytes index into scalars in py3k. Check for "string" types + if isinstance(values, (str, bytes)): + sample = values + else: + try: + sample = values[0] # subscriptable? + except TypeError: + sample = values # scalar + + for class_, nc_type in types: + if isinstance(sample, class_): + break + + typecode, size = TYPEMAP[nc_type] + dtype_ = f'>{typecode}' + # asarray() dies with bytes and '>c' in py3k. Change to 'S' + dtype_ = 'S' if dtype_ == '>c' else dtype_ + + values = asarray(values, dtype=dtype_) + + self.fp.write(nc_type) + + if values.dtype.char == 'S': + nelems = values.itemsize + else: + nelems = values.size + self._pack_int(nelems) + + if not values.shape and (values.dtype.byteorder == '<' or + (values.dtype.byteorder == '=' and LITTLE_ENDIAN)): + values = values.byteswap() + self.fp.write(values.tobytes()) + count = values.size * values.itemsize + self.fp.write(b'\x00' * (-count % 4)) # pad + + def _read(self): + # Check magic bytes and version + magic = self.fp.read(3) + if not magic == b'CDF': + raise TypeError(f"Error: {self.filename} is not a valid NetCDF 3 file") + self.__dict__['version_byte'] = frombuffer(self.fp.read(1), '>b')[0] + + # Read file headers and set data. 
+ self._read_numrecs() + self._read_dim_array() + self._read_gatt_array() + self._read_var_array() + + def _read_numrecs(self): + self.__dict__['_recs'] = self._unpack_int() + + def _read_dim_array(self): + header = self.fp.read(4) + if header not in [ZERO, NC_DIMENSION]: + raise ValueError("Unexpected header.") + count = self._unpack_int() + + for dim in range(count): + name = self._unpack_string().decode('latin1') + length = self._unpack_int() or None # None for record dimension + self.dimensions[name] = length + self._dims.append(name) # preserve order + + def _read_gatt_array(self): + for k, v in self._read_att_array().items(): + self.__setattr__(k, v) + + def _read_att_array(self): + header = self.fp.read(4) + if header not in [ZERO, NC_ATTRIBUTE]: + raise ValueError("Unexpected header.") + count = self._unpack_int() + + attributes = {} + for attr in range(count): + name = self._unpack_string().decode('latin1') + attributes[name] = self._read_att_values() + return attributes + + def _read_var_array(self): + header = self.fp.read(4) + if header not in [ZERO, NC_VARIABLE]: + raise ValueError("Unexpected header.") + + begin = 0 + dtypes = {'names': [], 'formats': []} + rec_vars = [] + count = self._unpack_int() + for var in range(count): + (name, dimensions, shape, attributes, + typecode, size, dtype_, begin_, vsize) = self._read_var() + # https://www.unidata.ucar.edu/software/netcdf/guide_toc.html + # Note that vsize is the product of the dimension lengths + # (omitting the record dimension) and the number of bytes + # per value (determined from the type), increased to the + # next multiple of 4, for each variable. If a record + # variable, this is the amount of space per record. The + # netCDF "record size" is calculated as the sum of the + # vsize's of all the record variables. + # + # The vsize field is actually redundant, because its value + # may be computed from other information in the header. The + # 32-bit vsize field is not large enough to contain the size + # of variables that require more than 2^32 - 4 bytes, so + # 2^32 - 1 is used in the vsize field for such variables. + if shape and shape[0] is None: # record variable + rec_vars.append(name) + # The netCDF "record size" is calculated as the sum of + # the vsize's of all the record variables. + self.__dict__['_recsize'] += vsize + if begin == 0: + begin = begin_ + dtypes['names'].append(name) + dtypes['formats'].append(str(shape[1:]) + dtype_) + + # Handle padding with a virtual variable. + if typecode in 'bch': + actual_size = reduce(mul, (1,) + shape[1:]) * size + padding = -actual_size % 4 + if padding: + dtypes['names'].append('_padding_%d' % var) + dtypes['formats'].append('(%d,)>b' % padding) + + # Data will be set later. + data = None + else: # not a record variable + # Calculate size to avoid problems with vsize (above) + a_size = reduce(mul, shape, 1) * size + if self.use_mmap: + data = self._mm_buf[begin_:begin_+a_size].view(dtype=dtype_) + data.shape = shape + else: + pos = self.fp.tell() + self.fp.seek(begin_) + data = frombuffer(self.fp.read(a_size), dtype=dtype_ + ).copy() + data.shape = shape + self.fp.seek(pos) + + # Add variable. + self.variables[name] = netcdf_variable( + data, typecode, size, shape, dimensions, attributes, + maskandscale=self.maskandscale) + + if rec_vars: + # Remove padding when only one record variable. + if len(rec_vars) == 1: + dtypes['names'] = dtypes['names'][:1] + dtypes['formats'] = dtypes['formats'][:1] + + # Build rec array. 
+ if self.use_mmap: + buf = self._mm_buf[begin:begin+self._recs*self._recsize] + rec_array = buf.view(dtype=dtypes) + rec_array.shape = (self._recs,) + else: + pos = self.fp.tell() + self.fp.seek(begin) + rec_array = frombuffer(self.fp.read(self._recs*self._recsize), + dtype=dtypes).copy() + rec_array.shape = (self._recs,) + self.fp.seek(pos) + + for var in rec_vars: + self.variables[var].__dict__['data'] = rec_array[var] + + def _read_var(self): + name = self._unpack_string().decode('latin1') + dimensions = [] + shape = [] + dims = self._unpack_int() + + for i in range(dims): + dimid = self._unpack_int() + dimname = self._dims[dimid] + dimensions.append(dimname) + dim = self.dimensions[dimname] + shape.append(dim) + dimensions = tuple(dimensions) + shape = tuple(shape) + + attributes = self._read_att_array() + nc_type = self.fp.read(4) + vsize = self._unpack_int() + begin = [self._unpack_int, self._unpack_int64][self.version_byte-1]() + + typecode, size = TYPEMAP[nc_type] + dtype_ = f'>{typecode}' + + return name, dimensions, shape, attributes, typecode, size, dtype_, begin, vsize + + def _read_att_values(self): + nc_type = self.fp.read(4) + n = self._unpack_int() + + typecode, size = TYPEMAP[nc_type] + + count = n*size + values = self.fp.read(int(count)) + self.fp.read(-count % 4) # read padding + + if typecode != 'c': + values = frombuffer(values, dtype=f'>{typecode}').copy() + if values.shape == (1,): + values = values[0] + else: + values = values.rstrip(b'\x00') + return values + + def _pack_begin(self, begin): + if self.version_byte == 1: + self._pack_int(begin) + elif self.version_byte == 2: + self._pack_int64(begin) + + def _pack_int(self, value): + self.fp.write(array(value, '>i').tobytes()) + _pack_int32 = _pack_int + + def _unpack_int(self): + return int(frombuffer(self.fp.read(4), '>i')[0]) + _unpack_int32 = _unpack_int + + def _pack_int64(self, value): + self.fp.write(array(value, '>q').tobytes()) + + def _unpack_int64(self): + return frombuffer(self.fp.read(8), '>q')[0] + + def _pack_string(self, s): + count = len(s) + self._pack_int(count) + self.fp.write(s.encode('latin1')) + self.fp.write(b'\x00' * (-count % 4)) # pad + + def _unpack_string(self): + count = self._unpack_int() + s = self.fp.read(count).rstrip(b'\x00') + self.fp.read(-count % 4) # read padding + return s + + +class netcdf_variable: + """ + A data object for netcdf files. + + `netcdf_variable` objects are constructed by calling the method + `netcdf_file.createVariable` on the `netcdf_file` object. `netcdf_variable` + objects behave much like array objects defined in numpy, except that their + data resides in a file. Data is read by indexing and written by assigning + to an indexed subset; the entire array can be accessed by the index ``[:]`` + or (for scalars) by using the methods `getValue` and `assignValue`. + `netcdf_variable` objects also have attribute `shape` with the same meaning + as for arrays, but the shape cannot be modified. There is another read-only + attribute `dimensions`, whose value is the tuple of dimension names. + + All other attributes correspond to variable attributes defined in + the NetCDF file. Variable attributes are created by assigning to an + attribute of the `netcdf_variable` object. + + Parameters + ---------- + data : array_like + The data array that holds the values for the variable. + Typically, this is initialized as empty, but with the proper shape. + typecode : dtype character code + Desired data-type for the data array. 
+ size : int + Desired element size for the data array. + shape : sequence of ints + The shape of the array. This should match the lengths of the + variable's dimensions. + dimensions : sequence of strings + The names of the dimensions used by the variable. Must be in the + same order of the dimension lengths given by `shape`. + attributes : dict, optional + Attribute values (any type) keyed by string names. These attributes + become attributes for the netcdf_variable object. + maskandscale : bool, optional + Whether to automatically scale and/or mask data based on attributes. + Default is False. + + + Attributes + ---------- + dimensions : list of str + List of names of dimensions used by the variable object. + isrec, shape + Properties + + See also + -------- + isrec, shape + + """ + def __init__(self, data, typecode, size, shape, dimensions, + attributes=None, + maskandscale=False): + self.data = data + self._typecode = typecode + self._size = size + self._shape = shape + self.dimensions = dimensions + self.maskandscale = maskandscale + + self._attributes = attributes or {} + for k, v in self._attributes.items(): + self.__dict__[k] = v + + def __setattr__(self, attr, value): + # Store user defined attributes in a separate dict, + # so we can save them to file later. + try: + self._attributes[attr] = value + except AttributeError: + pass + self.__dict__[attr] = value + + def isrec(self): + """Returns whether the variable has a record dimension or not. + + A record dimension is a dimension along which additional data could be + easily appended in the netcdf data structure without much rewriting of + the data file. This attribute is a read-only property of the + `netcdf_variable`. + + """ + return bool(self.data.shape) and not self._shape[0] + isrec = property(isrec) + + def shape(self): + """Returns the shape tuple of the data variable. + + This is a read-only attribute and can not be modified in the + same manner of other numpy arrays. + """ + return self.data.shape + shape = property(shape) + + def getValue(self): + """ + Retrieve a scalar value from a `netcdf_variable` of length one. + + Raises + ------ + ValueError + If the netcdf variable is an array of length greater than one, + this exception will be raised. + + """ + return self.data.item() + + def assignValue(self, value): + """ + Assign a scalar value to a `netcdf_variable` of length one. + + Parameters + ---------- + value : scalar + Scalar value (of compatible type) to assign to a length-one netcdf + variable. This value will be written to file. + + Raises + ------ + ValueError + If the input is not a scalar, or if the destination is not a length-one + netcdf variable. + + """ + if not self.data.flags.writeable: + # Work-around for a bug in NumPy. Calling itemset() on a read-only + # memory-mapped array causes a seg. fault. + # See NumPy ticket #1622, and SciPy ticket #1202. + # This check for `writeable` can be removed when the oldest version + # of NumPy still supported by scipy contains the fix for #1622. + raise RuntimeError("variable is not writeable") + + self.data[:] = value + + def typecode(self): + """ + Return the typecode of the variable. + + Returns + ------- + typecode : char + The character typecode of the variable (e.g., 'i' for int). + + """ + return self._typecode + + def itemsize(self): + """ + Return the itemsize of the variable. + + Returns + ------- + itemsize : int + The element size of the variable (e.g., 8 for float64). 
+ + """ + return self._size + + def __getitem__(self, index): + if not self.maskandscale: + return self.data[index] + + data = self.data[index].copy() + missing_value = self._get_missing_value() + data = self._apply_missing_value(data, missing_value) + scale_factor = self._attributes.get('scale_factor') + add_offset = self._attributes.get('add_offset') + if add_offset is not None or scale_factor is not None: + data = data.astype(np.float64) + if scale_factor is not None: + data = data * scale_factor + if add_offset is not None: + data += add_offset + + return data + + def __setitem__(self, index, data): + if self.maskandscale: + missing_value = ( + self._get_missing_value() or + getattr(data, 'fill_value', 999999)) + self._attributes.setdefault('missing_value', missing_value) + self._attributes.setdefault('_FillValue', missing_value) + data = ((data - self._attributes.get('add_offset', 0.0)) / + self._attributes.get('scale_factor', 1.0)) + data = np.ma.asarray(data).filled(missing_value) + if self._typecode not in 'fd' and data.dtype.kind == 'f': + data = np.round(data) + + # Expand data for record vars? + if self.isrec: + if isinstance(index, tuple): + rec_index = index[0] + else: + rec_index = index + if isinstance(rec_index, slice): + recs = (rec_index.start or 0) + len(data) + else: + recs = rec_index + 1 + if recs > len(self.data): + shape = (recs,) + self._shape[1:] + # Resize in-place does not always work since + # the array might not be single-segment + try: + self.data.resize(shape) + except ValueError: + dtype = self.data.dtype + self.__dict__['data'] = np.resize(self.data, shape).astype(dtype) + self.data[index] = data + + def _default_encoded_fill_value(self): + """ + The default encoded fill-value for this Variable's data type. + """ + nc_type = REVERSE[self.typecode(), self.itemsize()] + return FILLMAP[nc_type] + + def _get_encoded_fill_value(self): + """ + Returns the encoded fill value for this variable as bytes. + + This is taken from either the _FillValue attribute, or the default fill + value for this variable's data type. + """ + if '_FillValue' in self._attributes: + fill_value = np.array(self._attributes['_FillValue'], + dtype=self.data.dtype).tobytes() + if len(fill_value) == self.itemsize(): + return fill_value + else: + return self._default_encoded_fill_value() + else: + return self._default_encoded_fill_value() + + def _get_missing_value(self): + """ + Returns the value denoting "no data" for this variable. + + If this variable does not have a missing/fill value, returns None. + + If both _FillValue and missing_value are given, give precedence to + _FillValue. The netCDF standard gives special meaning to _FillValue; + missing_value is just used for compatibility with old datasets. + """ + + if '_FillValue' in self._attributes: + missing_value = self._attributes['_FillValue'] + elif 'missing_value' in self._attributes: + missing_value = self._attributes['missing_value'] + else: + missing_value = None + + return missing_value + + @staticmethod + def _apply_missing_value(data, missing_value): + """ + Applies the given missing value to the data array. + + Returns a numpy.ma array, with any value equal to missing_value masked + out (unless missing_value is None, in which case the original array is + returned). 
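Editorial note, not part of the patched file: the __getitem__/__setitem__ branches above implement the conventional scale_factor/add_offset packing, with missing values masked before unpacking as raw * scale_factor + add_offset. A small round-trip sketch, assuming a throwaway file name 'packed.nc' and attribute names as handled by the code above:

import numpy as np
from scipy.io import netcdf_file

# Write raw 16-bit integers plus packing attributes.
with netcdf_file('packed.nc', 'w') as f:
    f.createDimension('t', 3)
    v = f.createVariable('temp', 'h', ('t',))
    v[:] = np.array([100, 200, -32768], dtype='h')
    v.scale_factor = 0.5
    v.add_offset = 5.0
    v.missing_value = -32768

# Read back with maskandscale=True: the sentinel is masked and the rest
# is unpacked, giving approximately masked_array([55.0, 105.0, --]).
with netcdf_file('packed.nc', 'r', maskandscale=True) as f:
    temp = f.variables['temp'][:]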
+ """ + + if missing_value is None: + newdata = data + else: + try: + missing_value_isnan = np.isnan(missing_value) + except (TypeError, NotImplementedError): + # some data types (e.g., characters) cannot be tested for NaN + missing_value_isnan = False + + if missing_value_isnan: + mymask = np.isnan(data) + else: + mymask = (data == missing_value) + + newdata = np.ma.masked_where(mymask, data) + + return newdata + + +NetCDFFile = netcdf_file +NetCDFVariable = netcdf_variable diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/_test_fortran.cpython-310-x86_64-linux-gnu.so b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/_test_fortran.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..8a863a591370a3b1bd4afdb05e621c1af8eab3e2 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/_test_fortran.cpython-310-x86_64-linux-gnu.so differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/arff/__init__.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/arff/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..dcfe1c4237e69054b582a0fa52f710b25a1d7914 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/arff/__init__.py @@ -0,0 +1,28 @@ +""" +Module to read ARFF files +========================= +ARFF is the standard data format for WEKA. +It is a text file format which support numerical, string and data values. +The format can also represent missing data and sparse data. + +Notes +----- +The ARFF support in ``scipy.io`` provides file reading functionality only. +For more extensive ARFF functionality, see `liac-arff +`_. + +See the `WEKA website `_ +for more details about the ARFF format and available datasets. + +""" +from ._arffread import * +from . import _arffread + +# Deprecated namespaces, to be removed in v2.0.0 +from .import arffread + +__all__ = _arffread.__all__ + ['arffread'] + +from scipy._lib._testutils import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/arff/_arffread.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/arff/_arffread.py new file mode 100644 index 0000000000000000000000000000000000000000..65495b8d98386492eecbccb8968715590c0faf7c --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/arff/_arffread.py @@ -0,0 +1,873 @@ +# Last Change: Mon Aug 20 08:00 PM 2007 J +import re +import datetime + +import numpy as np + +import csv +import ctypes + +"""A module to read arff files.""" + +__all__ = ['MetaData', 'loadarff', 'ArffError', 'ParseArffError'] + +# An Arff file is basically two parts: +# - header +# - data +# +# A header has each of its components starting by @META where META is one of +# the keyword (attribute of relation, for now). + +# TODO: +# - both integer and reals are treated as numeric -> the integer info +# is lost! 
+# - Replace ValueError by ParseError or something + +# We know can handle the following: +# - numeric and nominal attributes +# - missing values for numeric attributes + +r_meta = re.compile(r'^\s*@') +# Match a comment +r_comment = re.compile(r'^%') +# Match an empty line +r_empty = re.compile(r'^\s+$') +# Match a header line, that is a line which starts by @ + a word +r_headerline = re.compile(r'^\s*@\S*') +r_datameta = re.compile(r'^@[Dd][Aa][Tt][Aa]') +r_relation = re.compile(r'^@[Rr][Ee][Ll][Aa][Tt][Ii][Oo][Nn]\s*(\S*)') +r_attribute = re.compile(r'^\s*@[Aa][Tt][Tt][Rr][Ii][Bb][Uu][Tt][Ee]\s*(..*$)') + +r_nominal = re.compile(r'{(.+)}') +r_date = re.compile(r"[Dd][Aa][Tt][Ee]\s+[\"']?(.+?)[\"']?$") + +# To get attributes name enclosed with '' +r_comattrval = re.compile(r"'(..+)'\s+(..+$)") +# To get normal attributes +r_wcomattrval = re.compile(r"(\S+)\s+(..+$)") + +# ------------------------ +# Module defined exception +# ------------------------ + + +class ArffError(OSError): + pass + + +class ParseArffError(ArffError): + pass + + +# ---------- +# Attributes +# ---------- +class Attribute: + + type_name = None + + def __init__(self, name): + self.name = name + self.range = None + self.dtype = np.object_ + + @classmethod + def parse_attribute(cls, name, attr_string): + """ + Parse the attribute line if it knows how. Returns the parsed + attribute, or None. + """ + return None + + def parse_data(self, data_str): + """ + Parse a value of this type. + """ + return None + + def __str__(self): + """ + Parse a value of this type. + """ + return self.name + ',' + self.type_name + + +class NominalAttribute(Attribute): + + type_name = 'nominal' + + def __init__(self, name, values): + super().__init__(name) + self.values = values + self.range = values + self.dtype = (np.bytes_, max(len(i) for i in values)) + + @staticmethod + def _get_nom_val(atrv): + """Given a string containing a nominal type, returns a tuple of the + possible values. + + A nominal type is defined as something framed between braces ({}). + + Parameters + ---------- + atrv : str + Nominal type definition + + Returns + ------- + poss_vals : tuple + possible values + + Examples + -------- + >>> from scipy.io.arff._arffread import NominalAttribute + >>> NominalAttribute._get_nom_val("{floup, bouga, fl, ratata}") + ('floup', 'bouga', 'fl', 'ratata') + """ + m = r_nominal.match(atrv) + if m: + attrs, _ = split_data_line(m.group(1)) + return tuple(attrs) + else: + raise ValueError("This does not look like a nominal string") + + @classmethod + def parse_attribute(cls, name, attr_string): + """ + Parse the attribute line if it knows how. Returns the parsed + attribute, or None. + + For nominal attributes, the attribute string would be like '{, + , }'. + """ + if attr_string[0] == '{': + values = cls._get_nom_val(attr_string) + return cls(name, values) + else: + return None + + def parse_data(self, data_str): + """ + Parse a value of this type. 
+ """ + if data_str in self.values: + return data_str + elif data_str == '?': + return data_str + else: + raise ValueError(f"{str(data_str)} value not in {str(self.values)}") + + def __str__(self): + msg = self.name + ",{" + for i in range(len(self.values)-1): + msg += self.values[i] + "," + msg += self.values[-1] + msg += "}" + return msg + + +class NumericAttribute(Attribute): + + def __init__(self, name): + super().__init__(name) + self.type_name = 'numeric' + self.dtype = np.float64 + + @classmethod + def parse_attribute(cls, name, attr_string): + """ + Parse the attribute line if it knows how. Returns the parsed + attribute, or None. + + For numeric attributes, the attribute string would be like + 'numeric' or 'int' or 'real'. + """ + + attr_string = attr_string.lower().strip() + + if (attr_string[:len('numeric')] == 'numeric' or + attr_string[:len('int')] == 'int' or + attr_string[:len('real')] == 'real'): + return cls(name) + else: + return None + + def parse_data(self, data_str): + """ + Parse a value of this type. + + Parameters + ---------- + data_str : str + string to convert + + Returns + ------- + f : float + where float can be nan + + Examples + -------- + >>> from scipy.io.arff._arffread import NumericAttribute + >>> atr = NumericAttribute('atr') + >>> atr.parse_data('1') + 1.0 + >>> atr.parse_data('1\\n') + 1.0 + >>> atr.parse_data('?\\n') + nan + """ + if '?' in data_str: + return np.nan + else: + return float(data_str) + + def _basic_stats(self, data): + nbfac = data.size * 1. / (data.size - 1) + return (np.nanmin(data), np.nanmax(data), + np.mean(data), np.std(data) * nbfac) + + +class StringAttribute(Attribute): + + def __init__(self, name): + super().__init__(name) + self.type_name = 'string' + + @classmethod + def parse_attribute(cls, name, attr_string): + """ + Parse the attribute line if it knows how. Returns the parsed + attribute, or None. + + For string attributes, the attribute string would be like + 'string'. 
+ """ + + attr_string = attr_string.lower().strip() + + if attr_string[:len('string')] == 'string': + return cls(name) + else: + return None + + +class DateAttribute(Attribute): + + def __init__(self, name, date_format, datetime_unit): + super().__init__(name) + self.date_format = date_format + self.datetime_unit = datetime_unit + self.type_name = 'date' + self.range = date_format + self.dtype = np.datetime64(0, self.datetime_unit) + + @staticmethod + def _get_date_format(atrv): + m = r_date.match(atrv) + if m: + pattern = m.group(1).strip() + # convert time pattern from Java's SimpleDateFormat to C's format + datetime_unit = None + if "yyyy" in pattern: + pattern = pattern.replace("yyyy", "%Y") + datetime_unit = "Y" + elif "yy": + pattern = pattern.replace("yy", "%y") + datetime_unit = "Y" + if "MM" in pattern: + pattern = pattern.replace("MM", "%m") + datetime_unit = "M" + if "dd" in pattern: + pattern = pattern.replace("dd", "%d") + datetime_unit = "D" + if "HH" in pattern: + pattern = pattern.replace("HH", "%H") + datetime_unit = "h" + if "mm" in pattern: + pattern = pattern.replace("mm", "%M") + datetime_unit = "m" + if "ss" in pattern: + pattern = pattern.replace("ss", "%S") + datetime_unit = "s" + if "z" in pattern or "Z" in pattern: + raise ValueError("Date type attributes with time zone not " + "supported, yet") + + if datetime_unit is None: + raise ValueError("Invalid or unsupported date format") + + return pattern, datetime_unit + else: + raise ValueError("Invalid or no date format") + + @classmethod + def parse_attribute(cls, name, attr_string): + """ + Parse the attribute line if it knows how. Returns the parsed + attribute, or None. + + For date attributes, the attribute string would be like + 'date '. + """ + + attr_string_lower = attr_string.lower().strip() + + if attr_string_lower[:len('date')] == 'date': + date_format, datetime_unit = cls._get_date_format(attr_string) + return cls(name, date_format, datetime_unit) + else: + return None + + def parse_data(self, data_str): + """ + Parse a value of this type. + """ + date_str = data_str.strip().strip("'").strip('"') + if date_str == '?': + return np.datetime64('NaT', self.datetime_unit) + else: + dt = datetime.datetime.strptime(date_str, self.date_format) + return np.datetime64(dt).astype( + f"datetime64[{self.datetime_unit}]") + + def __str__(self): + return super().__str__() + ',' + self.date_format + + +class RelationalAttribute(Attribute): + + def __init__(self, name): + super().__init__(name) + self.type_name = 'relational' + self.dtype = np.object_ + self.attributes = [] + self.dialect = None + + @classmethod + def parse_attribute(cls, name, attr_string): + """ + Parse the attribute line if it knows how. Returns the parsed + attribute, or None. + + For date attributes, the attribute string would be like + 'date '. 
+ """ + + attr_string_lower = attr_string.lower().strip() + + if attr_string_lower[:len('relational')] == 'relational': + return cls(name) + else: + return None + + def parse_data(self, data_str): + # Copy-pasted + elems = list(range(len(self.attributes))) + + escaped_string = data_str.encode().decode("unicode-escape") + + row_tuples = [] + + for raw in escaped_string.split("\n"): + row, self.dialect = split_data_line(raw, self.dialect) + + row_tuples.append(tuple( + [self.attributes[i].parse_data(row[i]) for i in elems])) + + return np.array(row_tuples, + [(a.name, a.dtype) for a in self.attributes]) + + def __str__(self): + return (super().__str__() + '\n\t' + + '\n\t'.join(str(a) for a in self.attributes)) + + +# ----------------- +# Various utilities +# ----------------- +def to_attribute(name, attr_string): + attr_classes = (NominalAttribute, NumericAttribute, DateAttribute, + StringAttribute, RelationalAttribute) + + for cls in attr_classes: + attr = cls.parse_attribute(name, attr_string) + if attr is not None: + return attr + + raise ParseArffError(f"unknown attribute {attr_string}") + + +def csv_sniffer_has_bug_last_field(): + """ + Checks if the bug https://bugs.python.org/issue30157 is unpatched. + """ + + # We only compute this once. + has_bug = getattr(csv_sniffer_has_bug_last_field, "has_bug", None) + + if has_bug is None: + dialect = csv.Sniffer().sniff("3, 'a'") + csv_sniffer_has_bug_last_field.has_bug = dialect.quotechar != "'" + has_bug = csv_sniffer_has_bug_last_field.has_bug + + return has_bug + + +def workaround_csv_sniffer_bug_last_field(sniff_line, dialect, delimiters): + """ + Workaround for the bug https://bugs.python.org/issue30157 if is unpatched. + """ + if csv_sniffer_has_bug_last_field(): + # Reuses code from the csv module + right_regex = r'(?P[^\w\n"\'])(?P ?)(?P["\']).*?(?P=quote)(?:$|\n)' # noqa: E501 + + for restr in (r'(?P[^\w\n"\'])(?P ?)(?P["\']).*?(?P=quote)(?P=delim)', # ,".*?", # noqa: E501 + r'(?:^|\n)(?P["\']).*?(?P=quote)(?P[^\w\n"\'])(?P ?)', # .*?", # noqa: E501 + right_regex, # ,".*?" + r'(?:^|\n)(?P["\']).*?(?P=quote)(?:$|\n)'): # ".*?" (no delim, no space) # noqa: E501 + regexp = re.compile(restr, re.DOTALL | re.MULTILINE) + matches = regexp.findall(sniff_line) + if matches: + break + + # If it does not match the expression that was bugged, + # then this bug does not apply + if restr != right_regex: + return + + groupindex = regexp.groupindex + + # There is only one end of the string + assert len(matches) == 1 + m = matches[0] + + n = groupindex['quote'] - 1 + quote = m[n] + + n = groupindex['delim'] - 1 + delim = m[n] + + n = groupindex['space'] - 1 + space = bool(m[n]) + + dq_regexp = re.compile( + rf"(({re.escape(delim)})|^)\W*{quote}[^{re.escape(delim)}\n]*{quote}[^{re.escape(delim)}\n]*{quote}\W*(({re.escape(delim)})|$)", re.MULTILINE # noqa: E501 + ) + + doublequote = bool(dq_regexp.search(sniff_line)) + + dialect.quotechar = quote + if delim in delimiters: + dialect.delimiter = delim + dialect.doublequote = doublequote + dialect.skipinitialspace = space + + +def split_data_line(line, dialect=None): + delimiters = ",\t" + + # This can not be done in a per reader basis, and relational fields + # can be HUGE + csv.field_size_limit(int(ctypes.c_ulong(-1).value // 2)) + + # Remove the line end if any + if line[-1] == '\n': + line = line[:-1] + + # Remove potential trailing whitespace + line = line.strip() + + sniff_line = line + + # Add a delimiter if none is present, so that the csv.Sniffer + # does not complain for a single-field CSV. 
+ if not any(d in line for d in delimiters): + sniff_line += "," + + if dialect is None: + dialect = csv.Sniffer().sniff(sniff_line, delimiters=delimiters) + workaround_csv_sniffer_bug_last_field(sniff_line=sniff_line, + dialect=dialect, + delimiters=delimiters) + + row = next(csv.reader([line], dialect)) + + return row, dialect + + +# -------------- +# Parsing header +# -------------- +def tokenize_attribute(iterable, attribute): + """Parse a raw string in header (e.g., starts by @attribute). + + Given a raw string attribute, try to get the name and type of the + attribute. Constraints: + + * The first line must start with @attribute (case insensitive, and + space like characters before @attribute are allowed) + * Works also if the attribute is spread on multilines. + * Works if empty lines or comments are in between + + Parameters + ---------- + attribute : str + the attribute string. + + Returns + ------- + name : str + name of the attribute + value : str + value of the attribute + next : str + next line to be parsed + + Examples + -------- + If attribute is a string defined in python as r"floupi real", will + return floupi as name, and real as value. + + >>> from scipy.io.arff._arffread import tokenize_attribute + >>> iterable = iter([0] * 10) # dummy iterator + >>> tokenize_attribute(iterable, r"@attribute floupi real") + ('floupi', 'real', 0) + + If attribute is r"'floupi 2' real", will return 'floupi 2' as name, + and real as value. + + >>> tokenize_attribute(iterable, r" @attribute 'floupi 2' real ") + ('floupi 2', 'real', 0) + + """ + sattr = attribute.strip() + mattr = r_attribute.match(sattr) + if mattr: + # atrv is everything after @attribute + atrv = mattr.group(1) + if r_comattrval.match(atrv): + name, type = tokenize_single_comma(atrv) + next_item = next(iterable) + elif r_wcomattrval.match(atrv): + name, type = tokenize_single_wcomma(atrv) + next_item = next(iterable) + else: + # Not sure we should support this, as it does not seem supported by + # weka. + raise ValueError("multi line not supported yet") + else: + raise ValueError(f"First line unparsable: {sattr}") + + attribute = to_attribute(name, type) + + if type.lower() == 'relational': + next_item = read_relational_attribute(iterable, attribute, next_item) + # raise ValueError("relational attributes not supported yet") + + return attribute, next_item + + +def tokenize_single_comma(val): + # XXX we match twice the same string (here and at the caller level). It is + # stupid, but it is easier for now... + m = r_comattrval.match(val) + if m: + try: + name = m.group(1).strip() + type = m.group(2).strip() + except IndexError as e: + raise ValueError("Error while tokenizing attribute") from e + else: + raise ValueError(f"Error while tokenizing single {val}") + return name, type + + +def tokenize_single_wcomma(val): + # XXX we match twice the same string (here and at the caller level). It is + # stupid, but it is easier for now... 
+ m = r_wcomattrval.match(val) + if m: + try: + name = m.group(1).strip() + type = m.group(2).strip() + except IndexError as e: + raise ValueError("Error while tokenizing attribute") from e + else: + raise ValueError(f"Error while tokenizing single {val}") + return name, type + + +def read_relational_attribute(ofile, relational_attribute, i): + """Read the nested attributes of a relational attribute""" + + r_end_relational = re.compile(r'^@[Ee][Nn][Dd]\s*' + + relational_attribute.name + r'\s*$') + + while not r_end_relational.match(i): + m = r_headerline.match(i) + if m: + isattr = r_attribute.match(i) + if isattr: + attr, i = tokenize_attribute(ofile, i) + relational_attribute.attributes.append(attr) + else: + raise ValueError(f"Error parsing line {i}") + else: + i = next(ofile) + + i = next(ofile) + return i + + +def read_header(ofile): + """Read the header of the iterable ofile.""" + i = next(ofile) + + # Pass first comments + while r_comment.match(i): + i = next(ofile) + + # Header is everything up to DATA attribute ? + relation = None + attributes = [] + while not r_datameta.match(i): + m = r_headerline.match(i) + if m: + isattr = r_attribute.match(i) + if isattr: + attr, i = tokenize_attribute(ofile, i) + attributes.append(attr) + else: + isrel = r_relation.match(i) + if isrel: + relation = isrel.group(1) + else: + raise ValueError(f"Error parsing line {i}") + i = next(ofile) + else: + i = next(ofile) + + return relation, attributes + + +class MetaData: + """Small container to keep useful information on a ARFF dataset. + + Knows about attributes names and types. + + Examples + -------- + :: + + data, meta = loadarff('iris.arff') + # This will print the attributes names of the iris.arff dataset + for i in meta: + print(i) + # This works too + meta.names() + # Getting attribute type + types = meta.types() + + Methods + ------- + names + types + + Notes + ----- + Also maintains the list of attributes in order, i.e., doing for i in + meta, where meta is an instance of MetaData, will return the + different attribute names in the order they were defined. + """ + def __init__(self, rel, attr): + self.name = rel + self._attributes = {a.name: a for a in attr} + + def __repr__(self): + msg = "" + msg += f"Dataset: {self.name}\n" + for i in self._attributes: + msg += f"\t{i}'s type is {self._attributes[i].type_name}" + if self._attributes[i].range: + msg += f", range is {str(self._attributes[i].range)}" + msg += '\n' + return msg + + def __iter__(self): + return iter(self._attributes) + + def __getitem__(self, key): + attr = self._attributes[key] + + return (attr.type_name, attr.range) + + def names(self): + """Return the list of attribute names. + + Returns + ------- + attrnames : list of str + The attribute names. + """ + return list(self._attributes) + + def types(self): + """Return the list of attribute types. + + Returns + ------- + attr_types : list of str + The attribute types. + """ + attr_types = [self._attributes[name].type_name + for name in self._attributes] + return attr_types + + +def loadarff(f): + """ + Read an arff file. + + The data is returned as a record array, which can be accessed much like + a dictionary of NumPy arrays. For example, if one of the attributes is + called 'pressure', then its first 10 data points can be accessed from the + ``data`` record array like so: ``data['pressure'][0:10]`` + + + Parameters + ---------- + f : file-like or str + File-like object to read from, or filename to open. 
+ + Returns + ------- + data : record array + The data of the arff file, accessible by attribute names. + meta : `MetaData` + Contains information about the arff file such as name and + type of attributes, the relation (name of the dataset), etc. + + Raises + ------ + ParseArffError + This is raised if the given file is not ARFF-formatted. + NotImplementedError + The ARFF file has an attribute which is not supported yet. + + Notes + ----- + + This function should be able to read most arff files. Not + implemented functionality include: + + * date type attributes + * string type attributes + + It can read files with numeric and nominal attributes. It cannot read + files with sparse data ({} in the file). However, this function can + read files with missing data (? in the file), representing the data + points as NaNs. + + Examples + -------- + >>> from scipy.io import arff + >>> from io import StringIO + >>> content = \"\"\" + ... @relation foo + ... @attribute width numeric + ... @attribute height numeric + ... @attribute color {red,green,blue,yellow,black} + ... @data + ... 5.0,3.25,blue + ... 4.5,3.75,green + ... 3.0,4.00,red + ... \"\"\" + >>> f = StringIO(content) + >>> data, meta = arff.loadarff(f) + >>> data + array([(5.0, 3.25, 'blue'), (4.5, 3.75, 'green'), (3.0, 4.0, 'red')], + dtype=[('width', '<f8'), ('height', '<f8'), ('color', 'S6')]) + >>> meta + Dataset: foo + \twidth's type is numeric + \theight's type is numeric + \tcolor's type is nominal, range is ('red', 'green', 'blue', 'yellow', 'black') + + """ + if hasattr(f, 'read'): + ofile = f + else: + ofile = open(f) + try: + return _loadarff(ofile) + finally: + if ofile is not f: # only close what we opened + ofile.close() + + +def _loadarff(ofile): + # Parse the header file + try: + rel, attr = read_header(ofile) + except ValueError as e: + msg = "Error while parsing header, error was: " + str(e) + raise ParseArffError(msg) from e + + # Check whether we have a string attribute (not supported yet) + hasstr = False + for a in attr: + if isinstance(a, StringAttribute): + hasstr = True + + meta = MetaData(rel, attr) + + # XXX The following code is not great + # Build the type descriptor descr and the list of converters to convert + # each attribute to the suitable type (which should match the one in + # descr). + + # This can be used once we want to support integer as integer values and + # not as numeric anymore (using masked arrays ?). + + if hasstr: + # How to support string efficiently ? Ideally, we should know the max + # size of the string before allocating the numpy array. + raise NotImplementedError("String attributes not supported yet, sorry") + + ni = len(attr) + + def generator(row_iter, delim=','): + # TODO: this is where we are spending time (~80%). I think things + # could be made more efficiently: + # - We could for example "compile" the function, because some values + # do not change here. + # - The function to convert a line to dtyped values could also be + # generated on the fly from a string and be executed instead of + # looping. + # - The regex are overkill: for comments, checking that a line starts + # by % should be enough and faster, and for empty lines, same thing + # --> this does not seem to change anything. + + # 'compiling' the range since it does not change + # Note, I have already tried zipping the converters and + # row elements and got slightly worse performance. + elems = list(range(ni)) + + dialect = None + for raw in row_iter: + # We do not abstract skipping comments and empty lines for + # performance reasons.
+ if r_comment.match(raw) or r_empty.match(raw): + continue + + row, dialect = split_data_line(raw, dialect) + + yield tuple([attr[i].parse_data(row[i]) for i in elems]) + + a = list(generator(ofile)) + # No error should happen here: it is a bug otherwise + data = np.array(a, [(a.name, a.dtype) for a in attr]) + return data, meta + diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/arff/arffread.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/arff/arffread.py new file mode 100644 index 0000000000000000000000000000000000000000..c42ae31db6bde3987bd059cc7451d1ae87f0073c --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/arff/arffread.py @@ -0,0 +1,19 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.io.arff` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + +__all__ = [ # noqa: F822 + 'MetaData', 'loadarff', 'ArffError', 'ParseArffError', +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="io.arff", module="arffread", + private_modules=["_arffread"], all=__all__, + attribute=name) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/arff/tests/__init__.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/arff/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/arff/tests/data/iris.arff b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/arff/tests/data/iris.arff new file mode 100644 index 0000000000000000000000000000000000000000..780480c7c6b9a68bf71aaf357c7d3f7a5b3b3f57 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/arff/tests/data/iris.arff @@ -0,0 +1,225 @@ +% 1. Title: Iris Plants Database +% +% 2. Sources: +% (a) Creator: R.A. Fisher +% (b) Donor: Michael Marshall (MARSHALL%PLU@io.arc.nasa.gov) +% (c) Date: July, 1988 +% +% 3. Past Usage: +% - Publications: too many to mention!!! Here are a few. +% 1. Fisher,R.A. "The use of multiple measurements in taxonomic problems" +% Annual Eugenics, 7, Part II, 179-188 (1936); also in "Contributions +% to Mathematical Statistics" (John Wiley, NY, 1950). +% 2. Duda,R.O., & Hart,P.E. (1973) Pattern Classification and Scene Analysis. +% (Q327.D83) John Wiley & Sons. ISBN 0-471-22361-1. See page 218. +% 3. Dasarathy, B.V. (1980) "Nosing Around the Neighborhood: A New System +% Structure and Classification Rule for Recognition in Partially Exposed +% Environments". IEEE Transactions on Pattern Analysis and Machine +% Intelligence, Vol. PAMI-2, No. 1, 67-71. +% -- Results: +% -- very low misclassification rates (0% for the setosa class) +% 4. Gates, G.W. (1972) "The Reduced Nearest Neighbor Rule". IEEE +% Transactions on Information Theory, May 1972, 431-433. +% -- Results: +% -- very low misclassification rates again +% 5. See also: 1988 MLC Proceedings, 54-64. Cheeseman et al's AUTOCLASS II +% conceptual clustering system finds 3 classes in the data. +% +% 4. 
Relevant Information: +% --- This is perhaps the best known database to be found in the pattern +% recognition literature. Fisher's paper is a classic in the field +% and is referenced frequently to this day. (See Duda & Hart, for +% example.) The data set contains 3 classes of 50 instances each, +% where each class refers to a type of iris plant. One class is +% linearly separable from the other 2; the latter are NOT linearly +% separable from each other. +% --- Predicted attribute: class of iris plant. +% --- This is an exceedingly simple domain. +% +% 5. Number of Instances: 150 (50 in each of three classes) +% +% 6. Number of Attributes: 4 numeric, predictive attributes and the class +% +% 7. Attribute Information: +% 1. sepal length in cm +% 2. sepal width in cm +% 3. petal length in cm +% 4. petal width in cm +% 5. class: +% -- Iris Setosa +% -- Iris Versicolour +% -- Iris Virginica +% +% 8. Missing Attribute Values: None +% +% Summary Statistics: +% Min Max Mean SD Class Correlation +% sepal length: 4.3 7.9 5.84 0.83 0.7826 +% sepal width: 2.0 4.4 3.05 0.43 -0.4194 +% petal length: 1.0 6.9 3.76 1.76 0.9490 (high!) +% petal width: 0.1 2.5 1.20 0.76 0.9565 (high!) +% +% 9. Class Distribution: 33.3% for each of 3 classes. + +@RELATION iris + +@ATTRIBUTE sepallength REAL +@ATTRIBUTE sepalwidth REAL +@ATTRIBUTE petallength REAL +@ATTRIBUTE petalwidth REAL +@ATTRIBUTE class {Iris-setosa,Iris-versicolor,Iris-virginica} + +@DATA +5.1,3.5,1.4,0.2,Iris-setosa +4.9,3.0,1.4,0.2,Iris-setosa +4.7,3.2,1.3,0.2,Iris-setosa +4.6,3.1,1.5,0.2,Iris-setosa +5.0,3.6,1.4,0.2,Iris-setosa +5.4,3.9,1.7,0.4,Iris-setosa +4.6,3.4,1.4,0.3,Iris-setosa +5.0,3.4,1.5,0.2,Iris-setosa +4.4,2.9,1.4,0.2,Iris-setosa +4.9,3.1,1.5,0.1,Iris-setosa +5.4,3.7,1.5,0.2,Iris-setosa +4.8,3.4,1.6,0.2,Iris-setosa +4.8,3.0,1.4,0.1,Iris-setosa +4.3,3.0,1.1,0.1,Iris-setosa +5.8,4.0,1.2,0.2,Iris-setosa +5.7,4.4,1.5,0.4,Iris-setosa +5.4,3.9,1.3,0.4,Iris-setosa +5.1,3.5,1.4,0.3,Iris-setosa +5.7,3.8,1.7,0.3,Iris-setosa +5.1,3.8,1.5,0.3,Iris-setosa +5.4,3.4,1.7,0.2,Iris-setosa +5.1,3.7,1.5,0.4,Iris-setosa +4.6,3.6,1.0,0.2,Iris-setosa +5.1,3.3,1.7,0.5,Iris-setosa +4.8,3.4,1.9,0.2,Iris-setosa +5.0,3.0,1.6,0.2,Iris-setosa +5.0,3.4,1.6,0.4,Iris-setosa +5.2,3.5,1.5,0.2,Iris-setosa +5.2,3.4,1.4,0.2,Iris-setosa +4.7,3.2,1.6,0.2,Iris-setosa +4.8,3.1,1.6,0.2,Iris-setosa +5.4,3.4,1.5,0.4,Iris-setosa +5.2,4.1,1.5,0.1,Iris-setosa +5.5,4.2,1.4,0.2,Iris-setosa +4.9,3.1,1.5,0.1,Iris-setosa +5.0,3.2,1.2,0.2,Iris-setosa +5.5,3.5,1.3,0.2,Iris-setosa +4.9,3.1,1.5,0.1,Iris-setosa +4.4,3.0,1.3,0.2,Iris-setosa +5.1,3.4,1.5,0.2,Iris-setosa +5.0,3.5,1.3,0.3,Iris-setosa +4.5,2.3,1.3,0.3,Iris-setosa +4.4,3.2,1.3,0.2,Iris-setosa +5.0,3.5,1.6,0.6,Iris-setosa +5.1,3.8,1.9,0.4,Iris-setosa +4.8,3.0,1.4,0.3,Iris-setosa +5.1,3.8,1.6,0.2,Iris-setosa +4.6,3.2,1.4,0.2,Iris-setosa +5.3,3.7,1.5,0.2,Iris-setosa +5.0,3.3,1.4,0.2,Iris-setosa +7.0,3.2,4.7,1.4,Iris-versicolor +6.4,3.2,4.5,1.5,Iris-versicolor +6.9,3.1,4.9,1.5,Iris-versicolor +5.5,2.3,4.0,1.3,Iris-versicolor +6.5,2.8,4.6,1.5,Iris-versicolor +5.7,2.8,4.5,1.3,Iris-versicolor +6.3,3.3,4.7,1.6,Iris-versicolor +4.9,2.4,3.3,1.0,Iris-versicolor +6.6,2.9,4.6,1.3,Iris-versicolor +5.2,2.7,3.9,1.4,Iris-versicolor +5.0,2.0,3.5,1.0,Iris-versicolor +5.9,3.0,4.2,1.5,Iris-versicolor +6.0,2.2,4.0,1.0,Iris-versicolor +6.1,2.9,4.7,1.4,Iris-versicolor +5.6,2.9,3.6,1.3,Iris-versicolor +6.7,3.1,4.4,1.4,Iris-versicolor +5.6,3.0,4.5,1.5,Iris-versicolor +5.8,2.7,4.1,1.0,Iris-versicolor +6.2,2.2,4.5,1.5,Iris-versicolor 
+5.6,2.5,3.9,1.1,Iris-versicolor +5.9,3.2,4.8,1.8,Iris-versicolor +6.1,2.8,4.0,1.3,Iris-versicolor +6.3,2.5,4.9,1.5,Iris-versicolor +6.1,2.8,4.7,1.2,Iris-versicolor +6.4,2.9,4.3,1.3,Iris-versicolor +6.6,3.0,4.4,1.4,Iris-versicolor +6.8,2.8,4.8,1.4,Iris-versicolor +6.7,3.0,5.0,1.7,Iris-versicolor +6.0,2.9,4.5,1.5,Iris-versicolor +5.7,2.6,3.5,1.0,Iris-versicolor +5.5,2.4,3.8,1.1,Iris-versicolor +5.5,2.4,3.7,1.0,Iris-versicolor +5.8,2.7,3.9,1.2,Iris-versicolor +6.0,2.7,5.1,1.6,Iris-versicolor +5.4,3.0,4.5,1.5,Iris-versicolor +6.0,3.4,4.5,1.6,Iris-versicolor +6.7,3.1,4.7,1.5,Iris-versicolor +6.3,2.3,4.4,1.3,Iris-versicolor +5.6,3.0,4.1,1.3,Iris-versicolor +5.5,2.5,4.0,1.3,Iris-versicolor +5.5,2.6,4.4,1.2,Iris-versicolor +6.1,3.0,4.6,1.4,Iris-versicolor +5.8,2.6,4.0,1.2,Iris-versicolor +5.0,2.3,3.3,1.0,Iris-versicolor +5.6,2.7,4.2,1.3,Iris-versicolor +5.7,3.0,4.2,1.2,Iris-versicolor +5.7,2.9,4.2,1.3,Iris-versicolor +6.2,2.9,4.3,1.3,Iris-versicolor +5.1,2.5,3.0,1.1,Iris-versicolor +5.7,2.8,4.1,1.3,Iris-versicolor +6.3,3.3,6.0,2.5,Iris-virginica +5.8,2.7,5.1,1.9,Iris-virginica +7.1,3.0,5.9,2.1,Iris-virginica +6.3,2.9,5.6,1.8,Iris-virginica +6.5,3.0,5.8,2.2,Iris-virginica +7.6,3.0,6.6,2.1,Iris-virginica +4.9,2.5,4.5,1.7,Iris-virginica +7.3,2.9,6.3,1.8,Iris-virginica +6.7,2.5,5.8,1.8,Iris-virginica +7.2,3.6,6.1,2.5,Iris-virginica +6.5,3.2,5.1,2.0,Iris-virginica +6.4,2.7,5.3,1.9,Iris-virginica +6.8,3.0,5.5,2.1,Iris-virginica +5.7,2.5,5.0,2.0,Iris-virginica +5.8,2.8,5.1,2.4,Iris-virginica +6.4,3.2,5.3,2.3,Iris-virginica +6.5,3.0,5.5,1.8,Iris-virginica +7.7,3.8,6.7,2.2,Iris-virginica +7.7,2.6,6.9,2.3,Iris-virginica +6.0,2.2,5.0,1.5,Iris-virginica +6.9,3.2,5.7,2.3,Iris-virginica +5.6,2.8,4.9,2.0,Iris-virginica +7.7,2.8,6.7,2.0,Iris-virginica +6.3,2.7,4.9,1.8,Iris-virginica +6.7,3.3,5.7,2.1,Iris-virginica +7.2,3.2,6.0,1.8,Iris-virginica +6.2,2.8,4.8,1.8,Iris-virginica +6.1,3.0,4.9,1.8,Iris-virginica +6.4,2.8,5.6,2.1,Iris-virginica +7.2,3.0,5.8,1.6,Iris-virginica +7.4,2.8,6.1,1.9,Iris-virginica +7.9,3.8,6.4,2.0,Iris-virginica +6.4,2.8,5.6,2.2,Iris-virginica +6.3,2.8,5.1,1.5,Iris-virginica +6.1,2.6,5.6,1.4,Iris-virginica +7.7,3.0,6.1,2.3,Iris-virginica +6.3,3.4,5.6,2.4,Iris-virginica +6.4,3.1,5.5,1.8,Iris-virginica +6.0,3.0,4.8,1.8,Iris-virginica +6.9,3.1,5.4,2.1,Iris-virginica +6.7,3.1,5.6,2.4,Iris-virginica +6.9,3.1,5.1,2.3,Iris-virginica +5.8,2.7,5.1,1.9,Iris-virginica +6.8,3.2,5.9,2.3,Iris-virginica +6.7,3.3,5.7,2.5,Iris-virginica +6.7,3.0,5.2,2.3,Iris-virginica +6.3,2.5,5.0,1.9,Iris-virginica +6.5,3.0,5.2,2.0,Iris-virginica +6.2,3.4,5.4,2.3,Iris-virginica +5.9,3.0,5.1,1.8,Iris-virginica +% +% +% diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/arff/tests/data/missing.arff b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/arff/tests/data/missing.arff new file mode 100644 index 0000000000000000000000000000000000000000..dedc64c8fa2fcdc0081b30b7804be85114495ce2 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/arff/tests/data/missing.arff @@ -0,0 +1,8 @@ +% This arff file contains some missing data +@relation missing +@attribute yop real +@attribute yap real +@data +1,5 +2,4 +?,? 
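The missing.arff fixture above exercises the missing-data path that the loadarff docstring describes: '?' entries in numeric columns come back as NaN in the resulting record array. A minimal usage sketch (illustrative only, not part of the vendored files; it simply feeds the same fixture contents through scipy.io.arff.loadarff):

    # Illustrative sketch: read an ARFF stream containing '?' (missing) values.
    from io import StringIO

    import numpy as np
    from scipy.io import arff

    # Same contents as the missing.arff fixture above (comment line omitted).
    content = (
        "@relation missing\n"
        "@attribute yop real\n"
        "@attribute yap real\n"
        "@data\n"
        "1,5\n"
        "2,4\n"
        "?,?\n"
    )

    data, meta = arff.loadarff(StringIO(content))
    print(meta.names())               # ['yop', 'yap']
    print(data['yop'])                # [ 1.  2. nan]
    print(np.isnan(data['yap'][-1]))  # True: '?' is parsed as NaN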
diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/arff/tests/data/nodata.arff b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/arff/tests/data/nodata.arff new file mode 100644 index 0000000000000000000000000000000000000000..5766aeb229a1b31378026274c366e8e9e44fd487 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/arff/tests/data/nodata.arff @@ -0,0 +1,11 @@ +@RELATION iris + +@ATTRIBUTE sepallength REAL +@ATTRIBUTE sepalwidth REAL +@ATTRIBUTE petallength REAL +@ATTRIBUTE petalwidth REAL +@ATTRIBUTE class {Iris-setosa,Iris-versicolor,Iris-virginica} + +@DATA + +% This file has no data diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/arff/tests/data/quoted_nominal.arff b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/arff/tests/data/quoted_nominal.arff new file mode 100644 index 0000000000000000000000000000000000000000..7cd16d1ef9b50cc1194d034ef4d458ef3cf0d417 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/arff/tests/data/quoted_nominal.arff @@ -0,0 +1,13 @@ +% Regression test for issue #10232 : Exception in loadarff with quoted nominal attributes +% Spaces between elements are stripped by the parser + +@relation SOME_DATA +@attribute age numeric +@attribute smoker {'yes', 'no'} +@data +18, 'no' +24, 'yes' +44, 'no' +56, 'no' +89,'yes' +11, 'no' diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/arff/tests/data/quoted_nominal_spaces.arff b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/arff/tests/data/quoted_nominal_spaces.arff new file mode 100644 index 0000000000000000000000000000000000000000..c799127862b6060442b29c9a0382836cc9c55537 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/arff/tests/data/quoted_nominal_spaces.arff @@ -0,0 +1,13 @@ +% Regression test for issue #10232 : Exception in loadarff with quoted nominal attributes +% Spaces inside quotes are NOT stripped by the parser + +@relation SOME_DATA +@attribute age numeric +@attribute smoker {' yes', 'no '} +@data +18,'no ' +24,' yes' +44,'no ' +56,'no ' +89,' yes' +11,'no ' diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/arff/tests/data/test1.arff b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/arff/tests/data/test1.arff new file mode 100644 index 0000000000000000000000000000000000000000..ccc8e0cc7c43dc66ad7b3a8e4738c3322d3f79d8 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/arff/tests/data/test1.arff @@ -0,0 +1,10 @@ +@RELATION test1 + +@ATTRIBUTE attr0 REAL +@ATTRIBUTE attr1 REAL +@ATTRIBUTE attr2 REAL +@ATTRIBUTE attr3 REAL +@ATTRIBUTE class {class0, class1, class2, class3} + +@DATA +0.1, 0.2, 0.3, 0.4,class1 diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/arff/tests/data/test10.arff b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/arff/tests/data/test10.arff new file mode 100644 index 0000000000000000000000000000000000000000..094ac5094a842866666726b358d2c66bf927c9d2 --- /dev/null +++ 
b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/arff/tests/data/test10.arff @@ -0,0 +1,8 @@ +@relation test9 + +@attribute attr_relational relational + @attribute attr_number integer +@end attr_relational + +@data +'0\n1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\n13\n14\n15\n16\n17\n18\n19\n20\n21\n22\n23\n24\n25\n26\n27\n28\n29\n30\n31\n32\n33\n34\n35\n36\n37\n38\n39\n40\n41\n42\n43\n44\n45\n46\n47\n48\n49\n50\n51\n52\n53\n54\n55\n56\n57\n58\n59\n60\n61\n62\n63\n64\n65\n66\n67\n68\n69\n70\n71\n72\n73\n74\n75\n76\n77\n78\n79\n80\n81\n82\n83\n84\n85\n86\n87\n88\n89\n90\n91\n92\n93\n94\n95\n96\n97\n98\n99\n100\n101\n102\n103\n104\n105\n106\n107\n108\n109\n110\n111\n112\n113\n114\n115\n116\n117\n118\n119\n120\n121\n122\n123\n124\n125\n126\n127\n128\n129\n130\n131\n132\n133\n134\n135\n136\n137\n138\n139\n140\n141\n142\n143\n144\n145\n146\n147\n148\n149\n150\n151\n152\n153\n154\n155\n156\n157\n158\n159\n160\n161\n162\n163\n164\n165\n166\n167\n168\n169\n170\n171\n172\n173\n174\n175\n176\n177\n178\n179\n180\n181\n182\n183\n184\n185\n186\n187\n188\n189\n190\n191\n192\n193\n194\n195\n196\n197\n198\n199\n200\n201\n202\n203\n204\n205\n206\n207\n208\n209\n210\n211\n212\n213\n214\n215\n216\n217\n218\n219\n220\n221\n222\n223\n224\n225\n226\n227\n228\n229\n230\n231\n232\n233\n234\n235\n236\n237\n238\n239\n240\n241\n242\n243\n244\n245\n246\n247\n248\n249\n250\n251\n252\n253\n254\n255\n256\n257\n258\n259\n260\n261\n262\n263\n264\n265\n266\n267\n268\n269\n270\n271\n272\n273\n274\n275\n276\n277\n278\n279\n280\n281\n282\n283\n284\n285\n286\n287\n288\n289\n290\n291\n292\n293\n294\n295\n296\n297\n298\n299\n300\n301\n302\n303\n304\n305\n306\n307\n308\n309\n310\n311\n312\n313\n314\n315\n316\n317\n318\n319\n320\n321\n322\n323\n324\n325\n326\n327\n328\n329\n330\n331\n332\n333\n334\n335\n336\n337\n338\n339\n340\n341\n342\n343\n344\n345\n346\n347\n348\n349\n350\n351\n352\n353\n354\n355\n356\n357\n358\n359\n360\n361\n362\n363\n364\n365\n366\n367\n368\n369\n370\n371\n372\n373\n374\n375\n376\n377\n378\n379\n380\n381\n382\n383\n384\n385\n386\n387\n388\n389\n390\n391\n392\n393\n394\n395\n396\n397\n398\n399\n400\n401\n402\n403\n404\n405\n406\n407\n408\n409\n410\n411\n412\n413\n414\n415\n416\n417\n418\n419\n420\n421\n422\n423\n424\n425\n426\n427\n428\n429\n430\n431\n432\n433\n434\n435\n436\n437\n438\n439\n440\n441\n442\n443\n444\n445\n446\n447\n448\n449\n450\n451\n452\n453\n454\n455\n456\n457\n458\n459\n460\n461\n462\n463\n464\n465\n466\n467\n468\n469\n470\n471\n472\n473\n474\n475\n476\n477\n478\n479\n480\n481\n482\n483\n484\n485\n486\n487\n488\n489\n490\n491\n492\n493\n494\n495\n496\n497\n498\n499\n500\n501\n502\n503\n504\n505\n506\n507\n508\n509\n510\n511\n512\n513\n514\n515\n516\n517\n518\n519\n520\n521\n522\n523\n524\n525\n526\n527\n528\n529\n530\n531\n532\n533\n534\n535\n536\n537\n538\n539\n540\n541\n542\n543\n544\n545\n546\n547\n548\n549\n550\n551\n552\n553\n554\n555\n556\n557\n558\n559\n560\n561\n562\n563\n564\n565\n566\n567\n568\n569\n570\n571\n572\n573\n574\n575\n576\n577\n578\n579\n580\n581\n582\n583\n584\n585\n586\n587\n588\n589\n590\n591\n592\n593\n594\n595\n596\n597\n598\n599\n600\n601\n602\n603\n604\n605\n606\n607\n608\n609\n610\n611\n612\n613\n614\n615\n616\n617\n618\n619\n620\n621\n622\n623\n624\n625\n626\n627\n628\n629\n630\n631\n632\n633\n634\n635\n636\n637\n638\n639\n640\n641\n642\n643\n644\n645\n646\n647\n648\n649\n650\n651\n652\n653\n654\n655\n656\n657\n658\n659\n660\n661\n662\n663\n664\n665\n666\n667\n668\n669\n670\n671\n672\n673\n674\n675\n676\n677\n678\n679\n680\n681
\n682\n683\n684\n685\n686\n687\n688\n689\n690\n691\n692\n693\n694\n695\n696\n697\n698\n699\n700\n701\n702\n703\n704\n705\n706\n707\n708\n709\n710\n711\n712\n713\n714\n715\n716\n717\n718\n719\n720\n721\n722\n723\n724\n725\n726\n727\n728\n729\n730\n731\n732\n733\n734\n735\n736\n737\n738\n739\n740\n741\n742\n743\n744\n745\n746\n747\n748\n749\n750\n751\n752\n753\n754\n755\n756\n757\n758\n759\n760\n761\n762\n763\n764\n765\n766\n767\n768\n769\n770\n771\n772\n773\n774\n775\n776\n777\n778\n779\n780\n781\n782\n783\n784\n785\n786\n787\n788\n789\n790\n791\n792\n793\n794\n795\n796\n797\n798\n799\n800\n801\n802\n803\n804\n805\n806\n807\n808\n809\n810\n811\n812\n813\n814\n815\n816\n817\n818\n819\n820\n821\n822\n823\n824\n825\n826\n827\n828\n829\n830\n831\n832\n833\n834\n835\n836\n837\n838\n839\n840\n841\n842\n843\n844\n845\n846\n847\n848\n849\n850\n851\n852\n853\n854\n855\n856\n857\n858\n859\n860\n861\n862\n863\n864\n865\n866\n867\n868\n869\n870\n871\n872\n873\n874\n875\n876\n877\n878\n879\n880\n881\n882\n883\n884\n885\n886\n887\n888\n889\n890\n891\n892\n893\n894\n895\n896\n897\n898\n899\n900\n901\n902\n903\n904\n905\n906\n907\n908\n909\n910\n911\n912\n913\n914\n915\n916\n917\n918\n919\n920\n921\n922\n923\n924\n925\n926\n927\n928\n929\n930\n931\n932\n933\n934\n935\n936\n937\n938\n939\n940\n941\n942\n943\n944\n945\n946\n947\n948\n949\n950\n951\n952\n953\n954\n955\n956\n957\n958\n959\n960\n961\n962\n963\n964\n965\n966\n967\n968\n969\n970\n971\n972\n973\n974\n975\n976\n977\n978\n979\n980\n981\n982\n983\n984\n985\n986\n987\n988\n989\n990\n991\n992\n993\n994\n995\n996\n997\n998\n999\n1000\n1001\n1002\n1003\n1004\n1005\n1006\n1007\n1008\n1009\n1010\n1011\n1012\n1013\n1014\n1015\n1016\n1017\n1018\n1019\n1020\n1021\n1022\n1023\n1024\n1025\n1026\n1027\n1028\n1029\n1030\n1031\n1032\n1033\n1034\n1035\n1036\n1037\n1038\n1039\n1040\n1041\n1042\n1043\n1044\n1045\n1046\n1047\n1048\n1049\n1050\n1051\n1052\n1053\n1054\n1055\n1056\n1057\n1058\n1059\n1060\n1061\n1062\n1063\n1064\n1065\n1066\n1067\n1068\n1069\n1070\n1071\n1072\n1073\n1074\n1075\n1076\n1077\n1078\n1079\n1080\n1081\n1082\n1083\n1084\n1085\n1086\n1087\n1088\n1089\n1090\n1091\n1092\n1093\n1094\n1095\n1096\n1097\n1098\n1099\n1100\n1101\n1102\n1103\n1104\n1105\n1106\n1107\n1108\n1109\n1110\n1111\n1112\n1113\n1114\n1115\n1116\n1117\n1118\n1119\n1120\n1121\n1122\n1123\n1124\n1125\n1126\n1127\n1128\n1129\n1130\n1131\n1132\n1133\n1134\n1135\n1136\n1137\n1138\n1139\n1140\n1141\n1142\n1143\n1144\n1145\n1146\n1147\n1148\n1149\n1150\n1151\n1152\n1153\n1154\n1155\n1156\n1157\n1158\n1159\n1160\n1161\n1162\n1163\n1164\n1165\n1166\n1167\n1168\n1169\n1170\n1171\n1172\n1173\n1174\n1175\n1176\n1177\n1178\n1179\n1180\n1181\n1182\n1183\n1184\n1185\n1186\n1187\n1188\n1189\n1190\n1191\n1192\n1193\n1194\n1195\n1196\n1197\n1198\n1199\n1200\n1201\n1202\n1203\n1204\n1205\n1206\n1207\n1208\n1209\n1210\n1211\n1212\n1213\n1214\n1215\n1216\n1217\n1218\n1219\n1220\n1221\n1222\n1223\n1224\n1225\n1226\n1227\n1228\n1229\n1230\n1231\n1232\n1233\n1234\n1235\n1236\n1237\n1238\n1239\n1240\n1241\n1242\n1243\n1244\n1245\n1246\n1247\n1248\n1249\n1250\n1251\n1252\n1253\n1254\n1255\n1256\n1257\n1258\n1259\n1260\n1261\n1262\n1263\n1264\n1265\n1266\n1267\n1268\n1269\n1270\n1271\n1272\n1273\n1274\n1275\n1276\n1277\n1278\n1279\n1280\n1281\n1282\n1283\n1284\n1285\n1286\n1287\n1288\n1289\n1290\n1291\n1292\n1293\n1294\n1295\n1296\n1297\n1298\n1299\n1300\n1301\n1302\n1303\n1304\n1305\n1306\n1307\n1308\n1309\n1310\n1311\n1312\n1313\n1314\n1315\n1316\n1317\n1318\n1319\n1320\n1321\n1322\n1323\n1324\n1325\n1326\n1
327\n1328\n1329\n1330\n1331\n1332\n1333\n1334\n1335\n1336\n1337\n1338\n1339\n1340\n1341\n1342\n1343\n1344\n1345\n1346\n1347\n1348\n1349\n1350\n1351\n1352\n1353\n1354\n1355\n1356\n1357\n1358\n1359\n1360\n1361\n1362\n1363\n1364\n1365\n1366\n1367\n1368\n1369\n1370\n1371\n1372\n1373\n1374\n1375\n1376\n1377\n1378\n1379\n1380\n1381\n1382\n1383\n1384\n1385\n1386\n1387\n1388\n1389\n1390\n1391\n1392\n1393\n1394\n1395\n1396\n1397\n1398\n1399\n1400\n1401\n1402\n1403\n1404\n1405\n1406\n1407\n1408\n1409\n1410\n1411\n1412\n1413\n1414\n1415\n1416\n1417\n1418\n1419\n1420\n1421\n1422\n1423\n1424\n1425\n1426\n1427\n1428\n1429\n1430\n1431\n1432\n1433\n1434\n1435\n1436\n1437\n1438\n1439\n1440\n1441\n1442\n1443\n1444\n1445\n1446\n1447\n1448\n1449\n1450\n1451\n1452\n1453\n1454\n1455\n1456\n1457\n1458\n1459\n1460\n1461\n1462\n1463\n1464\n1465\n1466\n1467\n1468\n1469\n1470\n1471\n1472\n1473\n1474\n1475\n1476\n1477\n1478\n1479\n1480\n1481\n1482\n1483\n1484\n1485\n1486\n1487\n1488\n1489\n1490\n1491\n1492\n1493\n1494\n1495\n1496\n1497\n1498\n1499\n1500\n1501\n1502\n1503\n1504\n1505\n1506\n1507\n1508\n1509\n1510\n1511\n1512\n1513\n1514\n1515\n1516\n1517\n1518\n1519\n1520\n1521\n1522\n1523\n1524\n1525\n1526\n1527\n1528\n1529\n1530\n1531\n1532\n1533\n1534\n1535\n1536\n1537\n1538\n1539\n1540\n1541\n1542\n1543\n1544\n1545\n1546\n1547\n1548\n1549\n1550\n1551\n1552\n1553\n1554\n1555\n1556\n1557\n1558\n1559\n1560\n1561\n1562\n1563\n1564\n1565\n1566\n1567\n1568\n1569\n1570\n1571\n1572\n1573\n1574\n1575\n1576\n1577\n1578\n1579\n1580\n1581\n1582\n1583\n1584\n1585\n1586\n1587\n1588\n1589\n1590\n1591\n1592\n1593\n1594\n1595\n1596\n1597\n1598\n1599\n1600\n1601\n1602\n1603\n1604\n1605\n1606\n1607\n1608\n1609\n1610\n1611\n1612\n1613\n1614\n1615\n1616\n1617\n1618\n1619\n1620\n1621\n1622\n1623\n1624\n1625\n1626\n1627\n1628\n1629\n1630\n1631\n1632\n1633\n1634\n1635\n1636\n1637\n1638\n1639\n1640\n1641\n1642\n1643\n1644\n1645\n1646\n1647\n1648\n1649\n1650\n1651\n1652\n1653\n1654\n1655\n1656\n1657\n1658\n1659\n1660\n1661\n1662\n1663\n1664\n1665\n1666\n1667\n1668\n1669\n1670\n1671\n1672\n1673\n1674\n1675\n1676\n1677\n1678\n1679\n1680\n1681\n1682\n1683\n1684\n1685\n1686\n1687\n1688\n1689\n1690\n1691\n1692\n1693\n1694\n1695\n1696\n1697\n1698\n1699\n1700\n1701\n1702\n1703\n1704\n1705\n1706\n1707\n1708\n1709\n1710\n1711\n1712\n1713\n1714\n1715\n1716\n1717\n1718\n1719\n1720\n1721\n1722\n1723\n1724\n1725\n1726\n1727\n1728\n1729\n1730\n1731\n1732\n1733\n1734\n1735\n1736\n1737\n1738\n1739\n1740\n1741\n1742\n1743\n1744\n1745\n1746\n1747\n1748\n1749\n1750\n1751\n1752\n1753\n1754\n1755\n1756\n1757\n1758\n1759\n1760\n1761\n1762\n1763\n1764\n1765\n1766\n1767\n1768\n1769\n1770\n1771\n1772\n1773\n1774\n1775\n1776\n1777\n1778\n1779\n1780\n1781\n1782\n1783\n1784\n1785\n1786\n1787\n1788\n1789\n1790\n1791\n1792\n1793\n1794\n1795\n1796\n1797\n1798\n1799\n1800\n1801\n1802\n1803\n1804\n1805\n1806\n1807\n1808\n1809\n1810\n1811\n1812\n1813\n1814\n1815\n1816\n1817\n1818\n1819\n1820\n1821\n1822\n1823\n1824\n1825\n1826\n1827\n1828\n1829\n1830\n1831\n1832\n1833\n1834\n1835\n1836\n1837\n1838\n1839\n1840\n1841\n1842\n1843\n1844\n1845\n1846\n1847\n1848\n1849\n1850\n1851\n1852\n1853\n1854\n1855\n1856\n1857\n1858\n1859\n1860\n1861\n1862\n1863\n1864\n1865\n1866\n1867\n1868\n1869\n1870\n1871\n1872\n1873\n1874\n1875\n1876\n1877\n1878\n1879\n1880\n1881\n1882\n1883\n1884\n1885\n1886\n1887\n1888\n1889\n1890\n1891\n1892\n1893\n1894\n1895\n1896\n1897\n1898\n1899\n1900\n1901\n1902\n1903\n1904\n1905\n1906\n1907\n1908\n1909\n1910\n1911\n1912\n1913\n1914\n1915\n1916\n1917\n1918\n1919
\n1920\n1921\n1922\n1923\n1924\n1925\n1926\n1927\n1928\n1929\n1930\n1931\n1932\n1933\n1934\n1935\n1936\n1937\n1938\n1939\n1940\n1941\n1942\n1943\n1944\n1945\n1946\n1947\n1948\n1949\n1950\n1951\n1952\n1953\n1954\n1955\n1956\n1957\n1958\n1959\n1960\n1961\n1962\n1963\n1964\n1965\n1966\n1967\n1968\n1969\n1970\n1971\n1972\n1973\n1974\n1975\n1976\n1977\n1978\n1979\n1980\n1981\n1982\n1983\n1984\n1985\n1986\n1987\n1988\n1989\n1990\n1991\n1992\n1993\n1994\n1995\n1996\n1997\n1998\n1999\n2000\n2001\n2002\n2003\n2004\n2005\n2006\n2007\n2008\n2009\n2010\n2011\n2012\n2013\n2014\n2015\n2016\n2017\n2018\n2019\n2020\n2021\n2022\n2023\n2024\n2025\n2026\n2027\n2028\n2029\n2030\n2031\n2032\n2033\n2034\n2035\n2036\n2037\n2038\n2039\n2040\n2041\n2042\n2043\n2044\n2045\n2046\n2047\n2048\n2049\n2050\n2051\n2052\n2053\n2054\n2055\n2056\n2057\n2058\n2059\n2060\n2061\n2062\n2063\n2064\n2065\n2066\n2067\n2068\n2069\n2070\n2071\n2072\n2073\n2074\n2075\n2076\n2077\n2078\n2079\n2080\n2081\n2082\n2083\n2084\n2085\n2086\n2087\n2088\n2089\n2090\n2091\n2092\n2093\n2094\n2095\n2096\n2097\n2098\n2099\n2100\n2101\n2102\n2103\n2104\n2105\n2106\n2107\n2108\n2109\n2110\n2111\n2112\n2113\n2114\n2115\n2116\n2117\n2118\n2119\n2120\n2121\n2122\n2123\n2124\n2125\n2126\n2127\n2128\n2129\n2130\n2131\n2132\n2133\n2134\n2135\n2136\n2137\n2138\n2139\n2140\n2141\n2142\n2143\n2144\n2145\n2146\n2147\n2148\n2149\n2150\n2151\n2152\n2153\n2154\n2155\n2156\n2157\n2158\n2159\n2160\n2161\n2162\n2163\n2164\n2165\n2166\n2167\n2168\n2169\n2170\n2171\n2172\n2173\n2174\n2175\n2176\n2177\n2178\n2179\n2180\n2181\n2182\n2183\n2184\n2185\n2186\n2187\n2188\n2189\n2190\n2191\n2192\n2193\n2194\n2195\n2196\n2197\n2198\n2199\n2200\n2201\n2202\n2203\n2204\n2205\n2206\n2207\n2208\n2209\n2210\n2211\n2212\n2213\n2214\n2215\n2216\n2217\n2218\n2219\n2220\n2221\n2222\n2223\n2224\n2225\n2226\n2227\n2228\n2229\n2230\n2231\n2232\n2233\n2234\n2235\n2236\n2237\n2238\n2239\n2240\n2241\n2242\n2243\n2244\n2245\n2246\n2247\n2248\n2249\n2250\n2251\n2252\n2253\n2254\n2255\n2256\n2257\n2258\n2259\n2260\n2261\n2262\n2263\n2264\n2265\n2266\n2267\n2268\n2269\n2270\n2271\n2272\n2273\n2274\n2275\n2276\n2277\n2278\n2279\n2280\n2281\n2282\n2283\n2284\n2285\n2286\n2287\n2288\n2289\n2290\n2291\n2292\n2293\n2294\n2295\n2296\n2297\n2298\n2299\n2300\n2301\n2302\n2303\n2304\n2305\n2306\n2307\n2308\n2309\n2310\n2311\n2312\n2313\n2314\n2315\n2316\n2317\n2318\n2319\n2320\n2321\n2322\n2323\n2324\n2325\n2326\n2327\n2328\n2329\n2330\n2331\n2332\n2333\n2334\n2335\n2336\n2337\n2338\n2339\n2340\n2341\n2342\n2343\n2344\n2345\n2346\n2347\n2348\n2349\n2350\n2351\n2352\n2353\n2354\n2355\n2356\n2357\n2358\n2359\n2360\n2361\n2362\n2363\n2364\n2365\n2366\n2367\n2368\n2369\n2370\n2371\n2372\n2373\n2374\n2375\n2376\n2377\n2378\n2379\n2380\n2381\n2382\n2383\n2384\n2385\n2386\n2387\n2388\n2389\n2390\n2391\n2392\n2393\n2394\n2395\n2396\n2397\n2398\n2399\n2400\n2401\n2402\n2403\n2404\n2405\n2406\n2407\n2408\n2409\n2410\n2411\n2412\n2413\n2414\n2415\n2416\n2417\n2418\n2419\n2420\n2421\n2422\n2423\n2424\n2425\n2426\n2427\n2428\n2429\n2430\n2431\n2432\n2433\n2434\n2435\n2436\n2437\n2438\n2439\n2440\n2441\n2442\n2443\n2444\n2445\n2446\n2447\n2448\n2449\n2450\n2451\n2452\n2453\n2454\n2455\n2456\n2457\n2458\n2459\n2460\n2461\n2462\n2463\n2464\n2465\n2466\n2467\n2468\n2469\n2470\n2471\n2472\n2473\n2474\n2475\n2476\n2477\n2478\n2479\n2480\n2481\n2482\n2483\n2484\n2485\n2486\n2487\n2488\n2489\n2490\n2491\n2492\n2493\n2494\n2495\n2496\n2497\n2498\n2499\n2500\n2501\n2502\n2503\n2504\n2505\n2506\n2507\n2508\n2509\n2510\n2511\n2
512\n2513\n2514\n2515\n2516\n2517\n2518\n2519\n2520\n2521\n2522\n2523\n2524\n2525\n2526\n2527\n2528\n2529\n2530\n2531\n2532\n2533\n2534\n2535\n2536\n2537\n2538\n2539\n2540\n2541\n2542\n2543\n2544\n2545\n2546\n2547\n2548\n2549\n2550\n2551\n2552\n2553\n2554\n2555\n2556\n2557\n2558\n2559\n2560\n2561\n2562\n2563\n2564\n2565\n2566\n2567\n2568\n2569\n2570\n2571\n2572\n2573\n2574\n2575\n2576\n2577\n2578\n2579\n2580\n2581\n2582\n2583\n2584\n2585\n2586\n2587\n2588\n2589\n2590\n2591\n2592\n2593\n2594\n2595\n2596\n2597\n2598\n2599\n2600\n2601\n2602\n2603\n2604\n2605\n2606\n2607\n2608\n2609\n2610\n2611\n2612\n2613\n2614\n2615\n2616\n2617\n2618\n2619\n2620\n2621\n2622\n2623\n2624\n2625\n2626\n2627\n2628\n2629\n2630\n2631\n2632\n2633\n2634\n2635\n2636\n2637\n2638\n2639\n2640\n2641\n2642\n2643\n2644\n2645\n2646\n2647\n2648\n2649\n2650\n2651\n2652\n2653\n2654\n2655\n2656\n2657\n2658\n2659\n2660\n2661\n2662\n2663\n2664\n2665\n2666\n2667\n2668\n2669\n2670\n2671\n2672\n2673\n2674\n2675\n2676\n2677\n2678\n2679\n2680\n2681\n2682\n2683\n2684\n2685\n2686\n2687\n2688\n2689\n2690\n2691\n2692\n2693\n2694\n2695\n2696\n2697\n2698\n2699\n2700\n2701\n2702\n2703\n2704\n2705\n2706\n2707\n2708\n2709\n2710\n2711\n2712\n2713\n2714\n2715\n2716\n2717\n2718\n2719\n2720\n2721\n2722\n2723\n2724\n2725\n2726\n2727\n2728\n2729\n2730\n2731\n2732\n2733\n2734\n2735\n2736\n2737\n2738\n2739\n2740\n2741\n2742\n2743\n2744\n2745\n2746\n2747\n2748\n2749\n2750\n2751\n2752\n2753\n2754\n2755\n2756\n2757\n2758\n2759\n2760\n2761\n2762\n2763\n2764\n2765\n2766\n2767\n2768\n2769\n2770\n2771\n2772\n2773\n2774\n2775\n2776\n2777\n2778\n2779\n2780\n2781\n2782\n2783\n2784\n2785\n2786\n2787\n2788\n2789\n2790\n2791\n2792\n2793\n2794\n2795\n2796\n2797\n2798\n2799\n2800\n2801\n2802\n2803\n2804\n2805\n2806\n2807\n2808\n2809\n2810\n2811\n2812\n2813\n2814\n2815\n2816\n2817\n2818\n2819\n2820\n2821\n2822\n2823\n2824\n2825\n2826\n2827\n2828\n2829\n2830\n2831\n2832\n2833\n2834\n2835\n2836\n2837\n2838\n2839\n2840\n2841\n2842\n2843\n2844\n2845\n2846\n2847\n2848\n2849\n2850\n2851\n2852\n2853\n2854\n2855\n2856\n2857\n2858\n2859\n2860\n2861\n2862\n2863\n2864\n2865\n2866\n2867\n2868\n2869\n2870\n2871\n2872\n2873\n2874\n2875\n2876\n2877\n2878\n2879\n2880\n2881\n2882\n2883\n2884\n2885\n2886\n2887\n2888\n2889\n2890\n2891\n2892\n2893\n2894\n2895\n2896\n2897\n2898\n2899\n2900\n2901\n2902\n2903\n2904\n2905\n2906\n2907\n2908\n2909\n2910\n2911\n2912\n2913\n2914\n2915\n2916\n2917\n2918\n2919\n2920\n2921\n2922\n2923\n2924\n2925\n2926\n2927\n2928\n2929\n2930\n2931\n2932\n2933\n2934\n2935\n2936\n2937\n2938\n2939\n2940\n2941\n2942\n2943\n2944\n2945\n2946\n2947\n2948\n2949\n2950\n2951\n2952\n2953\n2954\n2955\n2956\n2957\n2958\n2959\n2960\n2961\n2962\n2963\n2964\n2965\n2966\n2967\n2968\n2969\n2970\n2971\n2972\n2973\n2974\n2975\n2976\n2977\n2978\n2979\n2980\n2981\n2982\n2983\n2984\n2985\n2986\n2987\n2988\n2989\n2990\n2991\n2992\n2993\n2994\n2995\n2996\n2997\n2998\n2999\n3000\n3001\n3002\n3003\n3004\n3005\n3006\n3007\n3008\n3009\n3010\n3011\n3012\n3013\n3014\n3015\n3016\n3017\n3018\n3019\n3020\n3021\n3022\n3023\n3024\n3025\n3026\n3027\n3028\n3029\n3030\n3031\n3032\n3033\n3034\n3035\n3036\n3037\n3038\n3039\n3040\n3041\n3042\n3043\n3044\n3045\n3046\n3047\n3048\n3049\n3050\n3051\n3052\n3053\n3054\n3055\n3056\n3057\n3058\n3059\n3060\n3061\n3062\n3063\n3064\n3065\n3066\n3067\n3068\n3069\n3070\n3071\n3072\n3073\n3074\n3075\n3076\n3077\n3078\n3079\n3080\n3081\n3082\n3083\n3084\n3085\n3086\n3087\n3088\n3089\n3090\n3091\n3092\n3093\n3094\n3095\n3096\n3097\n3098\n3099\n3100\n3101\n3102\n3103\n3104
\n3105\n3106\n3107\n3108\n3109\n3110\n3111\n3112\n3113\n3114\n3115\n3116\n3117\n3118\n3119\n3120\n3121\n3122\n3123\n3124\n3125\n3126\n3127\n3128\n3129\n3130\n3131\n3132\n3133\n3134\n3135\n3136\n3137\n3138\n3139\n3140\n3141\n3142\n3143\n3144\n3145\n3146\n3147\n3148\n3149\n3150\n3151\n3152\n3153\n3154\n3155\n3156\n3157\n3158\n3159\n3160\n3161\n3162\n3163\n3164\n3165\n3166\n3167\n3168\n3169\n3170\n3171\n3172\n3173\n3174\n3175\n3176\n3177\n3178\n3179\n3180\n3181\n3182\n3183\n3184\n3185\n3186\n3187\n3188\n3189\n3190\n3191\n3192\n3193\n3194\n3195\n3196\n3197\n3198\n3199\n3200\n3201\n3202\n3203\n3204\n3205\n3206\n3207\n3208\n3209\n3210\n3211\n3212\n3213\n3214\n3215\n3216\n3217\n3218\n3219\n3220\n3221\n3222\n3223\n3224\n3225\n3226\n3227\n3228\n3229\n3230\n3231\n3232\n3233\n3234\n3235\n3236\n3237\n3238\n3239\n3240\n3241\n3242\n3243\n3244\n3245\n3246\n3247\n3248\n3249\n3250\n3251\n3252\n3253\n3254\n3255\n3256\n3257\n3258\n3259\n3260\n3261\n3262\n3263\n3264\n3265\n3266\n3267\n3268\n3269\n3270\n3271\n3272\n3273\n3274\n3275\n3276\n3277\n3278\n3279\n3280\n3281\n3282\n3283\n3284\n3285\n3286\n3287\n3288\n3289\n3290\n3291\n3292\n3293\n3294\n3295\n3296\n3297\n3298\n3299\n3300\n3301\n3302\n3303\n3304\n3305\n3306\n3307\n3308\n3309\n3310\n3311\n3312\n3313\n3314\n3315\n3316\n3317\n3318\n3319\n3320\n3321\n3322\n3323\n3324\n3325\n3326\n3327\n3328\n3329\n3330\n3331\n3332\n3333\n3334\n3335\n3336\n3337\n3338\n3339\n3340\n3341\n3342\n3343\n3344\n3345\n3346\n3347\n3348\n3349\n3350\n3351\n3352\n3353\n3354\n3355\n3356\n3357\n3358\n3359\n3360\n3361\n3362\n3363\n3364\n3365\n3366\n3367\n3368\n3369\n3370\n3371\n3372\n3373\n3374\n3375\n3376\n3377\n3378\n3379\n3380\n3381\n3382\n3383\n3384\n3385\n3386\n3387\n3388\n3389\n3390\n3391\n3392\n3393\n3394\n3395\n3396\n3397\n3398\n3399\n3400\n3401\n3402\n3403\n3404\n3405\n3406\n3407\n3408\n3409\n3410\n3411\n3412\n3413\n3414\n3415\n3416\n3417\n3418\n3419\n3420\n3421\n3422\n3423\n3424\n3425\n3426\n3427\n3428\n3429\n3430\n3431\n3432\n3433\n3434\n3435\n3436\n3437\n3438\n3439\n3440\n3441\n3442\n3443\n3444\n3445\n3446\n3447\n3448\n3449\n3450\n3451\n3452\n3453\n3454\n3455\n3456\n3457\n3458\n3459\n3460\n3461\n3462\n3463\n3464\n3465\n3466\n3467\n3468\n3469\n3470\n3471\n3472\n3473\n3474\n3475\n3476\n3477\n3478\n3479\n3480\n3481\n3482\n3483\n3484\n3485\n3486\n3487\n3488\n3489\n3490\n3491\n3492\n3493\n3494\n3495\n3496\n3497\n3498\n3499\n3500\n3501\n3502\n3503\n3504\n3505\n3506\n3507\n3508\n3509\n3510\n3511\n3512\n3513\n3514\n3515\n3516\n3517\n3518\n3519\n3520\n3521\n3522\n3523\n3524\n3525\n3526\n3527\n3528\n3529\n3530\n3531\n3532\n3533\n3534\n3535\n3536\n3537\n3538\n3539\n3540\n3541\n3542\n3543\n3544\n3545\n3546\n3547\n3548\n3549\n3550\n3551\n3552\n3553\n3554\n3555\n3556\n3557\n3558\n3559\n3560\n3561\n3562\n3563\n3564\n3565\n3566\n3567\n3568\n3569\n3570\n3571\n3572\n3573\n3574\n3575\n3576\n3577\n3578\n3579\n3580\n3581\n3582\n3583\n3584\n3585\n3586\n3587\n3588\n3589\n3590\n3591\n3592\n3593\n3594\n3595\n3596\n3597\n3598\n3599\n3600\n3601\n3602\n3603\n3604\n3605\n3606\n3607\n3608\n3609\n3610\n3611\n3612\n3613\n3614\n3615\n3616\n3617\n3618\n3619\n3620\n3621\n3622\n3623\n3624\n3625\n3626\n3627\n3628\n3629\n3630\n3631\n3632\n3633\n3634\n3635\n3636\n3637\n3638\n3639\n3640\n3641\n3642\n3643\n3644\n3645\n3646\n3647\n3648\n3649\n3650\n3651\n3652\n3653\n3654\n3655\n3656\n3657\n3658\n3659\n3660\n3661\n3662\n3663\n3664\n3665\n3666\n3667\n3668\n3669\n3670\n3671\n3672\n3673\n3674\n3675\n3676\n3677\n3678\n3679\n3680\n3681\n3682\n3683\n3684\n3685\n3686\n3687\n3688\n3689\n3690\n3691\n3692\n3693\n3694\n3695\n3696\n3
697\n3698\n3699\n3700\n3701\n3702\n3703\n3704\n3705\n3706\n3707\n3708\n3709\n3710\n3711\n3712\n3713\n3714\n3715\n3716\n3717\n3718\n3719\n3720\n3721\n3722\n3723\n3724\n3725\n3726\n3727\n3728\n3729\n3730\n3731\n3732\n3733\n3734\n3735\n3736\n3737\n3738\n3739\n3740\n3741\n3742\n3743\n3744\n3745\n3746\n3747\n3748\n3749\n3750\n3751\n3752\n3753\n3754\n3755\n3756\n3757\n3758\n3759\n3760\n3761\n3762\n3763\n3764\n3765\n3766\n3767\n3768\n3769\n3770\n3771\n3772\n3773\n3774\n3775\n3776\n3777\n3778\n3779\n3780\n3781\n3782\n3783\n3784\n3785\n3786\n3787\n3788\n3789\n3790\n3791\n3792\n3793\n3794\n3795\n3796\n3797\n3798\n3799\n3800\n3801\n3802\n3803\n3804\n3805\n3806\n3807\n3808\n3809\n3810\n3811\n3812\n3813\n3814\n3815\n3816\n3817\n3818\n3819\n3820\n3821\n3822\n3823\n3824\n3825\n3826\n3827\n3828\n3829\n3830\n3831\n3832\n3833\n3834\n3835\n3836\n3837\n3838\n3839\n3840\n3841\n3842\n3843\n3844\n3845\n3846\n3847\n3848\n3849\n3850\n3851\n3852\n3853\n3854\n3855\n3856\n3857\n3858\n3859\n3860\n3861\n3862\n3863\n3864\n3865\n3866\n3867\n3868\n3869\n3870\n3871\n3872\n3873\n3874\n3875\n3876\n3877\n3878\n3879\n3880\n3881\n3882\n3883\n3884\n3885\n3886\n3887\n3888\n3889\n3890\n3891\n3892\n3893\n3894\n3895\n3896\n3897\n3898\n3899\n3900\n3901\n3902\n3903\n3904\n3905\n3906\n3907\n3908\n3909\n3910\n3911\n3912\n3913\n3914\n3915\n3916\n3917\n3918\n3919\n3920\n3921\n3922\n3923\n3924\n3925\n3926\n3927\n3928\n3929\n3930\n3931\n3932\n3933\n3934\n3935\n3936\n3937\n3938\n3939\n3940\n3941\n3942\n3943\n3944\n3945\n3946\n3947\n3948\n3949\n3950\n3951\n3952\n3953\n3954\n3955\n3956\n3957\n3958\n3959\n3960\n3961\n3962\n3963\n3964\n3965\n3966\n3967\n3968\n3969\n3970\n3971\n3972\n3973\n3974\n3975\n3976\n3977\n3978\n3979\n3980\n3981\n3982\n3983\n3984\n3985\n3986\n3987\n3988\n3989\n3990\n3991\n3992\n3993\n3994\n3995\n3996\n3997\n3998\n3999\n4000\n4001\n4002\n4003\n4004\n4005\n4006\n4007\n4008\n4009\n4010\n4011\n4012\n4013\n4014\n4015\n4016\n4017\n4018\n4019\n4020\n4021\n4022\n4023\n4024\n4025\n4026\n4027\n4028\n4029\n4030\n4031\n4032\n4033\n4034\n4035\n4036\n4037\n4038\n4039\n4040\n4041\n4042\n4043\n4044\n4045\n4046\n4047\n4048\n4049\n4050\n4051\n4052\n4053\n4054\n4055\n4056\n4057\n4058\n4059\n4060\n4061\n4062\n4063\n4064\n4065\n4066\n4067\n4068\n4069\n4070\n4071\n4072\n4073\n4074\n4075\n4076\n4077\n4078\n4079\n4080\n4081\n4082\n4083\n4084\n4085\n4086\n4087\n4088\n4089\n4090\n4091\n4092\n4093\n4094\n4095\n4096\n4097\n4098\n4099\n4100\n4101\n4102\n4103\n4104\n4105\n4106\n4107\n4108\n4109\n4110\n4111\n4112\n4113\n4114\n4115\n4116\n4117\n4118\n4119\n4120\n4121\n4122\n4123\n4124\n4125\n4126\n4127\n4128\n4129\n4130\n4131\n4132\n4133\n4134\n4135\n4136\n4137\n4138\n4139\n4140\n4141\n4142\n4143\n4144\n4145\n4146\n4147\n4148\n4149\n4150\n4151\n4152\n4153\n4154\n4155\n4156\n4157\n4158\n4159\n4160\n4161\n4162\n4163\n4164\n4165\n4166\n4167\n4168\n4169\n4170\n4171\n4172\n4173\n4174\n4175\n4176\n4177\n4178\n4179\n4180\n4181\n4182\n4183\n4184\n4185\n4186\n4187\n4188\n4189\n4190\n4191\n4192\n4193\n4194\n4195\n4196\n4197\n4198\n4199\n4200\n4201\n4202\n4203\n4204\n4205\n4206\n4207\n4208\n4209\n4210\n4211\n4212\n4213\n4214\n4215\n4216\n4217\n4218\n4219\n4220\n4221\n4222\n4223\n4224\n4225\n4226\n4227\n4228\n4229\n4230\n4231\n4232\n4233\n4234\n4235\n4236\n4237\n4238\n4239\n4240\n4241\n4242\n4243\n4244\n4245\n4246\n4247\n4248\n4249\n4250\n4251\n4252\n4253\n4254\n4255\n4256\n4257\n4258\n4259\n4260\n4261\n4262\n4263\n4264\n4265\n4266\n4267\n4268\n4269\n4270\n4271\n4272\n4273\n4274\n4275\n4276\n4277\n4278\n4279\n4280\n4281\n4282\n4283\n4284\n4285\n4286\n4287\n4288\n4289
\n4290\n4291\n4292\n4293\n4294\n4295\n4296\n4297\n4298\n4299\n4300\n4301\n4302\n4303\n4304\n4305\n4306\n4307\n4308\n4309\n4310\n4311\n4312\n4313\n4314\n4315\n4316\n4317\n4318\n4319\n4320\n4321\n4322\n4323\n4324\n4325\n4326\n4327\n4328\n4329\n4330\n4331\n4332\n4333\n4334\n4335\n4336\n4337\n4338\n4339\n4340\n4341\n4342\n4343\n4344\n4345\n4346\n4347\n4348\n4349\n4350\n4351\n4352\n4353\n4354\n4355\n4356\n4357\n4358\n4359\n4360\n4361\n4362\n4363\n4364\n4365\n4366\n4367\n4368\n4369\n4370\n4371\n4372\n4373\n4374\n4375\n4376\n4377\n4378\n4379\n4380\n4381\n4382\n4383\n4384\n4385\n4386\n4387\n4388\n4389\n4390\n4391\n4392\n4393\n4394\n4395\n4396\n4397\n4398\n4399\n4400\n4401\n4402\n4403\n4404\n4405\n4406\n4407\n4408\n4409\n4410\n4411\n4412\n4413\n4414\n4415\n4416\n4417\n4418\n4419\n4420\n4421\n4422\n4423\n4424\n4425\n4426\n4427\n4428\n4429\n4430\n4431\n4432\n4433\n4434\n4435\n4436\n4437\n4438\n4439\n4440\n4441\n4442\n4443\n4444\n4445\n4446\n4447\n4448\n4449\n4450\n4451\n4452\n4453\n4454\n4455\n4456\n4457\n4458\n4459\n4460\n4461\n4462\n4463\n4464\n4465\n4466\n4467\n4468\n4469\n4470\n4471\n4472\n4473\n4474\n4475\n4476\n4477\n4478\n4479\n4480\n4481\n4482\n4483\n4484\n4485\n4486\n4487\n4488\n4489\n4490\n4491\n4492\n4493\n4494\n4495\n4496\n4497\n4498\n4499\n4500\n4501\n4502\n4503\n4504\n4505\n4506\n4507\n4508\n4509\n4510\n4511\n4512\n4513\n4514\n4515\n4516\n4517\n4518\n4519\n4520\n4521\n4522\n4523\n4524\n4525\n4526\n4527\n4528\n4529\n4530\n4531\n4532\n4533\n4534\n4535\n4536\n4537\n4538\n4539\n4540\n4541\n4542\n4543\n4544\n4545\n4546\n4547\n4548\n4549\n4550\n4551\n4552\n4553\n4554\n4555\n4556\n4557\n4558\n4559\n4560\n4561\n4562\n4563\n4564\n4565\n4566\n4567\n4568\n4569\n4570\n4571\n4572\n4573\n4574\n4575\n4576\n4577\n4578\n4579\n4580\n4581\n4582\n4583\n4584\n4585\n4586\n4587\n4588\n4589\n4590\n4591\n4592\n4593\n4594\n4595\n4596\n4597\n4598\n4599\n4600\n4601\n4602\n4603\n4604\n4605\n4606\n4607\n4608\n4609\n4610\n4611\n4612\n4613\n4614\n4615\n4616\n4617\n4618\n4619\n4620\n4621\n4622\n4623\n4624\n4625\n4626\n4627\n4628\n4629\n4630\n4631\n4632\n4633\n4634\n4635\n4636\n4637\n4638\n4639\n4640\n4641\n4642\n4643\n4644\n4645\n4646\n4647\n4648\n4649\n4650\n4651\n4652\n4653\n4654\n4655\n4656\n4657\n4658\n4659\n4660\n4661\n4662\n4663\n4664\n4665\n4666\n4667\n4668\n4669\n4670\n4671\n4672\n4673\n4674\n4675\n4676\n4677\n4678\n4679\n4680\n4681\n4682\n4683\n4684\n4685\n4686\n4687\n4688\n4689\n4690\n4691\n4692\n4693\n4694\n4695\n4696\n4697\n4698\n4699\n4700\n4701\n4702\n4703\n4704\n4705\n4706\n4707\n4708\n4709\n4710\n4711\n4712\n4713\n4714\n4715\n4716\n4717\n4718\n4719\n4720\n4721\n4722\n4723\n4724\n4725\n4726\n4727\n4728\n4729\n4730\n4731\n4732\n4733\n4734\n4735\n4736\n4737\n4738\n4739\n4740\n4741\n4742\n4743\n4744\n4745\n4746\n4747\n4748\n4749\n4750\n4751\n4752\n4753\n4754\n4755\n4756\n4757\n4758\n4759\n4760\n4761\n4762\n4763\n4764\n4765\n4766\n4767\n4768\n4769\n4770\n4771\n4772\n4773\n4774\n4775\n4776\n4777\n4778\n4779\n4780\n4781\n4782\n4783\n4784\n4785\n4786\n4787\n4788\n4789\n4790\n4791\n4792\n4793\n4794\n4795\n4796\n4797\n4798\n4799\n4800\n4801\n4802\n4803\n4804\n4805\n4806\n4807\n4808\n4809\n4810\n4811\n4812\n4813\n4814\n4815\n4816\n4817\n4818\n4819\n4820\n4821\n4822\n4823\n4824\n4825\n4826\n4827\n4828\n4829\n4830\n4831\n4832\n4833\n4834\n4835\n4836\n4837\n4838\n4839\n4840\n4841\n4842\n4843\n4844\n4845\n4846\n4847\n4848\n4849\n4850\n4851\n4852\n4853\n4854\n4855\n4856\n4857\n4858\n4859\n4860\n4861\n4862\n4863\n4864\n4865\n4866\n4867\n4868\n4869\n4870\n4871\n4872\n4873\n4874\n4875\n4876\n4877\n4878\n4879\n4880\n4881\n4
882\n4883\n4884\n4885\n4886\n4887\n4888\n4889\n4890\n4891\n4892\n4893\n4894\n4895\n4896\n4897\n4898\n4899\n4900\n4901\n4902\n4903\n4904\n4905\n4906\n4907\n4908\n4909\n4910\n4911\n4912\n4913\n4914\n4915\n4916\n4917\n4918\n4919\n4920\n4921\n4922\n4923\n4924\n4925\n4926\n4927\n4928\n4929\n4930\n4931\n4932\n4933\n4934\n4935\n4936\n4937\n4938\n4939\n4940\n4941\n4942\n4943\n4944\n4945\n4946\n4947\n4948\n4949\n4950\n4951\n4952\n4953\n4954\n4955\n4956\n4957\n4958\n4959\n4960\n4961\n4962\n4963\n4964\n4965\n4966\n4967\n4968\n4969\n4970\n4971\n4972\n4973\n4974\n4975\n4976\n4977\n4978\n4979\n4980\n4981\n4982\n4983\n4984\n4985\n4986\n4987\n4988\n4989\n4990\n4991\n4992\n4993\n4994\n4995\n4996\n4997\n4998\n4999\n5000\n5001\n5002\n5003\n5004\n5005\n5006\n5007\n5008\n5009\n5010\n5011\n5012\n5013\n5014\n5015\n5016\n5017\n5018\n5019\n5020\n5021\n5022\n5023\n5024\n5025\n5026\n5027\n5028\n5029\n5030\n5031\n5032\n5033\n5034\n5035\n5036\n5037\n5038\n5039\n5040\n5041\n5042\n5043\n5044\n5045\n5046\n5047\n5048\n5049\n5050\n5051\n5052\n5053\n5054\n5055\n5056\n5057\n5058\n5059\n5060\n5061\n5062\n5063\n5064\n5065\n5066\n5067\n5068\n5069\n5070\n5071\n5072\n5073\n5074\n5075\n5076\n5077\n5078\n5079\n5080\n5081\n5082\n5083\n5084\n5085\n5086\n5087\n5088\n5089\n5090\n5091\n5092\n5093\n5094\n5095\n5096\n5097\n5098\n5099\n5100\n5101\n5102\n5103\n5104\n5105\n5106\n5107\n5108\n5109\n5110\n5111\n5112\n5113\n5114\n5115\n5116\n5117\n5118\n5119\n5120\n5121\n5122\n5123\n5124\n5125\n5126\n5127\n5128\n5129\n5130\n5131\n5132\n5133\n5134\n5135\n5136\n5137\n5138\n5139\n5140\n5141\n5142\n5143\n5144\n5145\n5146\n5147\n5148\n5149\n5150\n5151\n5152\n5153\n5154\n5155\n5156\n5157\n5158\n5159\n5160\n5161\n5162\n5163\n5164\n5165\n5166\n5167\n5168\n5169\n5170\n5171\n5172\n5173\n5174\n5175\n5176\n5177\n5178\n5179\n5180\n5181\n5182\n5183\n5184\n5185\n5186\n5187\n5188\n5189\n5190\n5191\n5192\n5193\n5194\n5195\n5196\n5197\n5198\n5199\n5200\n5201\n5202\n5203\n5204\n5205\n5206\n5207\n5208\n5209\n5210\n5211\n5212\n5213\n5214\n5215\n5216\n5217\n5218\n5219\n5220\n5221\n5222\n5223\n5224\n5225\n5226\n5227\n5228\n5229\n5230\n5231\n5232\n5233\n5234\n5235\n5236\n5237\n5238\n5239\n5240\n5241\n5242\n5243\n5244\n5245\n5246\n5247\n5248\n5249\n5250\n5251\n5252\n5253\n5254\n5255\n5256\n5257\n5258\n5259\n5260\n5261\n5262\n5263\n5264\n5265\n5266\n5267\n5268\n5269\n5270\n5271\n5272\n5273\n5274\n5275\n5276\n5277\n5278\n5279\n5280\n5281\n5282\n5283\n5284\n5285\n5286\n5287\n5288\n5289\n5290\n5291\n5292\n5293\n5294\n5295\n5296\n5297\n5298\n5299\n5300\n5301\n5302\n5303\n5304\n5305\n5306\n5307\n5308\n5309\n5310\n5311\n5312\n5313\n5314\n5315\n5316\n5317\n5318\n5319\n5320\n5321\n5322\n5323\n5324\n5325\n5326\n5327\n5328\n5329\n5330\n5331\n5332\n5333\n5334\n5335\n5336\n5337\n5338\n5339\n5340\n5341\n5342\n5343\n5344\n5345\n5346\n5347\n5348\n5349\n5350\n5351\n5352\n5353\n5354\n5355\n5356\n5357\n5358\n5359\n5360\n5361\n5362\n5363\n5364\n5365\n5366\n5367\n5368\n5369\n5370\n5371\n5372\n5373\n5374\n5375\n5376\n5377\n5378\n5379\n5380\n5381\n5382\n5383\n5384\n5385\n5386\n5387\n5388\n5389\n5390\n5391\n5392\n5393\n5394\n5395\n5396\n5397\n5398\n5399\n5400\n5401\n5402\n5403\n5404\n5405\n5406\n5407\n5408\n5409\n5410\n5411\n5412\n5413\n5414\n5415\n5416\n5417\n5418\n5419\n5420\n5421\n5422\n5423\n5424\n5425\n5426\n5427\n5428\n5429\n5430\n5431\n5432\n5433\n5434\n5435\n5436\n5437\n5438\n5439\n5440\n5441\n5442\n5443\n5444\n5445\n5446\n5447\n5448\n5449\n5450\n5451\n5452\n5453\n5454\n5455\n5456\n5457\n5458\n5459\n5460\n5461\n5462\n5463\n5464\n5465\n5466\n5467\n5468\n5469\n5470\n5471\n5472\n5473\n5474
\n5475\n5476\n5477\n5478\n5479\n5480\n5481\n5482\n5483\n5484\n5485\n5486\n5487\n5488\n5489\n5490\n5491\n5492\n5493\n5494\n5495\n5496\n5497\n5498\n5499\n5500\n5501\n5502\n5503\n5504\n5505\n5506\n5507\n5508\n5509\n5510\n5511\n5512\n5513\n5514\n5515\n5516\n5517\n5518\n5519\n5520\n5521\n5522\n5523\n5524\n5525\n5526\n5527\n5528\n5529\n5530\n5531\n5532\n5533\n5534\n5535\n5536\n5537\n5538\n5539\n5540\n5541\n5542\n5543\n5544\n5545\n5546\n5547\n5548\n5549\n5550\n5551\n5552\n5553\n5554\n5555\n5556\n5557\n5558\n5559\n5560\n5561\n5562\n5563\n5564\n5565\n5566\n5567\n5568\n5569\n5570\n5571\n5572\n5573\n5574\n5575\n5576\n5577\n5578\n5579\n5580\n5581\n5582\n5583\n5584\n5585\n5586\n5587\n5588\n5589\n5590\n5591\n5592\n5593\n5594\n5595\n5596\n5597\n5598\n5599\n5600\n5601\n5602\n5603\n5604\n5605\n5606\n5607\n5608\n5609\n5610\n5611\n5612\n5613\n5614\n5615\n5616\n5617\n5618\n5619\n5620\n5621\n5622\n5623\n5624\n5625\n5626\n5627\n5628\n5629\n5630\n5631\n5632\n5633\n5634\n5635\n5636\n5637\n5638\n5639\n5640\n5641\n5642\n5643\n5644\n5645\n5646\n5647\n5648\n5649\n5650\n5651\n5652\n5653\n5654\n5655\n5656\n5657\n5658\n5659\n5660\n5661\n5662\n5663\n5664\n5665\n5666\n5667\n5668\n5669\n5670\n5671\n5672\n5673\n5674\n5675\n5676\n5677\n5678\n5679\n5680\n5681\n5682\n5683\n5684\n5685\n5686\n5687\n5688\n5689\n5690\n5691\n5692\n5693\n5694\n5695\n5696\n5697\n5698\n5699\n5700\n5701\n5702\n5703\n5704\n5705\n5706\n5707\n5708\n5709\n5710\n5711\n5712\n5713\n5714\n5715\n5716\n5717\n5718\n5719\n5720\n5721\n5722\n5723\n5724\n5725\n5726\n5727\n5728\n5729\n5730\n5731\n5732\n5733\n5734\n5735\n5736\n5737\n5738\n5739\n5740\n5741\n5742\n5743\n5744\n5745\n5746\n5747\n5748\n5749\n5750\n5751\n5752\n5753\n5754\n5755\n5756\n5757\n5758\n5759\n5760\n5761\n5762\n5763\n5764\n5765\n5766\n5767\n5768\n5769\n5770\n5771\n5772\n5773\n5774\n5775\n5776\n5777\n5778\n5779\n5780\n5781\n5782\n5783\n5784\n5785\n5786\n5787\n5788\n5789\n5790\n5791\n5792\n5793\n5794\n5795\n5796\n5797\n5798\n5799\n5800\n5801\n5802\n5803\n5804\n5805\n5806\n5807\n5808\n5809\n5810\n5811\n5812\n5813\n5814\n5815\n5816\n5817\n5818\n5819\n5820\n5821\n5822\n5823\n5824\n5825\n5826\n5827\n5828\n5829\n5830\n5831\n5832\n5833\n5834\n5835\n5836\n5837\n5838\n5839\n5840\n5841\n5842\n5843\n5844\n5845\n5846\n5847\n5848\n5849\n5850\n5851\n5852\n5853\n5854\n5855\n5856\n5857\n5858\n5859\n5860\n5861\n5862\n5863\n5864\n5865\n5866\n5867\n5868\n5869\n5870\n5871\n5872\n5873\n5874\n5875\n5876\n5877\n5878\n5879\n5880\n5881\n5882\n5883\n5884\n5885\n5886\n5887\n5888\n5889\n5890\n5891\n5892\n5893\n5894\n5895\n5896\n5897\n5898\n5899\n5900\n5901\n5902\n5903\n5904\n5905\n5906\n5907\n5908\n5909\n5910\n5911\n5912\n5913\n5914\n5915\n5916\n5917\n5918\n5919\n5920\n5921\n5922\n5923\n5924\n5925\n5926\n5927\n5928\n5929\n5930\n5931\n5932\n5933\n5934\n5935\n5936\n5937\n5938\n5939\n5940\n5941\n5942\n5943\n5944\n5945\n5946\n5947\n5948\n5949\n5950\n5951\n5952\n5953\n5954\n5955\n5956\n5957\n5958\n5959\n5960\n5961\n5962\n5963\n5964\n5965\n5966\n5967\n5968\n5969\n5970\n5971\n5972\n5973\n5974\n5975\n5976\n5977\n5978\n5979\n5980\n5981\n5982\n5983\n5984\n5985\n5986\n5987\n5988\n5989\n5990\n5991\n5992\n5993\n5994\n5995\n5996\n5997\n5998\n5999\n6000\n6001\n6002\n6003\n6004\n6005\n6006\n6007\n6008\n6009\n6010\n6011\n6012\n6013\n6014\n6015\n6016\n6017\n6018\n6019\n6020\n6021\n6022\n6023\n6024\n6025\n6026\n6027\n6028\n6029\n6030\n6031\n6032\n6033\n6034\n6035\n6036\n6037\n6038\n6039\n6040\n6041\n6042\n6043\n6044\n6045\n6046\n6047\n6048\n6049\n6050\n6051\n6052\n6053\n6054\n6055\n6056\n6057\n6058\n6059\n6060\n6061\n6062\n6063\n6064\n6065\n6066\n6
067\n6068\n6069\n6070\n6071\n6072\n6073\n6074\n6075\n6076\n6077\n6078\n6079\n6080\n6081\n6082\n6083\n6084\n6085\n6086\n6087\n6088\n6089\n6090\n6091\n6092\n6093\n6094\n6095\n6096\n6097\n6098\n6099\n6100\n6101\n6102\n6103\n6104\n6105\n6106\n6107\n6108\n6109\n6110\n6111\n6112\n6113\n6114\n6115\n6116\n6117\n6118\n6119\n6120\n6121\n6122\n6123\n6124\n6125\n6126\n6127\n6128\n6129\n6130\n6131\n6132\n6133\n6134\n6135\n6136\n6137\n6138\n6139\n6140\n6141\n6142\n6143\n6144\n6145\n6146\n6147\n6148\n6149\n6150\n6151\n6152\n6153\n6154\n6155\n6156\n6157\n6158\n6159\n6160\n6161\n6162\n6163\n6164\n6165\n6166\n6167\n6168\n6169\n6170\n6171\n6172\n6173\n6174\n6175\n6176\n6177\n6178\n6179\n6180\n6181\n6182\n6183\n6184\n6185\n6186\n6187\n6188\n6189\n6190\n6191\n6192\n6193\n6194\n6195\n6196\n6197\n6198\n6199\n6200\n6201\n6202\n6203\n6204\n6205\n6206\n6207\n6208\n6209\n6210\n6211\n6212\n6213\n6214\n6215\n6216\n6217\n6218\n6219\n6220\n6221\n6222\n6223\n6224\n6225\n6226\n6227\n6228\n6229\n6230\n6231\n6232\n6233\n6234\n6235\n6236\n6237\n6238\n6239\n6240\n6241\n6242\n6243\n6244\n6245\n6246\n6247\n6248\n6249\n6250\n6251\n6252\n6253\n6254\n6255\n6256\n6257\n6258\n6259\n6260\n6261\n6262\n6263\n6264\n6265\n6266\n6267\n6268\n6269\n6270\n6271\n6272\n6273\n6274\n6275\n6276\n6277\n6278\n6279\n6280\n6281\n6282\n6283\n6284\n6285\n6286\n6287\n6288\n6289\n6290\n6291\n6292\n6293\n6294\n6295\n6296\n6297\n6298\n6299\n6300\n6301\n6302\n6303\n6304\n6305\n6306\n6307\n6308\n6309\n6310\n6311\n6312\n6313\n6314\n6315\n6316\n6317\n6318\n6319\n6320\n6321\n6322\n6323\n6324\n6325\n6326\n6327\n6328\n6329\n6330\n6331\n6332\n6333\n6334\n6335\n6336\n6337\n6338\n6339\n6340\n6341\n6342\n6343\n6344\n6345\n6346\n6347\n6348\n6349\n6350\n6351\n6352\n6353\n6354\n6355\n6356\n6357\n6358\n6359\n6360\n6361\n6362\n6363\n6364\n6365\n6366\n6367\n6368\n6369\n6370\n6371\n6372\n6373\n6374\n6375\n6376\n6377\n6378\n6379\n6380\n6381\n6382\n6383\n6384\n6385\n6386\n6387\n6388\n6389\n6390\n6391\n6392\n6393\n6394\n6395\n6396\n6397\n6398\n6399\n6400\n6401\n6402\n6403\n6404\n6405\n6406\n6407\n6408\n6409\n6410\n6411\n6412\n6413\n6414\n6415\n6416\n6417\n6418\n6419\n6420\n6421\n6422\n6423\n6424\n6425\n6426\n6427\n6428\n6429\n6430\n6431\n6432\n6433\n6434\n6435\n6436\n6437\n6438\n6439\n6440\n6441\n6442\n6443\n6444\n6445\n6446\n6447\n6448\n6449\n6450\n6451\n6452\n6453\n6454\n6455\n6456\n6457\n6458\n6459\n6460\n6461\n6462\n6463\n6464\n6465\n6466\n6467\n6468\n6469\n6470\n6471\n6472\n6473\n6474\n6475\n6476\n6477\n6478\n6479\n6480\n6481\n6482\n6483\n6484\n6485\n6486\n6487\n6488\n6489\n6490\n6491\n6492\n6493\n6494\n6495\n6496\n6497\n6498\n6499\n6500\n6501\n6502\n6503\n6504\n6505\n6506\n6507\n6508\n6509\n6510\n6511\n6512\n6513\n6514\n6515\n6516\n6517\n6518\n6519\n6520\n6521\n6522\n6523\n6524\n6525\n6526\n6527\n6528\n6529\n6530\n6531\n6532\n6533\n6534\n6535\n6536\n6537\n6538\n6539\n6540\n6541\n6542\n6543\n6544\n6545\n6546\n6547\n6548\n6549\n6550\n6551\n6552\n6553\n6554\n6555\n6556\n6557\n6558\n6559\n6560\n6561\n6562\n6563\n6564\n6565\n6566\n6567\n6568\n6569\n6570\n6571\n6572\n6573\n6574\n6575\n6576\n6577\n6578\n6579\n6580\n6581\n6582\n6583\n6584\n6585\n6586\n6587\n6588\n6589\n6590\n6591\n6592\n6593\n6594\n6595\n6596\n6597\n6598\n6599\n6600\n6601\n6602\n6603\n6604\n6605\n6606\n6607\n6608\n6609\n6610\n6611\n6612\n6613\n6614\n6615\n6616\n6617\n6618\n6619\n6620\n6621\n6622\n6623\n6624\n6625\n6626\n6627\n6628\n6629\n6630\n6631\n6632\n6633\n6634\n6635\n6636\n6637\n6638\n6639\n6640\n6641\n6642\n6643\n6644\n6645\n6646\n6647\n6648\n6649\n6650\n6651\n6652\n6653\n6654\n6655\n6656\n6657\n6658\n6659
278\n16279\n16280\n16281\n16282\n16283\n16284\n16285\n16286\n16287\n16288\n16289\n16290\n16291\n16292\n16293\n16294\n16295\n16296\n16297\n16298\n16299\n16300\n16301\n16302\n16303\n16304\n16305\n16306\n16307\n16308\n16309\n16310\n16311\n16312\n16313\n16314\n16315\n16316\n16317\n16318\n16319\n16320\n16321\n16322\n16323\n16324\n16325\n16326\n16327\n16328\n16329\n16330\n16331\n16332\n16333\n16334\n16335\n16336\n16337\n16338\n16339\n16340\n16341\n16342\n16343\n16344\n16345\n16346\n16347\n16348\n16349\n16350\n16351\n16352\n16353\n16354\n16355\n16356\n16357\n16358\n16359\n16360\n16361\n16362\n16363\n16364\n16365\n16366\n16367\n16368\n16369\n16370\n16371\n16372\n16373\n16374\n16375\n16376\n16377\n16378\n16379\n16380\n16381\n16382\n16383\n16384\n16385\n16386\n16387\n16388\n16389\n16390\n16391\n16392\n16393\n16394\n16395\n16396\n16397\n16398\n16399\n16400\n16401\n16402\n16403\n16404\n16405\n16406\n16407\n16408\n16409\n16410\n16411\n16412\n16413\n16414\n16415\n16416\n16417\n16418\n16419\n16420\n16421\n16422\n16423\n16424\n16425\n16426\n16427\n16428\n16429\n16430\n16431\n16432\n16433\n16434\n16435\n16436\n16437\n16438\n16439\n16440\n16441\n16442\n16443\n16444\n16445\n16446\n16447\n16448\n16449\n16450\n16451\n16452\n16453\n16454\n16455\n16456\n16457\n16458\n16459\n16460\n16461\n16462\n16463\n16464\n16465\n16466\n16467\n16468\n16469\n16470\n16471\n16472\n16473\n16474\n16475\n16476\n16477\n16478\n16479\n16480\n16481\n16482\n16483\n16484\n16485\n16486\n16487\n16488\n16489\n16490\n16491\n16492\n16493\n16494\n16495\n16496\n16497\n16498\n16499\n16500\n16501\n16502\n16503\n16504\n16505\n16506\n16507\n16508\n16509\n16510\n16511\n16512\n16513\n16514\n16515\n16516\n16517\n16518\n16519\n16520\n16521\n16522\n16523\n16524\n16525\n16526\n16527\n16528\n16529\n16530\n16531\n16532\n16533\n16534\n16535\n16536\n16537\n16538\n16539\n16540\n16541\n16542\n16543\n16544\n16545\n16546\n16547\n16548\n16549\n16550\n16551\n16552\n16553\n16554\n16555\n16556\n16557\n16558\n16559\n16560\n16561\n16562\n16563\n16564\n16565\n16566\n16567\n16568\n16569\n16570\n16571\n16572\n16573\n16574\n16575\n16576\n16577\n16578\n16579\n16580\n16581\n16582\n16583\n16584\n16585\n16586\n16587\n16588\n16589\n16590\n16591\n16592\n16593\n16594\n16595\n16596\n16597\n16598\n16599\n16600\n16601\n16602\n16603\n16604\n16605\n16606\n16607\n16608\n16609\n16610\n16611\n16612\n16613\n16614\n16615\n16616\n16617\n16618\n16619\n16620\n16621\n16622\n16623\n16624\n16625\n16626\n16627\n16628\n16629\n16630\n16631\n16632\n16633\n16634\n16635\n16636\n16637\n16638\n16639\n16640\n16641\n16642\n16643\n16644\n16645\n16646\n16647\n16648\n16649\n16650\n16651\n16652\n16653\n16654\n16655\n16656\n16657\n16658\n16659\n16660\n16661\n16662\n16663\n16664\n16665\n16666\n16667\n16668\n16669\n16670\n16671\n16672\n16673\n16674\n16675\n16676\n16677\n16678\n16679\n16680\n16681\n16682\n16683\n16684\n16685\n16686\n16687\n16688\n16689\n16690\n16691\n16692\n16693\n16694\n16695\n16696\n16697\n16698\n16699\n16700\n16701\n16702\n16703\n16704\n16705\n16706\n16707\n16708\n16709\n16710\n16711\n16712\n16713\n16714\n16715\n16716\n16717\n16718\n16719\n16720\n16721\n16722\n16723\n16724\n16725\n16726\n16727\n16728\n16729\n16730\n16731\n16732\n16733\n16734\n16735\n16736\n16737\n16738\n16739\n16740\n16741\n16742\n16743\n16744\n16745\n16746\n16747\n16748\n16749\n16750\n16751\n16752\n16753\n16754\n16755\n16756\n16757\n16758\n16759\n16760\n16761\n16762\n16763\n16764\n16765\n16766\n16767\n16768\n16769\n16770\n16771\n16772\n16773\n16774\n16775\n16776\n16777\n16778\n16779\n16780\n16781\n16782\n16783\n16784\n16785\n1
6786\n16787\n16788\n16789\n16790\n16791\n16792\n16793\n16794\n16795\n16796\n16797\n16798\n16799\n16800\n16801\n16802\n16803\n16804\n16805\n16806\n16807\n16808\n16809\n16810\n16811\n16812\n16813\n16814\n16815\n16816\n16817\n16818\n16819\n16820\n16821\n16822\n16823\n16824\n16825\n16826\n16827\n16828\n16829\n16830\n16831\n16832\n16833\n16834\n16835\n16836\n16837\n16838\n16839\n16840\n16841\n16842\n16843\n16844\n16845\n16846\n16847\n16848\n16849\n16850\n16851\n16852\n16853\n16854\n16855\n16856\n16857\n16858\n16859\n16860\n16861\n16862\n16863\n16864\n16865\n16866\n16867\n16868\n16869\n16870\n16871\n16872\n16873\n16874\n16875\n16876\n16877\n16878\n16879\n16880\n16881\n16882\n16883\n16884\n16885\n16886\n16887\n16888\n16889\n16890\n16891\n16892\n16893\n16894\n16895\n16896\n16897\n16898\n16899\n16900\n16901\n16902\n16903\n16904\n16905\n16906\n16907\n16908\n16909\n16910\n16911\n16912\n16913\n16914\n16915\n16916\n16917\n16918\n16919\n16920\n16921\n16922\n16923\n16924\n16925\n16926\n16927\n16928\n16929\n16930\n16931\n16932\n16933\n16934\n16935\n16936\n16937\n16938\n16939\n16940\n16941\n16942\n16943\n16944\n16945\n16946\n16947\n16948\n16949\n16950\n16951\n16952\n16953\n16954\n16955\n16956\n16957\n16958\n16959\n16960\n16961\n16962\n16963\n16964\n16965\n16966\n16967\n16968\n16969\n16970\n16971\n16972\n16973\n16974\n16975\n16976\n16977\n16978\n16979\n16980\n16981\n16982\n16983\n16984\n16985\n16986\n16987\n16988\n16989\n16990\n16991\n16992\n16993\n16994\n16995\n16996\n16997\n16998\n16999\n17000\n17001\n17002\n17003\n17004\n17005\n17006\n17007\n17008\n17009\n17010\n17011\n17012\n17013\n17014\n17015\n17016\n17017\n17018\n17019\n17020\n17021\n17022\n17023\n17024\n17025\n17026\n17027\n17028\n17029\n17030\n17031\n17032\n17033\n17034\n17035\n17036\n17037\n17038\n17039\n17040\n17041\n17042\n17043\n17044\n17045\n17046\n17047\n17048\n17049\n17050\n17051\n17052\n17053\n17054\n17055\n17056\n17057\n17058\n17059\n17060\n17061\n17062\n17063\n17064\n17065\n17066\n17067\n17068\n17069\n17070\n17071\n17072\n17073\n17074\n17075\n17076\n17077\n17078\n17079\n17080\n17081\n17082\n17083\n17084\n17085\n17086\n17087\n17088\n17089\n17090\n17091\n17092\n17093\n17094\n17095\n17096\n17097\n17098\n17099\n17100\n17101\n17102\n17103\n17104\n17105\n17106\n17107\n17108\n17109\n17110\n17111\n17112\n17113\n17114\n17115\n17116\n17117\n17118\n17119\n17120\n17121\n17122\n17123\n17124\n17125\n17126\n17127\n17128\n17129\n17130\n17131\n17132\n17133\n17134\n17135\n17136\n17137\n17138\n17139\n17140\n17141\n17142\n17143\n17144\n17145\n17146\n17147\n17148\n17149\n17150\n17151\n17152\n17153\n17154\n17155\n17156\n17157\n17158\n17159\n17160\n17161\n17162\n17163\n17164\n17165\n17166\n17167\n17168\n17169\n17170\n17171\n17172\n17173\n17174\n17175\n17176\n17177\n17178\n17179\n17180\n17181\n17182\n17183\n17184\n17185\n17186\n17187\n17188\n17189\n17190\n17191\n17192\n17193\n17194\n17195\n17196\n17197\n17198\n17199\n17200\n17201\n17202\n17203\n17204\n17205\n17206\n17207\n17208\n17209\n17210\n17211\n17212\n17213\n17214\n17215\n17216\n17217\n17218\n17219\n17220\n17221\n17222\n17223\n17224\n17225\n17226\n17227\n17228\n17229\n17230\n17231\n17232\n17233\n17234\n17235\n17236\n17237\n17238\n17239\n17240\n17241\n17242\n17243\n17244\n17245\n17246\n17247\n17248\n17249\n17250\n17251\n17252\n17253\n17254\n17255\n17256\n17257\n17258\n17259\n17260\n17261\n17262\n17263\n17264\n17265\n17266\n17267\n17268\n17269\n17270\n17271\n17272\n17273\n17274\n17275\n17276\n17277\n17278\n17279\n17280\n17281\n17282\n17283\n17284\n17285\n17286\n17287\n17288\n17289\n17290\n17291\n17292\n17293\n
17294\n17295\n17296\n17297\n17298\n17299\n17300\n17301\n17302\n17303\n17304\n17305\n17306\n17307\n17308\n17309\n17310\n17311\n17312\n17313\n17314\n17315\n17316\n17317\n17318\n17319\n17320\n17321\n17322\n17323\n17324\n17325\n17326\n17327\n17328\n17329\n17330\n17331\n17332\n17333\n17334\n17335\n17336\n17337\n17338\n17339\n17340\n17341\n17342\n17343\n17344\n17345\n17346\n17347\n17348\n17349\n17350\n17351\n17352\n17353\n17354\n17355\n17356\n17357\n17358\n17359\n17360\n17361\n17362\n17363\n17364\n17365\n17366\n17367\n17368\n17369\n17370\n17371\n17372\n17373\n17374\n17375\n17376\n17377\n17378\n17379\n17380\n17381\n17382\n17383\n17384\n17385\n17386\n17387\n17388\n17389\n17390\n17391\n17392\n17393\n17394\n17395\n17396\n17397\n17398\n17399\n17400\n17401\n17402\n17403\n17404\n17405\n17406\n17407\n17408\n17409\n17410\n17411\n17412\n17413\n17414\n17415\n17416\n17417\n17418\n17419\n17420\n17421\n17422\n17423\n17424\n17425\n17426\n17427\n17428\n17429\n17430\n17431\n17432\n17433\n17434\n17435\n17436\n17437\n17438\n17439\n17440\n17441\n17442\n17443\n17444\n17445\n17446\n17447\n17448\n17449\n17450\n17451\n17452\n17453\n17454\n17455\n17456\n17457\n17458\n17459\n17460\n17461\n17462\n17463\n17464\n17465\n17466\n17467\n17468\n17469\n17470\n17471\n17472\n17473\n17474\n17475\n17476\n17477\n17478\n17479\n17480\n17481\n17482\n17483\n17484\n17485\n17486\n17487\n17488\n17489\n17490\n17491\n17492\n17493\n17494\n17495\n17496\n17497\n17498\n17499\n17500\n17501\n17502\n17503\n17504\n17505\n17506\n17507\n17508\n17509\n17510\n17511\n17512\n17513\n17514\n17515\n17516\n17517\n17518\n17519\n17520\n17521\n17522\n17523\n17524\n17525\n17526\n17527\n17528\n17529\n17530\n17531\n17532\n17533\n17534\n17535\n17536\n17537\n17538\n17539\n17540\n17541\n17542\n17543\n17544\n17545\n17546\n17547\n17548\n17549\n17550\n17551\n17552\n17553\n17554\n17555\n17556\n17557\n17558\n17559\n17560\n17561\n17562\n17563\n17564\n17565\n17566\n17567\n17568\n17569\n17570\n17571\n17572\n17573\n17574\n17575\n17576\n17577\n17578\n17579\n17580\n17581\n17582\n17583\n17584\n17585\n17586\n17587\n17588\n17589\n17590\n17591\n17592\n17593\n17594\n17595\n17596\n17597\n17598\n17599\n17600\n17601\n17602\n17603\n17604\n17605\n17606\n17607\n17608\n17609\n17610\n17611\n17612\n17613\n17614\n17615\n17616\n17617\n17618\n17619\n17620\n17621\n17622\n17623\n17624\n17625\n17626\n17627\n17628\n17629\n17630\n17631\n17632\n17633\n17634\n17635\n17636\n17637\n17638\n17639\n17640\n17641\n17642\n17643\n17644\n17645\n17646\n17647\n17648\n17649\n17650\n17651\n17652\n17653\n17654\n17655\n17656\n17657\n17658\n17659\n17660\n17661\n17662\n17663\n17664\n17665\n17666\n17667\n17668\n17669\n17670\n17671\n17672\n17673\n17674\n17675\n17676\n17677\n17678\n17679\n17680\n17681\n17682\n17683\n17684\n17685\n17686\n17687\n17688\n17689\n17690\n17691\n17692\n17693\n17694\n17695\n17696\n17697\n17698\n17699\n17700\n17701\n17702\n17703\n17704\n17705\n17706\n17707\n17708\n17709\n17710\n17711\n17712\n17713\n17714\n17715\n17716\n17717\n17718\n17719\n17720\n17721\n17722\n17723\n17724\n17725\n17726\n17727\n17728\n17729\n17730\n17731\n17732\n17733\n17734\n17735\n17736\n17737\n17738\n17739\n17740\n17741\n17742\n17743\n17744\n17745\n17746\n17747\n17748\n17749\n17750\n17751\n17752\n17753\n17754\n17755\n17756\n17757\n17758\n17759\n17760\n17761\n17762\n17763\n17764\n17765\n17766\n17767\n17768\n17769\n17770\n17771\n17772\n17773\n17774\n17775\n17776\n17777\n17778\n17779\n17780\n17781\n17782\n17783\n17784\n17785\n17786\n17787\n17788\n17789\n17790\n17791\n17792\n17793\n17794\n17795\n17796\n17797\n17798\n17799\n17800\n17801\
n17802\n17803\n17804\n17805\n17806\n17807\n17808\n17809\n17810\n17811\n17812\n17813\n17814\n17815\n17816\n17817\n17818\n17819\n17820\n17821\n17822\n17823\n17824\n17825\n17826\n17827\n17828\n17829\n17830\n17831\n17832\n17833\n17834\n17835\n17836\n17837\n17838\n17839\n17840\n17841\n17842\n17843\n17844\n17845\n17846\n17847\n17848\n17849\n17850\n17851\n17852\n17853\n17854\n17855\n17856\n17857\n17858\n17859\n17860\n17861\n17862\n17863\n17864\n17865\n17866\n17867\n17868\n17869\n17870\n17871\n17872\n17873\n17874\n17875\n17876\n17877\n17878\n17879\n17880\n17881\n17882\n17883\n17884\n17885\n17886\n17887\n17888\n17889\n17890\n17891\n17892\n17893\n17894\n17895\n17896\n17897\n17898\n17899\n17900\n17901\n17902\n17903\n17904\n17905\n17906\n17907\n17908\n17909\n17910\n17911\n17912\n17913\n17914\n17915\n17916\n17917\n17918\n17919\n17920\n17921\n17922\n17923\n17924\n17925\n17926\n17927\n17928\n17929\n17930\n17931\n17932\n17933\n17934\n17935\n17936\n17937\n17938\n17939\n17940\n17941\n17942\n17943\n17944\n17945\n17946\n17947\n17948\n17949\n17950\n17951\n17952\n17953\n17954\n17955\n17956\n17957\n17958\n17959\n17960\n17961\n17962\n17963\n17964\n17965\n17966\n17967\n17968\n17969\n17970\n17971\n17972\n17973\n17974\n17975\n17976\n17977\n17978\n17979\n17980\n17981\n17982\n17983\n17984\n17985\n17986\n17987\n17988\n17989\n17990\n17991\n17992\n17993\n17994\n17995\n17996\n17997\n17998\n17999\n18000\n18001\n18002\n18003\n18004\n18005\n18006\n18007\n18008\n18009\n18010\n18011\n18012\n18013\n18014\n18015\n18016\n18017\n18018\n18019\n18020\n18021\n18022\n18023\n18024\n18025\n18026\n18027\n18028\n18029\n18030\n18031\n18032\n18033\n18034\n18035\n18036\n18037\n18038\n18039\n18040\n18041\n18042\n18043\n18044\n18045\n18046\n18047\n18048\n18049\n18050\n18051\n18052\n18053\n18054\n18055\n18056\n18057\n18058\n18059\n18060\n18061\n18062\n18063\n18064\n18065\n18066\n18067\n18068\n18069\n18070\n18071\n18072\n18073\n18074\n18075\n18076\n18077\n18078\n18079\n18080\n18081\n18082\n18083\n18084\n18085\n18086\n18087\n18088\n18089\n18090\n18091\n18092\n18093\n18094\n18095\n18096\n18097\n18098\n18099\n18100\n18101\n18102\n18103\n18104\n18105\n18106\n18107\n18108\n18109\n18110\n18111\n18112\n18113\n18114\n18115\n18116\n18117\n18118\n18119\n18120\n18121\n18122\n18123\n18124\n18125\n18126\n18127\n18128\n18129\n18130\n18131\n18132\n18133\n18134\n18135\n18136\n18137\n18138\n18139\n18140\n18141\n18142\n18143\n18144\n18145\n18146\n18147\n18148\n18149\n18150\n18151\n18152\n18153\n18154\n18155\n18156\n18157\n18158\n18159\n18160\n18161\n18162\n18163\n18164\n18165\n18166\n18167\n18168\n18169\n18170\n18171\n18172\n18173\n18174\n18175\n18176\n18177\n18178\n18179\n18180\n18181\n18182\n18183\n18184\n18185\n18186\n18187\n18188\n18189\n18190\n18191\n18192\n18193\n18194\n18195\n18196\n18197\n18198\n18199\n18200\n18201\n18202\n18203\n18204\n18205\n18206\n18207\n18208\n18209\n18210\n18211\n18212\n18213\n18214\n18215\n18216\n18217\n18218\n18219\n18220\n18221\n18222\n18223\n18224\n18225\n18226\n18227\n18228\n18229\n18230\n18231\n18232\n18233\n18234\n18235\n18236\n18237\n18238\n18239\n18240\n18241\n18242\n18243\n18244\n18245\n18246\n18247\n18248\n18249\n18250\n18251\n18252\n18253\n18254\n18255\n18256\n18257\n18258\n18259\n18260\n18261\n18262\n18263\n18264\n18265\n18266\n18267\n18268\n18269\n18270\n18271\n18272\n18273\n18274\n18275\n18276\n18277\n18278\n18279\n18280\n18281\n18282\n18283\n18284\n18285\n18286\n18287\n18288\n18289\n18290\n18291\n18292\n18293\n18294\n18295\n18296\n18297\n18298\n18299\n18300\n18301\n18302\n18303\n18304\n18305\n18306\n18307\n18308\n18309
\n18310\n18311\n18312\n18313\n18314\n18315\n18316\n18317\n18318\n18319\n18320\n18321\n18322\n18323\n18324\n18325\n18326\n18327\n18328\n18329\n18330\n18331\n18332\n18333\n18334\n18335\n18336\n18337\n18338\n18339\n18340\n18341\n18342\n18343\n18344\n18345\n18346\n18347\n18348\n18349\n18350\n18351\n18352\n18353\n18354\n18355\n18356\n18357\n18358\n18359\n18360\n18361\n18362\n18363\n18364\n18365\n18366\n18367\n18368\n18369\n18370\n18371\n18372\n18373\n18374\n18375\n18376\n18377\n18378\n18379\n18380\n18381\n18382\n18383\n18384\n18385\n18386\n18387\n18388\n18389\n18390\n18391\n18392\n18393\n18394\n18395\n18396\n18397\n18398\n18399\n18400\n18401\n18402\n18403\n18404\n18405\n18406\n18407\n18408\n18409\n18410\n18411\n18412\n18413\n18414\n18415\n18416\n18417\n18418\n18419\n18420\n18421\n18422\n18423\n18424\n18425\n18426\n18427\n18428\n18429\n18430\n18431\n18432\n18433\n18434\n18435\n18436\n18437\n18438\n18439\n18440\n18441\n18442\n18443\n18444\n18445\n18446\n18447\n18448\n18449\n18450\n18451\n18452\n18453\n18454\n18455\n18456\n18457\n18458\n18459\n18460\n18461\n18462\n18463\n18464\n18465\n18466\n18467\n18468\n18469\n18470\n18471\n18472\n18473\n18474\n18475\n18476\n18477\n18478\n18479\n18480\n18481\n18482\n18483\n18484\n18485\n18486\n18487\n18488\n18489\n18490\n18491\n18492\n18493\n18494\n18495\n18496\n18497\n18498\n18499\n18500\n18501\n18502\n18503\n18504\n18505\n18506\n18507\n18508\n18509\n18510\n18511\n18512\n18513\n18514\n18515\n18516\n18517\n18518\n18519\n18520\n18521\n18522\n18523\n18524\n18525\n18526\n18527\n18528\n18529\n18530\n18531\n18532\n18533\n18534\n18535\n18536\n18537\n18538\n18539\n18540\n18541\n18542\n18543\n18544\n18545\n18546\n18547\n18548\n18549\n18550\n18551\n18552\n18553\n18554\n18555\n18556\n18557\n18558\n18559\n18560\n18561\n18562\n18563\n18564\n18565\n18566\n18567\n18568\n18569\n18570\n18571\n18572\n18573\n18574\n18575\n18576\n18577\n18578\n18579\n18580\n18581\n18582\n18583\n18584\n18585\n18586\n18587\n18588\n18589\n18590\n18591\n18592\n18593\n18594\n18595\n18596\n18597\n18598\n18599\n18600\n18601\n18602\n18603\n18604\n18605\n18606\n18607\n18608\n18609\n18610\n18611\n18612\n18613\n18614\n18615\n18616\n18617\n18618\n18619\n18620\n18621\n18622\n18623\n18624\n18625\n18626\n18627\n18628\n18629\n18630\n18631\n18632\n18633\n18634\n18635\n18636\n18637\n18638\n18639\n18640\n18641\n18642\n18643\n18644\n18645\n18646\n18647\n18648\n18649\n18650\n18651\n18652\n18653\n18654\n18655\n18656\n18657\n18658\n18659\n18660\n18661\n18662\n18663\n18664\n18665\n18666\n18667\n18668\n18669\n18670\n18671\n18672\n18673\n18674\n18675\n18676\n18677\n18678\n18679\n18680\n18681\n18682\n18683\n18684\n18685\n18686\n18687\n18688\n18689\n18690\n18691\n18692\n18693\n18694\n18695\n18696\n18697\n18698\n18699\n18700\n18701\n18702\n18703\n18704\n18705\n18706\n18707\n18708\n18709\n18710\n18711\n18712\n18713\n18714\n18715\n18716\n18717\n18718\n18719\n18720\n18721\n18722\n18723\n18724\n18725\n18726\n18727\n18728\n18729\n18730\n18731\n18732\n18733\n18734\n18735\n18736\n18737\n18738\n18739\n18740\n18741\n18742\n18743\n18744\n18745\n18746\n18747\n18748\n18749\n18750\n18751\n18752\n18753\n18754\n18755\n18756\n18757\n18758\n18759\n18760\n18761\n18762\n18763\n18764\n18765\n18766\n18767\n18768\n18769\n18770\n18771\n18772\n18773\n18774\n18775\n18776\n18777\n18778\n18779\n18780\n18781\n18782\n18783\n18784\n18785\n18786\n18787\n18788\n18789\n18790\n18791\n18792\n18793\n18794\n18795\n18796\n18797\n18798\n18799\n18800\n18801\n18802\n18803\n18804\n18805\n18806\n18807\n18808\n18809\n18810\n18811\n18812\n18813\n18814\n18815\n18816\n1881
7\n18818\n18819\n18820\n18821\n18822\n18823\n18824\n18825\n18826\n18827\n18828\n18829\n18830\n18831\n18832\n18833\n18834\n18835\n18836\n18837\n18838\n18839\n18840\n18841\n18842\n18843\n18844\n18845\n18846\n18847\n18848\n18849\n18850\n18851\n18852\n18853\n18854\n18855\n18856\n18857\n18858\n18859\n18860\n18861\n18862\n18863\n18864\n18865\n18866\n18867\n18868\n18869\n18870\n18871\n18872\n18873\n18874\n18875\n18876\n18877\n18878\n18879\n18880\n18881\n18882\n18883\n18884\n18885\n18886\n18887\n18888\n18889\n18890\n18891\n18892\n18893\n18894\n18895\n18896\n18897\n18898\n18899\n18900\n18901\n18902\n18903\n18904\n18905\n18906\n18907\n18908\n18909\n18910\n18911\n18912\n18913\n18914\n18915\n18916\n18917\n18918\n18919\n18920\n18921\n18922\n18923\n18924\n18925\n18926\n18927\n18928\n18929\n18930\n18931\n18932\n18933\n18934\n18935\n18936\n18937\n18938\n18939\n18940\n18941\n18942\n18943\n18944\n18945\n18946\n18947\n18948\n18949\n18950\n18951\n18952\n18953\n18954\n18955\n18956\n18957\n18958\n18959\n18960\n18961\n18962\n18963\n18964\n18965\n18966\n18967\n18968\n18969\n18970\n18971\n18972\n18973\n18974\n18975\n18976\n18977\n18978\n18979\n18980\n18981\n18982\n18983\n18984\n18985\n18986\n18987\n18988\n18989\n18990\n18991\n18992\n18993\n18994\n18995\n18996\n18997\n18998\n18999\n19000\n19001\n19002\n19003\n19004\n19005\n19006\n19007\n19008\n19009\n19010\n19011\n19012\n19013\n19014\n19015\n19016\n19017\n19018\n19019\n19020\n19021\n19022\n19023\n19024\n19025\n19026\n19027\n19028\n19029\n19030\n19031\n19032\n19033\n19034\n19035\n19036\n19037\n19038\n19039\n19040\n19041\n19042\n19043\n19044\n19045\n19046\n19047\n19048\n19049\n19050\n19051\n19052\n19053\n19054\n19055\n19056\n19057\n19058\n19059\n19060\n19061\n19062\n19063\n19064\n19065\n19066\n19067\n19068\n19069\n19070\n19071\n19072\n19073\n19074\n19075\n19076\n19077\n19078\n19079\n19080\n19081\n19082\n19083\n19084\n19085\n19086\n19087\n19088\n19089\n19090\n19091\n19092\n19093\n19094\n19095\n19096\n19097\n19098\n19099\n19100\n19101\n19102\n19103\n19104\n19105\n19106\n19107\n19108\n19109\n19110\n19111\n19112\n19113\n19114\n19115\n19116\n19117\n19118\n19119\n19120\n19121\n19122\n19123\n19124\n19125\n19126\n19127\n19128\n19129\n19130\n19131\n19132\n19133\n19134\n19135\n19136\n19137\n19138\n19139\n19140\n19141\n19142\n19143\n19144\n19145\n19146\n19147\n19148\n19149\n19150\n19151\n19152\n19153\n19154\n19155\n19156\n19157\n19158\n19159\n19160\n19161\n19162\n19163\n19164\n19165\n19166\n19167\n19168\n19169\n19170\n19171\n19172\n19173\n19174\n19175\n19176\n19177\n19178\n19179\n19180\n19181\n19182\n19183\n19184\n19185\n19186\n19187\n19188\n19189\n19190\n19191\n19192\n19193\n19194\n19195\n19196\n19197\n19198\n19199\n19200\n19201\n19202\n19203\n19204\n19205\n19206\n19207\n19208\n19209\n19210\n19211\n19212\n19213\n19214\n19215\n19216\n19217\n19218\n19219\n19220\n19221\n19222\n19223\n19224\n19225\n19226\n19227\n19228\n19229\n19230\n19231\n19232\n19233\n19234\n19235\n19236\n19237\n19238\n19239\n19240\n19241\n19242\n19243\n19244\n19245\n19246\n19247\n19248\n19249\n19250\n19251\n19252\n19253\n19254\n19255\n19256\n19257\n19258\n19259\n19260\n19261\n19262\n19263\n19264\n19265\n19266\n19267\n19268\n19269\n19270\n19271\n19272\n19273\n19274\n19275\n19276\n19277\n19278\n19279\n19280\n19281\n19282\n19283\n19284\n19285\n19286\n19287\n19288\n19289\n19290\n19291\n19292\n19293\n19294\n19295\n19296\n19297\n19298\n19299\n19300\n19301\n19302\n19303\n19304\n19305\n19306\n19307\n19308\n19309\n19310\n19311\n19312\n19313\n19314\n19315\n19316\n19317\n19318\n19319\n19320\n19321\n19322\n19323\n19324\n193
25\n19326\n19327\n19328\n19329\n19330\n19331\n19332\n19333\n19334\n19335\n19336\n19337\n19338\n19339\n19340\n19341\n19342\n19343\n19344\n19345\n19346\n19347\n19348\n19349\n19350\n19351\n19352\n19353\n19354\n19355\n19356\n19357\n19358\n19359\n19360\n19361\n19362\n19363\n19364\n19365\n19366\n19367\n19368\n19369\n19370\n19371\n19372\n19373\n19374\n19375\n19376\n19377\n19378\n19379\n19380\n19381\n19382\n19383\n19384\n19385\n19386\n19387\n19388\n19389\n19390\n19391\n19392\n19393\n19394\n19395\n19396\n19397\n19398\n19399\n19400\n19401\n19402\n19403\n19404\n19405\n19406\n19407\n19408\n19409\n19410\n19411\n19412\n19413\n19414\n19415\n19416\n19417\n19418\n19419\n19420\n19421\n19422\n19423\n19424\n19425\n19426\n19427\n19428\n19429\n19430\n19431\n19432\n19433\n19434\n19435\n19436\n19437\n19438\n19439\n19440\n19441\n19442\n19443\n19444\n19445\n19446\n19447\n19448\n19449\n19450\n19451\n19452\n19453\n19454\n19455\n19456\n19457\n19458\n19459\n19460\n19461\n19462\n19463\n19464\n19465\n19466\n19467\n19468\n19469\n19470\n19471\n19472\n19473\n19474\n19475\n19476\n19477\n19478\n19479\n19480\n19481\n19482\n19483\n19484\n19485\n19486\n19487\n19488\n19489\n19490\n19491\n19492\n19493\n19494\n19495\n19496\n19497\n19498\n19499\n19500\n19501\n19502\n19503\n19504\n19505\n19506\n19507\n19508\n19509\n19510\n19511\n19512\n19513\n19514\n19515\n19516\n19517\n19518\n19519\n19520\n19521\n19522\n19523\n19524\n19525\n19526\n19527\n19528\n19529\n19530\n19531\n19532\n19533\n19534\n19535\n19536\n19537\n19538\n19539\n19540\n19541\n19542\n19543\n19544\n19545\n19546\n19547\n19548\n19549\n19550\n19551\n19552\n19553\n19554\n19555\n19556\n19557\n19558\n19559\n19560\n19561\n19562\n19563\n19564\n19565\n19566\n19567\n19568\n19569\n19570\n19571\n19572\n19573\n19574\n19575\n19576\n19577\n19578\n19579\n19580\n19581\n19582\n19583\n19584\n19585\n19586\n19587\n19588\n19589\n19590\n19591\n19592\n19593\n19594\n19595\n19596\n19597\n19598\n19599\n19600\n19601\n19602\n19603\n19604\n19605\n19606\n19607\n19608\n19609\n19610\n19611\n19612\n19613\n19614\n19615\n19616\n19617\n19618\n19619\n19620\n19621\n19622\n19623\n19624\n19625\n19626\n19627\n19628\n19629\n19630\n19631\n19632\n19633\n19634\n19635\n19636\n19637\n19638\n19639\n19640\n19641\n19642\n19643\n19644\n19645\n19646\n19647\n19648\n19649\n19650\n19651\n19652\n19653\n19654\n19655\n19656\n19657\n19658\n19659\n19660\n19661\n19662\n19663\n19664\n19665\n19666\n19667\n19668\n19669\n19670\n19671\n19672\n19673\n19674\n19675\n19676\n19677\n19678\n19679\n19680\n19681\n19682\n19683\n19684\n19685\n19686\n19687\n19688\n19689\n19690\n19691\n19692\n19693\n19694\n19695\n19696\n19697\n19698\n19699\n19700\n19701\n19702\n19703\n19704\n19705\n19706\n19707\n19708\n19709\n19710\n19711\n19712\n19713\n19714\n19715\n19716\n19717\n19718\n19719\n19720\n19721\n19722\n19723\n19724\n19725\n19726\n19727\n19728\n19729\n19730\n19731\n19732\n19733\n19734\n19735\n19736\n19737\n19738\n19739\n19740\n19741\n19742\n19743\n19744\n19745\n19746\n19747\n19748\n19749\n19750\n19751\n19752\n19753\n19754\n19755\n19756\n19757\n19758\n19759\n19760\n19761\n19762\n19763\n19764\n19765\n19766\n19767\n19768\n19769\n19770\n19771\n19772\n19773\n19774\n19775\n19776\n19777\n19778\n19779\n19780\n19781\n19782\n19783\n19784\n19785\n19786\n19787\n19788\n19789\n19790\n19791\n19792\n19793\n19794\n19795\n19796\n19797\n19798\n19799\n19800\n19801\n19802\n19803\n19804\n19805\n19806\n19807\n19808\n19809\n19810\n19811\n19812\n19813\n19814\n19815\n19816\n19817\n19818\n19819\n19820\n19821\n19822\n19823\n19824\n19825\n19826\n19827\n19828\n19829\n19830\n19831\n19832\n19
833\n19834\n19835\n19836\n19837\n19838\n19839\n19840\n19841\n19842\n19843\n19844\n19845\n19846\n19847\n19848\n19849\n19850\n19851\n19852\n19853\n19854\n19855\n19856\n19857\n19858\n19859\n19860\n19861\n19862\n19863\n19864\n19865\n19866\n19867\n19868\n19869\n19870\n19871\n19872\n19873\n19874\n19875\n19876\n19877\n19878\n19879\n19880\n19881\n19882\n19883\n19884\n19885\n19886\n19887\n19888\n19889\n19890\n19891\n19892\n19893\n19894\n19895\n19896\n19897\n19898\n19899\n19900\n19901\n19902\n19903\n19904\n19905\n19906\n19907\n19908\n19909\n19910\n19911\n19912\n19913\n19914\n19915\n19916\n19917\n19918\n19919\n19920\n19921\n19922\n19923\n19924\n19925\n19926\n19927\n19928\n19929\n19930\n19931\n19932\n19933\n19934\n19935\n19936\n19937\n19938\n19939\n19940\n19941\n19942\n19943\n19944\n19945\n19946\n19947\n19948\n19949\n19950\n19951\n19952\n19953\n19954\n19955\n19956\n19957\n19958\n19959\n19960\n19961\n19962\n19963\n19964\n19965\n19966\n19967\n19968\n19969\n19970\n19971\n19972\n19973\n19974\n19975\n19976\n19977\n19978\n19979\n19980\n19981\n19982\n19983\n19984\n19985\n19986\n19987\n19988\n19989\n19990\n19991\n19992\n19993\n19994\n19995\n19996\n19997\n19998\n19999\n20000\n20001\n20002\n20003\n20004\n20005\n20006\n20007\n20008\n20009\n20010\n20011\n20012\n20013\n20014\n20015\n20016\n20017\n20018\n20019\n20020\n20021\n20022\n20023\n20024\n20025\n20026\n20027\n20028\n20029\n20030\n20031\n20032\n20033\n20034\n20035\n20036\n20037\n20038\n20039\n20040\n20041\n20042\n20043\n20044\n20045\n20046\n20047\n20048\n20049\n20050\n20051\n20052\n20053\n20054\n20055\n20056\n20057\n20058\n20059\n20060\n20061\n20062\n20063\n20064\n20065\n20066\n20067\n20068\n20069\n20070\n20071\n20072\n20073\n20074\n20075\n20076\n20077\n20078\n20079\n20080\n20081\n20082\n20083\n20084\n20085\n20086\n20087\n20088\n20089\n20090\n20091\n20092\n20093\n20094\n20095\n20096\n20097\n20098\n20099\n20100\n20101\n20102\n20103\n20104\n20105\n20106\n20107\n20108\n20109\n20110\n20111\n20112\n20113\n20114\n20115\n20116\n20117\n20118\n20119\n20120\n20121\n20122\n20123\n20124\n20125\n20126\n20127\n20128\n20129\n20130\n20131\n20132\n20133\n20134\n20135\n20136\n20137\n20138\n20139\n20140\n20141\n20142\n20143\n20144\n20145\n20146\n20147\n20148\n20149\n20150\n20151\n20152\n20153\n20154\n20155\n20156\n20157\n20158\n20159\n20160\n20161\n20162\n20163\n20164\n20165\n20166\n20167\n20168\n20169\n20170\n20171\n20172\n20173\n20174\n20175\n20176\n20177\n20178\n20179\n20180\n20181\n20182\n20183\n20184\n20185\n20186\n20187\n20188\n20189\n20190\n20191\n20192\n20193\n20194\n20195\n20196\n20197\n20198\n20199\n20200\n20201\n20202\n20203\n20204\n20205\n20206\n20207\n20208\n20209\n20210\n20211\n20212\n20213\n20214\n20215\n20216\n20217\n20218\n20219\n20220\n20221\n20222\n20223\n20224\n20225\n20226\n20227\n20228\n20229\n20230\n20231\n20232\n20233\n20234\n20235\n20236\n20237\n20238\n20239\n20240\n20241\n20242\n20243\n20244\n20245\n20246\n20247\n20248\n20249\n20250\n20251\n20252\n20253\n20254\n20255\n20256\n20257\n20258\n20259\n20260\n20261\n20262\n20263\n20264\n20265\n20266\n20267\n20268\n20269\n20270\n20271\n20272\n20273\n20274\n20275\n20276\n20277\n20278\n20279\n20280\n20281\n20282\n20283\n20284\n20285\n20286\n20287\n20288\n20289\n20290\n20291\n20292\n20293\n20294\n20295\n20296\n20297\n20298\n20299\n20300\n20301\n20302\n20303\n20304\n20305\n20306\n20307\n20308\n20309\n20310\n20311\n20312\n20313\n20314\n20315\n20316\n20317\n20318\n20319\n20320\n20321\n20322\n20323\n20324\n20325\n20326\n20327\n20328\n20329\n20330\n20331\n20332\n20333\n20334\n20335\n20336\n20337\n20338\n20339\n20340\n2
0341\n20342\n20343\n20344\n20345\n20346\n20347\n20348\n20349\n20350\n20351\n20352\n20353\n20354\n20355\n20356\n20357\n20358\n20359\n20360\n20361\n20362\n20363\n20364\n20365\n20366\n20367\n20368\n20369\n20370\n20371\n20372\n20373\n20374\n20375\n20376\n20377\n20378\n20379\n20380\n20381\n20382\n20383\n20384\n20385\n20386\n20387\n20388\n20389\n20390\n20391\n20392\n20393\n20394\n20395\n20396\n20397\n20398\n20399\n20400\n20401\n20402\n20403\n20404\n20405\n20406\n20407\n20408\n20409\n20410\n20411\n20412\n20413\n20414\n20415\n20416\n20417\n20418\n20419\n20420\n20421\n20422\n20423\n20424\n20425\n20426\n20427\n20428\n20429\n20430\n20431\n20432\n20433\n20434\n20435\n20436\n20437\n20438\n20439\n20440\n20441\n20442\n20443\n20444\n20445\n20446\n20447\n20448\n20449\n20450\n20451\n20452\n20453\n20454\n20455\n20456\n20457\n20458\n20459\n20460\n20461\n20462\n20463\n20464\n20465\n20466\n20467\n20468\n20469\n20470\n20471\n20472\n20473\n20474\n20475\n20476\n20477\n20478\n20479\n20480\n20481\n20482\n20483\n20484\n20485\n20486\n20487\n20488\n20489\n20490\n20491\n20492\n20493\n20494\n20495\n20496\n20497\n20498\n20499\n20500\n20501\n20502\n20503\n20504\n20505\n20506\n20507\n20508\n20509\n20510\n20511\n20512\n20513\n20514\n20515\n20516\n20517\n20518\n20519\n20520\n20521\n20522\n20523\n20524\n20525\n20526\n20527\n20528\n20529\n20530\n20531\n20532\n20533\n20534\n20535\n20536\n20537\n20538\n20539\n20540\n20541\n20542\n20543\n20544\n20545\n20546\n20547\n20548\n20549\n20550\n20551\n20552\n20553\n20554\n20555\n20556\n20557\n20558\n20559\n20560\n20561\n20562\n20563\n20564\n20565\n20566\n20567\n20568\n20569\n20570\n20571\n20572\n20573\n20574\n20575\n20576\n20577\n20578\n20579\n20580\n20581\n20582\n20583\n20584\n20585\n20586\n20587\n20588\n20589\n20590\n20591\n20592\n20593\n20594\n20595\n20596\n20597\n20598\n20599\n20600\n20601\n20602\n20603\n20604\n20605\n20606\n20607\n20608\n20609\n20610\n20611\n20612\n20613\n20614\n20615\n20616\n20617\n20618\n20619\n20620\n20621\n20622\n20623\n20624\n20625\n20626\n20627\n20628\n20629\n20630\n20631\n20632\n20633\n20634\n20635\n20636\n20637\n20638\n20639\n20640\n20641\n20642\n20643\n20644\n20645\n20646\n20647\n20648\n20649\n20650\n20651\n20652\n20653\n20654\n20655\n20656\n20657\n20658\n20659\n20660\n20661\n20662\n20663\n20664\n20665\n20666\n20667\n20668\n20669\n20670\n20671\n20672\n20673\n20674\n20675\n20676\n20677\n20678\n20679\n20680\n20681\n20682\n20683\n20684\n20685\n20686\n20687\n20688\n20689\n20690\n20691\n20692\n20693\n20694\n20695\n20696\n20697\n20698\n20699\n20700\n20701\n20702\n20703\n20704\n20705\n20706\n20707\n20708\n20709\n20710\n20711\n20712\n20713\n20714\n20715\n20716\n20717\n20718\n20719\n20720\n20721\n20722\n20723\n20724\n20725\n20726\n20727\n20728\n20729\n20730\n20731\n20732\n20733\n20734\n20735\n20736\n20737\n20738\n20739\n20740\n20741\n20742\n20743\n20744\n20745\n20746\n20747\n20748\n20749\n20750\n20751\n20752\n20753\n20754\n20755\n20756\n20757\n20758\n20759\n20760\n20761\n20762\n20763\n20764\n20765\n20766\n20767\n20768\n20769\n20770\n20771\n20772\n20773\n20774\n20775\n20776\n20777\n20778\n20779\n20780\n20781\n20782\n20783\n20784\n20785\n20786\n20787\n20788\n20789\n20790\n20791\n20792\n20793\n20794\n20795\n20796\n20797\n20798\n20799\n20800\n20801\n20802\n20803\n20804\n20805\n20806\n20807\n20808\n20809\n20810\n20811\n20812\n20813\n20814\n20815\n20816\n20817\n20818\n20819\n20820\n20821\n20822\n20823\n20824\n20825\n20826\n20827\n20828\n20829\n20830\n20831\n20832\n20833\n20834\n20835\n20836\n20837\n20838\n20839\n20840\n20841\n20842\n20843\n20844\n20845\n20846\n20847\n20848\n
20849\n20850\n20851\n20852\n20853\n20854\n20855\n20856\n20857\n20858\n20859\n20860\n20861\n20862\n20863\n20864\n20865\n20866\n20867\n20868\n20869\n20870\n20871\n20872\n20873\n20874\n20875\n20876\n20877\n20878\n20879\n20880\n20881\n20882\n20883\n20884\n20885\n20886\n20887\n20888\n20889\n20890\n20891\n20892\n20893\n20894\n20895\n20896\n20897\n20898\n20899\n20900\n20901\n20902\n20903\n20904\n20905\n20906\n20907\n20908\n20909\n20910\n20911\n20912\n20913\n20914\n20915\n20916\n20917\n20918\n20919\n20920\n20921\n20922\n20923\n20924\n20925\n20926\n20927\n20928\n20929\n20930\n20931\n20932\n20933\n20934\n20935\n20936\n20937\n20938\n20939\n20940\n20941\n20942\n20943\n20944\n20945\n20946\n20947\n20948\n20949\n20950\n20951\n20952\n20953\n20954\n20955\n20956\n20957\n20958\n20959\n20960\n20961\n20962\n20963\n20964\n20965\n20966\n20967\n20968\n20969\n20970\n20971\n20972\n20973\n20974\n20975\n20976\n20977\n20978\n20979\n20980\n20981\n20982\n20983\n20984\n20985\n20986\n20987\n20988\n20989\n20990\n20991\n20992\n20993\n20994\n20995\n20996\n20997\n20998\n20999\n21000\n21001\n21002\n21003\n21004\n21005\n21006\n21007\n21008\n21009\n21010\n21011\n21012\n21013\n21014\n21015\n21016\n21017\n21018\n21019\n21020\n21021\n21022\n21023\n21024\n21025\n21026\n21027\n21028\n21029\n21030\n21031\n21032\n21033\n21034\n21035\n21036\n21037\n21038\n21039\n21040\n21041\n21042\n21043\n21044\n21045\n21046\n21047\n21048\n21049\n21050\n21051\n21052\n21053\n21054\n21055\n21056\n21057\n21058\n21059\n21060\n21061\n21062\n21063\n21064\n21065\n21066\n21067\n21068\n21069\n21070\n21071\n21072\n21073\n21074\n21075\n21076\n21077\n21078\n21079\n21080\n21081\n21082\n21083\n21084\n21085\n21086\n21087\n21088\n21089\n21090\n21091\n21092\n21093\n21094\n21095\n21096\n21097\n21098\n21099\n21100\n21101\n21102\n21103\n21104\n21105\n21106\n21107\n21108\n21109\n21110\n21111\n21112\n21113\n21114\n21115\n21116\n21117\n21118\n21119\n21120\n21121\n21122\n21123\n21124\n21125\n21126\n21127\n21128\n21129\n21130\n21131\n21132\n21133\n21134\n21135\n21136\n21137\n21138\n21139\n21140\n21141\n21142\n21143\n21144\n21145\n21146\n21147\n21148\n21149\n21150\n21151\n21152\n21153\n21154\n21155\n21156\n21157\n21158\n21159\n21160\n21161\n21162\n21163\n21164\n21165\n21166\n21167\n21168\n21169\n21170\n21171\n21172\n21173\n21174\n21175\n21176\n21177\n21178\n21179\n21180\n21181\n21182\n21183\n21184\n21185\n21186\n21187\n21188\n21189\n21190\n21191\n21192\n21193\n21194\n21195\n21196\n21197\n21198\n21199\n21200\n21201\n21202\n21203\n21204\n21205\n21206\n21207\n21208\n21209\n21210\n21211\n21212\n21213\n21214\n21215\n21216\n21217\n21218\n21219\n21220\n21221\n21222\n21223\n21224\n21225\n21226\n21227\n21228\n21229\n21230\n21231\n21232\n21233\n21234\n21235\n21236\n21237\n21238\n21239\n21240\n21241\n21242\n21243\n21244\n21245\n21246\n21247\n21248\n21249\n21250\n21251\n21252\n21253\n21254\n21255\n21256\n21257\n21258\n21259\n21260\n21261\n21262\n21263\n21264\n21265\n21266\n21267\n21268\n21269\n21270\n21271\n21272\n21273\n21274\n21275\n21276\n21277\n21278\n21279\n21280\n21281\n21282\n21283\n21284\n21285\n21286\n21287\n21288\n21289\n21290\n21291\n21292\n21293\n21294\n21295\n21296\n21297\n21298\n21299\n21300\n21301\n21302\n21303\n21304\n21305\n21306\n21307\n21308\n21309\n21310\n21311\n21312\n21313\n21314\n21315\n21316\n21317\n21318\n21319\n21320\n21321\n21322\n21323\n21324\n21325\n21326\n21327\n21328\n21329\n21330\n21331\n21332\n21333\n21334\n21335\n21336\n21337\n21338\n21339\n21340\n21341\n21342\n21343\n21344\n21345\n21346\n21347\n21348\n21349\n21350\n21351\n21352\n21353\n21354\n21355\n21356\
n21357\n21358\n21359\n21360\n21361\n21362\n21363\n21364\n21365\n21366\n21367\n21368\n21369\n21370\n21371\n21372\n21373\n21374\n21375\n21376\n21377\n21378\n21379\n21380\n21381\n21382\n21383\n21384\n21385\n21386\n21387\n21388\n21389\n21390\n21391\n21392\n21393\n21394\n21395\n21396\n21397\n21398\n21399\n21400\n21401\n21402\n21403\n21404\n21405\n21406\n21407\n21408\n21409\n21410\n21411\n21412\n21413\n21414\n21415\n21416\n21417\n21418\n21419\n21420\n21421\n21422\n21423\n21424\n21425\n21426\n21427\n21428\n21429\n21430\n21431\n21432\n21433\n21434\n21435\n21436\n21437\n21438\n21439\n21440\n21441\n21442\n21443\n21444\n21445\n21446\n21447\n21448\n21449\n21450\n21451\n21452\n21453\n21454\n21455\n21456\n21457\n21458\n21459\n21460\n21461\n21462\n21463\n21464\n21465\n21466\n21467\n21468\n21469\n21470\n21471\n21472\n21473\n21474\n21475\n21476\n21477\n21478\n21479\n21480\n21481\n21482\n21483\n21484\n21485\n21486\n21487\n21488\n21489\n21490\n21491\n21492\n21493\n21494\n21495\n21496\n21497\n21498\n21499\n21500\n21501\n21502\n21503\n21504\n21505\n21506\n21507\n21508\n21509\n21510\n21511\n21512\n21513\n21514\n21515\n21516\n21517\n21518\n21519\n21520\n21521\n21522\n21523\n21524\n21525\n21526\n21527\n21528\n21529\n21530\n21531\n21532\n21533\n21534\n21535\n21536\n21537\n21538\n21539\n21540\n21541\n21542\n21543\n21544\n21545\n21546\n21547\n21548\n21549\n21550\n21551\n21552\n21553\n21554\n21555\n21556\n21557\n21558\n21559\n21560\n21561\n21562\n21563\n21564\n21565\n21566\n21567\n21568\n21569\n21570\n21571\n21572\n21573\n21574\n21575\n21576\n21577\n21578\n21579\n21580\n21581\n21582\n21583\n21584\n21585\n21586\n21587\n21588\n21589\n21590\n21591\n21592\n21593\n21594\n21595\n21596\n21597\n21598\n21599\n21600\n21601\n21602\n21603\n21604\n21605\n21606\n21607\n21608\n21609\n21610\n21611\n21612\n21613\n21614\n21615\n21616\n21617\n21618\n21619\n21620\n21621\n21622\n21623\n21624\n21625\n21626\n21627\n21628\n21629\n21630\n21631\n21632\n21633\n21634\n21635\n21636\n21637\n21638\n21639\n21640\n21641\n21642\n21643\n21644\n21645\n21646\n21647\n21648\n21649\n21650\n21651\n21652\n21653\n21654\n21655\n21656\n21657\n21658\n21659\n21660\n21661\n21662\n21663\n21664\n21665\n21666\n21667\n21668\n21669\n21670\n21671\n21672\n21673\n21674\n21675\n21676\n21677\n21678\n21679\n21680\n21681\n21682\n21683\n21684\n21685\n21686\n21687\n21688\n21689\n21690\n21691\n21692\n21693\n21694\n21695\n21696\n21697\n21698\n21699\n21700\n21701\n21702\n21703\n21704\n21705\n21706\n21707\n21708\n21709\n21710\n21711\n21712\n21713\n21714\n21715\n21716\n21717\n21718\n21719\n21720\n21721\n21722\n21723\n21724\n21725\n21726\n21727\n21728\n21729\n21730\n21731\n21732\n21733\n21734\n21735\n21736\n21737\n21738\n21739\n21740\n21741\n21742\n21743\n21744\n21745\n21746\n21747\n21748\n21749\n21750\n21751\n21752\n21753\n21754\n21755\n21756\n21757\n21758\n21759\n21760\n21761\n21762\n21763\n21764\n21765\n21766\n21767\n21768\n21769\n21770\n21771\n21772\n21773\n21774\n21775\n21776\n21777\n21778\n21779\n21780\n21781\n21782\n21783\n21784\n21785\n21786\n21787\n21788\n21789\n21790\n21791\n21792\n21793\n21794\n21795\n21796\n21797\n21798\n21799\n21800\n21801\n21802\n21803\n21804\n21805\n21806\n21807\n21808\n21809\n21810\n21811\n21812\n21813\n21814\n21815\n21816\n21817\n21818\n21819\n21820\n21821\n21822\n21823\n21824\n21825\n21826\n21827\n21828\n21829\n21830\n21831\n21832\n21833\n21834\n21835\n21836\n21837\n21838\n21839\n21840\n21841\n21842\n21843\n21844\n21845\n21846\n21847\n21848\n21849\n21850\n21851\n21852\n21853\n21854\n21855\n21856\n21857\n21858\n21859\n21860\n21861\n21862\n21863\n21864
\n21865\n21866\n21867\n21868\n21869\n21870\n21871\n21872\n21873\n21874\n21875\n21876\n21877\n21878\n21879\n21880\n21881\n21882\n21883\n21884\n21885\n21886\n21887\n21888\n21889\n21890\n21891\n21892\n21893\n21894\n21895\n21896\n21897\n21898\n21899\n21900\n21901\n21902\n21903\n21904\n21905\n21906\n21907\n21908\n21909\n21910\n21911\n21912\n21913\n21914\n21915\n21916\n21917\n21918\n21919\n21920\n21921\n21922\n21923\n21924\n21925\n21926\n21927\n21928\n21929\n21930\n21931\n21932\n21933\n21934\n21935\n21936\n21937\n21938\n21939\n21940\n21941\n21942\n21943\n21944\n21945\n21946\n21947\n21948\n21949\n21950\n21951\n21952\n21953\n21954\n21955\n21956\n21957\n21958\n21959\n21960\n21961\n21962\n21963\n21964\n21965\n21966\n21967\n21968\n21969\n21970\n21971\n21972\n21973\n21974\n21975\n21976\n21977\n21978\n21979\n21980\n21981\n21982\n21983\n21984\n21985\n21986\n21987\n21988\n21989\n21990\n21991\n21992\n21993\n21994\n21995\n21996\n21997\n21998\n21999\n22000\n22001\n22002\n22003\n22004\n22005\n22006\n22007\n22008\n22009\n22010\n22011\n22012\n22013\n22014\n22015\n22016\n22017\n22018\n22019\n22020\n22021\n22022\n22023\n22024\n22025\n22026\n22027\n22028\n22029\n22030\n22031\n22032\n22033\n22034\n22035\n22036\n22037\n22038\n22039\n22040\n22041\n22042\n22043\n22044\n22045\n22046\n22047\n22048\n22049\n22050\n22051\n22052\n22053\n22054\n22055\n22056\n22057\n22058\n22059\n22060\n22061\n22062\n22063\n22064\n22065\n22066\n22067\n22068\n22069\n22070\n22071\n22072\n22073\n22074\n22075\n22076\n22077\n22078\n22079\n22080\n22081\n22082\n22083\n22084\n22085\n22086\n22087\n22088\n22089\n22090\n22091\n22092\n22093\n22094\n22095\n22096\n22097\n22098\n22099\n22100\n22101\n22102\n22103\n22104\n22105\n22106\n22107\n22108\n22109\n22110\n22111\n22112\n22113\n22114\n22115\n22116\n22117\n22118\n22119\n22120\n22121\n22122\n22123\n22124\n22125\n22126\n22127\n22128\n22129\n22130\n22131\n22132\n22133\n22134\n22135\n22136\n22137\n22138\n22139\n22140\n22141\n22142\n22143\n22144\n22145\n22146\n22147\n22148\n22149\n22150\n22151\n22152\n22153\n22154\n22155\n22156\n22157\n22158\n22159\n22160\n22161\n22162\n22163\n22164\n22165\n22166\n22167\n22168\n22169\n22170\n22171\n22172\n22173\n22174\n22175\n22176\n22177\n22178\n22179\n22180\n22181\n22182\n22183\n22184\n22185\n22186\n22187\n22188\n22189\n22190\n22191\n22192\n22193\n22194\n22195\n22196\n22197\n22198\n22199\n22200\n22201\n22202\n22203\n22204\n22205\n22206\n22207\n22208\n22209\n22210\n22211\n22212\n22213\n22214\n22215\n22216\n22217\n22218\n22219\n22220\n22221\n22222\n22223\n22224\n22225\n22226\n22227\n22228\n22229\n22230\n22231\n22232\n22233\n22234\n22235\n22236\n22237\n22238\n22239\n22240\n22241\n22242\n22243\n22244\n22245\n22246\n22247\n22248\n22249\n22250\n22251\n22252\n22253\n22254\n22255\n22256\n22257\n22258\n22259\n22260\n22261\n22262\n22263\n22264\n22265\n22266\n22267\n22268\n22269\n22270\n22271\n22272\n22273\n22274\n22275\n22276\n22277\n22278\n22279\n22280\n22281\n22282\n22283\n22284\n22285\n22286\n22287\n22288\n22289\n22290\n22291\n22292\n22293\n22294\n22295\n22296\n22297\n22298\n22299\n22300\n22301\n22302\n22303\n22304\n22305\n22306\n22307\n22308\n22309\n22310\n22311\n22312\n22313\n22314\n22315\n22316\n22317\n22318\n22319\n22320\n22321\n22322\n22323\n22324\n22325\n22326\n22327\n22328\n22329\n22330\n22331\n22332\n22333\n22334\n22335\n22336\n22337\n22338\n22339\n22340\n22341\n22342\n22343\n22344\n22345\n22346\n22347\n22348\n22349\n22350\n22351\n22352\n22353\n22354\n22355\n22356\n22357\n22358\n22359\n22360\n22361\n22362\n22363\n22364\n22365\n22366\n22367\n22368\n22369\n22370\n22371\n2237
2\n22373\n22374\n22375\n22376\n22377\n22378\n22379\n22380\n22381\n22382\n22383\n22384\n22385\n22386\n22387\n22388\n22389\n22390\n22391\n22392\n22393\n22394\n22395\n22396\n22397\n22398\n22399\n22400\n22401\n22402\n22403\n22404\n22405\n22406\n22407\n22408\n22409\n22410\n22411\n22412\n22413\n22414\n22415\n22416\n22417\n22418\n22419\n22420\n22421\n22422\n22423\n22424\n22425\n22426\n22427\n22428\n22429\n22430\n22431\n22432\n22433\n22434\n22435\n22436\n22437\n22438\n22439\n22440\n22441\n22442\n22443\n22444\n22445\n22446\n22447\n22448\n22449\n22450\n22451\n22452\n22453\n22454\n22455\n22456\n22457\n22458\n22459\n22460\n22461\n22462\n22463\n22464\n22465\n22466\n22467\n22468\n22469\n22470\n22471\n22472\n22473\n22474\n22475\n22476\n22477\n22478\n22479\n22480\n22481\n22482\n22483\n22484\n22485\n22486\n22487\n22488\n22489\n22490\n22491\n22492\n22493\n22494\n22495\n22496\n22497\n22498\n22499\n22500\n22501\n22502\n22503\n22504\n22505\n22506\n22507\n22508\n22509\n22510\n22511\n22512\n22513\n22514\n22515\n22516\n22517\n22518\n22519\n22520\n22521\n22522\n22523\n22524\n22525\n22526\n22527\n22528\n22529\n22530\n22531\n22532\n22533\n22534\n22535\n22536\n22537\n22538\n22539\n22540\n22541\n22542\n22543\n22544\n22545\n22546\n22547\n22548\n22549\n22550\n22551\n22552\n22553\n22554\n22555\n22556\n22557\n22558\n22559\n22560\n22561\n22562\n22563\n22564\n22565\n22566\n22567\n22568\n22569\n22570\n22571\n22572\n22573\n22574\n22575\n22576\n22577\n22578\n22579\n22580\n22581\n22582\n22583\n22584\n22585\n22586\n22587\n22588\n22589\n22590\n22591\n22592\n22593\n22594\n22595\n22596\n22597\n22598\n22599\n22600\n22601\n22602\n22603\n22604\n22605\n22606\n22607\n22608\n22609\n22610\n22611\n22612\n22613\n22614\n22615\n22616\n22617\n22618\n22619\n22620\n22621\n22622\n22623\n22624\n22625\n22626\n22627\n22628\n22629\n22630\n22631\n22632\n22633\n22634\n22635\n22636\n22637\n22638\n22639\n22640\n22641\n22642\n22643\n22644\n22645\n22646\n22647\n22648\n22649\n22650\n22651\n22652\n22653\n22654\n22655\n22656\n22657\n22658\n22659\n22660\n22661\n22662\n22663\n22664\n22665\n22666\n22667\n22668\n22669\n22670\n22671\n22672\n22673\n22674\n22675\n22676\n22677\n22678\n22679\n22680\n22681\n22682\n22683\n22684\n22685\n22686\n22687\n22688\n22689\n22690\n22691\n22692\n22693\n22694\n22695\n22696\n22697\n22698\n22699\n22700\n22701\n22702\n22703\n22704\n22705\n22706\n22707\n22708\n22709\n22710\n22711\n22712\n22713\n22714\n22715\n22716\n22717\n22718\n22719\n22720\n22721\n22722\n22723\n22724\n22725\n22726\n22727\n22728\n22729\n22730\n22731\n22732\n22733\n22734\n22735\n22736\n22737\n22738\n22739\n22740\n22741\n22742\n22743\n22744\n22745\n22746\n22747\n22748\n22749\n22750\n22751\n22752\n22753\n22754\n22755\n22756\n22757\n22758\n22759\n22760\n22761\n22762\n22763\n22764\n22765\n22766\n22767\n22768\n22769\n22770\n22771\n22772\n22773\n22774\n22775\n22776\n22777\n22778\n22779\n22780\n22781\n22782\n22783\n22784\n22785\n22786\n22787\n22788\n22789\n22790\n22791\n22792\n22793\n22794\n22795\n22796\n22797\n22798\n22799\n22800\n22801\n22802\n22803\n22804\n22805\n22806\n22807\n22808\n22809\n22810\n22811\n22812\n22813\n22814\n22815\n22816\n22817\n22818\n22819\n22820\n22821\n22822\n22823\n22824\n22825\n22826\n22827\n22828\n22829\n22830\n22831\n22832\n22833\n22834\n22835\n22836\n22837\n22838\n22839\n22840\n22841\n22842\n22843\n22844\n22845\n22846\n22847\n22848\n22849\n22850\n22851\n22852\n22853\n22854\n22855\n22856\n22857\n22858\n22859\n22860\n22861\n22862\n22863\n22864\n22865\n22866\n22867\n22868\n22869\n22870\n22871\n22872\n22873\n22874\n22875\n22876\n22877\n22878\n22879\n228
80\n22881\n22882\n22883\n22884\n22885\n22886\n22887\n22888\n22889\n22890\n22891\n22892\n22893\n22894\n22895\n22896\n22897\n22898\n22899\n22900\n22901\n22902\n22903\n22904\n22905\n22906\n22907\n22908\n22909\n22910\n22911\n22912\n22913\n22914\n22915\n22916\n22917\n22918\n22919\n22920\n22921\n22922\n22923\n22924\n22925\n22926\n22927\n22928\n22929\n22930\n22931\n22932\n22933\n22934\n22935\n22936\n22937\n22938\n22939\n22940\n22941\n22942\n22943\n22944\n22945\n22946\n22947\n22948\n22949\n22950\n22951\n22952\n22953\n22954\n22955\n22956\n22957\n22958\n22959\n22960\n22961\n22962\n22963\n22964\n22965\n22966\n22967\n22968\n22969\n22970\n22971\n22972\n22973\n22974\n22975\n22976\n22977\n22978\n22979\n22980\n22981\n22982\n22983\n22984\n22985\n22986\n22987\n22988\n22989\n22990\n22991\n22992\n22993\n22994\n22995\n22996\n22997\n22998\n22999\n23000\n23001\n23002\n23003\n23004\n23005\n23006\n23007\n23008\n23009\n23010\n23011\n23012\n23013\n23014\n23015\n23016\n23017\n23018\n23019\n23020\n23021\n23022\n23023\n23024\n23025\n23026\n23027\n23028\n23029\n23030\n23031\n23032\n23033\n23034\n23035\n23036\n23037\n23038\n23039\n23040\n23041\n23042\n23043\n23044\n23045\n23046\n23047\n23048\n23049\n23050\n23051\n23052\n23053\n23054\n23055\n23056\n23057\n23058\n23059\n23060\n23061\n23062\n23063\n23064\n23065\n23066\n23067\n23068\n23069\n23070\n23071\n23072\n23073\n23074\n23075\n23076\n23077\n23078\n23079\n23080\n23081\n23082\n23083\n23084\n23085\n23086\n23087\n23088\n23089\n23090\n23091\n23092\n23093\n23094\n23095\n23096\n23097\n23098\n23099\n23100\n23101\n23102\n23103\n23104\n23105\n23106\n23107\n23108\n23109\n23110\n23111\n23112\n23113\n23114\n23115\n23116\n23117\n23118\n23119\n23120\n23121\n23122\n23123\n23124\n23125\n23126\n23127\n23128\n23129\n23130\n23131\n23132\n23133\n23134\n23135\n23136\n23137\n23138\n23139\n23140\n23141\n23142\n23143\n23144\n23145\n23146\n23147\n23148\n23149\n23150\n23151\n23152\n23153\n23154\n23155\n23156\n23157\n23158\n23159\n23160\n23161\n23162\n23163\n23164\n23165\n23166\n23167\n23168\n23169\n23170\n23171\n23172\n23173\n23174\n23175\n23176\n23177\n23178\n23179\n23180\n23181\n23182\n23183\n23184\n23185\n23186\n23187\n23188\n23189\n23190\n23191\n23192\n23193\n23194\n23195\n23196\n23197\n23198\n23199\n23200\n23201\n23202\n23203\n23204\n23205\n23206\n23207\n23208\n23209\n23210\n23211\n23212\n23213\n23214\n23215\n23216\n23217\n23218\n23219\n23220\n23221\n23222\n23223\n23224\n23225\n23226\n23227\n23228\n23229\n23230\n23231\n23232\n23233\n23234\n23235\n23236\n23237\n23238\n23239\n23240\n23241\n23242\n23243\n23244\n23245\n23246\n23247\n23248\n23249\n23250\n23251\n23252\n23253\n23254\n23255\n23256\n23257\n23258\n23259\n23260\n23261\n23262\n23263\n23264\n23265\n23266\n23267\n23268\n23269\n23270\n23271\n23272\n23273\n23274\n23275\n23276\n23277\n23278\n23279\n23280\n23281\n23282\n23283\n23284\n23285\n23286\n23287\n23288\n23289\n23290\n23291\n23292\n23293\n23294\n23295\n23296\n23297\n23298\n23299\n23300\n23301\n23302\n23303\n23304\n23305\n23306\n23307\n23308\n23309\n23310\n23311\n23312\n23313\n23314\n23315\n23316\n23317\n23318\n23319\n23320\n23321\n23322\n23323\n23324\n23325\n23326\n23327\n23328\n23329\n23330\n23331\n23332\n23333\n23334\n23335\n23336\n23337\n23338\n23339\n23340\n23341\n23342\n23343\n23344\n23345\n23346\n23347\n23348\n23349\n23350\n23351\n23352\n23353\n23354\n23355\n23356\n23357\n23358\n23359\n23360\n23361\n23362\n23363\n23364\n23365\n23366\n23367\n23368\n23369\n23370\n23371\n23372\n23373\n23374\n23375\n23376\n23377\n23378\n23379\n23380\n23381\n23382\n23383\n23384\n23385\n23386\n23387\n23
388\n23389\n23390\n23391\n23392\n23393\n23394\n23395\n23396\n23397\n23398\n23399\n23400\n23401\n23402\n23403\n23404\n23405\n23406\n23407\n23408\n23409\n23410\n23411\n23412\n23413\n23414\n23415\n23416\n23417\n23418\n23419\n23420\n23421\n23422\n23423\n23424\n23425\n23426\n23427\n23428\n23429\n23430\n23431\n23432\n23433\n23434\n23435\n23436\n23437\n23438\n23439\n23440\n23441\n23442\n23443\n23444\n23445\n23446\n23447\n23448\n23449\n23450\n23451\n23452\n23453\n23454\n23455\n23456\n23457\n23458\n23459\n23460\n23461\n23462\n23463\n23464\n23465\n23466\n23467\n23468\n23469\n23470\n23471\n23472\n23473\n23474\n23475\n23476\n23477\n23478\n23479\n23480\n23481\n23482\n23483\n23484\n23485\n23486\n23487\n23488\n23489\n23490\n23491\n23492\n23493\n23494\n23495\n23496\n23497\n23498\n23499\n23500\n23501\n23502\n23503\n23504\n23505\n23506\n23507\n23508\n23509\n23510\n23511\n23512\n23513\n23514\n23515\n23516\n23517\n23518\n23519\n23520\n23521\n23522\n23523\n23524\n23525\n23526\n23527\n23528\n23529\n23530\n23531\n23532\n23533\n23534\n23535\n23536\n23537\n23538\n23539\n23540\n23541\n23542\n23543\n23544\n23545\n23546\n23547\n23548\n23549\n23550\n23551\n23552\n23553\n23554\n23555\n23556\n23557\n23558\n23559\n23560\n23561\n23562\n23563\n23564\n23565\n23566\n23567\n23568\n23569\n23570\n23571\n23572\n23573\n23574\n23575\n23576\n23577\n23578\n23579\n23580\n23581\n23582\n23583\n23584\n23585\n23586\n23587\n23588\n23589\n23590\n23591\n23592\n23593\n23594\n23595\n23596\n23597\n23598\n23599\n23600\n23601\n23602\n23603\n23604\n23605\n23606\n23607\n23608\n23609\n23610\n23611\n23612\n23613\n23614\n23615\n23616\n23617\n23618\n23619\n23620\n23621\n23622\n23623\n23624\n23625\n23626\n23627\n23628\n23629\n23630\n23631\n23632\n23633\n23634\n23635\n23636\n23637\n23638\n23639\n23640\n23641\n23642\n23643\n23644\n23645\n23646\n23647\n23648\n23649\n23650\n23651\n23652\n23653\n23654\n23655\n23656\n23657\n23658\n23659\n23660\n23661\n23662\n23663\n23664\n23665\n23666\n23667\n23668\n23669\n23670\n23671\n23672\n23673\n23674\n23675\n23676\n23677\n23678\n23679\n23680\n23681\n23682\n23683\n23684\n23685\n23686\n23687\n23688\n23689\n23690\n23691\n23692\n23693\n23694\n23695\n23696\n23697\n23698\n23699\n23700\n23701\n23702\n23703\n23704\n23705\n23706\n23707\n23708\n23709\n23710\n23711\n23712\n23713\n23714\n23715\n23716\n23717\n23718\n23719\n23720\n23721\n23722\n23723\n23724\n23725\n23726\n23727\n23728\n23729\n23730\n23731\n23732\n23733\n23734\n23735\n23736\n23737\n23738\n23739\n23740\n23741\n23742\n23743\n23744\n23745\n23746\n23747\n23748\n23749\n23750\n23751\n23752\n23753\n23754\n23755\n23756\n23757\n23758\n23759\n23760\n23761\n23762\n23763\n23764\n23765\n23766\n23767\n23768\n23769\n23770\n23771\n23772\n23773\n23774\n23775\n23776\n23777\n23778\n23779\n23780\n23781\n23782\n23783\n23784\n23785\n23786\n23787\n23788\n23789\n23790\n23791\n23792\n23793\n23794\n23795\n23796\n23797\n23798\n23799\n23800\n23801\n23802\n23803\n23804\n23805\n23806\n23807\n23808\n23809\n23810\n23811\n23812\n23813\n23814\n23815\n23816\n23817\n23818\n23819\n23820\n23821\n23822\n23823\n23824\n23825\n23826\n23827\n23828\n23829\n23830\n23831\n23832\n23833\n23834\n23835\n23836\n23837\n23838\n23839\n23840\n23841\n23842\n23843\n23844\n23845\n23846\n23847\n23848\n23849\n23850\n23851\n23852\n23853\n23854\n23855\n23856\n23857\n23858\n23859\n23860\n23861\n23862\n23863\n23864\n23865\n23866\n23867\n23868\n23869\n23870\n23871\n23872\n23873\n23874\n23875\n23876\n23877\n23878\n23879\n23880\n23881\n23882\n23883\n23884\n23885\n23886\n23887\n23888\n23889\n23890\n23891\n23892\n23893\n23894\n23895\n2
3896\n23897\n23898\n23899\n23900\n23901\n23902\n23903\n23904\n23905\n23906\n23907\n23908\n23909\n23910\n23911\n23912\n23913\n23914\n23915\n23916\n23917\n23918\n23919\n23920\n23921\n23922\n23923\n23924\n23925\n23926\n23927\n23928\n23929\n23930\n23931\n23932\n23933\n23934\n23935\n23936\n23937\n23938\n23939\n23940\n23941\n23942\n23943\n23944\n23945\n23946\n23947\n23948\n23949\n23950\n23951\n23952\n23953\n23954\n23955\n23956\n23957\n23958\n23959\n23960\n23961\n23962\n23963\n23964\n23965\n23966\n23967\n23968\n23969\n23970\n23971\n23972\n23973\n23974\n23975\n23976\n23977\n23978\n23979\n23980\n23981\n23982\n23983\n23984\n23985\n23986\n23987\n23988\n23989\n23990\n23991\n23992\n23993\n23994\n23995\n23996\n23997\n23998\n23999\n24000\n24001\n24002\n24003\n24004\n24005\n24006\n24007\n24008\n24009\n24010\n24011\n24012\n24013\n24014\n24015\n24016\n24017\n24018\n24019\n24020\n24021\n24022\n24023\n24024\n24025\n24026\n24027\n24028\n24029\n24030\n24031\n24032\n24033\n24034\n24035\n24036\n24037\n24038\n24039\n24040\n24041\n24042\n24043\n24044\n24045\n24046\n24047\n24048\n24049\n24050\n24051\n24052\n24053\n24054\n24055\n24056\n24057\n24058\n24059\n24060\n24061\n24062\n24063\n24064\n24065\n24066\n24067\n24068\n24069\n24070\n24071\n24072\n24073\n24074\n24075\n24076\n24077\n24078\n24079\n24080\n24081\n24082\n24083\n24084\n24085\n24086\n24087\n24088\n24089\n24090\n24091\n24092\n24093\n24094\n24095\n24096\n24097\n24098\n24099\n24100\n24101\n24102\n24103\n24104\n24105\n24106\n24107\n24108\n24109\n24110\n24111\n24112\n24113\n24114\n24115\n24116\n24117\n24118\n24119\n24120\n24121\n24122\n24123\n24124\n24125\n24126\n24127\n24128\n24129\n24130\n24131\n24132\n24133\n24134\n24135\n24136\n24137\n24138\n24139\n24140\n24141\n24142\n24143\n24144\n24145\n24146\n24147\n24148\n24149\n24150\n24151\n24152\n24153\n24154\n24155\n24156\n24157\n24158\n24159\n24160\n24161\n24162\n24163\n24164\n24165\n24166\n24167\n24168\n24169\n24170\n24171\n24172\n24173\n24174\n24175\n24176\n24177\n24178\n24179\n24180\n24181\n24182\n24183\n24184\n24185\n24186\n24187\n24188\n24189\n24190\n24191\n24192\n24193\n24194\n24195\n24196\n24197\n24198\n24199\n24200\n24201\n24202\n24203\n24204\n24205\n24206\n24207\n24208\n24209\n24210\n24211\n24212\n24213\n24214\n24215\n24216\n24217\n24218\n24219\n24220\n24221\n24222\n24223\n24224\n24225\n24226\n24227\n24228\n24229\n24230\n24231\n24232\n24233\n24234\n24235\n24236\n24237\n24238\n24239\n24240\n24241\n24242\n24243\n24244\n24245\n24246\n24247\n24248\n24249\n24250\n24251\n24252\n24253\n24254\n24255\n24256\n24257\n24258\n24259\n24260\n24261\n24262\n24263\n24264\n24265\n24266\n24267\n24268\n24269\n24270\n24271\n24272\n24273\n24274\n24275\n24276\n24277\n24278\n24279\n24280\n24281\n24282\n24283\n24284\n24285\n24286\n24287\n24288\n24289\n24290\n24291\n24292\n24293\n24294\n24295\n24296\n24297\n24298\n24299\n24300\n24301\n24302\n24303\n24304\n24305\n24306\n24307\n24308\n24309\n24310\n24311\n24312\n24313\n24314\n24315\n24316\n24317\n24318\n24319\n24320\n24321\n24322\n24323\n24324\n24325\n24326\n24327\n24328\n24329\n24330\n24331\n24332\n24333\n24334\n24335\n24336\n24337\n24338\n24339\n24340\n24341\n24342\n24343\n24344\n24345\n24346\n24347\n24348\n24349\n24350\n24351\n24352\n24353\n24354\n24355\n24356\n24357\n24358\n24359\n24360\n24361\n24362\n24363\n24364\n24365\n24366\n24367\n24368\n24369\n24370\n24371\n24372\n24373\n24374\n24375\n24376\n24377\n24378\n24379\n24380\n24381\n24382\n24383\n24384\n24385\n24386\n24387\n24388\n24389\n24390\n24391\n24392\n24393\n24394\n24395\n24396\n24397\n24398\n24399\n24400\n24401\n24402\n24403\n
24404\n24405\n24406\n24407\n24408\n24409\n24410\n24411\n24412\n24413\n24414\n24415\n24416\n24417\n24418\n24419\n24420\n24421\n24422\n24423\n24424\n24425\n24426\n24427\n24428\n24429\n24430\n24431\n24432\n24433\n24434\n24435\n24436\n24437\n24438\n24439\n24440\n24441\n24442\n24443\n24444\n24445\n24446\n24447\n24448\n24449\n24450\n24451\n24452\n24453\n24454\n24455\n24456\n24457\n24458\n24459\n24460\n24461\n24462\n24463\n24464\n24465\n24466\n24467\n24468\n24469\n24470\n24471\n24472\n24473\n24474\n24475\n24476\n24477\n24478\n24479\n24480\n24481\n24482\n24483\n24484\n24485\n24486\n24487\n24488\n24489\n24490\n24491\n24492\n24493\n24494\n24495\n24496\n24497\n24498\n24499\n24500\n24501\n24502\n24503\n24504\n24505\n24506\n24507\n24508\n24509\n24510\n24511\n24512\n24513\n24514\n24515\n24516\n24517\n24518\n24519\n24520\n24521\n24522\n24523\n24524\n24525\n24526\n24527\n24528\n24529\n24530\n24531\n24532\n24533\n24534\n24535\n24536\n24537\n24538\n24539\n24540\n24541\n24542\n24543\n24544\n24545\n24546\n24547\n24548\n24549\n24550\n24551\n24552\n24553\n24554\n24555\n24556\n24557\n24558\n24559\n24560\n24561\n24562\n24563\n24564\n24565\n24566\n24567\n24568\n24569\n24570\n24571\n24572\n24573\n24574\n24575\n24576\n24577\n24578\n24579\n24580\n24581\n24582\n24583\n24584\n24585\n24586\n24587\n24588\n24589\n24590\n24591\n24592\n24593\n24594\n24595\n24596\n24597\n24598\n24599\n24600\n24601\n24602\n24603\n24604\n24605\n24606\n24607\n24608\n24609\n24610\n24611\n24612\n24613\n24614\n24615\n24616\n24617\n24618\n24619\n24620\n24621\n24622\n24623\n24624\n24625\n24626\n24627\n24628\n24629\n24630\n24631\n24632\n24633\n24634\n24635\n24636\n24637\n24638\n24639\n24640\n24641\n24642\n24643\n24644\n24645\n24646\n24647\n24648\n24649\n24650\n24651\n24652\n24653\n24654\n24655\n24656\n24657\n24658\n24659\n24660\n24661\n24662\n24663\n24664\n24665\n24666\n24667\n24668\n24669\n24670\n24671\n24672\n24673\n24674\n24675\n24676\n24677\n24678\n24679\n24680\n24681\n24682\n24683\n24684\n24685\n24686\n24687\n24688\n24689\n24690\n24691\n24692\n24693\n24694\n24695\n24696\n24697\n24698\n24699\n24700\n24701\n24702\n24703\n24704\n24705\n24706\n24707\n24708\n24709\n24710\n24711\n24712\n24713\n24714\n24715\n24716\n24717\n24718\n24719\n24720\n24721\n24722\n24723\n24724\n24725\n24726\n24727\n24728\n24729\n24730\n24731\n24732\n24733\n24734\n24735\n24736\n24737\n24738\n24739\n24740\n24741\n24742\n24743\n24744\n24745\n24746\n24747\n24748\n24749\n24750\n24751\n24752\n24753\n24754\n24755\n24756\n24757\n24758\n24759\n24760\n24761\n24762\n24763\n24764\n24765\n24766\n24767\n24768\n24769\n24770\n24771\n24772\n24773\n24774\n24775\n24776\n24777\n24778\n24779\n24780\n24781\n24782\n24783\n24784\n24785\n24786\n24787\n24788\n24789\n24790\n24791\n24792\n24793\n24794\n24795\n24796\n24797\n24798\n24799\n24800\n24801\n24802\n24803\n24804\n24805\n24806\n24807\n24808\n24809\n24810\n24811\n24812\n24813\n24814\n24815\n24816\n24817\n24818\n24819\n24820\n24821\n24822\n24823\n24824\n24825\n24826\n24827\n24828\n24829\n24830\n24831\n24832\n24833\n24834\n24835\n24836\n24837\n24838\n24839\n24840\n24841\n24842\n24843\n24844\n24845\n24846\n24847\n24848\n24849\n24850\n24851\n24852\n24853\n24854\n24855\n24856\n24857\n24858\n24859\n24860\n24861\n24862\n24863\n24864\n24865\n24866\n24867\n24868\n24869\n24870\n24871\n24872\n24873\n24874\n24875\n24876\n24877\n24878\n24879\n24880\n24881\n24882\n24883\n24884\n24885\n24886\n24887\n24888\n24889\n24890\n24891\n24892\n24893\n24894\n24895\n24896\n24897\n24898\n24899\n24900\n24901\n24902\n24903\n24904\n24905\n24906\n24907\n24908\n24909\n24910\n24911\
n24912\n24913\n24914\n24915\n24916\n24917\n24918\n24919\n24920\n24921\n24922\n24923\n24924\n24925\n24926\n24927\n24928\n24929\n24930\n24931\n24932\n24933\n24934\n24935\n24936\n24937\n24938\n24939\n24940\n24941\n24942\n24943\n24944\n24945\n24946\n24947\n24948\n24949\n24950\n24951\n24952\n24953\n24954\n24955\n24956\n24957\n24958\n24959\n24960\n24961\n24962\n24963\n24964\n24965\n24966\n24967\n24968\n24969\n24970\n24971\n24972\n24973\n24974\n24975\n24976\n24977\n24978\n24979\n24980\n24981\n24982\n24983\n24984\n24985\n24986\n24987\n24988\n24989\n24990\n24991\n24992\n24993\n24994\n24995\n24996\n24997\n24998\n24999\n25000\n25001\n25002\n25003\n25004\n25005\n25006\n25007\n25008\n25009\n25010\n25011\n25012\n25013\n25014\n25015\n25016\n25017\n25018\n25019\n25020\n25021\n25022\n25023\n25024\n25025\n25026\n25027\n25028\n25029\n25030\n25031\n25032\n25033\n25034\n25035\n25036\n25037\n25038\n25039\n25040\n25041\n25042\n25043\n25044\n25045\n25046\n25047\n25048\n25049\n25050\n25051\n25052\n25053\n25054\n25055\n25056\n25057\n25058\n25059\n25060\n25061\n25062\n25063\n25064\n25065\n25066\n25067\n25068\n25069\n25070\n25071\n25072\n25073\n25074\n25075\n25076\n25077\n25078\n25079\n25080\n25081\n25082\n25083\n25084\n25085\n25086\n25087\n25088\n25089\n25090\n25091\n25092\n25093\n25094\n25095\n25096\n25097\n25098\n25099\n25100\n25101\n25102\n25103\n25104\n25105\n25106\n25107\n25108\n25109\n25110\n25111\n25112\n25113\n25114\n25115\n25116\n25117\n25118\n25119\n25120\n25121\n25122\n25123\n25124\n25125\n25126\n25127\n25128\n25129\n25130\n25131\n25132\n25133\n25134\n25135\n25136\n25137\n25138\n25139\n25140\n25141\n25142\n25143\n25144\n25145\n25146\n25147\n25148\n25149\n25150\n25151\n25152\n25153\n25154\n25155\n25156\n25157\n25158\n25159\n25160\n25161\n25162\n25163\n25164\n25165\n25166\n25167\n25168\n25169\n25170\n25171\n25172\n25173\n25174\n25175\n25176\n25177\n25178\n25179\n25180\n25181\n25182\n25183\n25184\n25185\n25186\n25187\n25188\n25189\n25190\n25191\n25192\n25193\n25194\n25195\n25196\n25197\n25198\n25199\n25200\n25201\n25202\n25203\n25204\n25205\n25206\n25207\n25208\n25209\n25210\n25211\n25212\n25213\n25214\n25215\n25216\n25217\n25218\n25219\n25220\n25221\n25222\n25223\n25224\n25225\n25226\n25227\n25228\n25229\n25230\n25231\n25232\n25233\n25234\n25235\n25236\n25237\n25238\n25239\n25240\n25241\n25242\n25243\n25244\n25245\n25246\n25247\n25248\n25249\n25250\n25251\n25252\n25253\n25254\n25255\n25256\n25257\n25258\n25259\n25260\n25261\n25262\n25263\n25264\n25265\n25266\n25267\n25268\n25269\n25270\n25271\n25272\n25273\n25274\n25275\n25276\n25277\n25278\n25279\n25280\n25281\n25282\n25283\n25284\n25285\n25286\n25287\n25288\n25289\n25290\n25291\n25292\n25293\n25294\n25295\n25296\n25297\n25298\n25299\n25300\n25301\n25302\n25303\n25304\n25305\n25306\n25307\n25308\n25309\n25310\n25311\n25312\n25313\n25314\n25315\n25316\n25317\n25318\n25319\n25320\n25321\n25322\n25323\n25324\n25325\n25326\n25327\n25328\n25329\n25330\n25331\n25332\n25333\n25334\n25335\n25336\n25337\n25338\n25339\n25340\n25341\n25342\n25343\n25344\n25345\n25346\n25347\n25348\n25349\n25350\n25351\n25352\n25353\n25354\n25355\n25356\n25357\n25358\n25359\n25360\n25361\n25362\n25363\n25364\n25365\n25366\n25367\n25368\n25369\n25370\n25371\n25372\n25373\n25374\n25375\n25376\n25377\n25378\n25379\n25380\n25381\n25382\n25383\n25384\n25385\n25386\n25387\n25388\n25389\n25390\n25391\n25392\n25393\n25394\n25395\n25396\n25397\n25398\n25399\n25400\n25401\n25402\n25403\n25404\n25405\n25406\n25407\n25408\n25409\n25410\n25411\n25412\n25413\n25414\n25415\n25416\n25417\n25418\n25419
\n25420\n25421\n25422\n25423\n25424\n25425\n25426\n25427\n25428\n25429\n25430\n25431\n25432\n25433\n25434\n25435\n25436\n25437\n25438\n25439\n25440\n25441\n25442\n25443\n25444\n25445\n25446\n25447\n25448\n25449\n25450\n25451\n25452\n25453\n25454\n25455\n25456\n25457\n25458\n25459\n25460\n25461\n25462\n25463\n25464\n25465\n25466\n25467\n25468\n25469\n25470\n25471\n25472\n25473\n25474\n25475\n25476\n25477\n25478\n25479\n25480\n25481\n25482\n25483\n25484\n25485\n25486\n25487\n25488\n25489\n25490\n25491\n25492\n25493\n25494\n25495\n25496\n25497\n25498\n25499\n25500\n25501\n25502\n25503\n25504\n25505\n25506\n25507\n25508\n25509\n25510\n25511\n25512\n25513\n25514\n25515\n25516\n25517\n25518\n25519\n25520\n25521\n25522\n25523\n25524\n25525\n25526\n25527\n25528\n25529\n25530\n25531\n25532\n25533\n25534\n25535\n25536\n25537\n25538\n25539\n25540\n25541\n25542\n25543\n25544\n25545\n25546\n25547\n25548\n25549\n25550\n25551\n25552\n25553\n25554\n25555\n25556\n25557\n25558\n25559\n25560\n25561\n25562\n25563\n25564\n25565\n25566\n25567\n25568\n25569\n25570\n25571\n25572\n25573\n25574\n25575\n25576\n25577\n25578\n25579\n25580\n25581\n25582\n25583\n25584\n25585\n25586\n25587\n25588\n25589\n25590\n25591\n25592\n25593\n25594\n25595\n25596\n25597\n25598\n25599\n25600\n25601\n25602\n25603\n25604\n25605\n25606\n25607\n25608\n25609\n25610\n25611\n25612\n25613\n25614\n25615\n25616\n25617\n25618\n25619\n25620\n25621\n25622\n25623\n25624\n25625\n25626\n25627\n25628\n25629\n25630\n25631\n25632\n25633\n25634\n25635\n25636\n25637\n25638\n25639\n25640\n25641\n25642\n25643\n25644\n25645\n25646\n25647\n25648\n25649\n25650\n25651\n25652\n25653\n25654\n25655\n25656\n25657\n25658\n25659\n25660\n25661\n25662\n25663\n25664\n25665\n25666\n25667\n25668\n25669\n25670\n25671\n25672\n25673\n25674\n25675\n25676\n25677\n25678\n25679\n25680\n25681\n25682\n25683\n25684\n25685\n25686\n25687\n25688\n25689\n25690\n25691\n25692\n25693\n25694\n25695\n25696\n25697\n25698\n25699\n25700\n25701\n25702\n25703\n25704\n25705\n25706\n25707\n25708\n25709\n25710\n25711\n25712\n25713\n25714\n25715\n25716\n25717\n25718\n25719\n25720\n25721\n25722\n25723\n25724\n25725\n25726\n25727\n25728\n25729\n25730\n25731\n25732\n25733\n25734\n25735\n25736\n25737\n25738\n25739\n25740\n25741\n25742\n25743\n25744\n25745\n25746\n25747\n25748\n25749\n25750\n25751\n25752\n25753\n25754\n25755\n25756\n25757\n25758\n25759\n25760\n25761\n25762\n25763\n25764\n25765\n25766\n25767\n25768\n25769\n25770\n25771\n25772\n25773\n25774\n25775\n25776\n25777\n25778\n25779\n25780\n25781\n25782\n25783\n25784\n25785\n25786\n25787\n25788\n25789\n25790\n25791\n25792\n25793\n25794\n25795\n25796\n25797\n25798\n25799\n25800\n25801\n25802\n25803\n25804\n25805\n25806\n25807\n25808\n25809\n25810\n25811\n25812\n25813\n25814\n25815\n25816\n25817\n25818\n25819\n25820\n25821\n25822\n25823\n25824\n25825\n25826\n25827\n25828\n25829\n25830\n25831\n25832\n25833\n25834\n25835\n25836\n25837\n25838\n25839\n25840\n25841\n25842\n25843\n25844\n25845\n25846\n25847\n25848\n25849\n25850\n25851\n25852\n25853\n25854\n25855\n25856\n25857\n25858\n25859\n25860\n25861\n25862\n25863\n25864\n25865\n25866\n25867\n25868\n25869\n25870\n25871\n25872\n25873\n25874\n25875\n25876\n25877\n25878\n25879\n25880\n25881\n25882\n25883\n25884\n25885\n25886\n25887\n25888\n25889\n25890\n25891\n25892\n25893\n25894\n25895\n25896\n25897\n25898\n25899\n25900\n25901\n25902\n25903\n25904\n25905\n25906\n25907\n25908\n25909\n25910\n25911\n25912\n25913\n25914\n25915\n25916\n25917\n25918\n25919\n25920\n25921\n25922\n25923\n25924\n25925\n25926\n2592
7\n25928\n25929\n25930\n25931\n25932\n25933\n25934\n25935\n25936\n25937\n25938\n25939\n25940\n25941\n25942\n25943\n25944\n25945\n25946\n25947\n25948\n25949\n25950\n25951\n25952\n25953\n25954\n25955\n25956\n25957\n25958\n25959\n25960\n25961\n25962\n25963\n25964\n25965\n25966\n25967\n25968\n25969\n25970\n25971\n25972\n25973\n25974\n25975\n25976\n25977\n25978\n25979\n25980\n25981\n25982\n25983\n25984\n25985\n25986\n25987\n25988\n25989\n25990\n25991\n25992\n25993\n25994\n25995\n25996\n25997\n25998\n25999\n26000\n26001\n26002\n26003\n26004\n26005\n26006\n26007\n26008\n26009\n26010\n26011\n26012\n26013\n26014\n26015\n26016\n26017\n26018\n26019\n26020\n26021\n26022\n26023\n26024\n26025\n26026\n26027\n26028\n26029\n26030\n26031\n26032\n26033\n26034\n26035\n26036\n26037\n26038\n26039\n26040\n26041\n26042\n26043\n26044\n26045\n26046\n26047\n26048\n26049\n26050\n26051\n26052\n26053\n26054\n26055\n26056\n26057\n26058\n26059\n26060\n26061\n26062\n26063\n26064\n26065\n26066\n26067\n26068\n26069\n26070\n26071\n26072\n26073\n26074\n26075\n26076\n26077\n26078\n26079\n26080\n26081\n26082\n26083\n26084\n26085\n26086\n26087\n26088\n26089\n26090\n26091\n26092\n26093\n26094\n26095\n26096\n26097\n26098\n26099\n26100\n26101\n26102\n26103\n26104\n26105\n26106\n26107\n26108\n26109\n26110\n26111\n26112\n26113\n26114\n26115\n26116\n26117\n26118\n26119\n26120\n26121\n26122\n26123\n26124\n26125\n26126\n26127\n26128\n26129\n26130\n26131\n26132\n26133\n26134\n26135\n26136\n26137\n26138\n26139\n26140\n26141\n26142\n26143\n26144\n26145\n26146\n26147\n26148\n26149\n26150\n26151\n26152\n26153\n26154\n26155\n26156\n26157\n26158\n26159\n26160\n26161\n26162\n26163\n26164\n26165\n26166\n26167\n26168\n26169\n26170\n26171\n26172\n26173\n26174\n26175\n26176\n26177\n26178\n26179\n26180\n26181\n26182\n26183\n26184\n26185\n26186\n26187\n26188\n26189\n26190\n26191\n26192\n26193\n26194\n26195\n26196\n26197\n26198\n26199\n26200\n26201\n26202\n26203\n26204\n26205\n26206\n26207\n26208\n26209\n26210\n26211\n26212\n26213\n26214\n26215\n26216\n26217\n26218\n26219\n26220\n26221\n26222\n26223\n26224\n26225\n26226\n26227\n26228\n26229\n26230\n26231\n26232\n26233\n26234\n26235\n26236\n26237\n26238\n26239\n26240\n26241\n26242\n26243\n26244\n26245\n26246\n26247\n26248\n26249\n26250\n26251\n26252\n26253\n26254\n26255\n26256\n26257\n26258\n26259\n26260\n26261\n26262\n26263\n26264\n26265\n26266\n26267\n26268\n26269\n26270\n26271\n26272\n26273\n26274\n26275\n26276\n26277\n26278\n26279\n26280\n26281\n26282\n26283\n26284\n26285\n26286\n26287\n26288\n26289\n26290\n26291\n26292\n26293\n26294\n26295\n26296\n26297\n26298\n26299\n26300\n26301\n26302\n26303\n26304\n26305\n26306\n26307\n26308\n26309\n26310\n26311\n26312\n26313\n26314\n26315\n26316\n26317\n26318\n26319\n26320\n26321\n26322\n26323\n26324\n26325\n26326\n26327\n26328\n26329\n26330\n26331\n26332\n26333\n26334\n26335\n26336\n26337\n26338\n26339\n26340\n26341\n26342\n26343\n26344\n26345\n26346\n26347\n26348\n26349\n26350\n26351\n26352\n26353\n26354\n26355\n26356\n26357\n26358\n26359\n26360\n26361\n26362\n26363\n26364\n26365\n26366\n26367\n26368\n26369\n26370\n26371\n26372\n26373\n26374\n26375\n26376\n26377\n26378\n26379\n26380\n26381\n26382\n26383\n26384\n26385\n26386\n26387\n26388\n26389\n26390\n26391\n26392\n26393\n26394\n26395\n26396\n26397\n26398\n26399\n26400\n26401\n26402\n26403\n26404\n26405\n26406\n26407\n26408\n26409\n26410\n26411\n26412\n26413\n26414\n26415\n26416\n26417\n26418\n26419\n26420\n26421\n26422\n26423\n26424\n26425\n26426\n26427\n26428\n26429\n26430\n26431\n26432\n26433\n26434\n264
35\n26436\n26437\n26438\n26439\n26440\n26441\n26442\n26443\n26444\n26445\n26446\n26447\n26448\n26449\n26450\n26451\n26452\n26453\n26454\n26455\n26456\n26457\n26458\n26459\n26460\n26461\n26462\n26463\n26464\n26465\n26466\n26467\n26468\n26469\n26470\n26471\n26472\n26473\n26474\n26475\n26476\n26477\n26478\n26479\n26480\n26481\n26482\n26483\n26484\n26485\n26486\n26487\n26488\n26489\n26490\n26491\n26492\n26493\n26494\n26495\n26496\n26497\n26498\n26499\n26500\n26501\n26502\n26503\n26504\n26505\n26506\n26507\n26508\n26509\n26510\n26511\n26512\n26513\n26514\n26515\n26516\n26517\n26518\n26519\n26520\n26521\n26522\n26523\n26524\n26525\n26526\n26527\n26528\n26529\n26530\n26531\n26532\n26533\n26534\n26535\n26536\n26537\n26538\n26539\n26540\n26541\n26542\n26543\n26544\n26545\n26546\n26547\n26548\n26549\n26550\n26551\n26552\n26553\n26554\n26555\n26556\n26557\n26558\n26559\n26560\n26561\n26562\n26563\n26564\n26565\n26566\n26567\n26568\n26569\n26570\n26571\n26572\n26573\n26574\n26575\n26576\n26577\n26578\n26579\n26580\n26581\n26582\n26583\n26584\n26585\n26586\n26587\n26588\n26589\n26590\n26591\n26592\n26593\n26594\n26595\n26596\n26597\n26598\n26599\n26600\n26601\n26602\n26603\n26604\n26605\n26606\n26607\n26608\n26609\n26610\n26611\n26612\n26613\n26614\n26615\n26616\n26617\n26618\n26619\n26620\n26621\n26622\n26623\n26624\n26625\n26626\n26627\n26628\n26629\n26630\n26631\n26632\n26633\n26634\n26635\n26636\n26637\n26638\n26639\n26640\n26641\n26642\n26643\n26644\n26645\n26646\n26647\n26648\n26649\n26650\n26651\n26652\n26653\n26654\n26655\n26656\n26657\n26658\n26659\n26660\n26661\n26662\n26663\n26664\n26665\n26666\n26667\n26668\n26669\n26670\n26671\n26672\n26673\n26674\n26675\n26676\n26677\n26678\n26679\n26680\n26681\n26682\n26683\n26684\n26685\n26686\n26687\n26688\n26689\n26690\n26691\n26692\n26693\n26694\n26695\n26696\n26697\n26698\n26699\n26700\n26701\n26702\n26703\n26704\n26705\n26706\n26707\n26708\n26709\n26710\n26711\n26712\n26713\n26714\n26715\n26716\n26717\n26718\n26719\n26720\n26721\n26722\n26723\n26724\n26725\n26726\n26727\n26728\n26729\n26730\n26731\n26732\n26733\n26734\n26735\n26736\n26737\n26738\n26739\n26740\n26741\n26742\n26743\n26744\n26745\n26746\n26747\n26748\n26749\n26750\n26751\n26752\n26753\n26754\n26755\n26756\n26757\n26758\n26759\n26760\n26761\n26762\n26763\n26764\n26765\n26766\n26767\n26768\n26769\n26770\n26771\n26772\n26773\n26774\n26775\n26776\n26777\n26778\n26779\n26780\n26781\n26782\n26783\n26784\n26785\n26786\n26787\n26788\n26789\n26790\n26791\n26792\n26793\n26794\n26795\n26796\n26797\n26798\n26799\n26800\n26801\n26802\n26803\n26804\n26805\n26806\n26807\n26808\n26809\n26810\n26811\n26812\n26813\n26814\n26815\n26816\n26817\n26818\n26819\n26820\n26821\n26822\n26823\n26824\n26825\n26826\n26827\n26828\n26829\n26830\n26831\n26832\n26833\n26834\n26835\n26836\n26837\n26838\n26839\n26840\n26841\n26842\n26843\n26844\n26845\n26846\n26847\n26848\n26849\n26850\n26851\n26852\n26853\n26854\n26855\n26856\n26857\n26858\n26859\n26860\n26861\n26862\n26863\n26864\n26865\n26866\n26867\n26868\n26869\n26870\n26871\n26872\n26873\n26874\n26875\n26876\n26877\n26878\n26879\n26880\n26881\n26882\n26883\n26884\n26885\n26886\n26887\n26888\n26889\n26890\n26891\n26892\n26893\n26894\n26895\n26896\n26897\n26898\n26899\n26900\n26901\n26902\n26903\n26904\n26905\n26906\n26907\n26908\n26909\n26910\n26911\n26912\n26913\n26914\n26915\n26916\n26917\n26918\n26919\n26920\n26921\n26922\n26923\n26924\n26925\n26926\n26927\n26928\n26929\n26930\n26931\n26932\n26933\n26934\n26935\n26936\n26937\n26938\n26939\n26940\n26941\n26942\n26
943\n26944\n26945\n26946\n26947\n26948\n26949\n26950\n26951\n26952\n26953\n26954\n26955\n26956\n26957\n26958\n26959\n26960\n26961\n26962\n26963\n26964\n26965\n26966\n26967\n26968\n26969\n26970\n26971\n26972\n26973\n26974\n26975\n26976\n26977\n26978\n26979\n26980\n26981\n26982\n26983\n26984\n26985\n26986\n26987\n26988\n26989\n26990\n26991\n26992\n26993\n26994\n26995\n26996\n26997\n26998\n26999\n27000\n27001\n27002\n27003\n27004\n27005\n27006\n27007\n27008\n27009\n27010\n27011\n27012\n27013\n27014\n27015\n27016\n27017\n27018\n27019\n27020\n27021\n27022\n27023\n27024\n27025\n27026\n27027\n27028\n27029\n27030\n27031\n27032\n27033\n27034\n27035\n27036\n27037\n27038\n27039\n27040\n27041\n27042\n27043\n27044\n27045\n27046\n27047\n27048\n27049\n27050\n27051\n27052\n27053\n27054\n27055\n27056\n27057\n27058\n27059\n27060\n27061\n27062\n27063\n27064\n27065\n27066\n27067\n27068\n27069\n27070\n27071\n27072\n27073\n27074\n27075\n27076\n27077\n27078\n27079\n27080\n27081\n27082\n27083\n27084\n27085\n27086\n27087\n27088\n27089\n27090\n27091\n27092\n27093\n27094\n27095\n27096\n27097\n27098\n27099\n27100\n27101\n27102\n27103\n27104\n27105\n27106\n27107\n27108\n27109\n27110\n27111\n27112\n27113\n27114\n27115\n27116\n27117\n27118\n27119\n27120\n27121\n27122\n27123\n27124\n27125\n27126\n27127\n27128\n27129\n27130\n27131\n27132\n27133\n27134\n27135\n27136\n27137\n27138\n27139\n27140\n27141\n27142\n27143\n27144\n27145\n27146\n27147\n27148\n27149\n27150\n27151\n27152\n27153\n27154\n27155\n27156\n27157\n27158\n27159\n27160\n27161\n27162\n27163\n27164\n27165\n27166\n27167\n27168\n27169\n27170\n27171\n27172\n27173\n27174\n27175\n27176\n27177\n27178\n27179\n27180\n27181\n27182\n27183\n27184\n27185\n27186\n27187\n27188\n27189\n27190\n27191\n27192\n27193\n27194\n27195\n27196\n27197\n27198\n27199\n27200\n27201\n27202\n27203\n27204\n27205\n27206\n27207\n27208\n27209\n27210\n27211\n27212\n27213\n27214\n27215\n27216\n27217\n27218\n27219\n27220\n27221\n27222\n27223\n27224\n27225\n27226\n27227\n27228\n27229\n27230\n27231\n27232\n27233\n27234\n27235\n27236\n27237\n27238\n27239\n27240\n27241\n27242\n27243\n27244\n27245\n27246\n27247\n27248\n27249\n27250\n27251\n27252\n27253\n27254\n27255\n27256\n27257\n27258\n27259\n27260\n27261\n27262\n27263\n27264\n27265\n27266\n27267\n27268\n27269\n27270\n27271\n27272\n27273\n27274\n27275\n27276\n27277\n27278\n27279\n27280\n27281\n27282\n27283\n27284\n27285\n27286\n27287\n27288\n27289\n27290\n27291\n27292\n27293\n27294\n27295\n27296\n27297\n27298\n27299\n27300\n27301\n27302\n27303\n27304\n27305\n27306\n27307\n27308\n27309\n27310\n27311\n27312\n27313\n27314\n27315\n27316\n27317\n27318\n27319\n27320\n27321\n27322\n27323\n27324\n27325\n27326\n27327\n27328\n27329\n27330\n27331\n27332\n27333\n27334\n27335\n27336\n27337\n27338\n27339\n27340\n27341\n27342\n27343\n27344\n27345\n27346\n27347\n27348\n27349\n27350\n27351\n27352\n27353\n27354\n27355\n27356\n27357\n27358\n27359\n27360\n27361\n27362\n27363\n27364\n27365\n27366\n27367\n27368\n27369\n27370\n27371\n27372\n27373\n27374\n27375\n27376\n27377\n27378\n27379\n27380\n27381\n27382\n27383\n27384\n27385\n27386\n27387\n27388\n27389\n27390\n27391\n27392\n27393\n27394\n27395\n27396\n27397\n27398\n27399\n27400\n27401\n27402\n27403\n27404\n27405\n27406\n27407\n27408\n27409\n27410\n27411\n27412\n27413\n27414\n27415\n27416\n27417\n27418\n27419\n27420\n27421\n27422\n27423\n27424\n27425\n27426\n27427\n27428\n27429\n27430\n27431\n27432\n27433\n27434\n27435\n27436\n27437\n27438\n27439\n27440\n27441\n27442\n27443\n27444\n27445\n27446\n27447\n27448\n27449\n27450\n2
7451\n27452\n27453\n27454\n27455\n27456\n27457\n27458\n27459\n27460\n27461\n27462\n27463\n27464\n27465\n27466\n27467\n27468\n27469\n27470\n27471\n27472\n27473\n27474\n27475\n27476\n27477\n27478\n27479\n27480\n27481\n27482\n27483\n27484\n27485\n27486\n27487\n27488\n27489\n27490\n27491\n27492\n27493\n27494\n27495\n27496\n27497\n27498\n27499\n27500\n27501\n27502\n27503\n27504\n27505\n27506\n27507\n27508\n27509\n27510\n27511\n27512\n27513\n27514\n27515\n27516\n27517\n27518\n27519\n27520\n27521\n27522\n27523\n27524\n27525\n27526\n27527\n27528\n27529\n27530\n27531\n27532\n27533\n27534\n27535\n27536\n27537\n27538\n27539\n27540\n27541\n27542\n27543\n27544\n27545\n27546\n27547\n27548\n27549\n27550\n27551\n27552\n27553\n27554\n27555\n27556\n27557\n27558\n27559\n27560\n27561\n27562\n27563\n27564\n27565\n27566\n27567\n27568\n27569\n27570\n27571\n27572\n27573\n27574\n27575\n27576\n27577\n27578\n27579\n27580\n27581\n27582\n27583\n27584\n27585\n27586\n27587\n27588\n27589\n27590\n27591\n27592\n27593\n27594\n27595\n27596\n27597\n27598\n27599\n27600\n27601\n27602\n27603\n27604\n27605\n27606\n27607\n27608\n27609\n27610\n27611\n27612\n27613\n27614\n27615\n27616\n27617\n27618\n27619\n27620\n27621\n27622\n27623\n27624\n27625\n27626\n27627\n27628\n27629\n27630\n27631\n27632\n27633\n27634\n27635\n27636\n27637\n27638\n27639\n27640\n27641\n27642\n27643\n27644\n27645\n27646\n27647\n27648\n27649\n27650\n27651\n27652\n27653\n27654\n27655\n27656\n27657\n27658\n27659\n27660\n27661\n27662\n27663\n27664\n27665\n27666\n27667\n27668\n27669\n27670\n27671\n27672\n27673\n27674\n27675\n27676\n27677\n27678\n27679\n27680\n27681\n27682\n27683\n27684\n27685\n27686\n27687\n27688\n27689\n27690\n27691\n27692\n27693\n27694\n27695\n27696\n27697\n27698\n27699\n27700\n27701\n27702\n27703\n27704\n27705\n27706\n27707\n27708\n27709\n27710\n27711\n27712\n27713\n27714\n27715\n27716\n27717\n27718\n27719\n27720\n27721\n27722\n27723\n27724\n27725\n27726\n27727\n27728\n27729\n27730\n27731\n27732\n27733\n27734\n27735\n27736\n27737\n27738\n27739\n27740\n27741\n27742\n27743\n27744\n27745\n27746\n27747\n27748\n27749\n27750\n27751\n27752\n27753\n27754\n27755\n27756\n27757\n27758\n27759\n27760\n27761\n27762\n27763\n27764\n27765\n27766\n27767\n27768\n27769\n27770\n27771\n27772\n27773\n27774\n27775\n27776\n27777\n27778\n27779\n27780\n27781\n27782\n27783\n27784\n27785\n27786\n27787\n27788\n27789\n27790\n27791\n27792\n27793\n27794\n27795\n27796\n27797\n27798\n27799\n27800\n27801\n27802\n27803\n27804\n27805\n27806\n27807\n27808\n27809\n27810\n27811\n27812\n27813\n27814\n27815\n27816\n27817\n27818\n27819\n27820\n27821\n27822\n27823\n27824\n27825\n27826\n27827\n27828\n27829\n27830\n27831\n27832\n27833\n27834\n27835\n27836\n27837\n27838\n27839\n27840\n27841\n27842\n27843\n27844\n27845\n27846\n27847\n27848\n27849\n27850\n27851\n27852\n27853\n27854\n27855\n27856\n27857\n27858\n27859\n27860\n27861\n27862\n27863\n27864\n27865\n27866\n27867\n27868\n27869\n27870\n27871\n27872\n27873\n27874\n27875\n27876\n27877\n27878\n27879\n27880\n27881\n27882\n27883\n27884\n27885\n27886\n27887\n27888\n27889\n27890\n27891\n27892\n27893\n27894\n27895\n27896\n27897\n27898\n27899\n27900\n27901\n27902\n27903\n27904\n27905\n27906\n27907\n27908\n27909\n27910\n27911\n27912\n27913\n27914\n27915\n27916\n27917\n27918\n27919\n27920\n27921\n27922\n27923\n27924\n27925\n27926\n27927\n27928\n27929\n27930\n27931\n27932\n27933\n27934\n27935\n27936\n27937\n27938\n27939\n27940\n27941\n27942\n27943\n27944\n27945\n27946\n27947\n27948\n27949\n27950\n27951\n27952\n27953\n27954\n27955\n27956\n27957\n27958\n
27959\n27960\n27961\n27962\n27963\n27964\n27965\n27966\n27967\n27968\n27969\n27970\n27971\n27972\n27973\n27974\n27975\n27976\n27977\n27978\n27979\n27980\n27981\n27982\n27983\n27984\n27985\n27986\n27987\n27988\n27989\n27990\n27991\n27992\n27993\n27994\n27995\n27996\n27997\n27998\n27999\n28000\n28001\n28002\n28003\n28004\n28005\n28006\n28007\n28008\n28009\n28010\n28011\n28012\n28013\n28014\n28015\n28016\n28017\n28018\n28019\n28020\n28021\n28022\n28023\n28024\n28025\n28026\n28027\n28028\n28029\n28030\n28031\n28032\n28033\n28034\n28035\n28036\n28037\n28038\n28039\n28040\n28041\n28042\n28043\n28044\n28045\n28046\n28047\n28048\n28049\n28050\n28051\n28052\n28053\n28054\n28055\n28056\n28057\n28058\n28059\n28060\n28061\n28062\n28063\n28064\n28065\n28066\n28067\n28068\n28069\n28070\n28071\n28072\n28073\n28074\n28075\n28076\n28077\n28078\n28079\n28080\n28081\n28082\n28083\n28084\n28085\n28086\n28087\n28088\n28089\n28090\n28091\n28092\n28093\n28094\n28095\n28096\n28097\n28098\n28099\n28100\n28101\n28102\n28103\n28104\n28105\n28106\n28107\n28108\n28109\n28110\n28111\n28112\n28113\n28114\n28115\n28116\n28117\n28118\n28119\n28120\n28121\n28122\n28123\n28124\n28125\n28126\n28127\n28128\n28129\n28130\n28131\n28132\n28133\n28134\n28135\n28136\n28137\n28138\n28139\n28140\n28141\n28142\n28143\n28144\n28145\n28146\n28147\n28148\n28149\n28150\n28151\n28152\n28153\n28154\n28155\n28156\n28157\n28158\n28159\n28160\n28161\n28162\n28163\n28164\n28165\n28166\n28167\n28168\n28169\n28170\n28171\n28172\n28173\n28174\n28175\n28176\n28177\n28178\n28179\n28180\n28181\n28182\n28183\n28184\n28185\n28186\n28187\n28188\n28189\n28190\n28191\n28192\n28193\n28194\n28195\n28196\n28197\n28198\n28199\n28200\n28201\n28202\n28203\n28204\n28205\n28206\n28207\n28208\n28209\n28210\n28211\n28212\n28213\n28214\n28215\n28216\n28217\n28218\n28219\n28220\n28221\n28222\n28223\n28224\n28225\n28226\n28227\n28228\n28229\n28230\n28231\n28232\n28233\n28234\n28235\n28236\n28237\n28238\n28239\n28240\n28241\n28242\n28243\n28244\n28245\n28246\n28247\n28248\n28249\n28250\n28251\n28252\n28253\n28254\n28255\n28256\n28257\n28258\n28259\n28260\n28261\n28262\n28263\n28264\n28265\n28266\n28267\n28268\n28269\n28270\n28271\n28272\n28273\n28274\n28275\n28276\n28277\n28278\n28279\n28280\n28281\n28282\n28283\n28284\n28285\n28286\n28287\n28288\n28289\n28290\n28291\n28292\n28293\n28294\n28295\n28296\n28297\n28298\n28299\n28300\n28301\n28302\n28303\n28304\n28305\n28306\n28307\n28308\n28309\n28310\n28311\n28312\n28313\n28314\n28315\n28316\n28317\n28318\n28319\n28320\n28321\n28322\n28323\n28324\n28325\n28326\n28327\n28328\n28329\n28330\n28331\n28332\n28333\n28334\n28335\n28336\n28337\n28338\n28339\n28340\n28341\n28342\n28343\n28344\n28345\n28346\n28347\n28348\n28349\n28350\n28351\n28352\n28353\n28354\n28355\n28356\n28357\n28358\n28359\n28360\n28361\n28362\n28363\n28364\n28365\n28366\n28367\n28368\n28369\n28370\n28371\n28372\n28373\n28374\n28375\n28376\n28377\n28378\n28379\n28380\n28381\n28382\n28383\n28384\n28385\n28386\n28387\n28388\n28389\n28390\n28391\n28392\n28393\n28394\n28395\n28396\n28397\n28398\n28399\n28400\n28401\n28402\n28403\n28404\n28405\n28406\n28407\n28408\n28409\n28410\n28411\n28412\n28413\n28414\n28415\n28416\n28417\n28418\n28419\n28420\n28421\n28422\n28423\n28424\n28425\n28426\n28427\n28428\n28429\n28430\n28431\n28432\n28433\n28434\n28435\n28436\n28437\n28438\n28439\n28440\n28441\n28442\n28443\n28444\n28445\n28446\n28447\n28448\n28449\n28450\n28451\n28452\n28453\n28454\n28455\n28456\n28457\n28458\n28459\n28460\n28461\n28462\n28463\n28464\n28465\n28466\
n28467\n28468\n28469\n28470\n28471\n28472\n28473\n28474\n28475\n28476\n28477\n28478\n28479\n28480\n28481\n28482\n28483\n28484\n28485\n28486\n28487\n28488\n28489\n28490\n28491\n28492\n28493\n28494\n28495\n28496\n28497\n28498\n28499\n28500\n28501\n28502\n28503\n28504\n28505\n28506\n28507\n28508\n28509\n28510\n28511\n28512\n28513\n28514\n28515\n28516\n28517\n28518\n28519\n28520\n28521\n28522\n28523\n28524\n28525\n28526\n28527\n28528\n28529\n28530\n28531\n28532\n28533\n28534\n28535\n28536\n28537\n28538\n28539\n28540\n28541\n28542\n28543\n28544\n28545\n28546\n28547\n28548\n28549\n28550\n28551\n28552\n28553\n28554\n28555\n28556\n28557\n28558\n28559\n28560\n28561\n28562\n28563\n28564\n28565\n28566\n28567\n28568\n28569\n28570\n28571\n28572\n28573\n28574\n28575\n28576\n28577\n28578\n28579\n28580\n28581\n28582\n28583\n28584\n28585\n28586\n28587\n28588\n28589\n28590\n28591\n28592\n28593\n28594\n28595\n28596\n28597\n28598\n28599\n28600\n28601\n28602\n28603\n28604\n28605\n28606\n28607\n28608\n28609\n28610\n28611\n28612\n28613\n28614\n28615\n28616\n28617\n28618\n28619\n28620\n28621\n28622\n28623\n28624\n28625\n28626\n28627\n28628\n28629\n28630\n28631\n28632\n28633\n28634\n28635\n28636\n28637\n28638\n28639\n28640\n28641\n28642\n28643\n28644\n28645\n28646\n28647\n28648\n28649\n28650\n28651\n28652\n28653\n28654\n28655\n28656\n28657\n28658\n28659\n28660\n28661\n28662\n28663\n28664\n28665\n28666\n28667\n28668\n28669\n28670\n28671\n28672\n28673\n28674\n28675\n28676\n28677\n28678\n28679\n28680\n28681\n28682\n28683\n28684\n28685\n28686\n28687\n28688\n28689\n28690\n28691\n28692\n28693\n28694\n28695\n28696\n28697\n28698\n28699\n28700\n28701\n28702\n28703\n28704\n28705\n28706\n28707\n28708\n28709\n28710\n28711\n28712\n28713\n28714\n28715\n28716\n28717\n28718\n28719\n28720\n28721\n28722\n28723\n28724\n28725\n28726\n28727\n28728\n28729\n28730\n28731\n28732\n28733\n28734\n28735\n28736\n28737\n28738\n28739\n28740\n28741\n28742\n28743\n28744\n28745\n28746\n28747\n28748\n28749\n28750\n28751\n28752\n28753\n28754\n28755\n28756\n28757\n28758\n28759\n28760\n28761\n28762\n28763\n28764\n28765\n28766\n28767\n28768\n28769\n28770\n28771\n28772\n28773\n28774\n28775\n28776\n28777\n28778\n28779\n28780\n28781\n28782\n28783\n28784\n28785\n28786\n28787\n28788\n28789\n28790\n28791\n28792\n28793\n28794\n28795\n28796\n28797\n28798\n28799\n28800\n28801\n28802\n28803\n28804\n28805\n28806\n28807\n28808\n28809\n28810\n28811\n28812\n28813\n28814\n28815\n28816\n28817\n28818\n28819\n28820\n28821\n28822\n28823\n28824\n28825\n28826\n28827\n28828\n28829\n28830\n28831\n28832\n28833\n28834\n28835\n28836\n28837\n28838\n28839\n28840\n28841\n28842\n28843\n28844\n28845\n28846\n28847\n28848\n28849\n28850\n28851\n28852\n28853\n28854\n28855\n28856\n28857\n28858\n28859\n28860\n28861\n28862\n28863\n28864\n28865\n28866\n28867\n28868\n28869\n28870\n28871\n28872\n28873\n28874\n28875\n28876\n28877\n28878\n28879\n28880\n28881\n28882\n28883\n28884\n28885\n28886\n28887\n28888\n28889\n28890\n28891\n28892\n28893\n28894\n28895\n28896\n28897\n28898\n28899\n28900\n28901\n28902\n28903\n28904\n28905\n28906\n28907\n28908\n28909\n28910\n28911\n28912\n28913\n28914\n28915\n28916\n28917\n28918\n28919\n28920\n28921\n28922\n28923\n28924\n28925\n28926\n28927\n28928\n28929\n28930\n28931\n28932\n28933\n28934\n28935\n28936\n28937\n28938\n28939\n28940\n28941\n28942\n28943\n28944\n28945\n28946\n28947\n28948\n28949\n28950\n28951\n28952\n28953\n28954\n28955\n28956\n28957\n28958\n28959\n28960\n28961\n28962\n28963\n28964\n28965\n28966\n28967\n28968\n28969\n28970\n28971\n28972\n28973\n28974
\n28975\n28976\n28977\n28978\n28979\n28980\n28981\n28982\n28983\n28984\n28985\n28986\n28987\n28988\n28989\n28990\n28991\n28992\n28993\n28994\n28995\n28996\n28997\n28998\n28999\n29000\n29001\n29002\n29003\n29004\n29005\n29006\n29007\n29008\n29009\n29010\n29011\n29012\n29013\n29014\n29015\n29016\n29017\n29018\n29019\n29020\n29021\n29022\n29023\n29024\n29025\n29026\n29027\n29028\n29029\n29030\n29031\n29032\n29033\n29034\n29035\n29036\n29037\n29038\n29039\n29040\n29041\n29042\n29043\n29044\n29045\n29046\n29047\n29048\n29049\n29050\n29051\n29052\n29053\n29054\n29055\n29056\n29057\n29058\n29059\n29060\n29061\n29062\n29063\n29064\n29065\n29066\n29067\n29068\n29069\n29070\n29071\n29072\n29073\n29074\n29075\n29076\n29077\n29078\n29079\n29080\n29081\n29082\n29083\n29084\n29085\n29086\n29087\n29088\n29089\n29090\n29091\n29092\n29093\n29094\n29095\n29096\n29097\n29098\n29099\n29100\n29101\n29102\n29103\n29104\n29105\n29106\n29107\n29108\n29109\n29110\n29111\n29112\n29113\n29114\n29115\n29116\n29117\n29118\n29119\n29120\n29121\n29122\n29123\n29124\n29125\n29126\n29127\n29128\n29129\n29130\n29131\n29132\n29133\n29134\n29135\n29136\n29137\n29138\n29139\n29140\n29141\n29142\n29143\n29144\n29145\n29146\n29147\n29148\n29149\n29150\n29151\n29152\n29153\n29154\n29155\n29156\n29157\n29158\n29159\n29160\n29161\n29162\n29163\n29164\n29165\n29166\n29167\n29168\n29169\n29170\n29171\n29172\n29173\n29174\n29175\n29176\n29177\n29178\n29179\n29180\n29181\n29182\n29183\n29184\n29185\n29186\n29187\n29188\n29189\n29190\n29191\n29192\n29193\n29194\n29195\n29196\n29197\n29198\n29199\n29200\n29201\n29202\n29203\n29204\n29205\n29206\n29207\n29208\n29209\n29210\n29211\n29212\n29213\n29214\n29215\n29216\n29217\n29218\n29219\n29220\n29221\n29222\n29223\n29224\n29225\n29226\n29227\n29228\n29229\n29230\n29231\n29232\n29233\n29234\n29235\n29236\n29237\n29238\n29239\n29240\n29241\n29242\n29243\n29244\n29245\n29246\n29247\n29248\n29249\n29250\n29251\n29252\n29253\n29254\n29255\n29256\n29257\n29258\n29259\n29260\n29261\n29262\n29263\n29264\n29265\n29266\n29267\n29268\n29269\n29270\n29271\n29272\n29273\n29274\n29275\n29276\n29277\n29278\n29279\n29280\n29281\n29282\n29283\n29284\n29285\n29286\n29287\n29288\n29289\n29290\n29291\n29292\n29293\n29294\n29295\n29296\n29297\n29298\n29299\n29300\n29301\n29302\n29303\n29304\n29305\n29306\n29307\n29308\n29309\n29310\n29311\n29312\n29313\n29314\n29315\n29316\n29317\n29318\n29319\n29320\n29321\n29322\n29323\n29324\n29325\n29326\n29327\n29328\n29329\n29330\n29331\n29332\n29333\n29334\n29335\n29336\n29337\n29338\n29339\n29340\n29341\n29342\n29343\n29344\n29345\n29346\n29347\n29348\n29349\n29350\n29351\n29352\n29353\n29354\n29355\n29356\n29357\n29358\n29359\n29360\n29361\n29362\n29363\n29364\n29365\n29366\n29367\n29368\n29369\n29370\n29371\n29372\n29373\n29374\n29375\n29376\n29377\n29378\n29379\n29380\n29381\n29382\n29383\n29384\n29385\n29386\n29387\n29388\n29389\n29390\n29391\n29392\n29393\n29394\n29395\n29396\n29397\n29398\n29399\n29400\n29401\n29402\n29403\n29404\n29405\n29406\n29407\n29408\n29409\n29410\n29411\n29412\n29413\n29414\n29415\n29416\n29417\n29418\n29419\n29420\n29421\n29422\n29423\n29424\n29425\n29426\n29427\n29428\n29429\n29430\n29431\n29432\n29433\n29434\n29435\n29436\n29437\n29438\n29439\n29440\n29441\n29442\n29443\n29444\n29445\n29446\n29447\n29448\n29449\n29450\n29451\n29452\n29453\n29454\n29455\n29456\n29457\n29458\n29459\n29460\n29461\n29462\n29463\n29464\n29465\n29466\n29467\n29468\n29469\n29470\n29471\n29472\n29473\n29474\n29475\n29476\n29477\n29478\n29479\n29480\n29481\n2948
2\n29483\n29484\n29485\n29486\n29487\n29488\n29489\n29490\n29491\n29492\n29493\n29494\n29495\n29496\n29497\n29498\n29499\n29500\n29501\n29502\n29503\n29504\n29505\n29506\n29507\n29508\n29509\n29510\n29511\n29512\n29513\n29514\n29515\n29516\n29517\n29518\n29519\n29520\n29521\n29522\n29523\n29524\n29525\n29526\n29527\n29528\n29529\n29530\n29531\n29532\n29533\n29534\n29535\n29536\n29537\n29538\n29539\n29540\n29541\n29542\n29543\n29544\n29545\n29546\n29547\n29548\n29549\n29550\n29551\n29552\n29553\n29554\n29555\n29556\n29557\n29558\n29559\n29560\n29561\n29562\n29563\n29564\n29565\n29566\n29567\n29568\n29569\n29570\n29571\n29572\n29573\n29574\n29575\n29576\n29577\n29578\n29579\n29580\n29581\n29582\n29583\n29584\n29585\n29586\n29587\n29588\n29589\n29590\n29591\n29592\n29593\n29594\n29595\n29596\n29597\n29598\n29599\n29600\n29601\n29602\n29603\n29604\n29605\n29606\n29607\n29608\n29609\n29610\n29611\n29612\n29613\n29614\n29615\n29616\n29617\n29618\n29619\n29620\n29621\n29622\n29623\n29624\n29625\n29626\n29627\n29628\n29629\n29630\n29631\n29632\n29633\n29634\n29635\n29636\n29637\n29638\n29639\n29640\n29641\n29642\n29643\n29644\n29645\n29646\n29647\n29648\n29649\n29650\n29651\n29652\n29653\n29654\n29655\n29656\n29657\n29658\n29659\n29660\n29661\n29662\n29663\n29664\n29665\n29666\n29667\n29668\n29669\n29670\n29671\n29672\n29673\n29674\n29675\n29676\n29677\n29678\n29679\n29680\n29681\n29682\n29683\n29684\n29685\n29686\n29687\n29688\n29689\n29690\n29691\n29692\n29693\n29694\n29695\n29696\n29697\n29698\n29699\n29700\n29701\n29702\n29703\n29704\n29705\n29706\n29707\n29708\n29709\n29710\n29711\n29712\n29713\n29714\n29715\n29716\n29717\n29718\n29719\n29720\n29721\n29722\n29723\n29724\n29725\n29726\n29727\n29728\n29729\n29730\n29731\n29732\n29733\n29734\n29735\n29736\n29737\n29738\n29739\n29740\n29741\n29742\n29743\n29744\n29745\n29746\n29747\n29748\n29749\n29750\n29751\n29752\n29753\n29754\n29755\n29756\n29757\n29758\n29759\n29760\n29761\n29762\n29763\n29764\n29765\n29766\n29767\n29768\n29769\n29770\n29771\n29772\n29773\n29774\n29775\n29776\n29777\n29778\n29779\n29780\n29781\n29782\n29783\n29784\n29785\n29786\n29787\n29788\n29789\n29790\n29791\n29792\n29793\n29794\n29795\n29796\n29797\n29798\n29799\n29800\n29801\n29802\n29803\n29804\n29805\n29806\n29807\n29808\n29809\n29810\n29811\n29812\n29813\n29814\n29815\n29816\n29817\n29818\n29819\n29820\n29821\n29822\n29823\n29824\n29825\n29826\n29827\n29828\n29829\n29830\n29831\n29832\n29833\n29834\n29835\n29836\n29837\n29838\n29839\n29840\n29841\n29842\n29843\n29844\n29845\n29846\n29847\n29848\n29849\n29850\n29851\n29852\n29853\n29854\n29855\n29856\n29857\n29858\n29859\n29860\n29861\n29862\n29863\n29864\n29865\n29866\n29867\n29868\n29869\n29870\n29871\n29872\n29873\n29874\n29875\n29876\n29877\n29878\n29879\n29880\n29881\n29882\n29883\n29884\n29885\n29886\n29887\n29888\n29889\n29890\n29891\n29892\n29893\n29894\n29895\n29896\n29897\n29898\n29899\n29900\n29901\n29902\n29903\n29904\n29905\n29906\n29907\n29908\n29909\n29910\n29911\n29912\n29913\n29914\n29915\n29916\n29917\n29918\n29919\n29920\n29921\n29922\n29923\n29924\n29925\n29926\n29927\n29928\n29929\n29930\n29931\n29932\n29933\n29934\n29935\n29936\n29937\n29938\n29939\n29940\n29941\n29942\n29943\n29944\n29945\n29946\n29947\n29948\n29949\n29950\n29951\n29952\n29953\n29954\n29955\n29956\n29957\n29958\n29959\n29960\n29961\n29962\n29963\n29964\n29965\n29966\n29967\n29968\n29969\n29970\n29971\n29972\n29973\n29974\n29975\n29976\n29977\n29978\n29979\n29980\n29981\n29982\n29983\n29984\n29985\n29986\n29987\n29988\n29989\n299
90\n29991\n29992\n29993\n29994\n29995\n29996\n29997\n29998\n29999' \ No newline at end of file diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/arff/tests/data/test11.arff b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/arff/tests/data/test11.arff new file mode 100644 index 0000000000000000000000000000000000000000..fadfaee884e3e91cd59f691afd954a6a6d4042da --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/arff/tests/data/test11.arff @@ -0,0 +1,11 @@ +@RELATION test11 + +@ATTRIBUTE attr0 REAL +@ATTRIBUTE attr1 REAL +@ATTRIBUTE attr2 REAL +@ATTRIBUTE attr3 REAL +@ATTRIBUTE class { class0, class1, class2, class3 } +@DATA +0.1, 0.2, 0.3, 0.4,class1 +-0.1, -0.2, -0.3, -0.4,class2 +1, 2, 3, 4,class3 diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/arff/tests/data/test2.arff b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/arff/tests/data/test2.arff new file mode 100644 index 0000000000000000000000000000000000000000..30f0dbf91b078ef670868d5e7321f956a6a7a506 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/arff/tests/data/test2.arff @@ -0,0 +1,15 @@ +@RELATION test2 + +@ATTRIBUTE attr0 REAL +@ATTRIBUTE attr1 real +@ATTRIBUTE attr2 integer +@ATTRIBUTE attr3 Integer +@ATTRIBUTE attr4 Numeric +@ATTRIBUTE attr5 numeric +@ATTRIBUTE attr6 string +@ATTRIBUTE attr7 STRING +@ATTRIBUTE attr8 {bla} +@ATTRIBUTE attr9 {bla, bla} + +@DATA +0.1, 0.2, 0.3, 0.4,class1 diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/arff/tests/data/test3.arff b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/arff/tests/data/test3.arff new file mode 100644 index 0000000000000000000000000000000000000000..23da3b30967fcc95d70883f70be9ef6e39d577fa --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/arff/tests/data/test3.arff @@ -0,0 +1,6 @@ +@RELATION test3 + +@ATTRIBUTE attr0 crap + +@DATA +0.1, 0.2, 0.3, 0.4,class1 diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/arff/tests/data/test4.arff b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/arff/tests/data/test4.arff new file mode 100644 index 0000000000000000000000000000000000000000..bf5f99ca89375fbd980185fd25711901f23ff844 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/arff/tests/data/test4.arff @@ -0,0 +1,11 @@ +@RELATION test5 + +@ATTRIBUTE attr0 REAL +@ATTRIBUTE attr1 REAL +@ATTRIBUTE attr2 REAL +@ATTRIBUTE attr3 REAL +@ATTRIBUTE class {class0, class1, class2, class3} +@DATA +0.1, 0.2, 0.3, 0.4,class1 +-0.1, -0.2, -0.3, -0.4,class2 +1, 2, 3, 4,class3 diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/arff/tests/data/test5.arff b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/arff/tests/data/test5.arff new file mode 100644 index 0000000000000000000000000000000000000000..0075daf05e7792e80dcd565e791ce40e4dd49e85 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/arff/tests/data/test5.arff @@ -0,0 +1,26 @@ +@RELATION test4 + +@ATTRIBUTE attr0 REAL +@ATTRIBUTE 
attr1 REAL +@ATTRIBUTE attr2 REAL +@ATTRIBUTE attr3 REAL +@ATTRIBUTE class {class0, class1, class2, class3} + +@DATA + +% lsdflkjhaksjdhf + +% lsdflkjhaksjdhf + +0.1, 0.2, 0.3, 0.4,class1 +% laksjdhf + +% lsdflkjhaksjdhf +-0.1, -0.2, -0.3, -0.4,class2 + +% lsdflkjhaksjdhf +% lsdflkjhaksjdhf + +% lsdflkjhaksjdhf + +1, 2, 3, 4,class3 diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/arff/tests/data/test6.arff b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/arff/tests/data/test6.arff new file mode 100644 index 0000000000000000000000000000000000000000..b63280b03aef8e0553a83fbf96692d280a3f86b7 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/arff/tests/data/test6.arff @@ -0,0 +1,12 @@ +@RELATION test6 + +@ATTRIBUTE attr0 REAL +@ATTRIBUTE attr1 REAL +@ATTRIBUTE attr2 REAL +@ATTRIBUTE attr3 REAL +@ATTRIBUTE class {C} + +@DATA +0.1, 0.2, 0.3, 0.4,C +-0.1, -0.2, -0.3, -0.4,C +1, 2, 3, 4,C diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/arff/tests/data/test7.arff b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/arff/tests/data/test7.arff new file mode 100644 index 0000000000000000000000000000000000000000..38ef6c9a7a10afb10caa5913687ea3636ab1d38e --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/arff/tests/data/test7.arff @@ -0,0 +1,15 @@ +@RELATION test7 + +@ATTRIBUTE attr_year DATE yyyy +@ATTRIBUTE attr_month DATE yyyy-MM +@ATTRIBUTE attr_date DATE yyyy-MM-dd +@ATTRIBUTE attr_datetime_local DATE "yyyy-MM-dd HH:mm" +@ATTRIBUTE attr_datetime_missing DATE "yyyy-MM-dd HH:mm" + +@DATA +1999,1999-01,1999-01-31,"1999-01-31 00:01",? +2004,2004-12,2004-12-01,"2004-12-01 23:59","2004-12-01 23:59" +1817,1817-04,1817-04-28,"1817-04-28 13:00",? +2100,2100-09,2100-09-10,"2100-09-10 12:00",? 
+2013,2013-11,2013-11-30,"2013-11-30 04:55","2013-11-30 04:55" +1631,1631-10,1631-10-15,"1631-10-15 20:04","1631-10-15 20:04" \ No newline at end of file diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/arff/tests/data/test8.arff b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/arff/tests/data/test8.arff new file mode 100644 index 0000000000000000000000000000000000000000..776deb4c9e7550eafdb26d16826f5651da37ef12 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/arff/tests/data/test8.arff @@ -0,0 +1,12 @@ +@RELATION test8 + +@ATTRIBUTE attr_datetime_utc DATE "yyyy-MM-dd HH:mm Z" +@ATTRIBUTE attr_datetime_full DATE "yy-MM-dd HH:mm:ss z" + +@DATA +"1999-01-31 00:01 UTC","99-01-31 00:01:08 +0430" +"2004-12-01 23:59 UTC","04-12-01 23:59:59 -0800" +"1817-04-28 13:00 UTC","17-04-28 13:00:33 +1000" +"2100-09-10 12:00 UTC","21-09-10 12:00:21 -0300" +"2013-11-30 04:55 UTC","13-11-30 04:55:48 -1100" +"1631-10-15 20:04 UTC","31-10-15 20:04:10 +0000" \ No newline at end of file diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/arff/tests/data/test9.arff b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/arff/tests/data/test9.arff new file mode 100644 index 0000000000000000000000000000000000000000..b3f97e32a3fd4909a3f9cbf8d5d2e8d250f8dbad --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/arff/tests/data/test9.arff @@ -0,0 +1,14 @@ +@RELATION test9 + +@ATTRIBUTE attr_date_number RELATIONAL + @ATTRIBUTE attr_date DATE "yyyy-MM-dd" + @ATTRIBUTE attr_number INTEGER +@END attr_date_number + +@DATA +"1999-01-31 1\n1935-11-27 10" +"2004-12-01 2\n1942-08-13 20" +"1817-04-28 3" +"2100-09-10 4\n1957-04-17 40\n1721-01-14 400" +"2013-11-30 5" +"1631-10-15 6" \ No newline at end of file diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/arff/tests/test_arffread.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/arff/tests/test_arffread.py new file mode 100644 index 0000000000000000000000000000000000000000..d13ebe6dd1af3044794b28f5375d06ed60787966 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/arff/tests/test_arffread.py @@ -0,0 +1,421 @@ +import datetime +import os +import sys +from os.path import join as pjoin + +from io import StringIO + +import numpy as np + +from numpy.testing import (assert_array_almost_equal, + assert_array_equal, assert_equal, assert_) +from pytest import raises as assert_raises + +from scipy.io.arff import loadarff +from scipy.io.arff._arffread import read_header, ParseArffError + + +data_path = pjoin(os.path.dirname(__file__), 'data') + +test1 = pjoin(data_path, 'test1.arff') +test2 = pjoin(data_path, 'test2.arff') +test3 = pjoin(data_path, 'test3.arff') + +test4 = pjoin(data_path, 'test4.arff') +test5 = pjoin(data_path, 'test5.arff') +test6 = pjoin(data_path, 'test6.arff') +test7 = pjoin(data_path, 'test7.arff') +test8 = pjoin(data_path, 'test8.arff') +test9 = pjoin(data_path, 'test9.arff') +test10 = pjoin(data_path, 'test10.arff') +test11 = pjoin(data_path, 'test11.arff') +test_quoted_nominal = pjoin(data_path, 'quoted_nominal.arff') +test_quoted_nominal_spaces = pjoin(data_path, 'quoted_nominal_spaces.arff') + +expect4_data = [(0.1, 0.2, 0.3, 0.4, 
'class1'), + (-0.1, -0.2, -0.3, -0.4, 'class2'), + (1, 2, 3, 4, 'class3')] +expected_types = ['numeric', 'numeric', 'numeric', 'numeric', 'nominal'] + +missing = pjoin(data_path, 'missing.arff') +expect_missing_raw = np.array([[1, 5], [2, 4], [np.nan, np.nan]]) +expect_missing = np.empty(3, [('yop', float), ('yap', float)]) +expect_missing['yop'] = expect_missing_raw[:, 0] # type: ignore[call-overload] +expect_missing['yap'] = expect_missing_raw[:, 1] # type: ignore[call-overload] + + +class TestData: + def test1(self): + # Parsing trivial file with nothing. + self._test(test4) + + def test2(self): + # Parsing trivial file with some comments in the data section. + self._test(test5) + + def test3(self): + # Parsing trivial file with nominal attribute of 1 character. + self._test(test6) + + def test4(self): + # Parsing trivial file with trailing spaces in attribute declaration. + self._test(test11) + + def _test(self, test_file): + data, meta = loadarff(test_file) + for i in range(len(data)): + for j in range(4): + assert_array_almost_equal(expect4_data[i][j], data[i][j]) + assert_equal(meta.types(), expected_types) + + def test_filelike(self): + # Test reading from file-like object (StringIO) + with open(test1) as f1: + data1, meta1 = loadarff(f1) + with open(test1) as f2: + data2, meta2 = loadarff(StringIO(f2.read())) + assert_(data1 == data2) + assert_(repr(meta1) == repr(meta2)) + + def test_path(self): + # Test reading from `pathlib.Path` object + from pathlib import Path + + with open(test1) as f1: + data1, meta1 = loadarff(f1) + + data2, meta2 = loadarff(Path(test1)) + + assert_(data1 == data2) + assert_(repr(meta1) == repr(meta2)) + + +class TestMissingData: + def test_missing(self): + data, meta = loadarff(missing) + for i in ['yop', 'yap']: + assert_array_almost_equal(data[i], expect_missing[i]) + + +class TestNoData: + def test_nodata(self): + # The file nodata.arff has no data in the @DATA section. + # Reading it should result in an array with length 0. + nodata_filename = os.path.join(data_path, 'nodata.arff') + data, meta = loadarff(nodata_filename) + if sys.byteorder == 'big': + end = '>' + else: + end = '<' + expected_dtype = np.dtype([('sepallength', f'{end}f8'), + ('sepalwidth', f'{end}f8'), + ('petallength', f'{end}f8'), + ('petalwidth', f'{end}f8'), + ('class', 'S15')]) + assert_equal(data.dtype, expected_dtype) + assert_equal(data.size, 0) + + +class TestHeader: + def test_type_parsing(self): + # Test parsing type of attribute from their value. + with open(test2) as ofile: + rel, attrs = read_header(ofile) + + expected = ['numeric', 'numeric', 'numeric', 'numeric', 'numeric', + 'numeric', 'string', 'string', 'nominal', 'nominal'] + + for i in range(len(attrs)): + assert_(attrs[i].type_name == expected[i]) + + def test_badtype_parsing(self): + # Test parsing wrong type of attribute from their value. + def badtype_read(): + with open(test3) as ofile: + _, _ = read_header(ofile) + + assert_raises(ParseArffError, badtype_read) + + def test_fullheader1(self): + # Parsing trivial header with nothing. 
+ with open(test1) as ofile: + rel, attrs = read_header(ofile) + + # Test relation + assert_(rel == 'test1') + + # Test numerical attributes + assert_(len(attrs) == 5) + for i in range(4): + assert_(attrs[i].name == 'attr%d' % i) + assert_(attrs[i].type_name == 'numeric') + + # Test nominal attribute + assert_(attrs[4].name == 'class') + assert_(attrs[4].values == ('class0', 'class1', 'class2', 'class3')) + + def test_dateheader(self): + with open(test7) as ofile: + rel, attrs = read_header(ofile) + + assert_(rel == 'test7') + + assert_(len(attrs) == 5) + + assert_(attrs[0].name == 'attr_year') + assert_(attrs[0].date_format == '%Y') + + assert_(attrs[1].name == 'attr_month') + assert_(attrs[1].date_format == '%Y-%m') + + assert_(attrs[2].name == 'attr_date') + assert_(attrs[2].date_format == '%Y-%m-%d') + + assert_(attrs[3].name == 'attr_datetime_local') + assert_(attrs[3].date_format == '%Y-%m-%d %H:%M') + + assert_(attrs[4].name == 'attr_datetime_missing') + assert_(attrs[4].date_format == '%Y-%m-%d %H:%M') + + def test_dateheader_unsupported(self): + def read_dateheader_unsupported(): + with open(test8) as ofile: + _, _ = read_header(ofile) + + assert_raises(ValueError, read_dateheader_unsupported) + + +class TestDateAttribute: + def setup_method(self): + self.data, self.meta = loadarff(test7) + + def test_year_attribute(self): + expected = np.array([ + '1999', + '2004', + '1817', + '2100', + '2013', + '1631' + ], dtype='datetime64[Y]') + + assert_array_equal(self.data["attr_year"], expected) + + def test_month_attribute(self): + expected = np.array([ + '1999-01', + '2004-12', + '1817-04', + '2100-09', + '2013-11', + '1631-10' + ], dtype='datetime64[M]') + + assert_array_equal(self.data["attr_month"], expected) + + def test_date_attribute(self): + expected = np.array([ + '1999-01-31', + '2004-12-01', + '1817-04-28', + '2100-09-10', + '2013-11-30', + '1631-10-15' + ], dtype='datetime64[D]') + + assert_array_equal(self.data["attr_date"], expected) + + def test_datetime_local_attribute(self): + expected = np.array([ + datetime.datetime(year=1999, month=1, day=31, hour=0, minute=1), + datetime.datetime(year=2004, month=12, day=1, hour=23, minute=59), + datetime.datetime(year=1817, month=4, day=28, hour=13, minute=0), + datetime.datetime(year=2100, month=9, day=10, hour=12, minute=0), + datetime.datetime(year=2013, month=11, day=30, hour=4, minute=55), + datetime.datetime(year=1631, month=10, day=15, hour=20, minute=4) + ], dtype='datetime64[m]') + + assert_array_equal(self.data["attr_datetime_local"], expected) + + def test_datetime_missing(self): + expected = np.array([ + 'nat', + '2004-12-01T23:59', + 'nat', + 'nat', + '2013-11-30T04:55', + '1631-10-15T20:04' + ], dtype='datetime64[m]') + + assert_array_equal(self.data["attr_datetime_missing"], expected) + + def test_datetime_timezone(self): + assert_raises(ParseArffError, loadarff, test8) + + +class TestRelationalAttribute: + def setup_method(self): + self.data, self.meta = loadarff(test9) + + def test_attributes(self): + assert_equal(len(self.meta._attributes), 1) + + relational = list(self.meta._attributes.values())[0] + + assert_equal(relational.name, 'attr_date_number') + assert_equal(relational.type_name, 'relational') + assert_equal(len(relational.attributes), 2) + assert_equal(relational.attributes[0].name, + 'attr_date') + assert_equal(relational.attributes[0].type_name, + 'date') + assert_equal(relational.attributes[1].name, + 'attr_number') + assert_equal(relational.attributes[1].type_name, + 'numeric') + + def 
test_data(self): + dtype_instance = [('attr_date', 'datetime64[D]'), + ('attr_number', np.float64)] + + expected = [ + np.array([('1999-01-31', 1), ('1935-11-27', 10)], + dtype=dtype_instance), + np.array([('2004-12-01', 2), ('1942-08-13', 20)], + dtype=dtype_instance), + np.array([('1817-04-28', 3)], + dtype=dtype_instance), + np.array([('2100-09-10', 4), ('1957-04-17', 40), + ('1721-01-14', 400)], + dtype=dtype_instance), + np.array([('2013-11-30', 5)], + dtype=dtype_instance), + np.array([('1631-10-15', 6)], + dtype=dtype_instance) + ] + + for i in range(len(self.data["attr_date_number"])): + assert_array_equal(self.data["attr_date_number"][i], + expected[i]) + + +class TestRelationalAttributeLong: + def setup_method(self): + self.data, self.meta = loadarff(test10) + + def test_attributes(self): + assert_equal(len(self.meta._attributes), 1) + + relational = list(self.meta._attributes.values())[0] + + assert_equal(relational.name, 'attr_relational') + assert_equal(relational.type_name, 'relational') + assert_equal(len(relational.attributes), 1) + assert_equal(relational.attributes[0].name, + 'attr_number') + assert_equal(relational.attributes[0].type_name, 'numeric') + + def test_data(self): + dtype_instance = [('attr_number', np.float64)] + + expected = np.array([(n,) for n in range(30000)], + dtype=dtype_instance) + + assert_array_equal(self.data["attr_relational"][0], + expected) + + +class TestQuotedNominal: + """ + Regression test for issue #10232: + + Exception in loadarff with quoted nominal attributes. + """ + + def setup_method(self): + self.data, self.meta = loadarff(test_quoted_nominal) + + def test_attributes(self): + assert_equal(len(self.meta._attributes), 2) + + age, smoker = self.meta._attributes.values() + + assert_equal(age.name, 'age') + assert_equal(age.type_name, 'numeric') + assert_equal(smoker.name, 'smoker') + assert_equal(smoker.type_name, 'nominal') + assert_equal(smoker.values, ['yes', 'no']) + + def test_data(self): + + age_dtype_instance = np.float64 + smoker_dtype_instance = '' (big endian) + +''' +import sys + +__all__ = [ + 'aliases', 'native_code', 'swapped_code', + 'sys_is_le', 'to_numpy_code' +] + +sys_is_le = sys.byteorder == 'little' +native_code = sys_is_le and '<' or '>' +swapped_code = sys_is_le and '>' or '<' + +aliases = {'little': ('little', '<', 'l', 'le'), + 'big': ('big', '>', 'b', 'be'), + 'native': ('native', '='), + 'swapped': ('swapped', 'S')} + + +def to_numpy_code(code): + """ + Convert various order codings to NumPy format. + + Parameters + ---------- + code : str + The code to convert. It is converted to lower case before parsing. + Legal values are: + 'little', 'big', 'l', 'b', 'le', 'be', '<', '>', 'native', '=', + 'swapped', 's'. + + Returns + ------- + out_code : {'<', '>'} + Here '<' is the numpy dtype code for little endian, + and '>' is the code for big endian. 
+ + Examples + -------- + >>> import sys + >>> from scipy.io.matlab._byteordercodes import to_numpy_code + >>> sys_is_le = (sys.byteorder == 'little') + >>> sys_is_le + True + >>> to_numpy_code('big') + '>' + >>> to_numpy_code('little') + '<' + >>> nc = to_numpy_code('native') + >>> nc == '<' if sys_is_le else nc == '>' + True + >>> sc = to_numpy_code('swapped') + >>> sc == '>' if sys_is_le else sc == '<' + True + + """ + code = code.lower() + if code is None: + return native_code + if code in aliases['little']: + return '<' + elif code in aliases['big']: + return '>' + elif code in aliases['native']: + return native_code + elif code in aliases['swapped']: + return swapped_code + else: + raise ValueError( + f'We cannot handle byte order {code}') diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/_mio.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/_mio.py new file mode 100644 index 0000000000000000000000000000000000000000..4c86d873bd11fb45676d8db37a5d60b032276ecc --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/_mio.py @@ -0,0 +1,372 @@ +""" +Module for reading and writing matlab (TM) .mat files +""" +# Authors: Travis Oliphant, Matthew Brett + +from contextlib import contextmanager + +from ._miobase import _get_matfile_version, docfiller +from ._mio4 import MatFile4Reader, MatFile4Writer +from ._mio5 import MatFile5Reader, MatFile5Writer + +__all__ = ['loadmat', 'savemat', 'whosmat'] + + +@contextmanager +def _open_file_context(file_like, appendmat, mode='rb'): + f, opened = _open_file(file_like, appendmat, mode) + try: + yield f + finally: + if opened: + f.close() + + +def _open_file(file_like, appendmat, mode='rb'): + """ + Open `file_like` and return as file-like object. First, check if object is + already file-like; if so, return it as-is. Otherwise, try to pass it + to open(). If that fails, and `file_like` is a string, and `appendmat` is true, + append '.mat' and try again. + """ + reqs = {'read'} if set(mode) & set('r+') else set() + if set(mode) & set('wax+'): + reqs.add('write') + if reqs.issubset(dir(file_like)): + return file_like, False + + try: + return open(file_like, mode), True + except OSError as e: + # Probably "not found" + if isinstance(file_like, str): + if appendmat and not file_like.endswith('.mat'): + file_like += '.mat' + return open(file_like, mode), True + else: + raise OSError( + 'Reader needs file name or open file-like object' + ) from e + + +@docfiller +def mat_reader_factory(file_name, appendmat=True, **kwargs): + """ + Create reader for matlab .mat format files. + + Parameters + ---------- + %(file_arg)s + %(append_arg)s + %(load_args)s + %(struct_arg)s + + Returns + ------- + matreader : MatFileReader object + Initialized instance of MatFileReader class matching the mat file + type detected in `filename`. + file_opened : bool + Whether the file was opened by this routine. + + """ + byte_stream, file_opened = _open_file(file_name, appendmat) + mjv, mnv = _get_matfile_version(byte_stream) + if mjv == 0: + return MatFile4Reader(byte_stream, **kwargs), file_opened + elif mjv == 1: + return MatFile5Reader(byte_stream, **kwargs), file_opened + elif mjv == 2: + raise NotImplementedError('Please use HDF reader for matlab v7.3 ' + 'files, e.g. 
h5py') + else: + raise TypeError(f'Did not recognize version {mjv}') + + +@docfiller +def loadmat(file_name, mdict=None, appendmat=True, *, spmatrix=True, **kwargs): + """ + Load MATLAB file. + + Parameters + ---------- + file_name : str + Name of the mat file (do not need .mat extension if + appendmat==True). Can also pass open file-like object. + mdict : dict, optional + Dictionary in which to insert matfile variables. + appendmat : bool, optional + True to append the .mat extension to the end of the given + filename, if not already present. Default is True. + spmatrix : bool, optional (default: True) + If ``True``, return sparse ``coo_matrix``. Otherwise return ``coo_array``. + Only relevant for sparse variables. + byte_order : str or None, optional + None by default, implying byte order guessed from mat + file. Otherwise can be one of ('native', '=', 'little', '<', + 'BIG', '>'). + mat_dtype : bool, optional + If True, return arrays in same dtype as would be loaded into + MATLAB (instead of the dtype with which they are saved). + squeeze_me : bool, optional + Whether to squeeze unit matrix dimensions or not. + chars_as_strings : bool, optional + Whether to convert char arrays to string arrays. + matlab_compatible : bool, optional + Returns matrices as would be loaded by MATLAB (implies + squeeze_me=False, chars_as_strings=False, mat_dtype=True, + struct_as_record=True). + struct_as_record : bool, optional + Whether to load MATLAB structs as NumPy record arrays, or as + old-style NumPy arrays with dtype=object. Setting this flag to + False replicates the behavior of scipy version 0.7.x (returning + NumPy object arrays). The default setting is True, because it + allows easier round-trip load and save of MATLAB files. + verify_compressed_data_integrity : bool, optional + Whether the length of compressed sequences in the MATLAB file + should be checked, to ensure that they are not longer than we expect. + It is advisable to enable this (the default) because overlong + compressed sequences in MATLAB files generally indicate that the + files have experienced some sort of corruption. + variable_names : None or sequence + If None (the default) - read all variables in file. Otherwise, + `variable_names` should be a sequence of strings, giving names of the + MATLAB variables to read from the file. The reader will skip any + variable with a name not in this sequence, possibly saving some read + processing. + simplify_cells : False, optional + If True, return a simplified dict structure (which is useful if the mat + file contains cell arrays). Note that this only affects the structure + of the result and not its contents (which is identical for both output + structures). If True, this automatically sets `struct_as_record` to + False and `squeeze_me` to True, which is required to simplify cells. + uint16_codec : str, optional + The codec to use for decoding characters, which are stored as uint16 + values. The default uses the system encoding, but this can be manually + set to other values such as 'ascii', 'latin1', and 'utf-8'. This + parameter is relevant only for files stored as v6 and above, and not + for files stored as v4. + + Returns + ------- + mat_dict : dict + dictionary with variable names as keys, and loaded matrices as values. + + Notes + ----- + v4 (Level 1.0), v6 and v7 to 7.2 matfiles are supported. + + You will need an HDF5 Python library to read MATLAB 7.3 format mat + files. Because SciPy does not supply one, we do not implement the + HDF5 / 7.3 interface here. 
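+
+    For v7.3 files, a fallback along these lines can be used (a minimal
+    sketch, assuming the third-party ``h5py`` package is installed; this is
+    not part of the SciPy API)::
+
+        from scipy.io import loadmat
+
+        try:
+            contents = loadmat(file_name)
+        except NotImplementedError:
+            # v7.3 files are HDF5 containers; read the datasets directly.
+            import h5py
+            with h5py.File(file_name, "r") as f:
+                contents = {key: f[key][()] for key in f.keys()}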
+ + Examples + -------- + >>> from os.path import dirname, join as pjoin + >>> import scipy.io as sio + + Get the filename for an example .mat file from the tests/data directory. + + >>> data_dir = pjoin(dirname(sio.__file__), 'matlab', 'tests', 'data') + >>> mat_fname = pjoin(data_dir, 'testdouble_7.4_GLNX86.mat') + + Load the .mat file contents. + + >>> mat_contents = sio.loadmat(mat_fname, spmatrix=False) + + The result is a dictionary, one key/value pair for each variable: + + >>> sorted(mat_contents.keys()) + ['__globals__', '__header__', '__version__', 'testdouble'] + >>> mat_contents['testdouble'] + array([[0. , 0.78539816, 1.57079633, 2.35619449, 3.14159265, + 3.92699082, 4.71238898, 5.49778714, 6.28318531]]) + + By default SciPy reads MATLAB structs as structured NumPy arrays where the + dtype fields are of type `object` and the names correspond to the MATLAB + struct field names. This can be disabled by setting the optional argument + `struct_as_record=False`. + + Get the filename for an example .mat file that contains a MATLAB struct + called `teststruct` and load the contents. + + >>> matstruct_fname = pjoin(data_dir, 'teststruct_7.4_GLNX86.mat') + >>> matstruct_contents = sio.loadmat(matstruct_fname) + >>> teststruct = matstruct_contents['teststruct'] + >>> teststruct.dtype + dtype([('stringfield', 'O'), ('doublefield', 'O'), ('complexfield', 'O')]) + + The size of the structured array is the size of the MATLAB struct, not the + number of elements in any particular field. The shape defaults to 2-D + unless the optional argument `squeeze_me=True`, in which case all length 1 + dimensions are removed. + + >>> teststruct.size + 1 + >>> teststruct.shape + (1, 1) + + Get the 'stringfield' of the first element in the MATLAB struct. + + >>> teststruct[0, 0]['stringfield'] + array(['Rats live on no evil star.'], + dtype='>> teststruct['doublefield'][0, 0] + array([[ 1.41421356, 2.71828183, 3.14159265]]) + + Load the MATLAB struct, squeezing out length 1 dimensions, and get the item + from the 'complexfield'. + + >>> matstruct_squeezed = sio.loadmat(matstruct_fname, squeeze_me=True) + >>> matstruct_squeezed['teststruct'].shape + () + >>> matstruct_squeezed['teststruct']['complexfield'].shape + () + >>> matstruct_squeezed['teststruct']['complexfield'].item() + array([ 1.41421356+1.41421356j, 2.71828183+2.71828183j, + 3.14159265+3.14159265j]) + """ + variable_names = kwargs.pop('variable_names', None) + with _open_file_context(file_name, appendmat) as f: + MR, _ = mat_reader_factory(f, **kwargs) + matfile_dict = MR.get_variables(variable_names) + if spmatrix: + from scipy.sparse import issparse, coo_matrix + for name, var in list(matfile_dict.items()): + if issparse(var): + matfile_dict[name] = coo_matrix(var) + + if mdict is not None: + mdict.update(matfile_dict) + else: + mdict = matfile_dict + + return mdict + + +@docfiller +def savemat(file_name, mdict, + appendmat=True, + format='5', + long_field_names=False, + do_compression=False, + oned_as='row'): + """ + Save a dictionary of names and arrays into a MATLAB-style .mat file. + + This saves the array objects in the given dictionary to a MATLAB- + style .mat file. + + Parameters + ---------- + file_name : str or file-like object + Name of the .mat file (.mat extension not needed if ``appendmat == + True``). + Can also pass open file_like object. + mdict : dict + Dictionary from which to save matfile variables. 
+ appendmat : bool, optional + True (the default) to append the .mat extension to the end of the + given filename, if not already present. + format : {'5', '4'}, string, optional + '5' (the default) for MATLAB 5 and up (to 7.2), + '4' for MATLAB 4 .mat files. + long_field_names : bool, optional + False (the default) - maximum field name length in a structure is + 31 characters which is the documented maximum length. + True - maximum field name length in a structure is 63 characters + which works for MATLAB 7.6+. + do_compression : bool, optional + Whether or not to compress matrices on write. Default is False. + oned_as : {'row', 'column'}, optional + If 'column', write 1-D NumPy arrays as column vectors. + If 'row', write 1-D NumPy arrays as row vectors. + + Examples + -------- + >>> from scipy.io import savemat + >>> import numpy as np + >>> a = np.arange(20) + >>> mdic = {"a": a, "label": "experiment"} + >>> mdic + {'a': array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 17, 18, 19]), + 'label': 'experiment'} + >>> savemat("matlab_matrix.mat", mdic) + """ + with _open_file_context(file_name, appendmat, 'wb') as file_stream: + if format == '4': + if long_field_names: + message = "Long field names are not available for version 4 files" + raise ValueError(message) + MW = MatFile4Writer(file_stream, oned_as) + elif format == '5': + MW = MatFile5Writer(file_stream, + do_compression=do_compression, + unicode_strings=True, + long_field_names=long_field_names, + oned_as=oned_as) + else: + raise ValueError("Format should be '4' or '5'") + MW.put_variables(mdict) + + +@docfiller +def whosmat(file_name, appendmat=True, **kwargs): + """ + List variables inside a MATLAB file. + + Parameters + ---------- + %(file_arg)s + %(append_arg)s + %(load_args)s + %(struct_arg)s + + Returns + ------- + variables : list of tuples + A list of tuples, where each tuple holds the matrix name (a string), + its shape (tuple of ints), and its data class (a string). + Possible data classes are: int8, uint8, int16, uint16, int32, uint32, + int64, uint64, single, double, cell, struct, object, char, sparse, + function, opaque, logical, unknown. + + Notes + ----- + v4 (Level 1.0), v6 and v7 to 7.2 matfiles are supported. + + You will need an HDF5 python library to read matlab 7.3 format mat + files (e.g. h5py). Because SciPy does not supply one, we do not implement the + HDF5 / 7.3 interface here. + + .. versionadded:: 0.12.0 + + Examples + -------- + >>> from io import BytesIO + >>> import numpy as np + >>> from scipy.io import savemat, whosmat + + Create some arrays, and use `savemat` to write them to a ``BytesIO`` + instance. + + >>> a = np.array([[10, 20, 30], [11, 21, 31]], dtype=np.int32) + >>> b = np.geomspace(1, 10, 5) + >>> f = BytesIO() + >>> savemat(f, {'a': a, 'b': b}) + + Use `whosmat` to inspect ``f``. Each tuple in the output list gives + the name, shape and data type of the array in ``f``. 
+ + >>> whosmat(f) + [('a', (2, 3), 'int32'), ('b', (1, 5), 'double')] + + """ + with _open_file_context(file_name, appendmat) as f: + ML, file_opened = mat_reader_factory(f, **kwargs) + variables = ML.list_variables() + return variables diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/_mio4.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/_mio4.py new file mode 100644 index 0000000000000000000000000000000000000000..b108386d110e6062d088498259e849603583eb94 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/_mio4.py @@ -0,0 +1,632 @@ +''' Classes for read / write of matlab (TM) 4 files +''' +import sys +import warnings +import math +from operator import mul + +import numpy as np + +import scipy.sparse + +from ._miobase import (MatFileReader, docfiller, matdims, read_dtype, + convert_dtypes, arr_to_chars, arr_dtype_number) + +from ._mio_utils import squeeze_element, chars_to_strings +from functools import reduce + + +__all__ = [ + 'MatFile4Reader', 'MatFile4Writer', 'SYS_LITTLE_ENDIAN', + 'VarHeader4', 'VarReader4', 'VarWriter4', 'arr_to_2d', 'mclass_info', + 'mdtypes_template', 'miDOUBLE', 'miINT16', 'miINT32', 'miSINGLE', + 'miUINT16', 'miUINT8', 'mxCHAR_CLASS', 'mxFULL_CLASS', 'mxSPARSE_CLASS', + 'np_to_mtypes', 'order_codes' +] + + +SYS_LITTLE_ENDIAN = sys.byteorder == 'little' + +miDOUBLE = 0 +miSINGLE = 1 +miINT32 = 2 +miINT16 = 3 +miUINT16 = 4 +miUINT8 = 5 + +mdtypes_template = { + miDOUBLE: 'f8', + miSINGLE: 'f4', + miINT32: 'i4', + miINT16: 'i2', + miUINT16: 'u2', + miUINT8: 'u1', + 'header': [('mopt', 'i4'), + ('mrows', 'i4'), + ('ncols', 'i4'), + ('imagf', 'i4'), + ('namlen', 'i4')], + 'U1': 'U1', + } + +np_to_mtypes = { + 'f8': miDOUBLE, + 'c32': miDOUBLE, + 'c24': miDOUBLE, + 'c16': miDOUBLE, + 'f4': miSINGLE, + 'c8': miSINGLE, + 'i4': miINT32, + 'i2': miINT16, + 'u2': miUINT16, + 'u1': miUINT8, + 'S1': miUINT8, + } + +# matrix classes +mxFULL_CLASS = 0 +mxCHAR_CLASS = 1 +mxSPARSE_CLASS = 2 + +order_codes = { + 0: '<', + 1: '>', + 2: 'VAX D-float', # ! + 3: 'VAX G-float', + 4: 'Cray', # !! 
+ } + +mclass_info = { + mxFULL_CLASS: 'double', + mxCHAR_CLASS: 'char', + mxSPARSE_CLASS: 'sparse', + } + + +_MAX_INTP = np.iinfo(np.intp).max + + +class VarHeader4: + # Mat4 variables never logical or global + is_logical = False + is_global = False + + def __init__(self, + name, + dtype, + mclass, + dims, + is_complex): + self.name = name + self.dtype = dtype + self.mclass = mclass + self.dims = dims + self.is_complex = is_complex + + +class VarReader4: + ''' Class to read matlab 4 variables ''' + + def __init__(self, file_reader): + self.file_reader = file_reader + self.mat_stream = file_reader.mat_stream + self.dtypes = file_reader.dtypes + self.chars_as_strings = file_reader.chars_as_strings + self.squeeze_me = file_reader.squeeze_me + + def read_header(self): + ''' Read and return header for variable ''' + data = read_dtype(self.mat_stream, self.dtypes['header']) + name = self.mat_stream.read(int(data['namlen'])).strip(b'\x00') + if data['mopt'] < 0 or data['mopt'] > 5000: + raise ValueError('Mat 4 mopt wrong format, byteswapping problem?') + M, rest = divmod(data['mopt'], 1000) # order code + if M not in (0, 1): + warnings.warn(f"We do not support byte ordering '{order_codes[M]}';" + " returned data may be corrupt", + UserWarning, stacklevel=3) + O, rest = divmod(rest, 100) # unused, should be 0 + if O != 0: + raise ValueError('O in MOPT integer should be 0, wrong format?') + P, rest = divmod(rest, 10) # data type code e.g miDOUBLE (see above) + T = rest # matrix type code e.g., mxFULL_CLASS (see above) + dims = (data['mrows'], data['ncols']) + is_complex = data['imagf'] == 1 + dtype = self.dtypes[P] + return VarHeader4( + name, + dtype, + T, + dims, + is_complex) + + def array_from_header(self, hdr, process=True): + mclass = hdr.mclass + if mclass == mxFULL_CLASS: + arr = self.read_full_array(hdr) + elif mclass == mxCHAR_CLASS: + arr = self.read_char_array(hdr) + if process and self.chars_as_strings: + arr = chars_to_strings(arr) + elif mclass == mxSPARSE_CLASS: + # no current processing (below) makes sense for sparse + return self.read_sparse_array(hdr) + else: + raise TypeError(f'No reader for class code {mclass}') + if process and self.squeeze_me: + return squeeze_element(arr) + return arr + + def read_sub_array(self, hdr, copy=True): + ''' Mat4 read using header `hdr` dtype and dims + + Parameters + ---------- + hdr : object + object with attributes ``dtype``, ``dims``. dtype is assumed to be + the correct endianness + copy : bool, optional + copies array before return if True (default True) + (buffer is usually read only) + + Returns + ------- + arr : ndarray + of dtype given by `hdr` ``dtype`` and shape given by `hdr` ``dims`` + ''' + dt = hdr.dtype + # Fast product for large (>2GB) arrays. + num_bytes = reduce(mul, hdr.dims, np.int64(dt.itemsize)) + if num_bytes > _MAX_INTP: + raise ValueError( + f"Variable '{hdr.name.decode('latin1')}' has byte length " + f"longer than largest possible NumPy array on this platform.") + buffer = self.mat_stream.read(num_bytes) + if len(buffer) != num_bytes: + raise ValueError( + f"Not enough bytes to read matrix " + f"'{hdr.name.decode('latin1')}'; is this a badly-formed file? 
" + f"Consider listing matrices with `whosmat` and loading named " + f"matrices with `variable_names` kwarg to `loadmat`") + arr = np.ndarray(shape=hdr.dims, + dtype=dt, + buffer=buffer, + order='F') + if copy: + arr = arr.copy() + return arr + + def read_full_array(self, hdr): + ''' Full (rather than sparse) matrix getter + + Read matrix (array) can be real or complex + + Parameters + ---------- + hdr : ``VarHeader4`` instance + + Returns + ------- + arr : ndarray + complex array if ``hdr.is_complex`` is True, otherwise a real + numeric array + ''' + if hdr.is_complex: + # avoid array copy to save memory + res = self.read_sub_array(hdr, copy=False) + res_j = self.read_sub_array(hdr, copy=False) + return res + (res_j * 1j) + return self.read_sub_array(hdr) + + def read_char_array(self, hdr): + ''' latin-1 text matrix (char matrix) reader + + Parameters + ---------- + hdr : ``VarHeader4`` instance + + Returns + ------- + arr : ndarray + with dtype 'U1', shape given by `hdr` ``dims`` + ''' + arr = self.read_sub_array(hdr).astype(np.uint8) + S = arr.tobytes().decode('latin-1') + return np.ndarray(shape=hdr.dims, + dtype=np.dtype('U1'), + buffer=np.array(S)).copy() + + def read_sparse_array(self, hdr): + ''' Read and return sparse matrix type + + Parameters + ---------- + hdr : ``VarHeader4`` instance + + Returns + ------- + arr : coo_array + with dtype ``float`` and shape read from the sparse array data + + Notes + ----- + MATLAB 4 real sparse arrays are saved in a N+1 by 3 array format, where + N is the number of non-zero values. Column 1 values [0:N] are the + (1-based) row indices of the each non-zero value, column 2 [0:N] are the + column indices, column 3 [0:N] are the (real) values. The last values + [-1,0:2] of the rows, column indices are shape[0] and shape[1] + respectively of the output matrix. The last value for the values column + is a padding 0. mrows and ncols values from the header give the shape of + the stored matrix, here [N+1, 3]. Complex data are saved as a 4 column + matrix, where the fourth column contains the imaginary component; the + last value is again 0. Complex sparse data do *not* have the header + ``imagf`` field set to True; the fact that the data are complex is only + detectable because there are 4 storage columns. + ''' + res = self.read_sub_array(hdr) + tmp = res[:-1,:] + # All numbers are float64 in Matlab, but SciPy sparse expects int shape + dims = (int(res[-1,0]), int(res[-1,1])) + I = np.ascontiguousarray(tmp[:,0],dtype='intc') # fixes byte order also + J = np.ascontiguousarray(tmp[:,1],dtype='intc') + I -= 1 # for 1-based indexing + J -= 1 + if res.shape[1] == 3: + V = np.ascontiguousarray(tmp[:,2],dtype='float') + else: + V = np.ascontiguousarray(tmp[:,2],dtype='complex') + V.imag = tmp[:,3] + return scipy.sparse.coo_array((V,(I,J)), dims) + + def shape_from_header(self, hdr): + '''Read the shape of the array described by the header. + The file position after this call is unspecified. 
+ ''' + mclass = hdr.mclass + if mclass == mxFULL_CLASS: + shape = tuple(map(int, hdr.dims)) + elif mclass == mxCHAR_CLASS: + shape = tuple(map(int, hdr.dims)) + if self.chars_as_strings: + shape = shape[:-1] + elif mclass == mxSPARSE_CLASS: + dt = hdr.dtype + dims = hdr.dims + + if not (len(dims) == 2 and dims[0] >= 1 and dims[1] >= 1): + return () + + # Read only the row and column counts + self.mat_stream.seek(dt.itemsize * (dims[0] - 1), 1) + rows = np.ndarray(shape=(), dtype=dt, + buffer=self.mat_stream.read(dt.itemsize)) + self.mat_stream.seek(dt.itemsize * (dims[0] - 1), 1) + cols = np.ndarray(shape=(), dtype=dt, + buffer=self.mat_stream.read(dt.itemsize)) + + shape = (int(rows), int(cols)) + else: + raise TypeError(f'No reader for class code {mclass}') + + if self.squeeze_me: + shape = tuple([x for x in shape if x != 1]) + return shape + + +class MatFile4Reader(MatFileReader): + ''' Reader for Mat4 files ''' + @docfiller + def __init__(self, mat_stream, *args, **kwargs): + ''' Initialize matlab 4 file reader + + %(matstream_arg)s + %(load_args)s + ''' + super().__init__(mat_stream, *args, **kwargs) + self._matrix_reader = None + + def guess_byte_order(self): + self.mat_stream.seek(0) + mopt = read_dtype(self.mat_stream, np.dtype('i4')) + self.mat_stream.seek(0) + if mopt == 0: + return '<' + if mopt < 0 or mopt > 5000: + # Number must have been byteswapped + return SYS_LITTLE_ENDIAN and '>' or '<' + # Not byteswapped + return SYS_LITTLE_ENDIAN and '<' or '>' + + def initialize_read(self): + ''' Run when beginning read of variables + + Sets up readers from parameters in `self` + ''' + self.dtypes = convert_dtypes(mdtypes_template, self.byte_order) + self._matrix_reader = VarReader4(self) + + def read_var_header(self): + ''' Read and return header, next position + + Parameters + ---------- + None + + Returns + ------- + header : object + object that can be passed to self.read_var_array, and that + has attributes ``name`` and ``is_global`` + next_position : int + position in stream of next variable + ''' + hdr = self._matrix_reader.read_header() + # Fast product for large (>2GB) arrays. + remaining_bytes = reduce(mul, hdr.dims, np.int64(hdr.dtype.itemsize)) + if hdr.is_complex and not hdr.mclass == mxSPARSE_CLASS: + remaining_bytes *= 2 + next_position = self.mat_stream.tell() + remaining_bytes + return hdr, next_position + + def read_var_array(self, header, process=True): + ''' Read array, given `header` + + Parameters + ---------- + header : header object + object with fields defining variable header + process : {True, False}, optional + If True, apply recursive post-processing during loading of array. + + Returns + ------- + arr : array + array with post-processing applied or not according to + `process`. + ''' + return self._matrix_reader.array_from_header(header, process) + + def get_variables(self, variable_names=None): + ''' get variables from stream as dictionary + + Parameters + ---------- + variable_names : None or str or sequence of str, optional + variable name, or sequence of variable names to get from Mat file / + file stream. If None, then get all variables in file. 
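+
+        Returns
+        -------
+        mdict : dict
+            Dictionary mapping variable names to the corresponding loaded
+            arrays; variables whose names are not in `variable_names` are
+            skipped.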
+ ''' + if isinstance(variable_names, str): + variable_names = [variable_names] + elif variable_names is not None: + variable_names = list(variable_names) + self.mat_stream.seek(0) + # set up variable reader + self.initialize_read() + mdict = {} + while not self.end_of_stream(): + hdr, next_position = self.read_var_header() + name = 'None' if hdr.name is None else hdr.name.decode('latin1') + if variable_names is not None and name not in variable_names: + self.mat_stream.seek(next_position) + continue + mdict[name] = self.read_var_array(hdr) + self.mat_stream.seek(next_position) + if variable_names is not None: + variable_names.remove(name) + if len(variable_names) == 0: + break + return mdict + + def list_variables(self): + ''' list variables from stream ''' + self.mat_stream.seek(0) + # set up variable reader + self.initialize_read() + vars = [] + while not self.end_of_stream(): + hdr, next_position = self.read_var_header() + name = 'None' if hdr.name is None else hdr.name.decode('latin1') + shape = self._matrix_reader.shape_from_header(hdr) + info = mclass_info.get(hdr.mclass, 'unknown') + vars.append((name, shape, info)) + + self.mat_stream.seek(next_position) + return vars + + +def arr_to_2d(arr, oned_as='row'): + ''' Make ``arr`` exactly two dimensional + + If `arr` has more than 2 dimensions, raise a ValueError + + Parameters + ---------- + arr : array + oned_as : {'row', 'column'}, optional + Whether to reshape 1-D vectors as row vectors or column vectors. + See documentation for ``matdims`` for more detail + + Returns + ------- + arr2d : array + 2-D version of the array + ''' + dims = matdims(arr, oned_as) + if len(dims) > 2: + raise ValueError('Matlab 4 files cannot save arrays with more than ' + '2 dimensions') + return arr.reshape(dims) + + +class VarWriter4: + def __init__(self, file_writer): + self.file_stream = file_writer.file_stream + self.oned_as = file_writer.oned_as + + def write_bytes(self, arr): + self.file_stream.write(arr.tobytes(order='F')) + + def write_string(self, s): + self.file_stream.write(s) + + def write_header(self, name, shape, P=miDOUBLE, T=mxFULL_CLASS, imagf=0): + ''' Write header for given data options + + Parameters + ---------- + name : str + name of variable + shape : sequence + Shape of array as it will be read in matlab + P : int, optional + code for mat4 data type, one of ``miDOUBLE, miSINGLE, miINT32, + miINT16, miUINT16, miUINT8`` + T : int, optional + code for mat4 matrix class, one of ``mxFULL_CLASS, mxCHAR_CLASS, + mxSPARSE_CLASS`` + imagf : int, optional + flag indicating complex + ''' + header = np.empty((), mdtypes_template['header']) + M = not SYS_LITTLE_ENDIAN + O = 0 + header['mopt'] = (M * 1000 + + O * 100 + + P * 10 + + T) + header['mrows'] = shape[0] + header['ncols'] = shape[1] + header['imagf'] = imagf + header['namlen'] = len(name) + 1 + self.write_bytes(header) + data = name + '\0' + self.write_string(data.encode('latin1')) + + def write(self, arr, name): + ''' Write matrix `arr`, with name `name` + + Parameters + ---------- + arr : array_like + array to write + name : str + name in matlab workspace + ''' + # we need to catch sparse first, because np.asarray returns an + # an object array for scipy.sparse + if scipy.sparse.issparse(arr): + self.write_sparse(arr, name) + return + arr = np.asarray(arr) + dt = arr.dtype + if not dt.isnative: + arr = arr.astype(dt.newbyteorder('=')) + dtt = dt.type + if dtt is np.object_: + raise TypeError('Cannot save object arrays in Mat4') + elif dtt is np.void: + raise TypeError('Cannot save 
void type arrays') + elif dtt in (np.str_, np.bytes_): + self.write_char(arr, name) + return + self.write_numeric(arr, name) + + def write_numeric(self, arr, name): + arr = arr_to_2d(arr, self.oned_as) + imagf = arr.dtype.kind == 'c' + try: + P = np_to_mtypes[arr.dtype.str[1:]] + except KeyError: + if imagf: + arr = arr.astype('c128') + else: + arr = arr.astype('f8') + P = miDOUBLE + self.write_header(name, + arr.shape, + P=P, + T=mxFULL_CLASS, + imagf=imagf) + if imagf: + self.write_bytes(arr.real) + self.write_bytes(arr.imag) + else: + self.write_bytes(arr) + + def write_char(self, arr, name): + if arr.dtype.type == np.str_ and arr.dtype.itemsize != np.dtype('U1').itemsize: + arr = arr_to_chars(arr) + arr = arr_to_2d(arr, self.oned_as) + dims = arr.shape + self.write_header( + name, + dims, + P=miUINT8, + T=mxCHAR_CLASS) + if arr.dtype.kind == 'U': + # Recode unicode to latin1 + n_chars = math.prod(dims) + st_arr = np.ndarray(shape=(), + dtype=arr_dtype_number(arr, n_chars), + buffer=arr) + st = st_arr.item().encode('latin-1') + arr = np.ndarray(shape=dims, dtype='S1', buffer=st) + self.write_bytes(arr) + + def write_sparse(self, arr, name): + ''' Sparse matrices are 2-D + + See docstring for VarReader4.read_sparse_array + ''' + A = arr.tocoo() # convert to sparse COO format (ijv) + imagf = A.dtype.kind == 'c' + ijv = np.zeros((A.nnz + 1, 3+imagf), dtype='f8') + ijv[:-1,0] = A.row + ijv[:-1,1] = A.col + ijv[:-1,0:2] += 1 # 1 based indexing + if imagf: + ijv[:-1,2] = A.data.real + ijv[:-1,3] = A.data.imag + else: + ijv[:-1,2] = A.data + ijv[-1,0:2] = A.shape + self.write_header( + name, + ijv.shape, + P=miDOUBLE, + T=mxSPARSE_CLASS) + self.write_bytes(ijv) + + +class MatFile4Writer: + ''' Class for writing matlab 4 format files ''' + def __init__(self, file_stream, oned_as=None): + self.file_stream = file_stream + if oned_as is None: + oned_as = 'row' + self.oned_as = oned_as + self._matrix_writer = None + + def put_variables(self, mdict, write_header=None): + ''' Write variables in `mdict` to stream + + Parameters + ---------- + mdict : mapping + mapping with method ``items`` return name, contents pairs + where ``name`` which will appeak in the matlab workspace in + file load, and ``contents`` is something writeable to a + matlab file, such as a NumPy array. + write_header : {None, True, False} + If True, then write the matlab file header before writing the + variables. If None (the default) then write the file header + if we are at position 0 in the stream. By setting False + here, and setting the stream position to the end of the file, + you can append variables to a matlab file + ''' + # there is no header for a matlab 4 mat file, so we ignore the + # ``write_header`` input argument. 
It's there for compatibility + # with the matlab 5 version of this method + self._matrix_writer = VarWriter4(self) + for name, var in mdict.items(): + self._matrix_writer.write(var, name) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/_mio5.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/_mio5.py new file mode 100644 index 0000000000000000000000000000000000000000..5c4ed0361a603e10338a8d494838ebd861f56cb8 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/_mio5.py @@ -0,0 +1,895 @@ +''' Classes for read / write of matlab (TM) 5 files + +The matfile specification last found here: + +https://www.mathworks.com/access/helpdesk/help/pdf_doc/matlab/matfile_format.pdf + +(as of December 5 2008) + +================================= + Note on functions and mat files +================================= + +The document above does not give any hints as to the storage of matlab +function handles, or anonymous function handles. I had, therefore, to +guess the format of matlab arrays of ``mxFUNCTION_CLASS`` and +``mxOPAQUE_CLASS`` by looking at example mat files. + +``mxFUNCTION_CLASS`` stores all types of matlab functions. It seems to +contain a struct matrix with a set pattern of fields. For anonymous +functions, a sub-fields of one of these fields seems to contain the +well-named ``mxOPAQUE_CLASS``. This seems to contain: + +* array flags as for any matlab matrix +* 3 int8 strings +* a matrix + +It seems that whenever the mat file contains a ``mxOPAQUE_CLASS`` +instance, there is also an un-named matrix (name == '') at the end of +the mat file. I'll call this the ``__function_workspace__`` matrix. + +When I saved two anonymous functions in a mat file, or appended another +anonymous function to the mat file, there was still only one +``__function_workspace__`` un-named matrix at the end, but larger than +that for a mat file with a single anonymous function, suggesting that +the workspaces for the two functions had been merged. + +The ``__function_workspace__`` matrix appears to be of double class +(``mxCLASS_DOUBLE``), but stored as uint8, the memory for which is in +the format of a mini .mat file, without the first 124 bytes of the file +header (the description and the subsystem_offset), but with the version +U2 bytes, and the S2 endian test bytes. There follow 4 zero bytes, +presumably for 8 byte padding, and then a series of ``miMATRIX`` +entries, as in a standard mat file. The ``miMATRIX`` entries appear to +be series of un-named (name == '') matrices, and may also contain arrays +of this same mini-mat format. + +I guess that: + +* saving an anonymous function back to a mat file will need the + associated ``__function_workspace__`` matrix saved as well for the + anonymous function to work correctly. +* appending to a mat file that has a ``__function_workspace__`` would + involve first pulling off this workspace, appending, checking whether + there were any more anonymous functions appended, and then somehow + merging the relevant workspaces, and saving at the end of the mat + file. + +The mat files I was playing with are in ``tests/data``: + +* sqr.mat +* parabola.mat +* some_functions.mat + +See ``tests/test_mio.py:test_mio_funcs.py`` for the debugging +script I was working with. 
+ +Small fragments of current code adapted from matfile.py by Heiko +Henkelmann; parts of the code for simplify_cells=True adapted from +http://blog.nephics.com/2019/08/28/better-loadmat-for-scipy/. +''' + +import math +import os +import time +import sys +import zlib + +from io import BytesIO + +import warnings + +import numpy as np + +import scipy.sparse + +from ._byteordercodes import native_code, swapped_code + +from ._miobase import (MatFileReader, docfiller, matdims, read_dtype, + arr_to_chars, arr_dtype_number, MatWriteError, + MatReadError, MatReadWarning) + +# Reader object for matlab 5 format variables +from ._mio5_utils import VarReader5 + +# Constants and helper objects +from ._mio5_params import (MatlabObject, MatlabFunction, MDTYPES, NP_TO_MTYPES, + NP_TO_MXTYPES, miCOMPRESSED, miMATRIX, miINT8, + miUTF8, miUINT32, mxCELL_CLASS, mxSTRUCT_CLASS, + mxOBJECT_CLASS, mxCHAR_CLASS, mxSPARSE_CLASS, + mxDOUBLE_CLASS, mclass_info, mat_struct) + +from ._streams import ZlibInputStream + + +def _has_struct(elem): + """Determine if elem is an array and if first array item is a struct.""" + return (isinstance(elem, np.ndarray) and (elem.size > 0) and (elem.ndim > 0) and + isinstance(elem[0], mat_struct)) + + +def _inspect_cell_array(ndarray): + """Construct lists from cell arrays (loaded as numpy ndarrays), recursing + into items if they contain mat_struct objects.""" + elem_list = [] + for sub_elem in ndarray: + if isinstance(sub_elem, mat_struct): + elem_list.append(_matstruct_to_dict(sub_elem)) + elif _has_struct(sub_elem): + elem_list.append(_inspect_cell_array(sub_elem)) + else: + elem_list.append(sub_elem) + return elem_list + + +def _matstruct_to_dict(matobj): + """Construct nested dicts from mat_struct objects.""" + d = {} + for f in matobj._fieldnames: + elem = matobj.__dict__[f] + if isinstance(elem, mat_struct): + d[f] = _matstruct_to_dict(elem) + elif _has_struct(elem): + d[f] = _inspect_cell_array(elem) + else: + d[f] = elem + return d + + +def _simplify_cells(d): + """Convert mat objects in dict to nested dicts.""" + for key in d: + if isinstance(d[key], mat_struct): + d[key] = _matstruct_to_dict(d[key]) + elif _has_struct(d[key]): + d[key] = _inspect_cell_array(d[key]) + return d + + +class MatFile5Reader(MatFileReader): + ''' Reader for Mat 5 mat files + Adds the following attribute to base class + + uint16_codec - char codec to use for uint16 char arrays + (defaults to system default codec) + + Uses variable reader that has the following standard interface (see + abstract class in ``miobase``:: + + __init__(self, file_reader) + read_header(self) + array_from_header(self) + + and added interface:: + + set_stream(self, stream) + read_full_tag(self) + + ''' + @docfiller + def __init__(self, + mat_stream, + byte_order=None, + mat_dtype=False, + squeeze_me=False, + chars_as_strings=True, + matlab_compatible=False, + struct_as_record=True, + verify_compressed_data_integrity=True, + uint16_codec=None, + simplify_cells=False): + '''Initializer for matlab 5 file format reader + + %(matstream_arg)s + %(load_args)s + %(struct_arg)s + uint16_codec : {None, string} + Set codec to use for uint16 char arrays (e.g., 'utf-8'). 
+ Use system default codec if None + ''' + super().__init__( + mat_stream, + byte_order, + mat_dtype, + squeeze_me, + chars_as_strings, + matlab_compatible, + struct_as_record, + verify_compressed_data_integrity, + simplify_cells) + # Set uint16 codec + if not uint16_codec: + uint16_codec = sys.getdefaultencoding() + self.uint16_codec = uint16_codec + # placeholders for readers - see initialize_read method + self._file_reader = None + self._matrix_reader = None + + def guess_byte_order(self): + ''' Guess byte order. + Sets stream pointer to 0''' + self.mat_stream.seek(126) + mi = self.mat_stream.read(2) + self.mat_stream.seek(0) + return mi == b'IM' and '<' or '>' + + def read_file_header(self): + ''' Read in mat 5 file header ''' + hdict = {} + hdr_dtype = MDTYPES[self.byte_order]['dtypes']['file_header'] + hdr = read_dtype(self.mat_stream, hdr_dtype) + hdict['__header__'] = hdr['description'].item().strip(b' \t\n\000') + v_major = hdr['version'] >> 8 + v_minor = hdr['version'] & 0xFF + hdict['__version__'] = '%d.%d' % (v_major, v_minor) + return hdict + + def initialize_read(self): + ''' Run when beginning read of variables + + Sets up readers from parameters in `self` + ''' + # reader for top level stream. We need this extra top-level + # reader because we use the matrix_reader object to contain + # compressed matrices (so they have their own stream) + self._file_reader = VarReader5(self) + # reader for matrix streams + self._matrix_reader = VarReader5(self) + + def read_var_header(self): + ''' Read header, return header, next position + + Header has to define at least .name and .is_global + + Parameters + ---------- + None + + Returns + ------- + header : object + object that can be passed to self.read_var_array, and that + has attributes .name and .is_global + next_position : int + position in stream of next variable + ''' + mdtype, byte_count = self._file_reader.read_full_tag() + if not byte_count > 0: + raise ValueError("Did not read any bytes") + next_pos = self.mat_stream.tell() + byte_count + if mdtype == miCOMPRESSED: + # Make new stream from compressed data + stream = ZlibInputStream(self.mat_stream, byte_count) + self._matrix_reader.set_stream(stream) + check_stream_limit = self.verify_compressed_data_integrity + mdtype, byte_count = self._matrix_reader.read_full_tag() + else: + check_stream_limit = False + self._matrix_reader.set_stream(self.mat_stream) + if not mdtype == miMATRIX: + raise TypeError('Expecting miMATRIX type here, got %d' % mdtype) + header = self._matrix_reader.read_header(check_stream_limit) + return header, next_pos + + def read_var_array(self, header, process=True): + ''' Read array, given `header` + + Parameters + ---------- + header : header object + object with fields defining variable header + process : {True, False} bool, optional + If True, apply recursive post-processing during loading of + array. + + Returns + ------- + arr : array + array with post-processing applied or not according to + `process`. 
+ ''' + return self._matrix_reader.array_from_header(header, process) + + def get_variables(self, variable_names=None): + ''' get variables from stream as dictionary + + variable_names - optional list of variable names to get + + If variable_names is None, then get all variables in file + ''' + if isinstance(variable_names, str): + variable_names = [variable_names] + elif variable_names is not None: + variable_names = list(variable_names) + + self.mat_stream.seek(0) + # Here we pass all the parameters in self to the reading objects + self.initialize_read() + mdict = self.read_file_header() + mdict['__globals__'] = [] + while not self.end_of_stream(): + hdr, next_position = self.read_var_header() + name = 'None' if hdr.name is None else hdr.name.decode('latin1') + if name in mdict: + msg = ( + f'Duplicate variable name "{name}" in stream' + " - replacing previous with new\nConsider" + "scipy.io.matlab.varmats_from_mat to split " + "file into single variable files" + ) + warnings.warn(msg, MatReadWarning, stacklevel=2) + if name == '': + # can only be a matlab 7 function workspace + name = '__function_workspace__' + # We want to keep this raw because mat_dtype processing + # will break the format (uint8 as mxDOUBLE_CLASS) + process = False + else: + process = True + if variable_names is not None and name not in variable_names: + self.mat_stream.seek(next_position) + continue + try: + res = self.read_var_array(hdr, process) + except MatReadError as err: + warnings.warn( + f'Unreadable variable "{name}", because "{err}"', + Warning, stacklevel=2) + res = f"Read error: {err}" + self.mat_stream.seek(next_position) + mdict[name] = res + if hdr.is_global: + mdict['__globals__'].append(name) + if variable_names is not None: + variable_names.remove(name) + if len(variable_names) == 0: + break + if self.simplify_cells: + return _simplify_cells(mdict) + else: + return mdict + + def list_variables(self): + ''' list variables from stream ''' + self.mat_stream.seek(0) + # Here we pass all the parameters in self to the reading objects + self.initialize_read() + self.read_file_header() + vars = [] + while not self.end_of_stream(): + hdr, next_position = self.read_var_header() + name = 'None' if hdr.name is None else hdr.name.decode('latin1') + if name == '': + # can only be a matlab 7 function workspace + name = '__function_workspace__' + + shape = self._matrix_reader.shape_from_header(hdr) + if hdr.is_logical: + info = 'logical' + else: + info = mclass_info.get(hdr.mclass, 'unknown') + vars.append((name, shape, info)) + + self.mat_stream.seek(next_position) + return vars + + +def varmats_from_mat(file_obj): + """ Pull variables out of mat 5 file as a sequence of mat file objects + + This can be useful with a difficult mat file, containing unreadable + variables. This routine pulls the variables out in raw form and puts them, + unread, back into a file stream for saving or reading. Another use is the + pathological case where there is more than one variable of the same name in + the file; this routine returns the duplicates, whereas the standard reader + will overwrite duplicates in the returned dictionary. + + The file pointer in `file_obj` will be undefined. File pointers for the + returned file-like objects are set at 0. + + Parameters + ---------- + file_obj : file-like + file object containing mat file + + Returns + ------- + named_mats : list + list contains tuples of (name, BytesIO) where BytesIO is a file-like + object containing mat file contents as for a single variable. 
The + BytesIO contains a string with the original header and a single var. If + ``var_file_obj`` is an individual BytesIO instance, then save as a mat + file with something like ``open('test.mat', + 'wb').write(var_file_obj.read())`` + + Examples + -------- + >>> import scipy.io + >>> import numpy as np + >>> from io import BytesIO + >>> from scipy.io.matlab._mio5 import varmats_from_mat + >>> mat_fileobj = BytesIO() + >>> scipy.io.savemat(mat_fileobj, {'b': np.arange(10), 'a': 'a string'}) + >>> varmats = varmats_from_mat(mat_fileobj) + >>> sorted([name for name, str_obj in varmats]) + ['a', 'b'] + """ + rdr = MatFile5Reader(file_obj) + file_obj.seek(0) + # Raw read of top-level file header + hdr_len = MDTYPES[native_code]['dtypes']['file_header'].itemsize + raw_hdr = file_obj.read(hdr_len) + # Initialize variable reading + file_obj.seek(0) + rdr.initialize_read() + rdr.read_file_header() + next_position = file_obj.tell() + named_mats = [] + while not rdr.end_of_stream(): + start_position = next_position + hdr, next_position = rdr.read_var_header() + name = 'None' if hdr.name is None else hdr.name.decode('latin1') + # Read raw variable string + file_obj.seek(start_position) + byte_count = next_position - start_position + var_str = file_obj.read(byte_count) + # write to stringio object + out_obj = BytesIO() + out_obj.write(raw_hdr) + out_obj.write(var_str) + out_obj.seek(0) + named_mats.append((name, out_obj)) + return named_mats + + +class EmptyStructMarker: + """ Class to indicate presence of empty matlab struct on output """ + + +def to_writeable(source): + ''' Convert input object ``source`` to something we can write + + Parameters + ---------- + source : object + + Returns + ------- + arr : None or ndarray or EmptyStructMarker + If `source` cannot be converted to something we can write to a matfile, + return None. If `source` is equivalent to an empty dictionary, return + ``EmptyStructMarker``. Otherwise return `source` converted to an + ndarray with contents for writing to matfile. 
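+
+    For illustration only (behaviour summarised from the code below; these
+    comments are not part of the original docstring)::
+
+        to_writeable(None)              # -> None
+        to_writeable(np.arange(3))      # -> the ndarray, unchanged
+        to_writeable({})                # -> EmptyStructMarker
+        to_writeable({'a': 1, 'b': 2})  # -> 1-element structured array with
+                                        #    object-dtype fields 'a' and 'b'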
+ ''' + if isinstance(source, np.ndarray): + return source + if source is None: + return None + if hasattr(source, "__array__"): + return np.asarray(source) + # Objects that implement mappings + is_mapping = (hasattr(source, 'keys') and hasattr(source, 'values') and + hasattr(source, 'items')) + # Objects that don't implement mappings, but do have dicts + if isinstance(source, np.generic): + # NumPy scalars are never mappings (PyPy issue workaround) + pass + elif not is_mapping and hasattr(source, '__dict__'): + source = {key: value for key, value in source.__dict__.items() + if not key.startswith('_')} + is_mapping = True + if is_mapping: + dtype = [] + values = [] + for field, value in source.items(): + if (isinstance(field, str) and + field[0] not in '_0123456789'): + dtype.append((str(field), object)) + values.append(value) + if dtype: + return np.array([tuple(values)], dtype) + else: + return EmptyStructMarker + # Next try and convert to an array + try: + narr = np.asanyarray(source) + except ValueError: + narr = np.asanyarray(source, dtype=object) + if narr.dtype.type in (object, np.object_) and \ + narr.shape == () and narr == source: + # No interesting conversion possible + return None + return narr + + +# Native byte ordered dtypes for convenience for writers +NDT_FILE_HDR = MDTYPES[native_code]['dtypes']['file_header'] +NDT_TAG_FULL = MDTYPES[native_code]['dtypes']['tag_full'] +NDT_TAG_SMALL = MDTYPES[native_code]['dtypes']['tag_smalldata'] +NDT_ARRAY_FLAGS = MDTYPES[native_code]['dtypes']['array_flags'] + + +class VarWriter5: + ''' Generic matlab matrix writing class ''' + mat_tag = np.zeros((), NDT_TAG_FULL) + mat_tag['mdtype'] = miMATRIX # type: ignore[call-overload] + + def __init__(self, file_writer): + self.file_stream = file_writer.file_stream + self.unicode_strings = file_writer.unicode_strings + self.long_field_names = file_writer.long_field_names + self.oned_as = file_writer.oned_as + # These are used for top level writes, and unset after + self._var_name = None + self._var_is_global = False + + def write_bytes(self, arr): + self.file_stream.write(arr.tobytes(order='F')) + + def write_string(self, s): + self.file_stream.write(s) + + def write_element(self, arr, mdtype=None): + ''' write tag and data ''' + if mdtype is None: + mdtype = NP_TO_MTYPES[arr.dtype.str[1:]] + # Array needs to be in native byte order + if arr.dtype.byteorder == swapped_code: + arr = arr.byteswap().view(arr.dtype.newbyteorder()) + byte_count = arr.size*arr.itemsize + if byte_count <= 4: + self.write_smalldata_element(arr, mdtype, byte_count) + else: + self.write_regular_element(arr, mdtype, byte_count) + + def write_smalldata_element(self, arr, mdtype, byte_count): + # write tag with embedded data + tag = np.zeros((), NDT_TAG_SMALL) + tag['byte_count_mdtype'] = (byte_count << 16) + mdtype + # if arr.tobytes is < 4, the element will be zero-padded as needed. 
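+        # The bytes of `arr` are stored inline in the 4-byte payload of the
+        # tag itself, so no separate data element follows this tag.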
+ tag['data'] = arr.tobytes(order='F') + self.write_bytes(tag) + + def write_regular_element(self, arr, mdtype, byte_count): + # write tag, data + tag = np.zeros((), NDT_TAG_FULL) + tag['mdtype'] = mdtype + tag['byte_count'] = byte_count + self.write_bytes(tag) + self.write_bytes(arr) + # pad to next 64-bit boundary + bc_mod_8 = byte_count % 8 + if bc_mod_8: + self.file_stream.write(b'\x00' * (8-bc_mod_8)) + + def write_header(self, + shape, + mclass, + is_complex=False, + is_logical=False, + nzmax=0): + ''' Write header for given data options + shape : sequence + array shape + mclass - mat5 matrix class + is_complex - True if matrix is complex + is_logical - True if matrix is logical + nzmax - max non zero elements for sparse arrays + + We get the name and the global flag from the object, and reset + them to defaults after we've used them + ''' + # get name and is_global from one-shot object store + name = self._var_name + is_global = self._var_is_global + # initialize the top-level matrix tag, store position + self._mat_tag_pos = self.file_stream.tell() + self.write_bytes(self.mat_tag) + # write array flags (complex, global, logical, class, nzmax) + af = np.zeros((), NDT_ARRAY_FLAGS) + af['data_type'] = miUINT32 + af['byte_count'] = 8 + flags = is_complex << 3 | is_global << 2 | is_logical << 1 + af['flags_class'] = mclass | flags << 8 + af['nzmax'] = nzmax + self.write_bytes(af) + # shape + self.write_element(np.array(shape, dtype='i4')) + # write name + name = np.asarray(name) + if name == '': # empty string zero-terminated + self.write_smalldata_element(name, miINT8, 0) + else: + self.write_element(name, miINT8) + # reset the one-shot store to defaults + self._var_name = '' + self._var_is_global = False + + def update_matrix_tag(self, start_pos): + curr_pos = self.file_stream.tell() + self.file_stream.seek(start_pos) + byte_count = curr_pos - start_pos - 8 + if byte_count >= 2**32: + raise MatWriteError("Matrix too large to save with Matlab " + "5 format") + self.mat_tag['byte_count'] = byte_count + self.write_bytes(self.mat_tag) + self.file_stream.seek(curr_pos) + + def write_top(self, arr, name, is_global): + """ Write variable at top level of mat file + + Parameters + ---------- + arr : array_like + array-like object to create writer for + name : str, optional + name as it will appear in matlab workspace + default is empty string + is_global : {False, True}, optional + whether variable will be global on load into matlab + """ + # these are set before the top-level header write, and unset at + # the end of the same write, because they do not apply for lower levels + self._var_is_global = is_global + self._var_name = name + # write the header and data + self.write(arr) + + def write(self, arr): + ''' Write `arr` to stream at top and sub levels + + Parameters + ---------- + arr : array_like + array-like object to create writer for + ''' + # store position, so we can update the matrix tag + mat_tag_pos = self.file_stream.tell() + # First check if these are sparse + if scipy.sparse.issparse(arr): + self.write_sparse(arr) + self.update_matrix_tag(mat_tag_pos) + return + # Try to convert things that aren't arrays + narr = to_writeable(arr) + if narr is None: + raise TypeError(f'Could not convert {arr} (type {type(arr)}) to array') + if isinstance(narr, MatlabObject): + self.write_object(narr) + elif isinstance(narr, MatlabFunction): + raise MatWriteError('Cannot write matlab functions') + elif narr is EmptyStructMarker: # empty struct array + self.write_empty_struct() + elif 
narr.dtype.fields: # struct array + self.write_struct(narr) + elif narr.dtype.hasobject: # cell array + self.write_cells(narr) + elif narr.dtype.kind in ('U', 'S'): + if self.unicode_strings: + codec = 'UTF8' + else: + codec = 'ascii' + self.write_char(narr, codec) + else: + self.write_numeric(narr) + self.update_matrix_tag(mat_tag_pos) + + def write_numeric(self, arr): + imagf = arr.dtype.kind == 'c' + logif = arr.dtype.kind == 'b' + try: + mclass = NP_TO_MXTYPES[arr.dtype.str[1:]] + except KeyError: + # No matching matlab type, probably complex256 / float128 / float96 + # Cast data to complex128 / float64. + if imagf: + arr = arr.astype('c128') + elif logif: + arr = arr.astype('i1') # Should only contain 0/1 + else: + arr = arr.astype('f8') + mclass = mxDOUBLE_CLASS + self.write_header(matdims(arr, self.oned_as), + mclass, + is_complex=imagf, + is_logical=logif) + if imagf: + self.write_element(arr.real) + self.write_element(arr.imag) + else: + self.write_element(arr) + + def write_char(self, arr, codec='ascii'): + ''' Write string array `arr` with given `codec` + ''' + if arr.size == 0 or np.all(arr == ''): + # This an empty string array or a string array containing + # only empty strings. Matlab cannot distinguish between a + # string array that is empty, and a string array containing + # only empty strings, because it stores strings as arrays of + # char. There is no way of having an array of char that is + # not empty, but contains an empty string. We have to + # special-case the array-with-empty-strings because even + # empty strings have zero padding, which would otherwise + # appear in matlab as a string with a space. + shape = (0,) * np.max([arr.ndim, 2]) + self.write_header(shape, mxCHAR_CLASS) + self.write_smalldata_element(arr, miUTF8, 0) + return + # non-empty string. + # + # Convert to char array + arr = arr_to_chars(arr) + # We have to write the shape directly, because we are going + # recode the characters, and the resulting stream of chars + # may have a different length + shape = arr.shape + self.write_header(shape, mxCHAR_CLASS) + if arr.dtype.kind == 'U' and arr.size: + # Make one long string from all the characters. We need to + # transpose here, because we're flattening the array, before + # we write the bytes. The bytes have to be written in + # Fortran order. 
+ n_chars = math.prod(shape) + st_arr = np.ndarray(shape=(), + dtype=arr_dtype_number(arr, n_chars), + buffer=arr.T.copy()) # Fortran order + # Recode with codec to give byte string + st = st_arr.item().encode(codec) + # Reconstruct as 1-D byte array + arr = np.ndarray(shape=(len(st),), + dtype='S1', + buffer=st) + self.write_element(arr, mdtype=miUTF8) + + def write_sparse(self, arr): + ''' Sparse matrices are 2D + ''' + A = arr.tocsc() # convert to sparse CSC format + A.sort_indices() # MATLAB expects sorted row indices + is_complex = (A.dtype.kind == 'c') + is_logical = (A.dtype.kind == 'b') + nz = A.nnz + self.write_header(matdims(arr, self.oned_as), + mxSPARSE_CLASS, + is_complex=is_complex, + is_logical=is_logical, + # matlab won't load file with 0 nzmax + nzmax=1 if nz == 0 else nz) + self.write_element(A.indices.astype('i4')) + self.write_element(A.indptr.astype('i4')) + self.write_element(A.data.real) + if is_complex: + self.write_element(A.data.imag) + + def write_cells(self, arr): + self.write_header(matdims(arr, self.oned_as), + mxCELL_CLASS) + # loop over data, column major + A = np.atleast_2d(arr).flatten('F') + for el in A: + self.write(el) + + def write_empty_struct(self): + self.write_header((1, 1), mxSTRUCT_CLASS) + # max field name length set to 1 in an example matlab struct + self.write_element(np.array(1, dtype=np.int32)) + # Field names element is empty + self.write_element(np.array([], dtype=np.int8)) + + def write_struct(self, arr): + self.write_header(matdims(arr, self.oned_as), + mxSTRUCT_CLASS) + self._write_items(arr) + + def _write_items(self, arr): + # write fieldnames + fieldnames = [f[0] for f in arr.dtype.descr] + length = max([len(fieldname) for fieldname in fieldnames])+1 + max_length = (self.long_field_names and 64) or 32 + if length > max_length: + raise ValueError("Field names are restricted to %d characters" % + (max_length-1)) + self.write_element(np.array([length], dtype='i4')) + self.write_element( + np.array(fieldnames, dtype='S%d' % (length)), + mdtype=miINT8) + A = np.atleast_2d(arr).flatten('F') + for el in A: + for f in fieldnames: + self.write(el[f]) + + def write_object(self, arr): + '''Same as writing structs, except different mx class, and extra + classname element after header + ''' + self.write_header(matdims(arr, self.oned_as), + mxOBJECT_CLASS) + self.write_element(np.array(arr.classname, dtype='S'), + mdtype=miINT8) + self._write_items(arr) + + +class MatFile5Writer: + ''' Class for writing mat5 files ''' + + @docfiller + def __init__(self, file_stream, + do_compression=False, + unicode_strings=False, + global_vars=None, + long_field_names=False, + oned_as='row'): + ''' Initialize writer for matlab 5 format files + + Parameters + ---------- + %(do_compression)s + %(unicode_strings)s + global_vars : None or sequence of strings, optional + Names of variables to be marked as global for matlab + %(long_fields)s + %(oned_as)s + ''' + self.file_stream = file_stream + self.do_compression = do_compression + self.unicode_strings = unicode_strings + if global_vars: + self.global_vars = global_vars + else: + self.global_vars = [] + self.long_field_names = long_field_names + self.oned_as = oned_as + self._matrix_writer = None + + def write_file_header(self): + # write header + hdr = np.zeros((), NDT_FILE_HDR) + hdr['description'] = (f'MATLAB 5.0 MAT-file Platform: {os.name}, ' + f'Created on: {time.asctime()}') + hdr['version'] = 0x0100 + hdr['endian_test'] = np.ndarray(shape=(), + dtype='S2', + buffer=np.uint16(0x4d49)) + 
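+        # Editor's note: np.uint16(0x4d49) holds the characters 'M' (0x4d) and
+        # 'I' (0x49) in the writer's native byte order, so readers see either
+        # 'MI' or 'IM' and can infer endianness (compare the check on bytes
+        # 124 through 128 in `_get_matfile_version`, in `_miobase.py` later in
+        # this diff).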
self.file_stream.write(hdr.tobytes()) + + def put_variables(self, mdict, write_header=None): + ''' Write variables in `mdict` to stream + + Parameters + ---------- + mdict : mapping + mapping with method ``items`` returns name, contents pairs where + ``name`` which will appear in the matlab workspace in file load, and + ``contents`` is something writeable to a matlab file, such as a NumPy + array. + write_header : {None, True, False}, optional + If True, then write the matlab file header before writing the + variables. If None (the default) then write the file header + if we are at position 0 in the stream. By setting False + here, and setting the stream position to the end of the file, + you can append variables to a matlab file + ''' + # write header if requested, or None and start of file + if write_header is None: + write_header = self.file_stream.tell() == 0 + if write_header: + self.write_file_header() + self._matrix_writer = VarWriter5(self) + for name, var in mdict.items(): + if name[0] == '_': + continue + is_global = name in self.global_vars + if self.do_compression: + stream = BytesIO() + self._matrix_writer.file_stream = stream + self._matrix_writer.write_top(var, name.encode('latin1'), is_global) + out_str = zlib.compress(stream.getvalue()) + tag = np.empty((), NDT_TAG_FULL) + tag['mdtype'] = miCOMPRESSED + tag['byte_count'] = len(out_str) + self.file_stream.write(tag.tobytes()) + self.file_stream.write(out_str) + else: # not compressing + self._matrix_writer.write_top(var, name.encode('latin1'), is_global) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/_mio5_params.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/_mio5_params.py new file mode 100644 index 0000000000000000000000000000000000000000..0d60b8e7a4a2dd1e6a336139f67ce984743e27bb --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/_mio5_params.py @@ -0,0 +1,281 @@ +''' Constants and classes for matlab 5 read and write + +See also mio5_utils.pyx where these same constants arise as c enums. + +If you make changes in this file, don't forget to change mio5_utils.pyx +''' +import numpy as np + +from ._miobase import convert_dtypes + + +__all__ = [ + 'MDTYPES', 'MatlabFunction', 'MatlabObject', 'MatlabOpaque', + 'NP_TO_MTYPES', 'NP_TO_MXTYPES', 'OPAQUE_DTYPE', 'codecs_template', + 'mat_struct', 'mclass_dtypes_template', 'mclass_info', 'mdtypes_template', + 'miCOMPRESSED', 'miDOUBLE', 'miINT16', 'miINT32', 'miINT64', 'miINT8', + 'miMATRIX', 'miSINGLE', 'miUINT16', 'miUINT32', 'miUINT64', 'miUINT8', + 'miUTF16', 'miUTF32', 'miUTF8', 'mxCELL_CLASS', 'mxCHAR_CLASS', + 'mxDOUBLE_CLASS', 'mxFUNCTION_CLASS', 'mxINT16_CLASS', 'mxINT32_CLASS', + 'mxINT64_CLASS', 'mxINT8_CLASS', 'mxOBJECT_CLASS', + 'mxOBJECT_CLASS_FROM_MATRIX_H', 'mxOPAQUE_CLASS', 'mxSINGLE_CLASS', + 'mxSPARSE_CLASS', 'mxSTRUCT_CLASS', 'mxUINT16_CLASS', 'mxUINT32_CLASS', + 'mxUINT64_CLASS', 'mxUINT8_CLASS' +] +miINT8 = 1 +miUINT8 = 2 +miINT16 = 3 +miUINT16 = 4 +miINT32 = 5 +miUINT32 = 6 +miSINGLE = 7 +miDOUBLE = 9 +miINT64 = 12 +miUINT64 = 13 +miMATRIX = 14 +miCOMPRESSED = 15 +miUTF8 = 16 +miUTF16 = 17 +miUTF32 = 18 + +mxCELL_CLASS = 1 +mxSTRUCT_CLASS = 2 +# The March 2008 edition of "Matlab 7 MAT-File Format" says that +# mxOBJECT_CLASS = 3, whereas matrix.h says that mxLOGICAL = 3. +# Matlab 2008a appears to save logicals as type 9, so we assume that +# the document is correct. See type 18, below. 
+mxOBJECT_CLASS = 3 +mxCHAR_CLASS = 4 +mxSPARSE_CLASS = 5 +mxDOUBLE_CLASS = 6 +mxSINGLE_CLASS = 7 +mxINT8_CLASS = 8 +mxUINT8_CLASS = 9 +mxINT16_CLASS = 10 +mxUINT16_CLASS = 11 +mxINT32_CLASS = 12 +mxUINT32_CLASS = 13 +# The following are not in the March 2008 edition of "Matlab 7 +# MAT-File Format," but were guessed from matrix.h. +mxINT64_CLASS = 14 +mxUINT64_CLASS = 15 +mxFUNCTION_CLASS = 16 +# Not doing anything with these at the moment. +mxOPAQUE_CLASS = 17 # This appears to be a function workspace +# Thread 'saving/loading symbol table of annymous functions', +# octave-maintainers, April-May 2007 +# https://lists.gnu.org/archive/html/octave-maintainers/2007-04/msg00031.html +# https://lists.gnu.org/archive/html/octave-maintainers/2007-05/msg00032.html +# (Was/Deprecated: https://www-old.cae.wisc.edu/pipermail/octave-maintainers/2007-May/002824.html) +mxOBJECT_CLASS_FROM_MATRIX_H = 18 + +mdtypes_template = { + miINT8: 'i1', + miUINT8: 'u1', + miINT16: 'i2', + miUINT16: 'u2', + miINT32: 'i4', + miUINT32: 'u4', + miSINGLE: 'f4', + miDOUBLE: 'f8', + miINT64: 'i8', + miUINT64: 'u8', + miUTF8: 'u1', + miUTF16: 'u2', + miUTF32: 'u4', + 'file_header': [('description', 'S116'), + ('subsystem_offset', 'i8'), + ('version', 'u2'), + ('endian_test', 'S2')], + 'tag_full': [('mdtype', 'u4'), ('byte_count', 'u4')], + 'tag_smalldata':[('byte_count_mdtype', 'u4'), ('data', 'S4')], + 'array_flags': [('data_type', 'u4'), + ('byte_count', 'u4'), + ('flags_class','u4'), + ('nzmax', 'u4')], + 'U1': 'U1', + } + +mclass_dtypes_template = { + mxINT8_CLASS: 'i1', + mxUINT8_CLASS: 'u1', + mxINT16_CLASS: 'i2', + mxUINT16_CLASS: 'u2', + mxINT32_CLASS: 'i4', + mxUINT32_CLASS: 'u4', + mxINT64_CLASS: 'i8', + mxUINT64_CLASS: 'u8', + mxSINGLE_CLASS: 'f4', + mxDOUBLE_CLASS: 'f8', + } + +mclass_info = { + mxINT8_CLASS: 'int8', + mxUINT8_CLASS: 'uint8', + mxINT16_CLASS: 'int16', + mxUINT16_CLASS: 'uint16', + mxINT32_CLASS: 'int32', + mxUINT32_CLASS: 'uint32', + mxINT64_CLASS: 'int64', + mxUINT64_CLASS: 'uint64', + mxSINGLE_CLASS: 'single', + mxDOUBLE_CLASS: 'double', + mxCELL_CLASS: 'cell', + mxSTRUCT_CLASS: 'struct', + mxOBJECT_CLASS: 'object', + mxCHAR_CLASS: 'char', + mxSPARSE_CLASS: 'sparse', + mxFUNCTION_CLASS: 'function', + mxOPAQUE_CLASS: 'opaque', + } + +NP_TO_MTYPES = { + 'f8': miDOUBLE, + 'c32': miDOUBLE, + 'c24': miDOUBLE, + 'c16': miDOUBLE, + 'f4': miSINGLE, + 'c8': miSINGLE, + 'i8': miINT64, + 'i4': miINT32, + 'i2': miINT16, + 'i1': miINT8, + 'u8': miUINT64, + 'u4': miUINT32, + 'u2': miUINT16, + 'u1': miUINT8, + 'S1': miUINT8, + 'U1': miUTF16, + 'b1': miUINT8, # not standard but seems MATLAB uses this (gh-4022) + } + + +NP_TO_MXTYPES = { + 'f8': mxDOUBLE_CLASS, + 'c32': mxDOUBLE_CLASS, + 'c24': mxDOUBLE_CLASS, + 'c16': mxDOUBLE_CLASS, + 'f4': mxSINGLE_CLASS, + 'c8': mxSINGLE_CLASS, + 'i8': mxINT64_CLASS, + 'i4': mxINT32_CLASS, + 'i2': mxINT16_CLASS, + 'i1': mxINT8_CLASS, + 'u8': mxUINT64_CLASS, + 'u4': mxUINT32_CLASS, + 'u2': mxUINT16_CLASS, + 'u1': mxUINT8_CLASS, + 'S1': mxUINT8_CLASS, + 'b1': mxUINT8_CLASS, # not standard but seems MATLAB uses this + } + +''' Before release v7.1 (release 14) matlab (TM) used the system +default character encoding scheme padded out to 16-bits. Release 14 +and later use Unicode. 
When saving character data, R14 checks if it +can be encoded in 7-bit ascii, and saves in that format if so.''' + +codecs_template = { + miUTF8: {'codec': 'utf_8', 'width': 1}, + miUTF16: {'codec': 'utf_16', 'width': 2}, + miUTF32: {'codec': 'utf_32','width': 4}, + } + + +def _convert_codecs(template, byte_order): + ''' Convert codec template mapping to byte order + + Set codecs not on this system to None + + Parameters + ---------- + template : mapping + key, value are respectively codec name, and root name for codec + (without byte order suffix) + byte_order : {'<', '>'} + code for little or big endian + + Returns + ------- + codecs : dict + key, value are name, codec (as in .encode(codec)) + ''' + codecs = {} + postfix = byte_order == '<' and '_le' or '_be' + for k, v in template.items(): + codec = v['codec'] + try: + " ".encode(codec) + except LookupError: + codecs[k] = None + continue + if v['width'] > 1: + codec += postfix + codecs[k] = codec + return codecs.copy() + + +MDTYPES = {} +for _bytecode in '<>': + _def = {'dtypes': convert_dtypes(mdtypes_template, _bytecode), + 'classes': convert_dtypes(mclass_dtypes_template, _bytecode), + 'codecs': _convert_codecs(codecs_template, _bytecode)} + MDTYPES[_bytecode] = _def + + +class mat_struct: + """Placeholder for holding read data from structs. + + We use instances of this class when the user passes False as a value to the + ``struct_as_record`` parameter of the :func:`scipy.io.loadmat` function. + """ + pass + + +class MatlabObject(np.ndarray): + """Subclass of ndarray to signal this is a matlab object. + + This is a simple subclass of :class:`numpy.ndarray` meant to be used + by :func:`scipy.io.loadmat` and should not be instantiated directly. + """ + + def __new__(cls, input_array, classname=None): + # Input array is an already formed ndarray instance + # We first cast to be our class type + obj = np.asarray(input_array).view(cls) + # add the new attribute to the created instance + obj.classname = classname + # Finally, we must return the newly created object: + return obj + + def __array_finalize__(self,obj): + # reset the attribute from passed original object + self.classname = getattr(obj, 'classname', None) + # We do not need to return anything + + +class MatlabFunction(np.ndarray): + """Subclass for a MATLAB function. + + This is a simple subclass of :class:`numpy.ndarray` meant to be used + by :func:`scipy.io.loadmat` and should not be directly instantiated. + """ + + def __new__(cls, input_array): + obj = np.asarray(input_array).view(cls) + return obj + + +class MatlabOpaque(np.ndarray): + """Subclass for a MATLAB opaque matrix. + + This is a simple subclass of :class:`numpy.ndarray` meant to be used + by :func:`scipy.io.loadmat` and should not be directly instantiated. 
+ """ + + def __new__(cls, input_array): + obj = np.asarray(input_array).view(cls) + return obj + + +OPAQUE_DTYPE = np.dtype( + [('s0', 'O'), ('s1', 'O'), ('s2', 'O'), ('arr', 'O')]) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/_mio_utils.cpython-310-x86_64-linux-gnu.so b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/_mio_utils.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..fc8a904d06026119fb14be7ced171f184d44e58b Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/_mio_utils.cpython-310-x86_64-linux-gnu.so differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/_miobase.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/_miobase.py new file mode 100644 index 0000000000000000000000000000000000000000..1ad7fd7395bb67cc2926b03ed7a6002dc4f9e3f6 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/_miobase.py @@ -0,0 +1,432 @@ +# Authors: Travis Oliphant, Matthew Brett + +""" +Base classes for MATLAB file stream reading. + +MATLAB is a registered trademark of the Mathworks inc. +""" + +from typing import Final + +import numpy as np +from scipy._lib import doccer + +from . import _byteordercodes as boc + +__all__ = [ + 'MatReadError', 'MatReadWarning', 'MatWriteError', +] + +class MatReadError(Exception): + """Exception indicating a read issue.""" + + +class MatWriteError(Exception): + """Exception indicating a write issue.""" + + +class MatReadWarning(UserWarning): + """Warning class for read issues.""" + + +doc_dict = \ + {'file_arg': + '''file_name : str + Name of the mat file (do not need .mat extension if + appendmat==True) Can also pass open file-like object.''', + 'append_arg': + '''appendmat : bool, optional + True to append the .mat extension to the end of the given + filename, if not already present. Default is True.''', + 'load_args': + '''byte_order : str or None, optional + None by default, implying byte order guessed from mat + file. Otherwise can be one of ('native', '=', 'little', '<', + 'BIG', '>'). +mat_dtype : bool, optional + If True, return arrays in same dtype as would be loaded into + MATLAB (instead of the dtype with which they are saved). +squeeze_me : bool, optional + Whether to squeeze unit matrix dimensions or not. +chars_as_strings : bool, optional + Whether to convert char arrays to string arrays. +matlab_compatible : bool, optional + Returns matrices as would be loaded by MATLAB (implies + squeeze_me=False, chars_as_strings=False, mat_dtype=True, + struct_as_record=True).''', + 'struct_arg': + '''struct_as_record : bool, optional + Whether to load MATLAB structs as NumPy record arrays, or as + old-style NumPy arrays with dtype=object. Setting this flag to + False replicates the behavior of SciPy version 0.7.x (returning + numpy object arrays). The default setting is True, because it + allows easier round-trip load and save of MATLAB files.''', + 'matstream_arg': + '''mat_stream : file-like + Object with file API, open for reading.''', + 'long_fields': + '''long_field_names : bool, optional + * False - maximum field name length in a structure is 31 characters + which is the documented maximum length. This is the default. 
+ * True - maximum field name length in a structure is 63 characters + which works for MATLAB 7.6''', + 'do_compression': + '''do_compression : bool, optional + Whether to compress matrices on write. Default is False.''', + 'oned_as': + '''oned_as : {'row', 'column'}, optional + If 'column', write 1-D NumPy arrays as column vectors. + If 'row', write 1D NumPy arrays as row vectors.''', + 'unicode_strings': + '''unicode_strings : bool, optional + If True, write strings as Unicode, else MATLAB usual encoding.'''} + +docfiller: Final = doccer.filldoc(doc_dict) + +''' + + Note on architecture +====================== + +There are three sets of parameters relevant for reading files. The +first are *file read parameters* - containing options that are common +for reading the whole file, and therefore every variable within that +file. At the moment these are: + +* mat_stream +* dtypes (derived from byte code) +* byte_order +* chars_as_strings +* squeeze_me +* struct_as_record (MATLAB 5 files) +* class_dtypes (derived from order code, MATLAB 5 files) +* codecs (MATLAB 5 files) +* uint16_codec (MATLAB 5 files) + +Another set of parameters are those that apply only to the current +variable being read - the *header*: + +* header related variables (different for v4 and v5 mat files) +* is_complex +* mclass +* var_stream + +With the header, we need ``next_position`` to tell us where the next +variable in the stream is. + +Then, for each element in a matrix, there can be *element read +parameters*. An element is, for example, one element in a MATLAB cell +array. At the moment, these are: + +* mat_dtype + +The file-reading object contains the *file read parameters*. The +*header* is passed around as a data object, or may be read and discarded +in a single function. The *element read parameters* - the mat_dtype in +this instance, is passed into a general post-processing function - see +``mio_utils`` for details. +''' + + +def convert_dtypes(dtype_template, order_code): + ''' Convert dtypes in mapping to given order + + Parameters + ---------- + dtype_template : mapping + mapping with values returning numpy dtype from ``np.dtype(val)`` + order_code : str + an order code suitable for using in ``dtype.newbyteorder()`` + + Returns + ------- + dtypes : mapping + mapping where values have been replaced by + ``np.dtype(val).newbyteorder(order_code)`` + + ''' + dtypes = dtype_template.copy() + for k in dtypes: + dtypes[k] = np.dtype(dtypes[k]).newbyteorder(order_code) + return dtypes + + +def read_dtype(mat_stream, a_dtype): + """ + Generic get of byte stream data of known type + + Parameters + ---------- + mat_stream : file_like object + MATLAB (tm) mat file stream + a_dtype : dtype + dtype of array to read. `a_dtype` is assumed to be correct + endianness. + + Returns + ------- + arr : ndarray + Array of dtype `a_dtype` read from stream. + + """ + num_bytes = a_dtype.itemsize + arr = np.ndarray(shape=(), + dtype=a_dtype, + buffer=mat_stream.read(num_bytes), + order='F') + return arr + + +def matfile_version(file_name, *, appendmat=True): + """ + Return major, minor tuple depending on apparent mat file type + + Where: + + #. 0,x -> version 4 format mat files + #. 1,x -> version 5 format mat files + #. 2,x -> version 7.3 format mat files (HDF format) + + Parameters + ---------- + file_name : str + Name of the mat file (do not need .mat extension if + appendmat==True). Can also pass open file-like object. 
+ appendmat : bool, optional + True to append the .mat extension to the end of the given + filename, if not already present. Default is True. + + Returns + ------- + major_version : {0, 1, 2} + major MATLAB File format version + minor_version : int + minor MATLAB file format version + + Raises + ------ + MatReadError + If the file is empty. + ValueError + The matfile version is unknown. + + Notes + ----- + Has the side effect of setting the file read pointer to 0 + """ + from ._mio import _open_file_context + with _open_file_context(file_name, appendmat=appendmat) as fileobj: + return _get_matfile_version(fileobj) + + +get_matfile_version = matfile_version + + +_HDR_N_BYTES = 20 + + +def _get_matfile_version(fileobj): + # Mat4 files have a zero somewhere in first 4 bytes + fileobj.seek(0) + hdr_bytes = fileobj.read(_HDR_N_BYTES) + if len(hdr_bytes) < _HDR_N_BYTES: + raise MatReadError("Mat file appears to be truncated") + if hdr_bytes.count(0) == _HDR_N_BYTES: + raise MatReadError("Mat file appears to be corrupt " + f"(first {_HDR_N_BYTES} bytes == 0)") + mopt_ints = np.ndarray(shape=(4,), dtype=np.uint8, buffer=hdr_bytes[:4]) + if 0 in mopt_ints: + fileobj.seek(0) + return (0,0) + # For 5 format or 7.3 format we need to read an integer in the + # header. Bytes 124 through 128 contain a version integer and an + # endian test string + fileobj.seek(124) + tst_str = fileobj.read(4) + fileobj.seek(0) + maj_ind = int(tst_str[2] == b'I'[0]) + maj_val = int(tst_str[maj_ind]) + min_val = int(tst_str[1 - maj_ind]) + ret = (maj_val, min_val) + if maj_val in (1, 2): + return ret + raise ValueError('Unknown mat file type, version {}, {}'.format(*ret)) + + +def matdims(arr, oned_as='column'): + """ + Determine equivalent MATLAB dimensions for given array + + Parameters + ---------- + arr : ndarray + Input array + oned_as : {'column', 'row'}, optional + Whether 1-D arrays are returned as MATLAB row or column matrices. + Default is 'column'. + + Returns + ------- + dims : tuple + Shape tuple, in the form MATLAB expects it. + + Notes + ----- + We had to decide what shape a 1 dimensional array would be by + default. ``np.atleast_2d`` thinks it is a row vector. The + default for a vector in MATLAB (e.g., ``>> 1:12``) is a row vector. + + Versions of scipy up to and including 0.11 resulted (accidentally) + in 1-D arrays being read as column vectors. For the moment, we + maintain the same tradition here. + + Examples + -------- + >>> import numpy as np + >>> from scipy.io.matlab._miobase import matdims + >>> matdims(np.array(1)) # NumPy scalar + (1, 1) + >>> matdims(np.array([1])) # 1-D array, 1 element + (1, 1) + >>> matdims(np.array([1,2])) # 1-D array, 2 elements + (2, 1) + >>> matdims(np.array([[2],[3]])) # 2-D array, column vector + (2, 1) + >>> matdims(np.array([[2,3]])) # 2-D array, row vector + (1, 2) + >>> matdims(np.array([[[2,3]]])) # 3-D array, rowish vector + (1, 1, 2) + >>> matdims(np.array([])) # empty 1-D array + (0, 0) + >>> matdims(np.array([[]])) # empty 2-D array + (0, 0) + >>> matdims(np.array([[[]]])) # empty 3-D array + (0, 0, 0) + + Optional argument flips 1-D shape behavior. + + >>> matdims(np.array([1,2]), 'row') # 1-D array, 2 elements + (1, 2) + + The argument has to make sense though + + >>> matdims(np.array([1,2]), 'bizarre') + Traceback (most recent call last): + ... 
+ ValueError: 1-D option "bizarre" is strange + + """ + shape = arr.shape + if shape == (): # scalar + return (1, 1) + if len(shape) == 1: # 1D + if shape[0] == 0: + return (0, 0) + elif oned_as == 'column': + return shape + (1,) + elif oned_as == 'row': + return (1,) + shape + else: + raise ValueError(f'1-D option "{oned_as}" is strange') + return shape + + +class MatVarReader: + ''' Abstract class defining required interface for var readers''' + def __init__(self, file_reader): + pass + + def read_header(self): + ''' Returns header ''' + pass + + def array_from_header(self, header): + ''' Reads array given header ''' + pass + + +class MatFileReader: + """ Base object for reading mat files + + To make this class functional, you will need to override the + following methods: + + matrix_getter_factory - gives object to fetch next matrix from stream + guess_byte_order - guesses file byte order from file + """ + + @docfiller + def __init__(self, mat_stream, + byte_order=None, + mat_dtype=False, + squeeze_me=False, + chars_as_strings=True, + matlab_compatible=False, + struct_as_record=True, + verify_compressed_data_integrity=True, + simplify_cells=False): + ''' + Initializer for mat file reader + + mat_stream : file-like + object with file API, open for reading + %(load_args)s + ''' + # Initialize stream + self.mat_stream = mat_stream + self.dtypes = {} + if not byte_order: + byte_order = self.guess_byte_order() + else: + byte_order = boc.to_numpy_code(byte_order) + self.byte_order = byte_order + self.struct_as_record = struct_as_record + if matlab_compatible: + self.set_matlab_compatible() + else: + self.squeeze_me = squeeze_me + self.chars_as_strings = chars_as_strings + self.mat_dtype = mat_dtype + self.verify_compressed_data_integrity = verify_compressed_data_integrity + self.simplify_cells = simplify_cells + if simplify_cells: + self.squeeze_me = True + self.struct_as_record = False + + def set_matlab_compatible(self): + ''' Sets options to return arrays as MATLAB loads them ''' + self.mat_dtype = True + self.squeeze_me = False + self.chars_as_strings = False + + def guess_byte_order(self): + ''' As we do not know what file type we have, assume native ''' + return boc.native_code + + def end_of_stream(self): + b = self.mat_stream.read(1) + curpos = self.mat_stream.tell() + self.mat_stream.seek(curpos-1) + return len(b) == 0 + + +def arr_dtype_number(arr, num): + ''' Return dtype for given number of items per element''' + return np.dtype(arr.dtype.str[:2] + str(num)) + + +def arr_to_chars(arr): + ''' Convert string array to char array ''' + dims = list(arr.shape) + if not dims: + dims = [1] + dims.append(int(arr.dtype.str[2:])) + arr = np.ndarray(shape=dims, + dtype=arr_dtype_number(arr, 1), + buffer=arr) + empties = [arr == np.array('', dtype=arr.dtype)] + if not np.any(empties): + return arr + arr = arr.copy() + arr[tuple(empties)] = ' ' + return arr diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/byteordercodes.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/byteordercodes.py new file mode 100644 index 0000000000000000000000000000000000000000..0a1c5b0f5e77fdd461d6085037bfdf2850f40fa0 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/byteordercodes.py @@ -0,0 +1,17 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. 
+# Use the `scipy.io.matlab` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + +__all__: list[str] = [] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="io.matlab", module="byteordercodes", + private_modules=["_byteordercodes"], all=__all__, + attribute=name) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/mio.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/mio.py new file mode 100644 index 0000000000000000000000000000000000000000..65bb31e52dc719b485b12ba1294fc3d09806c9d0 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/mio.py @@ -0,0 +1,16 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.io.matlab` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + +__all__ = ["loadmat", "savemat", "whosmat"] # noqa: F822 + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="io.matlab", module="mio", + private_modules=["_mio"], all=__all__, + attribute=name) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/mio4.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/mio4.py new file mode 100644 index 0000000000000000000000000000000000000000..d13b99a0bcedc9746f7681843989791e0918df2e --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/mio4.py @@ -0,0 +1,17 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.io.matlab` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + +__all__: list[str] = [] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="io.matlab", module="mio4", + private_modules=["_mio4"], all=__all__, + attribute=name) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/mio5.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/mio5.py new file mode 100644 index 0000000000000000000000000000000000000000..b84ca19799b32999032833b4e1be1b21f6bc70da --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/mio5.py @@ -0,0 +1,19 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.io.matlab` namespace for importing the functions +# included below. 
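+# Editor's sketch, not part of the upstream module: attribute access on this
+# shim goes through the module-level __getattr__ below, which is expected to
+# emit a DeprecationWarning and then forward the lookup to the public
+# namespace, e.g.
+#
+#     import scipy.io.matlab.mio5 as mio5
+#     mio5.MatReadError    # warns, then resolves via scipy.io.matlab
+#
+# New code should import these names from `scipy.io.matlab` directly.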
+ +from scipy._lib.deprecation import _sub_module_deprecation + +__all__ = [ # noqa: F822 + 'MatWriteError', 'MatReadError', 'MatReadWarning', 'MatlabObject', + 'MatlabFunction', 'mat_struct', 'varmats_from_mat', +] + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="io.matlab", module="mio5", + private_modules=["_mio5"], all=__all__, + attribute=name) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/mio5_params.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/mio5_params.py new file mode 100644 index 0000000000000000000000000000000000000000..2dcc9a4f353794546f0d8c07f9afe369baa992f5 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/mio5_params.py @@ -0,0 +1,18 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.io.matlab` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + +__all__ = [ # noqa: F822 + 'MatlabFunction', 'MatlabObject', 'MatlabOpaque', 'mat_struct', +] + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="io.matlab", module="mio5_params", + private_modules=["_mio5_params"], all=__all__, + attribute=name) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/mio5_utils.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/mio5_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..37ad9e2dc2f50b85bf5aba517c4ac7d661b5039a --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/mio5_utils.py @@ -0,0 +1,17 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.io.matlab` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + +__all__: list[str] = [] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="io.matlab", module="mio5_utils", + private_modules=["_mio5_utils"], all=__all__, + attribute=name) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/mio_utils.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/mio_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..6920511d2635b44acd33ce6f5e00247daf6578d9 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/mio_utils.py @@ -0,0 +1,17 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.io.matlab` namespace for importing the functions +# included below. 
+ +from scipy._lib.deprecation import _sub_module_deprecation + +__all__: list[str] = [] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="io.matlab", module="mio_utils", + private_modules=["_mio_utils"], all=__all__, + attribute=name) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/miobase.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/miobase.py new file mode 100644 index 0000000000000000000000000000000000000000..13e16848394471f9a1744a7b27fa4e6c86a9248b --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/miobase.py @@ -0,0 +1,16 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.io.matlab` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + +__all__ = ["MatReadError", "MatReadWarning", "MatWriteError"] # noqa: F822 + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="io.matlab", module="miobase", + private_modules=["_miobase"], all=__all__, + attribute=name) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/streams.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/streams.py new file mode 100644 index 0000000000000000000000000000000000000000..8125271b06cc6f44cee19b2f6079d26b8f32e268 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/streams.py @@ -0,0 +1,16 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.io.matlab` namespace for importing the functions +# included below. 
+ +from scipy._lib.deprecation import _sub_module_deprecation + +__all__: list[str] = [] + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="io.matlab", module="streams", + private_modules=["_streams"], all=__all__, + attribute=name) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/__init__.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/bad_miutf8_array_name.mat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/bad_miutf8_array_name.mat new file mode 100644 index 0000000000000000000000000000000000000000..a17203fbb2a7628db644b953ac7723b866a2a0a4 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/bad_miutf8_array_name.mat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/big_endian.mat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/big_endian.mat new file mode 100644 index 0000000000000000000000000000000000000000..2a0c982c298fba9df96fd5a927a9c08ee12b09df Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/big_endian.mat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/broken_utf8.mat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/broken_utf8.mat new file mode 100644 index 0000000000000000000000000000000000000000..4f6323870368cd97a6294e108ffea9067cf5e69b Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/broken_utf8.mat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/corrupted_zlib_checksum.mat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/corrupted_zlib_checksum.mat new file mode 100644 index 0000000000000000000000000000000000000000..c88cbb6f54b70d4e795de7cf43f7b46ff6d4d5ef Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/corrupted_zlib_checksum.mat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/corrupted_zlib_data.mat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/corrupted_zlib_data.mat new file mode 100644 index 0000000000000000000000000000000000000000..45a2ef4e39755ea1f41aab045f18a035af58ea07 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/corrupted_zlib_data.mat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/debigged_m4.mat 
b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/debigged_m4.mat new file mode 100644 index 0000000000000000000000000000000000000000..28aad199045d0b3bf31060300aff9231ee6d9a71 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/debigged_m4.mat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/japanese_utf8.txt b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/japanese_utf8.txt new file mode 100644 index 0000000000000000000000000000000000000000..1459b6b6ea635b17b5eb04c941e197f98cf04bf1 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/japanese_utf8.txt @@ -0,0 +1,5 @@ +Japanese: +すべての人間は、生まれながらにして自由であり、 +かつ、尊厳と権利と について平等である。 +人間は、理性と良心とを授けられており、 +互いに同胞の精神をもって行動しなければならない。 \ No newline at end of file diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/little_endian.mat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/little_endian.mat new file mode 100644 index 0000000000000000000000000000000000000000..df6db666dcf2b98d66e04933bd4011f649dcbe30 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/little_endian.mat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/logical_sparse.mat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/logical_sparse.mat new file mode 100644 index 0000000000000000000000000000000000000000..a60ad5b605a9dc6b0d85eb0a0e3e655c4955dd34 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/logical_sparse.mat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/miuint32_for_miint32.mat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/miuint32_for_miint32.mat new file mode 100644 index 0000000000000000000000000000000000000000..fd2c4994578edbf31431902ecfcb601b11f60b0b Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/miuint32_for_miint32.mat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/miutf8_array_name.mat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/miutf8_array_name.mat new file mode 100644 index 0000000000000000000000000000000000000000..ccfdaa8adb7879ba852eab9ce55b602e11dad06d Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/miutf8_array_name.mat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/nasty_duplicate_fieldnames.mat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/nasty_duplicate_fieldnames.mat new file mode 100644 index 
0000000000000000000000000000000000000000..35dcb715bca4cb7f4b0dca287648ef8ee797cd73 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/nasty_duplicate_fieldnames.mat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/one_by_zero_char.mat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/one_by_zero_char.mat new file mode 100644 index 0000000000000000000000000000000000000000..07e7dca456843004dcfd9023a800ea91d309814d Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/one_by_zero_char.mat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/parabola.mat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/parabola.mat new file mode 100644 index 0000000000000000000000000000000000000000..66350532a7737c475a3ae6ef1b1d8406543d890e Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/parabola.mat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/single_empty_string.mat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/single_empty_string.mat new file mode 100644 index 0000000000000000000000000000000000000000..293f387719e8bdcacb075e0de5737894e5dafed3 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/single_empty_string.mat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/some_functions.mat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/some_functions.mat new file mode 100644 index 0000000000000000000000000000000000000000..cc818593b48dd8d29a40a827210b54373e5acf50 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/some_functions.mat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/sqr.mat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/sqr.mat new file mode 100644 index 0000000000000000000000000000000000000000..2436d87cc5dfb6d558b841c2367bfe2363bd1b3c Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/sqr.mat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/test_empty_struct.mat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/test_empty_struct.mat new file mode 100644 index 0000000000000000000000000000000000000000..30c8c8ad5378be4508bd785da8b7cef38adbd13e Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/test_empty_struct.mat differ diff --git 
a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/test_mat4_le_floats.mat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/test_mat4_le_floats.mat new file mode 100644 index 0000000000000000000000000000000000000000..6643c42ddcc9579930980b7eb30e11f339638404 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/test_mat4_le_floats.mat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/test_skip_variable.mat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/test_skip_variable.mat new file mode 100644 index 0000000000000000000000000000000000000000..efbe3fec64ee54c9f8b3998e5035ccfa251e74ff Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/test_skip_variable.mat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testbool_8_WIN64.mat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testbool_8_WIN64.mat new file mode 100644 index 0000000000000000000000000000000000000000..faa30b10bc61ea4889bd9e776c0a1a079e2c2a90 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testbool_8_WIN64.mat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testcell_6.1_SOL2.mat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testcell_6.1_SOL2.mat new file mode 100644 index 0000000000000000000000000000000000000000..512f7d889420a016094a903585f27acaa50bc658 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testcell_6.1_SOL2.mat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testcell_6.5.1_GLNX86.mat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testcell_6.5.1_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..a7633104c1e4f32fe30fd43f389d7559527c8211 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testcell_6.5.1_GLNX86.mat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testcell_7.4_GLNX86.mat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testcell_7.4_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..fc893f331c985cf17b7ce9b7b8c179eaf2103659 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testcell_7.4_GLNX86.mat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testcellnest_6.1_SOL2.mat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testcellnest_6.1_SOL2.mat new file 
mode 100644 index 0000000000000000000000000000000000000000..4198a4f2aeb8effcccf94a9c0114539f98124179 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testcellnest_6.1_SOL2.mat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testcellnest_6.5.1_GLNX86.mat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testcellnest_6.5.1_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..2c7826eeacdb456e5290cafba343703c7596d191 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testcellnest_6.5.1_GLNX86.mat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testcellnest_7.1_GLNX86.mat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testcellnest_7.1_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..b3b086cc31dce2de1e300a1d018b0bf5661b69f3 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testcellnest_7.1_GLNX86.mat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testcomplex_6.1_SOL2.mat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testcomplex_6.1_SOL2.mat new file mode 100644 index 0000000000000000000000000000000000000000..32fcd2a93c91eff478a3ab3076e5c78e31f09bf1 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testcomplex_6.1_SOL2.mat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testcomplex_6.5.1_GLNX86.mat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testcomplex_6.5.1_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..f3ecd203376c17b09d97a24aceab824dae0f91c1 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testcomplex_6.5.1_GLNX86.mat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testcomplex_7.1_GLNX86.mat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testcomplex_7.1_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..c0c083855f38e62e3a29460b745f198c9c79313d Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testcomplex_7.1_GLNX86.mat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testcomplex_7.4_GLNX86.mat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testcomplex_7.4_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..6a187edb1828256362617d3fe24d26cf58e7ca3b Binary files /dev/null and 
b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testcomplex_7.4_GLNX86.mat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testdouble_6.1_SOL2.mat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testdouble_6.1_SOL2.mat new file mode 100644 index 0000000000000000000000000000000000000000..8e36c0c8ce62d7559b60fde454a96e8eefcbcb92 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testdouble_6.1_SOL2.mat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testdouble_7.4_GLNX86.mat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testdouble_7.4_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..9097bb08712d5bfccf172b0366573f503136228d Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testdouble_7.4_GLNX86.mat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testemptycell_5.3_SOL2.mat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testemptycell_5.3_SOL2.mat new file mode 100644 index 0000000000000000000000000000000000000000..e7dec3b81abdae8769e0ae0329948548f4038adf Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testemptycell_5.3_SOL2.mat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testemptycell_7.1_GLNX86.mat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testemptycell_7.1_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..f29d4f9327aa906729234a38caa05ebfc50cfc30 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testemptycell_7.1_GLNX86.mat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testmatrix_4.2c_SOL2.mat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testmatrix_4.2c_SOL2.mat new file mode 100644 index 0000000000000000000000000000000000000000..3698c8853b46d4a42194002523b57fddfb225908 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testmatrix_4.2c_SOL2.mat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testmatrix_6.1_SOL2.mat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testmatrix_6.1_SOL2.mat new file mode 100644 index 0000000000000000000000000000000000000000..164be1109d977cf7681b1ea00a5df80d5e8f8e71 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testmatrix_6.1_SOL2.mat differ diff --git 
a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testmatrix_6.5.1_GLNX86.mat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testmatrix_6.5.1_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..a8735e9a23558ce86a528ceafa8f3475b053e43b Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testmatrix_6.5.1_GLNX86.mat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testmatrix_7.1_GLNX86.mat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testmatrix_7.1_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..b6fb05bb7564c863d5bb6c145fe8b06928d3805a Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testmatrix_7.1_GLNX86.mat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testmatrix_7.4_GLNX86.mat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testmatrix_7.4_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..eb537ab1042b0f989d49711b1a36cc508946fe55 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testmatrix_7.4_GLNX86.mat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testminus_4.2c_SOL2.mat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testminus_4.2c_SOL2.mat new file mode 100644 index 0000000000000000000000000000000000000000..cc207ed9f32095f39b7690e2dc1e2dc0d55ee8e0 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testminus_4.2c_SOL2.mat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testminus_6.1_SOL2.mat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testminus_6.1_SOL2.mat new file mode 100644 index 0000000000000000000000000000000000000000..c2f0ba2ae4c8a1750cace6eae0267e9736272fc0 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testminus_6.1_SOL2.mat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testminus_6.5.1_GLNX86.mat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testminus_6.5.1_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..b4dbd152d6e9f3d289b3c4a9792729d2735a4c5c Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testminus_6.5.1_GLNX86.mat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testminus_7.1_GLNX86.mat 
b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testminus_7.1_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..fadcd2366b1867239782f073291ff327c2af3001 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testminus_7.1_GLNX86.mat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testminus_7.4_GLNX86.mat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testminus_7.4_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..9ce65f91116f68332d1c16e21319e965541d0d73 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testminus_7.4_GLNX86.mat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testmulti_7.4_GLNX86.mat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testmulti_7.4_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..6d3e068977edfe6407f29404f0a7d1737f7d3eba Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testmulti_7.4_GLNX86.mat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testobject_6.5.1_GLNX86.mat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testobject_6.5.1_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..f68323b0c8eb7fc999dead349ea3bd3a6da66bd4 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testobject_6.5.1_GLNX86.mat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testobject_7.1_GLNX86.mat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testobject_7.1_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..83dcad34249afa543bf66dae9b836276246aab4a Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testobject_7.1_GLNX86.mat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testonechar_4.2c_SOL2.mat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testonechar_4.2c_SOL2.mat new file mode 100644 index 0000000000000000000000000000000000000000..cdb4191c7d2eb0ac66d4f6add250e1f6a604d892 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testonechar_4.2c_SOL2.mat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testonechar_6.1_SOL2.mat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testonechar_6.1_SOL2.mat new file mode 100644 index 
0000000000000000000000000000000000000000..3b5a428501a53ae7308c7b6edc42f4881820664d Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testonechar_6.1_SOL2.mat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testonechar_6.5.1_GLNX86.mat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testonechar_6.5.1_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..8cef2dd7ea6df8aac26ed067a9427935b81c7ac7 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testonechar_6.5.1_GLNX86.mat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testonechar_7.1_GLNX86.mat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testonechar_7.1_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..5ba4810ac67756c17b0ef3163a496e913c0b5e57 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testonechar_7.1_GLNX86.mat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testonechar_7.4_GLNX86.mat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testonechar_7.4_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..8964765f7bd207bfab63b4d16569cb1c3763bda7 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testonechar_7.4_GLNX86.mat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testsimplecell.mat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testsimplecell.mat new file mode 100644 index 0000000000000000000000000000000000000000..2a98f48917f8f275e541eeac5ef1fe741c40bb0b Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testsimplecell.mat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testsparse_4.2c_SOL2.mat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testsparse_4.2c_SOL2.mat new file mode 100644 index 0000000000000000000000000000000000000000..55cbd3c1b3d65630beae47832ffbcc7a6fd43354 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testsparse_4.2c_SOL2.mat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testsparse_7.1_GLNX86.mat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testsparse_7.1_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..55b510762ee9b0ac04776e38f6b4bb46b0d10021 Binary files /dev/null and 
b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testsparse_7.1_GLNX86.mat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testsparse_7.4_GLNX86.mat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testsparse_7.4_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..bdb6ce66ce79b808f044124156db4b803dab155e Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testsparse_7.4_GLNX86.mat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testsparsecomplex_6.1_SOL2.mat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testsparsecomplex_6.1_SOL2.mat new file mode 100644 index 0000000000000000000000000000000000000000..520e1cedb3823b859666b1fa8872e073904fd4c6 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testsparsecomplex_6.1_SOL2.mat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testsparsecomplex_6.5.1_GLNX86.mat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testsparsecomplex_6.5.1_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..969b7143dfff3bb817dbf70c54af8303c3b5822e Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testsparsecomplex_6.5.1_GLNX86.mat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testsparsecomplex_7.1_GLNX86.mat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testsparsecomplex_7.1_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..9117dce3092e3e6a39b67da9a7ad1dcfc3ded385 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testsparsecomplex_7.1_GLNX86.mat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testsparsefloat_7.4_GLNX86.mat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testsparsefloat_7.4_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..15424266a3bd4aa1e7525a8fdc4945b51d2b5ad6 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testsparsefloat_7.4_GLNX86.mat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/teststring_4.2c_SOL2.mat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/teststring_4.2c_SOL2.mat new file mode 100644 index 0000000000000000000000000000000000000000..137561e1f636d7b08959e43e969a6984eb7a3b37 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/teststring_4.2c_SOL2.mat differ diff --git 
a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/teststring_6.1_SOL2.mat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/teststring_6.1_SOL2.mat new file mode 100644 index 0000000000000000000000000000000000000000..2ad75f2e17d8b3fda285490d52b426d1f27d0d95 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/teststring_6.1_SOL2.mat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/teststringarray_4.2c_SOL2.mat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/teststringarray_4.2c_SOL2.mat new file mode 100644 index 0000000000000000000000000000000000000000..fa687ee988ce530bca87f46235667baa30ac038b Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/teststringarray_4.2c_SOL2.mat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/teststringarray_6.1_SOL2.mat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/teststringarray_6.1_SOL2.mat new file mode 100644 index 0000000000000000000000000000000000000000..11afb412056ad803f0d8ac1d9dcb188d42285fdf Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/teststringarray_6.1_SOL2.mat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/teststringarray_6.5.1_GLNX86.mat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/teststringarray_6.5.1_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..75e07a0b55e008b070f41dabba7480a4e463b67a Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/teststringarray_6.5.1_GLNX86.mat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/teststringarray_7.1_GLNX86.mat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/teststringarray_7.1_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..7d76f63643737834053f80539188c9dad75ed0cb Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/teststringarray_7.1_GLNX86.mat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/teststringarray_7.4_GLNX86.mat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/teststringarray_7.4_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..954e39beb8156b460ca904ff66261d8f2fc338cb Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/teststringarray_7.4_GLNX86.mat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/teststruct_6.5.1_GLNX86.mat 
b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/teststruct_6.5.1_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..6feb6e42375ebebf6dd9440ee09312204cbf1a33 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/teststruct_6.5.1_GLNX86.mat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/teststruct_7.1_GLNX86.mat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/teststruct_7.1_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..b2ff2226223181ec5c42d36afe4f56728f25972d Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/teststruct_7.1_GLNX86.mat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/teststructarr_6.1_SOL2.mat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/teststructarr_6.1_SOL2.mat new file mode 100644 index 0000000000000000000000000000000000000000..da57365926afe1e8d7dd424a6fcd5b52bc3233ac Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/teststructarr_6.1_SOL2.mat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/teststructarr_6.5.1_GLNX86.mat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/teststructarr_6.5.1_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..d1c97a7a2e1edf9683959ec36e899ef8e355073c Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/teststructarr_6.5.1_GLNX86.mat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/teststructarr_7.1_GLNX86.mat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/teststructarr_7.1_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..c7ca09594106a765e815a55e942019d17c181270 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/teststructarr_7.1_GLNX86.mat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/teststructnest_6.1_SOL2.mat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/teststructnest_6.1_SOL2.mat new file mode 100644 index 0000000000000000000000000000000000000000..2c34c4d8c1477bc4859880a8d2f800073825dcd1 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/teststructnest_6.1_SOL2.mat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/teststructnest_7.1_GLNX86.mat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/teststructnest_7.1_GLNX86.mat new file mode 100644 index 
0000000000000000000000000000000000000000..0f6f5444b0c1e4bcd80dc0f63b28523d655b05d0 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/teststructnest_7.1_GLNX86.mat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/teststructnest_7.4_GLNX86.mat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/teststructnest_7.4_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..faf9221b776eee67cd5d2971da5ba77732ef8016 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/teststructnest_7.4_GLNX86.mat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testunicode_7.1_GLNX86.mat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testunicode_7.1_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..1b7b3d7f002080839f672e4eb858bbfbddda27ec Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testunicode_7.1_GLNX86.mat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testunicode_7.4_GLNX86.mat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testunicode_7.4_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..d22fb57c81fc3ec9ee7e9b447a05e8a89ff1fcfe Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testunicode_7.4_GLNX86.mat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testvec_4_GLNX86.mat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testvec_4_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..76c51d01388a1770b348bc603ebfdd51bc011f0c Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testvec_4_GLNX86.mat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/test_byteordercodes.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/test_byteordercodes.py new file mode 100644 index 0000000000000000000000000000000000000000..535434d188ff575029cc7a0de807b0daa7348f73 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/test_byteordercodes.py @@ -0,0 +1,29 @@ +''' Tests for byteorder module ''' + +import sys + +from numpy.testing import assert_ +from pytest import raises as assert_raises + +import scipy.io.matlab._byteordercodes as sibc + + +def test_native(): + native_is_le = sys.byteorder == 'little' + assert_(sibc.sys_is_le == native_is_le) + + +def test_to_numpy(): + if sys.byteorder == 'little': + assert_(sibc.to_numpy_code('native') == '<') + assert_(sibc.to_numpy_code('swapped') == '>') + else: + assert_(sibc.to_numpy_code('native') == '>') + assert_(sibc.to_numpy_code('swapped') == '<') + 
assert_(sibc.to_numpy_code('native') == sibc.to_numpy_code('=')) + assert_(sibc.to_numpy_code('big') == '>') + for code in ('little', '<', 'l', 'L', 'le'): + assert_(sibc.to_numpy_code(code) == '<') + for code in ('big', '>', 'b', 'B', 'be'): + assert_(sibc.to_numpy_code(code) == '>') + assert_raises(ValueError, sibc.to_numpy_code, 'silly string') diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/test_mio.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/test_mio.py new file mode 100644 index 0000000000000000000000000000000000000000..ef8b3e34ee666fa297d9e25eec1e409ef68edb5f --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/test_mio.py @@ -0,0 +1,1371 @@ +import os +from collections import OrderedDict +from os.path import join as pjoin, dirname +from glob import glob +from io import BytesIO +import re +from tempfile import mkdtemp + +import warnings +import shutil +import gzip + +from numpy.testing import (assert_array_equal, assert_array_almost_equal, + assert_equal, assert_, assert_warns, assert_allclose) +import pytest +from pytest import raises as assert_raises + +import numpy as np +from numpy import array +from scipy.sparse import issparse, eye_array, coo_array, csc_array + +import scipy.io +from scipy.io.matlab import MatlabOpaque, MatlabFunction, MatlabObject +import scipy.io.matlab._byteordercodes as boc +from scipy.io.matlab._miobase import ( + matdims, MatWriteError, MatReadError, matfile_version) +from scipy.io.matlab._mio import mat_reader_factory, loadmat, savemat, whosmat +from scipy.io.matlab._mio5 import ( + MatFile5Writer, MatFile5Reader, varmats_from_mat, to_writeable, + EmptyStructMarker) +import scipy.io.matlab._mio5_params as mio5p +from scipy._lib._util import VisibleDeprecationWarning + + +test_data_path = pjoin(dirname(__file__), 'data') +pytestmark = pytest.mark.thread_unsafe + + +def mlarr(*args, **kwargs): + """Convenience function to return matlab-compatible 2-D array.""" + arr = np.array(*args, **kwargs) + arr.shape = matdims(arr) + return arr + + +# Define cases to test +theta = np.pi/4*np.arange(9,dtype=float).reshape(1,9) +case_table4 = [ + {'name': 'double', + 'classes': {'testdouble': 'double'}, + 'expected': {'testdouble': theta} + }] +case_table4.append( + {'name': 'string', + 'classes': {'teststring': 'char'}, + 'expected': {'teststring': + array(['"Do nine men interpret?" 
"Nine men," I nod.'])} + }) +case_table4.append( + {'name': 'complex', + 'classes': {'testcomplex': 'double'}, + 'expected': {'testcomplex': np.cos(theta) + 1j*np.sin(theta)} + }) +A = np.zeros((3,5)) +A[0] = list(range(1,6)) +A[:,0] = list(range(1,4)) +case_table4.append( + {'name': 'matrix', + 'classes': {'testmatrix': 'double'}, + 'expected': {'testmatrix': A}, + }) +case_table4.append( + {'name': 'sparse', + 'classes': {'testsparse': 'sparse'}, + 'expected': {'testsparse': coo_array(A)}, + }) +B = A.astype(complex) +B[0,0] += 1j +case_table4.append( + {'name': 'sparsecomplex', + 'classes': {'testsparsecomplex': 'sparse'}, + 'expected': {'testsparsecomplex': coo_array(B)}, + }) +case_table4.append( + {'name': 'multi', + 'classes': {'theta': 'double', 'a': 'double'}, + 'expected': {'theta': theta, 'a': A}, + }) +case_table4.append( + {'name': 'minus', + 'classes': {'testminus': 'double'}, + 'expected': {'testminus': mlarr(-1)}, + }) +case_table4.append( + {'name': 'onechar', + 'classes': {'testonechar': 'char'}, + 'expected': {'testonechar': array(['r'])}, + }) +# Cell arrays stored as object arrays +CA = mlarr(( # tuple for object array creation + [], + mlarr([1]), + mlarr([[1,2]]), + mlarr([[1,2,3]])), dtype=object).reshape(1,-1) +CA[0,0] = array( + ['This cell contains this string and 3 arrays of increasing length']) +case_table5 = [ + {'name': 'cell', + 'classes': {'testcell': 'cell'}, + 'expected': {'testcell': CA}}] +CAE = mlarr(( # tuple for object array creation + mlarr(1), + mlarr(2), + mlarr([]), + mlarr([]), + mlarr(3)), dtype=object).reshape(1,-1) +objarr = np.empty((1,1),dtype=object) +objarr[0,0] = mlarr(1) +case_table5.append( + {'name': 'scalarcell', + 'classes': {'testscalarcell': 'cell'}, + 'expected': {'testscalarcell': objarr} + }) +case_table5.append( + {'name': 'emptycell', + 'classes': {'testemptycell': 'cell'}, + 'expected': {'testemptycell': CAE}}) +case_table5.append( + {'name': 'stringarray', + 'classes': {'teststringarray': 'char'}, + 'expected': {'teststringarray': array( + ['one ', 'two ', 'three'])}, + }) +case_table5.append( + {'name': '3dmatrix', + 'classes': {'test3dmatrix': 'double'}, + 'expected': { + 'test3dmatrix': np.transpose(np.reshape(list(range(1,25)), (4,3,2)))} + }) +st_sub_arr = array([np.sqrt(2),np.exp(1),np.pi]).reshape(1,3) +dtype = [(n, object) for n in ['stringfield', 'doublefield', 'complexfield']] +st1 = np.zeros((1,1), dtype) +st1['stringfield'][0,0] = array(['Rats live on no evil star.']) +st1['doublefield'][0,0] = st_sub_arr +st1['complexfield'][0,0] = st_sub_arr * (1 + 1j) +case_table5.append( + {'name': 'struct', + 'classes': {'teststruct': 'struct'}, + 'expected': {'teststruct': st1} + }) +CN = np.zeros((1,2), dtype=object) +CN[0,0] = mlarr(1) +CN[0,1] = np.zeros((1,3), dtype=object) +CN[0,1][0,0] = mlarr(2, dtype=np.uint8) +CN[0,1][0,1] = mlarr([[3]], dtype=np.uint8) +CN[0,1][0,2] = np.zeros((1,2), dtype=object) +CN[0,1][0,2][0,0] = mlarr(4, dtype=np.uint8) +CN[0,1][0,2][0,1] = mlarr(5, dtype=np.uint8) +case_table5.append( + {'name': 'cellnest', + 'classes': {'testcellnest': 'cell'}, + 'expected': {'testcellnest': CN}, + }) +st2 = np.empty((1,1), dtype=[(n, object) for n in ['one', 'two']]) +st2[0,0]['one'] = mlarr(1) +st2[0,0]['two'] = np.empty((1,1), dtype=[('three', object)]) +st2[0,0]['two'][0,0]['three'] = array(['number 3']) +case_table5.append( + {'name': 'structnest', + 'classes': {'teststructnest': 'struct'}, + 'expected': {'teststructnest': st2} + }) +a = np.empty((1,2), dtype=[(n, object) for n in ['one', 'two']]) 
+a[0,0]['one'] = mlarr(1) +a[0,0]['two'] = mlarr(2) +a[0,1]['one'] = array(['number 1']) +a[0,1]['two'] = array(['number 2']) +case_table5.append( + {'name': 'structarr', + 'classes': {'teststructarr': 'struct'}, + 'expected': {'teststructarr': a} + }) +ODT = np.dtype([(n, object) for n in + ['expr', 'inputExpr', 'args', + 'isEmpty', 'numArgs', 'version']]) +MO = MatlabObject(np.zeros((1,1), dtype=ODT), 'inline') +m0 = MO[0,0] +m0['expr'] = array(['x']) +m0['inputExpr'] = array([' x = INLINE_INPUTS_{1};']) +m0['args'] = array(['x']) +m0['isEmpty'] = mlarr(0) +m0['numArgs'] = mlarr(1) +m0['version'] = mlarr(1) +case_table5.append( + {'name': 'object', + 'classes': {'testobject': 'object'}, + 'expected': {'testobject': MO} + }) +fp_u_str = open(pjoin(test_data_path, 'japanese_utf8.txt'), 'rb') +u_str = fp_u_str.read().decode('utf-8') +fp_u_str.close() +case_table5.append( + {'name': 'unicode', + 'classes': {'testunicode': 'char'}, + 'expected': {'testunicode': array([u_str])} + }) +case_table5.append( + {'name': 'sparse', + 'classes': {'testsparse': 'sparse'}, + 'expected': {'testsparse': coo_array(A)}, + }) +case_table5.append( + {'name': 'sparsecomplex', + 'classes': {'testsparsecomplex': 'sparse'}, + 'expected': {'testsparsecomplex': coo_array(B)}, + }) +case_table5.append( + {'name': 'bool', + 'classes': {'testbools': 'logical'}, + 'expected': {'testbools': + array([[True], [False]])}, + }) + +case_table5_rt = case_table5[:] +# Inline functions can't be concatenated in matlab, so RT only +case_table5_rt.append( + {'name': 'objectarray', + 'classes': {'testobjectarray': 'object'}, + 'expected': {'testobjectarray': np.repeat(MO, 2).reshape(1,2)}}) + + +def types_compatible(var1, var2): + """Check if types are same or compatible. + + 0-D numpy scalars are compatible with bare python scalars. 
+ """ + type1 = type(var1) + type2 = type(var2) + if type1 is type2: + return True + if type1 is np.ndarray and var1.shape == (): + return type(var1.item()) is type2 + if type2 is np.ndarray and var2.shape == (): + return type(var2.item()) is type1 + return False + + +def _check_level(label, expected, actual): + """ Check one level of a potentially nested array """ + if issparse(expected): # allow different types of sparse matrices + assert_(issparse(actual)) + assert_array_almost_equal(actual.toarray(), + expected.toarray(), + err_msg=label, + decimal=5) + return + # Check types are as expected + assert_(types_compatible(expected, actual), + f"Expected type {type(expected)}, got {type(actual)} at {label}") + # A field in a record array may not be an ndarray + # A scalar from a record array will be type np.void + if not isinstance(expected, np.void | np.ndarray | MatlabObject): + assert_equal(expected, actual) + return + # This is an ndarray-like thing + assert_(expected.shape == actual.shape, + msg=f'Expected shape {expected.shape}, got {actual.shape} at {label}') + ex_dtype = expected.dtype + if ex_dtype.hasobject: # array of objects + if isinstance(expected, MatlabObject): + assert_equal(expected.classname, actual.classname) + for i, ev in enumerate(expected): + level_label = "%s, [%d], " % (label, i) + _check_level(level_label, ev, actual[i]) + return + if ex_dtype.fields: # probably recarray + for fn in ex_dtype.fields: + level_label = f"{label}, field {fn}, " + _check_level(level_label, + expected[fn], actual[fn]) + return + if ex_dtype.type in (str, # string or bool + np.str_, + np.bool_): + assert_equal(actual, expected, err_msg=label) + return + # Something numeric + assert_array_almost_equal(actual, expected, err_msg=label, decimal=5) + + +def _load_check_case(name, files, case): + for file_name in files: + matdict = loadmat(file_name, struct_as_record=True, spmatrix=False) + label = f"test {name}; file {file_name}" + for k, expected in case.items(): + k_label = f"{label}, variable {k}" + assert_(k in matdict, f"Missing key at {k_label}") + _check_level(k_label, expected, matdict[k]) + + +def _whos_check_case(name, files, case, classes): + for file_name in files: + label = f"test {name}; file {file_name}" + + whos = whosmat(file_name) + + expected_whos = [ + (k, expected.shape, classes[k]) for k, expected in case.items()] + + whos.sort() + expected_whos.sort() + assert_equal(whos, expected_whos, + f"{label}: {whos!r} != {expected_whos!r}" + ) + + +# Round trip tests +def _rt_check_case(name, expected, format): + mat_stream = BytesIO() + savemat(mat_stream, expected, format=format) + mat_stream.seek(0) + _load_check_case(name, [mat_stream], expected) + + +# generator for tests +def _cases(version, filt='test%(name)s_*.mat'): + if version == '4': + cases = case_table4 + elif version == '5': + cases = case_table5 + else: + assert version == '5_rt' + cases = case_table5_rt + for case in cases: + name = case['name'] + expected = case['expected'] + if filt is None: + files = None + else: + use_filt = pjoin(test_data_path, filt % dict(name=name)) + files = glob(use_filt) + assert len(files) > 0, \ + f"No files for test {name} using filter {filt}" + classes = case['classes'] + yield name, files, expected, classes + + +@pytest.mark.parametrize('version', ('4', '5')) +def test_load(version): + for case in _cases(version): + _load_check_case(*case[:3]) + + +@pytest.mark.parametrize('version', ('4', '5')) +def test_whos(version): + for case in _cases(version): + _whos_check_case(*case) + + 
+# generator for round trip tests +@pytest.mark.parametrize('version, fmts', [ + ('4', ['4', '5']), + ('5_rt', ['5']), +]) +def test_round_trip(version, fmts): + for case in _cases(version, filt=None): + for fmt in fmts: + _rt_check_case(case[0], case[2], fmt) + + +def test_gzip_simple(): + xdense = np.zeros((20,20)) + xdense[2,3] = 2.3 + xdense[4,5] = 4.5 + x = csc_array(xdense) + + name = 'gzip_test' + expected = {'x':x} + format = '4' + + tmpdir = mkdtemp() + try: + fname = pjoin(tmpdir,name) + mat_stream = gzip.open(fname, mode='wb') + savemat(mat_stream, expected, format=format) + mat_stream.close() + + mat_stream = gzip.open(fname, mode='rb') + actual = loadmat(mat_stream, struct_as_record=True, spmatrix=False) + mat_stream.close() + finally: + shutil.rmtree(tmpdir) + + assert_array_almost_equal(actual['x'].toarray(), + expected['x'].toarray(), + err_msg=repr(actual)) + + +def test_multiple_open(): + # Ticket #1039, on Windows: check that files are not left open + tmpdir = mkdtemp() + try: + x = dict(x=np.zeros((2, 2))) + + fname = pjoin(tmpdir, "a.mat") + + # Check that file is not left open + savemat(fname, x) + os.unlink(fname) + savemat(fname, x) + loadmat(fname) + os.unlink(fname) + + # Check that stream is left open + f = open(fname, 'wb') + savemat(f, x) + f.seek(0) + f.close() + + f = open(fname, 'rb') + loadmat(f) + f.seek(0) + f.close() + finally: + shutil.rmtree(tmpdir) + + +def test_mat73(): + # Check any hdf5 files raise an error + filenames = glob( + pjoin(test_data_path, 'testhdf5*.mat')) + assert_(len(filenames) > 0) + for filename in filenames: + fp = open(filename, 'rb') + assert_raises(NotImplementedError, + loadmat, + fp, + struct_as_record=True) + fp.close() + + +def test_warnings(): + # This test is an echo of the previous behavior, which was to raise a + # warning if the user triggered a search for mat files on the Python system + # path. We can remove the test in the next version after upcoming (0.13). + fname = pjoin(test_data_path, 'testdouble_7.1_GLNX86.mat') + with warnings.catch_warnings(): + warnings.simplefilter('error') + # This should not generate a warning + loadmat(fname, struct_as_record=True) + # This neither + loadmat(fname, struct_as_record=False) + + +def test_regression_653(): + # Saving a dictionary with only invalid keys used to raise an error. Now we + # save this as an empty struct in matlab space. 
+ sio = BytesIO() + savemat(sio, {'d':{1:2}}, format='5') + back = loadmat(sio)['d'] + # Check we got an empty struct equivalent + assert_equal(back.shape, (1,1)) + assert_equal(back.dtype, np.dtype(object)) + assert_(back[0,0] is None) + + +def test_structname_len(): + # Test limit for length of field names in structs + lim = 31 + fldname = 'a' * lim + st1 = np.zeros((1,1), dtype=[(fldname, object)]) + savemat(BytesIO(), {'longstruct': st1}, format='5') + fldname = 'a' * (lim+1) + st1 = np.zeros((1,1), dtype=[(fldname, object)]) + assert_raises(ValueError, savemat, BytesIO(), + {'longstruct': st1}, format='5') + + +def test_4_and_long_field_names_incompatible(): + # Long field names option not supported in 4 + my_struct = np.zeros((1,1),dtype=[('my_fieldname',object)]) + assert_raises(ValueError, savemat, BytesIO(), + {'my_struct':my_struct}, format='4', long_field_names=True) + + +def test_long_field_names(): + # Test limit for length of field names in structs + lim = 63 + fldname = 'a' * lim + st1 = np.zeros((1,1), dtype=[(fldname, object)]) + savemat(BytesIO(), {'longstruct': st1}, format='5',long_field_names=True) + fldname = 'a' * (lim+1) + st1 = np.zeros((1,1), dtype=[(fldname, object)]) + assert_raises(ValueError, savemat, BytesIO(), + {'longstruct': st1}, format='5',long_field_names=True) + + +def test_long_field_names_in_struct(): + # Regression test - long_field_names was erased if you passed a struct + # within a struct + lim = 63 + fldname = 'a' * lim + cell = np.ndarray((1,2),dtype=object) + st1 = np.zeros((1,1), dtype=[(fldname, object)]) + cell[0,0] = st1 + cell[0,1] = st1 + savemat(BytesIO(), {'longstruct': cell}, format='5',long_field_names=True) + # + # Check to make sure it fails with long field names off + # + assert_raises(ValueError, savemat, BytesIO(), + {'longstruct': cell}, format='5', long_field_names=False) + + +def test_cell_with_one_thing_in_it(): + # Regression test - make a cell array that's 1 x 2 and put two + # strings in it. It works. Make a cell array that's 1 x 1 and put + # a string in it. It should work but, in the old days, it didn't. 
+ cells = np.ndarray((1,2),dtype=object) + cells[0,0] = 'Hello' + cells[0,1] = 'World' + savemat(BytesIO(), {'x': cells}, format='5') + + cells = np.ndarray((1,1),dtype=object) + cells[0,0] = 'Hello, world' + savemat(BytesIO(), {'x': cells}, format='5') + + +def test_writer_properties(): + # Tests getting, setting of properties of matrix writer + mfw = MatFile5Writer(BytesIO()) + assert_equal(mfw.global_vars, []) + mfw.global_vars = ['avar'] + assert_equal(mfw.global_vars, ['avar']) + assert_equal(mfw.unicode_strings, False) + mfw.unicode_strings = True + assert_equal(mfw.unicode_strings, True) + assert_equal(mfw.long_field_names, False) + mfw.long_field_names = True + assert_equal(mfw.long_field_names, True) + + +def test_use_small_element(): + # Test whether we're using small data element or not + sio = BytesIO() + wtr = MatFile5Writer(sio) + # First check size for no sde for name + arr = np.zeros(10) + wtr.put_variables({'aaaaa': arr}) + w_sz = len(sio.getvalue()) + # Check small name results in largish difference in size + sio.truncate(0) + sio.seek(0) + wtr.put_variables({'aaaa': arr}) + assert_(w_sz - len(sio.getvalue()) > 4) + # Whereas increasing name size makes less difference + sio.truncate(0) + sio.seek(0) + wtr.put_variables({'aaaaaa': arr}) + assert_(len(sio.getvalue()) - w_sz < 4) + + +def test_save_dict(): + # Test that both dict and OrderedDict can be saved (as recarray), + # loaded as matstruct, and preserve order + ab_exp = np.array([[(1, 2)]], dtype=[('a', object), ('b', object)]) + for dict_type in (dict, OrderedDict): + # Initialize with tuples to keep order + d = dict_type([('a', 1), ('b', 2)]) + stream = BytesIO() + savemat(stream, {'dict': d}) + stream.seek(0) + vals = loadmat(stream)['dict'] + assert_equal(vals.dtype.names, ('a', 'b')) + assert_array_equal(vals, ab_exp) + + +def test_1d_shape(): + # New 5 behavior is 1D -> row vector + arr = np.arange(5) + for format in ('4', '5'): + # Column is the default + stream = BytesIO() + savemat(stream, {'oned': arr}, format=format) + vals = loadmat(stream) + assert_equal(vals['oned'].shape, (1, 5)) + # can be explicitly 'column' for oned_as + stream = BytesIO() + savemat(stream, {'oned':arr}, + format=format, + oned_as='column') + vals = loadmat(stream) + assert_equal(vals['oned'].shape, (5,1)) + # but different from 'row' + stream = BytesIO() + savemat(stream, {'oned':arr}, + format=format, + oned_as='row') + vals = loadmat(stream) + assert_equal(vals['oned'].shape, (1,5)) + + +def test_compression(): + arr = np.zeros(100).reshape((5,20)) + arr[2,10] = 1 + stream = BytesIO() + savemat(stream, {'arr':arr}) + raw_len = len(stream.getvalue()) + vals = loadmat(stream) + assert_array_equal(vals['arr'], arr) + stream = BytesIO() + savemat(stream, {'arr':arr}, do_compression=True) + compressed_len = len(stream.getvalue()) + vals = loadmat(stream) + assert_array_equal(vals['arr'], arr) + assert_(raw_len > compressed_len) + # Concatenate, test later + arr2 = arr.copy() + arr2[0,0] = 1 + stream = BytesIO() + savemat(stream, {'arr':arr, 'arr2':arr2}, do_compression=False) + vals = loadmat(stream) + assert_array_equal(vals['arr2'], arr2) + stream = BytesIO() + savemat(stream, {'arr':arr, 'arr2':arr2}, do_compression=True) + vals = loadmat(stream) + assert_array_equal(vals['arr2'], arr2) + + +def test_single_object(): + stream = BytesIO() + savemat(stream, {'A':np.array(1, dtype=object)}) + + +def test_skip_variable(): + # Test skipping over the first of two variables in a MAT file + # using mat_reader_factory and put_variables to 
read them in. + # + # This is a regression test of a problem that's caused by + # using the compressed file reader seek instead of the raw file + # I/O seek when skipping over a compressed chunk. + # + # The problem arises when the chunk is large: this file has + # a 256x256 array of random (uncompressible) doubles. + # + filename = pjoin(test_data_path,'test_skip_variable.mat') + # + # Prove that it loads with loadmat + # + d = loadmat(filename, struct_as_record=True) + assert_('first' in d) + assert_('second' in d) + # + # Make the factory + # + factory, file_opened = mat_reader_factory(filename, struct_as_record=True) + # + # This is where the factory breaks with an error in MatMatrixGetter.to_next + # + d = factory.get_variables('second') + assert_('second' in d) + factory.mat_stream.close() + + +def test_empty_struct(): + # ticket 885 + filename = pjoin(test_data_path,'test_empty_struct.mat') + # before ticket fix, this would crash with ValueError, empty data + # type + d = loadmat(filename, struct_as_record=True) + a = d['a'] + assert_equal(a.shape, (1,1)) + assert_equal(a.dtype, np.dtype(object)) + assert_(a[0,0] is None) + stream = BytesIO() + arr = np.array((), dtype='U') + # before ticket fix, this used to give data type not understood + savemat(stream, {'arr':arr}) + d = loadmat(stream) + a2 = d['arr'] + assert_array_equal(a2, arr) + + +def test_save_empty_dict(): + # saving empty dict also gives empty struct + stream = BytesIO() + savemat(stream, {'arr': {}}) + d = loadmat(stream) + a = d['arr'] + assert_equal(a.shape, (1,1)) + assert_equal(a.dtype, np.dtype(object)) + assert_(a[0,0] is None) + + +def assert_any_equal(output, alternatives): + """ Assert `output` is equal to at least one element in `alternatives` + """ + one_equal = False + for expected in alternatives: + if np.all(output == expected): + one_equal = True + break + assert_(one_equal) + + +def test_to_writeable(): + # Test to_writeable function + res = to_writeable(np.array([1])) # pass through ndarrays + assert_equal(res.shape, (1,)) + assert_array_equal(res, 1) + # Dict fields can be written in any order + expected1 = np.array([(1, 2)], dtype=[('a', '|O8'), ('b', '|O8')]) + expected2 = np.array([(2, 1)], dtype=[('b', '|O8'), ('a', '|O8')]) + alternatives = (expected1, expected2) + assert_any_equal(to_writeable({'a':1,'b':2}), alternatives) + # Fields with underscores discarded + assert_any_equal(to_writeable({'a':1,'b':2, '_c':3}), alternatives) + # Not-string fields discarded + assert_any_equal(to_writeable({'a':1,'b':2, 100:3}), alternatives) + # String fields that are valid Python identifiers discarded + assert_any_equal(to_writeable({'a':1,'b':2, '99':3}), alternatives) + # Object with field names is equivalent + + class klass: + pass + + c = klass + c.a = 1 + c.b = 2 + assert_any_equal(to_writeable(c), alternatives) + # empty list and tuple go to empty array + res = to_writeable([]) + assert_equal(res.shape, (0,)) + assert_equal(res.dtype.type, np.float64) + res = to_writeable(()) + assert_equal(res.shape, (0,)) + assert_equal(res.dtype.type, np.float64) + # None -> None + assert_(to_writeable(None) is None) + # String to strings + assert_equal(to_writeable('a string').dtype.type, np.str_) + # Scalars to numpy to NumPy scalars + res = to_writeable(1) + assert_equal(res.shape, ()) + assert_equal(res.dtype.type, np.array(1).dtype.type) + assert_array_equal(res, 1) + # Empty dict returns EmptyStructMarker + assert_(to_writeable({}) is EmptyStructMarker) + # Object does not have (even empty) __dict__ + 
assert_(to_writeable(object()) is None) + # Custom object does have empty __dict__, returns EmptyStructMarker + + class C: + pass + + assert_(to_writeable(c()) is EmptyStructMarker) + # dict keys with legal characters are convertible + res = to_writeable({'a': 1})['a'] + assert_equal(res.shape, (1,)) + assert_equal(res.dtype.type, np.object_) + # Only fields with illegal characters, falls back to EmptyStruct + assert_(to_writeable({'1':1}) is EmptyStructMarker) + assert_(to_writeable({'_a':1}) is EmptyStructMarker) + # Unless there are valid fields, in which case structured array + assert_equal(to_writeable({'1':1, 'f': 2}), + np.array([(2,)], dtype=[('f', '|O8')])) + + +def test_recarray(): + # check roundtrip of structured array + dt = [('f1', 'f8'), + ('f2', 'S10')] + arr = np.zeros((2,), dtype=dt) + arr[0]['f1'] = 0.5 + arr[0]['f2'] = 'python' + arr[1]['f1'] = 99 + arr[1]['f2'] = 'not perl' + stream = BytesIO() + savemat(stream, {'arr': arr}) + d = loadmat(stream, struct_as_record=False) + a20 = d['arr'][0,0] + assert_equal(a20.f1, 0.5) + assert_equal(a20.f2, 'python') + d = loadmat(stream, struct_as_record=True) + a20 = d['arr'][0,0] + assert_equal(a20['f1'], 0.5) + assert_equal(a20['f2'], 'python') + # structs always come back as object types + assert_equal(a20.dtype, np.dtype([('f1', 'O'), + ('f2', 'O')])) + a21 = d['arr'].flat[1] + assert_equal(a21['f1'], 99) + assert_equal(a21['f2'], 'not perl') + + +def test_save_object(): + class C: + pass + c = C() + c.field1 = 1 + c.field2 = 'a string' + stream = BytesIO() + savemat(stream, {'c': c}) + d = loadmat(stream, struct_as_record=False) + c2 = d['c'][0,0] + assert_equal(c2.field1, 1) + assert_equal(c2.field2, 'a string') + d = loadmat(stream, struct_as_record=True) + c2 = d['c'][0,0] + assert_equal(c2['field1'], 1) + assert_equal(c2['field2'], 'a string') + + +def test_read_opts(): + # tests if read is seeing option sets, at initialization and after + # initialization + arr = np.arange(6).reshape(1,6) + stream = BytesIO() + savemat(stream, {'a': arr}) + rdr = MatFile5Reader(stream) + back_dict = rdr.get_variables() + rarr = back_dict['a'] + assert_array_equal(rarr, arr) + rdr = MatFile5Reader(stream, squeeze_me=True) + assert_array_equal(rdr.get_variables()['a'], arr.reshape((6,))) + rdr.squeeze_me = False + assert_array_equal(rarr, arr) + rdr = MatFile5Reader(stream, byte_order=boc.native_code) + assert_array_equal(rdr.get_variables()['a'], arr) + # inverted byte code leads to error on read because of swapped + # header etc. + rdr = MatFile5Reader(stream, byte_order=boc.swapped_code) + assert_raises(Exception, rdr.get_variables) + rdr.byte_order = boc.native_code + assert_array_equal(rdr.get_variables()['a'], arr) + arr = np.array(['a string']) + stream.truncate(0) + stream.seek(0) + savemat(stream, {'a': arr}) + rdr = MatFile5Reader(stream) + assert_array_equal(rdr.get_variables()['a'], arr) + rdr = MatFile5Reader(stream, chars_as_strings=False) + carr = np.atleast_2d(np.array(list(arr.item()), dtype='U1')) + assert_array_equal(rdr.get_variables()['a'], carr) + rdr.chars_as_strings = True + assert_array_equal(rdr.get_variables()['a'], arr) + + +def test_empty_string(): + # make sure reading empty string does not raise error + estring_fname = pjoin(test_data_path, 'single_empty_string.mat') + fp = open(estring_fname, 'rb') + rdr = MatFile5Reader(fp) + d = rdr.get_variables() + fp.close() + assert_array_equal(d['a'], np.array([], dtype='U1')) + # Empty string round trip. 
Matlab cannot distinguish + # between a string array that is empty, and a string array + # containing a single empty string, because it stores strings as + # arrays of char. There is no way of having an array of char that + # is not empty, but contains an empty string. + stream = BytesIO() + savemat(stream, {'a': np.array([''])}) + rdr = MatFile5Reader(stream) + d = rdr.get_variables() + assert_array_equal(d['a'], np.array([], dtype='U1')) + stream.truncate(0) + stream.seek(0) + savemat(stream, {'a': np.array([], dtype='U1')}) + rdr = MatFile5Reader(stream) + d = rdr.get_variables() + assert_array_equal(d['a'], np.array([], dtype='U1')) + stream.close() + + +def test_corrupted_data(): + import zlib + for exc, fname in [(ValueError, 'corrupted_zlib_data.mat'), + (zlib.error, 'corrupted_zlib_checksum.mat')]: + with open(pjoin(test_data_path, fname), 'rb') as fp: + rdr = MatFile5Reader(fp) + assert_raises(exc, rdr.get_variables) + + +def test_corrupted_data_check_can_be_disabled(): + with open(pjoin(test_data_path, 'corrupted_zlib_data.mat'), 'rb') as fp: + rdr = MatFile5Reader(fp, verify_compressed_data_integrity=False) + rdr.get_variables() + + +def test_read_both_endian(): + # make sure big- and little- endian data is read correctly + for fname in ('big_endian.mat', 'little_endian.mat'): + fp = open(pjoin(test_data_path, fname), 'rb') + rdr = MatFile5Reader(fp) + d = rdr.get_variables() + fp.close() + assert_array_equal(d['strings'], + np.array([['hello'], + ['world']], dtype=object)) + assert_array_equal(d['floats'], + np.array([[2., 3.], + [3., 4.]], dtype=np.float32)) + + +def test_write_opposite_endian(): + # We don't support writing opposite endian .mat files, but we need to behave + # correctly if the user supplies an other-endian NumPy array to write out. 
+ float_arr = np.array([[2., 3.], + [3., 4.]]) + int_arr = np.arange(6).reshape((2, 3)) + uni_arr = np.array(['hello', 'world'], dtype='U') + stream = BytesIO() + savemat(stream, { + 'floats': float_arr.byteswap().view(float_arr.dtype.newbyteorder()), + 'ints': int_arr.byteswap().view(int_arr.dtype.newbyteorder()), + 'uni_arr': uni_arr.byteswap().view(uni_arr.dtype.newbyteorder()), + }) + rdr = MatFile5Reader(stream) + d = rdr.get_variables() + assert_array_equal(d['floats'], float_arr) + assert_array_equal(d['ints'], int_arr) + assert_array_equal(d['uni_arr'], uni_arr) + stream.close() + + +def test_logical_array(): + # The roundtrip test doesn't verify that we load the data up with the + # correct (bool) dtype + with open(pjoin(test_data_path, 'testbool_8_WIN64.mat'), 'rb') as fobj: + rdr = MatFile5Reader(fobj, mat_dtype=True) + d = rdr.get_variables() + x = np.array([[True], [False]], dtype=np.bool_) + assert_array_equal(d['testbools'], x) + assert_equal(d['testbools'].dtype, x.dtype) + + +def test_logical_out_type(): + # Confirm that bool type written as uint8, uint8 class + # See gh-4022 + stream = BytesIO() + barr = np.array([False, True, False]) + savemat(stream, {'barray': barr}) + stream.seek(0) + reader = MatFile5Reader(stream) + reader.initialize_read() + reader.read_file_header() + hdr, _ = reader.read_var_header() + assert_equal(hdr.mclass, mio5p.mxUINT8_CLASS) + assert_equal(hdr.is_logical, True) + var = reader.read_var_array(hdr, False) + assert_equal(var.dtype.type, np.uint8) + + +def test_roundtrip_zero_dimensions(): + stream = BytesIO() + savemat(stream, {'d':np.empty((10, 0))}) + d = loadmat(stream) + assert d['d'].shape == (10, 0) + + +def test_mat4_3d(): + # test behavior when writing 3-D arrays to matlab 4 files + stream = BytesIO() + arr = np.arange(24).reshape((2,3,4)) + assert_raises(ValueError, savemat, stream, {'a': arr}, True, '4') + + +def test_func_read(): + func_eg = pjoin(test_data_path, 'testfunc_7.4_GLNX86.mat') + fp = open(func_eg, 'rb') + rdr = MatFile5Reader(fp) + d = rdr.get_variables() + fp.close() + assert isinstance(d['testfunc'], MatlabFunction) + stream = BytesIO() + wtr = MatFile5Writer(stream) + assert_raises(MatWriteError, wtr.put_variables, d) + + +def test_mat_dtype(): + double_eg = pjoin(test_data_path, 'testmatrix_6.1_SOL2.mat') + fp = open(double_eg, 'rb') + rdr = MatFile5Reader(fp, mat_dtype=False) + d = rdr.get_variables() + fp.close() + assert_equal(d['testmatrix'].dtype.kind, 'u') + + fp = open(double_eg, 'rb') + rdr = MatFile5Reader(fp, mat_dtype=True) + d = rdr.get_variables() + fp.close() + assert_equal(d['testmatrix'].dtype.kind, 'f') + + +def test_sparse_in_struct(): + # reproduces bug found by DC where Cython code was insisting on + # ndarray return type, but getting sparse matrix + st = {'sparsefield': eye_array(4)} + stream = BytesIO() + savemat(stream, {'a':st}) + d = loadmat(stream, struct_as_record=True) + assert_array_equal(d['a'][0, 0]['sparsefield'].toarray(), np.eye(4)) + + +def test_mat_struct_squeeze(): + stream = BytesIO() + in_d = {'st':{'one':1, 'two':2}} + savemat(stream, in_d) + # no error without squeeze + loadmat(stream, struct_as_record=False) + # previous error was with squeeze, with mat_struct + loadmat(stream, struct_as_record=False, squeeze_me=True) + + +def test_scalar_squeeze(): + stream = BytesIO() + in_d = {'scalar': [[0.1]], 'string': 'my name', 'st':{'one':1, 'two':2}} + savemat(stream, in_d) + out_d = loadmat(stream, squeeze_me=True) + assert_(isinstance(out_d['scalar'], float)) + 
assert_(isinstance(out_d['string'], str)) + assert_(isinstance(out_d['st'], np.ndarray)) + + +def test_str_round(): + # from report by Angus McMorland on mailing list 3 May 2010 + stream = BytesIO() + in_arr = np.array(['Hello', 'Foob']) + out_arr = np.array(['Hello', 'Foob ']) + savemat(stream, dict(a=in_arr)) + res = loadmat(stream) + # resulted in ['HloolFoa', 'elWrdobr'] + assert_array_equal(res['a'], out_arr) + stream.truncate(0) + stream.seek(0) + # Make Fortran ordered version of string + in_str = in_arr.tobytes(order='F') + in_from_str = np.ndarray(shape=a.shape, + dtype=in_arr.dtype, + order='F', + buffer=in_str) + savemat(stream, dict(a=in_from_str)) + assert_array_equal(res['a'], out_arr) + # unicode save did lead to buffer too small error + stream.truncate(0) + stream.seek(0) + in_arr_u = in_arr.astype('U') + out_arr_u = out_arr.astype('U') + savemat(stream, {'a': in_arr_u}) + res = loadmat(stream) + assert_array_equal(res['a'], out_arr_u) + + +def test_fieldnames(): + # Check that field names are as expected + stream = BytesIO() + savemat(stream, {'a': {'a':1, 'b':2}}) + res = loadmat(stream) + field_names = res['a'].dtype.names + assert_equal(set(field_names), {'a', 'b'}) + + +def test_loadmat_varnames(): + # Test that we can get just one variable from a mat file using loadmat + mat5_sys_names = ['__globals__', + '__header__', + '__version__'] + for eg_file, sys_v_names in ( + (pjoin(test_data_path, 'testmulti_4.2c_SOL2.mat'), []), (pjoin( + test_data_path, 'testmulti_7.4_GLNX86.mat'), mat5_sys_names)): + vars = loadmat(eg_file) + assert_equal(set(vars.keys()), set(['a', 'theta'] + sys_v_names)) + vars = loadmat(eg_file, variable_names='a') + assert_equal(set(vars.keys()), set(['a'] + sys_v_names)) + vars = loadmat(eg_file, variable_names=['a']) + assert_equal(set(vars.keys()), set(['a'] + sys_v_names)) + vars = loadmat(eg_file, variable_names=['theta']) + assert_equal(set(vars.keys()), set(['theta'] + sys_v_names)) + vars = loadmat(eg_file, variable_names=('theta',)) + assert_equal(set(vars.keys()), set(['theta'] + sys_v_names)) + vars = loadmat(eg_file, variable_names=[]) + assert_equal(set(vars.keys()), set(sys_v_names)) + vnames = ['theta'] + vars = loadmat(eg_file, variable_names=vnames) + assert_equal(vnames, ['theta']) + + +def test_round_types(): + # Check that saving, loading preserves dtype in most cases + arr = np.arange(10) + stream = BytesIO() + for dts in ('f8','f4','i8','i4','i2','i1', + 'u8','u4','u2','u1','c16','c8'): + stream.truncate(0) + stream.seek(0) # needed for BytesIO in Python 3 + savemat(stream, {'arr': arr.astype(dts)}) + vars = loadmat(stream) + assert_equal(np.dtype(dts), vars['arr'].dtype) + + +def test_varmats_from_mat(): + # Make a mat file with several variables, write it, read it back + names_vars = (('arr', mlarr(np.arange(10))), + ('mystr', mlarr('a string')), + ('mynum', mlarr(10))) + + # Dict like thing to give variables in defined order + class C: + def items(self): + return names_vars + stream = BytesIO() + savemat(stream, C()) + varmats = varmats_from_mat(stream) + assert_equal(len(varmats), 3) + for i in range(3): + name, var_stream = varmats[i] + exp_name, exp_res = names_vars[i] + assert_equal(name, exp_name) + res = loadmat(var_stream) + assert_array_equal(res[name], exp_res) + + +def test_one_by_zero(): + # Test 1x0 chars get read correctly + func_eg = pjoin(test_data_path, 'one_by_zero_char.mat') + fp = open(func_eg, 'rb') + rdr = MatFile5Reader(fp) + d = rdr.get_variables() + fp.close() + assert_equal(d['var'].shape, (0,)) + + 
+def test_load_mat4_le(): + # We were getting byte order wrong when reading little-endian floa64 dense + # matrices on big-endian platforms + mat4_fname = pjoin(test_data_path, 'test_mat4_le_floats.mat') + vars = loadmat(mat4_fname) + assert_array_equal(vars['a'], [[0.1, 1.2]]) + + +def test_unicode_mat4(): + # Mat4 should save unicode as latin1 + bio = BytesIO() + var = {'second_cat': 'Schrödinger'} + savemat(bio, var, format='4') + var_back = loadmat(bio) + assert_equal(var_back['second_cat'], var['second_cat']) + + +def test_logical_sparse(): + # Test we can read logical sparse stored in mat file as bytes. + # See https://github.com/scipy/scipy/issues/3539. + # In some files saved by MATLAB, the sparse data elements (Real Part + # Subelement in MATLAB speak) are stored with apparent type double + # (miDOUBLE) but are in fact single bytes. + filename = pjoin(test_data_path,'logical_sparse.mat') + # Before fix, this would crash with: + # ValueError: indices and data should have the same size + d = loadmat(filename, struct_as_record=True, spmatrix=False) + log_sp = d['sp_log_5_4'] + assert_(issparse(log_sp) and log_sp.format == "csc") + assert_equal(log_sp.dtype.type, np.bool_) + assert_array_equal(log_sp.toarray(), + [[True, True, True, False], + [False, False, True, False], + [False, False, True, False], + [False, False, False, False], + [False, False, False, False]]) + + +def test_empty_sparse(): + # Can we read empty sparse matrices? + sio = BytesIO() + import scipy.sparse + empty_sparse = scipy.sparse.csr_array([[0,0],[0,0]]) + savemat(sio, dict(x=empty_sparse)) + sio.seek(0) + + res = loadmat(sio, spmatrix=False) + assert not scipy.sparse.isspmatrix(res['x']) + res = loadmat(sio, spmatrix=True) + assert scipy.sparse.isspmatrix(res['x']) + res = loadmat(sio) # chk default + assert scipy.sparse.isspmatrix(res['x']) + + assert_array_equal(res['x'].shape, empty_sparse.shape) + assert_array_equal(res['x'].toarray(), 0) + # Do empty sparse matrices get written with max nnz 1? 
+ # See https://github.com/scipy/scipy/issues/4208 + sio.seek(0) + reader = MatFile5Reader(sio) + reader.initialize_read() + reader.read_file_header() + hdr, _ = reader.read_var_header() + assert_equal(hdr.nzmax, 1) + + +def test_empty_mat_error(): + # Test we get a specific warning for an empty mat file + sio = BytesIO() + assert_raises(MatReadError, loadmat, sio) + + +def test_miuint32_compromise(): + # Reader should accept miUINT32 for miINT32, but check signs + # mat file with miUINT32 for miINT32, but OK values + filename = pjoin(test_data_path, 'miuint32_for_miint32.mat') + res = loadmat(filename) + assert_equal(res['an_array'], np.arange(10)[None, :]) + # mat file with miUINT32 for miINT32, with negative value + filename = pjoin(test_data_path, 'bad_miuint32.mat') + with assert_raises(ValueError): + loadmat(filename) + + +def test_miutf8_for_miint8_compromise(): + # Check reader accepts ascii as miUTF8 for array names + filename = pjoin(test_data_path, 'miutf8_array_name.mat') + res = loadmat(filename) + assert_equal(res['array_name'], [[1]]) + # mat file with non-ascii utf8 name raises error + filename = pjoin(test_data_path, 'bad_miutf8_array_name.mat') + with assert_raises(ValueError): + loadmat(filename) + + +def test_bad_utf8(): + # Check that reader reads bad UTF with 'replace' option + filename = pjoin(test_data_path,'broken_utf8.mat') + res = loadmat(filename) + assert_equal(res['bad_string'], + b'\x80 am broken'.decode('utf8', 'replace')) + + +def test_save_unicode_field(tmpdir): + filename = os.path.join(str(tmpdir), 'test.mat') + test_dict = {'a':{'b':1,'c':'test_str'}} + savemat(filename, test_dict) + + +def test_save_custom_array_type(tmpdir): + class CustomArray: + def __array__(self, dtype=None, copy=None): + return np.arange(6.0).reshape(2, 3) + a = CustomArray() + filename = os.path.join(str(tmpdir), 'test.mat') + savemat(filename, {'a': a}) + out = loadmat(filename) + assert_array_equal(out['a'], np.array(a)) + + +def test_filenotfound(): + # Check the correct error is thrown + assert_raises(OSError, loadmat, "NotExistentFile00.mat") + assert_raises(OSError, loadmat, "NotExistentFile00") + + +def test_simplify_cells(): + # Test output when simplify_cells=True + filename = pjoin(test_data_path, 'testsimplecell.mat') + res1 = loadmat(filename, simplify_cells=True) + res2 = loadmat(filename, simplify_cells=False) + assert_(isinstance(res1["s"], dict)) + assert_(isinstance(res2["s"], np.ndarray)) + assert_array_equal(res1["s"]["mycell"], np.array(["a", "b", "c"])) + + +@pytest.mark.parametrize('version, filt, regex', [ + (0, '_4*_*', None), + (1, '_5*_*', None), + (1, '_6*_*', None), + (1, '_7*_*', '^((?!hdf5).)*$'), # not containing hdf5 + (2, '_7*_*', '.*hdf5.*'), + (1, '8*_*', None), +]) +def test_matfile_version(version, filt, regex): + use_filt = pjoin(test_data_path, f'test*{filt}.mat') + files = glob(use_filt) + if regex is not None: + files = [file for file in files if re.match(regex, file) is not None] + assert len(files) > 0, \ + f"No files for version {version} using filter {filt}" + for file in files: + got_version = matfile_version(file) + assert got_version[0] == version + + +def test_opaque(): + """Test that we can read a MatlabOpaque object.""" + data = loadmat(pjoin(test_data_path, 'parabola.mat')) + assert isinstance(data['parabola'], MatlabFunction) + assert isinstance(data['parabola'].item()[3].item()[3], MatlabOpaque) + + +def test_opaque_simplify(): + """Test that we can read a MatlabOpaque object when simplify_cells=True.""" + data = 
loadmat(pjoin(test_data_path, 'parabola.mat'), simplify_cells=True) + assert isinstance(data['parabola'], MatlabFunction) + + +def test_deprecation(): + """Test that access to previous attributes still works.""" + # This should be accessible immediately from scipy.io import + with assert_warns(DeprecationWarning): + scipy.io.matlab.mio5_params.MatlabOpaque + + # These should be importable but warn as well + with assert_warns(DeprecationWarning): + from scipy.io.matlab.miobase import MatReadError # noqa: F401 + + +def test_gh_17992(tmp_path): + rng = np.random.default_rng(12345) + outfile = tmp_path / "lists.mat" + array_one = rng.random((5,3)) + array_two = rng.random((6,3)) + list_of_arrays = [array_one, array_two] + # warning suppression only needed for NumPy < 1.24.0 + with np.testing.suppress_warnings() as sup: + sup.filter(VisibleDeprecationWarning) + savemat(outfile, + {'data': list_of_arrays}, + long_field_names=True, + do_compression=True) + # round trip check + new_dict = {} + loadmat(outfile, + new_dict) + assert_allclose(new_dict["data"][0][0], array_one) + assert_allclose(new_dict["data"][0][1], array_two) + + +def test_gh_19659(tmp_path): + d = { + "char_array": np.array([list("char"), list("char")], dtype="U1"), + "string_array": np.array(["string", "string"]), + } + outfile = tmp_path / "tmp.mat" + # should not error: + savemat(outfile, d, format="4") + + +def test_large_m4(): + # Test we can read a Matlab 4 file with array > 2GB. + # (In fact, test we get the correct error from reading a truncated + # version). + # See https://github.com/scipy/scipy/issues/21256 + # Data file is first 1024 bytes of: + # >>> a = np.zeros((134217728, 3)) + # >>> siom.savemat('big_m4.mat', {'a': a}, format='4') + truncated_mat = pjoin(test_data_path, 'debigged_m4.mat') + match = ("Not enough bytes to read matrix 'a';" + if np.intp == np.int64 else + "Variable 'a' has byte length longer than largest possible") + with pytest.raises(ValueError, match=match): + loadmat(truncated_mat) + + +def test_gh_19223(): + from scipy.io.matlab import varmats_from_mat # noqa: F401 + +def test_corrupt_files(): + # Test we can detect truncated or corrupt (all zero) files. 
+ for n in (2, 4, 10, 19): + with pytest.raises(MatReadError, + match="Mat file appears to be truncated"): + loadmat(BytesIO(b'\x00' * n)) + with pytest.raises(MatReadError, + match="Mat file appears to be corrupt"): + loadmat(BytesIO(b'\x00' * 20)) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/test_mio5_utils.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/test_mio5_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..b3f27114c4a4ed10c1a2526058f4d0dbbd0e5638 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/test_mio5_utils.py @@ -0,0 +1,179 @@ +""" Testing mio5_utils Cython module + +""" +import sys + +from io import BytesIO + +import numpy as np + +from numpy.testing import assert_array_equal, assert_equal, assert_ +from pytest import raises as assert_raises + +import scipy.io.matlab._byteordercodes as boc +import scipy.io.matlab._streams as streams +import scipy.io.matlab._mio5_params as mio5p +import scipy.io.matlab._mio5_utils as m5u + + +def test_byteswap(): + for val in ( + 1, + 0x100, + 0x10000): + a = np.array(val, dtype=np.uint32) + b = a.byteswap() + c = m5u.byteswap_u4(a) + assert_equal(b.item(), c) + d = m5u.byteswap_u4(c) + assert_equal(a.item(), d) + + +def _make_tag(base_dt, val, mdtype, sde=False): + ''' Makes a simple matlab tag, full or sde ''' + base_dt = np.dtype(base_dt) + bo = boc.to_numpy_code(base_dt.byteorder) + byte_count = base_dt.itemsize + if not sde: + udt = bo + 'u4' + padding = 8 - (byte_count % 8) + all_dt = [('mdtype', udt), + ('byte_count', udt), + ('val', base_dt)] + if padding: + all_dt.append(('padding', 'u1', padding)) + else: # is sde + udt = bo + 'u2' + padding = 4-byte_count + if bo == '<': # little endian + all_dt = [('mdtype', udt), + ('byte_count', udt), + ('val', base_dt)] + else: # big endian + all_dt = [('byte_count', udt), + ('mdtype', udt), + ('val', base_dt)] + if padding: + all_dt.append(('padding', 'u1', padding)) + tag = np.zeros((1,), dtype=all_dt) + tag['mdtype'] = mdtype + tag['byte_count'] = byte_count + tag['val'] = val + return tag + + +def _write_stream(stream, *strings): + stream.truncate(0) + stream.seek(0) + for s in strings: + stream.write(s) + stream.seek(0) + + +def _make_readerlike(stream, byte_order=boc.native_code): + class R: + pass + r = R() + r.mat_stream = stream + r.byte_order = byte_order + r.struct_as_record = True + r.uint16_codec = sys.getdefaultencoding() + r.chars_as_strings = False + r.mat_dtype = False + r.squeeze_me = False + return r + + +def test_read_tag(): + # mainly to test errors + # make reader-like thing + str_io = BytesIO() + r = _make_readerlike(str_io) + c_reader = m5u.VarReader5(r) + # This works for StringIO but _not_ BytesIO + assert_raises(OSError, c_reader.read_tag) + # bad SDE + tag = _make_tag('i4', 1, mio5p.miINT32, sde=True) + tag['byte_count'] = 5 + _write_stream(str_io, tag.tobytes()) + assert_raises(ValueError, c_reader.read_tag) + + +def test_read_stream(): + tag = _make_tag('i4', 1, mio5p.miINT32, sde=True) + tag_str = tag.tobytes() + str_io = BytesIO(tag_str) + st = streams.make_stream(str_io) + s = streams._read_into(st, tag.itemsize) + assert_equal(s, tag.tobytes()) + + +def test_read_numeric(): + # make reader-like thing + str_io = BytesIO() + r = _make_readerlike(str_io) + # check simplest of tags + for base_dt, val, mdtype in (('u2', 30, mio5p.miUINT16), + 
('i4', 1, mio5p.miINT32), + ('i2', -1, mio5p.miINT16)): + for byte_code in ('<', '>'): + r.byte_order = byte_code + c_reader = m5u.VarReader5(r) + assert_equal(c_reader.little_endian, byte_code == '<') + assert_equal(c_reader.is_swapped, byte_code != boc.native_code) + for sde_f in (False, True): + dt = np.dtype(base_dt).newbyteorder(byte_code) + a = _make_tag(dt, val, mdtype, sde_f) + a_str = a.tobytes() + _write_stream(str_io, a_str) + el = c_reader.read_numeric() + assert_equal(el, val) + # two sequential reads + _write_stream(str_io, a_str, a_str) + el = c_reader.read_numeric() + assert_equal(el, val) + el = c_reader.read_numeric() + assert_equal(el, val) + + +def test_read_numeric_writeable(): + # make reader-like thing + str_io = BytesIO() + r = _make_readerlike(str_io, '<') + c_reader = m5u.VarReader5(r) + dt = np.dtype('' + rdr.mat_stream.read(4) # presumably byte padding + mdict = read_minimat_vars(rdr) + fp.close() + return mdict + + +def test_jottings(): + # example + fname = os.path.join(test_data_path, 'parabola.mat') + read_workspace_vars(fname) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/test_mio_utils.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/test_mio_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..1d19a9797faa2221307a7330b69fffa26410f624 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/test_mio_utils.py @@ -0,0 +1,45 @@ +""" Testing + +""" + +import numpy as np + +from numpy.testing import assert_array_equal, assert_ + +from scipy.io.matlab._mio_utils import squeeze_element, chars_to_strings + + +def test_squeeze_element(): + a = np.zeros((1,3)) + assert_array_equal(np.squeeze(a), squeeze_element(a)) + # 0-D output from squeeze gives scalar + sq_int = squeeze_element(np.zeros((1,1), dtype=float)) + assert_(isinstance(sq_int, float)) + # Unless it's a structured array + sq_sa = squeeze_element(np.zeros((1,1),dtype=[('f1', 'f')])) + assert_(isinstance(sq_sa, np.ndarray)) + # Squeezing empty arrays maintain their dtypes. 
+ sq_empty = squeeze_element(np.empty(0, np.uint8)) + assert sq_empty.dtype == np.uint8 + + +def test_chars_strings(): + # chars as strings + strings = ['learn ', 'python', 'fast ', 'here '] + str_arr = np.array(strings, dtype='U6') # shape (4,) + chars = [list(s) for s in strings] + char_arr = np.array(chars, dtype='U1') # shape (4,6) + assert_array_equal(chars_to_strings(char_arr), str_arr) + ca2d = char_arr.reshape((2,2,6)) + sa2d = str_arr.reshape((2,2)) + assert_array_equal(chars_to_strings(ca2d), sa2d) + ca3d = char_arr.reshape((1,2,2,6)) + sa3d = str_arr.reshape((1,2,2)) + assert_array_equal(chars_to_strings(ca3d), sa3d) + # Fortran ordered arrays + char_arrf = np.array(chars, dtype='U1', order='F') # shape (4,6) + assert_array_equal(chars_to_strings(char_arrf), str_arr) + # empty array + arr = np.array([['']], dtype='U1') + out_arr = np.array([''], dtype='U1') + assert_array_equal(chars_to_strings(arr), out_arr) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/test_miobase.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/test_miobase.py new file mode 100644 index 0000000000000000000000000000000000000000..d8c8eb2a56aaa1d1de77bfb90c859ed0af0b7bc4 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/test_miobase.py @@ -0,0 +1,32 @@ +""" Testing miobase module +""" + +import numpy as np + +from numpy.testing import assert_equal +from pytest import raises as assert_raises + +from scipy.io.matlab._miobase import matdims + + +def test_matdims(): + # Test matdims dimension finder + assert_equal(matdims(np.array(1)), (1, 1)) # NumPy scalar + assert_equal(matdims(np.array([1])), (1, 1)) # 1-D array, 1 element + assert_equal(matdims(np.array([1,2])), (2, 1)) # 1-D array, 2 elements + assert_equal(matdims(np.array([[2],[3]])), (2, 1)) # 2-D array, column vector + assert_equal(matdims(np.array([[2,3]])), (1, 2)) # 2-D array, row vector + # 3d array, rowish vector + assert_equal(matdims(np.array([[[2,3]]])), (1, 1, 2)) + assert_equal(matdims(np.array([])), (0, 0)) # empty 1-D array + assert_equal(matdims(np.array([[]])), (1, 0)) # empty 2-D array + assert_equal(matdims(np.array([[[]]])), (1, 1, 0)) # empty 3-D array + assert_equal(matdims(np.empty((1, 0, 1))), (1, 0, 1)) # empty 3-D array + # Optional argument flips 1-D shape behavior. 
+ assert_equal(matdims(np.array([1,2]), 'row'), (1, 2)) # 1-D array, 2 elements + # The argument has to make sense though + assert_raises(ValueError, matdims, np.array([1,2]), 'bizarre') + # Check empty sparse matrices get their own shape + from scipy.sparse import csr_array, csc_array + assert_equal(matdims(csr_array(np.zeros((3, 3)))), (3, 3)) + assert_equal(matdims(csc_array(np.zeros((2, 2)))), (2, 2)) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/test_pathological.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/test_pathological.py new file mode 100644 index 0000000000000000000000000000000000000000..c5c86decb7e90f69f293e90eba74fb47dd4f1277 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/test_pathological.py @@ -0,0 +1,33 @@ +""" Test reading of files not conforming to matlab specification + +We try and read any file that matlab reads, these files included +""" +from os.path import dirname, join as pjoin + +from numpy.testing import assert_ +from pytest import raises as assert_raises + +from scipy.io.matlab._mio import loadmat + +TEST_DATA_PATH = pjoin(dirname(__file__), 'data') + + +def test_multiple_fieldnames(): + # Example provided by Dharhas Pothina + # Extracted using mio5.varmats_from_mat + multi_fname = pjoin(TEST_DATA_PATH, 'nasty_duplicate_fieldnames.mat') + vars = loadmat(multi_fname) + funny_names = vars['Summary'].dtype.names + assert_({'_1_Station_Q', '_2_Station_Q', + '_3_Station_Q'}.issubset(funny_names)) + + +def test_malformed1(): + # Example from gh-6072 + # Contains malformed header data, which previously resulted into a + # buffer overflow. 
+ # + # Should raise an exception, not segfault + fname = pjoin(TEST_DATA_PATH, 'malformed1.mat') + with open(fname, 'rb') as f: + assert_raises(ValueError, loadmat, f) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/test_streams.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/test_streams.py new file mode 100644 index 0000000000000000000000000000000000000000..d8768d8e9251c6e47debeb65dff3ec056d38ee56 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/matlab/tests/test_streams.py @@ -0,0 +1,232 @@ +""" Testing + +""" + +import os +import zlib + +from io import BytesIO + + +from tempfile import mkstemp +from contextlib import contextmanager + +import numpy as np + +from numpy.testing import assert_, assert_equal +from pytest import raises as assert_raises + +from scipy.io.matlab._streams import (make_stream, + GenericStream, ZlibInputStream, + _read_into, _read_string, BLOCK_SIZE) + + +@contextmanager +def setup_test_file(): + val = b'a\x00string' + fd, fname = mkstemp() + + with os.fdopen(fd, 'wb') as fs: + fs.write(val) + with open(fname, 'rb') as fs: + gs = BytesIO(val) + cs = BytesIO(val) + yield fs, gs, cs + os.unlink(fname) + + +def test_make_stream(): + with setup_test_file() as (fs, gs, cs): + # test stream initialization + assert_(isinstance(make_stream(gs), GenericStream)) + + +def test_tell_seek(): + with setup_test_file() as (fs, gs, cs): + for s in (fs, gs, cs): + st = make_stream(s) + res = st.seek(0) + assert_equal(res, 0) + assert_equal(st.tell(), 0) + res = st.seek(5) + assert_equal(res, 0) + assert_equal(st.tell(), 5) + res = st.seek(2, 1) + assert_equal(res, 0) + assert_equal(st.tell(), 7) + res = st.seek(-2, 2) + assert_equal(res, 0) + assert_equal(st.tell(), 6) + + +def test_read(): + with setup_test_file() as (fs, gs, cs): + for s in (fs, gs, cs): + st = make_stream(s) + st.seek(0) + res = st.read(-1) + assert_equal(res, b'a\x00string') + st.seek(0) + res = st.read(4) + assert_equal(res, b'a\x00st') + # read into + st.seek(0) + res = _read_into(st, 4) + assert_equal(res, b'a\x00st') + res = _read_into(st, 4) + assert_equal(res, b'ring') + assert_raises(OSError, _read_into, st, 2) + # read alloc + st.seek(0) + res = _read_string(st, 4) + assert_equal(res, b'a\x00st') + res = _read_string(st, 4) + assert_equal(res, b'ring') + assert_raises(OSError, _read_string, st, 2) + + +class TestZlibInputStream: + def _get_data(self, size): + data = np.random.randint(0, 256, size).astype(np.uint8).tobytes() + compressed_data = zlib.compress(data) + stream = BytesIO(compressed_data) + return stream, len(compressed_data), data + + def test_read(self): + SIZES = [0, 1, 10, BLOCK_SIZE//2, BLOCK_SIZE-1, + BLOCK_SIZE, BLOCK_SIZE+1, 2*BLOCK_SIZE-1] + + READ_SIZES = [BLOCK_SIZE//2, BLOCK_SIZE-1, + BLOCK_SIZE, BLOCK_SIZE+1] + + def check(size, read_size): + compressed_stream, compressed_data_len, data = self._get_data(size) + stream = ZlibInputStream(compressed_stream, compressed_data_len) + data2 = b'' + so_far = 0 + while True: + block = stream.read(min(read_size, + size - so_far)) + if not block: + break + so_far += len(block) + data2 += block + assert_equal(data, data2) + + for size in SIZES: + for read_size in READ_SIZES: + check(size, read_size) + + def test_read_max_length(self): + size = 1234 + data = np.random.randint(0, 256, size).astype(np.uint8).tobytes() + compressed_data = zlib.compress(data) + 
compressed_stream = BytesIO(compressed_data + b"abbacaca") + stream = ZlibInputStream(compressed_stream, len(compressed_data)) + + stream.read(len(data)) + assert_equal(compressed_stream.tell(), len(compressed_data)) + + assert_raises(OSError, stream.read, 1) + + def test_read_bad_checksum(self): + data = np.random.randint(0, 256, 10).astype(np.uint8).tobytes() + compressed_data = zlib.compress(data) + + # break checksum + compressed_data = (compressed_data[:-1] + + bytes([(compressed_data[-1] + 1) & 255])) + + compressed_stream = BytesIO(compressed_data) + stream = ZlibInputStream(compressed_stream, len(compressed_data)) + + assert_raises(zlib.error, stream.read, len(data)) + + def test_seek(self): + compressed_stream, compressed_data_len, data = self._get_data(1024) + + stream = ZlibInputStream(compressed_stream, compressed_data_len) + + stream.seek(123) + p = 123 + assert_equal(stream.tell(), p) + d1 = stream.read(11) + assert_equal(d1, data[p:p+11]) + + stream.seek(321, 1) + p = 123+11+321 + assert_equal(stream.tell(), p) + d2 = stream.read(21) + assert_equal(d2, data[p:p+21]) + + stream.seek(641, 0) + p = 641 + assert_equal(stream.tell(), p) + d3 = stream.read(11) + assert_equal(d3, data[p:p+11]) + + assert_raises(OSError, stream.seek, 10, 2) + assert_raises(OSError, stream.seek, -1, 1) + assert_raises(ValueError, stream.seek, 1, 123) + + stream.seek(10000, 1) + assert_raises(OSError, stream.read, 12) + + def test_seek_bad_checksum(self): + data = np.random.randint(0, 256, 10).astype(np.uint8).tobytes() + compressed_data = zlib.compress(data) + + # break checksum + compressed_data = (compressed_data[:-1] + + bytes([(compressed_data[-1] + 1) & 255])) + + compressed_stream = BytesIO(compressed_data) + stream = ZlibInputStream(compressed_stream, len(compressed_data)) + + assert_raises(zlib.error, stream.seek, len(data)) + + def test_all_data_read(self): + compressed_stream, compressed_data_len, data = self._get_data(1024) + stream = ZlibInputStream(compressed_stream, compressed_data_len) + assert_(not stream.all_data_read()) + stream.seek(512) + assert_(not stream.all_data_read()) + stream.seek(1024) + assert_(stream.all_data_read()) + + def test_all_data_read_overlap(self): + COMPRESSION_LEVEL = 6 + + data = np.arange(33707000).astype(np.uint8).tobytes() + compressed_data = zlib.compress(data, COMPRESSION_LEVEL) + compressed_data_len = len(compressed_data) + + # check that part of the checksum overlaps + assert_(compressed_data_len == BLOCK_SIZE + 2) + + compressed_stream = BytesIO(compressed_data) + stream = ZlibInputStream(compressed_stream, compressed_data_len) + assert_(not stream.all_data_read()) + stream.seek(len(data)) + assert_(stream.all_data_read()) + + def test_all_data_read_bad_checksum(self): + COMPRESSION_LEVEL = 6 + + data = np.arange(33707000).astype(np.uint8).tobytes() + compressed_data = zlib.compress(data, COMPRESSION_LEVEL) + compressed_data_len = len(compressed_data) + + # check that part of the checksum overlaps + assert_(compressed_data_len == BLOCK_SIZE + 2) + + # break checksum + compressed_data = (compressed_data[:-1] + + bytes([(compressed_data[-1] + 1) & 255])) + + compressed_stream = BytesIO(compressed_data) + stream = ZlibInputStream(compressed_stream, compressed_data_len) + assert_(not stream.all_data_read()) + stream.seek(len(data)) + + assert_raises(zlib.error, stream.all_data_read) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/mmio.py 
b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/mmio.py new file mode 100644 index 0000000000000000000000000000000000000000..67cf0684cbf9468468027957a5b7f3da2c43c845 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/mmio.py @@ -0,0 +1,17 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.io` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + +__all__ = ["mminfo", "mmread", "mmwrite"] # noqa: F822 + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="io", module="mmio", + private_modules=["_mmio"], all=__all__, + attribute=name) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/netcdf.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/netcdf.py new file mode 100644 index 0000000000000000000000000000000000000000..c1f119dd2bad72d772c3d1db6ceec9fd3d91316d --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/netcdf.py @@ -0,0 +1,17 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.io` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + +__all__ = ["netcdf_file", "netcdf_variable"] # noqa: F822 + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="io", module="netcdf", + private_modules=["_netcdf"], all=__all__, + attribute=name) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/__init__.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/Transparent Busy.ani b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/Transparent Busy.ani new file mode 100644 index 0000000000000000000000000000000000000000..3be500032786398c3efdbd9f873f705b6c1636bd Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/Transparent Busy.ani differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_1d.sav b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_1d.sav new file mode 100644 index 0000000000000000000000000000000000000000..619a1259670a361ac76ffa86c481a813dbaec07a Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_1d.sav differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_2d.sav b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_2d.sav new file mode 100644 index 0000000000000000000000000000000000000000..804d8b1a8a90636c880e974b6f85bd385033306b Binary files /dev/null and 
b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_2d.sav differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_3d.sav b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_3d.sav new file mode 100644 index 0000000000000000000000000000000000000000..3fa56c450eaa916d9c91b492ba17e7e843df2d53 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_3d.sav differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_4d.sav b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_4d.sav new file mode 100644 index 0000000000000000000000000000000000000000..4bb951e274a399f091ff70b639d6e3b55ee1e122 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_4d.sav differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_5d.sav b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_5d.sav new file mode 100644 index 0000000000000000000000000000000000000000..2854dbc8b1e53f298ac3b135eac1f06e73940152 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_5d.sav differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_6d.sav b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_6d.sav new file mode 100644 index 0000000000000000000000000000000000000000..91588d348d5f89af354209840062202d5b28c1df Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_6d.sav differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_7d.sav b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_7d.sav new file mode 100644 index 0000000000000000000000000000000000000000..3e978fad540a8979435d4561de151573696affd8 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_7d.sav differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_8d.sav b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_8d.sav new file mode 100644 index 0000000000000000000000000000000000000000..f699fe2427dfe876283de0fcade2c2325a262061 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_8d.sav differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_pointer_1d.sav b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_pointer_1d.sav new file mode 100644 index 
0000000000000000000000000000000000000000..8e3a402c60a515149811e2ca21628e97180c4956 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_pointer_1d.sav differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_pointer_2d.sav b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_pointer_2d.sav new file mode 100644 index 0000000000000000000000000000000000000000..dd3504f0ecfaed178ace02e1a8a84650111c3936 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_pointer_2d.sav differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_pointer_3d.sav b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_pointer_3d.sav new file mode 100644 index 0000000000000000000000000000000000000000..285da7f78ffbbf2155fd2e4e648f19a1d3a42ac3 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_pointer_3d.sav differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_pointer_4d.sav b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_pointer_4d.sav new file mode 100644 index 0000000000000000000000000000000000000000..d99fa48f0a43ec06c3101560f9cade829c8b1940 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_pointer_4d.sav differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_pointer_5d.sav b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_pointer_5d.sav new file mode 100644 index 0000000000000000000000000000000000000000..de5e984e49f507ae550b1ae2fd54b799e742a195 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_pointer_5d.sav differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_pointer_6d.sav b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_pointer_6d.sav new file mode 100644 index 0000000000000000000000000000000000000000..bb76671a65be41fd2a426146c6c366f1e7fb07c3 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_pointer_6d.sav differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_pointer_7d.sav b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_pointer_7d.sav new file mode 100644 index 0000000000000000000000000000000000000000..995d23c6ed05b095442b6247b09191126f797f23 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_pointer_7d.sav differ diff --git 
a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_pointer_8d.sav b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_pointer_8d.sav new file mode 100644 index 0000000000000000000000000000000000000000..4249ec62119e264d55a81d3faf9c87dcaed1c7c8 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_pointer_8d.sav differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/example_1.nc b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/example_1.nc new file mode 100644 index 0000000000000000000000000000000000000000..5775622d0ef85828b436dffcd21366f7538fc55c Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/example_1.nc differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/example_2.nc b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/example_2.nc new file mode 100644 index 0000000000000000000000000000000000000000..07db1cd986a4c3b9929c01c1f22bcc3f562b1c16 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/example_2.nc differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/example_3_maskedvals.nc b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/example_3_maskedvals.nc new file mode 100644 index 0000000000000000000000000000000000000000..57f8bf9da3bca295c15508963c77a870222af0bc Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/example_3_maskedvals.nc differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/fortran-3x3d-2i.dat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/fortran-3x3d-2i.dat new file mode 100644 index 0000000000000000000000000000000000000000..87731eb9d4b1f2ac827a212436fe6de175431e11 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/fortran-3x3d-2i.dat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/fortran-mixed.dat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/fortran-mixed.dat new file mode 100644 index 0000000000000000000000000000000000000000..a165a7a30424b20af9a3a0636c5e655239ea6fa5 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/fortran-mixed.dat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/fortran-sf8-11x1x10.dat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/fortran-sf8-11x1x10.dat new file mode 100644 index 0000000000000000000000000000000000000000..c3bb9dcbe50ef784ce3282b28e53f4c40beb48ce Binary files /dev/null and 
b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/fortran-sf8-11x1x10.dat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/fortran-sf8-15x10x22.dat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/fortran-sf8-15x10x22.dat new file mode 100644 index 0000000000000000000000000000000000000000..351801fd47a2e3e48d9b63034fbae28f8318c9f9 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/fortran-sf8-15x10x22.dat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/fortran-sf8-1x1x1.dat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/fortran-sf8-1x1x1.dat new file mode 100644 index 0000000000000000000000000000000000000000..64bf92f74a457d2f4bc42798493db15cc3ab1008 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/fortran-sf8-1x1x1.dat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/fortran-sf8-1x1x5.dat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/fortran-sf8-1x1x5.dat new file mode 100644 index 0000000000000000000000000000000000000000..3d3f27f88eef4e02451d18204cdcfd51f96f6d15 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/fortran-sf8-1x1x5.dat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/fortran-sf8-1x1x7.dat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/fortran-sf8-1x1x7.dat new file mode 100644 index 0000000000000000000000000000000000000000..0bd683096f18eadceb7168f811c75bf072baecfe Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/fortran-sf8-1x1x7.dat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/fortran-sf8-1x3x5.dat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/fortran-sf8-1x3x5.dat new file mode 100644 index 0000000000000000000000000000000000000000..25269ff9ea4f6dd3f8a9ca0c8ad27d399e4248f5 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/fortran-sf8-1x3x5.dat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/fortran-si4-11x1x10.dat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/fortran-si4-11x1x10.dat new file mode 100644 index 0000000000000000000000000000000000000000..9850de37cf86af622b759625c15e6b1a9477ce47 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/fortran-si4-11x1x10.dat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/fortran-si4-15x10x22.dat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/fortran-si4-15x10x22.dat new file mode 100644 
index 0000000000000000000000000000000000000000..98c09c2dff6e1ef605e25ed1d00afe94597abddc Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/fortran-si4-15x10x22.dat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/fortran-si4-1x1x1.dat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/fortran-si4-1x1x1.dat new file mode 100644 index 0000000000000000000000000000000000000000..959098d2a9cdd6140758843e059d4ca529b14279 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/fortran-si4-1x1x1.dat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/fortran-si4-1x1x5.dat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/fortran-si4-1x1x5.dat new file mode 100644 index 0000000000000000000000000000000000000000..49c0ec1d18d9f08111fe2d2a269ed407da71b158 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/fortran-si4-1x1x5.dat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/fortran-si4-1x1x7.dat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/fortran-si4-1x1x7.dat new file mode 100644 index 0000000000000000000000000000000000000000..bb936b8789920ce18281fa754a5c048b31e59ba8 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/fortran-si4-1x1x7.dat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/fortran-si4-1x3x5.dat b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/fortran-si4-1x3x5.dat new file mode 100644 index 0000000000000000000000000000000000000000..cb3e9e4876249f42924a43232b74f05b91123815 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/fortran-si4-1x3x5.dat differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/invalid_pointer.sav b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/invalid_pointer.sav new file mode 100644 index 0000000000000000000000000000000000000000..d53893c6c734e6c7771e08042c16874623dc6f0e Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/invalid_pointer.sav differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/null_pointer.sav b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/null_pointer.sav new file mode 100644 index 0000000000000000000000000000000000000000..8cee5ebecc3bef248ed37c438e0731160b31a310 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/null_pointer.sav differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/scalar_byte.sav 
b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/scalar_byte.sav new file mode 100644 index 0000000000000000000000000000000000000000..e4027b3cf302b8610b87d9ef8b0aac39d5a40ef9 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/scalar_byte.sav differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/scalar_byte_descr.sav b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/scalar_byte_descr.sav new file mode 100644 index 0000000000000000000000000000000000000000..182e29bc57dc05154388553a71876820025bca8d Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/scalar_byte_descr.sav differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/scalar_complex32.sav b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/scalar_complex32.sav new file mode 100644 index 0000000000000000000000000000000000000000..593e8c6208ab0bf3aa869de89e213b8aa9f8c071 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/scalar_complex32.sav differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/scalar_complex64.sav b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/scalar_complex64.sav new file mode 100644 index 0000000000000000000000000000000000000000..edb19d388afbaff44e5f0883978e6a74e9755613 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/scalar_complex64.sav differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/scalar_float32.sav b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/scalar_float32.sav new file mode 100644 index 0000000000000000000000000000000000000000..be9e3877ea845da76d9466c14d70c4cce882368c Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/scalar_float32.sav differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/scalar_float64.sav b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/scalar_float64.sav new file mode 100644 index 0000000000000000000000000000000000000000..9680b2878c6008a27c8fc9ae6966903ff936cc4a Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/scalar_float64.sav differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/scalar_heap_pointer.sav b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/scalar_heap_pointer.sav new file mode 100644 index 0000000000000000000000000000000000000000..d02b1756ac043a4ba6119acb28ef34c40359a4dd Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/scalar_heap_pointer.sav differ diff --git 
a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/scalar_int16.sav b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/scalar_int16.sav new file mode 100644 index 0000000000000000000000000000000000000000..603525694cc307d47412717c4c2f85ddc960897b Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/scalar_int16.sav differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/scalar_int32.sav b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/scalar_int32.sav new file mode 100644 index 0000000000000000000000000000000000000000..40210b889402c0f27562296ab39ce1a714f0d0ef Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/scalar_int32.sav differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/scalar_int64.sav b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/scalar_int64.sav new file mode 100644 index 0000000000000000000000000000000000000000..c91cd0a561e011a2f18c86119e45392fbc0be825 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/scalar_int64.sav differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/scalar_string.sav b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/scalar_string.sav new file mode 100644 index 0000000000000000000000000000000000000000..ee6e69fe8461edfa580f682761118c8afe2add3a Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/scalar_string.sav differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/scalar_uint16.sav b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/scalar_uint16.sav new file mode 100644 index 0000000000000000000000000000000000000000..759c2e64fa034c6ddbdbe6181efae1e699a0c314 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/scalar_uint16.sav differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/scalar_uint32.sav b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/scalar_uint32.sav new file mode 100644 index 0000000000000000000000000000000000000000..74dec7b8933418d30d17c83d617443a73ceef0c6 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/scalar_uint32.sav differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/scalar_uint64.sav b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/scalar_uint64.sav new file mode 100644 index 0000000000000000000000000000000000000000..fc9da5796eab6ce9fb59488b836ba2f567de7b25 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/scalar_uint64.sav differ diff --git 
a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/struct_arrays.sav b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/struct_arrays.sav new file mode 100644 index 0000000000000000000000000000000000000000..40c9cd330e0c731968d71dbbfeae9bd8c4a745a2 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/struct_arrays.sav differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/struct_arrays_byte_idl80.sav b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/struct_arrays_byte_idl80.sav new file mode 100644 index 0000000000000000000000000000000000000000..f1aa416f8e661893be282a490005536953d4b7af Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/struct_arrays_byte_idl80.sav differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/struct_arrays_replicated.sav b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/struct_arrays_replicated.sav new file mode 100644 index 0000000000000000000000000000000000000000..6f01fbfd109e76c94b6e6e9bfd9eb388f39d99ee Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/struct_arrays_replicated.sav differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/struct_arrays_replicated_3d.sav b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/struct_arrays_replicated_3d.sav new file mode 100644 index 0000000000000000000000000000000000000000..bac9b207488eb9712ec27fb3567155f0dd773f34 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/struct_arrays_replicated_3d.sav differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/struct_inherit.sav b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/struct_inherit.sav new file mode 100644 index 0000000000000000000000000000000000000000..8babd56306f09fa612f731ce593ae13c75f84f4c Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/struct_inherit.sav differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/struct_pointer_arrays.sav b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/struct_pointer_arrays.sav new file mode 100644 index 0000000000000000000000000000000000000000..a3c678162911426702a9a6e932761385a01f247e Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/struct_pointer_arrays.sav differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/struct_pointer_arrays_replicated.sav b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/struct_pointer_arrays_replicated.sav new file mode 100644 index 
0000000000000000000000000000000000000000..38b812261125e6aabef8618955b234f6c7b04955 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/struct_pointer_arrays_replicated.sav differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/struct_pointer_arrays_replicated_3d.sav b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/struct_pointer_arrays_replicated_3d.sav new file mode 100644 index 0000000000000000000000000000000000000000..db1c256c85a707f0a0d78c28241b78d1eddcab1e Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/struct_pointer_arrays_replicated_3d.sav differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/struct_pointers.sav b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/struct_pointers.sav new file mode 100644 index 0000000000000000000000000000000000000000..acbb058a307090f6c9e2d8402c7badf6bb48144c Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/struct_pointers.sav differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/struct_pointers_replicated.sav b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/struct_pointers_replicated.sav new file mode 100644 index 0000000000000000000000000000000000000000..d16f4655cc20318db2b0d629cd5ed6d7be01b518 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/struct_pointers_replicated.sav differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/struct_pointers_replicated_3d.sav b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/struct_pointers_replicated_3d.sav new file mode 100644 index 0000000000000000000000000000000000000000..732dd2cbfa9c7fd029bb59b4cfcb630cc1077f54 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/struct_pointers_replicated_3d.sav differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/struct_scalars.sav b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/struct_scalars.sav new file mode 100644 index 0000000000000000000000000000000000000000..69d7eaf4ecf8747c21d07e14edcf65b4e394974c Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/struct_scalars.sav differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/struct_scalars_replicated.sav b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/struct_scalars_replicated.sav new file mode 100644 index 0000000000000000000000000000000000000000..2222391ae5b93ba34c1fdb982c02eb97d9658b58 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/struct_scalars_replicated.sav differ diff --git 
a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/struct_scalars_replicated_3d.sav b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/struct_scalars_replicated_3d.sav new file mode 100644 index 0000000000000000000000000000000000000000..a35f1acfb4cb93ecb637310bbfa7fc1a2151d483 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/struct_scalars_replicated_3d.sav differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/various_compressed.sav b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/various_compressed.sav new file mode 100644 index 0000000000000000000000000000000000000000..dcdb0b0d433939d6a240c86e5060214cd8875732 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/data/various_compressed.sav differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/test_fortran.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/test_fortran.py new file mode 100644 index 0000000000000000000000000000000000000000..e6e2ecdb8cd332a0a7806bdbc442c66124225077 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/test_fortran.py @@ -0,0 +1,264 @@ +''' Tests for fortran sequential files ''' + +import tempfile +import shutil +import os +from os import path +from glob import iglob +import threading +import re + +from numpy.testing import assert_equal, assert_allclose +import numpy as np +import pytest + +from scipy.io import (FortranFile, + _test_fortran, + FortranEOFError, + FortranFormattingError) + + +DATA_PATH = path.join(path.dirname(__file__), 'data') + + +@pytest.fixture +def io_lock(): + return threading.Lock() + + +def test_fortranfiles_read(io_lock): + for filename in iglob(path.join(DATA_PATH, "fortran-*-*x*x*.dat")): + m = re.search(r'fortran-([^-]+)-(\d+)x(\d+)x(\d+).dat', filename, re.I) + if not m: + raise RuntimeError(f"Couldn't match {filename} filename to regex") + + dims = (int(m.group(2)), int(m.group(3)), int(m.group(4))) + + dtype = m.group(1).replace('s', '<') + + with io_lock: + f = FortranFile(filename, 'r', ' 0] = 1 + info = (2, 2, 3, 'coordinate', 'pattern', 'general') + mmwrite(self.fn, a, field='pattern') + assert_equal(mminfo(self.fn), info) + b = mmread(self.fn, spmatrix=False) + assert_array_almost_equal(p, b.toarray()) + assert not scipy.sparse.isspmatrix(b) + + b = mmread(self.fn, spmatrix=True) + assert scipy.sparse.isspmatrix(b) + b = mmread(self.fn) # chk default + assert scipy.sparse.isspmatrix(b) + + def test_gh13634_non_skew_symmetric_int(self): + a = scipy.sparse.csr_array([[1, 2], [-2, 99]], dtype=np.int32) + self.check_exact(a, (2, 2, 4, 'coordinate', 'integer', 'general')) + + def test_gh13634_non_skew_symmetric_float(self): + a = scipy.sparse.csr_array([[1, 2], [-2, 99.]], dtype=np.float32) + self.check(a, (2, 2, 4, 'coordinate', 'real', 'general')) + + +_32bit_integer_dense_example = '''\ +%%MatrixMarket matrix array integer general +2 2 +2147483647 +2147483646 +2147483647 +2147483646 +''' + +_32bit_integer_sparse_example = '''\ +%%MatrixMarket matrix coordinate integer symmetric +2 2 2 +1 1 2147483647 +2 2 2147483646 +''' + +_64bit_integer_dense_example = '''\ 
+%%MatrixMarket matrix array integer general +2 2 + 2147483648 +-9223372036854775806 + -2147483648 + 9223372036854775807 +''' + +_64bit_integer_sparse_general_example = '''\ +%%MatrixMarket matrix coordinate integer general +2 2 3 +1 1 2147483648 +1 2 9223372036854775807 +2 2 9223372036854775807 +''' + +_64bit_integer_sparse_symmetric_example = '''\ +%%MatrixMarket matrix coordinate integer symmetric +2 2 3 +1 1 2147483648 +1 2 -9223372036854775807 +2 2 9223372036854775807 +''' + +_64bit_integer_sparse_skew_example = '''\ +%%MatrixMarket matrix coordinate integer skew-symmetric +2 2 3 +1 1 2147483648 +1 2 -9223372036854775807 +2 2 9223372036854775807 +''' + +_over64bit_integer_dense_example = '''\ +%%MatrixMarket matrix array integer general +2 2 + 2147483648 +9223372036854775807 + 2147483648 +9223372036854775808 +''' + +_over64bit_integer_sparse_example = '''\ +%%MatrixMarket matrix coordinate integer symmetric +2 2 2 +1 1 2147483648 +2 2 19223372036854775808 +''' + + +class TestMMIOReadLargeIntegers: + def setup_method(self): + self.tmpdir = mkdtemp(suffix=str(threading.get_native_id())) + self.fn = os.path.join(self.tmpdir, 'testfile.mtx') + + def teardown_method(self): + shutil.rmtree(self.tmpdir) + + def check_read(self, example, a, info, dense, over32, over64): + with open(self.fn, 'w') as f: + f.write(example) + assert_equal(mminfo(self.fn), info) + if ((over32 and (np.intp(0).itemsize < 8) and mmwrite == scipy.io._mmio.mmwrite) + or over64): + assert_raises(OverflowError, mmread, self.fn) + else: + b = mmread(self.fn, spmatrix=False) + if not dense: + b = b.toarray() + assert_equal(a, b) + + def test_read_32bit_integer_dense(self): + a = array([[2**31-1, 2**31-1], + [2**31-2, 2**31-2]], dtype=np.int64) + self.check_read(_32bit_integer_dense_example, + a, + (2, 2, 4, 'array', 'integer', 'general'), + dense=True, + over32=False, + over64=False) + + def test_read_32bit_integer_sparse(self): + a = array([[2**31-1, 0], + [0, 2**31-2]], dtype=np.int64) + self.check_read(_32bit_integer_sparse_example, + a, + (2, 2, 2, 'coordinate', 'integer', 'symmetric'), + dense=False, + over32=False, + over64=False) + + def test_read_64bit_integer_dense(self): + a = array([[2**31, -2**31], + [-2**63+2, 2**63-1]], dtype=np.int64) + self.check_read(_64bit_integer_dense_example, + a, + (2, 2, 4, 'array', 'integer', 'general'), + dense=True, + over32=True, + over64=False) + + def test_read_64bit_integer_sparse_general(self): + a = array([[2**31, 2**63-1], + [0, 2**63-1]], dtype=np.int64) + self.check_read(_64bit_integer_sparse_general_example, + a, + (2, 2, 3, 'coordinate', 'integer', 'general'), + dense=False, + over32=True, + over64=False) + + def test_read_64bit_integer_sparse_symmetric(self): + a = array([[2**31, -2**63+1], + [-2**63+1, 2**63-1]], dtype=np.int64) + self.check_read(_64bit_integer_sparse_symmetric_example, + a, + (2, 2, 3, 'coordinate', 'integer', 'symmetric'), + dense=False, + over32=True, + over64=False) + + def test_read_64bit_integer_sparse_skew(self): + a = array([[2**31, -2**63+1], + [2**63-1, 2**63-1]], dtype=np.int64) + self.check_read(_64bit_integer_sparse_skew_example, + a, + (2, 2, 3, 'coordinate', 'integer', 'skew-symmetric'), + dense=False, + over32=True, + over64=False) + + def test_read_over64bit_integer_dense(self): + self.check_read(_over64bit_integer_dense_example, + None, + (2, 2, 4, 'array', 'integer', 'general'), + dense=True, + over32=True, + over64=True) + + def test_read_over64bit_integer_sparse(self): + self.check_read(_over64bit_integer_sparse_example, + None, + 
(2, 2, 2, 'coordinate', 'integer', 'symmetric'), + dense=False, + over32=True, + over64=True) + + +_general_example = '''\ +%%MatrixMarket matrix coordinate real general +%================================================================================= +% +% This ASCII file represents a sparse MxN matrix with L +% nonzeros in the following Matrix Market format: +% +% +----------------------------------------------+ +% |%%MatrixMarket matrix coordinate real general | <--- header line +% |% | <--+ +% |% comments | |-- 0 or more comment lines +% |% | <--+ +% | M N L | <--- rows, columns, entries +% | I1 J1 A(I1, J1) | <--+ +% | I2 J2 A(I2, J2) | | +% | I3 J3 A(I3, J3) | |-- L lines +% | . . . | | +% | IL JL A(IL, JL) | <--+ +% +----------------------------------------------+ +% +% Indices are 1-based, i.e. A(1,1) is the first element. +% +%================================================================================= + 5 5 8 + 1 1 1.000e+00 + 2 2 1.050e+01 + 3 3 1.500e-02 + 1 4 6.000e+00 + 4 2 2.505e+02 + 4 4 -2.800e+02 + 4 5 3.332e+01 + 5 5 1.200e+01 +''' + +_hermitian_example = '''\ +%%MatrixMarket matrix coordinate complex hermitian + 5 5 7 + 1 1 1.0 0 + 2 2 10.5 0 + 4 2 250.5 22.22 + 3 3 1.5e-2 0 + 4 4 -2.8e2 0 + 5 5 12. 0 + 5 4 0 33.32 +''' + +_skew_example = '''\ +%%MatrixMarket matrix coordinate real skew-symmetric + 5 5 7 + 1 1 1.0 + 2 2 10.5 + 4 2 250.5 + 3 3 1.5e-2 + 4 4 -2.8e2 + 5 5 12. + 5 4 0 +''' + +_symmetric_example = '''\ +%%MatrixMarket matrix coordinate real symmetric + 5 5 7 + 1 1 1.0 + 2 2 10.5 + 4 2 250.5 + 3 3 1.5e-2 + 4 4 -2.8e2 + 5 5 12. + 5 4 8 +''' + +_symmetric_pattern_example = '''\ +%%MatrixMarket matrix coordinate pattern symmetric + 5 5 7 + 1 1 + 2 2 + 4 2 + 3 3 + 4 4 + 5 5 + 5 4 +''' + +# example (without comment lines) from Figure 1 in +# https://math.nist.gov/MatrixMarket/reports/MMformat.ps +_empty_lines_example = '''\ +%%MatrixMarket MATRIX Coordinate Real General + + 5 5 8 + +1 1 1.0 +2 2 10.5 +3 3 1.5e-2 +4 4 -2.8E2 +5 5 12. 
+ 1 4 6 + 4 2 250.5 + 4 5 33.32 + +''' + + +class TestMMIOCoordinate: + def setup_method(self): + self.tmpdir = mkdtemp(suffix=str(threading.get_native_id())) + self.fn = os.path.join(self.tmpdir, 'testfile.mtx') + + def teardown_method(self): + shutil.rmtree(self.tmpdir) + + def check_read(self, example, a, info): + f = open(self.fn, 'w') + f.write(example) + f.close() + assert_equal(mminfo(self.fn), info) + b = mmread(self.fn, spmatrix=False).toarray() + assert_array_almost_equal(a, b) + + def test_read_general(self): + a = [[1, 0, 0, 6, 0], + [0, 10.5, 0, 0, 0], + [0, 0, .015, 0, 0], + [0, 250.5, 0, -280, 33.32], + [0, 0, 0, 0, 12]] + self.check_read(_general_example, a, + (5, 5, 8, 'coordinate', 'real', 'general')) + + def test_read_hermitian(self): + a = [[1, 0, 0, 0, 0], + [0, 10.5, 0, 250.5 - 22.22j, 0], + [0, 0, .015, 0, 0], + [0, 250.5 + 22.22j, 0, -280, -33.32j], + [0, 0, 0, 33.32j, 12]] + self.check_read(_hermitian_example, a, + (5, 5, 7, 'coordinate', 'complex', 'hermitian')) + + def test_read_skew(self): + a = [[1, 0, 0, 0, 0], + [0, 10.5, 0, -250.5, 0], + [0, 0, .015, 0, 0], + [0, 250.5, 0, -280, 0], + [0, 0, 0, 0, 12]] + self.check_read(_skew_example, a, + (5, 5, 7, 'coordinate', 'real', 'skew-symmetric')) + + def test_read_symmetric(self): + a = [[1, 0, 0, 0, 0], + [0, 10.5, 0, 250.5, 0], + [0, 0, .015, 0, 0], + [0, 250.5, 0, -280, 8], + [0, 0, 0, 8, 12]] + self.check_read(_symmetric_example, a, + (5, 5, 7, 'coordinate', 'real', 'symmetric')) + + def test_read_symmetric_pattern(self): + a = [[1, 0, 0, 0, 0], + [0, 1, 0, 1, 0], + [0, 0, 1, 0, 0], + [0, 1, 0, 1, 1], + [0, 0, 0, 1, 1]] + self.check_read(_symmetric_pattern_example, a, + (5, 5, 7, 'coordinate', 'pattern', 'symmetric')) + + def test_read_empty_lines(self): + a = [[1, 0, 0, 6, 0], + [0, 10.5, 0, 0, 0], + [0, 0, .015, 0, 0], + [0, 250.5, 0, -280, 33.32], + [0, 0, 0, 0, 12]] + self.check_read(_empty_lines_example, a, + (5, 5, 8, 'coordinate', 'real', 'general')) + + def test_empty_write_read(self): + # https://github.com/scipy/scipy/issues/1410 (Trac #883) + + b = scipy.sparse.coo_array((10, 10)) + mmwrite(self.fn, b) + + assert_equal(mminfo(self.fn), + (10, 10, 0, 'coordinate', 'real', 'symmetric')) + a = b.toarray() + b = mmread(self.fn, spmatrix=False).toarray() + assert_array_almost_equal(a, b) + + def test_bzip2_py3(self): + # test if fix for #2152 works + try: + # bz2 module isn't always built when building Python. 
+ import bz2 + except ImportError: + return + I = array([0, 0, 1, 2, 3, 3, 3, 4]) + J = array([0, 3, 1, 2, 1, 3, 4, 4]) + V = array([1.0, 6.0, 10.5, 0.015, 250.5, -280.0, 33.32, 12.0]) + + b = scipy.sparse.coo_array((V, (I, J)), shape=(5, 5)) + + mmwrite(self.fn, b) + + fn_bzip2 = f"{self.fn}.bz2" + with open(self.fn, 'rb') as f_in: + f_out = bz2.BZ2File(fn_bzip2, 'wb') + f_out.write(f_in.read()) + f_out.close() + + a = mmread(fn_bzip2, spmatrix=False).toarray() + assert_array_almost_equal(a, b.toarray()) + + def test_gzip_py3(self): + # test if fix for #2152 works + try: + # gzip module can be missing from Python installation + import gzip + except ImportError: + return + I = array([0, 0, 1, 2, 3, 3, 3, 4]) + J = array([0, 3, 1, 2, 1, 3, 4, 4]) + V = array([1.0, 6.0, 10.5, 0.015, 250.5, -280.0, 33.32, 12.0]) + + b = scipy.sparse.coo_array((V, (I, J)), shape=(5, 5)) + + mmwrite(self.fn, b) + + fn_gzip = f"{self.fn}.gz" + with open(self.fn, 'rb') as f_in: + f_out = gzip.open(fn_gzip, 'wb') + f_out.write(f_in.read()) + f_out.close() + + a = mmread(fn_gzip, spmatrix=False).toarray() + assert_array_almost_equal(a, b.toarray()) + + def test_real_write_read(self): + I = array([0, 0, 1, 2, 3, 3, 3, 4]) + J = array([0, 3, 1, 2, 1, 3, 4, 4]) + V = array([1.0, 6.0, 10.5, 0.015, 250.5, -280.0, 33.32, 12.0]) + + b = scipy.sparse.coo_array((V, (I, J)), shape=(5, 5)) + + mmwrite(self.fn, b) + + assert_equal(mminfo(self.fn), + (5, 5, 8, 'coordinate', 'real', 'general')) + a = b.toarray() + b = mmread(self.fn, spmatrix=False).toarray() + assert_array_almost_equal(a, b) + + def test_complex_write_read(self): + I = array([0, 0, 1, 2, 3, 3, 3, 4]) + J = array([0, 3, 1, 2, 1, 3, 4, 4]) + V = array([1.0 + 3j, 6.0 + 2j, 10.50 + 0.9j, 0.015 + -4.4j, + 250.5 + 0j, -280.0 + 5j, 33.32 + 6.4j, 12.00 + 0.8j]) + + b = scipy.sparse.coo_array((V, (I, J)), shape=(5, 5)) + + mmwrite(self.fn, b) + + assert_equal(mminfo(self.fn), + (5, 5, 8, 'coordinate', 'complex', 'general')) + a = b.toarray() + b = mmread(self.fn, spmatrix=False).toarray() + assert_array_almost_equal(a, b) + + def test_sparse_formats(self, tmp_path): + # Note: `tmp_path` is a pytest fixture, it handles cleanup + tmpdir = tmp_path / 'sparse_formats' + tmpdir.mkdir() + + mats = [] + I = array([0, 0, 1, 2, 3, 3, 3, 4]) + J = array([0, 3, 1, 2, 1, 3, 4, 4]) + + V = array([1.0, 6.0, 10.5, 0.015, 250.5, -280.0, 33.32, 12.0]) + mats.append(scipy.sparse.coo_array((V, (I, J)), shape=(5, 5))) + + V = array([1.0 + 3j, 6.0 + 2j, 10.50 + 0.9j, 0.015 + -4.4j, + 250.5 + 0j, -280.0 + 5j, 33.32 + 6.4j, 12.00 + 0.8j]) + mats.append(scipy.sparse.coo_array((V, (I, J)), shape=(5, 5))) + + for mat in mats: + expected = mat.toarray() + for fmt in ['csr', 'csc', 'coo']: + fname = tmpdir / (fmt + '.mtx') + mmwrite(fname, mat.asformat(fmt)) + result = mmread(fname, spmatrix=False).toarray() + assert_array_almost_equal(result, expected) + + def test_precision(self): + test_values = [pi] + [10**(i) for i in range(0, -10, -1)] + test_precisions = range(1, 10) + for value in test_values: + for precision in test_precisions: + # construct sparse matrix with test value at last main diagonal + n = 10**precision + 1 + A = scipy.sparse.dok_array((n, n)) + A[n-1, n-1] = value + # write matrix with test precision and read again + mmwrite(self.fn, A, precision=precision) + A = scipy.io.mmread(self.fn, spmatrix=False) + # check for right entries in matrix + assert_array_equal(A.row, [n-1]) + assert_array_equal(A.col, [n-1]) + assert_allclose(A.data, [float('%%.%dg' % precision % value)]) + + 
def test_bad_number_of_coordinate_header_fields(self): + s = """\ + %%MatrixMarket matrix coordinate real general + 5 5 8 999 + 1 1 1.000e+00 + 2 2 1.050e+01 + 3 3 1.500e-02 + 1 4 6.000e+00 + 4 2 2.505e+02 + 4 4 -2.800e+02 + 4 5 3.332e+01 + 5 5 1.200e+01 + """ + text = textwrap.dedent(s).encode('ascii') + with pytest.raises(ValueError, match='not of length 3'): + scipy.io.mmread(io.BytesIO(text)) + + +def test_gh11389(): + mmread(io.StringIO("%%MatrixMarket matrix coordinate complex symmetric\n" + " 1 1 1\n" + "1 1 -2.1846000000000e+02 0.0000000000000e+00"), + spmatrix=False) + + +def test_gh18123(tmp_path): + lines = [" %%MatrixMarket matrix coordinate real general\n", + "5 5 3\n", + "2 3 1.0\n", + "3 4 2.0\n", + "3 5 3.0\n"] + test_file = tmp_path / "test.mtx" + with open(test_file, "w") as f: + f.writelines(lines) + mmread(test_file, spmatrix=False) + +def test_mtx_append(tmp_path): + a = mmread(io.StringIO("%%MatrixMarket matrix coordinate complex symmetric\n" + " 1 1 1\n" + "1 1 -2.1846000000000e+02 0.0000000000000e+00"), + spmatrix=False) + test_writefile = tmp_path / "test_mtx" + test_readfile = tmp_path / "test_mtx.mtx" + mmwrite(test_writefile, a) + mmread(test_readfile, spmatrix=False) + + +def test_threadpoolctl(): + try: + import threadpoolctl + if not hasattr(threadpoolctl, "register"): + pytest.skip("threadpoolctl too old") + return + except ImportError: + pytest.skip("no threadpoolctl") + return + + with threadpoolctl.threadpool_limits(limits=4): + assert_equal(fmm.PARALLELISM, 4) + + with threadpoolctl.threadpool_limits(limits=2, user_api='scipy'): + assert_equal(fmm.PARALLELISM, 2) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/test_netcdf.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/test_netcdf.py new file mode 100644 index 0000000000000000000000000000000000000000..161406076d0b5078e8e11aa5762b7715cd83c4a7 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/test_netcdf.py @@ -0,0 +1,550 @@ +''' Tests for netcdf ''' +import os +from os.path import join as pjoin, dirname +import shutil +import tempfile +import warnings +from io import BytesIO +from glob import glob +from contextlib import contextmanager + +import numpy as np +from numpy.testing import (assert_, assert_allclose, assert_equal, + break_cycles, suppress_warnings, IS_PYPY) +import pytest +from pytest import raises as assert_raises + +from scipy.io import netcdf_file +from scipy._lib._tmpdirs import in_tempdir + +TEST_DATA_PATH = pjoin(dirname(__file__), 'data') + +N_EG_ELS = 11 # number of elements for example variable +VARTYPE_EG = 'b' # var type for example variable + + +pytestmark = pytest.mark.thread_unsafe + + +@contextmanager +def make_simple(*args, **kwargs): + f = netcdf_file(*args, **kwargs) + f.history = 'Created for a test' + f.createDimension('time', N_EG_ELS) + time = f.createVariable('time', VARTYPE_EG, ('time',)) + time[:] = np.arange(N_EG_ELS) + time.units = 'days since 2008-01-01' + f.flush() + yield f + f.close() + + +def check_simple(ncfileobj): + '''Example fileobj tests ''' + assert_equal(ncfileobj.history, b'Created for a test') + time = ncfileobj.variables['time'] + assert_equal(time.units, b'days since 2008-01-01') + assert_equal(time.shape, (N_EG_ELS,)) + assert_equal(time[-1], N_EG_ELS-1) + +def assert_mask_matches(arr, expected_mask): + ''' + Asserts that the mask of arr is effectively the same as expected_mask. 
+ + In contrast to numpy.ma.testutils.assert_mask_equal, this function allows + testing the 'mask' of a standard numpy array (the mask in this case is treated + as all False). + + Parameters + ---------- + arr : ndarray or MaskedArray + Array to test. + expected_mask : array_like of booleans + A list giving the expected mask. + ''' + + mask = np.ma.getmaskarray(arr) + assert_equal(mask, expected_mask) + + +def test_read_write_files(): + # test round trip for example file + cwd = os.getcwd() + try: + tmpdir = tempfile.mkdtemp() + os.chdir(tmpdir) + with make_simple('simple.nc', 'w') as f: + pass + # read the file we just created in 'a' mode + with netcdf_file('simple.nc', 'a') as f: + check_simple(f) + # add something + f._attributes['appendRan'] = 1 + + # To read the NetCDF file we just created:: + with netcdf_file('simple.nc') as f: + # Using mmap is the default (but not on pypy) + assert_equal(f.use_mmap, not IS_PYPY) + check_simple(f) + assert_equal(f._attributes['appendRan'], 1) + + # Read it in append (and check mmap is off) + with netcdf_file('simple.nc', 'a') as f: + assert_(not f.use_mmap) + check_simple(f) + assert_equal(f._attributes['appendRan'], 1) + + # Now without mmap + with netcdf_file('simple.nc', mmap=False) as f: + # Using mmap is the default + assert_(not f.use_mmap) + check_simple(f) + + # To read the NetCDF file we just created, as file object, no + # mmap. When n * n_bytes(var_type) is not divisible by 4, this + # raised an error in pupynere 1.0.12 and scipy rev 5893, because + # calculated vsize was rounding up in units of 4 - see + # https://www.unidata.ucar.edu/software/netcdf/guide_toc.html + with open('simple.nc', 'rb') as fobj: + with netcdf_file(fobj) as f: + # by default, don't use mmap for file-like + assert_(not f.use_mmap) + check_simple(f) + + # Read file from fileobj, with mmap + with suppress_warnings() as sup: + if IS_PYPY: + sup.filter(RuntimeWarning, + "Cannot close a netcdf_file opened with mmap=True.*") + with open('simple.nc', 'rb') as fobj: + with netcdf_file(fobj, mmap=True) as f: + assert_(f.use_mmap) + check_simple(f) + + # Again read it in append mode (adding another att) + with open('simple.nc', 'r+b') as fobj: + with netcdf_file(fobj, 'a') as f: + assert_(not f.use_mmap) + check_simple(f) + f.createDimension('app_dim', 1) + var = f.createVariable('app_var', 'i', ('app_dim',)) + var[:] = 42 + + # And... check that app_var made it in... 
+ with netcdf_file('simple.nc') as f: + check_simple(f) + assert_equal(f.variables['app_var'][:], 42) + + finally: + if IS_PYPY: + # windows cannot remove a dead file held by a mmap + # that has not been collected in PyPy + break_cycles() + break_cycles() + os.chdir(cwd) + shutil.rmtree(tmpdir) + + +def test_read_write_sio(): + eg_sio1 = BytesIO() + with make_simple(eg_sio1, 'w'): + str_val = eg_sio1.getvalue() + + eg_sio2 = BytesIO(str_val) + with netcdf_file(eg_sio2) as f2: + check_simple(f2) + + # Test that error is raised if attempting mmap for sio + eg_sio3 = BytesIO(str_val) + assert_raises(ValueError, netcdf_file, eg_sio3, 'r', True) + # Test 64-bit offset write / read + eg_sio_64 = BytesIO() + with make_simple(eg_sio_64, 'w', version=2) as f_64: + str_val = eg_sio_64.getvalue() + + eg_sio_64 = BytesIO(str_val) + with netcdf_file(eg_sio_64) as f_64: + check_simple(f_64) + assert_equal(f_64.version_byte, 2) + # also when version 2 explicitly specified + eg_sio_64 = BytesIO(str_val) + with netcdf_file(eg_sio_64, version=2) as f_64: + check_simple(f_64) + assert_equal(f_64.version_byte, 2) + + +def test_bytes(): + raw_file = BytesIO() + f = netcdf_file(raw_file, mode='w') + # Dataset only has a single variable, dimension and attribute to avoid + # any ambiguity related to order. + f.a = 'b' + f.createDimension('dim', 1) + var = f.createVariable('var', np.int16, ('dim',)) + var[0] = -9999 + var.c = 'd' + f.sync() + + actual = raw_file.getvalue() + + expected = (b'CDF\x01' + b'\x00\x00\x00\x00' + b'\x00\x00\x00\x0a' + b'\x00\x00\x00\x01' + b'\x00\x00\x00\x03' + b'dim\x00' + b'\x00\x00\x00\x01' + b'\x00\x00\x00\x0c' + b'\x00\x00\x00\x01' + b'\x00\x00\x00\x01' + b'a\x00\x00\x00' + b'\x00\x00\x00\x02' + b'\x00\x00\x00\x01' + b'b\x00\x00\x00' + b'\x00\x00\x00\x0b' + b'\x00\x00\x00\x01' + b'\x00\x00\x00\x03' + b'var\x00' + b'\x00\x00\x00\x01' + b'\x00\x00\x00\x00' + b'\x00\x00\x00\x0c' + b'\x00\x00\x00\x01' + b'\x00\x00\x00\x01' + b'c\x00\x00\x00' + b'\x00\x00\x00\x02' + b'\x00\x00\x00\x01' + b'd\x00\x00\x00' + b'\x00\x00\x00\x03' + b'\x00\x00\x00\x04' + b'\x00\x00\x00\x78' + b'\xd8\xf1\x80\x01') + + assert_equal(actual, expected) + + +def test_encoded_fill_value(): + with netcdf_file(BytesIO(), mode='w') as f: + f.createDimension('x', 1) + var = f.createVariable('var', 'S1', ('x',)) + assert_equal(var._get_encoded_fill_value(), b'\x00') + var._FillValue = b'\x01' + assert_equal(var._get_encoded_fill_value(), b'\x01') + var._FillValue = b'\x00\x00' # invalid, wrong size + assert_equal(var._get_encoded_fill_value(), b'\x00') + + +def test_read_example_data(): + # read any example data files + for fname in glob(pjoin(TEST_DATA_PATH, '*.nc')): + with netcdf_file(fname, 'r'): + pass + with netcdf_file(fname, 'r', mmap=False): + pass + + +def test_itemset_no_segfault_on_readonly(): + # Regression test for ticket #1202. + # Open the test file in read-only mode. + + filename = pjoin(TEST_DATA_PATH, 'example_1.nc') + with suppress_warnings() as sup: + message = ("Cannot close a netcdf_file opened with mmap=True, when " + "netcdf_variables or arrays referring to its data still exist") + sup.filter(RuntimeWarning, message) + with netcdf_file(filename, 'r', mmap=True) as f: + time_var = f.variables['time'] + + # time_var.assignValue(42) should raise a RuntimeError--not seg. fault! 
+ assert_raises(RuntimeError, time_var.assignValue, 42) + + +def test_appending_issue_gh_8625(): + stream = BytesIO() + + with make_simple(stream, mode='w') as f: + f.createDimension('x', 2) + f.createVariable('x', float, ('x',)) + f.variables['x'][...] = 1 + f.flush() + contents = stream.getvalue() + + stream = BytesIO(contents) + with netcdf_file(stream, mode='a') as f: + f.variables['x'][...] = 2 + + +def test_write_invalid_dtype(): + dtypes = ['int64', 'uint64'] + if np.dtype('int').itemsize == 8: # 64-bit machines + dtypes.append('int') + if np.dtype('uint').itemsize == 8: # 64-bit machines + dtypes.append('uint') + + with netcdf_file(BytesIO(), 'w') as f: + f.createDimension('time', N_EG_ELS) + for dt in dtypes: + assert_raises(ValueError, f.createVariable, 'time', dt, ('time',)) + + +def test_flush_rewind(): + stream = BytesIO() + with make_simple(stream, mode='w') as f: + f.createDimension('x',4) # x is used in createVariable + v = f.createVariable('v', 'i2', ['x']) + v[:] = 1 + f.flush() + len_single = len(stream.getvalue()) + f.flush() + len_double = len(stream.getvalue()) + + assert_(len_single == len_double) + + +def test_dtype_specifiers(): + # Numpy 1.7.0-dev had a bug where 'i2' wouldn't work. + # Specifying np.int16 or similar only works from the same commit as this + # comment was made. + with make_simple(BytesIO(), mode='w') as f: + f.createDimension('x',4) + f.createVariable('v1', 'i2', ['x']) + f.createVariable('v2', np.int16, ['x']) + f.createVariable('v3', np.dtype(np.int16), ['x']) + + +def test_ticket_1720(): + io = BytesIO() + + items = [0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9] + + with netcdf_file(io, 'w') as f: + f.history = 'Created for a test' + f.createDimension('float_var', 10) + float_var = f.createVariable('float_var', 'f', ('float_var',)) + float_var[:] = items + float_var.units = 'metres' + f.flush() + contents = io.getvalue() + + io = BytesIO(contents) + with netcdf_file(io, 'r') as f: + assert_equal(f.history, b'Created for a test') + float_var = f.variables['float_var'] + assert_equal(float_var.units, b'metres') + assert_equal(float_var.shape, (10,)) + assert_allclose(float_var[:], items) + + +def test_mmaps_segfault(): + filename = pjoin(TEST_DATA_PATH, 'example_1.nc') + + if not IS_PYPY: + with warnings.catch_warnings(): + warnings.simplefilter("error") + with netcdf_file(filename, mmap=True) as f: + x = f.variables['lat'][:] + # should not raise warnings + del x + + def doit(): + with netcdf_file(filename, mmap=True) as f: + return f.variables['lat'][:] + + # should not crash + with suppress_warnings() as sup: + message = ("Cannot close a netcdf_file opened with mmap=True, when " + "netcdf_variables or arrays referring to its data still exist") + sup.filter(RuntimeWarning, message) + x = doit() + x.sum() + + +def test_zero_dimensional_var(): + io = BytesIO() + with make_simple(io, 'w') as f: + v = f.createVariable('zerodim', 'i2', []) + # This is checking that .isrec returns a boolean - don't simplify it + # to 'assert not ...' 
+ assert v.isrec is False, v.isrec + f.flush() + + +def test_byte_gatts(): + # Check that global "string" atts work like they did before py3k + # unicode and general bytes confusion + with in_tempdir(): + filename = 'g_byte_atts.nc' + f = netcdf_file(filename, 'w') + f._attributes['holy'] = b'grail' + f._attributes['witch'] = 'floats' + f.close() + f = netcdf_file(filename, 'r') + assert_equal(f._attributes['holy'], b'grail') + assert_equal(f._attributes['witch'], b'floats') + f.close() + + +def test_open_append(): + # open 'w' put one attr + with in_tempdir(): + filename = 'append_dat.nc' + f = netcdf_file(filename, 'w') + f._attributes['Kilroy'] = 'was here' + f.close() + + # open again in 'a', read the att and a new one + f = netcdf_file(filename, 'a') + assert_equal(f._attributes['Kilroy'], b'was here') + f._attributes['naughty'] = b'Zoot' + f.close() + + # open yet again in 'r' and check both atts + f = netcdf_file(filename, 'r') + assert_equal(f._attributes['Kilroy'], b'was here') + assert_equal(f._attributes['naughty'], b'Zoot') + f.close() + + +def test_append_recordDimension(): + dataSize = 100 + + with in_tempdir(): + # Create file with record time dimension + with netcdf_file('withRecordDimension.nc', 'w') as f: + f.createDimension('time', None) + f.createVariable('time', 'd', ('time',)) + f.createDimension('x', dataSize) + x = f.createVariable('x', 'd', ('x',)) + x[:] = np.array(range(dataSize)) + f.createDimension('y', dataSize) + y = f.createVariable('y', 'd', ('y',)) + y[:] = np.array(range(dataSize)) + f.createVariable('testData', 'i', ('time', 'x', 'y')) + f.flush() + f.close() + + for i in range(2): + # Open the file in append mode and add data + with netcdf_file('withRecordDimension.nc', 'a') as f: + f.variables['time'].data = np.append(f.variables["time"].data, i) + f.variables['testData'][i, :, :] = np.full((dataSize, dataSize), i) + f.flush() + + # Read the file and check that append worked + with netcdf_file('withRecordDimension.nc') as f: + assert_equal(f.variables['time'][-1], i) + assert_equal(f.variables['testData'][-1, :, :].copy(), + np.full((dataSize, dataSize), i)) + assert_equal(f.variables['time'].data.shape[0], i+1) + assert_equal(f.variables['testData'].data.shape[0], i+1) + + # Read the file and check that 'data' was not saved as user defined + # attribute of testData variable during append operation + with netcdf_file('withRecordDimension.nc') as f: + with assert_raises(KeyError) as ar: + f.variables['testData']._attributes['data'] + ex = ar.value + assert_equal(ex.args[0], 'data') + +def test_maskandscale(): + t = np.linspace(20, 30, 15) + t[3] = 100 + tm = np.ma.masked_greater(t, 99) + fname = pjoin(TEST_DATA_PATH, 'example_2.nc') + with netcdf_file(fname, maskandscale=True) as f: + Temp = f.variables['Temperature'] + assert_equal(Temp.missing_value, 9999) + assert_equal(Temp.add_offset, 20) + assert_equal(Temp.scale_factor, np.float32(0.01)) + found = Temp[:].compressed() + del Temp # Remove ref to mmap, so file can be closed. 
+ expected = np.round(tm.compressed(), 2) + assert_allclose(found, expected) + + with in_tempdir(): + newfname = 'ms.nc' + f = netcdf_file(newfname, 'w', maskandscale=True) + f.createDimension('Temperature', len(tm)) + temp = f.createVariable('Temperature', 'i', ('Temperature',)) + temp.missing_value = 9999 + temp.scale_factor = 0.01 + temp.add_offset = 20 + temp[:] = tm + f.close() + + with netcdf_file(newfname, maskandscale=True) as f: + Temp = f.variables['Temperature'] + assert_equal(Temp.missing_value, 9999) + assert_equal(Temp.add_offset, 20) + assert_equal(Temp.scale_factor, np.float32(0.01)) + expected = np.round(tm.compressed(), 2) + found = Temp[:].compressed() + del Temp + assert_allclose(found, expected) + + +# ------------------------------------------------------------------------ +# Test reading with masked values (_FillValue / missing_value) +# ------------------------------------------------------------------------ + +def test_read_withValuesNearFillValue(): + # Regression test for ticket #5626 + fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc') + with netcdf_file(fname, maskandscale=True) as f: + vardata = f.variables['var1_fillval0'][:] + assert_mask_matches(vardata, [False, True, False]) + +def test_read_withNoFillValue(): + # For a variable with no fill value, reading data with maskandscale=True + # should return unmasked data + fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc') + with netcdf_file(fname, maskandscale=True) as f: + vardata = f.variables['var2_noFillval'][:] + assert_mask_matches(vardata, [False, False, False]) + assert_equal(vardata, [1,2,3]) + +def test_read_withFillValueAndMissingValue(): + # For a variable with both _FillValue and missing_value, the _FillValue + # should be used + IRRELEVANT_VALUE = 9999 + fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc') + with netcdf_file(fname, maskandscale=True) as f: + vardata = f.variables['var3_fillvalAndMissingValue'][:] + assert_mask_matches(vardata, [True, False, False]) + assert_equal(vardata, [IRRELEVANT_VALUE, 2, 3]) + +def test_read_withMissingValue(): + # For a variable with missing_value but not _FillValue, the missing_value + # should be used + fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc') + with netcdf_file(fname, maskandscale=True) as f: + vardata = f.variables['var4_missingValue'][:] + assert_mask_matches(vardata, [False, True, False]) + +def test_read_withFillValNaN(): + fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc') + with netcdf_file(fname, maskandscale=True) as f: + vardata = f.variables['var5_fillvalNaN'][:] + assert_mask_matches(vardata, [False, True, False]) + +def test_read_withChar(): + fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc') + with netcdf_file(fname, maskandscale=True) as f: + vardata = f.variables['var6_char'][:] + assert_mask_matches(vardata, [False, True, False]) + +def test_read_with2dVar(): + fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc') + with netcdf_file(fname, maskandscale=True) as f: + vardata = f.variables['var7_2d'][:] + assert_mask_matches(vardata, [[True, False], [False, False], [False, True]]) + +def test_read_withMaskAndScaleFalse(): + # If a variable has a _FillValue (or missing_value) attribute, but is read + # with maskandscale set to False, the result should be unmasked + fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc') + # Open file with mmap=False to avoid problems with closing a mmap'ed file + # when arrays referring to its data still exist: + with netcdf_file(fname, maskandscale=False, 
mmap=False) as f: + vardata = f.variables['var3_fillvalAndMissingValue'][:] + assert_mask_matches(vardata, [False, False, False]) + assert_equal(vardata, [1, 2, 3]) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/test_paths.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/test_paths.py new file mode 100644 index 0000000000000000000000000000000000000000..1e7c4167ace335fb5fc86f6499ee54c3360ded6e --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/test_paths.py @@ -0,0 +1,93 @@ +""" +Ensure that we can use pathlib.Path objects in all relevant IO functions. +""" +from pathlib import Path + +import numpy as np + +import scipy.io +import scipy.io.wavfile +from scipy._lib._tmpdirs import tempdir +import scipy.sparse + + +class TestPaths: + data = np.arange(5).astype(np.int64) + + def test_savemat(self): + with tempdir() as temp_dir: + path = Path(temp_dir) / 'data.mat' + scipy.io.savemat(path, {'data': self.data}) + assert path.is_file() + + def test_loadmat(self): + # Save data with string path, load with pathlib.Path + with tempdir() as temp_dir: + path = Path(temp_dir) / 'data.mat' + scipy.io.savemat(str(path), {'data': self.data}) + + mat_contents = scipy.io.loadmat(path) + assert (mat_contents['data'] == self.data).all() + + def test_whosmat(self): + # Save data with string path, load with pathlib.Path + with tempdir() as temp_dir: + path = Path(temp_dir) / 'data.mat' + scipy.io.savemat(str(path), {'data': self.data}) + + contents = scipy.io.whosmat(path) + assert contents[0] == ('data', (1, 5), 'int64') + + def test_readsav(self): + path = Path(__file__).parent / 'data/scalar_string.sav' + scipy.io.readsav(path) + + def test_hb_read(self): + # Save data with string path, load with pathlib.Path + with tempdir() as temp_dir: + data = scipy.sparse.eye_array(3, format='csr') + path = Path(temp_dir) / 'data.hb' + scipy.io.hb_write(str(path), data) + + data_new = scipy.io.hb_read(path, spmatrix=False) + assert (data_new != data).nnz == 0 + + def test_hb_write(self): + with tempdir() as temp_dir: + data = scipy.sparse.eye_array(3, format='csr') + path = Path(temp_dir) / 'data.hb' + scipy.io.hb_write(path, data) + assert path.is_file() + + def test_mmio_read(self): + # Save data with string path, load with pathlib.Path + with tempdir() as temp_dir: + data = scipy.sparse.eye_array(3, format='csr') + path = Path(temp_dir) / 'data.mtx' + scipy.io.mmwrite(str(path), data) + + data_new = scipy.io.mmread(path, spmatrix=False) + assert (data_new != data).nnz == 0 + + def test_mmio_write(self): + with tempdir() as temp_dir: + data = scipy.sparse.eye_array(3, format='csr') + path = Path(temp_dir) / 'data.mtx' + scipy.io.mmwrite(path, data) + + def test_netcdf_file(self): + path = Path(__file__).parent / 'data/example_1.nc' + scipy.io.netcdf_file(path) + + def test_wavfile_read(self): + path = Path(__file__).parent / 'data/test-8000Hz-le-2ch-1byteu.wav' + scipy.io.wavfile.read(path) + + def test_wavfile_write(self): + # Read from str path, write to Path + input_path = Path(__file__).parent / 'data/test-8000Hz-le-2ch-1byteu.wav' + rate, data = scipy.io.wavfile.read(str(input_path)) + + with tempdir() as temp_dir: + output_path = Path(temp_dir) / input_path.name + scipy.io.wavfile.write(output_path, rate, data) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/test_wavfile.py 
b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/test_wavfile.py new file mode 100644 index 0000000000000000000000000000000000000000..8e0a545495a842916a3cddd48c0b1b3859ae4ca5 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/tests/test_wavfile.py @@ -0,0 +1,460 @@ +import os +import sys +from io import BytesIO +import threading + +import numpy as np +from numpy.testing import (assert_equal, assert_, assert_array_equal, + break_cycles, suppress_warnings, IS_PYPY) +import pytest +from pytest import raises, warns + +from scipy.io import wavfile + + +def datafile(fn): + return os.path.join(os.path.dirname(__file__), 'data', fn) + + +def test_read_1(): + # 32-bit PCM (which uses extensible format) + for mmap in [False, True]: + filename = 'test-44100Hz-le-1ch-4bytes.wav' + rate, data = wavfile.read(datafile(filename), mmap=mmap) + + assert_equal(rate, 44100) + assert_(np.issubdtype(data.dtype, np.int32)) + assert_equal(data.shape, (4410,)) + + del data + + +def test_read_2(): + # 8-bit unsigned PCM + for mmap in [False, True]: + filename = 'test-8000Hz-le-2ch-1byteu.wav' + rate, data = wavfile.read(datafile(filename), mmap=mmap) + + assert_equal(rate, 8000) + assert_(np.issubdtype(data.dtype, np.uint8)) + assert_equal(data.shape, (800, 2)) + + del data + + +def test_read_3(): + # Little-endian float + for mmap in [False, True]: + filename = 'test-44100Hz-2ch-32bit-float-le.wav' + rate, data = wavfile.read(datafile(filename), mmap=mmap) + + assert_equal(rate, 44100) + assert_(np.issubdtype(data.dtype, np.float32)) + assert_equal(data.shape, (441, 2)) + + del data + + +def test_read_4(): + # Contains unsupported 'PEAK' chunk + for mmap in [False, True]: + with suppress_warnings() as sup: + sup.filter(wavfile.WavFileWarning, + "Chunk .non-data. 
not understood, skipping it") + filename = 'test-48000Hz-2ch-64bit-float-le-wavex.wav' + rate, data = wavfile.read(datafile(filename), mmap=mmap) + + assert_equal(rate, 48000) + assert_(np.issubdtype(data.dtype, np.float64)) + assert_equal(data.shape, (480, 2)) + + del data + + +def test_read_5(): + # Big-endian float + for mmap in [False, True]: + filename = 'test-44100Hz-2ch-32bit-float-be.wav' + rate, data = wavfile.read(datafile(filename), mmap=mmap) + + assert_equal(rate, 44100) + assert_(np.issubdtype(data.dtype, np.float32)) + assert_(data.dtype.byteorder == '>' or (sys.byteorder == 'big' and + data.dtype.byteorder == '=')) + assert_equal(data.shape, (441, 2)) + + del data + + +def test_5_bit_odd_size_no_pad(): + # 5-bit, 1 B container, 5 channels, 9 samples, 45 B data chunk + # Generated by LTspice, which incorrectly omits pad byte, but should be + # readable anyway + for mmap in [False, True]: + filename = 'test-8000Hz-le-5ch-9S-5bit.wav' + rate, data = wavfile.read(datafile(filename), mmap=mmap) + + assert_equal(rate, 8000) + assert_(np.issubdtype(data.dtype, np.uint8)) + assert_equal(data.shape, (9, 5)) + + # 8-5 = 3 LSBits should be 0 + assert_equal(data & 0b00000111, 0) + + # Unsigned + assert_equal(data.max(), 0b11111000) # Highest possible + assert_equal(data[0, 0], 128) # Midpoint is 128 for <= 8-bit + assert_equal(data.min(), 0) # Lowest possible + + del data + + +def test_12_bit_even_size(): + # 12-bit, 2 B container, 4 channels, 9 samples, 72 B data chunk + # Generated by LTspice from 1 Vpk sine waves + for mmap in [False, True]: + filename = 'test-8000Hz-le-4ch-9S-12bit.wav' + rate, data = wavfile.read(datafile(filename), mmap=mmap) + + assert_equal(rate, 8000) + assert_(np.issubdtype(data.dtype, np.int16)) + assert_equal(data.shape, (9, 4)) + + # 16-12 = 4 LSBits should be 0 + assert_equal(data & 0b00000000_00001111, 0) + + # Signed + assert_equal(data.max(), 0b01111111_11110000) # Highest possible + assert_equal(data[0, 0], 0) # Midpoint is 0 for >= 9-bit + assert_equal(data.min(), -0b10000000_00000000) # Lowest possible + + del data + + +def test_24_bit_odd_size_with_pad(): + # 24-bit, 3 B container, 3 channels, 5 samples, 45 B data chunk + # Should not raise any warnings about the data chunk pad byte + filename = 'test-8000Hz-le-3ch-5S-24bit.wav' + rate, data = wavfile.read(datafile(filename), mmap=False) + + assert_equal(rate, 8000) + assert_(np.issubdtype(data.dtype, np.int32)) + assert_equal(data.shape, (5, 3)) + + # All LSBytes should be 0 + assert_equal(data & 0xff, 0) + + # Hand-made max/min samples under different conventions: + # 2**(N-1) 2**(N-1)-1 LSB + assert_equal(data, [[-0x8000_0000, -0x7fff_ff00, -0x200], + [-0x4000_0000, -0x3fff_ff00, -0x100], + [+0x0000_0000, +0x0000_0000, +0x000], + [+0x4000_0000, +0x3fff_ff00, +0x100], + [+0x7fff_ff00, +0x7fff_ff00, +0x200]]) + # ^ clipped + + +def test_20_bit_extra_data(): + # 20-bit, 3 B container, 1 channel, 10 samples, 30 B data chunk + # with extra data filling container beyond the bit depth + filename = 'test-1234Hz-le-1ch-10S-20bit-extra.wav' + rate, data = wavfile.read(datafile(filename), mmap=False) + + assert_equal(rate, 1234) + assert_(np.issubdtype(data.dtype, np.int32)) + assert_equal(data.shape, (10,)) + + # All LSBytes should still be 0, because 3 B container in 4 B dtype + assert_equal(data & 0xff, 0) + + # But it should load the data beyond 20 bits + assert_((data & 0xf00).any()) + + # Full-scale positive/negative samples, then being halved each time + assert_equal(data, [+0x7ffff000, # 
+full-scale 20-bit + -0x7ffff000, # -full-scale 20-bit + +0x7ffff000 >> 1, # +1/2 + -0x7ffff000 >> 1, # -1/2 + +0x7ffff000 >> 2, # +1/4 + -0x7ffff000 >> 2, # -1/4 + +0x7ffff000 >> 3, # +1/8 + -0x7ffff000 >> 3, # -1/8 + +0x7ffff000 >> 4, # +1/16 + -0x7ffff000 >> 4, # -1/16 + ]) + + +def test_36_bit_odd_size(): + # 36-bit, 5 B container, 3 channels, 5 samples, 75 B data chunk + pad + filename = 'test-8000Hz-le-3ch-5S-36bit.wav' + rate, data = wavfile.read(datafile(filename), mmap=False) + + assert_equal(rate, 8000) + assert_(np.issubdtype(data.dtype, np.int64)) + assert_equal(data.shape, (5, 3)) + + # 28 LSBits should be 0 + assert_equal(data & 0xfffffff, 0) + + # Hand-made max/min samples under different conventions: + # Fixed-point 2**(N-1) Full-scale 2**(N-1)-1 LSB + correct = [[-0x8000_0000_0000_0000, -0x7fff_ffff_f000_0000, -0x2000_0000], + [-0x4000_0000_0000_0000, -0x3fff_ffff_f000_0000, -0x1000_0000], + [+0x0000_0000_0000_0000, +0x0000_0000_0000_0000, +0x0000_0000], + [+0x4000_0000_0000_0000, +0x3fff_ffff_f000_0000, +0x1000_0000], + [+0x7fff_ffff_f000_0000, +0x7fff_ffff_f000_0000, +0x2000_0000]] + # ^ clipped + + assert_equal(data, correct) + + +def test_45_bit_even_size(): + # 45-bit, 6 B container, 3 channels, 5 samples, 90 B data chunk + filename = 'test-8000Hz-le-3ch-5S-45bit.wav' + rate, data = wavfile.read(datafile(filename), mmap=False) + + assert_equal(rate, 8000) + assert_(np.issubdtype(data.dtype, np.int64)) + assert_equal(data.shape, (5, 3)) + + # 19 LSBits should be 0 + assert_equal(data & 0x7ffff, 0) + + # Hand-made max/min samples under different conventions: + # Fixed-point 2**(N-1) Full-scale 2**(N-1)-1 LSB + correct = [[-0x8000_0000_0000_0000, -0x7fff_ffff_fff8_0000, -0x10_0000], + [-0x4000_0000_0000_0000, -0x3fff_ffff_fff8_0000, -0x08_0000], + [+0x0000_0000_0000_0000, +0x0000_0000_0000_0000, +0x00_0000], + [+0x4000_0000_0000_0000, +0x3fff_ffff_fff8_0000, +0x08_0000], + [+0x7fff_ffff_fff8_0000, +0x7fff_ffff_fff8_0000, +0x10_0000]] + # ^ clipped + + assert_equal(data, correct) + + +def test_53_bit_odd_size(): + # 53-bit, 7 B container, 3 channels, 5 samples, 105 B data chunk + pad + filename = 'test-8000Hz-le-3ch-5S-53bit.wav' + rate, data = wavfile.read(datafile(filename), mmap=False) + + assert_equal(rate, 8000) + assert_(np.issubdtype(data.dtype, np.int64)) + assert_equal(data.shape, (5, 3)) + + # 11 LSBits should be 0 + assert_equal(data & 0x7ff, 0) + + # Hand-made max/min samples under different conventions: + # Fixed-point 2**(N-1) Full-scale 2**(N-1)-1 LSB + correct = [[-0x8000_0000_0000_0000, -0x7fff_ffff_ffff_f800, -0x1000], + [-0x4000_0000_0000_0000, -0x3fff_ffff_ffff_f800, -0x0800], + [+0x0000_0000_0000_0000, +0x0000_0000_0000_0000, +0x0000], + [+0x4000_0000_0000_0000, +0x3fff_ffff_ffff_f800, +0x0800], + [+0x7fff_ffff_ffff_f800, +0x7fff_ffff_ffff_f800, +0x1000]] + # ^ clipped + + assert_equal(data, correct) + + +def test_64_bit_even_size(): + # 64-bit, 8 B container, 3 channels, 5 samples, 120 B data chunk + for mmap in [False, True]: + filename = 'test-8000Hz-le-3ch-5S-64bit.wav' + rate, data = wavfile.read(datafile(filename), mmap=mmap) + + assert_equal(rate, 8000) + assert_(np.issubdtype(data.dtype, np.int64)) + assert_equal(data.shape, (5, 3)) + + # Hand-made max/min samples under different conventions: + # Fixed-point 2**(N-1) Full-scale 2**(N-1)-1 LSB + correct = [[-0x8000_0000_0000_0000, -0x7fff_ffff_ffff_ffff, -0x2], + [-0x4000_0000_0000_0000, -0x3fff_ffff_ffff_ffff, -0x1], + [+0x0000_0000_0000_0000, +0x0000_0000_0000_0000, +0x0], + 
[+0x4000_0000_0000_0000, +0x3fff_ffff_ffff_ffff, +0x1], + [+0x7fff_ffff_ffff_ffff, +0x7fff_ffff_ffff_ffff, +0x2]] + # ^ clipped + + assert_equal(data, correct) + + del data + + +def test_unsupported_mmap(): + # Test containers that cannot be mapped to numpy types + for filename in {'test-8000Hz-le-3ch-5S-24bit.wav', + 'test-8000Hz-le-3ch-5S-36bit.wav', + 'test-8000Hz-le-3ch-5S-45bit.wav', + 'test-8000Hz-le-3ch-5S-53bit.wav', + 'test-1234Hz-le-1ch-10S-20bit-extra.wav'}: + with raises(ValueError, match="mmap.*not compatible"): + rate, data = wavfile.read(datafile(filename), mmap=True) + + +def test_rifx(): + # Compare equivalent RIFX and RIFF files + for rifx, riff in {('test-44100Hz-be-1ch-4bytes.wav', + 'test-44100Hz-le-1ch-4bytes.wav'), + ('test-8000Hz-be-3ch-5S-24bit.wav', + 'test-8000Hz-le-3ch-5S-24bit.wav')}: + rate1, data1 = wavfile.read(datafile(rifx), mmap=False) + rate2, data2 = wavfile.read(datafile(riff), mmap=False) + assert_equal(rate1, rate2) + assert_equal(data1, data2) + + +def test_rf64(): + # Compare equivalent RF64 and RIFF files + for rf64, riff in {('test-44100Hz-le-1ch-4bytes-rf64.wav', + 'test-44100Hz-le-1ch-4bytes.wav'), + ('test-8000Hz-le-3ch-5S-24bit-rf64.wav', + 'test-8000Hz-le-3ch-5S-24bit.wav')}: + rate1, data1 = wavfile.read(datafile(rf64), mmap=False) + rate2, data2 = wavfile.read(datafile(riff), mmap=False) + assert_array_equal(rate1, rate2) + assert_array_equal(data1, data2) + + +@pytest.mark.xslow +def test_write_roundtrip_rf64(tmpdir): + dtype = np.dtype(" 0 + assert rate == 44100 + # also test writing (gh-12176) + data[0] = 0 + + +def test_read_early_eof(): + # File ends after 'fact' chunk at boundary, no data read + for mmap in [False, True]: + filename = 'test-44100Hz-le-1ch-4bytes-early-eof-no-data.wav' + with open(datafile(filename), 'rb') as fp: + with raises(ValueError, match="Unexpected end of file."): + wavfile.read(fp, mmap=mmap) + + +def test_read_incomplete_chunk(): + # File ends inside 'fmt ' chunk ID, no data read + for mmap in [False, True]: + filename = 'test-44100Hz-le-1ch-4bytes-incomplete-chunk.wav' + with open(datafile(filename), 'rb') as fp: + with raises(ValueError, match="Incomplete chunk ID.*b'f'"): + wavfile.read(fp, mmap=mmap) + + +def test_read_inconsistent_header(): + # File header's size fields contradict each other + for mmap in [False, True]: + filename = 'test-8000Hz-le-3ch-5S-24bit-inconsistent.wav' + with open(datafile(filename), 'rb') as fp: + with raises(ValueError, match="header is invalid"): + wavfile.read(fp, mmap=mmap) + + +# signed 8-bit integer PCM is not allowed +# unsigned > 8-bit integer PCM is not allowed +# 8- or 16-bit float PCM is not expected +# g and q are platform-dependent, so not included +@pytest.mark.parametrize("dt_str", ["i2", ">i4", ">i8", ">f4", ">f8", '|u1']) +@pytest.mark.parametrize("channels", [1, 2, 5]) +@pytest.mark.parametrize("rate", [8000, 32000]) +@pytest.mark.parametrize("mmap", [False, True]) +@pytest.mark.parametrize("realfile", [False, True]) +def test_write_roundtrip(realfile, mmap, rate, channels, dt_str, tmpdir): + dtype = np.dtype(dt_str) + if realfile: + tmpfile = str(tmpdir.join(str(threading.get_native_id()), 'temp.wav')) + os.makedirs(os.path.dirname(tmpfile), exist_ok=True) + else: + tmpfile = BytesIO() + data = np.random.rand(100, channels) + if channels == 1: + data = data[:, 0] + if dtype.kind == 'f': + # The range of the float type should be in [-1, 1] + data = data.astype(dtype) + else: + data = (data*128).astype(dtype) + + wavfile.write(tmpfile, rate, data) + + rate2, 
data2 = wavfile.read(tmpfile, mmap=mmap) + + assert_equal(rate, rate2) + assert_(data2.dtype.byteorder in ('<', '=', '|'), msg=data2.dtype) + assert_array_equal(data, data2) + # also test writing (gh-12176) + if realfile: + data2[0] = 0 + else: + with pytest.raises(ValueError, match='read-only'): + data2[0] = 0 + + if realfile and mmap and IS_PYPY and sys.platform == 'win32': + # windows cannot remove a dead file held by a mmap but not collected + # in PyPy; since the filename gets reused in this test, clean this up + break_cycles() + break_cycles() + + +@pytest.mark.parametrize("dtype", [np.float16]) +def test_wavfile_dtype_unsupported(tmpdir, dtype): + tmpfile = str(tmpdir.join('temp.wav')) + rng = np.random.default_rng(1234) + data = rng.random((100, 5)).astype(dtype) + rate = 8000 + with pytest.raises(ValueError, match="Unsupported"): + wavfile.write(tmpfile, rate, data) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/wavfile.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/wavfile.py new file mode 100644 index 0000000000000000000000000000000000000000..b6978a1c461c825e35b8a1f0d7de39fceba38bd6 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/io/wavfile.py @@ -0,0 +1,891 @@ +""" +Module to read / write wav files using NumPy arrays + +Functions +--------- +`read`: Return the sample rate (in samples/sec) and data from a WAV file. + +`write`: Write a NumPy array as a WAV file. + +""" +import io +import sys +import numpy as np +import struct +import warnings +from enum import IntEnum + + +__all__ = [ + 'WavFileWarning', + 'read', + 'write' +] + + +class WavFileWarning(UserWarning): + pass + + +class WAVE_FORMAT(IntEnum): + """ + WAVE form wFormatTag IDs + + Complete list is in mmreg.h in Windows 10 SDK. 
ALAC and OPUS are the + newest additions, in v10.0.14393 2016-07 + """ + UNKNOWN = 0x0000 + PCM = 0x0001 + ADPCM = 0x0002 + IEEE_FLOAT = 0x0003 + VSELP = 0x0004 + IBM_CVSD = 0x0005 + ALAW = 0x0006 + MULAW = 0x0007 + DTS = 0x0008 + DRM = 0x0009 + WMAVOICE9 = 0x000A + WMAVOICE10 = 0x000B + OKI_ADPCM = 0x0010 + DVI_ADPCM = 0x0011 + IMA_ADPCM = 0x0011 # Duplicate + MEDIASPACE_ADPCM = 0x0012 + SIERRA_ADPCM = 0x0013 + G723_ADPCM = 0x0014 + DIGISTD = 0x0015 + DIGIFIX = 0x0016 + DIALOGIC_OKI_ADPCM = 0x0017 + MEDIAVISION_ADPCM = 0x0018 + CU_CODEC = 0x0019 + HP_DYN_VOICE = 0x001A + YAMAHA_ADPCM = 0x0020 + SONARC = 0x0021 + DSPGROUP_TRUESPEECH = 0x0022 + ECHOSC1 = 0x0023 + AUDIOFILE_AF36 = 0x0024 + APTX = 0x0025 + AUDIOFILE_AF10 = 0x0026 + PROSODY_1612 = 0x0027 + LRC = 0x0028 + DOLBY_AC2 = 0x0030 + GSM610 = 0x0031 + MSNAUDIO = 0x0032 + ANTEX_ADPCME = 0x0033 + CONTROL_RES_VQLPC = 0x0034 + DIGIREAL = 0x0035 + DIGIADPCM = 0x0036 + CONTROL_RES_CR10 = 0x0037 + NMS_VBXADPCM = 0x0038 + CS_IMAADPCM = 0x0039 + ECHOSC3 = 0x003A + ROCKWELL_ADPCM = 0x003B + ROCKWELL_DIGITALK = 0x003C + XEBEC = 0x003D + G721_ADPCM = 0x0040 + G728_CELP = 0x0041 + MSG723 = 0x0042 + INTEL_G723_1 = 0x0043 + INTEL_G729 = 0x0044 + SHARP_G726 = 0x0045 + MPEG = 0x0050 + RT24 = 0x0052 + PAC = 0x0053 + MPEGLAYER3 = 0x0055 + LUCENT_G723 = 0x0059 + CIRRUS = 0x0060 + ESPCM = 0x0061 + VOXWARE = 0x0062 + CANOPUS_ATRAC = 0x0063 + G726_ADPCM = 0x0064 + G722_ADPCM = 0x0065 + DSAT = 0x0066 + DSAT_DISPLAY = 0x0067 + VOXWARE_BYTE_ALIGNED = 0x0069 + VOXWARE_AC8 = 0x0070 + VOXWARE_AC10 = 0x0071 + VOXWARE_AC16 = 0x0072 + VOXWARE_AC20 = 0x0073 + VOXWARE_RT24 = 0x0074 + VOXWARE_RT29 = 0x0075 + VOXWARE_RT29HW = 0x0076 + VOXWARE_VR12 = 0x0077 + VOXWARE_VR18 = 0x0078 + VOXWARE_TQ40 = 0x0079 + VOXWARE_SC3 = 0x007A + VOXWARE_SC3_1 = 0x007B + SOFTSOUND = 0x0080 + VOXWARE_TQ60 = 0x0081 + MSRT24 = 0x0082 + G729A = 0x0083 + MVI_MVI2 = 0x0084 + DF_G726 = 0x0085 + DF_GSM610 = 0x0086 + ISIAUDIO = 0x0088 + ONLIVE = 0x0089 + MULTITUDE_FT_SX20 = 0x008A + INFOCOM_ITS_G721_ADPCM = 0x008B + CONVEDIA_G729 = 0x008C + CONGRUENCY = 0x008D + SBC24 = 0x0091 + DOLBY_AC3_SPDIF = 0x0092 + MEDIASONIC_G723 = 0x0093 + PROSODY_8KBPS = 0x0094 + ZYXEL_ADPCM = 0x0097 + PHILIPS_LPCBB = 0x0098 + PACKED = 0x0099 + MALDEN_PHONYTALK = 0x00A0 + RACAL_RECORDER_GSM = 0x00A1 + RACAL_RECORDER_G720_A = 0x00A2 + RACAL_RECORDER_G723_1 = 0x00A3 + RACAL_RECORDER_TETRA_ACELP = 0x00A4 + NEC_AAC = 0x00B0 + RAW_AAC1 = 0x00FF + RHETOREX_ADPCM = 0x0100 + IRAT = 0x0101 + VIVO_G723 = 0x0111 + VIVO_SIREN = 0x0112 + PHILIPS_CELP = 0x0120 + PHILIPS_GRUNDIG = 0x0121 + DIGITAL_G723 = 0x0123 + SANYO_LD_ADPCM = 0x0125 + SIPROLAB_ACEPLNET = 0x0130 + SIPROLAB_ACELP4800 = 0x0131 + SIPROLAB_ACELP8V3 = 0x0132 + SIPROLAB_G729 = 0x0133 + SIPROLAB_G729A = 0x0134 + SIPROLAB_KELVIN = 0x0135 + VOICEAGE_AMR = 0x0136 + G726ADPCM = 0x0140 + DICTAPHONE_CELP68 = 0x0141 + DICTAPHONE_CELP54 = 0x0142 + QUALCOMM_PUREVOICE = 0x0150 + QUALCOMM_HALFRATE = 0x0151 + TUBGSM = 0x0155 + MSAUDIO1 = 0x0160 + WMAUDIO2 = 0x0161 + WMAUDIO3 = 0x0162 + WMAUDIO_LOSSLESS = 0x0163 + WMASPDIF = 0x0164 + UNISYS_NAP_ADPCM = 0x0170 + UNISYS_NAP_ULAW = 0x0171 + UNISYS_NAP_ALAW = 0x0172 + UNISYS_NAP_16K = 0x0173 + SYCOM_ACM_SYC008 = 0x0174 + SYCOM_ACM_SYC701_G726L = 0x0175 + SYCOM_ACM_SYC701_CELP54 = 0x0176 + SYCOM_ACM_SYC701_CELP68 = 0x0177 + KNOWLEDGE_ADVENTURE_ADPCM = 0x0178 + FRAUNHOFER_IIS_MPEG2_AAC = 0x0180 + DTS_DS = 0x0190 + CREATIVE_ADPCM = 0x0200 + CREATIVE_FASTSPEECH8 = 0x0202 + CREATIVE_FASTSPEECH10 = 0x0203 + UHER_ADPCM = 0x0210 + ULEAD_DV_AUDIO = 
0x0215 + ULEAD_DV_AUDIO_1 = 0x0216 + QUARTERDECK = 0x0220 + ILINK_VC = 0x0230 + RAW_SPORT = 0x0240 + ESST_AC3 = 0x0241 + GENERIC_PASSTHRU = 0x0249 + IPI_HSX = 0x0250 + IPI_RPELP = 0x0251 + CS2 = 0x0260 + SONY_SCX = 0x0270 + SONY_SCY = 0x0271 + SONY_ATRAC3 = 0x0272 + SONY_SPC = 0x0273 + TELUM_AUDIO = 0x0280 + TELUM_IA_AUDIO = 0x0281 + NORCOM_VOICE_SYSTEMS_ADPCM = 0x0285 + FM_TOWNS_SND = 0x0300 + MICRONAS = 0x0350 + MICRONAS_CELP833 = 0x0351 + BTV_DIGITAL = 0x0400 + INTEL_MUSIC_CODER = 0x0401 + INDEO_AUDIO = 0x0402 + QDESIGN_MUSIC = 0x0450 + ON2_VP7_AUDIO = 0x0500 + ON2_VP6_AUDIO = 0x0501 + VME_VMPCM = 0x0680 + TPC = 0x0681 + LIGHTWAVE_LOSSLESS = 0x08AE + OLIGSM = 0x1000 + OLIADPCM = 0x1001 + OLICELP = 0x1002 + OLISBC = 0x1003 + OLIOPR = 0x1004 + LH_CODEC = 0x1100 + LH_CODEC_CELP = 0x1101 + LH_CODEC_SBC8 = 0x1102 + LH_CODEC_SBC12 = 0x1103 + LH_CODEC_SBC16 = 0x1104 + NORRIS = 0x1400 + ISIAUDIO_2 = 0x1401 + SOUNDSPACE_MUSICOMPRESS = 0x1500 + MPEG_ADTS_AAC = 0x1600 + MPEG_RAW_AAC = 0x1601 + MPEG_LOAS = 0x1602 + NOKIA_MPEG_ADTS_AAC = 0x1608 + NOKIA_MPEG_RAW_AAC = 0x1609 + VODAFONE_MPEG_ADTS_AAC = 0x160A + VODAFONE_MPEG_RAW_AAC = 0x160B + MPEG_HEAAC = 0x1610 + VOXWARE_RT24_SPEECH = 0x181C + SONICFOUNDRY_LOSSLESS = 0x1971 + INNINGS_TELECOM_ADPCM = 0x1979 + LUCENT_SX8300P = 0x1C07 + LUCENT_SX5363S = 0x1C0C + CUSEEME = 0x1F03 + NTCSOFT_ALF2CM_ACM = 0x1FC4 + DVM = 0x2000 + DTS2 = 0x2001 + MAKEAVIS = 0x3313 + DIVIO_MPEG4_AAC = 0x4143 + NOKIA_ADAPTIVE_MULTIRATE = 0x4201 + DIVIO_G726 = 0x4243 + LEAD_SPEECH = 0x434C + LEAD_VORBIS = 0x564C + WAVPACK_AUDIO = 0x5756 + OGG_VORBIS_MODE_1 = 0x674F + OGG_VORBIS_MODE_2 = 0x6750 + OGG_VORBIS_MODE_3 = 0x6751 + OGG_VORBIS_MODE_1_PLUS = 0x676F + OGG_VORBIS_MODE_2_PLUS = 0x6770 + OGG_VORBIS_MODE_3_PLUS = 0x6771 + ALAC = 0x6C61 + _3COM_NBX = 0x7000 # Can't have leading digit + OPUS = 0x704F + FAAD_AAC = 0x706D + AMR_NB = 0x7361 + AMR_WB = 0x7362 + AMR_WP = 0x7363 + GSM_AMR_CBR = 0x7A21 + GSM_AMR_VBR_SID = 0x7A22 + COMVERSE_INFOSYS_G723_1 = 0xA100 + COMVERSE_INFOSYS_AVQSBC = 0xA101 + COMVERSE_INFOSYS_SBC = 0xA102 + SYMBOL_G729_A = 0xA103 + VOICEAGE_AMR_WB = 0xA104 + INGENIENT_G726 = 0xA105 + MPEG4_AAC = 0xA106 + ENCORE_G726 = 0xA107 + ZOLL_ASAO = 0xA108 + SPEEX_VOICE = 0xA109 + VIANIX_MASC = 0xA10A + WM9_SPECTRUM_ANALYZER = 0xA10B + WMF_SPECTRUM_ANAYZER = 0xA10C + GSM_610 = 0xA10D + GSM_620 = 0xA10E + GSM_660 = 0xA10F + GSM_690 = 0xA110 + GSM_ADAPTIVE_MULTIRATE_WB = 0xA111 + POLYCOM_G722 = 0xA112 + POLYCOM_G728 = 0xA113 + POLYCOM_G729_A = 0xA114 + POLYCOM_SIREN = 0xA115 + GLOBAL_IP_ILBC = 0xA116 + RADIOTIME_TIME_SHIFT_RADIO = 0xA117 + NICE_ACA = 0xA118 + NICE_ADPCM = 0xA119 + VOCORD_G721 = 0xA11A + VOCORD_G726 = 0xA11B + VOCORD_G722_1 = 0xA11C + VOCORD_G728 = 0xA11D + VOCORD_G729 = 0xA11E + VOCORD_G729_A = 0xA11F + VOCORD_G723_1 = 0xA120 + VOCORD_LBC = 0xA121 + NICE_G728 = 0xA122 + FRACE_TELECOM_G729 = 0xA123 + CODIAN = 0xA124 + FLAC = 0xF1AC + EXTENSIBLE = 0xFFFE + DEVELOPMENT = 0xFFFF + + +KNOWN_WAVE_FORMATS = {WAVE_FORMAT.PCM, WAVE_FORMAT.IEEE_FLOAT} + + +def _raise_bad_format(format_tag): + try: + format_name = WAVE_FORMAT(format_tag).name + except ValueError: + format_name = f'{format_tag:#06x}' + raise ValueError(f"Unknown wave file format: {format_name}. 
Supported " + "formats: " + + ', '.join(x.name for x in KNOWN_WAVE_FORMATS)) + + +def _read_fmt_chunk(fid, is_big_endian): + """ + Returns + ------- + size : int + size of format subchunk in bytes (minus 8 for "fmt " and itself) + format_tag : int + PCM, float, or compressed format + channels : int + number of channels + fs : int + sampling frequency in samples per second + bytes_per_second : int + overall byte rate for the file + block_align : int + bytes per sample, including all channels + bit_depth : int + bits per sample + + Notes + ----- + Assumes file pointer is immediately after the 'fmt ' id + """ + if is_big_endian: + fmt = '>' + else: + fmt = '<' + + size = struct.unpack(fmt+'I', fid.read(4))[0] + + if size < 16: + raise ValueError("Binary structure of wave file is not compliant") + + res = struct.unpack(fmt+'HHIIHH', fid.read(16)) + bytes_read = 16 + + format_tag, channels, fs, bytes_per_second, block_align, bit_depth = res + + if format_tag == WAVE_FORMAT.EXTENSIBLE and size >= (16+2): + ext_chunk_size = struct.unpack(fmt+'H', fid.read(2))[0] + bytes_read += 2 + if ext_chunk_size >= 22: + extensible_chunk_data = fid.read(22) + bytes_read += 22 + raw_guid = extensible_chunk_data[2+4:2+4+16] + # GUID template {XXXXXXXX-0000-0010-8000-00AA00389B71} (RFC-2361) + # MS GUID byte order: first three groups are native byte order, + # rest is Big Endian + if is_big_endian: + tail = b'\x00\x00\x00\x10\x80\x00\x00\xAA\x00\x38\x9B\x71' + else: + tail = b'\x00\x00\x10\x00\x80\x00\x00\xAA\x00\x38\x9B\x71' + if raw_guid.endswith(tail): + format_tag = struct.unpack(fmt+'I', raw_guid[:4])[0] + else: + raise ValueError("Binary structure of wave file is not compliant") + + if format_tag not in KNOWN_WAVE_FORMATS: + _raise_bad_format(format_tag) + + # move file pointer to next chunk + if size > bytes_read: + fid.read(size - bytes_read) + + # fmt should always be 16, 18 or 40, but handle it just in case + _handle_pad_byte(fid, size) + + if format_tag == WAVE_FORMAT.PCM: + if bytes_per_second != fs * block_align: + raise ValueError("WAV header is invalid: nAvgBytesPerSec must" + " equal product of nSamplesPerSec and" + " nBlockAlign, but file has nSamplesPerSec =" + f" {fs}, nBlockAlign = {block_align}, and" + f" nAvgBytesPerSec = {bytes_per_second}") + + return (size, format_tag, channels, fs, bytes_per_second, block_align, + bit_depth) + + +def _read_data_chunk(fid, format_tag, channels, bit_depth, is_big_endian, is_rf64, + block_align, mmap=False): + """ + Notes + ----- + Assumes file pointer is immediately after the 'data' id + + It's possible to not use all available bits in a container, or to store + samples in a container bigger than necessary, so bytes_per_sample uses + the actual reported container size (nBlockAlign / nChannels). 
Real-world + examples: + + Adobe Audition's "24-bit packed int (type 1, 20-bit)" + + nChannels = 2, nBlockAlign = 6, wBitsPerSample = 20 + + http://www-mmsp.ece.mcgill.ca/Documents/AudioFormats/WAVE/Samples/AFsp/M1F1-int12-AFsp.wav + is: + + nChannels = 2, nBlockAlign = 4, wBitsPerSample = 12 + + http://www-mmsp.ece.mcgill.ca/Documents/AudioFormats/WAVE/Docs/multichaudP.pdf + gives an example of: + + nChannels = 2, nBlockAlign = 8, wBitsPerSample = 20 + """ + if is_big_endian: + fmt = '>' + else: + fmt = '<' + + # Size of the data subchunk in bytes + if not is_rf64: + size = struct.unpack(fmt+'I', fid.read(4))[0] + else: + pos = fid.tell() + # chunk size is stored in global file header for RF64 + fid.seek(28) + size = struct.unpack(' 1: + data = data.reshape(-1, channels) + return data + + +def _skip_unknown_chunk(fid, is_big_endian): + if is_big_endian: + fmt = '>I' + else: + fmt = '>> from os.path import dirname, join as pjoin + >>> from scipy.io import wavfile + >>> import scipy.io + + Get the filename for an example .wav file from the tests/data directory. + + >>> data_dir = pjoin(dirname(scipy.io.__file__), 'tests', 'data') + >>> wav_fname = pjoin(data_dir, 'test-44100Hz-2ch-32bit-float-be.wav') + + Load the .wav file contents. + + >>> samplerate, data = wavfile.read(wav_fname) + >>> print(f"number of channels = {data.shape[1]}") + number of channels = 2 + >>> length = data.shape[0] / samplerate + >>> print(f"length = {length}s") + length = 0.01s + + Plot the waveform. + + >>> import matplotlib.pyplot as plt + >>> import numpy as np + >>> time = np.linspace(0., length, data.shape[0]) + >>> plt.plot(time, data[:, 0], label="Left channel") + >>> plt.plot(time, data[:, 1], label="Right channel") + >>> plt.legend() + >>> plt.xlabel("Time [s]") + >>> plt.ylabel("Amplitude") + >>> plt.show() + + """ + if hasattr(filename, 'read'): + fid = filename + mmap = False + else: + fid = open(filename, 'rb') + + try: + file_size, is_big_endian, is_rf64 = _read_riff_chunk(fid) + fmt_chunk_received = False + data_chunk_received = False + while fid.tell() < file_size: + # read the next chunk + chunk_id = fid.read(4) + + if not chunk_id: + if data_chunk_received: + # End of file but data successfully read + warnings.warn( + f"Reached EOF prematurely; finished at {fid.tell():d} bytes, " + f"expected {file_size:d} bytes from header.", + WavFileWarning, stacklevel=2) + break + else: + raise ValueError("Unexpected end of file.") + elif len(chunk_id) < 4: + msg = f"Incomplete chunk ID: {repr(chunk_id)}" + # If we have the data, ignore the broken chunk + if fmt_chunk_received and data_chunk_received: + warnings.warn(msg + ", ignoring it.", WavFileWarning, + stacklevel=2) + else: + raise ValueError(msg) + + if chunk_id == b'fmt ': + fmt_chunk_received = True + fmt_chunk = _read_fmt_chunk(fid, is_big_endian) + format_tag, channels, fs = fmt_chunk[1:4] + bit_depth = fmt_chunk[6] + block_align = fmt_chunk[5] + elif chunk_id == b'fact': + _skip_unknown_chunk(fid, is_big_endian) + elif chunk_id == b'data': + data_chunk_received = True + if not fmt_chunk_received: + raise ValueError("No fmt chunk before data") + data = _read_data_chunk(fid, format_tag, channels, bit_depth, + is_big_endian, is_rf64, block_align, mmap) + elif chunk_id == b'LIST': + # Someday this could be handled properly but for now skip it + _skip_unknown_chunk(fid, is_big_endian) + elif chunk_id in {b'JUNK', b'Fake'}: + # Skip alignment chunks without warning + _skip_unknown_chunk(fid, is_big_endian) + else: + warnings.warn("Chunk (non-data) not 
understood, skipping it.", + WavFileWarning, stacklevel=2) + _skip_unknown_chunk(fid, is_big_endian) + finally: + if not hasattr(filename, 'read'): + fid.close() + else: + fid.seek(0) + + return fs, data + + +def write(filename, rate, data): + """ + Write a NumPy array as a WAV file. + + Parameters + ---------- + filename : string or open file handle + Output wav file. + rate : int + The sample rate (in samples/sec). + data : ndarray + A 1-D or 2-D NumPy array of either integer or float data-type. + + Notes + ----- + * Writes a simple uncompressed WAV file. + * To write multiple-channels, use a 2-D array of shape + (Nsamples, Nchannels). + * The bits-per-sample and PCM/float will be determined by the data-type. + + Common data types: [1]_ + + ===================== =========== =========== ============= + WAV format Min Max NumPy dtype + ===================== =========== =========== ============= + 32-bit floating-point -1.0 +1.0 float32 + 32-bit PCM -2147483648 +2147483647 int32 + 16-bit PCM -32768 +32767 int16 + 8-bit PCM 0 255 uint8 + ===================== =========== =========== ============= + + Note that 8-bit PCM is unsigned. + + References + ---------- + .. [1] IBM Corporation and Microsoft Corporation, "Multimedia Programming + Interface and Data Specifications 1.0", section "Data Format of the + Samples", August 1991 + http://www.tactilemedia.com/info/MCI_Control_Info.html + + Examples + -------- + Create a 100Hz sine wave, sampled at 44100Hz. + Write to 16-bit PCM, Mono. + + >>> from scipy.io.wavfile import write + >>> import numpy as np + >>> samplerate = 44100; fs = 100 + >>> t = np.linspace(0., 1., samplerate) + >>> amplitude = np.iinfo(np.int16).max + >>> data = amplitude * np.sin(2. * np.pi * fs * t) + >>> write("example.wav", samplerate, data.astype(np.int16)) + + """ + if hasattr(filename, 'write'): + fid = filename + else: + fid = open(filename, 'wb') + + fs = rate + + try: + dkind = data.dtype.kind + allowed_dtypes = ['float32', 'float64', + 'uint8', 'int16', 'int32', 'int64'] + if data.dtype.name not in allowed_dtypes: + raise ValueError(f"Unsupported data type '{data.dtype}'") + + header_data = b'' + + header_data += b'RIFF' + header_data += b'\x00\x00\x00\x00' + header_data += b'WAVE' + + # fmt chunk + header_data += b'fmt ' + if dkind == 'f': + format_tag = WAVE_FORMAT.IEEE_FLOAT + else: + format_tag = WAVE_FORMAT.PCM + if data.ndim == 1: + channels = 1 + else: + channels = data.shape[1] + bit_depth = data.dtype.itemsize * 8 + bytes_per_second = fs*(bit_depth // 8)*channels + block_align = channels * (bit_depth // 8) + + fmt_chunk_data = struct.pack(' 0xFFFFFFFF + if is_rf64: + header_data = b'' + header_data += b'RF64' + header_data += b'\xFF\xFF\xFF\xFF' + header_data += b'WAVE' + header_data += b'ds64' + # size of ds64 chunk + header_data += struct.pack('' or (data.dtype.byteorder == '=' and + sys.byteorder == 'big'): + data = data.byteswap() + _array_tofile(fid, data) + + # Determine file size and place it in correct + # position at start of the file or the data chunk. 
+ size = fid.tell() + if not is_rf64: + fid.seek(4) + fid.write(struct.pack('=1, <= 1.62 | +| | | | | - brenth | - Yes | - >=1, <= 1.62 | +| | | | | - ridder | - Yes | - 2.0 (1.41) | +| | | | | - toms748 | - Yes | - 2.7 (1.65) | ++-------------+----------+----------+-----------+-------------+-------------+----------------+ +| `R` or `C` | No | No | No | secant | No | 1.62 (1.62) | ++-------------+----------+----------+-----------+-------------+-------------+----------------+ +| `R` or `C` | No | Yes | No | newton | No | 2.00 (1.41) | ++-------------+----------+----------+-----------+-------------+-------------+----------------+ +| `R` or `C` | No | Yes | Yes | halley | No | 3.00 (1.44) | ++-------------+----------+----------+-----------+-------------+-------------+----------------+ + +.. seealso:: + + `scipy.optimize.cython_optimize` -- Typed Cython versions of root finding functions + +Fixed point finding: + +.. autosummary:: + :toctree: generated/ + + fixed_point - Single-variable fixed-point solver. + +Multidimensional +---------------- + +.. autosummary:: + :toctree: generated/ + + root - Unified interface for nonlinear solvers of multivariate functions. + +The `root` function supports the following methods: + +.. toctree:: + + optimize.root-hybr + optimize.root-lm + optimize.root-broyden1 + optimize.root-broyden2 + optimize.root-anderson + optimize.root-linearmixing + optimize.root-diagbroyden + optimize.root-excitingmixing + optimize.root-krylov + optimize.root-dfsane + +Elementwise Minimization and Root Finding +========================================= + +.. toctree:: + :maxdepth: 3 + + optimize.elementwise + +Linear programming / MILP +========================= + +.. autosummary:: + :toctree: generated/ + + milp -- Mixed integer linear programming. + linprog -- Unified interface for minimizers of linear programming problems. + +The `linprog` function supports the following methods: + +.. toctree:: + + optimize.linprog-simplex + optimize.linprog-interior-point + optimize.linprog-revised_simplex + optimize.linprog-highs-ipm + optimize.linprog-highs-ds + optimize.linprog-highs + +The simplex, interior-point, and revised simplex methods support callback +functions, such as: + +.. autosummary:: + :toctree: generated/ + + linprog_verbose_callback -- Sample callback function for linprog (simplex). + +Assignment problems +=================== + +.. autosummary:: + :toctree: generated/ + + linear_sum_assignment -- Solves the linear-sum assignment problem. + quadratic_assignment -- Solves the quadratic assignment problem. + +The `quadratic_assignment` function supports the following methods: + +.. toctree:: + + optimize.qap-faq + optimize.qap-2opt + +Utilities +========= + +Finite-difference approximation +------------------------------- + +.. autosummary:: + :toctree: generated/ + + approx_fprime - Approximate the gradient of a scalar function. + check_grad - Check the supplied derivative using finite differences. + + +Line search +----------- + +.. autosummary:: + :toctree: generated/ + + bracket - Bracket a minimum, given two starting points. + line_search - Return a step that satisfies the strong Wolfe conditions. + +Hessian approximation +--------------------- + +.. autosummary:: + :toctree: generated/ + + LbfgsInvHessProduct - Linear operator for L-BFGS approximate inverse Hessian. + HessianUpdateStrategy - Interface for implementing Hessian update strategies + +Benchmark problems +------------------ + +.. autosummary:: + :toctree: generated/ + + rosen - The Rosenbrock function. 
+ rosen_der - The derivative of the Rosenbrock function. + rosen_hess - The Hessian matrix of the Rosenbrock function. + rosen_hess_prod - Product of the Rosenbrock Hessian with a vector. + +Legacy functions +================ + +The functions below are not recommended for use in new scripts; +all of these methods are accessible via a newer, more consistent +interfaces, provided by the interfaces above. + +Optimization +------------ + +General-purpose multivariate methods: + +.. autosummary:: + :toctree: generated/ + + fmin - Nelder-Mead Simplex algorithm. + fmin_powell - Powell's (modified) conjugate direction method. + fmin_cg - Non-linear (Polak-Ribiere) conjugate gradient algorithm. + fmin_bfgs - Quasi-Newton method (Broydon-Fletcher-Goldfarb-Shanno). + fmin_ncg - Line-search Newton Conjugate Gradient. + +Constrained multivariate methods: + +.. autosummary:: + :toctree: generated/ + + fmin_l_bfgs_b - Zhu, Byrd, and Nocedal's constrained optimizer. + fmin_tnc - Truncated Newton code. + fmin_cobyla - Constrained optimization by linear approximation. + fmin_slsqp - Minimization using sequential least-squares programming. + +Univariate (scalar) minimization methods: + +.. autosummary:: + :toctree: generated/ + + fminbound - Bounded minimization of a scalar function. + brent - 1-D function minimization using Brent method. + golden - 1-D function minimization using Golden Section method. + +Least-squares +------------- + +.. autosummary:: + :toctree: generated/ + + leastsq - Minimize the sum of squares of M equations in N unknowns. + +Root finding +------------ + +General nonlinear solvers: + +.. autosummary:: + :toctree: generated/ + + fsolve - Non-linear multivariable equation solver. + broyden1 - Broyden's first method. + broyden2 - Broyden's second method. + NoConvergence - Exception raised when nonlinear solver does not converge. + +Large-scale nonlinear solvers: + +.. autosummary:: + :toctree: generated/ + + newton_krylov + anderson + + BroydenFirst + InverseJacobian + KrylovJacobian + +Simple iteration solvers: + +.. autosummary:: + :toctree: generated/ + + excitingmixing + linearmixing + diagbroyden + +""" # noqa: E501 + +from ._optimize import * +from ._minimize import * +from ._root import * +from ._root_scalar import * +from ._minpack_py import * +from ._zeros_py import * +from ._lbfgsb_py import fmin_l_bfgs_b, LbfgsInvHessProduct +from ._tnc import fmin_tnc +from ._cobyla_py import fmin_cobyla +from ._nonlin import * +from ._slsqp_py import fmin_slsqp +from ._nnls import nnls +from ._basinhopping import basinhopping +from ._linprog import linprog, linprog_verbose_callback +from ._lsap import linear_sum_assignment +from ._differentialevolution import differential_evolution +from ._lsq import least_squares, lsq_linear +from ._isotonic import isotonic_regression +from ._constraints import (NonlinearConstraint, + LinearConstraint, + Bounds) +from ._hessian_update_strategy import HessianUpdateStrategy, BFGS, SR1 +from ._shgo import shgo +from ._dual_annealing import dual_annealing +from ._qap import quadratic_assignment +from ._direct_py import direct +from ._milp import milp + +# Deprecated namespaces, to be removed in v2.0.0 +from . 
import ( + cobyla, lbfgsb, linesearch, minpack, minpack2, moduleTNC, nonlin, optimize, + slsqp, tnc, zeros +) + +__all__ = [s for s in dir() if not s.startswith('_')] + +from scipy._lib._testutils import PytestTester +test = PytestTester(__name__) +del PytestTester
diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_basinhopping.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_basinhopping.py new file mode 100644 index 0000000000000000000000000000000000000000..90498155887fc45ba0748c0d798bda17caef39f0 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_basinhopping.py @@ -0,0 +1,735 @@ +""" +basinhopping: The basinhopping global optimization algorithm +""" +import numpy as np +import math +import inspect +import scipy.optimize +from scipy._lib._util import check_random_state, _transition_to_rng + +__all__ = ['basinhopping'] + + +_params = (inspect.Parameter('res_new', kind=inspect.Parameter.KEYWORD_ONLY), + inspect.Parameter('res_old', kind=inspect.Parameter.KEYWORD_ONLY)) +_new_accept_test_signature = inspect.Signature(parameters=_params) + + +class Storage: + 
""" + Class used to store the lowest energy structure + """ + def __init__(self, minres): + self._add(minres) + + def _add(self, minres): + self.minres = minres + self.minres.x = np.copy(minres.x) + + def update(self, minres): + if minres.success and (minres.fun < self.minres.fun + or not self.minres.success): + self._add(minres) + return True + else: + return False + + def get_lowest(self): + return self.minres + + +class BasinHoppingRunner: + """This class implements the core of the basinhopping algorithm. + + x0 : ndarray + The starting coordinates. + minimizer : callable + The local minimizer, with signature ``result = minimizer(x)``. + The return value is an `optimize.OptimizeResult` object. + step_taking : callable + This function displaces the coordinates randomly. Signature should + be ``x_new = step_taking(x)``. Note that `x` may be modified in-place. + accept_tests : list of callables + Each test is passed the kwargs `f_new`, `x_new`, `f_old` and + `x_old`. These tests will be used to judge whether or not to accept + the step. The acceptable return values are True, False, or ``"force + accept"``. If any of the tests return False then the step is rejected. + If ``"force accept"``, then this will override any other tests in + order to accept the step. This can be used, for example, to forcefully + escape from a local minimum that ``basinhopping`` is trapped in. + disp : bool, optional + Display status messages. + + """ + def __init__(self, x0, minimizer, step_taking, accept_tests, disp=False): + self.x = np.copy(x0) + self.minimizer = minimizer + self.step_taking = step_taking + self.accept_tests = accept_tests + self.disp = disp + + self.nstep = 0 + + # initialize return object + self.res = scipy.optimize.OptimizeResult() + self.res.minimization_failures = 0 + + # do initial minimization + minres = minimizer(self.x) + if not minres.success: + self.res.minimization_failures += 1 + if self.disp: + print("warning: basinhopping: local minimization failure") + self.x = np.copy(minres.x) + self.energy = minres.fun + self.incumbent_minres = minres # best minimize result found so far + if self.disp: + print("basinhopping step %d: f %g" % (self.nstep, self.energy)) + + # initialize storage class + self.storage = Storage(minres) + + if hasattr(minres, "nfev"): + self.res.nfev = minres.nfev + if hasattr(minres, "njev"): + self.res.njev = minres.njev + if hasattr(minres, "nhev"): + self.res.nhev = minres.nhev + + def _monte_carlo_step(self): + """Do one Monte Carlo iteration + + Randomly displace the coordinates, minimize, and decide whether + or not to accept the new coordinates. + """ + # Take a random step. Make a copy of x because the step_taking + # algorithm might change x in place + x_after_step = np.copy(self.x) + x_after_step = self.step_taking(x_after_step) + + # do a local minimization + minres = self.minimizer(x_after_step) + x_after_quench = minres.x + energy_after_quench = minres.fun + if not minres.success: + self.res.minimization_failures += 1 + if self.disp: + print("warning: basinhopping: local minimization failure") + if hasattr(minres, "nfev"): + self.res.nfev += minres.nfev + if hasattr(minres, "njev"): + self.res.njev += minres.njev + if hasattr(minres, "nhev"): + self.res.nhev += minres.nhev + + # accept the move based on self.accept_tests. If any test is False, + # then reject the step. If any test returns the special string + # 'force accept', then accept the step regardless. 
This can be used + # to forcefully escape from a local minimum if normal basin hopping + # steps are not sufficient. + accept = True + for test in self.accept_tests: + if inspect.signature(test) == _new_accept_test_signature: + testres = test(res_new=minres, res_old=self.incumbent_minres) + else: + testres = test(f_new=energy_after_quench, x_new=x_after_quench, + f_old=self.energy, x_old=self.x) + + if testres == 'force accept': + accept = True + break + elif testres is None: + raise ValueError("accept_tests must return True, False, or " + "'force accept'") + elif not testres: + accept = False + + # Report the result of the acceptance test to the take step class. + # This is for adaptive step taking + if hasattr(self.step_taking, "report"): + self.step_taking.report(accept, f_new=energy_after_quench, + x_new=x_after_quench, f_old=self.energy, + x_old=self.x) + + return accept, minres + + def one_cycle(self): + """Do one cycle of the basinhopping algorithm + """ + self.nstep += 1 + new_global_min = False + + accept, minres = self._monte_carlo_step() + + if accept: + self.energy = minres.fun + self.x = np.copy(minres.x) + self.incumbent_minres = minres # best minimize result found so far + new_global_min = self.storage.update(minres) + + # print some information + if self.disp: + self.print_report(minres.fun, accept) + if new_global_min: + print("found new global minimum on step %d with function" + " value %g" % (self.nstep, self.energy)) + + # save some variables as BasinHoppingRunner attributes + self.xtrial = minres.x + self.energy_trial = minres.fun + self.accept = accept + + return new_global_min + + def print_report(self, energy_trial, accept): + """print a status update""" + minres = self.storage.get_lowest() + print("basinhopping step %d: f %g trial_f %g accepted %d " + " lowest_f %g" % (self.nstep, self.energy, energy_trial, + accept, minres.fun)) + + +class AdaptiveStepsize: + """ + Class to implement adaptive stepsize. + + This class wraps the step taking class and modifies the stepsize to + ensure the true acceptance rate is as close as possible to the target. + + Parameters + ---------- + takestep : callable + The step taking routine. Must contain modifiable attribute + takestep.stepsize + accept_rate : float, optional + The target step acceptance rate + interval : int, optional + Interval for how often to update the stepsize + factor : float, optional + The step size is multiplied or divided by this factor upon each + update. + verbose : bool, optional + Print information about each update + + """ + def __init__(self, takestep, accept_rate=0.5, interval=50, factor=0.9, + verbose=True): + self.takestep = takestep + self.target_accept_rate = accept_rate + self.interval = interval + self.factor = factor + self.verbose = verbose + + self.nstep = 0 + self.nstep_tot = 0 + self.naccept = 0 + + def __call__(self, x): + return self.take_step(x) + + def _adjust_step_size(self): + old_stepsize = self.takestep.stepsize + accept_rate = float(self.naccept) / self.nstep + if accept_rate > self.target_accept_rate: + # We're accepting too many steps. This generally means we're + # trapped in a basin. Take bigger steps. + self.takestep.stepsize /= self.factor + else: + # We're not accepting enough steps. Take smaller steps. 
+ self.takestep.stepsize *= self.factor + if self.verbose: + print(f"adaptive stepsize: acceptance rate {accept_rate:f} target " + f"{self.target_accept_rate:f} new stepsize " + f"{self.takestep.stepsize:g} old stepsize {old_stepsize:g}") + + def take_step(self, x): + self.nstep += 1 + self.nstep_tot += 1 + if self.nstep % self.interval == 0: + self._adjust_step_size() + return self.takestep(x) + + def report(self, accept, **kwargs): + "called by basinhopping to report the result of the step" + if accept: + self.naccept += 1 + + +class RandomDisplacement: + """Add a random displacement of maximum size `stepsize` to each coordinate. + + Calling this updates `x` in-place. + + Parameters + ---------- + stepsize : float, optional + Maximum stepsize in any dimension + rng : {None, int, `numpy.random.Generator`}, optional + Random number generator + """ + + def __init__(self, stepsize=0.5, rng=None): + self.stepsize = stepsize + self.rng = check_random_state(rng) + + def __call__(self, x): + x += self.rng.uniform(-self.stepsize, self.stepsize, + np.shape(x)) + return x + + +class MinimizerWrapper: + """ + wrap a minimizer function as a minimizer class + """ + def __init__(self, minimizer, func=None, **kwargs): + self.minimizer = minimizer + self.func = func + self.kwargs = kwargs + + def __call__(self, x0): + if self.func is None: + return self.minimizer(x0, **self.kwargs) + else: + return self.minimizer(self.func, x0, **self.kwargs) + + +class Metropolis: + """Metropolis acceptance criterion. + + Parameters + ---------- + T : float + The "temperature" parameter for the accept or reject criterion. + rng : {None, int, `numpy.random.Generator`}, optional + Random number generator used for acceptance test. + + """ + + def __init__(self, T, rng=None): + # Avoid ZeroDivisionError since "MBH can be regarded as a special case + # of the BH framework with the Metropolis criterion, where temperature + # T = 0." (Reject all steps that increase energy.) + self.beta = 1.0 / T if T != 0 else float('inf') + self.rng = check_random_state(rng) + + def accept_reject(self, res_new, res_old): + """ + Assuming the local search underlying res_new was successful: + If new energy is lower than old, it will always be accepted. + If new is higher than old, there is a chance it will be accepted, + less likely for larger differences. + """ + with np.errstate(invalid='ignore'): + # The energy values being fed to Metropolis are 1-length arrays, and if + # they are equal, their difference is 0, which gets multiplied by beta, + # which is inf, and array([0]) * float('inf') causes + # + # RuntimeWarning: invalid value encountered in multiply + # + # Ignore this warning so when the algorithm is on a flat plane, it always + # accepts the step, to try to move off the plane. + prod = -(res_new.fun - res_old.fun) * self.beta + w = math.exp(min(0, prod)) + + rand = self.rng.uniform() + return w >= rand and (res_new.success or not res_old.success) + + def __call__(self, *, res_new, res_old): + """ + f_new and f_old are mandatory in kwargs + """ + return bool(self.accept_reject(res_new, res_old)) + + +@_transition_to_rng("seed", position_num=12, replace_doc=True) +def basinhopping(func, x0, niter=100, T=1.0, stepsize=0.5, + minimizer_kwargs=None, take_step=None, accept_test=None, + callback=None, interval=50, disp=False, niter_success=None, + rng=None, *, target_accept_rate=0.5, stepwise_factor=0.9): + """Find the global minimum of a function using the basin-hopping algorithm. 
+ + Basin-hopping is a two-phase method that combines a global stepping + algorithm with local minimization at each step. Designed to mimic + the natural process of energy minimization of clusters of atoms, it works + well for similar problems with "funnel-like, but rugged" energy landscapes + [5]_. + + As the step-taking, step acceptance, and minimization methods are all + customizable, this function can also be used to implement other two-phase + methods. + + Parameters + ---------- + func : callable ``f(x, *args)`` + Function to be optimized. ``args`` can be passed as an optional item + in the dict `minimizer_kwargs` + x0 : array_like + Initial guess. + niter : integer, optional + The number of basin-hopping iterations. There will be a total of + ``niter + 1`` runs of the local minimizer. + T : float, optional + The "temperature" parameter for the acceptance or rejection criterion. + Higher "temperatures" mean that larger jumps in function value will be + accepted. For best results `T` should be comparable to the + separation (in function value) between local minima. + stepsize : float, optional + Maximum step size for use in the random displacement. + minimizer_kwargs : dict, optional + Extra keyword arguments to be passed to the local minimizer + `scipy.optimize.minimize` Some important options could be: + + method : str + The minimization method (e.g. ``"L-BFGS-B"``) + args : tuple + Extra arguments passed to the objective function (`func`) and + its derivatives (Jacobian, Hessian). + + take_step : callable ``take_step(x)``, optional + Replace the default step-taking routine with this routine. The default + step-taking routine is a random displacement of the coordinates, but + other step-taking algorithms may be better for some systems. + `take_step` can optionally have the attribute ``take_step.stepsize``. + If this attribute exists, then `basinhopping` will adjust + ``take_step.stepsize`` in order to try to optimize the global minimum + search. + accept_test : callable, ``accept_test(f_new=f_new, x_new=x_new, f_old=fold, x_old=x_old)``, optional + Define a test which will be used to judge whether to accept the + step. This will be used in addition to the Metropolis test based on + "temperature" `T`. The acceptable return values are True, + False, or ``"force accept"``. If any of the tests return False + then the step is rejected. If the latter, then this will override any + other tests in order to accept the step. This can be used, for example, + to forcefully escape from a local minimum that `basinhopping` is + trapped in. + callback : callable, ``callback(x, f, accept)``, optional + A callback function which will be called for all minima found. ``x`` + and ``f`` are the coordinates and function value of the trial minimum, + and ``accept`` is whether that minimum was accepted. This can + be used, for example, to save the lowest N minima found. Also, + `callback` can be used to specify a user defined stop criterion by + optionally returning True to stop the `basinhopping` routine. + interval : integer, optional + interval for how often to update the `stepsize` + disp : bool, optional + Set to True to print status messages + niter_success : integer, optional + Stop the run if the global minimum candidate remains the same for this + number of iterations. + rng : `numpy.random.Generator`, optional + Pseudorandom number generator state. When `rng` is None, a new + `numpy.random.Generator` is created using entropy from the + operating system. 
Types other than `numpy.random.Generator` are + passed to `numpy.random.default_rng` to instantiate a ``Generator``. + + The random numbers generated only affect the default Metropolis + `accept_test` and the default `take_step`. If you supply your own + `take_step` and `accept_test`, and these functions use random + number generation, then those functions are responsible for the state + of their random number generator. + target_accept_rate : float, optional + The target acceptance rate that is used to adjust the `stepsize`. + If the current acceptance rate is greater than the target, + then the `stepsize` is increased. Otherwise, it is decreased. + Range is (0, 1). Default is 0.5. + + .. versionadded:: 1.8.0 + + stepwise_factor : float, optional + The `stepsize` is multiplied or divided by this stepwise factor upon + each update. Range is (0, 1). Default is 0.9. + + .. versionadded:: 1.8.0 + + Returns + ------- + res : OptimizeResult + The optimization result represented as a `OptimizeResult` object. + Important attributes are: ``x`` the solution array, ``fun`` the value + of the function at the solution, and ``message`` which describes the + cause of the termination. The ``OptimizeResult`` object returned by the + selected minimizer at the lowest minimum is also contained within this + object and can be accessed through the ``lowest_optimization_result`` + attribute. See `OptimizeResult` for a description of other attributes. + + See Also + -------- + minimize : + The local minimization function called once for each basinhopping step. + `minimizer_kwargs` is passed to this routine. + + Notes + ----- + Basin-hopping is a stochastic algorithm which attempts to find the global + minimum of a smooth scalar function of one or more variables [1]_ [2]_ [3]_ + [4]_. The algorithm in its current form was described by David Wales and + Jonathan Doye [2]_ http://www-wales.ch.cam.ac.uk/. + + The algorithm is iterative with each cycle composed of the following + features + + 1) random perturbation of the coordinates + + 2) local minimization + + 3) accept or reject the new coordinates based on the minimized function + value + + The acceptance test used here is the Metropolis criterion of standard Monte + Carlo algorithms, although there are many other possibilities [3]_. + + This global minimization method has been shown to be extremely efficient + for a wide variety of problems in physics and chemistry. It is + particularly useful when the function has many minima separated by large + barriers. See the `Cambridge Cluster Database + `_ for databases of molecular + systems that have been optimized primarily using basin-hopping. This + database includes minimization problems exceeding 300 degrees of freedom. + + See the free software program `GMIN `_ + for a Fortran implementation of basin-hopping. This implementation has many + variations of the procedure described above, including more + advanced step taking algorithms and alternate acceptance criterion. + + For stochastic global optimization there is no way to determine if the true + global minimum has actually been found. Instead, as a consistency check, + the algorithm can be run from a number of different random starting points + to ensure the lowest minimum found in each example has converged to the + global minimum. For this reason, `basinhopping` will by default simply + run for the number of iterations `niter` and return the lowest minimum + found. It is left to the user to ensure that this is in fact the global + minimum. 
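+
+    A minimal sketch of such a multi-start consistency check (illustrative
+    only: the objective function, the sampling range of the starting points,
+    and ``niter`` are arbitrary choices here)::
+
+        import numpy as np
+        from scipy.optimize import basinhopping
+
+        def f(x):
+            return np.cos(14.5 * x[0] - 0.3) + (x[0] + 0.2) * x[0]
+
+        rng = np.random.default_rng()
+        runs = [basinhopping(f, rng.uniform(-2.0, 2.0, size=1), niter=50)
+                for _ in range(5)]
+        best = min(runs, key=lambda r: r.fun)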
+ + Choosing `stepsize`: This is a crucial parameter in `basinhopping` and + depends on the problem being solved. The step is chosen uniformly in the + region from x0-stepsize to x0+stepsize, in each dimension. Ideally, it + should be comparable to the typical separation (in argument values) between + local minima of the function being optimized. `basinhopping` will, by + default, adjust `stepsize` to find an optimal value, but this may take + many iterations. You will get quicker results if you set a sensible + initial value for ``stepsize``. + + Choosing `T`: The parameter `T` is the "temperature" used in the + Metropolis criterion. Basinhopping steps are always accepted if + ``func(xnew) < func(xold)``. Otherwise, they are accepted with + probability:: + + exp( -(func(xnew) - func(xold)) / T ) + + So, for best results, `T` should to be comparable to the typical + difference (in function values) between local minima. (The height of + "walls" between local minima is irrelevant.) + + If `T` is 0, the algorithm becomes Monotonic Basin-Hopping, in which all + steps that increase energy are rejected. + + .. versionadded:: 0.12.0 + + References + ---------- + .. [1] Wales, David J. 2003, Energy Landscapes, Cambridge University Press, + Cambridge, UK. + .. [2] Wales, D J, and Doye J P K, Global Optimization by Basin-Hopping and + the Lowest Energy Structures of Lennard-Jones Clusters Containing up to + 110 Atoms. Journal of Physical Chemistry A, 1997, 101, 5111. + .. [3] Li, Z. and Scheraga, H. A., Monte Carlo-minimization approach to the + multiple-minima problem in protein folding, Proc. Natl. Acad. Sci. USA, + 1987, 84, 6611. + .. [4] Wales, D. J. and Scheraga, H. A., Global optimization of clusters, + crystals, and biomolecules, Science, 1999, 285, 1368. + .. [5] Olson, B., Hashmi, I., Molloy, K., and Shehu1, A., Basin Hopping as + a General and Versatile Optimization Framework for the Characterization + of Biological Macromolecules, Advances in Artificial Intelligence, + Volume 2012 (2012), Article ID 674832, :doi:`10.1155/2012/674832` + + Examples + -------- + The following example is a 1-D minimization problem, with many + local minima superimposed on a parabola. + + >>> import numpy as np + >>> from scipy.optimize import basinhopping + >>> func = lambda x: np.cos(14.5 * x - 0.3) + (x + 0.2) * x + >>> x0 = [1.] + + Basinhopping, internally, uses a local minimization algorithm. We will use + the parameter `minimizer_kwargs` to tell basinhopping which algorithm to + use and how to set up that minimizer. This parameter will be passed to + `scipy.optimize.minimize`. + + >>> minimizer_kwargs = {"method": "BFGS"} + >>> ret = basinhopping(func, x0, minimizer_kwargs=minimizer_kwargs, + ... niter=200) + >>> # the global minimum is: + >>> ret.x, ret.fun + -0.1951, -1.0009 + + Next consider a 2-D minimization problem. Also, this time, we + will use gradient information to significantly speed up the search. + + >>> def func2d(x): + ... f = np.cos(14.5 * x[0] - 0.3) + (x[1] + 0.2) * x[1] + (x[0] + + ... 0.2) * x[0] + ... df = np.zeros(2) + ... df[0] = -14.5 * np.sin(14.5 * x[0] - 0.3) + 2. * x[0] + 0.2 + ... df[1] = 2. * x[1] + 0.2 + ... return f, df + + We'll also use a different local minimization algorithm. Also, we must tell + the minimizer that our function returns both energy and gradient (Jacobian). + + >>> minimizer_kwargs = {"method":"L-BFGS-B", "jac":True} + >>> x0 = [1.0, 1.0] + >>> ret = basinhopping(func2d, x0, minimizer_kwargs=minimizer_kwargs, + ... 
niter=200) + >>> print("global minimum: x = [%.4f, %.4f], f(x) = %.4f" % (ret.x[0], + ... ret.x[1], + ... ret.fun)) + global minimum: x = [-0.1951, -0.1000], f(x) = -1.0109 + + Here is an example using a custom step-taking routine. Imagine you want + the first coordinate to take larger steps than the rest of the coordinates. + This can be implemented like so: + + >>> class MyTakeStep: + ... def __init__(self, stepsize=0.5): + ... self.stepsize = stepsize + ... self.rng = np.random.default_rng() + ... def __call__(self, x): + ... s = self.stepsize + ... x[0] += self.rng.uniform(-2.*s, 2.*s) + ... x[1:] += self.rng.uniform(-s, s, x[1:].shape) + ... return x + + Since ``MyTakeStep.stepsize`` exists basinhopping will adjust the magnitude + of `stepsize` to optimize the search. We'll use the same 2-D function as + before + + >>> mytakestep = MyTakeStep() + >>> ret = basinhopping(func2d, x0, minimizer_kwargs=minimizer_kwargs, + ... niter=200, take_step=mytakestep) + >>> print("global minimum: x = [%.4f, %.4f], f(x) = %.4f" % (ret.x[0], + ... ret.x[1], + ... ret.fun)) + global minimum: x = [-0.1951, -0.1000], f(x) = -1.0109 + + Now, let's do an example using a custom callback function which prints the + value of every minimum found + + >>> def print_fun(x, f, accepted): + ... print("at minimum %.4f accepted %d" % (f, int(accepted))) + + We'll run it for only 10 basinhopping steps this time. + + >>> rng = np.random.default_rng() + >>> ret = basinhopping(func2d, x0, minimizer_kwargs=minimizer_kwargs, + ... niter=10, callback=print_fun, rng=rng) + at minimum 0.4159 accepted 1 + at minimum -0.4317 accepted 1 + at minimum -1.0109 accepted 1 + at minimum -0.9073 accepted 1 + at minimum -0.4317 accepted 0 + at minimum -0.1021 accepted 1 + at minimum -0.7425 accepted 1 + at minimum -0.9073 accepted 1 + at minimum -0.4317 accepted 0 + at minimum -0.7425 accepted 1 + at minimum -0.9073 accepted 1 + + The minimum at -1.0109 is actually the global minimum, found already on the + 8th iteration. + + """ # numpy/numpydoc#87 # noqa: E501 + if target_accept_rate <= 0. or target_accept_rate >= 1.: + raise ValueError('target_accept_rate has to be in range (0, 1)') + if stepwise_factor <= 0. 
or stepwise_factor >= 1.: + raise ValueError('stepwise_factor has to be in range (0, 1)') + + x0 = np.array(x0) + + # set up the np.random generator + rng = check_random_state(rng) + + # set up minimizer + if minimizer_kwargs is None: + minimizer_kwargs = dict() + wrapped_minimizer = MinimizerWrapper(scipy.optimize.minimize, func, + **minimizer_kwargs) + + # set up step-taking algorithm + if take_step is not None: + if not callable(take_step): + raise TypeError("take_step must be callable") + # if take_step.stepsize exists then use AdaptiveStepsize to control + # take_step.stepsize + if hasattr(take_step, "stepsize"): + take_step_wrapped = AdaptiveStepsize( + take_step, interval=interval, + accept_rate=target_accept_rate, + factor=stepwise_factor, + verbose=disp) + else: + take_step_wrapped = take_step + else: + # use default + displace = RandomDisplacement(stepsize=stepsize, rng=rng) + take_step_wrapped = AdaptiveStepsize(displace, interval=interval, + accept_rate=target_accept_rate, + factor=stepwise_factor, + verbose=disp) + + # set up accept tests + accept_tests = [] + if accept_test is not None: + if not callable(accept_test): + raise TypeError("accept_test must be callable") + accept_tests = [accept_test] + + # use default + metropolis = Metropolis(T, rng=rng) + accept_tests.append(metropolis) + + if niter_success is None: + niter_success = niter + 2 + + bh = BasinHoppingRunner(x0, wrapped_minimizer, take_step_wrapped, + accept_tests, disp=disp) + + # The wrapped minimizer is called once during construction of + # BasinHoppingRunner, so run the callback + if callable(callback): + callback(bh.storage.minres.x, bh.storage.minres.fun, True) + + # start main iteration loop + count, i = 0, 0 + message = ["requested number of basinhopping iterations completed" + " successfully"] + for i in range(niter): + new_global_min = bh.one_cycle() + + if callable(callback): + # should we pass a copy of x? 
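+            # (The callback may request an early stop by returning True; any
+            #  other return value, including None, lets the loop continue.)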
+ val = callback(bh.xtrial, bh.energy_trial, bh.accept) + if val is not None: + if val: + message = ["callback function requested stop early by" + "returning True"] + break + + count += 1 + if new_global_min: + count = 0 + elif count > niter_success: + message = ["success condition satisfied"] + break + + # prepare return object + res = bh.res + res.lowest_optimization_result = bh.storage.get_lowest() + res.x = np.copy(res.lowest_optimization_result.x) + res.fun = res.lowest_optimization_result.fun + res.message = message + res.nit = i + 1 + res.success = res.lowest_optimization_result.success + return res diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_bracket.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_bracket.py new file mode 100644 index 0000000000000000000000000000000000000000..263243c612d08ebdc9939cc892771b49ac766d0c --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_bracket.py @@ -0,0 +1,713 @@ +import numpy as np +import scipy._lib._elementwise_iterative_method as eim +from scipy._lib._util import _RichResult +from scipy._lib._array_api import array_namespace, xp_ravel, xp_default_dtype + +_ELIMITS = -1 # used in _bracket_root +_ESTOPONESIDE = 2 # used in _bracket_root + +def _bracket_root_iv(func, xl0, xr0, xmin, xmax, factor, args, maxiter): + + if not callable(func): + raise ValueError('`func` must be callable.') + + if not np.iterable(args): + args = (args,) + + xp = array_namespace(xl0) + xl0 = xp.asarray(xl0)[()] + if (not xp.isdtype(xl0.dtype, "numeric") + or xp.isdtype(xl0.dtype, "complex floating")): + raise ValueError('`xl0` must be numeric and real.') + if not xp.isdtype(xl0.dtype, "real floating"): + xl0 = xp.asarray(xl0, dtype=xp_default_dtype(xp)) + + # If xr0 is not supplied, fill with a dummy value for the sake of + # broadcasting. We need to wait until xmax has been validated to + # compute the default value. + xr0_not_supplied = False + if xr0 is None: + xr0 = xp.nan + xr0_not_supplied = True + + xmin = -xp.inf if xmin is None else xmin + xmax = xp.inf if xmax is None else xmax + factor = 2. if factor is None else factor + xl0, xr0, xmin, xmax, factor = xp.broadcast_arrays( + xl0, xp.asarray(xr0), xp.asarray(xmin), xp.asarray(xmax), xp.asarray(factor)) + + if (not xp.isdtype(xr0.dtype, "numeric") + or xp.isdtype(xr0.dtype, "complex floating")): + raise ValueError('`xr0` must be numeric and real.') + + if (not xp.isdtype(xmin.dtype, "numeric") + or xp.isdtype(xmin.dtype, "complex floating")): + raise ValueError('`xmin` must be numeric and real.') + + if (not xp.isdtype(xmax.dtype, "numeric") + or xp.isdtype(xmax.dtype, "complex floating")): + raise ValueError('`xmax` must be numeric and real.') + + if (not xp.isdtype(factor.dtype, "numeric") + or xp.isdtype(factor.dtype, "complex floating")): + raise ValueError('`factor` must be numeric and real.') + if not xp.all(factor > 1): + raise ValueError('All elements of `factor` must be greater than 1.') + + # Calculate the default value of xr0 if a value has not been supplied. + # Be careful to ensure xr0 is not larger than xmax. + if xr0_not_supplied: + xr0 = xl0 + xp.minimum((xmax - xl0)/ 8, xp.asarray(1.0)) + xr0 = xp.astype(xr0, xl0.dtype, copy=False) + + maxiter = xp.asarray(maxiter) + message = '`maxiter` must be a non-negative integer.' 
+ if (not xp.isdtype(maxiter.dtype, "numeric") or maxiter.shape != tuple() + or xp.isdtype(maxiter.dtype, "complex floating")): + raise ValueError(message) + maxiter_int = int(maxiter[()]) + if not maxiter == maxiter_int or maxiter < 0: + raise ValueError(message) + + return func, xl0, xr0, xmin, xmax, factor, args, maxiter, xp + + +def _bracket_root(func, xl0, xr0=None, *, xmin=None, xmax=None, factor=None, + args=(), maxiter=1000): + """Bracket the root of a monotonic scalar function of one variable + + This function works elementwise when `xl0`, `xr0`, `xmin`, `xmax`, `factor`, and + the elements of `args` are broadcastable arrays. + + Parameters + ---------- + func : callable + The function for which the root is to be bracketed. + The signature must be:: + + func(x: ndarray, *args) -> ndarray + + where each element of ``x`` is a finite real and ``args`` is a tuple, + which may contain an arbitrary number of arrays that are broadcastable + with `x`. ``func`` must be an elementwise function: each element + ``func(x)[i]`` must equal ``func(x[i])`` for all indices ``i``. + xl0, xr0: float array_like + Starting guess of bracket, which need not contain a root. If `xr0` is + not provided, ``xr0 = xl0 + 1``. Must be broadcastable with one another. + xmin, xmax : float array_like, optional + Minimum and maximum allowable endpoints of the bracket, inclusive. Must + be broadcastable with `xl0` and `xr0`. + factor : float array_like, default: 2 + The factor used to grow the bracket. See notes for details. + args : tuple, optional + Additional positional arguments to be passed to `func`. Must be arrays + broadcastable with `xl0`, `xr0`, `xmin`, and `xmax`. If the callable to be + bracketed requires arguments that are not broadcastable with these + arrays, wrap that callable with `func` such that `func` accepts + only `x` and broadcastable arrays. + maxiter : int, optional + The maximum number of iterations of the algorithm to perform. + + Returns + ------- + res : _RichResult + An instance of `scipy._lib._util._RichResult` with the following + attributes. The descriptions are written as though the values will be + scalars; however, if `func` returns an array, the outputs will be + arrays of the same shape. + + xl, xr : float + The lower and upper ends of the bracket, if the algorithm + terminated successfully. + fl, fr : float + The function value at the lower and upper ends of the bracket. + nfev : int + The number of function evaluations required to find the bracket. + This is distinct from the number of times `func` is *called* + because the function may evaluated at multiple points in a single + call. + nit : int + The number of iterations of the algorithm that were performed. + status : int + An integer representing the exit status of the algorithm. + + - ``0`` : The algorithm produced a valid bracket. + - ``-1`` : The bracket expanded to the allowable limits without finding a bracket. + - ``-2`` : The maximum number of iterations was reached. + - ``-3`` : A non-finite value was encountered. + - ``-4`` : Iteration was terminated by `callback`. + - ``-5``: The initial bracket does not satisfy `xmin <= xl0 < xr0 < xmax`. + - ``1`` : The algorithm is proceeding normally (in `callback` only). + - ``2`` : A bracket was found in the opposite search direction (in `callback` only). + + success : bool + ``True`` when the algorithm terminated successfully (status ``0``). + + Notes + ----- + This function generalizes an algorithm found in pieces throughout + `scipy.stats`. 
The strategy is to iteratively grow the bracket ``(l, r)`` + until ``func(l) < 0 < func(r)``. The bracket grows to the left as follows. + + - If `xmin` is not provided, the distance between `xl0` and `l` is iteratively + increased by `factor`. + - If `xmin` is provided, the distance between `xmin` and `l` is iteratively + decreased by `factor`. Note that this also *increases* the bracket size. + + Growth of the bracket to the right is analogous. + + Growth of the bracket in one direction stops when the endpoint is no longer + finite, the function value at the endpoint is no longer finite, or the + endpoint reaches its limiting value (`xmin` or `xmax`). Iteration terminates + when the bracket stops growing in both directions, the bracket surrounds + the root, or a root is found (accidentally). + + If two brackets are found - that is, a bracket is found on both sides in + the same iteration, the smaller of the two is returned. + If roots of the function are found, both `l` and `r` are set to the + leftmost root. + + """ # noqa: E501 + # Todo: + # - find bracket with sign change in specified direction + # - Add tolerance + # - allow factor < 1? + + callback = None # works; I just don't want to test it + temp = _bracket_root_iv(func, xl0, xr0, xmin, xmax, factor, args, maxiter) + func, xl0, xr0, xmin, xmax, factor, args, maxiter, xp = temp + + xs = (xl0, xr0) + temp = eim._initialize(func, xs, args) + func, xs, fs, args, shape, dtype, xp = temp # line split for PEP8 + xl0, xr0 = xs + xmin = xp_ravel(xp.astype(xp.broadcast_to(xmin, shape), dtype, copy=False), xp=xp) + xmax = xp_ravel(xp.astype(xp.broadcast_to(xmax, shape), dtype, copy=False), xp=xp) + invalid_bracket = ~((xmin <= xl0) & (xl0 < xr0) & (xr0 <= xmax)) + + # The approach is to treat the left and right searches as though they were + # (almost) totally independent one-sided bracket searches. (The interaction + # is considered when checking for termination and preparing the result + # object.) + # `x` is the "moving" end of the bracket + x = xp.concat(xs) + f = xp.concat(fs) + invalid_bracket = xp.concat((invalid_bracket, invalid_bracket)) + n = x.shape[0] // 2 + + # `x_last` is the previous location of the moving end of the bracket. If + # the signs of `f` and `f_last` are different, `x` and `x_last` form a + # bracket. + x_last = xp.concat((x[n:], x[:n])) + f_last = xp.concat((f[n:], f[:n])) + # `x0` is the "fixed" end of the bracket. + x0 = x_last + # We don't need to retain the corresponding function value, since the + # fixed end of the bracket is only needed to compute the new value of the + # moving end; it is never returned. + limit = xp.concat((xmin, xmax)) + + factor = xp_ravel(xp.broadcast_to(factor, shape), xp=xp) + factor = xp.astype(factor, dtype, copy=False) + factor = xp.concat((factor, factor)) + + active = xp.arange(2*n) + args = [xp.concat((arg, arg)) for arg in args] + + # This is needed due to inner workings of `eim._loop`. + # We're abusing it a tiny bit. + shape = shape + (2,) + + # `d` is for "distance". + # For searches without a limit, the distance between the fixed end of the + # bracket `x0` and the moving end `x` will grow by `factor` each iteration. + # For searches with a limit, the distance between the `limit` and moving + # end of the bracket `x` will shrink by `factor` each iteration. 
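+    # (Concretely, with the default factor of 2 an unlimited search places the
+    #  moving end at x0 + 2*d0, x0 + 4*d0, x0 + 8*d0, ..., while a limited
+    #  search halves the remaining distance to `limit` on every iteration.)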
+ i = xp.isinf(limit) + ni = ~i + d = xp.zeros_like(x) + d[i] = x[i] - x0[i] + d[ni] = limit[ni] - x[ni] + + status = xp.full_like(x, eim._EINPROGRESS, dtype=xp.int32) # in progress + status[invalid_bracket] = eim._EINPUTERR + nit, nfev = 0, 1 # one function evaluation per side performed above + + work = _RichResult(x=x, x0=x0, f=f, limit=limit, factor=factor, + active=active, d=d, x_last=x_last, f_last=f_last, + nit=nit, nfev=nfev, status=status, args=args, + xl=xp.nan, xr=xp.nan, fl=xp.nan, fr=xp.nan, n=n) + res_work_pairs = [('status', 'status'), ('xl', 'xl'), ('xr', 'xr'), + ('nit', 'nit'), ('nfev', 'nfev'), ('fl', 'fl'), + ('fr', 'fr'), ('x', 'x'), ('f', 'f'), + ('x_last', 'x_last'), ('f_last', 'f_last')] + + def pre_func_eval(work): + # Initialize moving end of bracket + x = xp.zeros_like(work.x) + + # Unlimited brackets grow by `factor` by increasing distance from fixed + # end to moving end. + i = xp.isinf(work.limit) # indices of unlimited brackets + work.d[i] *= work.factor[i] + x[i] = work.x0[i] + work.d[i] + + # Limited brackets grow by decreasing the distance from the limit to + # the moving end. + ni = ~i # indices of limited brackets + work.d[ni] /= work.factor[ni] + x[ni] = work.limit[ni] - work.d[ni] + + return x + + def post_func_eval(x, f, work): + # Keep track of the previous location of the moving end so that we can + # return a narrower bracket. (The alternative is to remember the + # original fixed end, but then the bracket would be wider than needed.) + work.x_last = work.x + work.f_last = work.f + work.x = x + work.f = f + + def check_termination(work): + # Condition 0: initial bracket is invalid + stop = (work.status == eim._EINPUTERR) + + # Condition 1: a valid bracket (or the root itself) has been found + sf = xp.sign(work.f) + sf_last = xp.sign(work.f_last) + i = ((sf_last == -sf) | (sf_last == 0) | (sf == 0)) & ~stop + work.status[i] = eim._ECONVERGED + stop[i] = True + + # Condition 2: the other side's search found a valid bracket. + # (If we just found a bracket with the rightward search, we can stop + # the leftward search, and vice-versa.) + # To do this, we need to set the status of the other side's search; + # this is tricky because `work.status` contains only the *active* + # elements, so we don't immediately know the index of the element we + # need to set - or even if it's still there. (That search may have + # terminated already, e.g. by reaching its `limit`.) + # To facilitate this, `work.active` contains a unit integer index of + # each search. Index `k` (`k < n)` and `k + n` correspond with a + # leftward and rightward search, respectively. Elements are removed + # from `work.active` just as they are removed from `work.status`, so + # we use `work.active` to help find the right location in + # `work.status`. + # Get the integer indices of the elements that can also stop + also_stop = (work.active[i] + work.n) % (2*work.n) + # Check whether they are still active. We want to find the indices + # in work.active where the associated values in work.active are + # contained in also_stop. xp.searchsorted let's us take advantage + # of work.active being sorted, but requires some hackery because + # searchsorted solves the separate but related problem of finding + # the indices where the values in also_stop should be added to + # maintain sorted order. + j = xp.searchsorted(work.active, also_stop) + # If the location exceeds the length of the `work.active`, they are + # not there. 
This happens when a value in also_stop is larger than + # the greatest value in work.active. This case needs special handling + # because we cannot simply check that also_stop == work.active[j]. + mask = j < work.active.shape[0] + # Note that we also have to use the mask to filter also_stop to ensure + # that also_stop and j will still have the same shape. + j, also_stop = j[mask], also_stop[mask] + j = j[also_stop == work.active[j]] + # Now convert these to boolean indices to use with `work.status`. + i = xp.zeros_like(stop) + i[j] = True # boolean indices of elements that can also stop + i = i & ~stop + work.status[i] = _ESTOPONESIDE + stop[i] = True + + # Condition 3: moving end of bracket reaches limit + i = (work.x == work.limit) & ~stop + work.status[i] = _ELIMITS + stop[i] = True + + # Condition 4: non-finite value encountered + i = ~(xp.isfinite(work.x) & xp.isfinite(work.f)) & ~stop + work.status[i] = eim._EVALUEERR + stop[i] = True + + return stop + + def post_termination_check(work): + pass + + def customize_result(res, shape): + n = res['x'].shape[0] // 2 + + # To avoid ambiguity, below we refer to `xl0`, the initial left endpoint + # as `a` and `xr0`, the initial right endpoint, as `b`. + # Because we treat the two one-sided searches as though they were + # independent, what we keep track of in `work` and what we want to + # return in `res` look quite different. Combine the results from the + # two one-sided searches before reporting the results to the user. + # - "a" refers to the leftward search (the moving end started at `a`) + # - "b" refers to the rightward search (the moving end started at `b`) + # - "l" refers to the left end of the bracket (closer to -oo) + # - "r" refers to the right end of the bracket (closer to +oo) + xal = res['x'][:n] + xar = res['x_last'][:n] + xbl = res['x_last'][n:] + xbr = res['x'][n:] + + fal = res['f'][:n] + far = res['f_last'][:n] + fbl = res['f_last'][n:] + fbr = res['f'][n:] + + # Initialize the brackets and corresponding function values to return + # to the user. Brackets may not be valid (e.g. there is no root, + # there weren't enough iterations, NaN encountered), but we still need + # to return something. One option would be all NaNs, but what I've + # chosen here is the left- and right-most points at which the function + # has been evaluated. This gives the user some information about what + # interval of the real line has been searched and shows that there is + # no sign change between the two ends. + xl = xp.asarray(xal, copy=True) + fl = xp.asarray(fal, copy=True) + xr = xp.asarray(xbr, copy=True) + fr = xp.asarray(fbr, copy=True) + + # `status` indicates whether the bracket is valid or not. If so, + # we want to adjust the bracket we return to be the narrowest possible + # given the points at which we evaluated the function. + # For example if bracket "a" is valid and smaller than bracket "b" OR + # if bracket "a" is valid and bracket "b" is not valid, we want to + # return bracket "a" (and vice versa). 
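+        # Below, `i1` marks elements for which bracket "a" is reported (it is
+        # valid and either no wider than "b" or "b" is invalid); `i2` is the
+        # analogous mask for bracket "b".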
+ sa = res['status'][:n] + sb = res['status'][n:] + + da = xar - xal + db = xbr - xbl + + i1 = ((da <= db) & (sa == 0)) | ((sa == 0) & (sb != 0)) + i2 = ((db <= da) & (sb == 0)) | ((sb == 0) & (sa != 0)) + + xr[i1] = xar[i1] + fr[i1] = far[i1] + xl[i2] = xbl[i2] + fl[i2] = fbl[i2] + + # Finish assembling the result object + res['xl'] = xl + res['xr'] = xr + res['fl'] = fl + res['fr'] = fr + + res['nit'] = xp.maximum(res['nit'][:n], res['nit'][n:]) + res['nfev'] = res['nfev'][:n] + res['nfev'][n:] + # If the status on one side is zero, the status is zero. In any case, + # report the status from one side only. + res['status'] = xp.where(sa == 0, sa, sb) + res['success'] = (res['status'] == 0) + + del res['x'] + del res['f'] + del res['x_last'] + del res['f_last'] + + return shape[:-1] + + return eim._loop(work, callback, shape, maxiter, func, args, dtype, + pre_func_eval, post_func_eval, check_termination, + post_termination_check, customize_result, res_work_pairs, + xp) + + +def _bracket_minimum_iv(func, xm0, xl0, xr0, xmin, xmax, factor, args, maxiter): + + if not callable(func): + raise ValueError('`func` must be callable.') + + if not np.iterable(args): + args = (args,) + + xp = array_namespace(xm0) + xm0 = xp.asarray(xm0)[()] + if (not xp.isdtype(xm0.dtype, "numeric") + or xp.isdtype(xm0.dtype, "complex floating")): + raise ValueError('`xm0` must be numeric and real.') + if not xp.isdtype(xm0.dtype, "real floating"): + xm0 = xp.asarray(xm0, dtype=xp_default_dtype(xp)) + + xmin = -xp.inf if xmin is None else xmin + xmax = xp.inf if xmax is None else xmax + + # If xl0 (xr0) is not supplied, fill with a dummy value for the sake + # of broadcasting. We need to wait until xmin (xmax) has been validated + # to compute the default values. + xl0_not_supplied = False + if xl0 is None: + xl0 = xp.nan + xl0_not_supplied = True + + xr0_not_supplied = False + if xr0 is None: + xr0 = xp.nan + xr0_not_supplied = True + + factor = 2.0 if factor is None else factor + xl0, xm0, xr0, xmin, xmax, factor = xp.broadcast_arrays( + xp.asarray(xl0), xm0, xp.asarray(xr0), xp.asarray(xmin), + xp.asarray(xmax), xp.asarray(factor) + ) + + if (not xp.isdtype(xl0.dtype, "numeric") + or xp.isdtype(xl0.dtype, "complex floating")): + raise ValueError('`xl0` must be numeric and real.') + + if (not xp.isdtype(xr0.dtype, "numeric") + or xp.isdtype(xr0.dtype, "complex floating")): + raise ValueError('`xr0` must be numeric and real.') + + if (not xp.isdtype(xmin.dtype, "numeric") + or xp.isdtype(xmin.dtype, "complex floating")): + raise ValueError('`xmin` must be numeric and real.') + + if (not xp.isdtype(xmax.dtype, "numeric") + or xp.isdtype(xmax.dtype, "complex floating")): + raise ValueError('`xmax` must be numeric and real.') + + if (not xp.isdtype(factor.dtype, "numeric") + or xp.isdtype(factor.dtype, "complex floating")): + raise ValueError('`factor` must be numeric and real.') + if not xp.all(factor > 1): + raise ValueError('All elements of `factor` must be greater than 1.') + + # Calculate default values of xl0 and/or xr0 if they have not been supplied + # by the user. We need to be careful to ensure xl0 and xr0 are not outside + # of (xmin, xmax). + if xl0_not_supplied: + xl0 = xm0 - xp.minimum((xm0 - xmin)/16, xp.asarray(0.5)) + xl0 = xp.astype(xl0, xm0.dtype, copy=False) + if xr0_not_supplied: + xr0 = xm0 + xp.minimum((xmax - xm0)/16, xp.asarray(0.5)) + xr0 = xp.astype(xr0, xm0.dtype, copy=False) + + maxiter = xp.asarray(maxiter) + message = '`maxiter` must be a non-negative integer.' 
+ if (not xp.isdtype(maxiter.dtype, "numeric") or maxiter.shape != tuple() + or xp.isdtype(maxiter.dtype, "complex floating")): + raise ValueError(message) + maxiter_int = int(maxiter[()]) + if not maxiter == maxiter_int or maxiter < 0: + raise ValueError(message) + + return func, xm0, xl0, xr0, xmin, xmax, factor, args, maxiter, xp + + +def _bracket_minimum(func, xm0, *, xl0=None, xr0=None, xmin=None, xmax=None, + factor=None, args=(), maxiter=1000): + """Bracket the minimum of a unimodal scalar function of one variable + + This function works elementwise when `xm0`, `xl0`, `xr0`, `xmin`, `xmax`, + and the elements of `args` are broadcastable arrays. + + Parameters + ---------- + func : callable + The function for which the minimum is to be bracketed. + The signature must be:: + + func(x: ndarray, *args) -> ndarray + + where each element of ``x`` is a finite real and ``args`` is a tuple, + which may contain an arbitrary number of arrays that are broadcastable + with ``x``. `func` must be an elementwise function: each element + ``func(x)[i]`` must equal ``func(x[i])`` for all indices `i`. + xm0: float array_like + Starting guess for middle point of bracket. + xl0, xr0: float array_like, optional + Starting guesses for left and right endpoints of the bracket. Must be + broadcastable with one another and with `xm0`. + xmin, xmax : float array_like, optional + Minimum and maximum allowable endpoints of the bracket, inclusive. Must + be broadcastable with `xl0`, `xm0`, and `xr0`. + factor : float array_like, optional + Controls expansion of bracket endpoint in downhill direction. Works + differently in the cases where a limit is set in the downhill direction + with `xmax` or `xmin`. See Notes. + args : tuple, optional + Additional positional arguments to be passed to `func`. Must be arrays + broadcastable with `xl0`, `xm0`, `xr0`, `xmin`, and `xmax`. If the + callable to be bracketed requires arguments that are not broadcastable + with these arrays, wrap that callable with `func` such that `func` + accepts only ``x`` and broadcastable arrays. + maxiter : int, optional + The maximum number of iterations of the algorithm to perform. The number + of function evaluations is three greater than the number of iterations. + + Returns + ------- + res : _RichResult + An instance of `scipy._lib._util._RichResult` with the following + attributes. The descriptions are written as though the values will be + scalars; however, if `func` returns an array, the outputs will be + arrays of the same shape. + + xl, xm, xr : float + The left, middle, and right points of the bracket, if the algorithm + terminated successfully. + fl, fm, fr : float + The function value at the left, middle, and right points of the bracket. + nfev : int + The number of function evaluations required to find the bracket. + nit : int + The number of iterations of the algorithm that were performed. + status : int + An integer representing the exit status of the algorithm. + + - ``0`` : The algorithm produced a valid bracket. + - ``-1`` : The bracket expanded to the allowable limits. Assuming + unimodality, this implies the endpoint at the limit is a + minimizer. + - ``-2`` : The maximum number of iterations was reached. + - ``-3`` : A non-finite value was encountered. + - ``-4`` : ``None`` shall pass. + - ``-5`` : The initial bracket does not satisfy + `xmin <= xl0 < xm0 < xr0 <= xmax`. + + success : bool + ``True`` when the algorithm terminated successfully (status ``0``). 
+ + Notes + ----- + Similar to `scipy.optimize.bracket`, this function seeks to find real + points ``xl < xm < xr`` such that ``f(xl) >= f(xm)`` and ``f(xr) >= f(xm)``, + where at least one of the inequalities is strict. Unlike `scipy.optimize.bracket`, + this function can operate in a vectorized manner on array input, so long as + the input arrays are broadcastable with each other. Also unlike + `scipy.optimize.bracket`, users may specify minimum and maximum endpoints + for the desired bracket. + + Given an initial trio of points ``xl = xl0``, ``xm = xm0``, ``xr = xr0``, + the algorithm checks if these points already give a valid bracket. If not, + a new endpoint, ``w`` is chosen in the "downhill" direction, ``xm`` becomes the new + opposite endpoint, and either `xl` or `xr` becomes the new middle point, + depending on which direction is downhill. The algorithm repeats from here. + + The new endpoint `w` is chosen differently depending on whether or not a + boundary `xmin` or `xmax` has been set in the downhill direction. Without + loss of generality, suppose the downhill direction is to the right, so that + ``f(xl) > f(xm) > f(xr)``. If there is no boundary to the right, then `w` + is chosen to be ``xr + factor * (xr - xm)`` where `factor` is controlled by + the user (defaults to 2.0) so that step sizes increase in geometric proportion. + If there is a boundary, `xmax` in this case, then `w` is chosen to be + ``xmax - (xmax - xr)/factor``, with steps slowing to a stop at + `xmax`. This cautious approach ensures that a minimum near but distinct from + the boundary isn't missed while also detecting whether or not the `xmax` is + a minimizer when `xmax` is reached after a finite number of steps. + """ # noqa: E501 + callback = None # works; I just don't want to test it + + temp = _bracket_minimum_iv(func, xm0, xl0, xr0, xmin, xmax, factor, args, maxiter) + func, xm0, xl0, xr0, xmin, xmax, factor, args, maxiter, xp = temp + + xs = (xl0, xm0, xr0) + temp = eim._initialize(func, xs, args) + func, xs, fs, args, shape, dtype, xp = temp + + xl0, xm0, xr0 = xs + fl0, fm0, fr0 = fs + xmin = xp.astype(xp.broadcast_to(xmin, shape), dtype, copy=False) + xmin = xp_ravel(xmin, xp=xp) + xmax = xp.astype(xp.broadcast_to(xmax, shape), dtype, copy=False) + xmax = xp_ravel(xmax, xp=xp) + invalid_bracket = ~((xmin <= xl0) & (xl0 < xm0) & (xm0 < xr0) & (xr0 <= xmax)) + # We will modify factor later on so make a copy. np.broadcast_to returns + # a read-only view. + factor = xp.astype(xp.broadcast_to(factor, shape), dtype, copy=True) + factor = xp_ravel(factor) + + # To simplify the logic, swap xl and xr if f(xl) < f(xr). We should always be + # marching downhill in the direction from xl to xr. + comp = fl0 < fr0 + xl0[comp], xr0[comp] = xr0[comp], xl0[comp] + fl0[comp], fr0[comp] = fr0[comp], fl0[comp] + # We only need the boundary in the direction we're traveling. + limit = xp.where(comp, xmin, xmax) + + unlimited = xp.isinf(limit) + limited = ~unlimited + step = xp.empty_like(xl0) + + step[unlimited] = (xr0[unlimited] - xm0[unlimited]) + step[limited] = (limit[limited] - xr0[limited]) + + # Step size is divided by factor for case where there is a limit. 
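+    # (Storing the reciprocal lets `pre_func_eval` apply ``step *= factor``
+    #  uniformly: unlimited steps then grow geometrically, while limited steps
+    #  shrink the remaining gap to the boundary geometrically.)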
+ factor[limited] = 1 / factor[limited] + + status = xp.full_like(xl0, eim._EINPROGRESS, dtype=xp.int32) + status[invalid_bracket] = eim._EINPUTERR + nit, nfev = 0, 3 + + work = _RichResult(xl=xl0, xm=xm0, xr=xr0, xr0=xr0, fl=fl0, fm=fm0, fr=fr0, + step=step, limit=limit, limited=limited, factor=factor, nit=nit, + nfev=nfev, status=status, args=args) + + res_work_pairs = [('status', 'status'), ('xl', 'xl'), ('xm', 'xm'), ('xr', 'xr'), + ('nit', 'nit'), ('nfev', 'nfev'), ('fl', 'fl'), ('fm', 'fm'), + ('fr', 'fr')] + + def pre_func_eval(work): + work.step *= work.factor + x = xp.empty_like(work.xr) + x[~work.limited] = work.xr0[~work.limited] + work.step[~work.limited] + x[work.limited] = work.limit[work.limited] - work.step[work.limited] + # Since the new bracket endpoint is calculated from an offset with the + # limit, it may be the case that the new endpoint equals the old endpoint, + # when the old endpoint is sufficiently close to the limit. We use the + # limit itself as the new endpoint in these cases. + x[work.limited] = xp.where( + x[work.limited] == work.xr[work.limited], + work.limit[work.limited], + x[work.limited], + ) + return x + + def post_func_eval(x, f, work): + work.xl, work.xm, work.xr = work.xm, work.xr, x + work.fl, work.fm, work.fr = work.fm, work.fr, f + + def check_termination(work): + # Condition 0: Initial bracket is invalid. + stop = (work.status == eim._EINPUTERR) + + # Condition 1: A valid bracket has been found. + i = ( + (work.fl >= work.fm) & (work.fr > work.fm) + | (work.fl > work.fm) & (work.fr >= work.fm) + ) & ~stop + work.status[i] = eim._ECONVERGED + stop[i] = True + + # Condition 2: Moving end of bracket reaches limit. + i = (work.xr == work.limit) & ~stop + work.status[i] = _ELIMITS + stop[i] = True + + # Condition 3: non-finite value encountered + i = ~(xp.isfinite(work.xr) & xp.isfinite(work.fr)) & ~stop + work.status[i] = eim._EVALUEERR + stop[i] = True + + return stop + + def post_termination_check(work): + pass + + def customize_result(res, shape): + # Reorder entries of xl and xr if they were swapped due to f(xl0) < f(xr0). + comp = res['xl'] > res['xr'] + res['xl'][comp], res['xr'][comp] = res['xr'][comp], res['xl'][comp] + res['fl'][comp], res['fr'][comp] = res['fr'][comp], res['fl'][comp] + return shape + + return eim._loop(work, callback, shape, + maxiter, func, args, dtype, + pre_func_eval, post_func_eval, + check_termination, post_termination_check, + customize_result, res_work_pairs, xp) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_chandrupatla.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_chandrupatla.py new file mode 100644 index 0000000000000000000000000000000000000000..5a4b70098919b9fba626bfecd5c1bcc559ab7702 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_chandrupatla.py @@ -0,0 +1,552 @@ +import math +import numpy as np +import scipy._lib._elementwise_iterative_method as eim +from scipy._lib._util import _RichResult +from scipy._lib._array_api import xp_sign, xp_copy, xp_take_along_axis + +# TODO: +# - (maybe?) don't use fancy indexing assignment +# - figure out how to replace the new `try`/`except`s + + +def _chandrupatla(func, a, b, *, args=(), xatol=None, xrtol=None, + fatol=None, frtol=0, maxiter=None, callback=None): + """Find the root of an elementwise function using Chandrupatla's algorithm. 
+ + For each element of the output of `func`, `chandrupatla` seeks the scalar + root that makes the element 0. This function allows for `a`, `b`, and the + output of `func` to be of any broadcastable shapes. + + Parameters + ---------- + func : callable + The function whose root is desired. The signature must be:: + + func(x: ndarray, *args) -> ndarray + + where each element of ``x`` is a finite real and ``args`` is a tuple, + which may contain an arbitrary number of components of any type(s). + ``func`` must be an elementwise function: each element ``func(x)[i]`` + must equal ``func(x[i])`` for all indices ``i``. `_chandrupatla` + seeks an array ``x`` such that ``func(x)`` is an array of zeros. + a, b : array_like + The lower and upper bounds of the root of the function. Must be + broadcastable with one another. + args : tuple, optional + Additional positional arguments to be passed to `func`. + xatol, xrtol, fatol, frtol : float, optional + Absolute and relative tolerances on the root and function value. + See Notes for details. + maxiter : int, optional + The maximum number of iterations of the algorithm to perform. + The default is the maximum possible number of bisections within + the (normal) floating point numbers of the relevant dtype. + callback : callable, optional + An optional user-supplied function to be called before the first + iteration and after each iteration. + Called as ``callback(res)``, where ``res`` is a ``_RichResult`` + similar to that returned by `_chandrupatla` (but containing the current + iterate's values of all variables). If `callback` raises a + ``StopIteration``, the algorithm will terminate immediately and + `_chandrupatla` will return a result. + + Returns + ------- + res : _RichResult + An instance of `scipy._lib._util._RichResult` with the following + attributes. The descriptions are written as though the values will be + scalars; however, if `func` returns an array, the outputs will be + arrays of the same shape. + + x : float + The root of the function, if the algorithm terminated successfully. + nfev : int + The number of times the function was called to find the root. + nit : int + The number of iterations of Chandrupatla's algorithm performed. + status : int + An integer representing the exit status of the algorithm. + ``0`` : The algorithm converged to the specified tolerances. + ``-1`` : The algorithm encountered an invalid bracket. + ``-2`` : The maximum number of iterations was reached. + ``-3`` : A non-finite value was encountered. + ``-4`` : Iteration was terminated by `callback`. + ``1`` : The algorithm is proceeding normally (in `callback` only). + success : bool + ``True`` when the algorithm terminated successfully (status ``0``). + fun : float + The value of `func` evaluated at `x`. + xl, xr : float + The lower and upper ends of the bracket. + fl, fr : float + The function value at the lower and upper ends of the bracket. + + Notes + ----- + Implemented based on Chandrupatla's original paper [1]_. + + If ``xl`` and ``xr`` are the left and right ends of the bracket, + ``xmin = xl if abs(func(xl)) <= abs(func(xr)) else xr``, + and ``fmin0 = min(func(a), func(b))``, then the algorithm is considered to + have converged when ``abs(xr - xl) < xatol + abs(xmin) * xrtol`` or + ``fun(xmin) <= fatol + abs(fmin0) * frtol``. This is equivalent to the + termination condition described in [1]_ with ``xrtol = 4e-10``, + ``xatol = 1e-5``, and ``fatol = frtol = 0``. 
The default values are + ``xatol = 4*tiny``, ``xrtol = 4*eps``, ``frtol = 0``, and ``fatol = tiny``, + where ``eps`` and ``tiny`` are the precision and smallest normal number + of the result ``dtype`` of function inputs and outputs. + + References + ---------- + + .. [1] Chandrupatla, Tirupathi R. + "A new hybrid quadratic/bisection algorithm for finding the zero of a + nonlinear function without using derivatives". + Advances in Engineering Software, 28(3), 145-149. + https://doi.org/10.1016/s0965-9978(96)00051-8 + + See Also + -------- + brentq, brenth, ridder, bisect, newton + + Examples + -------- + >>> from scipy import optimize + >>> def f(x, c): + ... return x**3 - 2*x - c + >>> c = 5 + >>> res = optimize._chandrupatla._chandrupatla(f, 0, 3, args=(c,)) + >>> res.x + 2.0945514818937463 + + >>> c = [3, 4, 5] + >>> res = optimize._chandrupatla._chandrupatla(f, 0, 3, args=(c,)) + >>> res.x + array([1.8932892 , 2. , 2.09455148]) + + """ + res = _chandrupatla_iv(func, args, xatol, xrtol, + fatol, frtol, maxiter, callback) + func, args, xatol, xrtol, fatol, frtol, maxiter, callback = res + + # Initialization + temp = eim._initialize(func, (a, b), args) + func, xs, fs, args, shape, dtype, xp = temp + x1, x2 = xs + f1, f2 = fs + status = xp.full_like(x1, xp.asarray(eim._EINPROGRESS), + dtype=xp.int32) # in progress + nit, nfev = 0, 2 # two function evaluations performed above + finfo = xp.finfo(dtype) + xatol = 4*finfo.smallest_normal if xatol is None else xatol + xrtol = 4*finfo.eps if xrtol is None else xrtol + fatol = finfo.smallest_normal if fatol is None else fatol + frtol = frtol * xp.minimum(xp.abs(f1), xp.abs(f2)) + maxiter = (math.log2(finfo.max) - math.log2(finfo.smallest_normal) + if maxiter is None else maxiter) + work = _RichResult(x1=x1, f1=f1, x2=x2, f2=f2, x3=None, f3=None, t=0.5, + xatol=xatol, xrtol=xrtol, fatol=fatol, frtol=frtol, + nit=nit, nfev=nfev, status=status) + res_work_pairs = [('status', 'status'), ('x', 'xmin'), ('fun', 'fmin'), + ('nit', 'nit'), ('nfev', 'nfev'), ('xl', 'x1'), + ('fl', 'f1'), ('xr', 'x2'), ('fr', 'f2')] + + def pre_func_eval(work): + # [1] Figure 1 (first box) + x = work.x1 + work.t * (work.x2 - work.x1) + return x + + def post_func_eval(x, f, work): + # [1] Figure 1 (first diamond and boxes) + # Note: y/n are reversed in figure; compare to BASIC in appendix + work.x3, work.f3 = (xp.asarray(work.x2, copy=True), + xp.asarray(work.f2, copy=True)) + j = xp.sign(f) == xp.sign(work.f1) + nj = ~j + work.x3[j], work.f3[j] = work.x1[j], work.f1[j] + work.x2[nj], work.f2[nj] = work.x1[nj], work.f1[nj] + work.x1, work.f1 = x, f + + def check_termination(work): + # [1] Figure 1 (second diamond) + # Check for all terminal conditions and record statuses. + + # See [1] Section 4 (first two sentences) + i = xp.abs(work.f1) < xp.abs(work.f2) + work.xmin = xp.where(i, work.x1, work.x2) + work.fmin = xp.where(i, work.f1, work.f2) + stop = xp.zeros_like(work.x1, dtype=xp.bool) # termination condition met + + # If function value tolerance is met, report successful convergence, + # regardless of other conditions. Note that `frtol` has been redefined + # as `frtol = frtol * minimum(f1, f2)`, where `f1` and `f2` are the + # function evaluated at the original ends of the bracket. + i = xp.abs(work.fmin) <= work.fatol + work.frtol + work.status[i] = eim._ECONVERGED + stop[i] = True + + # If the bracket is no longer valid, report failure (unless a function + # tolerance is met, as detected above). 
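+        # ("No longer valid" means f1 and f2 now have the same sign, so the
+        #  interval is not known to contain a sign change.)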
+ i = (xp_sign(work.f1) == xp_sign(work.f2)) & ~stop + NaN = xp.asarray(xp.nan, dtype=work.xmin.dtype) + work.xmin[i], work.fmin[i], work.status[i] = NaN, NaN, eim._ESIGNERR + stop[i] = True + + # If the abscissae are non-finite or either function value is NaN, + # report failure. + x_nonfinite = ~(xp.isfinite(work.x1) & xp.isfinite(work.x2)) + f_nan = xp.isnan(work.f1) & xp.isnan(work.f2) + i = (x_nonfinite | f_nan) & ~stop + work.xmin[i], work.fmin[i], work.status[i] = NaN, NaN, eim._EVALUEERR + stop[i] = True + + # This is the convergence criterion used in bisect. Chandrupatla's + # criterion is equivalent to this except with a factor of 4 on `xrtol`. + work.dx = xp.abs(work.x2 - work.x1) + work.tol = xp.abs(work.xmin) * work.xrtol + work.xatol + i = work.dx < work.tol + work.status[i] = eim._ECONVERGED + stop[i] = True + + return stop + + def post_termination_check(work): + # [1] Figure 1 (third diamond and boxes / Equation 1) + xi1 = (work.x1 - work.x2) / (work.x3 - work.x2) + with np.errstate(divide='ignore', invalid='ignore'): + phi1 = (work.f1 - work.f2) / (work.f3 - work.f2) + alpha = (work.x3 - work.x1) / (work.x2 - work.x1) + j = ((1 - xp.sqrt(1 - xi1)) < phi1) & (phi1 < xp.sqrt(xi1)) + + f1j, f2j, f3j, alphaj = work.f1[j], work.f2[j], work.f3[j], alpha[j] + t = xp.full_like(alpha, xp.asarray(0.5)) + t[j] = (f1j / (f1j - f2j) * f3j / (f3j - f2j) + - alphaj * f1j / (f3j - f1j) * f2j / (f2j - f3j)) + + # [1] Figure 1 (last box; see also BASIC in appendix with comment + # "Adjust T Away from the Interval Boundary") + tl = 0.5 * work.tol / work.dx + work.t = xp.clip(t, tl, 1 - tl) + + def customize_result(res, shape): + xl, xr, fl, fr = res['xl'], res['xr'], res['fl'], res['fr'] + i = res['xl'] < res['xr'] + res['xl'] = xp.where(i, xl, xr) + res['xr'] = xp.where(i, xr, xl) + res['fl'] = xp.where(i, fl, fr) + res['fr'] = xp.where(i, fr, fl) + return shape + + return eim._loop(work, callback, shape, maxiter, func, args, dtype, + pre_func_eval, post_func_eval, check_termination, + post_termination_check, customize_result, res_work_pairs, + xp=xp) + + +def _chandrupatla_iv(func, args, xatol, xrtol, + fatol, frtol, maxiter, callback): + # Input validation for `_chandrupatla` + + if not callable(func): + raise ValueError('`func` must be callable.') + + if not np.iterable(args): + args = (args,) + + # tolerances are floats, not arrays; OK to use NumPy + tols = np.asarray([xatol if xatol is not None else 1, + xrtol if xrtol is not None else 1, + fatol if fatol is not None else 1, + frtol if frtol is not None else 1]) + if (not np.issubdtype(tols.dtype, np.number) or np.any(tols < 0) + or np.any(np.isnan(tols)) or tols.shape != (4,)): + raise ValueError('Tolerances must be non-negative scalars.') + + if maxiter is not None: + maxiter_int = int(maxiter) + if maxiter != maxiter_int or maxiter < 0: + raise ValueError('`maxiter` must be a non-negative integer.') + + if callback is not None and not callable(callback): + raise ValueError('`callback` must be callable.') + + return func, args, xatol, xrtol, fatol, frtol, maxiter, callback + + +def _chandrupatla_minimize(func, x1, x2, x3, *, args=(), xatol=None, + xrtol=None, fatol=None, frtol=None, maxiter=100, + callback=None): + """Find the minimizer of an elementwise function. + + For each element of the output of `func`, `_chandrupatla_minimize` seeks + the scalar minimizer that minimizes the element. This function allows for + `x1`, `x2`, `x3`, and the elements of `args` to be arrays of any + broadcastable shapes. 
+ + Parameters + ---------- + func : callable + The function whose minimizer is desired. The signature must be:: + + func(x: ndarray, *args) -> ndarray + + where each element of ``x`` is a finite real and ``args`` is a tuple, + which may contain an arbitrary number of arrays that are broadcastable + with `x`. ``func`` must be an elementwise function: each element + ``func(x)[i]`` must equal ``func(x[i])`` for all indices ``i``. + `_chandrupatla` seeks an array ``x`` such that ``func(x)`` is an array + of minima. + x1, x2, x3 : array_like + The abscissae of a standard scalar minimization bracket. A bracket is + valid if ``x1 < x2 < x3`` and ``func(x1) > func(x2) <= func(x3)``. + Must be broadcastable with one another and `args`. + args : tuple, optional + Additional positional arguments to be passed to `func`. Must be arrays + broadcastable with `x1`, `x2`, and `x3`. If the callable to be + differentiated requires arguments that are not broadcastable with `x`, + wrap that callable with `func` such that `func` accepts only `x` and + broadcastable arrays. + xatol, xrtol, fatol, frtol : float, optional + Absolute and relative tolerances on the minimizer and function value. + See Notes for details. + maxiter : int, optional + The maximum number of iterations of the algorithm to perform. + callback : callable, optional + An optional user-supplied function to be called before the first + iteration and after each iteration. + Called as ``callback(res)``, where ``res`` is a ``_RichResult`` + similar to that returned by `_chandrupatla_minimize` (but containing + the current iterate's values of all variables). If `callback` raises a + ``StopIteration``, the algorithm will terminate immediately and + `_chandrupatla_minimize` will return a result. + + Returns + ------- + res : _RichResult + An instance of `scipy._lib._util._RichResult` with the following + attributes. (The descriptions are written as though the values will be + scalars; however, if `func` returns an array, the outputs will be + arrays of the same shape.) + + success : bool + ``True`` when the algorithm terminated successfully (status ``0``). + status : int + An integer representing the exit status of the algorithm. + ``0`` : The algorithm converged to the specified tolerances. + ``-1`` : The algorithm encountered an invalid bracket. + ``-2`` : The maximum number of iterations was reached. + ``-3`` : A non-finite value was encountered. + ``-4`` : Iteration was terminated by `callback`. + ``1`` : The algorithm is proceeding normally (in `callback` only). + x : float + The minimizer of the function, if the algorithm terminated + successfully. + fun : float + The value of `func` evaluated at `x`. + nfev : int + The number of points at which `func` was evaluated. + nit : int + The number of iterations of the algorithm that were performed. + xl, xm, xr : float + The final three-point bracket. + fl, fm, fr : float + The function value at the bracket points. + + Notes + ----- + Implemented based on Chandrupatla's original paper [1]_. + + If ``x1 < x2 < x3`` are the points of the bracket and ``f1 > f2 <= f3`` + are the values of ``func`` at those points, then the algorithm is + considered to have converged when ``x3 - x1 <= abs(x2)*xrtol + xatol`` + or ``(f1 - 2*f2 + f3)/2 <= abs(f2)*frtol + fatol``. Note that first of + these differs from the termination conditions described in [1]_. 
The + default values of `xrtol` is the square root of the precision of the + appropriate dtype, and ``xatol = fatol = frtol`` is the smallest normal + number of the appropriate dtype. + + References + ---------- + .. [1] Chandrupatla, Tirupathi R. (1998). + "An efficient quadratic fit-sectioning algorithm for minimization + without derivatives". + Computer Methods in Applied Mechanics and Engineering, 152 (1-2), + 211-217. https://doi.org/10.1016/S0045-7825(97)00190-4 + + See Also + -------- + golden, brent, bounded + + Examples + -------- + >>> from scipy.optimize._chandrupatla import _chandrupatla_minimize + >>> def f(x, args=1): + ... return (x - args)**2 + >>> res = _chandrupatla_minimize(f, -5, 0, 5) + >>> res.x + 1.0 + >>> c = [1, 1.5, 2] + >>> res = _chandrupatla_minimize(f, -5, 0, 5, args=(c,)) + >>> res.x + array([1. , 1.5, 2. ]) + """ + res = _chandrupatla_iv(func, args, xatol, xrtol, + fatol, frtol, maxiter, callback) + func, args, xatol, xrtol, fatol, frtol, maxiter, callback = res + + # Initialization + xs = (x1, x2, x3) + temp = eim._initialize(func, xs, args) + func, xs, fs, args, shape, dtype, xp = temp # line split for PEP8 + x1, x2, x3 = xs + f1, f2, f3 = fs + phi = xp.asarray(0.5 + 0.5*5**0.5, dtype=dtype)[()] # golden ratio + status = xp.full_like(x1, xp.asarray(eim._EINPROGRESS), + dtype=xp.int32) # in progress + nit, nfev = 0, 3 # three function evaluations performed above + fatol = xp.finfo(dtype).smallest_normal if fatol is None else fatol + frtol = xp.finfo(dtype).smallest_normal if frtol is None else frtol + xatol = xp.finfo(dtype).smallest_normal if xatol is None else xatol + xrtol = math.sqrt(xp.finfo(dtype).eps) if xrtol is None else xrtol + + # Ensure that x1 < x2 < x3 initially. + xs, fs = xp.stack((x1, x2, x3)), xp.stack((f1, f2, f3)) + i = xp.argsort(xs, axis=0) + x1, x2, x3 = xp_take_along_axis(xs, i, axis=0) # data-apis/array-api#808 + f1, f2, f3 = xp_take_along_axis(fs, i, axis=0) # data-apis/array-api#808 + q0 = xp_copy(x3) # "At the start, q0 is set at x3..." ([1] after (7)) + + work = _RichResult(x1=x1, f1=f1, x2=x2, f2=f2, x3=x3, f3=f3, phi=phi, + xatol=xatol, xrtol=xrtol, fatol=fatol, frtol=frtol, + nit=nit, nfev=nfev, status=status, q0=q0, args=args) + res_work_pairs = [('status', 'status'), + ('x', 'x2'), ('fun', 'f2'), + ('nit', 'nit'), ('nfev', 'nfev'), + ('xl', 'x1'), ('xm', 'x2'), ('xr', 'x3'), + ('fl', 'f1'), ('fm', 'f2'), ('fr', 'f3')] + + def pre_func_eval(work): + # `_check_termination` is called first -> `x3 - x2 > x2 - x1` + # But let's calculate a few terms that we'll reuse + x21 = work.x2 - work.x1 + x32 = work.x3 - work.x2 + + # [1] Section 3. "The quadratic minimum point Q1 is calculated using + # the relations developed in the previous section." [1] Section 2 (5/6) + A = x21 * (work.f3 - work.f2) + B = x32 * (work.f1 - work.f2) + C = A / (A + B) + # q1 = C * (work.x1 + work.x2) / 2 + (1 - C) * (work.x2 + work.x3) / 2 + q1 = 0.5 * (C*(work.x1 - work.x3) + work.x2 + work.x3) # much faster + # this is an array, so multiplying by 0.5 does not change dtype + + # "If Q1 and Q0 are sufficiently close... Q1 is accepted if it is + # sufficiently away from the inside point x2" + i = xp.abs(q1 - work.q0) < 0.5 * xp.abs(x21) # [1] (7) + xi = q1[i] + # Later, after (9), "If the point Q1 is in a +/- xtol neighborhood of + # x2, the new point is chosen in the larger interval at a distance + # tol away from x2." + # See also QBASIC code after "Accept Ql adjust if close to X2". 
+ j = xp.abs(q1[i] - work.x2[i]) <= work.xtol[i] + xi[j] = work.x2[i][j] + xp_sign(x32[i][j]) * work.xtol[i][j] + + # "If condition (7) is not satisfied, golden sectioning of the larger + # interval is carried out to introduce the new point." + # (For simplicity, we go ahead and calculate it for all points, but we + # change the elements for which the condition was satisfied.) + x = work.x2 + (2 - work.phi) * x32 + x[i] = xi + + # "We define Q0 as the value of Q1 at the previous iteration." + work.q0 = q1 + return x + + def post_func_eval(x, f, work): + # Standard logic for updating a three-point bracket based on a new + # point. In QBASIC code, see "IF SGN(X-X2) = SGN(X3-X2) THEN...". + # There is an awful lot of data copying going on here; this would + # probably benefit from code optimization or implementation in Pythran. + i = xp_sign(x - work.x2) == xp_sign(work.x3 - work.x2) + xi, x1i, x2i, x3i = x[i], work.x1[i], work.x2[i], work.x3[i], + fi, f1i, f2i, f3i = f[i], work.f1[i], work.f2[i], work.f3[i] + j = fi > f2i + x3i[j], f3i[j] = xi[j], fi[j] + j = ~j + x1i[j], f1i[j], x2i[j], f2i[j] = x2i[j], f2i[j], xi[j], fi[j] + + ni = ~i + xni, x1ni, x2ni, x3ni = x[ni], work.x1[ni], work.x2[ni], work.x3[ni], + fni, f1ni, f2ni, f3ni = f[ni], work.f1[ni], work.f2[ni], work.f3[ni] + j = fni > f2ni + x1ni[j], f1ni[j] = xni[j], fni[j] + j = ~j + x3ni[j], f3ni[j], x2ni[j], f2ni[j] = x2ni[j], f2ni[j], xni[j], fni[j] + + work.x1[i], work.x2[i], work.x3[i] = x1i, x2i, x3i + work.f1[i], work.f2[i], work.f3[i] = f1i, f2i, f3i + work.x1[ni], work.x2[ni], work.x3[ni] = x1ni, x2ni, x3ni, + work.f1[ni], work.f2[ni], work.f3[ni] = f1ni, f2ni, f3ni + + def check_termination(work): + # Check for all terminal conditions and record statuses. + stop = xp.zeros_like(work.x1, dtype=bool) # termination condition met + + # Bracket is invalid; stop and don't return minimizer/minimum + i = ((work.f2 > work.f1) | (work.f2 > work.f3)) + work.x2[i], work.f2[i] = xp.nan, xp.nan + stop[i], work.status[i] = True, eim._ESIGNERR + + # Non-finite values; stop and don't return minimizer/minimum + finite = xp.isfinite(work.x1+work.x2+work.x3+work.f1+work.f2+work.f3) + i = ~(finite | stop) + work.x2[i], work.f2[i] = xp.nan, xp.nan + stop[i], work.status[i] = True, eim._EVALUEERR + + # [1] Section 3 "Points 1 and 3 are interchanged if necessary to make + # the (x2, x3) the larger interval." + # Note: I had used np.choose; this is much faster. This would be a good + # place to save e.g. `work.x3 - work.x2` for reuse, but I tried and + # didn't notice a speed boost, so let's keep it simple. + i = xp.abs(work.x3 - work.x2) < xp.abs(work.x2 - work.x1) + temp = work.x1[i] + work.x1[i] = work.x3[i] + work.x3[i] = temp + temp = work.f1[i] + work.f1[i] = work.f3[i] + work.f3[i] = temp + + # [1] Section 3 (bottom of page 212) + # "We set a tolerance value xtol..." + work.xtol = xp.abs(work.x2) * work.xrtol + work.xatol # [1] (8) + # "The convergence based on interval is achieved when..." + # Note: Equality allowed in case of `xtol=0` + i = xp.abs(work.x3 - work.x2) <= 2 * work.xtol # [1] (9) + + # "We define ftol using..." + ftol = xp.abs(work.f2) * work.frtol + work.fatol # [1] (10) + # "The convergence based on function values is achieved when..." + # Note 1: modify in place to incorporate tolerance on function value. 
+ # Note 2: factor of 2 is not in the text; see QBASIC start of DO loop + i |= (work.f1 - 2 * work.f2 + work.f3) <= 2*ftol # [1] (11) + i &= ~stop + stop[i], work.status[i] = True, eim._ECONVERGED + + return stop + + def post_termination_check(work): + pass + + def customize_result(res, shape): + xl, xr, fl, fr = res['xl'], res['xr'], res['fl'], res['fr'] + i = res['xl'] >= res['xr'] + res['xl'] = xp.where(i, xr, xl) + res['xr'] = xp.where(i, xl, xr) + res['fl'] = xp.where(i, fr, fl) + res['fr'] = xp.where(i, fl, fr) + return shape + + return eim._loop(work, callback, shape, maxiter, func, args, dtype, + pre_func_eval, post_func_eval, check_termination, + post_termination_check, customize_result, res_work_pairs, + xp=xp) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_cobyla_py.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_cobyla_py.py new file mode 100644 index 0000000000000000000000000000000000000000..7e99acf373df59524f66e19f625f50b8d5d3cc76 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_cobyla_py.py @@ -0,0 +1,316 @@ +""" +Interface to Constrained Optimization By Linear Approximation + +Functions +--------- +.. autosummary:: + :toctree: generated/ + + fmin_cobyla + +""" + +import functools +from threading import RLock + +import numpy as np +from scipy.optimize import _cobyla as cobyla +from ._optimize import (OptimizeResult, _check_unknown_options, + _prepare_scalar_function) +try: + from itertools import izip +except ImportError: + izip = zip + +__all__ = ['fmin_cobyla'] + +# Workaround as _cobyla.minimize is not threadsafe +# due to an unknown f2py bug and can segfault, +# see gh-9658. +_module_lock = RLock() +def synchronized(func): + @functools.wraps(func) + def wrapper(*args, **kwargs): + with _module_lock: + return func(*args, **kwargs) + return wrapper + +@synchronized +def fmin_cobyla(func, x0, cons, args=(), consargs=None, rhobeg=1.0, + rhoend=1e-4, maxfun=1000, disp=None, catol=2e-4, + *, callback=None): + """ + Minimize a function using the Constrained Optimization By Linear + Approximation (COBYLA) method. This method wraps a FORTRAN + implementation of the algorithm. + + Parameters + ---------- + func : callable + Function to minimize. In the form func(x, \\*args). + x0 : ndarray + Initial guess. + cons : sequence + Constraint functions; must all be ``>=0`` (a single function + if only 1 constraint). Each function takes the parameters `x` + as its first argument, and it can return either a single number or + an array or list of numbers. + args : tuple, optional + Extra arguments to pass to function. + consargs : tuple, optional + Extra arguments to pass to constraint functions (default of None means + use same extra arguments as those passed to func). + Use ``()`` for no extra arguments. + rhobeg : float, optional + Reasonable initial changes to the variables. + rhoend : float, optional + Final accuracy in the optimization (not precisely guaranteed). This + is a lower bound on the size of the trust region. + disp : {0, 1, 2, 3}, optional + Controls the frequency of output; 0 implies no output. + maxfun : int, optional + Maximum number of function evaluations. + catol : float, optional + Absolute tolerance for constraint violations. + callback : callable, optional + Called after each iteration, as ``callback(x)``, where ``x`` is the + current parameter vector. 
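As noted for ``cons`` above, a single constraint callable may return an array of inequality values. A minimal illustrative sketch (the objective, constraint, and tolerance values are arbitrary): one callable encodes the box ``-2 <= x <= 2``, and the unconstrained optimum lies inside it.

>>> import numpy as np
>>> from scipy.optimize import fmin_cobyla
>>> def objective(x):
...     return (x[0] - 1) ** 2 + (x[1] + 0.5) ** 2
>>> def box(x):
...     # vector-valued constraint: x + 2 >= 0 and 2 - x >= 0
...     return np.concatenate([x + 2, 2 - x])
>>> x = fmin_cobyla(objective, [0.0, 0.0], [box], rhoend=1e-8)
>>> bool(np.allclose(x, [1.0, -0.5], atol=1e-4))
True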
+ + Returns + ------- + x : ndarray + The argument that minimises `f`. + + See also + -------- + minimize: Interface to minimization algorithms for multivariate + functions. See the 'COBYLA' `method` in particular. + + Notes + ----- + This algorithm is based on linear approximations to the objective + function and each constraint. We briefly describe the algorithm. + + Suppose the function is being minimized over k variables. At the + jth iteration the algorithm has k+1 points v_1, ..., v_(k+1), + an approximate solution x_j, and a radius RHO_j. + (i.e., linear plus a constant) approximations to the objective + function and constraint functions such that their function values + agree with the linear approximation on the k+1 points v_1,.., v_(k+1). + This gives a linear program to solve (where the linear approximations + of the constraint functions are constrained to be non-negative). + + However, the linear approximations are likely only good + approximations near the current simplex, so the linear program is + given the further requirement that the solution, which + will become x_(j+1), must be within RHO_j from x_j. RHO_j only + decreases, never increases. The initial RHO_j is rhobeg and the + final RHO_j is rhoend. In this way COBYLA's iterations behave + like a trust region algorithm. + + Additionally, the linear program may be inconsistent, or the + approximation may give poor improvement. For details about + how these issues are resolved, as well as how the points v_i are + updated, refer to the source code or the references below. + + + References + ---------- + Powell M.J.D. (1994), "A direct search optimization method that models + the objective and constraint functions by linear interpolation.", in + Advances in Optimization and Numerical Analysis, eds. S. Gomez and + J-P Hennart, Kluwer Academic (Dordrecht), pp. 51-67 + + Powell M.J.D. (1998), "Direct search algorithms for optimization + calculations", Acta Numerica 7, 287-336 + + Powell M.J.D. (2007), "A view of algorithms for optimization without + derivatives", Cambridge University Technical Report DAMTP 2007/NA03 + + + Examples + -------- + Minimize the objective function f(x,y) = x*y subject + to the constraints x**2 + y**2 < 1 and y > 0:: + + >>> def objective(x): + ... return x[0]*x[1] + ... + >>> def constr1(x): + ... return 1 - (x[0]**2 + x[1]**2) + ... + >>> def constr2(x): + ... return x[1] + ... + >>> from scipy.optimize import fmin_cobyla + >>> fmin_cobyla(objective, [0.0, 0.1], [constr1, constr2], rhoend=1e-7) + array([-0.70710685, 0.70710671]) + + The exact solution is (-sqrt(2)/2, sqrt(2)/2). + + + + """ + err = "cons must be a sequence of callable functions or a single"\ + " callable function." 
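The body below repackages each entry of ``cons`` as an ``{'type': 'ineq'}`` dictionary and forwards everything to ``_minimize_cobyla``, so the problem from the Examples section can equally be posed through ``scipy.optimize.minimize``. A sketch of that equivalent call (the ``tol`` value is an arbitrary choice mirroring ``rhoend=1e-7`` above):

>>> from scipy.optimize import minimize
>>> cons = ({'type': 'ineq', 'fun': lambda x: 1 - x[0] ** 2 - x[1] ** 2},
...         {'type': 'ineq', 'fun': lambda x: x[1]})
>>> res = minimize(lambda x: x[0] * x[1], [0.0, 0.1],
...                method='COBYLA', constraints=cons, tol=1e-7)
>>> bool(res.success), float(round(res.fun, 3))
(True, -0.5)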
+ try: + len(cons) + except TypeError as e: + if callable(cons): + cons = [cons] + else: + raise TypeError(err) from e + else: + for thisfunc in cons: + if not callable(thisfunc): + raise TypeError(err) + + if consargs is None: + consargs = args + + # build constraints + con = tuple({'type': 'ineq', 'fun': c, 'args': consargs} for c in cons) + + # options + opts = {'rhobeg': rhobeg, + 'tol': rhoend, + 'disp': disp, + 'maxiter': maxfun, + 'catol': catol, + 'callback': callback} + + sol = _minimize_cobyla(func, x0, args, constraints=con, + **opts) + if disp and not sol['success']: + print(f"COBYLA failed to find a solution: {sol.message}") + return sol['x'] + + +@synchronized +def _minimize_cobyla(fun, x0, args=(), constraints=(), + rhobeg=1.0, tol=1e-4, maxiter=1000, + disp=False, catol=2e-4, callback=None, bounds=None, + **unknown_options): + """ + Minimize a scalar function of one or more variables using the + Constrained Optimization BY Linear Approximation (COBYLA) algorithm. + + Options + ------- + rhobeg : float + Reasonable initial changes to the variables. + tol : float + Final accuracy in the optimization (not precisely guaranteed). + This is a lower bound on the size of the trust region. + disp : bool + Set to True to print convergence messages. If False, + `verbosity` is ignored as set to 0. + maxiter : int + Maximum number of function evaluations. + catol : float + Tolerance (absolute) for constraint violations + + """ + _check_unknown_options(unknown_options) + maxfun = maxiter + rhoend = tol + iprint = int(bool(disp)) + + # check constraints + if isinstance(constraints, dict): + constraints = (constraints, ) + + if bounds: + i_lb = np.isfinite(bounds.lb) + if np.any(i_lb): + def lb_constraint(x, *args, **kwargs): + return x[i_lb] - bounds.lb[i_lb] + + constraints.append({'type': 'ineq', 'fun': lb_constraint}) + + i_ub = np.isfinite(bounds.ub) + if np.any(i_ub): + def ub_constraint(x): + return bounds.ub[i_ub] - x[i_ub] + + constraints.append({'type': 'ineq', 'fun': ub_constraint}) + + for ic, con in enumerate(constraints): + # check type + try: + ctype = con['type'].lower() + except KeyError as e: + raise KeyError('Constraint %d has no type defined.' % ic) from e + except TypeError as e: + raise TypeError('Constraints must be defined using a ' + 'dictionary.') from e + except AttributeError as e: + raise TypeError("Constraint's type must be a string.") from e + else: + if ctype != 'ineq': + raise ValueError(f"Constraints of type '{con['type']}' not handled by " + "COBYLA.") + + # check function + if 'fun' not in con: + raise KeyError('Constraint %d has no function defined.' 
% ic) + + # check extra arguments + if 'args' not in con: + con['args'] = () + + # m is the total number of constraint values + # it takes into account that some constraints may be vector-valued + cons_lengths = [] + for c in constraints: + f = c['fun'](x0, *c['args']) + try: + cons_length = len(f) + except TypeError: + cons_length = 1 + cons_lengths.append(cons_length) + m = sum(cons_lengths) + + # create the ScalarFunction, cobyla doesn't require derivative function + def _jac(x, *args): + return None + + sf = _prepare_scalar_function(fun, x0, args=args, jac=_jac) + + def calcfc(x, con): + f = sf.fun(x) + i = 0 + for size, c in izip(cons_lengths, constraints): + con[i: i + size] = c['fun'](x, *c['args']) + i += size + return f + + def wrapped_callback(x): + if callback is not None: + callback(np.copy(x)) + + info = np.zeros(4, np.float64) + xopt, info = cobyla.minimize(calcfc, m=m, x=np.copy(x0), rhobeg=rhobeg, + rhoend=rhoend, iprint=iprint, maxfun=maxfun, + dinfo=info, callback=wrapped_callback) + + if info[3] > catol: + # Check constraint violation + info[0] = 4 + + return OptimizeResult(x=xopt, + status=int(info[0]), + success=info[0] == 1, + message={1: 'Optimization terminated successfully.', + 2: 'Maximum number of function evaluations ' + 'has been exceeded.', + 3: 'Rounding errors are becoming damaging ' + 'in COBYLA subroutine.', + 4: 'Did not converge to a solution ' + 'satisfying the constraints. See ' + '`maxcv` for magnitude of violation.', + 5: 'NaN result encountered.' + }.get(info[0], 'Unknown exit status.'), + nfev=int(info[1]), + fun=info[2], + maxcv=info[3]) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_cobyqa_py.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_cobyqa_py.py new file mode 100644 index 0000000000000000000000000000000000000000..38ae0477ca38e28dda80e0bf2dd1f0905eacff6e --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_cobyqa_py.py @@ -0,0 +1,72 @@ +import numpy as np +from threading import Lock + +from ._optimize import _check_unknown_options + + +COBYQA_LOCK = Lock() + + +def _minimize_cobyqa(fun, x0, args=(), bounds=None, constraints=(), + callback=None, disp=False, maxfev=None, maxiter=None, + f_target=-np.inf, feasibility_tol=1e-8, + initial_tr_radius=1.0, final_tr_radius=1e-6, scale=False, + **unknown_options): + """ + Minimize a scalar function of one or more variables using the + Constrained Optimization BY Quadratic Approximations (COBYQA) algorithm [1]_. + + .. versionadded:: 1.14.0 + + Options + ------- + disp : bool + Set to True to print information about the optimization procedure. + Default is ``False``. + maxfev : int + Maximum number of function evaluations. Default is ``500 * n``, where + ``n`` is the number of variables. + maxiter : int + Maximum number of iterations. Default is ``1000 * n``, where ``n`` is + the number of variables. + f_target : float + Target value for the objective function. The optimization procedure is + terminated when the objective function value of a feasible point (see + `feasibility_tol` below) is less than or equal to this target. Default + is ``-numpy.inf``. + feasibility_tol : float + Absolute tolerance for the constraint violation. Default is ``1e-8``. + initial_tr_radius : float + Initial trust-region radius. Typically, this value should be in the + order of one tenth of the greatest expected change to the variables. 
+ Default is ``1.0``. + final_tr_radius : float + Final trust-region radius. It should indicate the accuracy required in + the final values of the variables. If provided, this option overrides + the value of `tol` in the `minimize` function. Default is ``1e-6``. + scale : bool + Set to True to scale the variables according to the bounds. If True and + if all the lower and upper bounds are finite, the variables are scaled + to be within the range :math:`[-1, 1]`. If any of the lower or upper + bounds is infinite, the variables are not scaled. Default is ``False``. + + References + ---------- + .. [1] COBYQA + https://www.cobyqa.com/stable/ + """ + from .._lib.cobyqa import minimize # import here to avoid circular imports + + _check_unknown_options(unknown_options) + options = { + 'disp': bool(disp), + 'maxfev': int(maxfev) if maxfev is not None else 500 * len(x0), + 'maxiter': int(maxiter) if maxiter is not None else 1000 * len(x0), + 'target': float(f_target), + 'feasibility_tol': float(feasibility_tol), + 'radius_init': float(initial_tr_radius), + 'radius_final': float(final_tr_radius), + 'scale': bool(scale), + } + with COBYQA_LOCK: + return minimize(fun, x0, args, bounds, constraints, callback, options) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_constraints.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_constraints.py new file mode 100644 index 0000000000000000000000000000000000000000..1bae893e231eb9bd89308e441b8abf841f4605bb --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_constraints.py @@ -0,0 +1,594 @@ +"""Constraints definition for minimize.""" +import numpy as np +from ._hessian_update_strategy import BFGS +from ._differentiable_functions import ( + VectorFunction, LinearVectorFunction, IdentityVectorFunction) +from ._optimize import OptimizeWarning +from warnings import warn, catch_warnings, simplefilter, filterwarnings +from scipy.sparse import issparse + + +def _arr_to_scalar(x): + # If x is a numpy array, return x.item(). This will + # fail if the array has more than one element. + return x.item() if isinstance(x, np.ndarray) else x + + +class NonlinearConstraint: + """Nonlinear constraint on the variables. + + The constraint has the general inequality form:: + + lb <= fun(x) <= ub + + Here the vector of independent variables x is passed as ndarray of shape + (n,) and ``fun`` returns a vector with m components. + + It is possible to use equal bounds to represent an equality constraint or + infinite bounds to represent a one-sided constraint. + + Parameters + ---------- + fun : callable + The function defining the constraint. + The signature is ``fun(x) -> array_like, shape (m,)``. + lb, ub : array_like + Lower and upper bounds on the constraint. Each array must have the + shape (m,) or be a scalar, in the latter case a bound will be the same + for all components of the constraint. Use ``np.inf`` with an + appropriate sign to specify a one-sided constraint. + Set components of `lb` and `ub` equal to represent an equality + constraint. Note that you can mix constraints of different types: + interval, one-sided or equality, by setting different components of + `lb` and `ub` as necessary. + jac : {callable, '2-point', '3-point', 'cs'}, optional + Method of computing the Jacobian matrix (an m-by-n matrix, + where element (i, j) is the partial derivative of f[i] with + respect to x[j]). 
The keywords {'2-point', '3-point', + 'cs'} select a finite difference scheme for the numerical estimation. + A callable must have the following signature:: + + jac(x) -> {ndarray, sparse matrix}, shape (m, n) + + Default is '2-point'. + hess : {callable, '2-point', '3-point', 'cs', HessianUpdateStrategy, None}, optional + Method for computing the Hessian matrix. The keywords + {'2-point', '3-point', 'cs'} select a finite difference scheme for + numerical estimation. Alternatively, objects implementing + `HessianUpdateStrategy` interface can be used to approximate the + Hessian. Currently available implementations are: + + - `BFGS` (default option) + - `SR1` + + A callable must return the Hessian matrix of ``dot(fun, v)`` and + must have the following signature: + ``hess(x, v) -> {LinearOperator, sparse matrix, array_like}, shape (n, n)``. + Here ``v`` is ndarray with shape (m,) containing Lagrange multipliers. + keep_feasible : array_like of bool, optional + Whether to keep the constraint components feasible throughout + iterations. A single value set this property for all components. + Default is False. Has no effect for equality constraints. + finite_diff_rel_step: None or array_like, optional + Relative step size for the finite difference approximation. Default is + None, which will select a reasonable value automatically depending + on a finite difference scheme. + finite_diff_jac_sparsity: {None, array_like, sparse matrix}, optional + Defines the sparsity structure of the Jacobian matrix for finite + difference estimation, its shape must be (m, n). If the Jacobian has + only few non-zero elements in *each* row, providing the sparsity + structure will greatly speed up the computations. A zero entry means + that a corresponding element in the Jacobian is identically zero. + If provided, forces the use of 'lsmr' trust-region solver. + If None (default) then dense differencing will be used. + + Notes + ----- + Finite difference schemes {'2-point', '3-point', 'cs'} may be used for + approximating either the Jacobian or the Hessian. We, however, do not allow + its use for approximating both simultaneously. Hence whenever the Jacobian + is estimated via finite-differences, we require the Hessian to be estimated + using one of the quasi-Newton strategies. + + The scheme 'cs' is potentially the most accurate, but requires the function + to correctly handles complex inputs and be analytically continuable to the + complex plane. The scheme '3-point' is more accurate than '2-point' but + requires twice as many operations. + + Examples + -------- + Constrain ``x[0] < sin(x[1]) + 1.9`` + + >>> from scipy.optimize import NonlinearConstraint + >>> import numpy as np + >>> con = lambda x: x[0] - np.sin(x[1]) + >>> nlc = NonlinearConstraint(con, -np.inf, 1.9) + + """ + def __init__(self, fun, lb, ub, jac='2-point', hess=None, + keep_feasible=False, finite_diff_rel_step=None, + finite_diff_jac_sparsity=None): + if hess is None: + hess = BFGS() + self.fun = fun + self.lb = lb + self.ub = ub + self.finite_diff_rel_step = finite_diff_rel_step + self.finite_diff_jac_sparsity = finite_diff_jac_sparsity + self.jac = jac + self.hess = hess + self.keep_feasible = keep_feasible + + +class LinearConstraint: + """Linear constraint on the variables. + + The constraint has the general inequality form:: + + lb <= A.dot(x) <= ub + + Here the vector of independent variables x is passed as ndarray of shape + (n,) and the matrix A has shape (m, n). 
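A minimal illustrative sketch of the form above (matrix, limits, and test point are arbitrary): two rows, the second one one-sided, checked with ``residual``.

>>> import numpy as np
>>> from scipy.optimize import LinearConstraint
>>> A = [[1, 1], [1, -1]]
>>> lc = LinearConstraint(A, lb=[-1, -np.inf], ub=[1, 2])
>>> sl, sb = lc.residual([0.5, 0.25])   # slacks A @ x - lb and ub - A @ x
>>> bool(np.all(sl >= 0) and np.all(sb >= 0))
True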
+ + It is possible to use equal bounds to represent an equality constraint or + infinite bounds to represent a one-sided constraint. + + Parameters + ---------- + A : {array_like, sparse matrix}, shape (m, n) + Matrix defining the constraint. + lb, ub : dense array_like, optional + Lower and upper limits on the constraint. Each array must have the + shape (m,) or be a scalar, in the latter case a bound will be the same + for all components of the constraint. Use ``np.inf`` with an + appropriate sign to specify a one-sided constraint. + Set components of `lb` and `ub` equal to represent an equality + constraint. Note that you can mix constraints of different types: + interval, one-sided or equality, by setting different components of + `lb` and `ub` as necessary. Defaults to ``lb = -np.inf`` + and ``ub = np.inf`` (no limits). + keep_feasible : dense array_like of bool, optional + Whether to keep the constraint components feasible throughout + iterations. A single value set this property for all components. + Default is False. Has no effect for equality constraints. + """ + def _input_validation(self): + if self.A.ndim != 2: + message = "`A` must have exactly two dimensions." + raise ValueError(message) + + try: + shape = self.A.shape[0:1] + self.lb = np.broadcast_to(self.lb, shape) + self.ub = np.broadcast_to(self.ub, shape) + self.keep_feasible = np.broadcast_to(self.keep_feasible, shape) + except ValueError: + message = ("`lb`, `ub`, and `keep_feasible` must be broadcastable " + "to shape `A.shape[0:1]`") + raise ValueError(message) + + def __init__(self, A, lb=-np.inf, ub=np.inf, keep_feasible=False): + if not issparse(A): + # In some cases, if the constraint is not valid, this emits a + # VisibleDeprecationWarning about ragged nested sequences + # before eventually causing an error. `scipy.optimize.milp` would + # prefer that this just error out immediately so it can handle it + # rather than concerning the user. + with catch_warnings(): + simplefilter("error") + self.A = np.atleast_2d(A).astype(np.float64) + else: + self.A = A + if issparse(lb) or issparse(ub): + raise ValueError("Constraint limits must be dense arrays.") + self.lb = np.atleast_1d(lb).astype(np.float64) + self.ub = np.atleast_1d(ub).astype(np.float64) + + if issparse(keep_feasible): + raise ValueError("`keep_feasible` must be a dense array.") + self.keep_feasible = np.atleast_1d(keep_feasible).astype(bool) + self._input_validation() + + def residual(self, x): + """ + Calculate the residual between the constraint function and the limits + + For a linear constraint of the form:: + + lb <= A@x <= ub + + the lower and upper residuals between ``A@x`` and the limits are values + ``sl`` and ``sb`` such that:: + + lb + sl == A@x == ub - sb + + When all elements of ``sl`` and ``sb`` are positive, all elements of + the constraint are satisfied; a negative element in ``sl`` or ``sb`` + indicates that the corresponding element of the constraint is not + satisfied. + + Parameters + ---------- + x: array_like + Vector of independent variables + + Returns + ------- + sl, sb : array-like + The lower and upper residuals + """ + return self.A@x - self.lb, self.ub - self.A@x + + +class Bounds: + """Bounds constraint on the variables. + + The constraint has the general inequality form:: + + lb <= x <= ub + + It is possible to use equal bounds to represent an equality constraint or + infinite bounds to represent a one-sided constraint. 
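A minimal illustrative sketch (limits and test point are arbitrary): a one-sided bound on the second variable, checked with ``residual``.

>>> import numpy as np
>>> from scipy.optimize import Bounds
>>> b = Bounds(lb=[0.0, -np.inf], ub=[1.0, 10.0])
>>> sl, sb = b.residual([0.25, 3.0])    # slacks x - lb and ub - x
>>> bool(np.all(sl >= 0) and np.all(sb >= 0))
True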
+ + Parameters + ---------- + lb, ub : dense array_like, optional + Lower and upper bounds on independent variables. `lb`, `ub`, and + `keep_feasible` must be the same shape or broadcastable. + Set components of `lb` and `ub` equal + to fix a variable. Use ``np.inf`` with an appropriate sign to disable + bounds on all or some variables. Note that you can mix constraints of + different types: interval, one-sided or equality, by setting different + components of `lb` and `ub` as necessary. Defaults to ``lb = -np.inf`` + and ``ub = np.inf`` (no bounds). + keep_feasible : dense array_like of bool, optional + Whether to keep the constraint components feasible throughout + iterations. Must be broadcastable with `lb` and `ub`. + Default is False. Has no effect for equality constraints. + """ + def _input_validation(self): + try: + res = np.broadcast_arrays(self.lb, self.ub, self.keep_feasible) + self.lb, self.ub, self.keep_feasible = res + except ValueError: + message = "`lb`, `ub`, and `keep_feasible` must be broadcastable." + raise ValueError(message) + + def __init__(self, lb=-np.inf, ub=np.inf, keep_feasible=False): + if issparse(lb) or issparse(ub): + raise ValueError("Lower and upper bounds must be dense arrays.") + self.lb = np.atleast_1d(lb) + self.ub = np.atleast_1d(ub) + + if issparse(keep_feasible): + raise ValueError("`keep_feasible` must be a dense array.") + self.keep_feasible = np.atleast_1d(keep_feasible).astype(bool) + self._input_validation() + + def __repr__(self): + start = f"{type(self).__name__}({self.lb!r}, {self.ub!r}" + if np.any(self.keep_feasible): + end = f", keep_feasible={self.keep_feasible!r})" + else: + end = ")" + return start + end + + def residual(self, x): + """Calculate the residual (slack) between the input and the bounds + + For a bound constraint of the form:: + + lb <= x <= ub + + the lower and upper residuals between `x` and the bounds are values + ``sl`` and ``sb`` such that:: + + lb + sl == x == ub - sb + + When all elements of ``sl`` and ``sb`` are positive, all elements of + ``x`` lie within the bounds; a negative element in ``sl`` or ``sb`` + indicates that the corresponding element of ``x`` is out of bounds. + + Parameters + ---------- + x: array_like + Vector of independent variables + + Returns + ------- + sl, sb : array-like + The lower and upper residuals + """ + return x - self.lb, self.ub - x + + +class PreparedConstraint: + """Constraint prepared from a user defined constraint. + + On creation it will check whether a constraint definition is valid and + the initial point is feasible. If created successfully, it will contain + the attributes listed below. + + Parameters + ---------- + constraint : {NonlinearConstraint, LinearConstraint`, Bounds} + Constraint to check and prepare. + x0 : array_like + Initial vector of independent variables. + sparse_jacobian : bool or None, optional + If bool, then the Jacobian of the constraint will be converted + to the corresponded format if necessary. If None (default), such + conversion is not made. + finite_diff_bounds : 2-tuple, optional + Lower and upper bounds on the independent variables for the finite + difference approximation, if applicable. Defaults to no bounds. + + Attributes + ---------- + fun : {VectorFunction, LinearVectorFunction, IdentityVectorFunction} + Function defining the constraint wrapped by one of the convenience + classes. + bounds : 2-tuple + Contains lower and upper bounds for the constraints --- lb and ub. 
+ These are converted to ndarray and have a size equal to the number of + the constraints. + keep_feasible : ndarray + Array indicating which components must be kept feasible with a size + equal to the number of the constraints. + """ + def __init__(self, constraint, x0, sparse_jacobian=None, + finite_diff_bounds=(-np.inf, np.inf)): + if isinstance(constraint, NonlinearConstraint): + fun = VectorFunction(constraint.fun, x0, + constraint.jac, constraint.hess, + constraint.finite_diff_rel_step, + constraint.finite_diff_jac_sparsity, + finite_diff_bounds, sparse_jacobian) + elif isinstance(constraint, LinearConstraint): + fun = LinearVectorFunction(constraint.A, x0, sparse_jacobian) + elif isinstance(constraint, Bounds): + fun = IdentityVectorFunction(x0, sparse_jacobian) + else: + raise ValueError("`constraint` of an unknown type is passed.") + + m = fun.m + + lb = np.asarray(constraint.lb, dtype=float) + ub = np.asarray(constraint.ub, dtype=float) + keep_feasible = np.asarray(constraint.keep_feasible, dtype=bool) + + lb = np.broadcast_to(lb, m) + ub = np.broadcast_to(ub, m) + keep_feasible = np.broadcast_to(keep_feasible, m) + + if keep_feasible.shape != (m,): + raise ValueError("`keep_feasible` has a wrong shape.") + + mask = keep_feasible & (lb != ub) + f0 = fun.f + if np.any(f0[mask] < lb[mask]) or np.any(f0[mask] > ub[mask]): + raise ValueError("`x0` is infeasible with respect to some " + "inequality constraint with `keep_feasible` " + "set to True.") + + self.fun = fun + self.bounds = (lb, ub) + self.keep_feasible = keep_feasible + + def violation(self, x): + """How much the constraint is exceeded by. + + Parameters + ---------- + x : array-like + Vector of independent variables + + Returns + ------- + excess : array-like + How much the constraint is exceeded by, for each of the + constraints specified by `PreparedConstraint.fun`. + """ + with catch_warnings(): + # Ignore the following warning, it's not important when + # figuring out total violation + # UserWarning: delta_grad == 0.0. Check if the approximated + # function is linear + filterwarnings("ignore", "delta_grad", UserWarning) + ev = self.fun.fun(np.asarray(x)) + + excess_lb = np.maximum(self.bounds[0] - ev, 0) + excess_ub = np.maximum(ev - self.bounds[1], 0) + + return excess_lb + excess_ub + + +def new_bounds_to_old(lb, ub, n): + """Convert the new bounds representation to the old one. + + The new representation is a tuple (lb, ub) and the old one is a list + containing n tuples, ith containing lower and upper bound on a ith + variable. + If any of the entries in lb/ub are -np.inf/np.inf they are replaced by + None. + """ + lb = np.broadcast_to(lb, n) + ub = np.broadcast_to(ub, n) + + lb = [float(x) if x > -np.inf else None for x in lb] + ub = [float(x) if x < np.inf else None for x in ub] + + return list(zip(lb, ub)) + + +def old_bound_to_new(bounds): + """Convert the old bounds representation to the new one. + + The new representation is a tuple (lb, ub) and the old one is a list + containing n tuples, ith containing lower and upper bound on a ith + variable. + If any of the entries in lb/ub are None they are replaced by + -np.inf/np.inf. + """ + lb, ub = zip(*bounds) + + # Convert occurrences of None to -inf or inf, and replace occurrences of + # any numpy array x with x.item(). Then wrap the results in numpy arrays. 
+ lb = np.array([float(_arr_to_scalar(x)) if x is not None else -np.inf + for x in lb]) + ub = np.array([float(_arr_to_scalar(x)) if x is not None else np.inf + for x in ub]) + + return lb, ub + + +def strict_bounds(lb, ub, keep_feasible, n_vars): + """Remove bounds which are not asked to be kept feasible.""" + strict_lb = np.resize(lb, n_vars).astype(float) + strict_ub = np.resize(ub, n_vars).astype(float) + keep_feasible = np.resize(keep_feasible, n_vars) + strict_lb[~keep_feasible] = -np.inf + strict_ub[~keep_feasible] = np.inf + return strict_lb, strict_ub + + +def new_constraint_to_old(con, x0): + """ + Converts new-style constraint objects to old-style constraint dictionaries. + """ + if isinstance(con, NonlinearConstraint): + if (con.finite_diff_jac_sparsity is not None or + con.finite_diff_rel_step is not None or + not isinstance(con.hess, BFGS) or # misses user specified BFGS + con.keep_feasible): + warn("Constraint options `finite_diff_jac_sparsity`, " + "`finite_diff_rel_step`, `keep_feasible`, and `hess`" + "are ignored by this method.", + OptimizeWarning, stacklevel=3) + + fun = con.fun + if callable(con.jac): + jac = con.jac + else: + jac = None + + else: # LinearConstraint + if np.any(con.keep_feasible): + warn("Constraint option `keep_feasible` is ignored by this method.", + OptimizeWarning, stacklevel=3) + + A = con.A + if issparse(A): + A = A.toarray() + def fun(x): + return np.dot(A, x) + def jac(x): + return A + + # FIXME: when bugs in VectorFunction/LinearVectorFunction are worked out, + # use pcon.fun.fun and pcon.fun.jac. Until then, get fun/jac above. + pcon = PreparedConstraint(con, x0) + lb, ub = pcon.bounds + + i_eq = lb == ub + i_bound_below = np.logical_xor(lb != -np.inf, i_eq) + i_bound_above = np.logical_xor(ub != np.inf, i_eq) + i_unbounded = np.logical_and(lb == -np.inf, ub == np.inf) + + if np.any(i_unbounded): + warn("At least one constraint is unbounded above and below. Such " + "constraints are ignored.", + OptimizeWarning, stacklevel=3) + + ceq = [] + if np.any(i_eq): + def f_eq(x): + y = np.array(fun(x)).flatten() + return y[i_eq] - lb[i_eq] + ceq = [{"type": "eq", "fun": f_eq}] + + if jac is not None: + def j_eq(x): + dy = jac(x) + if issparse(dy): + dy = dy.toarray() + dy = np.atleast_2d(dy) + return dy[i_eq, :] + ceq[0]["jac"] = j_eq + + cineq = [] + n_bound_below = np.sum(i_bound_below) + n_bound_above = np.sum(i_bound_above) + if n_bound_below + n_bound_above: + def f_ineq(x): + y = np.zeros(n_bound_below + n_bound_above) + y_all = np.array(fun(x)).flatten() + y[:n_bound_below] = y_all[i_bound_below] - lb[i_bound_below] + y[n_bound_below:] = -(y_all[i_bound_above] - ub[i_bound_above]) + return y + cineq = [{"type": "ineq", "fun": f_ineq}] + + if jac is not None: + def j_ineq(x): + dy = np.zeros((n_bound_below + n_bound_above, len(x0))) + dy_all = jac(x) + if issparse(dy_all): + dy_all = dy_all.toarray() + dy_all = np.atleast_2d(dy_all) + dy[:n_bound_below, :] = dy_all[i_bound_below] + dy[n_bound_below:, :] = -dy_all[i_bound_above] + return dy + cineq[0]["jac"] = j_ineq + + old_constraints = ceq + cineq + + if len(old_constraints) > 1: + warn("Equality and inequality constraints are specified in the same " + "element of the constraint list. For efficient use with this " + "method, equality and inequality constraints should be specified " + "in separate elements of the constraint list. 
", + OptimizeWarning, stacklevel=3) + return old_constraints + + +def old_constraint_to_new(ic, con): + """ + Converts old-style constraint dictionaries to new-style constraint objects. + """ + # check type + try: + ctype = con['type'].lower() + except KeyError as e: + raise KeyError('Constraint %d has no type defined.' % ic) from e + except TypeError as e: + raise TypeError( + 'Constraints must be a sequence of dictionaries.' + ) from e + except AttributeError as e: + raise TypeError("Constraint's type must be a string.") from e + else: + if ctype not in ['eq', 'ineq']: + raise ValueError(f"Unknown constraint type '{con['type']}'.") + if 'fun' not in con: + raise ValueError('Constraint %d has no function defined.' % ic) + + lb = 0 + if ctype == 'eq': + ub = 0 + else: + ub = np.inf + + jac = '2-point' + if 'args' in con: + args = con['args'] + def fun(x): + return con["fun"](x, *args) + if 'jac' in con: + def jac(x): + return con["jac"](x, *args) + else: + fun = con['fun'] + if 'jac' in con: + jac = con['jac'] + + return NonlinearConstraint(fun, lb, ub, jac) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_dcsrch.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_dcsrch.py new file mode 100644 index 0000000000000000000000000000000000000000..f8b4df4763ba4f699869431a0b6528383c2f0328 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_dcsrch.py @@ -0,0 +1,728 @@ +import numpy as np + +""" +# 2023 - ported from minpack2.dcsrch, dcstep (Fortran) to Python +c MINPACK-1 Project. June 1983. +c Argonne National Laboratory. +c Jorge J. More' and David J. Thuente. +c +c MINPACK-2 Project. November 1993. +c Argonne National Laboratory and University of Minnesota. +c Brett M. Averick, Richard G. Carter, and Jorge J. More'. +""" + +# NOTE this file was linted by black on first commit, and can be kept that way. + + +class DCSRCH: + """ + Parameters + ---------- + phi : callable phi(alpha) + Function at point `alpha` + derphi : callable phi'(alpha) + Objective function derivative. Returns a scalar. + ftol : float + A nonnegative tolerance for the sufficient decrease condition. + gtol : float + A nonnegative tolerance for the curvature condition. + xtol : float + A nonnegative relative tolerance for an acceptable step. The + subroutine exits with a warning if the relative difference between + sty and stx is less than xtol. + stpmin : float + A nonnegative lower bound for the step. + stpmax : + A nonnegative upper bound for the step. + + Notes + ----- + + This subroutine finds a step that satisfies a sufficient + decrease condition and a curvature condition. + + Each call of the subroutine updates an interval with + endpoints stx and sty. The interval is initially chosen + so that it contains a minimizer of the modified function + + psi(stp) = f(stp) - f(0) - ftol*stp*f'(0). + + If psi(stp) <= 0 and f'(stp) >= 0 for some step, then the + interval is chosen so that it contains a minimizer of f. + + The algorithm is designed to find a step that satisfies + the sufficient decrease condition + + f(stp) <= f(0) + ftol*stp*f'(0), + + and the curvature condition + + abs(f'(stp)) <= gtol*abs(f'(0)). + + If ftol is less than gtol and if, for example, the function + is bounded below, then there is always a step which satisfies + both conditions. + + If no step can be found that satisfies both conditions, then + the algorithm stops with a warning. 
In this case stp only + satisfies the sufficient decrease condition. + + A typical invocation of dcsrch has the following outline: + + Evaluate the function at stp = 0.0d0; store in f. + Evaluate the gradient at stp = 0.0d0; store in g. + Choose a starting step stp. + + task = 'START' + 10 continue + call dcsrch(stp,f,g,ftol,gtol,xtol,task,stpmin,stpmax, + isave,dsave) + if (task .eq. 'FG') then + Evaluate the function and the gradient at stp + go to 10 + end if + + NOTE: The user must not alter work arrays between calls. + + The subroutine statement is + + subroutine dcsrch(f,g,stp,ftol,gtol,xtol,stpmin,stpmax, + task,isave,dsave) + where + + stp is a double precision variable. + On entry stp is the current estimate of a satisfactory + step. On initial entry, a positive initial estimate + must be provided. + On exit stp is the current estimate of a satisfactory step + if task = 'FG'. If task = 'CONV' then stp satisfies + the sufficient decrease and curvature condition. + + f is a double precision variable. + On initial entry f is the value of the function at 0. + On subsequent entries f is the value of the + function at stp. + On exit f is the value of the function at stp. + + g is a double precision variable. + On initial entry g is the derivative of the function at 0. + On subsequent entries g is the derivative of the + function at stp. + On exit g is the derivative of the function at stp. + + ftol is a double precision variable. + On entry ftol specifies a nonnegative tolerance for the + sufficient decrease condition. + On exit ftol is unchanged. + + gtol is a double precision variable. + On entry gtol specifies a nonnegative tolerance for the + curvature condition. + On exit gtol is unchanged. + + xtol is a double precision variable. + On entry xtol specifies a nonnegative relative tolerance + for an acceptable step. The subroutine exits with a + warning if the relative difference between sty and stx + is less than xtol. + + On exit xtol is unchanged. + + task is a character variable of length at least 60. + On initial entry task must be set to 'START'. + On exit task indicates the required action: + + If task(1:2) = 'FG' then evaluate the function and + derivative at stp and call dcsrch again. + + If task(1:4) = 'CONV' then the search is successful. + + If task(1:4) = 'WARN' then the subroutine is not able + to satisfy the convergence conditions. The exit value of + stp contains the best point found during the search. + + If task(1:5) = 'ERROR' then there is an error in the + input arguments. + + On exit with convergence, a warning or an error, the + variable task contains additional information. + + stpmin is a double precision variable. + On entry stpmin is a nonnegative lower bound for the step. + On exit stpmin is unchanged. + + stpmax is a double precision variable. + On entry stpmax is a nonnegative upper bound for the step. + On exit stpmax is unchanged. + + isave is an integer work array of dimension 2. + + dsave is a double precision work array of dimension 13. + + Subprograms called + + MINPACK-2 ... dcstep + MINPACK-1 Project. June 1983. + Argonne National Laboratory. + Jorge J. More' and David J. Thuente. + + MINPACK-2 Project. November 1993. + Argonne National Laboratory and University of Minnesota. + Brett M. Averick, Richard G. Carter, and Jorge J. More'. 
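A minimal illustrative sketch of driving this port on a one-dimensional quadratic (the objective and the tolerance values are arbitrary but representative strong-Wolfe settings):

>>> from scipy.optimize._dcsrch import DCSRCH
>>> phi = lambda s: (s - 2.0) ** 2        # line-search objective phi(alpha)
>>> derphi = lambda s: 2.0 * (s - 2.0)    # its derivative
>>> search = DCSRCH(phi, derphi, ftol=1e-4, gtol=0.1,
...                 xtol=1e-14, stpmin=1e-12, stpmax=50.0)
>>> stp, phi1, phi0, task = search(1.0)   # initial trial step of 1.0
>>> float(stp), task[:4]
(2.0, b'CONV')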
+ """ + + def __init__(self, phi, derphi, ftol, gtol, xtol, stpmin, stpmax): + self.stage = None + self.ginit = None + self.gtest = None + self.gx = None + self.gy = None + self.finit = None + self.fx = None + self.fy = None + self.stx = None + self.sty = None + self.stmin = None + self.stmax = None + self.width = None + self.width1 = None + + # leave all assessment of tolerances/limits to the first call of + # this object + self.ftol = ftol + self.gtol = gtol + self.xtol = xtol + self.stpmin = stpmin + self.stpmax = stpmax + + self.phi = phi + self.derphi = derphi + + def __call__(self, alpha1, phi0=None, derphi0=None, maxiter=100): + """ + Parameters + ---------- + alpha1 : float + alpha1 is the current estimate of a satisfactory + step. A positive initial estimate must be provided. + phi0 : float + the value of `phi` at 0 (if known). + derphi0 : float + the derivative of `derphi` at 0 (if known). + maxiter : int + + Returns + ------- + alpha : float + Step size, or None if no suitable step was found. + phi : float + Value of `phi` at the new point `alpha`. + phi0 : float + Value of `phi` at `alpha=0`. + task : bytes + On exit task indicates status information. + + If task[:4] == b'CONV' then the search is successful. + + If task[:4] == b'WARN' then the subroutine is not able + to satisfy the convergence conditions. The exit value of + stp contains the best point found during the search. + + If task[:5] == b'ERROR' then there is an error in the + input arguments. + """ + if phi0 is None: + phi0 = self.phi(0.0) + if derphi0 is None: + derphi0 = self.derphi(0.0) + + phi1 = phi0 + derphi1 = derphi0 + + task = b"START" + for i in range(maxiter): + stp, phi1, derphi1, task = self._iterate( + alpha1, phi1, derphi1, task + ) + + if not np.isfinite(stp): + task = b"WARN" + stp = None + break + + if task[:2] == b"FG": + alpha1 = stp + phi1 = self.phi(stp) + derphi1 = self.derphi(stp) + else: + break + else: + # maxiter reached, the line search did not converge + stp = None + task = b"WARNING: dcsrch did not converge within max iterations" + + if task[:5] == b"ERROR" or task[:4] == b"WARN": + stp = None # failed + + return stp, phi1, phi0, task + + def _iterate(self, stp, f, g, task): + """ + Parameters + ---------- + stp : float + The current estimate of a satisfactory step. On initial entry, a + positive initial estimate must be provided. + f : float + On first call f is the value of the function at 0. On subsequent + entries f should be the value of the function at stp. + g : float + On initial entry g is the derivative of the function at 0. On + subsequent entries g is the derivative of the function at stp. + task : bytes + On initial entry task must be set to 'START'. + + On exit with convergence, a warning or an error, the + variable task contains additional information. + + + Returns + ------- + stp, f, g, task: tuple + + stp : float + the current estimate of a satisfactory step if task = 'FG'. If + task = 'CONV' then stp satisfies the sufficient decrease and + curvature condition. + f : float + the value of the function at stp. + g : float + the derivative of the function at stp. + task : bytes + On exit task indicates the required action: + + If task(1:2) == b'FG' then evaluate the function and + derivative at stp and call dcsrch again. + + If task(1:4) == b'CONV' then the search is successful. + + If task(1:4) == b'WARN' then the subroutine is not able + to satisfy the convergence conditions. The exit value of + stp contains the best point found during the search. 
+ + If task(1:5) == b'ERROR' then there is an error in the + input arguments. + """ + p5 = 0.5 + p66 = 0.66 + xtrapl = 1.1 + xtrapu = 4.0 + + if task[:5] == b"START": + if stp < self.stpmin: + task = b"ERROR: STP .LT. STPMIN" + if stp > self.stpmax: + task = b"ERROR: STP .GT. STPMAX" + if g >= 0: + task = b"ERROR: INITIAL G .GE. ZERO" + if self.ftol < 0: + task = b"ERROR: FTOL .LT. ZERO" + if self.gtol < 0: + task = b"ERROR: GTOL .LT. ZERO" + if self.xtol < 0: + task = b"ERROR: XTOL .LT. ZERO" + if self.stpmin < 0: + task = b"ERROR: STPMIN .LT. ZERO" + if self.stpmax < self.stpmin: + task = b"ERROR: STPMAX .LT. STPMIN" + + if task[:5] == b"ERROR": + return stp, f, g, task + + # Initialize local variables. + + self.brackt = False + self.stage = 1 + self.finit = f + self.ginit = g + self.gtest = self.ftol * self.ginit + self.width = self.stpmax - self.stpmin + self.width1 = self.width / p5 + + # The variables stx, fx, gx contain the values of the step, + # function, and derivative at the best step. + # The variables sty, fy, gy contain the value of the step, + # function, and derivative at sty. + # The variables stp, f, g contain the values of the step, + # function, and derivative at stp. + + self.stx = 0.0 + self.fx = self.finit + self.gx = self.ginit + self.sty = 0.0 + self.fy = self.finit + self.gy = self.ginit + self.stmin = 0 + self.stmax = stp + xtrapu * stp + task = b"FG" + return stp, f, g, task + + # in the original Fortran this was a location to restore variables + # we don't need to do that because they're attributes. + + # If psi(stp) <= 0 and f'(stp) >= 0 for some step, then the + # algorithm enters the second stage. + ftest = self.finit + stp * self.gtest + + if self.stage == 1 and f <= ftest and g >= 0: + self.stage = 2 + + # test for warnings + if self.brackt and (stp <= self.stmin or stp >= self.stmax): + task = b"WARNING: ROUNDING ERRORS PREVENT PROGRESS" + if self.brackt and self.stmax - self.stmin <= self.xtol * self.stmax: + task = b"WARNING: XTOL TEST SATISFIED" + if stp == self.stpmax and f <= ftest and g <= self.gtest: + task = b"WARNING: STP = STPMAX" + if stp == self.stpmin and (f > ftest or g >= self.gtest): + task = b"WARNING: STP = STPMIN" + + # test for convergence + if f <= ftest and abs(g) <= self.gtol * -self.ginit: + task = b"CONVERGENCE" + + # test for termination + if task[:4] == b"WARN" or task[:4] == b"CONV": + return stp, f, g, task + + # A modified function is used to predict the step during the + # first stage if a lower function value has been obtained but + # the decrease is not sufficient. + if self.stage == 1 and f <= self.fx and f > ftest: + # Define the modified function and derivative values. + fm = f - stp * self.gtest + fxm = self.fx - self.stx * self.gtest + fym = self.fy - self.sty * self.gtest + gm = g - self.gtest + gxm = self.gx - self.gtest + gym = self.gy - self.gtest + + # Call dcstep to update stx, sty, and to compute the new step. + # dcstep can have several operations which can produce NaN + # e.g. inf/inf. Filter these out. 
+ with np.errstate(invalid="ignore", over="ignore"): + tup = dcstep( + self.stx, + fxm, + gxm, + self.sty, + fym, + gym, + stp, + fm, + gm, + self.brackt, + self.stmin, + self.stmax, + ) + self.stx, fxm, gxm, self.sty, fym, gym, stp, self.brackt = tup + + # Reset the function and derivative values for f + self.fx = fxm + self.stx * self.gtest + self.fy = fym + self.sty * self.gtest + self.gx = gxm + self.gtest + self.gy = gym + self.gtest + + else: + # Call dcstep to update stx, sty, and to compute the new step. + # dcstep can have several operations which can produce NaN + # e.g. inf/inf. Filter these out. + + with np.errstate(invalid="ignore", over="ignore"): + tup = dcstep( + self.stx, + self.fx, + self.gx, + self.sty, + self.fy, + self.gy, + stp, + f, + g, + self.brackt, + self.stmin, + self.stmax, + ) + ( + self.stx, + self.fx, + self.gx, + self.sty, + self.fy, + self.gy, + stp, + self.brackt, + ) = tup + + # Decide if a bisection step is needed + if self.brackt: + if abs(self.sty - self.stx) >= p66 * self.width1: + stp = self.stx + p5 * (self.sty - self.stx) + self.width1 = self.width + self.width = abs(self.sty - self.stx) + + # Set the minimum and maximum steps allowed for stp. + if self.brackt: + self.stmin = min(self.stx, self.sty) + self.stmax = max(self.stx, self.sty) + else: + self.stmin = stp + xtrapl * (stp - self.stx) + self.stmax = stp + xtrapu * (stp - self.stx) + + # Force the step to be within the bounds stpmax and stpmin. + stp = np.clip(stp, self.stpmin, self.stpmax) + + # If further progress is not possible, let stp be the best + # point obtained during the search. + if ( + self.brackt + and (stp <= self.stmin or stp >= self.stmax) + or ( + self.brackt + and self.stmax - self.stmin <= self.xtol * self.stmax + ) + ): + stp = self.stx + + # Obtain another function and derivative + task = b"FG" + return stp, f, g, task + + +def dcstep(stx, fx, dx, sty, fy, dy, stp, fp, dp, brackt, stpmin, stpmax): + """ + Subroutine dcstep + + This subroutine computes a safeguarded step for a search + procedure and updates an interval that contains a step that + satisfies a sufficient decrease and a curvature condition. + + The parameter stx contains the step with the least function + value. If brackt is set to .true. then a minimizer has + been bracketed in an interval with endpoints stx and sty. + The parameter stp contains the current step. + The subroutine assumes that if brackt is set to .true. then + + min(stx,sty) < stp < max(stx,sty), + + and that the derivative at stx is negative in the direction + of the step. + + The subroutine statement is + + subroutine dcstep(stx,fx,dx,sty,fy,dy,stp,fp,dp,brackt, + stpmin,stpmax) + + where + + stx is a double precision variable. + On entry stx is the best step obtained so far and is an + endpoint of the interval that contains the minimizer. + On exit stx is the updated best step. + + fx is a double precision variable. + On entry fx is the function at stx. + On exit fx is the function at stx. + + dx is a double precision variable. + On entry dx is the derivative of the function at + stx. The derivative must be negative in the direction of + the step, that is, dx and stp - stx must have opposite + signs. + On exit dx is the derivative of the function at stx. + + sty is a double precision variable. + On entry sty is the second endpoint of the interval that + contains the minimizer. + On exit sty is the updated endpoint of the interval that + contains the minimizer. + + fy is a double precision variable. + On entry fy is the function at sty. 
+ On exit fy is the function at sty. + + dy is a double precision variable. + On entry dy is the derivative of the function at sty. + On exit dy is the derivative of the function at the exit sty. + + stp is a double precision variable. + On entry stp is the current step. If brackt is set to .true. + then on input stp must be between stx and sty. + On exit stp is a new trial step. + + fp is a double precision variable. + On entry fp is the function at stp + On exit fp is unchanged. + + dp is a double precision variable. + On entry dp is the derivative of the function at stp. + On exit dp is unchanged. + + brackt is an logical variable. + On entry brackt specifies if a minimizer has been bracketed. + Initially brackt must be set to .false. + On exit brackt specifies if a minimizer has been bracketed. + When a minimizer is bracketed brackt is set to .true. + + stpmin is a double precision variable. + On entry stpmin is a lower bound for the step. + On exit stpmin is unchanged. + + stpmax is a double precision variable. + On entry stpmax is an upper bound for the step. + On exit stpmax is unchanged. + + MINPACK-1 Project. June 1983 + Argonne National Laboratory. + Jorge J. More' and David J. Thuente. + + MINPACK-2 Project. November 1993. + Argonne National Laboratory and University of Minnesota. + Brett M. Averick and Jorge J. More'. + + """ + sgn_dp = np.sign(dp) + sgn_dx = np.sign(dx) + + # sgnd = dp * (dx / abs(dx)) + sgnd = sgn_dp * sgn_dx + + # First case: A higher function value. The minimum is bracketed. + # If the cubic step is closer to stx than the quadratic step, the + # cubic step is taken, otherwise the average of the cubic and + # quadratic steps is taken. + if fp > fx: + theta = 3.0 * (fx - fp) / (stp - stx) + dx + dp + s = max(abs(theta), abs(dx), abs(dp)) + gamma = s * np.sqrt((theta / s) ** 2 - (dx / s) * (dp / s)) + if stp < stx: + gamma *= -1 + p = (gamma - dx) + theta + q = ((gamma - dx) + gamma) + dp + r = p / q + stpc = stx + r * (stp - stx) + stpq = stx + ((dx / ((fx - fp) / (stp - stx) + dx)) / 2.0) * (stp - stx) + if abs(stpc - stx) <= abs(stpq - stx): + stpf = stpc + else: + stpf = stpc + (stpq - stpc) / 2.0 + brackt = True + elif sgnd < 0.0: + # Second case: A lower function value and derivatives of opposite + # sign. The minimum is bracketed. If the cubic step is farther from + # stp than the secant step, the cubic step is taken, otherwise the + # secant step is taken. + theta = 3 * (fx - fp) / (stp - stx) + dx + dp + s = max(abs(theta), abs(dx), abs(dp)) + gamma = s * np.sqrt((theta / s) ** 2 - (dx / s) * (dp / s)) + if stp > stx: + gamma *= -1 + p = (gamma - dp) + theta + q = ((gamma - dp) + gamma) + dx + r = p / q + stpc = stp + r * (stx - stp) + stpq = stp + (dp / (dp - dx)) * (stx - stp) + if abs(stpc - stp) > abs(stpq - stp): + stpf = stpc + else: + stpf = stpq + brackt = True + elif abs(dp) < abs(dx): + # Third case: A lower function value, derivatives of the same sign, + # and the magnitude of the derivative decreases. + + # The cubic step is computed only if the cubic tends to infinity + # in the direction of the step or if the minimum of the cubic + # is beyond stp. Otherwise the cubic step is defined to be the + # secant step. + theta = 3 * (fx - fp) / (stp - stx) + dx + dp + s = max(abs(theta), abs(dx), abs(dp)) + + # The case gamma = 0 only arises if the cubic does not tend + # to infinity in the direction of the step. 
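+        # Clamping the discriminant at zero guards against a tiny negative
+        # value caused by floating-point round-off, which would otherwise
+        # make np.sqrt return NaN.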
+ gamma = s * np.sqrt(max(0, (theta / s) ** 2 - (dx / s) * (dp / s))) + if stp > stx: + gamma = -gamma + p = (gamma - dp) + theta + q = (gamma + (dx - dp)) + gamma + r = p / q + if r < 0 and gamma != 0: + stpc = stp + r * (stx - stp) + elif stp > stx: + stpc = stpmax + else: + stpc = stpmin + stpq = stp + (dp / (dp - dx)) * (stx - stp) + + if brackt: + # A minimizer has been bracketed. If the cubic step is + # closer to stp than the secant step, the cubic step is + # taken, otherwise the secant step is taken. + if abs(stpc - stp) < abs(stpq - stp): + stpf = stpc + else: + stpf = stpq + + if stp > stx: + stpf = min(stp + 0.66 * (sty - stp), stpf) + else: + stpf = max(stp + 0.66 * (sty - stp), stpf) + else: + # A minimizer has not been bracketed. If the cubic step is + # farther from stp than the secant step, the cubic step is + # taken, otherwise the secant step is taken. + if abs(stpc - stp) > abs(stpq - stp): + stpf = stpc + else: + stpf = stpq + stpf = np.clip(stpf, stpmin, stpmax) + + else: + # Fourth case: A lower function value, derivatives of the same sign, + # and the magnitude of the derivative does not decrease. If the + # minimum is not bracketed, the step is either stpmin or stpmax, + # otherwise the cubic step is taken. + if brackt: + theta = 3.0 * (fp - fy) / (sty - stp) + dy + dp + s = max(abs(theta), abs(dy), abs(dp)) + gamma = s * np.sqrt((theta / s) ** 2 - (dy / s) * (dp / s)) + if stp > sty: + gamma = -gamma + p = (gamma - dp) + theta + q = ((gamma - dp) + gamma) + dy + r = p / q + stpc = stp + r * (sty - stp) + stpf = stpc + elif stp > stx: + stpf = stpmax + else: + stpf = stpmin + + # Update the interval which contains a minimizer. + if fp > fx: + sty = stp + fy = fp + dy = dp + else: + if sgnd < 0: + sty = stx + fy = fx + dy = dx + stx = stp + fx = fp + dx = dp + + # Compute the new step. + stp = stpf + + return stx, fx, dx, sty, fy, dy, stp, brackt diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_differentiable_functions.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_differentiable_functions.py new file mode 100644 index 0000000000000000000000000000000000000000..afbb2152c21a7837e77a6a77b3d8f1f6b0114270 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_differentiable_functions.py @@ -0,0 +1,694 @@ +import numpy as np +import scipy.sparse as sps +from ._numdiff import approx_derivative, group_columns +from ._hessian_update_strategy import HessianUpdateStrategy +from scipy.sparse.linalg import LinearOperator +from scipy._lib._array_api import array_namespace +from scipy._lib import array_api_extra as xpx + + +FD_METHODS = ('2-point', '3-point', 'cs') + + +def _wrapper_fun(fun, args=()): + ncalls = [0] + + def wrapped(x): + ncalls[0] += 1 + # Send a copy because the user may overwrite it. + # Overwriting results in undefined behaviour because + # fun(self.x) will change self.x, with the two no longer linked. + fx = fun(np.copy(x), *args) + # Make sure the function returns a true scalar + if not np.isscalar(fx): + try: + fx = np.asarray(fx).item() + except (TypeError, ValueError) as e: + raise ValueError( + "The user-provided objective function " + "must return a scalar value." 
+ ) from e + return fx + return wrapped, ncalls + + +def _wrapper_grad(grad, fun=None, args=(), finite_diff_options=None): + ncalls = [0] + + if callable(grad): + def wrapped(x, **kwds): + # kwds present to give function same signature as numdiff variant + ncalls[0] += 1 + return np.atleast_1d(grad(np.copy(x), *args)) + return wrapped, ncalls + + elif grad in FD_METHODS: + def wrapped1(x, f0=None): + ncalls[0] += 1 + return approx_derivative( + fun, x, f0=f0, **finite_diff_options + ) + + return wrapped1, ncalls + + +def _wrapper_hess(hess, grad=None, x0=None, args=(), finite_diff_options=None): + if callable(hess): + H = hess(np.copy(x0), *args) + ncalls = [1] + + if sps.issparse(H): + def wrapped(x, **kwds): + ncalls[0] += 1 + return sps.csr_matrix(hess(np.copy(x), *args)) + + H = sps.csr_matrix(H) + + elif isinstance(H, LinearOperator): + def wrapped(x, **kwds): + ncalls[0] += 1 + return hess(np.copy(x), *args) + + else: # dense + def wrapped(x, **kwds): + ncalls[0] += 1 + return np.atleast_2d(np.asarray(hess(np.copy(x), *args))) + + H = np.atleast_2d(np.asarray(H)) + + return wrapped, ncalls, H + elif hess in FD_METHODS: + ncalls = [0] + + def wrapped1(x, f0=None): + return approx_derivative( + grad, x, f0=f0, **finite_diff_options + ) + + return wrapped1, ncalls, None + + +class ScalarFunction: + """Scalar function and its derivatives. + + This class defines a scalar function F: R^n->R and methods for + computing or approximating its first and second derivatives. + + Parameters + ---------- + fun : callable + evaluates the scalar function. Must be of the form ``fun(x, *args)``, + where ``x`` is the argument in the form of a 1-D array and ``args`` is + a tuple of any additional fixed parameters needed to completely specify + the function. Should return a scalar. + x0 : array-like + Provides an initial set of variables for evaluating fun. Array of real + elements of size (n,), where 'n' is the number of independent + variables. + args : tuple, optional + Any additional fixed parameters needed to completely specify the scalar + function. + grad : {callable, '2-point', '3-point', 'cs'} + Method for computing the gradient vector. + If it is a callable, it should be a function that returns the gradient + vector: + + ``grad(x, *args) -> array_like, shape (n,)`` + + where ``x`` is an array with shape (n,) and ``args`` is a tuple with + the fixed parameters. + Alternatively, the keywords {'2-point', '3-point', 'cs'} can be used + to select a finite difference scheme for numerical estimation of the + gradient with a relative step size. These finite difference schemes + obey any specified `bounds`. + hess : {callable, '2-point', '3-point', 'cs', HessianUpdateStrategy} + Method for computing the Hessian matrix. If it is callable, it should + return the Hessian matrix: + + ``hess(x, *args) -> {LinearOperator, spmatrix, array}, (n, n)`` + + where x is a (n,) ndarray and `args` is a tuple with the fixed + parameters. Alternatively, the keywords {'2-point', '3-point', 'cs'} + select a finite difference scheme for numerical estimation. Or, objects + implementing `HessianUpdateStrategy` interface can be used to + approximate the Hessian. + Whenever the gradient is estimated via finite-differences, the Hessian + cannot be estimated with options {'2-point', '3-point', 'cs'} and needs + to be estimated using one of the quasi-Newton strategies. + finite_diff_rel_step : None or array_like + Relative step size to use. 
The absolute step size is computed as + ``h = finite_diff_rel_step * sign(x0) * max(1, abs(x0))``, possibly + adjusted to fit into the bounds. For ``method='3-point'`` the sign + of `h` is ignored. If None then finite_diff_rel_step is selected + automatically, + finite_diff_bounds : tuple of array_like + Lower and upper bounds on independent variables. Defaults to no bounds, + (-np.inf, np.inf). Each bound must match the size of `x0` or be a + scalar, in the latter case the bound will be the same for all + variables. Use it to limit the range of function evaluation. + epsilon : None or array_like, optional + Absolute step size to use, possibly adjusted to fit into the bounds. + For ``method='3-point'`` the sign of `epsilon` is ignored. By default + relative steps are used, only if ``epsilon is not None`` are absolute + steps used. + + Notes + ----- + This class implements a memoization logic. There are methods `fun`, + `grad`, hess` and corresponding attributes `f`, `g` and `H`. The following + things should be considered: + + 1. Use only public methods `fun`, `grad` and `hess`. + 2. After one of the methods is called, the corresponding attribute + will be set. However, a subsequent call with a different argument + of *any* of the methods may overwrite the attribute. + """ + def __init__(self, fun, x0, args, grad, hess, finite_diff_rel_step, + finite_diff_bounds, epsilon=None): + if not callable(grad) and grad not in FD_METHODS: + raise ValueError( + f"`grad` must be either callable or one of {FD_METHODS}." + ) + + if not (callable(hess) or hess in FD_METHODS + or isinstance(hess, HessianUpdateStrategy)): + raise ValueError( + f"`hess` must be either callable, HessianUpdateStrategy" + f" or one of {FD_METHODS}." + ) + + if grad in FD_METHODS and hess in FD_METHODS: + raise ValueError("Whenever the gradient is estimated via " + "finite-differences, we require the Hessian " + "to be estimated using one of the " + "quasi-Newton strategies.") + + self.xp = xp = array_namespace(x0) + _x = xpx.atleast_nd(xp.asarray(x0), ndim=1, xp=xp) + _dtype = xp.float64 + if xp.isdtype(_x.dtype, "real floating"): + _dtype = _x.dtype + + # original arguments + self._wrapped_fun, self._nfev = _wrapper_fun(fun, args=args) + self._orig_fun = fun + self._orig_grad = grad + self._orig_hess = hess + self._args = args + + # promotes to floating + self.x = xp.astype(_x, _dtype) + self.x_dtype = _dtype + self.n = self.x.size + self.f_updated = False + self.g_updated = False + self.H_updated = False + + self._lowest_x = None + self._lowest_f = np.inf + + finite_diff_options = {} + if grad in FD_METHODS: + finite_diff_options["method"] = grad + finite_diff_options["rel_step"] = finite_diff_rel_step + finite_diff_options["abs_step"] = epsilon + finite_diff_options["bounds"] = finite_diff_bounds + if hess in FD_METHODS: + finite_diff_options["method"] = hess + finite_diff_options["rel_step"] = finite_diff_rel_step + finite_diff_options["abs_step"] = epsilon + finite_diff_options["as_linear_operator"] = True + + # Initial function evaluation + self._update_fun() + + # Initial gradient evaluation + self._wrapped_grad, self._ngev = _wrapper_grad( + grad, + fun=self._wrapped_fun, + args=args, + finite_diff_options=finite_diff_options + ) + self._update_grad() + + # Hessian evaluation + if callable(hess): + self._wrapped_hess, self._nhev, self.H = _wrapper_hess( + hess, x0=x0, args=args + ) + self.H_updated = True + elif hess in FD_METHODS: + self._wrapped_hess, self._nhev, self.H = _wrapper_hess( + hess, + 
grad=self._wrapped_grad, + x0=x0, + finite_diff_options=finite_diff_options + ) + self._update_grad() + self.H = self._wrapped_hess(self.x, f0=self.g) + self.H_updated = True + elif isinstance(hess, HessianUpdateStrategy): + self.H = hess + self.H.initialize(self.n, 'hess') + self.H_updated = True + self.x_prev = None + self.g_prev = None + self._nhev = [0] + + @property + def nfev(self): + return self._nfev[0] + + @property + def ngev(self): + return self._ngev[0] + + @property + def nhev(self): + return self._nhev[0] + + def _update_x(self, x): + if isinstance(self._orig_hess, HessianUpdateStrategy): + self._update_grad() + self.x_prev = self.x + self.g_prev = self.g + # ensure that self.x is a copy of x. Don't store a reference + # otherwise the memoization doesn't work properly. + + _x = xpx.atleast_nd(self.xp.asarray(x), ndim=1, xp=self.xp) + self.x = self.xp.astype(_x, self.x_dtype) + self.f_updated = False + self.g_updated = False + self.H_updated = False + self._update_hess() + else: + # ensure that self.x is a copy of x. Don't store a reference + # otherwise the memoization doesn't work properly. + _x = xpx.atleast_nd(self.xp.asarray(x), ndim=1, xp=self.xp) + self.x = self.xp.astype(_x, self.x_dtype) + self.f_updated = False + self.g_updated = False + self.H_updated = False + + def _update_fun(self): + if not self.f_updated: + fx = self._wrapped_fun(self.x) + if fx < self._lowest_f: + self._lowest_x = self.x + self._lowest_f = fx + + self.f = fx + self.f_updated = True + + def _update_grad(self): + if not self.g_updated: + if self._orig_grad in FD_METHODS: + self._update_fun() + self.g = self._wrapped_grad(self.x, f0=self.f) + self.g_updated = True + + def _update_hess(self): + if not self.H_updated: + if self._orig_hess in FD_METHODS: + self._update_grad() + self.H = self._wrapped_hess(self.x, f0=self.g) + elif isinstance(self._orig_hess, HessianUpdateStrategy): + self._update_grad() + self.H.update(self.x - self.x_prev, self.g - self.g_prev) + else: # should be callable(hess) + self.H = self._wrapped_hess(self.x) + + self.H_updated = True + + def fun(self, x): + if not np.array_equal(x, self.x): + self._update_x(x) + self._update_fun() + return self.f + + def grad(self, x): + if not np.array_equal(x, self.x): + self._update_x(x) + self._update_grad() + return self.g + + def hess(self, x): + if not np.array_equal(x, self.x): + self._update_x(x) + self._update_hess() + return self.H + + def fun_and_grad(self, x): + if not np.array_equal(x, self.x): + self._update_x(x) + self._update_fun() + self._update_grad() + return self.f, self.g + + +class VectorFunction: + """Vector function and its derivatives. + + This class defines a vector function F: R^n->R^m and methods for + computing or approximating its first and second derivatives. + + Notes + ----- + This class implements a memoization logic. There are methods `fun`, + `jac`, hess` and corresponding attributes `f`, `J` and `H`. The following + things should be considered: + + 1. Use only public methods `fun`, `jac` and `hess`. + 2. After one of the methods is called, the corresponding attribute + will be set. However, a subsequent call with a different argument + of *any* of the methods may overwrite the attribute. 
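+
+    Examples
+    --------
+    A minimal illustrative sketch (this is an internal class; the call below
+    simply follows the ``__init__`` signature defined in this module, and the
+    input values are arbitrary): evaluate a vector function with a '2-point'
+    finite-difference Jacobian and a BFGS quasi-Newton Hessian approximation.
+
+    >>> import numpy as np
+    >>> from scipy.optimize import BFGS
+    >>> from scipy.optimize._differentiable_functions import VectorFunction
+    >>> def fun(x):
+    ...     return np.array([x[0] ** 2 + x[1], x[0] * x[1]])
+    >>> vf = VectorFunction(fun, np.array([1.0, 2.0]), '2-point', BFGS(),
+    ...                     None, None, (-np.inf, np.inf), None)
+    >>> vf.fun(np.array([1.0, 2.0]))
+    array([3., 2.])
+    >>> vf.jac(np.array([1.0, 2.0])).shape
+    (2, 2)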
+ """ + def __init__(self, fun, x0, jac, hess, + finite_diff_rel_step, finite_diff_jac_sparsity, + finite_diff_bounds, sparse_jacobian): + if not callable(jac) and jac not in FD_METHODS: + raise ValueError(f"`jac` must be either callable or one of {FD_METHODS}.") + + if not (callable(hess) or hess in FD_METHODS + or isinstance(hess, HessianUpdateStrategy)): + raise ValueError("`hess` must be either callable," + f"HessianUpdateStrategy or one of {FD_METHODS}.") + + if jac in FD_METHODS and hess in FD_METHODS: + raise ValueError("Whenever the Jacobian is estimated via " + "finite-differences, we require the Hessian to " + "be estimated using one of the quasi-Newton " + "strategies.") + + self.xp = xp = array_namespace(x0) + _x = xpx.atleast_nd(xp.asarray(x0), ndim=1, xp=xp) + _dtype = xp.float64 + if xp.isdtype(_x.dtype, "real floating"): + _dtype = _x.dtype + + # promotes to floating + self.x = xp.astype(_x, _dtype) + self.x_dtype = _dtype + + self.n = self.x.size + self.nfev = 0 + self.njev = 0 + self.nhev = 0 + self.f_updated = False + self.J_updated = False + self.H_updated = False + + finite_diff_options = {} + if jac in FD_METHODS: + finite_diff_options["method"] = jac + finite_diff_options["rel_step"] = finite_diff_rel_step + if finite_diff_jac_sparsity is not None: + sparsity_groups = group_columns(finite_diff_jac_sparsity) + finite_diff_options["sparsity"] = (finite_diff_jac_sparsity, + sparsity_groups) + finite_diff_options["bounds"] = finite_diff_bounds + self.x_diff = np.copy(self.x) + if hess in FD_METHODS: + finite_diff_options["method"] = hess + finite_diff_options["rel_step"] = finite_diff_rel_step + finite_diff_options["as_linear_operator"] = True + self.x_diff = np.copy(self.x) + if jac in FD_METHODS and hess in FD_METHODS: + raise ValueError("Whenever the Jacobian is estimated via " + "finite-differences, we require the Hessian to " + "be estimated using one of the quasi-Newton " + "strategies.") + + # Function evaluation + def fun_wrapped(x): + self.nfev += 1 + return np.atleast_1d(fun(x)) + + def update_fun(): + self.f = fun_wrapped(self.x) + + self._update_fun_impl = update_fun + update_fun() + + self.v = np.zeros_like(self.f) + self.m = self.v.size + + # Jacobian Evaluation + if callable(jac): + self.J = jac(self.x) + self.J_updated = True + self.njev += 1 + + if (sparse_jacobian or + sparse_jacobian is None and sps.issparse(self.J)): + def jac_wrapped(x): + self.njev += 1 + return sps.csr_matrix(jac(x)) + self.J = sps.csr_matrix(self.J) + self.sparse_jacobian = True + + elif sps.issparse(self.J): + def jac_wrapped(x): + self.njev += 1 + return jac(x).toarray() + self.J = self.J.toarray() + self.sparse_jacobian = False + + else: + def jac_wrapped(x): + self.njev += 1 + return np.atleast_2d(jac(x)) + self.J = np.atleast_2d(self.J) + self.sparse_jacobian = False + + def update_jac(): + self.J = jac_wrapped(self.x) + + elif jac in FD_METHODS: + self.J = approx_derivative(fun_wrapped, self.x, f0=self.f, + **finite_diff_options) + self.J_updated = True + + if (sparse_jacobian or + sparse_jacobian is None and sps.issparse(self.J)): + def update_jac(): + self._update_fun() + self.J = sps.csr_matrix( + approx_derivative(fun_wrapped, self.x, f0=self.f, + **finite_diff_options)) + self.J = sps.csr_matrix(self.J) + self.sparse_jacobian = True + + elif sps.issparse(self.J): + def update_jac(): + self._update_fun() + self.J = approx_derivative(fun_wrapped, self.x, f0=self.f, + **finite_diff_options).toarray() + self.J = self.J.toarray() + self.sparse_jacobian = False + + else: + 
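+                # Dense default: the finite-difference Jacobian is kept as a
+                # 2-D ndarray.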
def update_jac(): + self._update_fun() + self.J = np.atleast_2d( + approx_derivative(fun_wrapped, self.x, f0=self.f, + **finite_diff_options)) + self.J = np.atleast_2d(self.J) + self.sparse_jacobian = False + + self._update_jac_impl = update_jac + + # Define Hessian + if callable(hess): + self.H = hess(self.x, self.v) + self.H_updated = True + self.nhev += 1 + + if sps.issparse(self.H): + def hess_wrapped(x, v): + self.nhev += 1 + return sps.csr_matrix(hess(x, v)) + self.H = sps.csr_matrix(self.H) + + elif isinstance(self.H, LinearOperator): + def hess_wrapped(x, v): + self.nhev += 1 + return hess(x, v) + + else: + def hess_wrapped(x, v): + self.nhev += 1 + return np.atleast_2d(np.asarray(hess(x, v))) + self.H = np.atleast_2d(np.asarray(self.H)) + + def update_hess(): + self.H = hess_wrapped(self.x, self.v) + elif hess in FD_METHODS: + def jac_dot_v(x, v): + return jac_wrapped(x).T.dot(v) + + def update_hess(): + self._update_jac() + self.H = approx_derivative(jac_dot_v, self.x, + f0=self.J.T.dot(self.v), + args=(self.v,), + **finite_diff_options) + update_hess() + self.H_updated = True + elif isinstance(hess, HessianUpdateStrategy): + self.H = hess + self.H.initialize(self.n, 'hess') + self.H_updated = True + self.x_prev = None + self.J_prev = None + + def update_hess(): + self._update_jac() + # When v is updated before x was updated, then x_prev and + # J_prev are None and we need this check. + if self.x_prev is not None and self.J_prev is not None: + delta_x = self.x - self.x_prev + delta_g = self.J.T.dot(self.v) - self.J_prev.T.dot(self.v) + self.H.update(delta_x, delta_g) + + self._update_hess_impl = update_hess + + if isinstance(hess, HessianUpdateStrategy): + def update_x(x): + self._update_jac() + self.x_prev = self.x + self.J_prev = self.J + _x = xpx.atleast_nd(self.xp.asarray(x), ndim=1, xp=self.xp) + self.x = self.xp.astype(_x, self.x_dtype) + self.f_updated = False + self.J_updated = False + self.H_updated = False + self._update_hess() + else: + def update_x(x): + _x = xpx.atleast_nd(self.xp.asarray(x), ndim=1, xp=self.xp) + self.x = self.xp.astype(_x, self.x_dtype) + self.f_updated = False + self.J_updated = False + self.H_updated = False + + self._update_x_impl = update_x + + def _update_v(self, v): + if not np.array_equal(v, self.v): + self.v = v + self.H_updated = False + + def _update_x(self, x): + if not np.array_equal(x, self.x): + self._update_x_impl(x) + + def _update_fun(self): + if not self.f_updated: + self._update_fun_impl() + self.f_updated = True + + def _update_jac(self): + if not self.J_updated: + self._update_jac_impl() + self.J_updated = True + + def _update_hess(self): + if not self.H_updated: + self._update_hess_impl() + self.H_updated = True + + def fun(self, x): + self._update_x(x) + self._update_fun() + return self.f + + def jac(self, x): + self._update_x(x) + self._update_jac() + return self.J + + def hess(self, x, v): + # v should be updated before x. + self._update_v(v) + self._update_x(x) + self._update_hess() + return self.H + + +class LinearVectorFunction: + """Linear vector function and its derivatives. + + Defines a linear function F = A x, where x is N-D vector and + A is m-by-n matrix. The Jacobian is constant and equals to A. The Hessian + is identically zero and it is returned as a csr matrix. 
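+
+    Examples
+    --------
+    A small illustrative sketch (internal class; the inputs below are
+    arbitrary) showing the constant-Jacobian behaviour described above:
+
+    >>> import numpy as np
+    >>> from scipy.optimize._differentiable_functions import LinearVectorFunction
+    >>> A = np.array([[1.0, 2.0], [3.0, 4.0]])
+    >>> lvf = LinearVectorFunction(A, np.zeros(2), sparse_jacobian=False)
+    >>> lvf.fun(np.array([1.0, 1.0]))
+    array([3., 7.])
+    >>> bool(np.array_equal(lvf.jac(np.array([1.0, 1.0])), A))
+    True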
+ """ + def __init__(self, A, x0, sparse_jacobian): + if sparse_jacobian or sparse_jacobian is None and sps.issparse(A): + self.J = sps.csr_matrix(A) + self.sparse_jacobian = True + elif sps.issparse(A): + self.J = A.toarray() + self.sparse_jacobian = False + else: + # np.asarray makes sure A is ndarray and not matrix + self.J = np.atleast_2d(np.asarray(A)) + self.sparse_jacobian = False + + self.m, self.n = self.J.shape + + self.xp = xp = array_namespace(x0) + _x = xpx.atleast_nd(xp.asarray(x0), ndim=1, xp=xp) + _dtype = xp.float64 + if xp.isdtype(_x.dtype, "real floating"): + _dtype = _x.dtype + + # promotes to floating + self.x = xp.astype(_x, _dtype) + self.x_dtype = _dtype + + self.f = self.J.dot(self.x) + self.f_updated = True + + self.v = np.zeros(self.m, dtype=float) + self.H = sps.csr_matrix((self.n, self.n)) + + def _update_x(self, x): + if not np.array_equal(x, self.x): + _x = xpx.atleast_nd(self.xp.asarray(x), ndim=1, xp=self.xp) + self.x = self.xp.astype(_x, self.x_dtype) + self.f_updated = False + + def fun(self, x): + self._update_x(x) + if not self.f_updated: + self.f = self.J.dot(x) + self.f_updated = True + return self.f + + def jac(self, x): + self._update_x(x) + return self.J + + def hess(self, x, v): + self._update_x(x) + self.v = v + return self.H + + +class IdentityVectorFunction(LinearVectorFunction): + """Identity vector function and its derivatives. + + The Jacobian is the identity matrix, returned as a dense array when + `sparse_jacobian=False` and as a csr matrix otherwise. The Hessian is + identically zero and it is returned as a csr matrix. + """ + def __init__(self, x0, sparse_jacobian): + n = len(x0) + if sparse_jacobian or sparse_jacobian is None: + A = sps.eye(n, format='csr') + sparse_jacobian = True + else: + A = np.eye(n) + sparse_jacobian = False + super().__init__(A, x0, sparse_jacobian) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_differentialevolution.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_differentialevolution.py new file mode 100644 index 0000000000000000000000000000000000000000..70097b7aea61d3eec9f9dabadd4820e7576b44ce --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_differentialevolution.py @@ -0,0 +1,1969 @@ +""" +differential_evolution: The differential evolution global optimization algorithm +Added by Andrew Nelson 2014 +""" +import warnings + +import numpy as np +from scipy.optimize import OptimizeResult, minimize +from scipy.optimize._optimize import _status_message, _wrap_callback +from scipy._lib._util import (check_random_state, MapWrapper, _FunctionWrapper, + rng_integers, _transition_to_rng) + +from scipy.optimize._constraints import (Bounds, new_bounds_to_old, + NonlinearConstraint, LinearConstraint) +from scipy.sparse import issparse + +__all__ = ['differential_evolution'] + + +_MACHEPS = np.finfo(np.float64).eps + + +@_transition_to_rng("seed", position_num=9) +def differential_evolution(func, bounds, args=(), strategy='best1bin', + maxiter=1000, popsize=15, tol=0.01, + mutation=(0.5, 1), recombination=0.7, rng=None, + callback=None, disp=False, polish=True, + init='latinhypercube', atol=0, updating='immediate', + workers=1, constraints=(), x0=None, *, + integrality=None, vectorized=False): + r"""Finds the global minimum of a multivariate function. + + The differential evolution method [1]_ is stochastic in nature. 
It does + not use gradient methods to find the minimum, and can search large areas + of candidate space, but often requires larger numbers of function + evaluations than conventional gradient-based techniques. + + The algorithm is due to Storn and Price [2]_. + + Parameters + ---------- + func : callable + The objective function to be minimized. Must be in the form + ``f(x, *args)``, where ``x`` is the argument in the form of a 1-D array + and ``args`` is a tuple of any additional fixed parameters needed to + completely specify the function. The number of parameters, N, is equal + to ``len(x)``. + bounds : sequence or `Bounds` + Bounds for variables. There are two ways to specify the bounds: + + 1. Instance of `Bounds` class. + 2. ``(min, max)`` pairs for each element in ``x``, defining the + finite lower and upper bounds for the optimizing argument of + `func`. + + The total number of bounds is used to determine the number of + parameters, N. If there are parameters whose bounds are equal the total + number of free parameters is ``N - N_equal``. + + args : tuple, optional + Any additional fixed parameters needed to + completely specify the objective function. + strategy : {str, callable}, optional + The differential evolution strategy to use. Should be one of: + + - 'best1bin' + - 'best1exp' + - 'rand1bin' + - 'rand1exp' + - 'rand2bin' + - 'rand2exp' + - 'randtobest1bin' + - 'randtobest1exp' + - 'currenttobest1bin' + - 'currenttobest1exp' + - 'best2exp' + - 'best2bin' + + The default is 'best1bin'. Strategies that may be implemented are + outlined in 'Notes'. + Alternatively the differential evolution strategy can be customized by + providing a callable that constructs a trial vector. The callable must + have the form ``strategy(candidate: int, population: np.ndarray, rng=None)``, + where ``candidate`` is an integer specifying which entry of the + population is being evolved, ``population`` is an array of shape + ``(S, N)`` containing all the population members (where S is the + total population size), and ``rng`` is the random number generator + being used within the solver. + ``candidate`` will be in the range ``[0, S)``. + ``strategy`` must return a trial vector with shape ``(N,)``. The + fitness of this trial vector is compared against the fitness of + ``population[candidate]``. + + .. versionchanged:: 1.12.0 + Customization of evolution strategy via a callable. + + maxiter : int, optional + The maximum number of generations over which the entire population is + evolved. The maximum number of function evaluations (with no polishing) + is: ``(maxiter + 1) * popsize * (N - N_equal)`` + popsize : int, optional + A multiplier for setting the total population size. The population has + ``popsize * (N - N_equal)`` individuals. This keyword is overridden if + an initial population is supplied via the `init` keyword. When using + ``init='sobol'`` the population size is calculated as the next power + of 2 after ``popsize * (N - N_equal)``. + tol : float, optional + Relative tolerance for convergence, the solving stops when + ``np.std(population_energies) <= atol + tol * np.abs(np.mean(population_energies))``, + where and `atol` and `tol` are the absolute and relative tolerance + respectively. + mutation : float or tuple(float, float), optional + The mutation constant. In the literature this is also known as + differential weight, being denoted by :math:`F`. + If specified as a float it should be in the range [0, 2). + If specified as a tuple ``(min, max)`` dithering is employed. 
Dithering + randomly changes the mutation constant on a generation by generation + basis. The mutation constant for that generation is taken from + ``U[min, max)``. Dithering can help speed convergence significantly. + Increasing the mutation constant increases the search radius, but will + slow down convergence. + recombination : float, optional + The recombination constant, should be in the range [0, 1]. In the + literature this is also known as the crossover probability, being + denoted by CR. Increasing this value allows a larger number of mutants + to progress into the next generation, but at the risk of population + stability. + rng : `numpy.random.Generator`, optional + Pseudorandom number generator state. When `rng` is None, a new + `numpy.random.Generator` is created using entropy from the + operating system. Types other than `numpy.random.Generator` are + passed to `numpy.random.default_rng` to instantiate a ``Generator``. + disp : bool, optional + Prints the evaluated `func` at every iteration. + callback : callable, optional + A callable called after each iteration. Has the signature:: + + callback(intermediate_result: OptimizeResult) + + where ``intermediate_result`` is a keyword parameter containing an + `OptimizeResult` with attributes ``x`` and ``fun``, the best solution + found so far and the objective function. Note that the name + of the parameter must be ``intermediate_result`` for the callback + to be passed an `OptimizeResult`. + + The callback also supports a signature like:: + + callback(x, convergence: float=val) + + ``val`` represents the fractional value of the population convergence. + When ``val`` is greater than ``1.0``, the function halts. + + Introspection is used to determine which of the signatures is invoked. + + Global minimization will halt if the callback raises ``StopIteration`` + or returns ``True``; any polishing is still carried out. + + .. versionchanged:: 1.12.0 + callback accepts the ``intermediate_result`` keyword. + + polish : bool, optional + If True (default), then `scipy.optimize.minimize` with the `L-BFGS-B` + method is used to polish the best population member at the end, which + can improve the minimization slightly. If a constrained problem is + being studied then the `trust-constr` method is used instead. For large + problems with many constraints, polishing can take a long time due to + the Jacobian computations. + + .. versionchanged:: 1.15.0 + If `workers` is specified then the map-like callable that wraps + `func` is supplied to `minimize` instead of it using `func` + directly. This allows the caller to control how and where the + invocations actually run. + + init : str or array-like, optional + Specify which type of population initialization is performed. Should be + one of: + + - 'latinhypercube' + - 'sobol' + - 'halton' + - 'random' + - array specifying the initial population. The array should have + shape ``(S, N)``, where S is the total population size and N is + the number of parameters. + + `init` is clipped to `bounds` before use. + + The default is 'latinhypercube'. Latin Hypercube sampling tries to + maximize coverage of the available parameter space. + + 'sobol' and 'halton' are superior alternatives and maximize even more + the parameter space. 'sobol' will enforce an initial population + size which is calculated as the next power of 2 after + ``popsize * (N - N_equal)``. 'halton' has no requirements but is a bit + less efficient. See `scipy.stats.qmc` for more details. 
+ + 'random' initializes the population randomly - this has the drawback + that clustering can occur, preventing the whole of parameter space + being covered. Use of an array to specify a population could be used, + for example, to create a tight bunch of initial guesses in an location + where the solution is known to exist, thereby reducing time for + convergence. + atol : float, optional + Absolute tolerance for convergence, the solving stops when + ``np.std(pop) <= atol + tol * np.abs(np.mean(population_energies))``, + where and `atol` and `tol` are the absolute and relative tolerance + respectively. + updating : {'immediate', 'deferred'}, optional + If ``'immediate'``, the best solution vector is continuously updated + within a single generation [4]_. This can lead to faster convergence as + trial vectors can take advantage of continuous improvements in the best + solution. + With ``'deferred'``, the best solution vector is updated once per + generation. Only ``'deferred'`` is compatible with parallelization or + vectorization, and the `workers` and `vectorized` keywords can + over-ride this option. + + .. versionadded:: 1.2.0 + + workers : int or map-like callable, optional + If `workers` is an int the population is subdivided into `workers` + sections and evaluated in parallel + (uses `multiprocessing.Pool `). + Supply -1 to use all available CPU cores. + Alternatively supply a map-like callable, such as + `multiprocessing.Pool.map` for evaluating the population in parallel. + This evaluation is carried out as ``workers(func, iterable)``. + This option will override the `updating` keyword to + ``updating='deferred'`` if ``workers != 1``. + This option overrides the `vectorized` keyword if ``workers != 1``. + Requires that `func` be pickleable. + + .. versionadded:: 1.2.0 + + constraints : {NonLinearConstraint, LinearConstraint, Bounds} + Constraints on the solver, over and above those applied by the `bounds` + kwd. Uses the approach by Lampinen [5]_. + + .. versionadded:: 1.4.0 + + x0 : None or array-like, optional + Provides an initial guess to the minimization. Once the population has + been initialized this vector replaces the first (best) member. This + replacement is done even if `init` is given an initial population. + ``x0.shape == (N,)``. + + .. versionadded:: 1.7.0 + + integrality : 1-D array, optional + For each decision variable, a boolean value indicating whether the + decision variable is constrained to integer values. The array is + broadcast to ``(N,)``. + If any decision variables are constrained to be integral, they will not + be changed during polishing. + Only integer values lying between the lower and upper bounds are used. + If there are no integer values lying between the bounds then a + `ValueError` is raised. + + .. versionadded:: 1.9.0 + + vectorized : bool, optional + If ``vectorized is True``, `func` is sent an `x` array with + ``x.shape == (N, S)``, and is expected to return an array of shape + ``(S,)``, where `S` is the number of solution vectors to be calculated. + If constraints are applied, each of the functions used to construct + a `Constraint` object should accept an `x` array with + ``x.shape == (N, S)``, and return an array of shape ``(M, S)``, where + `M` is the number of constraint components. + This option is an alternative to the parallelization offered by + `workers`, and may help in optimization speed by reducing interpreter + overhead from multiple function calls. This keyword is ignored if + ``workers != 1``. 
+ This option will override the `updating` keyword to + ``updating='deferred'``. + See the notes section for further discussion on when to use + ``'vectorized'``, and when to use ``'workers'``. + + .. versionadded:: 1.9.0 + + Returns + ------- + res : OptimizeResult + The optimization result represented as a `OptimizeResult` object. + Important attributes are: ``x`` the solution array, ``success`` a + Boolean flag indicating if the optimizer exited successfully, + ``message`` which describes the cause of the termination, + ``population`` the solution vectors present in the population, and + ``population_energies`` the value of the objective function for each + entry in ``population``. + See `OptimizeResult` for a description of other attributes. If `polish` + was employed, and a lower minimum was obtained by the polishing, then + OptimizeResult also contains the ``jac`` attribute. + If the eventual solution does not satisfy the applied constraints + ``success`` will be `False`. + + Notes + ----- + Differential evolution is a stochastic population based method that is + useful for global optimization problems. At each pass through the + population the algorithm mutates each candidate solution by mixing with + other candidate solutions to create a trial candidate. There are several + strategies [3]_ for creating trial candidates, which suit some problems + more than others. The 'best1bin' strategy is a good starting point for + many systems. In this strategy two members of the population are randomly + chosen. Their difference is used to mutate the best member (the 'best' in + 'best1bin'), :math:`x_0`, so far: + + .. math:: + + b' = x_0 + F \cdot (x_{r_0} - x_{r_1}) + + where :math:`F` is the `mutation` parameter. + A trial vector is then constructed. Starting with a randomly chosen ith + parameter the trial is sequentially filled (in modulo) with parameters + from ``b'`` or the original candidate. The choice of whether to use ``b'`` + or the original candidate is made with a binomial distribution (the 'bin' + in 'best1bin') - a random number in [0, 1) is generated. If this number is + less than the `recombination` constant then the parameter is loaded from + ``b'``, otherwise it is loaded from the original candidate. The final + parameter is always loaded from ``b'``. Once the trial candidate is built + its fitness is assessed. If the trial is better than the original candidate + then it takes its place. If it is also better than the best overall + candidate it also replaces that. + + The other strategies available are outlined in Qiang and + Mitchell (2014) [3]_. + + + - ``rand1`` : :math:`b' = x_{r_0} + F \cdot (x_{r_1} - x_{r_2})` + - ``rand2`` : :math:`b' = x_{r_0} + F \cdot (x_{r_1} + x_{r_2} - x_{r_3} - x_{r_4})` + - ``best1`` : :math:`b' = x_0 + F \cdot (x_{r_0} - x_{r_1})` + - ``best2`` : :math:`b' = x_0 + F \cdot (x_{r_0} + x_{r_1} - x_{r_2} - x_{r_3})` + - ``currenttobest1`` : :math:`b' = x_i + F \cdot (x_0 - x_i + x_{r_0} - x_{r_1})` + - ``randtobest1`` : :math:`b' = x_{r_0} + F \cdot (x_0 - x_{r_0} + x_{r_1} - x_{r_2})` + + where the integers :math:`r_0, r_1, r_2, r_3, r_4` are chosen randomly + from the interval [0, NP) with `NP` being the total population size and + the original candidate having index `i`. The user can fully customize the + generation of the trial candidates by supplying a callable to ``strategy``. + + To improve your chances of finding a global minimum use higher `popsize` + values, with higher `mutation` and (dithering), but lower `recombination` + values. 
This has the effect of widening the search radius, but slowing + convergence. + + By default the best solution vector is updated continuously within a single + iteration (``updating='immediate'``). This is a modification [4]_ of the + original differential evolution algorithm which can lead to faster + convergence as trial vectors can immediately benefit from improved + solutions. To use the original Storn and Price behaviour, updating the best + solution once per iteration, set ``updating='deferred'``. + The ``'deferred'`` approach is compatible with both parallelization and + vectorization (``'workers'`` and ``'vectorized'`` keywords). These may + improve minimization speed by using computer resources more efficiently. + The ``'workers'`` distribute calculations over multiple processors. By + default the Python `multiprocessing` module is used, but other approaches + are also possible, such as the Message Passing Interface (MPI) used on + clusters [6]_ [7]_. The overhead from these approaches (creating new + Processes, etc) may be significant, meaning that computational speed + doesn't necessarily scale with the number of processors used. + Parallelization is best suited to computationally expensive objective + functions. If the objective function is less expensive, then + ``'vectorized'`` may aid by only calling the objective function once per + iteration, rather than multiple times for all the population members; the + interpreter overhead is reduced. + + .. versionadded:: 0.15.0 + + References + ---------- + .. [1] Differential evolution, Wikipedia, + http://en.wikipedia.org/wiki/Differential_evolution + .. [2] Storn, R and Price, K, Differential Evolution - a Simple and + Efficient Heuristic for Global Optimization over Continuous Spaces, + Journal of Global Optimization, 1997, 11, 341 - 359. + .. [3] Qiang, J., Mitchell, C., A Unified Differential Evolution Algorithm + for Global Optimization, 2014, https://www.osti.gov/servlets/purl/1163659 + .. [4] Wormington, M., Panaccione, C., Matney, K. M., Bowen, D. K., - + Characterization of structures from X-ray scattering data using + genetic algorithms, Phil. Trans. R. Soc. Lond. A, 1999, 357, + 2827-2848 + .. [5] Lampinen, J., A constraint handling approach for the differential + evolution algorithm. Proceedings of the 2002 Congress on + Evolutionary Computation. CEC'02 (Cat. No. 02TH8600). Vol. 2. IEEE, + 2002. + .. [6] https://mpi4py.readthedocs.io/en/stable/ + .. [7] https://schwimmbad.readthedocs.io/en/latest/ + + + Examples + -------- + Let us consider the problem of minimizing the Rosenbrock function. This + function is implemented in `rosen` in `scipy.optimize`. + + >>> import numpy as np + >>> from scipy.optimize import rosen, differential_evolution + >>> bounds = [(0,2), (0, 2), (0, 2), (0, 2), (0, 2)] + >>> result = differential_evolution(rosen, bounds) + >>> result.x, result.fun + (array([1., 1., 1., 1., 1.]), 1.9216496320061384e-19) + + Now repeat, but with parallelization. + + >>> result = differential_evolution(rosen, bounds, updating='deferred', + ... workers=2) + >>> result.x, result.fun + (array([1., 1., 1., 1., 1.]), 1.9216496320061384e-19) + + Let's do a constrained minimization. + + >>> from scipy.optimize import LinearConstraint, Bounds + + We add the constraint that the sum of ``x[0]`` and ``x[1]`` must be less + than or equal to 1.9. This is a linear constraint, which may be written + ``A @ x <= 1.9``, where ``A = array([[1, 1]])``. 
This can be encoded as + a `LinearConstraint` instance: + + >>> lc = LinearConstraint([[1, 1]], -np.inf, 1.9) + + Specify limits using a `Bounds` object. + + >>> bounds = Bounds([0., 0.], [2., 2.]) + >>> result = differential_evolution(rosen, bounds, constraints=lc, + ... rng=1) + >>> result.x, result.fun + (array([0.96632622, 0.93367155]), 0.0011352416852625719) + + Next find the minimum of the Ackley function + (https://en.wikipedia.org/wiki/Test_functions_for_optimization). + + >>> def ackley(x): + ... arg1 = -0.2 * np.sqrt(0.5 * (x[0] ** 2 + x[1] ** 2)) + ... arg2 = 0.5 * (np.cos(2. * np.pi * x[0]) + np.cos(2. * np.pi * x[1])) + ... return -20. * np.exp(arg1) - np.exp(arg2) + 20. + np.e + >>> bounds = [(-5, 5), (-5, 5)] + >>> result = differential_evolution(ackley, bounds, rng=1) + >>> result.x, result.fun + (array([0., 0.]), 4.440892098500626e-16) + + The Ackley function is written in a vectorized manner, so the + ``'vectorized'`` keyword can be employed. Note the reduced number of + function evaluations. + + >>> result = differential_evolution( + ... ackley, bounds, vectorized=True, updating='deferred', rng=1 + ... ) + >>> result.x, result.fun + (array([0., 0.]), 4.440892098500626e-16) + + The following custom strategy function mimics 'best1bin': + + >>> def custom_strategy_fn(candidate, population, rng=None): + ... parameter_count = population.shape(-1) + ... mutation, recombination = 0.7, 0.9 + ... trial = np.copy(population[candidate]) + ... fill_point = rng.choice(parameter_count) + ... + ... pool = np.arange(len(population)) + ... rng.shuffle(pool) + ... + ... # two unique random numbers that aren't the same, and + ... # aren't equal to candidate. + ... idxs = [] + ... while len(idxs) < 2 and len(pool) > 0: + ... idx = pool[0] + ... pool = pool[1:] + ... if idx != candidate: + ... idxs.append(idx) + ... + ... r0, r1 = idxs[:2] + ... + ... bprime = (population[0] + mutation * + ... (population[r0] - population[r1])) + ... + ... crossovers = rng.uniform(size=parameter_count) + ... crossovers = crossovers < recombination + ... crossovers[fill_point] = True + ... trial = np.where(crossovers, bprime, trial) + ... return trial + + """# noqa: E501 + + # using a context manager means that any created Pool objects are + # cleared up. + with DifferentialEvolutionSolver(func, bounds, args=args, + strategy=strategy, + maxiter=maxiter, + popsize=popsize, tol=tol, + mutation=mutation, + recombination=recombination, + rng=rng, polish=polish, + callback=callback, + disp=disp, init=init, atol=atol, + updating=updating, + workers=workers, + constraints=constraints, + x0=x0, + integrality=integrality, + vectorized=vectorized) as solver: + ret = solver.solve() + + return ret + + +class DifferentialEvolutionSolver: + + """This class implements the differential evolution solver + + Parameters + ---------- + func : callable + The objective function to be minimized. Must be in the form + ``f(x, *args)``, where ``x`` is the argument in the form of a 1-D array + and ``args`` is a tuple of any additional fixed parameters needed to + completely specify the function. The number of parameters, N, is equal + to ``len(x)``. + bounds : sequence or `Bounds` + Bounds for variables. There are two ways to specify the bounds: + + 1. Instance of `Bounds` class. + 2. ``(min, max)`` pairs for each element in ``x``, defining the + finite lower and upper bounds for the optimizing argument of + `func`. + + The total number of bounds is used to determine the number of + parameters, N. 
If there are parameters whose bounds are equal the total + number of free parameters is ``N - N_equal``. + args : tuple, optional + Any additional fixed parameters needed to + completely specify the objective function. + strategy : {str, callable}, optional + The differential evolution strategy to use. Should be one of: + + - 'best1bin' + - 'best1exp' + - 'rand1bin' + - 'rand1exp' + - 'rand2bin' + - 'rand2exp' + - 'randtobest1bin' + - 'randtobest1exp' + - 'currenttobest1bin' + - 'currenttobest1exp' + - 'best2exp' + - 'best2bin' + + The default is 'best1bin'. Strategies that may be + implemented are outlined in 'Notes'. + + Alternatively the differential evolution strategy can be customized + by providing a callable that constructs a trial vector. The callable + must have the form + ``strategy(candidate: int, population: np.ndarray, rng=None)``, + where ``candidate`` is an integer specifying which entry of the + population is being evolved, ``population`` is an array of shape + ``(S, N)`` containing all the population members (where S is the + total population size), and ``rng`` is the random number generator + being used within the solver. + ``candidate`` will be in the range ``[0, S)``. + ``strategy`` must return a trial vector with shape ``(N,)``. The + fitness of this trial vector is compared against the fitness of + ``population[candidate]``. + maxiter : int, optional + The maximum number of generations over which the entire population is + evolved. The maximum number of function evaluations (with no polishing) + is: ``(maxiter + 1) * popsize * (N - N_equal)`` + popsize : int, optional + A multiplier for setting the total population size. The population has + ``popsize * (N - N_equal)`` individuals. This keyword is overridden if + an initial population is supplied via the `init` keyword. When using + ``init='sobol'`` the population size is calculated as the next power + of 2 after ``popsize * (N - N_equal)``. + tol : float, optional + Relative tolerance for convergence, the solving stops when + ``np.std(population_energies) <= atol + tol * np.abs(np.mean(population_energies))``, + where and `atol` and `tol` are the absolute and relative tolerance + respectively. + mutation : float or tuple(float, float), optional + The mutation constant. In the literature this is also known as + differential weight, being denoted by F. + If specified as a float it should be in the range [0, 2]. + If specified as a tuple ``(min, max)`` dithering is employed. Dithering + randomly changes the mutation constant on a generation by generation + basis. The mutation constant for that generation is taken from + U[min, max). Dithering can help speed convergence significantly. + Increasing the mutation constant increases the search radius, but will + slow down convergence. + recombination : float, optional + The recombination constant, should be in the range [0, 1]. In the + literature this is also known as the crossover probability, being + denoted by CR. Increasing this value allows a larger number of mutants + to progress into the next generation, but at the risk of population + stability. + + rng : {None, int, `numpy.random.Generator`}, optional + + ..versionchanged:: 1.15.0 + As part of the `SPEC-007 `_ + transition from use of `numpy.random.RandomState` to + `numpy.random.Generator` this keyword was changed from `seed` to `rng`. + For an interim period both keywords will continue to work (only specify + one of them). After the interim period using the `seed` keyword will emit + warnings. 
The behavior of the `seed` and `rng` keywords is outlined below. + + If `rng` is passed by keyword, types other than `numpy.random.Generator` are + passed to `numpy.random.default_rng` to instantiate a `Generator`. + If `rng` is already a `Generator` instance, then the provided instance is + used. + + If this argument is passed by position or `seed` is passed by keyword, the + behavior is: + + - If `seed` is None (or `np.random`), the `numpy.random.RandomState` + singleton is used. + - If `seed` is an int, a new `RandomState` instance is used, + seeded with `seed`. + - If `seed` is already a `Generator` or `RandomState` instance then + that instance is used. + + Specify `seed`/`rng` for repeatable minimizations. + disp : bool, optional + Prints the evaluated `func` at every iteration. + callback : callable, optional + A callable called after each iteration. Has the signature: + + ``callback(intermediate_result: OptimizeResult)`` + + where ``intermediate_result`` is a keyword parameter containing an + `OptimizeResult` with attributes ``x`` and ``fun``, the best solution + found so far and the objective function. Note that the name + of the parameter must be ``intermediate_result`` for the callback + to be passed an `OptimizeResult`. + + The callback also supports a signature like: + + ``callback(x, convergence: float=val)`` + + ``val`` represents the fractional value of the population convergence. + When ``val`` is greater than ``1.0``, the function halts. + + Introspection is used to determine which of the signatures is invoked. + + Global minimization will halt if the callback raises ``StopIteration`` + or returns ``True``; any polishing is still carried out. + + .. versionchanged:: 1.12.0 + callback accepts the ``intermediate_result`` keyword. + + polish : bool, optional + If True (default), then `scipy.optimize.minimize` with the `L-BFGS-B` + method is used to polish the best population member at the end, which + can improve the minimization slightly. If a constrained problem is + being studied then the `trust-constr` method is used instead. For large + problems with many constraints, polishing can take a long time due to + the Jacobian computations. + maxfun : int, optional + Set the maximum number of function evaluations. However, it probably + makes more sense to set `maxiter` instead. + init : str or array-like, optional + Specify which type of population initialization is performed. Should be + one of: + + - 'latinhypercube' + - 'sobol' + - 'halton' + - 'random' + - array specifying the initial population. The array should have + shape ``(S, N)``, where S is the total population size and + N is the number of parameters. + `init` is clipped to `bounds` before use. + + The default is 'latinhypercube'. Latin Hypercube sampling tries to + maximize coverage of the available parameter space. + + 'sobol' and 'halton' are superior alternatives and maximize even more + the parameter space. 'sobol' will enforce an initial population + size which is calculated as the next power of 2 after + ``popsize * (N - N_equal)``. 'halton' has no requirements but is a bit + less efficient. See `scipy.stats.qmc` for more details. + + 'random' initializes the population randomly - this has the drawback + that clustering can occur, preventing the whole of parameter space + being covered. Use of an array to specify a population could be used, + for example, to create a tight bunch of initial guesses in an location + where the solution is known to exist, thereby reducing time for + convergence. 
+ atol : float, optional + Absolute tolerance for convergence, the solving stops when + ``np.std(pop) <= atol + tol * np.abs(np.mean(population_energies))``, + where and `atol` and `tol` are the absolute and relative tolerance + respectively. + updating : {'immediate', 'deferred'}, optional + If ``'immediate'``, the best solution vector is continuously updated + within a single generation [4]_. This can lead to faster convergence as + trial vectors can take advantage of continuous improvements in the best + solution. + With ``'deferred'``, the best solution vector is updated once per + generation. Only ``'deferred'`` is compatible with parallelization or + vectorization, and the `workers` and `vectorized` keywords can + over-ride this option. + workers : int or map-like callable, optional + If `workers` is an int the population is subdivided into `workers` + sections and evaluated in parallel + (uses `multiprocessing.Pool `). + Supply `-1` to use all cores available to the Process. + Alternatively supply a map-like callable, such as + `multiprocessing.Pool.map` for evaluating the population in parallel. + This evaluation is carried out as ``workers(func, iterable)``. + This option will override the `updating` keyword to + `updating='deferred'` if `workers != 1`. + Requires that `func` be pickleable. + constraints : {NonLinearConstraint, LinearConstraint, Bounds} + Constraints on the solver, over and above those applied by the `bounds` + kwd. Uses the approach by Lampinen. + x0 : None or array-like, optional + Provides an initial guess to the minimization. Once the population has + been initialized this vector replaces the first (best) member. This + replacement is done even if `init` is given an initial population. + ``x0.shape == (N,)``. + integrality : 1-D array, optional + For each decision variable, a boolean value indicating whether the + decision variable is constrained to integer values. The array is + broadcast to ``(N,)``. + If any decision variables are constrained to be integral, they will not + be changed during polishing. + Only integer values lying between the lower and upper bounds are used. + If there are no integer values lying between the bounds then a + `ValueError` is raised. + vectorized : bool, optional + If ``vectorized is True``, `func` is sent an `x` array with + ``x.shape == (N, S)``, and is expected to return an array of shape + ``(S,)``, where `S` is the number of solution vectors to be calculated. + If constraints are applied, each of the functions used to construct + a `Constraint` object should accept an `x` array with + ``x.shape == (N, S)``, and return an array of shape ``(M, S)``, where + `M` is the number of constraint components. + This option is an alternative to the parallelization offered by + `workers`, and may help in optimization speed. This keyword is + ignored if ``workers != 1``. + This option will override the `updating` keyword to + ``updating='deferred'``. + """ # noqa: E501 + + # Dispatch of mutation strategy method (binomial or exponential). 
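+    # '...bin' strategies use binomial (uniform) crossover: each parameter is
+    # taken from the mutant independently with probability `recombination`.
+    # '...exp' strategies use exponential crossover: a contiguous (modulo)
+    # run of parameters, starting at a random position, is copied from the
+    # mutant until a uniform draw exceeds `recombination`.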
+ _binomial = {'best1bin': '_best1', + 'randtobest1bin': '_randtobest1', + 'currenttobest1bin': '_currenttobest1', + 'best2bin': '_best2', + 'rand2bin': '_rand2', + 'rand1bin': '_rand1'} + _exponential = {'best1exp': '_best1', + 'rand1exp': '_rand1', + 'randtobest1exp': '_randtobest1', + 'currenttobest1exp': '_currenttobest1', + 'best2exp': '_best2', + 'rand2exp': '_rand2'} + + __init_error_msg = ("The population initialization method must be one of " + "'latinhypercube' or 'random', or an array of shape " + "(S, N) where N is the number of parameters and S>5") + + def __init__(self, func, bounds, args=(), + strategy='best1bin', maxiter=1000, popsize=15, + tol=0.01, mutation=(0.5, 1), recombination=0.7, rng=None, + maxfun=np.inf, callback=None, disp=False, polish=True, + init='latinhypercube', atol=0, updating='immediate', + workers=1, constraints=(), x0=None, *, integrality=None, + vectorized=False): + + if callable(strategy): + # a callable strategy is going to be stored in self.strategy anyway + pass + elif strategy in self._binomial: + self.mutation_func = getattr(self, self._binomial[strategy]) + elif strategy in self._exponential: + self.mutation_func = getattr(self, self._exponential[strategy]) + else: + raise ValueError("Please select a valid mutation strategy") + self.strategy = strategy + + self.callback = _wrap_callback(callback, "differential_evolution") + self.polish = polish + + # set the updating / parallelisation options + if updating in ['immediate', 'deferred']: + self._updating = updating + + self.vectorized = vectorized + + # want to use parallelisation, but updating is immediate + if workers != 1 and updating == 'immediate': + warnings.warn("differential_evolution: the 'workers' keyword has" + " overridden updating='immediate' to" + " updating='deferred'", UserWarning, stacklevel=2) + self._updating = 'deferred' + + if vectorized and workers != 1: + warnings.warn("differential_evolution: the 'workers' keyword" + " overrides the 'vectorized' keyword", stacklevel=2) + self.vectorized = vectorized = False + + if vectorized and updating == 'immediate': + warnings.warn("differential_evolution: the 'vectorized' keyword" + " has overridden updating='immediate' to updating" + "='deferred'", UserWarning, stacklevel=2) + self._updating = 'deferred' + + # an object with a map method. + if vectorized: + def maplike_for_vectorized_func(func, x): + # send an array (N, S) to the user func, + # expect to receive (S,). Transposition is required because + # internally the population is held as (S, N) + return np.atleast_1d(func(x.T)) + workers = maplike_for_vectorized_func + + self._mapwrapper = MapWrapper(workers) + + # relative and absolute tolerances for convergence + self.tol, self.atol = tol, atol + + # Mutation constant should be in [0, 2). If specified as a sequence + # then dithering is performed. 
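+ # Net effect of the parallelisation settings handled above:
+ #   * workers != 1    -> updating is forced to 'deferred' and any
+ #                        vectorized=True request is ignored (warnings issued).
+ #   * vectorized=True -> updating is forced to 'deferred' and `func` is
+ #                        wrapped in a map-like callable so the solver can
+ #                        treat it exactly like a `workers` map.
+ #   * otherwise       -> updating='immediate' is honoured.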
+ self.scale = mutation + if (not np.all(np.isfinite(mutation)) or + np.any(np.array(mutation) >= 2) or + np.any(np.array(mutation) < 0)): + raise ValueError('The mutation constant must be a float in ' + 'U[0, 2), or specified as a tuple(min, max)' + ' where min < max and min, max are in U[0, 2).') + + self.dither = None + if hasattr(mutation, '__iter__') and len(mutation) > 1: + self.dither = [mutation[0], mutation[1]] + self.dither.sort() + + self.cross_over_probability = recombination + + # we create a wrapped function to allow the use of map (and Pool.map + # in the future) + self.func = _FunctionWrapper(func, args) + self.args = args + + # convert tuple of lower and upper bounds to limits + # [(low_0, high_0), ..., (low_n, high_n] + # -> [[low_0, ..., low_n], [high_0, ..., high_n]] + if isinstance(bounds, Bounds): + self.limits = np.array(new_bounds_to_old(bounds.lb, + bounds.ub, + len(bounds.lb)), + dtype=float).T + else: + self.limits = np.array(bounds, dtype='float').T + + if (np.size(self.limits, 0) != 2 or not + np.all(np.isfinite(self.limits))): + raise ValueError('bounds should be a sequence containing finite ' + 'real valued (min, max) pairs for each value' + ' in x') + + if maxiter is None: # the default used to be None + maxiter = 1000 + self.maxiter = maxiter + if maxfun is None: # the default used to be None + maxfun = np.inf + self.maxfun = maxfun + + # population is scaled to between [0, 1]. + # We have to scale between parameter <-> population + # save these arguments for _scale_parameter and + # _unscale_parameter. This is an optimization + self.__scale_arg1 = 0.5 * (self.limits[0] + self.limits[1]) + self.__scale_arg2 = np.fabs(self.limits[0] - self.limits[1]) + with np.errstate(divide='ignore'): + # if lb == ub then the following line will be 1/0, which is why + # we ignore the divide by zero warning. The result from 1/0 is + # inf, so replace those values by 0. + self.__recip_scale_arg2 = 1 / self.__scale_arg2 + self.__recip_scale_arg2[~np.isfinite(self.__recip_scale_arg2)] = 0 + + self.parameter_count = np.size(self.limits, 1) + + self.random_number_generator = check_random_state(rng) + + # Which parameters are going to be integers? + if np.any(integrality): + # # user has provided a truth value for integer constraints + integrality = np.broadcast_to( + integrality, + self.parameter_count + ) + integrality = np.asarray(integrality, bool) + # For integrality parameters change the limits to only allow + # integer values lying between the limits. + lb, ub = np.copy(self.limits) + + lb = np.ceil(lb) + ub = np.floor(ub) + if not (lb[integrality] <= ub[integrality]).all(): + # there's a parameter that doesn't have an integer value + # lying between the limits + raise ValueError("One of the integrality constraints does not" + " have any possible integer values between" + " the lower/upper bounds.") + nlb = np.nextafter(lb[integrality] - 0.5, np.inf) + nub = np.nextafter(ub[integrality] + 0.5, -np.inf) + + self.integrality = integrality + self.limits[0, self.integrality] = nlb + self.limits[1, self.integrality] = nub + else: + self.integrality = False + + # check for equal bounds + eb = self.limits[0] == self.limits[1] + eb_count = np.count_nonzero(eb) + + # default population initialization is a latin hypercube design, but + # there are other population initializations possible. + # the minimum is 5 because 'best2bin' requires a population that's at + # least 5 long + # 202301 - reduced population size to account for parameters with + # equal bounds. 
If there are no varying parameters set N to at least 1 + self.num_population_members = max( + 5, + popsize * max(1, self.parameter_count - eb_count) + ) + self.population_shape = (self.num_population_members, + self.parameter_count) + + self._nfev = 0 + # check first str otherwise will fail to compare str with array + if isinstance(init, str): + if init == 'latinhypercube': + self.init_population_lhs() + elif init == 'sobol': + # must be Ns = 2**m for Sobol' + n_s = int(2 ** np.ceil(np.log2(self.num_population_members))) + self.num_population_members = n_s + self.population_shape = (self.num_population_members, + self.parameter_count) + self.init_population_qmc(qmc_engine='sobol') + elif init == 'halton': + self.init_population_qmc(qmc_engine='halton') + elif init == 'random': + self.init_population_random() + else: + raise ValueError(self.__init_error_msg) + else: + self.init_population_array(init) + + if x0 is not None: + # scale to within unit interval and + # ensure parameters are within bounds. + x0_scaled = self._unscale_parameters(np.asarray(x0)) + if ((x0_scaled > 1.0) | (x0_scaled < 0.0)).any(): + raise ValueError( + "Some entries in x0 lay outside the specified bounds" + ) + self.population[0] = x0_scaled + + # infrastructure for constraints + self.constraints = constraints + self._wrapped_constraints = [] + + if hasattr(constraints, '__len__'): + # sequence of constraints, this will also deal with default + # keyword parameter + for c in constraints: + self._wrapped_constraints.append( + _ConstraintWrapper(c, self.x) + ) + else: + self._wrapped_constraints = [ + _ConstraintWrapper(constraints, self.x) + ] + self.total_constraints = np.sum( + [c.num_constr for c in self._wrapped_constraints] + ) + self.constraint_violation = np.zeros((self.num_population_members, 1)) + self.feasible = np.ones(self.num_population_members, bool) + + # an array to shuffle when selecting candidates. Create it here + # rather than repeatedly creating it in _select_samples. + self._random_population_index = np.arange(self.num_population_members) + self.disp = disp + + def init_population_lhs(self): + """ + Initializes the population with Latin Hypercube Sampling. + Latin Hypercube Sampling ensures that each parameter is uniformly + sampled over its range. + """ + rng = self.random_number_generator + + # Each parameter range needs to be sampled uniformly. The scaled + # parameter range ([0, 1)) needs to be split into + # `self.num_population_members` segments, each of which has the following + # size: + segsize = 1.0 / self.num_population_members + + # Within each segment we sample from a uniform random distribution. + # We need to do this sampling for each parameter. + samples = (segsize * rng.uniform(size=self.population_shape) + + # Offset each segment to cover the entire parameter range [0, 1) + + np.linspace(0., 1., self.num_population_members, + endpoint=False)[:, np.newaxis]) + + # Create an array for population of candidate solutions. + self.population = np.zeros_like(samples) + + # Initialize population of candidate solutions by permutation of the + # random samples. + for j in range(self.parameter_count): + order = rng.permutation(range(self.num_population_members)) + self.population[:, j] = samples[order, j] + + # reset population energies + self.population_energies = np.full(self.num_population_members, + np.inf) + + # reset number of function evaluations counter + self._nfev = 0 + + def init_population_qmc(self, qmc_engine): + """Initializes the population with a QMC method. 
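+ For ``'sobol'``, ``__init__`` has already rounded
+ ``self.num_population_members`` up to the next power of two (for example a
+ nominal population of 30 becomes 32), as the Sobol' sequence requires.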
+ + QMC methods ensures that each parameter is uniformly + sampled over its range. + + Parameters + ---------- + qmc_engine : str + The QMC method to use for initialization. Can be one of + ``latinhypercube``, ``sobol`` or ``halton``. + + """ + from scipy.stats import qmc + + rng = self.random_number_generator + + # Create an array for population of candidate solutions. + if qmc_engine == 'latinhypercube': + sampler = qmc.LatinHypercube(d=self.parameter_count, seed=rng) + elif qmc_engine == 'sobol': + sampler = qmc.Sobol(d=self.parameter_count, seed=rng) + elif qmc_engine == 'halton': + sampler = qmc.Halton(d=self.parameter_count, seed=rng) + else: + raise ValueError(self.__init_error_msg) + + self.population = sampler.random(n=self.num_population_members) + + # reset population energies + self.population_energies = np.full(self.num_population_members, + np.inf) + + # reset number of function evaluations counter + self._nfev = 0 + + def init_population_random(self): + """ + Initializes the population at random. This type of initialization + can possess clustering, Latin Hypercube sampling is generally better. + """ + rng = self.random_number_generator + self.population = rng.uniform(size=self.population_shape) + + # reset population energies + self.population_energies = np.full(self.num_population_members, + np.inf) + + # reset number of function evaluations counter + self._nfev = 0 + + def init_population_array(self, init): + """ + Initializes the population with a user specified population. + + Parameters + ---------- + init : np.ndarray + Array specifying subset of the initial population. The array should + have shape (S, N), where N is the number of parameters. + The population is clipped to the lower and upper bounds. + """ + # make sure you're using a float array + popn = np.asarray(init, dtype=np.float64) + + if (np.size(popn, 0) < 5 or + popn.shape[1] != self.parameter_count or + len(popn.shape) != 2): + raise ValueError("The population supplied needs to have shape" + " (S, len(x)), where S > 4.") + + # scale values and clip to bounds, assigning to population + self.population = np.clip(self._unscale_parameters(popn), 0, 1) + + self.num_population_members = np.size(self.population, 0) + + self.population_shape = (self.num_population_members, + self.parameter_count) + + # reset population energies + self.population_energies = np.full(self.num_population_members, + np.inf) + + # reset number of function evaluations counter + self._nfev = 0 + + @property + def x(self): + """ + The best solution from the solver + """ + return self._scale_parameters(self.population[0]) + + @property + def convergence(self): + """ + The standard deviation of the population energies divided by their + mean. + """ + if np.any(np.isinf(self.population_energies)): + return np.inf + return (np.std(self.population_energies) / + (np.abs(np.mean(self.population_energies)) + _MACHEPS)) + + def converged(self): + """ + Return True if the solver has converged. + """ + if np.any(np.isinf(self.population_energies)): + return False + + return (np.std(self.population_energies) <= + self.atol + + self.tol * np.abs(np.mean(self.population_energies))) + + def solve(self): + """ + Runs the DifferentialEvolutionSolver. + + Returns + ------- + res : OptimizeResult + The optimization result represented as a `OptimizeResult` object. 
+ Important attributes are: ``x`` the solution array, ``success`` a + Boolean flag indicating if the optimizer exited successfully, + ``message`` which describes the cause of the termination, + ``population`` the solution vectors present in the population, and + ``population_energies`` the value of the objective function for + each entry in ``population``. + See `OptimizeResult` for a description of other attributes. If + `polish` was employed, and a lower minimum was obtained by the + polishing, then OptimizeResult also contains the ``jac`` attribute. + If the eventual solution does not satisfy the applied constraints + ``success`` will be `False`. + """ + nit, warning_flag = 0, False + status_message = _status_message['success'] + + # The population may have just been initialized (all entries are + # np.inf). If it has you have to calculate the initial energies. + # Although this is also done in the evolve generator it's possible + # that someone can set maxiter=0, at which point we still want the + # initial energies to be calculated (the following loop isn't run). + if np.all(np.isinf(self.population_energies)): + self.feasible, self.constraint_violation = ( + self._calculate_population_feasibilities(self.population)) + + # only work out population energies for feasible solutions + self.population_energies[self.feasible] = ( + self._calculate_population_energies( + self.population[self.feasible])) + + self._promote_lowest_energy() + + # do the optimization. + for nit in range(1, self.maxiter + 1): + # evolve the population by a generation + try: + next(self) + except StopIteration: + warning_flag = True + if self._nfev > self.maxfun: + status_message = _status_message['maxfev'] + elif self._nfev == self.maxfun: + status_message = ('Maximum number of function evaluations' + ' has been reached.') + break + + if self.disp: + print(f"differential_evolution step {nit}: f(x)=" + f" {self.population_energies[0]}" + ) + + if self.callback: + c = self.tol / (self.convergence + _MACHEPS) + res = self._result(nit=nit, message="in progress") + res.convergence = c + try: + warning_flag = bool(self.callback(res)) + except StopIteration: + warning_flag = True + + if warning_flag: + status_message = 'callback function requested stop early' + + # should the solver terminate? + if warning_flag or self.converged(): + break + + else: + status_message = _status_message['maxiter'] + warning_flag = True + + DE_result = self._result( + nit=nit, message=status_message, warning_flag=warning_flag + ) + + if self.polish and not np.all(self.integrality): + # can't polish if all the parameters are integers + if np.any(self.integrality): + # set the lower/upper bounds equal so that any integrality + # constraints work. 
+ limits, integrality = self.limits, self.integrality + limits[0, integrality] = DE_result.x[integrality] + limits[1, integrality] = DE_result.x[integrality] + + polish_method = 'L-BFGS-B' + + if self._wrapped_constraints: + polish_method = 'trust-constr' + + constr_violation = self._constraint_violation_fn(DE_result.x) + if np.any(constr_violation > 0.): + warnings.warn("differential evolution didn't find a " + "solution satisfying the constraints, " + "attempting to polish from the least " + "infeasible solution", + UserWarning, stacklevel=2) + if self.disp: + print(f"Polishing solution with '{polish_method}'") + result = minimize(lambda x: + list(self._mapwrapper(self.func, np.atleast_2d(x)))[0], + np.copy(DE_result.x), + method=polish_method, + bounds=self.limits.T, + constraints=self.constraints) + + self._nfev += result.nfev + DE_result.nfev = self._nfev + + # Polishing solution is only accepted if there is an improvement in + # cost function, the polishing was successful and the solution lies + # within the bounds. + if (result.fun < DE_result.fun and + result.success and + np.all(result.x <= self.limits[1]) and + np.all(self.limits[0] <= result.x)): + DE_result.fun = result.fun + DE_result.x = result.x + DE_result.jac = result.jac + # to keep internal state consistent + self.population_energies[0] = result.fun + self.population[0] = self._unscale_parameters(result.x) + + if self._wrapped_constraints: + DE_result.constr = [c.violation(DE_result.x) for + c in self._wrapped_constraints] + DE_result.constr_violation = np.max( + np.concatenate(DE_result.constr)) + DE_result.maxcv = DE_result.constr_violation + if DE_result.maxcv > 0: + # if the result is infeasible then success must be False + DE_result.success = False + DE_result.message = ("The solution does not satisfy the " + f"constraints, MAXCV = {DE_result.maxcv}") + + return DE_result + + def _result(self, **kwds): + # form an intermediate OptimizeResult + nit = kwds.get('nit', None) + message = kwds.get('message', None) + warning_flag = kwds.get('warning_flag', False) + result = OptimizeResult( + x=self.x, + fun=self.population_energies[0], + nfev=self._nfev, + nit=nit, + message=message, + success=(warning_flag is not True), + population=self._scale_parameters(self.population), + population_energies=self.population_energies + ) + if self._wrapped_constraints: + result.constr = [c.violation(result.x) + for c in self._wrapped_constraints] + result.constr_violation = np.max(np.concatenate(result.constr)) + result.maxcv = result.constr_violation + if result.maxcv > 0: + result.success = False + + return result + + def _calculate_population_energies(self, population): + """ + Calculate the energies of a population. + + Parameters + ---------- + population : ndarray + An array of parameter vectors normalised to [0, 1] using lower + and upper limits. Has shape ``(np.size(population, 0), N)``. + + Returns + ------- + energies : ndarray + An array of energies corresponding to each population member. If + maxfun will be exceeded during this call, then the number of + function evaluations will be reduced and energies will be + right-padded with np.inf. 
Has shape ``(np.size(population, 0),)`` + """ + num_members = np.size(population, 0) + # S is the number of function evals left to stay under the + # maxfun budget + S = min(num_members, self.maxfun - self._nfev) + + energies = np.full(num_members, np.inf) + + parameters_pop = self._scale_parameters(population) + try: + calc_energies = list( + self._mapwrapper(self.func, parameters_pop[0:S]) + ) + calc_energies = np.squeeze(calc_energies) + except (TypeError, ValueError) as e: + # wrong number of arguments for _mapwrapper + # or wrong length returned from the mapper + raise RuntimeError( + "The map-like callable must be of the form f(func, iterable), " + "returning a sequence of numbers the same length as 'iterable'" + ) from e + + if calc_energies.size != S: + if self.vectorized: + raise RuntimeError("The vectorized function must return an" + " array of shape (S,) when given an array" + " of shape (len(x), S)") + raise RuntimeError("func(x, *args) must return a scalar value") + + energies[0:S] = calc_energies + + if self.vectorized: + self._nfev += 1 + else: + self._nfev += S + + return energies + + def _promote_lowest_energy(self): + # swaps 'best solution' into first population entry + + idx = np.arange(self.num_population_members) + feasible_solutions = idx[self.feasible] + if feasible_solutions.size: + # find the best feasible solution + idx_t = np.argmin(self.population_energies[feasible_solutions]) + l = feasible_solutions[idx_t] + else: + # no solution was feasible, use 'best' infeasible solution, which + # will violate constraints the least + l = np.argmin(np.sum(self.constraint_violation, axis=1)) + + self.population_energies[[0, l]] = self.population_energies[[l, 0]] + self.population[[0, l], :] = self.population[[l, 0], :] + self.feasible[[0, l]] = self.feasible[[l, 0]] + self.constraint_violation[[0, l], :] = ( + self.constraint_violation[[l, 0], :]) + + def _constraint_violation_fn(self, x): + """ + Calculates total constraint violation for all the constraints, for a + set of solutions. + + Parameters + ---------- + x : ndarray + Solution vector(s). Has shape (S, N), or (N,), where S is the + number of solutions to investigate and N is the number of + parameters. + + Returns + ------- + cv : ndarray + Total violation of constraints. Has shape ``(S, M)``, where M is + the total number of constraint components (which is not necessarily + equal to len(self._wrapped_constraints)). + """ + # how many solution vectors you're calculating constraint violations + # for + S = np.size(x) // self.parameter_count + _out = np.zeros((S, self.total_constraints)) + offset = 0 + for con in self._wrapped_constraints: + # the input/output of the (vectorized) constraint function is + # {(N, S), (N,)} --> (M, S) + # The input to _constraint_violation_fn is (S, N) or (N,), so + # transpose to pass it to the constraint. The output is transposed + # from (M, S) to (S, M) for further use. + c = con.violation(x.T).T + + # The shape of c should be (M,), (1, M), or (S, M). Check for + # those shapes, as an incorrect shape indicates that the + # user constraint function didn't return the right thing, and + # the reshape operation will fail. Intercept the wrong shape + # to give a reasonable error message. I'm not sure what failure + # modes an inventive user will come up with. + if c.shape[-1] != con.num_constr or (S > 1 and c.shape[0] != S): + raise RuntimeError("An array returned from a Constraint has" + " the wrong shape. 
If `vectorized is False`" + " the Constraint should return an array of" + " shape (M,). If `vectorized is True` then" + " the Constraint must return an array of" + " shape (M, S), where S is the number of" + " solution vectors and M is the number of" + " constraint components in a given" + " Constraint object.") + + # the violation function may return a 1D array, but is it a + # sequence of constraints for one solution (S=1, M>=1), or the + # value of a single constraint for a sequence of solutions + # (S>=1, M=1) + c = np.reshape(c, (S, con.num_constr)) + _out[:, offset:offset + con.num_constr] = c + offset += con.num_constr + + return _out + + def _calculate_population_feasibilities(self, population): + """ + Calculate the feasibilities of a population. + + Parameters + ---------- + population : ndarray + An array of parameter vectors normalised to [0, 1] using lower + and upper limits. Has shape ``(np.size(population, 0), N)``. + + Returns + ------- + feasible, constraint_violation : ndarray, ndarray + Boolean array of feasibility for each population member, and an + array of the constraint violation for each population member. + constraint_violation has shape ``(np.size(population, 0), M)``, + where M is the number of constraints. + """ + num_members = np.size(population, 0) + if not self._wrapped_constraints: + # shortcut for no constraints + return np.ones(num_members, bool), np.zeros((num_members, 1)) + + # (S, N) + parameters_pop = self._scale_parameters(population) + + if self.vectorized: + # (S, M) + constraint_violation = np.array( + self._constraint_violation_fn(parameters_pop) + ) + else: + # (S, 1, M) + constraint_violation = np.array([self._constraint_violation_fn(x) + for x in parameters_pop]) + # if you use the list comprehension in the line above it will + # create an array of shape (S, 1, M), because each iteration + # generates an array of (1, M). In comparison the vectorized + # version returns (S, M). It's therefore necessary to remove axis 1 + constraint_violation = constraint_violation[:, 0] + + feasible = ~(np.sum(constraint_violation, axis=1) > 0) + + return feasible, constraint_violation + + def __iter__(self): + return self + + def __enter__(self): + return self + + def __exit__(self, *args): + return self._mapwrapper.__exit__(*args) + + def _accept_trial(self, energy_trial, feasible_trial, cv_trial, + energy_orig, feasible_orig, cv_orig): + """ + Trial is accepted if: + * it satisfies all constraints and provides a lower or equal objective + function value, while both the compared solutions are feasible + - or - + * it is feasible while the original solution is infeasible, + - or - + * it is infeasible, but provides a lower or equal constraint violation + for all constraint functions. + + This test corresponds to section III of Lampinen [1]_. 
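+ For example, with a single constraint component an infeasible trial with
+ ``cv_trial = [0.2]`` is accepted over an original with ``cv_orig = [0.5]``
+ regardless of energy, while two feasible solutions are compared on energy
+ alone, with ties going to the trial.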
+ + Parameters + ---------- + energy_trial : float + Energy of the trial solution + feasible_trial : float + Feasibility of trial solution + cv_trial : array-like + Excess constraint violation for the trial solution + energy_orig : float + Energy of the original solution + feasible_orig : float + Feasibility of original solution + cv_orig : array-like + Excess constraint violation for the original solution + + Returns + ------- + accepted : bool + + """ + if feasible_orig and feasible_trial: + return energy_trial <= energy_orig + elif feasible_trial and not feasible_orig: + return True + elif not feasible_trial and (cv_trial <= cv_orig).all(): + # cv_trial < cv_orig would imply that both trial and orig are not + # feasible + return True + + return False + + def __next__(self): + """ + Evolve the population by a single generation + + Returns + ------- + x : ndarray + The best solution from the solver. + fun : float + Value of objective function obtained from the best solution. + """ + # the population may have just been initialized (all entries are + # np.inf). If it has you have to calculate the initial energies + if np.all(np.isinf(self.population_energies)): + self.feasible, self.constraint_violation = ( + self._calculate_population_feasibilities(self.population)) + + # only need to work out population energies for those that are + # feasible + self.population_energies[self.feasible] = ( + self._calculate_population_energies( + self.population[self.feasible])) + + self._promote_lowest_energy() + + if self.dither is not None: + self.scale = self.random_number_generator.uniform(self.dither[0], + self.dither[1]) + + if self._updating == 'immediate': + # update best solution immediately + for candidate in range(self.num_population_members): + if self._nfev > self.maxfun: + raise StopIteration + + # create a trial solution + trial = self._mutate(candidate) + + # ensuring that it's in the range [0, 1) + self._ensure_constraint(trial) + + # scale from [0, 1) to the actual parameter value + parameters = self._scale_parameters(trial) + + # determine the energy of the objective function + if self._wrapped_constraints: + cv = self._constraint_violation_fn(parameters) + feasible = False + energy = np.inf + if not np.sum(cv) > 0: + # solution is feasible + feasible = True + energy = self.func(parameters) + self._nfev += 1 + else: + feasible = True + cv = np.atleast_2d([0.]) + energy = self.func(parameters) + self._nfev += 1 + + # compare trial and population member + if self._accept_trial(energy, feasible, cv, + self.population_energies[candidate], + self.feasible[candidate], + self.constraint_violation[candidate]): + self.population[candidate] = trial + self.population_energies[candidate] = np.squeeze(energy) + self.feasible[candidate] = feasible + self.constraint_violation[candidate] = cv + + # if the trial candidate is also better than the best + # solution then promote it. + if self._accept_trial(energy, feasible, cv, + self.population_energies[0], + self.feasible[0], + self.constraint_violation[0]): + self._promote_lowest_energy() + + elif self._updating == 'deferred': + # update best solution once per generation + if self._nfev >= self.maxfun: + raise StopIteration + + # 'deferred' approach, vectorised form. 
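+ # A whole generation of trial vectors is built and evaluated in one pass
+ # below; this is what allows the energy calculation to be farmed out through
+ # self._mapwrapper (the `workers` pool) or the vectorized `func` wrapper,
+ # and why 'deferred' is the only updating mode compatible with
+ # parallelisation.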
+ # create trial solutions + trial_pop = self._mutate_many( + np.arange(self.num_population_members) + ) + + # enforce bounds + self._ensure_constraint(trial_pop) + + # determine the energies of the objective function, but only for + # feasible trials + feasible, cv = self._calculate_population_feasibilities(trial_pop) + trial_energies = np.full(self.num_population_members, np.inf) + + # only calculate for feasible entries + trial_energies[feasible] = self._calculate_population_energies( + trial_pop[feasible]) + + # which solutions are 'improved'? + loc = [self._accept_trial(*val) for val in + zip(trial_energies, feasible, cv, self.population_energies, + self.feasible, self.constraint_violation)] + loc = np.array(loc) + self.population = np.where(loc[:, np.newaxis], + trial_pop, + self.population) + self.population_energies = np.where(loc, + trial_energies, + self.population_energies) + self.feasible = np.where(loc, + feasible, + self.feasible) + self.constraint_violation = np.where(loc[:, np.newaxis], + cv, + self.constraint_violation) + + # make sure the best solution is updated if updating='deferred'. + # put the lowest energy into the best solution position. + self._promote_lowest_energy() + + return self.x, self.population_energies[0] + + def _scale_parameters(self, trial): + """Scale from a number between 0 and 1 to parameters.""" + # trial either has shape (N, ) or (L, N), where L is the number of + # solutions being scaled + scaled = self.__scale_arg1 + (trial - 0.5) * self.__scale_arg2 + if np.count_nonzero(self.integrality): + i = np.broadcast_to(self.integrality, scaled.shape) + scaled[i] = np.round(scaled[i]) + return scaled + + def _unscale_parameters(self, parameters): + """Scale from parameters to a number between 0 and 1.""" + return (parameters - self.__scale_arg1) * self.__recip_scale_arg2 + 0.5 + + def _ensure_constraint(self, trial): + """Make sure the parameters lie between the limits.""" + mask = np.bitwise_or(trial > 1, trial < 0) + if oob := np.count_nonzero(mask): + trial[mask] = self.random_number_generator.uniform(size=oob) + + def _mutate_custom(self, candidate): + rng = self.random_number_generator + msg = ( + "strategy must have signature" + " f(candidate: int, population: np.ndarray, rng=None) returning an" + " array of shape (N,)" + ) + _population = self._scale_parameters(self.population) + if not len(np.shape(candidate)): + # single entry in population + trial = self.strategy(candidate, _population, rng=rng) + if trial.shape != (self.parameter_count,): + raise RuntimeError(msg) + else: + S = candidate.shape[0] + trial = np.array( + [self.strategy(c, _population, rng=rng) for c in candidate], + dtype=float + ) + if trial.shape != (S, self.parameter_count): + raise RuntimeError(msg) + return self._unscale_parameters(trial) + + def _mutate_many(self, candidates): + """Create trial vectors based on a mutation strategy.""" + rng = self.random_number_generator + + S = len(candidates) + if callable(self.strategy): + return self._mutate_custom(candidates) + + trial = np.copy(self.population[candidates]) + samples = np.array([self._select_samples(c, 5) for c in candidates]) + + if self.strategy in ['currenttobest1exp', 'currenttobest1bin']: + bprime = self.mutation_func(candidates, samples) + else: + bprime = self.mutation_func(samples) + + fill_point = rng_integers(rng, self.parameter_count, size=S) + crossovers = rng.uniform(size=(S, self.parameter_count)) + crossovers = crossovers < self.cross_over_probability + if self.strategy in self._binomial: + # the last 
one is always from the bprime vector for binomial + # If you fill in modulo with a loop you have to set the last one to + # true. If you don't use a loop then you can have any random entry + # be True. + i = np.arange(S) + crossovers[i, fill_point[i]] = True + trial = np.where(crossovers, bprime, trial) + return trial + + elif self.strategy in self._exponential: + crossovers[..., 0] = True + for j in range(S): + i = 0 + init_fill = fill_point[j] + while (i < self.parameter_count and crossovers[j, i]): + trial[j, init_fill] = bprime[j, init_fill] + init_fill = (init_fill + 1) % self.parameter_count + i += 1 + + return trial + + def _mutate(self, candidate): + """Create a trial vector based on a mutation strategy.""" + rng = self.random_number_generator + + if callable(self.strategy): + return self._mutate_custom(candidate) + + fill_point = rng_integers(rng, self.parameter_count) + samples = self._select_samples(candidate, 5) + + trial = np.copy(self.population[candidate]) + + if self.strategy in ['currenttobest1exp', 'currenttobest1bin']: + bprime = self.mutation_func(candidate, samples) + else: + bprime = self.mutation_func(samples) + + crossovers = rng.uniform(size=self.parameter_count) + crossovers = crossovers < self.cross_over_probability + if self.strategy in self._binomial: + # the last one is always from the bprime vector for binomial + # If you fill in modulo with a loop you have to set the last one to + # true. If you don't use a loop then you can have any random entry + # be True. + crossovers[fill_point] = True + trial = np.where(crossovers, bprime, trial) + return trial + + elif self.strategy in self._exponential: + i = 0 + crossovers[0] = True + while i < self.parameter_count and crossovers[i]: + trial[fill_point] = bprime[fill_point] + fill_point = (fill_point + 1) % self.parameter_count + i += 1 + + return trial + + def _best1(self, samples): + """best1bin, best1exp""" + # samples.shape == (S, 5) + # or + # samples.shape(5,) + r0, r1 = samples[..., :2].T + return (self.population[0] + self.scale * + (self.population[r0] - self.population[r1])) + + def _rand1(self, samples): + """rand1bin, rand1exp""" + r0, r1, r2 = samples[..., :3].T + return (self.population[r0] + self.scale * + (self.population[r1] - self.population[r2])) + + def _randtobest1(self, samples): + """randtobest1bin, randtobest1exp""" + r0, r1, r2 = samples[..., :3].T + bprime = np.copy(self.population[r0]) + bprime += self.scale * (self.population[0] - bprime) + bprime += self.scale * (self.population[r1] - + self.population[r2]) + return bprime + + def _currenttobest1(self, candidate, samples): + """currenttobest1bin, currenttobest1exp""" + r0, r1 = samples[..., :2].T + bprime = (self.population[candidate] + self.scale * + (self.population[0] - self.population[candidate] + + self.population[r0] - self.population[r1])) + return bprime + + def _best2(self, samples): + """best2bin, best2exp""" + r0, r1, r2, r3 = samples[..., :4].T + bprime = (self.population[0] + self.scale * + (self.population[r0] + self.population[r1] - + self.population[r2] - self.population[r3])) + + return bprime + + def _rand2(self, samples): + """rand2bin, rand2exp""" + r0, r1, r2, r3, r4 = samples[..., :5].T + bprime = (self.population[r0] + self.scale * + (self.population[r1] + self.population[r2] - + self.population[r3] - self.population[r4])) + + return bprime + + def _select_samples(self, candidate, number_samples): + """ + obtain random integers from range(self.num_population_members), + without replacement. 
You can't have the original candidate either. + """ + self.random_number_generator.shuffle(self._random_population_index) + idxs = self._random_population_index[:number_samples + 1] + return idxs[idxs != candidate][:number_samples] + + +class _ConstraintWrapper: + """Object to wrap/evaluate user defined constraints. + + Very similar in practice to `PreparedConstraint`, except that no evaluation + of jac/hess is performed (explicit or implicit). + + If created successfully, it will contain the attributes listed below. + + Parameters + ---------- + constraint : {`NonlinearConstraint`, `LinearConstraint`, `Bounds`} + Constraint to check and prepare. + x0 : array_like + Initial vector of independent variables, shape (N,) + + Attributes + ---------- + fun : callable + Function defining the constraint wrapped by one of the convenience + classes. + bounds : 2-tuple + Contains lower and upper bounds for the constraints --- lb and ub. + These are converted to ndarray and have a size equal to the number of + the constraints. + + Notes + ----- + _ConstraintWrapper.fun and _ConstraintWrapper.violation can get sent + arrays of shape (N, S) or (N,), where S is the number of vectors of shape + (N,) to consider constraints for. + """ + def __init__(self, constraint, x0): + self.constraint = constraint + + if isinstance(constraint, NonlinearConstraint): + def fun(x): + x = np.asarray(x) + return np.atleast_1d(constraint.fun(x)) + elif isinstance(constraint, LinearConstraint): + def fun(x): + if issparse(constraint.A): + A = constraint.A + else: + A = np.atleast_2d(constraint.A) + + res = A.dot(x) + # x either has shape (N, S) or (N) + # (M, N) x (N, S) --> (M, S) + # (M, N) x (N,) --> (M,) + # However, if (M, N) is a matrix then: + # (M, N) * (N,) --> (M, 1), we need this to be (M,) + if x.ndim == 1 and res.ndim == 2: + # deal with case that constraint.A is an np.matrix + # see gh20041 + res = np.asarray(res)[:, 0] + + return res + elif isinstance(constraint, Bounds): + def fun(x): + return np.asarray(x) + else: + raise ValueError("`constraint` of an unknown type is passed.") + + self.fun = fun + + lb = np.asarray(constraint.lb, dtype=float) + ub = np.asarray(constraint.ub, dtype=float) + + x0 = np.asarray(x0) + + # find out the number of constraints + f0 = fun(x0) + self.num_constr = m = f0.size + self.parameter_count = x0.size + + if lb.ndim == 0: + lb = np.resize(lb, m) + if ub.ndim == 0: + ub = np.resize(ub, m) + + self.bounds = (lb, ub) + + def __call__(self, x): + return np.atleast_1d(self.fun(x)) + + def violation(self, x): + """How much the constraint is exceeded by. + + Parameters + ---------- + x : array-like + Vector of independent variables, (N, S), where N is number of + parameters and S is the number of solutions to be investigated. + + Returns + ------- + excess : array-like + How much the constraint is exceeded by, for each of the + constraints specified by `_ConstraintWrapper.fun`. + Has shape (M, S) where M is the number of constraint components. + """ + # expect ev to have shape (num_constr, S) or (num_constr,) + ev = self.fun(np.asarray(x)) + + try: + excess_lb = np.maximum(self.bounds[0] - ev.T, 0) + excess_ub = np.maximum(ev.T - self.bounds[1], 0) + except ValueError as e: + raise RuntimeError("An array returned from a Constraint has" + " the wrong shape. If `vectorized is False`" + " the Constraint should return an array of" + " shape (M,). 
If `vectorized is True` then" + " the Constraint must return an array of" + " shape (M, S), where S is the number of" + " solution vectors and M is the number of" + " constraint components in a given" + " Constraint object.") from e + + v = (excess_lb + excess_ub).T + return v diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_direct.cpython-310-x86_64-linux-gnu.so b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_direct.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..fe89bac4a6f6b828def53da54b5eac17d45a4019 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_direct.cpython-310-x86_64-linux-gnu.so differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_direct_py.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_direct_py.py new file mode 100644 index 0000000000000000000000000000000000000000..4c01c38747dbef4b9c71e9b593316f466f484bca --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_direct_py.py @@ -0,0 +1,280 @@ +from typing import ( # noqa: UP035 + Any, Callable, Iterable +) + +import numpy as np +from scipy.optimize import OptimizeResult +from ._constraints import old_bound_to_new, Bounds +from ._direct import direct as _direct # type: ignore + +__all__ = ['direct'] + +ERROR_MESSAGES = ( + "Number of function evaluations done is larger than maxfun={}", + "Number of iterations is larger than maxiter={}", + "u[i] < l[i] for some i", + "maxfun is too large", + "Initialization failed", + "There was an error in the creation of the sample points", + "An error occurred while the function was sampled", + "Maximum number of levels has been reached.", + "Forced stop", + "Invalid arguments", + "Out of memory", +) + +SUCCESS_MESSAGES = ( + ("The best function value found is within a relative error={} " + "of the (known) global optimum f_min"), + ("The volume of the hyperrectangle containing the lowest function value " + "found is below vol_tol={}"), + ("The side length measure of the hyperrectangle containing the lowest " + "function value found is below len_tol={}"), +) + + +def direct( + func: Callable[ + [np.ndarray[tuple[int], np.dtype[np.float64]]], + float | np.floating[Any] | np.integer[Any] | np.bool_, + ], + bounds: Iterable | Bounds, + *, + args: tuple = (), + eps: float = 1e-4, + maxfun: int | None = None, + maxiter: int = 1000, + locally_biased: bool = True, + f_min: float = -np.inf, + f_min_rtol: float = 1e-4, + vol_tol: float = 1e-16, + len_tol: float = 1e-6, + callback: Callable[ + [np.ndarray[tuple[int], np.dtype[np.float64]]], + object, + ] | None = None, +) -> OptimizeResult: + """ + Finds the global minimum of a function using the + DIRECT algorithm. + + Parameters + ---------- + func : callable + The objective function to be minimized. + ``func(x, *args) -> float`` + where ``x`` is an 1-D array with shape (n,) and ``args`` is a tuple of + the fixed parameters needed to completely specify the function. + bounds : sequence or `Bounds` + Bounds for variables. There are two ways to specify the bounds: + + 1. Instance of `Bounds` class. + 2. ``(min, max)`` pairs for each element in ``x``. 
+ + args : tuple, optional + Any additional fixed parameters needed to + completely specify the objective function. + eps : float, optional + Minimal required difference of the objective function values + between the current best hyperrectangle and the next potentially + optimal hyperrectangle to be divided. In consequence, `eps` serves as a + tradeoff between local and global search: the smaller, the more local + the search becomes. Default is 1e-4. + maxfun : int or None, optional + Approximate upper bound on objective function evaluations. + If `None`, will be automatically set to ``1000 * N`` where ``N`` + represents the number of dimensions. Will be capped if necessary to + limit DIRECT's RAM usage to app. 1GiB. This will only occur for very + high dimensional problems and excessive `max_fun`. Default is `None`. + maxiter : int, optional + Maximum number of iterations. Default is 1000. + locally_biased : bool, optional + If `True` (default), use the locally biased variant of the + algorithm known as DIRECT_L. If `False`, use the original unbiased + DIRECT algorithm. For hard problems with many local minima, + `False` is recommended. + f_min : float, optional + Function value of the global optimum. Set this value only if the + global optimum is known. Default is ``-np.inf``, so that this + termination criterion is deactivated. + f_min_rtol : float, optional + Terminate the optimization once the relative error between the + current best minimum `f` and the supplied global minimum `f_min` + is smaller than `f_min_rtol`. This parameter is only used if + `f_min` is also set. Must lie between 0 and 1. Default is 1e-4. + vol_tol : float, optional + Terminate the optimization once the volume of the hyperrectangle + containing the lowest function value is smaller than `vol_tol` + of the complete search space. Must lie between 0 and 1. + Default is 1e-16. + len_tol : float, optional + If ``locally_biased=True``, terminate the optimization once half of + the normalized maximal side length of the hyperrectangle containing + the lowest function value is smaller than `len_tol`. + If ``locally_biased=False``, terminate the optimization once half of + the normalized diagonal of the hyperrectangle containing the lowest + function value is smaller than `len_tol`. Must lie between 0 and 1. + Default is 1e-6. + callback : callable, optional + A callback function with signature ``callback(xk)`` where ``xk`` + represents the best function value found so far. + + Returns + ------- + res : OptimizeResult + The optimization result represented as a ``OptimizeResult`` object. + Important attributes are: ``x`` the solution array, ``success`` a + Boolean flag indicating if the optimizer exited successfully and + ``message`` which describes the cause of the termination. See + `OptimizeResult` for a description of other attributes. + + Notes + ----- + DIviding RECTangles (DIRECT) is a deterministic global + optimization algorithm capable of minimizing a black box function with + its variables subject to lower and upper bound constraints by sampling + potential solutions in the search space [1]_. The algorithm starts by + normalising the search space to an n-dimensional unit hypercube. + It samples the function at the center of this hypercube and at 2n + (n is the number of variables) more points, 2 in each coordinate + direction. Using these function values, DIRECT then divides the + domain into hyperrectangles, each having exactly one of the sampling + points as its center. 
In each iteration, DIRECT chooses, using the `eps` + parameter which defaults to 1e-4, some of the existing hyperrectangles + to be further divided. This division process continues until either the + maximum number of iterations or maximum function evaluations allowed + are exceeded, or the hyperrectangle containing the minimal value found + so far becomes small enough. If `f_min` is specified, the optimization + will stop once this function value is reached within a relative tolerance. + The locally biased variant of DIRECT (originally called DIRECT_L) [2]_ is + used by default. It makes the search more locally biased and more + efficient for cases with only a few local minima. + + A note about termination criteria: `vol_tol` refers to the volume of the + hyperrectangle containing the lowest function value found so far. This + volume decreases exponentially with increasing dimensionality of the + problem. Therefore `vol_tol` should be decreased to avoid premature + termination of the algorithm for higher dimensions. This does not hold + for `len_tol`: it refers either to half of the maximal side length + (for ``locally_biased=True``) or half of the diagonal of the + hyperrectangle (for ``locally_biased=False``). + + This code is based on the DIRECT 2.0.4 Fortran code by Gablonsky et al. at + https://ctk.math.ncsu.edu/SOFTWARE/DIRECTv204.tar.gz . + This original version was initially converted via f2c and then cleaned up + and reorganized by Steven G. Johnson, August 2007, for the NLopt project. + The `direct` function wraps the C implementation. + + .. versionadded:: 1.9.0 + + References + ---------- + .. [1] Jones, D.R., Perttunen, C.D. & Stuckman, B.E. Lipschitzian + optimization without the Lipschitz constant. J Optim Theory Appl + 79, 157-181 (1993). + .. [2] Gablonsky, J., Kelley, C. A Locally-Biased form of the DIRECT + Algorithm. Journal of Global Optimization 21, 27-37 (2001). + + Examples + -------- + The following example is a 2-D problem with four local minima: minimizing + the Styblinski-Tang function + (https://en.wikipedia.org/wiki/Test_functions_for_optimization). + + >>> from scipy.optimize import direct, Bounds + >>> def styblinski_tang(pos): + ... x, y = pos + ... return 0.5 * (x**4 - 16*x**2 + 5*x + y**4 - 16*y**2 + 5*y) + >>> bounds = Bounds([-4., -4.], [4., 4.]) + >>> result = direct(styblinski_tang, bounds) + >>> result.x, result.fun, result.nfev + array([-2.90321597, -2.90321597]), -78.3323279095383, 2011 + + The correct global minimum was found but with a huge number of function + evaluations (2011). Loosening the termination tolerances `vol_tol` and + `len_tol` can be used to stop DIRECT earlier. 
+ + >>> result = direct(styblinski_tang, bounds, len_tol=1e-3) + >>> result.x, result.fun, result.nfev + array([-2.9044353, -2.9044353]), -78.33230330754142, 207 + + """ + # convert bounds to new Bounds class if necessary + if not isinstance(bounds, Bounds): + if isinstance(bounds, list) or isinstance(bounds, tuple): + lb, ub = old_bound_to_new(bounds) + bounds = Bounds(lb, ub) + else: + message = ("bounds must be a sequence or " + "instance of Bounds class") + raise ValueError(message) + + lb = np.ascontiguousarray(bounds.lb, dtype=np.float64) + ub = np.ascontiguousarray(bounds.ub, dtype=np.float64) + + # validate bounds + # check that lower bounds are smaller than upper bounds + if not np.all(lb < ub): + raise ValueError('Bounds are not consistent min < max') + # check for infs + if (np.any(np.isinf(lb)) or np.any(np.isinf(ub))): + raise ValueError("Bounds must not be inf.") + + # validate tolerances + if (vol_tol < 0 or vol_tol > 1): + raise ValueError("vol_tol must be between 0 and 1.") + if (len_tol < 0 or len_tol > 1): + raise ValueError("len_tol must be between 0 and 1.") + if (f_min_rtol < 0 or f_min_rtol > 1): + raise ValueError("f_min_rtol must be between 0 and 1.") + + # validate maxfun and maxiter + if maxfun is None: + maxfun = 1000 * lb.shape[0] + if not isinstance(maxfun, int): + raise ValueError("maxfun must be of type int.") + if maxfun < 0: + raise ValueError("maxfun must be > 0.") + if not isinstance(maxiter, int): + raise ValueError("maxiter must be of type int.") + if maxiter < 0: + raise ValueError("maxiter must be > 0.") + + # validate boolean parameters + if not isinstance(locally_biased, bool): + raise ValueError("locally_biased must be True or False.") + + def _func_wrap(x, args=None): + x = np.asarray(x) + if args is None: + f = func(x) + else: + f = func(x, *args) + # always return a float + return np.asarray(f).item() + + # TODO: fix disp argument + x, fun, ret_code, nfev, nit = _direct( + _func_wrap, + np.asarray(lb), np.asarray(ub), + args, + False, eps, maxfun, maxiter, + locally_biased, + f_min, f_min_rtol, + vol_tol, len_tol, callback + ) + + format_val = (maxfun, maxiter, f_min_rtol, vol_tol, len_tol) + if ret_code > 2: + message = SUCCESS_MESSAGES[ret_code - 3].format( + format_val[ret_code - 1]) + elif 0 < ret_code <= 2: + message = ERROR_MESSAGES[ret_code - 1].format(format_val[ret_code - 1]) + elif 0 > ret_code > -100: + message = ERROR_MESSAGES[abs(ret_code) + 1] + else: + message = ERROR_MESSAGES[ret_code + 99] + + return OptimizeResult(x=np.asarray(x), fun=fun, status=ret_code, + success=ret_code > 2, message=message, + nfev=nfev, nit=nit) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_dual_annealing.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_dual_annealing.py new file mode 100644 index 0000000000000000000000000000000000000000..eb480a902c593ffee1d242d79018c9175bcc6d3a --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_dual_annealing.py @@ -0,0 +1,732 @@ +# Dual Annealing implementation. +# Copyright (c) 2018 Sylvain Gubian , +# Yang Xiang +# Author: Sylvain Gubian, Yang Xiang, PMP S.A. 
+ +""" +A Dual Annealing global optimization algorithm +""" + +import numpy as np +from scipy.optimize import OptimizeResult +from scipy.optimize import minimize, Bounds +from scipy.special import gammaln +from scipy._lib._util import check_random_state, _transition_to_rng +from scipy.optimize._constraints import new_bounds_to_old + +__all__ = ['dual_annealing'] + + +class VisitingDistribution: + """ + Class used to generate new coordinates based on the distorted + Cauchy-Lorentz distribution. Depending on the steps within the strategy + chain, the class implements the strategy for generating new location + changes. + + Parameters + ---------- + lb : array_like + A 1-D NumPy ndarray containing lower bounds of the generated + components. Neither NaN or inf are allowed. + ub : array_like + A 1-D NumPy ndarray containing upper bounds for the generated + components. Neither NaN or inf are allowed. + visiting_param : float + Parameter for visiting distribution. Default value is 2.62. + Higher values give the visiting distribution a heavier tail, this + makes the algorithm jump to a more distant region. + The value range is (1, 3]. Its value is fixed for the life of the + object. + rng_gen : {`~numpy.random.Generator`} + A `~numpy.random.Generator` object for generating new locations. + (can be a `~numpy.random.RandomState` object until SPEC007 transition + is fully complete). + + """ + TAIL_LIMIT = 1.e8 + MIN_VISIT_BOUND = 1.e-10 + + def __init__(self, lb, ub, visiting_param, rng_gen): + # if you wish to make _visiting_param adjustable during the life of + # the object then _factor2, _factor3, _factor5, _d1, _factor6 will + # have to be dynamically calculated in `visit_fn`. They're factored + # out here so they don't need to be recalculated all the time. 
+ self._visiting_param = visiting_param + self.rng_gen = rng_gen + self.lower = lb + self.upper = ub + self.bound_range = ub - lb + + # these are invariant numbers unless visiting_param changes + self._factor2 = np.exp((4.0 - self._visiting_param) * np.log( + self._visiting_param - 1.0)) + self._factor3 = np.exp((2.0 - self._visiting_param) * np.log(2.0) + / (self._visiting_param - 1.0)) + self._factor4_p = np.sqrt(np.pi) * self._factor2 / (self._factor3 * ( + 3.0 - self._visiting_param)) + + self._factor5 = 1.0 / (self._visiting_param - 1.0) - 0.5 + self._d1 = 2.0 - self._factor5 + self._factor6 = np.pi * (1.0 - self._factor5) / np.sin( + np.pi * (1.0 - self._factor5)) / np.exp(gammaln(self._d1)) + + def visiting(self, x, step, temperature): + """ Based on the step in the strategy chain, new coordinates are + generated by changing all components is the same time or only + one of them, the new values are computed with visit_fn method + """ + dim = x.size + if step < dim: + # Changing all coordinates with a new visiting value + visits = self.visit_fn(temperature, dim) + upper_sample, lower_sample = self.rng_gen.uniform(size=2) + visits[visits > self.TAIL_LIMIT] = self.TAIL_LIMIT * upper_sample + visits[visits < -self.TAIL_LIMIT] = -self.TAIL_LIMIT * lower_sample + x_visit = visits + x + a = x_visit - self.lower + b = np.fmod(a, self.bound_range) + self.bound_range + x_visit = np.fmod(b, self.bound_range) + self.lower + x_visit[np.fabs( + x_visit - self.lower) < self.MIN_VISIT_BOUND] += 1.e-10 + else: + # Changing only one coordinate at a time based on strategy + # chain step + x_visit = np.copy(x) + visit = self.visit_fn(temperature, 1)[0] + if visit > self.TAIL_LIMIT: + visit = self.TAIL_LIMIT * self.rng_gen.uniform() + elif visit < -self.TAIL_LIMIT: + visit = -self.TAIL_LIMIT * self.rng_gen.uniform() + index = step - dim + x_visit[index] = visit + x[index] + a = x_visit[index] - self.lower[index] + b = np.fmod(a, self.bound_range[index]) + self.bound_range[index] + x_visit[index] = np.fmod(b, self.bound_range[ + index]) + self.lower[index] + if np.fabs(x_visit[index] - self.lower[ + index]) < self.MIN_VISIT_BOUND: + x_visit[index] += self.MIN_VISIT_BOUND + return x_visit + + def visit_fn(self, temperature, dim): + """ Formula Visita from p. 405 of reference [2] """ + x, y = self.rng_gen.normal(size=(dim, 2)).T + + factor1 = np.exp(np.log(temperature) / (self._visiting_param - 1.0)) + factor4 = self._factor4_p * factor1 + + # sigmax + x *= np.exp(-(self._visiting_param - 1.0) * np.log( + self._factor6 / factor4) / (3.0 - self._visiting_param)) + + den = np.exp((self._visiting_param - 1.0) * np.log(np.fabs(y)) / + (3.0 - self._visiting_param)) + + return x / den + + +class EnergyState: + """ + Class used to record the energy state. At any time, it knows what is the + currently used coordinates and the most recent best location. + + Parameters + ---------- + lower : array_like + A 1-D NumPy ndarray containing lower bounds for generating an initial + random components in the `reset` method. + upper : array_like + A 1-D NumPy ndarray containing upper bounds for generating an initial + random components in the `reset` method + components. Neither NaN or inf are allowed. + callback : callable, ``callback(x, f, context)``, optional + A callback function which will be called for all minima found. 
+ ``x`` and ``f`` are the coordinates and function value of the + latest minimum found, and `context` has value in [0, 1, 2] + """ + # Maximum number of trials for generating a valid starting point + MAX_REINIT_COUNT = 1000 + + def __init__(self, lower, upper, callback=None): + self.ebest = None + self.current_energy = None + self.current_location = None + self.xbest = None + self.lower = lower + self.upper = upper + self.callback = callback + + def reset(self, func_wrapper, rng_gen, x0=None): + """ + Initialize current location is the search domain. If `x0` is not + provided, a random location within the bounds is generated. + """ + if x0 is None: + self.current_location = rng_gen.uniform(self.lower, self.upper, + size=len(self.lower)) + else: + self.current_location = np.copy(x0) + init_error = True + reinit_counter = 0 + while init_error: + self.current_energy = func_wrapper.fun(self.current_location) + if self.current_energy is None: + raise ValueError('Objective function is returning None') + if not np.isfinite(self.current_energy): + if reinit_counter >= EnergyState.MAX_REINIT_COUNT: + init_error = False + message = ( + 'Stopping algorithm because function ' + 'create NaN or (+/-) infinity values even with ' + 'trying new random parameters' + ) + raise ValueError(message) + self.current_location = rng_gen.uniform(self.lower, + self.upper, + size=self.lower.size) + reinit_counter += 1 + else: + init_error = False + # If first time reset, initialize ebest and xbest + if self.ebest is None and self.xbest is None: + self.ebest = self.current_energy + self.xbest = np.copy(self.current_location) + # Otherwise, we keep them in case of reannealing reset + + def update_best(self, e, x, context): + self.ebest = e + self.xbest = np.copy(x) + if self.callback is not None: + val = self.callback(x, e, context) + if val is not None: + if val: + return ('Callback function requested to stop early by ' + 'returning True') + + def update_current(self, e, x): + self.current_energy = e + self.current_location = np.copy(x) + + +class StrategyChain: + """ + Class that implements within a Markov chain the strategy for location + acceptance and local search decision making. + + Parameters + ---------- + acceptance_param : float + Parameter for acceptance distribution. It is used to control the + probability of acceptance. The lower the acceptance parameter, the + smaller the probability of acceptance. Default value is -5.0 with + a range (-1e4, -5]. + visit_dist : VisitingDistribution + Instance of `VisitingDistribution` class. + func_wrapper : ObjectiveFunWrapper + Instance of `ObjectiveFunWrapper` class. + minimizer_wrapper: LocalSearchWrapper + Instance of `LocalSearchWrapper` class. + rand_gen : {None, int, `numpy.random.Generator`, + `numpy.random.RandomState`}, optional + + If `seed` is None (or `np.random`), the `numpy.random.RandomState` + singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, + seeded with `seed`. + If `seed` is already a ``Generator`` or ``RandomState`` instance then + that instance is used. + energy_state: EnergyState + Instance of `EnergyState` class. 
+ + """ + + def __init__(self, acceptance_param, visit_dist, func_wrapper, + minimizer_wrapper, rand_gen, energy_state): + # Local strategy chain minimum energy and location + self.emin = energy_state.current_energy + self.xmin = np.array(energy_state.current_location) + # Global optimizer state + self.energy_state = energy_state + # Acceptance parameter + self.acceptance_param = acceptance_param + # Visiting distribution instance + self.visit_dist = visit_dist + # Wrapper to objective function + self.func_wrapper = func_wrapper + # Wrapper to the local minimizer + self.minimizer_wrapper = minimizer_wrapper + self.not_improved_idx = 0 + self.not_improved_max_idx = 1000 + self._rand_gen = rand_gen + self.temperature_step = 0 + self.K = 100 * len(energy_state.current_location) + + def accept_reject(self, j, e, x_visit): + r = self._rand_gen.uniform() + pqv_temp = 1.0 - ((1.0 - self.acceptance_param) * + (e - self.energy_state.current_energy) / self.temperature_step) + if pqv_temp <= 0.: + pqv = 0. + else: + pqv = np.exp(np.log(pqv_temp) / ( + 1. - self.acceptance_param)) + + if r <= pqv: + # We accept the new location and update state + self.energy_state.update_current(e, x_visit) + self.xmin = np.copy(self.energy_state.current_location) + + # No improvement for a long time + if self.not_improved_idx >= self.not_improved_max_idx: + if j == 0 or self.energy_state.current_energy < self.emin: + self.emin = self.energy_state.current_energy + self.xmin = np.copy(self.energy_state.current_location) + + def run(self, step, temperature): + self.temperature_step = temperature / float(step + 1) + self.not_improved_idx += 1 + for j in range(self.energy_state.current_location.size * 2): + if j == 0: + if step == 0: + self.energy_state_improved = True + else: + self.energy_state_improved = False + x_visit = self.visit_dist.visiting( + self.energy_state.current_location, j, temperature) + # Calling the objective function + e = self.func_wrapper.fun(x_visit) + if e < self.energy_state.current_energy: + # We have got a better energy value + self.energy_state.update_current(e, x_visit) + if e < self.energy_state.ebest: + val = self.energy_state.update_best(e, x_visit, 0) + if val is not None: + if val: + return val + self.energy_state_improved = True + self.not_improved_idx = 0 + else: + # We have not improved but do we accept the new location? 
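+                # accept_reject draws r ~ U(0, 1) and accepts the uphill move
+                # when r <= [1 - (1 - q_a) * (e - e_current) / t_step] ** (1 / (1 - q_a)),
+                # the generalized Metropolis rule with q_a = acceptance_param.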
+ self.accept_reject(j, e, x_visit) + if self.func_wrapper.nfev >= self.func_wrapper.maxfun: + return ('Maximum number of function call reached ' + 'during annealing') + # End of StrategyChain loop + + def local_search(self): + # Decision making for performing a local search + # based on strategy chain results + # If energy has been improved or no improvement since too long, + # performing a local search with the best strategy chain location + if self.energy_state_improved: + # Global energy has improved, let's see if LS improves further + e, x = self.minimizer_wrapper.local_search(self.energy_state.xbest, + self.energy_state.ebest) + if e < self.energy_state.ebest: + self.not_improved_idx = 0 + val = self.energy_state.update_best(e, x, 1) + if val is not None: + if val: + return val + self.energy_state.update_current(e, x) + if self.func_wrapper.nfev >= self.func_wrapper.maxfun: + return ('Maximum number of function call reached ' + 'during local search') + # Check probability of a need to perform a LS even if no improvement + do_ls = False + if self.K < 90 * len(self.energy_state.current_location): + pls = np.exp(self.K * ( + self.energy_state.ebest - self.energy_state.current_energy) / + self.temperature_step) + if pls >= self._rand_gen.uniform(): + do_ls = True + # Global energy not improved, let's see what LS gives + # on the best strategy chain location + if self.not_improved_idx >= self.not_improved_max_idx: + do_ls = True + if do_ls: + e, x = self.minimizer_wrapper.local_search(self.xmin, self.emin) + self.xmin = np.copy(x) + self.emin = e + self.not_improved_idx = 0 + self.not_improved_max_idx = self.energy_state.current_location.size + if e < self.energy_state.ebest: + val = self.energy_state.update_best( + self.emin, self.xmin, 2) + if val is not None: + if val: + return val + self.energy_state.update_current(e, x) + if self.func_wrapper.nfev >= self.func_wrapper.maxfun: + return ('Maximum number of function call reached ' + 'during dual annealing') + + +class ObjectiveFunWrapper: + + def __init__(self, func, maxfun=1e7, *args): + self.func = func + self.args = args + # Number of objective function evaluations + self.nfev = 0 + # Number of gradient function evaluation if used + self.ngev = 0 + # Number of hessian of the objective function if used + self.nhev = 0 + self.maxfun = maxfun + + def fun(self, x): + self.nfev += 1 + return self.func(x, *self.args) + + +class LocalSearchWrapper: + """ + Class used to wrap around the minimizer used for local search + Default local minimizer is SciPy minimizer L-BFGS-B + """ + + LS_MAXITER_RATIO = 6 + LS_MAXITER_MIN = 100 + LS_MAXITER_MAX = 1000 + + def __init__(self, search_bounds, func_wrapper, *args, **kwargs): + self.func_wrapper = func_wrapper + self.kwargs = kwargs + self.jac = self.kwargs.get('jac', None) + self.hess = self.kwargs.get('hess', None) + self.hessp = self.kwargs.get('hessp', None) + self.kwargs.pop("args", None) + self.minimizer = minimize + bounds_list = list(zip(*search_bounds)) + self.lower = np.array(bounds_list[0]) + self.upper = np.array(bounds_list[1]) + + # If no minimizer specified, use SciPy minimize with 'L-BFGS-B' method + if not self.kwargs: + n = len(self.lower) + ls_max_iter = min(max(n * self.LS_MAXITER_RATIO, + self.LS_MAXITER_MIN), + self.LS_MAXITER_MAX) + self.kwargs['method'] = 'L-BFGS-B' + self.kwargs['options'] = { + 'maxiter': ls_max_iter, + } + self.kwargs['bounds'] = list(zip(self.lower, self.upper)) + else: + if callable(self.jac): + def wrapped_jac(x): + return self.jac(x, *args) + 
self.kwargs['jac'] = wrapped_jac + if callable(self.hess): + def wrapped_hess(x): + return self.hess(x, *args) + self.kwargs['hess'] = wrapped_hess + if callable(self.hessp): + def wrapped_hessp(x, p): + return self.hessp(x, p, *args) + self.kwargs['hessp'] = wrapped_hessp + + def local_search(self, x, e): + # Run local search from the given x location where energy value is e + x_tmp = np.copy(x) + mres = self.minimizer(self.func_wrapper.fun, x, **self.kwargs) + if 'njev' in mres: + self.func_wrapper.ngev += mres.njev + if 'nhev' in mres: + self.func_wrapper.nhev += mres.nhev + # Check if is valid value + is_finite = np.all(np.isfinite(mres.x)) and np.isfinite(mres.fun) + in_bounds = np.all(mres.x >= self.lower) and np.all( + mres.x <= self.upper) + is_valid = is_finite and in_bounds + + # Use the new point only if it is valid and return a better results + if is_valid and mres.fun < e: + return mres.fun, mres.x + else: + return e, x_tmp + + +@_transition_to_rng("seed", position_num=10) +def dual_annealing(func, bounds, args=(), maxiter=1000, + minimizer_kwargs=None, initial_temp=5230., + restart_temp_ratio=2.e-5, visit=2.62, accept=-5.0, + maxfun=1e7, rng=None, no_local_search=False, + callback=None, x0=None): + """ + Find the global minimum of a function using Dual Annealing. + + Parameters + ---------- + func : callable + The objective function to be minimized. Must be in the form + ``f(x, *args)``, where ``x`` is the argument in the form of a 1-D array + and ``args`` is a tuple of any additional fixed parameters needed to + completely specify the function. + bounds : sequence or `Bounds` + Bounds for variables. There are two ways to specify the bounds: + + 1. Instance of `Bounds` class. + 2. Sequence of ``(min, max)`` pairs for each element in `x`. + + args : tuple, optional + Any additional fixed parameters needed to completely specify the + objective function. + maxiter : int, optional + The maximum number of global search iterations. Default value is 1000. + minimizer_kwargs : dict, optional + Keyword arguments to be passed to the local minimizer + (`minimize`). An important option could be ``method`` for the minimizer + method to use. + If no keyword arguments are provided, the local minimizer defaults to + 'L-BFGS-B' and uses the already supplied bounds. If `minimizer_kwargs` + is specified, then the dict must contain all parameters required to + control the local minimization. `args` is ignored in this dict, as it is + passed automatically. `bounds` is not automatically passed on to the + local minimizer as the method may not support them. + initial_temp : float, optional + The initial temperature, use higher values to facilitates a wider + search of the energy landscape, allowing dual_annealing to escape + local minima that it is trapped in. Default value is 5230. Range is + (0.01, 5.e4]. + restart_temp_ratio : float, optional + During the annealing process, temperature is decreasing, when it + reaches ``initial_temp * restart_temp_ratio``, the reannealing process + is triggered. Default value of the ratio is 2e-5. Range is (0, 1). + visit : float, optional + Parameter for visiting distribution. Default value is 2.62. Higher + values give the visiting distribution a heavier tail, this makes + the algorithm jump to a more distant region. The value range is (1, 3]. + accept : float, optional + Parameter for acceptance distribution. It is used to control the + probability of acceptance. The lower the acceptance parameter, the + smaller the probability of acceptance. 
Default value is -5.0 with + a range (-1e4, -5]. + maxfun : int, optional + Soft limit for the number of objective function calls. If the + algorithm is in the middle of a local search, this number will be + exceeded, the algorithm will stop just after the local search is + done. Default value is 1e7. + rng : `numpy.random.Generator`, optional + Pseudorandom number generator state. When `rng` is None, a new + `numpy.random.Generator` is created using entropy from the + operating system. Types other than `numpy.random.Generator` are + passed to `numpy.random.default_rng` to instantiate a `Generator`. + + Specify `rng` for repeatable minimizations. The random numbers + generated only affect the visiting distribution function + and new coordinates generation. + no_local_search : bool, optional + If `no_local_search` is set to True, a traditional Generalized + Simulated Annealing will be performed with no local search + strategy applied. + callback : callable, optional + A callback function with signature ``callback(x, f, context)``, + which will be called for all minima found. + ``x`` and ``f`` are the coordinates and function value of the + latest minimum found, and ``context`` has one of the following + values: + + - ``0``: minimum detected in the annealing process. + - ``1``: detection occurred in the local search process. + - ``2``: detection done in the dual annealing process. + + If the callback implementation returns True, the algorithm will stop. + x0 : ndarray, shape(n,), optional + Coordinates of a single N-D starting point. + + Returns + ------- + res : OptimizeResult + The optimization result represented as a `OptimizeResult` object. + Important attributes are: ``x`` the solution array, ``fun`` the value + of the function at the solution, and ``message`` which describes the + cause of the termination. + See `OptimizeResult` for a description of other attributes. + + Notes + ----- + This function implements the Dual Annealing optimization. This stochastic + approach derived from [3]_ combines the generalization of CSA (Classical + Simulated Annealing) and FSA (Fast Simulated Annealing) [1]_ [2]_ coupled + to a strategy for applying a local search on accepted locations [4]_. + An alternative implementation of this same algorithm is described in [5]_ + and benchmarks are presented in [6]_. This approach introduces an advanced + method to refine the solution found by the generalized annealing + process. This algorithm uses a distorted Cauchy-Lorentz visiting + distribution, with its shape controlled by the parameter :math:`q_{v}` + + .. math:: + + g_{q_{v}}(\\Delta x(t)) \\propto \\frac{ \\ + \\left[T_{q_{v}}(t) \\right]^{-\\frac{D}{3-q_{v}}}}{ \\ + \\left[{1+(q_{v}-1)\\frac{(\\Delta x(t))^{2}} { \\ + \\left[T_{q_{v}}(t)\\right]^{\\frac{2}{3-q_{v}}}}}\\right]^{ \\ + \\frac{1}{q_{v}-1}+\\frac{D-1}{2}}} + + Where :math:`t` is the artificial time. This visiting distribution is used + to generate a trial jump distance :math:`\\Delta x(t)` of variable + :math:`x(t)` under artificial temperature :math:`T_{q_{v}}(t)`. + + From the starting point, after calling the visiting distribution + function, the acceptance probability is computed as follows: + + .. math:: + + p_{q_{a}} = \\min{\\{1,\\left[1-(1-q_{a}) \\beta \\Delta E \\right]^{ \\ + \\frac{1}{1-q_{a}}}\\}} + + Where :math:`q_{a}` is a acceptance parameter. For :math:`q_{a}<1`, zero + acceptance probability is assigned to the cases where + + .. 
math:: + + [1-(1-q_{a}) \\beta \\Delta E] < 0 + + The artificial temperature :math:`T_{q_{v}}(t)` is decreased according to + + .. math:: + + T_{q_{v}}(t) = T_{q_{v}}(1) \\frac{2^{q_{v}-1}-1}{\\left( \\ + 1 + t\\right)^{q_{v}-1}-1} + + Where :math:`q_{v}` is the visiting parameter. + + .. versionadded:: 1.2.0 + + References + ---------- + .. [1] Tsallis C. Possible generalization of Boltzmann-Gibbs + statistics. Journal of Statistical Physics, 52, 479-487 (1998). + .. [2] Tsallis C, Stariolo DA. Generalized Simulated Annealing. + Physica A, 233, 395-406 (1996). + .. [3] Xiang Y, Sun DY, Fan W, Gong XG. Generalized Simulated + Annealing Algorithm and Its Application to the Thomson Model. + Physics Letters A, 233, 216-220 (1997). + .. [4] Xiang Y, Gong XG. Efficiency of Generalized Simulated + Annealing. Physical Review E, 62, 4473 (2000). + .. [5] Xiang Y, Gubian S, Suomela B, Hoeng J. Generalized + Simulated Annealing for Efficient Global Optimization: the GenSA + Package for R. The R Journal, Volume 5/1 (2013). + .. [6] Mullen, K. Continuous Global Optimization in R. Journal of + Statistical Software, 60(6), 1 - 45, (2014). + :doi:`10.18637/jss.v060.i06` + + Examples + -------- + The following example is a 10-D problem, with many local minima. + The function involved is called Rastrigin + (https://en.wikipedia.org/wiki/Rastrigin_function) + + >>> import numpy as np + >>> from scipy.optimize import dual_annealing + >>> func = lambda x: np.sum(x*x - 10*np.cos(2*np.pi*x)) + 10*np.size(x) + >>> lw = [-5.12] * 10 + >>> up = [5.12] * 10 + >>> ret = dual_annealing(func, bounds=list(zip(lw, up))) + >>> ret.x + array([-4.26437714e-09, -3.91699361e-09, -1.86149218e-09, -3.97165720e-09, + -6.29151648e-09, -6.53145322e-09, -3.93616815e-09, -6.55623025e-09, + -6.05775280e-09, -5.00668935e-09]) # random + >>> ret.fun + 0.000000 + + """ + + if isinstance(bounds, Bounds): + bounds = new_bounds_to_old(bounds.lb, bounds.ub, len(bounds.lb)) + + if x0 is not None and not len(x0) == len(bounds): + raise ValueError('Bounds size does not match x0') + + lu = list(zip(*bounds)) + lower = np.array(lu[0]) + upper = np.array(lu[1]) + # Check that restart temperature ratio is correct + if restart_temp_ratio <= 0. 
or restart_temp_ratio >= 1.: + raise ValueError('Restart temperature ratio has to be in range (0, 1)') + # Checking bounds are valid + if (np.any(np.isinf(lower)) or np.any(np.isinf(upper)) or np.any( + np.isnan(lower)) or np.any(np.isnan(upper))): + raise ValueError('Some bounds values are inf values or nan values') + # Checking that bounds are consistent + if not np.all(lower < upper): + raise ValueError('Bounds are not consistent min < max') + # Checking that bounds are the same length + if not len(lower) == len(upper): + raise ValueError('Bounds do not have the same dimensions') + + # Wrapper for the objective function + func_wrapper = ObjectiveFunWrapper(func, maxfun, *args) + + # minimizer_kwargs has to be a dict, not None + minimizer_kwargs = minimizer_kwargs or {} + + minimizer_wrapper = LocalSearchWrapper( + bounds, func_wrapper, *args, **minimizer_kwargs) + + # Initialization of random Generator for reproducible runs if rng provided + rng_gen = check_random_state(rng) + # Initialization of the energy state + energy_state = EnergyState(lower, upper, callback) + energy_state.reset(func_wrapper, rng_gen, x0) + # Minimum value of annealing temperature reached to perform + # re-annealing + temperature_restart = initial_temp * restart_temp_ratio + # VisitingDistribution instance + visit_dist = VisitingDistribution(lower, upper, visit, rng_gen) + # Strategy chain instance + strategy_chain = StrategyChain(accept, visit_dist, func_wrapper, + minimizer_wrapper, rng_gen, energy_state) + need_to_stop = False + iteration = 0 + message = [] + # OptimizeResult object to be returned + optimize_res = OptimizeResult() + optimize_res.success = True + optimize_res.status = 0 + + t1 = np.exp((visit - 1) * np.log(2.0)) - 1.0 + # Run the search loop + while not need_to_stop: + for i in range(maxiter): + # Compute temperature for this step + s = float(i) + 2.0 + t2 = np.exp((visit - 1) * np.log(s)) - 1.0 + temperature = initial_temp * t1 / t2 + if iteration >= maxiter: + message.append("Maximum number of iteration reached") + need_to_stop = True + break + # Need a re-annealing process? 
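+            # Re-annealing: once the temperature decays below
+            # initial_temp * restart_temp_ratio, restart the chain from a
+            # fresh random location; EnergyState.reset keeps xbest/ebest.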
+ if temperature < temperature_restart: + energy_state.reset(func_wrapper, rng_gen) + break + # starting strategy chain + val = strategy_chain.run(i, temperature) + if val is not None: + message.append(val) + need_to_stop = True + optimize_res.success = False + break + # Possible local search at the end of the strategy chain + if not no_local_search: + val = strategy_chain.local_search() + if val is not None: + message.append(val) + need_to_stop = True + optimize_res.success = False + break + iteration += 1 + + # Setting the OptimizeResult values + optimize_res.x = energy_state.xbest + optimize_res.fun = energy_state.ebest + optimize_res.nit = iteration + optimize_res.nfev = func_wrapper.nfev + optimize_res.njev = func_wrapper.ngev + optimize_res.nhev = func_wrapper.nhev + optimize_res.message = message + return optimize_res diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_elementwise.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_elementwise.py new file mode 100644 index 0000000000000000000000000000000000000000..883c644dbcbbb954ea3e3c184bd610b28f05cca8 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_elementwise.py @@ -0,0 +1,801 @@ +from scipy.optimize._bracket import _bracket_root, _bracket_minimum +from scipy.optimize._chandrupatla import _chandrupatla, _chandrupatla_minimize +from scipy._lib._util import _RichResult + + +def find_root(f, init, /, *, args=(), tolerances=None, maxiter=None, callback=None): + """Find the root of a monotonic, real-valued function of a real variable. + + For each element of the output of `f`, `find_root` seeks the scalar + root that makes the element 0. This function currently uses Chandrupatla's + bracketing algorithm [1]_ and therefore requires argument `init` to + provide a bracket around the root: the function values at the two endpoints + must have opposite signs. + + Provided a valid bracket, `find_root` is guaranteed to converge to a solution + that satisfies the provided `tolerances` if the function is continuous within + the bracket. + + This function works elementwise when `init` and `args` contain (broadcastable) + arrays. + + Parameters + ---------- + f : callable + The function whose root is desired. The signature must be:: + + f(x: array, *args) -> array + + where each element of ``x`` is a finite real and ``args`` is a tuple, + which may contain an arbitrary number of arrays that are broadcastable + with ``x``. + + `f` must be an elementwise function: each element ``f(x)[i]`` + must equal ``f(x[i])`` for all indices ``i``. It must not mutate the + array ``x`` or the arrays in ``args``. + + `find_root` seeks an array ``x`` such that ``f(x)`` is an array of zeros. + init : 2-tuple of float array_like + The lower and upper endpoints of a bracket surrounding the desired root. + A bracket is valid if arrays ``xl, xr = init`` satisfy ``xl < xr`` and + ``sign(f(xl)) == -sign(f(xr))`` elementwise. Arrays be broadcastable with + one another and `args`. + args : tuple of array_like, optional + Additional positional array arguments to be passed to `f`. Arrays + must be broadcastable with one another and the arrays of `init`. + If the callable for which the root is desired requires arguments that are + not broadcastable with `x`, wrap that callable with `f` such that `f` + accepts only `x` and broadcastable ``*args``. 
+ tolerances : dictionary of floats, optional + Absolute and relative tolerances on the root and function value. + Valid keys of the dictionary are: + + - ``xatol`` - absolute tolerance on the root + - ``xrtol`` - relative tolerance on the root + - ``fatol`` - absolute tolerance on the function value + - ``frtol`` - relative tolerance on the function value + + See Notes for default values and explicit termination conditions. + maxiter : int, optional + The maximum number of iterations of the algorithm to perform. + The default is the maximum possible number of bisections within + the (normal) floating point numbers of the relevant dtype. + callback : callable, optional + An optional user-supplied function to be called before the first + iteration and after each iteration. + Called as ``callback(res)``, where ``res`` is a ``_RichResult`` + similar to that returned by `find_root` (but containing the current + iterate's values of all variables). If `callback` raises a + ``StopIteration``, the algorithm will terminate immediately and + `find_root` will return a result. `callback` must not mutate + `res` or its attributes. + + Returns + ------- + res : _RichResult + An object similar to an instance of `scipy.optimize.OptimizeResult` with the + following attributes. The descriptions are written as though the values will + be scalars; however, if `f` returns an array, the outputs will be + arrays of the same shape. + + success : bool array + ``True`` where the algorithm terminated successfully (status ``0``); + ``False`` otherwise. + status : int array + An integer representing the exit status of the algorithm. + + - ``0`` : The algorithm converged to the specified tolerances. + - ``-1`` : The initial bracket was invalid. + - ``-2`` : The maximum number of iterations was reached. + - ``-3`` : A non-finite value was encountered. + - ``-4`` : Iteration was terminated by `callback`. + - ``1`` : The algorithm is proceeding normally (in `callback` only). + + x : float array + The root of the function, if the algorithm terminated successfully. + f_x : float array + The value of `f` evaluated at `x`. + nfev : int array + The number of abscissae at which `f` was evaluated to find the root. + This is distinct from the number of times `f` is *called* because the + the function may evaluated at multiple points in a single call. + nit : int array + The number of iterations of the algorithm that were performed. + bracket : tuple of float arrays + The lower and upper endpoints of the final bracket. + f_bracket : tuple of float arrays + The value of `f` evaluated at the lower and upper endpoints of the + bracket. + + Notes + ----- + Implemented based on Chandrupatla's original paper [1]_. + + Let: + + - ``a, b = init`` be the left and right endpoints of the initial bracket, + - ``xl`` and ``xr`` be the left and right endpoints of the final bracket, + - ``xmin = xl if abs(f(xl)) <= abs(f(xr)) else xr`` be the final bracket + endpoint with the smaller function value, and + - ``fmin0 = min(f(a), f(b))`` be the minimum of the two values of the + function evaluated at the initial bracket endpoints. + + Then the algorithm is considered to have converged when + + - ``abs(xr - xl) < xatol + abs(xmin) * xrtol`` or + - ``fun(xmin) <= fatol + abs(fmin0) * frtol``. + + This is equivalent to the termination condition described in [1]_ with + ``xrtol = 4e-10``, ``xatol = 1e-5``, and ``fatol = frtol = 0``. 
+ However, the default values of the `tolerances` dictionary are + ``xatol = 4*tiny``, ``xrtol = 4*eps``, ``frtol = 0``, and ``fatol = tiny``, + where ``eps`` and ``tiny`` are the precision and smallest normal number + of the result ``dtype`` of function inputs and outputs. + + References + ---------- + + .. [1] Chandrupatla, Tirupathi R. + "A new hybrid quadratic/bisection algorithm for finding the zero of a + nonlinear function without using derivatives". + Advances in Engineering Software, 28(3), 145-149. + https://doi.org/10.1016/s0965-9978(96)00051-8 + + See Also + -------- + bracket_root + + Examples + -------- + Suppose we wish to find the root of the following function. + + >>> def f(x, c=5): + ... return x**3 - 2*x - c + + First, we must find a valid bracket. The function is not monotonic, + but `bracket_root` may be able to provide a bracket. + + >>> from scipy.optimize import elementwise + >>> res_bracket = elementwise.bracket_root(f, 0) + >>> res_bracket.success + True + >>> res_bracket.bracket + (2.0, 4.0) + + Indeed, the values of the function at the bracket endpoints have + opposite signs. + + >>> res_bracket.f_bracket + (-1.0, 51.0) + + Once we have a valid bracket, `find_root` can be used to provide + a precise root. + + >>> res_root = elementwise.find_root(f, res_bracket.bracket) + >>> res_root.x + 2.0945514815423265 + + The final bracket is only a few ULPs wide, so the error between + this value and the true root cannot be much smaller within values + that are representable in double precision arithmetic. + + >>> import numpy as np + >>> xl, xr = res_root.bracket + >>> (xr - xl) / np.spacing(xl) + 2.0 + >>> res_root.f_bracket + (-8.881784197001252e-16, 9.769962616701378e-15) + + `bracket_root` and `find_root` accept arrays for most arguments. + For instance, to find the root for a few values of the parameter ``c`` + at once: + + >>> c = np.asarray([3, 4, 5]) + >>> res_bracket = elementwise.bracket_root(f, 0, args=(c,)) + >>> res_bracket.bracket + (array([1., 1., 2.]), array([2., 2., 4.])) + >>> res_root = elementwise.find_root(f, res_bracket.bracket, args=(c,)) + >>> res_root.x + array([1.8932892 , 2. , 2.09455148]) + + """ + + def reformat_result(res_in): + res_out = _RichResult() + res_out.status = res_in.status + res_out.success = res_in.success + res_out.x = res_in.x + res_out.f_x = res_in.fun + res_out.nfev = res_in.nfev + res_out.nit = res_in.nit + res_out.bracket = (res_in.xl, res_in.xr) + res_out.f_bracket = (res_in.fl, res_in.fr) + res_out._order_keys = ['success', 'status', 'x', 'f_x', + 'nfev', 'nit', 'bracket', 'f_bracket'] + return res_out + + xl, xr = init + default_tolerances = dict(xatol=None, xrtol=None, fatol=None, frtol=0) + tolerances = {} if tolerances is None else tolerances + default_tolerances.update(tolerances) + tolerances = default_tolerances + + if callable(callback): + def _callback(res): + return callback(reformat_result(res)) + else: + _callback = callback + + res = _chandrupatla(f, xl, xr, args=args, **tolerances, + maxiter=maxiter, callback=_callback) + return reformat_result(res) + + +def find_minimum(f, init, /, *, args=(), tolerances=None, maxiter=100, callback=None): + """Find the minimum of an unimodal, real-valued function of a real variable. + + For each element of the output of `f`, `find_minimum` seeks the scalar minimizer + that minimizes the element. 
This function currently uses Chandrupatla's + bracketing minimization algorithm [1]_ and therefore requires argument `init` + to provide a three-point minimization bracket: ``x1 < x2 < x3`` such that + ``func(x1) >= func(x2) <= func(x3)``, where one of the inequalities is strict. + + Provided a valid bracket, `find_minimum` is guaranteed to converge to a local + minimum that satisfies the provided `tolerances` if the function is continuous + within the bracket. + + This function works elementwise when `init` and `args` contain (broadcastable) + arrays. + + Parameters + ---------- + f : callable + The function whose minimizer is desired. The signature must be:: + + f(x: array, *args) -> array + + where each element of ``x`` is a finite real and ``args`` is a tuple, + which may contain an arbitrary number of arrays that are broadcastable + with ``x``. + + `f` must be an elementwise function: each element ``f(x)[i]`` + must equal ``f(x[i])`` for all indices ``i``. It must not mutate the + array ``x`` or the arrays in ``args``. + + `find_minimum` seeks an array ``x`` such that ``f(x)`` is an array of + local minima. + init : 3-tuple of float array_like + The abscissae of a standard scalar minimization bracket. A bracket is + valid if arrays ``x1, x2, x3 = init`` satisfy ``x1 < x2 < x3`` and + ``func(x1) >= func(x2) <= func(x3)``, where one of the inequalities + is strict. Arrays must be broadcastable with one another and the arrays + of `args`. + args : tuple of array_like, optional + Additional positional array arguments to be passed to `f`. Arrays + must be broadcastable with one another and the arrays of `init`. + If the callable for which the root is desired requires arguments that are + not broadcastable with `x`, wrap that callable with `f` such that `f` + accepts only `x` and broadcastable ``*args``. + tolerances : dictionary of floats, optional + Absolute and relative tolerances on the root and function value. + Valid keys of the dictionary are: + + - ``xatol`` - absolute tolerance on the root + - ``xrtol`` - relative tolerance on the root + - ``fatol`` - absolute tolerance on the function value + - ``frtol`` - relative tolerance on the function value + + See Notes for default values and explicit termination conditions. + maxiter : int, default: 100 + The maximum number of iterations of the algorithm to perform. + callback : callable, optional + An optional user-supplied function to be called before the first + iteration and after each iteration. + Called as ``callback(res)``, where ``res`` is a ``_RichResult`` + similar to that returned by `find_minimum` (but containing the current + iterate's values of all variables). If `callback` raises a + ``StopIteration``, the algorithm will terminate immediately and + `find_root` will return a result. `callback` must not mutate + `res` or its attributes. + + Returns + ------- + res : _RichResult + An object similar to an instance of `scipy.optimize.OptimizeResult` with the + following attributes. The descriptions are written as though the values will + be scalars; however, if `f` returns an array, the outputs will be + arrays of the same shape. + + success : bool array + ``True`` where the algorithm terminated successfully (status ``0``); + ``False`` otherwise. + status : int array + An integer representing the exit status of the algorithm. + + - ``0`` : The algorithm converged to the specified tolerances. + - ``-1`` : The algorithm encountered an invalid bracket. + - ``-2`` : The maximum number of iterations was reached. 
+ - ``-3`` : A non-finite value was encountered. + - ``-4`` : Iteration was terminated by `callback`. + - ``1`` : The algorithm is proceeding normally (in `callback` only). + + x : float array + The minimizer of the function, if the algorithm terminated successfully. + f_x : float array + The value of `f` evaluated at `x`. + nfev : int array + The number of abscissae at which `f` was evaluated to find the root. + This is distinct from the number of times `f` is *called* because the + the function may evaluated at multiple points in a single call. + nit : int array + The number of iterations of the algorithm that were performed. + bracket : tuple of float arrays + The final three-point bracket. + f_bracket : tuple of float arrays + The value of `f` evaluated at the bracket points. + + Notes + ----- + Implemented based on Chandrupatla's original paper [1]_. + + If ``xl < xm < xr`` are the points of the bracket and ``fl >= fm <= fr`` + (where one of the inequalities is strict) are the values of `f` evaluated + at those points, then the algorithm is considered to have converged when: + + - ``xr - xl <= abs(xm)*xrtol + xatol`` or + - ``(fl - 2*fm + fr)/2 <= abs(fm)*frtol + fatol``. + + Note that first of these differs from the termination conditions described + in [1]_. + + The default value of `xrtol` is the square root of the precision of the + appropriate dtype, and ``xatol = fatol = frtol`` is the smallest normal + number of the appropriate dtype. + + References + ---------- + + .. [1] Chandrupatla, Tirupathi R. (1998). + "An efficient quadratic fit-sectioning algorithm for minimization + without derivatives". + Computer Methods in Applied Mechanics and Engineering, 152 (1-2), + 211-217. https://doi.org/10.1016/S0045-7825(97)00190-4 + + See Also + -------- + bracket_minimum + + Examples + -------- + Suppose we wish to minimize the following function. + + >>> def f(x, c=1): + ... return (x - c)**2 + 2 + + First, we must find a valid bracket. The function is unimodal, + so `bracket_minium` will easily find a bracket. + + >>> from scipy.optimize import elementwise + >>> res_bracket = elementwise.bracket_minimum(f, 0) + >>> res_bracket.success + True + >>> res_bracket.bracket + (0.0, 0.5, 1.5) + + Indeed, the bracket points are ordered and the function value + at the middle bracket point is less than at the surrounding + points. + + >>> xl, xm, xr = res_bracket.bracket + >>> fl, fm, fr = res_bracket.f_bracket + >>> (xl < xm < xr) and (fl > fm <= fr) + True + + Once we have a valid bracket, `find_minimum` can be used to provide + an estimate of the minimizer. + + >>> res_minimum = elementwise.find_minimum(f, res_bracket.bracket) + >>> res_minimum.x + 1.0000000149011612 + + The function value changes by only a few ULPs within the bracket, so + the minimizer cannot be determined much more precisely by evaluating + the function alone (i.e. we would need its derivative to do better). + + >>> import numpy as np + >>> fl, fm, fr = res_minimum.f_bracket + >>> (fl - fm) / np.spacing(fm), (fr - fm) / np.spacing(fm) + (0.0, 2.0) + + Therefore, a precise minimum of the function is given by: + + >>> res_minimum.f_x + 2.0 + + `bracket_minimum` and `find_minimum` accept arrays for most arguments. + For instance, to find the minimizers and minima for a few values of the + parameter ``c`` at once: + + >>> c = np.asarray([1, 1.5, 2]) + >>> res_bracket = elementwise.bracket_minimum(f, 0, args=(c,)) + >>> res_bracket.bracket + (array([0. 
, 0.5, 0.5]), array([0.5, 1.5, 1.5]), array([1.5, 2.5, 2.5])) + >>> res_minimum = elementwise.find_minimum(f, res_bracket.bracket, args=(c,)) + >>> res_minimum.x + array([1.00000001, 1.5 , 2. ]) + >>> res_minimum.f_x + array([2., 2., 2.]) + + """ + + def reformat_result(res_in): + res_out = _RichResult() + res_out.status = res_in.status + res_out.success = res_in.success + res_out.x = res_in.x + res_out.f_x = res_in.fun + res_out.nfev = res_in.nfev + res_out.nit = res_in.nit + res_out.bracket = (res_in.xl, res_in.xm, res_in.xr) + res_out.f_bracket = (res_in.fl, res_in.fm, res_in.fr) + res_out._order_keys = ['success', 'status', 'x', 'f_x', + 'nfev', 'nit', 'bracket', 'f_bracket'] + return res_out + + xl, xm, xr = init + default_tolerances = dict(xatol=None, xrtol=None, fatol=None, frtol=None) + tolerances = {} if tolerances is None else tolerances + default_tolerances.update(tolerances) + tolerances = default_tolerances + + if callable(callback): + def _callback(res): + return callback(reformat_result(res)) + else: + _callback = callback + + res = _chandrupatla_minimize(f, xl, xm, xr, args=args, **tolerances, + maxiter=maxiter, callback=_callback) + return reformat_result(res) + + +def bracket_root(f, xl0, xr0=None, *, xmin=None, xmax=None, factor=None, args=(), + maxiter=1000): + """Bracket the root of a monotonic, real-valued function of a real variable. + + For each element of the output of `f`, `bracket_root` seeks the scalar + bracket endpoints ``xl`` and ``xr`` such that ``sign(f(xl)) == -sign(f(xr))`` + elementwise. + + The function is guaranteed to find a valid bracket if the function is monotonic, + but it may find a bracket under other conditions. + + This function works elementwise when `xl0`, `xr0`, `xmin`, `xmax`, `factor`, and + the elements of `args` are (mutually broadcastable) arrays. + + Parameters + ---------- + f : callable + The function for which the root is to be bracketed. The signature must be:: + + f(x: array, *args) -> array + + where each element of ``x`` is a finite real and ``args`` is a tuple, + which may contain an arbitrary number of arrays that are broadcastable + with ``x``. + + `f` must be an elementwise function: each element ``f(x)[i]`` + must equal ``f(x[i])`` for all indices ``i``. It must not mutate the + array ``x`` or the arrays in ``args``. + xl0, xr0: float array_like + Starting guess of bracket, which need not contain a root. If `xr0` is + not provided, ``xr0 = xl0 + 1``. Must be broadcastable with all other + array inputs. + xmin, xmax : float array_like, optional + Minimum and maximum allowable endpoints of the bracket, inclusive. Must + be broadcastable with all other array inputs. + factor : float array_like, default: 2 + The factor used to grow the bracket. See Notes. + args : tuple of array_like, optional + Additional positional array arguments to be passed to `f`. + If the callable for which the root is desired requires arguments that are + not broadcastable with `x`, wrap that callable with `f` such that `f` + accepts only `x` and broadcastable ``*args``. + maxiter : int, default: 1000 + The maximum number of iterations of the algorithm to perform. + + Returns + ------- + res : _RichResult + An object similar to an instance of `scipy.optimize.OptimizeResult` with the + following attributes. The descriptions are written as though the values will + be scalars; however, if `f` returns an array, the outputs will be + arrays of the same shape. 
+ + success : bool array + ``True`` where the algorithm terminated successfully (status ``0``); + ``False`` otherwise. + status : int array + An integer representing the exit status of the algorithm. + + - ``0`` : The algorithm produced a valid bracket. + - ``-1`` : The bracket expanded to the allowable limits without success. + - ``-2`` : The maximum number of iterations was reached. + - ``-3`` : A non-finite value was encountered. + - ``-4`` : Iteration was terminated by `callback`. + - ``-5``: The initial bracket does not satisfy`xmin <= xl0 < xr0 < xmax`. + + bracket : 2-tuple of float arrays + The lower and upper endpoints of the bracket, if the algorithm + terminated successfully. + f_bracket : 2-tuple of float arrays + The values of `f` evaluated at the endpoints of ``res.bracket``, + respectively. + nfev : int array + The number of abscissae at which `f` was evaluated to find the root. + This is distinct from the number of times `f` is *called* because the + the function may evaluated at multiple points in a single call. + nit : int array + The number of iterations of the algorithm that were performed. + + Notes + ----- + This function generalizes an algorithm found in pieces throughout the + `scipy.stats` codebase. The strategy is to iteratively grow the bracket `(l, r)` + until ``f(l) < 0 < f(r)`` or ``f(r) < 0 < f(l)``. The bracket grows to the left + as follows. + + - If `xmin` is not provided, the distance between `xl0` and `l` is iteratively + increased by `factor`. + - If `xmin` is provided, the distance between `xmin` and `l` is iteratively + decreased by `factor`. Note that this also *increases* the bracket size. + + Growth of the bracket to the right is analogous. + + Growth of the bracket in one direction stops when the endpoint is no longer + finite, the function value at the endpoint is no longer finite, or the + endpoint reaches its limiting value (`xmin` or `xmax`). Iteration terminates + when the bracket stops growing in both directions, the bracket surrounds + the root, or a root is found (by chance). + + If two brackets are found - that is, a bracket is found on both sides in + the same iteration, the smaller of the two is returned. + + If roots of the function are found, both `xl` and `xr` are set to the + leftmost root. + + See Also + -------- + find_root + + Examples + -------- + Suppose we wish to find the root of the following function. + + >>> def f(x, c=5): + ... return x**3 - 2*x - c + + First, we must find a valid bracket. The function is not monotonic, + but `bracket_root` may be able to provide a bracket. + + >>> from scipy.optimize import elementwise + >>> res_bracket = elementwise.bracket_root(f, 0) + >>> res_bracket.success + True + >>> res_bracket.bracket + (2.0, 4.0) + + Indeed, the values of the function at the bracket endpoints have + opposite signs. + + >>> res_bracket.f_bracket + (-1.0, 51.0) + + Once we have a valid bracket, `find_root` can be used to provide + a precise root. + + >>> res_root = elementwise.find_root(f, res_bracket.bracket) + >>> res_root.x + 2.0945514815423265 + + `bracket_root` and `find_root` accept arrays for most arguments. + For instance, to find the root for a few values of the parameter ``c`` + at once: + + >>> import numpy as np + >>> c = np.asarray([3, 4, 5]) + >>> res_bracket = elementwise.bracket_root(f, 0, args=(c,)) + >>> res_bracket.bracket + (array([1., 1., 2.]), array([2., 2., 4.])) + >>> res_root = elementwise.find_root(f, res_bracket.bracket, args=(c,)) + >>> res_root.x + array([1.8932892 , 2. 
, 2.09455148]) + + """ # noqa: E501 + + res = _bracket_root(f, xl0, xr0=xr0, xmin=xmin, xmax=xmax, factor=factor, + args=args, maxiter=maxiter) + res.bracket = res.xl, res.xr + res.f_bracket = res.fl, res.fr + del res.xl + del res.xr + del res.fl + del res.fr + return res + + +def bracket_minimum(f, xm0, *, xl0=None, xr0=None, xmin=None, xmax=None, + factor=None, args=(), maxiter=1000): + """Bracket the minimum of a unimodal, real-valued function of a real variable. + + For each element of the output of `f`, `bracket_minimum` seeks the scalar + bracket points ``xl < xm < xr`` such that ``fl >= fm <= fr`` where one of the + inequalities is strict. + + The function is guaranteed to find a valid bracket if the function is + strongly unimodal, but it may find a bracket under other conditions. + + This function works elementwise when `xm0`, `xl0`, `xr0`, `xmin`, `xmax`, `factor`, + and the elements of `args` are (mutually broadcastable) arrays. + + Parameters + ---------- + f : callable + The function for which the root is to be bracketed. The signature must be:: + + f(x: array, *args) -> array + + where each element of ``x`` is a finite real and ``args`` is a tuple, + which may contain an arbitrary number of arrays that are broadcastable + with ``x``. + + `f` must be an elementwise function: each element ``f(x)[i]`` + must equal ``f(x[i])`` for all indices ``i``. It must not mutate the + array ``x`` or the arrays in ``args``. + xm0: float array_like + Starting guess for middle point of bracket. + xl0, xr0: float array_like, optional + Starting guesses for left and right endpoints of the bracket. Must + be broadcastable with all other array inputs. + xmin, xmax : float array_like, optional + Minimum and maximum allowable endpoints of the bracket, inclusive. Must + be broadcastable with all other array inputs. + factor : float array_like, default: 2 + The factor used to grow the bracket. See Notes. + args : tuple of array_like, optional + Additional positional array arguments to be passed to `f`. + If the callable for which the root is desired requires arguments that are + not broadcastable with `x`, wrap that callable with `f` such that `f` + accepts only `x` and broadcastable ``*args``. + maxiter : int, default: 1000 + The maximum number of iterations of the algorithm to perform. + + Returns + ------- + res : _RichResult + An object similar to an instance of `scipy.optimize.OptimizeResult` with the + following attributes. The descriptions are written as though the values will + be scalars; however, if `f` returns an array, the outputs will be + arrays of the same shape. + + success : bool array + ``True`` where the algorithm terminated successfully (status ``0``); + ``False`` otherwise. + status : int array + An integer representing the exit status of the algorithm. + + - ``0`` : The algorithm produced a valid bracket. + - ``-1`` : The bracket expanded to the allowable limits. Assuming + unimodality, this implies the endpoint at the limit is a minimizer. + - ``-2`` : The maximum number of iterations was reached. + - ``-3`` : A non-finite value was encountered. + - ``-4`` : ``None`` shall pass. + - ``-5`` : The initial bracket does not satisfy + `xmin <= xl0 < xm0 < xr0 <= xmax`. + + bracket : 3-tuple of float arrays + The left, middle, and right points of the bracket, if the algorithm + terminated successfully. + f_bracket : 3-tuple of float arrays + The function value at the left, middle, and right points of the bracket. 
+ nfev : int array + The number of abscissae at which `f` was evaluated to find the root. + This is distinct from the number of times `f` is *called* because the + the function may evaluated at multiple points in a single call. + nit : int array + The number of iterations of the algorithm that were performed. + + Notes + ----- + Similar to `scipy.optimize.bracket`, this function seeks to find real + points ``xl < xm < xr`` such that ``f(xl) >= f(xm)`` and ``f(xr) >= f(xm)``, + where at least one of the inequalities is strict. Unlike `scipy.optimize.bracket`, + this function can operate in a vectorized manner on array input, so long as + the input arrays are broadcastable with each other. Also unlike + `scipy.optimize.bracket`, users may specify minimum and maximum endpoints + for the desired bracket. + + Given an initial trio of points ``xl = xl0``, ``xm = xm0``, ``xr = xr0``, + the algorithm checks if these points already give a valid bracket. If not, + a new endpoint, ``w`` is chosen in the "downhill" direction, ``xm`` becomes the new + opposite endpoint, and either `xl` or `xr` becomes the new middle point, + depending on which direction is downhill. The algorithm repeats from here. + + The new endpoint `w` is chosen differently depending on whether or not a + boundary `xmin` or `xmax` has been set in the downhill direction. Without + loss of generality, suppose the downhill direction is to the right, so that + ``f(xl) > f(xm) > f(xr)``. If there is no boundary to the right, then `w` + is chosen to be ``xr + factor * (xr - xm)`` where `factor` is controlled by + the user (defaults to 2.0) so that step sizes increase in geometric proportion. + If there is a boundary, `xmax` in this case, then `w` is chosen to be + ``xmax - (xmax - xr)/factor``, with steps slowing to a stop at + `xmax`. This cautious approach ensures that a minimum near but distinct from + the boundary isn't missed while also detecting whether or not the `xmax` is + a minimizer when `xmax` is reached after a finite number of steps. + + See Also + -------- + scipy.optimize.bracket + scipy.optimize.elementwise.find_minimum + + Examples + -------- + Suppose we wish to minimize the following function. + + >>> def f(x, c=1): + ... return (x - c)**2 + 2 + + First, we must find a valid bracket. The function is unimodal, + so `bracket_minium` will easily find a bracket. + + >>> from scipy.optimize import elementwise + >>> res_bracket = elementwise.bracket_minimum(f, 0) + >>> res_bracket.success + True + >>> res_bracket.bracket + (0.0, 0.5, 1.5) + + Indeed, the bracket points are ordered and the function value + at the middle bracket point is less than at the surrounding + points. + + >>> xl, xm, xr = res_bracket.bracket + >>> fl, fm, fr = res_bracket.f_bracket + >>> (xl < xm < xr) and (fl > fm <= fr) + True + + Once we have a valid bracket, `find_minimum` can be used to provide + an estimate of the minimizer. + + >>> res_minimum = elementwise.find_minimum(f, res_bracket.bracket) + >>> res_minimum.x + 1.0000000149011612 + + `bracket_minimum` and `find_minimum` accept arrays for most arguments. + For instance, to find the minimizers and minima for a few values of the + parameter ``c`` at once: + + >>> import numpy as np + >>> c = np.asarray([1, 1.5, 2]) + >>> res_bracket = elementwise.bracket_minimum(f, 0, args=(c,)) + >>> res_bracket.bracket + (array([0. 
, 0.5, 0.5]), array([0.5, 1.5, 1.5]), array([1.5, 2.5, 2.5])) + >>> res_minimum = elementwise.find_minimum(f, res_bracket.bracket, args=(c,)) + >>> res_minimum.x + array([1.00000001, 1.5 , 2. ]) + >>> res_minimum.f_x + array([2., 2., 2.]) + + """ # noqa: E501 + + res = _bracket_minimum(f, xm0, xl0=xl0, xr0=xr0, xmin=xmin, xmax=xmax, + factor=factor, args=args, maxiter=maxiter) + res.bracket = res.xl, res.xm, res.xr + res.f_bracket = res.fl, res.fm, res.fr + del res.xl + del res.xm + del res.xr + del res.fl + del res.fm + del res.fr + return res diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_group_columns.cpython-310-x86_64-linux-gnu.so b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_group_columns.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..426fa2565038a67cd669485576802658a2f48d8f Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_group_columns.cpython-310-x86_64-linux-gnu.so differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_hessian_update_strategy.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_hessian_update_strategy.py new file mode 100644 index 0000000000000000000000000000000000000000..15989969349025fd42fba9836b4f5d882c3f6791 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_hessian_update_strategy.py @@ -0,0 +1,479 @@ +"""Hessian update strategies for quasi-Newton optimization methods.""" +import numpy as np +from numpy.linalg import norm +from scipy.linalg import get_blas_funcs, issymmetric +from warnings import warn + + +__all__ = ['HessianUpdateStrategy', 'BFGS', 'SR1'] + + +class HessianUpdateStrategy: + """Interface for implementing Hessian update strategies. + + Many optimization methods make use of Hessian (or inverse Hessian) + approximations, such as the quasi-Newton methods BFGS, SR1, L-BFGS. + Some of these approximations, however, do not actually need to store + the entire matrix or can compute the internal matrix product with a + given vector in a very efficiently manner. This class serves as an + abstract interface between the optimization algorithm and the + quasi-Newton update strategies, giving freedom of implementation + to store and update the internal matrix as efficiently as possible. + Different choices of initialization and update procedure will result + in different quasi-Newton strategies. + + Four methods should be implemented in derived classes: ``initialize``, + ``update``, ``dot`` and ``get_matrix``. The matrix multiplication + operator ``@`` is also defined to call the ``dot`` method. + + Notes + ----- + Any instance of a class that implements this interface, + can be accepted by the method ``minimize`` and used by + the compatible solvers to approximate the Hessian (or + inverse Hessian) used by the optimization algorithms. + """ + + def initialize(self, n, approx_type): + """Initialize internal matrix. + + Allocate internal memory for storing and updating + the Hessian or its inverse. + + Parameters + ---------- + n : int + Problem dimension. + approx_type : {'hess', 'inv_hess'} + Selects either the Hessian or the inverse Hessian. + When set to 'hess' the Hessian will be stored and updated. 
+ When set to 'inv_hess' its inverse will be used instead. + """ + raise NotImplementedError("The method ``initialize(n, approx_type)``" + " is not implemented.") + + def update(self, delta_x, delta_grad): + """Update internal matrix. + + Update Hessian matrix or its inverse (depending on how 'approx_type' + is defined) using information about the last evaluated points. + + Parameters + ---------- + delta_x : ndarray + The difference between two points the gradient + function have been evaluated at: ``delta_x = x2 - x1``. + delta_grad : ndarray + The difference between the gradients: + ``delta_grad = grad(x2) - grad(x1)``. + """ + raise NotImplementedError("The method ``update(delta_x, delta_grad)``" + " is not implemented.") + + def dot(self, p): + """Compute the product of the internal matrix with the given vector. + + Parameters + ---------- + p : array_like + 1-D array representing a vector. + + Returns + ------- + Hp : array + 1-D represents the result of multiplying the approximation matrix + by vector p. + """ + raise NotImplementedError("The method ``dot(p)``" + " is not implemented.") + + def get_matrix(self): + """Return current internal matrix. + + Returns + ------- + H : ndarray, shape (n, n) + Dense matrix containing either the Hessian + or its inverse (depending on how 'approx_type' + is defined). + """ + raise NotImplementedError("The method ``get_matrix(p)``" + " is not implemented.") + + def __matmul__(self, p): + return self.dot(p) + + +class FullHessianUpdateStrategy(HessianUpdateStrategy): + """Hessian update strategy with full dimensional internal representation. + """ + _syr = get_blas_funcs('syr', dtype='d') # Symmetric rank 1 update + _syr2 = get_blas_funcs('syr2', dtype='d') # Symmetric rank 2 update + # Symmetric matrix-vector product + _symv = get_blas_funcs('symv', dtype='d') + + def __init__(self, init_scale='auto'): + self.init_scale = init_scale + # Until initialize is called we can't really use the class, + # so it makes sense to set everything to None. + self.first_iteration = None + self.approx_type = None + self.B = None + self.H = None + + def initialize(self, n, approx_type): + """Initialize internal matrix. + + Allocate internal memory for storing and updating + the Hessian or its inverse. + + Parameters + ---------- + n : int + Problem dimension. + approx_type : {'hess', 'inv_hess'} + Selects either the Hessian or the inverse Hessian. + When set to 'hess' the Hessian will be stored and updated. + When set to 'inv_hess' its inverse will be used instead. + """ + self.first_iteration = True + self.n = n + self.approx_type = approx_type + if approx_type not in ('hess', 'inv_hess'): + raise ValueError("`approx_type` must be 'hess' or 'inv_hess'.") + # Create matrix + if self.approx_type == 'hess': + self.B = np.eye(n, dtype=float) + else: + self.H = np.eye(n, dtype=float) + + def _auto_scale(self, delta_x, delta_grad): + # Heuristic to scale matrix at first iteration. + # Described in Nocedal and Wright "Numerical Optimization" + # p.143 formula (6.20). + s_norm2 = np.dot(delta_x, delta_x) + y_norm2 = np.dot(delta_grad, delta_grad) + ys = np.abs(np.dot(delta_grad, delta_x)) + if ys == 0.0 or y_norm2 == 0 or s_norm2 == 0: + return 1 + if self.approx_type == 'hess': + return y_norm2 / ys + else: + return ys / y_norm2 + + def _update_implementation(self, delta_x, delta_grad): + raise NotImplementedError("The method ``_update_implementation``" + " is not implemented.") + + def update(self, delta_x, delta_grad): + """Update internal matrix. 
+ + Update Hessian matrix or its inverse (depending on how 'approx_type' + is defined) using information about the last evaluated points. + + Parameters + ---------- + delta_x : ndarray + The difference between two points the gradient + function have been evaluated at: ``delta_x = x2 - x1``. + delta_grad : ndarray + The difference between the gradients: + ``delta_grad = grad(x2) - grad(x1)``. + """ + if np.all(delta_x == 0.0): + return + if np.all(delta_grad == 0.0): + warn('delta_grad == 0.0. Check if the approximated ' + 'function is linear. If the function is linear ' + 'better results can be obtained by defining the ' + 'Hessian as zero instead of using quasi-Newton ' + 'approximations.', + UserWarning, stacklevel=2) + return + if self.first_iteration: + # Get user specific scale + if isinstance(self.init_scale, str) and self.init_scale == "auto": + scale = self._auto_scale(delta_x, delta_grad) + else: + scale = self.init_scale + + # Check for complex: numpy will silently cast a complex array to + # a real one but not so for scalar as it raises a TypeError. + # Checking here brings a consistent behavior. + replace = False + if np.size(scale) == 1: + # to account for the legacy behavior having the exact same cast + scale = float(scale) + elif np.iscomplexobj(scale): + raise TypeError("init_scale contains complex elements, " + "must be real.") + else: # test explicitly for allowed shapes and values + replace = True + if self.approx_type == 'hess': + shape = np.shape(self.B) + dtype = self.B.dtype + else: + shape = np.shape(self.H) + dtype = self.H.dtype + # copy, will replace the original + scale = np.array(scale, dtype=dtype, copy=True) + + # it has to match the shape of the matrix for the multiplication, + # no implicit broadcasting is allowed + if shape != (init_shape := np.shape(scale)): + raise ValueError("If init_scale is an array, it must have the " + f"dimensions of the hess/inv_hess: {shape}." + f" Got {init_shape}.") + if not issymmetric(scale): + raise ValueError("If init_scale is an array, it must be" + " symmetric (passing scipy.linalg.issymmetric)" + " to be an approximation of a hess/inv_hess.") + + # Scale initial matrix with ``scale * np.eye(n)`` or replace + # This is not ideal, we could assign the scale directly in + # initialize, but we would need to + if self.approx_type == 'hess': + if replace: + self.B = scale + else: + self.B *= scale + else: + if replace: + self.H = scale + else: + self.H *= scale + self.first_iteration = False + self._update_implementation(delta_x, delta_grad) + + def dot(self, p): + """Compute the product of the internal matrix with the given vector. + + Parameters + ---------- + p : array_like + 1-D array representing a vector. + + Returns + ------- + Hp : array + 1-D represents the result of multiplying the approximation matrix + by vector p. + """ + if self.approx_type == 'hess': + return self._symv(1, self.B, p) + else: + return self._symv(1, self.H, p) + + def get_matrix(self): + """Return the current internal matrix. + + Returns + ------- + M : ndarray, shape (n, n) + Dense matrix containing either the Hessian or its inverse + (depending on how `approx_type` was defined). + """ + if self.approx_type == 'hess': + M = np.copy(self.B) + else: + M = np.copy(self.H) + li = np.tril_indices_from(M, k=-1) + M[li] = M.T[li] + return M + + +class BFGS(FullHessianUpdateStrategy): + """Broyden-Fletcher-Goldfarb-Shanno (BFGS) Hessian update strategy. 
+ + Parameters + ---------- + exception_strategy : {'skip_update', 'damp_update'}, optional + Define how to proceed when the curvature condition is violated. + Set it to 'skip_update' to just skip the update. Or, alternatively, + set it to 'damp_update' to interpolate between the actual BFGS + result and the unmodified matrix. Both exceptions strategies + are explained in [1]_, p.536-537. + min_curvature : float + This number, scaled by a normalization factor, defines the + minimum curvature ``dot(delta_grad, delta_x)`` allowed to go + unaffected by the exception strategy. By default is equal to + 1e-8 when ``exception_strategy = 'skip_update'`` and equal + to 0.2 when ``exception_strategy = 'damp_update'``. + init_scale : {float, np.array, 'auto'} + This parameter can be used to initialize the Hessian or its + inverse. When a float is given, the relevant array is initialized + to ``np.eye(n) * init_scale``, where ``n`` is the problem dimension. + Alternatively, if a precisely ``(n, n)`` shaped, symmetric array is given, + this array will be used. Otherwise an error is generated. + Set it to 'auto' in order to use an automatic heuristic for choosing + the initial scale. The heuristic is described in [1]_, p.143. + The default is 'auto'. + + Notes + ----- + The update is based on the description in [1]_, p.140. + + References + ---------- + .. [1] Nocedal, Jorge, and Stephen J. Wright. "Numerical optimization" + Second Edition (2006). + """ + + def __init__(self, exception_strategy='skip_update', min_curvature=None, + init_scale='auto'): + if exception_strategy == 'skip_update': + if min_curvature is not None: + self.min_curvature = min_curvature + else: + self.min_curvature = 1e-8 + elif exception_strategy == 'damp_update': + if min_curvature is not None: + self.min_curvature = min_curvature + else: + self.min_curvature = 0.2 + else: + raise ValueError("`exception_strategy` must be 'skip_update' " + "or 'damp_update'.") + + super().__init__(init_scale) + self.exception_strategy = exception_strategy + + def _update_inverse_hessian(self, ys, Hy, yHy, s): + """Update the inverse Hessian matrix. + + BFGS update using the formula: + + ``H <- H + ((H*y).T*y + s.T*y)/(s.T*y)^2 * (s*s.T) + - 1/(s.T*y) * ((H*y)*s.T + s*(H*y).T)`` + + where ``s = delta_x`` and ``y = delta_grad``. This formula is + equivalent to (6.17) in [1]_ written in a more efficient way + for implementation. + + References + ---------- + .. [1] Nocedal, Jorge, and Stephen J. Wright. "Numerical optimization" + Second Edition (2006). + """ + self.H = self._syr2(-1.0 / ys, s, Hy, a=self.H) + self.H = self._syr((ys + yHy) / ys ** 2, s, a=self.H) + + def _update_hessian(self, ys, Bs, sBs, y): + """Update the Hessian matrix. + + BFGS update using the formula: + + ``B <- B - (B*s)*(B*s).T/s.T*(B*s) + y*y^T/s.T*y`` + + where ``s`` is short for ``delta_x`` and ``y`` is short + for ``delta_grad``. Formula (6.19) in [1]_. + + References + ---------- + .. [1] Nocedal, Jorge, and Stephen J. Wright. "Numerical optimization" + Second Edition (2006). + """ + self.B = self._syr(1.0 / ys, y, a=self.B) + self.B = self._syr(-1.0 / sBs, Bs, a=self.B) + + def _update_implementation(self, delta_x, delta_grad): + # Auxiliary variables w and z + if self.approx_type == 'hess': + w = delta_x + z = delta_grad + else: + w = delta_grad + z = delta_x + # Do some common operations + wz = np.dot(w, z) + Mw = self @ w + wMw = Mw.dot(w) + # Guarantee that wMw > 0 by reinitializing matrix. 
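# --- Editorial sketch (not part of this module) ---------------------------
# A short, hypothetical 2-D illustration of the `init_scale` choices the BFGS
# docstring above describes: the default 'auto' heuristic, a scalar multiple
# of the identity, or a full symmetric (n, n) array.
import numpy as np
from scipy.optimize import BFGS

n = 2
h0 = np.array([[2.0, 0.5],
               [0.5, 1.0]])  # symmetric (n, n) initial approximation
for strategy in (BFGS(init_scale='auto'), BFGS(init_scale=3.0), BFGS(init_scale=h0)):
    strategy.initialize(n, 'hess')
    strategy.update(np.array([0.1, -0.2]), np.array([0.3, 0.1]))
    print(strategy.get_matrix())  # dense Hessian approximation after one update
# ---------------------------------------------------------------------------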
+ # While this is always true in exact arithmetic, + # indefinite matrix may appear due to roundoff errors. + if wMw <= 0.0: + scale = self._auto_scale(delta_x, delta_grad) + # Reinitialize matrix + if self.approx_type == 'hess': + self.B = scale * np.eye(self.n, dtype=float) + else: + self.H = scale * np.eye(self.n, dtype=float) + # Do common operations for new matrix + Mw = self @ w + wMw = Mw.dot(w) + # Check if curvature condition is violated + if wz <= self.min_curvature * wMw: + # If the option 'skip_update' is set + # we just skip the update when the condition + # is violated. + if self.exception_strategy == 'skip_update': + return + # If the option 'damp_update' is set we + # interpolate between the actual BFGS + # result and the unmodified matrix. + elif self.exception_strategy == 'damp_update': + update_factor = (1-self.min_curvature) / (1 - wz/wMw) + z = update_factor*z + (1-update_factor)*Mw + wz = np.dot(w, z) + # Update matrix + if self.approx_type == 'hess': + self._update_hessian(wz, Mw, wMw, z) + else: + self._update_inverse_hessian(wz, Mw, wMw, z) + + +class SR1(FullHessianUpdateStrategy): + """Symmetric-rank-1 Hessian update strategy. + + Parameters + ---------- + min_denominator : float + This number, scaled by a normalization factor, + defines the minimum denominator magnitude allowed + in the update. When the condition is violated we skip + the update. By default uses ``1e-8``. + init_scale : {float, np.array, 'auto'}, optional + This parameter can be used to initialize the Hessian or its + inverse. When a float is given, the relevant array is initialized + to ``np.eye(n) * init_scale``, where ``n`` is the problem dimension. + Alternatively, if a precisely ``(n, n)`` shaped, symmetric array is given, + this array will be used. Otherwise an error is generated. + Set it to 'auto' in order to use an automatic heuristic for choosing + the initial scale. The heuristic is described in [1]_, p.143. + The default is 'auto'. + + Notes + ----- + The update is based on the description in [1]_, p.144-146. + + References + ---------- + .. [1] Nocedal, Jorge, and Stephen J. Wright. "Numerical optimization" + Second Edition (2006). + """ + + def __init__(self, min_denominator=1e-8, init_scale='auto'): + self.min_denominator = min_denominator + super().__init__(init_scale) + + def _update_implementation(self, delta_x, delta_grad): + # Auxiliary variables w and z + if self.approx_type == 'hess': + w = delta_x + z = delta_grad + else: + w = delta_grad + z = delta_x + # Do some common operations + Mw = self @ w + z_minus_Mw = z - Mw + denominator = np.dot(w, z_minus_Mw) + # If the denominator is too small + # we just skip the update. 
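# --- Editorial sketch (not part of this module) ---------------------------
# The update strategies can also be driven by hand through the
# initialize/update/get_matrix interface defined earlier in this file.
# A small sketch using SR1 on an assumed quadratic objective x0**2 + 4*x1**2.
import numpy as np
from scipy.optimize import SR1

def grad(x):
    # gradient of f(x) = x0**2 + 4*x1**2
    return np.array([2.0 * x[0], 8.0 * x[1]])

approx = SR1()
approx.initialize(n=2, approx_type='hess')
x_old = np.array([1.0, 1.0])
x_new = np.array([0.5, 0.25])
approx.update(x_new - x_old, grad(x_new) - grad(x_old))
print(approx.get_matrix())  # (2, 2) symmetric Hessian approximation
# ---------------------------------------------------------------------------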
+ if np.abs(denominator) <= self.min_denominator*norm(w)*norm(z_minus_Mw): + return + # Update matrix + if self.approx_type == 'hess': + self.B = self._syr(1/denominator, z_minus_Mw, a=self.B) + else: + self.H = self._syr(1/denominator, z_minus_Mw, a=self.H) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_highspy/__init__.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_highspy/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_highspy/__pycache__/__init__.cpython-310.pyc b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_highspy/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b59235e9a48f84b650a4fd8c235c25546471a6a8 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_highspy/__pycache__/__init__.cpython-310.pyc differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_highspy/__pycache__/_highs_wrapper.cpython-310.pyc b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_highspy/__pycache__/_highs_wrapper.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..10e86fa13900f04817a48f01838411d01203c55f Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_highspy/__pycache__/_highs_wrapper.cpython-310.pyc differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_highspy/_highs_wrapper.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_highspy/_highs_wrapper.py new file mode 100644 index 0000000000000000000000000000000000000000..c88f0fb14c627b10584995fbde17f3a0e445b0cf --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_highspy/_highs_wrapper.py @@ -0,0 +1,338 @@ +from warnings import warn + +import numpy as np +import scipy.optimize._highspy._core as _h # type: ignore[import-not-found] +from scipy.optimize._highspy import _highs_options as hopt # type: ignore[attr-defined] +from scipy.optimize import OptimizeWarning + + +def _highs_wrapper(c, indptr, indices, data, lhs, rhs, lb, ub, integrality, options): + '''Solve linear programs using HiGHS [1]_. + + Assume problems of the form: + + MIN c.T @ x + s.t. lhs <= A @ x <= rhs + lb <= x <= ub + + Parameters + ---------- + c : 1-D array, (n,) + Array of objective value coefficients. + astart : 1-D array + CSC format index array. + aindex : 1-D array + CSC format index array. + avalue : 1-D array + Data array of the matrix. + lhs : 1-D array (or None), (m,) + Array of left hand side values of the inequality constraints. + If ``lhs=None``, then an array of ``-inf`` is assumed. + rhs : 1-D array, (m,) + Array of right hand side values of the inequality constraints. + lb : 1-D array (or None), (n,) + Lower bounds on solution variables x. If ``lb=None``, then an + array of all `0` is assumed. + ub : 1-D array (or None), (n,) + Upper bounds on solution variables x. If ``ub=None``, then an + array of ``inf`` is assumed. 
+ options : dict + A dictionary of solver options + + Returns + ------- + res : dict + + If model_status is one of kOptimal, + kObjectiveBound, kTimeLimit, + kIterationLimit: + + - ``status`` : HighsModelStatus + Model status code. + + - ``message`` : str + Message corresponding to model status code. + + - ``x`` : list + Solution variables. + + - ``slack`` : list + Slack variables. + + - ``lambda`` : list + Lagrange multipliers associated with the constraints + Ax = b. + + - ``s`` : list + Lagrange multipliers associated with the constraints + x >= 0. + + - ``fun`` + Final objective value. + + - ``simplex_nit`` : int + Number of iterations accomplished by the simplex + solver. + + - ``ipm_nit`` : int + Number of iterations accomplished by the interior- + point solver. + + If model_status is not one of the above: + + - ``status`` : HighsModelStatus + Model status code. + + - ``message`` : str + Message corresponding to model status code. + + Notes + ----- + If ``options['write_solution_to_file']`` is ``True`` but + ``options['solution_file']`` is unset or ``''``, then the solution + will be printed to ``stdout``. + + If any iteration limit is reached, no solution will be + available. + + ``OptimizeWarning`` will be raised if any option value set by + the user is found to be incorrect. + + References + ---------- + .. [1] https://highs.dev/ + .. [2] https://www.maths.ed.ac.uk/hall/HiGHS/HighsOptions.html + ''' + numcol = c.size + numrow = rhs.size + isMip = integrality is not None and np.sum(integrality) > 0 + + # default "null" return values + res = { + "x": None, + "fun": None, + } + + # Fill up a HighsLp object + lp = _h.HighsLp() + lp.num_col_ = numcol + lp.num_row_ = numrow + lp.a_matrix_.num_col_ = numcol + lp.a_matrix_.num_row_ = numrow + lp.a_matrix_.format_ = _h.MatrixFormat.kColwise + lp.col_cost_ = c + lp.col_lower_ = lb + lp.col_upper_ = ub + lp.row_lower_ = lhs + lp.row_upper_ = rhs + lp.a_matrix_.start_ = indptr + lp.a_matrix_.index_ = indices + lp.a_matrix_.value_ = data + if integrality.size > 0: + lp.integrality_ = [_h.HighsVarType(i) for i in integrality] + + # Make a Highs object and pass it everything + highs = _h._Highs() + highs_options = _h.HighsOptions() + hoptmanager = hopt.HighsOptionsManager() + for key, val in options.items(): + # handle filtering of unsupported and default options + if val is None or key in ("sense",): + continue + + # ask for the option type + opt_type = hoptmanager.get_option_type(key) + if -1 == opt_type: + warn( + f"Unrecognized options detected: {dict({key: val})}", + OptimizeWarning, + stacklevel=2, + ) + continue + else: + if key in ("presolve", "parallel"): + # handle fake bools (require bool -> str conversions) + if isinstance(val, bool): + val = "on" if val else "off" + else: + warn( + f'Option f"{key}" is "{val}", but only True or False is ' + f"allowed. Using default.", + OptimizeWarning, + stacklevel=2, + ) + continue + opt_type = _h.HighsOptionType(opt_type) + status, msg = check_option(highs, key, val) + if opt_type == _h.HighsOptionType.kBool: + if not isinstance(val, bool): + warn( + f'Option f"{key}" is "{val}", but only True or False is ' + f"allowed. 
Using default.", + OptimizeWarning, + stacklevel=2, + ) + continue + + # warn or set option + if status != 0: + warn(msg, OptimizeWarning, stacklevel=2) + else: + setattr(highs_options, key, val) + + opt_status = highs.passOptions(highs_options) + if opt_status == _h.HighsStatus.kError: + res.update( + { + "status": highs.getModelStatus(), + "message": highs.modelStatusToString(highs.getModelStatus()), + } + ) + return res + + init_status = highs.passModel(lp) + if init_status == _h.HighsStatus.kError: + # if model fails to load, highs.getModelStatus() will be NOT_SET + err_model_status = _h.HighsModelStatus.kModelError + res.update( + { + "status": err_model_status, + "message": highs.modelStatusToString(err_model_status), + } + ) + return res + + # Solve the LP + run_status = highs.run() + if run_status == _h.HighsStatus.kError: + res.update( + { + "status": highs.getModelStatus(), + "message": highs.modelStatusToString(highs.getModelStatus()), + } + ) + return res + + # Extract what we need from the solution + model_status = highs.getModelStatus() + + # it should always be safe to get the info object + info = highs.getInfo() + + # Failure modes: + # LP: if we have anything other than an Optimal status, it + # is unsafe (and unhelpful) to read any results + # MIP: has a non-Optimal status or has timed out/reached max iterations + # 1) If not Optimal/TimedOut/MaxIter status, there is no solution + # 2) If TimedOut/MaxIter status, there may be a feasible solution. + # if the objective function value is not Infinity, then the + # current solution is feasible and can be returned. Else, there + # is no solution. + mipFailCondition = model_status not in ( + _h.HighsModelStatus.kOptimal, + _h.HighsModelStatus.kTimeLimit, + _h.HighsModelStatus.kIterationLimit, + _h.HighsModelStatus.kSolutionLimit, + ) or ( + model_status + in { + _h.HighsModelStatus.kTimeLimit, + _h.HighsModelStatus.kIterationLimit, + _h.HighsModelStatus.kSolutionLimit, + } + and (info.objective_function_value == _h.kHighsInf) + ) + lpFailCondition = model_status != _h.HighsModelStatus.kOptimal + if (isMip and mipFailCondition) or (not isMip and lpFailCondition): + res.update( + { + "status": model_status, + "message": "model_status is " + f"{highs.modelStatusToString(model_status)}; " + "primal_status is " + f"{highs.solutionStatusToString(info.primal_solution_status)}", + "simplex_nit": info.simplex_iteration_count, + "ipm_nit": info.ipm_iteration_count, + "crossover_nit": info.crossover_iteration_count, + } + ) + return res + + # Should be safe to read the solution: + solution = highs.getSolution() + basis = highs.getBasis() + + # Lagrangians for bounds based on column statuses + marg_bnds = np.zeros((2, numcol)) + basis_col_status = basis.col_status + solution_col_dual = solution.col_dual + for ii in range(numcol): + if basis_col_status[ii] == _h.HighsBasisStatus.kLower: + marg_bnds[0, ii] = solution_col_dual[ii] + elif basis_col_status[ii] == _h.HighsBasisStatus.kUpper: + marg_bnds[1, ii] = solution_col_dual[ii] + + res.update( + { + "status": model_status, + "message": highs.modelStatusToString(model_status), + # Primal solution + "x": np.array(solution.col_value), + # Ax + s = b => Ax = b - s + # Note: this is for all constraints (A_ub and A_eq) + "slack": rhs - solution.row_value, + # lambda are the lagrange multipliers associated with Ax=b + "lambda": np.array(solution.row_dual), + "marg_bnds": marg_bnds, + "fun": info.objective_function_value, + "simplex_nit": info.simplex_iteration_count, + "ipm_nit": 
info.ipm_iteration_count, + "crossover_nit": info.crossover_iteration_count, + } + ) + + if isMip: + res.update( + { + "mip_node_count": info.mip_node_count, + "mip_dual_bound": info.mip_dual_bound, + "mip_gap": info.mip_gap, + } + ) + + return res + + +def check_option(highs_inst, option, value): + status, option_type = highs_inst.getOptionType(option) + hoptmanager = hopt.HighsOptionsManager() + + if status != _h.HighsStatus.kOk: + return -1, "Invalid option name." + + valid_types = { + _h.HighsOptionType.kBool: bool, + _h.HighsOptionType.kInt: int, + _h.HighsOptionType.kDouble: float, + _h.HighsOptionType.kString: str, + } + + expected_type = valid_types.get(option_type, None) + + if expected_type is str: + if not hoptmanager.check_string_option(option, value): + return -1, "Invalid option value." + if expected_type is float: + if not hoptmanager.check_double_option(option, value): + return -1, "Invalid option value." + if expected_type is int: + if not hoptmanager.check_int_option(option, value): + return -1, "Invalid option value." + + if expected_type is None: + return 3, "Unknown option type." + + status, current_value = highs_inst.getOptionValue(option) + if status != _h.HighsStatus.kOk: + return 4, "Failed to validate option value." + return 0, "Check option succeeded." diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_isotonic.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_isotonic.py new file mode 100644 index 0000000000000000000000000000000000000000..825576535402a9acf8bbff009a5f76282cb4f500 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_isotonic.py @@ -0,0 +1,157 @@ +from typing import TYPE_CHECKING + +import numpy as np + +from ._optimize import OptimizeResult +from ._pava_pybind import pava + +if TYPE_CHECKING: + import numpy.typing as npt + + +__all__ = ["isotonic_regression"] + + +def isotonic_regression( + y: "npt.ArrayLike", + *, + weights: "npt.ArrayLike | None" = None, + increasing: bool = True, +) -> OptimizeResult: + r"""Nonparametric isotonic regression. + + A (not strictly) monotonically increasing array `x` with the same length + as `y` is calculated by the pool adjacent violators algorithm (PAVA), see + [1]_. See the Notes section for more details. + + Parameters + ---------- + y : (N,) array_like + Response variable. + weights : (N,) array_like or None + Case weights. + increasing : bool + If True, fit monotonic increasing, i.e. isotonic, regression. + If False, fit a monotonic decreasing, i.e. antitonic, regression. + Default is True. + + Returns + ------- + res : OptimizeResult + The optimization result represented as a ``OptimizeResult`` object. + Important attributes are: + + - ``x``: The isotonic regression solution, i.e. an increasing (or + decreasing) array of the same length than y, with elements in the + range from min(y) to max(y). + - ``weights`` : Array with the sum of case weights for each block + (or pool) B. + - ``blocks``: Array of length B+1 with the indices of the start + positions of each block (or pool) B. The j-th block is given by + ``x[blocks[j]:blocks[j+1]]`` for which all values are the same. + + Notes + ----- + Given data :math:`y` and case weights :math:`w`, the isotonic regression + solves the following optimization problem: + + .. 
math:: + + \operatorname{argmin}_{x_i} \sum_i w_i (y_i - x_i)^2 \quad + \text{subject to } x_i \leq x_j \text{ whenever } i \leq j \,. + + For every input value :math:`y_i`, it generates a value :math:`x_i` such + that :math:`x` is increasing (but not strictly), i.e. + :math:`x_i \leq x_{i+1}`. This is accomplished by the PAVA. + The solution consists of pools or blocks, i.e. neighboring elements of + :math:`x`, e.g. :math:`x_i` and :math:`x_{i+1}`, that all have the same + value. + + Most interestingly, the solution stays the same if the squared loss is + replaced by the wide class of Bregman functions which are the unique + class of strictly consistent scoring functions for the mean, see [2]_ + and references therein. + + The implemented version of PAVA according to [1]_ has a computational + complexity of O(N) with input size N. + + References + ---------- + .. [1] Busing, F. M. T. A. (2022). + Monotone Regression: A Simple and Fast O(n) PAVA Implementation. + Journal of Statistical Software, Code Snippets, 102(1), 1-25. + :doi:`10.18637/jss.v102.c01` + .. [2] Jordan, A.I., Mühlemann, A. & Ziegel, J.F. + Characterizing the optimal solutions to the isotonic regression + problem for identifiable functionals. + Ann Inst Stat Math 74, 489-514 (2022). + :doi:`10.1007/s10463-021-00808-0` + + Examples + -------- + This example demonstrates that ``isotonic_regression`` really solves a + constrained optimization problem. + + >>> import numpy as np + >>> from scipy.optimize import isotonic_regression, minimize + >>> y = [1.5, 1.0, 4.0, 6.0, 5.7, 5.0, 7.8, 9.0, 7.5, 9.5, 9.0] + >>> def objective(yhat, y): + ... return np.sum((yhat - y)**2) + >>> def constraint(yhat, y): + ... # This is for a monotonically increasing regression. + ... return np.diff(yhat) + >>> result = minimize(objective, x0=y, args=(y,), + ... constraints=[{'type': 'ineq', + ... 'fun': lambda x: constraint(x, y)}]) + >>> result.x + array([1.25 , 1.25 , 4. , 5.56666667, 5.56666667, + 5.56666667, 7.8 , 8.25 , 8.25 , 9.25 , + 9.25 ]) + >>> result = isotonic_regression(y) + >>> result.x + array([1.25 , 1.25 , 4. , 5.56666667, 5.56666667, + 5.56666667, 7.8 , 8.25 , 8.25 , 9.25 , + 9.25 ]) + + The big advantage of ``isotonic_regression`` compared to calling + ``minimize`` is that it is more user friendly, i.e. one does not need to + define objective and constraint functions, and that it is orders of + magnitudes faster. On commodity hardware (in 2023), for normal distributed + input y of length 1000, the minimizer takes about 4 seconds, while + ``isotonic_regression`` takes about 200 microseconds. + """ + yarr = np.atleast_1d(y) # Check yarr.ndim == 1 is implicit (pybind11) in pava. + order = slice(None) if increasing else slice(None, None, -1) + x = np.array(yarr[order], order="C", dtype=np.float64, copy=True) + if weights is None: + wx = np.ones_like(yarr, dtype=np.float64) + else: + warr = np.atleast_1d(weights) + + if not (yarr.ndim == warr.ndim == 1 and yarr.shape[0] == warr.shape[0]): + raise ValueError( + "Input arrays y and w must have one dimension of equal length." + ) + if np.any(warr <= 0): + raise ValueError("Weights w must be strictly positive.") + + wx = np.array(warr[order], order="C", dtype=np.float64, copy=True) + n = x.shape[0] + r = np.full(shape=n + 1, fill_value=-1, dtype=np.intp) + x, wx, r, b = pava(x, wx, r) + # Now that we know the number of blocks b, we only keep the relevant part + # of r and wx. 
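# --- Editorial sketch (not part of this module) ---------------------------
# A brief illustration, with made-up data, of the block (pool) structure that
# `isotonic_regression` returns: `res.blocks` holds the start index of each
# constant pool, so the j-th pool is res.x[res.blocks[j]:res.blocks[j+1]].
import numpy as np
from scipy.optimize import isotonic_regression

res = isotonic_regression([2.0, 1.0, 3.0, 5.0, 4.0], weights=[1, 1, 1, 1, 1])
print(res.x)       # pooled, non-decreasing fit, e.g. [1.5, 1.5, 3. , 4.5, 4.5]
print(res.blocks)  # pool boundaries, e.g. [0, 2, 3, 5]
# ---------------------------------------------------------------------------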
+ # As information: Due to the pava implementation, after the last block + # index, there might be smaller numbers appended to r, e.g. + # r = [0, 10, 8, 7] which in the end should be r = [0, 10]. + r = r[:b + 1] # type: ignore[assignment] + wx = wx[:b] + if not increasing: + x = x[::-1] + wx = wx[::-1] + r = r[-1] - r[::-1] + return OptimizeResult( + x=x, + weights=wx, + blocks=r, + ) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_lbfgsb_py.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_lbfgsb_py.py new file mode 100644 index 0000000000000000000000000000000000000000..d0e206feaa9333229c3842850eee1f673c8ef02b --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_lbfgsb_py.py @@ -0,0 +1,578 @@ +""" +Functions +--------- +.. autosummary:: + :toctree: generated/ + + fmin_l_bfgs_b + +""" + +## License for the Python wrapper +## ============================== + +## Copyright (c) 2004 David M. Cooke + +## Permission is hereby granted, free of charge, to any person obtaining a +## copy of this software and associated documentation files (the "Software"), +## to deal in the Software without restriction, including without limitation +## the rights to use, copy, modify, merge, publish, distribute, sublicense, +## and/or sell copies of the Software, and to permit persons to whom the +## Software is furnished to do so, subject to the following conditions: + +## The above copyright notice and this permission notice shall be included in +## all copies or substantial portions of the Software. + +## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +## IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +## FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +## AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +## LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +## FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +## DEALINGS IN THE SOFTWARE. + +## Modifications by Travis Oliphant and Enthought, Inc. for inclusion in SciPy + +import numpy as np +from numpy import array, asarray, float64, zeros +from . import _lbfgsb +from ._optimize import (MemoizeJac, OptimizeResult, _call_callback_maybe_halt, + _wrap_callback, _check_unknown_options, + _prepare_scalar_function) +from ._constraints import old_bound_to_new + +from scipy.sparse.linalg import LinearOperator + +__all__ = ['fmin_l_bfgs_b', 'LbfgsInvHessProduct'] + + +status_messages = { + 0 : "START", + 1 : "NEW_X", + 2 : "RESTART", + 3 : "FG", + 4 : "CONVERGENCE", + 5 : "STOP", + 6 : "WARNING", + 7 : "ERROR", + 8 : "ABNORMAL" +} + + +task_messages = { + 0 : "", + 301 : "", + 302 : "", + 401 : "NORM OF PROJECTED GRADIENT <= PGTOL", + 402 : "RELATIVE REDUCTION OF F <= FACTR*EPSMCH", + 501 : "CPU EXCEEDING THE TIME LIMIT", + 502 : "TOTAL NO. OF F,G EVALUATIONS EXCEEDS LIMIT", + 503 : "PROJECTED GRADIENT IS SUFFICIENTLY SMALL", + 504 : "TOTAL NO. 
OF ITERATIONS REACHED LIMIT", + 505 : "CALLBACK REQUESTED HALT", + 601 : "ROUNDING ERRORS PREVENT PROGRESS", + 602 : "STP = STPMAX", + 603 : "STP = STPMIN", + 604 : "XTOL TEST SATISFIED", + 701 : "NO FEASIBLE SOLUTION", + 702 : "FACTR < 0", + 703 : "FTOL < 0", + 704 : "GTOL < 0", + 705 : "XTOL < 0", + 706 : "STP < STPMIN", + 707 : "STP > STPMAX", + 708 : "STPMIN < 0", + 709 : "STPMAX < STPMIN", + 710 : "INITIAL G >= 0", + 711 : "M <= 0", + 712 : "N <= 0", + 713 : "INVALID NBD", +} + +def fmin_l_bfgs_b(func, x0, fprime=None, args=(), + approx_grad=0, + bounds=None, m=10, factr=1e7, pgtol=1e-5, + epsilon=1e-8, + iprint=-1, maxfun=15000, maxiter=15000, disp=None, + callback=None, maxls=20): + """ + Minimize a function func using the L-BFGS-B algorithm. + + Parameters + ---------- + func : callable f(x,*args) + Function to minimize. + x0 : ndarray + Initial guess. + fprime : callable fprime(x,*args), optional + The gradient of `func`. If None, then `func` returns the function + value and the gradient (``f, g = func(x, *args)``), unless + `approx_grad` is True in which case `func` returns only ``f``. + args : sequence, optional + Arguments to pass to `func` and `fprime`. + approx_grad : bool, optional + Whether to approximate the gradient numerically (in which case + `func` returns only the function value). + bounds : list, optional + ``(min, max)`` pairs for each element in ``x``, defining + the bounds on that parameter. Use None or +-inf for one of ``min`` or + ``max`` when there is no bound in that direction. + m : int, optional + The maximum number of variable metric corrections + used to define the limited memory matrix. (The limited memory BFGS + method does not store the full hessian but uses this many terms in an + approximation to it.) + factr : float, optional + The iteration stops when + ``(f^k - f^{k+1})/max{|f^k|,|f^{k+1}|,1} <= factr * eps``, + where ``eps`` is the machine precision, which is automatically + generated by the code. Typical values for `factr` are: 1e12 for + low accuracy; 1e7 for moderate accuracy; 10.0 for extremely + high accuracy. See Notes for relationship to `ftol`, which is exposed + (instead of `factr`) by the `scipy.optimize.minimize` interface to + L-BFGS-B. + pgtol : float, optional + The iteration will stop when + ``max{|proj g_i | i = 1, ..., n} <= pgtol`` + where ``proj g_i`` is the i-th component of the projected gradient. + epsilon : float, optional + Step size used when `approx_grad` is True, for numerically + calculating the gradient + iprint : int, optional + Deprecated option that previously controlled the text printed on the + screen during the problem solution. Now the code does not emit any + output and this keyword has no function. + + .. deprecated:: 1.15.0 + This keyword is deprecated and will be removed from SciPy 1.17.0. + + disp : int, optional + Deprecated option that previously controlled the text printed on the + screen during the problem solution. Now the code does not emit any + output and this keyword has no function. + + .. deprecated:: 1.15.0 + This keyword is deprecated and will be removed from SciPy 1.17.0. + + maxfun : int, optional + Maximum number of function evaluations. Note that this function + may violate the limit because of evaluating gradients by numerical + differentiation. + maxiter : int, optional + Maximum number of iterations. + callback : callable, optional + Called after each iteration, as ``callback(xk)``, where ``xk`` is the + current parameter vector. 
+ maxls : int, optional + Maximum number of line search steps (per iteration). Default is 20. + + Returns + ------- + x : array_like + Estimated position of the minimum. + f : float + Value of `func` at the minimum. + d : dict + Information dictionary. + + * d['warnflag'] is + + - 0 if converged, + - 1 if too many function evaluations or too many iterations, + - 2 if stopped for another reason, given in d['task'] + + * d['grad'] is the gradient at the minimum (should be 0 ish) + * d['funcalls'] is the number of function calls made. + * d['nit'] is the number of iterations. + + See also + -------- + minimize: Interface to minimization algorithms for multivariate + functions. See the 'L-BFGS-B' `method` in particular. Note that the + `ftol` option is made available via that interface, while `factr` is + provided via this interface, where `factr` is the factor multiplying + the default machine floating-point precision to arrive at `ftol`: + ``ftol = factr * numpy.finfo(float).eps``. + + Notes + ----- + SciPy uses a C-translated and modified version of the Fortran code, + L-BFGS-B v3.0 (released April 25, 2011, BSD-3 licensed). Original Fortran + version was written by Ciyou Zhu, Richard Byrd, Jorge Nocedal and, + Jose Luis Morales. + + References + ---------- + * R. H. Byrd, P. Lu and J. Nocedal. A Limited Memory Algorithm for Bound + Constrained Optimization, (1995), SIAM Journal on Scientific and + Statistical Computing, 16, 5, pp. 1190-1208. + * C. Zhu, R. H. Byrd and J. Nocedal. L-BFGS-B: Algorithm 778: L-BFGS-B, + FORTRAN routines for large scale bound constrained optimization (1997), + ACM Transactions on Mathematical Software, 23, 4, pp. 550 - 560. + * J.L. Morales and J. Nocedal. L-BFGS-B: Remark on Algorithm 778: L-BFGS-B, + FORTRAN routines for large scale bound constrained optimization (2011), + ACM Transactions on Mathematical Software, 38, 1. + + Examples + -------- + Solve a linear regression problem via `fmin_l_bfgs_b`. To do this, first we + define an objective function ``f(m, b) = (y - y_model)**2``, where `y` + describes the observations and `y_model` the prediction of the linear model + as ``y_model = m*x + b``. The bounds for the parameters, ``m`` and ``b``, + are arbitrarily chosen as ``(0,5)`` and ``(5,10)`` for this example. + + >>> import numpy as np + >>> from scipy.optimize import fmin_l_bfgs_b + >>> X = np.arange(0, 10, 1) + >>> M = 2 + >>> B = 3 + >>> Y = M * X + B + >>> def func(parameters, *args): + ... x = args[0] + ... y = args[1] + ... m, b = parameters + ... y_model = m*x + b + ... error = sum(np.power((y - y_model), 2)) + ... return error + + >>> initial_values = np.array([0.0, 1.0]) + + >>> x_opt, f_opt, info = fmin_l_bfgs_b(func, x0=initial_values, args=(X, Y), + ... approx_grad=True) + >>> x_opt, f_opt + array([1.99999999, 3.00000006]), 1.7746231151323805e-14 # may vary + + The optimized parameters in ``x_opt`` agree with the ground truth parameters + ``m`` and ``b``. Next, let us perform a bound constrained optimization using + the `bounds` parameter. + + >>> bounds = [(0, 5), (5, 10)] + >>> x_opt, f_op, info = fmin_l_bfgs_b(func, x0=initial_values, args=(X, Y), + ... 
approx_grad=True, bounds=bounds) + >>> x_opt, f_opt + array([1.65990508, 5.31649385]), 15.721334516453945 # may vary + """ + # handle fprime/approx_grad + if approx_grad: + fun = func + jac = None + elif fprime is None: + fun = MemoizeJac(func) + jac = fun.derivative + else: + fun = func + jac = fprime + + # build options + callback = _wrap_callback(callback) + opts = {'maxcor': m, + 'ftol': factr * np.finfo(float).eps, + 'gtol': pgtol, + 'eps': epsilon, + 'maxfun': maxfun, + 'maxiter': maxiter, + 'callback': callback, + 'maxls': maxls} + + res = _minimize_lbfgsb(fun, x0, args=args, jac=jac, bounds=bounds, + **opts) + d = {'grad': res['jac'], + 'task': res['message'], + 'funcalls': res['nfev'], + 'nit': res['nit'], + 'warnflag': res['status']} + f = res['fun'] + x = res['x'] + + return x, f, d + + +def _minimize_lbfgsb(fun, x0, args=(), jac=None, bounds=None, + disp=None, maxcor=10, ftol=2.2204460492503131e-09, + gtol=1e-5, eps=1e-8, maxfun=15000, maxiter=15000, + iprint=-1, callback=None, maxls=20, + finite_diff_rel_step=None, **unknown_options): + """ + Minimize a scalar function of one or more variables using the L-BFGS-B + algorithm. + + Options + ------- + disp : None or int + Deprecated option that previously controlled the text printed on the + screen during the problem solution. Now the code does not emit any + output and this keyword has no function. + + .. deprecated:: 1.15.0 + This keyword is deprecated and will be removed from SciPy 1.17.0. + + maxcor : int + The maximum number of variable metric corrections used to + define the limited memory matrix. (The limited memory BFGS + method does not store the full hessian but uses this many terms + in an approximation to it.) + ftol : float + The iteration stops when ``(f^k - + f^{k+1})/max{|f^k|,|f^{k+1}|,1} <= ftol``. + gtol : float + The iteration will stop when ``max{|proj g_i | i = 1, ..., n} + <= gtol`` where ``proj g_i`` is the i-th component of the + projected gradient. + eps : float or ndarray + If `jac is None` the absolute step size used for numerical + approximation of the jacobian via forward differences. + maxfun : int + Maximum number of function evaluations. Note that this function + may violate the limit because of evaluating gradients by numerical + differentiation. + maxiter : int + Maximum number of iterations. + iprint : int, optional + Deprecated option that previously controlled the text printed on the + screen during the problem solution. Now the code does not emit any + output and this keyword has no function. + + .. deprecated:: 1.15.0 + This keyword is deprecated and will be removed from SciPy 1.17.0. + + maxls : int, optional + Maximum number of line search steps (per iteration). Default is 20. + finite_diff_rel_step : None or array_like, optional + If ``jac in ['2-point', '3-point', 'cs']`` the relative step size to + use for numerical approximation of the jacobian. The absolute step + size is computed as ``h = rel_step * sign(x) * max(1, abs(x))``, + possibly adjusted to fit into the bounds. For ``method='3-point'`` + the sign of `h` is ignored. If None (default) then step is selected + automatically. + + Notes + ----- + The option `ftol` is exposed via the `scipy.optimize.minimize` interface, + but calling `scipy.optimize.fmin_l_bfgs_b` directly exposes `factr`. The + relationship between the two is ``ftol = factr * numpy.finfo(float).eps``. + I.e., `factr` multiplies the default machine floating-point precision to + arrive at `ftol`. 
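# --- Editorial sketch (not part of this module) ---------------------------
# The Notes above relate `factr` (this legacy interface) to `ftol` (the
# `minimize` interface) via ftol = factr * np.finfo(float).eps. A small
# sketch, assuming a simple quadratic objective, showing the two equivalent
# call styles side by side.
import numpy as np
from scipy.optimize import minimize, fmin_l_bfgs_b

f = lambda x: np.sum((x - 3.0) ** 2)
factr = 1e7
res = minimize(f, x0=np.zeros(3), method='L-BFGS-B',
               options={'ftol': factr * np.finfo(float).eps})
x, fval, info = fmin_l_bfgs_b(f, x0=np.zeros(3), approx_grad=True, factr=factr)
print(res.x, x)  # both solutions should be close to [3, 3, 3]
# ---------------------------------------------------------------------------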
+ + """ + _check_unknown_options(unknown_options) + m = maxcor + pgtol = gtol + factr = ftol / np.finfo(float).eps + + x0 = asarray(x0).ravel() + n, = x0.shape + + # historically old-style bounds were/are expected by lbfgsb. + # That's still the case but we'll deal with new-style from here on, + # it's easier + if bounds is None: + pass + elif len(bounds) != n: + raise ValueError('length of x0 != length of bounds') + else: + bounds = np.array(old_bound_to_new(bounds)) + + # check bounds + if (bounds[0] > bounds[1]).any(): + raise ValueError( + "LBFGSB - one of the lower bounds is greater than an upper bound." + ) + + # initial vector must lie within the bounds. Otherwise ScalarFunction and + # approx_derivative will cause problems + x0 = np.clip(x0, bounds[0], bounds[1]) + + # _prepare_scalar_function can use bounds=None to represent no bounds + sf = _prepare_scalar_function(fun, x0, jac=jac, args=args, epsilon=eps, + bounds=bounds, + finite_diff_rel_step=finite_diff_rel_step) + + func_and_grad = sf.fun_and_grad + + nbd = zeros(n, np.int32) + low_bnd = zeros(n, float64) + upper_bnd = zeros(n, float64) + bounds_map = {(-np.inf, np.inf): 0, + (1, np.inf): 1, + (1, 1): 2, + (-np.inf, 1): 3} + + if bounds is not None: + for i in range(0, n): + L, U = bounds[0, i], bounds[1, i] + if not np.isinf(L): + low_bnd[i] = L + L = 1 + if not np.isinf(U): + upper_bnd[i] = U + U = 1 + nbd[i] = bounds_map[L, U] + + if not maxls > 0: + raise ValueError('maxls must be positive.') + + x = array(x0, dtype=np.float64) + f = array(0.0, dtype=np.int32) + g = zeros((n,), dtype=np.int32) + wa = zeros(2*m*n + 5*n + 11*m*m + 8*m, float64) + iwa = zeros(3*n, dtype=np.int32) + task = zeros(2, dtype=np.int32) + ln_task = zeros(2, dtype=np.int32) + lsave = zeros(4, dtype=np.int32) + isave = zeros(44, dtype=np.int32) + dsave = zeros(29, dtype=float64) + + n_iterations = 0 + + while True: + # g may become float32 if a user provides a function that calculates + # the Jacobian in float32 (see gh-18730). The underlying code expects + # float64, so upcast it + g = g.astype(np.float64) + # x, f, g, wa, iwa, task, csave, lsave, isave, dsave = \ + _lbfgsb.setulb(m, x, low_bnd, upper_bnd, nbd, f, g, factr, pgtol, wa, + iwa, task, lsave, isave, dsave, maxls, ln_task) + + if task[0] == 3: + # The minimization routine wants f and g at the current x. + # Note that interruptions due to maxfun are postponed + # until the completion of the current minimization iteration. + # Overwrite f and g: + f, g = func_and_grad(x) + elif task[0] == 1: + # new iteration + n_iterations += 1 + + intermediate_result = OptimizeResult(x=x, fun=f) + if _call_callback_maybe_halt(callback, intermediate_result): + task[0] = 5 + task[1] = 505 + if n_iterations >= maxiter: + task[0] = 5 + task[1] = 504 + elif sf.nfev > maxfun: + task[0] = 5 + task[1] = 502 + else: + break + + if task[0] == 4: + warnflag = 0 + elif sf.nfev > maxfun or n_iterations >= maxiter: + warnflag = 1 + else: + warnflag = 2 + + # These two portions of the workspace are described in the mainlb + # function docstring in "__lbfgsb.c", ws and wy arguments. + s = wa[0: m*n].reshape(m, n) + y = wa[m*n: 2*m*n].reshape(m, n) + + # isave(31) = the total number of BFGS updates prior the current iteration. 
+ n_bfgs_updates = isave[30] + + n_corrs = min(n_bfgs_updates, maxcor) + hess_inv = LbfgsInvHessProduct(s[:n_corrs], y[:n_corrs]) + + msg = status_messages[task[0]] + ": " + task_messages[task[1]] + + return OptimizeResult(fun=f, jac=g, nfev=sf.nfev, + njev=sf.ngev, + nit=n_iterations, status=warnflag, message=msg, + x=x, success=(warnflag == 0), hess_inv=hess_inv) + + +class LbfgsInvHessProduct(LinearOperator): + """Linear operator for the L-BFGS approximate inverse Hessian. + + This operator computes the product of a vector with the approximate inverse + of the Hessian of the objective function, using the L-BFGS limited + memory approximation to the inverse Hessian, accumulated during the + optimization. + + Objects of this class implement the ``scipy.sparse.linalg.LinearOperator`` + interface. + + Parameters + ---------- + sk : array_like, shape=(n_corr, n) + Array of `n_corr` most recent updates to the solution vector. + (See [1]). + yk : array_like, shape=(n_corr, n) + Array of `n_corr` most recent updates to the gradient. (See [1]). + + References + ---------- + .. [1] Nocedal, Jorge. "Updating quasi-Newton matrices with limited + storage." Mathematics of computation 35.151 (1980): 773-782. + + """ + + def __init__(self, sk, yk): + """Construct the operator.""" + if sk.shape != yk.shape or sk.ndim != 2: + raise ValueError('sk and yk must have matching shape, (n_corrs, n)') + n_corrs, n = sk.shape + + super().__init__(dtype=np.float64, shape=(n, n)) + + self.sk = sk + self.yk = yk + self.n_corrs = n_corrs + self.rho = 1 / np.einsum('ij,ij->i', sk, yk) + + def _matvec(self, x): + """Efficient matrix-vector multiply with the BFGS matrices. + + This calculation is described in Section (4) of [1]. + + Parameters + ---------- + x : ndarray + An array with shape (n,) or (n,1). + + Returns + ------- + y : ndarray + The matrix-vector product + + """ + s, y, n_corrs, rho = self.sk, self.yk, self.n_corrs, self.rho + q = np.array(x, dtype=self.dtype, copy=True) + if q.ndim == 2 and q.shape[1] == 1: + q = q.reshape(-1) + + alpha = np.empty(n_corrs) + + for i in range(n_corrs-1, -1, -1): + alpha[i] = rho[i] * np.dot(s[i], q) + q = q - alpha[i]*y[i] + + r = q + for i in range(n_corrs): + beta = rho[i] * np.dot(y[i], r) + r = r + s[i] * (alpha[i] - beta) + + return r + + def todense(self): + """Return a dense array representation of this operator. + + Returns + ------- + arr : ndarray, shape=(n, n) + An array with the same shape and containing + the same data represented by this `LinearOperator`. + + """ + s, y, n_corrs, rho = self.sk, self.yk, self.n_corrs, self.rho + I_arr = np.eye(*self.shape, dtype=self.dtype) + Hk = I_arr + + for i in range(n_corrs): + A1 = I_arr - s[i][:, np.newaxis] * y[i][np.newaxis, :] * rho[i] + A2 = I_arr - y[i][:, np.newaxis] * s[i][np.newaxis, :] * rho[i] + + Hk = np.dot(A1, np.dot(Hk, A2)) + (rho[i] * s[i][:, np.newaxis] * + s[i][np.newaxis, :]) + return Hk diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_linesearch.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_linesearch.py new file mode 100644 index 0000000000000000000000000000000000000000..31442e02d323e0f6d163505bf77dd30855ce1218 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_linesearch.py @@ -0,0 +1,896 @@ +""" +Functions +--------- +.. 
autosummary:: + :toctree: generated/ + + line_search_armijo + line_search_wolfe1 + line_search_wolfe2 + scalar_search_wolfe1 + scalar_search_wolfe2 + +""" +from warnings import warn + +from ._dcsrch import DCSRCH +import numpy as np + +__all__ = ['LineSearchWarning', 'line_search_wolfe1', 'line_search_wolfe2', + 'scalar_search_wolfe1', 'scalar_search_wolfe2', + 'line_search_armijo'] + +class LineSearchWarning(RuntimeWarning): + pass + + +def _check_c1_c2(c1, c2): + if not (0 < c1 < c2 < 1): + raise ValueError("'c1' and 'c2' do not satisfy" + "'0 < c1 < c2 < 1'.") + + +#------------------------------------------------------------------------------ +# Minpack's Wolfe line and scalar searches +#------------------------------------------------------------------------------ + +def line_search_wolfe1(f, fprime, xk, pk, gfk=None, + old_fval=None, old_old_fval=None, + args=(), c1=1e-4, c2=0.9, amax=50, amin=1e-8, + xtol=1e-14): + """ + As `scalar_search_wolfe1` but do a line search to direction `pk` + + Parameters + ---------- + f : callable + Function `f(x)` + fprime : callable + Gradient of `f` + xk : array_like + Current point + pk : array_like + Search direction + gfk : array_like, optional + Gradient of `f` at point `xk` + old_fval : float, optional + Value of `f` at point `xk` + old_old_fval : float, optional + Value of `f` at point preceding `xk` + + The rest of the parameters are the same as for `scalar_search_wolfe1`. + + Returns + ------- + stp, f_count, g_count, fval, old_fval + As in `line_search_wolfe1` + gval : array + Gradient of `f` at the final point + + Notes + ----- + Parameters `c1` and `c2` must satisfy ``0 < c1 < c2 < 1``. + + """ + if gfk is None: + gfk = fprime(xk, *args) + + gval = [gfk] + gc = [0] + fc = [0] + + def phi(s): + fc[0] += 1 + return f(xk + s*pk, *args) + + def derphi(s): + gval[0] = fprime(xk + s*pk, *args) + gc[0] += 1 + return np.dot(gval[0], pk) + + derphi0 = np.dot(gfk, pk) + + stp, fval, old_fval = scalar_search_wolfe1( + phi, derphi, old_fval, old_old_fval, derphi0, + c1=c1, c2=c2, amax=amax, amin=amin, xtol=xtol) + + return stp, fc[0], gc[0], fval, old_fval, gval[0] + + +def scalar_search_wolfe1(phi, derphi, phi0=None, old_phi0=None, derphi0=None, + c1=1e-4, c2=0.9, + amax=50, amin=1e-8, xtol=1e-14): + """ + Scalar function search for alpha that satisfies strong Wolfe conditions + + alpha > 0 is assumed to be a descent direction. + + Parameters + ---------- + phi : callable phi(alpha) + Function at point `alpha` + derphi : callable phi'(alpha) + Objective function derivative. Returns a scalar. + phi0 : float, optional + Value of phi at 0 + old_phi0 : float, optional + Value of phi at previous point + derphi0 : float, optional + Value derphi at 0 + c1 : float, optional + Parameter for Armijo condition rule. + c2 : float, optional + Parameter for curvature condition rule. + amax, amin : float, optional + Maximum and minimum step size + xtol : float, optional + Relative tolerance for an acceptable step. + + Returns + ------- + alpha : float + Step size, or None if no suitable step was found + phi : float + Value of `phi` at the new point `alpha` + phi0 : float + Value of `phi` at `alpha=0` + + Notes + ----- + Uses routine DCSRCH from MINPACK. + + Parameters `c1` and `c2` must satisfy ``0 < c1 < c2 < 1`` as described in [1]_. + + References + ---------- + + .. [1] Nocedal, J., & Wright, S. J. (2006). Numerical optimization. + In Springer Series in Operations Research and Financial Engineering. 
+ (Springer Series in Operations Research and Financial Engineering). + Springer Nature. + + """ + _check_c1_c2(c1, c2) + + if phi0 is None: + phi0 = phi(0.) + if derphi0 is None: + derphi0 = derphi(0.) + + if old_phi0 is not None and derphi0 != 0: + alpha1 = min(1.0, 1.01*2*(phi0 - old_phi0)/derphi0) + if alpha1 < 0: + alpha1 = 1.0 + else: + alpha1 = 1.0 + + maxiter = 100 + + dcsrch = DCSRCH(phi, derphi, c1, c2, xtol, amin, amax) + stp, phi1, phi0, task = dcsrch( + alpha1, phi0=phi0, derphi0=derphi0, maxiter=maxiter + ) + + return stp, phi1, phi0 + + +line_search = line_search_wolfe1 + + +#------------------------------------------------------------------------------ +# Pure-Python Wolfe line and scalar searches +#------------------------------------------------------------------------------ + +# Note: `line_search_wolfe2` is the public `scipy.optimize.line_search` + +def line_search_wolfe2(f, myfprime, xk, pk, gfk=None, old_fval=None, + old_old_fval=None, args=(), c1=1e-4, c2=0.9, amax=None, + extra_condition=None, maxiter=10): + """Find alpha that satisfies strong Wolfe conditions. + + Parameters + ---------- + f : callable f(x,*args) + Objective function. + myfprime : callable f'(x,*args) + Objective function gradient. + xk : ndarray + Starting point. + pk : ndarray + Search direction. The search direction must be a descent direction + for the algorithm to converge. + gfk : ndarray, optional + Gradient value for x=xk (xk being the current parameter + estimate). Will be recomputed if omitted. + old_fval : float, optional + Function value for x=xk. Will be recomputed if omitted. + old_old_fval : float, optional + Function value for the point preceding x=xk. + args : tuple, optional + Additional arguments passed to objective function. + c1 : float, optional + Parameter for Armijo condition rule. + c2 : float, optional + Parameter for curvature condition rule. + amax : float, optional + Maximum step size + extra_condition : callable, optional + A callable of the form ``extra_condition(alpha, x, f, g)`` + returning a boolean. Arguments are the proposed step ``alpha`` + and the corresponding ``x``, ``f`` and ``g`` values. The line search + accepts the value of ``alpha`` only if this + callable returns ``True``. If the callable returns ``False`` + for the step length, the algorithm will continue with + new iterates. The callable is only called for iterates + satisfying the strong Wolfe conditions. + maxiter : int, optional + Maximum number of iterations to perform. + + Returns + ------- + alpha : float or None + Alpha for which ``x_new = x0 + alpha * pk``, + or None if the line search algorithm did not converge. + fc : int + Number of function evaluations made. + gc : int + Number of gradient evaluations made. + new_fval : float or None + New function value ``f(x_new)=f(x0+alpha*pk)``, + or None if the line search algorithm did not converge. + old_fval : float + Old function value ``f(x0)``. + new_slope : float or None + The local slope along the search direction at the + new value ````, + or None if the line search algorithm did not converge. + + + Notes + ----- + Uses the line search algorithm to enforce strong Wolfe + conditions. See Wright and Nocedal, 'Numerical Optimization', + 1999, pp. 59-61. + + The search direction `pk` must be a descent direction (e.g. + ``-myfprime(xk)``) to find a step length that satisfies the strong Wolfe + conditions. If the search direction is not a descent direction (e.g. + ``myfprime(xk)``), then `alpha`, `new_fval`, and `new_slope` will be None. 
+ + Examples + -------- + >>> import numpy as np + >>> from scipy.optimize import line_search + + A objective function and its gradient are defined. + + >>> def obj_func(x): + ... return (x[0])**2+(x[1])**2 + >>> def obj_grad(x): + ... return [2*x[0], 2*x[1]] + + We can find alpha that satisfies strong Wolfe conditions. + + >>> start_point = np.array([1.8, 1.7]) + >>> search_gradient = np.array([-1.0, -1.0]) + >>> line_search(obj_func, obj_grad, start_point, search_gradient) + (1.0, 2, 1, 1.1300000000000001, 6.13, [1.6, 1.4]) + + """ + fc = [0] + gc = [0] + gval = [None] + gval_alpha = [None] + + def phi(alpha): + fc[0] += 1 + return f(xk + alpha * pk, *args) + + fprime = myfprime + + def derphi(alpha): + gc[0] += 1 + gval[0] = fprime(xk + alpha * pk, *args) # store for later use + gval_alpha[0] = alpha + return np.dot(gval[0], pk) + + if gfk is None: + gfk = fprime(xk, *args) + derphi0 = np.dot(gfk, pk) + + if extra_condition is not None: + # Add the current gradient as argument, to avoid needless + # re-evaluation + def extra_condition2(alpha, phi): + if gval_alpha[0] != alpha: + derphi(alpha) + x = xk + alpha * pk + return extra_condition(alpha, x, phi, gval[0]) + else: + extra_condition2 = None + + alpha_star, phi_star, old_fval, derphi_star = scalar_search_wolfe2( + phi, derphi, old_fval, old_old_fval, derphi0, c1, c2, amax, + extra_condition2, maxiter=maxiter) + + if derphi_star is None: + warn('The line search algorithm did not converge', + LineSearchWarning, stacklevel=2) + else: + # derphi_star is a number (derphi) -- so use the most recently + # calculated gradient used in computing it derphi = gfk*pk + # this is the gradient at the next step no need to compute it + # again in the outer loop. + derphi_star = gval[0] + + return alpha_star, fc[0], gc[0], phi_star, old_fval, derphi_star + + +def scalar_search_wolfe2(phi, derphi, phi0=None, + old_phi0=None, derphi0=None, + c1=1e-4, c2=0.9, amax=None, + extra_condition=None, maxiter=10): + """Find alpha that satisfies strong Wolfe conditions. + + alpha > 0 is assumed to be a descent direction. + + Parameters + ---------- + phi : callable phi(alpha) + Objective scalar function. + derphi : callable phi'(alpha) + Objective function derivative. Returns a scalar. + phi0 : float, optional + Value of phi at 0. + old_phi0 : float, optional + Value of phi at previous point. + derphi0 : float, optional + Value of derphi at 0 + c1 : float, optional + Parameter for Armijo condition rule. + c2 : float, optional + Parameter for curvature condition rule. + amax : float, optional + Maximum step size. + extra_condition : callable, optional + A callable of the form ``extra_condition(alpha, phi_value)`` + returning a boolean. The line search accepts the value + of ``alpha`` only if this callable returns ``True``. + If the callable returns ``False`` for the step length, + the algorithm will continue with new iterates. + The callable is only called for iterates satisfying + the strong Wolfe conditions. + maxiter : int, optional + Maximum number of iterations to perform. + + Returns + ------- + alpha_star : float or None + Best alpha, or None if the line search algorithm did not converge. + phi_star : float + phi at alpha_star. + phi0 : float + phi at 0. + derphi_star : float or None + derphi at alpha_star, or None if the line search algorithm + did not converge. + + Notes + ----- + Uses the line search algorithm to enforce strong Wolfe + conditions. See Wright and Nocedal, 'Numerical Optimization', + 1999, pp. 59-61. 
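# --- Editorial sketch (not part of this module) ---------------------------
# The strong Wolfe conditions enforced here can be checked directly on the
# step returned by the public `scipy.optimize.line_search` wrapper. A sketch
# with an assumed quadratic objective and the default c1/c2 values; it assumes
# the search succeeds (alpha is not None).
import numpy as np
from scipy.optimize import line_search

f = lambda x: float(x @ x)
g = lambda x: 2.0 * x
xk = np.array([2.0, -1.5])
pk = -g(xk)  # descent direction
alpha, fc, gc, fnew, fold, gnew = line_search(f, g, xk, pk)
c1, c2 = 1e-4, 0.9
print(fnew <= fold + c1 * alpha * (g(xk) @ pk))                  # sufficient decrease
print(abs(g(xk + alpha * pk) @ pk) <= c2 * abs(g(xk) @ pk))      # curvature condition
# ---------------------------------------------------------------------------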
+ + """ + _check_c1_c2(c1, c2) + + if phi0 is None: + phi0 = phi(0.) + + if derphi0 is None: + derphi0 = derphi(0.) + + alpha0 = 0 + if old_phi0 is not None and derphi0 != 0: + alpha1 = min(1.0, 1.01*2*(phi0 - old_phi0)/derphi0) + else: + alpha1 = 1.0 + + if alpha1 < 0: + alpha1 = 1.0 + + if amax is not None: + alpha1 = min(alpha1, amax) + + phi_a1 = phi(alpha1) + #derphi_a1 = derphi(alpha1) evaluated below + + phi_a0 = phi0 + derphi_a0 = derphi0 + + if extra_condition is None: + def extra_condition(alpha, phi): + return True + + for i in range(maxiter): + if alpha1 == 0 or (amax is not None and alpha0 > amax): + # alpha1 == 0: This shouldn't happen. Perhaps the increment has + # slipped below machine precision? + alpha_star = None + phi_star = phi0 + phi0 = old_phi0 + derphi_star = None + + if alpha1 == 0: + msg = 'Rounding errors prevent the line search from converging' + else: + msg = "The line search algorithm could not find a solution " + \ + f"less than or equal to amax: {amax}" + + warn(msg, LineSearchWarning, stacklevel=2) + break + + not_first_iteration = i > 0 + if (phi_a1 > phi0 + c1 * alpha1 * derphi0) or \ + ((phi_a1 >= phi_a0) and not_first_iteration): + alpha_star, phi_star, derphi_star = \ + _zoom(alpha0, alpha1, phi_a0, + phi_a1, derphi_a0, phi, derphi, + phi0, derphi0, c1, c2, extra_condition) + break + + derphi_a1 = derphi(alpha1) + if (abs(derphi_a1) <= -c2*derphi0): + if extra_condition(alpha1, phi_a1): + alpha_star = alpha1 + phi_star = phi_a1 + derphi_star = derphi_a1 + break + + if (derphi_a1 >= 0): + alpha_star, phi_star, derphi_star = \ + _zoom(alpha1, alpha0, phi_a1, + phi_a0, derphi_a1, phi, derphi, + phi0, derphi0, c1, c2, extra_condition) + break + + alpha2 = 2 * alpha1 # increase by factor of two on each iteration + if amax is not None: + alpha2 = min(alpha2, amax) + alpha0 = alpha1 + alpha1 = alpha2 + phi_a0 = phi_a1 + phi_a1 = phi(alpha1) + derphi_a0 = derphi_a1 + + else: + # stopping test maxiter reached + alpha_star = alpha1 + phi_star = phi_a1 + derphi_star = None + warn('The line search algorithm did not converge', + LineSearchWarning, stacklevel=2) + + return alpha_star, phi_star, phi0, derphi_star + + +def _cubicmin(a, fa, fpa, b, fb, c, fc): + """ + Finds the minimizer for a cubic polynomial that goes through the + points (a,fa), (b,fb), and (c,fc) with derivative at a of fpa. + + If no minimizer can be found, return None. + + """ + # f(x) = A *(x-a)^3 + B*(x-a)^2 + C*(x-a) + D + + with np.errstate(divide='raise', over='raise', invalid='raise'): + try: + C = fpa + db = b - a + dc = c - a + denom = (db * dc) ** 2 * (db - dc) + d1 = np.empty((2, 2)) + d1[0, 0] = dc ** 2 + d1[0, 1] = -db ** 2 + d1[1, 0] = -dc ** 3 + d1[1, 1] = db ** 3 + [A, B] = np.dot(d1, np.asarray([fb - fa - C * db, + fc - fa - C * dc]).flatten()) + A /= denom + B /= denom + radical = B * B - 3 * A * C + xmin = a + (-B + np.sqrt(radical)) / (3 * A) + except ArithmeticError: + return None + if not np.isfinite(xmin): + return None + return xmin + + +def _quadmin(a, fa, fpa, b, fb): + """ + Finds the minimizer for a quadratic polynomial that goes through + the points (a,fa), (b,fb) with derivative at a of fpa. 
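An editorial aside, not part of the original SciPy docstring: a quick doctest check of the closed-form minimizer, assuming this private module is importable as ``scipy.optimize._linesearch``. For ``f(x) = (x - 3)**2`` we have ``f(0) = 9``, ``f'(0) = -6`` and ``f(1) = 4``, and the quadratic interpolant recovers the minimizer exactly.

>>> from scipy.optimize._linesearch import _quadmin
>>> _quadmin(0.0, 9.0, -6.0, 1.0, 4.0)
3.0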
+ + """ + # f(x) = B*(x-a)^2 + C*(x-a) + D + with np.errstate(divide='raise', over='raise', invalid='raise'): + try: + D = fa + C = fpa + db = b - a * 1.0 + B = (fb - D - C * db) / (db * db) + xmin = a - C / (2.0 * B) + except ArithmeticError: + return None + if not np.isfinite(xmin): + return None + return xmin + + +def _zoom(a_lo, a_hi, phi_lo, phi_hi, derphi_lo, + phi, derphi, phi0, derphi0, c1, c2, extra_condition): + """Zoom stage of approximate linesearch satisfying strong Wolfe conditions. + + Part of the optimization algorithm in `scalar_search_wolfe2`. + + Notes + ----- + Implements Algorithm 3.6 (zoom) in Wright and Nocedal, + 'Numerical Optimization', 1999, pp. 61. + + """ + + maxiter = 10 + i = 0 + delta1 = 0.2 # cubic interpolant check + delta2 = 0.1 # quadratic interpolant check + phi_rec = phi0 + a_rec = 0 + while True: + # interpolate to find a trial step length between a_lo and + # a_hi Need to choose interpolation here. Use cubic + # interpolation and then if the result is within delta * + # dalpha or outside of the interval bounded by a_lo or a_hi + # then use quadratic interpolation, if the result is still too + # close, then use bisection + + dalpha = a_hi - a_lo + if dalpha < 0: + a, b = a_hi, a_lo + else: + a, b = a_lo, a_hi + + # minimizer of cubic interpolant + # (uses phi_lo, derphi_lo, phi_hi, and the most recent value of phi) + # + # if the result is too close to the end points (or out of the + # interval), then use quadratic interpolation with phi_lo, + # derphi_lo and phi_hi if the result is still too close to the + # end points (or out of the interval) then use bisection + + if (i > 0): + cchk = delta1 * dalpha + a_j = _cubicmin(a_lo, phi_lo, derphi_lo, a_hi, phi_hi, + a_rec, phi_rec) + if (i == 0) or (a_j is None) or (a_j > b - cchk) or (a_j < a + cchk): + qchk = delta2 * dalpha + a_j = _quadmin(a_lo, phi_lo, derphi_lo, a_hi, phi_hi) + if (a_j is None) or (a_j > b-qchk) or (a_j < a+qchk): + a_j = a_lo + 0.5*dalpha + + # Check new value of a_j + + phi_aj = phi(a_j) + if (phi_aj > phi0 + c1*a_j*derphi0) or (phi_aj >= phi_lo): + phi_rec = phi_hi + a_rec = a_hi + a_hi = a_j + phi_hi = phi_aj + else: + derphi_aj = derphi(a_j) + if abs(derphi_aj) <= -c2*derphi0 and extra_condition(a_j, phi_aj): + a_star = a_j + val_star = phi_aj + valprime_star = derphi_aj + break + if derphi_aj*(a_hi - a_lo) >= 0: + phi_rec = phi_hi + a_rec = a_hi + a_hi = a_lo + phi_hi = phi_lo + else: + phi_rec = phi_lo + a_rec = a_lo + a_lo = a_j + phi_lo = phi_aj + derphi_lo = derphi_aj + i += 1 + if (i > maxiter): + # Failed to find a conforming step size + a_star = None + val_star = None + valprime_star = None + break + return a_star, val_star, valprime_star + + +#------------------------------------------------------------------------------ +# Armijo line and scalar searches +#------------------------------------------------------------------------------ + +def line_search_armijo(f, xk, pk, gfk, old_fval, args=(), c1=1e-4, alpha0=1): + """Minimize over alpha, the function ``f(xk+alpha pk)``. + + Parameters + ---------- + f : callable + Function to be minimized. + xk : array_like + Current point. + pk : array_like + Search direction. + gfk : array_like + Gradient of `f` at point `xk`. + old_fval : float + Value of `f` at point `xk`. + args : tuple, optional + Optional arguments. + c1 : float, optional + Value to control stopping criterion. + alpha0 : scalar, optional + Value of `alpha` at start of the optimization. 
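An editorial aside, not part of the original SciPy docstring: a minimal doctest sketch along the steepest-descent direction of ``f(x) = x @ x``, assuming this private module is importable as ``scipy.optimize._linesearch``. Backtracking from ``alpha0 = 1`` lands on the exact one-dimensional minimizer here.

>>> import numpy as np
>>> from scipy.optimize._linesearch import line_search_armijo
>>> def f(x):
...     return float(np.dot(x, x))
>>> xk = np.array([3.0])
>>> gfk = 2.0 * xk                        # gradient of f at xk
>>> alpha, nfev, fval = line_search_armijo(f, xk, -gfk, gfk, f(xk))
>>> float(alpha), fval
(0.5, 0.0)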
+ + Returns + ------- + alpha + f_count + f_val_at_alpha + + Notes + ----- + Uses the interpolation algorithm (Armijo backtracking) as suggested by + Wright and Nocedal in 'Numerical Optimization', 1999, pp. 56-57 + + """ + xk = np.atleast_1d(xk) + fc = [0] + + def phi(alpha1): + fc[0] += 1 + return f(xk + alpha1*pk, *args) + + if old_fval is None: + phi0 = phi(0.) + else: + phi0 = old_fval # compute f(xk) -- done in past loop + + derphi0 = np.dot(gfk, pk) + alpha, phi1 = scalar_search_armijo(phi, phi0, derphi0, c1=c1, + alpha0=alpha0) + return alpha, fc[0], phi1 + + +def line_search_BFGS(f, xk, pk, gfk, old_fval, args=(), c1=1e-4, alpha0=1): + """ + Compatibility wrapper for `line_search_armijo` + """ + r = line_search_armijo(f, xk, pk, gfk, old_fval, args=args, c1=c1, + alpha0=alpha0) + return r[0], r[1], 0, r[2] + + +def scalar_search_armijo(phi, phi0, derphi0, c1=1e-4, alpha0=1, amin=0): + """Minimize over alpha, the function ``phi(alpha)``. + + Uses the interpolation algorithm (Armijo backtracking) as suggested by + Wright and Nocedal in 'Numerical Optimization', 1999, pp. 56-57 + + alpha > 0 is assumed to be a descent direction. + + Returns + ------- + alpha + phi1 + + """ + phi_a0 = phi(alpha0) + if phi_a0 <= phi0 + c1*alpha0*derphi0: + return alpha0, phi_a0 + + # Otherwise, compute the minimizer of a quadratic interpolant: + + alpha1 = -(derphi0) * alpha0**2 / 2.0 / (phi_a0 - phi0 - derphi0 * alpha0) + phi_a1 = phi(alpha1) + + if (phi_a1 <= phi0 + c1*alpha1*derphi0): + return alpha1, phi_a1 + + # Otherwise, loop with cubic interpolation until we find an alpha which + # satisfies the first Wolfe condition (since we are backtracking, we will + # assume that the value of alpha is not too small and satisfies the second + # condition. + + while alpha1 > amin: # we are assuming alpha>0 is a descent direction + factor = alpha0**2 * alpha1**2 * (alpha1-alpha0) + a = alpha0**2 * (phi_a1 - phi0 - derphi0*alpha1) - \ + alpha1**2 * (phi_a0 - phi0 - derphi0*alpha0) + a = a / factor + b = -alpha0**3 * (phi_a1 - phi0 - derphi0*alpha1) + \ + alpha1**3 * (phi_a0 - phi0 - derphi0*alpha0) + b = b / factor + + alpha2 = (-b + np.sqrt(abs(b**2 - 3 * a * derphi0))) / (3.0*a) + phi_a2 = phi(alpha2) + + if (phi_a2 <= phi0 + c1*alpha2*derphi0): + return alpha2, phi_a2 + + if (alpha1 - alpha2) > alpha1 / 2.0 or (1 - alpha2/alpha1) < 0.96: + alpha2 = alpha1 / 2.0 + + alpha0 = alpha1 + alpha1 = alpha2 + phi_a0 = phi_a1 + phi_a1 = phi_a2 + + # Failed to find a suitable step length + return None, phi_a1 + + +#------------------------------------------------------------------------------ +# Non-monotone line search for DF-SANE +#------------------------------------------------------------------------------ + +def _nonmonotone_line_search_cruz(f, x_k, d, prev_fs, eta, + gamma=1e-4, tau_min=0.1, tau_max=0.5): + """ + Nonmonotone backtracking line search as described in [1]_ + + Parameters + ---------- + f : callable + Function returning a tuple ``(f, F)`` where ``f`` is the value + of a merit function and ``F`` the residual. + x_k : ndarray + Initial position. + d : ndarray + Search direction. + prev_fs : float + List of previous merit function values. Should have ``len(prev_fs) <= M`` + where ``M`` is the nonmonotonicity window parameter. 
+ eta : float + Allowed merit function increase, see [1]_ + gamma, tau_min, tau_max : float, optional + Search parameters, see [1]_ + + Returns + ------- + alpha : float + Step length + xp : ndarray + Next position + fp : float + Merit function value at next position + Fp : ndarray + Residual at next position + + References + ---------- + [1] "Spectral residual method without gradient information for solving + large-scale nonlinear systems of equations." W. La Cruz, + J.M. Martinez, M. Raydan. Math. Comp. **75**, 1429 (2006). + + """ + f_k = prev_fs[-1] + f_bar = max(prev_fs) + + alpha_p = 1 + alpha_m = 1 + alpha = 1 + + while True: + xp = x_k + alpha_p * d + fp, Fp = f(xp) + + if fp <= f_bar + eta - gamma * alpha_p**2 * f_k: + alpha = alpha_p + break + + alpha_tp = alpha_p**2 * f_k / (fp + (2*alpha_p - 1)*f_k) + + xp = x_k - alpha_m * d + fp, Fp = f(xp) + + if fp <= f_bar + eta - gamma * alpha_m**2 * f_k: + alpha = -alpha_m + break + + alpha_tm = alpha_m**2 * f_k / (fp + (2*alpha_m - 1)*f_k) + + alpha_p = np.clip(alpha_tp, tau_min * alpha_p, tau_max * alpha_p) + alpha_m = np.clip(alpha_tm, tau_min * alpha_m, tau_max * alpha_m) + + return alpha, xp, fp, Fp + + +def _nonmonotone_line_search_cheng(f, x_k, d, f_k, C, Q, eta, + gamma=1e-4, tau_min=0.1, tau_max=0.5, + nu=0.85): + """ + Nonmonotone line search from [1] + + Parameters + ---------- + f : callable + Function returning a tuple ``(f, F)`` where ``f`` is the value + of a merit function and ``F`` the residual. + x_k : ndarray + Initial position. + d : ndarray + Search direction. + f_k : float + Initial merit function value. + C, Q : float + Control parameters. On the first iteration, give values + Q=1.0, C=f_k + eta : float + Allowed merit function increase, see [1]_ + nu, gamma, tau_min, tau_max : float, optional + Search parameters, see [1]_ + + Returns + ------- + alpha : float + Step length + xp : ndarray + Next position + fp : float + Merit function value at next position + Fp : ndarray + Residual at next position + C : float + New value for the control parameter C + Q : float + New value for the control parameter Q + + References + ---------- + .. [1] W. Cheng & D.-H. Li, ''A derivative-free nonmonotone line + search and its application to the spectral residual + method'', IMA J. Numer. Anal. 29, 814 (2009). + + """ + alpha_p = 1 + alpha_m = 1 + alpha = 1 + + while True: + xp = x_k + alpha_p * d + fp, Fp = f(xp) + + if fp <= C + eta - gamma * alpha_p**2 * f_k: + alpha = alpha_p + break + + alpha_tp = alpha_p**2 * f_k / (fp + (2*alpha_p - 1)*f_k) + + xp = x_k - alpha_m * d + fp, Fp = f(xp) + + if fp <= C + eta - gamma * alpha_m**2 * f_k: + alpha = -alpha_m + break + + alpha_tm = alpha_m**2 * f_k / (fp + (2*alpha_m - 1)*f_k) + + alpha_p = np.clip(alpha_tp, tau_min * alpha_p, tau_max * alpha_p) + alpha_m = np.clip(alpha_tm, tau_min * alpha_m, tau_max * alpha_m) + + # Update C and Q + Q_next = nu * Q + 1 + C = (nu * Q * (C + eta) + fp) / Q_next + Q = Q_next + + return alpha, xp, fp, Fp, C, Q diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_linprog.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_linprog.py new file mode 100644 index 0000000000000000000000000000000000000000..054ba471dcbd4622ab9c2fb9dda313bb124c0451 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_linprog.py @@ -0,0 +1,733 @@ +""" +A top-level linear programming interface. + +.. 
versionadded:: 0.15.0 + +Functions +--------- +.. autosummary:: + :toctree: generated/ + + linprog + linprog_verbose_callback + linprog_terse_callback + +""" + +import numpy as np + +from ._optimize import OptimizeResult, OptimizeWarning +from warnings import warn +from ._linprog_highs import _linprog_highs +from ._linprog_ip import _linprog_ip +from ._linprog_simplex import _linprog_simplex +from ._linprog_rs import _linprog_rs +from ._linprog_doc import (_linprog_highs_doc, _linprog_ip_doc, # noqa: F401 + _linprog_rs_doc, _linprog_simplex_doc, + _linprog_highs_ipm_doc, _linprog_highs_ds_doc) +from ._linprog_util import ( + _parse_linprog, _presolve, _get_Abc, _LPProblem, _autoscale, + _postsolve, _check_result, _display_summary) +from copy import deepcopy + +__all__ = ['linprog', 'linprog_verbose_callback', 'linprog_terse_callback'] + +__docformat__ = "restructuredtext en" + +LINPROG_METHODS = [ + 'simplex', 'revised simplex', 'interior-point', 'highs', 'highs-ds', 'highs-ipm' +] + + +def linprog_verbose_callback(res): + """ + A sample callback function demonstrating the linprog callback interface. + This callback produces detailed output to sys.stdout before each iteration + and after the final iteration of the simplex algorithm. + + Parameters + ---------- + res : A `scipy.optimize.OptimizeResult` consisting of the following fields: + + x : 1-D array + The independent variable vector which optimizes the linear + programming problem. + fun : float + Value of the objective function. + success : bool + True if the algorithm succeeded in finding an optimal solution. + slack : 1-D array + The values of the slack variables. Each slack variable corresponds + to an inequality constraint. If the slack is zero, then the + corresponding constraint is active. + con : 1-D array + The (nominally zero) residuals of the equality constraints, that is, + ``b - A_eq @ x`` + phase : int + The phase of the optimization being executed. In phase 1 a basic + feasible solution is sought and the T has an additional row + representing an alternate objective function. + status : int + An integer representing the exit status of the optimization: + + ``0`` : Optimization terminated successfully + + ``1`` : Iteration limit reached + + ``2`` : Problem appears to be infeasible + + ``3`` : Problem appears to be unbounded + + ``4`` : Serious numerical difficulties encountered + + nit : int + The number of iterations performed. + message : str + A string descriptor of the exit status of the optimization. + """ + x = res['x'] + fun = res['fun'] + phase = res['phase'] + status = res['status'] + nit = res['nit'] + message = res['message'] + complete = res['complete'] + + saved_printoptions = np.get_printoptions() + np.set_printoptions(linewidth=500, + formatter={'float': lambda x: f"{x: 12.4f}"}) + if status: + print('--------- Simplex Early Exit -------\n') + print(f'The simplex method exited early with status {status:d}') + print(message) + elif complete: + print('--------- Simplex Complete --------\n') + print(f'Iterations required: {nit}') + else: + print(f'--------- Iteration {nit:d} ---------\n') + + if nit > 0: + if phase == 1: + print('Current Pseudo-Objective Value:') + else: + print('Current Objective Value:') + print('f = ', fun) + print() + print('Current Solution Vector:') + print('x = ', x) + print() + + np.set_printoptions(**saved_printoptions) + + +def linprog_terse_callback(res): + """ + A sample callback function demonstrating the linprog callback interface. 
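An editorial aside, not part of the original SciPy docstring: a sketch of a user-defined callback that follows the interface described for `linprog` callbacks. It simply records the iteration count and objective value, and is passed to `linprog` with one of the legacy methods, since the HiGHS solvers do not currently support callbacks.

>>> from scipy.optimize import linprog
>>> history = []
>>> def record(res):                      # res is an OptimizeResult
...     history.append((res.nit, res.fun))
>>> c = [-1, 4]
>>> A_ub = [[-3, 1], [1, 2]]
>>> b_ub = [6, 4]
>>> r = linprog(c, A_ub=A_ub, b_ub=b_ub, bounds=[(None, None), (-3, None)],
...             method='simplex', callback=record)
>>> len(history) > 0
True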
+ This callback produces brief output to sys.stdout before each iteration + and after the final iteration of the simplex algorithm. + + Parameters + ---------- + res : A `scipy.optimize.OptimizeResult` consisting of the following fields: + + x : 1-D array + The independent variable vector which optimizes the linear + programming problem. + fun : float + Value of the objective function. + success : bool + True if the algorithm succeeded in finding an optimal solution. + slack : 1-D array + The values of the slack variables. Each slack variable corresponds + to an inequality constraint. If the slack is zero, then the + corresponding constraint is active. + con : 1-D array + The (nominally zero) residuals of the equality constraints, that is, + ``b - A_eq @ x``. + phase : int + The phase of the optimization being executed. In phase 1 a basic + feasible solution is sought and the T has an additional row + representing an alternate objective function. + status : int + An integer representing the exit status of the optimization: + + ``0`` : Optimization terminated successfully + + ``1`` : Iteration limit reached + + ``2`` : Problem appears to be infeasible + + ``3`` : Problem appears to be unbounded + + ``4`` : Serious numerical difficulties encountered + + nit : int + The number of iterations performed. + message : str + A string descriptor of the exit status of the optimization. + """ + nit = res['nit'] + x = res['x'] + + if nit == 0: + print("Iter: X:") + print(f"{nit: <5d} ", end="") + print(x) + + +def linprog(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None, + bounds=(0, None), method='highs', callback=None, + options=None, x0=None, integrality=None): + r""" + Linear programming: minimize a linear objective function subject to linear + equality and inequality constraints. + + Linear programming solves problems of the following form: + + .. math:: + + \min_x \ & c^T x \\ + \mbox{such that} \ & A_{ub} x \leq b_{ub},\\ + & A_{eq} x = b_{eq},\\ + & l \leq x \leq u , + + where :math:`x` is a vector of decision variables; :math:`c`, + :math:`b_{ub}`, :math:`b_{eq}`, :math:`l`, and :math:`u` are vectors; and + :math:`A_{ub}` and :math:`A_{eq}` are matrices. + + Alternatively, that's: + + - minimize :: + + c @ x + + - such that :: + + A_ub @ x <= b_ub + A_eq @ x == b_eq + lb <= x <= ub + + Note that by default ``lb = 0`` and ``ub = None``. Other bounds can be + specified with ``bounds``. + + Parameters + ---------- + c : 1-D array + The coefficients of the linear objective function to be minimized. + A_ub : 2-D array, optional + The inequality constraint matrix. Each row of ``A_ub`` specifies the + coefficients of a linear inequality constraint on ``x``. + b_ub : 1-D array, optional + The inequality constraint vector. Each element represents an + upper bound on the corresponding value of ``A_ub @ x``. + A_eq : 2-D array, optional + The equality constraint matrix. Each row of ``A_eq`` specifies the + coefficients of a linear equality constraint on ``x``. + b_eq : 1-D array, optional + The equality constraint vector. Each element of ``A_eq @ x`` must equal + the corresponding element of ``b_eq``. + bounds : sequence, optional + A sequence of ``(min, max)`` pairs for each element in ``x``, defining + the minimum and maximum values of that decision variable. + If a single tuple ``(min, max)`` is provided, then ``min`` and ``max`` + will serve as bounds for all decision variables. + Use ``None`` to indicate that there is no bound. 
For instance, the + default bound ``(0, None)`` means that all decision variables are + non-negative, and the pair ``(None, None)`` means no bounds at all, + i.e. all variables are allowed to be any real. + method : str, optional + The algorithm used to solve the standard form problem. + The following are supported. + + - :ref:`'highs' ` (default) + - :ref:`'highs-ds' ` + - :ref:`'highs-ipm' ` + - :ref:`'interior-point' ` (legacy) + - :ref:`'revised simplex' ` (legacy) + - :ref:`'simplex' ` (legacy) + + The legacy methods are deprecated and will be removed in SciPy 1.11.0. + callback : callable, optional + If a callback function is provided, it will be called at least once per + iteration of the algorithm. The callback function must accept a single + `scipy.optimize.OptimizeResult` consisting of the following fields: + + x : 1-D array + The current solution vector. + fun : float + The current value of the objective function ``c @ x``. + success : bool + ``True`` when the algorithm has completed successfully. + slack : 1-D array + The (nominally positive) values of the slack, + ``b_ub - A_ub @ x``. + con : 1-D array + The (nominally zero) residuals of the equality constraints, + ``b_eq - A_eq @ x``. + phase : int + The phase of the algorithm being executed. + status : int + An integer representing the status of the algorithm. + + ``0`` : Optimization proceeding nominally. + + ``1`` : Iteration limit reached. + + ``2`` : Problem appears to be infeasible. + + ``3`` : Problem appears to be unbounded. + + ``4`` : Numerical difficulties encountered. + + nit : int + The current iteration number. + message : str + A string descriptor of the algorithm status. + + Callback functions are not currently supported by the HiGHS methods. + + options : dict, optional + A dictionary of solver options. All methods accept the following + options: + + maxiter : int + Maximum number of iterations to perform. + Default: see method-specific documentation. + disp : bool + Set to ``True`` to print convergence messages. + Default: ``False``. + presolve : bool + Set to ``False`` to disable automatic presolve. + Default: ``True``. + + All methods except the HiGHS solvers also accept: + + tol : float + A tolerance which determines when a residual is "close enough" to + zero to be considered exactly zero. + autoscale : bool + Set to ``True`` to automatically perform equilibration. + Consider using this option if the numerical values in the + constraints are separated by several orders of magnitude. + Default: ``False``. + rr : bool + Set to ``False`` to disable automatic redundancy removal. + Default: ``True``. + rr_method : string + Method used to identify and remove redundant rows from the + equality constraint matrix after presolve. For problems with + dense input, the available methods for redundancy removal are: + + ``SVD``: + Repeatedly performs singular value decomposition on + the matrix, detecting redundant rows based on nonzeros + in the left singular vectors that correspond with + zero singular values. May be fast when the matrix is + nearly full rank. + ``pivot``: + Uses the algorithm presented in [5]_ to identify + redundant rows. + ``ID``: + Uses a randomized interpolative decomposition. + Identifies columns of the matrix transpose not used in + a full-rank interpolative decomposition of the matrix. + ``None``: + Uses ``svd`` if the matrix is nearly full rank, that is, + the difference between the matrix rank and the number + of rows is less than five. If not, uses ``pivot``. 
The + behavior of this default is subject to change without + prior notice. + + Default: None. + For problems with sparse input, this option is ignored, and the + pivot-based algorithm presented in [5]_ is used. + + For method-specific options, see + :func:`show_options('linprog') `. + + x0 : 1-D array, optional + Guess values of the decision variables, which will be refined by + the optimization algorithm. This argument is currently used only by the + :ref:`'revised simplex' ` method, + and can only be used if `x0` represents a basic feasible solution. + + integrality : 1-D array or int, optional + Indicates the type of integrality constraint on each decision variable. + + ``0`` : Continuous variable; no integrality constraint. + + ``1`` : Integer variable; decision variable must be an integer + within `bounds`. + + ``2`` : Semi-continuous variable; decision variable must be within + `bounds` or take value ``0``. + + ``3`` : Semi-integer variable; decision variable must be an integer + within `bounds` or take value ``0``. + + By default, all variables are continuous. + + For mixed integrality constraints, supply an array of shape ``c.shape``. + To infer a constraint on each decision variable from shorter inputs, + the argument will be broadcast to ``c.shape`` using `numpy.broadcast_to`. + + This argument is currently used only by the + :ref:`'highs' ` method and is ignored otherwise. + + Returns + ------- + res : OptimizeResult + A :class:`scipy.optimize.OptimizeResult` consisting of the fields + below. Note that the return types of the fields may depend on whether + the optimization was successful, therefore it is recommended to check + `OptimizeResult.status` before relying on the other fields: + + x : 1-D array + The values of the decision variables that minimizes the + objective function while satisfying the constraints. + fun : float + The optimal value of the objective function ``c @ x``. + slack : 1-D array + The (nominally positive) values of the slack variables, + ``b_ub - A_ub @ x``. + con : 1-D array + The (nominally zero) residuals of the equality constraints, + ``b_eq - A_eq @ x``. + success : bool + ``True`` when the algorithm succeeds in finding an optimal + solution. + status : int + An integer representing the exit status of the algorithm. + + ``0`` : Optimization terminated successfully. + + ``1`` : Iteration limit reached. + + ``2`` : Problem appears to be infeasible. + + ``3`` : Problem appears to be unbounded. + + ``4`` : Numerical difficulties encountered. + + nit : int + The total number of iterations performed in all phases. + message : str + A string descriptor of the exit status of the algorithm. + + See Also + -------- + show_options : Additional options accepted by the solvers. + + Notes + ----- + This section describes the available solvers that can be selected by the + 'method' parameter. + + :ref:`'highs-ds' `, and + :ref:`'highs-ipm' ` are interfaces to the + HiGHS simplex and interior-point method solvers [13]_, respectively. + :ref:`'highs' ` (default) chooses between + the two automatically. These are the fastest linear + programming solvers in SciPy, especially for large, sparse problems; + which of these two is faster is problem-dependent. + The other solvers are legacy methods and will be removed when `callback` is + supported by the HiGHS methods. + + Method :ref:`'highs-ds' `, is a wrapper of the C++ high + performance dual revised simplex implementation (HSOL) [13]_, [14]_. 
+ Method :ref:`'highs-ipm' ` is a wrapper of a C++ + implementation of an **i**\ nterior-\ **p**\ oint **m**\ ethod [13]_; it + features a crossover routine, so it is as accurate as a simplex solver. + Method :ref:`'highs' ` chooses between the two + automatically. + For new code involving `linprog`, we recommend explicitly choosing one of + these three method values. + + .. versionadded:: 1.6.0 + + Method :ref:`'interior-point' ` + uses the primal-dual path following algorithm + as outlined in [4]_. This algorithm supports sparse constraint matrices and + is typically faster than the simplex methods, especially for large, sparse + problems. Note, however, that the solution returned may be slightly less + accurate than those of the simplex methods and will not, in general, + correspond with a vertex of the polytope defined by the constraints. + + .. versionadded:: 1.0.0 + + Method :ref:`'revised simplex' ` + uses the revised simplex method as described in + [9]_, except that a factorization [11]_ of the basis matrix, rather than + its inverse, is efficiently maintained and used to solve the linear systems + at each iteration of the algorithm. + + .. versionadded:: 1.3.0 + + Method :ref:`'simplex' ` uses a traditional, + full-tableau implementation of + Dantzig's simplex algorithm [1]_, [2]_ (*not* the + Nelder-Mead simplex). This algorithm is included for backwards + compatibility and educational purposes. + + .. versionadded:: 0.15.0 + + Before applying :ref:`'interior-point' `, + :ref:`'revised simplex' `, or + :ref:`'simplex' `, + a presolve procedure based on [8]_ attempts + to identify trivial infeasibilities, trivial unboundedness, and potential + problem simplifications. Specifically, it checks for: + + - rows of zeros in ``A_eq`` or ``A_ub``, representing trivial constraints; + - columns of zeros in ``A_eq`` `and` ``A_ub``, representing unconstrained + variables; + - column singletons in ``A_eq``, representing fixed variables; and + - column singletons in ``A_ub``, representing simple bounds. + + If presolve reveals that the problem is unbounded (e.g. an unconstrained + and unbounded variable has negative cost) or infeasible (e.g., a row of + zeros in ``A_eq`` corresponds with a nonzero in ``b_eq``), the solver + terminates with the appropriate status code. Note that presolve terminates + as soon as any sign of unboundedness is detected; consequently, a problem + may be reported as unbounded when in reality the problem is infeasible + (but infeasibility has not been detected yet). Therefore, if it is + important to know whether the problem is actually infeasible, solve the + problem again with option ``presolve=False``. + + If neither infeasibility nor unboundedness are detected in a single pass + of the presolve, bounds are tightened where possible and fixed + variables are removed from the problem. Then, linearly dependent rows + of the ``A_eq`` matrix are removed, (unless they represent an + infeasibility) to avoid numerical difficulties in the primary solve + routine. Note that rows that are nearly linearly dependent (within a + prescribed tolerance) may also be removed, which can change the optimal + solution in rare cases. If this is a concern, eliminate redundancy from + your problem formulation and run with option ``rr=False`` or + ``presolve=False``. 
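An editorial aside, not part of the original SciPy docstring: a sketch of re-solving with ``presolve=False`` to double-check how a problem was classified, as suggested above. The two inequalities below cannot hold simultaneously, so the problem should be reported as infeasible on both paths.

>>> from scipy.optimize import linprog
>>> c = [1, 1]
>>> A_ub = [[1, 1], [-1, -1]]
>>> b_ub = [1, -3]                        # x0 + x1 <= 1 and x0 + x1 >= 3
>>> res = linprog(c, A_ub=A_ub, b_ub=b_ub)
>>> res2 = linprog(c, A_ub=A_ub, b_ub=b_ub, options={"presolve": False})
>>> res.status == res2.status == 2        # 2: problem appears to be infeasible
True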
+ + Several potential improvements can be made here: additional presolve + checks outlined in [8]_ should be implemented, the presolve routine should + be run multiple times (until no further simplifications can be made), and + more of the efficiency improvements from [5]_ should be implemented in the + redundancy removal routines. + + After presolve, the problem is transformed to standard form by converting + the (tightened) simple bounds to upper bound constraints, introducing + non-negative slack variables for inequality constraints, and expressing + unbounded variables as the difference between two non-negative variables. + Optionally, the problem is automatically scaled via equilibration [12]_. + The selected algorithm solves the standard form problem, and a + postprocessing routine converts the result to a solution to the original + problem. + + References + ---------- + .. [1] Dantzig, George B., Linear programming and extensions. Rand + Corporation Research Study Princeton Univ. Press, Princeton, NJ, + 1963 + .. [2] Hillier, S.H. and Lieberman, G.J. (1995), "Introduction to + Mathematical Programming", McGraw-Hill, Chapter 4. + .. [3] Bland, Robert G. New finite pivoting rules for the simplex method. + Mathematics of Operations Research (2), 1977: pp. 103-107. + .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point + optimizer for linear programming: an implementation of the + homogeneous algorithm." High performance optimization. Springer US, + 2000. 197-232. + .. [5] Andersen, Erling D. "Finding all linearly dependent rows in + large-scale linear programming." Optimization Methods and Software + 6.3 (1995): 219-227. + .. [6] Freund, Robert M. "Primal-Dual Interior-Point Methods for Linear + Programming based on Newton's Method." Unpublished Course Notes, + March 2004. Available 2/25/2017 at + https://ocw.mit.edu/courses/sloan-school-of-management/15-084j-nonlinear-programming-spring-2004/lecture-notes/lec14_int_pt_mthd.pdf + .. [7] Fourer, Robert. "Solving Linear Programs by Interior-Point Methods." + Unpublished Course Notes, August 26, 2005. Available 2/25/2017 at + http://www.4er.org/CourseNotes/Book%20B/B-III.pdf + .. [8] Andersen, Erling D., and Knud D. Andersen. "Presolving in linear + programming." Mathematical Programming 71.2 (1995): 221-245. + .. [9] Bertsimas, Dimitris, and J. Tsitsiklis. "Introduction to linear + programming." Athena Scientific 1 (1997): 997. + .. [10] Andersen, Erling D., et al. Implementation of interior point + methods for large scale linear programming. HEC/Universite de + Geneve, 1996. + .. [11] Bartels, Richard H. "A stabilization of the simplex method." + Journal in Numerische Mathematik 16.5 (1971): 414-434. + .. [12] Tomlin, J. A. "On scaling linear programming problems." + Mathematical Programming Study 4 (1975): 146-166. + .. [13] Huangfu, Q., Galabova, I., Feldmeier, M., and Hall, J. A. J. + "HiGHS - high performance software for linear optimization." + https://highs.dev/ + .. [14] Huangfu, Q. and Hall, J. A. J. "Parallelizing the dual revised + simplex method." Mathematical Programming Computation, 10 (1), + 119-142, 2018. DOI: 10.1007/s12532-017-0130-5 + + Examples + -------- + Consider the following problem: + + .. math:: + + \min_{x_0, x_1} \ -x_0 + 4x_1 & \\ + \mbox{such that} \ -3x_0 + x_1 & \leq 6,\\ + -x_0 - 2x_1 & \geq -4,\\ + x_1 & \geq -3. + + The problem is not presented in the form accepted by `linprog`. 
This is + easily remedied by converting the "greater than" inequality + constraint to a "less than" inequality constraint by + multiplying both sides by a factor of :math:`-1`. Note also that the last + constraint is really the simple bound :math:`-3 \leq x_1 \leq \infty`. + Finally, since there are no bounds on :math:`x_0`, we must explicitly + specify the bounds :math:`-\infty \leq x_0 \leq \infty`, as the + default is for variables to be non-negative. After collecting coeffecients + into arrays and tuples, the input for this problem is: + + >>> from scipy.optimize import linprog + >>> c = [-1, 4] + >>> A = [[-3, 1], [1, 2]] + >>> b = [6, 4] + >>> x0_bounds = (None, None) + >>> x1_bounds = (-3, None) + >>> res = linprog(c, A_ub=A, b_ub=b, bounds=[x0_bounds, x1_bounds]) + >>> res.fun + -22.0 + >>> res.x + array([10., -3.]) + >>> res.message + 'Optimization terminated successfully. (HiGHS Status 7: Optimal)' + + The marginals (AKA dual values / shadow prices / Lagrange multipliers) + and residuals (slacks) are also available. + + >>> res.ineqlin + residual: [ 3.900e+01 0.000e+00] + marginals: [-0.000e+00 -1.000e+00] + + For example, because the marginal associated with the second inequality + constraint is -1, we expect the optimal value of the objective function + to decrease by ``eps`` if we add a small amount ``eps`` to the right hand + side of the second inequality constraint: + + >>> eps = 0.05 + >>> b[1] += eps + >>> linprog(c, A_ub=A, b_ub=b, bounds=[x0_bounds, x1_bounds]).fun + -22.05 + + Also, because the residual on the first inequality constraint is 39, we + can decrease the right hand side of the first constraint by 39 without + affecting the optimal solution. + + >>> b = [6, 4] # reset to original values + >>> b[0] -= 39 + >>> linprog(c, A_ub=A, b_ub=b, bounds=[x0_bounds, x1_bounds]).fun + -22.0 + + """ + + meth = method.lower() + methods = {"highs", "highs-ds", "highs-ipm", + "simplex", "revised simplex", "interior-point"} + + if meth not in methods: + raise ValueError(f"Unknown solver '{method}'") + + if x0 is not None and meth != "revised simplex": + warning_message = "x0 is used only when method is 'revised simplex'. " + warn(warning_message, OptimizeWarning, stacklevel=2) + + if np.any(integrality) and not meth == "highs": + integrality = None + warning_message = ("Only `method='highs'` supports integer " + "constraints. Ignoring `integrality`.") + warn(warning_message, OptimizeWarning, stacklevel=2) + elif np.any(integrality): + integrality = np.broadcast_to(integrality, np.shape(c)) + else: + integrality = None + + lp = _LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0, integrality) + lp, solver_options = _parse_linprog(lp, options, meth) + tol = solver_options.get('tol', 1e-9) + + # Give unmodified problem to HiGHS + if meth.startswith('highs'): + if callback is not None: + raise NotImplementedError("HiGHS solvers do not support the " + "callback interface.") + highs_solvers = {'highs-ipm': 'ipm', 'highs-ds': 'simplex', + 'highs': None} + + sol = _linprog_highs(lp, solver=highs_solvers[meth], + **solver_options) + sol['status'], sol['message'] = ( + _check_result(sol['x'], sol['fun'], sol['status'], sol['slack'], + sol['con'], lp.bounds, tol, sol['message'], + integrality)) + sol['success'] = sol['status'] == 0 + return OptimizeResult(sol) + + warn(f"`method='{meth}'` is deprecated and will be removed in SciPy " + "1.11.0. Please use one of the HiGHS solvers (e.g. 
" + "`method='highs'`) in new code.", DeprecationWarning, stacklevel=2) + + iteration = 0 + complete = False # will become True if solved in presolve + undo = [] + + # Keep the original arrays to calculate slack/residuals for original + # problem. + lp_o = deepcopy(lp) + + # Solve trivial problem, eliminate variables, tighten bounds, etc. + rr_method = solver_options.pop('rr_method', None) # need to pop these; + rr = solver_options.pop('rr', True) # they're not passed to methods + c0 = 0 # we might get a constant term in the objective + if solver_options.pop('presolve', True): + (lp, c0, x, undo, complete, status, message) = _presolve(lp, rr, + rr_method, + tol) + + C, b_scale = 1, 1 # for trivial unscaling if autoscale is not used + postsolve_args = (lp_o._replace(bounds=lp.bounds), undo, C, b_scale) + + if not complete: + A, b, c, c0, x0 = _get_Abc(lp, c0) + if solver_options.pop('autoscale', False): + A, b, c, x0, C, b_scale = _autoscale(A, b, c, x0) + postsolve_args = postsolve_args[:-2] + (C, b_scale) + + if meth == 'simplex': + x, status, message, iteration = _linprog_simplex( + c, c0=c0, A=A, b=b, callback=callback, + postsolve_args=postsolve_args, **solver_options) + elif meth == 'interior-point': + x, status, message, iteration = _linprog_ip( + c, c0=c0, A=A, b=b, callback=callback, + postsolve_args=postsolve_args, **solver_options) + elif meth == 'revised simplex': + x, status, message, iteration = _linprog_rs( + c, c0=c0, A=A, b=b, x0=x0, callback=callback, + postsolve_args=postsolve_args, **solver_options) + + # Eliminate artificial variables, re-introduce presolved variables, etc. + disp = solver_options.get('disp', False) + + x, fun, slack, con = _postsolve(x, postsolve_args, complete) + + status, message = _check_result(x, fun, status, slack, con, lp_o.bounds, + tol, message, integrality) + + if disp: + _display_summary(message, status, fun, iteration) + + sol = { + 'x': x, + 'fun': fun, + 'slack': slack, + 'con': con, + 'status': status, + 'message': message, + 'nit': iteration, + 'success': status == 0} + + return OptimizeResult(sol) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_linprog_doc.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_linprog_doc.py new file mode 100644 index 0000000000000000000000000000000000000000..ba016aec6dafe74e48076875202704a3b85b822a --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_linprog_doc.py @@ -0,0 +1,1434 @@ +""" +Created on Sat Aug 22 19:49:17 2020 + +@author: matth +""" + + +def _linprog_highs_doc(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None, + bounds=None, method='highs', callback=None, + maxiter=None, disp=False, presolve=True, + time_limit=None, + dual_feasibility_tolerance=None, + primal_feasibility_tolerance=None, + ipm_optimality_tolerance=None, + simplex_dual_edge_weight_strategy=None, + mip_rel_gap=None, + **unknown_options): + r""" + Linear programming: minimize a linear objective function subject to linear + equality and inequality constraints using one of the HiGHS solvers. + + Linear programming solves problems of the following form: + + .. math:: + + \min_x \ & c^T x \\ + \mbox{such that} \ & A_{ub} x \leq b_{ub},\\ + & A_{eq} x = b_{eq},\\ + & l \leq x \leq u , + + where :math:`x` is a vector of decision variables; :math:`c`, + :math:`b_{ub}`, :math:`b_{eq}`, :math:`l`, and :math:`u` are vectors; and + :math:`A_{ub}` and :math:`A_{eq}` are matrices. 
+ + Alternatively, that's: + + minimize:: + + c @ x + + such that:: + + A_ub @ x <= b_ub + A_eq @ x == b_eq + lb <= x <= ub + + Note that by default ``lb = 0`` and ``ub = None`` unless specified with + ``bounds``. + + Parameters + ---------- + c : 1-D array + The coefficients of the linear objective function to be minimized. + A_ub : 2-D array, optional + The inequality constraint matrix. Each row of ``A_ub`` specifies the + coefficients of a linear inequality constraint on ``x``. + b_ub : 1-D array, optional + The inequality constraint vector. Each element represents an + upper bound on the corresponding value of ``A_ub @ x``. + A_eq : 2-D array, optional + The equality constraint matrix. Each row of ``A_eq`` specifies the + coefficients of a linear equality constraint on ``x``. + b_eq : 1-D array, optional + The equality constraint vector. Each element of ``A_eq @ x`` must equal + the corresponding element of ``b_eq``. + bounds : sequence, optional + A sequence of ``(min, max)`` pairs for each element in ``x``, defining + the minimum and maximum values of that decision variable. Use ``None`` + to indicate that there is no bound. By default, bounds are + ``(0, None)`` (all decision variables are non-negative). + If a single tuple ``(min, max)`` is provided, then ``min`` and + ``max`` will serve as bounds for all decision variables. + method : str + + This is the method-specific documentation for 'highs', which chooses + automatically between + :ref:`'highs-ds' ` and + :ref:`'highs-ipm' `. + :ref:`'interior-point' ` (default), + :ref:`'revised simplex' `, and + :ref:`'simplex' ` (legacy) + are also available. + integrality : 1-D array or int, optional + Indicates the type of integrality constraint on each decision variable. + + ``0`` : Continuous variable; no integrality constraint. + + ``1`` : Integer variable; decision variable must be an integer + within `bounds`. + + ``2`` : Semi-continuous variable; decision variable must be within + `bounds` or take value ``0``. + + ``3`` : Semi-integer variable; decision variable must be an integer + within `bounds` or take value ``0``. + + By default, all variables are continuous. + + For mixed integrality constraints, supply an array of shape `c.shape`. + To infer a constraint on each decision variable from shorter inputs, + the argument will be broadcast to `c.shape` using `np.broadcast_to`. + + This argument is currently used only by the ``'highs'`` method and + ignored otherwise. + + Options + ------- + maxiter : int + The maximum number of iterations to perform in either phase. + For :ref:`'highs-ipm' `, this does not + include the number of crossover iterations. Default is the largest + possible value for an ``int`` on the platform. + disp : bool (default: ``False``) + Set to ``True`` if indicators of optimization status are to be + printed to the console during optimization. + presolve : bool (default: ``True``) + Presolve attempts to identify trivial infeasibilities, + identify trivial unboundedness, and simplify the problem before + sending it to the main solver. It is generally recommended + to keep the default setting ``True``; set to ``False`` if + presolve is to be disabled. + time_limit : float + The maximum time in seconds allotted to solve the problem; + default is the largest possible value for a ``double`` on the + platform. + dual_feasibility_tolerance : double (default: 1e-07) + Dual feasibility tolerance for + :ref:`'highs-ds' `. 
+ The minimum of this and ``primal_feasibility_tolerance`` + is used for the feasibility tolerance of + :ref:`'highs-ipm' `. + primal_feasibility_tolerance : double (default: 1e-07) + Primal feasibility tolerance for + :ref:`'highs-ds' `. + The minimum of this and ``dual_feasibility_tolerance`` + is used for the feasibility tolerance of + :ref:`'highs-ipm' `. + ipm_optimality_tolerance : double (default: ``1e-08``) + Optimality tolerance for + :ref:`'highs-ipm' `. + Minimum allowable value is 1e-12. + simplex_dual_edge_weight_strategy : str (default: None) + Strategy for simplex dual edge weights. The default, ``None``, + automatically selects one of the following. + + ``'dantzig'`` uses Dantzig's original strategy of choosing the most + negative reduced cost. + + ``'devex'`` uses the strategy described in [15]_. + + ``steepest`` uses the exact steepest edge strategy as described in + [16]_. + + ``'steepest-devex'`` begins with the exact steepest edge strategy + until the computation is too costly or inexact and then switches to + the devex method. + + Currently, ``None`` always selects ``'steepest-devex'``, but this + may change as new options become available. + mip_rel_gap : double (default: None) + Termination criterion for MIP solver: solver will terminate when the + gap between the primal objective value and the dual objective bound, + scaled by the primal objective value, is <= mip_rel_gap. + unknown_options : dict + Optional arguments not used by this particular solver. If + ``unknown_options`` is non-empty, a warning is issued listing + all unused options. + + Returns + ------- + res : OptimizeResult + A :class:`scipy.optimize.OptimizeResult` consisting of the fields: + + x : 1D array + The values of the decision variables that minimizes the + objective function while satisfying the constraints. + fun : float + The optimal value of the objective function ``c @ x``. + slack : 1D array + The (nominally positive) values of the slack, + ``b_ub - A_ub @ x``. + con : 1D array + The (nominally zero) residuals of the equality constraints, + ``b_eq - A_eq @ x``. + success : bool + ``True`` when the algorithm succeeds in finding an optimal + solution. + status : int + An integer representing the exit status of the algorithm. + + ``0`` : Optimization terminated successfully. + + ``1`` : Iteration or time limit reached. + + ``2`` : Problem appears to be infeasible. + + ``3`` : Problem appears to be unbounded. + + ``4`` : The HiGHS solver ran into a problem. + + message : str + A string descriptor of the exit status of the algorithm. + nit : int + The total number of iterations performed. + For the HiGHS simplex method, this includes iterations in all + phases. For the HiGHS interior-point method, this does not include + crossover iterations. + crossover_nit : int + The number of primal/dual pushes performed during the + crossover routine for the HiGHS interior-point method. + This is ``0`` for the HiGHS simplex method. + ineqlin : OptimizeResult + Solution and sensitivity information corresponding to the + inequality constraints, `b_ub`. A dictionary consisting of the + fields: + + residual : np.ndnarray + The (nominally positive) values of the slack variables, + ``b_ub - A_ub @ x``. This quantity is also commonly + referred to as "slack". + + marginals : np.ndarray + The sensitivity (partial derivative) of the objective + function with respect to the right-hand side of the + inequality constraints, `b_ub`. 
+ + eqlin : OptimizeResult + Solution and sensitivity information corresponding to the + equality constraints, `b_eq`. A dictionary consisting of the + fields: + + residual : np.ndarray + The (nominally zero) residuals of the equality constraints, + ``b_eq - A_eq @ x``. + + marginals : np.ndarray + The sensitivity (partial derivative) of the objective + function with respect to the right-hand side of the + equality constraints, `b_eq`. + + lower, upper : OptimizeResult + Solution and sensitivity information corresponding to the + lower and upper bounds on decision variables, `bounds`. + + residual : np.ndarray + The (nominally positive) values of the quantity + ``x - lb`` (lower) or ``ub - x`` (upper). + + marginals : np.ndarray + The sensitivity (partial derivative) of the objective + function with respect to the lower and upper + `bounds`. + + Notes + ----- + + Method :ref:`'highs-ds' ` is a wrapper + of the C++ high performance dual revised simplex implementation (HSOL) + [13]_, [14]_. Method :ref:`'highs-ipm' ` + is a wrapper of a C++ implementation of an **i**\ nterior-\ **p**\ oint + **m**\ ethod [13]_; it features a crossover routine, so it is as accurate + as a simplex solver. Method :ref:`'highs' ` chooses + between the two automatically. For new code involving `linprog`, we + recommend explicitly choosing one of these three method values instead of + :ref:`'interior-point' ` (default), + :ref:`'revised simplex' `, and + :ref:`'simplex' ` (legacy). + + The result fields `ineqlin`, `eqlin`, `lower`, and `upper` all contain + `marginals`, or partial derivatives of the objective function with respect + to the right-hand side of each constraint. These partial derivatives are + also referred to as "Lagrange multipliers", "dual values", and + "shadow prices". The sign convention of `marginals` is opposite that + of Lagrange multipliers produced by many nonlinear solvers. + + References + ---------- + .. [13] Huangfu, Q., Galabova, I., Feldmeier, M., and Hall, J. A. J. + "HiGHS - high performance software for linear optimization." + https://highs.dev/ + .. [14] Huangfu, Q. and Hall, J. A. J. "Parallelizing the dual revised + simplex method." Mathematical Programming Computation, 10 (1), + 119-142, 2018. DOI: 10.1007/s12532-017-0130-5 + .. [15] Harris, Paula MJ. "Pivot selection methods of the Devex LP code." + Mathematical programming 5.1 (1973): 1-28. + .. [16] Goldfarb, Donald, and John Ker Reid. "A practicable steepest-edge + simplex algorithm." Mathematical Programming 12.1 (1977): 361-371. + """ + pass + + +def _linprog_highs_ds_doc(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None, + bounds=None, method='highs-ds', callback=None, + maxiter=None, disp=False, presolve=True, + time_limit=None, + dual_feasibility_tolerance=None, + primal_feasibility_tolerance=None, + simplex_dual_edge_weight_strategy=None, + **unknown_options): + r""" + Linear programming: minimize a linear objective function subject to linear + equality and inequality constraints using the HiGHS dual simplex solver. + + Linear programming solves problems of the following form: + + .. math:: + + \min_x \ & c^T x \\ + \mbox{such that} \ & A_{ub} x \leq b_{ub},\\ + & A_{eq} x = b_{eq},\\ + & l \leq x \leq u , + + where :math:`x` is a vector of decision variables; :math:`c`, + :math:`b_{ub}`, :math:`b_{eq}`, :math:`l`, and :math:`u` are vectors; and + :math:`A_{ub}` and :math:`A_{eq}` are matrices. 
+ + Alternatively, that's: + + minimize:: + + c @ x + + such that:: + + A_ub @ x <= b_ub + A_eq @ x == b_eq + lb <= x <= ub + + Note that by default ``lb = 0`` and ``ub = None`` unless specified with + ``bounds``. + + Parameters + ---------- + c : 1-D array + The coefficients of the linear objective function to be minimized. + A_ub : 2-D array, optional + The inequality constraint matrix. Each row of ``A_ub`` specifies the + coefficients of a linear inequality constraint on ``x``. + b_ub : 1-D array, optional + The inequality constraint vector. Each element represents an + upper bound on the corresponding value of ``A_ub @ x``. + A_eq : 2-D array, optional + The equality constraint matrix. Each row of ``A_eq`` specifies the + coefficients of a linear equality constraint on ``x``. + b_eq : 1-D array, optional + The equality constraint vector. Each element of ``A_eq @ x`` must equal + the corresponding element of ``b_eq``. + bounds : sequence, optional + A sequence of ``(min, max)`` pairs for each element in ``x``, defining + the minimum and maximum values of that decision variable. Use ``None`` + to indicate that there is no bound. By default, bounds are + ``(0, None)`` (all decision variables are non-negative). + If a single tuple ``(min, max)`` is provided, then ``min`` and + ``max`` will serve as bounds for all decision variables. + method : str + + This is the method-specific documentation for 'highs-ds'. + :ref:`'highs' `, + :ref:`'highs-ipm' `, + :ref:`'interior-point' ` (default), + :ref:`'revised simplex' `, and + :ref:`'simplex' ` (legacy) + are also available. + + Options + ------- + maxiter : int + The maximum number of iterations to perform in either phase. + Default is the largest possible value for an ``int`` on the platform. + disp : bool (default: ``False``) + Set to ``True`` if indicators of optimization status are to be + printed to the console during optimization. + presolve : bool (default: ``True``) + Presolve attempts to identify trivial infeasibilities, + identify trivial unboundedness, and simplify the problem before + sending it to the main solver. It is generally recommended + to keep the default setting ``True``; set to ``False`` if + presolve is to be disabled. + time_limit : float + The maximum time in seconds allotted to solve the problem; + default is the largest possible value for a ``double`` on the + platform. + dual_feasibility_tolerance : double (default: 1e-07) + Dual feasibility tolerance for + :ref:`'highs-ds' `. + primal_feasibility_tolerance : double (default: 1e-07) + Primal feasibility tolerance for + :ref:`'highs-ds' `. + simplex_dual_edge_weight_strategy : str (default: None) + Strategy for simplex dual edge weights. The default, ``None``, + automatically selects one of the following. + + ``'dantzig'`` uses Dantzig's original strategy of choosing the most + negative reduced cost. + + ``'devex'`` uses the strategy described in [15]_. + + ``steepest`` uses the exact steepest edge strategy as described in + [16]_. + + ``'steepest-devex'`` begins with the exact steepest edge strategy + until the computation is too costly or inexact and then switches to + the devex method. + + Currently, ``None`` always selects ``'steepest-devex'``, but this + may change as new options become available. + unknown_options : dict + Optional arguments not used by this particular solver. If + ``unknown_options`` is non-empty, a warning is issued listing + all unused options. 
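An editorial aside, not part of the original SciPy docstring: a sketch of passing the options above through `linprog`, reusing the example problem from the `linprog` docstring (option keys as documented here).

>>> from scipy.optimize import linprog
>>> c = [-1, 4]
>>> A_ub = [[-3, 1], [1, 2]]
>>> b_ub = [6, 4]
>>> bounds = [(None, None), (-3, None)]
>>> res = linprog(c, A_ub=A_ub, b_ub=b_ub, bounds=bounds, method='highs-ds',
...               options={'presolve': True, 'time_limit': 10.0,
...                        'simplex_dual_edge_weight_strategy': 'devex'})
>>> float(res.fun)
-22.0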
+ + Returns + ------- + res : OptimizeResult + A :class:`scipy.optimize.OptimizeResult` consisting of the fields: + + x : 1D array + The values of the decision variables that minimizes the + objective function while satisfying the constraints. + fun : float + The optimal value of the objective function ``c @ x``. + slack : 1D array + The (nominally positive) values of the slack, + ``b_ub - A_ub @ x``. + con : 1D array + The (nominally zero) residuals of the equality constraints, + ``b_eq - A_eq @ x``. + success : bool + ``True`` when the algorithm succeeds in finding an optimal + solution. + status : int + An integer representing the exit status of the algorithm. + + ``0`` : Optimization terminated successfully. + + ``1`` : Iteration or time limit reached. + + ``2`` : Problem appears to be infeasible. + + ``3`` : Problem appears to be unbounded. + + ``4`` : The HiGHS solver ran into a problem. + + message : str + A string descriptor of the exit status of the algorithm. + nit : int + The total number of iterations performed. This includes iterations + in all phases. + crossover_nit : int + This is always ``0`` for the HiGHS simplex method. + For the HiGHS interior-point method, this is the number of + primal/dual pushes performed during the crossover routine. + ineqlin : OptimizeResult + Solution and sensitivity information corresponding to the + inequality constraints, `b_ub`. A dictionary consisting of the + fields: + + residual : np.ndnarray + The (nominally positive) values of the slack variables, + ``b_ub - A_ub @ x``. This quantity is also commonly + referred to as "slack". + + marginals : np.ndarray + The sensitivity (partial derivative) of the objective + function with respect to the right-hand side of the + inequality constraints, `b_ub`. + + eqlin : OptimizeResult + Solution and sensitivity information corresponding to the + equality constraints, `b_eq`. A dictionary consisting of the + fields: + + residual : np.ndarray + The (nominally zero) residuals of the equality constraints, + ``b_eq - A_eq @ x``. + + marginals : np.ndarray + The sensitivity (partial derivative) of the objective + function with respect to the right-hand side of the + equality constraints, `b_eq`. + + lower, upper : OptimizeResult + Solution and sensitivity information corresponding to the + lower and upper bounds on decision variables, `bounds`. + + residual : np.ndarray + The (nominally positive) values of the quantity + ``x - lb`` (lower) or ``ub - x`` (upper). + + marginals : np.ndarray + The sensitivity (partial derivative) of the objective + function with respect to the lower and upper + `bounds`. + + Notes + ----- + + Method :ref:`'highs-ds' ` is a wrapper + of the C++ high performance dual revised simplex implementation (HSOL) + [13]_, [14]_. Method :ref:`'highs-ipm' ` + is a wrapper of a C++ implementation of an **i**\ nterior-\ **p**\ oint + **m**\ ethod [13]_; it features a crossover routine, so it is as accurate + as a simplex solver. Method :ref:`'highs' ` chooses + between the two automatically. For new code involving `linprog`, we + recommend explicitly choosing one of these three method values instead of + :ref:`'interior-point' ` (default), + :ref:`'revised simplex' `, and + :ref:`'simplex' ` (legacy). + + The result fields `ineqlin`, `eqlin`, `lower`, and `upper` all contain + `marginals`, or partial derivatives of the objective function with respect + to the right-hand side of each constraint. 
These partial derivatives are + also referred to as "Lagrange multipliers", "dual values", and + "shadow prices". The sign convention of `marginals` is opposite that + of Lagrange multipliers produced by many nonlinear solvers. + + References + ---------- + .. [13] Huangfu, Q., Galabova, I., Feldmeier, M., and Hall, J. A. J. + "HiGHS - high performance software for linear optimization." + https://highs.dev/ + .. [14] Huangfu, Q. and Hall, J. A. J. "Parallelizing the dual revised + simplex method." Mathematical Programming Computation, 10 (1), + 119-142, 2018. DOI: 10.1007/s12532-017-0130-5 + .. [15] Harris, Paula MJ. "Pivot selection methods of the Devex LP code." + Mathematical programming 5.1 (1973): 1-28. + .. [16] Goldfarb, Donald, and John Ker Reid. "A practicable steepest-edge + simplex algorithm." Mathematical Programming 12.1 (1977): 361-371. + """ + pass + + +def _linprog_highs_ipm_doc(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None, + bounds=None, method='highs-ipm', callback=None, + maxiter=None, disp=False, presolve=True, + time_limit=None, + dual_feasibility_tolerance=None, + primal_feasibility_tolerance=None, + ipm_optimality_tolerance=None, + **unknown_options): + r""" + Linear programming: minimize a linear objective function subject to linear + equality and inequality constraints using the HiGHS interior point solver. + + Linear programming solves problems of the following form: + + .. math:: + + \min_x \ & c^T x \\ + \mbox{such that} \ & A_{ub} x \leq b_{ub},\\ + & A_{eq} x = b_{eq},\\ + & l \leq x \leq u , + + where :math:`x` is a vector of decision variables; :math:`c`, + :math:`b_{ub}`, :math:`b_{eq}`, :math:`l`, and :math:`u` are vectors; and + :math:`A_{ub}` and :math:`A_{eq}` are matrices. + + Alternatively, that's: + + minimize:: + + c @ x + + such that:: + + A_ub @ x <= b_ub + A_eq @ x == b_eq + lb <= x <= ub + + Note that by default ``lb = 0`` and ``ub = None`` unless specified with + ``bounds``. + + Parameters + ---------- + c : 1-D array + The coefficients of the linear objective function to be minimized. + A_ub : 2-D array, optional + The inequality constraint matrix. Each row of ``A_ub`` specifies the + coefficients of a linear inequality constraint on ``x``. + b_ub : 1-D array, optional + The inequality constraint vector. Each element represents an + upper bound on the corresponding value of ``A_ub @ x``. + A_eq : 2-D array, optional + The equality constraint matrix. Each row of ``A_eq`` specifies the + coefficients of a linear equality constraint on ``x``. + b_eq : 1-D array, optional + The equality constraint vector. Each element of ``A_eq @ x`` must equal + the corresponding element of ``b_eq``. + bounds : sequence, optional + A sequence of ``(min, max)`` pairs for each element in ``x``, defining + the minimum and maximum values of that decision variable. Use ``None`` + to indicate that there is no bound. By default, bounds are + ``(0, None)`` (all decision variables are non-negative). + If a single tuple ``(min, max)`` is provided, then ``min`` and + ``max`` will serve as bounds for all decision variables. + method : str + + This is the method-specific documentation for 'highs-ipm'. + :ref:`'highs-ipm' `, + :ref:`'highs-ds' `, + :ref:`'interior-point' ` (default), + :ref:`'revised simplex' `, and + :ref:`'simplex' ` (legacy) + are also available. + + Options + ------- + maxiter : int + The maximum number of iterations to perform in either phase. + For :ref:`'highs-ipm' `, this does not + include the number of crossover iterations. 
Default is the largest + possible value for an ``int`` on the platform. + disp : bool (default: ``False``) + Set to ``True`` if indicators of optimization status are to be + printed to the console during optimization. + presolve : bool (default: ``True``) + Presolve attempts to identify trivial infeasibilities, + identify trivial unboundedness, and simplify the problem before + sending it to the main solver. It is generally recommended + to keep the default setting ``True``; set to ``False`` if + presolve is to be disabled. + time_limit : float + The maximum time in seconds allotted to solve the problem; + default is the largest possible value for a ``double`` on the + platform. + dual_feasibility_tolerance : double (default: 1e-07) + The minimum of this and ``primal_feasibility_tolerance`` + is used for the feasibility tolerance of + :ref:`'highs-ipm' `. + primal_feasibility_tolerance : double (default: 1e-07) + The minimum of this and ``dual_feasibility_tolerance`` + is used for the feasibility tolerance of + :ref:`'highs-ipm' `. + ipm_optimality_tolerance : double (default: ``1e-08``) + Optimality tolerance for + :ref:`'highs-ipm' `. + Minimum allowable value is 1e-12. + unknown_options : dict + Optional arguments not used by this particular solver. If + ``unknown_options`` is non-empty, a warning is issued listing + all unused options. + + Returns + ------- + res : OptimizeResult + A :class:`scipy.optimize.OptimizeResult` consisting of the fields: + + x : 1D array + The values of the decision variables that minimizes the + objective function while satisfying the constraints. + fun : float + The optimal value of the objective function ``c @ x``. + slack : 1D array + The (nominally positive) values of the slack, + ``b_ub - A_ub @ x``. + con : 1D array + The (nominally zero) residuals of the equality constraints, + ``b_eq - A_eq @ x``. + success : bool + ``True`` when the algorithm succeeds in finding an optimal + solution. + status : int + An integer representing the exit status of the algorithm. + + ``0`` : Optimization terminated successfully. + + ``1`` : Iteration or time limit reached. + + ``2`` : Problem appears to be infeasible. + + ``3`` : Problem appears to be unbounded. + + ``4`` : The HiGHS solver ran into a problem. + + message : str + A string descriptor of the exit status of the algorithm. + nit : int + The total number of iterations performed. + For the HiGHS interior-point method, this does not include + crossover iterations. + crossover_nit : int + The number of primal/dual pushes performed during the + crossover routine for the HiGHS interior-point method. + ineqlin : OptimizeResult + Solution and sensitivity information corresponding to the + inequality constraints, `b_ub`. A dictionary consisting of the + fields: + + residual : np.ndnarray + The (nominally positive) values of the slack variables, + ``b_ub - A_ub @ x``. This quantity is also commonly + referred to as "slack". + + marginals : np.ndarray + The sensitivity (partial derivative) of the objective + function with respect to the right-hand side of the + inequality constraints, `b_ub`. + + eqlin : OptimizeResult + Solution and sensitivity information corresponding to the + equality constraints, `b_eq`. A dictionary consisting of the + fields: + + residual : np.ndarray + The (nominally zero) residuals of the equality constraints, + ``b_eq - A_eq @ x``. 
+ + marginals : np.ndarray + The sensitivity (partial derivative) of the objective + function with respect to the right-hand side of the + equality constraints, `b_eq`. + + lower, upper : OptimizeResult + Solution and sensitivity information corresponding to the + lower and upper bounds on decision variables, `bounds`. + + residual : np.ndarray + The (nominally positive) values of the quantity + ``x - lb`` (lower) or ``ub - x`` (upper). + + marginals : np.ndarray + The sensitivity (partial derivative) of the objective + function with respect to the lower and upper + `bounds`. + + Notes + ----- + + Method :ref:`'highs-ipm' ` + is a wrapper of a C++ implementation of an **i**\ nterior-\ **p**\ oint + **m**\ ethod [13]_; it features a crossover routine, so it is as accurate + as a simplex solver. + Method :ref:`'highs-ds' ` is a wrapper + of the C++ high performance dual revised simplex implementation (HSOL) + [13]_, [14]_. Method :ref:`'highs' ` chooses + between the two automatically. For new code involving `linprog`, we + recommend explicitly choosing one of these three method values instead of + :ref:`'interior-point' ` (default), + :ref:`'revised simplex' `, and + :ref:`'simplex' ` (legacy). + + The result fields `ineqlin`, `eqlin`, `lower`, and `upper` all contain + `marginals`, or partial derivatives of the objective function with respect + to the right-hand side of each constraint. These partial derivatives are + also referred to as "Lagrange multipliers", "dual values", and + "shadow prices". The sign convention of `marginals` is opposite that + of Lagrange multipliers produced by many nonlinear solvers. + + References + ---------- + .. [13] Huangfu, Q., Galabova, I., Feldmeier, M., and Hall, J. A. J. + "HiGHS - high performance software for linear optimization." + https://highs.dev/ + .. [14] Huangfu, Q. and Hall, J. A. J. "Parallelizing the dual revised + simplex method." Mathematical Programming Computation, 10 (1), + 119-142, 2018. DOI: 10.1007/s12532-017-0130-5 + """ + pass + + +def _linprog_ip_doc(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None, + bounds=None, method='interior-point', callback=None, + maxiter=1000, disp=False, presolve=True, + tol=1e-8, autoscale=False, rr=True, + alpha0=.99995, beta=0.1, sparse=False, + lstsq=False, sym_pos=True, cholesky=True, pc=True, + ip=False, permc_spec='MMD_AT_PLUS_A', **unknown_options): + r""" + Linear programming: minimize a linear objective function subject to linear + equality and inequality constraints using the interior-point method of + [4]_. + + .. deprecated:: 1.9.0 + `method='interior-point'` will be removed in SciPy 1.11.0. + It is replaced by `method='highs'` because the latter is + faster and more robust. + + Linear programming solves problems of the following form: + + .. math:: + + \min_x \ & c^T x \\ + \mbox{such that} \ & A_{ub} x \leq b_{ub},\\ + & A_{eq} x = b_{eq},\\ + & l \leq x \leq u , + + where :math:`x` is a vector of decision variables; :math:`c`, + :math:`b_{ub}`, :math:`b_{eq}`, :math:`l`, and :math:`u` are vectors; and + :math:`A_{ub}` and :math:`A_{eq}` are matrices. + + Alternatively, that's: + + minimize:: + + c @ x + + such that:: + + A_ub @ x <= b_ub + A_eq @ x == b_eq + lb <= x <= ub + + Note that by default ``lb = 0`` and ``ub = None`` unless specified with + ``bounds``. + + Parameters + ---------- + c : 1-D array + The coefficients of the linear objective function to be minimized. + A_ub : 2-D array, optional + The inequality constraint matrix. 
Each row of ``A_ub`` specifies the + coefficients of a linear inequality constraint on ``x``. + b_ub : 1-D array, optional + The inequality constraint vector. Each element represents an + upper bound on the corresponding value of ``A_ub @ x``. + A_eq : 2-D array, optional + The equality constraint matrix. Each row of ``A_eq`` specifies the + coefficients of a linear equality constraint on ``x``. + b_eq : 1-D array, optional + The equality constraint vector. Each element of ``A_eq @ x`` must equal + the corresponding element of ``b_eq``. + bounds : sequence, optional + A sequence of ``(min, max)`` pairs for each element in ``x``, defining + the minimum and maximum values of that decision variable. Use ``None`` + to indicate that there is no bound. By default, bounds are + ``(0, None)`` (all decision variables are non-negative). + If a single tuple ``(min, max)`` is provided, then ``min`` and + ``max`` will serve as bounds for all decision variables. + method : str + This is the method-specific documentation for 'interior-point'. + :ref:`'highs' `, + :ref:`'highs-ds' `, + :ref:`'highs-ipm' `, + :ref:`'revised simplex' `, and + :ref:`'simplex' ` (legacy) + are also available. + callback : callable, optional + Callback function to be executed once per iteration. + + Options + ------- + maxiter : int (default: 1000) + The maximum number of iterations of the algorithm. + disp : bool (default: False) + Set to ``True`` if indicators of optimization status are to be printed + to the console each iteration. + presolve : bool (default: True) + Presolve attempts to identify trivial infeasibilities, + identify trivial unboundedness, and simplify the problem before + sending it to the main solver. It is generally recommended + to keep the default setting ``True``; set to ``False`` if + presolve is to be disabled. + tol : float (default: 1e-8) + Termination tolerance to be used for all termination criteria; + see [4]_ Section 4.5. + autoscale : bool (default: False) + Set to ``True`` to automatically perform equilibration. + Consider using this option if the numerical values in the + constraints are separated by several orders of magnitude. + rr : bool (default: True) + Set to ``False`` to disable automatic redundancy removal. + alpha0 : float (default: 0.99995) + The maximal step size for Mehrota's predictor-corrector search + direction; see :math:`\beta_{3}` of [4]_ Table 8.1. + beta : float (default: 0.1) + The desired reduction of the path parameter :math:`\mu` (see [6]_) + when Mehrota's predictor-corrector is not in use (uncommon). + sparse : bool (default: False) + Set to ``True`` if the problem is to be treated as sparse after + presolve. If either ``A_eq`` or ``A_ub`` is a sparse matrix, + this option will automatically be set ``True``, and the problem + will be treated as sparse even during presolve. If your constraint + matrices contain mostly zeros and the problem is not very small (less + than about 100 constraints or variables), consider setting ``True`` + or providing ``A_eq`` and ``A_ub`` as sparse matrices. + lstsq : bool (default: ``False``) + Set to ``True`` if the problem is expected to be very poorly + conditioned. This should always be left ``False`` unless severe + numerical difficulties are encountered. Leave this at the default + unless you receive a warning message suggesting otherwise. + sym_pos : bool (default: True) + Leave ``True`` if the problem is expected to yield a well conditioned + symmetric positive definite normal equation matrix + (almost always). 
Leave this at the default unless you receive + a warning message suggesting otherwise. + cholesky : bool (default: True) + Set to ``True`` if the normal equations are to be solved by explicit + Cholesky decomposition followed by explicit forward/backward + substitution. This is typically faster for problems + that are numerically well-behaved. + pc : bool (default: True) + Leave ``True`` if the predictor-corrector method of Mehrota is to be + used. This is almost always (if not always) beneficial. + ip : bool (default: False) + Set to ``True`` if the improved initial point suggestion due to [4]_ + Section 4.3 is desired. Whether this is beneficial or not + depends on the problem. + permc_spec : str (default: 'MMD_AT_PLUS_A') + (Has effect only with ``sparse = True``, ``lstsq = False``, ``sym_pos = + True``, and no SuiteSparse.) + A matrix is factorized in each iteration of the algorithm. + This option specifies how to permute the columns of the matrix for + sparsity preservation. Acceptable values are: + + - ``NATURAL``: natural ordering. + - ``MMD_ATA``: minimum degree ordering on the structure of A^T A. + - ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A. + - ``COLAMD``: approximate minimum degree column ordering. + + This option can impact the convergence of the + interior point algorithm; test different values to determine which + performs best for your problem. For more information, refer to + ``scipy.sparse.linalg.splu``. + unknown_options : dict + Optional arguments not used by this particular solver. If + `unknown_options` is non-empty a warning is issued listing all + unused options. + + Returns + ------- + res : OptimizeResult + A :class:`scipy.optimize.OptimizeResult` consisting of the fields: + + x : 1-D array + The values of the decision variables that minimizes the + objective function while satisfying the constraints. + fun : float + The optimal value of the objective function ``c @ x``. + slack : 1-D array + The (nominally positive) values of the slack variables, + ``b_ub - A_ub @ x``. + con : 1-D array + The (nominally zero) residuals of the equality constraints, + ``b_eq - A_eq @ x``. + success : bool + ``True`` when the algorithm succeeds in finding an optimal + solution. + status : int + An integer representing the exit status of the algorithm. + + ``0`` : Optimization terminated successfully. + + ``1`` : Iteration limit reached. + + ``2`` : Problem appears to be infeasible. + + ``3`` : Problem appears to be unbounded. + + ``4`` : Numerical difficulties encountered. + + message : str + A string descriptor of the exit status of the algorithm. + nit : int + The total number of iterations performed in all phases. + + + Notes + ----- + This method implements the algorithm outlined in [4]_ with ideas from [8]_ + and a structure inspired by the simpler methods of [6]_. + + The primal-dual path following method begins with initial 'guesses' of + the primal and dual variables of the standard form problem and iteratively + attempts to solve the (nonlinear) Karush-Kuhn-Tucker conditions for the + problem with a gradually reduced logarithmic barrier term added to the + objective. This particular implementation uses a homogeneous self-dual + formulation, which provides certificates of infeasibility or unboundedness + where applicable. + + The default initial point for the primal and dual variables is that + defined in [4]_ Section 4.4 Equation 8.22. 
Optionally (by setting initial + point option ``ip=True``), an alternate (potentially improved) starting + point can be calculated according to the additional recommendations of + [4]_ Section 4.4. + + A search direction is calculated using the predictor-corrector method + (single correction) proposed by Mehrota and detailed in [4]_ Section 4.1. + (A potential improvement would be to implement the method of multiple + corrections described in [4]_ Section 4.2.) In practice, this is + accomplished by solving the normal equations, [4]_ Section 5.1 Equations + 8.31 and 8.32, derived from the Newton equations [4]_ Section 5 Equations + 8.25 (compare to [4]_ Section 4 Equations 8.6-8.8). The advantage of + solving the normal equations rather than 8.25 directly is that the + matrices involved are symmetric positive definite, so Cholesky + decomposition can be used rather than the more expensive LU factorization. + + With default options, the solver used to perform the factorization depends + on third-party software availability and the conditioning of the problem. + + For dense problems, solvers are tried in the following order: + + 1. ``scipy.linalg.cho_factor`` + + 2. ``scipy.linalg.solve`` with option ``sym_pos=True`` + + 3. ``scipy.linalg.solve`` with option ``sym_pos=False`` + + 4. ``scipy.linalg.lstsq`` + + For sparse problems: + + 1. ``sksparse.cholmod.cholesky`` (if scikit-sparse and SuiteSparse are + installed) + + 2. ``scipy.sparse.linalg.factorized`` (if scikit-umfpack and SuiteSparse + are installed) + + 3. ``scipy.sparse.linalg.splu`` (which uses SuperLU distributed with SciPy) + + 4. ``scipy.sparse.linalg.lsqr`` + + If the solver fails for any reason, successively more robust (but slower) + solvers are attempted in the order indicated. Attempting, failing, and + re-starting factorization can be time consuming, so if the problem is + numerically challenging, options can be set to bypass solvers that are + failing. Setting ``cholesky=False`` skips to solver 2, + ``sym_pos=False`` skips to solver 3, and ``lstsq=True`` skips + to solver 4 for both sparse and dense problems. + + Potential improvements for combating issues associated with dense + columns in otherwise sparse problems are outlined in [4]_ Section 5.3 and + [10]_ Section 4.1-4.2; the latter also discusses the alleviation of + accuracy issues associated with the substitution approach to free + variables. + + After calculating the search direction, the maximum possible step size + that does not activate the non-negativity constraints is calculated, and + the smaller of this step size and unity is applied (as in [4]_ Section + 4.1.) [4]_ Section 4.3 suggests improvements for choosing the step size. + + The new point is tested according to the termination conditions of [4]_ + Section 4.5. The same tolerance, which can be set using the ``tol`` option, + is used for all checks. (A potential improvement would be to expose + the different tolerances to be set independently.) If optimality, + unboundedness, or infeasibility is detected, the solve procedure + terminates; otherwise it repeats. + + Whereas the top level ``linprog`` module expects a problem of form: + + Minimize:: + + c @ x + + Subject to:: + + A_ub @ x <= b_ub + A_eq @ x == b_eq + lb <= x <= ub + + where ``lb = 0`` and ``ub = None`` unless set in ``bounds``. The problem + is automatically converted to the form: + + Minimize:: + + c @ x + + Subject to:: + + A @ x == b + x >= 0 + + for solution. 
That is, the original problem contains equality, upper-bound + and variable constraints whereas the method specific solver requires + equality constraints and variable non-negativity. ``linprog`` converts the + original problem to standard form by converting the simple bounds to upper + bound constraints, introducing non-negative slack variables for inequality + constraints, and expressing unbounded variables as the difference between + two non-negative variables. The problem is converted back to the original + form before results are reported. + + References + ---------- + .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point + optimizer for linear programming: an implementation of the + homogeneous algorithm." High performance optimization. Springer US, + 2000. 197-232. + .. [6] Freund, Robert M. "Primal-Dual Interior-Point Methods for Linear + Programming based on Newton's Method." Unpublished Course Notes, + March 2004. Available 2/25/2017 at + https://ocw.mit.edu/courses/sloan-school-of-management/15-084j-nonlinear-programming-spring-2004/lecture-notes/lec14_int_pt_mthd.pdf + .. [8] Andersen, Erling D., and Knud D. Andersen. "Presolving in linear + programming." Mathematical Programming 71.2 (1995): 221-245. + .. [9] Bertsimas, Dimitris, and J. Tsitsiklis. "Introduction to linear + programming." Athena Scientific 1 (1997): 997. + .. [10] Andersen, Erling D., et al. Implementation of interior point + methods for large scale linear programming. HEC/Universite de + Geneve, 1996. + """ + pass + + +def _linprog_rs_doc(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None, + bounds=None, method='interior-point', callback=None, + x0=None, maxiter=5000, disp=False, presolve=True, + tol=1e-12, autoscale=False, rr=True, maxupdate=10, + mast=False, pivot="mrc", **unknown_options): + r""" + Linear programming: minimize a linear objective function subject to linear + equality and inequality constraints using the revised simplex method. + + .. deprecated:: 1.9.0 + `method='revised simplex'` will be removed in SciPy 1.11.0. + It is replaced by `method='highs'` because the latter is + faster and more robust. + + Linear programming solves problems of the following form: + + .. math:: + + \min_x \ & c^T x \\ + \mbox{such that} \ & A_{ub} x \leq b_{ub},\\ + & A_{eq} x = b_{eq},\\ + & l \leq x \leq u , + + where :math:`x` is a vector of decision variables; :math:`c`, + :math:`b_{ub}`, :math:`b_{eq}`, :math:`l`, and :math:`u` are vectors; and + :math:`A_{ub}` and :math:`A_{eq}` are matrices. + + Alternatively, that's: + + minimize:: + + c @ x + + such that:: + + A_ub @ x <= b_ub + A_eq @ x == b_eq + lb <= x <= ub + + Note that by default ``lb = 0`` and ``ub = None`` unless specified with + ``bounds``. + + Parameters + ---------- + c : 1-D array + The coefficients of the linear objective function to be minimized. + A_ub : 2-D array, optional + The inequality constraint matrix. Each row of ``A_ub`` specifies the + coefficients of a linear inequality constraint on ``x``. + b_ub : 1-D array, optional + The inequality constraint vector. Each element represents an + upper bound on the corresponding value of ``A_ub @ x``. + A_eq : 2-D array, optional + The equality constraint matrix. Each row of ``A_eq`` specifies the + coefficients of a linear equality constraint on ``x``. + b_eq : 1-D array, optional + The equality constraint vector. Each element of ``A_eq @ x`` must equal + the corresponding element of ``b_eq``. 
+ bounds : sequence, optional + A sequence of ``(min, max)`` pairs for each element in ``x``, defining + the minimum and maximum values of that decision variable. Use ``None`` + to indicate that there is no bound. By default, bounds are + ``(0, None)`` (all decision variables are non-negative). + If a single tuple ``(min, max)`` is provided, then ``min`` and + ``max`` will serve as bounds for all decision variables. + method : str + This is the method-specific documentation for 'revised simplex'. + :ref:`'highs' `, + :ref:`'highs-ds' `, + :ref:`'highs-ipm' `, + :ref:`'interior-point' ` (default), + and :ref:`'simplex' ` (legacy) + are also available. + callback : callable, optional + Callback function to be executed once per iteration. + x0 : 1-D array, optional + Guess values of the decision variables, which will be refined by + the optimization algorithm. This argument is currently used only by the + 'revised simplex' method, and can only be used if `x0` represents a + basic feasible solution. + + Options + ------- + maxiter : int (default: 5000) + The maximum number of iterations to perform in either phase. + disp : bool (default: False) + Set to ``True`` if indicators of optimization status are to be printed + to the console each iteration. + presolve : bool (default: True) + Presolve attempts to identify trivial infeasibilities, + identify trivial unboundedness, and simplify the problem before + sending it to the main solver. It is generally recommended + to keep the default setting ``True``; set to ``False`` if + presolve is to be disabled. + tol : float (default: 1e-12) + The tolerance which determines when a solution is "close enough" to + zero in Phase 1 to be considered a basic feasible solution or close + enough to positive to serve as an optimal solution. + autoscale : bool (default: False) + Set to ``True`` to automatically perform equilibration. + Consider using this option if the numerical values in the + constraints are separated by several orders of magnitude. + rr : bool (default: True) + Set to ``False`` to disable automatic redundancy removal. + maxupdate : int (default: 10) + The maximum number of updates performed on the LU factorization. + After this many updates is reached, the basis matrix is factorized + from scratch. + mast : bool (default: False) + Minimize Amortized Solve Time. If enabled, the average time to solve + a linear system using the basis factorization is measured. Typically, + the average solve time will decrease with each successive solve after + initial factorization, as factorization takes much more time than the + solve operation (and updates). Eventually, however, the updated + factorization becomes sufficiently complex that the average solve time + begins to increase. When this is detected, the basis is refactorized + from scratch. Enable this option to maximize speed at the risk of + nondeterministic behavior. Ignored if ``maxupdate`` is 0. + pivot : "mrc" or "bland" (default: "mrc") + Pivot rule: Minimum Reduced Cost ("mrc") or Bland's rule ("bland"). + Choose Bland's rule if iteration limit is reached and cycling is + suspected. + unknown_options : dict + Optional arguments not used by this particular solver. If + `unknown_options` is non-empty a warning is issued listing all + unused options. + + Returns + ------- + res : OptimizeResult + A :class:`scipy.optimize.OptimizeResult` consisting of the fields: + + x : 1-D array + The values of the decision variables that minimizes the + objective function while satisfying the constraints. 
+ fun : float + The optimal value of the objective function ``c @ x``. + slack : 1-D array + The (nominally positive) values of the slack variables, + ``b_ub - A_ub @ x``. + con : 1-D array + The (nominally zero) residuals of the equality constraints, + ``b_eq - A_eq @ x``. + success : bool + ``True`` when the algorithm succeeds in finding an optimal + solution. + status : int + An integer representing the exit status of the algorithm. + + ``0`` : Optimization terminated successfully. + + ``1`` : Iteration limit reached. + + ``2`` : Problem appears to be infeasible. + + ``3`` : Problem appears to be unbounded. + + ``4`` : Numerical difficulties encountered. + + ``5`` : Problem has no constraints; turn presolve on. + + ``6`` : Invalid guess provided. + + message : str + A string descriptor of the exit status of the algorithm. + nit : int + The total number of iterations performed in all phases. + + + Notes + ----- + Method *revised simplex* uses the revised simplex method as described in + [9]_, except that a factorization [11]_ of the basis matrix, rather than + its inverse, is efficiently maintained and used to solve the linear systems + at each iteration of the algorithm. + + References + ---------- + .. [9] Bertsimas, Dimitris, and J. Tsitsiklis. "Introduction to linear + programming." Athena Scientific 1 (1997): 997. + .. [11] Bartels, Richard H. "A stabilization of the simplex method." + Journal in Numerische Mathematik 16.5 (1971): 414-434. + """ + pass + + +def _linprog_simplex_doc(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None, + bounds=None, method='interior-point', callback=None, + maxiter=5000, disp=False, presolve=True, + tol=1e-12, autoscale=False, rr=True, bland=False, + **unknown_options): + r""" + Linear programming: minimize a linear objective function subject to linear + equality and inequality constraints using the tableau-based simplex method. + + .. deprecated:: 1.9.0 + `method='simplex'` will be removed in SciPy 1.11.0. + It is replaced by `method='highs'` because the latter is + faster and more robust. + + Linear programming solves problems of the following form: + + .. math:: + + \min_x \ & c^T x \\ + \mbox{such that} \ & A_{ub} x \leq b_{ub},\\ + & A_{eq} x = b_{eq},\\ + & l \leq x \leq u , + + where :math:`x` is a vector of decision variables; :math:`c`, + :math:`b_{ub}`, :math:`b_{eq}`, :math:`l`, and :math:`u` are vectors; and + :math:`A_{ub}` and :math:`A_{eq}` are matrices. + + Alternatively, that's: + + minimize:: + + c @ x + + such that:: + + A_ub @ x <= b_ub + A_eq @ x == b_eq + lb <= x <= ub + + Note that by default ``lb = 0`` and ``ub = None`` unless specified with + ``bounds``. + + Parameters + ---------- + c : 1-D array + The coefficients of the linear objective function to be minimized. + A_ub : 2-D array, optional + The inequality constraint matrix. Each row of ``A_ub`` specifies the + coefficients of a linear inequality constraint on ``x``. + b_ub : 1-D array, optional + The inequality constraint vector. Each element represents an + upper bound on the corresponding value of ``A_ub @ x``. + A_eq : 2-D array, optional + The equality constraint matrix. Each row of ``A_eq`` specifies the + coefficients of a linear equality constraint on ``x``. + b_eq : 1-D array, optional + The equality constraint vector. Each element of ``A_eq @ x`` must equal + the corresponding element of ``b_eq``. + bounds : sequence, optional + A sequence of ``(min, max)`` pairs for each element in ``x``, defining + the minimum and maximum values of that decision variable. 
Use ``None`` + to indicate that there is no bound. By default, bounds are + ``(0, None)`` (all decision variables are non-negative). + If a single tuple ``(min, max)`` is provided, then ``min`` and + ``max`` will serve as bounds for all decision variables. + method : str + This is the method-specific documentation for 'simplex'. + :ref:`'highs' `, + :ref:`'highs-ds' `, + :ref:`'highs-ipm' `, + :ref:`'interior-point' ` (default), + and :ref:`'revised simplex' ` + are also available. + callback : callable, optional + Callback function to be executed once per iteration. + + Options + ------- + maxiter : int (default: 5000) + The maximum number of iterations to perform in either phase. + disp : bool (default: False) + Set to ``True`` if indicators of optimization status are to be printed + to the console each iteration. + presolve : bool (default: True) + Presolve attempts to identify trivial infeasibilities, + identify trivial unboundedness, and simplify the problem before + sending it to the main solver. It is generally recommended + to keep the default setting ``True``; set to ``False`` if + presolve is to be disabled. + tol : float (default: 1e-12) + The tolerance which determines when a solution is "close enough" to + zero in Phase 1 to be considered a basic feasible solution or close + enough to positive to serve as an optimal solution. + autoscale : bool (default: False) + Set to ``True`` to automatically perform equilibration. + Consider using this option if the numerical values in the + constraints are separated by several orders of magnitude. + rr : bool (default: True) + Set to ``False`` to disable automatic redundancy removal. + bland : bool + If True, use Bland's anti-cycling rule [3]_ to choose pivots to + prevent cycling. If False, choose pivots which should lead to a + converged solution more quickly. The latter method is subject to + cycling (non-convergence) in rare instances. + unknown_options : dict + Optional arguments not used by this particular solver. If + `unknown_options` is non-empty a warning is issued listing all + unused options. + + Returns + ------- + res : OptimizeResult + A :class:`scipy.optimize.OptimizeResult` consisting of the fields: + + x : 1-D array + The values of the decision variables that minimizes the + objective function while satisfying the constraints. + fun : float + The optimal value of the objective function ``c @ x``. + slack : 1-D array + The (nominally positive) values of the slack variables, + ``b_ub - A_ub @ x``. + con : 1-D array + The (nominally zero) residuals of the equality constraints, + ``b_eq - A_eq @ x``. + success : bool + ``True`` when the algorithm succeeds in finding an optimal + solution. + status : int + An integer representing the exit status of the algorithm. + + ``0`` : Optimization terminated successfully. + + ``1`` : Iteration limit reached. + + ``2`` : Problem appears to be infeasible. + + ``3`` : Problem appears to be unbounded. + + ``4`` : Numerical difficulties encountered. + + message : str + A string descriptor of the exit status of the algorithm. + nit : int + The total number of iterations performed in all phases. + + References + ---------- + .. [1] Dantzig, George B., Linear programming and extensions. Rand + Corporation Research Study Princeton Univ. Press, Princeton, NJ, + 1963 + .. [2] Hillier, S.H. and Lieberman, G.J. (1995), "Introduction to + Mathematical Programming", McGraw-Hill, Chapter 4. + .. [3] Bland, Robert G. New finite pivoting rules for the simplex method. 
+ Mathematics of Operations Research (2), 1977: pp. 103-107. + """ + pass diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_linprog_highs.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_linprog_highs.py new file mode 100644 index 0000000000000000000000000000000000000000..9455cf460f96b7768c28acfbba76ad9ad08eed3f --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_linprog_highs.py @@ -0,0 +1,422 @@ +"""HiGHS Linear Optimization Methods + +Interface to HiGHS linear optimization software. +https://highs.dev/ + +.. versionadded:: 1.5.0 + +References +---------- +.. [1] Q. Huangfu and J.A.J. Hall. "Parallelizing the dual revised simplex + method." Mathematical Programming Computation, 10 (1), 119-142, + 2018. DOI: 10.1007/s12532-017-0130-5 + +""" + +import inspect +import numpy as np +from ._optimize import OptimizeWarning, OptimizeResult +from warnings import warn +from ._highspy._highs_wrapper import _highs_wrapper +from ._highspy._core import( + kHighsInf, + HighsDebugLevel, + ObjSense, + HighsModelStatus, + simplex_constants as s_c, # [1] +) +from scipy.sparse import csc_matrix, vstack, issparse + +# [1]: Directly importing from "._highspy._core.simplex_constants" +# causes problems when reloading. +# See https://github.com/scipy/scipy/pull/22869 for details. + +def _highs_to_scipy_status_message(highs_status, highs_message): + """Converts HiGHS status number/message to SciPy status number/message""" + + scipy_statuses_messages = { + None: (4, "HiGHS did not provide a status code. "), + HighsModelStatus.kNotset: (4, ""), + HighsModelStatus.kLoadError: (4, ""), + HighsModelStatus.kModelError: (2, ""), + HighsModelStatus.kPresolveError: (4, ""), + HighsModelStatus.kSolveError: (4, ""), + HighsModelStatus.kPostsolveError: (4, ""), + HighsModelStatus.kModelEmpty: (4, ""), + HighsModelStatus.kObjectiveBound: (4, ""), + HighsModelStatus.kObjectiveTarget: (4, ""), + HighsModelStatus.kOptimal: (0, "Optimization terminated successfully. "), + HighsModelStatus.kTimeLimit: (1, "Time limit reached. "), + HighsModelStatus.kIterationLimit: (1, "Iteration limit reached. "), + HighsModelStatus.kInfeasible: (2, "The problem is infeasible. "), + HighsModelStatus.kUnbounded: (3, "The problem is unbounded. "), + HighsModelStatus.kUnboundedOrInfeasible: (4, "The problem is unbounded " + "or infeasible. ")} + unrecognized = (4, "The HiGHS status code was not recognized. ") + scipy_status, scipy_message = ( + scipy_statuses_messages.get(highs_status, unrecognized)) + hstat = int(highs_status) if highs_status is not None else None + scipy_message = (f"{scipy_message}" + f"(HiGHS Status {hstat}: {highs_message})") + return scipy_status, scipy_message + + +def _replace_inf(x): + # Replace `np.inf` with kHighsInf + infs = np.isinf(x) + with np.errstate(invalid="ignore"): + x[infs] = np.sign(x[infs])*kHighsInf + return x + + +def _convert_to_highs_enum(option, option_str, choices): + # If option is in the choices we can look it up, if not use + # the default value taken from function signature and warn: + try: + return choices[option.lower()] + except AttributeError: + return choices[option] + except KeyError: + sig = inspect.signature(_linprog_highs) + default_str = sig.parameters[option_str].default + warn(f"Option {option_str} is {option}, but only values in " + f"{set(choices.keys())} are allowed. 
Using default: " + f"{default_str}.", + OptimizeWarning, stacklevel=3) + return choices[default_str] + + +def _linprog_highs(lp, solver, time_limit=None, presolve=True, + disp=False, maxiter=None, + dual_feasibility_tolerance=None, + primal_feasibility_tolerance=None, + ipm_optimality_tolerance=None, + simplex_dual_edge_weight_strategy=None, + mip_rel_gap=None, + mip_max_nodes=None, + **unknown_options): + r""" + Solve the following linear programming problem using one of the HiGHS + solvers: + + User-facing documentation is in _linprog_doc.py. + + Parameters + ---------- + lp : _LPProblem + A ``scipy.optimize._linprog_util._LPProblem`` ``namedtuple``. + solver : "ipm" or "simplex" or None + Which HiGHS solver to use. If ``None``, "simplex" will be used. + + Options + ------- + maxiter : int + The maximum number of iterations to perform in either phase. For + ``solver='ipm'``, this does not include the number of crossover + iterations. Default is the largest possible value for an ``int`` + on the platform. + disp : bool + Set to ``True`` if indicators of optimization status are to be printed + to the console each iteration; default ``False``. + time_limit : float + The maximum time in seconds allotted to solve the problem; default is + the largest possible value for a ``double`` on the platform. + presolve : bool + Presolve attempts to identify trivial infeasibilities, + identify trivial unboundedness, and simplify the problem before + sending it to the main solver. It is generally recommended + to keep the default setting ``True``; set to ``False`` if presolve is + to be disabled. + dual_feasibility_tolerance : double + Dual feasibility tolerance. Default is 1e-07. + The minimum of this and ``primal_feasibility_tolerance`` + is used for the feasibility tolerance when ``solver='ipm'``. + primal_feasibility_tolerance : double + Primal feasibility tolerance. Default is 1e-07. + The minimum of this and ``dual_feasibility_tolerance`` + is used for the feasibility tolerance when ``solver='ipm'``. + ipm_optimality_tolerance : double + Optimality tolerance for ``solver='ipm'``. Default is 1e-08. + Minimum possible value is 1e-12 and must be smaller than the largest + possible value for a ``double`` on the platform. + simplex_dual_edge_weight_strategy : str (default: None) + Strategy for simplex dual edge weights. The default, ``None``, + automatically selects one of the following. + + ``'dantzig'`` uses Dantzig's original strategy of choosing the most + negative reduced cost. + + ``'devex'`` uses the strategy described in [15]_. + + ``steepest`` uses the exact steepest edge strategy as described in + [16]_. + + ``'steepest-devex'`` begins with the exact steepest edge strategy + until the computation is too costly or inexact and then switches to + the devex method. + + Currently, using ``None`` always selects ``'steepest-devex'``, but this + may change as new options become available. + + mip_max_nodes : int + The maximum number of nodes allotted to solve the problem; default is + the largest possible value for a ``HighsInt`` on the platform. + Ignored if not using the MIP solver. + unknown_options : dict + Optional arguments not used by this particular solver. If + ``unknown_options`` is non-empty, a warning is issued listing all + unused options. + + Returns + ------- + sol : dict + A dictionary consisting of the fields: + + x : 1D array + The values of the decision variables that minimizes the + objective function while satisfying the constraints. 
+ fun : float + The optimal value of the objective function ``c @ x``. + slack : 1D array + The (nominally positive) values of the slack, + ``b_ub - A_ub @ x``. + con : 1D array + The (nominally zero) residuals of the equality constraints, + ``b_eq - A_eq @ x``. + success : bool + ``True`` when the algorithm succeeds in finding an optimal + solution. + status : int + An integer representing the exit status of the algorithm. + + ``0`` : Optimization terminated successfully. + + ``1`` : Iteration or time limit reached. + + ``2`` : Problem appears to be infeasible. + + ``3`` : Problem appears to be unbounded. + + ``4`` : The HiGHS solver ran into a problem. + + message : str + A string descriptor of the exit status of the algorithm. + nit : int + The total number of iterations performed. + For ``solver='simplex'``, this includes iterations in all + phases. For ``solver='ipm'``, this does not include + crossover iterations. + crossover_nit : int + The number of primal/dual pushes performed during the + crossover routine for ``solver='ipm'``. This is ``0`` + for ``solver='simplex'``. + ineqlin : OptimizeResult + Solution and sensitivity information corresponding to the + inequality constraints, `b_ub`. A dictionary consisting of the + fields: + + residual : np.ndnarray + The (nominally positive) values of the slack variables, + ``b_ub - A_ub @ x``. This quantity is also commonly + referred to as "slack". + + marginals : np.ndarray + The sensitivity (partial derivative) of the objective + function with respect to the right-hand side of the + inequality constraints, `b_ub`. + + eqlin : OptimizeResult + Solution and sensitivity information corresponding to the + equality constraints, `b_eq`. A dictionary consisting of the + fields: + + residual : np.ndarray + The (nominally zero) residuals of the equality constraints, + ``b_eq - A_eq @ x``. + + marginals : np.ndarray + The sensitivity (partial derivative) of the objective + function with respect to the right-hand side of the + equality constraints, `b_eq`. + + lower, upper : OptimizeResult + Solution and sensitivity information corresponding to the + lower and upper bounds on decision variables, `bounds`. + + residual : np.ndarray + The (nominally positive) values of the quantity + ``x - lb`` (lower) or ``ub - x`` (upper). + + marginals : np.ndarray + The sensitivity (partial derivative) of the objective + function with respect to the lower and upper + `bounds`. + + mip_node_count : int + The number of subproblems or "nodes" solved by the MILP + solver. Only present when `integrality` is not `None`. + + mip_dual_bound : float + The MILP solver's final estimate of the lower bound on the + optimal solution. Only present when `integrality` is not + `None`. + + mip_gap : float + The difference between the final objective function value + and the final dual bound, scaled by the final objective + function value. Only present when `integrality` is not + `None`. + + Notes + ----- + The result fields `ineqlin`, `eqlin`, `lower`, and `upper` all contain + `marginals`, or partial derivatives of the objective function with respect + to the right-hand side of each constraint. These partial derivatives are + also referred to as "Lagrange multipliers", "dual values", and + "shadow prices". The sign convention of `marginals` is opposite that + of Lagrange multipliers produced by many nonlinear solvers. + + References + ---------- + .. [15] Harris, Paula MJ. "Pivot selection methods of the Devex LP code." + Mathematical programming 5.1 (1973): 1-28. + .. 
[16] Goldfarb, Donald, and John Ker Reid. "A practicable steepest-edge + simplex algorithm." Mathematical Programming 12.1 (1977): 361-371. + """ + if unknown_options: + message = (f"Unrecognized options detected: {unknown_options}. " + "These will be passed to HiGHS verbatim.") + warn(message, OptimizeWarning, stacklevel=3) + + # Map options to HiGHS enum values + simplex_dual_edge_weight_strategy_enum = _convert_to_highs_enum( + simplex_dual_edge_weight_strategy, + 'simplex_dual_edge_weight_strategy', + choices={'dantzig': \ + s_c.SimplexEdgeWeightStrategy.kSimplexEdgeWeightStrategyDantzig, + 'devex': \ + s_c.SimplexEdgeWeightStrategy.kSimplexEdgeWeightStrategyDevex, + 'steepest-devex': \ + s_c.SimplexEdgeWeightStrategy.kSimplexEdgeWeightStrategyChoose, + 'steepest': \ + s_c.SimplexEdgeWeightStrategy.kSimplexEdgeWeightStrategySteepestEdge, + None: None}) + + c, A_ub, b_ub, A_eq, b_eq, bounds, x0, integrality = lp + + lb, ub = bounds.T.copy() # separate bounds, copy->C-cntgs + # highs_wrapper solves LHS <= A*x <= RHS, not equality constraints + with np.errstate(invalid="ignore"): + lhs_ub = -np.ones_like(b_ub)*np.inf # LHS of UB constraints is -inf + rhs_ub = b_ub # RHS of UB constraints is b_ub + lhs_eq = b_eq # Equality constraint is inequality + rhs_eq = b_eq # constraint with LHS=RHS + lhs = np.concatenate((lhs_ub, lhs_eq)) + rhs = np.concatenate((rhs_ub, rhs_eq)) + + if issparse(A_ub) or issparse(A_eq): + A = vstack((A_ub, A_eq)) + else: + A = np.vstack((A_ub, A_eq)) + A = csc_matrix(A) + + options = { + 'presolve': presolve, + 'sense': ObjSense.kMinimize, + 'solver': solver, + 'time_limit': time_limit, + 'highs_debug_level': HighsDebugLevel.kHighsDebugLevelNone, + 'dual_feasibility_tolerance': dual_feasibility_tolerance, + 'ipm_optimality_tolerance': ipm_optimality_tolerance, + 'log_to_console': disp, + 'mip_max_nodes': mip_max_nodes, + 'output_flag': disp, + 'primal_feasibility_tolerance': primal_feasibility_tolerance, + 'simplex_dual_edge_weight_strategy': + simplex_dual_edge_weight_strategy_enum, + 'simplex_strategy': s_c.SimplexStrategy.kSimplexStrategyDual, + 'ipm_iteration_limit': maxiter, + 'simplex_iteration_limit': maxiter, + 'mip_rel_gap': mip_rel_gap, + } + options.update(unknown_options) + + # np.inf doesn't work; use very large constant + rhs = _replace_inf(rhs) + lhs = _replace_inf(lhs) + lb = _replace_inf(lb) + ub = _replace_inf(ub) + + if integrality is None or np.sum(integrality) == 0: + integrality = np.empty(0) + else: + integrality = np.array(integrality) + + res = _highs_wrapper(c, A.indptr, A.indices, A.data, lhs, rhs, + lb, ub, integrality.astype(np.uint8), options) + + # HiGHS represents constraints as lhs/rhs, so + # Ax + s = b => Ax = b - s + # and we need to split up s by A_ub and A_eq + if 'slack' in res: + slack = res['slack'] + con = np.array(slack[len(b_ub):]) + slack = np.array(slack[:len(b_ub)]) + else: + slack, con = None, None + + # lagrange multipliers for equalities/inequalities and upper/lower bounds + if 'lambda' in res: + lamda = res['lambda'] + marg_ineqlin = np.array(lamda[:len(b_ub)]) + marg_eqlin = np.array(lamda[len(b_ub):]) + marg_upper = np.array(res['marg_bnds'][1, :]) + marg_lower = np.array(res['marg_bnds'][0, :]) + else: + marg_ineqlin, marg_eqlin = None, None + marg_upper, marg_lower = None, None + + # this needs to be updated if we start choosing the solver intelligently + + # Convert to scipy-style status and message + highs_status = res.get('status', None) + highs_message = res.get('message', None) + status, message = 
_highs_to_scipy_status_message(highs_status, + highs_message) + + x = res['x'] # is None if not set + sol = {'x': x, + 'slack': slack, + 'con': con, + 'ineqlin': OptimizeResult({ + 'residual': slack, + 'marginals': marg_ineqlin, + }), + 'eqlin': OptimizeResult({ + 'residual': con, + 'marginals': marg_eqlin, + }), + 'lower': OptimizeResult({ + 'residual': None if x is None else x - lb, + 'marginals': marg_lower, + }), + 'upper': OptimizeResult({ + 'residual': None if x is None else ub - x, + 'marginals': marg_upper + }), + 'fun': res.get('fun'), + 'status': status, + 'success': res['status'] == HighsModelStatus.kOptimal, + 'message': message, + 'nit': res.get('simplex_nit', 0) or res.get('ipm_nit', 0), + 'crossover_nit': res.get('crossover_nit'), + } + + if np.any(x) and integrality is not None: + sol.update({ + 'mip_node_count': res.get('mip_node_count', 0), + 'mip_dual_bound': res.get('mip_dual_bound', 0.0), + 'mip_gap': res.get('mip_gap', 0.0), + }) + + return sol diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_linprog_ip.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_linprog_ip.py new file mode 100644 index 0000000000000000000000000000000000000000..4e6bf717b4d7becd46d0046cedf5f807004898e4 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_linprog_ip.py @@ -0,0 +1,1126 @@ +"""Interior-point method for linear programming + +The *interior-point* method uses the primal-dual path following algorithm +outlined in [1]_. This algorithm supports sparse constraint matrices and +is typically faster than the simplex methods, especially for large, sparse +problems. Note, however, that the solution returned may be slightly less +accurate than those of the simplex methods and will not, in general, +correspond with a vertex of the polytope defined by the constraints. + + .. versionadded:: 1.0.0 + +References +---------- +.. [1] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point + optimizer for linear programming: an implementation of the + homogeneous algorithm." High performance optimization. Springer US, + 2000. 197-232. +""" +# Author: Matt Haberland + +import numpy as np +import scipy as sp +import scipy.sparse as sps +from warnings import warn +from scipy.linalg import LinAlgError +from ._optimize import OptimizeWarning, OptimizeResult, _check_unknown_options +from ._linprog_util import _postsolve +has_umfpack = True +has_cholmod = True +try: + import sksparse # noqa: F401 + from sksparse.cholmod import cholesky as cholmod # noqa: F401 + from sksparse.cholmod import analyze as cholmod_analyze +except ImportError: + has_cholmod = False +try: + import scikits.umfpack # test whether to use factorized # noqa: F401 +except ImportError: + has_umfpack = False + + +def _get_solver(M, sparse=False, lstsq=False, sym_pos=True, + cholesky=True, permc_spec='MMD_AT_PLUS_A'): + """ + Given solver options, return a handle to the appropriate linear system + solver. + + Parameters + ---------- + M : 2-D array + As defined in [4] Equation 8.31 + sparse : bool (default = False) + True if the system to be solved is sparse. This is typically set + True when the original ``A_ub`` and ``A_eq`` arrays are sparse. + lstsq : bool (default = False) + True if the system is ill-conditioned and/or (nearly) singular and + thus a more robust least-squares solver is desired. This is sometimes + needed as the solution is approached. 
+ sym_pos : bool (default = True) + True if the system matrix is symmetric positive definite + Sometimes this needs to be set false as the solution is approached, + even when the system should be symmetric positive definite, due to + numerical difficulties. + cholesky : bool (default = True) + True if the system is to be solved by Cholesky, rather than LU, + decomposition. This is typically faster unless the problem is very + small or prone to numerical difficulties. + permc_spec : str (default = 'MMD_AT_PLUS_A') + Sparsity preservation strategy used by SuperLU. Acceptable values are: + + - ``NATURAL``: natural ordering. + - ``MMD_ATA``: minimum degree ordering on the structure of A^T A. + - ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A. + - ``COLAMD``: approximate minimum degree column ordering. + + See SuperLU documentation. + + Returns + ------- + solve : function + Handle to the appropriate solver function + + """ + try: + if sparse: + if lstsq: + def solve(r, sym_pos=False): + return sps.linalg.lsqr(M, r)[0] + elif cholesky: + try: + # Will raise an exception in the first call, + # or when the matrix changes due to a new problem + _get_solver.cholmod_factor.cholesky_inplace(M) + except Exception: + _get_solver.cholmod_factor = cholmod_analyze(M) + _get_solver.cholmod_factor.cholesky_inplace(M) + solve = _get_solver.cholmod_factor + else: + if has_umfpack and sym_pos: + solve = sps.linalg.factorized(M) + else: # factorized doesn't pass permc_spec + solve = sps.linalg.splu(M, permc_spec=permc_spec).solve + + else: + if lstsq: # sometimes necessary as solution is approached + def solve(r): + return sp.linalg.lstsq(M, r)[0] + elif cholesky: + L = sp.linalg.cho_factor(M) + + def solve(r): + return sp.linalg.cho_solve(L, r) + else: + # this seems to cache the matrix factorization, so solving + # with multiple right hand sides is much faster + def solve(r, sym_pos=sym_pos): + if sym_pos: + return sp.linalg.solve(M, r, assume_a="pos") + else: + return sp.linalg.solve(M, r) + # There are many things that can go wrong here, and it's hard to say + # what all of them are. It doesn't really matter: if the matrix can't be + # factorized, return None. get_solver will be called again with different + # inputs, and a new routine will try to factorize the matrix. + except KeyboardInterrupt: + raise + except Exception: + return None + return solve + + +def _get_delta(A, b, c, x, y, z, tau, kappa, gamma, eta, sparse=False, + lstsq=False, sym_pos=True, cholesky=True, pc=True, ip=False, + permc_spec='MMD_AT_PLUS_A'): + """ + Given standard form problem defined by ``A``, ``b``, and ``c``; + current variable estimates ``x``, ``y``, ``z``, ``tau``, and ``kappa``; + algorithmic parameters ``gamma and ``eta; + and options ``sparse``, ``lstsq``, ``sym_pos``, ``cholesky``, ``pc`` + (predictor-corrector), and ``ip`` (initial point improvement), + get the search direction for increments to the variable estimates. + + Parameters + ---------- + As defined in [4], except: + sparse : bool + True if the system to be solved is sparse. This is typically set + True when the original ``A_ub`` and ``A_eq`` arrays are sparse. + lstsq : bool + True if the system is ill-conditioned and/or (nearly) singular and + thus a more robust least-squares solver is desired. This is sometimes + needed as the solution is approached. 
+ sym_pos : bool + True if the system matrix is symmetric positive definite + Sometimes this needs to be set false as the solution is approached, + even when the system should be symmetric positive definite, due to + numerical difficulties. + cholesky : bool + True if the system is to be solved by Cholesky, rather than LU, + decomposition. This is typically faster unless the problem is very + small or prone to numerical difficulties. + pc : bool + True if the predictor-corrector method of Mehrota is to be used. This + is almost always (if not always) beneficial. Even though it requires + the solution of an additional linear system, the factorization + is typically (implicitly) reused so solution is efficient, and the + number of algorithm iterations is typically reduced. + ip : bool + True if the improved initial point suggestion due to [4] section 4.3 + is desired. It's unclear whether this is beneficial. + permc_spec : str (default = 'MMD_AT_PLUS_A') + (Has effect only with ``sparse = True``, ``lstsq = False``, ``sym_pos = + True``.) A matrix is factorized in each iteration of the algorithm. + This option specifies how to permute the columns of the matrix for + sparsity preservation. Acceptable values are: + + - ``NATURAL``: natural ordering. + - ``MMD_ATA``: minimum degree ordering on the structure of A^T A. + - ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A. + - ``COLAMD``: approximate minimum degree column ordering. + + This option can impact the convergence of the + interior point algorithm; test different values to determine which + performs best for your problem. For more information, refer to + ``scipy.sparse.linalg.splu``. + + Returns + ------- + Search directions as defined in [4] + + References + ---------- + .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point + optimizer for linear programming: an implementation of the + homogeneous algorithm." High performance optimization. Springer US, + 2000. 197-232. + + """ + if A.shape[0] == 0: + # If there are no constraints, some solvers fail (understandably) + # rather than returning empty solution. This gets the job done. + sparse, lstsq, sym_pos, cholesky = False, False, True, False + n_x = len(x) + + # [4] Equation 8.8 + r_P = b * tau - A.dot(x) + r_D = c * tau - A.T.dot(y) - z + r_G = c.dot(x) - b.transpose().dot(y) + kappa + mu = (x.dot(z) + tau * kappa) / (n_x + 1) + + # Assemble M from [4] Equation 8.31 + Dinv = x / z + + if sparse: + M = A.dot(sps.diags(Dinv, 0, format="csc").dot(A.T)) + else: + M = A.dot(Dinv.reshape(-1, 1) * A.T) + solve = _get_solver(M, sparse, lstsq, sym_pos, cholesky, permc_spec) + + # pc: "predictor-corrector" [4] Section 4.1 + # In development this option could be turned off + # but it always seems to improve performance substantially + n_corrections = 1 if pc else 0 + + i = 0 + alpha, d_x, d_z, d_tau, d_kappa = 0, 0, 0, 0, 0 + while i <= n_corrections: + # Reference [4] Eq. 8.6 + rhatp = eta(gamma) * r_P + rhatd = eta(gamma) * r_D + rhatg = eta(gamma) * r_G + + # Reference [4] Eq. 8.7 + rhatxs = gamma * mu - x * z + rhattk = gamma * mu - tau * kappa + + if i == 1: + if ip: # if the correction is to get "initial point" + # Reference [4] Eq. 8.23 + rhatxs = ((1 - alpha) * gamma * mu - + x * z - alpha**2 * d_x * d_z) + rhattk = ((1 - alpha) * gamma * mu - + tau * kappa - + alpha**2 * d_tau * d_kappa) + else: # if the correction is for "predictor-corrector" + # Reference [4] Eq. 
8.13 + rhatxs -= d_x * d_z + rhattk -= d_tau * d_kappa + + # sometimes numerical difficulties arise as the solution is approached + # this loop tries to solve the equations using a sequence of functions + # for solve. For dense systems, the order is: + # 1. scipy.linalg.cho_factor/scipy.linalg.cho_solve, + # 2. scipy.linalg.solve w/ sym_pos = True, + # 3. scipy.linalg.solve w/ sym_pos = False, and if all else fails + # 4. scipy.linalg.lstsq + # For sparse systems, the order is: + # 1. sksparse.cholmod.cholesky (if available) + # 2. scipy.sparse.linalg.factorized (if umfpack available) + # 3. scipy.sparse.linalg.splu + # 4. scipy.sparse.linalg.lsqr + solved = False + while not solved: + try: + # [4] Equation 8.28 + p, q = _sym_solve(Dinv, A, c, b, solve) + # [4] Equation 8.29 + u, v = _sym_solve(Dinv, A, rhatd - + (1 / x) * rhatxs, rhatp, solve) + if np.any(np.isnan(p)) or np.any(np.isnan(q)): + raise LinAlgError + solved = True + except (LinAlgError, ValueError, TypeError) as e: + # Usually this doesn't happen. If it does, it happens when + # there are redundant constraints or when approaching the + # solution. If so, change solver. + if cholesky: + cholesky = False + warn( + "Solving system with option 'cholesky':True " + "failed. It is normal for this to happen " + "occasionally, especially as the solution is " + "approached. However, if you see this frequently, " + "consider setting option 'cholesky' to False.", + OptimizeWarning, stacklevel=5) + elif sym_pos: + sym_pos = False + warn( + "Solving system with option 'sym_pos':True " + "failed. It is normal for this to happen " + "occasionally, especially as the solution is " + "approached. However, if you see this frequently, " + "consider setting option 'sym_pos' to False.", + OptimizeWarning, stacklevel=5) + elif not lstsq: + lstsq = True + warn( + "Solving system with option 'sym_pos':False " + "failed. This may happen occasionally, " + "especially as the solution is " + "approached. However, if you see this frequently, " + "your problem may be numerically challenging. " + "If you cannot improve the formulation, consider " + "setting 'lstsq' to True. Consider also setting " + "`presolve` to True, if it is not already.", + OptimizeWarning, stacklevel=5) + else: + raise e + solve = _get_solver(M, sparse, lstsq, sym_pos, + cholesky, permc_spec) + # [4] Results after 8.29 + d_tau = ((rhatg + 1 / tau * rhattk - (-c.dot(u) + b.dot(v))) / + (1 / tau * kappa + (-c.dot(p) + b.dot(q)))) + d_x = u + p * d_tau + d_y = v + q * d_tau + + # [4] Relations between after 8.25 and 8.26 + d_z = (1 / x) * (rhatxs - z * d_x) + d_kappa = 1 / tau * (rhattk - kappa * d_tau) + + # [4] 8.12 and "Let alpha be the maximal possible step..." before 8.23 + alpha = _get_step(x, d_x, z, d_z, tau, d_tau, kappa, d_kappa, 1) + if ip: # initial point - see [4] 4.4 + gamma = 10 + else: # predictor-corrector, [4] definition after 8.12 + beta1 = 0.1 # [4] pg. 220 (Table 8.1) + gamma = (1 - alpha)**2 * min(beta1, (1 - alpha)) + i += 1 + + return d_x, d_y, d_z, d_tau, d_kappa + + +def _sym_solve(Dinv, A, r1, r2, solve): + """ + An implementation of [4] equation 8.31 and 8.32 + + References + ---------- + .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point + optimizer for linear programming: an implementation of the + homogeneous algorithm." High performance optimization. Springer US, + 2000. 197-232. 
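+
+    A minimal usage sketch (illustrative only; the data and the way ``solve``
+    is built below are arbitrary choices, not part of this module)::
+
+        import numpy as np
+        from scipy.linalg import cho_factor, cho_solve
+
+        rng = np.random.default_rng(0)
+        A = rng.standard_normal((3, 5))
+        Dinv = rng.random(5) + 0.5
+        M = A @ (Dinv[:, None] * A.T)   # normal-equations matrix, [4] 8.31
+        factor = cho_factor(M)
+
+        def solve(r):
+            return cho_solve(factor, r)
+
+        r1 = rng.standard_normal(5)
+        r2 = rng.standard_normal(3)
+        u, v = _sym_solve(Dinv, A, r1, r2, solve)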
+ + """ + # [4] 8.31 + r = r2 + A.dot(Dinv * r1) + v = solve(r) + # [4] 8.32 + u = Dinv * (A.T.dot(v) - r1) + return u, v + + +def _get_step(x, d_x, z, d_z, tau, d_tau, kappa, d_kappa, alpha0): + """ + An implementation of [4] equation 8.21 + + References + ---------- + .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point + optimizer for linear programming: an implementation of the + homogeneous algorithm." High performance optimization. Springer US, + 2000. 197-232. + + """ + # [4] 4.3 Equation 8.21, ignoring 8.20 requirement + # same step is taken in primal and dual spaces + # alpha0 is basically beta3 from [4] Table 8.1, but instead of beta3 + # the value 1 is used in Mehrota corrector and initial point correction + i_x = d_x < 0 + i_z = d_z < 0 + alpha_x = alpha0 * np.min(x[i_x] / -d_x[i_x]) if np.any(i_x) else 1 + alpha_tau = alpha0 * tau / -d_tau if d_tau < 0 else 1 + alpha_z = alpha0 * np.min(z[i_z] / -d_z[i_z]) if np.any(i_z) else 1 + alpha_kappa = alpha0 * kappa / -d_kappa if d_kappa < 0 else 1 + alpha = np.min([1, alpha_x, alpha_tau, alpha_z, alpha_kappa]) + return alpha + + +def _get_message(status): + """ + Given problem status code, return a more detailed message. + + Parameters + ---------- + status : int + An integer representing the exit status of the optimization:: + + 0 : Optimization terminated successfully + 1 : Iteration limit reached + 2 : Problem appears to be infeasible + 3 : Problem appears to be unbounded + 4 : Serious numerical difficulties encountered + + Returns + ------- + message : str + A string descriptor of the exit status of the optimization. + + """ + messages = ( + ["Optimization terminated successfully.", + "The iteration limit was reached before the algorithm converged.", + "The algorithm terminated successfully and determined that the " + "problem is infeasible.", + "The algorithm terminated successfully and determined that the " + "problem is unbounded.", + "Numerical difficulties were encountered before the problem " + "converged. Please check your problem formulation for errors, " + "independence of linear equality constraints, and reasonable " + "scaling and matrix condition numbers. If you continue to " + "encounter this error, please submit a bug report." + ]) + return messages[status] + + +def _do_step(x, y, z, tau, kappa, d_x, d_y, d_z, d_tau, d_kappa, alpha): + """ + An implementation of [4] Equation 8.9 + + References + ---------- + .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point + optimizer for linear programming: an implementation of the + homogeneous algorithm." High performance optimization. Springer US, + 2000. 197-232. + + """ + x = x + alpha * d_x + tau = tau + alpha * d_tau + z = z + alpha * d_z + kappa = kappa + alpha * d_kappa + y = y + alpha * d_y + return x, y, z, tau, kappa + + +def _get_blind_start(shape): + """ + Return the starting point from [4] 4.4 + + References + ---------- + .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point + optimizer for linear programming: an implementation of the + homogeneous algorithm." High performance optimization. Springer US, + 2000. 197-232. + + """ + m, n = shape + x0 = np.ones(n) + y0 = np.zeros(m) + z0 = np.ones(n) + tau0 = 1 + kappa0 = 1 + return x0, y0, z0, tau0, kappa0 + + +def _indicators(A, b, c, c0, x, y, z, tau, kappa): + """ + Implementation of several equations from [4] used as indicators of + the status of optimization. + + References + ---------- + .. [4] Andersen, Erling D., and Knud D. Andersen. 
"The MOSEK interior point + optimizer for linear programming: an implementation of the + homogeneous algorithm." High performance optimization. Springer US, + 2000. 197-232. + + """ + + # residuals for termination are relative to initial values + x0, y0, z0, tau0, kappa0 = _get_blind_start(A.shape) + + # See [4], Section 4 - The Homogeneous Algorithm, Equation 8.8 + def r_p(x, tau): + return b * tau - A.dot(x) + + def r_d(y, z, tau): + return c * tau - A.T.dot(y) - z + + def r_g(x, y, kappa): + return kappa + c.dot(x) - b.dot(y) + + # np.dot unpacks if they are arrays of size one + def mu(x, tau, z, kappa): + return (x.dot(z) + np.dot(tau, kappa)) / (len(x) + 1) + + obj = c.dot(x / tau) + c0 + + def norm(a): + return np.linalg.norm(a) + + # See [4], Section 4.5 - The Stopping Criteria + r_p0 = r_p(x0, tau0) + r_d0 = r_d(y0, z0, tau0) + r_g0 = r_g(x0, y0, kappa0) + mu_0 = mu(x0, tau0, z0, kappa0) + rho_A = norm(c.T.dot(x) - b.T.dot(y)) / (tau + norm(b.T.dot(y))) + rho_p = norm(r_p(x, tau)) / max(1, norm(r_p0)) + rho_d = norm(r_d(y, z, tau)) / max(1, norm(r_d0)) + rho_g = norm(r_g(x, y, kappa)) / max(1, norm(r_g0)) + rho_mu = mu(x, tau, z, kappa) / mu_0 + return rho_p, rho_d, rho_A, rho_g, rho_mu, obj + + +def _display_iter(rho_p, rho_d, rho_g, alpha, rho_mu, obj, header=False): + """ + Print indicators of optimization status to the console. + + Parameters + ---------- + rho_p : float + The (normalized) primal feasibility, see [4] 4.5 + rho_d : float + The (normalized) dual feasibility, see [4] 4.5 + rho_g : float + The (normalized) duality gap, see [4] 4.5 + alpha : float + The step size, see [4] 4.3 + rho_mu : float + The (normalized) path parameter, see [4] 4.5 + obj : float + The objective function value of the current iterate + header : bool + True if a header is to be printed + + References + ---------- + .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point + optimizer for linear programming: an implementation of the + homogeneous algorithm." High performance optimization. Springer US, + 2000. 197-232. + + """ + if header: + print("Primal Feasibility ", + "Dual Feasibility ", + "Duality Gap ", + "Step ", + "Path Parameter ", + "Objective ") + + # no clue why this works + fmt = '{0:<20.13}{1:<20.13}{2:<20.13}{3:<17.13}{4:<20.13}{5:<20.13}' + print(fmt.format( + float(rho_p), + float(rho_d), + float(rho_g), + alpha if isinstance(alpha, str) else float(alpha), + float(rho_mu), + float(obj))) + + +def _ip_hsd(A, b, c, c0, alpha0, beta, maxiter, disp, tol, sparse, lstsq, + sym_pos, cholesky, pc, ip, permc_spec, callback, postsolve_args): + r""" + Solve a linear programming problem in standard form: + + Minimize:: + + c @ x + + Subject to:: + + A @ x == b + x >= 0 + + using the interior point method of [4]. + + Parameters + ---------- + A : 2-D array + 2-D array such that ``A @ x``, gives the values of the equality + constraints at ``x``. + b : 1-D array + 1-D array of values representing the RHS of each equality constraint + (row) in ``A`` (for standard form problem). + c : 1-D array + Coefficients of the linear objective function to be minimized (for + standard form problem). + c0 : float + Constant term in objective function due to fixed (and eliminated) + variables. (Purely for display.) 
+ alpha0 : float + The maximal step size for Mehrota's predictor-corrector search + direction; see :math:`\beta_3`of [4] Table 8.1 + beta : float + The desired reduction of the path parameter :math:`\mu` (see [6]_) + maxiter : int + The maximum number of iterations of the algorithm. + disp : bool + Set to ``True`` if indicators of optimization status are to be printed + to the console each iteration. + tol : float + Termination tolerance; see [4]_ Section 4.5. + sparse : bool + Set to ``True`` if the problem is to be treated as sparse. However, + the inputs ``A_eq`` and ``A_ub`` should nonetheless be provided as + (dense) arrays rather than sparse matrices. + lstsq : bool + Set to ``True`` if the problem is expected to be very poorly + conditioned. This should always be left as ``False`` unless severe + numerical difficulties are frequently encountered, and a better option + would be to improve the formulation of the problem. + sym_pos : bool + Leave ``True`` if the problem is expected to yield a well conditioned + symmetric positive definite normal equation matrix (almost always). + cholesky : bool + Set to ``True`` if the normal equations are to be solved by explicit + Cholesky decomposition followed by explicit forward/backward + substitution. This is typically faster for moderate, dense problems + that are numerically well-behaved. + pc : bool + Leave ``True`` if the predictor-corrector method of Mehrota is to be + used. This is almost always (if not always) beneficial. + ip : bool + Set to ``True`` if the improved initial point suggestion due to [4]_ + Section 4.3 is desired. It's unclear whether this is beneficial. + permc_spec : str (default = 'MMD_AT_PLUS_A') + (Has effect only with ``sparse = True``, ``lstsq = False``, ``sym_pos = + True``.) A matrix is factorized in each iteration of the algorithm. + This option specifies how to permute the columns of the matrix for + sparsity preservation. Acceptable values are: + + - ``NATURAL``: natural ordering. + - ``MMD_ATA``: minimum degree ordering on the structure of A^T A. + - ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A. + - ``COLAMD``: approximate minimum degree column ordering. + + This option can impact the convergence of the + interior point algorithm; test different values to determine which + performs best for your problem. For more information, refer to + ``scipy.sparse.linalg.splu``. + callback : callable, optional + If a callback function is provided, it will be called within each + iteration of the algorithm. The callback function must accept a single + `scipy.optimize.OptimizeResult` consisting of the following fields: + + x : 1-D array + Current solution vector + fun : float + Current value of the objective function + success : bool + True only when an algorithm has completed successfully, + so this is always False as the callback function is called + only while the algorithm is still iterating. + slack : 1-D array + The values of the slack variables. Each slack variable + corresponds to an inequality constraint. If the slack is zero, + the corresponding constraint is active. + con : 1-D array + The (nominally zero) residuals of the equality constraints, + that is, ``b - A_eq @ x`` + phase : int + The phase of the algorithm being executed. This is always + 1 for the interior-point method because it has only one phase. + status : int + For revised simplex, this is always 0 because if a different + status is detected, the algorithm terminates. + nit : int + The number of iterations performed. 
+ message : str + A string descriptor of the exit status of the optimization. + postsolve_args : tuple + Data needed by _postsolve to convert the solution to the standard-form + problem into the solution to the original problem. + + Returns + ------- + x_hat : float + Solution vector (for standard form problem). + status : int + An integer representing the exit status of the optimization:: + + 0 : Optimization terminated successfully + 1 : Iteration limit reached + 2 : Problem appears to be infeasible + 3 : Problem appears to be unbounded + 4 : Serious numerical difficulties encountered + + message : str + A string descriptor of the exit status of the optimization. + iteration : int + The number of iterations taken to solve the problem + + References + ---------- + .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point + optimizer for linear programming: an implementation of the + homogeneous algorithm." High performance optimization. Springer US, + 2000. 197-232. + .. [6] Freund, Robert M. "Primal-Dual Interior-Point Methods for Linear + Programming based on Newton's Method." Unpublished Course Notes, + March 2004. Available 2/25/2017 at: + https://ocw.mit.edu/courses/sloan-school-of-management/15-084j-nonlinear-programming-spring-2004/lecture-notes/lec14_int_pt_mthd.pdf + + """ + + iteration = 0 + + # default initial point + x, y, z, tau, kappa = _get_blind_start(A.shape) + + # first iteration is special improvement of initial point + ip = ip if pc else False + + # [4] 4.5 + rho_p, rho_d, rho_A, rho_g, rho_mu, obj = _indicators( + A, b, c, c0, x, y, z, tau, kappa) + go = rho_p > tol or rho_d > tol or rho_A > tol # we might get lucky : ) + + if disp: + _display_iter(rho_p, rho_d, rho_g, "-", rho_mu, obj, header=True) + if callback is not None: + x_o, fun, slack, con = _postsolve(x/tau, postsolve_args) + res = OptimizeResult({'x': x_o, 'fun': fun, 'slack': slack, + 'con': con, 'nit': iteration, 'phase': 1, + 'complete': False, 'status': 0, + 'message': "", 'success': False}) + callback(res) + + status = 0 + message = "Optimization terminated successfully." + + if sparse: + A = sps.csc_matrix(A) + + while go: + + iteration += 1 + + if ip: # initial point + # [4] Section 4.4 + gamma = 1 + + def eta(g): + return 1 + else: + # gamma = 0 in predictor step according to [4] 4.1 + # if predictor/corrector is off, use mean of complementarity [6] + # 5.1 / [4] Below Figure 10-4 + gamma = 0 if pc else beta * np.mean(z * x) + # [4] Section 4.1 + + def eta(g=gamma): + return 1 - g + + try: + # Solve [4] 8.6 and 8.7/8.13/8.23 + d_x, d_y, d_z, d_tau, d_kappa = _get_delta( + A, b, c, x, y, z, tau, kappa, gamma, eta, + sparse, lstsq, sym_pos, cholesky, pc, ip, permc_spec) + + if ip: # initial point + # [4] 4.4 + # Formula after 8.23 takes a full step regardless if this will + # take it negative + alpha = 1.0 + x, y, z, tau, kappa = _do_step( + x, y, z, tau, kappa, d_x, d_y, + d_z, d_tau, d_kappa, alpha) + x[x < 1] = 1 + z[z < 1] = 1 + tau = max(1, tau) + kappa = max(1, kappa) + ip = False # done with initial point + else: + # [4] Section 4.3 + alpha = _get_step(x, d_x, z, d_z, tau, + d_tau, kappa, d_kappa, alpha0) + # [4] Equation 8.9 + x, y, z, tau, kappa = _do_step( + x, y, z, tau, kappa, d_x, d_y, d_z, d_tau, d_kappa, alpha) + + except (LinAlgError, FloatingPointError, + ValueError, ZeroDivisionError): + # this can happen when sparse solver is used and presolve + # is turned off. Also observed ValueError in AppVeyor Python 3.6 + # Win32 build (PR #8676). 
I've never seen it otherwise. + status = 4 + message = _get_message(status) + break + + # [4] 4.5 + rho_p, rho_d, rho_A, rho_g, rho_mu, obj = _indicators( + A, b, c, c0, x, y, z, tau, kappa) + go = rho_p > tol or rho_d > tol or rho_A > tol + + if disp: + _display_iter(rho_p, rho_d, rho_g, alpha, rho_mu, obj) + if callback is not None: + x_o, fun, slack, con = _postsolve(x/tau, postsolve_args) + res = OptimizeResult({'x': x_o, 'fun': fun, 'slack': slack, + 'con': con, 'nit': iteration, 'phase': 1, + 'complete': False, 'status': 0, + 'message': "", 'success': False}) + callback(res) + + # [4] 4.5 + inf1 = (rho_p < tol and rho_d < tol and rho_g < tol and tau < tol * + max(1, kappa)) + inf2 = rho_mu < tol and tau < tol * min(1, kappa) + if inf1 or inf2: + # [4] Lemma 8.4 / Theorem 8.3 + if b.transpose().dot(y) > tol: + status = 2 + else: # elif c.T.dot(x) < tol: ? Probably not necessary. + status = 3 + message = _get_message(status) + break + elif iteration >= maxiter: + status = 1 + message = _get_message(status) + break + + x_hat = x / tau + # [4] Statement after Theorem 8.2 + return x_hat, status, message, iteration + + +def _linprog_ip(c, c0, A, b, callback, postsolve_args, maxiter=1000, tol=1e-8, + disp=False, alpha0=.99995, beta=0.1, sparse=False, lstsq=False, + sym_pos=True, cholesky=None, pc=True, ip=False, + permc_spec='MMD_AT_PLUS_A', **unknown_options): + r""" + Minimize a linear objective function subject to linear + equality and non-negativity constraints using the interior point method + of [4]_. Linear programming is intended to solve problems + of the following form: + + Minimize:: + + c @ x + + Subject to:: + + A @ x == b + x >= 0 + + User-facing documentation is in _linprog_doc.py. + + Parameters + ---------- + c : 1-D array + Coefficients of the linear objective function to be minimized. + c0 : float + Constant term in objective function due to fixed (and eliminated) + variables. (Purely for display.) + A : 2-D array + 2-D array such that ``A @ x``, gives the values of the equality + constraints at ``x``. + b : 1-D array + 1-D array of values representing the right hand side of each equality + constraint (row) in ``A``. + callback : callable, optional + Callback function to be executed once per iteration. + postsolve_args : tuple + Data needed by _postsolve to convert the solution to the standard-form + problem into the solution to the original problem. + + Options + ------- + maxiter : int (default = 1000) + The maximum number of iterations of the algorithm. + tol : float (default = 1e-8) + Termination tolerance to be used for all termination criteria; + see [4]_ Section 4.5. + disp : bool (default = False) + Set to ``True`` if indicators of optimization status are to be printed + to the console each iteration. + alpha0 : float (default = 0.99995) + The maximal step size for Mehrota's predictor-corrector search + direction; see :math:`\beta_{3}` of [4]_ Table 8.1. + beta : float (default = 0.1) + The desired reduction of the path parameter :math:`\mu` (see [6]_) + when Mehrota's predictor-corrector is not in use (uncommon). + sparse : bool (default = False) + Set to ``True`` if the problem is to be treated as sparse after + presolve. If either ``A_eq`` or ``A_ub`` is a sparse matrix, + this option will automatically be set ``True``, and the problem + will be treated as sparse even during presolve. 
If your constraint + matrices contain mostly zeros and the problem is not very small (less + than about 100 constraints or variables), consider setting ``True`` + or providing ``A_eq`` and ``A_ub`` as sparse matrices. + lstsq : bool (default = False) + Set to ``True`` if the problem is expected to be very poorly + conditioned. This should always be left ``False`` unless severe + numerical difficulties are encountered. Leave this at the default + unless you receive a warning message suggesting otherwise. + sym_pos : bool (default = True) + Leave ``True`` if the problem is expected to yield a well conditioned + symmetric positive definite normal equation matrix + (almost always). Leave this at the default unless you receive + a warning message suggesting otherwise. + cholesky : bool (default = True) + Set to ``True`` if the normal equations are to be solved by explicit + Cholesky decomposition followed by explicit forward/backward + substitution. This is typically faster for problems + that are numerically well-behaved. + pc : bool (default = True) + Leave ``True`` if the predictor-corrector method of Mehrota is to be + used. This is almost always (if not always) beneficial. + ip : bool (default = False) + Set to ``True`` if the improved initial point suggestion due to [4]_ + Section 4.3 is desired. Whether this is beneficial or not + depends on the problem. + permc_spec : str (default = 'MMD_AT_PLUS_A') + (Has effect only with ``sparse = True``, ``lstsq = False``, ``sym_pos = + True``, and no SuiteSparse.) + A matrix is factorized in each iteration of the algorithm. + This option specifies how to permute the columns of the matrix for + sparsity preservation. Acceptable values are: + + - ``NATURAL``: natural ordering. + - ``MMD_ATA``: minimum degree ordering on the structure of A^T A. + - ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A. + - ``COLAMD``: approximate minimum degree column ordering. + + This option can impact the convergence of the + interior point algorithm; test different values to determine which + performs best for your problem. For more information, refer to + ``scipy.sparse.linalg.splu``. + unknown_options : dict + Optional arguments not used by this particular solver. If + `unknown_options` is non-empty a warning is issued listing all + unused options. + + Returns + ------- + x : 1-D array + Solution vector. + status : int + An integer representing the exit status of the optimization:: + + 0 : Optimization terminated successfully + 1 : Iteration limit reached + 2 : Problem appears to be infeasible + 3 : Problem appears to be unbounded + 4 : Serious numerical difficulties encountered + + message : str + A string descriptor of the exit status of the optimization. + iteration : int + The number of iterations taken to solve the problem. + + Notes + ----- + This method implements the algorithm outlined in [4]_ with ideas from [8]_ + and a structure inspired by the simpler methods of [6]_. + + The primal-dual path following method begins with initial 'guesses' of + the primal and dual variables of the standard form problem and iteratively + attempts to solve the (nonlinear) Karush-Kuhn-Tucker conditions for the + problem with a gradually reduced logarithmic barrier term added to the + objective. This particular implementation uses a homogeneous self-dual + formulation, which provides certificates of infeasibility or unboundedness + where applicable. 
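+
+    For illustration only, a minimal call that reaches this solver through the
+    public ``linprog`` interface (in SciPy versions where
+    ``method='interior-point'`` is still selectable; the data are arbitrary)::
+
+        from scipy.optimize import linprog
+
+        # minimize x0 + 2*x1  subject to  x0 + x1 >= 1  and  x >= 0
+        res = linprog(c=[1, 2], A_ub=[[-1, -1]], b_ub=[-1],
+                      method='interior-point', options={'tol': 1e-8})
+        # res.x is approximately [1, 0]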
+ + The default initial point for the primal and dual variables is that + defined in [4]_ Section 4.4 Equation 8.22. Optionally (by setting initial + point option ``ip=True``), an alternate (potentially improved) starting + point can be calculated according to the additional recommendations of + [4]_ Section 4.4. + + A search direction is calculated using the predictor-corrector method + (single correction) proposed by Mehrota and detailed in [4]_ Section 4.1. + (A potential improvement would be to implement the method of multiple + corrections described in [4]_ Section 4.2.) In practice, this is + accomplished by solving the normal equations, [4]_ Section 5.1 Equations + 8.31 and 8.32, derived from the Newton equations [4]_ Section 5 Equations + 8.25 (compare to [4]_ Section 4 Equations 8.6-8.8). The advantage of + solving the normal equations rather than 8.25 directly is that the + matrices involved are symmetric positive definite, so Cholesky + decomposition can be used rather than the more expensive LU factorization. + + With default options, the solver used to perform the factorization depends + on third-party software availability and the conditioning of the problem. + + For dense problems, solvers are tried in the following order: + + 1. ``scipy.linalg.cho_factor`` + + 2. ``scipy.linalg.solve`` with option ``sym_pos=True`` + + 3. ``scipy.linalg.solve`` with option ``sym_pos=False`` + + 4. ``scipy.linalg.lstsq`` + + For sparse problems: + + 1. ``sksparse.cholmod.cholesky`` (if scikit-sparse and SuiteSparse are installed) + + 2. ``scipy.sparse.linalg.factorized`` + (if scikit-umfpack and SuiteSparse are installed) + + 3. ``scipy.sparse.linalg.splu`` (which uses SuperLU distributed with SciPy) + + 4. ``scipy.sparse.linalg.lsqr`` + + If the solver fails for any reason, successively more robust (but slower) + solvers are attempted in the order indicated. Attempting, failing, and + re-starting factorization can be time consuming, so if the problem is + numerically challenging, options can be set to bypass solvers that are + failing. Setting ``cholesky=False`` skips to solver 2, + ``sym_pos=False`` skips to solver 3, and ``lstsq=True`` skips + to solver 4 for both sparse and dense problems. + + Potential improvements for combating issues associated with dense + columns in otherwise sparse problems are outlined in [4]_ Section 5.3 and + [10]_ Section 4.1-4.2; the latter also discusses the alleviation of + accuracy issues associated with the substitution approach to free + variables. + + After calculating the search direction, the maximum possible step size + that does not activate the non-negativity constraints is calculated, and + the smaller of this step size and unity is applied (as in [4]_ Section + 4.1.) [4]_ Section 4.3 suggests improvements for choosing the step size. + + The new point is tested according to the termination conditions of [4]_ + Section 4.5. The same tolerance, which can be set using the ``tol`` option, + is used for all checks. (A potential improvement would be to expose + the different tolerances to be set independently.) If optimality, + unboundedness, or infeasibility is detected, the solve procedure + terminates; otherwise it repeats. + + The expected problem formulation differs between the top level ``linprog`` + module and the method specific solvers. 
The method specific solvers expect a + problem in standard form: + + Minimize:: + + c @ x + + Subject to:: + + A @ x == b + x >= 0 + + Whereas the top level ``linprog`` module expects a problem of form: + + Minimize:: + + c @ x + + Subject to:: + + A_ub @ x <= b_ub + A_eq @ x == b_eq + lb <= x <= ub + + where ``lb = 0`` and ``ub = None`` unless set in ``bounds``. + + The original problem contains equality, upper-bound and variable constraints + whereas the method specific solver requires equality constraints and + variable non-negativity. + + ``linprog`` module converts the original problem to standard form by + converting the simple bounds to upper bound constraints, introducing + non-negative slack variables for inequality constraints, and expressing + unbounded variables as the difference between two non-negative variables. + + + References + ---------- + .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point + optimizer for linear programming: an implementation of the + homogeneous algorithm." High performance optimization. Springer US, + 2000. 197-232. + .. [6] Freund, Robert M. "Primal-Dual Interior-Point Methods for Linear + Programming based on Newton's Method." Unpublished Course Notes, + March 2004. Available 2/25/2017 at + https://ocw.mit.edu/courses/sloan-school-of-management/15-084j-nonlinear-programming-spring-2004/lecture-notes/lec14_int_pt_mthd.pdf + .. [8] Andersen, Erling D., and Knud D. Andersen. "Presolving in linear + programming." Mathematical Programming 71.2 (1995): 221-245. + .. [9] Bertsimas, Dimitris, and J. Tsitsiklis. "Introduction to linear + programming." Athena Scientific 1 (1997): 997. + .. [10] Andersen, Erling D., et al. Implementation of interior point methods + for large scale linear programming. HEC/Universite de Geneve, 1996. + + """ + + _check_unknown_options(unknown_options) + + # These should be warnings, not errors + if (cholesky or cholesky is None) and sparse and not has_cholmod: + if cholesky: + warn("Sparse cholesky is only available with scikit-sparse. " + "Setting `cholesky = False`", + OptimizeWarning, stacklevel=3) + cholesky = False + + if sparse and lstsq: + warn("Option combination 'sparse':True and 'lstsq':True " + "is not recommended.", + OptimizeWarning, stacklevel=3) + + if lstsq and cholesky: + warn("Invalid option combination 'lstsq':True " + "and 'cholesky':True; option 'cholesky' has no effect when " + "'lstsq' is set True.", + OptimizeWarning, stacklevel=3) + + valid_permc_spec = ('NATURAL', 'MMD_ATA', 'MMD_AT_PLUS_A', 'COLAMD') + if permc_spec.upper() not in valid_permc_spec: + warn("Invalid permc_spec option: '" + str(permc_spec) + "'. " + "Acceptable values are 'NATURAL', 'MMD_ATA', 'MMD_AT_PLUS_A', " + "and 'COLAMD'. 
Reverting to default.", + OptimizeWarning, stacklevel=3) + permc_spec = 'MMD_AT_PLUS_A' + + # This can be an error + if not sym_pos and cholesky: + raise ValueError( + "Invalid option combination 'sym_pos':False " + "and 'cholesky':True: Cholesky decomposition is only possible " + "for symmetric positive definite matrices.") + + cholesky = cholesky or (cholesky is None and sym_pos and not lstsq) + + x, status, message, iteration = _ip_hsd(A, b, c, c0, alpha0, beta, + maxiter, disp, tol, sparse, + lstsq, sym_pos, cholesky, + pc, ip, permc_spec, callback, + postsolve_args) + + return x, status, message, iteration diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_linprog_rs.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_linprog_rs.py new file mode 100644 index 0000000000000000000000000000000000000000..43fed5805c4e40f0c38de91f053e3926cf1478e4 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_linprog_rs.py @@ -0,0 +1,572 @@ +"""Revised simplex method for linear programming + +The *revised simplex* method uses the method described in [1]_, except +that a factorization [2]_ of the basis matrix, rather than its inverse, +is efficiently maintained and used to solve the linear systems at each +iteration of the algorithm. + +.. versionadded:: 1.3.0 + +References +---------- +.. [1] Bertsimas, Dimitris, and J. Tsitsiklis. "Introduction to linear + programming." Athena Scientific 1 (1997): 997. +.. [2] Bartels, Richard H. "A stabilization of the simplex method." + Journal in Numerische Mathematik 16.5 (1971): 414-434. + +""" +# Author: Matt Haberland + +import numpy as np +from numpy.linalg import LinAlgError + +from scipy.linalg import solve +from ._optimize import _check_unknown_options +from ._bglu_dense import LU +from ._bglu_dense import BGLU as BGLU +from ._linprog_util import _postsolve +from ._optimize import OptimizeResult + + +def _phase_one(A, b, x0, callback, postsolve_args, maxiter, tol, disp, + maxupdate, mast, pivot): + """ + The purpose of phase one is to find an initial basic feasible solution + (BFS) to the original problem. + + Generates an auxiliary problem with a trivial BFS and an objective that + minimizes infeasibility of the original problem. Solves the auxiliary + problem using the main simplex routine (phase two). This either yields + a BFS to the original problem or determines that the original problem is + infeasible. If feasible, phase one detects redundant rows in the original + constraint matrix and removes them, then chooses additional indices as + necessary to complete a basis/BFS for the original problem. + """ + + m, n = A.shape + status = 0 + + # generate auxiliary problem to get initial BFS + A, b, c, basis, x, status = _generate_auxiliary_problem(A, b, x0, tol) + + if status == 6: + residual = c.dot(x) + iter_k = 0 + return x, basis, A, b, residual, status, iter_k + + # solve auxiliary problem + phase_one_n = n + iter_k = 0 + x, basis, status, iter_k = _phase_two(c, A, x, basis, callback, + postsolve_args, + maxiter, tol, disp, + maxupdate, mast, pivot, + iter_k, phase_one_n) + + # check for infeasibility + residual = c.dot(x) + if status == 0 and residual > tol: + status = 2 + + # drive artificial variables out of basis + # TODO: test redundant row removal better + # TODO: make solve more efficient with BGLU? This could take a while. 
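+    # The loop below expresses every column of A in the current basis via
+    # solve(B, A); an artificial column that is still basic picks out a single
+    # "pertinent" row. If a not-yet-basic original column has an entry larger
+    # than tol in that row, it replaces the artificial column in the basis;
+    # otherwise that row is redundant and is marked for removal.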
+ keep_rows = np.ones(m, dtype=bool) + for basis_column in basis[basis >= n]: + B = A[:, basis] + try: + basis_finder = np.abs(solve(B, A)) # inefficient + pertinent_row = np.argmax(basis_finder[:, basis_column]) + eligible_columns = np.ones(n, dtype=bool) + eligible_columns[basis[basis < n]] = 0 + eligible_column_indices = np.where(eligible_columns)[0] + index = np.argmax(basis_finder[:, :n] + [pertinent_row, eligible_columns]) + new_basis_column = eligible_column_indices[index] + if basis_finder[pertinent_row, new_basis_column] < tol: + keep_rows[pertinent_row] = False + else: + basis[basis == basis_column] = new_basis_column + except LinAlgError: + status = 4 + + # form solution to original problem + A = A[keep_rows, :n] + basis = basis[keep_rows] + x = x[:n] + m = A.shape[0] + return x, basis, A, b, residual, status, iter_k + + +def _get_more_basis_columns(A, basis): + """ + Called when the auxiliary problem terminates with artificial columns in + the basis, which must be removed and replaced with non-artificial + columns. Finds additional columns that do not make the matrix singular. + """ + m, n = A.shape + + # options for inclusion are those that aren't already in the basis + a = np.arange(m+n) + bl = np.zeros(len(a), dtype=bool) + bl[basis] = 1 + options = a[~bl] + options = options[options < n] # and they have to be non-artificial + + # form basis matrix + B = np.zeros((m, m)) + B[:, 0:len(basis)] = A[:, basis] + + if (basis.size > 0 and + np.linalg.matrix_rank(B[:, :len(basis)]) < len(basis)): + raise Exception("Basis has dependent columns") + + rank = 0 # just enter the loop + for i in range(n): # somewhat arbitrary, but we need another way out + # permute the options, and take as many as needed + new_basis = np.random.permutation(options)[:m-len(basis)] + B[:, len(basis):] = A[:, new_basis] # update the basis matrix + rank = np.linalg.matrix_rank(B) # check the rank + if rank == m: + break + + return np.concatenate((basis, new_basis)) + + +def _generate_auxiliary_problem(A, b, x0, tol): + """ + Modifies original problem to create an auxiliary problem with a trivial + initial basic feasible solution and an objective that minimizes + infeasibility in the original problem. + + Conceptually, this is done by stacking an identity matrix on the right of + the original constraint matrix, adding artificial variables to correspond + with each of these new columns, and generating a cost vector that is all + zeros except for ones corresponding with each of the new variables. + + A initial basic feasible solution is trivial: all variables are zero + except for the artificial variables, which are set equal to the + corresponding element of the right hand side `b`. + + Running the simplex method on this auxiliary problem drives all of the + artificial variables - and thus the cost - to zero if the original problem + is feasible. The original problem is declared infeasible otherwise. + + Much of the complexity below is to improve efficiency by using singleton + columns in the original problem where possible, thus generating artificial + variables only as necessary, and using an initial 'guess' basic feasible + solution. + """ + status = 0 + m, n = A.shape + + if x0 is not None: + x = x0 + else: + x = np.zeros(n) + + r = b - A@x # residual; this must be all zeros for feasibility + + A[r < 0] = -A[r < 0] # express problem with RHS positive for trivial BFS + b[r < 0] = -b[r < 0] # to the auxiliary problem + r[r < 0] *= -1 + + # Rows which we will need to find a trivial way to zero. 
+ # This should just be the rows where there is a nonzero residual. + # But then we would not necessarily have a column singleton in every row. + # This makes it difficult to find an initial basis. + if x0 is None: + nonzero_constraints = np.arange(m) + else: + nonzero_constraints = np.where(r > tol)[0] + + # these are (at least some of) the initial basis columns + basis = np.where(np.abs(x) > tol)[0] + + if len(nonzero_constraints) == 0 and len(basis) <= m: # already a BFS + c = np.zeros(n) + basis = _get_more_basis_columns(A, basis) + return A, b, c, basis, x, status + elif (len(nonzero_constraints) > m - len(basis) or + np.any(x < 0)): # can't get trivial BFS + c = np.zeros(n) + status = 6 + return A, b, c, basis, x, status + + # chooses existing columns appropriate for inclusion in initial basis + cols, rows = _select_singleton_columns(A, r) + + # find the rows we need to zero that we _can_ zero with column singletons + i_tofix = np.isin(rows, nonzero_constraints) + # these columns can't already be in the basis, though + # we are going to add them to the basis and change the corresponding x val + i_notinbasis = np.logical_not(np.isin(cols, basis)) + i_fix_without_aux = np.logical_and(i_tofix, i_notinbasis) + rows = rows[i_fix_without_aux] + cols = cols[i_fix_without_aux] + + # indices of the rows we can only zero with auxiliary variable + # these rows will get a one in each auxiliary column + arows = nonzero_constraints[np.logical_not( + np.isin(nonzero_constraints, rows))] + n_aux = len(arows) + acols = n + np.arange(n_aux) # indices of auxiliary columns + + basis_ng = np.concatenate((cols, acols)) # basis columns not from guess + basis_ng_rows = np.concatenate((rows, arows)) # rows we need to zero + + # add auxiliary singleton columns + A = np.hstack((A, np.zeros((m, n_aux)))) + A[arows, acols] = 1 + + # generate initial BFS + x = np.concatenate((x, np.zeros(n_aux))) + x[basis_ng] = r[basis_ng_rows]/A[basis_ng_rows, basis_ng] + + # generate costs to minimize infeasibility + c = np.zeros(n_aux + n) + c[acols] = 1 + + # basis columns correspond with nonzeros in guess, those with column + # singletons we used to zero remaining constraints, and any additional + # columns to get a full set (m columns) + basis = np.concatenate((basis, basis_ng)) + basis = _get_more_basis_columns(A, basis) # add columns as needed + + return A, b, c, basis, x, status + + +def _select_singleton_columns(A, b): + """ + Finds singleton columns for which the singleton entry is of the same sign + as the right-hand side; these columns are eligible for inclusion in an + initial basis. Determines the rows in which the singleton entries are + located. For each of these rows, returns the indices of the one singleton + column and its corresponding row. 
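+
+    For example (an illustrative sketch; the values are arbitrary)::
+
+        import numpy as np
+
+        A = np.array([[2., 0.], [0., -3.]])
+        b = np.array([4., 6.])
+        cols, rows = _select_singleton_columns(A, b)
+        # cols == [0], rows == [0]: column 1 is rejected because its entry
+        # (-3) differs in sign from b[1]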
+ """ + # find indices of all singleton columns and corresponding row indices + column_indices = np.nonzero(np.sum(np.abs(A) != 0, axis=0) == 1)[0] + columns = A[:, column_indices] # array of singleton columns + row_indices = np.zeros(len(column_indices), dtype=int) + nonzero_rows, nonzero_columns = np.nonzero(columns) + row_indices[nonzero_columns] = nonzero_rows # corresponding row indices + + # keep only singletons with entries that have same sign as RHS + # this is necessary because all elements of BFS must be non-negative + same_sign = A[row_indices, column_indices]*b[row_indices] >= 0 + column_indices = column_indices[same_sign][::-1] + row_indices = row_indices[same_sign][::-1] + # Reversing the order so that steps below select rightmost columns + # for initial basis, which will tend to be slack variables. (If the + # guess corresponds with a basic feasible solution but a constraint + # is not satisfied with the corresponding slack variable zero, the slack + # variable must be basic.) + + # for each row, keep rightmost singleton column with an entry in that row + unique_row_indices, first_columns = np.unique(row_indices, + return_index=True) + return column_indices[first_columns], unique_row_indices + + +def _find_nonzero_rows(A, tol): + """ + Returns logical array indicating the locations of rows with at least + one nonzero element. + """ + return np.any(np.abs(A) > tol, axis=1) + + +def _select_enter_pivot(c_hat, bl, a, rule="bland", tol=1e-12): + """ + Selects a pivot to enter the basis. Currently Bland's rule - the smallest + index that has a negative reduced cost - is the default. + """ + if rule.lower() == "mrc": # index with minimum reduced cost + return a[~bl][np.argmin(c_hat)] + else: # smallest index w/ negative reduced cost + return a[~bl][c_hat < -tol][0] + + +def _display_iter(phase, iteration, slack, con, fun): + """ + Print indicators of optimization status to the console. + """ + header = True if not iteration % 20 else False + + if header: + print("Phase", + "Iteration", + "Minimum Slack ", + "Constraint Residual", + "Objective ") + + # := -tol): # all reduced costs positive -> terminate + break + + j = _select_enter_pivot(c_hat, bl, a, rule=pivot, tol=tol) + u = B.solve(A[:, j]) # similar to u = solve(B, A[:, j]) + + i = u > tol # if none of the u are positive, unbounded + if not np.any(i): + status = 3 + break + + th = xb[i]/u[i] + l = np.argmin(th) # implicitly selects smallest subscript + th_star = th[l] # step size + + x[b] = x[b] - th_star*u # take step + x[j] = th_star + B.update(ab[i][l], j) # modify basis + b = B.b # similar to b[ab[i][l]] = + + else: + # If the end of the for loop is reached (without a break statement), + # then another step has been taken, so the iteration counter should + # increment, info should be displayed, and callback should be called. + iteration += 1 + status = 1 + if disp or callback is not None: + _display_and_callback(phase_one_n, x, postsolve_args, status, + iteration, disp, callback) + + return x, b, status, iteration + + +def _linprog_rs(c, c0, A, b, x0, callback, postsolve_args, + maxiter=5000, tol=1e-12, disp=False, + maxupdate=10, mast=False, pivot="mrc", + **unknown_options): + """ + Solve the following linear programming problem via a two-phase + revised simplex algorithm.:: + + minimize: c @ x + + subject to: A @ x == b + 0 <= x < oo + + User-facing documentation is in _linprog_doc.py. + + Parameters + ---------- + c : 1-D array + Coefficients of the linear objective function to be minimized. 
+ c0 : float + Constant term in objective function due to fixed (and eliminated) + variables. (Currently unused.) + A : 2-D array + 2-D array which, when matrix-multiplied by ``x``, gives the values of + the equality constraints at ``x``. + b : 1-D array + 1-D array of values representing the RHS of each equality constraint + (row) in ``A_eq``. + x0 : 1-D array, optional + Starting values of the independent variables, which will be refined by + the optimization algorithm. For the revised simplex method, these must + correspond with a basic feasible solution. + callback : callable, optional + If a callback function is provided, it will be called within each + iteration of the algorithm. The callback function must accept a single + `scipy.optimize.OptimizeResult` consisting of the following fields: + + x : 1-D array + Current solution vector. + fun : float + Current value of the objective function ``c @ x``. + success : bool + True only when an algorithm has completed successfully, + so this is always False as the callback function is called + only while the algorithm is still iterating. + slack : 1-D array + The values of the slack variables. Each slack variable + corresponds to an inequality constraint. If the slack is zero, + the corresponding constraint is active. + con : 1-D array + The (nominally zero) residuals of the equality constraints, + that is, ``b - A_eq @ x``. + phase : int + The phase of the algorithm being executed. + status : int + For revised simplex, this is always 0 because if a different + status is detected, the algorithm terminates. + nit : int + The number of iterations performed. + message : str + A string descriptor of the exit status of the optimization. + postsolve_args : tuple + Data needed by _postsolve to convert the solution to the standard-form + problem into the solution to the original problem. + + Options + ------- + maxiter : int + The maximum number of iterations to perform in either phase. + tol : float + The tolerance which determines when a solution is "close enough" to + zero in Phase 1 to be considered a basic feasible solution or close + enough to positive to serve as an optimal solution. + disp : bool + Set to ``True`` if indicators of optimization status are to be printed + to the console each iteration. + maxupdate : int + The maximum number of updates performed on the LU factorization. + After this many updates is reached, the basis matrix is factorized + from scratch. + mast : bool + Minimize Amortized Solve Time. If enabled, the average time to solve + a linear system using the basis factorization is measured. Typically, + the average solve time will decrease with each successive solve after + initial factorization, as factorization takes much more time than the + solve operation (and updates). Eventually, however, the updated + factorization becomes sufficiently complex that the average solve time + begins to increase. When this is detected, the basis is refactorized + from scratch. Enable this option to maximize speed at the risk of + nondeterministic behavior. Ignored if ``maxupdate`` is 0. + pivot : "mrc" or "bland" + Pivot rule: Minimum Reduced Cost (default) or Bland's rule. Choose + Bland's rule if iteration limit is reached and cycling is suspected. + unknown_options : dict + Optional arguments not used by this particular solver. If + `unknown_options` is non-empty a warning is issued listing all + unused options. + + Returns + ------- + x : 1-D array + Solution vector. 
+ status : int + An integer representing the exit status of the optimization:: + + 0 : Optimization terminated successfully + 1 : Iteration limit reached + 2 : Problem appears to be infeasible + 3 : Problem appears to be unbounded + 4 : Numerical difficulties encountered + 5 : No constraints; turn presolve on + 6 : Guess x0 cannot be converted to a basic feasible solution + + message : str + A string descriptor of the exit status of the optimization. + iteration : int + The number of iterations taken to solve the problem. + """ + + _check_unknown_options(unknown_options) + + messages = ["Optimization terminated successfully.", + "Iteration limit reached.", + "The problem appears infeasible, as the phase one auxiliary " + "problem terminated successfully with a residual of {0:.1e}, " + "greater than the tolerance {1} required for the solution to " + "be considered feasible. Consider increasing the tolerance to " + "be greater than {0:.1e}. If this tolerance is unacceptably " + "large, the problem is likely infeasible.", + "The problem is unbounded, as the simplex algorithm found " + "a basic feasible solution from which there is a direction " + "with negative reduced cost in which all decision variables " + "increase.", + "Numerical difficulties encountered; consider trying " + "method='interior-point'.", + "Problems with no constraints are trivially solved; please " + "turn presolve on.", + "The guess x0 cannot be converted to a basic feasible " + "solution. " + ] + + if A.size == 0: # address test_unbounded_below_no_presolve_corrected + return np.zeros(c.shape), 5, messages[5], 0 + + x, basis, A, b, residual, status, iteration = ( + _phase_one(A, b, x0, callback, postsolve_args, + maxiter, tol, disp, maxupdate, mast, pivot)) + + if status == 0: + x, basis, status, iteration = _phase_two(c, A, x, basis, callback, + postsolve_args, + maxiter, tol, disp, + maxupdate, mast, pivot, + iteration) + + return x, status, messages[status].format(residual, tol), iteration diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_linprog_simplex.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_linprog_simplex.py new file mode 100644 index 0000000000000000000000000000000000000000..c47806c9a595f756b9f86d268c5c146ea86c77c6 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_linprog_simplex.py @@ -0,0 +1,663 @@ +"""Simplex method for linear programming + +The *simplex* method uses a traditional, full-tableau implementation of +Dantzig's simplex algorithm [1]_, [2]_ (*not* the Nelder-Mead simplex). +This algorithm is included for backwards compatibility and educational +purposes. + + .. versionadded:: 0.15.0 + +Warnings +-------- + +The simplex method may encounter numerical difficulties when pivot +values are close to the specified tolerance. If encountered try +remove any redundant constraints, change the pivot strategy to Bland's +rule or increase the tolerance value. + +Alternatively, more robust methods maybe be used. See +:ref:`'interior-point' ` and +:ref:`'revised simplex' `. + +References +---------- +.. [1] Dantzig, George B., Linear programming and extensions. Rand + Corporation Research Study Princeton Univ. Press, Princeton, NJ, + 1963 +.. [2] Hillier, S.H. and Lieberman, G.J. (1995), "Introduction to + Mathematical Programming", McGraw-Hill, Chapter 4. 
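+
+For illustration only (in SciPy versions where ``method='simplex'`` is still
+selectable), Bland's rule and the tolerance can be set through the ``options``
+dictionary of the public ``linprog`` interface; the data below are arbitrary::
+
+    from scipy.optimize import linprog
+
+    res = linprog(c=[-1, 4], A_ub=[[-3, 1], [1, 2]], b_ub=[6, 4],
+                  bounds=[(None, None), (-3, None)], method='simplex',
+                  options={'bland': True, 'tol': 1e-9})
+    # res.x is approximately [10, -3]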
+""" + +import numpy as np +from warnings import warn +from ._optimize import OptimizeResult, OptimizeWarning, _check_unknown_options +from ._linprog_util import _postsolve + + +def _pivot_col(T, tol=1e-9, bland=False): + """ + Given a linear programming simplex tableau, determine the column + of the variable to enter the basis. + + Parameters + ---------- + T : 2-D array + A 2-D array representing the simplex tableau, T, corresponding to the + linear programming problem. It should have the form: + + [[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]], + [A[1, 0], A[1, 1], ..., A[1, n_total], b[1]], + . + . + . + [A[m, 0], A[m, 1], ..., A[m, n_total], b[m]], + [c[0], c[1], ..., c[n_total], 0]] + + for a Phase 2 problem, or the form: + + [[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]], + [A[1, 0], A[1, 1], ..., A[1, n_total], b[1]], + . + . + . + [A[m, 0], A[m, 1], ..., A[m, n_total], b[m]], + [c[0], c[1], ..., c[n_total], 0], + [c'[0], c'[1], ..., c'[n_total], 0]] + + for a Phase 1 problem (a problem in which a basic feasible solution is + sought prior to maximizing the actual objective. ``T`` is modified in + place by ``_solve_simplex``. + tol : float + Elements in the objective row larger than -tol will not be considered + for pivoting. Nominally this value is zero, but numerical issues + cause a tolerance about zero to be necessary. + bland : bool + If True, use Bland's rule for selection of the column (select the + first column with a negative coefficient in the objective row, + regardless of magnitude). + + Returns + ------- + status: bool + True if a suitable pivot column was found, otherwise False. + A return of False indicates that the linear programming simplex + algorithm is complete. + col: int + The index of the column of the pivot element. + If status is False, col will be returned as nan. + """ + ma = np.ma.masked_where(T[-1, :-1] >= -tol, T[-1, :-1], copy=False) + if ma.count() == 0: + return False, np.nan + if bland: + # ma.mask is sometimes 0d + return True, np.nonzero(np.logical_not(np.atleast_1d(ma.mask)))[0][0] + return True, np.ma.nonzero(ma == ma.min())[0][0] + + +def _pivot_row(T, basis, pivcol, phase, tol=1e-9, bland=False): + """ + Given a linear programming simplex tableau, determine the row for the + pivot operation. + + Parameters + ---------- + T : 2-D array + A 2-D array representing the simplex tableau, T, corresponding to the + linear programming problem. It should have the form: + + [[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]], + [A[1, 0], A[1, 1], ..., A[1, n_total], b[1]], + . + . + . + [A[m, 0], A[m, 1], ..., A[m, n_total], b[m]], + [c[0], c[1], ..., c[n_total], 0]] + + for a Phase 2 problem, or the form: + + [[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]], + [A[1, 0], A[1, 1], ..., A[1, n_total], b[1]], + . + . + . + [A[m, 0], A[m, 1], ..., A[m, n_total], b[m]], + [c[0], c[1], ..., c[n_total], 0], + [c'[0], c'[1], ..., c'[n_total], 0]] + + for a Phase 1 problem (a Problem in which a basic feasible solution is + sought prior to maximizing the actual objective. ``T`` is modified in + place by ``_solve_simplex``. + basis : array + A list of the current basic variables. + pivcol : int + The index of the pivot column. + phase : int + The phase of the simplex algorithm (1 or 2). + tol : float + Elements in the pivot column smaller than tol will not be considered + for pivoting. Nominally this value is zero, but numerical issues + cause a tolerance about zero to be necessary. 
+ bland : bool + If True, use Bland's rule for selection of the row (if more than one + row can be used, choose the one with the lowest variable index). + + Returns + ------- + status: bool + True if a suitable pivot row was found, otherwise False. A return + of False indicates that the linear programming problem is unbounded. + row: int + The index of the row of the pivot element. If status is False, row + will be returned as nan. + """ + if phase == 1: + k = 2 + else: + k = 1 + ma = np.ma.masked_where(T[:-k, pivcol] <= tol, T[:-k, pivcol], copy=False) + if ma.count() == 0: + return False, np.nan + mb = np.ma.masked_where(T[:-k, pivcol] <= tol, T[:-k, -1], copy=False) + q = mb / ma + min_rows = np.ma.nonzero(q == q.min())[0] + if bland: + return True, min_rows[np.argmin(np.take(basis, min_rows))] + return True, min_rows[0] + + +def _apply_pivot(T, basis, pivrow, pivcol, tol=1e-9): + """ + Pivot the simplex tableau inplace on the element given by (pivrow, pivol). + The entering variable corresponds to the column given by pivcol forcing + the variable basis[pivrow] to leave the basis. + + Parameters + ---------- + T : 2-D array + A 2-D array representing the simplex tableau, T, corresponding to the + linear programming problem. It should have the form: + + [[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]], + [A[1, 0], A[1, 1], ..., A[1, n_total], b[1]], + . + . + . + [A[m, 0], A[m, 1], ..., A[m, n_total], b[m]], + [c[0], c[1], ..., c[n_total], 0]] + + for a Phase 2 problem, or the form: + + [[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]], + [A[1, 0], A[1, 1], ..., A[1, n_total], b[1]], + . + . + . + [A[m, 0], A[m, 1], ..., A[m, n_total], b[m]], + [c[0], c[1], ..., c[n_total], 0], + [c'[0], c'[1], ..., c'[n_total], 0]] + + for a Phase 1 problem (a problem in which a basic feasible solution is + sought prior to maximizing the actual objective. ``T`` is modified in + place by ``_solve_simplex``. + basis : 1-D array + An array of the indices of the basic variables, such that basis[i] + contains the column corresponding to the basic variable for row i. + Basis is modified in place by _apply_pivot. + pivrow : int + Row index of the pivot. + pivcol : int + Column index of the pivot. + """ + basis[pivrow] = pivcol + pivval = T[pivrow, pivcol] + T[pivrow] = T[pivrow] / pivval + for irow in range(T.shape[0]): + if irow != pivrow: + T[irow] = T[irow] - T[pivrow] * T[irow, pivcol] + + # The selected pivot should never lead to a pivot value less than the tol. + if np.isclose(pivval, tol, atol=0, rtol=1e4): + message = ( + f"The pivot operation produces a pivot value of:{pivval: .1e}, " + "which is only slightly greater than the specified " + f"tolerance{tol: .1e}. This may lead to issues regarding the " + "numerical stability of the simplex method. " + "Removing redundant constraints, changing the pivot strategy " + "via Bland's rule or increasing the tolerance may " + "help reduce the issue.") + warn(message, OptimizeWarning, stacklevel=5) + + +def _solve_simplex(T, n, basis, callback, postsolve_args, + maxiter=1000, tol=1e-9, phase=2, bland=False, nit0=0, + ): + """ + Solve a linear programming problem in "standard form" using the Simplex + Method. Linear Programming is intended to solve the following problem form: + + Minimize:: + + c @ x + + Subject to:: + + A @ x == b + x >= 0 + + Parameters + ---------- + T : 2-D array + A 2-D array representing the simplex tableau, T, corresponding to the + linear programming problem. 
It should have the form: + + [[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]], + [A[1, 0], A[1, 1], ..., A[1, n_total], b[1]], + . + . + . + [A[m, 0], A[m, 1], ..., A[m, n_total], b[m]], + [c[0], c[1], ..., c[n_total], 0]] + + for a Phase 2 problem, or the form: + + [[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]], + [A[1, 0], A[1, 1], ..., A[1, n_total], b[1]], + . + . + . + [A[m, 0], A[m, 1], ..., A[m, n_total], b[m]], + [c[0], c[1], ..., c[n_total], 0], + [c'[0], c'[1], ..., c'[n_total], 0]] + + for a Phase 1 problem (a problem in which a basic feasible solution is + sought prior to maximizing the actual objective. ``T`` is modified in + place by ``_solve_simplex``. + n : int + The number of true variables in the problem. + basis : 1-D array + An array of the indices of the basic variables, such that basis[i] + contains the column corresponding to the basic variable for row i. + Basis is modified in place by _solve_simplex + callback : callable, optional + If a callback function is provided, it will be called within each + iteration of the algorithm. The callback must accept a + `scipy.optimize.OptimizeResult` consisting of the following fields: + + x : 1-D array + Current solution vector + fun : float + Current value of the objective function + success : bool + True only when a phase has completed successfully. This + will be False for most iterations. + slack : 1-D array + The values of the slack variables. Each slack variable + corresponds to an inequality constraint. If the slack is zero, + the corresponding constraint is active. + con : 1-D array + The (nominally zero) residuals of the equality constraints, + that is, ``b - A_eq @ x`` + phase : int + The phase of the optimization being executed. In phase 1 a basic + feasible solution is sought and the T has an additional row + representing an alternate objective function. + status : int + An integer representing the exit status of the optimization:: + + 0 : Optimization terminated successfully + 1 : Iteration limit reached + 2 : Problem appears to be infeasible + 3 : Problem appears to be unbounded + 4 : Serious numerical difficulties encountered + + nit : int + The number of iterations performed. + message : str + A string descriptor of the exit status of the optimization. + postsolve_args : tuple + Data needed by _postsolve to convert the solution to the standard-form + problem into the solution to the original problem. + maxiter : int + The maximum number of iterations to perform before aborting the + optimization. + tol : float + The tolerance which determines when a solution is "close enough" to + zero in Phase 1 to be considered a basic feasible solution or close + enough to positive to serve as an optimal solution. + phase : int + The phase of the optimization being executed. In phase 1 a basic + feasible solution is sought and the T has an additional row + representing an alternate objective function. + bland : bool + If True, choose pivots using Bland's rule [3]_. In problems which + fail to converge due to cycling, using Bland's rule can provide + convergence at the expense of a less optimal path about the simplex. + nit0 : int + The initial iteration number used to keep an accurate iteration total + in a two-phase problem. + + Returns + ------- + nit : int + The number of iterations. Used to keep an accurate iteration total + in the two-phase problem. 
+ status : int + An integer representing the exit status of the optimization:: + + 0 : Optimization terminated successfully + 1 : Iteration limit reached + 2 : Problem appears to be infeasible + 3 : Problem appears to be unbounded + 4 : Serious numerical difficulties encountered + + """ + nit = nit0 + status = 0 + message = '' + complete = False + + if phase == 1: + m = T.shape[1]-2 + elif phase == 2: + m = T.shape[1]-1 + else: + raise ValueError("Argument 'phase' to _solve_simplex must be 1 or 2") + + if phase == 2: + # Check if any artificial variables are still in the basis. + # If yes, check if any coefficients from this row and a column + # corresponding to one of the non-artificial variable is non-zero. + # If found, pivot at this term. If not, start phase 2. + # Do this for all artificial variables in the basis. + # Ref: "An Introduction to Linear Programming and Game Theory" + # by Paul R. Thie, Gerard E. Keough, 3rd Ed, + # Chapter 3.7 Redundant Systems (pag 102) + for pivrow in [row for row in range(basis.size) + if basis[row] > T.shape[1] - 2]: + non_zero_row = [col for col in range(T.shape[1] - 1) + if abs(T[pivrow, col]) > tol] + if len(non_zero_row) > 0: + pivcol = non_zero_row[0] + _apply_pivot(T, basis, pivrow, pivcol, tol) + nit += 1 + + if len(basis[:m]) == 0: + solution = np.empty(T.shape[1] - 1, dtype=np.float64) + else: + solution = np.empty(max(T.shape[1] - 1, max(basis[:m]) + 1), + dtype=np.float64) + + while not complete: + # Find the pivot column + pivcol_found, pivcol = _pivot_col(T, tol, bland) + if not pivcol_found: + pivcol = np.nan + pivrow = np.nan + status = 0 + complete = True + else: + # Find the pivot row + pivrow_found, pivrow = _pivot_row(T, basis, pivcol, phase, tol, bland) + if not pivrow_found: + status = 3 + complete = True + + if callback is not None: + solution[:] = 0 + solution[basis[:n]] = T[:n, -1] + x = solution[:m] + x, fun, slack, con = _postsolve( + x, postsolve_args + ) + res = OptimizeResult({ + 'x': x, + 'fun': fun, + 'slack': slack, + 'con': con, + 'status': status, + 'message': message, + 'nit': nit, + 'success': status == 0 and complete, + 'phase': phase, + 'complete': complete, + }) + callback(res) + + if not complete: + if nit >= maxiter: + # Iteration limit exceeded + status = 1 + complete = True + else: + _apply_pivot(T, basis, pivrow, pivcol, tol) + nit += 1 + return nit, status + + +def _linprog_simplex(c, c0, A, b, callback, postsolve_args, + maxiter=1000, tol=1e-9, disp=False, bland=False, + **unknown_options): + """ + Minimize a linear objective function subject to linear equality and + non-negativity constraints using the two phase simplex method. + Linear programming is intended to solve problems of the following form: + + Minimize:: + + c @ x + + Subject to:: + + A @ x == b + x >= 0 + + User-facing documentation is in _linprog_doc.py. + + Parameters + ---------- + c : 1-D array + Coefficients of the linear objective function to be minimized. + c0 : float + Constant term in objective function due to fixed (and eliminated) + variables. (Purely for display.) + A : 2-D array + 2-D array such that ``A @ x``, gives the values of the equality + constraints at ``x``. + b : 1-D array + 1-D array of values representing the right hand side of each equality + constraint (row) in ``A``. + callback : callable, optional + If a callback function is provided, it will be called within each + iteration of the algorithm. 
The callback function must accept a single + `scipy.optimize.OptimizeResult` consisting of the following fields: + + x : 1-D array + Current solution vector + fun : float + Current value of the objective function + success : bool + True when an algorithm has completed successfully. + slack : 1-D array + The values of the slack variables. Each slack variable + corresponds to an inequality constraint. If the slack is zero, + the corresponding constraint is active. + con : 1-D array + The (nominally zero) residuals of the equality constraints, + that is, ``b - A_eq @ x`` + phase : int + The phase of the algorithm being executed. + status : int + An integer representing the status of the optimization:: + + 0 : Algorithm proceeding nominally + 1 : Iteration limit reached + 2 : Problem appears to be infeasible + 3 : Problem appears to be unbounded + 4 : Serious numerical difficulties encountered + nit : int + The number of iterations performed. + message : str + A string descriptor of the exit status of the optimization. + postsolve_args : tuple + Data needed by _postsolve to convert the solution to the standard-form + problem into the solution to the original problem. + + Options + ------- + maxiter : int + The maximum number of iterations to perform. + disp : bool + If True, print exit status message to sys.stdout + tol : float + The tolerance which determines when a solution is "close enough" to + zero in Phase 1 to be considered a basic feasible solution or close + enough to positive to serve as an optimal solution. + bland : bool + If True, use Bland's anti-cycling rule [3]_ to choose pivots to + prevent cycling. If False, choose pivots which should lead to a + converged solution more quickly. The latter method is subject to + cycling (non-convergence) in rare instances. + unknown_options : dict + Optional arguments not used by this particular solver. If + `unknown_options` is non-empty a warning is issued listing all + unused options. + + Returns + ------- + x : 1-D array + Solution vector. + status : int + An integer representing the exit status of the optimization:: + + 0 : Optimization terminated successfully + 1 : Iteration limit reached + 2 : Problem appears to be infeasible + 3 : Problem appears to be unbounded + 4 : Serious numerical difficulties encountered + + message : str + A string descriptor of the exit status of the optimization. + iteration : int + The number of iterations taken to solve the problem. + + References + ---------- + .. [1] Dantzig, George B., Linear programming and extensions. Rand + Corporation Research Study Princeton Univ. Press, Princeton, NJ, + 1963 + .. [2] Hillier, S.H. and Lieberman, G.J. (1995), "Introduction to + Mathematical Programming", McGraw-Hill, Chapter 4. + .. [3] Bland, Robert G. New finite pivoting rules for the simplex method. + Mathematics of Operations Research (2), 1977: pp. 103-107. + + + Notes + ----- + The expected problem formulation differs between the top level ``linprog`` + module and the method specific solvers. The method specific solvers expect a + problem in standard form: + + Minimize:: + + c @ x + + Subject to:: + + A @ x == b + x >= 0 + + Whereas the top level ``linprog`` module expects a problem of form: + + Minimize:: + + c @ x + + Subject to:: + + A_ub @ x <= b_ub + A_eq @ x == b_eq + lb <= x <= ub + + where ``lb = 0`` and ``ub = None`` unless set in ``bounds``. 
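+
+    For instance, with ``c = [-1, 4]``, ``A_ub = [[-3, 1], [1, 2]]``,
+    ``b_ub = [6, 4]`` and default bounds (an illustrative problem, not part
+    of this solver's interface), the conversion described below yields the
+    standard-form data ``A = [[-3, 1, 1, 0], [1, 2, 0, 1]]``, ``b = [6, 4]``
+    and ``c = [-1, 4, 0, 0]``, where the last two variables are the slack
+    variables of the two inequality rows.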
+ + The original problem contains equality, upper-bound and variable constraints + whereas the method specific solver requires equality constraints and + variable non-negativity. + + ``linprog`` module converts the original problem to standard form by + converting the simple bounds to upper bound constraints, introducing + non-negative slack variables for inequality constraints, and expressing + unbounded variables as the difference between two non-negative variables. + """ + _check_unknown_options(unknown_options) + + status = 0 + messages = {0: "Optimization terminated successfully.", + 1: "Iteration limit reached.", + 2: "Optimization failed. Unable to find a feasible" + " starting point.", + 3: "Optimization failed. The problem appears to be unbounded.", + 4: "Optimization failed. Singular matrix encountered."} + + n, m = A.shape + + # All constraints must have b >= 0. + is_negative_constraint = np.less(b, 0) + A[is_negative_constraint] *= -1 + b[is_negative_constraint] *= -1 + + # As all constraints are equality constraints the artificial variables + # will also be basic variables. + av = np.arange(n) + m + basis = av.copy() + + # Format the phase one tableau by adding artificial variables and stacking + # the constraints, the objective row and pseudo-objective row. + row_constraints = np.hstack((A, np.eye(n), b[:, np.newaxis])) + row_objective = np.hstack((c, np.zeros(n), c0)) + row_pseudo_objective = -row_constraints.sum(axis=0) + row_pseudo_objective[av] = 0 + T = np.vstack((row_constraints, row_objective, row_pseudo_objective)) + + nit1, status = _solve_simplex(T, n, basis, callback=callback, + postsolve_args=postsolve_args, + maxiter=maxiter, tol=tol, phase=1, + bland=bland + ) + # if pseudo objective is zero, remove the last row from the tableau and + # proceed to phase 2 + nit2 = nit1 + if abs(T[-1, -1]) < tol: + # Remove the pseudo-objective row from the tableau + T = T[:-1, :] + # Remove the artificial variable columns from the tableau + T = np.delete(T, av, 1) + else: + # Failure to find a feasible starting point + status = 2 + messages[status] = ( + "Phase 1 of the simplex method failed to find a feasible " + "solution. The pseudo-objective function evaluates to " + f"{abs(T[-1, -1]):.1e} " + f"which exceeds the required tolerance of {tol} for a solution to be " + "considered 'close enough' to zero to be a basic solution. " + "Consider increasing the tolerance to be greater than " + f"{abs(T[-1, -1]):.1e}. " + "If this tolerance is unacceptably large the problem may be " + "infeasible." 
+ ) + + if status == 0: + # Phase 2 + nit2, status = _solve_simplex(T, n, basis, callback=callback, + postsolve_args=postsolve_args, + maxiter=maxiter, tol=tol, phase=2, + bland=bland, nit0=nit1 + ) + + solution = np.zeros(n + m) + solution[basis[:n]] = T[:n, -1] + x = solution[:m] + + return x, status, messages[status], int(nit2) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_linprog_util.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_linprog_util.py new file mode 100644 index 0000000000000000000000000000000000000000..405ff0feee7116712a6b0897e2f956a6e8a1760f --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_linprog_util.py @@ -0,0 +1,1523 @@ +""" +Method agnostic utility functions for linear programming +""" + +import numpy as np +import scipy.sparse as sps +from warnings import warn +from ._optimize import OptimizeWarning +from scipy.optimize._remove_redundancy import ( + _remove_redundancy_svd, _remove_redundancy_pivot_sparse, + _remove_redundancy_pivot_dense, _remove_redundancy_id + ) +from collections import namedtuple + +_LPProblem = namedtuple('_LPProblem', + 'c A_ub b_ub A_eq b_eq bounds x0 integrality') +_LPProblem.__new__.__defaults__ = (None,) * 7 # make c the only required arg +_LPProblem.__doc__ = \ + """ Represents a linear-programming problem. + + Attributes + ---------- + c : 1D array + The coefficients of the linear objective function to be minimized. + A_ub : 2D array, optional + The inequality constraint matrix. Each row of ``A_ub`` specifies the + coefficients of a linear inequality constraint on ``x``. + b_ub : 1D array, optional + The inequality constraint vector. Each element represents an + upper bound on the corresponding value of ``A_ub @ x``. + A_eq : 2D array, optional + The equality constraint matrix. Each row of ``A_eq`` specifies the + coefficients of a linear equality constraint on ``x``. + b_eq : 1D array, optional + The equality constraint vector. Each element of ``A_eq @ x`` must equal + the corresponding element of ``b_eq``. + bounds : various valid formats, optional + The bounds of ``x``, as ``min`` and ``max`` pairs. + If bounds are specified for all N variables separately, valid formats + are: + * a 2D array (N x 2); + * a sequence of N sequences, each with 2 values. + If all variables have the same bounds, the bounds can be specified as + a 1-D or 2-D array or sequence with 2 scalar values. + If all variables have a lower bound of 0 and no upper bound, the bounds + parameter can be omitted (or given as None). + Absent lower and/or upper bounds can be specified as -numpy.inf (no + lower bound), numpy.inf (no upper bound) or None (both). + x0 : 1D array, optional + Guess values of the decision variables, which will be refined by + the optimization algorithm. This argument is currently used only by the + 'revised simplex' method, and can only be used if `x0` represents a + basic feasible solution. + integrality : 1-D array or int, optional + Indicates the type of integrality constraint on each decision variable. + + ``0`` : Continuous variable; no integrality constraint. + + ``1`` : Integer variable; decision variable must be an integer + within `bounds`. + + ``2`` : Semi-continuous variable; decision variable must be within + `bounds` or take value ``0``. + + ``3`` : Semi-integer variable; decision variable must be an integer + within `bounds` or take value ``0``. 
+ + By default, all variables are continuous. + + For mixed integrality constraints, supply an array of shape `c.shape`. + To infer a constraint on each decision variable from shorter inputs, + the argument will be broadcast to `c.shape` using `np.broadcast_to`. + + This argument is currently used only by the ``'highs'`` method and + ignored otherwise. + + Notes + ----- + This namedtuple supports 2 ways of initialization: + >>> lp1 = _LPProblem(c=[-1, 4], A_ub=[[-3, 1], [1, 2]], b_ub=[6, 4]) + >>> lp2 = _LPProblem([-1, 4], [[-3, 1], [1, 2]], [6, 4]) + + Note that only ``c`` is a required argument here, whereas all other arguments + ``A_ub``, ``b_ub``, ``A_eq``, ``b_eq``, ``bounds``, ``x0`` are optional with + default values of None. + For example, ``A_eq`` and ``b_eq`` can be set without ``A_ub`` or ``b_ub``: + >>> lp3 = _LPProblem(c=[-1, 4], A_eq=[[2, 1]], b_eq=[10]) + """ + + +def _check_sparse_inputs(options, meth, A_ub, A_eq): + """ + Check the provided ``A_ub`` and ``A_eq`` matrices conform to the specified + optional sparsity variables. + + Parameters + ---------- + A_ub : 2-D array, optional + 2-D array such that ``A_ub @ x`` gives the values of the upper-bound + inequality constraints at ``x``. + A_eq : 2-D array, optional + 2-D array such that ``A_eq @ x`` gives the values of the equality + constraints at ``x``. + options : dict + A dictionary of solver options. All methods accept the following + generic options: + + maxiter : int + Maximum number of iterations to perform. + disp : bool + Set to True to print convergence messages. + + For method-specific options, see :func:`show_options('linprog')`. + method : str, optional + The algorithm used to solve the standard form problem. + + Returns + ------- + A_ub : 2-D array, optional + 2-D array such that ``A_ub @ x`` gives the values of the upper-bound + inequality constraints at ``x``. + A_eq : 2-D array, optional + 2-D array such that ``A_eq @ x`` gives the values of the equality + constraints at ``x``. + options : dict + A dictionary of solver options. All methods accept the following + generic options: + + maxiter : int + Maximum number of iterations to perform. + disp : bool + Set to True to print convergence messages. + + For method-specific options, see :func:`show_options('linprog')`. + """ + # This is an undocumented option for unit testing sparse presolve + _sparse_presolve = options.pop('_sparse_presolve', False) + if _sparse_presolve and A_eq is not None: + A_eq = sps.coo_matrix(A_eq) + if _sparse_presolve and A_ub is not None: + A_ub = sps.coo_matrix(A_ub) + + sparse_constraint = sps.issparse(A_eq) or sps.issparse(A_ub) + + preferred_methods = {"highs", "highs-ds", "highs-ipm"} + dense_methods = {"simplex", "revised simplex"} + if meth in dense_methods and sparse_constraint: + raise ValueError(f"Method '{meth}' does not support sparse " + "constraint matrices. Please consider using one of " + f"{preferred_methods}.") + + sparse = options.get('sparse', False) + if not sparse and sparse_constraint and meth == 'interior-point': + options['sparse'] = True + warn("Sparse constraint matrix detected; setting 'sparse':True.", + OptimizeWarning, stacklevel=4) + return options, A_ub, A_eq + + +def _format_A_constraints(A, n_x, sparse_lhs=False): + """Format the left hand side of the constraints to a 2-D array + + Parameters + ---------- + A : 2-D array + 2-D array such that ``A @ x`` gives the values of the upper-bound + (in)equality constraints at ``x``. + n_x : int + The number of variables in the linear programming problem. 
+ sparse_lhs : bool + Whether either of `A_ub` or `A_eq` are sparse. If true return a + coo_matrix instead of a numpy array. + + Returns + ------- + np.ndarray or sparse.coo_matrix + 2-D array such that ``A @ x`` gives the values of the upper-bound + (in)equality constraints at ``x``. + + """ + if sparse_lhs: + return sps.coo_matrix( + (0, n_x) if A is None else A, dtype=float, copy=True + ) + elif A is None: + return np.zeros((0, n_x), dtype=float) + else: + return np.array(A, dtype=float, copy=True) + + +def _format_b_constraints(b): + """Format the upper bounds of the constraints to a 1-D array + + Parameters + ---------- + b : 1-D array + 1-D array of values representing the upper-bound of each (in)equality + constraint (row) in ``A``. + + Returns + ------- + 1-D np.array + 1-D array of values representing the upper-bound of each (in)equality + constraint (row) in ``A``. + + """ + if b is None: + return np.array([], dtype=float) + b = np.array(b, dtype=float, copy=True).squeeze() + return b if b.size != 1 else b.reshape(-1) + + +def _clean_inputs(lp): + """ + Given user inputs for a linear programming problem, return the + objective vector, upper bound constraints, equality constraints, + and simple bounds in a preferred format. + + Parameters + ---------- + lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields: + + c : 1D array + The coefficients of the linear objective function to be minimized. + A_ub : 2D array, optional + The inequality constraint matrix. Each row of ``A_ub`` specifies the + coefficients of a linear inequality constraint on ``x``. + b_ub : 1D array, optional + The inequality constraint vector. Each element represents an + upper bound on the corresponding value of ``A_ub @ x``. + A_eq : 2D array, optional + The equality constraint matrix. Each row of ``A_eq`` specifies the + coefficients of a linear equality constraint on ``x``. + b_eq : 1D array, optional + The equality constraint vector. Each element of ``A_eq @ x`` must equal + the corresponding element of ``b_eq``. + bounds : various valid formats, optional + The bounds of ``x``, as ``min`` and ``max`` pairs. + If bounds are specified for all N variables separately, valid formats are: + * a 2D array (2 x N or N x 2); + * a sequence of N sequences, each with 2 values. + If all variables have the same bounds, a single pair of values can + be specified. Valid formats are: + * a sequence with 2 scalar values; + * a sequence with a single element containing 2 scalar values. + If all variables have a lower bound of 0 and no upper bound, the bounds + parameter can be omitted (or given as None). + x0 : 1D array, optional + Guess values of the decision variables, which will be refined by + the optimization algorithm. This argument is currently used only by the + 'revised simplex' method, and can only be used if `x0` represents a + basic feasible solution. + + Returns + ------- + lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields: + + c : 1D array + The coefficients of the linear objective function to be minimized. + A_ub : 2D array, optional + The inequality constraint matrix. Each row of ``A_ub`` specifies the + coefficients of a linear inequality constraint on ``x``. + b_ub : 1D array, optional + The inequality constraint vector. Each element represents an + upper bound on the corresponding value of ``A_ub @ x``. + A_eq : 2D array, optional + The equality constraint matrix. 
Each row of ``A_eq`` specifies the + coefficients of a linear equality constraint on ``x``. + b_eq : 1D array, optional + The equality constraint vector. Each element of ``A_eq @ x`` must equal + the corresponding element of ``b_eq``. + bounds : 2D array + The bounds of ``x``, as ``min`` and ``max`` pairs, one for each of the N + elements of ``x``. The N x 2 array contains lower bounds in the first + column and upper bounds in the 2nd. Unbounded variables have lower + bound -np.inf and/or upper bound np.inf. + x0 : 1D array, optional + Guess values of the decision variables, which will be refined by + the optimization algorithm. This argument is currently used only by the + 'revised simplex' method, and can only be used if `x0` represents a + basic feasible solution. + + """ + c, A_ub, b_ub, A_eq, b_eq, bounds, x0, integrality = lp + + if c is None: + raise TypeError + + try: + c = np.array(c, dtype=np.float64, copy=True).squeeze() + except ValueError as e: + raise TypeError( + "Invalid input for linprog: c must be a 1-D array of numerical " + "coefficients") from e + else: + # If c is a single value, convert it to a 1-D array. + if c.size == 1: + c = c.reshape(-1) + + n_x = len(c) + if n_x == 0 or len(c.shape) != 1: + raise ValueError( + "Invalid input for linprog: c must be a 1-D array and must " + "not have more than one non-singleton dimension") + if not np.isfinite(c).all(): + raise ValueError( + "Invalid input for linprog: c must not contain values " + "inf, nan, or None") + + sparse_lhs = sps.issparse(A_eq) or sps.issparse(A_ub) + try: + A_ub = _format_A_constraints(A_ub, n_x, sparse_lhs=sparse_lhs) + except ValueError as e: + raise TypeError( + "Invalid input for linprog: A_ub must be a 2-D array " + "of numerical values") from e + else: + n_ub = A_ub.shape[0] + if len(A_ub.shape) != 2 or A_ub.shape[1] != n_x: + raise ValueError( + "Invalid input for linprog: A_ub must have exactly two " + "dimensions, and the number of columns in A_ub must be " + "equal to the size of c") + if (sps.issparse(A_ub) and not np.isfinite(A_ub.data).all() + or not sps.issparse(A_ub) and not np.isfinite(A_ub).all()): + raise ValueError( + "Invalid input for linprog: A_ub must not contain values " + "inf, nan, or None") + + try: + b_ub = _format_b_constraints(b_ub) + except ValueError as e: + raise TypeError( + "Invalid input for linprog: b_ub must be a 1-D array of " + "numerical values, each representing the upper bound of an " + "inequality constraint (row) in A_ub") from e + else: + if b_ub.shape != (n_ub,): + raise ValueError( + "Invalid input for linprog: b_ub must be a 1-D array; b_ub " + "must not have more than one non-singleton dimension and " + "the number of rows in A_ub must equal the number of values " + "in b_ub") + if not np.isfinite(b_ub).all(): + raise ValueError( + "Invalid input for linprog: b_ub must not contain values " + "inf, nan, or None") + + try: + A_eq = _format_A_constraints(A_eq, n_x, sparse_lhs=sparse_lhs) + except ValueError as e: + raise TypeError( + "Invalid input for linprog: A_eq must be a 2-D array " + "of numerical values") from e + else: + n_eq = A_eq.shape[0] + if len(A_eq.shape) != 2 or A_eq.shape[1] != n_x: + raise ValueError( + "Invalid input for linprog: A_eq must have exactly two " + "dimensions, and the number of columns in A_eq must be " + "equal to the size of c") + + if (sps.issparse(A_eq) and not np.isfinite(A_eq.data).all() + or not sps.issparse(A_eq) and not np.isfinite(A_eq).all()): + raise ValueError( + "Invalid input for linprog: A_eq must not contain 
values " + "inf, nan, or None") + + try: + b_eq = _format_b_constraints(b_eq) + except ValueError as e: + raise TypeError( + "Invalid input for linprog: b_eq must be a dense, 1-D array of " + "numerical values, each representing the right hand side of an " + "equality constraint (row) in A_eq") from e + else: + if b_eq.shape != (n_eq,): + raise ValueError( + "Invalid input for linprog: b_eq must be a 1-D array; b_eq " + "must not have more than one non-singleton dimension and " + "the number of rows in A_eq must equal the number of values " + "in b_eq") + if not np.isfinite(b_eq).all(): + raise ValueError( + "Invalid input for linprog: b_eq must not contain values " + "inf, nan, or None") + + # x0 gives a (optional) starting solution to the solver. If x0 is None, + # skip the checks. Initial solution will be generated automatically. + if x0 is not None: + try: + x0 = np.array(x0, dtype=float, copy=True).squeeze() + except ValueError as e: + raise TypeError( + "Invalid input for linprog: x0 must be a 1-D array of " + "numerical coefficients") from e + if x0.ndim == 0: + x0 = x0.reshape(-1) + if len(x0) == 0 or x0.ndim != 1: + raise ValueError( + "Invalid input for linprog: x0 should be a 1-D array; it " + "must not have more than one non-singleton dimension") + if not x0.size == c.size: + raise ValueError( + "Invalid input for linprog: x0 and c should contain the " + "same number of elements") + if not np.isfinite(x0).all(): + raise ValueError( + "Invalid input for linprog: x0 must not contain values " + "inf, nan, or None") + + # Bounds can be one of these formats: + # (1) a 2-D array or sequence, with shape N x 2 + # (2) a 1-D or 2-D sequence or array with 2 scalars + # (3) None (or an empty sequence or array) + # Unspecified bounds can be represented by None or (-)np.inf. + # All formats are converted into a N x 2 np.array with (-)np.inf where + # bounds are unspecified. + + # Prepare clean bounds array + bounds_clean = np.zeros((n_x, 2), dtype=float) + + # Convert to a numpy array. + # np.array(..,dtype=float) raises an error if dimensions are inconsistent + # or if there are invalid data types in bounds. Just add a linprog prefix + # to the error and re-raise. + # Creating at least a 2-D array simplifies the cases to distinguish below. 
+ if bounds is None or np.array_equal(bounds, []) or np.array_equal(bounds, [[]]): + bounds = (0, np.inf) + try: + bounds_conv = np.atleast_2d(np.array(bounds, dtype=float)) + except ValueError as e: + raise ValueError( + "Invalid input for linprog: unable to interpret bounds, " + "check values and dimensions: " + e.args[0]) from e + except TypeError as e: + raise TypeError( + "Invalid input for linprog: unable to interpret bounds, " + "check values and dimensions: " + e.args[0]) from e + + # Check bounds options + bsh = bounds_conv.shape + if len(bsh) > 2: + # Do not try to handle multidimensional bounds input + raise ValueError( + "Invalid input for linprog: provide a 2-D array for bounds, " + f"not a {len(bsh):d}-D array.") + elif np.all(bsh == (n_x, 2)): + # Regular N x 2 array + bounds_clean = bounds_conv + elif (np.all(bsh == (2, 1)) or np.all(bsh == (1, 2))): + # 2 values: interpret as overall lower and upper bound + bounds_flat = bounds_conv.flatten() + bounds_clean[:, 0] = bounds_flat[0] + bounds_clean[:, 1] = bounds_flat[1] + elif np.all(bsh == (2, n_x)): + # Reject a 2 x N array + raise ValueError( + f"Invalid input for linprog: provide a {n_x:d} x 2 array for bounds, " + f"not a 2 x {n_x:d} array.") + else: + raise ValueError( + "Invalid input for linprog: unable to interpret bounds with this " + f"dimension tuple: {bsh}.") + + # The process above creates nan-s where the input specified None + # Convert the nan-s in the 1st column to -np.inf and in the 2nd column + # to np.inf + i_none = np.isnan(bounds_clean[:, 0]) + bounds_clean[i_none, 0] = -np.inf + i_none = np.isnan(bounds_clean[:, 1]) + bounds_clean[i_none, 1] = np.inf + + return _LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds_clean, x0, integrality) + + +def _presolve(lp, rr, rr_method, tol=1e-9): + """ + Given inputs for a linear programming problem in preferred format, + presolve the problem: identify trivial infeasibilities, redundancies, + and unboundedness, tighten bounds where possible, and eliminate fixed + variables. + + Parameters + ---------- + lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields: + + c : 1D array + The coefficients of the linear objective function to be minimized. + A_ub : 2D array, optional + The inequality constraint matrix. Each row of ``A_ub`` specifies the + coefficients of a linear inequality constraint on ``x``. + b_ub : 1D array, optional + The inequality constraint vector. Each element represents an + upper bound on the corresponding value of ``A_ub @ x``. + A_eq : 2D array, optional + The equality constraint matrix. Each row of ``A_eq`` specifies the + coefficients of a linear equality constraint on ``x``. + b_eq : 1D array, optional + The equality constraint vector. Each element of ``A_eq @ x`` must equal + the corresponding element of ``b_eq``. + bounds : 2D array + The bounds of ``x``, as ``min`` and ``max`` pairs, one for each of the N + elements of ``x``. The N x 2 array contains lower bounds in the first + column and upper bounds in the 2nd. Unbounded variables have lower + bound -np.inf and/or upper bound np.inf. + x0 : 1D array, optional + Guess values of the decision variables, which will be refined by + the optimization algorithm. This argument is currently used only by the + 'revised simplex' method, and can only be used if `x0` represents a + basic feasible solution. + + rr : bool + If ``True`` attempts to eliminate any redundant rows in ``A_eq``. 
+ Set False if ``A_eq`` is known to be of full row rank, or if you are + looking for a potential speedup (at the expense of reliability). + rr_method : string + Method used to identify and remove redundant rows from the + equality constraint matrix after presolve. + tol : float + The tolerance which determines when a solution is "close enough" to + zero in Phase 1 to be considered a basic feasible solution or close + enough to positive to serve as an optimal solution. + + Returns + ------- + lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields: + + c : 1D array + The coefficients of the linear objective function to be minimized. + A_ub : 2D array, optional + The inequality constraint matrix. Each row of ``A_ub`` specifies the + coefficients of a linear inequality constraint on ``x``. + b_ub : 1D array, optional + The inequality constraint vector. Each element represents an + upper bound on the corresponding value of ``A_ub @ x``. + A_eq : 2D array, optional + The equality constraint matrix. Each row of ``A_eq`` specifies the + coefficients of a linear equality constraint on ``x``. + b_eq : 1D array, optional + The equality constraint vector. Each element of ``A_eq @ x`` must equal + the corresponding element of ``b_eq``. + bounds : 2D array + The bounds of ``x``, as ``min`` and ``max`` pairs, possibly tightened. + x0 : 1D array, optional + Guess values of the decision variables, which will be refined by + the optimization algorithm. This argument is currently used only by the + 'revised simplex' method, and can only be used if `x0` represents a + basic feasible solution. + + c0 : 1D array + Constant term in objective function due to fixed (and eliminated) + variables. + x : 1D array + Solution vector (when the solution is trivial and can be determined + in presolve) + revstack: list of functions + the functions in the list reverse the operations of _presolve() + the function signature is x_org = f(x_mod), where x_mod is the result + of a presolve step and x_org the value at the start of the step + (currently, the revstack contains only one function) + complete: bool + Whether the solution is complete (solved or determined to be infeasible + or unbounded in presolve) + status : int + An integer representing the exit status of the optimization:: + + 0 : Optimization terminated successfully + 1 : Iteration limit reached + 2 : Problem appears to be infeasible + 3 : Problem appears to be unbounded + 4 : Serious numerical difficulties encountered + + message : str + A string descriptor of the exit status of the optimization. + + References + ---------- + .. [5] Andersen, Erling D. "Finding all linearly dependent rows in + large-scale linear programming." Optimization Methods and Software + 6.3 (1995): 219-227. + .. [8] Andersen, Erling D., and Knud D. Andersen. "Presolving in linear + programming." Mathematical Programming 71.2 (1995): 221-245. + + """ + # ideas from Reference [5] by Andersen and Andersen + # however, unlike the reference, this is performed before converting + # problem to standard form + # There are a few advantages: + # * artificial variables have not been added, so matrices are smaller + # * bounds have not been converted to constraints yet. (It is better to + # do that after presolve because presolve may adjust the simple bounds.) 
+ # There are many improvements that can be made, namely: + # * implement remaining checks from [5] + # * loop presolve until no additional changes are made + # * implement additional efficiency improvements in redundancy removal [2] + + c, A_ub, b_ub, A_eq, b_eq, bounds, x0, _ = lp + + revstack = [] # record of variables eliminated from problem + # constant term in cost function may be added if variables are eliminated + c0 = 0 + complete = False # complete is True if detected infeasible/unbounded + x = np.zeros(c.shape) # this is solution vector if completed in presolve + + status = 0 # all OK unless determined otherwise + message = "" + + # Lower and upper bounds. Copy to prevent feedback. + lb = bounds[:, 0].copy() + ub = bounds[:, 1].copy() + + m_eq, n = A_eq.shape + m_ub, n = A_ub.shape + + if (rr_method is not None + and rr_method.lower() not in {"svd", "pivot", "id"}): + message = ("'" + str(rr_method) + "' is not a valid option " + "for redundancy removal. Valid options are 'SVD', " + "'pivot', and 'ID'.") + raise ValueError(message) + + if sps.issparse(A_eq): + A_eq = A_eq.tocsr() + A_ub = A_ub.tocsr() + + def where(A): + return A.nonzero() + + vstack = sps.vstack + else: + where = np.where + vstack = np.vstack + + # upper bounds > lower bounds + if np.any(ub < lb) or np.any(lb == np.inf) or np.any(ub == -np.inf): + status = 2 + message = ("The problem is (trivially) infeasible since one " + "or more upper bounds are smaller than the corresponding " + "lower bounds, a lower bound is np.inf or an upper bound " + "is -np.inf.") + complete = True + return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0), + c0, x, revstack, complete, status, message) + + # zero row in equality constraints + zero_row = np.array(np.sum(A_eq != 0, axis=1) == 0).flatten() + if np.any(zero_row): + if np.any( + np.logical_and( + zero_row, + np.abs(b_eq) > tol)): # test_zero_row_1 + # infeasible if RHS is not zero + status = 2 + message = ("The problem is (trivially) infeasible due to a row " + "of zeros in the equality constraint matrix with a " + "nonzero corresponding constraint value.") + complete = True + return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0), + c0, x, revstack, complete, status, message) + else: # test_zero_row_2 + # if RHS is zero, we can eliminate this equation entirely + A_eq = A_eq[np.logical_not(zero_row), :] + b_eq = b_eq[np.logical_not(zero_row)] + + # zero row in inequality constraints + zero_row = np.array(np.sum(A_ub != 0, axis=1) == 0).flatten() + if np.any(zero_row): + if np.any(np.logical_and(zero_row, b_ub < -tol)): # test_zero_row_1 + # infeasible if RHS is less than zero (because LHS is zero) + status = 2 + message = ("The problem is (trivially) infeasible due to a row " + "of zeros in the equality constraint matrix with a " + "nonzero corresponding constraint value.") + complete = True + return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0), + c0, x, revstack, complete, status, message) + else: # test_zero_row_2 + # if LHS is >= 0, we can eliminate this constraint entirely + A_ub = A_ub[np.logical_not(zero_row), :] + b_ub = b_ub[np.logical_not(zero_row)] + + # zero column in (both) constraints + # this indicates that a variable isn't constrained and can be removed + A = vstack((A_eq, A_ub)) + if A.shape[0] > 0: + zero_col = np.array(np.sum(A != 0, axis=0) == 0).flatten() + # variable will be at upper or lower bound, depending on objective + x[np.logical_and(zero_col, c < 0)] = ub[ + np.logical_and(zero_col, c < 0)] + x[np.logical_and(zero_col, c > 0)] = lb[ + 
np.logical_and(zero_col, c > 0)] + if np.any(np.isinf(x)): # if an unconstrained variable has no bound + status = 3 + message = ("If feasible, the problem is (trivially) unbounded " + "due to a zero column in the constraint matrices. If " + "you wish to check whether the problem is infeasible, " + "turn presolve off.") + complete = True + return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0), + c0, x, revstack, complete, status, message) + # variables will equal upper/lower bounds will be removed later + lb[np.logical_and(zero_col, c < 0)] = ub[ + np.logical_and(zero_col, c < 0)] + ub[np.logical_and(zero_col, c > 0)] = lb[ + np.logical_and(zero_col, c > 0)] + + # row singleton in equality constraints + # this fixes a variable and removes the constraint + singleton_row = np.array(np.sum(A_eq != 0, axis=1) == 1).flatten() + rows = where(singleton_row)[0] + cols = where(A_eq[rows, :])[1] + if len(rows) > 0: + for row, col in zip(rows, cols): + val = b_eq[row] / A_eq[row, col] + if not lb[col] - tol <= val <= ub[col] + tol: + # infeasible if fixed value is not within bounds + status = 2 + message = ("The problem is (trivially) infeasible because a " + "singleton row in the equality constraints is " + "inconsistent with the bounds.") + complete = True + return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0), + c0, x, revstack, complete, status, message) + else: + # sets upper and lower bounds at that fixed value - variable + # will be removed later + lb[col] = val + ub[col] = val + A_eq = A_eq[np.logical_not(singleton_row), :] + b_eq = b_eq[np.logical_not(singleton_row)] + + # row singleton in inequality constraints + # this indicates a simple bound and the constraint can be removed + # simple bounds may be adjusted here + # After all of the simple bound information is combined here, get_Abc will + # turn the simple bounds into constraints + singleton_row = np.array(np.sum(A_ub != 0, axis=1) == 1).flatten() + cols = where(A_ub[singleton_row, :])[1] + rows = where(singleton_row)[0] + if len(rows) > 0: + for row, col in zip(rows, cols): + val = b_ub[row] / A_ub[row, col] + if A_ub[row, col] > 0: # upper bound + if val < lb[col] - tol: # infeasible + complete = True + elif val < ub[col]: # new upper bound + ub[col] = val + else: # lower bound + if val > ub[col] + tol: # infeasible + complete = True + elif val > lb[col]: # new lower bound + lb[col] = val + if complete: + status = 2 + message = ("The problem is (trivially) infeasible because a " + "singleton row in the upper bound constraints is " + "inconsistent with the bounds.") + return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0), + c0, x, revstack, complete, status, message) + A_ub = A_ub[np.logical_not(singleton_row), :] + b_ub = b_ub[np.logical_not(singleton_row)] + + # identical bounds indicate that variable can be removed + i_f = np.abs(lb - ub) < tol # indices of "fixed" variables + i_nf = np.logical_not(i_f) # indices of "not fixed" variables + + # test_bounds_equal_but_infeasible + if np.all(i_f): # if bounds define solution, check for consistency + residual = b_eq - A_eq.dot(lb) + slack = b_ub - A_ub.dot(lb) + if ((A_ub.size > 0 and np.any(slack < 0)) or + (A_eq.size > 0 and not np.allclose(residual, 0))): + status = 2 + message = ("The problem is (trivially) infeasible because the " + "bounds fix all variables to values inconsistent with " + "the constraints") + complete = True + return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0), + c0, x, revstack, complete, status, message) + + ub_mod = ub + lb_mod = lb + if 
np.any(i_f): + c0 += c[i_f].dot(lb[i_f]) + b_eq = b_eq - A_eq[:, i_f].dot(lb[i_f]) + b_ub = b_ub - A_ub[:, i_f].dot(lb[i_f]) + c = c[i_nf] + x_undo = lb[i_f] # not x[i_f], x is just zeroes + x = x[i_nf] + # user guess x0 stays separate from presolve solution x + if x0 is not None: + x0 = x0[i_nf] + A_eq = A_eq[:, i_nf] + A_ub = A_ub[:, i_nf] + # modify bounds + lb_mod = lb[i_nf] + ub_mod = ub[i_nf] + + def rev(x_mod): + # Function to restore x: insert x_undo into x_mod. + # When elements have been removed at positions k1, k2, k3, ... + # then these must be replaced at (after) positions k1-1, k2-2, + # k3-3, ... in the modified array to recreate the original + i = np.flatnonzero(i_f) + # Number of variables to restore + N = len(i) + index_offset = np.arange(N) + # Create insert indices + insert_indices = i - index_offset + x_rev = np.insert(x_mod.astype(float), insert_indices, x_undo) + return x_rev + + # Use revstack as a list of functions, currently just this one. + revstack.append(rev) + + # no constraints indicates that problem is trivial + if A_eq.size == 0 and A_ub.size == 0: + b_eq = np.array([]) + b_ub = np.array([]) + # test_empty_constraint_1 + if c.size == 0: + status = 0 + message = ("The solution was determined in presolve as there are " + "no non-trivial constraints.") + elif (np.any(np.logical_and(c < 0, ub_mod == np.inf)) or + np.any(np.logical_and(c > 0, lb_mod == -np.inf))): + # test_no_constraints() + # test_unbounded_no_nontrivial_constraints_1 + # test_unbounded_no_nontrivial_constraints_2 + status = 3 + message = ("The problem is (trivially) unbounded " + "because there are no non-trivial constraints and " + "a) at least one decision variable is unbounded " + "above and its corresponding cost is negative, or " + "b) at least one decision variable is unbounded below " + "and its corresponding cost is positive. ") + else: # test_empty_constraint_2 + status = 0 + message = ("The solution was determined in presolve as there are " + "no non-trivial constraints.") + complete = True + x[c < 0] = ub_mod[c < 0] + x[c > 0] = lb_mod[c > 0] + # where c is zero, set x to a finite bound or zero + x_zero_c = ub_mod[c == 0] + x_zero_c[np.isinf(x_zero_c)] = ub_mod[c == 0][np.isinf(x_zero_c)] + x_zero_c[np.isinf(x_zero_c)] = 0 + x[c == 0] = x_zero_c + # if this is not the last step of presolve, should convert bounds back + # to array and return here + + # Convert modified lb and ub back into N x 2 bounds + bounds = np.hstack((lb_mod[:, np.newaxis], ub_mod[:, np.newaxis])) + + # remove redundant (linearly dependent) rows from equality constraints + n_rows_A = A_eq.shape[0] + redundancy_warning = ("A_eq does not appear to be of full row rank. To " + "improve performance, check the problem formulation " + "for redundant equality constraints.") + if (sps.issparse(A_eq)): + if rr and A_eq.size > 0: # TODO: Fast sparse rank check? + rr_res = _remove_redundancy_pivot_sparse(A_eq, b_eq) + A_eq, b_eq, status, message = rr_res + if A_eq.shape[0] < n_rows_A: + warn(redundancy_warning, OptimizeWarning, stacklevel=1) + if status != 0: + complete = True + return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0), + c0, x, revstack, complete, status, message) + + # This is a wild guess for which redundancy removal algorithm will be + # faster. More testing would be good. 
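+    # Heuristic used below: when A_eq appears rank deficient, compare the
+    # number of seemingly redundant rows (A_eq.shape[0] - rank) against
+    # small_nullspace; prefer the SVD-based routine when only a few rows are
+    # redundant, otherwise (or if the SVD-based routine reports numerical
+    # difficulties) fall back to the dense pivot-based routine. An explicit
+    # rr_method ('svd', 'pivot' or 'id') bypasses this heuristic.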
+ small_nullspace = 5 + if rr and A_eq.size > 0: + try: # TODO: use results of first SVD in _remove_redundancy_svd + rank = np.linalg.matrix_rank(A_eq) + # oh well, we'll have to go with _remove_redundancy_pivot_dense + except Exception: + rank = 0 + if rr and A_eq.size > 0 and rank < A_eq.shape[0]: + warn(redundancy_warning, OptimizeWarning, stacklevel=3) + dim_row_nullspace = A_eq.shape[0]-rank + if rr_method is None: + if dim_row_nullspace <= small_nullspace: + rr_res = _remove_redundancy_svd(A_eq, b_eq) + A_eq, b_eq, status, message = rr_res + if dim_row_nullspace > small_nullspace or status == 4: + rr_res = _remove_redundancy_pivot_dense(A_eq, b_eq) + A_eq, b_eq, status, message = rr_res + + else: + rr_method = rr_method.lower() + if rr_method == "svd": + rr_res = _remove_redundancy_svd(A_eq, b_eq) + A_eq, b_eq, status, message = rr_res + elif rr_method == "pivot": + rr_res = _remove_redundancy_pivot_dense(A_eq, b_eq) + A_eq, b_eq, status, message = rr_res + elif rr_method == "id": + rr_res = _remove_redundancy_id(A_eq, b_eq, rank) + A_eq, b_eq, status, message = rr_res + else: # shouldn't get here; option validity checked above + pass + if A_eq.shape[0] < rank: + message = ("Due to numerical issues, redundant equality " + "constraints could not be removed automatically. " + "Try providing your constraint matrices as sparse " + "matrices to activate sparse presolve, try turning " + "off redundancy removal, or try turning off presolve " + "altogether.") + status = 4 + if status != 0: + complete = True + return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0), + c0, x, revstack, complete, status, message) + + +def _parse_linprog(lp, options, meth): + """ + Parse the provided linear programming problem + + ``_parse_linprog`` employs two main steps ``_check_sparse_inputs`` and + ``_clean_inputs``. ``_check_sparse_inputs`` checks for sparsity in the + provided constraints (``A_ub`` and ``A_eq) and if these match the provided + sparsity optional values. + + ``_clean inputs`` checks of the provided inputs. If no violations are + identified the objective vector, upper bound constraints, equality + constraints, and simple bounds are returned in the expected format. + + Parameters + ---------- + lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields: + + c : 1D array + The coefficients of the linear objective function to be minimized. + A_ub : 2D array, optional + The inequality constraint matrix. Each row of ``A_ub`` specifies the + coefficients of a linear inequality constraint on ``x``. + b_ub : 1D array, optional + The inequality constraint vector. Each element represents an + upper bound on the corresponding value of ``A_ub @ x``. + A_eq : 2D array, optional + The equality constraint matrix. Each row of ``A_eq`` specifies the + coefficients of a linear equality constraint on ``x``. + b_eq : 1D array, optional + The equality constraint vector. Each element of ``A_eq @ x`` must equal + the corresponding element of ``b_eq``. + bounds : various valid formats, optional + The bounds of ``x``, as ``min`` and ``max`` pairs. + If bounds are specified for all N variables separately, valid formats are: + * a 2D array (2 x N or N x 2); + * a sequence of N sequences, each with 2 values. + If all variables have the same bounds, a single pair of values can + be specified. Valid formats are: + * a sequence with 2 scalar values; + * a sequence with a single element containing 2 scalar values. 
+ If all variables have a lower bound of 0 and no upper bound, the bounds + parameter can be omitted (or given as None). + x0 : 1D array, optional + Guess values of the decision variables, which will be refined by + the optimization algorithm. This argument is currently used only by the + 'revised simplex' method, and can only be used if `x0` represents a + basic feasible solution. + + options : dict + A dictionary of solver options. All methods accept the following + generic options: + + maxiter : int + Maximum number of iterations to perform. + disp : bool + Set to True to print convergence messages. + + For method-specific options, see :func:`show_options('linprog')`. + + Returns + ------- + lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields: + + c : 1D array + The coefficients of the linear objective function to be minimized. + A_ub : 2D array, optional + The inequality constraint matrix. Each row of ``A_ub`` specifies the + coefficients of a linear inequality constraint on ``x``. + b_ub : 1D array, optional + The inequality constraint vector. Each element represents an + upper bound on the corresponding value of ``A_ub @ x``. + A_eq : 2D array, optional + The equality constraint matrix. Each row of ``A_eq`` specifies the + coefficients of a linear equality constraint on ``x``. + b_eq : 1D array, optional + The equality constraint vector. Each element of ``A_eq @ x`` must equal + the corresponding element of ``b_eq``. + bounds : 2D array + The bounds of ``x``, as ``min`` and ``max`` pairs, one for each of the N + elements of ``x``. The N x 2 array contains lower bounds in the first + column and upper bounds in the 2nd. Unbounded variables have lower + bound -np.inf and/or upper bound np.inf. + x0 : 1D array, optional + Guess values of the decision variables, which will be refined by + the optimization algorithm. This argument is currently used only by the + 'revised simplex' method, and can only be used if `x0` represents a + basic feasible solution. + + options : dict, optional + A dictionary of solver options. All methods accept the following + generic options: + + maxiter : int + Maximum number of iterations to perform. + disp : bool + Set to True to print convergence messages. + + For method-specific options, see :func:`show_options('linprog')`. + + """ + if options is None: + options = {} + + solver_options = {k: v for k, v in options.items()} + solver_options, A_ub, A_eq = _check_sparse_inputs(solver_options, meth, + lp.A_ub, lp.A_eq) + # Convert lists to numpy arrays, etc... + lp = _clean_inputs(lp._replace(A_ub=A_ub, A_eq=A_eq)) + return lp, solver_options + + +def _get_Abc(lp, c0): + """ + Given a linear programming problem of the form: + + Minimize:: + + c @ x + + Subject to:: + + A_ub @ x <= b_ub + A_eq @ x == b_eq + lb <= x <= ub + + where ``lb = 0`` and ``ub = None`` unless set in ``bounds``. + + Return the problem in standard form: + + Minimize:: + + c @ x + + Subject to:: + + A @ x == b + x >= 0 + + by adding slack variables and making variable substitutions as necessary. + + Parameters + ---------- + lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields: + + c : 1D array + The coefficients of the linear objective function to be minimized. + A_ub : 2D array, optional + The inequality constraint matrix. Each row of ``A_ub`` specifies the + coefficients of a linear inequality constraint on ``x``. + b_ub : 1D array, optional + The inequality constraint vector. 
Each element represents an + upper bound on the corresponding value of ``A_ub @ x``. + A_eq : 2D array, optional + The equality constraint matrix. Each row of ``A_eq`` specifies the + coefficients of a linear equality constraint on ``x``. + b_eq : 1D array, optional + The equality constraint vector. Each element of ``A_eq @ x`` must equal + the corresponding element of ``b_eq``. + bounds : 2D array + The bounds of ``x``, lower bounds in the 1st column, upper + bounds in the 2nd column. The bounds are possibly tightened + by the presolve procedure. + x0 : 1D array, optional + Guess values of the decision variables, which will be refined by + the optimization algorithm. This argument is currently used only by the + 'revised simplex' method, and can only be used if `x0` represents a + basic feasible solution. + + c0 : float + Constant term in objective function due to fixed (and eliminated) + variables. + + Returns + ------- + A : 2-D array + 2-D array such that ``A`` @ ``x``, gives the values of the equality + constraints at ``x``. + b : 1-D array + 1-D array of values representing the RHS of each equality constraint + (row) in A (for standard form problem). + c : 1-D array + Coefficients of the linear objective function to be minimized (for + standard form problem). + c0 : float + Constant term in objective function due to fixed (and eliminated) + variables. + x0 : 1-D array + Starting values of the independent variables, which will be refined by + the optimization algorithm + + References + ---------- + .. [9] Bertsimas, Dimitris, and J. Tsitsiklis. "Introduction to linear + programming." Athena Scientific 1 (1997): 997. + + """ + c, A_ub, b_ub, A_eq, b_eq, bounds, x0, integrality = lp + + if sps.issparse(A_eq): + sparse = True + A_eq = sps.csr_matrix(A_eq) + A_ub = sps.csr_matrix(A_ub) + + def hstack(blocks): + return sps.hstack(blocks, format="csr") + + def vstack(blocks): + return sps.vstack(blocks, format="csr") + + zeros = sps.csr_matrix + eye = sps.eye + else: + sparse = False + hstack = np.hstack + vstack = np.vstack + zeros = np.zeros + eye = np.eye + + # Variables lbs and ubs (see below) may be changed, which feeds back into + # bounds, so copy. + bounds = np.array(bounds, copy=True) + + # modify problem such that all variables have only non-negativity bounds + lbs = bounds[:, 0] + ubs = bounds[:, 1] + m_ub, n_ub = A_ub.shape + + lb_none = np.equal(lbs, -np.inf) + ub_none = np.equal(ubs, np.inf) + lb_some = np.logical_not(lb_none) + ub_some = np.logical_not(ub_none) + + # unbounded below: substitute xi = -xi' (unbounded above) + # if -inf <= xi <= ub, then -ub <= -xi <= inf, so swap and invert bounds + l_nolb_someub = np.logical_and(lb_none, ub_some) + i_nolb = np.nonzero(l_nolb_someub)[0] + lbs[l_nolb_someub], ubs[l_nolb_someub] = ( + -ubs[l_nolb_someub], -lbs[l_nolb_someub]) + lb_none = np.equal(lbs, -np.inf) + ub_none = np.equal(ubs, np.inf) + lb_some = np.logical_not(lb_none) + ub_some = np.logical_not(ub_none) + c[i_nolb] *= -1 + if x0 is not None: + x0[i_nolb] *= -1 + if len(i_nolb) > 0: + if A_ub.shape[0] > 0: # sometimes needed for sparse arrays... 
weird + A_ub[:, i_nolb] *= -1 + if A_eq.shape[0] > 0: + A_eq[:, i_nolb] *= -1 + + # upper bound: add inequality constraint + i_newub, = ub_some.nonzero() + ub_newub = ubs[ub_some] + n_bounds = len(i_newub) + if n_bounds > 0: + shape = (n_bounds, A_ub.shape[1]) + if sparse: + idxs = (np.arange(n_bounds), i_newub) + A_ub = vstack((A_ub, sps.csr_matrix((np.ones(n_bounds), idxs), + shape=shape))) + else: + A_ub = vstack((A_ub, np.zeros(shape))) + A_ub[np.arange(m_ub, A_ub.shape[0]), i_newub] = 1 + b_ub = np.concatenate((b_ub, np.zeros(n_bounds))) + b_ub[m_ub:] = ub_newub + + A1 = vstack((A_ub, A_eq)) + b = np.concatenate((b_ub, b_eq)) + c = np.concatenate((c, np.zeros((A_ub.shape[0],)))) + if x0 is not None: + x0 = np.concatenate((x0, np.zeros((A_ub.shape[0],)))) + # unbounded: substitute xi = xi+ + xi- + l_free = np.logical_and(lb_none, ub_none) + i_free = np.nonzero(l_free)[0] + n_free = len(i_free) + c = np.concatenate((c, np.zeros(n_free))) + if x0 is not None: + x0 = np.concatenate((x0, np.zeros(n_free))) + A1 = hstack((A1[:, :n_ub], -A1[:, i_free])) + c[n_ub:n_ub+n_free] = -c[i_free] + if x0 is not None: + i_free_neg = x0[i_free] < 0 + x0[np.arange(n_ub, A1.shape[1])[i_free_neg]] = -x0[i_free[i_free_neg]] + x0[i_free[i_free_neg]] = 0 + + # add slack variables + A2 = vstack([eye(A_ub.shape[0]), zeros((A_eq.shape[0], A_ub.shape[0]))]) + + A = hstack([A1, A2]) + + # lower bound: substitute xi = xi' + lb + # now there is a constant term in objective + i_shift = np.nonzero(lb_some)[0] + lb_shift = lbs[lb_some].astype(float) + c0 += np.sum(lb_shift * c[i_shift]) + if sparse: + b = b.reshape(-1, 1) + A = A.tocsc() + b -= (A[:, i_shift] @ sps.diags(lb_shift)).sum(axis=1) + b = b.ravel() + else: + b -= (A[:, i_shift] * lb_shift).sum(axis=1) + if x0 is not None: + x0[i_shift] -= lb_shift + + return A, b, c, c0, x0 + + +def _round_to_power_of_two(x): + """ + Round elements of the array to the nearest power of two. + """ + return 2**np.around(np.log2(x)) + + +def _autoscale(A, b, c, x0): + """ + Scales the problem according to equilibration from [12]. + Also normalizes the right hand side vector by its maximum element. + """ + m, n = A.shape + + C = 1 + R = 1 + + if A.size > 0: + + R = np.max(np.abs(A), axis=1) + if sps.issparse(A): + R = R.toarray().flatten() + R[R == 0] = 1 + R = 1/_round_to_power_of_two(R) + A = sps.diags(R)@A if sps.issparse(A) else A*R.reshape(m, 1) + b = b*R + + C = np.max(np.abs(A), axis=0) + if sps.issparse(A): + C = C.toarray().flatten() + C[C == 0] = 1 + C = 1/_round_to_power_of_two(C) + A = A@sps.diags(C) if sps.issparse(A) else A*C + c = c*C + + b_scale = np.max(np.abs(b)) if b.size > 0 else 1 + if b_scale == 0: + b_scale = 1. + b = b/b_scale + + if x0 is not None: + x0 = x0/b_scale*(1/C) + return A, b, c, x0, C, b_scale + + +def _unscale(x, C, b_scale): + """ + Converts solution to _autoscale problem -> solution to original problem. + """ + + try: + n = len(C) + # fails if sparse or scalar; that's OK. + # this is only needed for original simplex (never sparse) + except TypeError: + n = len(x) + + return x[:n]*b_scale*C + + +def _display_summary(message, status, fun, iteration): + """ + Print the termination summary of the linear program + + Parameters + ---------- + message : str + A string descriptor of the exit status of the optimization. 
+ status : int + An integer representing the exit status of the optimization:: + + 0 : Optimization terminated successfully + 1 : Iteration limit reached + 2 : Problem appears to be infeasible + 3 : Problem appears to be unbounded + 4 : Serious numerical difficulties encountered + + fun : float + Value of the objective function. + iteration : iteration + The number of iterations performed. + """ + print(message) + if status in (0, 1): + print(f" Current function value: {fun: <12.6f}") + print(f" Iterations: {iteration:d}") + + +def _postsolve(x, postsolve_args, complete=False): + """ + Given solution x to presolved, standard form linear program x, add + fixed variables back into the problem and undo the variable substitutions + to get solution to original linear program. Also, calculate the objective + function value, slack in original upper bound constraints, and residuals + in original equality constraints. + + Parameters + ---------- + x : 1-D array + Solution vector to the standard-form problem. + postsolve_args : tuple + Data needed by _postsolve to convert the solution to the standard-form + problem into the solution to the original problem, including: + + lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields: + + c : 1D array + The coefficients of the linear objective function to be minimized. + A_ub : 2D array, optional + The inequality constraint matrix. Each row of ``A_ub`` specifies the + coefficients of a linear inequality constraint on ``x``. + b_ub : 1D array, optional + The inequality constraint vector. Each element represents an + upper bound on the corresponding value of ``A_ub @ x``. + A_eq : 2D array, optional + The equality constraint matrix. Each row of ``A_eq`` specifies the + coefficients of a linear equality constraint on ``x``. + b_eq : 1D array, optional + The equality constraint vector. Each element of ``A_eq @ x`` must equal + the corresponding element of ``b_eq``. + bounds : 2D array + The bounds of ``x``, lower bounds in the 1st column, upper + bounds in the 2nd column. The bounds are possibly tightened + by the presolve procedure. + x0 : 1D array, optional + Guess values of the decision variables, which will be refined by + the optimization algorithm. This argument is currently used only by the + 'revised simplex' method, and can only be used if `x0` represents a + basic feasible solution. 
+ + revstack: list of functions + the functions in the list reverse the operations of _presolve() + the function signature is x_org = f(x_mod), where x_mod is the result + of a presolve step and x_org the value at the start of the step + complete : bool + Whether the solution was determined in presolve (``True`` if so) + + Returns + ------- + x : 1-D array + Solution vector to original linear programming problem + fun: float + optimal objective value for original problem + slack : 1-D array + The (non-negative) slack in the upper bound constraints, that is, + ``b_ub - A_ub @ x`` + con : 1-D array + The (nominally zero) residuals of the equality constraints, that is, + ``b - A_eq @ x`` + """ + # note that all the inputs are the ORIGINAL, unmodified versions + # no rows, columns have been removed + + c, A_ub, b_ub, A_eq, b_eq, bounds, x0, integrality = postsolve_args[0] + revstack, C, b_scale = postsolve_args[1:] + + x = _unscale(x, C, b_scale) + + # Undo variable substitutions of _get_Abc() + # if "complete", problem was solved in presolve; don't do anything here + n_x = bounds.shape[0] + if not complete and bounds is not None: # bounds are never none, probably + n_unbounded = 0 + for i, bi in enumerate(bounds): + lbi = bi[0] + ubi = bi[1] + if lbi == -np.inf and ubi == np.inf: + n_unbounded += 1 + x[i] = x[i] - x[n_x + n_unbounded - 1] + else: + if lbi == -np.inf: + x[i] = ubi - x[i] + else: + x[i] += lbi + # all the rest of the variables were artificial + x = x[:n_x] + + # If there were variables removed from the problem, add them back into the + # solution vector + # Apply the functions in revstack (reverse direction) + for rev in reversed(revstack): + x = rev(x) + + fun = x.dot(c) + with np.errstate(invalid="ignore"): + slack = b_ub - A_ub.dot(x) # report slack for ORIGINAL UB constraints + # report residuals of ORIGINAL EQ constraints + con = b_eq - A_eq.dot(x) + + return x, fun, slack, con + + +def _check_result(x, fun, status, slack, con, bounds, tol, message, + integrality): + """ + Check the validity of the provided solution. + + A valid (optimal) solution satisfies all bounds, all slack variables are + non-negative and all equality constraint residuals are approximately zero. + Further, the lower-bounds, upper-bounds, slack and residuals contain + no nan values. + + Parameters + ---------- + x : 1-D array + Solution vector to original linear programming problem + fun: float + optimal objective value for original problem + status : int + An integer representing the exit status of the optimization:: + + 0 : Optimization terminated successfully + 1 : Iteration limit reached + 2 : Problem appears to be infeasible + 3 : Problem appears to be unbounded + 4 : Serious numerical difficulties encountered + + slack : 1-D array + The (non-negative) slack in the upper bound constraints, that is, + ``b_ub - A_ub @ x`` + con : 1-D array + The (nominally zero) residuals of the equality constraints, that is, + ``b - A_eq @ x`` + bounds : 2D array + The bounds on the original variables ``x`` + message : str + A string descriptor of the exit status of the optimization. + tol : float + Termination tolerance; see [1]_ Section 4.5.
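Editor's note: the quantities reconstructed by ``_postsolve`` (solution, objective value, slack, equality residuals) are what the public ``scipy.optimize.linprog`` result exposes, and they are what ``_check_result`` inspects. A minimal example against the public API, with made-up data::

    import numpy as np
    from scipy.optimize import linprog

    # minimize c @ x  subject to  A_ub @ x <= b_ub,  A_eq @ x == b_eq,  x >= 0
    c = np.array([1.0, 2.0])
    A_ub = np.array([[1.0, 1.0]])
    b_ub = np.array([4.0])
    A_eq = np.array([[1.0, -1.0]])
    b_eq = np.array([1.0])

    res = linprog(c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq,
                  bounds=[(0, None), (0, None)])
    print(res.status, res.message)
    print(res.x)      # solution in the original variables
    print(res.fun)    # objective value
    print(res.slack)  # b_ub - A_ub @ x, non-negative at a feasible point
    print(res.con)    # b_eq - A_eq @ x, approximately zero at a feasible point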
+ + Returns + ------- + status : int + An integer representing the exit status of the optimization:: + + 0 : Optimization terminated successfully + 1 : Iteration limit reached + 2 : Problem appears to be infeasible + 3 : Problem appears to be unbounded + 4 : Serious numerical difficulties encountered + + message : str + A string descriptor of the exit status of the optimization. + """ + # Somewhat arbitrary + tol = np.sqrt(tol) * 10 + + if x is None: + # HiGHS does not provide x if infeasible/unbounded + if status == 0: # Observed with HiGHS Simplex Primal + status = 4 + message = ("The solver did not provide a solution nor did it " + "report a failure. Please submit a bug report.") + return status, message + + contains_nans = ( + np.isnan(x).any() + or np.isnan(fun) + or np.isnan(slack).any() + or np.isnan(con).any() + ) + + if contains_nans: + is_feasible = False + else: + if integrality is None: + integrality = 0 + valid_bounds = (x >= bounds[:, 0] - tol) & (x <= bounds[:, 1] + tol) + # When integrality is 2 or 3, x must be within bounds OR take value 0 + valid_bounds |= (integrality > 1) & np.isclose(x, 0, atol=tol) + invalid_bounds = not np.all(valid_bounds) + + invalid_slack = status != 3 and (slack < -tol).any() + invalid_con = status != 3 and (np.abs(con) > tol).any() + is_feasible = not (invalid_bounds or invalid_slack or invalid_con) + + if status == 0 and not is_feasible: + status = 4 + message = ("The solution does not satisfy the constraints within the " + "required tolerance of " + f"{tol:.2E}" + ", yet " + "no errors were raised and there is no certificate of " + "infeasibility or unboundedness. Check whether " + "the slack and constraint residuals are acceptable; " + "if not, consider enabling presolve, adjusting the " + "tolerance option(s), and/or using a different method. " + "Please consider submitting a bug report.") + elif status == 2 and is_feasible: + # Occurs if the simplex method exits after phase one with a very + # nearly basic feasible solution. Postsolving can make the solution + # basic, however, this solution is NOT optimal + status = 4 + message = ("The solution is feasible, but the solver did not report " + "that the solution was optimal. 
Please try a different " + "method.") + + return status, message diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_lsap.cpython-310-x86_64-linux-gnu.so b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_lsap.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..b2ea39a10549d1346bada989f573981804d22006 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_lsap.cpython-310-x86_64-linux-gnu.so differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_lsq/__init__.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_lsq/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f60adcc891304e34ac9d85d108b6a232b4bf0c93 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_lsq/__init__.py @@ -0,0 +1,5 @@ +"""This module contains least-squares algorithms.""" +from .least_squares import least_squares +from .lsq_linear import lsq_linear + +__all__ = ['least_squares', 'lsq_linear'] diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/__init__.cpython-310.pyc b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fe9ce1ab398c1882e925b64acf76d0eeaad1c4ac Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/__init__.cpython-310.pyc differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/bvls.cpython-310.pyc b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/bvls.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..50fa0d4f38b4aa85c6a0a5b23d9cc1d0258074b2 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/bvls.cpython-310.pyc differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/common.cpython-310.pyc b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..73c5b98e0ce46aa10ac39475d6ae39d754c451bc Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/common.cpython-310.pyc differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/dogbox.cpython-310.pyc b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/dogbox.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..37ea5089e1de228e85f2cea38379feffbbb44833 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/dogbox.cpython-310.pyc differ diff --git 
a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/least_squares.cpython-310.pyc b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/least_squares.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4b5b92e142e52b5919e174cf1a0b1b5d657895a4 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/least_squares.cpython-310.pyc differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/lsq_linear.cpython-310.pyc b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/lsq_linear.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8c18cb397e843df52cfdef6ff4be53e641a4468f Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/lsq_linear.cpython-310.pyc differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/trf.cpython-310.pyc b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/trf.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..db73828ec98c21941d70babadf7e6592cd1fcf18 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/trf.cpython-310.pyc differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/trf_linear.cpython-310.pyc b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/trf_linear.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dc081523c70f5eccfdc125dff63ee9cf914cbe80 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/trf_linear.cpython-310.pyc differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_lsq/bvls.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_lsq/bvls.py new file mode 100644 index 0000000000000000000000000000000000000000..8f34ead4a1fc4edbb3c2ab50a204aa9a3cc21cff --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_lsq/bvls.py @@ -0,0 +1,183 @@ +"""Bounded-variable least-squares algorithm.""" +import numpy as np +from numpy.linalg import norm, lstsq +from scipy.optimize import OptimizeResult + +from .common import print_header_linear, print_iteration_linear + + +def compute_kkt_optimality(g, on_bound): + """Compute the maximum violation of KKT conditions.""" + g_kkt = g * on_bound + free_set = on_bound == 0 + g_kkt[free_set] = np.abs(g[free_set]) + return np.max(g_kkt) + + +def bvls(A, b, x_lsq, lb, ub, tol, max_iter, verbose, rcond=None): + m, n = A.shape + + x = x_lsq.copy() + on_bound = np.zeros(n) + + mask = x <= lb + x[mask] = lb[mask] + on_bound[mask] = -1 + + mask = x >= ub + x[mask] = ub[mask] + on_bound[mask] = 1 + + free_set = on_bound == 0 + active_set = ~free_set + free_set, = np.nonzero(free_set) + + r = A.dot(x) - b + cost 
= 0.5 * np.dot(r, r) + initial_cost = cost + g = A.T.dot(r) + + cost_change = None + step_norm = None + iteration = 0 + + if verbose == 2: + print_header_linear() + + # This is the initialization loop. The requirement is that the + # least-squares solution on free variables is feasible before BVLS starts. + # One possible initialization is to set all variables to lower or upper + # bounds, but many iterations may be required from this state later on. + # The implemented ad-hoc procedure which intuitively should give a better + # initial state: find the least-squares solution on current free variables, + # if its feasible then stop, otherwise, set violating variables to + # corresponding bounds and continue on the reduced set of free variables. + + while free_set.size > 0: + if verbose == 2: + optimality = compute_kkt_optimality(g, on_bound) + print_iteration_linear(iteration, cost, cost_change, step_norm, + optimality) + + iteration += 1 + x_free_old = x[free_set].copy() + + A_free = A[:, free_set] + b_free = b - A.dot(x * active_set) + z = lstsq(A_free, b_free, rcond=rcond)[0] + + lbv = z < lb[free_set] + ubv = z > ub[free_set] + v = lbv | ubv + + if np.any(lbv): + ind = free_set[lbv] + x[ind] = lb[ind] + active_set[ind] = True + on_bound[ind] = -1 + + if np.any(ubv): + ind = free_set[ubv] + x[ind] = ub[ind] + active_set[ind] = True + on_bound[ind] = 1 + + ind = free_set[~v] + x[ind] = z[~v] + + r = A.dot(x) - b + cost_new = 0.5 * np.dot(r, r) + cost_change = cost - cost_new + cost = cost_new + g = A.T.dot(r) + step_norm = norm(x[free_set] - x_free_old) + + if np.any(v): + free_set = free_set[~v] + else: + break + + if max_iter is None: + max_iter = n + max_iter += iteration + + termination_status = None + + # Main BVLS loop. + + optimality = compute_kkt_optimality(g, on_bound) + for iteration in range(iteration, max_iter): # BVLS Loop A + if verbose == 2: + print_iteration_linear(iteration, cost, cost_change, + step_norm, optimality) + + if optimality < tol: + termination_status = 1 + + if termination_status is not None: + break + + move_to_free = np.argmax(g * on_bound) + on_bound[move_to_free] = 0 + + while True: # BVLS Loop B + + free_set = on_bound == 0 + active_set = ~free_set + free_set, = np.nonzero(free_set) + + x_free = x[free_set] + x_free_old = x_free.copy() + lb_free = lb[free_set] + ub_free = ub[free_set] + + A_free = A[:, free_set] + b_free = b - A.dot(x * active_set) + z = lstsq(A_free, b_free, rcond=rcond)[0] + + lbv, = np.nonzero(z < lb_free) + ubv, = np.nonzero(z > ub_free) + v = np.hstack((lbv, ubv)) + + if v.size > 0: + alphas = np.hstack(( + lb_free[lbv] - x_free[lbv], + ub_free[ubv] - x_free[ubv])) / (z[v] - x_free[v]) + + i = np.argmin(alphas) + i_free = v[i] + alpha = alphas[i] + + x_free *= 1 - alpha + x_free += alpha * z + x[free_set] = x_free + + if i < lbv.size: + on_bound[free_set[i_free]] = -1 + else: + on_bound[free_set[i_free]] = 1 + else: + x_free = z + x[free_set] = x_free + break + + step_norm = norm(x_free - x_free_old) + + r = A.dot(x) - b + cost_new = 0.5 * np.dot(r, r) + cost_change = cost - cost_new + + if cost_change < tol * cost: + termination_status = 2 + cost = cost_new + + g = A.T.dot(r) + optimality = compute_kkt_optimality(g, on_bound) + + if termination_status is None: + termination_status = 0 + + return OptimizeResult( + x=x, fun=r, cost=cost, optimality=optimality, active_mask=on_bound, + nit=iteration + 1, status=termination_status, + initial_cost=initial_cost) diff --git 
a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_lsq/common.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_lsq/common.py new file mode 100644 index 0000000000000000000000000000000000000000..0f8117f23ec1111d5205537c59931b165e2bfdaf --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_lsq/common.py @@ -0,0 +1,731 @@ +"""Functions used by least-squares algorithms.""" +from math import copysign + +import numpy as np +from numpy.linalg import norm + +from scipy.linalg import cho_factor, cho_solve, LinAlgError +from scipy.sparse import issparse +from scipy.sparse.linalg import LinearOperator, aslinearoperator + + +EPS = np.finfo(float).eps + + +# Functions related to a trust-region problem. + + +def intersect_trust_region(x, s, Delta): + """Find the intersection of a line with the boundary of a trust region. + + This function solves the quadratic equation with respect to t + ||(x + s*t)||**2 = Delta**2. + + Returns + ------- + t_neg, t_pos : tuple of float + Negative and positive roots. + + Raises + ------ + ValueError + If `s` is zero or `x` is not within the trust region. + """ + a = np.dot(s, s) + if a == 0: + raise ValueError("`s` is zero.") + + b = np.dot(x, s) + + c = np.dot(x, x) - Delta**2 + if c > 0: + raise ValueError("`x` is not within the trust region.") + + d = np.sqrt(b*b - a*c) # Root from one fourth of the discriminant. + + # Computations below avoid loss of significance, see "Numerical Recipes". + q = -(b + copysign(d, b)) + t1 = q / a + t2 = c / q + + if t1 < t2: + return t1, t2 + else: + return t2, t1 + + +def solve_lsq_trust_region(n, m, uf, s, V, Delta, initial_alpha=None, + rtol=0.01, max_iter=10): + """Solve a trust-region problem arising in least-squares minimization. + + This function implements a method described by J. J. More [1]_ and used + in MINPACK, but it relies on a single SVD of Jacobian instead of series + of Cholesky decompositions. Before running this function, compute: + ``U, s, VT = svd(J, full_matrices=False)``. + + Parameters + ---------- + n : int + Number of variables. + m : int + Number of residuals. + uf : ndarray + Computed as U.T.dot(f). + s : ndarray + Singular values of J. + V : ndarray + Transpose of VT. + Delta : float + Radius of a trust region. + initial_alpha : float, optional + Initial guess for alpha, which might be available from a previous + iteration. If None, determined automatically. + rtol : float, optional + Stopping tolerance for the root-finding procedure. Namely, the + solution ``p`` will satisfy ``abs(norm(p) - Delta) < rtol * Delta``. + max_iter : int, optional + Maximum allowed number of iterations for the root-finding procedure. + + Returns + ------- + p : ndarray, shape (n,) + Found solution of a trust-region problem. + alpha : float + Positive value such that (J.T*J + alpha*I)*p = -J.T*f. + Sometimes called Levenberg-Marquardt parameter. + n_iter : int + Number of iterations made by root-finding procedure. Zero means + that Gauss-Newton step was selected as the solution. + + References + ---------- + .. [1] More, J. J., "The Levenberg-Marquardt Algorithm: Implementation + and Theory," Numerical Analysis, ed. G. A. Watson, Lecture Notes + in Mathematics 630, Springer Verlag, pp. 105-116, 1977. + """ + def phi_and_derivative(alpha, suf, s, Delta): + """Function of which to find zero. 
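Editor's note: the BVLS routine added above (``bvls.py``) is normally reached through the public ``scipy.optimize.lsq_linear`` interface rather than called directly. A small illustrative call on synthetic, well-conditioned data (seed and bounds chosen arbitrarily)::

    import numpy as np
    from scipy.optimize import lsq_linear

    rng = np.random.default_rng(0)
    A = rng.standard_normal((20, 3))
    x_true = np.array([0.5, -2.0, 3.0])
    b = A @ x_true + 0.01 * rng.standard_normal(20)

    # Bound every coefficient to [-1, 1]; the unconstrained solution violates this.
    res = lsq_linear(A, b, bounds=(-1.0, 1.0), method='bvls')
    print(res.x)            # clipped to the feasible box
    print(res.active_mask)  # -1 / 0 / 1, as documented for bvls() above
    print(res.status, res.optimality)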
+ + It is defined as "norm of regularized (by alpha) least-squares + solution minus `Delta`". Refer to [1]_. + """ + denom = s**2 + alpha + p_norm = norm(suf / denom) + phi = p_norm - Delta + phi_prime = -np.sum(suf ** 2 / denom**3) / p_norm + return phi, phi_prime + + suf = s * uf + + # Check if J has full rank and try Gauss-Newton step. + if m >= n: + threshold = EPS * m * s[0] + full_rank = s[-1] > threshold + else: + full_rank = False + + if full_rank: + p = -V.dot(uf / s) + if norm(p) <= Delta: + return p, 0.0, 0 + + alpha_upper = norm(suf) / Delta + + if full_rank: + phi, phi_prime = phi_and_derivative(0.0, suf, s, Delta) + alpha_lower = -phi / phi_prime + else: + alpha_lower = 0.0 + + if initial_alpha is None or not full_rank and initial_alpha == 0: + alpha = max(0.001 * alpha_upper, (alpha_lower * alpha_upper)**0.5) + else: + alpha = initial_alpha + + for it in range(max_iter): + if alpha < alpha_lower or alpha > alpha_upper: + alpha = max(0.001 * alpha_upper, (alpha_lower * alpha_upper)**0.5) + + phi, phi_prime = phi_and_derivative(alpha, suf, s, Delta) + + if phi < 0: + alpha_upper = alpha + + ratio = phi / phi_prime + alpha_lower = max(alpha_lower, alpha - ratio) + alpha -= (phi + Delta) * ratio / Delta + + if np.abs(phi) < rtol * Delta: + break + + p = -V.dot(suf / (s**2 + alpha)) + + # Make the norm of p equal to Delta, p is changed only slightly during + # this. It is done to prevent p lie outside the trust region (which can + # cause problems later). + p *= Delta / norm(p) + + return p, alpha, it + 1 + + +def solve_trust_region_2d(B, g, Delta): + """Solve a general trust-region problem in 2 dimensions. + + The problem is reformulated as a 4th order algebraic equation, + the solution of which is found by numpy.roots. + + Parameters + ---------- + B : ndarray, shape (2, 2) + Symmetric matrix, defines a quadratic term of the function. + g : ndarray, shape (2,) + Defines a linear term of the function. + Delta : float + Radius of a trust region. + + Returns + ------- + p : ndarray, shape (2,) + Found solution. + newton_step : bool + Whether the returned solution is the Newton step which lies within + the trust region. + """ + try: + R, lower = cho_factor(B) + p = -cho_solve((R, lower), g) + if np.dot(p, p) <= Delta**2: + return p, True + except LinAlgError: + pass + + a = B[0, 0] * Delta**2 + b = B[0, 1] * Delta**2 + c = B[1, 1] * Delta**2 + + d = g[0] * Delta + f = g[1] * Delta + + coeffs = np.array( + [-b + d, 2 * (a - c + f), 6 * b, 2 * (-a + c + f), -b - d]) + t = np.roots(coeffs) # Can handle leading zeros. + t = np.real(t[np.isreal(t)]) + + p = Delta * np.vstack((2 * t / (1 + t**2), (1 - t**2) / (1 + t**2))) + value = 0.5 * np.sum(p * B.dot(p), axis=0) + np.dot(g, p) + i = np.argmin(value) + p = p[:, i] + + return p, False + + +def update_tr_radius(Delta, actual_reduction, predicted_reduction, + step_norm, bound_hit): + """Update the radius of a trust region based on the cost reduction. + + Returns + ------- + Delta : float + New radius. + ratio : float + Ratio between actual and predicted reductions. + """ + if predicted_reduction > 0: + ratio = actual_reduction / predicted_reduction + elif predicted_reduction == actual_reduction == 0: + ratio = 1 + else: + ratio = 0 + + if ratio < 0.25: + Delta = 0.25 * step_norm + elif ratio > 0.75 and bound_hit: + Delta *= 2.0 + + return Delta, ratio + + +# Construction and minimization of quadratic functions. + + +def build_quadratic_1d(J, g, s, diag=None, s0=None): + """Parameterize a multivariate quadratic function along a line. 
+ + The resulting univariate quadratic function is given as follows:: + + f(t) = 0.5 * (s0 + s*t).T * (J.T*J + diag) * (s0 + s*t) + + g.T * (s0 + s*t) + + Parameters + ---------- + J : ndarray, sparse matrix or LinearOperator shape (m, n) + Jacobian matrix, affects the quadratic term. + g : ndarray, shape (n,) + Gradient, defines the linear term. + s : ndarray, shape (n,) + Direction vector of a line. + diag : None or ndarray with shape (n,), optional + Addition diagonal part, affects the quadratic term. + If None, assumed to be 0. + s0 : None or ndarray with shape (n,), optional + Initial point. If None, assumed to be 0. + + Returns + ------- + a : float + Coefficient for t**2. + b : float + Coefficient for t. + c : float + Free term. Returned only if `s0` is provided. + """ + v = J.dot(s) + a = np.dot(v, v) + if diag is not None: + a += np.dot(s * diag, s) + a *= 0.5 + + b = np.dot(g, s) + + if s0 is not None: + u = J.dot(s0) + b += np.dot(u, v) + c = 0.5 * np.dot(u, u) + np.dot(g, s0) + if diag is not None: + b += np.dot(s0 * diag, s) + c += 0.5 * np.dot(s0 * diag, s0) + return a, b, c + else: + return a, b + + +def minimize_quadratic_1d(a, b, lb, ub, c=0): + """Minimize a 1-D quadratic function subject to bounds. + + The free term `c` is 0 by default. Bounds must be finite. + + Returns + ------- + t : float + Minimum point. + y : float + Minimum value. + """ + t = [lb, ub] + if a != 0: + extremum = -0.5 * b / a + if lb < extremum < ub: + t.append(extremum) + t = np.asarray(t) + y = t * (a * t + b) + c + min_index = np.argmin(y) + return t[min_index], y[min_index] + + +def evaluate_quadratic(J, g, s, diag=None): + """Compute values of a quadratic function arising in least squares. + + The function is 0.5 * s.T * (J.T * J + diag) * s + g.T * s. + + Parameters + ---------- + J : ndarray, sparse matrix or LinearOperator, shape (m, n) + Jacobian matrix, affects the quadratic term. + g : ndarray, shape (n,) + Gradient, defines the linear term. + s : ndarray, shape (k, n) or (n,) + Array containing steps as rows. + diag : ndarray, shape (n,), optional + Addition diagonal part, affects the quadratic term. + If None, assumed to be 0. + + Returns + ------- + values : ndarray with shape (k,) or float + Values of the function. If `s` was 2-D, then ndarray is + returned, otherwise, float is returned. + """ + if s.ndim == 1: + Js = J.dot(s) + q = np.dot(Js, Js) + if diag is not None: + q += np.dot(s * diag, s) + else: + Js = J.dot(s.T) + q = np.sum(Js**2, axis=0) + if diag is not None: + q += np.sum(diag * s**2, axis=1) + + l = np.dot(s, g) + + return 0.5 * q + l + + +# Utility functions to work with bound constraints. + + +def in_bounds(x, lb, ub): + """Check if a point lies within bounds.""" + return np.all((x >= lb) & (x <= ub)) + + +def step_size_to_bound(x, s, lb, ub): + """Compute a min_step size required to reach a bound. + + The function computes a positive scalar t, such that x + s * t is on + the bound. + + Returns + ------- + step : float + Computed step. Non-negative value. + hits : ndarray of int with shape of x + Each element indicates whether a corresponding variable reaches the + bound: + + * 0 - the bound was not hit. + * -1 - the lower bound was hit. + * 1 - the upper bound was hit. 
+ """ + non_zero = np.nonzero(s) + s_non_zero = s[non_zero] + steps = np.empty_like(x) + steps.fill(np.inf) + with np.errstate(over='ignore'): + steps[non_zero] = np.maximum((lb - x)[non_zero] / s_non_zero, + (ub - x)[non_zero] / s_non_zero) + min_step = np.min(steps) + return min_step, np.equal(steps, min_step) * np.sign(s).astype(int) + + +def find_active_constraints(x, lb, ub, rtol=1e-10): + """Determine which constraints are active in a given point. + + The threshold is computed using `rtol` and the absolute value of the + closest bound. + + Returns + ------- + active : ndarray of int with shape of x + Each component shows whether the corresponding constraint is active: + + * 0 - a constraint is not active. + * -1 - a lower bound is active. + * 1 - a upper bound is active. + """ + active = np.zeros_like(x, dtype=int) + + if rtol == 0: + active[x <= lb] = -1 + active[x >= ub] = 1 + return active + + lower_dist = x - lb + upper_dist = ub - x + + lower_threshold = rtol * np.maximum(1, np.abs(lb)) + upper_threshold = rtol * np.maximum(1, np.abs(ub)) + + lower_active = (np.isfinite(lb) & + (lower_dist <= np.minimum(upper_dist, lower_threshold))) + active[lower_active] = -1 + + upper_active = (np.isfinite(ub) & + (upper_dist <= np.minimum(lower_dist, upper_threshold))) + active[upper_active] = 1 + + return active + + +def make_strictly_feasible(x, lb, ub, rstep=1e-10): + """Shift a point to the interior of a feasible region. + + Each element of the returned vector is at least at a relative distance + `rstep` from the closest bound. If ``rstep=0`` then `np.nextafter` is used. + """ + x_new = x.copy() + + active = find_active_constraints(x, lb, ub, rstep) + lower_mask = np.equal(active, -1) + upper_mask = np.equal(active, 1) + + if rstep == 0: + x_new[lower_mask] = np.nextafter(lb[lower_mask], ub[lower_mask]) + x_new[upper_mask] = np.nextafter(ub[upper_mask], lb[upper_mask]) + else: + x_new[lower_mask] = (lb[lower_mask] + + rstep * np.maximum(1, np.abs(lb[lower_mask]))) + x_new[upper_mask] = (ub[upper_mask] - + rstep * np.maximum(1, np.abs(ub[upper_mask]))) + + tight_bounds = (x_new < lb) | (x_new > ub) + x_new[tight_bounds] = 0.5 * (lb[tight_bounds] + ub[tight_bounds]) + + return x_new + + +def CL_scaling_vector(x, g, lb, ub): + """Compute Coleman-Li scaling vector and its derivatives. + + Components of a vector v are defined as follows:: + + | ub[i] - x[i], if g[i] < 0 and ub[i] < np.inf + v[i] = | x[i] - lb[i], if g[i] > 0 and lb[i] > -np.inf + | 1, otherwise + + According to this definition v[i] >= 0 for all i. It differs from the + definition in paper [1]_ (eq. (2.2)), where the absolute value of v is + used. Both definitions are equivalent down the line. + Derivatives of v with respect to x take value 1, -1 or 0 depending on a + case. + + Returns + ------- + v : ndarray with shape of x + Scaling vector. + dv : ndarray with shape of x + Derivatives of v[i] with respect to x[i], diagonal elements of v's + Jacobian. + + References + ---------- + .. [1] M.A. Branch, T.F. Coleman, and Y. Li, "A Subspace, Interior, + and Conjugate Gradient Method for Large-Scale Bound-Constrained + Minimization Problems," SIAM Journal on Scientific Computing, + Vol. 21, Number 1, pp 1-23, 1999. 
+ """ + v = np.ones_like(x) + dv = np.zeros_like(x) + + mask = (g < 0) & np.isfinite(ub) + v[mask] = ub[mask] - x[mask] + dv[mask] = -1 + + mask = (g > 0) & np.isfinite(lb) + v[mask] = x[mask] - lb[mask] + dv[mask] = 1 + + return v, dv + + +def reflective_transformation(y, lb, ub): + """Compute reflective transformation and its gradient.""" + if in_bounds(y, lb, ub): + return y, np.ones_like(y) + + lb_finite = np.isfinite(lb) + ub_finite = np.isfinite(ub) + + x = y.copy() + g_negative = np.zeros_like(y, dtype=bool) + + mask = lb_finite & ~ub_finite + x[mask] = np.maximum(y[mask], 2 * lb[mask] - y[mask]) + g_negative[mask] = y[mask] < lb[mask] + + mask = ~lb_finite & ub_finite + x[mask] = np.minimum(y[mask], 2 * ub[mask] - y[mask]) + g_negative[mask] = y[mask] > ub[mask] + + mask = lb_finite & ub_finite + d = ub - lb + t = np.remainder(y[mask] - lb[mask], 2 * d[mask]) + x[mask] = lb[mask] + np.minimum(t, 2 * d[mask] - t) + g_negative[mask] = t > d[mask] + + g = np.ones_like(y) + g[g_negative] = -1 + + return x, g + + +# Functions to display algorithm's progress. + + +def print_header_nonlinear(): + print("{:^15}{:^15}{:^15}{:^15}{:^15}{:^15}" + .format("Iteration", "Total nfev", "Cost", "Cost reduction", + "Step norm", "Optimality")) + + +def print_iteration_nonlinear(iteration, nfev, cost, cost_reduction, + step_norm, optimality): + if cost_reduction is None: + cost_reduction = " " * 15 + else: + cost_reduction = f"{cost_reduction:^15.2e}" + + if step_norm is None: + step_norm = " " * 15 + else: + step_norm = f"{step_norm:^15.2e}" + + print(f"{iteration:^15}{nfev:^15}{cost:^15.4e}{cost_reduction}{step_norm}{optimality:^15.2e}") + + +def print_header_linear(): + print("{:^15}{:^15}{:^15}{:^15}{:^15}" + .format("Iteration", "Cost", "Cost reduction", "Step norm", + "Optimality")) + + +def print_iteration_linear(iteration, cost, cost_reduction, step_norm, + optimality): + if cost_reduction is None: + cost_reduction = " " * 15 + else: + cost_reduction = f"{cost_reduction:^15.2e}" + + if step_norm is None: + step_norm = " " * 15 + else: + step_norm = f"{step_norm:^15.2e}" + + print(f"{iteration:^15}{cost:^15.4e}{cost_reduction}{step_norm}{optimality:^15.2e}") + + +# Simple helper functions. 
+ + +def compute_grad(J, f): + """Compute gradient of the least-squares cost function.""" + if isinstance(J, LinearOperator): + return J.rmatvec(f) + else: + return J.T.dot(f) + + +def compute_jac_scale(J, scale_inv_old=None): + """Compute variables scale based on the Jacobian matrix.""" + if issparse(J): + scale_inv = np.asarray(J.power(2).sum(axis=0)).ravel()**0.5 + else: + scale_inv = np.sum(J**2, axis=0)**0.5 + + if scale_inv_old is None: + scale_inv[scale_inv == 0] = 1 + else: + scale_inv = np.maximum(scale_inv, scale_inv_old) + + return 1 / scale_inv, scale_inv + + +def left_multiplied_operator(J, d): + """Return diag(d) J as LinearOperator.""" + J = aslinearoperator(J) + + def matvec(x): + return d * J.matvec(x) + + def matmat(X): + return d[:, np.newaxis] * J.matmat(X) + + def rmatvec(x): + return J.rmatvec(x.ravel() * d) + + return LinearOperator(J.shape, matvec=matvec, matmat=matmat, + rmatvec=rmatvec) + + +def right_multiplied_operator(J, d): + """Return J diag(d) as LinearOperator.""" + J = aslinearoperator(J) + + def matvec(x): + return J.matvec(np.ravel(x) * d) + + def matmat(X): + return J.matmat(X * d[:, np.newaxis]) + + def rmatvec(x): + return d * J.rmatvec(x) + + return LinearOperator(J.shape, matvec=matvec, matmat=matmat, + rmatvec=rmatvec) + + +def regularized_lsq_operator(J, diag): + """Return a matrix arising in regularized least squares as LinearOperator. + + The matrix is + [ J ] + [ D ] + where D is diagonal matrix with elements from `diag`. + """ + J = aslinearoperator(J) + m, n = J.shape + + def matvec(x): + return np.hstack((J.matvec(x), diag * x)) + + def rmatvec(x): + x1 = x[:m] + x2 = x[m:] + return J.rmatvec(x1) + diag * x2 + + return LinearOperator((m + n, n), matvec=matvec, rmatvec=rmatvec) + + +def right_multiply(J, d, copy=True): + """Compute J diag(d). + + If `copy` is False, `J` is modified in place (unless being LinearOperator). + """ + if copy and not isinstance(J, LinearOperator): + J = J.copy() + + if issparse(J): + J.data *= d.take(J.indices, mode='clip') # scikit-learn recipe. + elif isinstance(J, LinearOperator): + J = right_multiplied_operator(J, d) + else: + J *= d + + return J + + +def left_multiply(J, d, copy=True): + """Compute diag(d) J. + + If `copy` is False, `J` is modified in place (unless being LinearOperator). + """ + if copy and not isinstance(J, LinearOperator): + J = J.copy() + + if issparse(J): + J.data *= np.repeat(d, np.diff(J.indptr)) # scikit-learn recipe. + elif isinstance(J, LinearOperator): + J = left_multiplied_operator(J, d) + else: + J *= d[:, np.newaxis] + + return J + + +def check_termination(dF, F, dx_norm, x_norm, ratio, ftol, xtol): + """Check termination condition for nonlinear least squares.""" + ftol_satisfied = dF < ftol * F and ratio > 0.25 + xtol_satisfied = dx_norm < xtol * (xtol + x_norm) + + if ftol_satisfied and xtol_satisfied: + return 4 + elif ftol_satisfied: + return 2 + elif xtol_satisfied: + return 3 + else: + return None + + +def scale_for_robust_loss_function(J, f, rho): + """Scale Jacobian and residuals for a robust loss function. + + Arrays are modified in place. 
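Editor's note: ``right_multiplied_operator`` and its left-hand counterpart avoid ever forming ``J @ diag(d)`` explicitly. The sketch below rebuilds the same idea with the public ``scipy.sparse.linalg.LinearOperator`` interface and checks it against the dense product; the data are random and this is not the library's implementation::

    import numpy as np
    from scipy.sparse.linalg import LinearOperator, aslinearoperator

    rng = np.random.default_rng(1)
    J = rng.standard_normal((4, 3))
    d = np.array([0.5, 2.0, 4.0])

    Jop = aslinearoperator(J)
    J_d = LinearOperator(J.shape,
                         matvec=lambda x: Jop.matvec(np.ravel(x) * d),
                         rmatvec=lambda x: d * Jop.rmatvec(x))

    x = rng.standard_normal(3)
    print(np.allclose(J_d.matvec(x), (J * d) @ x))  # True: same as dense J @ diag(d)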
+ """ + J_scale = rho[1] + 2 * rho[2] * f**2 + J_scale[J_scale < EPS] = EPS + J_scale **= 0.5 + + f *= rho[1] / J_scale + + return left_multiply(J, J_scale, copy=False), f diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_lsq/dogbox.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_lsq/dogbox.py new file mode 100644 index 0000000000000000000000000000000000000000..6bb5abbe79028afed7b110603a0d5dfd6affae7f --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_lsq/dogbox.py @@ -0,0 +1,331 @@ +""" +Dogleg algorithm with rectangular trust regions for least-squares minimization. + +The description of the algorithm can be found in [Voglis]_. The algorithm does +trust-region iterations, but the shape of trust regions is rectangular as +opposed to conventional elliptical. The intersection of a trust region and +an initial feasible region is again some rectangle. Thus, on each iteration a +bound-constrained quadratic optimization problem is solved. + +A quadratic problem is solved by well-known dogleg approach, where the +function is minimized along piecewise-linear "dogleg" path [NumOpt]_, +Chapter 4. If Jacobian is not rank-deficient then the function is decreasing +along this path, and optimization amounts to simply following along this +path as long as a point stays within the bounds. A constrained Cauchy step +(along the anti-gradient) is considered for safety in rank deficient cases, +in this situations the convergence might be slow. + +If during iterations some variable hit the initial bound and the component +of anti-gradient points outside the feasible region, then a next dogleg step +won't make any progress. At this state such variables satisfy first-order +optimality conditions and they are excluded before computing a next dogleg +step. + +Gauss-Newton step can be computed exactly by `numpy.linalg.lstsq` (for dense +Jacobian matrices) or by iterative procedure `scipy.sparse.linalg.lsmr` (for +dense and sparse matrices, or Jacobian being LinearOperator). The second +option allows to solve very large problems (up to couple of millions of +residuals on a regular PC), provided the Jacobian matrix is sufficiently +sparse. But note that dogbox is not very good for solving problems with +large number of constraints, because of variables exclusion-inclusion on each +iteration (a required number of function evaluations might be high or accuracy +of a solution will be poor), thus its large-scale usage is probably limited +to unconstrained problems. + +References +---------- +.. [Voglis] C. Voglis and I. E. Lagaris, "A Rectangular Trust Region Dogleg + Approach for Unconstrained and Bound Constrained Nonlinear + Optimization", WSEAS International Conference on Applied + Mathematics, Corfu, Greece, 2004. +.. [NumOpt] J. Nocedal and S. J. Wright, "Numerical optimization, 2nd edition". 
+""" +import numpy as np +from numpy.linalg import lstsq, norm + +from scipy.sparse.linalg import LinearOperator, aslinearoperator, lsmr +from scipy.optimize import OptimizeResult + +from .common import ( + step_size_to_bound, in_bounds, update_tr_radius, evaluate_quadratic, + build_quadratic_1d, minimize_quadratic_1d, compute_grad, + compute_jac_scale, check_termination, scale_for_robust_loss_function, + print_header_nonlinear, print_iteration_nonlinear) + + +def lsmr_operator(Jop, d, active_set): + """Compute LinearOperator to use in LSMR by dogbox algorithm. + + `active_set` mask is used to excluded active variables from computations + of matrix-vector products. + """ + m, n = Jop.shape + + def matvec(x): + x_free = x.ravel().copy() + x_free[active_set] = 0 + return Jop.matvec(x * d) + + def rmatvec(x): + r = d * Jop.rmatvec(x) + r[active_set] = 0 + return r + + return LinearOperator((m, n), matvec=matvec, rmatvec=rmatvec, dtype=float) + + +def find_intersection(x, tr_bounds, lb, ub): + """Find intersection of trust-region bounds and initial bounds. + + Returns + ------- + lb_total, ub_total : ndarray with shape of x + Lower and upper bounds of the intersection region. + orig_l, orig_u : ndarray of bool with shape of x + True means that an original bound is taken as a corresponding bound + in the intersection region. + tr_l, tr_u : ndarray of bool with shape of x + True means that a trust-region bound is taken as a corresponding bound + in the intersection region. + """ + lb_centered = lb - x + ub_centered = ub - x + + lb_total = np.maximum(lb_centered, -tr_bounds) + ub_total = np.minimum(ub_centered, tr_bounds) + + orig_l = np.equal(lb_total, lb_centered) + orig_u = np.equal(ub_total, ub_centered) + + tr_l = np.equal(lb_total, -tr_bounds) + tr_u = np.equal(ub_total, tr_bounds) + + return lb_total, ub_total, orig_l, orig_u, tr_l, tr_u + + +def dogleg_step(x, newton_step, g, a, b, tr_bounds, lb, ub): + """Find dogleg step in a rectangular region. + + Returns + ------- + step : ndarray, shape (n,) + Computed dogleg step. + bound_hits : ndarray of int, shape (n,) + Each component shows whether a corresponding variable hits the + initial bound after the step is taken: + * 0 - a variable doesn't hit the bound. + * -1 - lower bound is hit. + * 1 - upper bound is hit. + tr_hit : bool + Whether the step hit the boundary of the trust-region. + """ + lb_total, ub_total, orig_l, orig_u, tr_l, tr_u = find_intersection( + x, tr_bounds, lb, ub + ) + bound_hits = np.zeros_like(x, dtype=int) + + if in_bounds(newton_step, lb_total, ub_total): + return newton_step, bound_hits, False + + to_bounds, _ = step_size_to_bound(np.zeros_like(x), -g, lb_total, ub_total) + + # The classical dogleg algorithm would check if Cauchy step fits into + # the bounds, and just return it constrained version if not. But in a + # rectangular trust region it makes sense to try to improve constrained + # Cauchy step too. Thus, we don't distinguish these two cases. 
+ + cauchy_step = -minimize_quadratic_1d(a, b, 0, to_bounds)[0] * g + + step_diff = newton_step - cauchy_step + step_size, hits = step_size_to_bound(cauchy_step, step_diff, + lb_total, ub_total) + bound_hits[(hits < 0) & orig_l] = -1 + bound_hits[(hits > 0) & orig_u] = 1 + tr_hit = np.any((hits < 0) & tr_l | (hits > 0) & tr_u) + + return cauchy_step + step_size * step_diff, bound_hits, tr_hit + + +def dogbox(fun, jac, x0, f0, J0, lb, ub, ftol, xtol, gtol, max_nfev, x_scale, + loss_function, tr_solver, tr_options, verbose): + f = f0 + f_true = f.copy() + nfev = 1 + + J = J0 + njev = 1 + + if loss_function is not None: + rho = loss_function(f) + cost = 0.5 * np.sum(rho[0]) + J, f = scale_for_robust_loss_function(J, f, rho) + else: + cost = 0.5 * np.dot(f, f) + + g = compute_grad(J, f) + + jac_scale = isinstance(x_scale, str) and x_scale == 'jac' + if jac_scale: + scale, scale_inv = compute_jac_scale(J) + else: + scale, scale_inv = x_scale, 1 / x_scale + + Delta = norm(x0 * scale_inv, ord=np.inf) + if Delta == 0: + Delta = 1.0 + + on_bound = np.zeros_like(x0, dtype=int) + on_bound[np.equal(x0, lb)] = -1 + on_bound[np.equal(x0, ub)] = 1 + + x = x0 + step = np.empty_like(x0) + + if max_nfev is None: + max_nfev = x0.size * 100 + + termination_status = None + iteration = 0 + step_norm = None + actual_reduction = None + + if verbose == 2: + print_header_nonlinear() + + while True: + active_set = on_bound * g < 0 + free_set = ~active_set + + g_free = g[free_set] + g_full = g.copy() + g[active_set] = 0 + + g_norm = norm(g, ord=np.inf) + if g_norm < gtol: + termination_status = 1 + + if verbose == 2: + print_iteration_nonlinear(iteration, nfev, cost, actual_reduction, + step_norm, g_norm) + + if termination_status is not None or nfev == max_nfev: + break + + x_free = x[free_set] + lb_free = lb[free_set] + ub_free = ub[free_set] + scale_free = scale[free_set] + + # Compute (Gauss-)Newton and build quadratic model for Cauchy step. + if tr_solver == 'exact': + J_free = J[:, free_set] + newton_step = lstsq(J_free, -f, rcond=-1)[0] + + # Coefficients for the quadratic model along the anti-gradient. + a, b = build_quadratic_1d(J_free, g_free, -g_free) + elif tr_solver == 'lsmr': + Jop = aslinearoperator(J) + + # We compute lsmr step in scaled variables and then + # transform back to normal variables, if lsmr would give exact lsq + # solution, this would be equivalent to not doing any + # transformations, but from experience it's better this way. + + # We pass active_set to make computations as if we selected + # the free subset of J columns, but without actually doing any + # slicing, which is expensive for sparse matrices and impossible + # for LinearOperator. + + lsmr_op = lsmr_operator(Jop, scale, active_set) + newton_step = -lsmr(lsmr_op, f, **tr_options)[0][free_set] + newton_step *= scale_free + + # Components of g for active variables were zeroed, so this call + # is correct and equivalent to using J_free and g_free. + a, b = build_quadratic_1d(Jop, g, -g) + + actual_reduction = -1.0 + while actual_reduction <= 0 and nfev < max_nfev: + tr_bounds = Delta * scale_free + + step_free, on_bound_free, tr_hit = dogleg_step( + x_free, newton_step, g_free, a, b, tr_bounds, lb_free, ub_free) + + step.fill(0.0) + step[free_set] = step_free + + if tr_solver == 'exact': + predicted_reduction = -evaluate_quadratic(J_free, g_free, + step_free) + elif tr_solver == 'lsmr': + predicted_reduction = -evaluate_quadratic(Jop, g, step) + + # gh11403 ensure that solution is fully within bounds. 
+ x_new = np.clip(x + step, lb, ub) + + f_new = fun(x_new) + nfev += 1 + + step_h_norm = norm(step * scale_inv, ord=np.inf) + + if not np.all(np.isfinite(f_new)): + Delta = 0.25 * step_h_norm + continue + + # Usual trust-region step quality estimation. + if loss_function is not None: + cost_new = loss_function(f_new, cost_only=True) + else: + cost_new = 0.5 * np.dot(f_new, f_new) + actual_reduction = cost - cost_new + + Delta, ratio = update_tr_radius( + Delta, actual_reduction, predicted_reduction, + step_h_norm, tr_hit + ) + + step_norm = norm(step) + termination_status = check_termination( + actual_reduction, cost, step_norm, norm(x), ratio, ftol, xtol) + + if termination_status is not None: + break + + if actual_reduction > 0: + on_bound[free_set] = on_bound_free + + x = x_new + # Set variables exactly at the boundary. + mask = on_bound == -1 + x[mask] = lb[mask] + mask = on_bound == 1 + x[mask] = ub[mask] + + f = f_new + f_true = f.copy() + + cost = cost_new + + J = jac(x, f) + njev += 1 + + if loss_function is not None: + rho = loss_function(f) + J, f = scale_for_robust_loss_function(J, f, rho) + + g = compute_grad(J, f) + + if jac_scale: + scale, scale_inv = compute_jac_scale(J, scale_inv) + else: + step_norm = 0 + actual_reduction = 0 + + iteration += 1 + + if termination_status is None: + termination_status = 0 + + return OptimizeResult( + x=x, cost=cost, fun=f_true, jac=J, grad=g_full, optimality=g_norm, + active_mask=on_bound, nfev=nfev, njev=njev, status=termination_status) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_lsq/least_squares.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_lsq/least_squares.py new file mode 100644 index 0000000000000000000000000000000000000000..1595e40d16a01b8355510c4721ca0fb6b5b23b4a --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_lsq/least_squares.py @@ -0,0 +1,972 @@ +"""Generic interface for least-squares minimization.""" +from warnings import warn + +import numpy as np +from numpy.linalg import norm + +from scipy.sparse import issparse +from scipy.sparse.linalg import LinearOperator +from scipy.optimize import _minpack, OptimizeResult +from scipy.optimize._numdiff import approx_derivative, group_columns +from scipy.optimize._minimize import Bounds + +from .trf import trf +from .dogbox import dogbox +from .common import EPS, in_bounds, make_strictly_feasible + + +TERMINATION_MESSAGES = { + -1: "Improper input parameters status returned from `leastsq`", + 0: "The maximum number of function evaluations is exceeded.", + 1: "`gtol` termination condition is satisfied.", + 2: "`ftol` termination condition is satisfied.", + 3: "`xtol` termination condition is satisfied.", + 4: "Both `ftol` and `xtol` termination conditions are satisfied." +} + + +FROM_MINPACK_TO_COMMON = { + 0: -1, # Improper input parameters from MINPACK. + 1: 2, + 2: 3, + 3: 4, + 4: 1, + 5: 0 + # There are 6, 7, 8 for too small tolerance parameters, + # but we guard against it by checking ftol, xtol, gtol beforehand. +} + + +def call_minpack(fun, x0, jac, ftol, xtol, gtol, max_nfev, x_scale, diff_step): + n = x0.size + + if diff_step is None: + epsfcn = EPS + else: + epsfcn = diff_step**2 + + # Compute MINPACK's `diag`, which is inverse of our `x_scale` and + # ``x_scale='jac'`` corresponds to ``diag=None``. 
+ if isinstance(x_scale, str) and x_scale == 'jac': + diag = None + else: + diag = 1 / x_scale + + full_output = True + col_deriv = False + factor = 100.0 + + if jac is None: + if max_nfev is None: + # n squared to account for Jacobian evaluations. + max_nfev = 100 * n * (n + 1) + x, info, status = _minpack._lmdif( + fun, x0, (), full_output, ftol, xtol, gtol, + max_nfev, epsfcn, factor, diag) + else: + if max_nfev is None: + max_nfev = 100 * n + x, info, status = _minpack._lmder( + fun, jac, x0, (), full_output, col_deriv, + ftol, xtol, gtol, max_nfev, factor, diag) + + f = info['fvec'] + + if callable(jac): + J = jac(x) + else: + J = np.atleast_2d(approx_derivative(fun, x)) + + cost = 0.5 * np.dot(f, f) + g = J.T.dot(f) + g_norm = norm(g, ord=np.inf) + + nfev = info['nfev'] + njev = info.get('njev', None) + + status = FROM_MINPACK_TO_COMMON[status] + active_mask = np.zeros_like(x0, dtype=int) + + return OptimizeResult( + x=x, cost=cost, fun=f, jac=J, grad=g, optimality=g_norm, + active_mask=active_mask, nfev=nfev, njev=njev, status=status) + + +def prepare_bounds(bounds, n): + lb, ub = (np.asarray(b, dtype=float) for b in bounds) + if lb.ndim == 0: + lb = np.resize(lb, n) + + if ub.ndim == 0: + ub = np.resize(ub, n) + + return lb, ub + + +def check_tolerance(ftol, xtol, gtol, method): + def check(tol, name): + if tol is None: + tol = 0 + elif tol < EPS: + warn(f"Setting `{name}` below the machine epsilon ({EPS:.2e}) effectively " + f"disables the corresponding termination condition.", + stacklevel=3) + return tol + + ftol = check(ftol, "ftol") + xtol = check(xtol, "xtol") + gtol = check(gtol, "gtol") + + if method == "lm" and (ftol < EPS or xtol < EPS or gtol < EPS): + raise ValueError("All tolerances must be higher than machine epsilon " + f"({EPS:.2e}) for method 'lm'.") + elif ftol < EPS and xtol < EPS and gtol < EPS: + raise ValueError("At least one of the tolerances must be higher than " + f"machine epsilon ({EPS:.2e}).") + + return ftol, xtol, gtol + + +def check_x_scale(x_scale, x0): + if isinstance(x_scale, str) and x_scale == 'jac': + return x_scale + + try: + x_scale = np.asarray(x_scale, dtype=float) + valid = np.all(np.isfinite(x_scale)) and np.all(x_scale > 0) + except (ValueError, TypeError): + valid = False + + if not valid: + raise ValueError("`x_scale` must be 'jac' or array_like with " + "positive numbers.") + + if x_scale.ndim == 0: + x_scale = np.resize(x_scale, x0.shape) + + if x_scale.shape != x0.shape: + raise ValueError("Inconsistent shapes between `x_scale` and `x0`.") + + return x_scale + + +def check_jac_sparsity(jac_sparsity, m, n): + if jac_sparsity is None: + return None + + if not issparse(jac_sparsity): + jac_sparsity = np.atleast_2d(jac_sparsity) + + if jac_sparsity.shape != (m, n): + raise ValueError("`jac_sparsity` has wrong shape.") + + return jac_sparsity, group_columns(jac_sparsity) + + +# Loss functions. 
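Editor's note: one consequence of ``check_tolerance`` above is that, with the default 'trf' method, disabling all three termination tolerances at once is rejected up front with a ``ValueError`` (disabling only some of them is allowed). A minimal reproduction with a toy residual::

    import numpy as np
    from scipy.optimize import least_squares

    def fun(x):
        return np.array([x[0] - 1.0])

    try:
        least_squares(fun, x0=[0.0], ftol=None, xtol=None, gtol=None)
    except ValueError as exc:
        print(exc)   # "At least one of the tolerances must be higher than ..."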
+ + +def huber(z, rho, cost_only): + mask = z <= 1 + rho[0, mask] = z[mask] + rho[0, ~mask] = 2 * z[~mask]**0.5 - 1 + if cost_only: + return + rho[1, mask] = 1 + rho[1, ~mask] = z[~mask]**-0.5 + rho[2, mask] = 0 + rho[2, ~mask] = -0.5 * z[~mask]**-1.5 + + +def soft_l1(z, rho, cost_only): + t = 1 + z + rho[0] = 2 * (t**0.5 - 1) + if cost_only: + return + rho[1] = t**-0.5 + rho[2] = -0.5 * t**-1.5 + + +def cauchy(z, rho, cost_only): + rho[0] = np.log1p(z) + if cost_only: + return + t = 1 + z + rho[1] = 1 / t + rho[2] = -1 / t**2 + + +def arctan(z, rho, cost_only): + rho[0] = np.arctan(z) + if cost_only: + return + t = 1 + z**2 + rho[1] = 1 / t + rho[2] = -2 * z / t**2 + + +IMPLEMENTED_LOSSES = dict(linear=None, huber=huber, soft_l1=soft_l1, + cauchy=cauchy, arctan=arctan) + + +def construct_loss_function(m, loss, f_scale): + if loss == 'linear': + return None + + if not callable(loss): + loss = IMPLEMENTED_LOSSES[loss] + rho = np.empty((3, m)) + + def loss_function(f, cost_only=False): + z = (f / f_scale) ** 2 + loss(z, rho, cost_only=cost_only) + if cost_only: + return 0.5 * f_scale ** 2 * np.sum(rho[0]) + rho[0] *= f_scale ** 2 + rho[2] /= f_scale ** 2 + return rho + else: + def loss_function(f, cost_only=False): + z = (f / f_scale) ** 2 + rho = loss(z) + if cost_only: + return 0.5 * f_scale ** 2 * np.sum(rho[0]) + rho[0] *= f_scale ** 2 + rho[2] /= f_scale ** 2 + return rho + + return loss_function + + +def least_squares( + fun, x0, jac='2-point', bounds=(-np.inf, np.inf), method='trf', + ftol=1e-8, xtol=1e-8, gtol=1e-8, x_scale=1.0, loss='linear', + f_scale=1.0, diff_step=None, tr_solver=None, tr_options=None, + jac_sparsity=None, max_nfev=None, verbose=0, args=(), kwargs=None): + """Solve a nonlinear least-squares problem with bounds on the variables. + + Given the residuals f(x) (an m-D real function of n real + variables) and the loss function rho(s) (a scalar function), `least_squares` + finds a local minimum of the cost function F(x):: + + minimize F(x) = 0.5 * sum(rho(f_i(x)**2), i = 0, ..., m - 1) + subject to lb <= x <= ub + + The purpose of the loss function rho(s) is to reduce the influence of + outliers on the solution. + + Parameters + ---------- + fun : callable + Function which computes the vector of residuals, with the signature + ``fun(x, *args, **kwargs)``, i.e., the minimization proceeds with + respect to its first argument. The argument ``x`` passed to this + function is an ndarray of shape (n,) (never a scalar, even for n=1). + It must allocate and return a 1-D array_like of shape (m,) or a scalar. + If the argument ``x`` is complex or the function ``fun`` returns + complex residuals, it must be wrapped in a real function of real + arguments, as shown at the end of the Examples section. + x0 : array_like with shape (n,) or float + Initial guess on independent variables. If float, it will be treated + as a 1-D array with one element. When `method` is 'trf', the initial + guess might be slightly adjusted to lie sufficiently within the given + `bounds`. + jac : {'2-point', '3-point', 'cs', callable}, optional + Method of computing the Jacobian matrix (an m-by-n matrix, where + element (i, j) is the partial derivative of f[i] with respect to + x[j]). The keywords select a finite difference scheme for numerical + estimation. The scheme '3-point' is more accurate, but requires + twice as many operations as '2-point' (default). 
The scheme 'cs' + uses complex steps, and while potentially the most accurate, it is + applicable only when `fun` correctly handles complex inputs and + can be analytically continued to the complex plane. Method 'lm' + always uses the '2-point' scheme. If callable, it is used as + ``jac(x, *args, **kwargs)`` and should return a good approximation + (or the exact value) for the Jacobian as an array_like (np.atleast_2d + is applied), a sparse matrix (csr_matrix preferred for performance) or + a `scipy.sparse.linalg.LinearOperator`. + bounds : 2-tuple of array_like or `Bounds`, optional + There are two ways to specify bounds: + + 1. Instance of `Bounds` class + 2. Lower and upper bounds on independent variables. Defaults to no + bounds. Each array must match the size of `x0` or be a scalar, + in the latter case a bound will be the same for all variables. + Use ``np.inf`` with an appropriate sign to disable bounds on all + or some variables. + + method : {'trf', 'dogbox', 'lm'}, optional + Algorithm to perform minimization. + + * 'trf' : Trust Region Reflective algorithm, particularly suitable + for large sparse problems with bounds. Generally robust method. + * 'dogbox' : dogleg algorithm with rectangular trust regions, + typical use case is small problems with bounds. Not recommended + for problems with rank-deficient Jacobian. + * 'lm' : Levenberg-Marquardt algorithm as implemented in MINPACK. + Doesn't handle bounds and sparse Jacobians. Usually the most + efficient method for small unconstrained problems. + + Default is 'trf'. See Notes for more information. + ftol : float or None, optional + Tolerance for termination by the change of the cost function. Default + is 1e-8. The optimization process is stopped when ``dF < ftol * F``, + and there was an adequate agreement between a local quadratic model and + the true model in the last step. + + If None and 'method' is not 'lm', the termination by this condition is + disabled. If 'method' is 'lm', this tolerance must be higher than + machine epsilon. + xtol : float or None, optional + Tolerance for termination by the change of the independent variables. + Default is 1e-8. The exact condition depends on the `method` used: + + * For 'trf' and 'dogbox' : ``norm(dx) < xtol * (xtol + norm(x))``. + * For 'lm' : ``Delta < xtol * norm(xs)``, where ``Delta`` is + a trust-region radius and ``xs`` is the value of ``x`` + scaled according to `x_scale` parameter (see below). + + If None and 'method' is not 'lm', the termination by this condition is + disabled. If 'method' is 'lm', this tolerance must be higher than + machine epsilon. + gtol : float or None, optional + Tolerance for termination by the norm of the gradient. Default is 1e-8. + The exact condition depends on a `method` used: + + * For 'trf' : ``norm(g_scaled, ord=np.inf) < gtol``, where + ``g_scaled`` is the value of the gradient scaled to account for + the presence of the bounds [STIR]_. + * For 'dogbox' : ``norm(g_free, ord=np.inf) < gtol``, where + ``g_free`` is the gradient with respect to the variables which + are not in the optimal state on the boundary. + * For 'lm' : the maximum absolute value of the cosine of angles + between columns of the Jacobian and the residual vector is less + than `gtol`, or the residual vector is zero. + + If None and 'method' is not 'lm', the termination by this condition is + disabled. If 'method' is 'lm', this tolerance must be higher than + machine epsilon. + x_scale : array_like or 'jac', optional + Characteristic scale of each variable. 
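Editor's note: the robust losses implemented earlier (``huber``, ``soft_l1``, ``cauchy``, ``arctan``) are selected through the ``loss`` argument documented further below. A short comparison on a straight-line fit with a few gross outliers; the data are synthetic and ``f_scale=0.3`` is an arbitrary choice::

    import numpy as np
    from scipy.optimize import least_squares

    t = np.linspace(0.0, 10.0, 40)
    rng = np.random.default_rng(3)
    y = 0.5 * t + 1.0 + 0.1 * rng.standard_normal(t.size)
    y[::10] += 4.0                  # a few gross outliers

    def residuals(p):
        return p[0] * t + p[1] - y

    plain = least_squares(residuals, x0=[1.0, 0.0])
    robust = least_squares(residuals, x0=[1.0, 0.0], loss='soft_l1', f_scale=0.3)
    print(plain.x)    # pulled toward the outliers
    print(robust.x)   # typically much closer to the true (0.5, 1.0)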
Setting `x_scale` is equivalent + to reformulating the problem in scaled variables ``xs = x / x_scale``. + An alternative view is that the size of a trust region along jth + dimension is proportional to ``x_scale[j]``. Improved convergence may + be achieved by setting `x_scale` such that a step of a given size + along any of the scaled variables has a similar effect on the cost + function. If set to 'jac', the scale is iteratively updated using the + inverse norms of the columns of the Jacobian matrix (as described in + [JJMore]_). + loss : str or callable, optional + Determines the loss function. The following keyword values are allowed: + + * 'linear' (default) : ``rho(z) = z``. Gives a standard + least-squares problem. + * 'soft_l1' : ``rho(z) = 2 * ((1 + z)**0.5 - 1)``. The smooth + approximation of l1 (absolute value) loss. Usually a good + choice for robust least squares. + * 'huber' : ``rho(z) = z if z <= 1 else 2*z**0.5 - 1``. Works + similarly to 'soft_l1'. + * 'cauchy' : ``rho(z) = ln(1 + z)``. Severely weakens outliers + influence, but may cause difficulties in optimization process. + * 'arctan' : ``rho(z) = arctan(z)``. Limits a maximum loss on + a single residual, has properties similar to 'cauchy'. + + If callable, it must take a 1-D ndarray ``z=f**2`` and return an + array_like with shape (3, m) where row 0 contains function values, + row 1 contains first derivatives and row 2 contains second + derivatives. Method 'lm' supports only 'linear' loss. + f_scale : float, optional + Value of soft margin between inlier and outlier residuals, default + is 1.0. The loss function is evaluated as follows + ``rho_(f**2) = C**2 * rho(f**2 / C**2)``, where ``C`` is `f_scale`, + and ``rho`` is determined by `loss` parameter. This parameter has + no effect with ``loss='linear'``, but for other `loss` values it is + of crucial importance. + max_nfev : None or int, optional + Maximum number of function evaluations before the termination. + If None (default), the value is chosen automatically: + + * For 'trf' and 'dogbox' : 100 * n. + * For 'lm' : 100 * n if `jac` is callable and 100 * n * (n + 1) + otherwise (because 'lm' counts function calls in Jacobian + estimation). + + diff_step : None or array_like, optional + Determines the relative step size for the finite difference + approximation of the Jacobian. The actual step is computed as + ``x * diff_step``. If None (default), then `diff_step` is taken to be + a conventional "optimal" power of machine epsilon for the finite + difference scheme used [NR]_. + tr_solver : {None, 'exact', 'lsmr'}, optional + Method for solving trust-region subproblems, relevant only for 'trf' + and 'dogbox' methods. + + * 'exact' is suitable for not very large problems with dense + Jacobian matrices. The computational complexity per iteration is + comparable to a singular value decomposition of the Jacobian + matrix. + * 'lsmr' is suitable for problems with sparse and large Jacobian + matrices. It uses the iterative procedure + `scipy.sparse.linalg.lsmr` for finding a solution of a linear + least-squares problem and only requires matrix-vector product + evaluations. + + If None (default), the solver is chosen based on the type of Jacobian + returned on the first iteration. + tr_options : dict, optional + Keyword options passed to trust-region solver. + + * ``tr_solver='exact'``: `tr_options` are ignored. + * ``tr_solver='lsmr'``: options for `scipy.sparse.linalg.lsmr`. 
+ Additionally, ``method='trf'`` supports 'regularize' option + (bool, default is True), which adds a regularization term to the + normal equation, which improves convergence if the Jacobian is + rank-deficient [Byrd]_ (eq. 3.4). + + jac_sparsity : {None, array_like, sparse matrix}, optional + Defines the sparsity structure of the Jacobian matrix for finite + difference estimation, its shape must be (m, n). If the Jacobian has + only few non-zero elements in *each* row, providing the sparsity + structure will greatly speed up the computations [Curtis]_. A zero + entry means that a corresponding element in the Jacobian is identically + zero. If provided, forces the use of 'lsmr' trust-region solver. + If None (default), then dense differencing will be used. Has no effect + for 'lm' method. + verbose : {0, 1, 2}, optional + Level of algorithm's verbosity: + + * 0 (default) : work silently. + * 1 : display a termination report. + * 2 : display progress during iterations (not supported by 'lm' + method). + + args, kwargs : tuple and dict, optional + Additional arguments passed to `fun` and `jac`. Both empty by default. + The calling signature is ``fun(x, *args, **kwargs)`` and the same for + `jac`. + + Returns + ------- + result : OptimizeResult + `OptimizeResult` with the following fields defined: + + x : ndarray, shape (n,) + Solution found. + cost : float + Value of the cost function at the solution. + fun : ndarray, shape (m,) + Vector of residuals at the solution. + jac : ndarray, sparse matrix or LinearOperator, shape (m, n) + Modified Jacobian matrix at the solution, in the sense that J^T J + is a Gauss-Newton approximation of the Hessian of the cost function. + The type is the same as the one used by the algorithm. + grad : ndarray, shape (m,) + Gradient of the cost function at the solution. + optimality : float + First-order optimality measure. In unconstrained problems, it is + always the uniform norm of the gradient. In constrained problems, + it is the quantity which was compared with `gtol` during iterations. + active_mask : ndarray of int, shape (n,) + Each component shows whether a corresponding constraint is active + (that is, whether a variable is at the bound): + + * 0 : a constraint is not active. + * -1 : a lower bound is active. + * 1 : an upper bound is active. + + Might be somewhat arbitrary for 'trf' method as it generates a + sequence of strictly feasible iterates and `active_mask` is + determined within a tolerance threshold. + nfev : int + Number of function evaluations done. Methods 'trf' and 'dogbox' do + not count function calls for numerical Jacobian approximation, as + opposed to 'lm' method. + njev : int or None + Number of Jacobian evaluations done. If numerical Jacobian + approximation is used in 'lm' method, it is set to None. + status : int + The reason for algorithm termination: + + * -1 : improper input parameters status returned from MINPACK. + * 0 : the maximum number of function evaluations is exceeded. + * 1 : `gtol` termination condition is satisfied. + * 2 : `ftol` termination condition is satisfied. + * 3 : `xtol` termination condition is satisfied. + * 4 : Both `ftol` and `xtol` termination conditions are satisfied. + + message : str + Verbal description of the termination reason. + success : bool + True if one of the convergence criteria is satisfied (`status` > 0). + + See Also + -------- + leastsq : A legacy wrapper for the MINPACK implementation of the + Levenberg-Marquadt algorithm. 
+ curve_fit : Least-squares minimization applied to a curve-fitting problem. + + Notes + ----- + Method 'lm' (Levenberg-Marquardt) calls a wrapper over least-squares + algorithms implemented in MINPACK (lmder, lmdif). It runs the + Levenberg-Marquardt algorithm formulated as a trust-region type algorithm. + The implementation is based on paper [JJMore]_, it is very robust and + efficient with a lot of smart tricks. It should be your first choice + for unconstrained problems. Note that it doesn't support bounds. Also, + it doesn't work when m < n. + + Method 'trf' (Trust Region Reflective) is motivated by the process of + solving a system of equations, which constitute the first-order optimality + condition for a bound-constrained minimization problem as formulated in + [STIR]_. The algorithm iteratively solves trust-region subproblems + augmented by a special diagonal quadratic term and with trust-region shape + determined by the distance from the bounds and the direction of the + gradient. This enhancements help to avoid making steps directly into bounds + and efficiently explore the whole space of variables. To further improve + convergence, the algorithm considers search directions reflected from the + bounds. To obey theoretical requirements, the algorithm keeps iterates + strictly feasible. With dense Jacobians trust-region subproblems are + solved by an exact method very similar to the one described in [JJMore]_ + (and implemented in MINPACK). The difference from the MINPACK + implementation is that a singular value decomposition of a Jacobian + matrix is done once per iteration, instead of a QR decomposition and series + of Givens rotation eliminations. For large sparse Jacobians a 2-D subspace + approach of solving trust-region subproblems is used [STIR]_, [Byrd]_. + The subspace is spanned by a scaled gradient and an approximate + Gauss-Newton solution delivered by `scipy.sparse.linalg.lsmr`. When no + constraints are imposed the algorithm is very similar to MINPACK and has + generally comparable performance. The algorithm works quite robust in + unbounded and bounded problems, thus it is chosen as a default algorithm. + + Method 'dogbox' operates in a trust-region framework, but considers + rectangular trust regions as opposed to conventional ellipsoids [Voglis]_. + The intersection of a current trust region and initial bounds is again + rectangular, so on each iteration a quadratic minimization problem subject + to bound constraints is solved approximately by Powell's dogleg method + [NumOpt]_. The required Gauss-Newton step can be computed exactly for + dense Jacobians or approximately by `scipy.sparse.linalg.lsmr` for large + sparse Jacobians. The algorithm is likely to exhibit slow convergence when + the rank of Jacobian is less than the number of variables. The algorithm + often outperforms 'trf' in bounded problems with a small number of + variables. + + Robust loss functions are implemented as described in [BA]_. The idea + is to modify a residual vector and a Jacobian matrix on each iteration + such that computed gradient and Gauss-Newton Hessian approximation match + the true gradient and Hessian approximation of the cost function. Then + the algorithm proceeds in a normal way, i.e., robust loss functions are + implemented as a simple wrapper over standard least-squares algorithms. + + .. versionadded:: 0.17.0 + + References + ---------- + .. [STIR] M. A. Branch, T. F. Coleman, and Y. 
Li, "A Subspace, Interior, + and Conjugate Gradient Method for Large-Scale Bound-Constrained + Minimization Problems," SIAM Journal on Scientific Computing, + Vol. 21, Number 1, pp 1-23, 1999. + .. [NR] William H. Press et. al., "Numerical Recipes. The Art of Scientific + Computing. 3rd edition", Sec. 5.7. + .. [Byrd] R. H. Byrd, R. B. Schnabel and G. A. Shultz, "Approximate + solution of the trust region problem by minimization over + two-dimensional subspaces", Math. Programming, 40, pp. 247-263, + 1988. + .. [Curtis] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of + sparse Jacobian matrices", Journal of the Institute of + Mathematics and its Applications, 13, pp. 117-120, 1974. + .. [JJMore] J. J. More, "The Levenberg-Marquardt Algorithm: Implementation + and Theory," Numerical Analysis, ed. G. A. Watson, Lecture + Notes in Mathematics 630, Springer Verlag, pp. 105-116, 1977. + .. [Voglis] C. Voglis and I. E. Lagaris, "A Rectangular Trust Region + Dogleg Approach for Unconstrained and Bound Constrained + Nonlinear Optimization", WSEAS International Conference on + Applied Mathematics, Corfu, Greece, 2004. + .. [NumOpt] J. Nocedal and S. J. Wright, "Numerical optimization, + 2nd edition", Chapter 4. + .. [BA] B. Triggs et. al., "Bundle Adjustment - A Modern Synthesis", + Proceedings of the International Workshop on Vision Algorithms: + Theory and Practice, pp. 298-372, 1999. + + Examples + -------- + In this example we find a minimum of the Rosenbrock function without bounds + on independent variables. + + >>> import numpy as np + >>> def fun_rosenbrock(x): + ... return np.array([10 * (x[1] - x[0]**2), (1 - x[0])]) + + Notice that we only provide the vector of the residuals. The algorithm + constructs the cost function as a sum of squares of the residuals, which + gives the Rosenbrock function. The exact minimum is at ``x = [1.0, 1.0]``. + + >>> from scipy.optimize import least_squares + >>> x0_rosenbrock = np.array([2, 2]) + >>> res_1 = least_squares(fun_rosenbrock, x0_rosenbrock) + >>> res_1.x + array([ 1., 1.]) + >>> res_1.cost + 9.8669242910846867e-30 + >>> res_1.optimality + 8.8928864934219529e-14 + + We now constrain the variables, in such a way that the previous solution + becomes infeasible. Specifically, we require that ``x[1] >= 1.5``, and + ``x[0]`` left unconstrained. To this end, we specify the `bounds` parameter + to `least_squares` in the form ``bounds=([-np.inf, 1.5], np.inf)``. + + We also provide the analytic Jacobian: + + >>> def jac_rosenbrock(x): + ... return np.array([ + ... [-20 * x[0], 10], + ... [-1, 0]]) + + Putting this all together, we see that the new solution lies on the bound: + + >>> res_2 = least_squares(fun_rosenbrock, x0_rosenbrock, jac_rosenbrock, + ... bounds=([-np.inf, 1.5], np.inf)) + >>> res_2.x + array([ 1.22437075, 1.5 ]) + >>> res_2.cost + 0.025213093946805685 + >>> res_2.optimality + 1.5885401433157753e-07 + + Now we solve a system of equations (i.e., the cost function should be zero + at a minimum) for a Broyden tridiagonal vector-valued function of 100000 + variables: + + >>> def fun_broyden(x): + ... f = (3 - x) * x + 1 + ... f[1:] -= x[:-1] + ... f[:-1] -= 2 * x[1:] + ... return f + + The corresponding Jacobian matrix is sparse. We tell the algorithm to + estimate it by finite differences and provide the sparsity structure of + Jacobian to significantly speed up this process. + + >>> from scipy.sparse import lil_matrix + >>> def sparsity_broyden(n): + ... sparsity = lil_matrix((n, n), dtype=int) + ... i = np.arange(n) + ... 
sparsity[i, i] = 1 + ... i = np.arange(1, n) + ... sparsity[i, i - 1] = 1 + ... i = np.arange(n - 1) + ... sparsity[i, i + 1] = 1 + ... return sparsity + ... + >>> n = 100000 + >>> x0_broyden = -np.ones(n) + ... + >>> res_3 = least_squares(fun_broyden, x0_broyden, + ... jac_sparsity=sparsity_broyden(n)) + >>> res_3.cost + 4.5687069299604613e-23 + >>> res_3.optimality + 1.1650454296851518e-11 + + Let's also solve a curve fitting problem using robust loss function to + take care of outliers in the data. Define the model function as + ``y = a + b * exp(c * t)``, where t is a predictor variable, y is an + observation and a, b, c are parameters to estimate. + + First, define the function which generates the data with noise and + outliers, define the model parameters, and generate data: + + >>> from numpy.random import default_rng + >>> rng = default_rng() + >>> def gen_data(t, a, b, c, noise=0., n_outliers=0, seed=None): + ... rng = default_rng(seed) + ... + ... y = a + b * np.exp(t * c) + ... + ... error = noise * rng.standard_normal(t.size) + ... outliers = rng.integers(0, t.size, n_outliers) + ... error[outliers] *= 10 + ... + ... return y + error + ... + >>> a = 0.5 + >>> b = 2.0 + >>> c = -1 + >>> t_min = 0 + >>> t_max = 10 + >>> n_points = 15 + ... + >>> t_train = np.linspace(t_min, t_max, n_points) + >>> y_train = gen_data(t_train, a, b, c, noise=0.1, n_outliers=3) + + Define function for computing residuals and initial estimate of + parameters. + + >>> def fun(x, t, y): + ... return x[0] + x[1] * np.exp(x[2] * t) - y + ... + >>> x0 = np.array([1.0, 1.0, 0.0]) + + Compute a standard least-squares solution: + + >>> res_lsq = least_squares(fun, x0, args=(t_train, y_train)) + + Now compute two solutions with two different robust loss functions. The + parameter `f_scale` is set to 0.1, meaning that inlier residuals should + not significantly exceed 0.1 (the noise level used). + + >>> res_soft_l1 = least_squares(fun, x0, loss='soft_l1', f_scale=0.1, + ... args=(t_train, y_train)) + >>> res_log = least_squares(fun, x0, loss='cauchy', f_scale=0.1, + ... args=(t_train, y_train)) + + And, finally, plot all the curves. We see that by selecting an appropriate + `loss` we can get estimates close to optimal even in the presence of + strong outliers. But keep in mind that generally it is recommended to try + 'soft_l1' or 'huber' losses first (if at all necessary) as the other two + options may cause difficulties in optimization process. + + >>> t_test = np.linspace(t_min, t_max, n_points * 10) + >>> y_true = gen_data(t_test, a, b, c) + >>> y_lsq = gen_data(t_test, *res_lsq.x) + >>> y_soft_l1 = gen_data(t_test, *res_soft_l1.x) + >>> y_log = gen_data(t_test, *res_log.x) + ... + >>> import matplotlib.pyplot as plt + >>> plt.plot(t_train, y_train, 'o') + >>> plt.plot(t_test, y_true, 'k', linewidth=2, label='true') + >>> plt.plot(t_test, y_lsq, label='linear loss') + >>> plt.plot(t_test, y_soft_l1, label='soft_l1 loss') + >>> plt.plot(t_test, y_log, label='cauchy loss') + >>> plt.xlabel("t") + >>> plt.ylabel("y") + >>> plt.legend() + >>> plt.show() + + In the next example, we show how complex-valued residual functions of + complex variables can be optimized with ``least_squares()``. Consider the + following function: + + >>> def f(z): + ... return z - (0.5 + 0.5j) + + We wrap it into a function of real variables that returns real residuals + by simply handling the real and imaginary parts as independent variables: + + >>> def f_wrap(x): + ... fx = f(x[0] + 1j*x[1]) + ... 
return np.array([fx.real, fx.imag]) + + Thus, instead of the original m-D complex function of n complex + variables we optimize a 2m-D real function of 2n real variables: + + >>> from scipy.optimize import least_squares + >>> res_wrapped = least_squares(f_wrap, (0.1, 0.1), bounds=([0, 0], [1, 1])) + >>> z = res_wrapped.x[0] + res_wrapped.x[1]*1j + >>> z + (0.49999999999925893+0.49999999999925893j) + + """ + if method not in ['trf', 'dogbox', 'lm']: + raise ValueError("`method` must be 'trf', 'dogbox' or 'lm'.") + + if jac not in ['2-point', '3-point', 'cs'] and not callable(jac): + raise ValueError("`jac` must be '2-point', '3-point', 'cs' or " + "callable.") + + if tr_solver not in [None, 'exact', 'lsmr']: + raise ValueError("`tr_solver` must be None, 'exact' or 'lsmr'.") + + if loss not in IMPLEMENTED_LOSSES and not callable(loss): + raise ValueError(f"`loss` must be one of {IMPLEMENTED_LOSSES.keys()}" + " or a callable.") + + if method == 'lm' and loss != 'linear': + raise ValueError("method='lm' supports only 'linear' loss function.") + + if verbose not in [0, 1, 2]: + raise ValueError("`verbose` must be in [0, 1, 2].") + + if max_nfev is not None and max_nfev <= 0: + raise ValueError("`max_nfev` must be None or positive integer.") + + if np.iscomplexobj(x0): + raise ValueError("`x0` must be real.") + + x0 = np.atleast_1d(x0).astype(float) + + if x0.ndim > 1: + raise ValueError("`x0` must have at most 1 dimension.") + + if isinstance(bounds, Bounds): + lb, ub = bounds.lb, bounds.ub + bounds = (lb, ub) + else: + if len(bounds) == 2: + lb, ub = prepare_bounds(bounds, x0.shape[0]) + else: + raise ValueError("`bounds` must contain 2 elements.") + + if method == 'lm' and not np.all((lb == -np.inf) & (ub == np.inf)): + raise ValueError("Method 'lm' doesn't support bounds.") + + if lb.shape != x0.shape or ub.shape != x0.shape: + raise ValueError("Inconsistent shapes between bounds and `x0`.") + + if np.any(lb >= ub): + raise ValueError("Each lower bound must be strictly less than each " + "upper bound.") + + if not in_bounds(x0, lb, ub): + raise ValueError("Initial guess is outside of provided bounds") + + x_scale = check_x_scale(x_scale, x0) + + ftol, xtol, gtol = check_tolerance(ftol, xtol, gtol, method) + + if method == 'trf': + x0 = make_strictly_feasible(x0, lb, ub) + + if kwargs is None: + kwargs = {} + if tr_options is None: + tr_options = {} + + def fun_wrapped(x): + return np.atleast_1d(fun(x, *args, **kwargs)) + + f0 = fun_wrapped(x0) + + if f0.ndim != 1: + raise ValueError("`fun` must return at most 1-d array_like. 
" + f"f0.shape: {f0.shape}") + + if not np.all(np.isfinite(f0)): + raise ValueError("Residuals are not finite in the initial point.") + + n = x0.size + m = f0.size + + if method == 'lm' and m < n: + raise ValueError("Method 'lm' doesn't work when the number of " + "residuals is less than the number of variables.") + + loss_function = construct_loss_function(m, loss, f_scale) + if callable(loss): + rho = loss_function(f0) + if rho.shape != (3, m): + raise ValueError("The return value of `loss` callable has wrong " + "shape.") + initial_cost = 0.5 * np.sum(rho[0]) + elif loss_function is not None: + initial_cost = loss_function(f0, cost_only=True) + else: + initial_cost = 0.5 * np.dot(f0, f0) + + if callable(jac): + J0 = jac(x0, *args, **kwargs) + + if issparse(J0): + J0 = J0.tocsr() + + def jac_wrapped(x, _=None): + return jac(x, *args, **kwargs).tocsr() + + elif isinstance(J0, LinearOperator): + def jac_wrapped(x, _=None): + return jac(x, *args, **kwargs) + + else: + J0 = np.atleast_2d(J0) + + def jac_wrapped(x, _=None): + return np.atleast_2d(jac(x, *args, **kwargs)) + + else: # Estimate Jacobian by finite differences. + if method == 'lm': + if jac_sparsity is not None: + raise ValueError("method='lm' does not support " + "`jac_sparsity`.") + + if jac != '2-point': + warn(f"jac='{jac}' works equivalently to '2-point' for method='lm'.", + stacklevel=2) + + J0 = jac_wrapped = None + else: + if jac_sparsity is not None and tr_solver == 'exact': + raise ValueError("tr_solver='exact' is incompatible " + "with `jac_sparsity`.") + + jac_sparsity = check_jac_sparsity(jac_sparsity, m, n) + + def jac_wrapped(x, f): + J = approx_derivative(fun, x, rel_step=diff_step, method=jac, + f0=f, bounds=bounds, args=args, + kwargs=kwargs, sparsity=jac_sparsity) + if J.ndim != 2: # J is guaranteed not sparse. + J = np.atleast_2d(J) + + return J + + J0 = jac_wrapped(x0, f0) + + if J0 is not None: + if J0.shape != (m, n): + raise ValueError( + f"The return value of `jac` has wrong shape: expected {(m, n)}, " + f"actual {J0.shape}." 
+ ) + + if not isinstance(J0, np.ndarray): + if method == 'lm': + raise ValueError("method='lm' works only with dense " + "Jacobian matrices.") + + if tr_solver == 'exact': + raise ValueError( + "tr_solver='exact' works only with dense " + "Jacobian matrices.") + + jac_scale = isinstance(x_scale, str) and x_scale == 'jac' + if isinstance(J0, LinearOperator) and jac_scale: + raise ValueError("x_scale='jac' can't be used when `jac` " + "returns LinearOperator.") + + if tr_solver is None: + if isinstance(J0, np.ndarray): + tr_solver = 'exact' + else: + tr_solver = 'lsmr' + + if method == 'lm': + result = call_minpack(fun_wrapped, x0, jac_wrapped, ftol, xtol, gtol, + max_nfev, x_scale, diff_step) + + elif method == 'trf': + result = trf(fun_wrapped, jac_wrapped, x0, f0, J0, lb, ub, ftol, xtol, + gtol, max_nfev, x_scale, loss_function, tr_solver, + tr_options.copy(), verbose) + + elif method == 'dogbox': + if tr_solver == 'lsmr' and 'regularize' in tr_options: + warn("The keyword 'regularize' in `tr_options` is not relevant " + "for 'dogbox' method.", + stacklevel=2) + tr_options = tr_options.copy() + del tr_options['regularize'] + + result = dogbox(fun_wrapped, jac_wrapped, x0, f0, J0, lb, ub, ftol, + xtol, gtol, max_nfev, x_scale, loss_function, + tr_solver, tr_options, verbose) + + result.message = TERMINATION_MESSAGES[result.status] + result.success = result.status > 0 + + if verbose >= 1: + print(result.message) + print(f"Function evaluations {result.nfev}, initial cost {initial_cost:.4e}, " + f"final cost {result.cost:.4e}, " + f"first-order optimality {result.optimality:.2e}.") + + return result diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_lsq/lsq_linear.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_lsq/lsq_linear.py new file mode 100644 index 0000000000000000000000000000000000000000..b077c45e40874fc63490748f75f8463bc2adb08d --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_lsq/lsq_linear.py @@ -0,0 +1,361 @@ +"""Linear least squares with bound constraints on independent variables.""" +import numpy as np +from numpy.linalg import norm +from scipy.sparse import issparse, csr_matrix +from scipy.sparse.linalg import LinearOperator, lsmr +from scipy.optimize import OptimizeResult +from scipy.optimize._minimize import Bounds + +from .common import in_bounds, compute_grad +from .trf_linear import trf_linear +from .bvls import bvls + + +def prepare_bounds(bounds, n): + if len(bounds) != 2: + raise ValueError("`bounds` must contain 2 elements.") + lb, ub = (np.asarray(b, dtype=float) for b in bounds) + + if lb.ndim == 0: + lb = np.resize(lb, n) + + if ub.ndim == 0: + ub = np.resize(ub, n) + + return lb, ub + + +TERMINATION_MESSAGES = { + -1: "The algorithm was not able to make progress on the last iteration.", + 0: "The maximum number of iterations is exceeded.", + 1: "The first-order optimality measure is less than `tol`.", + 2: "The relative change of the cost function is less than `tol`.", + 3: "The unconstrained solution is optimal." +} + + +def lsq_linear(A, b, bounds=(-np.inf, np.inf), method='trf', tol=1e-10, + lsq_solver=None, lsmr_tol=None, max_iter=None, + verbose=0, *, lsmr_maxiter=None,): + r"""Solve a linear least-squares problem with bounds on the variables. 
+ + Given a m-by-n design matrix A and a target vector b with m elements, + `lsq_linear` solves the following optimization problem:: + + minimize 0.5 * ||A x - b||**2 + subject to lb <= x <= ub + + This optimization problem is convex, hence a found minimum (if iterations + have converged) is guaranteed to be global. + + Parameters + ---------- + A : array_like, sparse matrix of LinearOperator, shape (m, n) + Design matrix. Can be `scipy.sparse.linalg.LinearOperator`. + b : array_like, shape (m,) + Target vector. + bounds : 2-tuple of array_like or `Bounds`, optional + Lower and upper bounds on parameters. Defaults to no bounds. + There are two ways to specify the bounds: + + - Instance of `Bounds` class. + - 2-tuple of array_like: Each element of the tuple must be either + an array with the length equal to the number of parameters, or a + scalar (in which case the bound is taken to be the same for all + parameters). Use ``np.inf`` with an appropriate sign to disable + bounds on all or some parameters. + + method : 'trf' or 'bvls', optional + Method to perform minimization. + + * 'trf' : Trust Region Reflective algorithm adapted for a linear + least-squares problem. This is an interior-point-like method + and the required number of iterations is weakly correlated with + the number of variables. + * 'bvls' : Bounded-variable least-squares algorithm. This is + an active set method, which requires the number of iterations + comparable to the number of variables. Can't be used when `A` is + sparse or LinearOperator. + + Default is 'trf'. + tol : float, optional + Tolerance parameter. The algorithm terminates if a relative change + of the cost function is less than `tol` on the last iteration. + Additionally, the first-order optimality measure is considered: + + * ``method='trf'`` terminates if the uniform norm of the gradient, + scaled to account for the presence of the bounds, is less than + `tol`. + * ``method='bvls'`` terminates if Karush-Kuhn-Tucker conditions + are satisfied within `tol` tolerance. + + lsq_solver : {None, 'exact', 'lsmr'}, optional + Method of solving unbounded least-squares problems throughout + iterations: + + * 'exact' : Use dense QR or SVD decomposition approach. Can't be + used when `A` is sparse or LinearOperator. + * 'lsmr' : Use `scipy.sparse.linalg.lsmr` iterative procedure + which requires only matrix-vector product evaluations. Can't + be used with ``method='bvls'``. + + If None (default), the solver is chosen based on type of `A`. + lsmr_tol : None, float or 'auto', optional + Tolerance parameters 'atol' and 'btol' for `scipy.sparse.linalg.lsmr` + If None (default), it is set to ``1e-2 * tol``. If 'auto', the + tolerance will be adjusted based on the optimality of the current + iterate, which can speed up the optimization process, but is not always + reliable. + max_iter : None or int, optional + Maximum number of iterations before termination. If None (default), it + is set to 100 for ``method='trf'`` or to the number of variables for + ``method='bvls'`` (not counting iterations for 'bvls' initialization). + verbose : {0, 1, 2}, optional + Level of algorithm's verbosity: + + * 0 : work silently (default). + * 1 : display a termination report. + * 2 : display progress during iterations. + + lsmr_maxiter : None or int, optional + Maximum number of iterations for the lsmr least squares solver, + if it is used (by setting ``lsq_solver='lsmr'``). 
If None (default), it + uses lsmr's default of ``min(m, n)`` where ``m`` and ``n`` are the + number of rows and columns of `A`, respectively. Has no effect if + ``lsq_solver='exact'``. + + Returns + ------- + OptimizeResult with the following fields defined: + x : ndarray, shape (n,) + Solution found. + cost : float + Value of the cost function at the solution. + fun : ndarray, shape (m,) + Vector of residuals at the solution. + optimality : float + First-order optimality measure. The exact meaning depends on `method`, + refer to the description of `tol` parameter. + active_mask : ndarray of int, shape (n,) + Each component shows whether a corresponding constraint is active + (that is, whether a variable is at the bound): + + * 0 : a constraint is not active. + * -1 : a lower bound is active. + * 1 : an upper bound is active. + + Might be somewhat arbitrary for the `trf` method as it generates a + sequence of strictly feasible iterates and active_mask is determined + within a tolerance threshold. + unbounded_sol : tuple + Unbounded least squares solution tuple returned by the least squares + solver (set with `lsq_solver` option). If `lsq_solver` is not set or is + set to ``'exact'``, the tuple contains an ndarray of shape (n,) with + the unbounded solution, an ndarray with the sum of squared residuals, + an int with the rank of `A`, and an ndarray with the singular values + of `A` (see NumPy's ``linalg.lstsq`` for more information). If + `lsq_solver` is set to ``'lsmr'``, the tuple contains an ndarray of + shape (n,) with the unbounded solution, an int with the exit code, + an int with the number of iterations, and five floats with + various norms and the condition number of `A` (see SciPy's + ``sparse.linalg.lsmr`` for more information). This output can be + useful for determining the convergence of the least squares solver, + particularly the iterative ``'lsmr'`` solver. The unbounded least + squares problem is to minimize ``0.5 * ||A x - b||**2``. + nit : int + Number of iterations. Zero if the unconstrained solution is optimal. + status : int + Reason for algorithm termination: + + * -1 : the algorithm was not able to make progress on the last + iteration. + * 0 : the maximum number of iterations is exceeded. + * 1 : the first-order optimality measure is less than `tol`. + * 2 : the relative change of the cost function is less than `tol`. + * 3 : the unconstrained solution is optimal. + + message : str + Verbal description of the termination reason. + success : bool + True if one of the convergence criteria is satisfied (`status` > 0). + + See Also + -------- + nnls : Linear least squares with non-negativity constraint. + least_squares : Nonlinear least squares with bounds on the variables. + + Notes + ----- + The algorithm first computes the unconstrained least-squares solution by + `numpy.linalg.lstsq` or `scipy.sparse.linalg.lsmr` depending on + `lsq_solver`. This solution is returned as optimal if it lies within the + bounds. + + Method 'trf' runs the adaptation of the algorithm described in [STIR]_ for + a linear least-squares problem. The iterations are essentially the same as + in the nonlinear least-squares algorithm, but as the quadratic function + model is always accurate, we don't need to track or modify the radius of + a trust region. The line search (backtracking) is used as a safety net + when a selected step does not decrease the cost function. Read more + detailed description of the algorithm in `scipy.optimize.least_squares`. 
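+
+    As a minimal sketch of this call pattern on a small dense problem (the
+    arrays and the box bounds below are arbitrary and chosen purely for
+    illustration)::
+
+        import numpy as np
+        from scipy.optimize import lsq_linear
+
+        A = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
+        b = np.array([1.0, 2.0, 2.0])
+        # box constraints 0 <= x[i] <= 1 for both variables
+        res = lsq_linear(A, b, bounds=(0.0, 1.0), method='trf')
+        print(res.x, res.status, res.optimality)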
+ + Method 'bvls' runs a Python implementation of the algorithm described in + [BVLS]_. The algorithm maintains active and free sets of variables, on + each iteration chooses a new variable to move from the active set to the + free set and then solves the unconstrained least-squares problem on free + variables. This algorithm is guaranteed to give an accurate solution + eventually, but may require up to n iterations for a problem with n + variables. Additionally, an ad-hoc initialization procedure is + implemented, that determines which variables to set free or active + initially. It takes some number of iterations before actual BVLS starts, + but can significantly reduce the number of further iterations. + + References + ---------- + .. [STIR] M. A. Branch, T. F. Coleman, and Y. Li, "A Subspace, Interior, + and Conjugate Gradient Method for Large-Scale Bound-Constrained + Minimization Problems," SIAM Journal on Scientific Computing, + Vol. 21, Number 1, pp 1-23, 1999. + .. [BVLS] P. B. Start and R. L. Parker, "Bounded-Variable Least-Squares: + an Algorithm and Applications", Computational Statistics, 10, + 129-141, 1995. + + Examples + -------- + In this example, a problem with a large sparse matrix and bounds on the + variables is solved. + + >>> import numpy as np + >>> from scipy.sparse import rand + >>> from scipy.optimize import lsq_linear + >>> rng = np.random.default_rng() + ... + >>> m = 2000 + >>> n = 1000 + ... + >>> A = rand(m, n, density=1e-4, random_state=rng) + >>> b = rng.standard_normal(m) + ... + >>> lb = rng.standard_normal(n) + >>> ub = lb + 1 + ... + >>> res = lsq_linear(A, b, bounds=(lb, ub), lsmr_tol='auto', verbose=1) + The relative change of the cost function is less than `tol`. + Number of iterations 10, initial cost 1.0070e+03, final cost 9.6602e+02, + first-order optimality 2.21e-09. # may vary + """ + if method not in ['trf', 'bvls']: + raise ValueError("`method` must be 'trf' or 'bvls'") + + if lsq_solver not in [None, 'exact', 'lsmr']: + raise ValueError("`solver` must be None, 'exact' or 'lsmr'.") + + if verbose not in [0, 1, 2]: + raise ValueError("`verbose` must be in [0, 1, 2].") + + if issparse(A): + A = csr_matrix(A) + elif not isinstance(A, LinearOperator): + A = np.atleast_2d(np.asarray(A)) + + if method == 'bvls': + if lsq_solver == 'lsmr': + raise ValueError("method='bvls' can't be used with " + "lsq_solver='lsmr'") + + if not isinstance(A, np.ndarray): + raise ValueError("method='bvls' can't be used with `A` being " + "sparse or LinearOperator.") + + if lsq_solver is None: + if isinstance(A, np.ndarray): + lsq_solver = 'exact' + else: + lsq_solver = 'lsmr' + elif lsq_solver == 'exact' and not isinstance(A, np.ndarray): + raise ValueError("`exact` solver can't be used when `A` is " + "sparse or LinearOperator.") + + if len(A.shape) != 2: # No ndim for LinearOperator. 
+ raise ValueError("`A` must have at most 2 dimensions.") + + if max_iter is not None and max_iter <= 0: + raise ValueError("`max_iter` must be None or positive integer.") + + m, n = A.shape + + b = np.atleast_1d(b) + if b.ndim != 1: + raise ValueError("`b` must have at most 1 dimension.") + + if b.size != m: + raise ValueError("Inconsistent shapes between `A` and `b`.") + + if isinstance(bounds, Bounds): + lb = bounds.lb + ub = bounds.ub + else: + lb, ub = prepare_bounds(bounds, n) + + if lb.shape != (n,) and ub.shape != (n,): + raise ValueError("Bounds have wrong shape.") + + if np.any(lb >= ub): + raise ValueError("Each lower bound must be strictly less than each " + "upper bound.") + + if lsmr_maxiter is not None and lsmr_maxiter < 1: + raise ValueError("`lsmr_maxiter` must be None or positive integer.") + + if not ((isinstance(lsmr_tol, float) and lsmr_tol > 0) or + lsmr_tol in ('auto', None)): + raise ValueError("`lsmr_tol` must be None, 'auto', or positive float.") + + if lsq_solver == 'exact': + unbd_lsq = np.linalg.lstsq(A, b, rcond=-1) + elif lsq_solver == 'lsmr': + first_lsmr_tol = lsmr_tol # tol of first call to lsmr + if lsmr_tol is None or lsmr_tol == 'auto': + first_lsmr_tol = 1e-2 * tol # default if lsmr_tol not defined + unbd_lsq = lsmr(A, b, maxiter=lsmr_maxiter, + atol=first_lsmr_tol, btol=first_lsmr_tol) + x_lsq = unbd_lsq[0] # extract the solution from the least squares solver + + if in_bounds(x_lsq, lb, ub): + r = A @ x_lsq - b + cost = 0.5 * np.dot(r, r) + termination_status = 3 + termination_message = TERMINATION_MESSAGES[termination_status] + g = compute_grad(A, r) + g_norm = norm(g, ord=np.inf) + + if verbose > 0: + print(termination_message) + print(f"Final cost {cost:.4e}, first-order optimality {g_norm:.2e}") + + return OptimizeResult( + x=x_lsq, fun=r, cost=cost, optimality=g_norm, + active_mask=np.zeros(n), unbounded_sol=unbd_lsq, + nit=0, status=termination_status, + message=termination_message, success=True) + + if method == 'trf': + res = trf_linear(A, b, x_lsq, lb, ub, tol, lsq_solver, lsmr_tol, + max_iter, verbose, lsmr_maxiter=lsmr_maxiter) + elif method == 'bvls': + res = bvls(A, b, x_lsq, lb, ub, tol, max_iter, verbose) + + res.unbounded_sol = unbd_lsq + res.message = TERMINATION_MESSAGES[res.status] + res.success = res.status > 0 + + if verbose > 0: + print(res.message) + print( + f"Number of iterations {res.nit}, initial cost {res.initial_cost:.4e}, " + f"final cost {res.cost:.4e}, first-order optimality {res.optimality:.2e}." + ) + + del res.initial_cost + + return res diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_lsq/trf.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_lsq/trf.py new file mode 100644 index 0000000000000000000000000000000000000000..9154bdba5b2cc41883811ba1820dfc251e515d6c --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_lsq/trf.py @@ -0,0 +1,560 @@ +"""Trust Region Reflective algorithm for least-squares optimization. + +The algorithm is based on ideas from paper [STIR]_. The main idea is to +account for the presence of the bounds by appropriate scaling of the variables (or, +equivalently, changing a trust-region shape). Let's introduce a vector v: + + | ub[i] - x[i], if g[i] < 0 and ub[i] < np.inf + v[i] = | x[i] - lb[i], if g[i] > 0 and lb[i] > -np.inf + | 1, otherwise + +where g is the gradient of a cost function and lb, ub are the bounds. 
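+
+As a small numerical sketch of how v is assembled under this rule (the
+arrays below are arbitrary; this is only an illustration of the definition,
+not the library's `CL_scaling_vector` routine)::
+
+    import numpy as np
+
+    lb = np.array([0.0, -np.inf, -1.0])
+    ub = np.array([np.inf, np.inf, 1.0])
+    x = np.array([0.5, 2.0, 0.0])
+    g = np.array([1.0, -3.0, -2.0])   # gradient of the cost at x
+
+    v = np.ones_like(x)
+    case1 = (g < 0) & np.isfinite(ub)   # anti-gradient points at an upper bound
+    v[case1] = ub[case1] - x[case1]
+    case2 = (g > 0) & np.isfinite(lb)   # anti-gradient points at a lower bound
+    v[case2] = x[case2] - lb[case2]
+    # v == [0.5, 1.0, 1.0]
+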
Its +components are distances to the bounds at which the anti-gradient points (if +this distance is finite). Define a scaling matrix D = diag(v**0.5). +First-order optimality conditions can be stated as + + D^2 g(x) = 0. + +Meaning that components of the gradient should be zero for strictly interior +variables, and components must point inside the feasible region for variables +on the bound. + +Now consider this system of equations as a new optimization problem. If the +point x is strictly interior (not on the bound), then the left-hand side is +differentiable and the Newton step for it satisfies + + (D^2 H + diag(g) Jv) p = -D^2 g + +where H is the Hessian matrix (or its J^T J approximation in least squares), +Jv is the Jacobian matrix of v with components -1, 1 or 0, such that all +elements of matrix C = diag(g) Jv are non-negative. Introduce the change +of the variables x = D x_h (_h would be "hat" in LaTeX). In the new variables, +we have a Newton step satisfying + + B_h p_h = -g_h, + +where B_h = D H D + C, g_h = D g. In least squares B_h = J_h^T J_h, where +J_h = J D. Note that J_h and g_h are proper Jacobian and gradient with respect +to "hat" variables. To guarantee global convergence we formulate a +trust-region problem based on the Newton step in the new variables: + + 0.5 * p_h^T B_h p + g_h^T p_h -> min, ||p_h|| <= Delta + +In the original space B = H + D^{-1} C D^{-1}, and the equivalent trust-region +problem is + + 0.5 * p^T B p + g^T p -> min, ||D^{-1} p|| <= Delta + +Here, the meaning of the matrix D becomes more clear: it alters the shape +of a trust-region, such that large steps towards the bounds are not allowed. +In the implementation, the trust-region problem is solved in "hat" space, +but handling of the bounds is done in the original space (see below and read +the code). + +The introduction of the matrix D doesn't allow to ignore bounds, the algorithm +must keep iterates strictly feasible (to satisfy aforementioned +differentiability), the parameter theta controls step back from the boundary +(see the code for details). + +The algorithm does another important trick. If the trust-region solution +doesn't fit into the bounds, then a reflected (from a firstly encountered +bound) search direction is considered. For motivation and analysis refer to +[STIR]_ paper (and other papers of the authors). In practice, it doesn't need +a lot of justifications, the algorithm simply chooses the best step among +three: a constrained trust-region step, a reflected step and a constrained +Cauchy step (a minimizer along -g_h in "hat" space, or -D^2 g in the original +space). + +Another feature is that a trust-region radius control strategy is modified to +account for appearance of the diagonal C matrix (called diag_h in the code). + +Note that all described peculiarities are completely gone as we consider +problems without bounds (the algorithm becomes a standard trust-region type +algorithm very similar to ones implemented in MINPACK). + +The implementation supports two methods of solving the trust-region problem. +The first, called 'exact', applies SVD on Jacobian and then solves the problem +very accurately using the algorithm described in [JJMore]_. It is not +applicable to large problem. The second, called 'lsmr', uses the 2-D subspace +approach (sometimes called "indefinite dogleg"), where the problem is solved +in a subspace spanned by the gradient and the approximate Gauss-Newton step +found by ``scipy.sparse.linalg.lsmr``. 
A 2-D trust-region problem is +reformulated as a 4th order algebraic equation and solved very accurately by +``numpy.roots``. The subspace approach allows to solve very large problems +(up to couple of millions of residuals on a regular PC), provided the Jacobian +matrix is sufficiently sparse. + +References +---------- +.. [STIR] Branch, M.A., T.F. Coleman, and Y. Li, "A Subspace, Interior, + and Conjugate Gradient Method for Large-Scale Bound-Constrained + Minimization Problems," SIAM Journal on Scientific Computing, + Vol. 21, Number 1, pp 1-23, 1999. +.. [JJMore] More, J. J., "The Levenberg-Marquardt Algorithm: Implementation + and Theory," Numerical Analysis, ed. G. A. Watson, Lecture +""" +import numpy as np +from numpy.linalg import norm +from scipy.linalg import svd, qr +from scipy.sparse.linalg import lsmr +from scipy.optimize import OptimizeResult + +from .common import ( + step_size_to_bound, find_active_constraints, in_bounds, + make_strictly_feasible, intersect_trust_region, solve_lsq_trust_region, + solve_trust_region_2d, minimize_quadratic_1d, build_quadratic_1d, + evaluate_quadratic, right_multiplied_operator, regularized_lsq_operator, + CL_scaling_vector, compute_grad, compute_jac_scale, check_termination, + update_tr_radius, scale_for_robust_loss_function, print_header_nonlinear, + print_iteration_nonlinear) + + +def trf(fun, jac, x0, f0, J0, lb, ub, ftol, xtol, gtol, max_nfev, x_scale, + loss_function, tr_solver, tr_options, verbose): + # For efficiency, it makes sense to run the simplified version of the + # algorithm when no bounds are imposed. We decided to write the two + # separate functions. It violates the DRY principle, but the individual + # functions are kept the most readable. + if np.all(lb == -np.inf) and np.all(ub == np.inf): + return trf_no_bounds( + fun, jac, x0, f0, J0, ftol, xtol, gtol, max_nfev, x_scale, + loss_function, tr_solver, tr_options, verbose) + else: + return trf_bounds( + fun, jac, x0, f0, J0, lb, ub, ftol, xtol, gtol, max_nfev, x_scale, + loss_function, tr_solver, tr_options, verbose) + + +def select_step(x, J_h, diag_h, g_h, p, p_h, d, Delta, lb, ub, theta): + """Select the best step according to Trust Region Reflective algorithm.""" + if in_bounds(x + p, lb, ub): + p_value = evaluate_quadratic(J_h, g_h, p_h, diag=diag_h) + return p, p_h, -p_value + + p_stride, hits = step_size_to_bound(x, p, lb, ub) + + # Compute the reflected direction. + r_h = np.copy(p_h) + r_h[hits.astype(bool)] *= -1 + r = d * r_h + + # Restrict trust-region step, such that it hits the bound. + p *= p_stride + p_h *= p_stride + x_on_bound = x + p + + # Reflected direction will cross first either feasible region or trust + # region boundary. + _, to_tr = intersect_trust_region(p_h, r_h, Delta) + to_bound, _ = step_size_to_bound(x_on_bound, r, lb, ub) + + # Find lower and upper bounds on a step size along the reflected + # direction, considering the strict feasibility requirement. There is no + # single correct way to do that, the chosen approach seems to work best + # on test problems. + r_stride = min(to_bound, to_tr) + if r_stride > 0: + r_stride_l = (1 - theta) * p_stride / r_stride + if r_stride == to_bound: + r_stride_u = theta * to_bound + else: + r_stride_u = to_tr + else: + r_stride_l = 0 + r_stride_u = -1 + + # Check if reflection step is available. 
+ if r_stride_l <= r_stride_u: + a, b, c = build_quadratic_1d(J_h, g_h, r_h, s0=p_h, diag=diag_h) + r_stride, r_value = minimize_quadratic_1d( + a, b, r_stride_l, r_stride_u, c=c) + r_h *= r_stride + r_h += p_h + r = r_h * d + else: + r_value = np.inf + + # Now correct p_h to make it strictly interior. + p *= theta + p_h *= theta + p_value = evaluate_quadratic(J_h, g_h, p_h, diag=diag_h) + + ag_h = -g_h + ag = d * ag_h + + to_tr = Delta / norm(ag_h) + to_bound, _ = step_size_to_bound(x, ag, lb, ub) + if to_bound < to_tr: + ag_stride = theta * to_bound + else: + ag_stride = to_tr + + a, b = build_quadratic_1d(J_h, g_h, ag_h, diag=diag_h) + ag_stride, ag_value = minimize_quadratic_1d(a, b, 0, ag_stride) + ag_h *= ag_stride + ag *= ag_stride + + if p_value < r_value and p_value < ag_value: + return p, p_h, -p_value + elif r_value < p_value and r_value < ag_value: + return r, r_h, -r_value + else: + return ag, ag_h, -ag_value + + +def trf_bounds(fun, jac, x0, f0, J0, lb, ub, ftol, xtol, gtol, max_nfev, + x_scale, loss_function, tr_solver, tr_options, verbose): + x = x0.copy() + + f = f0 + f_true = f.copy() + nfev = 1 + + J = J0 + njev = 1 + m, n = J.shape + + if loss_function is not None: + rho = loss_function(f) + cost = 0.5 * np.sum(rho[0]) + J, f = scale_for_robust_loss_function(J, f, rho) + else: + cost = 0.5 * np.dot(f, f) + + g = compute_grad(J, f) + + jac_scale = isinstance(x_scale, str) and x_scale == 'jac' + if jac_scale: + scale, scale_inv = compute_jac_scale(J) + else: + scale, scale_inv = x_scale, 1 / x_scale + + v, dv = CL_scaling_vector(x, g, lb, ub) + v[dv != 0] *= scale_inv[dv != 0] + Delta = norm(x0 * scale_inv / v**0.5) + if Delta == 0: + Delta = 1.0 + + g_norm = norm(g * v, ord=np.inf) + + f_augmented = np.zeros(m + n) + if tr_solver == 'exact': + J_augmented = np.empty((m + n, n)) + elif tr_solver == 'lsmr': + reg_term = 0.0 + regularize = tr_options.pop('regularize', True) + + if max_nfev is None: + max_nfev = x0.size * 100 + + alpha = 0.0 # "Levenberg-Marquardt" parameter + + termination_status = None + iteration = 0 + step_norm = None + actual_reduction = None + + if verbose == 2: + print_header_nonlinear() + + while True: + v, dv = CL_scaling_vector(x, g, lb, ub) + + g_norm = norm(g * v, ord=np.inf) + if g_norm < gtol: + termination_status = 1 + + if verbose == 2: + print_iteration_nonlinear(iteration, nfev, cost, actual_reduction, + step_norm, g_norm) + + if termination_status is not None or nfev == max_nfev: + break + + # Now compute variables in "hat" space. Here, we also account for + # scaling introduced by `x_scale` parameter. This part is a bit tricky, + # you have to write down the formulas and see how the trust-region + # problem is formulated when the two types of scaling are applied. + # The idea is that first we apply `x_scale` and then apply Coleman-Li + # approach in the new variables. + + # v is recomputed in the variables after applying `x_scale`, note that + # components which were identically 1 not affected. + v[dv != 0] *= scale_inv[dv != 0] + + # Here, we apply two types of scaling. + d = v**0.5 * scale + + # C = diag(g * scale) Jv + diag_h = g * dv * scale + + # After all this has been done, we continue normally. + + # "hat" gradient. + g_h = d * g + + f_augmented[:m] = f + if tr_solver == 'exact': + J_augmented[:m] = J * d + J_h = J_augmented[:m] # Memory view. 
+ J_augmented[m:] = np.diag(diag_h**0.5) + U, s, V = svd(J_augmented, full_matrices=False) + V = V.T + uf = U.T.dot(f_augmented) + elif tr_solver == 'lsmr': + J_h = right_multiplied_operator(J, d) + + if regularize: + a, b = build_quadratic_1d(J_h, g_h, -g_h, diag=diag_h) + to_tr = Delta / norm(g_h) + ag_value = minimize_quadratic_1d(a, b, 0, to_tr)[1] + reg_term = -ag_value / Delta**2 + + lsmr_op = regularized_lsq_operator(J_h, (diag_h + reg_term)**0.5) + gn_h = lsmr(lsmr_op, f_augmented, **tr_options)[0] + S = np.vstack((g_h, gn_h)).T + S, _ = qr(S, mode='economic') + JS = J_h.dot(S) # LinearOperator does dot too. + B_S = np.dot(JS.T, JS) + np.dot(S.T * diag_h, S) + g_S = S.T.dot(g_h) + + # theta controls step back step ratio from the bounds. + theta = max(0.995, 1 - g_norm) + + actual_reduction = -1 + while actual_reduction <= 0 and nfev < max_nfev: + if tr_solver == 'exact': + p_h, alpha, n_iter = solve_lsq_trust_region( + n, m, uf, s, V, Delta, initial_alpha=alpha) + elif tr_solver == 'lsmr': + p_S, _ = solve_trust_region_2d(B_S, g_S, Delta) + p_h = S.dot(p_S) + + p = d * p_h # Trust-region solution in the original space. + step, step_h, predicted_reduction = select_step( + x, J_h, diag_h, g_h, p, p_h, d, Delta, lb, ub, theta) + + x_new = make_strictly_feasible(x + step, lb, ub, rstep=0) + f_new = fun(x_new) + nfev += 1 + + step_h_norm = norm(step_h) + + if not np.all(np.isfinite(f_new)): + Delta = 0.25 * step_h_norm + continue + + # Usual trust-region step quality estimation. + if loss_function is not None: + cost_new = loss_function(f_new, cost_only=True) + else: + cost_new = 0.5 * np.dot(f_new, f_new) + actual_reduction = cost - cost_new + Delta_new, ratio = update_tr_radius( + Delta, actual_reduction, predicted_reduction, + step_h_norm, step_h_norm > 0.95 * Delta) + + step_norm = norm(step) + termination_status = check_termination( + actual_reduction, cost, step_norm, norm(x), ratio, ftol, xtol) + if termination_status is not None: + break + + alpha *= Delta / Delta_new + Delta = Delta_new + + if actual_reduction > 0: + x = x_new + + f = f_new + f_true = f.copy() + + cost = cost_new + + J = jac(x, f) + njev += 1 + + if loss_function is not None: + rho = loss_function(f) + J, f = scale_for_robust_loss_function(J, f, rho) + + g = compute_grad(J, f) + + if jac_scale: + scale, scale_inv = compute_jac_scale(J, scale_inv) + else: + step_norm = 0 + actual_reduction = 0 + + iteration += 1 + + if termination_status is None: + termination_status = 0 + + active_mask = find_active_constraints(x, lb, ub, rtol=xtol) + return OptimizeResult( + x=x, cost=cost, fun=f_true, jac=J, grad=g, optimality=g_norm, + active_mask=active_mask, nfev=nfev, njev=njev, + status=termination_status) + + +def trf_no_bounds(fun, jac, x0, f0, J0, ftol, xtol, gtol, max_nfev, + x_scale, loss_function, tr_solver, tr_options, verbose): + x = x0.copy() + + f = f0 + f_true = f.copy() + nfev = 1 + + J = J0 + njev = 1 + m, n = J.shape + + if loss_function is not None: + rho = loss_function(f) + cost = 0.5 * np.sum(rho[0]) + J, f = scale_for_robust_loss_function(J, f, rho) + else: + cost = 0.5 * np.dot(f, f) + + g = compute_grad(J, f) + + jac_scale = isinstance(x_scale, str) and x_scale == 'jac' + if jac_scale: + scale, scale_inv = compute_jac_scale(J) + else: + scale, scale_inv = x_scale, 1 / x_scale + + Delta = norm(x0 * scale_inv) + if Delta == 0: + Delta = 1.0 + + if tr_solver == 'lsmr': + reg_term = 0 + damp = tr_options.pop('damp', 0.0) + regularize = tr_options.pop('regularize', True) + + if max_nfev is None: + 
max_nfev = x0.size * 100 + + alpha = 0.0 # "Levenberg-Marquardt" parameter + + termination_status = None + iteration = 0 + step_norm = None + actual_reduction = None + + if verbose == 2: + print_header_nonlinear() + + while True: + g_norm = norm(g, ord=np.inf) + if g_norm < gtol: + termination_status = 1 + + if verbose == 2: + print_iteration_nonlinear(iteration, nfev, cost, actual_reduction, + step_norm, g_norm) + + if termination_status is not None or nfev == max_nfev: + break + + d = scale + g_h = d * g + + if tr_solver == 'exact': + J_h = J * d + U, s, V = svd(J_h, full_matrices=False) + V = V.T + uf = U.T.dot(f) + elif tr_solver == 'lsmr': + J_h = right_multiplied_operator(J, d) + + if regularize: + a, b = build_quadratic_1d(J_h, g_h, -g_h) + to_tr = Delta / norm(g_h) + ag_value = minimize_quadratic_1d(a, b, 0, to_tr)[1] + reg_term = -ag_value / Delta**2 + + damp_full = (damp**2 + reg_term)**0.5 + gn_h = lsmr(J_h, f, damp=damp_full, **tr_options)[0] + S = np.vstack((g_h, gn_h)).T + S, _ = qr(S, mode='economic') + JS = J_h.dot(S) + B_S = np.dot(JS.T, JS) + g_S = S.T.dot(g_h) + + actual_reduction = -1 + while actual_reduction <= 0 and nfev < max_nfev: + if tr_solver == 'exact': + step_h, alpha, n_iter = solve_lsq_trust_region( + n, m, uf, s, V, Delta, initial_alpha=alpha) + elif tr_solver == 'lsmr': + p_S, _ = solve_trust_region_2d(B_S, g_S, Delta) + step_h = S.dot(p_S) + + predicted_reduction = -evaluate_quadratic(J_h, g_h, step_h) + step = d * step_h + x_new = x + step + f_new = fun(x_new) + nfev += 1 + + step_h_norm = norm(step_h) + + if not np.all(np.isfinite(f_new)): + Delta = 0.25 * step_h_norm + continue + + # Usual trust-region step quality estimation. + if loss_function is not None: + cost_new = loss_function(f_new, cost_only=True) + else: + cost_new = 0.5 * np.dot(f_new, f_new) + actual_reduction = cost - cost_new + + Delta_new, ratio = update_tr_radius( + Delta, actual_reduction, predicted_reduction, + step_h_norm, step_h_norm > 0.95 * Delta) + + step_norm = norm(step) + termination_status = check_termination( + actual_reduction, cost, step_norm, norm(x), ratio, ftol, xtol) + if termination_status is not None: + break + + alpha *= Delta / Delta_new + Delta = Delta_new + + if actual_reduction > 0: + x = x_new + + f = f_new + f_true = f.copy() + + cost = cost_new + + J = jac(x, f) + njev += 1 + + if loss_function is not None: + rho = loss_function(f) + J, f = scale_for_robust_loss_function(J, f, rho) + + g = compute_grad(J, f) + + if jac_scale: + scale, scale_inv = compute_jac_scale(J, scale_inv) + else: + step_norm = 0 + actual_reduction = 0 + + iteration += 1 + + if termination_status is None: + termination_status = 0 + + active_mask = np.zeros_like(x) + return OptimizeResult( + x=x, cost=cost, fun=f_true, jac=J, grad=g, optimality=g_norm, + active_mask=active_mask, nfev=nfev, njev=njev, + status=termination_status) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_lsq/trf_linear.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_lsq/trf_linear.py new file mode 100644 index 0000000000000000000000000000000000000000..dd752763179bcf97945c7f34ce6a9e49e85c819e --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_lsq/trf_linear.py @@ -0,0 +1,249 @@ +"""The adaptation of Trust Region Reflective algorithm for a linear +least-squares problem.""" +import numpy as np +from numpy.linalg import norm +from scipy.linalg import 
qr, solve_triangular +from scipy.sparse.linalg import lsmr +from scipy.optimize import OptimizeResult + +from .givens_elimination import givens_elimination +from .common import ( + EPS, step_size_to_bound, find_active_constraints, in_bounds, + make_strictly_feasible, build_quadratic_1d, evaluate_quadratic, + minimize_quadratic_1d, CL_scaling_vector, reflective_transformation, + print_header_linear, print_iteration_linear, compute_grad, + regularized_lsq_operator, right_multiplied_operator) + + +def regularized_lsq_with_qr(m, n, R, QTb, perm, diag, copy_R=True): + """Solve regularized least squares using information from QR-decomposition. + + The initial problem is to solve the following system in a least-squares + sense:: + + A x = b + D x = 0 + + where D is diagonal matrix. The method is based on QR decomposition + of the form A P = Q R, where P is a column permutation matrix, Q is an + orthogonal matrix and R is an upper triangular matrix. + + Parameters + ---------- + m, n : int + Initial shape of A. + R : ndarray, shape (n, n) + Upper triangular matrix from QR decomposition of A. + QTb : ndarray, shape (n,) + First n components of Q^T b. + perm : ndarray, shape (n,) + Array defining column permutation of A, such that ith column of + P is perm[i]-th column of identity matrix. + diag : ndarray, shape (n,) + Array containing diagonal elements of D. + + Returns + ------- + x : ndarray, shape (n,) + Found least-squares solution. + """ + if copy_R: + R = R.copy() + v = QTb.copy() + + givens_elimination(R, v, diag[perm]) + + abs_diag_R = np.abs(np.diag(R)) + threshold = EPS * max(m, n) * np.max(abs_diag_R) + nns, = np.nonzero(abs_diag_R > threshold) + + R = R[np.ix_(nns, nns)] + v = v[nns] + + x = np.zeros(n) + x[perm[nns]] = solve_triangular(R, v) + + return x + + +def backtracking(A, g, x, p, theta, p_dot_g, lb, ub): + """Find an appropriate step size using backtracking line search.""" + alpha = 1 + while True: + x_new, _ = reflective_transformation(x + alpha * p, lb, ub) + step = x_new - x + cost_change = -evaluate_quadratic(A, g, step) + if cost_change > -0.1 * alpha * p_dot_g: + break + alpha *= 0.5 + + active = find_active_constraints(x_new, lb, ub) + if np.any(active != 0): + x_new, _ = reflective_transformation(x + theta * alpha * p, lb, ub) + x_new = make_strictly_feasible(x_new, lb, ub, rstep=0) + step = x_new - x + cost_change = -evaluate_quadratic(A, g, step) + + return x, step, cost_change + + +def select_step(x, A_h, g_h, c_h, p, p_h, d, lb, ub, theta): + """Select the best step according to Trust Region Reflective algorithm.""" + if in_bounds(x + p, lb, ub): + return p + + p_stride, hits = step_size_to_bound(x, p, lb, ub) + r_h = np.copy(p_h) + r_h[hits.astype(bool)] *= -1 + r = d * r_h + + # Restrict step, such that it hits the bound. + p *= p_stride + p_h *= p_stride + x_on_bound = x + p + + # Find the step size along reflected direction. + r_stride_u, _ = step_size_to_bound(x_on_bound, r, lb, ub) + + # Stay interior. + r_stride_l = (1 - theta) * r_stride_u + r_stride_u *= theta + + if r_stride_u > 0: + a, b, c = build_quadratic_1d(A_h, g_h, r_h, s0=p_h, diag=c_h) + r_stride, r_value = minimize_quadratic_1d( + a, b, r_stride_l, r_stride_u, c=c) + r_h = p_h + r_h * r_stride + r = d * r_h + else: + r_value = np.inf + + # Now correct p_h to make it strictly interior. 
+ p_h *= theta + p *= theta + p_value = evaluate_quadratic(A_h, g_h, p_h, diag=c_h) + + ag_h = -g_h + ag = d * ag_h + ag_stride_u, _ = step_size_to_bound(x, ag, lb, ub) + ag_stride_u *= theta + a, b = build_quadratic_1d(A_h, g_h, ag_h, diag=c_h) + ag_stride, ag_value = minimize_quadratic_1d(a, b, 0, ag_stride_u) + ag *= ag_stride + + if p_value < r_value and p_value < ag_value: + return p + elif r_value < p_value and r_value < ag_value: + return r + else: + return ag + + +def trf_linear(A, b, x_lsq, lb, ub, tol, lsq_solver, lsmr_tol, + max_iter, verbose, *, lsmr_maxiter=None): + m, n = A.shape + x, _ = reflective_transformation(x_lsq, lb, ub) + x = make_strictly_feasible(x, lb, ub, rstep=0.1) + + if lsq_solver == 'exact': + QT, R, perm = qr(A, mode='economic', pivoting=True) + QT = QT.T + + if m < n: + R = np.vstack((R, np.zeros((n - m, n)))) + + QTr = np.zeros(n) + k = min(m, n) + elif lsq_solver == 'lsmr': + r_aug = np.zeros(m + n) + auto_lsmr_tol = False + if lsmr_tol is None: + lsmr_tol = 1e-2 * tol + elif lsmr_tol == 'auto': + auto_lsmr_tol = True + + r = A.dot(x) - b + g = compute_grad(A, r) + cost = 0.5 * np.dot(r, r) + initial_cost = cost + + termination_status = None + step_norm = None + cost_change = None + + if max_iter is None: + max_iter = 100 + + if verbose == 2: + print_header_linear() + + for iteration in range(max_iter): + v, dv = CL_scaling_vector(x, g, lb, ub) + g_scaled = g * v + g_norm = norm(g_scaled, ord=np.inf) + if g_norm < tol: + termination_status = 1 + + if verbose == 2: + print_iteration_linear(iteration, cost, cost_change, + step_norm, g_norm) + + if termination_status is not None: + break + + diag_h = g * dv + diag_root_h = diag_h ** 0.5 + d = v ** 0.5 + g_h = d * g + + A_h = right_multiplied_operator(A, d) + if lsq_solver == 'exact': + QTr[:k] = QT.dot(r) + p_h = -regularized_lsq_with_qr(m, n, R * d[perm], QTr, perm, + diag_root_h, copy_R=False) + elif lsq_solver == 'lsmr': + lsmr_op = regularized_lsq_operator(A_h, diag_root_h) + r_aug[:m] = r + if auto_lsmr_tol: + eta = 1e-2 * min(0.5, g_norm) + lsmr_tol = max(EPS, min(0.1, eta * g_norm)) + p_h = -lsmr(lsmr_op, r_aug, maxiter=lsmr_maxiter, + atol=lsmr_tol, btol=lsmr_tol)[0] + + p = d * p_h + + p_dot_g = np.dot(p, g) + if p_dot_g > 0: + termination_status = -1 + + theta = 1 - min(0.005, g_norm) + step = select_step(x, A_h, g_h, diag_h, p, p_h, d, lb, ub, theta) + cost_change = -evaluate_quadratic(A, g, step) + + # Perhaps almost never executed, the idea is that `p` is descent + # direction thus we must find acceptable cost decrease using simple + # "backtracking", otherwise the algorithm's logic would break. 
+ if cost_change < 0: + x, step, cost_change = backtracking( + A, g, x, p, theta, p_dot_g, lb, ub) + else: + x = make_strictly_feasible(x + step, lb, ub, rstep=0) + + step_norm = norm(step) + r = A.dot(x) - b + g = compute_grad(A, r) + + if cost_change < tol * cost: + termination_status = 2 + + cost = 0.5 * np.dot(r, r) + + if termination_status is None: + termination_status = 0 + + active_mask = find_active_constraints(x, lb, ub, rtol=tol) + + return OptimizeResult( + x=x, fun=r, cost=cost, optimality=g_norm, active_mask=active_mask, + nit=iteration + 1, status=termination_status, + initial_cost=initial_cost) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_milp.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_milp.py new file mode 100644 index 0000000000000000000000000000000000000000..b97a00d15406700cfedbe50e1b3714d36a60f8fb --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_milp.py @@ -0,0 +1,392 @@ +import warnings +import numpy as np +from scipy.sparse import csc_array, vstack, issparse +from scipy._lib._util import VisibleDeprecationWarning +from ._highspy._highs_wrapper import _highs_wrapper # type: ignore[import-not-found,import-untyped] +from ._constraints import LinearConstraint, Bounds +from ._optimize import OptimizeResult +from ._linprog_highs import _highs_to_scipy_status_message + + +def _constraints_to_components(constraints): + """ + Convert sequence of constraints to a single set of components A, b_l, b_u. + + `constraints` could be + + 1. A LinearConstraint + 2. A tuple representing a LinearConstraint + 3. An invalid object + 4. A sequence of composed entirely of objects of type 1/2 + 5. A sequence containing at least one object of type 3 + + We want to accept 1, 2, and 4 and reject 3 and 5. 
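A minimal sketch of the accepted constraint forms described above (editor's illustration only; the array values are made up and do not come from the vendored file):

    import numpy as np
    from scipy.optimize import LinearConstraint

    A = np.array([[1.0, 2.0]])
    # Case 1: a LinearConstraint instance
    lc = LinearConstraint(A, -np.inf, 4.0)
    # Case 2: a plain tuple carrying the same components (A, lb, ub)
    tup = (A, -np.inf, 4.0)
    # Case 4: a sequence mixing the two; both forms reduce to one (A, b_l, b_u)
    seq = [lc, tup]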
+ """ + message = ("`constraints` (or each element within `constraints`) must be " + "convertible into an instance of " + "`scipy.optimize.LinearConstraint`.") + As = [] + b_ls = [] + b_us = [] + + # Accept case 1 by standardizing as case 4 + if isinstance(constraints, LinearConstraint): + constraints = [constraints] + else: + # Reject case 3 + try: + iter(constraints) + except TypeError as exc: + raise ValueError(message) from exc + + # Accept case 2 by standardizing as case 4 + if len(constraints) == 3: + # argument could be a single tuple representing a LinearConstraint + try: + constraints = [LinearConstraint(*constraints)] + except (TypeError, ValueError, VisibleDeprecationWarning): + # argument was not a tuple representing a LinearConstraint + pass + + # Address cases 4/5 + for constraint in constraints: + # if it's not a LinearConstraint or something that represents a + # LinearConstraint at this point, it's invalid + if not isinstance(constraint, LinearConstraint): + try: + constraint = LinearConstraint(*constraint) + except TypeError as exc: + raise ValueError(message) from exc + As.append(csc_array(constraint.A)) + b_ls.append(np.atleast_1d(constraint.lb).astype(np.float64)) + b_us.append(np.atleast_1d(constraint.ub).astype(np.float64)) + + if len(As) > 1: + A = vstack(As, format="csc") + b_l = np.concatenate(b_ls) + b_u = np.concatenate(b_us) + else: # avoid unnecessary copying + A = As[0] + b_l = b_ls[0] + b_u = b_us[0] + + return A, b_l, b_u + + +def _milp_iv(c, integrality, bounds, constraints, options): + # objective IV + if issparse(c): + raise ValueError("`c` must be a dense array.") + c = np.atleast_1d(c).astype(np.float64) + if c.ndim != 1 or c.size == 0 or not np.all(np.isfinite(c)): + message = ("`c` must be a one-dimensional array of finite numbers " + "with at least one element.") + raise ValueError(message) + + # integrality IV + if issparse(integrality): + raise ValueError("`integrality` must be a dense array.") + message = ("`integrality` must contain integers 0-3 and be broadcastable " + "to `c.shape`.") + if integrality is None: + integrality = 0 + try: + integrality = np.broadcast_to(integrality, c.shape).astype(np.uint8) + except ValueError: + raise ValueError(message) + if integrality.min() < 0 or integrality.max() > 3: + raise ValueError(message) + + # bounds IV + if bounds is None: + bounds = Bounds(0, np.inf) + elif not isinstance(bounds, Bounds): + message = ("`bounds` must be convertible into an instance of " + "`scipy.optimize.Bounds`.") + try: + bounds = Bounds(*bounds) + except TypeError as exc: + raise ValueError(message) from exc + + try: + lb = np.broadcast_to(bounds.lb, c.shape).astype(np.float64) + ub = np.broadcast_to(bounds.ub, c.shape).astype(np.float64) + except (ValueError, TypeError) as exc: + message = ("`bounds.lb` and `bounds.ub` must contain reals and " + "be broadcastable to `c.shape`.") + raise ValueError(message) from exc + + # constraints IV + if not constraints: + constraints = [LinearConstraint(np.empty((0, c.size)), + np.empty((0,)), np.empty((0,)))] + try: + A, b_l, b_u = _constraints_to_components(constraints) + except ValueError as exc: + message = ("`constraints` (or each element within `constraints`) must " + "be convertible into an instance of " + "`scipy.optimize.LinearConstraint`.") + raise ValueError(message) from exc + + if A.shape != (b_l.size, c.size): + message = "The shape of `A` must be (len(b_l), len(c))." 
+ raise ValueError(message) + indptr, indices, data = A.indptr, A.indices, A.data.astype(np.float64) + + # options IV + options = options or {} + supported_options = {'disp', 'presolve', 'time_limit', 'node_limit', + 'mip_rel_gap'} + unsupported_options = set(options).difference(supported_options) + if unsupported_options: + message = (f"Unrecognized options detected: {unsupported_options}. " + "These will be passed to HiGHS verbatim.") + warnings.warn(message, RuntimeWarning, stacklevel=3) + options_iv = {'log_to_console': options.pop("disp", False), + 'mip_max_nodes': options.pop("node_limit", None)} + options_iv.update(options) + + return c, integrality, lb, ub, indptr, indices, data, b_l, b_u, options_iv + + +def milp(c, *, integrality=None, bounds=None, constraints=None, options=None): + r""" + Mixed-integer linear programming + + Solves problems of the following form: + + .. math:: + + \min_x \ & c^T x \\ + \mbox{such that} \ & b_l \leq A x \leq b_u,\\ + & l \leq x \leq u, \\ + & x_i \in \mathbb{Z}, i \in X_i + + where :math:`x` is a vector of decision variables; + :math:`c`, :math:`b_l`, :math:`b_u`, :math:`l`, and :math:`u` are vectors; + :math:`A` is a matrix, and :math:`X_i` is the set of indices of + decision variables that must be integral. (In this context, a + variable that can assume only integer values is said to be "integral"; + it has an "integrality" constraint.) + + Alternatively, that's: + + minimize:: + + c @ x + + such that:: + + b_l <= A @ x <= b_u + l <= x <= u + Specified elements of x must be integers + + By default, ``l = 0`` and ``u = np.inf`` unless specified with + ``bounds``. + + Parameters + ---------- + c : 1D dense array_like + The coefficients of the linear objective function to be minimized. + `c` is converted to a double precision array before the problem is + solved. + integrality : 1D dense array_like, optional + Indicates the type of integrality constraint on each decision variable. + + ``0`` : Continuous variable; no integrality constraint. + + ``1`` : Integer variable; decision variable must be an integer + within `bounds`. + + ``2`` : Semi-continuous variable; decision variable must be within + `bounds` or take value ``0``. + + ``3`` : Semi-integer variable; decision variable must be an integer + within `bounds` or take value ``0``. + + By default, all variables are continuous. `integrality` is converted + to an array of integers before the problem is solved. + + bounds : scipy.optimize.Bounds, optional + Bounds on the decision variables. Lower and upper bounds are converted + to double precision arrays before the problem is solved. The + ``keep_feasible`` parameter of the `Bounds` object is ignored. If + not specified, all decision variables are constrained to be + non-negative. + constraints : sequence of scipy.optimize.LinearConstraint, optional + Linear constraints of the optimization problem. Arguments may be + one of the following: + + 1. A single `LinearConstraint` object + 2. A single tuple that can be converted to a `LinearConstraint` object + as ``LinearConstraint(*constraints)`` + 3. A sequence composed entirely of objects of type 1. and 2. + + Before the problem is solved, all values are converted to double + precision, and the matrices of constraint coefficients are converted to + instances of `scipy.sparse.csc_array`. The ``keep_feasible`` parameter + of `LinearConstraint` objects is ignored. + options : dict, optional + A dictionary of solver options. The following keys are recognized. 
+ + disp : bool (default: ``False``) + Set to ``True`` if indicators of optimization status are to be + printed to the console during optimization. + node_limit : int, optional + The maximum number of nodes (linear program relaxations) to solve + before stopping. Default is no maximum number of nodes. + presolve : bool (default: ``True``) + Presolve attempts to identify trivial infeasibilities, + identify trivial unboundedness, and simplify the problem before + sending it to the main solver. + time_limit : float, optional + The maximum number of seconds allotted to solve the problem. + Default is no time limit. + mip_rel_gap : float, optional + Termination criterion for MIP solver: solver will terminate when + the gap between the primal objective value and the dual objective + bound, scaled by the primal objective value, is <= mip_rel_gap. + + Returns + ------- + res : OptimizeResult + An instance of :class:`scipy.optimize.OptimizeResult`. The object + is guaranteed to have the following attributes. + + status : int + An integer representing the exit status of the algorithm. + + ``0`` : Optimal solution found. + + ``1`` : Iteration or time limit reached. + + ``2`` : Problem is infeasible. + + ``3`` : Problem is unbounded. + + ``4`` : Other; see message for details. + + success : bool + ``True`` when an optimal solution is found and ``False`` otherwise. + + message : str + A string descriptor of the exit status of the algorithm. + + The following attributes will also be present, but the values may be + ``None``, depending on the solution status. + + x : ndarray + The values of the decision variables that minimize the + objective function while satisfying the constraints. + fun : float + The optimal value of the objective function ``c @ x``. + mip_node_count : int + The number of subproblems or "nodes" solved by the MILP solver. + mip_dual_bound : float + The MILP solver's final estimate of the lower bound on the optimal + solution. + mip_gap : float + The difference between the primal objective value and the dual + objective bound, scaled by the primal objective value. + + Notes + ----- + `milp` is a wrapper of the HiGHS linear optimization software [1]_. The + algorithm is deterministic, and it typically finds the global optimum of + moderately challenging mixed-integer linear programs (when it exists). + + References + ---------- + .. [1] Huangfu, Q., Galabova, I., Feldmeier, M., and Hall, J. A. J. + "HiGHS - high performance software for linear optimization." + https://highs.dev/ + .. [2] Huangfu, Q. and Hall, J. A. J. "Parallelizing the dual revised + simplex method." Mathematical Programming Computation, 10 (1), + 119-142, 2018. DOI: 10.1007/s12532-017-0130-5 + + Examples + -------- + Consider the problem at + https://en.wikipedia.org/wiki/Integer_programming#Example, which is + expressed as a maximization problem of two variables. Since `milp` requires + that the problem be expressed as a minimization problem, the objective + function coefficients on the decision variables are: + + >>> import numpy as np + >>> c = -np.array([0, 1]) + + Note the negative sign: we maximize the original objective function + by minimizing the negative of the objective function. 
+ + We collect the coefficients of the constraints into arrays like: + + >>> A = np.array([[-1, 1], [3, 2], [2, 3]]) + >>> b_u = np.array([1, 12, 12]) + >>> b_l = np.full_like(b_u, -np.inf, dtype=float) + + Because there is no lower limit on these constraints, we have defined a + variable ``b_l`` full of values representing negative infinity. This may + be unfamiliar to users of `scipy.optimize.linprog`, which only accepts + "less than" (or "upper bound") inequality constraints of the form + ``A_ub @ x <= b_u``. By accepting both ``b_l`` and ``b_u`` of constraints + ``b_l <= A_ub @ x <= b_u``, `milp` makes it easy to specify "greater than" + inequality constraints, "less than" inequality constraints, and equality + constraints concisely. + + These arrays are collected into a single `LinearConstraint` object like: + + >>> from scipy.optimize import LinearConstraint + >>> constraints = LinearConstraint(A, b_l, b_u) + + The non-negativity bounds on the decision variables are enforced by + default, so we do not need to provide an argument for `bounds`. + + Finally, the problem states that both decision variables must be integers: + + >>> integrality = np.ones_like(c) + + We solve the problem like: + + >>> from scipy.optimize import milp + >>> res = milp(c=c, constraints=constraints, integrality=integrality) + >>> res.x + [2.0, 2.0] + + Note that had we solved the relaxed problem (without integrality + constraints): + + >>> res = milp(c=c, constraints=constraints) # OR: + >>> # from scipy.optimize import linprog; res = linprog(c, A, b_u) + >>> res.x + [1.8, 2.8] + + we would not have obtained the correct solution by rounding to the nearest + integers. + + Other examples are given :ref:`in the tutorial `. + + """ + args_iv = _milp_iv(c, integrality, bounds, constraints, options) + c, integrality, lb, ub, indptr, indices, data, b_l, b_u, options = args_iv + + highs_res = _highs_wrapper(c, indptr, indices, data, b_l, b_u, + lb, ub, integrality, options) + + res = {} + + # Convert to scipy-style status and message + highs_status = highs_res.get('status', None) + highs_message = highs_res.get('message', None) + status, message = _highs_to_scipy_status_message(highs_status, + highs_message) + res['status'] = status + res['message'] = message + res['success'] = (status == 0) + x = highs_res.get('x', None) + res['x'] = np.array(x) if x is not None else None + res['fun'] = highs_res.get('fun', None) + res['mip_node_count'] = highs_res.get('mip_node_count', None) + res['mip_dual_bound'] = highs_res.get('mip_dual_bound', None) + res['mip_gap'] = highs_res.get('mip_gap', None) + + return OptimizeResult(res) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_minimize.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_minimize.py new file mode 100644 index 0000000000000000000000000000000000000000..0b47c57cb3a12c6f4a7adb5506089c569336b4c4 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_minimize.py @@ -0,0 +1,1131 @@ +""" +Unified interfaces to minimization algorithms. + +Functions +--------- +- minimize : minimization of a function of several variables. +- minimize_scalar : minimization of a function of one variable. 
+""" + +__all__ = ['minimize', 'minimize_scalar'] + + +from warnings import warn + +import numpy as np + +# unconstrained minimization +from ._optimize import (_minimize_neldermead, _minimize_powell, _minimize_cg, + _minimize_bfgs, _minimize_newtoncg, + _minimize_scalar_brent, _minimize_scalar_bounded, + _minimize_scalar_golden, MemoizeJac, OptimizeResult, + _wrap_callback, _recover_from_bracket_error) +from ._trustregion_dogleg import _minimize_dogleg +from ._trustregion_ncg import _minimize_trust_ncg +from ._trustregion_krylov import _minimize_trust_krylov +from ._trustregion_exact import _minimize_trustregion_exact +from ._trustregion_constr import _minimize_trustregion_constr + +# constrained minimization +from ._lbfgsb_py import _minimize_lbfgsb +from ._tnc import _minimize_tnc +from ._cobyla_py import _minimize_cobyla +from ._cobyqa_py import _minimize_cobyqa +from ._slsqp_py import _minimize_slsqp +from ._constraints import (old_bound_to_new, new_bounds_to_old, + old_constraint_to_new, new_constraint_to_old, + NonlinearConstraint, LinearConstraint, Bounds, + PreparedConstraint) +from ._differentiable_functions import FD_METHODS + +MINIMIZE_METHODS = ['nelder-mead', 'powell', 'cg', 'bfgs', 'newton-cg', + 'l-bfgs-b', 'tnc', 'cobyla', 'cobyqa', 'slsqp', + 'trust-constr', 'dogleg', 'trust-ncg', 'trust-exact', + 'trust-krylov'] + +# These methods support the new callback interface (passed an OptimizeResult) +MINIMIZE_METHODS_NEW_CB = ['nelder-mead', 'powell', 'cg', 'bfgs', 'newton-cg', + 'l-bfgs-b', 'trust-constr', 'dogleg', 'trust-ncg', + 'trust-exact', 'trust-krylov', 'cobyqa'] + +MINIMIZE_SCALAR_METHODS = ['brent', 'bounded', 'golden'] + +def minimize(fun, x0, args=(), method=None, jac=None, hess=None, + hessp=None, bounds=None, constraints=(), tol=None, + callback=None, options=None): + """Minimization of scalar function of one or more variables. + + Parameters + ---------- + fun : callable + The objective function to be minimized:: + + fun(x, *args) -> float + + where ``x`` is a 1-D array with shape (n,) and ``args`` + is a tuple of the fixed parameters needed to completely + specify the function. + + Suppose the callable has signature ``f0(x, *my_args, **my_kwargs)``, where + ``my_args`` and ``my_kwargs`` are required positional and keyword arguments. + Rather than passing ``f0`` as the callable, wrap it to accept + only ``x``; e.g., pass ``fun=lambda x: f0(x, *my_args, **my_kwargs)`` as the + callable, where ``my_args`` (tuple) and ``my_kwargs`` (dict) have been + gathered before invoking this function. + x0 : ndarray, shape (n,) + Initial guess. Array of real elements of size (n,), + where ``n`` is the number of independent variables. + args : tuple, optional + Extra arguments passed to the objective function and its + derivatives (`fun`, `jac` and `hess` functions). + method : str or callable, optional + Type of solver. Should be one of + + - 'Nelder-Mead' :ref:`(see here) ` + - 'Powell' :ref:`(see here) ` + - 'CG' :ref:`(see here) ` + - 'BFGS' :ref:`(see here) ` + - 'Newton-CG' :ref:`(see here) ` + - 'L-BFGS-B' :ref:`(see here) ` + - 'TNC' :ref:`(see here) ` + - 'COBYLA' :ref:`(see here) ` + - 'COBYQA' :ref:`(see here) ` + - 'SLSQP' :ref:`(see here) ` + - 'trust-constr':ref:`(see here) ` + - 'dogleg' :ref:`(see here) ` + - 'trust-ncg' :ref:`(see here) ` + - 'trust-exact' :ref:`(see here) ` + - 'trust-krylov' :ref:`(see here) ` + - custom - a callable object, see below for description. 
+ + If not given, chosen to be one of ``BFGS``, ``L-BFGS-B``, ``SLSQP``, + depending on whether or not the problem has constraints or bounds. + jac : {callable, '2-point', '3-point', 'cs', bool}, optional + Method for computing the gradient vector. Only for CG, BFGS, + Newton-CG, L-BFGS-B, TNC, SLSQP, dogleg, trust-ncg, trust-krylov, + trust-exact and trust-constr. + If it is a callable, it should be a function that returns the gradient + vector:: + + jac(x, *args) -> array_like, shape (n,) + + where ``x`` is an array with shape (n,) and ``args`` is a tuple with + the fixed parameters. If `jac` is a Boolean and is True, `fun` is + assumed to return a tuple ``(f, g)`` containing the objective + function and the gradient. + Methods 'Newton-CG', 'trust-ncg', 'dogleg', 'trust-exact', and + 'trust-krylov' require that either a callable be supplied, or that + `fun` return the objective and gradient. + If None or False, the gradient will be estimated using 2-point finite + difference estimation with an absolute step size. + Alternatively, the keywords {'2-point', '3-point', 'cs'} can be used + to select a finite difference scheme for numerical estimation of the + gradient with a relative step size. These finite difference schemes + obey any specified `bounds`. + hess : {callable, '2-point', '3-point', 'cs', HessianUpdateStrategy}, optional + Method for computing the Hessian matrix. Only for Newton-CG, dogleg, + trust-ncg, trust-krylov, trust-exact and trust-constr. + If it is callable, it should return the Hessian matrix:: + + hess(x, *args) -> {LinearOperator, spmatrix, array}, (n, n) + + where ``x`` is a (n,) ndarray and ``args`` is a tuple with the fixed + parameters. + The keywords {'2-point', '3-point', 'cs'} can also be used to select + a finite difference scheme for numerical estimation of the hessian. + Alternatively, objects implementing the `HessianUpdateStrategy` + interface can be used to approximate the Hessian. Available + quasi-Newton methods implementing this interface are: + + - `BFGS` + - `SR1` + + Not all of the options are available for each of the methods; for + availability refer to the notes. + hessp : callable, optional + Hessian of objective function times an arbitrary vector p. Only for + Newton-CG, trust-ncg, trust-krylov, trust-constr. + Only one of `hessp` or `hess` needs to be given. If `hess` is + provided, then `hessp` will be ignored. `hessp` must compute the + Hessian times an arbitrary vector:: + + hessp(x, p, *args) -> ndarray shape (n,) + + where ``x`` is a (n,) ndarray, ``p`` is an arbitrary vector with + dimension (n,) and ``args`` is a tuple with the fixed + parameters. + bounds : sequence or `Bounds`, optional + Bounds on variables for Nelder-Mead, L-BFGS-B, TNC, SLSQP, Powell, + trust-constr, COBYLA, and COBYQA methods. There are two ways to specify + the bounds: + + 1. Instance of `Bounds` class. + 2. Sequence of ``(min, max)`` pairs for each element in `x`. None + is used to specify no bound. + + constraints : {Constraint, dict} or List of {Constraint, dict}, optional + Constraints definition. Only for COBYLA, COBYQA, SLSQP and trust-constr. + + Constraints for 'trust-constr' and 'cobyqa' are defined as a single object + or a list of objects specifying constraints to the optimization problem. + Available constraints are: + + - `LinearConstraint` + - `NonlinearConstraint` + + Constraints for COBYLA, SLSQP are defined as a list of dictionaries. + Each dictionary with fields: + + type : str + Constraint type: 'eq' for equality, 'ineq' for inequality. 
+ fun : callable + The function defining the constraint. + jac : callable, optional + The Jacobian of `fun` (only for SLSQP). + args : sequence, optional + Extra arguments to be passed to the function and Jacobian. + + Equality constraint means that the constraint function result is to + be zero whereas inequality means that it is to be non-negative. + Note that COBYLA only supports inequality constraints. + + tol : float, optional + Tolerance for termination. When `tol` is specified, the selected + minimization algorithm sets some relevant solver-specific tolerance(s) + equal to `tol`. For detailed control, use solver-specific + options. + options : dict, optional + A dictionary of solver options. All methods except `TNC` accept the + following generic options: + + maxiter : int + Maximum number of iterations to perform. Depending on the + method each iteration may use several function evaluations. + + For `TNC` use `maxfun` instead of `maxiter`. + disp : bool + Set to True to print convergence messages. + + For method-specific options, see :func:`show_options()`. + callback : callable, optional + A callable called after each iteration. + + All methods except TNC, SLSQP, and COBYLA support a callable with + the signature:: + + callback(intermediate_result: OptimizeResult) + + where ``intermediate_result`` is a keyword parameter containing an + `OptimizeResult` with attributes ``x`` and ``fun``, the present values + of the parameter vector and objective function. Note that the name + of the parameter must be ``intermediate_result`` for the callback + to be passed an `OptimizeResult`. These methods will also terminate if + the callback raises ``StopIteration``. + + All methods except trust-constr (also) support a signature like:: + + callback(xk) + + where ``xk`` is the current parameter vector. + + Introspection is used to determine which of the signatures above to + invoke. + + Returns + ------- + res : OptimizeResult + The optimization result represented as a ``OptimizeResult`` object. + Important attributes are: ``x`` the solution array, ``success`` a + Boolean flag indicating if the optimizer exited successfully and + ``message`` which describes the cause of the termination. See + `OptimizeResult` for a description of other attributes. + + See also + -------- + minimize_scalar : Interface to minimization algorithms for scalar + univariate functions + show_options : Additional options accepted by the solvers + + Notes + ----- + This section describes the available solvers that can be selected by the + 'method' parameter. The default method is *BFGS*. + + **Unconstrained minimization** + + Method :ref:`CG ` uses a nonlinear conjugate + gradient algorithm by Polak and Ribiere, a variant of the + Fletcher-Reeves method described in [5]_ pp.120-122. Only the + first derivatives are used. + + Method :ref:`BFGS ` uses the quasi-Newton + method of Broyden, Fletcher, Goldfarb, and Shanno (BFGS) [5]_ + pp. 136. It uses the first derivatives only. BFGS has proven good + performance even for non-smooth optimizations. This method also + returns an approximation of the Hessian inverse, stored as + `hess_inv` in the OptimizeResult object. + + Method :ref:`Newton-CG ` uses a + Newton-CG algorithm [5]_ pp. 168 (also known as the truncated + Newton method). It uses a CG method to the compute the search + direction. See also *TNC* method for a box-constrained + minimization with a similar algorithm. Suitable for large-scale + problems. 
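A minimal sketch of the Newton-CG pattern just described, supplying a Hessian-vector product instead of a full Hessian (editor's illustration, not part of the vendored file; it uses the Rosenbrock helpers shipped with `scipy.optimize`):

    import numpy as np
    from scipy.optimize import minimize, rosen, rosen_der, rosen_hess_prod

    x0 = np.array([1.3, 0.7, 0.8, 1.9, 1.2])
    # hessp(x, p) returns H(x) @ p, so the full (n, n) Hessian is never formed
    res = minimize(rosen, x0, method='Newton-CG',
                   jac=rosen_der, hessp=rosen_hess_prod,
                   options={'xtol': 1e-8})
    # res.x is expected to lie close to np.ones(5)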
+ + Method :ref:`dogleg ` uses the dog-leg + trust-region algorithm [5]_ for unconstrained minimization. This + algorithm requires the gradient and Hessian; furthermore the + Hessian is required to be positive definite. + + Method :ref:`trust-ncg ` uses the + Newton conjugate gradient trust-region algorithm [5]_ for + unconstrained minimization. This algorithm requires the gradient + and either the Hessian or a function that computes the product of + the Hessian with a given vector. Suitable for large-scale problems. + + Method :ref:`trust-krylov ` uses + the Newton GLTR trust-region algorithm [14]_, [15]_ for unconstrained + minimization. This algorithm requires the gradient + and either the Hessian or a function that computes the product of + the Hessian with a given vector. Suitable for large-scale problems. + On indefinite problems it requires usually less iterations than the + `trust-ncg` method and is recommended for medium and large-scale problems. + + Method :ref:`trust-exact ` + is a trust-region method for unconstrained minimization in which + quadratic subproblems are solved almost exactly [13]_. This + algorithm requires the gradient and the Hessian (which is + *not* required to be positive definite). It is, in many + situations, the Newton method to converge in fewer iterations + and the most recommended for small and medium-size problems. + + **Bound-Constrained minimization** + + Method :ref:`Nelder-Mead ` uses the + Simplex algorithm [1]_, [2]_. This algorithm is robust in many + applications. However, if numerical computation of derivative can be + trusted, other algorithms using the first and/or second derivatives + information might be preferred for their better performance in + general. + + Method :ref:`L-BFGS-B ` uses the L-BFGS-B + algorithm [6]_, [7]_ for bound constrained minimization. + + Method :ref:`Powell ` is a modification + of Powell's method [3]_, [4]_ which is a conjugate direction + method. It performs sequential one-dimensional minimizations along + each vector of the directions set (`direc` field in `options` and + `info`), which is updated at each iteration of the main + minimization loop. The function need not be differentiable, and no + derivatives are taken. If bounds are not provided, then an + unbounded line search will be used. If bounds are provided and + the initial guess is within the bounds, then every function + evaluation throughout the minimization procedure will be within + the bounds. If bounds are provided, the initial guess is outside + the bounds, and `direc` is full rank (default has full rank), then + some function evaluations during the first iteration may be + outside the bounds, but every function evaluation after the first + iteration will be within the bounds. If `direc` is not full rank, + then some parameters may not be optimized and the solution is not + guaranteed to be within the bounds. + + Method :ref:`TNC ` uses a truncated Newton + algorithm [5]_, [8]_ to minimize a function with variables subject + to bounds. This algorithm uses gradient information; it is also + called Newton Conjugate-Gradient. It differs from the *Newton-CG* + method described above as it wraps a C implementation and allows + each variable to be given upper and lower bounds. + + **Constrained Minimization** + + Method :ref:`COBYLA ` uses the + Constrained Optimization BY Linear Approximation (COBYLA) method + [9]_, [10]_, [11]_. The algorithm is based on linear + approximations to the objective function and each constraint. 
The + method wraps a FORTRAN implementation of the algorithm. The + constraints functions 'fun' may return either a single number + or an array or list of numbers. + + Method :ref:`COBYQA ` uses the Constrained + Optimization BY Quadratic Approximations (COBYQA) method [18]_. The + algorithm is a derivative-free trust-region SQP method based on quadratic + approximations to the objective function and each nonlinear constraint. The + bounds are treated as unrelaxable constraints, in the sense that the + algorithm always respects them throughout the optimization process. + + Method :ref:`SLSQP ` uses Sequential + Least SQuares Programming to minimize a function of several + variables with any combination of bounds, equality and inequality + constraints. The method wraps the SLSQP Optimization subroutine + originally implemented by Dieter Kraft [12]_. Note that the + wrapper handles infinite values in bounds by converting them into + large floating values. + + Method :ref:`trust-constr ` is a + trust-region algorithm for constrained optimization. It switches + between two implementations depending on the problem definition. + It is the most versatile constrained minimization algorithm + implemented in SciPy and the most appropriate for large-scale problems. + For equality constrained problems it is an implementation of Byrd-Omojokun + Trust-Region SQP method described in [17]_ and in [5]_, p. 549. When + inequality constraints are imposed as well, it switches to the trust-region + interior point method described in [16]_. This interior point algorithm, + in turn, solves inequality constraints by introducing slack variables + and solving a sequence of equality-constrained barrier problems + for progressively smaller values of the barrier parameter. + The previously described equality constrained SQP method is + used to solve the subproblems with increasing levels of accuracy + as the iterate gets closer to a solution. + + **Finite-Difference Options** + + For Method :ref:`trust-constr ` + the gradient and the Hessian may be approximated using + three finite-difference schemes: {'2-point', '3-point', 'cs'}. + The scheme 'cs' is, potentially, the most accurate but it + requires the function to correctly handle complex inputs and to + be differentiable in the complex plane. The scheme '3-point' is more + accurate than '2-point' but requires twice as many operations. If the + gradient is estimated via finite-differences the Hessian must be + estimated using one of the quasi-Newton strategies. 
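A minimal sketch of that combination for 'trust-constr' (editor's illustration only; `f` is a made-up quadratic): the gradient is estimated with the '3-point' scheme, so the Hessian is supplied as a quasi-Newton `HessianUpdateStrategy`.

    import numpy as np
    from scipy.optimize import minimize, BFGS

    def f(x):
        # simple smooth test objective with minimum at (1.0, -0.5)
        return (x[0] - 1.0) ** 2 + (x[1] + 0.5) ** 2

    res = minimize(f, np.array([3.0, 3.0]), method='trust-constr',
                   jac='3-point', hess=BFGS())
    # res.x should approach [1.0, -0.5]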
+ + **Method specific options for the** `hess` **keyword** + + +--------------+------+----------+-------------------------+-----+ + | method/Hess | None | callable | '2-point/'3-point'/'cs' | HUS | + +==============+======+==========+=========================+=====+ + | Newton-CG | x | (n, n) | x | x | + | | | LO | | | + +--------------+------+----------+-------------------------+-----+ + | dogleg | | (n, n) | | | + +--------------+------+----------+-------------------------+-----+ + | trust-ncg | | (n, n) | x | x | + +--------------+------+----------+-------------------------+-----+ + | trust-krylov | | (n, n) | x | x | + +--------------+------+----------+-------------------------+-----+ + | trust-exact | | (n, n) | | | + +--------------+------+----------+-------------------------+-----+ + | trust-constr | x | (n, n) | x | x | + | | | LO | | | + | | | sp | | | + +--------------+------+----------+-------------------------+-----+ + + where LO=LinearOperator, sp=Sparse matrix, HUS=HessianUpdateStrategy + + **Custom minimizers** + + It may be useful to pass a custom minimization method, for example + when using a frontend to this method such as `scipy.optimize.basinhopping` + or a different library. You can simply pass a callable as the ``method`` + parameter. + + The callable is called as ``method(fun, x0, args, **kwargs, **options)`` + where ``kwargs`` corresponds to any other parameters passed to `minimize` + (such as `callback`, `hess`, etc.), except the `options` dict, which has + its contents also passed as `method` parameters pair by pair. Also, if + `jac` has been passed as a bool type, `jac` and `fun` are mangled so that + `fun` returns just the function values and `jac` is converted to a function + returning the Jacobian. The method shall return an `OptimizeResult` + object. + + The provided `method` callable must be able to accept (and possibly ignore) + arbitrary parameters; the set of parameters accepted by `minimize` may + expand in future versions and then these parameters will be passed to + the method. You can find an example in the scipy.optimize tutorial. + + References + ---------- + .. [1] Nelder, J A, and R Mead. 1965. A Simplex Method for Function + Minimization. The Computer Journal 7: 308-13. + .. [2] Wright M H. 1996. Direct search methods: Once scorned, now + respectable, in Numerical Analysis 1995: Proceedings of the 1995 + Dundee Biennial Conference in Numerical Analysis (Eds. D F + Griffiths and G A Watson). Addison Wesley Longman, Harlow, UK. + 191-208. + .. [3] Powell, M J D. 1964. An efficient method for finding the minimum of + a function of several variables without calculating derivatives. The + Computer Journal 7: 155-162. + .. [4] Press W, S A Teukolsky, W T Vetterling and B P Flannery. + Numerical Recipes (any edition), Cambridge University Press. + .. [5] Nocedal, J, and S J Wright. 2006. Numerical Optimization. + Springer New York. + .. [6] Byrd, R H and P Lu and J. Nocedal. 1995. A Limited Memory + Algorithm for Bound Constrained Optimization. SIAM Journal on + Scientific and Statistical Computing 16 (5): 1190-1208. + .. [7] Zhu, C and R H Byrd and J Nocedal. 1997. L-BFGS-B: Algorithm + 778: L-BFGS-B, FORTRAN routines for large scale bound constrained + optimization. ACM Transactions on Mathematical Software 23 (4): + 550-560. + .. [8] Nash, S G. Newton-Type Minimization Via the Lanczos Method. + 1984. SIAM Journal of Numerical Analysis 21: 770-778. + .. [9] Powell, M J D. 
A direct search optimization method that models + the objective and constraint functions by linear interpolation. + 1994. Advances in Optimization and Numerical Analysis, eds. S. Gomez + and J-P Hennart, Kluwer Academic (Dordrecht), 51-67. + .. [10] Powell M J D. Direct search algorithms for optimization + calculations. 1998. Acta Numerica 7: 287-336. + .. [11] Powell M J D. A view of algorithms for optimization without + derivatives. 2007.Cambridge University Technical Report DAMTP + 2007/NA03 + .. [12] Kraft, D. A software package for sequential quadratic + programming. 1988. Tech. Rep. DFVLR-FB 88-28, DLR German Aerospace + Center -- Institute for Flight Mechanics, Koln, Germany. + .. [13] Conn, A. R., Gould, N. I., and Toint, P. L. + Trust region methods. 2000. Siam. pp. 169-200. + .. [14] F. Lenders, C. Kirches, A. Potschka: "trlib: A vector-free + implementation of the GLTR method for iterative solution of + the trust region problem", :arxiv:`1611.04718` + .. [15] N. Gould, S. Lucidi, M. Roma, P. Toint: "Solving the + Trust-Region Subproblem using the Lanczos Method", + SIAM J. Optim., 9(2), 504--525, (1999). + .. [16] Byrd, Richard H., Mary E. Hribar, and Jorge Nocedal. 1999. + An interior point algorithm for large-scale nonlinear programming. + SIAM Journal on Optimization 9.4: 877-900. + .. [17] Lalee, Marucha, Jorge Nocedal, and Todd Plantenga. 1998. On the + implementation of an algorithm for large-scale equality constrained + optimization. SIAM Journal on Optimization 8.3: 682-706. + .. [18] Ragonneau, T. M. *Model-Based Derivative-Free Optimization Methods + and Software*. PhD thesis, Department of Applied Mathematics, The Hong + Kong Polytechnic University, Hong Kong, China, 2022. URL: + https://theses.lib.polyu.edu.hk/handle/200/12294. + + Examples + -------- + Let us consider the problem of minimizing the Rosenbrock function. This + function (and its respective derivatives) is implemented in `rosen` + (resp. `rosen_der`, `rosen_hess`) in the `scipy.optimize`. + + >>> from scipy.optimize import minimize, rosen, rosen_der + + A simple application of the *Nelder-Mead* method is: + + >>> x0 = [1.3, 0.7, 0.8, 1.9, 1.2] + >>> res = minimize(rosen, x0, method='Nelder-Mead', tol=1e-6) + >>> res.x + array([ 1., 1., 1., 1., 1.]) + + Now using the *BFGS* algorithm, using the first derivative and a few + options: + + >>> res = minimize(rosen, x0, method='BFGS', jac=rosen_der, + ... options={'gtol': 1e-6, 'disp': True}) + Optimization terminated successfully. + Current function value: 0.000000 + Iterations: 26 + Function evaluations: 31 + Gradient evaluations: 31 + >>> res.x + array([ 1., 1., 1., 1., 1.]) + >>> print(res.message) + Optimization terminated successfully. + >>> res.hess_inv + array([ + [ 0.00749589, 0.01255155, 0.02396251, 0.04750988, 0.09495377], # may vary + [ 0.01255155, 0.02510441, 0.04794055, 0.09502834, 0.18996269], + [ 0.02396251, 0.04794055, 0.09631614, 0.19092151, 0.38165151], + [ 0.04750988, 0.09502834, 0.19092151, 0.38341252, 0.7664427 ], + [ 0.09495377, 0.18996269, 0.38165151, 0.7664427, 1.53713523] + ]) + + + Next, consider a minimization problem with several constraints (namely + Example 16.4 from [5]_). The objective function is: + + >>> fun = lambda x: (x[0] - 1)**2 + (x[1] - 2.5)**2 + + There are three constraints defined as: + + >>> cons = ({'type': 'ineq', 'fun': lambda x: x[0] - 2 * x[1] + 2}, + ... {'type': 'ineq', 'fun': lambda x: -x[0] - 2 * x[1] + 6}, + ... 
{'type': 'ineq', 'fun': lambda x: -x[0] + 2 * x[1] + 2}) + + And variables must be positive, hence the following bounds: + + >>> bnds = ((0, None), (0, None)) + + The optimization problem is solved using the SLSQP method as: + + >>> res = minimize(fun, (2, 0), method='SLSQP', bounds=bnds, + ... constraints=cons) + + It should converge to the theoretical solution (1.4 ,1.7). + + """ + x0 = np.atleast_1d(np.asarray(x0)) + + if x0.ndim != 1: + raise ValueError("'x0' must only have one dimension.") + + if x0.dtype.kind in np.typecodes["AllInteger"]: + x0 = np.asarray(x0, dtype=float) + + if not isinstance(args, tuple): + args = (args,) + + if method is None: + # Select automatically + if constraints: + method = 'SLSQP' + elif bounds is not None: + method = 'L-BFGS-B' + else: + method = 'BFGS' + + if callable(method): + meth = "_custom" + else: + meth = method.lower() + + if options is None: + options = {} + # check if optional parameters are supported by the selected method + # - jac + if meth in ('nelder-mead', 'powell', 'cobyla', 'cobyqa') and bool(jac): + warn(f'Method {method} does not use gradient information (jac).', + RuntimeWarning, stacklevel=2) + # - hess + if meth not in ('newton-cg', 'dogleg', 'trust-ncg', 'trust-constr', + 'trust-krylov', 'trust-exact', '_custom') and hess is not None: + warn(f'Method {method} does not use Hessian information (hess).', + RuntimeWarning, stacklevel=2) + # - hessp + if meth not in ('newton-cg', 'trust-ncg', 'trust-constr', + 'trust-krylov', '_custom') \ + and hessp is not None: + warn(f'Method {method} does not use Hessian-vector product' + ' information (hessp).', + RuntimeWarning, stacklevel=2) + # - constraints or bounds + if (meth not in ('cobyla', 'cobyqa', 'slsqp', 'trust-constr', '_custom') and + np.any(constraints)): + warn(f'Method {method} cannot handle constraints.', + RuntimeWarning, stacklevel=2) + if meth not in ( + 'nelder-mead', 'powell', 'l-bfgs-b', 'cobyla', 'cobyqa', 'slsqp', + 'tnc', 'trust-constr', '_custom') and bounds is not None: + warn(f'Method {method} cannot handle bounds.', + RuntimeWarning, stacklevel=2) + # - return_all + if (meth in ('l-bfgs-b', 'tnc', 'cobyla', 'cobyqa', 'slsqp') and + options.get('return_all', False)): + warn(f'Method {method} does not support the return_all option.', + RuntimeWarning, stacklevel=2) + + # check gradient vector + if callable(jac): + pass + elif jac is True: + # fun returns func and grad + fun = MemoizeJac(fun) + jac = fun.derivative + elif (jac in FD_METHODS and + meth in ['trust-constr', 'bfgs', 'cg', 'l-bfgs-b', 'tnc', 'slsqp']): + # finite differences with relative step + pass + elif meth in ['trust-constr']: + # default jac calculation for this method + jac = '2-point' + elif jac is None or bool(jac) is False: + # this will cause e.g. 
LBFGS to use forward difference, absolute step + jac = None + else: + # default if jac option is not understood + jac = None + + # set default tolerances + if tol is not None: + options = dict(options) + if meth == 'nelder-mead': + options.setdefault('xatol', tol) + options.setdefault('fatol', tol) + if meth in ('newton-cg', 'powell', 'tnc'): + options.setdefault('xtol', tol) + if meth in ('powell', 'l-bfgs-b', 'tnc', 'slsqp'): + options.setdefault('ftol', tol) + if meth in ('bfgs', 'cg', 'l-bfgs-b', 'tnc', 'dogleg', + 'trust-ncg', 'trust-exact', 'trust-krylov'): + options.setdefault('gtol', tol) + if meth in ('cobyla', '_custom'): + options.setdefault('tol', tol) + if meth == 'cobyqa': + options.setdefault('final_tr_radius', tol) + if meth == 'trust-constr': + options.setdefault('xtol', tol) + options.setdefault('gtol', tol) + options.setdefault('barrier_tol', tol) + + if meth == '_custom': + # custom method called before bounds and constraints are 'standardised' + # custom method should be able to accept whatever bounds/constraints + # are provided to it. + return method(fun, x0, args=args, jac=jac, hess=hess, hessp=hessp, + bounds=bounds, constraints=constraints, + callback=callback, **options) + + constraints = standardize_constraints(constraints, x0, meth) + + remove_vars = False + if bounds is not None: + # convert to new-style bounds so we only have to consider one case + bounds = standardize_bounds(bounds, x0, 'new') + bounds = _validate_bounds(bounds, x0, meth) + + if meth in {"tnc", "slsqp", "l-bfgs-b"}: + # These methods can't take the finite-difference derivatives they + # need when a variable is fixed by the bounds. To avoid this issue, + # remove fixed variables from the problem. + # NOTE: if this list is expanded, then be sure to update the + # accompanying tests and test_optimize.eb_data. Consider also if + # default OptimizeResult will need updating. + + # determine whether any variables are fixed + i_fixed = (bounds.lb == bounds.ub) + + if np.all(i_fixed): + # all the parameters are fixed, a minimizer is not able to do + # anything + return _optimize_result_for_equal_bounds( + fun, bounds, meth, args=args, constraints=constraints + ) + + # determine whether finite differences are needed for any grad/jac + fd_needed = (not callable(jac)) + for con in constraints: + if not callable(con.get('jac', None)): + fd_needed = True + + # If finite differences are ever used, remove all fixed variables + # Always remove fixed variables for TNC; see gh-14565 + remove_vars = i_fixed.any() and (fd_needed or meth == "tnc") + if remove_vars: + x_fixed = (bounds.lb)[i_fixed] + x0 = x0[~i_fixed] + bounds = _remove_from_bounds(bounds, i_fixed) + fun = _remove_from_func(fun, i_fixed, x_fixed) + if callable(callback): + callback = _remove_from_func(callback, i_fixed, x_fixed) + if callable(jac): + jac = _remove_from_func(jac, i_fixed, x_fixed, remove=1) + + # make a copy of the constraints so the user's version doesn't + # get changed. 
(Shallow copy is ok) + constraints = [con.copy() for con in constraints] + for con in constraints: # yes, guaranteed to be a list + con['fun'] = _remove_from_func(con['fun'], i_fixed, + x_fixed, min_dim=1, + remove=0) + if callable(con.get('jac', None)): + con['jac'] = _remove_from_func(con['jac'], i_fixed, + x_fixed, min_dim=2, + remove=1) + bounds = standardize_bounds(bounds, x0, meth) + + callback = _wrap_callback(callback, meth) + + if meth == 'nelder-mead': + res = _minimize_neldermead(fun, x0, args, callback, bounds=bounds, + **options) + elif meth == 'powell': + res = _minimize_powell(fun, x0, args, callback, bounds, **options) + elif meth == 'cg': + res = _minimize_cg(fun, x0, args, jac, callback, **options) + elif meth == 'bfgs': + res = _minimize_bfgs(fun, x0, args, jac, callback, **options) + elif meth == 'newton-cg': + res = _minimize_newtoncg(fun, x0, args, jac, hess, hessp, callback, + **options) + elif meth == 'l-bfgs-b': + res = _minimize_lbfgsb(fun, x0, args, jac, bounds, + callback=callback, **options) + elif meth == 'tnc': + res = _minimize_tnc(fun, x0, args, jac, bounds, callback=callback, + **options) + elif meth == 'cobyla': + res = _minimize_cobyla(fun, x0, args, constraints, callback=callback, + bounds=bounds, **options) + elif meth == 'cobyqa': + res = _minimize_cobyqa(fun, x0, args, bounds, constraints, callback, + **options) + elif meth == 'slsqp': + res = _minimize_slsqp(fun, x0, args, jac, bounds, + constraints, callback=callback, **options) + elif meth == 'trust-constr': + res = _minimize_trustregion_constr(fun, x0, args, jac, hess, hessp, + bounds, constraints, + callback=callback, **options) + elif meth == 'dogleg': + res = _minimize_dogleg(fun, x0, args, jac, hess, + callback=callback, **options) + elif meth == 'trust-ncg': + res = _minimize_trust_ncg(fun, x0, args, jac, hess, hessp, + callback=callback, **options) + elif meth == 'trust-krylov': + res = _minimize_trust_krylov(fun, x0, args, jac, hess, hessp, + callback=callback, **options) + elif meth == 'trust-exact': + res = _minimize_trustregion_exact(fun, x0, args, jac, hess, + callback=callback, **options) + else: + raise ValueError(f'Unknown solver {method}') + + if remove_vars: + res.x = _add_to_array(res.x, i_fixed, x_fixed) + res.jac = _add_to_array(res.jac, i_fixed, np.nan) + if "hess_inv" in res: + res.hess_inv = None # unknown + + if getattr(callback, 'stop_iteration', False): + res.success = False + res.status = 99 + res.message = "`callback` raised `StopIteration`." + + return res + + +def minimize_scalar(fun, bracket=None, bounds=None, args=(), + method=None, tol=None, options=None): + """Local minimization of scalar function of one variable. + + Parameters + ---------- + fun : callable + Objective function. + Scalar function, must return a scalar. + + Suppose the callable has signature ``f0(x, *my_args, **my_kwargs)``, where + ``my_args`` and ``my_kwargs`` are required positional and keyword arguments. + Rather than passing ``f0`` as the callable, wrap it to accept + only ``x``; e.g., pass ``fun=lambda x: f0(x, *my_args, **my_kwargs)`` as the + callable, where ``my_args`` (tuple) and ``my_kwargs`` (dict) have been + gathered before invoking this function. + + bracket : sequence, optional + For methods 'brent' and 'golden', `bracket` defines the bracketing + interval and is required. 
+ Either a triple ``(xa, xb, xc)`` satisfying ``xa < xb < xc`` and + ``func(xb) < func(xa) and func(xb) < func(xc)``, or a pair + ``(xa, xb)`` to be used as initial points for a downhill bracket search + (see `scipy.optimize.bracket`). + The minimizer ``res.x`` will not necessarily satisfy + ``xa <= res.x <= xb``. + bounds : sequence, optional + For method 'bounded', `bounds` is mandatory and must have two finite + items corresponding to the optimization bounds. + args : tuple, optional + Extra arguments passed to the objective function. + method : str or callable, optional + Type of solver. Should be one of: + + - :ref:`Brent ` + - :ref:`Bounded ` + - :ref:`Golden ` + - custom - a callable object (added in version 0.14.0), see below + + Default is "Bounded" if bounds are provided and "Brent" otherwise. + See the 'Notes' section for details of each solver. + + tol : float, optional + Tolerance for termination. For detailed control, use solver-specific + options. + options : dict, optional + A dictionary of solver options. + + maxiter : int + Maximum number of iterations to perform. + disp : bool + Set to True to print convergence messages. + + See :func:`show_options()` for solver-specific options. + + Returns + ------- + res : OptimizeResult + The optimization result represented as a ``OptimizeResult`` object. + Important attributes are: ``x`` the solution array, ``success`` a + Boolean flag indicating if the optimizer exited successfully and + ``message`` which describes the cause of the termination. See + `OptimizeResult` for a description of other attributes. + + See also + -------- + minimize : Interface to minimization algorithms for scalar multivariate + functions + show_options : Additional options accepted by the solvers + + Notes + ----- + This section describes the available solvers that can be selected by the + 'method' parameter. The default method is the ``"Bounded"`` Brent method if + `bounds` are passed and unbounded ``"Brent"`` otherwise. + + Method :ref:`Brent ` uses Brent's + algorithm [1]_ to find a local minimum. The algorithm uses inverse + parabolic interpolation when possible to speed up convergence of + the golden section method. + + Method :ref:`Golden ` uses the + golden section search technique [1]_. It uses analog of the bisection + method to decrease the bracketed interval. It is usually + preferable to use the *Brent* method. + + Method :ref:`Bounded ` can + perform bounded minimization [2]_ [3]_. It uses the Brent method to find a + local minimum in the interval x1 < xopt < x2. + + Note that the Brent and Golden methods do not guarantee success unless a + valid ``bracket`` triple is provided. If a three-point bracket cannot be + found, consider `scipy.optimize.minimize`. Also, all methods are intended + only for local minimization. When the function of interest has more than + one local minimum, consider :ref:`global_optimization`. + + **Custom minimizers** + + It may be useful to pass a custom minimization method, for example + when using some library frontend to minimize_scalar. You can simply + pass a callable as the ``method`` parameter. + + The callable is called as ``method(fun, args, **kwargs, **options)`` + where ``kwargs`` corresponds to any other parameters passed to `minimize` + (such as `bracket`, `tol`, etc.), except the `options` dict, which has + its contents also passed as `method` parameters pair by pair. The method + shall return an `OptimizeResult` object. 
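A minimal sketch of such a custom method (editor's illustration only; `grid_method` and `npts` are made-up names): a crude grid search that accepts the keywords `minimize_scalar` forwards and returns an `OptimizeResult`.

    import numpy as np
    from scipy.optimize import minimize_scalar, OptimizeResult

    def grid_method(fun, args=(), bracket=None, bounds=None, npts=1001, **ignored):
        # evaluate the objective on a uniform grid over `bounds` (or a default span)
        lo, hi = bounds if bounds is not None else (-10.0, 10.0)
        xs = np.linspace(lo, hi, npts)
        fs = np.array([fun(x, *args) for x in xs])
        i = int(np.argmin(fs))
        return OptimizeResult(x=xs[i], fun=fs[i], success=True, nfev=xs.size)

    res = minimize_scalar(lambda x: (x - 2.0) ** 2, bounds=(0.0, 5.0),
                          method=grid_method)
    # res.x should be close to 2.0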
+ + The provided `method` callable must be able to accept (and possibly ignore) + arbitrary parameters; the set of parameters accepted by `minimize` may + expand in future versions and then these parameters will be passed to + the method. You can find an example in the scipy.optimize tutorial. + + .. versionadded:: 0.11.0 + + References + ---------- + .. [1] Press, W., S.A. Teukolsky, W.T. Vetterling, and B.P. Flannery. + Numerical Recipes in C. Cambridge University Press. + .. [2] Forsythe, G.E., M. A. Malcolm, and C. B. Moler. "Computer Methods + for Mathematical Computations." Prentice-Hall Series in Automatic + Computation 259 (1977). + .. [3] Brent, Richard P. Algorithms for Minimization Without Derivatives. + Courier Corporation, 2013. + + Examples + -------- + Consider the problem of minimizing the following function. + + >>> def f(x): + ... return (x - 2) * x * (x + 2)**2 + + Using the *Brent* method, we find the local minimum as: + + >>> from scipy.optimize import minimize_scalar + >>> res = minimize_scalar(f) + >>> res.fun + -9.9149495908 + + The minimizer is: + + >>> res.x + 1.28077640403 + + Using the *Bounded* method, we find a local minimum with specified + bounds as: + + >>> res = minimize_scalar(f, bounds=(-3, -1), method='bounded') + >>> res.fun # minimum + 3.28365179850e-13 + >>> res.x # minimizer + -2.0000002026 + + """ + if not isinstance(args, tuple): + args = (args,) + + if callable(method): + meth = "_custom" + elif method is None: + meth = 'brent' if bounds is None else 'bounded' + else: + meth = method.lower() + if options is None: + options = {} + + if bounds is not None and meth in {'brent', 'golden'}: + message = f"Use of `bounds` is incompatible with 'method={method}'." + raise ValueError(message) + + if tol is not None: + options = dict(options) + if meth == 'bounded' and 'xatol' not in options: + warn("Method 'bounded' does not support relative tolerance in x; " + "defaulting to absolute tolerance.", + RuntimeWarning, stacklevel=2) + options['xatol'] = tol + elif meth == '_custom': + options.setdefault('tol', tol) + else: + options.setdefault('xtol', tol) + + # replace boolean "disp" option, if specified, by an integer value. + disp = options.get('disp') + if isinstance(disp, bool): + options['disp'] = 2 * int(disp) + + if meth == '_custom': + res = method(fun, args=args, bracket=bracket, bounds=bounds, **options) + elif meth == 'brent': + res = _recover_from_bracket_error(_minimize_scalar_brent, + fun, bracket, args, **options) + elif meth == 'bounded': + if bounds is None: + raise ValueError('The `bounds` parameter is mandatory for ' + 'method `bounded`.') + res = _minimize_scalar_bounded(fun, bounds, args, **options) + elif meth == 'golden': + res = _recover_from_bracket_error(_minimize_scalar_golden, + fun, bracket, args, **options) + else: + raise ValueError(f'Unknown solver {method}') + + # gh-16196 reported inconsistencies in the output shape of `res.x`. While + # fixing this, future-proof it for when the function is vectorized: + # the shape of `res.x` should match that of `res.fun`. 
+ res.fun = np.asarray(res.fun)[()] + res.x = np.reshape(res.x, res.fun.shape)[()] + return res + + +def _remove_from_bounds(bounds, i_fixed): + """Removes fixed variables from a `Bounds` instance""" + lb = bounds.lb[~i_fixed] + ub = bounds.ub[~i_fixed] + return Bounds(lb, ub) # don't mutate original Bounds object + + +def _remove_from_func(fun_in, i_fixed, x_fixed, min_dim=None, remove=0): + """Wraps a function such that fixed variables need not be passed in""" + def fun_out(x_in, *args, **kwargs): + x_out = np.zeros_like(i_fixed, dtype=x_in.dtype) + x_out[i_fixed] = x_fixed + x_out[~i_fixed] = x_in + y_out = fun_in(x_out, *args, **kwargs) + y_out = np.array(y_out) + + if min_dim == 1: + y_out = np.atleast_1d(y_out) + elif min_dim == 2: + y_out = np.atleast_2d(y_out) + + if remove == 1: + y_out = y_out[..., ~i_fixed] + elif remove == 2: + y_out = y_out[~i_fixed, ~i_fixed] + + return y_out + return fun_out + + +def _add_to_array(x_in, i_fixed, x_fixed): + """Adds fixed variables back to an array""" + i_free = ~i_fixed + if x_in.ndim == 2: + i_free = i_free[:, None] @ i_free[None, :] + x_out = np.zeros_like(i_free, dtype=x_in.dtype) + x_out[~i_free] = x_fixed + x_out[i_free] = x_in.ravel() + return x_out + + +def _validate_bounds(bounds, x0, meth): + """Check that bounds are valid.""" + + msg = "An upper bound is less than the corresponding lower bound." + if np.any(bounds.ub < bounds.lb): + raise ValueError(msg) + + msg = "The number of bounds is not compatible with the length of `x0`." + try: + bounds.lb = np.broadcast_to(bounds.lb, x0.shape) + bounds.ub = np.broadcast_to(bounds.ub, x0.shape) + except Exception as e: + raise ValueError(msg) from e + + return bounds + +def standardize_bounds(bounds, x0, meth): + """Converts bounds to the form required by the solver.""" + if meth in {'trust-constr', 'powell', 'nelder-mead', 'cobyla', 'cobyqa', + 'new'}: + if not isinstance(bounds, Bounds): + lb, ub = old_bound_to_new(bounds) + bounds = Bounds(lb, ub) + elif meth in ('l-bfgs-b', 'tnc', 'slsqp', 'old'): + if isinstance(bounds, Bounds): + bounds = new_bounds_to_old(bounds.lb, bounds.ub, x0.shape[0]) + return bounds + + +def standardize_constraints(constraints, x0, meth): + """Converts constraints to the form required by the solver.""" + all_constraint_types = (NonlinearConstraint, LinearConstraint, dict) + new_constraint_types = all_constraint_types[:-1] + if constraints is None: + constraints = [] + elif isinstance(constraints, all_constraint_types): + constraints = [constraints] + else: + constraints = list(constraints) # ensure it's a mutable sequence + + if meth in ['trust-constr', 'cobyqa', 'new']: + for i, con in enumerate(constraints): + if not isinstance(con, new_constraint_types): + constraints[i] = old_constraint_to_new(i, con) + else: + # iterate over copy, changing original + for i, con in enumerate(list(constraints)): + if isinstance(con, new_constraint_types): + old_constraints = new_constraint_to_old(con, x0) + constraints[i] = old_constraints[0] + constraints.extend(old_constraints[1:]) # appends 1 if present + + return constraints + + +def _optimize_result_for_equal_bounds( + fun, bounds, method, args=(), constraints=() +): + """ + Provides a default OptimizeResult for when a bounded minimization method + has (lb == ub).all(). + + Parameters + ---------- + fun: callable + bounds: Bounds + method: str + constraints: Constraint + """ + success = True + message = 'All independent variables were fixed by bounds.' 
+ + # bounds is new-style + x0 = bounds.lb + + if constraints: + message = ("All independent variables were fixed by bounds at values" + " that satisfy the constraints.") + constraints = standardize_constraints(constraints, x0, 'new') + + maxcv = 0 + for c in constraints: + pc = PreparedConstraint(c, x0) + violation = pc.violation(x0) + if np.sum(violation): + maxcv = max(maxcv, np.max(violation)) + success = False + message = (f"All independent variables were fixed by bounds, but " + f"the independent variables do not satisfy the " + f"constraints exactly. (Maximum violation: {maxcv}).") + + return OptimizeResult( + x=x0, fun=fun(x0, *args), success=success, message=message, nfev=1, + njev=0, nhev=0, + ) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_minpack.cpython-310-x86_64-linux-gnu.so b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_minpack.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..710d8c41decf596e7481bfb8d88d30f008d6f967 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_minpack.cpython-310-x86_64-linux-gnu.so differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_minpack_py.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_minpack_py.py new file mode 100644 index 0000000000000000000000000000000000000000..8cfce8aae21b7533fb1cb7385d0d5f48740e743b --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_minpack_py.py @@ -0,0 +1,1171 @@ +import warnings +from . import _minpack + +import numpy as np +from numpy import (atleast_1d, triu, shape, transpose, zeros, prod, greater, + asarray, inf, + finfo, inexact, issubdtype, dtype) +from scipy import linalg +from scipy.linalg import svd, cholesky, solve_triangular, LinAlgError +from scipy._lib._util import _asarray_validated, _lazywhere, _contains_nan +from scipy._lib._util import getfullargspec_no_self as _getfullargspec +from ._optimize import OptimizeResult, _check_unknown_options, OptimizeWarning +from ._lsq import least_squares +# from ._lsq.common import make_strictly_feasible +from ._lsq.least_squares import prepare_bounds +from scipy.optimize._minimize import Bounds + +__all__ = ['fsolve', 'leastsq', 'fixed_point', 'curve_fit'] + + +def _check_func(checker, argname, thefunc, x0, args, numinputs, + output_shape=None): + res = atleast_1d(thefunc(*((x0[:numinputs],) + args))) + if (output_shape is not None) and (shape(res) != output_shape): + if (output_shape[0] != 1): + if len(output_shape) > 1: + if output_shape[1] == 1: + return shape(res) + msg = f"{checker}: there is a mismatch between the input and output " \ + f"shape of the '{argname}' argument" + func_name = getattr(thefunc, '__name__', None) + if func_name: + msg += f" '{func_name}'." + else: + msg += "." + msg += f'Shape should be {output_shape} but it is {shape(res)}.' + raise TypeError(msg) + if issubdtype(res.dtype, inexact): + dt = res.dtype + else: + dt = dtype(float) + return shape(res), dt + + +def fsolve(func, x0, args=(), fprime=None, full_output=0, + col_deriv=0, xtol=1.49012e-8, maxfev=0, band=None, + epsfcn=None, factor=100, diag=None): + """ + Find the roots of a function. 
+ + Return the roots of the (non-linear) equations defined by + ``func(x) = 0`` given a starting estimate. + + Parameters + ---------- + func : callable ``f(x, *args)`` + A function that takes at least one (possibly vector) argument, + and returns a value of the same length. + x0 : ndarray + The starting estimate for the roots of ``func(x) = 0``. + args : tuple, optional + Any extra arguments to `func`. + fprime : callable ``f(x, *args)``, optional + A function to compute the Jacobian of `func` with derivatives + across the rows. By default, the Jacobian will be estimated. + full_output : bool, optional + If True, return optional outputs. + col_deriv : bool, optional + Specify whether the Jacobian function computes derivatives down + the columns (faster, because there is no transpose operation). + xtol : float, optional + The calculation will terminate if the relative error between two + consecutive iterates is at most `xtol`. + maxfev : int, optional + The maximum number of calls to the function. If zero, then + ``100*(N+1)`` is the maximum where N is the number of elements + in `x0`. + band : tuple, optional + If set to a two-sequence containing the number of sub- and + super-diagonals within the band of the Jacobi matrix, the + Jacobi matrix is considered banded (only for ``fprime=None``). + epsfcn : float, optional + A suitable step length for the forward-difference + approximation of the Jacobian (for ``fprime=None``). If + `epsfcn` is less than the machine precision, it is assumed + that the relative errors in the functions are of the order of + the machine precision. + factor : float, optional + A parameter determining the initial step bound + (``factor * || diag * x||``). Should be in the interval + ``(0.1, 100)``. + diag : sequence, optional + N positive entries that serve as a scale factors for the + variables. + + Returns + ------- + x : ndarray + The solution (or the result of the last iteration for + an unsuccessful call). + infodict : dict + A dictionary of optional outputs with the keys: + + ``nfev`` + number of function calls + ``njev`` + number of Jacobian calls + ``fvec`` + function evaluated at the output + ``fjac`` + the orthogonal matrix, q, produced by the QR + factorization of the final approximate Jacobian + matrix, stored column wise + ``r`` + upper triangular matrix produced by QR factorization + of the same matrix + ``qtf`` + the vector ``(transpose(q) * fvec)`` + + ier : int + An integer flag. Set to 1 if a solution was found, otherwise refer + to `mesg` for more information. + mesg : str + If no solution is found, `mesg` details the cause of failure. + + See Also + -------- + root : Interface to root finding algorithms for multivariate + functions. See the ``method='hybr'`` in particular. + + Notes + ----- + ``fsolve`` is a wrapper around MINPACK's hybrd and hybrj algorithms. + + Examples + -------- + Find a solution to the system of equations: + ``x0*cos(x1) = 4, x1*x0 - x1 = 5``. + + >>> import numpy as np + >>> from scipy.optimize import fsolve + >>> def func(x): + ... return [x[0] * np.cos(x[1]) - 4, + ... x[1] * x[0] - x[1] - 5] + >>> root = fsolve(func, [1, 1]) + >>> root + array([6.50409711, 0.90841421]) + >>> np.isclose(func(root), [0.0, 0.0]) # func(root) should be almost 0.0. + array([ True, True]) + + """ + def _wrapped_func(*fargs): + """ + Wrapped `func` to track the number of times + the function has been called. 
+ """ + _wrapped_func.nfev += 1 + return func(*fargs) + + _wrapped_func.nfev = 0 + + options = {'col_deriv': col_deriv, + 'xtol': xtol, + 'maxfev': maxfev, + 'band': band, + 'eps': epsfcn, + 'factor': factor, + 'diag': diag} + + res = _root_hybr(_wrapped_func, x0, args, jac=fprime, **options) + res.nfev = _wrapped_func.nfev + + if full_output: + x = res['x'] + info = {k: res.get(k) + for k in ('nfev', 'njev', 'fjac', 'r', 'qtf') if k in res} + info['fvec'] = res['fun'] + return x, info, res['status'], res['message'] + else: + status = res['status'] + msg = res['message'] + if status == 0: + raise TypeError(msg) + elif status == 1: + pass + elif status in [2, 3, 4, 5]: + warnings.warn(msg, RuntimeWarning, stacklevel=2) + else: + raise TypeError(msg) + return res['x'] + + +def _root_hybr(func, x0, args=(), jac=None, + col_deriv=0, xtol=1.49012e-08, maxfev=0, band=None, eps=None, + factor=100, diag=None, **unknown_options): + """ + Find the roots of a multivariate function using MINPACK's hybrd and + hybrj routines (modified Powell method). + + Options + ------- + col_deriv : bool + Specify whether the Jacobian function computes derivatives down + the columns (faster, because there is no transpose operation). + xtol : float + The calculation will terminate if the relative error between two + consecutive iterates is at most `xtol`. + maxfev : int + The maximum number of calls to the function. If zero, then + ``100*(N+1)`` is the maximum where N is the number of elements + in `x0`. + band : tuple + If set to a two-sequence containing the number of sub- and + super-diagonals within the band of the Jacobi matrix, the + Jacobi matrix is considered banded (only for ``jac=None``). + eps : float + A suitable step length for the forward-difference + approximation of the Jacobian (for ``jac=None``). If + `eps` is less than the machine precision, it is assumed + that the relative errors in the functions are of the order of + the machine precision. + factor : float + A parameter determining the initial step bound + (``factor * || diag * x||``). Should be in the interval + ``(0.1, 100)``. + diag : sequence + N positive entries that serve as a scale factors for the + variables. + + """ + _check_unknown_options(unknown_options) + epsfcn = eps + + x0 = asarray(x0).flatten() + n = len(x0) + if not isinstance(args, tuple): + args = (args,) + shape, dtype = _check_func('fsolve', 'func', func, x0, args, n, (n,)) + if epsfcn is None: + epsfcn = finfo(dtype).eps + Dfun = jac + if Dfun is None: + if band is None: + ml, mu = -10, -10 + else: + ml, mu = band[:2] + if maxfev == 0: + maxfev = 200 * (n + 1) + retval = _minpack._hybrd(func, x0, args, 1, xtol, maxfev, + ml, mu, epsfcn, factor, diag) + else: + _check_func('fsolve', 'fprime', Dfun, x0, args, n, (n, n)) + if (maxfev == 0): + maxfev = 100 * (n + 1) + retval = _minpack._hybrj(func, Dfun, x0, args, 1, + col_deriv, xtol, maxfev, factor, diag) + + x, status = retval[0], retval[-1] + + errors = {0: "Improper input parameters were entered.", + 1: "The solution converged.", + 2: "The number of calls to function has " + "reached maxfev = %d." 
% maxfev, + 3: f"xtol={xtol:f} is too small, no further improvement " + "in the approximate\n solution is possible.", + 4: "The iteration is not making good progress, as measured " + "by the \n improvement from the last five " + "Jacobian evaluations.", + 5: "The iteration is not making good progress, " + "as measured by the \n improvement from the last " + "ten iterations.", + 'unknown': "An error occurred."} + + info = retval[1] + info['fun'] = info.pop('fvec') + sol = OptimizeResult(x=x, success=(status == 1), status=status, + method="hybr") + sol.update(info) + try: + sol['message'] = errors[status] + except KeyError: + sol['message'] = errors['unknown'] + + return sol + + +LEASTSQ_SUCCESS = [1, 2, 3, 4] +LEASTSQ_FAILURE = [5, 6, 7, 8] + + +def leastsq(func, x0, args=(), Dfun=None, full_output=False, + col_deriv=False, ftol=1.49012e-8, xtol=1.49012e-8, + gtol=0.0, maxfev=0, epsfcn=None, factor=100, diag=None): + """ + Minimize the sum of squares of a set of equations. + + :: + + x = arg min(sum(func(y)**2,axis=0)) + y + + Parameters + ---------- + func : callable + Should take at least one (possibly length ``N`` vector) argument and + returns ``M`` floating point numbers. It must not return NaNs or + fitting might fail. ``M`` must be greater than or equal to ``N``. + x0 : ndarray + The starting estimate for the minimization. + args : tuple, optional + Any extra arguments to func are placed in this tuple. + Dfun : callable, optional + A function or method to compute the Jacobian of func with derivatives + across the rows. If this is None, the Jacobian will be estimated. + full_output : bool, optional + If ``True``, return all optional outputs (not just `x` and `ier`). + col_deriv : bool, optional + If ``True``, specify that the Jacobian function computes derivatives + down the columns (faster, because there is no transpose operation). + ftol : float, optional + Relative error desired in the sum of squares. + xtol : float, optional + Relative error desired in the approximate solution. + gtol : float, optional + Orthogonality desired between the function vector and the columns of + the Jacobian. + maxfev : int, optional + The maximum number of calls to the function. If `Dfun` is provided, + then the default `maxfev` is 100*(N+1) where N is the number of elements + in x0, otherwise the default `maxfev` is 200*(N+1). + epsfcn : float, optional + A variable used in determining a suitable step length for the forward- + difference approximation of the Jacobian (for Dfun=None). + Normally the actual step length will be sqrt(epsfcn)*x + If epsfcn is less than the machine precision, it is assumed that the + relative errors are of the order of the machine precision. + factor : float, optional + A parameter determining the initial step bound + (``factor * || diag * x||``). Should be in interval ``(0.1, 100)``. + diag : sequence, optional + N positive entries that serve as a scale factors for the variables. + + Returns + ------- + x : ndarray + The solution (or the result of the last iteration for an unsuccessful + call). + cov_x : ndarray + The inverse of the Hessian. `fjac` and `ipvt` are used to construct an + estimate of the Hessian. A value of None indicates a singular matrix, + which means the curvature in parameters `x` is numerically flat. To + obtain the covariance matrix of the parameters `x`, `cov_x` must be + multiplied by the variance of the residuals -- see curve_fit. Only + returned if `full_output` is ``True``. 
+ infodict : dict + a dictionary of optional outputs with the keys: + + ``nfev`` + The number of function calls + ``fvec`` + The function evaluated at the output + ``fjac`` + A permutation of the R matrix of a QR + factorization of the final approximate + Jacobian matrix, stored column wise. + Together with ipvt, the covariance of the + estimate can be approximated. + ``ipvt`` + An integer array of length N which defines + a permutation matrix, p, such that + fjac*p = q*r, where r is upper triangular + with diagonal elements of nonincreasing + magnitude. Column j of p is column ipvt(j) + of the identity matrix. + ``qtf`` + The vector (transpose(q) * fvec). + + Only returned if `full_output` is ``True``. + mesg : str + A string message giving information about the cause of failure. + Only returned if `full_output` is ``True``. + ier : int + An integer flag. If it is equal to 1, 2, 3 or 4, the solution was + found. Otherwise, the solution was not found. In either case, the + optional output variable 'mesg' gives more information. + + See Also + -------- + least_squares : Newer interface to solve nonlinear least-squares problems + with bounds on the variables. See ``method='lm'`` in particular. + + Notes + ----- + "leastsq" is a wrapper around MINPACK's lmdif and lmder algorithms. + + cov_x is a Jacobian approximation to the Hessian of the least squares + objective function. + This approximation assumes that the objective function is based on the + difference between some observed target data (ydata) and a (non-linear) + function of the parameters `f(xdata, params)` :: + + func(params) = ydata - f(xdata, params) + + so that the objective function is :: + + min sum((ydata - f(xdata, params))**2, axis=0) + params + + The solution, `x`, is always a 1-D array, regardless of the shape of `x0`, + or whether `x0` is a scalar. + + Examples + -------- + >>> from scipy.optimize import leastsq + >>> def func(x): + ... return 2*(x-3)**2+1 + >>> leastsq(func, 0) + (array([2.99999999]), 1) + + """ + x0 = asarray(x0).flatten() + n = len(x0) + if not isinstance(args, tuple): + args = (args,) + shape, dtype = _check_func('leastsq', 'func', func, x0, args, n) + m = shape[0] + + if n > m: + raise TypeError(f"Improper input: func input vector length N={n} must" + f" not exceed func output vector length M={m}") + + if epsfcn is None: + epsfcn = finfo(dtype).eps + + if Dfun is None: + if maxfev == 0: + maxfev = 200*(n + 1) + retval = _minpack._lmdif(func, x0, args, full_output, ftol, xtol, + gtol, maxfev, epsfcn, factor, diag) + else: + if col_deriv: + _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (n, m)) + else: + _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (m, n)) + if maxfev == 0: + maxfev = 100 * (n + 1) + retval = _minpack._lmder(func, Dfun, x0, args, full_output, + col_deriv, ftol, xtol, gtol, maxfev, + factor, diag) + + errors = {0: ["Improper input parameters.", TypeError], + 1: ["Both actual and predicted relative reductions " + f"in the sum of squares\n are at most {ftol:f}", None], + 2: ["The relative error between two consecutive " + f"iterates is at most {xtol:f}", None], + 3: ["Both actual and predicted relative reductions in " + f"the sum of squares\n are at most {ftol:f} and the " + "relative error between two consecutive " + f"iterates is at \n most {xtol:f}", None], + 4: ["The cosine of the angle between func(x) and any " + f"column of the\n Jacobian is at most {gtol:f} in " + "absolute value", None], + 5: ["Number of calls to function has reached " + "maxfev = %d." 
% maxfev, ValueError], + 6: [f"ftol={ftol:f} is too small, no further reduction " + "in the sum of squares\n is possible.", + ValueError], + 7: [f"xtol={xtol:f} is too small, no further improvement in " + "the approximate\n solution is possible.", + ValueError], + 8: [f"gtol={gtol:f} is too small, func(x) is orthogonal to the " + "columns of\n the Jacobian to machine precision.", ValueError]} + + # The FORTRAN return value (possible return values are >= 0 and <= 8) + info = retval[-1] + + if full_output: + cov_x = None + if info in LEASTSQ_SUCCESS: + # This was + # perm = take(eye(n), retval[1]['ipvt'] - 1, 0) + # r = triu(transpose(retval[1]['fjac'])[:n, :]) + # R = dot(r, perm) + # cov_x = inv(dot(transpose(R), R)) + # but the explicit dot product was not necessary and sometimes + # the result was not symmetric positive definite. See gh-4555. + perm = retval[1]['ipvt'] + n = len(perm) + r = triu(transpose(retval[1]['fjac'])[:n, :]) + inv_triu = linalg.get_lapack_funcs('trtri', (r,)) + try: + # inverse of permuted matrix is a permutation of matrix inverse + invR, trtri_info = inv_triu(r) # default: upper, non-unit diag + if trtri_info != 0: # explicit comparison for readability + raise LinAlgError(f'trtri returned info {trtri_info}') + invR[perm] = invR.copy() + cov_x = invR @ invR.T + except (LinAlgError, ValueError): + pass + return (retval[0], cov_x) + retval[1:-1] + (errors[info][0], info) + else: + if info in LEASTSQ_FAILURE: + warnings.warn(errors[info][0], RuntimeWarning, stacklevel=2) + elif info == 0: + raise errors[info][1](errors[info][0]) + return retval[0], info + + +def _lightweight_memoizer(f): + # very shallow memoization to address gh-13670: only remember the first set + # of parameters and corresponding function value, and only attempt to use + # them twice (the number of times the function is evaluated at x0). 
+ def _memoized_func(params): + if _memoized_func.skip_lookup: + return f(params) + + if np.all(_memoized_func.last_params == params): + return _memoized_func.last_val + elif _memoized_func.last_params is not None: + _memoized_func.skip_lookup = True + + val = f(params) + + if _memoized_func.last_params is None: + _memoized_func.last_params = np.copy(params) + _memoized_func.last_val = val + + return val + + _memoized_func.last_params = None + _memoized_func.last_val = None + _memoized_func.skip_lookup = False + return _memoized_func + + +def _wrap_func(func, xdata, ydata, transform): + if transform is None: + def func_wrapped(params): + return func(xdata, *params) - ydata + elif transform.size == 1 or transform.ndim == 1: + def func_wrapped(params): + return transform * (func(xdata, *params) - ydata) + else: + # Chisq = (y - yd)^T C^{-1} (y-yd) + # transform = L such that C = L L^T + # C^{-1} = L^{-T} L^{-1} + # Chisq = (y - yd)^T L^{-T} L^{-1} (y-yd) + # Define (y-yd)' = L^{-1} (y-yd) + # by solving + # L (y-yd)' = (y-yd) + # and minimize (y-yd)'^T (y-yd)' + def func_wrapped(params): + return solve_triangular(transform, func(xdata, *params) - ydata, lower=True) + return func_wrapped + + +def _wrap_jac(jac, xdata, transform): + if transform is None: + def jac_wrapped(params): + return jac(xdata, *params) + elif transform.ndim == 1: + def jac_wrapped(params): + return transform[:, np.newaxis] * np.asarray(jac(xdata, *params)) + else: + def jac_wrapped(params): + return solve_triangular(transform, + np.asarray(jac(xdata, *params)), + lower=True) + return jac_wrapped + + +def _initialize_feasible(lb, ub): + p0 = np.ones_like(lb) + lb_finite = np.isfinite(lb) + ub_finite = np.isfinite(ub) + + mask = lb_finite & ub_finite + p0[mask] = 0.5 * (lb[mask] + ub[mask]) + + mask = lb_finite & ~ub_finite + p0[mask] = lb[mask] + 1 + + mask = ~lb_finite & ub_finite + p0[mask] = ub[mask] - 1 + + return p0 + + +def curve_fit(f, xdata, ydata, p0=None, sigma=None, absolute_sigma=False, + check_finite=None, bounds=(-np.inf, np.inf), method=None, + jac=None, *, full_output=False, nan_policy=None, + **kwargs): + """ + Use non-linear least squares to fit a function, f, to data. + + Assumes ``ydata = f(xdata, *params) + eps``. + + Parameters + ---------- + f : callable + The model function, f(x, ...). It must take the independent + variable as the first argument and the parameters to fit as + separate remaining arguments. + xdata : array_like + The independent variable where the data is measured. + Should usually be an M-length sequence or an (k,M)-shaped array for + functions with k predictors, and each element should be float + convertible if it is an array like object. + ydata : array_like + The dependent data, a length M array - nominally ``f(xdata, ...)``. + p0 : array_like, optional + Initial guess for the parameters (length N). If None, then the + initial values will all be 1 (if the number of parameters for the + function can be determined using introspection, otherwise a + ValueError is raised). + sigma : None or scalar or M-length sequence or MxM array, optional + Determines the uncertainty in `ydata`. If we define residuals as + ``r = ydata - f(xdata, *popt)``, then the interpretation of `sigma` + depends on its number of dimensions: + + - A scalar or 1-D `sigma` should contain values of standard deviations of + errors in `ydata`. In this case, the optimized function is + ``chisq = sum((r / sigma) ** 2)``. + + - A 2-D `sigma` should contain the covariance matrix of + errors in `ydata`. 
In this case, the optimized function is + ``chisq = r.T @ inv(sigma) @ r``. + + .. versionadded:: 0.19 + + None (default) is equivalent of 1-D `sigma` filled with ones. + absolute_sigma : bool, optional + If True, `sigma` is used in an absolute sense and the estimated parameter + covariance `pcov` reflects these absolute values. + + If False (default), only the relative magnitudes of the `sigma` values matter. + The returned parameter covariance matrix `pcov` is based on scaling + `sigma` by a constant factor. This constant is set by demanding that the + reduced `chisq` for the optimal parameters `popt` when using the + *scaled* `sigma` equals unity. In other words, `sigma` is scaled to + match the sample variance of the residuals after the fit. Default is False. + Mathematically, + ``pcov(absolute_sigma=False) = pcov(absolute_sigma=True) * chisq(popt)/(M-N)`` + check_finite : bool, optional + If True, check that the input arrays do not contain nans of infs, + and raise a ValueError if they do. Setting this parameter to + False may silently produce nonsensical results if the input arrays + do contain nans. Default is True if `nan_policy` is not specified + explicitly and False otherwise. + bounds : 2-tuple of array_like or `Bounds`, optional + Lower and upper bounds on parameters. Defaults to no bounds. + There are two ways to specify the bounds: + + - Instance of `Bounds` class. + + - 2-tuple of array_like: Each element of the tuple must be either + an array with the length equal to the number of parameters, or a + scalar (in which case the bound is taken to be the same for all + parameters). Use ``np.inf`` with an appropriate sign to disable + bounds on all or some parameters. + + method : {'lm', 'trf', 'dogbox'}, optional + Method to use for optimization. See `least_squares` for more details. + Default is 'lm' for unconstrained problems and 'trf' if `bounds` are + provided. The method 'lm' won't work when the number of observations + is less than the number of variables, use 'trf' or 'dogbox' in this + case. + + .. versionadded:: 0.17 + jac : callable, string or None, optional + Function with signature ``jac(x, ...)`` which computes the Jacobian + matrix of the model function with respect to parameters as a dense + array_like structure. It will be scaled according to provided `sigma`. + If None (default), the Jacobian will be estimated numerically. + String keywords for 'trf' and 'dogbox' methods can be used to select + a finite difference scheme, see `least_squares`. + + .. versionadded:: 0.18 + full_output : boolean, optional + If True, this function returns additional information: `infodict`, + `mesg`, and `ier`. + + .. versionadded:: 1.9 + nan_policy : {'raise', 'omit', None}, optional + Defines how to handle when input contains nan. + The following options are available (default is None): + + * 'raise': throws an error + * 'omit': performs the calculations ignoring nan values + * None: no special handling of NaNs is performed + (except what is done by check_finite); the behavior when NaNs + are present is implementation-dependent and may change. + + Note that if this value is specified explicitly (not None), + `check_finite` will be set as False. + + .. versionadded:: 1.11 + **kwargs + Keyword arguments passed to `leastsq` for ``method='lm'`` or + `least_squares` otherwise. + + Returns + ------- + popt : array + Optimal values for the parameters so that the sum of the squared + residuals of ``f(xdata, *popt) - ydata`` is minimized. 
+ pcov : 2-D array + The estimated approximate covariance of popt. The diagonals provide + the variance of the parameter estimate. To compute one standard + deviation errors on the parameters, use + ``perr = np.sqrt(np.diag(pcov))``. Note that the relationship between + `cov` and parameter error estimates is derived based on a linear + approximation to the model function around the optimum [1]_. + When this approximation becomes inaccurate, `cov` may not provide an + accurate measure of uncertainty. + + How the `sigma` parameter affects the estimated covariance + depends on `absolute_sigma` argument, as described above. + + If the Jacobian matrix at the solution doesn't have a full rank, then + 'lm' method returns a matrix filled with ``np.inf``, on the other hand + 'trf' and 'dogbox' methods use Moore-Penrose pseudoinverse to compute + the covariance matrix. Covariance matrices with large condition numbers + (e.g. computed with `numpy.linalg.cond`) may indicate that results are + unreliable. + infodict : dict (returned only if `full_output` is True) + a dictionary of optional outputs with the keys: + + ``nfev`` + The number of function calls. Methods 'trf' and 'dogbox' do not + count function calls for numerical Jacobian approximation, + as opposed to 'lm' method. + ``fvec`` + The residual values evaluated at the solution, for a 1-D `sigma` + this is ``(f(x, *popt) - ydata)/sigma``. + ``fjac`` + A permutation of the R matrix of a QR + factorization of the final approximate + Jacobian matrix, stored column wise. + Together with ipvt, the covariance of the + estimate can be approximated. + Method 'lm' only provides this information. + ``ipvt`` + An integer array of length N which defines + a permutation matrix, p, such that + fjac*p = q*r, where r is upper triangular + with diagonal elements of nonincreasing + magnitude. Column j of p is column ipvt(j) + of the identity matrix. + Method 'lm' only provides this information. + ``qtf`` + The vector (transpose(q) * fvec). + Method 'lm' only provides this information. + + .. versionadded:: 1.9 + mesg : str (returned only if `full_output` is True) + A string message giving information about the solution. + + .. versionadded:: 1.9 + ier : int (returned only if `full_output` is True) + An integer flag. If it is equal to 1, 2, 3 or 4, the solution was + found. Otherwise, the solution was not found. In either case, the + optional output variable `mesg` gives more information. + + .. versionadded:: 1.9 + + Raises + ------ + ValueError + if either `ydata` or `xdata` contain NaNs, or if incompatible options + are used. + + RuntimeError + if the least-squares minimization fails. + + OptimizeWarning + if covariance of the parameters can not be estimated. + + See Also + -------- + least_squares : Minimize the sum of squares of nonlinear functions. + scipy.stats.linregress : Calculate a linear least squares regression for + two sets of measurements. + + Notes + ----- + Users should ensure that inputs `xdata`, `ydata`, and the output of `f` + are ``float64``, or else the optimization may return incorrect results. + + With ``method='lm'``, the algorithm uses the Levenberg-Marquardt algorithm + through `leastsq`. Note that this algorithm can only deal with + unconstrained problems. + + Box constraints can be handled by methods 'trf' and 'dogbox'. Refer to + the docstring of `least_squares` for more information. + + Parameters to be fitted must have similar scale. Differences of multiple + orders of magnitude can lead to incorrect results. 
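Returning briefly to the ``sigma`` and ``absolute_sigma`` parameters described above, a minimal sketch of a weighted fit with known per-point uncertainties (the linear model and synthetic data are illustrative only); one-sigma parameter errors come from the diagonal of ``pcov``:

import numpy as np
from scipy.optimize import curve_fit

def line(x, a, b):
    return a * x + b

rng = np.random.default_rng(0)
xdata = np.linspace(0.0, 10.0, 50)
yerr = np.full_like(xdata, 0.3)              # known measurement uncertainty
ydata = line(xdata, 2.0, -1.0) + rng.normal(0.0, yerr)

# absolute_sigma=True: pcov reflects the supplied uncertainties directly
popt, pcov = curve_fit(line, xdata, ydata, sigma=yerr, absolute_sigma=True)
perr = np.sqrt(np.diag(pcov))                # one-sigma parameter errors

With ``absolute_sigma=False`` (the default), ``pcov`` would instead be rescaled by the reduced chi-square of the fit, as explained above.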
For the 'trf' and + 'dogbox' methods, the `x_scale` keyword argument can be used to scale + the parameters. + + References + ---------- + .. [1] K. Vugrin et al. Confidence region estimation techniques for nonlinear + regression in groundwater flow: Three case studies. Water Resources + Research, Vol. 43, W03423, :doi:`10.1029/2005WR004804` + + Examples + -------- + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> from scipy.optimize import curve_fit + + >>> def func(x, a, b, c): + ... return a * np.exp(-b * x) + c + + Define the data to be fit with some noise: + + >>> xdata = np.linspace(0, 4, 50) + >>> y = func(xdata, 2.5, 1.3, 0.5) + >>> rng = np.random.default_rng() + >>> y_noise = 0.2 * rng.normal(size=xdata.size) + >>> ydata = y + y_noise + >>> plt.plot(xdata, ydata, 'b-', label='data') + + Fit for the parameters a, b, c of the function `func`: + + >>> popt, pcov = curve_fit(func, xdata, ydata) + >>> popt + array([2.56274217, 1.37268521, 0.47427475]) + >>> plt.plot(xdata, func(xdata, *popt), 'r-', + ... label='fit: a=%5.3f, b=%5.3f, c=%5.3f' % tuple(popt)) + + Constrain the optimization to the region of ``0 <= a <= 3``, + ``0 <= b <= 1`` and ``0 <= c <= 0.5``: + + >>> popt, pcov = curve_fit(func, xdata, ydata, bounds=(0, [3., 1., 0.5])) + >>> popt + array([2.43736712, 1. , 0.34463856]) + >>> plt.plot(xdata, func(xdata, *popt), 'g--', + ... label='fit: a=%5.3f, b=%5.3f, c=%5.3f' % tuple(popt)) + + >>> plt.xlabel('x') + >>> plt.ylabel('y') + >>> plt.legend() + >>> plt.show() + + For reliable results, the model `func` should not be overparametrized; + redundant parameters can cause unreliable covariance matrices and, in some + cases, poorer quality fits. As a quick check of whether the model may be + overparameterized, calculate the condition number of the covariance matrix: + + >>> np.linalg.cond(pcov) + 34.571092161547405 # may vary + + The value is small, so it does not raise much concern. If, however, we were + to add a fourth parameter ``d`` to `func` with the same effect as ``a``: + + >>> def func2(x, a, b, c, d): + ... return a * d * np.exp(-b * x) + c # a and d are redundant + >>> popt, pcov = curve_fit(func2, xdata, ydata) + >>> np.linalg.cond(pcov) + 1.13250718925596e+32 # may vary + + Such a large value is cause for concern. The diagonal elements of the + covariance matrix, which is related to uncertainty of the fit, gives more + information: + + >>> np.diag(pcov) + array([1.48814742e+29, 3.78596560e-02, 5.39253738e-03, 2.76417220e+28]) # may vary + + Note that the first and last terms are much larger than the other elements, + suggesting that the optimal values of these parameters are ambiguous and + that only one of these parameters is needed in the model. + + If the optimal parameters of `f` differ by multiple orders of magnitude, the + resulting fit can be inaccurate. Sometimes, `curve_fit` can fail to find any + results: + + >>> ydata = func(xdata, 500000, 0.01, 15) + >>> try: + ... popt, pcov = curve_fit(func, xdata, ydata, method = 'trf') + ... except RuntimeError as e: + ... print(e) + Optimal parameters not found: The maximum number of function evaluations is + exceeded. + + If parameter scale is roughly known beforehand, it can be defined in + `x_scale` argument: + + >>> popt, pcov = curve_fit(func, xdata, ydata, method = 'trf', + ... 
x_scale = [1000, 1, 1]) + >>> popt + array([5.00000000e+05, 1.00000000e-02, 1.49999999e+01]) + """ + if p0 is None: + # determine number of parameters by inspecting the function + sig = _getfullargspec(f) + args = sig.args + if len(args) < 2: + raise ValueError("Unable to determine number of fit parameters.") + n = len(args) - 1 + else: + p0 = np.atleast_1d(p0) + n = p0.size + + if isinstance(bounds, Bounds): + lb, ub = bounds.lb, bounds.ub + else: + lb, ub = prepare_bounds(bounds, n) + if p0 is None: + p0 = _initialize_feasible(lb, ub) + + bounded_problem = np.any((lb > -np.inf) | (ub < np.inf)) + if method is None: + if bounded_problem: + method = 'trf' + else: + method = 'lm' + + if method == 'lm' and bounded_problem: + raise ValueError("Method 'lm' only works for unconstrained problems. " + "Use 'trf' or 'dogbox' instead.") + + if check_finite is None: + check_finite = True if nan_policy is None else False + + # optimization may produce garbage for float32 inputs, cast them to float64 + if check_finite: + ydata = np.asarray_chkfinite(ydata, float) + else: + ydata = np.asarray(ydata, float) + + if isinstance(xdata, (list, tuple, np.ndarray)): + # `xdata` is passed straight to the user-defined `f`, so allow + # non-array_like `xdata`. + if check_finite: + xdata = np.asarray_chkfinite(xdata, float) + else: + xdata = np.asarray(xdata, float) + + if ydata.size == 0: + raise ValueError("`ydata` must not be empty!") + + # nan handling is needed only if check_finite is False because if True, + # the x-y data are already checked, and they don't contain nans. + if not check_finite and nan_policy is not None: + if nan_policy == "propagate": + raise ValueError("`nan_policy='propagate'` is not supported " + "by this function.") + + policies = [None, 'raise', 'omit'] + x_contains_nan, nan_policy = _contains_nan(xdata, nan_policy, + policies=policies) + y_contains_nan, nan_policy = _contains_nan(ydata, nan_policy, + policies=policies) + + if (x_contains_nan or y_contains_nan) and nan_policy == 'omit': + # ignore NaNs for N dimensional arrays + has_nan = np.isnan(xdata) + has_nan = has_nan.any(axis=tuple(range(has_nan.ndim-1))) + has_nan |= np.isnan(ydata) + + xdata = xdata[..., ~has_nan] + ydata = ydata[~has_nan] + + # Also omit the corresponding entries from sigma + if sigma is not None: + sigma = np.asarray(sigma) + if sigma.ndim == 1: + sigma = sigma[~has_nan] + elif sigma.ndim == 2: + sigma = sigma[~has_nan, :] + sigma = sigma[:, ~has_nan] + + # Determine type of sigma + if sigma is not None: + sigma = np.asarray(sigma) + + # if 1-D or a scalar, sigma are errors, define transform = 1/sigma + if sigma.size == 1 or sigma.shape == (ydata.size,): + transform = 1.0 / sigma + # if 2-D, sigma is the covariance matrix, + # define transform = L such that L L^T = C + elif sigma.shape == (ydata.size, ydata.size): + try: + # scipy.linalg.cholesky requires lower=True to return L L^T = A + transform = cholesky(sigma, lower=True) + except LinAlgError as e: + raise ValueError("`sigma` must be positive definite.") from e + else: + raise ValueError("`sigma` has incorrect shape.") + else: + transform = None + + func = _lightweight_memoizer(_wrap_func(f, xdata, ydata, transform)) + + if callable(jac): + jac = _lightweight_memoizer(_wrap_jac(jac, xdata, transform)) + elif jac is None and method != 'lm': + jac = '2-point' + + if 'args' in kwargs: + # The specification for the model function `f` does not support + # additional arguments. Refer to the `curve_fit` docstring for + # acceptable call signatures of `f`. 
+ raise ValueError("'args' is not a supported keyword argument.") + + if method == 'lm': + # if ydata.size == 1, this might be used for broadcast. + if ydata.size != 1 and n > ydata.size: + raise TypeError(f"The number of func parameters={n} must not" + f" exceed the number of data points={ydata.size}") + res = leastsq(func, p0, Dfun=jac, full_output=1, **kwargs) + popt, pcov, infodict, errmsg, ier = res + ysize = len(infodict['fvec']) + cost = np.sum(infodict['fvec'] ** 2) + if ier not in [1, 2, 3, 4]: + raise RuntimeError("Optimal parameters not found: " + errmsg) + else: + # Rename maxfev (leastsq) to max_nfev (least_squares), if specified. + if 'max_nfev' not in kwargs: + kwargs['max_nfev'] = kwargs.pop('maxfev', None) + + res = least_squares(func, p0, jac=jac, bounds=bounds, method=method, + **kwargs) + + if not res.success: + raise RuntimeError("Optimal parameters not found: " + res.message) + + infodict = dict(nfev=res.nfev, fvec=res.fun) + ier = res.status + errmsg = res.message + + ysize = len(res.fun) + cost = 2 * res.cost # res.cost is half sum of squares! + popt = res.x + + # Do Moore-Penrose inverse discarding zero singular values. + _, s, VT = svd(res.jac, full_matrices=False) + threshold = np.finfo(float).eps * max(res.jac.shape) * s[0] + s = s[s > threshold] + VT = VT[:s.size] + pcov = np.dot(VT.T / s**2, VT) + + warn_cov = False + if pcov is None or np.isnan(pcov).any(): + # indeterminate covariance + pcov = zeros((len(popt), len(popt)), dtype=float) + pcov.fill(inf) + warn_cov = True + elif not absolute_sigma: + if ysize > p0.size: + s_sq = cost / (ysize - p0.size) + pcov = pcov * s_sq + else: + pcov.fill(inf) + warn_cov = True + + if warn_cov: + warnings.warn('Covariance of the parameters could not be estimated', + category=OptimizeWarning, stacklevel=2) + + if full_output: + return popt, pcov, infodict, errmsg, ier + else: + return popt, pcov + + +def check_gradient(fcn, Dfcn, x0, args=(), col_deriv=0): + """Perform a simple check on the gradient for correctness. + + """ + + x = atleast_1d(x0) + n = len(x) + x = x.reshape((n,)) + fvec = atleast_1d(fcn(x, *args)) + m = len(fvec) + fvec = fvec.reshape((m,)) + ldfjac = m + fjac = atleast_1d(Dfcn(x, *args)) + fjac = fjac.reshape((m, n)) + if col_deriv == 0: + fjac = transpose(fjac) + + xp = zeros((n,), float) + err = zeros((m,), float) + fvecp = None + _minpack._chkder(m, n, x, fvec, fjac, ldfjac, xp, fvecp, 1, err) + + fvecp = atleast_1d(fcn(xp, *args)) + fvecp = fvecp.reshape((m,)) + _minpack._chkder(m, n, x, fvec, fjac, ldfjac, xp, fvecp, 2, err) + + good = (prod(greater(err, 0.5), axis=0)) + + return (good, err) + + +def _del2(p0, p1, d): + return p0 - np.square(p1 - p0) / d + + +def _relerr(actual, desired): + return (actual - desired) / desired + + +def _fixed_point_helper(func, x0, args, xtol, maxiter, use_accel): + p0 = x0 + for i in range(maxiter): + p1 = func(p0, *args) + if use_accel: + p2 = func(p1, *args) + d = p2 - 2.0 * p1 + p0 + p = _lazywhere(d != 0, (p0, p1, d), f=_del2, fillvalue=p2) + else: + p = p1 + relerr = _lazywhere(p0 != 0, (p, p0), f=_relerr, fillvalue=p) + if np.all(np.abs(relerr) < xtol): + return p + p0 = p + msg = "Failed to converge after %d iterations, value is %s" % (maxiter, p) + raise RuntimeError(msg) + + +def fixed_point(func, x0, args=(), xtol=1e-8, maxiter=500, method='del2'): + """ + Find a fixed point of the function. + + Given a function of one or more variables and a starting point, find a + fixed point of the function: i.e., where ``func(x0) == x0``. 
+ + Parameters + ---------- + func : function + Function to evaluate. + x0 : array_like + Fixed point of function. + args : tuple, optional + Extra arguments to `func`. + xtol : float, optional + Convergence tolerance, defaults to 1e-08. + maxiter : int, optional + Maximum number of iterations, defaults to 500. + method : {"del2", "iteration"}, optional + Method of finding the fixed-point, defaults to "del2", + which uses Steffensen's Method with Aitken's ``Del^2`` + convergence acceleration [1]_. The "iteration" method simply iterates + the function until convergence is detected, without attempting to + accelerate the convergence. + + References + ---------- + .. [1] Burden, Faires, "Numerical Analysis", 5th edition, pg. 80 + + Examples + -------- + >>> import numpy as np + >>> from scipy import optimize + >>> def func(x, c1, c2): + ... return np.sqrt(c1/(x+c2)) + >>> c1 = np.array([10,12.]) + >>> c2 = np.array([3, 5.]) + >>> optimize.fixed_point(func, [1.2, 1.3], args=(c1,c2)) + array([ 1.4920333 , 1.37228132]) + + """ + use_accel = {'del2': True, 'iteration': False}[method] + x0 = _asarray_validated(x0, as_inexact=True) + return _fixed_point_helper(func, x0, args, xtol, maxiter, use_accel) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_nnls.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_nnls.py new file mode 100644 index 0000000000000000000000000000000000000000..be904c90d715583faaf7751ce62b9c992e07e208 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_nnls.py @@ -0,0 +1,97 @@ +import numpy as np +from ._cython_nnls import _nnls + + +__all__ = ['nnls'] + + +def nnls(A, b, maxiter=None, *, atol=None): + """ + Solve ``argmin_x || Ax - b ||_2`` for ``x>=0``. + + This problem, often called as NonNegative Least Squares, is a convex + optimization problem with convex constraints. It typically arises when + the ``x`` models quantities for which only nonnegative values are + attainable; weight of ingredients, component costs and so on. + + Parameters + ---------- + A : (m, n) ndarray + Coefficient array + b : (m,) ndarray, float + Right-hand side vector. + maxiter: int, optional + Maximum number of iterations, optional. Default value is ``3 * n``. + atol: float + Tolerance value used in the algorithm to assess closeness to zero in + the projected residual ``(A.T @ (A x - b)`` entries. Increasing this + value relaxes the solution constraints. A typical relaxation value can + be selected as ``max(m, n) * np.linalg.norm(a, 1) * np.spacing(1.)``. + This value is not set as default since the norm operation becomes + expensive for large problems hence can be used only when necessary. + + Returns + ------- + x : ndarray + Solution vector. + rnorm : float + The 2-norm of the residual, ``|| Ax-b ||_2``. + + See Also + -------- + lsq_linear : Linear least squares with bounds on the variables + + Notes + ----- + The code is based on [2]_ which is an improved version of the classical + algorithm of [1]_. It utilizes an active set method and solves the KKT + (Karush-Kuhn-Tucker) conditions for the non-negative least squares problem. + + References + ---------- + .. [1] : Lawson C., Hanson R.J., "Solving Least Squares Problems", SIAM, + 1995, :doi:`10.1137/1.9781611971217` + .. 
[2] : Bro, Rasmus and de Jong, Sijmen, "A Fast Non-Negativity- + Constrained Least Squares Algorithm", Journal Of Chemometrics, 1997, + :doi:`10.1002/(SICI)1099-128X(199709/10)11:5<393::AID-CEM483>3.0.CO;2-L` + + Examples + -------- + >>> import numpy as np + >>> from scipy.optimize import nnls + ... + >>> A = np.array([[1, 0], [1, 0], [0, 1]]) + >>> b = np.array([2, 1, 1]) + >>> nnls(A, b) + (array([1.5, 1. ]), 0.7071067811865475) + + >>> b = np.array([-1, -1, -1]) + >>> nnls(A, b) + (array([0., 0.]), 1.7320508075688772) + + """ + + A = np.asarray_chkfinite(A, dtype=np.float64, order='C') + b = np.asarray_chkfinite(b, dtype=np.float64) + + if len(A.shape) != 2: + raise ValueError("Expected a two-dimensional array (matrix)" + + f", but the shape of A is {A.shape}") + if len(b.shape) != 1: + raise ValueError("Expected a one-dimensional array (vector)" + + f", but the shape of b is {b.shape}") + + m, n = A.shape + + if m != b.shape[0]: + raise ValueError( + "Incompatible dimensions. The first dimension of " + + f"A is {m}, while the shape of b is {(b.shape[0], )}") + + if not maxiter: + maxiter = 3*n + x, rnorm, info = _nnls(A, b, maxiter) + if info == -1: + raise RuntimeError("Maximum number of iterations reached.") + + return x, rnorm diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_nonlin.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_nonlin.py new file mode 100644 index 0000000000000000000000000000000000000000..b6e07683500bb01c195b1cdfa8a13157353b5370 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_nonlin.py @@ -0,0 +1,1603 @@ +# Copyright (C) 2009, Pauli Virtanen +# Distributed under the same license as SciPy. 
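Before the nonlinear-solver module that follows, a short illustration of the ``atol`` relaxation suggested in the ``nnls`` notes above (keyword-only ``atol`` as in the signature shown there); the small system is arbitrary and the relaxed tolerance is usually only needed for ill-conditioned or large problems:

import numpy as np
from scipy.optimize import nnls

A = np.array([[1.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
b = np.array([2.0, 1.0, 1.0])
m, n = A.shape
# relaxation value recommended in the docstring notes
atol = max(m, n) * np.linalg.norm(A, 1) * np.spacing(1.0)
x, rnorm = nnls(A, b, atol=atol)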
+ +import inspect +import sys +import warnings + +import numpy as np +from numpy import asarray, dot, vdot + +from scipy.linalg import norm, solve, inv, qr, svd, LinAlgError +import scipy.sparse.linalg +import scipy.sparse +from scipy.linalg import get_blas_funcs +from scipy._lib._util import copy_if_needed +from scipy._lib._util import getfullargspec_no_self as _getfullargspec +from ._linesearch import scalar_search_wolfe1, scalar_search_armijo + + +__all__ = [ + 'broyden1', 'broyden2', 'anderson', 'linearmixing', + 'diagbroyden', 'excitingmixing', 'newton_krylov', + 'BroydenFirst', 'KrylovJacobian', 'InverseJacobian', 'NoConvergence'] + +#------------------------------------------------------------------------------ +# Utility functions +#------------------------------------------------------------------------------ + + +class NoConvergence(Exception): + """Exception raised when nonlinear solver fails to converge within the specified + `maxiter`.""" + pass + + +def maxnorm(x): + return np.absolute(x).max() + + +def _as_inexact(x): + """Return `x` as an array, of either floats or complex floats""" + x = asarray(x) + if not np.issubdtype(x.dtype, np.inexact): + return asarray(x, dtype=np.float64) + return x + + +def _array_like(x, x0): + """Return ndarray `x` as same array subclass and shape as `x0`""" + x = np.reshape(x, np.shape(x0)) + wrap = getattr(x0, '__array_wrap__', x.__array_wrap__) + return wrap(x) + + +def _safe_norm(v): + if not np.isfinite(v).all(): + return np.array(np.inf) + return norm(v) + +#------------------------------------------------------------------------------ +# Generic nonlinear solver machinery +#------------------------------------------------------------------------------ + + +_doc_parts = dict( + params_basic=""" + F : function(x) -> f + Function whose root to find; should take and return an array-like + object. + xin : array_like + Initial guess for the solution + """.strip(), + params_extra=""" + iter : int, optional + Number of iterations to make. If omitted (default), make as many + as required to meet tolerances. + verbose : bool, optional + Print status to stdout on every iteration. + maxiter : int, optional + Maximum number of iterations to make. If more are needed to + meet convergence, `NoConvergence` is raised. + f_tol : float, optional + Absolute tolerance (in max-norm) for the residual. + If omitted, default is 6e-6. + f_rtol : float, optional + Relative tolerance for the residual. If omitted, not used. + x_tol : float, optional + Absolute minimum step size, as determined from the Jacobian + approximation. If the step size is smaller than this, optimization + is terminated as successful. If omitted, not used. + x_rtol : float, optional + Relative minimum step size. If omitted, not used. + tol_norm : function(vector) -> scalar, optional + Norm to use in convergence check. Default is the maximum norm. + line_search : {None, 'armijo' (default), 'wolfe'}, optional + Which type of a line search to use to determine the step size in the + direction given by the Jacobian approximation. Defaults to 'armijo'. + callback : function, optional + Optional callback function. It is called on every iteration as + ``callback(x, f)`` where `x` is the current solution and `f` + the corresponding residual. + + Returns + ------- + sol : ndarray + An array (of similar array type as `x0`) containing the final solution. + + Raises + ------ + NoConvergence + When a solution was not found. 
+ + """.strip() +) + + +def _set_doc(obj): + if obj.__doc__: + obj.__doc__ = obj.__doc__ % _doc_parts + + +def nonlin_solve(F, x0, jacobian='krylov', iter=None, verbose=False, + maxiter=None, f_tol=None, f_rtol=None, x_tol=None, x_rtol=None, + tol_norm=None, line_search='armijo', callback=None, + full_output=False, raise_exception=True): + """ + Find a root of a function, in a way suitable for large-scale problems. + + Parameters + ---------- + %(params_basic)s + jacobian : Jacobian + A Jacobian approximation: `Jacobian` object or something that + `asjacobian` can transform to one. Alternatively, a string specifying + which of the builtin Jacobian approximations to use: + + krylov, broyden1, broyden2, anderson + diagbroyden, linearmixing, excitingmixing + + %(params_extra)s + full_output : bool + If true, returns a dictionary `info` containing convergence + information. + raise_exception : bool + If True, a `NoConvergence` exception is raise if no solution is found. + + See Also + -------- + asjacobian, Jacobian + + Notes + ----- + This algorithm implements the inexact Newton method, with + backtracking or full line searches. Several Jacobian + approximations are available, including Krylov and Quasi-Newton + methods. + + References + ---------- + .. [KIM] C. T. Kelley, \"Iterative Methods for Linear and Nonlinear + Equations\". Society for Industrial and Applied Mathematics. (1995) + https://archive.siam.org/books/kelley/fr16/ + + """ + # Can't use default parameters because it's being explicitly passed as None + # from the calling function, so we need to set it here. + tol_norm = maxnorm if tol_norm is None else tol_norm + condition = TerminationCondition(f_tol=f_tol, f_rtol=f_rtol, + x_tol=x_tol, x_rtol=x_rtol, + iter=iter, norm=tol_norm) + + x0 = _as_inexact(x0) + def func(z): + return _as_inexact(F(_array_like(z, x0))).flatten() + x = x0.flatten() + + dx = np.full_like(x, np.inf) + Fx = func(x) + Fx_norm = norm(Fx) + + jacobian = asjacobian(jacobian) + jacobian.setup(x.copy(), Fx, func) + + if maxiter is None: + if iter is not None: + maxiter = iter + 1 + else: + maxiter = 100*(x.size+1) + + if line_search is True: + line_search = 'armijo' + elif line_search is False: + line_search = None + + if line_search not in (None, 'armijo', 'wolfe'): + raise ValueError("Invalid line search") + + # Solver tolerance selection + gamma = 0.9 + eta_max = 0.9999 + eta_treshold = 0.1 + eta = 1e-3 + + for n in range(maxiter): + status = condition.check(Fx, x, dx) + if status: + break + + # The tolerance, as computed for scipy.sparse.linalg.* routines + tol = min(eta, eta*Fx_norm) + dx = -jacobian.solve(Fx, tol=tol) + + if norm(dx) == 0: + raise ValueError("Jacobian inversion yielded zero vector. 
" + "This indicates a bug in the Jacobian " + "approximation.") + + # Line search, or Newton step + if line_search: + s, x, Fx, Fx_norm_new = _nonlin_line_search(func, x, Fx, dx, + line_search) + else: + s = 1.0 + x = x + dx + Fx = func(x) + Fx_norm_new = norm(Fx) + + jacobian.update(x.copy(), Fx) + + if callback: + callback(x, Fx) + + # Adjust forcing parameters for inexact methods + eta_A = gamma * Fx_norm_new**2 / Fx_norm**2 + if gamma * eta**2 < eta_treshold: + eta = min(eta_max, eta_A) + else: + eta = min(eta_max, max(eta_A, gamma*eta**2)) + + Fx_norm = Fx_norm_new + + # Print status + if verbose: + sys.stdout.write("%d: |F(x)| = %g; step %g\n" % ( + n, tol_norm(Fx), s)) + sys.stdout.flush() + else: + if raise_exception: + raise NoConvergence(_array_like(x, x0)) + else: + status = 2 + + if full_output: + info = {'nit': condition.iteration, + 'fun': Fx, + 'status': status, + 'success': status == 1, + 'message': {1: 'A solution was found at the specified ' + 'tolerance.', + 2: 'The maximum number of iterations allowed ' + 'has been reached.' + }[status] + } + return _array_like(x, x0), info + else: + return _array_like(x, x0) + + +_set_doc(nonlin_solve) + + +def _nonlin_line_search(func, x, Fx, dx, search_type='armijo', rdiff=1e-8, + smin=1e-2): + tmp_s = [0] + tmp_Fx = [Fx] + tmp_phi = [norm(Fx)**2] + s_norm = norm(x) / norm(dx) + + def phi(s, store=True): + if s == tmp_s[0]: + return tmp_phi[0] + xt = x + s*dx + v = func(xt) + p = _safe_norm(v)**2 + if store: + tmp_s[0] = s + tmp_phi[0] = p + tmp_Fx[0] = v + return p + + def derphi(s): + ds = (abs(s) + s_norm + 1) * rdiff + return (phi(s+ds, store=False) - phi(s)) / ds + + if search_type == 'wolfe': + s, phi1, phi0 = scalar_search_wolfe1(phi, derphi, tmp_phi[0], + xtol=1e-2, amin=smin) + elif search_type == 'armijo': + s, phi1 = scalar_search_armijo(phi, tmp_phi[0], -tmp_phi[0], + amin=smin) + + if s is None: + # XXX: No suitable step length found. Take the full Newton step, + # and hope for the best. + s = 1.0 + + x = x + s*dx + if s == tmp_s[0]: + Fx = tmp_Fx[0] + else: + Fx = func(x) + Fx_norm = norm(Fx) + + return s, x, Fx, Fx_norm + + +class TerminationCondition: + """ + Termination condition for an iteration. 
It is terminated if + + - |F| < f_rtol*|F_0|, AND + - |F| < f_tol + + AND + + - |dx| < x_rtol*|x|, AND + - |dx| < x_tol + + """ + def __init__(self, f_tol=None, f_rtol=None, x_tol=None, x_rtol=None, + iter=None, norm=maxnorm): + + if f_tol is None: + f_tol = np.finfo(np.float64).eps ** (1./3) + if f_rtol is None: + f_rtol = np.inf + if x_tol is None: + x_tol = np.inf + if x_rtol is None: + x_rtol = np.inf + + self.x_tol = x_tol + self.x_rtol = x_rtol + self.f_tol = f_tol + self.f_rtol = f_rtol + + self.norm = norm + + self.iter = iter + + self.f0_norm = None + self.iteration = 0 + + def check(self, f, x, dx): + self.iteration += 1 + f_norm = self.norm(f) + x_norm = self.norm(x) + dx_norm = self.norm(dx) + + if self.f0_norm is None: + self.f0_norm = f_norm + + if f_norm == 0: + return 1 + + if self.iter is not None: + # backwards compatibility with SciPy 0.6.0 + return 2 * (self.iteration > self.iter) + + # NB: condition must succeed for rtol=inf even if norm == 0 + return int((f_norm <= self.f_tol + and f_norm/self.f_rtol <= self.f0_norm) + and (dx_norm <= self.x_tol + and dx_norm/self.x_rtol <= x_norm)) + + +#------------------------------------------------------------------------------ +# Generic Jacobian approximation +#------------------------------------------------------------------------------ + +class Jacobian: + """ + Common interface for Jacobians or Jacobian approximations. + + The optional methods come useful when implementing trust region + etc., algorithms that often require evaluating transposes of the + Jacobian. + + Methods + ------- + solve + Returns J^-1 * v + update + Updates Jacobian to point `x` (where the function has residual `Fx`) + + matvec : optional + Returns J * v + rmatvec : optional + Returns A^H * v + rsolve : optional + Returns A^-H * v + matmat : optional + Returns A * V, where V is a dense matrix with dimensions (N,K). + todense : optional + Form the dense Jacobian matrix. Necessary for dense trust region + algorithms, and useful for testing. + + Attributes + ---------- + shape + Matrix dimensions (M, N) + dtype + Data type of the matrix. + func : callable, optional + Function the Jacobian corresponds to + + """ + + def __init__(self, **kw): + names = ["solve", "update", "matvec", "rmatvec", "rsolve", + "matmat", "todense", "shape", "dtype"] + for name, value in kw.items(): + if name not in names: + raise ValueError(f"Unknown keyword argument {name}") + if value is not None: + setattr(self, name, kw[name]) + + + if hasattr(self, "todense"): + def __array__(self, dtype=None, copy=None): + if dtype is not None: + raise ValueError(f"`dtype` must be None, was {dtype}") + return self.todense() + + def aspreconditioner(self): + return InverseJacobian(self) + + def solve(self, v, tol=0): + raise NotImplementedError + + def update(self, x, F): + pass + + def setup(self, x, F, func): + self.func = func + self.shape = (F.size, x.size) + self.dtype = F.dtype + if self.__class__.setup is Jacobian.setup: + # Call on the first point unless overridden + self.update(x, F) + + +class InverseJacobian: + """ + A simple wrapper that inverts the Jacobian using the `solve` method. + + .. legacy:: class + + See the newer, more consistent interfaces in :mod:`scipy.optimize`. + + Parameters + ---------- + jacobian : Jacobian + The Jacobian to invert. + + Attributes + ---------- + shape + Matrix dimensions (M, N) + dtype + Data type of the matrix. 
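+
+    Examples
+    --------
+    A typical use (the same pattern shown in the `KrylovJacobian` docstring)
+    is to wrap a quasi-Newton Jacobian so it can serve as an adaptive
+    preconditioner for a Krylov solver:
+
+    >>> from scipy.optimize import BroydenFirst, KrylovJacobian
+    >>> from scipy.optimize import InverseJacobian
+    >>> jac = BroydenFirst()
+    >>> kjac = KrylovJacobian(inner_M=InverseJacobian(jac))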
+ + """ + def __init__(self, jacobian): + self.jacobian = jacobian + self.matvec = jacobian.solve + self.update = jacobian.update + if hasattr(jacobian, 'setup'): + self.setup = jacobian.setup + if hasattr(jacobian, 'rsolve'): + self.rmatvec = jacobian.rsolve + + @property + def shape(self): + return self.jacobian.shape + + @property + def dtype(self): + return self.jacobian.dtype + + +def asjacobian(J): + """ + Convert given object to one suitable for use as a Jacobian. + """ + spsolve = scipy.sparse.linalg.spsolve + if isinstance(J, Jacobian): + return J + elif inspect.isclass(J) and issubclass(J, Jacobian): + return J() + elif isinstance(J, np.ndarray): + if J.ndim > 2: + raise ValueError('array must have rank <= 2') + J = np.atleast_2d(np.asarray(J)) + if J.shape[0] != J.shape[1]: + raise ValueError('array must be square') + + return Jacobian(matvec=lambda v: dot(J, v), + rmatvec=lambda v: dot(J.conj().T, v), + solve=lambda v, tol=0: solve(J, v), + rsolve=lambda v, tol=0: solve(J.conj().T, v), + dtype=J.dtype, shape=J.shape) + elif scipy.sparse.issparse(J): + if J.shape[0] != J.shape[1]: + raise ValueError('matrix must be square') + return Jacobian(matvec=lambda v: J @ v, + rmatvec=lambda v: J.conj().T @ v, + solve=lambda v, tol=0: spsolve(J, v), + rsolve=lambda v, tol=0: spsolve(J.conj().T, v), + dtype=J.dtype, shape=J.shape) + elif hasattr(J, 'shape') and hasattr(J, 'dtype') and hasattr(J, 'solve'): + return Jacobian(matvec=getattr(J, 'matvec'), + rmatvec=getattr(J, 'rmatvec'), + solve=J.solve, + rsolve=getattr(J, 'rsolve'), + update=getattr(J, 'update'), + setup=getattr(J, 'setup'), + dtype=J.dtype, + shape=J.shape) + elif callable(J): + # Assume it's a function J(x) that returns the Jacobian + class Jac(Jacobian): + def update(self, x, F): + self.x = x + + def solve(self, v, tol=0): + m = J(self.x) + if isinstance(m, np.ndarray): + return solve(m, v) + elif scipy.sparse.issparse(m): + return spsolve(m, v) + else: + raise ValueError("Unknown matrix type") + + def matvec(self, v): + m = J(self.x) + if isinstance(m, np.ndarray): + return dot(m, v) + elif scipy.sparse.issparse(m): + return m @ v + else: + raise ValueError("Unknown matrix type") + + def rsolve(self, v, tol=0): + m = J(self.x) + if isinstance(m, np.ndarray): + return solve(m.conj().T, v) + elif scipy.sparse.issparse(m): + return spsolve(m.conj().T, v) + else: + raise ValueError("Unknown matrix type") + + def rmatvec(self, v): + m = J(self.x) + if isinstance(m, np.ndarray): + return dot(m.conj().T, v) + elif scipy.sparse.issparse(m): + return m.conj().T @ v + else: + raise ValueError("Unknown matrix type") + return Jac() + elif isinstance(J, str): + return dict(broyden1=BroydenFirst, + broyden2=BroydenSecond, + anderson=Anderson, + diagbroyden=DiagBroyden, + linearmixing=LinearMixing, + excitingmixing=ExcitingMixing, + krylov=KrylovJacobian)[J]() + else: + raise TypeError('Cannot convert object to a Jacobian') + + +#------------------------------------------------------------------------------ +# Broyden +#------------------------------------------------------------------------------ + +class GenericBroyden(Jacobian): + def setup(self, x0, f0, func): + Jacobian.setup(self, x0, f0, func) + self.last_f = f0 + self.last_x = x0 + + if hasattr(self, 'alpha') and self.alpha is None: + # Autoscale the initial Jacobian parameter + # unless we have already guessed the solution. 
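+            # For example, norm(x0) = 2.0 and norm(f0) = 5.0 give
+            # alpha = 0.5 * max(2.0, 1) / 5.0 = 0.2, so the subclasses start
+            # from an inverse-Jacobian approximation of -0.2 * I; if f0 is
+            # already zero, alpha is simply set to 1.0.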
+ normf0 = norm(f0) + if normf0: + self.alpha = 0.5*max(norm(x0), 1) / normf0 + else: + self.alpha = 1.0 + + def _update(self, x, f, dx, df, dx_norm, df_norm): + raise NotImplementedError + + def update(self, x, f): + df = f - self.last_f + dx = x - self.last_x + self._update(x, f, dx, df, norm(dx), norm(df)) + self.last_f = f + self.last_x = x + + +class LowRankMatrix: + r""" + A matrix represented as + + .. math:: \alpha I + \sum_{n=0}^{n=M} c_n d_n^\dagger + + However, if the rank of the matrix reaches the dimension of the vectors, + full matrix representation will be used thereon. + + """ + + def __init__(self, alpha, n, dtype): + self.alpha = alpha + self.cs = [] + self.ds = [] + self.n = n + self.dtype = dtype + self.collapsed = None + + @staticmethod + def _matvec(v, alpha, cs, ds): + axpy, scal, dotc = get_blas_funcs(['axpy', 'scal', 'dotc'], + cs[:1] + [v]) + w = alpha * v + for c, d in zip(cs, ds): + a = dotc(d, v) + w = axpy(c, w, w.size, a) + return w + + @staticmethod + def _solve(v, alpha, cs, ds): + """Evaluate w = M^-1 v""" + if len(cs) == 0: + return v/alpha + + # (B + C D^H)^-1 = B^-1 - B^-1 C (I + D^H B^-1 C)^-1 D^H B^-1 + + axpy, dotc = get_blas_funcs(['axpy', 'dotc'], cs[:1] + [v]) + + c0 = cs[0] + A = alpha * np.identity(len(cs), dtype=c0.dtype) + for i, d in enumerate(ds): + for j, c in enumerate(cs): + A[i,j] += dotc(d, c) + + q = np.zeros(len(cs), dtype=c0.dtype) + for j, d in enumerate(ds): + q[j] = dotc(d, v) + q /= alpha + q = solve(A, q) + + w = v/alpha + for c, qc in zip(cs, q): + w = axpy(c, w, w.size, -qc) + + return w + + def matvec(self, v): + """Evaluate w = M v""" + if self.collapsed is not None: + return np.dot(self.collapsed, v) + return LowRankMatrix._matvec(v, self.alpha, self.cs, self.ds) + + def rmatvec(self, v): + """Evaluate w = M^H v""" + if self.collapsed is not None: + return np.dot(self.collapsed.T.conj(), v) + return LowRankMatrix._matvec(v, np.conj(self.alpha), self.ds, self.cs) + + def solve(self, v, tol=0): + """Evaluate w = M^-1 v""" + if self.collapsed is not None: + return solve(self.collapsed, v) + return LowRankMatrix._solve(v, self.alpha, self.cs, self.ds) + + def rsolve(self, v, tol=0): + """Evaluate w = M^-H v""" + if self.collapsed is not None: + return solve(self.collapsed.T.conj(), v) + return LowRankMatrix._solve(v, np.conj(self.alpha), self.ds, self.cs) + + def append(self, c, d): + if self.collapsed is not None: + self.collapsed += c[:,None] * d[None,:].conj() + return + + self.cs.append(c) + self.ds.append(d) + + if len(self.cs) > c.size: + self.collapse() + + def __array__(self, dtype=None, copy=None): + if dtype is not None: + warnings.warn("LowRankMatrix is scipy-internal code, `dtype` " + f"should only be None but was {dtype} (not handled)", + stacklevel=3) + if copy is not None: + warnings.warn("LowRankMatrix is scipy-internal code, `copy` " + f"should only be None but was {copy} (not handled)", + stacklevel=3) + if self.collapsed is not None: + return self.collapsed + + Gm = self.alpha*np.identity(self.n, dtype=self.dtype) + for c, d in zip(self.cs, self.ds): + Gm += c[:,None]*d[None,:].conj() + return Gm + + def collapse(self): + """Collapse the low-rank matrix to a full-rank one.""" + self.collapsed = np.array(self, copy=copy_if_needed) + self.cs = None + self.ds = None + self.alpha = None + + def restart_reduce(self, rank): + """ + Reduce the rank of the matrix by dropping all vectors. 
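+
+        Once more than `rank` correction pairs have accumulated, all stored
+        (c, d) pairs are discarded at once, so the approximation reverts to
+        the diagonal ``alpha * I`` part.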
+ """ + if self.collapsed is not None: + return + assert rank > 0 + if len(self.cs) > rank: + del self.cs[:] + del self.ds[:] + + def simple_reduce(self, rank): + """ + Reduce the rank of the matrix by dropping oldest vectors. + """ + if self.collapsed is not None: + return + assert rank > 0 + while len(self.cs) > rank: + del self.cs[0] + del self.ds[0] + + def svd_reduce(self, max_rank, to_retain=None): + """ + Reduce the rank of the matrix by retaining some SVD components. + + This corresponds to the \"Broyden Rank Reduction Inverse\" + algorithm described in [1]_. + + Note that the SVD decomposition can be done by solving only a + problem whose size is the effective rank of this matrix, which + is viable even for large problems. + + Parameters + ---------- + max_rank : int + Maximum rank of this matrix after reduction. + to_retain : int, optional + Number of SVD components to retain when reduction is done + (ie. rank > max_rank). Default is ``max_rank - 2``. + + References + ---------- + .. [1] B.A. van der Rotten, PhD thesis, + \"A limited memory Broyden method to solve high-dimensional + systems of nonlinear equations\". Mathematisch Instituut, + Universiteit Leiden, The Netherlands (2003). + + https://web.archive.org/web/20161022015821/http://www.math.leidenuniv.nl/scripties/Rotten.pdf + + """ + if self.collapsed is not None: + return + + p = max_rank + if to_retain is not None: + q = to_retain + else: + q = p - 2 + + if self.cs: + p = min(p, len(self.cs[0])) + q = max(0, min(q, p-1)) + + m = len(self.cs) + if m < p: + # nothing to do + return + + C = np.array(self.cs).T + D = np.array(self.ds).T + + D, R = qr(D, mode='economic') + C = dot(C, R.T.conj()) + + U, S, WH = svd(C, full_matrices=False) + + C = dot(C, inv(WH)) + D = dot(D, WH.T.conj()) + + for k in range(q): + self.cs[k] = C[:,k].copy() + self.ds[k] = D[:,k].copy() + + del self.cs[q:] + del self.ds[q:] + + +_doc_parts['broyden_params'] = """ + alpha : float, optional + Initial guess for the Jacobian is ``(-1/alpha)``. + reduction_method : str or tuple, optional + Method used in ensuring that the rank of the Broyden matrix + stays low. Can either be a string giving the name of the method, + or a tuple of the form ``(method, param1, param2, ...)`` + that gives the name of the method and values for additional parameters. + + Methods available: + + - ``restart``: drop all matrix columns. Has no extra parameters. + - ``simple``: drop oldest matrix column. Has no extra parameters. + - ``svd``: keep only the most significant SVD components. + Takes an extra parameter, ``to_retain``, which determines the + number of SVD components to retain when rank reduction is done. + Default is ``max_rank - 2``. + + max_rank : int, optional + Maximum rank for the Broyden matrix. + Default is infinity (i.e., no rank reduction). + """.strip() + + +class BroydenFirst(GenericBroyden): + """ + Find a root of a function, using Broyden's first Jacobian approximation. + + This method is also known as "Broyden's good method". + + Parameters + ---------- + %(params_basic)s + %(broyden_params)s + %(params_extra)s + + See Also + -------- + root : Interface to root finding algorithms for multivariate + functions. See ``method='broyden1'`` in particular. + + Notes + ----- + This algorithm implements the inverse Jacobian Quasi-Newton update + + .. math:: H_+ = H + (dx - H df) dx^\\dagger H / ( dx^\\dagger H df) + + which corresponds to Broyden's first Jacobian update + + .. 
math:: J_+ = J + (df - J dx) dx^\\dagger / dx^\\dagger dx + + + References + ---------- + .. [1] B.A. van der Rotten, PhD thesis, + "A limited memory Broyden method to solve high-dimensional + systems of nonlinear equations". Mathematisch Instituut, + Universiteit Leiden, The Netherlands (2003). + https://math.leidenuniv.nl/scripties/Rotten.pdf + + Examples + -------- + The following functions define a system of nonlinear equations + + >>> def fun(x): + ... return [x[0] + 0.5 * (x[0] - x[1])**3 - 1.0, + ... 0.5 * (x[1] - x[0])**3 + x[1]] + + A solution can be obtained as follows. + + >>> from scipy import optimize + >>> sol = optimize.broyden1(fun, [0, 0]) + >>> sol + array([0.84116396, 0.15883641]) + + """ + + def __init__(self, alpha=None, reduction_method='restart', max_rank=None): + GenericBroyden.__init__(self) + self.alpha = alpha + self.Gm = None + + if max_rank is None: + max_rank = np.inf + self.max_rank = max_rank + + if isinstance(reduction_method, str): + reduce_params = () + else: + reduce_params = reduction_method[1:] + reduction_method = reduction_method[0] + reduce_params = (max_rank - 1,) + reduce_params + + if reduction_method == 'svd': + self._reduce = lambda: self.Gm.svd_reduce(*reduce_params) + elif reduction_method == 'simple': + self._reduce = lambda: self.Gm.simple_reduce(*reduce_params) + elif reduction_method == 'restart': + self._reduce = lambda: self.Gm.restart_reduce(*reduce_params) + else: + raise ValueError(f"Unknown rank reduction method '{reduction_method}'") + + def setup(self, x, F, func): + GenericBroyden.setup(self, x, F, func) + self.Gm = LowRankMatrix(-self.alpha, self.shape[0], self.dtype) + + def todense(self): + return inv(self.Gm) + + def solve(self, f, tol=0): + r = self.Gm.matvec(f) + if not np.isfinite(r).all(): + # singular; reset the Jacobian approximation + self.setup(self.last_x, self.last_f, self.func) + return self.Gm.matvec(f) + return r + + def matvec(self, f): + return self.Gm.solve(f) + + def rsolve(self, f, tol=0): + return self.Gm.rmatvec(f) + + def rmatvec(self, f): + return self.Gm.rsolve(f) + + def _update(self, x, f, dx, df, dx_norm, df_norm): + self._reduce() # reduce first to preserve secant condition + + v = self.Gm.rmatvec(dx) + c = dx - self.Gm.matvec(df) + d = v / vdot(df, v) + + self.Gm.append(c, d) + + +class BroydenSecond(BroydenFirst): + """ + Find a root of a function, using Broyden\'s second Jacobian approximation. + + This method is also known as \"Broyden's bad method\". + + Parameters + ---------- + %(params_basic)s + %(broyden_params)s + %(params_extra)s + + See Also + -------- + root : Interface to root finding algorithms for multivariate + functions. See ``method='broyden2'`` in particular. + + Notes + ----- + This algorithm implements the inverse Jacobian Quasi-Newton update + + .. math:: H_+ = H + (dx - H df) df^\\dagger / ( df^\\dagger df) + + corresponding to Broyden's second method. + + References + ---------- + .. [1] B.A. van der Rotten, PhD thesis, + \"A limited memory Broyden method to solve high-dimensional + systems of nonlinear equations\". Mathematisch Instituut, + Universiteit Leiden, The Netherlands (2003). + + https://web.archive.org/web/20161022015821/http://www.math.leidenuniv.nl/scripties/Rotten.pdf + + Examples + -------- + The following functions define a system of nonlinear equations + + >>> def fun(x): + ... return [x[0] + 0.5 * (x[0] - x[1])**3 - 1.0, + ... 0.5 * (x[1] - x[0])**3 + x[1]] + + A solution can be obtained as follows. 
+ + >>> from scipy import optimize + >>> sol = optimize.broyden2(fun, [0, 0]) + >>> sol + array([0.84116365, 0.15883529]) + + """ + + def _update(self, x, f, dx, df, dx_norm, df_norm): + self._reduce() # reduce first to preserve secant condition + + v = df + c = dx - self.Gm.matvec(df) + d = v / df_norm**2 + self.Gm.append(c, d) + + +#------------------------------------------------------------------------------ +# Broyden-like (restricted memory) +#------------------------------------------------------------------------------ + +class Anderson(GenericBroyden): + """ + Find a root of a function, using (extended) Anderson mixing. + + The Jacobian is formed by for a 'best' solution in the space + spanned by last `M` vectors. As a result, only a MxM matrix + inversions and MxN multiplications are required. [Ey]_ + + Parameters + ---------- + %(params_basic)s + alpha : float, optional + Initial guess for the Jacobian is (-1/alpha). + M : float, optional + Number of previous vectors to retain. Defaults to 5. + w0 : float, optional + Regularization parameter for numerical stability. + Compared to unity, good values of the order of 0.01. + %(params_extra)s + + See Also + -------- + root : Interface to root finding algorithms for multivariate + functions. See ``method='anderson'`` in particular. + + References + ---------- + .. [Ey] V. Eyert, J. Comp. Phys., 124, 271 (1996). + + Examples + -------- + The following functions define a system of nonlinear equations + + >>> def fun(x): + ... return [x[0] + 0.5 * (x[0] - x[1])**3 - 1.0, + ... 0.5 * (x[1] - x[0])**3 + x[1]] + + A solution can be obtained as follows. + + >>> from scipy import optimize + >>> sol = optimize.anderson(fun, [0, 0]) + >>> sol + array([0.84116588, 0.15883789]) + + """ + + # Note: + # + # Anderson method maintains a rank M approximation of the inverse Jacobian, + # + # J^-1 v ~ -v*alpha + (dX + alpha dF) A^-1 dF^H v + # A = W + dF^H dF + # W = w0^2 diag(dF^H dF) + # + # so that for w0 = 0 the secant condition applies for last M iterates, i.e., + # + # J^-1 df_j = dx_j + # + # for all j = 0 ... M-1. 
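+    #
+    # (Here dX and dF are the n-by-M matrices whose columns are the stored
+    # differences self.dx[j] and self.df[j], and ^H denotes the conjugate
+    # transpose.)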
+ # + # Moreover, (from Sherman-Morrison-Woodbury formula) + # + # J v ~ [ b I - b^2 C (I + b dF^H A^-1 C)^-1 dF^H ] v + # C = (dX + alpha dF) A^-1 + # b = -1/alpha + # + # and after simplification + # + # J v ~ -v/alpha + (dX/alpha + dF) (dF^H dX - alpha W)^-1 dF^H v + # + + def __init__(self, alpha=None, w0=0.01, M=5): + GenericBroyden.__init__(self) + self.alpha = alpha + self.M = M + self.dx = [] + self.df = [] + self.gamma = None + self.w0 = w0 + + def solve(self, f, tol=0): + dx = -self.alpha*f + + n = len(self.dx) + if n == 0: + return dx + + df_f = np.empty(n, dtype=f.dtype) + for k in range(n): + df_f[k] = vdot(self.df[k], f) + + try: + gamma = solve(self.a, df_f) + except LinAlgError: + # singular; reset the Jacobian approximation + del self.dx[:] + del self.df[:] + return dx + + for m in range(n): + dx += gamma[m]*(self.dx[m] + self.alpha*self.df[m]) + return dx + + def matvec(self, f): + dx = -f/self.alpha + + n = len(self.dx) + if n == 0: + return dx + + df_f = np.empty(n, dtype=f.dtype) + for k in range(n): + df_f[k] = vdot(self.df[k], f) + + b = np.empty((n, n), dtype=f.dtype) + for i in range(n): + for j in range(n): + b[i,j] = vdot(self.df[i], self.dx[j]) + if i == j and self.w0 != 0: + b[i,j] -= vdot(self.df[i], self.df[i])*self.w0**2*self.alpha + gamma = solve(b, df_f) + + for m in range(n): + dx += gamma[m]*(self.df[m] + self.dx[m]/self.alpha) + return dx + + def _update(self, x, f, dx, df, dx_norm, df_norm): + if self.M == 0: + return + + self.dx.append(dx) + self.df.append(df) + + while len(self.dx) > self.M: + self.dx.pop(0) + self.df.pop(0) + + n = len(self.dx) + a = np.zeros((n, n), dtype=f.dtype) + + for i in range(n): + for j in range(i, n): + if i == j: + wd = self.w0**2 + else: + wd = 0 + a[i,j] = (1+wd)*vdot(self.df[i], self.df[j]) + + a += np.triu(a, 1).T.conj() + self.a = a + +#------------------------------------------------------------------------------ +# Simple iterations +#------------------------------------------------------------------------------ + + +class DiagBroyden(GenericBroyden): + """ + Find a root of a function, using diagonal Broyden Jacobian approximation. + + The Jacobian approximation is derived from previous iterations, by + retaining only the diagonal of Broyden matrices. + + .. warning:: + + This algorithm may be useful for specific problems, but whether + it will work may depend strongly on the problem. + + Parameters + ---------- + %(params_basic)s + alpha : float, optional + Initial guess for the Jacobian is (-1/alpha). + %(params_extra)s + + See Also + -------- + root : Interface to root finding algorithms for multivariate + functions. See ``method='diagbroyden'`` in particular. + + Examples + -------- + The following functions define a system of nonlinear equations + + >>> def fun(x): + ... return [x[0] + 0.5 * (x[0] - x[1])**3 - 1.0, + ... 0.5 * (x[1] - x[0])**3 + x[1]] + + A solution can be obtained as follows. 
+ + >>> from scipy import optimize + >>> sol = optimize.diagbroyden(fun, [0, 0]) + >>> sol + array([0.84116403, 0.15883384]) + + """ + + def __init__(self, alpha=None): + GenericBroyden.__init__(self) + self.alpha = alpha + + def setup(self, x, F, func): + GenericBroyden.setup(self, x, F, func) + self.d = np.full((self.shape[0],), 1 / self.alpha, dtype=self.dtype) + + def solve(self, f, tol=0): + return -f / self.d + + def matvec(self, f): + return -f * self.d + + def rsolve(self, f, tol=0): + return -f / self.d.conj() + + def rmatvec(self, f): + return -f * self.d.conj() + + def todense(self): + return np.diag(-self.d) + + def _update(self, x, f, dx, df, dx_norm, df_norm): + self.d -= (df + self.d*dx)*dx/dx_norm**2 + + +class LinearMixing(GenericBroyden): + """ + Find a root of a function, using a scalar Jacobian approximation. + + .. warning:: + + This algorithm may be useful for specific problems, but whether + it will work may depend strongly on the problem. + + Parameters + ---------- + %(params_basic)s + alpha : float, optional + The Jacobian approximation is (-1/alpha). + %(params_extra)s + + See Also + -------- + root : Interface to root finding algorithms for multivariate + functions. See ``method='linearmixing'`` in particular. + + """ + + def __init__(self, alpha=None): + GenericBroyden.__init__(self) + self.alpha = alpha + + def solve(self, f, tol=0): + return -f*self.alpha + + def matvec(self, f): + return -f/self.alpha + + def rsolve(self, f, tol=0): + return -f*np.conj(self.alpha) + + def rmatvec(self, f): + return -f/np.conj(self.alpha) + + def todense(self): + return np.diag(np.full(self.shape[0], -1/self.alpha)) + + def _update(self, x, f, dx, df, dx_norm, df_norm): + pass + + +class ExcitingMixing(GenericBroyden): + """ + Find a root of a function, using a tuned diagonal Jacobian approximation. + + The Jacobian matrix is diagonal and is tuned on each iteration. + + .. warning:: + + This algorithm may be useful for specific problems, but whether + it will work may depend strongly on the problem. + + See Also + -------- + root : Interface to root finding algorithms for multivariate + functions. See ``method='excitingmixing'`` in particular. + + Parameters + ---------- + %(params_basic)s + alpha : float, optional + Initial Jacobian approximation is (-1/alpha). + alphamax : float, optional + The entries of the diagonal Jacobian are kept in the range + ``[alpha, alphamax]``. + %(params_extra)s + """ + + def __init__(self, alpha=None, alphamax=1.0): + GenericBroyden.__init__(self) + self.alpha = alpha + self.alphamax = alphamax + self.beta = None + + def setup(self, x, F, func): + GenericBroyden.setup(self, x, F, func) + self.beta = np.full((self.shape[0],), self.alpha, dtype=self.dtype) + + def solve(self, f, tol=0): + return -f*self.beta + + def matvec(self, f): + return -f/self.beta + + def rsolve(self, f, tol=0): + return -f*self.beta.conj() + + def rmatvec(self, f): + return -f/self.beta.conj() + + def todense(self): + return np.diag(-1/self.beta) + + def _update(self, x, f, dx, df, dx_norm, df_norm): + incr = f*self.last_f > 0 + self.beta[incr] += self.alpha + self.beta[~incr] = self.alpha + np.clip(self.beta, 0, self.alphamax, out=self.beta) + + +#------------------------------------------------------------------------------ +# Iterative/Krylov approximated Jacobians +#------------------------------------------------------------------------------ + +class KrylovJacobian(Jacobian): + """ + Find a root of a function, using Krylov approximation for inverse Jacobian. 
+ + This method is suitable for solving large-scale problems. + + Parameters + ---------- + %(params_basic)s + rdiff : float, optional + Relative step size to use in numerical differentiation. + method : str or callable, optional + Krylov method to use to approximate the Jacobian. Can be a string, + or a function implementing the same interface as the iterative + solvers in `scipy.sparse.linalg`. If a string, needs to be one of: + ``'lgmres'``, ``'gmres'``, ``'bicgstab'``, ``'cgs'``, ``'minres'``, + ``'tfqmr'``. + + The default is `scipy.sparse.linalg.lgmres`. + inner_maxiter : int, optional + Parameter to pass to the "inner" Krylov solver: maximum number of + iterations. Iteration will stop after maxiter steps even if the + specified tolerance has not been achieved. + inner_M : LinearOperator or InverseJacobian + Preconditioner for the inner Krylov iteration. + Note that you can use also inverse Jacobians as (adaptive) + preconditioners. For example, + + >>> from scipy.optimize import BroydenFirst, KrylovJacobian + >>> from scipy.optimize import InverseJacobian + >>> jac = BroydenFirst() + >>> kjac = KrylovJacobian(inner_M=InverseJacobian(jac)) + + If the preconditioner has a method named 'update', it will be called + as ``update(x, f)`` after each nonlinear step, with ``x`` giving + the current point, and ``f`` the current function value. + outer_k : int, optional + Size of the subspace kept across LGMRES nonlinear iterations. + See `scipy.sparse.linalg.lgmres` for details. + inner_kwargs : kwargs + Keyword parameters for the "inner" Krylov solver + (defined with `method`). Parameter names must start with + the `inner_` prefix which will be stripped before passing on + the inner method. See, e.g., `scipy.sparse.linalg.gmres` for details. + %(params_extra)s + + See Also + -------- + root : Interface to root finding algorithms for multivariate + functions. See ``method='krylov'`` in particular. + scipy.sparse.linalg.gmres + scipy.sparse.linalg.lgmres + + Notes + ----- + This function implements a Newton-Krylov solver. The basic idea is + to compute the inverse of the Jacobian with an iterative Krylov + method. These methods require only evaluating the Jacobian-vector + products, which are conveniently approximated by a finite difference: + + .. math:: J v \\approx (f(x + \\omega*v/|v|) - f(x)) / \\omega + + Due to the use of iterative matrix inverses, these methods can + deal with large nonlinear problems. + + SciPy's `scipy.sparse.linalg` module offers a selection of Krylov + solvers to choose from. The default here is `lgmres`, which is a + variant of restarted GMRES iteration that reuses some of the + information obtained in the previous Newton steps to invert + Jacobians in subsequent steps. + + For a review on Newton-Krylov methods, see for example [1]_, + and for the LGMRES sparse inverse method, see [2]_. + + References + ---------- + .. [1] C. T. Kelley, Solving Nonlinear Equations with Newton's Method, + SIAM, pp.57-83, 2003. + :doi:`10.1137/1.9780898718898.ch3` + .. [2] D.A. Knoll and D.E. Keyes, J. Comp. Phys. 193, 357 (2004). + :doi:`10.1016/j.jcp.2003.08.010` + .. [3] A.H. Baker and E.R. Jessup and T. Manteuffel, + SIAM J. Matrix Anal. Appl. 26, 962 (2005). + :doi:`10.1137/S0895479803422014` + + Examples + -------- + The following functions define a system of nonlinear equations + + >>> def fun(x): + ... return [x[0] + 0.5 * x[1] - 1.0, + ... 0.5 * (x[1] - x[0]) ** 2] + + A solution can be obtained as follows. 
+ + >>> from scipy import optimize + >>> sol = optimize.newton_krylov(fun, [0, 0]) + >>> sol + array([0.66731771, 0.66536458]) + + """ + + def __init__(self, rdiff=None, method='lgmres', inner_maxiter=20, + inner_M=None, outer_k=10, **kw): + self.preconditioner = inner_M + self.rdiff = rdiff + # Note that this retrieves one of the named functions, or otherwise + # uses `method` as is (i.e., for a user-provided callable). + self.method = dict( + bicgstab=scipy.sparse.linalg.bicgstab, + gmres=scipy.sparse.linalg.gmres, + lgmres=scipy.sparse.linalg.lgmres, + cgs=scipy.sparse.linalg.cgs, + minres=scipy.sparse.linalg.minres, + tfqmr=scipy.sparse.linalg.tfqmr, + ).get(method, method) + + self.method_kw = dict(maxiter=inner_maxiter, M=self.preconditioner) + + if self.method is scipy.sparse.linalg.gmres: + # Replace GMRES's outer iteration with Newton steps + self.method_kw['restart'] = inner_maxiter + self.method_kw['maxiter'] = 1 + self.method_kw.setdefault('atol', 0) + elif self.method in (scipy.sparse.linalg.gcrotmk, + scipy.sparse.linalg.bicgstab, + scipy.sparse.linalg.cgs): + self.method_kw.setdefault('atol', 0) + elif self.method is scipy.sparse.linalg.lgmres: + self.method_kw['outer_k'] = outer_k + # Replace LGMRES's outer iteration with Newton steps + self.method_kw['maxiter'] = 1 + # Carry LGMRES's `outer_v` vectors across nonlinear iterations + self.method_kw.setdefault('outer_v', []) + self.method_kw.setdefault('prepend_outer_v', True) + # But don't carry the corresponding Jacobian*v products, in case + # the Jacobian changes a lot in the nonlinear step + # + # XXX: some trust-region inspired ideas might be more efficient... + # See e.g., Brown & Saad. But needs to be implemented separately + # since it's not an inexact Newton method. + self.method_kw.setdefault('store_outer_Av', False) + self.method_kw.setdefault('atol', 0) + + for key, value in kw.items(): + if not key.startswith('inner_'): + raise ValueError(f"Unknown parameter {key}") + self.method_kw[key[6:]] = value + + def _update_diff_step(self): + mx = abs(self.x0).max() + mf = abs(self.f0).max() + self.omega = self.rdiff * max(1, mx) / max(1, mf) + + def matvec(self, v): + nv = norm(v) + if nv == 0: + return 0*v + sc = self.omega / nv + r = (self.func(self.x0 + sc*v) - self.f0) / sc + if not np.all(np.isfinite(r)) and np.all(np.isfinite(v)): + raise ValueError('Function returned non-finite results') + return r + + def solve(self, rhs, tol=0): + if 'rtol' in self.method_kw: + sol, info = self.method(self.op, rhs, **self.method_kw) + else: + sol, info = self.method(self.op, rhs, rtol=tol, **self.method_kw) + return sol + + def update(self, x, f): + self.x0 = x + self.f0 = f + self._update_diff_step() + + # Update also the preconditioner, if possible + if self.preconditioner is not None: + if hasattr(self.preconditioner, 'update'): + self.preconditioner.update(x, f) + + def setup(self, x, f, func): + Jacobian.setup(self, x, f, func) + self.x0 = x + self.f0 = f + self.op = scipy.sparse.linalg.aslinearoperator(self) + + if self.rdiff is None: + self.rdiff = np.finfo(x.dtype).eps ** (1./2) + + self._update_diff_step() + + # Setup also the preconditioner, if possible + if self.preconditioner is not None: + if hasattr(self.preconditioner, 'setup'): + self.preconditioner.setup(x, f, func) + + +#------------------------------------------------------------------------------ +# Wrapper functions +#------------------------------------------------------------------------------ + +def _nonlin_wrapper(name, jac): + """ + Construct a solver 
wrapper with given name and Jacobian approx. + + It inspects the keyword arguments of ``jac.__init__``, and allows to + use the same arguments in the wrapper function, in addition to the + keyword arguments of `nonlin_solve` + + """ + signature = _getfullargspec(jac.__init__) + args, varargs, varkw, defaults, kwonlyargs, kwdefaults, _ = signature + kwargs = list(zip(args[-len(defaults):], defaults)) + kw_str = ", ".join([f"{k}={v!r}" for k, v in kwargs]) + if kw_str: + kw_str = ", " + kw_str + kwkw_str = ", ".join([f"{k}={k}" for k, v in kwargs]) + if kwkw_str: + kwkw_str = kwkw_str + ", " + if kwonlyargs: + raise ValueError(f'Unexpected signature {signature}') + + # Construct the wrapper function so that its keyword arguments + # are visible in pydoc.help etc. + wrapper = """ +def %(name)s(F, xin, iter=None %(kw)s, verbose=False, maxiter=None, + f_tol=None, f_rtol=None, x_tol=None, x_rtol=None, + tol_norm=None, line_search='armijo', callback=None, **kw): + jac = %(jac)s(%(kwkw)s **kw) + return nonlin_solve(F, xin, jac, iter, verbose, maxiter, + f_tol, f_rtol, x_tol, x_rtol, tol_norm, line_search, + callback) +""" + + wrapper = wrapper % dict(name=name, kw=kw_str, jac=jac.__name__, + kwkw=kwkw_str) + ns = {} + ns.update(globals()) + exec(wrapper, ns) + func = ns[name] + func.__doc__ = jac.__doc__ + _set_doc(func) + return func + + +broyden1 = _nonlin_wrapper('broyden1', BroydenFirst) +broyden2 = _nonlin_wrapper('broyden2', BroydenSecond) +anderson = _nonlin_wrapper('anderson', Anderson) +linearmixing = _nonlin_wrapper('linearmixing', LinearMixing) +diagbroyden = _nonlin_wrapper('diagbroyden', DiagBroyden) +excitingmixing = _nonlin_wrapper('excitingmixing', ExcitingMixing) +newton_krylov = _nonlin_wrapper('newton_krylov', KrylovJacobian) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_numdiff.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_numdiff.py new file mode 100644 index 0000000000000000000000000000000000000000..6f847a8ebdaec7df7428ca1267a024fb9212d824 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_numdiff.py @@ -0,0 +1,785 @@ +"""Routines for numerical differentiation.""" +import functools +import numpy as np +from numpy.linalg import norm + +from scipy.sparse.linalg import LinearOperator +from ..sparse import issparse, csc_matrix, csr_matrix, coo_matrix, find +from ._group_columns import group_dense, group_sparse +from scipy._lib._array_api import array_namespace +from scipy._lib import array_api_extra as xpx + + +def _adjust_scheme_to_bounds(x0, h, num_steps, scheme, lb, ub): + """Adjust final difference scheme to the presence of bounds. + + Parameters + ---------- + x0 : ndarray, shape (n,) + Point at which we wish to estimate derivative. + h : ndarray, shape (n,) + Desired absolute finite difference steps. + num_steps : int + Number of `h` steps in one direction required to implement finite + difference scheme. For example, 2 means that we need to evaluate + f(x0 + 2 * h) or f(x0 - 2 * h) + scheme : {'1-sided', '2-sided'} + Whether steps in one or both directions are required. In other + words '1-sided' applies to forward and backward schemes, '2-sided' + applies to center schemes. + lb : ndarray, shape (n,) + Lower bounds on independent variables. + ub : ndarray, shape (n,) + Upper bounds on independent variables. + + Returns + ------- + h_adjusted : ndarray, shape (n,) + Adjusted absolute step sizes. 
Step size decreases only if a sign flip + or switching to one-sided scheme doesn't allow to take a full step. + use_one_sided : ndarray of bool, shape (n,) + Whether to switch to one-sided scheme. Informative only for + ``scheme='2-sided'``. + """ + if scheme == '1-sided': + use_one_sided = np.ones_like(h, dtype=bool) + elif scheme == '2-sided': + h = np.abs(h) + use_one_sided = np.zeros_like(h, dtype=bool) + else: + raise ValueError("`scheme` must be '1-sided' or '2-sided'.") + + if np.all((lb == -np.inf) & (ub == np.inf)): + return h, use_one_sided + + h_total = h * num_steps + h_adjusted = h.copy() + + lower_dist = x0 - lb + upper_dist = ub - x0 + + if scheme == '1-sided': + x = x0 + h_total + violated = (x < lb) | (x > ub) + fitting = np.abs(h_total) <= np.maximum(lower_dist, upper_dist) + h_adjusted[violated & fitting] *= -1 + + forward = (upper_dist >= lower_dist) & ~fitting + h_adjusted[forward] = upper_dist[forward] / num_steps + backward = (upper_dist < lower_dist) & ~fitting + h_adjusted[backward] = -lower_dist[backward] / num_steps + elif scheme == '2-sided': + central = (lower_dist >= h_total) & (upper_dist >= h_total) + + forward = (upper_dist >= lower_dist) & ~central + h_adjusted[forward] = np.minimum( + h[forward], 0.5 * upper_dist[forward] / num_steps) + use_one_sided[forward] = True + + backward = (upper_dist < lower_dist) & ~central + h_adjusted[backward] = -np.minimum( + h[backward], 0.5 * lower_dist[backward] / num_steps) + use_one_sided[backward] = True + + min_dist = np.minimum(upper_dist, lower_dist) / num_steps + adjusted_central = (~central & (np.abs(h_adjusted) <= min_dist)) + h_adjusted[adjusted_central] = min_dist[adjusted_central] + use_one_sided[adjusted_central] = False + + return h_adjusted, use_one_sided + + +@functools.lru_cache +def _eps_for_method(x0_dtype, f0_dtype, method): + """ + Calculates relative EPS step to use for a given data type + and numdiff step method. + + Progressively smaller steps are used for larger floating point types. + + Parameters + ---------- + f0_dtype: np.dtype + dtype of function evaluation + + x0_dtype: np.dtype + dtype of parameter vector + + method: {'2-point', '3-point', 'cs'} + + Returns + ------- + EPS: float + relative step size. May be np.float16, np.float32, np.float64 + + Notes + ----- + The default relative step will be np.float64. However, if x0 or f0 are + smaller floating point types (np.float16, np.float32), then the smallest + floating point type is chosen. + """ + # the default EPS value + EPS = np.finfo(np.float64).eps + + x0_is_fp = False + if np.issubdtype(x0_dtype, np.inexact): + # if you're a floating point type then over-ride the default EPS + EPS = np.finfo(x0_dtype).eps + x0_itemsize = np.dtype(x0_dtype).itemsize + x0_is_fp = True + + if np.issubdtype(f0_dtype, np.inexact): + f0_itemsize = np.dtype(f0_dtype).itemsize + # choose the smallest itemsize between x0 and f0 + if x0_is_fp and f0_itemsize < x0_itemsize: + EPS = np.finfo(f0_dtype).eps + + if method in ["2-point", "cs"]: + return EPS**0.5 + elif method in ["3-point"]: + return EPS**(1/3) + else: + raise RuntimeError("Unknown step method, should be one of " + "{'2-point', '3-point', 'cs'}") + + +def _compute_absolute_step(rel_step, x0, f0, method): + """ + Computes an absolute step from a relative step for finite difference + calculation. 
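+
+    For example, with ``rel_step=None``, ``method='2-point'`` and float64
+    ``x0 = np.array([0.0, 10.0])`` (and float64 ``f0``), the returned step is
+    ``sqrt(eps) * [1.0, 10.0]``: the ``max(1, abs(x0))`` factor keeps the
+    step from collapsing for entries near zero, and the sign follows
+    ``x0 >= 0``.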
+ + Parameters + ---------- + rel_step: None or array-like + Relative step for the finite difference calculation + x0 : np.ndarray + Parameter vector + f0 : np.ndarray or scalar + method : {'2-point', '3-point', 'cs'} + + Returns + ------- + h : float + The absolute step size + + Notes + ----- + `h` will always be np.float64. However, if `x0` or `f0` are + smaller floating point dtypes (e.g. np.float32), then the absolute + step size will be calculated from the smallest floating point size. + """ + # this is used instead of np.sign(x0) because we need + # sign_x0 to be 1 when x0 == 0. + sign_x0 = (x0 >= 0).astype(float) * 2 - 1 + + rstep = _eps_for_method(x0.dtype, f0.dtype, method) + + if rel_step is None: + abs_step = rstep * sign_x0 * np.maximum(1.0, np.abs(x0)) + else: + # User has requested specific relative steps. + # Don't multiply by max(1, abs(x0) because if x0 < 1 then their + # requested step is not used. + abs_step = rel_step * sign_x0 * np.abs(x0) + + # however we don't want an abs_step of 0, which can happen if + # rel_step is 0, or x0 is 0. Instead, substitute a realistic step + dx = ((x0 + abs_step) - x0) + abs_step = np.where(dx == 0, + rstep * sign_x0 * np.maximum(1.0, np.abs(x0)), + abs_step) + + return abs_step + + +def _prepare_bounds(bounds, x0): + """ + Prepares new-style bounds from a two-tuple specifying the lower and upper + limits for values in x0. If a value is not bound then the lower/upper bound + will be expected to be -np.inf/np.inf. + + Examples + -------- + >>> _prepare_bounds([(0, 1, 2), (1, 2, np.inf)], [0.5, 1.5, 2.5]) + (array([0., 1., 2.]), array([ 1., 2., inf])) + """ + lb, ub = (np.asarray(b, dtype=float) for b in bounds) + if lb.ndim == 0: + lb = np.resize(lb, x0.shape) + + if ub.ndim == 0: + ub = np.resize(ub, x0.shape) + + return lb, ub + + +def group_columns(A, order=0): + """Group columns of a 2-D matrix for sparse finite differencing [1]_. + + Two columns are in the same group if in each row at least one of them + has zero. A greedy sequential algorithm is used to construct groups. + + Parameters + ---------- + A : array_like or sparse matrix, shape (m, n) + Matrix of which to group columns. + order : int, iterable of int with shape (n,) or None + Permutation array which defines the order of columns enumeration. + If int or None, a random permutation is used with `order` used as + a random seed. Default is 0, that is use a random permutation but + guarantee repeatability. + + Returns + ------- + groups : ndarray of int, shape (n,) + Contains values from 0 to n_groups-1, where n_groups is the number + of found groups. Each value ``groups[i]`` is an index of a group to + which ith column assigned. The procedure was helpful only if + n_groups is significantly less than n. + + References + ---------- + .. [1] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of + sparse Jacobian matrices", Journal of the Institute of Mathematics + and its Applications, 13 (1974), pp. 117-120. 
+ """ + if issparse(A): + A = csc_matrix(A) + else: + A = np.atleast_2d(A) + A = (A != 0).astype(np.int32) + + if A.ndim != 2: + raise ValueError("`A` must be 2-dimensional.") + + m, n = A.shape + + if order is None or np.isscalar(order): + rng = np.random.RandomState(order) + order = rng.permutation(n) + else: + order = np.asarray(order) + if order.shape != (n,): + raise ValueError("`order` has incorrect shape.") + + A = A[:, order] + + if issparse(A): + groups = group_sparse(m, n, A.indices, A.indptr) + else: + groups = group_dense(m, n, A) + + groups[order] = groups.copy() + + return groups + + +def approx_derivative(fun, x0, method='3-point', rel_step=None, abs_step=None, + f0=None, bounds=(-np.inf, np.inf), sparsity=None, + as_linear_operator=False, args=(), kwargs=None): + """Compute finite difference approximation of the derivatives of a + vector-valued function. + + If a function maps from R^n to R^m, its derivatives form m-by-n matrix + called the Jacobian, where an element (i, j) is a partial derivative of + f[i] with respect to x[j]. + + Parameters + ---------- + fun : callable + Function of which to estimate the derivatives. The argument x + passed to this function is ndarray of shape (n,) (never a scalar + even if n=1). It must return 1-D array_like of shape (m,) or a scalar. + x0 : array_like of shape (n,) or float + Point at which to estimate the derivatives. Float will be converted + to a 1-D array. + method : {'3-point', '2-point', 'cs'}, optional + Finite difference method to use: + - '2-point' - use the first order accuracy forward or backward + difference. + - '3-point' - use central difference in interior points and the + second order accuracy forward or backward difference + near the boundary. + - 'cs' - use a complex-step finite difference scheme. This assumes + that the user function is real-valued and can be + analytically continued to the complex plane. Otherwise, + produces bogus results. + rel_step : None or array_like, optional + Relative step size to use. If None (default) the absolute step size is + computed as ``h = rel_step * sign(x0) * max(1, abs(x0))``, with + `rel_step` being selected automatically, see Notes. Otherwise + ``h = rel_step * sign(x0) * abs(x0)``. For ``method='3-point'`` the + sign of `h` is ignored. The calculated step size is possibly adjusted + to fit into the bounds. + abs_step : array_like, optional + Absolute step size to use, possibly adjusted to fit into the bounds. + For ``method='3-point'`` the sign of `abs_step` is ignored. By default + relative steps are used, only if ``abs_step is not None`` are absolute + steps used. + f0 : None or array_like, optional + If not None it is assumed to be equal to ``fun(x0)``, in this case + the ``fun(x0)`` is not called. Default is None. + bounds : tuple of array_like, optional + Lower and upper bounds on independent variables. Defaults to no bounds. + Each bound must match the size of `x0` or be a scalar, in the latter + case the bound will be the same for all variables. Use it to limit the + range of function evaluation. Bounds checking is not implemented + when `as_linear_operator` is True. + sparsity : {None, array_like, sparse matrix, 2-tuple}, optional + Defines a sparsity structure of the Jacobian matrix. If the Jacobian + matrix is known to have only few non-zero elements in each row, then + it's possible to estimate its several columns by a single function + evaluation [3]_. 
To perform such economic computations two ingredients + are required: + + * structure : array_like or sparse matrix of shape (m, n). A zero + element means that a corresponding element of the Jacobian + identically equals to zero. + * groups : array_like of shape (n,). A column grouping for a given + sparsity structure, use `group_columns` to obtain it. + + A single array or a sparse matrix is interpreted as a sparsity + structure, and groups are computed inside the function. A tuple is + interpreted as (structure, groups). If None (default), a standard + dense differencing will be used. + + Note, that sparse differencing makes sense only for large Jacobian + matrices where each row contains few non-zero elements. + as_linear_operator : bool, optional + When True the function returns an `scipy.sparse.linalg.LinearOperator`. + Otherwise it returns a dense array or a sparse matrix depending on + `sparsity`. The linear operator provides an efficient way of computing + ``J.dot(p)`` for any vector ``p`` of shape (n,), but does not allow + direct access to individual elements of the matrix. By default + `as_linear_operator` is False. + args, kwargs : tuple and dict, optional + Additional arguments passed to `fun`. Both empty by default. + The calling signature is ``fun(x, *args, **kwargs)``. + + Returns + ------- + J : {ndarray, sparse matrix, LinearOperator} + Finite difference approximation of the Jacobian matrix. + If `as_linear_operator` is True returns a LinearOperator + with shape (m, n). Otherwise it returns a dense array or sparse + matrix depending on how `sparsity` is defined. If `sparsity` + is None then a ndarray with shape (m, n) is returned. If + `sparsity` is not None returns a csr_matrix with shape (m, n). + For sparse matrices and linear operators it is always returned as + a 2-D structure, for ndarrays, if m=1 it is returned + as a 1-D gradient array with shape (n,). + + See Also + -------- + check_derivative : Check correctness of a function computing derivatives. + + Notes + ----- + If `rel_step` is not provided, it assigned as ``EPS**(1/s)``, where EPS is + determined from the smallest floating point dtype of `x0` or `fun(x0)`, + ``np.finfo(x0.dtype).eps``, s=2 for '2-point' method and + s=3 for '3-point' method. Such relative step approximately minimizes a sum + of truncation and round-off errors, see [1]_. Relative steps are used by + default. However, absolute steps are used when ``abs_step is not None``. + If any of the absolute or relative steps produces an indistinguishable + difference from the original `x0`, ``(x0 + dx) - x0 == 0``, then a + automatic step size is substituted for that particular entry. + + A finite difference scheme for '3-point' method is selected automatically. + The well-known central difference scheme is used for points sufficiently + far from the boundary, and 3-point forward or backward scheme is used for + points near the boundary. Both schemes have the second-order accuracy in + terms of Taylor expansion. Refer to [2]_ for the formulas of 3-point + forward and backward difference schemes. + + For dense differencing when m=1 Jacobian is returned with a shape (n,), + on the other hand when n=1 Jacobian is returned with a shape (m, 1). + Our motivation is the following: a) It handles a case of gradient + computation (m=1) in a conventional way. b) It clearly separates these two + different cases. b) In all cases np.atleast_2d can be called to get 2-D + Jacobian with correct dimensions. + + References + ---------- + .. [1] W. H. Press et. al. 
"Numerical Recipes. The Art of Scientific + Computing. 3rd edition", sec. 5.7. + + .. [2] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of + sparse Jacobian matrices", Journal of the Institute of Mathematics + and its Applications, 13 (1974), pp. 117-120. + + .. [3] B. Fornberg, "Generation of Finite Difference Formulas on + Arbitrarily Spaced Grids", Mathematics of Computation 51, 1988. + + Examples + -------- + >>> import numpy as np + >>> from scipy.optimize._numdiff import approx_derivative + >>> + >>> def f(x, c1, c2): + ... return np.array([x[0] * np.sin(c1 * x[1]), + ... x[0] * np.cos(c2 * x[1])]) + ... + >>> x0 = np.array([1.0, 0.5 * np.pi]) + >>> approx_derivative(f, x0, args=(1, 2)) + array([[ 1., 0.], + [-1., 0.]]) + + Bounds can be used to limit the region of function evaluation. + In the example below we compute left and right derivative at point 1.0. + + >>> def g(x): + ... return x**2 if x >= 1 else x + ... + >>> x0 = 1.0 + >>> approx_derivative(g, x0, bounds=(-np.inf, 1.0)) + array([ 1.]) + >>> approx_derivative(g, x0, bounds=(1.0, np.inf)) + array([ 2.]) + """ + if method not in ['2-point', '3-point', 'cs']: + raise ValueError(f"Unknown method '{method}'. ") + + xp = array_namespace(x0) + _x = xpx.atleast_nd(xp.asarray(x0), ndim=1, xp=xp) + _dtype = xp.float64 + if xp.isdtype(_x.dtype, "real floating"): + _dtype = _x.dtype + + # promotes to floating + x0 = xp.astype(_x, _dtype) + + if x0.ndim > 1: + raise ValueError("`x0` must have at most 1 dimension.") + + lb, ub = _prepare_bounds(bounds, x0) + + if lb.shape != x0.shape or ub.shape != x0.shape: + raise ValueError("Inconsistent shapes between bounds and `x0`.") + + if as_linear_operator and not (np.all(np.isinf(lb)) + and np.all(np.isinf(ub))): + raise ValueError("Bounds not supported when " + "`as_linear_operator` is True.") + + if kwargs is None: + kwargs = {} + + def fun_wrapped(x): + # send user function same fp type as x0. (but only if cs is not being + # used + if xp.isdtype(x.dtype, "real floating"): + x = xp.astype(x, x0.dtype) + + f = np.atleast_1d(fun(x, *args, **kwargs)) + if f.ndim > 1: + raise RuntimeError("`fun` return value has " + "more than 1 dimension.") + return f + + if f0 is None: + f0 = fun_wrapped(x0) + else: + f0 = np.atleast_1d(f0) + if f0.ndim > 1: + raise ValueError("`f0` passed has more than 1 dimension.") + + if np.any((x0 < lb) | (x0 > ub)): + raise ValueError("`x0` violates bound constraints.") + + if as_linear_operator: + if rel_step is None: + rel_step = _eps_for_method(x0.dtype, f0.dtype, method) + + return _linear_operator_difference(fun_wrapped, x0, + f0, rel_step, method) + else: + # by default we use rel_step + if abs_step is None: + h = _compute_absolute_step(rel_step, x0, f0, method) + else: + # user specifies an absolute step + sign_x0 = (x0 >= 0).astype(float) * 2 - 1 + h = abs_step + + # cannot have a zero step. This might happen if x0 is very large + # or small. In which case fall back to relative step. 
+ dx = ((x0 + h) - x0) + h = np.where(dx == 0, + _eps_for_method(x0.dtype, f0.dtype, method) * + sign_x0 * np.maximum(1.0, np.abs(x0)), + h) + + if method == '2-point': + h, use_one_sided = _adjust_scheme_to_bounds( + x0, h, 1, '1-sided', lb, ub) + elif method == '3-point': + h, use_one_sided = _adjust_scheme_to_bounds( + x0, h, 1, '2-sided', lb, ub) + elif method == 'cs': + use_one_sided = False + + if sparsity is None: + return _dense_difference(fun_wrapped, x0, f0, h, + use_one_sided, method) + else: + if not issparse(sparsity) and len(sparsity) == 2: + structure, groups = sparsity + else: + structure = sparsity + groups = group_columns(sparsity) + + if issparse(structure): + structure = csc_matrix(structure) + else: + structure = np.atleast_2d(structure) + + groups = np.atleast_1d(groups) + return _sparse_difference(fun_wrapped, x0, f0, h, + use_one_sided, structure, + groups, method) + + +def _linear_operator_difference(fun, x0, f0, h, method): + m = f0.size + n = x0.size + + if method == '2-point': + def matvec(p): + if np.array_equal(p, np.zeros_like(p)): + return np.zeros(m) + dx = h / norm(p) + x = x0 + dx*p + df = fun(x) - f0 + return df / dx + + elif method == '3-point': + def matvec(p): + if np.array_equal(p, np.zeros_like(p)): + return np.zeros(m) + dx = 2*h / norm(p) + x1 = x0 - (dx/2)*p + x2 = x0 + (dx/2)*p + f1 = fun(x1) + f2 = fun(x2) + df = f2 - f1 + return df / dx + + elif method == 'cs': + def matvec(p): + if np.array_equal(p, np.zeros_like(p)): + return np.zeros(m) + dx = h / norm(p) + x = x0 + dx*p*1.j + f1 = fun(x) + df = f1.imag + return df / dx + + else: + raise RuntimeError("Never be here.") + + return LinearOperator((m, n), matvec) + + +def _dense_difference(fun, x0, f0, h, use_one_sided, method): + m = f0.size + n = x0.size + J_transposed = np.empty((n, m)) + x1 = x0.copy() + x2 = x0.copy() + xc = x0.astype(complex, copy=True) + + for i in range(h.size): + if method == '2-point': + x1[i] += h[i] + dx = x1[i] - x0[i] # Recompute dx as exactly representable number. + df = fun(x1) - f0 + elif method == '3-point' and use_one_sided[i]: + x1[i] += h[i] + x2[i] += 2 * h[i] + dx = x2[i] - x0[i] + f1 = fun(x1) + f2 = fun(x2) + df = -3.0 * f0 + 4 * f1 - f2 + elif method == '3-point' and not use_one_sided[i]: + x1[i] -= h[i] + x2[i] += h[i] + dx = x2[i] - x1[i] + f1 = fun(x1) + f2 = fun(x2) + df = f2 - f1 + elif method == 'cs': + xc[i] += h[i] * 1.j + f1 = fun(xc) + df = f1.imag + dx = h[i] + else: + raise RuntimeError("Never be here.") + + J_transposed[i] = df / dx + x1[i] = x2[i] = xc[i] = x0[i] + + if m == 1: + J_transposed = np.ravel(J_transposed) + + return J_transposed.T + + +def _sparse_difference(fun, x0, f0, h, use_one_sided, + structure, groups, method): + m = f0.size + n = x0.size + row_indices = [] + col_indices = [] + fractions = [] + + n_groups = np.max(groups) + 1 + for group in range(n_groups): + # Perturb variables which are in the same group simultaneously. + e = np.equal(group, groups) + h_vec = h * e + if method == '2-point': + x = x0 + h_vec + dx = x - x0 + df = fun(x) - f0 + # The result is written to columns which correspond to perturbed + # variables. + cols, = np.nonzero(e) + # Find all non-zero elements in selected columns of Jacobian. + i, j, _ = find(structure[:, cols]) + # Restore column indices in the full array. + j = cols[j] + elif method == '3-point': + # Here we do conceptually the same but separate one-sided + # and two-sided schemes. 
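+            # mask_1 selects the perturbed variables of this group that use
+            # the one-sided stencil (f0, f(x0+h), f(x0+2h)), mask_2 those
+            # that use the central stencil (f(x0-h), f(x0+h)); x1 and x2
+            # hold the shifted evaluation points for the whole group at once.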
+ x1 = x0.copy() + x2 = x0.copy() + + mask_1 = use_one_sided & e + x1[mask_1] += h_vec[mask_1] + x2[mask_1] += 2 * h_vec[mask_1] + + mask_2 = ~use_one_sided & e + x1[mask_2] -= h_vec[mask_2] + x2[mask_2] += h_vec[mask_2] + + dx = np.zeros(n) + dx[mask_1] = x2[mask_1] - x0[mask_1] + dx[mask_2] = x2[mask_2] - x1[mask_2] + + f1 = fun(x1) + f2 = fun(x2) + + cols, = np.nonzero(e) + i, j, _ = find(structure[:, cols]) + j = cols[j] + + mask = use_one_sided[j] + df = np.empty(m) + + rows = i[mask] + df[rows] = -3 * f0[rows] + 4 * f1[rows] - f2[rows] + + rows = i[~mask] + df[rows] = f2[rows] - f1[rows] + elif method == 'cs': + f1 = fun(x0 + h_vec*1.j) + df = f1.imag + dx = h_vec + cols, = np.nonzero(e) + i, j, _ = find(structure[:, cols]) + j = cols[j] + else: + raise ValueError("Never be here.") + + # All that's left is to compute the fraction. We store i, j and + # fractions as separate arrays and later construct coo_matrix. + row_indices.append(i) + col_indices.append(j) + fractions.append(df[i] / dx[j]) + + row_indices = np.hstack(row_indices) + col_indices = np.hstack(col_indices) + fractions = np.hstack(fractions) + J = coo_matrix((fractions, (row_indices, col_indices)), shape=(m, n)) + return csr_matrix(J) + + +def check_derivative(fun, jac, x0, bounds=(-np.inf, np.inf), args=(), + kwargs=None): + """Check correctness of a function computing derivatives (Jacobian or + gradient) by comparison with a finite difference approximation. + + Parameters + ---------- + fun : callable + Function of which to estimate the derivatives. The argument x + passed to this function is ndarray of shape (n,) (never a scalar + even if n=1). It must return 1-D array_like of shape (m,) or a scalar. + jac : callable + Function which computes Jacobian matrix of `fun`. It must work with + argument x the same way as `fun`. The return value must be array_like + or sparse matrix with an appropriate shape. + x0 : array_like of shape (n,) or float + Point at which to estimate the derivatives. Float will be converted + to 1-D array. + bounds : 2-tuple of array_like, optional + Lower and upper bounds on independent variables. Defaults to no bounds. + Each bound must match the size of `x0` or be a scalar, in the latter + case the bound will be the same for all variables. Use it to limit the + range of function evaluation. + args, kwargs : tuple and dict, optional + Additional arguments passed to `fun` and `jac`. Both empty by default. + The calling signature is ``fun(x, *args, **kwargs)`` and the same + for `jac`. + + Returns + ------- + accuracy : float + The maximum among all relative errors for elements with absolute values + higher than 1 and absolute errors for elements with absolute values + less or equal than 1. If `accuracy` is on the order of 1e-6 or lower, + then it is likely that your `jac` implementation is correct. + + See Also + -------- + approx_derivative : Compute finite difference approximation of derivative. + + Examples + -------- + >>> import numpy as np + >>> from scipy.optimize._numdiff import check_derivative + >>> + >>> + >>> def f(x, c1, c2): + ... return np.array([x[0] * np.sin(c1 * x[1]), + ... x[0] * np.cos(c2 * x[1])]) + ... + >>> def jac(x, c1, c2): + ... return np.array([ + ... [np.sin(c1 * x[1]), c1 * x[0] * np.cos(c1 * x[1])], + ... [np.cos(c2 * x[1]), -c2 * x[0] * np.sin(c2 * x[1])] + ... ]) + ... 
+ >>> + >>> x0 = np.array([1.0, 0.5 * np.pi]) + >>> check_derivative(f, jac, x0, args=(1, 2)) + 2.4492935982947064e-16 + """ + if kwargs is None: + kwargs = {} + J_to_test = jac(x0, *args, **kwargs) + if issparse(J_to_test): + J_diff = approx_derivative(fun, x0, bounds=bounds, sparsity=J_to_test, + args=args, kwargs=kwargs) + J_to_test = csr_matrix(J_to_test) + abs_err = J_to_test - J_diff + i, j, abs_err_data = find(abs_err) + J_diff_data = np.asarray(J_diff[i, j]).ravel() + return np.max(np.abs(abs_err_data) / + np.maximum(1, np.abs(J_diff_data))) + else: + J_diff = approx_derivative(fun, x0, bounds=bounds, + args=args, kwargs=kwargs) + abs_err = np.abs(J_to_test - J_diff) + return np.max(abs_err / np.maximum(1, np.abs(J_diff))) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_optimize.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_optimize.py new file mode 100644 index 0000000000000000000000000000000000000000..4c0214daad371c51c5b860e4773e2bb01faf123d --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_optimize.py @@ -0,0 +1,4131 @@ +#__docformat__ = "restructuredtext en" +# ******NOTICE*************** +# optimize.py module by Travis E. Oliphant +# +# You may copy and use this module as you see fit with no +# guarantee implied provided you keep this notice in all copies. +# *****END NOTICE************ + +# A collection of optimization algorithms. Version 0.5 +# CHANGES +# Added fminbound (July 2001) +# Added brute (Aug. 2002) +# Finished line search satisfying strong Wolfe conditions (Mar. 2004) +# Updated strong Wolfe conditions line search to use +# cubic-interpolation (Mar. 2004) + + +# Minimization routines + +__all__ = ['fmin', 'fmin_powell', 'fmin_bfgs', 'fmin_ncg', 'fmin_cg', + 'fminbound', 'brent', 'golden', 'bracket', 'rosen', 'rosen_der', + 'rosen_hess', 'rosen_hess_prod', 'brute', 'approx_fprime', + 'line_search', 'check_grad', 'OptimizeResult', 'show_options', + 'OptimizeWarning'] + +__docformat__ = "restructuredtext en" + +import math +import warnings +import sys +import inspect +from numpy import eye, argmin, zeros, shape, asarray, sqrt +import numpy as np +from scipy.linalg import cholesky, issymmetric, LinAlgError +from scipy.sparse.linalg import LinearOperator +from ._linesearch import (line_search_wolfe1, line_search_wolfe2, + line_search_wolfe2 as line_search, + LineSearchWarning) +from ._numdiff import approx_derivative +from scipy._lib._util import getfullargspec_no_self as _getfullargspec +from scipy._lib._util import (MapWrapper, check_random_state, _RichResult, + _call_callback_maybe_halt, _transition_to_rng) +from scipy.optimize._differentiable_functions import ScalarFunction, FD_METHODS +from scipy._lib._array_api import array_namespace +from scipy._lib import array_api_extra as xpx + + +# standard status messages of optimizers +_status_message = {'success': 'Optimization terminated successfully.', + 'maxfev': 'Maximum number of function evaluations has ' + 'been exceeded.', + 'maxiter': 'Maximum number of iterations has been ' + 'exceeded.', + 'pr_loss': 'Desired error not necessarily achieved due ' + 'to precision loss.', + 'nan': 'NaN result encountered.', + 'out_of_bounds': 'The result is outside of the provided ' + 'bounds.'} + + +class MemoizeJac: + """Decorator that caches the return values of a function returning ``(fun, grad)`` + each time it is called.""" + + def __init__(self, fun): 
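+ # Note: the last-seen argument x acts as the cache key; _compute_if_needed
+ # below refreshes the cached value and gradient together whenever x changes.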
+ self.fun = fun + self.jac = None + self._value = None + self.x = None + + def _compute_if_needed(self, x, *args): + if not np.all(x == self.x) or self._value is None or self.jac is None: + self.x = np.asarray(x).copy() + fg = self.fun(x, *args) + self.jac = fg[1] + self._value = fg[0] + + def __call__(self, x, *args): + """ returns the function value """ + self._compute_if_needed(x, *args) + return self._value + + def derivative(self, x, *args): + self._compute_if_needed(x, *args) + return self.jac + + +def _wrap_callback(callback, method=None): + """Wrap a user-provided callback so that attributes can be attached.""" + if callback is None or method in {'tnc', 'slsqp', 'cobyla', 'cobyqa'}: + return callback # don't wrap + + sig = inspect.signature(callback) + + if set(sig.parameters) == {'intermediate_result'}: + def wrapped_callback(res): + return callback(intermediate_result=res) + elif method == 'trust-constr': + def wrapped_callback(res): + return callback(np.copy(res.x), res) + elif method == 'differential_evolution': + def wrapped_callback(res): + return callback(np.copy(res.x), res.convergence) + else: + def wrapped_callback(res): + return callback(np.copy(res.x)) + + wrapped_callback.stop_iteration = False + return wrapped_callback + + +class OptimizeResult(_RichResult): + """ + Represents the optimization result. + + Attributes + ---------- + x : ndarray + The solution of the optimization. + success : bool + Whether or not the optimizer exited successfully. + status : int + Termination status of the optimizer. Its value depends on the + underlying solver. Refer to `message` for details. + message : str + Description of the cause of the termination. + fun : float + Value of objective function at `x`. + jac, hess : ndarray + Values of objective function's Jacobian and its Hessian at `x` (if + available). The Hessian may be an approximation, see the documentation + of the function in question. + hess_inv : object + Inverse of the objective function's Hessian; may be an approximation. + Not available for all solvers. The type of this attribute may be + either np.ndarray or scipy.sparse.linalg.LinearOperator. + nfev, njev, nhev : int + Number of evaluations of the objective functions and of its + Jacobian and Hessian. + nit : int + Number of iterations performed by the optimizer. + maxcv : float + The maximum constraint violation. + + Notes + ----- + Depending on the specific solver being used, `OptimizeResult` may + not have all attributes listed here, and they may have additional + attributes not listed here. Since this class is essentially a + subclass of dict with attribute accessors, one can see which + attributes are available using the `OptimizeResult.keys` method. + + """ + pass + + +class OptimizeWarning(UserWarning): + pass + +def _check_positive_definite(Hk): + def is_pos_def(A): + if issymmetric(A): + try: + cholesky(A) + return True + except LinAlgError: + return False + else: + return False + if Hk is not None: + if not is_pos_def(Hk): + raise ValueError("'hess_inv0' matrix isn't positive definite.") + + +def _check_unknown_options(unknown_options): + if unknown_options: + msg = ", ".join(map(str, unknown_options.keys())) + # Stack level 4: this is called from _minimize_*, which is + # called from another function in SciPy. Level 4 is the first + # level in user code. + warnings.warn(f"Unknown solver options: {msg}", OptimizeWarning, stacklevel=4) + + +def is_finite_scalar(x): + """Test whether `x` is either a finite scalar or a finite array scalar. 
+ + """ + return np.size(x) == 1 and np.isfinite(x) + + +_epsilon = sqrt(np.finfo(float).eps) + + +def vecnorm(x, ord=2): + if ord == np.inf: + return np.amax(np.abs(x)) + elif ord == -np.inf: + return np.amin(np.abs(x)) + else: + return np.sum(np.abs(x)**ord, axis=0)**(1.0 / ord) + + +def _prepare_scalar_function(fun, x0, jac=None, args=(), bounds=None, + epsilon=None, finite_diff_rel_step=None, + hess=None): + """ + Creates a ScalarFunction object for use with scalar minimizers + (BFGS/LBFGSB/SLSQP/TNC/CG/etc). + + Parameters + ---------- + fun : callable + The objective function to be minimized. + + ``fun(x, *args) -> float`` + + where ``x`` is an 1-D array with shape (n,) and ``args`` + is a tuple of the fixed parameters needed to completely + specify the function. + x0 : ndarray, shape (n,) + Initial guess. Array of real elements of size (n,), + where 'n' is the number of independent variables. + jac : {callable, '2-point', '3-point', 'cs', None}, optional + Method for computing the gradient vector. If it is a callable, it + should be a function that returns the gradient vector: + + ``jac(x, *args) -> array_like, shape (n,)`` + + If one of `{'2-point', '3-point', 'cs'}` is selected then the gradient + is calculated with a relative step for finite differences. If `None`, + then two-point finite differences with an absolute step is used. + args : tuple, optional + Extra arguments passed to the objective function and its + derivatives (`fun`, `jac` functions). + bounds : sequence, optional + Bounds on variables. 'new-style' bounds are required. + eps : float or ndarray + If ``jac is None`` the absolute step size used for numerical + approximation of the jacobian via forward differences. + finite_diff_rel_step : None or array_like, optional + If ``jac in ['2-point', '3-point', 'cs']`` the relative step size to + use for numerical approximation of the jacobian. The absolute step + size is computed as ``h = rel_step * sign(x0) * max(1, abs(x0))``, + possibly adjusted to fit into the bounds. For ``jac='3-point'`` + the sign of `h` is ignored. If None (default) then step is selected + automatically. + hess : {callable, '2-point', '3-point', 'cs', None} + Computes the Hessian matrix. If it is callable, it should return the + Hessian matrix: + + ``hess(x, *args) -> {LinearOperator, spmatrix, array}, (n, n)`` + + Alternatively, the keywords {'2-point', '3-point', 'cs'} select a + finite difference scheme for numerical estimation. + Whenever the gradient is estimated via finite-differences, the Hessian + cannot be estimated with options {'2-point', '3-point', 'cs'} and needs + to be estimated using one of the quasi-Newton strategies. + + Returns + ------- + sf : ScalarFunction + """ + if callable(jac): + grad = jac + elif jac in FD_METHODS: + # epsilon is set to None so that ScalarFunction is made to use + # rel_step + epsilon = None + grad = jac + else: + # default (jac is None) is to do 2-point finite differences with + # absolute step size. ScalarFunction has to be provided an + # epsilon value that is not None to use absolute steps. This is + # normally the case from most _minimize* methods. + grad = '2-point' + epsilon = epsilon + + if hess is None: + # ScalarFunction requires something for hess, so we give a dummy + # implementation here if nothing is provided, return a value of None + # so that downstream minimisers halt. The results of `fun.hess` + # should not be used. + def hess(x, *args): + return None + + if bounds is None: + bounds = (-np.inf, np.inf) + + # ScalarFunction caches. 
Reuse of fun(x) during grad + # calculation reduces overall function evaluations. + sf = ScalarFunction(fun, x0, args, grad, hess, + finite_diff_rel_step, bounds, epsilon=epsilon) + + return sf + + +def _clip_x_for_func(func, bounds): + # ensures that x values sent to func are clipped to bounds + + # this is used as a mitigation for gh11403, slsqp/tnc sometimes + # suggest a move that is outside the limits by 1 or 2 ULP. This + # unclean fix makes sure x is strictly within bounds. + def eval(x): + x = _check_clip_x(x, bounds) + return func(x) + + return eval + + +def _check_clip_x(x, bounds): + if (x < bounds[0]).any() or (x > bounds[1]).any(): + warnings.warn("Values in x were outside bounds during a " + "minimize step, clipping to bounds", + RuntimeWarning, stacklevel=3) + x = np.clip(x, bounds[0], bounds[1]) + return x + + return x + + +def rosen(x): + """ + The Rosenbrock function. + + The function computed is:: + + sum(100.0*(x[1:] - x[:-1]**2.0)**2.0 + (1 - x[:-1])**2.0) + + Parameters + ---------- + x : array_like + 1-D array of points at which the Rosenbrock function is to be computed. + + Returns + ------- + f : float + The value of the Rosenbrock function. + + See Also + -------- + rosen_der, rosen_hess, rosen_hess_prod + + Examples + -------- + >>> import numpy as np + >>> from scipy.optimize import rosen + >>> X = 0.1 * np.arange(10) + >>> rosen(X) + 76.56 + + For higher-dimensional input ``rosen`` broadcasts. + In the following example, we use this to plot a 2D landscape. + Note that ``rosen_hess`` does not broadcast in this manner. + + >>> import matplotlib.pyplot as plt + >>> from mpl_toolkits.mplot3d import Axes3D + >>> x = np.linspace(-1, 1, 50) + >>> X, Y = np.meshgrid(x, x) + >>> ax = plt.subplot(111, projection='3d') + >>> ax.plot_surface(X, Y, rosen([X, Y])) + >>> plt.show() + """ + xp = array_namespace(x) + x = xp.asarray(x) + if xp.isdtype(x.dtype, 'integral'): + x = xp.astype(x, xp.asarray(1.).dtype) + r = xp.sum(100.0 * (x[1:] - x[:-1]**2.0)**2.0 + (1 - x[:-1])**2.0, + axis=0, dtype=x.dtype) + return r + + +def rosen_der(x): + """ + The derivative (i.e. gradient) of the Rosenbrock function. + + Parameters + ---------- + x : array_like + 1-D array of points at which the derivative is to be computed. + + Returns + ------- + rosen_der : (N,) ndarray + The gradient of the Rosenbrock function at `x`. + + See Also + -------- + rosen, rosen_hess, rosen_hess_prod + + Examples + -------- + >>> import numpy as np + >>> from scipy.optimize import rosen_der + >>> X = 0.1 * np.arange(9) + >>> rosen_der(X) + array([ -2. , 10.6, 15.6, 13.4, 6.4, -3. , -12.4, -19.4, 62. ]) + + """ + xp = array_namespace(x) + x = xp.asarray(x) + if xp.isdtype(x.dtype, 'integral'): + x = xp.astype(x, xp.asarray(1.).dtype) + xm = x[1:-1] + xm_m1 = x[:-2] + xm_p1 = x[2:] + der = xp.zeros_like(x) + der[1:-1] = (200 * (xm - xm_m1**2) - + 400 * (xm_p1 - xm**2) * xm - 2 * (1 - xm)) + der[0] = -400 * x[0] * (x[1] - x[0]**2) - 2 * (1 - x[0]) + der[-1] = 200 * (x[-1] - x[-2]**2) + return der + + +def rosen_hess(x): + """ + The Hessian matrix of the Rosenbrock function. + + Parameters + ---------- + x : array_like + 1-D array of points at which the Hessian matrix is to be computed. + + Returns + ------- + rosen_hess : ndarray + The Hessian matrix of the Rosenbrock function at `x`. 
+ + See Also + -------- + rosen, rosen_der, rosen_hess_prod + + Examples + -------- + >>> import numpy as np + >>> from scipy.optimize import rosen_hess + >>> X = 0.1 * np.arange(4) + >>> rosen_hess(X) + array([[-38., 0., 0., 0.], + [ 0., 134., -40., 0.], + [ 0., -40., 130., -80.], + [ 0., 0., -80., 200.]]) + + """ + xp = array_namespace(x) + x = xpx.atleast_nd(x, ndim=1, xp=xp) + if xp.isdtype(x.dtype, 'integral'): + x = xp.astype(x, xp.asarray(1.).dtype) + H = (xpx.create_diagonal(-400 * x[:-1], offset=1, xp=xp) + - xpx.create_diagonal(400 * x[:-1], offset=-1, xp=xp)) + diagonal = xp.zeros(x.shape[0], dtype=x.dtype) + diagonal[0] = 1200 * x[0]**2 - 400 * x[1] + 2 + diagonal[-1] = 200 + diagonal[1:-1] = 202 + 1200 * x[1:-1]**2 - 400 * x[2:] + return H + xpx.create_diagonal(diagonal, xp=xp) + + +def rosen_hess_prod(x, p): + """ + Product of the Hessian matrix of the Rosenbrock function with a vector. + + Parameters + ---------- + x : array_like + 1-D array of points at which the Hessian matrix is to be computed. + p : array_like + 1-D array, the vector to be multiplied by the Hessian matrix. + + Returns + ------- + rosen_hess_prod : ndarray + The Hessian matrix of the Rosenbrock function at `x` multiplied + by the vector `p`. + + See Also + -------- + rosen, rosen_der, rosen_hess + + Examples + -------- + >>> import numpy as np + >>> from scipy.optimize import rosen_hess_prod + >>> X = 0.1 * np.arange(9) + >>> p = 0.5 * np.arange(9) + >>> rosen_hess_prod(X, p) + array([ -0., 27., -10., -95., -192., -265., -278., -195., -180.]) + + """ + xp = array_namespace(x, p) + x = xpx.atleast_nd(x, ndim=1, xp=xp) + if xp.isdtype(x.dtype, 'integral'): + x = xp.astype(x, xp.asarray(1.).dtype) + p = xp.asarray(p, dtype=x.dtype) + Hp = xp.zeros(x.shape[0], dtype=x.dtype) + Hp[0] = (1200 * x[0]**2 - 400 * x[1] + 2) * p[0] - 400 * x[0] * p[1] + Hp[1:-1] = (-400 * x[:-2] * p[:-2] + + (202 + 1200 * x[1:-1]**2 - 400 * x[2:]) * p[1:-1] - + 400 * x[1:-1] * p[2:]) + Hp[-1] = -400 * x[-2] * p[-2] + 200*p[-1] + return Hp + + +def _wrap_scalar_function(function, args): + # wraps a minimizer function to count number of evaluations + # and to easily provide an args kwd. + ncalls = [0] + if function is None: + return ncalls, None + + def function_wrapper(x, *wrapper_args): + ncalls[0] += 1 + # A copy of x is sent to the user function (gh13740) + fx = function(np.copy(x), *(wrapper_args + args)) + # Ideally, we'd like to a have a true scalar returned from f(x). For + # backwards-compatibility, also allow np.array([1.3]), np.array([[1.3]]) etc. + if not np.isscalar(fx): + try: + fx = np.asarray(fx).item() + except (TypeError, ValueError) as e: + raise ValueError("The user-provided objective function " + "must return a scalar value.") from e + return fx + + return ncalls, function_wrapper + + +class _MaxFuncCallError(RuntimeError): + pass + + +def _wrap_scalar_function_maxfun_validation(function, args, maxfun): + # wraps a minimizer function to count number of evaluations + # and to easily provide an args kwd. + ncalls = [0] + if function is None: + return ncalls, None + + def function_wrapper(x, *wrapper_args): + if ncalls[0] >= maxfun: + raise _MaxFuncCallError("Too many function calls") + ncalls[0] += 1 + # A copy of x is sent to the user function (gh13740) + fx = function(np.copy(x), *(wrapper_args + args)) + # Ideally, we'd like to a have a true scalar returned from f(x). For + # backwards-compatibility, also allow np.array([1.3]), + # np.array([[1.3]]) etc. 
+ if not np.isscalar(fx): + try: + fx = np.asarray(fx).item() + except (TypeError, ValueError) as e: + raise ValueError("The user-provided objective function " + "must return a scalar value.") from e + return fx + + return ncalls, function_wrapper + + +def fmin(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None, maxfun=None, + full_output=0, disp=1, retall=0, callback=None, initial_simplex=None): + """ + Minimize a function using the downhill simplex algorithm. + + This algorithm only uses function values, not derivatives or second + derivatives. + + Parameters + ---------- + func : callable func(x,*args) + The objective function to be minimized. + x0 : ndarray + Initial guess. + args : tuple, optional + Extra arguments passed to func, i.e., ``f(x,*args)``. + xtol : float, optional + Absolute error in xopt between iterations that is acceptable for + convergence. + ftol : number, optional + Absolute error in func(xopt) between iterations that is acceptable for + convergence. + maxiter : int, optional + Maximum number of iterations to perform. + maxfun : number, optional + Maximum number of function evaluations to make. + full_output : bool, optional + Set to True if fopt and warnflag outputs are desired. + disp : bool, optional + Set to True to print convergence messages. + retall : bool, optional + Set to True to return list of solutions at each iteration. + callback : callable, optional + Called after each iteration, as callback(xk), where xk is the + current parameter vector. + initial_simplex : array_like of shape (N + 1, N), optional + Initial simplex. If given, overrides `x0`. + ``initial_simplex[j,:]`` should contain the coordinates of + the jth vertex of the ``N+1`` vertices in the simplex, where + ``N`` is the dimension. + + Returns + ------- + xopt : ndarray + Parameter that minimizes function. + fopt : float + Value of function at minimum: ``fopt = func(xopt)``. + iter : int + Number of iterations performed. + funcalls : int + Number of function calls made. + warnflag : int + 1 : Maximum number of function evaluations made. + 2 : Maximum number of iterations reached. + allvecs : list + Solution at each iteration. + + See also + -------- + minimize: Interface to minimization algorithms for multivariate + functions. See the 'Nelder-Mead' `method` in particular. + + Notes + ----- + Uses a Nelder-Mead simplex algorithm to find the minimum of function of + one or more variables. + + This algorithm has a long history of successful use in applications. + But it will usually be slower than an algorithm that uses first or + second derivative information. In practice, it can have poor + performance in high-dimensional problems and is not robust to + minimizing complicated functions. Additionally, there currently is no + complete theory describing when the algorithm will successfully + converge to the minimum, or how fast it will if it does. Both the ftol and + xtol criteria must be met for convergence. + + Examples + -------- + >>> def f(x): + ... return x**2 + + >>> from scipy import optimize + + >>> minimum = optimize.fmin(f, 1) + Optimization terminated successfully. + Current function value: 0.000000 + Iterations: 17 + Function evaluations: 34 + >>> minimum[0] + -8.8817841970012523e-16 + + References + ---------- + .. [1] Nelder, J.A. and Mead, R. (1965), "A simplex method for function + minimization", The Computer Journal, 7, pp. 308-313 + + .. [2] Wright, M.H. 
(1996), "Direct Search Methods: Once Scorned, Now + Respectable", in Numerical Analysis 1995, Proceedings of the + 1995 Dundee Biennial Conference in Numerical Analysis, D.F. + Griffiths and G.A. Watson (Eds.), Addison Wesley Longman, + Harlow, UK, pp. 191-208. + + """ + opts = {'xatol': xtol, + 'fatol': ftol, + 'maxiter': maxiter, + 'maxfev': maxfun, + 'disp': disp, + 'return_all': retall, + 'initial_simplex': initial_simplex} + + callback = _wrap_callback(callback) + res = _minimize_neldermead(func, x0, args, callback=callback, **opts) + if full_output: + retlist = res['x'], res['fun'], res['nit'], res['nfev'], res['status'] + if retall: + retlist += (res['allvecs'], ) + return retlist + else: + if retall: + return res['x'], res['allvecs'] + else: + return res['x'] + + +def _minimize_neldermead(func, x0, args=(), callback=None, + maxiter=None, maxfev=None, disp=False, + return_all=False, initial_simplex=None, + xatol=1e-4, fatol=1e-4, adaptive=False, bounds=None, + **unknown_options): + """ + Minimization of scalar function of one or more variables using the + Nelder-Mead algorithm. + + Options + ------- + disp : bool + Set to True to print convergence messages. + maxiter, maxfev : int + Maximum allowed number of iterations and function evaluations. + Will default to ``N*200``, where ``N`` is the number of + variables, if neither `maxiter` or `maxfev` is set. If both + `maxiter` and `maxfev` are set, minimization will stop at the + first reached. + return_all : bool, optional + Set to True to return a list of the best solution at each of the + iterations. + initial_simplex : array_like of shape (N + 1, N) + Initial simplex. If given, overrides `x0`. + ``initial_simplex[j,:]`` should contain the coordinates of + the jth vertex of the ``N+1`` vertices in the simplex, where + ``N`` is the dimension. + xatol : float, optional + Absolute error in xopt between iterations that is acceptable for + convergence. + fatol : number, optional + Absolute error in func(xopt) between iterations that is acceptable for + convergence. + adaptive : bool, optional + Adapt algorithm parameters to dimensionality of problem. Useful for + high-dimensional minimization [1]_. + bounds : sequence or `Bounds`, optional + Bounds on variables. There are two ways to specify the bounds: + + 1. Instance of `Bounds` class. + 2. Sequence of ``(min, max)`` pairs for each element in `x`. None + is used to specify no bound. + + Note that this just clips all vertices in simplex based on + the bounds. + + References + ---------- + .. [1] Gao, F. and Han, L. + Implementing the Nelder-Mead simplex algorithm with adaptive + parameters. 2012. Computational Optimization and Applications. + 51:1, pp. 
259-277 + + """ + _check_unknown_options(unknown_options) + maxfun = maxfev + retall = return_all + + x0 = np.atleast_1d(x0).flatten() + dtype = x0.dtype if np.issubdtype(x0.dtype, np.inexact) else np.float64 + x0 = np.asarray(x0, dtype=dtype) + + if adaptive: + dim = float(len(x0)) + rho = 1 + chi = 1 + 2/dim + psi = 0.75 - 1/(2*dim) + sigma = 1 - 1/dim + else: + rho = 1 + chi = 2 + psi = 0.5 + sigma = 0.5 + + nonzdelt = 0.05 + zdelt = 0.00025 + + if bounds is not None: + lower_bound, upper_bound = bounds.lb, bounds.ub + # check bounds + if (lower_bound > upper_bound).any(): + raise ValueError("Nelder Mead - one of the lower bounds " + "is greater than an upper bound.") + if np.any(lower_bound > x0) or np.any(x0 > upper_bound): + warnings.warn("Initial guess is not within the specified bounds", + OptimizeWarning, stacklevel=3) + + if bounds is not None: + x0 = np.clip(x0, lower_bound, upper_bound) + + if initial_simplex is None: + N = len(x0) + + sim = np.empty((N + 1, N), dtype=x0.dtype) + sim[0] = x0 + for k in range(N): + y = np.array(x0, copy=True) + if y[k] != 0: + y[k] = (1 + nonzdelt)*y[k] + else: + y[k] = zdelt + sim[k + 1] = y + else: + sim = np.atleast_2d(initial_simplex).copy() + dtype = sim.dtype if np.issubdtype(sim.dtype, np.inexact) else np.float64 + sim = np.asarray(sim, dtype=dtype) + if sim.ndim != 2 or sim.shape[0] != sim.shape[1] + 1: + raise ValueError("`initial_simplex` should be an array of shape (N+1,N)") + if len(x0) != sim.shape[1]: + raise ValueError("Size of `initial_simplex` is not consistent with `x0`") + N = sim.shape[1] + + if retall: + allvecs = [sim[0]] + + # If neither are set, then set both to default + if maxiter is None and maxfun is None: + maxiter = N * 200 + maxfun = N * 200 + elif maxiter is None: + # Convert remaining Nones, to np.inf, unless the other is np.inf, in + # which case use the default to avoid unbounded iteration + if maxfun == np.inf: + maxiter = N * 200 + else: + maxiter = np.inf + elif maxfun is None: + if maxiter == np.inf: + maxfun = N * 200 + else: + maxfun = np.inf + + if bounds is not None: + # The default simplex construction may make all entries (for a given + # parameter) greater than an upper bound if x0 is very close to the + # upper bound. If one simply clips the simplex to the bounds this could + # make the simplex entries degenerate. If that occurs reflect into the + # interior. 
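+ # For example, with an upper bound of 1.0, a vertex at 1.02 is reflected
+ # to 2*1.0 - 1.02 = 0.98, and the clip below keeps it inside the bounds.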
+ msk = sim > upper_bound + # reflect into the interior + sim = np.where(msk, 2*upper_bound - sim, sim) + # but make sure the reflection is no less than the lower_bound + sim = np.clip(sim, lower_bound, upper_bound) + + one2np1 = list(range(1, N + 1)) + fsim = np.full((N + 1,), np.inf, dtype=float) + + fcalls, func = _wrap_scalar_function_maxfun_validation(func, args, maxfun) + + try: + for k in range(N + 1): + fsim[k] = func(sim[k]) + except _MaxFuncCallError: + pass + finally: + ind = np.argsort(fsim) + sim = np.take(sim, ind, 0) + fsim = np.take(fsim, ind, 0) + + ind = np.argsort(fsim) + fsim = np.take(fsim, ind, 0) + # sort so sim[0,:] has the lowest function value + sim = np.take(sim, ind, 0) + + iterations = 1 + + while (fcalls[0] < maxfun and iterations < maxiter): + try: + if (np.max(np.ravel(np.abs(sim[1:] - sim[0]))) <= xatol and + np.max(np.abs(fsim[0] - fsim[1:])) <= fatol): + break + + xbar = np.add.reduce(sim[:-1], 0) / N + xr = (1 + rho) * xbar - rho * sim[-1] + if bounds is not None: + xr = np.clip(xr, lower_bound, upper_bound) + fxr = func(xr) + doshrink = 0 + + if fxr < fsim[0]: + xe = (1 + rho * chi) * xbar - rho * chi * sim[-1] + if bounds is not None: + xe = np.clip(xe, lower_bound, upper_bound) + fxe = func(xe) + + if fxe < fxr: + sim[-1] = xe + fsim[-1] = fxe + else: + sim[-1] = xr + fsim[-1] = fxr + else: # fsim[0] <= fxr + if fxr < fsim[-2]: + sim[-1] = xr + fsim[-1] = fxr + else: # fxr >= fsim[-2] + # Perform contraction + if fxr < fsim[-1]: + xc = (1 + psi * rho) * xbar - psi * rho * sim[-1] + if bounds is not None: + xc = np.clip(xc, lower_bound, upper_bound) + fxc = func(xc) + + if fxc <= fxr: + sim[-1] = xc + fsim[-1] = fxc + else: + doshrink = 1 + else: + # Perform an inside contraction + xcc = (1 - psi) * xbar + psi * sim[-1] + if bounds is not None: + xcc = np.clip(xcc, lower_bound, upper_bound) + fxcc = func(xcc) + + if fxcc < fsim[-1]: + sim[-1] = xcc + fsim[-1] = fxcc + else: + doshrink = 1 + + if doshrink: + for j in one2np1: + sim[j] = sim[0] + sigma * (sim[j] - sim[0]) + if bounds is not None: + sim[j] = np.clip( + sim[j], lower_bound, upper_bound) + fsim[j] = func(sim[j]) + iterations += 1 + except _MaxFuncCallError: + pass + finally: + ind = np.argsort(fsim) + sim = np.take(sim, ind, 0) + fsim = np.take(fsim, ind, 0) + if retall: + allvecs.append(sim[0]) + intermediate_result = OptimizeResult(x=sim[0], fun=fsim[0]) + if _call_callback_maybe_halt(callback, intermediate_result): + break + + x = sim[0] + fval = np.min(fsim) + warnflag = 0 + + if fcalls[0] >= maxfun: + warnflag = 1 + msg = _status_message['maxfev'] + if disp: + warnings.warn(msg, RuntimeWarning, stacklevel=3) + elif iterations >= maxiter: + warnflag = 2 + msg = _status_message['maxiter'] + if disp: + warnings.warn(msg, RuntimeWarning, stacklevel=3) + else: + msg = _status_message['success'] + if disp: + print(msg) + print(f" Current function value: {fval:f}") + print(" Iterations: %d" % iterations) + print(" Function evaluations: %d" % fcalls[0]) + + result = OptimizeResult(fun=fval, nit=iterations, nfev=fcalls[0], + status=warnflag, success=(warnflag == 0), + message=msg, x=x, final_simplex=(sim, fsim)) + if retall: + result['allvecs'] = allvecs + return result + + +def approx_fprime(xk, f, epsilon=_epsilon, *args): + """Finite difference approximation of the derivatives of a + scalar or vector-valued function. 
+ + If a function maps from :math:`R^n` to :math:`R^m`, its derivatives form + an m-by-n matrix + called the Jacobian, where an element :math:`(i, j)` is a partial + derivative of f[i] with respect to ``xk[j]``. + + Parameters + ---------- + xk : array_like + The coordinate vector at which to determine the gradient of `f`. + f : callable + Function of which to estimate the derivatives of. Has the signature + ``f(xk, *args)`` where `xk` is the argument in the form of a 1-D array + and `args` is a tuple of any additional fixed parameters needed to + completely specify the function. The argument `xk` passed to this + function is an ndarray of shape (n,) (never a scalar even if n=1). + It must return a 1-D array_like of shape (m,) or a scalar. + + Suppose the callable has signature ``f0(x, *my_args, **my_kwargs)``, where + ``my_args`` and ``my_kwargs`` are required positional and keyword arguments. + Rather than passing ``f0`` as the callable, wrap it to accept + only ``x``; e.g., pass ``fun=lambda x: f0(x, *my_args, **my_kwargs)`` as the + callable, where ``my_args`` (tuple) and ``my_kwargs`` (dict) have been + gathered before invoking this function. + + .. versionchanged:: 1.9.0 + `f` is now able to return a 1-D array-like, with the :math:`(m, n)` + Jacobian being estimated. + + epsilon : {float, array_like}, optional + Increment to `xk` to use for determining the function gradient. + If a scalar, uses the same finite difference delta for all partial + derivatives. If an array, should contain one value per element of + `xk`. Defaults to ``sqrt(np.finfo(float).eps)``, which is approximately + 1.49e-08. + \\*args : args, optional + Any other arguments that are to be passed to `f`. + + Returns + ------- + jac : ndarray + The partial derivatives of `f` to `xk`. + + See Also + -------- + check_grad : Check correctness of gradient function against approx_fprime. + + Notes + ----- + The function gradient is determined by the forward finite difference + formula:: + + f(xk[i] + epsilon[i]) - f(xk[i]) + f'[i] = --------------------------------- + epsilon[i] + + Examples + -------- + >>> import numpy as np + >>> from scipy import optimize + >>> def func(x, c0, c1): + ... "Coordinate vector `x` should be an array of size two." + ... return c0 * x[0]**2 + c1*x[1]**2 + + >>> x = np.ones(2) + >>> c0, c1 = (1, 200) + >>> eps = np.sqrt(np.finfo(float).eps) + >>> optimize.approx_fprime(x, func, [eps, np.sqrt(200) * eps], c0, c1) + array([ 2. , 400.00004208]) + + """ + xk = np.asarray(xk, float) + f0 = f(xk, *args) + + return approx_derivative(f, xk, method='2-point', abs_step=epsilon, + args=args, f0=f0) + + +@_transition_to_rng("seed", position_num=6) +def check_grad(func, grad, x0, *args, epsilon=_epsilon, + direction='all', rng=None): + r"""Check the correctness of a gradient function by comparing it against a + (forward) finite-difference approximation of the gradient. + + Parameters + ---------- + func : callable ``func(x0, *args)`` + Function whose derivative is to be checked. + grad : callable ``grad(x0, *args)`` + Jacobian of `func`. + x0 : ndarray + Points to check `grad` against forward difference approximation of grad + using `func`. + args : \\*args, optional + Extra arguments passed to `func` and `grad`. + epsilon : float, optional + Step size used for the finite difference approximation. It defaults to + ``sqrt(np.finfo(float).eps)``, which is approximately 1.49e-08. 
+ direction : str, optional + If set to ``'random'``, then gradients along a random vector + are used to check `grad` against forward difference approximation + using `func`. By default it is ``'all'``, in which case, all + the one hot direction vectors are considered to check `grad`. + If `func` is a vector valued function then only ``'all'`` can be used. + rng : `numpy.random.Generator`, optional + Pseudorandom number generator state. When `rng` is None, a new + `numpy.random.Generator` is created using entropy from the + operating system. Types other than `numpy.random.Generator` are + passed to `numpy.random.default_rng` to instantiate a ``Generator``. + + The random numbers generated affect the random vector along which gradients + are computed to check ``grad``. Note that `rng` is only used when `direction` + argument is set to `'random'`. + + Returns + ------- + err : float + The square root of the sum of squares (i.e., the 2-norm) of the + difference between ``grad(x0, *args)`` and the finite difference + approximation of `grad` using func at the points `x0`. + + See Also + -------- + approx_fprime + + Examples + -------- + >>> import numpy as np + >>> def func(x): + ... return x[0]**2 - 0.5 * x[1]**3 + >>> def grad(x): + ... return [2 * x[0], -1.5 * x[1]**2] + >>> from scipy.optimize import check_grad + >>> check_grad(func, grad, [1.5, -1.5]) + 2.9802322387695312e-08 # may vary + >>> rng = np.random.default_rng() + >>> check_grad(func, grad, [1.5, -1.5], + ... direction='random', seed=rng) + 2.9802322387695312e-08 + + """ + step = epsilon + x0 = np.asarray(x0) + + def g(w, func, x0, v, *args): + return func(x0 + w*v, *args) + + if direction == 'random': + _grad = np.asanyarray(grad(x0, *args)) + if _grad.ndim > 1: + raise ValueError("'random' can only be used with scalar valued" + " func") + rng_gen = check_random_state(rng) + v = rng_gen.standard_normal(size=(x0.shape)) + _args = (func, x0, v) + args + _func = g + vars = np.zeros((1,)) + analytical_grad = np.dot(_grad, v) + elif direction == 'all': + _args = args + _func = func + vars = x0 + analytical_grad = grad(x0, *args) + else: + raise ValueError(f"{direction} is not a valid string for " + "``direction`` argument") + + return np.sqrt(np.sum(np.abs( + (analytical_grad - approx_fprime(vars, _func, step, *_args))**2 + ))) + + +def approx_fhess_p(x0, p, fprime, epsilon, *args): + # calculate fprime(x0) first, as this may be cached by ScalarFunction + f1 = fprime(*((x0,) + args)) + f2 = fprime(*((x0 + epsilon*p,) + args)) + return (f2 - f1) / epsilon + + +class _LineSearchError(RuntimeError): + pass + + +def _line_search_wolfe12(f, fprime, xk, pk, gfk, old_fval, old_old_fval, + **kwargs): + """ + Same as line_search_wolfe1, but fall back to line_search_wolfe2 if + suitable step length is not found, and raise an exception if a + suitable step length is not found. + + Raises + ------ + _LineSearchError + If no suitable step size is found + + """ + + extra_condition = kwargs.pop('extra_condition', None) + + ret = line_search_wolfe1(f, fprime, xk, pk, gfk, + old_fval, old_old_fval, + **kwargs) + + if ret[0] is not None and extra_condition is not None: + xp1 = xk + ret[0] * pk + if not extra_condition(ret[0], xp1, ret[3], ret[5]): + # Reject step if extra_condition fails + ret = (None,) + + if ret[0] is None: + # line search failed: try different one. 
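+ # Note: only the 'c1', 'c2' and 'amax' keywords are forwarded to the
+ # wolfe2 retry below; any remaining wolfe1-specific options are dropped.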
+ with warnings.catch_warnings(): + warnings.simplefilter('ignore', LineSearchWarning) + kwargs2 = {} + for key in ('c1', 'c2', 'amax'): + if key in kwargs: + kwargs2[key] = kwargs[key] + ret = line_search_wolfe2(f, fprime, xk, pk, gfk, + old_fval, old_old_fval, + extra_condition=extra_condition, + **kwargs2) + + if ret[0] is None: + raise _LineSearchError() + + return ret + + +def fmin_bfgs(f, x0, fprime=None, args=(), gtol=1e-5, norm=np.inf, + epsilon=_epsilon, maxiter=None, full_output=0, disp=1, + retall=0, callback=None, xrtol=0, c1=1e-4, c2=0.9, + hess_inv0=None): + """ + Minimize a function using the BFGS algorithm. + + Parameters + ---------- + f : callable ``f(x,*args)`` + Objective function to be minimized. + x0 : ndarray + Initial guess, shape (n,) + fprime : callable ``f'(x,*args)``, optional + Gradient of f. + args : tuple, optional + Extra arguments passed to f and fprime. + gtol : float, optional + Terminate successfully if gradient norm is less than `gtol` + norm : float, optional + Order of norm (Inf is max, -Inf is min) + epsilon : int or ndarray, optional + If `fprime` is approximated, use this value for the step size. + callback : callable, optional + An optional user-supplied function to call after each + iteration. Called as ``callback(xk)``, where ``xk`` is the + current parameter vector. + maxiter : int, optional + Maximum number of iterations to perform. + full_output : bool, optional + If True, return ``fopt``, ``func_calls``, ``grad_calls``, and + ``warnflag`` in addition to ``xopt``. + disp : bool, optional + Print convergence message if True. + retall : bool, optional + Return a list of results at each iteration if True. + xrtol : float, default: 0 + Relative tolerance for `x`. Terminate successfully if step + size is less than ``xk * xrtol`` where ``xk`` is the current + parameter vector. + c1 : float, default: 1e-4 + Parameter for Armijo condition rule. + c2 : float, default: 0.9 + Parameter for curvature condition rule. + hess_inv0 : None or ndarray, optional`` + Initial inverse hessian estimate, shape (n, n). If None (default) then + the identity matrix is used. + + Returns + ------- + xopt : ndarray + Parameters which minimize f, i.e., ``f(xopt) == fopt``. + fopt : float + Minimum value. + gopt : ndarray + Value of gradient at minimum, f'(xopt), which should be near 0. + Bopt : ndarray + Value of 1/f''(xopt), i.e., the inverse Hessian matrix. + func_calls : int + Number of function_calls made. + grad_calls : int + Number of gradient calls made. + warnflag : integer + 1 : Maximum number of iterations exceeded. + 2 : Gradient and/or function calls not changing. + 3 : NaN result encountered. + allvecs : list + The value of `xopt` at each iteration. Only returned if `retall` is + True. + + Notes + ----- + Optimize the function, `f`, whose gradient is given by `fprime` + using the quasi-Newton method of Broyden, Fletcher, Goldfarb, + and Shanno (BFGS). + + Parameters `c1` and `c2` must satisfy ``0 < c1 < c2 < 1``. + + See Also + -------- + minimize: Interface to minimization algorithms for multivariate + functions. See ``method='BFGS'`` in particular. + + References + ---------- + Wright, and Nocedal 'Numerical Optimization', 1999, p. 198. + + Examples + -------- + >>> import numpy as np + >>> from scipy.optimize import fmin_bfgs + >>> def quadratic_cost(x, Q): + ... return x @ Q @ x + ... 
+ >>> x0 = np.array([-3, -4]) + >>> cost_weight = np.diag([1., 10.]) + >>> # Note that a trailing comma is necessary for a tuple with single element + >>> fmin_bfgs(quadratic_cost, x0, args=(cost_weight,)) + Optimization terminated successfully. + Current function value: 0.000000 + Iterations: 7 # may vary + Function evaluations: 24 # may vary + Gradient evaluations: 8 # may vary + array([ 2.85169950e-06, -4.61820139e-07]) + + >>> def quadratic_cost_grad(x, Q): + ... return 2 * Q @ x + ... + >>> fmin_bfgs(quadratic_cost, x0, quadratic_cost_grad, args=(cost_weight,)) + Optimization terminated successfully. + Current function value: 0.000000 + Iterations: 7 + Function evaluations: 8 + Gradient evaluations: 8 + array([ 2.85916637e-06, -4.54371951e-07]) + + """ + opts = {'gtol': gtol, + 'norm': norm, + 'eps': epsilon, + 'disp': disp, + 'maxiter': maxiter, + 'return_all': retall, + 'xrtol': xrtol, + 'c1': c1, + 'c2': c2, + 'hess_inv0': hess_inv0} + + callback = _wrap_callback(callback) + res = _minimize_bfgs(f, x0, args, fprime, callback=callback, **opts) + + if full_output: + retlist = (res['x'], res['fun'], res['jac'], res['hess_inv'], + res['nfev'], res['njev'], res['status']) + if retall: + retlist += (res['allvecs'], ) + return retlist + else: + if retall: + return res['x'], res['allvecs'] + else: + return res['x'] + + +def _minimize_bfgs(fun, x0, args=(), jac=None, callback=None, + gtol=1e-5, norm=np.inf, eps=_epsilon, maxiter=None, + disp=False, return_all=False, finite_diff_rel_step=None, + xrtol=0, c1=1e-4, c2=0.9, + hess_inv0=None, **unknown_options): + """ + Minimization of scalar function of one or more variables using the + BFGS algorithm. + + Options + ------- + disp : bool + Set to True to print convergence messages. + maxiter : int + Maximum number of iterations to perform. + gtol : float + Terminate successfully if gradient norm is less than `gtol`. + norm : float + Order of norm (Inf is max, -Inf is min). + eps : float or ndarray + If `jac is None` the absolute step size used for numerical + approximation of the jacobian via forward differences. + return_all : bool, optional + Set to True to return a list of the best solution at each of the + iterations. + finite_diff_rel_step : None or array_like, optional + If ``jac in ['2-point', '3-point', 'cs']`` the relative step size to + use for numerical approximation of the jacobian. The absolute step + size is computed as ``h = rel_step * sign(x) * max(1, abs(x))``, + possibly adjusted to fit into the bounds. For ``jac='3-point'`` + the sign of `h` is ignored. If None (default) then step is selected + automatically. + xrtol : float, default: 0 + Relative tolerance for `x`. Terminate successfully if step size is + less than ``xk * xrtol`` where ``xk`` is the current parameter vector. + c1 : float, default: 1e-4 + Parameter for Armijo condition rule. + c2 : float, default: 0.9 + Parameter for curvature condition rule. + hess_inv0 : None or ndarray, optional + Initial inverse hessian estimate, shape (n, n). If None (default) then + the identity matrix is used. + + Notes + ----- + Parameters `c1` and `c2` must satisfy ``0 < c1 < c2 < 1``. + + If minimization doesn't complete successfully, with an error message of + ``Desired error not necessarily achieved due to precision loss``, then + consider setting `gtol` to a higher value. This precision loss typically + occurs when the (finite difference) numerical differentiation cannot provide + sufficient precision to satisfy the `gtol` termination criterion. 
+ This can happen when working in single precision and a callable jac is not + provided. For single precision problems a `gtol` of 1e-3 seems to work. + """ + _check_unknown_options(unknown_options) + _check_positive_definite(hess_inv0) + retall = return_all + + x0 = asarray(x0).flatten() + if x0.ndim == 0: + x0.shape = (1,) + if maxiter is None: + maxiter = len(x0) * 200 + + sf = _prepare_scalar_function(fun, x0, jac, args=args, epsilon=eps, + finite_diff_rel_step=finite_diff_rel_step) + + f = sf.fun + myfprime = sf.grad + + old_fval = f(x0) + gfk = myfprime(x0) + + k = 0 + N = len(x0) + I = np.eye(N, dtype=int) + Hk = I if hess_inv0 is None else hess_inv0 + + # Sets the initial step guess to dx ~ 1 + old_old_fval = old_fval + np.linalg.norm(gfk) / 2 + + xk = x0 + if retall: + allvecs = [x0] + warnflag = 0 + gnorm = vecnorm(gfk, ord=norm) + while (gnorm > gtol) and (k < maxiter): + pk = -np.dot(Hk, gfk) + try: + alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \ + _line_search_wolfe12(f, myfprime, xk, pk, gfk, + old_fval, old_old_fval, amin=1e-100, + amax=1e100, c1=c1, c2=c2) + except _LineSearchError: + # Line search failed to find a better solution. + warnflag = 2 + break + + sk = alpha_k * pk + xkp1 = xk + sk + + if retall: + allvecs.append(xkp1) + xk = xkp1 + if gfkp1 is None: + gfkp1 = myfprime(xkp1) + + yk = gfkp1 - gfk + gfk = gfkp1 + k += 1 + intermediate_result = OptimizeResult(x=xk, fun=old_fval) + if _call_callback_maybe_halt(callback, intermediate_result): + break + gnorm = vecnorm(gfk, ord=norm) + if (gnorm <= gtol): + break + + # See Chapter 5 in P.E. Frandsen, K. Jonasson, H.B. Nielsen, + # O. Tingleff: "Unconstrained Optimization", IMM, DTU. 1999. + # These notes are available here: + # http://www2.imm.dtu.dk/documents/ftp/publlec.html + if (alpha_k*vecnorm(pk) <= xrtol*(xrtol + vecnorm(xk))): + break + + if not np.isfinite(old_fval): + # We correctly found +-Inf as optimal value, or something went + # wrong. + warnflag = 2 + break + + rhok_inv = np.dot(yk, sk) + # this was handled in numeric, let it remains for more safety + # Cryptic comment above is preserved for posterity. Future reader: + # consider change to condition below proposed in gh-1261/gh-17345. + if rhok_inv == 0.: + rhok = 1000.0 + if disp: + msg = "Divide-by-zero encountered: rhok assumed large" + _print_success_message_or_warn(True, msg) + else: + rhok = 1. 
/ rhok_inv + + A1 = I - sk[:, np.newaxis] * yk[np.newaxis, :] * rhok + A2 = I - yk[:, np.newaxis] * sk[np.newaxis, :] * rhok + Hk = np.dot(A1, np.dot(Hk, A2)) + (rhok * sk[:, np.newaxis] * + sk[np.newaxis, :]) + + fval = old_fval + + if warnflag == 2: + msg = _status_message['pr_loss'] + elif k >= maxiter: + warnflag = 1 + msg = _status_message['maxiter'] + elif np.isnan(gnorm) or np.isnan(fval) or np.isnan(xk).any(): + warnflag = 3 + msg = _status_message['nan'] + else: + msg = _status_message['success'] + + if disp: + _print_success_message_or_warn(warnflag, msg) + print(f" Current function value: {fval:f}") + print(" Iterations: %d" % k) + print(" Function evaluations: %d" % sf.nfev) + print(" Gradient evaluations: %d" % sf.ngev) + + result = OptimizeResult(fun=fval, jac=gfk, hess_inv=Hk, nfev=sf.nfev, + njev=sf.ngev, status=warnflag, + success=(warnflag == 0), message=msg, x=xk, + nit=k) + if retall: + result['allvecs'] = allvecs + return result + + +def _print_success_message_or_warn(warnflag, message, warntype=None): + if not warnflag: + print(message) + else: + warnings.warn(message, warntype or OptimizeWarning, stacklevel=3) + + +def fmin_cg(f, x0, fprime=None, args=(), gtol=1e-5, norm=np.inf, + epsilon=_epsilon, maxiter=None, full_output=0, disp=1, retall=0, + callback=None, c1=1e-4, c2=0.4): + """ + Minimize a function using a nonlinear conjugate gradient algorithm. + + Parameters + ---------- + f : callable, ``f(x, *args)`` + Objective function to be minimized. Here `x` must be a 1-D array of + the variables that are to be changed in the search for a minimum, and + `args` are the other (fixed) parameters of `f`. + x0 : ndarray + A user-supplied initial estimate of `xopt`, the optimal value of `x`. + It must be a 1-D array of values. + fprime : callable, ``fprime(x, *args)``, optional + A function that returns the gradient of `f` at `x`. Here `x` and `args` + are as described above for `f`. The returned value must be a 1-D array. + Defaults to None, in which case the gradient is approximated + numerically (see `epsilon`, below). + args : tuple, optional + Parameter values passed to `f` and `fprime`. Must be supplied whenever + additional fixed parameters are needed to completely specify the + functions `f` and `fprime`. + gtol : float, optional + Stop when the norm of the gradient is less than `gtol`. + norm : float, optional + Order to use for the norm of the gradient + (``-np.inf`` is min, ``np.inf`` is max). + epsilon : float or ndarray, optional + Step size(s) to use when `fprime` is approximated numerically. Can be a + scalar or a 1-D array. Defaults to ``sqrt(eps)``, with eps the + floating point machine precision. Usually ``sqrt(eps)`` is about + 1.5e-8. + maxiter : int, optional + Maximum number of iterations to perform. Default is ``200 * len(x0)``. + full_output : bool, optional + If True, return `fopt`, `func_calls`, `grad_calls`, and `warnflag` in + addition to `xopt`. See the Returns section below for additional + information on optional return values. + disp : bool, optional + If True, return a convergence message, followed by `xopt`. + retall : bool, optional + If True, add to the returned values the results of each iteration. + callback : callable, optional + An optional user-supplied function, called after each iteration. + Called as ``callback(xk)``, where ``xk`` is the current value of `x0`. + c1 : float, default: 1e-4 + Parameter for Armijo condition rule. + c2 : float, default: 0.4 + Parameter for curvature condition rule. 
+ + Returns + ------- + xopt : ndarray + Parameters which minimize f, i.e., ``f(xopt) == fopt``. + fopt : float, optional + Minimum value found, f(xopt). Only returned if `full_output` is True. + func_calls : int, optional + The number of function_calls made. Only returned if `full_output` + is True. + grad_calls : int, optional + The number of gradient calls made. Only returned if `full_output` is + True. + warnflag : int, optional + Integer value with warning status, only returned if `full_output` is + True. + + 0 : Success. + + 1 : The maximum number of iterations was exceeded. + + 2 : Gradient and/or function calls were not changing. May indicate + that precision was lost, i.e., the routine did not converge. + + 3 : NaN result encountered. + + allvecs : list of ndarray, optional + List of arrays, containing the results at each iteration. + Only returned if `retall` is True. + + See Also + -------- + minimize : common interface to all `scipy.optimize` algorithms for + unconstrained and constrained minimization of multivariate + functions. It provides an alternative way to call + ``fmin_cg``, by specifying ``method='CG'``. + + Notes + ----- + This conjugate gradient algorithm is based on that of Polak and Ribiere + [1]_. + + Conjugate gradient methods tend to work better when: + + 1. `f` has a unique global minimizing point, and no local minima or + other stationary points, + 2. `f` is, at least locally, reasonably well approximated by a + quadratic function of the variables, + 3. `f` is continuous and has a continuous gradient, + 4. `fprime` is not too large, e.g., has a norm less than 1000, + 5. The initial guess, `x0`, is reasonably close to `f` 's global + minimizing point, `xopt`. + + Parameters `c1` and `c2` must satisfy ``0 < c1 < c2 < 1``. + + References + ---------- + .. [1] Wright & Nocedal, "Numerical Optimization", 1999, pp. 120-122. + + Examples + -------- + Example 1: seek the minimum value of the expression + ``a*u**2 + b*u*v + c*v**2 + d*u + e*v + f`` for given values + of the parameters and an initial guess ``(u, v) = (0, 0)``. + + >>> import numpy as np + >>> args = (2, 3, 7, 8, 9, 10) # parameter values + >>> def f(x, *args): + ... u, v = x + ... a, b, c, d, e, f = args + ... return a*u**2 + b*u*v + c*v**2 + d*u + e*v + f + >>> def gradf(x, *args): + ... u, v = x + ... a, b, c, d, e, f = args + ... gu = 2*a*u + b*v + d # u-component of the gradient + ... gv = b*u + 2*c*v + e # v-component of the gradient + ... return np.asarray((gu, gv)) + >>> x0 = np.asarray((0, 0)) # Initial guess. + >>> from scipy import optimize + >>> res1 = optimize.fmin_cg(f, x0, fprime=gradf, args=args) + Optimization terminated successfully. + Current function value: 1.617021 + Iterations: 4 + Function evaluations: 8 + Gradient evaluations: 8 + >>> res1 + array([-1.80851064, -0.25531915]) + + Example 2: solve the same problem using the `minimize` function. + (This `myopts` dictionary shows all of the available options, + although in practice only non-default values would be needed. + The returned value will be a dictionary.) + + >>> opts = {'maxiter' : None, # default value. + ... 'disp' : True, # non-default value. + ... 'gtol' : 1e-5, # default value. + ... 'norm' : np.inf, # default value. + ... 'eps' : 1.4901161193847656e-08} # default value. + >>> res2 = optimize.minimize(f, x0, jac=gradf, args=args, + ... method='CG', options=opts) + Optimization terminated successfully. 
+ Current function value: 1.617021 + Iterations: 4 + Function evaluations: 8 + Gradient evaluations: 8 + >>> res2.x # minimum found + array([-1.80851064, -0.25531915]) + + """ + opts = {'gtol': gtol, + 'norm': norm, + 'eps': epsilon, + 'disp': disp, + 'maxiter': maxiter, + 'return_all': retall} + + callback = _wrap_callback(callback) + res = _minimize_cg(f, x0, args, fprime, callback=callback, c1=c1, c2=c2, + **opts) + + if full_output: + retlist = res['x'], res['fun'], res['nfev'], res['njev'], res['status'] + if retall: + retlist += (res['allvecs'], ) + return retlist + else: + if retall: + return res['x'], res['allvecs'] + else: + return res['x'] + + +def _minimize_cg(fun, x0, args=(), jac=None, callback=None, + gtol=1e-5, norm=np.inf, eps=_epsilon, maxiter=None, + disp=False, return_all=False, finite_diff_rel_step=None, + c1=1e-4, c2=0.4, **unknown_options): + """ + Minimization of scalar function of one or more variables using the + conjugate gradient algorithm. + + Options + ------- + disp : bool + Set to True to print convergence messages. + maxiter : int + Maximum number of iterations to perform. + gtol : float + Gradient norm must be less than `gtol` before successful + termination. + norm : float + Order of norm (Inf is max, -Inf is min). + eps : float or ndarray + If `jac is None` the absolute step size used for numerical + approximation of the jacobian via forward differences. + return_all : bool, optional + Set to True to return a list of the best solution at each of the + iterations. + finite_diff_rel_step : None or array_like, optional + If ``jac in ['2-point', '3-point', 'cs']`` the relative step size to + use for numerical approximation of the jacobian. The absolute step + size is computed as ``h = rel_step * sign(x) * max(1, abs(x))``, + possibly adjusted to fit into the bounds. For ``jac='3-point'`` + the sign of `h` is ignored. If None (default) then step is selected + automatically. + c1 : float, default: 1e-4 + Parameter for Armijo condition rule. + c2 : float, default: 0.4 + Parameter for curvature condition rule. + + Notes + ----- + Parameters `c1` and `c2` must satisfy ``0 < c1 < c2 < 1``. + """ + _check_unknown_options(unknown_options) + + retall = return_all + + x0 = asarray(x0).flatten() + if maxiter is None: + maxiter = len(x0) * 200 + + sf = _prepare_scalar_function(fun, x0, jac=jac, args=args, epsilon=eps, + finite_diff_rel_step=finite_diff_rel_step) + + f = sf.fun + myfprime = sf.grad + + old_fval = f(x0) + gfk = myfprime(x0) + + k = 0 + xk = x0 + # Sets the initial step guess to dx ~ 1 + old_old_fval = old_fval + np.linalg.norm(gfk) / 2 + + if retall: + allvecs = [xk] + warnflag = 0 + pk = -gfk + gnorm = vecnorm(gfk, ord=norm) + + sigma_3 = 0.01 + + while (gnorm > gtol) and (k < maxiter): + deltak = np.dot(gfk, gfk) + + cached_step = [None] + + def polak_ribiere_powell_step(alpha, gfkp1=None): + xkp1 = xk + alpha * pk + if gfkp1 is None: + gfkp1 = myfprime(xkp1) + yk = gfkp1 - gfk + beta_k = max(0, np.dot(yk, gfkp1) / deltak) + pkp1 = -gfkp1 + beta_k * pk + gnorm = vecnorm(gfkp1, ord=norm) + return (alpha, xkp1, pkp1, gfkp1, gnorm) + + def descent_condition(alpha, xkp1, fp1, gfkp1): + # Polak-Ribiere+ needs an explicit check of a sufficient + # descent condition, which is not guaranteed by strong Wolfe. + # + # See Gilbert & Nocedal, "Global convergence properties of + # conjugate gradient methods for optimization", + # SIAM J. Optimization 2, 21 (1992). 
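+ # In symbols, the acceptance test below is
+ #   p_{k+1}^T g_{k+1} <= -sigma_3 * ||g_{k+1}||**2   (with sigma_3 = 0.01),
+ # evaluated at the candidate step, unless that step already drives the
+ # gradient norm below gtol.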
+ cached_step[:] = polak_ribiere_powell_step(alpha, gfkp1) + alpha, xk, pk, gfk, gnorm = cached_step + + # Accept step if it leads to convergence. + if gnorm <= gtol: + return True + + # Accept step if sufficient descent condition applies. + return np.dot(pk, gfk) <= -sigma_3 * np.dot(gfk, gfk) + + try: + alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \ + _line_search_wolfe12(f, myfprime, xk, pk, gfk, old_fval, + old_old_fval, c1=c1, c2=c2, amin=1e-100, + amax=1e100, extra_condition=descent_condition) + except _LineSearchError: + # Line search failed to find a better solution. + warnflag = 2 + break + + # Reuse already computed results if possible + if alpha_k == cached_step[0]: + alpha_k, xk, pk, gfk, gnorm = cached_step + else: + alpha_k, xk, pk, gfk, gnorm = polak_ribiere_powell_step(alpha_k, gfkp1) + + if retall: + allvecs.append(xk) + k += 1 + intermediate_result = OptimizeResult(x=xk, fun=old_fval) + if _call_callback_maybe_halt(callback, intermediate_result): + break + + fval = old_fval + if warnflag == 2: + msg = _status_message['pr_loss'] + elif k >= maxiter: + warnflag = 1 + msg = _status_message['maxiter'] + elif np.isnan(gnorm) or np.isnan(fval) or np.isnan(xk).any(): + warnflag = 3 + msg = _status_message['nan'] + else: + msg = _status_message['success'] + + if disp: + _print_success_message_or_warn(warnflag, msg) + print(f" Current function value: {fval:f}") + print(" Iterations: %d" % k) + print(" Function evaluations: %d" % sf.nfev) + print(" Gradient evaluations: %d" % sf.ngev) + + result = OptimizeResult(fun=fval, jac=gfk, nfev=sf.nfev, + njev=sf.ngev, status=warnflag, + success=(warnflag == 0), message=msg, x=xk, + nit=k) + if retall: + result['allvecs'] = allvecs + return result + + +def fmin_ncg(f, x0, fprime, fhess_p=None, fhess=None, args=(), avextol=1e-5, + epsilon=_epsilon, maxiter=None, full_output=0, disp=1, retall=0, + callback=None, c1=1e-4, c2=0.9): + """ + Unconstrained minimization of a function using the Newton-CG method. + + Parameters + ---------- + f : callable ``f(x, *args)`` + Objective function to be minimized. + x0 : ndarray + Initial guess. + fprime : callable ``f'(x, *args)`` + Gradient of f. + fhess_p : callable ``fhess_p(x, p, *args)``, optional + Function which computes the Hessian of f times an + arbitrary vector, p. + fhess : callable ``fhess(x, *args)``, optional + Function to compute the Hessian matrix of f. + args : tuple, optional + Extra arguments passed to f, fprime, fhess_p, and fhess + (the same set of extra arguments is supplied to all of + these functions). + epsilon : float or ndarray, optional + If fhess is approximated, use this value for the step size. + callback : callable, optional + An optional user-supplied function which is called after + each iteration. Called as callback(xk), where xk is the + current parameter vector. + avextol : float, optional + Convergence is assumed when the average relative error in + the minimizer falls below this amount. + maxiter : int, optional + Maximum number of iterations to perform. + full_output : bool, optional + If True, return the optional outputs. + disp : bool, optional + If True, print convergence message. + retall : bool, optional + If True, return a list of results at each iteration. + c1 : float, default: 1e-4 + Parameter for Armijo condition rule. + c2 : float, default: 0.9 + Parameter for curvature condition rule + + Returns + ------- + xopt : ndarray + Parameters which minimize f, i.e., ``f(xopt) == fopt``. 
+ fopt : float + Value of the function at xopt, i.e., ``fopt = f(xopt)``. + fcalls : int + Number of function calls made. + gcalls : int + Number of gradient calls made. + hcalls : int + Number of Hessian calls made. + warnflag : int + Warnings generated by the algorithm. + 1 : Maximum number of iterations exceeded. + 2 : Line search failure (precision loss). + 3 : NaN result encountered. + allvecs : list + The result at each iteration, if retall is True (see below). + + See also + -------- + minimize: Interface to minimization algorithms for multivariate + functions. See the 'Newton-CG' `method` in particular. + + Notes + ----- + Only one of `fhess_p` or `fhess` need to be given. If `fhess` + is provided, then `fhess_p` will be ignored. If neither `fhess` + nor `fhess_p` is provided, then the hessian product will be + approximated using finite differences on `fprime`. `fhess_p` + must compute the hessian times an arbitrary vector. If it is not + given, finite-differences on `fprime` are used to compute + it. + + Newton-CG methods are also called truncated Newton methods. This + function differs from scipy.optimize.fmin_tnc because + + 1. scipy.optimize.fmin_ncg is written purely in Python using NumPy + and scipy while scipy.optimize.fmin_tnc calls a C function. + 2. scipy.optimize.fmin_ncg is only for unconstrained minimization + while scipy.optimize.fmin_tnc is for unconstrained minimization + or box constrained minimization. (Box constraints give + lower and upper bounds for each variable separately.) + + Parameters `c1` and `c2` must satisfy ``0 < c1 < c2 < 1``. + + References + ---------- + Wright & Nocedal, 'Numerical Optimization', 1999, p. 140. + + """ + opts = {'xtol': avextol, + 'eps': epsilon, + 'maxiter': maxiter, + 'disp': disp, + 'return_all': retall} + + callback = _wrap_callback(callback) + res = _minimize_newtoncg(f, x0, args, fprime, fhess, fhess_p, + callback=callback, c1=c1, c2=c2, **opts) + + if full_output: + retlist = (res['x'], res['fun'], res['nfev'], res['njev'], + res['nhev'], res['status']) + if retall: + retlist += (res['allvecs'], ) + return retlist + else: + if retall: + return res['x'], res['allvecs'] + else: + return res['x'] + + +def _minimize_newtoncg(fun, x0, args=(), jac=None, hess=None, hessp=None, + callback=None, xtol=1e-5, eps=_epsilon, maxiter=None, + disp=False, return_all=False, c1=1e-4, c2=0.9, + **unknown_options): + """ + Minimization of scalar function of one or more variables using the + Newton-CG algorithm. + + Note that the `jac` parameter (Jacobian) is required. + + Options + ------- + disp : bool + Set to True to print convergence messages. + xtol : float + Average relative error in solution `xopt` acceptable for + convergence. + maxiter : int + Maximum number of iterations to perform. + eps : float or ndarray + If `hessp` is approximated, use this value for the step size. + return_all : bool, optional + Set to True to return a list of the best solution at each of the + iterations. + c1 : float, default: 1e-4 + Parameter for Armijo condition rule. + c2 : float, default: 0.9 + Parameter for curvature condition rule. + + Notes + ----- + Parameters `c1` and `c2` must satisfy ``0 < c1 < c2 < 1``. + """ + _check_unknown_options(unknown_options) + if jac is None: + raise ValueError('Jacobian is required for Newton-CG method') + fhess_p = hessp + fhess = hess + avextol = xtol + epsilon = eps + retall = return_all + + x0 = asarray(x0).flatten() + # TODO: add hessp (callable or FD) to ScalarFunction? 
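+    # Descriptive outline of the loop implemented below:
+    #   1. Set b = -grad f(xk) and run at most cg_maxiter inner CG iterations
+    #      on hess(xk) @ p = b, stopping once the l1 norm of the residual
+    #      drops below eta * ||b||_1 with eta = min(0.5, sqrt(||b||_1))
+    #      (this truncation is what makes the method "truncated Newton").
+    #   2. Use the resulting p as the search direction pk and perform a Wolfe
+    #      line search with parameters c1 and c2.
+    #   3. Stop the outer loop once the l1 norm of the update falls below
+    #      len(x0) * xtol, or once maxiter outer iterations are reached.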
+ sf = _prepare_scalar_function( + fun, x0, jac, args=args, epsilon=eps, hess=hess + ) + f = sf.fun + fprime = sf.grad + _h = sf.hess(x0) + + # Logic for hess/hessp + # - If a callable(hess) is provided, then use that + # - If hess is a FD_METHOD, or the output from hess(x) is a LinearOperator + # then create a hessp function using those. + # - If hess is None but you have callable(hessp) then use the hessp. + # - If hess and hessp are None then approximate hessp using the grad/jac. + + if (hess in FD_METHODS or isinstance(_h, LinearOperator)): + fhess = None + + def _hessp(x, p, *args): + return sf.hess(x).dot(p) + + fhess_p = _hessp + + def terminate(warnflag, msg): + if disp: + _print_success_message_or_warn(warnflag, msg) + print(f" Current function value: {old_fval:f}") + print(" Iterations: %d" % k) + print(" Function evaluations: %d" % sf.nfev) + print(" Gradient evaluations: %d" % sf.ngev) + print(" Hessian evaluations: %d" % hcalls) + fval = old_fval + result = OptimizeResult(fun=fval, jac=gfk, nfev=sf.nfev, + njev=sf.ngev, nhev=hcalls, status=warnflag, + success=(warnflag == 0), message=msg, x=xk, + nit=k) + if retall: + result['allvecs'] = allvecs + return result + + hcalls = 0 + if maxiter is None: + maxiter = len(x0)*200 + cg_maxiter = 20*len(x0) + + xtol = len(x0) * avextol + # Make sure we enter the while loop. + update_l1norm = np.finfo(float).max + xk = np.copy(x0) + if retall: + allvecs = [xk] + k = 0 + gfk = None + old_fval = f(x0) + old_old_fval = None + float64eps = np.finfo(np.float64).eps + while update_l1norm > xtol: + if k >= maxiter: + msg = "Warning: " + _status_message['maxiter'] + return terminate(1, msg) + # Compute a search direction pk by applying the CG method to + # del2 f(xk) p = - grad f(xk) starting from 0. + b = -fprime(xk) + maggrad = np.linalg.norm(b, ord=1) + eta = min(0.5, math.sqrt(maggrad)) + termcond = eta * maggrad + xsupi = zeros(len(x0), dtype=x0.dtype) + ri = -b + psupi = -ri + i = 0 + dri0 = np.dot(ri, ri) + + if fhess is not None: # you want to compute hessian once. + A = sf.hess(xk) + hcalls += 1 + + for k2 in range(cg_maxiter): + if np.add.reduce(np.abs(ri)) <= termcond: + break + if fhess is None: + if fhess_p is None: + Ap = approx_fhess_p(xk, psupi, fprime, epsilon) + else: + Ap = fhess_p(xk, psupi, *args) + hcalls += 1 + else: + # hess was supplied as a callable or hessian update strategy, so + # A is a dense numpy array or sparse matrix + Ap = A.dot(psupi) + # check curvature + Ap = asarray(Ap).squeeze() # get rid of matrices... + curv = np.dot(psupi, Ap) + if 0 <= curv <= 3 * float64eps: + break + elif curv < 0: + if (i > 0): + break + else: + # fall back to steepest descent direction + xsupi = dri0 / (-curv) * b + break + alphai = dri0 / curv + xsupi += alphai * psupi + ri += alphai * Ap + dri1 = np.dot(ri, ri) + betai = dri1 / dri0 + psupi = -ri + betai * psupi + i += 1 + dri0 = dri1 # update np.dot(ri,ri) for next time. + else: + # curvature keeps increasing, bail out + msg = ("Warning: CG iterations didn't converge. The Hessian is not " + "positive definite.") + return terminate(3, msg) + + pk = xsupi # search direction is solution to system. + gfk = -b # gradient at xk + + try: + alphak, fc, gc, old_fval, old_old_fval, gfkp1 = \ + _line_search_wolfe12(f, fprime, xk, pk, gfk, + old_fval, old_old_fval, c1=c1, c2=c2) + except _LineSearchError: + # Line search failed to find a better solution. 
+ msg = "Warning: " + _status_message['pr_loss'] + return terminate(2, msg) + + update = alphak * pk + xk += update # upcast if necessary + if retall: + allvecs.append(xk) + k += 1 + intermediate_result = OptimizeResult(x=xk, fun=old_fval) + if _call_callback_maybe_halt(callback, intermediate_result): + return terminate(5, "") + update_l1norm = np.linalg.norm(update, ord=1) + + else: + if np.isnan(old_fval) or np.isnan(update_l1norm): + return terminate(3, _status_message['nan']) + + msg = _status_message['success'] + return terminate(0, msg) + + +def fminbound(func, x1, x2, args=(), xtol=1e-5, maxfun=500, + full_output=0, disp=1): + """Bounded minimization for scalar functions. + + Parameters + ---------- + func : callable f(x,*args) + Objective function to be minimized (must accept and return scalars). + x1, x2 : float or array scalar + Finite optimization bounds. + args : tuple, optional + Extra arguments passed to function. + xtol : float, optional + The convergence tolerance. + maxfun : int, optional + Maximum number of function evaluations allowed. + full_output : bool, optional + If True, return optional outputs. + disp: int, optional + If non-zero, print messages. + + ``0`` : no message printing. + + ``1`` : non-convergence notification messages only. + + ``2`` : print a message on convergence too. + + ``3`` : print iteration results. + + Returns + ------- + xopt : ndarray + Parameters (over given interval) which minimize the + objective function. + fval : number + (Optional output) The function value evaluated at the minimizer. + ierr : int + (Optional output) An error flag (0 if converged, 1 if maximum number of + function calls reached). + numfunc : int + (Optional output) The number of function calls made. + + See also + -------- + minimize_scalar: Interface to minimization algorithms for scalar + univariate functions. See the 'Bounded' `method` in particular. + + Notes + ----- + Finds a local minimizer of the scalar function `func` in the + interval x1 < xopt < x2 using Brent's method. (See `brent` + for auto-bracketing.) + + References + ---------- + .. [1] Forsythe, G.E., M. A. Malcolm, and C. B. Moler. "Computer Methods + for Mathematical Computations." Prentice-Hall Series in Automatic + Computation 259 (1977). + .. [2] Brent, Richard P. Algorithms for Minimization Without Derivatives. + Courier Corporation, 2013. + + Examples + -------- + `fminbound` finds the minimizer of the function in the given range. + The following examples illustrate this. + + >>> from scipy import optimize + >>> def f(x): + ... return (x-1)**2 + >>> minimizer = optimize.fminbound(f, -4, 4) + >>> minimizer + 1.0 + >>> minimum = f(minimizer) + >>> minimum + 0.0 + >>> res = optimize.fminbound(f, 3, 4, full_output=True) + >>> minimizer, fval, ierr, numfunc = res + >>> minimizer + 3.000005960860986 + >>> minimum = f(minimizer) + >>> minimum, fval + (4.000023843479476, 4.000023843479476) + """ + options = {'xatol': xtol, + 'maxiter': maxfun, + 'disp': disp} + + res = _minimize_scalar_bounded(func, (x1, x2), args, **options) + if full_output: + return res['x'], res['fun'], res['status'], res['nfev'] + else: + return res['x'] + + +def _minimize_scalar_bounded(func, bounds, args=(), + xatol=1e-5, maxiter=500, disp=0, + **unknown_options): + """ + Options + ------- + maxiter : int + Maximum number of iterations to perform. + disp: int, optional + If non-zero, print messages. + + ``0`` : no message printing. + + ``1`` : non-convergence notification messages only. 
+ + ``2`` : print a message on convergence too. + + ``3`` : print iteration results. + + xatol : float + Absolute error in solution `xopt` acceptable for convergence. + + """ + _check_unknown_options(unknown_options) + maxfun = maxiter + # Test bounds are of correct form + if len(bounds) != 2: + raise ValueError('bounds must have two elements.') + x1, x2 = bounds + + if not (is_finite_scalar(x1) and is_finite_scalar(x2)): + raise ValueError("Optimization bounds must be finite scalars.") + + if x1 > x2: + raise ValueError("The lower bound exceeds the upper bound.") + + flag = 0 + header = ' Func-count x f(x) Procedure' + step = ' initial' + + sqrt_eps = sqrt(2.2e-16) + golden_mean = 0.5 * (3.0 - sqrt(5.0)) + a, b = x1, x2 + fulc = a + golden_mean * (b - a) + nfc, xf = fulc, fulc + rat = e = 0.0 + x = xf + fx = func(x, *args) + num = 1 + fmin_data = (1, xf, fx) + fu = np.inf + + ffulc = fnfc = fx + xm = 0.5 * (a + b) + tol1 = sqrt_eps * np.abs(xf) + xatol / 3.0 + tol2 = 2.0 * tol1 + + if disp > 2: + print(" ") + print(header) + print("%5.0f %12.6g %12.6g %s" % (fmin_data + (step,))) + + while (np.abs(xf - xm) > (tol2 - 0.5 * (b - a))): + golden = 1 + # Check for parabolic fit + if np.abs(e) > tol1: + golden = 0 + r = (xf - nfc) * (fx - ffulc) + q = (xf - fulc) * (fx - fnfc) + p = (xf - fulc) * q - (xf - nfc) * r + q = 2.0 * (q - r) + if q > 0.0: + p = -p + q = np.abs(q) + r = e + e = rat + + # Check for acceptability of parabola + if ((np.abs(p) < np.abs(0.5*q*r)) and (p > q*(a - xf)) and + (p < q * (b - xf))): + rat = (p + 0.0) / q + x = xf + rat + step = ' parabolic' + + if ((x - a) < tol2) or ((b - x) < tol2): + si = np.sign(xm - xf) + ((xm - xf) == 0) + rat = tol1 * si + else: # do a golden-section step + golden = 1 + + if golden: # do a golden-section step + if xf >= xm: + e = a - xf + else: + e = b - xf + rat = golden_mean*e + step = ' golden' + + si = np.sign(rat) + (rat == 0) + x = xf + si * np.maximum(np.abs(rat), tol1) + fu = func(x, *args) + num += 1 + fmin_data = (num, x, fu) + if disp > 2: + print("%5.0f %12.6g %12.6g %s" % (fmin_data + (step,))) + + if fu <= fx: + if x >= xf: + a = xf + else: + b = xf + fulc, ffulc = nfc, fnfc + nfc, fnfc = xf, fx + xf, fx = x, fu + else: + if x < xf: + a = x + else: + b = x + if (fu <= fnfc) or (nfc == xf): + fulc, ffulc = nfc, fnfc + nfc, fnfc = x, fu + elif (fu <= ffulc) or (fulc == xf) or (fulc == nfc): + fulc, ffulc = x, fu + + xm = 0.5 * (a + b) + tol1 = sqrt_eps * np.abs(xf) + xatol / 3.0 + tol2 = 2.0 * tol1 + + if num >= maxfun: + flag = 1 + break + + if np.isnan(xf) or np.isnan(fx) or np.isnan(fu): + flag = 2 + + fval = fx + if disp > 0: + _endprint(x, flag, fval, maxfun, xatol, disp) + + result = OptimizeResult(fun=fval, status=flag, success=(flag == 0), + message={0: 'Solution found.', + 1: 'Maximum number of function calls ' + 'reached.', + 2: _status_message['nan']}.get(flag, ''), + x=xf, nfev=num, nit=num) + + return result + + +class Brent: + #need to rethink design of __init__ + def __init__(self, func, args=(), tol=1.48e-8, maxiter=500, + full_output=0, disp=0): + self.func = func + self.args = args + self.tol = tol + self.maxiter = maxiter + self._mintol = 1.0e-11 + self._cg = 0.3819660 + self.xmin = None + self.fval = None + self.iter = 0 + self.funcalls = 0 + self.disp = disp + + # need to rethink design of set_bracket (new options, etc.) 
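+    # Sketch of the intended internal usage of this class (this is what
+    # _minimize_scalar_brent further below does; f, xtol, maxiter and brack
+    # are placeholders here):
+    #     b = Brent(func=f, args=args, tol=xtol, maxiter=maxiter)
+    #     b.set_bracket(brack)   # brack is None, (xa, xb) or (xa, xb, xc)
+    #     b.optimize()
+    #     xmin, fval, nit, nfev = b.get_result(full_output=True)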
+ def set_bracket(self, brack=None): + self.brack = brack + + def get_bracket_info(self): + #set up + func = self.func + args = self.args + brack = self.brack + ### BEGIN core bracket_info code ### + ### carefully DOCUMENT any CHANGES in core ## + if brack is None: + xa, xb, xc, fa, fb, fc, funcalls = bracket(func, args=args) + elif len(brack) == 2: + xa, xb, xc, fa, fb, fc, funcalls = bracket(func, xa=brack[0], + xb=brack[1], args=args) + elif len(brack) == 3: + xa, xb, xc = brack + if (xa > xc): # swap so xa < xc can be assumed + xc, xa = xa, xc + if not ((xa < xb) and (xb < xc)): + raise ValueError( + "Bracketing values (xa, xb, xc) do not" + " fulfill this requirement: (xa < xb) and (xb < xc)" + ) + fa = func(*((xa,) + args)) + fb = func(*((xb,) + args)) + fc = func(*((xc,) + args)) + if not ((fb < fa) and (fb < fc)): + raise ValueError( + "Bracketing values (xa, xb, xc) do not fulfill" + " this requirement: (f(xb) < f(xa)) and (f(xb) < f(xc))" + ) + + funcalls = 3 + else: + raise ValueError("Bracketing interval must be " + "length 2 or 3 sequence.") + ### END core bracket_info code ### + + return xa, xb, xc, fa, fb, fc, funcalls + + def optimize(self): + # set up for optimization + func = self.func + xa, xb, xc, fa, fb, fc, funcalls = self.get_bracket_info() + _mintol = self._mintol + _cg = self._cg + ################################# + #BEGIN CORE ALGORITHM + ################################# + x = w = v = xb + fw = fv = fx = fb + if (xa < xc): + a = xa + b = xc + else: + a = xc + b = xa + deltax = 0.0 + iter = 0 + + if self.disp > 2: + print(" ") + print(f"{'Func-count':^12} {'x':^12} {'f(x)': ^12}") + print(f"{funcalls:^12g} {x:^12.6g} {fx:^12.6g}") + + while (iter < self.maxiter): + tol1 = self.tol * np.abs(x) + _mintol + tol2 = 2.0 * tol1 + xmid = 0.5 * (a + b) + # check for convergence + if np.abs(x - xmid) < (tol2 - 0.5 * (b - a)): + break + # XXX In the first iteration, rat is only bound in the true case + # of this conditional. This used to cause an UnboundLocalError + # (gh-4140). It should be set before the if (but to what?). + if (np.abs(deltax) <= tol1): + if (x >= xmid): + deltax = a - x # do a golden section step + else: + deltax = b - x + rat = _cg * deltax + else: # do a parabolic step + tmp1 = (x - w) * (fx - fv) + tmp2 = (x - v) * (fx - fw) + p = (x - v) * tmp2 - (x - w) * tmp1 + tmp2 = 2.0 * (tmp2 - tmp1) + if (tmp2 > 0.0): + p = -p + tmp2 = np.abs(tmp2) + dx_temp = deltax + deltax = rat + # check parabolic fit + if ((p > tmp2 * (a - x)) and (p < tmp2 * (b - x)) and + (np.abs(p) < np.abs(0.5 * tmp2 * dx_temp))): + rat = p * 1.0 / tmp2 # if parabolic step is useful. 
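+                        # Here p / tmp2 is the displacement from x to the
+                        # vertex of the parabola through (v, fv), (w, fw),
+                        # (x, fx); the checks just above only accept it when
+                        # the trial point stays inside (a, b) and the move is
+                        # less than half of dx_temp, the step size retained
+                        # from an earlier iteration for exactly this test.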
+ u = x + rat + if ((u - a) < tol2 or (b - u) < tol2): + if xmid - x >= 0: + rat = tol1 + else: + rat = -tol1 + else: + if (x >= xmid): + deltax = a - x # if it's not do a golden section step + else: + deltax = b - x + rat = _cg * deltax + + if (np.abs(rat) < tol1): # update by at least tol1 + if rat >= 0: + u = x + tol1 + else: + u = x - tol1 + else: + u = x + rat + fu = func(*((u,) + self.args)) # calculate new output value + funcalls += 1 + + if (fu > fx): # if it's bigger than current + if (u < x): + a = u + else: + b = u + if (fu <= fw) or (w == x): + v = w + w = u + fv = fw + fw = fu + elif (fu <= fv) or (v == x) or (v == w): + v = u + fv = fu + else: + if (u >= x): + a = x + else: + b = x + v = w + w = x + x = u + fv = fw + fw = fx + fx = fu + + if self.disp > 2: + print(f"{funcalls:^12g} {x:^12.6g} {fx:^12.6g}") + + iter += 1 + ################################# + #END CORE ALGORITHM + ################################# + + self.xmin = x + self.fval = fx + self.iter = iter + self.funcalls = funcalls + + def get_result(self, full_output=False): + if full_output: + return self.xmin, self.fval, self.iter, self.funcalls + else: + return self.xmin + + +def brent(func, args=(), brack=None, tol=1.48e-8, full_output=0, maxiter=500): + """ + Given a function of one variable and a possible bracket, return + a local minimizer of the function isolated to a fractional precision + of tol. + + Parameters + ---------- + func : callable f(x,*args) + Objective function. + args : tuple, optional + Additional arguments (if present). + brack : tuple, optional + Either a triple ``(xa, xb, xc)`` satisfying ``xa < xb < xc`` and + ``func(xb) < func(xa) and func(xb) < func(xc)``, or a pair + ``(xa, xb)`` to be used as initial points for a downhill bracket search + (see `scipy.optimize.bracket`). + The minimizer ``x`` will not necessarily satisfy ``xa <= x <= xb``. + tol : float, optional + Relative error in solution `xopt` acceptable for convergence. + full_output : bool, optional + If True, return all output args (xmin, fval, iter, + funcalls). + maxiter : int, optional + Maximum number of iterations in solution. + + Returns + ------- + xmin : ndarray + Optimum point. + fval : float + (Optional output) Optimum function value. + iter : int + (Optional output) Number of iterations. + funcalls : int + (Optional output) Number of objective function evaluations made. + + See also + -------- + minimize_scalar: Interface to minimization algorithms for scalar + univariate functions. See the 'Brent' `method` in particular. + + Notes + ----- + Uses inverse parabolic interpolation when possible to speed up + convergence of golden section method. + + Does not ensure that the minimum lies in the range specified by + `brack`. See `scipy.optimize.fminbound`. + + Examples + -------- + We illustrate the behaviour of the function when `brack` is of + size 2 and 3 respectively. In the case where `brack` is of the + form ``(xa, xb)``, we can see for the given values, the output does + not necessarily lie in the range ``(xa, xb)``. + + >>> def f(x): + ... 
return (x-1)**2 + + >>> from scipy import optimize + + >>> minimizer = optimize.brent(f, brack=(1, 2)) + >>> minimizer + 1 + >>> res = optimize.brent(f, brack=(-1, 0.5, 2), full_output=True) + >>> xmin, fval, iter, funcalls = res + >>> f(xmin), fval + (0.0, 0.0) + + """ + options = {'xtol': tol, + 'maxiter': maxiter} + res = _minimize_scalar_brent(func, brack, args, **options) + if full_output: + return res['x'], res['fun'], res['nit'], res['nfev'] + else: + return res['x'] + + +def _minimize_scalar_brent(func, brack=None, args=(), xtol=1.48e-8, + maxiter=500, disp=0, + **unknown_options): + """ + Options + ------- + maxiter : int + Maximum number of iterations to perform. + xtol : float + Relative error in solution `xopt` acceptable for convergence. + disp : int, optional + If non-zero, print messages. + + ``0`` : no message printing. + + ``1`` : non-convergence notification messages only. + + ``2`` : print a message on convergence too. + + ``3`` : print iteration results. + + Notes + ----- + Uses inverse parabolic interpolation when possible to speed up + convergence of golden section method. + + """ + _check_unknown_options(unknown_options) + tol = xtol + if tol < 0: + raise ValueError(f'tolerance should be >= 0, got {tol!r}') + + brent = Brent(func=func, args=args, tol=tol, + full_output=True, maxiter=maxiter, disp=disp) + brent.set_bracket(brack) + brent.optimize() + x, fval, nit, nfev = brent.get_result(full_output=True) + + success = nit < maxiter and not (np.isnan(x) or np.isnan(fval)) + + if success: + message = ("\nOptimization terminated successfully;\n" + "The returned value satisfies the termination criteria\n" + f"(using xtol = {xtol} )") + else: + if nit >= maxiter: + message = "\nMaximum number of iterations exceeded" + if np.isnan(x) or np.isnan(fval): + message = f"{_status_message['nan']}" + + if disp: + _print_success_message_or_warn(not success, message) + + return OptimizeResult(fun=fval, x=x, nit=nit, nfev=nfev, + success=success, message=message) + + +def golden(func, args=(), brack=None, tol=_epsilon, + full_output=0, maxiter=5000): + """ + Return the minimizer of a function of one variable using the golden section + method. + + Given a function of one variable and a possible bracketing interval, + return a minimizer of the function isolated to a fractional precision of + tol. + + Parameters + ---------- + func : callable func(x,*args) + Objective function to minimize. + args : tuple, optional + Additional arguments (if present), passed to func. + brack : tuple, optional + Either a triple ``(xa, xb, xc)`` where ``xa < xb < xc`` and + ``func(xb) < func(xa) and func(xb) < func(xc)``, or a pair (xa, xb) + to be used as initial points for a downhill bracket search (see + `scipy.optimize.bracket`). + The minimizer ``x`` will not necessarily satisfy ``xa <= x <= xb``. + tol : float, optional + x tolerance stop criterion + full_output : bool, optional + If True, return optional outputs. + maxiter : int + Maximum number of iterations to perform. + + Returns + ------- + xmin : ndarray + Optimum point. + fval : float + (Optional output) Optimum function value. + funcalls : int + (Optional output) Number of objective function evaluations made. + + See also + -------- + minimize_scalar: Interface to minimization algorithms for scalar + univariate functions. See the 'Golden' `method` in particular. + + Notes + ----- + Uses analog of bisection method to decrease the bracketed + interval. 
+ + Examples + -------- + We illustrate the behaviour of the function when `brack` is of + size 2 and 3, respectively. In the case where `brack` is of the + form (xa,xb), we can see for the given values, the output need + not necessarily lie in the range ``(xa, xb)``. + + >>> def f(x): + ... return (x-1)**2 + + >>> from scipy import optimize + + >>> minimizer = optimize.golden(f, brack=(1, 2)) + >>> minimizer + 1 + >>> res = optimize.golden(f, brack=(-1, 0.5, 2), full_output=True) + >>> xmin, fval, funcalls = res + >>> f(xmin), fval + (9.925165290385052e-18, 9.925165290385052e-18) + + """ + options = {'xtol': tol, 'maxiter': maxiter} + res = _minimize_scalar_golden(func, brack, args, **options) + if full_output: + return res['x'], res['fun'], res['nfev'] + else: + return res['x'] + + +def _minimize_scalar_golden(func, brack=None, args=(), + xtol=_epsilon, maxiter=5000, disp=0, + **unknown_options): + """ + Options + ------- + xtol : float + Relative error in solution `xopt` acceptable for convergence. + maxiter : int + Maximum number of iterations to perform. + disp: int, optional + If non-zero, print messages. + + ``0`` : no message printing. + + ``1`` : non-convergence notification messages only. + + ``2`` : print a message on convergence too. + + ``3`` : print iteration results. + """ + _check_unknown_options(unknown_options) + tol = xtol + if brack is None: + xa, xb, xc, fa, fb, fc, funcalls = bracket(func, args=args) + elif len(brack) == 2: + xa, xb, xc, fa, fb, fc, funcalls = bracket(func, xa=brack[0], + xb=brack[1], args=args) + elif len(brack) == 3: + xa, xb, xc = brack + if (xa > xc): # swap so xa < xc can be assumed + xc, xa = xa, xc + if not ((xa < xb) and (xb < xc)): + raise ValueError( + "Bracketing values (xa, xb, xc) do not" + " fulfill this requirement: (xa < xb) and (xb < xc)" + ) + fa = func(*((xa,) + args)) + fb = func(*((xb,) + args)) + fc = func(*((xc,) + args)) + if not ((fb < fa) and (fb < fc)): + raise ValueError( + "Bracketing values (xa, xb, xc) do not fulfill" + " this requirement: (f(xb) < f(xa)) and (f(xb) < f(xc))" + ) + funcalls = 3 + else: + raise ValueError("Bracketing interval must be length 2 or 3 sequence.") + + _gR = 0.61803399 # golden ratio conjugate: 2.0/(1.0+sqrt(5.0)) + _gC = 1.0 - _gR + x3 = xc + x0 = xa + if (np.abs(xc - xb) > np.abs(xb - xa)): + x1 = xb + x2 = xb + _gC * (xc - xb) + else: + x2 = xb + x1 = xb - _gC * (xb - xa) + f1 = func(*((x1,) + args)) + f2 = func(*((x2,) + args)) + funcalls += 2 + nit = 0 + + if disp > 2: + print(" ") + print(f"{'Func-count':^12} {'x':^12} {'f(x)': ^12}") + + for i in range(maxiter): + if np.abs(x3 - x0) <= tol * (np.abs(x1) + np.abs(x2)): + break + if (f2 < f1): + x0 = x1 + x1 = x2 + x2 = _gR * x1 + _gC * x3 + f1 = f2 + f2 = func(*((x2,) + args)) + else: + x3 = x2 + x2 = x1 + x1 = _gR * x2 + _gC * x0 + f2 = f1 + f1 = func(*((x1,) + args)) + funcalls += 1 + if disp > 2: + if (f1 < f2): + xmin, fval = x1, f1 + else: + xmin, fval = x2, f2 + print(f"{funcalls:^12g} {xmin:^12.6g} {fval:^12.6g}") + + nit += 1 + # end of iteration loop + + if (f1 < f2): + xmin = x1 + fval = f1 + else: + xmin = x2 + fval = f2 + + success = nit < maxiter and not (np.isnan(fval) or np.isnan(xmin)) + + if success: + message = ("\nOptimization terminated successfully;\n" + "The returned value satisfies the termination criteria\n" + f"(using xtol = {xtol} )") + else: + if nit >= maxiter: + message = "\nMaximum number of iterations exceeded" + if np.isnan(xmin) or np.isnan(fval): + message = f"{_status_message['nan']}" + + if disp: + 
_print_success_message_or_warn(not success, message) + + return OptimizeResult(fun=fval, nfev=funcalls, x=xmin, nit=nit, + success=success, message=message) + + +def bracket(func, xa=0.0, xb=1.0, args=(), grow_limit=110.0, maxiter=1000): + """ + Bracket the minimum of a function. + + Given a function and distinct initial points, search in the + downhill direction (as defined by the initial points) and return + three points that bracket the minimum of the function. + + Parameters + ---------- + func : callable f(x,*args) + Objective function to minimize. + xa, xb : float, optional + Initial points. Defaults `xa` to 0.0, and `xb` to 1.0. + A local minimum need not be contained within this interval. + args : tuple, optional + Additional arguments (if present), passed to `func`. + grow_limit : float, optional + Maximum grow limit. Defaults to 110.0 + maxiter : int, optional + Maximum number of iterations to perform. Defaults to 1000. + + Returns + ------- + xa, xb, xc : float + Final points of the bracket. + fa, fb, fc : float + Objective function values at the bracket points. + funcalls : int + Number of function evaluations made. + + Raises + ------ + BracketError + If no valid bracket is found before the algorithm terminates. + See notes for conditions of a valid bracket. + + Notes + ----- + The algorithm attempts to find three strictly ordered points (i.e. + :math:`x_a < x_b < x_c` or :math:`x_c < x_b < x_a`) satisfying + :math:`f(x_b) ≤ f(x_a)` and :math:`f(x_b) ≤ f(x_c)`, where one of the + inequalities must be satisfied strictly and all :math:`x_i` must be + finite. + + Examples + -------- + This function can find a downward convex region of a function: + + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> from scipy.optimize import bracket + >>> def f(x): + ... return 10*x**2 + 3*x + 5 + >>> x = np.linspace(-2, 2) + >>> y = f(x) + >>> init_xa, init_xb = 0.1, 1 + >>> xa, xb, xc, fa, fb, fc, funcalls = bracket(f, xa=init_xa, xb=init_xb) + >>> plt.axvline(x=init_xa, color="k", linestyle="--") + >>> plt.axvline(x=init_xb, color="k", linestyle="--") + >>> plt.plot(x, y, "-k") + >>> plt.plot(xa, fa, "bx") + >>> plt.plot(xb, fb, "rx") + >>> plt.plot(xc, fc, "bx") + >>> plt.show() + + Note that both initial points were to the right of the minimum, and the + third point was found in the "downhill" direction: the direction + in which the function appeared to be decreasing (to the left). + The final points are strictly ordered, and the function value + at the middle point is less than the function values at the endpoints; + it follows that a minimum must lie within the bracket. + + """ + _gold = 1.618034 # golden ratio: (1.0+sqrt(5.0))/2.0 + _verysmall_num = 1e-21 + # convert to numpy floats if not already + xa, xb = np.asarray([xa, xb]) + fa = func(*(xa,) + args) + fb = func(*(xb,) + args) + if (fa < fb): # Switch so fa > fb + xa, xb = xb, xa + fa, fb = fb, fa + xc = xb + _gold * (xb - xa) + fc = func(*((xc,) + args)) + funcalls = 3 + iter = 0 + while (fc < fb): + tmp1 = (xb - xa) * (fb - fc) + tmp2 = (xb - xc) * (fb - fa) + val = tmp2 - tmp1 + if np.abs(val) < _verysmall_num: + denom = 2.0 * _verysmall_num + else: + denom = 2.0 * val + w = xb - ((xb - xc) * tmp2 - (xb - xa) * tmp1) / denom + wlim = xb + grow_limit * (xc - xb) + msg = ("No valid bracket was found before the iteration limit was " + "reached. 
Consider trying different initial points or " + "increasing `maxiter`.") + if iter > maxiter: + raise RuntimeError(msg) + iter += 1 + if (w - xc) * (xb - w) > 0.0: + fw = func(*((w,) + args)) + funcalls += 1 + if (fw < fc): + xa = xb + xb = w + fa = fb + fb = fw + break + elif (fw > fb): + xc = w + fc = fw + break + w = xc + _gold * (xc - xb) + fw = func(*((w,) + args)) + funcalls += 1 + elif (w - wlim)*(wlim - xc) >= 0.0: + w = wlim + fw = func(*((w,) + args)) + funcalls += 1 + elif (w - wlim)*(xc - w) > 0.0: + fw = func(*((w,) + args)) + funcalls += 1 + if (fw < fc): + xb = xc + xc = w + w = xc + _gold * (xc - xb) + fb = fc + fc = fw + fw = func(*((w,) + args)) + funcalls += 1 + else: + w = xc + _gold * (xc - xb) + fw = func(*((w,) + args)) + funcalls += 1 + xa = xb + xb = xc + xc = w + fa = fb + fb = fc + fc = fw + + # three conditions for a valid bracket + cond1 = (fb < fc and fb <= fa) or (fb < fa and fb <= fc) + cond2 = (xa < xb < xc or xc < xb < xa) + cond3 = np.isfinite(xa) and np.isfinite(xb) and np.isfinite(xc) + msg = ("The algorithm terminated without finding a valid bracket. " + "Consider trying different initial points.") + if not (cond1 and cond2 and cond3): + e = BracketError(msg) + e.data = (xa, xb, xc, fa, fb, fc, funcalls) + raise e + + return xa, xb, xc, fa, fb, fc, funcalls + + +class BracketError(RuntimeError): + pass + + +def _recover_from_bracket_error(solver, fun, bracket, args, **options): + # `bracket` was originally written without checking whether the resulting + # bracket is valid. `brent` and `golden` built on top of it without + # checking the returned bracket for validity, and their output can be + # incorrect without warning/error if the original bracket is invalid. + # gh-14858 noticed the problem, and the following is the desired + # behavior: + # - `scipy.optimize.bracket`, `scipy.optimize.brent`, and + # `scipy.optimize.golden` should raise an error if the bracket is + # invalid, as opposed to silently returning garbage + # - `scipy.optimize.minimize_scalar` should return with `success=False` + # and other information + # The changes that would be required to achieve this the traditional + # way (`return`ing all the required information from bracket all the way + # up to `minimizer_scalar`) are extensive and invasive. (See a6aa40d.) + # We can achieve the same thing by raising the error in `bracket`, but + # storing the information needed by `minimize_scalar` in the error object, + # and intercepting it here. + try: + res = solver(fun, bracket, args, **options) + except BracketError as e: + msg = str(e) + xa, xb, xc, fa, fb, fc, funcalls = e.data + xs, fs = [xa, xb, xc], [fa, fb, fc] + if np.any(np.isnan([xs, fs])): + x, fun = np.nan, np.nan + else: + imin = np.argmin(fs) + x, fun = xs[imin], fs[imin] + return OptimizeResult(fun=fun, nfev=funcalls, x=x, + nit=0, success=False, message=msg) + return res + + +def _line_for_search(x0, alpha, lower_bound, upper_bound): + """ + Given a parameter vector ``x0`` with length ``n`` and a direction + vector ``alpha`` with length ``n``, and lower and upper bounds on + each of the ``n`` parameters, what are the bounds on a scalar + ``l`` such that ``lower_bound <= x0 + alpha * l <= upper_bound``. + + + Parameters + ---------- + x0 : np.array. + The vector representing the current location. + Note ``np.shape(x0) == (n,)``. + alpha : np.array. + The vector representing the direction. + Note ``np.shape(alpha) == (n,)``. + lower_bound : np.array. + The lower bounds for each parameter in ``x0``. 
If the ``i``th + parameter in ``x0`` is unbounded below, then ``lower_bound[i]`` + should be ``-np.inf``. + Note ``np.shape(lower_bound) == (n,)``. + upper_bound : np.array. + The upper bounds for each parameter in ``x0``. If the ``i``th + parameter in ``x0`` is unbounded above, then ``upper_bound[i]`` + should be ``np.inf``. + Note ``np.shape(upper_bound) == (n,)``. + + Returns + ------- + res : tuple ``(lmin, lmax)`` + The bounds for ``l`` such that + ``lower_bound[i] <= x0[i] + alpha[i] * l <= upper_bound[i]`` + for all ``i``. + + """ + # get nonzero indices of alpha so we don't get any zero division errors. + # alpha will not be all zero, since it is called from _linesearch_powell + # where we have a check for this. + nonzero, = alpha.nonzero() + lower_bound, upper_bound = lower_bound[nonzero], upper_bound[nonzero] + x0, alpha = x0[nonzero], alpha[nonzero] + low = (lower_bound - x0) / alpha + high = (upper_bound - x0) / alpha + + # positive and negative indices + pos = alpha > 0 + + lmin_pos = np.where(pos, low, 0) + lmin_neg = np.where(pos, 0, high) + lmax_pos = np.where(pos, high, 0) + lmax_neg = np.where(pos, 0, low) + + lmin = np.max(lmin_pos + lmin_neg) + lmax = np.min(lmax_pos + lmax_neg) + + # if x0 is outside the bounds, then it is possible that there is + # no way to get back in the bounds for the parameters being updated + # with the current direction alpha. + # when this happens, lmax < lmin. + # If this is the case, then we can just return (0, 0) + return (lmin, lmax) if lmax >= lmin else (0, 0) + + +def _linesearch_powell(func, p, xi, tol=1e-3, + lower_bound=None, upper_bound=None, fval=None): + """Line-search algorithm using fminbound. + + Find the minimum of the function ``func(x0 + alpha*direc)``. + + lower_bound : np.array. + The lower bounds for each parameter in ``x0``. If the ``i``th + parameter in ``x0`` is unbounded below, then ``lower_bound[i]`` + should be ``-np.inf``. + Note ``np.shape(lower_bound) == (n,)``. + upper_bound : np.array. + The upper bounds for each parameter in ``x0``. If the ``i``th + parameter in ``x0`` is unbounded above, then ``upper_bound[i]`` + should be ``np.inf``. + Note ``np.shape(upper_bound) == (n,)``. + fval : number. + ``fval`` is equal to ``func(p)``, the idea is just to avoid + recomputing it so we can limit the ``fevals``. + + """ + def myfunc(alpha): + return func(p + alpha*xi) + + # if xi is zero, then don't optimize + if not np.any(xi): + return ((fval, p, xi) if fval is not None else (func(p), p, xi)) + elif lower_bound is None and upper_bound is None: + # non-bounded minimization + res = _recover_from_bracket_error(_minimize_scalar_brent, + myfunc, None, tuple(), xtol=tol) + alpha_min, fret = res.x, res.fun + xi = alpha_min * xi + return fret, p + xi, xi + else: + bound = _line_for_search(p, xi, lower_bound, upper_bound) + if np.isneginf(bound[0]) and np.isposinf(bound[1]): + # equivalent to unbounded + return _linesearch_powell(func, p, xi, fval=fval, tol=tol) + elif not np.isneginf(bound[0]) and not np.isposinf(bound[1]): + # we can use a bounded scalar minimization + res = _minimize_scalar_bounded(myfunc, bound, xatol=tol / 100) + xi = res.x * xi + return res.fun, p + xi, xi + else: + # only bounded on one side. use the tangent function to convert + # the infinity bound to a finite bound. The new bounded region + # is a subregion of the region bounded by -np.pi/2 and np.pi/2. 
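+            #
+            # Change of variables used below: with t = arctan(l), minimizing
+            #     g(t) = myfunc(np.tan(t))  over (arctan(bound[0]), arctan(bound[1]))
+            # is equivalent to minimizing myfunc(l) over the original interval
+            # (which is unbounded on one side), because tan is monotone on
+            # (-pi/2, pi/2) and np.arctan(+/-inf) returns +/-pi/2, so the
+            # transformed interval is always finite; the minimizer is mapped
+            # back through np.tan(res.x).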
+ bound = np.arctan(bound[0]), np.arctan(bound[1]) + res = _minimize_scalar_bounded( + lambda x: myfunc(np.tan(x)), + bound, + xatol=tol / 100) + xi = np.tan(res.x) * xi + return res.fun, p + xi, xi + + +def fmin_powell(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None, + maxfun=None, full_output=0, disp=1, retall=0, callback=None, + direc=None): + """ + Minimize a function using modified Powell's method. + + This method only uses function values, not derivatives. + + Parameters + ---------- + func : callable f(x,*args) + Objective function to be minimized. + x0 : ndarray + Initial guess. + args : tuple, optional + Extra arguments passed to func. + xtol : float, optional + Line-search error tolerance. + ftol : float, optional + Relative error in ``func(xopt)`` acceptable for convergence. + maxiter : int, optional + Maximum number of iterations to perform. + maxfun : int, optional + Maximum number of function evaluations to make. + full_output : bool, optional + If True, ``fopt``, ``xi``, ``direc``, ``iter``, ``funcalls``, and + ``warnflag`` are returned. + disp : bool, optional + If True, print convergence messages. + retall : bool, optional + If True, return a list of the solution at each iteration. + callback : callable, optional + An optional user-supplied function, called after each + iteration. Called as ``callback(xk)``, where ``xk`` is the + current parameter vector. + direc : ndarray, optional + Initial fitting step and parameter order set as an (N, N) array, where N + is the number of fitting parameters in `x0`. Defaults to step size 1.0 + fitting all parameters simultaneously (``np.eye((N, N))``). To + prevent initial consideration of values in a step or to change initial + step size, set to 0 or desired step size in the Jth position in the Mth + block, where J is the position in `x0` and M is the desired evaluation + step, with steps being evaluated in index order. Step size and ordering + will change freely as minimization proceeds. + + Returns + ------- + xopt : ndarray + Parameter which minimizes `func`. + fopt : number + Value of function at minimum: ``fopt = func(xopt)``. + direc : ndarray + Current direction set. + iter : int + Number of iterations. + funcalls : int + Number of function calls made. + warnflag : int + Integer warning flag: + 1 : Maximum number of function evaluations. + 2 : Maximum number of iterations. + 3 : NaN result encountered. + 4 : The result is out of the provided bounds. + allvecs : list + List of solutions at each iteration. + + See also + -------- + minimize: Interface to unconstrained minimization algorithms for + multivariate functions. See the 'Powell' method in particular. + + Notes + ----- + Uses a modification of Powell's method to find the minimum of + a function of N variables. Powell's method is a conjugate + direction method. + + The algorithm has two loops. The outer loop merely iterates over the inner + loop. The inner loop minimizes over each current direction in the direction + set. At the end of the inner loop, if certain conditions are met, the + direction that gave the largest decrease is dropped and replaced with the + difference between the current estimated x and the estimated x from the + beginning of the inner-loop. + + The technical conditions for replacing the direction of greatest + increase amount to checking that + + 1. No further gain can be made along the direction of greatest increase + from that iteration. + 2. 
The direction of greatest increase accounted for a large sufficient + fraction of the decrease in the function value from that iteration of + the inner loop. + + References + ---------- + Powell M.J.D. (1964) An efficient method for finding the minimum of a + function of several variables without calculating derivatives, + Computer Journal, 7 (2):155-162. + + Press W., Teukolsky S.A., Vetterling W.T., and Flannery B.P.: + Numerical Recipes (any edition), Cambridge University Press + + Examples + -------- + >>> def f(x): + ... return x**2 + + >>> from scipy import optimize + + >>> minimum = optimize.fmin_powell(f, -1) + Optimization terminated successfully. + Current function value: 0.000000 + Iterations: 2 + Function evaluations: 16 + >>> minimum + array(0.0) + + """ + opts = {'xtol': xtol, + 'ftol': ftol, + 'maxiter': maxiter, + 'maxfev': maxfun, + 'disp': disp, + 'direc': direc, + 'return_all': retall} + + callback = _wrap_callback(callback) + res = _minimize_powell(func, x0, args, callback=callback, **opts) + + if full_output: + retlist = (res['x'], res['fun'], res['direc'], res['nit'], + res['nfev'], res['status']) + if retall: + retlist += (res['allvecs'], ) + return retlist + else: + if retall: + return res['x'], res['allvecs'] + else: + return res['x'] + + +def _minimize_powell(func, x0, args=(), callback=None, bounds=None, + xtol=1e-4, ftol=1e-4, maxiter=None, maxfev=None, + disp=False, direc=None, return_all=False, + **unknown_options): + """ + Minimization of scalar function of one or more variables using the + modified Powell algorithm. + + Parameters + ---------- + fun : callable + The objective function to be minimized:: + + fun(x, *args) -> float + + where ``x`` is a 1-D array with shape (n,) and ``args`` + is a tuple of the fixed parameters needed to completely + specify the function. + x0 : ndarray, shape (n,) + Initial guess. Array of real elements of size (n,), + where ``n`` is the number of independent variables. + args : tuple, optional + Extra arguments passed to the objective function and its + derivatives (`fun`, `jac` and `hess` functions). + method : str or callable, optional + The present documentation is specific to ``method='powell'``, but other + options are available. See documentation for `scipy.optimize.minimize`. + bounds : sequence or `Bounds`, optional + Bounds on decision variables. There are two ways to specify the bounds: + + 1. Instance of `Bounds` class. + 2. Sequence of ``(min, max)`` pairs for each element in `x`. None + is used to specify no bound. + + If bounds are not provided, then an unbounded line search will be used. + If bounds are provided and the initial guess is within the bounds, then + every function evaluation throughout the minimization procedure will be + within the bounds. If bounds are provided, the initial guess is outside + the bounds, and `direc` is full rank (or left to default), then some + function evaluations during the first iteration may be outside the + bounds, but every function evaluation after the first iteration will be + within the bounds. If `direc` is not full rank, then some parameters + may not be optimized and the solution is not guaranteed to be within + the bounds. + + options : dict, optional + A dictionary of solver options. All methods accept the following + generic options: + + maxiter : int + Maximum number of iterations to perform. Depending on the + method each iteration may use several function evaluations. + disp : bool + Set to True to print convergence messages. 
+ + See method-specific options for ``method='powell'`` below. + callback : callable, optional + Called after each iteration. The signature is:: + + callback(xk) + + where ``xk`` is the current parameter vector. + + Returns + ------- + res : OptimizeResult + The optimization result represented as a ``OptimizeResult`` object. + Important attributes are: ``x`` the solution array, ``success`` a + Boolean flag indicating if the optimizer exited successfully and + ``message`` which describes the cause of the termination. See + `OptimizeResult` for a description of other attributes. + + Options + ------- + disp : bool + Set to True to print convergence messages. + xtol : float + Relative error in solution `xopt` acceptable for convergence. + ftol : float + Relative error in ``fun(xopt)`` acceptable for convergence. + maxiter, maxfev : int + Maximum allowed number of iterations and function evaluations. + Will default to ``N*1000``, where ``N`` is the number of + variables, if neither `maxiter` or `maxfev` is set. If both + `maxiter` and `maxfev` are set, minimization will stop at the + first reached. + direc : ndarray + Initial set of direction vectors for the Powell method. + return_all : bool, optional + Set to True to return a list of the best solution at each of the + iterations. + """ + _check_unknown_options(unknown_options) + maxfun = maxfev + retall = return_all + + x = asarray(x0).flatten() + if retall: + allvecs = [x] + N = len(x) + # If neither are set, then set both to default + if maxiter is None and maxfun is None: + maxiter = N * 1000 + maxfun = N * 1000 + elif maxiter is None: + # Convert remaining Nones, to np.inf, unless the other is np.inf, in + # which case use the default to avoid unbounded iteration + if maxfun == np.inf: + maxiter = N * 1000 + else: + maxiter = np.inf + elif maxfun is None: + if maxiter == np.inf: + maxfun = N * 1000 + else: + maxfun = np.inf + + # we need to use a mutable object here that we can update in the + # wrapper function + fcalls, func = _wrap_scalar_function_maxfun_validation(func, args, maxfun) + + if direc is None: + direc = eye(N, dtype=float) + else: + direc = asarray(direc, dtype=float) + if np.linalg.matrix_rank(direc) != direc.shape[0]: + warnings.warn("direc input is not full rank, some parameters may " + "not be optimized", + OptimizeWarning, stacklevel=3) + + if bounds is None: + # don't make these arrays of all +/- inf. because + # _linesearch_powell will do an unnecessary check of all the elements. + # just keep them None, _linesearch_powell will not have to check + # all the elements. + lower_bound, upper_bound = None, None + else: + # bounds is standardized in _minimize.py. 
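+        # `bounds` is assumed here to arrive as a Bounds instance whose .lb
+        # and .ub are length-N arrays (unbounded entries are -inf/+inf);
+        # these two arrays are what _line_for_search later uses to clip each
+        # 1-D line search to the feasible interval.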
+ lower_bound, upper_bound = bounds.lb, bounds.ub + if np.any(lower_bound > x0) or np.any(x0 > upper_bound): + warnings.warn("Initial guess is not within the specified bounds", + OptimizeWarning, stacklevel=3) + + fval = func(x) + x1 = x.copy() + iter = 0 + while True: + try: + fx = fval + bigind = 0 + delta = 0.0 + for i in range(N): + direc1 = direc[i] + fx2 = fval + fval, x, direc1 = _linesearch_powell(func, x, direc1, + tol=xtol * 100, + lower_bound=lower_bound, + upper_bound=upper_bound, + fval=fval) + if (fx2 - fval) > delta: + delta = fx2 - fval + bigind = i + iter += 1 + if retall: + allvecs.append(x) + intermediate_result = OptimizeResult(x=x, fun=fval) + if _call_callback_maybe_halt(callback, intermediate_result): + break + bnd = ftol * (np.abs(fx) + np.abs(fval)) + 1e-20 + if 2.0 * (fx - fval) <= bnd: + break + if fcalls[0] >= maxfun: + break + if iter >= maxiter: + break + if np.isnan(fx) and np.isnan(fval): + # Ended up in a nan-region: bail out + break + + # Construct the extrapolated point + direc1 = x - x1 + x1 = x.copy() + # make sure that we don't go outside the bounds when extrapolating + if lower_bound is None and upper_bound is None: + lmax = 1 + else: + _, lmax = _line_for_search(x, direc1, lower_bound, upper_bound) + x2 = x + min(lmax, 1) * direc1 + fx2 = func(x2) + + if (fx > fx2): + t = 2.0*(fx + fx2 - 2.0*fval) + temp = (fx - fval - delta) + t *= temp*temp + temp = fx - fx2 + t -= delta*temp*temp + if t < 0.0: + fval, x, direc1 = _linesearch_powell( + func, x, direc1, + tol=xtol * 100, + lower_bound=lower_bound, + upper_bound=upper_bound, + fval=fval + ) + if np.any(direc1): + direc[bigind] = direc[-1] + direc[-1] = direc1 + except _MaxFuncCallError: + break + + warnflag = 0 + msg = _status_message['success'] + # out of bounds is more urgent than exceeding function evals or iters, + # but I don't want to cause inconsistencies by changing the + # established warning flags for maxfev and maxiter, so the out of bounds + # warning flag becomes 3, but is checked for first. + if bounds and (np.any(lower_bound > x) or np.any(x > upper_bound)): + warnflag = 4 + msg = _status_message['out_of_bounds'] + elif fcalls[0] >= maxfun: + warnflag = 1 + msg = _status_message['maxfev'] + elif iter >= maxiter: + warnflag = 2 + msg = _status_message['maxiter'] + elif np.isnan(fval) or np.isnan(x).any(): + warnflag = 3 + msg = _status_message['nan'] + + if disp: + _print_success_message_or_warn(warnflag, msg, RuntimeWarning) + print(f" Current function value: {fval:f}") + print(" Iterations: %d" % iter) + print(" Function evaluations: %d" % fcalls[0]) + result = OptimizeResult(fun=fval, direc=direc, nit=iter, nfev=fcalls[0], + status=warnflag, success=(warnflag == 0), + message=msg, x=x) + if retall: + result['allvecs'] = allvecs + return result + + +def _endprint(x, flag, fval, maxfun, xtol, disp): + if flag == 0: + if disp > 1: + print("\nOptimization terminated successfully;\n" + "The returned value satisfies the termination criteria\n" + "(using xtol = ", xtol, ")") + return + + if flag == 1: + msg = ("\nMaximum number of function evaluations exceeded --- " + "increase maxfun argument.\n") + elif flag == 2: + msg = f"\n{_status_message['nan']}" + + _print_success_message_or_warn(flag, msg) + return + + +def brute(func, ranges, args=(), Ns=20, full_output=0, finish=fmin, + disp=False, workers=1): + """Minimize a function over a given range by brute force. 
+ + Uses the "brute force" method, i.e., computes the function's value + at each point of a multidimensional grid of points, to find the global + minimum of the function. + + The function is evaluated everywhere in the range with the datatype of the + first call to the function, as enforced by the ``vectorize`` NumPy + function. The value and type of the function evaluation returned when + ``full_output=True`` are affected in addition by the ``finish`` argument + (see Notes). + + The brute force approach is inefficient because the number of grid points + increases exponentially - the number of grid points to evaluate is + ``Ns ** len(x)``. Consequently, even with coarse grid spacing, even + moderately sized problems can take a long time to run, and/or run into + memory limitations. + + Parameters + ---------- + func : callable + The objective function to be minimized. Must be in the + form ``f(x, *args)``, where ``x`` is the argument in + the form of a 1-D array and ``args`` is a tuple of any + additional fixed parameters needed to completely specify + the function. + ranges : tuple + Each component of the `ranges` tuple must be either a + "slice object" or a range tuple of the form ``(low, high)``. + The program uses these to create the grid of points on which + the objective function will be computed. See `Note 2` for + more detail. + args : tuple, optional + Any additional fixed parameters needed to completely specify + the function. + Ns : int, optional + Number of grid points along the axes, if not otherwise + specified. See `Note2`. + full_output : bool, optional + If True, return the evaluation grid and the objective function's + values on it. + finish : callable, optional + An optimization function that is called with the result of brute force + minimization as initial guess. `finish` should take `func` and + the initial guess as positional arguments, and take `args` as + keyword arguments. It may additionally take `full_output` + and/or `disp` as keyword arguments. Use None if no "polishing" + function is to be used. See Notes for more details. + disp : bool, optional + Set to True to print convergence messages from the `finish` callable. + workers : int or map-like callable, optional + If `workers` is an int the grid is subdivided into `workers` + sections and evaluated in parallel (uses + `multiprocessing.Pool `). + Supply `-1` to use all cores available to the Process. + Alternatively supply a map-like callable, such as + `multiprocessing.Pool.map` for evaluating the grid in parallel. + This evaluation is carried out as ``workers(func, iterable)``. + Requires that `func` be pickleable. + + .. versionadded:: 1.3.0 + + Returns + ------- + x0 : ndarray + A 1-D array containing the coordinates of a point at which the + objective function had its minimum value. (See `Note 1` for + which point is returned.) + fval : float + Function value at the point `x0`. (Returned when `full_output` is + True.) + grid : tuple + Representation of the evaluation grid. It has the same + length as `x0`. (Returned when `full_output` is True.) + Jout : ndarray + Function values at each point of the evaluation + grid, i.e., ``Jout = func(*grid)``. (Returned + when `full_output` is True.) + + See Also + -------- + basinhopping, differential_evolution + + Notes + ----- + *Note 1*: The program finds the gridpoint at which the lowest value + of the objective function occurs. If `finish` is None, that is the + point returned. 
When the global minimum occurs within (or not very far + outside) the grid's boundaries, and the grid is fine enough, that + point will be in the neighborhood of the global minimum. + + However, users often employ some other optimization program to + "polish" the gridpoint values, i.e., to seek a more precise + (local) minimum near `brute's` best gridpoint. + The `brute` function's `finish` option provides a convenient way to do + that. Any polishing program used must take `brute's` output as its + initial guess as a positional argument, and take `brute's` input values + for `args` as keyword arguments, otherwise an error will be raised. + It may additionally take `full_output` and/or `disp` as keyword arguments. + + `brute` assumes that the `finish` function returns either an + `OptimizeResult` object or a tuple in the form: + ``(xmin, Jmin, ... , statuscode)``, where ``xmin`` is the minimizing + value of the argument, ``Jmin`` is the minimum value of the objective + function, "..." may be some other returned values (which are not used + by `brute`), and ``statuscode`` is the status code of the `finish` program. + + Note that when `finish` is not None, the values returned are those + of the `finish` program, *not* the gridpoint ones. Consequently, + while `brute` confines its search to the input grid points, + the `finish` program's results usually will not coincide with any + gridpoint, and may fall outside the grid's boundary. Thus, if a + minimum only needs to be found over the provided grid points, make + sure to pass in ``finish=None``. + + *Note 2*: The grid of points is a `numpy.mgrid` object. + For `brute` the `ranges` and `Ns` inputs have the following effect. + Each component of the `ranges` tuple can be either a slice object or a + two-tuple giving a range of values, such as (0, 5). If the component is a + slice object, `brute` uses it directly. If the component is a two-tuple + range, `brute` internally converts it to a slice object that interpolates + `Ns` points from its low-value to its high-value, inclusive. + + Examples + -------- + We illustrate the use of `brute` to seek the global minimum of a function + of two variables that is given as the sum of a positive-definite + quadratic and two deep "Gaussian-shaped" craters. Specifically, define + the objective function `f` as the sum of three other functions, + ``f = f1 + f2 + f3``. We suppose each of these has a signature + ``(z, *params)``, where ``z = (x, y)``, and ``params`` and the functions + are as defined below. + + >>> import numpy as np + >>> params = (2, 3, 7, 8, 9, 10, 44, -1, 2, 26, 1, -2, 0.5) + >>> def f1(z, *params): + ... x, y = z + ... a, b, c, d, e, f, g, h, i, j, k, l, scale = params + ... return (a * x**2 + b * x * y + c * y**2 + d*x + e*y + f) + + >>> def f2(z, *params): + ... x, y = z + ... a, b, c, d, e, f, g, h, i, j, k, l, scale = params + ... return (-g*np.exp(-((x-h)**2 + (y-i)**2) / scale)) + + >>> def f3(z, *params): + ... x, y = z + ... a, b, c, d, e, f, g, h, i, j, k, l, scale = params + ... return (-j*np.exp(-((x-k)**2 + (y-l)**2) / scale)) + + >>> def f(z, *params): + ... return f1(z, *params) + f2(z, *params) + f3(z, *params) + + Thus, the objective function may have local minima near the minimum + of each of the three functions of which it is composed. 
To + use `fmin` to polish its gridpoint result, we may then continue as + follows: + + >>> rranges = (slice(-4, 4, 0.25), slice(-4, 4, 0.25)) + >>> from scipy import optimize + >>> resbrute = optimize.brute(f, rranges, args=params, full_output=True, + ... finish=optimize.fmin) + >>> resbrute[0] # global minimum + array([-1.05665192, 1.80834843]) + >>> resbrute[1] # function value at global minimum + -3.4085818767 + + Note that if `finish` had been set to None, we would have gotten the + gridpoint [-1.0 1.75] where the rounded function value is -2.892. + + """ + N = len(ranges) + if N > 40: + raise ValueError("Brute Force not possible with more " + "than 40 variables.") + lrange = list(ranges) + for k in range(N): + if not isinstance(lrange[k], slice): + if len(lrange[k]) < 3: + lrange[k] = tuple(lrange[k]) + (complex(Ns),) + lrange[k] = slice(*lrange[k]) + if (N == 1): + lrange = lrange[0] + + grid = np.mgrid[lrange] + + # obtain an array of parameters that is iterable by a map-like callable + inpt_shape = grid.shape + if (N > 1): + grid = np.reshape(grid, (inpt_shape[0], np.prod(inpt_shape[1:]))).T + + if not np.iterable(args): + args = (args,) + + wrapped_func = _Brute_Wrapper(func, args) + + # iterate over input arrays, possibly in parallel + with MapWrapper(pool=workers) as mapper: + Jout = np.array(list(mapper(wrapped_func, grid))) + if (N == 1): + grid = (grid,) + Jout = np.squeeze(Jout) + elif (N > 1): + Jout = np.reshape(Jout, inpt_shape[1:]) + grid = np.reshape(grid.T, inpt_shape) + + Nshape = shape(Jout) + + indx = argmin(Jout.ravel(), axis=-1) + Nindx = np.empty(N, int) + xmin = np.empty(N, float) + for k in range(N - 1, -1, -1): + thisN = Nshape[k] + Nindx[k] = indx % Nshape[k] + indx = indx // thisN + for k in range(N): + xmin[k] = grid[k][tuple(Nindx)] + + Jmin = Jout[tuple(Nindx)] + if (N == 1): + grid = grid[0] + xmin = xmin[0] + + if callable(finish): + # set up kwargs for `finish` function + finish_args = _getfullargspec(finish).args + finish_kwargs = dict() + if 'full_output' in finish_args: + finish_kwargs['full_output'] = 1 + if 'disp' in finish_args: + finish_kwargs['disp'] = disp + elif 'options' in finish_args: + # pass 'disp' as `options` + # (e.g., if `finish` is `minimize`) + finish_kwargs['options'] = {'disp': disp} + + # run minimizer + res = finish(func, xmin, args=args, **finish_kwargs) + + if isinstance(res, OptimizeResult): + xmin = res.x + Jmin = res.fun + success = res.success + else: + xmin = res[0] + Jmin = res[1] + success = res[-1] == 0 + if not success: + if disp: + warnings.warn("Either final optimization did not succeed or `finish` " + "does not return `statuscode` as its last argument.", + RuntimeWarning, stacklevel=2) + + if full_output: + return xmin, Jmin, grid, Jout + else: + return xmin + + +class _Brute_Wrapper: + """ + Object to wrap user cost function for optimize.brute, allowing picklability + """ + + def __init__(self, f, args): + self.f = f + self.args = [] if args is None else args + + def __call__(self, x): + # flatten needed for one dimensional case. + return self.f(np.asarray(x).flatten(), *self.args) + + +def show_options(solver=None, method=None, disp=True): + """ + Show documentation for additional options of optimization solvers. + + These are method-specific options that can be supplied through the + ``options`` dict. + + Parameters + ---------- + solver : str + Type of optimization solver. One of 'minimize', 'minimize_scalar', + 'root', 'root_scalar', 'linprog', or 'quadratic_assignment'. 
+ method : str, optional + If not given, shows all methods of the specified solver. Otherwise, + show only the options for the specified method. Valid values + corresponds to methods' names of respective solver (e.g., 'BFGS' for + 'minimize'). + disp : bool, optional + Whether to print the result rather than returning it. + + Returns + ------- + text + Either None (for disp=True) or the text string (disp=False) + + Notes + ----- + The solver-specific methods are: + + `scipy.optimize.minimize` + + - :ref:`Nelder-Mead ` + - :ref:`Powell ` + - :ref:`CG ` + - :ref:`BFGS ` + - :ref:`Newton-CG ` + - :ref:`L-BFGS-B ` + - :ref:`TNC ` + - :ref:`COBYLA ` + - :ref:`COBYQA ` + - :ref:`SLSQP ` + - :ref:`dogleg ` + - :ref:`trust-ncg ` + + `scipy.optimize.root` + + - :ref:`hybr ` + - :ref:`lm ` + - :ref:`broyden1 ` + - :ref:`broyden2 ` + - :ref:`anderson ` + - :ref:`linearmixing ` + - :ref:`diagbroyden ` + - :ref:`excitingmixing ` + - :ref:`krylov ` + - :ref:`df-sane ` + + `scipy.optimize.minimize_scalar` + + - :ref:`brent ` + - :ref:`golden ` + - :ref:`bounded ` + + `scipy.optimize.root_scalar` + + - :ref:`bisect ` + - :ref:`brentq ` + - :ref:`brenth ` + - :ref:`ridder ` + - :ref:`toms748 ` + - :ref:`newton ` + - :ref:`secant ` + - :ref:`halley ` + + `scipy.optimize.linprog` + + - :ref:`simplex ` + - :ref:`interior-point ` + - :ref:`revised simplex ` + - :ref:`highs ` + - :ref:`highs-ds ` + - :ref:`highs-ipm ` + + `scipy.optimize.quadratic_assignment` + + - :ref:`faq ` + - :ref:`2opt ` + + Examples + -------- + We can print documentations of a solver in stdout: + + >>> from scipy.optimize import show_options + >>> show_options(solver="minimize") + ... + + Specifying a method is possible: + + >>> show_options(solver="minimize", method="Nelder-Mead") + ... + + We can also get the documentations as a string: + + >>> show_options(solver="minimize", method="Nelder-Mead", disp=False) + Minimization of scalar function of one or more variables using the ... + + """ + import textwrap + + doc_routines = { + 'minimize': ( + ('bfgs', 'scipy.optimize._optimize._minimize_bfgs'), + ('cg', 'scipy.optimize._optimize._minimize_cg'), + ('cobyla', 'scipy.optimize._cobyla_py._minimize_cobyla'), + ('cobyqa', 'scipy.optimize._cobyqa_py._minimize_cobyqa'), + ('dogleg', 'scipy.optimize._trustregion_dogleg._minimize_dogleg'), + ('l-bfgs-b', 'scipy.optimize._lbfgsb_py._minimize_lbfgsb'), + ('nelder-mead', 'scipy.optimize._optimize._minimize_neldermead'), + ('newton-cg', 'scipy.optimize._optimize._minimize_newtoncg'), + ('powell', 'scipy.optimize._optimize._minimize_powell'), + ('slsqp', 'scipy.optimize._slsqp_py._minimize_slsqp'), + ('tnc', 'scipy.optimize._tnc._minimize_tnc'), + ('trust-ncg', + 'scipy.optimize._trustregion_ncg._minimize_trust_ncg'), + ('trust-constr', + 'scipy.optimize._trustregion_constr.' 
+ '_minimize_trustregion_constr'), + ('trust-exact', + 'scipy.optimize._trustregion_exact._minimize_trustregion_exact'), + ('trust-krylov', + 'scipy.optimize._trustregion_krylov._minimize_trust_krylov'), + ), + 'root': ( + ('hybr', 'scipy.optimize._minpack_py._root_hybr'), + ('lm', 'scipy.optimize._root._root_leastsq'), + ('broyden1', 'scipy.optimize._root._root_broyden1_doc'), + ('broyden2', 'scipy.optimize._root._root_broyden2_doc'), + ('anderson', 'scipy.optimize._root._root_anderson_doc'), + ('diagbroyden', 'scipy.optimize._root._root_diagbroyden_doc'), + ('excitingmixing', 'scipy.optimize._root._root_excitingmixing_doc'), + ('linearmixing', 'scipy.optimize._root._root_linearmixing_doc'), + ('krylov', 'scipy.optimize._root._root_krylov_doc'), + ('df-sane', 'scipy.optimize._spectral._root_df_sane'), + ), + 'root_scalar': ( + ('bisect', 'scipy.optimize._root_scalar._root_scalar_bisect_doc'), + ('brentq', 'scipy.optimize._root_scalar._root_scalar_brentq_doc'), + ('brenth', 'scipy.optimize._root_scalar._root_scalar_brenth_doc'), + ('ridder', 'scipy.optimize._root_scalar._root_scalar_ridder_doc'), + ('toms748', 'scipy.optimize._root_scalar._root_scalar_toms748_doc'), + ('secant', 'scipy.optimize._root_scalar._root_scalar_secant_doc'), + ('newton', 'scipy.optimize._root_scalar._root_scalar_newton_doc'), + ('halley', 'scipy.optimize._root_scalar._root_scalar_halley_doc'), + ), + 'linprog': ( + ('simplex', 'scipy.optimize._linprog._linprog_simplex_doc'), + ('interior-point', 'scipy.optimize._linprog._linprog_ip_doc'), + ('revised simplex', 'scipy.optimize._linprog._linprog_rs_doc'), + ('highs-ipm', 'scipy.optimize._linprog._linprog_highs_ipm_doc'), + ('highs-ds', 'scipy.optimize._linprog._linprog_highs_ds_doc'), + ('highs', 'scipy.optimize._linprog._linprog_highs_doc'), + ), + 'quadratic_assignment': ( + ('faq', 'scipy.optimize._qap._quadratic_assignment_faq'), + ('2opt', 'scipy.optimize._qap._quadratic_assignment_2opt'), + ), + 'minimize_scalar': ( + ('brent', 'scipy.optimize._optimize._minimize_scalar_brent'), + ('bounded', 'scipy.optimize._optimize._minimize_scalar_bounded'), + ('golden', 'scipy.optimize._optimize._minimize_scalar_golden'), + ), + } + + if solver is None: + text = ["\n\n\n========\n", "minimize\n", "========\n"] + text.append(show_options('minimize', disp=False)) + text.extend(["\n\n===============\n", "minimize_scalar\n", + "===============\n"]) + text.append(show_options('minimize_scalar', disp=False)) + text.extend(["\n\n\n====\n", "root\n", + "====\n"]) + text.append(show_options('root', disp=False)) + text.extend(['\n\n\n=======\n', 'linprog\n', + '=======\n']) + text.append(show_options('linprog', disp=False)) + text = "".join(text) + else: + solver = solver.lower() + if solver not in doc_routines: + raise ValueError(f'Unknown solver {solver!r}') + + if method is None: + text = [] + for name, _ in doc_routines[solver]: + text.extend(["\n\n" + name, "\n" + "="*len(name) + "\n\n"]) + text.append(show_options(solver, name, disp=False)) + text = "".join(text) + else: + method = method.lower() + methods = dict(doc_routines[solver]) + if method not in methods: + raise ValueError(f"Unknown method {method!r}") + name = methods[method] + + # Import function object + parts = name.split('.') + mod_name = ".".join(parts[:-1]) + __import__(mod_name) + obj = getattr(sys.modules[mod_name], parts[-1]) + + # Get doc + doc = obj.__doc__ + if doc is not None: + text = textwrap.dedent(doc).strip() + else: + text = "" + + if disp: + print(text) + return + else: + return text diff --git 
a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_qap.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_qap.py new file mode 100644 index 0000000000000000000000000000000000000000..03fe3b128c066adb21406021ec2e34effbf65703 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_qap.py @@ -0,0 +1,760 @@ +import numpy as np +import operator +import warnings +import numbers +from . import (linear_sum_assignment, OptimizeResult) +from ._optimize import _check_unknown_options + +from scipy._lib._util import check_random_state +import itertools + +QUADRATIC_ASSIGNMENT_METHODS = ['faq', '2opt'] + + +def quadratic_assignment(A, B, method="faq", options=None): + r""" + Approximates solution to the quadratic assignment problem and + the graph matching problem. + + Quadratic assignment solves problems of the following form: + + .. math:: + + \min_P & \ {\ \text{trace}(A^T P B P^T)}\\ + \mbox{s.t. } & {P \ \epsilon \ \mathcal{P}}\\ + + where :math:`\mathcal{P}` is the set of all permutation matrices, + and :math:`A` and :math:`B` are square matrices. + + Graph matching tries to *maximize* the same objective function. + This algorithm can be thought of as finding the alignment of the + nodes of two graphs that minimizes the number of induced edge + disagreements, or, in the case of weighted graphs, the sum of squared + edge weight differences. + + Note that the quadratic assignment problem is NP-hard. The results given + here are approximations and are not guaranteed to be optimal. + + + Parameters + ---------- + A : 2-D array, square + The square matrix :math:`A` in the objective function above. + + B : 2-D array, square + The square matrix :math:`B` in the objective function above. + + method : str in {'faq', '2opt'} (default: 'faq') + The algorithm used to solve the problem. + :ref:`'faq' ` (default) and + :ref:`'2opt' ` are available. + + options : dict, optional + A dictionary of solver options. All solvers support the following: + + maximize : bool (default: False) + Maximizes the objective function if ``True``. + + partial_match : 2-D array of integers, optional (default: None) + Fixes part of the matching. Also known as a "seed" [2]_. + + Each row of `partial_match` specifies a pair of matched nodes: + node ``partial_match[i, 0]`` of `A` is matched to node + ``partial_match[i, 1]`` of `B`. The array has shape ``(m, 2)``, + where ``m`` is not greater than the number of nodes, :math:`n`. + + rng : `numpy.random.Generator`, optional + Pseudorandom number generator state. When `rng` is None, a new + `numpy.random.Generator` is created using entropy from the + operating system. Types other than `numpy.random.Generator` are + passed to `numpy.random.default_rng` to instantiate a ``Generator``. + + .. versionchanged:: 1.15.0 + As part of the `SPEC-007 `_ + transition from use of `numpy.random.RandomState` to + `numpy.random.Generator` is occurring. Supplying + `np.random.RandomState` to this function will now emit a + `DeprecationWarning`. In SciPy 1.17 its use will raise an exception. + In addition relying on global state using `np.random.seed` + will emit a `FutureWarning`. In SciPy 1.17 the global random number + generator will no longer be used. + Use of an int-like seed will raise a `FutureWarning`, in SciPy 1.17 it + will be normalized via `np.random.default_rng` rather than + `np.random.RandomState`. 
+ + For method-specific options, see + :func:`show_options('quadratic_assignment') `. + + Returns + ------- + res : OptimizeResult + `OptimizeResult` containing the following fields. + + col_ind : 1-D array + Column indices corresponding to the best permutation found of the + nodes of `B`. + fun : float + The objective value of the solution. + nit : int + The number of iterations performed during optimization. + + Notes + ----- + The default method :ref:`'faq' ` uses the Fast + Approximate QAP algorithm [1]_; it typically offers the best combination of + speed and accuracy. + Method :ref:`'2opt' ` can be computationally expensive, + but may be a useful alternative, or it can be used to refine the solution + returned by another method. + + References + ---------- + .. [1] J.T. Vogelstein, J.M. Conroy, V. Lyzinski, L.J. Podrazik, + S.G. Kratzer, E.T. Harley, D.E. Fishkind, R.J. Vogelstein, and + C.E. Priebe, "Fast approximate quadratic programming for graph + matching," PLOS one, vol. 10, no. 4, p. e0121002, 2015, + :doi:`10.1371/journal.pone.0121002` + + .. [2] D. Fishkind, S. Adali, H. Patsolic, L. Meng, D. Singh, V. Lyzinski, + C. Priebe, "Seeded graph matching", Pattern Recognit. 87 (2019): + 203-215, :doi:`10.1016/j.patcog.2018.09.014` + + .. [3] "2-opt," Wikipedia. + https://en.wikipedia.org/wiki/2-opt + + Examples + -------- + >>> import numpy as np + >>> from scipy.optimize import quadratic_assignment + >>> rng = np.random.default_rng() + >>> A = np.array([[0, 80, 150, 170], [80, 0, 130, 100], + ... [150, 130, 0, 120], [170, 100, 120, 0]]) + >>> B = np.array([[0, 5, 2, 7], [0, 0, 3, 8], + ... [0, 0, 0, 3], [0, 0, 0, 0]]) + >>> res = quadratic_assignment(A, B, options={'rng': rng}) + >>> print(res) + fun: 3260 + col_ind: [0 3 2 1] + nit: 9 + + The see the relationship between the returned ``col_ind`` and ``fun``, + use ``col_ind`` to form the best permutation matrix found, then evaluate + the objective function :math:`f(P) = trace(A^T P B P^T )`. + + >>> perm = res['col_ind'] + >>> P = np.eye(len(A), dtype=int)[perm] + >>> fun = np.trace(A.T @ P @ B @ P.T) + >>> print(fun) + 3260 + + Alternatively, to avoid constructing the permutation matrix explicitly, + directly permute the rows and columns of the distance matrix. + + >>> fun = np.trace(A.T @ B[perm][:, perm]) + >>> print(fun) + 3260 + + Although not guaranteed in general, ``quadratic_assignment`` happens to + have found the globally optimal solution. + + >>> from itertools import permutations + >>> perm_opt, fun_opt = None, np.inf + >>> for perm in permutations([0, 1, 2, 3]): + ... perm = np.array(perm) + ... fun = np.trace(A.T @ B[perm][:, perm]) + ... if fun < fun_opt: + ... fun_opt, perm_opt = fun, perm + >>> print(np.array_equal(perm_opt, res['col_ind'])) + True + + Here is an example for which the default method, + :ref:`'faq' `, does not find the global optimum. + + >>> A = np.array([[0, 5, 8, 6], [5, 0, 5, 1], + ... [8, 5, 0, 2], [6, 1, 2, 0]]) + >>> B = np.array([[0, 1, 8, 4], [1, 0, 5, 2], + ... [8, 5, 0, 5], [4, 2, 5, 0]]) + >>> res = quadratic_assignment(A, B, options={'rng': rng}) + >>> print(res) + fun: 178 + col_ind: [1 0 3 2] + nit: 13 + + If accuracy is important, consider using :ref:`'2opt' ` + to refine the solution. + + >>> guess = np.array([np.arange(len(A)), res.col_ind]).T + >>> res = quadratic_assignment(A, B, method="2opt", + ... 
options = {'rng': rng, 'partial_guess': guess}) + >>> print(res) + fun: 176 + col_ind: [1 2 3 0] + nit: 17 + + """ + + if options is None: + options = {} + + method = method.lower() + methods = {"faq": _quadratic_assignment_faq, + "2opt": _quadratic_assignment_2opt} + if method not in methods: + raise ValueError(f"method {method} must be in {methods}.") + + _spec007_transition(options.get("rng", None)) + res = methods[method](A, B, **options) + return res + + +def _spec007_transition(rng): + if isinstance(rng, np.random.RandomState): + warnings.warn( + "Use of `RandomState` with `quadratic_assignment` is deprecated" + " and will result in an exception in SciPy 1.17", + DeprecationWarning, + stacklevel=2 + ) + if ((rng is None or rng is np.random) and + np.random.mtrand._rand._bit_generator._seed_seq is None): + warnings.warn( + "The NumPy global RNG was seeded by calling `np.random.seed`." + " From SciPy 1.17, this function will no longer use the global RNG.", + FutureWarning, + stacklevel=2 + ) + if isinstance(rng, numbers.Integral | np.integer): + warnings.warn( + "The behavior when the rng option is an integer is changing: the value" + " will be normalized using np.random.default_rng beginning in SciPy 1.17," + " and the resulting Generator will be used to generate random numbers.", + FutureWarning, + stacklevel=2 + ) + + +def _calc_score(A, B, perm): + # equivalent to objective function but avoids matmul + return np.sum(A * B[perm][:, perm]) + + +def _common_input_validation(A, B, partial_match): + A = np.atleast_2d(A) + B = np.atleast_2d(B) + + if partial_match is None: + partial_match = np.array([[], []]).T + partial_match = np.atleast_2d(partial_match).astype(int) + + msg = None + if A.shape[0] != A.shape[1]: + msg = "`A` must be square" + elif B.shape[0] != B.shape[1]: + msg = "`B` must be square" + elif A.ndim != 2 or B.ndim != 2: + msg = "`A` and `B` must have exactly two dimensions" + elif A.shape != B.shape: + msg = "`A` and `B` matrices must be of equal size" + elif partial_match.shape[0] > A.shape[0]: + msg = "`partial_match` can have only as many seeds as there are nodes" + elif partial_match.shape[1] != 2: + msg = "`partial_match` must have two columns" + elif partial_match.ndim != 2: + msg = "`partial_match` must have exactly two dimensions" + elif (partial_match < 0).any(): + msg = "`partial_match` must contain only positive indices" + elif (partial_match >= len(A)).any(): + msg = "`partial_match` entries must be less than number of nodes" + elif (not len(set(partial_match[:, 0])) == len(partial_match[:, 0]) or + not len(set(partial_match[:, 1])) == len(partial_match[:, 1])): + msg = "`partial_match` column entries must be unique" + + if msg is not None: + raise ValueError(msg) + + return A, B, partial_match + + +def _quadratic_assignment_faq(A, B, + maximize=False, partial_match=None, rng=None, + P0="barycenter", shuffle_input=False, maxiter=30, + tol=0.03, **unknown_options): + r"""Solve the quadratic assignment problem (approximately). + + This function solves the Quadratic Assignment Problem (QAP) and the + Graph Matching Problem (GMP) using the Fast Approximate QAP Algorithm + (FAQ) [1]_. + + Quadratic assignment solves problems of the following form: + + .. math:: + + \min_P & \ {\ \text{trace}(A^T P B P^T)}\\ + \mbox{s.t. } & {P \ \epsilon \ \mathcal{P}}\\ + + where :math:`\mathcal{P}` is the set of all permutation matrices, + and :math:`A` and :math:`B` are square matrices. + + Graph matching tries to *maximize* the same objective function. 
+ This algorithm can be thought of as finding the alignment of the + nodes of two graphs that minimizes the number of induced edge + disagreements, or, in the case of weighted graphs, the sum of squared + edge weight differences. + + Note that the quadratic assignment problem is NP-hard. The results given + here are approximations and are not guaranteed to be optimal. + + Parameters + ---------- + A : 2-D array, square + The square matrix :math:`A` in the objective function above. + B : 2-D array, square + The square matrix :math:`B` in the objective function above. + method : str in {'faq', '2opt'} (default: 'faq') + The algorithm used to solve the problem. This is the method-specific + documentation for 'faq'. + :ref:`'2opt' ` is also available. + + Options + ------- + maximize : bool (default: False) + Maximizes the objective function if ``True``. + partial_match : 2-D array of integers, optional (default: None) + Fixes part of the matching. Also known as a "seed" [2]_. + + Each row of `partial_match` specifies a pair of matched nodes: + node ``partial_match[i, 0]`` of `A` is matched to node + ``partial_match[i, 1]`` of `B`. The array has shape ``(m, 2)``, where + ``m`` is not greater than the number of nodes, :math:`n`. + + rng : {None, int, `numpy.random.Generator`}, optional + Pseudorandom number generator state. See `quadratic_assignment` for details. + P0 : 2-D array, "barycenter", or "randomized" (default: "barycenter") + Initial position. Must be a doubly-stochastic matrix [3]_. + + If the initial position is an array, it must be a doubly stochastic + matrix of size :math:`m' \times m'` where :math:`m' = n - m`. + + If ``"barycenter"`` (default), the initial position is the barycenter + of the Birkhoff polytope (the space of doubly stochastic matrices). + This is a :math:`m' \times m'` matrix with all entries equal to + :math:`1 / m'`. + + If ``"randomized"`` the initial search position is + :math:`P_0 = (J + K) / 2`, where :math:`J` is the barycenter and + :math:`K` is a random doubly stochastic matrix. + shuffle_input : bool (default: False) + Set to `True` to resolve degenerate gradients randomly. For + non-degenerate gradients this option has no effect. + maxiter : int, positive (default: 30) + Integer specifying the max number of Frank-Wolfe iterations performed. + tol : float (default: 0.03) + Tolerance for termination. Frank-Wolfe iteration terminates when + :math:`\frac{||P_{i}-P_{i+1}||_F}{\sqrt{m'}} \leq tol`, + where :math:`i` is the iteration number. + + Returns + ------- + res : OptimizeResult + `OptimizeResult` containing the following fields. + + col_ind : 1-D array + Column indices corresponding to the best permutation found of the + nodes of `B`. + fun : float + The objective value of the solution. + nit : int + The number of Frank-Wolfe iterations performed. + + Notes + ----- + The algorithm may be sensitive to the initial permutation matrix (or + search "position") due to the possibility of several local minima + within the feasible region. A barycenter initialization is more likely to + result in a better solution than a single random initialization. However, + calling ``quadratic_assignment`` several times with different random + initializations may result in a better optimum at the cost of longer + total execution time. + + Examples + -------- + As mentioned above, a barycenter initialization often results in a better + solution than a single random initialization. 
+ + >>> from scipy.optimize import quadratic_assignment + >>> import numpy as np + >>> rng = np.random.default_rng() + >>> n = 15 + >>> A = rng.random((n, n)) + >>> B = rng.random((n, n)) + >>> options = {"rng": rng} + >>> res = quadratic_assignment(A, B, options=options) # FAQ is default method + >>> print(res.fun) + 47.797048706380636 # may vary + + >>> options = {"rng": rng, "P0": "randomized"} # use randomized initialization + >>> res = quadratic_assignment(A, B, options=options) + >>> print(res.fun) + 47.37287069769966 # may vary + + However, consider running from several randomized initializations and + keeping the best result. + + >>> res = min([quadratic_assignment(A, B, options=options) + ... for i in range(30)], key=lambda x: x.fun) + >>> print(res.fun) + 46.55974835248574 # may vary + + The '2-opt' method can be used to attempt to refine the results. + + >>> options = {"partial_guess": np.array([np.arange(n), res.col_ind]).T, "rng": rng} + >>> res = quadratic_assignment(A, B, method="2opt", options=options) + >>> print(res.fun) + 46.55974835248574 # may vary + + References + ---------- + .. [1] J.T. Vogelstein, J.M. Conroy, V. Lyzinski, L.J. Podrazik, + S.G. Kratzer, E.T. Harley, D.E. Fishkind, R.J. Vogelstein, and + C.E. Priebe, "Fast approximate quadratic programming for graph + matching," PLOS one, vol. 10, no. 4, p. e0121002, 2015, + :doi:`10.1371/journal.pone.0121002` + + .. [2] D. Fishkind, S. Adali, H. Patsolic, L. Meng, D. Singh, V. Lyzinski, + C. Priebe, "Seeded graph matching", Pattern Recognit. 87 (2019): + 203-215, :doi:`10.1016/j.patcog.2018.09.014` + + .. [3] "Doubly stochastic Matrix," Wikipedia. + https://en.wikipedia.org/wiki/Doubly_stochastic_matrix + + """ + + _check_unknown_options(unknown_options) + + maxiter = operator.index(maxiter) + + # ValueError check + A, B, partial_match = _common_input_validation(A, B, partial_match) + + msg = None + if isinstance(P0, str) and P0 not in {'barycenter', 'randomized'}: + msg = "Invalid 'P0' parameter string" + elif maxiter <= 0: + msg = "'maxiter' must be a positive integer" + elif tol <= 0: + msg = "'tol' must be a positive float" + if msg is not None: + raise ValueError(msg) + + rng = check_random_state(rng) + n = len(A) # number of vertices in graphs + n_seeds = len(partial_match) # number of seeds + n_unseed = n - n_seeds + + # [1] Algorithm 1 Line 1 - choose initialization + if not isinstance(P0, str): + P0 = np.atleast_2d(P0) + if P0.shape != (n_unseed, n_unseed): + msg = "`P0` matrix must have shape m' x m', where m'=n-m" + elif ((P0 < 0).any() or not np.allclose(np.sum(P0, axis=0), 1) + or not np.allclose(np.sum(P0, axis=1), 1)): + msg = "`P0` matrix must be doubly stochastic" + if msg is not None: + raise ValueError(msg) + elif P0 == 'barycenter': + P0 = np.ones((n_unseed, n_unseed)) / n_unseed + elif P0 == 'randomized': + J = np.ones((n_unseed, n_unseed)) / n_unseed + # generate a nxn matrix where each entry is a random number [0, 1] + # would use rand, but Generators don't have it + # would use random, but old mtrand.RandomStates don't have it + K = _doubly_stochastic(rng.uniform(size=(n_unseed, n_unseed))) + P0 = (J + K) / 2 + + # check trivial cases + if n == 0 or n_seeds == n: + score = _calc_score(A, B, partial_match[:, 1]) + res = {"col_ind": partial_match[:, 1], "fun": score, "nit": 0} + return OptimizeResult(res) + + obj_func_scalar = 1 + if maximize: + obj_func_scalar = -1 + + nonseed_B = np.setdiff1d(range(n), partial_match[:, 1]) + if shuffle_input: + nonseed_B = rng.permutation(nonseed_B) + + 
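+    # Build permutations that list the seeded (fixed) vertices first, so that
+    # A and B can be partitioned below into seed/non-seed blocks of size
+    # n_seeds and n_unseed. E.g., with n = 4, shuffle_input=False and a single
+    # seed matching node 0 of A to node 2 of B, perm_A = [0, 1, 2, 3] and
+    # perm_B = [2, 0, 1, 3]; only the trailing n_unseed x n_unseed block is
+    # optimized over.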
nonseed_A = np.setdiff1d(range(n), partial_match[:, 0]) + perm_A = np.concatenate([partial_match[:, 0], nonseed_A]) + perm_B = np.concatenate([partial_match[:, 1], nonseed_B]) + + # definitions according to Seeded Graph Matching [2]. + A11, A12, A21, A22 = _split_matrix(A[perm_A][:, perm_A], n_seeds) + B11, B12, B21, B22 = _split_matrix(B[perm_B][:, perm_B], n_seeds) + const_sum = A21 @ B21.T + A12.T @ B12 + + P = P0 + # [1] Algorithm 1 Line 2 - loop while stopping criteria not met + for n_iter in range(1, maxiter+1): + # [1] Algorithm 1 Line 3 - compute the gradient of f(P) = -tr(APB^tP^t) + grad_fp = (const_sum + A22 @ P @ B22.T + A22.T @ P @ B22) + # [1] Algorithm 1 Line 4 - get direction Q by solving Eq. 8 + _, cols = linear_sum_assignment(grad_fp, maximize=maximize) + Q = np.eye(n_unseed)[cols] + + # [1] Algorithm 1 Line 5 - compute the step size + # Noting that e.g. trace(Ax) = trace(A)*x, expand and re-collect + # terms as ax**2 + bx + c. c does not affect location of minimum + # and can be ignored. Also, note that trace(A@B) = (A.T*B).sum(); + # apply where possible for efficiency. + R = P - Q + b21 = ((R.T @ A21) * B21).sum() + b12 = ((R.T @ A12.T) * B12.T).sum() + AR22 = A22.T @ R + BR22 = B22 @ R.T + b22a = (AR22 * B22.T[cols]).sum() + b22b = (A22 * BR22[cols]).sum() + a = (AR22.T * BR22).sum() + b = b21 + b12 + b22a + b22b + # critical point of ax^2 + bx + c is at x = -d/(2*e) + # if a * obj_func_scalar > 0, it is a minimum + # if minimum is not in [0, 1], only endpoints need to be considered + if a*obj_func_scalar > 0 and 0 <= -b/(2*a) <= 1: + alpha = -b/(2*a) + else: + alpha = np.argmin([0, (b + a)*obj_func_scalar]) + + # [1] Algorithm 1 Line 6 - Update P + P_i1 = alpha * P + (1 - alpha) * Q + if np.linalg.norm(P - P_i1) / np.sqrt(n_unseed) < tol: + P = P_i1 + break + P = P_i1 + # [1] Algorithm 1 Line 7 - end main loop + + # [1] Algorithm 1 Line 8 - project onto the set of permutation matrices + _, col = linear_sum_assignment(P, maximize=True) + perm = np.concatenate((np.arange(n_seeds), col + n_seeds)) + + unshuffled_perm = np.zeros(n, dtype=int) + unshuffled_perm[perm_A] = perm_B[perm] + + score = _calc_score(A, B, unshuffled_perm) + res = {"col_ind": unshuffled_perm, "fun": score, "nit": n_iter} + return OptimizeResult(res) + + +def _split_matrix(X, n): + # definitions according to Seeded Graph Matching [2]. + upper, lower = X[:n], X[n:] + return upper[:, :n], upper[:, n:], lower[:, :n], lower[:, n:] + + +def _doubly_stochastic(P, tol=1e-3): + # Adapted from @btaba implementation + # https://github.com/btaba/sinkhorn_knopp + # of Sinkhorn-Knopp algorithm + # https://projecteuclid.org/euclid.pjm/1102992505 + + max_iter = 1000 + c = 1 / P.sum(axis=0) + r = 1 / (P @ c) + P_eps = P + + for it in range(max_iter): + if ((np.abs(P_eps.sum(axis=1) - 1) < tol).all() and + (np.abs(P_eps.sum(axis=0) - 1) < tol).all()): + # All column/row sums ~= 1 within threshold + break + + c = 1 / (r @ P) + r = 1 / (P @ c) + P_eps = r[:, None] * P * c + + return P_eps + + +def _quadratic_assignment_2opt(A, B, maximize=False, rng=None, + partial_match=None, + partial_guess=None, + **unknown_options): + r"""Solve the quadratic assignment problem (approximately). + + This function solves the Quadratic Assignment Problem (QAP) and the + Graph Matching Problem (GMP) using the 2-opt algorithm [1]_. + + Quadratic assignment solves problems of the following form: + + .. math:: + + \min_P & \ {\ \text{trace}(A^T P B P^T)}\\ + \mbox{s.t. 
} & {P \ \epsilon \ \mathcal{P}}\\ + + where :math:`\mathcal{P}` is the set of all permutation matrices, + and :math:`A` and :math:`B` are square matrices. + + Graph matching tries to *maximize* the same objective function. + This algorithm can be thought of as finding the alignment of the + nodes of two graphs that minimizes the number of induced edge + disagreements, or, in the case of weighted graphs, the sum of squared + edge weight differences. + + Note that the quadratic assignment problem is NP-hard. The results given + here are approximations and are not guaranteed to be optimal. + + Parameters + ---------- + A : 2-D array, square + The square matrix :math:`A` in the objective function above. + B : 2-D array, square + The square matrix :math:`B` in the objective function above. + method : str in {'faq', '2opt'} (default: 'faq') + The algorithm used to solve the problem. This is the method-specific + documentation for '2opt'. + :ref:`'faq' ` is also available. + + Options + ------- + maximize : bool (default: False) + Maximizes the objective function if ``True``. + rng : {None, int, `numpy.random.Generator`}, optional + Pseudorandom number generator state. See `quadratic_assignment` for details. + partial_match : 2-D array of integers, optional (default: None) + Fixes part of the matching. Also known as a "seed" [2]_. + + Each row of `partial_match` specifies a pair of matched nodes: node + ``partial_match[i, 0]`` of `A` is matched to node + ``partial_match[i, 1]`` of `B`. The array has shape ``(m, 2)``, + where ``m`` is not greater than the number of nodes, :math:`n`. + + .. note:: + `partial_match` must be sorted by the first column. + + partial_guess : 2-D array of integers, optional (default: None) + A guess for the matching between the two matrices. Unlike + `partial_match`, `partial_guess` does not fix the indices; they are + still free to be optimized. + + Each row of `partial_guess` specifies a pair of matched nodes: node + ``partial_guess[i, 0]`` of `A` is matched to node + ``partial_guess[i, 1]`` of `B`. The array has shape ``(m, 2)``, + where ``m`` is not greater than the number of nodes, :math:`n`. + + .. note:: + `partial_guess` must be sorted by the first column. + + Returns + ------- + res : OptimizeResult + `OptimizeResult` containing the following fields. + + col_ind : 1-D array + Column indices corresponding to the best permutation found of the + nodes of `B`. + fun : float + The objective value of the solution. + nit : int + The number of iterations performed during optimization. + + Notes + ----- + This is a greedy algorithm that works similarly to bubble sort: beginning + with an initial permutation, it iteratively swaps pairs of indices to + improve the objective function until no such improvements are possible. + + References + ---------- + .. [1] "2-opt," Wikipedia. + https://en.wikipedia.org/wiki/2-opt + + .. [2] D. Fishkind, S. Adali, H. Patsolic, L. Meng, D. Singh, V. Lyzinski, + C. Priebe, "Seeded graph matching", Pattern Recognit. 
87 (2019): + 203-215, https://doi.org/10.1016/j.patcog.2018.09.014 + + """ + _check_unknown_options(unknown_options) + rng = check_random_state(rng) + A, B, partial_match = _common_input_validation(A, B, partial_match) + + N = len(A) + # check trivial cases + if N == 0 or partial_match.shape[0] == N: + score = _calc_score(A, B, partial_match[:, 1]) + res = {"col_ind": partial_match[:, 1], "fun": score, "nit": 0} + return OptimizeResult(res) + + if partial_guess is None: + partial_guess = np.array([[], []]).T + partial_guess = np.atleast_2d(partial_guess).astype(int) + + msg = None + if partial_guess.shape[0] > A.shape[0]: + msg = ("`partial_guess` can have only as " + "many entries as there are nodes") + elif partial_guess.shape[1] != 2: + msg = "`partial_guess` must have two columns" + elif partial_guess.ndim != 2: + msg = "`partial_guess` must have exactly two dimensions" + elif (partial_guess < 0).any(): + msg = "`partial_guess` must contain only positive indices" + elif (partial_guess >= len(A)).any(): + msg = "`partial_guess` entries must be less than number of nodes" + elif (not len(set(partial_guess[:, 0])) == len(partial_guess[:, 0]) or + not len(set(partial_guess[:, 1])) == len(partial_guess[:, 1])): + msg = "`partial_guess` column entries must be unique" + if msg is not None: + raise ValueError(msg) + + fixed_rows = None + if partial_match.size or partial_guess.size: + # use partial_match and partial_guess for initial permutation, + # but randomly permute the rest. + guess_rows = np.zeros(N, dtype=bool) + guess_cols = np.zeros(N, dtype=bool) + fixed_rows = np.zeros(N, dtype=bool) + fixed_cols = np.zeros(N, dtype=bool) + perm = np.zeros(N, dtype=int) + + rg, cg = partial_guess.T + guess_rows[rg] = True + guess_cols[cg] = True + perm[guess_rows] = cg + + # match overrides guess + rf, cf = partial_match.T + fixed_rows[rf] = True + fixed_cols[cf] = True + perm[fixed_rows] = cf + + random_rows = ~fixed_rows & ~guess_rows + random_cols = ~fixed_cols & ~guess_cols + perm[random_rows] = rng.permutation(np.arange(N)[random_cols]) + else: + perm = rng.permutation(np.arange(N)) + + best_score = _calc_score(A, B, perm) + + i_free = np.arange(N) + if fixed_rows is not None: + i_free = i_free[~fixed_rows] + + better = operator.gt if maximize else operator.lt + n_iter = 0 + done = False + while not done: + # equivalent to nested for loops i in range(N), j in range(i, N) + for i, j in itertools.combinations_with_replacement(i_free, 2): + n_iter += 1 + perm[i], perm[j] = perm[j], perm[i] + score = _calc_score(A, B, perm) + if better(score, best_score): + best_score = score + break + # faster to swap back than to create a new list every time + perm[i], perm[j] = perm[j], perm[i] + else: # no swaps made + done = True + + res = {"col_ind": perm, "fun": best_score, "nit": n_iter} + return OptimizeResult(res) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_remove_redundancy.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_remove_redundancy.py new file mode 100644 index 0000000000000000000000000000000000000000..cb81ad1696b768d2304b2fc42a80cc9678cbde00 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_remove_redundancy.py @@ -0,0 +1,522 @@ +""" +Routines for removing redundant (linearly dependent) equations from linear +programming equality constraints. 
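+
+Four strategies are implemented below: an SVD-based routine
+(``_remove_redundancy_svd``), dense and sparse pivoting routines based on
+Andersen's algorithm (``_remove_redundancy_pivot_dense`` and
+``_remove_redundancy_pivot_sparse``), and an interpolative-decomposition
+routine (``_remove_redundancy_id``).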
+""" +# Author: Matt Haberland + +import numpy as np +from scipy.linalg import svd +from scipy.linalg.interpolative import interp_decomp +import scipy +from scipy.linalg.blas import dtrsm + + +def _row_count(A): + """ + Counts the number of nonzeros in each row of input array A. + Nonzeros are defined as any element with absolute value greater than + tol = 1e-13. This value should probably be an input to the function. + + Parameters + ---------- + A : 2-D array + An array representing a matrix + + Returns + ------- + rowcount : 1-D array + Number of nonzeros in each row of A + + """ + tol = 1e-13 + return np.array((abs(A) > tol).sum(axis=1)).flatten() + + +def _get_densest(A, eligibleRows): + """ + Returns the index of the densest row of A. Ignores rows that are not + eligible for consideration. + + Parameters + ---------- + A : 2-D array + An array representing a matrix + eligibleRows : 1-D logical array + Values indicate whether the corresponding row of A is eligible + to be considered + + Returns + ------- + i_densest : int + Index of the densest row in A eligible for consideration + + """ + rowCounts = _row_count(A) + return np.argmax(rowCounts * eligibleRows) + + +def _remove_zero_rows(A, b): + """ + Eliminates trivial equations from system of equations defined by Ax = b + and identifies trivial infeasibilities + + Parameters + ---------- + A : 2-D array + An array representing the left-hand side of a system of equations + b : 1-D array + An array representing the right-hand side of a system of equations + + Returns + ------- + A : 2-D array + An array representing the left-hand side of a system of equations + b : 1-D array + An array representing the right-hand side of a system of equations + status: int + An integer indicating the status of the removal operation + 0: No infeasibility identified + 2: Trivially infeasible + message : str + A string descriptor of the exit status of the optimization. + + """ + status = 0 + message = "" + i_zero = _row_count(A) == 0 + A = A[np.logical_not(i_zero), :] + if not np.allclose(b[i_zero], 0): + status = 2 + message = "There is a zero row in A_eq with a nonzero corresponding " \ + "entry in b_eq. The problem is infeasible." + b = b[np.logical_not(i_zero)] + return A, b, status, message + + +def bg_update_dense(plu, perm_r, v, j): + LU, p = plu + + vperm = v[perm_r] + u = dtrsm(1, LU, vperm, lower=1, diag=1) + LU[:j+1, j] = u[:j+1] + l = u[j+1:] + piv = LU[j, j] + LU[j+1:, j] += (l/piv) + return LU, p + + +def _remove_redundancy_pivot_dense(A, rhs, true_rank=None): + """ + Eliminates redundant equations from system of equations defined by Ax = b + and identifies infeasibilities. + + Parameters + ---------- + A : 2-D sparse matrix + An matrix representing the left-hand side of a system of equations + rhs : 1-D array + An array representing the right-hand side of a system of equations + + Returns + ------- + A : 2-D sparse matrix + A matrix representing the left-hand side of a system of equations + rhs : 1-D array + An array representing the right-hand side of a system of equations + status: int + An integer indicating the status of the system + 0: No infeasibility identified + 2: Trivially infeasible + message : str + A string descriptor of the exit status of the optimization. + + References + ---------- + .. [2] Andersen, Erling D. "Finding all linearly dependent rows in + large-scale linear programming." Optimization Methods and Software + 6.3 (1995): 219-227. 
+ + """ + tolapiv = 1e-8 + tolprimal = 1e-8 + status = 0 + message = "" + inconsistent = ("There is a linear combination of rows of A_eq that " + "results in zero, suggesting a redundant constraint. " + "However the same linear combination of b_eq is " + "nonzero, suggesting that the constraints conflict " + "and the problem is infeasible.") + A, rhs, status, message = _remove_zero_rows(A, rhs) + + if status != 0: + return A, rhs, status, message + + m, n = A.shape + + v = list(range(m)) # Artificial column indices. + b = list(v) # Basis column indices. + # This is better as a list than a set because column order of basis matrix + # needs to be consistent. + d = [] # Indices of dependent rows + perm_r = None + + A_orig = A + A = np.zeros((m, m + n), order='F') + np.fill_diagonal(A, 1) + A[:, m:] = A_orig + e = np.zeros(m) + + js_candidates = np.arange(m, m+n, dtype=int) # candidate columns for basis + # manual masking was faster than masked array + js_mask = np.ones(js_candidates.shape, dtype=bool) + + # Implements basic algorithm from [2] + # Uses some of the suggested improvements (removing zero rows and + # Bartels-Golub update idea). + # Removing column singletons would be easy, but it is not as important + # because the procedure is performed only on the equality constraint + # matrix from the original problem - not on the canonical form matrix, + # which would have many more column singletons due to slack variables + # from the inequality constraints. + # The thoughts on "crashing" the initial basis are only really useful if + # the matrix is sparse. + + lu = np.eye(m, order='F'), np.arange(m) # initial LU is trivial + perm_r = lu[1] + for i in v: + + e[i] = 1 + if i > 0: + e[i-1] = 0 + + try: # fails for i==0 and any time it gets ill-conditioned + j = b[i-1] + lu = bg_update_dense(lu, perm_r, A[:, j], i-1) + except Exception: + lu = scipy.linalg.lu_factor(A[:, b]) + LU, p = lu + perm_r = list(range(m)) + for i1, i2 in enumerate(p): + perm_r[i1], perm_r[i2] = perm_r[i2], perm_r[i1] + + pi = scipy.linalg.lu_solve(lu, e, trans=1) + + js = js_candidates[js_mask] + batch = 50 + + # This is a tiny bit faster than looping over columns individually, + # like for j in js: if abs(A[:,j].transpose().dot(pi)) > tolapiv: + for j_index in range(0, len(js), batch): + j_indices = js[j_index: min(j_index+batch, len(js))] + + c = abs(A[:, j_indices].transpose().dot(pi)) + if (c > tolapiv).any(): + j = js[j_index + np.argmax(c)] # very independent column + b[i] = j + js_mask[j-m] = False + break + else: + bibar = pi.T.dot(rhs.reshape(-1, 1)) + bnorm = np.linalg.norm(rhs) + if abs(bibar)/(1+bnorm) > tolprimal: # inconsistent + status = 2 + message = inconsistent + return A_orig, rhs, status, message + else: # dependent + d.append(i) + if true_rank is not None and len(d) == m - true_rank: + break # found all redundancies + + keep = set(range(m)) + keep = list(keep - set(d)) + return A_orig[keep, :], rhs[keep], status, message + + +def _remove_redundancy_pivot_sparse(A, rhs): + """ + Eliminates redundant equations from system of equations defined by Ax = b + and identifies infeasibilities. 
+ + Parameters + ---------- + A : 2-D sparse matrix + An matrix representing the left-hand side of a system of equations + rhs : 1-D array + An array representing the right-hand side of a system of equations + + Returns + ------- + A : 2-D sparse matrix + A matrix representing the left-hand side of a system of equations + rhs : 1-D array + An array representing the right-hand side of a system of equations + status: int + An integer indicating the status of the system + 0: No infeasibility identified + 2: Trivially infeasible + message : str + A string descriptor of the exit status of the optimization. + + References + ---------- + .. [2] Andersen, Erling D. "Finding all linearly dependent rows in + large-scale linear programming." Optimization Methods and Software + 6.3 (1995): 219-227. + + """ + + tolapiv = 1e-8 + tolprimal = 1e-8 + status = 0 + message = "" + inconsistent = ("There is a linear combination of rows of A_eq that " + "results in zero, suggesting a redundant constraint. " + "However the same linear combination of b_eq is " + "nonzero, suggesting that the constraints conflict " + "and the problem is infeasible.") + A, rhs, status, message = _remove_zero_rows(A, rhs) + + if status != 0: + return A, rhs, status, message + + m, n = A.shape + + v = list(range(m)) # Artificial column indices. + b = list(v) # Basis column indices. + # This is better as a list than a set because column order of basis matrix + # needs to be consistent. + k = set(range(m, m+n)) # Structural column indices. + d = [] # Indices of dependent rows + + A_orig = A + A = scipy.sparse.hstack((scipy.sparse.eye(m), A)).tocsc() + e = np.zeros(m) + + # Implements basic algorithm from [2] + # Uses only one of the suggested improvements (removing zero rows). + # Removing column singletons would be easy, but it is not as important + # because the procedure is performed only on the equality constraint + # matrix from the original problem - not on the canonical form matrix, + # which would have many more column singletons due to slack variables + # from the inequality constraints. + # The thoughts on "crashing" the initial basis sound useful, but the + # description of the procedure seems to assume a lot of familiarity with + # the subject; it is not very explicit. I already went through enough + # trouble getting the basic algorithm working, so I was not interested in + # trying to decipher this, too. (Overall, the paper is fraught with + # mistakes and ambiguities - which is strange, because the rest of + # Andersen's papers are quite good.) + # I tried and tried and tried to improve performance using the + # Bartels-Golub update. It works, but it's only practical if the LU + # factorization can be specialized as described, and that is not possible + # until the SciPy SuperLU interface permits control over column + # permutation - see issue #7700. + + for i in v: + B = A[:, b] + + e[i] = 1 + if i > 0: + e[i-1] = 0 + + pi = scipy.sparse.linalg.spsolve(B.transpose(), e).reshape(-1, 1) + + js = list(k-set(b)) # not efficient, but this is not the time sink... + + # Due to overhead, it tends to be faster (for problems tested) to + # compute the full matrix-vector product rather than individual + # vector-vector products (with the chance of terminating as soon + # as any are nonzero). For very large matrices, it might be worth + # it to compute, say, 100 or 1000 at a time and stop when a nonzero + # is found. 
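+        # The nonzero entries of the product below flag structural columns
+        # whose dot product with pi exceeds tolapiv, i.e. columns that are
+        # linearly independent of the current basis; the first such column
+        # replaces the artificial column for row i.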
+ + c = (np.abs(A[:, js].transpose().dot(pi)) > tolapiv).nonzero()[0] + if len(c) > 0: # independent + j = js[c[0]] + # in a previous commit, the previous line was changed to choose + # index j corresponding with the maximum dot product. + # While this avoided issues with almost + # singular matrices, it slowed the routine in most NETLIB tests. + # I think this is because these columns were denser than the + # first column with nonzero dot product (c[0]). + # It would be nice to have a heuristic that balances sparsity with + # high dot product, but I don't think it's worth the time to + # develop one right now. Bartels-Golub update is a much higher + # priority. + b[i] = j # replace artificial column + else: + bibar = pi.T.dot(rhs.reshape(-1, 1)) + bnorm = np.linalg.norm(rhs) + if abs(bibar)/(1 + bnorm) > tolprimal: + status = 2 + message = inconsistent + return A_orig, rhs, status, message + else: # dependent + d.append(i) + + keep = set(range(m)) + keep = list(keep - set(d)) + return A_orig[keep, :], rhs[keep], status, message + + +def _remove_redundancy_svd(A, b): + """ + Eliminates redundant equations from system of equations defined by Ax = b + and identifies infeasibilities. + + Parameters + ---------- + A : 2-D array + An array representing the left-hand side of a system of equations + b : 1-D array + An array representing the right-hand side of a system of equations + + Returns + ------- + A : 2-D array + An array representing the left-hand side of a system of equations + b : 1-D array + An array representing the right-hand side of a system of equations + status: int + An integer indicating the status of the system + 0: No infeasibility identified + 2: Trivially infeasible + message : str + A string descriptor of the exit status of the optimization. + + References + ---------- + .. [2] Andersen, Erling D. "Finding all linearly dependent rows in + large-scale linear programming." Optimization Methods and Software + 6.3 (1995): 219-227. + + """ + + A, b, status, message = _remove_zero_rows(A, b) + + if status != 0: + return A, b, status, message + + U, s, Vh = svd(A) + eps = np.finfo(float).eps + tol = s.max() * max(A.shape) * eps + + m, n = A.shape + s_min = s[-1] if m <= n else 0 + + # this algorithm is faster than that of [2] when the nullspace is small + # but it could probably be improvement by randomized algorithms and with + # a sparse implementation. + # it relies on repeated singular value decomposition to find linearly + # dependent rows (as identified by columns of U that correspond with zero + # singular values). Unfortunately, only one row can be removed per + # decomposition (I tried otherwise; doing so can cause problems.) + # It would be nice if we could do truncated SVD like sp.sparse.linalg.svds + # but that function is unreliable at finding singular values near zero. + # Finding max eigenvalue L of A A^T, then largest eigenvalue (and + # associated eigenvector) of -A A^T + L I (I is identity) via power + # iteration would also work in theory, but is only efficient if the + # smallest nonzero eigenvalue of A A^T is close to the largest nonzero + # eigenvalue. + + while abs(s_min) < tol: + v = U[:, -1] # TODO: return these so user can eliminate from problem? + # rows need to be represented in significant amount + eligibleRows = np.abs(v) > tol * 10e6 + if not np.any(eligibleRows) or np.any(np.abs(v.dot(A)) > tol): + status = 4 + message = ("Due to numerical issues, redundant equality " + "constraints could not be removed automatically. 
" + "Try providing your constraint matrices as sparse " + "matrices to activate sparse presolve, try turning " + "off redundancy removal, or try turning off presolve " + "altogether.") + break + if np.any(np.abs(v.dot(b)) > tol * 100): # factor of 100 to fix 10038 and 10349 + status = 2 + message = ("There is a linear combination of rows of A_eq that " + "results in zero, suggesting a redundant constraint. " + "However the same linear combination of b_eq is " + "nonzero, suggesting that the constraints conflict " + "and the problem is infeasible.") + break + + i_remove = _get_densest(A, eligibleRows) + A = np.delete(A, i_remove, axis=0) + b = np.delete(b, i_remove) + U, s, Vh = svd(A) + m, n = A.shape + s_min = s[-1] if m <= n else 0 + + return A, b, status, message + + +def _remove_redundancy_id(A, rhs, rank=None, randomized=True): + """Eliminates redundant equations from a system of equations. + + Eliminates redundant equations from system of equations defined by Ax = b + and identifies infeasibilities. + + Parameters + ---------- + A : 2-D array + An array representing the left-hand side of a system of equations + rhs : 1-D array + An array representing the right-hand side of a system of equations + rank : int, optional + The rank of A + randomized: bool, optional + True for randomized interpolative decomposition + + Returns + ------- + A : 2-D array + An array representing the left-hand side of a system of equations + rhs : 1-D array + An array representing the right-hand side of a system of equations + status: int + An integer indicating the status of the system + 0: No infeasibility identified + 2: Trivially infeasible + message : str + A string descriptor of the exit status of the optimization. + + """ + + status = 0 + message = "" + inconsistent = ("There is a linear combination of rows of A_eq that " + "results in zero, suggesting a redundant constraint. " + "However the same linear combination of b_eq is " + "nonzero, suggesting that the constraints conflict " + "and the problem is infeasible.") + + A, rhs, status, message = _remove_zero_rows(A, rhs) + + if status != 0: + return A, rhs, status, message + + m, n = A.shape + + k = rank + if rank is None: + k = np.linalg.matrix_rank(A) + + idx, proj = interp_decomp(A.T, k, rand=randomized) + + # first k entries in idx are indices of the independent rows + # remaining entries are the indices of the m-k dependent rows + # proj provides a linear combinations of rows of A2 that form the + # remaining m-k (dependent) rows. The same linear combination of entries + # in rhs2 must give the remaining m-k entries. If not, the system is + # inconsistent, and the problem is infeasible. + if not np.allclose(rhs[idx[:k]] @ proj, rhs[idx[k:]]): + status = 2 + message = inconsistent + + # sort indices because the other redundancy removal routines leave rows + # in original order and tests were written with that in mind + idx = sorted(idx[:k]) + A2 = A[idx, :] + rhs2 = rhs[idx] + return A2, rhs2, status, message diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_root.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_root.py new file mode 100644 index 0000000000000000000000000000000000000000..fdf5a0cf409392d4c3ece21f23ad2e061860c699 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_root.py @@ -0,0 +1,732 @@ +""" +Unified interfaces to root finding algorithms. 
+ +Functions +--------- +- root : find a root of a vector function. +""" +__all__ = ['root'] + +import numpy as np + +from warnings import warn + +from ._optimize import MemoizeJac, OptimizeResult, _check_unknown_options +from ._minpack_py import _root_hybr, leastsq +from ._spectral import _root_df_sane +from . import _nonlin as nonlin + + +ROOT_METHODS = ['hybr', 'lm', 'broyden1', 'broyden2', 'anderson', + 'linearmixing', 'diagbroyden', 'excitingmixing', 'krylov', + 'df-sane'] + + +def root(fun, x0, args=(), method='hybr', jac=None, tol=None, callback=None, + options=None): + r""" + Find a root of a vector function. + + Parameters + ---------- + fun : callable + A vector function to find a root of. + + Suppose the callable has signature ``f0(x, *my_args, **my_kwargs)``, where + ``my_args`` and ``my_kwargs`` are required positional and keyword arguments. + Rather than passing ``f0`` as the callable, wrap it to accept + only ``x``; e.g., pass ``fun=lambda x: f0(x, *my_args, **my_kwargs)`` as the + callable, where ``my_args`` (tuple) and ``my_kwargs`` (dict) have been + gathered before invoking this function. + x0 : ndarray + Initial guess. + args : tuple, optional + Extra arguments passed to the objective function and its Jacobian. + method : str, optional + Type of solver. Should be one of + + - 'hybr' :ref:`(see here) ` + - 'lm' :ref:`(see here) ` + - 'broyden1' :ref:`(see here) ` + - 'broyden2' :ref:`(see here) ` + - 'anderson' :ref:`(see here) ` + - 'linearmixing' :ref:`(see here) ` + - 'diagbroyden' :ref:`(see here) ` + - 'excitingmixing' :ref:`(see here) ` + - 'krylov' :ref:`(see here) ` + - 'df-sane' :ref:`(see here) ` + + jac : bool or callable, optional + If `jac` is a Boolean and is True, `fun` is assumed to return the + value of Jacobian along with the objective function. If False, the + Jacobian will be estimated numerically. + `jac` can also be a callable returning the Jacobian of `fun`. In + this case, it must accept the same arguments as `fun`. + tol : float, optional + Tolerance for termination. For detailed control, use solver-specific + options. + callback : function, optional + Optional callback function. It is called on every iteration as + ``callback(x, f)`` where `x` is the current solution and `f` + the corresponding residual. For all methods but 'hybr' and 'lm'. + options : dict, optional + A dictionary of solver options. E.g., `xtol` or `maxiter`, see + :obj:`show_options()` for details. + + Returns + ------- + sol : OptimizeResult + The solution represented as a ``OptimizeResult`` object. + Important attributes are: ``x`` the solution array, ``success`` a + Boolean flag indicating if the algorithm exited successfully and + ``message`` which describes the cause of the termination. See + `OptimizeResult` for a description of other attributes. + + See also + -------- + show_options : Additional options accepted by the solvers + + Notes + ----- + This section describes the available solvers that can be selected by the + 'method' parameter. The default method is *hybr*. + + Method *hybr* uses a modification of the Powell hybrid method as + implemented in MINPACK [1]_. + + Method *lm* solves the system of nonlinear equations in a least squares + sense using a modification of the Levenberg-Marquardt algorithm as + implemented in MINPACK [1]_. + + Method *df-sane* is a derivative-free spectral method. 
[3]_ + + Methods *broyden1*, *broyden2*, *anderson*, *linearmixing*, + *diagbroyden*, *excitingmixing*, *krylov* are inexact Newton methods, + with backtracking or full line searches [2]_. Each method corresponds + to a particular Jacobian approximations. + + - Method *broyden1* uses Broyden's first Jacobian approximation, it is + known as Broyden's good method. + - Method *broyden2* uses Broyden's second Jacobian approximation, it + is known as Broyden's bad method. + - Method *anderson* uses (extended) Anderson mixing. + - Method *Krylov* uses Krylov approximation for inverse Jacobian. It + is suitable for large-scale problem. + - Method *diagbroyden* uses diagonal Broyden Jacobian approximation. + - Method *linearmixing* uses a scalar Jacobian approximation. + - Method *excitingmixing* uses a tuned diagonal Jacobian + approximation. + + .. warning:: + + The algorithms implemented for methods *diagbroyden*, + *linearmixing* and *excitingmixing* may be useful for specific + problems, but whether they will work may depend strongly on the + problem. + + .. versionadded:: 0.11.0 + + References + ---------- + .. [1] More, Jorge J., Burton S. Garbow, and Kenneth E. Hillstrom. + 1980. User Guide for MINPACK-1. + .. [2] C. T. Kelley. 1995. Iterative Methods for Linear and Nonlinear + Equations. Society for Industrial and Applied Mathematics. + + .. [3] W. La Cruz, J.M. Martinez, M. Raydan. Math. Comp. 75, 1429 (2006). + + Examples + -------- + The following functions define a system of nonlinear equations and its + jacobian. + + >>> import numpy as np + >>> def fun(x): + ... return [x[0] + 0.5 * (x[0] - x[1])**3 - 1.0, + ... 0.5 * (x[1] - x[0])**3 + x[1]] + + >>> def jac(x): + ... return np.array([[1 + 1.5 * (x[0] - x[1])**2, + ... -1.5 * (x[0] - x[1])**2], + ... [-1.5 * (x[1] - x[0])**2, + ... 1 + 1.5 * (x[1] - x[0])**2]]) + + A solution can be obtained as follows. + + >>> from scipy import optimize + >>> sol = optimize.root(fun, [0, 0], jac=jac, method='hybr') + >>> sol.x + array([ 0.8411639, 0.1588361]) + + **Large problem** + + Suppose that we needed to solve the following integrodifferential + equation on the square :math:`[0,1]\times[0,1]`: + + .. math:: + + \nabla^2 P = 10 \left(\int_0^1\int_0^1\cosh(P)\,dx\,dy\right)^2 + + with :math:`P(x,1) = 1` and :math:`P=0` elsewhere on the boundary of + the square. + + The solution can be found using the ``method='krylov'`` solver: + + >>> from scipy import optimize + >>> # parameters + >>> nx, ny = 75, 75 + >>> hx, hy = 1./(nx-1), 1./(ny-1) + + >>> P_left, P_right = 0, 0 + >>> P_top, P_bottom = 1, 0 + + >>> def residual(P): + ... d2x = np.zeros_like(P) + ... d2y = np.zeros_like(P) + ... + ... d2x[1:-1] = (P[2:] - 2*P[1:-1] + P[:-2]) / hx/hx + ... d2x[0] = (P[1] - 2*P[0] + P_left)/hx/hx + ... d2x[-1] = (P_right - 2*P[-1] + P[-2])/hx/hx + ... + ... d2y[:,1:-1] = (P[:,2:] - 2*P[:,1:-1] + P[:,:-2])/hy/hy + ... d2y[:,0] = (P[:,1] - 2*P[:,0] + P_bottom)/hy/hy + ... d2y[:,-1] = (P_top - 2*P[:,-1] + P[:,-2])/hy/hy + ... + ... return d2x + d2y - 10*np.cosh(P).mean()**2 + + >>> guess = np.zeros((nx, ny), float) + >>> sol = optimize.root(residual, guess, method='krylov') + >>> print('Residual: %g' % abs(residual(sol.x)).max()) + Residual: 5.7972e-06 # may vary + + >>> import matplotlib.pyplot as plt + >>> x, y = np.mgrid[0:1:(nx*1j), 0:1:(ny*1j)] + >>> plt.pcolormesh(x, y, sol.x, shading='gouraud') + >>> plt.colorbar() + >>> plt.show() + + """ + def _wrapped_fun(*fargs): + """ + Wrapped `func` to track the number of times + the function has been called. 
+ """ + _wrapped_fun.nfev += 1 + return fun(*fargs) + + _wrapped_fun.nfev = 0 + + if not isinstance(args, tuple): + args = (args,) + + meth = method.lower() + if options is None: + options = {} + + if callback is not None and meth in ('hybr', 'lm'): + warn(f'Method {method} does not accept callback.', + RuntimeWarning, stacklevel=2) + + # fun also returns the Jacobian + if not callable(jac) and meth in ('hybr', 'lm'): + if bool(jac): + fun = MemoizeJac(fun) + jac = fun.derivative + else: + jac = None + + # set default tolerances + if tol is not None: + options = dict(options) + if meth in ('hybr', 'lm'): + options.setdefault('xtol', tol) + elif meth in ('df-sane',): + options.setdefault('ftol', tol) + elif meth in ('broyden1', 'broyden2', 'anderson', 'linearmixing', + 'diagbroyden', 'excitingmixing', 'krylov'): + options.setdefault('xtol', tol) + options.setdefault('xatol', np.inf) + options.setdefault('ftol', np.inf) + options.setdefault('fatol', np.inf) + + if meth == 'hybr': + sol = _root_hybr(_wrapped_fun, x0, args=args, jac=jac, **options) + elif meth == 'lm': + sol = _root_leastsq(_wrapped_fun, x0, args=args, jac=jac, **options) + elif meth == 'df-sane': + _warn_jac_unused(jac, method) + sol = _root_df_sane(_wrapped_fun, x0, args=args, callback=callback, + **options) + elif meth in ('broyden1', 'broyden2', 'anderson', 'linearmixing', + 'diagbroyden', 'excitingmixing', 'krylov'): + _warn_jac_unused(jac, method) + sol = _root_nonlin_solve(_wrapped_fun, x0, args=args, jac=jac, + _method=meth, _callback=callback, + **options) + else: + raise ValueError(f'Unknown solver {method}') + + sol.nfev = _wrapped_fun.nfev + return sol + + +def _warn_jac_unused(jac, method): + if jac is not None: + warn(f'Method {method} does not use the jacobian (jac).', + RuntimeWarning, stacklevel=2) + + +def _root_leastsq(fun, x0, args=(), jac=None, + col_deriv=0, xtol=1.49012e-08, ftol=1.49012e-08, + gtol=0.0, maxiter=0, eps=0.0, factor=100, diag=None, + **unknown_options): + """ + Solve for least squares with Levenberg-Marquardt + + Options + ------- + col_deriv : bool + non-zero to specify that the Jacobian function computes derivatives + down the columns (faster, because there is no transpose operation). + ftol : float + Relative error desired in the sum of squares. + xtol : float + Relative error desired in the approximate solution. + gtol : float + Orthogonality desired between the function vector and the columns + of the Jacobian. + maxiter : int + The maximum number of calls to the function. If zero, then + 100*(N+1) is the maximum where N is the number of elements in x0. + eps : float + A suitable step length for the forward-difference approximation of + the Jacobian (for Dfun=None). If `eps` is less than the machine + precision, it is assumed that the relative errors in the functions + are of the order of the machine precision. + factor : float + A parameter determining the initial step bound + (``factor * || diag * x||``). Should be in interval ``(0.1, 100)``. + diag : sequence + N positive entries that serve as a scale factors for the variables. + """ + nfev = 0 + def _wrapped_fun(*fargs): + """ + Wrapped `func` to track the number of times + the function has been called. 
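# A minimal sketch of the ``jac=True`` path dispatched above, assuming SciPy
# is installed; `fun_and_jac` is a hypothetical callable that returns the
# residual vector together with its Jacobian, as MemoizeJac expects for the
# 'hybr' and 'lm' methods.
import numpy as np
from scipy.optimize import root

def fun_and_jac(x):
    f = [x[0] + 0.5 * (x[0] - x[1])**3 - 1.0,
         0.5 * (x[1] - x[0])**3 + x[1]]
    jac = np.array([[1 + 1.5 * (x[0] - x[1])**2, -1.5 * (x[0] - x[1])**2],
                    [-1.5 * (x[1] - x[0])**2, 1 + 1.5 * (x[1] - x[0])**2]])
    return f, jac

sol = root(fun_and_jac, [0.0, 0.0], jac=True, method='lm')
print(sol.x)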
+ """ + nonlocal nfev + nfev += 1 + return fun(*fargs) + + _check_unknown_options(unknown_options) + x, cov_x, info, msg, ier = leastsq(_wrapped_fun, x0, args=args, + Dfun=jac, full_output=True, + col_deriv=col_deriv, xtol=xtol, + ftol=ftol, gtol=gtol, + maxfev=maxiter, epsfcn=eps, + factor=factor, diag=diag) + sol = OptimizeResult(x=x, message=msg, status=ier, + success=ier in (1, 2, 3, 4), cov_x=cov_x, + fun=info.pop('fvec'), method="lm") + sol.update(info) + sol.nfev = nfev + return sol + + +def _root_nonlin_solve(fun, x0, args=(), jac=None, + _callback=None, _method=None, + nit=None, disp=False, maxiter=None, + ftol=None, fatol=None, xtol=None, xatol=None, + tol_norm=None, line_search='armijo', jac_options=None, + **unknown_options): + _check_unknown_options(unknown_options) + + f_tol = fatol + f_rtol = ftol + x_tol = xatol + x_rtol = xtol + verbose = disp + if jac_options is None: + jac_options = dict() + + jacobian = {'broyden1': nonlin.BroydenFirst, + 'broyden2': nonlin.BroydenSecond, + 'anderson': nonlin.Anderson, + 'linearmixing': nonlin.LinearMixing, + 'diagbroyden': nonlin.DiagBroyden, + 'excitingmixing': nonlin.ExcitingMixing, + 'krylov': nonlin.KrylovJacobian + }[_method] + + if args: + if jac is True: + def f(x): + return fun(x, *args)[0] + else: + def f(x): + return fun(x, *args) + else: + f = fun + + x, info = nonlin.nonlin_solve(f, x0, jacobian=jacobian(**jac_options), + iter=nit, verbose=verbose, + maxiter=maxiter, f_tol=f_tol, + f_rtol=f_rtol, x_tol=x_tol, + x_rtol=x_rtol, tol_norm=tol_norm, + line_search=line_search, + callback=_callback, full_output=True, + raise_exception=False) + sol = OptimizeResult(x=x, method=_method) + sol.update(info) + return sol + +def _root_broyden1_doc(): + """ + Options + ------- + nit : int, optional + Number of iterations to make. If omitted (default), make as many + as required to meet tolerances. + disp : bool, optional + Print status to stdout on every iteration. + maxiter : int, optional + Maximum number of iterations to make. + ftol : float, optional + Relative tolerance for the residual. If omitted, not used. + fatol : float, optional + Absolute tolerance (in max-norm) for the residual. + If omitted, default is 6e-6. + xtol : float, optional + Relative minimum step size. If omitted, not used. + xatol : float, optional + Absolute minimum step size, as determined from the Jacobian + approximation. If the step size is smaller than this, optimization + is terminated as successful. If omitted, not used. + tol_norm : function(vector) -> scalar, optional + Norm to use in convergence check. Default is the maximum norm. + line_search : {None, 'armijo' (default), 'wolfe'}, optional + Which type of a line search to use to determine the step size in + the direction given by the Jacobian approximation. Defaults to + 'armijo'. + jac_options : dict, optional + Options for the respective Jacobian approximation. + + alpha : float, optional + Initial guess for the Jacobian is (-1/alpha). + reduction_method : str or tuple, optional + Method used in ensuring that the rank of the Broyden + matrix stays low. Can either be a string giving the + name of the method, or a tuple of the form ``(method, + param1, param2, ...)`` that gives the name of the + method and values for additional parameters. + + Methods available: + + - ``restart``: drop all matrix columns. Has no extra parameters. + - ``simple``: drop oldest matrix column. Has no extra parameters. + - ``svd``: keep only the most significant SVD components. 
+ Takes an extra parameter, ``to_retain``, which determines the + number of SVD components to retain when rank reduction is done. + Default is ``max_rank - 2``. + + max_rank : int, optional + Maximum rank for the Broyden matrix. + Default is infinity (i.e., no rank reduction). + + Examples + -------- + >>> def func(x): + ... return np.cos(x) + x[::-1] - [1, 2, 3, 4] + ... + >>> from scipy import optimize + >>> res = optimize.root(func, [1, 1, 1, 1], method='broyden1', tol=1e-14) + >>> x = res.x + >>> x + array([4.04674914, 3.91158389, 2.71791677, 1.61756251]) + >>> np.cos(x) + x[::-1] + array([1., 2., 3., 4.]) + + """ + pass + + +def _root_broyden2_doc(): + """ + Options + ------- + nit : int, optional + Number of iterations to make. If omitted (default), make as many + as required to meet tolerances. + disp : bool, optional + Print status to stdout on every iteration. + maxiter : int, optional + Maximum number of iterations to make. + ftol : float, optional + Relative tolerance for the residual. If omitted, not used. + fatol : float, optional + Absolute tolerance (in max-norm) for the residual. + If omitted, default is 6e-6. + xtol : float, optional + Relative minimum step size. If omitted, not used. + xatol : float, optional + Absolute minimum step size, as determined from the Jacobian + approximation. If the step size is smaller than this, optimization + is terminated as successful. If omitted, not used. + tol_norm : function(vector) -> scalar, optional + Norm to use in convergence check. Default is the maximum norm. + line_search : {None, 'armijo' (default), 'wolfe'}, optional + Which type of a line search to use to determine the step size in + the direction given by the Jacobian approximation. Defaults to + 'armijo'. + jac_options : dict, optional + Options for the respective Jacobian approximation. + + alpha : float, optional + Initial guess for the Jacobian is (-1/alpha). + reduction_method : str or tuple, optional + Method used in ensuring that the rank of the Broyden + matrix stays low. Can either be a string giving the + name of the method, or a tuple of the form ``(method, + param1, param2, ...)`` that gives the name of the + method and values for additional parameters. + + Methods available: + + - ``restart``: drop all matrix columns. Has no extra parameters. + - ``simple``: drop oldest matrix column. Has no extra parameters. + - ``svd``: keep only the most significant SVD components. + Takes an extra parameter, ``to_retain``, which determines the + number of SVD components to retain when rank reduction is done. + Default is ``max_rank - 2``. + + max_rank : int, optional + Maximum rank for the Broyden matrix. + Default is infinity (i.e., no rank reduction). + """ + pass + + +def _root_anderson_doc(): + """ + Options + ------- + nit : int, optional + Number of iterations to make. If omitted (default), make as many + as required to meet tolerances. + disp : bool, optional + Print status to stdout on every iteration. + maxiter : int, optional + Maximum number of iterations to make. + ftol : float, optional + Relative tolerance for the residual. If omitted, not used. + fatol : float, optional + Absolute tolerance (in max-norm) for the residual. + If omitted, default is 6e-6. + xtol : float, optional + Relative minimum step size. If omitted, not used. + xatol : float, optional + Absolute minimum step size, as determined from the Jacobian + approximation. If the step size is smaller than this, optimization + is terminated as successful. If omitted, not used. 
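# A minimal sketch, assuming SciPy is installed, of forwarding the
# Jacobian-approximation settings documented above through ``jac_options``;
# the particular values for ``alpha``, ``line_search``, and
# ``reduction_method`` are illustrative choices, not recommended defaults.
import numpy as np
from scipy.optimize import root

def func(x):
    return np.cos(x) + x[::-1] - [1, 2, 3, 4]

sol = root(func, [1, 1, 1, 1], method='broyden1',
           options={'maxiter': 500,
                    'line_search': 'wolfe',
                    'jac_options': {'alpha': 0.5,
                                    'reduction_method': 'svd'}})
print(sol.x)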
+ tol_norm : function(vector) -> scalar, optional + Norm to use in convergence check. Default is the maximum norm. + line_search : {None, 'armijo' (default), 'wolfe'}, optional + Which type of a line search to use to determine the step size in + the direction given by the Jacobian approximation. Defaults to + 'armijo'. + jac_options : dict, optional + Options for the respective Jacobian approximation. + + alpha : float, optional + Initial guess for the Jacobian is (-1/alpha). + M : float, optional + Number of previous vectors to retain. Defaults to 5. + w0 : float, optional + Regularization parameter for numerical stability. + Compared to unity, good values of the order of 0.01. + """ + pass + +def _root_linearmixing_doc(): + """ + Options + ------- + nit : int, optional + Number of iterations to make. If omitted (default), make as many + as required to meet tolerances. + disp : bool, optional + Print status to stdout on every iteration. + maxiter : int, optional + Maximum number of iterations to make. + ftol : float, optional + Relative tolerance for the residual. If omitted, not used. + fatol : float, optional + Absolute tolerance (in max-norm) for the residual. + If omitted, default is 6e-6. + xtol : float, optional + Relative minimum step size. If omitted, not used. + xatol : float, optional + Absolute minimum step size, as determined from the Jacobian + approximation. If the step size is smaller than this, optimization + is terminated as successful. If omitted, not used. + tol_norm : function(vector) -> scalar, optional + Norm to use in convergence check. Default is the maximum norm. + line_search : {None, 'armijo' (default), 'wolfe'}, optional + Which type of a line search to use to determine the step size in + the direction given by the Jacobian approximation. Defaults to + 'armijo'. + jac_options : dict, optional + Options for the respective Jacobian approximation. + + alpha : float, optional + initial guess for the jacobian is (-1/alpha). + """ + pass + +def _root_diagbroyden_doc(): + """ + Options + ------- + nit : int, optional + Number of iterations to make. If omitted (default), make as many + as required to meet tolerances. + disp : bool, optional + Print status to stdout on every iteration. + maxiter : int, optional + Maximum number of iterations to make. + ftol : float, optional + Relative tolerance for the residual. If omitted, not used. + fatol : float, optional + Absolute tolerance (in max-norm) for the residual. + If omitted, default is 6e-6. + xtol : float, optional + Relative minimum step size. If omitted, not used. + xatol : float, optional + Absolute minimum step size, as determined from the Jacobian + approximation. If the step size is smaller than this, optimization + is terminated as successful. If omitted, not used. + tol_norm : function(vector) -> scalar, optional + Norm to use in convergence check. Default is the maximum norm. + line_search : {None, 'armijo' (default), 'wolfe'}, optional + Which type of a line search to use to determine the step size in + the direction given by the Jacobian approximation. Defaults to + 'armijo'. + jac_options : dict, optional + Options for the respective Jacobian approximation. + + alpha : float, optional + initial guess for the jacobian is (-1/alpha). + """ + pass + +def _root_excitingmixing_doc(): + """ + Options + ------- + nit : int, optional + Number of iterations to make. If omitted (default), make as many + as required to meet tolerances. + disp : bool, optional + Print status to stdout on every iteration. 
+ maxiter : int, optional + Maximum number of iterations to make. + ftol : float, optional + Relative tolerance for the residual. If omitted, not used. + fatol : float, optional + Absolute tolerance (in max-norm) for the residual. + If omitted, default is 6e-6. + xtol : float, optional + Relative minimum step size. If omitted, not used. + xatol : float, optional + Absolute minimum step size, as determined from the Jacobian + approximation. If the step size is smaller than this, optimization + is terminated as successful. If omitted, not used. + tol_norm : function(vector) -> scalar, optional + Norm to use in convergence check. Default is the maximum norm. + line_search : {None, 'armijo' (default), 'wolfe'}, optional + Which type of a line search to use to determine the step size in + the direction given by the Jacobian approximation. Defaults to + 'armijo'. + jac_options : dict, optional + Options for the respective Jacobian approximation. + + alpha : float, optional + Initial Jacobian approximation is (-1/alpha). + alphamax : float, optional + The entries of the diagonal Jacobian are kept in the range + ``[alpha, alphamax]``. + """ + pass + +def _root_krylov_doc(): + """ + Options + ------- + nit : int, optional + Number of iterations to make. If omitted (default), make as many + as required to meet tolerances. + disp : bool, optional + Print status to stdout on every iteration. + maxiter : int, optional + Maximum number of iterations to make. + ftol : float, optional + Relative tolerance for the residual. If omitted, not used. + fatol : float, optional + Absolute tolerance (in max-norm) for the residual. + If omitted, default is 6e-6. + xtol : float, optional + Relative minimum step size. If omitted, not used. + xatol : float, optional + Absolute minimum step size, as determined from the Jacobian + approximation. If the step size is smaller than this, optimization + is terminated as successful. If omitted, not used. + tol_norm : function(vector) -> scalar, optional + Norm to use in convergence check. Default is the maximum norm. + line_search : {None, 'armijo' (default), 'wolfe'}, optional + Which type of a line search to use to determine the step size in + the direction given by the Jacobian approximation. Defaults to + 'armijo'. + jac_options : dict, optional + Options for the respective Jacobian approximation. + + rdiff : float, optional + Relative step size to use in numerical differentiation. + method : str or callable, optional + Krylov method to use to approximate the Jacobian. Can be a string, + or a function implementing the same interface as the iterative + solvers in `scipy.sparse.linalg`. If a string, needs to be one of: + ``'lgmres'``, ``'gmres'``, ``'bicgstab'``, ``'cgs'``, ``'minres'``, + ``'tfqmr'``. + + The default is `scipy.sparse.linalg.lgmres`. + inner_M : LinearOperator or InverseJacobian + Preconditioner for the inner Krylov iteration. + Note that you can use also inverse Jacobians as (adaptive) + preconditioners. For example, + + >>> jac = BroydenFirst() + >>> kjac = KrylovJacobian(inner_M=jac.inverse). + + If the preconditioner has a method named 'update', it will + be called as ``update(x, f)`` after each nonlinear step, + with ``x`` giving the current point, and ``f`` the current + function value. + inner_rtol, inner_atol, inner_callback, ... + Parameters to pass on to the "inner" Krylov solver. + + For a full list of options, see the documentation for the + solver you are using. By default this is `scipy.sparse.linalg.lgmres`. 
+ If the solver has been overridden through `method`, see the documentation + for that solver instead. + To use an option for that solver, prepend ``inner_`` to it. + For example, to control the ``rtol`` argument to the solver, + set the `inner_rtol` option here. + + outer_k : int, optional + Size of the subspace kept across LGMRES nonlinear + iterations. + + See `scipy.sparse.linalg.lgmres` for details. + """ + pass diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_root_scalar.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_root_scalar.py new file mode 100644 index 0000000000000000000000000000000000000000..668565de62ea36e0cc9be378875604855e489d17 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_root_scalar.py @@ -0,0 +1,538 @@ +""" +Unified interfaces to root finding algorithms for real or complex +scalar functions. + +Functions +--------- +- root : find a root of a scalar function. +""" +import numpy as np + +from . import _zeros_py as optzeros +from ._numdiff import approx_derivative + +__all__ = ['root_scalar'] + +ROOT_SCALAR_METHODS = ['bisect', 'brentq', 'brenth', 'ridder', 'toms748', + 'newton', 'secant', 'halley'] + + +class MemoizeDer: + """Decorator that caches the value and derivative(s) of function each + time it is called. + + This is a simplistic memoizer that calls and caches a single value + of ``f(x, *args)``. + It assumes that `args` does not change between invocations. + It supports the use case of a root-finder where `args` is fixed, + `x` changes, and only rarely, if at all, does x assume the same value + more than once.""" + def __init__(self, fun): + self.fun = fun + self.vals = None + self.x = None + self.n_calls = 0 + + def __call__(self, x, *args): + r"""Calculate f or use cached value if available""" + # Derivative may be requested before the function itself, always check + if self.vals is None or x != self.x: + fg = self.fun(x, *args) + self.x = x + self.n_calls += 1 + self.vals = fg[:] + return self.vals[0] + + def fprime(self, x, *args): + r"""Calculate f' or use a cached value if available""" + if self.vals is None or x != self.x: + self(x, *args) + return self.vals[1] + + def fprime2(self, x, *args): + r"""Calculate f'' or use a cached value if available""" + if self.vals is None or x != self.x: + self(x, *args) + return self.vals[2] + + def ncalls(self): + return self.n_calls + + +def root_scalar(f, args=(), method=None, bracket=None, + fprime=None, fprime2=None, + x0=None, x1=None, + xtol=None, rtol=None, maxiter=None, + options=None): + """ + Find a root of a scalar function. + + Parameters + ---------- + f : callable + A function to find a root of. + + Suppose the callable has signature ``f0(x, *my_args, **my_kwargs)``, where + ``my_args`` and ``my_kwargs`` are required positional and keyword arguments. + Rather than passing ``f0`` as the callable, wrap it to accept + only ``x``; e.g., pass ``fun=lambda x: f0(x, *my_args, **my_kwargs)`` as the + callable, where ``my_args`` (tuple) and ``my_kwargs`` (dict) have been + gathered before invoking this function. + args : tuple, optional + Extra arguments passed to the objective function and its derivative(s). + method : str, optional + Type of solver. 
Should be one of + + - 'bisect' :ref:`(see here) ` + - 'brentq' :ref:`(see here) ` + - 'brenth' :ref:`(see here) ` + - 'ridder' :ref:`(see here) ` + - 'toms748' :ref:`(see here) ` + - 'newton' :ref:`(see here) ` + - 'secant' :ref:`(see here) ` + - 'halley' :ref:`(see here) ` + + bracket: A sequence of 2 floats, optional + An interval bracketing a root. ``f(x, *args)`` must have different + signs at the two endpoints. + x0 : float, optional + Initial guess. + x1 : float, optional + A second guess. + fprime : bool or callable, optional + If `fprime` is a boolean and is True, `f` is assumed to return the + value of the objective function and of the derivative. + `fprime` can also be a callable returning the derivative of `f`. In + this case, it must accept the same arguments as `f`. + fprime2 : bool or callable, optional + If `fprime2` is a boolean and is True, `f` is assumed to return the + value of the objective function and of the + first and second derivatives. + `fprime2` can also be a callable returning the second derivative of `f`. + In this case, it must accept the same arguments as `f`. + xtol : float, optional + Tolerance (absolute) for termination. + rtol : float, optional + Tolerance (relative) for termination. + maxiter : int, optional + Maximum number of iterations. + options : dict, optional + A dictionary of solver options. E.g., ``k``, see + :obj:`show_options()` for details. + + Returns + ------- + sol : RootResults + The solution represented as a ``RootResults`` object. + Important attributes are: ``root`` the solution , ``converged`` a + boolean flag indicating if the algorithm exited successfully and + ``flag`` which describes the cause of the termination. See + `RootResults` for a description of other attributes. + + See also + -------- + show_options : Additional options accepted by the solvers + root : Find a root of a vector function. + + Notes + ----- + This section describes the available solvers that can be selected by the + 'method' parameter. + + The default is to use the best method available for the situation + presented. + If a bracket is provided, it may use one of the bracketing methods. + If a derivative and an initial value are specified, it may + select one of the derivative-based methods. + If no method is judged applicable, it will raise an Exception. + + Arguments for each method are as follows (x=required, o=optional). 
+ + +-----------------------------------------------+---+------+---------+----+----+--------+---------+------+------+---------+---------+ + | method | f | args | bracket | x0 | x1 | fprime | fprime2 | xtol | rtol | maxiter | options | + +===============================================+===+======+=========+====+====+========+=========+======+======+=========+=========+ + | :ref:`bisect ` | x | o | x | | | | | o | o | o | o | + +-----------------------------------------------+---+------+---------+----+----+--------+---------+------+------+---------+---------+ + | :ref:`brentq ` | x | o | x | | | | | o | o | o | o | + +-----------------------------------------------+---+------+---------+----+----+--------+---------+------+------+---------+---------+ + | :ref:`brenth ` | x | o | x | | | | | o | o | o | o | + +-----------------------------------------------+---+------+---------+----+----+--------+---------+------+------+---------+---------+ + | :ref:`ridder ` | x | o | x | | | | | o | o | o | o | + +-----------------------------------------------+---+------+---------+----+----+--------+---------+------+------+---------+---------+ + | :ref:`toms748 ` | x | o | x | | | | | o | o | o | o | + +-----------------------------------------------+---+------+---------+----+----+--------+---------+------+------+---------+---------+ + | :ref:`secant ` | x | o | | x | o | | | o | o | o | o | + +-----------------------------------------------+---+------+---------+----+----+--------+---------+------+------+---------+---------+ + | :ref:`newton ` | x | o | | x | | o | | o | o | o | o | + +-----------------------------------------------+---+------+---------+----+----+--------+---------+------+------+---------+---------+ + | :ref:`halley ` | x | o | | x | | x | x | o | o | o | o | + +-----------------------------------------------+---+------+---------+----+----+--------+---------+------+------+---------+---------+ + + Examples + -------- + + Find the root of a simple cubic + + >>> from scipy import optimize + >>> def f(x): + ... return (x**3 - 1) # only one real root at x = 1 + + >>> def fprime(x): + ... return 3*x**2 + + The `brentq` method takes as input a bracket + + >>> sol = optimize.root_scalar(f, bracket=[0, 3], method='brentq') + >>> sol.root, sol.iterations, sol.function_calls + (1.0, 10, 11) + + The `newton` method takes as input a single point and uses the + derivative(s). + + >>> sol = optimize.root_scalar(f, x0=0.2, fprime=fprime, method='newton') + >>> sol.root, sol.iterations, sol.function_calls + (1.0, 11, 22) + + The function can provide the value and derivative(s) in a single call. + + >>> def f_p_pp(x): + ... return (x**3 - 1), 3*x**2, 6*x + + >>> sol = optimize.root_scalar( + ... f_p_pp, x0=0.2, fprime=True, method='newton' + ... ) + >>> sol.root, sol.iterations, sol.function_calls + (1.0, 11, 11) + + >>> sol = optimize.root_scalar( + ... f_p_pp, x0=0.2, fprime=True, fprime2=True, method='halley' + ... 
) + >>> sol.root, sol.iterations, sol.function_calls + (1.0, 7, 8) + + + """ # noqa: E501 + if not isinstance(args, tuple): + args = (args,) + + if options is None: + options = {} + + # fun also returns the derivative(s) + is_memoized = False + if fprime2 is not None and not callable(fprime2): + if bool(fprime2): + f = MemoizeDer(f) + is_memoized = True + fprime2 = f.fprime2 + fprime = f.fprime + else: + fprime2 = None + if fprime is not None and not callable(fprime): + if bool(fprime): + f = MemoizeDer(f) + is_memoized = True + fprime = f.fprime + else: + fprime = None + + # respect solver-specific default tolerances - only pass in if actually set + kwargs = {} + for k in ['xtol', 'rtol', 'maxiter']: + v = locals().get(k) + if v is not None: + kwargs[k] = v + + # Set any solver-specific options + if options: + kwargs.update(options) + # Always request full_output from the underlying method as _root_scalar + # always returns a RootResults object + kwargs.update(full_output=True, disp=False) + + # Pick a method if not specified. + # Use the "best" method available for the situation. + if not method: + if bracket is not None: + method = 'brentq' + elif x0 is not None: + if fprime: + if fprime2: + method = 'halley' + else: + method = 'newton' + elif x1 is not None: + method = 'secant' + else: + method = 'newton' + if not method: + raise ValueError('Unable to select a solver as neither bracket ' + 'nor starting point provided.') + + meth = method.lower() + map2underlying = {'halley': 'newton', 'secant': 'newton'} + + try: + methodc = getattr(optzeros, map2underlying.get(meth, meth)) + except AttributeError as e: + raise ValueError(f'Unknown solver {meth}') from e + + if meth in ['bisect', 'ridder', 'brentq', 'brenth', 'toms748']: + if not isinstance(bracket, (list, tuple, np.ndarray)): + raise ValueError(f'Bracket needed for {method}') + + a, b = bracket[:2] + try: + r, sol = methodc(f, a, b, args=args, **kwargs) + except ValueError as e: + # gh-17622 fixed some bugs in low-level solvers by raising an error + # (rather than returning incorrect results) when the callable + # returns a NaN. It did so by wrapping the callable rather than + # modifying compiled code, so the iteration count is not available. + if hasattr(e, "_x"): + sol = optzeros.RootResults(root=e._x, + iterations=np.nan, + function_calls=e._function_calls, + flag=str(e), method=method) + else: + raise + + elif meth in ['secant']: + if x0 is None: + raise ValueError(f'x0 must not be None for {method}') + if 'xtol' in kwargs: + kwargs['tol'] = kwargs.pop('xtol') + r, sol = methodc(f, x0, args=args, fprime=None, fprime2=None, + x1=x1, **kwargs) + elif meth in ['newton']: + if x0 is None: + raise ValueError(f'x0 must not be None for {method}') + if not fprime: + # approximate fprime with finite differences + + def fprime(x, *args): + # `root_scalar` doesn't actually seem to support vectorized + # use of `newton`. In that case, `approx_derivative` will + # always get scalar input. Nonetheless, it always returns an + # array, so we extract the element to produce scalar output. + # Similarly, `approx_derivative` always passes array input, so + # we extract the element to ensure the user's function gets + # scalar input. 
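# A minimal sketch of the method auto-selection above, assuming SciPy is
# installed: given only a bracket, 'brentq' is chosen; given ``x0`` plus the
# first and second derivatives, 'halley' is chosen.
from scipy.optimize import root_scalar

def f(x):
    return x**2 - 2.0

sol_bracket = root_scalar(f, bracket=[0, 2])           # no method -> brentq
sol_derivs = root_scalar(f, x0=1.0,
                         fprime=lambda x: 2.0 * x,
                         fprime2=lambda x: 2.0)        # -> halley
print(sol_bracket.root, sol_derivs.root)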
+ def f_wrapped(x, *args): + return f(x[0], *args) + return approx_derivative(f_wrapped, x, method='2-point', args=args)[0] + + if 'xtol' in kwargs: + kwargs['tol'] = kwargs.pop('xtol') + r, sol = methodc(f, x0, args=args, fprime=fprime, fprime2=None, + **kwargs) + elif meth in ['halley']: + if x0 is None: + raise ValueError(f'x0 must not be None for {method}') + if not fprime: + raise ValueError(f'fprime must be specified for {method}') + if not fprime2: + raise ValueError(f'fprime2 must be specified for {method}') + if 'xtol' in kwargs: + kwargs['tol'] = kwargs.pop('xtol') + r, sol = methodc(f, x0, args=args, fprime=fprime, fprime2=fprime2, **kwargs) + else: + raise ValueError(f'Unknown solver {method}') + + if is_memoized: + # Replace the function_calls count with the memoized count. + # Avoids double and triple-counting. + n_calls = f.n_calls + sol.function_calls = n_calls + + return sol + + +def _root_scalar_brentq_doc(): + r""" + Options + ------- + args : tuple, optional + Extra arguments passed to the objective function. + bracket: A sequence of 2 floats, optional + An interval bracketing a root. ``f(x, *args)`` must have different + signs at the two endpoints. + xtol : float, optional + Tolerance (absolute) for termination. + rtol : float, optional + Tolerance (relative) for termination. + maxiter : int, optional + Maximum number of iterations. + options: dict, optional + Specifies any method-specific options not covered above + + """ + pass + + +def _root_scalar_brenth_doc(): + r""" + Options + ------- + args : tuple, optional + Extra arguments passed to the objective function. + bracket: A sequence of 2 floats, optional + An interval bracketing a root. ``f(x, *args)`` must have different + signs at the two endpoints. + xtol : float, optional + Tolerance (absolute) for termination. + rtol : float, optional + Tolerance (relative) for termination. + maxiter : int, optional + Maximum number of iterations. + options: dict, optional + Specifies any method-specific options not covered above. + + """ + pass + +def _root_scalar_toms748_doc(): + r""" + Options + ------- + args : tuple, optional + Extra arguments passed to the objective function. + bracket: A sequence of 2 floats, optional + An interval bracketing a root. ``f(x, *args)`` must have different + signs at the two endpoints. + xtol : float, optional + Tolerance (absolute) for termination. + rtol : float, optional + Tolerance (relative) for termination. + maxiter : int, optional + Maximum number of iterations. + options: dict, optional + Specifies any method-specific options not covered above. + + """ + pass + + +def _root_scalar_secant_doc(): + r""" + Options + ------- + args : tuple, optional + Extra arguments passed to the objective function. + xtol : float, optional + Tolerance (absolute) for termination. + rtol : float, optional + Tolerance (relative) for termination. + maxiter : int, optional + Maximum number of iterations. + x0 : float, required + Initial guess. + x1 : float, optional + A second guess. Must be different from `x0`. If not specified, + a value near `x0` will be chosen. + options: dict, optional + Specifies any method-specific options not covered above. + + """ + pass + + +def _root_scalar_newton_doc(): + r""" + Options + ------- + args : tuple, optional + Extra arguments passed to the objective function and its derivative. + xtol : float, optional + Tolerance (absolute) for termination. + rtol : float, optional + Tolerance (relative) for termination. + maxiter : int, optional + Maximum number of iterations. 
+ x0 : float, required + Initial guess. + fprime : bool or callable, optional + If `fprime` is a boolean and is True, `f` is assumed to return the + value of derivative along with the objective function. + `fprime` can also be a callable returning the derivative of `f`. In + this case, it must accept the same arguments as `f`. + options: dict, optional + Specifies any method-specific options not covered above. + + """ + pass + + +def _root_scalar_halley_doc(): + r""" + Options + ------- + args : tuple, optional + Extra arguments passed to the objective function and its derivatives. + xtol : float, optional + Tolerance (absolute) for termination. + rtol : float, optional + Tolerance (relative) for termination. + maxiter : int, optional + Maximum number of iterations. + x0 : float, required + Initial guess. + fprime : bool or callable, required + If `fprime` is a boolean and is True, `f` is assumed to return the + value of derivative along with the objective function. + `fprime` can also be a callable returning the derivative of `f`. In + this case, it must accept the same arguments as `f`. + fprime2 : bool or callable, required + If `fprime2` is a boolean and is True, `f` is assumed to return the + value of 1st and 2nd derivatives along with the objective function. + `fprime2` can also be a callable returning the 2nd derivative of `f`. + In this case, it must accept the same arguments as `f`. + options: dict, optional + Specifies any method-specific options not covered above. + + """ + pass + + +def _root_scalar_ridder_doc(): + r""" + Options + ------- + args : tuple, optional + Extra arguments passed to the objective function. + bracket: A sequence of 2 floats, optional + An interval bracketing a root. ``f(x, *args)`` must have different + signs at the two endpoints. + xtol : float, optional + Tolerance (absolute) for termination. + rtol : float, optional + Tolerance (relative) for termination. + maxiter : int, optional + Maximum number of iterations. + options: dict, optional + Specifies any method-specific options not covered above. + + """ + pass + + +def _root_scalar_bisect_doc(): + r""" + Options + ------- + args : tuple, optional + Extra arguments passed to the objective function. + bracket: A sequence of 2 floats, optional + An interval bracketing a root. ``f(x, *args)`` must have different + signs at the two endpoints. + xtol : float, optional + Tolerance (absolute) for termination. + rtol : float, optional + Tolerance (relative) for termination. + maxiter : int, optional + Maximum number of iterations. + options: dict, optional + Specifies any method-specific options not covered above. 
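# A minimal sketch, assuming SciPy is installed, showing the shared
# ``xtol``/``maxiter`` keywords documented in the Options sections above
# being passed straight through to a bracketing routine.
from scipy.optimize import root_scalar

def f(x):
    return x**3 - 2.0

sol = root_scalar(f, bracket=[1, 2], method='bisect',
                  xtol=1e-12, maxiter=200)
print(sol.root, sol.iterations, sol.converged)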
+ + """ + pass diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_shgo.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_shgo.py new file mode 100644 index 0000000000000000000000000000000000000000..9e9f37ce003ba42df054e25f03fbcdfe1478a423 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_shgo.py @@ -0,0 +1,1600 @@ +"""shgo: The simplicial homology global optimisation algorithm.""" +from collections import namedtuple +import time +import logging +import warnings +import sys + +import numpy as np + +from scipy import spatial +from scipy.optimize import OptimizeResult, minimize, Bounds +from scipy.optimize._optimize import MemoizeJac +from scipy.optimize._constraints import new_bounds_to_old +from scipy.optimize._minimize import standardize_constraints +from scipy._lib._util import _FunctionWrapper + +from scipy.optimize._shgo_lib._complex import Complex + +__all__ = ['shgo'] + + +def shgo( + func, bounds, args=(), constraints=None, n=100, iters=1, callback=None, + minimizer_kwargs=None, options=None, sampling_method='simplicial', *, + workers=1 +): + """ + Finds the global minimum of a function using SHG optimization. + + SHGO stands for "simplicial homology global optimization". + + Parameters + ---------- + func : callable + The objective function to be minimized. Must be in the form + ``f(x, *args)``, where ``x`` is the argument in the form of a 1-D array + and ``args`` is a tuple of any additional fixed parameters needed to + completely specify the function. + bounds : sequence or `Bounds` + Bounds for variables. There are two ways to specify the bounds: + + 1. Instance of `Bounds` class. + 2. Sequence of ``(min, max)`` pairs for each element in `x`. + + args : tuple, optional + Any additional fixed parameters needed to completely specify the + objective function. + constraints : {Constraint, dict} or List of {Constraint, dict}, optional + Constraints definition. Only for COBYLA, COBYQA, SLSQP and trust-constr. + See the tutorial [5]_ for further details on specifying constraints. + + .. note:: + + Only COBYLA, COBYQA, SLSQP, and trust-constr local minimize methods + currently support constraint arguments. If the ``constraints`` + sequence used in the local optimization problem is not defined in + ``minimizer_kwargs`` and a constrained method is used then the + global ``constraints`` will be used. + (Defining a ``constraints`` sequence in ``minimizer_kwargs`` + means that ``constraints`` will not be added so if equality + constraints and so forth need to be added then the inequality + functions in ``constraints`` need to be added to + ``minimizer_kwargs`` too). + COBYLA only supports inequality constraints. + + .. versionchanged:: 1.11.0 + + ``constraints`` accepts `NonlinearConstraint`, `LinearConstraint`. + + n : int, optional + Number of sampling points used in the construction of the simplicial + complex. For the default ``simplicial`` sampling method 2**dim + 1 + sampling points are generated instead of the default ``n=100``. For all + other specified values `n` sampling points are generated. For + ``sobol``, ``halton`` and other arbitrary `sampling_methods` ``n=100`` or + another specified number of sampling points are generated. + iters : int, optional + Number of iterations used in the construction of the simplicial + complex. Default is 1. 
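# A minimal sketch, assuming SciPy is installed, of the two documented ways
# to pass ``bounds``: a sequence of (min, max) pairs or a
# `scipy.optimize.Bounds` instance.
from scipy.optimize import Bounds, rosen, shgo

res_pairs = shgo(rosen, [(0, 2)] * 3)
res_bounds = shgo(rosen, Bounds([0, 0, 0], [2, 2, 2]))
print(res_pairs.x, res_bounds.x)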
+ callback : callable, optional + Called after each iteration, as ``callback(xk)``, where ``xk`` is the + current parameter vector. + minimizer_kwargs : dict, optional + Extra keyword arguments to be passed to the minimizer + ``scipy.optimize.minimize``. Some important options could be: + + method : str + The minimization method. If not given, chosen to be one of + BFGS, L-BFGS-B, SLSQP, depending on whether or not the + problem has constraints or bounds. + args : tuple + Extra arguments passed to the objective function (``func``) and + its derivatives (Jacobian, Hessian). + options : dict, optional + Note that by default the tolerance is specified as + ``{ftol: 1e-12}`` + + options : dict, optional + A dictionary of solver options. Many of the options specified for the + global routine are also passed to the ``scipy.optimize.minimize`` + routine. The options that are also passed to the local routine are + marked with "(L)". + + Stopping criteria, the algorithm will terminate if any of the specified + criteria are met. However, the default algorithm does not require any + to be specified: + + maxfev : int (L) + Maximum number of function evaluations in the feasible domain. + (Note only methods that support this option will terminate + the routine at precisely exact specified value. Otherwise the + criterion will only terminate during a global iteration) + f_min : float + Specify the minimum objective function value, if it is known. + f_tol : float + Precision goal for the value of f in the stopping + criterion. Note that the global routine will also + terminate if a sampling point in the global routine is + within this tolerance. + maxiter : int + Maximum number of iterations to perform. + maxev : int + Maximum number of sampling evaluations to perform (includes + searching in infeasible points). + maxtime : float + Maximum processing runtime allowed + minhgrd : int + Minimum homology group rank differential. The homology group of the + objective function is calculated (approximately) during every + iteration. The rank of this group has a one-to-one correspondence + with the number of locally convex subdomains in the objective + function (after adequate sampling points each of these subdomains + contain a unique global minimum). If the difference in the hgr is 0 + between iterations for ``maxhgrd`` specified iterations the + algorithm will terminate. + + Objective function knowledge: + + symmetry : list or bool + Specify if the objective function contains symmetric variables. + The search space (and therefore performance) is decreased by up to + O(n!) times in the fully symmetric case. If `True` is specified + then all variables will be set symmetric to the first variable. + Default + is set to False. + + E.g. f(x) = (x_1 + x_2 + x_3) + (x_4)**2 + (x_5)**2 + (x_6)**2 + + In this equation x_2 and x_3 are symmetric to x_1, while x_5 and + x_6 are symmetric to x_4, this can be specified to the solver as:: + + symmetry = [0, # Variable 1 + 0, # symmetric to variable 1 + 0, # symmetric to variable 1 + 3, # Variable 4 + 3, # symmetric to variable 4 + 3, # symmetric to variable 4 + ] + + jac : bool or callable, optional + Jacobian (gradient) of objective function. Only for CG, BFGS, + Newton-CG, L-BFGS-B, TNC, SLSQP, dogleg, trust-ncg. If ``jac`` is a + boolean and is True, ``fun`` is assumed to return the gradient + along with the objective function. If False, the gradient will be + estimated numerically. ``jac`` can also be a callable returning the + gradient of the objective. 
In this case, it must accept the same + arguments as ``fun``. (Passed to `scipy.optimize.minimize` + automatically) + + hess, hessp : callable, optional + Hessian (matrix of second-order derivatives) of objective function + or Hessian of objective function times an arbitrary vector p. + Only for Newton-CG, dogleg, trust-ncg. Only one of ``hessp`` or + ``hess`` needs to be given. If ``hess`` is provided, then + ``hessp`` will be ignored. If neither ``hess`` nor ``hessp`` is + provided, then the Hessian product will be approximated using + finite differences on ``jac``. ``hessp`` must compute the Hessian + times an arbitrary vector. (Passed to `scipy.optimize.minimize` + automatically) + + Algorithm settings: + + minimize_every_iter : bool + If True then promising global sampling points will be passed to a + local minimization routine every iteration. If True then only the + final minimizer pool will be run. Defaults to True. + + local_iter : int + Only evaluate a few of the best minimizer pool candidates every + iteration. If False all potential points are passed to the local + minimization routine. + + infty_constraints : bool + If True then any sampling points generated which are outside will + the feasible domain will be saved and given an objective function + value of ``inf``. If False then these points will be discarded. + Using this functionality could lead to higher performance with + respect to function evaluations before the global minimum is found, + specifying False will use less memory at the cost of a slight + decrease in performance. Defaults to True. + + Feedback: + + disp : bool (L) + Set to True to print convergence messages. + + sampling_method : str or function, optional + Current built in sampling method options are ``halton``, ``sobol`` and + ``simplicial``. The default ``simplicial`` provides + the theoretical guarantee of convergence to the global minimum in + finite time. ``halton`` and ``sobol`` method are faster in terms of + sampling point generation at the cost of the loss of + guaranteed convergence. It is more appropriate for most "easier" + problems where the convergence is relatively fast. + User defined sampling functions must accept two arguments of ``n`` + sampling points of dimension ``dim`` per call and output an array of + sampling points with shape `n x dim`. + + workers : int or map-like callable, optional + Sample and run the local serial minimizations in parallel. + Supply -1 to use all available CPU cores, or an int to use + that many Processes (uses `multiprocessing.Pool `). + + Alternatively supply a map-like callable, such as + `multiprocessing.Pool.map` for parallel evaluation. + This evaluation is carried out as ``workers(func, iterable)``. + Requires that `func` be pickleable. + + .. versionadded:: 1.11.0 + + Returns + ------- + res : OptimizeResult + The optimization result represented as a `OptimizeResult` object. 
+ Important attributes are: + ``x`` the solution array corresponding to the global minimum, + ``fun`` the function output at the global solution, + ``xl`` an ordered list of local minima solutions, + ``funl`` the function output at the corresponding local solutions, + ``success`` a Boolean flag indicating if the optimizer exited + successfully, + ``message`` which describes the cause of the termination, + ``nfev`` the total number of objective function evaluations including + the sampling calls, + ``nlfev`` the total number of objective function evaluations + culminating from all local search optimizations, + ``nit`` number of iterations performed by the global routine. + + Notes + ----- + Global optimization using simplicial homology global optimization [1]_. + Appropriate for solving general purpose NLP and blackbox optimization + problems to global optimality (low-dimensional problems). + + In general, the optimization problems are of the form:: + + minimize f(x) subject to + + g_i(x) >= 0, i = 1,...,m + h_j(x) = 0, j = 1,...,p + + where x is a vector of one or more variables. ``f(x)`` is the objective + function ``R^n -> R``, ``g_i(x)`` are the inequality constraints, and + ``h_j(x)`` are the equality constraints. + + Optionally, the lower and upper bounds for each element in x can also be + specified using the `bounds` argument. + + While most of the theoretical advantages of SHGO are only proven for when + ``f(x)`` is a Lipschitz smooth function, the algorithm is also proven to + converge to the global optimum for the more general case where ``f(x)`` is + non-continuous, non-convex and non-smooth, if the default sampling method + is used [1]_. + + The local search method may be specified using the ``minimizer_kwargs`` + parameter which is passed on to ``scipy.optimize.minimize``. By default, + the ``SLSQP`` method is used. In general, it is recommended to use the + ``SLSQP``, ``COBYLA``, or ``COBYQA`` local minimization if inequality + constraints are defined for the problem since the other methods do not use + constraints. + + The ``halton`` and ``sobol`` method points are generated using + `scipy.stats.qmc`. Any other QMC method could be used. + + References + ---------- + .. [1] Endres, SC, Sandrock, C, Focke, WW (2018) "A simplicial homology + algorithm for lipschitz optimisation", Journal of Global + Optimization. + .. [2] Joe, SW and Kuo, FY (2008) "Constructing Sobol' sequences with + better two-dimensional projections", SIAM J. Sci. Comput. 30, + 2635-2654. + .. [3] Hock, W and Schittkowski, K (1981) "Test examples for nonlinear + programming codes", Lecture Notes in Economics and Mathematical + Systems, 187. Springer-Verlag, New York. + http://www.ai7.uni-bayreuth.de/test_problem_coll.pdf + .. [4] Wales, DJ (2015) "Perspective: Insight into reaction coordinates and + dynamics from the potential energy landscape", + Journal of Chemical Physics, 142(13), 2015. + .. 
[5] https://docs.scipy.org/doc/scipy/tutorial/optimize.html#constrained-minimization-of-multivariate-scalar-functions-minimize + + Examples + -------- + First consider the problem of minimizing the Rosenbrock function, `rosen`: + + >>> from scipy.optimize import rosen, shgo + >>> bounds = [(0,2), (0, 2), (0, 2), (0, 2), (0, 2)] + >>> result = shgo(rosen, bounds) + >>> result.x, result.fun + (array([1., 1., 1., 1., 1.]), 2.920392374190081e-18) + + Note that bounds determine the dimensionality of the objective + function and is therefore a required input, however you can specify + empty bounds using ``None`` or objects like ``np.inf`` which will be + converted to large float numbers. + + >>> bounds = [(None, None), ]*4 + >>> result = shgo(rosen, bounds) + >>> result.x + array([0.99999851, 0.99999704, 0.99999411, 0.9999882 ]) + + Next, we consider the Eggholder function, a problem with several local + minima and one global minimum. We will demonstrate the use of arguments and + the capabilities of `shgo`. + (https://en.wikipedia.org/wiki/Test_functions_for_optimization) + + >>> import numpy as np + >>> def eggholder(x): + ... return (-(x[1] + 47.0) + ... * np.sin(np.sqrt(abs(x[0]/2.0 + (x[1] + 47.0)))) + ... - x[0] * np.sin(np.sqrt(abs(x[0] - (x[1] + 47.0)))) + ... ) + ... + >>> bounds = [(-512, 512), (-512, 512)] + + `shgo` has built-in low discrepancy sampling sequences. First, we will + input 64 initial sampling points of the *Sobol'* sequence: + + >>> result = shgo(eggholder, bounds, n=64, sampling_method='sobol') + >>> result.x, result.fun + (array([512. , 404.23180824]), -959.6406627208397) + + `shgo` also has a return for any other local minima that was found, these + can be called using: + + >>> result.xl + array([[ 512. , 404.23180824], + [ 283.0759062 , -487.12565635], + [-294.66820039, -462.01964031], + [-105.87688911, 423.15323845], + [-242.97926 , 274.38030925], + [-506.25823477, 6.3131022 ], + [-408.71980731, -156.10116949], + [ 150.23207937, 301.31376595], + [ 91.00920901, -391.283763 ], + [ 202.89662724, -269.38043241], + [ 361.66623976, -106.96493868], + [-219.40612786, -244.06020508]]) + + >>> result.funl + array([-959.64066272, -718.16745962, -704.80659592, -565.99778097, + -559.78685655, -557.36868733, -507.87385942, -493.9605115 , + -426.48799655, -421.15571437, -419.31194957, -410.98477763]) + + These results are useful in applications where there are many global minima + and the values of other global minima are desired or where the local minima + can provide insight into the system (for example morphologies + in physical chemistry [4]_). + + If we want to find a larger number of local minima, we can increase the + number of sampling points or the number of iterations. We'll increase the + number of sampling points to 64 and the number of iterations from the + default of 1 to 3. Using ``simplicial`` this would have given us + 64 x 3 = 192 initial sampling points. + + >>> result_2 = shgo(eggholder, + ... bounds, n=64, iters=3, sampling_method='sobol') + >>> len(result.xl), len(result_2.xl) + (12, 23) + + Note the difference between, e.g., ``n=192, iters=1`` and ``n=64, + iters=3``. + In the first case the promising points contained in the minimiser pool + are processed only once. In the latter case it is processed every 64 + sampling points for a total of 3 times. 
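# A minimal sketch, assuming SciPy is installed, that combines the
# ``minimizer_kwargs`` and ``options`` parameters documented above; the
# objective, constraint, and the 'f_min'/'f_tol' values are illustrative.
# Supplying a known minimum lets the global routine stop once it is reached.
from scipy.optimize import shgo

def f(x):
    return x[0]**2 + x[1]**2

cons = ({'type': 'ineq', 'fun': lambda x: x[0] + x[1] - 1.0},)  # x0 + x1 >= 1
bounds = [(0, 2), (0, 2)]
res = shgo(f, bounds, constraints=cons,
           minimizer_kwargs={'method': 'SLSQP', 'options': {'ftol': 1e-10}},
           options={'f_min': 0.5, 'f_tol': 1e-6})
print(res.x, res.fun)   # minimum of 0.5 at roughly (0.5, 0.5)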
+ + To demonstrate solving problems with non-linear constraints consider the + following example from Hock and Schittkowski problem 73 (cattle-feed) + [3]_:: + + minimize: f = 24.55 * x_1 + 26.75 * x_2 + 39 * x_3 + 40.50 * x_4 + + subject to: 2.3 * x_1 + 5.6 * x_2 + 11.1 * x_3 + 1.3 * x_4 - 5 >= 0, + + 12 * x_1 + 11.9 * x_2 + 41.8 * x_3 + 52.1 * x_4 - 21 + -1.645 * sqrt(0.28 * x_1**2 + 0.19 * x_2**2 + + 20.5 * x_3**2 + 0.62 * x_4**2) >= 0, + + x_1 + x_2 + x_3 + x_4 - 1 == 0, + + 1 >= x_i >= 0 for all i + + The approximate answer given in [3]_ is:: + + f([0.6355216, -0.12e-11, 0.3127019, 0.05177655]) = 29.894378 + + >>> def f(x): # (cattle-feed) + ... return 24.55*x[0] + 26.75*x[1] + 39*x[2] + 40.50*x[3] + ... + >>> def g1(x): + ... return 2.3*x[0] + 5.6*x[1] + 11.1*x[2] + 1.3*x[3] - 5 # >=0 + ... + >>> def g2(x): + ... return (12*x[0] + 11.9*x[1] +41.8*x[2] + 52.1*x[3] - 21 + ... - 1.645 * np.sqrt(0.28*x[0]**2 + 0.19*x[1]**2 + ... + 20.5*x[2]**2 + 0.62*x[3]**2) + ... ) # >=0 + ... + >>> def h1(x): + ... return x[0] + x[1] + x[2] + x[3] - 1 # == 0 + ... + >>> cons = ({'type': 'ineq', 'fun': g1}, + ... {'type': 'ineq', 'fun': g2}, + ... {'type': 'eq', 'fun': h1}) + >>> bounds = [(0, 1.0),]*4 + >>> res = shgo(f, bounds, n=150, constraints=cons) + >>> res + message: Optimization terminated successfully. + success: True + fun: 29.894378159142136 + funl: [ 2.989e+01] + x: [ 6.355e-01 1.137e-13 3.127e-01 5.178e-02] # may vary + xl: [[ 6.355e-01 1.137e-13 3.127e-01 5.178e-02]] # may vary + nit: 1 + nfev: 142 # may vary + nlfev: 35 # may vary + nljev: 5 + nlhev: 0 + + >>> g1(res.x), g2(res.x), h1(res.x) + (-5.062616992290714e-14, -2.9594104944408173e-12, 0.0) + + """ + # if necessary, convert bounds class to old bounds + if isinstance(bounds, Bounds): + bounds = new_bounds_to_old(bounds.lb, bounds.ub, len(bounds.lb)) + + # Initiate SHGO class + # use in context manager to make sure that any parallelization + # resources are freed. + with SHGO(func, bounds, args=args, constraints=constraints, n=n, + iters=iters, callback=callback, + minimizer_kwargs=minimizer_kwargs, + options=options, sampling_method=sampling_method, + workers=workers) as shc: + # Run the algorithm, process results and test success + shc.iterate_all() + + if not shc.break_routine: + if shc.disp: + logging.info("Successfully completed construction of complex.") + + # Test post iterations success + if len(shc.LMC.xl_maps) == 0: + # If sampling failed to find pool, return lowest sampled point + # with a warning + shc.find_lowest_vertex() + shc.break_routine = True + shc.fail_routine(mes="Failed to find a feasible minimizer point. " + f"Lowest sampling point = {shc.f_lowest}") + shc.res.fun = shc.f_lowest + shc.res.x = shc.x_lowest + shc.res.nfev = shc.fn + shc.res.tnev = shc.n_sampled + else: + # Test that the optimal solutions do not violate any constraints + pass # TODO + + # Confirm the routine ran successfully + if not shc.break_routine: + shc.res.message = 'Optimization terminated successfully.' + shc.res.success = True + + # Return the final results + return shc.res + + +class SHGO: + def __init__(self, func, bounds, args=(), constraints=None, n=None, + iters=None, callback=None, minimizer_kwargs=None, + options=None, sampling_method='simplicial', workers=1): + from scipy.stats import qmc + # Input checks + methods = ['halton', 'sobol', 'simplicial'] + if isinstance(sampling_method, str) and sampling_method not in methods: + raise ValueError(("Unknown sampling_method specified." 
+ " Valid methods: {}").format(', '.join(methods))) + + # Split obj func if given with Jac + try: + if ((minimizer_kwargs['jac'] is True) and + (not callable(minimizer_kwargs['jac']))): + self.func = MemoizeJac(func) + jac = self.func.derivative + minimizer_kwargs['jac'] = jac + func = self.func # .fun + else: + self.func = func # Normal definition of objective function + except (TypeError, KeyError): + self.func = func # Normal definition of objective function + + # Initiate class + self.func = _FunctionWrapper(func, args) + self.bounds = bounds + self.args = args + self.callback = callback + + # Bounds + abound = np.array(bounds, float) + self.dim = np.shape(abound)[0] # Dimensionality of problem + + # Set none finite values to large floats + infind = ~np.isfinite(abound) + abound[infind[:, 0], 0] = -1e50 + abound[infind[:, 1], 1] = 1e50 + + # Check if bounds are correctly specified + bnderr = abound[:, 0] > abound[:, 1] + if bnderr.any(): + raise ValueError("Error: lb > ub in bounds " + f"{', '.join(str(b) for b in bnderr)}.") + + self.bounds = abound + + # Constraints + # Process constraint dict sequence: + self.constraints = constraints + if constraints is not None: + self.min_cons = constraints + self.g_cons = [] + self.g_args = [] + + # shgo internals deals with old-style constraints + # self.constraints is used to create Complex, so need + # to be stored internally in old-style. + # `minimize` takes care of normalising these constraints + # for slsqp/cobyla/cobyqa/trust-constr. + self.constraints = standardize_constraints( + constraints, + np.empty(self.dim, float), + 'old' + ) + for cons in self.constraints: + if cons['type'] in ('ineq'): + self.g_cons.append(cons['fun']) + try: + self.g_args.append(cons['args']) + except KeyError: + self.g_args.append(()) + self.g_cons = tuple(self.g_cons) + self.g_args = tuple(self.g_args) + else: + self.g_cons = None + self.g_args = None + + # Define local minimization keyword arguments + # Start with defaults + self.minimizer_kwargs = {'method': 'SLSQP', + 'bounds': self.bounds, + 'options': {}, + 'callback': self.callback + } + if minimizer_kwargs is not None: + # Overwrite with supplied values + self.minimizer_kwargs.update(minimizer_kwargs) + + else: + self.minimizer_kwargs['options'] = {'ftol': 1e-12} + + if ( + self.minimizer_kwargs['method'].lower() in ('slsqp', 'cobyla', + 'cobyqa', + 'trust-constr') + and ( + minimizer_kwargs is not None and + 'constraints' not in minimizer_kwargs and + constraints is not None + ) or + (self.g_cons is not None) + ): + self.minimizer_kwargs['constraints'] = self.min_cons + + # Process options dict + if options is not None: + self.init_options(options) + else: # Default settings: + self.f_min_true = None + self.minimize_every_iter = True + + # Algorithm limits + self.maxiter = None + self.maxfev = None + self.maxev = None + self.maxtime = None + self.f_min_true = None + self.minhgrd = None + + # Objective function knowledge + self.symmetry = None + + # Algorithm functionality + self.infty_cons_sampl = True + self.local_iter = False + + # Feedback + self.disp = False + + # Remove unknown arguments in self.minimizer_kwargs + # Start with arguments all the solvers have in common + self.min_solver_args = ['fun', 'x0', 'args', + 'callback', 'options', 'method'] + # then add the ones unique to specific solvers + solver_args = { + '_custom': ['jac', 'hess', 'hessp', 'bounds', 'constraints'], + 'nelder-mead': [], + 'powell': [], + 'cg': ['jac'], + 'bfgs': ['jac'], + 'newton-cg': ['jac', 'hess', 'hessp'], + 
'l-bfgs-b': ['jac', 'bounds'], + 'tnc': ['jac', 'bounds'], + 'cobyla': ['constraints', 'catol'], + 'cobyqa': ['bounds', 'constraints', 'feasibility_tol'], + 'slsqp': ['jac', 'bounds', 'constraints'], + 'dogleg': ['jac', 'hess'], + 'trust-ncg': ['jac', 'hess', 'hessp'], + 'trust-krylov': ['jac', 'hess', 'hessp'], + 'trust-exact': ['jac', 'hess'], + 'trust-constr': ['jac', 'hess', 'hessp', 'constraints'], + } + method = self.minimizer_kwargs['method'] + self.min_solver_args += solver_args[method.lower()] + + # Only retain the known arguments + def _restrict_to_keys(dictionary, goodkeys): + """Remove keys from dictionary if not in goodkeys - inplace""" + existingkeys = set(dictionary) + for key in existingkeys - set(goodkeys): + dictionary.pop(key, None) + + _restrict_to_keys(self.minimizer_kwargs, self.min_solver_args) + _restrict_to_keys(self.minimizer_kwargs['options'], + self.min_solver_args + ['ftol']) + + # Algorithm controls + # Global controls + self.stop_global = False # Used in the stopping_criteria method + self.break_routine = False # Break the algorithm globally + self.iters = iters # Iterations to be ran + self.iters_done = 0 # Iterations completed + self.n = n # Sampling points per iteration + self.nc = 0 # n # Sampling points to sample in current iteration + self.n_prc = 0 # Processed points (used to track Delaunay iters) + self.n_sampled = 0 # To track no. of sampling points already generated + self.fn = 0 # Number of feasible sampling points evaluations performed + self.hgr = 0 # Homology group rank + # Initially attempt to build the triangulation incrementally: + self.qhull_incremental = True + + # Default settings if no sampling criteria. + if (self.n is None) and (self.iters is None) \ + and (sampling_method == 'simplicial'): + self.n = 2 ** self.dim + 1 + self.nc = 0 # self.n + if self.iters is None: + self.iters = 1 + if (self.n is None) and not (sampling_method == 'simplicial'): + self.n = self.n = 100 + self.nc = 0 # self.n + if (self.n == 100) and (sampling_method == 'simplicial'): + self.n = 2 ** self.dim + 1 + + if not ((self.maxiter is None) and (self.maxfev is None) and ( + self.maxev is None) + and (self.minhgrd is None) and (self.f_min_true is None)): + self.iters = None + + # Set complex construction mode based on a provided stopping criteria: + # Initialise sampling Complex and function cache + # Note that sfield_args=() since args are already wrapped in self.func + # using the_FunctionWrapper class. + self.HC = Complex(dim=self.dim, domain=self.bounds, + sfield=self.func, sfield_args=(), + symmetry=self.symmetry, + constraints=self.constraints, + workers=workers) + + # Choose complex constructor + if sampling_method == 'simplicial': + self.iterate_complex = self.iterate_hypercube + self.sampling_method = sampling_method + + elif sampling_method in ['halton', 'sobol'] or \ + not isinstance(sampling_method, str): + self.iterate_complex = self.iterate_delaunay + # Sampling method used + if sampling_method in ['halton', 'sobol']: + if sampling_method == 'sobol': + self.n = int(2 ** np.ceil(np.log2(self.n))) + # self.n #TODO: Should always be self.n, this is + # unacceptable for shgo, check that nfev behaves as + # expected. 
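+                    # e.g. a request of n=150 is rounded up to
+                    # int(2 ** np.ceil(np.log2(150))) == 256 Sobol' points, so
+                    # the realised number of samples can exceed the request.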
+ self.nc = 0 + self.sampling_method = 'sobol' + self.qmc_engine = qmc.Sobol(d=self.dim, scramble=False, + seed=0) + else: + self.sampling_method = 'halton' + self.qmc_engine = qmc.Halton(d=self.dim, scramble=True, + seed=0) + + def sampling_method(n, d): + return self.qmc_engine.random(n) + + else: + # A user defined sampling method: + self.sampling_method = 'custom' + + self.sampling = self.sampling_custom + self.sampling_function = sampling_method # F(n, d) + + # Local controls + self.stop_l_iter = False # Local minimisation iterations + self.stop_complex_iter = False # Sampling iterations + + # Initiate storage objects used in algorithm classes + self.minimizer_pool = [] + + # Cache of local minimizers mapped + self.LMC = LMapCache() + + # Initialize return object + self.res = OptimizeResult() # scipy.optimize.OptimizeResult object + self.res.nfev = 0 # Includes each sampling point as func evaluation + self.res.nlfev = 0 # Local function evals for all minimisers + self.res.nljev = 0 # Local Jacobian evals for all minimisers + self.res.nlhev = 0 # Local Hessian evals for all minimisers + + # Initiation aids + def init_options(self, options): + """ + Initiates the options. + + Can also be useful to change parameters after class initiation. + + Parameters + ---------- + options : dict + + Returns + ------- + None + + """ + # Update 'options' dict passed to optimize.minimize + # Do this first so we don't mutate `options` below. + self.minimizer_kwargs['options'].update(options) + + # Ensure that 'jac', 'hess', and 'hessp' are passed directly to + # `minimize` as keywords, not as part of its 'options' dictionary. + for opt in ['jac', 'hess', 'hessp']: + if opt in self.minimizer_kwargs['options']: + self.minimizer_kwargs[opt] = ( + self.minimizer_kwargs['options'].pop(opt)) + + # Default settings: + self.minimize_every_iter = options.get('minimize_every_iter', True) + + # Algorithm limits + # Maximum number of iterations to perform. + self.maxiter = options.get('maxiter', None) + # Maximum number of function evaluations in the feasible domain + self.maxfev = options.get('maxfev', None) + # Maximum number of sampling evaluations (includes searching in + # infeasible points + self.maxev = options.get('maxev', None) + # Maximum processing runtime allowed + self.init = time.time() + self.maxtime = options.get('maxtime', None) + if 'f_min' in options: + # Specify the minimum objective function value, if it is known. + self.f_min_true = options['f_min'] + self.f_tol = options.get('f_tol', 1e-4) + else: + self.f_min_true = None + + self.minhgrd = options.get('minhgrd', None) + + # Objective function knowledge + self.symmetry = options.get('symmetry', False) + if self.symmetry: + self.symmetry = [0, ]*len(self.bounds) + else: + self.symmetry = None + # Algorithm functionality + # Only evaluate a few of the best candidates + self.local_iter = options.get('local_iter', False) + self.infty_cons_sampl = options.get('infty_constraints', True) + + # Feedback + self.disp = options.get('disp', False) + + def __enter__(self): + return self + + def __exit__(self, *args): + return self.HC.V._mapwrapper.__exit__(*args) + + # Iteration properties + # Main construction loop: + def iterate_all(self): + """ + Construct for `iters` iterations. + + If uniform sampling is used, every iteration adds 'n' sampling points. + + Iterations if a stopping criteria (e.g., sampling points or + processing time) has been met. 
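+
+        If ``minimize_every_iter`` is False, the minimiser pool is only
+        processed once, after the final iteration.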
+ + """ + if self.disp: + logging.info('Splitting first generation') + + while not self.stop_global: + if self.break_routine: + break + # Iterate complex, process minimisers + self.iterate() + self.stopping_criteria() + + # Build minimiser pool + # Final iteration only needed if pools weren't minimised every + # iteration + if not self.minimize_every_iter: + if not self.break_routine: + self.find_minima() + + self.res.nit = self.iters_done # + 1 + self.fn = self.HC.V.nfev + + def find_minima(self): + """ + Construct the minimizer pool, map the minimizers to local minima + and sort the results into a global return object. + """ + if self.disp: + logging.info('Searching for minimizer pool...') + + self.minimizers() + + if len(self.X_min) != 0: + # Minimize the pool of minimizers with local minimization methods + # Note that if Options['local_iter'] is an `int` instead of default + # value False then only that number of candidates will be minimized + self.minimise_pool(self.local_iter) + # Sort results and build the global return object + self.sort_result() + + # Lowest values used to report in case of failures + self.f_lowest = self.res.fun + self.x_lowest = self.res.x + else: + self.find_lowest_vertex() + + if self.disp: + logging.info(f"Minimiser pool = SHGO.X_min = {self.X_min}") + + def find_lowest_vertex(self): + # Find the lowest objective function value on one of + # the vertices of the simplicial complex + self.f_lowest = np.inf + for x in self.HC.V.cache: + if self.HC.V[x].f < self.f_lowest: + if self.disp: + logging.info(f'self.HC.V[x].f = {self.HC.V[x].f}') + self.f_lowest = self.HC.V[x].f + self.x_lowest = self.HC.V[x].x_a + for lmc in self.LMC.cache: + if self.LMC[lmc].f_min < self.f_lowest: + self.f_lowest = self.LMC[lmc].f_min + self.x_lowest = self.LMC[lmc].x_l + + if self.f_lowest == np.inf: # no feasible point + self.f_lowest = None + self.x_lowest = None + + # Stopping criteria functions: + def finite_iterations(self): + mi = min(x for x in [self.iters, self.maxiter] if x is not None) + if self.disp: + logging.info(f'Iterations done = {self.iters_done} / {mi}') + if self.iters is not None: + if self.iters_done >= (self.iters): + self.stop_global = True + + if self.maxiter is not None: # Stop for infeasible sampling + if self.iters_done >= (self.maxiter): + self.stop_global = True + return self.stop_global + + def finite_fev(self): + # Finite function evals in the feasible domain + if self.disp: + logging.info(f'Function evaluations done = {self.fn} / {self.maxfev}') + if self.fn >= self.maxfev: + self.stop_global = True + return self.stop_global + + def finite_ev(self): + # Finite evaluations including infeasible sampling points + if self.disp: + logging.info(f'Sampling evaluations done = {self.n_sampled} ' + f'/ {self.maxev}') + if self.n_sampled >= self.maxev: + self.stop_global = True + + def finite_time(self): + if self.disp: + logging.info(f'Time elapsed = {time.time() - self.init} ' + f'/ {self.maxtime}') + if (time.time() - self.init) >= self.maxtime: + self.stop_global = True + + def finite_precision(self): + """ + Stop the algorithm if the final function value is known + + Specify in options (with ``self.f_min_true = options['f_min']``) + and the tolerance with ``f_tol = options['f_tol']`` + """ + # If no minimizer has been found use the lowest sampling value + self.find_lowest_vertex() + if self.disp: + logging.info(f'Lowest function evaluation = {self.f_lowest}') + logging.info(f'Specified minimum = {self.f_min_true}') + # If no feasible point was return from 
test + if self.f_lowest is None: + return self.stop_global + + # Function to stop algorithm at specified percentage error: + if self.f_min_true == 0.0: + if self.f_lowest <= self.f_tol: + self.stop_global = True + else: + pe = (self.f_lowest - self.f_min_true) / abs(self.f_min_true) + if self.f_lowest <= self.f_min_true: + self.stop_global = True + # 2if (pe - self.f_tol) <= abs(1.0 / abs(self.f_min_true)): + if abs(pe) >= 2 * self.f_tol: + warnings.warn( + f"A much lower value than expected f* = {self.f_min_true} " + f"was found f_lowest = {self.f_lowest}", + stacklevel=3 + ) + if pe <= self.f_tol: + self.stop_global = True + + return self.stop_global + + def finite_homology_growth(self): + """ + Stop the algorithm if homology group rank did not grow in iteration. + """ + if self.LMC.size == 0: + return # pass on no reason to stop yet. + self.hgrd = self.LMC.size - self.hgr + + self.hgr = self.LMC.size + if self.hgrd <= self.minhgrd: + self.stop_global = True + if self.disp: + logging.info(f'Current homology growth = {self.hgrd} ' + f' (minimum growth = {self.minhgrd})') + return self.stop_global + + def stopping_criteria(self): + """ + Various stopping criteria ran every iteration + + Returns + ------- + stop : bool + """ + if self.maxiter is not None: + self.finite_iterations() + if self.iters is not None: + self.finite_iterations() + if self.maxfev is not None: + self.finite_fev() + if self.maxev is not None: + self.finite_ev() + if self.maxtime is not None: + self.finite_time() + if self.f_min_true is not None: + self.finite_precision() + if self.minhgrd is not None: + self.finite_homology_growth() + return self.stop_global + + def iterate(self): + self.iterate_complex() + + # Build minimizer pool + if self.minimize_every_iter: + if not self.break_routine: + self.find_minima() # Process minimizer pool + + # Algorithm updates + self.iters_done += 1 + + def iterate_hypercube(self): + """ + Iterate a subdivision of the complex + + Note: called with ``self.iterate_complex()`` after class initiation + """ + # Iterate the complex + if self.disp: + logging.info('Constructing and refining simplicial complex graph ' + 'structure') + if self.n is None: + self.HC.refine_all() + self.n_sampled = self.HC.V.size() # nevs counted + else: + self.HC.refine(self.n) + self.n_sampled += self.n + + if self.disp: + logging.info('Triangulation completed, evaluating all constraints ' + 'and objective function values.') + + # Re-add minimisers to complex + if len(self.LMC.xl_maps) > 0: + for xl in self.LMC.cache: + v = self.HC.V[xl] + v_near = v.star() + for v in v.nn: + v_near = v_near.union(v.nn) + # Reconnect vertices to complex + # if self.HC.connect_vertex_non_symm(tuple(self.LMC[xl].x_l), + # near=v_near): + # continue + # else: + # If failure to find in v_near, then search all vertices + # (very expensive operation: + # self.HC.connect_vertex_non_symm(tuple(self.LMC[xl].x_l) + # ) + + # Evaluate all constraints and functions + self.HC.V.process_pools() + if self.disp: + logging.info('Evaluations completed.') + + # feasible sampling points counted by the triangulation.py routines + self.fn = self.HC.V.nfev + return + + def iterate_delaunay(self): + """ + Build a complex of Delaunay triangulated points + + Note: called with ``self.iterate_complex()`` after class initiation + """ + self.nc += self.n + self.sampled_surface(infty_cons_sampl=self.infty_cons_sampl) + + # Add sampled points to a triangulation, construct self.Tri + if self.disp: + logging.info(f'self.n = {self.n}') + logging.info(f'self.nc = 
{self.nc}') + logging.info('Constructing and refining simplicial complex graph ' + 'structure from sampling points.') + + if self.dim < 2: + self.Ind_sorted = np.argsort(self.C, axis=0) + self.Ind_sorted = self.Ind_sorted.flatten() + tris = [] + for ind, ind_s in enumerate(self.Ind_sorted): + if ind > 0: + tris.append(self.Ind_sorted[ind - 1:ind + 1]) + + tris = np.array(tris) + # Store 1D triangulation: + self.Tri = namedtuple('Tri', ['points', 'simplices'])(self.C, tris) + self.points = {} + else: + if self.C.shape[0] > self.dim + 1: # Ensure a simplex can be built + self.delaunay_triangulation(n_prc=self.n_prc) + self.n_prc = self.C.shape[0] + + if self.disp: + logging.info('Triangulation completed, evaluating all ' + 'constraints and objective function values.') + + if hasattr(self, 'Tri'): + self.HC.vf_to_vv(self.Tri.points, self.Tri.simplices) + + # Process all pools + # Evaluate all constraints and functions + if self.disp: + logging.info('Triangulation completed, evaluating all constraints ' + 'and objective function values.') + + # Evaluate all constraints and functions + self.HC.V.process_pools() + if self.disp: + logging.info('Evaluations completed.') + + # feasible sampling points counted by the triangulation.py routines + self.fn = self.HC.V.nfev + self.n_sampled = self.nc # nevs counted in triangulation + return + + # Hypercube minimizers + def minimizers(self): + """ + Returns the indexes of all minimizers + """ + self.minimizer_pool = [] + # Note: Can implement parallelization here + for x in self.HC.V.cache: + in_LMC = False + if len(self.LMC.xl_maps) > 0: + for xlmi in self.LMC.xl_maps: + if np.all(np.array(x) == np.array(xlmi)): + in_LMC = True + if in_LMC: + continue + + if self.HC.V[x].minimiser(): + if self.disp: + logging.info('=' * 60) + logging.info(f'v.x = {self.HC.V[x].x_a} is minimizer') + logging.info(f'v.f = {self.HC.V[x].f} is minimizer') + logging.info('=' * 30) + + if self.HC.V[x] not in self.minimizer_pool: + self.minimizer_pool.append(self.HC.V[x]) + + if self.disp: + logging.info('Neighbors:') + logging.info('=' * 30) + for vn in self.HC.V[x].nn: + logging.info(f'x = {vn.x} || f = {vn.f}') + + logging.info('=' * 60) + self.minimizer_pool_F = [] + self.X_min = [] + # normalized tuple in the Vertex cache + self.X_min_cache = {} # Cache used in hypercube sampling + + for v in self.minimizer_pool: + self.X_min.append(v.x_a) + self.minimizer_pool_F.append(v.f) + self.X_min_cache[tuple(v.x_a)] = v.x + + self.minimizer_pool_F = np.array(self.minimizer_pool_F) + self.X_min = np.array(self.X_min) + + # TODO: Only do this if global mode + self.sort_min_pool() + + return self.X_min + + # Local minimisation + # Minimiser pool processing + def minimise_pool(self, force_iter=False): + """ + This processing method can optionally minimise only the best candidate + solutions in the minimiser pool + + Parameters + ---------- + force_iter : int + Number of starting minimizers to process (can be specified + globally or locally) + + """ + # Find first local minimum + # NOTE: Since we always minimize this value regardless it is a waste to + # build the topograph first before minimizing + lres_f_min = self.minimize(self.X_min[0], ind=self.minimizer_pool[0]) + + # Trim minimized point from current minimizer set + self.trim_min_pool(0) + + while not self.stop_l_iter: + # Global stopping criteria: + self.stopping_criteria() + + # Note first iteration is outside loop: + if force_iter: + force_iter -= 1 + if force_iter == 0: + self.stop_l_iter = True + break + + if 
np.shape(self.X_min)[0] == 0: + self.stop_l_iter = True + break + + # Construct topograph from current minimizer set + # (NOTE: This is a very small topograph using only the minizer pool + # , it might be worth using some graph theory tools instead. + self.g_topograph(lres_f_min.x, self.X_min) + + # Find local minimum at the miniser with the greatest Euclidean + # distance from the current solution + ind_xmin_l = self.Z[:, -1] + lres_f_min = self.minimize(self.Ss[-1, :], self.minimizer_pool[-1]) + + # Trim minimised point from current minimizer set + self.trim_min_pool(ind_xmin_l) + + # Reset controls + self.stop_l_iter = False + return + + def sort_min_pool(self): + # Sort to find minimum func value in min_pool + self.ind_f_min = np.argsort(self.minimizer_pool_F) + self.minimizer_pool = np.array(self.minimizer_pool)[self.ind_f_min] + self.minimizer_pool_F = np.array(self.minimizer_pool_F)[ + self.ind_f_min] + return + + def trim_min_pool(self, trim_ind): + self.X_min = np.delete(self.X_min, trim_ind, axis=0) + self.minimizer_pool_F = np.delete(self.minimizer_pool_F, trim_ind) + self.minimizer_pool = np.delete(self.minimizer_pool, trim_ind) + return + + def g_topograph(self, x_min, X_min): + """ + Returns the topographical vector stemming from the specified value + ``x_min`` for the current feasible set ``X_min`` with True boolean + values indicating positive entries and False values indicating + negative entries. + + """ + x_min = np.array([x_min]) + self.Y = spatial.distance.cdist(x_min, X_min, 'euclidean') + # Find sorted indexes of spatial distances: + self.Z = np.argsort(self.Y, axis=-1) + + self.Ss = X_min[self.Z][0] + self.minimizer_pool = self.minimizer_pool[self.Z] + self.minimizer_pool = self.minimizer_pool[0] + return self.Ss + + # Local bound functions + def construct_lcb_simplicial(self, v_min): + """ + Construct locally (approximately) convex bounds + + Parameters + ---------- + v_min : Vertex object + The minimizer vertex + + Returns + ------- + cbounds : list of lists + List of size dimension with length-2 list of bounds for each + dimension. + + """ + cbounds = [[x_b_i[0], x_b_i[1]] for x_b_i in self.bounds] + # Loop over all bounds + for vn in v_min.nn: + for i, x_i in enumerate(vn.x_a): + # Lower bound + if (x_i < v_min.x_a[i]) and (x_i > cbounds[i][0]): + cbounds[i][0] = x_i + + # Upper bound + if (x_i > v_min.x_a[i]) and (x_i < cbounds[i][1]): + cbounds[i][1] = x_i + + if self.disp: + logging.info(f'cbounds found for v_min.x_a = {v_min.x_a}') + logging.info(f'cbounds = {cbounds}') + + return cbounds + + def construct_lcb_delaunay(self, v_min, ind=None): + """ + Construct locally (approximately) convex bounds + + Parameters + ---------- + v_min : Vertex object + The minimizer vertex + + Returns + ------- + cbounds : list of lists + List of size dimension with length-2 list of bounds for each + dimension. + """ + cbounds = [[x_b_i[0], x_b_i[1]] for x_b_i in self.bounds] + + return cbounds + + # Minimize a starting point locally + def minimize(self, x_min, ind=None): + """ + This function is used to calculate the local minima using the specified + sampling point as a starting value. + + Parameters + ---------- + x_min : vector of floats + Current starting point to minimize. + + Returns + ------- + lres : OptimizeResult + The local optimization result represented as a `OptimizeResult` + object. 
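+
+        Notes
+        -----
+        Results are cached in ``self.LMC``: if ``x_min`` has already been
+        minimised, the cached ``OptimizeResult`` is returned and no new local
+        search is started.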
+ """ + # Use minima maps if vertex was already run + if self.disp: + logging.info(f'Vertex minimiser maps = {self.LMC.v_maps}') + + if self.LMC[x_min].lres is not None: + logging.info(f'Found self.LMC[x_min].lres = ' + f'{self.LMC[x_min].lres}') + return self.LMC[x_min].lres + + if self.callback is not None: + logging.info(f'Callback for minimizer starting at {x_min}:') + + if self.disp: + logging.info(f'Starting minimization at {x_min}...') + + if self.sampling_method == 'simplicial': + x_min_t = tuple(x_min) + # Find the normalized tuple in the Vertex cache: + x_min_t_norm = self.X_min_cache[tuple(x_min_t)] + x_min_t_norm = tuple(x_min_t_norm) + g_bounds = self.construct_lcb_simplicial(self.HC.V[x_min_t_norm]) + if 'bounds' in self.min_solver_args: + self.minimizer_kwargs['bounds'] = g_bounds + logging.info(self.minimizer_kwargs['bounds']) + + else: + g_bounds = self.construct_lcb_delaunay(x_min, ind=ind) + if 'bounds' in self.min_solver_args: + self.minimizer_kwargs['bounds'] = g_bounds + logging.info(self.minimizer_kwargs['bounds']) + + if self.disp and 'bounds' in self.minimizer_kwargs: + logging.info('bounds in kwarg:') + logging.info(self.minimizer_kwargs['bounds']) + + # Local minimization using scipy.optimize.minimize: + lres = minimize(self.func, x_min, **self.minimizer_kwargs) + + if self.disp: + logging.info(f'lres = {lres}') + + # Local function evals for all minimizers + self.res.nlfev += lres.nfev + if 'njev' in lres: + self.res.nljev += lres.njev + if 'nhev' in lres: + self.res.nlhev += lres.nhev + + try: # Needed because of the brain dead 1x1 NumPy arrays + lres.fun = lres.fun[0] + except (IndexError, TypeError): + lres.fun + + # Append minima maps + self.LMC[x_min] + self.LMC.add_res(x_min, lres, bounds=g_bounds) + + return lres + + # Post local minimization processing + def sort_result(self): + """ + Sort results and build the global return object + """ + # Sort results in local minima cache + results = self.LMC.sort_cache_result() + self.res.xl = results['xl'] + self.res.funl = results['funl'] + self.res.x = results['x'] + self.res.fun = results['fun'] + + # Add local func evals to sampling func evals + # Count the number of feasible vertices and add to local func evals: + self.res.nfev = self.fn + self.res.nlfev + return self.res + + # Algorithm controls + def fail_routine(self, mes=("Failed to converge")): + self.break_routine = True + self.res.success = False + self.X_min = [None] + self.res.message = mes + + def sampled_surface(self, infty_cons_sampl=False): + """ + Sample the function surface. + + There are 2 modes, if ``infty_cons_sampl`` is True then the sampled + points that are generated outside the feasible domain will be + assigned an ``inf`` value in accordance with SHGO rules. + This guarantees convergence and usually requires less objective + function evaluations at the computational costs of more Delaunay + triangulation points. + + If ``infty_cons_sampl`` is False, then the infeasible points are + discarded and only a subspace of the sampled points are used. This + comes at the cost of the loss of guaranteed convergence and usually + requires more objective function evaluations. 
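+
+        From `shgo` this mode is controlled through the options dictionary,
+        e.g. ``options={'infty_constraints': False}`` selects the second
+        (discarding) behaviour for every sampling iteration.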
+ """ + # Generate sampling points + if self.disp: + logging.info('Generating sampling points') + self.sampling(self.nc, self.dim) + if len(self.LMC.xl_maps) > 0: + self.C = np.vstack((self.C, np.array(self.LMC.xl_maps))) + if not infty_cons_sampl: + # Find subspace of feasible points + if self.g_cons is not None: + self.sampling_subspace() + + # Sort remaining samples + self.sorted_samples() + + # Find objective function references + self.n_sampled = self.nc + + def sampling_custom(self, n, dim): + """ + Generates uniform sampling points in a hypercube and scales the points + to the bound limits. + """ + # Generate sampling points. + # Generate uniform sample points in [0, 1]^m \subset R^m + if self.n_sampled == 0: + self.C = self.sampling_function(n, dim) + else: + self.C = self.sampling_function(n, dim) + # Distribute over bounds + for i in range(len(self.bounds)): + self.C[:, i] = (self.C[:, i] * + (self.bounds[i][1] - self.bounds[i][0]) + + self.bounds[i][0]) + return self.C + + def sampling_subspace(self): + """Find subspace of feasible points from g_func definition""" + # Subspace of feasible points. + for ind, g in enumerate(self.g_cons): + # C.shape = (Z, dim) where Z is the number of sampling points to + # evaluate and dim is the dimensionality of the problem. + # the constraint function may not be vectorised so have to step + # through each sampling point sequentially. + feasible = np.array( + [np.all(g(x_C, *self.g_args[ind]) >= 0.0) for x_C in self.C], + dtype=bool + ) + self.C = self.C[feasible] + + if self.C.size == 0: + self.res.message = ('No sampling point found within the ' + + 'feasible set. Increasing sampling ' + + 'size.') + # sampling correctly for both 1-D and >1-D cases + if self.disp: + logging.info(self.res.message) + + def sorted_samples(self): # Validated + """Find indexes of the sorted sampling points""" + self.Ind_sorted = np.argsort(self.C, axis=0) + self.Xs = self.C[self.Ind_sorted] + return self.Ind_sorted, self.Xs + + def delaunay_triangulation(self, n_prc=0): + if hasattr(self, 'Tri') and self.qhull_incremental: + # TODO: Uncertain if n_prc needs to add len(self.LMC.xl_maps) + # in self.sampled_surface + self.Tri.add_points(self.C[n_prc:, :]) + else: + try: + self.Tri = spatial.Delaunay(self.C, + incremental=self.qhull_incremental, + ) + except spatial.QhullError: + if str(sys.exc_info()[1])[:6] == 'QH6239': + logging.warning('QH6239 Qhull precision error detected, ' + 'this usually occurs when no bounds are ' + 'specified, Qhull can only run with ' + 'handling cocircular/cospherical points' + ' and in this case incremental mode is ' + 'switched off. 
The performance of shgo ' + 'will be reduced in this mode.') + self.qhull_incremental = False + self.Tri = spatial.Delaunay(self.C, + incremental= + self.qhull_incremental) + else: + raise + + return self.Tri + + +class LMap: + def __init__(self, v): + self.v = v + self.x_l = None + self.lres = None + self.f_min = None + self.lbounds = [] + + +class LMapCache: + def __init__(self): + self.cache = {} + + # Lists for search queries + self.v_maps = [] + self.xl_maps = [] + self.xl_maps_set = set() + self.f_maps = [] + self.lbound_maps = [] + self.size = 0 + + def __getitem__(self, v): + try: + v = np.ndarray.tolist(v) + except TypeError: + pass + v = tuple(v) + try: + return self.cache[v] + except KeyError: + xval = LMap(v) + self.cache[v] = xval + + return self.cache[v] + + def add_res(self, v, lres, bounds=None): + v = np.ndarray.tolist(v) + v = tuple(v) + self.cache[v].x_l = lres.x + self.cache[v].lres = lres + self.cache[v].f_min = lres.fun + self.cache[v].lbounds = bounds + + # Update cache size + self.size += 1 + + # Cache lists for search queries + self.v_maps.append(v) + self.xl_maps.append(lres.x) + self.xl_maps_set.add(tuple(lres.x)) + self.f_maps.append(lres.fun) + self.lbound_maps.append(bounds) + + def sort_cache_result(self): + """ + Sort results and build the global return object + """ + results = {} + # Sort results and save + self.xl_maps = np.array(self.xl_maps) + self.f_maps = np.array(self.f_maps) + + # Sorted indexes in Func_min + ind_sorted = np.argsort(self.f_maps) + + # Save ordered list of minima + results['xl'] = self.xl_maps[ind_sorted] # Ordered x vals + self.f_maps = np.array(self.f_maps) + results['funl'] = self.f_maps[ind_sorted] + results['funl'] = results['funl'].T + + # Find global of all minimizers + results['x'] = self.xl_maps[ind_sorted[0]] # Save global minima + results['fun'] = self.f_maps[ind_sorted[0]] # Save global fun value + + self.xl_maps = np.ndarray.tolist(self.xl_maps) + self.f_maps = np.ndarray.tolist(self.f_maps) + return results diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/__init__.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/__pycache__/__init__.cpython-310.pyc b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b1b47e3ab8a76338b873b0af1a916d3530ec16c4 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/__pycache__/__init__.cpython-310.pyc differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/__pycache__/_complex.cpython-310.pyc b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/__pycache__/_complex.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4f78c1e2149e61ae128cae3f4382e0d921ec7100 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/__pycache__/_complex.cpython-310.pyc differ diff --git 
a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/__pycache__/_vertex.cpython-310.pyc b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/__pycache__/_vertex.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..39e2a12a51ec31c34850e1dac67b1f0307d3b7f4 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/__pycache__/_vertex.cpython-310.pyc differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/_complex.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/_complex.py new file mode 100644 index 0000000000000000000000000000000000000000..178e9daea2d03e335a6901483f574d7453fb9340 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/_complex.py @@ -0,0 +1,1225 @@ +"""Base classes for low memory simplicial complex structures.""" +import copy +import logging +import itertools +import decimal +from functools import cache + +import numpy as np + +from ._vertex import (VertexCacheField, VertexCacheIndex) + + +class Complex: + """ + Base class for a simplicial complex described as a cache of vertices + together with their connections. + + Important methods: + Domain triangulation: + Complex.triangulate, Complex.split_generation + Triangulating arbitrary points (must be traingulable, + may exist outside domain): + Complex.triangulate(sample_set) + Converting another simplicial complex structure data type to the + structure used in Complex (ex. OBJ wavefront) + Complex.convert(datatype, data) + + Important objects: + HC.V: The cache of vertices and their connection + HC.H: Storage structure of all vertex groups + + Parameters + ---------- + dim : int + Spatial dimensionality of the complex R^dim + domain : list of tuples, optional + The bounds [x_l, x_u]^dim of the hyperrectangle space + ex. The default domain is the hyperrectangle [0, 1]^dim + Note: The domain must be convex, non-convex spaces can be cut + away from this domain using the non-linear + g_cons functions to define any arbitrary domain + (these domains may also be disconnected from each other) + sfield : + A scalar function defined in the associated domain f: R^dim --> R + sfield_args : tuple + Additional arguments to be passed to `sfield` + vfield : + A scalar function defined in the associated domain + f: R^dim --> R^m + (for example a gradient function of the scalar field) + vfield_args : tuple + Additional arguments to be passed to vfield + symmetry : None or list + Specify if the objective function contains symmetric variables. + The search space (and therefore performance) is decreased by up to + O(n!) times in the fully symmetric case. + + E.g. f(x) = (x_1 + x_2 + x_3) + (x_4)**2 + (x_5)**2 + (x_6)**2 + + In this equation x_2 and x_3 are symmetric to x_1, while x_5 and + x_6 are symmetric to x_4, this can be specified to the solver as: + + symmetry = [0, # Variable 1 + 0, # symmetric to variable 1 + 0, # symmetric to variable 1 + 3, # Variable 4 + 3, # symmetric to variable 4 + 3, # symmetric to variable 4 + ] + + constraints : dict or sequence of dict, optional + Constraints definition. 
+ Function(s) ``R**n`` in the form:: + + g(x) <= 0 applied as g : R^n -> R^m + h(x) == 0 applied as h : R^n -> R^p + + Each constraint is defined in a dictionary with fields: + + type : str + Constraint type: 'eq' for equality, 'ineq' for inequality. + fun : callable + The function defining the constraint. + jac : callable, optional + The Jacobian of `fun` (only for SLSQP). + args : sequence, optional + Extra arguments to be passed to the function and Jacobian. + + Equality constraint means that the constraint function result is to + be zero whereas inequality means that it is to be + non-negative.constraints : dict or sequence of dict, optional + Constraints definition. + Function(s) ``R**n`` in the form:: + + g(x) <= 0 applied as g : R^n -> R^m + h(x) == 0 applied as h : R^n -> R^p + + Each constraint is defined in a dictionary with fields: + + type : str + Constraint type: 'eq' for equality, 'ineq' for inequality. + fun : callable + The function defining the constraint. + jac : callable, optional + The Jacobian of `fun` (unused). + args : sequence, optional + Extra arguments to be passed to the function and Jacobian. + + Equality constraint means that the constraint function result is to + be zero whereas inequality means that it is to be non-negative. + + workers : int optional + Uses `multiprocessing.Pool `) to compute the field + functions in parallel. + """ + def __init__(self, dim, domain=None, sfield=None, sfield_args=(), + symmetry=None, constraints=None, workers=1): + self.dim = dim + + # Domains + self.domain = domain + if domain is None: + self.bounds = [(0.0, 1.0), ] * dim + else: + self.bounds = domain + self.symmetry = symmetry + # here in init to avoid if checks + + # Field functions + self.sfield = sfield + self.sfield_args = sfield_args + + # Process constraints + # Constraints + # Process constraint dict sequence: + if constraints is not None: + self.min_cons = constraints + self.g_cons = [] + self.g_args = [] + if not isinstance(constraints, (tuple, list)): + constraints = (constraints,) + + for cons in constraints: + if cons['type'] in ('ineq'): + self.g_cons.append(cons['fun']) + try: + self.g_args.append(cons['args']) + except KeyError: + self.g_args.append(()) + self.g_cons = tuple(self.g_cons) + self.g_args = tuple(self.g_args) + else: + self.g_cons = None + self.g_args = None + + # Homology properties + self.gen = 0 + self.perm_cycle = 0 + + # Every cell is stored in a list of its generation, + # ex. the initial cell is stored in self.H[0] + # 1st get new cells are stored in self.H[1] etc. 
+ # When a cell is sub-generated it is removed from this list + + self.H = [] # Storage structure of vertex groups + + # Cache of all vertices + if (sfield is not None) or (self.g_cons is not None): + # Initiate a vertex cache and an associated field cache, note that + # the field case is always initiated inside the vertex cache if an + # associated field scalar field is defined: + if sfield is not None: + self.V = VertexCacheField(field=sfield, field_args=sfield_args, + g_cons=self.g_cons, + g_cons_args=self.g_args, + workers=workers) + elif self.g_cons is not None: + self.V = VertexCacheField(field=sfield, field_args=sfield_args, + g_cons=self.g_cons, + g_cons_args=self.g_args, + workers=workers) + else: + self.V = VertexCacheIndex() + + self.V_non_symm = [] # List of non-symmetric vertices + + def __call__(self): + return self.H + + # %% Triangulation methods + def cyclic_product(self, bounds, origin, supremum, centroid=True): + """Generate initial triangulation using cyclic product""" + # Define current hyperrectangle + vot = tuple(origin) + vut = tuple(supremum) # Hyperrectangle supremum + self.V[vot] + vo = self.V[vot] + yield vo.x + self.V[vut].connect(self.V[vot]) + yield vut + # Cyclic group approach with second x_l --- x_u operation. + + # These containers store the "lower" and "upper" vertices + # corresponding to the origin or supremum of every C2 group. + # It has the structure of `dim` times embedded lists each containing + # these vertices as the entire complex grows. Bounds[0] has to be done + # outside the loops before we have symmetric containers. + # NOTE: This means that bounds[0][1] must always exist + C0x = [[self.V[vot]]] + a_vo = copy.copy(list(origin)) + a_vo[0] = vut[0] # Update aN Origin + a_vo = self.V[tuple(a_vo)] + # self.V[vot].connect(self.V[tuple(a_vo)]) + self.V[vot].connect(a_vo) + yield a_vo.x + C1x = [[a_vo]] + # C1x = [[self.V[tuple(a_vo)]]] + ab_C = [] # Container for a + b operations + + # Loop over remaining bounds + for i, x in enumerate(bounds[1:]): + # Update lower and upper containers + C0x.append([]) + C1x.append([]) + # try to access a second bound (if not, C1 is symmetric) + try: + # Early try so that we don't have to copy the cache before + # moving on to next C1/C2: Try to add the operation of a new + # C2 product by accessing the upper bound + x[1] + # Copy lists for iteration + cC0x = [x[:] for x in C0x[:i + 1]] + cC1x = [x[:] for x in C1x[:i + 1]] + for j, (VL, VU) in enumerate(zip(cC0x, cC1x)): + for k, (vl, vu) in enumerate(zip(VL, VU)): + # Build aN vertices for each lower-upper pair in N: + a_vl = list(vl.x) + a_vu = list(vu.x) + a_vl[i + 1] = vut[i + 1] + a_vu[i + 1] = vut[i + 1] + a_vl = self.V[tuple(a_vl)] + + # Connect vertices in N to corresponding vertices + # in aN: + vl.connect(a_vl) + + yield a_vl.x + + a_vu = self.V[tuple(a_vu)] + # Connect vertices in N to corresponding vertices + # in aN: + vu.connect(a_vu) + + # Connect new vertex pair in aN: + a_vl.connect(a_vu) + + # Connect lower pair to upper (triangulation + # operation of a + b (two arbitrary operations): + vl.connect(a_vu) + ab_C.append((vl, a_vu)) + + # Update the containers + C0x[i + 1].append(vl) + C0x[i + 1].append(vu) + C1x[i + 1].append(a_vl) + C1x[i + 1].append(a_vu) + + # Update old containers + C0x[j].append(a_vl) + C1x[j].append(a_vu) + + # Yield new points + yield a_vu.x + + # Try to connect aN lower source of previous a + b + # operation with a aN vertex + ab_Cc = copy.copy(ab_C) + + for vp in ab_Cc: + b_v = list(vp[0].x) + ab_v = list(vp[1].x) + b_v[i 
+ 1] = vut[i + 1] + ab_v[i + 1] = vut[i + 1] + b_v = self.V[tuple(b_v)] # b + vl + ab_v = self.V[tuple(ab_v)] # b + a_vl + # Note o---o is already connected + vp[0].connect(ab_v) # o-s + b_v.connect(ab_v) # s-s + + # Add new list of cross pairs + ab_C.append((vp[0], ab_v)) + ab_C.append((b_v, ab_v)) + + except IndexError: + cC0x = C0x[i] + cC1x = C1x[i] + VL, VU = cC0x, cC1x + for k, (vl, vu) in enumerate(zip(VL, VU)): + # Build aN vertices for each lower-upper pair in N: + a_vu = list(vu.x) + a_vu[i + 1] = vut[i + 1] + # Connect vertices in N to corresponding vertices + # in aN: + a_vu = self.V[tuple(a_vu)] + # Connect vertices in N to corresponding vertices + # in aN: + vu.connect(a_vu) + # Connect new vertex pair in aN: + # a_vl.connect(a_vu) + # Connect lower pair to upper (triangulation + # operation of a + b (two arbitrary operations): + vl.connect(a_vu) + ab_C.append((vl, a_vu)) + C0x[i + 1].append(vu) + C1x[i + 1].append(a_vu) + # Yield new points + a_vu.connect(self.V[vut]) + yield a_vu.x + ab_Cc = copy.copy(ab_C) + for vp in ab_Cc: + if vp[1].x[i] == vut[i]: + ab_v = list(vp[1].x) + ab_v[i + 1] = vut[i + 1] + ab_v = self.V[tuple(ab_v)] # b + a_vl + # Note o---o is already connected + vp[0].connect(ab_v) # o-s + + # Add new list of cross pairs + ab_C.append((vp[0], ab_v)) + + # Clean class trash + try: + del C0x + del cC0x + del C1x + del cC1x + del ab_C + del ab_Cc + except UnboundLocalError: + pass + + # Extra yield to ensure that the triangulation is completed + if centroid: + vo = self.V[vot] + vs = self.V[vut] + # Disconnect the origin and supremum + vo.disconnect(vs) + # Build centroid + vc = self.split_edge(vot, vut) + for v in vo.nn: + v.connect(vc) + yield vc.x + return vc.x + else: + yield vut + return vut + + def triangulate(self, n=None, symmetry=None, centroid=True, + printout=False): + """ + Triangulate the initial domain, if n is not None then a limited number + of points will be generated + + Parameters + ---------- + n : int, Number of points to be sampled. + symmetry : + + Ex. Dictionary/hashtable + f(x) = (x_1 + x_2 + x_3) + (x_4)**2 + (x_5)**2 + (x_6)**2 + + symmetry = symmetry[0]: 0, # Variable 1 + symmetry[1]: 0, # symmetric to variable 1 + symmetry[2]: 0, # symmetric to variable 1 + symmetry[3]: 3, # Variable 4 + symmetry[4]: 3, # symmetric to variable 4 + symmetry[5]: 3, # symmetric to variable 4 + } + centroid : bool, if True add a central point to the hypercube + printout : bool, if True print out results + + NOTES: + ------ + Rather than using the combinatorial algorithm to connect vertices we + make the following observation: + + The bound pairs are similar a C2 cyclic group and the structure is + formed using the cartesian product: + + H = C2 x C2 x C2 ... x C2 (dim times) + + So construct any normal subgroup N and consider H/N first, we connect + all vertices within N (ex. N is C2 (the first dimension), then we move + to a left coset aN (an operation moving around the defined H/N group by + for example moving from the lower bound in C2 (dimension 2) to the + higher bound in C2. During this operation connection all the vertices. + Now repeat the N connections. Note that these elements can be connected + in parallel. 
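+
+        For example, in two dimensions H = C2 x C2 gives the 4 corners of the
+        hyperrectangle; with ``centroid=True`` a central vertex is added as
+        well, which is where the default ``n = 2**dim + 1`` starting points of
+        the 'simplicial' sampling method come from.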
+ """ + # Inherit class arguments + if symmetry is None: + symmetry = self.symmetry + # Build origin and supremum vectors + origin = [i[0] for i in self.bounds] + self.origin = origin + supremum = [i[1] for i in self.bounds] + + self.supremum = supremum + + if symmetry is None: + cbounds = self.bounds + else: + cbounds = copy.copy(self.bounds) + for i, j in enumerate(symmetry): + if i is not j: + # pop second entry on second symmetry vars + cbounds[i] = [self.bounds[symmetry[i]][0]] + # Sole (first) entry is the sup value and there is no + # origin: + cbounds[i] = [self.bounds[symmetry[i]][1]] + if (self.bounds[symmetry[i]] is not + self.bounds[symmetry[j]]): + logging.warning(f"Variable {i} was specified as " + f"symmetric to variable {j}, however" + f", the bounds {i} =" + f" {self.bounds[symmetry[i]]} and {j}" + f" =" + f" {self.bounds[symmetry[j]]} do not " + f"match, the mismatch was ignored in " + f"the initial triangulation.") + cbounds[i] = self.bounds[symmetry[j]] + + if n is None: + # Build generator + self.cp = self.cyclic_product(cbounds, origin, supremum, centroid) + for i in self.cp: + i + + try: + self.triangulated_vectors.append((tuple(self.origin), + tuple(self.supremum))) + except (AttributeError, KeyError): + self.triangulated_vectors = [(tuple(self.origin), + tuple(self.supremum))] + + else: + # Check if generator already exists + try: + self.cp + except (AttributeError, KeyError): + self.cp = self.cyclic_product(cbounds, origin, supremum, + centroid) + + try: + while len(self.V.cache) < n: + next(self.cp) + except StopIteration: + try: + self.triangulated_vectors.append((tuple(self.origin), + tuple(self.supremum))) + except (AttributeError, KeyError): + self.triangulated_vectors = [(tuple(self.origin), + tuple(self.supremum))] + + if printout: + # for v in self.C0(): + # v.print_out() + for v in self.V.cache: + self.V[v].print_out() + + return + + def refine(self, n=1): + if n is None: + try: + self.triangulated_vectors + self.refine_all() + return + except AttributeError as ae: + if str(ae) == "'Complex' object has no attribute " \ + "'triangulated_vectors'": + self.triangulate(symmetry=self.symmetry) + return + else: + raise + + nt = len(self.V.cache) + n # Target number of total vertices + # In the outer while loop we iterate until we have added an extra `n` + # vertices to the complex: + while len(self.V.cache) < nt: # while loop 1 + try: # try 1 + # Try to access triangulated_vectors, this should only be + # defined if an initial triangulation has already been + # performed: + self.triangulated_vectors + # Try a usual iteration of the current generator, if it + # does not exist or is exhausted then produce a new generator + try: # try 2 + next(self.rls) + except (AttributeError, StopIteration, KeyError): + vp = self.triangulated_vectors[0] + self.rls = self.refine_local_space(*vp, bounds=self.bounds) + next(self.rls) + + except (AttributeError, KeyError): + # If an initial triangulation has not been completed, then + # we start/continue the initial triangulation targeting `nt` + # vertices, if nt is greater than the initial number of + # vertices then the `refine` routine will move back to try 1. 
+ self.triangulate(nt, self.symmetry) + return + + def refine_all(self, centroids=True): + """Refine the entire domain of the current complex.""" + try: + self.triangulated_vectors + tvs = copy.copy(self.triangulated_vectors) + for i, vp in enumerate(tvs): + self.rls = self.refine_local_space(*vp, bounds=self.bounds) + for i in self.rls: + i + except AttributeError as ae: + if str(ae) == "'Complex' object has no attribute " \ + "'triangulated_vectors'": + self.triangulate(symmetry=self.symmetry, centroid=centroids) + else: + raise + + # This adds a centroid to every new sub-domain generated and defined + # by self.triangulated_vectors, in addition the vertices ! to complete + # the triangulation + return + + def refine_local_space(self, origin, supremum, bounds, centroid=1): + # Copy for later removal + origin_c = copy.copy(origin) + supremum_c = copy.copy(supremum) + + # Initiate local variables redefined in later inner `for` loop: + vl, vu, a_vu = None, None, None + + # Change the vector orientation so that it is only increasing + s_ov = list(origin) + s_origin = list(origin) + s_sv = list(supremum) + s_supremum = list(supremum) + for i, vi in enumerate(s_origin): + if s_ov[i] > s_sv[i]: + s_origin[i] = s_sv[i] + s_supremum[i] = s_ov[i] + + vot = tuple(s_origin) + vut = tuple(s_supremum) # Hyperrectangle supremum + + vo = self.V[vot] # initiate if doesn't exist yet + vs = self.V[vut] + # Start by finding the old centroid of the new space: + vco = self.split_edge(vo.x, vs.x) # Split in case not centroid arg + + # Find set of extreme vertices in current local space + sup_set = copy.copy(vco.nn) + # Cyclic group approach with second x_l --- x_u operation. + + # These containers store the "lower" and "upper" vertices + # corresponding to the origin or supremum of every C2 group. + # It has the structure of `dim` times embedded lists each containing + # these vertices as the entire complex grows. Bounds[0] has to be done + # outside the loops before we have symmetric containers. + # NOTE: This means that bounds[0][1] must always exist + + a_vl = copy.copy(list(vot)) + a_vl[0] = vut[0] # Update aN Origin + if tuple(a_vl) not in self.V.cache: + vo = self.V[vot] # initiate if doesn't exist yet + vs = self.V[vut] + # Start by finding the old centroid of the new space: + vco = self.split_edge(vo.x, vs.x) # Split in case not centroid arg + + # Find set of extreme vertices in current local space + sup_set = copy.copy(vco.nn) + a_vl = copy.copy(list(vot)) + a_vl[0] = vut[0] # Update aN Origin + a_vl = self.V[tuple(a_vl)] + else: + a_vl = self.V[tuple(a_vl)] + + c_v = self.split_edge(vo.x, a_vl.x) + c_v.connect(vco) + yield c_v.x + Cox = [[vo]] + Ccx = [[c_v]] + Cux = [[a_vl]] + ab_C = [] # Container for a + b operations + s_ab_C = [] # Container for symmetric a + b operations + + # Loop over remaining bounds + for i, x in enumerate(bounds[1:]): + # Update lower and upper containers + Cox.append([]) + Ccx.append([]) + Cux.append([]) + # try to access a second bound (if not, C1 is symmetric) + try: + t_a_vl = list(vot) + t_a_vl[i + 1] = vut[i + 1] + + # New: lists are used anyway, so copy all + # %% + # Copy lists for iteration + cCox = [x[:] for x in Cox[:i + 1]] + cCcx = [x[:] for x in Ccx[:i + 1]] + cCux = [x[:] for x in Cux[:i + 1]] + # Try to connect aN lower source of previous a + b + # operation with a aN vertex + ab_Cc = copy.copy(ab_C) # NOTE: We append ab_C in the + # (VL, VC, VU) for-loop, but we use the copy of the list in the + # ab_Cc for-loop. 
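+                # The symmetric a + b container is snapshotted here for what
+                # appears to be the same reason as ab_Cc above: iterate over a
+                # copy while new tuples may be appended during this pass.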
+ s_ab_Cc = copy.copy(s_ab_C) + + # Early try so that we don't have to copy the cache before + # moving on to next C1/C2: Try to add the operation of a new + # C2 product by accessing the upper bound + if tuple(t_a_vl) not in self.V.cache: + # Raise error to continue symmetric refine + raise IndexError + t_a_vu = list(vut) + t_a_vu[i + 1] = vut[i + 1] + if tuple(t_a_vu) not in self.V.cache: + # Raise error to continue symmetric refine: + raise IndexError + + for vectors in s_ab_Cc: + # s_ab_C.append([c_vc, vl, vu, a_vu]) + bc_vc = list(vectors[0].x) + b_vl = list(vectors[1].x) + b_vu = list(vectors[2].x) + ba_vu = list(vectors[3].x) + + bc_vc[i + 1] = vut[i + 1] + b_vl[i + 1] = vut[i + 1] + b_vu[i + 1] = vut[i + 1] + ba_vu[i + 1] = vut[i + 1] + + bc_vc = self.V[tuple(bc_vc)] + bc_vc.connect(vco) # NOTE: Unneeded? + yield bc_vc + + # Split to centre, call this centre group "d = 0.5*a" + d_bc_vc = self.split_edge(vectors[0].x, bc_vc.x) + d_bc_vc.connect(bc_vc) + d_bc_vc.connect(vectors[1]) # Connect all to centroid + d_bc_vc.connect(vectors[2]) # Connect all to centroid + d_bc_vc.connect(vectors[3]) # Connect all to centroid + yield d_bc_vc.x + b_vl = self.V[tuple(b_vl)] + bc_vc.connect(b_vl) # Connect aN cross pairs + d_bc_vc.connect(b_vl) # Connect all to centroid + + yield b_vl + b_vu = self.V[tuple(b_vu)] + bc_vc.connect(b_vu) # Connect aN cross pairs + d_bc_vc.connect(b_vu) # Connect all to centroid + + b_vl_c = self.split_edge(b_vu.x, b_vl.x) + bc_vc.connect(b_vl_c) + + yield b_vu + ba_vu = self.V[tuple(ba_vu)] + bc_vc.connect(ba_vu) # Connect aN cross pairs + d_bc_vc.connect(ba_vu) # Connect all to centroid + + # Split the a + b edge of the initial triangulation: + os_v = self.split_edge(vectors[1].x, ba_vu.x) # o-s + ss_v = self.split_edge(b_vl.x, ba_vu.x) # s-s + b_vu_c = self.split_edge(b_vu.x, ba_vu.x) + bc_vc.connect(b_vu_c) + yield os_v.x # often equal to vco, but not always + yield ss_v.x # often equal to bc_vu, but not always + yield ba_vu + # Split remaining to centre, call this centre group + # "d = 0.5*a" + d_bc_vc = self.split_edge(vectors[0].x, bc_vc.x) + d_bc_vc.connect(vco) # NOTE: Unneeded? + yield d_bc_vc.x + d_b_vl = self.split_edge(vectors[1].x, b_vl.x) + d_bc_vc.connect(vco) # NOTE: Unneeded? + d_bc_vc.connect(d_b_vl) # Connect dN cross pairs + yield d_b_vl.x + d_b_vu = self.split_edge(vectors[2].x, b_vu.x) + d_bc_vc.connect(vco) # NOTE: Unneeded? + d_bc_vc.connect(d_b_vu) # Connect dN cross pairs + yield d_b_vu.x + d_ba_vu = self.split_edge(vectors[3].x, ba_vu.x) + d_bc_vc.connect(vco) # NOTE: Unneeded? + d_bc_vc.connect(d_ba_vu) # Connect dN cross pairs + yield d_ba_vu + + # comb = [c_vc, vl, vu, a_vl, a_vu, + # bc_vc, b_vl, b_vu, ba_vl, ba_vu] + comb = [vl, vu, a_vu, + b_vl, b_vu, ba_vu] + comb_iter = itertools.combinations(comb, 2) + for vecs in comb_iter: + self.split_edge(vecs[0].x, vecs[1].x) + # Add new list of cross pairs + ab_C.append((d_bc_vc, vectors[1], b_vl, a_vu, ba_vu)) + ab_C.append((d_bc_vc, vl, b_vl, a_vu, ba_vu)) # = prev + + for vectors in ab_Cc: + bc_vc = list(vectors[0].x) + b_vl = list(vectors[1].x) + b_vu = list(vectors[2].x) + ba_vl = list(vectors[3].x) + ba_vu = list(vectors[4].x) + bc_vc[i + 1] = vut[i + 1] + b_vl[i + 1] = vut[i + 1] + b_vu[i + 1] = vut[i + 1] + ba_vl[i + 1] = vut[i + 1] + ba_vu[i + 1] = vut[i + 1] + bc_vc = self.V[tuple(bc_vc)] + bc_vc.connect(vco) # NOTE: Unneeded? 
+ yield bc_vc + + # Split to centre, call this centre group "d = 0.5*a" + d_bc_vc = self.split_edge(vectors[0].x, bc_vc.x) + d_bc_vc.connect(bc_vc) + d_bc_vc.connect(vectors[1]) # Connect all to centroid + d_bc_vc.connect(vectors[2]) # Connect all to centroid + d_bc_vc.connect(vectors[3]) # Connect all to centroid + d_bc_vc.connect(vectors[4]) # Connect all to centroid + yield d_bc_vc.x + b_vl = self.V[tuple(b_vl)] + bc_vc.connect(b_vl) # Connect aN cross pairs + d_bc_vc.connect(b_vl) # Connect all to centroid + yield b_vl + b_vu = self.V[tuple(b_vu)] + bc_vc.connect(b_vu) # Connect aN cross pairs + d_bc_vc.connect(b_vu) # Connect all to centroid + yield b_vu + ba_vl = self.V[tuple(ba_vl)] + bc_vc.connect(ba_vl) # Connect aN cross pairs + d_bc_vc.connect(ba_vl) # Connect all to centroid + self.split_edge(b_vu.x, ba_vl.x) + yield ba_vl + ba_vu = self.V[tuple(ba_vu)] + bc_vc.connect(ba_vu) # Connect aN cross pairs + d_bc_vc.connect(ba_vu) # Connect all to centroid + # Split the a + b edge of the initial triangulation: + os_v = self.split_edge(vectors[1].x, ba_vu.x) # o-s + ss_v = self.split_edge(b_vl.x, ba_vu.x) # s-s + yield os_v.x # often equal to vco, but not always + yield ss_v.x # often equal to bc_vu, but not always + yield ba_vu + # Split remaining to centre, call this centre group + # "d = 0.5*a" + d_bc_vc = self.split_edge(vectors[0].x, bc_vc.x) + d_bc_vc.connect(vco) # NOTE: Unneeded? + yield d_bc_vc.x + d_b_vl = self.split_edge(vectors[1].x, b_vl.x) + d_bc_vc.connect(vco) # NOTE: Unneeded? + d_bc_vc.connect(d_b_vl) # Connect dN cross pairs + yield d_b_vl.x + d_b_vu = self.split_edge(vectors[2].x, b_vu.x) + d_bc_vc.connect(vco) # NOTE: Unneeded? + d_bc_vc.connect(d_b_vu) # Connect dN cross pairs + yield d_b_vu.x + d_ba_vl = self.split_edge(vectors[3].x, ba_vl.x) + d_bc_vc.connect(vco) # NOTE: Unneeded? + d_bc_vc.connect(d_ba_vl) # Connect dN cross pairs + yield d_ba_vl + d_ba_vu = self.split_edge(vectors[4].x, ba_vu.x) + d_bc_vc.connect(vco) # NOTE: Unneeded? 
+ d_bc_vc.connect(d_ba_vu) # Connect dN cross pairs + yield d_ba_vu + c_vc, vl, vu, a_vl, a_vu = vectors + + comb = [vl, vu, a_vl, a_vu, + b_vl, b_vu, ba_vl, ba_vu] + comb_iter = itertools.combinations(comb, 2) + for vecs in comb_iter: + self.split_edge(vecs[0].x, vecs[1].x) + + # Add new list of cross pairs + ab_C.append((bc_vc, b_vl, b_vu, ba_vl, ba_vu)) + ab_C.append((d_bc_vc, d_b_vl, d_b_vu, d_ba_vl, d_ba_vu)) + ab_C.append((d_bc_vc, vectors[1], b_vl, a_vu, ba_vu)) + ab_C.append((d_bc_vc, vu, b_vu, a_vl, ba_vl)) + + for j, (VL, VC, VU) in enumerate(zip(cCox, cCcx, cCux)): + for k, (vl, vc, vu) in enumerate(zip(VL, VC, VU)): + # Build aN vertices for each lower-upper C3 group in N: + a_vl = list(vl.x) + a_vu = list(vu.x) + a_vl[i + 1] = vut[i + 1] + a_vu[i + 1] = vut[i + 1] + a_vl = self.V[tuple(a_vl)] + a_vu = self.V[tuple(a_vu)] + # Note, build (a + vc) later for consistent yields + # Split the a + b edge of the initial triangulation: + c_vc = self.split_edge(vl.x, a_vu.x) + self.split_edge(vl.x, vu.x) # Equal to vc + # Build cN vertices for each lower-upper C3 group in N: + c_vc.connect(vco) + c_vc.connect(vc) + c_vc.connect(vl) # Connect c + ac operations + c_vc.connect(vu) # Connect c + ac operations + c_vc.connect(a_vl) # Connect c + ac operations + c_vc.connect(a_vu) # Connect c + ac operations + yield c_vc.x + c_vl = self.split_edge(vl.x, a_vl.x) + c_vl.connect(vco) + c_vc.connect(c_vl) # Connect cN group vertices + yield c_vl.x + # yield at end of loop: + c_vu = self.split_edge(vu.x, a_vu.x) + c_vu.connect(vco) + # Connect remaining cN group vertices + c_vc.connect(c_vu) # Connect cN group vertices + yield c_vu.x + + a_vc = self.split_edge(a_vl.x, a_vu.x) # is (a + vc) ? + a_vc.connect(vco) + a_vc.connect(c_vc) + + # Storage for connecting c + ac operations: + ab_C.append((c_vc, vl, vu, a_vl, a_vu)) + + # Update the containers + Cox[i + 1].append(vl) + Cox[i + 1].append(vc) + Cox[i + 1].append(vu) + Ccx[i + 1].append(c_vl) + Ccx[i + 1].append(c_vc) + Ccx[i + 1].append(c_vu) + Cux[i + 1].append(a_vl) + Cux[i + 1].append(a_vc) + Cux[i + 1].append(a_vu) + + # Update old containers + Cox[j].append(c_vl) # ! + Cox[j].append(a_vl) + Ccx[j].append(c_vc) # ! + Ccx[j].append(a_vc) # ! + Cux[j].append(c_vu) # ! 
+ Cux[j].append(a_vu) + + # Yield new points + yield a_vc.x + + except IndexError: + for vectors in ab_Cc: + ba_vl = list(vectors[3].x) + ba_vu = list(vectors[4].x) + ba_vl[i + 1] = vut[i + 1] + ba_vu[i + 1] = vut[i + 1] + ba_vu = self.V[tuple(ba_vu)] + yield ba_vu + d_bc_vc = self.split_edge(vectors[1].x, ba_vu.x) # o-s + yield ba_vu + d_bc_vc.connect(vectors[1]) # Connect all to centroid + d_bc_vc.connect(vectors[2]) # Connect all to centroid + d_bc_vc.connect(vectors[3]) # Connect all to centroid + d_bc_vc.connect(vectors[4]) # Connect all to centroid + yield d_bc_vc.x + ba_vl = self.V[tuple(ba_vl)] + yield ba_vl + d_ba_vl = self.split_edge(vectors[3].x, ba_vl.x) + d_ba_vu = self.split_edge(vectors[4].x, ba_vu.x) + d_ba_vc = self.split_edge(d_ba_vl.x, d_ba_vu.x) + yield d_ba_vl + yield d_ba_vu + yield d_ba_vc + c_vc, vl, vu, a_vl, a_vu = vectors + comb = [vl, vu, a_vl, a_vu, + ba_vl, + ba_vu] + comb_iter = itertools.combinations(comb, 2) + for vecs in comb_iter: + self.split_edge(vecs[0].x, vecs[1].x) + + # Copy lists for iteration + cCox = Cox[i] + cCcx = Ccx[i] + cCux = Cux[i] + VL, VC, VU = cCox, cCcx, cCux + for k, (vl, vc, vu) in enumerate(zip(VL, VC, VU)): + # Build aN vertices for each lower-upper pair in N: + a_vu = list(vu.x) + a_vu[i + 1] = vut[i + 1] + + # Connect vertices in N to corresponding vertices + # in aN: + a_vu = self.V[tuple(a_vu)] + yield a_vl.x + # Split the a + b edge of the initial triangulation: + c_vc = self.split_edge(vl.x, a_vu.x) + self.split_edge(vl.x, vu.x) # Equal to vc + c_vc.connect(vco) + c_vc.connect(vc) + c_vc.connect(vl) # Connect c + ac operations + c_vc.connect(vu) # Connect c + ac operations + c_vc.connect(a_vu) # Connect c + ac operations + yield (c_vc.x) + c_vu = self.split_edge(vu.x, + a_vu.x) # yield at end of loop + c_vu.connect(vco) + # Connect remaining cN group vertices + c_vc.connect(c_vu) # Connect cN group vertices + yield (c_vu.x) + + # Update the containers + Cox[i + 1].append(vu) + Ccx[i + 1].append(c_vu) + Cux[i + 1].append(a_vu) + + # Update old containers + s_ab_C.append([c_vc, vl, vu, a_vu]) + + yield a_vu.x + + # Clean class trash + try: + del Cox + del Ccx + del Cux + del ab_C + del ab_Cc + except UnboundLocalError: + pass + + try: + self.triangulated_vectors.remove((tuple(origin_c), + tuple(supremum_c))) + except ValueError: + # Turn this into a logging warning? + pass + # Add newly triangulated vectors: + for vs in sup_set: + self.triangulated_vectors.append((tuple(vco.x), tuple(vs.x))) + + # Extra yield to ensure that the triangulation is completed + if centroid: + vcn_set = set() + c_nn_lists = [] + for vs in sup_set: + # Build centroid + c_nn = self.vpool(vco.x, vs.x) + try: + c_nn.remove(vcn_set) + except KeyError: + pass + c_nn_lists.append(c_nn) + + for c_nn in c_nn_lists: + try: + c_nn.remove(vcn_set) + except KeyError: + pass + + for vs, c_nn in zip(sup_set, c_nn_lists): + # Build centroid + vcn = self.split_edge(vco.x, vs.x) + vcn_set.add(vcn) + try: # Shouldn't be needed? 
+ c_nn.remove(vcn_set) + except KeyError: + pass + for vnn in c_nn: + vcn.connect(vnn) + yield vcn.x + else: + pass + + yield vut + return + + def refine_star(self, v): + """Refine the star domain of a vertex `v`.""" + # Copy lists before iteration + vnn = copy.copy(v.nn) + v1nn = [] + d_v0v1_set = set() + for v1 in vnn: + v1nn.append(copy.copy(v1.nn)) + + for v1, v1nn in zip(vnn, v1nn): + vnnu = v1nn.intersection(vnn) + + d_v0v1 = self.split_edge(v.x, v1.x) + for o_d_v0v1 in d_v0v1_set: + d_v0v1.connect(o_d_v0v1) + d_v0v1_set.add(d_v0v1) + for v2 in vnnu: + d_v1v2 = self.split_edge(v1.x, v2.x) + d_v0v1.connect(d_v1v2) + return + + @cache + def split_edge(self, v1, v2): + v1 = self.V[v1] + v2 = self.V[v2] + # Destroy original edge, if it exists: + v1.disconnect(v2) + # Compute vertex on centre of edge: + try: + vct = (v2.x_a - v1.x_a) / 2.0 + v1.x_a + except TypeError: # Allow for decimal operations + vct = (v2.x_a - v1.x_a) / decimal.Decimal(2.0) + v1.x_a + + vc = self.V[tuple(vct)] + # Connect to original 2 vertices to the new centre vertex + vc.connect(v1) + vc.connect(v2) + return vc + + def vpool(self, origin, supremum): + vot = tuple(origin) + vst = tuple(supremum) + # Initiate vertices in case they don't exist + vo = self.V[vot] + vs = self.V[vst] + + # Remove origin - supremum disconnect + + # Find the lower/upper bounds of the refinement hyperrectangle + bl = list(vot) + bu = list(vst) + for i, (voi, vsi) in enumerate(zip(vot, vst)): + if bl[i] > vsi: + bl[i] = vsi + if bu[i] < voi: + bu[i] = voi + + # NOTE: This is mostly done with sets/lists because we aren't sure + # how well the numpy arrays will scale to thousands of + # dimensions. + vn_pool = set() + vn_pool.update(vo.nn) + vn_pool.update(vs.nn) + cvn_pool = copy.copy(vn_pool) + for vn in cvn_pool: + for i, xi in enumerate(vn.x): + if bl[i] <= xi <= bu[i]: + pass + else: + try: + vn_pool.remove(vn) + except KeyError: + pass # NOTE: Not all neighbours are in initial pool + return vn_pool + + def vf_to_vv(self, vertices, simplices): + """ + Convert a vertex-face mesh to a vertex-vertex mesh used by this class + + Parameters + ---------- + vertices : list + Vertices + simplices : list + Simplices + """ + if self.dim > 1: + for s in simplices: + edges = itertools.combinations(s, self.dim) + for e in edges: + self.V[tuple(vertices[e[0]])].connect( + self.V[tuple(vertices[e[1]])]) + else: + for e in simplices: + self.V[tuple(vertices[e[0]])].connect( + self.V[tuple(vertices[e[1]])]) + return + + def connect_vertex_non_symm(self, v_x, near=None): + """ + Adds a vertex at coords v_x to the complex that is not symmetric to the + initial triangulation and sub-triangulation. + + If near is specified (for example; a star domain or collections of + cells known to contain v) then only those simplices containd in near + will be searched, this greatly speeds up the process. + + If near is not specified this method will search the entire simplicial + complex structure. 
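+
+        A rough usage sketch (the names below are purely illustrative, not
+        part of the API)::
+
+            # `hc` is an already triangulated complex and `v_x` a point
+            # inside its domain; restricting the simplex search to the star
+            # of a nearby vertex speeds up the lookup considerably.
+            connected = hc.connect_vertex_non_symm(v_x, near=nearby_vertex.star())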
+ + Parameters + ---------- + v_x : tuple + Coordinates of non-symmetric vertex + near : set or list + List of vertices, these are points near v to check for + """ + if near is None: + star = self.V + else: + star = near + # Create the vertex origin + if tuple(v_x) in self.V.cache: + if self.V[v_x] in self.V_non_symm: + pass + else: + return + + self.V[v_x] + found_nn = False + S_rows = [] + for v in star: + S_rows.append(v.x) + + S_rows = np.array(S_rows) + A = np.array(S_rows) - np.array(v_x) + # Iterate through all the possible simplices of S_rows + for s_i in itertools.combinations(range(S_rows.shape[0]), + r=self.dim + 1): + # Check if connected, else s_i is not a simplex + valid_simplex = True + for i in itertools.combinations(s_i, r=2): + # Every combination of vertices must be connected, we check of + # the current iteration of all combinations of s_i are + # connected we break the loop if it is not. + if ((self.V[tuple(S_rows[i[1]])] not in + self.V[tuple(S_rows[i[0]])].nn) + and (self.V[tuple(S_rows[i[0]])] not in + self.V[tuple(S_rows[i[1]])].nn)): + valid_simplex = False + break + + S = S_rows[tuple([s_i])] + if valid_simplex: + if self.deg_simplex(S, proj=None): + valid_simplex = False + + # If s_i is a valid simplex we can test if v_x is inside si + if valid_simplex: + # Find the A_j0 value from the precalculated values + A_j0 = A[tuple([s_i])] + if self.in_simplex(S, v_x, A_j0): + found_nn = True + # breaks the main for loop, s_i is the target simplex: + break + + # Connect the simplex to point + if found_nn: + for i in s_i: + self.V[v_x].connect(self.V[tuple(S_rows[i])]) + # Attached the simplex to storage for all non-symmetric vertices + self.V_non_symm.append(self.V[v_x]) + # this bool value indicates a successful connection if True: + return found_nn + + def in_simplex(self, S, v_x, A_j0=None): + """Check if a vector v_x is in simplex `S`. + + Parameters + ---------- + S : array_like + Array containing simplex entries of vertices as rows + v_x : + A candidate vertex + A_j0 : array, optional, + Allows for A_j0 to be pre-calculated + + Returns + ------- + res : boolean + True if `v_x` is in `S` + """ + A_11 = np.delete(S, 0, 0) - S[0] + + sign_det_A_11 = np.sign(np.linalg.det(A_11)) + if sign_det_A_11 == 0: + # NOTE: We keep the variable A_11, but we loop through A_jj + # ind= + # while sign_det_A_11 == 0: + # A_11 = np.delete(S, ind, 0) - S[ind] + # sign_det_A_11 = np.sign(np.linalg.det(A_11)) + + sign_det_A_11 = -1 # TODO: Choose another det of j instead? + # TODO: Unlikely to work in many cases + + if A_j0 is None: + A_j0 = S - v_x + + for d in range(self.dim + 1): + det_A_jj = (-1)**d * sign_det_A_11 + # TODO: Note that scipy might be faster to add as an optional + # dependency + sign_det_A_j0 = np.sign(np.linalg.det(np.delete(A_j0, d, + 0))) + # TODO: Note if sign_det_A_j0 == then the point is coplanar to the + # current simplex facet, so perhaps return True and attach? + if det_A_jj == sign_det_A_j0: + continue + else: + return False + + return True + + def deg_simplex(self, S, proj=None): + """Test a simplex S for degeneracy (linear dependence in R^dim). + + Parameters + ---------- + S : np.array + Simplex with rows as vertex vectors + proj : array, optional, + If the projection S[1:] - S[0] is already + computed it can be added as an optional argument. 
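+
+        For example (illustrative only), three collinear vertices in R^2 form
+        a degenerate simplex::
+
+            S = np.array([[0., 0.], [1., 0.], [2., 0.]])
+            np.linalg.det(S[1:] - S[0])  # 0.0 -> degenerate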
+ """ + # Strategy: we test all combination of faces, if any of the + # determinants are zero then the vectors lie on the same face and is + # therefore linearly dependent in the space of R^dim + if proj is None: + proj = S[1:] - S[0] + + # TODO: Is checking the projection of one vertex against faces of other + # vertices sufficient? Or do we need to check more vertices in + # dimensions higher than 2? + # TODO: Literature seems to suggest using proj.T, but why is this + # needed? + if np.linalg.det(proj) == 0.0: # TODO: Replace with tolerance? + return True # Simplex is degenerate + else: + return False # Simplex is not degenerate diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/_vertex.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/_vertex.py new file mode 100644 index 0000000000000000000000000000000000000000..e47558ee7b9a181638841c34bb63603b5d37e221 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/_vertex.py @@ -0,0 +1,460 @@ +import collections +from abc import ABC, abstractmethod + +import numpy as np + +from scipy._lib._util import MapWrapper + + +class VertexBase(ABC): + """ + Base class for a vertex. + """ + def __init__(self, x, nn=None, index=None): + """ + Initiation of a vertex object. + + Parameters + ---------- + x : tuple or vector + The geometric location (domain). + nn : list, optional + Nearest neighbour list. + index : int, optional + Index of vertex. + """ + self.x = x + self.hash = hash(self.x) # Save precomputed hash + + if nn is not None: + self.nn = set(nn) # can use .indexupdate to add a new list + else: + self.nn = set() + + self.index = index + + def __hash__(self): + return self.hash + + def __getattr__(self, item): + if item not in ['x_a']: + raise AttributeError(f"{type(self)} object has no attribute " + f"'{item}'") + if item == 'x_a': + self.x_a = np.array(self.x) + return self.x_a + + @abstractmethod + def connect(self, v): + raise NotImplementedError("This method is only implemented with an " + "associated child of the base class.") + + @abstractmethod + def disconnect(self, v): + raise NotImplementedError("This method is only implemented with an " + "associated child of the base class.") + + def star(self): + """Returns the star domain ``st(v)`` of the vertex. 
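+
+        As implemented below, the returned set is the vertex's current
+        nearest-neighbour set together with the vertex itself, e.g. if
+        ``v.nn == {a, b}`` then ``v.star() == {v, a, b}``.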
+ + Parameters + ---------- + v : + The vertex ``v`` in ``st(v)`` + + Returns + ------- + st : set + A set containing all the vertices in ``st(v)`` + """ + self.st = self.nn + self.st.add(self) + return self.st + + +class VertexScalarField(VertexBase): + """ + Add homology properties of a scalar field f: R^n --> R associated with + the geometry built from the VertexBase class + """ + + def __init__(self, x, field=None, nn=None, index=None, field_args=(), + g_cons=None, g_cons_args=()): + """ + Parameters + ---------- + x : tuple, + vector of vertex coordinates + field : callable, optional + a scalar field f: R^n --> R associated with the geometry + nn : list, optional + list of nearest neighbours + index : int, optional + index of the vertex + field_args : tuple, optional + additional arguments to be passed to field + g_cons : callable, optional + constraints on the vertex + g_cons_args : tuple, optional + additional arguments to be passed to g_cons + + """ + super().__init__(x, nn=nn, index=index) + + # Note Vertex is only initiated once for all x so only + # evaluated once + # self.feasible = None + + # self.f is externally defined by the cache to allow parallel + # processing + # None type that will break arithmetic operations unless defined + # self.f = None + + self.check_min = True + self.check_max = True + + def connect(self, v): + """Connects self to another vertex object v. + + Parameters + ---------- + v : VertexBase or VertexScalarField object + """ + if v is not self and v not in self.nn: + self.nn.add(v) + v.nn.add(self) + + # Flags for checking homology properties: + self.check_min = True + self.check_max = True + v.check_min = True + v.check_max = True + + def disconnect(self, v): + if v in self.nn: + self.nn.remove(v) + v.nn.remove(self) + + # Flags for checking homology properties: + self.check_min = True + self.check_max = True + v.check_min = True + v.check_max = True + + def minimiser(self): + """Check whether this vertex is strictly less than all its + neighbours""" + if self.check_min: + self._min = all(self.f < v.f for v in self.nn) + self.check_min = False + + return self._min + + def maximiser(self): + """ + Check whether this vertex is strictly greater than all its + neighbours. + """ + if self.check_max: + self._max = all(self.f > v.f for v in self.nn) + self.check_max = False + + return self._max + + +class VertexVectorField(VertexBase): + """ + Add homology properties of a scalar field f: R^n --> R^m associated with + the geometry built from the VertexBase class. 
+ """ + + def __init__(self, x, sfield=None, vfield=None, field_args=(), + vfield_args=(), g_cons=None, + g_cons_args=(), nn=None, index=None): + super().__init__(x, nn=nn, index=index) + + raise NotImplementedError("This class is still a work in progress") + + +class VertexCacheBase: + """Base class for a vertex cache for a simplicial complex.""" + def __init__(self): + + self.cache = collections.OrderedDict() + self.nfev = 0 # Feasible points + self.index = -1 + + def __iter__(self): + for v in self.cache: + yield self.cache[v] + return + + def size(self): + """Returns the size of the vertex cache.""" + return self.index + 1 + + def print_out(self): + headlen = len(f"Vertex cache of size: {len(self.cache)}:") + print('=' * headlen) + print(f"Vertex cache of size: {len(self.cache)}:") + print('=' * headlen) + for v in self.cache: + self.cache[v].print_out() + + +class VertexCube(VertexBase): + """Vertex class to be used for a pure simplicial complex with no associated + differential geometry (single level domain that exists in R^n)""" + def __init__(self, x, nn=None, index=None): + super().__init__(x, nn=nn, index=index) + + def connect(self, v): + if v is not self and v not in self.nn: + self.nn.add(v) + v.nn.add(self) + + def disconnect(self, v): + if v in self.nn: + self.nn.remove(v) + v.nn.remove(self) + + +class VertexCacheIndex(VertexCacheBase): + def __init__(self): + """ + Class for a vertex cache for a simplicial complex without an associated + field. Useful only for building and visualising a domain complex. + + Parameters + ---------- + """ + super().__init__() + self.Vertex = VertexCube + + def __getitem__(self, x, nn=None): + try: + return self.cache[x] + except KeyError: + self.index += 1 + xval = self.Vertex(x, index=self.index) + # logging.info("New generated vertex at x = {}".format(x)) + # NOTE: Surprisingly high performance increase if logging + # is commented out + self.cache[x] = xval + return self.cache[x] + + +class VertexCacheField(VertexCacheBase): + def __init__(self, field=None, field_args=(), g_cons=None, g_cons_args=(), + workers=1): + """ + Class for a vertex cache for a simplicial complex with an associated + field. + + Parameters + ---------- + field : callable + Scalar or vector field callable. + field_args : tuple, optional + Any additional fixed parameters needed to completely specify the + field function + g_cons : dict or sequence of dict, optional + Constraints definition. + Function(s) ``R**n`` in the form:: + g_cons_args : tuple, optional + Any additional fixed parameters needed to completely specify the + constraint functions + workers : int optional + Uses `multiprocessing.Pool `) to compute the field + functions in parallel. 
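+
+        A minimal single-process sketch (the quadratic field below is purely
+        illustrative)::
+
+            cache = VertexCacheField(field=lambda x: (x ** 2).sum(), workers=1)
+            v = cache[(1.0, 2.0)]    # vertex created and queued for evaluation
+            cache.process_pools()    # field evaluated; v.f is then 5.0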
+ + """ + super().__init__() + self.index = -1 + self.Vertex = VertexScalarField + self.field = field + self.field_args = field_args + self.wfield = FieldWrapper(field, field_args) # if workers is not 1 + + self.g_cons = g_cons + self.g_cons_args = g_cons_args + self.wgcons = ConstraintWrapper(g_cons, g_cons_args) + self.gpool = set() # A set of tuples to process for feasibility + + # Field processing objects + self.fpool = set() # A set of tuples to process for scalar function + self.sfc_lock = False # True if self.fpool is non-Empty + + self.workers = workers + self._mapwrapper = MapWrapper(workers) + + if workers == 1: + self.process_gpool = self.proc_gpool + if g_cons is None: + self.process_fpool = self.proc_fpool_nog + else: + self.process_fpool = self.proc_fpool_g + else: + self.process_gpool = self.pproc_gpool + if g_cons is None: + self.process_fpool = self.pproc_fpool_nog + else: + self.process_fpool = self.pproc_fpool_g + + def __getitem__(self, x, nn=None): + try: + return self.cache[x] + except KeyError: + self.index += 1 + xval = self.Vertex(x, field=self.field, nn=nn, index=self.index, + field_args=self.field_args, + g_cons=self.g_cons, + g_cons_args=self.g_cons_args) + + self.cache[x] = xval # Define in cache + self.gpool.add(xval) # Add to pool for processing feasibility + self.fpool.add(xval) # Add to pool for processing field values + return self.cache[x] + + def __getstate__(self): + self_dict = self.__dict__.copy() + del self_dict['pool'] + return self_dict + + def process_pools(self): + if self.g_cons is not None: + self.process_gpool() + self.process_fpool() + self.proc_minimisers() + + def feasibility_check(self, v): + v.feasible = True + for g, args in zip(self.g_cons, self.g_cons_args): + # constraint may return more than 1 value. + if np.any(g(v.x_a, *args) < 0.0): + v.f = np.inf + v.feasible = False + break + + def compute_sfield(self, v): + """Compute the scalar field values of a vertex object `v`. + + Parameters + ---------- + v : VertexBase or VertexScalarField object + """ + try: + v.f = self.field(v.x_a, *self.field_args) + self.nfev += 1 + except AttributeError: + v.f = np.inf + # logging.warning(f"Field function not found at x = {self.x_a}") + if np.isnan(v.f): + v.f = np.inf + + def proc_gpool(self): + """Process all constraints.""" + if self.g_cons is not None: + for v in self.gpool: + self.feasibility_check(v) + # Clean the pool + self.gpool = set() + + def pproc_gpool(self): + """Process all constraints in parallel.""" + gpool_l = [] + for v in self.gpool: + gpool_l.append(v.x_a) + + G = self._mapwrapper(self.wgcons.gcons, gpool_l) + for v, g in zip(self.gpool, G): + v.feasible = g # set vertex object attribute v.feasible = g (bool) + + def proc_fpool_g(self): + """Process all field functions with constraints supplied.""" + for v in self.fpool: + if v.feasible: + self.compute_sfield(v) + # Clean the pool + self.fpool = set() + + def proc_fpool_nog(self): + """Process all field functions with no constraints supplied.""" + for v in self.fpool: + self.compute_sfield(v) + # Clean the pool + self.fpool = set() + + def pproc_fpool_g(self): + """ + Process all field functions with constraints supplied in parallel. 
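+
+        Only vertices already marked feasible are submitted to the parallel
+        map; infeasible vertices are assigned ``f = inf`` directly.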
+ """ + self.wfield.func + fpool_l = [] + for v in self.fpool: + if v.feasible: + fpool_l.append(v.x_a) + else: + v.f = np.inf + F = self._mapwrapper(self.wfield.func, fpool_l) + for va, f in zip(fpool_l, F): + vt = tuple(va) + self[vt].f = f # set vertex object attribute v.f = f + self.nfev += 1 + # Clean the pool + self.fpool = set() + + def pproc_fpool_nog(self): + """ + Process all field functions with no constraints supplied in parallel. + """ + self.wfield.func + fpool_l = [] + for v in self.fpool: + fpool_l.append(v.x_a) + F = self._mapwrapper(self.wfield.func, fpool_l) + for va, f in zip(fpool_l, F): + vt = tuple(va) + self[vt].f = f # set vertex object attribute v.f = f + self.nfev += 1 + # Clean the pool + self.fpool = set() + + def proc_minimisers(self): + """Check for minimisers.""" + for v in self: + v.minimiser() + v.maximiser() + + +class ConstraintWrapper: + """Object to wrap constraints to pass to `multiprocessing.Pool`.""" + def __init__(self, g_cons, g_cons_args): + self.g_cons = g_cons + self.g_cons_args = g_cons_args + + def gcons(self, v_x_a): + vfeasible = True + for g, args in zip(self.g_cons, self.g_cons_args): + # constraint may return more than 1 value. + if np.any(g(v_x_a, *args) < 0.0): + vfeasible = False + break + return vfeasible + + +class FieldWrapper: + """Object to wrap field to pass to `multiprocessing.Pool`.""" + def __init__(self, field, field_args): + self.field = field + self.field_args = field_args + + def func(self, v_x_a): + try: + v_f = self.field(v_x_a, *self.field_args) + except Exception: + v_f = np.inf + if np.isnan(v_f): + v_f = np.inf + + return v_f diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_slsqp.cpython-310-x86_64-linux-gnu.so b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_slsqp.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..1f64a0469d430cfacd71e0d9a2e72eb73fb28c66 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_slsqp.cpython-310-x86_64-linux-gnu.so differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_slsqp_py.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_slsqp_py.py new file mode 100644 index 0000000000000000000000000000000000000000..0ab66837497a6626d042d56530ead46ea134ad9c --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_slsqp_py.py @@ -0,0 +1,511 @@ +""" +This module implements the Sequential Least Squares Programming optimization +algorithm (SLSQP), originally developed by Dieter Kraft. +See http://www.netlib.org/toms/733 + +Functions +--------- +.. 
autosummary:: + :toctree: generated/ + + approx_jacobian + fmin_slsqp + +""" + +__all__ = ['approx_jacobian', 'fmin_slsqp'] + +import numpy as np +from scipy.optimize._slsqp import slsqp +from numpy import (zeros, array, linalg, append, concatenate, finfo, + sqrt, vstack, isfinite, atleast_1d) +from ._optimize import (OptimizeResult, _check_unknown_options, + _prepare_scalar_function, _clip_x_for_func, + _check_clip_x) +from ._numdiff import approx_derivative +from ._constraints import old_bound_to_new, _arr_to_scalar +from scipy._lib._array_api import array_namespace +from scipy._lib import array_api_extra as xpx + + +__docformat__ = "restructuredtext en" + +_epsilon = sqrt(finfo(float).eps) + + +def approx_jacobian(x, func, epsilon, *args): + """ + Approximate the Jacobian matrix of a callable function. + + Parameters + ---------- + x : array_like + The state vector at which to compute the Jacobian matrix. + func : callable f(x,*args) + The vector-valued function. + epsilon : float + The perturbation used to determine the partial derivatives. + args : sequence + Additional arguments passed to func. + + Returns + ------- + An array of dimensions ``(lenf, lenx)`` where ``lenf`` is the length + of the outputs of `func`, and ``lenx`` is the number of elements in + `x`. + + Notes + ----- + The approximation is done using forward differences. + + """ + # approx_derivative returns (m, n) == (lenf, lenx) + jac = approx_derivative(func, x, method='2-point', abs_step=epsilon, + args=args) + # if func returns a scalar jac.shape will be (lenx,). Make sure + # it's at least a 2D array. + return np.atleast_2d(jac) + + +def fmin_slsqp(func, x0, eqcons=(), f_eqcons=None, ieqcons=(), f_ieqcons=None, + bounds=(), fprime=None, fprime_eqcons=None, + fprime_ieqcons=None, args=(), iter=100, acc=1.0E-6, + iprint=1, disp=None, full_output=0, epsilon=_epsilon, + callback=None): + """ + Minimize a function using Sequential Least Squares Programming + + Python interface function for the SLSQP Optimization subroutine + originally implemented by Dieter Kraft. + + Parameters + ---------- + func : callable f(x,*args) + Objective function. Must return a scalar. + x0 : 1-D ndarray of float + Initial guess for the independent variable(s). + eqcons : list, optional + A list of functions of length n such that + eqcons[j](x,*args) == 0.0 in a successfully optimized + problem. + f_eqcons : callable f(x,*args), optional + Returns a 1-D array in which each element must equal 0.0 in a + successfully optimized problem. If f_eqcons is specified, + eqcons is ignored. + ieqcons : list, optional + A list of functions of length n such that + ieqcons[j](x,*args) >= 0.0 in a successfully optimized + problem. + f_ieqcons : callable f(x,*args), optional + Returns a 1-D ndarray in which each element must be greater or + equal to 0.0 in a successfully optimized problem. If + f_ieqcons is specified, ieqcons is ignored. + bounds : list, optional + A list of tuples specifying the lower and upper bound + for each independent variable [(xl0, xu0),(xl1, xu1),...] + Infinite values will be interpreted as large floating values. + fprime : callable ``f(x,*args)``, optional + A function that evaluates the partial derivatives of func. + fprime_eqcons : callable ``f(x,*args)``, optional + A function of the form ``f(x, *args)`` that returns the m by n + array of equality constraint normals. If not provided, + the normals will be approximated. The array returned by + fprime_eqcons should be sized as ( len(eqcons), len(x0) ). 
+ fprime_ieqcons : callable ``f(x,*args)``, optional + A function of the form ``f(x, *args)`` that returns the m by n + array of inequality constraint normals. If not provided, + the normals will be approximated. The array returned by + fprime_ieqcons should be sized as ( len(ieqcons), len(x0) ). + args : sequence, optional + Additional arguments passed to func and fprime. + iter : int, optional + The maximum number of iterations. + acc : float, optional + Requested accuracy. + iprint : int, optional + The verbosity of fmin_slsqp : + + * iprint <= 0 : Silent operation + * iprint == 1 : Print summary upon completion (default) + * iprint >= 2 : Print status of each iterate and summary + disp : int, optional + Overrides the iprint interface (preferred). + full_output : bool, optional + If False, return only the minimizer of func (default). + Otherwise, output final objective function and summary + information. + epsilon : float, optional + The step size for finite-difference derivative estimates. + callback : callable, optional + Called after each iteration, as ``callback(x)``, where ``x`` is the + current parameter vector. + + Returns + ------- + out : ndarray of float + The final minimizer of func. + fx : ndarray of float, if full_output is true + The final value of the objective function. + its : int, if full_output is true + The number of iterations. + imode : int, if full_output is true + The exit mode from the optimizer (see below). + smode : string, if full_output is true + Message describing the exit mode from the optimizer. + + See also + -------- + minimize: Interface to minimization algorithms for multivariate + functions. See the 'SLSQP' `method` in particular. + + Notes + ----- + Exit modes are defined as follows: + + - ``-1`` : Gradient evaluation required (g & a) + - ``0`` : Optimization terminated successfully + - ``1`` : Function evaluation required (f & c) + - ``2`` : More equality constraints than independent variables + - ``3`` : More than 3*n iterations in LSQ subproblem + - ``4`` : Inequality constraints incompatible + - ``5`` : Singular matrix E in LSQ subproblem + - ``6`` : Singular matrix C in LSQ subproblem + - ``7`` : Rank-deficient equality constraint subproblem HFTI + - ``8`` : Positive directional derivative for linesearch + - ``9`` : Iteration limit reached + + Examples + -------- + Examples are given :ref:`in the tutorial `. + + """ + if disp is not None: + iprint = disp + + opts = {'maxiter': iter, + 'ftol': acc, + 'iprint': iprint, + 'disp': iprint != 0, + 'eps': epsilon, + 'callback': callback} + + # Build the constraints as a tuple of dictionaries + cons = () + # 1. constraints of the 1st kind (eqcons, ieqcons); no Jacobian; take + # the same extra arguments as the objective function. + cons += tuple({'type': 'eq', 'fun': c, 'args': args} for c in eqcons) + cons += tuple({'type': 'ineq', 'fun': c, 'args': args} for c in ieqcons) + # 2. constraints of the 2nd kind (f_eqcons, f_ieqcons) and their Jacobian + # (fprime_eqcons, fprime_ieqcons); also take the same extra arguments + # as the objective function. 
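+    #
+    # Sketch of the resulting structure (illustrative, not normative): a single
+    # vector equality constraint h(x) = 0 passed as ``f_eqcons=h`` becomes
+    #
+    #     ({'type': 'eq', 'fun': h, 'jac': fprime_eqcons, 'args': args},)
+    #
+    # i.e. the same dictionary form consumed by _minimize_slsqp below (and by
+    # ``minimize(..., method='SLSQP')``).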
+ if f_eqcons: + cons += ({'type': 'eq', 'fun': f_eqcons, 'jac': fprime_eqcons, + 'args': args}, ) + if f_ieqcons: + cons += ({'type': 'ineq', 'fun': f_ieqcons, 'jac': fprime_ieqcons, + 'args': args}, ) + + res = _minimize_slsqp(func, x0, args, jac=fprime, bounds=bounds, + constraints=cons, **opts) + if full_output: + return res['x'], res['fun'], res['nit'], res['status'], res['message'] + else: + return res['x'] + + +def _minimize_slsqp(func, x0, args=(), jac=None, bounds=None, + constraints=(), + maxiter=100, ftol=1.0E-6, iprint=1, disp=False, + eps=_epsilon, callback=None, finite_diff_rel_step=None, + **unknown_options): + """ + Minimize a scalar function of one or more variables using Sequential + Least Squares Programming (SLSQP). + + Options + ------- + ftol : float + Precision goal for the value of f in the stopping criterion. + eps : float + Step size used for numerical approximation of the Jacobian. + disp : bool + Set to True to print convergence messages. If False, + `verbosity` is ignored and set to 0. + maxiter : int + Maximum number of iterations. + finite_diff_rel_step : None or array_like, optional + If ``jac in ['2-point', '3-point', 'cs']`` the relative step size to + use for numerical approximation of `jac`. The absolute step + size is computed as ``h = rel_step * sign(x) * max(1, abs(x))``, + possibly adjusted to fit into the bounds. For ``method='3-point'`` + the sign of `h` is ignored. If None (default) then step is selected + automatically. + """ + _check_unknown_options(unknown_options) + iter = maxiter - 1 + acc = ftol + epsilon = eps + + if not disp: + iprint = 0 + + # Transform x0 into an array. + xp = array_namespace(x0) + x0 = xpx.atleast_nd(xp.asarray(x0), ndim=1, xp=xp) + dtype = xp.float64 + if xp.isdtype(x0.dtype, "real floating"): + dtype = x0.dtype + x = xp.reshape(xp.astype(x0, dtype), -1) + + # SLSQP is sent 'old-style' bounds, 'new-style' bounds are required by + # ScalarFunction + if bounds is None or len(bounds) == 0: + new_bounds = (-np.inf, np.inf) + else: + new_bounds = old_bound_to_new(bounds) + + # clip the initial guess to bounds, otherwise ScalarFunction doesn't work + x = np.clip(x, new_bounds[0], new_bounds[1]) + + # Constraints are triaged per type into a dictionary of tuples + if isinstance(constraints, dict): + constraints = (constraints, ) + + cons = {'eq': (), 'ineq': ()} + for ic, con in enumerate(constraints): + # check type + try: + ctype = con['type'].lower() + except KeyError as e: + raise KeyError('Constraint %d has no type defined.' % ic) from e + except TypeError as e: + raise TypeError('Constraints must be defined using a ' + 'dictionary.') from e + except AttributeError as e: + raise TypeError("Constraint's type must be a string.") from e + else: + if ctype not in ['eq', 'ineq']: + raise ValueError(f"Unknown constraint type '{con['type']}'.") + + # check function + if 'fun' not in con: + raise ValueError('Constraint %d has no function defined.' % ic) + + # check Jacobian + cjac = con.get('jac') + if cjac is None: + # approximate Jacobian function. The factory function is needed + # to keep a reference to `fun`, see gh-4240. 
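+            #
+            # (Illustrative note) Without the factory, a closure defined
+            # directly in this loop, e.g.
+            #
+            #     cjac = lambda x, *a: approx_derivative(con['fun'], x, ...)
+            #
+            # would late-bind `con`, so every constraint would end up using
+            # the Jacobian of the last constraint in the list.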
+ def cjac_factory(fun): + def cjac(x, *args): + x = _check_clip_x(x, new_bounds) + + if jac in ['2-point', '3-point', 'cs']: + return approx_derivative(fun, x, method=jac, args=args, + rel_step=finite_diff_rel_step, + bounds=new_bounds) + else: + return approx_derivative(fun, x, method='2-point', + abs_step=epsilon, args=args, + bounds=new_bounds) + + return cjac + cjac = cjac_factory(con['fun']) + + # update constraints' dictionary + cons[ctype] += ({'fun': con['fun'], + 'jac': cjac, + 'args': con.get('args', ())}, ) + + exit_modes = {-1: "Gradient evaluation required (g & a)", + 0: "Optimization terminated successfully", + 1: "Function evaluation required (f & c)", + 2: "More equality constraints than independent variables", + 3: "More than 3*n iterations in LSQ subproblem", + 4: "Inequality constraints incompatible", + 5: "Singular matrix E in LSQ subproblem", + 6: "Singular matrix C in LSQ subproblem", + 7: "Rank-deficient equality constraint subproblem HFTI", + 8: "Positive directional derivative for linesearch", + 9: "Iteration limit reached"} + + # Set the parameters that SLSQP will need + # meq, mieq: number of equality and inequality constraints + meq = sum(map(len, [atleast_1d(c['fun'](x, *c['args'])) + for c in cons['eq']])) + mieq = sum(map(len, [atleast_1d(c['fun'](x, *c['args'])) + for c in cons['ineq']])) + # m = The total number of constraints + m = meq + mieq + # la = The number of constraints, or 1 if there are no constraints + la = array([1, m]).max() + # n = The number of independent variables + n = len(x) + + # Define the workspaces for SLSQP + n1 = n + 1 + mineq = m - meq + n1 + n1 + len_w = (3*n1+m)*(n1+1)+(n1-meq+1)*(mineq+2) + 2*mineq+(n1+mineq)*(n1-meq) \ + + 2*meq + n1 + ((n+1)*n)//2 + 2*m + 3*n + 3*n1 + 1 + len_jw = mineq + w = zeros(len_w) + jw = zeros(len_jw) + + # Decompose bounds into xl and xu + if bounds is None or len(bounds) == 0: + xl = np.empty(n, dtype=float) + xu = np.empty(n, dtype=float) + xl.fill(np.nan) + xu.fill(np.nan) + else: + bnds = array([(_arr_to_scalar(l), _arr_to_scalar(u)) + for (l, u) in bounds], float) + if bnds.shape[0] != n: + raise IndexError('SLSQP Error: the length of bounds is not ' + 'compatible with that of x0.') + + with np.errstate(invalid='ignore'): + bnderr = bnds[:, 0] > bnds[:, 1] + + if bnderr.any(): + raise ValueError("SLSQP Error: lb > ub in bounds " + f"{', '.join(str(b) for b in bnderr)}.") + xl, xu = bnds[:, 0], bnds[:, 1] + + # Mark infinite bounds with nans; the Fortran code understands this + infbnd = ~isfinite(bnds) + xl[infbnd[:, 0]] = np.nan + xu[infbnd[:, 1]] = np.nan + + # ScalarFunction provides function and gradient evaluation + sf = _prepare_scalar_function(func, x, jac=jac, args=args, epsilon=eps, + finite_diff_rel_step=finite_diff_rel_step, + bounds=new_bounds) + # gh11403 SLSQP sometimes exceeds bounds by 1 or 2 ULP, make sure this + # doesn't get sent to the func/grad evaluator. 
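+    # For example, with an upper bound of 1.0 an iterate of
+    # 1.0000000000000002 (one ULP above 1.0) is clipped back to 1.0 before
+    # the user's objective or gradient callable sees it (illustrative values).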
+ wrapped_fun = _clip_x_for_func(sf.fun, new_bounds) + wrapped_grad = _clip_x_for_func(sf.grad, new_bounds) + + # Initialize the iteration counter and the mode value + mode = array(0, int) + acc = array(acc, float) + majiter = array(iter, int) + majiter_prev = 0 + + # Initialize internal SLSQP state variables + alpha = array(0, float) + f0 = array(0, float) + gs = array(0, float) + h1 = array(0, float) + h2 = array(0, float) + h3 = array(0, float) + h4 = array(0, float) + t = array(0, float) + t0 = array(0, float) + tol = array(0, float) + iexact = array(0, int) + incons = array(0, int) + ireset = array(0, int) + itermx = array(0, int) + line = array(0, int) + n1 = array(0, int) + n2 = array(0, int) + n3 = array(0, int) + + # Print the header if iprint >= 2 + if iprint >= 2: + print("%5s %5s %16s %16s" % ("NIT", "FC", "OBJFUN", "GNORM")) + + # mode is zero on entry, so call objective, constraints and gradients + # there should be no func evaluations here because it's cached from + # ScalarFunction + fx = wrapped_fun(x) + g = append(wrapped_grad(x), 0.0) + c = _eval_constraint(x, cons) + a = _eval_con_normals(x, cons, la, n, m, meq, mieq) + + while 1: + # Call SLSQP + slsqp(m, meq, x, xl, xu, fx, c, g, a, acc, majiter, mode, w, jw, + alpha, f0, gs, h1, h2, h3, h4, t, t0, tol, + iexact, incons, ireset, itermx, line, + n1, n2, n3) + + if mode == 1: # objective and constraint evaluation required + fx = wrapped_fun(x) + c = _eval_constraint(x, cons) + + if mode == -1: # gradient evaluation required + g = append(wrapped_grad(x), 0.0) + a = _eval_con_normals(x, cons, la, n, m, meq, mieq) + + if majiter > majiter_prev: + # call callback if major iteration has incremented + if callback is not None: + callback(np.copy(x)) + + # Print the status of the current iterate if iprint > 2 + if iprint >= 2: + print("%5i %5i % 16.6E % 16.6E" % (majiter, sf.nfev, + fx, linalg.norm(g))) + + # If exit mode is not -1 or 1, slsqp has completed + if abs(mode) != 1: + break + + majiter_prev = int(majiter) + + # Optimization loop complete. 
Print status if requested + if iprint >= 1: + print(exit_modes[int(mode)] + " (Exit mode " + str(mode) + ')') + print(" Current function value:", fx) + print(" Iterations:", majiter) + print(" Function evaluations:", sf.nfev) + print(" Gradient evaluations:", sf.ngev) + + return OptimizeResult(x=x, fun=fx, jac=g[:-1], nit=int(majiter), + nfev=sf.nfev, njev=sf.ngev, status=int(mode), + message=exit_modes[int(mode)], success=(mode == 0)) + + +def _eval_constraint(x, cons): + # Compute constraints + if cons['eq']: + c_eq = concatenate([atleast_1d(con['fun'](x, *con['args'])) + for con in cons['eq']]) + else: + c_eq = zeros(0) + + if cons['ineq']: + c_ieq = concatenate([atleast_1d(con['fun'](x, *con['args'])) + for con in cons['ineq']]) + else: + c_ieq = zeros(0) + + # Now combine c_eq and c_ieq into a single matrix + c = concatenate((c_eq, c_ieq)) + return c + + +def _eval_con_normals(x, cons, la, n, m, meq, mieq): + # Compute the normals of the constraints + if cons['eq']: + a_eq = vstack([con['jac'](x, *con['args']) + for con in cons['eq']]) + else: # no equality constraint + a_eq = zeros((meq, n)) + + if cons['ineq']: + a_ieq = vstack([con['jac'](x, *con['args']) + for con in cons['ineq']]) + else: # no inequality constraint + a_ieq = zeros((mieq, n)) + + # Now combine a_eq and a_ieq into a single a matrix + if m == 0: # no constraints + a = zeros((la, n)) + else: + a = vstack((a_eq, a_ieq)) + a = concatenate((a, zeros([la, 1])), 1) + + return a diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_spectral.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_spectral.py new file mode 100644 index 0000000000000000000000000000000000000000..5ff5bef0283b2d6b6c018c1c8b98cd46a335d7cb --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_spectral.py @@ -0,0 +1,260 @@ +""" +Spectral Algorithm for Nonlinear Equations +""" +import collections + +import numpy as np +from scipy.optimize import OptimizeResult +from scipy.optimize._optimize import _check_unknown_options +from ._linesearch import _nonmonotone_line_search_cruz, _nonmonotone_line_search_cheng + +class _NoConvergence(Exception): + pass + + +def _root_df_sane(func, x0, args=(), ftol=1e-8, fatol=1e-300, maxfev=1000, + fnorm=None, callback=None, disp=False, M=10, eta_strategy=None, + sigma_eps=1e-10, sigma_0=1.0, line_search='cruz', **unknown_options): + r""" + Solve nonlinear equation with the DF-SANE method + + Options + ------- + ftol : float, optional + Relative norm tolerance. + fatol : float, optional + Absolute norm tolerance. + Algorithm terminates when ``||func(x)|| < fatol + ftol ||func(x_0)||``. + fnorm : callable, optional + Norm to use in the convergence check. If None, 2-norm is used. + maxfev : int, optional + Maximum number of function evaluations. + disp : bool, optional + Whether to print convergence process to stdout. + eta_strategy : callable, optional + Choice of the ``eta_k`` parameter, which gives slack for growth + of ``||F||**2``. Called as ``eta_k = eta_strategy(k, x, F)`` with + `k` the iteration number, `x` the current iterate and `F` the current + residual. Should satisfy ``eta_k > 0`` and ``sum(eta, k=0..inf) < inf``. + Default: ``||F||**2 / (1 + k)**2``. + sigma_eps : float, optional + The spectral coefficient is constrained to ``sigma_eps < sigma < 1/sigma_eps``. + Default: 1e-10 + sigma_0 : float, optional + Initial spectral coefficient. 
+ Default: 1.0 + M : int, optional + Number of iterates to include in the nonmonotonic line search. + Default: 10 + line_search : {'cruz', 'cheng'} + Type of line search to employ. 'cruz' is the original one defined in + [Martinez & Raydan. Math. Comp. 75, 1429 (2006)], 'cheng' is + a modified search defined in [Cheng & Li. IMA J. Numer. Anal. 29, 814 (2009)]. + Default: 'cruz' + + References + ---------- + .. [1] "Spectral residual method without gradient information for solving + large-scale nonlinear systems of equations." W. La Cruz, + J.M. Martinez, M. Raydan. Math. Comp. **75**, 1429 (2006). + .. [2] W. La Cruz, Opt. Meth. Software, 29, 24 (2014). + .. [3] W. Cheng, D.-H. Li. IMA J. Numer. Anal. **29**, 814 (2009). + + """ + _check_unknown_options(unknown_options) + + if line_search not in ('cheng', 'cruz'): + raise ValueError(f"Invalid value {line_search!r} for 'line_search'") + + nexp = 2 + + if eta_strategy is None: + # Different choice from [1], as their eta is not invariant + # vs. scaling of F. + def eta_strategy(k, x, F): + # Obtain squared 2-norm of the initial residual from the outer scope + return f_0 / (1 + k)**2 + + if fnorm is None: + def fnorm(F): + # Obtain squared 2-norm of the current residual from the outer scope + return f_k**(1.0/nexp) + + def fmerit(F): + return np.linalg.norm(F)**nexp + + nfev = [0] + f, x_k, x_shape, f_k, F_k, is_complex = _wrap_func(func, x0, fmerit, + nfev, maxfev, args) + + k = 0 + f_0 = f_k + sigma_k = sigma_0 + + F_0_norm = fnorm(F_k) + + # For the 'cruz' line search + prev_fs = collections.deque([f_k], M) + + # For the 'cheng' line search + Q = 1.0 + C = f_0 + + converged = False + message = "too many function evaluations required" + + while True: + F_k_norm = fnorm(F_k) + + if disp: + print("iter %d: ||F|| = %g, sigma = %g" % (k, F_k_norm, sigma_k)) + + if callback is not None: + callback(x_k, F_k) + + if F_k_norm < ftol * F_0_norm + fatol: + # Converged! + message = "successful convergence" + converged = True + break + + # Control spectral parameter, from [2] + if abs(sigma_k) > 1/sigma_eps: + sigma_k = 1/sigma_eps * np.sign(sigma_k) + elif abs(sigma_k) < sigma_eps: + sigma_k = sigma_eps + + # Line search direction + d = -sigma_k * F_k + + # Nonmonotone line search + eta = eta_strategy(k, x_k, F_k) + try: + if line_search == 'cruz': + alpha, xp, fp, Fp = _nonmonotone_line_search_cruz(f, x_k, d, prev_fs, + eta=eta) + elif line_search == 'cheng': + alpha, xp, fp, Fp, C, Q = _nonmonotone_line_search_cheng(f, x_k, d, f_k, + C, Q, eta=eta) + except _NoConvergence: + break + + # Update spectral parameter + s_k = xp - x_k + y_k = Fp - F_k + sigma_k = np.vdot(s_k, s_k) / np.vdot(s_k, y_k) + + # Take step + x_k = xp + F_k = Fp + f_k = fp + + # Store function value + if line_search == 'cruz': + prev_fs.append(fp) + + k += 1 + + x = _wrap_result(x_k, is_complex, shape=x_shape) + F = _wrap_result(F_k, is_complex) + + result = OptimizeResult(x=x, success=converged, + message=message, + fun=F, nfev=nfev[0], nit=k, method="df-sane") + + return result + + +def _wrap_func(func, x0, fmerit, nfev_list, maxfev, args=()): + """ + Wrap a function and an initial value so that (i) complex values + are wrapped to reals, and (ii) value for a merit function + fmerit(x, f) is computed at the same time, (iii) iteration count + is maintained and an exception is raised if it is exceeded. 
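+
+    Roughly, complex values are handled by reinterpreting views (see
+    ``_complex2real`` and ``_real2complex`` at the end of this module),
+    e.g.::
+
+        z = np.array([1.0 + 2.0j])
+        x = _complex2real(z)    # array([1., 2.])
+        _real2complex(x)        # array([1.+2.j])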
+ + Parameters + ---------- + func : callable + Function to wrap + x0 : ndarray + Initial value + fmerit : callable + Merit function fmerit(f) for computing merit value from residual. + nfev_list : list + List to store number of evaluations in. Should be [0] in the beginning. + maxfev : int + Maximum number of evaluations before _NoConvergence is raised. + args : tuple + Extra arguments to func + + Returns + ------- + wrap_func : callable + Wrapped function, to be called as + ``F, fp = wrap_func(x0)`` + x0_wrap : ndarray of float + Wrapped initial value; raveled to 1-D and complex + values mapped to reals. + x0_shape : tuple + Shape of the initial value array + f : float + Merit function at F + F : ndarray of float + Residual at x0_wrap + is_complex : bool + Whether complex values were mapped to reals + + """ + x0 = np.asarray(x0) + x0_shape = x0.shape + F = np.asarray(func(x0, *args)).ravel() + is_complex = np.iscomplexobj(x0) or np.iscomplexobj(F) + x0 = x0.ravel() + + nfev_list[0] = 1 + + if is_complex: + def wrap_func(x): + if nfev_list[0] >= maxfev: + raise _NoConvergence() + nfev_list[0] += 1 + z = _real2complex(x).reshape(x0_shape) + v = np.asarray(func(z, *args)).ravel() + F = _complex2real(v) + f = fmerit(F) + return f, F + + x0 = _complex2real(x0) + F = _complex2real(F) + else: + def wrap_func(x): + if nfev_list[0] >= maxfev: + raise _NoConvergence() + nfev_list[0] += 1 + x = x.reshape(x0_shape) + F = np.asarray(func(x, *args)).ravel() + f = fmerit(F) + return f, F + + return wrap_func, x0, x0_shape, fmerit(F), F, is_complex + + +def _wrap_result(result, is_complex, shape=None): + """ + Convert from real to complex and reshape result arrays. + """ + if is_complex: + z = _real2complex(result) + else: + z = result + if shape is not None: + z = z.reshape(shape) + return z + + +def _real2complex(x): + return np.ascontiguousarray(x, dtype=float).view(np.complex128) + + +def _complex2real(z): + return np.ascontiguousarray(z, dtype=complex).view(np.float64) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_tnc.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_tnc.py new file mode 100644 index 0000000000000000000000000000000000000000..327fe4262e25f2f7e2c95d7e5261b6f188e72881 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_tnc.py @@ -0,0 +1,431 @@ +# TNC Python interface +# @(#) $Jeannot: tnc.py,v 1.11 2005/01/28 18:27:31 js Exp $ + +# Copyright (c) 2004-2005, Jean-Sebastien Roy (js@jeannot.org) + +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: + +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
+# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +""" +TNC: A Python interface to the TNC non-linear optimizer + +TNC is a non-linear optimizer. To use it, you must provide a function to +minimize. The function must take one argument: the list of coordinates where to +evaluate the function; and it must return either a tuple, whose first element is the +value of the function, and whose second argument is the gradient of the function +(as a list of values); or None, to abort the minimization. +""" + +from scipy.optimize import _moduleTNC as moduleTNC +from ._optimize import (MemoizeJac, OptimizeResult, _check_unknown_options, + _prepare_scalar_function) +from ._constraints import old_bound_to_new +from scipy._lib._array_api import array_namespace +from scipy._lib import array_api_extra as xpx + +from numpy import inf, array, zeros + +__all__ = ['fmin_tnc'] + + +MSG_NONE = 0 # No messages +MSG_ITER = 1 # One line per iteration +MSG_INFO = 2 # Informational messages +MSG_VERS = 4 # Version info +MSG_EXIT = 8 # Exit reasons +MSG_ALL = MSG_ITER + MSG_INFO + MSG_VERS + MSG_EXIT + +MSGS = { + MSG_NONE: "No messages", + MSG_ITER: "One line per iteration", + MSG_INFO: "Informational messages", + MSG_VERS: "Version info", + MSG_EXIT: "Exit reasons", + MSG_ALL: "All messages" +} + +INFEASIBLE = -1 # Infeasible (lower bound > upper bound) +LOCALMINIMUM = 0 # Local minimum reached (|pg| ~= 0) +FCONVERGED = 1 # Converged (|f_n-f_(n-1)| ~= 0) +XCONVERGED = 2 # Converged (|x_n-x_(n-1)| ~= 0) +MAXFUN = 3 # Max. number of function evaluations reached +LSFAIL = 4 # Linear search failed +CONSTANT = 5 # All lower bounds are equal to the upper bounds +NOPROGRESS = 6 # Unable to progress +USERABORT = 7 # User requested end of minimization + +RCSTRINGS = { + INFEASIBLE: "Infeasible (lower bound > upper bound)", + LOCALMINIMUM: "Local minimum reached (|pg| ~= 0)", + FCONVERGED: "Converged (|f_n-f_(n-1)| ~= 0)", + XCONVERGED: "Converged (|x_n-x_(n-1)| ~= 0)", + MAXFUN: "Max. number of function evaluations reached", + LSFAIL: "Linear search failed", + CONSTANT: "All lower bounds are equal to the upper bounds", + NOPROGRESS: "Unable to progress", + USERABORT: "User requested end of minimization" +} + +# Changes to interface made by Travis Oliphant, Apr. 2004 for inclusion in +# SciPy + + +def fmin_tnc(func, x0, fprime=None, args=(), approx_grad=0, + bounds=None, epsilon=1e-8, scale=None, offset=None, + messages=MSG_ALL, maxCGit=-1, maxfun=None, eta=-1, + stepmx=0, accuracy=0, fmin=0, ftol=-1, xtol=-1, pgtol=-1, + rescale=-1, disp=None, callback=None): + r""" + Minimize a function with variables subject to bounds, using + gradient information in a truncated Newton algorithm. This + method wraps a C implementation of the algorithm. + + Parameters + ---------- + func : callable ``func(x, *args)`` + Function to minimize. Must do one of: + + 1. Return f and g, where f is the value of the function and g its + gradient (a list of floats). + + 2. Return the function value but supply gradient function + separately as `fprime`. + + 3. Return the function value and set ``approx_grad=True``. + + If the function returns None, the minimization + is aborted. + x0 : array_like + Initial estimate of minimum. + fprime : callable ``fprime(x, *args)``, optional + Gradient of `func`. 
If None, then either `func` must return the + function value and the gradient (``f,g = func(x, *args)``) + or `approx_grad` must be True. + args : tuple, optional + Arguments to pass to function. + approx_grad : bool, optional + If true, approximate the gradient numerically. + bounds : list, optional + (min, max) pairs for each element in x0, defining the + bounds on that parameter. Use None or +/-inf for one of + min or max when there is no bound in that direction. + epsilon : float, optional + Used if approx_grad is True. The stepsize in a finite + difference approximation for fprime. + scale : array_like, optional + Scaling factors to apply to each variable. If None, the + factors are up-low for interval bounded variables and + 1+|x| for the others. Defaults to None. + offset : array_like, optional + Value to subtract from each variable. If None, the + offsets are (up+low)/2 for interval bounded variables + and x for the others. + messages : int, optional + Bit mask used to select messages display during + minimization values defined in the MSGS dict. Defaults to + MGS_ALL. + disp : int, optional + Integer interface to messages. 0 = no message, 5 = all messages + maxCGit : int, optional + Maximum number of hessian*vector evaluations per main + iteration. If maxCGit == 0, the direction chosen is + -gradient if maxCGit < 0, maxCGit is set to + max(1,min(50,n/2)). Defaults to -1. + maxfun : int, optional + Maximum number of function evaluation. If None, maxfun is + set to max(100, 10*len(x0)). Defaults to None. Note that this function + may violate the limit because of evaluating gradients by numerical + differentiation. + eta : float, optional + Severity of the line search. If < 0 or > 1, set to 0.25. + Defaults to -1. + stepmx : float, optional + Maximum step for the line search. May be increased during + call. If too small, it will be set to 10.0. Defaults to 0. + accuracy : float, optional + Relative precision for finite difference calculations. If + <= machine_precision, set to sqrt(machine_precision). + Defaults to 0. + fmin : float, optional + Minimum function value estimate. Defaults to 0. + ftol : float, optional + Precision goal for the value of f in the stopping criterion. + If ftol < 0.0, ftol is set to 0.0 defaults to -1. + xtol : float, optional + Precision goal for the value of x in the stopping + criterion (after applying x scaling factors). If xtol < + 0.0, xtol is set to sqrt(machine_precision). Defaults to + -1. + pgtol : float, optional + Precision goal for the value of the projected gradient in + the stopping criterion (after applying x scaling factors). + If pgtol < 0.0, pgtol is set to 1e-2 * sqrt(accuracy). + Setting it to 0.0 is not recommended. Defaults to -1. + rescale : float, optional + Scaling factor (in log10) used to trigger f value + rescaling. If 0, rescale at each iteration. If a large + value, never rescale. If < 0, rescale is set to 1.3. + callback : callable, optional + Called after each iteration, as callback(xk), where xk is the + current parameter vector. + + Returns + ------- + x : ndarray + The solution. + nfeval : int + The number of function evaluations. + rc : int + Return code, see below + + See also + -------- + minimize: Interface to minimization algorithms for multivariate + functions. See the 'TNC' `method` in particular. + + Notes + ----- + The underlying algorithm is truncated Newton, also called + Newton Conjugate-Gradient. This method differs from + scipy.optimize.fmin_ncg in that + + 1. 
it wraps a C implementation of the algorithm + 2. it allows each variable to be given an upper and lower bound. + + The algorithm incorporates the bound constraints by determining + the descent direction as in an unconstrained truncated Newton, + but never taking a step-size large enough to leave the space + of feasible x's. The algorithm keeps track of a set of + currently active constraints, and ignores them when computing + the minimum allowable step size. (The x's associated with the + active constraint are kept fixed.) If the maximum allowable + step size is zero then a new constraint is added. At the end + of each iteration one of the constraints may be deemed no + longer active and removed. A constraint is considered + no longer active is if it is currently active + but the gradient for that variable points inward from the + constraint. The specific constraint removed is the one + associated with the variable of largest index whose + constraint is no longer active. + + Return codes are defined as follows: + + - ``-1`` : Infeasible (lower bound > upper bound) + - ``0`` : Local minimum reached (:math:`|pg| \approx 0`) + - ``1`` : Converged (:math:`|f_n-f_(n-1)| \approx 0`) + - ``2`` : Converged (:math:`|x_n-x_(n-1)| \approx 0`) + - ``3`` : Max. number of function evaluations reached + - ``4`` : Linear search failed + - ``5`` : All lower bounds are equal to the upper bounds + - ``6`` : Unable to progress + - ``7`` : User requested end of minimization + + References + ---------- + Wright S., Nocedal J. (2006), 'Numerical Optimization' + + Nash S.G. (1984), "Newton-Type Minimization Via the Lanczos Method", + SIAM Journal of Numerical Analysis 21, pp. 770-778 + + """ + # handle fprime/approx_grad + if approx_grad: + fun = func + jac = None + elif fprime is None: + fun = MemoizeJac(func) + jac = fun.derivative + else: + fun = func + jac = fprime + + if disp is not None: # disp takes precedence over messages + mesg_num = disp + else: + mesg_num = {0:MSG_NONE, 1:MSG_ITER, 2:MSG_INFO, 3:MSG_VERS, + 4:MSG_EXIT, 5:MSG_ALL}.get(messages, MSG_ALL) + # build options + opts = {'eps': epsilon, + 'scale': scale, + 'offset': offset, + 'mesg_num': mesg_num, + 'maxCGit': maxCGit, + 'maxfun': maxfun, + 'eta': eta, + 'stepmx': stepmx, + 'accuracy': accuracy, + 'minfev': fmin, + 'ftol': ftol, + 'xtol': xtol, + 'gtol': pgtol, + 'rescale': rescale, + 'disp': False} + + res = _minimize_tnc(fun, x0, args, jac, bounds, callback=callback, **opts) + + return res['x'], res['nfev'], res['status'] + + +def _minimize_tnc(fun, x0, args=(), jac=None, bounds=None, + eps=1e-8, scale=None, offset=None, mesg_num=None, + maxCGit=-1, eta=-1, stepmx=0, accuracy=0, + minfev=0, ftol=-1, xtol=-1, gtol=-1, rescale=-1, disp=False, + callback=None, finite_diff_rel_step=None, maxfun=None, + **unknown_options): + """ + Minimize a scalar function of one or more variables using a truncated + Newton (TNC) algorithm. + + Options + ------- + eps : float or ndarray + If `jac is None` the absolute step size used for numerical + approximation of the jacobian via forward differences. + scale : list of floats + Scaling factors to apply to each variable. If None, the + factors are up-low for interval bounded variables and + 1+|x] for the others. Defaults to None. + offset : float + Value to subtract from each variable. If None, the + offsets are (up+low)/2 for interval bounded variables + and x for the others. + disp : bool + Set to True to print convergence messages. 
+ maxCGit : int + Maximum number of hessian*vector evaluations per main + iteration. If maxCGit == 0, the direction chosen is + -gradient if maxCGit < 0, maxCGit is set to + max(1,min(50,n/2)). Defaults to -1. + eta : float + Severity of the line search. If < 0 or > 1, set to 0.25. + Defaults to -1. + stepmx : float + Maximum step for the line search. May be increased during + call. If too small, it will be set to 10.0. Defaults to 0. + accuracy : float + Relative precision for finite difference calculations. If + <= machine_precision, set to sqrt(machine_precision). + Defaults to 0. + minfev : float + Minimum function value estimate. Defaults to 0. + ftol : float + Precision goal for the value of f in the stopping criterion. + If ftol < 0.0, ftol is set to 0.0 defaults to -1. + xtol : float + Precision goal for the value of x in the stopping + criterion (after applying x scaling factors). If xtol < + 0.0, xtol is set to sqrt(machine_precision). Defaults to + -1. + gtol : float + Precision goal for the value of the projected gradient in + the stopping criterion (after applying x scaling factors). + If gtol < 0.0, gtol is set to 1e-2 * sqrt(accuracy). + Setting it to 0.0 is not recommended. Defaults to -1. + rescale : float + Scaling factor (in log10) used to trigger f value + rescaling. If 0, rescale at each iteration. If a large + value, never rescale. If < 0, rescale is set to 1.3. + finite_diff_rel_step : None or array_like, optional + If ``jac in ['2-point', '3-point', 'cs']`` the relative step size to + use for numerical approximation of the jacobian. The absolute step + size is computed as ``h = rel_step * sign(x) * max(1, abs(x))``, + possibly adjusted to fit into the bounds. For ``method='3-point'`` + the sign of `h` is ignored. If None (default) then step is selected + automatically. + maxfun : int + Maximum number of function evaluations. If None, `maxfun` is + set to max(100, 10*len(x0)). Defaults to None. + """ + _check_unknown_options(unknown_options) + fmin = minfev + pgtol = gtol + + xp = array_namespace(x0) + x0 = xpx.atleast_nd(xp.asarray(x0), ndim=1, xp=xp) + dtype = xp.float64 + if xp.isdtype(x0.dtype, "real floating"): + dtype = x0.dtype + x0 = xp.reshape(xp.astype(x0, dtype), -1) + + n = len(x0) + + if bounds is None: + bounds = [(None,None)] * n + if len(bounds) != n: + raise ValueError('length of x0 != length of bounds') + new_bounds = old_bound_to_new(bounds) + + if mesg_num is not None: + messages = {0:MSG_NONE, 1:MSG_ITER, 2:MSG_INFO, 3:MSG_VERS, + 4:MSG_EXIT, 5:MSG_ALL}.get(mesg_num, MSG_ALL) + elif disp: + messages = MSG_ALL + else: + messages = MSG_NONE + + sf = _prepare_scalar_function(fun, x0, jac=jac, args=args, epsilon=eps, + finite_diff_rel_step=finite_diff_rel_step, + bounds=new_bounds) + func_and_grad = sf.fun_and_grad + + """ + low, up : the bounds (lists of floats) + if low is None, the lower bounds are removed. + if up is None, the upper bounds are removed. 
+ low and up defaults to None + """ + low = zeros(n) + up = zeros(n) + for i in range(n): + if bounds[i] is None: + l, u = -inf, inf + else: + l,u = bounds[i] + if l is None: + low[i] = -inf + else: + low[i] = l + if u is None: + up[i] = inf + else: + up[i] = u + + if scale is None: + scale = array([]) + + if offset is None: + offset = array([]) + + if maxfun is None: + maxfun = max(100, 10*len(x0)) + + rc, nf, nit, x, funv, jacv = moduleTNC.tnc_minimize( + func_and_grad, x0, low, up, scale, + offset, messages, maxCGit, maxfun, + eta, stepmx, accuracy, fmin, ftol, + xtol, pgtol, rescale, callback + ) + # the TNC documentation states: "On output, x, f and g may be very + # slightly out of sync because of scaling". Therefore re-evaluate + # func_and_grad so they are synced. + funv, jacv = func_and_grad(x) + + return OptimizeResult(x=x, fun=funv, jac=jacv, nfev=sf.nfev, + nit=nit, status=rc, message=RCSTRINGS[rc], + success=(-1 < rc < 3)) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trlib/__init__.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trlib/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..537b73b3aeb36df09863a0cd24957e5612deb030 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trlib/__init__.py @@ -0,0 +1,12 @@ +from ._trlib import TRLIBQuadraticSubproblem + +__all__ = ['TRLIBQuadraticSubproblem', 'get_trlib_quadratic_subproblem'] + + +def get_trlib_quadratic_subproblem(tol_rel_i=-2.0, tol_rel_b=-3.0, disp=False): + def subproblem_factory(x, fun, jac, hess, hessp): + return TRLIBQuadraticSubproblem(x, fun, jac, hess, hessp, + tol_rel_i=tol_rel_i, + tol_rel_b=tol_rel_b, + disp=disp) + return subproblem_factory diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trlib/__pycache__/__init__.cpython-310.pyc b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trlib/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1ee7d77f26f10db69dc9bb5b05c335e7dd5072fa Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trlib/__pycache__/__init__.cpython-310.pyc differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trustregion.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trustregion.py new file mode 100644 index 0000000000000000000000000000000000000000..0dadc727e74e40b5200810191b21cdeda941c5f6 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trustregion.py @@ -0,0 +1,304 @@ +"""Trust-region optimization.""" +import math +import warnings + +import numpy as np +import scipy.linalg +from ._optimize import (_check_unknown_options, _status_message, + OptimizeResult, _prepare_scalar_function, + _call_callback_maybe_halt) +from scipy.optimize._hessian_update_strategy import HessianUpdateStrategy +from scipy.optimize._differentiable_functions import FD_METHODS +__all__ = [] + + +def _wrap_function(function, args): + # wraps a minimizer function to count number of evaluations + # and to easily provide an args kwd. 
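+    # ncalls is a one-element list so that the nested function_wrapper can
+    # mutate the shared evaluation counter from its enclosing scope; after,
+    # say, three evaluations the caller observes ncalls == [3].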
+ ncalls = [0] + if function is None: + return ncalls, None + + def function_wrapper(x, *wrapper_args): + ncalls[0] += 1 + # A copy of x is sent to the user function (gh13740) + return function(np.copy(x), *(wrapper_args + args)) + + return ncalls, function_wrapper + + +class BaseQuadraticSubproblem: + """ + Base/abstract class defining the quadratic model for trust-region + minimization. Child classes must implement the ``solve`` method. + + Values of the objective function, Jacobian and Hessian (if provided) at + the current iterate ``x`` are evaluated on demand and then stored as + attributes ``fun``, ``jac``, ``hess``. + """ + + def __init__(self, x, fun, jac, hess=None, hessp=None): + self._x = x + self._f = None + self._g = None + self._h = None + self._g_mag = None + self._cauchy_point = None + self._newton_point = None + self._fun = fun + self._jac = jac + self._hess = hess + self._hessp = hessp + + def __call__(self, p): + return self.fun + np.dot(self.jac, p) + 0.5 * np.dot(p, self.hessp(p)) + + @property + def fun(self): + """Value of objective function at current iteration.""" + if self._f is None: + self._f = self._fun(self._x) + return self._f + + @property + def jac(self): + """Value of Jacobian of objective function at current iteration.""" + if self._g is None: + self._g = self._jac(self._x) + return self._g + + @property + def hess(self): + """Value of Hessian of objective function at current iteration.""" + if self._h is None: + self._h = self._hess(self._x) + return self._h + + def hessp(self, p): + if self._hessp is not None: + return self._hessp(self._x, p) + else: + return np.dot(self.hess, p) + + @property + def jac_mag(self): + """Magnitude of jacobian of objective function at current iteration.""" + if self._g_mag is None: + self._g_mag = scipy.linalg.norm(self.jac) + return self._g_mag + + def get_boundaries_intersections(self, z, d, trust_radius): + """ + Solve the scalar quadratic equation ``||z + t d|| == trust_radius``. + This is like a line-sphere intersection. + Return the two values of t, sorted from low to high. + """ + a = np.dot(d, d) + b = 2 * np.dot(z, d) + c = np.dot(z, z) - trust_radius**2 + sqrt_discriminant = math.sqrt(b*b - 4*a*c) + + # The following calculation is mathematically + # equivalent to: + # ta = (-b - sqrt_discriminant) / (2*a) + # tb = (-b + sqrt_discriminant) / (2*a) + # but produce smaller round off errors. + # Look at Matrix Computation p.97 + # for a better justification. + aux = b + math.copysign(sqrt_discriminant, b) + ta = -aux / (2*a) + tb = -2*c / aux + return sorted([ta, tb]) + + def solve(self, trust_radius): + raise NotImplementedError('The solve method should be implemented by ' + 'the child class') + + +def _minimize_trust_region(fun, x0, args=(), jac=None, hess=None, hessp=None, + subproblem=None, initial_trust_radius=1.0, + max_trust_radius=1000.0, eta=0.15, gtol=1e-4, + maxiter=None, disp=False, return_all=False, + callback=None, inexact=True, **unknown_options): + """ + Minimization of scalar function of one or more variables using a + trust-region algorithm. + + Options for the trust-region algorithm are: + initial_trust_radius : float + Initial trust radius. + max_trust_radius : float + Never propose steps that are longer than this value. + eta : float + Trust region related acceptance stringency for proposed steps. + gtol : float + Gradient norm must be less than `gtol` + before successful termination. + maxiter : int + Maximum number of iterations to perform. + disp : bool + If True, print convergence message. 
+ inexact : bool + Accuracy to solve subproblems. If True requires less nonlinear + iterations, but more vector products. Only effective for method + trust-krylov. + + This function is called by the `minimize` function. + It is not supposed to be called directly. + """ + _check_unknown_options(unknown_options) + + if jac is None: + raise ValueError('Jacobian is currently required for trust-region ' + 'methods') + if hess is None and hessp is None: + raise ValueError('Either the Hessian or the Hessian-vector product ' + 'is currently required for trust-region methods') + if subproblem is None: + raise ValueError('A subproblem solving strategy is required for ' + 'trust-region methods') + if not (0 <= eta < 0.25): + raise Exception('invalid acceptance stringency') + if max_trust_radius <= 0: + raise Exception('the max trust radius must be positive') + if initial_trust_radius <= 0: + raise ValueError('the initial trust radius must be positive') + if initial_trust_radius >= max_trust_radius: + raise ValueError('the initial trust radius must be less than the ' + 'max trust radius') + + # force the initial guess into a nice format + x0 = np.asarray(x0).flatten() + + # A ScalarFunction representing the problem. This caches calls to fun, jac, + # hess. + sf = _prepare_scalar_function(fun, x0, jac=jac, hess=hess, args=args) + fun = sf.fun + jac = sf.grad + if callable(hess): + hess = sf.hess + elif callable(hessp): + # this elif statement must come before examining whether hess + # is estimated by FD methods or a HessianUpdateStrategy + pass + elif (hess in FD_METHODS or isinstance(hess, HessianUpdateStrategy)): + # If the Hessian is being estimated by finite differences or a + # Hessian update strategy then ScalarFunction.hess returns a + # LinearOperator or a HessianUpdateStrategy. This enables the + # calculation/creation of a hessp. BUT you only want to do this + # if the user *hasn't* provided a callable(hessp) function. + hess = None + + def hessp(x, p, *args): + return sf.hess(x).dot(p) + else: + raise ValueError('Either the Hessian or the Hessian-vector product ' + 'is currently required for trust-region methods') + + # ScalarFunction doesn't represent hessp + nhessp, hessp = _wrap_function(hessp, args) + + # limit the number of iterations + if maxiter is None: + maxiter = len(x0)*200 + + # init the search status + warnflag = 0 + + # initialize the search + trust_radius = initial_trust_radius + x = x0 + if return_all: + allvecs = [x] + m = subproblem(x, fun, jac, hess, hessp) + k = 0 + + # search for the function min + # do not even start if the gradient is small enough + while m.jac_mag >= gtol: + + # Solve the sub-problem. + # This gives us the proposed step relative to the current position + # and it tells us whether the proposed step + # has reached the trust region boundary or not. 
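+        # Informal sketch of the subproblem being solved, written in terms
+        # of the BaseQuadraticSubproblem attributes defined above:
+        #     minimize    m(p) = m.fun + m.jac @ p + 0.5 * p @ m.hessp(p)
+        #     subject to  ||p|| <= trust_radius
+        # m.solve returns the step p and whether it lies on the boundary.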
+ try: + p, hits_boundary = m.solve(trust_radius) + except np.linalg.LinAlgError: + warnflag = 3 + break + + # calculate the predicted value at the proposed point + predicted_value = m(p) + + # define the local approximation at the proposed point + x_proposed = x + p + m_proposed = subproblem(x_proposed, fun, jac, hess, hessp) + + # evaluate the ratio defined in equation (4.4) + actual_reduction = m.fun - m_proposed.fun + predicted_reduction = m.fun - predicted_value + if predicted_reduction <= 0: + warnflag = 2 + break + rho = actual_reduction / predicted_reduction + + # update the trust radius according to the actual/predicted ratio + if rho < 0.25: + trust_radius *= 0.25 + elif rho > 0.75 and hits_boundary: + trust_radius = min(2*trust_radius, max_trust_radius) + + # if the ratio is high enough then accept the proposed step + if rho > eta: + x = x_proposed + m = m_proposed + + # append the best guess, call back, increment the iteration count + if return_all: + allvecs.append(np.copy(x)) + k += 1 + + intermediate_result = OptimizeResult(x=x, fun=m.fun) + if _call_callback_maybe_halt(callback, intermediate_result): + break + + # check if the gradient is small enough to stop + if m.jac_mag < gtol: + warnflag = 0 + break + + # check if we have looked at enough iterations + if k >= maxiter: + warnflag = 1 + break + + # print some stuff if requested + status_messages = ( + _status_message['success'], + _status_message['maxiter'], + 'A bad approximation caused failure to predict improvement.', + 'A linalg error occurred, such as a non-psd Hessian.', + ) + if disp: + if warnflag == 0: + print(status_messages[warnflag]) + else: + warnings.warn(status_messages[warnflag], RuntimeWarning, stacklevel=3) + print(f" Current function value: {m.fun:f}") + print(" Iterations: %d" % k) + print(" Function evaluations: %d" % sf.nfev) + print(" Gradient evaluations: %d" % sf.ngev) + print(" Hessian evaluations: %d" % (sf.nhev + nhessp[0])) + + result = OptimizeResult(x=x, success=(warnflag == 0), status=warnflag, + fun=m.fun, jac=m.jac, nfev=sf.nfev, njev=sf.ngev, + nhev=sf.nhev + nhessp[0], nit=k, + message=status_messages[warnflag]) + + if hess is not None: + result['hess'] = m.hess + + if return_all: + result['allvecs'] = allvecs + + return result diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__init__.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..549cfb9760dda474cb858b7b36d236af48111067 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__init__.py @@ -0,0 +1,6 @@ +"""This module contains the equality constrained SQP solver.""" + + +from .minimize_trustregion_constr import _minimize_trustregion_constr + +__all__ = ['_minimize_trustregion_constr'] diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/__init__.cpython-310.pyc b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4be86bade2fb9bf32fc58419c80e02714aa582c7 Binary files /dev/null and 
b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/__init__.cpython-310.pyc differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/canonical_constraint.cpython-310.pyc b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/canonical_constraint.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b6e62bd35206b6e7574db5b5bbd17313a011245f Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/canonical_constraint.cpython-310.pyc differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/equality_constrained_sqp.cpython-310.pyc b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/equality_constrained_sqp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c5e3b294786932c69ca1484f164a81e980feaf90 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/equality_constrained_sqp.cpython-310.pyc differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/minimize_trustregion_constr.cpython-310.pyc b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/minimize_trustregion_constr.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ae12889620aeff0cd8d5988f2c389577d826ee27 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/minimize_trustregion_constr.cpython-310.pyc differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/projections.cpython-310.pyc b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/projections.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3c8df87c59538c10c63886be059861ce097f117b Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/projections.cpython-310.pyc differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/qp_subproblem.cpython-310.pyc b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/qp_subproblem.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..86749d4c4237693e4f2f1524289ac7fd394a5cc3 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/qp_subproblem.cpython-310.pyc differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/report.cpython-310.pyc 
b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/report.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6356878e00ba4812d004649b663e78f1dedabdfb Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/report.cpython-310.pyc differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/tr_interior_point.cpython-310.pyc b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/tr_interior_point.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..31d093d31702c48e98cda5186cf48aebda65b929 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/tr_interior_point.cpython-310.pyc differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/canonical_constraint.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/canonical_constraint.py new file mode 100644 index 0000000000000000000000000000000000000000..7e9e75f04c032e5be077f9b7db210b96072092c9 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/canonical_constraint.py @@ -0,0 +1,390 @@ +import numpy as np +import scipy.sparse as sps + + +class CanonicalConstraint: + """Canonical constraint to use with trust-constr algorithm. + + It represents the set of constraints of the form:: + + f_eq(x) = 0 + f_ineq(x) <= 0 + + where ``f_eq`` and ``f_ineq`` are evaluated by a single function, see + below. + + The class is supposed to be instantiated by factory methods, which + should prepare the parameters listed below. + + Parameters + ---------- + n_eq, n_ineq : int + Number of equality and inequality constraints respectively. + fun : callable + Function defining the constraints. The signature is + ``fun(x) -> c_eq, c_ineq``, where ``c_eq`` is ndarray with `n_eq` + components and ``c_ineq`` is ndarray with `n_ineq` components. + jac : callable + Function to evaluate the Jacobian of the constraint. The signature + is ``jac(x) -> J_eq, J_ineq``, where ``J_eq`` and ``J_ineq`` are + either ndarray of csr_matrix of shapes (n_eq, n) and (n_ineq, n), + respectively. + hess : callable + Function to evaluate the Hessian of the constraints multiplied + by Lagrange multipliers, that is + ``dot(f_eq, v_eq) + dot(f_ineq, v_ineq)``. The signature is + ``hess(x, v_eq, v_ineq) -> H``, where ``H`` has an implied + shape (n, n) and provide a matrix-vector product operation + ``H.dot(p)``. + keep_feasible : ndarray, shape (n_ineq,) + Mask indicating which inequality constraints should be kept feasible. 
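+
+    A minimal usage sketch (``prepared``, ``x``, ``v_eq``, ``v_ineq`` and
+    ``p`` below are illustrative placeholders, not attributes of this
+    class)::
+
+        canonical = CanonicalConstraint.from_PreparedConstraint(prepared)
+        c_eq, c_ineq = canonical.fun(x)      # shapes (n_eq,) and (n_ineq,)
+        J_eq, J_ineq = canonical.jac(x)      # shapes (n_eq, n) and (n_ineq, n)
+        H = canonical.hess(x, v_eq, v_ineq)  # supports H.dot(p)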
+ """ + def __init__(self, n_eq, n_ineq, fun, jac, hess, keep_feasible): + self.n_eq = n_eq + self.n_ineq = n_ineq + self.fun = fun + self.jac = jac + self.hess = hess + self.keep_feasible = keep_feasible + + @classmethod + def from_PreparedConstraint(cls, constraint): + """Create an instance from `PreparedConstrained` object.""" + lb, ub = constraint.bounds + cfun = constraint.fun + keep_feasible = constraint.keep_feasible + + if np.all(lb == -np.inf) and np.all(ub == np.inf): + return cls.empty(cfun.n) + + if np.all(lb == -np.inf) and np.all(ub == np.inf): + return cls.empty(cfun.n) + elif np.all(lb == ub): + return cls._equal_to_canonical(cfun, lb) + elif np.all(lb == -np.inf): + return cls._less_to_canonical(cfun, ub, keep_feasible) + elif np.all(ub == np.inf): + return cls._greater_to_canonical(cfun, lb, keep_feasible) + else: + return cls._interval_to_canonical(cfun, lb, ub, keep_feasible) + + @classmethod + def empty(cls, n): + """Create an "empty" instance. + + This "empty" instance is required to allow working with unconstrained + problems as if they have some constraints. + """ + empty_fun = np.empty(0) + empty_jac = np.empty((0, n)) + empty_hess = sps.csr_matrix((n, n)) + + def fun(x): + return empty_fun, empty_fun + + def jac(x): + return empty_jac, empty_jac + + def hess(x, v_eq, v_ineq): + return empty_hess + + return cls(0, 0, fun, jac, hess, np.empty(0, dtype=np.bool_)) + + @classmethod + def concatenate(cls, canonical_constraints, sparse_jacobian): + """Concatenate multiple `CanonicalConstraint` into one. + + `sparse_jacobian` (bool) determines the Jacobian format of the + concatenated constraint. Note that items in `canonical_constraints` + must have their Jacobians in the same format. + """ + def fun(x): + if canonical_constraints: + eq_all, ineq_all = zip( + *[c.fun(x) for c in canonical_constraints]) + else: + eq_all, ineq_all = [], [] + + return np.hstack(eq_all), np.hstack(ineq_all) + + if sparse_jacobian: + vstack = sps.vstack + else: + vstack = np.vstack + + def jac(x): + if canonical_constraints: + eq_all, ineq_all = zip( + *[c.jac(x) for c in canonical_constraints]) + else: + eq_all, ineq_all = [], [] + + return vstack(eq_all), vstack(ineq_all) + + def hess(x, v_eq, v_ineq): + hess_all = [] + index_eq = 0 + index_ineq = 0 + for c in canonical_constraints: + vc_eq = v_eq[index_eq:index_eq + c.n_eq] + vc_ineq = v_ineq[index_ineq:index_ineq + c.n_ineq] + hess_all.append(c.hess(x, vc_eq, vc_ineq)) + index_eq += c.n_eq + index_ineq += c.n_ineq + + def matvec(p): + result = np.zeros_like(p, dtype=float) + for h in hess_all: + result += h.dot(p) + return result + + n = x.shape[0] + return sps.linalg.LinearOperator((n, n), matvec, dtype=float) + + n_eq = sum(c.n_eq for c in canonical_constraints) + n_ineq = sum(c.n_ineq for c in canonical_constraints) + keep_feasible = np.hstack([c.keep_feasible for c in + canonical_constraints]) + + return cls(n_eq, n_ineq, fun, jac, hess, keep_feasible) + + @classmethod + def _equal_to_canonical(cls, cfun, value): + empty_fun = np.empty(0) + n = cfun.n + + n_eq = value.shape[0] + n_ineq = 0 + keep_feasible = np.empty(0, dtype=bool) + + if cfun.sparse_jacobian: + empty_jac = sps.csr_matrix((0, n)) + else: + empty_jac = np.empty((0, n)) + + def fun(x): + return cfun.fun(x) - value, empty_fun + + def jac(x): + return cfun.jac(x), empty_jac + + def hess(x, v_eq, v_ineq): + return cfun.hess(x, v_eq) + + empty_fun = np.empty(0) + n = cfun.n + if cfun.sparse_jacobian: + empty_jac = sps.csr_matrix((0, n)) + else: + empty_jac = np.empty((0, n)) 
+ + return cls(n_eq, n_ineq, fun, jac, hess, keep_feasible) + + @classmethod + def _less_to_canonical(cls, cfun, ub, keep_feasible): + empty_fun = np.empty(0) + n = cfun.n + if cfun.sparse_jacobian: + empty_jac = sps.csr_matrix((0, n)) + else: + empty_jac = np.empty((0, n)) + + finite_ub = ub < np.inf + n_eq = 0 + n_ineq = np.sum(finite_ub) + + if np.all(finite_ub): + def fun(x): + return empty_fun, cfun.fun(x) - ub + + def jac(x): + return empty_jac, cfun.jac(x) + + def hess(x, v_eq, v_ineq): + return cfun.hess(x, v_ineq) + else: + finite_ub = np.nonzero(finite_ub)[0] + keep_feasible = keep_feasible[finite_ub] + ub = ub[finite_ub] + + def fun(x): + return empty_fun, cfun.fun(x)[finite_ub] - ub + + def jac(x): + return empty_jac, cfun.jac(x)[finite_ub] + + def hess(x, v_eq, v_ineq): + v = np.zeros(cfun.m) + v[finite_ub] = v_ineq + return cfun.hess(x, v) + + return cls(n_eq, n_ineq, fun, jac, hess, keep_feasible) + + @classmethod + def _greater_to_canonical(cls, cfun, lb, keep_feasible): + empty_fun = np.empty(0) + n = cfun.n + if cfun.sparse_jacobian: + empty_jac = sps.csr_matrix((0, n)) + else: + empty_jac = np.empty((0, n)) + + finite_lb = lb > -np.inf + n_eq = 0 + n_ineq = np.sum(finite_lb) + + if np.all(finite_lb): + def fun(x): + return empty_fun, lb - cfun.fun(x) + + def jac(x): + return empty_jac, -cfun.jac(x) + + def hess(x, v_eq, v_ineq): + return cfun.hess(x, -v_ineq) + else: + finite_lb = np.nonzero(finite_lb)[0] + keep_feasible = keep_feasible[finite_lb] + lb = lb[finite_lb] + + def fun(x): + return empty_fun, lb - cfun.fun(x)[finite_lb] + + def jac(x): + return empty_jac, -cfun.jac(x)[finite_lb] + + def hess(x, v_eq, v_ineq): + v = np.zeros(cfun.m) + v[finite_lb] = -v_ineq + return cfun.hess(x, v) + + return cls(n_eq, n_ineq, fun, jac, hess, keep_feasible) + + @classmethod + def _interval_to_canonical(cls, cfun, lb, ub, keep_feasible): + lb_inf = lb == -np.inf + ub_inf = ub == np.inf + equal = lb == ub + less = lb_inf & ~ub_inf + greater = ub_inf & ~lb_inf + interval = ~equal & ~lb_inf & ~ub_inf + + equal = np.nonzero(equal)[0] + less = np.nonzero(less)[0] + greater = np.nonzero(greater)[0] + interval = np.nonzero(interval)[0] + n_less = less.shape[0] + n_greater = greater.shape[0] + n_interval = interval.shape[0] + n_ineq = n_less + n_greater + 2 * n_interval + n_eq = equal.shape[0] + + keep_feasible = np.hstack((keep_feasible[less], + keep_feasible[greater], + keep_feasible[interval], + keep_feasible[interval])) + + def fun(x): + f = cfun.fun(x) + eq = f[equal] - lb[equal] + le = f[less] - ub[less] + ge = lb[greater] - f[greater] + il = f[interval] - ub[interval] + ig = lb[interval] - f[interval] + return eq, np.hstack((le, ge, il, ig)) + + def jac(x): + J = cfun.jac(x) + eq = J[equal] + le = J[less] + ge = -J[greater] + il = J[interval] + ig = -il + if sps.issparse(J): + ineq = sps.vstack((le, ge, il, ig)) + else: + ineq = np.vstack((le, ge, il, ig)) + return eq, ineq + + def hess(x, v_eq, v_ineq): + n_start = 0 + v_l = v_ineq[n_start:n_start + n_less] + n_start += n_less + v_g = v_ineq[n_start:n_start + n_greater] + n_start += n_greater + v_il = v_ineq[n_start:n_start + n_interval] + n_start += n_interval + v_ig = v_ineq[n_start:n_start + n_interval] + + v = np.zeros_like(lb) + v[equal] = v_eq + v[less] = v_l + v[greater] = -v_g + v[interval] = v_il - v_ig + + return cfun.hess(x, v) + + return cls(n_eq, n_ineq, fun, jac, hess, keep_feasible) + + +def initial_constraints_as_canonical(n, prepared_constraints, sparse_jacobian): + """Convert initial values of the constraints 
to the canonical format. + + The purpose to avoid one additional call to the constraints at the initial + point. It takes saved values in `PreparedConstraint`, modifies and + concatenates them to the canonical constraint format. + """ + c_eq = [] + c_ineq = [] + J_eq = [] + J_ineq = [] + + for c in prepared_constraints: + f = c.fun.f + J = c.fun.J + lb, ub = c.bounds + if np.all(lb == ub): + c_eq.append(f - lb) + J_eq.append(J) + elif np.all(lb == -np.inf): + finite_ub = ub < np.inf + c_ineq.append(f[finite_ub] - ub[finite_ub]) + J_ineq.append(J[finite_ub]) + elif np.all(ub == np.inf): + finite_lb = lb > -np.inf + c_ineq.append(lb[finite_lb] - f[finite_lb]) + J_ineq.append(-J[finite_lb]) + else: + lb_inf = lb == -np.inf + ub_inf = ub == np.inf + equal = lb == ub + less = lb_inf & ~ub_inf + greater = ub_inf & ~lb_inf + interval = ~equal & ~lb_inf & ~ub_inf + + c_eq.append(f[equal] - lb[equal]) + c_ineq.append(f[less] - ub[less]) + c_ineq.append(lb[greater] - f[greater]) + c_ineq.append(f[interval] - ub[interval]) + c_ineq.append(lb[interval] - f[interval]) + + J_eq.append(J[equal]) + J_ineq.append(J[less]) + J_ineq.append(-J[greater]) + J_ineq.append(J[interval]) + J_ineq.append(-J[interval]) + + c_eq = np.hstack(c_eq) if c_eq else np.empty(0) + c_ineq = np.hstack(c_ineq) if c_ineq else np.empty(0) + + if sparse_jacobian: + vstack = sps.vstack + empty = sps.csr_matrix((0, n)) + else: + vstack = np.vstack + empty = np.empty((0, n)) + + J_eq = vstack(J_eq) if J_eq else empty + J_ineq = vstack(J_ineq) if J_ineq else empty + + return c_eq, c_ineq, J_eq, J_ineq diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/equality_constrained_sqp.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/equality_constrained_sqp.py new file mode 100644 index 0000000000000000000000000000000000000000..88a9f8deb1abfaa82533eeb6308bbfbd5516ed4d --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/equality_constrained_sqp.py @@ -0,0 +1,231 @@ +"""Byrd-Omojokun Trust-Region SQP method.""" + +from scipy.sparse import eye as speye +from .projections import projections +from .qp_subproblem import modified_dogleg, projected_cg, box_intersections +import numpy as np +from numpy.linalg import norm + +__all__ = ['equality_constrained_sqp'] + + +def default_scaling(x): + n, = np.shape(x) + return speye(n) + + +def equality_constrained_sqp(fun_and_constr, grad_and_jac, lagr_hess, + x0, fun0, grad0, constr0, + jac0, stop_criteria, + state, + initial_penalty, + initial_trust_radius, + factorization_method, + trust_lb=None, + trust_ub=None, + scaling=default_scaling): + """Solve nonlinear equality-constrained problem using trust-region SQP. + + Solve optimization problem: + + minimize fun(x) + subject to: constr(x) = 0 + + using Byrd-Omojokun Trust-Region SQP method described in [1]_. Several + implementation details are based on [2]_ and [3]_, p. 549. + + References + ---------- + .. [1] Lalee, Marucha, Jorge Nocedal, and Todd Plantenga. "On the + implementation of an algorithm for large-scale equality + constrained optimization." SIAM Journal on + Optimization 8.3 (1998): 682-706. + .. [2] Byrd, Richard H., Mary E. Hribar, and Jorge Nocedal. + "An interior point algorithm for large-scale nonlinear + programming." SIAM Journal on Optimization 9.4 (1999): 877-900. + .. [3] Nocedal, Jorge, and Stephen J. Wright. 
"Numerical optimization" + Second Edition (2006). + """ + PENALTY_FACTOR = 0.3 # Rho from formula (3.51), reference [2]_, p.891. + LARGE_REDUCTION_RATIO = 0.9 + INTERMEDIARY_REDUCTION_RATIO = 0.3 + SUFFICIENT_REDUCTION_RATIO = 1e-8 # Eta from reference [2]_, p.892. + TRUST_ENLARGEMENT_FACTOR_L = 7.0 + TRUST_ENLARGEMENT_FACTOR_S = 2.0 + MAX_TRUST_REDUCTION = 0.5 + MIN_TRUST_REDUCTION = 0.1 + SOC_THRESHOLD = 0.1 + TR_FACTOR = 0.8 # Zeta from formula (3.21), reference [2]_, p.885. + BOX_FACTOR = 0.5 + + n, = np.shape(x0) # Number of parameters + + # Set default lower and upper bounds. + if trust_lb is None: + trust_lb = np.full(n, -np.inf) + if trust_ub is None: + trust_ub = np.full(n, np.inf) + + # Initial values + x = np.copy(x0) + trust_radius = initial_trust_radius + penalty = initial_penalty + # Compute Values + f = fun0 + c = grad0 + b = constr0 + A = jac0 + S = scaling(x) + # Get projections + try: + Z, LS, Y = projections(A, factorization_method) + except ValueError as e: + if str(e) == "expected square matrix": + # can be the case if there are more equality + # constraints than independent variables + raise ValueError( + "The 'expected square matrix' error can occur if there are" + " more equality constraints than independent variables." + " Consider how your constraints are set up, or use" + " factorization_method='SVDFactorization'." + ) from e + else: + raise e + + # Compute least-square lagrange multipliers + v = -LS.dot(c) + # Compute Hessian + H = lagr_hess(x, v) + + # Update state parameters + optimality = norm(c + A.T.dot(v), np.inf) + constr_violation = norm(b, np.inf) if len(b) > 0 else 0 + cg_info = {'niter': 0, 'stop_cond': 0, + 'hits_boundary': False} + + last_iteration_failed = False + while not stop_criteria(state, x, last_iteration_failed, + optimality, constr_violation, + trust_radius, penalty, cg_info): + # Normal Step - `dn` + # minimize 1/2*||A dn + b||^2 + # subject to: + # ||dn|| <= TR_FACTOR * trust_radius + # BOX_FACTOR * lb <= dn <= BOX_FACTOR * ub. + dn = modified_dogleg(A, Y, b, + TR_FACTOR*trust_radius, + BOX_FACTOR*trust_lb, + BOX_FACTOR*trust_ub) + + # Tangential Step - `dt` + # Solve the QP problem: + # minimize 1/2 dt.T H dt + dt.T (H dn + c) + # subject to: + # A dt = 0 + # ||dt|| <= sqrt(trust_radius**2 - ||dn||**2) + # lb - dn <= dt <= ub - dn + c_t = H.dot(dn) + c + b_t = np.zeros_like(b) + trust_radius_t = np.sqrt(trust_radius**2 - np.linalg.norm(dn)**2) + lb_t = trust_lb - dn + ub_t = trust_ub - dn + dt, cg_info = projected_cg(H, c_t, Z, Y, b_t, + trust_radius_t, + lb_t, ub_t) + + # Compute update (normal + tangential steps). + d = dn + dt + + # Compute second order model: 1/2 d H d + c.T d + f. + quadratic_model = 1/2*(H.dot(d)).dot(d) + c.T.dot(d) + # Compute linearized constraint: l = A d + b. + linearized_constr = A.dot(d)+b + # Compute new penalty parameter according to formula (3.52), + # reference [2]_, p.891. + vpred = norm(b) - norm(linearized_constr) + # Guarantee `vpred` always positive, + # regardless of roundoff errors. + vpred = max(1e-16, vpred) + previous_penalty = penalty + if quadratic_model > 0: + new_penalty = quadratic_model / ((1-PENALTY_FACTOR)*vpred) + penalty = max(penalty, new_penalty) + # Compute predicted reduction according to formula (3.52), + # reference [2]_, p.891. 
+ predicted_reduction = -quadratic_model + penalty*vpred + + # Compute merit function at current point + merit_function = f + penalty*norm(b) + # Evaluate function and constraints at trial point + x_next = x + S.dot(d) + f_next, b_next = fun_and_constr(x_next) + # Compute merit function at trial point + merit_function_next = f_next + penalty*norm(b_next) + # Compute actual reduction according to formula (3.54), + # reference [2]_, p.892. + actual_reduction = merit_function - merit_function_next + # Compute reduction ratio + reduction_ratio = actual_reduction / predicted_reduction + + # Second order correction (SOC), reference [2]_, p.892. + if reduction_ratio < SUFFICIENT_REDUCTION_RATIO and \ + norm(dn) <= SOC_THRESHOLD * norm(dt): + # Compute second order correction + y = -Y.dot(b_next) + # Make sure increment is inside box constraints + _, t, intersect = box_intersections(d, y, trust_lb, trust_ub) + # Compute tentative point + x_soc = x + S.dot(d + t*y) + f_soc, b_soc = fun_and_constr(x_soc) + # Recompute actual reduction + merit_function_soc = f_soc + penalty*norm(b_soc) + actual_reduction_soc = merit_function - merit_function_soc + # Recompute reduction ratio + reduction_ratio_soc = actual_reduction_soc / predicted_reduction + if intersect and reduction_ratio_soc >= SUFFICIENT_REDUCTION_RATIO: + x_next = x_soc + f_next = f_soc + b_next = b_soc + reduction_ratio = reduction_ratio_soc + + # Readjust trust region step, formula (3.55), reference [2]_, p.892. + if reduction_ratio >= LARGE_REDUCTION_RATIO: + trust_radius = max(TRUST_ENLARGEMENT_FACTOR_L * norm(d), + trust_radius) + elif reduction_ratio >= INTERMEDIARY_REDUCTION_RATIO: + trust_radius = max(TRUST_ENLARGEMENT_FACTOR_S * norm(d), + trust_radius) + # Reduce trust region step, according to reference [3]_, p.696. 
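+        # The shrink rule below proposes
+        #     new_trust_radius = (1 - eta) / (1 - reduction_ratio) * ||d||
+        # with eta = SUFFICIENT_REDUCTION_RATIO, and then clips the result
+        # to the interval [MIN_TRUST_REDUCTION, MAX_TRUST_REDUCTION] times
+        # the current trust_radius.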
+ elif reduction_ratio < SUFFICIENT_REDUCTION_RATIO: + trust_reduction = ((1-SUFFICIENT_REDUCTION_RATIO) / + (1-reduction_ratio)) + new_trust_radius = trust_reduction * norm(d) + if new_trust_radius >= MAX_TRUST_REDUCTION * trust_radius: + trust_radius *= MAX_TRUST_REDUCTION + elif new_trust_radius >= MIN_TRUST_REDUCTION * trust_radius: + trust_radius = new_trust_radius + else: + trust_radius *= MIN_TRUST_REDUCTION + + # Update iteration + if reduction_ratio >= SUFFICIENT_REDUCTION_RATIO: + x = x_next + f, b = f_next, b_next + c, A = grad_and_jac(x) + S = scaling(x) + # Get projections + Z, LS, Y = projections(A, factorization_method) + # Compute least-square lagrange multipliers + v = -LS.dot(c) + # Compute Hessian + H = lagr_hess(x, v) + # Set Flag + last_iteration_failed = False + # Optimality values + optimality = norm(c + A.T.dot(v), np.inf) + constr_violation = norm(b, np.inf) if len(b) > 0 else 0 + else: + penalty = previous_penalty + last_iteration_failed = True + + return x, state diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/minimize_trustregion_constr.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/minimize_trustregion_constr.py new file mode 100644 index 0000000000000000000000000000000000000000..580f9ccf8eab9f22d73ae41f130b2e5e541a71da --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/minimize_trustregion_constr.py @@ -0,0 +1,576 @@ +import time +import numpy as np +from scipy.sparse.linalg import LinearOperator +from .._differentiable_functions import VectorFunction +from .._constraints import ( + NonlinearConstraint, LinearConstraint, PreparedConstraint, Bounds, strict_bounds) +from .._hessian_update_strategy import BFGS +from .._optimize import OptimizeResult +from .._differentiable_functions import ScalarFunction +from .equality_constrained_sqp import equality_constrained_sqp +from .canonical_constraint import (CanonicalConstraint, + initial_constraints_as_canonical) +from .tr_interior_point import tr_interior_point +from .report import BasicReport, SQPReport, IPReport + + +TERMINATION_MESSAGES = { + 0: "The maximum number of function evaluations is exceeded.", + 1: "`gtol` termination condition is satisfied.", + 2: "`xtol` termination condition is satisfied.", + 3: "`callback` function requested termination.", + 4: "Constraint violation exceeds 'gtol'" +} + + +class HessianLinearOperator: + """Build LinearOperator from hessp""" + def __init__(self, hessp, n): + self.hessp = hessp + self.n = n + + def __call__(self, x, *args): + def matvec(p): + return self.hessp(x, p, *args) + + return LinearOperator((self.n, self.n), matvec=matvec) + + +class LagrangianHessian: + """The Hessian of the Lagrangian as LinearOperator. + + The Lagrangian is computed as the objective function plus all the + constraints multiplied with some numbers (Lagrange multipliers). 
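+
+    Informally, for a point ``x`` and multipliers ``v_eq``/``v_ineq`` the
+    returned operator applies::
+
+        H(p) = objective_hess(x).dot(p) + constraints_hess(x, v_eq, v_ineq).dot(p)
+
+    which is what the ``matvec`` closure below computes.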
+ """ + def __init__(self, n, objective_hess, constraints_hess): + self.n = n + self.objective_hess = objective_hess + self.constraints_hess = constraints_hess + + def __call__(self, x, v_eq, v_ineq=None): + if v_ineq is None: + v_ineq = np.empty(0) + H_objective = self.objective_hess(x) + H_constraints = self.constraints_hess(x, v_eq, v_ineq) + + def matvec(p): + return H_objective.dot(p) + H_constraints.dot(p) + + return LinearOperator((self.n, self.n), matvec) + + +def update_state_sqp(state, x, last_iteration_failed, objective, prepared_constraints, + start_time, tr_radius, constr_penalty, cg_info): + state.nit += 1 + state.nfev = objective.nfev + state.njev = objective.ngev + state.nhev = objective.nhev + state.constr_nfev = [c.fun.nfev if isinstance(c.fun, VectorFunction) else 0 + for c in prepared_constraints] + state.constr_njev = [c.fun.njev if isinstance(c.fun, VectorFunction) else 0 + for c in prepared_constraints] + state.constr_nhev = [c.fun.nhev if isinstance(c.fun, VectorFunction) else 0 + for c in prepared_constraints] + + if not last_iteration_failed: + state.x = x + state.fun = objective.f + state.grad = objective.g + state.v = [c.fun.v for c in prepared_constraints] + state.constr = [c.fun.f for c in prepared_constraints] + state.jac = [c.fun.J for c in prepared_constraints] + # Compute Lagrangian Gradient + state.lagrangian_grad = np.copy(state.grad) + for c in prepared_constraints: + state.lagrangian_grad += c.fun.J.T.dot(c.fun.v) + state.optimality = np.linalg.norm(state.lagrangian_grad, np.inf) + # Compute maximum constraint violation + state.constr_violation = 0 + for i in range(len(prepared_constraints)): + lb, ub = prepared_constraints[i].bounds + c = state.constr[i] + state.constr_violation = np.max([state.constr_violation, + np.max(lb - c), + np.max(c - ub)]) + + state.execution_time = time.time() - start_time + state.tr_radius = tr_radius + state.constr_penalty = constr_penalty + state.cg_niter += cg_info["niter"] + state.cg_stop_cond = cg_info["stop_cond"] + + return state + + +def update_state_ip(state, x, last_iteration_failed, objective, + prepared_constraints, start_time, + tr_radius, constr_penalty, cg_info, + barrier_parameter, barrier_tolerance): + state = update_state_sqp(state, x, last_iteration_failed, objective, + prepared_constraints, start_time, tr_radius, + constr_penalty, cg_info) + state.barrier_parameter = barrier_parameter + state.barrier_tolerance = barrier_tolerance + return state + + +def _minimize_trustregion_constr(fun, x0, args, grad, + hess, hessp, bounds, constraints, + xtol=1e-8, gtol=1e-8, + barrier_tol=1e-8, + sparse_jacobian=None, + callback=None, maxiter=1000, + verbose=0, finite_diff_rel_step=None, + initial_constr_penalty=1.0, initial_tr_radius=1.0, + initial_barrier_parameter=0.1, + initial_barrier_tolerance=0.1, + factorization_method=None, + disp=False): + """Minimize a scalar function subject to constraints. + + Parameters + ---------- + gtol : float, optional + Tolerance for termination by the norm of the Lagrangian gradient. + The algorithm will terminate when both the infinity norm (i.e., max + abs value) of the Lagrangian gradient and the constraint violation + are smaller than ``gtol``. Default is 1e-8. + xtol : float, optional + Tolerance for termination by the change of the independent variable. + The algorithm will terminate when ``tr_radius < xtol``, where + ``tr_radius`` is the radius of the trust region used in the algorithm. + Default is 1e-8. 
+ barrier_tol : float, optional + Threshold on the barrier parameter for the algorithm termination. + When inequality constraints are present, the algorithm will terminate + only when the barrier parameter is less than `barrier_tol`. + Default is 1e-8. + sparse_jacobian : {bool, None}, optional + Determines how to represent Jacobians of the constraints. If bool, + then Jacobians of all the constraints will be converted to the + corresponding format. If None (default), then Jacobians won't be + converted, but the algorithm can proceed only if they all have the + same format. + initial_tr_radius: float, optional + Initial trust radius. The trust radius gives the maximum distance + between solution points in consecutive iterations. It reflects the + trust the algorithm puts in the local approximation of the optimization + problem. For an accurate local approximation the trust-region should be + large and for an approximation valid only close to the current point it + should be a small one. The trust radius is automatically updated throughout + the optimization process, with ``initial_tr_radius`` being its initial value. + Default is 1 (recommended in [1]_, p. 19). + initial_constr_penalty : float, optional + Initial constraints penalty parameter. The penalty parameter is used for + balancing the requirements of decreasing the objective function + and satisfying the constraints. It is used for defining the merit function: + ``merit_function(x) = fun(x) + constr_penalty * constr_norm_l2(x)``, + where ``constr_norm_l2(x)`` is the l2 norm of a vector containing all + the constraints. The merit function is used for accepting or rejecting + trial points and ``constr_penalty`` weights the two conflicting goals + of reducing objective function and constraints. The penalty is automatically + updated throughout the optimization process, with + ``initial_constr_penalty`` being its initial value. Default is 1 + (recommended in [1]_, p 19). + initial_barrier_parameter, initial_barrier_tolerance: float, optional + Initial barrier parameter and initial tolerance for the barrier subproblem. + Both are used only when inequality constraints are present. For dealing with + optimization problems ``min_x f(x)`` subject to inequality constraints + ``c(x) <= 0`` the algorithm introduces slack variables, solving the problem + ``min_(x,s) f(x) + barrier_parameter*sum(ln(s))`` subject to the equality + constraints ``c(x) + s = 0`` instead of the original problem. This subproblem + is solved for decreasing values of ``barrier_parameter`` and with decreasing + tolerances for the termination, starting with ``initial_barrier_parameter`` + for the barrier parameter and ``initial_barrier_tolerance`` for the + barrier tolerance. Default is 0.1 for both values (recommended in [1]_ p. 19). + Also note that ``barrier_parameter`` and ``barrier_tolerance`` are updated + with the same prefactor. + factorization_method : string or None, optional + Method to factorize the Jacobian of the constraints. Use None (default) + for the auto selection or one of: + + - 'NormalEquation' (requires scikit-sparse) + - 'AugmentedSystem' + - 'QRFactorization' + - 'SVDFactorization' + + The methods 'NormalEquation' and 'AugmentedSystem' can be used only + with sparse constraints. The projections required by the algorithm + will be computed using, respectively, the normal equation and the + augmented system approaches explained in [1]_. 
'NormalEquation' + computes the Cholesky factorization of ``A A.T`` and 'AugmentedSystem' + performs the LU factorization of an augmented system. They usually + provide similar results. 'AugmentedSystem' is used by default for + sparse matrices. + + The methods 'QRFactorization' and 'SVDFactorization' can be used + only with dense constraints. They compute the required projections + using, respectively, QR and SVD factorizations. The 'SVDFactorization' + method can cope with Jacobian matrices with deficient row rank and will + be used whenever other factorization methods fail (which may imply the + conversion of sparse matrices to a dense format when required). + By default, 'QRFactorization' is used for dense matrices. + finite_diff_rel_step : None or array_like, optional + Relative step size for the finite difference approximation. + maxiter : int, optional + Maximum number of algorithm iterations. Default is 1000. + verbose : {0, 1, 2, 3}, optional + Level of algorithm's verbosity: + + * 0 (default) : work silently. + * 1 : display a termination report. + * 2 : display progress during iterations. + * 3 : display progress during iterations (more complete report). + + disp : bool, optional + If True (default), then `verbose` will be set to 1 if it was 0. + + Returns + ------- + `OptimizeResult` with the fields documented below. Note the following: + + 1. All values corresponding to the constraints are ordered as they + were passed to the solver. And values corresponding to `bounds` + constraints are put *after* other constraints. + 2. All numbers of function, Jacobian or Hessian evaluations correspond + to numbers of actual Python function calls. It means, for example, + that if a Jacobian is estimated by finite differences, then the + number of Jacobian evaluations will be zero and the number of + function evaluations will be incremented by all calls during the + finite difference estimation. + + x : ndarray, shape (n,) + Solution found. + optimality : float + Infinity norm of the Lagrangian gradient at the solution. + constr_violation : float + Maximum constraint violation at the solution. + fun : float + Objective function at the solution. + grad : ndarray, shape (n,) + Gradient of the objective function at the solution. + lagrangian_grad : ndarray, shape (n,) + Gradient of the Lagrangian function at the solution. + nit : int + Total number of iterations. + nfev : integer + Number of the objective function evaluations. + njev : integer + Number of the objective function gradient evaluations. + nhev : integer + Number of the objective function Hessian evaluations. + cg_niter : int + Total number of the conjugate gradient method iterations. + method : {'equality_constrained_sqp', 'tr_interior_point'} + Optimization method used. + constr : list of ndarray + List of constraint values at the solution. + jac : list of {ndarray, sparse matrix} + List of the Jacobian matrices of the constraints at the solution. + v : list of ndarray + List of the Lagrange multipliers for the constraints at the solution. + For an inequality constraint a positive multiplier means that the upper + bound is active, a negative multiplier means that the lower bound is + active and if a multiplier is zero it means the constraint is not + active. + constr_nfev : list of int + Number of constraint evaluations for each of the constraints. + constr_njev : list of int + Number of Jacobian matrix evaluations for each of the constraints. 
+ constr_nhev : list of int + Number of Hessian evaluations for each of the constraints. + tr_radius : float + Radius of the trust region at the last iteration. + constr_penalty : float + Penalty parameter at the last iteration, see `initial_constr_penalty`. + barrier_tolerance : float + Tolerance for the barrier subproblem at the last iteration. + Only for problems with inequality constraints. + barrier_parameter : float + Barrier parameter at the last iteration. Only for problems + with inequality constraints. + execution_time : float + Total execution time. + message : str + Termination message. + status : {0, 1, 2, 3, 4} + Termination status: + + * 0 : The maximum number of function evaluations is exceeded. + * 1 : `gtol` termination condition is satisfied. + * 2 : `xtol` termination condition is satisfied. + * 3 : `callback` function requested termination. + * 4 : Constraint violation exceeds 'gtol'. + + .. versionchanged:: 1.15.0 + If the constraint violation exceeds `gtol`, then ``result.success`` + will now be False. + + cg_stop_cond : int + Reason for CG subproblem termination at the last iteration: + + * 0 : CG subproblem not evaluated. + * 1 : Iteration limit was reached. + * 2 : Reached the trust-region boundary. + * 3 : Negative curvature detected. + * 4 : Tolerance was satisfied. + + References + ---------- + .. [1] Conn, A. R., Gould, N. I., & Toint, P. L. + Trust region methods. 2000. Siam. pp. 19. + """ + x0 = np.atleast_1d(x0).astype(float) + n_vars = np.size(x0) + if hess is None: + if callable(hessp): + hess = HessianLinearOperator(hessp, n_vars) + else: + hess = BFGS() + if disp and verbose == 0: + verbose = 1 + + if bounds is not None: + modified_lb = np.nextafter(bounds.lb, -np.inf, where=bounds.lb > -np.inf) + modified_ub = np.nextafter(bounds.ub, np.inf, where=bounds.ub < np.inf) + modified_lb = np.where(np.isfinite(bounds.lb), modified_lb, bounds.lb) + modified_ub = np.where(np.isfinite(bounds.ub), modified_ub, bounds.ub) + bounds = Bounds(modified_lb, modified_ub, keep_feasible=bounds.keep_feasible) + finite_diff_bounds = strict_bounds(bounds.lb, bounds.ub, + bounds.keep_feasible, n_vars) + else: + finite_diff_bounds = (-np.inf, np.inf) + + # Define Objective Function + objective = ScalarFunction(fun, x0, args, grad, hess, + finite_diff_rel_step, finite_diff_bounds) + + # Put constraints in list format when needed. + if isinstance(constraints, (NonlinearConstraint | LinearConstraint)): + constraints = [constraints] + + # Prepare constraints. + prepared_constraints = [ + PreparedConstraint(c, x0, sparse_jacobian, finite_diff_bounds) + for c in constraints] + + # Check that all constraints are either sparse or dense. + n_sparse = sum(c.fun.sparse_jacobian for c in prepared_constraints) + if 0 < n_sparse < len(prepared_constraints): + raise ValueError("All constraints must have the same kind of the " + "Jacobian --- either all sparse or all dense. " + "You can set the sparsity globally by setting " + "`sparse_jacobian` to either True of False.") + if prepared_constraints: + sparse_jacobian = n_sparse > 0 + + if bounds is not None: + if sparse_jacobian is None: + sparse_jacobian = True + prepared_constraints.append(PreparedConstraint(bounds, x0, + sparse_jacobian)) + + # Concatenate initial constraints to the canonical form. + c_eq0, c_ineq0, J_eq0, J_ineq0 = initial_constraints_as_canonical( + n_vars, prepared_constraints, sparse_jacobian) + + # Prepare all canonical constraints and concatenate it into one. 
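+    # For example, a problem with one equality constraint of dimension 2 and
+    # one inequality constraint of dimension 3 yields a concatenated
+    # canonical constraint with n_eq == 2 and n_ineq == 3, whose fun/jac/hess
+    # stack the per-constraint pieces in the order they were prepared.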
+ canonical_all = [CanonicalConstraint.from_PreparedConstraint(c) + for c in prepared_constraints] + + if len(canonical_all) == 0: + canonical = CanonicalConstraint.empty(n_vars) + elif len(canonical_all) == 1: + canonical = canonical_all[0] + else: + canonical = CanonicalConstraint.concatenate(canonical_all, + sparse_jacobian) + + # Generate the Hessian of the Lagrangian. + lagrangian_hess = LagrangianHessian(n_vars, objective.hess, canonical.hess) + + # Choose appropriate method + if canonical.n_ineq == 0: + method = 'equality_constrained_sqp' + else: + method = 'tr_interior_point' + + # Construct OptimizeResult + state = OptimizeResult( + nit=0, nfev=0, njev=0, nhev=0, + cg_niter=0, cg_stop_cond=0, + fun=objective.f, grad=objective.g, + lagrangian_grad=np.copy(objective.g), + constr=[c.fun.f for c in prepared_constraints], + jac=[c.fun.J for c in prepared_constraints], + constr_nfev=[0 for c in prepared_constraints], + constr_njev=[0 for c in prepared_constraints], + constr_nhev=[0 for c in prepared_constraints], + v=[c.fun.v for c in prepared_constraints], + method=method) + + # Start counting + start_time = time.time() + + # Define stop criteria + if method == 'equality_constrained_sqp': + def stop_criteria(state, x, last_iteration_failed, + optimality, constr_violation, + tr_radius, constr_penalty, cg_info): + state = update_state_sqp(state, x, last_iteration_failed, + objective, prepared_constraints, + start_time, tr_radius, constr_penalty, + cg_info) + if verbose == 2: + BasicReport.print_iteration(state.nit, + state.nfev, + state.cg_niter, + state.fun, + state.tr_radius, + state.optimality, + state.constr_violation) + elif verbose > 2: + SQPReport.print_iteration(state.nit, + state.nfev, + state.cg_niter, + state.fun, + state.tr_radius, + state.optimality, + state.constr_violation, + state.constr_penalty, + state.cg_stop_cond) + state.status = None + state.niter = state.nit # Alias for callback (backward-compatibility) + if callback is not None: + callback_stop = False + try: + callback_stop = callback(state) + except StopIteration: + callback_stop = True + if callback_stop: + state.status = 3 + return True + if state.optimality < gtol and state.constr_violation < gtol: + state.status = 1 + elif state.tr_radius < xtol: + state.status = 2 + elif state.nit >= maxiter: + state.status = 0 + return state.status in (0, 1, 2, 3) + elif method == 'tr_interior_point': + def stop_criteria(state, x, last_iteration_failed, tr_radius, + constr_penalty, cg_info, barrier_parameter, + barrier_tolerance): + state = update_state_ip(state, x, last_iteration_failed, + objective, prepared_constraints, + start_time, tr_radius, constr_penalty, + cg_info, barrier_parameter, barrier_tolerance) + if verbose == 2: + BasicReport.print_iteration(state.nit, + state.nfev, + state.cg_niter, + state.fun, + state.tr_radius, + state.optimality, + state.constr_violation) + elif verbose > 2: + IPReport.print_iteration(state.nit, + state.nfev, + state.cg_niter, + state.fun, + state.tr_radius, + state.optimality, + state.constr_violation, + state.constr_penalty, + state.barrier_parameter, + state.cg_stop_cond) + state.status = None + state.niter = state.nit # Alias for callback (backward compatibility) + if callback is not None: + callback_stop = False + try: + callback_stop = callback(state) + except StopIteration: + callback_stop = True + if callback_stop: + state.status = 3 + return True + if state.optimality < gtol and state.constr_violation < gtol: + state.status = 1 + elif (state.tr_radius < xtol + and 
state.barrier_parameter < barrier_tol): + state.status = 2 + elif state.nit >= maxiter: + state.status = 0 + return state.status in (0, 1, 2, 3) + + if verbose == 2: + BasicReport.print_header() + elif verbose > 2: + if method == 'equality_constrained_sqp': + SQPReport.print_header() + elif method == 'tr_interior_point': + IPReport.print_header() + + # Call inferior function to do the optimization + if method == 'equality_constrained_sqp': + def fun_and_constr(x): + f = objective.fun(x) + c_eq, _ = canonical.fun(x) + return f, c_eq + + def grad_and_jac(x): + g = objective.grad(x) + J_eq, _ = canonical.jac(x) + return g, J_eq + + _, result = equality_constrained_sqp( + fun_and_constr, grad_and_jac, lagrangian_hess, + x0, objective.f, objective.g, + c_eq0, J_eq0, + stop_criteria, state, + initial_constr_penalty, initial_tr_radius, + factorization_method) + + elif method == 'tr_interior_point': + _, result = tr_interior_point( + objective.fun, objective.grad, lagrangian_hess, + n_vars, canonical.n_ineq, canonical.n_eq, + canonical.fun, canonical.jac, + x0, objective.f, objective.g, + c_ineq0, J_ineq0, c_eq0, J_eq0, + stop_criteria, + canonical.keep_feasible, + xtol, state, initial_barrier_parameter, + initial_barrier_tolerance, + initial_constr_penalty, initial_tr_radius, + factorization_method, finite_diff_bounds) + + # Status 4 occurs when minimize is successful but constraints are not satisfied. + if result.status in (1, 2) and state.constr_violation > gtol: + result.status = 4 + + # Status 3 occurs when the callback function requests termination, + # this is assumed to not be a success. + result.success = True if result.status in (1, 2) else False + result.message = TERMINATION_MESSAGES[result.status] + + # Alias (for backward compatibility with 1.1.0) + result.niter = result.nit + + if verbose == 2: + BasicReport.print_footer() + elif verbose > 2: + if method == 'equality_constrained_sqp': + SQPReport.print_footer() + elif method == 'tr_interior_point': + IPReport.print_footer() + if verbose >= 1: + print(result.message) + print(f"Number of iterations: {result.nit}, " + f"function evaluations: {result.nfev}, " + f"CG iterations: {result.cg_niter}, " + f"optimality: {result.optimality:.2e}, " + f"constraint violation: {result.constr_violation:.2e}, " + f"execution time: {result.execution_time:4.2} s.") + return result diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/projections.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/projections.py new file mode 100644 index 0000000000000000000000000000000000000000..a07b836bdbad688a265ae34ce91a361fd5050eb1 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/projections.py @@ -0,0 +1,407 @@ +"""Basic linear factorizations needed by the solver.""" + +from scipy.sparse import (bmat, csc_matrix, eye, issparse) +from scipy.sparse.linalg import LinearOperator +import scipy.linalg +import scipy.sparse.linalg +try: + from sksparse.cholmod import cholesky_AAt + sksparse_available = True +except ImportError: + import warnings + sksparse_available = False +import numpy as np +from warnings import warn + +__all__ = [ + 'orthogonality', + 'projections', +] + + +def orthogonality(A, g): + """Measure orthogonality between a vector and the null space of a matrix. 
+ + Compute a measure of orthogonality between the null space + of the (possibly sparse) matrix ``A`` and a given vector ``g``. + + The formula is a simplified (and cheaper) version of formula (3.13) + from [1]_. + ``orth = norm(A g, ord=2)/(norm(A, ord='fro')*norm(g, ord=2))``. + + References + ---------- + .. [1] Gould, Nicholas IM, Mary E. Hribar, and Jorge Nocedal. + "On the solution of equality constrained quadratic + programming problems arising in optimization." + SIAM Journal on Scientific Computing 23.4 (2001): 1376-1395. + """ + # Compute vector norms + norm_g = np.linalg.norm(g) + # Compute Froebnius norm of the matrix A + if issparse(A): + norm_A = scipy.sparse.linalg.norm(A, ord='fro') + else: + norm_A = np.linalg.norm(A, ord='fro') + + # Check if norms are zero + if norm_g == 0 or norm_A == 0: + return 0 + + norm_A_g = np.linalg.norm(A.dot(g)) + # Orthogonality measure + orth = norm_A_g / (norm_A*norm_g) + return orth + + +def normal_equation_projections(A, m, n, orth_tol, max_refin, tol): + """Return linear operators for matrix A using ``NormalEquation`` approach. + """ + # Cholesky factorization + factor = cholesky_AAt(A) + + # z = x - A.T inv(A A.T) A x + def null_space(x): + v = factor(A.dot(x)) + z = x - A.T.dot(v) + + # Iterative refinement to improve roundoff + # errors described in [2]_, algorithm 5.1. + k = 0 + while orthogonality(A, z) > orth_tol: + if k >= max_refin: + break + # z_next = z - A.T inv(A A.T) A z + v = factor(A.dot(z)) + z = z - A.T.dot(v) + k += 1 + + return z + + # z = inv(A A.T) A x + def least_squares(x): + return factor(A.dot(x)) + + # z = A.T inv(A A.T) x + def row_space(x): + return A.T.dot(factor(x)) + + return null_space, least_squares, row_space + + +def augmented_system_projections(A, m, n, orth_tol, max_refin, tol): + """Return linear operators for matrix A - ``AugmentedSystem``.""" + # Form augmented system + K = csc_matrix(bmat([[eye(n), A.T], [A, None]])) + # LU factorization + # TODO: Use a symmetric indefinite factorization + # to solve the system twice as fast (because + # of the symmetry). + try: + solve = scipy.sparse.linalg.factorized(K) + except RuntimeError: + warn("Singular Jacobian matrix. Using dense SVD decomposition to " + "perform the factorizations.", + stacklevel=3) + return svd_factorization_projections(A.toarray(), + m, n, orth_tol, + max_refin, tol) + + # z = x - A.T inv(A A.T) A x + # is computed solving the extended system: + # [I A.T] * [ z ] = [x] + # [A O ] [aux] [0] + def null_space(x): + # v = [x] + # [0] + v = np.hstack([x, np.zeros(m)]) + # lu_sol = [ z ] + # [aux] + lu_sol = solve(v) + z = lu_sol[:n] + + # Iterative refinement to improve roundoff + # errors described in [2]_, algorithm 5.2. 
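+        # Each refinement pass re-solves the augmented system for the
+        # residual of the current solution and adds the correction,
+        # stopping once the orthogonality measure falls below orth_tol
+        # or max_refin passes have been used.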
+ k = 0 + while orthogonality(A, z) > orth_tol: + if k >= max_refin: + break + # new_v = [x] - [I A.T] * [ z ] + # [0] [A O ] [aux] + new_v = v - K.dot(lu_sol) + # [I A.T] * [delta z ] = new_v + # [A O ] [delta aux] + lu_update = solve(new_v) + # [ z ] += [delta z ] + # [aux] [delta aux] + lu_sol += lu_update + z = lu_sol[:n] + k += 1 + + # return z = x - A.T inv(A A.T) A x + return z + + # z = inv(A A.T) A x + # is computed solving the extended system: + # [I A.T] * [aux] = [x] + # [A O ] [ z ] [0] + def least_squares(x): + # v = [x] + # [0] + v = np.hstack([x, np.zeros(m)]) + # lu_sol = [aux] + # [ z ] + lu_sol = solve(v) + # return z = inv(A A.T) A x + return lu_sol[n:m+n] + + # z = A.T inv(A A.T) x + # is computed solving the extended system: + # [I A.T] * [ z ] = [0] + # [A O ] [aux] [x] + def row_space(x): + # v = [0] + # [x] + v = np.hstack([np.zeros(n), x]) + # lu_sol = [ z ] + # [aux] + lu_sol = solve(v) + # return z = A.T inv(A A.T) x + return lu_sol[:n] + + return null_space, least_squares, row_space + + +def qr_factorization_projections(A, m, n, orth_tol, max_refin, tol): + """Return linear operators for matrix A using ``QRFactorization`` approach. + """ + # QRFactorization + Q, R, P = scipy.linalg.qr(A.T, pivoting=True, mode='economic') + + if np.linalg.norm(R[-1, :], np.inf) < tol: + warn('Singular Jacobian matrix. Using SVD decomposition to ' + + 'perform the factorizations.', + stacklevel=3) + return svd_factorization_projections(A, m, n, + orth_tol, + max_refin, + tol) + + # z = x - A.T inv(A A.T) A x + def null_space(x): + # v = P inv(R) Q.T x + aux1 = Q.T.dot(x) + aux2 = scipy.linalg.solve_triangular(R, aux1, lower=False) + v = np.zeros(m) + v[P] = aux2 + z = x - A.T.dot(v) + + # Iterative refinement to improve roundoff + # errors described in [2]_, algorithm 5.1. + k = 0 + while orthogonality(A, z) > orth_tol: + if k >= max_refin: + break + # v = P inv(R) Q.T x + aux1 = Q.T.dot(z) + aux2 = scipy.linalg.solve_triangular(R, aux1, lower=False) + v[P] = aux2 + # z_next = z - A.T v + z = z - A.T.dot(v) + k += 1 + + return z + + # z = inv(A A.T) A x + def least_squares(x): + # z = P inv(R) Q.T x + aux1 = Q.T.dot(x) + aux2 = scipy.linalg.solve_triangular(R, aux1, lower=False) + z = np.zeros(m) + z[P] = aux2 + return z + + # z = A.T inv(A A.T) x + def row_space(x): + # z = Q inv(R.T) P.T x + aux1 = x[P] + aux2 = scipy.linalg.solve_triangular(R, aux1, + lower=False, + trans='T') + z = Q.dot(aux2) + return z + + return null_space, least_squares, row_space + + +def svd_factorization_projections(A, m, n, orth_tol, max_refin, tol): + """Return linear operators for matrix A using ``SVDFactorization`` approach. + """ + # SVD Factorization + U, s, Vt = scipy.linalg.svd(A, full_matrices=False) + + # Remove dimensions related with very small singular values + U = U[:, s > tol] + Vt = Vt[s > tol, :] + s = s[s > tol] + + # z = x - A.T inv(A A.T) A x + def null_space(x): + # v = U 1/s V.T x = inv(A A.T) A x + aux1 = Vt.dot(x) + aux2 = 1/s*aux1 + v = U.dot(aux2) + z = x - A.T.dot(v) + + # Iterative refinement to improve roundoff + # errors described in [2]_, algorithm 5.1. 
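+        # Same refinement scheme as in the other factorizations: recompute
+        # v from the current z and subtract A.T v, repeating until z is
+        # sufficiently orthogonal to the rows of A or max_refin is reached.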
+        k = 0
+        while orthogonality(A, z) > orth_tol:
+            if k >= max_refin:
+                break
+            # v = U 1/s V.T x = inv(A A.T) A x
+            aux1 = Vt.dot(z)
+            aux2 = 1/s*aux1
+            v = U.dot(aux2)
+            # z_next = z - A.T v
+            z = z - A.T.dot(v)
+            k += 1
+
+        return z
+
+    # z = inv(A A.T) A x
+    def least_squares(x):
+        # z = U 1/s V.T x = inv(A A.T) A x
+        aux1 = Vt.dot(x)
+        aux2 = 1/s*aux1
+        z = U.dot(aux2)
+        return z
+
+    # z = A.T inv(A A.T) x
+    def row_space(x):
+        # z = V 1/s U.T x
+        aux1 = U.T.dot(x)
+        aux2 = 1/s*aux1
+        z = Vt.T.dot(aux2)
+        return z
+
+    return null_space, least_squares, row_space
+
+
+def projections(A, method=None, orth_tol=1e-12, max_refin=3, tol=1e-15):
+    """Return three linear operators related to a given matrix A.
+
+    Parameters
+    ----------
+    A : sparse matrix (or ndarray), shape (m, n)
+        Matrix ``A`` used in the projection.
+    method : string, optional
+        Method used to compute the given linear
+        operators. Should be one of:
+
+            - 'NormalEquation': The operators
+              will be computed using the
+              so-called normal equation approach
+              explained in [1]_. In order to do
+              so the Cholesky factorization of
+              ``(A A.T)`` is computed. Exclusive
+              for sparse matrices.
+            - 'AugmentedSystem': The operators
+              will be computed using the
+              so-called augmented system approach
+              explained in [1]_. Exclusive
+              for sparse matrices.
+            - 'QRFactorization': Compute projections
+              using QR factorization. Exclusive for
+              dense matrices.
+            - 'SVDFactorization': Compute projections
+              using SVD factorization. Exclusive for
+              dense matrices.
+
+    orth_tol : float, optional
+        Tolerance for iterative refinements.
+    max_refin : int, optional
+        Maximum number of iterative refinements.
+    tol : float, optional
+        Tolerance for singular values.
+
+    Returns
+    -------
+    Z : LinearOperator, shape (n, n)
+        Null-space operator. For a given vector ``x``,
+        the null space operator is equivalent to applying
+        a projection matrix ``P = I - A.T inv(A A.T) A``
+        to the vector. It can be shown that this is
+        equivalent to projecting ``x`` onto the null space
+        of A.
+    LS : LinearOperator, shape (m, n)
+        Least-squares operator. For a given vector ``x``,
+        the least-squares operator is equivalent to applying a
+        pseudoinverse matrix ``pinv(A.T) = inv(A A.T) A``
+        to the vector. It can be shown that this vector
+        ``pinv(A.T) x`` is the least-squares solution to
+        ``A.T y = x``.
+    Y : LinearOperator, shape (n, m)
+        Row-space operator. For a given vector ``x``,
+        the row-space operator is equivalent to applying a
+        projection matrix ``Q = A.T inv(A A.T)``
+        to the vector. It can be shown that this
+        vector ``y = Q x`` is the minimum norm solution
+        of ``A y = x``.
+
+    Notes
+    -----
+    Uses iterative refinements described in [1]_
+    during the computation of ``Z`` in order to
+    cope with the possibility of large roundoff errors.
+
+    References
+    ----------
+    .. [1] Gould, Nicholas IM, Mary E. Hribar, and Jorge Nocedal.
+           "On the solution of equality constrained quadratic
+           programming problems arising in optimization."
+           SIAM Journal on Scientific Computing 23.4 (2001): 1376-1395.
+    """
+    m, n = np.shape(A)
+
+    # The factorization of an empty matrix
+    # only works for the sparse representation.
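+    # (m*n == 0 means A has no rows or no columns; converting it to CSC
+    # routes it through the sparse branch below, which is the only one
+    # that handles the empty factorization.)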
+ if m*n == 0: + A = csc_matrix(A) + + # Check Argument + if issparse(A): + if method is None: + method = "AugmentedSystem" + if method not in ("NormalEquation", "AugmentedSystem"): + raise ValueError("Method not allowed for sparse matrix.") + if method == "NormalEquation" and not sksparse_available: + warnings.warn("Only accepts 'NormalEquation' option when " + "scikit-sparse is available. Using " + "'AugmentedSystem' option instead.", + ImportWarning, stacklevel=3) + method = 'AugmentedSystem' + else: + if method is None: + method = "QRFactorization" + if method not in ("QRFactorization", "SVDFactorization"): + raise ValueError("Method not allowed for dense array.") + + if method == 'NormalEquation': + null_space, least_squares, row_space \ + = normal_equation_projections(A, m, n, orth_tol, max_refin, tol) + elif method == 'AugmentedSystem': + null_space, least_squares, row_space \ + = augmented_system_projections(A, m, n, orth_tol, max_refin, tol) + elif method == "QRFactorization": + null_space, least_squares, row_space \ + = qr_factorization_projections(A, m, n, orth_tol, max_refin, tol) + elif method == "SVDFactorization": + null_space, least_squares, row_space \ + = svd_factorization_projections(A, m, n, orth_tol, max_refin, tol) + + Z = LinearOperator((n, n), null_space) + LS = LinearOperator((m, n), least_squares) + Y = LinearOperator((n, m), row_space) + + return Z, LS, Y diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/qp_subproblem.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/qp_subproblem.py new file mode 100644 index 0000000000000000000000000000000000000000..a039a7738c283f90f30fd7c4583bf9e1a8f559d5 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/qp_subproblem.py @@ -0,0 +1,637 @@ +"""Equality-constrained quadratic programming solvers.""" + +from scipy.sparse import (linalg, bmat, csc_matrix) +from math import copysign +import numpy as np +from numpy.linalg import norm + +__all__ = [ + 'eqp_kktfact', + 'sphere_intersections', + 'box_intersections', + 'box_sphere_intersections', + 'inside_box_boundaries', + 'modified_dogleg', + 'projected_cg' +] + + +# For comparison with the projected CG +def eqp_kktfact(H, c, A, b): + """Solve equality-constrained quadratic programming (EQP) problem. + + Solve ``min 1/2 x.T H x + x.t c`` subject to ``A x + b = 0`` + using direct factorization of the KKT system. + + Parameters + ---------- + H : sparse matrix, shape (n, n) + Hessian matrix of the EQP problem. + c : array_like, shape (n,) + Gradient of the quadratic objective function. + A : sparse matrix + Jacobian matrix of the EQP problem. + b : array_like, shape (m,) + Right-hand side of the constraint equation. + + Returns + ------- + x : array_like, shape (n,) + Solution of the KKT problem. + lagrange_multipliers : ndarray, shape (m,) + Lagrange multipliers of the KKT problem. + """ + n, = np.shape(c) # Number of parameters + m, = np.shape(b) # Number of constraints + + # Karush-Kuhn-Tucker matrix of coefficients. + # Defined as in Nocedal/Wright "Numerical + # Optimization" p.452 in Eq. (16.4). + kkt_matrix = csc_matrix(bmat([[H, A.T], [A, None]])) + # Vector of coefficients. + kkt_vec = np.hstack([-c, -b]) + + # TODO: Use a symmetric indefinite factorization + # to solve the system twice as fast (because + # of the symmetry). 
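+    # The sparse LU factorization below solves the full KKT system
+    #     [H  A.T] [x ]   [-c]
+    #     [A   0 ] [mu] = [-b]
+    # in one shot; x is the first block of the solution and the Lagrange
+    # multipliers are recovered (with a sign change) from the second block.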
+ lu = linalg.splu(kkt_matrix) + kkt_sol = lu.solve(kkt_vec) + x = kkt_sol[:n] + lagrange_multipliers = -kkt_sol[n:n+m] + + return x, lagrange_multipliers + + +def sphere_intersections(z, d, trust_radius, + entire_line=False): + """Find the intersection between segment (or line) and spherical constraints. + + Find the intersection between the segment (or line) defined by the + parametric equation ``x(t) = z + t*d`` and the ball + ``||x|| <= trust_radius``. + + Parameters + ---------- + z : array_like, shape (n,) + Initial point. + d : array_like, shape (n,) + Direction. + trust_radius : float + Ball radius. + entire_line : bool, optional + When ``True``, the function returns the intersection between the line + ``x(t) = z + t*d`` (``t`` can assume any value) and the ball + ``||x|| <= trust_radius``. When ``False``, the function returns the intersection + between the segment ``x(t) = z + t*d``, ``0 <= t <= 1``, and the ball. + + Returns + ------- + ta, tb : float + The line/segment ``x(t) = z + t*d`` is inside the ball for + for ``ta <= t <= tb``. + intersect : bool + When ``True``, there is a intersection between the line/segment + and the sphere. On the other hand, when ``False``, there is no + intersection. + """ + # Special case when d=0 + if norm(d) == 0: + return 0, 0, False + # Check for inf trust_radius + if np.isinf(trust_radius): + if entire_line: + ta = -np.inf + tb = np.inf + else: + ta = 0 + tb = 1 + intersect = True + return ta, tb, intersect + + a = np.dot(d, d) + b = 2 * np.dot(z, d) + c = np.dot(z, z) - trust_radius**2 + discriminant = b*b - 4*a*c + if discriminant < 0: + intersect = False + return 0, 0, intersect + sqrt_discriminant = np.sqrt(discriminant) + + # The following calculation is mathematically + # equivalent to: + # ta = (-b - sqrt_discriminant) / (2*a) + # tb = (-b + sqrt_discriminant) / (2*a) + # but produce smaller round off errors. + # Look at Matrix Computation p.97 + # for a better justification. + aux = b + copysign(sqrt_discriminant, b) + ta = -aux / (2*a) + tb = -2*c / aux + ta, tb = sorted([ta, tb]) + + if entire_line: + intersect = True + else: + # Checks to see if intersection happens + # within vectors length. + if tb < 0 or ta > 1: + intersect = False + ta = 0 + tb = 0 + else: + intersect = True + # Restrict intersection interval + # between 0 and 1. + ta = max(0, ta) + tb = min(1, tb) + + return ta, tb, intersect + + +def box_intersections(z, d, lb, ub, + entire_line=False): + """Find the intersection between segment (or line) and box constraints. + + Find the intersection between the segment (or line) defined by the + parametric equation ``x(t) = z + t*d`` and the rectangular box + ``lb <= x <= ub``. + + Parameters + ---------- + z : array_like, shape (n,) + Initial point. + d : array_like, shape (n,) + Direction. + lb : array_like, shape (n,) + Lower bounds to each one of the components of ``x``. Used + to delimit the rectangular box. + ub : array_like, shape (n, ) + Upper bounds to each one of the components of ``x``. Used + to delimit the rectangular box. + entire_line : bool, optional + When ``True``, the function returns the intersection between the line + ``x(t) = z + t*d`` (``t`` can assume any value) and the rectangular + box. When ``False``, the function returns the intersection between the segment + ``x(t) = z + t*d``, ``0 <= t <= 1``, and the rectangular box. + + Returns + ------- + ta, tb : float + The line/segment ``x(t) = z + t*d`` is inside the box for + for ``ta <= t <= tb``. 
+ intersect : bool + When ``True``, there is a intersection between the line (or segment) + and the rectangular box. On the other hand, when ``False``, there is no + intersection. + """ + # Make sure it is a numpy array + z = np.asarray(z) + d = np.asarray(d) + lb = np.asarray(lb) + ub = np.asarray(ub) + # Special case when d=0 + if norm(d) == 0: + return 0, 0, False + + # Get values for which d==0 + zero_d = (d == 0) + # If the boundaries are not satisfied for some coordinate + # for which "d" is zero, there is no box-line intersection. + if (z[zero_d] < lb[zero_d]).any() or (z[zero_d] > ub[zero_d]).any(): + intersect = False + return 0, 0, intersect + # Remove values for which d is zero + not_zero_d = np.logical_not(zero_d) + z = z[not_zero_d] + d = d[not_zero_d] + lb = lb[not_zero_d] + ub = ub[not_zero_d] + + # Find a series of intervals (t_lb[i], t_ub[i]). + t_lb = (lb-z) / d + t_ub = (ub-z) / d + # Get the intersection of all those intervals. + ta = max(np.minimum(t_lb, t_ub)) + tb = min(np.maximum(t_lb, t_ub)) + + # Check if intersection is feasible + if ta <= tb: + intersect = True + else: + intersect = False + # Checks to see if intersection happens within vectors length. + if not entire_line: + if tb < 0 or ta > 1: + intersect = False + ta = 0 + tb = 0 + else: + # Restrict intersection interval between 0 and 1. + ta = max(0, ta) + tb = min(1, tb) + + return ta, tb, intersect + + +def box_sphere_intersections(z, d, lb, ub, trust_radius, + entire_line=False, + extra_info=False): + """Find the intersection between segment (or line) and box/sphere constraints. + + Find the intersection between the segment (or line) defined by the + parametric equation ``x(t) = z + t*d``, the rectangular box + ``lb <= x <= ub`` and the ball ``||x|| <= trust_radius``. + + Parameters + ---------- + z : array_like, shape (n,) + Initial point. + d : array_like, shape (n,) + Direction. + lb : array_like, shape (n,) + Lower bounds to each one of the components of ``x``. Used + to delimit the rectangular box. + ub : array_like, shape (n, ) + Upper bounds to each one of the components of ``x``. Used + to delimit the rectangular box. + trust_radius : float + Ball radius. + entire_line : bool, optional + When ``True``, the function returns the intersection between the line + ``x(t) = z + t*d`` (``t`` can assume any value) and the constraints. + When ``False``, the function returns the intersection between the segment + ``x(t) = z + t*d``, ``0 <= t <= 1`` and the constraints. + extra_info : bool, optional + When ``True``, the function returns ``intersect_sphere`` and ``intersect_box``. + + Returns + ------- + ta, tb : float + The line/segment ``x(t) = z + t*d`` is inside the rectangular box and + inside the ball for ``ta <= t <= tb``. + intersect : bool + When ``True``, there is a intersection between the line (or segment) + and both constraints. On the other hand, when ``False``, there is no + intersection. + sphere_info : dict, optional + Dictionary ``{ta, tb, intersect}`` containing the interval ``[ta, tb]`` + for which the line intercepts the ball. And a boolean value indicating + whether the sphere is intersected by the line. + box_info : dict, optional + Dictionary ``{ta, tb, intersect}`` containing the interval ``[ta, tb]`` + for which the line intercepts the box. And a boolean value indicating + whether the box is intersected by the line. 
+    """
+    ta_b, tb_b, intersect_b = box_intersections(z, d, lb, ub,
+                                                entire_line)
+    ta_s, tb_s, intersect_s = sphere_intersections(z, d,
+                                                   trust_radius,
+                                                   entire_line)
+    ta = np.maximum(ta_b, ta_s)
+    tb = np.minimum(tb_b, tb_s)
+    if intersect_b and intersect_s and ta <= tb:
+        intersect = True
+    else:
+        intersect = False
+
+    if extra_info:
+        sphere_info = {'ta': ta_s, 'tb': tb_s, 'intersect': intersect_s}
+        box_info = {'ta': ta_b, 'tb': tb_b, 'intersect': intersect_b}
+        return ta, tb, intersect, sphere_info, box_info
+    else:
+        return ta, tb, intersect
+
+
+def inside_box_boundaries(x, lb, ub):
+    """Check if lb <= x <= ub."""
+    return (lb <= x).all() and (x <= ub).all()
+
+
+def reinforce_box_boundaries(x, lb, ub):
+    """Return the value of x clipped to the box lb <= x <= ub."""
+    return np.minimum(np.maximum(x, lb), ub)
+
+
+def modified_dogleg(A, Y, b, trust_radius, lb, ub):
+    """Approximately minimize ``1/2*|| A x + b ||^2`` inside trust-region.
+
+    Approximately solve the problem of minimizing ``1/2*|| A x + b ||^2``
+    subject to ``||x|| < Delta`` and ``lb <= x <= ub`` using a modification
+    of the classical dogleg approach.
+
+    Parameters
+    ----------
+    A : LinearOperator (or sparse matrix or ndarray), shape (m, n)
+        Matrix ``A`` in the minimization problem. It should have
+        dimension ``(m, n)`` such that ``m < n``.
+    Y : LinearOperator (or sparse matrix or ndarray), shape (n, m)
+        LinearOperator that applies the projection matrix
+        ``Q = A.T inv(A A.T)`` to a vector. The resulting vector
+        ``y = Q x`` is the minimum norm solution of ``A y = x``.
+    b : array_like, shape (m,)
+        Vector ``b`` in the minimization problem.
+    trust_radius : float
+        Trust radius to be considered. Delimits a sphere boundary
+        to the problem.
+    lb : array_like, shape (n,)
+        Lower bounds to each one of the components of ``x``.
+        It is expected that ``lb <= 0``, otherwise the algorithm
+        may fail. If ``lb[i] = -Inf``, the lower
+        bound for the ith component is just ignored.
+    ub : array_like, shape (n, )
+        Upper bounds to each one of the components of ``x``.
+        It is expected that ``ub >= 0``, otherwise the algorithm
+        may fail. If ``ub[i] = Inf``, the upper bound for the ith
+        component is just ignored.
+
+    Returns
+    -------
+    x : array_like, shape (n,)
+        Solution to the problem.
+
+    Notes
+    -----
+    Based on the implementation described on pp. 885-886 of [1]_.
+
+    References
+    ----------
+    .. [1] Byrd, Richard H., Mary E. Hribar, and Jorge Nocedal.
+           "An interior point algorithm for large-scale nonlinear
+           programming." SIAM Journal on Optimization 9.4 (1999): 877-900.
+    """
+    # Compute minimum norm minimizer of 1/2*|| A x + b ||^2.
+    newton_point = -Y.dot(b)
+    # Check for interior point
+    if inside_box_boundaries(newton_point, lb, ub) \
+       and norm(newton_point) <= trust_radius:
+        x = newton_point
+        return x
+
+    # Compute gradient vector ``g = A.T b``
+    g = A.T.dot(b)
+    # Compute Cauchy point
+    # ``cauchy_point = -(g.T g / (g.T A.T A g)) g``.
+    A_g = A.dot(g)
+    cauchy_point = -np.dot(g, g) / np.dot(A_g, A_g) * g
+    # Origin
+    origin_point = np.zeros_like(cauchy_point)
+
+    # Check the segment between cauchy_point and newton_point
+    # for a possible solution.
+    z = cauchy_point
+    p = newton_point - cauchy_point
+    _, alpha, intersect = box_sphere_intersections(z, p, lb, ub,
+                                                   trust_radius)
+    if intersect:
+        x1 = z + alpha*p
+    else:
+        # Check the segment between the origin and cauchy_point
+        # for a possible solution.
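+        # (this branch is reached when the Cauchy-to-Newton segment has no
+        # intersection with the box/trust-region feasible set)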
+        z = origin_point
+        p = cauchy_point
+        _, alpha, _ = box_sphere_intersections(z, p, lb, ub,
+                                               trust_radius)
+        x1 = z + alpha*p
+
+    # Check the segment between origin and newton_point
+    # for a possible solution.
+    z = origin_point
+    p = newton_point
+    _, alpha, _ = box_sphere_intersections(z, p, lb, ub,
+                                           trust_radius)
+    x2 = z + alpha*p
+
+    # Return the best solution among x1 and x2.
+    if norm(A.dot(x1) + b) < norm(A.dot(x2) + b):
+        return x1
+    else:
+        return x2
+
+
+def projected_cg(H, c, Z, Y, b, trust_radius=np.inf,
+                 lb=None, ub=None, tol=None,
+                 max_iter=None, max_infeasible_iter=None,
+                 return_all=False):
+    """Solve EQP problem with projected CG method.
+
+    Solve the equality-constrained quadratic programming problem
+    ``min 1/2 x.T H x + x.T c`` subject to ``A x + b = 0`` and,
+    possibly, to trust region constraints ``||x|| < trust_radius``
+    and box constraints ``lb <= x <= ub``.
+
+    Parameters
+    ----------
+    H : LinearOperator (or sparse matrix or ndarray), shape (n, n)
+        Operator for computing ``H v``.
+    c : array_like, shape (n,)
+        Gradient of the quadratic objective function.
+    Z : LinearOperator (or sparse matrix or ndarray), shape (n, n)
+        Operator for projecting ``x`` onto the null space of A.
+    Y : LinearOperator, sparse matrix, ndarray, shape (n, m)
+        Operator that, for a given vector ``b``, computes the smallest
+        norm solution of ``A x + b = 0``.
+    b : array_like, shape (m,)
+        Right-hand side of the constraint equation.
+    trust_radius : float, optional
+        Trust radius to be considered. By default, uses ``trust_radius=inf``,
+        which means no trust radius at all.
+    lb : array_like, shape (n,), optional
+        Lower bounds to each one of the components of ``x``.
+        If ``lb[i] = -Inf`` the lower bound for the i-th
+        component is just ignored (default).
+    ub : array_like, shape (n, ), optional
+        Upper bounds to each one of the components of ``x``.
+        If ``ub[i] = Inf`` the upper bound for the i-th
+        component is just ignored (default).
+    tol : float, optional
+        Tolerance used to interrupt the algorithm.
+    max_iter : int, optional
+        Maximum algorithm iterations, where ``max_iter <= n-m``.
+        By default, uses ``max_iter = n-m``.
+    max_infeasible_iter : int, optional
+        Maximum infeasible (regarding box constraints) iterations the
+        algorithm is allowed to take.
+        By default, uses ``max_infeasible_iter = n-m``.
+    return_all : bool, optional
+        When ``True``, return the list of all vectors through the iterations.
+
+    Returns
+    -------
+    x : array_like, shape (n,)
+        Solution of the EQP problem.
+    info : Dict
+        Dictionary containing the following:
+
+            - niter : Number of iterations.
+            - stop_cond : Reason for algorithm termination:
+                1. Iteration limit was reached;
+                2. Reached the trust-region boundary;
+                3. Negative curvature detected;
+                4. Tolerance was satisfied.
+            - allvecs : List containing all intermediary vectors (optional).
+            - hits_boundary : True if the proposed step is on the boundary
+              of the trust region.
+
+    Notes
+    -----
+    Implementation of Algorithm 6.2 of [1]_.
+
+    In the absence of spherical and box constraints, and given sufficiently
+    many iterations, the method returns a truly optimal result.
+    In the presence of those constraints, the value returned is only
+    an inexpensive approximation of the optimal value.
+
+    References
+    ----------
+    .. [1] Gould, Nicholas IM, Mary E. Hribar, and Jorge Nocedal.
+           "On the solution of equality constrained quadratic
+           programming problems arising in optimization."
+           SIAM Journal on Scientific Computing 23.4 (2001): 1376-1395.
+ """ + CLOSE_TO_ZERO = 1e-25 + + n, = np.shape(c) # Number of parameters + m, = np.shape(b) # Number of constraints + + # Initial Values + x = Y.dot(-b) + r = Z.dot(H.dot(x) + c) + g = Z.dot(r) + p = -g + + # Store ``x`` value + if return_all: + allvecs = [x] + # Values for the first iteration + H_p = H.dot(p) + rt_g = norm(g)**2 # g.T g = r.T Z g = r.T g (ref [1]_ p.1389) + + # If x > trust-region the problem does not have a solution. + tr_distance = trust_radius - norm(x) + if tr_distance < 0: + raise ValueError("Trust region problem does not have a solution.") + # If x == trust_radius, then x is the solution + # to the optimization problem, since x is the + # minimum norm solution to Ax=b. + elif tr_distance < CLOSE_TO_ZERO: + info = {'niter': 0, 'stop_cond': 2, 'hits_boundary': True} + if return_all: + allvecs.append(x) + info['allvecs'] = allvecs + return x, info + + # Set default tolerance + if tol is None: + tol = max(min(0.01 * np.sqrt(rt_g), 0.1 * rt_g), CLOSE_TO_ZERO) + # Set default lower and upper bounds + if lb is None: + lb = np.full(n, -np.inf) + if ub is None: + ub = np.full(n, np.inf) + # Set maximum iterations + if max_iter is None: + max_iter = n-m + max_iter = min(max_iter, n-m) + # Set maximum infeasible iterations + if max_infeasible_iter is None: + max_infeasible_iter = n-m + + hits_boundary = False + stop_cond = 1 + counter = 0 + last_feasible_x = np.zeros_like(x) + k = 0 + for i in range(max_iter): + # Stop criteria - Tolerance : r.T g < tol + if rt_g < tol: + stop_cond = 4 + break + k += 1 + # Compute curvature + pt_H_p = H_p.dot(p) + # Stop criteria - Negative curvature + if pt_H_p <= 0: + if np.isinf(trust_radius): + raise ValueError("Negative curvature not allowed " + "for unrestricted problems.") + else: + # Find intersection with constraints + _, alpha, intersect = box_sphere_intersections( + x, p, lb, ub, trust_radius, entire_line=True) + # Update solution + if intersect: + x = x + alpha*p + # Reinforce variables are inside box constraints. + # This is only necessary because of roundoff errors. + x = reinforce_box_boundaries(x, lb, ub) + # Attribute information + stop_cond = 3 + hits_boundary = True + break + + # Get next step + alpha = rt_g / pt_H_p + x_next = x + alpha*p + + # Stop criteria - Hits boundary + if np.linalg.norm(x_next) >= trust_radius: + # Find intersection with box constraints + _, theta, intersect = box_sphere_intersections(x, alpha*p, lb, ub, + trust_radius) + # Update solution + if intersect: + x = x + theta*alpha*p + # Reinforce variables are inside box constraints. + # This is only necessary because of roundoff errors. + x = reinforce_box_boundaries(x, lb, ub) + # Attribute information + stop_cond = 2 + hits_boundary = True + break + + # Check if ``x`` is inside the box and start counter if it is not. + if inside_box_boundaries(x_next, lb, ub): + counter = 0 + else: + counter += 1 + # Whenever outside box constraints keep looking for intersections. + if counter > 0: + _, theta, intersect = box_sphere_intersections(x, alpha*p, lb, ub, + trust_radius) + if intersect: + last_feasible_x = x + theta*alpha*p + # Reinforce variables are inside box constraints. + # This is only necessary because of roundoff errors. + last_feasible_x = reinforce_box_boundaries(last_feasible_x, + lb, ub) + counter = 0 + # Stop after too many infeasible (regarding box constraints) iteration. 
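+        # (if the loop ends with an infeasible x, the last feasible iterate
+        # recorded above, last_feasible_x, is restored after the loop)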
+ if counter > max_infeasible_iter: + break + # Store ``x_next`` value + if return_all: + allvecs.append(x_next) + + # Update residual + r_next = r + alpha*H_p + # Project residual g+ = Z r+ + g_next = Z.dot(r_next) + # Compute conjugate direction step d + rt_g_next = norm(g_next)**2 # g.T g = r.T g (ref [1]_ p.1389) + beta = rt_g_next / rt_g + p = - g_next + beta*p + # Prepare for next iteration + x = x_next + g = g_next + r = g_next + rt_g = norm(g)**2 # g.T g = r.T Z g = r.T g (ref [1]_ p.1389) + H_p = H.dot(p) + + if not inside_box_boundaries(x, lb, ub): + x = last_feasible_x + hits_boundary = True + info = {'niter': k, 'stop_cond': stop_cond, + 'hits_boundary': hits_boundary} + if return_all: + info['allvecs'] = allvecs + return x, info diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/report.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/report.py new file mode 100644 index 0000000000000000000000000000000000000000..f7f997d663cd5ce6265e77f940622b6105362bf7 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/report.py @@ -0,0 +1,49 @@ +"""Progress report printers.""" + +class ReportBase: + COLUMN_NAMES: list[str] = NotImplemented + COLUMN_WIDTHS: list[int] = NotImplemented + ITERATION_FORMATS: list[str] = NotImplemented + + @classmethod + def print_header(cls): + fmt = ("|" + + "|".join([f"{{:^{x}}}" for x in cls.COLUMN_WIDTHS]) + + "|") + separators = ['-' * x for x in cls.COLUMN_WIDTHS] + print(fmt.format(*cls.COLUMN_NAMES)) + print(fmt.format(*separators)) + + @classmethod + def print_iteration(cls, *args): + iteration_format = [f"{{:{x}}}" for x in cls.ITERATION_FORMATS] + fmt = "|" + "|".join(iteration_format) + "|" + print(fmt.format(*args)) + + @classmethod + def print_footer(cls): + print() + + +class BasicReport(ReportBase): + COLUMN_NAMES = ["niter", "f evals", "CG iter", "obj func", "tr radius", + "opt", "c viol"] + COLUMN_WIDTHS = [7, 7, 7, 13, 10, 10, 10] + ITERATION_FORMATS = ["^7", "^7", "^7", "^+13.4e", + "^10.2e", "^10.2e", "^10.2e"] + + +class SQPReport(ReportBase): + COLUMN_NAMES = ["niter", "f evals", "CG iter", "obj func", "tr radius", + "opt", "c viol", "penalty", "CG stop"] + COLUMN_WIDTHS = [7, 7, 7, 13, 10, 10, 10, 10, 7] + ITERATION_FORMATS = ["^7", "^7", "^7", "^+13.4e", "^10.2e", "^10.2e", + "^10.2e", "^10.2e", "^7"] + + +class IPReport(ReportBase): + COLUMN_NAMES = ["niter", "f evals", "CG iter", "obj func", "tr radius", + "opt", "c viol", "penalty", "barrier param", "CG stop"] + COLUMN_WIDTHS = [7, 7, 7, 13, 10, 10, 10, 10, 13, 7] + ITERATION_FORMATS = ["^7", "^7", "^7", "^+13.4e", "^10.2e", "^10.2e", + "^10.2e", "^10.2e", "^13.2e", "^7"] diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/__init__.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/test_canonical_constraint.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/test_canonical_constraint.py new file 
mode 100644 index 0000000000000000000000000000000000000000..452b327d02da3b3bd3fab9592bdef4d56d6aff57 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/test_canonical_constraint.py @@ -0,0 +1,296 @@ +import numpy as np +from numpy.testing import assert_array_equal, assert_equal +from scipy.optimize._constraints import (NonlinearConstraint, Bounds, + PreparedConstraint) +from scipy.optimize._trustregion_constr.canonical_constraint \ + import CanonicalConstraint, initial_constraints_as_canonical + + +def create_quadratic_function(n, m, rng): + a = rng.rand(m) + A = rng.rand(m, n) + H = rng.rand(m, n, n) + HT = np.transpose(H, (1, 2, 0)) + + def fun(x): + return a + A.dot(x) + 0.5 * H.dot(x).dot(x) + + def jac(x): + return A + H.dot(x) + + def hess(x, v): + return HT.dot(v) + + return fun, jac, hess + + +def test_bounds_cases(): + # Test 1: no constraints. + user_constraint = Bounds(-np.inf, np.inf) + x0 = np.array([-1, 2]) + prepared_constraint = PreparedConstraint(user_constraint, x0, False) + c = CanonicalConstraint.from_PreparedConstraint(prepared_constraint) + + assert_equal(c.n_eq, 0) + assert_equal(c.n_ineq, 0) + + c_eq, c_ineq = c.fun(x0) + assert_array_equal(c_eq, []) + assert_array_equal(c_ineq, []) + + J_eq, J_ineq = c.jac(x0) + assert_array_equal(J_eq, np.empty((0, 2))) + assert_array_equal(J_ineq, np.empty((0, 2))) + + assert_array_equal(c.keep_feasible, []) + + # Test 2: infinite lower bound. + user_constraint = Bounds(-np.inf, [0, np.inf, 1], [False, True, True]) + x0 = np.array([-1, -2, -3], dtype=float) + prepared_constraint = PreparedConstraint(user_constraint, x0, False) + c = CanonicalConstraint.from_PreparedConstraint(prepared_constraint) + + assert_equal(c.n_eq, 0) + assert_equal(c.n_ineq, 2) + + c_eq, c_ineq = c.fun(x0) + assert_array_equal(c_eq, []) + assert_array_equal(c_ineq, [-1, -4]) + + J_eq, J_ineq = c.jac(x0) + assert_array_equal(J_eq, np.empty((0, 3))) + assert_array_equal(J_ineq, np.array([[1, 0, 0], [0, 0, 1]])) + + assert_array_equal(c.keep_feasible, [False, True]) + + # Test 3: infinite upper bound. + user_constraint = Bounds([0, 1, -np.inf], np.inf, [True, False, True]) + x0 = np.array([1, 2, 3], dtype=float) + prepared_constraint = PreparedConstraint(user_constraint, x0, False) + c = CanonicalConstraint.from_PreparedConstraint(prepared_constraint) + + assert_equal(c.n_eq, 0) + assert_equal(c.n_ineq, 2) + + c_eq, c_ineq = c.fun(x0) + assert_array_equal(c_eq, []) + assert_array_equal(c_ineq, [-1, -1]) + + J_eq, J_ineq = c.jac(x0) + assert_array_equal(J_eq, np.empty((0, 3))) + assert_array_equal(J_ineq, np.array([[-1, 0, 0], [0, -1, 0]])) + + assert_array_equal(c.keep_feasible, [True, False]) + + # Test 4: interval constraint. 
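+    # Component 3 has lb == ub == 3 and therefore becomes the single
+    # equality; the two-sided finite bounds on components 0 and 2 each
+    # produce an upper and a lower inequality (four in total); the
+    # unbounded component 1 contributes nothing.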
+ user_constraint = Bounds([-1, -np.inf, 2, 3], [1, np.inf, 10, 3], + [False, True, True, True]) + x0 = np.array([0, 10, 8, 5]) + prepared_constraint = PreparedConstraint(user_constraint, x0, False) + c = CanonicalConstraint.from_PreparedConstraint(prepared_constraint) + + assert_equal(c.n_eq, 1) + assert_equal(c.n_ineq, 4) + + c_eq, c_ineq = c.fun(x0) + assert_array_equal(c_eq, [2]) + assert_array_equal(c_ineq, [-1, -2, -1, -6]) + + J_eq, J_ineq = c.jac(x0) + assert_array_equal(J_eq, [[0, 0, 0, 1]]) + assert_array_equal(J_ineq, [[1, 0, 0, 0], + [0, 0, 1, 0], + [-1, 0, 0, 0], + [0, 0, -1, 0]]) + + assert_array_equal(c.keep_feasible, [False, True, False, True]) + + +def test_nonlinear_constraint(): + n = 3 + m = 5 + rng = np.random.RandomState(0) + x0 = rng.rand(n) + + fun, jac, hess = create_quadratic_function(n, m, rng) + f = fun(x0) + J = jac(x0) + + lb = [-10, 3, -np.inf, -np.inf, -5] + ub = [10, 3, np.inf, 3, np.inf] + user_constraint = NonlinearConstraint( + fun, lb, ub, jac, hess, [True, False, False, True, False]) + + for sparse_jacobian in [False, True]: + prepared_constraint = PreparedConstraint(user_constraint, x0, + sparse_jacobian) + c = CanonicalConstraint.from_PreparedConstraint(prepared_constraint) + + assert_array_equal(c.n_eq, 1) + assert_array_equal(c.n_ineq, 4) + + c_eq, c_ineq = c.fun(x0) + assert_array_equal(c_eq, [f[1] - lb[1]]) + assert_array_equal(c_ineq, [f[3] - ub[3], lb[4] - f[4], + f[0] - ub[0], lb[0] - f[0]]) + + J_eq, J_ineq = c.jac(x0) + if sparse_jacobian: + J_eq = J_eq.toarray() + J_ineq = J_ineq.toarray() + + assert_array_equal(J_eq, J[1, None]) + assert_array_equal(J_ineq, np.vstack((J[3], -J[4], J[0], -J[0]))) + + v_eq = rng.rand(c.n_eq) + v_ineq = rng.rand(c.n_ineq) + v = np.zeros(m) + v[1] = v_eq[0] + v[3] = v_ineq[0] + v[4] = -v_ineq[1] + v[0] = v_ineq[2] - v_ineq[3] + assert_array_equal(c.hess(x0, v_eq, v_ineq), hess(x0, v)) + + assert_array_equal(c.keep_feasible, [True, False, True, True]) + + +def test_concatenation(): + rng = np.random.RandomState(0) + n = 4 + x0 = rng.rand(n) + + f1 = x0 + J1 = np.eye(n) + lb1 = [-1, -np.inf, -2, 3] + ub1 = [1, np.inf, np.inf, 3] + bounds = Bounds(lb1, ub1, [False, False, True, False]) + + fun, jac, hess = create_quadratic_function(n, 5, rng) + f2 = fun(x0) + J2 = jac(x0) + lb2 = [-10, 3, -np.inf, -np.inf, -5] + ub2 = [10, 3, np.inf, 5, np.inf] + nonlinear = NonlinearConstraint( + fun, lb2, ub2, jac, hess, [True, False, False, True, False]) + + for sparse_jacobian in [False, True]: + bounds_prepared = PreparedConstraint(bounds, x0, sparse_jacobian) + nonlinear_prepared = PreparedConstraint(nonlinear, x0, sparse_jacobian) + + c1 = CanonicalConstraint.from_PreparedConstraint(bounds_prepared) + c2 = CanonicalConstraint.from_PreparedConstraint(nonlinear_prepared) + c = CanonicalConstraint.concatenate([c1, c2], sparse_jacobian) + + assert_equal(c.n_eq, 2) + assert_equal(c.n_ineq, 7) + + c_eq, c_ineq = c.fun(x0) + assert_array_equal(c_eq, [f1[3] - lb1[3], f2[1] - lb2[1]]) + assert_array_equal(c_ineq, [lb1[2] - f1[2], f1[0] - ub1[0], + lb1[0] - f1[0], f2[3] - ub2[3], + lb2[4] - f2[4], f2[0] - ub2[0], + lb2[0] - f2[0]]) + + J_eq, J_ineq = c.jac(x0) + if sparse_jacobian: + J_eq = J_eq.toarray() + J_ineq = J_ineq.toarray() + + assert_array_equal(J_eq, np.vstack((J1[3], J2[1]))) + assert_array_equal(J_ineq, np.vstack((-J1[2], J1[0], -J1[0], J2[3], + -J2[4], J2[0], -J2[0]))) + + v_eq = rng.rand(c.n_eq) + v_ineq = rng.rand(c.n_ineq) + v = np.zeros(5) + v[1] = v_eq[1] + v[3] = v_ineq[3] + v[4] = -v_ineq[4] + v[0] = v_ineq[5] - 
v_ineq[6] + H = c.hess(x0, v_eq, v_ineq).dot(np.eye(n)) + assert_array_equal(H, hess(x0, v)) + + assert_array_equal(c.keep_feasible, + [True, False, False, True, False, True, True]) + + +def test_empty(): + x = np.array([1, 2, 3]) + c = CanonicalConstraint.empty(3) + assert_equal(c.n_eq, 0) + assert_equal(c.n_ineq, 0) + + c_eq, c_ineq = c.fun(x) + assert_array_equal(c_eq, []) + assert_array_equal(c_ineq, []) + + J_eq, J_ineq = c.jac(x) + assert_array_equal(J_eq, np.empty((0, 3))) + assert_array_equal(J_ineq, np.empty((0, 3))) + + H = c.hess(x, None, None).toarray() + assert_array_equal(H, np.zeros((3, 3))) + + +def test_initial_constraints_as_canonical(): + # rng is only used to generate the coefficients of the quadratic + # function that is used by the nonlinear constraint. + rng = np.random.RandomState(0) + + x0 = np.array([0.5, 0.4, 0.3, 0.2]) + n = len(x0) + + lb1 = [-1, -np.inf, -2, 3] + ub1 = [1, np.inf, np.inf, 3] + bounds = Bounds(lb1, ub1, [False, False, True, False]) + + fun, jac, hess = create_quadratic_function(n, 5, rng) + lb2 = [-10, 3, -np.inf, -np.inf, -5] + ub2 = [10, 3, np.inf, 5, np.inf] + nonlinear = NonlinearConstraint( + fun, lb2, ub2, jac, hess, [True, False, False, True, False]) + + for sparse_jacobian in [False, True]: + bounds_prepared = PreparedConstraint(bounds, x0, sparse_jacobian) + nonlinear_prepared = PreparedConstraint(nonlinear, x0, sparse_jacobian) + + f1 = bounds_prepared.fun.f + J1 = bounds_prepared.fun.J + f2 = nonlinear_prepared.fun.f + J2 = nonlinear_prepared.fun.J + + c_eq, c_ineq, J_eq, J_ineq = initial_constraints_as_canonical( + n, [bounds_prepared, nonlinear_prepared], sparse_jacobian) + + assert_array_equal(c_eq, [f1[3] - lb1[3], f2[1] - lb2[1]]) + assert_array_equal(c_ineq, [lb1[2] - f1[2], f1[0] - ub1[0], + lb1[0] - f1[0], f2[3] - ub2[3], + lb2[4] - f2[4], f2[0] - ub2[0], + lb2[0] - f2[0]]) + + if sparse_jacobian: + J1 = J1.toarray() + J2 = J2.toarray() + J_eq = J_eq.toarray() + J_ineq = J_ineq.toarray() + + assert_array_equal(J_eq, np.vstack((J1[3], J2[1]))) + assert_array_equal(J_ineq, np.vstack((-J1[2], J1[0], -J1[0], J2[3], + -J2[4], J2[0], -J2[0]))) + + +def test_initial_constraints_as_canonical_empty(): + n = 3 + for sparse_jacobian in [False, True]: + c_eq, c_ineq, J_eq, J_ineq = initial_constraints_as_canonical( + n, [], sparse_jacobian) + + assert_array_equal(c_eq, []) + assert_array_equal(c_ineq, []) + + if sparse_jacobian: + J_eq = J_eq.toarray() + J_ineq = J_ineq.toarray() + + assert_array_equal(J_eq, np.empty((0, n))) + assert_array_equal(J_ineq, np.empty((0, n))) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/test_nested_minimize.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/test_nested_minimize.py new file mode 100644 index 0000000000000000000000000000000000000000..f9aa57058c6523d4106b26840ef447431d615cd6 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/test_nested_minimize.py @@ -0,0 +1,39 @@ +import pytest +import numpy as np +from scipy.optimize import minimize, NonlinearConstraint, rosen, rosen_der + + +# Ignore this warning about inefficient use of Hessians +# The bug only shows up with the default HUS +@pytest.mark.filterwarnings( + "ignore:delta_grad == 0.0. Check if the approximated function is linear." 
+) +def test_gh21193(): + # Test that nested minimization does not share Hessian objects + def identity(x): + return x[0] + def identity_jac(x): + a = np.zeros(len(x)) + a[0] = 1 + return a + constraint1 = NonlinearConstraint(identity, 0, 0, identity_jac) + constraint2 = NonlinearConstraint(identity, 0, 0, identity_jac) + + # The default HUS for each should be distinct + assert constraint1.hess is not constraint2.hess + + _ = minimize( + lambda x: minimize( + rosen, + x[1:], + jac=rosen_der, + constraints=constraint1, + method="trust-constr", + options={'maxiter': 2}, + ).fun, + [1, 0, 0], + constraints=constraint2, + method="trust-constr", + options={'maxiter': 2}, + ) + # This test doesn't check that the output is correct, just that it doesn't crash diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/test_projections.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/test_projections.py new file mode 100644 index 0000000000000000000000000000000000000000..6ff3c39d649d0ac663d9b71bb906f1daac021118 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/test_projections.py @@ -0,0 +1,214 @@ +import numpy as np +import scipy.linalg +from scipy.sparse import csc_matrix +from scipy.optimize._trustregion_constr.projections \ + import projections, orthogonality +from numpy.testing import (TestCase, assert_array_almost_equal, + assert_equal, assert_allclose) + +try: + from sksparse.cholmod import cholesky_AAt # noqa: F401 + sksparse_available = True + available_sparse_methods = ("NormalEquation", "AugmentedSystem") +except ImportError: + sksparse_available = False + available_sparse_methods = ("AugmentedSystem",) +available_dense_methods = ('QRFactorization', 'SVDFactorization') + + +class TestProjections(TestCase): + + def test_nullspace_and_least_squares_sparse(self): + A_dense = np.array([[1, 2, 3, 4, 0, 5, 0, 7], + [0, 8, 7, 0, 1, 5, 9, 0], + [1, 0, 0, 0, 0, 1, 2, 3]]) + At_dense = A_dense.T + A = csc_matrix(A_dense) + test_points = ([1, 2, 3, 4, 5, 6, 7, 8], + [1, 10, 3, 0, 1, 6, 7, 8], + [1.12, 10, 0, 0, 100000, 6, 0.7, 8]) + + for method in available_sparse_methods: + Z, LS, _ = projections(A, method) + for z in test_points: + # Test if x is in the null_space + x = Z.matvec(z) + assert_array_almost_equal(A.dot(x), 0) + # Test orthogonality + assert_array_almost_equal(orthogonality(A, x), 0) + # Test if x is the least square solution + x = LS.matvec(z) + x2 = scipy.linalg.lstsq(At_dense, z)[0] + assert_array_almost_equal(x, x2) + + def test_iterative_refinements_sparse(self): + A_dense = np.array([[1, 2, 3, 4, 0, 5, 0, 7], + [0, 8, 7, 0, 1, 5, 9, 0], + [1, 0, 0, 0, 0, 1, 2, 3]]) + A = csc_matrix(A_dense) + test_points = ([1, 2, 3, 4, 5, 6, 7, 8], + [1, 10, 3, 0, 1, 6, 7, 8], + [1.12, 10, 0, 0, 100000, 6, 0.7, 8], + [1, 0, 0, 0, 0, 1, 2, 3+1e-10]) + + for method in available_sparse_methods: + Z, LS, _ = projections(A, method, orth_tol=1e-18, max_refin=100) + for z in test_points: + # Test if x is in the null_space + x = Z.matvec(z) + atol = 1e-13 * abs(x).max() + assert_allclose(A.dot(x), 0, atol=atol) + # Test orthogonality + assert_allclose(orthogonality(A, x), 0, atol=1e-13) + + def test_rowspace_sparse(self): + A_dense = np.array([[1, 2, 3, 4, 0, 5, 0, 7], + [0, 8, 7, 0, 1, 5, 9, 0], + [1, 0, 0, 0, 0, 1, 2, 3]]) + A = csc_matrix(A_dense) + test_points = 
([1, 2, 3], + [1, 10, 3], + [1.12, 10, 0]) + + for method in available_sparse_methods: + _, _, Y = projections(A, method) + for z in test_points: + # Test if x is solution of A x = z + x = Y.matvec(z) + assert_array_almost_equal(A.dot(x), z) + # Test if x is in the return row space of A + A_ext = np.vstack((A_dense, x)) + assert_equal(np.linalg.matrix_rank(A_dense), + np.linalg.matrix_rank(A_ext)) + + def test_nullspace_and_least_squares_dense(self): + A = np.array([[1, 2, 3, 4, 0, 5, 0, 7], + [0, 8, 7, 0, 1, 5, 9, 0], + [1, 0, 0, 0, 0, 1, 2, 3]]) + At = A.T + test_points = ([1, 2, 3, 4, 5, 6, 7, 8], + [1, 10, 3, 0, 1, 6, 7, 8], + [1.12, 10, 0, 0, 100000, 6, 0.7, 8]) + + for method in available_dense_methods: + Z, LS, _ = projections(A, method) + for z in test_points: + # Test if x is in the null_space + x = Z.matvec(z) + assert_array_almost_equal(A.dot(x), 0) + # Test orthogonality + assert_array_almost_equal(orthogonality(A, x), 0) + # Test if x is the least square solution + x = LS.matvec(z) + x2 = scipy.linalg.lstsq(At, z)[0] + assert_array_almost_equal(x, x2) + + def test_compare_dense_and_sparse(self): + D = np.diag(range(1, 101)) + A = np.hstack([D, D, D, D]) + A_sparse = csc_matrix(A) + np.random.seed(0) + + Z, LS, Y = projections(A) + Z_sparse, LS_sparse, Y_sparse = projections(A_sparse) + for k in range(20): + z = np.random.normal(size=(400,)) + assert_array_almost_equal(Z.dot(z), Z_sparse.dot(z)) + assert_array_almost_equal(LS.dot(z), LS_sparse.dot(z)) + x = np.random.normal(size=(100,)) + assert_array_almost_equal(Y.dot(x), Y_sparse.dot(x)) + + def test_compare_dense_and_sparse2(self): + D1 = np.diag([-1.7, 1, 0.5]) + D2 = np.diag([1, -0.6, -0.3]) + D3 = np.diag([-0.3, -1.5, 2]) + A = np.hstack([D1, D2, D3]) + A_sparse = csc_matrix(A) + np.random.seed(0) + + Z, LS, Y = projections(A) + Z_sparse, LS_sparse, Y_sparse = projections(A_sparse) + for k in range(1): + z = np.random.normal(size=(9,)) + assert_array_almost_equal(Z.dot(z), Z_sparse.dot(z)) + assert_array_almost_equal(LS.dot(z), LS_sparse.dot(z)) + x = np.random.normal(size=(3,)) + assert_array_almost_equal(Y.dot(x), Y_sparse.dot(x)) + + def test_iterative_refinements_dense(self): + A = np.array([[1, 2, 3, 4, 0, 5, 0, 7], + [0, 8, 7, 0, 1, 5, 9, 0], + [1, 0, 0, 0, 0, 1, 2, 3]]) + test_points = ([1, 2, 3, 4, 5, 6, 7, 8], + [1, 10, 3, 0, 1, 6, 7, 8], + [1, 0, 0, 0, 0, 1, 2, 3+1e-10]) + + for method in available_dense_methods: + Z, LS, _ = projections(A, method, orth_tol=1e-18, max_refin=10) + for z in test_points: + # Test if x is in the null_space + x = Z.matvec(z) + assert_allclose(A.dot(x), 0, rtol=0, atol=2.5e-14) + # Test orthogonality + assert_allclose(orthogonality(A, x), 0, rtol=0, atol=5e-16) + + def test_rowspace_dense(self): + A = np.array([[1, 2, 3, 4, 0, 5, 0, 7], + [0, 8, 7, 0, 1, 5, 9, 0], + [1, 0, 0, 0, 0, 1, 2, 3]]) + test_points = ([1, 2, 3], + [1, 10, 3], + [1.12, 10, 0]) + + for method in available_dense_methods: + _, _, Y = projections(A, method) + for z in test_points: + # Test if x is solution of A x = z + x = Y.matvec(z) + assert_array_almost_equal(A.dot(x), z) + # Test if x is in the return row space of A + A_ext = np.vstack((A, x)) + assert_equal(np.linalg.matrix_rank(A), + np.linalg.matrix_rank(A_ext)) + + +class TestOrthogonality(TestCase): + + def test_dense_matrix(self): + A = np.array([[1, 2, 3, 4, 0, 5, 0, 7], + [0, 8, 7, 0, 1, 5, 9, 0], + [1, 0, 0, 0, 0, 1, 2, 3]]) + test_vectors = ([-1.98931144, -1.56363389, + -0.84115584, 2.2864762, + 5.599141, 0.09286976, + 1.37040802, -0.28145812], + 
[697.92794044, -4091.65114008, + -3327.42316335, 836.86906951, + 99434.98929065, -1285.37653682, + -4109.21503806, 2935.29289083]) + test_expected_orth = (0, 0) + + for i in range(len(test_vectors)): + x = test_vectors[i] + orth = test_expected_orth[i] + assert_array_almost_equal(orthogonality(A, x), orth) + + def test_sparse_matrix(self): + A = np.array([[1, 2, 3, 4, 0, 5, 0, 7], + [0, 8, 7, 0, 1, 5, 9, 0], + [1, 0, 0, 0, 0, 1, 2, 3]]) + A = csc_matrix(A) + test_vectors = ([-1.98931144, -1.56363389, + -0.84115584, 2.2864762, + 5.599141, 0.09286976, + 1.37040802, -0.28145812], + [697.92794044, -4091.65114008, + -3327.42316335, 836.86906951, + 99434.98929065, -1285.37653682, + -4109.21503806, 2935.29289083]) + test_expected_orth = (0, 0) + + for i in range(len(test_vectors)): + x = test_vectors[i] + orth = test_expected_orth[i] + assert_array_almost_equal(orthogonality(A, x), orth) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/test_qp_subproblem.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/test_qp_subproblem.py new file mode 100644 index 0000000000000000000000000000000000000000..70e65e53b9d2389541c0aab45b94b1d30dcdd146 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/test_qp_subproblem.py @@ -0,0 +1,645 @@ +import numpy as np +from scipy.sparse import csc_matrix +from scipy.optimize._trustregion_constr.qp_subproblem \ + import (eqp_kktfact, + projected_cg, + box_intersections, + sphere_intersections, + box_sphere_intersections, + modified_dogleg) +from scipy.optimize._trustregion_constr.projections \ + import projections +from numpy.testing import TestCase, assert_array_almost_equal, assert_equal +import pytest + + +class TestEQPDirectFactorization(TestCase): + + # From Example 16.2 Nocedal/Wright "Numerical + # Optimization" p.452. 
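+    # The test solves min 1/2 x.T H x + c.T x subject to
+    #     x0 + x2 = 3,  x1 + x2 = 0,
+    # whose known solution is x = (2, -1, 1) with multipliers (3, -2).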
+ def test_nocedal_example(self): + H = csc_matrix([[6, 2, 1], + [2, 5, 2], + [1, 2, 4]]) + A = csc_matrix([[1, 0, 1], + [0, 1, 1]]) + c = np.array([-8, -3, -3]) + b = -np.array([3, 0]) + x, lagrange_multipliers = eqp_kktfact(H, c, A, b) + assert_array_almost_equal(x, [2, -1, 1]) + assert_array_almost_equal(lagrange_multipliers, [3, -2]) + + +class TestSphericalBoundariesIntersections(TestCase): + + def test_2d_sphere_constraints(self): + # Interior initial point + ta, tb, intersect = sphere_intersections([0, 0], + [1, 0], 0.5) + assert_array_almost_equal([ta, tb], [0, 0.5]) + assert_equal(intersect, True) + + # No intersection between line and circle + ta, tb, intersect = sphere_intersections([2, 0], + [0, 1], 1) + assert_equal(intersect, False) + + # Outside initial point pointing toward outside the circle + ta, tb, intersect = sphere_intersections([2, 0], + [1, 0], 1) + assert_equal(intersect, False) + + # Outside initial point pointing toward inside the circle + ta, tb, intersect = sphere_intersections([2, 0], + [-1, 0], 1.5) + assert_array_almost_equal([ta, tb], [0.5, 1]) + assert_equal(intersect, True) + + # Initial point on the boundary + ta, tb, intersect = sphere_intersections([2, 0], + [1, 0], 2) + assert_array_almost_equal([ta, tb], [0, 0]) + assert_equal(intersect, True) + + def test_2d_sphere_constraints_line_intersections(self): + # Interior initial point + ta, tb, intersect = sphere_intersections([0, 0], + [1, 0], 0.5, + entire_line=True) + assert_array_almost_equal([ta, tb], [-0.5, 0.5]) + assert_equal(intersect, True) + + # No intersection between line and circle + ta, tb, intersect = sphere_intersections([2, 0], + [0, 1], 1, + entire_line=True) + assert_equal(intersect, False) + + # Outside initial point pointing toward outside the circle + ta, tb, intersect = sphere_intersections([2, 0], + [1, 0], 1, + entire_line=True) + assert_array_almost_equal([ta, tb], [-3, -1]) + assert_equal(intersect, True) + + # Outside initial point pointing toward inside the circle + ta, tb, intersect = sphere_intersections([2, 0], + [-1, 0], 1.5, + entire_line=True) + assert_array_almost_equal([ta, tb], [0.5, 3.5]) + assert_equal(intersect, True) + + # Initial point on the boundary + ta, tb, intersect = sphere_intersections([2, 0], + [1, 0], 2, + entire_line=True) + assert_array_almost_equal([ta, tb], [-4, 0]) + assert_equal(intersect, True) + + +class TestBoxBoundariesIntersections(TestCase): + + def test_2d_box_constraints(self): + # Box constraint in the direction of vector d + ta, tb, intersect = box_intersections([2, 0], [0, 2], + [1, 1], [3, 3]) + assert_array_almost_equal([ta, tb], [0.5, 1]) + assert_equal(intersect, True) + + # Negative direction + ta, tb, intersect = box_intersections([2, 0], [0, 2], + [1, -3], [3, -1]) + assert_equal(intersect, False) + + # Some constraints are absent (set to +/- inf) + ta, tb, intersect = box_intersections([2, 0], [0, 2], + [-np.inf, 1], + [np.inf, np.inf]) + assert_array_almost_equal([ta, tb], [0.5, 1]) + assert_equal(intersect, True) + + # Intersect on the face of the box + ta, tb, intersect = box_intersections([1, 0], [0, 1], + [1, 1], [3, 3]) + assert_array_almost_equal([ta, tb], [1, 1]) + assert_equal(intersect, True) + + # Interior initial point + ta, tb, intersect = box_intersections([0, 0], [4, 4], + [-2, -3], [3, 2]) + assert_array_almost_equal([ta, tb], [0, 0.5]) + assert_equal(intersect, True) + + # No intersection between line and box constraints + ta, tb, intersect = box_intersections([2, 0], [0, 2], + [-3, -3], [-1, -1]) + 
assert_equal(intersect, False) + ta, tb, intersect = box_intersections([2, 0], [0, 2], + [-3, 3], [-1, 1]) + assert_equal(intersect, False) + ta, tb, intersect = box_intersections([2, 0], [0, 2], + [-3, -np.inf], + [-1, np.inf]) + assert_equal(intersect, False) + ta, tb, intersect = box_intersections([0, 0], [1, 100], + [1, 1], [3, 3]) + assert_equal(intersect, False) + ta, tb, intersect = box_intersections([0.99, 0], [0, 2], + [1, 1], [3, 3]) + assert_equal(intersect, False) + + # Initial point on the boundary + ta, tb, intersect = box_intersections([2, 2], [0, 1], + [-2, -2], [2, 2]) + assert_array_almost_equal([ta, tb], [0, 0]) + assert_equal(intersect, True) + + def test_2d_box_constraints_entire_line(self): + # Box constraint in the direction of vector d + ta, tb, intersect = box_intersections([2, 0], [0, 2], + [1, 1], [3, 3], + entire_line=True) + assert_array_almost_equal([ta, tb], [0.5, 1.5]) + assert_equal(intersect, True) + + # Negative direction + ta, tb, intersect = box_intersections([2, 0], [0, 2], + [1, -3], [3, -1], + entire_line=True) + assert_array_almost_equal([ta, tb], [-1.5, -0.5]) + assert_equal(intersect, True) + + # Some constraints are absent (set to +/- inf) + ta, tb, intersect = box_intersections([2, 0], [0, 2], + [-np.inf, 1], + [np.inf, np.inf], + entire_line=True) + assert_array_almost_equal([ta, tb], [0.5, np.inf]) + assert_equal(intersect, True) + + # Intersect on the face of the box + ta, tb, intersect = box_intersections([1, 0], [0, 1], + [1, 1], [3, 3], + entire_line=True) + assert_array_almost_equal([ta, tb], [1, 3]) + assert_equal(intersect, True) + + # Interior initial point + ta, tb, intersect = box_intersections([0, 0], [4, 4], + [-2, -3], [3, 2], + entire_line=True) + assert_array_almost_equal([ta, tb], [-0.5, 0.5]) + assert_equal(intersect, True) + + # No intersection between line and box constraints + ta, tb, intersect = box_intersections([2, 0], [0, 2], + [-3, -3], [-1, -1], + entire_line=True) + assert_equal(intersect, False) + ta, tb, intersect = box_intersections([2, 0], [0, 2], + [-3, 3], [-1, 1], + entire_line=True) + assert_equal(intersect, False) + ta, tb, intersect = box_intersections([2, 0], [0, 2], + [-3, -np.inf], + [-1, np.inf], + entire_line=True) + assert_equal(intersect, False) + ta, tb, intersect = box_intersections([0, 0], [1, 100], + [1, 1], [3, 3], + entire_line=True) + assert_equal(intersect, False) + ta, tb, intersect = box_intersections([0.99, 0], [0, 2], + [1, 1], [3, 3], + entire_line=True) + assert_equal(intersect, False) + + # Initial point on the boundary + ta, tb, intersect = box_intersections([2, 2], [0, 1], + [-2, -2], [2, 2], + entire_line=True) + assert_array_almost_equal([ta, tb], [-4, 0]) + assert_equal(intersect, True) + + def test_3d_box_constraints(self): + # Simple case + ta, tb, intersect = box_intersections([1, 1, 0], [0, 0, 1], + [1, 1, 1], [3, 3, 3]) + assert_array_almost_equal([ta, tb], [1, 1]) + assert_equal(intersect, True) + + # Negative direction + ta, tb, intersect = box_intersections([1, 1, 0], [0, 0, -1], + [1, 1, 1], [3, 3, 3]) + assert_equal(intersect, False) + + # Interior point + ta, tb, intersect = box_intersections([2, 2, 2], [0, -1, 1], + [1, 1, 1], [3, 3, 3]) + assert_array_almost_equal([ta, tb], [0, 1]) + assert_equal(intersect, True) + + def test_3d_box_constraints_entire_line(self): + # Simple case + ta, tb, intersect = box_intersections([1, 1, 0], [0, 0, 1], + [1, 1, 1], [3, 3, 3], + entire_line=True) + assert_array_almost_equal([ta, tb], [1, 3]) + assert_equal(intersect, True) + + # 
Negative direction + ta, tb, intersect = box_intersections([1, 1, 0], [0, 0, -1], + [1, 1, 1], [3, 3, 3], + entire_line=True) + assert_array_almost_equal([ta, tb], [-3, -1]) + assert_equal(intersect, True) + + # Interior point + ta, tb, intersect = box_intersections([2, 2, 2], [0, -1, 1], + [1, 1, 1], [3, 3, 3], + entire_line=True) + assert_array_almost_equal([ta, tb], [-1, 1]) + assert_equal(intersect, True) + + +class TestBoxSphereBoundariesIntersections(TestCase): + + def test_2d_box_constraints(self): + # Both constraints are active + ta, tb, intersect = box_sphere_intersections([1, 1], [-2, 2], + [-1, -2], [1, 2], 2, + entire_line=False) + assert_array_almost_equal([ta, tb], [0, 0.5]) + assert_equal(intersect, True) + + # None of the constraints are active + ta, tb, intersect = box_sphere_intersections([1, 1], [-1, 1], + [-1, -3], [1, 3], 10, + entire_line=False) + assert_array_almost_equal([ta, tb], [0, 1]) + assert_equal(intersect, True) + + # Box constraints are active + ta, tb, intersect = box_sphere_intersections([1, 1], [-4, 4], + [-1, -3], [1, 3], 10, + entire_line=False) + assert_array_almost_equal([ta, tb], [0, 0.5]) + assert_equal(intersect, True) + + # Spherical constraints are active + ta, tb, intersect = box_sphere_intersections([1, 1], [-4, 4], + [-1, -3], [1, 3], 2, + entire_line=False) + assert_array_almost_equal([ta, tb], [0, 0.25]) + assert_equal(intersect, True) + + # Infeasible problems + ta, tb, intersect = box_sphere_intersections([2, 2], [-4, 4], + [-1, -3], [1, 3], 2, + entire_line=False) + assert_equal(intersect, False) + ta, tb, intersect = box_sphere_intersections([1, 1], [-4, 4], + [2, 4], [2, 4], 2, + entire_line=False) + assert_equal(intersect, False) + + def test_2d_box_constraints_entire_line(self): + # Both constraints are active + ta, tb, intersect = box_sphere_intersections([1, 1], [-2, 2], + [-1, -2], [1, 2], 2, + entire_line=True) + assert_array_almost_equal([ta, tb], [0, 0.5]) + assert_equal(intersect, True) + + # None of the constraints are active + ta, tb, intersect = box_sphere_intersections([1, 1], [-1, 1], + [-1, -3], [1, 3], 10, + entire_line=True) + assert_array_almost_equal([ta, tb], [0, 2]) + assert_equal(intersect, True) + + # Box constraints are active + ta, tb, intersect = box_sphere_intersections([1, 1], [-4, 4], + [-1, -3], [1, 3], 10, + entire_line=True) + assert_array_almost_equal([ta, tb], [0, 0.5]) + assert_equal(intersect, True) + + # Spherical constraints are active + ta, tb, intersect = box_sphere_intersections([1, 1], [-4, 4], + [-1, -3], [1, 3], 2, + entire_line=True) + assert_array_almost_equal([ta, tb], [0, 0.25]) + assert_equal(intersect, True) + + # Infeasible problems + ta, tb, intersect = box_sphere_intersections([2, 2], [-4, 4], + [-1, -3], [1, 3], 2, + entire_line=True) + assert_equal(intersect, False) + ta, tb, intersect = box_sphere_intersections([1, 1], [-4, 4], + [2, 4], [2, 4], 2, + entire_line=True) + assert_equal(intersect, False) + + +class TestModifiedDogleg(TestCase): + + def test_cauchypoint_equalsto_newtonpoint(self): + A = np.array([[1, 8]]) + b = np.array([-16]) + _, _, Y = projections(A) + newton_point = np.array([0.24615385, 1.96923077]) + + # Newton point inside boundaries + x = modified_dogleg(A, Y, b, 2, [-np.inf, -np.inf], [np.inf, np.inf]) + assert_array_almost_equal(x, newton_point) + + # Spherical constraint active + x = modified_dogleg(A, Y, b, 1, [-np.inf, -np.inf], [np.inf, np.inf]) + assert_array_almost_equal(x, newton_point/np.linalg.norm(newton_point)) + + # Box constraints active + x = 
modified_dogleg(A, Y, b, 2, [-np.inf, -np.inf], [0.1, np.inf]) + assert_array_almost_equal(x, (newton_point/newton_point[0]) * 0.1) + + def test_3d_example(self): + A = np.array([[1, 8, 1], + [4, 2, 2]]) + b = np.array([-16, 2]) + Z, LS, Y = projections(A) + + newton_point = np.array([-1.37090909, 2.23272727, -0.49090909]) + cauchy_point = np.array([0.11165723, 1.73068711, 0.16748585]) + origin = np.zeros_like(newton_point) + + # newton_point inside boundaries + x = modified_dogleg(A, Y, b, 3, [-np.inf, -np.inf, -np.inf], + [np.inf, np.inf, np.inf]) + assert_array_almost_equal(x, newton_point) + + # line between cauchy_point and newton_point contains best point + # (spherical constraint is active). + x = modified_dogleg(A, Y, b, 2, [-np.inf, -np.inf, -np.inf], + [np.inf, np.inf, np.inf]) + z = cauchy_point + d = newton_point-cauchy_point + t = ((x-z)/(d)) + assert_array_almost_equal(t, np.full(3, 0.40807330)) + assert_array_almost_equal(np.linalg.norm(x), 2) + + # line between cauchy_point and newton_point contains best point + # (box constraint is active). + x = modified_dogleg(A, Y, b, 5, [-1, -np.inf, -np.inf], + [np.inf, np.inf, np.inf]) + z = cauchy_point + d = newton_point-cauchy_point + t = ((x-z)/(d)) + assert_array_almost_equal(t, np.full(3, 0.7498195)) + assert_array_almost_equal(x[0], -1) + + # line between origin and cauchy_point contains best point + # (spherical constraint is active). + x = modified_dogleg(A, Y, b, 1, [-np.inf, -np.inf, -np.inf], + [np.inf, np.inf, np.inf]) + z = origin + d = cauchy_point + t = ((x-z)/(d)) + assert_array_almost_equal(t, np.full(3, 0.573936265)) + assert_array_almost_equal(np.linalg.norm(x), 1) + + # line between origin and newton_point contains best point + # (box constraint is active). + x = modified_dogleg(A, Y, b, 2, [-np.inf, -np.inf, -np.inf], + [np.inf, 1, np.inf]) + z = origin + d = newton_point + t = ((x-z)/(d)) + assert_array_almost_equal(t, np.full(3, 0.4478827364)) + assert_array_almost_equal(x[1], 1) + + +class TestProjectCG(TestCase): + + # From Example 16.2 Nocedal/Wright "Numerical + # Optimization" p.452. 
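# Editorial sketch (not part of the vendored file): the TestProjectCG cases
# below rely on the operators returned by projections(A). Assuming, as the
# barely-feasible test suggests, that Y produces the minimum-norm solution of
# A @ y = r and Z projects onto the null space of A, both can be checked
# explicitly on a small dense matrix:
import numpy as np
from scipy.optimize._trustregion_constr.projections import projections

A_dense = np.array([[1., 0., 1., 0.],
                    [0., 1., 1., 1.]])
b = -np.array([3., 0.])
Z, LS, Y = projections(A_dense)

# Minimum-norm solution of A @ y = b, computed explicitly for comparison.
y_min_norm = A_dense.T @ np.linalg.solve(A_dense @ A_dense.T, b)
print(np.allclose(Y.dot(b), y_min_norm))      # expected: True

# Z should map any vector into the null space of A.
v = np.array([1., 2., 3., 4.])
print(np.allclose(A_dense @ Z.dot(v), 0.0))   # expected: True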
+ def test_nocedal_example(self): + H = csc_matrix([[6, 2, 1], + [2, 5, 2], + [1, 2, 4]]) + A = csc_matrix([[1, 0, 1], + [0, 1, 1]]) + c = np.array([-8, -3, -3]) + b = -np.array([3, 0]) + Z, _, Y = projections(A) + x, info = projected_cg(H, c, Z, Y, b) + assert_equal(info["stop_cond"], 4) + assert_equal(info["hits_boundary"], False) + assert_array_almost_equal(x, [2, -1, 1]) + + def test_compare_with_direct_fact(self): + H = csc_matrix([[6, 2, 1, 3], + [2, 5, 2, 4], + [1, 2, 4, 5], + [3, 4, 5, 7]]) + A = csc_matrix([[1, 0, 1, 0], + [0, 1, 1, 1]]) + c = np.array([-2, -3, -3, 1]) + b = -np.array([3, 0]) + Z, _, Y = projections(A) + x, info = projected_cg(H, c, Z, Y, b, tol=0) + x_kkt, _ = eqp_kktfact(H, c, A, b) + assert_equal(info["stop_cond"], 1) + assert_equal(info["hits_boundary"], False) + assert_array_almost_equal(x, x_kkt) + + def test_trust_region_infeasible(self): + H = csc_matrix([[6, 2, 1, 3], + [2, 5, 2, 4], + [1, 2, 4, 5], + [3, 4, 5, 7]]) + A = csc_matrix([[1, 0, 1, 0], + [0, 1, 1, 1]]) + c = np.array([-2, -3, -3, 1]) + b = -np.array([3, 0]) + trust_radius = 1 + Z, _, Y = projections(A) + with pytest.raises(ValueError): + projected_cg(H, c, Z, Y, b, trust_radius=trust_radius) + + def test_trust_region_barely_feasible(self): + H = csc_matrix([[6, 2, 1, 3], + [2, 5, 2, 4], + [1, 2, 4, 5], + [3, 4, 5, 7]]) + A = csc_matrix([[1, 0, 1, 0], + [0, 1, 1, 1]]) + c = np.array([-2, -3, -3, 1]) + b = -np.array([3, 0]) + trust_radius = 2.32379000772445021283 + Z, _, Y = projections(A) + x, info = projected_cg(H, c, Z, Y, b, + tol=0, + trust_radius=trust_radius) + assert_equal(info["stop_cond"], 2) + assert_equal(info["hits_boundary"], True) + assert_array_almost_equal(np.linalg.norm(x), trust_radius) + assert_array_almost_equal(x, -Y.dot(b)) + + def test_hits_boundary(self): + H = csc_matrix([[6, 2, 1, 3], + [2, 5, 2, 4], + [1, 2, 4, 5], + [3, 4, 5, 7]]) + A = csc_matrix([[1, 0, 1, 0], + [0, 1, 1, 1]]) + c = np.array([-2, -3, -3, 1]) + b = -np.array([3, 0]) + trust_radius = 3 + Z, _, Y = projections(A) + x, info = projected_cg(H, c, Z, Y, b, + tol=0, + trust_radius=trust_radius) + assert_equal(info["stop_cond"], 2) + assert_equal(info["hits_boundary"], True) + assert_array_almost_equal(np.linalg.norm(x), trust_radius) + + def test_negative_curvature_unconstrained(self): + H = csc_matrix([[1, 2, 1, 3], + [2, 0, 2, 4], + [1, 2, 0, 2], + [3, 4, 2, 0]]) + A = csc_matrix([[1, 0, 1, 0], + [0, 1, 0, 1]]) + c = np.array([-2, -3, -3, 1]) + b = -np.array([3, 0]) + Z, _, Y = projections(A) + with pytest.raises(ValueError): + projected_cg(H, c, Z, Y, b, tol=0) + + def test_negative_curvature(self): + H = csc_matrix([[1, 2, 1, 3], + [2, 0, 2, 4], + [1, 2, 0, 2], + [3, 4, 2, 0]]) + A = csc_matrix([[1, 0, 1, 0], + [0, 1, 0, 1]]) + c = np.array([-2, -3, -3, 1]) + b = -np.array([3, 0]) + Z, _, Y = projections(A) + trust_radius = 1000 + x, info = projected_cg(H, c, Z, Y, b, + tol=0, + trust_radius=trust_radius) + assert_equal(info["stop_cond"], 3) + assert_equal(info["hits_boundary"], True) + assert_array_almost_equal(np.linalg.norm(x), trust_radius) + + # The box constraints are inactive at the solution but + # are active during the iterations. 
+ def test_inactive_box_constraints(self): + H = csc_matrix([[6, 2, 1, 3], + [2, 5, 2, 4], + [1, 2, 4, 5], + [3, 4, 5, 7]]) + A = csc_matrix([[1, 0, 1, 0], + [0, 1, 1, 1]]) + c = np.array([-2, -3, -3, 1]) + b = -np.array([3, 0]) + Z, _, Y = projections(A) + x, info = projected_cg(H, c, Z, Y, b, + tol=0, + lb=[0.5, -np.inf, + -np.inf, -np.inf], + return_all=True) + x_kkt, _ = eqp_kktfact(H, c, A, b) + assert_equal(info["stop_cond"], 1) + assert_equal(info["hits_boundary"], False) + assert_array_almost_equal(x, x_kkt) + + # The box constraints active and the termination is + # by maximum iterations (infeasible interaction). + def test_active_box_constraints_maximum_iterations_reached(self): + H = csc_matrix([[6, 2, 1, 3], + [2, 5, 2, 4], + [1, 2, 4, 5], + [3, 4, 5, 7]]) + A = csc_matrix([[1, 0, 1, 0], + [0, 1, 1, 1]]) + c = np.array([-2, -3, -3, 1]) + b = -np.array([3, 0]) + Z, _, Y = projections(A) + x, info = projected_cg(H, c, Z, Y, b, + tol=0, + lb=[0.8, -np.inf, + -np.inf, -np.inf], + return_all=True) + assert_equal(info["stop_cond"], 1) + assert_equal(info["hits_boundary"], True) + assert_array_almost_equal(A.dot(x), -b) + assert_array_almost_equal(x[0], 0.8) + + # The box constraints are active and the termination is + # because it hits boundary (without infeasible interaction). + def test_active_box_constraints_hits_boundaries(self): + H = csc_matrix([[6, 2, 1, 3], + [2, 5, 2, 4], + [1, 2, 4, 5], + [3, 4, 5, 7]]) + A = csc_matrix([[1, 0, 1, 0], + [0, 1, 1, 1]]) + c = np.array([-2, -3, -3, 1]) + b = -np.array([3, 0]) + trust_radius = 3 + Z, _, Y = projections(A) + x, info = projected_cg(H, c, Z, Y, b, + tol=0, + ub=[np.inf, np.inf, 1.6, np.inf], + trust_radius=trust_radius, + return_all=True) + assert_equal(info["stop_cond"], 2) + assert_equal(info["hits_boundary"], True) + assert_array_almost_equal(x[2], 1.6) + + # The box constraints are active and the termination is + # because it hits boundary (infeasible interaction). + def test_active_box_constraints_hits_boundaries_infeasible_iter(self): + H = csc_matrix([[6, 2, 1, 3], + [2, 5, 2, 4], + [1, 2, 4, 5], + [3, 4, 5, 7]]) + A = csc_matrix([[1, 0, 1, 0], + [0, 1, 1, 1]]) + c = np.array([-2, -3, -3, 1]) + b = -np.array([3, 0]) + trust_radius = 4 + Z, _, Y = projections(A) + x, info = projected_cg(H, c, Z, Y, b, + tol=0, + ub=[np.inf, 0.1, np.inf, np.inf], + trust_radius=trust_radius, + return_all=True) + assert_equal(info["stop_cond"], 2) + assert_equal(info["hits_boundary"], True) + assert_array_almost_equal(x[1], 0.1) + + # The box constraints are active and the termination is + # because it hits boundary (no infeasible interaction). 
+ def test_active_box_constraints_negative_curvature(self): + H = csc_matrix([[1, 2, 1, 3], + [2, 0, 2, 4], + [1, 2, 0, 2], + [3, 4, 2, 0]]) + A = csc_matrix([[1, 0, 1, 0], + [0, 1, 0, 1]]) + c = np.array([-2, -3, -3, 1]) + b = -np.array([3, 0]) + Z, _, Y = projections(A) + trust_radius = 1000 + x, info = projected_cg(H, c, Z, Y, b, + tol=0, + ub=[np.inf, np.inf, 100, np.inf], + trust_radius=trust_radius) + assert_equal(info["stop_cond"], 3) + assert_equal(info["hits_boundary"], True) + assert_array_almost_equal(x[2], 100) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/test_report.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/test_report.py new file mode 100644 index 0000000000000000000000000000000000000000..66fa5bd17f80a907db425c927c08e5dc0797028e --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/test_report.py @@ -0,0 +1,34 @@ +import pytest +import numpy as np +from scipy.optimize import minimize, Bounds + +def test_gh10880(): + # checks that verbose reporting works with trust-constr for + # bound-constrained problems + bnds = Bounds(1, 2) + opts = {'maxiter': 1000, 'verbose': 2} + minimize(lambda x: x**2, x0=2., method='trust-constr', + bounds=bnds, options=opts) + + opts = {'maxiter': 1000, 'verbose': 3} + minimize(lambda x: x**2, x0=2., method='trust-constr', + bounds=bnds, options=opts) + +@pytest.mark.xslow +def test_gh12922(): + # checks that verbose reporting works with trust-constr for + # general constraints + def objective(x): + return np.array([(np.sum((x+1)**4))]) + + cons = {'type': 'ineq', 'fun': lambda x: -x[0]**2} + n = 25 + x0 = np.linspace(-5, 5, n) + + opts = {'maxiter': 1000, 'verbose': 2} + minimize(objective, x0=x0, method='trust-constr', + constraints=cons, options=opts) + + opts = {'maxiter': 1000, 'verbose': 3} + minimize(objective, x0=x0, method='trust-constr', + constraints=cons, options=opts) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tr_interior_point.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tr_interior_point.py new file mode 100644 index 0000000000000000000000000000000000000000..e14b3f366fba818d9174af97fa91e065bf26e826 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tr_interior_point.py @@ -0,0 +1,361 @@ +"""Trust-region interior point method. + +References +---------- +.. [1] Byrd, Richard H., Mary E. Hribar, and Jorge Nocedal. + "An interior point algorithm for large-scale nonlinear + programming." SIAM Journal on Optimization 9.4 (1999): 877-900. +.. [2] Byrd, Richard H., Guanghui Liu, and Jorge Nocedal. + "On the local behavior of an interior point method for + nonlinear programming." Numerical analysis 1997 (1997): 37-56. +.. [3] Nocedal, Jorge, and Stephen J. Wright. "Numerical optimization" + Second Edition (2006). 
+""" + +import scipy.sparse as sps +import numpy as np +from .equality_constrained_sqp import equality_constrained_sqp +from scipy.sparse.linalg import LinearOperator + +__all__ = ['tr_interior_point'] + + +class BarrierSubproblem: + """ + Barrier optimization problem: + minimize fun(x) - barrier_parameter*sum(log(s)) + subject to: constr_eq(x) = 0 + constr_ineq(x) + s = 0 + """ + + def __init__(self, x0, s0, fun, grad, lagr_hess, n_vars, n_ineq, n_eq, + constr, jac, barrier_parameter, tolerance, + enforce_feasibility, global_stop_criteria, + xtol, fun0, grad0, constr_ineq0, jac_ineq0, constr_eq0, + jac_eq0, finite_diff_bounds): + # Store parameters + self.n_vars = n_vars + self.x0 = x0 + self.s0 = s0 + self.fun = fun + self.grad = grad + self.lagr_hess = lagr_hess + self.constr = constr + self.jac = jac + self.barrier_parameter = barrier_parameter + self.tolerance = tolerance + self.n_eq = n_eq + self.n_ineq = n_ineq + self.enforce_feasibility = enforce_feasibility + self.global_stop_criteria = global_stop_criteria + self.xtol = xtol + self.fun0 = self._compute_function(fun0, constr_ineq0, s0) + self.grad0 = self._compute_gradient(grad0) + self.constr0 = self._compute_constr(constr_ineq0, constr_eq0, s0) + self.jac0 = self._compute_jacobian(jac_eq0, jac_ineq0, s0) + self.terminate = False + self.lb = finite_diff_bounds[0] + self.ub = finite_diff_bounds[1] + + def update(self, barrier_parameter, tolerance): + self.barrier_parameter = barrier_parameter + self.tolerance = tolerance + + def get_slack(self, z): + return z[self.n_vars:self.n_vars+self.n_ineq] + + def get_variables(self, z): + return z[:self.n_vars] + + def function_and_constraints(self, z): + """Returns barrier function and constraints at given point. + + For z = [x, s], returns barrier function: + function(z) = fun(x) - barrier_parameter*sum(log(s)) + and barrier constraints: + constraints(z) = [ constr_eq(x) ] + [ constr_ineq(x) + s ] + + """ + # Get variables and slack variables + x = self.get_variables(z) + s = self.get_slack(z) + + # Compute function and constraints, + # making sure x is within any strict bounds + if np.any((x < self.lb) | (x > self.ub)): + # If x is out of the strict bounds, set f = inf, + # and just set both equality and inequality + # constraints to 0 since we can't evaluate + # them separately. + f = np.inf + c_eq = np.full(self.n_eq, 0.) + c_ineq = np.full(self.n_ineq, 0.) + else: + f = self.fun(x) + c_eq, c_ineq = self.constr(x) + + # Return objective function and constraints + return (self._compute_function(f, c_ineq, s), + self._compute_constr(c_ineq, c_eq, s)) + + def _compute_function(self, f, c_ineq, s): + # Use technique from Nocedal and Wright book, ref [3]_, p.576, + # to guarantee constraints from `enforce_feasibility` + # stay feasible along iterations. + s[self.enforce_feasibility] = -c_ineq[self.enforce_feasibility] + log_s = [np.log(s_i) if s_i > 0 else -np.inf for s_i in s] + # Compute barrier objective function + return f - self.barrier_parameter*np.sum(log_s) + + def _compute_constr(self, c_ineq, c_eq, s): + # Compute barrier constraint + return np.hstack((c_eq, + c_ineq + s)) + + def scaling(self, z): + """Returns scaling vector. + Given by: + scaling = [ones(n_vars), s] + """ + s = self.get_slack(z) + diag_elements = np.hstack((np.ones(self.n_vars), s)) + + # Diagonal matrix + def matvec(vec): + return diag_elements*vec + return LinearOperator((self.n_vars+self.n_ineq, + self.n_vars+self.n_ineq), + matvec) + + def gradient_and_jacobian(self, z): + """Returns scaled gradient. 
+ + Return scaled gradient: + gradient = [ grad(x) ] + [ -barrier_parameter*ones(n_ineq) ] + and scaled Jacobian matrix: + jacobian = [ jac_eq(x) 0 ] + [ jac_ineq(x) S ] + Both of them scaled by the previously defined scaling factor. + """ + # Get variables and slack variables + x = self.get_variables(z) + s = self.get_slack(z) + # Compute first derivatives + g = self.grad(x) + J_eq, J_ineq = self.jac(x) + # Return gradient and Jacobian + return (self._compute_gradient(g), + self._compute_jacobian(J_eq, J_ineq, s)) + + def _compute_gradient(self, g): + return np.hstack((g, -self.barrier_parameter*np.ones(self.n_ineq))) + + def _compute_jacobian(self, J_eq, J_ineq, s): + if self.n_ineq == 0: + return J_eq + else: + if sps.issparse(J_eq) or sps.issparse(J_ineq): + # It is expected that J_eq and J_ineq + # are already `csr_matrix` because of + # the way ``BoxConstraint``, ``NonlinearConstraint`` + # and ``LinearConstraint`` are defined. + J_eq = sps.csr_matrix(J_eq) + J_ineq = sps.csr_matrix(J_ineq) + return self._assemble_sparse_jacobian(J_eq, J_ineq, s) + else: + S = np.diag(s) + zeros = np.zeros((self.n_eq, self.n_ineq)) + # Convert to matrix + if sps.issparse(J_ineq): + J_ineq = J_ineq.toarray() + if sps.issparse(J_eq): + J_eq = J_eq.toarray() + # Concatenate matrices + return np.block([[J_eq, zeros], + [J_ineq, S]]) + + def _assemble_sparse_jacobian(self, J_eq, J_ineq, s): + """Assemble sparse Jacobian given its components. + + Given ``J_eq``, ``J_ineq`` and ``s`` returns: + jacobian = [ J_eq, 0 ] + [ J_ineq, diag(s) ] + + It is equivalent to: + sps.bmat([[ J_eq, None ], + [ J_ineq, diag(s) ]], "csr") + but significantly more efficient for this + given structure. + """ + n_vars, n_ineq, n_eq = self.n_vars, self.n_ineq, self.n_eq + J_aux = sps.vstack([J_eq, J_ineq], "csr") + indptr, indices, data = J_aux.indptr, J_aux.indices, J_aux.data + new_indptr = indptr + np.hstack((np.zeros(n_eq, dtype=int), + np.arange(n_ineq+1, dtype=int))) + size = indices.size+n_ineq + new_indices = np.empty(size) + new_data = np.empty(size) + mask = np.full(size, False, bool) + mask[new_indptr[-n_ineq:]-1] = True + new_indices[mask] = n_vars+np.arange(n_ineq) + new_indices[~mask] = indices + new_data[mask] = s + new_data[~mask] = data + J = sps.csr_matrix((new_data, new_indices, new_indptr), + (n_eq + n_ineq, n_vars + n_ineq)) + return J + + def lagrangian_hessian_x(self, z, v): + """Returns Lagrangian Hessian (in relation to `x`) -> Hx""" + x = self.get_variables(z) + # Get lagrange multipliers related to nonlinear equality constraints + v_eq = v[:self.n_eq] + # Get lagrange multipliers related to nonlinear ineq. constraints + v_ineq = v[self.n_eq:self.n_eq+self.n_ineq] + lagr_hess = self.lagr_hess + return lagr_hess(x, v_eq, v_ineq) + + def lagrangian_hessian_s(self, z, v): + """Returns scaled Lagrangian Hessian (in relation to`s`) -> S Hs S""" + s = self.get_slack(z) + # Using the primal formulation: + # S Hs S = diag(s)*diag(barrier_parameter/s**2)*diag(s). + # Reference [1]_ p. 882, formula (3.1) + primal = self.barrier_parameter + # Using the primal-dual formulation + # S Hs S = diag(s)*diag(v/s)*diag(s) + # Reference [1]_ p. 883, formula (3.11) + primal_dual = v[-self.n_ineq:]*s + # Uses the primal-dual formulation for + # positives values of v_ineq, and primal + # formulation for the remaining ones. 
+ return np.where(v[-self.n_ineq:] > 0, primal_dual, primal) + + def lagrangian_hessian(self, z, v): + """Returns scaled Lagrangian Hessian""" + # Compute Hessian in relation to x and s + Hx = self.lagrangian_hessian_x(z, v) + if self.n_ineq > 0: + S_Hs_S = self.lagrangian_hessian_s(z, v) + + # The scaled Lagragian Hessian is: + # [ Hx 0 ] + # [ 0 S Hs S ] + def matvec(vec): + vec_x = self.get_variables(vec) + vec_s = self.get_slack(vec) + if self.n_ineq > 0: + return np.hstack((Hx.dot(vec_x), S_Hs_S*vec_s)) + else: + return Hx.dot(vec_x) + return LinearOperator((self.n_vars+self.n_ineq, + self.n_vars+self.n_ineq), + matvec) + + def stop_criteria(self, state, z, last_iteration_failed, + optimality, constr_violation, + trust_radius, penalty, cg_info): + """Stop criteria to the barrier problem. + The criteria here proposed is similar to formula (2.3) + from [1]_, p.879. + """ + x = self.get_variables(z) + if self.global_stop_criteria(state, x, + last_iteration_failed, + trust_radius, penalty, + cg_info, + self.barrier_parameter, + self.tolerance): + self.terminate = True + return True + else: + g_cond = (optimality < self.tolerance and + constr_violation < self.tolerance) + x_cond = trust_radius < self.xtol + return g_cond or x_cond + + +def tr_interior_point(fun, grad, lagr_hess, n_vars, n_ineq, n_eq, + constr, jac, x0, fun0, grad0, + constr_ineq0, jac_ineq0, constr_eq0, + jac_eq0, stop_criteria, + enforce_feasibility, xtol, state, + initial_barrier_parameter, + initial_tolerance, + initial_penalty, + initial_trust_radius, + factorization_method, + finite_diff_bounds): + """Trust-region interior points method. + + Solve problem: + minimize fun(x) + subject to: constr_ineq(x) <= 0 + constr_eq(x) = 0 + using trust-region interior point method described in [1]_. + """ + # BOUNDARY_PARAMETER controls the decrease on the slack + # variables. Represents ``tau`` from [1]_ p.885, formula (3.18). + BOUNDARY_PARAMETER = 0.995 + # BARRIER_DECAY_RATIO controls the decay of the barrier parameter + # and of the subproblem tolerance. Represents ``theta`` from [1]_ p.879. + BARRIER_DECAY_RATIO = 0.2 + # TRUST_ENLARGEMENT controls the enlargement on trust radius + # after each iteration + TRUST_ENLARGEMENT = 5 + + # Default enforce_feasibility + if enforce_feasibility is None: + enforce_feasibility = np.zeros(n_ineq, bool) + # Initial Values + barrier_parameter = initial_barrier_parameter + tolerance = initial_tolerance + trust_radius = initial_trust_radius + # Define initial value for the slack variables + s0 = np.maximum(-1.5*constr_ineq0, np.ones(n_ineq)) + # Define barrier subproblem + subprob = BarrierSubproblem( + x0, s0, fun, grad, lagr_hess, n_vars, n_ineq, n_eq, constr, jac, + barrier_parameter, tolerance, enforce_feasibility, + stop_criteria, xtol, fun0, grad0, constr_ineq0, jac_ineq0, + constr_eq0, jac_eq0, finite_diff_bounds) + # Define initial parameter for the first iteration. 
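# Editorial note (not part of the vendored file): a worked 1-D instance of the
# barrier reformulation used by BarrierSubproblem, for orientation. Take
#     minimize x     subject to   1 - x <= 0    (i.e. x >= 1).
# With a slack s >= 0 the inequality becomes 1 - x + s = 0, so s = x - 1 and
# the barrier subproblem is
#     minimize  x - barrier_parameter * log(x - 1),
# whose stationary point is x = 1 + barrier_parameter. Since the outer loop
# below multiplies barrier_parameter (and the subproblem tolerance) by
# BARRIER_DECAY_RATIO = 0.2 after each solved subproblem, these minimizers
# 1 + barrier_parameter walk toward the constrained solution x = 1.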
+ z = np.hstack((x0, s0)) + fun0_subprob, constr0_subprob = subprob.fun0, subprob.constr0 + grad0_subprob, jac0_subprob = subprob.grad0, subprob.jac0 + # Define trust region bounds + trust_lb = np.hstack((np.full(subprob.n_vars, -np.inf), + np.full(subprob.n_ineq, -BOUNDARY_PARAMETER))) + trust_ub = np.full(subprob.n_vars+subprob.n_ineq, np.inf) + + # Solves a sequence of barrier problems + while True: + # Solve SQP subproblem + z, state = equality_constrained_sqp( + subprob.function_and_constraints, + subprob.gradient_and_jacobian, + subprob.lagrangian_hessian, + z, fun0_subprob, grad0_subprob, + constr0_subprob, jac0_subprob, subprob.stop_criteria, + state, initial_penalty, trust_radius, + factorization_method, trust_lb, trust_ub, subprob.scaling) + if subprob.terminate: + break + # Update parameters + trust_radius = max(initial_trust_radius, + TRUST_ENLARGEMENT*state.tr_radius) + # TODO: Use more advanced strategies from [2]_ + # to update this parameters. + barrier_parameter *= BARRIER_DECAY_RATIO + tolerance *= BARRIER_DECAY_RATIO + # Update Barrier Problem + subprob.update(barrier_parameter, tolerance) + # Compute initial values for next iteration + fun0_subprob, constr0_subprob = subprob.function_and_constraints(z) + grad0_subprob, jac0_subprob = subprob.gradient_and_jacobian(z) + + # Get x and s + x = subprob.get_variables(z) + return x, state diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trustregion_dogleg.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trustregion_dogleg.py new file mode 100644 index 0000000000000000000000000000000000000000..a54abd60c703408d6c87cb5020d6781fdf0213c7 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trustregion_dogleg.py @@ -0,0 +1,122 @@ +"""Dog-leg trust-region optimization.""" +import numpy as np +import scipy.linalg +from ._trustregion import (_minimize_trust_region, BaseQuadraticSubproblem) + +__all__ = [] + + +def _minimize_dogleg(fun, x0, args=(), jac=None, hess=None, + **trust_region_options): + """ + Minimization of scalar function of one or more variables using + the dog-leg trust-region algorithm. + + Options + ------- + initial_trust_radius : float + Initial trust-region radius. + max_trust_radius : float + Maximum value of the trust-region radius. No steps that are longer + than this value will be proposed. + eta : float + Trust region related acceptance stringency for proposed steps. + gtol : float + Gradient norm must be less than `gtol` before successful + termination. + + """ + if jac is None: + raise ValueError('Jacobian is required for dogleg minimization') + if not callable(hess): + raise ValueError('Hessian is required for dogleg minimization') + return _minimize_trust_region(fun, x0, args=args, jac=jac, hess=hess, + subproblem=DoglegSubproblem, + **trust_region_options) + + +class DoglegSubproblem(BaseQuadraticSubproblem): + """Quadratic subproblem solved by the dogleg method""" + + def cauchy_point(self): + """ + The Cauchy point is minimal along the direction of steepest descent. + """ + if self._cauchy_point is None: + g = self.jac + Bg = self.hessp(g) + self._cauchy_point = -(np.dot(g, g) / np.dot(g, Bg)) * g + return self._cauchy_point + + def newton_point(self): + """ + The Newton point is a global minimum of the approximate function. 
+ """ + if self._newton_point is None: + g = self.jac + B = self.hess + cho_info = scipy.linalg.cho_factor(B) + self._newton_point = -scipy.linalg.cho_solve(cho_info, g) + return self._newton_point + + def solve(self, trust_radius): + """ + Minimize a function using the dog-leg trust-region algorithm. + + This algorithm requires function values and first and second derivatives. + It also performs a costly Hessian decomposition for most iterations, + and the Hessian is required to be positive definite. + + Parameters + ---------- + trust_radius : float + We are allowed to wander only this far away from the origin. + + Returns + ------- + p : ndarray + The proposed step. + hits_boundary : bool + True if the proposed step is on the boundary of the trust region. + + Notes + ----- + The Hessian is required to be positive definite. + + References + ---------- + .. [1] Jorge Nocedal and Stephen Wright, + Numerical Optimization, second edition, + Springer-Verlag, 2006, page 73. + """ + + # Compute the Newton point. + # This is the optimum for the quadratic model function. + # If it is inside the trust radius then return this point. + p_best = self.newton_point() + if scipy.linalg.norm(p_best) < trust_radius: + hits_boundary = False + return p_best, hits_boundary + + # Compute the Cauchy point. + # This is the predicted optimum along the direction of steepest descent. + p_u = self.cauchy_point() + + # If the Cauchy point is outside the trust region, + # then return the point where the path intersects the boundary. + p_u_norm = scipy.linalg.norm(p_u) + if p_u_norm >= trust_radius: + p_boundary = p_u * (trust_radius / p_u_norm) + hits_boundary = True + return p_boundary, hits_boundary + + # Compute the intersection of the trust region boundary + # and the line segment connecting the Cauchy and Newton points. + # This requires solving a quadratic equation. + # ||p_u + t*(p_best - p_u)||**2 == trust_radius**2 + # Solve this for positive time t using the quadratic formula. + _, tb = self.get_boundaries_intersections(p_u, p_best - p_u, + trust_radius) + p_boundary = p_u + tb * (p_best - p_u) + hits_boundary = True + return p_boundary, hits_boundary diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trustregion_exact.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trustregion_exact.py new file mode 100644 index 0000000000000000000000000000000000000000..956e4f261907f001cf5bbb3616f331c18a676af0 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trustregion_exact.py @@ -0,0 +1,438 @@ +"""Nearly exact trust-region optimization subproblem.""" +import numpy as np +from scipy.linalg import (norm, get_lapack_funcs, solve_triangular, + cho_solve) +from ._trustregion import (_minimize_trust_region, BaseQuadraticSubproblem) + +__all__ = ['_minimize_trustregion_exact', + 'estimate_smallest_singular_value', + 'singular_leading_submatrix', + 'IterativeSubproblem'] + + +def _minimize_trustregion_exact(fun, x0, args=(), jac=None, hess=None, + **trust_region_options): + """ + Minimization of scalar function of one or more variables using + a nearly exact trust-region algorithm. + + Options + ------- + initial_trust_radius : float + Initial trust-region radius. + max_trust_radius : float + Maximum value of the trust-region radius. No steps that are longer + than this value will be proposed. 
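# Editorial sketch (not part of the vendored files): the dog-leg step computed
# by DoglegSubproblem.solve above, spelled out densely for a toy quadratic
# model m(p) = g.T p + 0.5 p.T B p with made-up data g and B.
import numpy as np

g = np.array([1., 1.])
B = np.diag([1., 4.])

p_newton = -np.linalg.solve(B, g)              # minimizer of the full model
p_cauchy = -(g @ g) / (g @ (B @ g)) * g        # minimizer along -g

trust_radius = 0.8
if np.linalg.norm(p_newton) <= trust_radius:
    p = p_newton
elif np.linalg.norm(p_cauchy) >= trust_radius:
    p = p_cauchy * trust_radius / np.linalg.norm(p_cauchy)
else:
    # Positive root t of ||p_cauchy + t*(p_newton - p_cauchy)|| == trust_radius.
    d = p_newton - p_cauchy
    a = d @ d
    b_ = 2.0 * (p_cauchy @ d)
    c = p_cauchy @ p_cauchy - trust_radius**2
    t = (-b_ + np.sqrt(b_**2 - 4.0 * a * c)) / (2.0 * a)
    p = p_cauchy + t * d

print(p, np.linalg.norm(p))    # the returned step has norm <= trust_radius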
+ eta : float + Trust region related acceptance stringency for proposed steps. + gtol : float + Gradient norm must be less than ``gtol`` before successful + termination. + """ + + if jac is None: + raise ValueError('Jacobian is required for trust region ' + 'exact minimization.') + if not callable(hess): + raise ValueError('Hessian matrix is required for trust region ' + 'exact minimization.') + return _minimize_trust_region(fun, x0, args=args, jac=jac, hess=hess, + subproblem=IterativeSubproblem, + **trust_region_options) + + +def estimate_smallest_singular_value(U): + """Given upper triangular matrix ``U`` estimate the smallest singular + value and the correspondent right singular vector in O(n**2) operations. + + Parameters + ---------- + U : ndarray + Square upper triangular matrix. + + Returns + ------- + s_min : float + Estimated smallest singular value of the provided matrix. + z_min : ndarray + Estimated right singular vector. + + Notes + ----- + The procedure is based on [1]_ and is done in two steps. First, it finds + a vector ``e`` with components selected from {+1, -1} such that the + solution ``w`` from the system ``U.T w = e`` is as large as possible. + Next it estimate ``U v = w``. The smallest singular value is close + to ``norm(w)/norm(v)`` and the right singular vector is close + to ``v/norm(v)``. + + The estimation will be better more ill-conditioned is the matrix. + + References + ---------- + .. [1] Cline, A. K., Moler, C. B., Stewart, G. W., Wilkinson, J. H. + An estimate for the condition number of a matrix. 1979. + SIAM Journal on Numerical Analysis, 16(2), 368-375. + """ + + U = np.atleast_2d(U) + m, n = U.shape + + if m != n: + raise ValueError("A square triangular matrix should be provided.") + + # A vector `e` with components selected from {+1, -1} + # is selected so that the solution `w` to the system + # `U.T w = e` is as large as possible. Implementation + # based on algorithm 3.5.1, p. 142, from reference [2] + # adapted for lower triangular matrix. + + p = np.zeros(n) + w = np.empty(n) + + # Implemented according to: Golub, G. H., Van Loan, C. F. (2013). + # "Matrix computations". Forth Edition. JHU press. pp. 140-142. + for k in range(n): + wp = (1-p[k]) / U.T[k, k] + wm = (-1-p[k]) / U.T[k, k] + pp = p[k+1:] + U.T[k+1:, k]*wp + pm = p[k+1:] + U.T[k+1:, k]*wm + + if abs(wp) + norm(pp, 1) >= abs(wm) + norm(pm, 1): + w[k] = wp + p[k+1:] = pp + else: + w[k] = wm + p[k+1:] = pm + + # The system `U v = w` is solved using backward substitution. + v = solve_triangular(U, w) + + v_norm = norm(v) + w_norm = norm(w) + + # Smallest singular value + s_min = w_norm / v_norm + + # Associated vector + z_min = v / v_norm + + return s_min, z_min + + +def gershgorin_bounds(H): + """ + Given a square matrix ``H`` compute upper + and lower bounds for its eigenvalues (Gregoshgorin Bounds). + Defined ref. [1]. + + References + ---------- + .. [1] Conn, A. R., Gould, N. I., & Toint, P. L. + Trust region methods. 2000. Siam. pp. 19. + """ + + H_diag = np.diag(H) + H_diag_abs = np.abs(H_diag) + H_row_sums = np.sum(np.abs(H), axis=1) + lb = np.min(H_diag + H_diag_abs - H_row_sums) + ub = np.max(H_diag - H_diag_abs + H_row_sums) + + return lb, ub + + +def singular_leading_submatrix(A, U, k): + """ + Compute term that makes the leading ``k`` by ``k`` + submatrix from ``A`` singular. + + Parameters + ---------- + A : ndarray + Symmetric matrix that is not positive definite. + U : ndarray + Upper triangular matrix resulting of an incomplete + Cholesky decomposition of matrix ``A``. 
+ k : int + Positive integer such that the leading k by k submatrix from + `A` is the first non-positive definite leading submatrix. + + Returns + ------- + delta : float + Amount that should be added to the element (k, k) of the + leading k by k submatrix of ``A`` to make it singular. + v : ndarray + A vector such that ``v.T B v = 0``. Where B is the matrix A after + ``delta`` is added to its element (k, k). + """ + + # Compute delta + delta = np.sum(U[:k-1, k-1]**2) - A[k-1, k-1] + + n = len(A) + + # Initialize v + v = np.zeros(n) + v[k-1] = 1 + + # Compute the remaining values of v by solving a triangular system. + if k != 1: + v[:k-1] = solve_triangular(U[:k-1, :k-1], -U[:k-1, k-1]) + + return delta, v + + +class IterativeSubproblem(BaseQuadraticSubproblem): + """Quadratic subproblem solved by nearly exact iterative method. + + Notes + ----- + This subproblem solver was based on [1]_, [2]_ and [3]_, + which implement similar algorithms. The algorithm is basically + that of [1]_ but ideas from [2]_ and [3]_ were also used. + + References + ---------- + .. [1] A.R. Conn, N.I. Gould, and P.L. Toint, "Trust region methods", + Siam, pp. 169-200, 2000. + .. [2] J. Nocedal and S. Wright, "Numerical optimization", + Springer Science & Business Media. pp. 83-91, 2006. + .. [3] J.J. More and D.C. Sorensen, "Computing a trust region step", + SIAM Journal on Scientific and Statistical Computing, vol. 4(3), + pp. 553-572, 1983. + """ + + # UPDATE_COEFF appears in reference [1]_ + # in formula 7.3.14 (p. 190) named as "theta". + # As recommended there it value is fixed in 0.01. + UPDATE_COEFF = 0.01 + + EPS = np.finfo(float).eps + + def __init__(self, x, fun, jac, hess, hessp=None, + k_easy=0.1, k_hard=0.2): + + super().__init__(x, fun, jac, hess) + + # When the trust-region shrinks in two consecutive + # calculations (``tr_radius < previous_tr_radius``) + # the lower bound ``lambda_lb`` may be reused, + # facilitating the convergence. To indicate no + # previous value is known at first ``previous_tr_radius`` + # is set to -1 and ``lambda_lb`` to None. + self.previous_tr_radius = -1 + self.lambda_lb = None + + self.niter = 0 + + # ``k_easy`` and ``k_hard`` are parameters used + # to determine the stop criteria to the iterative + # subproblem solver. Take a look at pp. 194-197 + # from reference _[1] for a more detailed description. + self.k_easy = k_easy + self.k_hard = k_hard + + # Get Lapack function for cholesky decomposition. + # The implemented SciPy wrapper does not return + # the incomplete factorization needed by the method. + self.cholesky, = get_lapack_funcs(('potrf',), (self.hess,)) + + # Get info about Hessian + self.dimension = len(self.hess) + self.hess_gershgorin_lb,\ + self.hess_gershgorin_ub = gershgorin_bounds(self.hess) + self.hess_inf = norm(self.hess, np.inf) + self.hess_fro = norm(self.hess, 'fro') + + # A constant such that for vectors smaller than that + # backward substitution is not reliable. It was established + # based on Golub, G. H., Van Loan, C. F. (2013). + # "Matrix computations". Forth Edition. JHU press., p.165. + self.CLOSE_TO_ZERO = self.dimension * self.EPS * self.hess_inf + + def _initial_values(self, tr_radius): + """Given a trust radius, return a good initial guess for + the damping factor, the lower bound and the upper bound. + The values were chosen accordingly to the guidelines on + section 7.3.8 (p. 192) from [1]_. 
+ """ + + # Upper bound for the damping factor + lambda_ub = max(0, self.jac_mag/tr_radius + min(-self.hess_gershgorin_lb, + self.hess_fro, + self.hess_inf)) + + # Lower bound for the damping factor + lambda_lb = max(0, -min(self.hess.diagonal()), + self.jac_mag/tr_radius - min(self.hess_gershgorin_ub, + self.hess_fro, + self.hess_inf)) + + # Improve bounds with previous info + if tr_radius < self.previous_tr_radius: + lambda_lb = max(self.lambda_lb, lambda_lb) + + # Initial guess for the damping factor + if lambda_lb == 0: + lambda_initial = 0 + else: + lambda_initial = max(np.sqrt(lambda_lb * lambda_ub), + lambda_lb + self.UPDATE_COEFF*(lambda_ub-lambda_lb)) + + return lambda_initial, lambda_lb, lambda_ub + + def solve(self, tr_radius): + """Solve quadratic subproblem""" + + lambda_current, lambda_lb, lambda_ub = self._initial_values(tr_radius) + n = self.dimension + hits_boundary = True + already_factorized = False + self.niter = 0 + + while True: + + # Compute Cholesky factorization + if already_factorized: + already_factorized = False + else: + H = self.hess+lambda_current*np.eye(n) + U, info = self.cholesky(H, lower=False, + overwrite_a=False, + clean=True) + + self.niter += 1 + + # Check if factorization succeeded + if info == 0 and self.jac_mag > self.CLOSE_TO_ZERO: + # Successful factorization + + # Solve `U.T U p = s` + p = cho_solve((U, False), -self.jac) + + p_norm = norm(p) + + # Check for interior convergence + if p_norm <= tr_radius and lambda_current == 0: + hits_boundary = False + break + + # Solve `U.T w = p` + w = solve_triangular(U, p, trans='T') + + w_norm = norm(w) + + # Compute Newton step accordingly to + # formula (4.44) p.87 from ref [2]_. + delta_lambda = (p_norm/w_norm)**2 * (p_norm-tr_radius)/tr_radius + lambda_new = lambda_current + delta_lambda + + if p_norm < tr_radius: # Inside boundary + s_min, z_min = estimate_smallest_singular_value(U) + + ta, tb = self.get_boundaries_intersections(p, z_min, + tr_radius) + + # Choose `step_len` with the smallest magnitude. + # The reason for this choice is explained at + # ref [3]_, p. 6 (Immediately before the formula + # for `tau`). 
+ step_len = min([ta, tb], key=abs) + + # Compute the quadratic term (p.T*H*p) + quadratic_term = np.dot(p, np.dot(H, p)) + + # Check stop criteria + relative_error = ((step_len**2 * s_min**2) + / (quadratic_term + lambda_current*tr_radius**2)) + if relative_error <= self.k_hard: + p += step_len * z_min + break + + # Update uncertainty bounds + lambda_ub = lambda_current + lambda_lb = max(lambda_lb, lambda_current - s_min**2) + + # Compute Cholesky factorization + H = self.hess + lambda_new*np.eye(n) + c, info = self.cholesky(H, lower=False, + overwrite_a=False, + clean=True) + + # Check if the factorization have succeeded + # + if info == 0: # Successful factorization + # Update damping factor + lambda_current = lambda_new + already_factorized = True + else: # Unsuccessful factorization + # Update uncertainty bounds + lambda_lb = max(lambda_lb, lambda_new) + + # Update damping factor + lambda_current = max( + np.sqrt(lambda_lb * lambda_ub), + lambda_lb + self.UPDATE_COEFF*(lambda_ub-lambda_lb) + ) + + else: # Outside boundary + # Check stop criteria + relative_error = abs(p_norm - tr_radius) / tr_radius + if relative_error <= self.k_easy: + break + + # Update uncertainty bounds + lambda_lb = lambda_current + + # Update damping factor + lambda_current = lambda_new + + elif info == 0 and self.jac_mag <= self.CLOSE_TO_ZERO: + # jac_mag very close to zero + + # Check for interior convergence + if lambda_current == 0: + p = np.zeros(n) + hits_boundary = False + break + + s_min, z_min = estimate_smallest_singular_value(U) + step_len = tr_radius + + # Check stop criteria + if (step_len**2 * s_min**2 + <= self.k_hard * lambda_current * tr_radius**2): + p = step_len * z_min + break + + # Update uncertainty bounds + lambda_ub = lambda_current + lambda_lb = max(lambda_lb, lambda_current - s_min**2) + + # Update damping factor + lambda_current = max( + np.sqrt(lambda_lb * lambda_ub), + lambda_lb + self.UPDATE_COEFF*(lambda_ub-lambda_lb) + ) + + else: # Unsuccessful factorization + + # Compute auxiliary terms + delta, v = singular_leading_submatrix(H, U, info) + v_norm = norm(v) + + # Update uncertainty interval + lambda_lb = max(lambda_lb, lambda_current + delta/v_norm**2) + + # Update damping factor + lambda_current = max( + np.sqrt(lambda_lb * lambda_ub), + lambda_lb + self.UPDATE_COEFF*(lambda_ub-lambda_lb) + ) + + self.lambda_lb = lambda_lb + self.lambda_current = lambda_current + self.previous_tr_radius = tr_radius + + return p, hits_boundary diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trustregion_krylov.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trustregion_krylov.py new file mode 100644 index 0000000000000000000000000000000000000000..54e861ae2de02164966a33c437e5fdb08ba3006c --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trustregion_krylov.py @@ -0,0 +1,65 @@ +from ._trustregion import (_minimize_trust_region) +from ._trlib import (get_trlib_quadratic_subproblem) + +__all__ = ['_minimize_trust_krylov'] + +def _minimize_trust_krylov(fun, x0, args=(), jac=None, hess=None, hessp=None, + inexact=True, **trust_region_options): + """ + Minimization of a scalar function of one or more variables using + a nearly exact trust-region algorithm that only requires matrix + vector products with the hessian matrix. + + .. versionadded:: 1.0.0 + + Options + ------- + inexact : bool, optional + Accuracy to solve subproblems. 
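# Editorial sketch (not part of the vendored files): the helper
# estimate_smallest_singular_value defined in _trustregion_exact.py above can
# be compared against a full SVD on a small upper-triangular matrix; the toy
# matrix U below is made up for illustration.
import numpy as np
from scipy.optimize._trustregion_exact import estimate_smallest_singular_value

rng = np.random.default_rng(0)
U = np.triu(rng.standard_normal((6, 6)) + 5.0 * np.eye(6))

s_est, z_est = estimate_smallest_singular_value(U)
s_true = np.linalg.svd(U, compute_uv=False)[-1]
print(s_est, s_true)           # O(n**2) estimate of the smallest singular value
print(np.linalg.norm(z_est))   # the estimated right singular vector is unit norm
# Per the docstring above, the estimate is tighter the more ill-conditioned U is.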
If True requires less nonlinear + iterations, but more vector products. + """ + + if jac is None: + raise ValueError('Jacobian is required for trust region ', + 'exact minimization.') + if hess is None and hessp is None: + raise ValueError('Either the Hessian or the Hessian-vector product ' + 'is required for Krylov trust-region minimization') + + # tol_rel specifies the termination tolerance relative to the initial + # gradient norm in the Krylov subspace iteration. + + # - tol_rel_i specifies the tolerance for interior convergence. + # - tol_rel_b specifies the tolerance for boundary convergence. + # in nonlinear programming applications it is not necessary to solve + # the boundary case as exact as the interior case. + + # - setting tol_rel_i=-2 leads to a forcing sequence in the Krylov + # subspace iteration leading to quadratic convergence if eventually + # the trust region stays inactive. + # - setting tol_rel_b=-3 leads to a forcing sequence in the Krylov + # subspace iteration leading to superlinear convergence as long + # as the iterates hit the trust region boundary. + + # For details consult the documentation of trlib_krylov_min + # in _trlib/trlib_krylov.h + # + # Optimality of this choice of parameters among a range of possibilities + # has been tested on the unconstrained subset of the CUTEst library. + + if inexact: + return _minimize_trust_region(fun, x0, args=args, jac=jac, + hess=hess, hessp=hessp, + subproblem=get_trlib_quadratic_subproblem( + tol_rel_i=-2.0, tol_rel_b=-3.0, + disp=trust_region_options.get('disp', False) + ), + **trust_region_options) + else: + return _minimize_trust_region(fun, x0, args=args, jac=jac, + hess=hess, hessp=hessp, + subproblem=get_trlib_quadratic_subproblem( + tol_rel_i=1e-8, tol_rel_b=1e-6, + disp=trust_region_options.get('disp', False) + ), + **trust_region_options) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trustregion_ncg.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trustregion_ncg.py new file mode 100644 index 0000000000000000000000000000000000000000..fed17ff8b84eaf019c0ad69a03f260ca674477ad --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_trustregion_ncg.py @@ -0,0 +1,126 @@ +"""Newton-CG trust-region optimization.""" +import math + +import numpy as np +import scipy.linalg +from ._trustregion import (_minimize_trust_region, BaseQuadraticSubproblem) + +__all__ = [] + + +def _minimize_trust_ncg(fun, x0, args=(), jac=None, hess=None, hessp=None, + **trust_region_options): + """ + Minimization of scalar function of one or more variables using + the Newton conjugate gradient trust-region algorithm. + + Options + ------- + initial_trust_radius : float + Initial trust-region radius. + max_trust_radius : float + Maximum value of the trust-region radius. No steps that are longer + than this value will be proposed. + eta : float + Trust region related acceptance stringency for proposed steps. + gtol : float + Gradient norm must be less than `gtol` before successful + termination. 
+ + """ + if jac is None: + raise ValueError('Jacobian is required for Newton-CG trust-region ' + 'minimization') + if hess is None and hessp is None: + raise ValueError('Either the Hessian or the Hessian-vector product ' + 'is required for Newton-CG trust-region minimization') + return _minimize_trust_region(fun, x0, args=args, jac=jac, hess=hess, + hessp=hessp, subproblem=CGSteihaugSubproblem, + **trust_region_options) + + +class CGSteihaugSubproblem(BaseQuadraticSubproblem): + """Quadratic subproblem solved by a conjugate gradient method""" + def solve(self, trust_radius): + """ + Solve the subproblem using a conjugate gradient method. + + Parameters + ---------- + trust_radius : float + We are allowed to wander only this far away from the origin. + + Returns + ------- + p : ndarray + The proposed step. + hits_boundary : bool + True if the proposed step is on the boundary of the trust region. + + Notes + ----- + This is algorithm (7.2) of Nocedal and Wright 2nd edition. + Only the function that computes the Hessian-vector product is required. + The Hessian itself is not required, and the Hessian does + not need to be positive semidefinite. + """ + + # get the norm of jacobian and define the origin + p_origin = np.zeros_like(self.jac) + + # define a default tolerance + tolerance = min(0.5, math.sqrt(self.jac_mag)) * self.jac_mag + + # Stop the method if the search direction + # is a direction of nonpositive curvature. + if self.jac_mag < tolerance: + hits_boundary = False + return p_origin, hits_boundary + + # init the state for the first iteration + z = p_origin + r = self.jac + d = -r + + # Search for the min of the approximation of the objective function. + while True: + + # do an iteration + Bd = self.hessp(d) + dBd = np.dot(d, Bd) + if dBd <= 0: + # Look at the two boundary points. + # Find both values of t to get the boundary points such that + # ||z + t d|| == trust_radius + # and then choose the one with the predicted min value. + ta, tb = self.get_boundaries_intersections(z, d, trust_radius) + pa = z + ta * d + pb = z + tb * d + if self(pa) < self(pb): + p_boundary = pa + else: + p_boundary = pb + hits_boundary = True + return p_boundary, hits_boundary + r_squared = np.dot(r, r) + alpha = r_squared / dBd + z_next = z + alpha * d + if scipy.linalg.norm(z_next) >= trust_radius: + # Find t >= 0 to get the boundary point such that + # ||z + t d|| == trust_radius + ta, tb = self.get_boundaries_intersections(z, d, trust_radius) + p_boundary = z + tb * d + hits_boundary = True + return p_boundary, hits_boundary + r_next = r + alpha * Bd + r_next_squared = np.dot(r_next, r_next) + if math.sqrt(r_next_squared) < tolerance: + hits_boundary = False + return z_next, hits_boundary + beta_next = r_next_squared / r_squared + d_next = -r_next + beta_next * d + + # update the state for the next iteration + z = z_next + r = r_next + d = d_next diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_tstutils.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_tstutils.py new file mode 100644 index 0000000000000000000000000000000000000000..f56e835e345d66023efae81114a45ed29269f18d --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_tstutils.py @@ -0,0 +1,972 @@ +r""" +Parameters used in test and benchmark methods. + +Collections of test cases suitable for testing 1-D root-finders + 'original': The original benchmarking functions. 
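# Editorial sketch (not part of the vendored files): the Newton-CG (Steihaug)
# trust-region solver above is reached through the public interface as
# method='trust-ncg'; a minimal run on the Rosenbrock test function:
import numpy as np
from scipy.optimize import minimize, rosen, rosen_der, rosen_hess

x0 = np.array([1.3, 0.7, 0.8, 1.9, 1.2])
res = minimize(rosen, x0, method='trust-ncg', jac=rosen_der, hess=rosen_hess,
               options={'gtol': 1e-8})
print(res.x)   # expected to be close to the minimizer [1, 1, 1, 1, 1]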
+ Real-valued functions of real-valued inputs on an interval + with a zero. + f1, .., f3 are continuous and infinitely differentiable + f4 has a left- and right- discontinuity at the root + f5 has a root at 1 replacing a 1st order pole + f6 is randomly positive on one side of the root, + randomly negative on the other. + f4 - f6 are not continuous at the root. + + 'aps': The test problems in the 1995 paper + TOMS "Algorithm 748: Enclosing Zeros of Continuous Functions" + by Alefeld, Potra and Shi. Real-valued functions of + real-valued inputs on an interval with a zero. + Suitable for methods which start with an enclosing interval, and + derivatives up to 2nd order. + + 'complex': Some complex-valued functions of complex-valued inputs. + No enclosing bracket is provided. + Suitable for methods which use one or more starting values, and + derivatives up to 2nd order. + + The test cases are provided as a list of dictionaries. The dictionary + keys will be a subset of: + ["f", "fprime", "fprime2", "args", "bracket", "smoothness", + "a", "b", "x0", "x1", "root", "ID"] +""" + +# Sources: +# [1] Alefeld, G. E. and Potra, F. A. and Shi, Yixun, +# "Algorithm 748: Enclosing Zeros of Continuous Functions", +# ACM Trans. Math. Softw. Volume 221(1995) +# doi = {10.1145/210089.210111}, +# [2] Chandrupatla, Tirupathi R. "A new hybrid quadratic/bisection algorithm +# for finding the zero of a nonlinear function without using derivatives." +# Advances in Engineering Software 28.3 (1997): 145-149. + +from random import random + +import numpy as np + +from scipy.optimize import _zeros_py as cc +from scipy._lib._array_api import array_namespace + +# "description" refers to the original functions +description = """ +f2 is a symmetric parabola, x**2 - 1 +f3 is a quartic polynomial with large hump in interval +f4 is step function with a discontinuity at 1 +f5 is a hyperbola with vertical asymptote at 1 +f6 has random values positive to left of 1, negative to right + +Of course, these are not real problems. They just test how the +'good' solvers behave in bad circumstances where bisection is +really the best. A good solver should not be much worse than +bisection in such circumstance, while being faster for smooth +monotone sorts of functions. +""" + + +def f1(x): + r"""f1 is a quadratic with roots at 0 and 1""" + return x * (x - 1.) + + +def f1_fp(x): + return 2 * x - 1 + + +def f1_fpp(x): + return 2 + + +def f2(x): + r"""f2 is a symmetric parabola, x**2 - 1""" + return x**2 - 1 + + +def f2_fp(x): + return 2 * x + + +def f2_fpp(x): + return 2 + + +def f3(x): + r"""A quartic with roots at 0, 1, 2 and 3""" + return x * (x - 1.) * (x - 2.) * (x - 3.) # x**4 - 6x**3 + 11x**2 - 6x + + +def f3_fp(x): + return 4 * x**3 - 18 * x**2 + 22 * x - 6 + + +def f3_fpp(x): + return 12 * x**2 - 36 * x + 22 + + +def f4(x): + r"""Piecewise linear, left- and right- discontinuous at x=1, the root.""" + if x > 1: + return 1.0 + .1 * x + if x < 1: + return -1.0 + .1 * x + return 0 + + +def f5(x): + r""" + Hyperbola with a pole at x=1, but pole replaced with 0. Not continuous at root. + """ + if x != 1: + return 1.0 / (1. - x) + return 0 + + +# f6(x) returns random value. 
Without memoization, calling twice with the +# same x returns different values, hence a "random value", not a +# "function with random values" +_f6_cache = {} +def f6(x): + v = _f6_cache.get(x, None) + if v is None: + if x > 1: + v = random() + elif x < 1: + v = -random() + else: + v = 0 + _f6_cache[x] = v + return v + + +# Each Original test case has +# - a function and its two derivatives, +# - additional arguments, +# - a bracket enclosing a root, +# - the order of differentiability (smoothness) on this interval +# - a starting value for methods which don't require a bracket +# - the root (inside the bracket) +# - an Identifier of the test case + +_ORIGINAL_TESTS_KEYS = [ + "f", "fprime", "fprime2", "args", "bracket", "smoothness", "x0", "root", "ID" +] +_ORIGINAL_TESTS = [ + [f1, f1_fp, f1_fpp, (), [0.5, np.sqrt(3)], np.inf, 0.6, 1.0, "original.01.00"], + [f2, f2_fp, f2_fpp, (), [0.5, np.sqrt(3)], np.inf, 0.6, 1.0, "original.02.00"], + [f3, f3_fp, f3_fpp, (), [0.5, np.sqrt(3)], np.inf, 0.6, 1.0, "original.03.00"], + [f4, None, None, (), [0.5, np.sqrt(3)], -1, 0.6, 1.0, "original.04.00"], + [f5, None, None, (), [0.5, np.sqrt(3)], -1, 0.6, 1.0, "original.05.00"], + [f6, None, None, (), [0.5, np.sqrt(3)], -np.inf, 0.6, 1.0, "original.05.00"] +] + +_ORIGINAL_TESTS_DICTS = [ + dict(zip(_ORIGINAL_TESTS_KEYS, testcase)) for testcase in _ORIGINAL_TESTS +] + +# ################## +# "APS" test cases +# Functions and test cases that appear in [1] + + +def aps01_f(x): + r"""Straightforward sum of trigonometric function and polynomial""" + return np.sin(x) - x / 2 + + +def aps01_fp(x): + return np.cos(x) - 1.0 / 2 + + +def aps01_fpp(x): + return -np.sin(x) + + +def aps02_f(x): + r"""poles at x=n**2, 1st and 2nd derivatives at root are also close to 0""" + ii = np.arange(1, 21) + return -2 * np.sum((2 * ii - 5)**2 / (x - ii**2)**3) + + +def aps02_fp(x): + ii = np.arange(1, 21) + return 6 * np.sum((2 * ii - 5)**2 / (x - ii**2)**4) + + +def aps02_fpp(x): + ii = np.arange(1, 21) + return 24 * np.sum((2 * ii - 5)**2 / (x - ii**2)**5) + + +def aps03_f(x, a, b): + r"""Rapidly changing at the root""" + return a * x * np.exp(b * x) + + +def aps03_fp(x, a, b): + return a * (b * x + 1) * np.exp(b * x) + + +def aps03_fpp(x, a, b): + return a * (b * (b * x + 1) + b) * np.exp(b * x) + + +def aps04_f(x, n, a): + r"""Medium-degree polynomial""" + return x**n - a + + +def aps04_fp(x, n, a): + return n * x**(n - 1) + + +def aps04_fpp(x, n, a): + return n * (n - 1) * x**(n - 2) + + +def aps05_f(x): + r"""Simple Trigonometric function""" + return np.sin(x) - 1.0 / 2 + + +def aps05_fp(x): + return np.cos(x) + + +def aps05_fpp(x): + return -np.sin(x) + + +def aps06_f(x, n): + r"""Exponential rapidly changing from -1 to 1 at x=0""" + return 2 * x * np.exp(-n) - 2 * np.exp(-n * x) + 1 + + +def aps06_fp(x, n): + return 2 * np.exp(-n) + 2 * n * np.exp(-n * x) + + +def aps06_fpp(x, n): + return -2 * n * n * np.exp(-n * x) + + +def aps07_f(x, n): + r"""Upside down parabola with parametrizable height""" + return (1 + (1 - n)**2) * x - (1 - n * x)**2 + + +def aps07_fp(x, n): + return (1 + (1 - n)**2) + 2 * n * (1 - n * x) + + +def aps07_fpp(x, n): + return -2 * n * n + + +def aps08_f(x, n): + r"""Degree n polynomial""" + return x * x - (1 - x)**n + + +def aps08_fp(x, n): + return 2 * x + n * (1 - x)**(n - 1) + + +def aps08_fpp(x, n): + return 2 - n * (n - 1) * (1 - x)**(n - 2) + + +def aps09_f(x, n): + r"""Upside down quartic with parametrizable height""" + return (1 + (1 - n)**4) * x - (1 - n * x)**4 + + +def aps09_fp(x, n): 
+ return (1 + (1 - n)**4) + 4 * n * (1 - n * x)**3 + + +def aps09_fpp(x, n): + return -12 * n * (1 - n * x)**2 + + +def aps10_f(x, n): + r"""Exponential plus a polynomial""" + return np.exp(-n * x) * (x - 1) + x**n + + +def aps10_fp(x, n): + return np.exp(-n * x) * (-n * (x - 1) + 1) + n * x**(n - 1) + + +def aps10_fpp(x, n): + return (np.exp(-n * x) * (-n * (-n * (x - 1) + 1) + -n * x) + + n * (n - 1) * x**(n - 2)) + + +def aps11_f(x, n): + r"""Rational function with a zero at x=1/n and a pole at x=0""" + return (n * x - 1) / ((n - 1) * x) + + +def aps11_fp(x, n): + return 1 / (n - 1) / x**2 + + +def aps11_fpp(x, n): + return -2 / (n - 1) / x**3 + + +def aps12_f(x, n): + r"""nth root of x, with a zero at x=n""" + return np.power(x, 1.0 / n) - np.power(n, 1.0 / n) + + +def aps12_fp(x, n): + return np.power(x, (1.0 - n) / n) / n + + +def aps12_fpp(x, n): + return np.power(x, (1.0 - 2 * n) / n) * (1.0 / n) * (1.0 - n) / n + + +_MAX_EXPABLE = np.log(np.finfo(float).max) + + +def aps13_f(x): + r"""Function with *all* derivatives 0 at the root""" + if x == 0: + return 0 + # x2 = 1.0/x**2 + # if x2 > 708: + # return 0 + y = 1 / x**2 + if y > _MAX_EXPABLE: + return 0 + return x / np.exp(y) + + +def aps13_fp(x): + if x == 0: + return 0 + y = 1 / x**2 + if y > _MAX_EXPABLE: + return 0 + return (1 + 2 / x**2) / np.exp(y) + + +def aps13_fpp(x): + if x == 0: + return 0 + y = 1 / x**2 + if y > _MAX_EXPABLE: + return 0 + return 2 * (2 - x**2) / x**5 / np.exp(y) + + +def aps14_f(x, n): + r"""0 for negative x-values, trigonometric+linear for x positive""" + if x <= 0: + return -n / 20.0 + return n / 20.0 * (x / 1.5 + np.sin(x) - 1) + + +def aps14_fp(x, n): + if x <= 0: + return 0 + return n / 20.0 * (1.0 / 1.5 + np.cos(x)) + + +def aps14_fpp(x, n): + if x <= 0: + return 0 + return -n / 20.0 * (np.sin(x)) + + +def aps15_f(x, n): + r"""piecewise linear, constant outside of [0, 0.002/(1+n)]""" + if x < 0: + return -0.859 + if x > 2 * 1e-3 / (1 + n): + return np.e - 1.859 + return np.exp((n + 1) * x / 2 * 1000) - 1.859 + + +def aps15_fp(x, n): + if not 0 <= x <= 2 * 1e-3 / (1 + n): + return np.e - 1.859 + return np.exp((n + 1) * x / 2 * 1000) * (n + 1) / 2 * 1000 + + +def aps15_fpp(x, n): + if not 0 <= x <= 2 * 1e-3 / (1 + n): + return np.e - 1.859 + return np.exp((n + 1) * x / 2 * 1000) * (n + 1) / 2 * 1000 * (n + 1) / 2 * 1000 + + +# Each APS test case has +# - a function and its two derivatives, +# - additional arguments, +# - a bracket enclosing a root, +# - the order of differentiability of the function on this interval +# - a starting value for methods which don't require a bracket +# - the root (inside the bracket) +# - an Identifier of the test case +# +# Algorithm 748 is a bracketing algorithm so a bracketing interval was provided +# in [1] for each test case. Newton and Halley methods need a single +# starting point x0, which was chosen to be near the middle of the interval, +# unless that would have made the problem too easy. 
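+#
+# For illustration (a sketch, not part of [1]): once zipped into dicts via the
+# keys below, a single APS test case can be fed to one of the bracketing
+# solvers in this module, e.g.
+#
+#     tc = _APS_TESTS_DICTS[0]
+#     a, b = tc["bracket"]
+#     root = cc.brentq(tc["f"], a, b, args=tc["args"])
+#     # root should agree with tc["root"] to roughly the solver's tolerance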
+ +_APS_TESTS_KEYS = [ + "f", "fprime", "fprime2", "args", "bracket", "smoothness", "x0", "root", "ID" +] +_APS_TESTS = [ + [aps01_f, aps01_fp, aps01_fpp, (), [np.pi / 2, np.pi], np.inf, + 3, 1.89549426703398094e+00, "aps.01.00"], + [aps02_f, aps02_fp, aps02_fpp, (), [1 + 1e-9, 4 - 1e-9], np.inf, + 2, 3.02291534727305677e+00, "aps.02.00"], + [aps02_f, aps02_fp, aps02_fpp, (), [4 + 1e-9, 9 - 1e-9], np.inf, + 5, 6.68375356080807848e+00, "aps.02.01"], + [aps02_f, aps02_fp, aps02_fpp, (), [9 + 1e-9, 16 - 1e-9], np.inf, + 10, 1.12387016550022114e+01, "aps.02.02"], + [aps02_f, aps02_fp, aps02_fpp, (), [16 + 1e-9, 25 - 1e-9], np.inf, + 17, 1.96760000806234103e+01, "aps.02.03"], + [aps02_f, aps02_fp, aps02_fpp, (), [25 + 1e-9, 36 - 1e-9], np.inf, + 26, 2.98282273265047557e+01, "aps.02.04"], + [aps02_f, aps02_fp, aps02_fpp, (), [36 + 1e-9, 49 - 1e-9], np.inf, + 37, 4.19061161952894139e+01, "aps.02.05"], + [aps02_f, aps02_fp, aps02_fpp, (), [49 + 1e-9, 64 - 1e-9], np.inf, + 50, 5.59535958001430913e+01, "aps.02.06"], + [aps02_f, aps02_fp, aps02_fpp, (), [64 + 1e-9, 81 - 1e-9], np.inf, + 65, 7.19856655865877997e+01, "aps.02.07"], + [aps02_f, aps02_fp, aps02_fpp, (), [81 + 1e-9, 100 - 1e-9], np.inf, + 82, 9.00088685391666701e+01, "aps.02.08"], + [aps02_f, aps02_fp, aps02_fpp, (), [100 + 1e-9, 121 - 1e-9], np.inf, + 101, 1.10026532748330197e+02, "aps.02.09"], + [aps03_f, aps03_fp, aps03_fpp, (-40, -1), [-9, 31], np.inf, + -2, 0, "aps.03.00"], + [aps03_f, aps03_fp, aps03_fpp, (-100, -2), [-9, 31], np.inf, + -2, 0, "aps.03.01"], + [aps03_f, aps03_fp, aps03_fpp, (-200, -3), [-9, 31], np.inf, + -2, 0, "aps.03.02"], + [aps04_f, aps04_fp, aps04_fpp, (4, 0.2), [0, 5], np.inf, + 2.5, 6.68740304976422006e-01, "aps.04.00"], + [aps04_f, aps04_fp, aps04_fpp, (6, 0.2), [0, 5], np.inf, + 2.5, 7.64724491331730039e-01, "aps.04.01"], + [aps04_f, aps04_fp, aps04_fpp, (8, 0.2), [0, 5], np.inf, + 2.5, 8.17765433957942545e-01, "aps.04.02"], + [aps04_f, aps04_fp, aps04_fpp, (10, 0.2), [0, 5], np.inf, + 2.5, 8.51339922520784609e-01, "aps.04.03"], + [aps04_f, aps04_fp, aps04_fpp, (12, 0.2), [0, 5], np.inf, + 2.5, 8.74485272221167897e-01, "aps.04.04"], + [aps04_f, aps04_fp, aps04_fpp, (4, 1), [0, 5], np.inf, + 2.5, 1, "aps.04.05"], + [aps04_f, aps04_fp, aps04_fpp, (6, 1), [0, 5], np.inf, + 2.5, 1, "aps.04.06"], + [aps04_f, aps04_fp, aps04_fpp, (8, 1), [0, 5], np.inf, + 2.5, 1, "aps.04.07"], + [aps04_f, aps04_fp, aps04_fpp, (10, 1), [0, 5], np.inf, + 2.5, 1, "aps.04.08"], + [aps04_f, aps04_fp, aps04_fpp, (12, 1), [0, 5], np.inf, + 2.5, 1, "aps.04.09"], + [aps04_f, aps04_fp, aps04_fpp, (8, 1), [-0.95, 4.05], np.inf, + 1.5, 1, "aps.04.10"], + [aps04_f, aps04_fp, aps04_fpp, (10, 1), [-0.95, 4.05], np.inf, + 1.5, 1, "aps.04.11"], + [aps04_f, aps04_fp, aps04_fpp, (12, 1), [-0.95, 4.05], np.inf, + 1.5, 1, "aps.04.12"], + [aps04_f, aps04_fp, aps04_fpp, (14, 1), [-0.95, 4.05], np.inf, + 1.5, 1, "aps.04.13"], + [aps05_f, aps05_fp, aps05_fpp, (), [0, 1.5], np.inf, + 1.3, np.pi / 6, "aps.05.00"], + [aps06_f, aps06_fp, aps06_fpp, (1,), [0, 1], np.inf, + 0.5, 4.22477709641236709e-01, "aps.06.00"], + [aps06_f, aps06_fp, aps06_fpp, (2,), [0, 1], np.inf, + 0.5, 3.06699410483203705e-01, "aps.06.01"], + [aps06_f, aps06_fp, aps06_fpp, (3,), [0, 1], np.inf, + 0.5, 2.23705457654662959e-01, "aps.06.02"], + [aps06_f, aps06_fp, aps06_fpp, (4,), [0, 1], np.inf, + 0.5, 1.71719147519508369e-01, "aps.06.03"], + [aps06_f, aps06_fp, aps06_fpp, (5,), [0, 1], np.inf, + 0.4, 1.38257155056824066e-01, "aps.06.04"], + [aps06_f, aps06_fp, aps06_fpp, (20,), [0, 1], 
np.inf, + 0.1, 3.46573590208538521e-02, "aps.06.05"], + [aps06_f, aps06_fp, aps06_fpp, (40,), [0, 1], np.inf, + 5e-02, 1.73286795139986315e-02, "aps.06.06"], + [aps06_f, aps06_fp, aps06_fpp, (60,), [0, 1], np.inf, + 1.0 / 30, 1.15524530093324210e-02, "aps.06.07"], + [aps06_f, aps06_fp, aps06_fpp, (80,), [0, 1], np.inf, + 2.5e-02, 8.66433975699931573e-03, "aps.06.08"], + [aps06_f, aps06_fp, aps06_fpp, (100,), [0, 1], np.inf, + 2e-02, 6.93147180559945415e-03, "aps.06.09"], + [aps07_f, aps07_fp, aps07_fpp, (5,), [0, 1], np.inf, + 0.4, 3.84025518406218985e-02, "aps.07.00"], + [aps07_f, aps07_fp, aps07_fpp, (10,), [0, 1], np.inf, + 0.4, 9.90000999800049949e-03, "aps.07.01"], + [aps07_f, aps07_fp, aps07_fpp, (20,), [0, 1], np.inf, + 0.4, 2.49375003906201174e-03, "aps.07.02"], + [aps08_f, aps08_fp, aps08_fpp, (2,), [0, 1], np.inf, + 0.9, 0.5, "aps.08.00"], + [aps08_f, aps08_fp, aps08_fpp, (5,), [0, 1], np.inf, + 0.9, 3.45954815848242059e-01, "aps.08.01"], + [aps08_f, aps08_fp, aps08_fpp, (10,), [0, 1], np.inf, + 0.9, 2.45122333753307220e-01, "aps.08.02"], + [aps08_f, aps08_fp, aps08_fpp, (15,), [0, 1], np.inf, + 0.9, 1.95547623536565629e-01, "aps.08.03"], + [aps08_f, aps08_fp, aps08_fpp, (20,), [0, 1], np.inf, + 0.9, 1.64920957276440960e-01, "aps.08.04"], + [aps09_f, aps09_fp, aps09_fpp, (1,), [0, 1], np.inf, + 0.5, 2.75508040999484394e-01, "aps.09.00"], + [aps09_f, aps09_fp, aps09_fpp, (2,), [0, 1], np.inf, + 0.5, 1.37754020499742197e-01, "aps.09.01"], + [aps09_f, aps09_fp, aps09_fpp, (4,), [0, 1], np.inf, + 0.5, 1.03052837781564422e-02, "aps.09.02"], + [aps09_f, aps09_fp, aps09_fpp, (5,), [0, 1], np.inf, + 0.5, 3.61710817890406339e-03, "aps.09.03"], + [aps09_f, aps09_fp, aps09_fpp, (8,), [0, 1], np.inf, + 0.5, 4.10872918496395375e-04, "aps.09.04"], + [aps09_f, aps09_fp, aps09_fpp, (15,), [0, 1], np.inf, + 0.5, 2.59895758929076292e-05, "aps.09.05"], + [aps09_f, aps09_fp, aps09_fpp, (20,), [0, 1], np.inf, + 0.5, 7.66859512218533719e-06, "aps.09.06"], + [aps10_f, aps10_fp, aps10_fpp, (1,), [0, 1], np.inf, + 0.9, 4.01058137541547011e-01, "aps.10.00"], + [aps10_f, aps10_fp, aps10_fpp, (5,), [0, 1], np.inf, + 0.9, 5.16153518757933583e-01, "aps.10.01"], + [aps10_f, aps10_fp, aps10_fpp, (10,), [0, 1], np.inf, + 0.9, 5.39522226908415781e-01, "aps.10.02"], + [aps10_f, aps10_fp, aps10_fpp, (15,), [0, 1], np.inf, + 0.9, 5.48182294340655241e-01, "aps.10.03"], + [aps10_f, aps10_fp, aps10_fpp, (20,), [0, 1], np.inf, + 0.9, 5.52704666678487833e-01, "aps.10.04"], + [aps11_f, aps11_fp, aps11_fpp, (2,), [0.01, 1], np.inf, + 1e-02, 1.0 / 2, "aps.11.00"], + [aps11_f, aps11_fp, aps11_fpp, (5,), [0.01, 1], np.inf, + 1e-02, 1.0 / 5, "aps.11.01"], + [aps11_f, aps11_fp, aps11_fpp, (15,), [0.01, 1], np.inf, + 1e-02, 1.0 / 15, "aps.11.02"], + [aps11_f, aps11_fp, aps11_fpp, (20,), [0.01, 1], np.inf, + 1e-02, 1.0 / 20, "aps.11.03"], + [aps12_f, aps12_fp, aps12_fpp, (2,), [1, 100], np.inf, + 1.1, 2, "aps.12.00"], + [aps12_f, aps12_fp, aps12_fpp, (3,), [1, 100], np.inf, + 1.1, 3, "aps.12.01"], + [aps12_f, aps12_fp, aps12_fpp, (4,), [1, 100], np.inf, + 1.1, 4, "aps.12.02"], + [aps12_f, aps12_fp, aps12_fpp, (5,), [1, 100], np.inf, + 1.1, 5, "aps.12.03"], + [aps12_f, aps12_fp, aps12_fpp, (6,), [1, 100], np.inf, + 1.1, 6, "aps.12.04"], + [aps12_f, aps12_fp, aps12_fpp, (7,), [1, 100], np.inf, + 1.1, 7, "aps.12.05"], + [aps12_f, aps12_fp, aps12_fpp, (9,), [1, 100], np.inf, + 1.1, 9, "aps.12.06"], + [aps12_f, aps12_fp, aps12_fpp, (11,), [1, 100], np.inf, + 1.1, 11, "aps.12.07"], + [aps12_f, aps12_fp, aps12_fpp, (13,), [1, 100], 
np.inf, + 1.1, 13, "aps.12.08"], + [aps12_f, aps12_fp, aps12_fpp, (15,), [1, 100], np.inf, + 1.1, 15, "aps.12.09"], + [aps12_f, aps12_fp, aps12_fpp, (17,), [1, 100], np.inf, + 1.1, 17, "aps.12.10"], + [aps12_f, aps12_fp, aps12_fpp, (19,), [1, 100], np.inf, + 1.1, 19, "aps.12.11"], + [aps12_f, aps12_fp, aps12_fpp, (21,), [1, 100], np.inf, + 1.1, 21, "aps.12.12"], + [aps12_f, aps12_fp, aps12_fpp, (23,), [1, 100], np.inf, + 1.1, 23, "aps.12.13"], + [aps12_f, aps12_fp, aps12_fpp, (25,), [1, 100], np.inf, + 1.1, 25, "aps.12.14"], + [aps12_f, aps12_fp, aps12_fpp, (27,), [1, 100], np.inf, + 1.1, 27, "aps.12.15"], + [aps12_f, aps12_fp, aps12_fpp, (29,), [1, 100], np.inf, + 1.1, 29, "aps.12.16"], + [aps12_f, aps12_fp, aps12_fpp, (31,), [1, 100], np.inf, + 1.1, 31, "aps.12.17"], + [aps12_f, aps12_fp, aps12_fpp, (33,), [1, 100], np.inf, + 1.1, 33, "aps.12.18"], + [aps13_f, aps13_fp, aps13_fpp, (), [-1, 4], np.inf, + 1.5, 0, "aps.13.00"], + [aps14_f, aps14_fp, aps14_fpp, (1,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.00"], + [aps14_f, aps14_fp, aps14_fpp, (2,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.01"], + [aps14_f, aps14_fp, aps14_fpp, (3,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.02"], + [aps14_f, aps14_fp, aps14_fpp, (4,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.03"], + [aps14_f, aps14_fp, aps14_fpp, (5,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.04"], + [aps14_f, aps14_fp, aps14_fpp, (6,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.05"], + [aps14_f, aps14_fp, aps14_fpp, (7,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.06"], + [aps14_f, aps14_fp, aps14_fpp, (8,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.07"], + [aps14_f, aps14_fp, aps14_fpp, (9,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.08"], + [aps14_f, aps14_fp, aps14_fpp, (10,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.09"], + [aps14_f, aps14_fp, aps14_fpp, (11,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.10"], + [aps14_f, aps14_fp, aps14_fpp, (12,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.11"], + [aps14_f, aps14_fp, aps14_fpp, (13,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.12"], + [aps14_f, aps14_fp, aps14_fpp, (14,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.13"], + [aps14_f, aps14_fp, aps14_fpp, (15,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.14"], + [aps14_f, aps14_fp, aps14_fpp, (16,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.15"], + [aps14_f, aps14_fp, aps14_fpp, (17,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.16"], + [aps14_f, aps14_fp, aps14_fpp, (18,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.17"], + [aps14_f, aps14_fp, aps14_fpp, (19,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.18"], + [aps14_f, aps14_fp, aps14_fpp, (20,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.19"], + [aps14_f, aps14_fp, aps14_fpp, (21,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.20"], + [aps14_f, aps14_fp, aps14_fpp, (22,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.21"], + [aps14_f, aps14_fp, aps14_fpp, (23,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.22"], + [aps14_f, aps14_fp, aps14_fpp, (24,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.23"], + [aps14_f, aps14_fp, aps14_fpp, (25,), [-1000, np.pi 
/ 2], 0, + 1, 6.23806518961612433e-01, "aps.14.24"], + [aps14_f, aps14_fp, aps14_fpp, (26,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.25"], + [aps14_f, aps14_fp, aps14_fpp, (27,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.26"], + [aps14_f, aps14_fp, aps14_fpp, (28,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.27"], + [aps14_f, aps14_fp, aps14_fpp, (29,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.28"], + [aps14_f, aps14_fp, aps14_fpp, (30,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.29"], + [aps14_f, aps14_fp, aps14_fpp, (31,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.30"], + [aps14_f, aps14_fp, aps14_fpp, (32,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.31"], + [aps14_f, aps14_fp, aps14_fpp, (33,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.32"], + [aps14_f, aps14_fp, aps14_fpp, (34,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.33"], + [aps14_f, aps14_fp, aps14_fpp, (35,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.34"], + [aps14_f, aps14_fp, aps14_fpp, (36,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.35"], + [aps14_f, aps14_fp, aps14_fpp, (37,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.36"], + [aps14_f, aps14_fp, aps14_fpp, (38,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.37"], + [aps14_f, aps14_fp, aps14_fpp, (39,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.38"], + [aps14_f, aps14_fp, aps14_fpp, (40,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.39"], + [aps15_f, aps15_fp, aps15_fpp, (20,), [-1000, 1e-4], 0, + -2, 5.90513055942197166e-05, "aps.15.00"], + [aps15_f, aps15_fp, aps15_fpp, (21,), [-1000, 1e-4], 0, + -2, 5.63671553399369967e-05, "aps.15.01"], + [aps15_f, aps15_fp, aps15_fpp, (22,), [-1000, 1e-4], 0, + -2, 5.39164094555919196e-05, "aps.15.02"], + [aps15_f, aps15_fp, aps15_fpp, (23,), [-1000, 1e-4], 0, + -2, 5.16698923949422470e-05, "aps.15.03"], + [aps15_f, aps15_fp, aps15_fpp, (24,), [-1000, 1e-4], 0, + -2, 4.96030966991445609e-05, "aps.15.04"], + [aps15_f, aps15_fp, aps15_fpp, (25,), [-1000, 1e-4], 0, + -2, 4.76952852876389951e-05, "aps.15.05"], + [aps15_f, aps15_fp, aps15_fpp, (26,), [-1000, 1e-4], 0, + -2, 4.59287932399486662e-05, "aps.15.06"], + [aps15_f, aps15_fp, aps15_fpp, (27,), [-1000, 1e-4], 0, + -2, 4.42884791956647841e-05, "aps.15.07"], + [aps15_f, aps15_fp, aps15_fpp, (28,), [-1000, 1e-4], 0, + -2, 4.27612902578832391e-05, "aps.15.08"], + [aps15_f, aps15_fp, aps15_fpp, (29,), [-1000, 1e-4], 0, + -2, 4.13359139159538030e-05, "aps.15.09"], + [aps15_f, aps15_fp, aps15_fpp, (30,), [-1000, 1e-4], 0, + -2, 4.00024973380198076e-05, "aps.15.10"], + [aps15_f, aps15_fp, aps15_fpp, (31,), [-1000, 1e-4], 0, + -2, 3.87524192962066869e-05, "aps.15.11"], + [aps15_f, aps15_fp, aps15_fpp, (32,), [-1000, 1e-4], 0, + -2, 3.75781035599579910e-05, "aps.15.12"], + [aps15_f, aps15_fp, aps15_fpp, (33,), [-1000, 1e-4], 0, + -2, 3.64728652199592355e-05, "aps.15.13"], + [aps15_f, aps15_fp, aps15_fpp, (34,), [-1000, 1e-4], 0, + -2, 3.54307833565318273e-05, "aps.15.14"], + [aps15_f, aps15_fp, aps15_fpp, (35,), [-1000, 1e-4], 0, + -2, 3.44465949299614980e-05, "aps.15.15"], + [aps15_f, aps15_fp, aps15_fpp, (36,), [-1000, 1e-4], 0, + -2, 3.35156058778003705e-05, "aps.15.16"], + [aps15_f, aps15_fp, aps15_fpp, (37,), [-1000, 1e-4], 0, + -2, 3.26336162494372125e-05, "aps.15.17"], + [aps15_f, aps15_fp, aps15_fpp, (38,), 
[-1000, 1e-4], 0, + -2, 3.17968568584260013e-05, "aps.15.18"], + [aps15_f, aps15_fp, aps15_fpp, (39,), [-1000, 1e-4], 0, + -2, 3.10019354369653455e-05, "aps.15.19"], + [aps15_f, aps15_fp, aps15_fpp, (40,), [-1000, 1e-4], 0, + -2, 3.02457906702100968e-05, "aps.15.20"], + [aps15_f, aps15_fp, aps15_fpp, (100,), [-1000, 1e-4], 0, + -2, 1.22779942324615231e-05, "aps.15.21"], + [aps15_f, aps15_fp, aps15_fpp, (200,), [-1000, 1e-4], 0, + -2, 6.16953939044086617e-06, "aps.15.22"], + [aps15_f, aps15_fp, aps15_fpp, (300,), [-1000, 1e-4], 0, + -2, 4.11985852982928163e-06, "aps.15.23"], + [aps15_f, aps15_fp, aps15_fpp, (400,), [-1000, 1e-4], 0, + -2, 3.09246238772721682e-06, "aps.15.24"], + [aps15_f, aps15_fp, aps15_fpp, (500,), [-1000, 1e-4], 0, + -2, 2.47520442610501789e-06, "aps.15.25"], + [aps15_f, aps15_fp, aps15_fpp, (600,), [-1000, 1e-4], 0, + -2, 2.06335676785127107e-06, "aps.15.26"], + [aps15_f, aps15_fp, aps15_fpp, (700,), [-1000, 1e-4], 0, + -2, 1.76901200781542651e-06, "aps.15.27"], + [aps15_f, aps15_fp, aps15_fpp, (800,), [-1000, 1e-4], 0, + -2, 1.54816156988591016e-06, "aps.15.28"], + [aps15_f, aps15_fp, aps15_fpp, (900,), [-1000, 1e-4], 0, + -2, 1.37633453660223511e-06, "aps.15.29"], + [aps15_f, aps15_fp, aps15_fpp, (1000,), [-1000, 1e-4], 0, + -2, 1.23883857889971403e-06, "aps.15.30"] +] + +_APS_TESTS_DICTS = [dict(zip(_APS_TESTS_KEYS, testcase)) for testcase in _APS_TESTS] + + +# ################## +# "complex" test cases +# A few simple, complex-valued, functions, defined on the complex plane. + + +def cplx01_f(z, n, a): + r"""z**n-a: Use to find the nth root of a""" + return z**n - a + + +def cplx01_fp(z, n, a): + return n * z**(n - 1) + + +def cplx01_fpp(z, n, a): + return n * (n - 1) * z**(n - 2) + + +def cplx02_f(z, a): + r"""e**z - a: Use to find the log of a""" + return np.exp(z) - a + + +def cplx02_fp(z, a): + return np.exp(z) + + +def cplx02_fpp(z, a): + return np.exp(z) + + +# Each "complex" test case has +# - a function and its two derivatives, +# - additional arguments, +# - the order of differentiability of the function on this interval +# - two starting values x0 and x1 +# - the root +# - an Identifier of the test case +# +# Algorithm 748 is a bracketing algorithm so a bracketing interval was provided +# in [1] for each test case. Newton and Halley need a single starting point +# x0, which was chosen to be near the middle of the interval, unless that +# would make the problem too easy. 
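+#
+# For illustration (a sketch): a complex-valued case has no bracket and is
+# instead solved from the starting values x0 (and optionally x1), e.g. with
+# the secant form of `newton` from this package:
+#
+#     tc = _COMPLEX_TESTS_DICTS[0]
+#     root = cc.newton(tc["f"], tc["x0"], x1=tc["x1"], args=tc["args"])
+#     # root is expected to lie near tc["root"] (1j for this first case)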
+ + +_COMPLEX_TESTS_KEYS = [ + "f", "fprime", "fprime2", "args", "smoothness", "x0", "x1", "root", "ID" +] +_COMPLEX_TESTS = [ + [cplx01_f, cplx01_fp, cplx01_fpp, (2, -1), np.inf, + (1 + 1j), (0.5 + 0.5j), 1j, "complex.01.00"], + [cplx01_f, cplx01_fp, cplx01_fpp, (3, 1), np.inf, + (-1 + 1j), (-0.5 + 2.0j), (-0.5 + np.sqrt(3) / 2 * 1.0j), + "complex.01.01"], + [cplx01_f, cplx01_fp, cplx01_fpp, (3, -1), np.inf, + 1j, (0.5 + 0.5j), (0.5 + np.sqrt(3) / 2 * 1.0j), + "complex.01.02"], + [cplx01_f, cplx01_fp, cplx01_fpp, (3, 8), np.inf, + 5, 4, 2, "complex.01.03"], + [cplx02_f, cplx02_fp, cplx02_fpp, (-1,), np.inf, + (1 + 2j), (0.5 + 0.5j), np.pi * 1.0j, "complex.02.00"], + [cplx02_f, cplx02_fp, cplx02_fpp, (1j,), np.inf, + (1 + 2j), (0.5 + 0.5j), np.pi * 0.5j, "complex.02.01"], +] + +_COMPLEX_TESTS_DICTS = [ + dict(zip(_COMPLEX_TESTS_KEYS, testcase)) for testcase in _COMPLEX_TESTS +] + + +def _add_a_b(tests): + r"""Add "a" and "b" keys to each test from the "bracket" value""" + for d in tests: + for k, v in zip(['a', 'b'], d.get('bracket', [])): + d[k] = v + + +_add_a_b(_ORIGINAL_TESTS_DICTS) +_add_a_b(_APS_TESTS_DICTS) +_add_a_b(_COMPLEX_TESTS_DICTS) + + +def get_tests(collection='original', smoothness=None): + r"""Return the requested collection of test cases, as an array of dicts with subset-specific keys + + Allowed values of collection: + 'original': The original benchmarking functions. + Real-valued functions of real-valued inputs on an interval with a zero. + f1, .., f3 are continuous and infinitely differentiable + f4 has a single discontinuity at the root + f5 has a root at 1 replacing a 1st order pole + f6 is randomly positive on one side of the root, randomly negative on the other + 'aps': The test problems in the TOMS "Algorithm 748: Enclosing Zeros of Continuous Functions" + paper by Alefeld, Potra and Shi. Real-valued functions of + real-valued inputs on an interval with a zero. + Suitable for methods which start with an enclosing interval, and + derivatives up to 2nd order. + 'complex': Some complex-valued functions of complex-valued inputs. + No enclosing bracket is provided. + Suitable for methods which use one or more starting values, and + derivatives up to 2nd order. 
+ + The dictionary keys will be a subset of + ["f", "fprime", "fprime2", "args", "bracket", "a", b", "smoothness", "x0", "x1", "root", "ID"] + """ # noqa: E501 + collection = collection or "original" + subsets = {"aps": _APS_TESTS_DICTS, + "complex": _COMPLEX_TESTS_DICTS, + "original": _ORIGINAL_TESTS_DICTS, + "chandrupatla": _CHANDRUPATLA_TESTS_DICTS} + tests = subsets.get(collection, []) + if smoothness is not None: + tests = [tc for tc in tests if tc['smoothness'] >= smoothness] + return tests + + +# Backwards compatibility +methods = [cc.bisect, cc.ridder, cc.brenth, cc.brentq] +mstrings = ['cc.bisect', 'cc.ridder', 'cc.brenth', 'cc.brentq'] +functions = [f2, f3, f4, f5, f6] +fstrings = ['f2', 'f3', 'f4', 'f5', 'f6'] + +# ################## +# "Chandrupatla" test cases +# Functions and test cases that appear in [2] + +def fun1(x): + return x**3 - 2*x - 5 +fun1.root = 2.0945514815423265 # additional precision using mpmath.findroot + + +def fun2(x): + return 1 - 1/x**2 +fun2.root = 1 + + +def fun3(x): + return (x-3)**3 +fun3.root = 3 + + +def fun4(x): + return 6*(x-2)**5 +fun4.root = 2 + + +def fun5(x): + return x**9 +fun5.root = 0 + + +def fun6(x): + return x**19 +fun6.root = 0 + + +def fun7(x): + xp = array_namespace(x) + return 0 if xp.abs(x) < 3.8e-4 else x*xp.exp(-x**(-2)) +fun7.root = 0 + + +def fun8(x): + xp = array_namespace(x) + xi = 0.61489 + return -(3062*(1-xi)*xp.exp(-x))/(xi + (1-xi)*xp.exp(-x)) - 1013 + 1628/x +fun8.root = 1.0375360332870405 + + +def fun9(x): + xp = array_namespace(x) + return xp.exp(x) - 2 - 0.01/x**2 + .000002/x**3 +fun9.root = 0.7032048403631358 + +# Each "chandropatla" test case has +# - a function, +# - two starting values x0 and x1 +# - the root +# - the number of function evaluations required by Chandrupatla's algorithm +# - an Identifier of the test case +# +# Chandrupatla's is a bracketing algorithm, so a bracketing interval was +# provided in [2] for each test case. No special support for testing with +# secant/Newton/Halley is provided. 
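+#
+# For illustration (a sketch): the recorded function-evaluation counts can be
+# compared against the work done by one of the bracketing solvers in this
+# module:
+#
+#     tc = _CHANDRUPATLA_TESTS_DICTS[0]
+#     a, b = tc["bracket"]
+#     x, r = cc.brentq(tc["f"], a, b, full_output=True)
+#     # r.function_calls is then comparable with tc["nfeval"]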
+ +_CHANDRUPATLA_TESTS_KEYS = ["f", "bracket", "root", "nfeval", "ID"] +_CHANDRUPATLA_TESTS = [ + [fun1, [2, 3], fun1.root, 7], + [fun1, [1, 10], fun1.root, 11], + [fun1, [1, 100], fun1.root, 14], + [fun1, [-1e4, 1e4], fun1.root, 23], + [fun1, [-1e10, 1e10], fun1.root, 43], + [fun2, [0.5, 1.51], fun2.root, 8], + [fun2, [1e-4, 1e4], fun2.root, 22], + [fun2, [1e-6, 1e6], fun2.root, 28], + [fun2, [1e-10, 1e10], fun2.root, 41], + [fun2, [1e-12, 1e12], fun2.root, 48], + [fun3, [0, 5], fun3.root, 21], + [fun3, [-10, 10], fun3.root, 23], + [fun3, [-1e4, 1e4], fun3.root, 36], + [fun3, [-1e6, 1e6], fun3.root, 45], + [fun3, [-1e10, 1e10], fun3.root, 55], + [fun4, [0, 5], fun4.root, 21], + [fun4, [-10, 10], fun4.root, 23], + [fun4, [-1e4, 1e4], fun4.root, 33], + [fun4, [-1e6, 1e6], fun4.root, 43], + [fun4, [-1e10, 1e10], fun4.root, 54], + [fun5, [-1, 4], fun5.root, 21], + [fun5, [-2, 5], fun5.root, 22], + [fun5, [-1, 10], fun5.root, 23], + [fun5, [-5, 50], fun5.root, 25], + [fun5, [-10, 100], fun5.root, 26], + [fun6, [-1., 4.], fun6.root, 21], + [fun6, [-2., 5.], fun6.root, 22], + [fun6, [-1., 10.], fun6.root, 23], + [fun6, [-5., 50.], fun6.root, 25], + [fun6, [-10., 100.], fun6.root, 26], + [fun7, [-1, 4], fun7.root, 8], + [fun7, [-2, 5], fun7.root, 8], + [fun7, [-1, 10], fun7.root, 11], + [fun7, [-5, 50], fun7.root, 18], + [fun7, [-10, 100], fun7.root, 19], + [fun8, [2e-4, 2], fun8.root, 9], + [fun8, [2e-4, 3], fun8.root, 10], + [fun8, [2e-4, 9], fun8.root, 11], + [fun8, [2e-4, 27], fun8.root, 12], + [fun8, [2e-4, 81], fun8.root, 14], + [fun9, [2e-4, 1], fun9.root, 7], + [fun9, [2e-4, 3], fun9.root, 8], + [fun9, [2e-4, 9], fun9.root, 10], + [fun9, [2e-4, 27], fun9.root, 11], + [fun9, [2e-4, 81], fun9.root, 13], +] +_CHANDRUPATLA_TESTS = [test + [f'{test[0].__name__}.{i%5+1}'] + for i, test in enumerate(_CHANDRUPATLA_TESTS)] + +_CHANDRUPATLA_TESTS_DICTS = [dict(zip(_CHANDRUPATLA_TESTS_KEYS, testcase)) + for testcase in _CHANDRUPATLA_TESTS] +_add_a_b(_CHANDRUPATLA_TESTS_DICTS) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_zeros.cpython-310-x86_64-linux-gnu.so b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_zeros.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..7af3971d3d8f44903d9da2be40e4e414cbf6463d Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_zeros.cpython-310-x86_64-linux-gnu.so differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_zeros_py.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_zeros_py.py new file mode 100644 index 0000000000000000000000000000000000000000..2b96902ccbd58ed8fb6de3bbafe551f53fb8bf86 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/_zeros_py.py @@ -0,0 +1,1395 @@ +import warnings +from collections import namedtuple +import operator +from . import _zeros +from ._optimize import OptimizeResult +import numpy as np + + +_iter = 100 +_xtol = 2e-12 +_rtol = 4 * np.finfo(float).eps + +__all__ = ['newton', 'bisect', 'ridder', 'brentq', 'brenth', 'toms748', + 'RootResults'] + +# Must agree with CONVERGED, SIGNERR, CONVERR, ... 
in zeros.h +_ECONVERGED = 0 +_ESIGNERR = -1 # used in _chandrupatla +_ECONVERR = -2 +_EVALUEERR = -3 +_ECALLBACK = -4 +_EINPROGRESS = 1 + +CONVERGED = 'converged' +SIGNERR = 'sign error' +CONVERR = 'convergence error' +VALUEERR = 'value error' +INPROGRESS = 'No error' + + +flag_map = {_ECONVERGED: CONVERGED, _ESIGNERR: SIGNERR, _ECONVERR: CONVERR, + _EVALUEERR: VALUEERR, _EINPROGRESS: INPROGRESS} + + +class RootResults(OptimizeResult): + """Represents the root finding result. + + Attributes + ---------- + root : float + Estimated root location. + iterations : int + Number of iterations needed to find the root. + function_calls : int + Number of times the function was called. + converged : bool + True if the routine converged. + flag : str + Description of the cause of termination. + method : str + Root finding method used. + + """ + + def __init__(self, root, iterations, function_calls, flag, method): + self.root = root + self.iterations = iterations + self.function_calls = function_calls + self.converged = flag == _ECONVERGED + if flag in flag_map: + self.flag = flag_map[flag] + else: + self.flag = flag + self.method = method + + +def results_c(full_output, r, method): + if full_output: + x, funcalls, iterations, flag = r + results = RootResults(root=x, + iterations=iterations, + function_calls=funcalls, + flag=flag, method=method) + return x, results + else: + return r + + +def _results_select(full_output, r, method): + """Select from a tuple of (root, funccalls, iterations, flag)""" + x, funcalls, iterations, flag = r + if full_output: + results = RootResults(root=x, + iterations=iterations, + function_calls=funcalls, + flag=flag, method=method) + return x, results + return x + + +def _wrap_nan_raise(f): + + def f_raise(x, *args): + fx = f(x, *args) + f_raise._function_calls += 1 + if np.isnan(fx): + msg = (f'The function value at x={x} is NaN; ' + 'solver cannot continue.') + err = ValueError(msg) + err._x = x + err._function_calls = f_raise._function_calls + raise err + return fx + + f_raise._function_calls = 0 + return f_raise + + +def newton(func, x0, fprime=None, args=(), tol=1.48e-8, maxiter=50, + fprime2=None, x1=None, rtol=0.0, + full_output=False, disp=True): + """ + Find a root of a real or complex function using the Newton-Raphson + (or secant or Halley's) method. + + Find a root of the scalar-valued function `func` given a nearby scalar + starting point `x0`. + The Newton-Raphson method is used if the derivative `fprime` of `func` + is provided, otherwise the secant method is used. If the second order + derivative `fprime2` of `func` is also provided, then Halley's method is + used. + + If `x0` is a sequence with more than one item, `newton` returns an array: + the roots of the function from each (scalar) starting point in `x0`. + In this case, `func` must be vectorized to return a sequence or array of + the same shape as its first argument. If `fprime` (`fprime2`) is given, + then its return must also have the same shape: each element is the first + (second) derivative of `func` with respect to its only variable evaluated + at each element of its first argument. + + `newton` is for finding roots of a scalar-valued functions of a single + variable. For problems involving several variables, see `root`. + + Parameters + ---------- + func : callable + The function whose root is wanted. It must be a function of a + single variable of the form ``f(x,a,b,c...)``, where ``a,b,c...`` + are extra arguments that can be passed in the `args` parameter. 
+ x0 : float, sequence, or ndarray + An initial estimate of the root that should be somewhere near the + actual root. If not scalar, then `func` must be vectorized and return + a sequence or array of the same shape as its first argument. + fprime : callable, optional + The derivative of the function when available and convenient. If it + is None (default), then the secant method is used. + args : tuple, optional + Extra arguments to be used in the function call. + tol : float, optional + The allowable error of the root's value. If `func` is complex-valued, + a larger `tol` is recommended as both the real and imaginary parts + of `x` contribute to ``|x - x0|``. + maxiter : int, optional + Maximum number of iterations. + fprime2 : callable, optional + The second order derivative of the function when available and + convenient. If it is None (default), then the normal Newton-Raphson + or the secant method is used. If it is not None, then Halley's method + is used. + x1 : float, optional + Another estimate of the root that should be somewhere near the + actual root. Used if `fprime` is not provided. + rtol : float, optional + Tolerance (relative) for termination. + full_output : bool, optional + If `full_output` is False (default), the root is returned. + If True and `x0` is scalar, the return value is ``(x, r)``, where ``x`` + is the root and ``r`` is a `RootResults` object. + If True and `x0` is non-scalar, the return value is ``(x, converged, + zero_der)`` (see Returns section for details). + disp : bool, optional + If True, raise a RuntimeError if the algorithm didn't converge, with + the error message containing the number of iterations and current + function value. Otherwise, the convergence status is recorded in a + `RootResults` return object. + Ignored if `x0` is not scalar. + *Note: this has little to do with displaying, however, + the `disp` keyword cannot be renamed for backwards compatibility.* + + Returns + ------- + root : float, sequence, or ndarray + Estimated location where function is zero. + r : `RootResults`, optional + Present if ``full_output=True`` and `x0` is scalar. + Object containing information about the convergence. In particular, + ``r.converged`` is True if the routine converged. + converged : ndarray of bool, optional + Present if ``full_output=True`` and `x0` is non-scalar. + For vector functions, indicates which elements converged successfully. + zero_der : ndarray of bool, optional + Present if ``full_output=True`` and `x0` is non-scalar. + For vector functions, indicates which elements had a zero derivative. + + See Also + -------- + root_scalar : interface to root solvers for scalar functions + root : interface to root solvers for multi-input, multi-output functions + + Notes + ----- + The convergence rate of the Newton-Raphson method is quadratic, + the Halley method is cubic, and the secant method is + sub-quadratic. This means that if the function is well-behaved + the actual error in the estimated root after the nth iteration + is approximately the square (cube for Halley) of the error + after the (n-1)th step. However, the stopping criterion used + here is the step size and there is no guarantee that a root + has been found. Consequently, the result should be verified. + Safer algorithms are brentq, brenth, ridder, and bisect, + but they all require that the root first be bracketed in an + interval where the function changes sign. The brentq algorithm + is recommended for general use in one dimensional problems + when such an interval has been found. 
+ + When `newton` is used with arrays, it is best suited for the following + types of problems: + + * The initial guesses, `x0`, are all relatively the same distance from + the roots. + * Some or all of the extra arguments, `args`, are also arrays so that a + class of similar problems can be solved together. + * The size of the initial guesses, `x0`, is larger than O(100) elements. + Otherwise, a naive loop may perform as well or better than a vector. + + Examples + -------- + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> from scipy import optimize + + >>> def f(x): + ... return (x**3 - 1) # only one real root at x = 1 + + ``fprime`` is not provided, use the secant method: + + >>> root = optimize.newton(f, 1.5) + >>> root + 1.0000000000000016 + >>> root = optimize.newton(f, 1.5, fprime2=lambda x: 6 * x) + >>> root + 1.0000000000000016 + + Only ``fprime`` is provided, use the Newton-Raphson method: + + >>> root = optimize.newton(f, 1.5, fprime=lambda x: 3 * x**2) + >>> root + 1.0 + + Both ``fprime2`` and ``fprime`` are provided, use Halley's method: + + >>> root = optimize.newton(f, 1.5, fprime=lambda x: 3 * x**2, + ... fprime2=lambda x: 6 * x) + >>> root + 1.0 + + When we want to find roots for a set of related starting values and/or + function parameters, we can provide both of those as an array of inputs: + + >>> f = lambda x, a: x**3 - a + >>> fder = lambda x, a: 3 * x**2 + >>> rng = np.random.default_rng() + >>> x = rng.standard_normal(100) + >>> a = np.arange(-50, 50) + >>> vec_res = optimize.newton(f, x, fprime=fder, args=(a, ), maxiter=200) + + The above is the equivalent of solving for each value in ``(x, a)`` + separately in a for-loop, just faster: + + >>> loop_res = [optimize.newton(f, x0, fprime=fder, args=(a0,), + ... maxiter=200) + ... for x0, a0 in zip(x, a)] + >>> np.allclose(vec_res, loop_res) + True + + Plot the results found for all values of ``a``: + + >>> analytical_result = np.sign(a) * np.abs(a)**(1/3) + >>> fig, ax = plt.subplots() + >>> ax.plot(a, analytical_result, 'o') + >>> ax.plot(a, vec_res, '.') + >>> ax.set_xlabel('$a$') + >>> ax.set_ylabel('$x$ where $f(x, a)=0$') + >>> plt.show() + + """ + if tol <= 0: + raise ValueError(f"tol too small ({tol:g} <= 0)") + maxiter = operator.index(maxiter) + if maxiter < 1: + raise ValueError("maxiter must be greater than 0") + if np.size(x0) > 1: + return _array_newton(func, x0, fprime, args, tol, maxiter, fprime2, + full_output) + + # Convert to float (don't use float(x0); this works also for complex x0) + # Use np.asarray because we want x0 to be a numpy object, not a Python + # object. e.g. np.complex(1+1j) > 0 is possible, but (1 + 1j) > 0 raises + # a TypeError + x0 = np.asarray(x0)[()] * 1.0 + p0 = x0 + funcalls = 0 + if fprime is not None: + # Newton-Raphson method + method = "newton" + for itr in range(maxiter): + # first evaluate fval + fval = func(p0, *args) + funcalls += 1 + # If fval is 0, a root has been found, then terminate + if fval == 0: + return _results_select( + full_output, (p0, funcalls, itr, _ECONVERGED), method) + fder = fprime(p0, *args) + funcalls += 1 + if fder == 0: + msg = "Derivative was zero." + if disp: + msg += ( + " Failed to converge after %d iterations, value is %s." 
+ % (itr + 1, p0)) + raise RuntimeError(msg) + warnings.warn(msg, RuntimeWarning, stacklevel=2) + return _results_select( + full_output, (p0, funcalls, itr + 1, _ECONVERR), method) + newton_step = fval / fder + if fprime2: + fder2 = fprime2(p0, *args) + funcalls += 1 + method = "halley" + # Halley's method: + # newton_step /= (1.0 - 0.5 * newton_step * fder2 / fder) + # Only do it if denominator stays close enough to 1 + # Rationale: If 1-adj < 0, then Halley sends x in the + # opposite direction to Newton. Doesn't happen if x is close + # enough to root. + adj = newton_step * fder2 / fder / 2 + if np.abs(adj) < 1: + newton_step /= 1.0 - adj + p = p0 - newton_step + if np.isclose(p, p0, rtol=rtol, atol=tol): + return _results_select( + full_output, (p, funcalls, itr + 1, _ECONVERGED), method) + p0 = p + else: + # Secant method + method = "secant" + if x1 is not None: + if x1 == x0: + raise ValueError("x1 and x0 must be different") + p1 = x1 + else: + eps = 1e-4 + p1 = x0 * (1 + eps) + p1 += (eps if p1 >= 0 else -eps) + q0 = func(p0, *args) + funcalls += 1 + q1 = func(p1, *args) + funcalls += 1 + if abs(q1) < abs(q0): + p0, p1, q0, q1 = p1, p0, q1, q0 + for itr in range(maxiter): + if q1 == q0: + if p1 != p0: + msg = f"Tolerance of {p1 - p0} reached." + if disp: + msg += ( + " Failed to converge after %d iterations, value is %s." + % (itr + 1, p1)) + raise RuntimeError(msg) + warnings.warn(msg, RuntimeWarning, stacklevel=2) + p = (p1 + p0) / 2.0 + return _results_select( + full_output, (p, funcalls, itr + 1, _ECONVERR), method) + else: + if abs(q1) > abs(q0): + p = (-q0 / q1 * p1 + p0) / (1 - q0 / q1) + else: + p = (-q1 / q0 * p0 + p1) / (1 - q1 / q0) + if np.isclose(p, p1, rtol=rtol, atol=tol): + return _results_select( + full_output, (p, funcalls, itr + 1, _ECONVERGED), method) + p0, q0 = p1, q1 + p1 = p + q1 = func(p1, *args) + funcalls += 1 + + if disp: + msg = ("Failed to converge after %d iterations, value is %s." + % (itr + 1, p)) + raise RuntimeError(msg) + + return _results_select(full_output, (p, funcalls, itr + 1, _ECONVERR), method) + + +def _array_newton(func, x0, fprime, args, tol, maxiter, fprime2, full_output): + """ + A vectorized version of Newton, Halley, and secant methods for arrays. + + Do not use this method directly. This method is called from `newton` + when ``np.size(x0) > 1`` is ``True``. For docstring, see `newton`. + """ + # Explicitly copy `x0` as `p` will be modified inplace, but the + # user's array should not be altered. 
+ p = np.array(x0, copy=True) + + failures = np.ones_like(p, dtype=bool) + nz_der = np.ones_like(failures) + if fprime is not None: + # Newton-Raphson method + for iteration in range(maxiter): + # first evaluate fval + fval = np.asarray(func(p, *args)) + # If all fval are 0, all roots have been found, then terminate + if not fval.any(): + failures = fval.astype(bool) + break + fder = np.asarray(fprime(p, *args)) + nz_der = (fder != 0) + # stop iterating if all derivatives are zero + if not nz_der.any(): + break + # Newton step + dp = fval[nz_der] / fder[nz_der] + if fprime2 is not None: + fder2 = np.asarray(fprime2(p, *args)) + dp = dp / (1.0 - 0.5 * dp * fder2[nz_der] / fder[nz_der]) + # only update nonzero derivatives + p = np.asarray(p, dtype=np.result_type(p, dp, np.float64)) + p[nz_der] -= dp + failures[nz_der] = np.abs(dp) >= tol # items not yet converged + # stop iterating if there aren't any failures, not incl zero der + if not failures[nz_der].any(): + break + else: + # Secant method + dx = np.finfo(float).eps**0.33 + p1 = p * (1 + dx) + np.where(p >= 0, dx, -dx) + q0 = np.asarray(func(p, *args)) + q1 = np.asarray(func(p1, *args)) + active = np.ones_like(p, dtype=bool) + for iteration in range(maxiter): + nz_der = (q1 != q0) + # stop iterating if all derivatives are zero + if not nz_der.any(): + p = (p1 + p) / 2.0 + break + # Secant Step + dp = (q1 * (p1 - p))[nz_der] / (q1 - q0)[nz_der] + # only update nonzero derivatives + p = np.asarray(p, dtype=np.result_type(p, p1, dp, np.float64)) + p[nz_der] = p1[nz_der] - dp + active_zero_der = ~nz_der & active + p[active_zero_der] = (p1 + p)[active_zero_der] / 2.0 + active &= nz_der # don't assign zero derivatives again + failures[nz_der] = np.abs(dp) >= tol # not yet converged + # stop iterating if there aren't any failures, not incl zero der + if not failures[nz_der].any(): + break + p1, p = p, p1 + q0 = q1 + q1 = np.asarray(func(p1, *args)) + + zero_der = ~nz_der & failures # don't include converged with zero-ders + if zero_der.any(): + # Secant warnings + if fprime is None: + nonzero_dp = (p1 != p) + # non-zero dp, but infinite newton step + zero_der_nz_dp = (zero_der & nonzero_dp) + if zero_der_nz_dp.any(): + rms = np.sqrt( + sum((p1[zero_der_nz_dp] - p[zero_der_nz_dp]) ** 2) + ) + warnings.warn(f'RMS of {rms:g} reached', RuntimeWarning, stacklevel=3) + # Newton or Halley warnings + else: + all_or_some = 'all' if zero_der.all() else 'some' + msg = f'{all_or_some:s} derivatives were zero' + warnings.warn(msg, RuntimeWarning, stacklevel=3) + elif failures.any(): + all_or_some = 'all' if failures.all() else 'some' + msg = f'{all_or_some:s} failed to converge after {maxiter:d} iterations' + if failures.all(): + raise RuntimeError(msg) + warnings.warn(msg, RuntimeWarning, stacklevel=3) + + if full_output: + result = namedtuple('result', ('root', 'converged', 'zero_der')) + p = result(p, ~failures, zero_der) + + return p + + +def bisect(f, a, b, args=(), + xtol=_xtol, rtol=_rtol, maxiter=_iter, + full_output=False, disp=True): + """ + Find root of a function within an interval using bisection. + + Basic bisection routine to find a root of the function `f` between the + arguments `a` and `b`. `f(a)` and `f(b)` cannot have the same signs. + Slow but sure. + + Parameters + ---------- + f : function + Python function returning a number. `f` must be continuous, and + f(a) and f(b) must have opposite signs. + a : scalar + One end of the bracketing interval [a,b]. + b : scalar + The other end of the bracketing interval [a,b]. 
+ xtol : number, optional + The computed root ``x0`` will satisfy ``np.allclose(x, x0, + atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The + parameter must be positive. + rtol : number, optional + The computed root ``x0`` will satisfy ``np.allclose(x, x0, + atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The + parameter cannot be smaller than its default value of + ``4*np.finfo(float).eps``. + maxiter : int, optional + If convergence is not achieved in `maxiter` iterations, an error is + raised. Must be >= 0. + args : tuple, optional + Containing extra arguments for the function `f`. + `f` is called by ``apply(f, (x)+args)``. + full_output : bool, optional + If `full_output` is False, the root is returned. If `full_output` is + True, the return value is ``(x, r)``, where x is the root, and r is + a `RootResults` object. + disp : bool, optional + If True, raise RuntimeError if the algorithm didn't converge. + Otherwise, the convergence status is recorded in a `RootResults` + return object. + + Returns + ------- + root : float + Root of `f` between `a` and `b`. + r : `RootResults` (present if ``full_output = True``) + Object containing information about the convergence. In particular, + ``r.converged`` is True if the routine converged. + + Examples + -------- + + >>> def f(x): + ... return (x**2 - 1) + + >>> from scipy import optimize + + >>> root = optimize.bisect(f, 0, 2) + >>> root + 1.0 + + >>> root = optimize.bisect(f, -2, 0) + >>> root + -1.0 + + See Also + -------- + brentq, brenth, bisect, newton + fixed_point : scalar fixed-point finder + fsolve : n-dimensional root-finding + + """ + if not isinstance(args, tuple): + args = (args,) + maxiter = operator.index(maxiter) + if xtol <= 0: + raise ValueError(f"xtol too small ({xtol:g} <= 0)") + if rtol < _rtol: + raise ValueError(f"rtol too small ({rtol:g} < {_rtol:g})") + f = _wrap_nan_raise(f) + r = _zeros._bisect(f, a, b, xtol, rtol, maxiter, args, full_output, disp) + return results_c(full_output, r, "bisect") + + +def ridder(f, a, b, args=(), + xtol=_xtol, rtol=_rtol, maxiter=_iter, + full_output=False, disp=True): + """ + Find a root of a function in an interval using Ridder's method. + + Parameters + ---------- + f : function + Python function returning a number. f must be continuous, and f(a) and + f(b) must have opposite signs. + a : scalar + One end of the bracketing interval [a,b]. + b : scalar + The other end of the bracketing interval [a,b]. + xtol : number, optional + The computed root ``x0`` will satisfy ``np.allclose(x, x0, + atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The + parameter must be positive. + rtol : number, optional + The computed root ``x0`` will satisfy ``np.allclose(x, x0, + atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The + parameter cannot be smaller than its default value of + ``4*np.finfo(float).eps``. + maxiter : int, optional + If convergence is not achieved in `maxiter` iterations, an error is + raised. Must be >= 0. + args : tuple, optional + Containing extra arguments for the function `f`. + `f` is called by ``apply(f, (x)+args)``. + full_output : bool, optional + If `full_output` is False, the root is returned. If `full_output` is + True, the return value is ``(x, r)``, where `x` is the root, and `r` is + a `RootResults` object. + disp : bool, optional + If True, raise RuntimeError if the algorithm didn't converge. + Otherwise, the convergence status is recorded in any `RootResults` + return object. 
+ + Returns + ------- + root : float + Root of `f` between `a` and `b`. + r : `RootResults` (present if ``full_output = True``) + Object containing information about the convergence. + In particular, ``r.converged`` is True if the routine converged. + + See Also + -------- + brentq, brenth, bisect, newton : 1-D root-finding + fixed_point : scalar fixed-point finder + + Notes + ----- + Uses [Ridders1979]_ method to find a root of the function `f` between the + arguments `a` and `b`. Ridders' method is faster than bisection, but not + generally as fast as the Brent routines. [Ridders1979]_ provides the + classic description and source of the algorithm. A description can also be + found in any recent edition of Numerical Recipes. + + The routine used here diverges slightly from standard presentations in + order to be a bit more careful of tolerance. + + References + ---------- + .. [Ridders1979] + Ridders, C. F. J. "A New Algorithm for Computing a + Single Root of a Real Continuous Function." + IEEE Trans. Circuits Systems 26, 979-980, 1979. + + Examples + -------- + + >>> def f(x): + ... return (x**2 - 1) + + >>> from scipy import optimize + + >>> root = optimize.ridder(f, 0, 2) + >>> root + 1.0 + + >>> root = optimize.ridder(f, -2, 0) + >>> root + -1.0 + """ + if not isinstance(args, tuple): + args = (args,) + maxiter = operator.index(maxiter) + if xtol <= 0: + raise ValueError(f"xtol too small ({xtol:g} <= 0)") + if rtol < _rtol: + raise ValueError(f"rtol too small ({rtol:g} < {_rtol:g})") + f = _wrap_nan_raise(f) + r = _zeros._ridder(f, a, b, xtol, rtol, maxiter, args, full_output, disp) + return results_c(full_output, r, "ridder") + + +def brentq(f, a, b, args=(), + xtol=_xtol, rtol=_rtol, maxiter=_iter, + full_output=False, disp=True): + """ + Find a root of a function in a bracketing interval using Brent's method. + + Uses the classic Brent's method to find a root of the function `f` on + the sign changing interval [a , b]. Generally considered the best of the + rootfinding routines here. It is a safe version of the secant method that + uses inverse quadratic extrapolation. Brent's method combines root + bracketing, interval bisection, and inverse quadratic interpolation. It is + sometimes known as the van Wijngaarden-Dekker-Brent method. Brent (1973) + claims convergence is guaranteed for functions computable within [a,b]. + + [Brent1973]_ provides the classic description of the algorithm. Another + description can be found in a recent edition of Numerical Recipes, including + [PressEtal1992]_. A third description is at + http://mathworld.wolfram.com/BrentsMethod.html. It should be easy to + understand the algorithm just by reading our code. Our code diverges a bit + from standard presentations: we choose a different formula for the + extrapolation step. + + Parameters + ---------- + f : function + Python function returning a number. The function :math:`f` + must be continuous, and :math:`f(a)` and :math:`f(b)` must + have opposite signs. + a : scalar + One end of the bracketing interval :math:`[a, b]`. + b : scalar + The other end of the bracketing interval :math:`[a, b]`. + xtol : number, optional + The computed root ``x0`` will satisfy ``np.allclose(x, x0, + atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The + parameter must be positive. For nice functions, Brent's + method will often satisfy the above condition with ``xtol/2`` + and ``rtol/2``. 
[Brent1973]_ + rtol : number, optional + The computed root ``x0`` will satisfy ``np.allclose(x, x0, + atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The + parameter cannot be smaller than its default value of + ``4*np.finfo(float).eps``. For nice functions, Brent's + method will often satisfy the above condition with ``xtol/2`` + and ``rtol/2``. [Brent1973]_ + maxiter : int, optional + If convergence is not achieved in `maxiter` iterations, an error is + raised. Must be >= 0. + args : tuple, optional + Containing extra arguments for the function `f`. + `f` is called by ``apply(f, (x)+args)``. + full_output : bool, optional + If `full_output` is False, the root is returned. If `full_output` is + True, the return value is ``(x, r)``, where `x` is the root, and `r` is + a `RootResults` object. + disp : bool, optional + If True, raise RuntimeError if the algorithm didn't converge. + Otherwise, the convergence status is recorded in any `RootResults` + return object. + + Returns + ------- + root : float + Root of `f` between `a` and `b`. + r : `RootResults` (present if ``full_output = True``) + Object containing information about the convergence. In particular, + ``r.converged`` is True if the routine converged. + + See Also + -------- + fmin, fmin_powell, fmin_cg, fmin_bfgs, fmin_ncg : multivariate local optimizers + leastsq : nonlinear least squares minimizer + fmin_l_bfgs_b, fmin_tnc, fmin_cobyla : constrained multivariate optimizers + basinhopping, differential_evolution, brute : global optimizers + fminbound, brent, golden, bracket : local scalar minimizers + fsolve : N-D root-finding + brenth, ridder, bisect, newton : 1-D root-finding + fixed_point : scalar fixed-point finder + + Notes + ----- + `f` must be continuous. f(a) and f(b) must have opposite signs. + + References + ---------- + .. [Brent1973] + Brent, R. P., + *Algorithms for Minimization Without Derivatives*. + Englewood Cliffs, NJ: Prentice-Hall, 1973. Ch. 3-4. + + .. [PressEtal1992] + Press, W. H.; Flannery, B. P.; Teukolsky, S. A.; and Vetterling, W. T. + *Numerical Recipes in FORTRAN: The Art of Scientific Computing*, 2nd ed. + Cambridge, England: Cambridge University Press, pp. 352-355, 1992. + Section 9.3: "Van Wijngaarden-Dekker-Brent Method." + + Examples + -------- + >>> def f(x): + ... return (x**2 - 1) + + >>> from scipy import optimize + + >>> root = optimize.brentq(f, -2, 0) + >>> root + -1.0 + + >>> root = optimize.brentq(f, 0, 2) + >>> root + 1.0 + """ + if not isinstance(args, tuple): + args = (args,) + maxiter = operator.index(maxiter) + if xtol <= 0: + raise ValueError(f"xtol too small ({xtol:g} <= 0)") + if rtol < _rtol: + raise ValueError(f"rtol too small ({rtol:g} < {_rtol:g})") + f = _wrap_nan_raise(f) + r = _zeros._brentq(f, a, b, xtol, rtol, maxiter, args, full_output, disp) + return results_c(full_output, r, "brentq") + + +def brenth(f, a, b, args=(), + xtol=_xtol, rtol=_rtol, maxiter=_iter, + full_output=False, disp=True): + """Find a root of a function in a bracketing interval using Brent's + method with hyperbolic extrapolation. + + A variation on the classic Brent routine to find a root of the function f + between the arguments a and b that uses hyperbolic extrapolation instead of + inverse quadratic extrapolation. Bus & Dekker (1975) guarantee convergence + for this method, claiming that the upper bound of function evaluations here + is 4 or 5 times that of bisection. + f(a) and f(b) cannot have the same signs. Generally, on a par with the + brent routine, but not as heavily tested. 
It is a safe version of the + secant method that uses hyperbolic extrapolation. + The version here is by Chuck Harris, and implements Algorithm M of + [BusAndDekker1975]_, where further details (convergence properties, + additional remarks and such) can be found + + Parameters + ---------- + f : function + Python function returning a number. f must be continuous, and f(a) and + f(b) must have opposite signs. + a : scalar + One end of the bracketing interval [a,b]. + b : scalar + The other end of the bracketing interval [a,b]. + xtol : number, optional + The computed root ``x0`` will satisfy ``np.allclose(x, x0, + atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The + parameter must be positive. As with `brentq`, for nice + functions the method will often satisfy the above condition + with ``xtol/2`` and ``rtol/2``. + rtol : number, optional + The computed root ``x0`` will satisfy ``np.allclose(x, x0, + atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The + parameter cannot be smaller than its default value of + ``4*np.finfo(float).eps``. As with `brentq`, for nice functions + the method will often satisfy the above condition with + ``xtol/2`` and ``rtol/2``. + maxiter : int, optional + If convergence is not achieved in `maxiter` iterations, an error is + raised. Must be >= 0. + args : tuple, optional + Containing extra arguments for the function `f`. + `f` is called by ``apply(f, (x)+args)``. + full_output : bool, optional + If `full_output` is False, the root is returned. If `full_output` is + True, the return value is ``(x, r)``, where `x` is the root, and `r` is + a `RootResults` object. + disp : bool, optional + If True, raise RuntimeError if the algorithm didn't converge. + Otherwise, the convergence status is recorded in any `RootResults` + return object. + + Returns + ------- + root : float + Root of `f` between `a` and `b`. + r : `RootResults` (present if ``full_output = True``) + Object containing information about the convergence. In particular, + ``r.converged`` is True if the routine converged. + + See Also + -------- + fmin, fmin_powell, fmin_cg, fmin_bfgs, fmin_ncg : multivariate local optimizers + leastsq : nonlinear least squares minimizer + fmin_l_bfgs_b, fmin_tnc, fmin_cobyla : constrained multivariate optimizers + basinhopping, differential_evolution, brute : global optimizers + fminbound, brent, golden, bracket : local scalar minimizers + fsolve : N-D root-finding + brentq, ridder, bisect, newton : 1-D root-finding + fixed_point : scalar fixed-point finder + + References + ---------- + .. [BusAndDekker1975] + Bus, J. C. P., Dekker, T. J., + "Two Efficient Algorithms with Guaranteed Convergence for Finding a Zero + of a Function", ACM Transactions on Mathematical Software, Vol. 1, Issue + 4, Dec. 1975, pp. 330-345. Section 3: "Algorithm M". + :doi:`10.1145/355656.355659` + + Examples + -------- + >>> def f(x): + ... 
return (x**2 - 1) + + >>> from scipy import optimize + + >>> root = optimize.brenth(f, -2, 0) + >>> root + -1.0 + + >>> root = optimize.brenth(f, 0, 2) + >>> root + 1.0 + + """ + if not isinstance(args, tuple): + args = (args,) + maxiter = operator.index(maxiter) + if xtol <= 0: + raise ValueError(f"xtol too small ({xtol:g} <= 0)") + if rtol < _rtol: + raise ValueError(f"rtol too small ({rtol:g} < {_rtol:g})") + f = _wrap_nan_raise(f) + r = _zeros._brenth(f, a, b, xtol, rtol, maxiter, args, full_output, disp) + return results_c(full_output, r, "brenth") + + +################################ +# TOMS "Algorithm 748: Enclosing Zeros of Continuous Functions", by +# Alefeld, G. E. and Potra, F. A. and Shi, Yixun, +# See [1] + + +def _notclose(fs, rtol=_rtol, atol=_xtol): + # Ensure not None, not 0, all finite, and not very close to each other + notclosefvals = ( + all(fs) and all(np.isfinite(fs)) and + not any(any(np.isclose(_f, fs[i + 1:], rtol=rtol, atol=atol)) + for i, _f in enumerate(fs[:-1]))) + return notclosefvals + + +def _secant(xvals, fvals): + """Perform a secant step, taking a little care""" + # Secant has many "mathematically" equivalent formulations + # x2 = x0 - (x1 - x0)/(f1 - f0) * f0 + # = x1 - (x1 - x0)/(f1 - f0) * f1 + # = (-x1 * f0 + x0 * f1) / (f1 - f0) + # = (-f0 / f1 * x1 + x0) / (1 - f0 / f1) + # = (-f1 / f0 * x0 + x1) / (1 - f1 / f0) + x0, x1 = xvals[:2] + f0, f1 = fvals[:2] + if f0 == f1: + return np.nan + if np.abs(f1) > np.abs(f0): + x2 = (-f0 / f1 * x1 + x0) / (1 - f0 / f1) + else: + x2 = (-f1 / f0 * x0 + x1) / (1 - f1 / f0) + return x2 + + +def _update_bracket(ab, fab, c, fc): + """Update a bracket given (c, fc), return the discarded endpoints.""" + fa, fb = fab + idx = (0 if np.sign(fa) * np.sign(fc) > 0 else 1) + rx, rfx = ab[idx], fab[idx] + fab[idx] = fc + ab[idx] = c + return rx, rfx + + +def _compute_divided_differences(xvals, fvals, N=None, full=True, + forward=True): + """Return a matrix of divided differences for the xvals, fvals pairs + + DD[i, j] = f[x_{i-j}, ..., x_i] for 0 <= j <= i + + If full is False, just return the main diagonal(or last row): + f[a], f[a, b] and f[a, b, c]. + If forward is False, return f[c], f[b, c], f[a, b, c].""" + if full: + if forward: + xvals = np.asarray(xvals) + else: + xvals = np.array(xvals)[::-1] + M = len(xvals) + N = M if N is None else min(N, M) + DD = np.zeros([M, N]) + DD[:, 0] = fvals[:] + for i in range(1, N): + DD[i:, i] = (np.diff(DD[i - 1:, i - 1]) / + (xvals[i:] - xvals[:M - i])) + return DD + + xvals = np.asarray(xvals) + dd = np.array(fvals) + row = np.array(fvals) + idx2Use = (0 if forward else -1) + dd[0] = fvals[idx2Use] + for i in range(1, len(xvals)): + denom = xvals[i:i + len(row) - 1] - xvals[:len(row) - 1] + row = np.diff(row)[:] / denom + dd[i] = row[idx2Use] + return dd + + +def _interpolated_poly(xvals, fvals, x): + """Compute p(x) for the polynomial passing through the specified locations. 
+ + Use Neville's algorithm to compute p(x) where p is the minimal degree + polynomial passing through the points xvals, fvals""" + xvals = np.asarray(xvals) + N = len(xvals) + Q = np.zeros([N, N]) + D = np.zeros([N, N]) + Q[:, 0] = fvals[:] + D[:, 0] = fvals[:] + for k in range(1, N): + alpha = D[k:, k - 1] - Q[k - 1:N - 1, k - 1] + diffik = xvals[0:N - k] - xvals[k:N] + Q[k:, k] = (xvals[k:] - x) / diffik * alpha + D[k:, k] = (xvals[:N - k] - x) / diffik * alpha + # Expect Q[-1, 1:] to be small relative to Q[-1, 0] as x approaches a root + return np.sum(Q[-1, 1:]) + Q[-1, 0] + + +def _inverse_poly_zero(a, b, c, d, fa, fb, fc, fd): + """Inverse cubic interpolation f-values -> x-values + + Given four points (fa, a), (fb, b), (fc, c), (fd, d) with + fa, fb, fc, fd all distinct, find poly IP(y) through the 4 points + and compute x=IP(0). + """ + return _interpolated_poly([fa, fb, fc, fd], [a, b, c, d], 0) + + +def _newton_quadratic(ab, fab, d, fd, k): + """Apply Newton-Raphson like steps, using divided differences to approximate f' + + ab is a real interval [a, b] containing a root, + fab holds the real values of f(a), f(b) + d is a real number outside [ab, b] + k is the number of steps to apply + """ + a, b = ab + fa, fb = fab + _, B, A = _compute_divided_differences([a, b, d], [fa, fb, fd], + forward=True, full=False) + + # _P is the quadratic polynomial through the 3 points + def _P(x): + # Horner evaluation of fa + B * (x - a) + A * (x - a) * (x - b) + return (A * (x - b) + B) * (x - a) + fa + + if A == 0: + r = a - fa / B + else: + r = (a if np.sign(A) * np.sign(fa) > 0 else b) + # Apply k Newton-Raphson steps to _P(x), starting from x=r + for i in range(k): + r1 = r - _P(r) / (B + A * (2 * r - a - b)) + if not (ab[0] < r1 < ab[1]): + if (ab[0] < r < ab[1]): + return r + r = sum(ab) / 2.0 + break + r = r1 + + return r + + +class TOMS748Solver: + """Solve f(x, *args) == 0 using Algorithm748 of Alefeld, Potro & Shi. + """ + _MU = 0.5 + _K_MIN = 1 + _K_MAX = 100 # A very high value for real usage. Expect 1, 2, maybe 3. 
+ + def __init__(self): + self.f = None + self.args = None + self.function_calls = 0 + self.iterations = 0 + self.k = 2 + # ab=[a,b] is a global interval containing a root + self.ab = [np.nan, np.nan] + # fab is function values at a, b + self.fab = [np.nan, np.nan] + self.d = None + self.fd = None + self.e = None + self.fe = None + self.disp = False + self.xtol = _xtol + self.rtol = _rtol + self.maxiter = _iter + + def configure(self, xtol, rtol, maxiter, disp, k): + self.disp = disp + self.xtol = xtol + self.rtol = rtol + self.maxiter = maxiter + # Silently replace a low value of k with 1 + self.k = max(k, self._K_MIN) + # Noisily replace a high value of k with self._K_MAX + if self.k > self._K_MAX: + msg = "toms748: Overriding k: ->%d" % self._K_MAX + warnings.warn(msg, RuntimeWarning, stacklevel=3) + self.k = self._K_MAX + + def _callf(self, x, error=True): + """Call the user-supplied function, update book-keeping""" + fx = self.f(x, *self.args) + self.function_calls += 1 + if not np.isfinite(fx) and error: + raise ValueError(f"Invalid function value: f({x:f}) -> {fx} ") + return fx + + def get_result(self, x, flag=_ECONVERGED): + r"""Package the result and statistics into a tuple.""" + return (x, self.function_calls, self.iterations, flag) + + def _update_bracket(self, c, fc): + return _update_bracket(self.ab, self.fab, c, fc) + + def start(self, f, a, b, args=()): + r"""Prepare for the iterations.""" + self.function_calls = 0 + self.iterations = 0 + + self.f = f + self.args = args + self.ab[:] = [a, b] + if not np.isfinite(a) or np.imag(a) != 0: + raise ValueError(f"Invalid x value: {a} ") + if not np.isfinite(b) or np.imag(b) != 0: + raise ValueError(f"Invalid x value: {b} ") + + fa = self._callf(a) + if not np.isfinite(fa) or np.imag(fa) != 0: + raise ValueError(f"Invalid function value: f({a:f}) -> {fa} ") + if fa == 0: + return _ECONVERGED, a + fb = self._callf(b) + if not np.isfinite(fb) or np.imag(fb) != 0: + raise ValueError(f"Invalid function value: f({b:f}) -> {fb} ") + if fb == 0: + return _ECONVERGED, b + + if np.sign(fb) * np.sign(fa) > 0: + raise ValueError("f(a) and f(b) must have different signs, but " + f"f({a:e})={fa:e}, f({b:e})={fb:e} ") + self.fab[:] = [fa, fb] + + return _EINPROGRESS, sum(self.ab) / 2.0 + + def get_status(self): + """Determine the current status.""" + a, b = self.ab[:2] + if np.isclose(a, b, rtol=self.rtol, atol=self.xtol): + return _ECONVERGED, sum(self.ab) / 2.0 + if self.iterations >= self.maxiter: + return _ECONVERR, sum(self.ab) / 2.0 + return _EINPROGRESS, sum(self.ab) / 2.0 + + def iterate(self): + """Perform one step in the algorithm. + + Implements Algorithm 4.1(k=1) or 4.2(k=2) in [APS1995] + """ + self.iterations += 1 + eps = np.finfo(float).eps + d, fd, e, fe = self.d, self.fd, self.e, self.fe + ab_width = self.ab[1] - self.ab[0] # Need the start width below + c = None + + for nsteps in range(2, self.k+2): + # If the f-values are sufficiently separated, perform an inverse + # polynomial interpolation step. Otherwise, nsteps repeats of + # an approximate Newton-Raphson step. 
+ if _notclose(self.fab + [fd, fe], rtol=0, atol=32*eps): + c0 = _inverse_poly_zero(self.ab[0], self.ab[1], d, e, + self.fab[0], self.fab[1], fd, fe) + if self.ab[0] < c0 < self.ab[1]: + c = c0 + if c is None: + c = _newton_quadratic(self.ab, self.fab, d, fd, nsteps) + + fc = self._callf(c) + if fc == 0: + return _ECONVERGED, c + + # re-bracket + e, fe = d, fd + d, fd = self._update_bracket(c, fc) + + # u is the endpoint with the smallest f-value + uix = (0 if np.abs(self.fab[0]) < np.abs(self.fab[1]) else 1) + u, fu = self.ab[uix], self.fab[uix] + + _, A = _compute_divided_differences(self.ab, self.fab, + forward=(uix == 0), full=False) + c = u - 2 * fu / A + if np.abs(c - u) > 0.5 * (self.ab[1] - self.ab[0]): + c = sum(self.ab) / 2.0 + else: + if np.isclose(c, u, rtol=eps, atol=0): + # c didn't change (much). + # Either because the f-values at the endpoints have vastly + # differing magnitudes, or because the root is very close to + # that endpoint + frs = np.frexp(self.fab)[1] + if frs[uix] < frs[1 - uix] - 50: # Differ by more than 2**50 + c = (31 * self.ab[uix] + self.ab[1 - uix]) / 32 + else: + # Make a bigger adjustment, about the + # size of the requested tolerance. + mm = (1 if uix == 0 else -1) + adj = mm * np.abs(c) * self.rtol + mm * self.xtol + c = u + adj + if not self.ab[0] < c < self.ab[1]: + c = sum(self.ab) / 2.0 + + fc = self._callf(c) + if fc == 0: + return _ECONVERGED, c + + e, fe = d, fd + d, fd = self._update_bracket(c, fc) + + # If the width of the new interval did not decrease enough, bisect + if self.ab[1] - self.ab[0] > self._MU * ab_width: + e, fe = d, fd + z = sum(self.ab) / 2.0 + fz = self._callf(z) + if fz == 0: + return _ECONVERGED, z + d, fd = self._update_bracket(z, fz) + + # Record d and e for next iteration + self.d, self.fd = d, fd + self.e, self.fe = e, fe + + status, xn = self.get_status() + return status, xn + + def solve(self, f, a, b, args=(), + xtol=_xtol, rtol=_rtol, k=2, maxiter=_iter, disp=True): + r"""Solve f(x) = 0 given an interval containing a root.""" + self.configure(xtol=xtol, rtol=rtol, maxiter=maxiter, disp=disp, k=k) + status, xn = self.start(f, a, b, args) + if status == _ECONVERGED: + return self.get_result(xn) + + # The first step only has two x-values. + c = _secant(self.ab, self.fab) + if not self.ab[0] < c < self.ab[1]: + c = sum(self.ab) / 2.0 + fc = self._callf(c) + if fc == 0: + return self.get_result(c) + + self.d, self.fd = self._update_bracket(c, fc) + self.e, self.fe = None, None + self.iterations += 1 + + while True: + status, xn = self.iterate() + if status == _ECONVERGED: + return self.get_result(xn) + if status == _ECONVERR: + fmt = "Failed to converge after %d iterations, bracket is %s" + if disp: + msg = fmt % (self.iterations + 1, self.ab) + raise RuntimeError(msg) + return self.get_result(xn, _ECONVERR) + + +def toms748(f, a, b, args=(), k=1, + xtol=_xtol, rtol=_rtol, maxiter=_iter, + full_output=False, disp=True): + """ + Find a root using TOMS Algorithm 748 method. + + Implements the Algorithm 748 method of Alefeld, Potro and Shi to find a + root of the function `f` on the interval ``[a , b]``, where ``f(a)`` and + `f(b)` must have opposite signs. + + It uses a mixture of inverse cubic interpolation and + "Newton-quadratic" steps. [APS1995]. + + Parameters + ---------- + f : function + Python function returning a scalar. The function :math:`f` + must be continuous, and :math:`f(a)` and :math:`f(b)` + have opposite signs. 
+ a : scalar, + lower boundary of the search interval + b : scalar, + upper boundary of the search interval + args : tuple, optional + containing extra arguments for the function `f`. + `f` is called by ``f(x, *args)``. + k : int, optional + The number of Newton quadratic steps to perform each + iteration. ``k>=1``. + xtol : scalar, optional + The computed root ``x0`` will satisfy ``np.allclose(x, x0, + atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The + parameter must be positive. + rtol : scalar, optional + The computed root ``x0`` will satisfy ``np.allclose(x, x0, + atol=xtol, rtol=rtol)``, where ``x`` is the exact root. + maxiter : int, optional + If convergence is not achieved in `maxiter` iterations, an error is + raised. Must be >= 0. + full_output : bool, optional + If `full_output` is False, the root is returned. If `full_output` is + True, the return value is ``(x, r)``, where `x` is the root, and `r` is + a `RootResults` object. + disp : bool, optional + If True, raise RuntimeError if the algorithm didn't converge. + Otherwise, the convergence status is recorded in the `RootResults` + return object. + + Returns + ------- + root : float + Approximate root of `f` + r : `RootResults` (present if ``full_output = True``) + Object containing information about the convergence. In particular, + ``r.converged`` is True if the routine converged. + + See Also + -------- + brentq, brenth, ridder, bisect, newton + fsolve : find roots in N dimensions. + + Notes + ----- + `f` must be continuous. + Algorithm 748 with ``k=2`` is asymptotically the most efficient + algorithm known for finding roots of a four times continuously + differentiable function. + In contrast with Brent's algorithm, which may only decrease the length of + the enclosing bracket on the last step, Algorithm 748 decreases it each + iteration with the same asymptotic efficiency as it finds the root. + + For easy statement of efficiency indices, assume that `f` has 4 + continuous deriviatives. + For ``k=1``, the convergence order is at least 2.7, and with about + asymptotically 2 function evaluations per iteration, the efficiency + index is approximately 1.65. + For ``k=2``, the order is about 4.6 with asymptotically 3 function + evaluations per iteration, and the efficiency index 1.66. + For higher values of `k`, the efficiency index approaches + the kth root of ``(3k-2)``, hence ``k=1`` or ``k=2`` are + usually appropriate. + + References + ---------- + .. [APS1995] + Alefeld, G. E. and Potra, F. A. and Shi, Yixun, + *Algorithm 748: Enclosing Zeros of Continuous Functions*, + ACM Trans. Math. Softw. Volume 221(1995) + doi = {10.1145/210089.210111} + + Examples + -------- + >>> def f(x): + ... 
return (x**3 - 1) # only one real root at x = 1 + + >>> from scipy import optimize + >>> root, results = optimize.toms748(f, 0, 2, full_output=True) + >>> root + 1.0 + >>> results + converged: True + flag: converged + function_calls: 11 + iterations: 5 + root: 1.0 + method: toms748 + """ + if xtol <= 0: + raise ValueError(f"xtol too small ({xtol:g} <= 0)") + if rtol < _rtol / 4: + raise ValueError(f"rtol too small ({rtol:g} < {_rtol/4:g})") + maxiter = operator.index(maxiter) + if maxiter < 1: + raise ValueError("maxiter must be greater than 0") + if not np.isfinite(a): + raise ValueError(f"a is not finite {a}") + if not np.isfinite(b): + raise ValueError(f"b is not finite {b}") + if a >= b: + raise ValueError(f"a and b are not an interval [{a}, {b}]") + if not k >= 1: + raise ValueError(f"k too small ({k} < 1)") + + if not isinstance(args, tuple): + args = (args,) + f = _wrap_nan_raise(f) + solver = TOMS748Solver() + result = solver.solve(f, a, b, args=args, k=k, xtol=xtol, rtol=rtol, + maxiter=maxiter, disp=disp) + x, function_calls, iterations, flag = result + return _results_select(full_output, (x, function_calls, iterations, flag), + "toms748") diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/cobyla.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/cobyla.py new file mode 100644 index 0000000000000000000000000000000000000000..87d111d8fc1634e54d3766a3f1c58abd37ac58cb --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/cobyla.py @@ -0,0 +1,19 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.optimize` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = [ # noqa: F822 + 'OptimizeResult', + 'fmin_cobyla', +] + +def __dir__(): + return __all__ + +def __getattr__(name): + return _sub_module_deprecation(sub_package="optimize", module="cobyla", + private_modules=["_cobyla_py"], all=__all__, + attribute=name) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/cython_optimize.pxd b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/cython_optimize.pxd new file mode 100644 index 0000000000000000000000000000000000000000..d35f8da68b34d3a587f3a99326770d8550a2135c --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/cython_optimize.pxd @@ -0,0 +1,11 @@ +# Public Cython API declarations +# +# See doc/source/dev/contributor/public_cython_api.rst for guidelines + + +# The following cimport statement provides legacy ABI +# support. Changing it causes an ABI forward-compatibility break +# (gh-11793), so we currently leave it as is (no further cimport +# statements should be used in this file). 
+from scipy.optimize.cython_optimize._zeros cimport ( + brentq, brenth, ridder, bisect, zeros_full_output) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/cython_optimize/__init__.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/cython_optimize/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a07250bbeb06542721480c42005307992558fced --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/cython_optimize/__init__.py @@ -0,0 +1,133 @@ +""" +Cython optimize root finding API +================================ +The underlying C functions for the following root finders can be accessed +directly using Cython: + +- `~scipy.optimize.bisect` +- `~scipy.optimize.ridder` +- `~scipy.optimize.brenth` +- `~scipy.optimize.brentq` + +The Cython API for the root finding functions is similar except there is no +``disp`` argument. Import the root finding functions using ``cimport`` from +`scipy.optimize.cython_optimize`. :: + + from scipy.optimize.cython_optimize cimport bisect, ridder, brentq, brenth + + +Callback signature +------------------ +The zeros functions in `~scipy.optimize.cython_optimize` expect a callback that +takes a double for the scalar independent variable as the 1st argument and a +user defined ``struct`` with any extra parameters as the 2nd argument. :: + + double (*callback_type)(double, void*) noexcept + + +Examples +-------- +Usage of `~scipy.optimize.cython_optimize` requires Cython to write callbacks +that are compiled into C. For more information on compiling Cython, see the +`Cython Documentation `_. + +These are the basic steps: + +1. Create a Cython ``.pyx`` file, for example: ``myexample.pyx``. +2. Import the desired root finder from `~scipy.optimize.cython_optimize`. +3. Write the callback function, and call the selected root finding function + passing the callback, any extra arguments, and the other solver + parameters. :: + + from scipy.optimize.cython_optimize cimport brentq + + # import math from Cython + from libc cimport math + + myargs = {'C0': 1.0, 'C1': 0.7} # a dictionary of extra arguments + XLO, XHI = 0.5, 1.0 # lower and upper search boundaries + XTOL, RTOL, MITR = 1e-3, 1e-3, 10 # other solver parameters + + # user-defined struct for extra parameters + ctypedef struct test_params: + double C0 + double C1 + + + # user-defined callback + cdef double f(double x, void *args) noexcept: + cdef test_params *myargs = args + return myargs.C0 - math.exp(-(x - myargs.C1)) + + + # Cython wrapper function + cdef double brentq_wrapper_example(dict args, double xa, double xb, + double xtol, double rtol, int mitr): + # Cython automatically casts dictionary to struct + cdef test_params myargs = args + return brentq( + f, xa, xb, &myargs, xtol, rtol, mitr, NULL) + + + # Python function + def brentq_example(args=myargs, xa=XLO, xb=XHI, xtol=XTOL, rtol=RTOL, + mitr=MITR): + '''Calls Cython wrapper from Python.''' + return brentq_wrapper_example(args, xa, xb, xtol, rtol, mitr) + +4. If you want to call your function from Python, create a Cython wrapper, and + a Python function that calls the wrapper, or use ``cpdef``. Then, in Python, + you can import and run the example. :: + + from myexample import brentq_example + + x = brentq_example() + # 0.6999942848231314 + +5. Create a Cython ``.pxd`` file if you need to export any Cython functions. 
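+
+As a cross-check of what the compiled example computes, the same root can
+be found from plain Python through the `scipy.optimize.brentq` wrapper.
+This is a minimal sketch, assuming only that SciPy is installed; it reuses
+the constants ``C0 = 1.0``, ``C1 = 0.7`` and the bracket and tolerances from
+the steps above, so the result agrees with the Cython version to within
+``XTOL``/``RTOL``. ::
+
+    from math import exp
+
+    from scipy.optimize import brentq
+
+    def f(x, c0, c1):
+        # same residual as the Cython callback: C0 - exp(-(x - C1))
+        return c0 - exp(-(x - c1))
+
+    # bracket [0.5, 1.0] and loose tolerances, mirroring XLO, XHI, XTOL, RTOL, MITR
+    root = brentq(f, 0.5, 1.0, args=(1.0, 0.7), xtol=1e-3, rtol=1e-3, maxiter=10)
+    # root is approximately 0.7; the compiled example prints
+    # 0.6999942848231314 with these same tolerances
+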
+ + +Full output +----------- +The functions in `~scipy.optimize.cython_optimize` can also copy the full +output from the solver to a C ``struct`` that is passed as its last argument. +If you don't want the full output, just pass ``NULL``. The full output +``struct`` must be type ``zeros_full_output``, which is defined in +`scipy.optimize.cython_optimize` with the following fields: + +- ``int funcalls``: number of function calls +- ``int iterations``: number of iterations +- ``int error_num``: error number +- ``double root``: root of function + +The root is copied by `~scipy.optimize.cython_optimize` to the full output +``struct``. An error number of -1 means a sign error, -2 means a convergence +error, and 0 means the solver converged. Continuing from the previous example:: + + from scipy.optimize.cython_optimize cimport zeros_full_output + + + # cython brentq solver with full output + cdef zeros_full_output brentq_full_output_wrapper_example( + dict args, double xa, double xb, double xtol, double rtol, + int mitr): + cdef test_params myargs = args + cdef zeros_full_output my_full_output + # use my_full_output instead of NULL + brentq(f, xa, xb, &myargs, xtol, rtol, mitr, &my_full_output) + return my_full_output + + + # Python function + def brent_full_output_example(args=myargs, xa=XLO, xb=XHI, xtol=XTOL, + rtol=RTOL, mitr=MITR): + '''Returns full output''' + return brentq_full_output_wrapper_example(args, xa, xb, xtol, rtol, + mitr) + + result = brent_full_output_example() + # {'error_num': 0, + # 'funcalls': 6, + # 'iterations': 5, + # 'root': 0.6999942848231314} +""" diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/cython_optimize/_zeros.pxd b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/cython_optimize/_zeros.pxd new file mode 100644 index 0000000000000000000000000000000000000000..d3c9e98f0a24d80d15d1f7052f690d608f66dd80 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/cython_optimize/_zeros.pxd @@ -0,0 +1,33 @@ +# Legacy public Cython API declarations +# +# NOTE: due to the way Cython ABI compatibility works, **no changes +# should be made to this file** --- any API additions/changes should be +# done in `cython_optimize.pxd` (see gh-11793). 
+ +ctypedef double (*callback_type)(double, void*) noexcept + +ctypedef struct zeros_parameters: + callback_type function + void* args + +ctypedef struct zeros_full_output: + int funcalls + int iterations + int error_num + double root + +cdef double bisect(callback_type f, double xa, double xb, void* args, + double xtol, double rtol, int iter, + zeros_full_output *full_output) noexcept nogil + +cdef double ridder(callback_type f, double xa, double xb, void* args, + double xtol, double rtol, int iter, + zeros_full_output *full_output) noexcept nogil + +cdef double brenth(callback_type f, double xa, double xb, void* args, + double xtol, double rtol, int iter, + zeros_full_output *full_output) noexcept nogil + +cdef double brentq(callback_type f, double xa, double xb, void* args, + double xtol, double rtol, int iter, + zeros_full_output *full_output) noexcept nogil diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/cython_optimize/c_zeros.pxd b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/cython_optimize/c_zeros.pxd new file mode 100644 index 0000000000000000000000000000000000000000..0d83c80eb886846ddbbd6927e37e05812911f856 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/cython_optimize/c_zeros.pxd @@ -0,0 +1,26 @@ +cdef extern from "../Zeros/zeros.h": + ctypedef double (*callback_type)(double, void*) noexcept + ctypedef struct scipy_zeros_info: + int funcalls + int iterations + int error_num + +cdef extern from "../Zeros/bisect.c" nogil: + double bisect(callback_type f, double xa, double xb, double xtol, + double rtol, int iter, void *func_data_param, + scipy_zeros_info *solver_stats) + +cdef extern from "../Zeros/ridder.c" nogil: + double ridder(callback_type f, double xa, double xb, double xtol, + double rtol, int iter, void *func_data_param, + scipy_zeros_info *solver_stats) + +cdef extern from "../Zeros/brenth.c" nogil: + double brenth(callback_type f, double xa, double xb, double xtol, + double rtol, int iter, void *func_data_param, + scipy_zeros_info *solver_stats) + +cdef extern from "../Zeros/brentq.c" nogil: + double brentq(callback_type f, double xa, double xb, double xtol, + double rtol, int iter, void *func_data_param, + scipy_zeros_info *solver_stats) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/elementwise.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/elementwise.py new file mode 100644 index 0000000000000000000000000000000000000000..f7be4484626880182a17acf883d72388937578d1 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/elementwise.py @@ -0,0 +1,38 @@ +""" +=================================================================== +Elementwise Scalar Optimization (:mod:`scipy.optimize.elementwise`) +=================================================================== + +.. currentmodule:: scipy.optimize.elementwise + +This module provides a collection of functions for root finding and +minimization of scalar, real-valued functions of one variable. Unlike their +counterparts in the base :mod:`scipy.optimize` namespace, these functions work +elementwise, enabling the solution of many related problems in an efficient, +vectorized call. 
Furthermore, when environment variable ``SCIPY_ARRAY_API=1``, +these functions can accept non-NumPy, array API standard compatible arrays and +perform all calculations using the corresponding array library (e.g. PyTorch, +JAX, CuPy). + +Root finding +============ + +.. autosummary:: + :toctree: generated/ + + find_root + bracket_root + +Minimization +============ + +.. autosummary:: + :toctree: generated/ + + find_minimum + bracket_minimum + +""" +from ._elementwise import find_root, find_minimum, bracket_root, bracket_minimum # noqa: F401, E501 + +__all__ = ["find_root", "find_minimum", "bracket_root", "bracket_minimum"] diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/lbfgsb.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/lbfgsb.py new file mode 100644 index 0000000000000000000000000000000000000000..866407cabb3decf0ff72239e6fd372f69f7550c0 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/lbfgsb.py @@ -0,0 +1,23 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.optimize` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = [ # noqa: F822 + 'LbfgsInvHessProduct', + 'OptimizeResult', + 'fmin_l_bfgs_b', + 'zeros', +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="optimize", module="lbfgsb", + private_modules=["_lbfgsb_py"], all=__all__, + attribute=name) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/linesearch.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/linesearch.py new file mode 100644 index 0000000000000000000000000000000000000000..cb34b25092da34991c868683da3d6a894d1a7f80 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/linesearch.py @@ -0,0 +1,18 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.optimize` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = ["line_search"] # noqa: F822 + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="optimize", module="linesearch", + private_modules=["_linesearch"], all=__all__, + attribute=name) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/minpack.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/minpack.py new file mode 100644 index 0000000000000000000000000000000000000000..29fddef537361d8508e6343d23b2c3c7d6d12ec6 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/minpack.py @@ -0,0 +1,27 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.optimize` namespace for importing the functions +# included below. 
+ +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = [ # noqa: F822 + 'OptimizeResult', + 'OptimizeWarning', + 'curve_fit', + 'fixed_point', + 'fsolve', + 'least_squares', + 'leastsq', + 'zeros', +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="optimize", module="minpack", + private_modules=["_minpack_py"], all=__all__, + attribute=name) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/minpack2.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/minpack2.py new file mode 100644 index 0000000000000000000000000000000000000000..cdb3503e0e1e4c886c89bfb62e6a2efc3ba54549 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/minpack2.py @@ -0,0 +1,17 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.optimize` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + +__all__: list[str] = [] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="optimize", module="minpack2", + private_modules=["_minpack2"], all=__all__, + attribute=name) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/moduleTNC.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/moduleTNC.py new file mode 100644 index 0000000000000000000000000000000000000000..3fc5884ed5c39437b7681395419d641443a1fdb8 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/moduleTNC.py @@ -0,0 +1,19 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.optimize` namespace for importing the functions +# included below. + + +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = [] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="optimize", module="moduleTNC", + private_modules=["_moduleTNC"], all=__all__, + attribute=name) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/nonlin.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/nonlin.py new file mode 100644 index 0000000000000000000000000000000000000000..20b490b40ef790a2943d539790b45fc378df2c76 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/nonlin.py @@ -0,0 +1,29 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.optimize` namespace for importing the functions +# included below. 
+ +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = [ # noqa: F822 + 'BroydenFirst', + 'InverseJacobian', + 'KrylovJacobian', + 'anderson', + 'broyden1', + 'broyden2', + 'diagbroyden', + 'excitingmixing', + 'linearmixing', + 'newton_krylov', +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="optimize", module="nonlin", + private_modules=["_nonlin"], all=__all__, + attribute=name) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/optimize.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/optimize.py new file mode 100644 index 0000000000000000000000000000000000000000..4db770e5f6e921906c916f2650003d92f5507791 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/optimize.py @@ -0,0 +1,40 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.optimize` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = [ # noqa: F822 + 'OptimizeResult', + 'OptimizeWarning', + 'approx_fprime', + 'bracket', + 'brent', + 'brute', + 'check_grad', + 'fmin', + 'fmin_bfgs', + 'fmin_cg', + 'fmin_ncg', + 'fmin_powell', + 'fminbound', + 'golden', + 'line_search', + 'rosen', + 'rosen_der', + 'rosen_hess', + 'rosen_hess_prod', + 'show_options', + 'zeros', +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="optimize", module="optimize", + private_modules=["_optimize"], all=__all__, + attribute=name) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/slsqp.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/slsqp.py new file mode 100644 index 0000000000000000000000000000000000000000..c2b77d2eb447527cd91e92907e06ad53dd1ad3d8 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/slsqp.py @@ -0,0 +1,23 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.optimize` namespace for importing the functions +# included below. 
+ +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = [ # noqa: F822 + 'OptimizeResult', + 'fmin_slsqp', + 'slsqp', + 'zeros', +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="optimize", module="slsqp", + private_modules=["_slsqp_py"], all=__all__, + attribute=name) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/__init__.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/_cython_examples/extending.pyx b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/_cython_examples/extending.pyx new file mode 100644 index 0000000000000000000000000000000000000000..d831b3c7f5dcaee71371027c7ee95aa9ee51d157 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/_cython_examples/extending.pyx @@ -0,0 +1,43 @@ +#!/usr/bin/env python3 +#cython: language_level=3 +#cython: boundscheck=False +#cython: wraparound=False +""" +Taken from docstring for scipy.optimize.cython_optimize module. +""" + +from scipy.optimize.cython_optimize cimport brentq + +# import math from Cython +from libc cimport math + +myargs = {'C0': 1.0, 'C1': 0.7} # a dictionary of extra arguments +XLO, XHI = 0.5, 1.0 # lower and upper search boundaries +XTOL, RTOL, MITR = 1e-3, 1e-3, 10 # other solver parameters + +# user-defined struct for extra parameters +ctypedef struct test_params: + double C0 + double C1 + + +# user-defined callback +cdef double f(double x, void *args) noexcept: + cdef test_params *myargs = args + return myargs.C0 - math.exp(-(x - myargs.C1)) + + +# Cython wrapper function +cdef double brentq_wrapper_example(dict args, double xa, double xb, + double xtol, double rtol, int mitr): + # Cython automatically casts dictionary to struct + cdef test_params myargs = args + return brentq( + f, xa, xb, &myargs, xtol, rtol, mitr, NULL) + + +# Python function +def brentq_example(args=myargs, xa=XLO, xb=XHI, xtol=XTOL, rtol=RTOL, + mitr=MITR): + '''Calls Cython wrapper from Python.''' + return brentq_wrapper_example(args, xa, xb, xtol, rtol, mitr) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/_cython_examples/meson.build b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/_cython_examples/meson.build new file mode 100644 index 0000000000000000000000000000000000000000..1fb210fbecb84a21518ad8828a789376410f02aa --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/_cython_examples/meson.build @@ -0,0 +1,32 @@ +project('random-build-examples', 'c', 'cpp', 'cython') + +fs = import('fs') + +py3 = import('python').find_installation(pure: false) + +cy = meson.get_compiler('cython') + +if not cy.version().version_compare('>=3.0.8') + error('tests requires Cython >= 3.0.8') +endif + +cython_args = [] +if cy.version().version_compare('>=3.1.0') + cython_args += ['-Xfreethreading_compatible=True'] +endif + +py3.extension_module( + 'extending', + 'extending.pyx', + cython_args: cython_args, + install: false, +) + +extending_cpp = 
fs.copyfile('extending.pyx', 'extending_cpp.pyx') +py3.extension_module( + 'extending_cpp', + extending_cpp, + cython_args: cython_args, + install: false, + override_options : ['cython_language=cpp'] +) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test__basinhopping.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test__basinhopping.py new file mode 100644 index 0000000000000000000000000000000000000000..80729460ee6aa596d7dc2c398ce43ff4aacc7bff --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test__basinhopping.py @@ -0,0 +1,535 @@ +""" +Unit tests for the basin hopping global minimization algorithm. +""" +import copy + +from numpy.testing import (assert_almost_equal, assert_equal, assert_, + assert_allclose) +import pytest +from pytest import raises as assert_raises +import numpy as np +from numpy import cos, sin + +from scipy.optimize import basinhopping, OptimizeResult +from scipy.optimize._basinhopping import ( + Storage, RandomDisplacement, Metropolis, AdaptiveStepsize) + + +def func1d(x): + f = cos(14.5 * x - 0.3) + (x + 0.2) * x + df = np.array(-14.5 * sin(14.5 * x - 0.3) + 2. * x + 0.2) + return f, df + + +def func2d_nograd(x): + f = cos(14.5 * x[0] - 0.3) + (x[1] + 0.2) * x[1] + (x[0] + 0.2) * x[0] + return f + + +def func2d(x): + f = cos(14.5 * x[0] - 0.3) + (x[1] + 0.2) * x[1] + (x[0] + 0.2) * x[0] + df = np.zeros(2) + df[0] = -14.5 * sin(14.5 * x[0] - 0.3) + 2. * x[0] + 0.2 + df[1] = 2. * x[1] + 0.2 + return f, df + + +def func2d_easyderiv(x): + f = 2.0*x[0]**2 + 2.0*x[0]*x[1] + 2.0*x[1]**2 - 6.0*x[0] + df = np.zeros(2) + df[0] = 4.0*x[0] + 2.0*x[1] - 6.0 + df[1] = 2.0*x[0] + 4.0*x[1] + + return f, df + + +class MyTakeStep1(RandomDisplacement): + """use a copy of displace, but have it set a special parameter to + make sure it's actually being used.""" + def __init__(self): + self.been_called = False + super().__init__() + + def __call__(self, x): + self.been_called = True + return super().__call__(x) + + +def myTakeStep2(x): + """redo RandomDisplacement in function form without the attribute stepsize + to make sure everything still works ok + """ + s = 0.5 + x += np.random.uniform(-s, s, np.shape(x)) + return x + + +class MyAcceptTest: + """pass a custom accept test + + This does nothing but make sure it's being used and ensure all the + possible return values are accepted + """ + def __init__(self): + self.been_called = False + self.ncalls = 0 + self.testres = [False, 'force accept', True, np.bool_(True), + np.bool_(False), [], {}, 0, 1] + + def __call__(self, **kwargs): + self.been_called = True + self.ncalls += 1 + if self.ncalls - 1 < len(self.testres): + return self.testres[self.ncalls - 1] + else: + return True + + +class MyCallBack: + """pass a custom callback function + + This makes sure it's being used. It also returns True after 10 + steps to ensure that it's stopping early. + + """ + def __init__(self): + self.been_called = False + self.ncalls = 0 + + def __call__(self, x, f, accepted): + self.been_called = True + self.ncalls += 1 + if self.ncalls == 10: + return True + + +class TestBasinHopping: + + def setup_method(self): + """ Tests setup. + + Run tests based on the 1-D and 2-D functions described above. 
+ """ + self.x0 = (1.0, [1.0, 1.0]) + self.sol = (-0.195, np.array([-0.195, -0.1])) + + self.tol = 3 # number of decimal places + + self.niter = 100 + self.disp = False + + self.kwargs = {"method": "L-BFGS-B", "jac": True} + self.kwargs_nograd = {"method": "L-BFGS-B"} + + def test_TypeError(self): + # test the TypeErrors are raised on bad input + i = 1 + # if take_step is passed, it must be callable + assert_raises(TypeError, basinhopping, func2d, self.x0[i], + take_step=1) + # if accept_test is passed, it must be callable + assert_raises(TypeError, basinhopping, func2d, self.x0[i], + accept_test=1) + + def test_input_validation(self): + msg = 'target_accept_rate has to be in range \\(0, 1\\)' + with assert_raises(ValueError, match=msg): + basinhopping(func1d, self.x0[0], target_accept_rate=0.) + with assert_raises(ValueError, match=msg): + basinhopping(func1d, self.x0[0], target_accept_rate=1.) + + msg = 'stepwise_factor has to be in range \\(0, 1\\)' + with assert_raises(ValueError, match=msg): + basinhopping(func1d, self.x0[0], stepwise_factor=0.) + with assert_raises(ValueError, match=msg): + basinhopping(func1d, self.x0[0], stepwise_factor=1.) + + def test_1d_grad(self): + # test 1-D minimizations with gradient + i = 0 + res = basinhopping(func1d, self.x0[i], minimizer_kwargs=self.kwargs, + niter=self.niter, disp=self.disp) + assert_almost_equal(res.x, self.sol[i], self.tol) + + def test_2d(self): + # test 2d minimizations with gradient + i = 1 + res = basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs, + niter=self.niter, disp=self.disp) + assert_almost_equal(res.x, self.sol[i], self.tol) + assert_(res.nfev > 0) + + def test_njev(self): + # test njev is returned correctly + i = 1 + minimizer_kwargs = self.kwargs.copy() + # L-BFGS-B doesn't use njev, but BFGS does + minimizer_kwargs["method"] = "BFGS" + res = basinhopping(func2d, self.x0[i], + minimizer_kwargs=minimizer_kwargs, niter=self.niter, + disp=self.disp) + assert_(res.nfev > 0) + assert_equal(res.nfev, res.njev) + + def test_jac(self): + # test Jacobian returned + minimizer_kwargs = self.kwargs.copy() + # BFGS returns a Jacobian + minimizer_kwargs["method"] = "BFGS" + + res = basinhopping(func2d_easyderiv, [0.0, 0.0], + minimizer_kwargs=minimizer_kwargs, niter=self.niter, + disp=self.disp) + + assert_(hasattr(res.lowest_optimization_result, "jac")) + + # in this case, the Jacobian is just [df/dx, df/dy] + _, jacobian = func2d_easyderiv(res.x) + assert_almost_equal(res.lowest_optimization_result.jac, jacobian, + self.tol) + + def test_2d_nograd(self): + # test 2-D minimizations without gradient + i = 1 + res = basinhopping(func2d_nograd, self.x0[i], + minimizer_kwargs=self.kwargs_nograd, + niter=self.niter, disp=self.disp) + assert_almost_equal(res.x, self.sol[i], self.tol) + + @pytest.mark.fail_slow(10) + def test_all_minimizers(self): + # Test 2-D minimizations with gradient. Nelder-Mead, Powell, COBYLA, and + # COBYQA don't accept jac=True, so aren't included here. + i = 1 + methods = ['CG', 'BFGS', 'Newton-CG', 'L-BFGS-B', 'TNC', 'SLSQP'] + minimizer_kwargs = copy.copy(self.kwargs) + for method in methods: + minimizer_kwargs["method"] = method + res = basinhopping(func2d, self.x0[i], + minimizer_kwargs=minimizer_kwargs, + niter=self.niter, disp=self.disp) + assert_almost_equal(res.x, self.sol[i], self.tol) + + @pytest.mark.fail_slow(20) + def test_all_nograd_minimizers(self): + # Test 2-D minimizations without gradient. Newton-CG requires jac=True, + # so not included here. 
+ i = 1 + methods = ['CG', 'BFGS', 'L-BFGS-B', 'TNC', 'SLSQP', + 'Nelder-Mead', 'Powell', 'COBYLA', 'COBYQA'] + minimizer_kwargs = copy.copy(self.kwargs_nograd) + for method in methods: + # COBYQA takes extensive amount of time on this problem + niter = 10 if method == 'COBYQA' else self.niter + minimizer_kwargs["method"] = method + res = basinhopping(func2d_nograd, self.x0[i], + minimizer_kwargs=minimizer_kwargs, + niter=niter, disp=self.disp, seed=1234) + tol = self.tol + if method == 'COBYLA': + tol = 2 + assert_almost_equal(res.x, self.sol[i], decimal=tol) + + def test_pass_takestep(self): + # test that passing a custom takestep works + # also test that the stepsize is being adjusted + takestep = MyTakeStep1() + initial_step_size = takestep.stepsize + i = 1 + res = basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs, + niter=self.niter, disp=self.disp, + take_step=takestep) + assert_almost_equal(res.x, self.sol[i], self.tol) + assert_(takestep.been_called) + # make sure that the build in adaptive step size has been used + assert_(initial_step_size != takestep.stepsize) + + def test_pass_simple_takestep(self): + # test that passing a custom takestep without attribute stepsize + takestep = myTakeStep2 + i = 1 + res = basinhopping(func2d_nograd, self.x0[i], + minimizer_kwargs=self.kwargs_nograd, + niter=self.niter, disp=self.disp, + take_step=takestep) + assert_almost_equal(res.x, self.sol[i], self.tol) + + def test_pass_accept_test(self): + # test passing a custom accept test + # makes sure it's being used and ensures all the possible return values + # are accepted. + accept_test = MyAcceptTest() + i = 1 + # there's no point in running it more than a few steps. + basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs, + niter=10, disp=self.disp, accept_test=accept_test) + assert_(accept_test.been_called) + + def test_pass_callback(self): + # test passing a custom callback function + # This makes sure it's being used. It also returns True after 10 steps + # to ensure that it's stopping early. + callback = MyCallBack() + i = 1 + # there's no point in running it more than a few steps. + res = basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs, + niter=30, disp=self.disp, callback=callback) + assert_(callback.been_called) + assert_("callback" in res.message[0]) + # One of the calls of MyCallBack is during BasinHoppingRunner + # construction, so there are only 9 remaining before MyCallBack stops + # the minimization. 
+ assert_equal(res.nit, 9) + + def test_minimizer_fail(self): + # test if a minimizer fails + i = 1 + self.kwargs["options"] = dict(maxiter=0) + self.niter = 10 + res = basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs, + niter=self.niter, disp=self.disp) + # the number of failed minimizations should be the number of + # iterations + 1 + assert_equal(res.nit + 1, res.minimization_failures) + + def test_niter_zero(self): + # gh5915, what happens if you call basinhopping with niter=0 + i = 0 + basinhopping(func1d, self.x0[i], minimizer_kwargs=self.kwargs, + niter=0, disp=self.disp) + + def test_rng_reproducibility(self): + # rng should ensure reproducibility between runs + minimizer_kwargs = {"method": "L-BFGS-B", "jac": True} + + f_1 = [] + + def callback(x, f, accepted): + f_1.append(f) + + basinhopping(func2d, [1.0, 1.0], minimizer_kwargs=minimizer_kwargs, + niter=10, callback=callback, rng=10) + + f_2 = [] + + def callback2(x, f, accepted): + f_2.append(f) + + basinhopping(func2d, [1.0, 1.0], minimizer_kwargs=minimizer_kwargs, + niter=10, callback=callback2, rng=10) + assert_equal(np.array(f_1), np.array(f_2)) + + def test_random_gen(self): + # check that np.random.Generator can be used (numpy >= 1.17) + rng = np.random.default_rng(1) + + minimizer_kwargs = {"method": "L-BFGS-B", "jac": True} + + res1 = basinhopping(func2d, [1.0, 1.0], + minimizer_kwargs=minimizer_kwargs, + niter=10, rng=rng) + + rng = np.random.default_rng(1) + res2 = basinhopping(func2d, [1.0, 1.0], + minimizer_kwargs=minimizer_kwargs, + niter=10, rng=rng) + assert_equal(res1.x, res2.x) + + def test_monotonic_basin_hopping(self): + # test 1-D minimizations with gradient and T=0 + i = 0 + + res = basinhopping(func1d, self.x0[i], minimizer_kwargs=self.kwargs, + niter=self.niter, disp=self.disp, T=0) + assert_almost_equal(res.x, self.sol[i], self.tol) + + +@pytest.mark.thread_unsafe +class Test_Storage: + def setup_method(self): + self.x0 = np.array(1) + self.f0 = 0 + + minres = OptimizeResult(success=True) + minres.x = self.x0 + minres.fun = self.f0 + + self.storage = Storage(minres) + + def test_higher_f_rejected(self): + new_minres = OptimizeResult(success=True) + new_minres.x = self.x0 + 1 + new_minres.fun = self.f0 + 1 + + ret = self.storage.update(new_minres) + minres = self.storage.get_lowest() + assert_equal(self.x0, minres.x) + assert_equal(self.f0, minres.fun) + assert_(not ret) + + @pytest.mark.parametrize('success', [True, False]) + def test_lower_f_accepted(self, success): + new_minres = OptimizeResult(success=success) + new_minres.x = self.x0 + 1 + new_minres.fun = self.f0 - 1 + + ret = self.storage.update(new_minres) + minres = self.storage.get_lowest() + assert (self.x0 != minres.x) == success # can't use `is` + assert (self.f0 != minres.fun) == success # left side is NumPy bool + assert ret is success + + +class Test_RandomDisplacement: + def setup_method(self): + self.stepsize = 1.0 + self.N = 300000 + + def test_random(self): + # the mean should be 0 + # the variance should be (2*stepsize)**2 / 12 + # note these tests are random, they will fail from time to time + rng = np.random.RandomState(0) + x0 = np.zeros([self.N]) + displace = RandomDisplacement(stepsize=self.stepsize, rng=rng) + x = displace(x0) + v = (2. * self.stepsize) ** 2 / 12 + assert_almost_equal(np.mean(x), 0., 1) + assert_almost_equal(np.var(x), v, 1) + + +class Test_Metropolis: + def setup_method(self): + self.T = 2. + self.met = Metropolis(self.T) + self.res_new = OptimizeResult(success=True, fun=0.) 
+ self.res_old = OptimizeResult(success=True, fun=1.) + + def test_boolean_return(self): + # the return must be a bool, else an error will be raised in + # basinhopping + ret = self.met(res_new=self.res_new, res_old=self.res_old) + assert isinstance(ret, bool) + + def test_lower_f_accepted(self): + assert_(self.met(res_new=self.res_new, res_old=self.res_old)) + + def test_accept(self): + # test that steps are randomly accepted for f_new > f_old + one_accept = False + one_reject = False + for i in range(1000): + if one_accept and one_reject: + break + res_new = OptimizeResult(success=True, fun=1.) + res_old = OptimizeResult(success=True, fun=0.5) + ret = self.met(res_new=res_new, res_old=res_old) + if ret: + one_accept = True + else: + one_reject = True + assert_(one_accept) + assert_(one_reject) + + def test_GH7495(self): + # an overflow in exp was producing a RuntimeWarning + # create own object here in case someone changes self.T + met = Metropolis(2) + res_new = OptimizeResult(success=True, fun=0.) + res_old = OptimizeResult(success=True, fun=2000) + with np.errstate(over='raise'): + met.accept_reject(res_new=res_new, res_old=res_old) + + def test_gh7799(self): + # gh-7799 reported a problem in which local search was successful but + # basinhopping returned an invalid solution. Show that this is fixed. + def func(x): + return (x**2-8)**2+(x+2)**2 + + x0 = -4 + limit = 50 # Constrain to func value >= 50 + con = {'type': 'ineq', 'fun': lambda x: func(x) - limit}, + res = basinhopping( + func, + x0, + 30, + seed=np.random.RandomState(1234), + minimizer_kwargs={'constraints': con} + ) + assert res.success + assert_allclose(res.fun, limit, rtol=1e-6) + + def test_accept_gh7799(self): + # Metropolis should not accept the result of an unsuccessful new local + # search if the old local search was successful + + met = Metropolis(0) # monotonic basin hopping + res_new = OptimizeResult(success=True, fun=0.) + res_old = OptimizeResult(success=True, fun=1.) + + # if new local search was successful and energy is lower, accept + assert met(res_new=res_new, res_old=res_old) + # if new res is unsuccessful, don't accept - even if energy is lower + res_new.success = False + assert not met(res_new=res_new, res_old=res_old) + # ...unless the old res was unsuccessful, too. In that case, why not? + res_old.success = False + assert met(res_new=res_new, res_old=res_old) + + def test_reject_all_gh7799(self): + # Test the behavior when there is no feasible solution + def fun(x): + return x@x + + def constraint(x): + return x + 1 + + kwargs = {'constraints': {'type': 'eq', 'fun': constraint}, + 'bounds': [(0, 1), (0, 1)], 'method': 'slsqp'} + res = basinhopping(fun, x0=[2, 3], niter=10, minimizer_kwargs=kwargs) + assert not res.success + + +class Test_AdaptiveStepsize: + def setup_method(self): + self.stepsize = 1. + self.ts = RandomDisplacement(stepsize=self.stepsize) + self.target_accept_rate = 0.5 + self.takestep = AdaptiveStepsize(takestep=self.ts, verbose=False, + accept_rate=self.target_accept_rate) + + def test_adaptive_increase(self): + # if few steps are rejected, the stepsize should increase + x = 0. + self.takestep(x) + self.takestep.report(False) + for i in range(self.takestep.interval): + self.takestep(x) + self.takestep.report(True) + assert_(self.ts.stepsize > self.stepsize) + + def test_adaptive_decrease(self): + # if few steps are rejected, the stepsize should increase + x = 0. 
+ self.takestep(x) + self.takestep.report(True) + for i in range(self.takestep.interval): + self.takestep(x) + self.takestep.report(False) + assert_(self.ts.stepsize < self.stepsize) + + def test_all_accepted(self): + # test that everything works OK if all steps were accepted + x = 0. + for i in range(self.takestep.interval + 1): + self.takestep(x) + self.takestep.report(True) + assert_(self.ts.stepsize > self.stepsize) + + def test_all_rejected(self): + # test that everything works OK if all steps were rejected + x = 0. + for i in range(self.takestep.interval + 1): + self.takestep(x) + self.takestep.report(False) + assert_(self.ts.stepsize < self.stepsize) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test__differential_evolution.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test__differential_evolution.py new file mode 100644 index 0000000000000000000000000000000000000000..3f81877f08abe436211210d5bd15d876c9a7177c --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test__differential_evolution.py @@ -0,0 +1,1703 @@ +""" +Unit tests for the differential global minimization algorithm. +""" +import multiprocessing +from multiprocessing.dummy import Pool as ThreadPool +import platform + +from scipy.optimize._differentialevolution import (DifferentialEvolutionSolver, + _ConstraintWrapper) +from scipy.optimize import differential_evolution, OptimizeResult +from scipy.optimize._constraints import (Bounds, NonlinearConstraint, + LinearConstraint) +from scipy.optimize import rosen, minimize +from scipy.sparse import csr_matrix +from scipy import stats + +import numpy as np +from numpy.testing import (assert_equal, assert_allclose, assert_almost_equal, + assert_string_equal, assert_, suppress_warnings) +from pytest import raises as assert_raises, warns +import pytest + + +class TestDifferentialEvolutionSolver: + + def setup_method(self): + self.old_seterr = np.seterr(invalid='raise') + self.limits = np.array([[0., 0.], + [2., 2.]]) + self.bounds = [(0., 2.), (0., 2.)] + + self.dummy_solver = DifferentialEvolutionSolver(self.quadratic, + [(0, 100)]) + + # dummy_solver2 will be used to test mutation strategies + self.dummy_solver2 = DifferentialEvolutionSolver(self.quadratic, + [(0, 1)], + popsize=7, + mutation=0.5) + # create a population that's only 7 members long + # [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7] + population = np.atleast_2d(np.arange(0.1, 0.8, 0.1)).T + self.dummy_solver2.population = population + + def teardown_method(self): + np.seterr(**self.old_seterr) + + def quadratic(self, x): + return x[0]**2 + + def test__strategy_resolves(self): + # test that the correct mutation function is resolved by + # different requested strategy arguments + solver = DifferentialEvolutionSolver(rosen, + self.bounds, + strategy='best1exp') + assert_equal(solver.strategy, 'best1exp') + assert_equal(solver.mutation_func.__name__, '_best1') + + solver = DifferentialEvolutionSolver(rosen, + self.bounds, + strategy='best1bin') + assert_equal(solver.strategy, 'best1bin') + assert_equal(solver.mutation_func.__name__, '_best1') + + solver = DifferentialEvolutionSolver(rosen, + self.bounds, + strategy='rand1bin') + assert_equal(solver.strategy, 'rand1bin') + assert_equal(solver.mutation_func.__name__, '_rand1') + + solver = DifferentialEvolutionSolver(rosen, + self.bounds, + strategy='rand1exp') + assert_equal(solver.strategy, 
'rand1exp') + assert_equal(solver.mutation_func.__name__, '_rand1') + + solver = DifferentialEvolutionSolver(rosen, + self.bounds, + strategy='rand2exp') + assert_equal(solver.strategy, 'rand2exp') + assert_equal(solver.mutation_func.__name__, '_rand2') + + solver = DifferentialEvolutionSolver(rosen, + self.bounds, + strategy='best2bin') + assert_equal(solver.strategy, 'best2bin') + assert_equal(solver.mutation_func.__name__, '_best2') + + solver = DifferentialEvolutionSolver(rosen, + self.bounds, + strategy='rand2bin') + assert_equal(solver.strategy, 'rand2bin') + assert_equal(solver.mutation_func.__name__, '_rand2') + + solver = DifferentialEvolutionSolver(rosen, + self.bounds, + strategy='rand2exp') + assert_equal(solver.strategy, 'rand2exp') + assert_equal(solver.mutation_func.__name__, '_rand2') + + solver = DifferentialEvolutionSolver(rosen, + self.bounds, + strategy='randtobest1bin') + assert_equal(solver.strategy, 'randtobest1bin') + assert_equal(solver.mutation_func.__name__, '_randtobest1') + + solver = DifferentialEvolutionSolver(rosen, + self.bounds, + strategy='randtobest1exp') + assert_equal(solver.strategy, 'randtobest1exp') + assert_equal(solver.mutation_func.__name__, '_randtobest1') + + solver = DifferentialEvolutionSolver(rosen, + self.bounds, + strategy='currenttobest1bin') + assert_equal(solver.strategy, 'currenttobest1bin') + assert_equal(solver.mutation_func.__name__, '_currenttobest1') + + solver = DifferentialEvolutionSolver(rosen, + self.bounds, + strategy='currenttobest1exp') + assert_equal(solver.strategy, 'currenttobest1exp') + assert_equal(solver.mutation_func.__name__, '_currenttobest1') + + def test__mutate1(self): + # strategies */1/*, i.e. rand/1/bin, best/1/exp, etc. + result = np.array([0.05]) + trial = self.dummy_solver2._best1(np.array([2, 3, 4, 5, 6])) + assert_allclose(trial, result) + + result = np.array([0.25]) + trial = self.dummy_solver2._rand1(np.array([2, 3, 4, 5, 6])) + assert_allclose(trial, result) + + def test__mutate2(self): + # strategies */2/*, i.e. rand/2/bin, best/2/exp, etc. 
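test__mutate1 above checks the */1 mutation rules against the hand-built seven-member population. A hedged worked example of where the expected values 0.05 and 0.25 come from (index 0 plays the role of "best" because the solver promotes the lowest-energy member there; F = 0.5 is the mutation factor set in setup_method):

import numpy as np

population = np.arange(0.1, 0.8, 0.1)    # [0.1, 0.2, ..., 0.7]
F = 0.5                                  # mutation factor of dummy_solver2
samples = [2, 3, 4, 5, 6]                # indices handed to _best1 / _rand1

best1 = population[0] + F * (population[samples[0]] - population[samples[1]])
rand1 = population[samples[0]] + F * (population[samples[1]] - population[samples[2]])
print(best1)   # 0.1 + 0.5 * (0.3 - 0.4) = 0.05
print(rand1)   # 0.3 + 0.5 * (0.4 - 0.5) = 0.25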
+ # [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7] + + result = np.array([-0.1]) + trial = self.dummy_solver2._best2(np.array([2, 3, 4, 5, 6])) + assert_allclose(trial, result) + + result = np.array([0.1]) + trial = self.dummy_solver2._rand2(np.array([2, 3, 4, 5, 6])) + assert_allclose(trial, result) + + def test__randtobest1(self): + # strategies randtobest/1/* + result = np.array([0.15]) + trial = self.dummy_solver2._randtobest1(np.array([2, 3, 4, 5, 6])) + assert_allclose(trial, result) + + def test__currenttobest1(self): + # strategies currenttobest/1/* + result = np.array([0.1]) + trial = self.dummy_solver2._currenttobest1( + 1, + np.array([2, 3, 4, 5, 6]) + ) + assert_allclose(trial, result) + + def test_can_init_with_dithering(self): + mutation = (0.5, 1) + solver = DifferentialEvolutionSolver(self.quadratic, + self.bounds, + mutation=mutation) + + assert_equal(solver.dither, list(mutation)) + + def test_invalid_mutation_values_arent_accepted(self): + func = rosen + mutation = (0.5, 3) + assert_raises(ValueError, + DifferentialEvolutionSolver, + func, + self.bounds, + mutation=mutation) + + mutation = (-1, 1) + assert_raises(ValueError, + DifferentialEvolutionSolver, + func, + self.bounds, + mutation=mutation) + + mutation = (0.1, np.nan) + assert_raises(ValueError, + DifferentialEvolutionSolver, + func, + self.bounds, + mutation=mutation) + + mutation = 0.5 + solver = DifferentialEvolutionSolver(func, + self.bounds, + mutation=mutation) + assert_equal(0.5, solver.scale) + assert_equal(None, solver.dither) + + def test_invalid_functional(self): + def func(x): + return np.array([np.sum(x ** 2), np.sum(x)]) + + with assert_raises( + RuntimeError, + match=r"func\(x, \*args\) must return a scalar value"): + differential_evolution(func, [(-2, 2), (-2, 2)]) + + def test__scale_parameters(self): + trial = np.array([0.3]) + assert_equal(30, self.dummy_solver._scale_parameters(trial)) + + # it should also work with the limits reversed + self.dummy_solver.limits = np.array([[100], [0.]]) + assert_equal(30, self.dummy_solver._scale_parameters(trial)) + + def test__unscale_parameters(self): + trial = np.array([30]) + assert_equal(0.3, self.dummy_solver._unscale_parameters(trial)) + + # it should also work with the limits reversed + self.dummy_solver.limits = np.array([[100], [0.]]) + assert_equal(0.3, self.dummy_solver._unscale_parameters(trial)) + + def test_equal_bounds(self): + with np.errstate(invalid='raise'): + solver = DifferentialEvolutionSolver( + self.quadratic, + bounds=[(2.0, 2.0), (1.0, 3.0)] + ) + v = solver._unscale_parameters([2.0, 2.0]) + assert_allclose(v, 0.5) + + res = differential_evolution(self.quadratic, [(2.0, 2.0), (3.0, 3.0)]) + assert_equal(res.x, [2.0, 3.0]) + + def test__ensure_constraint(self): + trial = np.array([1.1, -100, 0.9, 2., 300., -0.00001]) + self.dummy_solver._ensure_constraint(trial) + + assert_equal(trial[2], 0.9) + assert_(np.logical_and(trial >= 0, trial <= 1).all()) + + def test_differential_evolution(self): + # test that the Jmin of DifferentialEvolutionSolver + # is the same as the function evaluation + solver = DifferentialEvolutionSolver( + self.quadratic, [(-2, 2)], maxiter=1, polish=False + ) + result = solver.solve() + assert_equal(result.fun, self.quadratic(result.x)) + + solver = DifferentialEvolutionSolver( + self.quadratic, [(-2, 2)], maxiter=1, polish=True + ) + result = solver.solve() + assert_equal(result.fun, self.quadratic(result.x)) + + def test_best_solution_retrieval(self): + # test that the getter property method for the best solution 
works. + solver = DifferentialEvolutionSolver(self.quadratic, [(-2, 2)]) + result = solver.solve() + assert_equal(result.x, solver.x) + + def test_intermediate_result(self): + # Check that intermediate result object passed into the callback + # function contains the expected information and that raising + # `StopIteration` causes the expected behavior. + maxiter = 10 + + def func(x): + val = rosen(x) + if val < func.val: + func.x = x + func.val = val + return val + func.x = None + func.val = np.inf + + def callback(intermediate_result): + callback.nit += 1 + callback.intermediate_result = intermediate_result + assert intermediate_result.population.ndim == 2 + assert intermediate_result.population.shape[1] == 2 + assert intermediate_result.nit == callback.nit + + # Check that `x` and `fun` attributes are the best found so far + assert_equal(intermediate_result.x, callback.func.x) + assert_equal(intermediate_result.fun, callback.func.val) + + # Check for consistency between `fun`, `population_energies`, + # `x`, and `population` + assert_equal(intermediate_result.fun, rosen(intermediate_result.x)) + for i in range(len(intermediate_result.population_energies)): + res = intermediate_result.population_energies[i] + ref = rosen(intermediate_result.population[i]) + assert_equal(res, ref) + assert_equal(intermediate_result.x, + intermediate_result.population[0]) + assert_equal(intermediate_result.fun, + intermediate_result.population_energies[0]) + + assert intermediate_result.message == 'in progress' + assert intermediate_result.success is True + assert isinstance(intermediate_result, OptimizeResult) + if callback.nit == maxiter: + raise StopIteration + callback.nit = 0 + callback.intermediate_result = None + callback.func = func + + bounds = [(0, 2), (0, 2)] + kwargs = dict(func=func, bounds=bounds, rng=838245, polish=False) + res = differential_evolution(**kwargs, callback=callback) + ref = differential_evolution(**kwargs, maxiter=maxiter) + + # Check that final `intermediate_result` is equivalent to returned + # result object and that terminating with callback `StopIteration` + # after `maxiter` iterations is equivalent to terminating with + # `maxiter` parameter. + assert res.success is ref.success is False + assert callback.nit == res.nit == maxiter + assert res.message == 'callback function requested stop early' + assert ref.message == 'Maximum number of iterations has been exceeded.' 
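test_intermediate_result, which this code sits inside, relies on the newer callback interface. A hedged, minimal sketch of that usage, assuming a SciPy recent enough (roughly 1.12+) to pass an OptimizeResult-like intermediate_result to the callback and to treat StopIteration as an early-stop request:

from scipy.optimize import differential_evolution, rosen

def callback(intermediate_result):
    # intermediate_result behaves like an OptimizeResult: x, fun, population, ...
    if intermediate_result.fun < 1e-6:
        raise StopIteration   # ask the solver to stop early

res = differential_evolution(rosen, [(0, 2), (0, 2)],
                             callback=callback, polish=False)
print(res.message, res.fun)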
+ for field, val in ref.items(): + if field in {'message', 'success'}: # checked separately + continue + assert_equal(callback.intermediate_result[field], val) + assert_equal(res[field], val) + + # Check that polish occurs after `StopIteration` as advertised + callback.nit = 0 + func.val = np.inf + kwargs['polish'] = True + res = differential_evolution(**kwargs, callback=callback) + assert res.fun < ref.fun + + def test_callback_terminates(self): + # test that if the callback returns true, then the minimization halts + bounds = [(0, 2), (0, 2)] + expected_msg = 'callback function requested stop early' + def callback_python_true(param, convergence=0.): + return True + + result = differential_evolution( + rosen, bounds, callback=callback_python_true + ) + assert_string_equal(result.message, expected_msg) + + # if callback raises StopIteration then solve should be interrupted + def callback_stop(intermediate_result): + raise StopIteration + + result = differential_evolution(rosen, bounds, callback=callback_stop) + assert not result.success + + def callback_evaluates_true(param, convergence=0.): + # DE should stop if bool(self.callback) is True + return [10] + + result = differential_evolution(rosen, bounds, callback=callback_evaluates_true) + assert_string_equal(result.message, expected_msg) + assert not result.success + + def callback_evaluates_false(param, convergence=0.): + return [] + + result = differential_evolution(rosen, bounds, + callback=callback_evaluates_false) + assert result.success + + def test_args_tuple_is_passed(self): + # test that the args tuple is passed to the cost function properly. + bounds = [(-10, 10)] + args = (1., 2., 3.) + + def quadratic(x, *args): + if not isinstance(args, tuple): + raise ValueError('args should be a tuple') + return args[0] + args[1] * x + args[2] * x**2. + + result = differential_evolution(quadratic, + bounds, + args=args, + polish=True) + assert_almost_equal(result.fun, 2 / 3.) + + def test_init_with_invalid_strategy(self): + # test that passing an invalid strategy raises ValueError + func = rosen + bounds = [(-3, 3)] + assert_raises(ValueError, + differential_evolution, + func, + bounds, + strategy='abc') + + def test_bounds_checking(self): + # test that the bounds checking works + func = rosen + bounds = [(-3)] + assert_raises(ValueError, + differential_evolution, + func, + bounds) + bounds = [(-3, 3), (3, 4, 5)] + assert_raises(ValueError, + differential_evolution, + func, + bounds) + + # test that we can use a new-type Bounds object + result = differential_evolution(rosen, Bounds([0, 0], [2, 2])) + assert_almost_equal(result.x, (1., 1.)) + + def test_select_samples(self): + # select_samples should return 5 separate random numbers. + limits = np.arange(12., dtype='float64').reshape(2, 6) + bounds = list(zip(limits[0, :], limits[1, :])) + solver = DifferentialEvolutionSolver(None, bounds, popsize=1) + candidate = 0 + r1, r2, r3, r4, r5 = solver._select_samples(candidate, 5) + assert_equal( + len(np.unique(np.array([candidate, r1, r2, r3, r4, r5]))), 6) + + def test_maxiter_stops_solve(self): + # test that if the maximum number of iterations is exceeded + # the solver stops. 
+ solver = DifferentialEvolutionSolver(rosen, self.bounds, maxiter=1) + result = solver.solve() + assert_equal(result.success, False) + assert_equal(result.message, + 'Maximum number of iterations has been exceeded.') + + def test_maxfun_stops_solve(self): + # test that if the maximum number of function evaluations is exceeded + # during initialisation the solver stops + solver = DifferentialEvolutionSolver(rosen, self.bounds, maxfun=1, + polish=False) + result = solver.solve() + + assert_equal(result.nfev, 2) + assert_equal(result.success, False) + assert_equal(result.message, + 'Maximum number of function evaluations has ' + 'been exceeded.') + + # test that if the maximum number of function evaluations is exceeded + # during the actual minimisation, then the solver stops. + # Have to turn polishing off, as this will still occur even if maxfun + # is reached. For popsize=5 and len(bounds)=2, then there are only 10 + # function evaluations during initialisation. + solver = DifferentialEvolutionSolver(rosen, + self.bounds, + popsize=5, + polish=False, + maxfun=40) + result = solver.solve() + + assert_equal(result.nfev, 41) + assert_equal(result.success, False) + assert_equal(result.message, + 'Maximum number of function evaluations has ' + 'been exceeded.') + + # now repeat for updating='deferred version + # 47 function evaluations is not a multiple of the population size, + # so maxfun is reached partway through a population evaluation. + solver = DifferentialEvolutionSolver(rosen, + self.bounds, + popsize=5, + polish=False, + maxfun=47, + updating='deferred') + result = solver.solve() + + assert_equal(result.nfev, 47) + assert_equal(result.success, False) + assert_equal(result.message, + 'Maximum number of function evaluations has ' + 'been reached.') + + def test_quadratic(self): + # test the quadratic function from object + solver = DifferentialEvolutionSolver(self.quadratic, + [(-100, 100)], + tol=0.02) + solver.solve() + assert_equal(np.argmin(solver.population_energies), 0) + + def test_quadratic_from_diff_ev(self): + # test the quadratic function from differential_evolution function + differential_evolution(self.quadratic, + [(-100, 100)], + tol=0.02, + seed=1) + + def test_rng_gives_repeatability(self): + result = differential_evolution(self.quadratic, + [(-100, 100)], + polish=False, + rng=1, + tol=0.5) + result2 = differential_evolution(self.quadratic, + [(-100, 100)], + polish=False, + rng=1, + tol=0.5) + assert_equal(result.x, result2.x) + assert_equal(result.nfev, result2.nfev) + + def test_random_generator(self): + # check that np.random.Generator can be used (numpy >= 1.17) + # obtain a np.random.Generator object + rng = np.random.default_rng() + + inits = ['random', 'latinhypercube', 'sobol', 'halton'] + for init in inits: + differential_evolution(self.quadratic, + [(-100, 100)], + polish=False, + rng=rng, + tol=0.5, + init=init) + + def test_exp_runs(self): + # test whether exponential mutation loop runs + solver = DifferentialEvolutionSolver(rosen, + self.bounds, + strategy='best1exp', + maxiter=1) + + solver.solve() + + def test_gh_4511_regression(self): + # This modification of the differential evolution docstring example + # uses a custom popsize that had triggered an off-by-one error. + # Because we do not care about solving the optimization problem in + # this test, we use maxiter=1 to reduce the testing time. 
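test_random_generator above loops over the supported population initialisation schemes. A hedged sketch of selecting one explicitly; the strings are the ones the test exercises and the objective is a stand-in quadratic:

from scipy.optimize import differential_evolution

def quadratic(x):
    return x[0] ** 2

for init in ("latinhypercube", "sobol", "halton", "random"):
    res = differential_evolution(quadratic, [(-100, 100)], init=init,
                                 tol=0.5, polish=False)
    print(init, res.x, res.nfev)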
+ bounds = [(-5, 5), (-5, 5)] + # result = differential_evolution(rosen, bounds, popsize=1815, + # maxiter=1) + + # the original issue arose because of rounding error in arange, with + # linspace being a much better solution. 1815 is quite a large popsize + # to use and results in a long test time (~13s). I used the original + # issue to figure out the lowest number of samples that would cause + # this rounding error to occur, 49. + differential_evolution(rosen, bounds, popsize=49, maxiter=1) + + def test_calculate_population_energies(self): + # if popsize is 3, then the overall generation has size (6,) + solver = DifferentialEvolutionSolver(rosen, self.bounds, popsize=3) + solver._calculate_population_energies(solver.population) + solver._promote_lowest_energy() + assert_equal(np.argmin(solver.population_energies), 0) + + # initial calculation of the energies should require 6 nfev. + assert_equal(solver._nfev, 6) + + def test_iteration(self): + # test that DifferentialEvolutionSolver is iterable + # if popsize is 3, then the overall generation has size (6,) + solver = DifferentialEvolutionSolver(rosen, self.bounds, popsize=3, + maxfun=12) + x, fun = next(solver) + assert_equal(np.size(x, 0), 2) + + # 6 nfev are required for initial calculation of energies, 6 nfev are + # required for the evolution of the 6 population members. + assert_equal(solver._nfev, 12) + + # the next generation should halt because it exceeds maxfun + assert_raises(StopIteration, next, solver) + + # check a proper minimisation can be done by an iterable solver + solver = DifferentialEvolutionSolver(rosen, self.bounds) + _, fun_prev = next(solver) + for i, soln in enumerate(solver): + x_current, fun_current = soln + assert fun_prev >= fun_current + _, fun_prev = x_current, fun_current + # need to have this otherwise the solver would never stop. + if i == 50: + break + + def test_convergence(self): + solver = DifferentialEvolutionSolver(rosen, self.bounds, tol=0.2, + polish=False) + solver.solve() + assert_(solver.convergence < 0.2) + + def test_maxiter_none_GH5731(self): + # Pre 0.17 the previous default for maxiter and maxfun was None. + # the numerical defaults are now 1000 and np.inf. However, some scripts + # will still supply None for both of those, this will raise a TypeError + # in the solve method. 
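test_iteration above drives the solver one generation at a time via the iterator protocol. A hedged sketch of that pattern; DifferentialEvolutionSolver lives in a private module and is imported here only because the tests above already do so:

from scipy.optimize._differentialevolution import DifferentialEvolutionSolver
from scipy.optimize import rosen

solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)], popsize=5, tol=0.01)
for generation, (x, fun) in enumerate(solver):
    # each iteration evolves one generation and yields the current best (x, fun)
    if solver.converged() or generation >= 20:
        break
print(generation, x, fun)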
+ solver = DifferentialEvolutionSolver(rosen, self.bounds, maxiter=None, + maxfun=None) + solver.solve() + + def test_population_initiation(self): + # test the different modes of population initiation + + # init must be either 'latinhypercube' or 'random' + # raising ValueError is something else is passed in + assert_raises(ValueError, + DifferentialEvolutionSolver, + *(rosen, self.bounds), + **{'init': 'rubbish'}) + + solver = DifferentialEvolutionSolver(rosen, self.bounds) + + # check that population initiation: + # 1) resets _nfev to 0 + # 2) all population energies are np.inf + solver.init_population_random() + assert_equal(solver._nfev, 0) + assert_(np.all(np.isinf(solver.population_energies))) + + solver.init_population_lhs() + assert_equal(solver._nfev, 0) + assert_(np.all(np.isinf(solver.population_energies))) + + solver.init_population_qmc(qmc_engine='halton') + assert_equal(solver._nfev, 0) + assert_(np.all(np.isinf(solver.population_energies))) + + solver = DifferentialEvolutionSolver(rosen, self.bounds, init='sobol') + solver.init_population_qmc(qmc_engine='sobol') + assert_equal(solver._nfev, 0) + assert_(np.all(np.isinf(solver.population_energies))) + + # we should be able to initialize with our own array + population = np.linspace(-1, 3, 10).reshape(5, 2) + solver = DifferentialEvolutionSolver(rosen, self.bounds, + init=population, + strategy='best2bin', + atol=0.01, rng=1, popsize=5) + + assert_equal(solver._nfev, 0) + assert_(np.all(np.isinf(solver.population_energies))) + assert_(solver.num_population_members == 5) + assert_(solver.population_shape == (5, 2)) + + # check that the population was initialized correctly + unscaled_population = np.clip(solver._unscale_parameters(population), + 0, 1) + assert_almost_equal(solver.population[:5], unscaled_population) + + # population values need to be clipped to bounds + assert_almost_equal(np.min(solver.population[:5]), 0) + assert_almost_equal(np.max(solver.population[:5]), 1) + + # shouldn't be able to initialize with an array if it's the wrong shape + # this would have too many parameters + population = np.linspace(-1, 3, 15).reshape(5, 3) + assert_raises(ValueError, + DifferentialEvolutionSolver, + *(rosen, self.bounds), + **{'init': population}) + + # provide an initial solution + # bounds are [(0, 2), (0, 2)] + x0 = np.random.uniform(low=0.0, high=2.0, size=2) + solver = DifferentialEvolutionSolver( + rosen, self.bounds, x0=x0 + ) + # parameters are scaled to unit interval + assert_allclose(solver.population[0], x0 / 2.0) + + def test_x0(self): + # smoke test that checks that x0 is usable. 
+ res = differential_evolution(rosen, self.bounds, x0=[0.2, 0.8]) + assert res.success + + # check what happens if some of the x0 lay outside the bounds + with assert_raises(ValueError): + differential_evolution(rosen, self.bounds, x0=[0.2, 2.1]) + + def test_infinite_objective_function(self): + # Test that there are no problems if the objective function + # returns inf on some runs + def sometimes_inf(x): + if x[0] < .5: + return np.inf + return x[1] + bounds = [(0, 1), (0, 1)] + differential_evolution(sometimes_inf, bounds=bounds, disp=False) + + def test_deferred_updating(self): + # check setting of deferred updating, with default workers + bounds = [(0., 2.), (0., 2.)] + solver = DifferentialEvolutionSolver(rosen, bounds, updating='deferred') + assert_(solver._updating == 'deferred') + assert_(solver._mapwrapper._mapfunc is map) + res = solver.solve() + assert res.success + + # check that deferred updating works with an exponential crossover + res = differential_evolution( + rosen, bounds, updating='deferred', strategy='best1exp' + ) + assert res.success + + @pytest.mark.thread_unsafe + def test_immediate_updating(self): + # check setting of immediate updating, with default workers + bounds = [(0., 2.), (0., 2.)] + solver = DifferentialEvolutionSolver(rosen, bounds) + assert_(solver._updating == 'immediate') + + # Safely forking from a multithreaded process is + # problematic, and deprecated in Python 3.12, so + # we use a slower but portable alternative + # see gh-19848 + ctx = multiprocessing.get_context("spawn") + with ctx.Pool(2) as p: + # should raise a UserWarning because the updating='immediate' + # is being overridden by the workers keyword + with warns(UserWarning): + with DifferentialEvolutionSolver(rosen, bounds, workers=p.map) as s: + solver.solve() + assert s._updating == 'deferred' + + @pytest.mark.fail_slow(10) + def test_parallel(self): + # smoke test for parallelization with deferred updating + bounds = [(0., 2.), (0., 2.)] + # use threads instead of Process to speed things up for this simple example + with ThreadPool(2) as p, DifferentialEvolutionSolver( + rosen, bounds, updating='deferred', workers=p.map, tol=0.1, popsize=3 + ) as solver: + assert solver._mapwrapper.pool is not None + assert solver._updating == 'deferred' + solver.solve() + + with DifferentialEvolutionSolver( + rosen, bounds, updating='deferred', workers=2, popsize=3, tol=0.1 + ) as solver: + assert solver._mapwrapper.pool is not None + assert solver._updating == 'deferred' + solver.solve() + + def test_converged(self): + solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)]) + solver.solve() + assert_(solver.converged()) + + def test_constraint_violation_fn(self): + def constr_f(x): + return [x[0] + x[1]] + + def constr_f2(x): + return np.array([x[0]**2 + x[1], x[0] - x[1]]) + + nlc = NonlinearConstraint(constr_f, -np.inf, 1.9) + + solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)], + constraints=(nlc,)) + + cv = solver._constraint_violation_fn(np.array([1.0, 1.0])) + assert_almost_equal(cv, 0.1) + + nlc2 = NonlinearConstraint(constr_f2, -np.inf, 1.8) + solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)], + constraints=(nlc, nlc2)) + + # for multiple constraints the constraint violations should + # be concatenated. 
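The constraint tests above hinge on how a violation is measured: for a constraint with bounds [lb, ub], each component's violation is the amount by which the constraint value falls below lb or exceeds ub, and 0 when it is satisfied. A hedged arithmetic sketch reproducing the 0.1 asserted above:

import numpy as np

def violation(val, lb, ub):
    # amount by which val breaks [lb, ub], elementwise; 0 when feasible
    val, lb, ub = map(np.asarray, (val, lb, ub))
    return np.clip(lb - val, 0, None) + np.clip(val - ub, 0, None)

# NonlinearConstraint(constr_f, -np.inf, 1.9) at x = [1., 1.]:
# constr_f(x) = x[0] + x[1] = 2.0, so the violation is 2.0 - 1.9 = 0.1
print(violation(2.0, -np.inf, 1.9))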
+ xs = [(1.2, 1), (2.0, 2.0), (0.5, 0.5)] + vs = [(0.3, 0.64, 0.0), (2.1, 4.2, 0.0), (0, 0, 0)] + + for x, v in zip(xs, vs): + cv = solver._constraint_violation_fn(np.array(x)) + assert_allclose(cv, np.atleast_2d(v)) + + # vectorized calculation of a series of solutions + assert_allclose( + solver._constraint_violation_fn(np.array(xs)), np.array(vs) + ) + + # the following line is used in _calculate_population_feasibilities. + # _constraint_violation_fn returns an (1, M) array when + # x.shape == (N,), i.e. a single solution. Therefore this list + # comprehension should generate (S, 1, M) array. + constraint_violation = np.array([solver._constraint_violation_fn(x) + for x in np.array(xs)]) + assert constraint_violation.shape == (3, 1, 3) + + # we need reasonable error messages if the constraint function doesn't + # return the right thing + def constr_f3(x): + # returns (S, M), rather than (M, S) + return constr_f2(x).T + + nlc2 = NonlinearConstraint(constr_f3, -np.inf, 1.8) + solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)], + constraints=(nlc, nlc2), + vectorized=False) + solver.vectorized = True + with pytest.raises( + RuntimeError, match="An array returned from a Constraint" + ): + solver._constraint_violation_fn(np.array(xs)) + + def test_constraint_population_feasibilities(self): + def constr_f(x): + return [x[0] + x[1]] + + def constr_f2(x): + return [x[0]**2 + x[1], x[0] - x[1]] + + nlc = NonlinearConstraint(constr_f, -np.inf, 1.9) + + solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)], + constraints=(nlc,)) + + # are population feasibilities correct + # [0.5, 0.5] corresponds to scaled values of [1., 1.] + feas, cv = solver._calculate_population_feasibilities( + np.array([[0.5, 0.5], [1., 1.]])) + assert_equal(feas, [False, False]) + assert_almost_equal(cv, np.array([[0.1], [2.1]])) + assert cv.shape == (2, 1) + + nlc2 = NonlinearConstraint(constr_f2, -np.inf, 1.8) + + for vectorize in [False, True]: + solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)], + constraints=(nlc, nlc2), + vectorized=vectorize, + updating='deferred') + + feas, cv = solver._calculate_population_feasibilities( + np.array([[0.5, 0.5], [0.6, 0.5]])) + assert_equal(feas, [False, False]) + assert_almost_equal(cv, np.array([[0.1, 0.2, 0], [0.3, 0.64, 0]])) + + feas, cv = solver._calculate_population_feasibilities( + np.array([[0.5, 0.5], [1., 1.]])) + assert_equal(feas, [False, False]) + assert_almost_equal(cv, np.array([[0.1, 0.2, 0], [2.1, 4.2, 0]])) + assert cv.shape == (2, 3) + + feas, cv = solver._calculate_population_feasibilities( + np.array([[0.25, 0.25], [1., 1.]])) + assert_equal(feas, [True, False]) + assert_almost_equal(cv, np.array([[0.0, 0.0, 0.], [2.1, 4.2, 0]])) + assert cv.shape == (2, 3) + + @pytest.mark.thread_unsafe + def test_constraint_solve(self): + def constr_f(x): + return np.array([x[0] + x[1]]) + + nlc = NonlinearConstraint(constr_f, -np.inf, 1.9) + + solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)], + constraints=(nlc,)) + + # trust-constr warns if the constraint function is linear + with warns(UserWarning): + res = solver.solve() + + assert constr_f(res.x) <= 1.9 + assert res.success + + @pytest.mark.fail_slow(10) + @pytest.mark.thread_unsafe + def test_impossible_constraint(self): + def constr_f(x): + return np.array([x[0] + x[1]]) + + nlc = NonlinearConstraint(constr_f, -np.inf, -1) + + solver = DifferentialEvolutionSolver( + rosen, [(0, 2), (0, 2)], constraints=(nlc,), popsize=1, rng=1, maxiter=100 + ) + + # a UserWarning is issued 
because the 'trust-constr' polishing is + # attempted on the least infeasible solution found. + with warns(UserWarning): + res = solver.solve() + + assert res.maxcv > 0 + assert not res.success + + # test _promote_lowest_energy works when none of the population is + # feasible. In this case, the solution with the lowest constraint + # violation should be promoted. + solver = DifferentialEvolutionSolver( + rosen, [(0, 2), (0, 2)], constraints=(nlc,), polish=False) + next(solver) + assert not solver.feasible.all() + assert not np.isfinite(solver.population_energies).all() + + # now swap two of the entries in the population + l = 20 + cv = solver.constraint_violation[0] + + solver.population_energies[[0, l]] = solver.population_energies[[l, 0]] + solver.population[[0, l], :] = solver.population[[l, 0], :] + solver.constraint_violation[[0, l], :] = ( + solver.constraint_violation[[l, 0], :]) + + solver._promote_lowest_energy() + assert_equal(solver.constraint_violation[0], cv) + + def test_accept_trial(self): + # _accept_trial(self, energy_trial, feasible_trial, cv_trial, + # energy_orig, feasible_orig, cv_orig) + def constr_f(x): + return [x[0] + x[1]] + nlc = NonlinearConstraint(constr_f, -np.inf, 1.9) + solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)], + constraints=(nlc,)) + fn = solver._accept_trial + # both solutions are feasible, select lower energy + assert fn(0.1, True, np.array([0.]), 1.0, True, np.array([0.])) + assert (fn(1.0, True, np.array([0.0]), 0.1, True, np.array([0.0])) is False) + assert fn(0.1, True, np.array([0.]), 0.1, True, np.array([0.])) + + # trial is feasible, original is not + assert fn(9.9, True, np.array([0.]), 1.0, False, np.array([1.])) + + # trial and original are infeasible + # cv_trial have to be <= cv_original to be better + assert (fn(0.1, False, np.array([0.5, 0.5]), + 1.0, False, np.array([1., 1.0]))) + assert (fn(0.1, False, np.array([0.5, 0.5]), + 1.0, False, np.array([1., 0.50]))) + assert not (fn(1.0, False, np.array([0.5, 0.5]), + 1.0, False, np.array([1.0, 0.4]))) + + def test_constraint_wrapper(self): + lb = np.array([0, 20, 30]) + ub = np.array([0.5, np.inf, 70]) + x0 = np.array([1, 2, 3]) + pc = _ConstraintWrapper(Bounds(lb, ub), x0) + assert (pc.violation(x0) > 0).any() + assert (pc.violation([0.25, 21, 31]) == 0).all() + + # check vectorized Bounds constraint + xs = np.arange(1, 16).reshape(5, 3) + violations = [] + for x in xs: + violations.append(pc.violation(x)) + np.testing.assert_allclose(pc.violation(xs.T), np.array(violations).T) + + x0 = np.array([1, 2, 3, 4]) + A = np.array([[1, 2, 3, 4], [5, 0, 0, 6], [7, 0, 8, 0]]) + pc = _ConstraintWrapper(LinearConstraint(A, -np.inf, 0), x0) + assert (pc.violation(x0) > 0).any() + assert (pc.violation([-10, 2, -10, 4]) == 0).all() + + # check vectorized LinearConstraint, for 7 lots of parameter vectors + # with each parameter vector being 4 long, with 3 constraints + # xs is the same shape as stored in the differential evolution + # population, but it's sent to the violation function as (len(x), M) + xs = np.arange(1, 29).reshape(7, 4) + violations = [] + for x in xs: + violations.append(pc.violation(x)) + np.testing.assert_allclose(pc.violation(xs.T), np.array(violations).T) + + pc = _ConstraintWrapper(LinearConstraint(csr_matrix(A), -np.inf, 0), + x0) + assert (pc.violation(x0) > 0).any() + assert (pc.violation([-10, 2, -10, 4]) == 0).all() + + def fun(x): + return A.dot(x) + + nonlinear = NonlinearConstraint(fun, -np.inf, 0) + pc = _ConstraintWrapper(nonlinear, [-10, 2, -10, 4]) + 
assert (pc.violation(x0) > 0).any() + assert (pc.violation([-10, 2, -10, 4]) == 0).all() + + def test_constraint_wrapper_violation(self): + def cons_f(x): + # written in vectorised form to accept an array of (N, S) + # returning (M, S) + # where N is the number of parameters, + # S is the number of solution vectors to be examined, + # and M is the number of constraint components + return np.array([x[0] ** 2 + x[1], + x[0] ** 2 - x[1]]) + + nlc = NonlinearConstraint(cons_f, [-1, -0.8500], [2, 2]) + pc = _ConstraintWrapper(nlc, [0.5, 1]) + assert np.size(pc.bounds[0]) == 2 + + xs = [(0.5, 1), (0.5, 1.2), (1.2, 1.2), (0.1, -1.2), (0.1, 2.0)] + vs = [(0, 0), (0, 0.1), (0.64, 0), (0.19, 0), (0.01, 1.14)] + + for x, v in zip(xs, vs): + assert_allclose(pc.violation(x), v) + + # now check that we can vectorize the constraint wrapper + assert_allclose(pc.violation(np.array(xs).T), + np.array(vs).T) + assert pc.fun(np.array(xs).T).shape == (2, len(xs)) + assert pc.violation(np.array(xs).T).shape == (2, len(xs)) + assert pc.num_constr == 2 + assert pc.parameter_count == 2 + + def test_matrix_linear_constraint(self): + # gh20041 supplying an np.matrix to construct a LinearConstraint caused + # _ConstraintWrapper to start returning constraint violations of the + # wrong shape. + with suppress_warnings() as sup: + sup.filter(PendingDeprecationWarning) + matrix = np.matrix([[1, 1, 1, 1.], + [2, 2, 2, 2.]]) + lc = LinearConstraint(matrix, 0, 1) + x0 = np.ones(4) + cw = _ConstraintWrapper(lc, x0) + # the shape of the constraint violation should be the same as the number + # of constraints applied. + assert cw.violation(x0).shape == (2,) + + # let's try a vectorised violation call. + xtrial = np.arange(4 * 5).reshape(4, 5) + assert cw.violation(xtrial).shape == (2, 5) + + @pytest.mark.fail_slow(20) + def test_L1(self): + # Lampinen ([5]) test problem 1 + + def f(x): + x = np.hstack(([0], x)) # 1-indexed to match reference + fun = np.sum(5*x[1:5]) - 5*x[1:5]@x[1:5] - np.sum(x[5:]) + return fun + + A = np.zeros((10, 14)) # 1-indexed to match reference + A[1, [1, 2, 10, 11]] = 2, 2, 1, 1 + A[2, [1, 10]] = -8, 1 + A[3, [4, 5, 10]] = -2, -1, 1 + A[4, [1, 3, 10, 11]] = 2, 2, 1, 1 + A[5, [2, 11]] = -8, 1 + A[6, [6, 7, 11]] = -2, -1, 1 + A[7, [2, 3, 11, 12]] = 2, 2, 1, 1 + A[8, [3, 12]] = -8, 1 + A[9, [8, 9, 12]] = -2, -1, 1 + A = A[1:, 1:] + + b = np.array([10, 0, 0, 10, 0, 0, 10, 0, 0]) + + L = LinearConstraint(A, -np.inf, b) + + bounds = [(0, 1)]*9 + [(0, 100)]*3 + [(0, 1)] + + # using a lower popsize to speed the test up + res = differential_evolution( + f, bounds, strategy='best1bin', rng=12345, constraints=(L,), + popsize=5, tol=0.01 + ) + + x_opt = (1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 3, 3, 1) + f_opt = -15 + + assert_allclose(f(x_opt), f_opt, atol=6e-4) + assert res.success + assert_allclose(res.x, x_opt, atol=6e-4) + assert_allclose(res.fun, f_opt, atol=5e-3) + assert_(np.all(A@res.x <= b)) + assert_(np.all(res.x >= np.array(bounds)[:, 0])) + assert_(np.all(res.x <= np.array(bounds)[:, 1])) + + # now repeat the same solve, using the same overall constraints, + # but using a sparse matrix for the LinearConstraint instead of an + # array + + L = LinearConstraint(csr_matrix(A), -np.inf, b) + + # using a lower popsize to speed the test up + res = differential_evolution( + f, bounds, strategy='best1bin', rng=1211134, constraints=(L,), + popsize=2, tol=0.05 + ) + + assert_allclose(f(x_opt), f_opt) + assert res.success + assert_allclose(res.x, x_opt, atol=5e-4) + assert_allclose(res.fun, f_opt, atol=5e-3) + 
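test_L1, in progress here, and the other Lampinen problems pass LinearConstraint / NonlinearConstraint objects straight to differential_evolution. A hedged, scaled-down sketch of the same pattern on a toy problem; the objective, bounds and limits are illustrative rather than taken from the reference problems:

import numpy as np
from scipy.optimize import (differential_evolution, LinearConstraint,
                            NonlinearConstraint)

def objective(x):
    return (x[0] - 1.5) ** 2 + (x[1] - 1.5) ** 2

lc = LinearConstraint(np.array([[1.0, 1.0]]), -np.inf, 1.9)         # x0 + x1 <= 1.9
nlc = NonlinearConstraint(lambda x: x[0] ** 2 + x[1], -np.inf, 1.8)

res = differential_evolution(objective, [(0, 2), (0, 2)],
                             constraints=(lc, nlc), tol=0.01, polish=False)
print(res.x, res.fun, res.maxcv)   # maxcv should be 0 for a feasible solution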
assert_(np.all(A@res.x <= b)) + assert_(np.all(res.x >= np.array(bounds)[:, 0])) + assert_(np.all(res.x <= np.array(bounds)[:, 1])) + + # now repeat the same solve, using the same overall constraints, + # but specify half the constraints in terms of LinearConstraint, + # and the other half by NonlinearConstraint + def c1(x): + x = np.hstack(([0], x)) + return [2*x[2] + 2*x[3] + x[11] + x[12], + -8*x[3] + x[12]] + + def c2(x): + x = np.hstack(([0], x)) + return -2*x[8] - x[9] + x[12] + + L = LinearConstraint(A[:5, :], -np.inf, b[:5]) + L2 = LinearConstraint(A[5:6, :], -np.inf, b[5:6]) + N = NonlinearConstraint(c1, -np.inf, b[6:8]) + N2 = NonlinearConstraint(c2, -np.inf, b[8:9]) + constraints = (L, N, L2, N2) + + with suppress_warnings() as sup: + sup.filter(UserWarning) + res = differential_evolution( + f, bounds, strategy='best1bin', rng=1211134, + constraints=constraints, popsize=2, tol=0.05 + ) + + assert_allclose(res.x, x_opt, atol=6e-4) + assert_allclose(res.fun, f_opt, atol=5e-3) + assert_(np.all(A@res.x <= b)) + assert_(np.all(res.x >= np.array(bounds)[:, 0])) + assert_(np.all(res.x <= np.array(bounds)[:, 1])) + + @pytest.mark.fail_slow(10) + def test_L2(self): + # Lampinen ([5]) test problem 2 + + def f(x): + x = np.hstack(([0], x)) # 1-indexed to match reference + fun = ((x[1]-10)**2 + 5*(x[2]-12)**2 + x[3]**4 + 3*(x[4]-11)**2 + + 10*x[5]**6 + 7*x[6]**2 + x[7]**4 - 4*x[6]*x[7] - 10*x[6] - + 8*x[7]) + return fun + + def c1(x): + x = np.hstack(([0], x)) # 1-indexed to match reference + return [127 - 2*x[1]**2 - 3*x[2]**4 - x[3] - 4*x[4]**2 - 5*x[5], + 196 - 23*x[1] - x[2]**2 - 6*x[6]**2 + 8*x[7], + 282 - 7*x[1] - 3*x[2] - 10*x[3]**2 - x[4] + x[5], + -4*x[1]**2 - x[2]**2 + 3*x[1]*x[2] - 2*x[3]**2 - + 5*x[6] + 11*x[7]] + + N = NonlinearConstraint(c1, 0, np.inf) + bounds = [(-10, 10)]*7 + constraints = (N) + + with suppress_warnings() as sup: + sup.filter(UserWarning) + res = differential_evolution(f, bounds, strategy='best1bin', + rng=1234, constraints=constraints) + + f_opt = 680.6300599487869 + x_opt = (2.330499, 1.951372, -0.4775414, 4.365726, + -0.6244870, 1.038131, 1.594227) + + assert_allclose(f(x_opt), f_opt) + assert_allclose(res.fun, f_opt) + assert_allclose(res.x, x_opt, atol=1e-5) + assert res.success + assert_(np.all(np.array(c1(res.x)) >= 0)) + assert_(np.all(res.x >= np.array(bounds)[:, 0])) + assert_(np.all(res.x <= np.array(bounds)[:, 1])) + + @pytest.mark.fail_slow(10) + def test_L3(self): + # Lampinen ([5]) test problem 3 + + def f(x): + x = np.hstack(([0], x)) # 1-indexed to match reference + fun = (x[1]**2 + x[2]**2 + x[1]*x[2] - 14*x[1] - 16*x[2] + + (x[3]-10)**2 + 4*(x[4]-5)**2 + (x[5]-3)**2 + 2*(x[6]-1)**2 + + 5*x[7]**2 + 7*(x[8]-11)**2 + 2*(x[9]-10)**2 + + (x[10] - 7)**2 + 45 + ) + return fun # maximize + + A = np.zeros((4, 11)) + A[1, [1, 2, 7, 8]] = -4, -5, 3, -9 + A[2, [1, 2, 7, 8]] = -10, 8, 17, -2 + A[3, [1, 2, 9, 10]] = 8, -2, -5, 2 + A = A[1:, 1:] + b = np.array([-105, 0, -12]) + + def c1(x): + x = np.hstack(([0], x)) # 1-indexed to match reference + return [3*x[1] - 6*x[2] - 12*(x[9]-8)**2 + 7*x[10], + -3*(x[1]-2)**2 - 4*(x[2]-3)**2 - 2*x[3]**2 + 7*x[4] + 120, + -x[1]**2 - 2*(x[2]-2)**2 + 2*x[1]*x[2] - 14*x[5] + 6*x[6], + -5*x[1]**2 - 8*x[2] - (x[3]-6)**2 + 2*x[4] + 40, + -0.5*(x[1]-8)**2 - 2*(x[2]-4)**2 - 3*x[5]**2 + x[6] + 30] + + L = LinearConstraint(A, b, np.inf) + N = NonlinearConstraint(c1, 0, np.inf) + bounds = [(-10, 10)]*10 + constraints = (L, N) + + with suppress_warnings() as sup: + sup.filter(UserWarning) + res = differential_evolution(f, 
bounds, rng=1234, + constraints=constraints, popsize=3) + + x_opt = (2.171996, 2.363683, 8.773926, 5.095984, 0.9906548, + 1.430574, 1.321644, 9.828726, 8.280092, 8.375927) + f_opt = 24.3062091 + + assert_allclose(f(x_opt), f_opt, atol=1e-5) + assert_allclose(res.x, x_opt, atol=1e-6) + assert_allclose(res.fun, f_opt, atol=1e-5) + assert res.success + assert_(np.all(A @ res.x >= b)) + assert_(np.all(np.array(c1(res.x)) >= 0)) + assert_(np.all(res.x >= np.array(bounds)[:, 0])) + assert_(np.all(res.x <= np.array(bounds)[:, 1])) + + @pytest.mark.fail_slow(10) + def test_L4(self): + # Lampinen ([5]) test problem 4 + def f(x): + return np.sum(x[:3]) + + A = np.zeros((4, 9)) + A[1, [4, 6]] = 0.0025, 0.0025 + A[2, [5, 7, 4]] = 0.0025, 0.0025, -0.0025 + A[3, [8, 5]] = 0.01, -0.01 + A = A[1:, 1:] + b = np.array([1, 1, 1]) + + def c1(x): + x = np.hstack(([0], x)) # 1-indexed to match reference + return [x[1]*x[6] - 833.33252*x[4] - 100*x[1] + 83333.333, + x[2]*x[7] - 1250*x[5] - x[2]*x[4] + 1250*x[4], + x[3]*x[8] - 1250000 - x[3]*x[5] + 2500*x[5]] + + L = LinearConstraint(A, -np.inf, 1) + N = NonlinearConstraint(c1, 0, np.inf) + + bounds = [(100, 10000)] + [(1000, 10000)]*2 + [(10, 1000)]*5 + constraints = (L, N) + + with suppress_warnings() as sup: + sup.filter(UserWarning) + res = differential_evolution( + f, bounds, strategy='best1bin', rng=1234, + constraints=constraints, popsize=3, tol=0.05 + ) + + f_opt = 7049.248 + + x_opt = [579.306692, 1359.97063, 5109.9707, 182.0177, 295.601172, + 217.9823, 286.416528, 395.601172] + + assert_allclose(f(x_opt), f_opt, atol=0.001) + assert_allclose(res.fun, f_opt, atol=0.001) + + # use higher tol here for 32-bit Windows, see gh-11693 + if (platform.system() == 'Windows' and np.dtype(np.intp).itemsize < 8): + assert_allclose(res.x, x_opt, rtol=2.4e-6, atol=0.0035) + else: + # tolerance determined from macOS + MKL failure, see gh-12701 + assert_allclose(res.x, x_opt, rtol=5e-6, atol=0.0024) + + assert res.success + assert_(np.all(A @ res.x <= b)) + assert_(np.all(np.array(c1(res.x)) >= 0)) + assert_(np.all(res.x >= np.array(bounds)[:, 0])) + assert_(np.all(res.x <= np.array(bounds)[:, 1])) + + @pytest.mark.fail_slow(10) + def test_L5(self): + # Lampinen ([5]) test problem 5 + + def f(x): + x = np.hstack(([0], x)) # 1-indexed to match reference + fun = (np.sin(2*np.pi*x[1])**3*np.sin(2*np.pi*x[2]) / + (x[1]**3*(x[1]+x[2]))) + return -fun # maximize + + def c1(x): + x = np.hstack(([0], x)) # 1-indexed to match reference + return [x[1]**2 - x[2] + 1, + 1 - x[1] + (x[2]-4)**2] + + N = NonlinearConstraint(c1, -np.inf, 0) + bounds = [(0, 10)]*2 + constraints = (N) + + res = differential_evolution(f, bounds, strategy='rand1bin', rng=1234, + constraints=constraints) + + x_opt = (1.22797135, 4.24537337) + f_opt = -0.095825 + assert_allclose(f(x_opt), f_opt, atol=2e-5) + assert_allclose(res.fun, f_opt, atol=1e-4) + assert res.success + assert_(np.all(np.array(c1(res.x)) <= 0)) + assert_(np.all(res.x >= np.array(bounds)[:, 0])) + assert_(np.all(res.x <= np.array(bounds)[:, 1])) + + @pytest.mark.fail_slow(10) + def test_L6(self): + # Lampinen ([5]) test problem 6 + def f(x): + x = np.hstack(([0], x)) # 1-indexed to match reference + fun = (x[1]-10)**3 + (x[2] - 20)**3 + return fun + + def c1(x): + x = np.hstack(([0], x)) # 1-indexed to match reference + return [(x[1]-5)**2 + (x[2] - 5)**2 - 100, + -(x[1]-6)**2 - (x[2] - 5)**2 + 82.81] + + N = NonlinearConstraint(c1, 0, np.inf) + bounds = [(13, 100), (0, 100)] + constraints = (N) + res = differential_evolution(f, bounds, 
strategy='rand1bin', rng=1234, + constraints=constraints, tol=1e-7) + x_opt = (14.095, 0.84296) + f_opt = -6961.814744 + + assert_allclose(f(x_opt), f_opt, atol=1e-6) + assert_allclose(res.fun, f_opt, atol=0.001) + assert_allclose(res.x, x_opt, atol=1e-4) + assert res.success + assert_(np.all(np.array(c1(res.x)) >= 0)) + assert_(np.all(res.x >= np.array(bounds)[:, 0])) + assert_(np.all(res.x <= np.array(bounds)[:, 1])) + + def test_L7(self): + # Lampinen ([5]) test problem 7 + def f(x): + x = np.hstack(([0], x)) # 1-indexed to match reference + fun = (5.3578547*x[3]**2 + 0.8356891*x[1]*x[5] + + 37.293239*x[1] - 40792.141) + return fun + + def c1(x): + x = np.hstack(([0], x)) # 1-indexed to match reference + return [ + 85.334407 + 0.0056858*x[2]*x[5] + 0.0006262*x[1]*x[4] - + 0.0022053*x[3]*x[5], + + 80.51249 + 0.0071317*x[2]*x[5] + 0.0029955*x[1]*x[2] + + 0.0021813*x[3]**2, + + 9.300961 + 0.0047026*x[3]*x[5] + 0.0012547*x[1]*x[3] + + 0.0019085*x[3]*x[4] + ] + + N = NonlinearConstraint(c1, [0, 90, 20], [92, 110, 25]) + + bounds = [(78, 102), (33, 45)] + [(27, 45)]*3 + constraints = (N) + + res = differential_evolution(f, bounds, strategy='rand1bin', rng=1234, + constraints=constraints) + + # using our best solution, rather than Lampinen/Koziel. Koziel solution + # doesn't satisfy constraints, Lampinen f_opt just plain wrong. + x_opt = [78.00000686, 33.00000362, 29.99526064, 44.99999971, + 36.77579979] + + f_opt = -30665.537578 + + assert_allclose(f(x_opt), f_opt) + assert_allclose(res.x, x_opt, atol=1e-3) + assert_allclose(res.fun, f_opt, atol=1e-3) + + assert res.success + assert_(np.all(np.array(c1(res.x)) >= np.array([0, 90, 20]))) + assert_(np.all(np.array(c1(res.x)) <= np.array([92, 110, 25]))) + assert_(np.all(res.x >= np.array(bounds)[:, 0])) + assert_(np.all(res.x <= np.array(bounds)[:, 1])) + + @pytest.mark.xslow + @pytest.mark.xfail(platform.machine() == 'ppc64le', + reason="fails on ppc64le") + def test_L8(self): + def f(x): + x = np.hstack(([0], x)) # 1-indexed to match reference + fun = 3*x[1] + 0.000001*x[1]**3 + 2*x[2] + 0.000002/3*x[2]**3 + return fun + + A = np.zeros((3, 5)) + A[1, [4, 3]] = 1, -1 + A[2, [3, 4]] = 1, -1 + A = A[1:, 1:] + b = np.array([-.55, -.55]) + + def c1(x): + x = np.hstack(([0], x)) # 1-indexed to match reference + return [ + 1000*np.sin(-x[3]-0.25) + 1000*np.sin(-x[4]-0.25) + + 894.8 - x[1], + 1000*np.sin(x[3]-0.25) + 1000*np.sin(x[3]-x[4]-0.25) + + 894.8 - x[2], + 1000*np.sin(x[4]-0.25) + 1000*np.sin(x[4]-x[3]-0.25) + + 1294.8 + ] + L = LinearConstraint(A, b, np.inf) + N = NonlinearConstraint(c1, np.full(3, -0.001), np.full(3, 0.001)) + + bounds = [(0, 1200)]*2+[(-.55, .55)]*2 + constraints = (L, N) + + with suppress_warnings() as sup: + sup.filter(UserWarning) + # original Lampinen test was with rand1bin, but that takes a + # huge amount of CPU time. 
Changing strategy to best1bin speeds + # things up a lot + res = differential_evolution(f, bounds, strategy='best1bin', + rng=1234, constraints=constraints, + maxiter=5000) + + x_opt = (679.9453, 1026.067, 0.1188764, -0.3962336) + f_opt = 5126.4981 + + assert_allclose(f(x_opt), f_opt, atol=1e-3) + assert_allclose(res.x[:2], x_opt[:2], atol=2e-3) + assert_allclose(res.x[2:], x_opt[2:], atol=2e-3) + assert_allclose(res.fun, f_opt, atol=2e-2) + assert res.success + assert_(np.all(A@res.x >= b)) + assert_(np.all(np.array(c1(res.x)) >= -0.001)) + assert_(np.all(np.array(c1(res.x)) <= 0.001)) + assert_(np.all(res.x >= np.array(bounds)[:, 0])) + assert_(np.all(res.x <= np.array(bounds)[:, 1])) + + @pytest.mark.fail_slow(5) + def test_L9(self): + # Lampinen ([5]) test problem 9 + + def f(x): + x = np.hstack(([0], x)) # 1-indexed to match reference + return x[1]**2 + (x[2]-1)**2 + + def c1(x): + x = np.hstack(([0], x)) # 1-indexed to match reference + return [x[2] - x[1]**2] + + N = NonlinearConstraint(c1, [-.001], [0.001]) + + bounds = [(-1, 1)]*2 + constraints = (N) + res = differential_evolution(f, bounds, strategy='rand1bin', rng=1234, + constraints=constraints) + + x_opt = [np.sqrt(2)/2, 0.5] + f_opt = 0.75 + + assert_allclose(f(x_opt), f_opt) + assert_allclose(np.abs(res.x), x_opt, atol=1e-3) + assert_allclose(res.fun, f_opt, atol=1e-3) + assert res.success + assert_(np.all(np.array(c1(res.x)) >= -0.001)) + assert_(np.all(np.array(c1(res.x)) <= 0.001)) + assert_(np.all(res.x >= np.array(bounds)[:, 0])) + assert_(np.all(res.x <= np.array(bounds)[:, 1])) + + @pytest.mark.fail_slow(10) + def test_integrality(self): + # test fitting discrete distribution to data + rng = np.random.default_rng(6519843218105) + dist = stats.nbinom + shapes = (5, 0.5) + x = dist.rvs(*shapes, size=10000, random_state=rng) + + def func(p, *args): + dist, x = args + # negative log-likelihood function + ll = -np.log(dist.pmf(x, *p)).sum(axis=-1) + if np.isnan(ll): # occurs when x is outside of support + ll = np.inf # we don't want that + return ll + + integrality = [True, False] + bounds = [(1, 18), (0, 0.95)] + + res = differential_evolution(func, bounds, args=(dist, x), + integrality=integrality, polish=False, + rng=rng) + # tolerance has to be fairly relaxed for the second parameter + # because we're fitting a distribution to random variates. + assert res.x[0] == 5 + assert_allclose(res.x, shapes, rtol=0.025) + + # check that we can still use integrality constraints with polishing + res2 = differential_evolution(func, bounds, args=(dist, x), + integrality=integrality, polish=True, + rng=rng) + + def func2(p, *args): + n, dist, x = args + return func(np.array([n, p[0]]), dist, x) + + # compare the DE derived solution to an LBFGSB solution (that doesn't + # have to find the integral values). Note we're setting x0 to be the + # output from the first DE result, thereby making the polishing step + # and this minimisation pretty much equivalent. 
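test_integrality, which continues just below, forces the first fitted parameter to take integer values. A hedged minimal sketch of the integrality keyword on a toy objective whose continuous optimum sits between integers:

from scipy.optimize import differential_evolution

def f(x):
    # continuous optimum near (4.6, 0.3); the first component is forced integral
    return (x[0] - 4.6) ** 2 + (x[1] - 0.3) ** 2

res = differential_evolution(f, [(1, 10), (0, 1)],
                             integrality=[True, False], polish=False)
print(res.x)   # expect roughly [5.0, 0.3]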
+ LBFGSB = minimize(func2, res2.x[1], args=(5, dist, x), + bounds=[(0, 0.95)]) + assert_allclose(res2.x[1], LBFGSB.x) + assert res2.fun <= res.fun + + def test_integrality_limits(self): + def f(x): + return x + + integrality = [True, False, True] + bounds = [(0.2, 1.1), (0.9, 2.2), (3.3, 4.9)] + + # no integrality constraints + solver = DifferentialEvolutionSolver(f, bounds=bounds, polish=False, + integrality=False) + assert_allclose(solver.limits[0], [0.2, 0.9, 3.3]) + assert_allclose(solver.limits[1], [1.1, 2.2, 4.9]) + + # with integrality constraints + solver = DifferentialEvolutionSolver(f, bounds=bounds, polish=False, + integrality=integrality) + assert_allclose(solver.limits[0], [0.5, 0.9, 3.5]) + assert_allclose(solver.limits[1], [1.5, 2.2, 4.5]) + assert_equal(solver.integrality, [True, False, True]) + assert solver.polish is False + + bounds = [(-1.2, -0.9), (0.9, 2.2), (-10.3, 4.1)] + solver = DifferentialEvolutionSolver(f, bounds=bounds, polish=False, + integrality=integrality) + assert_allclose(solver.limits[0], [-1.5, 0.9, -10.5]) + assert_allclose(solver.limits[1], [-0.5, 2.2, 4.5]) + + # A lower bound of -1.2 is converted to + # np.nextafter(np.ceil(-1.2) - 0.5, np.inf) + # with a similar process to the upper bound. Check that the + # conversions work + assert_allclose(np.round(solver.limits[0]), [-1.0, 1.0, -10.0]) + assert_allclose(np.round(solver.limits[1]), [-1.0, 2.0, 4.0]) + + bounds = [(-10.2, -8.1), (0.9, 2.2), (-10.9, -9.9999)] + solver = DifferentialEvolutionSolver(f, bounds=bounds, polish=False, + integrality=integrality) + assert_allclose(solver.limits[0], [-10.5, 0.9, -10.5]) + assert_allclose(solver.limits[1], [-8.5, 2.2, -9.5]) + + bounds = [(-10.2, -10.1), (0.9, 2.2), (-10.9, -9.9999)] + with pytest.raises(ValueError, match='One of the integrality'): + DifferentialEvolutionSolver(f, bounds=bounds, polish=False, + integrality=integrality) + + @pytest.mark.thread_unsafe + @pytest.mark.fail_slow(10) + def test_vectorized(self): + def quadratic(x): + return np.sum(x**2) + + def quadratic_vec(x): + return np.sum(x**2, axis=0) + + # A vectorized function needs to accept (len(x), S) and return (S,) + with pytest.raises(RuntimeError, match='The vectorized function'): + differential_evolution(quadratic, self.bounds, + vectorized=True, updating='deferred') + + # vectorized overrides the updating keyword, check for warning + with warns(UserWarning, match="differential_evolution: the 'vector"): + differential_evolution(quadratic_vec, self.bounds, + vectorized=True) + + # vectorized defers to the workers keyword, check for warning + with warns(UserWarning, match="differential_evolution: the 'workers"): + differential_evolution(quadratic_vec, self.bounds, + vectorized=True, workers=map, + updating='deferred') + + ncalls = [0] + + def rosen_vec(x): + ncalls[0] += 1 + return rosen(x) + + bounds = [(0, 10), (0, 10)] + res1 = differential_evolution(rosen, bounds, updating='deferred', + rng=1) + res2 = differential_evolution(rosen_vec, bounds, vectorized=True, + updating='deferred', rng=1) + + # the two minimisation runs should be functionally equivalent + assert_allclose(res1.x, res2.x) + assert ncalls[0] == res2.nfev + assert res1.nit == res2.nit + + def test_vectorized_constraints(self): + def constr_f(x): + return np.array([x[0] + x[1]]) + + def constr_f2(x): + return np.array([x[0]**2 + x[1], x[0] - x[1]]) + + nlc1 = NonlinearConstraint(constr_f, -np.inf, 1.9) + nlc2 = NonlinearConstraint(constr_f2, (0.9, 0.5), (2.0, 2.0)) + + def rosen_vec(x): + # accept an (len(x0), S) 
array, returning a (S,) array + v = 100 * (x[1:] - x[:-1]**2.0)**2.0 + v += (1 - x[:-1])**2.0 + return np.squeeze(v) + + bounds = [(0, 10), (0, 10)] + + res1 = differential_evolution(rosen, bounds, updating='deferred', + rng=1, constraints=[nlc1, nlc2], + polish=False) + res2 = differential_evolution(rosen_vec, bounds, vectorized=True, + updating='deferred', rng=1, + constraints=[nlc1, nlc2], + polish=False) + # the two minimisation runs should be functionally equivalent + assert_allclose(res1.x, res2.x) + + def test_constraint_violation_error_message(self): + + def func(x): + return np.cos(x[0]) + np.sin(x[1]) + + # Intentionally infeasible constraints. + c0 = NonlinearConstraint(lambda x: x[1] - (x[0]-1)**2, 0, np.inf) + c1 = NonlinearConstraint(lambda x: x[1] + x[0]**2, -np.inf, 0) + + result = differential_evolution(func, + bounds=[(-1, 2), (-1, 1)], + constraints=[c0, c1], + maxiter=10, + polish=False, + rng=864197532) + assert result.success is False + # The numerical value in the error message might be sensitive to + # changes in the implementation. It can be updated if the code is + # changed. The essential part of the test is that there is a number + # after the '=', so if necessary, the text could be reduced to, say, + # "MAXCV = 0.". + assert "MAXCV = 0." in result.message + + @pytest.mark.fail_slow(20) # fail-slow exception by request - see gh-20806 + def test_strategy_fn(self): + # examines ability to customize strategy by mimicking one of the + # in-built strategies + parameter_count = 4 + popsize = 10 + bounds = [(0, 10.)] * parameter_count + total_popsize = parameter_count * popsize + mutation = 0.8 + recombination = 0.7 + + calls = [0] + def custom_strategy_fn(candidate, population, rng=None): + calls[0] += 1 + trial = np.copy(population[candidate]) + fill_point = rng.choice(parameter_count) + + pool = np.arange(total_popsize) + rng.shuffle(pool) + idxs = pool[:2 + 1] + idxs = idxs[idxs != candidate][:2] + + r0, r1 = idxs[:2] + + bprime = (population[0] + mutation * + (population[r0] - population[r1])) + + crossovers = rng.uniform(size=parameter_count) + crossovers = crossovers < recombination + crossovers[fill_point] = True + trial = np.where(crossovers, bprime, trial) + return trial + + solver = DifferentialEvolutionSolver( + rosen, + bounds, + popsize=popsize, + recombination=recombination, + mutation=mutation, + maxiter=2, + strategy=custom_strategy_fn, + rng=10, + polish=False + ) + assert solver.strategy is custom_strategy_fn + solver.solve() + assert calls[0] > 0 + + # check custom strategy works with updating='deferred' + res = differential_evolution( + rosen, bounds, strategy=custom_strategy_fn, updating='deferred' + ) + assert res.success + + def custom_strategy_fn(candidate, population, rng=None): + return np.array([1.0, 2.0]) + + with pytest.raises(RuntimeError, match="strategy*"): + differential_evolution( + rosen, + bounds, + strategy=custom_strategy_fn + ) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test__dual_annealing.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test__dual_annealing.py new file mode 100644 index 0000000000000000000000000000000000000000..3465be508491aae411e113167b8fdd84f6d2d70b --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test__dual_annealing.py @@ -0,0 +1,416 @@ +# Dual annealing unit tests implementation. 
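test_vectorized and test_vectorized_constraints above check the calling convention for vectorized objectives: the function receives the whole trial population as an array of shape (len(x), S) and must return S energies. A hedged sketch of that convention; updating='deferred' is passed explicitly because vectorized=True otherwise overrides it with a warning, as the tests show:

import numpy as np
from scipy.optimize import differential_evolution

def sphere_vec(x):
    # x arrives with shape (number_of_parameters, S); return S energies
    return np.sum(x ** 2, axis=0)

res = differential_evolution(sphere_vec, [(-5, 5), (-5, 5)],
                             vectorized=True, updating='deferred', polish=False)
print(res.x, res.nfev)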
+# Copyright (c) 2018 Sylvain Gubian , +# Yang Xiang +# Author: Sylvain Gubian, PMP S.A. +""" +Unit tests for the dual annealing global optimizer +""" +from scipy.optimize import dual_annealing, Bounds + +from scipy.optimize._dual_annealing import EnergyState +from scipy.optimize._dual_annealing import LocalSearchWrapper +from scipy.optimize._dual_annealing import ObjectiveFunWrapper +from scipy.optimize._dual_annealing import StrategyChain +from scipy.optimize._dual_annealing import VisitingDistribution +from scipy.optimize import rosen, rosen_der +import pytest +import numpy as np +from numpy.testing import assert_equal, assert_allclose, assert_array_less +from pytest import raises as assert_raises +from scipy._lib._util import check_random_state + +import threading + + +class TestDualAnnealing: + + def setup_method(self): + # A function that returns always infinity for initialization tests + self.weirdfunc = lambda x: np.inf + # 2-D bounds for testing function + self.ld_bounds = [(-5.12, 5.12)] * 2 + # 4-D bounds for testing function + self.hd_bounds = self.ld_bounds * 4 + # Number of values to be generated for testing visit function + self.nbtestvalues = 5000 + self.high_temperature = 5230 + self.low_temperature = 0.1 + self.qv = 2.62 + self.seed = 1234 + self.rng = check_random_state(self.seed) + self.nb_fun_call = threading.local() + self.ngev = threading.local() + + def callback(self, x, f, context): + # For testing callback mechanism. Should stop for e <= 1 as + # the callback function returns True + if f <= 1.0: + return True + + def func(self, x, args=()): + # Using Rastrigin function for performing tests + if args: + shift = args + else: + shift = 0 + y = np.sum((x - shift) ** 2 - 10 * np.cos(2 * np.pi * ( + x - shift))) + 10 * np.size(x) + shift + if not hasattr(self.nb_fun_call, 'c'): + self.nb_fun_call.c = 0 + self.nb_fun_call.c += 1 + return y + + def rosen_der_wrapper(self, x, args=()): + if not hasattr(self.ngev, 'c'): + self.ngev.c = 0 + self.ngev.c += 1 + return rosen_der(x, *args) + + # FIXME: there are some discontinuities in behaviour as a function of `qv`, + # this needs investigating - see gh-12384 + @pytest.mark.parametrize('qv', [1.1, 1.41, 2, 2.62, 2.9]) + def test_visiting_stepping(self, qv): + lu = list(zip(*self.ld_bounds)) + lower = np.array(lu[0]) + upper = np.array(lu[1]) + dim = lower.size + vd = VisitingDistribution(lower, upper, qv, self.rng) + values = np.zeros(dim) + x_step_low = vd.visiting(values, 0, self.high_temperature) + # Make sure that only the first component is changed + assert_equal(np.not_equal(x_step_low, 0), True) + values = np.zeros(dim) + x_step_high = vd.visiting(values, dim, self.high_temperature) + # Make sure that component other than at dim has changed + assert_equal(np.not_equal(x_step_high[0], 0), True) + + @pytest.mark.parametrize('qv', [2.25, 2.62, 2.9]) + def test_visiting_dist_high_temperature(self, qv): + lu = list(zip(*self.ld_bounds)) + lower = np.array(lu[0]) + upper = np.array(lu[1]) + vd = VisitingDistribution(lower, upper, qv, self.rng) + # values = np.zeros(self.nbtestvalues) + # for i in np.arange(self.nbtestvalues): + # values[i] = vd.visit_fn(self.high_temperature) + values = vd.visit_fn(self.high_temperature, self.nbtestvalues) + + # Visiting distribution is a distorted version of Cauchy-Lorentz + # distribution, and as no 1st and higher moments (no mean defined, + # no variance defined). 
+ # Check that big tails values are generated + assert_array_less(np.min(values), 1e-10) + assert_array_less(1e+10, np.max(values)) + + def test_reset(self): + owf = ObjectiveFunWrapper(self.weirdfunc) + lu = list(zip(*self.ld_bounds)) + lower = np.array(lu[0]) + upper = np.array(lu[1]) + es = EnergyState(lower, upper) + assert_raises(ValueError, es.reset, owf, check_random_state(None)) + + def test_low_dim(self): + ret = dual_annealing( + self.func, self.ld_bounds, rng=self.seed) + assert_allclose(ret.fun, 0., atol=1e-12) + assert ret.success + + @pytest.mark.fail_slow(10) + def test_high_dim(self): + ret = dual_annealing(self.func, self.hd_bounds, rng=self.seed) + assert_allclose(ret.fun, 0., atol=1e-12) + assert ret.success + + def test_low_dim_no_ls(self): + ret = dual_annealing(self.func, self.ld_bounds, + no_local_search=True, seed=self.seed) + assert_allclose(ret.fun, 0., atol=1e-4) + + @pytest.mark.fail_slow(10) + def test_high_dim_no_ls(self): + ret = dual_annealing(self.func, self.hd_bounds, + no_local_search=True, rng=self.seed) + assert_allclose(ret.fun, 0., atol=1.2e-4) + + def test_nb_fun_call(self): + self.nb_fun_call.c = 0 + ret = dual_annealing(self.func, self.ld_bounds, rng=self.seed) + assert_equal(self.nb_fun_call.c, ret.nfev) + + def test_nb_fun_call_no_ls(self): + self.nb_fun_call.c = 0 + ret = dual_annealing(self.func, self.ld_bounds, + no_local_search=True, rng=self.seed) + assert_equal(self.nb_fun_call.c, ret.nfev) + + def test_max_reinit(self): + assert_raises(ValueError, dual_annealing, self.weirdfunc, + self.ld_bounds) + + @pytest.mark.fail_slow(10) + def test_reproduce(self): + res1 = dual_annealing(self.func, self.ld_bounds, rng=self.seed) + res2 = dual_annealing(self.func, self.ld_bounds, rng=self.seed) + res3 = dual_annealing(self.func, self.ld_bounds, rng=self.seed) + # If we have reproducible results, x components found has to + # be exactly the same, which is not the case with no seeding + assert_equal(res1.x, res2.x) + assert_equal(res1.x, res3.x) + + def test_rand_gen(self): + # check that np.random.Generator can be used (numpy >= 1.17) + # obtain a np.random.Generator object + rng = np.random.default_rng(1) + + res1 = dual_annealing(self.func, self.ld_bounds, rng=rng) + # seed again + rng = np.random.default_rng(1) + res2 = dual_annealing(self.func, self.ld_bounds, rng=rng) + # If we have reproducible results, x components found has to + # be exactly the same, which is not the case with no seeding + assert_equal(res1.x, res2.x) + + def test_bounds_integrity(self): + wrong_bounds = [(-5.12, 5.12), (1, 0), (5.12, 5.12)] + assert_raises(ValueError, dual_annealing, self.func, + wrong_bounds) + + def test_bound_validity(self): + invalid_bounds = [(-5, 5), (-np.inf, 0), (-5, 5)] + assert_raises(ValueError, dual_annealing, self.func, + invalid_bounds) + invalid_bounds = [(-5, 5), (0, np.inf), (-5, 5)] + assert_raises(ValueError, dual_annealing, self.func, + invalid_bounds) + invalid_bounds = [(-5, 5), (0, np.nan), (-5, 5)] + assert_raises(ValueError, dual_annealing, self.func, + invalid_bounds) + + @pytest.mark.thread_unsafe + def test_deprecated_local_search_options_bounds(self): + def func(x): + return np.sum((x - 5) * (x - 1)) + bounds = list(zip([-6, -5], [6, 5])) + # Test bounds can be passed (see gh-10831) + + with pytest.warns(RuntimeWarning, match=r"Method CG cannot handle "): + dual_annealing( + func, + bounds=bounds, + minimizer_kwargs={"method": "CG", "bounds": bounds}) + + @pytest.mark.thread_unsafe + def test_minimizer_kwargs_bounds(self): + def 
func(x): + return np.sum((x - 5) * (x - 1)) + bounds = list(zip([-6, -5], [6, 5])) + # Test bounds can be passed (see gh-10831) + dual_annealing( + func, + bounds=bounds, + minimizer_kwargs={"method": "SLSQP", "bounds": bounds}) + + with pytest.warns(RuntimeWarning, match=r"Method CG cannot handle "): + dual_annealing( + func, + bounds=bounds, + minimizer_kwargs={"method": "CG", "bounds": bounds}) + + def test_max_fun_ls(self): + ret = dual_annealing(self.func, self.ld_bounds, maxfun=100, + rng=self.seed) + + ls_max_iter = min(max( + len(self.ld_bounds) * LocalSearchWrapper.LS_MAXITER_RATIO, + LocalSearchWrapper.LS_MAXITER_MIN), + LocalSearchWrapper.LS_MAXITER_MAX) + assert ret.nfev <= 100 + ls_max_iter + assert not ret.success + + def test_max_fun_no_ls(self): + ret = dual_annealing(self.func, self.ld_bounds, + no_local_search=True, maxfun=500, rng=self.seed) + assert ret.nfev <= 500 + assert not ret.success + + def test_maxiter(self): + ret = dual_annealing(self.func, self.ld_bounds, maxiter=700, + rng=self.seed) + assert ret.nit <= 700 + + # Testing that args are passed correctly for dual_annealing + def test_fun_args_ls(self): + ret = dual_annealing(self.func, self.ld_bounds, + args=((3.14159,)), rng=self.seed) + assert_allclose(ret.fun, 3.14159, atol=1e-6) + + # Testing that args are passed correctly for pure simulated annealing + def test_fun_args_no_ls(self): + ret = dual_annealing(self.func, self.ld_bounds, + args=((3.14159, )), no_local_search=True, + rng=self.seed) + assert_allclose(ret.fun, 3.14159, atol=1e-4) + + def test_callback_stop(self): + # Testing that callback make the algorithm stop for + # fun value <= 1.0 (see callback method) + ret = dual_annealing(self.func, self.ld_bounds, + callback=self.callback, rng=self.seed) + assert ret.fun <= 1.0 + assert 'stop early' in ret.message[0] + assert not ret.success + + @pytest.mark.parametrize('method, atol', [ + ('Nelder-Mead', 2e-5), + ('COBYLA', 1e-5), + ('COBYQA', 1e-8), + ('Powell', 1e-8), + ('CG', 1e-8), + ('BFGS', 1e-8), + ('TNC', 1e-8), + ('SLSQP', 2e-7), + ]) + def test_multi_ls_minimizer(self, method, atol): + ret = dual_annealing(self.func, self.ld_bounds, + minimizer_kwargs=dict(method=method), + rng=self.seed) + assert_allclose(ret.fun, 0., atol=atol) + + def test_wrong_restart_temp(self): + assert_raises(ValueError, dual_annealing, self.func, + self.ld_bounds, restart_temp_ratio=1) + assert_raises(ValueError, dual_annealing, self.func, + self.ld_bounds, restart_temp_ratio=0) + + def test_gradient_gnev(self): + minimizer_opts = { + 'jac': self.rosen_der_wrapper, + } + ret = dual_annealing(rosen, self.ld_bounds, + minimizer_kwargs=minimizer_opts, + rng=self.seed) + assert ret.njev == self.ngev.c + + @pytest.mark.fail_slow(10) + def test_from_docstring(self): + def func(x): + return np.sum(x * x - 10 * np.cos(2 * np.pi * x)) + 10 * np.size(x) + lw = [-5.12] * 10 + up = [5.12] * 10 + ret = dual_annealing(func, bounds=list(zip(lw, up)), rng=1234) + assert_allclose(ret.x, + [-4.26437714e-09, -3.91699361e-09, -1.86149218e-09, + -3.97165720e-09, -6.29151648e-09, -6.53145322e-09, + -3.93616815e-09, -6.55623025e-09, -6.05775280e-09, + -5.00668935e-09], atol=4e-8) + assert_allclose(ret.fun, 0.000000, atol=5e-13) + + @pytest.mark.parametrize('new_e, temp_step, accepted, accept_rate', [ + (0, 100, 1000, 1.0097587941791923), + (0, 2, 1000, 1.2599210498948732), + (10, 100, 878, 0.8786035869128718), + (10, 60, 695, 0.6812920690579612), + (2, 100, 990, 0.9897404249173424), + ]) + def test_accept_reject_probabilistic( + self, new_e, 
temp_step, accepted, accept_rate): + # Test accepts unconditionally with e < current_energy and + # probabilistically with e > current_energy + + rs = check_random_state(123) + + count_accepted = 0 + iterations = 1000 + + accept_param = -5 + current_energy = 1 + for _ in range(iterations): + energy_state = EnergyState(lower=None, upper=None) + # Set energy state with current_energy, any location. + energy_state.update_current(current_energy, [0]) + + chain = StrategyChain( + accept_param, None, None, None, rs, energy_state) + # Normally this is set in run() + chain.temperature_step = temp_step + + # Check if update is accepted. + chain.accept_reject(j=1, e=new_e, x_visit=[2]) + if energy_state.current_energy == new_e: + count_accepted += 1 + + assert count_accepted == accepted + + # Check accept rate + pqv = 1 - (1 - accept_param) * (new_e - current_energy) / temp_step + rate = 0 if pqv <= 0 else np.exp(np.log(pqv) / (1 - accept_param)) + + assert_allclose(rate, accept_rate) + + @pytest.mark.fail_slow(10) + def test_bounds_class(self): + # test that result does not depend on the bounds type + def func(x): + f = np.sum(x * x - 10 * np.cos(2 * np.pi * x)) + 10 * np.size(x) + return f + lw = [-5.12] * 5 + up = [5.12] * 5 + + # Unbounded global minimum is all zeros. Most bounds below will force + # a DV away from unbounded minimum and be active at solution. + up[0] = -2.0 + up[1] = -1.0 + lw[3] = 1.0 + lw[4] = 2.0 + + # run optimizations + bounds = Bounds(lw, up) + ret_bounds_class = dual_annealing(func, bounds=bounds, rng=1234) + + bounds_old = list(zip(lw, up)) + ret_bounds_list = dual_annealing(func, bounds=bounds_old, rng=1234) + + # test that found minima, function evaluations and iterations match + assert_allclose(ret_bounds_class.x, ret_bounds_list.x, atol=1e-8) + assert_allclose(ret_bounds_class.x, np.arange(-2, 3), atol=1e-7) + assert_allclose(ret_bounds_list.fun, ret_bounds_class.fun, atol=1e-9) + assert ret_bounds_list.nfev == ret_bounds_class.nfev + + @pytest.mark.fail_slow(10) + def test_callable_jac_hess_with_args_gh11052(self): + # dual_annealing used to fail when `jac` was callable and `args` were + # used; check that this is resolved. Example is from gh-11052. 
+
+        # extended to hess as part of closing gh20614
+        rng = np.random.default_rng(94253637693657847462)
+        def f(x, power):
+            return np.sum(np.exp(x ** power))
+
+        def jac(x, power):
+            return np.exp(x ** power) * power * x ** (power - 1)
+
+        def hess(x, power):
+            # calculated using WolframAlpha as d^2/dx^2 e^(x^p)
+            return np.diag(
+                power * np.exp(x ** power) * x ** (power - 2) *
+                (power * x ** power + power - 1)
+            )
+
+        def hessp(x, p, power):
+            return hess(x, power) @ p
+
+        res1 = dual_annealing(f, args=(2, ), bounds=[[0, 1], [0, 1]], rng=rng,
+                              minimizer_kwargs=dict(method='L-BFGS-B'))
+        res2 = dual_annealing(f, args=(2, ), bounds=[[0, 1], [0, 1]], rng=rng,
+                              minimizer_kwargs=dict(method='L-BFGS-B',
+                                                    jac=jac))
+        res3 = dual_annealing(f, args=(2, ), bounds=[[0, 1], [0, 1]], rng=rng,
+                              minimizer_kwargs=dict(method='newton-cg',
+                                                    jac=jac, hess=hess))
+        res4 = dual_annealing(f, args=(2, ), bounds=[[0, 1], [0, 1]], rng=rng,
+                              minimizer_kwargs=dict(method='newton-cg',
+                                                    jac=jac, hessp=hessp))
+        assert_allclose(res1.fun, res2.fun, rtol=1e-6)
+        assert_allclose(res3.fun, res2.fun, rtol=1e-6)
+        assert_allclose(res4.fun, res2.fun, rtol=1e-6)
diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test__linprog_clean_inputs.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test__linprog_clean_inputs.py
new file mode 100644
index 0000000000000000000000000000000000000000..3b0e4097bc9aadbfd3335aa3a86d063216f2c69a
--- /dev/null
+++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test__linprog_clean_inputs.py
@@ -0,0 +1,310 @@
+"""
+Unit test for Linear Programming via Simplex Algorithm.
+"""
+import numpy as np
+from numpy.testing import assert_, assert_allclose, assert_equal
+from pytest import raises as assert_raises
+from scipy.optimize._linprog_util import _clean_inputs, _LPProblem
+from scipy._lib._util import VisibleDeprecationWarning
+from copy import deepcopy
+from datetime import date
+
+
+def test_aliasing():
+    """
+    Test for ensuring that no objects referred to by `lp` attributes,
+    `c`, `A_ub`, `b_ub`, `A_eq`, `b_eq`, `bounds`, have been modified
+    by `_clean_inputs` as a side effect.
+    """
+    lp = _LPProblem(
+        c=1,
+        A_ub=[[1]],
+        b_ub=[1],
+        A_eq=[[1]],
+        b_eq=[1],
+        bounds=(-np.inf, np.inf)
+    )
+    lp_copy = deepcopy(lp)
+
+    _clean_inputs(lp)
+
+    assert_(lp.c == lp_copy.c, "c modified by _clean_inputs")
+    assert_(lp.A_ub == lp_copy.A_ub, "A_ub modified by _clean_inputs")
+    assert_(lp.b_ub == lp_copy.b_ub, "b_ub modified by _clean_inputs")
+    assert_(lp.A_eq == lp_copy.A_eq, "A_eq modified by _clean_inputs")
+    assert_(lp.b_eq == lp_copy.b_eq, "b_eq modified by _clean_inputs")
+    assert_(lp.bounds == lp_copy.bounds, "bounds modified by _clean_inputs")
+
+
+def test_aliasing2():
+    """
+    Similar purpose as `test_aliasing` above.
+ """ + lp = _LPProblem( + c=np.array([1, 1]), + A_ub=np.array([[1, 1], [2, 2]]), + b_ub=np.array([[1], [1]]), + A_eq=np.array([[1, 1]]), + b_eq=np.array([1]), + bounds=[(-np.inf, np.inf), (None, 1)] + ) + lp_copy = deepcopy(lp) + + _clean_inputs(lp) + + assert_allclose(lp.c, lp_copy.c, err_msg="c modified by _clean_inputs") + assert_allclose(lp.A_ub, lp_copy.A_ub, err_msg="A_ub modified by _clean_inputs") + assert_allclose(lp.b_ub, lp_copy.b_ub, err_msg="b_ub modified by _clean_inputs") + assert_allclose(lp.A_eq, lp_copy.A_eq, err_msg="A_eq modified by _clean_inputs") + assert_allclose(lp.b_eq, lp_copy.b_eq, err_msg="b_eq modified by _clean_inputs") + assert_(lp.bounds == lp_copy.bounds, "bounds modified by _clean_inputs") + + +def test_missing_inputs(): + c = [1, 2] + A_ub = np.array([[1, 1], [2, 2]]) + b_ub = np.array([1, 1]) + A_eq = np.array([[1, 1], [2, 2]]) + b_eq = np.array([1, 1]) + + assert_raises(TypeError, _clean_inputs) + assert_raises(TypeError, _clean_inputs, _LPProblem(c=None)) + assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, A_ub=A_ub)) + assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, A_ub=A_ub, b_ub=None)) + assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, b_ub=b_ub)) + assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, A_ub=None, b_ub=b_ub)) + assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, A_eq=A_eq)) + assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, A_eq=A_eq, b_eq=None)) + assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, b_eq=b_eq)) + assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, A_eq=None, b_eq=b_eq)) + + +def test_too_many_dimensions(): + cb = [1, 2, 3, 4] + A = np.random.rand(4, 4) + bad2D = [[1, 2], [3, 4]] + bad3D = np.random.rand(4, 4, 4) + assert_raises(ValueError, _clean_inputs, _LPProblem(c=bad2D, A_ub=A, b_ub=cb)) + assert_raises(ValueError, _clean_inputs, _LPProblem(c=cb, A_ub=bad3D, b_ub=cb)) + assert_raises(ValueError, _clean_inputs, _LPProblem(c=cb, A_ub=A, b_ub=bad2D)) + assert_raises(ValueError, _clean_inputs, _LPProblem(c=cb, A_eq=bad3D, b_eq=cb)) + assert_raises(ValueError, _clean_inputs, _LPProblem(c=cb, A_eq=A, b_eq=bad2D)) + + +def test_too_few_dimensions(): + bad = np.random.rand(4, 4).ravel() + cb = np.random.rand(4) + assert_raises(ValueError, _clean_inputs, _LPProblem(c=cb, A_ub=bad, b_ub=cb)) + assert_raises(ValueError, _clean_inputs, _LPProblem(c=cb, A_eq=bad, b_eq=cb)) + + +def test_inconsistent_dimensions(): + m = 2 + n = 4 + c = [1, 2, 3, 4] + + Agood = np.random.rand(m, n) + Abad = np.random.rand(m, n + 1) + bgood = np.random.rand(m) + bbad = np.random.rand(m + 1) + boundsbad = [(0, 1)] * (n + 1) + assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, A_ub=Abad, b_ub=bgood)) + assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, A_ub=Agood, b_ub=bbad)) + assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, A_eq=Abad, b_eq=bgood)) + assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, A_eq=Agood, b_eq=bbad)) + assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, bounds=boundsbad)) + with np.testing.suppress_warnings() as sup: + sup.filter(VisibleDeprecationWarning, "Creating an ndarray from ragged") + assert_raises(ValueError, _clean_inputs, + _LPProblem(c=c, bounds=[[1, 2], [2, 3], [3, 4], [4, 5, 6]])) + + +def test_type_errors(): + lp = _LPProblem( + c=[1, 2], + A_ub=np.array([[1, 1], [2, 2]]), + b_ub=np.array([1, 1]), + A_eq=np.array([[1, 1], [2, 2]]), + b_eq=np.array([1, 1]), + bounds=[(0, 1)] + ) + bad = "hello" + + assert_raises(TypeError, 
_clean_inputs, lp._replace(c=bad)) + assert_raises(TypeError, _clean_inputs, lp._replace(A_ub=bad)) + assert_raises(TypeError, _clean_inputs, lp._replace(b_ub=bad)) + assert_raises(TypeError, _clean_inputs, lp._replace(A_eq=bad)) + assert_raises(TypeError, _clean_inputs, lp._replace(b_eq=bad)) + + assert_raises(ValueError, _clean_inputs, lp._replace(bounds=bad)) + assert_raises(ValueError, _clean_inputs, lp._replace(bounds="hi")) + assert_raises(ValueError, _clean_inputs, lp._replace(bounds=["hi"])) + assert_raises(ValueError, _clean_inputs, lp._replace(bounds=[("hi")])) + assert_raises(ValueError, _clean_inputs, lp._replace(bounds=[(1, "")])) + assert_raises(ValueError, _clean_inputs, lp._replace(bounds=[(1, 2), (1, "")])) + assert_raises(TypeError, _clean_inputs, + lp._replace(bounds=[(1, date(2020, 2, 29))])) + assert_raises(ValueError, _clean_inputs, lp._replace(bounds=[[[1, 2]]])) + + +def test_non_finite_errors(): + lp = _LPProblem( + c=[1, 2], + A_ub=np.array([[1, 1], [2, 2]]), + b_ub=np.array([1, 1]), + A_eq=np.array([[1, 1], [2, 2]]), + b_eq=np.array([1, 1]), + bounds=[(0, 1)] + ) + assert_raises(ValueError, _clean_inputs, lp._replace(c=[0, None])) + assert_raises(ValueError, _clean_inputs, lp._replace(c=[np.inf, 0])) + assert_raises(ValueError, _clean_inputs, lp._replace(c=[0, -np.inf])) + assert_raises(ValueError, _clean_inputs, lp._replace(c=[np.nan, 0])) + + assert_raises(ValueError, _clean_inputs, lp._replace(A_ub=[[1, 2], [None, 1]])) + assert_raises(ValueError, _clean_inputs, lp._replace(b_ub=[np.inf, 1])) + assert_raises(ValueError, _clean_inputs, lp._replace(A_eq=[[1, 2], [1, -np.inf]])) + assert_raises(ValueError, _clean_inputs, lp._replace(b_eq=[1, np.nan])) + + +def test__clean_inputs1(): + lp = _LPProblem( + c=[1, 2], + A_ub=[[1, 1], [2, 2]], + b_ub=[1, 1], + A_eq=[[1, 1], [2, 2]], + b_eq=[1, 1], + bounds=None + ) + + lp_cleaned = _clean_inputs(lp) + + assert_allclose(lp_cleaned.c, np.array(lp.c)) + assert_allclose(lp_cleaned.A_ub, np.array(lp.A_ub)) + assert_allclose(lp_cleaned.b_ub, np.array(lp.b_ub)) + assert_allclose(lp_cleaned.A_eq, np.array(lp.A_eq)) + assert_allclose(lp_cleaned.b_eq, np.array(lp.b_eq)) + assert_equal(lp_cleaned.bounds, [(0, np.inf)] * 2) + + assert_(lp_cleaned.c.shape == (2,), "") + assert_(lp_cleaned.A_ub.shape == (2, 2), "") + assert_(lp_cleaned.b_ub.shape == (2,), "") + assert_(lp_cleaned.A_eq.shape == (2, 2), "") + assert_(lp_cleaned.b_eq.shape == (2,), "") + + +def test__clean_inputs2(): + lp = _LPProblem( + c=1, + A_ub=[[1]], + b_ub=1, + A_eq=[[1]], + b_eq=1, + bounds=(0, 1) + ) + + lp_cleaned = _clean_inputs(lp) + + assert_allclose(lp_cleaned.c, np.array(lp.c)) + assert_allclose(lp_cleaned.A_ub, np.array(lp.A_ub)) + assert_allclose(lp_cleaned.b_ub, np.array(lp.b_ub)) + assert_allclose(lp_cleaned.A_eq, np.array(lp.A_eq)) + assert_allclose(lp_cleaned.b_eq, np.array(lp.b_eq)) + assert_equal(lp_cleaned.bounds, [(0, 1)]) + + assert_(lp_cleaned.c.shape == (1,), "") + assert_(lp_cleaned.A_ub.shape == (1, 1), "") + assert_(lp_cleaned.b_ub.shape == (1,), "") + assert_(lp_cleaned.A_eq.shape == (1, 1), "") + assert_(lp_cleaned.b_eq.shape == (1,), "") + + +def test__clean_inputs3(): + lp = _LPProblem( + c=[[1, 2]], + A_ub=np.random.rand(2, 2), + b_ub=[[1], [2]], + A_eq=np.random.rand(2, 2), + b_eq=[[1], [2]], + bounds=[(0, 1)] + ) + + lp_cleaned = _clean_inputs(lp) + + assert_allclose(lp_cleaned.c, np.array([1, 2])) + assert_allclose(lp_cleaned.b_ub, np.array([1, 2])) + assert_allclose(lp_cleaned.b_eq, np.array([1, 2])) + 
assert_equal(lp_cleaned.bounds, [(0, 1)] * 2) + + assert_(lp_cleaned.c.shape == (2,), "") + assert_(lp_cleaned.b_ub.shape == (2,), "") + assert_(lp_cleaned.b_eq.shape == (2,), "") + + +def test_bad_bounds(): + lp = _LPProblem(c=[1, 2]) + + assert_raises(ValueError, _clean_inputs, lp._replace(bounds=(1, 2, 2))) + assert_raises(ValueError, _clean_inputs, lp._replace(bounds=[(1, 2, 2)])) + with np.testing.suppress_warnings() as sup: + sup.filter(VisibleDeprecationWarning, "Creating an ndarray from ragged") + assert_raises(ValueError, _clean_inputs, + lp._replace(bounds=[(1, 2), (1, 2, 2)])) + assert_raises(ValueError, _clean_inputs, + lp._replace(bounds=[(1, 2), (1, 2), (1, 2)])) + + lp = _LPProblem(c=[1, 2, 3, 4]) + + assert_raises(ValueError, _clean_inputs, + lp._replace(bounds=[(1, 2, 3, 4), (1, 2, 3, 4)])) + + +def test_good_bounds(): + lp = _LPProblem(c=[1, 2]) + + lp_cleaned = _clean_inputs(lp) # lp.bounds is None by default + assert_equal(lp_cleaned.bounds, [(0, np.inf)] * 2) + + lp_cleaned = _clean_inputs(lp._replace(bounds=[])) + assert_equal(lp_cleaned.bounds, [(0, np.inf)] * 2) + + lp_cleaned = _clean_inputs(lp._replace(bounds=[[]])) + assert_equal(lp_cleaned.bounds, [(0, np.inf)] * 2) + + lp_cleaned = _clean_inputs(lp._replace(bounds=(1, 2))) + assert_equal(lp_cleaned.bounds, [(1, 2)] * 2) + + lp_cleaned = _clean_inputs(lp._replace(bounds=[(1, 2)])) + assert_equal(lp_cleaned.bounds, [(1, 2)] * 2) + + lp_cleaned = _clean_inputs(lp._replace(bounds=[(1, None)])) + assert_equal(lp_cleaned.bounds, [(1, np.inf)] * 2) + + lp_cleaned = _clean_inputs(lp._replace(bounds=[(None, 1)])) + assert_equal(lp_cleaned.bounds, [(-np.inf, 1)] * 2) + + lp_cleaned = _clean_inputs(lp._replace(bounds=[(None, None), (-np.inf, None)])) + assert_equal(lp_cleaned.bounds, [(-np.inf, np.inf)] * 2) + + lp = _LPProblem(c=[1, 2, 3, 4]) + + lp_cleaned = _clean_inputs(lp) # lp.bounds is None by default + assert_equal(lp_cleaned.bounds, [(0, np.inf)] * 4) + + lp_cleaned = _clean_inputs(lp._replace(bounds=(1, 2))) + assert_equal(lp_cleaned.bounds, [(1, 2)] * 4) + + lp_cleaned = _clean_inputs(lp._replace(bounds=[(1, 2)])) + assert_equal(lp_cleaned.bounds, [(1, 2)] * 4) + + lp_cleaned = _clean_inputs(lp._replace(bounds=[(1, None)])) + assert_equal(lp_cleaned.bounds, [(1, np.inf)] * 4) + + lp_cleaned = _clean_inputs(lp._replace(bounds=[(None, 1)])) + assert_equal(lp_cleaned.bounds, [(-np.inf, 1)] * 4) + + lp_cleaned = _clean_inputs(lp._replace(bounds=[(None, None), + (-np.inf, None), + (None, np.inf), + (-np.inf, np.inf)])) + assert_equal(lp_cleaned.bounds, [(-np.inf, np.inf)] * 4) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test__numdiff.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test__numdiff.py new file mode 100644 index 0000000000000000000000000000000000000000..21fcf36b01f480ea23e15b67e4bebb1270d63c3b --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test__numdiff.py @@ -0,0 +1,841 @@ +import math +from itertools import product + +import numpy as np +from numpy.testing import assert_allclose, assert_equal, assert_ +from pytest import raises as assert_raises + +from scipy.sparse import csr_matrix, csc_matrix, lil_matrix + +from scipy.optimize._numdiff import ( + _adjust_scheme_to_bounds, approx_derivative, check_derivative, + group_columns, _eps_for_method, _compute_absolute_step) + + +def test_group_columns(): + structure 
= [ + [1, 1, 0, 0, 0, 0], + [1, 1, 1, 0, 0, 0], + [0, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 0], + [0, 0, 0, 1, 1, 1], + [0, 0, 0, 0, 1, 1], + [0, 0, 0, 0, 0, 0] + ] + for transform in [np.asarray, csr_matrix, csc_matrix, lil_matrix]: + A = transform(structure) + order = np.arange(6) + groups_true = np.array([0, 1, 2, 0, 1, 2]) + groups = group_columns(A, order) + assert_equal(groups, groups_true) + + order = [1, 2, 4, 3, 5, 0] + groups_true = np.array([2, 0, 1, 2, 0, 1]) + groups = group_columns(A, order) + assert_equal(groups, groups_true) + + # Test repeatability. + groups_1 = group_columns(A) + groups_2 = group_columns(A) + assert_equal(groups_1, groups_2) + + +def test_correct_fp_eps(): + # check that relative step size is correct for FP size + EPS = np.finfo(np.float64).eps + relative_step = {"2-point": EPS**0.5, + "3-point": EPS**(1/3), + "cs": EPS**0.5} + for method in ['2-point', '3-point', 'cs']: + assert_allclose( + _eps_for_method(np.float64, np.float64, method), + relative_step[method]) + assert_allclose( + _eps_for_method(np.complex128, np.complex128, method), + relative_step[method] + ) + + # check another FP size + EPS = np.finfo(np.float32).eps + relative_step = {"2-point": EPS**0.5, + "3-point": EPS**(1/3), + "cs": EPS**0.5} + + for method in ['2-point', '3-point', 'cs']: + assert_allclose( + _eps_for_method(np.float64, np.float32, method), + relative_step[method] + ) + assert_allclose( + _eps_for_method(np.float32, np.float64, method), + relative_step[method] + ) + assert_allclose( + _eps_for_method(np.float32, np.float32, method), + relative_step[method] + ) + + +class TestAdjustSchemeToBounds: + def test_no_bounds(self): + x0 = np.zeros(3) + h = np.full(3, 1e-2) + inf_lower = np.empty_like(x0) + inf_upper = np.empty_like(x0) + inf_lower.fill(-np.inf) + inf_upper.fill(np.inf) + + h_adjusted, one_sided = _adjust_scheme_to_bounds( + x0, h, 1, '1-sided', inf_lower, inf_upper) + assert_allclose(h_adjusted, h) + assert_(np.all(one_sided)) + + h_adjusted, one_sided = _adjust_scheme_to_bounds( + x0, h, 2, '1-sided', inf_lower, inf_upper) + assert_allclose(h_adjusted, h) + assert_(np.all(one_sided)) + + h_adjusted, one_sided = _adjust_scheme_to_bounds( + x0, h, 1, '2-sided', inf_lower, inf_upper) + assert_allclose(h_adjusted, h) + assert_(np.all(~one_sided)) + + h_adjusted, one_sided = _adjust_scheme_to_bounds( + x0, h, 2, '2-sided', inf_lower, inf_upper) + assert_allclose(h_adjusted, h) + assert_(np.all(~one_sided)) + + def test_with_bound(self): + x0 = np.array([0.0, 0.85, -0.85]) + lb = -np.ones(3) + ub = np.ones(3) + h = np.array([1, 1, -1]) * 1e-1 + + h_adjusted, _ = _adjust_scheme_to_bounds(x0, h, 1, '1-sided', lb, ub) + assert_allclose(h_adjusted, h) + + h_adjusted, _ = _adjust_scheme_to_bounds(x0, h, 2, '1-sided', lb, ub) + assert_allclose(h_adjusted, np.array([1, -1, 1]) * 1e-1) + + h_adjusted, one_sided = _adjust_scheme_to_bounds( + x0, h, 1, '2-sided', lb, ub) + assert_allclose(h_adjusted, np.abs(h)) + assert_(np.all(~one_sided)) + + h_adjusted, one_sided = _adjust_scheme_to_bounds( + x0, h, 2, '2-sided', lb, ub) + assert_allclose(h_adjusted, np.array([1, -1, 1]) * 1e-1) + assert_equal(one_sided, np.array([False, True, True])) + + def test_tight_bounds(self): + lb = np.array([-0.03, -0.03]) + ub = np.array([0.05, 0.05]) + x0 = np.array([0.0, 0.03]) + h = np.array([-0.1, -0.1]) + + h_adjusted, _ = _adjust_scheme_to_bounds(x0, h, 1, '1-sided', lb, ub) + assert_allclose(h_adjusted, np.array([0.05, -0.06])) + + h_adjusted, _ = _adjust_scheme_to_bounds(x0, h, 2, '1-sided', lb, 
ub) + assert_allclose(h_adjusted, np.array([0.025, -0.03])) + + h_adjusted, one_sided = _adjust_scheme_to_bounds( + x0, h, 1, '2-sided', lb, ub) + assert_allclose(h_adjusted, np.array([0.03, -0.03])) + assert_equal(one_sided, np.array([False, True])) + + h_adjusted, one_sided = _adjust_scheme_to_bounds( + x0, h, 2, '2-sided', lb, ub) + assert_allclose(h_adjusted, np.array([0.015, -0.015])) + assert_equal(one_sided, np.array([False, True])) + + +class TestApproxDerivativesDense: + def fun_scalar_scalar(self, x): + return np.sinh(x) + + def jac_scalar_scalar(self, x): + return np.cosh(x) + + def fun_scalar_vector(self, x): + return np.array([x[0]**2, np.tan(x[0]), np.exp(x[0])]) + + def jac_scalar_vector(self, x): + return np.array( + [2 * x[0], np.cos(x[0]) ** -2, np.exp(x[0])]).reshape(-1, 1) + + def fun_vector_scalar(self, x): + return np.sin(x[0] * x[1]) * np.log(x[0]) + + def wrong_dimensions_fun(self, x): + return np.array([x**2, np.tan(x), np.exp(x)]) + + def jac_vector_scalar(self, x): + return np.array([ + x[1] * np.cos(x[0] * x[1]) * np.log(x[0]) + + np.sin(x[0] * x[1]) / x[0], + x[0] * np.cos(x[0] * x[1]) * np.log(x[0]) + ]) + + def fun_vector_vector(self, x): + return np.array([ + x[0] * np.sin(x[1]), + x[1] * np.cos(x[0]), + x[0] ** 3 * x[1] ** -0.5 + ]) + + def fun_vector_vector_with_arg(self, x, arg): + """Used to test passing custom arguments with check_derivative()""" + assert arg == 42 + return np.array([ + x[0] * np.sin(x[1]), + x[1] * np.cos(x[0]), + x[0] ** 3 * x[1] ** -0.5 + ]) + + def jac_vector_vector(self, x): + return np.array([ + [np.sin(x[1]), x[0] * np.cos(x[1])], + [-x[1] * np.sin(x[0]), np.cos(x[0])], + [3 * x[0] ** 2 * x[1] ** -0.5, -0.5 * x[0] ** 3 * x[1] ** -1.5] + ]) + + def jac_vector_vector_with_arg(self, x, arg): + """Used to test passing custom arguments with check_derivative()""" + assert arg == 42 + return np.array([ + [np.sin(x[1]), x[0] * np.cos(x[1])], + [-x[1] * np.sin(x[0]), np.cos(x[0])], + [3 * x[0] ** 2 * x[1] ** -0.5, -0.5 * x[0] ** 3 * x[1] ** -1.5] + ]) + + def fun_parametrized(self, x, c0, c1=1.0): + return np.array([np.exp(c0 * x[0]), np.exp(c1 * x[1])]) + + def jac_parametrized(self, x, c0, c1=0.1): + return np.array([ + [c0 * np.exp(c0 * x[0]), 0], + [0, c1 * np.exp(c1 * x[1])] + ]) + + def fun_with_nan(self, x): + return x if np.abs(x) <= 1e-8 else np.nan + + def jac_with_nan(self, x): + return 1.0 if np.abs(x) <= 1e-8 else np.nan + + def fun_zero_jacobian(self, x): + return np.array([x[0] * x[1], np.cos(x[0] * x[1])]) + + def jac_zero_jacobian(self, x): + return np.array([ + [x[1], x[0]], + [-x[1] * np.sin(x[0] * x[1]), -x[0] * np.sin(x[0] * x[1])] + ]) + + def jac_non_numpy(self, x): + # x can be a scalar or an array [val]. + # Cast to true scalar before handing over to math.exp + xp = np.asarray(x).item() + return math.exp(xp) + + def test_scalar_scalar(self): + x0 = 1.0 + jac_diff_2 = approx_derivative(self.fun_scalar_scalar, x0, + method='2-point') + jac_diff_3 = approx_derivative(self.fun_scalar_scalar, x0) + jac_diff_4 = approx_derivative(self.fun_scalar_scalar, x0, + method='cs') + jac_true = self.jac_scalar_scalar(x0) + assert_allclose(jac_diff_2, jac_true, rtol=1e-6) + assert_allclose(jac_diff_3, jac_true, rtol=1e-9) + assert_allclose(jac_diff_4, jac_true, rtol=1e-12) + + def test_scalar_scalar_abs_step(self): + # can approx_derivative use abs_step? 
+ x0 = 1.0 + jac_diff_2 = approx_derivative(self.fun_scalar_scalar, x0, + method='2-point', abs_step=1.49e-8) + jac_diff_3 = approx_derivative(self.fun_scalar_scalar, x0, + abs_step=1.49e-8) + jac_diff_4 = approx_derivative(self.fun_scalar_scalar, x0, + method='cs', abs_step=1.49e-8) + jac_true = self.jac_scalar_scalar(x0) + assert_allclose(jac_diff_2, jac_true, rtol=1e-6) + assert_allclose(jac_diff_3, jac_true, rtol=1e-9) + assert_allclose(jac_diff_4, jac_true, rtol=1e-12) + + def test_scalar_vector(self): + x0 = 0.5 + jac_diff_2 = approx_derivative(self.fun_scalar_vector, x0, + method='2-point') + jac_diff_3 = approx_derivative(self.fun_scalar_vector, x0) + jac_diff_4 = approx_derivative(self.fun_scalar_vector, x0, + method='cs') + jac_true = self.jac_scalar_vector(np.atleast_1d(x0)) + assert_allclose(jac_diff_2, jac_true, rtol=1e-6) + assert_allclose(jac_diff_3, jac_true, rtol=1e-9) + assert_allclose(jac_diff_4, jac_true, rtol=1e-12) + + def test_vector_scalar(self): + x0 = np.array([100.0, -0.5]) + jac_diff_2 = approx_derivative(self.fun_vector_scalar, x0, + method='2-point') + jac_diff_3 = approx_derivative(self.fun_vector_scalar, x0) + jac_diff_4 = approx_derivative(self.fun_vector_scalar, x0, + method='cs') + jac_true = self.jac_vector_scalar(x0) + assert_allclose(jac_diff_2, jac_true, rtol=1e-6) + assert_allclose(jac_diff_3, jac_true, rtol=1e-7) + assert_allclose(jac_diff_4, jac_true, rtol=1e-12) + + def test_vector_scalar_abs_step(self): + # can approx_derivative use abs_step? + x0 = np.array([100.0, -0.5]) + jac_diff_2 = approx_derivative(self.fun_vector_scalar, x0, + method='2-point', abs_step=1.49e-8) + jac_diff_3 = approx_derivative(self.fun_vector_scalar, x0, + abs_step=1.49e-8, rel_step=np.inf) + jac_diff_4 = approx_derivative(self.fun_vector_scalar, x0, + method='cs', abs_step=1.49e-8) + jac_true = self.jac_vector_scalar(x0) + assert_allclose(jac_diff_2, jac_true, rtol=1e-6) + assert_allclose(jac_diff_3, jac_true, rtol=3e-9) + assert_allclose(jac_diff_4, jac_true, rtol=1e-12) + + def test_vector_vector(self): + x0 = np.array([-100.0, 0.2]) + jac_diff_2 = approx_derivative(self.fun_vector_vector, x0, + method='2-point') + jac_diff_3 = approx_derivative(self.fun_vector_vector, x0) + jac_diff_4 = approx_derivative(self.fun_vector_vector, x0, + method='cs') + jac_true = self.jac_vector_vector(x0) + assert_allclose(jac_diff_2, jac_true, rtol=1e-5) + assert_allclose(jac_diff_3, jac_true, rtol=1e-6) + assert_allclose(jac_diff_4, jac_true, rtol=1e-12) + + def test_wrong_dimensions(self): + x0 = 1.0 + assert_raises(RuntimeError, approx_derivative, + self.wrong_dimensions_fun, x0) + f0 = self.wrong_dimensions_fun(np.atleast_1d(x0)) + assert_raises(ValueError, approx_derivative, + self.wrong_dimensions_fun, x0, f0=f0) + + def test_custom_rel_step(self): + x0 = np.array([-0.1, 0.1]) + jac_diff_2 = approx_derivative(self.fun_vector_vector, x0, + method='2-point', rel_step=1e-4) + jac_diff_3 = approx_derivative(self.fun_vector_vector, x0, + rel_step=1e-4) + jac_true = self.jac_vector_vector(x0) + assert_allclose(jac_diff_2, jac_true, rtol=1e-2) + assert_allclose(jac_diff_3, jac_true, rtol=1e-4) + + def test_options(self): + x0 = np.array([1.0, 1.0]) + c0 = -1.0 + c1 = 1.0 + lb = 0.0 + ub = 2.0 + f0 = self.fun_parametrized(x0, c0, c1=c1) + rel_step = np.array([-1e-6, 1e-7]) + jac_true = self.jac_parametrized(x0, c0, c1) + jac_diff_2 = approx_derivative( + self.fun_parametrized, x0, method='2-point', rel_step=rel_step, + f0=f0, args=(c0,), kwargs=dict(c1=c1), bounds=(lb, ub)) + jac_diff_3 
= approx_derivative( + self.fun_parametrized, x0, rel_step=rel_step, + f0=f0, args=(c0,), kwargs=dict(c1=c1), bounds=(lb, ub)) + assert_allclose(jac_diff_2, jac_true, rtol=1e-6) + assert_allclose(jac_diff_3, jac_true, rtol=1e-9) + + def test_with_bounds_2_point(self): + lb = -np.ones(2) + ub = np.ones(2) + + x0 = np.array([-2.0, 0.2]) + assert_raises(ValueError, approx_derivative, + self.fun_vector_vector, x0, bounds=(lb, ub)) + + x0 = np.array([-1.0, 1.0]) + jac_diff = approx_derivative(self.fun_vector_vector, x0, + method='2-point', bounds=(lb, ub)) + jac_true = self.jac_vector_vector(x0) + assert_allclose(jac_diff, jac_true, rtol=1e-6) + + def test_with_bounds_3_point(self): + lb = np.array([1.0, 1.0]) + ub = np.array([2.0, 2.0]) + + x0 = np.array([1.0, 2.0]) + jac_true = self.jac_vector_vector(x0) + + jac_diff = approx_derivative(self.fun_vector_vector, x0) + assert_allclose(jac_diff, jac_true, rtol=1e-9) + + jac_diff = approx_derivative(self.fun_vector_vector, x0, + bounds=(lb, np.inf)) + assert_allclose(jac_diff, jac_true, rtol=1e-9) + + jac_diff = approx_derivative(self.fun_vector_vector, x0, + bounds=(-np.inf, ub)) + assert_allclose(jac_diff, jac_true, rtol=1e-9) + + jac_diff = approx_derivative(self.fun_vector_vector, x0, + bounds=(lb, ub)) + assert_allclose(jac_diff, jac_true, rtol=1e-9) + + def test_tight_bounds(self): + x0 = np.array([10.0, 10.0]) + lb = x0 - 3e-9 + ub = x0 + 2e-9 + jac_true = self.jac_vector_vector(x0) + jac_diff = approx_derivative( + self.fun_vector_vector, x0, method='2-point', bounds=(lb, ub)) + assert_allclose(jac_diff, jac_true, rtol=1e-6) + jac_diff = approx_derivative( + self.fun_vector_vector, x0, method='2-point', + rel_step=1e-6, bounds=(lb, ub)) + assert_allclose(jac_diff, jac_true, rtol=1e-6) + + jac_diff = approx_derivative( + self.fun_vector_vector, x0, bounds=(lb, ub)) + assert_allclose(jac_diff, jac_true, rtol=1e-6) + jac_diff = approx_derivative( + self.fun_vector_vector, x0, rel_step=1e-6, bounds=(lb, ub)) + assert_allclose(jac_true, jac_diff, rtol=1e-6) + + def test_bound_switches(self): + lb = -1e-8 + ub = 1e-8 + x0 = 0.0 + jac_true = self.jac_with_nan(x0) + jac_diff_2 = approx_derivative( + self.fun_with_nan, x0, method='2-point', rel_step=1e-6, + bounds=(lb, ub)) + jac_diff_3 = approx_derivative( + self.fun_with_nan, x0, rel_step=1e-6, bounds=(lb, ub)) + assert_allclose(jac_diff_2, jac_true, rtol=1e-6) + assert_allclose(jac_diff_3, jac_true, rtol=1e-9) + + x0 = 1e-8 + jac_true = self.jac_with_nan(x0) + jac_diff_2 = approx_derivative( + self.fun_with_nan, x0, method='2-point', rel_step=1e-6, + bounds=(lb, ub)) + jac_diff_3 = approx_derivative( + self.fun_with_nan, x0, rel_step=1e-6, bounds=(lb, ub)) + assert_allclose(jac_diff_2, jac_true, rtol=1e-6) + assert_allclose(jac_diff_3, jac_true, rtol=1e-9) + + def test_non_numpy(self): + x0 = 1.0 + jac_true = self.jac_non_numpy(x0) + jac_diff_2 = approx_derivative(self.jac_non_numpy, x0, + method='2-point') + jac_diff_3 = approx_derivative(self.jac_non_numpy, x0) + assert_allclose(jac_diff_2, jac_true, rtol=1e-6) + assert_allclose(jac_diff_3, jac_true, rtol=1e-8) + + # math.exp cannot handle complex arguments, hence this raises + assert_raises(TypeError, approx_derivative, self.jac_non_numpy, x0, + **dict(method='cs')) + + def test_fp(self): + # checks that approx_derivative works for FP size other than 64. + # Example is derived from the minimal working example in gh12991. 
+ np.random.seed(1) + + def func(p, x): + return p[0] + p[1] * x + + def err(p, x, y): + return func(p, x) - y + + x = np.linspace(0, 1, 100, dtype=np.float64) + y = np.random.random(100).astype(np.float64) + p0 = np.array([-1.0, -1.0]) + + jac_fp64 = approx_derivative(err, p0, method='2-point', args=(x, y)) + + # parameter vector is float32, func output is float64 + jac_fp = approx_derivative(err, p0.astype(np.float32), + method='2-point', args=(x, y)) + assert err(p0, x, y).dtype == np.float64 + assert_allclose(jac_fp, jac_fp64, atol=1e-3) + + # parameter vector is float64, func output is float32 + def err_fp32(p): + assert p.dtype == np.float32 + return err(p, x, y).astype(np.float32) + + jac_fp = approx_derivative(err_fp32, p0.astype(np.float32), + method='2-point') + assert_allclose(jac_fp, jac_fp64, atol=1e-3) + + # check upper bound of error on the derivative for 2-point + def f(x): + return np.sin(x) + def g(x): + return np.cos(x) + def hess(x): + return -np.sin(x) + + def calc_atol(h, x0, f, hess, EPS): + # truncation error + t0 = h / 2 * max(np.abs(hess(x0)), np.abs(hess(x0 + h))) + # roundoff error. There may be a divisor (>1) missing from + # the following line, so this contribution is possibly + # overestimated + t1 = EPS / h * max(np.abs(f(x0)), np.abs(f(x0 + h))) + return t0 + t1 + + for dtype in [np.float16, np.float32, np.float64]: + EPS = np.finfo(dtype).eps + x0 = np.array(1.0).astype(dtype) + h = _compute_absolute_step(None, x0, f(x0), '2-point') + atol = calc_atol(h, x0, f, hess, EPS) + err = approx_derivative(f, x0, method='2-point', + abs_step=h) - g(x0) + assert abs(err) < atol + + def test_check_derivative(self): + x0 = np.array([-10.0, 10]) + accuracy = check_derivative(self.fun_vector_vector, + self.jac_vector_vector, x0) + assert_(accuracy < 1e-9) + accuracy = check_derivative(self.fun_vector_vector, + self.jac_vector_vector, x0) + assert_(accuracy < 1e-6) + + x0 = np.array([0.0, 0.0]) + accuracy = check_derivative(self.fun_zero_jacobian, + self.jac_zero_jacobian, x0) + assert_(accuracy == 0) + accuracy = check_derivative(self.fun_zero_jacobian, + self.jac_zero_jacobian, x0) + assert_(accuracy == 0) + + def test_check_derivative_with_kwargs(self): + x0 = np.array([-10.0, 10]) + accuracy = check_derivative(self.fun_vector_vector_with_arg, + self.jac_vector_vector_with_arg, + x0, + kwargs={'arg': 42}) + assert_(accuracy < 1e-9) + + +class TestApproxDerivativeSparse: + # Example from Numerical Optimization 2nd edition, p. 198. 
+ def setup_method(self): + np.random.seed(0) + self.n = 50 + self.lb = -0.1 * (1 + np.arange(self.n)) + self.ub = 0.1 * (1 + np.arange(self.n)) + self.x0 = np.empty(self.n) + self.x0[::2] = (1 - 1e-7) * self.lb[::2] + self.x0[1::2] = (1 - 1e-7) * self.ub[1::2] + + self.J_true = self.jac(self.x0) + + def fun(self, x): + e = x[1:]**3 - x[:-1]**2 + return np.hstack((0, 3 * e)) + np.hstack((2 * e, 0)) + + def jac(self, x): + n = x.size + J = np.zeros((n, n)) + J[0, 0] = -4 * x[0] + J[0, 1] = 6 * x[1]**2 + for i in range(1, n - 1): + J[i, i - 1] = -6 * x[i-1] + J[i, i] = 9 * x[i]**2 - 4 * x[i] + J[i, i + 1] = 6 * x[i+1]**2 + J[-1, -1] = 9 * x[-1]**2 + J[-1, -2] = -6 * x[-2] + + return J + + def structure(self, n): + A = np.zeros((n, n), dtype=int) + A[0, 0] = 1 + A[0, 1] = 1 + for i in range(1, n - 1): + A[i, i - 1: i + 2] = 1 + A[-1, -1] = 1 + A[-1, -2] = 1 + + return A + + def test_all(self): + A = self.structure(self.n) + order = np.arange(self.n) + groups_1 = group_columns(A, order) + np.random.shuffle(order) + groups_2 = group_columns(A, order) + + for method, groups, l, u in product( + ['2-point', '3-point', 'cs'], [groups_1, groups_2], + [-np.inf, self.lb], [np.inf, self.ub]): + J = approx_derivative(self.fun, self.x0, method=method, + bounds=(l, u), sparsity=(A, groups)) + assert_(isinstance(J, csr_matrix)) + assert_allclose(J.toarray(), self.J_true, rtol=1e-6) + + rel_step = np.full_like(self.x0, 1e-8) + rel_step[::2] *= -1 + J = approx_derivative(self.fun, self.x0, method=method, + rel_step=rel_step, sparsity=(A, groups)) + assert_allclose(J.toarray(), self.J_true, rtol=1e-5) + + def test_no_precomputed_groups(self): + A = self.structure(self.n) + J = approx_derivative(self.fun, self.x0, sparsity=A) + assert_allclose(J.toarray(), self.J_true, rtol=1e-6) + + def test_equivalence(self): + structure = np.ones((self.n, self.n), dtype=int) + groups = np.arange(self.n) + for method in ['2-point', '3-point', 'cs']: + J_dense = approx_derivative(self.fun, self.x0, method=method) + J_sparse = approx_derivative( + self.fun, self.x0, sparsity=(structure, groups), method=method) + assert_allclose(J_dense, J_sparse.toarray(), + rtol=5e-16, atol=7e-15) + + def test_check_derivative(self): + def jac(x): + return csr_matrix(self.jac(x)) + + accuracy = check_derivative(self.fun, jac, self.x0, + bounds=(self.lb, self.ub)) + assert_(accuracy < 1e-9) + + accuracy = check_derivative(self.fun, jac, self.x0, + bounds=(self.lb, self.ub)) + assert_(accuracy < 1e-9) + + +class TestApproxDerivativeLinearOperator: + + def fun_scalar_scalar(self, x): + return np.sinh(x) + + def jac_scalar_scalar(self, x): + return np.cosh(x) + + def fun_scalar_vector(self, x): + return np.array([x[0]**2, np.tan(x[0]), np.exp(x[0])]) + + def jac_scalar_vector(self, x): + return np.array( + [2 * x[0], np.cos(x[0]) ** -2, np.exp(x[0])]).reshape(-1, 1) + + def fun_vector_scalar(self, x): + return np.sin(x[0] * x[1]) * np.log(x[0]) + + def jac_vector_scalar(self, x): + return np.array([ + x[1] * np.cos(x[0] * x[1]) * np.log(x[0]) + + np.sin(x[0] * x[1]) / x[0], + x[0] * np.cos(x[0] * x[1]) * np.log(x[0]) + ]) + + def fun_vector_vector(self, x): + return np.array([ + x[0] * np.sin(x[1]), + x[1] * np.cos(x[0]), + x[0] ** 3 * x[1] ** -0.5 + ]) + + def jac_vector_vector(self, x): + return np.array([ + [np.sin(x[1]), x[0] * np.cos(x[1])], + [-x[1] * np.sin(x[0]), np.cos(x[0])], + [3 * x[0] ** 2 * x[1] ** -0.5, -0.5 * x[0] ** 3 * x[1] ** -1.5] + ]) + + def test_scalar_scalar(self): + x0 = 1.0 + jac_diff_2 = 
approx_derivative(self.fun_scalar_scalar, x0, + method='2-point', + as_linear_operator=True) + jac_diff_3 = approx_derivative(self.fun_scalar_scalar, x0, + as_linear_operator=True) + jac_diff_4 = approx_derivative(self.fun_scalar_scalar, x0, + method='cs', + as_linear_operator=True) + jac_true = self.jac_scalar_scalar(x0) + np.random.seed(1) + for i in range(10): + p = np.random.uniform(-10, 10, size=(1,)) + assert_allclose(jac_diff_2.dot(p), jac_true*p, + rtol=1e-5) + assert_allclose(jac_diff_3.dot(p), jac_true*p, + rtol=5e-6) + assert_allclose(jac_diff_4.dot(p), jac_true*p, + rtol=5e-6) + + def test_scalar_vector(self): + x0 = 0.5 + jac_diff_2 = approx_derivative(self.fun_scalar_vector, x0, + method='2-point', + as_linear_operator=True) + jac_diff_3 = approx_derivative(self.fun_scalar_vector, x0, + as_linear_operator=True) + jac_diff_4 = approx_derivative(self.fun_scalar_vector, x0, + method='cs', + as_linear_operator=True) + jac_true = self.jac_scalar_vector(np.atleast_1d(x0)) + np.random.seed(1) + for i in range(10): + p = np.random.uniform(-10, 10, size=(1,)) + assert_allclose(jac_diff_2.dot(p), jac_true.dot(p), + rtol=1e-5) + assert_allclose(jac_diff_3.dot(p), jac_true.dot(p), + rtol=5e-6) + assert_allclose(jac_diff_4.dot(p), jac_true.dot(p), + rtol=5e-6) + + def test_vector_scalar(self): + x0 = np.array([100.0, -0.5]) + jac_diff_2 = approx_derivative(self.fun_vector_scalar, x0, + method='2-point', + as_linear_operator=True) + jac_diff_3 = approx_derivative(self.fun_vector_scalar, x0, + as_linear_operator=True) + jac_diff_4 = approx_derivative(self.fun_vector_scalar, x0, + method='cs', + as_linear_operator=True) + jac_true = self.jac_vector_scalar(x0) + np.random.seed(1) + for i in range(10): + p = np.random.uniform(-10, 10, size=x0.shape) + assert_allclose(jac_diff_2.dot(p), np.atleast_1d(jac_true.dot(p)), + rtol=1e-5) + assert_allclose(jac_diff_3.dot(p), np.atleast_1d(jac_true.dot(p)), + rtol=5e-6) + assert_allclose(jac_diff_4.dot(p), np.atleast_1d(jac_true.dot(p)), + rtol=1e-7) + + def test_vector_vector(self): + x0 = np.array([-100.0, 0.2]) + jac_diff_2 = approx_derivative(self.fun_vector_vector, x0, + method='2-point', + as_linear_operator=True) + jac_diff_3 = approx_derivative(self.fun_vector_vector, x0, + as_linear_operator=True) + jac_diff_4 = approx_derivative(self.fun_vector_vector, x0, + method='cs', + as_linear_operator=True) + jac_true = self.jac_vector_vector(x0) + np.random.seed(1) + for i in range(10): + p = np.random.uniform(-10, 10, size=x0.shape) + assert_allclose(jac_diff_2.dot(p), jac_true.dot(p), rtol=1e-5) + assert_allclose(jac_diff_3.dot(p), jac_true.dot(p), rtol=1e-6) + assert_allclose(jac_diff_4.dot(p), jac_true.dot(p), rtol=1e-7) + + def test_exception(self): + x0 = np.array([-100.0, 0.2]) + assert_raises(ValueError, approx_derivative, + self.fun_vector_vector, x0, + method='2-point', bounds=(1, np.inf)) + + +def test_absolute_step_sign(): + # test for gh12487 + # if an absolute step is specified for 2-point differences make sure that + # the side corresponds to the step. i.e. if step is positive then forward + # differences should be used, if step is negative then backwards + # differences should be used. 
+ + # function has double discontinuity at x = [-1, -1] + # first component is \/, second component is /\ + def f(x): + return -np.abs(x[0] + 1) + np.abs(x[1] + 1) + + # check that the forward difference is used + grad = approx_derivative(f, [-1, -1], method='2-point', abs_step=1e-8) + assert_allclose(grad, [-1.0, 1.0]) + + # check that the backwards difference is used + grad = approx_derivative(f, [-1, -1], method='2-point', abs_step=-1e-8) + assert_allclose(grad, [1.0, -1.0]) + + # check that the forwards difference is used with a step for both + # parameters + grad = approx_derivative( + f, [-1, -1], method='2-point', abs_step=[1e-8, 1e-8] + ) + assert_allclose(grad, [-1.0, 1.0]) + + # check that we can mix forward/backwards steps. + grad = approx_derivative( + f, [-1, -1], method='2-point', abs_step=[1e-8, -1e-8] + ) + assert_allclose(grad, [-1.0, -1.0]) + grad = approx_derivative( + f, [-1, -1], method='2-point', abs_step=[-1e-8, 1e-8] + ) + assert_allclose(grad, [1.0, 1.0]) + + # the forward step should reverse to a backwards step if it runs into a + # bound + # This is kind of tested in TestAdjustSchemeToBounds, but only for a lower level + # function. + grad = approx_derivative( + f, [-1, -1], method='2-point', abs_step=1e-8, + bounds=(-np.inf, -1) + ) + assert_allclose(grad, [1.0, -1.0]) + + grad = approx_derivative( + f, [-1, -1], method='2-point', abs_step=-1e-8, bounds=(-1, np.inf) + ) + assert_allclose(grad, [-1.0, 1.0]) + + +def test__compute_absolute_step(): + # tests calculation of absolute step from rel_step + methods = ['2-point', '3-point', 'cs'] + + x0 = np.array([1e-5, 0, 1, 1e5]) + + EPS = np.finfo(np.float64).eps + relative_step = { + "2-point": EPS**0.5, + "3-point": EPS**(1/3), + "cs": EPS**0.5 + } + f0 = np.array(1.0) + + for method in methods: + rel_step = relative_step[method] + correct_step = np.array([rel_step, + rel_step * 1., + rel_step * 1., + rel_step * np.abs(x0[3])]) + + abs_step = _compute_absolute_step(None, x0, f0, method) + assert_allclose(abs_step, correct_step) + + sign_x0 = (-x0 >= 0).astype(float) * 2 - 1 + abs_step = _compute_absolute_step(None, -x0, f0, method) + assert_allclose(abs_step, sign_x0 * correct_step) + + # if a relative step is provided it should be used + rel_step = np.array([0.1, 1, 10, 100]) + correct_step = np.array([rel_step[0] * x0[0], + relative_step['2-point'], + rel_step[2] * 1., + rel_step[3] * np.abs(x0[3])]) + + abs_step = _compute_absolute_step(rel_step, x0, f0, '2-point') + assert_allclose(abs_step, correct_step) + + sign_x0 = (-x0 >= 0).astype(float) * 2 - 1 + abs_step = _compute_absolute_step(rel_step, -x0, f0, '2-point') + assert_allclose(abs_step, sign_x0 * correct_step) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test__remove_redundancy.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test__remove_redundancy.py new file mode 100644 index 0000000000000000000000000000000000000000..817282011699dea333042a4173f65c999a2925fc --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test__remove_redundancy.py @@ -0,0 +1,228 @@ +""" +Unit test for Linear Programming via Simplex Algorithm. 
+""" + +# TODO: add tests for: +# https://github.com/scipy/scipy/issues/5400 +# https://github.com/scipy/scipy/issues/6690 + +import numpy as np +from numpy.testing import ( + assert_, + assert_allclose, + assert_equal) + +from .test_linprog import magic_square +from scipy.optimize._remove_redundancy import _remove_redundancy_svd +from scipy.optimize._remove_redundancy import _remove_redundancy_pivot_dense +from scipy.optimize._remove_redundancy import _remove_redundancy_pivot_sparse +from scipy.optimize._remove_redundancy import _remove_redundancy_id + +from scipy.sparse import csc_matrix + + +def setup_module(): + np.random.seed(2017) + + +def redundancy_removed(A, B): + """Checks whether a matrix contains only independent rows of another""" + for rowA in A: + # `rowA in B` is not a reliable check + for rowB in B: + if np.all(rowA == rowB): + break + else: + return False + return A.shape[0] == np.linalg.matrix_rank(A) == np.linalg.matrix_rank(B) + + +class RRCommonTests: + def test_no_redundancy(self): + m, n = 10, 10 + A0 = np.random.rand(m, n) + b0 = np.random.rand(m) + A1, b1, status, message = self.rr(A0, b0) + assert_allclose(A0, A1) + assert_allclose(b0, b1) + assert_equal(status, 0) + + def test_infeasible_zero_row(self): + A = np.eye(3) + A[1, :] = 0 + b = np.random.rand(3) + A1, b1, status, message = self.rr(A, b) + assert_equal(status, 2) + + def test_remove_zero_row(self): + A = np.eye(3) + A[1, :] = 0 + b = np.random.rand(3) + b[1] = 0 + A1, b1, status, message = self.rr(A, b) + assert_equal(status, 0) + assert_allclose(A1, A[[0, 2], :]) + assert_allclose(b1, b[[0, 2]]) + + def test_infeasible_m_gt_n(self): + m, n = 20, 10 + A0 = np.random.rand(m, n) + b0 = np.random.rand(m) + A1, b1, status, message = self.rr(A0, b0) + assert_equal(status, 2) + + def test_infeasible_m_eq_n(self): + m, n = 10, 10 + A0 = np.random.rand(m, n) + b0 = np.random.rand(m) + A0[-1, :] = 2 * A0[-2, :] + A1, b1, status, message = self.rr(A0, b0) + assert_equal(status, 2) + + def test_infeasible_m_lt_n(self): + m, n = 9, 10 + A0 = np.random.rand(m, n) + b0 = np.random.rand(m) + A0[-1, :] = np.arange(m - 1).dot(A0[:-1]) + A1, b1, status, message = self.rr(A0, b0) + assert_equal(status, 2) + + def test_m_gt_n(self): + np.random.seed(2032) + m, n = 20, 10 + A0 = np.random.rand(m, n) + b0 = np.random.rand(m) + x = np.linalg.solve(A0[:n, :], b0[:n]) + b0[n:] = A0[n:, :].dot(x) + A1, b1, status, message = self.rr(A0, b0) + assert_equal(status, 0) + assert_equal(A1.shape[0], n) + assert_equal(np.linalg.matrix_rank(A1), n) + + def test_m_gt_n_rank_deficient(self): + m, n = 20, 10 + A0 = np.zeros((m, n)) + A0[:, 0] = 1 + b0 = np.ones(m) + A1, b1, status, message = self.rr(A0, b0) + assert_equal(status, 0) + assert_allclose(A1, A0[0:1, :]) + assert_allclose(b1, b0[0]) + + def test_m_lt_n_rank_deficient(self): + m, n = 9, 10 + A0 = np.random.rand(m, n) + b0 = np.random.rand(m) + A0[-1, :] = np.arange(m - 1).dot(A0[:-1]) + b0[-1] = np.arange(m - 1).dot(b0[:-1]) + A1, b1, status, message = self.rr(A0, b0) + assert_equal(status, 0) + assert_equal(A1.shape[0], 8) + assert_equal(np.linalg.matrix_rank(A1), 8) + + def test_dense1(self): + A = np.ones((6, 6)) + A[0, :3] = 0 + A[1, 3:] = 0 + A[3:, ::2] = -1 + A[3, :2] = 0 + A[4, 2:] = 0 + b = np.zeros(A.shape[0]) + + A1, b1, status, message = self.rr(A, b) + assert_(redundancy_removed(A1, A)) + assert_equal(status, 0) + + def test_dense2(self): + A = np.eye(6) + A[-2, -1] = 1 + A[-1, :] = 1 + b = np.zeros(A.shape[0]) + A1, b1, status, message = self.rr(A, b) + 
assert_(redundancy_removed(A1, A)) + assert_equal(status, 0) + + def test_dense3(self): + A = np.eye(6) + A[-2, -1] = 1 + A[-1, :] = 1 + b = np.random.rand(A.shape[0]) + b[-1] = np.sum(b[:-1]) + A1, b1, status, message = self.rr(A, b) + assert_(redundancy_removed(A1, A)) + assert_equal(status, 0) + + def test_m_gt_n_sparse(self): + np.random.seed(2013) + m, n = 20, 5 + p = 0.1 + A = np.random.rand(m, n) + A[np.random.rand(m, n) > p] = 0 + rank = np.linalg.matrix_rank(A) + b = np.zeros(A.shape[0]) + A1, b1, status, message = self.rr(A, b) + assert_equal(status, 0) + assert_equal(A1.shape[0], rank) + assert_equal(np.linalg.matrix_rank(A1), rank) + + def test_m_lt_n_sparse(self): + np.random.seed(2017) + m, n = 20, 50 + p = 0.05 + A = np.random.rand(m, n) + A[np.random.rand(m, n) > p] = 0 + rank = np.linalg.matrix_rank(A) + b = np.zeros(A.shape[0]) + A1, b1, status, message = self.rr(A, b) + assert_equal(status, 0) + assert_equal(A1.shape[0], rank) + assert_equal(np.linalg.matrix_rank(A1), rank) + + def test_m_eq_n_sparse(self): + np.random.seed(2017) + m, n = 100, 100 + p = 0.01 + A = np.random.rand(m, n) + A[np.random.rand(m, n) > p] = 0 + rank = np.linalg.matrix_rank(A) + b = np.zeros(A.shape[0]) + A1, b1, status, message = self.rr(A, b) + assert_equal(status, 0) + assert_equal(A1.shape[0], rank) + assert_equal(np.linalg.matrix_rank(A1), rank) + + def test_magic_square(self): + A, b, c, numbers, _ = magic_square(3) + A1, b1, status, message = self.rr(A, b) + assert_equal(status, 0) + assert_equal(A1.shape[0], 23) + assert_equal(np.linalg.matrix_rank(A1), 23) + + def test_magic_square2(self): + A, b, c, numbers, _ = magic_square(4) + A1, b1, status, message = self.rr(A, b) + assert_equal(status, 0) + assert_equal(A1.shape[0], 39) + assert_equal(np.linalg.matrix_rank(A1), 39) + + +class TestRRSVD(RRCommonTests): + def rr(self, A, b): + return _remove_redundancy_svd(A, b) + + +class TestRRPivotDense(RRCommonTests): + def rr(self, A, b): + return _remove_redundancy_pivot_dense(A, b) + + +class TestRRID(RRCommonTests): + def rr(self, A, b): + return _remove_redundancy_id(A, b) + + +class TestRRPivotSparse(RRCommonTests): + def rr(self, A, b): + rr_res = _remove_redundancy_pivot_sparse(csc_matrix(A), b) + A1, b1, status, message = rr_res + return A1.toarray(), b1, status, message diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test__root.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test__root.py new file mode 100644 index 0000000000000000000000000000000000000000..1e2f45a10d3d976d02be18084d241adea8612b05 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test__root.py @@ -0,0 +1,124 @@ +""" +Unit tests for optimization routines from _root.py. 
+""" +from numpy.testing import assert_, assert_equal +import pytest +from pytest import raises as assert_raises, warns as assert_warns +import numpy as np + +from scipy.optimize import root + + +class TestRoot: + def test_tol_parameter(self): + # Check that the minimize() tol= argument does something + def func(z): + x, y = z + return np.array([x**3 - 1, y**3 - 1]) + + def dfunc(z): + x, y = z + return np.array([[3*x**2, 0], [0, 3*y**2]]) + + for method in ['hybr', 'lm', 'broyden1', 'broyden2', 'anderson', + 'diagbroyden', 'krylov']: + if method in ('linearmixing', 'excitingmixing'): + # doesn't converge + continue + + if method in ('hybr', 'lm'): + jac = dfunc + else: + jac = None + + sol1 = root(func, [1.1,1.1], jac=jac, tol=1e-4, method=method) + sol2 = root(func, [1.1,1.1], jac=jac, tol=0.5, method=method) + msg = f"{method}: {func(sol1.x)} vs. {func(sol2.x)}" + assert_(sol1.success, msg) + assert_(sol2.success, msg) + assert_(abs(func(sol1.x)).max() < abs(func(sol2.x)).max(), + msg) + + def test_tol_norm(self): + + def norm(x): + return abs(x[0]) + + for method in ['excitingmixing', + 'diagbroyden', + 'linearmixing', + 'anderson', + 'broyden1', + 'broyden2', + 'krylov']: + + root(np.zeros_like, np.zeros(2), method=method, + options={"tol_norm": norm}) + + def test_minimize_scalar_coerce_args_param(self): + # GitHub issue #3503 + def func(z, f=1): + x, y = z + return np.array([x**3 - 1, y**3 - f]) + root(func, [1.1, 1.1], args=1.5) + + def test_f_size(self): + # gh8320 + # check that decreasing the size of the returned array raises an error + # and doesn't segfault + class fun: + def __init__(self): + self.count = 0 + + def __call__(self, x): + self.count += 1 + + if not (self.count % 5): + ret = x[0] + 0.5 * (x[0] - x[1]) ** 3 - 1.0 + else: + ret = ([x[0] + 0.5 * (x[0] - x[1]) ** 3 - 1.0, + 0.5 * (x[1] - x[0]) ** 3 + x[1]]) + + return ret + + F = fun() + with assert_raises(ValueError): + root(F, [0.1, 0.0], method='lm') + + @pytest.mark.thread_unsafe + def test_gh_10370(self): + # gh-10370 reported that passing both `args` and `jac` to `root` with + # `method='krylov'` caused a failure. Ensure that this is fixed whether + # the gradient is passed via `jac` or as a second output of `fun`. 
+ def fun(x, ignored): + return [3*x[0] - 0.25*x[1]**2 + 10, 0.1*x[0]**2 + 5*x[1] - 2] + + def grad(x, ignored): + return [[3, 0.5 * x[1]], [0.2 * x[0], 5]] + + def fun_grad(x, ignored): + return fun(x, ignored), grad(x, ignored) + + x0 = np.zeros(2) + + ref = root(fun, x0, args=(1,), method='krylov') + message = 'Method krylov does not use the jacobian' + with assert_warns(RuntimeWarning, match=message): + res1 = root(fun, x0, args=(1,), method='krylov', jac=grad) + with assert_warns(RuntimeWarning, match=message): + res2 = root(fun_grad, x0, args=(1,), method='krylov', jac=True) + + assert_equal(res1.x, ref.x) + assert_equal(res2.x, ref.x) + assert res1.success is res2.success is ref.success is True + + @pytest.mark.parametrize("method", ["hybr", "lm", "broyden1", "broyden2", + "anderson", "linearmixing", + "diagbroyden", "excitingmixing", + "krylov", "df-sane"]) + def test_method_in_result(self, method): + def func(x): + return x - 1 + + res = root(func, x0=[1], method=method) + assert res.method == method diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test__shgo.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test__shgo.py new file mode 100644 index 0000000000000000000000000000000000000000..82efb74beee92eacc75f64c6c705374fa5ada322 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test__shgo.py @@ -0,0 +1,1156 @@ +import logging +import sys + +import numpy as np +import time +from multiprocessing import Pool +from numpy.testing import assert_allclose, IS_PYPY +import pytest +from pytest import raises as assert_raises, warns +from scipy.optimize import (shgo, Bounds, minimize_scalar, minimize, rosen, + rosen_der, rosen_hess, NonlinearConstraint) +from scipy.optimize._constraints import new_constraint_to_old +from scipy.optimize._shgo import SHGO + + +class StructTestFunction: + def __init__(self, bounds, expected_x, expected_fun=None, + expected_xl=None, expected_funl=None): + self.bounds = bounds + self.expected_x = expected_x + self.expected_fun = expected_fun + self.expected_xl = expected_xl + self.expected_funl = expected_funl + + +def wrap_constraints(g): + cons = [] + if g is not None: + if not isinstance(g, (tuple, list)): + g = (g,) + else: + pass + for g in g: + cons.append({'type': 'ineq', + 'fun': g}) + cons = tuple(cons) + else: + cons = None + return cons + + +class StructTest1(StructTestFunction): + def f(self, x): + return x[0] ** 2 + x[1] ** 2 + + def g(x): + return -(np.sum(x, axis=0) - 6.0) + + cons = wrap_constraints(g) + + +test1_1 = StructTest1(bounds=[(-1, 6), (-1, 6)], + expected_x=[0, 0]) +test1_2 = StructTest1(bounds=[(0, 1), (0, 1)], + expected_x=[0, 0]) +test1_3 = StructTest1(bounds=[(None, None), (None, None)], + expected_x=[0, 0]) + + +class StructTest2(StructTestFunction): + """ + Scalar function with several minima to test all minimiser retrievals + """ + + def f(self, x): + return (x - 30) * np.sin(x) + + def g(x): + return 58 - np.sum(x, axis=0) + + cons = wrap_constraints(g) + + +test2_1 = StructTest2(bounds=[(0, 60)], + expected_x=[1.53567906], + expected_fun=-28.44677132, + # Important: test that funl return is in the correct + # order + expected_xl=np.array([[1.53567906], + [55.01782167], + [7.80894889], + [48.74797493], + [14.07445705], + [42.4913859], + [20.31743841], + [36.28607535], + [26.43039605], + [30.76371366]]), + + expected_funl=np.array([-28.44677132, 
-24.99785984, + -22.16855376, -18.72136195, + -15.89423937, -12.45154942, + -9.63133158, -6.20801301, + -3.43727232, -0.46353338]) + ) + +test2_2 = StructTest2(bounds=[(0, 4.5)], + expected_x=[1.53567906], + expected_fun=[-28.44677132], + expected_xl=np.array([[1.53567906]]), + expected_funl=np.array([-28.44677132]) + ) + + +class StructTest3(StructTestFunction): + """ + Hock and Schittkowski 18 problem (HS18). Hoch and Schittkowski (1981) + http://www.ai7.uni-bayreuth.de/test_problem_coll.pdf + Minimize: f = 0.01 * (x_1)**2 + (x_2)**2 + + Subject to: x_1 * x_2 - 25.0 >= 0, + (x_1)**2 + (x_2)**2 - 25.0 >= 0, + 2 <= x_1 <= 50, + 0 <= x_2 <= 50. + + Approx. Answer: + f([(250)**0.5 , (2.5)**0.5]) = 5.0 + + + """ + + # amended to test vectorisation of constraints + def f(self, x): + return 0.01 * (x[0]) ** 2 + (x[1]) ** 2 + + def g1(x): + return x[0] * x[1] - 25.0 + + def g2(x): + return x[0] ** 2 + x[1] ** 2 - 25.0 + + # g = (g1, g2) + # cons = wrap_constraints(g) + + def g(x): + return x[0] * x[1] - 25.0, x[0] ** 2 + x[1] ** 2 - 25.0 + + # this checks that shgo can be sent new-style constraints + __nlc = NonlinearConstraint(g, 0, np.inf) + cons = (__nlc,) + +test3_1 = StructTest3(bounds=[(2, 50), (0, 50)], + expected_x=[250 ** 0.5, 2.5 ** 0.5], + expected_fun=5.0 + ) + + +class StructTest4(StructTestFunction): + """ + Hock and Schittkowski 11 problem (HS11). Hoch and Schittkowski (1981) + + NOTE: Did not find in original reference to HS collection, refer to + Henderson (2015) problem 7 instead. 02.03.2016 + """ + + def f(self, x): + return ((x[0] - 10) ** 2 + 5 * (x[1] - 12) ** 2 + x[2] ** 4 + + 3 * (x[3] - 11) ** 2 + 10 * x[4] ** 6 + 7 * x[5] ** 2 + x[ + 6] ** 4 + - 4 * x[5] * x[6] - 10 * x[5] - 8 * x[6] + ) + + def g1(x): + return -(2 * x[0] ** 2 + 3 * x[1] ** 4 + x[2] + 4 * x[3] ** 2 + + 5 * x[4] - 127) + + def g2(x): + return -(7 * x[0] + 3 * x[1] + 10 * x[2] ** 2 + x[3] - x[4] - 282.0) + + def g3(x): + return -(23 * x[0] + x[1] ** 2 + 6 * x[5] ** 2 - 8 * x[6] - 196) + + def g4(x): + return -(4 * x[0] ** 2 + x[1] ** 2 - 3 * x[0] * x[1] + 2 * x[2] ** 2 + + 5 * x[5] - 11 * x[6]) + + g = (g1, g2, g3, g4) + + cons = wrap_constraints(g) + + +test4_1 = StructTest4(bounds=[(-10, 10), ] * 7, + expected_x=[2.330499, 1.951372, -0.4775414, + 4.365726, -0.6244870, 1.038131, 1.594227], + expected_fun=680.6300573 + ) + + +class StructTest5(StructTestFunction): + def f(self, x): + return ( + -(x[1] + 47.0)*np.sin(np.sqrt(abs(x[0]/2.0 + (x[1] + 47.0)))) + - x[0]*np.sin(np.sqrt(abs(x[0] - (x[1] + 47.0)))) + ) + + g = None + cons = wrap_constraints(g) + + +test5_1 = StructTest5(bounds=[(-512, 512), (-512, 512)], + expected_fun=[-959.64066272085051], + expected_x=[512., 404.23180542]) + + +class StructTestLJ(StructTestFunction): + """ + LennardJones objective function. Used to test symmetry constraints + settings. 
+ """ + + def f(self, x, *args): + print(f'x = {x}') + self.N = args[0] + k = int(self.N / 3) + s = 0.0 + + for i in range(k - 1): + for j in range(i + 1, k): + a = 3 * i + b = 3 * j + xd = x[a] - x[b] + yd = x[a + 1] - x[b + 1] + zd = x[a + 2] - x[b + 2] + ed = xd * xd + yd * yd + zd * zd + ud = ed * ed * ed + if ed > 0.0: + s += (1.0 / ud - 2.0) / ud + + return s + + g = None + cons = wrap_constraints(g) + + +N = 6 +boundsLJ = list(zip([-4.0] * 6, [4.0] * 6)) + +testLJ = StructTestLJ(bounds=boundsLJ, + expected_fun=[-1.0], + expected_x=None, + # expected_x=[-2.71247337e-08, + # -2.71247337e-08, + # -2.50000222e+00, + # -2.71247337e-08, + # -2.71247337e-08, + # -1.50000222e+00] + ) + + +class StructTestS(StructTestFunction): + def f(self, x): + return ((x[0] - 0.5) ** 2 + (x[1] - 0.5) ** 2 + + (x[2] - 0.5) ** 2 + (x[3] - 0.5) ** 2) + + g = None + cons = wrap_constraints(g) + + +test_s = StructTestS(bounds=[(0, 2.0), ] * 4, + expected_fun=0.0, + expected_x=np.ones(4) - 0.5 + ) + + +class StructTestTable(StructTestFunction): + def f(self, x): + if x[0] == 3.0 and x[1] == 3.0: + return 50 + else: + return 100 + + g = None + cons = wrap_constraints(g) + + +test_table = StructTestTable(bounds=[(-10, 10), (-10, 10)], + expected_fun=[50], + expected_x=[3.0, 3.0]) + + +class StructTestInfeasible(StructTestFunction): + """ + Test function with no feasible domain. + """ + + def f(self, x, *args): + return x[0] ** 2 + x[1] ** 2 + + def g1(x): + return x[0] + x[1] - 1 + + def g2(x): + return -(x[0] + x[1] - 1) + + def g3(x): + return -x[0] + x[1] - 1 + + def g4(x): + return -(-x[0] + x[1] - 1) + + g = (g1, g2, g3, g4) + cons = wrap_constraints(g) + + +test_infeasible = StructTestInfeasible(bounds=[(2, 50), (-1, 1)], + expected_fun=None, + expected_x=None + ) + + +@pytest.mark.skip("Not a test") +def run_test(test, args=(), test_atol=1e-5, n=100, iters=None, + callback=None, minimizer_kwargs=None, options=None, + sampling_method='sobol', workers=1): + res = shgo(test.f, test.bounds, args=args, constraints=test.cons, + n=n, iters=iters, callback=callback, + minimizer_kwargs=minimizer_kwargs, options=options, + sampling_method=sampling_method, workers=workers) + + print(f'res = {res}') + logging.info(f'res = {res}') + if test.expected_x is not None: + np.testing.assert_allclose(res.x, test.expected_x, + rtol=test_atol, + atol=test_atol) + + # (Optional tests) + if test.expected_fun is not None: + np.testing.assert_allclose(res.fun, + test.expected_fun, + atol=test_atol) + + if test.expected_xl is not None: + np.testing.assert_allclose(res.xl, + test.expected_xl, + atol=test_atol) + + if test.expected_funl is not None: + np.testing.assert_allclose(res.funl, + test.expected_funl, + atol=test_atol) + return + + +# Base test functions: +class TestShgoSobolTestFunctions: + """ + Global optimisation tests with Sobol sampling: + """ + + # Sobol algorithm + def test_f1_1_sobol(self): + """Multivariate test function 1: + x[0]**2 + x[1]**2 with bounds=[(-1, 6), (-1, 6)]""" + run_test(test1_1) + + def test_f1_2_sobol(self): + """Multivariate test function 1: + x[0]**2 + x[1]**2 with bounds=[(0, 1), (0, 1)]""" + run_test(test1_2) + + def test_f1_3_sobol(self): + """Multivariate test function 1: + x[0]**2 + x[1]**2 with bounds=[(None, None),(None, None)]""" + options = {'disp': True} + run_test(test1_3, options=options) + + def test_f2_1_sobol(self): + """Univariate test function on + f(x) = (x - 30) * sin(x) with bounds=[(0, 60)]""" + run_test(test2_1) + + def test_f2_2_sobol(self): + """Univariate test function on 
+ f(x) = (x - 30) * sin(x) bounds=[(0, 4.5)]""" + run_test(test2_2) + + def test_f3_sobol(self): + """NLP: Hock and Schittkowski problem 18""" + run_test(test3_1) + + @pytest.mark.slow + def test_f4_sobol(self): + """NLP: (High dimensional) Hock and Schittkowski 11 problem (HS11)""" + options = {'infty_constraints': False} + # run_test(test4_1, n=990, options=options) + run_test(test4_1, n=990 * 2, options=options) + + def test_f5_1_sobol(self): + """NLP: Eggholder, multimodal""" + # run_test(test5_1, n=30) + run_test(test5_1, n=60) + + def test_f5_2_sobol(self): + """NLP: Eggholder, multimodal""" + # run_test(test5_1, n=60, iters=5) + run_test(test5_1, n=60, iters=5) + + # def test_t911(self): + # """1D tabletop function""" + # run_test(test11_1) + + +class TestShgoSimplicialTestFunctions: + """ + Global optimisation tests with Simplicial sampling: + """ + + def test_f1_1_simplicial(self): + """Multivariate test function 1: + x[0]**2 + x[1]**2 with bounds=[(-1, 6), (-1, 6)]""" + run_test(test1_1, n=1, sampling_method='simplicial') + + def test_f1_2_simplicial(self): + """Multivariate test function 1: + x[0]**2 + x[1]**2 with bounds=[(0, 1), (0, 1)]""" + run_test(test1_2, n=1, sampling_method='simplicial') + + def test_f1_3_simplicial(self): + """Multivariate test function 1: x[0]**2 + x[1]**2 + with bounds=[(None, None),(None, None)]""" + run_test(test1_3, n=5, sampling_method='simplicial') + + def test_f2_1_simplicial(self): + """Univariate test function on + f(x) = (x - 30) * sin(x) with bounds=[(0, 60)]""" + options = {'minimize_every_iter': False} + run_test(test2_1, n=200, iters=7, options=options, + sampling_method='simplicial') + + def test_f2_2_simplicial(self): + """Univariate test function on + f(x) = (x - 30) * sin(x) bounds=[(0, 4.5)]""" + run_test(test2_2, n=1, sampling_method='simplicial') + + def test_f3_simplicial(self): + """NLP: Hock and Schittkowski problem 18""" + run_test(test3_1, n=1, sampling_method='simplicial') + + @pytest.mark.slow + def test_f4_simplicial(self): + """NLP: (High dimensional) Hock and Schittkowski 11 problem (HS11)""" + run_test(test4_1, n=1, sampling_method='simplicial') + + def test_lj_symmetry_old(self): + """LJ: Symmetry-constrained test function""" + options = {'symmetry': True, + 'disp': True} + args = (6,) # Number of atoms + run_test(testLJ, args=args, n=300, + options=options, iters=1, + sampling_method='simplicial') + + def test_f5_1_lj_symmetry(self): + """LJ: Symmetry constrained test function""" + options = {'symmetry': [0, ] * 6, + 'disp': True} + args = (6,) # No. 
of atoms + + run_test(testLJ, args=args, n=300, + options=options, iters=1, + sampling_method='simplicial') + + def test_f5_2_cons_symmetry(self): + """Symmetry constrained test function""" + options = {'symmetry': [0, 0], + 'disp': True} + + run_test(test1_1, n=200, + options=options, iters=1, + sampling_method='simplicial') + + @pytest.mark.fail_slow(10) + def test_f5_3_cons_symmetry(self): + """Asymmetrically constrained test function""" + options = {'symmetry': [0, 0, 0, 3], + 'disp': True} + + run_test(test_s, n=10000, + options=options, + iters=1, + sampling_method='simplicial') + + @pytest.mark.skip("Not a test") + def test_f0_min_variance(self): + """Return a minimum on a perfectly symmetric problem, based on + gh10429""" + avg = 0.5 # Given average value of x + cons = {'type': 'eq', 'fun': lambda x: np.mean(x) - avg} + + # Minimize the variance of x under the given constraint + res = shgo(np.var, bounds=6 * [(0, 1)], constraints=cons) + assert res.success + assert_allclose(res.fun, 0, atol=1e-15) + assert_allclose(res.x, 0.5) + + @pytest.mark.skip("Not a test") + def test_f0_min_variance_1D(self): + """Return a minimum on a perfectly symmetric 1D problem, based on + gh10538""" + + def fun(x): + return x * (x - 1.0) * (x - 0.5) + + bounds = [(0, 1)] + res = shgo(fun, bounds=bounds) + ref = minimize_scalar(fun, bounds=bounds[0]) + assert res.success + assert_allclose(res.fun, ref.fun) + assert_allclose(res.x, ref.x, rtol=1e-6) + +# Argument test functions +class TestShgoArguments: + def test_1_1_simpl_iter(self): + """Iterative simplicial sampling on TestFunction 1 (multivariate)""" + run_test(test1_2, n=None, iters=2, sampling_method='simplicial') + + def test_1_2_simpl_iter(self): + """Iterative simplicial on TestFunction 2 (univariate)""" + options = {'minimize_every_iter': False} + run_test(test2_1, n=None, iters=9, options=options, + sampling_method='simplicial') + + def test_2_1_sobol_iter(self): + """Iterative Sobol sampling on TestFunction 1 (multivariate)""" + run_test(test1_2, n=None, iters=1, sampling_method='sobol') + + def test_2_2_sobol_iter(self): + """Iterative Sobol sampling on TestFunction 2 (univariate)""" + res = shgo(test2_1.f, test2_1.bounds, constraints=test2_1.cons, + n=None, iters=1, sampling_method='sobol') + + np.testing.assert_allclose(res.x, test2_1.expected_x, rtol=1e-5, atol=1e-5) + np.testing.assert_allclose(res.fun, test2_1.expected_fun, atol=1e-5) + + def test_3_1_disp_simplicial(self): + """Iterative sampling on TestFunction 1 and 2 (multi and univariate) + """ + + def callback_func(x): + print("Local minimization callback test") + + for test in [test1_1, test2_1]: + shgo(test.f, test.bounds, iters=1, + sampling_method='simplicial', + callback=callback_func, options={'disp': True}) + shgo(test.f, test.bounds, n=1, sampling_method='simplicial', + callback=callback_func, options={'disp': True}) + + def test_3_2_disp_sobol(self): + """Iterative sampling on TestFunction 1 and 2 (multi and univariate)""" + + def callback_func(x): + print("Local minimization callback test") + + for test in [test1_1, test2_1]: + shgo(test.f, test.bounds, iters=1, sampling_method='sobol', + callback=callback_func, options={'disp': True}) + + shgo(test.f, test.bounds, n=1, sampling_method='simplicial', + callback=callback_func, options={'disp': True}) + + def test_args_gh14589(self): + """Using `args` used to cause `shgo` to fail; see #14589, #15986, + #16506""" + res = shgo(func=lambda x, y, z: x * z + y, bounds=[(0, 3)], args=(1, 2) + ) + ref = shgo(func=lambda x: 2 * x + 
1, bounds=[(0, 3)]) + assert_allclose(res.fun, ref.fun) + assert_allclose(res.x, ref.x) + + @pytest.mark.slow + def test_4_1_known_f_min(self): + """Test known function minima stopping criteria""" + # Specify known function value + options = {'f_min': test4_1.expected_fun, + 'f_tol': 1e-6, + 'minimize_every_iter': True} + # TODO: Make default n higher for faster tests + run_test(test4_1, n=None, test_atol=1e-5, options=options, + sampling_method='simplicial') + + @pytest.mark.slow + def test_4_2_known_f_min(self): + """Test Global mode limiting local evaluations""" + options = { # Specify known function value + 'f_min': test4_1.expected_fun, + 'f_tol': 1e-6, + # Specify number of local iterations to perform + 'minimize_every_iter': True, + 'local_iter': 1} + + run_test(test4_1, n=None, test_atol=1e-5, options=options, + sampling_method='simplicial') + + def test_4_4_known_f_min(self): + """Test Global mode limiting local evaluations for 1D funcs""" + options = { # Specify known function value + 'f_min': test2_1.expected_fun, + 'f_tol': 1e-6, + # Specify number of local iterations to perform+ + 'minimize_every_iter': True, + 'local_iter': 1, + 'infty_constraints': False} + + res = shgo(test2_1.f, test2_1.bounds, constraints=test2_1.cons, + n=None, iters=None, options=options, + sampling_method='sobol') + np.testing.assert_allclose(res.x, test2_1.expected_x, rtol=1e-5, atol=1e-5) + + def test_5_1_simplicial_argless(self): + """Test Default simplicial sampling settings on TestFunction 1""" + res = shgo(test1_1.f, test1_1.bounds, constraints=test1_1.cons) + np.testing.assert_allclose(res.x, test1_1.expected_x, rtol=1e-5, atol=1e-5) + + def test_5_2_sobol_argless(self): + """Test Default sobol sampling settings on TestFunction 1""" + res = shgo(test1_1.f, test1_1.bounds, constraints=test1_1.cons, + sampling_method='sobol') + np.testing.assert_allclose(res.x, test1_1.expected_x, rtol=1e-5, atol=1e-5) + + def test_6_1_simplicial_max_iter(self): + """Test that maximum iteration option works on TestFunction 3""" + options = {'max_iter': 2} + res = shgo(test3_1.f, test3_1.bounds, constraints=test3_1.cons, + options=options, sampling_method='simplicial') + np.testing.assert_allclose(res.x, test3_1.expected_x, rtol=1e-5, atol=1e-5) + np.testing.assert_allclose(res.fun, test3_1.expected_fun, atol=1e-5) + + def test_6_2_simplicial_min_iter(self): + """Test that maximum iteration option works on TestFunction 3""" + options = {'min_iter': 2} + res = shgo(test3_1.f, test3_1.bounds, constraints=test3_1.cons, + options=options, sampling_method='simplicial') + np.testing.assert_allclose(res.x, test3_1.expected_x, rtol=1e-5, atol=1e-5) + np.testing.assert_allclose(res.fun, test3_1.expected_fun, atol=1e-5) + + def test_7_1_minkwargs(self): + """Test the minimizer_kwargs arguments for solvers with constraints""" + # Test solvers + for solver in ['COBYLA', 'COBYQA', 'SLSQP']: + # Note that passing global constraints to SLSQP is tested in other + # unittests which run test4_1 normally + minimizer_kwargs = {'method': solver, + 'constraints': test3_1.cons} + run_test(test3_1, n=100, test_atol=1e-3, + minimizer_kwargs=minimizer_kwargs, + sampling_method='sobol') + + def test_7_2_minkwargs(self): + """Test the minimizer_kwargs default inits""" + minimizer_kwargs = {'ftol': 1e-5} + options = {'disp': True} # For coverage purposes + SHGO(test3_1.f, test3_1.bounds, constraints=test3_1.cons[0], + minimizer_kwargs=minimizer_kwargs, options=options) + + def test_7_3_minkwargs(self): + """Test minimizer_kwargs arguments for 
solvers without constraints""" + for solver in ['Nelder-Mead', 'Powell', 'CG', 'BFGS', 'Newton-CG', + 'L-BFGS-B', 'TNC', 'dogleg', 'trust-ncg', 'trust-exact', + 'trust-krylov']: + def jac(x): + return np.array([2 * x[0], 2 * x[1]]).T + + def hess(x): + return np.array([[2, 0], [0, 2]]) + + minimizer_kwargs = {'method': solver, + 'jac': jac, + 'hess': hess} + logging.info(f"Solver = {solver}") + logging.info("=" * 100) + run_test(test1_1, n=100, test_atol=1e-3, + minimizer_kwargs=minimizer_kwargs, + sampling_method='sobol') + + def test_8_homology_group_diff(self): + options = {'minhgrd': 1, + 'minimize_every_iter': True} + + run_test(test1_1, n=None, iters=None, options=options, + sampling_method='simplicial') + + def test_9_cons_g(self): + """Test single function constraint passing""" + SHGO(test3_1.f, test3_1.bounds, constraints=test3_1.cons[0]) + + @pytest.mark.xfail(IS_PYPY and sys.platform == 'win32', + reason="Failing and fix in PyPy not planned (see gh-18632)") + def test_10_finite_time(self): + """Test single function constraint passing""" + options = {'maxtime': 1e-15} + + def f(x): + time.sleep(1e-14) + return 0.0 + + res = shgo(f, test1_1.bounds, iters=5, options=options) + # Assert that only 1 rather than 5 requested iterations ran: + assert res.nit == 1 + + def test_11_f_min_0(self): + """Test to cover the case where f_lowest == 0""" + options = {'f_min': 0.0, + 'disp': True} + res = shgo(test1_2.f, test1_2.bounds, n=10, iters=None, + options=options, sampling_method='sobol') + np.testing.assert_equal(0, res.x[0]) + np.testing.assert_equal(0, res.x[1]) + + # @nottest + @pytest.mark.skip(reason="no way of currently testing this") + def test_12_sobol_inf_cons(self): + """Test to cover the case where f_lowest == 0""" + # TODO: This test doesn't cover anything new, it is unknown what the + # original test was intended for as it was never complete. Delete or + # replace in the future. + options = {'maxtime': 1e-15, + 'f_min': 0.0} + res = shgo(test1_2.f, test1_2.bounds, n=1, iters=None, + options=options, sampling_method='sobol') + np.testing.assert_equal(0.0, res.fun) + + def test_13_high_sobol(self): + """Test init of high-dimensional sobol sequences""" + + def f(x): + return 0 + + bounds = [(None, None), ] * 41 + SHGOc = SHGO(f, bounds, sampling_method='sobol') + # SHGOc.sobol_points(2, 50) + SHGOc.sampling_function(2, 50) + + def test_14_local_iter(self): + """Test limited local iterations for a pseudo-global mode""" + options = {'local_iter': 4} + run_test(test5_1, n=60, options=options) + + def test_15_min_every_iter(self): + """Test minimize every iter options and cover function cache""" + options = {'minimize_every_iter': True} + run_test(test1_1, n=1, iters=7, options=options, + sampling_method='sobol') + + def test_16_disp_bounds_minimizer(self, capsys): + """Test disp=True with minimizers that do not support bounds """ + options = {'disp': True} + minimizer_kwargs = {'method': 'nelder-mead'} + run_test(test1_2, sampling_method='simplicial', + options=options, minimizer_kwargs=minimizer_kwargs) + + def test_17_custom_sampling(self): + """Test the functionality to add custom sampling methods to shgo""" + + def sample(n, d): + return np.random.uniform(size=(n, d)) + + run_test(test1_1, n=30, sampling_method=sample) + + def test_18_bounds_class(self): + # test that new and old bounds yield same result + def f(x): + return np.square(x).sum() + + lb = [-6., 1., -5.] + ub = [-1., 3., 5.] 
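+        # For reference (illustrative): zip(lb, ub) produces the old-style
+        # per-variable pairs [(-6., -1.), (1., 3.), (-5., 5.)], which should
+        # behave identically to the Bounds(lb, ub) instance built below.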
+ bounds_old = list(zip(lb, ub)) + bounds_new = Bounds(lb, ub) + + res_old_bounds = shgo(f, bounds_old) + res_new_bounds = shgo(f, bounds_new) + + assert res_new_bounds.nfev == res_old_bounds.nfev + assert res_new_bounds.message == res_old_bounds.message + assert res_new_bounds.success == res_old_bounds.success + x_opt = np.array([-1., 1., 0.]) + np.testing.assert_allclose(res_new_bounds.x, x_opt) + np.testing.assert_allclose(res_new_bounds.x, res_old_bounds.x) + + @pytest.mark.fail_slow(10) + def test_19_parallelization(self): + """Test the functionality to add custom sampling methods to shgo""" + + with Pool(2) as p: + run_test(test1_1, n=30, workers=p.map) # Constrained + run_test(test1_1, n=30, workers=map) # Constrained + with Pool(2) as p: + run_test(test_s, n=30, workers=p.map) # Unconstrained + run_test(test_s, n=30, workers=map) # Unconstrained + + def test_20_constrained_args(self): + """Test that constraints can be passed to arguments""" + + def eggholder(x): + return ( + -(x[1] + 47.0)*np.sin(np.sqrt(abs(x[0] / 2.0 + (x[1] + 47.0)))) + - x[0]*np.sin(np.sqrt(abs(x[0] - (x[1] + 47.0)))) + ) + + def f(x): # (cattle-feed) + return 24.55 * x[0] + 26.75 * x[1] + 39 * x[2] + 40.50 * x[3] + + bounds = [(0, 1.0), ] * 4 + + def g1_modified(x, i): + return i * 2.3 * x[0] + i * 5.6 * x[1] + 11.1 * x[2] + 1.3 * x[ + 3] - 5 # >=0 + + def g2(x): + return ( + 12*x[0] + 11.9*x[1] + 41.8*x[2] + 52.1*x[3] - 21 + - 1.645*np.sqrt( + 0.28*x[0]**2 + 0.19*x[1]**2 + 20.5*x[2]**2 + 0.62*x[3]**2 + ) + ) # >=0 + + def h1(x): + return x[0] + x[1] + x[2] + x[3] - 1 # == 0 + + cons = ({'type': 'ineq', 'fun': g1_modified, "args": (0,)}, + {'type': 'ineq', 'fun': g2}, + {'type': 'eq', 'fun': h1}) + + shgo(f, bounds, n=300, iters=1, constraints=cons) + # using constrain with arguments AND sampling method sobol + shgo(f, bounds, n=300, iters=1, constraints=cons, + sampling_method='sobol') + + def test_21_1_jac_true(self): + """Test that shgo can handle objective functions that return the + gradient alongside the objective value. Fixes gh-13547""" + # previous + def func(x): + return np.sum(np.power(x, 2)), 2 * x + + shgo( + func, + bounds=[[-1, 1], [1, 2]], + n=100, iters=5, + sampling_method="sobol", + minimizer_kwargs={'method': 'SLSQP', 'jac': True} + ) + + # new + def func(x): + return np.sum(x ** 2), 2 * x + + bounds = [[-1, 1], [1, 2], [-1, 1], [1, 2], [0, 3]] + + res = shgo(func, bounds=bounds, sampling_method="sobol", + minimizer_kwargs={'method': 'SLSQP', 'jac': True}) + ref = minimize(func, x0=[1, 1, 1, 1, 1], bounds=bounds, + jac=True) + assert res.success + assert_allclose(res.fun, ref.fun) + assert_allclose(res.x, ref.x, atol=1e-15) + + @pytest.mark.parametrize('derivative', ['jac', 'hess', 'hessp']) + def test_21_2_derivative_options(self, derivative): + """shgo used to raise an error when passing `options` with 'jac' + # see gh-12963. 
check that this is resolved + """ + + def objective(x): + return 3 * x[0] * x[0] + 2 * x[0] + 5 + + def gradient(x): + return 6 * x[0] + 2 + + def hess(x): + return 6 + + def hessp(x, p): + return 6 * p + + derivative_funcs = {'jac': gradient, 'hess': hess, 'hessp': hessp} + options = {derivative: derivative_funcs[derivative]} + minimizer_kwargs = {'method': 'trust-constr'} + + bounds = [(-100, 100)] + res = shgo(objective, bounds, minimizer_kwargs=minimizer_kwargs, + options=options) + ref = minimize(objective, x0=[0], bounds=bounds, **minimizer_kwargs, + **options) + + assert res.success + np.testing.assert_allclose(res.fun, ref.fun) + np.testing.assert_allclose(res.x, ref.x) + + def test_21_3_hess_options_rosen(self): + """Ensure the Hessian gets passed correctly to the local minimizer + routine. Previous report gh-14533. + """ + bounds = [(0, 1.6), (0, 1.6), (0, 1.4), (0, 1.4), (0, 1.4)] + options = {'jac': rosen_der, 'hess': rosen_hess} + minimizer_kwargs = {'method': 'Newton-CG'} + res = shgo(rosen, bounds, minimizer_kwargs=minimizer_kwargs, + options=options) + ref = minimize(rosen, np.zeros(5), method='Newton-CG', + **options) + assert res.success + assert_allclose(res.fun, ref.fun) + assert_allclose(res.x, ref.x, atol=1e-15) + + def test_21_arg_tuple_sobol(self): + """shgo used to raise an error when passing `args` with Sobol sampling + # see gh-12114. check that this is resolved""" + + def fun(x, k): + return x[0] ** k + + constraints = ({'type': 'ineq', 'fun': lambda x: x[0] - 1}) + + bounds = [(0, 10)] + res = shgo(fun, bounds, args=(1,), constraints=constraints, + sampling_method='sobol') + ref = minimize(fun, np.zeros(1), bounds=bounds, args=(1,), + constraints=constraints) + assert res.success + assert_allclose(res.fun, ref.fun) + assert_allclose(res.x, ref.x) + + +# Failure test functions +class TestShgoFailures: + def test_1_maxiter(self): + """Test failure on insufficient iterations""" + options = {'maxiter': 2} + res = shgo(test4_1.f, test4_1.bounds, n=2, iters=None, + options=options, sampling_method='sobol') + + np.testing.assert_equal(False, res.success) + # np.testing.assert_equal(4, res.nfev) + np.testing.assert_equal(4, res.tnev) + + def test_2_sampling(self): + """Rejection of unknown sampling method""" + assert_raises(ValueError, shgo, test1_1.f, test1_1.bounds, + sampling_method='not_Sobol') + + def test_3_1_no_min_pool_sobol(self): + """Check that the routine stops when no minimiser is found + after maximum specified function evaluations""" + options = {'maxfev': 10, + # 'maxev': 10, + 'disp': True} + res = shgo(test_table.f, test_table.bounds, n=3, options=options, + sampling_method='sobol') + np.testing.assert_equal(False, res.success) + # np.testing.assert_equal(9, res.nfev) + np.testing.assert_equal(12, res.nfev) + + def test_3_2_no_min_pool_simplicial(self): + """Check that the routine stops when no minimiser is found + after maximum specified sampling evaluations""" + options = {'maxev': 10, + 'disp': True} + res = shgo(test_table.f, test_table.bounds, n=3, options=options, + sampling_method='simplicial') + np.testing.assert_equal(False, res.success) + + def test_4_1_bound_err(self): + """Specified bounds ub > lb""" + bounds = [(6, 3), (3, 5)] + assert_raises(ValueError, shgo, test1_1.f, bounds) + + def test_4_2_bound_err(self): + """Specified bounds are of the form (lb, ub)""" + bounds = [(3, 5, 5), (3, 5)] + assert_raises(ValueError, shgo, test1_1.f, bounds) + + def test_5_1_1_infeasible_sobol(self): + """Ensures the algorithm terminates on infeasible 
problems + after maxev is exceeded. Use infty constraints option""" + options = {'maxev': 100, + 'disp': True} + + res = shgo(test_infeasible.f, test_infeasible.bounds, + constraints=test_infeasible.cons, n=100, options=options, + sampling_method='sobol') + + np.testing.assert_equal(False, res.success) + + def test_5_1_2_infeasible_sobol(self): + """Ensures the algorithm terminates on infeasible problems + after maxev is exceeded. Do not use infty constraints option""" + options = {'maxev': 100, + 'disp': True, + 'infty_constraints': False} + + res = shgo(test_infeasible.f, test_infeasible.bounds, + constraints=test_infeasible.cons, n=100, options=options, + sampling_method='sobol') + + np.testing.assert_equal(False, res.success) + + def test_5_2_infeasible_simplicial(self): + """Ensures the algorithm terminates on infeasible problems + after maxev is exceeded.""" + options = {'maxev': 1000, + 'disp': False} + + res = shgo(test_infeasible.f, test_infeasible.bounds, + constraints=test_infeasible.cons, n=100, options=options, + sampling_method='simplicial') + + np.testing.assert_equal(False, res.success) + + @pytest.mark.thread_unsafe + def test_6_1_lower_known_f_min(self): + """Test Global mode limiting local evaluations with f* too high""" + options = { # Specify known function value + 'f_min': test2_1.expected_fun + 2.0, + 'f_tol': 1e-6, + # Specify number of local iterations to perform+ + 'minimize_every_iter': True, + 'local_iter': 1, + 'infty_constraints': False} + args = (test2_1.f, test2_1.bounds) + kwargs = {'constraints': test2_1.cons, + 'n': None, + 'iters': None, + 'options': options, + 'sampling_method': 'sobol' + } + warns(UserWarning, shgo, *args, **kwargs) + + def test(self): + from scipy.optimize import rosen, shgo + bounds = [(0, 2), (0, 2), (0, 2), (0, 2), (0, 2)] + + def fun(x): + fun.nfev += 1 + return rosen(x) + + fun.nfev = 0 + + result = shgo(fun, bounds) + print(result.x, result.fun, fun.nfev) # 50 + + +# Returns +class TestShgoReturns: + def test_1_nfev_simplicial(self): + bounds = [(0, 2), (0, 2), (0, 2), (0, 2), (0, 2)] + + def fun(x): + fun.nfev += 1 + return rosen(x) + + fun.nfev = 0 + + result = shgo(fun, bounds) + np.testing.assert_equal(fun.nfev, result.nfev) + + def test_1_nfev_sobol(self): + bounds = [(0, 2), (0, 2), (0, 2), (0, 2), (0, 2)] + + def fun(x): + fun.nfev += 1 + return rosen(x) + + fun.nfev = 0 + + result = shgo(fun, bounds, sampling_method='sobol') + np.testing.assert_equal(fun.nfev, result.nfev) + + +def test_vector_constraint(): + # gh15514 + def quad(x): + x = np.asarray(x) + return [np.sum(x ** 2)] + + nlc = NonlinearConstraint(quad, [2.2], [3]) + oldc = new_constraint_to_old(nlc, np.array([1.0, 1.0])) + + res = shgo(rosen, [(0, 10), (0, 10)], constraints=oldc, sampling_method='sobol') + assert np.all(np.sum((res.x)**2) >= 2.2) + assert np.all(np.sum((res.x) ** 2) <= 3.0) + assert res.success + + +@pytest.mark.filterwarnings("ignore:delta_grad") +def test_trust_constr(): + def quad(x): + x = np.asarray(x) + return [np.sum(x ** 2)] + + nlc = NonlinearConstraint(quad, [2.6], [3]) + minimizer_kwargs = {'method': 'trust-constr'} + # note that we don't supply the constraints in minimizer_kwargs, + # so if the final result obeys the constraints we know that shgo + # passed them on to 'trust-constr' + res = shgo( + rosen, + [(0, 10), (0, 10)], + constraints=nlc, + sampling_method='sobol', + minimizer_kwargs=minimizer_kwargs + ) + assert np.all(np.sum((res.x)**2) >= 2.6) + assert np.all(np.sum((res.x) ** 2) <= 3.0) + assert res.success + + +def 
test_equality_constraints(): + # gh16260 + bounds = [(0.9, 4.0)] * 2 # Constrain probabilities to 0 and 1. + + def faulty(x): + return x[0] + x[1] + + nlc = NonlinearConstraint(faulty, 3.9, 3.9) + res = shgo(rosen, bounds=bounds, constraints=nlc) + assert_allclose(np.sum(res.x), 3.9) + + def faulty(x): + return x[0] + x[1] - 3.9 + + constraints = {'type': 'eq', 'fun': faulty} + res = shgo(rosen, bounds=bounds, constraints=constraints) + assert_allclose(np.sum(res.x), 3.9) + + bounds = [(0, 1.0)] * 4 + # sum of variable should equal 1. + def faulty(x): + return x[0] + x[1] + x[2] + x[3] - 1 + + # options = {'minimize_every_iter': True, 'local_iter':10} + constraints = {'type': 'eq', 'fun': faulty} + res = shgo( + lambda x: - np.prod(x), + bounds=bounds, + constraints=constraints, + sampling_method='sobol' + ) + assert_allclose(np.sum(res.x), 1.0) + +def test_gh16971(): + def cons(x): + return np.sum(x**2) - 0 + + c = {'fun': cons, 'type': 'ineq'} + minimizer_kwargs = { + 'method': 'COBYLA', + 'options': {'rhobeg': 5, 'tol': 5e-1, 'catol': 0.05} + } + + s = SHGO( + rosen, [(0, 10)]*2, constraints=c, minimizer_kwargs=minimizer_kwargs + ) + + assert s.minimizer_kwargs['method'].lower() == 'cobyla' + assert s.minimizer_kwargs['options']['catol'] == 0.05 diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test__spectral.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test__spectral.py new file mode 100644 index 0000000000000000000000000000000000000000..7b4dc52cc20caf0206fe53933d4dfc6d0fbb2c34 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test__spectral.py @@ -0,0 +1,226 @@ +import itertools + +import numpy as np +from numpy import exp +from numpy.testing import assert_, assert_equal + +from scipy.optimize import root + + +def test_performance(): + # Compare performance results to those listed in + # [Cheng & Li, IMA J. Num. An. 29, 814 (2008)] + # and + # [W. La Cruz, J.M. Martinez, M. Raydan, Math. Comp. 75, 1429 (2006)]. + # and those produced by dfsane.f from M. Raydan's website. + # + # Where the results disagree, the largest limits are taken. + + e_a = 1e-5 + e_r = 1e-4 + + table_1 = [ + dict(F=F_1, x0=x0_1, n=1000, nit=5, nfev=5), + dict(F=F_1, x0=x0_1, n=10000, nit=2, nfev=2), + dict(F=F_2, x0=x0_2, n=500, nit=11, nfev=11), + dict(F=F_2, x0=x0_2, n=2000, nit=11, nfev=11), + # dict(F=F_4, x0=x0_4, n=999, nit=243, nfev=1188) removed: + # too sensitive to rounding errors + # Results from dfsane.f; papers list nit=3, nfev=3 + dict(F=F_6, x0=x0_6, n=100, nit=6, nfev=6), + # Must have n%3==0, typo in papers? + dict(F=F_7, x0=x0_7, n=99, nit=23, nfev=29), + # Must have n%3==0, typo in papers? + dict(F=F_7, x0=x0_7, n=999, nit=23, nfev=29), + # Results from dfsane.f; papers list nit=nfev=6? 
+ dict(F=F_9, x0=x0_9, n=100, nit=12, nfev=18), + dict(F=F_9, x0=x0_9, n=1000, nit=12, nfev=18), + # Results from dfsane.f; papers list nit=2, nfev=12 + dict(F=F_10, x0=x0_10, n=1000, nit=5, nfev=5), + ] + + # Check also scaling invariance + for xscale, yscale, line_search in itertools.product( + [1.0, 1e-10, 1e10], [1.0, 1e-10, 1e10], ['cruz', 'cheng'] + ): + for problem in table_1: + n = problem['n'] + def func(x, n): + return yscale * problem['F'](x / xscale, n) + args = (n,) + x0 = problem['x0'](n) * xscale + + fatol = np.sqrt(n) * e_a * yscale + e_r * np.linalg.norm(func(x0, n)) + + sigma_eps = 1e-10 * min(yscale/xscale, xscale/yscale) + sigma_0 = xscale/yscale + + with np.errstate(over='ignore'): + sol = root(func, x0, args=args, + options=dict(ftol=0, fatol=fatol, maxfev=problem['nfev'] + 1, + sigma_0=sigma_0, sigma_eps=sigma_eps, + line_search=line_search), + method='DF-SANE') + + err_msg = repr( + [xscale, yscale, line_search, problem, np.linalg.norm(func(sol.x, n)), + fatol, sol.success, sol.nit, sol.nfev] + ) + assert sol.success, err_msg + # nfev+1: dfsane.f doesn't count first eval + assert sol.nfev <= problem['nfev'] + 1, err_msg + assert sol.nit <= problem['nit'], err_msg + assert np.linalg.norm(func(sol.x, n)) <= fatol, err_msg + + +def test_complex(): + def func(z): + return z**2 - 1 + 2j + x0 = 2.0j + + ftol = 1e-4 + sol = root(func, x0, tol=ftol, method='DF-SANE') + + assert_(sol.success) + + f0 = np.linalg.norm(func(x0)) + fx = np.linalg.norm(func(sol.x)) + assert_(fx <= ftol*f0) + + +def test_linear_definite(): + # The DF-SANE paper proves convergence for "strongly isolated" + # solutions. + # + # For linear systems F(x) = A x - b = 0, with A positive or + # negative definite, the solution is strongly isolated. + + def check_solvability(A, b, line_search='cruz'): + def func(x): + return A.dot(x) - b + xp = np.linalg.solve(A, b) + eps = np.linalg.norm(func(xp)) * 1e3 + sol = root( + func, b, + options=dict(fatol=eps, ftol=0, maxfev=17523, line_search=line_search), + method='DF-SANE', + ) + assert_(sol.success) + assert_(np.linalg.norm(func(sol.x)) <= eps) + + n = 90 + + # Test linear pos.def. system + np.random.seed(1234) + A = np.arange(n*n).reshape(n, n) + A = A + n*n * np.diag(1 + np.arange(n)) + assert_(np.linalg.eigvals(A).min() > 0) + b = np.arange(n) * 1.0 + check_solvability(A, b, 'cruz') + check_solvability(A, b, 'cheng') + + # Test linear neg.def. system + check_solvability(-A, b, 'cruz') + check_solvability(-A, b, 'cheng') + + +def test_shape(): + def f(x, arg): + return x - arg + + for dt in [float, complex]: + x = np.zeros([2,2]) + arg = np.ones([2,2], dtype=dt) + + sol = root(f, x, args=(arg,), method='DF-SANE') + assert_(sol.success) + assert_equal(sol.x.shape, x.shape) + + +# Some of the test functions and initial guesses listed in +# [W. La Cruz, M. Raydan. Optimization Methods and Software, 18, 583 (2003)] + +def F_1(x, n): + g = np.zeros([n]) + i = np.arange(2, n+1) + g[0] = exp(x[0] - 1) - 1 + g[1:] = i*(exp(x[1:] - 1) - x[1:]) + return g + +def x0_1(n): + x0 = np.empty([n]) + x0.fill(n/(n-1)) + return x0 + +def F_2(x, n): + g = np.zeros([n]) + i = np.arange(2, n+1) + g[0] = exp(x[0]) - 1 + g[1:] = 0.1*i*(exp(x[1:]) + x[:-1] - 1) + return g + +def x0_2(n): + x0 = np.empty([n]) + x0.fill(1/n**2) + return x0 + + +def F_4(x, n): # skip name check + assert_equal(n % 3, 0) + g = np.zeros([n]) + # Note: the first line is typoed in some of the references; + # correct in original [Gasparo, Optimization Meth. 
13, 79 (2000)] + g[::3] = 0.6 * x[::3] + 1.6 * x[1::3]**3 - 7.2 * x[1::3]**2 + 9.6 * x[1::3] - 4.8 + g[1::3] = (0.48 * x[::3] - 0.72 * x[1::3]**3 + 3.24 * x[1::3]**2 - 4.32 * x[1::3] + - x[2::3] + 0.2 * x[2::3]**3 + 2.16) + g[2::3] = 1.25 * x[2::3] - 0.25*x[2::3]**3 + return g + + +def x0_4(n): # skip name check + assert_equal(n % 3, 0) + x0 = np.array([-1, 1/2, -1] * (n//3)) + return x0 + +def F_6(x, n): + c = 0.9 + mu = (np.arange(1, n+1) - 0.5)/n + return x - 1/(1 - c/(2*n) * (mu[:,None]*x / (mu[:,None] + mu)).sum(axis=1)) + +def x0_6(n): + return np.ones([n]) + +def F_7(x, n): + assert_equal(n % 3, 0) + + def phi(t): + v = 0.5*t - 2 + v[t > -1] = ((-592*t**3 + 888*t**2 + 4551*t - 1924)/1998)[t > -1] + v[t >= 2] = (0.5*t + 2)[t >= 2] + return v + g = np.zeros([n]) + g[::3] = 1e4 * x[1::3]**2 - 1 + g[1::3] = exp(-x[::3]) + exp(-x[1::3]) - 1.0001 + g[2::3] = phi(x[2::3]) + return g + +def x0_7(n): + assert_equal(n % 3, 0) + return np.array([1e-3, 18, 1] * (n//3)) + +def F_9(x, n): + g = np.zeros([n]) + i = np.arange(2, n) + g[0] = x[0]**3/3 + x[1]**2/2 + g[1:-1] = -x[1:-1]**2/2 + i*x[1:-1]**3/3 + x[2:]**2/2 + g[-1] = -x[-1]**2/2 + n*x[-1]**3/3 + return g + +def x0_9(n): + return np.ones([n]) + +def F_10(x, n): + return np.log(1 + x) - x/n + +def x0_10(n): + return np.ones([n]) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_bracket.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_bracket.py new file mode 100644 index 0000000000000000000000000000000000000000..f3a47fc005a2af6bbd02465634fdb72fa131f8f8 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_bracket.py @@ -0,0 +1,906 @@ +import pytest + +import numpy as np + +from scipy.optimize._bracket import _ELIMITS +from scipy.optimize.elementwise import bracket_root, bracket_minimum +import scipy._lib._elementwise_iterative_method as eim +from scipy import stats +from scipy._lib._array_api_no_0d import (xp_assert_close, xp_assert_equal, + xp_assert_less, array_namespace) +from scipy._lib._array_api import xp_ravel +from scipy.conftest import array_api_compatible + + +# These tests were originally written for the private `optimize._bracket` +# interfaces, but now we want the tests to check the behavior of the public +# `optimize.elementwise` interfaces. Therefore, rather than importing +# `_bracket_root`/`_bracket_minimum` from `_bracket.py`, we import +# `bracket_root`/`bracket_minimum` from `optimize.elementwise` and wrap those +# functions to conform to the private interface. This may look a little strange, +# since it effectively just inverts the interface transformation done within the +# `bracket_root`/`bracket_minimum` functions, but it allows us to run the original, +# unmodified tests on the public interfaces, simplifying the PR that adds +# the public interfaces. We'll refactor this when we want to @parametrize the +# tests over multiple `method`s. 
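+# The helper below is an illustrative sketch added for clarity; it is not one
+# of the original tests. It shows the assumed shape of the public result
+# object that the shims underneath unpack: `bracket` and `f_bracket` are
+# (left, right) tuples, alongside `success`, `status`, `nfev` and `nit`.
+def _example_public_bracket_root():
+    # Bracket the root of x**3 - 1 (at x = 1) from a small initial interval.
+    # Relies on the module-level `np` and `bracket_root` imports above.
+    res = bracket_root(lambda x: x**3 - 1.0, -0.01, 0.01)
+    xl, xr = res.bracket    # endpoints with a sign change between them
+    fl, fr = res.f_bracket  # function values at those endpoints
+    assert res.success and np.sign(fl) != np.sign(fr)
+    return xl, xr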
+def _bracket_root(*args, **kwargs): + res = bracket_root(*args, **kwargs) + res.xl, res.xr = res.bracket + res.fl, res.fr = res.f_bracket + del res.bracket + del res.f_bracket + return res + + +def _bracket_minimum(*args, **kwargs): + res = bracket_minimum(*args, **kwargs) + res.xl, res.xm, res.xr = res.bracket + res.fl, res.fm, res.fr = res.f_bracket + del res.bracket + del res.f_bracket + return res + + +array_api_strict_skip_reason = 'Array API does not support fancy indexing assignment.' +jax_skip_reason = 'JAX arrays do not support item assignment.' + +@pytest.mark.skip_xp_backends('array_api_strict', reason=array_api_strict_skip_reason) +@pytest.mark.skip_xp_backends('jax.numpy', reason=jax_skip_reason) +@array_api_compatible +@pytest.mark.usefixtures("skip_xp_backends") +class TestBracketRoot: + @pytest.mark.parametrize("seed", (615655101, 3141866013, 238075752)) + @pytest.mark.parametrize("use_xmin", (False, True)) + @pytest.mark.parametrize("other_side", (False, True)) + @pytest.mark.parametrize("fix_one_side", (False, True)) + def test_nfev_expected(self, seed, use_xmin, other_side, fix_one_side, xp): + # Property-based test to confirm that _bracket_root is behaving as + # expected. The basic case is when root < a < b. + # The number of times bracket expands (per side) can be found by + # setting the expression for the left endpoint of the bracket to the + # root of f (x=0), solving for i, and rounding up. The corresponding + # lower and upper ends of the bracket are found by plugging this back + # into the expression for the ends of the bracket. + # `other_side=True` is the case that a < b < root + # Special cases like a < root < b are tested separately + rng = np.random.default_rng(seed) + xl0, d, factor = xp.asarray(rng.random(size=3) * [1e5, 10, 5]) + factor = 1 + factor # factor must be greater than 1 + xr0 = xl0 + d # xr0 must be greater than a in basic case + + def f(x): + f.count += 1 + return x # root is 0 + + if use_xmin: + xmin = xp.asarray(-rng.random()) + n = xp.ceil(xp.log(-(xl0 - xmin) / xmin) / xp.log(factor)) + l, u = xmin + (xl0 - xmin)*factor**-n, xmin + (xl0 - xmin)*factor**-(n - 1) + kwargs = dict(xl0=xl0, xr0=xr0, factor=factor, xmin=xmin) + else: + n = xp.ceil(xp.log(xr0/d) / xp.log(factor)) + l, u = xr0 - d*factor**n, xr0 - d*factor**(n-1) + kwargs = dict(xl0=xl0, xr0=xr0, factor=factor) + + if other_side: + kwargs['xl0'], kwargs['xr0'] = -kwargs['xr0'], -kwargs['xl0'] + l, u = -u, -l + if 'xmin' in kwargs: + kwargs['xmax'] = -kwargs.pop('xmin') + + if fix_one_side: + if other_side: + kwargs['xmin'] = -xr0 + else: + kwargs['xmax'] = xr0 + + f.count = 0 + res = _bracket_root(f, **kwargs) + + # Compare reported number of function evaluations `nfev` against + # reported `nit`, actual function call count `f.count`, and theoretical + # number of expansions `n`. + # When both sides are free, these get multiplied by 2 because function + # is evaluated on the left and the right each iteration. + # When one side is fixed, however, we add one: on the right side, the + # function gets evaluated once at b. + # Add 1 to `n` and `res.nit` because function evaluations occur at + # iterations *0*, 1, ..., `n`. Subtract 1 from `f.count` because + # function is called separately for left and right in iteration 0. 
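+        # Worked instance (illustrative, derived from the identities asserted
+        # below): with both sides free and n == 3 expansions, nit == 3,
+        # nfev == 2*(3 + 1) == 8, and f.count == 5 (two separate calls in
+        # iteration 0, then presumably one call per iteration covering both
+        # sides), so 2*(f.count - 1) == 8 as well.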
+ if not fix_one_side: + assert res.nfev == 2*(res.nit+1) == 2*(f.count-1) == 2*(n + 1) + else: + assert res.nfev == (res.nit+1)+1 == (f.count-1)+1 == (n+1)+1 + + # Compare reported bracket to theoretical bracket and reported function + # values to function evaluated at bracket. + bracket = xp.asarray([res.xl, res.xr]) + xp_assert_close(bracket, xp.asarray([l, u])) + f_bracket = xp.asarray([res.fl, res.fr]) + xp_assert_close(f_bracket, f(bracket)) + + # Check that bracket is valid and that status and success are correct + assert res.xr > res.xl + signs = xp.sign(f_bracket) + assert signs[0] == -signs[1] + assert res.status == 0 + assert res.success + + def f(self, q, p): + return stats._stats_py._SimpleNormal().cdf(q) - p + + @pytest.mark.parametrize('p', [0.6, np.linspace(0.05, 0.95, 10)]) + @pytest.mark.parametrize('xmin', [-5, None]) + @pytest.mark.parametrize('xmax', [5, None]) + @pytest.mark.parametrize('factor', [1.2, 2]) + def test_basic(self, p, xmin, xmax, factor, xp): + # Test basic functionality to bracket root (distribution PPF) + res = _bracket_root(self.f, xp.asarray(-0.01), 0.01, xmin=xmin, xmax=xmax, + factor=factor, args=(xp.asarray(p),)) + xp_assert_equal(-xp.sign(res.fl), xp.sign(res.fr)) + + @pytest.mark.parametrize('shape', [tuple(), (12,), (3, 4), (3, 2, 2)]) + def test_vectorization(self, shape, xp): + # Test for correct functionality, output shapes, and dtypes for various + # input shapes. + p = np.linspace(-0.05, 1.05, 12).reshape(shape) if shape else np.float64(0.6) + args = (p,) + maxiter = 10 + + @np.vectorize + def bracket_root_single(xl0, xr0, xmin, xmax, factor, p): + return _bracket_root(self.f, xl0, xr0, xmin=xmin, xmax=xmax, + factor=factor, args=(p,), + maxiter=maxiter) + + def f(*args, **kwargs): + f.f_evals += 1 + return self.f(*args, **kwargs) + f.f_evals = 0 + + rng = np.random.default_rng(2348234) + xl0 = -rng.random(size=shape) + xr0 = rng.random(size=shape) + xmin, xmax = 1e3*xl0, 1e3*xr0 + if shape: # make some elements un + i = rng.random(size=shape) > 0.5 + xmin[i], xmax[i] = -np.inf, np.inf + factor = rng.random(size=shape) + 1.5 + refs = bracket_root_single(xl0, xr0, xmin, xmax, factor, p).ravel() + xl0, xr0, xmin, xmax, factor = (xp.asarray(xl0), xp.asarray(xr0), + xp.asarray(xmin), xp.asarray(xmax), + xp.asarray(factor)) + args = tuple(map(xp.asarray, args)) + res = _bracket_root(f, xl0, xr0, xmin=xmin, xmax=xmax, factor=factor, + args=args, maxiter=maxiter) + + attrs = ['xl', 'xr', 'fl', 'fr', 'success', 'nfev', 'nit'] + for attr in attrs: + ref_attr = [xp.asarray(getattr(ref, attr)) for ref in refs] + res_attr = getattr(res, attr) + xp_assert_close(xp_ravel(res_attr, xp=xp), xp.stack(ref_attr)) + xp_assert_equal(res_attr.shape, shape) + + xp_test = array_namespace(xp.asarray(1.)) + assert res.success.dtype == xp_test.bool + if shape: + assert xp.all(res.success[1:-1]) + assert res.status.dtype == xp.int32 + assert res.nfev.dtype == xp.int32 + assert res.nit.dtype == xp.int32 + assert xp.max(res.nit) == f.f_evals - 2 + xp_assert_less(res.xl, res.xr) + xp_assert_close(res.fl, xp.asarray(self.f(res.xl, *args))) + xp_assert_close(res.fr, xp.asarray(self.f(res.xr, *args))) + + def test_flags(self, xp): + # Test cases that should produce different status flags; show that all + # can be produced simultaneously. 
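+        # Expected outcome per element, matching `ref_flags` below: 0 brackets
+        # its root at 1.5 and converges; 1 hits the xmin/xmax limits before
+        # reaching the root at 1000; 2 exhausts maxiter=3 without converging;
+        # 3 returns NaN (value error); 4 has inconsistent initial
+        # bracket/limits (input error).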
+ def f(xs, js): + funcs = [lambda x: x - 1.5, + lambda x: x - 1000, + lambda x: x - 1000, + lambda x: x * xp.nan, + lambda x: x] + + return [funcs[int(j)](x) for x, j in zip(xs, js)] + + args = (xp.arange(5, dtype=xp.int64),) + res = _bracket_root(f, + xl0=xp.asarray([-1., -1., -1., -1., 4.]), + xr0=xp.asarray([1, 1, 1, 1, -4]), + xmin=xp.asarray([-xp.inf, -1, -xp.inf, -xp.inf, 6]), + xmax=xp.asarray([xp.inf, 1, xp.inf, xp.inf, 2]), + args=args, maxiter=3) + + ref_flags = xp.asarray([eim._ECONVERGED, + _ELIMITS, + eim._ECONVERR, + eim._EVALUEERR, + eim._EINPUTERR], + dtype=xp.int32) + + xp_assert_equal(res.status, ref_flags) + + @pytest.mark.parametrize("root", (0.622, [0.622, 0.623])) + @pytest.mark.parametrize('xmin', [-5, None]) + @pytest.mark.parametrize('xmax', [5, None]) + @pytest.mark.parametrize("dtype", ("float16", "float32", "float64")) + def test_dtype(self, root, xmin, xmax, dtype, xp): + # Test that dtypes are preserved + dtype = getattr(xp, dtype) + xp_test = array_namespace(xp.asarray(1.)) + + xmin = xmin if xmin is None else xp.asarray(xmin, dtype=dtype) + xmax = xmax if xmax is None else xp.asarray(xmax, dtype=dtype) + root = xp.asarray(root, dtype=dtype) + def f(x, root): + return xp_test.astype((x - root) ** 3, dtype) + + bracket = xp.asarray([-0.01, 0.01], dtype=dtype) + res = _bracket_root(f, *bracket, xmin=xmin, xmax=xmax, args=(root,)) + assert xp.all(res.success) + assert res.xl.dtype == res.xr.dtype == dtype + assert res.fl.dtype == res.fr.dtype == dtype + + def test_input_validation(self, xp): + # Test input validation for appropriate error messages + + message = '`func` must be callable.' + with pytest.raises(ValueError, match=message): + _bracket_root(None, -4, 4) + + message = '...must be numeric and real.' + with pytest.raises(ValueError, match=message): + _bracket_root(lambda x: x, -4+1j, 4) + with pytest.raises(ValueError, match=message): + _bracket_root(lambda x: x, -4, 'hello') + with pytest.raises(ValueError, match=message): + _bracket_root(lambda x: x, -4, 4, xmin=np) + with pytest.raises(ValueError, match=message): + _bracket_root(lambda x: x, -4, 4, xmax=object()) + with pytest.raises(ValueError, match=message): + _bracket_root(lambda x: x, -4, 4, factor=sum) + + message = "All elements of `factor` must be greater than 1." + with pytest.raises(ValueError, match=message): + _bracket_root(lambda x: x, -4, 4, factor=0.5) + + message = "broadcast" + # raised by `xp.broadcast, but the traceback is readable IMO + with pytest.raises(Exception, match=message): + _bracket_root(lambda x: x, xp.asarray([-2, -3]), xp.asarray([3, 4, 5])) + # Consider making this give a more readable error message + # with pytest.raises(ValueError, match=message): + # _bracket_root(lambda x: [x[0], x[1], x[1]], [-3, -3], [5, 5]) + + message = '`maxiter` must be a non-negative integer.' + with pytest.raises(ValueError, match=message): + _bracket_root(lambda x: x, -4, 4, maxiter=1.5) + with pytest.raises(ValueError, match=message): + _bracket_root(lambda x: x, -4, 4, maxiter=-1) + with pytest.raises(ValueError, match=message): + _bracket_root(lambda x: x, -4, 4, maxiter="shrubbery") + + def test_special_cases(self, xp): + # Test edge cases and other special cases + xp_test = array_namespace(xp.asarray(1.)) + + # Test that integers are not passed to `f` + # (otherwise this would overflow) + def f(x): + assert xp_test.isdtype(x.dtype, "real floating") + return x ** 99 - 1 + + res = _bracket_root(f, xp.asarray(-7.), xp.asarray(5.)) + assert res.success + + # Test maxiter = 0. 
Should do nothing to bracket. + def f(x): + return x - 10 + + bracket = (xp.asarray(-3.), xp.asarray(5.)) + res = _bracket_root(f, *bracket, maxiter=0) + assert res.xl, res.xr == bracket + assert res.nit == 0 + assert res.nfev == 2 + assert res.status == -2 + + # Test scalar `args` (not in tuple) + def f(x, c): + return c*x - 1 + + res = _bracket_root(f, xp.asarray(-1.), xp.asarray(1.), + args=xp.asarray(3.)) + assert res.success + xp_assert_close(res.fl, f(res.xl, 3)) + + # Test other edge cases + + def f(x): + f.count += 1 + return x + + # 1. root lies within guess of bracket + f.count = 0 + _bracket_root(f, xp.asarray(-10), xp.asarray(20)) + assert f.count == 2 + + # 2. bracket endpoint hits root exactly + f.count = 0 + res = _bracket_root(f, xp.asarray(5.), xp.asarray(10.), + factor=2) + + assert res.nfev == 4 + xp_assert_close(res.xl, xp.asarray(0.), atol=1e-15) + xp_assert_close(res.xr, xp.asarray(5.), atol=1e-15) + + # 3. bracket limit hits root exactly + with np.errstate(over='ignore'): + res = _bracket_root(f, xp.asarray(5.), xp.asarray(10.), + xmin=0) + xp_assert_close(res.xl, xp.asarray(0.), atol=1e-15) + + with np.errstate(over='ignore'): + res = _bracket_root(f, xp.asarray(-10.), xp.asarray(-5.), + xmax=0) + xp_assert_close(res.xr, xp.asarray(0.), atol=1e-15) + + # 4. bracket not within min, max + with np.errstate(over='ignore'): + res = _bracket_root(f, xp.asarray(5.), xp.asarray(10.), + xmin=1) + assert not res.success + + def test_bug_fixes(self): + # 1. Bug in double sided bracket search. + # Happened in some cases where there are terminations on one side + # after corresponding searches on other side failed due to reaching the + # boundary. + + # https://github.com/scipy/scipy/pull/22560#discussion_r1962853839 + def f(x, p): + return np.exp(x) - p + + p = np.asarray([0.29, 0.35]) + res = _bracket_root(f, xl0=-1, xmin=-np.inf, xmax=0, args=(p, )) + + # https://github.com/scipy/scipy/pull/22560/files#r1962952517 + def f(x, p, c): + return np.exp(x*c) - p + + p = [0.32061201, 0.39175242, 0.40047535, 0.50527218, 0.55654373, + 0.11911647, 0.37507896, 0.66554191] + c = [1., -1., 1., 1., -1., 1., 1., 1.] + xl0 = [-7.63108551, 3.27840947, -8.36968526, -1.78124372, + 0.92201295, -2.48930123, -0.66733533, -0.44606749] + xr0 = [-6.63108551, 4.27840947, -7.36968526, -0.78124372, + 1.92201295, -1.48930123, 0., 0.] + xmin = [-np.inf, 0., -np.inf, -np.inf, 0., -np.inf, -np.inf, + -np.inf] + xmax = [0., np.inf, 0., 0., np.inf, 0., 0., 0.] + + res = _bracket_root(f, xl0=xl0, xr0=xr0, xmin=xmin, xmax=xmax, args=(p, c)) + + # 2. Default xl0 + 1 for xr0 exceeds xmax. 
+ # https://github.com/scipy/scipy/pull/22560#discussion_r1962947434 + res = _bracket_root(lambda x: x + 0.25, xl0=-0.5, xmin=-np.inf, xmax=0) + assert res.success + + +@pytest.mark.skip_xp_backends('array_api_strict', reason=array_api_strict_skip_reason) +@pytest.mark.skip_xp_backends('jax.numpy', reason=jax_skip_reason) +@array_api_compatible +@pytest.mark.usefixtures("skip_xp_backends") +class TestBracketMinimum: + def init_f(self): + def f(x, a, b): + f.count += 1 + return (x - a)**2 + b + f.count = 0 + return f + + def assert_valid_bracket(self, result, xp): + assert xp.all( + (result.xl < result.xm) & (result.xm < result.xr) + ) + assert xp.all( + (result.fl >= result.fm) & (result.fr > result.fm) + | (result.fl > result.fm) & (result.fr > result.fm) + ) + + def get_kwargs( + self, *, xl0=None, xr0=None, factor=None, xmin=None, xmax=None, args=None + ): + names = ("xl0", "xr0", "xmin", "xmax", "factor", "args") + return { + name: val for name, val in zip(names, (xl0, xr0, xmin, xmax, factor, args)) + if val is not None + } + + @pytest.mark.parametrize( + "seed", + ( + 307448016549685229886351382450158984917, + 11650702770735516532954347931959000479, + 113767103358505514764278732330028568336, + ) + ) + @pytest.mark.parametrize("use_xmin", (False, True)) + @pytest.mark.parametrize("other_side", (False, True)) + def test_nfev_expected(self, seed, use_xmin, other_side, xp): + rng = np.random.default_rng(seed) + args = (xp.asarray(0.), xp.asarray(0.)) # f(x) = x^2 with minimum at 0 + # xl0, xm0, xr0 are chosen such that the initial bracket is to + # the right of the minimum, and the bracket will expand + # downhill towards zero. + xl0, d1, d2, factor = xp.asarray(rng.random(size=4) * [1e5, 10, 10, 5]) + xm0 = xl0 + d1 + xr0 = xm0 + d2 + # Factor should be greater than one. + factor += 1 + + if use_xmin: + xmin = xp.asarray(-rng.random() * 5, dtype=xp.float64) + n = int(xp.ceil(xp.log(-(xl0 - xmin) / xmin) / xp.log(factor))) + lower = xmin + (xl0 - xmin)*factor**-n + middle = xmin + (xl0 - xmin)*factor**-(n-1) + upper = xmin + (xl0 - xmin)*factor**-(n-2) if n > 1 else xm0 + # It may be the case the lower is below the minimum, but we still + # don't have a valid bracket. + if middle**2 > lower**2: + n += 1 + lower, middle, upper = ( + xmin + (xl0 - xmin)*factor**-n, lower, middle + ) + else: + xmin = None + n = int(xp.ceil(xp.log(xl0 / d1) / xp.log(factor))) + lower = xl0 - d1*factor**n + middle = xl0 - d1*factor**(n-1) if n > 1 else xl0 + upper = xl0 - d1*factor**(n-2) if n > 1 else xm0 + # It may be the case the lower is below the minimum, but we still + # don't have a valid bracket. + if middle**2 > lower**2: + n += 1 + lower, middle, upper = ( + xl0 - d1*factor**n, lower, middle + ) + f = self.init_f() + + xmax = None + if other_side: + xl0, xm0, xr0 = -xr0, -xm0, -xl0 + xmin, xmax = None, -xmin if xmin is not None else None + lower, middle, upper = -upper, -middle, -lower + + kwargs = self.get_kwargs( + xl0=xl0, xr0=xr0, xmin=xmin, xmax=xmax, factor=factor, args=args + ) + result = _bracket_minimum(f, xp.asarray(xm0), **kwargs) + + # Check that `nfev` and `nit` have the correct relationship + assert result.nfev == result.nit + 3 + # Check that `nfev` reports the correct number of function evaluations. + assert result.nfev == f.count + # Check that the number of iterations matches the theoretical value. + assert result.nit == n + + # Compare reported bracket to theoretical bracket and reported function + # values to function evaluated at bracket. 
+ xp_assert_close(result.xl, lower) + xp_assert_close(result.xm, middle) + xp_assert_close(result.xr, upper) + xp_assert_close(result.fl, f(lower, *args)) + xp_assert_close(result.fm, f(middle, *args)) + xp_assert_close(result.fr, f(upper, *args)) + + self.assert_valid_bracket(result, xp) + assert result.status == 0 + assert result.success + + def test_flags(self, xp): + # Test cases that should produce different status flags; show that all + # can be produced simultaneously + def f(xs, js): + funcs = [lambda x: (x - 1.5)**2, + lambda x: x, + lambda x: x, + lambda x: xp.nan, + lambda x: x**2] + + return [funcs[j](x) for x, j in zip(xs, js)] + + args = (xp.arange(5, dtype=xp.int64),) + xl0 = xp.asarray([-1.0, -1.0, -1.0, -1.0, 6.0]) + xm0 = xp.asarray([0.0, 0.0, 0.0, 0.0, 4.0]) + xr0 = xp.asarray([1.0, 1.0, 1.0, 1.0, 2.0]) + xmin = xp.asarray([-xp.inf, -1.0, -xp.inf, -xp.inf, 8.0]) + + result = _bracket_minimum(f, xm0, xl0=xl0, xr0=xr0, xmin=xmin, + args=args, maxiter=3) + + reference_flags = xp.asarray([eim._ECONVERGED, _ELIMITS, + eim._ECONVERR, eim._EVALUEERR, + eim._EINPUTERR], dtype=xp.int32) + xp_assert_equal(result.status, reference_flags) + + @pytest.mark.parametrize("minimum", (0.622, [0.622, 0.623])) + @pytest.mark.parametrize("dtype", ("float16", "float32", "float64")) + @pytest.mark.parametrize("xmin", [-5, None]) + @pytest.mark.parametrize("xmax", [5, None]) + def test_dtypes(self, minimum, xmin, xmax, dtype, xp): + dtype = getattr(xp, dtype) + xp_test = array_namespace(xp.asarray(1.)) + xmin = xmin if xmin is None else xp.asarray(xmin, dtype=dtype) + xmax = xmax if xmax is None else xp.asarray(xmax, dtype=dtype) + minimum = xp.asarray(minimum, dtype=dtype) + + def f(x, minimum): + return xp_test.astype((x - minimum)**2, dtype) + + xl0, xm0, xr0 = [-0.01, 0.0, 0.01] + result = _bracket_minimum( + f, xp.asarray(xm0, dtype=dtype), xl0=xp.asarray(xl0, dtype=dtype), + xr0=xp.asarray(xr0, dtype=dtype), xmin=xmin, xmax=xmax, args=(minimum, ) + ) + assert xp.all(result.success) + assert result.xl.dtype == result.xm.dtype == result.xr.dtype == dtype + assert result.fl.dtype == result.fm.dtype == result.fr.dtype == dtype + + @pytest.mark.skip_xp_backends(np_only=True, reason="str/object arrays") + def test_input_validation(self, xp): + # Test input validation for appropriate error messages + + message = '`func` must be callable.' + with pytest.raises(ValueError, match=message): + _bracket_minimum(None, -4, xl0=4) + + message = '...must be numeric and real.' + with pytest.raises(ValueError, match=message): + _bracket_minimum(lambda x: x**2, xp.asarray(4+1j)) + with pytest.raises(ValueError, match=message): + _bracket_minimum(lambda x: x**2, xp.asarray(-4), xl0='hello') + with pytest.raises(ValueError, match=message): + _bracket_minimum(lambda x: x**2, xp.asarray(-4), + xr0='farcical aquatic ceremony') + with pytest.raises(ValueError, match=message): + _bracket_minimum(lambda x: x**2, xp.asarray(-4), xmin=np) + with pytest.raises(ValueError, match=message): + _bracket_minimum(lambda x: x**2, xp.asarray(-4), xmax=object()) + with pytest.raises(ValueError, match=message): + _bracket_minimum(lambda x: x**2, xp.asarray(-4), factor=sum) + + message = "All elements of `factor` must be greater than 1." 
+ with pytest.raises(ValueError, match=message): + _bracket_minimum(lambda x: x, xp.asarray(-4), factor=0.5) + + message = "shape mismatch: objects cannot be broadcast" + # raised by `xp.broadcast, but the traceback is readable IMO + with pytest.raises(ValueError, match=message): + _bracket_minimum(lambda x: x**2, xp.asarray([-2, -3]), xl0=[-3, -4, -5]) + + message = '`maxiter` must be a non-negative integer.' + with pytest.raises(ValueError, match=message): + _bracket_minimum(lambda x: x**2, xp.asarray(-4), xr0=4, maxiter=1.5) + with pytest.raises(ValueError, match=message): + _bracket_minimum(lambda x: x**2, xp.asarray(-4), xr0=4, maxiter=-1) + with pytest.raises(ValueError, match=message): + _bracket_minimum(lambda x: x**2, xp.asarray(-4), xr0=4, maxiter="ekki") + + @pytest.mark.parametrize("xl0", [0.0, None]) + @pytest.mark.parametrize("xm0", (0.05, 0.1, 0.15)) + @pytest.mark.parametrize("xr0", (0.2, 0.4, 0.6, None)) + # Minimum is ``a`` for each tuple ``(a, b)`` below. Tests cases where minimum + # is within, or at varying distances to the left or right of the initial + # bracket. + @pytest.mark.parametrize( + "args", + ( + (1.2, 0), (-0.5, 0), (0.1, 0), (0.2, 0), (3.6, 0), (21.4, 0), + (121.6, 0), (5764.1, 0), (-6.4, 0), (-12.9, 0), (-146.2, 0) + ) + ) + def test_scalar_no_limits(self, xl0, xm0, xr0, args, xp): + f = self.init_f() + kwargs = self.get_kwargs(xl0=xl0, xr0=xr0, args=tuple(map(xp.asarray, args))) + result = _bracket_minimum(f, xp.asarray(xm0, dtype=xp.float64), **kwargs) + self.assert_valid_bracket(result, xp) + assert result.status == 0 + assert result.success + assert result.nfev == f.count + + @pytest.mark.parametrize( + # xmin is set at 0.0 in all cases. + "xl0,xm0,xr0,xmin", + ( + # Initial bracket at varying distances from the xmin. + (0.5, 0.75, 1.0, 0.0), + (1.0, 2.5, 4.0, 0.0), + (2.0, 4.0, 6.0, 0.0), + (12.0, 16.0, 20.0, 0.0), + # Test default initial left endpoint selection. It should not + # be below xmin. + (None, 0.75, 1.0, 0.0), + (None, 2.5, 4.0, 0.0), + (None, 4.0, 6.0, 0.0), + (None, 16.0, 20.0, 0.0), + ) + ) + @pytest.mark.parametrize( + "args", ( + (0.0, 0.0), # Minimum is directly at xmin. + (1e-300, 0.0), # Minimum is extremely close to xmin. + (1e-20, 0.0), # Minimum is very close to xmin. + # Minimum at varying distances from xmin. + (0.1, 0.0), + (0.2, 0.0), + (0.4, 0.0) + ) + ) + def test_scalar_with_limit_left(self, xl0, xm0, xr0, xmin, args, xp): + f = self.init_f() + kwargs = self.get_kwargs(xl0=xl0, xr0=xr0, xmin=xmin, + args=tuple(map(xp.asarray, args))) + result = _bracket_minimum(f, xp.asarray(xm0), **kwargs) + self.assert_valid_bracket(result, xp) + assert result.status == 0 + assert result.success + assert result.nfev == f.count + + @pytest.mark.parametrize( + #xmax is set to 1.0 in all cases. + "xl0,xm0,xr0,xmax", + ( + # Bracket at varying distances from xmax. + (0.2, 0.3, 0.4, 1.0), + (0.05, 0.075, 0.1, 1.0), + (-0.2, -0.1, 0.0, 1.0), + (-21.2, -17.7, -14.2, 1.0), + # Test default right endpoint selection. It should not exceed xmax. + (0.2, 0.3, None, 1.0), + (0.05, 0.075, None, 1.0), + (-0.2, -0.1, None, 1.0), + (-21.2, -17.7, None, 1.0), + ) + ) + @pytest.mark.parametrize( + "args", ( + (0.9999999999999999, 0.0), # Minimum very close to xmax. + # Minimum at varying distances from xmax. 
+ (0.9, 0.0), + (0.7, 0.0), + (0.5, 0.0) + ) + ) + def test_scalar_with_limit_right(self, xl0, xm0, xr0, xmax, args, xp): + f = self.init_f() + args = tuple(xp.asarray(arg, dtype=xp.float64) for arg in args) + kwargs = self.get_kwargs(xl0=xl0, xr0=xr0, xmax=xmax, args=args) + result = _bracket_minimum(f, xp.asarray(xm0, dtype=xp.float64), **kwargs) + self.assert_valid_bracket(result, xp) + assert result.status == 0 + assert result.success + assert result.nfev == f.count + + @pytest.mark.parametrize( + "xl0,xm0,xr0,xmin,xmax,args", + ( + ( # Case 1: + # Initial bracket. + 0.2, + 0.3, + 0.4, + # Function slopes down to the right from the bracket to a minimum + # at 1.0. xmax is also at 1.0 + None, + 1.0, + (1.0, 0.0) + ), + ( # Case 2: + # Initial bracket. + 1.4, + 1.95, + 2.5, + # Function slopes down to the left from the bracket to a minimum at + # 0.3 with xmin set to 0.3. + 0.3, + None, + (0.3, 0.0) + ), + ( + # Case 3: + # Initial bracket. + 2.6, + 3.25, + 3.9, + # Function slopes down and to the right to a minimum at 99.4 with xmax + # at 99.4. Tests case where minimum is at xmax relatively further from + # the bracket. + None, + 99.4, + (99.4, 0) + ), + ( + # Case 4: + # Initial bracket. + 4, + 4.5, + 5, + # Function slopes down and to the left away from the bracket with a + # minimum at -26.3 with xmin set to -26.3. Tests case where minimum is + # at xmin relatively far from the bracket. + -26.3, + None, + (-26.3, 0) + ), + ( + # Case 5: + # Similar to Case 1 above, but tests default values of xl0 and xr0. + None, + 0.3, + None, + None, + 1.0, + (1.0, 0.0) + ), + ( # Case 6: + # Similar to Case 2 above, but tests default values of xl0 and xr0. + None, + 1.95, + None, + 0.3, + None, + (0.3, 0.0) + ), + ( + # Case 7: + # Similar to Case 3 above, but tests default values of xl0 and xr0. + None, + 3.25, + None, + None, + 99.4, + (99.4, 0) + ), + ( + # Case 8: + # Similar to Case 4 above, but tests default values of xl0 and xr0. + None, + 4.5, + None, + -26.3, + None, + (-26.3, 0) + ), + ) + ) + def test_minimum_at_boundary_point(self, xl0, xm0, xr0, xmin, xmax, args, xp): + f = self.init_f() + kwargs = self.get_kwargs(xr0=xr0, xmin=xmin, xmax=xmax, + args=tuple(map(xp.asarray, args))) + result = _bracket_minimum(f, xp.asarray(xm0), **kwargs) + assert result.status == -1 + assert args[0] in (result.xl, result.xr) + assert result.nfev == f.count + + @pytest.mark.parametrize('shape', [tuple(), (12, ), (3, 4), (3, 2, 2)]) + def test_vectorization(self, shape, xp): + # Test for correct functionality, output shapes, and dtypes for + # various input shapes. + a = np.linspace(-0.05, 1.05, 12).reshape(shape) if shape else 0.6 + args = (a, 0.) 
+ maxiter = 10 + + @np.vectorize + def bracket_minimum_single(xm0, xl0, xr0, xmin, xmax, factor, a): + return _bracket_minimum(self.init_f(), xm0, xl0=xl0, xr0=xr0, xmin=xmin, + xmax=xmax, factor=factor, maxiter=maxiter, + args=(a, 0.0)) + + f = self.init_f() + + rng = np.random.default_rng(2348234) + xl0 = -rng.random(size=shape) + xr0 = rng.random(size=shape) + xm0 = xl0 + rng.random(size=shape) * (xr0 - xl0) + xmin, xmax = 1e3*xl0, 1e3*xr0 + if shape: # make some elements un + i = rng.random(size=shape) > 0.5 + xmin[i], xmax[i] = -np.inf, np.inf + factor = rng.random(size=shape) + 1.5 + refs = bracket_minimum_single(xm0, xl0, xr0, xmin, xmax, factor, a).ravel() + args = tuple(xp.asarray(arg, dtype=xp.float64) for arg in args) + res = _bracket_minimum(f, xp.asarray(xm0), xl0=xl0, xr0=xr0, xmin=xmin, + xmax=xmax, factor=factor, args=args, maxiter=maxiter) + + attrs = ['xl', 'xm', 'xr', 'fl', 'fm', 'fr', 'success', 'nfev', 'nit'] + for attr in attrs: + ref_attr = [xp.asarray(getattr(ref, attr)) for ref in refs] + res_attr = getattr(res, attr) + xp_assert_close(xp_ravel(res_attr, xp=xp), xp.stack(ref_attr)) + xp_assert_equal(res_attr.shape, shape) + + xp_test = array_namespace(xp.asarray(1.)) + assert res.success.dtype == xp_test.bool + if shape: + assert xp.all(res.success[1:-1]) + assert res.status.dtype == xp.int32 + assert res.nfev.dtype == xp.int32 + assert res.nit.dtype == xp.int32 + assert xp.max(res.nit) == f.count - 3 + self.assert_valid_bracket(res, xp) + xp_assert_close(res.fl, f(res.xl, *args)) + xp_assert_close(res.fm, f(res.xm, *args)) + xp_assert_close(res.fr, f(res.xr, *args)) + + def test_special_cases(self, xp): + # Test edge cases and other special cases. + xp_test = array_namespace(xp.asarray(1.)) + + # Test that integers are not passed to `f` + # (otherwise this would overflow) + def f(x): + assert xp_test.isdtype(x.dtype, "numeric") + return x ** 98 - 1 + + result = _bracket_minimum(f, xp.asarray(-7., dtype=xp.float64), xr0=5) + assert result.success + + # Test maxiter = 0. Should do nothing to bracket. + def f(x): + return x**2 - 10 + + xl0, xm0, xr0 = xp.asarray(-3.), xp.asarray(-1.), xp.asarray(2.) + result = _bracket_minimum(f, xm0, xl0=xl0, xr0=xr0, maxiter=0) + xp_assert_equal(result.xl, xl0) + xp_assert_equal(result.xm, xm0) + xp_assert_equal(result.xr, xr0) + + # Test scalar `args` (not in tuple) + def f(x, c): + return c*x**2 - 1 + + result = _bracket_minimum(f, xp.asarray(-1.), args=xp.asarray(3.)) + assert result.success + xp_assert_close(result.fl, f(result.xl, 3)) + + # Initial bracket is valid. + f = self.init_f() + xl0, xm0, xr0 = xp.asarray(-1.0), xp.asarray(-0.2), xp.asarray(1.0) + args = (xp.asarray(0.), xp.asarray(0.)) + result = _bracket_minimum(f, xm0, xl0=xl0, xr0=xr0, args=args) + assert f.count == 3 + + xp_assert_equal(result.xl, xl0) + xp_assert_equal(result.xm , xm0) + xp_assert_equal(result.xr, xr0) + xp_assert_equal(result.fl, f(xl0, *args)) + xp_assert_equal(result.fm, f(xm0, *args)) + xp_assert_equal(result.fr, f(xr0, *args)) + + def test_gh_20562_left(self, xp): + # Regression test for https://github.com/scipy/scipy/issues/20562 + # minimum of f in [xmin, xmax] is at xmin. 
+ xmin, xmax = xp.asarray(0.21933608), xp.asarray(1.39713606) + + def f(x): + log_a, log_b = xp.log(xmin), xp.log(xmax) + return -((log_b - log_a)*x)**-1 + + result = _bracket_minimum(f, xp.asarray(0.5535723499480897), xmin=xmin, + xmax=xmax) + assert xmin == result.xl + + def test_gh_20562_right(self, xp): + # Regression test for https://github.com/scipy/scipy/issues/20562 + # minimum of f in [xmin, xmax] is at xmax. + xmin, xmax = xp.asarray(-1.39713606), xp.asarray(-0.21933608) + + def f(x): + log_a, log_b = xp.log(-xmax), xp.log(-xmin) + return ((log_b - log_a)*x)**-1 + + result = _bracket_minimum(f, xp.asarray(-0.5535723499480897), + xmin=xmin, xmax=xmax) + assert xmax == result.xr diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_chandrupatla.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_chandrupatla.py new file mode 100644 index 0000000000000000000000000000000000000000..bcc2ae1a70be2883a3d2346a1cb8d95d8ac027fa --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_chandrupatla.py @@ -0,0 +1,984 @@ +import math +import pytest +import numpy as np + +from scipy import stats, special +import scipy._lib._elementwise_iterative_method as eim +from scipy.conftest import array_api_compatible +from scipy._lib._array_api import array_namespace, is_cupy, is_numpy, xp_ravel, xp_size +from scipy._lib._array_api_no_0d import (xp_assert_close, xp_assert_equal, + xp_assert_less) + +from scipy.optimize.elementwise import find_minimum, find_root +from scipy.optimize._tstutils import _CHANDRUPATLA_TESTS + +from itertools import permutations +from .test_zeros import TestScalarRootFinders + + +def _vectorize(xp): + # xp-compatible version of np.vectorize + # assumes arguments are all arrays of the same shape + def decorator(f): + def wrapped(*arg_arrays): + shape = arg_arrays[0].shape + arg_arrays = [xp_ravel(arg_array, xp=xp) for arg_array in arg_arrays] + res = [] + for i in range(math.prod(shape)): + arg_scalars = [arg_array[i] for arg_array in arg_arrays] + res.append(f(*arg_scalars)) + return res + + return wrapped + + return decorator + + +# These tests were originally written for the private `optimize._chandrupatla` +# interfaces, but now we want the tests to check the behavior of the public +# `optimize.elementwise` interfaces. Therefore, rather than importing +# `_chandrupatla`/`_chandrupatla_minimize` from `_chandrupatla.py`, we import +# `find_root`/`find_minimum` from `optimize.elementwise` and wrap those +# functions to conform to the private interface. This may look a little strange, +# since it effectively just inverts the interface transformation done within the +# `find_root`/`find_minimum` functions, but it allows us to run the original, +# unmodified tests on the public interfaces, simplifying the PR that adds +# the public interfaces. We'll refactor this when we want to @parametrize the +# tests over multiple `method`s. 
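+#
+# For reference, the mapping the wrapper performs is roughly the following
+# (sketched here for a two-point root bracket; `find_minimum` is handled
+# analogously with a three-point bracket and its f-values):
+#
+#     res = find_root(f, (xl0, xr0), ...)
+#     res.xl, res.xr = res.bracket      # restore private-style names
+#     res.fl, res.fr = res.f_bracket
+#     res.fun = res.f_x
+#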
+def _wrap_chandrupatla(func): + def _chandrupatla_wrapper(f, *bracket, **kwargs): + # avoid passing arguments to `find_minimum` to this function + tol_keys = {'xatol', 'xrtol', 'fatol', 'frtol'} + tolerances = {key: kwargs.pop(key) for key in tol_keys if key in kwargs} + _callback = kwargs.pop('callback', None) + if callable(_callback): + def callback(res): + if func == find_root: + res.xl, res.xr = res.bracket + res.fl, res.fr = res.f_bracket + else: + res.xl, res.xm, res.xr = res.bracket + res.fl, res.fm, res.fr = res.f_bracket + res.fun = res.f_x + del res.bracket + del res.f_bracket + del res.f_x + return _callback(res) + else: + callback = _callback + + res = func(f, bracket, tolerances=tolerances, callback=callback, **kwargs) + if func == find_root: + res.xl, res.xr = res.bracket + res.fl, res.fr = res.f_bracket + else: + res.xl, res.xm, res.xr = res.bracket + res.fl, res.fm, res.fr = res.f_bracket + res.fun = res.f_x + del res.bracket + del res.f_bracket + del res.f_x + return res + return _chandrupatla_wrapper + + +_chandrupatla_root = _wrap_chandrupatla(find_root) +_chandrupatla_minimize = _wrap_chandrupatla(find_minimum) + + +def f1(x): + return 100*(1 - x**3.)**2 + (1-x**2.) + 2*(1-x)**2. + + +def f2(x): + return 5 + (x - 2.)**6 + + +def f3(x): + xp = array_namespace(x) + return xp.exp(x) - 5*x + + +def f4(x): + return x**5. - 5*x**3. - 20.*x + 5. + + +def f5(x): + return 8*x**3 - 2*x**2 - 7*x + 3 + + +def _bracket_minimum(func, x1, x2): + phi = 1.61803398875 + maxiter = 100 + f1 = func(x1) + f2 = func(x2) + step = x2 - x1 + x1, x2, f1, f2, step = ((x2, x1, f2, f1, -step) if f2 > f1 + else (x1, x2, f1, f2, step)) + + for i in range(maxiter): + step *= phi + x3 = x2 + step + f3 = func(x3) + if f3 < f2: + x1, x2, f1, f2 = x2, x3, f2, f3 + else: + break + return x1, x2, x3, f1, f2, f3 + + +cases = [ + (f1, -1, 11), + (f1, -2, 13), + (f1, -4, 13), + (f1, -8, 15), + (f1, -16, 16), + (f1, -32, 19), + (f1, -64, 20), + (f1, -128, 21), + (f1, -256, 21), + (f1, -512, 19), + (f1, -1024, 24), + (f2, -1, 8), + (f2, -2, 6), + (f2, -4, 6), + (f2, -8, 7), + (f2, -16, 8), + (f2, -32, 8), + (f2, -64, 9), + (f2, -128, 11), + (f2, -256, 13), + (f2, -512, 12), + (f2, -1024, 13), + (f3, -1, 11), + (f3, -2, 11), + (f3, -4, 11), + (f3, -8, 10), + (f3, -16, 14), + (f3, -32, 12), + (f3, -64, 15), + (f3, -128, 18), + (f3, -256, 18), + (f3, -512, 19), + (f3, -1024, 19), + (f4, -0.05, 9), + (f4, -0.10, 11), + (f4, -0.15, 11), + (f4, -0.20, 11), + (f4, -0.25, 11), + (f4, -0.30, 9), + (f4, -0.35, 9), + (f4, -0.40, 9), + (f4, -0.45, 10), + (f4, -0.50, 10), + (f4, -0.55, 10), + (f5, -0.05, 6), + (f5, -0.10, 7), + (f5, -0.15, 8), + (f5, -0.20, 10), + (f5, -0.25, 9), + (f5, -0.30, 8), + (f5, -0.35, 7), + (f5, -0.40, 7), + (f5, -0.45, 9), + (f5, -0.50, 9), + (f5, -0.55, 8) +] + + +@array_api_compatible +@pytest.mark.usefixtures("skip_xp_backends") +@pytest.mark.skip_xp_backends('jax.numpy', + reason='JAX arrays do not support item assignment.') +@pytest.mark.skip_xp_backends('array_api_strict', + reason='Currently uses fancy indexing assignment.') +class TestChandrupatlaMinimize: + + def f(self, x, loc): + xp = array_namespace(x, loc) + res = -xp.exp(-1/2 * (x-loc)**2) / (2*xp.pi)**0.5 + return xp.asarray(res, dtype=x.dtype)[()] + + @pytest.mark.parametrize('dtype', ('float32', 'float64')) + @pytest.mark.parametrize('loc', [0.6, np.linspace(-1.05, 1.05, 10)]) + def test_basic(self, loc, xp, dtype): + # Find mode of normal distribution. 
Compare mode against location + # parameter and value of pdf at mode against expected pdf. + rtol = {'float32': 5e-3, 'float64': 5e-7}[dtype] + dtype = getattr(xp, dtype) + bracket = (xp.asarray(xi, dtype=dtype) for xi in (-5, 0, 5)) + loc = xp.asarray(loc, dtype=dtype) + fun = xp.broadcast_to(xp.asarray(-stats.norm.pdf(0), dtype=dtype), loc.shape) + + res = _chandrupatla_minimize(self.f, *bracket, args=(loc,)) + xp_assert_close(res.x, loc, rtol=rtol) + xp_assert_equal(res.fun, fun) + + @pytest.mark.parametrize('shape', [tuple(), (12,), (3, 4), (3, 2, 2)]) + def test_vectorization(self, shape, xp): + # Test for correct functionality, output shapes, and dtypes for various + # input shapes. + loc = xp.linspace(-0.05, 1.05, 12).reshape(shape) if shape else xp.asarray(0.6) + args = (loc,) + bracket = xp.asarray(-5.), xp.asarray(0.), xp.asarray(5.) + xp_test = array_namespace(loc) # need xp.stack + + @_vectorize(xp) + def chandrupatla_single(loc_single): + return _chandrupatla_minimize(self.f, *bracket, args=(loc_single,)) + + def f(*args, **kwargs): + f.f_evals += 1 + return self.f(*args, **kwargs) + f.f_evals = 0 + + res = _chandrupatla_minimize(f, *bracket, args=args) + refs = chandrupatla_single(loc) + + attrs = ['x', 'fun', 'success', 'status', 'nfev', 'nit', + 'xl', 'xm', 'xr', 'fl', 'fm', 'fr'] + for attr in attrs: + ref_attr = xp_test.stack([getattr(ref, attr) for ref in refs]) + res_attr = xp_ravel(getattr(res, attr)) + xp_assert_equal(res_attr, ref_attr) + assert getattr(res, attr).shape == shape + + xp_assert_equal(res.fun, self.f(res.x, *args)) + xp_assert_equal(res.fl, self.f(res.xl, *args)) + xp_assert_equal(res.fm, self.f(res.xm, *args)) + xp_assert_equal(res.fr, self.f(res.xr, *args)) + assert xp.max(res.nfev) == f.f_evals + assert xp.max(res.nit) == f.f_evals - 3 + + assert xp_test.isdtype(res.success.dtype, 'bool') + assert xp_test.isdtype(res.status.dtype, 'integral') + assert xp_test.isdtype(res.nfev.dtype, 'integral') + assert xp_test.isdtype(res.nit.dtype, 'integral') + + + def test_flags(self, xp): + # Test cases that should produce different status flags; show that all + # can be produced simultaneously. + def f(xs, js): + funcs = [lambda x: (x - 2.5) ** 2, + lambda x: x - 10, + lambda x: (x - 2.5) ** 4, + lambda x: xp.full_like(x, xp.asarray(xp.nan))] + res = [] + for i in range(xp_size(js)): + x = xs[i, ...] 
+ j = int(xp_ravel(js)[i]) + res.append(funcs[j](x)) + return xp.stack(res) + + args = (xp.arange(4, dtype=xp.int64),) + bracket = (xp.asarray([0]*4, dtype=xp.float64), + xp.asarray([2]*4, dtype=xp.float64), + xp.asarray([np.pi]*4, dtype=xp.float64)) + res = _chandrupatla_minimize(f, *bracket, args=args, maxiter=10) + + ref_flags = xp.asarray([eim._ECONVERGED, eim._ESIGNERR, eim._ECONVERR, + eim._EVALUEERR], dtype=xp.int32) + xp_assert_equal(res.status, ref_flags) + + def test_convergence(self, xp): + # Test that the convergence tolerances behave as expected + rng = np.random.default_rng(2585255913088665241) + p = xp.asarray(rng.random(size=3)) + bracket = (xp.asarray(-5), xp.asarray(0), xp.asarray(5)) + args = (p,) + kwargs0 = dict(args=args, xatol=0, xrtol=0, fatol=0, frtol=0) + + kwargs = kwargs0.copy() + kwargs['xatol'] = 1e-3 + res1 = _chandrupatla_minimize(self.f, *bracket, **kwargs) + j1 = xp.abs(res1.xr - res1.xl) + tol = xp.asarray(4*kwargs['xatol'], dtype=p.dtype) + xp_assert_less(j1, xp.full((3,), tol, dtype=p.dtype)) + kwargs['xatol'] = 1e-6 + res2 = _chandrupatla_minimize(self.f, *bracket, **kwargs) + j2 = xp.abs(res2.xr - res2.xl) + tol = xp.asarray(4*kwargs['xatol'], dtype=p.dtype) + xp_assert_less(j2, xp.full((3,), tol, dtype=p.dtype)) + xp_assert_less(j2, j1) + + kwargs = kwargs0.copy() + kwargs['xrtol'] = 1e-3 + res1 = _chandrupatla_minimize(self.f, *bracket, **kwargs) + j1 = xp.abs(res1.xr - res1.xl) + tol = xp.asarray(4*kwargs['xrtol']*xp.abs(res1.x), dtype=p.dtype) + xp_assert_less(j1, tol) + kwargs['xrtol'] = 1e-6 + res2 = _chandrupatla_minimize(self.f, *bracket, **kwargs) + j2 = xp.abs(res2.xr - res2.xl) + tol = xp.asarray(4*kwargs['xrtol']*xp.abs(res2.x), dtype=p.dtype) + xp_assert_less(j2, tol) + xp_assert_less(j2, j1) + + kwargs = kwargs0.copy() + kwargs['fatol'] = 1e-3 + res1 = _chandrupatla_minimize(self.f, *bracket, **kwargs) + h1 = xp.abs(res1.fl - 2 * res1.fm + res1.fr) + tol = xp.asarray(2*kwargs['fatol'], dtype=p.dtype) + xp_assert_less(h1, xp.full((3,), tol, dtype=p.dtype)) + kwargs['fatol'] = 1e-6 + res2 = _chandrupatla_minimize(self.f, *bracket, **kwargs) + h2 = xp.abs(res2.fl - 2 * res2.fm + res2.fr) + tol = xp.asarray(2*kwargs['fatol'], dtype=p.dtype) + xp_assert_less(h2, xp.full((3,), tol, dtype=p.dtype)) + xp_assert_less(h2, h1) + + kwargs = kwargs0.copy() + kwargs['frtol'] = 1e-3 + res1 = _chandrupatla_minimize(self.f, *bracket, **kwargs) + h1 = xp.abs(res1.fl - 2 * res1.fm + res1.fr) + tol = xp.asarray(2*kwargs['frtol']*xp.abs(res1.fun), dtype=p.dtype) + xp_assert_less(h1, tol) + kwargs['frtol'] = 1e-6 + res2 = _chandrupatla_minimize(self.f, *bracket, **kwargs) + h2 = xp.abs(res2.fl - 2 * res2.fm + res2.fr) + tol = xp.asarray(2*kwargs['frtol']*abs(res2.fun), dtype=p.dtype) + xp_assert_less(h2, tol) + xp_assert_less(h2, h1) + + def test_maxiter_callback(self, xp): + # Test behavior of `maxiter` parameter and `callback` interface + loc = xp.asarray(0.612814) + bracket = (xp.asarray(-5), xp.asarray(0), xp.asarray(5)) + maxiter = 5 + + res = _chandrupatla_minimize(self.f, *bracket, args=(loc,), + maxiter=maxiter) + assert not xp.any(res.success) + assert xp.all(res.nfev == maxiter+3) + assert xp.all(res.nit == maxiter) + + def callback(res): + callback.iter += 1 + callback.res = res + assert hasattr(res, 'x') + if callback.iter == 0: + # callback is called once with initial bracket + assert (res.xl, res.xm, res.xr) == bracket + else: + changed_xr = (res.xl == callback.xl) & (res.xr != callback.xr) + changed_xl = (res.xl != callback.xl) & (res.xr == 
callback.xr) + assert xp.all(changed_xr | changed_xl) + + callback.xl = res.xl + callback.xr = res.xr + assert res.status == eim._EINPROGRESS + xp_assert_equal(self.f(res.xl, loc), res.fl) + xp_assert_equal(self.f(res.xm, loc), res.fm) + xp_assert_equal(self.f(res.xr, loc), res.fr) + xp_assert_equal(self.f(res.x, loc), res.fun) + if callback.iter == maxiter: + raise StopIteration + + callback.xl = xp.nan + callback.xr = xp.nan + callback.iter = -1 # callback called once before first iteration + callback.res = None + + res2 = _chandrupatla_minimize(self.f, *bracket, args=(loc,), + callback=callback) + + # terminating with callback is identical to terminating due to maxiter + # (except for `status`) + for key in res.keys(): + if key == 'status': + assert res[key] == eim._ECONVERR + # assert callback.res[key] == eim._EINPROGRESS + assert res2[key] == eim._ECALLBACK + else: + assert res2[key] == callback.res[key] == res[key] + + @pytest.mark.parametrize('case', cases) + def test_nit_expected(self, case, xp): + # Test that `_chandrupatla` implements Chandrupatla's algorithm: + # in all 55 test cases, the number of iterations performed + # matches the number reported in the original paper. + func, x1, nit = case + + # Find bracket using the algorithm in the paper + step = 0.2 + x2 = x1 + step + x1, x2, x3, f1, f2, f3 = _bracket_minimum(func, x1, x2) + + # Use tolerances from original paper + xatol = 0.0001 + fatol = 0.000001 + xrtol = 1e-16 + frtol = 1e-16 + + bracket = xp.asarray(x1), xp.asarray(x2), xp.asarray(x3, dtype=xp.float64) + res = _chandrupatla_minimize(func, *bracket, xatol=xatol, + fatol=fatol, xrtol=xrtol, frtol=frtol) + xp_assert_equal(res.nit, xp.asarray(nit, dtype=xp.int32)) + + @pytest.mark.parametrize("loc", (0.65, [0.65, 0.7])) + @pytest.mark.parametrize("dtype", ('float16', 'float32', 'float64')) + def test_dtype(self, loc, dtype, xp): + # Test that dtypes are preserved + dtype = getattr(xp, dtype) + + loc = xp.asarray(loc, dtype=dtype) + bracket = (xp.asarray(-3, dtype=dtype), + xp.asarray(1, dtype=dtype), + xp.asarray(5, dtype=dtype)) + + xp_test = array_namespace(loc) # need astype + def f(x, loc): + assert x.dtype == dtype + return xp_test.astype((x - loc)**2, dtype) + + res = _chandrupatla_minimize(f, *bracket, args=(loc,)) + assert res.x.dtype == dtype + xp_assert_close(res.x, loc, rtol=math.sqrt(xp.finfo(dtype).eps)) + + def test_input_validation(self, xp): + # Test input validation for appropriate error messages + + message = '`func` must be callable.' + bracket = xp.asarray(-4), xp.asarray(0), xp.asarray(4) + with pytest.raises(ValueError, match=message): + _chandrupatla_minimize(None, *bracket) + + message = 'Abscissae and function output must be real numbers.' + bracket = xp.asarray(-4 + 1j), xp.asarray(0), xp.asarray(4) + with pytest.raises(ValueError, match=message): + _chandrupatla_minimize(lambda x: x, *bracket) + + message = "...be broadcast..." + bracket = xp.asarray([-2, -3]), xp.asarray([0, 0]), xp.asarray([3, 4, 5]) + # raised by `np.broadcast, but the traceback is readable IMO + with pytest.raises((ValueError, RuntimeError), match=message): + _chandrupatla_minimize(lambda x: x, *bracket) + + message = "The shape of the array returned by `func` must be the same" + bracket = xp.asarray([-3, -3]), xp.asarray([0, 0]), xp.asarray([5, 5]) + with pytest.raises(ValueError, match=message): + _chandrupatla_minimize(lambda x: [x[0, ...], x[1, ...], x[1, ...]], + *bracket) + + message = 'Tolerances must be non-negative scalars.' 
+ bracket = xp.asarray(-4), xp.asarray(0), xp.asarray(4) + with pytest.raises(ValueError, match=message): + _chandrupatla_minimize(lambda x: x, *bracket, xatol=-1) + with pytest.raises(ValueError, match=message): + _chandrupatla_minimize(lambda x: x, *bracket, xrtol=xp.nan) + with pytest.raises(ValueError, match=message): + _chandrupatla_minimize(lambda x: x, *bracket, fatol='ekki') + with pytest.raises(ValueError, match=message): + _chandrupatla_minimize(lambda x: x, *bracket, frtol=xp.nan) + + message = '`maxiter` must be a non-negative integer.' + with pytest.raises(ValueError, match=message): + _chandrupatla_minimize(lambda x: x, *bracket, maxiter=1.5) + with pytest.raises(ValueError, match=message): + _chandrupatla_minimize(lambda x: x, *bracket, maxiter=-1) + + message = '`callback` must be callable.' + with pytest.raises(ValueError, match=message): + _chandrupatla_minimize(lambda x: x, *bracket, callback='shrubbery') + + def test_bracket_order(self, xp): + # Confirm that order of points in bracket doesn't + xp_test = array_namespace(xp.asarray(1.)) # need `xp.newaxis` + loc = xp.linspace(-1, 1, 6)[:, xp_test.newaxis] + brackets = xp.asarray(list(permutations([-5, 0, 5]))).T + res = _chandrupatla_minimize(self.f, *brackets, args=(loc,)) + assert xp.all(xp.isclose(res.x, loc) | (res.fun == self.f(loc, loc))) + ref = res.x[:, 0] # all columns should be the same + xp_test = array_namespace(loc) # need `xp.broadcast_arrays + xp_assert_close(*xp_test.broadcast_arrays(res.x.T, ref), rtol=1e-15) + + def test_special_cases(self, xp): + # Test edge cases and other special cases + + # Test that integers are not passed to `f` + xp_test = array_namespace(xp.asarray(1.)) # need `xp.isdtype` + def f(x): + assert xp_test.isdtype(x.dtype, "real floating") + return (x - 1)**2 + + bracket = xp.asarray(-7), xp.asarray(0), xp.asarray(8) + with np.errstate(invalid='ignore'): + res = _chandrupatla_minimize(f, *bracket, fatol=0, frtol=0) + assert res.success + xp_assert_close(res.x, xp.asarray(1.), rtol=1e-3) + xp_assert_close(res.fun, xp.asarray(0.), atol=1e-200) + + # Test that if all elements of bracket equal minimizer, algorithm + # reports convergence + def f(x): + return (x-1)**2 + + bracket = xp.asarray(1), xp.asarray(1), xp.asarray(1) + res = _chandrupatla_minimize(f, *bracket) + assert res.success + xp_assert_equal(res.x, xp.asarray(1.)) + + # Test maxiter = 0. Should do nothing to bracket. 
+ def f(x): + return (x-1)**2 + + bracket = xp.asarray(-3), xp.asarray(1.1), xp.asarray(5) + res = _chandrupatla_minimize(f, *bracket, maxiter=0) + assert res.xl, res.xr == bracket + assert res.nit == 0 + assert res.nfev == 3 + assert res.status == -2 + assert res.x == 1.1 # best so far + + # Test scalar `args` (not in tuple) + def f(x, c): + return (x-c)**2 - 1 + + bracket = xp.asarray(-1), xp.asarray(0), xp.asarray(1) + c = xp.asarray(1/3) + res = _chandrupatla_minimize(f, *bracket, args=(c,)) + xp_assert_close(res.x, c) + + # Test zero tolerances + def f(x): + return -xp.sin(x) + + bracket = xp.asarray(0), xp.asarray(1), xp.asarray(xp.pi) + res = _chandrupatla_minimize(f, *bracket, xatol=0, xrtol=0, fatol=0, frtol=0) + assert res.success + # found a minimum exactly (according to floating point arithmetic) + assert res.xl < res.xm < res.xr + assert f(res.xl) == f(res.xm) == f(res.xr) + + +@array_api_compatible +@pytest.mark.usefixtures("skip_xp_backends") +@pytest.mark.skip_xp_backends('array_api_strict', + reason='Currently uses fancy indexing assignment.') +@pytest.mark.skip_xp_backends('jax.numpy', + reason='JAX arrays do not support item assignment.') +@pytest.mark.skip_xp_backends('cupy', + reason='cupy/cupy#8391') +class TestChandrupatla(TestScalarRootFinders): + + def f(self, q, p): + return special.ndtr(q) - p + + @pytest.mark.parametrize('p', [0.6, np.linspace(-0.05, 1.05, 10)]) + def test_basic(self, p, xp): + # Invert distribution CDF and compare against distribution `ppf` + a, b = xp.asarray(-5.), xp.asarray(5.) + res = _chandrupatla_root(self.f, a, b, args=(xp.asarray(p),)) + ref = xp.asarray(stats.norm().ppf(p), dtype=xp.asarray(p).dtype) + xp_assert_close(res.x, ref) + + @pytest.mark.parametrize('shape', [tuple(), (12,), (3, 4), (3, 2, 2)]) + def test_vectorization(self, shape, xp): + # Test for correct functionality, output shapes, and dtypes for various + # input shapes. 
+ p = (np.linspace(-0.05, 1.05, 12).reshape(shape) if shape + else np.float64(0.6)) + p_xp = xp.asarray(p) + args_xp = (p_xp,) + dtype = p_xp.dtype + xp_test = array_namespace(p_xp) # need xp.bool + + @np.vectorize + def chandrupatla_single(p): + return _chandrupatla_root(self.f, -5, 5, args=(p,)) + + def f(*args, **kwargs): + f.f_evals += 1 + return self.f(*args, **kwargs) + f.f_evals = 0 + + res = _chandrupatla_root(f, xp.asarray(-5.), xp.asarray(5.), args=args_xp) + refs = chandrupatla_single(p).ravel() + + ref_x = [ref.x for ref in refs] + ref_x = xp.reshape(xp.asarray(ref_x, dtype=dtype), shape) + xp_assert_close(res.x, ref_x) + + ref_fun = [ref.fun for ref in refs] + ref_fun = xp.reshape(xp.asarray(ref_fun, dtype=dtype), shape) + xp_assert_close(res.fun, ref_fun, atol=1e-15) + xp_assert_equal(res.fun, self.f(res.x, *args_xp)) + + ref_success = [bool(ref.success) for ref in refs] + ref_success = xp.reshape(xp.asarray(ref_success, dtype=xp_test.bool), shape) + xp_assert_equal(res.success, ref_success) + + ref_flag = [ref.status for ref in refs] + ref_flag = xp.reshape(xp.asarray(ref_flag, dtype=xp.int32), shape) + xp_assert_equal(res.status, ref_flag) + + ref_nfev = [ref.nfev for ref in refs] + ref_nfev = xp.reshape(xp.asarray(ref_nfev, dtype=xp.int32), shape) + if is_numpy(xp): + xp_assert_equal(res.nfev, ref_nfev) + assert xp.max(res.nfev) == f.f_evals + else: # different backend may lead to different nfev + assert res.nfev.shape == shape + assert res.nfev.dtype == xp.int32 + + ref_nit = [ref.nit for ref in refs] + ref_nit = xp.reshape(xp.asarray(ref_nit, dtype=xp.int32), shape) + if is_numpy(xp): + xp_assert_equal(res.nit, ref_nit) + assert xp.max(res.nit) == f.f_evals-2 + else: + assert res.nit.shape == shape + assert res.nit.dtype == xp.int32 + + ref_xl = [ref.xl for ref in refs] + ref_xl = xp.reshape(xp.asarray(ref_xl, dtype=dtype), shape) + xp_assert_close(res.xl, ref_xl) + + ref_xr = [ref.xr for ref in refs] + ref_xr = xp.reshape(xp.asarray(ref_xr, dtype=dtype), shape) + xp_assert_close(res.xr, ref_xr) + + xp_assert_less(res.xl, res.xr) + finite = xp.isfinite(res.x) + assert xp.all((res.x[finite] == res.xl[finite]) + | (res.x[finite] == res.xr[finite])) + + # PyTorch and CuPy don't solve to the same accuracy as NumPy - that's OK. + atol = 1e-15 if is_numpy(xp) else 1e-9 + + ref_fl = [ref.fl for ref in refs] + ref_fl = xp.reshape(xp.asarray(ref_fl, dtype=dtype), shape) + xp_assert_close(res.fl, ref_fl, atol=atol) + xp_assert_equal(res.fl, self.f(res.xl, *args_xp)) + + ref_fr = [ref.fr for ref in refs] + ref_fr = xp.reshape(xp.asarray(ref_fr, dtype=dtype), shape) + xp_assert_close(res.fr, ref_fr, atol=atol) + xp_assert_equal(res.fr, self.f(res.xr, *args_xp)) + + assert xp.all(xp.abs(res.fun[finite]) == + xp.minimum(xp.abs(res.fl[finite]), + xp.abs(res.fr[finite]))) + + def test_flags(self, xp): + # Test cases that should produce different status flags; show that all + # can be produced simultaneously. + def f(xs, js): + # Note that full_like and int(j) shouldn't really be required. CuPy + # is just really picky here, so I'm making it a special case to + # make sure the other backends work when the user is less careful. 
+ assert js.dtype == xp.int64 + if is_cupy(xp): + funcs = [lambda x: x - 2.5, + lambda x: x - 10, + lambda x: (x - 0.1)**3, + lambda x: xp.full_like(x, xp.asarray(xp.nan))] + return [funcs[int(j)](x) for x, j in zip(xs, js)] + + funcs = [lambda x: x - 2.5, + lambda x: x - 10, + lambda x: (x - 0.1) ** 3, + lambda x: xp.nan] + return [funcs[j](x) for x, j in zip(xs, js)] + + args = (xp.arange(4, dtype=xp.int64),) + a, b = xp.asarray([0.]*4), xp.asarray([xp.pi]*4) + res = _chandrupatla_root(f, a, b, args=args, maxiter=2) + + ref_flags = xp.asarray([eim._ECONVERGED, + eim._ESIGNERR, + eim._ECONVERR, + eim._EVALUEERR], dtype=xp.int32) + xp_assert_equal(res.status, ref_flags) + + def test_convergence(self, xp): + # Test that the convergence tolerances behave as expected + rng = np.random.default_rng(2585255913088665241) + p = xp.asarray(rng.random(size=3)) + bracket = (-xp.asarray(5.), xp.asarray(5.)) + args = (p,) + kwargs0 = dict(args=args, xatol=0, xrtol=0, fatol=0, frtol=0) + + kwargs = kwargs0.copy() + kwargs['xatol'] = 1e-3 + res1 = _chandrupatla_root(self.f, *bracket, **kwargs) + xp_assert_less(res1.xr - res1.xl, xp.full_like(p, xp.asarray(1e-3))) + kwargs['xatol'] = 1e-6 + res2 = _chandrupatla_root(self.f, *bracket, **kwargs) + xp_assert_less(res2.xr - res2.xl, xp.full_like(p, xp.asarray(1e-6))) + xp_assert_less(res2.xr - res2.xl, res1.xr - res1.xl) + + kwargs = kwargs0.copy() + kwargs['xrtol'] = 1e-3 + res1 = _chandrupatla_root(self.f, *bracket, **kwargs) + xp_assert_less(res1.xr - res1.xl, 1e-3 * xp.abs(res1.x)) + kwargs['xrtol'] = 1e-6 + res2 = _chandrupatla_root(self.f, *bracket, **kwargs) + xp_assert_less(res2.xr - res2.xl, 1e-6 * xp.abs(res2.x)) + xp_assert_less(res2.xr - res2.xl, res1.xr - res1.xl) + + kwargs = kwargs0.copy() + kwargs['fatol'] = 1e-3 + res1 = _chandrupatla_root(self.f, *bracket, **kwargs) + xp_assert_less(xp.abs(res1.fun), xp.full_like(p, xp.asarray(1e-3))) + kwargs['fatol'] = 1e-6 + res2 = _chandrupatla_root(self.f, *bracket, **kwargs) + xp_assert_less(xp.abs(res2.fun), xp.full_like(p, xp.asarray(1e-6))) + xp_assert_less(xp.abs(res2.fun), xp.abs(res1.fun)) + + kwargs = kwargs0.copy() + kwargs['frtol'] = 1e-3 + x1, x2 = bracket + f0 = xp.minimum(xp.abs(self.f(x1, *args)), xp.abs(self.f(x2, *args))) + res1 = _chandrupatla_root(self.f, *bracket, **kwargs) + xp_assert_less(xp.abs(res1.fun), 1e-3*f0) + kwargs['frtol'] = 1e-6 + res2 = _chandrupatla_root(self.f, *bracket, **kwargs) + xp_assert_less(xp.abs(res2.fun), 1e-6*f0) + xp_assert_less(xp.abs(res2.fun), xp.abs(res1.fun)) + + def test_maxiter_callback(self, xp): + # Test behavior of `maxiter` parameter and `callback` interface + p = xp.asarray(0.612814) + bracket = (xp.asarray(-5.), xp.asarray(5.)) + maxiter = 5 + + def f(q, p): + res = special.ndtr(q) - p + f.x = q + f.fun = res + return res + f.x = None + f.fun = None + + res = _chandrupatla_root(f, *bracket, args=(p,), maxiter=maxiter) + assert not xp.any(res.success) + assert xp.all(res.nfev == maxiter+2) + assert xp.all(res.nit == maxiter) + + def callback(res): + callback.iter += 1 + callback.res = res + assert hasattr(res, 'x') + if callback.iter == 0: + # callback is called once with initial bracket + assert (res.xl, res.xr) == bracket + else: + changed = (((res.xl == callback.xl) & (res.xr != callback.xr)) + | ((res.xl != callback.xl) & (res.xr == callback.xr))) + assert xp.all(changed) + + callback.xl = res.xl + callback.xr = res.xr + assert res.status == eim._EINPROGRESS + xp_assert_equal(self.f(res.xl, p), res.fl) + xp_assert_equal(self.f(res.xr, p), 
res.fr) + xp_assert_equal(self.f(res.x, p), res.fun) + if callback.iter == maxiter: + raise StopIteration + callback.iter = -1 # callback called once before first iteration + callback.res = None + callback.xl = None + callback.xr = None + + res2 = _chandrupatla_root(f, *bracket, args=(p,), callback=callback) + + # terminating with callback is identical to terminating due to maxiter + # (except for `status`) + for key in res.keys(): + if key == 'status': + xp_assert_equal(res[key], xp.asarray(eim._ECONVERR, dtype=xp.int32)) + xp_assert_equal(res2[key], xp.asarray(eim._ECALLBACK, dtype=xp.int32)) + elif key.startswith('_'): + continue + else: + xp_assert_equal(res2[key], res[key]) + + @pytest.mark.parametrize('case', _CHANDRUPATLA_TESTS) + def test_nit_expected(self, case, xp): + # Test that `_chandrupatla` implements Chandrupatla's algorithm: + # in all 40 test cases, the number of iterations performed + # matches the number reported in the original paper. + f, bracket, root, nfeval, id = case + # Chandrupatla's criterion is equivalent to + # abs(x2-x1) < 4*abs(xmin)*xrtol + xatol, but we use the more standard + # abs(x2-x1) < abs(xmin)*xrtol + xatol. Therefore, set xrtol to 4x + # that used by Chandrupatla in tests. + bracket = (xp.asarray(bracket[0], dtype=xp.float64), + xp.asarray(bracket[1], dtype=xp.float64)) + root = xp.asarray(root, dtype=xp.float64) + + res = _chandrupatla_root(f, *bracket, xrtol=4e-10, xatol=1e-5) + xp_assert_close(res.fun, xp.asarray(f(root), dtype=xp.float64), + rtol=1e-8, atol=2e-3) + xp_assert_equal(res.nfev, xp.asarray(nfeval, dtype=xp.int32)) + + @pytest.mark.parametrize("root", (0.622, [0.622, 0.623])) + @pytest.mark.parametrize("dtype", ('float16', 'float32', 'float64')) + def test_dtype(self, root, dtype, xp): + # Test that dtypes are preserved + not_numpy = not is_numpy(xp) + if not_numpy and dtype == 'float16': + pytest.skip("`float16` dtype only supported for NumPy arrays.") + + dtype = getattr(xp, dtype, None) + if dtype is None: + pytest.skip(f"{xp} does not support {dtype}") + + def f(x, root): + res = (x - root) ** 3. + if is_numpy(xp): # NumPy does not preserve dtype + return xp.asarray(res, dtype=dtype) + return res + + a, b = xp.asarray(-3, dtype=dtype), xp.asarray(3, dtype=dtype) + root = xp.asarray(root, dtype=dtype) + res = _chandrupatla_root(f, a, b, args=(root,), xatol=1e-3) + try: + xp_assert_close(res.x, root, atol=1e-3) + except AssertionError: + assert res.x.dtype == dtype + xp.all(res.fun == 0) + + def test_input_validation(self, xp): + # Test input validation for appropriate error messages + + def func(x): + return x + + message = '`func` must be callable.' + with pytest.raises(ValueError, match=message): + bracket = xp.asarray(-4), xp.asarray(4) + _chandrupatla_root(None, *bracket) + + message = 'Abscissae and function output must be real numbers.' + with pytest.raises(ValueError, match=message): + bracket = xp.asarray(-4+1j), xp.asarray(4) + _chandrupatla_root(func, *bracket) + + # raised by `np.broadcast, but the traceback is readable IMO + message = "...not be broadcast..." # all messages include this part + with pytest.raises((ValueError, RuntimeError), match=message): + bracket = xp.asarray([-2, -3]), xp.asarray([3, 4, 5]) + _chandrupatla_root(func, *bracket) + + message = "The shape of the array returned by `func`..." 
+ with pytest.raises(ValueError, match=message): + bracket = xp.asarray([-3, -3]), xp.asarray([5, 5]) + _chandrupatla_root(lambda x: [x[0], x[1], x[1]], *bracket) + + message = 'Tolerances must be non-negative scalars.' + bracket = xp.asarray(-4), xp.asarray(4) + with pytest.raises(ValueError, match=message): + _chandrupatla_root(func, *bracket, xatol=-1) + with pytest.raises(ValueError, match=message): + _chandrupatla_root(func, *bracket, xrtol=xp.nan) + with pytest.raises(ValueError, match=message): + _chandrupatla_root(func, *bracket, fatol='ekki') + with pytest.raises(ValueError, match=message): + _chandrupatla_root(func, *bracket, frtol=xp.nan) + + message = '`maxiter` must be a non-negative integer.' + with pytest.raises(ValueError, match=message): + _chandrupatla_root(func, *bracket, maxiter=1.5) + with pytest.raises(ValueError, match=message): + _chandrupatla_root(func, *bracket, maxiter=-1) + + message = '`callback` must be callable.' + with pytest.raises(ValueError, match=message): + _chandrupatla_root(func, *bracket, callback='shrubbery') + + def test_special_cases(self, xp): + # Test edge cases and other special cases + + # Test infinite function values + def f(x): + return 1 / x + 1 - 1 / (-x + 1) + + a, b = xp.asarray([0.1, 0., 0., 0.1]), xp.asarray([0.9, 1.0, 0.9, 1.0]) + + with np.errstate(divide='ignore', invalid='ignore'): + res = _chandrupatla_root(f, a, b) + + assert xp.all(res.success) + xp_assert_close(res.x[1:], xp.full((3,), res.x[0])) + + # Test that integers are not passed to `f` + # (otherwise this would overflow) + xp_test = array_namespace(a) # need isdtype + def f(x): + assert xp_test.isdtype(x.dtype, "real floating") + # this would overflow if x were an xp integer dtype + return x ** 31 - 1 + + # note that all inputs are integer type; result is automatically default float + res = _chandrupatla_root(f, xp.asarray(-7), xp.asarray(5)) + assert res.success + xp_assert_close(res.x, xp.asarray(1.)) + + # Test that if both ends of bracket equal root, algorithm reports + # convergence. + def f(x, root): + return x**2 - root + + root = xp.asarray([0, 1]) + res = _chandrupatla_root(f, xp.asarray(1), xp.asarray(1), args=(root,)) + xp_assert_equal(res.success, xp.asarray([False, True])) + xp_assert_equal(res.x, xp.asarray([xp.nan, 1.])) + + def f(x): + return 1/x + + with np.errstate(invalid='ignore'): + inf = xp.asarray(xp.inf) + res = _chandrupatla_root(f, inf, inf) + assert res.success + xp_assert_equal(res.x, xp.asarray(xp.inf)) + + # Test maxiter = 0. Should do nothing to bracket. + def f(x): + return x**3 - 1 + + a, b = xp.asarray(-3.), xp.asarray(5.) 
+ res = _chandrupatla_root(f, a, b, maxiter=0) + xp_assert_equal(res.success, xp.asarray(False)) + xp_assert_equal(res.status, xp.asarray(-2, dtype=xp.int32)) + xp_assert_equal(res.nit, xp.asarray(0, dtype=xp.int32)) + xp_assert_equal(res.nfev, xp.asarray(2, dtype=xp.int32)) + xp_assert_equal(res.xl, a) + xp_assert_equal(res.xr, b) + # The `x` attribute is the one with the smaller function value + xp_assert_equal(res.x, a) + # Reverse bracket; check that this is still true + res = _chandrupatla_root(f, -b, -a, maxiter=0) + xp_assert_equal(res.x, -a) + + # Test maxiter = 1 + res = _chandrupatla_root(f, a, b, maxiter=1) + xp_assert_equal(res.success, xp.asarray(True)) + xp_assert_equal(res.status, xp.asarray(0, dtype=xp.int32)) + xp_assert_equal(res.nit, xp.asarray(1, dtype=xp.int32)) + xp_assert_equal(res.nfev, xp.asarray(3, dtype=xp.int32)) + xp_assert_close(res.x, xp.asarray(1.)) + + # Test scalar `args` (not in tuple) + def f(x, c): + return c*x - 1 + + res = _chandrupatla_root(f, xp.asarray(-1), xp.asarray(1), args=xp.asarray(3)) + xp_assert_close(res.x, xp.asarray(1/3)) + + # # TODO: Test zero tolerance + # # ~~What's going on here - why are iterations repeated?~~ + # # tl goes to zero when xatol=xrtol=0. When function is nearly linear, + # # this causes convergence issues. + # def f(x): + # return np.cos(x) + # + # res = _chandrupatla_root(f, 0, np.pi, xatol=0, xrtol=0) + # assert res.nit < 100 + # xp = np.nextafter(res.x, np.inf) + # xm = np.nextafter(res.x, -np.inf) + # assert np.abs(res.fun) < np.abs(f(xp)) + # assert np.abs(res.fun) < np.abs(f(xm)) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_cobyla.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_cobyla.py new file mode 100644 index 0000000000000000000000000000000000000000..bd27eb9baa27433d1f801e8582dd2e8d29db3da2 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_cobyla.py @@ -0,0 +1,166 @@ +import math + +import numpy as np +from numpy.testing import assert_allclose, assert_, assert_array_equal +import pytest + +from scipy.optimize import fmin_cobyla, minimize, Bounds + + +class TestCobyla: + def setup_method(self): + self.x0 = [4.95, 0.66] + self.solution = [math.sqrt(25 - (2.0/3)**2), 2.0/3] + self.opts = {'disp': False, 'rhobeg': 1, 'tol': 1e-5, + 'maxiter': 100} + + def fun(self, x): + return x[0]**2 + abs(x[1])**3 + + def con1(self, x): + return x[0]**2 + x[1]**2 - 25 + + def con2(self, x): + return -self.con1(x) + + @pytest.mark.xslow(True, reason='not slow, but noisy so only run rarely') + def test_simple(self, capfd): + # use disp=True as smoke test for gh-8118 + x = fmin_cobyla(self.fun, self.x0, [self.con1, self.con2], rhobeg=1, + rhoend=1e-5, maxfun=100, disp=True) + assert_allclose(x, self.solution, atol=1e-4) + + def test_minimize_simple(self): + class Callback: + def __init__(self): + self.n_calls = 0 + self.last_x = None + + def __call__(self, x): + self.n_calls += 1 + self.last_x = x + + callback = Callback() + + # Minimize with method='COBYLA' + cons = ({'type': 'ineq', 'fun': self.con1}, + {'type': 'ineq', 'fun': self.con2}) + sol = minimize(self.fun, self.x0, method='cobyla', constraints=cons, + callback=callback, options=self.opts) + assert_allclose(sol.x, self.solution, atol=1e-4) + assert_(sol.success, sol.message) + assert_(sol.maxcv < 1e-5, sol) + assert_(sol.nfev < 70, sol) + assert_(sol.fun < 
self.fun(self.solution) + 1e-3, sol) + assert_(sol.nfev == callback.n_calls, + "Callback is not called exactly once for every function eval.") + assert_array_equal( + sol.x, + callback.last_x, + "Last design vector sent to the callback is not equal to returned value.", + ) + + def test_minimize_constraint_violation(self): + rng = np.random.RandomState(1234) + pb = rng.rand(10, 10) + spread = rng.rand(10) + + def p(w): + return pb.dot(w) + + def f(w): + return -(w * spread).sum() + + def c1(w): + return 500 - abs(p(w)).sum() + + def c2(w): + return 5 - abs(p(w).sum()) + + def c3(w): + return 5 - abs(p(w)).max() + + cons = ({'type': 'ineq', 'fun': c1}, + {'type': 'ineq', 'fun': c2}, + {'type': 'ineq', 'fun': c3}) + w0 = np.zeros((10,)) + sol = minimize(f, w0, method='cobyla', constraints=cons, + options={'catol': 1e-6}) + assert_(sol.maxcv > 1e-6) + assert_(not sol.success) + + +def test_vector_constraints(): + # test that fmin_cobyla and minimize can take a combination + # of constraints, some returning a number and others an array + def fun(x): + return (x[0] - 1)**2 + (x[1] - 2.5)**2 + + def fmin(x): + return fun(x) - 1 + + def cons1(x): + a = np.array([[1, -2, 2], [-1, -2, 6], [-1, 2, 2]]) + return np.array([a[i, 0] * x[0] + a[i, 1] * x[1] + + a[i, 2] for i in range(len(a))]) + + def cons2(x): + return x # identity, acts as bounds x > 0 + + x0 = np.array([2, 0]) + cons_list = [fun, cons1, cons2] + + xsol = [1.4, 1.7] + fsol = 0.8 + + # testing fmin_cobyla + sol = fmin_cobyla(fun, x0, cons_list, rhoend=1e-5) + assert_allclose(sol, xsol, atol=1e-4) + + sol = fmin_cobyla(fun, x0, fmin, rhoend=1e-5) + assert_allclose(fun(sol), 1, atol=1e-4) + + # testing minimize + constraints = [{'type': 'ineq', 'fun': cons} for cons in cons_list] + sol = minimize(fun, x0, constraints=constraints, tol=1e-5) + assert_allclose(sol.x, xsol, atol=1e-4) + assert_(sol.success, sol.message) + assert_allclose(sol.fun, fsol, atol=1e-4) + + constraints = {'type': 'ineq', 'fun': fmin} + sol = minimize(fun, x0, constraints=constraints, tol=1e-5) + assert_allclose(sol.fun, 1, atol=1e-4) + + +class TestBounds: + # Test cobyla support for bounds (only when used via `minimize`) + # Invalid bounds is tested in + # test_optimize.TestOptimizeSimple.test_minimize_invalid_bounds + + def test_basic(self): + def f(x): + return np.sum(x**2) + + lb = [-1, None, 1, None, -0.5] + ub = [-0.5, -0.5, None, None, -0.5] + bounds = [(a, b) for a, b in zip(lb, ub)] + # these are converted to Bounds internally + + res = minimize(f, x0=[1, 2, 3, 4, 5], method='cobyla', bounds=bounds) + ref = [-0.5, -0.5, 1, 0, -0.5] + assert res.success + assert_allclose(res.x, ref, atol=1e-3) + + def test_unbounded(self): + def f(x): + return np.sum(x**2) + + bounds = Bounds([-np.inf, -np.inf], [np.inf, np.inf]) + res = minimize(f, x0=[1, 2], method='cobyla', bounds=bounds) + assert res.success + assert_allclose(res.x, 0, atol=1e-3) + + bounds = Bounds([1, -np.inf], [np.inf, np.inf]) + res = minimize(f, x0=[1, 2], method='cobyla', bounds=bounds) + assert res.success + assert_allclose(res.x, [1, 0], atol=1e-3) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_cobyqa.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_cobyqa.py new file mode 100644 index 0000000000000000000000000000000000000000..bf16af71625c4d476353407f9708c981915b098a --- /dev/null +++ 
b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_cobyqa.py @@ -0,0 +1,252 @@ +import numpy as np +import pytest +import threading +from numpy.testing import assert_allclose, assert_equal + +from scipy.optimize import ( + Bounds, + LinearConstraint, + NonlinearConstraint, + OptimizeResult, + minimize, +) + + +class TestCOBYQA: + + def setup_method(self): + self.x0 = [4.95, 0.66] + self.options = {'maxfev': 100} + + @staticmethod + def fun(x, c=1.0): + return x[0]**2 + c * abs(x[1])**3 + + @staticmethod + def con(x): + return x[0]**2 + x[1]**2 - 25.0 + + def test_minimize_simple(self): + class Callback: + def __init__(self): + self.lock = threading.Lock() + self.n_calls = 0 + + def __call__(self, x): + assert isinstance(x, np.ndarray) + with self.lock: + self.n_calls += 1 + + class CallbackNewSyntax: + def __init__(self): + self.lock = threading.Lock() + self.n_calls = 0 + + def __call__(self, intermediate_result): + assert isinstance(intermediate_result, OptimizeResult) + with self.lock: + self.n_calls += 1 + + x0 = [4.95, 0.66] + callback = Callback() + callback_new_syntax = CallbackNewSyntax() + + # Minimize with method='cobyqa'. + constraints = NonlinearConstraint(self.con, 0.0, 0.0) + sol = minimize( + self.fun, + x0, + method='cobyqa', + constraints=constraints, + callback=callback, + options=self.options, + ) + sol_new = minimize( + self.fun, + x0, + method='cobyqa', + constraints=constraints, + callback=callback_new_syntax, + options=self.options, + ) + solution = [np.sqrt(25.0 - 4.0 / 9.0), 2.0 / 3.0] + assert_allclose(sol.x, solution, atol=1e-4) + assert sol.success, sol.message + assert sol.maxcv < 1e-8, sol + assert sol.nfev <= 100, sol + assert sol.fun < self.fun(solution) + 1e-3, sol + assert sol.nfev == callback.n_calls, \ + "Callback is not called exactly once for every function eval." + assert_equal(sol.x, sol_new.x) + assert sol_new.success, sol_new.message + assert sol.fun == sol_new.fun + assert sol.maxcv == sol_new.maxcv + assert sol.nfev == sol_new.nfev + assert sol.nit == sol_new.nit + assert sol_new.nfev == callback_new_syntax.n_calls, \ + "Callback is not called exactly once for every function eval." + + def test_minimize_bounds(self): + def fun_check_bounds(x): + assert np.all(bounds.lb <= x) and np.all(x <= bounds.ub) + return self.fun(x) + + # Case where the bounds are not active at the solution. + bounds = Bounds([4.5, 0.6], [5.0, 0.7]) + constraints = NonlinearConstraint(self.con, 0.0, 0.0) + sol = minimize( + fun_check_bounds, + self.x0, + method='cobyqa', + bounds=bounds, + constraints=constraints, + options=self.options, + ) + solution = [np.sqrt(25.0 - 4.0 / 9.0), 2.0 / 3.0] + assert_allclose(sol.x, solution, atol=1e-4) + assert sol.success, sol.message + assert sol.maxcv < 1e-8, sol + assert np.all(bounds.lb <= sol.x) and np.all(sol.x <= bounds.ub), sol + assert sol.nfev <= 100, sol + assert sol.fun < self.fun(solution) + 1e-3, sol + + # Case where the bounds are active at the solution. 
+ bounds = Bounds([5.0, 0.6], [5.5, 0.65]) + sol = minimize( + fun_check_bounds, + self.x0, + method='cobyqa', + bounds=bounds, + constraints=constraints, + options=self.options, + ) + assert not sol.success, sol.message + assert sol.maxcv > 0.35, sol + assert np.all(bounds.lb <= sol.x) and np.all(sol.x <= bounds.ub), sol + assert sol.nfev <= 100, sol + + def test_minimize_linear_constraints(self): + constraints = LinearConstraint([1.0, 1.0], 1.0, 1.0) + sol = minimize( + self.fun, + self.x0, + method='cobyqa', + constraints=constraints, + options=self.options, + ) + solution = [(4 - np.sqrt(7)) / 3, (np.sqrt(7) - 1) / 3] + assert_allclose(sol.x, solution, atol=1e-4) + assert sol.success, sol.message + assert sol.maxcv < 1e-8, sol + assert sol.nfev <= 100, sol + assert sol.fun < self.fun(solution) + 1e-3, sol + + def test_minimize_args(self): + constraints = NonlinearConstraint(self.con, 0.0, 0.0) + sol = minimize( + self.fun, + self.x0, + args=(2.0,), + method='cobyqa', + constraints=constraints, + options=self.options, + ) + solution = [np.sqrt(25.0 - 4.0 / 36.0), 2.0 / 6.0] + assert_allclose(sol.x, solution, atol=1e-4) + assert sol.success, sol.message + assert sol.maxcv < 1e-8, sol + assert sol.nfev <= 100, sol + assert sol.fun < self.fun(solution, 2.0) + 1e-3, sol + + def test_minimize_array(self): + def fun_array(x, dim): + f = np.array(self.fun(x)) + return np.reshape(f, (1,) * dim) + + # The argument fun can return an array with a single element. + bounds = Bounds([4.5, 0.6], [5.0, 0.7]) + constraints = NonlinearConstraint(self.con, 0.0, 0.0) + sol = minimize( + self.fun, + self.x0, + method='cobyqa', + bounds=bounds, + constraints=constraints, + options=self.options, + ) + for dim in [0, 1, 2]: + sol_array = minimize( + fun_array, + self.x0, + args=(dim,), + method='cobyqa', + bounds=bounds, + constraints=constraints, + options=self.options, + ) + assert_equal(sol.x, sol_array.x) + assert sol_array.success, sol_array.message + assert sol.fun == sol_array.fun + assert sol.maxcv == sol_array.maxcv + assert sol.nfev == sol_array.nfev + assert sol.nit == sol_array.nit + + # The argument fun cannot return an array with more than one element. 
+ with pytest.raises(TypeError): + minimize( + lambda x: np.array([self.fun(x), self.fun(x)]), + self.x0, + method='cobyqa', + bounds=bounds, + constraints=constraints, + options=self.options, + ) + + def test_minimize_maxfev(self): + constraints = NonlinearConstraint(self.con, 0.0, 0.0) + options = {'maxfev': 2} + sol = minimize( + self.fun, + self.x0, + method='cobyqa', + constraints=constraints, + options=options, + ) + assert not sol.success, sol.message + assert sol.nfev <= 2, sol + + def test_minimize_maxiter(self): + constraints = NonlinearConstraint(self.con, 0.0, 0.0) + options = {'maxiter': 2} + sol = minimize( + self.fun, + self.x0, + method='cobyqa', + constraints=constraints, + options=options, + ) + assert not sol.success, sol.message + assert sol.nit <= 2, sol + + def test_minimize_f_target(self): + constraints = NonlinearConstraint(self.con, 0.0, 0.0) + sol_ref = minimize( + self.fun, + self.x0, + method='cobyqa', + constraints=constraints, + options=self.options, + ) + options = dict(self.options) + options['f_target'] = sol_ref.fun + sol = minimize( + self.fun, + self.x0, + method='cobyqa', + constraints=constraints, + options=options, + ) + assert sol.success, sol.message + assert sol.maxcv < 1e-8, sol + assert sol.nfev <= sol_ref.nfev, sol + assert sol.fun <= sol_ref.fun, sol diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_constraint_conversion.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_constraint_conversion.py new file mode 100644 index 0000000000000000000000000000000000000000..c33183d5cb4e7ccaf4d755cbd2d8b28afaf46395 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_constraint_conversion.py @@ -0,0 +1,286 @@ +""" +Unit test for constraint conversion +""" + +import numpy as np +from numpy.testing import (assert_array_almost_equal, + assert_allclose, assert_warns, suppress_warnings) +import pytest +from scipy.optimize import (NonlinearConstraint, LinearConstraint, + OptimizeWarning, minimize, BFGS) +from .test_minimize_constrained import (Maratos, HyperbolicIneq, Rosenbrock, + IneqRosenbrock, EqIneqRosenbrock, + BoundedRosenbrock, Elec) + + +class TestOldToNew: + x0 = (2, 0) + bnds = ((0, None), (0, None)) + method = "trust-constr" + + def test_constraint_dictionary_1(self): + def fun(x): + return (x[0] - 1) ** 2 + (x[1] - 2.5) ** 2 + cons = ({'type': 'ineq', 'fun': lambda x: x[0] - 2 * x[1] + 2}, + {'type': 'ineq', 'fun': lambda x: -x[0] - 2 * x[1] + 6}, + {'type': 'ineq', 'fun': lambda x: -x[0] + 2 * x[1] + 2}) + + with suppress_warnings() as sup: + sup.filter(UserWarning, "delta_grad == 0.0") + res = minimize(fun, self.x0, method=self.method, + bounds=self.bnds, constraints=cons) + assert_allclose(res.x, [1.4, 1.7], rtol=1e-4) + assert_allclose(res.fun, 0.8, rtol=1e-4) + + def test_constraint_dictionary_2(self): + def fun(x): + return (x[0] - 1) ** 2 + (x[1] - 2.5) ** 2 + cons = {'type': 'eq', + 'fun': lambda x, p1, p2: p1*x[0] - p2*x[1], + 'args': (1, 1.1), + 'jac': lambda x, p1, p2: np.array([[p1, -p2]])} + with suppress_warnings() as sup: + sup.filter(UserWarning, "delta_grad == 0.0") + res = minimize(fun, self.x0, method=self.method, + bounds=self.bnds, constraints=cons) + assert_allclose(res.x, [1.7918552, 1.62895927]) + assert_allclose(res.fun, 1.3857466063348418) + + def test_constraint_dictionary_3(self): + def fun(x): + return (x[0] - 1) ** 2 + (x[1] - 
2.5) ** 2 + cons = [{'type': 'ineq', 'fun': lambda x: x[0] - 2 * x[1] + 2}, + NonlinearConstraint(lambda x: x[0] - x[1], 0, 0)] + + with suppress_warnings() as sup: + sup.filter(UserWarning, "delta_grad == 0.0") + res = minimize(fun, self.x0, method=self.method, + bounds=self.bnds, constraints=cons) + assert_allclose(res.x, [1.75, 1.75], rtol=1e-4) + assert_allclose(res.fun, 1.125, rtol=1e-4) + + +class TestNewToOld: + @pytest.mark.fail_slow(2) + def test_multiple_constraint_objects(self, num_parallel_threads): + def fun(x): + return (x[0] - 1) ** 2 + (x[1] - 2.5) ** 2 + (x[2] - 0.75) ** 2 + x0 = [2, 0, 1] + coni = [] # only inequality constraints (can use cobyla) + methods = ["slsqp", "cobyla", "cobyqa", "trust-constr"] + + # mixed old and new + coni.append([{'type': 'ineq', 'fun': lambda x: x[0] - 2 * x[1] + 2}, + NonlinearConstraint(lambda x: x[0] - x[1], -1, 1)]) + + coni.append([LinearConstraint([1, -2, 0], -2, np.inf), + NonlinearConstraint(lambda x: x[0] - x[1], -1, 1)]) + + coni.append([NonlinearConstraint(lambda x: x[0] - 2 * x[1] + 2, 0, np.inf), + NonlinearConstraint(lambda x: x[0] - x[1], -1, 1)]) + + for con in coni: + funs = {} + for method in methods: + with suppress_warnings() as sup: + sup.filter(UserWarning) + result = minimize(fun, x0, method=method, constraints=con) + funs[method] = result.fun + assert_allclose(funs['slsqp'], funs['trust-constr'], rtol=1e-4) + assert_allclose(funs['cobyla'], funs['trust-constr'], rtol=1e-4) + if num_parallel_threads == 1: + assert_allclose(funs['cobyqa'], funs['trust-constr'], + rtol=1e-4) + + @pytest.mark.fail_slow(20) + def test_individual_constraint_objects(self, num_parallel_threads): + def fun(x): + return (x[0] - 1) ** 2 + (x[1] - 2.5) ** 2 + (x[2] - 0.75) ** 2 + x0 = [2, 0, 1] + + cone = [] # with equality constraints (can't use cobyla) + coni = [] # only inequality constraints (can use cobyla) + methods = ["slsqp", "cobyla", "cobyqa", "trust-constr"] + + # nonstandard data types for constraint equality bounds + cone.append(NonlinearConstraint(lambda x: x[0] - x[1], 1, 1)) + cone.append(NonlinearConstraint(lambda x: x[0] - x[1], [1.21], [1.21])) + cone.append(NonlinearConstraint(lambda x: x[0] - x[1], + 1.21, np.array([1.21]))) + + # multiple equalities + cone.append(NonlinearConstraint( + lambda x: [x[0] - x[1], x[1] - x[2]], + 1.21, 1.21)) # two same equalities + cone.append(NonlinearConstraint( + lambda x: [x[0] - x[1], x[1] - x[2]], + [1.21, 1.4], [1.21, 1.4])) # two different equalities + cone.append(NonlinearConstraint( + lambda x: [x[0] - x[1], x[1] - x[2]], + [1.21, 1.21], 1.21)) # equality specified two ways + cone.append(NonlinearConstraint( + lambda x: [x[0] - x[1], x[1] - x[2]], + [1.21, -np.inf], [1.21, np.inf])) # equality + unbounded + + # nonstandard data types for constraint inequality bounds + coni.append(NonlinearConstraint(lambda x: x[0] - x[1], 1.21, np.inf)) + coni.append(NonlinearConstraint(lambda x: x[0] - x[1], [1.21], np.inf)) + coni.append(NonlinearConstraint(lambda x: x[0] - x[1], + 1.21, np.array([np.inf]))) + coni.append(NonlinearConstraint(lambda x: x[0] - x[1], -np.inf, -3)) + coni.append(NonlinearConstraint(lambda x: x[0] - x[1], + np.array(-np.inf), -3)) + + # multiple inequalities/equalities + coni.append(NonlinearConstraint( + lambda x: [x[0] - x[1], x[1] - x[2]], + 1.21, np.inf)) # two same inequalities + cone.append(NonlinearConstraint( + lambda x: [x[0] - x[1], x[1] - x[2]], + [1.21, -np.inf], [1.21, 1.4])) # mixed equality/inequality + coni.append(NonlinearConstraint( + lambda x: [x[0] - 
x[1], x[1] - x[2]], + [1.1, .8], [1.2, 1.4])) # bounded above and below + coni.append(NonlinearConstraint( + lambda x: [x[0] - x[1], x[1] - x[2]], + [-1.2, -1.4], [-1.1, -.8])) # - bounded above and below + + # quick check of LinearConstraint class (very little new code to test) + cone.append(LinearConstraint([1, -1, 0], 1.21, 1.21)) + cone.append(LinearConstraint([[1, -1, 0], [0, 1, -1]], 1.21, 1.21)) + cone.append(LinearConstraint([[1, -1, 0], [0, 1, -1]], + [1.21, -np.inf], [1.21, 1.4])) + + for con in coni: + funs = {} + for method in methods: + with suppress_warnings() as sup: + sup.filter(UserWarning) + result = minimize(fun, x0, method=method, constraints=con) + funs[method] = result.fun + assert_allclose(funs['slsqp'], funs['trust-constr'], rtol=1e-3) + assert_allclose(funs['cobyla'], funs['trust-constr'], rtol=1e-3) + if num_parallel_threads == 1: + assert_allclose(funs['cobyqa'], funs['trust-constr'], + rtol=1e-3) + + for con in cone: + funs = {} + for method in [method for method in methods if method != 'cobyla']: + with suppress_warnings() as sup: + sup.filter(UserWarning) + result = minimize(fun, x0, method=method, constraints=con) + funs[method] = result.fun + assert_allclose(funs['slsqp'], funs['trust-constr'], rtol=1e-3) + if num_parallel_threads == 1: + assert_allclose(funs['cobyqa'], funs['trust-constr'], + rtol=1e-3) + + +class TestNewToOldSLSQP: + method = 'slsqp' + elec = Elec(n_electrons=2) + elec.x_opt = np.array([-0.58438468, 0.58438466, 0.73597047, + -0.73597044, 0.34180668, -0.34180667]) + brock = BoundedRosenbrock() + brock.x_opt = [0, 0] + list_of_problems = [Maratos(), + HyperbolicIneq(), + Rosenbrock(), + IneqRosenbrock(), + EqIneqRosenbrock(), + elec, + brock + ] + + def test_list_of_problems(self): + + for prob in self.list_of_problems: + + with suppress_warnings() as sup: + sup.filter(UserWarning) + result = minimize(prob.fun, prob.x0, + method=self.method, + bounds=prob.bounds, + constraints=prob.constr) + + assert_array_almost_equal(result.x, prob.x_opt, decimal=3) + + @pytest.mark.thread_unsafe + def test_warn_mixed_constraints(self): + # warns about inefficiency of mixed equality/inequality constraints + def fun(x): + return (x[0] - 1) ** 2 + (x[1] - 2.5) ** 2 + (x[2] - 0.75) ** 2 + cons = NonlinearConstraint(lambda x: [x[0]**2 - x[1], x[1] - x[2]], + [1.1, .8], [1.1, 1.4]) + bnds = ((0, None), (0, None), (0, None)) + with suppress_warnings() as sup: + sup.filter(UserWarning, "delta_grad == 0.0") + assert_warns(OptimizeWarning, minimize, fun, (2, 0, 1), + method=self.method, bounds=bnds, constraints=cons) + + @pytest.mark.thread_unsafe + def test_warn_ignored_options(self): + # warns about constraint options being ignored + def fun(x): + return (x[0] - 1) ** 2 + (x[1] - 2.5) ** 2 + (x[2] - 0.75) ** 2 + x0 = (2, 0, 1) + + if self.method == "slsqp": + bnds = ((0, None), (0, None), (0, None)) + else: + bnds = None + + cons = NonlinearConstraint(lambda x: x[0], 2, np.inf) + res = minimize(fun, x0, method=self.method, + bounds=bnds, constraints=cons) + # no warnings without constraint options + assert_allclose(res.fun, 1) + + cons = LinearConstraint([1, 0, 0], 2, np.inf) + res = minimize(fun, x0, method=self.method, + bounds=bnds, constraints=cons) + # no warnings without constraint options + assert_allclose(res.fun, 1) + + cons = [] + cons.append(NonlinearConstraint(lambda x: x[0]**2, 2, np.inf, + keep_feasible=True)) + cons.append(NonlinearConstraint(lambda x: x[0]**2, 2, np.inf, + hess=BFGS())) + cons.append(NonlinearConstraint(lambda x: x[0]**2, 2, 
np.inf, + finite_diff_jac_sparsity=42)) + cons.append(NonlinearConstraint(lambda x: x[0]**2, 2, np.inf, + finite_diff_rel_step=42)) + cons.append(LinearConstraint([1, 0, 0], 2, np.inf, + keep_feasible=True)) + for con in cons: + assert_warns(OptimizeWarning, minimize, fun, x0, + method=self.method, bounds=bnds, constraints=cons) + + +class TestNewToOldCobyla: + method = 'cobyla' + + list_of_problems = [ + Elec(n_electrons=2), + Elec(n_electrons=4), + ] + + @pytest.mark.slow + def test_list_of_problems(self): + + for prob in self.list_of_problems: + + with suppress_warnings() as sup: + sup.filter(UserWarning) + truth = minimize(prob.fun, prob.x0, + method='trust-constr', + bounds=prob.bounds, + constraints=prob.constr) + result = minimize(prob.fun, prob.x0, + method=self.method, + bounds=prob.bounds, + constraints=prob.constr) + + assert_allclose(result.fun, truth.fun, rtol=1e-3) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_constraints.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_constraints.py new file mode 100644 index 0000000000000000000000000000000000000000..4c4186ba7b6dd6f56b89e2f39add9eb16e6beccb --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_constraints.py @@ -0,0 +1,255 @@ +import pytest +import numpy as np +from numpy.testing import TestCase, assert_array_equal +import scipy.sparse as sps +from scipy.optimize._constraints import ( + Bounds, LinearConstraint, NonlinearConstraint, PreparedConstraint, + new_bounds_to_old, old_bound_to_new, strict_bounds) + + +class TestStrictBounds(TestCase): + def test_scalarvalue_unique_enforce_feasibility(self): + m = 3 + lb = 2 + ub = 4 + enforce_feasibility = False + strict_lb, strict_ub = strict_bounds(lb, ub, + enforce_feasibility, + m) + assert_array_equal(strict_lb, [-np.inf, -np.inf, -np.inf]) + assert_array_equal(strict_ub, [np.inf, np.inf, np.inf]) + + enforce_feasibility = True + strict_lb, strict_ub = strict_bounds(lb, ub, + enforce_feasibility, + m) + assert_array_equal(strict_lb, [2, 2, 2]) + assert_array_equal(strict_ub, [4, 4, 4]) + + def test_vectorvalue_unique_enforce_feasibility(self): + m = 3 + lb = [1, 2, 3] + ub = [4, 5, 6] + enforce_feasibility = False + strict_lb, strict_ub = strict_bounds(lb, ub, + enforce_feasibility, + m) + assert_array_equal(strict_lb, [-np.inf, -np.inf, -np.inf]) + assert_array_equal(strict_ub, [np.inf, np.inf, np.inf]) + + enforce_feasibility = True + strict_lb, strict_ub = strict_bounds(lb, ub, + enforce_feasibility, + m) + assert_array_equal(strict_lb, [1, 2, 3]) + assert_array_equal(strict_ub, [4, 5, 6]) + + def test_scalarvalue_vector_enforce_feasibility(self): + m = 3 + lb = 2 + ub = 4 + enforce_feasibility = [False, True, False] + strict_lb, strict_ub = strict_bounds(lb, ub, + enforce_feasibility, + m) + assert_array_equal(strict_lb, [-np.inf, 2, -np.inf]) + assert_array_equal(strict_ub, [np.inf, 4, np.inf]) + + def test_vectorvalue_vector_enforce_feasibility(self): + m = 3 + lb = [1, 2, 3] + ub = [4, 6, np.inf] + enforce_feasibility = [True, False, True] + strict_lb, strict_ub = strict_bounds(lb, ub, + enforce_feasibility, + m) + assert_array_equal(strict_lb, [1, -np.inf, 3]) + assert_array_equal(strict_ub, [4, np.inf, np.inf]) + + +def test_prepare_constraint_infeasible_x0(): + lb = np.array([0, 20, 30]) + ub = np.array([0.5, np.inf, 70]) + x0 = np.array([1, 2, 3]) + enforce_feasibility 
= np.array([False, True, True], dtype=bool) + bounds = Bounds(lb, ub, enforce_feasibility) + pytest.raises(ValueError, PreparedConstraint, bounds, x0) + + pc = PreparedConstraint(Bounds(lb, ub), [1, 2, 3]) + assert (pc.violation([1, 2, 3]) > 0).any() + assert (pc.violation([0.25, 21, 31]) == 0).all() + + x0 = np.array([1, 2, 3, 4]) + A = np.array([[1, 2, 3, 4], [5, 0, 0, 6], [7, 0, 8, 0]]) + enforce_feasibility = np.array([True, True, True], dtype=bool) + linear = LinearConstraint(A, -np.inf, 0, enforce_feasibility) + pytest.raises(ValueError, PreparedConstraint, linear, x0) + + pc = PreparedConstraint(LinearConstraint(A, -np.inf, 0), + [1, 2, 3, 4]) + assert (pc.violation([1, 2, 3, 4]) > 0).any() + assert (pc.violation([-10, 2, -10, 4]) == 0).all() + + def fun(x): + return A.dot(x) + + def jac(x): + return A + + def hess(x, v): + return sps.csr_matrix((4, 4)) + + nonlinear = NonlinearConstraint(fun, -np.inf, 0, jac, hess, + enforce_feasibility) + pytest.raises(ValueError, PreparedConstraint, nonlinear, x0) + + pc = PreparedConstraint(nonlinear, [-10, 2, -10, 4]) + assert (pc.violation([1, 2, 3, 4]) > 0).any() + assert (pc.violation([-10, 2, -10, 4]) == 0).all() + + +def test_violation(): + def cons_f(x): + return np.array([x[0] ** 2 + x[1], x[0] ** 2 - x[1]]) + + nlc = NonlinearConstraint(cons_f, [-1, -0.8500], [2, 2]) + pc = PreparedConstraint(nlc, [0.5, 1]) + + assert_array_equal(pc.violation([0.5, 1]), [0., 0.]) + + np.testing.assert_almost_equal(pc.violation([0.5, 1.2]), [0., 0.1]) + + np.testing.assert_almost_equal(pc.violation([1.2, 1.2]), [0.64, 0]) + + np.testing.assert_almost_equal(pc.violation([0.1, -1.2]), [0.19, 0]) + + np.testing.assert_almost_equal(pc.violation([0.1, 2]), [0.01, 1.14]) + + +def test_new_bounds_to_old(): + lb = np.array([-np.inf, 2, 3]) + ub = np.array([3, np.inf, 10]) + + bounds = [(None, 3), (2, None), (3, 10)] + assert_array_equal(new_bounds_to_old(lb, ub, 3), bounds) + + bounds_single_lb = [(-1, 3), (-1, None), (-1, 10)] + assert_array_equal(new_bounds_to_old(-1, ub, 3), bounds_single_lb) + + bounds_no_lb = [(None, 3), (None, None), (None, 10)] + assert_array_equal(new_bounds_to_old(-np.inf, ub, 3), bounds_no_lb) + + bounds_single_ub = [(None, 20), (2, 20), (3, 20)] + assert_array_equal(new_bounds_to_old(lb, 20, 3), bounds_single_ub) + + bounds_no_ub = [(None, None), (2, None), (3, None)] + assert_array_equal(new_bounds_to_old(lb, np.inf, 3), bounds_no_ub) + + bounds_single_both = [(1, 2), (1, 2), (1, 2)] + assert_array_equal(new_bounds_to_old(1, 2, 3), bounds_single_both) + + bounds_no_both = [(None, None), (None, None), (None, None)] + assert_array_equal(new_bounds_to_old(-np.inf, np.inf, 3), bounds_no_both) + + +def test_old_bounds_to_new(): + bounds = ([1, 2], (None, 3), (-1, None)) + lb_true = np.array([1, -np.inf, -1]) + ub_true = np.array([2, 3, np.inf]) + + lb, ub = old_bound_to_new(bounds) + assert_array_equal(lb, lb_true) + assert_array_equal(ub, ub_true) + + bounds = [(-np.inf, np.inf), (np.array([1]), np.array([1]))] + lb, ub = old_bound_to_new(bounds) + + assert_array_equal(lb, [-np.inf, 1]) + assert_array_equal(ub, [np.inf, 1]) + + +class TestBounds: + def test_repr(self): + # so that eval works + from numpy import array, inf # noqa: F401 + for args in ( + (-1.0, 5.0), + (-1.0, np.inf, True), + (np.array([1.0, -np.inf]), np.array([2.0, np.inf])), + (np.array([1.0, -np.inf]), np.array([2.0, np.inf]), + np.array([True, False])), + ): + bounds = Bounds(*args) + bounds2 = eval(repr(Bounds(*args))) + assert_array_equal(bounds.lb, bounds2.lb) + 
assert_array_equal(bounds.ub, bounds2.ub) + assert_array_equal(bounds.keep_feasible, bounds2.keep_feasible) + + def test_array(self): + # gh13501 + b = Bounds(lb=[0.0, 0.0], ub=[1.0, 1.0]) + assert isinstance(b.lb, np.ndarray) + assert isinstance(b.ub, np.ndarray) + + def test_defaults(self): + b1 = Bounds() + b2 = Bounds(np.asarray(-np.inf), np.asarray(np.inf)) + assert b1.lb == b2.lb + assert b1.ub == b2.ub + + def test_input_validation(self): + message = "Lower and upper bounds must be dense arrays." + with pytest.raises(ValueError, match=message): + Bounds(sps.coo_array([1, 2]), [1, 2]) + with pytest.raises(ValueError, match=message): + Bounds([1, 2], sps.coo_array([1, 2])) + + message = "`keep_feasible` must be a dense array." + with pytest.raises(ValueError, match=message): + Bounds([1, 2], [1, 2], keep_feasible=sps.coo_array([True, True])) + + message = "`lb`, `ub`, and `keep_feasible` must be broadcastable." + with pytest.raises(ValueError, match=message): + Bounds([1, 2], [1, 2, 3]) + + def test_residual(self): + bounds = Bounds(-2, 4) + x0 = [-1, 2] + np.testing.assert_allclose(bounds.residual(x0), ([1, 4], [5, 2])) + + +class TestLinearConstraint: + def test_defaults(self): + A = np.eye(4) + lc = LinearConstraint(A) + lc2 = LinearConstraint(A, -np.inf, np.inf) + assert_array_equal(lc.lb, lc2.lb) + assert_array_equal(lc.ub, lc2.ub) + + def test_input_validation(self): + A = np.eye(4) + message = "`lb`, `ub`, and `keep_feasible` must be broadcastable" + with pytest.raises(ValueError, match=message): + LinearConstraint(A, [1, 2], [1, 2, 3]) + + message = "Constraint limits must be dense arrays" + with pytest.raises(ValueError, match=message): + LinearConstraint(A, sps.coo_array([1, 2]), [2, 3]) + with pytest.raises(ValueError, match=message): + LinearConstraint(A, [1, 2], sps.coo_array([2, 3])) + + message = "`keep_feasible` must be a dense array" + with pytest.raises(ValueError, match=message): + keep_feasible = sps.coo_array([True, True]) + LinearConstraint(A, [1, 2], [2, 3], keep_feasible=keep_feasible) + + A = np.empty((4, 3, 5)) + message = "`A` must have exactly two dimensions." + with pytest.raises(ValueError, match=message): + LinearConstraint(A) + + def test_residual(self): + A = np.eye(2) + lc = LinearConstraint(A, -2, 4) + x0 = [-1, 2] + np.testing.assert_allclose(lc.residual(x0), ([1, 4], [5, 2])) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_cython_optimize.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_cython_optimize.py new file mode 100644 index 0000000000000000000000000000000000000000..2f859c1143eb6b63c439fe278bfdd4fdaa15410f --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_cython_optimize.py @@ -0,0 +1,92 @@ +""" +Test Cython optimize zeros API functions: ``bisect``, ``ridder``, ``brenth``, +and ``brentq`` in `scipy.optimize.cython_optimize`, by finding the roots of a +3rd order polynomial given a sequence of constant terms, ``a0``, and fixed 1st, +2nd, and 3rd order terms in ``args``. + +.. math:: + + f(x, a0, args) = ((args[2]*x + args[1])*x + args[0])*x + a0 + +The 3rd order polynomial function is written in Cython and called in a Python +wrapper named after the zero function. See the private ``_zeros`` Cython module +in `scipy.optimize.cython_optimze` for more information. 
+""" + +import numpy.testing as npt +from scipy.optimize.cython_optimize import _zeros + +# CONSTANTS +# Solve x**3 - A0 = 0 for A0 = [2.0, 2.1, ..., 2.9]. +# The ARGS have 3 elements just to show how this could be done for any cubic +# polynomial. +A0 = tuple(-2.0 - x/10.0 for x in range(10)) # constant term +ARGS = (0.0, 0.0, 1.0) # 1st, 2nd, and 3rd order terms +XLO, XHI = 0.0, 2.0 # first and second bounds of zeros functions +# absolute and relative tolerances and max iterations for zeros functions +XTOL, RTOL, MITR = 0.001, 0.001, 10 +EXPECTED = [(-a0) ** (1.0/3.0) for a0 in A0] +# = [1.2599210498948732, +# 1.2805791649874942, +# 1.300591446851387, +# 1.3200061217959123, +# 1.338865900164339, +# 1.3572088082974532, +# 1.375068867074141, +# 1.3924766500838337, +# 1.4094597464129783, +# 1.4260431471424087] + + +# test bisect +def test_bisect(): + npt.assert_allclose( + EXPECTED, + list( + _zeros.loop_example('bisect', A0, ARGS, XLO, XHI, XTOL, RTOL, MITR) + ), + rtol=RTOL, atol=XTOL + ) + + +# test ridder +def test_ridder(): + npt.assert_allclose( + EXPECTED, + list( + _zeros.loop_example('ridder', A0, ARGS, XLO, XHI, XTOL, RTOL, MITR) + ), + rtol=RTOL, atol=XTOL + ) + + +# test brenth +def test_brenth(): + npt.assert_allclose( + EXPECTED, + list( + _zeros.loop_example('brenth', A0, ARGS, XLO, XHI, XTOL, RTOL, MITR) + ), + rtol=RTOL, atol=XTOL + ) + + +# test brentq +def test_brentq(): + npt.assert_allclose( + EXPECTED, + list( + _zeros.loop_example('brentq', A0, ARGS, XLO, XHI, XTOL, RTOL, MITR) + ), + rtol=RTOL, atol=XTOL + ) + + +# test brentq with full output +def test_brentq_full_output(): + output = _zeros.full_output_example( + (A0[0],) + ARGS, XLO, XHI, XTOL, RTOL, MITR) + npt.assert_allclose(EXPECTED[0], output['root'], rtol=RTOL, atol=XTOL) + npt.assert_equal(6, output['iterations']) + npt.assert_equal(7, output['funcalls']) + npt.assert_equal(0, output['error_num']) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_differentiable_functions.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_differentiable_functions.py new file mode 100644 index 0000000000000000000000000000000000000000..cac12af0033c09c3fbf73c52a9c2cb52b84d8239 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_differentiable_functions.py @@ -0,0 +1,805 @@ +import pytest +import platform +import numpy as np +from numpy.testing import (TestCase, assert_array_almost_equal, + assert_array_equal, assert_, assert_allclose, + assert_equal) +from scipy._lib._gcutils import assert_deallocated +from scipy.sparse import csr_matrix +from scipy.sparse.linalg import LinearOperator +from scipy.optimize._differentiable_functions import (ScalarFunction, + VectorFunction, + LinearVectorFunction, + IdentityVectorFunction) +from scipy.optimize import rosen, rosen_der, rosen_hess +from scipy.optimize._hessian_update_strategy import BFGS + + +class ExScalarFunction: + + def __init__(self): + self.nfev = 0 + self.ngev = 0 + self.nhev = 0 + + def fun(self, x): + self.nfev += 1 + return 2*(x[0]**2 + x[1]**2 - 1) - x[0] + + def grad(self, x): + self.ngev += 1 + return np.array([4*x[0]-1, 4*x[1]]) + + def hess(self, x): + self.nhev += 1 + return 4*np.eye(2) + + +class TestScalarFunction(TestCase): + + def test_finite_difference_grad(self): + ex = ExScalarFunction() + nfev = 0 + ngev = 0 + + x0 = [1.0, 0.0] + analit = ScalarFunction(ex.fun, x0, 
(), ex.grad, + ex.hess, None, (-np.inf, np.inf)) + nfev += 1 + ngev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev, nfev) + assert_array_equal(ex.ngev, ngev) + assert_array_equal(analit.ngev, nfev) + approx = ScalarFunction(ex.fun, x0, (), '2-point', + ex.hess, None, (-np.inf, np.inf)) + nfev += 3 + ngev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(analit.ngev+approx.ngev, ngev) + assert_array_equal(analit.f, approx.f) + assert_array_almost_equal(analit.g, approx.g) + + x = [10, 0.3] + f_analit = analit.fun(x) + g_analit = analit.grad(x) + nfev += 1 + ngev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(analit.ngev+approx.ngev, ngev) + f_approx = approx.fun(x) + g_approx = approx.grad(x) + nfev += 3 + ngev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(analit.ngev+approx.ngev, ngev) + assert_array_almost_equal(f_analit, f_approx) + assert_array_almost_equal(g_analit, g_approx) + + x = [2.0, 1.0] + g_analit = analit.grad(x) + ngev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(analit.ngev+approx.ngev, ngev) + + g_approx = approx.grad(x) + nfev += 3 + ngev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(analit.ngev+approx.ngev, ngev) + assert_array_almost_equal(g_analit, g_approx) + + x = [2.5, 0.3] + f_analit = analit.fun(x) + g_analit = analit.grad(x) + nfev += 1 + ngev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(analit.ngev+approx.ngev, ngev) + f_approx = approx.fun(x) + g_approx = approx.grad(x) + nfev += 3 + ngev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(analit.ngev+approx.ngev, ngev) + assert_array_almost_equal(f_analit, f_approx) + assert_array_almost_equal(g_analit, g_approx) + + x = [2, 0.3] + f_analit = analit.fun(x) + g_analit = analit.grad(x) + nfev += 1 + ngev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(analit.ngev+approx.ngev, ngev) + f_approx = approx.fun(x) + g_approx = approx.grad(x) + nfev += 3 + ngev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(analit.ngev+approx.ngev, ngev) + assert_array_almost_equal(f_analit, f_approx) + assert_array_almost_equal(g_analit, g_approx) + + def test_fun_and_grad(self): + ex = ExScalarFunction() + + def fg_allclose(x, y): + assert_allclose(x[0], y[0]) + assert_allclose(x[1], y[1]) + + # with analytic gradient + x0 = [2.0, 0.3] + analit = ScalarFunction(ex.fun, x0, (), ex.grad, + ex.hess, None, (-np.inf, np.inf)) + + fg = ex.fun(x0), ex.grad(x0) + fg_allclose(analit.fun_and_grad(x0), fg) + assert analit.ngev == 1 + + x0[1] = 1. + fg = ex.fun(x0), ex.grad(x0) + fg_allclose(analit.fun_and_grad(x0), fg) + + # with finite difference gradient + x0 = [2.0, 0.3] + sf = ScalarFunction(ex.fun, x0, (), '3-point', + ex.hess, None, (-np.inf, np.inf)) + assert sf.ngev == 1 + fg = ex.fun(x0), ex.grad(x0) + fg_allclose(sf.fun_and_grad(x0), fg) + assert sf.ngev == 1 + + x0[1] = 1. 
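+        # mutate x0 and check that fun_and_grad re-evaluates at the new point
+        # instead of returning values memoized for the previous x0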
+ fg = ex.fun(x0), ex.grad(x0) + fg_allclose(sf.fun_and_grad(x0), fg) + + def test_finite_difference_hess_linear_operator(self): + ex = ExScalarFunction() + nfev = 0 + ngev = 0 + nhev = 0 + + x0 = [1.0, 0.0] + analit = ScalarFunction(ex.fun, x0, (), ex.grad, + ex.hess, None, (-np.inf, np.inf)) + nfev += 1 + ngev += 1 + nhev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev, nfev) + assert_array_equal(ex.ngev, ngev) + assert_array_equal(analit.ngev, ngev) + assert_array_equal(ex.nhev, nhev) + assert_array_equal(analit.nhev, nhev) + approx = ScalarFunction(ex.fun, x0, (), ex.grad, + '2-point', None, (-np.inf, np.inf)) + assert_(isinstance(approx.H, LinearOperator)) + for v in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]): + assert_array_equal(analit.f, approx.f) + assert_array_almost_equal(analit.g, approx.g) + assert_array_almost_equal(analit.H.dot(v), approx.H.dot(v)) + nfev += 1 + ngev += 4 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.ngev, ngev) + assert_array_equal(analit.ngev+approx.ngev, ngev) + assert_array_equal(ex.nhev, nhev) + assert_array_equal(analit.nhev+approx.nhev, nhev) + + x = [2.0, 1.0] + H_analit = analit.hess(x) + nhev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.ngev, ngev) + assert_array_equal(analit.ngev+approx.ngev, ngev) + assert_array_equal(ex.nhev, nhev) + assert_array_equal(analit.nhev+approx.nhev, nhev) + H_approx = approx.hess(x) + assert_(isinstance(H_approx, LinearOperator)) + for v in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]): + assert_array_almost_equal(H_analit.dot(v), H_approx.dot(v)) + ngev += 4 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.ngev, ngev) + assert_array_equal(analit.ngev+approx.ngev, ngev) + assert_array_equal(ex.nhev, nhev) + assert_array_equal(analit.nhev+approx.nhev, nhev) + + x = [2.1, 1.2] + H_analit = analit.hess(x) + nhev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.ngev, ngev) + assert_array_equal(analit.ngev+approx.ngev, ngev) + assert_array_equal(ex.nhev, nhev) + assert_array_equal(analit.nhev+approx.nhev, nhev) + H_approx = approx.hess(x) + assert_(isinstance(H_approx, LinearOperator)) + for v in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]): + assert_array_almost_equal(H_analit.dot(v), H_approx.dot(v)) + ngev += 4 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.ngev, ngev) + assert_array_equal(analit.ngev+approx.ngev, ngev) + assert_array_equal(ex.nhev, nhev) + assert_array_equal(analit.nhev+approx.nhev, nhev) + + x = [2.5, 0.3] + _ = analit.grad(x) + H_analit = analit.hess(x) + ngev += 1 + nhev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.ngev, ngev) + assert_array_equal(analit.ngev+approx.ngev, ngev) + assert_array_equal(ex.nhev, nhev) + assert_array_equal(analit.nhev+approx.nhev, nhev) + _ = approx.grad(x) + H_approx = approx.hess(x) + assert_(isinstance(H_approx, LinearOperator)) + for v in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]): + assert_array_almost_equal(H_analit.dot(v), H_approx.dot(v)) + ngev += 4 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.ngev, ngev) + assert_array_equal(analit.ngev+approx.ngev, ngev) + assert_array_equal(ex.nhev, nhev) + 
assert_array_equal(analit.nhev+approx.nhev, nhev) + + x = [5.2, 2.3] + _ = analit.grad(x) + H_analit = analit.hess(x) + ngev += 1 + nhev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.ngev, ngev) + assert_array_equal(analit.ngev+approx.ngev, ngev) + assert_array_equal(ex.nhev, nhev) + assert_array_equal(analit.nhev+approx.nhev, nhev) + _ = approx.grad(x) + H_approx = approx.hess(x) + assert_(isinstance(H_approx, LinearOperator)) + for v in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]): + assert_array_almost_equal(H_analit.dot(v), H_approx.dot(v)) + ngev += 4 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.ngev, ngev) + assert_array_equal(analit.ngev+approx.ngev, ngev) + assert_array_equal(ex.nhev, nhev) + assert_array_equal(analit.nhev+approx.nhev, nhev) + + @pytest.mark.thread_unsafe + def test_x_storage_overlap(self): + # Scalar_Function should not store references to arrays, it should + # store copies - this checks that updating an array in-place causes + # Scalar_Function.x to be updated. + + def f(x): + return np.sum(np.asarray(x) ** 2) + + x = np.array([1., 2., 3.]) + sf = ScalarFunction(f, x, (), '3-point', lambda x: x, None, (-np.inf, np.inf)) + + assert x is not sf.x + assert_equal(sf.fun(x), 14.0) + assert x is not sf.x + + x[0] = 0. + f1 = sf.fun(x) + assert_equal(f1, 13.0) + + x[0] = 1 + f2 = sf.fun(x) + assert_equal(f2, 14.0) + assert x is not sf.x + + # now test with a HessianUpdate strategy specified + hess = BFGS() + x = np.array([1., 2., 3.]) + sf = ScalarFunction(f, x, (), '3-point', hess, None, (-np.inf, np.inf)) + + assert x is not sf.x + assert_equal(sf.fun(x), 14.0) + assert x is not sf.x + + x[0] = 0. + f1 = sf.fun(x) + assert_equal(f1, 13.0) + + x[0] = 1 + f2 = sf.fun(x) + assert_equal(f2, 14.0) + assert x is not sf.x + + # gh13740 x is changed in user function + def ff(x): + x *= x # overwrite x + return np.sum(x) + + x = np.array([1., 2., 3.]) + sf = ScalarFunction( + ff, x, (), '3-point', lambda x: x, None, (-np.inf, np.inf) + ) + assert x is not sf.x + assert_equal(sf.fun(x), 14.0) + assert_equal(sf.x, np.array([1., 2., 3.])) + assert x is not sf.x + + def test_lowest_x(self): + # ScalarFunction should remember the lowest func(x) visited. 
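+        # rosen attains its global minimum of 0 at [1, 1, 1], so the asserts
+        # below expect _lowest_f/_lowest_x to keep pointing at that evaluation.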
+ x0 = np.array([2, 3, 4]) + sf = ScalarFunction(rosen, x0, (), rosen_der, rosen_hess, + None, None) + sf.fun([1, 1, 1]) + sf.fun(x0) + sf.fun([1.01, 1, 1.0]) + sf.grad([1.01, 1, 1.0]) + assert_equal(sf._lowest_f, 0.0) + assert_equal(sf._lowest_x, [1.0, 1.0, 1.0]) + + sf = ScalarFunction(rosen, x0, (), '2-point', rosen_hess, + None, (-np.inf, np.inf)) + sf.fun([1, 1, 1]) + sf.fun(x0) + sf.fun([1.01, 1, 1.0]) + sf.grad([1.01, 1, 1.0]) + assert_equal(sf._lowest_f, 0.0) + assert_equal(sf._lowest_x, [1.0, 1.0, 1.0]) + + def test_float_size(self): + x0 = np.array([2, 3, 4]).astype(np.float32) + + # check that ScalarFunction/approx_derivative always send the correct + # float width + def rosen_(x): + assert x.dtype == np.float32 + return rosen(x) + + sf = ScalarFunction(rosen_, x0, (), '2-point', rosen_hess, + None, (-np.inf, np.inf)) + res = sf.fun(x0) + assert res.dtype == np.float32 + + +class ExVectorialFunction: + + def __init__(self): + self.nfev = 0 + self.njev = 0 + self.nhev = 0 + + def fun(self, x): + self.nfev += 1 + return np.array([2*(x[0]**2 + x[1]**2 - 1) - x[0], + 4*(x[0]**3 + x[1]**2 - 4) - 3*x[0]], dtype=x.dtype) + + def jac(self, x): + self.njev += 1 + return np.array([[4*x[0]-1, 4*x[1]], + [12*x[0]**2-3, 8*x[1]]], dtype=x.dtype) + + def hess(self, x, v): + self.nhev += 1 + return v[0]*4*np.eye(2) + v[1]*np.array([[24*x[0], 0], + [0, 8]]) + + +class TestVectorialFunction(TestCase): + + def test_finite_difference_jac(self): + ex = ExVectorialFunction() + nfev = 0 + njev = 0 + + x0 = [1.0, 0.0] + analit = VectorFunction(ex.fun, x0, ex.jac, ex.hess, None, None, + (-np.inf, np.inf), None) + nfev += 1 + njev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev, nfev) + assert_array_equal(ex.njev, njev) + assert_array_equal(analit.njev, njev) + approx = VectorFunction(ex.fun, x0, '2-point', ex.hess, None, None, + (-np.inf, np.inf), None) + nfev += 3 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.njev, njev) + assert_array_equal(analit.njev+approx.njev, njev) + assert_array_equal(analit.f, approx.f) + assert_array_almost_equal(analit.J, approx.J) + + x = [10, 0.3] + f_analit = analit.fun(x) + J_analit = analit.jac(x) + nfev += 1 + njev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.njev, njev) + assert_array_equal(analit.njev+approx.njev, njev) + f_approx = approx.fun(x) + J_approx = approx.jac(x) + nfev += 3 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.njev, njev) + assert_array_equal(analit.njev+approx.njev, njev) + assert_array_almost_equal(f_analit, f_approx) + assert_array_almost_equal(J_analit, J_approx, decimal=4) + + x = [2.0, 1.0] + J_analit = analit.jac(x) + njev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.njev, njev) + assert_array_equal(analit.njev+approx.njev, njev) + J_approx = approx.jac(x) + nfev += 3 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.njev, njev) + assert_array_equal(analit.njev+approx.njev, njev) + assert_array_almost_equal(J_analit, J_approx) + + x = [2.5, 0.3] + f_analit = analit.fun(x) + J_analit = analit.jac(x) + nfev += 1 + njev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.njev, njev) + 
assert_array_equal(analit.njev+approx.njev, njev) + f_approx = approx.fun(x) + J_approx = approx.jac(x) + nfev += 3 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.njev, njev) + assert_array_equal(analit.njev+approx.njev, njev) + assert_array_almost_equal(f_analit, f_approx) + assert_array_almost_equal(J_analit, J_approx) + + x = [2, 0.3] + f_analit = analit.fun(x) + J_analit = analit.jac(x) + nfev += 1 + njev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.njev, njev) + assert_array_equal(analit.njev+approx.njev, njev) + f_approx = approx.fun(x) + J_approx = approx.jac(x) + nfev += 3 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.njev, njev) + assert_array_equal(analit.njev+approx.njev, njev) + assert_array_almost_equal(f_analit, f_approx) + assert_array_almost_equal(J_analit, J_approx) + + def test_finite_difference_hess_linear_operator(self): + ex = ExVectorialFunction() + nfev = 0 + njev = 0 + nhev = 0 + + x0 = [1.0, 0.0] + v0 = [1.0, 2.0] + analit = VectorFunction(ex.fun, x0, ex.jac, ex.hess, None, None, + (-np.inf, np.inf), None) + nfev += 1 + njev += 1 + nhev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev, nfev) + assert_array_equal(ex.njev, njev) + assert_array_equal(analit.njev, njev) + assert_array_equal(ex.nhev, nhev) + assert_array_equal(analit.nhev, nhev) + approx = VectorFunction(ex.fun, x0, ex.jac, '2-point', None, None, + (-np.inf, np.inf), None) + assert_(isinstance(approx.H, LinearOperator)) + for p in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]): + assert_array_equal(analit.f, approx.f) + assert_array_almost_equal(analit.J, approx.J) + assert_array_almost_equal(analit.H.dot(p), approx.H.dot(p)) + nfev += 1 + njev += 4 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.njev, njev) + assert_array_equal(analit.njev+approx.njev, njev) + assert_array_equal(ex.nhev, nhev) + assert_array_equal(analit.nhev+approx.nhev, nhev) + + x = [2.0, 1.0] + H_analit = analit.hess(x, v0) + nhev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.njev, njev) + assert_array_equal(analit.njev+approx.njev, njev) + assert_array_equal(ex.nhev, nhev) + assert_array_equal(analit.nhev+approx.nhev, nhev) + H_approx = approx.hess(x, v0) + assert_(isinstance(H_approx, LinearOperator)) + for p in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]): + assert_array_almost_equal(H_analit.dot(p), H_approx.dot(p), + decimal=5) + njev += 4 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.njev, njev) + assert_array_equal(analit.njev+approx.njev, njev) + assert_array_equal(ex.nhev, nhev) + assert_array_equal(analit.nhev+approx.nhev, nhev) + + x = [2.1, 1.2] + v = [1.0, 1.0] + H_analit = analit.hess(x, v) + nhev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.njev, njev) + assert_array_equal(analit.njev+approx.njev, njev) + assert_array_equal(ex.nhev, nhev) + assert_array_equal(analit.nhev+approx.nhev, nhev) + H_approx = approx.hess(x, v) + assert_(isinstance(H_approx, LinearOperator)) + for v in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]): + assert_array_almost_equal(H_analit.dot(v), H_approx.dot(v)) + njev += 4 + assert_array_equal(ex.nfev, nfev) + 
assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.njev, njev) + assert_array_equal(analit.njev+approx.njev, njev) + assert_array_equal(ex.nhev, nhev) + assert_array_equal(analit.nhev+approx.nhev, nhev) + + x = [2.5, 0.3] + _ = analit.jac(x) + H_analit = analit.hess(x, v0) + njev += 1 + nhev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.njev, njev) + assert_array_equal(analit.njev+approx.njev, njev) + assert_array_equal(ex.nhev, nhev) + assert_array_equal(analit.nhev+approx.nhev, nhev) + _ = approx.jac(x) + H_approx = approx.hess(x, v0) + assert_(isinstance(H_approx, LinearOperator)) + for v in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]): + assert_array_almost_equal(H_analit.dot(v), H_approx.dot(v), decimal=4) + njev += 4 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.njev, njev) + assert_array_equal(analit.njev+approx.njev, njev) + assert_array_equal(ex.nhev, nhev) + assert_array_equal(analit.nhev+approx.nhev, nhev) + + x = [5.2, 2.3] + v = [2.3, 5.2] + _ = analit.jac(x) + H_analit = analit.hess(x, v) + njev += 1 + nhev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.njev, njev) + assert_array_equal(analit.njev+approx.njev, njev) + assert_array_equal(ex.nhev, nhev) + assert_array_equal(analit.nhev+approx.nhev, nhev) + _ = approx.jac(x) + H_approx = approx.hess(x, v) + assert_(isinstance(H_approx, LinearOperator)) + for v in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]): + assert_array_almost_equal(H_analit.dot(v), H_approx.dot(v), decimal=4) + njev += 4 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.njev, njev) + assert_array_equal(analit.njev+approx.njev, njev) + assert_array_equal(ex.nhev, nhev) + assert_array_equal(analit.nhev+approx.nhev, nhev) + + @pytest.mark.thread_unsafe + def test_x_storage_overlap(self): + # VectorFunction should not store references to arrays, it should + # store copies - this checks that updating an array in-place causes + # Scalar_Function.x to be updated. + ex = ExVectorialFunction() + x0 = np.array([1.0, 0.0]) + + vf = VectorFunction(ex.fun, x0, '3-point', ex.hess, None, None, + (-np.inf, np.inf), None) + + assert x0 is not vf.x + assert_equal(vf.fun(x0), ex.fun(x0)) + assert x0 is not vf.x + + x0[0] = 2. + assert_equal(vf.fun(x0), ex.fun(x0)) + assert x0 is not vf.x + + x0[0] = 1. + assert_equal(vf.fun(x0), ex.fun(x0)) + assert x0 is not vf.x + + # now test with a HessianUpdate strategy specified + hess = BFGS() + x0 = np.array([1.0, 0.0]) + vf = VectorFunction(ex.fun, x0, '3-point', hess, None, None, + (-np.inf, np.inf), None) + + with pytest.warns(UserWarning): + # filter UserWarning because ExVectorialFunction is linear and + # a quasi-Newton approximation is used for the Hessian. + assert x0 is not vf.x + assert_equal(vf.fun(x0), ex.fun(x0)) + assert x0 is not vf.x + + x0[0] = 2. + assert_equal(vf.fun(x0), ex.fun(x0)) + assert x0 is not vf.x + + x0[0] = 1. 
+ assert_equal(vf.fun(x0), ex.fun(x0)) + assert x0 is not vf.x + + def test_float_size(self): + ex = ExVectorialFunction() + x0 = np.array([1.0, 0.0]).astype(np.float32) + + vf = VectorFunction(ex.fun, x0, ex.jac, ex.hess, None, None, + (-np.inf, np.inf), None) + + res = vf.fun(x0) + assert res.dtype == np.float32 + + res = vf.jac(x0) + assert res.dtype == np.float32 + + +def test_LinearVectorFunction(): + A_dense = np.array([ + [-1, 2, 0], + [0, 4, 2] + ]) + x0 = np.zeros(3) + A_sparse = csr_matrix(A_dense) + x = np.array([1, -1, 0]) + v = np.array([-1, 1]) + Ax = np.array([-3, -4]) + + f1 = LinearVectorFunction(A_dense, x0, None) + assert_(not f1.sparse_jacobian) + + f2 = LinearVectorFunction(A_dense, x0, True) + assert_(f2.sparse_jacobian) + + f3 = LinearVectorFunction(A_dense, x0, False) + assert_(not f3.sparse_jacobian) + + f4 = LinearVectorFunction(A_sparse, x0, None) + assert_(f4.sparse_jacobian) + + f5 = LinearVectorFunction(A_sparse, x0, True) + assert_(f5.sparse_jacobian) + + f6 = LinearVectorFunction(A_sparse, x0, False) + assert_(not f6.sparse_jacobian) + + assert_array_equal(f1.fun(x), Ax) + assert_array_equal(f2.fun(x), Ax) + assert_array_equal(f1.jac(x), A_dense) + assert_array_equal(f2.jac(x).toarray(), A_sparse.toarray()) + assert_array_equal(f1.hess(x, v).toarray(), np.zeros((3, 3))) + + +def test_LinearVectorFunction_memoization(): + A = np.array([[-1, 2, 0], [0, 4, 2]]) + x0 = np.array([1, 2, -1]) + fun = LinearVectorFunction(A, x0, False) + + assert_array_equal(x0, fun.x) + assert_array_equal(A.dot(x0), fun.f) + + x1 = np.array([-1, 3, 10]) + assert_array_equal(A, fun.jac(x1)) + assert_array_equal(x1, fun.x) + assert_array_equal(A.dot(x0), fun.f) + assert_array_equal(A.dot(x1), fun.fun(x1)) + assert_array_equal(A.dot(x1), fun.f) + + +def test_IdentityVectorFunction(): + x0 = np.zeros(3) + + f1 = IdentityVectorFunction(x0, None) + f2 = IdentityVectorFunction(x0, False) + f3 = IdentityVectorFunction(x0, True) + + assert_(f1.sparse_jacobian) + assert_(not f2.sparse_jacobian) + assert_(f3.sparse_jacobian) + + x = np.array([-1, 2, 1]) + v = np.array([-2, 3, 0]) + + assert_array_equal(f1.fun(x), x) + assert_array_equal(f2.fun(x), x) + + assert_array_equal(f1.jac(x).toarray(), np.eye(3)) + assert_array_equal(f2.jac(x), np.eye(3)) + + assert_array_equal(f1.hess(x, v).toarray(), np.zeros((3, 3))) + + +@pytest.mark.skipif( + platform.python_implementation() == "PyPy", + reason="assert_deallocate not available on PyPy" +) +def test_ScalarFunctionNoReferenceCycle(): + """Regression test for gh-20768.""" + ex = ExScalarFunction() + x0 = np.zeros(3) + with assert_deallocated(lambda: ScalarFunction(ex.fun, x0, (), ex.grad, + ex.hess, None, (-np.inf, np.inf))): + pass + + +@pytest.mark.skipif( + platform.python_implementation() == "PyPy", + reason="assert_deallocate not available on PyPy" +) +@pytest.mark.xfail(reason="TODO remove reference cycle from VectorFunction") +def test_VectorFunctionNoReferenceCycle(): + """Regression test for gh-20768.""" + ex = ExVectorialFunction() + x0 = [1.0, 0.0] + with assert_deallocated(lambda: VectorFunction(ex.fun, x0, ex.jac, + ex.hess, None, None, (-np.inf, np.inf), None)): + pass + + +@pytest.mark.skipif( + platform.python_implementation() == "PyPy", + reason="assert_deallocate not available on PyPy" +) +def test_LinearVectorFunctionNoReferenceCycle(): + """Regression test for gh-20768.""" + A_dense = np.array([ + [-1, 2, 0], + [0, 4, 2] + ]) + x0 = np.zeros(3) + A_sparse = csr_matrix(A_dense) + with assert_deallocated(lambda: 
LinearVectorFunction(A_sparse, x0, None)): + pass diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_direct.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_direct.py new file mode 100644 index 0000000000000000000000000000000000000000..835d3164c8d547599a507550dcc7629e7d327394 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_direct.py @@ -0,0 +1,321 @@ +""" +Unit test for DIRECT optimization algorithm. +""" +from numpy.testing import (assert_allclose, + assert_array_less) +import pytest +import numpy as np +from scipy.optimize import direct, Bounds +import threading + + +class TestDIRECT: + + def setup_method(self): + self.fun_calls = threading.local() + self.bounds_sphere = 4*[(-2, 3)] + self.optimum_sphere_pos = np.zeros((4, )) + self.optimum_sphere = 0.0 + self.bounds_stylinski_tang = Bounds([-4., -4.], [4., 4.]) + self.maxiter = 1000 + + # test functions + def sphere(self, x): + if not hasattr(self.fun_calls, 'c'): + self.fun_calls.c = 0 + self.fun_calls.c += 1 + return np.square(x).sum() + + def inv(self, x): + if np.sum(x) == 0: + raise ZeroDivisionError() + return 1/np.sum(x) + + def nan_fun(self, x): + return np.nan + + def inf_fun(self, x): + return np.inf + + def styblinski_tang(self, pos): + x, y = pos + return 0.5 * (x**4 - 16 * x**2 + 5 * x + y**4 - 16 * y**2 + 5 * y) + + @pytest.mark.parametrize("locally_biased", [True, False]) + def test_direct(self, locally_biased): + res = direct(self.sphere, self.bounds_sphere, + locally_biased=locally_biased) + + # test accuracy + assert_allclose(res.x, self.optimum_sphere_pos, + rtol=1e-3, atol=1e-3) + assert_allclose(res.fun, self.optimum_sphere, atol=1e-5, rtol=1e-5) + + # test that result lies within bounds + _bounds = np.asarray(self.bounds_sphere) + assert_array_less(_bounds[:, 0], res.x) + assert_array_less(res.x, _bounds[:, 1]) + + # test number of function evaluations. 
Original DIRECT overshoots by + # up to 500 evaluations in last iteration + assert res.nfev <= 1000 * (len(self.bounds_sphere) + 1) + # test that number of function evaluations is correct + assert res.nfev == self.fun_calls.c + + # test that number of iterations is below supplied maximum + assert res.nit <= self.maxiter + + @pytest.mark.parametrize("locally_biased", [True, False]) + def test_direct_callback(self, locally_biased): + # test that callback does not change the result + res = direct(self.sphere, self.bounds_sphere, + locally_biased=locally_biased) + + def callback(x): + x = 2*x + dummy = np.square(x) + print("DIRECT minimization algorithm callback test") + return dummy + + res_callback = direct(self.sphere, self.bounds_sphere, + locally_biased=locally_biased, + callback=callback) + + assert_allclose(res.x, res_callback.x) + + assert res.nit == res_callback.nit + assert res.nfev == res_callback.nfev + assert res.status == res_callback.status + assert res.success == res_callback.success + assert res.fun == res_callback.fun + assert_allclose(res.x, res_callback.x) + assert res.message == res_callback.message + + # test accuracy + assert_allclose(res_callback.x, self.optimum_sphere_pos, + rtol=1e-3, atol=1e-3) + assert_allclose(res_callback.fun, self.optimum_sphere, + atol=1e-5, rtol=1e-5) + + @pytest.mark.parametrize("locally_biased", [True, False]) + def test_exception(self, locally_biased): + bounds = 4*[(-10, 10)] + with pytest.raises(ZeroDivisionError): + direct(self.inv, bounds=bounds, + locally_biased=locally_biased) + + @pytest.mark.parametrize("locally_biased", [True, False]) + def test_nan(self, locally_biased): + bounds = 4*[(-10, 10)] + direct(self.nan_fun, bounds=bounds, + locally_biased=locally_biased) + + @pytest.mark.parametrize("len_tol", [1e-3, 1e-4]) + @pytest.mark.parametrize("locally_biased", [True, False]) + def test_len_tol(self, len_tol, locally_biased): + bounds = 4*[(-10., 10.)] + res = direct(self.sphere, bounds=bounds, len_tol=len_tol, + vol_tol=1e-30, locally_biased=locally_biased) + assert res.status == 5 + assert res.success + assert_allclose(res.x, np.zeros((4, ))) + message = ("The side length measure of the hyperrectangle containing " + "the lowest function value found is below " + f"len_tol={len_tol}") + assert res.message == message + + @pytest.mark.parametrize("vol_tol", [1e-6, 1e-8]) + @pytest.mark.parametrize("locally_biased", [True, False]) + def test_vol_tol(self, vol_tol, locally_biased): + bounds = 4*[(-10., 10.)] + res = direct(self.sphere, bounds=bounds, vol_tol=vol_tol, + len_tol=0., locally_biased=locally_biased) + assert res.status == 4 + assert res.success + assert_allclose(res.x, np.zeros((4, ))) + message = ("The volume of the hyperrectangle containing the lowest " + f"function value found is below vol_tol={vol_tol}") + assert res.message == message + + @pytest.mark.parametrize("f_min_rtol", [1e-3, 1e-5, 1e-7]) + @pytest.mark.parametrize("locally_biased", [True, False]) + def test_f_min(self, f_min_rtol, locally_biased): + # test that desired function value is reached within + # relative tolerance of f_min_rtol + f_min = 1. + bounds = 4*[(-2., 10.)] + res = direct(self.sphere, bounds=bounds, f_min=f_min, + f_min_rtol=f_min_rtol, + locally_biased=locally_biased) + assert res.status == 3 + assert res.success + assert res.fun < f_min * (1. 
+ f_min_rtol) + message = ("The best function value found is within a relative " + f"error={f_min_rtol} of the (known) global optimum f_min") + assert res.message == message + + def circle_with_args(self, x, a, b): + return np.square(x[0] - a) + np.square(x[1] - b).sum() + + @pytest.mark.parametrize("locally_biased", [True, False]) + def test_f_circle_with_args(self, locally_biased): + bounds = 2*[(-2.0, 2.0)] + + res = direct(self.circle_with_args, bounds, args=(1, 1), maxfun=1250, + locally_biased=locally_biased) + assert_allclose(res.x, np.array([1., 1.]), rtol=1e-5) + + @pytest.mark.parametrize("locally_biased", [True, False]) + def test_failure_maxfun(self, locally_biased): + # test that if optimization runs for the maximal number of + # evaluations, success = False is returned + + maxfun = 100 + result = direct(self.styblinski_tang, self.bounds_stylinski_tang, + maxfun=maxfun, locally_biased=locally_biased) + assert result.success is False + assert result.status == 1 + assert result.nfev >= maxfun + message = ("Number of function evaluations done is " + f"larger than maxfun={maxfun}") + assert result.message == message + + @pytest.mark.parametrize("locally_biased", [True, False]) + def test_failure_maxiter(self, locally_biased): + # test that if optimization runs for the maximal number of + # iterations, success = False is returned + + maxiter = 10 + result = direct(self.styblinski_tang, self.bounds_stylinski_tang, + maxiter=maxiter, locally_biased=locally_biased) + assert result.success is False + assert result.status == 2 + assert result.nit >= maxiter + message = f"Number of iterations is larger than maxiter={maxiter}" + assert result.message == message + + @pytest.mark.parametrize("locally_biased", [True, False]) + def test_bounds_variants(self, locally_biased): + # test that new and old bounds yield same result + + lb = [-6., 1., -5.] + ub = [-1., 3., 5.] 
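+        # the sphere objective is minimized at the point of each interval
+        # closest to the origin, i.e. (-1, 1, 0), which is the x_opt checked below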
+ x_opt = np.array([-1., 1., 0.]) + bounds_old = list(zip(lb, ub)) + bounds_new = Bounds(lb, ub) + + res_old_bounds = direct(self.sphere, bounds_old, + locally_biased=locally_biased) + res_new_bounds = direct(self.sphere, bounds_new, + locally_biased=locally_biased) + + assert res_new_bounds.nfev == res_old_bounds.nfev + assert res_new_bounds.message == res_old_bounds.message + assert res_new_bounds.success == res_old_bounds.success + assert res_new_bounds.nit == res_old_bounds.nit + assert_allclose(res_new_bounds.x, res_old_bounds.x) + assert_allclose(res_new_bounds.x, x_opt, rtol=1e-2) + + @pytest.mark.parametrize("locally_biased", [True, False]) + @pytest.mark.parametrize("eps", [1e-5, 1e-4, 1e-3]) + def test_epsilon(self, eps, locally_biased): + result = direct(self.styblinski_tang, self.bounds_stylinski_tang, + eps=eps, vol_tol=1e-6, + locally_biased=locally_biased) + assert result.status == 4 + assert result.success + + @pytest.mark.xslow + @pytest.mark.parametrize("locally_biased", [True, False]) + def test_no_segmentation_fault(self, locally_biased): + # test that an excessive number of function evaluations + # does not result in segmentation fault + bounds = [(-5., 20.)] * 100 + result = direct(self.sphere, bounds, maxfun=10000000, + maxiter=1000000, locally_biased=locally_biased) + assert result is not None + + @pytest.mark.parametrize("locally_biased", [True, False]) + def test_inf_fun(self, locally_biased): + # test that an objective value of infinity does not crash DIRECT + bounds = [(-5., 5.)] * 2 + result = direct(self.inf_fun, bounds, + locally_biased=locally_biased) + assert result is not None + + @pytest.mark.parametrize("len_tol", [-1, 2]) + def test_len_tol_validation(self, len_tol): + error_msg = "len_tol must be between 0 and 1." + with pytest.raises(ValueError, match=error_msg): + direct(self.styblinski_tang, self.bounds_stylinski_tang, + len_tol=len_tol) + + @pytest.mark.parametrize("vol_tol", [-1, 2]) + def test_vol_tol_validation(self, vol_tol): + error_msg = "vol_tol must be between 0 and 1." + with pytest.raises(ValueError, match=error_msg): + direct(self.styblinski_tang, self.bounds_stylinski_tang, + vol_tol=vol_tol) + + @pytest.mark.parametrize("f_min_rtol", [-1, 2]) + def test_fmin_rtol_validation(self, f_min_rtol): + error_msg = "f_min_rtol must be between 0 and 1." + with pytest.raises(ValueError, match=error_msg): + direct(self.styblinski_tang, self.bounds_stylinski_tang, + f_min_rtol=f_min_rtol, f_min=0.) + + @pytest.mark.parametrize("maxfun", [1.5, "string", (1, 2)]) + def test_maxfun_wrong_type(self, maxfun): + error_msg = "maxfun must be of type int." + with pytest.raises(ValueError, match=error_msg): + direct(self.styblinski_tang, self.bounds_stylinski_tang, + maxfun=maxfun) + + @pytest.mark.parametrize("maxiter", [1.5, "string", (1, 2)]) + def test_maxiter_wrong_type(self, maxiter): + error_msg = "maxiter must be of type int." + with pytest.raises(ValueError, match=error_msg): + direct(self.styblinski_tang, self.bounds_stylinski_tang, + maxiter=maxiter) + + def test_negative_maxiter(self): + error_msg = "maxiter must be > 0." + with pytest.raises(ValueError, match=error_msg): + direct(self.styblinski_tang, self.bounds_stylinski_tang, + maxiter=-1) + + def test_negative_maxfun(self): + error_msg = "maxfun must be > 0." 
+ with pytest.raises(ValueError, match=error_msg): + direct(self.styblinski_tang, self.bounds_stylinski_tang, + maxfun=-1) + + @pytest.mark.parametrize("bounds", ["bounds", 2., 0]) + def test_invalid_bounds_type(self, bounds): + error_msg = ("bounds must be a sequence or " + "instance of Bounds class") + with pytest.raises(ValueError, match=error_msg): + direct(self.styblinski_tang, bounds) + + @pytest.mark.parametrize("bounds", + [Bounds([-1., -1], [-2, 1]), + Bounds([-np.nan, -1], [-2, np.nan]), + ] + ) + def test_incorrect_bounds(self, bounds): + error_msg = 'Bounds are not consistent min < max' + with pytest.raises(ValueError, match=error_msg): + direct(self.styblinski_tang, bounds) + + def test_inf_bounds(self): + error_msg = 'Bounds must not be inf.' + bounds = Bounds([-np.inf, -1], [-2, np.inf]) + with pytest.raises(ValueError, match=error_msg): + direct(self.styblinski_tang, bounds) + + @pytest.mark.parametrize("locally_biased", ["bias", [0, 0], 2.]) + def test_locally_biased_validation(self, locally_biased): + error_msg = 'locally_biased must be True or False.' + with pytest.raises(ValueError, match=error_msg): + direct(self.styblinski_tang, self.bounds_stylinski_tang, + locally_biased=locally_biased) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_extending.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_extending.py new file mode 100644 index 0000000000000000000000000000000000000000..279cac794e5f453fee52f19026f96eb530259485 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_extending.py @@ -0,0 +1,28 @@ +import os +import platform +import sysconfig + +import pytest + +from scipy._lib._testutils import IS_EDITABLE, _test_cython_extension, cython + + +@pytest.mark.fail_slow(40) +# essential per https://github.com/scipy/scipy/pull/20487#discussion_r1567057247 +@pytest.mark.skipif(IS_EDITABLE, + reason='Editable install cannot find .pxd headers.') +@pytest.mark.skipif((platform.system() == 'Windows' and + sysconfig.get_config_var('Py_GIL_DISABLED')), + reason='gh-22039') +@pytest.mark.skipif(platform.machine() in ["wasm32", "wasm64"], + reason="Can't start subprocess") +@pytest.mark.skipif(cython is None, reason="requires cython") +def test_cython(tmp_path): + srcdir = os.path.dirname(os.path.dirname(__file__)) + extensions, extensions_cpp = _test_cython_extension(tmp_path, srcdir) + # actually test the cython c-extensions + # From docstring for scipy.optimize.cython_optimize module + x = extensions.brentq_example() + assert x == 0.6999942848231314 + x = extensions_cpp.brentq_example() + assert x == 0.6999942848231314 diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_hessian_update_strategy.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_hessian_update_strategy.py new file mode 100644 index 0000000000000000000000000000000000000000..2434e92434ff2ecc74ce2e8b8119632207fd0853 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_hessian_update_strategy.py @@ -0,0 +1,300 @@ +import re +from copy import deepcopy + +import numpy as np +import pytest +from numpy.linalg import norm +from numpy.testing import (TestCase, assert_array_almost_equal, + assert_array_equal, assert_array_less) +from 
scipy.optimize import (BFGS, SR1) + + +class Rosenbrock: + """Rosenbrock function. + + The following optimization problem: + minimize sum(100.0*(x[1:] - x[:-1]**2.0)**2.0 + (1 - x[:-1])**2.0) + """ + + def __init__(self, n=2, random_state=0): + rng = np.random.RandomState(random_state) + self.x0 = rng.uniform(-1, 1, n) + self.x_opt = np.ones(n) + + def fun(self, x): + x = np.asarray(x) + r = np.sum(100.0 * (x[1:] - x[:-1]**2.0)**2.0 + (1 - x[:-1])**2.0, + axis=0) + return r + + def grad(self, x): + x = np.asarray(x) + xm = x[1:-1] + xm_m1 = x[:-2] + xm_p1 = x[2:] + der = np.zeros_like(x) + der[1:-1] = (200 * (xm - xm_m1**2) - + 400 * (xm_p1 - xm**2) * xm - 2 * (1 - xm)) + der[0] = -400 * x[0] * (x[1] - x[0]**2) - 2 * (1 - x[0]) + der[-1] = 200 * (x[-1] - x[-2]**2) + return der + + def hess(self, x): + x = np.atleast_1d(x) + H = np.diag(-400 * x[:-1], 1) - np.diag(400 * x[:-1], -1) + diagonal = np.zeros(len(x), dtype=x.dtype) + diagonal[0] = 1200 * x[0]**2 - 400 * x[1] + 2 + diagonal[-1] = 200 + diagonal[1:-1] = 202 + 1200 * x[1:-1]**2 - 400 * x[2:] + H = H + np.diag(diagonal) + return H + + +class TestHessianUpdateStrategy(TestCase): + + + def test_hessian_initialization(self): + + ndims = 5 + symmetric_matrix = np.array([[43, 24, 33, 34, 49], + [24, 36, 44, 15, 44], + [33, 44, 37, 1, 30], + [34, 15, 1, 5, 46], + [49, 44, 30, 46, 22]]) + init_scales = ( + ('auto', np.eye(ndims)), + (2, np.eye(ndims) * 2), + (np.arange(1, ndims + 1) * np.eye(ndims), + np.arange(1, ndims + 1) * np.eye(ndims)), + (symmetric_matrix, symmetric_matrix),) + for approx_type in ['hess', 'inv_hess']: + for init_scale, true_matrix in init_scales: + # large min_{denominator,curvatur} makes them skip an update, + # so we can have our initial matrix + quasi_newton = (BFGS(init_scale=init_scale, + min_curvature=1e50, + exception_strategy='skip_update'), + SR1(init_scale=init_scale, + min_denominator=1e50)) + + for qn in quasi_newton: + qn.initialize(ndims, approx_type) + B = qn.get_matrix() + + assert_array_equal(B, np.eye(ndims)) + # don't test the auto init scale + if isinstance(init_scale, str) and init_scale == 'auto': + continue + + qn.update(np.ones(ndims) * 1e-5, np.arange(ndims) + 0.2) + B = qn.get_matrix() + assert_array_equal(B, true_matrix) + + # For this list of points, it is known + # that no exception occur during the + # Hessian update. Hence no update is + # skiped or damped. + + + def test_initialize_catch_illegal(self): + ndims = 3 + # no complex allowed + inits_msg_errtype = ((complex(3.14), + r"float\(\) argument must be a string or a " + r"(real )?number, not 'complex'", + TypeError), + + (np.array([3.2, 2.3, 1.2]).astype(np.complex128), + "init_scale contains complex elements, " + "must be real.", + TypeError), + + (np.array([[43, 24, 33], + [24, 36, 44, ], + [33, 44, 37, ]]).astype(np.complex128), + "init_scale contains complex elements, " + "must be real.", + TypeError), + + # not square + (np.array([[43, 55, 66]]), + re.escape( + "If init_scale is an array, it must have the " + "dimensions of the hess/inv_hess: (3, 3)." 
+ " Got (1, 3)."), + ValueError), + + # not symmetric + (np.array([[43, 24, 33], + [24.1, 36, 44, ], + [33, 44, 37, ]]), + re.escape("If init_scale is an array, it must be" + " symmetric (passing scipy.linalg.issymmetric)" + " to be an approximation of a hess/inv_hess."), + ValueError), + ) + for approx_type in ['hess', 'inv_hess']: + for init_scale, message, errortype in inits_msg_errtype: + # large min_{denominator,curvatur} makes it skip an update, + # so we can retrieve our initial matrix + quasi_newton = (BFGS(init_scale=init_scale), + SR1(init_scale=init_scale)) + + for qn in quasi_newton: + qn.initialize(ndims, approx_type) + with pytest.raises(errortype, match=message): + qn.update(np.ones(ndims), np.arange(ndims)) + + def test_rosenbrock_with_no_exception(self): + # Define auxiliary problem + prob = Rosenbrock(n=5) + # Define iteration points + x_list = [[0.0976270, 0.4303787, 0.2055267, 0.0897663, -0.15269040], + [0.1847239, 0.0505757, 0.2123832, 0.0255081, 0.00083286], + [0.2142498, -0.0188480, 0.0503822, 0.0347033, 0.03323606], + [0.2071680, -0.0185071, 0.0341337, -0.0139298, 0.02881750], + [0.1533055, -0.0322935, 0.0280418, -0.0083592, 0.01503699], + [0.1382378, -0.0276671, 0.0266161, -0.0074060, 0.02801610], + [0.1651957, -0.0049124, 0.0269665, -0.0040025, 0.02138184], + [0.2354930, 0.0443711, 0.0173959, 0.0041872, 0.00794563], + [0.4168118, 0.1433867, 0.0111714, 0.0126265, -0.00658537], + [0.4681972, 0.2153273, 0.0225249, 0.0152704, -0.00463809], + [0.6023068, 0.3346815, 0.0731108, 0.0186618, -0.00371541], + [0.6415743, 0.3985468, 0.1324422, 0.0214160, -0.00062401], + [0.7503690, 0.5447616, 0.2804541, 0.0539851, 0.00242230], + [0.7452626, 0.5644594, 0.3324679, 0.0865153, 0.00454960], + [0.8059782, 0.6586838, 0.4229577, 0.1452990, 0.00976702], + [0.8549542, 0.7226562, 0.4991309, 0.2420093, 0.02772661], + [0.8571332, 0.7285741, 0.5279076, 0.2824549, 0.06030276], + [0.8835633, 0.7727077, 0.5957984, 0.3411303, 0.09652185], + [0.9071558, 0.8299587, 0.6771400, 0.4402896, 0.17469338], + [0.9190793, 0.8486480, 0.7163332, 0.5083780, 0.26107691], + [0.9371223, 0.8762177, 0.7653702, 0.5773109, 0.32181041], + [0.9554613, 0.9119893, 0.8282687, 0.6776178, 0.43162744], + [0.9545744, 0.9099264, 0.8270244, 0.6822220, 0.45237623], + [0.9688112, 0.9351710, 0.8730961, 0.7546601, 0.56622448], + [0.9743227, 0.9491953, 0.9005150, 0.8086497, 0.64505437], + [0.9807345, 0.9638853, 0.9283012, 0.8631675, 0.73812581], + [0.9886746, 0.9777760, 0.9558950, 0.9123417, 0.82726553], + [0.9899096, 0.9803828, 0.9615592, 0.9255600, 0.85822149], + [0.9969510, 0.9935441, 0.9864657, 0.9726775, 0.94358663], + [0.9979533, 0.9960274, 0.9921724, 0.9837415, 0.96626288], + [0.9995981, 0.9989171, 0.9974178, 0.9949954, 0.99023356], + [1.0002640, 1.0005088, 1.0010594, 1.0021161, 1.00386912], + [0.9998903, 0.9998459, 0.9997795, 0.9995484, 0.99916305], + [1.0000008, 0.9999905, 0.9999481, 0.9998903, 0.99978047], + [1.0000004, 0.9999983, 1.0000001, 1.0000031, 1.00000297], + [0.9999995, 1.0000003, 1.0000005, 1.0000001, 1.00000032], + [0.9999999, 0.9999997, 0.9999994, 0.9999989, 0.99999786], + [0.9999999, 0.9999999, 0.9999999, 0.9999999, 0.99999991]] + # Get iteration points + grad_list = [prob.grad(x) for x in x_list] + delta_x = [np.array(x_list[i+1])-np.array(x_list[i]) + for i in range(len(x_list)-1)] + delta_grad = [grad_list[i+1]-grad_list[i] + for i in range(len(grad_list)-1)] + # Check curvature condition + for s, y in zip(delta_x, delta_grad): + if np.dot(s, y) <= 0: + raise ArithmeticError() + # Define QuasiNewton 
update + for quasi_newton in (BFGS(init_scale=1, min_curvature=1e-4), + SR1(init_scale=1)): + hess = deepcopy(quasi_newton) + inv_hess = deepcopy(quasi_newton) + hess.initialize(len(x_list[0]), 'hess') + inv_hess.initialize(len(x_list[0]), 'inv_hess') + # Compare the hessian and its inverse + for s, y in zip(delta_x, delta_grad): + hess.update(s, y) + inv_hess.update(s, y) + B = hess.get_matrix() + H = inv_hess.get_matrix() + assert_array_almost_equal(np.linalg.inv(B), H, decimal=10) + B_true = prob.hess(x_list[len(delta_x)]) + assert_array_less(norm(B - B_true)/norm(B_true), 0.1) + + def test_SR1_skip_update(self): + # Define auxiliary problem + prob = Rosenbrock(n=5) + # Define iteration points + x_list = [[0.0976270, 0.4303787, 0.2055267, 0.0897663, -0.15269040], + [0.1847239, 0.0505757, 0.2123832, 0.0255081, 0.00083286], + [0.2142498, -0.0188480, 0.0503822, 0.0347033, 0.03323606], + [0.2071680, -0.0185071, 0.0341337, -0.0139298, 0.02881750], + [0.1533055, -0.0322935, 0.0280418, -0.0083592, 0.01503699], + [0.1382378, -0.0276671, 0.0266161, -0.0074060, 0.02801610], + [0.1651957, -0.0049124, 0.0269665, -0.0040025, 0.02138184], + [0.2354930, 0.0443711, 0.0173959, 0.0041872, 0.00794563], + [0.4168118, 0.1433867, 0.0111714, 0.0126265, -0.00658537], + [0.4681972, 0.2153273, 0.0225249, 0.0152704, -0.00463809], + [0.6023068, 0.3346815, 0.0731108, 0.0186618, -0.00371541], + [0.6415743, 0.3985468, 0.1324422, 0.0214160, -0.00062401], + [0.7503690, 0.5447616, 0.2804541, 0.0539851, 0.00242230], + [0.7452626, 0.5644594, 0.3324679, 0.0865153, 0.00454960], + [0.8059782, 0.6586838, 0.4229577, 0.1452990, 0.00976702], + [0.8549542, 0.7226562, 0.4991309, 0.2420093, 0.02772661], + [0.8571332, 0.7285741, 0.5279076, 0.2824549, 0.06030276], + [0.8835633, 0.7727077, 0.5957984, 0.3411303, 0.09652185], + [0.9071558, 0.8299587, 0.6771400, 0.4402896, 0.17469338]] + # Get iteration points + grad_list = [prob.grad(x) for x in x_list] + delta_x = [np.array(x_list[i+1])-np.array(x_list[i]) + for i in range(len(x_list)-1)] + delta_grad = [grad_list[i+1]-grad_list[i] + for i in range(len(grad_list)-1)] + hess = SR1(init_scale=1, min_denominator=1e-2) + hess.initialize(len(x_list[0]), 'hess') + # Compare the Hessian and its inverse + for i in range(len(delta_x)-1): + s = delta_x[i] + y = delta_grad[i] + hess.update(s, y) + # Test skip update + B = np.copy(hess.get_matrix()) + s = delta_x[17] + y = delta_grad[17] + hess.update(s, y) + B_updated = np.copy(hess.get_matrix()) + assert_array_equal(B, B_updated) + + def test_BFGS_skip_update(self): + # Define auxiliary problem + prob = Rosenbrock(n=5) + # Define iteration points + x_list = [[0.0976270, 0.4303787, 0.2055267, 0.0897663, -0.15269040], + [0.1847239, 0.0505757, 0.2123832, 0.0255081, 0.00083286], + [0.2142498, -0.0188480, 0.0503822, 0.0347033, 0.03323606], + [0.2071680, -0.0185071, 0.0341337, -0.0139298, 0.02881750], + [0.1533055, -0.0322935, 0.0280418, -0.0083592, 0.01503699], + [0.1382378, -0.0276671, 0.0266161, -0.0074060, 0.02801610], + [0.1651957, -0.0049124, 0.0269665, -0.0040025, 0.02138184]] + # Get iteration points + grad_list = [prob.grad(x) for x in x_list] + delta_x = [np.array(x_list[i+1])-np.array(x_list[i]) + for i in range(len(x_list)-1)] + delta_grad = [grad_list[i+1]-grad_list[i] + for i in range(len(grad_list)-1)] + hess = BFGS(init_scale=1, min_curvature=10) + hess.initialize(len(x_list[0]), 'hess') + # Compare the Hessian and its inverse + for i in range(len(delta_x)-1): + s = delta_x[i] + y = delta_grad[i] + hess.update(s, y) + # Test skip 
update + B = np.copy(hess.get_matrix()) + s = delta_x[5] + y = delta_grad[5] + hess.update(s, y) + B_updated = np.copy(hess.get_matrix()) + assert_array_equal(B, B_updated) + + +@pytest.mark.parametrize('strategy', [BFGS, SR1]) +@pytest.mark.parametrize('approx_type', ['hess', 'inv_hess']) +def test_matmul_equals_dot(strategy, approx_type): + H = strategy(init_scale=1) + H.initialize(2, approx_type) + v = np.array([1, 2]) + assert_array_equal(H @ v, H.dot(v)) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_isotonic_regression.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_isotonic_regression.py new file mode 100644 index 0000000000000000000000000000000000000000..b49c56db5b4470c1e4e0f787df52c80eb055c120 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_isotonic_regression.py @@ -0,0 +1,167 @@ +import numpy as np +from numpy.testing import assert_allclose, assert_equal +import pytest + +from scipy.optimize._pava_pybind import pava +from scipy.optimize import isotonic_regression + + +class TestIsotonicRegression: + @pytest.mark.parametrize( + ("y", "w", "msg"), + [ + ([[0, 1]], None, + "array has incorrect number of dimensions: 2; expected 1"), + ([0, 1], [[1, 2]], + "Input arrays y and w must have one dimension of equal length"), + ([0, 1], [1], + "Input arrays y and w must have one dimension of equal length"), + (1, [1, 2], + "Input arrays y and w must have one dimension of equal length"), + ([1, 2], 1, + "Input arrays y and w must have one dimension of equal length"), + ([0, 1], [0, 1], + "Weights w must be strictly positive"), + ] + ) + def test_raise_error(self, y, w, msg): + with pytest.raises(ValueError, match=msg): + isotonic_regression(y=y, weights=w) + + def test_simple_pava(self): + # Test case of Busing 2020 + # https://doi.org/10.18637/jss.v102.c01 + y = np.array([8, 4, 8, 2, 2, 0, 8], dtype=np.float64) + w = np.ones_like(y) + r = np.full(shape=y.shape[0] + 1, fill_value=-1, dtype=np.intp) + pava(y, w, r) + assert_allclose(y, [4, 4, 4, 4, 4, 4, 8]) + # Only first 2 elements of w are changed. + assert_allclose(w, [6, 1, 1, 1, 1, 1, 1]) + # Only first 3 elements of r are changed. 
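# Editorial note (an inference from the assertion that follows, not a
# statement from the original file): r appears to collect the PAVA block
# boundaries -- here the pooled blocks [0, 6) and [6, 7) -- so only r[0:3]
# are written and the remaining entries keep their -1 fill value.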
+ assert_allclose(r, [0, 6, 7, -1, -1, -1, -1, -1]) + + @pytest.mark.parametrize("y_dtype", [np.float64, np.float32, np.int64, np.int32]) + @pytest.mark.parametrize("w_dtype", [np.float64, np.float32, np.int64, np.int32]) + @pytest.mark.parametrize("w", [None, "ones"]) + def test_simple_isotonic_regression(self, w, w_dtype, y_dtype): + # Test case of Busing 2020 + # https://doi.org/10.18637/jss.v102.c01 + y = np.array([8, 4, 8, 2, 2, 0, 8], dtype=y_dtype) + if w is not None: + w = np.ones_like(y, dtype=w_dtype) + res = isotonic_regression(y, weights=w) + assert res.x.dtype == np.float64 + assert res.weights.dtype == np.float64 + assert_allclose(res.x, [4, 4, 4, 4, 4, 4, 8]) + assert_allclose(res.weights, [6, 1]) + assert_allclose(res.blocks, [0, 6, 7]) + # Assert that y was not overwritten + assert_equal(y, np.array([8, 4, 8, 2, 2, 0, 8], dtype=np.float64)) + + @pytest.mark.parametrize("increasing", [True, False]) + def test_linspace(self, increasing): + n = 10 + y = np.linspace(0, 1, n) if increasing else np.linspace(1, 0, n) + res = isotonic_regression(y, increasing=increasing) + assert_allclose(res.x, y) + assert_allclose(res.blocks, np.arange(n + 1)) + + def test_weights(self): + w = np.array([1, 2, 5, 0.5, 0.5, 0.5, 1, 3]) + y = np.array([3, 2, 1, 10, 9, 8, 20, 10]) + res = isotonic_regression(y, weights=w) + assert_allclose(res.x, [12/8, 12/8, 12/8, 9, 9, 9, 50/4, 50/4]) + assert_allclose(res.weights, [8, 1.5, 4]) + assert_allclose(res.blocks, [0, 3, 6, 8]) + + # weights are like repeated observations, we repeat the 3rd element 5 + # times. + w2 = np.array([1, 2, 1, 1, 1, 1, 1, 0.5, 0.5, 0.5, 1, 3]) + y2 = np.array([3, 2, 1, 1, 1, 1, 1, 10, 9, 8, 20, 10]) + res2 = isotonic_regression(y2, weights=w2) + assert_allclose(np.diff(res2.x[0:7]), 0) + assert_allclose(res2.x[4:], res.x) + assert_allclose(res2.weights, res.weights) + assert_allclose(res2.blocks[1:] - 4, res.blocks[1:]) + + def test_against_R_monotone(self): + y = [0, 6, 8, 3, 5, 2, 1, 7, 9, 4] + res = isotonic_regression(y) + # R code + # library(monotone) + # options(digits=8) + # monotone(c(0, 6, 8, 3, 5, 2, 1, 7, 9, 4)) + x_R = [ + 0, 4.1666667, 4.1666667, 4.1666667, 4.1666667, 4.1666667, + 4.1666667, 6.6666667, 6.6666667, 6.6666667, + ] + assert_allclose(res.x, x_R) + assert_equal(res.blocks, [0, 1, 7, 10]) + + n = 100 + y = np.linspace(0, 1, num=n, endpoint=False) + y = 5 * y + np.sin(10 * y) + res = isotonic_regression(y) + # R code + # library(monotone) + # n <- 100 + # y <- 5 * ((1:n)-1)/n + sin(10 * ((1:n)-1)/n) + # options(digits=8) + # monotone(y) + x_R = [ + 0.00000000, 0.14983342, 0.29866933, 0.44552021, 0.58941834, 0.72942554, + 0.86464247, 0.99421769, 1.11735609, 1.23332691, 1.34147098, 1.44120736, + 1.53203909, 1.57081100, 1.57081100, 1.57081100, 1.57081100, 1.57081100, + 1.57081100, 1.57081100, 1.57081100, 1.57081100, 1.57081100, 1.57081100, + 1.57081100, 1.57081100, 1.57081100, 1.57081100, 1.57081100, 1.57081100, + 1.57081100, 1.57081100, 1.57081100, 1.57081100, 1.57081100, 1.57081100, + 1.57081100, 1.57081100, 1.57081100, 1.57081100, 1.57081100, 1.57081100, + 1.57081100, 1.57081100, 1.57081100, 1.57081100, 1.57081100, 1.57081100, + 1.57081100, 1.57081100, 1.57081100, 1.62418532, 1.71654534, 1.81773256, + 1.92723551, 2.04445967, 2.16873336, 2.29931446, 2.43539782, 2.57612334, + 2.72058450, 2.86783750, 3.01691060, 3.16681390, 3.31654920, 3.46511999, + 3.61154136, 3.75484992, 3.89411335, 4.02843976, 4.15698660, 4.27896904, + 4.39366786, 4.50043662, 4.59870810, 4.68799998, 4.76791967, 4.83816823, + 4.86564130, 
4.86564130, 4.86564130, 4.86564130, 4.86564130, 4.86564130, + 4.86564130, 4.86564130, 4.86564130, 4.86564130, 4.86564130, 4.86564130, + 4.86564130, 4.86564130, 4.86564130, 4.86564130, 4.86564130, 4.86564130, + 4.86564130, 4.86564130, 4.86564130, 4.86564130, + ] + assert_allclose(res.x, x_R) + + # Test increasing + assert np.all(np.diff(res.x) >= 0) + + # Test balance property: sum(y) == sum(x) + assert_allclose(np.sum(res.x), np.sum(y)) + + # Reverse order + res_inv = isotonic_regression(-y, increasing=False) + assert_allclose(-res_inv.x, res.x) + assert_equal(res_inv.blocks, res.blocks) + + def test_readonly(self): + x = np.arange(3, dtype=float) + w = np.ones(3, dtype=float) + + x.flags.writeable = False + w.flags.writeable = False + + res = isotonic_regression(x, weights=w) + assert np.all(np.isfinite(res.x)) + assert np.all(np.isfinite(res.weights)) + assert np.all(np.isfinite(res.blocks)) + + def test_non_contiguous_arrays(self): + x = np.arange(10, dtype=float)[::3] + w = np.ones(10, dtype=float)[::3] + assert not x.flags.c_contiguous + assert not x.flags.f_contiguous + assert not w.flags.c_contiguous + assert not w.flags.f_contiguous + + res = isotonic_regression(x, weights=w) + assert np.all(np.isfinite(res.x)) + assert np.all(np.isfinite(res.weights)) + assert np.all(np.isfinite(res.blocks)) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_lbfgsb_hessinv.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_lbfgsb_hessinv.py new file mode 100644 index 0000000000000000000000000000000000000000..8e4452cd61c5400c13f4f239055352bae754ad7e --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_lbfgsb_hessinv.py @@ -0,0 +1,43 @@ +import numpy as np +from numpy.testing import assert_allclose +import scipy.linalg +from scipy.optimize import minimize + + +def test_1(): + def f(x): + return x**4, 4*x**3 + + for gtol in [1e-8, 1e-12, 1e-20]: + for maxcor in range(20, 35): + result = minimize(fun=f, jac=True, method='L-BFGS-B', x0=20, + options={'gtol': gtol, 'maxcor': maxcor}) + + H1 = result.hess_inv(np.array([1])).reshape(1,1) + H2 = result.hess_inv.todense() + + assert_allclose(H1, H2) + + +def test_2(): + H0 = [[3, 0], [1, 2]] + + def f(x): + return np.dot(x, np.dot(scipy.linalg.inv(H0), x)) + + result1 = minimize(fun=f, method='L-BFGS-B', x0=[10, 20]) + result2 = minimize(fun=f, method='BFGS', x0=[10, 20]) + + H1 = result1.hess_inv.todense() + + H2 = np.vstack(( + result1.hess_inv(np.array([1, 0])), + result1.hess_inv(np.array([0, 1])))) + + assert_allclose( + result1.hess_inv(np.array([1, 0]).reshape(2,1)).reshape(-1), + result1.hess_inv(np.array([1, 0]))) + assert_allclose(H1, H2) + assert_allclose(H1, result2.hess_inv, rtol=1e-2, atol=0.03) + + diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_lbfgsb_setulb.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_lbfgsb_setulb.py new file mode 100644 index 0000000000000000000000000000000000000000..ee47f45509c86968b9b551eb8740ad301fd37958 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_lbfgsb_setulb.py @@ -0,0 +1,122 @@ +import numpy as np +from scipy.optimize import _lbfgsb, minimize + + +def objfun(x): + """simplified objective func to test lbfgsb bound 
violation""" + x0 = [0.8750000000000278, + 0.7500000000000153, + 0.9499999999999722, + 0.8214285714285992, + 0.6363636363636085] + x1 = [1.0, 0.0, 1.0, 0.0, 0.0] + x2 = [1.0, + 0.0, + 0.9889733043149325, + 0.0, + 0.026353554421041155] + x3 = [1.0, + 0.0, + 0.9889917442915558, + 0.0, + 0.020341986743231205] + + f0 = 5163.647901211178 + f1 = 5149.8181642072905 + f2 = 5149.379332309634 + f3 = 5149.374490771297 + + g0 = np.array([-0.5934820547965749, + 1.6251549718258351, + -71.99168459202559, + 5.346636965797545, + 37.10732723092604]) + g1 = np.array([-0.43295349282641515, + 1.008607936794592, + 18.223666726602975, + 31.927010036981997, + -19.667512518739386]) + g2 = np.array([-0.4699874455100256, + 0.9466285353668347, + -0.016874360242016825, + 48.44999161133457, + 5.819631620590712]) + g3 = np.array([-0.46970678696829116, + 0.9612719312174818, + 0.006129809488833699, + 48.43557729419473, + 6.005481418498221]) + + if np.allclose(x, x0): + f = f0 + g = g0 + elif np.allclose(x, x1): + f = f1 + g = g1 + elif np.allclose(x, x2): + f = f2 + g = g2 + elif np.allclose(x, x3): + f = f3 + g = g3 + else: + raise ValueError( + 'Simplified objective function not defined ' + 'at requested point') + return (np.copy(f), np.copy(g)) + + +def test_setulb_floatround(): + """test if setulb() violates bounds + + checks for violation due to floating point rounding error + """ + + n = 5 + m = 10 + factr = 1e7 + pgtol = 1e-5 + maxls = 20 + nbd = np.full(shape=(n,), fill_value=2, dtype=np.int32) + low_bnd = np.zeros(n, dtype=np.float64) + upper_bnd = np.ones(n, dtype=np.float64) + + x0 = np.array( + [0.8750000000000278, + 0.7500000000000153, + 0.9499999999999722, + 0.8214285714285992, + 0.6363636363636085]) + x = np.copy(x0) + + f = np.array(0.0, dtype=np.float64) + g = np.zeros(n, dtype=np.float64) + + wa = np.zeros(2*m*n + 5*n + 11*m*m + 8*m, dtype=np.float64) + iwa = np.zeros(3*n, dtype=np.int32) + task = np.zeros(2, dtype=np.int32) + ln_task = np.zeros(2, dtype=np.int32) + lsave = np.zeros(4, dtype=np.int32) + isave = np.zeros(44, dtype=np.int32) + dsave = np.zeros(29, dtype=np.float64) + + for n_iter in range(7): # 7 steps required to reproduce error + f, g = objfun(x) + + _lbfgsb.setulb(m, x, low_bnd, upper_bnd, nbd, f, g, factr, pgtol, wa, + iwa, task, lsave, isave, dsave, maxls, ln_task) + + assert (x <= upper_bnd).all() and (x >= low_bnd).all(), ( + "_lbfgsb.setulb() stepped to a point outside of the bounds") + + +def test_gh_issue18730(): + # issue 18730 reported that l-bfgs-b did not work with objectives + # returning single precision gradient arrays + def fun_single_precision(x): + x = x.astype(np.float32) + return np.sum(x**2), (2*x) + + res = minimize(fun_single_precision, x0=np.array([1., 1.]), jac=True, + method="l-bfgs-b") + np.testing.assert_allclose(res.fun, 0., atol=1e-15) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_least_squares.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_least_squares.py new file mode 100644 index 0000000000000000000000000000000000000000..d27d670a1aac01f7129d483e0a048a38dce35404 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_least_squares.py @@ -0,0 +1,874 @@ +from itertools import product + +import numpy as np +from numpy.linalg import norm +from numpy.testing import (assert_, assert_allclose, + assert_equal, suppress_warnings) +import pytest +from pytest import 
raises as assert_raises +from scipy.sparse import issparse, lil_matrix +from scipy.sparse.linalg import aslinearoperator + +from scipy.optimize import least_squares, Bounds +from scipy.optimize._lsq.least_squares import IMPLEMENTED_LOSSES +from scipy.optimize._lsq.common import EPS, make_strictly_feasible, CL_scaling_vector + + +def fun_trivial(x, a=0): + return (x - a)**2 + 5.0 + + +def jac_trivial(x, a=0.0): + return 2 * (x - a) + + +def fun_2d_trivial(x): + return np.array([x[0], x[1]]) + + +def jac_2d_trivial(x): + return np.identity(2) + + +def fun_rosenbrock(x): + return np.array([10 * (x[1] - x[0]**2), (1 - x[0])]) + + +def jac_rosenbrock(x): + return np.array([ + [-20 * x[0], 10], + [-1, 0] + ]) + + +def jac_rosenbrock_bad_dim(x): + return np.array([ + [-20 * x[0], 10], + [-1, 0], + [0.0, 0.0] + ]) + + +def fun_rosenbrock_cropped(x): + return fun_rosenbrock(x)[0] + + +def jac_rosenbrock_cropped(x): + return jac_rosenbrock(x)[0] + + +# When x is 1-D array, return is 2-D array. +def fun_wrong_dimensions(x): + return np.array([x, x**2, x**3]) + + +def jac_wrong_dimensions(x, a=0.0): + return np.atleast_3d(jac_trivial(x, a=a)) + + +def fun_bvp(x): + n = int(np.sqrt(x.shape[0])) + u = np.zeros((n + 2, n + 2)) + x = x.reshape((n, n)) + u[1:-1, 1:-1] = x + y = u[:-2, 1:-1] + u[2:, 1:-1] + u[1:-1, :-2] + u[1:-1, 2:] - 4 * x + x**3 + return y.ravel() + + +class BroydenTridiagonal: + def __init__(self, n=100, mode='sparse'): + rng = np.random.RandomState(0) + + self.n = n + + self.x0 = -np.ones(n) + self.lb = np.linspace(-2, -1.5, n) + self.ub = np.linspace(-0.8, 0.0, n) + + self.lb += 0.1 * rng.randn(n) + self.ub += 0.1 * rng.randn(n) + + self.x0 += 0.1 * rng.randn(n) + self.x0 = make_strictly_feasible(self.x0, self.lb, self.ub) + + if mode == 'sparse': + self.sparsity = lil_matrix((n, n), dtype=int) + i = np.arange(n) + self.sparsity[i, i] = 1 + i = np.arange(1, n) + self.sparsity[i, i - 1] = 1 + i = np.arange(n - 1) + self.sparsity[i, i + 1] = 1 + + self.jac = self._jac + elif mode == 'operator': + self.jac = lambda x: aslinearoperator(self._jac(x)) + elif mode == 'dense': + self.sparsity = None + self.jac = lambda x: self._jac(x).toarray() + else: + assert_(False) + + def fun(self, x): + f = (3 - x) * x + 1 + f[1:] -= x[:-1] + f[:-1] -= 2 * x[1:] + return f + + def _jac(self, x): + J = lil_matrix((self.n, self.n)) + i = np.arange(self.n) + J[i, i] = 3 - 2 * x + i = np.arange(1, self.n) + J[i, i - 1] = -1 + i = np.arange(self.n - 1) + J[i, i + 1] = -2 + return J + + +class ExponentialFittingProblem: + """Provide data and function for exponential fitting in the form + y = a + exp(b * x) + noise.""" + + def __init__(self, a, b, noise, n_outliers=1, x_range=(-1, 1), + n_points=11, random_seed=None): + rng = np.random.RandomState(random_seed) + self.m = n_points + self.n = 2 + + self.p0 = np.zeros(2) + self.x = np.linspace(x_range[0], x_range[1], n_points) + + self.y = a + np.exp(b * self.x) + self.y += noise * rng.randn(self.m) + + outliers = rng.randint(0, self.m, n_outliers) + self.y[outliers] += 50 * noise * rng.rand(n_outliers) + + self.p_opt = np.array([a, b]) + + def fun(self, p): + return p[0] + np.exp(p[1] * self.x) - self.y + + def jac(self, p): + J = np.empty((self.m, self.n)) + J[:, 0] = 1 + J[:, 1] = self.x * np.exp(p[1] * self.x) + return J + + +def cubic_soft_l1(z): + rho = np.empty((3, z.size)) + + t = 1 + z + rho[0] = 3 * (t**(1/3) - 1) + rho[1] = t ** (-2/3) + rho[2] = -2/3 * t**(-5/3) + + return rho + + +LOSSES = list(IMPLEMENTED_LOSSES.keys()) + [cubic_soft_l1] + + +class 
BaseMixin: + def test_basic(self): + # Test that the basic calling sequence works. + res = least_squares(fun_trivial, 2., method=self.method) + assert_allclose(res.x, 0, atol=1e-4) + assert_allclose(res.fun, fun_trivial(res.x)) + + def test_args_kwargs(self): + # Test that args and kwargs are passed correctly to the functions. + a = 3.0 + for jac in ['2-point', '3-point', 'cs', jac_trivial]: + with suppress_warnings() as sup: + sup.filter( + UserWarning, + "jac='(3-point|cs)' works equivalently to '2-point' for method='lm'" + ) + res = least_squares(fun_trivial, 2.0, jac, args=(a,), + method=self.method) + res1 = least_squares(fun_trivial, 2.0, jac, kwargs={'a': a}, + method=self.method) + + assert_allclose(res.x, a, rtol=1e-4) + assert_allclose(res1.x, a, rtol=1e-4) + + assert_raises(TypeError, least_squares, fun_trivial, 2.0, + args=(3, 4,), method=self.method) + assert_raises(TypeError, least_squares, fun_trivial, 2.0, + kwargs={'kaboom': 3}, method=self.method) + + def test_jac_options(self): + for jac in ['2-point', '3-point', 'cs', jac_trivial]: + with suppress_warnings() as sup: + sup.filter( + UserWarning, + "jac='(3-point|cs)' works equivalently to '2-point' for method='lm'" + ) + res = least_squares(fun_trivial, 2.0, jac, method=self.method) + assert_allclose(res.x, 0, atol=1e-4) + + assert_raises(ValueError, least_squares, fun_trivial, 2.0, jac='oops', + method=self.method) + + def test_nfev_options(self): + for max_nfev in [None, 20]: + res = least_squares(fun_trivial, 2.0, max_nfev=max_nfev, + method=self.method) + assert_allclose(res.x, 0, atol=1e-4) + + def test_x_scale_options(self): + for x_scale in [1.0, np.array([0.5]), 'jac']: + res = least_squares(fun_trivial, 2.0, x_scale=x_scale) + assert_allclose(res.x, 0) + assert_raises(ValueError, least_squares, fun_trivial, + 2.0, x_scale='auto', method=self.method) + assert_raises(ValueError, least_squares, fun_trivial, + 2.0, x_scale=-1.0, method=self.method) + assert_raises(ValueError, least_squares, fun_trivial, + 2.0, x_scale=None, method=self.method) + assert_raises(ValueError, least_squares, fun_trivial, + 2.0, x_scale=1.0+2.0j, method=self.method) + + def test_diff_step(self): + # res1 and res2 should be equivalent. + # res2 and res3 should be different. + res1 = least_squares(fun_trivial, 2.0, diff_step=1e-1, + method=self.method) + res2 = least_squares(fun_trivial, 2.0, diff_step=-1e-1, + method=self.method) + res3 = least_squares(fun_trivial, 2.0, + diff_step=None, method=self.method) + assert_allclose(res1.x, 0, atol=1e-4) + assert_allclose(res2.x, 0, atol=1e-4) + assert_allclose(res3.x, 0, atol=1e-4) + assert_equal(res1.x, res2.x) + assert_equal(res1.nfev, res2.nfev) + + def test_incorrect_options_usage(self): + assert_raises(TypeError, least_squares, fun_trivial, 2.0, + method=self.method, options={'no_such_option': 100}) + assert_raises(TypeError, least_squares, fun_trivial, 2.0, + method=self.method, options={'max_nfev': 100}) + + def test_full_result(self): + # MINPACK doesn't work very well with factor=100 on this problem, + # thus using low 'atol'. 
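# Editorial arithmetic (not part of the original test): fun_trivial returns
# x**2 + 5, so at the solution x ~ 0 the residual is ~ 5 and the cost reported
# by least_squares, defined as 0.5 * sum(residual**2), is 0.5 * 5**2 = 12.5 --
# the two constants asserted just below.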
+ res = least_squares(fun_trivial, 2.0, method=self.method) + assert_allclose(res.x, 0, atol=1e-4) + assert_allclose(res.cost, 12.5) + assert_allclose(res.fun, 5) + assert_allclose(res.jac, 0, atol=1e-4) + assert_allclose(res.grad, 0, atol=1e-2) + assert_allclose(res.optimality, 0, atol=1e-2) + assert_equal(res.active_mask, 0) + if self.method == 'lm': + assert_(res.nfev < 30) + assert_(res.njev is None) + else: + assert_(res.nfev < 10) + assert_(res.njev < 10) + assert_(res.status > 0) + assert_(res.success) + + def test_full_result_single_fev(self): + # MINPACK checks the number of nfev after the iteration, + # so it's hard to tell what he is going to compute. + if self.method == 'lm': + return + + res = least_squares(fun_trivial, 2.0, method=self.method, + max_nfev=1) + assert_equal(res.x, np.array([2])) + assert_equal(res.cost, 40.5) + assert_equal(res.fun, np.array([9])) + assert_equal(res.jac, np.array([[4]])) + assert_equal(res.grad, np.array([36])) + assert_equal(res.optimality, 36) + assert_equal(res.active_mask, np.array([0])) + assert_equal(res.nfev, 1) + assert_equal(res.njev, 1) + assert_equal(res.status, 0) + assert_equal(res.success, 0) + + def test_rosenbrock(self): + x0 = [-2, 1] + x_opt = [1, 1] + for jac, x_scale, tr_solver in product( + ['2-point', '3-point', 'cs', jac_rosenbrock], + [1.0, np.array([1.0, 0.2]), 'jac'], + ['exact', 'lsmr']): + with suppress_warnings() as sup: + sup.filter( + UserWarning, + "jac='(3-point|cs)' works equivalently to '2-point' for method='lm'" + ) + res = least_squares(fun_rosenbrock, x0, jac, x_scale=x_scale, + tr_solver=tr_solver, method=self.method) + assert_allclose(res.x, x_opt) + + def test_rosenbrock_cropped(self): + x0 = [-2, 1] + if self.method == 'lm': + assert_raises(ValueError, least_squares, fun_rosenbrock_cropped, + x0, method='lm') + else: + for jac, x_scale, tr_solver in product( + ['2-point', '3-point', 'cs', jac_rosenbrock_cropped], + [1.0, np.array([1.0, 0.2]), 'jac'], + ['exact', 'lsmr']): + res = least_squares( + fun_rosenbrock_cropped, x0, jac, x_scale=x_scale, + tr_solver=tr_solver, method=self.method) + assert_allclose(res.cost, 0, atol=1e-14) + + def test_fun_wrong_dimensions(self): + assert_raises(ValueError, least_squares, fun_wrong_dimensions, + 2.0, method=self.method) + + def test_jac_wrong_dimensions(self): + assert_raises(ValueError, least_squares, fun_trivial, + 2.0, jac_wrong_dimensions, method=self.method) + + def test_fun_and_jac_inconsistent_dimensions(self): + x0 = [1, 2] + assert_raises(ValueError, least_squares, fun_rosenbrock, x0, + jac_rosenbrock_bad_dim, method=self.method) + + def test_x0_multidimensional(self): + x0 = np.ones(4).reshape(2, 2) + assert_raises(ValueError, least_squares, fun_trivial, x0, + method=self.method) + + def test_x0_complex_scalar(self): + x0 = 2.0 + 0.0*1j + assert_raises(ValueError, least_squares, fun_trivial, x0, + method=self.method) + + def test_x0_complex_array(self): + x0 = [1.0, 2.0 + 0.0*1j] + assert_raises(ValueError, least_squares, fun_trivial, x0, + method=self.method) + + def test_bvp(self): + # This test was introduced with fix #5556. It turned out that + # dogbox solver had a bug with trust-region radius update, which + # could block its progress and create an infinite loop. And this + # discrete boundary value problem is the one which triggers it. + n = 10 + x0 = np.ones(n**2) + if self.method == 'lm': + max_nfev = 5000 # To account for Jacobian estimation. 
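# Editorial note (added reasoning, not from the original file): x0 has
# n**2 = 100 parameters and 'lm' receives no analytic Jacobian here, so each
# Jacobian estimate costs on the order of 100 extra residual evaluations via
# finite differences -- hence the much larger budget chosen above.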
+ else: + max_nfev = 100 + res = least_squares(fun_bvp, x0, ftol=1e-2, method=self.method, + max_nfev=max_nfev) + + assert_(res.nfev < max_nfev) + assert_(res.cost < 0.5) + + def test_error_raised_when_all_tolerances_below_eps(self): + # Test that all 0 tolerances are not allowed. + assert_raises(ValueError, least_squares, fun_trivial, 2.0, + method=self.method, ftol=None, xtol=None, gtol=None) + + def test_convergence_with_only_one_tolerance_enabled(self): + if self.method == 'lm': + return # should not do test + x0 = [-2, 1] + x_opt = [1, 1] + for ftol, xtol, gtol in [(1e-8, None, None), + (None, 1e-8, None), + (None, None, 1e-8)]: + res = least_squares(fun_rosenbrock, x0, jac=jac_rosenbrock, + ftol=ftol, gtol=gtol, xtol=xtol, + method=self.method) + assert_allclose(res.x, x_opt) + + +class BoundsMixin: + def test_inconsistent(self): + assert_raises(ValueError, least_squares, fun_trivial, 2.0, + bounds=(10.0, 0.0), method=self.method) + + def test_infeasible(self): + assert_raises(ValueError, least_squares, fun_trivial, 2.0, + bounds=(3., 4), method=self.method) + + def test_wrong_number(self): + assert_raises(ValueError, least_squares, fun_trivial, 2., + bounds=(1., 2, 3), method=self.method) + + def test_inconsistent_shape(self): + assert_raises(ValueError, least_squares, fun_trivial, 2.0, + bounds=(1.0, [2.0, 3.0]), method=self.method) + # 1-D array won't be broadcast + assert_raises(ValueError, least_squares, fun_rosenbrock, [1.0, 2.0], + bounds=([0.0], [3.0, 4.0]), method=self.method) + + def test_in_bounds(self): + for jac in ['2-point', '3-point', 'cs', jac_trivial]: + res = least_squares(fun_trivial, 2.0, jac=jac, + bounds=(-1.0, 3.0), method=self.method) + assert_allclose(res.x, 0.0, atol=1e-4) + assert_equal(res.active_mask, [0]) + assert_(-1 <= res.x <= 3) + res = least_squares(fun_trivial, 2.0, jac=jac, + bounds=(0.5, 3.0), method=self.method) + assert_allclose(res.x, 0.5, atol=1e-4) + assert_equal(res.active_mask, [-1]) + assert_(0.5 <= res.x <= 3) + + def test_bounds_shape(self): + def get_bounds_direct(lb, ub): + return lb, ub + + def get_bounds_instances(lb, ub): + return Bounds(lb, ub) + + for jac in ['2-point', '3-point', 'cs', jac_2d_trivial]: + for bounds_func in [get_bounds_direct, get_bounds_instances]: + x0 = [1.0, 1.0] + res = least_squares(fun_2d_trivial, x0, jac=jac) + assert_allclose(res.x, [0.0, 0.0]) + res = least_squares(fun_2d_trivial, x0, jac=jac, + bounds=bounds_func(0.5, [2.0, 2.0]), + method=self.method) + assert_allclose(res.x, [0.5, 0.5]) + res = least_squares(fun_2d_trivial, x0, jac=jac, + bounds=bounds_func([0.3, 0.2], 3.0), + method=self.method) + assert_allclose(res.x, [0.3, 0.2]) + res = least_squares( + fun_2d_trivial, x0, jac=jac, + bounds=bounds_func([-1, 0.5], [1.0, 3.0]), + method=self.method) + assert_allclose(res.x, [0.0, 0.5], atol=1e-5) + + def test_bounds_instances(self): + res = least_squares(fun_trivial, 0.5, bounds=Bounds()) + assert_allclose(res.x, 0.0, atol=1e-4) + + res = least_squares(fun_trivial, 3.0, bounds=Bounds(lb=1.0)) + assert_allclose(res.x, 1.0, atol=1e-4) + + res = least_squares(fun_trivial, 0.5, bounds=Bounds(lb=-1.0, ub=1.0)) + assert_allclose(res.x, 0.0, atol=1e-4) + + res = least_squares(fun_trivial, -3.0, bounds=Bounds(ub=-1.0)) + assert_allclose(res.x, -1.0, atol=1e-4) + + res = least_squares(fun_2d_trivial, [0.5, 0.5], + bounds=Bounds(lb=[-1.0, -1.0], ub=1.0)) + assert_allclose(res.x, [0.0, 0.0], atol=1e-5) + + res = least_squares(fun_2d_trivial, [0.5, 0.5], + bounds=Bounds(lb=[0.1, 0.1])) + 
assert_allclose(res.x, [0.1, 0.1], atol=1e-5) + + @pytest.mark.fail_slow(10) + def test_rosenbrock_bounds(self): + x0_1 = np.array([-2.0, 1.0]) + x0_2 = np.array([2.0, 2.0]) + x0_3 = np.array([-2.0, 2.0]) + x0_4 = np.array([0.0, 2.0]) + x0_5 = np.array([-1.2, 1.0]) + problems = [ + (x0_1, ([-np.inf, -1.5], np.inf)), + (x0_2, ([-np.inf, 1.5], np.inf)), + (x0_3, ([-np.inf, 1.5], np.inf)), + (x0_4, ([-np.inf, 1.5], [1.0, np.inf])), + (x0_2, ([1.0, 1.5], [3.0, 3.0])), + (x0_5, ([-50.0, 0.0], [0.5, 100])) + ] + for x0, bounds in problems: + for jac, x_scale, tr_solver in product( + ['2-point', '3-point', 'cs', jac_rosenbrock], + [1.0, [1.0, 0.5], 'jac'], + ['exact', 'lsmr']): + res = least_squares(fun_rosenbrock, x0, jac, bounds, + x_scale=x_scale, tr_solver=tr_solver, + method=self.method) + assert_allclose(res.optimality, 0.0, atol=1e-5) + + +class SparseMixin: + def test_exact_tr_solver(self): + p = BroydenTridiagonal() + assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac, + tr_solver='exact', method=self.method) + assert_raises(ValueError, least_squares, p.fun, p.x0, + tr_solver='exact', jac_sparsity=p.sparsity, + method=self.method) + + def test_equivalence(self): + sparse = BroydenTridiagonal(mode='sparse') + dense = BroydenTridiagonal(mode='dense') + res_sparse = least_squares( + sparse.fun, sparse.x0, jac=sparse.jac, + method=self.method) + res_dense = least_squares( + dense.fun, dense.x0, jac=sparse.jac, + method=self.method) + assert_equal(res_sparse.nfev, res_dense.nfev) + assert_allclose(res_sparse.x, res_dense.x, atol=1e-20) + assert_allclose(res_sparse.cost, 0, atol=1e-20) + assert_allclose(res_dense.cost, 0, atol=1e-20) + + def test_tr_options(self): + p = BroydenTridiagonal() + res = least_squares(p.fun, p.x0, p.jac, method=self.method, + tr_options={'btol': 1e-10}) + assert_allclose(res.cost, 0, atol=1e-20) + + def test_wrong_parameters(self): + p = BroydenTridiagonal() + assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac, + tr_solver='best', method=self.method) + assert_raises(TypeError, least_squares, p.fun, p.x0, p.jac, + tr_solver='lsmr', tr_options={'tol': 1e-10}) + + def test_solver_selection(self): + sparse = BroydenTridiagonal(mode='sparse') + dense = BroydenTridiagonal(mode='dense') + res_sparse = least_squares(sparse.fun, sparse.x0, jac=sparse.jac, + method=self.method) + res_dense = least_squares(dense.fun, dense.x0, jac=dense.jac, + method=self.method) + assert_allclose(res_sparse.cost, 0, atol=1e-20) + assert_allclose(res_dense.cost, 0, atol=1e-20) + assert_(issparse(res_sparse.jac)) + assert_(isinstance(res_dense.jac, np.ndarray)) + + def test_numerical_jac(self): + p = BroydenTridiagonal() + for jac in ['2-point', '3-point', 'cs']: + res_dense = least_squares(p.fun, p.x0, jac, method=self.method) + res_sparse = least_squares( + p.fun, p.x0, jac,method=self.method, + jac_sparsity=p.sparsity) + assert_equal(res_dense.nfev, res_sparse.nfev) + assert_allclose(res_dense.x, res_sparse.x, atol=1e-20) + assert_allclose(res_dense.cost, 0, atol=1e-20) + assert_allclose(res_sparse.cost, 0, atol=1e-20) + + @pytest.mark.fail_slow(10) + def test_with_bounds(self): + p = BroydenTridiagonal() + for jac, jac_sparsity in product( + [p.jac, '2-point', '3-point', 'cs'], [None, p.sparsity]): + res_1 = least_squares( + p.fun, p.x0, jac, bounds=(p.lb, np.inf), + method=self.method,jac_sparsity=jac_sparsity) + res_2 = least_squares( + p.fun, p.x0, jac, bounds=(-np.inf, p.ub), + method=self.method, jac_sparsity=jac_sparsity) + res_3 = least_squares( + p.fun, p.x0, jac, 
bounds=(p.lb, p.ub), + method=self.method, jac_sparsity=jac_sparsity) + assert_allclose(res_1.optimality, 0, atol=1e-10) + assert_allclose(res_2.optimality, 0, atol=1e-10) + assert_allclose(res_3.optimality, 0, atol=1e-10) + + def test_wrong_jac_sparsity(self): + p = BroydenTridiagonal() + sparsity = p.sparsity[:-1] + assert_raises(ValueError, least_squares, p.fun, p.x0, + jac_sparsity=sparsity, method=self.method) + + def test_linear_operator(self): + p = BroydenTridiagonal(mode='operator') + res = least_squares(p.fun, p.x0, p.jac, method=self.method) + assert_allclose(res.cost, 0.0, atol=1e-20) + assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac, + method=self.method, tr_solver='exact') + + def test_x_scale_jac_scale(self): + p = BroydenTridiagonal() + res = least_squares(p.fun, p.x0, p.jac, method=self.method, + x_scale='jac') + assert_allclose(res.cost, 0.0, atol=1e-20) + + p = BroydenTridiagonal(mode='operator') + assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac, + method=self.method, x_scale='jac') + + +class LossFunctionMixin: + def test_options(self): + for loss in LOSSES: + res = least_squares(fun_trivial, 2.0, loss=loss, + method=self.method) + assert_allclose(res.x, 0, atol=1e-15) + + assert_raises(ValueError, least_squares, fun_trivial, 2.0, + loss='hinge', method=self.method) + + def test_fun(self): + # Test that res.fun is actual residuals, and not modified by loss + # function stuff. + for loss in LOSSES: + res = least_squares(fun_trivial, 2.0, loss=loss, + method=self.method) + assert_equal(res.fun, fun_trivial(res.x)) + + def test_grad(self): + # Test that res.grad is true gradient of loss function at the + # solution. Use max_nfev = 1, to avoid reaching minimum. + x = np.array([2.0]) # res.x will be this. + + res = least_squares(fun_trivial, x, jac_trivial, loss='linear', + max_nfev=1, method=self.method) + assert_equal(res.grad, 2 * x * (x**2 + 5)) + + res = least_squares(fun_trivial, x, jac_trivial, loss='huber', + max_nfev=1, method=self.method) + assert_equal(res.grad, 2 * x) + + res = least_squares(fun_trivial, x, jac_trivial, loss='soft_l1', + max_nfev=1, method=self.method) + assert_allclose(res.grad, + 2 * x * (x**2 + 5) / (1 + (x**2 + 5)**2)**0.5) + + res = least_squares(fun_trivial, x, jac_trivial, loss='cauchy', + max_nfev=1, method=self.method) + assert_allclose(res.grad, 2 * x * (x**2 + 5) / (1 + (x**2 + 5)**2)) + + res = least_squares(fun_trivial, x, jac_trivial, loss='arctan', + max_nfev=1, method=self.method) + assert_allclose(res.grad, 2 * x * (x**2 + 5) / (1 + (x**2 + 5)**4)) + + res = least_squares(fun_trivial, x, jac_trivial, loss=cubic_soft_l1, + max_nfev=1, method=self.method) + assert_allclose(res.grad, + 2 * x * (x**2 + 5) / (1 + (x**2 + 5)**2)**(2/3)) + + def test_jac(self): + # Test that res.jac.T.dot(res.jac) gives Gauss-Newton approximation + # of Hessian. This approximation is computed by doubly differentiating + # the cost function and dropping the part containing second derivative + # of f. For a scalar function it is computed as + # H = (rho' + 2 * rho'' * f**2) * f'**2, if the expression inside the + # brackets is less than EPS it is replaced by EPS. Here, we check + # against the root of H. + + x = 2.0 # res.x will be this. + f = x**2 + 5 # res.fun will be this. 
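# Editorial derivation (a worked instance of the formula above, not part of
# the original test): for the 'soft_l1' loss rho(z) = 2*((1 + z)**0.5 - 1),
# rho'(z) = (1 + z)**-0.5 and rho''(z) = -0.5*(1 + z)**-1.5.  Substituting
# z = f**2, the bracketed term rho' + 2*rho''*f**2 collapses to
# (1 + f**2)**-1.5; its square root times f' = 2*x gives the
# 2 * x * (1 + f**2)**-0.75 expected by the 'soft_l1' assertion further down.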
+ + res = least_squares(fun_trivial, x, jac_trivial, loss='linear', + max_nfev=1, method=self.method) + assert_equal(res.jac, 2 * x) + + # For `huber` loss the Jacobian correction is identically zero + # in outlier region, in such cases it is modified to equal EPS**0.5. + res = least_squares(fun_trivial, x, jac_trivial, loss='huber', + max_nfev=1, method=self.method) + assert_equal(res.jac, 2 * x * EPS**0.5) + + # Now, let's apply `f_scale` to turn the residual into an inlier. + # The loss function becomes linear. + res = least_squares(fun_trivial, x, jac_trivial, loss='huber', + f_scale=10, max_nfev=1) + assert_equal(res.jac, 2 * x) + + # 'soft_l1' always gives a positive scaling. + res = least_squares(fun_trivial, x, jac_trivial, loss='soft_l1', + max_nfev=1, method=self.method) + assert_allclose(res.jac, 2 * x * (1 + f**2)**-0.75) + + # For 'cauchy' the correction term turns out to be negative, and it is + # replaced by EPS**0.5. + res = least_squares(fun_trivial, x, jac_trivial, loss='cauchy', + max_nfev=1, method=self.method) + assert_allclose(res.jac, 2 * x * EPS**0.5) + + # Now use scaling to turn the residual into an inlier. + res = least_squares(fun_trivial, x, jac_trivial, loss='cauchy', + f_scale=10, max_nfev=1, method=self.method) + fs = f / 10 + assert_allclose(res.jac, 2 * x * (1 - fs**2)**0.5 / (1 + fs**2)) + + # 'arctan' gives an outlier. + res = least_squares(fun_trivial, x, jac_trivial, loss='arctan', + max_nfev=1, method=self.method) + assert_allclose(res.jac, 2 * x * EPS**0.5) + + # Turn to inlier. + res = least_squares(fun_trivial, x, jac_trivial, loss='arctan', + f_scale=20.0, max_nfev=1, method=self.method) + fs = f / 20 + assert_allclose(res.jac, 2 * x * (1 - 3 * fs**4)**0.5 / (1 + fs**4)) + + # cubic_soft_l1 will give an outlier. + res = least_squares(fun_trivial, x, jac_trivial, loss=cubic_soft_l1, + max_nfev=1) + assert_allclose(res.jac, 2 * x * EPS**0.5) + + # Turn to inlier.
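# Editorial arithmetic (not part of the original test): with f = 9 and
# f_scale = 6 the scaled residual is fs = 1.5, so fs**2 = 2.25 < 3 and the
# term 1 - fs**2/3 = 0.25 stays positive -- the residual counts as an inlier
# and no EPS**0.5 substitution is needed in the assertion below.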
+ res = least_squares(fun_trivial, x, jac_trivial, + loss=cubic_soft_l1, f_scale=6, max_nfev=1) + fs = f / 6 + assert_allclose(res.jac, + 2 * x * (1 - fs**2 / 3)**0.5 * (1 + fs**2)**(-5/6)) + + def test_robustness(self): + for noise in [0.1, 1.0]: + p = ExponentialFittingProblem(1, 0.1, noise, random_seed=0) + + for jac in ['2-point', '3-point', 'cs', p.jac]: + res_lsq = least_squares(p.fun, p.p0, jac=jac, + method=self.method) + assert_allclose(res_lsq.optimality, 0, atol=1e-2) + for loss in LOSSES: + if loss == 'linear': + continue + res_robust = least_squares( + p.fun, p.p0, jac=jac, loss=loss, f_scale=noise, + method=self.method) + assert_allclose(res_robust.optimality, 0, atol=1e-2) + assert_(norm(res_robust.x - p.p_opt) < + norm(res_lsq.x - p.p_opt)) + + +class TestDogbox(BaseMixin, BoundsMixin, SparseMixin, LossFunctionMixin): + method = 'dogbox' + + +class TestTRF(BaseMixin, BoundsMixin, SparseMixin, LossFunctionMixin): + method = 'trf' + + def test_lsmr_regularization(self): + p = BroydenTridiagonal() + for regularize in [True, False]: + res = least_squares(p.fun, p.x0, p.jac, method='trf', + tr_options={'regularize': regularize}) + assert_allclose(res.cost, 0, atol=1e-20) + + +class TestLM(BaseMixin): + method = 'lm' + + def test_bounds_not_supported(self): + assert_raises(ValueError, least_squares, fun_trivial, + 2.0, bounds=(-3.0, 3.0), method='lm') + + def test_m_less_n_not_supported(self): + x0 = [-2, 1] + assert_raises(ValueError, least_squares, fun_rosenbrock_cropped, x0, + method='lm') + + def test_sparse_not_supported(self): + p = BroydenTridiagonal() + assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac, + method='lm') + + def test_jac_sparsity_not_supported(self): + assert_raises(ValueError, least_squares, fun_trivial, 2.0, + jac_sparsity=[1], method='lm') + + def test_LinearOperator_not_supported(self): + p = BroydenTridiagonal(mode="operator") + assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac, + method='lm') + + def test_loss(self): + res = least_squares(fun_trivial, 2.0, loss='linear', method='lm') + assert_allclose(res.x, 0.0, atol=1e-4) + + assert_raises(ValueError, least_squares, fun_trivial, 2.0, + method='lm', loss='huber') + + +def test_basic(): + # test that 'method' arg is really optional + res = least_squares(fun_trivial, 2.0) + assert_allclose(res.x, 0, atol=1e-10) + + +def test_small_tolerances_for_lm(): + for ftol, xtol, gtol in [(None, 1e-13, 1e-13), + (1e-13, None, 1e-13), + (1e-13, 1e-13, None)]: + assert_raises(ValueError, least_squares, fun_trivial, 2.0, xtol=xtol, + ftol=ftol, gtol=gtol, method='lm') + + +def test_fp32_gh12991(): + # checks that smaller FP sizes can be used in least_squares + # this is the minimum working example reported for gh12991 + rng = np.random.RandomState(1) + + x = np.linspace(0, 1, 100).astype("float32") + y = rng.random(100).astype("float32") + + def func(p, x): + return p[0] + p[1] * x + + def err(p, x, y): + return func(p, x) - y + + res = least_squares(err, [-1.0, -1.0], args=(x, y)) + # previously the initial jacobian calculated for this would be all 0 + # and the minimize would terminate immediately, with nfev=1, would + # report a successful minimization (it shouldn't have done), but be + # unchanged from the initial solution. + # It was terminating early because the underlying approx_derivative + # used a step size for FP64 when the working space was FP32. 
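# Editorial sketch of the failure mode described above (illustration only,
# not part of the original test): the default 2-point step is roughly
# sqrt(float64 eps) ~ 1.5e-8, which is below float32 resolution
# (eps ~ 1.2e-7), so perturbing a float32 value of order one is a no-op and
# the finite-difference Jacobian collapses to zero.
import numpy as np
h64 = np.sqrt(np.finfo(np.float64).eps)  # default relative step for float64
assert np.float32(1.0) + np.float32(h64) == np.float32(1.0)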
+ assert res.nfev > 2 + assert_allclose(res.x, np.array([0.4082241, 0.15530563]), atol=5e-5) + + +def test_gh_18793_and_19351(): + answer = 1e-12 + initial_guess = 1.1e-12 + + def chi2(x): + return (x-answer)**2 + + gtol = 1e-15 + res = least_squares(chi2, x0=initial_guess, gtol=1e-15, bounds=(0, np.inf)) + # Original motivation: gh-18793 + # if we choose an initial condition that is close to the solution + # we shouldn't return an answer that is further away from the solution + + # Update: gh-19351 + # However this requirement does not go well with 'trf' algorithm logic. + # Some regressions were reported after the presumed fix. + # The returned solution is good as long as it satisfies the convergence + # conditions. + # Specifically in this case the scaled gradient will be sufficiently low. + + scaling, _ = CL_scaling_vector(res.x, res.grad, + np.atleast_1d(0), np.atleast_1d(np.inf)) + assert res.status == 1 # Converged by gradient + assert np.linalg.norm(res.grad * scaling, ord=np.inf) < gtol + + +def test_gh_19103(): + # Checks that least_squares trf method selects a strictly feasible point, + # and thus succeeds instead of failing, + # when the initial guess is reported exactly at a boundary point. + # This is a reduced example from gh191303 + + ydata = np.array([0.] * 66 + [ + 1., 0., 0., 0., 0., 0., 1., 1., 0., 0., 1., + 1., 1., 1., 0., 0., 0., 1., 0., 0., 2., 1., + 0., 3., 1., 6., 5., 0., 0., 2., 8., 4., 4., + 6., 9., 7., 2., 7., 8., 2., 13., 9., 8., 11., + 10., 13., 14., 19., 11., 15., 18., 26., 19., 32., 29., + 28., 36., 32., 35., 36., 43., 52., 32., 58., 56., 52., + 67., 53., 72., 88., 77., 95., 94., 84., 86., 101., 107., + 108., 118., 96., 115., 138., 137., + ]) + xdata = np.arange(0, ydata.size) * 0.1 + + def exponential_wrapped(params): + A, B, x0 = params + return A * np.exp(B * (xdata - x0)) - ydata + + x0 = [0.01, 1., 5.] + bounds = ((0.01, 0, 0), (np.inf, 10, 20.9)) + res = least_squares(exponential_wrapped, x0, method='trf', bounds=bounds) + assert res.success diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_linear_assignment.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_linear_assignment.py new file mode 100644 index 0000000000000000000000000000000000000000..d59792da9eef38e313eaa0bca70f873627f8d3cf --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_linear_assignment.py @@ -0,0 +1,116 @@ +# Author: Brian M. Clapper, G. 
Varoquaux, Lars Buitinck +# License: BSD + +from numpy.testing import assert_array_equal +import pytest + +import numpy as np + +from scipy.optimize import linear_sum_assignment +from scipy.sparse import random +from scipy.sparse._sputils import matrix +from scipy.sparse.csgraph import min_weight_full_bipartite_matching +from scipy.sparse.csgraph.tests.test_matching import ( + linear_sum_assignment_assertions, linear_sum_assignment_test_cases +) + + +def test_linear_sum_assignment_input_shape(): + with pytest.raises(ValueError, match="expected a matrix"): + linear_sum_assignment([1, 2, 3]) + + +def test_linear_sum_assignment_input_object(): + C = [[1, 2, 3], [4, 5, 6]] + assert_array_equal(linear_sum_assignment(C), + linear_sum_assignment(np.asarray(C))) + assert_array_equal(linear_sum_assignment(C), + linear_sum_assignment(matrix(C))) + + +def test_linear_sum_assignment_input_bool(): + I = np.identity(3) + assert_array_equal(linear_sum_assignment(I.astype(np.bool_)), + linear_sum_assignment(I)) + + +def test_linear_sum_assignment_input_string(): + I = np.identity(3) + with pytest.raises(TypeError, match="Cannot cast array data"): + linear_sum_assignment(I.astype(str)) + + +def test_linear_sum_assignment_input_nan(): + I = np.diag([np.nan, 1, 1]) + with pytest.raises(ValueError, match="contains invalid numeric entries"): + linear_sum_assignment(I) + + +def test_linear_sum_assignment_input_neginf(): + I = np.diag([1, -np.inf, 1]) + with pytest.raises(ValueError, match="contains invalid numeric entries"): + linear_sum_assignment(I) + + +def test_linear_sum_assignment_input_inf(): + I = np.identity(3) + I[:, 0] = np.inf + with pytest.raises(ValueError, match="cost matrix is infeasible"): + linear_sum_assignment(I) + + +def test_constant_cost_matrix(): + # Fixes #11602 + n = 8 + C = np.ones((n, n)) + row_ind, col_ind = linear_sum_assignment(C) + assert_array_equal(row_ind, np.arange(n)) + assert_array_equal(col_ind, np.arange(n)) + + +@pytest.mark.parametrize('num_rows,num_cols', [(0, 0), (2, 0), (0, 3)]) +def test_linear_sum_assignment_trivial_cost(num_rows, num_cols): + C = np.empty(shape=(num_cols, num_rows)) + row_ind, col_ind = linear_sum_assignment(C) + assert len(row_ind) == 0 + assert len(col_ind) == 0 + + +@pytest.mark.parametrize('sign,test_case', linear_sum_assignment_test_cases) +def test_linear_sum_assignment_small_inputs(sign, test_case): + linear_sum_assignment_assertions( + linear_sum_assignment, np.array, sign, test_case) + + +# Tests that combine scipy.optimize.linear_sum_assignment and +# scipy.sparse.csgraph.min_weight_full_bipartite_matching +def test_two_methods_give_same_result_on_many_sparse_inputs(): + # As opposed to the test above, here we do not spell out the expected + # output; only assert that the two methods give the same result. + # Concretely, the below tests 100 cases of size 100x100, out of which + # 36 are infeasible. 
+ np.random.seed(1234) + for _ in range(100): + lsa_raises = False + mwfbm_raises = False + sparse = random(100, 100, density=0.06, + data_rvs=lambda size: np.random.randint(1, 100, size)) + # In csgraph, zeros correspond to missing edges, so we explicitly + # replace those with infinities + dense = np.full(sparse.shape, np.inf) + dense[sparse.row, sparse.col] = sparse.data + sparse = sparse.tocsr() + try: + row_ind, col_ind = linear_sum_assignment(dense) + lsa_cost = dense[row_ind, col_ind].sum() + except ValueError: + lsa_raises = True + try: + row_ind, col_ind = min_weight_full_bipartite_matching(sparse) + mwfbm_cost = sparse[row_ind, col_ind].sum() + except ValueError: + mwfbm_raises = True + # Ensure that if one method raises, so does the other one. + assert lsa_raises == mwfbm_raises + if not lsa_raises: + assert lsa_cost == mwfbm_cost diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_linesearch.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_linesearch.py new file mode 100644 index 0000000000000000000000000000000000000000..6eee0743d97665185c35cf144d66e00542925480 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_linesearch.py @@ -0,0 +1,328 @@ +""" +Tests for line search routines +""" +from numpy.testing import (assert_equal, assert_array_almost_equal, + assert_array_almost_equal_nulp, assert_warns, + suppress_warnings) +import scipy.optimize._linesearch as ls +from scipy.optimize._linesearch import LineSearchWarning +import numpy as np +import pytest +import threading + + +def assert_wolfe(s, phi, derphi, c1=1e-4, c2=0.9, err_msg=""): + """ + Check that strong Wolfe conditions apply + """ + phi1 = phi(s) + phi0 = phi(0) + derphi0 = derphi(0) + derphi1 = derphi(s) + msg = (f"s = {s}; phi(0) = {phi0}; phi(s) = {phi1}; phi'(0) = {derphi0};" + f" phi'(s) = {derphi1}; {err_msg}") + + assert phi1 <= phi0 + c1*s*derphi0, "Wolfe 1 failed: " + msg + assert abs(derphi1) <= abs(c2*derphi0), "Wolfe 2 failed: " + msg + + +def assert_armijo(s, phi, c1=1e-4, err_msg=""): + """ + Check that Armijo condition applies + """ + phi1 = phi(s) + phi0 = phi(0) + msg = f"s = {s}; phi(0) = {phi0}; phi(s) = {phi1}; {err_msg}" + assert phi1 <= (1 - c1*s)*phi0, msg + + +def assert_line_wolfe(x, p, s, f, fprime, **kw): + assert_wolfe(s, phi=lambda sp: f(x + p*sp), + derphi=lambda sp: np.dot(fprime(x + p*sp), p), **kw) + + +def assert_line_armijo(x, p, s, f, **kw): + assert_armijo(s, phi=lambda sp: f(x + p*sp), **kw) + + +def assert_fp_equal(x, y, err_msg="", nulp=50): + """Assert two arrays are equal, up to some floating-point rounding error""" + try: + assert_array_almost_equal_nulp(x, y, nulp) + except AssertionError as e: + raise AssertionError(f"{e}\n{err_msg}") from e + + +class TestLineSearch: + # -- scalar functions; must have dphi(0.) 
< 0 + def _scalar_func_1(self, s): # skip name check + if not hasattr(self.fcount, 'c'): + self.fcount.c = 0 + self.fcount.c += 1 + p = -s - s**3 + s**4 + dp = -1 - 3*s**2 + 4*s**3 + return p, dp + + def _scalar_func_2(self, s): # skip name check + if not hasattr(self.fcount, 'c'): + self.fcount.c = 0 + self.fcount.c += 1 + p = np.exp(-4*s) + s**2 + dp = -4*np.exp(-4*s) + 2*s + return p, dp + + def _scalar_func_3(self, s): # skip name check + if not hasattr(self.fcount, 'c'): + self.fcount.c = 0 + self.fcount.c += 1 + p = -np.sin(10*s) + dp = -10*np.cos(10*s) + return p, dp + + # -- n-d functions + + def _line_func_1(self, x): # skip name check + if not hasattr(self.fcount, 'c'): + self.fcount.c = 0 + self.fcount.c += 1 + f = np.dot(x, x) + df = 2*x + return f, df + + def _line_func_2(self, x): # skip name check + if not hasattr(self.fcount, 'c'): + self.fcount.c = 0 + self.fcount.c += 1 + f = np.dot(x, np.dot(self.A, x)) + 1 + df = np.dot(self.A + self.A.T, x) + return f, df + + # -- + + def setup_method(self): + self.scalar_funcs = [] + self.line_funcs = [] + self.N = 20 + self.fcount = threading.local() + + def bind_index(func, idx): + # Remember Python's closure semantics! + return lambda *a, **kw: func(*a, **kw)[idx] + + for name in sorted(dir(self)): + if name.startswith('_scalar_func_'): + value = getattr(self, name) + self.scalar_funcs.append( + (name, bind_index(value, 0), bind_index(value, 1))) + elif name.startswith('_line_func_'): + value = getattr(self, name) + self.line_funcs.append( + (name, bind_index(value, 0), bind_index(value, 1))) + + np.random.seed(1234) + self.A = np.random.randn(self.N, self.N) + + def scalar_iter(self): + for name, phi, derphi in self.scalar_funcs: + for old_phi0 in np.random.randn(3): + yield name, phi, derphi, old_phi0 + + def line_iter(self): + rng = np.random.RandomState(1234) + for name, f, fprime in self.line_funcs: + k = 0 + while k < 9: + x = rng.randn(self.N) + p = rng.randn(self.N) + if np.dot(p, fprime(x)) >= 0: + # always pick a descent direction + continue + k += 1 + old_fv = float(rng.randn()) + yield name, f, fprime, x, p, old_fv + + # -- Generic scalar searches + + def test_scalar_search_wolfe1(self): + c = 0 + for name, phi, derphi, old_phi0 in self.scalar_iter(): + c += 1 + s, phi1, phi0 = ls.scalar_search_wolfe1(phi, derphi, phi(0), + old_phi0, derphi(0)) + assert_fp_equal(phi0, phi(0), name) + assert_fp_equal(phi1, phi(s), name) + assert_wolfe(s, phi, derphi, err_msg=name) + + assert c > 3 # check that the iterator really works... + + def test_scalar_search_wolfe2(self): + for name, phi, derphi, old_phi0 in self.scalar_iter(): + s, phi1, phi0, derphi1 = ls.scalar_search_wolfe2( + phi, derphi, phi(0), old_phi0, derphi(0)) + assert_fp_equal(phi0, phi(0), name) + assert_fp_equal(phi1, phi(s), name) + if derphi1 is not None: + assert_fp_equal(derphi1, derphi(s), name) + assert_wolfe(s, phi, derphi, err_msg=f"{name} {old_phi0:g}") + + def test_scalar_search_wolfe2_with_low_amax(self): + def phi(alpha): + return (alpha - 5) ** 2 + + def derphi(alpha): + return 2 * (alpha - 5) + + alpha_star, _, _, derphi_star = ls.scalar_search_wolfe2(phi, derphi, amax=0.001) + assert alpha_star is None # Not converged + assert derphi_star is None # Not converged + + def test_scalar_search_wolfe2_regression(self): + # Regression test for gh-12157 + # This phi has its minimum at alpha=4/3 ~ 1.333. 
+ def phi(alpha): + if alpha < 1: + return - 3*np.pi/2 * (alpha - 1) + else: + return np.cos(3*np.pi/2 * alpha - np.pi) + + def derphi(alpha): + if alpha < 1: + return - 3*np.pi/2 + else: + return - 3*np.pi/2 * np.sin(3*np.pi/2 * alpha - np.pi) + + s, _, _, _ = ls.scalar_search_wolfe2(phi, derphi) + # Without the fix in gh-13073, the scalar_search_wolfe2 + # returned s=2.0 instead. + assert s < 1.5 + + def test_scalar_search_armijo(self): + for name, phi, derphi, old_phi0 in self.scalar_iter(): + s, phi1 = ls.scalar_search_armijo(phi, phi(0), derphi(0)) + assert_fp_equal(phi1, phi(s), name) + assert_armijo(s, phi, err_msg=f"{name} {old_phi0:g}") + + # -- Generic line searches + + def test_line_search_wolfe1(self): + c = 0 + smax = 100 + for name, f, fprime, x, p, old_f in self.line_iter(): + f0 = f(x) + g0 = fprime(x) + self.fcount.c = 0 + s, fc, gc, fv, ofv, gv = ls.line_search_wolfe1(f, fprime, x, p, + g0, f0, old_f, + amax=smax) + assert_equal(self.fcount.c, fc+gc) + assert_fp_equal(ofv, f(x)) + if s is None: + continue + assert_fp_equal(fv, f(x + s*p)) + assert_array_almost_equal(gv, fprime(x + s*p), decimal=14) + if s < smax: + c += 1 + assert_line_wolfe(x, p, s, f, fprime, err_msg=name) + + assert c > 3 # check that the iterator really works... + + def test_line_search_wolfe2(self): + c = 0 + smax = 512 + for name, f, fprime, x, p, old_f in self.line_iter(): + f0 = f(x) + g0 = fprime(x) + self.fcount.c = 0 + with suppress_warnings() as sup: + sup.filter(LineSearchWarning, + "The line search algorithm could not find a solution") + sup.filter(LineSearchWarning, + "The line search algorithm did not converge") + s, fc, gc, fv, ofv, gv = ls.line_search_wolfe2(f, fprime, x, p, + g0, f0, old_f, + amax=smax) + assert_equal(self.fcount.c, fc+gc) + assert_fp_equal(ofv, f(x)) + assert_fp_equal(fv, f(x + s*p)) + if gv is not None: + assert_array_almost_equal(gv, fprime(x + s*p), decimal=14) + if s < smax: + c += 1 + assert_line_wolfe(x, p, s, f, fprime, err_msg=name) + assert c > 3 # check that the iterator really works... 
+ + @pytest.mark.thread_unsafe + def test_line_search_wolfe2_bounds(self): + # See gh-7475 + + # For this f and p, starting at a point on axis 0, the strong Wolfe + # condition 2 is met if and only if the step length s satisfies + # |x + s| <= c2 * |x| + def f(x): + return np.dot(x, x) + def fp(x): + return 2 * x + p = np.array([1, 0]) + + # Smallest s satisfying strong Wolfe conditions for these arguments is 30 + x = -60 * p + c2 = 0.5 + + s, _, _, _, _, _ = ls.line_search_wolfe2(f, fp, x, p, amax=30, c2=c2) + assert_line_wolfe(x, p, s, f, fp) + + s, _, _, _, _, _ = assert_warns(LineSearchWarning, + ls.line_search_wolfe2, f, fp, x, p, + amax=29, c2=c2) + assert s is None + + # s=30 will only be tried on the 6th iteration, so this won't converge + assert_warns(LineSearchWarning, ls.line_search_wolfe2, f, fp, x, p, + c2=c2, maxiter=5) + + def test_line_search_armijo(self): + c = 0 + for name, f, fprime, x, p, old_f in self.line_iter(): + f0 = f(x) + g0 = fprime(x) + self.fcount.c = 0 + s, fc, fv = ls.line_search_armijo(f, x, p, g0, f0) + c += 1 + assert_equal(self.fcount.c, fc) + assert_fp_equal(fv, f(x + s*p)) + assert_line_armijo(x, p, s, f, err_msg=name) + assert c >= 9 + + # -- More specific tests + + def test_armijo_terminate_1(self): + # Armijo should evaluate the function only once if the trial step + # is already suitable + count = [0] + + def phi(s): + count[0] += 1 + return -s + 0.01*s**2 + s, phi1 = ls.scalar_search_armijo(phi, phi(0), -1, alpha0=1) + assert_equal(s, 1) + assert_equal(count[0], 2) + assert_armijo(s, phi) + + def test_wolfe_terminate(self): + # wolfe1 and wolfe2 should also evaluate the function only a few + # times if the trial step is already suitable + + def phi(s): + count[0] += 1 + return -s + 0.05*s**2 + + def derphi(s): + count[0] += 1 + return -1 + 0.05*2*s + + for func in [ls.scalar_search_wolfe1, ls.scalar_search_wolfe2]: + count = [0] + r = func(phi, derphi, phi(0), None, derphi(0)) + assert r[0] is not None, (r, func) + assert count[0] <= 2 + 2, (count, func) + assert_wolfe(r[0], phi, derphi, err_msg=str(func)) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_linprog.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_linprog.py new file mode 100644 index 0000000000000000000000000000000000000000..4d18e68e394c31e3bc19f49e80c8e9adbc055193 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_linprog.py @@ -0,0 +1,2577 @@ +""" +Unit test for Linear Programming +""" +import sys +import platform + +import numpy as np +from numpy.testing import (assert_, assert_allclose, assert_equal, + assert_array_less, assert_warns, suppress_warnings) +from pytest import raises as assert_raises +from scipy.optimize import linprog, OptimizeWarning +from scipy.optimize._numdiff import approx_derivative +from scipy.sparse.linalg import MatrixRankWarning +from scipy.linalg import LinAlgWarning +from scipy._lib._util import VisibleDeprecationWarning +import scipy.sparse +import pytest + +has_umfpack = True +try: + from scikits.umfpack import UmfpackWarning +except ImportError: + has_umfpack = False + +has_cholmod = True +try: + import sksparse # noqa: F401 + from sksparse.cholmod import cholesky as cholmod # noqa: F401 +except ImportError: + has_cholmod = False + + +def _assert_iteration_limit_reached(res, maxiter): + assert_(not res.success, "Incorrectly reported success") + 
assert_(res.nit <= maxiter, "Incorrectly reported number of iterations") + assert_equal(res.status, 1, "Failed to report iteration limit reached") + + +def _assert_infeasible(res): + # res: linprog result object + assert_(not res.success, "incorrectly reported success") + assert_equal(res.status, 2, "failed to report infeasible status") + + +def _assert_unbounded(res): + # res: linprog result object + assert_(not res.success, "incorrectly reported success") + assert_equal(res.status, 3, "failed to report unbounded status") + + +def _assert_unable_to_find_basic_feasible_sol(res): + # res: linprog result object + + # The status may be either 2 or 4 depending on why the feasible solution + # could not be found. If the underlying problem is expected to not have a + # feasible solution, _assert_infeasible should be used. + assert_(not res.success, "incorrectly reported success") + assert_(res.status in (2, 4), "failed to report optimization failure") + + +def _assert_success(res, desired_fun=None, desired_x=None, + rtol=1e-8, atol=1e-8): + # res: linprog result object + # desired_fun: desired objective function value or None + # desired_x: desired solution or None + if not res.success: + msg = f"linprog status {res.status}, message: {res.message}" + raise AssertionError(msg) + + assert_equal(res.status, 0) + if desired_fun is not None: + assert_allclose(res.fun, desired_fun, + err_msg="converged to an unexpected objective value", + rtol=rtol, atol=atol) + if desired_x is not None: + assert_allclose(res.x, desired_x, + err_msg="converged to an unexpected solution", + rtol=rtol, atol=atol) + + +def magic_square(n): + """ + Generates a linear program for which integer solutions represent an + n x n magic square; binary decision variables represent the presence + (or absence) of an integer 1 to n^2 in each position of the square.
+ """ + + rng = np.random.RandomState(0) + M = n * (n**2 + 1) / 2 + + numbers = np.arange(n**4) // n**2 + 1 + + numbers = numbers.reshape(n**2, n, n) + + zeros = np.zeros((n**2, n, n)) + + A_list = [] + b_list = [] + + # Rule 1: use every number exactly once + for i in range(n**2): + A_row = zeros.copy() + A_row[i, :, :] = 1 + A_list.append(A_row.flatten()) + b_list.append(1) + + # Rule 2: Only one number per square + for i in range(n): + for j in range(n): + A_row = zeros.copy() + A_row[:, i, j] = 1 + A_list.append(A_row.flatten()) + b_list.append(1) + + # Rule 3: sum of rows is M + for i in range(n): + A_row = zeros.copy() + A_row[:, i, :] = numbers[:, i, :] + A_list.append(A_row.flatten()) + b_list.append(M) + + # Rule 4: sum of columns is M + for i in range(n): + A_row = zeros.copy() + A_row[:, :, i] = numbers[:, :, i] + A_list.append(A_row.flatten()) + b_list.append(M) + + # Rule 5: sum of diagonals is M + A_row = zeros.copy() + A_row[:, range(n), range(n)] = numbers[:, range(n), range(n)] + A_list.append(A_row.flatten()) + b_list.append(M) + A_row = zeros.copy() + A_row[:, range(n), range(-1, -n - 1, -1)] = \ + numbers[:, range(n), range(-1, -n - 1, -1)] + A_list.append(A_row.flatten()) + b_list.append(M) + + A = np.array(np.vstack(A_list), dtype=float) + b = np.array(b_list, dtype=float) + c = rng.rand(A.shape[1]) + + return A, b, c, numbers, M + + +def lpgen_2d(m, n): + """ -> A b c LP test: m*n vars, m+n constraints + row sums == n/m, col sums == 1 + https://gist.github.com/denis-bz/8647461 + """ + rng = np.random.RandomState(0) + c = - rng.exponential(size=(m, n)) + Arow = np.zeros((m, m * n)) + brow = np.zeros(m) + for j in range(m): + j1 = j + 1 + Arow[j, j * n:j1 * n] = 1 + brow[j] = n / m + + Acol = np.zeros((n, m * n)) + bcol = np.zeros(n) + for j in range(n): + j1 = j + 1 + Acol[j, j::n] = 1 + bcol[j] = 1 + + A = np.vstack((Arow, Acol)) + b = np.hstack((brow, bcol)) + + return A, b, c.ravel() + + +def very_random_gen(seed=0): + rng = np.random.RandomState(seed) + m_eq, m_ub, n = 10, 20, 50 + c = rng.rand(n)-0.5 + A_ub = rng.rand(m_ub, n)-0.5 + b_ub = rng.rand(m_ub)-0.5 + A_eq = rng.rand(m_eq, n)-0.5 + b_eq = rng.rand(m_eq)-0.5 + lb = -rng.rand(n) + ub = rng.rand(n) + lb[lb < -rng.rand()] = -np.inf + ub[ub > rng.rand()] = np.inf + bounds = np.vstack((lb, ub)).T + return c, A_ub, b_ub, A_eq, b_eq, bounds + + +def nontrivial_problem(): + c = [-1, 8, 4, -6] + A_ub = [[-7, -7, 6, 9], + [1, -1, -3, 0], + [10, -10, -7, 7], + [6, -1, 3, 4]] + b_ub = [-3, 6, -6, 6] + A_eq = [[-10, 1, 1, -8]] + b_eq = [-4] + x_star = [101 / 1391, 1462 / 1391, 0, 752 / 1391] + f_star = 7083 / 1391 + return c, A_ub, b_ub, A_eq, b_eq, x_star, f_star + + +def l1_regression_prob(seed=0, m=8, d=9, n=100): + ''' + Training data is {(x0, y0), (x1, y2), ..., (xn-1, yn-1)} + x in R^d + y in R + n: number of training samples + d: dimension of x, i.e. 
x in R^d + phi: feature map R^d -> R^m + m: dimension of feature space + ''' + rng = np.random.RandomState(seed) + phi = rng.normal(0, 1, size=(m, d)) # random feature mapping + w_true = rng.randn(m) + x = rng.normal(0, 1, size=(d, n)) # features + y = w_true @ (phi @ x) + rng.normal(0, 1e-5, size=n) # measurements + + # construct the problem + c = np.ones(m+n) + c[:m] = 0 + A_ub = scipy.sparse.lil_matrix((2*n, n+m)) + idx = 0 + for ii in range(n): + A_ub[idx, :m] = phi @ x[:, ii] + A_ub[idx, m+ii] = -1 + A_ub[idx+1, :m] = -1*phi @ x[:, ii] + A_ub[idx+1, m+ii] = -1 + idx += 2 + A_ub = A_ub.tocsc() + b_ub = np.zeros(2*n) + b_ub[0::2] = y + b_ub[1::2] = -y + bnds = [(None, None)]*m + [(0, None)]*n + return c, A_ub, b_ub, bnds + + +def generic_callback_test(self): + # Check that callback is as advertised + last_cb = {} + + def cb(res): + message = res.pop('message') + complete = res.pop('complete') + + assert_(res.pop('phase') in (1, 2)) + assert_(res.pop('status') in range(4)) + assert_(isinstance(res.pop('nit'), int)) + assert_(isinstance(complete, bool)) + assert_(isinstance(message, str)) + + last_cb['x'] = res['x'] + last_cb['fun'] = res['fun'] + last_cb['slack'] = res['slack'] + last_cb['con'] = res['con'] + + c = np.array([-3, -2]) + A_ub = [[2, 1], [1, 1], [1, 0]] + b_ub = [10, 8, 4] + res = linprog(c, A_ub=A_ub, b_ub=b_ub, callback=cb, method=self.method) + + _assert_success(res, desired_fun=-18.0, desired_x=[2, 6]) + assert_allclose(last_cb['fun'], res['fun']) + assert_allclose(last_cb['x'], res['x']) + assert_allclose(last_cb['con'], res['con']) + assert_allclose(last_cb['slack'], res['slack']) + + +@pytest.mark.thread_unsafe +def test_unknown_solvers_and_options(): + c = np.array([-3, -2]) + A_ub = [[2, 1], [1, 1], [1, 0]] + b_ub = [10, 8, 4] + + assert_raises(ValueError, linprog, + c, A_ub=A_ub, b_ub=b_ub, method='ekki-ekki-ekki') + assert_raises(ValueError, linprog, + c, A_ub=A_ub, b_ub=b_ub, method='highs-ekki') + message = "Unrecognized options detected: {'rr_method': 'ekki-ekki-ekki'}" + with pytest.warns(OptimizeWarning, match=message): + linprog(c, A_ub=A_ub, b_ub=b_ub, + options={"rr_method": 'ekki-ekki-ekki'}) + + +def test_choose_solver(): + # 'highs' chooses 'dual' + c = np.array([-3, -2]) + A_ub = [[2, 1], [1, 1], [1, 0]] + b_ub = [10, 8, 4] + + res = linprog(c, A_ub, b_ub, method='highs') + _assert_success(res, desired_fun=-18.0, desired_x=[2, 6]) + + +@pytest.mark.thread_unsafe +def test_deprecation(): + with pytest.warns(DeprecationWarning): + linprog(1, method='interior-point') + with pytest.warns(DeprecationWarning): + linprog(1, method='revised simplex') + with pytest.warns(DeprecationWarning): + linprog(1, method='simplex') + + +def test_highs_status_message(): + res = linprog(1, method='highs') + msg = "Optimization terminated successfully. (HiGHS Status 7:" + assert res.status == 0 + assert res.message.startswith(msg) + + A, b, c, numbers, M = magic_square(6) + bounds = [(0, 1)] * len(c) + integrality = [1] * len(c) + options = {"time_limit": 0.1} + res = linprog(c=c, A_eq=A, b_eq=b, bounds=bounds, method='highs', + options=options, integrality=integrality) + msg = "Time limit reached. (HiGHS Status 13:" + assert res.status == 1 + assert res.message.startswith(msg) + + options = {"maxiter": 10} + res = linprog(c=c, A_eq=A, b_eq=b, bounds=bounds, method='highs-ds', + options=options) + msg = "Iteration limit reached. 
(HiGHS Status 14:" + assert res.status == 1 + assert res.message.startswith(msg) + + res = linprog(1, bounds=(1, -1), method='highs') + msg = "The problem is infeasible. (HiGHS Status 8:" + assert res.status == 2 + assert res.message.startswith(msg) + + res = linprog(-1, method='highs') + msg = "The problem is unbounded. (HiGHS Status 10:" + assert res.status == 3 + assert res.message.startswith(msg) + + from scipy.optimize._linprog_highs import _highs_to_scipy_status_message + status, message = _highs_to_scipy_status_message(58, "Hello!") + msg = "The HiGHS status code was not recognized. (HiGHS Status 58:" + assert status == 4 + assert message.startswith(msg) + + status, message = _highs_to_scipy_status_message(None, None) + msg = "HiGHS did not provide a status code. (HiGHS Status None: None)" + assert status == 4 + assert message.startswith(msg) + + +def test_bug_17380(): + linprog([1, 1], A_ub=[[-1, 0]], b_ub=[-2.5], integrality=[1, 1]) + + +A_ub = None +b_ub = None +A_eq = None +b_eq = None +bounds = None + +################ +# Common Tests # +################ + + +class LinprogCommonTests: + """ + Base class for `linprog` tests. Generally, each test will be performed + once for every derived class of LinprogCommonTests, each of which will + typically change self.options and/or self.method. Effectively, these tests + are run for many combination of method (simplex, revised simplex, and + interior point) and options (such as pivoting rule or sparse treatment). + """ + + ################## + # Targeted Tests # + ################## + + def test_callback(self): + generic_callback_test(self) + + def test_disp(self): + # test that display option does not break anything. + A, b, c = lpgen_2d(20, 20) + res = linprog(c, A_ub=A, b_ub=b, method=self.method, + options={"disp": True}) + _assert_success(res, desired_fun=-64.049494229) + + def test_docstring_example(self): + # Example from linprog docstring. 
+ c = [-1, 4] + A = [[-3, 1], [1, 2]] + b = [6, 4] + x0_bounds = (None, None) + x1_bounds = (-3, None) + res = linprog(c, A_ub=A, b_ub=b, bounds=(x0_bounds, x1_bounds), + options=self.options, method=self.method) + _assert_success(res, desired_fun=-22) + + def test_type_error(self): + # (presumably) checks that linprog recognizes type errors + # This is tested more carefully in test__linprog_clean_inputs.py + c = [1] + A_eq = [[1]] + b_eq = "hello" + assert_raises(TypeError, linprog, + c, A_eq=A_eq, b_eq=b_eq, + method=self.method, options=self.options) + + def test_aliasing_b_ub(self): + # (presumably) checks that linprog does not modify b_ub + # This is tested more carefully in test__linprog_clean_inputs.py + c = np.array([1.0]) + A_ub = np.array([[1.0]]) + b_ub_orig = np.array([3.0]) + b_ub = b_ub_orig.copy() + bounds = (-4.0, np.inf) + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_fun=-4, desired_x=[-4]) + assert_allclose(b_ub_orig, b_ub) + + def test_aliasing_b_eq(self): + # (presumably) checks that linprog does not modify b_eq + # This is tested more carefully in test__linprog_clean_inputs.py + c = np.array([1.0]) + A_eq = np.array([[1.0]]) + b_eq_orig = np.array([3.0]) + b_eq = b_eq_orig.copy() + bounds = (-4.0, np.inf) + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_fun=3, desired_x=[3]) + assert_allclose(b_eq_orig, b_eq) + + def test_non_ndarray_args(self): + # (presumably) checks that linprog accepts list in place of arrays + # This is tested more carefully in test__linprog_clean_inputs.py + c = [1.0] + A_ub = [[1.0]] + b_ub = [3.0] + A_eq = [[1.0]] + b_eq = [2.0] + bounds = (-1.0, 10.0) + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_fun=2, desired_x=[2]) + + @pytest.mark.thread_unsafe + def test_unknown_options(self): + c = np.array([-3, -2]) + A_ub = [[2, 1], [1, 1], [1, 0]] + b_ub = [10, 8, 4] + + def f(c, A_ub=None, b_ub=None, A_eq=None, + b_eq=None, bounds=None, options=None): + linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=options) + + o = {key: self.options[key] for key in self.options} + o['spam'] = 42 + + assert_warns(OptimizeWarning, f, + c, A_ub=A_ub, b_ub=b_ub, options=o) + + @pytest.mark.thread_unsafe + def test_integrality_without_highs(self): + # ensure that using `integrality` parameter without `method='highs'` + # raises warning and produces correct solution to relaxed problem + # source: https://en.wikipedia.org/wiki/Integer_programming#Example + A_ub = np.array([[-1, 1], [3, 2], [2, 3]]) + b_ub = np.array([1, 12, 12]) + c = -np.array([0, 1]) + + bounds = [(0, np.inf)] * len(c) + integrality = [1] * len(c) + + with np.testing.assert_warns(OptimizeWarning): + res = linprog(c=c, A_ub=A_ub, b_ub=b_ub, bounds=bounds, + method=self.method, integrality=integrality) + + np.testing.assert_allclose(res.x, [1.8, 2.8]) + np.testing.assert_allclose(res.fun, -2.8) + + def test_invalid_inputs(self): + + def f(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None, bounds=None): + linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + + # Test ill-formatted bounds + assert_raises(ValueError, f, [1, 2, 3], bounds=[(1, 2), (3, 4)]) + with np.testing.suppress_warnings() as sup: + sup.filter(VisibleDeprecationWarning, "Creating an ndarray from ragged") + assert_raises(ValueError, f, [1, 2, 3], 
bounds=[(1, 2), (3, 4), (3, 4, 5)]) + assert_raises(ValueError, f, [1, 2, 3], bounds=[(1, -2), (1, 2)]) + + # Test other invalid inputs + assert_raises(ValueError, f, [1, 2], A_ub=[[1, 2]], b_ub=[1, 2]) + assert_raises(ValueError, f, [1, 2], A_ub=[[1]], b_ub=[1]) + assert_raises(ValueError, f, [1, 2], A_eq=[[1, 2]], b_eq=[1, 2]) + assert_raises(ValueError, f, [1, 2], A_eq=[[1]], b_eq=[1]) + assert_raises(ValueError, f, [1, 2], A_eq=[1], b_eq=1) + + # this last check doesn't make sense for sparse presolve + if ("_sparse_presolve" in self.options and + self.options["_sparse_presolve"]): + return + # there aren't 3-D sparse matrices + + assert_raises(ValueError, f, [1, 2], A_ub=np.zeros((1, 1, 3)), b_eq=1) + + def test_sparse_constraints(self): + # gh-13559: improve error message for sparse inputs when unsupported + def f(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None, bounds=None): + linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + + rng = np.random.RandomState(0) + m = 100 + n = 150 + A_eq = scipy.sparse.rand(m, n, 0.5) + x_valid = rng.randn(n) + c = rng.randn(n) + ub = x_valid + rng.rand(n) + lb = x_valid - rng.rand(n) + bounds = np.column_stack((lb, ub)) + b_eq = A_eq @ x_valid + + if self.method in {'simplex', 'revised simplex'}: + # simplex and revised simplex should raise error + with assert_raises(ValueError, match=f"Method '{self.method}' " + "does not support sparse constraint matrices."): + linprog(c=c, A_eq=A_eq, b_eq=b_eq, bounds=bounds, + method=self.method, options=self.options) + else: + # other methods should succeed + options = {**self.options} + if self.method in {'interior-point'}: + options['sparse'] = True + + res = linprog(c=c, A_eq=A_eq, b_eq=b_eq, bounds=bounds, + method=self.method, options=options) + assert res.success + + def test_maxiter(self): + # test iteration limit w/ Enzo example + c = [4, 8, 3, 0, 0, 0] + A = [ + [2, 5, 3, -1, 0, 0], + [3, 2.5, 8, 0, -1, 0], + [8, 10, 4, 0, 0, -1]] + b = [185, 155, 600] + np.random.seed(0) + maxiter = 3 + res = linprog(c, A_eq=A, b_eq=b, method=self.method, + options={"maxiter": maxiter}) + _assert_iteration_limit_reached(res, maxiter) + assert_equal(res.nit, maxiter) + + def test_bounds_fixed(self): + + # Test fixed bounds (upper equal to lower) + # If presolve option True, test if solution found in presolve (i.e. + # number of iterations is 0). + do_presolve = self.options.get('presolve', True) + + res = linprog([1], bounds=(1, 1), + method=self.method, options=self.options) + _assert_success(res, 1, 1) + if do_presolve: + assert_equal(res.nit, 0) + + res = linprog([1, 2, 3], bounds=[(5, 5), (-1, -1), (3, 3)], + method=self.method, options=self.options) + _assert_success(res, 12, [5, -1, 3]) + if do_presolve: + assert_equal(res.nit, 0) + + res = linprog([1, 1], bounds=[(1, 1), (1, 3)], + method=self.method, options=self.options) + _assert_success(res, 2, [1, 1]) + if do_presolve: + assert_equal(res.nit, 0) + + res = linprog([1, 1, 2], A_eq=[[1, 0, 0], [0, 1, 0]], b_eq=[1, 7], + bounds=[(-5, 5), (0, 10), (3.5, 3.5)], + method=self.method, options=self.options) + _assert_success(res, 15, [1, 7, 3.5]) + if do_presolve: + assert_equal(res.nit, 0) + + def test_bounds_infeasible(self): + + # Test ill-valued bounds (upper less than lower) + # If presolve option True, test if solution found in presolve (i.e. + # number of iterations is 0). 
+ do_presolve = self.options.get('presolve', True) + + res = linprog([1], bounds=(1, -2), method=self.method, options=self.options) + _assert_infeasible(res) + if do_presolve: + assert_equal(res.nit, 0) + + res = linprog([1], bounds=[(1, -2)], method=self.method, options=self.options) + _assert_infeasible(res) + if do_presolve: + assert_equal(res.nit, 0) + + res = linprog([1, 2, 3], bounds=[(5, 0), (1, 2), (3, 4)], + method=self.method, options=self.options) + _assert_infeasible(res) + if do_presolve: + assert_equal(res.nit, 0) + + @pytest.mark.thread_unsafe + def test_bounds_infeasible_2(self): + + # Test ill-valued bounds (lower inf, upper -inf) + # If presolve option True, test if solution found in presolve (i.e. + # number of iterations is 0). + # For the simplex method, the cases do not result in an + # infeasible status, but in a RuntimeWarning. This is a + # consequence of having _presolve() take care of feasibility + # checks. See issue gh-11618. + do_presolve = self.options.get('presolve', True) + simplex_without_presolve = not do_presolve and self.method == 'simplex' + + c = [1, 2, 3] + bounds_1 = [(1, 2), (np.inf, np.inf), (3, 4)] + bounds_2 = [(1, 2), (-np.inf, -np.inf), (3, 4)] + + if simplex_without_presolve: + def g(c, bounds): + res = linprog(c, bounds=bounds, + method=self.method, options=self.options) + return res + + with pytest.warns(RuntimeWarning): + with pytest.raises(IndexError): + g(c, bounds=bounds_1) + + with pytest.warns(RuntimeWarning): + with pytest.raises(IndexError): + g(c, bounds=bounds_2) + else: + res = linprog(c=c, bounds=bounds_1, + method=self.method, options=self.options) + _assert_infeasible(res) + if do_presolve: + assert_equal(res.nit, 0) + res = linprog(c=c, bounds=bounds_2, + method=self.method, options=self.options) + _assert_infeasible(res) + if do_presolve: + assert_equal(res.nit, 0) + + def test_empty_constraint_1(self): + c = [-1, -2] + res = linprog(c, method=self.method, options=self.options) + _assert_unbounded(res) + + def test_empty_constraint_2(self): + c = [-1, 1, -1, 1] + bounds = [(0, np.inf), (-np.inf, 0), (-1, 1), (-1, 1)] + res = linprog(c, bounds=bounds, + method=self.method, options=self.options) + _assert_unbounded(res) + # Unboundedness detected in presolve requires no iterations + if self.options.get('presolve', True): + assert_equal(res.nit, 0) + + def test_empty_constraint_3(self): + c = [1, -1, 1, -1] + bounds = [(0, np.inf), (-np.inf, 0), (-1, 1), (-1, 1)] + res = linprog(c, bounds=bounds, + method=self.method, options=self.options) + _assert_success(res, desired_x=[0, 0, -1, 1], desired_fun=-2) + + def test_inequality_constraints(self): + # Minimize linear function subject to linear inequality constraints. + # http://www.dam.brown.edu/people/huiwang/classes/am121/Archive/simplex_121_c.pdf + c = np.array([3, 2]) * -1 # maximize + A_ub = [[2, 1], + [1, 1], + [1, 0]] + b_ub = [10, 8, 4] + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_fun=-18, desired_x=[2, 6]) + + def test_inequality_constraints2(self): + # Minimize linear function subject to linear inequality constraints. 
+ # http://www.statslab.cam.ac.uk/~ff271/teaching/opt/notes/notes8.pdf + # (dead link) + c = [6, 3] + A_ub = [[0, 3], + [-1, -1], + [-2, 1]] + b_ub = [2, -1, -1] + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_fun=5, desired_x=[2 / 3, 1 / 3]) + + def test_bounds_simple(self): + c = [1, 2] + bounds = (1, 2) + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_x=[1, 1]) + + bounds = [(1, 2), (1, 2)] + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_x=[1, 1]) + + def test_bounded_below_only_1(self): + c = np.array([1.0]) + A_eq = np.array([[1.0]]) + b_eq = np.array([3.0]) + bounds = (1.0, None) + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_fun=3, desired_x=[3]) + + def test_bounded_below_only_2(self): + c = np.ones(3) + A_eq = np.eye(3) + b_eq = np.array([1, 2, 3]) + bounds = (0.5, np.inf) + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_x=b_eq, desired_fun=np.sum(b_eq)) + + def test_bounded_above_only_1(self): + c = np.array([1.0]) + A_eq = np.array([[1.0]]) + b_eq = np.array([3.0]) + bounds = (None, 10.0) + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_fun=3, desired_x=[3]) + + def test_bounded_above_only_2(self): + c = np.ones(3) + A_eq = np.eye(3) + b_eq = np.array([1, 2, 3]) + bounds = (-np.inf, 4) + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_x=b_eq, desired_fun=np.sum(b_eq)) + + def test_bounds_infinity(self): + c = np.ones(3) + A_eq = np.eye(3) + b_eq = np.array([1, 2, 3]) + bounds = (-np.inf, np.inf) + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_x=b_eq, desired_fun=np.sum(b_eq)) + + def test_bounds_mixed(self): + # Problem has one unbounded variable and + # another with a negative lower bound. + c = np.array([-1, 4]) * -1 # maximize + A_ub = np.array([[-3, 1], + [1, 2]], dtype=np.float64) + b_ub = [6, 4] + x0_bounds = (-np.inf, np.inf) + x1_bounds = (-3, np.inf) + bounds = (x0_bounds, x1_bounds) + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_fun=-80 / 7, desired_x=[-8 / 7, 18 / 7]) + + def test_bounds_equal_but_infeasible(self): + c = [-4, 1] + A_ub = [[7, -2], [0, 1], [2, -2]] + b_ub = [14, 0, 3] + bounds = [(2, 2), (0, None)] + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_infeasible(res) + + def test_bounds_equal_but_infeasible2(self): + c = [-4, 1] + A_eq = [[7, -2], [0, 1], [2, -2]] + b_eq = [14, 0, 3] + bounds = [(2, 2), (0, None)] + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_infeasible(res) + + def test_bounds_equal_no_presolve(self): + # There was a bug when a lower and upper bound were equal but + # presolve was not on to eliminate the variable. The bound + # was being converted to an equality constraint, but the bound + # was not eliminated, leading to issues in postprocessing. 
+ c = [1, 2] + A_ub = [[1, 2], [1.1, 2.2]] + b_ub = [4, 8] + bounds = [(1, 2), (2, 2)] + + o = {key: self.options[key] for key in self.options} + o["presolve"] = False + + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=o) + _assert_infeasible(res) + + def test_zero_column_1(self): + m, n = 3, 4 + rng = np.random.RandomState(0) + c = rng.rand(n) + c[1] = 1 + A_eq = rng.rand(m, n) + A_eq[:, 1] = 0 + b_eq = rng.rand(m) + A_ub = [[1, 0, 1, 1]] + b_ub = 3 + bounds = [(-10, 10), (-10, 10), (-10, None), (None, None)] + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_fun=-9.7087836730413404) + + def test_zero_column_2(self): + if self.method in {'highs-ds', 'highs-ipm'}: + # See upstream issue https://github.com/ERGO-Code/HiGHS/issues/648 + pytest.xfail() + + rng = np.random.RandomState(0) + m, n = 2, 4 + c = rng.rand(n) + c[1] = -1 + A_eq = rng.rand(m, n) + A_eq[:, 1] = 0 + b_eq = rng.rand(m) + + A_ub = rng.rand(m, n) + A_ub[:, 1] = 0 + b_ub = rng.rand(m) + bounds = (None, None) + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_unbounded(res) + # Unboundedness detected in presolve + if self.options.get('presolve', True) and "highs" not in self.method: + # HiGHS detects unboundedness or infeasibility in presolve + # It needs an iteration of simplex to be sure of unboundedness + # Other solvers report that the problem is unbounded if feasible + assert_equal(res.nit, 0) + + def test_zero_row_1(self): + c = [1, 2, 3] + A_eq = [[0, 0, 0], [1, 1, 1], [0, 0, 0]] + b_eq = [0, 3, 0] + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_fun=3) + + def test_zero_row_2(self): + A_ub = [[0, 0, 0], [1, 1, 1], [0, 0, 0]] + b_ub = [0, 3, 0] + c = [1, 2, 3] + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_fun=0) + + def test_zero_row_3(self): + m, n = 2, 4 + rng = np.random.RandomState(1234) + c = rng.rand(n) + A_eq = rng.rand(m, n) + A_eq[0, :] = 0 + b_eq = rng.rand(m) + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_infeasible(res) + + # Infeasibility detected in presolve + if self.options.get('presolve', True): + assert_equal(res.nit, 0) + + def test_zero_row_4(self): + m, n = 2, 4 + rng = np.random.RandomState(1234) + c = rng.rand(n) + A_ub = rng.rand(m, n) + A_ub[0, :] = 0 + b_ub = -rng.rand(m) + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_infeasible(res) + + # Infeasibility detected in presolve + if self.options.get('presolve', True): + assert_equal(res.nit, 0) + + def test_singleton_row_eq_1(self): + c = [1, 1, 1, 2] + A_eq = [[1, 0, 0, 0], [0, 2, 0, 0], [1, 0, 0, 0], [1, 1, 1, 1]] + b_eq = [1, 2, 2, 4] + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_infeasible(res) + + # Infeasibility detected in presolve + if self.options.get('presolve', True): + assert_equal(res.nit, 0) + + def test_singleton_row_eq_2(self): + c = [1, 1, 1, 2] + A_eq = [[1, 0, 0, 0], [0, 2, 0, 0], [1, 0, 0, 0], [1, 1, 1, 1]] + b_eq = [1, 2, 1, 4] + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_fun=4) + + def test_singleton_row_ub_1(self): + c = [1, 1, 1, 2] + A_ub = [[1, 0, 0, 0], [0, 
2, 0, 0], [-1, 0, 0, 0], [1, 1, 1, 1]] + b_ub = [1, 2, -2, 4] + bounds = [(None, None), (0, None), (0, None), (0, None)] + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_infeasible(res) + + # Infeasibility detected in presolve + if self.options.get('presolve', True): + assert_equal(res.nit, 0) + + def test_singleton_row_ub_2(self): + c = [1, 1, 1, 2] + A_ub = [[1, 0, 0, 0], [0, 2, 0, 0], [-1, 0, 0, 0], [1, 1, 1, 1]] + b_ub = [1, 2, -0.5, 4] + bounds = [(None, None), (0, None), (0, None), (0, None)] + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_fun=0.5) + + def test_infeasible(self): + # Test linprog response to an infeasible problem + c = [-1, -1] + A_ub = [[1, 0], + [0, 1], + [-1, -1]] + b_ub = [2, 2, -5] + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_infeasible(res) + + def test_infeasible_inequality_bounds(self): + c = [1] + A_ub = [[2]] + b_ub = 4 + bounds = (5, 6) + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_infeasible(res) + + # Infeasibility detected in presolve + if self.options.get('presolve', True): + assert_equal(res.nit, 0) + + def test_unbounded(self): + # Test linprog response to an unbounded problem + c = np.array([1, 1]) * -1 # maximize + A_ub = [[-1, 1], + [-1, -1]] + b_ub = [-1, -2] + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_unbounded(res) + + def test_unbounded_below_no_presolve_corrected(self): + c = [1] + bounds = [(None, 1)] + + o = {key: self.options[key] for key in self.options} + o["presolve"] = False + + res = linprog(c=c, bounds=bounds, + method=self.method, + options=o) + if self.method == "revised simplex": + # Revised simplex has a special pathway for no constraints. + assert_equal(res.status, 5) + else: + _assert_unbounded(res) + + def test_unbounded_no_nontrivial_constraints_1(self): + """ + Test whether presolve pathway for detecting unboundedness after + constraint elimination is working. + """ + c = np.array([0, 0, 0, 1, -1, -1]) + A_ub = np.array([[1, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 0], + [0, 0, 0, 0, 0, -1]]) + b_ub = np.array([2, -2, 0]) + bounds = [(None, None), (None, None), (None, None), + (-1, 1), (-1, 1), (0, None)] + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_unbounded(res) + if not self.method.lower().startswith("highs"): + assert_equal(res.x[-1], np.inf) + assert_equal(res.message[:36], + "The problem is (trivially) unbounded") + + def test_unbounded_no_nontrivial_constraints_2(self): + """ + Test whether presolve pathway for detecting unboundedness after + constraint elimination is working. 
+ """ + c = np.array([0, 0, 0, 1, -1, 1]) + A_ub = np.array([[1, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 1]]) + b_ub = np.array([2, -2, 0]) + bounds = [(None, None), (None, None), (None, None), + (-1, 1), (-1, 1), (None, 0)] + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_unbounded(res) + if not self.method.lower().startswith("highs"): + assert_equal(res.x[-1], -np.inf) + assert_equal(res.message[:36], + "The problem is (trivially) unbounded") + + def test_cyclic_recovery(self): + # Test linprogs recovery from cycling using the Klee-Minty problem + # Klee-Minty https://www.math.ubc.ca/~israel/m340/kleemin3.pdf + c = np.array([100, 10, 1]) * -1 # maximize + A_ub = [[1, 0, 0], + [20, 1, 0], + [200, 20, 1]] + b_ub = [1, 100, 10000] + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_x=[0, 0, 10000], atol=5e-6, rtol=1e-7) + + def test_cyclic_bland(self): + # Test the effect of Bland's rule on a cycling problem + c = np.array([-10, 57, 9, 24.]) + A_ub = np.array([[0.5, -5.5, -2.5, 9], + [0.5, -1.5, -0.5, 1], + [1, 0, 0, 0]]) + b_ub = [0, 0, 1] + + # copy the existing options dictionary but change maxiter + maxiter = 100 + o = {key: val for key, val in self.options.items()} + o['maxiter'] = maxiter + + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=o) + + if self.method == 'simplex' and not self.options.get('bland'): + # simplex cycles without Bland's rule + _assert_iteration_limit_reached(res, o['maxiter']) + else: + # other methods, including simplex with Bland's rule, succeed + _assert_success(res, desired_x=[1, 0, 1, 0]) + # note that revised simplex skips this test because it may or may not + # cycle depending on the initial basis + + def test_remove_redundancy_infeasibility(self): + # mostly a test of redundancy removal, which is carefully tested in + # test__remove_redundancy.py + m, n = 10, 10 + rng = np.random.RandomState(0) + c = rng.rand(n) + A_eq = rng.rand(m, n) + b_eq = rng.rand(m) + A_eq[-1, :] = 2 * A_eq[-2, :] + b_eq[-1] *= -1 + with suppress_warnings() as sup: + sup.filter(OptimizeWarning, "A_eq does not appear...") + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_infeasible(res) + + ################# + # General Tests # + ################# + + def test_nontrivial_problem(self): + # Problem involves all constraint types, + # negative resource limits, and rounding issues. + c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem() + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_fun=f_star, desired_x=x_star) + + def test_lpgen_problem(self): + # Test linprog with a rather large problem (400 variables, + # 40 constraints) generated by https://gist.github.com/denis-bz/8647461 + A_ub, b_ub, c = lpgen_2d(20, 20) + + with suppress_warnings() as sup: + sup.filter(OptimizeWarning, "Solving system with option 'sym_pos'") + sup.filter(RuntimeWarning, "invalid value encountered") + sup.filter(LinAlgWarning) + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_fun=-64.049494229) + + def test_network_flow(self): + # A network flow problem with supply and demand at nodes + # and with costs along directed edges. 
+ # https://www.princeton.edu/~rvdb/542/lectures/lec10.pdf + c = [2, 4, 9, 11, 4, 3, 8, 7, 0, 15, 16, 18] + n, p = -1, 1 + A_eq = [ + [n, n, p, 0, p, 0, 0, 0, 0, p, 0, 0], + [p, 0, 0, p, 0, p, 0, 0, 0, 0, 0, 0], + [0, 0, n, n, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, p, p, 0, 0, p, 0], + [0, 0, 0, 0, n, n, n, 0, p, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, n, n, 0, 0, p], + [0, 0, 0, 0, 0, 0, 0, 0, 0, n, n, n]] + b_eq = [0, 19, -16, 33, 0, 0, -36] + with suppress_warnings() as sup: + sup.filter(LinAlgWarning) + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_fun=755, atol=1e-6, rtol=1e-7) + + def test_network_flow_limited_capacity(self): + # A network flow problem with supply and demand at nodes + # and with costs and capacities along directed edges. + # http://blog.sommer-forst.de/2013/04/10/ + c = [2, 2, 1, 3, 1] + bounds = [ + [0, 4], + [0, 2], + [0, 2], + [0, 3], + [0, 5]] + n, p = -1, 1 + A_eq = [ + [n, n, 0, 0, 0], + [p, 0, n, n, 0], + [0, p, p, 0, n], + [0, 0, 0, p, p]] + b_eq = [-4, 0, 0, 4] + + with suppress_warnings() as sup: + # this is an UmfpackWarning but I had trouble importing it + if has_umfpack: + sup.filter(UmfpackWarning) + sup.filter(RuntimeWarning, "scipy.linalg.solve\nIll...") + sup.filter(OptimizeWarning, "A_eq does not appear...") + sup.filter(OptimizeWarning, "Solving system with option...") + sup.filter(LinAlgWarning) + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_fun=14) + + def test_simplex_algorithm_wikipedia_example(self): + # https://en.wikipedia.org/wiki/Simplex_algorithm#Example + c = [-2, -3, -4] + A_ub = [ + [3, 2, 1], + [2, 5, 3]] + b_ub = [10, 15] + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_fun=-20) + + def test_enzo_example(self): + # https://github.com/scipy/scipy/issues/1779 lp2.py + # + # Translated from Octave code at: + # http://www.ecs.shimane-u.ac.jp/~kyoshida/lpeng.htm + # and placed under MIT licence by Enzo Michelangeli + # with permission explicitly granted by the original author, + # Prof. 
Kazunobu Yoshida + c = [4, 8, 3, 0, 0, 0] + A_eq = [ + [2, 5, 3, -1, 0, 0], + [3, 2.5, 8, 0, -1, 0], + [8, 10, 4, 0, 0, -1]] + b_eq = [185, 155, 600] + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_fun=317.5, + desired_x=[66.25, 0, 17.5, 0, 183.75, 0], + atol=6e-6, rtol=1e-7) + + def test_enzo_example_b(self): + # rescued from https://github.com/scipy/scipy/pull/218 + c = [2.8, 6.3, 10.8, -2.8, -6.3, -10.8] + A_eq = [[-1, -1, -1, 0, 0, 0], + [0, 0, 0, 1, 1, 1], + [1, 0, 0, 1, 0, 0], + [0, 1, 0, 0, 1, 0], + [0, 0, 1, 0, 0, 1]] + b_eq = [-0.5, 0.4, 0.3, 0.3, 0.3] + + with suppress_warnings() as sup: + sup.filter(OptimizeWarning, "A_eq does not appear...") + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_fun=-1.77, + desired_x=[0.3, 0.2, 0.0, 0.0, 0.1, 0.3]) + + def test_enzo_example_c_with_degeneracy(self): + # rescued from https://github.com/scipy/scipy/pull/218 + m = 20 + c = -np.ones(m) + tmp = 2 * np.pi * np.arange(1, m + 1) / (m + 1) + A_eq = np.vstack((np.cos(tmp) - 1, np.sin(tmp))) + b_eq = [0, 0] + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_fun=0, desired_x=np.zeros(m)) + + def test_enzo_example_c_with_unboundedness(self): + # rescued from https://github.com/scipy/scipy/pull/218 + m = 50 + c = -np.ones(m) + tmp = 2 * np.pi * np.arange(m) / (m + 1) + # This test relies on `cos(0) -1 == sin(0)`, so ensure that's true + # (SIMD code or -ffast-math may cause spurious failures otherwise) + row0 = np.cos(tmp) - 1 + row0[0] = 0.0 + row1 = np.sin(tmp) + row1[0] = 0.0 + A_eq = np.vstack((row0, row1)) + b_eq = [0, 0] + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_unbounded(res) + + def test_enzo_example_c_with_infeasibility(self): + # rescued from https://github.com/scipy/scipy/pull/218 + m = 50 + c = -np.ones(m) + tmp = 2 * np.pi * np.arange(m) / (m + 1) + A_eq = np.vstack((np.cos(tmp) - 1, np.sin(tmp))) + b_eq = [1, 1] + + o = {key: self.options[key] for key in self.options} + o["presolve"] = False + + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=o) + _assert_infeasible(res) + + def test_basic_artificial_vars(self): + # Problem is chosen to test two phase simplex methods when at the end + # of phase 1 some artificial variables remain in the basis. + # Also, for `method='simplex'`, the row in the tableau corresponding + # with the artificial variables is not all zero. 
+ c = np.array([-0.1, -0.07, 0.004, 0.004, 0.004, 0.004]) + A_ub = np.array([[1.0, 0, 0, 0, 0, 0], [-1.0, 0, 0, 0, 0, 0], + [0, -1.0, 0, 0, 0, 0], [0, 1.0, 0, 0, 0, 0], + [1.0, 1.0, 0, 0, 0, 0]]) + b_ub = np.array([3.0, 3.0, 3.0, 3.0, 20.0]) + A_eq = np.array([[1.0, 0, -1, 1, -1, 1], [0, -1.0, -1, 1, -1, 1]]) + b_eq = np.array([0, 0]) + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_fun=0, desired_x=np.zeros_like(c), + atol=2e-6) + + def test_optimize_result(self): + # check all fields in OptimizeResult + c, A_ub, b_ub, A_eq, b_eq, bounds = very_random_gen(0) + res = linprog(c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq, + bounds=bounds, method=self.method, options=self.options) + assert_(res.success) + assert_(res.nit) + assert_(not res.status) + if 'highs' not in self.method: + # HiGHS status/message tested separately + assert_(res.message == "Optimization terminated successfully.") + assert_allclose(c @ res.x, res.fun) + assert_allclose(b_eq - A_eq @ res.x, res.con, atol=1e-11) + assert_allclose(b_ub - A_ub @ res.x, res.slack, atol=1e-11) + for key in ['eqlin', 'ineqlin', 'lower', 'upper']: + if key in res.keys(): + assert isinstance(res[key]['marginals'], np.ndarray) + assert isinstance(res[key]['residual'], np.ndarray) + + ################# + # Bug Fix Tests # + ################# + + def test_bug_5400(self): + # https://github.com/scipy/scipy/issues/5400 + bounds = [ + (0, None), + (0, 100), (0, 100), (0, 100), (0, 100), (0, 100), (0, 100), + (0, 900), (0, 900), (0, 900), (0, 900), (0, 900), (0, 900), + (0, None), (0, None), (0, None), (0, None), (0, None), (0, None)] + + f = 1 / 9 + g = -1e4 + h = -3.1 + A_ub = np.array([ + [1, -2.99, 0, 0, -3, 0, 0, 0, -1, -1, 0, -1, -1, 1, 1, 0, 0, 0, 0], + [1, 0, -2.9, h, 0, -3, 0, -1, 0, 0, -1, 0, -1, 0, 0, 1, 1, 0, 0], + [1, 0, 0, h, 0, 0, -3, -1, -1, 0, -1, -1, 0, 0, 0, 0, 0, 1, 1], + [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1], + [0, 1.99, -1, -1, 0, 0, 0, -1, f, f, 0, 0, 0, g, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 2, -1, -1, 0, 0, 0, -1, f, f, 0, g, 0, 0, 0, 0], + [0, -1, 1.9, 2.1, 0, 0, 0, f, -1, -1, 0, 0, 0, 0, 0, g, 0, 0, 0], + [0, 0, 0, 0, -1, 2, -1, 0, 0, 0, f, -1, f, 0, 0, 0, g, 0, 0], + [0, -1, -1, 2.1, 0, 0, 0, f, f, -1, 0, 0, 0, 0, 0, 0, 0, g, 0], + [0, 0, 0, 0, -1, -1, 2, 0, 0, 0, f, f, -1, 0, 0, 0, 0, 0, g]]) + + b_ub = np.array([ + 0.0, 0, 0, 100, 100, 100, 100, 100, 100, 900, 900, 900, 900, 900, + 900, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0]) + + c = np.array([-1.0, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 0, 0, 0, 0, 0, 0]) + with suppress_warnings() as sup: + sup.filter(OptimizeWarning, + "Solving system with option 'sym_pos'") + sup.filter(RuntimeWarning, "invalid value encountered") + sup.filter(LinAlgWarning) + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_fun=-106.63507541835018) + + def test_bug_6139(self): + # linprog(method='simplex') fails to find a basic feasible solution + # if phase 1 pseudo-objective function is outside the provided tol. + # https://github.com/scipy/scipy/issues/6139 + + # Note: This is not strictly a bug as the default tolerance determines + # if a result is "close enough" to zero and should not be expected + # to work for all cases. + + c = np.array([1, 1, 1]) + A_eq = np.array([[1., 0., 0.], [-1000., 0., - 1000.]]) + b_eq = np.array([5.00000000e+00, -1.00000000e+04]) + A_ub = -np.array([[0., 1000000., 1010000.]]) + b_ub = -np.array([10000000.]) + bounds = (None, None) + + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + + _assert_success(res, desired_fun=14.95, + desired_x=np.array([5, 4.95, 5])) + + def test_bug_6690(self): + # linprog simplex used to violate bound constraint despite reporting + # success. + # https://github.com/scipy/scipy/issues/6690 + + A_eq = np.array([[0, 0, 0, 0.93, 0, 0.65, 0, 0, 0.83, 0]]) + b_eq = np.array([0.9626]) + A_ub = np.array([ + [0, 0, 0, 1.18, 0, 0, 0, -0.2, 0, -0.22], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0.43, 0, 0, 0, 0, 0, 0], + [0, -1.22, -0.25, 0, 0, 0, -2.06, 0, 0, 1.37], + [0, 0, 0, 0, 0, 0, 0, -0.25, 0, 0] + ]) + b_ub = np.array([0.615, 0, 0.172, -0.869, -0.022]) + bounds = np.array([ + [-0.84, -0.97, 0.34, 0.4, -0.33, -0.74, 0.47, 0.09, -1.45, -0.73], + [0.37, 0.02, 2.86, 0.86, 1.18, 0.5, 1.76, 0.17, 0.32, -0.15] + ]).T + c = np.array([ + -1.64, 0.7, 1.8, -1.06, -1.16, 0.26, 2.13, 1.53, 0.66, 0.28 + ]) + + with suppress_warnings() as sup: + if has_umfpack: + sup.filter(UmfpackWarning) + sup.filter(OptimizeWarning, + "Solving system with option 'cholesky'") + sup.filter(OptimizeWarning, "Solving system with option 'sym_pos'") + sup.filter(RuntimeWarning, "invalid value encountered") + sup.filter(LinAlgWarning) + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + + desired_fun = -1.19099999999 + desired_x = np.array([0.3700, -0.9700, 0.3400, 0.4000, 1.1800, + 0.5000, 0.4700, 0.0900, 0.3200, -0.7300]) + _assert_success(res, desired_fun=desired_fun, desired_x=desired_x) + + # Add small tol value to ensure arrays are less than or equal. + atol = 1e-6 + assert_array_less(bounds[:, 0] - atol, res.x) + assert_array_less(res.x, bounds[:, 1] + atol) + + def test_bug_7044(self): + # linprog simplex failed to "identify correct constraints" (?) + # leading to a non-optimal solution if A is rank-deficient. 
+ # https://github.com/scipy/scipy/issues/7044 + + A_eq, b_eq, c, _, _ = magic_square(3) + with suppress_warnings() as sup: + sup.filter(OptimizeWarning, "A_eq does not appear...") + sup.filter(RuntimeWarning, "invalid value encountered") + sup.filter(LinAlgWarning) + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + + desired_fun = 1.730550597 + _assert_success(res, desired_fun=desired_fun) + assert_allclose(A_eq.dot(res.x), b_eq) + assert_array_less(np.zeros(res.x.size) - 1e-5, res.x) + + def test_bug_7237(self): + # https://github.com/scipy/scipy/issues/7237 + # linprog simplex "explodes" when the pivot value is very + # close to zero. + + c = np.array([-1, 0, 0, 0, 0, 0, 0, 0, 0]) + A_ub = np.array([ + [1., -724., 911., -551., -555., -896., 478., -80., -293.], + [1., 566., 42., 937., 233., 883., 392., -909., 57.], + [1., -208., -894., 539., 321., 532., -924., 942., 55.], + [1., 857., -859., 83., 462., -265., -971., 826., 482.], + [1., 314., -424., 245., -424., 194., -443., -104., -429.], + [1., 540., 679., 361., 149., -827., 876., 633., 302.], + [0., -1., -0., -0., -0., -0., -0., -0., -0.], + [0., -0., -1., -0., -0., -0., -0., -0., -0.], + [0., -0., -0., -1., -0., -0., -0., -0., -0.], + [0., -0., -0., -0., -1., -0., -0., -0., -0.], + [0., -0., -0., -0., -0., -1., -0., -0., -0.], + [0., -0., -0., -0., -0., -0., -1., -0., -0.], + [0., -0., -0., -0., -0., -0., -0., -1., -0.], + [0., -0., -0., -0., -0., -0., -0., -0., -1.], + [0., 1., 0., 0., 0., 0., 0., 0., 0.], + [0., 0., 1., 0., 0., 0., 0., 0., 0.], + [0., 0., 0., 1., 0., 0., 0., 0., 0.], + [0., 0., 0., 0., 1., 0., 0., 0., 0.], + [0., 0., 0., 0., 0., 1., 0., 0., 0.], + [0., 0., 0., 0., 0., 0., 1., 0., 0.], + [0., 0., 0., 0., 0., 0., 0., 1., 0.], + [0., 0., 0., 0., 0., 0., 0., 0., 1.] + ]) + b_ub = np.array([ + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 1., 1., 1., 1., 1., 1., 1., 1.]) + A_eq = np.array([[0., 1., 1., 1., 1., 1., 1., 1., 1.]]) + b_eq = np.array([[1.]]) + bounds = [(None, None)] * 9 + + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_fun=108.568535, atol=1e-6) + + def test_bug_8174(self): + # https://github.com/scipy/scipy/issues/8174 + # The simplex method sometimes "explodes" if the pivot value is very + # close to zero. + A_ub = np.array([ + [22714, 1008, 13380, -2713.5, -1116], + [-4986, -1092, -31220, 17386.5, 684], + [-4986, 0, 0, -2713.5, 0], + [22714, 0, 0, 17386.5, 0]]) + b_ub = np.zeros(A_ub.shape[0]) + c = -np.ones(A_ub.shape[1]) + bounds = [(0, 1)] * A_ub.shape[1] + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "invalid value encountered") + sup.filter(LinAlgWarning) + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + + if self.options.get('tol', 1e-9) < 1e-10 and self.method == 'simplex': + _assert_unable_to_find_basic_feasible_sol(res) + else: + _assert_success(res, desired_fun=-2.0080717488789235, atol=1e-6) + + def test_bug_8174_2(self): + # Test supplementary example from issue 8174. 
+ # https://github.com/scipy/scipy/issues/8174 + # https://stackoverflow.com/questions/47717012/linprog-in-scipy-optimize-checking-solution + c = np.array([1, 0, 0, 0, 0, 0, 0]) + A_ub = -np.identity(7) + b_ub = np.array([[-2], [-2], [-2], [-2], [-2], [-2], [-2]]) + A_eq = np.array([ + [1, 1, 1, 1, 1, 1, 0], + [0.3, 1.3, 0.9, 0, 0, 0, -1], + [0.3, 0, 0, 0, 0, 0, -2/3], + [0, 0.65, 0, 0, 0, 0, -1/15], + [0, 0, 0.3, 0, 0, 0, -1/15] + ]) + b_eq = np.array([[100], [0], [0], [0], [0]]) + + with suppress_warnings() as sup: + if has_umfpack: + sup.filter(UmfpackWarning) + sup.filter(OptimizeWarning, "A_eq does not appear...") + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_fun=43.3333333331385) + + def test_bug_8561(self): + # Test that pivot row is chosen correctly when using Bland's rule + # This was originally written for the simplex method with + # Bland's rule only, but it doesn't hurt to test all methods/options + # https://github.com/scipy/scipy/issues/8561 + c = np.array([7, 0, -4, 1.5, 1.5]) + A_ub = np.array([ + [4, 5.5, 1.5, 1.0, -3.5], + [1, -2.5, -2, 2.5, 0.5], + [3, -0.5, 4, -12.5, -7], + [-1, 4.5, 2, -3.5, -2], + [5.5, 2, -4.5, -1, 9.5]]) + b_ub = np.array([0, 0, 0, 0, 1]) + res = linprog(c, A_ub=A_ub, b_ub=b_ub, options=self.options, + method=self.method) + _assert_success(res, desired_x=[0, 0, 19, 16/3, 29/3]) + + def test_bug_8662(self): + # linprog simplex used to report incorrect optimal results + # https://github.com/scipy/scipy/issues/8662 + c = [-10, 10, 6, 3] + A_ub = [[8, -8, -4, 6], + [-8, 8, 4, -6], + [-4, 4, 8, -4], + [3, -3, -3, -10]] + b_ub = [9, -9, -9, -4] + bounds = [(0, None), (0, None), (0, None), (0, None)] + desired_fun = 36.0000000000 + + with suppress_warnings() as sup: + if has_umfpack: + sup.filter(UmfpackWarning) + sup.filter(RuntimeWarning, "invalid value encountered") + sup.filter(LinAlgWarning) + res1 = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + + # Set boundary condition as a constraint + A_ub.append([0, 0, -1, 0]) + b_ub.append(0) + bounds[2] = (None, None) + + with suppress_warnings() as sup: + if has_umfpack: + sup.filter(UmfpackWarning) + sup.filter(RuntimeWarning, "invalid value encountered") + sup.filter(LinAlgWarning) + res2 = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + rtol = 1e-5 + _assert_success(res1, desired_fun=desired_fun, rtol=rtol) + _assert_success(res2, desired_fun=desired_fun, rtol=rtol) + + def test_bug_8663(self): + # exposed a bug in presolve + # https://github.com/scipy/scipy/issues/8663 + c = [1, 5] + A_eq = [[0, -7]] + b_eq = [-6] + bounds = [(0, None), (None, None)] + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_x=[0, 6./7], desired_fun=5*6./7) + + def test_bug_8664(self): + # interior-point has trouble with this when presolve is off + # tested for interior-point with presolve off in TestLinprogIPSpecific + # https://github.com/scipy/scipy/issues/8664 + c = [4] + A_ub = [[2], [5]] + b_ub = [4, 4] + A_eq = [[0], [-8], [9]] + b_eq = [3, 2, 10] + with suppress_warnings() as sup: + sup.filter(RuntimeWarning) + sup.filter(OptimizeWarning, "Solving system with option...") + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_infeasible(res) + + def test_bug_8973(self): + """ + Test whether bug described at: + 
https://github.com/scipy/scipy/issues/8973 + was fixed. + """ + c = np.array([0, 0, 0, 1, -1]) + A_ub = np.array([[1, 0, 0, 0, 0], [0, 1, 0, 0, 0]]) + b_ub = np.array([2, -2]) + bounds = [(None, None), (None, None), (None, None), (-1, 1), (-1, 1)] + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + # solution vector x is not unique + _assert_success(res, desired_fun=-2) + # HiGHS IPM had an issue where the following wasn't true! + assert_equal(c @ res.x, res.fun) + + def test_bug_8973_2(self): + """ + Additional test for: + https://github.com/scipy/scipy/issues/8973 + suggested in + https://github.com/scipy/scipy/pull/8985 + review by @antonior92 + """ + c = np.zeros(1) + A_ub = np.array([[1]]) + b_ub = np.array([-2]) + bounds = (None, None) + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_x=[-2], desired_fun=0) + + def test_bug_10124(self): + """ + Test for linprog docstring problem + 'disp'=True caused revised simplex failure + """ + c = np.zeros(1) + A_ub = np.array([[1]]) + b_ub = np.array([-2]) + bounds = (None, None) + c = [-1, 4] + A_ub = [[-3, 1], [1, 2]] + b_ub = [6, 4] + bounds = [(None, None), (-3, None)] + o = {"disp": True} + o.update(self.options) + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=o) + _assert_success(res, desired_x=[10, -3], desired_fun=-22) + + def test_bug_10349(self): + """ + Test for redundancy removal tolerance issue + https://github.com/scipy/scipy/issues/10349 + """ + A_eq = np.array([[1, 1, 0, 0, 0, 0], + [0, 0, 1, 1, 0, 0], + [0, 0, 0, 0, 1, 1], + [1, 0, 1, 0, 0, 0], + [0, 0, 0, 1, 1, 0], + [0, 1, 0, 0, 0, 1]]) + b_eq = np.array([221, 210, 10, 141, 198, 102]) + c = np.concatenate((0, 1, np.zeros(4)), axis=None) + with suppress_warnings() as sup: + sup.filter(OptimizeWarning, "A_eq does not appear...") + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_x=[129, 92, 12, 198, 0, 10], desired_fun=92) + + @pytest.mark.skipif(sys.platform == 'darwin', + reason=("Failing on some local macOS builds, " + "see gh-13846")) + def test_bug_10466(self): + """ + Test that autoscale fixes poorly-scaled problem + """ + c = [-8., -0., -8., -0., -8., -0., -0., -0., -0., -0., -0., -0., -0.] 
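+ # The objective coefficients above are O(1) while the b_eq values below
+ # are O(1e8)-O(1e9); that scale disparity is what makes the problem
+ # poorly scaled and is why the autoscale option is exercised here.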
+ A_eq = [[1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.], + [0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0.], + [0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 0.], + [1., 0., 1., 0., 1., 0., -1., 0., 0., 0., 0., 0., 0.], + [1., 0., 1., 0., 1., 0., 0., 1., 0., 0., 0., 0., 0.], + [1., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0.], + [1., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0.], + [1., 0., 1., 0., 1., 0., 0., 0., 0., 0., 1., 0., 0.], + [0., 0., 1., 0., 1., 0., 0., 0., 0., 0., 0., 1., 0.], + [0., 0., 1., 0., 1., 0., 0., 0., 0., 0., 0., 0., 1.]] + + b_eq = [3.14572800e+08, 4.19430400e+08, 5.24288000e+08, + 1.00663296e+09, 1.07374182e+09, 1.07374182e+09, + 1.07374182e+09, 1.07374182e+09, 1.07374182e+09, + 1.07374182e+09] + + o = {} + # HiGHS methods don't use autoscale option + if not self.method.startswith("highs"): + o = {"autoscale": True} + o.update(self.options) + + with suppress_warnings() as sup: + sup.filter(OptimizeWarning, "Solving system with option...") + if has_umfpack: + sup.filter(UmfpackWarning) + sup.filter(RuntimeWarning, "scipy.linalg.solve\nIll...") + sup.filter(RuntimeWarning, "divide by zero encountered...") + sup.filter(RuntimeWarning, "overflow encountered...") + sup.filter(RuntimeWarning, "invalid value encountered...") + sup.filter(LinAlgWarning, "Ill-conditioned matrix...") + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=o) + assert_allclose(res.fun, -8589934560) + + +######################### +# Method-specific Tests # +######################### + + +@pytest.mark.filterwarnings("ignore::DeprecationWarning") +class LinprogSimplexTests(LinprogCommonTests): + method = "simplex" + + +@pytest.mark.filterwarnings("ignore::DeprecationWarning") +class LinprogIPTests(LinprogCommonTests): + method = "interior-point" + + def test_bug_10466(self): + pytest.skip("Test is failing, but solver is deprecated.") + + +@pytest.mark.filterwarnings("ignore::DeprecationWarning") +class LinprogRSTests(LinprogCommonTests): + method = "revised simplex" + + # Revised simplex does not reliably solve these problems. + # Failure is intermittent due to the random choice of elements to complete + # the basis after phase 1 terminates. In any case, linprog exists + # gracefully, reporting numerical difficulties. I do not think this should + # prevent revised simplex from being merged, as it solves the problems + # most of the time and solves a broader range of problems than the existing + # simplex implementation. + # I believe that the root cause is the same for all three and that this + # same issue prevents revised simplex from solving many other problems + # reliably. Somehow the pivoting rule allows the algorithm to pivot into + # a singular basis. I haven't been able to find a reference that + # acknowledges this possibility, suggesting that there is a bug. On the + # other hand, the pivoting rule is quite simple, and I can't find a + # mistake, which suggests that this is a possibility with the pivoting + # rule. Hopefully, a better pivoting rule will fix the issue. 
+ + def test_bug_5400(self): + pytest.skip("Intermittent failure acceptable.") + + def test_bug_8662(self): + pytest.skip("Intermittent failure acceptable.") + + def test_network_flow(self): + pytest.skip("Intermittent failure acceptable.") + + +class LinprogHiGHSTests(LinprogCommonTests): + def test_callback(self): + # this is the problem from test_callback + def cb(res): + return None + c = np.array([-3, -2]) + A_ub = [[2, 1], [1, 1], [1, 0]] + b_ub = [10, 8, 4] + assert_raises(NotImplementedError, linprog, c, A_ub=A_ub, b_ub=b_ub, + callback=cb, method=self.method) + res = linprog(c, A_ub=A_ub, b_ub=b_ub, method=self.method) + _assert_success(res, desired_fun=-18.0, desired_x=[2, 6]) + + @pytest.mark.thread_unsafe + @pytest.mark.parametrize("options", + [{"maxiter": -1}, + {"disp": -1}, + {"presolve": -1}, + {"time_limit": -1}, + {"dual_feasibility_tolerance": -1}, + {"primal_feasibility_tolerance": -1}, + {"ipm_optimality_tolerance": -1}, + {"simplex_dual_edge_weight_strategy": "ekki"}, + ]) + def test_invalid_option_values(self, options): + def f(options): + linprog(1, method=self.method, options=options) + options.update(self.options) + assert_warns(OptimizeWarning, f, options=options) + + def test_crossover(self): + A_eq, b_eq, c, _, _ = magic_square(4) + bounds = (0, 1) + res = linprog(c, A_eq=A_eq, b_eq=b_eq, + bounds=bounds, method=self.method, options=self.options) + # there should be nonzero crossover iterations for IPM (only) + assert_equal(res.crossover_nit == 0, self.method != "highs-ipm") + + @pytest.mark.fail_slow(10) + def test_marginals(self): + # Ensure lagrange multipliers are correct by comparing the derivative + # w.r.t. b_ub/b_eq/ub/lb to the reported duals. + c, A_ub, b_ub, A_eq, b_eq, bounds = very_random_gen(seed=0) + res = linprog(c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq, + bounds=bounds, method=self.method, options=self.options) + lb, ub = bounds.T + + # sensitivity w.r.t. b_ub + def f_bub(x): + return linprog(c, A_ub, x, A_eq, b_eq, bounds, + method=self.method).fun + + dfdbub = approx_derivative(f_bub, b_ub, method='3-point', f0=res.fun) + assert_allclose(res.ineqlin.marginals, dfdbub) + + # sensitivity w.r.t. b_eq + def f_beq(x): + return linprog(c, A_ub, b_ub, A_eq, x, bounds, + method=self.method).fun + + dfdbeq = approx_derivative(f_beq, b_eq, method='3-point', f0=res.fun) + assert_allclose(res.eqlin.marginals, dfdbeq) + + # sensitivity w.r.t. lb + def f_lb(x): + bounds = np.array([x, ub]).T + return linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method).fun + + with np.errstate(invalid='ignore'): + # approx_derivative has trouble where lb is infinite + dfdlb = approx_derivative(f_lb, lb, method='3-point', f0=res.fun) + dfdlb[~np.isfinite(lb)] = 0 + + assert_allclose(res.lower.marginals, dfdlb) + + # sensitivity w.r.t. 
ub + def f_ub(x): + bounds = np.array([lb, x]).T + return linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method).fun + + with np.errstate(invalid='ignore'): + dfdub = approx_derivative(f_ub, ub, method='3-point', f0=res.fun) + dfdub[~np.isfinite(ub)] = 0 + + assert_allclose(res.upper.marginals, dfdub) + + def test_dual_feasibility(self): + # Ensure solution is dual feasible using marginals + c, A_ub, b_ub, A_eq, b_eq, bounds = very_random_gen(seed=42) + res = linprog(c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq, + bounds=bounds, method=self.method, options=self.options) + + # KKT dual feasibility equation from Theorem 1 from + # http://www.personal.psu.edu/cxg286/LPKKT.pdf + resid = (-c + A_ub.T @ res.ineqlin.marginals + + A_eq.T @ res.eqlin.marginals + + res.upper.marginals + + res.lower.marginals) + assert_allclose(resid, 0, atol=1e-12) + + def test_complementary_slackness(self): + # Ensure that the complementary slackness condition is satisfied. + c, A_ub, b_ub, A_eq, b_eq, bounds = very_random_gen(seed=42) + res = linprog(c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq, + bounds=bounds, method=self.method, options=self.options) + + # KKT complementary slackness equation from Theorem 1 from + # http://www.personal.psu.edu/cxg286/LPKKT.pdf modified for + # non-zero RHS + assert np.allclose(res.ineqlin.marginals @ (b_ub - A_ub @ res.x), 0) + + @pytest.mark.xfail(reason='Upstream / Wrapper issue, see gh-20589') + def test_bug_20336(self): + """ + Test that `linprog` now solves a poorly-scaled problem + """ + boundaries = [(10000.0, 9010000.0), (0.0, None), (10000.0, None), + (0.0, 84.62623413258109), (10000.0, None), (10000.0, None), + (10000.0, None), (10000.0, None), (10000.0, None), + (10000.0, None), (10000.0, None), (10000.0, None), + (10000.0, None), (None, None), (None, None), (None, None), + (None, None), (None, None), (None, None), (None, None), + (None, None), (None, None), (None, None), (None, None), + (None, None), (None, None), (None, None), (None, None), + (None, None), (None, None), (None, None), (None, None), + (None, None)] + eq_entries = [-1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, + -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, 1.0, 1.0, -1.0, 0.001, + -0.001, 3.7337777768059636e-10, 3.7337777768059636e-10, 1.0, -1.0, + 0.001, -0.001, 3.7337777768059636e-10, 3.7337777768059636e-10, + 1.0, -1.0, 0.001, -0.001, 3.7337777768059636e-10, + 3.7337777768059636e-10, 1.0, -1.0, 0.001, -0.001, + 3.7337777768059636e-10, 3.7337777768059636e-10, 1.0, -1.0, 0.001, + -0.001, 3.7337777768059636e-10, 3.7337777768059636e-10, 1.0, -1.0, + 0.001, -0.001, 3.7337777768059636e-10, 3.7337777768059636e-10, + 1.0, -1.0, 0.001, -0.001, 3.7337777768059636e-10, + 3.7337777768059636e-10, 1.0, -1.0, 0.001, -0.001, + 3.7337777768059636e-10, 3.7337777768059636e-10, 1.0, -1.0, 0.001, + -0.001, 3.7337777768059636e-10, 3.7337777768059636e-10, 1.0, + -1.0, 0.001, -0.001, 3.7337777768059636e-10, + 3.7337777768059636e-10, 1.0, -1.0] + eq_indizes = [0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, + 11, 11, 12, 12, 12, 12, 13, 13, 14, 14, 14, 14, 15, 15, 16, 16, + 16, 16, 17, 17, 18, 18, 18, 18, 19, 19, 20, 20, 20, 20, 21, 21, + 22, 22, 22, 22, 23, 23, 24, 24, 24, 24, 25, 25, 26, 26, 26, 26, + 27, 27, 28, 28, 28, 28, 29, 29, 30, 30, 30, 30, 31, 31] + eq_vars = [15, 14, 17, 16, 19, 18, 21, 20, 23, 22, 25, 24, 27, 26, 29, 28, 31, + 30, 13, 1, 0, 32, 3, 14, 13, 4, 0, 4, 0, 32, 31, 2, 12, 2, 12, 16, + 15, 5, 4, 5, 4, 18, 17, 6, 5, 6, 5, 20, 19, 7, 6, 7, 6, 22, 21, 8, + 
7, 8, 7, 24, 23, 9, 8, 9, 8, 26, 25, 10, 9, 10, 9, 28, 27, 11, 10, + 11, 10, 30, 29, 12, 11, 12, 11] + eq_values = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 9000000.0, 0.0, + 0.006587392118285457, -5032.197406716549, 0.0041860502789104696, + -7918.93439542944, 0.0063205763583549035, -5244.625751707402, + 0.006053760598424349, -5475.7793929428, 0.005786944838493795, + -5728.248403917573, 0.0055201290785632405, -6005.123623538355, + 0.005253313318632687, -6310.123825488683, 0.004986497558702133, + -6647.763714796453, 0.004719681798771578, -7023.578908071522, + 0.004452866038841024, -7444.431798646482] + coefficients = [0.0, 0.0, 0.0, -0.011816666666666668, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] + np_eq_entries = np.asarray(eq_entries, dtype=np.float64) + np_eq_indizes = np.asarray(eq_indizes, dtype=np.int32) + np_eq_vars = np.asarray(eq_vars, dtype=np.int32) + + a_eq= scipy.sparse.csr_array((np_eq_entries,(np_eq_indizes, np_eq_vars)), + shape=(32, 33)) + b_eq = np.asarray(eq_values, dtype=np.float64) + c = np.asarray(coefficients, dtype=np.float64) + + result = scipy.optimize.linprog(c, A_ub=None, b_ub=None, A_eq=a_eq, b_eq=b_eq, + bounds=boundaries) + assert result.status==0 + x = result.x + n_r_x = np.linalg.norm(a_eq @ x - b_eq) + n_r = np.linalg.norm(result.eqlin.residual) + assert_allclose(n_r, n_r_x) + + +################################ +# Simplex Option-Specific Tests# +################################ + + +class TestLinprogSimplexDefault(LinprogSimplexTests): + + def setup_method(self): + self.options = {} + + def test_bug_5400(self): + pytest.skip("Simplex fails on this problem.") + + def test_bug_7237_low_tol(self): + # Fails if the tolerance is too strict. Here, we test that + # even if the solution is wrong, the appropriate error is raised. + pytest.skip("Simplex fails on this problem.") + + @pytest.mark.thread_unsafe + def test_bug_8174_low_tol(self): + # Fails if the tolerance is too strict. Here, we test that + # even if the solution is wrong, the appropriate warning is issued. + self.options.update({'tol': 1e-12}) + with pytest.warns(OptimizeWarning): + super().test_bug_8174() + + +class TestLinprogSimplexBland(LinprogSimplexTests): + + def setup_method(self): + self.options = {'bland': True} + + def test_bug_5400(self): + pytest.skip("Simplex fails on this problem.") + + @pytest.mark.thread_unsafe + def test_bug_8174_low_tol(self): + # Fails if the tolerance is too strict. Here, we test that + # even if the solution is wrong, the appropriate error is raised. + self.options.update({'tol': 1e-12}) + with pytest.raises(AssertionError): + with pytest.warns(OptimizeWarning): + super().test_bug_8174() + + +class TestLinprogSimplexNoPresolve(LinprogSimplexTests): + + def setup_method(self): + self.options = {'presolve': False} + + is_32_bit = np.intp(0).itemsize < 8 + is_linux = sys.platform.startswith('linux') + + @pytest.mark.xfail( + condition=is_32_bit and is_linux, + reason='Fails with warning on 32-bit linux') + def test_bug_5400(self): + super().test_bug_5400() + + def test_bug_6139_low_tol(self): + # Linprog(method='simplex') fails to find a basic feasible solution + # if phase 1 pseudo-objective function is outside the provided tol. + # https://github.com/scipy/scipy/issues/6139 + # Without ``presolve`` eliminating such rows the result is incorrect. 
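+ # With presolve off and tol tightened to 1e-12, the solver is expected
+ # to report numerical difficulties (status 4) rather than the correct
+ # solution; the assertion below checks for exactly that failure mode.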
+ self.options.update({'tol': 1e-12}) + with pytest.raises(AssertionError, match='linprog status 4'): + return super().test_bug_6139() + + def test_bug_7237_low_tol(self): + pytest.skip("Simplex fails on this problem.") + + @pytest.mark.thread_unsafe + def test_bug_8174_low_tol(self): + # Fails if the tolerance is too strict. Here, we test that + # even if the solution is wrong, the appropriate warning is issued. + self.options.update({'tol': 1e-12}) + with pytest.warns(OptimizeWarning): + super().test_bug_8174() + + def test_unbounded_no_nontrivial_constraints_1(self): + pytest.skip("Tests behavior specific to presolve") + + def test_unbounded_no_nontrivial_constraints_2(self): + pytest.skip("Tests behavior specific to presolve") + + +####################################### +# Interior-Point Option-Specific Tests# +####################################### + + +class TestLinprogIPDense(LinprogIPTests): + options = {"sparse": False} + + # see https://github.com/scipy/scipy/issues/20216 for skip reason + @pytest.mark.skipif( + sys.platform == 'darwin', + reason="Fails on some macOS builds for reason not relevant to test" + ) + def test_bug_6139(self): + super().test_bug_6139() + +if has_cholmod: + class TestLinprogIPSparseCholmod(LinprogIPTests): + options = {"sparse": True, "cholesky": True} + + +if has_umfpack: + class TestLinprogIPSparseUmfpack(LinprogIPTests): + options = {"sparse": True, "cholesky": False} + + def test_network_flow_limited_capacity(self): + pytest.skip("Failing due to numerical issues on some platforms.") + + +class TestLinprogIPSparse(LinprogIPTests): + options = {"sparse": True, "cholesky": False, "sym_pos": False} + + @pytest.mark.skipif( + sys.platform == 'darwin', + reason="Fails on macOS x86 Accelerate builds (gh-20510)" + ) + @pytest.mark.xfail_on_32bit("This test is sensitive to machine epsilon level " + "perturbations in linear system solution in " + "_linprog_ip._sym_solve.") + def test_bug_6139(self): + super().test_bug_6139() + + @pytest.mark.xfail(reason='Fails with ATLAS, see gh-7877') + def test_bug_6690(self): + # Test defined in base class, but can't mark as xfail there + super().test_bug_6690() + + def test_magic_square_sparse_no_presolve(self): + # test linprog with a problem with a rank-deficient A_eq matrix + A_eq, b_eq, c, _, _ = magic_square(3) + bounds = (0, 1) + + with suppress_warnings() as sup: + if has_umfpack: + sup.filter(UmfpackWarning) + sup.filter(MatrixRankWarning, "Matrix is exactly singular") + sup.filter(OptimizeWarning, "Solving system with option...") + + o = {key: self.options[key] for key in self.options} + o["presolve"] = False + + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=o) + _assert_success(res, desired_fun=1.730550597) + + def test_sparse_solve_options(self): + # checking that problem is solved with all column permutation options + A_eq, b_eq, c, _, _ = magic_square(3) + with suppress_warnings() as sup: + sup.filter(OptimizeWarning, "A_eq does not appear...") + sup.filter(OptimizeWarning, "Invalid permc_spec option") + o = {key: self.options[key] for key in self.options} + permc_specs = ('NATURAL', 'MMD_ATA', 'MMD_AT_PLUS_A', + 'COLAMD', 'ekki-ekki-ekki') + # 'ekki-ekki-ekki' raises warning about invalid permc_spec option + # and uses default + for permc_spec in permc_specs: + o["permc_spec"] = permc_spec + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=o) + _assert_success(res, desired_fun=1.730550597) + + +class TestLinprogIPSparsePresolve(LinprogIPTests): 
+ options = {"sparse": True, "_sparse_presolve": True} + + @pytest.mark.skipif( + sys.platform == 'darwin', + reason="Fails on macOS x86 Accelerate builds (gh-20510)" + ) + @pytest.mark.xfail_on_32bit("This test is sensitive to machine epsilon level " + "perturbations in linear system solution in " + "_linprog_ip._sym_solve.") + def test_bug_6139(self): + super().test_bug_6139() + + def test_enzo_example_c_with_infeasibility(self): + pytest.skip('_sparse_presolve=True incompatible with presolve=False') + + @pytest.mark.xfail(reason='Fails with ATLAS, see gh-7877') + def test_bug_6690(self): + # Test defined in base class, but can't mark as xfail there + super().test_bug_6690() + + +@pytest.mark.filterwarnings("ignore::DeprecationWarning") +class TestLinprogIPSpecific: + method = "interior-point" + # the following tests don't need to be performed separately for + # sparse presolve, sparse after presolve, and dense + + def test_solver_select(self): + # check that default solver is selected as expected + if has_cholmod: + options = {'sparse': True, 'cholesky': True} + elif has_umfpack: + options = {'sparse': True, 'cholesky': False} + else: + options = {'sparse': True, 'cholesky': False, 'sym_pos': False} + A, b, c = lpgen_2d(20, 20) + res1 = linprog(c, A_ub=A, b_ub=b, method=self.method, options=options) + res2 = linprog(c, A_ub=A, b_ub=b, method=self.method) # default solver + assert_allclose(res1.fun, res2.fun, + err_msg="linprog default solver unexpected result", + rtol=2e-15, atol=1e-15) + + def test_unbounded_below_no_presolve_original(self): + # formerly caused segfault in TravisCI w/ "cholesky":True + c = [-1] + bounds = [(None, 1)] + res = linprog(c=c, bounds=bounds, + method=self.method, + options={"presolve": False, "cholesky": True}) + _assert_success(res, desired_fun=-1) + + def test_cholesky(self): + # use cholesky factorization and triangular solves + A, b, c = lpgen_2d(20, 20) + res = linprog(c, A_ub=A, b_ub=b, method=self.method, + options={"cholesky": True}) # only for dense + _assert_success(res, desired_fun=-64.049494229) + + def test_alternate_initial_point(self): + # use "improved" initial point + A, b, c = lpgen_2d(20, 20) + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "scipy.linalg.solve\nIll...") + sup.filter(OptimizeWarning, "Solving system with option...") + sup.filter(LinAlgWarning, "Ill-conditioned matrix...") + res = linprog(c, A_ub=A, b_ub=b, method=self.method, + options={"ip": True, "disp": True}) + # ip code is independent of sparse/dense + _assert_success(res, desired_fun=-64.049494229) + + def test_bug_8664(self): + # interior-point has trouble with this when presolve is off + c = [4] + A_ub = [[2], [5]] + b_ub = [4, 4] + A_eq = [[0], [-8], [9]] + b_eq = [3, 2, 10] + with suppress_warnings() as sup: + sup.filter(RuntimeWarning) + sup.filter(OptimizeWarning, "Solving system with option...") + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options={"presolve": False}) + assert_(not res.success, "Incorrectly reported success") + + +######################################## +# Revised Simplex Option-Specific Tests# +######################################## + + +class TestLinprogRSCommon(LinprogRSTests): + options = {} + + def test_cyclic_bland(self): + pytest.skip("Intermittent failure acceptable.") + + def test_nontrivial_problem_with_guess(self): + c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem() + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options, x0=x_star) + 
_assert_success(res, desired_fun=f_star, desired_x=x_star) + assert_equal(res.nit, 0) + + def test_nontrivial_problem_with_unbounded_variables(self): + c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem() + bounds = [(None, None), (None, None), (0, None), (None, None)] + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options, x0=x_star) + _assert_success(res, desired_fun=f_star, desired_x=x_star) + assert_equal(res.nit, 0) + + def test_nontrivial_problem_with_bounded_variables(self): + c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem() + bounds = [(None, 1), (1, None), (0, None), (.4, .6)] + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options, x0=x_star) + _assert_success(res, desired_fun=f_star, desired_x=x_star) + assert_equal(res.nit, 0) + + def test_nontrivial_problem_with_negative_unbounded_variable(self): + c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem() + b_eq = [4] + x_star = np.array([-219/385, 582/385, 0, 4/10]) + f_star = 3951/385 + bounds = [(None, None), (1, None), (0, None), (.4, .6)] + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options, x0=x_star) + _assert_success(res, desired_fun=f_star, desired_x=x_star) + assert_equal(res.nit, 0) + + def test_nontrivial_problem_with_bad_guess(self): + c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem() + bad_guess = [1, 2, 3, .5] + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options, x0=bad_guess) + assert_equal(res.status, 6) + + def test_redundant_constraints_with_guess(self): + A, b, c, _, _ = magic_square(3) + p = np.random.rand(*c.shape) + with suppress_warnings() as sup: + sup.filter(OptimizeWarning, "A_eq does not appear...") + sup.filter(RuntimeWarning, "invalid value encountered") + sup.filter(LinAlgWarning) + res = linprog(c, A_eq=A, b_eq=b, method=self.method) + res2 = linprog(c, A_eq=A, b_eq=b, method=self.method, x0=res.x) + res3 = linprog(c + p, A_eq=A, b_eq=b, method=self.method, x0=res.x) + _assert_success(res2, desired_fun=1.730550597) + assert_equal(res2.nit, 0) + _assert_success(res3) + assert_(res3.nit < res.nit) # hot start reduces iterations + + +class TestLinprogRSBland(LinprogRSTests): + options = {"pivot": "bland"} + + +############################################ +# HiGHS-Simplex-Dual Option-Specific Tests # +############################################ + + +class TestLinprogHiGHSSimplexDual(LinprogHiGHSTests): + method = "highs-ds" + options = {} + + def test_lad_regression(self): + ''' + The scaled model should be optimal, i.e. not produce unscaled model + infeasible. See https://github.com/ERGO-Code/HiGHS/issues/494. 
+ ''' + # Test to ensure gh-13610 is resolved (mismatch between HiGHS scaled + # and unscaled model statuses) + c, A_ub, b_ub, bnds = l1_regression_prob() + res = linprog(c, A_ub=A_ub, b_ub=b_ub, bounds=bnds, + method=self.method, options=self.options) + assert_equal(res.status, 0) + assert_(res.x is not None) + assert_(np.all(res.slack > -1e-6)) + assert_(np.all(res.x <= [np.inf if ub is None else ub + for lb, ub in bnds])) + assert_(np.all(res.x >= [-np.inf if lb is None else lb - 1e-7 + for lb, ub in bnds])) + + +################################### +# HiGHS-IPM Option-Specific Tests # +################################### + + +class TestLinprogHiGHSIPM(LinprogHiGHSTests): + method = "highs-ipm" + options = {} + + +################################### +# HiGHS-MIP Option-Specific Tests # +################################### + + +class TestLinprogHiGHSMIP: + method = "highs" + options = {} + + @pytest.mark.fail_slow(10) + @pytest.mark.xfail(condition=(sys.maxsize < 2 ** 32 and + platform.system() == "Linux"), + run=False, + reason="gh-16347") + def test_mip1(self): + # solve non-relaxed magic square problem (finally!) + # also check that values are all integers - they don't always + # come out of HiGHS that way + n = 4 + A, b, c, numbers, M = magic_square(n) + bounds = [(0, 1)] * len(c) + integrality = [1] * len(c) + + res = linprog(c=c*0, A_eq=A, b_eq=b, bounds=bounds, + method=self.method, integrality=integrality) + + s = (numbers.flatten() * res.x).reshape(n**2, n, n) + square = np.sum(s, axis=0) + np.testing.assert_allclose(square.sum(axis=0), M) + np.testing.assert_allclose(square.sum(axis=1), M) + np.testing.assert_allclose(np.diag(square).sum(), M) + np.testing.assert_allclose(np.diag(square[:, ::-1]).sum(), M) + + np.testing.assert_allclose(res.x, np.round(res.x), atol=1e-12) + + def test_mip2(self): + # solve MIP with inequality constraints and all integer constraints + # source: slide 5, + # https://www.cs.upc.edu/~erodri/webpage/cps/theory/lp/milp/slides.pdf + + # use all array inputs to test gh-16681 (integrality couldn't be array) + A_ub = np.array([[2, -2], [-8, 10]]) + b_ub = np.array([-1, 13]) + c = -np.array([1, 1]) + + bounds = np.array([(0, np.inf)] * len(c)) + integrality = np.ones_like(c) + + res = linprog(c=c, A_ub=A_ub, b_ub=b_ub, bounds=bounds, + method=self.method, integrality=integrality) + + np.testing.assert_allclose(res.x, [1, 2]) + np.testing.assert_allclose(res.fun, -3) + + def test_mip3(self): + # solve MIP with inequality constraints and all integer constraints + # source: https://en.wikipedia.org/wiki/Integer_programming#Example + A_ub = np.array([[-1, 1], [3, 2], [2, 3]]) + b_ub = np.array([1, 12, 12]) + c = -np.array([0, 1]) + + bounds = [(0, np.inf)] * len(c) + integrality = [1] * len(c) + + res = linprog(c=c, A_ub=A_ub, b_ub=b_ub, bounds=bounds, + method=self.method, integrality=integrality) + + np.testing.assert_allclose(res.fun, -2) + # two optimal solutions possible, just need one of them + assert np.allclose(res.x, [1, 2]) or np.allclose(res.x, [2, 2]) + + def test_mip4(self): + # solve MIP with inequality constraints and only one integer constraint + # source: https://www.mathworks.com/help/optim/ug/intlinprog.html + A_ub = np.array([[-1, -2], [-4, -1], [2, 1]]) + b_ub = np.array([14, -33, 20]) + c = np.array([8, 1]) + + bounds = [(0, np.inf)] * len(c) + integrality = [0, 1] + + res = linprog(c=c, A_ub=A_ub, b_ub=b_ub, bounds=bounds, + method=self.method, integrality=integrality) + + np.testing.assert_allclose(res.x, [6.5, 7]) + 
np.testing.assert_allclose(res.fun, 59) + + def test_mip5(self): + # solve MIP with inequality and inequality constraints + # source: https://www.mathworks.com/help/optim/ug/intlinprog.html + A_ub = np.array([[1, 1, 1]]) + b_ub = np.array([7]) + A_eq = np.array([[4, 2, 1]]) + b_eq = np.array([12]) + c = np.array([-3, -2, -1]) + + bounds = [(0, np.inf), (0, np.inf), (0, 1)] + integrality = [0, 1, 0] + + res = linprog(c=c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq, + bounds=bounds, method=self.method, + integrality=integrality) + + np.testing.assert_allclose(res.x, [0, 6, 0]) + np.testing.assert_allclose(res.fun, -12) + + # gh-16897: these fields were not present, ensure that they are now + assert res.get("mip_node_count", None) is not None + assert res.get("mip_dual_bound", None) is not None + assert res.get("mip_gap", None) is not None + + @pytest.mark.xslow + def test_mip6(self): + # solve a larger MIP with only equality constraints + # source: https://www.mathworks.com/help/optim/ug/intlinprog.html + A_eq = np.array([[22, 13, 26, 33, 21, 3, 14, 26], + [39, 16, 22, 28, 26, 30, 23, 24], + [18, 14, 29, 27, 30, 38, 26, 26], + [41, 26, 28, 36, 18, 38, 16, 26]]) + b_eq = np.array([7872, 10466, 11322, 12058]) + c = np.array([2, 10, 13, 17, 7, 5, 7, 3]) + + bounds = [(0, np.inf)]*8 + integrality = [1]*8 + + res = linprog(c=c, A_eq=A_eq, b_eq=b_eq, bounds=bounds, + method=self.method, integrality=integrality) + + np.testing.assert_allclose(res.fun, 1854) + + @pytest.mark.xslow + def test_mip_rel_gap_passdown(self): + # MIP taken from test_mip6, solved with different values of mip_rel_gap + # solve a larger MIP with only equality constraints + # source: https://www.mathworks.com/help/optim/ug/intlinprog.html + A_eq = np.array([[22, 13, 26, 33, 21, 3, 14, 26], + [39, 16, 22, 28, 26, 30, 23, 24], + [18, 14, 29, 27, 30, 38, 26, 26], + [41, 26, 28, 36, 18, 38, 16, 26]]) + b_eq = np.array([7872, 10466, 11322, 12058]) + c = np.array([2, 10, 13, 17, 7, 5, 7, 3]) + + bounds = [(0, np.inf)]*8 + integrality = [1]*8 + + mip_rel_gaps = [0.5, 0.25, 0.01, 0.001] + sol_mip_gaps = [] + for mip_rel_gap in mip_rel_gaps: + res = linprog(c=c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq, + bounds=bounds, method=self.method, + integrality=integrality, + options={"mip_rel_gap": mip_rel_gap}) + final_mip_gap = res["mip_gap"] + # assert that the solution actually has mip_gap lower than the + # required mip_rel_gap supplied + assert final_mip_gap <= mip_rel_gap + sol_mip_gaps.append(final_mip_gap) + + # make sure that the mip_rel_gap parameter is actually doing something + # check that differences between solution gaps are declining + # monotonically with the mip_rel_gap parameter. np.diff does + # x[i+1] - x[i], so flip the array before differencing to get + # what should be a positive, monotone decreasing series of solution + # gaps + gap_diffs = np.diff(np.flip(sol_mip_gaps)) + assert np.all(gap_diffs >= 0) + assert not np.all(gap_diffs == 0) + + def test_semi_continuous(self): + # See issue #18106. This tests whether the solution is being + # checked correctly (status is 0) when integrality > 1: + # values are allowed to be 0 even if 0 is out of bounds. 
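+ # (integrality=2 marks a variable as semi-continuous and integrality=3
+ # as semi-integer: either the variable takes the value 0 or it lies
+ # within its declared bounds.)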
+ + c = np.array([1., 1., -1, -1]) + bounds = np.array([[0.5, 1.5], [0.5, 1.5], [0.5, 1.5], [0.5, 1.5]]) + integrality = np.array([2, 3, 2, 3]) + + res = linprog(c, bounds=bounds, + integrality=integrality, method='highs') + + np.testing.assert_allclose(res.x, [0, 0, 1.5, 1]) + assert res.status == 0 + + def test_bug_20584(self): + """ + Test that when integrality is a list of all zeros, linprog gives the + same result as when it is an array of all zeros / integrality=None + """ + c = [1, 1] + A_ub = [[-1, 0]] + b_ub = [-2.5] + res1 = linprog(c, A_ub=A_ub, b_ub=b_ub, integrality=[0, 0]) + res2 = linprog(c, A_ub=A_ub, b_ub=b_ub, integrality=np.asarray([0, 0])) + res3 = linprog(c, A_ub=A_ub, b_ub=b_ub, integrality=None) + assert_equal(res1.x, res2.x) + assert_equal(res1.x, res3.x) + + +########################### +# Autoscale-Specific Tests# +########################### + + +@pytest.mark.filterwarnings("ignore::DeprecationWarning") +class AutoscaleTests: + options = {"autoscale": True} + + test_bug_6139 = LinprogCommonTests.test_bug_6139 + test_bug_6690 = LinprogCommonTests.test_bug_6690 + test_bug_7237 = LinprogCommonTests.test_bug_7237 + + +class TestAutoscaleIP(AutoscaleTests): + method = "interior-point" + + def test_bug_6139(self): + self.options['tol'] = 1e-10 + return AutoscaleTests.test_bug_6139(self) + + +class TestAutoscaleSimplex(AutoscaleTests): + method = "simplex" + + +class TestAutoscaleRS(AutoscaleTests): + method = "revised simplex" + + def test_nontrivial_problem_with_guess(self): + c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem() + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options, x0=x_star) + _assert_success(res, desired_fun=f_star, desired_x=x_star) + assert_equal(res.nit, 0) + + def test_nontrivial_problem_with_bad_guess(self): + c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem() + bad_guess = [1, 2, 3, .5] + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options, x0=bad_guess) + assert_equal(res.status, 6) + + +########################### +# Redundancy Removal Tests# +########################### + + +@pytest.mark.filterwarnings("ignore::DeprecationWarning") +class RRTests: + method = "interior-point" + LCT = LinprogCommonTests + # these are a few of the existing tests that have redundancy + test_RR_infeasibility = LCT.test_remove_redundancy_infeasibility + test_bug_10349 = LCT.test_bug_10349 + test_bug_7044 = LCT.test_bug_7044 + test_NFLC = LCT.test_network_flow_limited_capacity + test_enzo_example_b = LCT.test_enzo_example_b + + +class TestRRSVD(RRTests): + options = {"rr_method": "SVD"} + + +class TestRRPivot(RRTests): + options = {"rr_method": "pivot"} + + +class TestRRID(RRTests): + options = {"rr_method": "ID"} diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_lsq_common.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_lsq_common.py new file mode 100644 index 0000000000000000000000000000000000000000..650deedce88b6babd8a3f2b62a5839f1a6cb966c --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_lsq_common.py @@ -0,0 +1,297 @@ +from numpy.testing import assert_, assert_allclose, assert_equal +from pytest import raises as assert_raises +import numpy as np + +from scipy.optimize._lsq.common import ( + step_size_to_bound, find_active_constraints, 
make_strictly_feasible, + CL_scaling_vector, intersect_trust_region, build_quadratic_1d, + minimize_quadratic_1d, evaluate_quadratic, reflective_transformation, + left_multiplied_operator, right_multiplied_operator) + + +class TestBounds: + def test_step_size_to_bounds(self): + lb = np.array([-1.0, 2.5, 10.0]) + ub = np.array([1.0, 5.0, 100.0]) + x = np.array([0.0, 2.5, 12.0]) + + s = np.array([0.1, 0.0, 0.0]) + step, hits = step_size_to_bound(x, s, lb, ub) + assert_equal(step, 10) + assert_equal(hits, [1, 0, 0]) + + s = np.array([0.01, 0.05, -1.0]) + step, hits = step_size_to_bound(x, s, lb, ub) + assert_equal(step, 2) + assert_equal(hits, [0, 0, -1]) + + s = np.array([10.0, -0.0001, 100.0]) + step, hits = step_size_to_bound(x, s, lb, ub) + assert_equal(step, np.array(-0)) + assert_equal(hits, [0, -1, 0]) + + s = np.array([1.0, 0.5, -2.0]) + step, hits = step_size_to_bound(x, s, lb, ub) + assert_equal(step, 1.0) + assert_equal(hits, [1, 0, -1]) + + s = np.zeros(3) + step, hits = step_size_to_bound(x, s, lb, ub) + assert_equal(step, np.inf) + assert_equal(hits, [0, 0, 0]) + + def test_find_active_constraints(self): + lb = np.array([0.0, -10.0, 1.0]) + ub = np.array([1.0, 0.0, 100.0]) + + x = np.array([0.5, -5.0, 2.0]) + active = find_active_constraints(x, lb, ub) + assert_equal(active, [0, 0, 0]) + + x = np.array([0.0, 0.0, 10.0]) + active = find_active_constraints(x, lb, ub) + assert_equal(active, [-1, 1, 0]) + + active = find_active_constraints(x, lb, ub, rtol=0) + assert_equal(active, [-1, 1, 0]) + + x = np.array([1e-9, -1e-8, 100 - 1e-9]) + active = find_active_constraints(x, lb, ub) + assert_equal(active, [0, 0, 1]) + + active = find_active_constraints(x, lb, ub, rtol=1.5e-9) + assert_equal(active, [-1, 0, 1]) + + lb = np.array([1.0, -np.inf, -np.inf]) + ub = np.array([np.inf, 10.0, np.inf]) + + x = np.ones(3) + active = find_active_constraints(x, lb, ub) + assert_equal(active, [-1, 0, 0]) + + # Handles out-of-bound cases. 
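+ # (x[0] falls below lb[0] and x[1] exceeds ub[1]; the violated bounds
+ # are still reported as the active constraints.)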
+ x = np.array([0.0, 11.0, 0.0]) + active = find_active_constraints(x, lb, ub) + assert_equal(active, [-1, 1, 0]) + + active = find_active_constraints(x, lb, ub, rtol=0) + assert_equal(active, [-1, 1, 0]) + + def test_make_strictly_feasible(self): + lb = np.array([-0.5, -0.8, 2.0]) + ub = np.array([0.8, 1.0, 3.0]) + + x = np.array([-0.5, 0.0, 2 + 1e-10]) + + x_new = make_strictly_feasible(x, lb, ub, rstep=0) + assert_(x_new[0] > -0.5) + assert_equal(x_new[1:], x[1:]) + + x_new = make_strictly_feasible(x, lb, ub, rstep=1e-4) + assert_equal(x_new, [-0.5 + 1e-4, 0.0, 2 * (1 + 1e-4)]) + + x = np.array([-0.5, -1, 3.1]) + x_new = make_strictly_feasible(x, lb, ub) + assert_(np.all((x_new >= lb) & (x_new <= ub))) + + x_new = make_strictly_feasible(x, lb, ub, rstep=0) + assert_(np.all((x_new >= lb) & (x_new <= ub))) + + lb = np.array([-1, 100.0]) + ub = np.array([1, 100.0 + 1e-10]) + x = np.array([0, 100.0]) + x_new = make_strictly_feasible(x, lb, ub, rstep=1e-8) + assert_equal(x_new, [0, 100.0 + 0.5e-10]) + + def test_scaling_vector(self): + lb = np.array([-np.inf, -5.0, 1.0, -np.inf]) + ub = np.array([1.0, np.inf, 10.0, np.inf]) + x = np.array([0.5, 2.0, 5.0, 0.0]) + g = np.array([1.0, 0.1, -10.0, 0.0]) + v, dv = CL_scaling_vector(x, g, lb, ub) + assert_equal(v, [1.0, 7.0, 5.0, 1.0]) + assert_equal(dv, [0.0, 1.0, -1.0, 0.0]) + + +class TestQuadraticFunction: + def setup_method(self): + self.J = np.array([ + [0.1, 0.2], + [-1.0, 1.0], + [0.5, 0.2]]) + self.g = np.array([0.8, -2.0]) + self.diag = np.array([1.0, 2.0]) + + def test_build_quadratic_1d(self): + s = np.zeros(2) + a, b = build_quadratic_1d(self.J, self.g, s) + assert_equal(a, 0) + assert_equal(b, 0) + + a, b = build_quadratic_1d(self.J, self.g, s, diag=self.diag) + assert_equal(a, 0) + assert_equal(b, 0) + + s = np.array([1.0, -1.0]) + a, b = build_quadratic_1d(self.J, self.g, s) + assert_equal(a, 2.05) + assert_equal(b, 2.8) + + a, b = build_quadratic_1d(self.J, self.g, s, diag=self.diag) + assert_equal(a, 3.55) + assert_equal(b, 2.8) + + s0 = np.array([0.5, 0.5]) + a, b, c = build_quadratic_1d(self.J, self.g, s, diag=self.diag, s0=s0) + assert_equal(a, 3.55) + assert_allclose(b, 2.39) + assert_allclose(c, -0.1525) + + def test_minimize_quadratic_1d(self): + a = 5 + b = -1 + + t, y = minimize_quadratic_1d(a, b, 1, 2) + assert_equal(t, 1) + assert_allclose(y, a * t**2 + b * t, rtol=1e-15) + + t, y = minimize_quadratic_1d(a, b, -2, -1) + assert_equal(t, -1) + assert_allclose(y, a * t**2 + b * t, rtol=1e-15) + + t, y = minimize_quadratic_1d(a, b, -1, 1) + assert_equal(t, 0.1) + assert_allclose(y, a * t**2 + b * t, rtol=1e-15) + + c = 10 + t, y = minimize_quadratic_1d(a, b, -1, 1, c=c) + assert_equal(t, 0.1) + assert_allclose(y, a * t**2 + b * t + c, rtol=1e-15) + + t, y = minimize_quadratic_1d(a, b, -np.inf, np.inf, c=c) + assert_equal(t, 0.1) + assert_allclose(y, a * t ** 2 + b * t + c, rtol=1e-15) + + t, y = minimize_quadratic_1d(a, b, 0, np.inf, c=c) + assert_equal(t, 0.1) + assert_allclose(y, a * t ** 2 + b * t + c, rtol=1e-15) + + t, y = minimize_quadratic_1d(a, b, -np.inf, 0, c=c) + assert_equal(t, 0) + assert_allclose(y, a * t ** 2 + b * t + c, rtol=1e-15) + + a = -1 + b = 0.2 + t, y = minimize_quadratic_1d(a, b, -np.inf, np.inf) + assert_equal(y, -np.inf) + + t, y = minimize_quadratic_1d(a, b, 0, np.inf) + assert_equal(t, np.inf) + assert_equal(y, -np.inf) + + t, y = minimize_quadratic_1d(a, b, -np.inf, 0) + assert_equal(t, -np.inf) + assert_equal(y, -np.inf) + + def test_evaluate_quadratic(self): + s = np.array([1.0, -1.0]) + + 
value = evaluate_quadratic(self.J, self.g, s) + assert_equal(value, 4.85) + + value = evaluate_quadratic(self.J, self.g, s, diag=self.diag) + assert_equal(value, 6.35) + + s = np.array([[1.0, -1.0], + [1.0, 1.0], + [0.0, 0.0]]) + + values = evaluate_quadratic(self.J, self.g, s) + assert_allclose(values, [4.85, -0.91, 0.0]) + + values = evaluate_quadratic(self.J, self.g, s, diag=self.diag) + assert_allclose(values, [6.35, 0.59, 0.0]) + + +class TestTrustRegion: + def test_intersect(self): + Delta = 1.0 + + x = np.zeros(3) + s = np.array([1.0, 0.0, 0.0]) + t_neg, t_pos = intersect_trust_region(x, s, Delta) + assert_equal(t_neg, -1) + assert_equal(t_pos, 1) + + s = np.array([-1.0, 1.0, -1.0]) + t_neg, t_pos = intersect_trust_region(x, s, Delta) + assert_allclose(t_neg, -3**-0.5) + assert_allclose(t_pos, 3**-0.5) + + x = np.array([0.5, -0.5, 0]) + s = np.array([0, 0, 1.0]) + t_neg, t_pos = intersect_trust_region(x, s, Delta) + assert_allclose(t_neg, -2**-0.5) + assert_allclose(t_pos, 2**-0.5) + + x = np.ones(3) + assert_raises(ValueError, intersect_trust_region, x, s, Delta) + + x = np.zeros(3) + s = np.zeros(3) + assert_raises(ValueError, intersect_trust_region, x, s, Delta) + + +def test_reflective_transformation(): + lb = np.array([-1, -2], dtype=float) + ub = np.array([5, 3], dtype=float) + + y = np.array([0, 0]) + x, g = reflective_transformation(y, lb, ub) + assert_equal(x, y) + assert_equal(g, np.ones(2)) + + y = np.array([-4, 4], dtype=float) + + x, g = reflective_transformation(y, lb, np.array([np.inf, np.inf])) + assert_equal(x, [2, 4]) + assert_equal(g, [-1, 1]) + + x, g = reflective_transformation(y, np.array([-np.inf, -np.inf]), ub) + assert_equal(x, [-4, 2]) + assert_equal(g, [1, -1]) + + x, g = reflective_transformation(y, lb, ub) + assert_equal(x, [2, 2]) + assert_equal(g, [-1, -1]) + + lb = np.array([-np.inf, -2]) + ub = np.array([5, np.inf]) + y = np.array([10, 10], dtype=float) + x, g = reflective_transformation(y, lb, ub) + assert_equal(x, [0, 10]) + assert_equal(g, [-1, 1]) + + +def test_linear_operators(): + A = np.arange(6).reshape((3, 2)) + + d_left = np.array([-1, 2, 5]) + DA = np.diag(d_left).dot(A) + J_left = left_multiplied_operator(A, d_left) + + d_right = np.array([5, 10]) + AD = A.dot(np.diag(d_right)) + J_right = right_multiplied_operator(A, d_right) + + x = np.array([-2, 3]) + X = -2 * np.arange(2, 8).reshape((2, 3)) + xt = np.array([0, -2, 15]) + + assert_allclose(DA.dot(x), J_left.matvec(x)) + assert_allclose(DA.dot(X), J_left.matmat(X)) + assert_allclose(DA.T.dot(xt), J_left.rmatvec(xt)) + + assert_allclose(AD.dot(x), J_right.matvec(x)) + assert_allclose(AD.dot(X), J_right.matmat(X)) + assert_allclose(AD.T.dot(xt), J_right.rmatvec(xt)) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_lsq_linear.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_lsq_linear.py new file mode 100644 index 0000000000000000000000000000000000000000..23032e99764ed2e90d7192c078e5cb4518e328fd --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_lsq_linear.py @@ -0,0 +1,287 @@ +import pytest + +import numpy as np +from numpy.linalg import lstsq +from numpy.testing import assert_allclose, assert_equal, assert_ + +from scipy.sparse import rand, coo_matrix +from scipy.sparse.linalg import aslinearoperator +from scipy.optimize import lsq_linear +from scipy.optimize._minimize import Bounds + + +A 
= np.array([ + [0.171, -0.057], + [-0.049, -0.248], + [-0.166, 0.054], +]) +b = np.array([0.074, 1.014, -0.383]) + + +class BaseMixin: + def setup_method(self): + self.rnd = np.random.RandomState(0) + + def test_dense_no_bounds(self): + for lsq_solver in self.lsq_solvers: + res = lsq_linear(A, b, method=self.method, lsq_solver=lsq_solver) + assert_allclose(res.x, lstsq(A, b, rcond=-1)[0]) + assert_allclose(res.x, res.unbounded_sol[0]) + + def test_dense_bounds(self): + # Solutions for comparison are taken from MATLAB. + lb = np.array([-1, -10]) + ub = np.array([1, 0]) + unbounded_sol = lstsq(A, b, rcond=-1)[0] + for lsq_solver in self.lsq_solvers: + res = lsq_linear(A, b, (lb, ub), method=self.method, + lsq_solver=lsq_solver) + assert_allclose(res.x, lstsq(A, b, rcond=-1)[0]) + assert_allclose(res.unbounded_sol[0], unbounded_sol) + + lb = np.array([0.0, -np.inf]) + for lsq_solver in self.lsq_solvers: + res = lsq_linear(A, b, (lb, np.inf), method=self.method, + lsq_solver=lsq_solver) + assert_allclose(res.x, np.array([0.0, -4.084174437334673]), + atol=1e-6) + assert_allclose(res.unbounded_sol[0], unbounded_sol) + + lb = np.array([-1, 0]) + for lsq_solver in self.lsq_solvers: + res = lsq_linear(A, b, (lb, np.inf), method=self.method, + lsq_solver=lsq_solver) + assert_allclose(res.x, np.array([0.448427311733504, 0]), + atol=1e-15) + assert_allclose(res.unbounded_sol[0], unbounded_sol) + + ub = np.array([np.inf, -5]) + for lsq_solver in self.lsq_solvers: + res = lsq_linear(A, b, (-np.inf, ub), method=self.method, + lsq_solver=lsq_solver) + assert_allclose(res.x, np.array([-0.105560998682388, -5])) + assert_allclose(res.unbounded_sol[0], unbounded_sol) + + ub = np.array([-1, np.inf]) + for lsq_solver in self.lsq_solvers: + res = lsq_linear(A, b, (-np.inf, ub), method=self.method, + lsq_solver=lsq_solver) + assert_allclose(res.x, np.array([-1, -4.181102129483254])) + assert_allclose(res.unbounded_sol[0], unbounded_sol) + + lb = np.array([0, -4]) + ub = np.array([1, 0]) + for lsq_solver in self.lsq_solvers: + res = lsq_linear(A, b, (lb, ub), method=self.method, + lsq_solver=lsq_solver) + assert_allclose(res.x, np.array([0.005236663400791, -4])) + assert_allclose(res.unbounded_sol[0], unbounded_sol) + + def test_bounds_variants(self): + x = np.array([1, 3]) + A = self.rnd.uniform(size=(2, 2)) + b = A@x + lb = np.array([1, 1]) + ub = np.array([2, 2]) + bounds_old = (lb, ub) + bounds_new = Bounds(lb, ub) + res_old = lsq_linear(A, b, bounds_old) + res_new = lsq_linear(A, b, bounds_new) + assert not np.allclose(res_new.x, res_new.unbounded_sol[0]) + assert_allclose(res_old.x, res_new.x) + + def test_np_matrix(self): + # gh-10711 + with np.testing.suppress_warnings() as sup: + sup.filter(PendingDeprecationWarning) + A = np.matrix([[20, -4, 0, 2, 3], [10, -2, 1, 0, -1]]) + k = np.array([20, 15]) + lsq_linear(A, k) + + def test_dense_rank_deficient(self): + A = np.array([[-0.307, -0.184]]) + b = np.array([0.773]) + lb = [-0.1, -0.1] + ub = [0.1, 0.1] + for lsq_solver in self.lsq_solvers: + res = lsq_linear(A, b, (lb, ub), method=self.method, + lsq_solver=lsq_solver) + assert_allclose(res.x, [-0.1, -0.1]) + assert_allclose(res.unbounded_sol[0], lstsq(A, b, rcond=-1)[0]) + + A = np.array([ + [0.334, 0.668], + [-0.516, -1.032], + [0.192, 0.384], + ]) + b = np.array([-1.436, 0.135, 0.909]) + lb = [0, -1] + ub = [1, -0.5] + for lsq_solver in self.lsq_solvers: + res = lsq_linear(A, b, (lb, ub), method=self.method, + lsq_solver=lsq_solver) + assert_allclose(res.optimality, 0, atol=1e-11) + 
assert_allclose(res.unbounded_sol[0], lstsq(A, b, rcond=-1)[0]) + + def test_full_result(self): + lb = np.array([0, -4]) + ub = np.array([1, 0]) + res = lsq_linear(A, b, (lb, ub), method=self.method) + + assert_allclose(res.x, [0.005236663400791, -4]) + assert_allclose(res.unbounded_sol[0], lstsq(A, b, rcond=-1)[0]) + + r = A.dot(res.x) - b + assert_allclose(res.cost, 0.5 * np.dot(r, r)) + assert_allclose(res.fun, r) + + assert_allclose(res.optimality, 0.0, atol=1e-12) + assert_equal(res.active_mask, [0, -1]) + assert_(res.nit < 15) + assert_(res.status == 1 or res.status == 3) + assert_(isinstance(res.message, str)) + assert_(res.success) + + # This is a test for issue #9982. + def test_almost_singular(self): + A = np.array( + [[0.8854232310355122, 0.0365312146937765, 0.0365312146836789], + [0.3742460132129041, 0.0130523214078376, 0.0130523214077873], + [0.9680633871281361, 0.0319366128718639, 0.0319366128718388]]) + + b = np.array( + [0.0055029366538097, 0.0026677442422208, 0.0066612514782381]) + + result = lsq_linear(A, b, method=self.method) + assert_(result.cost < 1.1e-8) + + @pytest.mark.xslow + def test_large_rank_deficient(self): + np.random.seed(0) + n, m = np.sort(np.random.randint(2, 1000, size=2)) + m *= 2 # make m >> n + A = 1.0 * np.random.randint(-99, 99, size=[m, n]) + b = 1.0 * np.random.randint(-99, 99, size=[m]) + bounds = 1.0 * np.sort(np.random.randint(-99, 99, size=(2, n)), axis=0) + bounds[1, :] += 1.0 # ensure up > lb + + # Make the A matrix strongly rank deficient by replicating some columns + w = np.random.choice(n, n) # Select random columns with duplicates + A = A[:, w] + + x_bvls = lsq_linear(A, b, bounds=bounds, method='bvls').x + x_trf = lsq_linear(A, b, bounds=bounds, method='trf').x + + cost_bvls = np.sum((A @ x_bvls - b)**2) + cost_trf = np.sum((A @ x_trf - b)**2) + + assert_(abs(cost_bvls - cost_trf) < cost_trf*1e-10) + + def test_convergence_small_matrix(self): + A = np.array([[49.0, 41.0, -32.0], + [-19.0, -32.0, -8.0], + [-13.0, 10.0, 69.0]]) + b = np.array([-41.0, -90.0, 47.0]) + bounds = np.array([[31.0, -44.0, 26.0], + [54.0, -32.0, 28.0]]) + + x_bvls = lsq_linear(A, b, bounds=bounds, method='bvls').x + x_trf = lsq_linear(A, b, bounds=bounds, method='trf').x + + cost_bvls = np.sum((A @ x_bvls - b)**2) + cost_trf = np.sum((A @ x_trf - b)**2) + + assert_(abs(cost_bvls - cost_trf) < cost_trf*1e-10) + + +class SparseMixin: + def test_sparse_and_LinearOperator(self): + m = 5000 + n = 1000 + rng = np.random.RandomState(0) + A = rand(m, n, random_state=rng) + b = rng.randn(m) + res = lsq_linear(A, b) + assert_allclose(res.optimality, 0, atol=1e-6) + + A = aslinearoperator(A) + res = lsq_linear(A, b) + assert_allclose(res.optimality, 0, atol=1e-6) + + @pytest.mark.fail_slow(10) + def test_sparse_bounds(self): + m = 5000 + n = 1000 + rng = np.random.RandomState(0) + A = rand(m, n, random_state=rng) + b = rng.randn(m) + lb = rng.randn(n) + ub = lb + 1 + res = lsq_linear(A, b, (lb, ub)) + assert_allclose(res.optimality, 0.0, atol=1e-6) + + res = lsq_linear(A, b, (lb, ub), lsmr_tol=1e-13, + lsmr_maxiter=1500) + assert_allclose(res.optimality, 0.0, atol=1e-6) + + res = lsq_linear(A, b, (lb, ub), lsmr_tol='auto') + assert_allclose(res.optimality, 0.0, atol=1e-6) + + def test_sparse_ill_conditioned(self): + # Sparse matrix with condition number of ~4 million + data = np.array([1., 1., 1., 1. 
+ 1e-6, 1.]) + row = np.array([0, 0, 1, 2, 2]) + col = np.array([0, 2, 1, 0, 2]) + A = coo_matrix((data, (row, col)), shape=(3, 3)) + + # Get the exact solution + exact_sol = lsq_linear(A.toarray(), b, lsq_solver='exact') + + # Default lsmr arguments should not fully converge the solution + default_lsmr_sol = lsq_linear(A, b, lsq_solver='lsmr') + with pytest.raises(AssertionError, match=""): + assert_allclose(exact_sol.x, default_lsmr_sol.x) + + # By increasing the maximum lsmr iters, it will converge + conv_lsmr = lsq_linear(A, b, lsq_solver='lsmr', lsmr_maxiter=10) + assert_allclose(exact_sol.x, conv_lsmr.x) + + +class TestTRF(BaseMixin, SparseMixin): + method = 'trf' + lsq_solvers = ['exact', 'lsmr'] + + +class TestBVLS(BaseMixin): + method = 'bvls' + lsq_solvers = ['exact'] + + +class TestErrorChecking: + def test_option_lsmr_tol(self): + # Should work with a positive float, string equal to 'auto', or None + _ = lsq_linear(A, b, lsq_solver='lsmr', lsmr_tol=1e-2) + _ = lsq_linear(A, b, lsq_solver='lsmr', lsmr_tol='auto') + _ = lsq_linear(A, b, lsq_solver='lsmr', lsmr_tol=None) + + # Should raise error with negative float, strings + # other than 'auto', and integers + err_message = "`lsmr_tol` must be None, 'auto', or positive float." + with pytest.raises(ValueError, match=err_message): + _ = lsq_linear(A, b, lsq_solver='lsmr', lsmr_tol=-0.1) + with pytest.raises(ValueError, match=err_message): + _ = lsq_linear(A, b, lsq_solver='lsmr', lsmr_tol='foo') + with pytest.raises(ValueError, match=err_message): + _ = lsq_linear(A, b, lsq_solver='lsmr', lsmr_tol=1) + + def test_option_lsmr_maxiter(self): + # Should work with positive integers or None + _ = lsq_linear(A, b, lsq_solver='lsmr', lsmr_maxiter=1) + _ = lsq_linear(A, b, lsq_solver='lsmr', lsmr_maxiter=None) + + # Should raise error with 0 or negative max iter + err_message = "`lsmr_maxiter` must be None or positive integer." + with pytest.raises(ValueError, match=err_message): + _ = lsq_linear(A, b, lsq_solver='lsmr', lsmr_maxiter=0) + with pytest.raises(ValueError, match=err_message): + _ = lsq_linear(A, b, lsq_solver='lsmr', lsmr_maxiter=-1) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_milp.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_milp.py new file mode 100644 index 0000000000000000000000000000000000000000..165417fa1c0a9799056fd67a7218499c611a673e --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_milp.py @@ -0,0 +1,459 @@ +""" +Unit test for Mixed Integer Linear Programming +""" +import re +import sys + +import numpy as np +from numpy.testing import assert_allclose, assert_array_equal +import pytest + +from .test_linprog import magic_square +from scipy.optimize import milp, Bounds, LinearConstraint +from scipy import sparse + + +_IS_32BIT = (sys.maxsize < 2**32) + +def test_milp_iv(): + + message = "`c` must be a dense array" + with pytest.raises(ValueError, match=message): + milp(sparse.coo_array([0, 0])) + + message = "`c` must be a one-dimensional array of finite numbers with" + with pytest.raises(ValueError, match=message): + milp(np.zeros((3, 4))) + with pytest.raises(ValueError, match=message): + milp([]) + with pytest.raises(ValueError, match=message): + milp(None) + + message = "`bounds` must be convertible into an instance of..." 
+ with pytest.raises(ValueError, match=message): + milp(1, bounds=10) + + message = "`constraints` (or each element within `constraints`) must be" + with pytest.raises(ValueError, match=re.escape(message)): + milp(1, constraints=10) + with pytest.raises(ValueError, match=re.escape(message)): + milp(np.zeros(3), constraints=([[1, 2, 3]], [2, 3], [2, 3])) + with pytest.raises(ValueError, match=re.escape(message)): + milp(np.zeros(2), constraints=([[1, 2]], [2], sparse.coo_array([2]))) + + message = "The shape of `A` must be (len(b_l), len(c))." + with pytest.raises(ValueError, match=re.escape(message)): + milp(np.zeros(3), constraints=([[1, 2]], [2], [2])) + + message = "`integrality` must be a dense array" + with pytest.raises(ValueError, match=message): + milp([1, 2], integrality=sparse.coo_array([1, 2])) + + message = ("`integrality` must contain integers 0-3 and be broadcastable " + "to `c.shape`.") + with pytest.raises(ValueError, match=message): + milp([1, 2, 3], integrality=[1, 2]) + with pytest.raises(ValueError, match=message): + milp([1, 2, 3], integrality=[1, 5, 3]) + + message = "Lower and upper bounds must be dense arrays." + with pytest.raises(ValueError, match=message): + milp([1, 2, 3], bounds=([1, 2], sparse.coo_array([3, 4]))) + + message = "`lb`, `ub`, and `keep_feasible` must be broadcastable." + with pytest.raises(ValueError, match=message): + milp([1, 2, 3], bounds=([1, 2], [3, 4, 5])) + with pytest.raises(ValueError, match=message): + milp([1, 2, 3], bounds=([1, 2, 3], [4, 5])) + + message = "`bounds.lb` and `bounds.ub` must contain reals and..." + with pytest.raises(ValueError, match=message): + milp([1, 2, 3], bounds=([1, 2], [3, 4])) + with pytest.raises(ValueError, match=message): + milp([1, 2, 3], bounds=([1, 2, 3], ["3+4", 4, 5])) + with pytest.raises(ValueError, match=message): + milp([1, 2, 3], bounds=([1, 2, 3], [set(), 4, 5])) + + +@pytest.mark.xfail(run=False, + reason="Needs to be fixed in `_highs_wrapper`") +def test_milp_options(capsys): + # run=False now because of gh-16347 + message = "Unrecognized options detected: {'ekki'}..." + options = {'ekki': True} + with pytest.warns(RuntimeWarning, match=message): + milp(1, options=options) + + A, b, c, numbers, M = magic_square(3) + options = {"disp": True, "presolve": False, "time_limit": 0.05} + res = milp(c=c, constraints=(A, b, b), bounds=(0, 1), integrality=1, + options=options) + + captured = capsys.readouterr() + assert "Presolve is switched off" in captured.out + assert "Time Limit Reached" in captured.out + assert not res.success + + +def test_result(): + A, b, c, numbers, M = magic_square(3) + res = milp(c=c, constraints=(A, b, b), bounds=(0, 1), integrality=1) + assert res.status == 0 + assert res.success + msg = "Optimization terminated successfully. (HiGHS Status 7:" + assert res.message.startswith(msg) + assert isinstance(res.x, np.ndarray) + assert isinstance(res.fun, float) + assert isinstance(res.mip_node_count, int) + assert isinstance(res.mip_dual_bound, float) + assert isinstance(res.mip_gap, float) + + A, b, c, numbers, M = magic_square(6) + res = milp(c=c*0, constraints=(A, b, b), bounds=(0, 1), integrality=1, + options={'time_limit': 0.05}) + assert res.status == 1 + assert not res.success + msg = "Time limit reached. (HiGHS Status 13:" + assert res.message.startswith(msg) + assert (res.fun is res.mip_dual_bound is res.mip_gap + is res.mip_node_count is res.x is None) + + res = milp(1, bounds=(1, -1)) + assert res.status == 2 + assert not res.success + msg = "The problem is infeasible. 
(HiGHS Status 8:" + assert res.message.startswith(msg) + assert (res.fun is res.mip_dual_bound is res.mip_gap + is res.mip_node_count is res.x is None) + + res = milp(-1) + assert res.status == 3 + assert not res.success + msg = "The problem is unbounded. (HiGHS Status 10:" + assert res.message.startswith(msg) + assert (res.fun is res.mip_dual_bound is res.mip_gap + is res.mip_node_count is res.x is None) + + +def test_milp_optional_args(): + # check that arguments other than `c` are indeed optional + res = milp(1) + assert res.fun == 0 + assert_array_equal(res.x, [0]) + + +def test_milp_1(): + # solve magic square problem + n = 3 + A, b, c, numbers, M = magic_square(n) + A = sparse.csc_array(A) # confirm that sparse arrays are accepted + res = milp(c=c*0, constraints=(A, b, b), bounds=(0, 1), integrality=1) + + # check that solution is a magic square + x = np.round(res.x) + s = (numbers.flatten() * x).reshape(n**2, n, n) + square = np.sum(s, axis=0) + np.testing.assert_allclose(square.sum(axis=0), M) + np.testing.assert_allclose(square.sum(axis=1), M) + np.testing.assert_allclose(np.diag(square).sum(), M) + np.testing.assert_allclose(np.diag(square[:, ::-1]).sum(), M) + + +def test_milp_2(): + # solve MIP with inequality constraints and all integer constraints + # source: slide 5, + # https://www.cs.upc.edu/~erodri/webpage/cps/theory/lp/milp/slides.pdf + # also check that `milp` accepts all valid ways of specifying constraints + c = -np.ones(2) + A = [[-2, 2], [-8, 10]] + b_l = [1, -np.inf] + b_u = [np.inf, 13] + linear_constraint = LinearConstraint(A, b_l, b_u) + + # solve original problem + res1 = milp(c=c, constraints=(A, b_l, b_u), integrality=True) + res2 = milp(c=c, constraints=linear_constraint, integrality=True) + res3 = milp(c=c, constraints=[(A, b_l, b_u)], integrality=True) + res4 = milp(c=c, constraints=[linear_constraint], integrality=True) + res5 = milp(c=c, integrality=True, + constraints=[(A[:1], b_l[:1], b_u[:1]), + (A[1:], b_l[1:], b_u[1:])]) + res6 = milp(c=c, integrality=True, + constraints=[LinearConstraint(A[:1], b_l[:1], b_u[:1]), + LinearConstraint(A[1:], b_l[1:], b_u[1:])]) + res7 = milp(c=c, integrality=True, + constraints=[(A[:1], b_l[:1], b_u[:1]), + LinearConstraint(A[1:], b_l[1:], b_u[1:])]) + xs = np.array([res1.x, res2.x, res3.x, res4.x, res5.x, res6.x, res7.x]) + funs = np.array([res1.fun, res2.fun, res3.fun, + res4.fun, res5.fun, res6.fun, res7.fun]) + np.testing.assert_allclose(xs, np.broadcast_to([1, 2], xs.shape)) + np.testing.assert_allclose(funs, -3) + + # solve relaxed problem + res = milp(c=c, constraints=(A, b_l, b_u)) + np.testing.assert_allclose(res.x, [4, 4.5]) + np.testing.assert_allclose(res.fun, -8.5) + + +def test_milp_3(): + # solve MIP with inequality constraints and all integer constraints + # source: https://en.wikipedia.org/wiki/Integer_programming#Example + c = [0, -1] + A = [[-1, 1], [3, 2], [2, 3]] + b_u = [1, 12, 12] + b_l = np.full_like(b_u, -np.inf, dtype=np.float64) + constraints = LinearConstraint(A, b_l, b_u) + + integrality = np.ones_like(c) + + # solve original problem + res = milp(c=c, constraints=constraints, integrality=integrality) + assert_allclose(res.fun, -2) + # two optimal solutions possible, just need one of them + assert np.allclose(res.x, [1, 2]) or np.allclose(res.x, [2, 2]) + + # solve relaxed problem + res = milp(c=c, constraints=constraints) + assert_allclose(res.fun, -2.8) + assert_allclose(res.x, [1.8, 2.8]) + + +def test_milp_4(): + # solve MIP with inequality constraints and only one integer constraint + 
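Aside (editor's illustration, not part of the vendored scipy file): test_milp_3 above solves the Wikipedia integer-programming example; extracted as a standalone script, with the non-negativity of the variables written out explicitly via `Bounds`, it would read roughly like this.

import numpy as np
from scipy.optimize import milp, Bounds, LinearConstraint

# Maximize x1 (i.e. minimize -x1) over non-negative integers subject to
# -x0 + x1 <= 1, 3*x0 + 2*x1 <= 12, 2*x0 + 3*x1 <= 12.
c = np.array([0, -1])
A = np.array([[-1, 1], [3, 2], [2, 3]])
b_u = np.array([1, 12, 12])
b_l = np.full(3, -np.inf)
res = milp(c, constraints=LinearConstraint(A, b_l, b_u),
           integrality=np.ones(2), bounds=Bounds(0, np.inf))
print(res.x, res.fun)  # an optimum such as [1, 2] or [2, 2], with fun == -2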
# source: https://www.mathworks.com/help/optim/ug/intlinprog.html + c = [8, 1] + integrality = [0, 1] + A = [[1, 2], [-4, -1], [2, 1]] + b_l = [-14, -np.inf, -np.inf] + b_u = [np.inf, -33, 20] + constraints = LinearConstraint(A, b_l, b_u) + bounds = Bounds(-np.inf, np.inf) + + res = milp(c, integrality=integrality, bounds=bounds, + constraints=constraints) + assert_allclose(res.fun, 59) + assert_allclose(res.x, [6.5, 7]) + + +def test_milp_5(): + # solve MIP with inequality and equality constraints + # source: https://www.mathworks.com/help/optim/ug/intlinprog.html + c = [-3, -2, -1] + integrality = [0, 0, 1] + lb = [0, 0, 0] + ub = [np.inf, np.inf, 1] + bounds = Bounds(lb, ub) + A = [[1, 1, 1], [4, 2, 1]] + b_l = [-np.inf, 12] + b_u = [7, 12] + constraints = LinearConstraint(A, b_l, b_u) + + res = milp(c, integrality=integrality, bounds=bounds, + constraints=constraints) + # there are multiple solutions + assert_allclose(res.fun, -12) + + +@pytest.mark.xslow +def test_milp_6(): + # solve a larger MIP with only equality constraints + # source: https://www.mathworks.com/help/optim/ug/intlinprog.html + integrality = 1 + A_eq = np.array([[22, 13, 26, 33, 21, 3, 14, 26], + [39, 16, 22, 28, 26, 30, 23, 24], + [18, 14, 29, 27, 30, 38, 26, 26], + [41, 26, 28, 36, 18, 38, 16, 26]]) + b_eq = np.array([7872, 10466, 11322, 12058]) + c = np.array([2, 10, 13, 17, 7, 5, 7, 3]) + + res = milp(c=c, constraints=(A_eq, b_eq, b_eq), integrality=integrality) + + np.testing.assert_allclose(res.fun, 1854) + + +def test_infeasible_prob_16609(): + # Ensure presolve does not mark trivially infeasible problems + # as Optimal -- see gh-16609 + c = [1.0, 0.0] + integrality = [0, 1] + + lb = [0, -np.inf] + ub = [np.inf, np.inf] + bounds = Bounds(lb, ub) + + A_eq = [[0.0, 1.0]] + b_eq = [0.5] + constraints = LinearConstraint(A_eq, b_eq, b_eq) + + res = milp(c, integrality=integrality, bounds=bounds, + constraints=constraints) + np.testing.assert_equal(res.status, 2) + + +_msg_time = "Time limit reached. (HiGHS Status 13:" +_msg_iter = "Iteration limit reached. 
(HiGHS Status 14:" + +@pytest.mark.thread_unsafe +# See https://github.com/scipy/scipy/pull/19255#issuecomment-1778438888 +@pytest.mark.xfail(reason="Often buggy, revisit with callbacks, gh-19255") +@pytest.mark.skipif(np.intp(0).itemsize < 8, + reason="Unhandled 32-bit GCC FP bug") +@pytest.mark.slow +@pytest.mark.parametrize(["options", "msg"], [({"time_limit": 0.1}, _msg_time), + ({"node_limit": 1}, _msg_iter)]) +def test_milp_timeout_16545(options, msg): + # Ensure solution is not thrown away if MILP solver times out + # -- see gh-16545 + rng = np.random.default_rng(5123833489170494244) + A = rng.integers(0, 5, size=(100, 100)) + b_lb = np.full(100, fill_value=-np.inf) + b_ub = np.full(100, fill_value=25) + constraints = LinearConstraint(A, b_lb, b_ub) + variable_lb = np.zeros(100) + variable_ub = np.ones(100) + variable_bounds = Bounds(variable_lb, variable_ub) + integrality = np.ones(100) + c_vector = -np.ones(100) + res = milp( + c_vector, + integrality=integrality, + bounds=variable_bounds, + constraints=constraints, + options=options, + ) + + assert res.message.startswith(msg) + assert res["x"] is not None + + # ensure solution is feasible + x = res["x"] + tol = 1e-8 # sometimes needed due to finite numerical precision + assert np.all(b_lb - tol <= A @ x) and np.all(A @ x <= b_ub + tol) + assert np.all(variable_lb - tol <= x) and np.all(x <= variable_ub + tol) + assert np.allclose(x, np.round(x)) + + +def test_three_constraints_16878(): + # `milp` failed when exactly three constraints were passed + # Ensure that this is no longer the case. + rng = np.random.default_rng(5123833489170494244) + A = rng.integers(0, 5, size=(6, 6)) + bl = np.full(6, fill_value=-np.inf) + bu = np.full(6, fill_value=10) + constraints = [LinearConstraint(A[:2], bl[:2], bu[:2]), + LinearConstraint(A[2:4], bl[2:4], bu[2:4]), + LinearConstraint(A[4:], bl[4:], bu[4:])] + constraints2 = [(A[:2], bl[:2], bu[:2]), + (A[2:4], bl[2:4], bu[2:4]), + (A[4:], bl[4:], bu[4:])] + lb = np.zeros(6) + ub = np.ones(6) + variable_bounds = Bounds(lb, ub) + c = -np.ones(6) + res1 = milp(c, bounds=variable_bounds, constraints=constraints) + res2 = milp(c, bounds=variable_bounds, constraints=constraints2) + ref = milp(c, bounds=variable_bounds, constraints=(A, bl, bu)) + assert res1.success and res2.success + assert_allclose(res1.x, ref.x) + assert_allclose(res2.x, ref.x) + + +@pytest.mark.xslow +def test_mip_rel_gap_passdown(): + # Solve problem with decreasing mip_gap to make sure mip_rel_gap decreases + # Adapted from test_linprog::TestLinprogHiGHSMIP::test_mip_rel_gap_passdown + # MIP taken from test_mip_6 above + A_eq = np.array([[22, 13, 26, 33, 21, 3, 14, 26], + [39, 16, 22, 28, 26, 30, 23, 24], + [18, 14, 29, 27, 30, 38, 26, 26], + [41, 26, 28, 36, 18, 38, 16, 26]]) + b_eq = np.array([7872, 10466, 11322, 12058]) + c = np.array([2, 10, 13, 17, 7, 5, 7, 3]) + + mip_rel_gaps = [0.25, 0.01, 0.001] + sol_mip_gaps = [] + for mip_rel_gap in mip_rel_gaps: + res = milp(c=c, bounds=(0, np.inf), constraints=(A_eq, b_eq, b_eq), + integrality=True, options={"mip_rel_gap": mip_rel_gap}) + # assert that the solution actually has mip_gap lower than the + # required mip_rel_gap supplied + assert res.mip_gap <= mip_rel_gap + # check that `res.mip_gap` is as defined in the documentation + assert res.mip_gap == (res.fun - res.mip_dual_bound)/res.fun + sol_mip_gaps.append(res.mip_gap) + + # make sure that the mip_rel_gap parameter is actually doing something + # check that differences between solution gaps are declining + # monotonically 
with the mip_rel_gap parameter. + assert np.all(np.diff(sol_mip_gaps) < 0) + +@pytest.mark.xfail(reason='Upstream / Wrapper issue, see gh-20116') +def test_large_numbers_gh20116(): + h = 10 ** 12 + A = np.array([[100.4534, h], [100.4534, -h]]) + b = np.array([h, 0]) + constraints = LinearConstraint(A=A, ub=b) + bounds = Bounds([0, 0], [1, 1]) + c = np.array([0, 0]) + res = milp(c=c, constraints=constraints, bounds=bounds, integrality=1) + assert res.status == 0 + assert np.all(A @ res.x < b) + + +def test_presolve_gh18907(): + from scipy.optimize import milp + import numpy as np + inf = np.inf + + # set up problem + c = np.array([-0.85850509, -0.82892676, -0.80026454, -0.63015535, -0.5099006, + -0.50077193, -0.4894404, -0.47285865, -0.39867774, -0.38069646, + -0.36733012, -0.36733012, -0.35820411, -0.31576141, -0.20626091, + -0.12466144, -0.10679516, -0.1061887, -0.1061887, -0.1061887, + -0., -0., -0., -0., 0., 0., 0., 0.]) + + A = np.array([[1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0.], + [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 1., 0., 0., 0., 0., 0., 1., 0., 0., 0., -25., -0., -0., -0.], + [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + -1., 0., 0., 0., 0., 0., -1., 0., 0., 0., 2., 0., 0., 0.], + [0., 0., 0., 0., 1., 1., 1., 1., 0., 1., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., -0., -25., -0., -0.], + [0., 0., 0., 0., -1., -1., -1., -1., 0., -1., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 2., 0., 0.], + [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 1., 1., 1., 0., 0., 0., 0., -0., -0., -25., -0.], + [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., -1., -1., -1., 0., 0., 0., 0., 0., 0., 2., 0.], + [1., 1., 1., 1., 0., 0., 0., 0., 1., 0., 1., 1., 1., 1., 0., + 1., 1., 0., 0., 0., 0., 1., 1., 1., -0., -0., -0., -25.], + [-1., -1., -1., -1., 0., 0., 0., 0., -1., 0., -1., -1., -1., -1., + 0., -1., -1., 0., 0., 0., 0., -1., -1., -1., 0., 0., 0., 2.]]) + bl = np.array([-inf, -inf, -inf, -inf, -inf, -inf, -inf, -inf, -inf]) + bu = np.array([100., 0., 0., 0., 0., 0., 0., 0., 0.]) + constraints = LinearConstraint(A, bl, bu) + integrality = 1 + bounds = (0, 1) + r1 = milp(c=c, constraints=constraints, integrality=integrality, bounds=bounds, + options={'presolve': True}) + r2 = milp(c=c, constraints=constraints, integrality=integrality, bounds=bounds, + options={'presolve': False}) + assert r1.status == r2.status + assert_allclose(r1.x, r2.x) + + # another example from the same issue + bounds = Bounds(lb=0, ub=1) + integrality = [1, 1, 0, 0] + c = [10, 9.52380952, -1000, -952.38095238] + A = [[1, 1, 0, 0], [0, 0, 1, 1], [200, 0, 0, 0], [0, 200, 0, 0], + [0, 0, 2000, 0], [0, 0, 0, 2000], [-1, 0, 1, 0], [-1, -1, 0, 1]] + ub = [1, 1, 200, 200, 1000, 1000, 0, 0] + constraints = LinearConstraint(A, ub=ub) + r1 = milp(c=c, constraints=constraints, bounds=bounds, + integrality=integrality, options={"presolve": False}) + r2 = milp(c=c, constraints=constraints, bounds=bounds, + integrality=integrality, options={"presolve": False}) + assert r1.status == r2.status + assert_allclose(r1.x, r2.x) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_minimize_constrained.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_minimize_constrained.py new file mode 100644 index 
0000000000000000000000000000000000000000..cda21fd1dc2b24b128e47405e01353fdae41a75c --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_minimize_constrained.py @@ -0,0 +1,845 @@ +import numpy as np +import pytest +from scipy.linalg import block_diag +from scipy.sparse import csc_matrix +from numpy.testing import (assert_array_almost_equal, + assert_array_less, assert_, + suppress_warnings) +from scipy.optimize import (NonlinearConstraint, + LinearConstraint, + Bounds, + minimize, + BFGS, + SR1, + rosen) + + +class Maratos: + """Problem 15.4 from Nocedal and Wright + + The following optimization problem: + minimize 2*(x[0]**2 + x[1]**2 - 1) - x[0] + Subject to: x[0]**2 + x[1]**2 - 1 = 0 + """ + + def __init__(self, degrees=60, constr_jac=None, constr_hess=None): + rads = degrees/180*np.pi + self.x0 = [np.cos(rads), np.sin(rads)] + self.x_opt = np.array([1.0, 0.0]) + self.constr_jac = constr_jac + self.constr_hess = constr_hess + self.bounds = None + + def fun(self, x): + return 2*(x[0]**2 + x[1]**2 - 1) - x[0] + + def grad(self, x): + return np.array([4*x[0]-1, 4*x[1]]) + + def hess(self, x): + return 4*np.eye(2) + + @property + def constr(self): + def fun(x): + return x[0]**2 + x[1]**2 + + if self.constr_jac is None: + def jac(x): + return [[2*x[0], 2*x[1]]] + else: + jac = self.constr_jac + + if self.constr_hess is None: + def hess(x, v): + return 2*v[0]*np.eye(2) + else: + hess = self.constr_hess + + return NonlinearConstraint(fun, 1, 1, jac, hess) + + +class MaratosTestArgs: + """Problem 15.4 from Nocedal and Wright + + The following optimization problem: + minimize 2*(x[0]**2 + x[1]**2 - 1) - x[0] + Subject to: x[0]**2 + x[1]**2 - 1 = 0 + """ + + def __init__(self, a, b, degrees=60, constr_jac=None, constr_hess=None): + rads = degrees/180*np.pi + self.x0 = [np.cos(rads), np.sin(rads)] + self.x_opt = np.array([1.0, 0.0]) + self.constr_jac = constr_jac + self.constr_hess = constr_hess + self.a = a + self.b = b + self.bounds = None + + def _test_args(self, a, b): + if self.a != a or self.b != b: + raise ValueError() + + def fun(self, x, a, b): + self._test_args(a, b) + return 2*(x[0]**2 + x[1]**2 - 1) - x[0] + + def grad(self, x, a, b): + self._test_args(a, b) + return np.array([4*x[0]-1, 4*x[1]]) + + def hess(self, x, a, b): + self._test_args(a, b) + return 4*np.eye(2) + + @property + def constr(self): + def fun(x): + return x[0]**2 + x[1]**2 + + if self.constr_jac is None: + def jac(x): + return [[4*x[0], 4*x[1]]] + else: + jac = self.constr_jac + + if self.constr_hess is None: + def hess(x, v): + return 2*v[0]*np.eye(2) + else: + hess = self.constr_hess + + return NonlinearConstraint(fun, 1, 1, jac, hess) + + +class MaratosGradInFunc: + """Problem 15.4 from Nocedal and Wright + + The following optimization problem: + minimize 2*(x[0]**2 + x[1]**2 - 1) - x[0] + Subject to: x[0]**2 + x[1]**2 - 1 = 0 + """ + + def __init__(self, degrees=60, constr_jac=None, constr_hess=None): + rads = degrees/180*np.pi + self.x0 = [np.cos(rads), np.sin(rads)] + self.x_opt = np.array([1.0, 0.0]) + self.constr_jac = constr_jac + self.constr_hess = constr_hess + self.bounds = None + + def fun(self, x): + return (2*(x[0]**2 + x[1]**2 - 1) - x[0], + np.array([4*x[0]-1, 4*x[1]])) + + @property + def grad(self): + return True + + def hess(self, x): + return 4*np.eye(2) + + @property + def constr(self): + def fun(x): + return x[0]**2 + x[1]**2 + + if self.constr_jac is None: + def jac(x): + return [[4*x[0], 4*x[1]]] + else: + jac = 
self.constr_jac + + if self.constr_hess is None: + def hess(x, v): + return 2*v[0]*np.eye(2) + else: + hess = self.constr_hess + + return NonlinearConstraint(fun, 1, 1, jac, hess) + + +class HyperbolicIneq: + """Problem 15.1 from Nocedal and Wright + + The following optimization problem: + minimize 1/2*(x[0] - 2)**2 + 1/2*(x[1] - 1/2)**2 + Subject to: 1/(x[0] + 1) - x[1] >= 1/4 + x[0] >= 0 + x[1] >= 0 + """ + def __init__(self, constr_jac=None, constr_hess=None): + self.x0 = [0, 0] + self.x_opt = [1.952823, 0.088659] + self.constr_jac = constr_jac + self.constr_hess = constr_hess + self.bounds = Bounds(0, np.inf) + + def fun(self, x): + return 1/2*(x[0] - 2)**2 + 1/2*(x[1] - 1/2)**2 + + def grad(self, x): + return [x[0] - 2, x[1] - 1/2] + + def hess(self, x): + return np.eye(2) + + @property + def constr(self): + def fun(x): + return 1/(x[0] + 1) - x[1] + + if self.constr_jac is None: + def jac(x): + return [[-1/(x[0] + 1)**2, -1]] + else: + jac = self.constr_jac + + if self.constr_hess is None: + def hess(x, v): + return 2*v[0]*np.array([[1/(x[0] + 1)**3, 0], + [0, 0]]) + else: + hess = self.constr_hess + + return NonlinearConstraint(fun, 0.25, np.inf, jac, hess) + + +class Rosenbrock: + """Rosenbrock function. + + The following optimization problem: + minimize sum(100.0*(x[1:] - x[:-1]**2.0)**2.0 + (1 - x[:-1])**2.0) + """ + + def __init__(self, n=2, random_state=0): + rng = np.random.RandomState(random_state) + self.x0 = rng.uniform(-1, 1, n) + self.x_opt = np.ones(n) + self.bounds = None + + def fun(self, x): + x = np.asarray(x) + r = np.sum(100.0 * (x[1:] - x[:-1]**2.0)**2.0 + (1 - x[:-1])**2.0, + axis=0) + return r + + def grad(self, x): + x = np.asarray(x) + xm = x[1:-1] + xm_m1 = x[:-2] + xm_p1 = x[2:] + der = np.zeros_like(x) + der[1:-1] = (200 * (xm - xm_m1**2) - + 400 * (xm_p1 - xm**2) * xm - 2 * (1 - xm)) + der[0] = -400 * x[0] * (x[1] - x[0]**2) - 2 * (1 - x[0]) + der[-1] = 200 * (x[-1] - x[-2]**2) + return der + + def hess(self, x): + x = np.atleast_1d(x) + H = np.diag(-400 * x[:-1], 1) - np.diag(400 * x[:-1], -1) + diagonal = np.zeros(len(x), dtype=x.dtype) + diagonal[0] = 1200 * x[0]**2 - 400 * x[1] + 2 + diagonal[-1] = 200 + diagonal[1:-1] = 202 + 1200 * x[1:-1]**2 - 400 * x[2:] + H = H + np.diag(diagonal) + return H + + @property + def constr(self): + return () + + +class IneqRosenbrock(Rosenbrock): + """Rosenbrock subject to inequality constraints. + + The following optimization problem: + minimize sum(100.0*(x[1] - x[0]**2)**2.0 + (1 - x[0])**2) + subject to: x[0] + 2 x[1] <= 1 + + Taken from matlab ``fmincon`` documentation. + """ + def __init__(self, random_state=0): + Rosenbrock.__init__(self, 2, random_state) + self.x0 = [-1, -0.5] + self.x_opt = [0.5022, 0.2489] + self.bounds = None + + @property + def constr(self): + A = [[1, 2]] + b = 1 + return LinearConstraint(A, -np.inf, b) + + +class BoundedRosenbrock(Rosenbrock): + """Rosenbrock subject to inequality constraints. + + The following optimization problem: + minimize sum(100.0*(x[1] - x[0]**2)**2.0 + (1 - x[0])**2) + subject to: -2 <= x[0] <= 0 + 0 <= x[1] <= 2 + + Taken from matlab ``fmincon`` documentation. + """ + def __init__(self, random_state=0): + Rosenbrock.__init__(self, 2, random_state) + self.x0 = [-0.2, 0.2] + self.x_opt = None + self.bounds = Bounds([-2, 0], [0, 2]) + + +class EqIneqRosenbrock(Rosenbrock): + """Rosenbrock subject to equality and inequality constraints. 
+ + The following optimization problem: + minimize sum(100.0*(x[1] - x[0]**2)**2.0 + (1 - x[0])**2) + subject to: x[0] + 2 x[1] <= 1 + 2 x[0] + x[1] = 1 + + Taken from matlab ``fimincon`` documentation. + """ + def __init__(self, random_state=0): + Rosenbrock.__init__(self, 2, random_state) + self.x0 = [-1, -0.5] + self.x_opt = [0.41494, 0.17011] + self.bounds = None + + @property + def constr(self): + A_ineq = [[1, 2]] + b_ineq = 1 + A_eq = [[2, 1]] + b_eq = 1 + return (LinearConstraint(A_ineq, -np.inf, b_ineq), + LinearConstraint(A_eq, b_eq, b_eq)) + + +class Elec: + """Distribution of electrons on a sphere. + + Problem no 2 from COPS collection [2]_. Find + the equilibrium state distribution (of minimal + potential) of the electrons positioned on a + conducting sphere. + + References + ---------- + .. [1] E. D. Dolan, J. J. Mor\'{e}, and T. S. Munson, + "Benchmarking optimization software with COPS 3.0.", + Argonne National Lab., Argonne, IL (US), 2004. + """ + def __init__(self, n_electrons=200, random_state=0, + constr_jac=None, constr_hess=None): + self.n_electrons = n_electrons + self.rng = np.random.RandomState(random_state) + # Initial Guess + phi = self.rng.uniform(0, 2 * np.pi, self.n_electrons) + theta = self.rng.uniform(-np.pi, np.pi, self.n_electrons) + x = np.cos(theta) * np.cos(phi) + y = np.cos(theta) * np.sin(phi) + z = np.sin(theta) + self.x0 = np.hstack((x, y, z)) + self.x_opt = None + self.constr_jac = constr_jac + self.constr_hess = constr_hess + self.bounds = None + + def _get_cordinates(self, x): + x_coord = x[:self.n_electrons] + y_coord = x[self.n_electrons:2 * self.n_electrons] + z_coord = x[2 * self.n_electrons:] + return x_coord, y_coord, z_coord + + def _compute_coordinate_deltas(self, x): + x_coord, y_coord, z_coord = self._get_cordinates(x) + dx = x_coord[:, None] - x_coord + dy = y_coord[:, None] - y_coord + dz = z_coord[:, None] - z_coord + return dx, dy, dz + + def fun(self, x): + dx, dy, dz = self._compute_coordinate_deltas(x) + with np.errstate(divide='ignore'): + dm1 = (dx**2 + dy**2 + dz**2) ** -0.5 + dm1[np.diag_indices_from(dm1)] = 0 + return 0.5 * np.sum(dm1) + + def grad(self, x): + dx, dy, dz = self._compute_coordinate_deltas(x) + + with np.errstate(divide='ignore'): + dm3 = (dx**2 + dy**2 + dz**2) ** -1.5 + dm3[np.diag_indices_from(dm3)] = 0 + + grad_x = -np.sum(dx * dm3, axis=1) + grad_y = -np.sum(dy * dm3, axis=1) + grad_z = -np.sum(dz * dm3, axis=1) + + return np.hstack((grad_x, grad_y, grad_z)) + + def hess(self, x): + dx, dy, dz = self._compute_coordinate_deltas(x) + d = (dx**2 + dy**2 + dz**2) ** 0.5 + + with np.errstate(divide='ignore'): + dm3 = d ** -3 + dm5 = d ** -5 + + i = np.arange(self.n_electrons) + dm3[i, i] = 0 + dm5[i, i] = 0 + + Hxx = dm3 - 3 * dx**2 * dm5 + Hxx[i, i] = -np.sum(Hxx, axis=1) + + Hxy = -3 * dx * dy * dm5 + Hxy[i, i] = -np.sum(Hxy, axis=1) + + Hxz = -3 * dx * dz * dm5 + Hxz[i, i] = -np.sum(Hxz, axis=1) + + Hyy = dm3 - 3 * dy**2 * dm5 + Hyy[i, i] = -np.sum(Hyy, axis=1) + + Hyz = -3 * dy * dz * dm5 + Hyz[i, i] = -np.sum(Hyz, axis=1) + + Hzz = dm3 - 3 * dz**2 * dm5 + Hzz[i, i] = -np.sum(Hzz, axis=1) + + H = np.vstack(( + np.hstack((Hxx, Hxy, Hxz)), + np.hstack((Hxy, Hyy, Hyz)), + np.hstack((Hxz, Hyz, Hzz)) + )) + + return H + + @property + def constr(self): + def fun(x): + x_coord, y_coord, z_coord = self._get_cordinates(x) + return x_coord**2 + y_coord**2 + z_coord**2 - 1 + + if self.constr_jac is None: + def jac(x): + x_coord, y_coord, z_coord = self._get_cordinates(x) + Jx = 2 * np.diag(x_coord) + Jy = 2 * 
np.diag(y_coord) + Jz = 2 * np.diag(z_coord) + return csc_matrix(np.hstack((Jx, Jy, Jz))) + else: + jac = self.constr_jac + + if self.constr_hess is None: + def hess(x, v): + D = 2 * np.diag(v) + return block_diag(D, D, D) + else: + hess = self.constr_hess + + return NonlinearConstraint(fun, -np.inf, 0, jac, hess) + + +class TestTrustRegionConstr: + list_of_problems = [Maratos(), + Maratos(constr_hess='2-point'), + Maratos(constr_hess=SR1()), + Maratos(constr_jac='2-point', constr_hess=SR1()), + MaratosGradInFunc(), + HyperbolicIneq(), + HyperbolicIneq(constr_hess='3-point'), + HyperbolicIneq(constr_hess=BFGS()), + HyperbolicIneq(constr_jac='3-point', + constr_hess=BFGS()), + Rosenbrock(), + IneqRosenbrock(), + EqIneqRosenbrock(), + BoundedRosenbrock(), + Elec(n_electrons=2), + Elec(n_electrons=2, constr_hess='2-point'), + Elec(n_electrons=2, constr_hess=SR1()), + Elec(n_electrons=2, constr_jac='3-point', + constr_hess=SR1())] + + @pytest.mark.thread_unsafe + @pytest.mark.parametrize('prob', list_of_problems) + @pytest.mark.parametrize('grad', ('prob.grad', '3-point', False)) + @pytest.mark.parametrize('hess', ("prob.hess", '3-point', SR1(), + BFGS(exception_strategy='damp_update'), + BFGS(exception_strategy='skip_update'))) + def test_list_of_problems(self, prob, grad, hess): + grad = prob.grad if grad == "prob.grad" else grad + hess = prob.hess if hess == "prob.hess" else hess + # Remove exceptions + if (grad in {'2-point', '3-point', 'cs', False} and + hess in {'2-point', '3-point', 'cs'}): + pytest.skip("Numerical Hessian needs analytical gradient") + if prob.grad is True and grad in {'3-point', False}: + pytest.skip("prob.grad incompatible with grad in {'3-point', False}") + sensitive = (isinstance(prob, BoundedRosenbrock) and grad == '3-point' + and isinstance(hess, BFGS)) + if sensitive: + pytest.xfail("Seems sensitive to initial conditions w/ Accelerate") + with suppress_warnings() as sup: + sup.filter(UserWarning, "delta_grad == 0.0") + result = minimize(prob.fun, prob.x0, + method='trust-constr', + jac=grad, hess=hess, + bounds=prob.bounds, + constraints=prob.constr) + + if prob.x_opt is not None: + assert_array_almost_equal(result.x, prob.x_opt, + decimal=5) + # gtol + if result.status == 1: + assert_array_less(result.optimality, 1e-8) + # xtol + if result.status == 2: + assert_array_less(result.tr_radius, 1e-8) + + if result.method == "tr_interior_point": + assert_array_less(result.barrier_parameter, 1e-8) + + # check for max iter + message = f"Invalid termination condition: {result.status}." 
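Aside (editor's illustration, not part of the vendored scipy file): the parametrized test above drives `minimize(method='trust-constr')` over the problem classes defined earlier. A minimal standalone call on the Maratos problem, using the analytic gradient and leaving the constraint Jacobian and Hessians to their defaults, might look like this.

import numpy as np
from scipy.optimize import minimize, NonlinearConstraint

def fun(x):
    # Problem 15.4 from Nocedal & Wright, as in the Maratos class above
    return 2*(x[0]**2 + x[1]**2 - 1) - x[0]

def grad(x):
    return np.array([4*x[0] - 1, 4*x[1]])

# Equality constraint x0**2 + x1**2 == 1, expressed as lb == ub == 1
constr = NonlinearConstraint(lambda x: x[0]**2 + x[1]**2, 1, 1)
x0 = [np.cos(np.pi/3), np.sin(np.pi/3)]  # start on the unit circle at 60 degrees
res = minimize(fun, x0, method='trust-constr', jac=grad, constraints=[constr])
print(res.x)  # expected to be close to [1, 0]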
+ assert result.status not in {0, 3}, message + + + def test_default_jac_and_hess(self): + def fun(x): + return (x - 1) ** 2 + bounds = [(-2, 2)] + res = minimize(fun, x0=[-1.5], bounds=bounds, method='trust-constr') + assert_array_almost_equal(res.x, 1, decimal=5) + + def test_default_hess(self): + def fun(x): + return (x - 1) ** 2 + bounds = [(-2, 2)] + res = minimize(fun, x0=[-1.5], bounds=bounds, method='trust-constr', + jac='2-point') + assert_array_almost_equal(res.x, 1, decimal=5) + + def test_no_constraints(self): + prob = Rosenbrock() + result = minimize(prob.fun, prob.x0, + method='trust-constr', + jac=prob.grad, hess=prob.hess) + result1 = minimize(prob.fun, prob.x0, + method='L-BFGS-B', + jac='2-point') + + result2 = minimize(prob.fun, prob.x0, + method='L-BFGS-B', + jac='3-point') + assert_array_almost_equal(result.x, prob.x_opt, decimal=5) + assert_array_almost_equal(result1.x, prob.x_opt, decimal=5) + assert_array_almost_equal(result2.x, prob.x_opt, decimal=5) + + def test_hessp(self): + prob = Maratos() + + def hessp(x, p): + H = prob.hess(x) + return H.dot(p) + + result = minimize(prob.fun, prob.x0, + method='trust-constr', + jac=prob.grad, hessp=hessp, + bounds=prob.bounds, + constraints=prob.constr) + + if prob.x_opt is not None: + assert_array_almost_equal(result.x, prob.x_opt, decimal=2) + + # gtol + if result.status == 1: + assert_array_less(result.optimality, 1e-8) + # xtol + if result.status == 2: + assert_array_less(result.tr_radius, 1e-8) + + if result.method == "tr_interior_point": + assert_array_less(result.barrier_parameter, 1e-8) + # max iter + if result.status in (0, 3): + raise RuntimeError("Invalid termination condition.") + + def test_args(self): + prob = MaratosTestArgs("a", 234) + + result = minimize(prob.fun, prob.x0, ("a", 234), + method='trust-constr', + jac=prob.grad, hess=prob.hess, + bounds=prob.bounds, + constraints=prob.constr) + + if prob.x_opt is not None: + assert_array_almost_equal(result.x, prob.x_opt, decimal=2) + + # gtol + if result.status == 1: + assert_array_less(result.optimality, 1e-8) + # xtol + if result.status == 2: + assert_array_less(result.tr_radius, 1e-8) + if result.method == "tr_interior_point": + assert_array_less(result.barrier_parameter, 1e-8) + # max iter + if result.status in (0, 3): + raise RuntimeError("Invalid termination condition.") + + def test_raise_exception(self): + prob = Maratos() + message = "Whenever the gradient is estimated via finite-differences" + with pytest.raises(ValueError, match=message): + minimize(prob.fun, prob.x0, method='trust-constr', jac='2-point', + hess='2-point', constraints=prob.constr) + + def test_issue_9044(self): + # https://github.com/scipy/scipy/issues/9044 + # Test the returned `OptimizeResult` contains keys consistent with + # other solvers. + + def callback(x, info): + assert_('nit' in info) + assert_('niter' in info) + + result = minimize(lambda x: x**2, [0], jac=lambda x: 2*x, + hess=lambda x: 2, callback=callback, + method='trust-constr') + assert_(result.get('success')) + assert_(result.get('nit', -1) == 1) + + # Also check existence of the 'niter' attribute, for backward + # compatibility + assert_(result.get('niter', -1) == 1) + + def test_issue_15093(self): + # scipy docs define bounds as inclusive, so it shouldn't be + # an issue to set x0 on the bounds even if keep_feasible is + # True. Previously, trust-constr would treat bounds as + # exclusive. 
+ + x0 = np.array([0., 0.5]) + + def obj(x): + x1 = x[0] + x2 = x[1] + return x1 ** 2 + x2 ** 2 + + bounds = Bounds(np.array([0., 0.]), np.array([1., 1.]), + keep_feasible=True) + + with suppress_warnings() as sup: + sup.filter(UserWarning, "delta_grad == 0.0") + result = minimize( + method='trust-constr', + fun=obj, + x0=x0, + bounds=bounds) + + assert result['success'] + +class TestEmptyConstraint: + """ + Here we minimize x^2+y^2 subject to x^2-y^2>1. + The actual minimum is at (0, 0) which fails the constraint. + Therefore we will find a minimum on the boundary at (+/-1, 0). + + When minimizing on the boundary, optimize uses a set of + constraints that removes the constraint that sets that + boundary. In our case, there's only one constraint, so + the result is an empty constraint. + + This tests that the empty constraint works. + """ + def test_empty_constraint(self): + + def function(x): + return x[0]**2 + x[1]**2 + + def functionjacobian(x): + return np.array([2.*x[0], 2.*x[1]]) + + def functionhvp(x, v): + return 2.*v + + def constraint(x): + return np.array([x[0]**2 - x[1]**2]) + + def constraintjacobian(x): + return np.array([[2*x[0], -2*x[1]]]) + + def constraintlcoh(x, v): + return np.array([[2., 0.], [0., -2.]]) * v[0] + + constraint = NonlinearConstraint(constraint, 1., np.inf, + constraintjacobian, constraintlcoh) + + startpoint = [1., 2.] + + bounds = Bounds([-np.inf, -np.inf], [np.inf, np.inf]) + + result = minimize( + function, + startpoint, + method='trust-constr', + jac=functionjacobian, + hessp=functionhvp, + constraints=[constraint], + bounds=bounds, + ) + + assert_array_almost_equal(abs(result.x), np.array([1, 0]), decimal=4) + + +def test_bug_11886(): + def opt(x): + return x[0]**2+x[1]**2 + + with np.testing.suppress_warnings() as sup: + sup.filter(PendingDeprecationWarning) + A = np.matrix(np.diag([1, 1])) + lin_cons = LinearConstraint(A, -1, np.inf) + # just checking that there are no errors + minimize(opt, 2*[1], constraints = lin_cons) + + +def test_gh11649(): + # trust - constr error when attempting to keep bound constrained solutions + # feasible. Algorithm attempts to go outside bounds when evaluating finite + # differences. (don't give objective an analytic gradient) + bnds = Bounds(lb=[-1, -1], ub=[1, 1], keep_feasible=True) + + def assert_inbounds(x): + assert np.all(x >= bnds.lb) + assert np.all(x <= bnds.ub) + + def obj(x): + assert_inbounds(x) + return np.exp(x[0])*(4*x[0]**2 + 2*x[1]**2 + 4*x[0]*x[1] + 2*x[1] + 1) + + def nce(x): + assert_inbounds(x) + return x[0]**2 + x[1] + + def nce_jac(x): + return np.array([2*x[0], 1]) + + def nci(x): + assert_inbounds(x) + return x[0]*x[1] + + x0 = np.array((0.99, -0.99)) + nlcs = [NonlinearConstraint(nci, -10, np.inf), + NonlinearConstraint(nce, 1, 1, jac=nce_jac)] + + res = minimize(fun=obj, x0=x0, method='trust-constr', + bounds=bnds, constraints=nlcs) + assert_inbounds(res.x) + assert nlcs[0].lb < nlcs[0].fun(res.x) < nlcs[0].ub + + +def test_gh20665_too_many_constraints(): + # gh-20665 reports a confusing error message when there are more equality + # constraints than variables. Check that the error message is improved. + message = "...more equality constraints than independent variables..." 
+ with pytest.raises(ValueError, match=message): + x0 = np.ones((2,)) + A_eq, b_eq = np.arange(6).reshape((3, 2)), np.ones((3,)) + g = NonlinearConstraint(lambda x: A_eq @ x, lb=b_eq, ub=b_eq) + minimize(rosen, x0, method='trust-constr', constraints=[g]) + # no error with `SVDFactorization` + with np.testing.suppress_warnings() as sup: + sup.filter(UserWarning) + minimize(rosen, x0, method='trust-constr', constraints=[g], + options={'factorization_method': 'SVDFactorization'}) + +def test_issue_18882(): + def lsf(u): + u1, u2 = u + a, b = [3.0, 4.0] + return 1.0 + u1**2 / a**2 - u2**2 / b**2 + + def of(u): + return np.sum(u**2) + + with suppress_warnings() as sup: + sup.filter(UserWarning, "delta_grad == 0.0") + sup.filter(UserWarning, "Singular Jacobian matrix.") + res = minimize( + of, + [0.0, 0.0], + method="trust-constr", + constraints=NonlinearConstraint(lsf, 0, 0), + ) + assert (not res.success) and (res.constr_violation > 1e-8) + +class TestBoundedNelderMead: + + @pytest.mark.parametrize('bounds, x_opt', + [(Bounds(-np.inf, np.inf), Rosenbrock().x_opt), + (Bounds(-np.inf, -0.8), [-0.8, -0.8]), + (Bounds(3.0, np.inf), [3.0, 9.0]), + (Bounds([3.0, 1.0], [4.0, 5.0]), [3., 5.]), + ]) + def test_rosen_brock_with_bounds(self, bounds, x_opt): + prob = Rosenbrock() + with suppress_warnings() as sup: + sup.filter(UserWarning, "Initial guess is not within " + "the specified bounds") + result = minimize(prob.fun, [-10, -10], + method='Nelder-Mead', + bounds=bounds) + assert np.less_equal(bounds.lb, result.x).all() + assert np.less_equal(result.x, bounds.ub).all() + assert np.allclose(prob.fun(result.x), result.fun) + assert np.allclose(result.x, x_opt, atol=1.e-3) + + def test_equal_all_bounds(self): + prob = Rosenbrock() + bounds = Bounds([4.0, 5.0], [4.0, 5.0]) + with suppress_warnings() as sup: + sup.filter(UserWarning, "Initial guess is not within " + "the specified bounds") + result = minimize(prob.fun, [-10, 8], + method='Nelder-Mead', + bounds=bounds) + assert np.allclose(result.x, [4.0, 5.0]) + + def test_equal_one_bounds(self): + prob = Rosenbrock() + bounds = Bounds([4.0, 5.0], [4.0, 20.0]) + with suppress_warnings() as sup: + sup.filter(UserWarning, "Initial guess is not within " + "the specified bounds") + result = minimize(prob.fun, [-10, 8], + method='Nelder-Mead', + bounds=bounds) + assert np.allclose(result.x, [4.0, 16.0]) + + def test_invalid_bounds(self): + prob = Rosenbrock() + message = 'An upper bound is less than the corresponding lower bound.' 
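Aside (editor's illustration, not part of the vendored scipy file): the bounded Nelder-Mead cases above pass a `Bounds` instance directly to `minimize`. A standalone version of the `Bounds([3.0, 1.0], [4.0, 5.0])` case, started from a feasible point so that no out-of-bounds warning is emitted, might look like this.

import numpy as np
from scipy.optimize import minimize, Bounds, rosen

# Rosenbrock restricted to 3 <= x0 <= 4 and 1 <= x1 <= 5; the test above
# reports the constrained optimum at approximately [3, 5].
res = minimize(rosen, x0=[3.5, 4.0], method='Nelder-Mead',
               bounds=Bounds([3.0, 1.0], [4.0, 5.0]))
print(res.x, res.fun)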
+ with pytest.raises(ValueError, match=message): + bounds = Bounds([-np.inf, 1.0], [4.0, -5.0]) + minimize(prob.fun, [-10, 3], + method='Nelder-Mead', + bounds=bounds) + + @pytest.mark.xfail(reason="Failing on Azure Linux and macOS builds, " + "see gh-13846") + def test_outside_bounds_warning(self): + prob = Rosenbrock() + message = "Initial guess is not within the specified bounds" + with pytest.warns(UserWarning, match=message): + bounds = Bounds([-np.inf, 1.0], [4.0, 5.0]) + minimize(prob.fun, [-10, 8], + method='Nelder-Mead', + bounds=bounds) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_minpack.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_minpack.py new file mode 100644 index 0000000000000000000000000000000000000000..ef107c5692c2d30a06b6943542696b95bc21e818 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_minpack.py @@ -0,0 +1,1194 @@ +""" +Unit tests for optimization routines from minpack.py. +""" +import warnings +import pytest +import threading + +from numpy.testing import (assert_, assert_almost_equal, assert_array_equal, + assert_array_almost_equal, assert_allclose, + assert_warns, suppress_warnings) +from pytest import raises as assert_raises +import numpy as np +from numpy import array, float64 +from multiprocessing.pool import ThreadPool + +from scipy import optimize, linalg +from scipy.special import lambertw +from scipy.optimize._minpack_py import leastsq, curve_fit, fixed_point +from scipy.optimize import OptimizeWarning +from scipy.optimize._minimize import Bounds + + +class ReturnShape: + """This class exists to create a callable that does not have a '__name__' attribute. + + __init__ takes the argument 'shape', which should be a tuple of ints. + When an instance is called with a single argument 'x', it returns numpy.ones(shape). + """ + + def __init__(self, shape): + self.shape = shape + + def __call__(self, x): + return np.ones(self.shape) + + +def dummy_func(x, shape): + """A function that returns an array of ones of the given shape. + `x` is ignored. + """ + return np.ones(shape) + + +def sequence_parallel(fs): + with ThreadPool(len(fs)) as pool: + return pool.map(lambda f: f(), fs) + + +# Function and Jacobian for tests of solvers for systems of nonlinear +# equations + + +def pressure_network(flow_rates, Qtot, k): + """Evaluate non-linear equation system representing + the pressures and flows in a system of n parallel pipes:: + + f_i = P_i - P_0, for i = 1..n + f_0 = sum(Q_i) - Qtot + + where Q_i is the flow rate in pipe i and P_i the pressure in that pipe. + Pressure is modeled as a P=kQ**2 where k is a valve coefficient and + Q is the flow rate. + + Parameters + ---------- + flow_rates : float + A 1-D array of n flow rates [kg/s]. + k : float + A 1-D array of n valve coefficients [1/kg m]. + Qtot : float + A scalar, the total input flow rate [kg/s]. + + Returns + ------- + F : float + A 1-D array, F[i] == f_i. + + """ + P = k * flow_rates**2 + F = np.hstack((P[1:] - P[0], flow_rates.sum() - Qtot)) + return F + + +def pressure_network_jacobian(flow_rates, Qtot, k): + """Return the jacobian of the equation system F(flow_rates) + computed by `pressure_network` with respect to + *flow_rates*. See `pressure_network` for the detailed + description of parameters. 
+ + Returns + ------- + jac : float + *n* by *n* matrix ``df_i/dQ_i`` where ``n = len(flow_rates)`` + and *f_i* and *Q_i* are described in the doc for `pressure_network` + """ + n = len(flow_rates) + pdiff = np.diag(flow_rates[1:] * 2 * k[1:] - 2 * flow_rates[0] * k[0]) + + jac = np.empty((n, n)) + jac[:n-1, :n-1] = pdiff * 0 + jac[:n-1, n-1] = 0 + jac[n-1, :] = np.ones(n) + + return jac + + +def pressure_network_fun_and_grad(flow_rates, Qtot, k): + return (pressure_network(flow_rates, Qtot, k), + pressure_network_jacobian(flow_rates, Qtot, k)) + + +class TestFSolve: + def test_pressure_network_no_gradient(self): + # fsolve without gradient, equal pipes -> equal flows. + k = np.full(4, 0.5) + Qtot = 4 + initial_guess = array([2., 0., 2., 0.]) + final_flows, info, ier, mesg = optimize.fsolve( + pressure_network, initial_guess, args=(Qtot, k), + full_output=True) + assert_array_almost_equal(final_flows, np.ones(4)) + assert_(ier == 1, mesg) + + def test_pressure_network_with_gradient(self): + # fsolve with gradient, equal pipes -> equal flows + k = np.full(4, 0.5) + Qtot = 4 + initial_guess = array([2., 0., 2., 0.]) + final_flows = optimize.fsolve( + pressure_network, initial_guess, args=(Qtot, k), + fprime=pressure_network_jacobian) + assert_array_almost_equal(final_flows, np.ones(4)) + + def test_wrong_shape_func_callable(self): + func = ReturnShape(1) + # x0 is a list of two elements, but func will return an array with + # length 1, so this should result in a TypeError. + x0 = [1.5, 2.0] + assert_raises(TypeError, optimize.fsolve, func, x0) + + def test_wrong_shape_func_function(self): + # x0 is a list of two elements, but func will return an array with + # length 1, so this should result in a TypeError. + x0 = [1.5, 2.0] + assert_raises(TypeError, optimize.fsolve, dummy_func, x0, args=((1,),)) + + def test_wrong_shape_fprime_callable(self): + func = ReturnShape(1) + deriv_func = ReturnShape((2,2)) + assert_raises(TypeError, optimize.fsolve, func, x0=[0,1], fprime=deriv_func) + + def test_wrong_shape_fprime_function(self): + def func(x): + return dummy_func(x, (2,)) + def deriv_func(x): + return dummy_func(x, (3, 3)) + assert_raises(TypeError, optimize.fsolve, func, x0=[0,1], fprime=deriv_func) + + def test_func_can_raise(self): + def func(*args): + raise ValueError('I raised') + + with assert_raises(ValueError, match='I raised'): + optimize.fsolve(func, x0=[0]) + + def test_Dfun_can_raise(self): + def func(x): + return x - np.array([10]) + + def deriv_func(*args): + raise ValueError('I raised') + + with assert_raises(ValueError, match='I raised'): + optimize.fsolve(func, x0=[0], fprime=deriv_func) + + def test_float32(self): + def func(x): + return np.array([x[0] - 100, x[1] - 1000], dtype=np.float32) ** 2 + p = optimize.fsolve(func, np.array([1, 1], np.float32)) + assert_allclose(func(p), [0, 0], atol=1e-3) + + def test_reentrant_func(self): + def func(*args): + self.test_pressure_network_no_gradient() + return pressure_network(*args) + + # fsolve without gradient, equal pipes -> equal flows. 
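Aside (editor's illustration, not part of the vendored scipy file): the pressure-network model described above reduces to a square nonlinear system that `fsolve` can handle directly. A self-contained sketch with four equal pipes, where the expected solution is unit flow in every pipe as in the tests, might look like this.

import numpy as np
from scipy.optimize import fsolve

def residuals(q, Qtot, k):
    # P_i = k_i * Q_i**2; equalize pressures and enforce total flow Qtot
    p = k * q**2
    return np.hstack((p[1:] - p[0], q.sum() - Qtot))

k = np.full(4, 0.5)
flows = fsolve(residuals, x0=np.array([2.0, 0.0, 2.0, 0.0]), args=(4.0, k))
print(flows)  # expected to be approximately [1, 1, 1, 1]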
+ k = np.full(4, 0.5) + Qtot = 4 + initial_guess = array([2., 0., 2., 0.]) + final_flows, info, ier, mesg = optimize.fsolve( + func, initial_guess, args=(Qtot, k), + full_output=True) + assert_array_almost_equal(final_flows, np.ones(4)) + assert_(ier == 1, mesg) + + def test_reentrant_Dfunc(self): + def deriv_func(*args): + self.test_pressure_network_with_gradient() + return pressure_network_jacobian(*args) + + # fsolve with gradient, equal pipes -> equal flows + k = np.full(4, 0.5) + Qtot = 4 + initial_guess = array([2., 0., 2., 0.]) + final_flows = optimize.fsolve( + pressure_network, initial_guess, args=(Qtot, k), + fprime=deriv_func) + assert_array_almost_equal(final_flows, np.ones(4)) + + def test_concurrent_no_gradient(self): + v = sequence_parallel([self.test_pressure_network_no_gradient] * 10) + assert all([result is None for result in v]) + + def test_concurrent_with_gradient(self): + v = sequence_parallel([self.test_pressure_network_with_gradient] * 10) + assert all([result is None for result in v]) + + +class TestRootHybr: + def test_pressure_network_no_gradient(self): + # root/hybr without gradient, equal pipes -> equal flows + k = np.full(4, 0.5) + Qtot = 4 + initial_guess = array([2., 0., 2., 0.]) + final_flows = optimize.root(pressure_network, initial_guess, + method='hybr', args=(Qtot, k)).x + assert_array_almost_equal(final_flows, np.ones(4)) + + def test_pressure_network_with_gradient(self): + # root/hybr with gradient, equal pipes -> equal flows + k = np.full(4, 0.5) + Qtot = 4 + initial_guess = array([[2., 0., 2., 0.]]) + final_flows = optimize.root(pressure_network, initial_guess, + args=(Qtot, k), method='hybr', + jac=pressure_network_jacobian).x + assert_array_almost_equal(final_flows, np.ones(4)) + + def test_pressure_network_with_gradient_combined(self): + # root/hybr with gradient and function combined, equal pipes -> equal + # flows + k = np.full(4, 0.5) + Qtot = 4 + initial_guess = array([2., 0., 2., 0.]) + final_flows = optimize.root(pressure_network_fun_and_grad, + initial_guess, args=(Qtot, k), + method='hybr', jac=True).x + assert_array_almost_equal(final_flows, np.ones(4)) + + +class TestRootLM: + def test_pressure_network_no_gradient(self): + # root/lm without gradient, equal pipes -> equal flows + k = np.full(4, 0.5) + Qtot = 4 + initial_guess = array([2., 0., 2., 0.]) + final_flows = optimize.root(pressure_network, initial_guess, + method='lm', args=(Qtot, k)).x + assert_array_almost_equal(final_flows, np.ones(4)) + + +class TestNfev: + def setup_method(self): + self.nfev = threading.local() + + def zero_f(self, y): + if not hasattr(self.nfev, 'c'): + self.nfev.c = 0 + self.nfev.c += 1 + return y**2-3 + + @pytest.mark.parametrize('method', ['hybr', 'lm', 'broyden1', + 'broyden2', 'anderson', + 'linearmixing', 'diagbroyden', + 'excitingmixing', 'krylov', + 'df-sane']) + def test_root_nfev(self, method): + self.nfev.c = 0 + solution = optimize.root(self.zero_f, 100, method=method) + assert solution.nfev == self.nfev.c + + def test_fsolve_nfev(self): + self.nfev.c = 0 + x, info, ier, mesg = optimize.fsolve(self.zero_f, 100, full_output=True) + assert info['nfev'] == self.nfev.c + + +class TestLeastSq: + def setup_method(self): + x = np.linspace(0, 10, 40) + a,b,c = 3.1, 42, -304.2 + self.x = x + self.abc = a,b,c + y_true = a*x**2 + b*x + c + np.random.seed(0) + self.y_meas = y_true + 0.01*np.random.standard_normal(y_true.shape) + + def residuals(self, p, y, x): + a,b,c = p + err = y-(a*x**2 + b*x + c) + return err + + def residuals_jacobian(self, _p, _y, 
x): + return -np.vstack([x**2, x, np.ones_like(x)]).T + + def test_basic(self): + p0 = array([0,0,0]) + params_fit, ier = leastsq(self.residuals, p0, + args=(self.y_meas, self.x)) + assert_(ier in (1,2,3,4), 'solution not found (ier=%d)' % ier) + # low precision due to random + assert_array_almost_equal(params_fit, self.abc, decimal=2) + + def test_basic_with_gradient(self): + p0 = array([0,0,0]) + params_fit, ier = leastsq(self.residuals, p0, + args=(self.y_meas, self.x), + Dfun=self.residuals_jacobian) + assert_(ier in (1,2,3,4), 'solution not found (ier=%d)' % ier) + # low precision due to random + assert_array_almost_equal(params_fit, self.abc, decimal=2) + + def test_full_output(self): + p0 = array([[0,0,0]]) + full_output = leastsq(self.residuals, p0, + args=(self.y_meas, self.x), + full_output=True) + params_fit, cov_x, infodict, mesg, ier = full_output + assert_(ier in (1,2,3,4), f'solution not found: {mesg}') + + def test_input_untouched(self): + p0 = array([0,0,0],dtype=float64) + p0_copy = array(p0, copy=True) + full_output = leastsq(self.residuals, p0, + args=(self.y_meas, self.x), + full_output=True) + params_fit, cov_x, infodict, mesg, ier = full_output + assert_(ier in (1,2,3,4), f'solution not found: {mesg}') + assert_array_equal(p0, p0_copy) + + def test_wrong_shape_func_callable(self): + func = ReturnShape(1) + # x0 is a list of two elements, but func will return an array with + # length 1, so this should result in a TypeError. + x0 = [1.5, 2.0] + assert_raises(TypeError, optimize.leastsq, func, x0) + + def test_wrong_shape_func_function(self): + # x0 is a list of two elements, but func will return an array with + # length 1, so this should result in a TypeError. + x0 = [1.5, 2.0] + assert_raises(TypeError, optimize.leastsq, dummy_func, x0, args=((1,),)) + + def test_wrong_shape_Dfun_callable(self): + func = ReturnShape(1) + deriv_func = ReturnShape((2,2)) + assert_raises(TypeError, optimize.leastsq, func, x0=[0,1], Dfun=deriv_func) + + def test_wrong_shape_Dfun_function(self): + def func(x): + return dummy_func(x, (2,)) + def deriv_func(x): + return dummy_func(x, (3, 3)) + assert_raises(TypeError, optimize.leastsq, func, x0=[0,1], Dfun=deriv_func) + + def test_float32(self): + # Regression test for gh-1447 + def func(p,x,y): + q = p[0]*np.exp(-(x-p[1])**2/(2.0*p[2]**2))+p[3] + return q - y + + x = np.array([1.475,1.429,1.409,1.419,1.455,1.519,1.472, 1.368,1.286, + 1.231], dtype=np.float32) + y = np.array([0.0168,0.0193,0.0211,0.0202,0.0171,0.0151,0.0185,0.0258, + 0.034,0.0396], dtype=np.float32) + p0 = np.array([1.0,1.0,1.0,1.0]) + p1, success = optimize.leastsq(func, p0, args=(x,y)) + + assert_(success in [1,2,3,4]) + assert_((func(p1,x,y)**2).sum() < 1e-4 * (func(p0,x,y)**2).sum()) + + def test_func_can_raise(self): + def func(*args): + raise ValueError('I raised') + + with assert_raises(ValueError, match='I raised'): + optimize.leastsq(func, x0=[0]) + + def test_Dfun_can_raise(self): + def func(x): + return x - np.array([10]) + + def deriv_func(*args): + raise ValueError('I raised') + + with assert_raises(ValueError, match='I raised'): + optimize.leastsq(func, x0=[0], Dfun=deriv_func) + + def test_reentrant_func(self): + def func(*args): + self.test_basic() + return self.residuals(*args) + + p0 = array([0,0,0]) + params_fit, ier = leastsq(func, p0, + args=(self.y_meas, self.x)) + assert_(ier in (1,2,3,4), 'solution not found (ier=%d)' % ier) + # low precision due to random + assert_array_almost_equal(params_fit, self.abc, decimal=2) + + def test_reentrant_Dfun(self): 
+ def deriv_func(*args): + self.test_basic() + return self.residuals_jacobian(*args) + + p0 = array([0,0,0]) + params_fit, ier = leastsq(self.residuals, p0, + args=(self.y_meas, self.x), + Dfun=deriv_func) + assert_(ier in (1,2,3,4), 'solution not found (ier=%d)' % ier) + # low precision due to random + assert_array_almost_equal(params_fit, self.abc, decimal=2) + + def test_concurrent_no_gradient(self): + v = sequence_parallel([self.test_basic] * 10) + assert all([result is None for result in v]) + + def test_concurrent_with_gradient(self): + v = sequence_parallel([self.test_basic_with_gradient] * 10) + assert all([result is None for result in v]) + + def test_func_input_output_length_check(self): + + def func(x): + return 2 * (x[0] - 3) ** 2 + 1 + + with assert_raises(TypeError, + match='Improper input: func input vector length N='): + optimize.leastsq(func, x0=[0, 1]) + + +class TestCurveFit: + def setup_method(self): + self.y = array([1.0, 3.2, 9.5, 13.7]) + self.x = array([1.0, 2.0, 3.0, 4.0]) + + def test_one_argument(self): + def func(x,a): + return x**a + popt, pcov = curve_fit(func, self.x, self.y) + assert_(len(popt) == 1) + assert_(pcov.shape == (1,1)) + assert_almost_equal(popt[0], 1.9149, decimal=4) + assert_almost_equal(pcov[0,0], 0.0016, decimal=4) + + # Test if we get the same with full_output. Regression test for #1415. + # Also test if check_finite can be turned off. + res = curve_fit(func, self.x, self.y, + full_output=1, check_finite=False) + (popt2, pcov2, infodict, errmsg, ier) = res + assert_array_almost_equal(popt, popt2) + + def test_two_argument(self): + def func(x, a, b): + return b*x**a + popt, pcov = curve_fit(func, self.x, self.y) + assert_(len(popt) == 2) + assert_(pcov.shape == (2,2)) + assert_array_almost_equal(popt, [1.7989, 1.1642], decimal=4) + assert_array_almost_equal(pcov, [[0.0852, -0.1260], [-0.1260, 0.1912]], + decimal=4) + + def test_func_is_classmethod(self): + class test_self: + """This class tests if curve_fit passes the correct number of + arguments when the model function is a class instance method. + """ + + def func(self, x, a, b): + return b * x**a + + test_self_inst = test_self() + popt, pcov = curve_fit(test_self_inst.func, self.x, self.y) + assert_(pcov.shape == (2,2)) + assert_array_almost_equal(popt, [1.7989, 1.1642], decimal=4) + assert_array_almost_equal(pcov, [[0.0852, -0.1260], [-0.1260, 0.1912]], + decimal=4) + + def test_regression_2639(self): + # This test fails if epsfcn in leastsq is too large. 
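Aside (editor's illustration, not part of the vendored scipy file): the TestCurveFit cases above fit the power-law model b*x**a to the fixture data; pulled out as a standalone script it reads roughly as follows, with the square roots of the covariance diagonal giving the scaled parameter uncertainties.

import numpy as np
from scipy.optimize import curve_fit

def model(x, a, b):
    return b * x**a

# Fixture data from the setup_method above.
xdata = np.array([1.0, 2.0, 3.0, 4.0])
ydata = np.array([1.0, 3.2, 9.5, 13.7])
popt, pcov = curve_fit(model, xdata, ydata)
perr = np.sqrt(np.diag(pcov))  # one-sigma parameter uncertainties
print(popt, perr)  # popt roughly [1.80, 1.16], per test_two_argument above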
+ x = [574.14200000000005, 574.154, 574.16499999999996, + 574.17700000000002, 574.18799999999999, 574.19899999999996, + 574.21100000000001, 574.22199999999998, 574.23400000000004, + 574.245] + y = [859.0, 997.0, 1699.0, 2604.0, 2013.0, 1964.0, 2435.0, + 1550.0, 949.0, 841.0] + guess = [574.1861428571428, 574.2155714285715, 1302.0, 1302.0, + 0.0035019999999983615, 859.0] + good = [5.74177150e+02, 5.74209188e+02, 1.74187044e+03, 1.58646166e+03, + 1.0068462e-02, 8.57450661e+02] + + def f_double_gauss(x, x0, x1, A0, A1, sigma, c): + return (A0*np.exp(-(x-x0)**2/(2.*sigma**2)) + + A1*np.exp(-(x-x1)**2/(2.*sigma**2)) + c) + popt, pcov = curve_fit(f_double_gauss, x, y, guess, maxfev=10000) + assert_allclose(popt, good, rtol=1e-5) + + def test_pcov(self): + xdata = np.array([0, 1, 2, 3, 4, 5]) + ydata = np.array([1, 1, 5, 7, 8, 12]) + sigma = np.array([1, 2, 1, 2, 1, 2]) + + def f(x, a, b): + return a*x + b + + for method in ['lm', 'trf', 'dogbox']: + popt, pcov = curve_fit(f, xdata, ydata, p0=[2, 0], sigma=sigma, + method=method) + perr_scaled = np.sqrt(np.diag(pcov)) + assert_allclose(perr_scaled, [0.20659803, 0.57204404], rtol=1e-3) + + popt, pcov = curve_fit(f, xdata, ydata, p0=[2, 0], sigma=3*sigma, + method=method) + perr_scaled = np.sqrt(np.diag(pcov)) + assert_allclose(perr_scaled, [0.20659803, 0.57204404], rtol=1e-3) + + popt, pcov = curve_fit(f, xdata, ydata, p0=[2, 0], sigma=sigma, + absolute_sigma=True, method=method) + perr = np.sqrt(np.diag(pcov)) + assert_allclose(perr, [0.30714756, 0.85045308], rtol=1e-3) + + popt, pcov = curve_fit(f, xdata, ydata, p0=[2, 0], sigma=3*sigma, + absolute_sigma=True, method=method) + perr = np.sqrt(np.diag(pcov)) + assert_allclose(perr, [3*0.30714756, 3*0.85045308], rtol=1e-3) + + # infinite variances + + def f_flat(x, a, b): + return a*x + + pcov_expected = np.array([np.inf]*4).reshape(2, 2) + + with suppress_warnings() as sup: + sup.filter(OptimizeWarning, + "Covariance of the parameters could not be estimated") + popt, pcov = curve_fit(f_flat, xdata, ydata, p0=[2, 0], sigma=sigma) + popt1, pcov1 = curve_fit(f, xdata[:2], ydata[:2], p0=[2, 0]) + + assert_(pcov.shape == (2, 2)) + assert_array_equal(pcov, pcov_expected) + + assert_(pcov1.shape == (2, 2)) + assert_array_equal(pcov1, pcov_expected) + + def test_array_like(self): + # Test sequence input. Regression test for gh-3037. 
+ def f_linear(x, a, b): + return a*x + b + + x = [1, 2, 3, 4] + y = [3, 5, 7, 9] + assert_allclose(curve_fit(f_linear, x, y)[0], [2, 1], atol=1e-10) + + @pytest.mark.thread_unsafe + def test_indeterminate_covariance(self): + # Test that a warning is returned when pcov is indeterminate + xdata = np.array([1, 2, 3, 4, 5, 6]) + ydata = np.array([1, 2, 3, 4, 5.5, 6]) + assert_warns(OptimizeWarning, curve_fit, + lambda x, a, b: a*x, xdata, ydata) + + def test_NaN_handling(self): + # Test for correct handling of NaNs in input data: gh-3422 + + # create input with NaNs + xdata = np.array([1, np.nan, 3]) + ydata = np.array([1, 2, 3]) + + assert_raises(ValueError, curve_fit, + lambda x, a, b: a*x + b, xdata, ydata) + assert_raises(ValueError, curve_fit, + lambda x, a, b: a*x + b, ydata, xdata) + + assert_raises(ValueError, curve_fit, lambda x, a, b: a*x + b, + xdata, ydata, **{"check_finite": True}) + + @staticmethod + def _check_nan_policy(f, xdata_with_nan, xdata_without_nan, + ydata_with_nan, ydata_without_nan, method): + kwargs = {'f': f, 'xdata': xdata_with_nan, 'ydata': ydata_with_nan, + 'method': method, 'check_finite': False} + # propagate test + error_msg = ("`nan_policy='propagate'` is not supported " + "by this function.") + with assert_raises(ValueError, match=error_msg): + curve_fit(**kwargs, nan_policy="propagate", maxfev=2000) + + # raise test + with assert_raises(ValueError, match="The input contains nan"): + curve_fit(**kwargs, nan_policy="raise") + + # omit test + result_with_nan, _ = curve_fit(**kwargs, nan_policy="omit") + kwargs['xdata'] = xdata_without_nan + kwargs['ydata'] = ydata_without_nan + result_without_nan, _ = curve_fit(**kwargs) + assert_allclose(result_with_nan, result_without_nan) + + # not valid policy test + # check for argument names in any order + error_msg = (r"nan_policy must be one of \{(?:'raise'|'omit'|None)" + r"(?:, ?(?:'raise'|'omit'|None))*\}") + with assert_raises(ValueError, match=error_msg): + curve_fit(**kwargs, nan_policy="hi") + + @pytest.mark.parametrize('method', ["lm", "trf", "dogbox"]) + def test_nan_policy_1d(self, method): + def f(x, a, b): + return a*x + b + + xdata_with_nan = np.array([2, 3, np.nan, 4, 4, np.nan]) + ydata_with_nan = np.array([1, 2, 5, 3, np.nan, 7]) + xdata_without_nan = np.array([2, 3, 4]) + ydata_without_nan = np.array([1, 2, 3]) + + self._check_nan_policy(f, xdata_with_nan, xdata_without_nan, + ydata_with_nan, ydata_without_nan, method) + + @pytest.mark.parametrize('method', ["lm", "trf", "dogbox"]) + def test_nan_policy_2d(self, method): + def f(x, a, b): + x1 = x[0, :] + x2 = x[1, :] + return a*x1 + b + x2 + + xdata_with_nan = np.array([[2, 3, np.nan, 4, 4, np.nan, 5], + [2, 3, np.nan, np.nan, 4, np.nan, 7]]) + ydata_with_nan = np.array([1, 2, 5, 3, np.nan, 7, 10]) + xdata_without_nan = np.array([[2, 3, 5], [2, 3, 7]]) + ydata_without_nan = np.array([1, 2, 10]) + + self._check_nan_policy(f, xdata_with_nan, xdata_without_nan, + ydata_with_nan, ydata_without_nan, method) + + @pytest.mark.parametrize('n', [2, 3]) + @pytest.mark.parametrize('method', ["lm", "trf", "dogbox"]) + def test_nan_policy_2_3d(self, n, method): + def f(x, a, b): + x1 = x[..., 0, :].squeeze() + x2 = x[..., 1, :].squeeze() + return a*x1 + b + x2 + + xdata_with_nan = np.array([[[2, 3, np.nan, 4, 4, np.nan, 5], + [2, 3, np.nan, np.nan, 4, np.nan, 7]]]) + xdata_with_nan = xdata_with_nan.squeeze() if n == 2 else xdata_with_nan + ydata_with_nan = np.array([1, 2, 5, 3, np.nan, 7, 10]) + xdata_without_nan = np.array([[[2, 3, 5], [2, 3, 7]]]) + 
ydata_without_nan = np.array([1, 2, 10]) + + self._check_nan_policy(f, xdata_with_nan, xdata_without_nan, + ydata_with_nan, ydata_without_nan, method) + + def test_empty_inputs(self): + # Test both with and without bounds (regression test for gh-9864) + assert_raises(ValueError, curve_fit, lambda x, a: a*x, [], []) + assert_raises(ValueError, curve_fit, lambda x, a: a*x, [], [], + bounds=(1, 2)) + assert_raises(ValueError, curve_fit, lambda x, a: a*x, [1], []) + assert_raises(ValueError, curve_fit, lambda x, a: a*x, [2], [], + bounds=(1, 2)) + + def test_function_zero_params(self): + # Fit args is zero, so "Unable to determine number of fit parameters." + assert_raises(ValueError, curve_fit, lambda x: x, [1, 2], [3, 4]) + + def test_None_x(self): # Added in GH10196 + popt, pcov = curve_fit(lambda _, a: a * np.arange(10), + None, 2 * np.arange(10)) + assert_allclose(popt, [2.]) + + def test_method_argument(self): + def f(x, a, b): + return a * np.exp(-b*x) + + xdata = np.linspace(0, 1, 11) + ydata = f(xdata, 2., 2.) + + for method in ['trf', 'dogbox', 'lm', None]: + popt, pcov = curve_fit(f, xdata, ydata, method=method) + assert_allclose(popt, [2., 2.]) + + assert_raises(ValueError, curve_fit, f, xdata, ydata, method='unknown') + + def test_full_output(self): + def f(x, a, b): + return a * np.exp(-b * x) + + xdata = np.linspace(0, 1, 11) + ydata = f(xdata, 2., 2.) + + for method in ['trf', 'dogbox', 'lm', None]: + popt, pcov, infodict, errmsg, ier = curve_fit( + f, xdata, ydata, method=method, full_output=True) + assert_allclose(popt, [2., 2.]) + assert "nfev" in infodict + assert "fvec" in infodict + if method == 'lm' or method is None: + assert "fjac" in infodict + assert "ipvt" in infodict + assert "qtf" in infodict + assert isinstance(errmsg, str) + assert ier in (1, 2, 3, 4) + + def test_bounds(self): + def f(x, a, b): + return a * np.exp(-b*x) + + xdata = np.linspace(0, 1, 11) + ydata = f(xdata, 2., 2.) + + # The minimum w/out bounds is at [2., 2.], + # and with bounds it's at [1.5, smth]. + lb = [1., 0] + ub = [1.5, 3.] + + # Test that both variants of the bounds yield the same result + bounds = (lb, ub) + bounds_class = Bounds(lb, ub) + for method in [None, 'trf', 'dogbox']: + popt, pcov = curve_fit(f, xdata, ydata, bounds=bounds, + method=method) + assert_allclose(popt[0], 1.5) + + popt_class, pcov_class = curve_fit(f, xdata, ydata, + bounds=bounds_class, + method=method) + assert_allclose(popt_class, popt) + + # With bounds, the starting estimate is feasible. + popt, pcov = curve_fit(f, xdata, ydata, method='trf', + bounds=([0., 0], [0.6, np.inf])) + assert_allclose(popt[0], 0.6) + + # method='lm' doesn't support bounds. + assert_raises(ValueError, curve_fit, f, xdata, ydata, bounds=bounds, + method='lm') + + def test_bounds_p0(self): + # This test is for issue #5719. The problem was that an initial guess + # was ignored when 'trf' or 'dogbox' methods were invoked. + def f(x, a): + return np.sin(x + a) + + xdata = np.linspace(-2*np.pi, 2*np.pi, 40) + ydata = np.sin(xdata) + bounds = (-3 * np.pi, 3 * np.pi) + for method in ['trf', 'dogbox']: + popt_1, _ = curve_fit(f, xdata, ydata, p0=2.1*np.pi) + popt_2, _ = curve_fit(f, xdata, ydata, p0=2.1*np.pi, + bounds=bounds, method=method) + + # If the initial guess is ignored, then popt_2 would be close 0. + assert_allclose(popt_1, popt_2) + + def test_jac(self): + # Test that Jacobian callable is handled correctly and + # weighted if sigma is provided. 
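# The model fitted here is f(x; a, b) = a * exp(-b * x), so its analytic
# Jacobian with respect to the parameters has columns
#     df/da = exp(-b * x),      df/db = -a * x * exp(-b * x),
# which is exactly the (len(x), 2) array built by `jac` below.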
+ def f(x, a, b): + return a * np.exp(-b*x) + + def jac(x, a, b): + e = np.exp(-b*x) + return np.vstack((e, -a * x * e)).T + + xdata = np.linspace(0, 1, 11) + ydata = f(xdata, 2., 2.) + + # Test numerical options for least_squares backend. + for method in ['trf', 'dogbox']: + for scheme in ['2-point', '3-point', 'cs']: + popt, pcov = curve_fit(f, xdata, ydata, jac=scheme, + method=method) + assert_allclose(popt, [2, 2]) + + # Test the analytic option. + for method in ['lm', 'trf', 'dogbox']: + popt, pcov = curve_fit(f, xdata, ydata, method=method, jac=jac) + assert_allclose(popt, [2, 2]) + + # Now add an outlier and provide sigma. + ydata[5] = 100 + sigma = np.ones(xdata.shape[0]) + sigma[5] = 200 + for method in ['lm', 'trf', 'dogbox']: + popt, pcov = curve_fit(f, xdata, ydata, sigma=sigma, method=method, + jac=jac) + # Still the optimization process is influenced somehow, + # have to set rtol=1e-3. + assert_allclose(popt, [2, 2], rtol=1e-3) + + def test_maxfev_and_bounds(self): + # gh-6340: with no bounds, curve_fit accepts parameter maxfev (via leastsq) + # but with bounds, the parameter is `max_nfev` (via least_squares) + x = np.arange(0, 10) + y = 2*x + popt1, _ = curve_fit(lambda x,p: p*x, x, y, bounds=(0, 3), maxfev=100) + popt2, _ = curve_fit(lambda x,p: p*x, x, y, bounds=(0, 3), max_nfev=100) + + assert_allclose(popt1, 2, atol=1e-14) + assert_allclose(popt2, 2, atol=1e-14) + + @pytest.mark.parametrize("sigma_dim", [0, 1, 2]) + def test_curvefit_omitnan(self, sigma_dim): + def exponential(x, a, b): + return b * np.exp(a * x) + + rng = np.random.default_rng(578285731148908) + N = 100 + x = np.linspace(1, 10, N) + y = exponential(x, 0.2, 0.5) + + if (sigma_dim == 0): + sigma = 0.05 + y += rng.normal(0, sigma, N) + + elif (sigma_dim == 1): + sigma = x * 0.05 + y += rng.normal(0, sigma, N) + + elif (sigma_dim == 2): + # The covariance matrix must be symmetric positive-semidefinite + a = rng.normal(0, 2, (N, N)) + sigma = a @ a.T + y += rng.multivariate_normal(np.zeros_like(x), sigma) + else: + assert False, "The sigma must be a scalar, 1D array or 2D array." + + p0 = [0.1, 1.0] + + # Choose indices to place NaNs. 
+ i_x = rng.integers(N, size=5) + i_y = rng.integers(N, size=5) + + # Add NaNs and compute result using `curve_fit` + x[i_x] = np.nan + y[i_y] = np.nan + res_opt, res_cov = curve_fit(exponential, x, y, p0=p0, sigma=sigma, + nan_policy="omit") + + # Manually remove elements that should be eliminated, and + # calculate reference using `curve_fit` + i_delete = np.unique(np.concatenate((i_x, i_y))) + x = np.delete(x, i_delete, axis=0) + y = np.delete(y, i_delete, axis=0) + + sigma = np.asarray(sigma) + if sigma.ndim == 1: + sigma = np.delete(sigma, i_delete) + elif sigma.ndim == 2: + sigma = np.delete(sigma, i_delete, axis=0) + sigma = np.delete(sigma, i_delete, axis=1) + ref_opt, ref_cov = curve_fit(exponential, x, y, p0=p0, sigma=sigma) + + assert_allclose(res_opt, ref_opt, atol=1e-14) + assert_allclose(res_cov, ref_cov, atol=1e-14) + + def test_curvefit_simplecovariance(self): + + def func(x, a, b): + return a * np.exp(-b*x) + + def jac(x, a, b): + e = np.exp(-b*x) + return np.vstack((e, -a * x * e)).T + + np.random.seed(0) + xdata = np.linspace(0, 4, 50) + y = func(xdata, 2.5, 1.3) + ydata = y + 0.2 * np.random.normal(size=len(xdata)) + + sigma = np.zeros(len(xdata)) + 0.2 + covar = np.diag(sigma**2) + + for jac1, jac2 in [(jac, jac), (None, None)]: + for absolute_sigma in [False, True]: + popt1, pcov1 = curve_fit(func, xdata, ydata, sigma=sigma, + jac=jac1, absolute_sigma=absolute_sigma) + popt2, pcov2 = curve_fit(func, xdata, ydata, sigma=covar, + jac=jac2, absolute_sigma=absolute_sigma) + + assert_allclose(popt1, popt2, atol=1e-14) + assert_allclose(pcov1, pcov2, atol=1e-14) + + def test_curvefit_covariance(self): + + def funcp(x, a, b): + rotn = np.array([[1./np.sqrt(2), -1./np.sqrt(2), 0], + [1./np.sqrt(2), 1./np.sqrt(2), 0], + [0, 0, 1.0]]) + return rotn.dot(a * np.exp(-b*x)) + + def jacp(x, a, b): + rotn = np.array([[1./np.sqrt(2), -1./np.sqrt(2), 0], + [1./np.sqrt(2), 1./np.sqrt(2), 0], + [0, 0, 1.0]]) + e = np.exp(-b*x) + return rotn.dot(np.vstack((e, -a * x * e)).T) + + def func(x, a, b): + return a * np.exp(-b*x) + + def jac(x, a, b): + e = np.exp(-b*x) + return np.vstack((e, -a * x * e)).T + + rng = np.random.RandomState(0) + xdata = np.arange(1, 4) + y = func(xdata, 2.5, 1.0) + ydata = y + 0.2 * rng.normal(size=len(xdata)) + sigma = np.zeros(len(xdata)) + 0.2 + covar = np.diag(sigma**2) + # Get a rotation matrix, and obtain ydatap = R ydata + # Chisq = ydata^T C^{-1} ydata + # = ydata^T R^T R C^{-1} R^T R ydata + # = ydatap^T Cp^{-1} ydatap + # Cp^{-1} = R C^{-1} R^T + # Cp = R C R^T, since R^-1 = R^T + rotn = np.array([[1./np.sqrt(2), -1./np.sqrt(2), 0], + [1./np.sqrt(2), 1./np.sqrt(2), 0], + [0, 0, 1.0]]) + ydatap = rotn.dot(ydata) + covarp = rotn.dot(covar).dot(rotn.T) + + for jac1, jac2 in [(jac, jacp), (None, None)]: + for absolute_sigma in [False, True]: + popt1, pcov1 = curve_fit(func, xdata, ydata, sigma=sigma, + jac=jac1, absolute_sigma=absolute_sigma) + popt2, pcov2 = curve_fit(funcp, xdata, ydatap, sigma=covarp, + jac=jac2, absolute_sigma=absolute_sigma) + + assert_allclose(popt1, popt2, rtol=1.2e-7, atol=1e-14) + assert_allclose(pcov1, pcov2, rtol=1.2e-7, atol=1e-14) + + @pytest.mark.parametrize("absolute_sigma", [False, True]) + def test_curvefit_scalar_sigma(self, absolute_sigma): + def func(x, a, b): + return a * x + b + + x, y = self.x, self.y + _, pcov1 = curve_fit(func, x, y, sigma=2, absolute_sigma=absolute_sigma) + # Explicitly building the sigma 1D array + _, pcov2 = curve_fit( + func, x, y, sigma=np.full_like(y, 2), absolute_sigma=absolute_sigma + ) + 
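# A scalar sigma is treated as the same 1-sigma uncertainty for every data
# point, i.e. it is equivalent to passing sigma=np.full_like(y, 2), so the
# two covariance estimates should agree exactly.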
assert np.all(pcov1 == pcov2) + + def test_dtypes(self): + # regression test for gh-9581: curve_fit fails if x and y dtypes differ + x = np.arange(-3, 5) + y = 1.5*x + 3.0 + 0.5*np.sin(x) + + def func(x, a, b): + return a*x + b + + for method in ['lm', 'trf', 'dogbox']: + for dtx in [np.float32, np.float64]: + for dty in [np.float32, np.float64]: + x = x.astype(dtx) + y = y.astype(dty) + + with warnings.catch_warnings(): + warnings.simplefilter("error", OptimizeWarning) + p, cov = curve_fit(func, x, y, method=method) + + assert np.isfinite(cov).all() + assert not np.allclose(p, 1) # curve_fit's initial value + + def test_dtypes2(self): + # regression test for gh-7117: curve_fit fails if + # both inputs are float32 + def hyperbola(x, s_1, s_2, o_x, o_y, c): + b_2 = (s_1 + s_2) / 2 + b_1 = (s_2 - s_1) / 2 + return o_y + b_1*(x-o_x) + b_2*np.sqrt((x-o_x)**2 + c**2/4) + + min_fit = np.array([-3.0, 0.0, -2.0, -10.0, 0.0]) + max_fit = np.array([0.0, 3.0, 3.0, 0.0, 10.0]) + guess = np.array([-2.5/3.0, 4/3.0, 1.0, -4.0, 0.5]) + + params = [-2, .4, -1, -5, 9.5] + xdata = np.array([-32, -16, -8, 4, 4, 8, 16, 32]) + ydata = hyperbola(xdata, *params) + + # run optimization twice, with xdata being float32 and float64 + popt_64, _ = curve_fit(f=hyperbola, xdata=xdata, ydata=ydata, p0=guess, + bounds=(min_fit, max_fit)) + + xdata = xdata.astype(np.float32) + ydata = hyperbola(xdata, *params) + + popt_32, _ = curve_fit(f=hyperbola, xdata=xdata, ydata=ydata, p0=guess, + bounds=(min_fit, max_fit)) + + assert_allclose(popt_32, popt_64, atol=2e-5) + + def test_broadcast_y(self): + xdata = np.arange(10) + target = 4.7 * xdata ** 2 + 3.5 * xdata + np.random.rand(len(xdata)) + def fit_func(x, a, b): + return a * x ** 2 + b * x - target + for method in ['lm', 'trf', 'dogbox']: + popt0, pcov0 = curve_fit(fit_func, + xdata=xdata, + ydata=np.zeros_like(xdata), + method=method) + popt1, pcov1 = curve_fit(fit_func, + xdata=xdata, + ydata=0, + method=method) + assert_allclose(pcov0, pcov1) + + def test_args_in_kwargs(self): + # Ensure that `args` cannot be passed as keyword argument to `curve_fit` + + def func(x, a, b): + return a * x + b + + with assert_raises(ValueError): + curve_fit(func, + xdata=[1, 2, 3, 4], + ydata=[5, 9, 13, 17], + p0=[1], + args=(1,)) + + def test_data_point_number_validation(self): + def func(x, a, b, c, d, e): + return a * np.exp(-b * x) + c + d + e + + with assert_raises(TypeError, match="The number of func parameters="): + curve_fit(func, + xdata=[1, 2, 3, 4], + ydata=[5, 9, 13, 17]) + + @pytest.mark.filterwarnings('ignore::RuntimeWarning') + def test_gh4555(self): + # gh-4555 reported that covariance matrices returned by `leastsq` + # can have negative diagonal elements and eigenvalues. (In fact, + # they can also be asymmetric.) This shows up in the output of + # `scipy.optimize.curve_fit`. 
Check that it has been resolved.
+ def f(x, a, b, c, d, e):
+ return a*np.log(x + 1 + b) + c*np.log(x + 1 + d) + e
+
+ rng = np.random.default_rng(408113519974467917)
+ n = 100
+ x = np.arange(n)
+ y = np.linspace(2, 7, n) + rng.random(n)
+ p, cov = optimize.curve_fit(f, x, y, maxfev=100000)
+ assert np.all(np.diag(cov) > 0)
+ eigs = linalg.eigh(cov)[0] # separate line for debugging
+ # some platforms see a small negative eigenvalue
+ assert np.all(eigs > -1e-2)
+ assert_allclose(cov, cov.T)
+
+ def test_gh4555b(self):
+ # check that PR gh-17247 did not significantly change covariance matrix
+ # for simple cases
+ rng = np.random.default_rng(408113519974467917)
+
+ def func(x, a, b, c):
+ return a * np.exp(-b * x) + c
+
+ xdata = np.linspace(0, 4, 50)
+ y = func(xdata, 2.5, 1.3, 0.5)
+ y_noise = 0.2 * rng.normal(size=xdata.size)
+ ydata = y + y_noise
+ _, res = curve_fit(func, xdata, ydata)
+ # reference from commit 1d80a2f254380d2b45733258ca42eb6b55c8755b
+ ref = [[+0.0158972536486215, 0.0069207183284242, -0.0007474400714749],
+ [+0.0069207183284242, 0.0205057958128679, +0.0053997711275403],
+ [-0.0007474400714749, 0.0053997711275403, +0.0027833930320877]]
+ # Linux_Python_38_32bit_full fails with default tolerance
+ assert_allclose(res, ref, 2e-7)
+
+ def test_gh13670(self):
+ # gh-13670 reported that `curve_fit` executes callables
+ # with the same values of the parameters at the beginning of
+ # optimization. Check that this has been resolved.
+
+ rng = np.random.default_rng(8250058582555444926)
+ x = np.linspace(0, 3, 101)
+ y = 2 * x + 1 + rng.normal(size=101) * 0.5
+
+ def line(x, *p):
+ assert not np.all(line.last_p == p)
+ line.last_p = p
+ return x * p[0] + p[1]
+
+ def jac(x, *p):
+ assert not np.all(jac.last_p == p)
+ jac.last_p = p
+ return np.array([x, np.ones_like(x)]).T
+
+ line.last_p = None
+ jac.last_p = None
+ p0 = np.array([1.0, 5.0])
+ curve_fit(line, x, y, p0, method='lm', jac=jac)
+
+ @pytest.mark.parametrize('method', ['trf', 'dogbox'])
+ def test_gh20155_error_mentions_x0(self, method):
+ # `curve_fit` produced an error message that referred to an undocumented
+ # variable `x0`, which was really `p0`. Check that this is resolved.
+ def func(x,a): + return x**a + message = "Initial guess is outside of provided bounds" + with pytest.raises(ValueError, match=message): + curve_fit(func, self.x, self.y, p0=[1], bounds=(1000, 1001), + method=method) + + +class TestFixedPoint: + + def test_scalar_trivial(self): + # f(x) = 2x; fixed point should be x=0 + def func(x): + return 2.0*x + x0 = 1.0 + x = fixed_point(func, x0) + assert_almost_equal(x, 0.0) + + def test_scalar_basic1(self): + # f(x) = x**2; x0=1.05; fixed point should be x=1 + def func(x): + return x**2 + x0 = 1.05 + x = fixed_point(func, x0) + assert_almost_equal(x, 1.0) + + def test_scalar_basic2(self): + # f(x) = x**0.5; x0=1.05; fixed point should be x=1 + def func(x): + return x**0.5 + x0 = 1.05 + x = fixed_point(func, x0) + assert_almost_equal(x, 1.0) + + def test_array_trivial(self): + def func(x): + return 2.0*x + x0 = [0.3, 0.15] + with np.errstate(all='ignore'): + x = fixed_point(func, x0) + assert_almost_equal(x, [0.0, 0.0]) + + def test_array_basic1(self): + # f(x) = c * x**2; fixed point should be x=1/c + def func(x, c): + return c * x**2 + c = array([0.75, 1.0, 1.25]) + x0 = [1.1, 1.15, 0.9] + with np.errstate(all='ignore'): + x = fixed_point(func, x0, args=(c,)) + assert_almost_equal(x, 1.0/c) + + def test_array_basic2(self): + # f(x) = c * x**0.5; fixed point should be x=c**2 + def func(x, c): + return c * x**0.5 + c = array([0.75, 1.0, 1.25]) + x0 = [0.8, 1.1, 1.1] + x = fixed_point(func, x0, args=(c,)) + assert_almost_equal(x, c**2) + + def test_lambertw(self): + # python-list/2010-December/594592.html + xxroot = fixed_point(lambda xx: np.exp(-2.0*xx)/2.0, 1.0, + args=(), xtol=1e-12, maxiter=500) + assert_allclose(xxroot, np.exp(-2.0*xxroot)/2.0) + assert_allclose(xxroot, lambertw(1)/2) + + def test_no_acceleration(self): + # GitHub issue 5460 + ks = 2 + kl = 6 + m = 1.3 + n0 = 1.001 + i0 = ((m-1)/m)*(kl/ks/m)**(1/(m-1)) + + def func(n): + return np.log(kl/ks/n) / np.log(i0*n/(n - 1)) + 1 + + n = fixed_point(func, n0, method='iteration') + assert_allclose(n, m) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_nnls.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_nnls.py new file mode 100644 index 0000000000000000000000000000000000000000..67443dd6147bd7ee8898bac2a1a7993f6e56e799 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_nnls.py @@ -0,0 +1,429 @@ +import numpy as np +from numpy.testing import assert_allclose +from pytest import raises as assert_raises +from scipy.optimize import nnls + + +class TestNNLS: + def setup_method(self): + self.rng = np.random.default_rng(1685225766635251) + + def test_nnls(self): + a = np.arange(25.0).reshape(-1, 5) + x = np.arange(5.0) + y = a @ x + x, res = nnls(a, y) + assert res < 1e-7 + assert np.linalg.norm((a @ x) - y) < 1e-7 + + def test_nnls_tall(self): + a = self.rng.uniform(low=-10, high=10, size=[50, 10]) + x = np.abs(self.rng.uniform(low=-2, high=2, size=[10])) + x[::2] = 0 + b = a @ x + xact, rnorm = nnls(a, b, atol=500*np.linalg.norm(a, 1)*np.spacing(1.)) + assert_allclose(xact, x, rtol=0., atol=1e-10) + assert rnorm < 1e-12 + + def test_nnls_wide(self): + # If too wide then problem becomes too ill-conditioned ans starts + # emitting warnings, hence small m, n difference. 
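# nnls solves argmin_x ||A x - b||_2 subject to x >= 0 and returns the
# minimizer together with the residual norm ||A x - b||_2, so for this
# consistent system both the solution error and `rnorm` should be ~0.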
+ a = self.rng.uniform(low=-10, high=10, size=[100, 120]) + x = np.abs(self.rng.uniform(low=-2, high=2, size=[120])) + x[::2] = 0 + b = a @ x + xact, rnorm = nnls(a, b, atol=500*np.linalg.norm(a, 1)*np.spacing(1.)) + assert_allclose(xact, x, rtol=0., atol=1e-10) + assert rnorm < 1e-12 + + def test_maxiter(self): + # test that maxiter argument does stop iterations + a = self.rng.uniform(size=(5, 10)) + b = self.rng.uniform(size=5) + with assert_raises(RuntimeError): + nnls(a, b, maxiter=1) + + def test_nnls_inner_loop_case1(self): + # See gh-20168 + n = np.array( + [3, 2, 0, 1, 1, 1, 3, 8, 14, 16, 29, 23, 41, 47, 53, 57, 67, 76, + 103, 89, 97, 94, 85, 95, 78, 78, 78, 77, 73, 50, 50, 56, 68, 98, + 95, 112, 134, 145, 158, 172, 213, 234, 222, 215, 216, 216, 206, + 183, 135, 156, 110, 92, 63, 60, 52, 29, 20, 16, 12, 5, 5, 5, 1, 2, + 3, 0, 2]) + k = np.array( + [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0.7205812007860187, 0., 1.4411624015720375, + 0.7205812007860187, 2.882324803144075, 5.76464960628815, + 5.76464960628815, 12.249880413362318, 15.132205216506394, + 20.176273622008523, 27.382085629868712, 48.27894045266326, + 47.558359251877235, 68.45521407467177, 97.99904330689854, + 108.0871801179028, 135.46926574777152, 140.51333415327366, + 184.4687874012208, 171.49832578707245, 205.36564222401535, + 244.27702706646033, 214.01261663344755, 228.42424064916793, + 232.02714665309804, 205.36564222401535, 172.9394881886445, + 191.67459940908097, 162.1307701768542, 153.48379576742198, + 110.96950492104689, 103.04311171240067, 86.46974409432225, + 60.528820866025576, 43.234872047161126, 23.779179625938617, + 24.499760826724636, 17.29394881886445, 11.5292992125763, + 5.76464960628815, 5.044068405502131, 3.6029060039300935, 0., + 2.882324803144075, 0., 0., 0.]) + d = np.array( + [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0.003889242101538, 0., 0.007606268390096, 0., + 0.025457371599973, 0.036952882091577, 0., 0.08518359183449, + 0.048201126400243, 0.196234990022205, 0.144116240157247, + 0.171145134062442, 0., 0., 0.269555036538714, 0., 0., 0., + 0.010893241091872, 0., 0., 0., 0., 0., 0., 0., 0., + 0.048167058272886, 0.011238724891049, 0., 0., 0.055162603456078, + 0., 0., 0., 0., 0.027753339088588, 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0.]) + # The following code sets up a system of equations such that + # $k_i-p_i*n_i$ is minimized for $p_i$ with weights $n_i$ and + # monotonicity constraints on $p_i$. This translates to a system of + # equations of the form $k_i - (d_1 + ... + d_i) * n_i$ and + # non-negativity constraints on the $d_i$. If $n_i$ is zero the + # system is modified such that $d_i - d_{i+1}$ is then minimized. 
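# A hypothetical 3-point sketch of that construction (illustration only,
# not the test data): with n = [2, 0, 5],
#     >>> np.diag([2, 0, 5]) @ np.tril(np.ones((3, 3)))
#     array([[2., 0., 0.],
#            [0., 0., 0.],
#            [5., 5., 5.]])
# and the zero-count row is then replaced by [0, 1, -1] so that d_2 - d_3
# is minimized instead; nnls is finally asked for
#     min || W (A d - k) ||  subject to  d >= 0.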
+ N = len(n) + A = np.diag(n) @ np.tril(np.ones((N, N))) + w = n ** 0.5 + + nz = (n == 0).nonzero()[0] + A[nz, nz] = 1 + A[nz, np.minimum(nz + 1, N - 1)] = -1 + w[nz] = 1 + k[nz] = 0 + W = np.diag(w) + + # Small perturbations can already make the infinite loop go away (just + # uncomment the next line) + # k = k + 1e-10 * np.random.normal(size=N) + dact, _ = nnls(W @ A, W @ k) + assert_allclose(dact, d, rtol=0., atol=1e-10) + + def test_nnls_inner_loop_case2(self): + # See gh-20168 + n = np.array( + [1, 0, 1, 2, 2, 2, 3, 3, 5, 4, 14, 14, 19, 26, 36, 42, 36, 64, 64, + 64, 81, 85, 85, 95, 95, 95, 75, 76, 69, 81, 62, 59, 68, 64, 71, 67, + 74, 78, 118, 135, 153, 159, 210, 195, 218, 243, 236, 215, 196, 175, + 185, 149, 144, 103, 104, 75, 56, 40, 32, 26, 17, 9, 12, 8, 2, 1, 1, + 1]) + k = np.array( + [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0.7064355064917867, 0., 0., 2.11930651947536, + 0.7064355064917867, 0., 3.5321775324589333, 7.064355064917867, + 11.302968103868587, 16.95445215580288, 20.486629688261814, + 20.486629688261814, 37.44108184406469, 55.808405012851146, + 78.41434122058831, 103.13958394780086, 105.965325973768, + 125.74552015553803, 149.057891869767, 176.60887662294667, + 197.09550631120848, 211.930651947536, 204.86629688261814, + 233.8301526487814, 221.1143135319292, 195.6826352982249, + 197.80194181770025, 191.4440222592742, 187.91184472681525, + 144.11284332432447, 131.39700420747232, 116.5618585711448, + 93.24948685691584, 89.01087381796512, 53.68909849337579, + 45.211872415474346, 31.083162285638615, 24.72524272721253, + 16.95445215580288, 9.890097090885014, 9.890097090885014, + 2.8257420259671466, 2.8257420259671466, 1.4128710129835733, + 0.7064355064917867, 1.4128710129835733]) + d = np.array( + [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0.0021916146355674473, 0., 0., + 0.011252740799789484, 0., 0., 0.037746623295934395, + 0.03602328132946222, 0.09509167709829734, 0.10505765870204821, + 0.01391037014274718, 0.0188296228752321, 0.20723559202324254, + 0.3056220879462608, 0.13304643490426477, 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0.043185876949706214, 0.0037266261379722554, + 0., 0., 0., 0., 0., 0.094797899357143, 0., 0., 0., 0., 0., 0., 0., + 0., 0.23450935613672663, 0., 0., 0.07064355064917871]) + # The following code sets up a system of equations such that + # $k_i-p_i*n_i$ is minimized for $p_i$ with weights $n_i$ and + # monotonicity constraints on $p_i$. This translates to a system of + # equations of the form $k_i - (d_1 + ... + d_i) * n_i$ and + # non-negativity constraints on the $d_i$. If $n_i$ is zero the + # system is modified such that $d_i - d_{i+1}$ is then minimized. 
+ N = len(n) + A = np.diag(n) @ np.tril(np.ones((N, N))) + w = n ** 0.5 + + nz = (n == 0).nonzero()[0] + A[nz, nz] = 1 + A[nz, np.minimum(nz + 1, N - 1)] = -1 + w[nz] = 1 + k[nz] = 0 + W = np.diag(w) + + dact, _ = nnls(W @ A, W @ k, atol=1e-7) + + p = np.cumsum(dact) + assert np.all(dact >= 0) + assert np.linalg.norm(k - n * p, ord=np.inf) < 28 + assert_allclose(dact, d, rtol=0., atol=1e-10) + + def test_nnls_gh20302(self): + # See gh-20302 + A = np.array( + [0.33408569134321575, 0.11136189711440525, 0.049140798007949286, + 0.03712063237146841, 0.055680948557202625, 0.16642814595936478, + 0.11095209730624318, 0.09791993030943345, 0.14793612974165757, + 0.44380838922497273, 0.11099502671044059, 0.11099502671044059, + 0.14693672599330593, 0.3329850801313218, 1.498432860590948, + 0.0832374225132955, 0.11098323001772734, 0.19589481249472837, + 0.5919105600945457, 3.5514633605672747, 0.06658716751427037, + 0.11097861252378394, 0.24485832778293645, 0.9248217710315328, + 6.936163282736496, 0.05547609388181014, 0.11095218776362029, + 0.29376003042571264, 1.3314262531634435, 11.982836278470993, + 0.047506113282944136, 0.11084759766020298, 0.3423969672933396, + 1.8105107617833156, 19.010362998724812, 0.041507335004505576, + 0.11068622667868154, 0.39074115283013344, 2.361306169145206, + 28.335674029742474, 0.03682846280947718, 0.11048538842843154, + 0.4387861797121048, 2.9831054875676517, 40.2719240821633, + 0.03311278164362387, 0.11037593881207958, 0.4870572300443105, + 3.6791979604026523, 55.187969406039784, 0.030079304092299915, + 0.11029078167176636, 0.5353496017200152, 4.448394860761242, + 73.3985152025605, 0.02545939709595835, 0.11032405408248619, + 0.6328767609778363, 6.214921713313388, 121.19097340961108, + 0.022080881724881523, 0.11040440862440762, 0.7307742886903428, + 8.28033064683057, 186.30743955368786, 0.020715838214945492, + 0.1104844704797093, 0.7800578384588346, 9.42800814760186, + 226.27219554244465, 0.01843179728340054, 0.11059078370040323, + 0.8784095015912599, 11.94380463964355, 322.48272527037585, + 0.015812787653789077, 0.11068951357652354, 1.0257259848595766, + 16.27135849574896, 512.5477926160922, 0.014438550529330062, + 0.11069555405819713, 1.1234754801775881, 19.519316032262093, + 673.4164031130423, 0.012760770585072577, 0.110593345070629, + 1.2688431112524712, 24.920367089248398, 971.8943164806875, + 0.011427556646114315, 0.11046638091243838, 1.413623342459821, + 30.967408782453557, 1347.0822820367298, 0.010033330264470307, + 0.11036663290917338, 1.6071533470570285, 40.063087746029936, + 1983.122843428482, 0.008950061496507258, 0.11038409179025618, + 1.802244865119193, 50.37194055362024, 2795.642700725923, + 0.008071078821135658, 0.11030474388885401, 1.9956465761433504, + 61.80742482572119, 3801.1566267818534, 0.007191031207777556, + 0.11026247851925586, 2.238160187262168, 77.7718015155818, + 5366.2543045751445, 0.00636834224248, 0.11038459886965334, + 2.5328963107984297, 99.49331844784753, 7760.4788389321075, + 0.005624259098118485, 0.11061042892966355, 2.879742607664547, + 128.34496770138628, 11358.529641572684, 0.0050354270614989555, + 0.11077939535297703, 3.2263279459292575, 160.85168205252265, + 15924.316523199741, 0.0044997853165982555, 0.1109947044760903, + 3.6244287189055613, 202.60233390369015, 22488.859063309606, + 0.004023601950058174, 0.1113196539516095, 4.07713905729421, + 255.6270320242126, 31825.565487014468, 0.0036024117873727094, + 0.111674765408554, 4.582933773135057, 321.9583486728612, + 44913.18963986413, 0.003201503089582304, 0.11205260813538065, + 
5.191786833370116, 411.79333489752383, 64857.45024636, + 0.0028633044552448853, 0.11262330857296549, 5.864295861648949, + 522.7223161899905, 92521.84996562831, 0.0025691897303891965, + 0.11304434813712465, 6.584584405106342, 656.5615739804199, + 129999.19164812315, 0.0022992911894424675, 0.11343169867916175, + 7.4080129906658305, 828.2026426227864, 183860.98666225857, + 0.0020449922071108764, 0.11383789952917212, 8.388975556433872, + 1058.2750599896935, 265097.9025274183, 0.001831274615120854, + 0.11414945100919989, 9.419351803810935, 1330.564050780237, + 373223.2162438565, 0.0016363333454631633, 0.11454333418242145, + 10.6143816579462, 1683.787012481595, 530392.9089317025, + 0.0014598610433380044, 0.11484240207592301, 11.959688127956882, + 2132.0874753402027, 754758.9662704318, 0.0012985240015312626, + 0.11513579480243862, 13.514425358573531, 2715.5160990137824, + 1083490.9235064993, 0.0011614735761289934, 0.11537304189548002, + 15.171418602667567, 3415.195870828736, 1526592.554260445, + 0.0010347472698811352, 0.11554677847006009, 17.080800985009617, + 4322.412404600832, 2172012.2333119176, 0.0009232988811258664, + 0.1157201264344419, 19.20004861829407, 5453.349531598553, + 3075689.135821584, 0.0008228871862975205, 0.11602709326795038, + 21.65735242414206, 6920.203923780365, 4390869.389638642, + 0.00073528900066722, 0.11642075843897651, 24.40223571298994, + 8755.811207598026, 6238515.485413593, 0.0006602764384729194, + 0.11752920604817965, 27.694443541914293, 11171.386093291572, + 8948280.260726549, 0.0005935538977939806, 0.11851292825953147, + 31.325508920763063, 14174.185724149384, 12735505.873148222, + 0.0005310755355633124, 0.11913794514470308, 35.381052949627765, + 17987.010118815077, 18157886.71494382, 0.00047239949671590953, + 0.1190446731724092, 39.71342528048061, 22679.438775422022, + 25718483.571328573, 0.00041829129789387623, 0.11851586773659825, + 44.45299332965028, 28542.57147989741, 36391778.63686921, + 0.00037321512015419886, 0.11880681324908665, 50.0668539579632, + 36118.26128449941, 51739409.29004541, 0.0003315539616702064, + 0.1184752823034871, 56.04387059062639, 45383.29960621684, + 72976345.76679668, 0.00029456064937920213, 0.11831519416731286, + 62.91195073220101, 57265.53993693082, 103507463.43600245, + 0.00026301867496859703, 0.11862142241083726, 70.8217262087034, + 72383.14781936012, 146901598.49939138, 0.00023618734450420032, + 0.11966825454879482, 80.26535457124461, 92160.51176984518, + 210125966.835247, 0.00021165918071578316, 0.12043407382728061, + 90.7169587544247, 116975.56852918258, 299515943.218972, + 0.00018757727511329545, 0.11992440455576689, 101.49899864101785, + 147056.26174166967, 423080865.0307836, 0.00016654469159895833, + 0.11957908856805206, 113.65970431102812, 184937.67016486943, + 597533612.3026931, 0.00014717439179415048, 0.11872067604728138, + 126.77899683346702, 231758.58906776624, 841283678.3159915, + 0.00012868496382376066, 0.1166314722122684, 139.93635237349534, + 287417.30847929465, 1172231492.6328032, 0.00011225559452625302, + 0.11427619522772557, 154.0034283704458, 355281.4912295324, + 1627544511.322488, 9.879511142981067e-05, 0.11295574406808354, + 170.96532050841535, 442971.0111288653, 2279085852.2580123, + 8.71257780313587e-05, 0.11192758284428547, 190.35067416684697, + 554165.2523674504, 3203629323.93623, 7.665069027765277e-05, + 0.11060694607065294, 211.28835951100046, 690933.608546013, + 4486577387.093535, 6.734021094824451e-05, 0.10915848194710433, + 234.24338803525194, 860487.9079859136, 6276829044.8032465, + 
5.9191625040287665e-05, 0.10776821865668373, 259.7454711820425, + 1071699.0387579766, 8780430224.544102, 5.1856803674907676e-05, + 0.10606444911641115, 287.1843540288165, 1331126.3723998806, + 12251687131.5685, 4.503421404759231e-05, 0.10347361247668461, + 314.7338642485931, 1638796.0697522392, 16944331963.203278, + 3.90470387455642e-05, 0.1007804070023012, 344.3427560918527, + 2014064.4865519698, 23392351979.057854, 3.46557661636393e-05, + 0.10046706610839032, 385.56603915081587, 2533036.2523656, + 33044724430.235435, 3.148745865254635e-05, 0.1025441570117926, + 442.09038234164746, 3262712.3882769793, 47815050050.199135, + 2.9790762078715404e-05, 0.1089845379379672, 527.8068231298969, + 4375751.903321453, 72035815708.42941, 2.8772639817606534e-05, + 0.11823636789048445, 643.2048194503195, 5989838.001888927, + 110764084330.93005, 2.7951691815106586e-05, 0.12903432664913705, + 788.5500418523591, 8249371.000613411, 171368308481.2427, + 2.6844392423114212e-05, 0.1392060709754626, 955.6296403631383, + 11230229.319931043, 262063016295.25085, 2.499458273851386e-05, + 0.14559344445184325, 1122.7022399726002, 14820229.698461473, + 388475270970.9214, 2.337386729019776e-05, 0.15294300496886065, + 1324.8158105672455, 19644861.137128454, 578442936182.7473, + 2.0081014872174113e-05, 0.14760215298210377, 1436.2385042492353, + 23923681.729276657, 791311658718.4193, 1.773374462991839e-05, + 0.14642752940923615, 1600.5596278736678, 29949429.82503553, + 1112815989293.9326, 1.5303115839590797e-05, 0.14194150045081785, + 1742.873058605698, 36634451.931305364, 1529085389160.7544, + 1.3148448731163076e-05, 0.13699368732998807, 1889.5284359054356, + 44614279.74469635, 2091762812969.9607, 1.1739194407590062e-05, + 0.13739553134643406, 2128.794599579694, 56462810.11822766, + 2973783283306.8145, 1.0293367506254706e-05, 0.13533033372723272, + 2355.372854690074, 70176508.28667311, 4151852759764.441, + 9.678312586863569e-06, 0.14293577249119244, 2794.531827932675, + 93528671.31952812, 6215821967224.52, -1.174086323572049e-05, + 0.1429501325944908, 3139.4804810720925, 118031680.16618933, + -6466892421886.174, -2.1188265307407812e-05, 0.1477108290912869, + 3644.1133424610953, 153900132.62392554, -4828013117542.036, + -8.614483025123122e-05, 0.16037100755883044, 4444.386620899393, + 210846007.89660168, -1766340937974.433, 4.981445776141726e-05, + 0.16053420251962536, 4997.558254401547, 266327328.4755411, + 3862250287024.725, 1.8500019169456637e-05, 0.15448417164977674, + 5402.289867444643, 323399508.1475582, 12152445411933.408, + -5.647882376069748e-05, 0.1406372975946189, 5524.633133597753, + 371512945.9909363, -4162951345292.1514, 2.8048523486337994e-05, + 0.13183417571186926, 5817.462495763679, 439447252.3728975, + 9294740538175.03]).reshape(89, 5) + b = np.ones(89, dtype=np.float64) + sol, rnorm = nnls(A, b) + assert_allclose(sol, np.array([0.61124315, 8.22262829, 0., 0., 0.])) + assert_allclose(rnorm, 1.0556460808977297) + + def test_nnls_gh21021_ex1(self): + # Review examples used in gh-21021 + A = [[0.004734199143798789, -0.09661916455815653, -0.04308779048103441, + 0.4039475561867938, -0.27742598780954364, -0.20816924034369574, + -0.17264070902176, 0.05251808558963846], + [-0.030263548855047975, -0.30356483926431466, 0.18080406600591398, + -0.06892233941254086, -0.41837298885432317, 0.30245352819647003, + -0.19008975278116397, -0.00990809825429995], + [-0.2561747595787612, -0.04376282125249583, 0.4422181991706678, + -0.13720906318924858, -0.0069523811763796475, -0.059238287107464795, + 0.028663214369642594, 
0.5415531284893763], + [0.2949336072968401, 0.33997647534935094, 0.38441519339815755, + -0.306001783010386, 0.18120773805949028, -0.36669767490747895, + -0.021539960590992304, -0.2784251712424615], + [0.5009075736232653, -0.20161970347571165, 0.08404512586550646, + 0.2520496489348788, 0.14812015101612894, -0.25823455803981266, + -0.1596872058396596, 0.5960141613922691] + ] + b = [18.036779281222124, -18.126530733870887, 13.535652034584029, + -2.6654275476795966, 9.166315328199575] + + # Obtained from matlab's lstnonneg + des_sol = np.array([0., 118.017802006619, 45.1996532316584, 102.62156313537, + 0., 55.8590204314398, 0., 29.7328833253434]) + sol, res = nnls(A, b) + assert_allclose(sol, des_sol) + assert np.abs(np.linalg.norm(A@sol - b) - res) < 5e-14 + + def test_nnls_gh21021_ex2(self): + A = np.array([ + [0.2508259992635229, -0.24031300195203256], + [0.510647748500133, 0.2872936081767836], + [0.8196387904102849, -0.03520620107046682], + [0.030739759120097084, -0.07768656359879388]]) + b = np.array([24.456141951303913, + 28.047143273432333, + 41.10526799545987, + -1.2078282698324068]) + + sol, res = nnls(A, b) + assert_allclose(sol, np.array([54.3047953202271, 0.0])) + assert np.abs(np.linalg.norm(A@sol - b) - res) < 5e-14 + + def test_nnls_gh21021_ex3(self): + A = np.array([ + [0.08247592017366788, 0.058398241636675674, -0.1031496693415968, + 0.03156983127072098, -0.029503680182026665], + [0.21463607509982277, -0.2164518969308173, -0.10816833396662294, + 0.12133867146012027, -0.15025010408668332], + [0.07251900316494089, -0.003044559315020767, 0.042682817961676424, + -0.018157525489298176, 0.11561953260568134], + [0.2328797918159187, -0.09112909645892767, 0.21348169727099078, + 0.00449447624089599, -0.16615256386885716], + [-0.02440856024843897, -0.20131427208575386, 0.030275781997161483, + -0.04560777213546784, 0.11007266012013553], + [-0.2928391429686263, -0.20437574856615687, -0.020892110811574407, + -0.10455040720819309, 0.05337267000160461], + [0.22041503019400316, 0.014262782992311842, 0.08274606359871121, + -0.17933172096518907, -0.11809690350702161], + [0.10440436007469953, 0.09171452270577712, 0.03942347724809893, + 0.11457669688231396, 0.07529747295631585], + [-0.052087576116032056, -0.15787717158077047, -0.08232202515883282, + -0.03194837933710708, -0.0546812506025729], + [-0.010388407673304468, 0.015174707581808923, 0.04764509565386281, + -0.1781221936030805, 0.10218894080536609], + [0.03272263140115928, -0.27576456949442574, 0.024897570959901753, + -0.1417129166632282, -0.03320796462136591], + [-0.12490006751823997, -0.03012003515442302, -0.051495264012509506, + 0.012070729698374614, 0.04811700123118234], + [0.15254854117990788, -0.051863547789218374, 0.058012914127346174, + -0.06717991061422621, -0.14514671564242257], + [0.12251250415395559, -0.17462495626695362, -0.025334728552179834, + 0.11425350676877533, 0.06183915953812639], + [0.19334259720491218, 0.2164301986218955, -0.018882278726614483, + 0.07950236716817938, -0.2220529357431092], + [-0.01822205701890852, 0.12630444976752267, -0.03118092027244001, + 0.02773743885242581, 0.06444433740044248], + [0.13344116850581977, -0.05142877469996826, 0.3385702016705455, + -0.25814970787123004, 0.2679034842977378], + [0.1309747058619377, 0.12090608957940627, -0.13957978654106512, + 0.17048819760322642, -0.241775259969348], + [0.28613102173467275, -0.47153463906732174, 0.20359970518269746, + -0.0962095202871843, -0.07703076550836387], + [0.2212788380372723, 0.02569245145758152, -0.021596152392209966, + 0.04610005150029433, 
-0.2024454395619734], + [-0.043225338359410316, 0.17816095186290315, -0.014709092962616079, + 0.06993970293287989, -0.09033722782555903], + [0.17747622942563512, -0.20991014784011458, 0.06265720409894943, + 0.0689704059061795, 0.024474319398401525], + [-0.1163880385601698, 0.29989570587630027, 0.033443765320984545, + 0.008470296514656, -0.0014457113271462002], + [0.024375314902718406, 0.05279830705548363, 0.02691082431023144, + 0.05265079368002343, 0.15542988147487913], + [-0.01855218360922308, -0.050265869142888164, 0.2567912677240452, + -0.2606428528561333, 0.25334396245022245]]) + + b = np.array([-7.876625373734849, -8.259856278691373, 3.2593082374900963, + 16.30170376973345, 2.311892943629045, -1.595345202555738, + 6.318582970536518, 3.0104212955340093, -6.286202915842167, + 3.6382333725029294, 1.9012066681249356, -3.932236581436514, + 4.4299317131740406, -1.9345885161292682, -1.4418721521970805, + -2.3810103256943926, 25.853603392922526, -10.658470311610483, + 15.547103681119214, -1.6491066136547277, -1.1232029689817422, + 4.7845749463206975, 2.553803732013229, 2.0549409701753705, + 19.60887153608244]) + + sol, res = nnls(A, b) + assert_allclose(sol, np.array([0.0, 0.0, 76.3611306173957, 0.0, 0.0]), + atol=5e-14) + assert np.abs(np.linalg.norm(A@sol - b) - res) < 5e-14 diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_nonlin.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_nonlin.py new file mode 100644 index 0000000000000000000000000000000000000000..e5eb094c15902eca6e089ba3bbf6dfd8eb06970e --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_nonlin.py @@ -0,0 +1,536 @@ +""" Unit tests for nonlinear solvers +Author: Ondrej Certik +May 2007 +""" +from numpy.testing import assert_ +import pytest +from functools import partial + +from scipy.optimize import _nonlin as nonlin, root +from scipy.sparse import csr_array +from numpy import diag, dot +from numpy.linalg import inv +import numpy as np +import scipy + +from .test_minpack import pressure_network + +SOLVERS = {'anderson': nonlin.anderson, + 'diagbroyden': nonlin.diagbroyden, + 'linearmixing': nonlin.linearmixing, + 'excitingmixing': nonlin.excitingmixing, + 'broyden1': nonlin.broyden1, + 'broyden2': nonlin.broyden2, + 'krylov': nonlin.newton_krylov} +MUST_WORK = {'anderson': nonlin.anderson, 'broyden1': nonlin.broyden1, + 'broyden2': nonlin.broyden2, 'krylov': nonlin.newton_krylov} + +# ---------------------------------------------------------------------------- +# Test problems +# ---------------------------------------------------------------------------- + + +def F(x): + x = np.asarray(x).T + d = diag([3, 2, 1.5, 1, 0.5]) + c = 0.01 + f = -d @ x - c * float(x.T @ x) * x + return f + + +F.xin = [1, 1, 1, 1, 1] +F.KNOWN_BAD = {} +F.JAC_KSP_BAD = {} +F.ROOT_JAC_KSP_BAD = {} + + +def F2(x): + return x + + +F2.xin = [1, 2, 3, 4, 5, 6] +F2.KNOWN_BAD = {'linearmixing': nonlin.linearmixing, + 'excitingmixing': nonlin.excitingmixing} +F2.JAC_KSP_BAD = {} +F2.ROOT_JAC_KSP_BAD = {} + + +def F2_lucky(x): + return x + + +F2_lucky.xin = [0, 0, 0, 0, 0, 0] +F2_lucky.KNOWN_BAD = {} +F2_lucky.JAC_KSP_BAD = {} +F2_lucky.ROOT_JAC_KSP_BAD = {} + + +def F3(x): + A = np.array([[-2, 1, 0.], [1, -2, 1], [0, 1, -2]]) + b = np.array([1, 2, 3.]) + return A @ x - b + + +F3.xin = [1, 2, 3] +F3.KNOWN_BAD = {} +F3.JAC_KSP_BAD = {} +F3.ROOT_JAC_KSP_BAD = {} + + +def 
F4_powell(x): + A = 1e4 + return [A*x[0]*x[1] - 1, np.exp(-x[0]) + np.exp(-x[1]) - (1 + 1/A)] + + +F4_powell.xin = [-1, -2] +F4_powell.KNOWN_BAD = {'linearmixing': nonlin.linearmixing, + 'excitingmixing': nonlin.excitingmixing, + 'diagbroyden': nonlin.diagbroyden} +# In the extreme case, it does not converge for nolinear problem solved by +# MINRES and root problem solved by GMRES/BiCGStab/CGS/MINRES/TFQMR when using +# Krylov method to approximate Jacobian +F4_powell.JAC_KSP_BAD = {'minres'} +F4_powell.ROOT_JAC_KSP_BAD = {'gmres', 'bicgstab', 'cgs', 'minres', 'tfqmr'} + + +def F5(x): + return pressure_network(x, 4, np.array([.5, .5, .5, .5])) + + +F5.xin = [2., 0, 2, 0] +F5.KNOWN_BAD = {'excitingmixing': nonlin.excitingmixing, + 'linearmixing': nonlin.linearmixing, + 'diagbroyden': nonlin.diagbroyden} +# In the extreme case, the Jacobian inversion yielded zero vector for nonlinear +# problem solved by CGS/MINRES and it does not converge for root problem solved +# by MINRES and when using Krylov method to approximate Jacobian +F5.JAC_KSP_BAD = {'cgs', 'minres'} +F5.ROOT_JAC_KSP_BAD = {'minres'} + + +def F6(x): + x1, x2 = x + J0 = np.array([[-4.256, 14.7], + [0.8394989, 0.59964207]]) + v = np.array([(x1 + 3) * (x2**5 - 7) + 3*6, + np.sin(x2 * np.exp(x1) - 1)]) + return -np.linalg.solve(J0, v) + + +F6.xin = [-0.5, 1.4] +F6.KNOWN_BAD = {'excitingmixing': nonlin.excitingmixing, + 'linearmixing': nonlin.linearmixing, + 'diagbroyden': nonlin.diagbroyden} +F6.JAC_KSP_BAD = {} +F6.ROOT_JAC_KSP_BAD = {} + + +# ---------------------------------------------------------------------------- +# Tests +# ---------------------------------------------------------------------------- + + +class TestNonlin: + """ + Check the Broyden methods for a few test problems. + + broyden1, broyden2, and newton_krylov must succeed for + all functions. Some of the others don't -- tests in KNOWN_BAD are skipped. 
+ + """ + + def _check_nonlin_func(self, f, func, f_tol=1e-2): + # Test all methods mentioned in the class `KrylovJacobian` + if func == SOLVERS['krylov']: + for method in ['gmres', 'bicgstab', 'cgs', 'minres', 'tfqmr']: + if method in f.JAC_KSP_BAD: + continue + + x = func(f, f.xin, method=method, line_search=None, + f_tol=f_tol, maxiter=200, verbose=0) + assert_(np.absolute(f(x)).max() < f_tol) + + x = func(f, f.xin, f_tol=f_tol, maxiter=200, verbose=0) + assert_(np.absolute(f(x)).max() < f_tol) + + def _check_root(self, f, method, f_tol=1e-2): + # Test Krylov methods + if method == 'krylov': + for jac_method in ['gmres', 'bicgstab', 'cgs', 'minres', 'tfqmr']: + if jac_method in f.ROOT_JAC_KSP_BAD: + continue + + res = root(f, f.xin, method=method, + options={'ftol': f_tol, 'maxiter': 200, + 'disp': 0, + 'jac_options': {'method': jac_method}}) + assert_(np.absolute(res.fun).max() < f_tol) + + res = root(f, f.xin, method=method, + options={'ftol': f_tol, 'maxiter': 200, 'disp': 0}) + assert_(np.absolute(res.fun).max() < f_tol) + + @pytest.mark.xfail + def _check_func_fail(self, *a, **kw): + pass + + @pytest.mark.filterwarnings('ignore::DeprecationWarning') + def test_problem_nonlin(self): + for f in [F, F2, F2_lucky, F3, F4_powell, F5, F6]: + for func in SOLVERS.values(): + if func in f.KNOWN_BAD.values(): + if func in MUST_WORK.values(): + self._check_func_fail(f, func) + continue + self._check_nonlin_func(f, func) + + @pytest.mark.filterwarnings('ignore::DeprecationWarning') + @pytest.mark.parametrize("method", ['lgmres', 'gmres', 'bicgstab', 'cgs', + 'minres', 'tfqmr']) + def test_tol_norm_called(self, method): + # Check that supplying tol_norm keyword to nonlin_solve works + self._tol_norm_used = False + + def local_norm_func(x): + self._tol_norm_used = True + return np.absolute(x).max() + + nonlin.newton_krylov(F, F.xin, method=method, f_tol=1e-2, + maxiter=200, verbose=0, + tol_norm=local_norm_func) + assert_(self._tol_norm_used) + + @pytest.mark.filterwarnings('ignore::DeprecationWarning') + def test_problem_root(self): + for f in [F, F2, F2_lucky, F3, F4_powell, F5, F6]: + for meth in SOLVERS: + if meth in f.KNOWN_BAD: + if meth in MUST_WORK: + self._check_func_fail(f, meth) + continue + self._check_root(f, meth) + + def test_no_convergence(self): + def wont_converge(x): + return 1e3 + x + + with pytest.raises(scipy.optimize.NoConvergence): + nonlin.newton_krylov(wont_converge, xin=[0], maxiter=1) + + +class TestSecant: + """Check that some Jacobian approximations satisfy the secant condition""" + + xs = [np.array([1., 2., 3., 4., 5.]), + np.array([2., 3., 4., 5., 1.]), + np.array([3., 4., 5., 1., 2.]), + np.array([4., 5., 1., 2., 3.]), + np.array([9., 1., 9., 1., 3.]), + np.array([0., 1., 9., 1., 3.]), + np.array([5., 5., 7., 1., 1.]), + np.array([1., 2., 7., 5., 1.]),] + fs = [x**2 - 1 for x in xs] + + def _check_secant(self, jac_cls, npoints=1, **kw): + """ + Check that the given Jacobian approximation satisfies secant + conditions for last `npoints` points. 
+ """ + jac = jac_cls(**kw) + jac.setup(self.xs[0], self.fs[0], None) + for j, (x, f) in enumerate(zip(self.xs[1:], self.fs[1:])): + jac.update(x, f) + + for k in range(min(npoints, j+1)): + dx = self.xs[j-k+1] - self.xs[j-k] + df = self.fs[j-k+1] - self.fs[j-k] + assert_(np.allclose(dx, jac.solve(df))) + + # Check that the `npoints` secant bound is strict + if j >= npoints: + dx = self.xs[j-npoints+1] - self.xs[j-npoints] + df = self.fs[j-npoints+1] - self.fs[j-npoints] + assert_(not np.allclose(dx, jac.solve(df))) + + def test_broyden1(self): + self._check_secant(nonlin.BroydenFirst) + + def test_broyden2(self): + self._check_secant(nonlin.BroydenSecond) + + def test_broyden1_update(self): + # Check that BroydenFirst update works as for a dense matrix + jac = nonlin.BroydenFirst(alpha=0.1) + jac.setup(self.xs[0], self.fs[0], None) + + B = np.identity(5) * (-1/0.1) + + for last_j, (x, f) in enumerate(zip(self.xs[1:], self.fs[1:])): + df = f - self.fs[last_j] + dx = x - self.xs[last_j] + B += (df - dot(B, dx))[:, None] * dx[None, :] / dot(dx, dx) + jac.update(x, f) + assert_(np.allclose(jac.todense(), B, rtol=1e-10, atol=1e-13)) + + def test_broyden2_update(self): + # Check that BroydenSecond update works as for a dense matrix + jac = nonlin.BroydenSecond(alpha=0.1) + jac.setup(self.xs[0], self.fs[0], None) + + H = np.identity(5) * (-0.1) + + for last_j, (x, f) in enumerate(zip(self.xs[1:], self.fs[1:])): + df = f - self.fs[last_j] + dx = x - self.xs[last_j] + H += (dx - dot(H, df))[:, None] * df[None, :] / dot(df, df) + jac.update(x, f) + assert_(np.allclose(jac.todense(), inv(H), rtol=1e-10, atol=1e-13)) + + def test_anderson(self): + # Anderson mixing (with w0=0) satisfies secant conditions + # for the last M iterates, see [Ey]_ + # + # .. [Ey] V. Eyert, J. Comp. Phys., 124, 271 (1996). 
+ self._check_secant(nonlin.Anderson, M=3, w0=0, npoints=3) + + +class TestLinear: + """Solve a linear equation; + some methods find the exact solution in a finite number of steps""" + + def _check(self, jac, N, maxiter, complex=False, **kw): + np.random.seed(123) + + A = np.random.randn(N, N) + if complex: + A = A + 1j*np.random.randn(N, N) + b = np.random.randn(N) + if complex: + b = b + 1j*np.random.randn(N) + + def func(x): + return dot(A, x) - b + + sol = nonlin.nonlin_solve(func, np.zeros(N), jac, maxiter=maxiter, + f_tol=1e-6, line_search=None, verbose=0) + assert_(np.allclose(dot(A, sol), b, atol=1e-6)) + + def test_broyden1(self): + # Broyden methods solve linear systems exactly in 2*N steps + self._check(nonlin.BroydenFirst(alpha=1.0), 20, 41, False) + self._check(nonlin.BroydenFirst(alpha=1.0), 20, 41, True) + + def test_broyden2(self): + # Broyden methods solve linear systems exactly in 2*N steps + self._check(nonlin.BroydenSecond(alpha=1.0), 20, 41, False) + self._check(nonlin.BroydenSecond(alpha=1.0), 20, 41, True) + + def test_anderson(self): + # Anderson is rather similar to Broyden, if given enough storage space + self._check(nonlin.Anderson(M=50, alpha=1.0), 20, 29, False) + self._check(nonlin.Anderson(M=50, alpha=1.0), 20, 29, True) + + def test_krylov(self): + # Krylov methods solve linear systems exactly in N inner steps + self._check(nonlin.KrylovJacobian, 20, 2, False, inner_m=10) + self._check(nonlin.KrylovJacobian, 20, 2, True, inner_m=10) + + def _check_autojac(self, A, b): + def func(x): + return A.dot(x) - b + + def jac(v): + return A + + sol = nonlin.nonlin_solve(func, np.zeros(b.shape[0]), jac, maxiter=2, + f_tol=1e-6, line_search=None, verbose=0) + np.testing.assert_allclose(A @ sol, b, atol=1e-6) + # test jac input as array -- not a function + sol = nonlin.nonlin_solve(func, np.zeros(b.shape[0]), A, maxiter=2, + f_tol=1e-6, line_search=None, verbose=0) + np.testing.assert_allclose(A @ sol, b, atol=1e-6) + + def test_jac_sparse(self): + A = csr_array([[1, 2], [2, 1]]) + b = np.array([1, -1]) + self._check_autojac(A, b) + self._check_autojac((1 + 2j) * A, (2 + 2j) * b) + + def test_jac_ndarray(self): + A = np.array([[1, 2], [2, 1]]) + b = np.array([1, -1]) + self._check_autojac(A, b) + self._check_autojac((1 + 2j) * A, (2 + 2j) * b) + + +class TestJacobianDotSolve: + """ + Check that solve/dot methods in Jacobian approximations are consistent + """ + + def _func(self, x, A=None): + return x**2 - 1 + np.dot(A, x) + + def _check_dot(self, jac_cls, complex=False, tol=1e-6, **kw): + rng = np.random.RandomState(123) + + N = 7 + + def rand(*a): + q = rng.rand(*a) + if complex: + q = q + 1j*rng.rand(*a) + return q + + def assert_close(a, b, msg): + d = abs(a - b).max() + f = tol + abs(b).max()*tol + if d > f: + raise AssertionError(f'{msg}: err {d:g}') + + A = rand(N, N) + + # initialize + x0 = rng.rand(N) + jac = jac_cls(**kw) + jac.setup(x0, self._func(x0, A), partial(self._func, A=A)) + + # check consistency + for k in range(2*N): + v = rand(N) + + if hasattr(jac, '__array__'): + Jd = np.array(jac) + if hasattr(jac, 'solve'): + Gv = jac.solve(v) + Gv2 = np.linalg.solve(Jd, v) + assert_close(Gv, Gv2, 'solve vs array') + if hasattr(jac, 'rsolve'): + Gv = jac.rsolve(v) + Gv2 = np.linalg.solve(Jd.T.conj(), v) + assert_close(Gv, Gv2, 'rsolve vs array') + if hasattr(jac, 'matvec'): + Jv = jac.matvec(v) + Jv2 = np.dot(Jd, v) + assert_close(Jv, Jv2, 'dot vs array') + if hasattr(jac, 'rmatvec'): + Jv = jac.rmatvec(v) + Jv2 = np.dot(Jd.T.conj(), v) + assert_close(Jv, Jv2, 
'rmatvec vs array') + + if hasattr(jac, 'matvec') and hasattr(jac, 'solve'): + Jv = jac.matvec(v) + Jv2 = jac.solve(jac.matvec(Jv)) + assert_close(Jv, Jv2, 'dot vs solve') + + if hasattr(jac, 'rmatvec') and hasattr(jac, 'rsolve'): + Jv = jac.rmatvec(v) + Jv2 = jac.rmatvec(jac.rsolve(Jv)) + assert_close(Jv, Jv2, 'rmatvec vs rsolve') + + x = rand(N) + jac.update(x, self._func(x, A)) + + def test_broyden1(self): + self._check_dot(nonlin.BroydenFirst, complex=False) + self._check_dot(nonlin.BroydenFirst, complex=True) + + def test_broyden2(self): + self._check_dot(nonlin.BroydenSecond, complex=False) + self._check_dot(nonlin.BroydenSecond, complex=True) + + def test_anderson(self): + self._check_dot(nonlin.Anderson, complex=False) + self._check_dot(nonlin.Anderson, complex=True) + + def test_diagbroyden(self): + self._check_dot(nonlin.DiagBroyden, complex=False) + self._check_dot(nonlin.DiagBroyden, complex=True) + + def test_linearmixing(self): + self._check_dot(nonlin.LinearMixing, complex=False) + self._check_dot(nonlin.LinearMixing, complex=True) + + def test_excitingmixing(self): + self._check_dot(nonlin.ExcitingMixing, complex=False) + self._check_dot(nonlin.ExcitingMixing, complex=True) + + @pytest.mark.thread_unsafe + def test_krylov(self): + self._check_dot(nonlin.KrylovJacobian, complex=False, tol=1e-3) + self._check_dot(nonlin.KrylovJacobian, complex=True, tol=1e-3) + + +class TestNonlinOldTests: + """ Test case for a simple constrained entropy maximization problem + (the machine translation example of Berger et al in + Computational Linguistics, vol 22, num 1, pp 39--72, 1996.) + """ + + def test_broyden1(self): + x = nonlin.broyden1(F, F.xin, iter=12, alpha=1) + assert_(nonlin.norm(x) < 1e-9) + assert_(nonlin.norm(F(x)) < 1e-9) + + def test_broyden2(self): + x = nonlin.broyden2(F, F.xin, iter=12, alpha=1) + assert_(nonlin.norm(x) < 1e-9) + assert_(nonlin.norm(F(x)) < 1e-9) + + def test_anderson(self): + x = nonlin.anderson(F, F.xin, iter=12, alpha=0.03, M=5) + assert_(nonlin.norm(x) < 0.33) + + def test_linearmixing(self): + x = nonlin.linearmixing(F, F.xin, iter=60, alpha=0.5) + assert_(nonlin.norm(x) < 1e-7) + assert_(nonlin.norm(F(x)) < 1e-7) + + def test_exciting(self): + x = nonlin.excitingmixing(F, F.xin, iter=20, alpha=0.5) + assert_(nonlin.norm(x) < 1e-5) + assert_(nonlin.norm(F(x)) < 1e-5) + + def test_diagbroyden(self): + x = nonlin.diagbroyden(F, F.xin, iter=11, alpha=1) + assert_(nonlin.norm(x) < 1e-8) + assert_(nonlin.norm(F(x)) < 1e-8) + + def test_root_broyden1(self): + res = root(F, F.xin, method='broyden1', + options={'nit': 12, 'jac_options': {'alpha': 1}}) + assert_(nonlin.norm(res.x) < 1e-9) + assert_(nonlin.norm(res.fun) < 1e-9) + + def test_root_broyden2(self): + res = root(F, F.xin, method='broyden2', + options={'nit': 12, 'jac_options': {'alpha': 1}}) + assert_(nonlin.norm(res.x) < 1e-9) + assert_(nonlin.norm(res.fun) < 1e-9) + + def test_root_anderson(self): + res = root(F, F.xin, method='anderson', + options={'nit': 12, + 'jac_options': {'alpha': 0.03, 'M': 5}}) + assert_(nonlin.norm(res.x) < 0.33) + + def test_root_linearmixing(self): + res = root(F, F.xin, method='linearmixing', + options={'nit': 60, + 'jac_options': {'alpha': 0.5}}) + assert_(nonlin.norm(res.x) < 1e-7) + assert_(nonlin.norm(res.fun) < 1e-7) + + def test_root_excitingmixing(self): + res = root(F, F.xin, method='excitingmixing', + options={'nit': 20, + 'jac_options': {'alpha': 0.5}}) + assert_(nonlin.norm(res.x) < 1e-5) + assert_(nonlin.norm(res.fun) < 1e-5) + + def 
test_root_diagbroyden(self): + res = root(F, F.xin, method='diagbroyden', + options={'nit': 11, + 'jac_options': {'alpha': 1}}) + assert_(nonlin.norm(res.x) < 1e-8) + assert_(nonlin.norm(res.fun) < 1e-8) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_optimize.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_optimize.py new file mode 100644 index 0000000000000000000000000000000000000000..913ef51f049386a06c61164a3fc07c15111ed212 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_optimize.py @@ -0,0 +1,3257 @@ +""" +Unit tests for optimization routines from optimize.py + +Authors: + Ed Schofield, Nov 2005 + Andrew Straw, April 2008 + +""" +import itertools +import platform +import threading +import numpy as np +from numpy.testing import (assert_allclose, assert_equal, + assert_almost_equal, + assert_no_warnings, assert_warns, + assert_array_less, suppress_warnings) +import pytest +from pytest import raises as assert_raises + +import scipy +from scipy import optimize +from scipy.optimize._minimize import Bounds, NonlinearConstraint +from scipy.optimize._minimize import (MINIMIZE_METHODS, + MINIMIZE_METHODS_NEW_CB, + MINIMIZE_SCALAR_METHODS) +from scipy.optimize._linprog import LINPROG_METHODS +from scipy.optimize._root import ROOT_METHODS +from scipy.optimize._root_scalar import ROOT_SCALAR_METHODS +from scipy.optimize._qap import QUADRATIC_ASSIGNMENT_METHODS +from scipy.optimize._differentiable_functions import ScalarFunction, FD_METHODS +from scipy.optimize._optimize import MemoizeJac, show_options, OptimizeResult +from scipy.optimize import rosen, rosen_der, rosen_hess + +from scipy.sparse import (coo_matrix, csc_matrix, csr_matrix, coo_array, + csr_array, csc_array) +from scipy.conftest import array_api_compatible +from scipy._lib._array_api_no_0d import xp_assert_equal, array_namespace + +skip_xp_backends = pytest.mark.skip_xp_backends + + +def test_check_grad(): + # Verify if check_grad is able to estimate the derivative of the + # expit (logistic sigmoid) function. + + def expit(x): + return 1 / (1 + np.exp(-x)) + + def der_expit(x): + return np.exp(-x) / (1 + np.exp(-x))**2 + + x0 = np.array([1.5]) + + r = optimize.check_grad(expit, der_expit, x0) + assert_almost_equal(r, 0) + # SPEC-007 leave one call with seed to check it still works + r = optimize.check_grad(expit, der_expit, x0, + direction='random', seed=1234) + assert_almost_equal(r, 0) + + r = optimize.check_grad(expit, der_expit, x0, epsilon=1e-6) + assert_almost_equal(r, 0) + r = optimize.check_grad(expit, der_expit, x0, epsilon=1e-6, + direction='random', rng=1234) + assert_almost_equal(r, 0) + + # Check if the epsilon parameter is being considered. 
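+ # check_grad reports the norm of the difference between the analytic
+ # gradient and a finite-difference estimate built with step `epsilon`;
+ # roughly (for the default direction):
+ #     err ~= norm(approx_fprime(x0, expit, epsilon) - der_expit(x0))
+ # so the coarse step of 1e-1 below should give a clearly nonzero error,
+ # whereas the fine steps above did not.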
+ r = abs(optimize.check_grad(expit, der_expit, x0, epsilon=1e-1) - 0) + assert r > 1e-7 + r = abs(optimize.check_grad(expit, der_expit, x0, epsilon=1e-1, + direction='random', rng=1234) - 0) + assert r > 1e-7 + + def x_sinx(x): + return (x*np.sin(x)).sum() + + def der_x_sinx(x): + return np.sin(x) + x*np.cos(x) + + x0 = np.arange(0, 2, 0.2) + + r = optimize.check_grad(x_sinx, der_x_sinx, x0, + direction='random', rng=1234) + assert_almost_equal(r, 0) + + assert_raises(ValueError, optimize.check_grad, + x_sinx, der_x_sinx, x0, + direction='random_projection', rng=1234) + + # checking can be done for derivatives of vector valued functions + r = optimize.check_grad(himmelblau_grad, himmelblau_hess, himmelblau_x0, + direction='all', rng=1234) + assert r < 5e-7 + + +class CheckOptimize: + """ Base test case for a simple constrained entropy maximization problem + (the machine translation example of Berger et al in + Computational Linguistics, vol 22, num 1, pp 39--72, 1996.) + """ + + def setup_method(self): + self.F = np.array([[1, 1, 1], + [1, 1, 0], + [1, 0, 1], + [1, 0, 0], + [1, 0, 0]]) + self.K = np.array([1., 0.3, 0.5]) + self.startparams = np.zeros(3, np.float64) + self.solution = np.array([0., -0.524869316, 0.487525860]) + self.maxiter = 1000 + self.funccalls = threading.local() + self.gradcalls = threading.local() + self.trace = threading.local() + + def func(self, x): + if not hasattr(self.funccalls, 'c'): + self.funccalls.c = 0 + + if not hasattr(self.gradcalls, 'c'): + self.gradcalls.c = 0 + + self.funccalls.c += 1 + if self.funccalls.c > 6000: + raise RuntimeError("too many iterations in optimization routine") + log_pdot = np.dot(self.F, x) + logZ = np.log(sum(np.exp(log_pdot))) + f = logZ - np.dot(self.K, x) + if not hasattr(self.trace, 't'): + self.trace.t = [] + self.trace.t.append(np.copy(x)) + return f + + def grad(self, x): + if not hasattr(self.gradcalls, 'c'): + self.gradcalls.c = 0 + self.gradcalls.c += 1 + log_pdot = np.dot(self.F, x) + logZ = np.log(sum(np.exp(log_pdot))) + p = np.exp(log_pdot - logZ) + return np.dot(self.F.transpose(), p) - self.K + + def hess(self, x): + log_pdot = np.dot(self.F, x) + logZ = np.log(sum(np.exp(log_pdot))) + p = np.exp(log_pdot - logZ) + return np.dot(self.F.T, + np.dot(np.diag(p), self.F - np.dot(self.F.T, p))) + + def hessp(self, x, p): + return np.dot(self.hess(x), p) + + +class CheckOptimizeParameterized(CheckOptimize): + + def test_cg(self): + # conjugate gradient optimization routine + if self.use_wrapper: + opts = {'maxiter': self.maxiter, 'disp': self.disp, + 'return_all': False} + res = optimize.minimize(self.func, self.startparams, args=(), + method='CG', jac=self.grad, + options=opts) + params, fopt, func_calls, grad_calls, warnflag = \ + res['x'], res['fun'], res['nfev'], res['njev'], res['status'] + else: + retval = optimize.fmin_cg(self.func, self.startparams, + self.grad, (), maxiter=self.maxiter, + full_output=True, disp=self.disp, + retall=False) + (params, fopt, func_calls, grad_calls, warnflag) = retval + + assert_allclose(self.func(params), self.func(self.solution), + atol=1e-6) + + # Ensure that function call counts are 'known good'; these are from + # SciPy 0.7.0. Don't allow them to increase. 
+ assert self.funccalls.c == 9, self.funccalls.c + assert self.gradcalls.c == 7, self.gradcalls.c + + # Ensure that the function behaves the same; this is from SciPy 0.7.0 + assert_allclose(self.trace.t[2:4], + [[0, -0.5, 0.5], + [0, -5.05700028e-01, 4.95985862e-01]], + atol=1e-14, rtol=1e-7) + + def test_cg_cornercase(self): + def f(r): + return 2.5 * (1 - np.exp(-1.5*(r - 0.5)))**2 + + # Check several initial guesses. (Too far away from the + # minimum, the function ends up in the flat region of exp.) + for x0 in np.linspace(-0.75, 3, 71): + sol = optimize.minimize(f, [x0], method='CG') + assert sol.success + assert_allclose(sol.x, [0.5], rtol=1e-5) + + def test_bfgs(self): + # Broyden-Fletcher-Goldfarb-Shanno optimization routine + if self.use_wrapper: + opts = {'maxiter': self.maxiter, 'disp': self.disp, + 'return_all': False} + res = optimize.minimize(self.func, self.startparams, + jac=self.grad, method='BFGS', args=(), + options=opts) + + params, fopt, gopt, Hopt, func_calls, grad_calls, warnflag = ( + res['x'], res['fun'], res['jac'], res['hess_inv'], + res['nfev'], res['njev'], res['status']) + else: + retval = optimize.fmin_bfgs(self.func, self.startparams, self.grad, + args=(), maxiter=self.maxiter, + full_output=True, disp=self.disp, + retall=False) + (params, fopt, gopt, Hopt, + func_calls, grad_calls, warnflag) = retval + + assert_allclose(self.func(params), self.func(self.solution), + atol=1e-6) + + # Ensure that function call counts are 'known good'; these are from + # SciPy 0.7.0. Don't allow them to increase. + assert self.funccalls.c == 10, self.funccalls.c + assert self.gradcalls.c == 8, self.gradcalls.c + + # Ensure that the function behaves the same; this is from SciPy 0.7.0 + assert_allclose(self.trace.t[6:8], + [[0, -5.25060743e-01, 4.87748473e-01], + [0, -5.24885582e-01, 4.87530347e-01]], + atol=1e-14, rtol=1e-7) + + def test_bfgs_hess_inv0_neg(self): + # Ensure that BFGS does not accept neg. def. initial inverse + # Hessian estimate. + with pytest.raises(ValueError, match="'hess_inv0' matrix isn't " + "positive definite."): + x0 = np.array([1.3, 0.7, 0.8, 1.9, 1.2]) + opts = {'disp': self.disp, 'hess_inv0': -np.eye(5)} + optimize.minimize(optimize.rosen, x0=x0, method='BFGS', args=(), + options=opts) + + def test_bfgs_hess_inv0_semipos(self): + # Ensure that BFGS does not accept semi pos. def. initial inverse + # Hessian estimate. + with pytest.raises(ValueError, match="'hess_inv0' matrix isn't " + "positive definite."): + x0 = np.array([1.3, 0.7, 0.8, 1.9, 1.2]) + hess_inv0 = np.eye(5) + hess_inv0[0, 0] = 0 + opts = {'disp': self.disp, 'hess_inv0': hess_inv0} + optimize.minimize(optimize.rosen, x0=x0, method='BFGS', args=(), + options=opts) + + def test_bfgs_hess_inv0_sanity(self): + # Ensure that BFGS handles `hess_inv0` parameter correctly. + fun = optimize.rosen + x0 = np.array([1.3, 0.7, 0.8, 1.9, 1.2]) + opts = {'disp': self.disp, 'hess_inv0': 1e-2 * np.eye(5)} + res = optimize.minimize(fun, x0=x0, method='BFGS', args=(), + options=opts) + res_true = optimize.minimize(fun, x0=x0, method='BFGS', args=(), + options={'disp': self.disp}) + assert_allclose(res.fun, res_true.fun, atol=1e-6) + + @pytest.mark.filterwarnings('ignore::UserWarning') + def test_bfgs_infinite(self): + # Test corner case where -Inf is the minimum. See gh-2019. 
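+ # f(x) = -exp(-x) is unbounded below (f -> -inf as x -> -inf), so BFGS
+ # is expected to step towards -inf until the objective overflows; the
+ # test only requires the final objective value to be non-finite.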
+ def func(x): + return -np.e ** (-x) + def fprime(x): + return -func(x) + x0 = [0] + with np.errstate(over='ignore'): + if self.use_wrapper: + opts = {'disp': self.disp} + x = optimize.minimize(func, x0, jac=fprime, method='BFGS', + args=(), options=opts)['x'] + else: + x = optimize.fmin_bfgs(func, x0, fprime, disp=self.disp) + assert not np.isfinite(func(x)) + + def test_bfgs_xrtol(self): + # test for #17345 to test xrtol parameter + x0 = [1.3, 0.7, 0.8, 1.9, 1.2] + res = optimize.minimize(optimize.rosen, + x0, method='bfgs', options={'xrtol': 1e-3}) + ref = optimize.minimize(optimize.rosen, + x0, method='bfgs', options={'gtol': 1e-3}) + assert res.nit != ref.nit + + def test_bfgs_c1(self): + # test for #18977 insufficiently low value of c1 leads to precision loss + # for poor starting parameters + x0 = [10.3, 20.7, 10.8, 1.9, -1.2] + res_c1_small = optimize.minimize(optimize.rosen, + x0, method='bfgs', options={'c1': 1e-8}) + res_c1_big = optimize.minimize(optimize.rosen, + x0, method='bfgs', options={'c1': 1e-1}) + + assert res_c1_small.nfev > res_c1_big.nfev + + def test_bfgs_c2(self): + # test that modification of c2 parameter + # results in different number of iterations + x0 = [1.3, 0.7, 0.8, 1.9, 1.2] + res_default = optimize.minimize(optimize.rosen, + x0, method='bfgs', options={'c2': .9}) + res_mod = optimize.minimize(optimize.rosen, + x0, method='bfgs', options={'c2': 1e-2}) + assert res_default.nit > res_mod.nit + + @pytest.mark.parametrize(["c1", "c2"], [[0.5, 2], + [-0.1, 0.1], + [0.2, 0.1]]) + def test_invalid_c1_c2(self, c1, c2): + with pytest.raises(ValueError, match="'c1' and 'c2'"): + x0 = [10.3, 20.7, 10.8, 1.9, -1.2] + optimize.minimize(optimize.rosen, x0, method='cg', + options={'c1': c1, 'c2': c2}) + + def test_powell(self): + # Powell (direction set) optimization routine + if self.use_wrapper: + opts = {'maxiter': self.maxiter, 'disp': self.disp, + 'return_all': False} + res = optimize.minimize(self.func, self.startparams, args=(), + method='Powell', options=opts) + params, fopt, direc, numiter, func_calls, warnflag = ( + res['x'], res['fun'], res['direc'], res['nit'], + res['nfev'], res['status']) + else: + retval = optimize.fmin_powell(self.func, self.startparams, + args=(), maxiter=self.maxiter, + full_output=True, disp=self.disp, + retall=False) + (params, fopt, direc, numiter, func_calls, warnflag) = retval + + assert_allclose(self.func(params), self.func(self.solution), + atol=1e-6) + # params[0] does not affect the objective function + assert_allclose(params[1:], self.solution[1:], atol=5e-6) + + # Ensure that function call counts are 'known good'; these are from + # SciPy 0.7.0. Don't allow them to increase. + # + # However, some leeway must be added: the exact evaluation + # count is sensitive to numerical error, and floating-point + # computations are not bit-for-bit reproducible across + # machines, and when using e.g., MKL, data alignment + # etc., affect the rounding error. + # + assert self.funccalls.c <= 116 + 20, self.funccalls.c + assert self.gradcalls.c == 0, self.gradcalls.c + + @pytest.mark.xfail(reason="This part of test_powell fails on some " + "platforms, but the solution returned by powell is " + "still valid.") + def test_powell_gh14014(self): + # This part of test_powell started failing on some CI platforms; + # see gh-14014. 
Since the solution is still correct and the comments + # in test_powell suggest that small differences in the bits are known + # to change the "trace" of the solution, seems safe to xfail to get CI + # green now and investigate later. + + # Powell (direction set) optimization routine + if self.use_wrapper: + opts = {'maxiter': self.maxiter, 'disp': self.disp, + 'return_all': False} + res = optimize.minimize(self.func, self.startparams, args=(), + method='Powell', options=opts) + params, fopt, direc, numiter, func_calls, warnflag = ( + res['x'], res['fun'], res['direc'], res['nit'], + res['nfev'], res['status']) + else: + retval = optimize.fmin_powell(self.func, self.startparams, + args=(), maxiter=self.maxiter, + full_output=True, disp=self.disp, + retall=False) + (params, fopt, direc, numiter, func_calls, warnflag) = retval + + # Ensure that the function behaves the same; this is from SciPy 0.7.0 + assert_allclose(self.trace[34:39], + [[0.72949016, -0.44156936, 0.47100962], + [0.72949016, -0.44156936, 0.48052496], + [1.45898031, -0.88313872, 0.95153458], + [0.72949016, -0.44156936, 0.47576729], + [1.72949016, -0.44156936, 0.47576729]], + atol=1e-14, rtol=1e-7) + + def test_powell_bounded(self): + # Powell (direction set) optimization routine + # same as test_powell above, but with bounds + bounds = [(-np.pi, np.pi) for _ in self.startparams] + if self.use_wrapper: + opts = {'maxiter': self.maxiter, 'disp': self.disp, + 'return_all': False} + res = optimize.minimize(self.func, self.startparams, args=(), + bounds=bounds, + method='Powell', options=opts) + params, func_calls = (res['x'], res['nfev']) + + assert func_calls == self.funccalls.c + assert_allclose(self.func(params), self.func(self.solution), + atol=1e-6, rtol=1e-5) + + # The exact evaluation count is sensitive to numerical error, and + # floating-point computations are not bit-for-bit reproducible + # across machines, and when using e.g. MKL, data alignment etc. + # affect the rounding error. + # It takes 155 calls on my machine, but we can add the same +20 + # margin as is used in `test_powell` + assert self.funccalls.c <= 155 + 20 + assert self.gradcalls.c == 0 + + def test_neldermead(self): + # Nelder-Mead simplex algorithm + if self.use_wrapper: + opts = {'maxiter': self.maxiter, 'disp': self.disp, + 'return_all': False} + res = optimize.minimize(self.func, self.startparams, args=(), + method='Nelder-mead', options=opts) + params, fopt, numiter, func_calls, warnflag = ( + res['x'], res['fun'], res['nit'], res['nfev'], + res['status']) + else: + retval = optimize.fmin(self.func, self.startparams, + args=(), maxiter=self.maxiter, + full_output=True, disp=self.disp, + retall=False) + (params, fopt, numiter, func_calls, warnflag) = retval + + assert_allclose(self.func(params), self.func(self.solution), + atol=1e-6) + + # Ensure that function call counts are 'known good'; these are from + # SciPy 0.7.0. Don't allow them to increase. + assert self.funccalls.c == 167, self.funccalls.c + assert self.gradcalls.c == 0, self.gradcalls.c + + # Ensure that the function behaves the same; this is from SciPy 0.7.0 + assert_allclose(self.trace.t[76:78], + [[0.1928968, -0.62780447, 0.35166118], + [0.19572515, -0.63648426, 0.35838135]], + atol=1e-14, rtol=1e-7) + + def test_neldermead_initial_simplex(self): + # Nelder-Mead simplex algorithm + simplex = np.zeros((4, 3)) + simplex[...] 
= self.startparams + for j in range(3): + simplex[j+1, j] += 0.1 + + if self.use_wrapper: + opts = {'maxiter': self.maxiter, 'disp': False, + 'return_all': True, 'initial_simplex': simplex} + res = optimize.minimize(self.func, self.startparams, args=(), + method='Nelder-mead', options=opts) + params, fopt, numiter, func_calls, warnflag = (res['x'], + res['fun'], + res['nit'], + res['nfev'], + res['status']) + assert_allclose(res['allvecs'][0], simplex[0]) + else: + retval = optimize.fmin(self.func, self.startparams, + args=(), maxiter=self.maxiter, + full_output=True, disp=False, retall=False, + initial_simplex=simplex) + + (params, fopt, numiter, func_calls, warnflag) = retval + + assert_allclose(self.func(params), self.func(self.solution), + atol=1e-6) + + # Ensure that function call counts are 'known good'; these are from + # SciPy 0.17.0. Don't allow them to increase. + assert self.funccalls.c == 100, self.funccalls.c + assert self.gradcalls.c == 0, self.gradcalls.c + + # Ensure that the function behaves the same; this is from SciPy 0.15.0 + assert_allclose(self.trace.t[50:52], + [[0.14687474, -0.5103282, 0.48252111], + [0.14474003, -0.5282084, 0.48743951]], + atol=1e-14, rtol=1e-7) + + def test_neldermead_initial_simplex_bad(self): + # Check it fails with a bad simplices + bad_simplices = [] + + simplex = np.zeros((3, 2)) + simplex[...] = self.startparams[:2] + for j in range(2): + simplex[j+1, j] += 0.1 + bad_simplices.append(simplex) + + simplex = np.zeros((3, 3)) + bad_simplices.append(simplex) + + for simplex in bad_simplices: + if self.use_wrapper: + opts = {'maxiter': self.maxiter, 'disp': False, + 'return_all': False, 'initial_simplex': simplex} + assert_raises(ValueError, + optimize.minimize, + self.func, + self.startparams, + args=(), + method='Nelder-mead', + options=opts) + else: + assert_raises(ValueError, optimize.fmin, + self.func, self.startparams, + args=(), maxiter=self.maxiter, + full_output=True, disp=False, retall=False, + initial_simplex=simplex) + + def test_neldermead_x0_ub(self): + # checks whether minimisation occurs correctly for entries where + # x0 == ub + # gh19991 + def quad(x): + return np.sum(x**2) + + res = optimize.minimize( + quad, + [1], + bounds=[(0, 1.)], + method='nelder-mead' + ) + assert_allclose(res.x, [0]) + + res = optimize.minimize( + quad, + [1, 2], + bounds=[(0, 1.), (1, 3.)], + method='nelder-mead' + ) + assert_allclose(res.x, [0, 1]) + + def test_ncg_negative_maxiter(self): + # Regression test for gh-8241 + opts = {'maxiter': -1} + result = optimize.minimize(self.func, self.startparams, + method='Newton-CG', jac=self.grad, + args=(), options=opts) + assert result.status == 1 + + def test_ncg_zero_xtol(self): + # Regression test for gh-20214 + def cosine(x): + return np.cos(x[0]) + + def jac(x): + return -np.sin(x[0]) + + x0 = [0.1] + xtol = 0 + result = optimize.minimize(cosine, + x0=x0, + jac=jac, + method="newton-cg", + options=dict(xtol=xtol)) + assert result.status == 0 + assert_almost_equal(result.x[0], np.pi) + + def test_ncg(self): + # line-search Newton conjugate gradient optimization routine + if self.use_wrapper: + opts = {'maxiter': self.maxiter, 'disp': self.disp, + 'return_all': False} + retval = optimize.minimize(self.func, self.startparams, + method='Newton-CG', jac=self.grad, + args=(), options=opts)['x'] + else: + retval = optimize.fmin_ncg(self.func, self.startparams, self.grad, + args=(), maxiter=self.maxiter, + full_output=False, disp=self.disp, + retall=False) + + params = retval + + 
assert_allclose(self.func(params), self.func(self.solution), + atol=1e-6) + + # Ensure that function call counts are 'known good'; these are from + # SciPy 0.7.0. Don't allow them to increase. + assert self.funccalls.c == 7, self.funccalls.c + assert self.gradcalls.c <= 22, self.gradcalls.c # 0.13.0 + # assert self.gradcalls <= 18, self.gradcalls # 0.9.0 + # assert self.gradcalls == 18, self.gradcalls # 0.8.0 + # assert self.gradcalls == 22, self.gradcalls # 0.7.0 + + # Ensure that the function behaves the same; this is from SciPy 0.7.0 + assert_allclose(self.trace.t[3:5], + [[-4.35700753e-07, -5.24869435e-01, 4.87527480e-01], + [-4.35700753e-07, -5.24869401e-01, 4.87527774e-01]], + atol=1e-6, rtol=1e-7) + + def test_ncg_hess(self): + # Newton conjugate gradient with Hessian + if self.use_wrapper: + opts = {'maxiter': self.maxiter, 'disp': self.disp, + 'return_all': False} + retval = optimize.minimize(self.func, self.startparams, + method='Newton-CG', jac=self.grad, + hess=self.hess, + args=(), options=opts)['x'] + else: + retval = optimize.fmin_ncg(self.func, self.startparams, self.grad, + fhess=self.hess, + args=(), maxiter=self.maxiter, + full_output=False, disp=self.disp, + retall=False) + + params = retval + + assert_allclose(self.func(params), self.func(self.solution), + atol=1e-6) + + # Ensure that function call counts are 'known good'; these are from + # SciPy 0.7.0. Don't allow them to increase. + assert self.funccalls.c <= 7, self.funccalls.c # gh10673 + assert self.gradcalls.c <= 18, self.gradcalls.c # 0.9.0 + # assert self.gradcalls == 18, self.gradcalls # 0.8.0 + # assert self.gradcalls == 22, self.gradcalls # 0.7.0 + + # Ensure that the function behaves the same; this is from SciPy 0.7.0 + assert_allclose(self.trace.t[3:5], + [[-4.35700753e-07, -5.24869435e-01, 4.87527480e-01], + [-4.35700753e-07, -5.24869401e-01, 4.87527774e-01]], + atol=1e-6, rtol=1e-7) + + def test_ncg_hessp(self): + # Newton conjugate gradient with Hessian times a vector p. + if self.use_wrapper: + opts = {'maxiter': self.maxiter, 'disp': self.disp, + 'return_all': False} + retval = optimize.minimize(self.func, self.startparams, + method='Newton-CG', jac=self.grad, + hessp=self.hessp, + args=(), options=opts)['x'] + else: + retval = optimize.fmin_ncg(self.func, self.startparams, self.grad, + fhess_p=self.hessp, + args=(), maxiter=self.maxiter, + full_output=False, disp=self.disp, + retall=False) + + params = retval + + assert_allclose(self.func(params), self.func(self.solution), + atol=1e-6) + + # Ensure that function call counts are 'known good'; these are from + # SciPy 0.7.0. Don't allow them to increase. + assert self.funccalls.c <= 7, self.funccalls.c # gh10673 + assert self.gradcalls.c <= 18, self.gradcalls.c # 0.9.0 + # assert self.gradcalls == 18, self.gradcalls # 0.8.0 + # assert self.gradcalls == 22, self.gradcalls # 0.7.0 + + # Ensure that the function behaves the same; this is from SciPy 0.7.0 + assert_allclose(self.trace.t[3:5], + [[-4.35700753e-07, -5.24869435e-01, 4.87527480e-01], + [-4.35700753e-07, -5.24869401e-01, 4.87527774e-01]], + atol=1e-6, rtol=1e-7) + + def test_cobyqa(self): + # COBYQA method. + if self.use_wrapper: + res = optimize.minimize( + self.func, + self.startparams, + method='cobyqa', + options={'maxiter': self.maxiter, 'disp': self.disp}, + ) + assert_allclose(res.fun, self.func(self.solution), atol=1e-6) + + # Ensure that function call counts are 'known good'; these are from + # SciPy 1.14.0. Don't allow them to increase. 
The exact evaluation + # count is sensitive to numerical error and floating-point + # computations are not bit-for-bit reproducible across machines. It + # takes 45 calls on my machine, but we can add the same +20 margin + # as is used in `test_powell` + assert self.funccalls.c <= 45 + 20, self.funccalls.c + + +def test_maxfev_test(): + rng = np.random.default_rng(271707100830272976862395227613146332411) + + def cost(x): + return rng.random(1) * 1000 # never converged problem + + for imaxfev in [1, 10, 50]: + # "TNC" and "L-BFGS-B" also supports max function evaluation, but + # these may violate the limit because of evaluating gradients + # by numerical differentiation. See the discussion in PR #14805. + for method in ['Powell', 'Nelder-Mead']: + result = optimize.minimize(cost, rng.random(10), + method=method, + options={'maxfev': imaxfev}) + assert result["nfev"] == imaxfev + + +def test_wrap_scalar_function_with_validation(): + + def func_(x): + return x + + fcalls, func = optimize._optimize.\ + _wrap_scalar_function_maxfun_validation(func_, np.asarray(1), 5) + + for i in range(5): + func(np.asarray(i)) + assert fcalls[0] == i+1 + + msg = "Too many function calls" + with assert_raises(optimize._optimize._MaxFuncCallError, match=msg): + func(np.asarray(i)) # exceeded maximum function call + + fcalls, func = optimize._optimize.\ + _wrap_scalar_function_maxfun_validation(func_, np.asarray(1), 5) + + msg = "The user-provided objective function must return a scalar value." + with assert_raises(ValueError, match=msg): + func(np.array([1, 1])) + + +def test_obj_func_returns_scalar(): + match = ("The user-provided " + "objective function must " + "return a scalar value.") + with assert_raises(ValueError, match=match): + optimize.minimize(lambda x: x, np.array([1, 1]), method='BFGS') + + +def test_neldermead_iteration_num(): + x0 = np.array([1.3, 0.7, 0.8, 1.9, 1.2]) + res = optimize._minimize._minimize_neldermead(optimize.rosen, x0, + xatol=1e-8) + assert res.nit <= 339 + + +def test_neldermead_respect_fp(): + # Nelder-Mead should respect the fp type of the input + function + x0 = np.array([5.0, 4.0]).astype(np.float32) + def rosen_(x): + assert x.dtype == np.float32 + return optimize.rosen(x) + + optimize.minimize(rosen_, x0, method='Nelder-Mead') + + +def test_neldermead_xatol_fatol(): + # gh4484 + # test we can call with fatol, xatol specified + def func(x): + return x[0] ** 2 + x[1] ** 2 + + optimize._minimize._minimize_neldermead(func, [1, 1], maxiter=2, + xatol=1e-3, fatol=1e-3) + + +def test_neldermead_adaptive(): + def func(x): + return np.sum(x ** 2) + p0 = [0.15746215, 0.48087031, 0.44519198, 0.4223638, 0.61505159, + 0.32308456, 0.9692297, 0.4471682, 0.77411992, 0.80441652, + 0.35994957, 0.75487856, 0.99973421, 0.65063887, 0.09626474] + + res = optimize.minimize(func, p0, method='Nelder-Mead') + assert_equal(res.success, False) + + res = optimize.minimize(func, p0, method='Nelder-Mead', + options={'adaptive': True}) + assert_equal(res.success, True) + + +@pytest.mark.thread_unsafe +def test_bounded_powell_outsidebounds(): + # With the bounded Powell method if you start outside the bounds the final + # should still be within the bounds (provided that the user doesn't make a + # bad choice for the `direc` argument). 
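+ # Below: with the default direction set the iterate is pulled back
+ # inside the bounds and converges to the origin; restricting `direc`
+ # to vectors that never move the first coordinate leaves x[0] stuck at
+ # its out-of-bounds starting value (-4) and the solve reports failure.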
+ def func(x): + return np.sum(x ** 2) + bounds = (-1, 1), (-1, 1), (-1, 1) + x0 = [-4, .5, -.8] + + # we're starting outside the bounds, so we should get a warning + with assert_warns(optimize.OptimizeWarning): + res = optimize.minimize(func, x0, bounds=bounds, method="Powell") + assert_allclose(res.x, np.array([0.] * len(x0)), atol=1e-6) + assert_equal(res.success, True) + assert_equal(res.status, 0) + + # However, now if we change the `direc` argument such that the + # set of vectors does not span the parameter space, then we may + # not end up back within the bounds. Here we see that the first + # parameter cannot be updated! + direc = [[0, 0, 0], [0, 1, 0], [0, 0, 1]] + # we're starting outside the bounds, so we should get a warning + with assert_warns(optimize.OptimizeWarning): + res = optimize.minimize(func, x0, + bounds=bounds, method="Powell", + options={'direc': direc}) + assert_allclose(res.x, np.array([-4., 0, 0]), atol=1e-6) + assert_equal(res.success, False) + assert_equal(res.status, 4) + + +@pytest.mark.thread_unsafe +def test_bounded_powell_vs_powell(): + # here we test an example where the bounded Powell method + # will return a different result than the standard Powell + # method. + + # first we test a simple example where the minimum is at + # the origin and the minimum that is within the bounds is + # larger than the minimum at the origin. + def func(x): + return np.sum(x ** 2) + bounds = (-5, -1), (-10, -0.1), (1, 9.2), (-4, 7.6), (-15.9, -2) + x0 = [-2.1, -5.2, 1.9, 0, -2] + + options = {'ftol': 1e-10, 'xtol': 1e-10} + + res_powell = optimize.minimize(func, x0, method="Powell", options=options) + assert_allclose(res_powell.x, 0., atol=1e-6) + assert_allclose(res_powell.fun, 0., atol=1e-6) + + res_bounded_powell = optimize.minimize(func, x0, options=options, + bounds=bounds, + method="Powell") + p = np.array([-1, -0.1, 1, 0, -2]) + assert_allclose(res_bounded_powell.x, p, atol=1e-6) + assert_allclose(res_bounded_powell.fun, func(p), atol=1e-6) + + # now we test bounded Powell but with a mix of inf bounds. + bounds = (None, -1), (-np.inf, -.1), (1, np.inf), (-4, None), (-15.9, -2) + res_bounded_powell = optimize.minimize(func, x0, options=options, + bounds=bounds, + method="Powell") + p = np.array([-1, -0.1, 1, 0, -2]) + assert_allclose(res_bounded_powell.x, p, atol=1e-6) + assert_allclose(res_bounded_powell.fun, func(p), atol=1e-6) + + # next we test an example where the global minimum is within + # the bounds, but the bounded Powell method performs better + # than the standard Powell method. + def func(x): + t = np.sin(-x[0]) * np.cos(x[1]) * np.sin(-x[0] * x[1]) * np.cos(x[1]) + t -= np.cos(np.sin(x[1] * x[2]) * np.cos(x[2])) + return t**2 + + bounds = [(-2, 5)] * 3 + x0 = [-0.5, -0.5, -0.5] + + res_powell = optimize.minimize(func, x0, method="Powell") + res_bounded_powell = optimize.minimize(func, x0, + bounds=bounds, + method="Powell") + assert_allclose(res_powell.fun, 0.007136253919761627, atol=1e-6) + assert_allclose(res_bounded_powell.fun, 0, atol=1e-6) + + # next we test the previous example where the we provide Powell + # with (-inf, inf) bounds, and compare it to providing Powell + # with no bounds. They should end up the same. 
+ bounds = [(-np.inf, np.inf)] * 3 + + res_bounded_powell = optimize.minimize(func, x0, + bounds=bounds, + method="Powell") + assert_allclose(res_powell.fun, res_bounded_powell.fun, atol=1e-6) + assert_allclose(res_powell.nfev, res_bounded_powell.nfev, atol=1e-6) + assert_allclose(res_powell.x, res_bounded_powell.x, atol=1e-6) + + # now test when x0 starts outside of the bounds. + x0 = [45.46254415, -26.52351498, 31.74830248] + bounds = [(-2, 5)] * 3 + # we're starting outside the bounds, so we should get a warning + with assert_warns(optimize.OptimizeWarning): + res_bounded_powell = optimize.minimize(func, x0, + bounds=bounds, + method="Powell") + assert_allclose(res_bounded_powell.fun, 0, atol=1e-6) + + +def test_onesided_bounded_powell_stability(): + # When the Powell method is bounded on only one side, a + # np.tan transform is done in order to convert it into a + # completely bounded problem. Here we do some simple tests + # of one-sided bounded Powell where the optimal solutions + # are large to test the stability of the transformation. + kwargs = {'method': 'Powell', + 'bounds': [(-np.inf, 1e6)] * 3, + 'options': {'ftol': 1e-8, 'xtol': 1e-8}} + x0 = [1, 1, 1] + + # df/dx is constant. + def f(x): + return -np.sum(x) + res = optimize.minimize(f, x0, **kwargs) + assert_allclose(res.fun, -3e6, atol=1e-4) + + # df/dx gets smaller and smaller. + def f(x): + return -np.abs(np.sum(x)) ** (0.1) * (1 if np.all(x > 0) else -1) + + res = optimize.minimize(f, x0, **kwargs) + assert_allclose(res.fun, -(3e6) ** (0.1)) + + # df/dx gets larger and larger. + def f(x): + return -np.abs(np.sum(x)) ** 10 * (1 if np.all(x > 0) else -1) + + res = optimize.minimize(f, x0, **kwargs) + assert_allclose(res.fun, -(3e6) ** 10, rtol=1e-7) + + # df/dx gets larger for some of the variables and smaller for others. + def f(x): + t = -np.abs(np.sum(x[:2])) ** 5 - np.abs(np.sum(x[2:])) ** (0.1) + t *= (1 if np.all(x > 0) else -1) + return t + + kwargs['bounds'] = [(-np.inf, 1e3)] * 3 + res = optimize.minimize(f, x0, **kwargs) + assert_allclose(res.fun, -(2e3) ** 5 - (1e6) ** (0.1), rtol=1e-7) + + +class TestOptimizeWrapperDisp(CheckOptimizeParameterized): + use_wrapper = True + disp = True + + +class TestOptimizeWrapperNoDisp(CheckOptimizeParameterized): + use_wrapper = True + disp = False + + +class TestOptimizeNoWrapperDisp(CheckOptimizeParameterized): + use_wrapper = False + disp = True + + +class TestOptimizeNoWrapperNoDisp(CheckOptimizeParameterized): + use_wrapper = False + disp = False + + +class TestOptimizeSimple(CheckOptimize): + + def test_bfgs_nan(self): + # Test corner case where nan is fed to optimizer. See gh-2067. + def func(x): + return x + def fprime(x): + return np.ones_like(x) + x0 = [np.nan] + with np.errstate(over='ignore', invalid='ignore'): + x = optimize.fmin_bfgs(func, x0, fprime, disp=False) + assert np.isnan(func(x)) + + def test_bfgs_nan_return(self): + # Test corner cases where fun returns NaN. See gh-4793. + + # First case: NaN from first call. + def func(x): + return np.nan + with np.errstate(invalid='ignore'): + result = optimize.minimize(func, 0) + + assert np.isnan(result['fun']) + assert result['success'] is False + + # Second case: NaN from second call. + def func(x): + return 0 if x == 0 else np.nan + def fprime(x): + return np.ones_like(x) # Steer away from zero. 
+ with np.errstate(invalid='ignore'): + result = optimize.minimize(func, 0, jac=fprime) + + assert np.isnan(result['fun']) + assert result['success'] is False + + def test_bfgs_numerical_jacobian(self): + # BFGS with numerical Jacobian and a vector epsilon parameter. + # define the epsilon parameter using a random vector + epsilon = np.sqrt(np.spacing(1.)) * np.random.rand(len(self.solution)) + + params = optimize.fmin_bfgs(self.func, self.startparams, + epsilon=epsilon, args=(), + maxiter=self.maxiter, disp=False) + + assert_allclose(self.func(params), self.func(self.solution), + atol=1e-6) + + def test_finite_differences_jac(self): + methods = ['BFGS', 'CG', 'TNC'] + jacs = ['2-point', '3-point', None] + for method, jac in itertools.product(methods, jacs): + result = optimize.minimize(self.func, self.startparams, + method=method, jac=jac) + assert_allclose(self.func(result.x), self.func(self.solution), + atol=1e-6) + + def test_finite_differences_hess(self): + # test that all the methods that require hess can use finite-difference + # For Newton-CG, trust-ncg, trust-krylov the FD estimated hessian is + # wrapped in a hessp function + # dogleg, trust-exact actually require true hessians at the moment, so + # they're excluded. + methods = ['trust-constr', 'Newton-CG', 'trust-ncg', 'trust-krylov'] + hesses = FD_METHODS + (optimize.BFGS,) + for method, hess in itertools.product(methods, hesses): + if hess is optimize.BFGS: + hess = hess() + result = optimize.minimize(self.func, self.startparams, + method=method, jac=self.grad, + hess=hess) + assert result.success + + # check that the methods demand some sort of Hessian specification + # Newton-CG creates its own hessp, and trust-constr doesn't need a hess + # specified either + methods = ['trust-ncg', 'trust-krylov', 'dogleg', 'trust-exact'] + for method in methods: + with pytest.raises(ValueError): + optimize.minimize(self.func, self.startparams, + method=method, jac=self.grad, + hess=None) + + def test_bfgs_gh_2169(self): + def f(x): + if x < 0: + return 1.79769313e+308 + else: + return x + 1./x + xs = optimize.fmin_bfgs(f, [10.], disp=False) + assert_allclose(xs, 1.0, rtol=1e-4, atol=1e-4) + + def test_bfgs_double_evaluations(self): + # check BFGS does not evaluate twice in a row at same point + def f(x): + xp = x[0] + assert xp not in seen + seen.add(xp) + return 10*x**2, 20*x + + seen = set() + optimize.minimize(f, -100, method='bfgs', jac=True, tol=1e-7) + + def test_l_bfgs_b(self): + # limited-memory bound-constrained BFGS algorithm + retval = optimize.fmin_l_bfgs_b(self.func, self.startparams, + self.grad, args=(), + maxiter=self.maxiter) + + (params, fopt, d) = retval + + assert_allclose(self.func(params), self.func(self.solution), + atol=1e-6) + + # Ensure that function call counts are 'known good'; these are from + # SciPy 0.7.0. Don't allow them to increase. 
+ assert self.funccalls.c == 7, self.funccalls.c + assert self.gradcalls.c == 5, self.gradcalls.c + + # Ensure that the function behaves the same; this is from SciPy 0.7.0 + # test fixed in gh10673 + assert_allclose(self.trace.t[3:5], + [[8.117083e-16, -5.196198e-01, 4.897617e-01], + [0., -0.52489628, 0.48753042]], + atol=1e-14, rtol=1e-7) + + def test_l_bfgs_b_numjac(self): + # L-BFGS-B with numerical Jacobian + retval = optimize.fmin_l_bfgs_b(self.func, self.startparams, + approx_grad=True, + maxiter=self.maxiter) + + (params, fopt, d) = retval + + assert_allclose(self.func(params), self.func(self.solution), + atol=1e-6) + + def test_l_bfgs_b_funjac(self): + # L-BFGS-B with combined objective function and Jacobian + def fun(x): + return self.func(x), self.grad(x) + + retval = optimize.fmin_l_bfgs_b(fun, self.startparams, + maxiter=self.maxiter) + + (params, fopt, d) = retval + + assert_allclose(self.func(params), self.func(self.solution), + atol=1e-6) + + def test_l_bfgs_b_maxiter(self): + # gh7854 + # Ensure that not more than maxiters are ever run. + class Callback: + def __init__(self): + self.nit = 0 + self.fun = None + self.x = None + + def __call__(self, x): + self.x = x + self.fun = optimize.rosen(x) + self.nit += 1 + + c = Callback() + res = optimize.minimize(optimize.rosen, [0., 0.], method='l-bfgs-b', + callback=c, options={'maxiter': 5}) + + assert_equal(res.nit, 5) + assert_almost_equal(res.x, c.x) + assert_almost_equal(res.fun, c.fun) + assert_equal(res.status, 1) + assert res.success is False + assert_equal(res.message, + 'STOP: TOTAL NO. OF ITERATIONS REACHED LIMIT') + + def test_minimize_l_bfgs_b(self): + # Minimize with L-BFGS-B method + opts = {'disp': False, 'maxiter': self.maxiter} + r = optimize.minimize(self.func, self.startparams, + method='L-BFGS-B', jac=self.grad, + options=opts) + assert_allclose(self.func(r.x), self.func(self.solution), + atol=1e-6) + assert self.gradcalls.c == r.njev + + self.funccalls.c = self.gradcalls.c = 0 + # approximate jacobian + ra = optimize.minimize(self.func, self.startparams, + method='L-BFGS-B', options=opts) + # check that function evaluations in approximate jacobian are counted + # assert_(ra.nfev > r.nfev) + assert self.funccalls.c == ra.nfev + assert_allclose(self.func(ra.x), self.func(self.solution), + atol=1e-6) + + self.funccalls.c = self.gradcalls.c = 0 + # approximate jacobian + ra = optimize.minimize(self.func, self.startparams, jac='3-point', + method='L-BFGS-B', options=opts) + assert self.funccalls.c == ra.nfev + assert_allclose(self.func(ra.x), self.func(self.solution), + atol=1e-6) + + def test_minimize_l_bfgs_b_ftol(self): + # Check that the `ftol` parameter in l_bfgs_b works as expected + v0 = None + for tol in [1e-1, 1e-4, 1e-7, 1e-10]: + opts = {'disp': False, 'maxiter': self.maxiter, 'ftol': tol} + sol = optimize.minimize(self.func, self.startparams, + method='L-BFGS-B', jac=self.grad, + options=opts) + v = self.func(sol.x) + + if v0 is None: + v0 = v + else: + assert v < v0 + + assert_allclose(v, self.func(self.solution), rtol=tol) + + def test_minimize_l_bfgs_maxls(self): + # check that the maxls is passed down to the Fortran routine + sol = optimize.minimize(optimize.rosen, np.array([-1.2, 1.0]), + method='L-BFGS-B', jac=optimize.rosen_der, + options={'disp': False, 'maxls': 1}) + assert not sol.success + + def test_minimize_l_bfgs_b_maxfun_interruption(self): + # gh-6162 + f = optimize.rosen + g = optimize.rosen_der + values = [] + x0 = np.full(7, 1000) + + def objfun(x): + value = f(x) + 
values.append(value) + return value + + # Look for an interesting test case. + # Request a maxfun that stops at a particularly bad function + # evaluation somewhere between 100 and 300 evaluations. + low, medium, high = 30, 100, 300 + optimize.fmin_l_bfgs_b(objfun, x0, fprime=g, maxfun=high) + v, k = max((y, i) for i, y in enumerate(values[medium:])) + maxfun = medium + k + # If the minimization strategy is reasonable, + # the minimize() result should not be worse than the best + # of the first 30 function evaluations. + target = min(values[:low]) + xmin, fmin, d = optimize.fmin_l_bfgs_b(f, x0, fprime=g, maxfun=maxfun) + assert_array_less(fmin, target) + + def test_custom(self): + # This function comes from the documentation example. + def custmin(fun, x0, args=(), maxfev=None, stepsize=0.1, + maxiter=100, callback=None, **options): + bestx = x0 + besty = fun(x0) + funcalls = 1 + niter = 0 + improved = True + stop = False + + while improved and not stop and niter < maxiter: + improved = False + niter += 1 + for dim in range(np.size(x0)): + for s in [bestx[dim] - stepsize, bestx[dim] + stepsize]: + testx = np.copy(bestx) + testx[dim] = s + testy = fun(testx, *args) + funcalls += 1 + if testy < besty: + besty = testy + bestx = testx + improved = True + if callback is not None: + callback(bestx) + if maxfev is not None and funcalls >= maxfev: + stop = True + break + + return optimize.OptimizeResult(fun=besty, x=bestx, nit=niter, + nfev=funcalls, success=(niter > 1)) + + x0 = [1.35, 0.9, 0.8, 1.1, 1.2] + res = optimize.minimize(optimize.rosen, x0, method=custmin, + options=dict(stepsize=0.05)) + assert_allclose(res.x, 1.0, rtol=1e-4, atol=1e-4) + + def test_gh10771(self): + # check that minimize passes bounds and constraints to a custom + # minimizer without altering them. + bounds = [(-2, 2), (0, 3)] + constraints = 'constraints' + + def custmin(fun, x0, **options): + assert options['bounds'] is bounds + assert options['constraints'] is constraints + return optimize.OptimizeResult() + + x0 = [1, 1] + optimize.minimize(optimize.rosen, x0, method=custmin, + bounds=bounds, constraints=constraints) + + def test_minimize_tol_parameter(self): + # Check that the minimize() tol= argument does something + def func(z): + x, y = z + return x**2*y**2 + x**4 + 1 + + def dfunc(z): + x, y = z + return np.array([2*x*y**2 + 4*x**3, 2*x**2*y]) + + for method in ['nelder-mead', 'powell', 'cg', 'bfgs', + 'newton-cg', 'l-bfgs-b', 'tnc', + 'cobyla', 'cobyqa', 'slsqp']: + if method in ('nelder-mead', 'powell', 'cobyla', 'cobyqa'): + jac = None + else: + jac = dfunc + + sol1 = optimize.minimize(func, [2, 2], jac=jac, tol=1e-10, + method=method) + sol2 = optimize.minimize(func, [2, 2], jac=jac, tol=1.0, + method=method) + assert func(sol1.x) < func(sol2.x), \ + f"{method}: {func(sol1.x)} vs. 
{func(sol2.x)}" + + @pytest.mark.fail_slow(10) + @pytest.mark.filterwarnings('ignore::UserWarning') + @pytest.mark.filterwarnings('ignore::RuntimeWarning') # See gh-18547 + @pytest.mark.parametrize('method', + ['fmin', 'fmin_powell', 'fmin_cg', 'fmin_bfgs', + 'fmin_ncg', 'fmin_l_bfgs_b', 'fmin_tnc', + 'fmin_slsqp'] + MINIMIZE_METHODS) + def test_minimize_callback_copies_array(self, method): + # Check that arrays passed to callbacks are not modified + # inplace by the optimizer afterward + + if method in ('fmin_tnc', 'fmin_l_bfgs_b'): + def func(x): + return optimize.rosen(x), optimize.rosen_der(x) + else: + func = optimize.rosen + jac = optimize.rosen_der + hess = optimize.rosen_hess + + x0 = np.zeros(10) + + # Set options + kwargs = {} + if method.startswith('fmin'): + routine = getattr(optimize, method) + if method == 'fmin_slsqp': + kwargs['iter'] = 5 + elif method == 'fmin_tnc': + kwargs['maxfun'] = 100 + elif method in ('fmin', 'fmin_powell'): + kwargs['maxiter'] = 3500 + else: + kwargs['maxiter'] = 5 + else: + def routine(*a, **kw): + kw['method'] = method + return optimize.minimize(*a, **kw) + + if method == 'tnc': + kwargs['options'] = dict(maxfun=100) + else: + kwargs['options'] = dict(maxiter=5) + + if method in ('fmin_ncg',): + kwargs['fprime'] = jac + elif method in ('newton-cg',): + kwargs['jac'] = jac + elif method in ('trust-krylov', 'trust-exact', 'trust-ncg', 'dogleg', + 'trust-constr'): + kwargs['jac'] = jac + kwargs['hess'] = hess + + # Run with callback + results = [] + + def callback(x, *args, **kwargs): + assert not isinstance(x, optimize.OptimizeResult) + results.append((x, np.copy(x))) + + routine(func, x0, callback=callback, **kwargs) + + # Check returned arrays coincide with their copies + # and have no memory overlap + assert len(results) > 2 + assert all(np.all(x == y) for x, y in results) + combinations = itertools.combinations(results, 2) + assert not any(np.may_share_memory(x[0], y[0]) for x, y in combinations) + + @pytest.mark.parametrize('method', ['nelder-mead', 'powell', 'cg', + 'bfgs', 'newton-cg', 'l-bfgs-b', + 'tnc', 'cobyla', 'cobyqa', 'slsqp']) + def test_no_increase(self, method): + # Check that the solver doesn't return a value worse than the + # initial point. 
+ + def func(x): + return (x - 1)**2 + + def bad_grad(x): + # purposefully invalid gradient function, simulates a case + # where line searches start failing + return 2*(x - 1) * (-1) - 2 + + x0 = np.array([2.0]) + f0 = func(x0) + jac = bad_grad + options = dict(maxfun=20) if method == 'tnc' else dict(maxiter=20) + if method in ['nelder-mead', 'powell', 'cobyla', 'cobyqa']: + jac = None + sol = optimize.minimize(func, x0, jac=jac, method=method, + options=options) + assert_equal(func(sol.x), sol.fun) + + if method == 'slsqp': + pytest.xfail("SLSQP returns slightly worse") + assert func(sol.x) <= f0 + + def test_slsqp_respect_bounds(self): + # Regression test for gh-3108 + def f(x): + return sum((x - np.array([1., 2., 3., 4.]))**2) + + def cons(x): + a = np.array([[-1, -1, -1, -1], [-3, -3, -2, -1]]) + return np.concatenate([np.dot(a, x) + np.array([5, 10]), x]) + + x0 = np.array([0.5, 1., 1.5, 2.]) + res = optimize.minimize(f, x0, method='slsqp', + constraints={'type': 'ineq', 'fun': cons}) + assert_allclose(res.x, np.array([0., 2, 5, 8])/3, atol=1e-12) + + @pytest.mark.parametrize('method', ['Nelder-Mead', 'Powell', 'CG', 'BFGS', + 'Newton-CG', 'L-BFGS-B', 'SLSQP', + 'trust-constr', 'dogleg', 'trust-ncg', + 'trust-exact', 'trust-krylov', + 'cobyqa']) + def test_respect_maxiter(self, method): + # Check that the number of iterations equals max_iter, assuming + # convergence doesn't establish before + MAXITER = 4 + + x0 = np.zeros(10) + + sf = ScalarFunction(optimize.rosen, x0, (), optimize.rosen_der, + optimize.rosen_hess, None, None) + + # Set options + kwargs = {'method': method, 'options': dict(maxiter=MAXITER)} + + if method in ('Newton-CG',): + kwargs['jac'] = sf.grad + elif method in ('trust-krylov', 'trust-exact', 'trust-ncg', 'dogleg', + 'trust-constr'): + kwargs['jac'] = sf.grad + kwargs['hess'] = sf.hess + + sol = optimize.minimize(sf.fun, x0, **kwargs) + assert sol.nit == MAXITER + assert sol.nfev >= sf.nfev + if hasattr(sol, 'njev'): + assert sol.njev >= sf.ngev + + # method specific tests + if method == 'SLSQP': + assert sol.status == 9 # Iteration limit reached + elif method == 'cobyqa': + assert sol.status == 6 # Iteration limit reached + + @pytest.mark.thread_unsafe + @pytest.mark.parametrize('method', ['Nelder-Mead', 'Powell', + 'fmin', 'fmin_powell']) + def test_runtime_warning(self, method): + x0 = np.zeros(10) + sf = ScalarFunction(optimize.rosen, x0, (), optimize.rosen_der, + optimize.rosen_hess, None, None) + options = {"maxiter": 1, "disp": True} + with pytest.warns(RuntimeWarning, + match=r'Maximum number of iterations'): + if method.startswith('fmin'): + routine = getattr(optimize, method) + routine(sf.fun, x0, **options) + else: + optimize.minimize(sf.fun, x0, method=method, options=options) + + def test_respect_maxiter_trust_constr_ineq_constraints(self): + # special case of minimization with trust-constr and inequality + # constraints to check maxiter limit is obeyed when using internal + # method 'tr_interior_point' + MAXITER = 4 + f = optimize.rosen + jac = optimize.rosen_der + hess = optimize.rosen_hess + + def fun(x): + return np.array([0.2 * x[0] - 0.4 * x[1] - 0.33 * x[2]]) + cons = ({'type': 'ineq', + 'fun': fun},) + + x0 = np.zeros(10) + sol = optimize.minimize(f, x0, constraints=cons, jac=jac, hess=hess, + method='trust-constr', + options=dict(maxiter=MAXITER)) + assert sol.nit == MAXITER + + def test_minimize_automethod(self): + def f(x): + return x**2 + + def cons(x): + return x - 2 + + x0 = np.array([10.]) + sol_0 = optimize.minimize(f, x0) + sol_1 = 
optimize.minimize(f, x0, constraints=[{'type': 'ineq', + 'fun': cons}]) + sol_2 = optimize.minimize(f, x0, bounds=[(5, 10)]) + sol_3 = optimize.minimize(f, x0, + constraints=[{'type': 'ineq', 'fun': cons}], + bounds=[(5, 10)]) + sol_4 = optimize.minimize(f, x0, + constraints=[{'type': 'ineq', 'fun': cons}], + bounds=[(1, 10)]) + for sol in [sol_0, sol_1, sol_2, sol_3, sol_4]: + assert sol.success + assert_allclose(sol_0.x, 0, atol=1e-7) + assert_allclose(sol_1.x, 2, atol=1e-7) + assert_allclose(sol_2.x, 5, atol=1e-7) + assert_allclose(sol_3.x, 5, atol=1e-7) + assert_allclose(sol_4.x, 2, atol=1e-7) + + def test_minimize_coerce_args_param(self): + # Regression test for gh-3503 + def Y(x, c): + return np.sum((x-c)**2) + + def dY_dx(x, c=None): + return 2*(x-c) + + c = np.array([3, 1, 4, 1, 5, 9, 2, 6, 5, 3, 5]) + xinit = np.random.randn(len(c)) + optimize.minimize(Y, xinit, jac=dY_dx, args=(c), method="BFGS") + + def test_initial_step_scaling(self): + # Check that optimizer initial step is not huge even if the + # function and gradients are + + scales = [1e-50, 1, 1e50] + methods = ['CG', 'BFGS', 'L-BFGS-B', 'Newton-CG'] + + def f(x): + if first_step_size[0] is None and x[0] != x0[0]: + first_step_size[0] = abs(x[0] - x0[0]) + if abs(x).max() > 1e4: + raise AssertionError("Optimization stepped far away!") + return scale*(x[0] - 1)**2 + + def g(x): + return np.array([scale*(x[0] - 1)]) + + for scale, method in itertools.product(scales, methods): + if method in ('CG', 'BFGS'): + options = dict(gtol=scale*1e-8) + else: + options = dict() + + if scale < 1e-10 and method in ('L-BFGS-B', 'Newton-CG'): + # XXX: return initial point if they see small gradient + continue + + x0 = [-1.0] + first_step_size = [None] + res = optimize.minimize(f, x0, jac=g, method=method, + options=options) + + err_msg = f"{method} {scale}: {first_step_size}: {res}" + + assert res.success, err_msg + assert_allclose(res.x, [1.0], err_msg=err_msg) + assert res.nit <= 3, err_msg + + if scale > 1e-10: + if method in ('CG', 'BFGS'): + assert_allclose(first_step_size[0], 1.01, err_msg=err_msg) + else: + # Newton-CG and L-BFGS-B use different logic for the first + # step, but are both scaling invariant with step sizes ~ 1 + assert first_step_size[0] > 0.5 and first_step_size[0] < 3, err_msg + else: + # step size has upper bound of ||grad||, so line + # search makes many small steps + pass + + @pytest.mark.parametrize('method', ['nelder-mead', 'powell', 'cg', 'bfgs', + 'newton-cg', 'l-bfgs-b', 'tnc', + 'cobyla', 'cobyqa', 'slsqp', + 'trust-constr', 'dogleg', 'trust-ncg', + 'trust-exact', 'trust-krylov']) + def test_nan_values(self, method, num_parallel_threads): + if num_parallel_threads > 1 and method == 'cobyqa': + pytest.skip('COBYQA does not support concurrent execution') + + # Check nan values result to failed exit status + rng = np.random.RandomState(1234) + + count = [0] + + def func(x): + return np.nan + + def func2(x): + count[0] += 1 + if count[0] > 2: + return np.nan + else: + return rng.rand() + + def grad(x): + return np.array([1.0]) + + def hess(x): + return np.array([[1.0]]) + + x0 = np.array([1.0]) + + needs_grad = method in ('newton-cg', 'trust-krylov', 'trust-exact', + 'trust-ncg', 'dogleg') + needs_hess = method in ('trust-krylov', 'trust-exact', 'trust-ncg', + 'dogleg') + + funcs = [func, func2] + grads = [grad] if needs_grad else [grad, None] + hesss = [hess] if needs_hess else [hess, None] + options = dict(maxfun=20) if method == 'tnc' else dict(maxiter=20) + + with np.errstate(invalid='ignore'), 
suppress_warnings() as sup: + sup.filter(UserWarning, "delta_grad == 0.*") + sup.filter(RuntimeWarning, ".*does not use Hessian.*") + sup.filter(RuntimeWarning, ".*does not use gradient.*") + + for f, g, h in itertools.product(funcs, grads, hesss): + count = [0] + sol = optimize.minimize(f, x0, jac=g, hess=h, method=method, + options=options) + assert_equal(sol.success, False) + + @pytest.mark.parametrize('method', ['nelder-mead', 'cg', 'bfgs', + 'l-bfgs-b', 'tnc', + 'cobyla', 'cobyqa', 'slsqp', + 'trust-constr', 'dogleg', 'trust-ncg', + 'trust-exact', 'trust-krylov']) + def test_duplicate_evaluations(self, method): + # check that there are no duplicate evaluations for any methods + jac = hess = None + if method in ('newton-cg', 'trust-krylov', 'trust-exact', + 'trust-ncg', 'dogleg'): + jac = self.grad + if method in ('trust-krylov', 'trust-exact', 'trust-ncg', + 'dogleg'): + hess = self.hess + + with np.errstate(invalid='ignore'), suppress_warnings() as sup: + # for trust-constr + sup.filter(UserWarning, "delta_grad == 0.*") + optimize.minimize(self.func, self.startparams, + method=method, jac=jac, hess=hess) + + for i in range(1, len(self.trace.t)): + if np.array_equal(self.trace.t[i - 1], self.trace.t[i]): + raise RuntimeError( + f"Duplicate evaluations made by {method}") + + @pytest.mark.filterwarnings('ignore::RuntimeWarning') + @pytest.mark.parametrize('method', MINIMIZE_METHODS_NEW_CB) + @pytest.mark.parametrize('new_cb_interface', [0, 1, 2]) + def test_callback_stopiteration(self, method, new_cb_interface): + # Check that if callback raises StopIteration, optimization + # terminates with the same result as if iterations were limited + + def f(x): + f.flag = False # check that f isn't called after StopIteration + return optimize.rosen(x) + f.flag = False + + def g(x): + f.flag = False + return optimize.rosen_der(x) + + def h(x): + f.flag = False + return optimize.rosen_hess(x) + + maxiter = 5 + + if new_cb_interface == 1: + def callback_interface(*, intermediate_result): + assert intermediate_result.fun == f(intermediate_result.x) + callback() + elif new_cb_interface == 2: + class Callback: + def __call__(self, intermediate_result: OptimizeResult): + assert intermediate_result.fun == f(intermediate_result.x) + callback() + callback_interface = Callback() + else: + def callback_interface(xk, *args): # type: ignore[misc] + callback() + + def callback(): + callback.i += 1 + callback.flag = False + if callback.i == maxiter: + callback.flag = True + raise StopIteration() + callback.i = 0 + callback.flag = False + + kwargs = {'x0': [1.1]*5, 'method': method, + 'fun': f, 'jac': g, 'hess': h} + + res = optimize.minimize(**kwargs, callback=callback_interface) + if method == 'nelder-mead': + maxiter = maxiter + 1 # nelder-mead counts differently + if method == 'cobyqa': + ref = optimize.minimize(**kwargs, options={'maxfev': maxiter}) + assert res.nfev == ref.nfev == maxiter + else: + ref = optimize.minimize(**kwargs, options={'maxiter': maxiter}) + assert res.nit == ref.nit == maxiter + assert res.fun == ref.fun + assert_equal(res.x, ref.x) + assert res.status == (3 if method in [ + 'trust-constr', + 'cobyqa', + ] else 99) + + def test_ndim_error(self): + msg = "'x0' must only have one dimension." 
+ with assert_raises(ValueError, match=msg): + optimize.minimize(lambda x: x, np.ones((2, 1))) + + @pytest.mark.parametrize('method', ('nelder-mead', 'l-bfgs-b', 'tnc', + 'powell', 'cobyla', 'cobyqa', + 'trust-constr')) + def test_minimize_invalid_bounds(self, method): + def f(x): + return np.sum(x**2) + + bounds = Bounds([1, 2], [3, 4]) + msg = 'The number of bounds is not compatible with the length of `x0`.' + with pytest.raises(ValueError, match=msg): + optimize.minimize(f, x0=[1, 2, 3], method=method, bounds=bounds) + + bounds = Bounds([1, 6, 1], [3, 4, 2]) + msg = 'An upper bound is less than the corresponding lower bound.' + with pytest.raises(ValueError, match=msg): + optimize.minimize(f, x0=[1, 2, 3], method=method, bounds=bounds) + + @pytest.mark.thread_unsafe + @pytest.mark.parametrize('method', ['bfgs', 'cg', 'newton-cg', 'powell']) + def test_minimize_warnings_gh1953(self, method): + # test that minimize methods produce warnings rather than just using + # `print`; see gh-1953. + kwargs = {} if method=='powell' else {'jac': optimize.rosen_der} + warning_type = (RuntimeWarning if method=='powell' + else optimize.OptimizeWarning) + + options = {'disp': True, 'maxiter': 10} + with pytest.warns(warning_type, match='Maximum number'): + optimize.minimize(lambda x: optimize.rosen(x), [0, 0], + method=method, options=options, **kwargs) + + options['disp'] = False + optimize.minimize(lambda x: optimize.rosen(x), [0, 0], + method=method, options=options, **kwargs) + + +@pytest.mark.parametrize( + 'method', + ['l-bfgs-b', 'tnc', 'Powell', 'Nelder-Mead', 'cobyqa'] +) +def test_minimize_with_scalar(method): + # checks that minimize works with a scalar being provided to it. + def f(x): + return np.sum(x ** 2) + + res = optimize.minimize(f, 17, bounds=[(-100, 100)], method=method) + assert res.success + assert_allclose(res.x, [0.0], atol=1e-5) + + +class TestLBFGSBBounds: + def setup_method(self): + self.bounds = ((1, None), (None, None)) + self.solution = (1, 0) + + def fun(self, x, p=2.0): + return 1.0 / p * (x[0]**p + x[1]**p) + + def jac(self, x, p=2.0): + return x**(p - 1) + + def fj(self, x, p=2.0): + return self.fun(x, p), self.jac(x, p) + + def test_l_bfgs_b_bounds(self): + x, f, d = optimize.fmin_l_bfgs_b(self.fun, [0, -1], + fprime=self.jac, + bounds=self.bounds) + assert d['warnflag'] == 0, d['task'] + assert_allclose(x, self.solution, atol=1e-6) + + def test_l_bfgs_b_funjac(self): + # L-BFGS-B with fun and jac combined and extra arguments + x, f, d = optimize.fmin_l_bfgs_b(self.fj, [0, -1], args=(2.0, ), + bounds=self.bounds) + assert d['warnflag'] == 0, d['task'] + assert_allclose(x, self.solution, atol=1e-6) + + def test_minimize_l_bfgs_b_bounds(self): + # Minimize with method='L-BFGS-B' with bounds + res = optimize.minimize(self.fun, [0, -1], method='L-BFGS-B', + jac=self.jac, bounds=self.bounds) + assert res['success'], res['message'] + assert_allclose(res.x, self.solution, atol=1e-6) + + @pytest.mark.parametrize('bounds', [ + ([(10, 1), (1, 10)]), + ([(1, 10), (10, 1)]), + ([(10, 1), (10, 1)]) + ]) + def test_minimize_l_bfgs_b_incorrect_bounds(self, bounds): + with pytest.raises(ValueError, match='.*bound.*'): + optimize.minimize(self.fun, [0, -1], method='L-BFGS-B', + jac=self.jac, bounds=bounds) + + def test_minimize_l_bfgs_b_bounds_FD(self): + # test that initial starting value outside bounds doesn't raise + # an error (done with clipping). 
+ # test all different finite differences combos, with and without args + + jacs = ['2-point', '3-point', None] + argss = [(2.,), ()] + for jac, args in itertools.product(jacs, argss): + res = optimize.minimize(self.fun, [0, -1], args=args, + method='L-BFGS-B', + jac=jac, bounds=self.bounds, + options={'finite_diff_rel_step': None}) + assert res['success'], res['message'] + assert_allclose(res.x, self.solution, atol=1e-6) + + +class TestOptimizeScalar: + def setup_method(self): + self.solution = 1.5 + + def fun(self, x, a=1.5): + """Objective function""" + return (x - a)**2 - 0.8 + + def test_brent(self): + x = optimize.brent(self.fun) + assert_allclose(x, self.solution, atol=1e-6) + + x = optimize.brent(self.fun, brack=(-3, -2)) + assert_allclose(x, self.solution, atol=1e-6) + + x = optimize.brent(self.fun, full_output=True) + assert_allclose(x[0], self.solution, atol=1e-6) + + x = optimize.brent(self.fun, brack=(-15, -1, 15)) + assert_allclose(x, self.solution, atol=1e-6) + + message = r"\(f\(xb\) < f\(xa\)\) and \(f\(xb\) < f\(xc\)\)" + with pytest.raises(ValueError, match=message): + optimize.brent(self.fun, brack=(-1, 0, 1)) + + message = r"\(xa < xb\) and \(xb < xc\)" + with pytest.raises(ValueError, match=message): + optimize.brent(self.fun, brack=(0, -1, 1)) + + @pytest.mark.filterwarnings('ignore::UserWarning') + def test_golden(self): + x = optimize.golden(self.fun) + assert_allclose(x, self.solution, atol=1e-6) + + x = optimize.golden(self.fun, brack=(-3, -2)) + assert_allclose(x, self.solution, atol=1e-6) + + x = optimize.golden(self.fun, full_output=True) + assert_allclose(x[0], self.solution, atol=1e-6) + + x = optimize.golden(self.fun, brack=(-15, -1, 15)) + assert_allclose(x, self.solution, atol=1e-6) + + x = optimize.golden(self.fun, tol=0) + assert_allclose(x, self.solution) + + maxiter_test_cases = [0, 1, 5] + for maxiter in maxiter_test_cases: + x0 = optimize.golden(self.fun, maxiter=0, full_output=True) + x = optimize.golden(self.fun, maxiter=maxiter, full_output=True) + nfev0, nfev = x0[2], x[2] + assert_equal(nfev - nfev0, maxiter) + + message = r"\(f\(xb\) < f\(xa\)\) and \(f\(xb\) < f\(xc\)\)" + with pytest.raises(ValueError, match=message): + optimize.golden(self.fun, brack=(-1, 0, 1)) + + message = r"\(xa < xb\) and \(xb < xc\)" + with pytest.raises(ValueError, match=message): + optimize.golden(self.fun, brack=(0, -1, 1)) + + def test_fminbound(self): + x = optimize.fminbound(self.fun, 0, 1) + assert_allclose(x, 1, atol=1e-4) + + x = optimize.fminbound(self.fun, 1, 5) + assert_allclose(x, self.solution, atol=1e-6) + + x = optimize.fminbound(self.fun, np.array([1]), np.array([5])) + assert_allclose(x, self.solution, atol=1e-6) + assert_raises(ValueError, optimize.fminbound, self.fun, 5, 1) + + def test_fminbound_scalar(self): + with pytest.raises(ValueError, match='.*must be finite scalars.*'): + optimize.fminbound(self.fun, np.zeros((1, 2)), 1) + + x = optimize.fminbound(self.fun, 1, np.array(5)) + assert_allclose(x, self.solution, atol=1e-6) + + def test_gh11207(self): + def fun(x): + return x**2 + optimize.fminbound(fun, 0, 0) + + def test_minimize_scalar(self): + # combine all tests above for the minimize_scalar wrapper + x = optimize.minimize_scalar(self.fun).x + assert_allclose(x, self.solution, atol=1e-6) + + x = optimize.minimize_scalar(self.fun, method='Brent') + assert x.success + + x = optimize.minimize_scalar(self.fun, method='Brent', + options=dict(maxiter=3)) + assert not x.success + + x = optimize.minimize_scalar(self.fun, bracket=(-3, -2), + 
args=(1.5, ), method='Brent').x + assert_allclose(x, self.solution, atol=1e-6) + + x = optimize.minimize_scalar(self.fun, method='Brent', + args=(1.5,)).x + assert_allclose(x, self.solution, atol=1e-6) + + x = optimize.minimize_scalar(self.fun, bracket=(-15, -1, 15), + args=(1.5, ), method='Brent').x + assert_allclose(x, self.solution, atol=1e-6) + + x = optimize.minimize_scalar(self.fun, bracket=(-3, -2), + args=(1.5, ), method='golden').x + assert_allclose(x, self.solution, atol=1e-6) + + x = optimize.minimize_scalar(self.fun, method='golden', + args=(1.5,)).x + assert_allclose(x, self.solution, atol=1e-6) + + x = optimize.minimize_scalar(self.fun, bracket=(-15, -1, 15), + args=(1.5, ), method='golden').x + assert_allclose(x, self.solution, atol=1e-6) + + x = optimize.minimize_scalar(self.fun, bounds=(0, 1), args=(1.5,), + method='Bounded').x + assert_allclose(x, 1, atol=1e-4) + + x = optimize.minimize_scalar(self.fun, bounds=(1, 5), args=(1.5, ), + method='bounded').x + assert_allclose(x, self.solution, atol=1e-6) + + x = optimize.minimize_scalar(self.fun, bounds=(np.array([1]), + np.array([5])), + args=(np.array([1.5]), ), + method='bounded').x + assert_allclose(x, self.solution, atol=1e-6) + + assert_raises(ValueError, optimize.minimize_scalar, self.fun, + bounds=(5, 1), method='bounded', args=(1.5, )) + + assert_raises(ValueError, optimize.minimize_scalar, self.fun, + bounds=(np.zeros(2), 1), method='bounded', args=(1.5, )) + + x = optimize.minimize_scalar(self.fun, bounds=(1, np.array(5)), + method='bounded').x + assert_allclose(x, self.solution, atol=1e-6) + + def test_minimize_scalar_custom(self): + # This function comes from the documentation example. + def custmin(fun, bracket, args=(), maxfev=None, stepsize=0.1, + maxiter=100, callback=None, **options): + bestx = (bracket[1] + bracket[0]) / 2.0 + besty = fun(bestx) + funcalls = 1 + niter = 0 + improved = True + stop = False + + while improved and not stop and niter < maxiter: + improved = False + niter += 1 + for testx in [bestx - stepsize, bestx + stepsize]: + testy = fun(testx, *args) + funcalls += 1 + if testy < besty: + besty = testy + bestx = testx + improved = True + if callback is not None: + callback(bestx) + if maxfev is not None and funcalls >= maxfev: + stop = True + break + + return optimize.OptimizeResult(fun=besty, x=bestx, nit=niter, + nfev=funcalls, success=(niter > 1)) + + res = optimize.minimize_scalar(self.fun, bracket=(0, 4), + method=custmin, + options=dict(stepsize=0.05)) + assert_allclose(res.x, self.solution, atol=1e-6) + + def test_minimize_scalar_coerce_args_param(self): + # Regression test for gh-3503 + optimize.minimize_scalar(self.fun, args=1.5) + + @pytest.mark.parametrize('method', ['brent', 'bounded', 'golden']) + def test_disp(self, method): + # test that all minimize_scalar methods accept a disp option. 
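+        # (Added note: `disp` only selects the verbosity of printed output;
+        # the loop below simply checks that each level 0-3 is accepted
+        # without raising.)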
+ for disp in [0, 1, 2, 3]: + optimize.minimize_scalar(self.fun, options={"disp": disp}) + + @pytest.mark.parametrize('method', ['brent', 'bounded', 'golden']) + def test_result_attributes(self, method): + kwargs = {"bounds": [-10, 10]} if method == 'bounded' else {} + result = optimize.minimize_scalar(self.fun, method=method, **kwargs) + assert hasattr(result, "x") + assert hasattr(result, "success") + assert hasattr(result, "message") + assert hasattr(result, "fun") + assert hasattr(result, "nfev") + assert hasattr(result, "nit") + + @pytest.mark.filterwarnings('ignore::UserWarning') + @pytest.mark.parametrize('method', ['brent', 'bounded', 'golden']) + def test_nan_values(self, method): + # Check nan values result to failed exit status + np.random.seed(1234) + + count = [0] + + def func(x): + count[0] += 1 + if count[0] > 4: + return np.nan + else: + return x**2 + 0.1 * np.sin(x) + + bracket = (-1, 0, 1) + bounds = (-1, 1) + + with np.errstate(invalid='ignore'), suppress_warnings() as sup: + sup.filter(UserWarning, "delta_grad == 0.*") + sup.filter(RuntimeWarning, ".*does not use Hessian.*") + sup.filter(RuntimeWarning, ".*does not use gradient.*") + + count = [0] + + kwargs = {"bounds": bounds} if method == 'bounded' else {} + sol = optimize.minimize_scalar(func, bracket=bracket, + **kwargs, method=method, + options=dict(maxiter=20)) + assert_equal(sol.success, False) + + def test_minimize_scalar_defaults_gh10911(self): + # Previously, bounds were silently ignored unless `method='bounds'` + # was chosen. See gh-10911. Check that this is no longer the case. + def f(x): + return x**2 + + res = optimize.minimize_scalar(f) + assert_allclose(res.x, 0, atol=1e-8) + + res = optimize.minimize_scalar(f, bounds=(1, 100), + options={'xatol': 1e-10}) + assert_allclose(res.x, 1) + + def test_minimize_non_finite_bounds_gh10911(self): + # Previously, minimize_scalar misbehaved with infinite bounds. + # See gh-10911. Check that it now raises an error, instead. + msg = "Optimization bounds must be finite scalars." + with pytest.raises(ValueError, match=msg): + optimize.minimize_scalar(np.sin, bounds=(1, np.inf)) + with pytest.raises(ValueError, match=msg): + optimize.minimize_scalar(np.sin, bounds=(np.nan, 1)) + + @pytest.mark.parametrize("method", ['brent', 'golden']) + def test_minimize_unbounded_method_with_bounds_gh10911(self, method): + # Previously, `bounds` were silently ignored when `method='brent'` or + # `method='golden'`. See gh-10911. Check that error is now raised. + msg = "Use of `bounds` is incompatible with..." + with pytest.raises(ValueError, match=msg): + optimize.minimize_scalar(np.sin, method=method, bounds=(1, 2)) + + @pytest.mark.filterwarnings('ignore::RuntimeWarning') + @pytest.mark.parametrize("method", MINIMIZE_SCALAR_METHODS) + @pytest.mark.parametrize("tol", [1, 1e-6]) + @pytest.mark.parametrize("fshape", [(), (1,), (1, 1)]) + def test_minimize_scalar_dimensionality_gh16196(self, method, tol, fshape): + # gh-16196 reported that the output shape of `minimize_scalar` was not + # consistent when an objective function returned an array. Check that + # `res.fun` and `res.x` are now consistent. 
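+        # (Added note: fshape == () exercises a scalar-shaped return, while
+        # (1,) and (1, 1) exercise array-valued returns; the assertion below
+        # requires res.x, res.fun and f(res.x) to all share that shape.)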
+ def f(x): + return np.array(x**4).reshape(fshape) + + a, b = -0.1, 0.2 + kwargs = (dict(bracket=(a, b)) if method != "bounded" + else dict(bounds=(a, b))) + kwargs.update(dict(method=method, tol=tol)) + + res = optimize.minimize_scalar(f, **kwargs) + assert res.x.shape == res.fun.shape == f(res.x).shape == fshape + + @pytest.mark.thread_unsafe + @pytest.mark.parametrize('method', ['bounded', 'brent', 'golden']) + def test_minimize_scalar_warnings_gh1953(self, method): + # test that minimize_scalar methods produce warnings rather than just + # using `print`; see gh-1953. + def f(x): + return (x - 1)**2 + + kwargs = {} + kwd = 'bounds' if method == 'bounded' else 'bracket' + kwargs[kwd] = [-2, 10] + + options = {'disp': True, 'maxiter': 3} + with pytest.warns(optimize.OptimizeWarning, match='Maximum number'): + optimize.minimize_scalar(f, method=method, options=options, + **kwargs) + + options['disp'] = False + optimize.minimize_scalar(f, method=method, options=options, **kwargs) + + +class TestBracket: + + @pytest.mark.filterwarnings('ignore::RuntimeWarning') + def test_errors_and_status_false(self): + # Check that `bracket` raises the errors it is supposed to + def f(x): # gh-14858 + return x**2 if ((-1 < x) & (x < 1)) else 100.0 + + message = "The algorithm terminated without finding a valid bracket." + with pytest.raises(RuntimeError, match=message): + optimize.bracket(f, -1, 1) + with pytest.raises(RuntimeError, match=message): + optimize.bracket(f, -1, np.inf) + with pytest.raises(RuntimeError, match=message): + optimize.brent(f, brack=(-1, 1)) + with pytest.raises(RuntimeError, match=message): + optimize.golden(f, brack=(-1, 1)) + + def f(x): # gh-5899 + return -5 * x**5 + 4 * x**4 - 12 * x**3 + 11 * x**2 - 2 * x + 1 + + message = "No valid bracket was found before the iteration limit..." + with pytest.raises(RuntimeError, match=message): + optimize.bracket(f, -0.5, 0.5, maxiter=10) + + @pytest.mark.parametrize('method', ('brent', 'golden')) + def test_minimize_scalar_success_false(self, method): + # Check that status information from `bracket` gets to minimize_scalar + def f(x): # gh-14858 + return x**2 if ((-1 < x) & (x < 1)) else 100.0 + + message = "The algorithm terminated without finding a valid bracket." 
+ + res = optimize.minimize_scalar(f, bracket=(-1, 1), method=method) + assert not res.success + assert message in res.message + assert res.nfev == 3 + assert res.nit == 0 + assert res.fun == 100 + + +def test_brent_negative_tolerance(): + assert_raises(ValueError, optimize.brent, np.cos, tol=-.01) + + +class TestNewtonCg: + def test_rosenbrock(self): + x0 = np.array([-1.2, 1.0]) + sol = optimize.minimize(optimize.rosen, x0, + jac=optimize.rosen_der, + hess=optimize.rosen_hess, + tol=1e-5, + method='Newton-CG') + assert sol.success, sol.message + assert_allclose(sol.x, np.array([1, 1]), rtol=1e-4) + + def test_himmelblau(self): + x0 = np.array(himmelblau_x0) + sol = optimize.minimize(himmelblau, + x0, + jac=himmelblau_grad, + hess=himmelblau_hess, + method='Newton-CG', + tol=1e-6) + assert sol.success, sol.message + assert_allclose(sol.x, himmelblau_xopt, rtol=1e-4) + assert_allclose(sol.fun, himmelblau_min, atol=1e-4) + + def test_finite_difference(self): + x0 = np.array([-1.2, 1.0]) + sol = optimize.minimize(optimize.rosen, x0, + jac=optimize.rosen_der, + hess='2-point', + tol=1e-5, + method='Newton-CG') + assert sol.success, sol.message + assert_allclose(sol.x, np.array([1, 1]), rtol=1e-4) + + def test_hessian_update_strategy(self): + x0 = np.array([-1.2, 1.0]) + sol = optimize.minimize(optimize.rosen, x0, + jac=optimize.rosen_der, + hess=optimize.BFGS(), + tol=1e-5, + method='Newton-CG') + assert sol.success, sol.message + assert_allclose(sol.x, np.array([1, 1]), rtol=1e-4) + + +def test_line_for_search(): + # _line_for_search is only used in _linesearch_powell, which is also + # tested below. Thus there are more tests of _line_for_search in the + # test_linesearch_powell_bounded function. + + line_for_search = optimize._optimize._line_for_search + # args are x0, alpha, lower_bound, upper_bound + # returns lmin, lmax + + lower_bound = np.array([-5.3, -1, -1.5, -3]) + upper_bound = np.array([1.9, 1, 2.8, 3]) + + # test when starting in the bounds + x0 = np.array([0., 0, 0, 0]) + # and when starting outside of the bounds + x1 = np.array([0., 2, -3, 0]) + + all_tests = ( + (x0, np.array([1., 0, 0, 0]), -5.3, 1.9), + (x0, np.array([0., 1, 0, 0]), -1, 1), + (x0, np.array([0., 0, 1, 0]), -1.5, 2.8), + (x0, np.array([0., 0, 0, 1]), -3, 3), + (x0, np.array([1., 1, 0, 0]), -1, 1), + (x0, np.array([1., 0, -1, 2]), -1.5, 1.5), + (x0, np.array([2., 0, -1, 2]), -1.5, 0.95), + (x1, np.array([1., 0, 0, 0]), -5.3, 1.9), + (x1, np.array([0., 1, 0, 0]), -3, -1), + (x1, np.array([0., 0, 1, 0]), 1.5, 5.8), + (x1, np.array([0., 0, 0, 1]), -3, 3), + (x1, np.array([1., 1, 0, 0]), -3, -1), + (x1, np.array([1., 0, -1, 0]), -5.3, -1.5), + ) + + for x, alpha, lmin, lmax in all_tests: + mi, ma = line_for_search(x, alpha, lower_bound, upper_bound) + assert_allclose(mi, lmin, atol=1e-6) + assert_allclose(ma, lmax, atol=1e-6) + + # now with infinite bounds + lower_bound = np.array([-np.inf, -1, -np.inf, -3]) + upper_bound = np.array([np.inf, 1, 2.8, np.inf]) + + all_tests = ( + (x0, np.array([1., 0, 0, 0]), -np.inf, np.inf), + (x0, np.array([0., 1, 0, 0]), -1, 1), + (x0, np.array([0., 0, 1, 0]), -np.inf, 2.8), + (x0, np.array([0., 0, 0, 1]), -3, np.inf), + (x0, np.array([1., 1, 0, 0]), -1, 1), + (x0, np.array([1., 0, -1, 2]), -1.5, np.inf), + (x1, np.array([1., 0, 0, 0]), -np.inf, np.inf), + (x1, np.array([0., 1, 0, 0]), -3, -1), + (x1, np.array([0., 0, 1, 0]), -np.inf, 5.8), + (x1, np.array([0., 0, 0, 1]), -3, np.inf), + (x1, np.array([1., 1, 0, 0]), -3, -1), + (x1, np.array([1., 0, -1, 0]), -5.8, np.inf), + ) + + 
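+    # (Added note: as above, but an infinite bound should leave the
+    # corresponding side of the allowed step interval unbounded.)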
for x, alpha, lmin, lmax in all_tests: + mi, ma = line_for_search(x, alpha, lower_bound, upper_bound) + assert_allclose(mi, lmin, atol=1e-6) + assert_allclose(ma, lmax, atol=1e-6) + + +def test_linesearch_powell(): + # helper function in optimize.py, not a public function. + linesearch_powell = optimize._optimize._linesearch_powell + # args are func, p, xi, fval, lower_bound=None, upper_bound=None, tol=1e-3 + # returns new_fval, p + direction, direction + def func(x): + return np.sum((x - np.array([-1.0, 2.0, 1.5, -0.4])) ** 2) + p0 = np.array([0., 0, 0, 0]) + fval = func(p0) + lower_bound = np.array([-np.inf] * 4) + upper_bound = np.array([np.inf] * 4) + + all_tests = ( + (np.array([1., 0, 0, 0]), -1), + (np.array([0., 1, 0, 0]), 2), + (np.array([0., 0, 1, 0]), 1.5), + (np.array([0., 0, 0, 1]), -.4), + (np.array([-1., 0, 1, 0]), 1.25), + (np.array([0., 0, 1, 1]), .55), + (np.array([2., 0, -1, 1]), -.65), + ) + + for xi, l in all_tests: + f, p, direction = linesearch_powell(func, p0, xi, + fval=fval, tol=1e-5) + assert_allclose(f, func(l * xi), atol=1e-6) + assert_allclose(p, l * xi, atol=1e-6) + assert_allclose(direction, l * xi, atol=1e-6) + + f, p, direction = linesearch_powell(func, p0, xi, tol=1e-5, + lower_bound=lower_bound, + upper_bound=upper_bound, + fval=fval) + assert_allclose(f, func(l * xi), atol=1e-6) + assert_allclose(p, l * xi, atol=1e-6) + assert_allclose(direction, l * xi, atol=1e-6) + + +def test_linesearch_powell_bounded(): + # helper function in optimize.py, not a public function. + linesearch_powell = optimize._optimize._linesearch_powell + # args are func, p, xi, fval, lower_bound=None, upper_bound=None, tol=1e-3 + # returns new_fval, p+direction, direction + def func(x): + return np.sum((x - np.array([-1.0, 2.0, 1.5, -0.4])) ** 2) + p0 = np.array([0., 0, 0, 0]) + fval = func(p0) + + # first choose bounds such that the same tests from + # test_linesearch_powell should pass. 
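+    # (Added note: every minimiser found in test_linesearch_powell lies
+    # within [-2, 2], so the bounded search should match the unbounded
+    # results.)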
+ lower_bound = np.array([-2.]*4) + upper_bound = np.array([2.]*4) + + all_tests = ( + (np.array([1., 0, 0, 0]), -1), + (np.array([0., 1, 0, 0]), 2), + (np.array([0., 0, 1, 0]), 1.5), + (np.array([0., 0, 0, 1]), -.4), + (np.array([-1., 0, 1, 0]), 1.25), + (np.array([0., 0, 1, 1]), .55), + (np.array([2., 0, -1, 1]), -.65), + ) + + for xi, l in all_tests: + f, p, direction = linesearch_powell(func, p0, xi, tol=1e-5, + lower_bound=lower_bound, + upper_bound=upper_bound, + fval=fval) + assert_allclose(f, func(l * xi), atol=1e-6) + assert_allclose(p, l * xi, atol=1e-6) + assert_allclose(direction, l * xi, atol=1e-6) + + # now choose bounds such that unbounded vs bounded gives different results + lower_bound = np.array([-.3]*3 + [-1]) + upper_bound = np.array([.45]*3 + [.9]) + + all_tests = ( + (np.array([1., 0, 0, 0]), -.3), + (np.array([0., 1, 0, 0]), .45), + (np.array([0., 0, 1, 0]), .45), + (np.array([0., 0, 0, 1]), -.4), + (np.array([-1., 0, 1, 0]), .3), + (np.array([0., 0, 1, 1]), .45), + (np.array([2., 0, -1, 1]), -.15), + ) + + for xi, l in all_tests: + f, p, direction = linesearch_powell(func, p0, xi, tol=1e-5, + lower_bound=lower_bound, + upper_bound=upper_bound, + fval=fval) + assert_allclose(f, func(l * xi), atol=1e-6) + assert_allclose(p, l * xi, atol=1e-6) + assert_allclose(direction, l * xi, atol=1e-6) + + # now choose as above but start outside the bounds + p0 = np.array([-1., 0, 0, 2]) + fval = func(p0) + + all_tests = ( + (np.array([1., 0, 0, 0]), .7), + (np.array([0., 1, 0, 0]), .45), + (np.array([0., 0, 1, 0]), .45), + (np.array([0., 0, 0, 1]), -2.4), + ) + + for xi, l in all_tests: + f, p, direction = linesearch_powell(func, p0, xi, tol=1e-5, + lower_bound=lower_bound, + upper_bound=upper_bound, + fval=fval) + assert_allclose(f, func(p0 + l * xi), atol=1e-6) + assert_allclose(p, p0 + l * xi, atol=1e-6) + assert_allclose(direction, l * xi, atol=1e-6) + + # now mix in inf + p0 = np.array([0., 0, 0, 0]) + fval = func(p0) + + # now choose bounds that mix inf + lower_bound = np.array([-.3, -np.inf, -np.inf, -1]) + upper_bound = np.array([np.inf, .45, np.inf, .9]) + + all_tests = ( + (np.array([1., 0, 0, 0]), -.3), + (np.array([0., 1, 0, 0]), .45), + (np.array([0., 0, 1, 0]), 1.5), + (np.array([0., 0, 0, 1]), -.4), + (np.array([-1., 0, 1, 0]), .3), + (np.array([0., 0, 1, 1]), .55), + (np.array([2., 0, -1, 1]), -.15), + ) + + for xi, l in all_tests: + f, p, direction = linesearch_powell(func, p0, xi, tol=1e-5, + lower_bound=lower_bound, + upper_bound=upper_bound, + fval=fval) + assert_allclose(f, func(l * xi), atol=1e-6) + assert_allclose(p, l * xi, atol=1e-6) + assert_allclose(direction, l * xi, atol=1e-6) + + # now choose as above but start outside the bounds + p0 = np.array([-1., 0, 0, 2]) + fval = func(p0) + + all_tests = ( + (np.array([1., 0, 0, 0]), .7), + (np.array([0., 1, 0, 0]), .45), + (np.array([0., 0, 1, 0]), 1.5), + (np.array([0., 0, 0, 1]), -2.4), + ) + + for xi, l in all_tests: + f, p, direction = linesearch_powell(func, p0, xi, tol=1e-5, + lower_bound=lower_bound, + upper_bound=upper_bound, + fval=fval) + assert_allclose(f, func(p0 + l * xi), atol=1e-6) + assert_allclose(p, p0 + l * xi, atol=1e-6) + assert_allclose(direction, l * xi, atol=1e-6) + + +def test_powell_limits(): + # gh15342 - powell was going outside bounds for some function evaluations. 
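+    # (Added note: the assert inside each objective below fails the test if
+    # Powell ever evaluates a point outside the supplied bounds.)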
+ bounds = optimize.Bounds([0, 0], [0.6, 20]) + + def fun(x): + a, b = x + assert (x >= bounds.lb).all() and (x <= bounds.ub).all() + return a ** 2 + b ** 2 + + optimize.minimize(fun, x0=[0.6, 20], method='Powell', bounds=bounds) + + # Another test from the original report - gh-13411 + bounds = optimize.Bounds(lb=[0,], ub=[1,], keep_feasible=[True,]) + + def func(x): + assert x >= 0 and x <= 1 + return np.exp(x) + + optimize.minimize(fun=func, x0=[0.5], method='powell', bounds=bounds) + + +def test_powell_output(): + funs = [rosen, lambda x: np.array(rosen(x)), lambda x: np.array([rosen(x)])] + for fun in funs: + res = optimize.minimize(fun, x0=[0.6, 20], method='Powell') + assert np.isscalar(res.fun) + + +@array_api_compatible +class TestRosen: + def test_rosen(self, xp): + # integer input should be promoted to the default floating type + x = xp.asarray([1, 1, 1]) + xp_assert_equal(optimize.rosen(x), + xp.asarray(0.)) + + @skip_xp_backends('jax.numpy', + reasons=["JAX arrays do not support item assignment"]) + @pytest.mark.usefixtures("skip_xp_backends") + def test_rosen_der(self, xp): + x = xp.asarray([1, 1, 1, 1]) + xp_assert_equal(optimize.rosen_der(x), + xp.zeros_like(x, dtype=xp.asarray(1.).dtype)) + + @skip_xp_backends('jax.numpy', + reasons=["JAX arrays do not support item assignment"]) + @pytest.mark.usefixtures("skip_xp_backends") + def test_hess_prod(self, xp): + one = xp.asarray(1.) + xp_test = array_namespace(one) + # Compare rosen_hess(x) times p with rosen_hess_prod(x,p). See gh-1775. + x = xp.asarray([3, 4, 5]) + p = xp.asarray([2, 2, 2]) + hp = optimize.rosen_hess_prod(x, p) + p = xp_test.astype(p, one.dtype) + dothp = optimize.rosen_hess(x) @ p + xp_assert_equal(hp, dothp) + + +def himmelblau(p): + """ + R^2 -> R^1 test function for optimization. The function has four local + minima where himmelblau(xopt) == 0. + """ + x, y = p + a = x*x + y - 11 + b = x + y*y - 7 + return a*a + b*b + + +def himmelblau_grad(p): + x, y = p + return np.array([4*x**3 + 4*x*y - 42*x + 2*y**2 - 14, + 2*x**2 + 4*x*y + 4*y**3 - 26*y - 22]) + + +def himmelblau_hess(p): + x, y = p + return np.array([[12*x**2 + 4*y - 42, 4*x + 4*y], + [4*x + 4*y, 4*x + 12*y**2 - 26]]) + + +himmelblau_x0 = [-0.27, -0.9] +himmelblau_xopt = [3, 2] +himmelblau_min = 0.0 + + +def test_minimize_multiple_constraints(): + # Regression test for gh-4240. 
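+    # (Added note: equivalent to maximising x[0] + x[1] + x[2] subject to
+    # 0.2*x[0] + 0.4*x[1] + 0.33*x[2] <= 25 with x[1], x[2] >= 0; the
+    # optimum spends the whole budget on x[0], giving [125, 0, 0].)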
+ def func(x): + return np.array([25 - 0.2 * x[0] - 0.4 * x[1] - 0.33 * x[2]]) + + def func1(x): + return np.array([x[1]]) + + def func2(x): + return np.array([x[2]]) + + cons = ({'type': 'ineq', 'fun': func}, + {'type': 'ineq', 'fun': func1}, + {'type': 'ineq', 'fun': func2}) + + def f(x): + return -1 * (x[0] + x[1] + x[2]) + + res = optimize.minimize(f, [0, 0, 0], method='SLSQP', constraints=cons) + assert_allclose(res.x, [125, 0, 0], atol=1e-10) + + +class TestOptimizeResultAttributes: + # Test that all minimizers return an OptimizeResult containing + # all the OptimizeResult attributes + def setup_method(self): + self.x0 = [5, 5] + self.func = optimize.rosen + self.jac = optimize.rosen_der + self.hess = optimize.rosen_hess + self.hessp = optimize.rosen_hess_prod + self.bounds = [(0., 10.), (0., 10.)] + + @pytest.mark.fail_slow(2) + def test_attributes_present(self): + attributes = ['nit', 'nfev', 'x', 'success', 'status', 'fun', + 'message'] + skip = {'cobyla': ['nit']} + for method in MINIMIZE_METHODS: + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, + ("Method .+ does not use (gradient|Hessian.*)" + " information")) + res = optimize.minimize(self.func, self.x0, method=method, + jac=self.jac, hess=self.hess, + hessp=self.hessp) + for attribute in attributes: + if method in skip and attribute in skip[method]: + continue + + assert hasattr(res, attribute) + assert attribute in dir(res) + + # gh13001, OptimizeResult.message should be a str + assert isinstance(res.message, str) + + +def f1(z, *params): + x, y = z + a, b, c, d, e, f, g, h, i, j, k, l, scale = params + return (a * x**2 + b * x * y + c * y**2 + d*x + e*y + f) + + +def f2(z, *params): + x, y = z + a, b, c, d, e, f, g, h, i, j, k, l, scale = params + return (-g*np.exp(-((x-h)**2 + (y-i)**2) / scale)) + + +def f3(z, *params): + x, y = z + a, b, c, d, e, f, g, h, i, j, k, l, scale = params + return (-j*np.exp(-((x-k)**2 + (y-l)**2) / scale)) + + +def brute_func(z, *params): + return f1(z, *params) + f2(z, *params) + f3(z, *params) + + +class TestBrute: + # Test the "brute force" method + def setup_method(self): + self.params = (2, 3, 7, 8, 9, 10, 44, -1, 2, 26, 1, -2, 0.5) + self.rranges = (slice(-4, 4, 0.25), slice(-4, 4, 0.25)) + self.solution = np.array([-1.05665192, 1.80834843]) + + def brute_func(self, z, *params): + # an instance method optimizing + return brute_func(z, *params) + + def test_brute(self): + # test fmin + resbrute = optimize.brute(brute_func, self.rranges, args=self.params, + full_output=True, finish=optimize.fmin) + assert_allclose(resbrute[0], self.solution, atol=1e-3) + assert_allclose(resbrute[1], brute_func(self.solution, *self.params), + atol=1e-3) + + # test minimize + resbrute = optimize.brute(brute_func, self.rranges, args=self.params, + full_output=True, + finish=optimize.minimize) + assert_allclose(resbrute[0], self.solution, atol=1e-3) + assert_allclose(resbrute[1], brute_func(self.solution, *self.params), + atol=1e-3) + + # test that brute can optimize an instance method (the other tests use + # a non-class based function + resbrute = optimize.brute(self.brute_func, self.rranges, + args=self.params, full_output=True, + finish=optimize.minimize) + assert_allclose(resbrute[0], self.solution, atol=1e-3) + + def test_1D(self): + # test that for a 1-D problem the test function is passed an array, + # not a scalar. 
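+        # (Added note: finish=None disables the polishing minimiser, so only
+        # the raw grid evaluation runs and f sees the grid points directly.)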
+ def f(x): + assert len(x.shape) == 1 + assert x.shape[0] == 1 + return x ** 2 + + optimize.brute(f, [(-1, 1)], Ns=3, finish=None) + + @pytest.mark.fail_slow(10) + def test_workers(self): + # check that parallel evaluation works + resbrute = optimize.brute(brute_func, self.rranges, args=self.params, + full_output=True, finish=None) + + resbrute1 = optimize.brute(brute_func, self.rranges, args=self.params, + full_output=True, finish=None, workers=2) + + assert_allclose(resbrute1[-1], resbrute[-1]) + assert_allclose(resbrute1[0], resbrute[0]) + + @pytest.mark.thread_unsafe + def test_runtime_warning(self, capsys): + rng = np.random.default_rng(1234) + + def func(z, *params): + return rng.random(1) * 1000 # never converged problem + + msg = "final optimization did not succeed.*|Maximum number of function eval.*" + with pytest.warns(RuntimeWarning, match=msg): + optimize.brute(func, self.rranges, args=self.params, disp=True) + + def test_coerce_args_param(self): + # optimize.brute should coerce non-iterable args to a tuple. + def f(x, *args): + return x ** args[0] + + resbrute = optimize.brute(f, (slice(-4, 4, .25),), args=2) + assert_allclose(resbrute, 0) + + +@pytest.mark.thread_unsafe +@pytest.mark.fail_slow(20) +def test_cobyla_threadsafe(): + + # Verify that cobyla is threadsafe. Will segfault if it is not. + + import concurrent.futures + import time + + def objective1(x): + time.sleep(0.1) + return x[0]**2 + + def objective2(x): + time.sleep(0.1) + return (x[0]-1)**2 + + min_method = "COBYLA" + + def minimizer1(): + return optimize.minimize(objective1, + [0.0], + method=min_method) + + def minimizer2(): + return optimize.minimize(objective2, + [0.0], + method=min_method) + + with concurrent.futures.ThreadPoolExecutor() as pool: + tasks = [] + tasks.append(pool.submit(minimizer1)) + tasks.append(pool.submit(minimizer2)) + for t in tasks: + t.result() + + +class TestIterationLimits: + # Tests that optimisation does not give up before trying requested + # number of iterations or evaluations. And that it does not succeed + # by exceeding the limits. 
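+    # (Added note: the call counter is kept in thread-local storage so the
+    # bookkeeping stays correct when tests run in parallel threads.)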
+ def setup_method(self): + self.funcalls = threading.local() + + def slow_func(self, v): + if not hasattr(self.funcalls, 'c'): + self.funcalls.c = 0 + self.funcalls.c += 1 + r, t = np.sqrt(v[0]**2+v[1]**2), np.arctan2(v[0], v[1]) + return np.sin(r*20 + t)+r*0.5 + + @pytest.mark.fail_slow(10) + def test_neldermead_limit(self): + self.check_limits("Nelder-Mead", 200) + + def test_powell_limit(self): + self.check_limits("powell", 1000) + + def check_limits(self, method, default_iters): + for start_v in [[0.1, 0.1], [1, 1], [2, 2]]: + for mfev in [50, 500, 5000]: + self.funcalls.c = 0 + res = optimize.minimize(self.slow_func, start_v, + method=method, + options={"maxfev": mfev}) + assert self.funcalls.c == res["nfev"] + if res["success"]: + assert res["nfev"] < mfev + else: + assert res["nfev"] >= mfev + for mit in [50, 500, 5000]: + res = optimize.minimize(self.slow_func, start_v, + method=method, + options={"maxiter": mit}) + if res["success"]: + assert res["nit"] <= mit + else: + assert res["nit"] >= mit + for mfev, mit in [[50, 50], [5000, 5000], [5000, np.inf]]: + self.funcalls.c = 0 + res = optimize.minimize(self.slow_func, start_v, + method=method, + options={"maxiter": mit, + "maxfev": mfev}) + assert self.funcalls.c == res["nfev"] + if res["success"]: + assert res["nfev"] < mfev and res["nit"] <= mit + else: + assert res["nfev"] >= mfev or res["nit"] >= mit + for mfev, mit in [[np.inf, None], [None, np.inf]]: + self.funcalls.c = 0 + res = optimize.minimize(self.slow_func, start_v, + method=method, + options={"maxiter": mit, + "maxfev": mfev}) + assert self.funcalls.c == res["nfev"] + if res["success"]: + if mfev is None: + assert res["nfev"] < default_iters*2 + else: + assert res["nit"] <= default_iters*2 + else: + assert (res["nfev"] >= default_iters*2 + or res["nit"] >= default_iters*2) + + +def test_result_x_shape_when_len_x_is_one(): + def fun(x): + return x * x + + def jac(x): + return 2. 
* x + + def hess(x): + return np.array([[2.]]) + + methods = ['Nelder-Mead', 'Powell', 'CG', 'BFGS', 'L-BFGS-B', 'TNC', + 'COBYLA', 'COBYQA', 'SLSQP'] + for method in methods: + res = optimize.minimize(fun, np.array([0.1]), method=method) + assert res.x.shape == (1,) + + # use jac + hess + methods = ['trust-constr', 'dogleg', 'trust-ncg', 'trust-exact', + 'trust-krylov', 'Newton-CG'] + for method in methods: + res = optimize.minimize(fun, np.array([0.1]), method=method, jac=jac, + hess=hess) + assert res.x.shape == (1,) + + +class FunctionWithGradient: + def __init__(self): + self.number_of_calls = threading.local() + + def __call__(self, x): + if not hasattr(self.number_of_calls, 'c'): + self.number_of_calls.c = 0 + self.number_of_calls.c += 1 + return np.sum(x**2), 2 * x + + +@pytest.fixture +def function_with_gradient(): + return FunctionWithGradient() + + +def test_memoize_jac_function_before_gradient(function_with_gradient): + memoized_function = MemoizeJac(function_with_gradient) + + x0 = np.array([1.0, 2.0]) + assert_allclose(memoized_function(x0), 5.0) + assert function_with_gradient.number_of_calls.c == 1 + + assert_allclose(memoized_function.derivative(x0), 2 * x0) + assert function_with_gradient.number_of_calls.c == 1, \ + "function is not recomputed " \ + "if gradient is requested after function value" + + assert_allclose( + memoized_function(2 * x0), 20.0, + err_msg="different input triggers new computation") + assert function_with_gradient.number_of_calls.c == 2, \ + "different input triggers new computation" + + +def test_memoize_jac_gradient_before_function(function_with_gradient): + memoized_function = MemoizeJac(function_with_gradient) + + x0 = np.array([1.0, 2.0]) + assert_allclose(memoized_function.derivative(x0), 2 * x0) + assert function_with_gradient.number_of_calls.c == 1 + + assert_allclose(memoized_function(x0), 5.0) + assert function_with_gradient.number_of_calls.c == 1, \ + "function is not recomputed " \ + "if function value is requested after gradient" + + assert_allclose( + memoized_function.derivative(2 * x0), 4 * x0, + err_msg="different input triggers new computation") + assert function_with_gradient.number_of_calls.c == 2, \ + "different input triggers new computation" + + +def test_memoize_jac_with_bfgs(function_with_gradient): + """ Tests that using MemoizedJac in combination with ScalarFunction + and BFGS does not lead to repeated function evaluations. + Tests changes made in response to GH11868. 
+ """ + memoized_function = MemoizeJac(function_with_gradient) + jac = memoized_function.derivative + hess = optimize.BFGS() + + x0 = np.array([1.0, 0.5]) + scalar_function = ScalarFunction( + memoized_function, x0, (), jac, hess, None, None) + assert function_with_gradient.number_of_calls.c == 1 + + scalar_function.fun(x0 + 0.1) + assert function_with_gradient.number_of_calls.c == 2 + + scalar_function.fun(x0 + 0.2) + assert function_with_gradient.number_of_calls.c == 3 + + +def test_gh12696(): + # Test that optimize doesn't throw warning gh-12696 + with assert_no_warnings(): + optimize.fminbound( + lambda x: np.array([x**2]), -np.pi, np.pi, disp=False) + + +# --- Test minimize with equal upper and lower bounds --- # + +def setup_test_equal_bounds(): + + rng = np.random.RandomState(0) + x0 = rng.rand(4) + lb = np.array([0, 2, -1, -1.0]) + ub = np.array([3, 2, 2, -1.0]) + i_eb = (lb == ub) + + def check_x(x, check_size=True, check_values=True): + if check_size: + assert x.size == 4 + if check_values: + assert_allclose(x[i_eb], lb[i_eb]) + + def func(x): + check_x(x) + return optimize.rosen(x) + + def grad(x): + check_x(x) + return optimize.rosen_der(x) + + def callback(x, *args): + check_x(x) + + def constraint1(x): + check_x(x, check_values=False) + return x[0:1] - 1 + + def jacobian1(x): + check_x(x, check_values=False) + dc = np.zeros_like(x) + dc[0] = 1 + return dc + + def constraint2(x): + check_x(x, check_values=False) + return x[2:3] - 0.5 + + def jacobian2(x): + check_x(x, check_values=False) + dc = np.zeros_like(x) + dc[2] = 1 + return dc + + c1a = NonlinearConstraint(constraint1, -np.inf, 0) + c1b = NonlinearConstraint(constraint1, -np.inf, 0, jacobian1) + c2a = NonlinearConstraint(constraint2, -np.inf, 0) + c2b = NonlinearConstraint(constraint2, -np.inf, 0, jacobian2) + + # test using the three methods that accept bounds, use derivatives, and + # have some trouble when bounds fix variables + methods = ('L-BFGS-B', 'SLSQP', 'TNC') + + # test w/out gradient, w/ gradient, and w/ combined objective/gradient + kwds = ({"fun": func, "jac": False}, + {"fun": func, "jac": grad}, + {"fun": (lambda x: (func(x), grad(x))), + "jac": True}) + + # test with both old- and new-style bounds + bound_types = (lambda lb, ub: list(zip(lb, ub)), + Bounds) + + # Test for many combinations of constraints w/ and w/out jacobian + # Pairs in format: (test constraints, reference constraints) + # (always use analytical jacobian in reference) + constraints = ((None, None), ([], []), + (c1a, c1b), (c2b, c2b), + ([c1b], [c1b]), ([c2a], [c2b]), + ([c1a, c2a], [c1b, c2b]), + ([c1a, c2b], [c1b, c2b]), + ([c1b, c2b], [c1b, c2b])) + + # test with and without callback function + callbacks = (None, callback) + + data = {"methods": methods, "kwds": kwds, "bound_types": bound_types, + "constraints": constraints, "callbacks": callbacks, + "lb": lb, "ub": ub, "x0": x0, "i_eb": i_eb} + + return data + + +eb_data = setup_test_equal_bounds() + + +# This test is about handling fixed variables, not the accuracy of the solvers +@pytest.mark.xfail_on_32bit("Failures due to floating point issues, not logic") +@pytest.mark.xfail(scipy.show_config(mode='dicts')['Compilers']['fortran']['name'] == + "intel-llvm", + reason="Failures due to floating point issues, not logic") +@pytest.mark.parametrize('method', eb_data["methods"]) +@pytest.mark.parametrize('kwds', eb_data["kwds"]) +@pytest.mark.parametrize('bound_type', eb_data["bound_types"]) +@pytest.mark.parametrize('constraints', eb_data["constraints"]) 
+@pytest.mark.parametrize('callback', eb_data["callbacks"]) +def test_equal_bounds(method, kwds, bound_type, constraints, callback): + """ + Tests that minimizers still work if (bounds.lb == bounds.ub).any() + gh12502 - Divide by zero in Jacobian numerical differentiation when + equality bounds constraints are used + """ + # GH-15051; slightly more skips than necessary; hopefully fixed by GH-14882 + if (platform.machine() == 'aarch64' and method == "TNC" + and kwds["jac"] is False and callback is not None): + pytest.skip('Tolerance violation on aarch') + + lb, ub = eb_data["lb"], eb_data["ub"] + x0, i_eb = eb_data["x0"], eb_data["i_eb"] + + test_constraints, reference_constraints = constraints + if test_constraints and not method == 'SLSQP': + pytest.skip('Only SLSQP supports nonlinear constraints') + # reference constraints always have analytical jacobian + # if test constraints are not the same, we'll need finite differences + fd_needed = (test_constraints != reference_constraints) + + bounds = bound_type(lb, ub) # old- or new-style + + kwds.update({"x0": x0, "method": method, "bounds": bounds, + "constraints": test_constraints, "callback": callback}) + res = optimize.minimize(**kwds) + + expected = optimize.minimize(optimize.rosen, x0, method=method, + jac=optimize.rosen_der, bounds=bounds, + constraints=reference_constraints) + + # compare the output of a solution with FD vs that of an analytic grad + assert res.success + assert_allclose(res.fun, expected.fun, rtol=1.5e-6) + assert_allclose(res.x, expected.x, rtol=5e-4) + + if fd_needed or kwds['jac'] is False: + expected.jac[i_eb] = np.nan + assert res.jac.shape[0] == 4 + assert_allclose(res.jac[i_eb], expected.jac[i_eb], rtol=1e-6) + + if not (kwds['jac'] or test_constraints or isinstance(bounds, Bounds)): + # compare the output to an equivalent FD minimization that doesn't + # need factorization + def fun(x): + new_x = np.array([np.nan, 2, np.nan, -1]) + new_x[[0, 2]] = x + return optimize.rosen(new_x) + + fd_res = optimize.minimize(fun, + x0[[0, 2]], + method=method, + bounds=bounds[::2]) + assert_allclose(res.fun, fd_res.fun) + # TODO this test should really be equivalent to factorized version + # above, down to res.nfev. However, testing found that when TNC is + # called with or without a callback the output is different. The two + # should be the same! This indicates that the TNC callback may be + # mutating something when it shouldn't. + assert_allclose(res.x[[0, 2]], fd_res.x, rtol=2e-6) + + +@pytest.mark.parametrize('method', eb_data["methods"]) +def test_all_bounds_equal(method): + # this only tests methods that have parameters factored out when lb==ub + # it does not test other methods that work with bounds + def f(x, p1=1): + return np.linalg.norm(x) + p1 + + bounds = [(1, 1), (2, 2)] + x0 = (1.0, 3.0) + res = optimize.minimize(f, x0, bounds=bounds, method=method) + assert res.success + assert_allclose(res.fun, f([1.0, 2.0])) + assert res.nfev == 1 + assert res.message == 'All independent variables were fixed by bounds.' 
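+    # (Added note: the all-variables-fixed shortcut must also pass extra
+    # objective `args` through, as checked next.)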
+ + args = (2,) + res = optimize.minimize(f, x0, bounds=bounds, method=method, args=args) + assert res.success + assert_allclose(res.fun, f([1.0, 2.0], 2)) + + if method.upper() == 'SLSQP': + def con(x): + return np.sum(x) + nlc = NonlinearConstraint(con, -np.inf, 0.0) + res = optimize.minimize( + f, x0, bounds=bounds, method=method, constraints=[nlc] + ) + assert res.success is False + assert_allclose(res.fun, f([1.0, 2.0])) + assert res.nfev == 1 + message = "All independent variables were fixed by bounds, but" + assert res.message.startswith(message) + + nlc = NonlinearConstraint(con, -np.inf, 4) + res = optimize.minimize( + f, x0, bounds=bounds, method=method, constraints=[nlc] + ) + assert res.success is True + assert_allclose(res.fun, f([1.0, 2.0])) + assert res.nfev == 1 + message = "All independent variables were fixed by bounds at values" + assert res.message.startswith(message) + + +def test_eb_constraints(): + # make sure constraint functions aren't overwritten when equal bounds + # are employed, and a parameter is factored out. GH14859 + def f(x): + return x[0]**3 + x[1]**2 + x[2]*x[3] + + def cfun(x): + return x[0] + x[1] + x[2] + x[3] - 40 + + constraints = [{'type': 'ineq', 'fun': cfun}] + + bounds = [(0, 20)] * 4 + bounds[1] = (5, 5) + optimize.minimize( + f, + x0=[1, 2, 3, 4], + method='SLSQP', + bounds=bounds, + constraints=constraints, + ) + assert constraints[0]['fun'] == cfun + + +def test_show_options(): + solver_methods = { + 'minimize': MINIMIZE_METHODS, + 'minimize_scalar': MINIMIZE_SCALAR_METHODS, + 'root': ROOT_METHODS, + 'root_scalar': ROOT_SCALAR_METHODS, + 'linprog': LINPROG_METHODS, + 'quadratic_assignment': QUADRATIC_ASSIGNMENT_METHODS, + } + for solver, methods in solver_methods.items(): + for method in methods: + # testing that `show_options` works without error + show_options(solver, method) + + unknown_solver_method = { + 'minimize': "ekki", # unknown method + 'maximize': "cg", # unknown solver + 'maximize_scalar': "ekki", # unknown solver and method + } + for solver, method in unknown_solver_method.items(): + # testing that `show_options` raises ValueError + assert_raises(ValueError, show_options, solver, method) + + +def test_bounds_with_list(): + # gh13501. Bounds created with lists weren't working for Powell. + bounds = optimize.Bounds(lb=[5., 5.], ub=[10., 10.]) + optimize.minimize( + optimize.rosen, x0=np.array([9, 9]), method='Powell', bounds=bounds + ) + + +def test_x_overwritten_user_function(): + # if the user overwrites the x-array in the user function it's likely + # that the minimizer stops working properly. 
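+    # (Added note: fquad and fquad_jac below modify `x` in place on purpose;
+    # the minimum at x = arange(len(x)) is only recovered if the solver keeps
+    # its own copy of the iterate intact.)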
+ # gh13740 + def fquad(x): + a = np.arange(np.size(x)) + x -= a + x *= x + return np.sum(x) + + def fquad_jac(x): + a = np.arange(np.size(x)) + x *= 2 + x -= 2 * a + return x + + def fquad_hess(x): + return np.eye(np.size(x)) * 2.0 + + meth_jac = [ + 'newton-cg', 'dogleg', 'trust-ncg', 'trust-exact', + 'trust-krylov', 'trust-constr' + ] + meth_hess = [ + 'dogleg', 'trust-ncg', 'trust-exact', 'trust-krylov', 'trust-constr' + ] + + x0 = np.ones(5) * 1.5 + + for meth in MINIMIZE_METHODS: + jac = None + hess = None + if meth in meth_jac: + jac = fquad_jac + if meth in meth_hess: + hess = fquad_hess + res = optimize.minimize(fquad, x0, method=meth, jac=jac, hess=hess) + assert_allclose(res.x, np.arange(np.size(x0)), atol=2e-4) + + +class TestGlobalOptimization: + + def test_optimize_result_attributes(self): + def func(x): + return x ** 2 + + # Note that `brute` solver does not return `OptimizeResult` + results = [optimize.basinhopping(func, x0=1), + optimize.differential_evolution(func, [(-4, 4)]), + optimize.shgo(func, [(-4, 4)]), + optimize.dual_annealing(func, [(-4, 4)]), + optimize.direct(func, [(-4, 4)]), + ] + + for result in results: + assert isinstance(result, optimize.OptimizeResult) + assert hasattr(result, "x") + assert hasattr(result, "success") + assert hasattr(result, "message") + assert hasattr(result, "fun") + assert hasattr(result, "nfev") + assert hasattr(result, "nit") + + +def test_approx_fprime(): + # check that approx_fprime (serviced by approx_derivative) works for + # jac and hess + g = optimize.approx_fprime(himmelblau_x0, himmelblau) + assert_allclose(g, himmelblau_grad(himmelblau_x0), rtol=5e-6) + + h = optimize.approx_fprime(himmelblau_x0, himmelblau_grad) + assert_allclose(h, himmelblau_hess(himmelblau_x0), rtol=5e-6) + + +def test_gh12594(): + # gh-12594 reported an error in `_linesearch_powell` and + # `_line_for_search` when `Bounds` was passed lists instead of arrays. + # Check that results are the same whether the inputs are lists or arrays. + + def f(x): + return x[0]**2 + (x[1] - 1)**2 + + bounds = Bounds(lb=[-10, -10], ub=[10, 10]) + res = optimize.minimize(f, x0=(0, 0), method='Powell', bounds=bounds) + bounds = Bounds(lb=np.array([-10, -10]), ub=np.array([10, 10])) + ref = optimize.minimize(f, x0=(0, 0), method='Powell', bounds=bounds) + + assert_allclose(res.fun, ref.fun) + assert_allclose(res.x, ref.x) + + +@pytest.mark.parametrize('method', ['Newton-CG', 'trust-constr']) +@pytest.mark.parametrize('sparse_type', [coo_matrix, csc_matrix, csr_matrix, + coo_array, csr_array, csc_array]) +def test_sparse_hessian(method, sparse_type): + # gh-8792 reported an error for minimization with `newton_cg` when `hess` + # returns a sparse matrix. Check that results are the same whether `hess` + # returns a dense or sparse matrix for optimization methods that accept + # sparse Hessian matrices. + + def sparse_rosen_hess(x): + return sparse_type(rosen_hess(x)) + + x0 = [2., 2.] 
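+    # (Added note: dense and sparse Hessian inputs should give the same
+    # solution and identical nfev/njev/nhev counts, as asserted below.)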
+ + res_sparse = optimize.minimize(rosen, x0, method=method, + jac=rosen_der, hess=sparse_rosen_hess) + res_dense = optimize.minimize(rosen, x0, method=method, + jac=rosen_der, hess=rosen_hess) + + assert_allclose(res_dense.fun, res_sparse.fun) + assert_allclose(res_dense.x, res_sparse.x) + assert res_dense.nfev == res_sparse.nfev + assert res_dense.njev == res_sparse.njev + assert res_dense.nhev == res_sparse.nhev diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_quadratic_assignment.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_quadratic_assignment.py new file mode 100644 index 0000000000000000000000000000000000000000..3f7f26158d8e7ddb33012db4dfc4030661aa75bb --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_quadratic_assignment.py @@ -0,0 +1,455 @@ +import pytest +import numpy as np +from numpy.random import default_rng +from scipy.optimize import quadratic_assignment, OptimizeWarning +from scipy.optimize._qap import _calc_score as _score +from numpy.testing import assert_equal, assert_, assert_warns + + +################ +# Common Tests # +################ + +def chr12c(): + A = [ + [0, 90, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [90, 0, 0, 23, 0, 0, 0, 0, 0, 0, 0, 0], + [10, 0, 0, 0, 43, 0, 0, 0, 0, 0, 0, 0], + [0, 23, 0, 0, 0, 88, 0, 0, 0, 0, 0, 0], + [0, 0, 43, 0, 0, 0, 26, 0, 0, 0, 0, 0], + [0, 0, 0, 88, 0, 0, 0, 16, 0, 0, 0, 0], + [0, 0, 0, 0, 26, 0, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 16, 0, 0, 0, 96, 0, 0], + [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 29, 0], + [0, 0, 0, 0, 0, 0, 0, 96, 0, 0, 0, 37], + [0, 0, 0, 0, 0, 0, 0, 0, 29, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 37, 0, 0], + ] + B = [ + [0, 36, 54, 26, 59, 72, 9, 34, 79, 17, 46, 95], + [36, 0, 73, 35, 90, 58, 30, 78, 35, 44, 79, 36], + [54, 73, 0, 21, 10, 97, 58, 66, 69, 61, 54, 63], + [26, 35, 21, 0, 93, 12, 46, 40, 37, 48, 68, 85], + [59, 90, 10, 93, 0, 64, 5, 29, 76, 16, 5, 76], + [72, 58, 97, 12, 64, 0, 96, 55, 38, 54, 0, 34], + [9, 30, 58, 46, 5, 96, 0, 83, 35, 11, 56, 37], + [34, 78, 66, 40, 29, 55, 83, 0, 44, 12, 15, 80], + [79, 35, 69, 37, 76, 38, 35, 44, 0, 64, 39, 33], + [17, 44, 61, 48, 16, 54, 11, 12, 64, 0, 70, 86], + [46, 79, 54, 68, 5, 0, 56, 15, 39, 70, 0, 18], + [95, 36, 63, 85, 76, 34, 37, 80, 33, 86, 18, 0], + ] + A, B = np.array(A), np.array(B) + n = A.shape[0] + + opt_perm = np.array([7, 5, 1, 3, 10, 4, 8, 6, 9, 11, 2, 12]) - [1] * n + + return A, B, opt_perm + + +@pytest.mark.filterwarnings("ignore:The NumPy global RNG was seeded by calling") +class QAPCommonTests: + """ + Base class for `quadratic_assignment` tests. 
+ """ + # Test global optima of problem from Umeyama IVB + # https://pcl.sitehost.iu.edu/rgoldsto/papers/weighted%20graph%20match2.pdf + # Graph matching maximum is in the paper + # QAP minimum determined by brute force + def test_accuracy_1(self): + # besides testing accuracy, check that A and B can be lists + rng = np.random.default_rng(4358764578823597324) + + A = [[0, 3, 4, 2], + [0, 0, 1, 2], + [1, 0, 0, 1], + [0, 0, 1, 0]] + + B = [[0, 4, 2, 4], + [0, 0, 1, 0], + [0, 2, 0, 2], + [0, 1, 2, 0]] + + res = quadratic_assignment(A, B, method=self.method, + options={"rng": rng, "maximize": False}) + + assert_equal(res.fun, 10) + assert_equal(res.col_ind, np.array([1, 2, 3, 0])) + + res = quadratic_assignment(A, B, method=self.method, + options={"rng": rng, "maximize": True}) + + if self.method == 'faq': + # Global optimum is 40, but FAQ gets 37 + assert_equal(res.fun, 37) + assert_equal(res.col_ind, np.array([0, 2, 3, 1])) + else: + assert_equal(res.fun, 40) + assert_equal(res.col_ind, np.array([0, 3, 1, 2])) + + quadratic_assignment(A, B, method=self.method, + options={"rng": rng, "maximize": True}) + + # Test global optima of problem from Umeyama IIIB + # https://pcl.sitehost.iu.edu/rgoldsto/papers/weighted%20graph%20match2.pdf + # Graph matching maximum is in the paper + # QAP minimum determined by brute force + def test_accuracy_2(self): + rng = np.random.default_rng(4358764578823597324) + + A = np.array([[0, 5, 8, 6], + [5, 0, 5, 1], + [8, 5, 0, 2], + [6, 1, 2, 0]]) + + B = np.array([[0, 1, 8, 4], + [1, 0, 5, 2], + [8, 5, 0, 5], + [4, 2, 5, 0]]) + + res = quadratic_assignment(A, B, method=self.method, + options={"rng": rng, "maximize": False}) + + if self.method == 'faq': + # Global optimum is 176, but FAQ gets 178 + assert_equal(res.fun, 178) + assert_equal(res.col_ind, np.array([1, 0, 3, 2])) + else: + assert_equal(res.fun, 176) + assert_equal(res.col_ind, np.array([1, 2, 3, 0])) + + res = quadratic_assignment(A, B, method=self.method, + options={"rng": rng, "maximize": True}) + + assert_equal(res.fun, 286) + assert_equal(res.col_ind, np.array([2, 3, 0, 1])) + + def test_accuracy_3(self): + rng = np.random.default_rng(4358764578823597324) + A, B, opt_perm = chr12c() + + # basic minimization + res = quadratic_assignment(A, B, method=self.method, + options={"rng": rng}) + assert_(11156 <= res.fun < 21000) + assert_equal(res.fun, _score(A, B, res.col_ind)) + + # basic maximization + res = quadratic_assignment(A, B, method=self.method, + options={"rng": rng, 'maximize': True}) + assert_(74000 <= res.fun < 85000) + assert_equal(res.fun, _score(A, B, res.col_ind)) + + # check ofv with strictly partial match + seed_cost = np.array([4, 8, 10]) + seed = np.asarray([seed_cost, opt_perm[seed_cost]]).T + res = quadratic_assignment(A, B, method=self.method, + options={'partial_match': seed, "rng": rng}) + assert_(11156 <= res.fun < 21000) + assert_equal(res.col_ind[seed_cost], opt_perm[seed_cost]) + + # check performance when partial match is the global optimum + seed = np.asarray([np.arange(len(A)), opt_perm]).T + res = quadratic_assignment(A, B, method=self.method, + options={'partial_match': seed, "rng": rng}) + assert_equal(res.col_ind, seed[:, 1].T) + assert_equal(res.fun, 11156) + assert_equal(res.nit, 0) + + # check performance with zero sized matrix inputs + empty = np.empty((0, 0)) + res = quadratic_assignment(empty, empty, method=self.method, + options={"rng": rng}) + assert_equal(res.nit, 0) + assert_equal(res.fun, 0) + + @pytest.mark.thread_unsafe + def test_unknown_options(self): + 
A, B, opt_perm = chr12c() + + def f(): + quadratic_assignment(A, B, method=self.method, + options={"ekki-ekki": True}) + assert_warns(OptimizeWarning, f) + + @pytest.mark.thread_unsafe + def test_deprecation_future_warnings(self): + # May be removed after SPEC-7 transition is complete in SciPy 1.17 + A = np.arange(16).reshape((4, 4)) + B = np.arange(16).reshape((4, 4)) + + with pytest.warns(DeprecationWarning, match="Use of `RandomState`*"): + rng = np.random.RandomState(0) + quadratic_assignment(A, B, method=self.method, + options={"rng": rng, "maximize": False}) + + with pytest.warns(FutureWarning, match="The NumPy global RNG was seeded*"): + np.random.seed(0) + quadratic_assignment(A, B, method=self.method, + options={"maximize": False}) + + with pytest.warns(FutureWarning, match="The behavior when the rng option*"): + quadratic_assignment(A, B, method=self.method, + options={"rng": 0, "maximize": False}) + + +class TestFAQ(QAPCommonTests): + method = "faq" + + def test_options(self): + # cost and distance matrices of QAPLIB instance chr12c + rng = np.random.default_rng(4358764578823597324) + + A, B, opt_perm = chr12c() + n = len(A) + + # check that max_iter is obeying with low input value + res = quadratic_assignment(A, B, options={'maxiter': 5}) + assert_equal(res.nit, 5) + + # test with shuffle + res = quadratic_assignment(A, B, options={'shuffle_input': True}) + assert_(11156 <= res.fun < 21000) + + # test with randomized init + res = quadratic_assignment(A, B, options={'rng': rng, 'P0': "randomized"}) + assert_(11156 <= res.fun < 21000) + + # check with specified P0 + K = np.ones((n, n)) / float(n) + K = _doubly_stochastic(K) + res = quadratic_assignment(A, B, options={'P0': K}) + assert_(11156 <= res.fun < 21000) + + def test_specific_input_validation(self): + + A = np.identity(2) + B = A + + # method is implicitly faq + + # ValueError Checks: making sure single value parameters are of + # correct value + with pytest.raises(ValueError, match="Invalid 'P0' parameter"): + quadratic_assignment(A, B, options={'P0': "random"}) + with pytest.raises( + ValueError, match="'maxiter' must be a positive integer"): + quadratic_assignment(A, B, options={'maxiter': -1}) + with pytest.raises(ValueError, match="'tol' must be a positive float"): + quadratic_assignment(A, B, options={'tol': -1}) + + # TypeError Checks: making sure single value parameters are of + # correct type + with pytest.raises(TypeError): + quadratic_assignment(A, B, options={'maxiter': 1.5}) + + # test P0 matrix input + with pytest.raises( + ValueError, + match="`P0` matrix must have shape m' x m', where m'=n-m"): + quadratic_assignment( + np.identity(4), np.identity(4), + options={'P0': np.ones((3, 3))} + ) + + K = [[0.4, 0.2, 0.3], + [0.3, 0.6, 0.2], + [0.2, 0.2, 0.7]] + # matrix that isn't quite doubly stochastic + with pytest.raises( + ValueError, match="`P0` matrix must be doubly stochastic"): + quadratic_assignment( + np.identity(3), np.identity(3), options={'P0': K} + ) + + +class Test2opt(QAPCommonTests): + method = "2opt" + + def test_deterministic(self): + n = 20 + rng = default_rng(51982908) + A = rng.random(size=(n, n)) + B = rng.random(size=(n, n)) + res1 = quadratic_assignment(A, B, method=self.method, options={'rng': rng}) + + rng = default_rng(51982908) + A = rng.random(size=(n, n)) + B = rng.random(size=(n, n)) + res2 = quadratic_assignment(A, B, method=self.method, options={'rng': rng}) + + assert_equal(res1.nit, res2.nit) + + def test_partial_guess(self): + n = 5 + rng = 
np.random.default_rng(4358764578823597324) + + A = rng.random(size=(n, n)) + B = rng.random(size=(n, n)) + + res1 = quadratic_assignment(A, B, method=self.method, + options={'rng': rng}) + guess = np.array([np.arange(5), res1.col_ind]).T + res2 = quadratic_assignment(A, B, method=self.method, + options={'rng': rng, 'partial_guess': guess}) + fix = [2, 4] + match = np.array([np.arange(5)[fix], res1.col_ind[fix]]).T + res3 = quadratic_assignment(A, B, method=self.method, + options={'rng': rng, 'partial_guess': guess, + 'partial_match': match}) + assert_(res1.nit != n*(n+1)/2) + assert_equal(res2.nit, n*(n+1)/2) # tests each swap exactly once + assert_equal(res3.nit, (n-2)*(n-1)/2) # tests free swaps exactly once + + def test_specific_input_validation(self): + # can't have more seed nodes than cost/dist nodes + _rm = _range_matrix + with pytest.raises( + ValueError, + match="`partial_guess` can have only as many entries as"): + quadratic_assignment(np.identity(3), np.identity(3), + method=self.method, + options={'partial_guess': _rm(5, 2)}) + # test for only two seed columns + with pytest.raises( + ValueError, match="`partial_guess` must have two columns"): + quadratic_assignment( + np.identity(3), np.identity(3), method=self.method, + options={'partial_guess': _range_matrix(2, 3)} + ) + # test that seed has no more than two dimensions + with pytest.raises( + ValueError, match="`partial_guess` must have exactly two"): + quadratic_assignment( + np.identity(3), np.identity(3), method=self.method, + options={'partial_guess': np.random.rand(3, 2, 2)} + ) + # seeds cannot be negative valued + with pytest.raises( + ValueError, match="`partial_guess` must contain only pos"): + quadratic_assignment( + np.identity(3), np.identity(3), method=self.method, + options={'partial_guess': -1 * _range_matrix(2, 2)} + ) + # seeds can't have values greater than number of nodes + with pytest.raises( + ValueError, + match="`partial_guess` entries must be less than number"): + quadratic_assignment( + np.identity(5), np.identity(5), method=self.method, + options={'partial_guess': 2 * _range_matrix(4, 2)} + ) + # columns of seed matrix must be unique + with pytest.raises( + ValueError, + match="`partial_guess` column entries must be unique"): + quadratic_assignment( + np.identity(3), np.identity(3), method=self.method, + options={'partial_guess': np.ones((2, 2))} + ) + + +@pytest.mark.filterwarnings("ignore:The NumPy global RNG was seeded by calling") +class TestQAPOnce: + + # these don't need to be repeated for each method + def test_common_input_validation(self): + rng = default_rng(12349038) + # test that non square matrices return error + with pytest.raises(ValueError, match="`A` must be square"): + quadratic_assignment( + rng.random((3, 4)), + rng.random((3, 3)), + ) + with pytest.raises(ValueError, match="`B` must be square"): + quadratic_assignment( + rng.random((3, 3)), + rng.random((3, 4)), + ) + # test that cost and dist matrices have no more than two dimensions + with pytest.raises( + ValueError, match="`A` and `B` must have exactly two"): + quadratic_assignment( + rng.random((3, 3, 3)), + rng.random((3, 3, 3)), + ) + # test that cost and dist matrices of different sizes return error + with pytest.raises( + ValueError, + match="`A` and `B` matrices must be of equal size"): + quadratic_assignment( + rng.random((3, 3)), + rng.random((4, 4)), + ) + # can't have more seed nodes than cost/dist nodes + _rm = _range_matrix + with pytest.raises( + ValueError, + match="`partial_match` can have only as many seeds 
as"): + quadratic_assignment(np.identity(3), np.identity(3), + options={'partial_match': _rm(5, 2)}) + # test for only two seed columns + with pytest.raises( + ValueError, match="`partial_match` must have two columns"): + quadratic_assignment( + np.identity(3), np.identity(3), + options={'partial_match': _range_matrix(2, 3)} + ) + # test that seed has no more than two dimensions + with pytest.raises( + ValueError, match="`partial_match` must have exactly two"): + quadratic_assignment( + np.identity(3), np.identity(3), + options={'partial_match': np.random.rand(3, 2, 2)} + ) + # seeds cannot be negative valued + with pytest.raises( + ValueError, match="`partial_match` must contain only pos"): + quadratic_assignment( + np.identity(3), np.identity(3), + options={'partial_match': -1 * _range_matrix(2, 2)} + ) + # seeds can't have values greater than number of nodes + with pytest.raises( + ValueError, + match="`partial_match` entries must be less than number"): + quadratic_assignment( + np.identity(5), np.identity(5), + options={'partial_match': 2 * _range_matrix(4, 2)} + ) + # columns of seed matrix must be unique + with pytest.raises( + ValueError, + match="`partial_match` column entries must be unique"): + quadratic_assignment( + np.identity(3), np.identity(3), + options={'partial_match': np.ones((2, 2))} + ) + + +def _range_matrix(a, b): + mat = np.zeros((a, b)) + for i in range(b): + mat[:, i] = np.arange(a) + return mat + + +def _doubly_stochastic(P, tol=1e-3): + # cleaner implementation of btaba/sinkhorn_knopp + + max_iter = 1000 + c = 1 / P.sum(axis=0) + r = 1 / (P @ c) + P_eps = P + + for it in range(max_iter): + if ((np.abs(P_eps.sum(axis=1) - 1) < tol).all() and + (np.abs(P_eps.sum(axis=0) - 1) < tol).all()): + # All column/row sums ~= 1 within threshold + break + + c = 1 / (r @ P) + r = 1 / (P @ c) + P_eps = r[:, None] * P * c + + return P_eps diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_regression.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_regression.py new file mode 100644 index 0000000000000000000000000000000000000000..44916ba96293db19756b8222422e76945aa48ebb --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_regression.py @@ -0,0 +1,40 @@ +"""Regression tests for optimize. 
+ +""" +import numpy as np +from numpy.testing import assert_almost_equal +from pytest import raises as assert_raises + +import scipy.optimize + + +class TestRegression: + + def test_newton_x0_is_0(self): + # Regression test for gh-1601 + tgt = 1 + res = scipy.optimize.newton(lambda x: x - 1, 0) + assert_almost_equal(res, tgt) + + def test_newton_integers(self): + # Regression test for gh-1741 + root = scipy.optimize.newton(lambda x: x**2 - 1, x0=2, + fprime=lambda x: 2*x) + assert_almost_equal(root, 1.0) + + def test_lmdif_errmsg(self): + # This shouldn't cause a crash on Python 3 + class SomeError(Exception): + pass + counter = [0] + + def func(x): + counter[0] += 1 + if counter[0] < 3: + return x**2 - np.array([9, 10, 11]) + else: + raise SomeError() + assert_raises(SomeError, + scipy.optimize.leastsq, + func, [1, 2, 3]) + diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_slsqp.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_slsqp.py new file mode 100644 index 0000000000000000000000000000000000000000..45216aa296b56a6a71b89c994e8fc360b826ba00 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_slsqp.py @@ -0,0 +1,613 @@ +""" +Unit test for SLSQP optimization. +""" +from numpy.testing import (assert_, assert_array_almost_equal, + assert_allclose, assert_equal) +from pytest import raises as assert_raises +import pytest +import numpy as np +import scipy + +from scipy.optimize import fmin_slsqp, minimize, Bounds, NonlinearConstraint + + +class MyCallBack: + """pass a custom callback function + + This makes sure it's being used. + """ + def __init__(self): + self.been_called = False + self.ncalls = 0 + + def __call__(self, x): + self.been_called = True + self.ncalls += 1 + + +class TestSLSQP: + """ + Test SLSQP algorithm using Example 14.4 from Numerical Methods for + Engineers by Steven Chapra and Raymond Canale. + This example maximizes the function f(x) = 2*x*y + 2*x - x**2 - 2*y**2, + which has a maximum at x=2, y=1. + """ + def setup_method(self): + self.opts = {'disp': False} + + def fun(self, d, sign=1.0): + """ + Arguments: + d - A list of two elements, where d[0] represents x and d[1] represents y + in the following equation. + sign - A multiplier for f. Since we want to optimize it, and the SciPy + optimizers can only minimize functions, we need to multiply it by + -1 to achieve the desired solution + Returns: + 2*x*y + 2*x - x**2 - 2*y**2 + + """ + x = d[0] + y = d[1] + return sign*(2*x*y + 2*x - x**2 - 2*y**2) + + def jac(self, d, sign=1.0): + """ + This is the derivative of fun, returning a NumPy array + representing df/dx and df/dy. 
+ + """ + x = d[0] + y = d[1] + dfdx = sign*(-2*x + 2*y + 2) + dfdy = sign*(2*x - 4*y) + return np.array([dfdx, dfdy], float) + + def fun_and_jac(self, d, sign=1.0): + return self.fun(d, sign), self.jac(d, sign) + + def f_eqcon(self, x, sign=1.0): + """ Equality constraint """ + return np.array([x[0] - x[1]]) + + def fprime_eqcon(self, x, sign=1.0): + """ Equality constraint, derivative """ + return np.array([[1, -1]]) + + def f_eqcon_scalar(self, x, sign=1.0): + """ Scalar equality constraint """ + return self.f_eqcon(x, sign)[0] + + def fprime_eqcon_scalar(self, x, sign=1.0): + """ Scalar equality constraint, derivative """ + return self.fprime_eqcon(x, sign)[0].tolist() + + def f_ieqcon(self, x, sign=1.0): + """ Inequality constraint """ + return np.array([x[0] - x[1] - 1.0]) + + def fprime_ieqcon(self, x, sign=1.0): + """ Inequality constraint, derivative """ + return np.array([[1, -1]]) + + def f_ieqcon2(self, x): + """ Vector inequality constraint """ + return np.asarray(x) + + def fprime_ieqcon2(self, x): + """ Vector inequality constraint, derivative """ + return np.identity(x.shape[0]) + + # minimize + def test_minimize_unbounded_approximated(self): + # Minimize, method='SLSQP': unbounded, approximated jacobian. + jacs = [None, False, '2-point', '3-point'] + for jac in jacs: + res = minimize(self.fun, [-1.0, 1.0], args=(-1.0, ), + jac=jac, method='SLSQP', + options=self.opts) + assert_(res['success'], res['message']) + assert_allclose(res.x, [2, 1]) + + def test_minimize_unbounded_given(self): + # Minimize, method='SLSQP': unbounded, given Jacobian. + res = minimize(self.fun, [-1.0, 1.0], args=(-1.0, ), + jac=self.jac, method='SLSQP', options=self.opts) + assert_(res['success'], res['message']) + assert_allclose(res.x, [2, 1]) + + def test_minimize_bounded_approximated(self): + # Minimize, method='SLSQP': bounded, approximated jacobian. + jacs = [None, False, '2-point', '3-point'] + for jac in jacs: + with np.errstate(invalid='ignore'): + res = minimize(self.fun, [-1.0, 1.0], args=(-1.0, ), + jac=jac, + bounds=((2.5, None), (None, 0.5)), + method='SLSQP', options=self.opts) + assert_(res['success'], res['message']) + assert_allclose(res.x, [2.5, 0.5]) + assert_(2.5 <= res.x[0]) + assert_(res.x[1] <= 0.5) + + def test_minimize_unbounded_combined(self): + # Minimize, method='SLSQP': unbounded, combined function and Jacobian. + res = minimize(self.fun_and_jac, [-1.0, 1.0], args=(-1.0, ), + jac=True, method='SLSQP', options=self.opts) + assert_(res['success'], res['message']) + assert_allclose(res.x, [2, 1]) + + def test_minimize_equality_approximated(self): + # Minimize with method='SLSQP': equality constraint, approx. jacobian. + jacs = [None, False, '2-point', '3-point'] + for jac in jacs: + res = minimize(self.fun, [-1.0, 1.0], args=(-1.0, ), + jac=jac, + constraints={'type': 'eq', + 'fun': self.f_eqcon, + 'args': (-1.0, )}, + method='SLSQP', options=self.opts) + assert_(res['success'], res['message']) + assert_allclose(res.x, [1, 1]) + + def test_minimize_equality_given(self): + # Minimize with method='SLSQP': equality constraint, given Jacobian. + res = minimize(self.fun, [-1.0, 1.0], jac=self.jac, + method='SLSQP', args=(-1.0,), + constraints={'type': 'eq', 'fun':self.f_eqcon, + 'args': (-1.0, )}, + options=self.opts) + assert_(res['success'], res['message']) + assert_allclose(res.x, [1, 1]) + + def test_minimize_equality_given2(self): + # Minimize with method='SLSQP': equality constraint, given Jacobian + # for fun and const. 
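+ # Supplying a 'jac' entry inside the constraint dictionary lets SLSQP use the analytic constraint gradient instead of a finite-difference estimate.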
+ res = minimize(self.fun, [-1.0, 1.0], method='SLSQP', + jac=self.jac, args=(-1.0,), + constraints={'type': 'eq', + 'fun': self.f_eqcon, + 'args': (-1.0, ), + 'jac': self.fprime_eqcon}, + options=self.opts) + assert_(res['success'], res['message']) + assert_allclose(res.x, [1, 1]) + + def test_minimize_equality_given_cons_scalar(self): + # Minimize with method='SLSQP': scalar equality constraint, given + # Jacobian for fun and const. + res = minimize(self.fun, [-1.0, 1.0], method='SLSQP', + jac=self.jac, args=(-1.0,), + constraints={'type': 'eq', + 'fun': self.f_eqcon_scalar, + 'args': (-1.0, ), + 'jac': self.fprime_eqcon_scalar}, + options=self.opts) + assert_(res['success'], res['message']) + assert_allclose(res.x, [1, 1]) + + def test_minimize_inequality_given(self): + # Minimize with method='SLSQP': inequality constraint, given Jacobian. + res = minimize(self.fun, [-1.0, 1.0], method='SLSQP', + jac=self.jac, args=(-1.0, ), + constraints={'type': 'ineq', + 'fun': self.f_ieqcon, + 'args': (-1.0, )}, + options=self.opts) + assert_(res['success'], res['message']) + assert_allclose(res.x, [2, 1], atol=1e-3) + + def test_minimize_inequality_given_vector_constraints(self): + # Minimize with method='SLSQP': vector inequality constraint, given + # Jacobian. + res = minimize(self.fun, [-1.0, 1.0], jac=self.jac, + method='SLSQP', args=(-1.0,), + constraints={'type': 'ineq', + 'fun': self.f_ieqcon2, + 'jac': self.fprime_ieqcon2}, + options=self.opts) + assert_(res['success'], res['message']) + assert_allclose(res.x, [2, 1]) + + def test_minimize_bounded_constraint(self): + # when the constraint makes the solver go up against a parameter + # bound make sure that the numerical differentiation of the + # jacobian doesn't try to exceed that bound using a finite difference. + # gh11403 + def c(x): + assert 0 <= x[0] <= 1 and 0 <= x[1] <= 1, x + return x[0] ** 0.5 + x[1] + + def f(x): + assert 0 <= x[0] <= 1 and 0 <= x[1] <= 1, x + return -x[0] ** 2 + x[1] ** 2 + + cns = [NonlinearConstraint(c, 0, 1.5)] + x0 = np.asarray([0.9, 0.5]) + bnd = Bounds([0., 0.], [1.0, 1.0]) + minimize(f, x0, method='SLSQP', bounds=bnd, constraints=cns) + + def test_minimize_bound_equality_given2(self): + # Minimize with method='SLSQP': bounds, eq. const., given jac. for + # fun. and const. + res = minimize(self.fun, [-1.0, 1.0], method='SLSQP', + jac=self.jac, args=(-1.0, ), + bounds=[(-0.8, 1.), (-1, 0.8)], + constraints={'type': 'eq', + 'fun': self.f_eqcon, + 'args': (-1.0, ), + 'jac': self.fprime_eqcon}, + options=self.opts) + assert_(res['success'], res['message']) + assert_allclose(res.x, [0.8, 0.8], atol=1e-3) + assert_(-0.8 <= res.x[0] <= 1) + assert_(-1 <= res.x[1] <= 0.8) + + # fmin_slsqp + def test_unbounded_approximated(self): + # SLSQP: unbounded, approximated Jacobian. + res = fmin_slsqp(self.fun, [-1.0, 1.0], args=(-1.0, ), + iprint = 0, full_output = 1) + x, fx, its, imode, smode = res + assert_(imode == 0, imode) + assert_array_almost_equal(x, [2, 1]) + + def test_unbounded_given(self): + # SLSQP: unbounded, given Jacobian. + res = fmin_slsqp(self.fun, [-1.0, 1.0], args=(-1.0, ), + fprime = self.jac, iprint = 0, + full_output = 1) + x, fx, its, imode, smode = res + assert_(imode == 0, imode) + assert_array_almost_equal(x, [2, 1]) + + def test_equality_approximated(self): + # SLSQP: equality constraint, approximated Jacobian. 
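+ # Neither 'fprime' nor 'fprime_eqcons' is supplied here, so both the objective and the constraint gradients are approximated by finite differences.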
+ res = fmin_slsqp(self.fun,[-1.0,1.0], args=(-1.0,), + eqcons = [self.f_eqcon], + iprint = 0, full_output = 1) + x, fx, its, imode, smode = res + assert_(imode == 0, imode) + assert_array_almost_equal(x, [1, 1]) + + def test_equality_given(self): + # SLSQP: equality constraint, given Jacobian. + res = fmin_slsqp(self.fun, [-1.0, 1.0], + fprime=self.jac, args=(-1.0,), + eqcons = [self.f_eqcon], iprint = 0, + full_output = 1) + x, fx, its, imode, smode = res + assert_(imode == 0, imode) + assert_array_almost_equal(x, [1, 1]) + + def test_equality_given2(self): + # SLSQP: equality constraint, given Jacobian for fun and const. + res = fmin_slsqp(self.fun, [-1.0, 1.0], + fprime=self.jac, args=(-1.0,), + f_eqcons = self.f_eqcon, + fprime_eqcons = self.fprime_eqcon, + iprint = 0, + full_output = 1) + x, fx, its, imode, smode = res + assert_(imode == 0, imode) + assert_array_almost_equal(x, [1, 1]) + + def test_inequality_given(self): + # SLSQP: inequality constraint, given Jacobian. + res = fmin_slsqp(self.fun, [-1.0, 1.0], + fprime=self.jac, args=(-1.0, ), + ieqcons = [self.f_ieqcon], + iprint = 0, full_output = 1) + x, fx, its, imode, smode = res + assert_(imode == 0, imode) + assert_array_almost_equal(x, [2, 1], decimal=3) + + def test_bound_equality_given2(self): + # SLSQP: bounds, eq. const., given jac. for fun. and const. + res = fmin_slsqp(self.fun, [-1.0, 1.0], + fprime=self.jac, args=(-1.0, ), + bounds = [(-0.8, 1.), (-1, 0.8)], + f_eqcons = self.f_eqcon, + fprime_eqcons = self.fprime_eqcon, + iprint = 0, full_output = 1) + x, fx, its, imode, smode = res + assert_(imode == 0, imode) + assert_array_almost_equal(x, [0.8, 0.8], decimal=3) + assert_(-0.8 <= x[0] <= 1) + assert_(-1 <= x[1] <= 0.8) + + def test_scalar_constraints(self): + # Regression test for gh-2182 + x = fmin_slsqp(lambda z: z**2, [3.], + ieqcons=[lambda z: z[0] - 1], + iprint=0) + assert_array_almost_equal(x, [1.]) + + x = fmin_slsqp(lambda z: z**2, [3.], + f_ieqcons=lambda z: [z[0] - 1], + iprint=0) + assert_array_almost_equal(x, [1.]) + + def test_integer_bounds(self): + # This should not raise an exception + fmin_slsqp(lambda z: z**2 - 1, [0], bounds=[[0, 1]], iprint=0) + + def test_array_bounds(self): + # NumPy used to treat n-dimensional 1-element arrays as scalars + # in some cases. The handling of `bounds` by `fmin_slsqp` still + # supports this behavior. + bounds = [(-np.inf, np.inf), (np.array([2]), np.array([3]))] + x = fmin_slsqp(lambda z: np.sum(z**2 - 1), [2.5, 2.5], bounds=bounds, + iprint=0) + assert_array_almost_equal(x, [0, 2]) + + def test_obj_must_return_scalar(self): + # Regression test for Github Issue #5433 + # If objective function does not return a scalar, raises ValueError + with assert_raises(ValueError): + fmin_slsqp(lambda x: [0, 1], [1, 2, 3]) + + def test_obj_returns_scalar_in_list(self): + # Test for Github Issue #5433 and PR #6691 + # Objective function should be able to return length-1 Python list + # containing the scalar + fmin_slsqp(lambda x: [0], [1, 2, 3], iprint=0) + + def test_callback(self): + # Minimize, method='SLSQP': unbounded, approximated jacobian. 
Check for callback + callback = MyCallBack() + res = minimize(self.fun, [-1.0, 1.0], args=(-1.0, ), + method='SLSQP', callback=callback, options=self.opts) + assert_(res['success'], res['message']) + assert_(callback.been_called) + assert_equal(callback.ncalls, res['nit']) + + def test_inconsistent_linearization(self): + # SLSQP must be able to solve this problem, even if the + # linearized problem at the starting point is infeasible. + + # Linearized constraints are + # + # 2*x0[0]*x[0] >= 1 + # + # At x0 = [0, 1], the second constraint is clearly infeasible. + # This triggers a call with n2==1 in the LSQ subroutine. + x = [0, 1] + def f1(x): + return x[0] + x[1] - 2 + def f2(x): + return x[0] ** 2 - 1 + sol = minimize( + lambda x: x[0]**2 + x[1]**2, + x, + constraints=({'type':'eq','fun': f1}, + {'type':'ineq','fun': f2}), + bounds=((0,None), (0,None)), + method='SLSQP') + x = sol.x + + assert_allclose(f1(x), 0, atol=1e-8) + assert_(f2(x) >= -1e-8) + assert_(sol.success, sol) + + def test_regression_5743(self): + # SLSQP must not indicate success for this problem, + # which is infeasible. + x = [1, 2] + sol = minimize( + lambda x: x[0]**2 + x[1]**2, + x, + constraints=({'type':'eq','fun': lambda x: x[0]+x[1]-1}, + {'type':'ineq','fun': lambda x: x[0]-2}), + bounds=((0,None), (0,None)), + method='SLSQP') + assert_(not sol.success, sol) + + def test_gh_6676(self): + def func(x): + return (x[0] - 1)**2 + 2*(x[1] - 1)**2 + 0.5*(x[2] - 1)**2 + + sol = minimize(func, [0, 0, 0], method='SLSQP') + assert_(sol.jac.shape == (3,)) + + def test_invalid_bounds(self): + # Raise correct error when lower bound is greater than upper bound. + # See Github issue 6875. + bounds_list = [ + ((1, 2), (2, 1)), + ((2, 1), (1, 2)), + ((2, 1), (2, 1)), + ((np.inf, 0), (np.inf, 0)), + ((1, -np.inf), (0, 1)), + ] + for bounds in bounds_list: + with assert_raises(ValueError): + minimize(self.fun, [-1.0, 1.0], bounds=bounds, method='SLSQP') + + def test_bounds_clipping(self): + # + # SLSQP returns bogus results for initial guess out of bounds, gh-6859 + # + def f(x): + return (x[0] - 1)**2 + + sol = minimize(f, [10], method='slsqp', bounds=[(None, 0)]) + assert_(sol.success) + assert_allclose(sol.x, 0, atol=1e-10) + + sol = minimize(f, [-10], method='slsqp', bounds=[(2, None)]) + assert_(sol.success) + assert_allclose(sol.x, 2, atol=1e-10) + + sol = minimize(f, [-10], method='slsqp', bounds=[(None, 0)]) + assert_(sol.success) + assert_allclose(sol.x, 0, atol=1e-10) + + sol = minimize(f, [10], method='slsqp', bounds=[(2, None)]) + assert_(sol.success) + assert_allclose(sol.x, 2, atol=1e-10) + + sol = minimize(f, [-0.5], method='slsqp', bounds=[(-1, 0)]) + assert_(sol.success) + assert_allclose(sol.x, 0, atol=1e-10) + + sol = minimize(f, [10], method='slsqp', bounds=[(-1, 0)]) + assert_(sol.success) + assert_allclose(sol.x, 0, atol=1e-10) + + def test_infeasible_initial(self): + # Check SLSQP behavior with infeasible initial point + def f(x): + x, = x + return x*x - 2*x + 1 + + cons_u = [{'type': 'ineq', 'fun': lambda x: 0 - x}] + cons_l = [{'type': 'ineq', 'fun': lambda x: x - 2}] + cons_ul = [{'type': 'ineq', 'fun': lambda x: 0 - x}, + {'type': 'ineq', 'fun': lambda x: x + 1}] + + sol = minimize(f, [10], method='slsqp', constraints=cons_u) + assert_(sol.success) + assert_allclose(sol.x, 0, atol=1e-10) + + sol = minimize(f, [-10], method='slsqp', constraints=cons_l) + assert_(sol.success) + assert_allclose(sol.x, 2, atol=1e-10) + + sol = minimize(f, [-10], method='slsqp', constraints=cons_u) + assert_(sol.success) + 
assert_allclose(sol.x, 0, atol=1e-10) + + sol = minimize(f, [10], method='slsqp', constraints=cons_l) + assert_(sol.success) + assert_allclose(sol.x, 2, atol=1e-10) + + sol = minimize(f, [-0.5], method='slsqp', constraints=cons_ul) + assert_(sol.success) + assert_allclose(sol.x, 0, atol=1e-10) + + sol = minimize(f, [10], method='slsqp', constraints=cons_ul) + assert_(sol.success) + assert_allclose(sol.x, 0, atol=1e-10) + + @pytest.mark.xfail(scipy.show_config(mode='dicts')['Compilers']['fortran']['name'] + == "intel-llvm", + reason="Runtime warning due to floating point issues, not logic") + def test_inconsistent_inequalities(self): + # gh-7618 + + def cost(x): + return -1 * x[0] + 4 * x[1] + + def ineqcons1(x): + return x[1] - x[0] - 1 + + def ineqcons2(x): + return x[0] - x[1] + + # The inequalities are inconsistent, so no solution can exist: + # + # x1 >= x0 + 1 + # x0 >= x1 + + x0 = (1,5) + bounds = ((-5, 5), (-5, 5)) + cons = (dict(type='ineq', fun=ineqcons1), dict(type='ineq', fun=ineqcons2)) + res = minimize(cost, x0, method='SLSQP', bounds=bounds, constraints=cons) + + assert_(not res.success) + + def test_new_bounds_type(self): + def f(x): + return x[0] ** 2 + x[1] ** 2 + bounds = Bounds([1, 0], [np.inf, np.inf]) + sol = minimize(f, [0, 0], method='slsqp', bounds=bounds) + assert_(sol.success) + assert_allclose(sol.x, [1, 0]) + + def test_nested_minimization(self): + + class NestedProblem: + + def __init__(self): + self.F_outer_count = 0 + + def F_outer(self, x): + self.F_outer_count += 1 + if self.F_outer_count > 1000: + raise Exception("Nested minimization failed to terminate.") + inner_res = minimize(self.F_inner, (3, 4), method="SLSQP") + assert_(inner_res.success) + assert_allclose(inner_res.x, [1, 1]) + return x[0]**2 + x[1]**2 + x[2]**2 + + def F_inner(self, x): + return (x[0] - 1)**2 + (x[1] - 1)**2 + + def solve(self): + outer_res = minimize(self.F_outer, (5, 5, 5), method="SLSQP") + assert_(outer_res.success) + assert_allclose(outer_res.x, [0, 0, 0]) + + problem = NestedProblem() + problem.solve() + + def test_gh1758(self): + # the test suggested in gh1758 + # https://nlopt.readthedocs.io/en/latest/NLopt_Tutorial/ + # implement two equality constraints, in R^2. + def fun(x): + return np.sqrt(x[1]) + + def f_eqcon(x): + """ Equality constraint """ + return x[1] - (2 * x[0]) ** 3 + + def f_eqcon2(x): + """ Equality constraint """ + return x[1] - (-x[0] + 1) ** 3 + + c1 = {'type': 'eq', 'fun': f_eqcon} + c2 = {'type': 'eq', 'fun': f_eqcon2} + + res = minimize(fun, [8, 0.25], method='SLSQP', + constraints=[c1, c2], bounds=[(-0.5, 1), (0, 8)]) + + np.testing.assert_allclose(res.fun, 0.5443310539518) + np.testing.assert_allclose(res.x, [0.33333333, 0.2962963]) + assert res.success + + def test_gh9640(self): + np.random.seed(10) + cons = ({'type': 'ineq', 'fun': lambda x: -x[0] - x[1] - 3}, + {'type': 'ineq', 'fun': lambda x: x[1] + x[2] - 2}) + bnds = ((-2, 2), (-2, 2), (-2, 2)) + + def target(x): + return 1 + x0 = [-1.8869783504471584, -0.640096352696244, -0.8174212253407696] + res = minimize(target, x0, method='SLSQP', bounds=bnds, constraints=cons, + options={'disp':False, 'maxiter':10000}) + + # The problem is infeasible, so it cannot succeed + assert not res.success + + @pytest.mark.thread_unsafe + def test_parameters_stay_within_bounds(self): + # gh11403. For some problems the SLSQP Fortran code suggests a step + # outside one of the lower/upper bounds. 
When this happens + # approx_derivative complains because it's being asked to evaluate + # a gradient outside its domain. + np.random.seed(1) + bounds = Bounds(np.array([0.1]), np.array([1.0])) + n_inputs = len(bounds.lb) + x0 = np.array(bounds.lb + (bounds.ub - bounds.lb) * + np.random.random(n_inputs)) + + def f(x): + assert (x >= bounds.lb).all() + return np.linalg.norm(x) + + with pytest.warns(RuntimeWarning, match='x were outside bounds'): + res = minimize(f, x0, method='SLSQP', bounds=bounds) + assert res.success diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_tnc.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_tnc.py new file mode 100644 index 0000000000000000000000000000000000000000..2cde9837bfd08e62916660a9750d833629b6b547 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_tnc.py @@ -0,0 +1,345 @@ +""" +Unit tests for TNC optimization routine from tnc.py +""" +import pytest +from numpy.testing import assert_allclose, assert_equal + +import numpy as np +from math import pow + +from scipy import optimize + + +class TestTnc: + """TNC non-linear optimization. + + These tests are taken from Prof. K. Schittkowski's test examples + for constrained non-linear programming. + + http://www.uni-bayreuth.de/departments/math/~kschittkowski/home.htm + + """ + def setup_method(self): + # options for minimize + self.opts = {'disp': False, 'maxfun': 200} + + # objective functions and Jacobian for each test + def f1(self, x, a=100.0): + return a * pow((x[1] - pow(x[0], 2)), 2) + pow(1.0 - x[0], 2) + + def g1(self, x, a=100.0): + dif = [0, 0] + dif[1] = 2 * a * (x[1] - pow(x[0], 2)) + dif[0] = -2.0 * (x[0] * (dif[1] - 1.0) + 1.0) + return dif + + def fg1(self, x, a=100.0): + return self.f1(x, a), self.g1(x, a) + + def f3(self, x): + return x[1] + pow(x[1] - x[0], 2) * 1.0e-5 + + def g3(self, x): + dif = [0, 0] + dif[0] = -2.0 * (x[1] - x[0]) * 1.0e-5 + dif[1] = 1.0 - dif[0] + return dif + + def fg3(self, x): + return self.f3(x), self.g3(x) + + def f4(self, x): + return pow(x[0] + 1.0, 3) / 3.0 + x[1] + + def g4(self, x): + dif = [0, 0] + dif[0] = pow(x[0] + 1.0, 2) + dif[1] = 1.0 + return dif + + def fg4(self, x): + return self.f4(x), self.g4(x) + + def f5(self, x): + return np.sin(x[0] + x[1]) + pow(x[0] - x[1], 2) - \ + 1.5 * x[0] + 2.5 * x[1] + 1.0 + + def g5(self, x): + dif = [0, 0] + v1 = np.cos(x[0] + x[1]) + v2 = 2.0*(x[0] - x[1]) + + dif[0] = v1 + v2 - 1.5 + dif[1] = v1 - v2 + 2.5 + return dif + + def fg5(self, x): + return self.f5(x), self.g5(x) + + def f38(self, x): + return (100.0 * pow(x[1] - pow(x[0], 2), 2) + + pow(1.0 - x[0], 2) + 90.0 * pow(x[3] - pow(x[2], 2), 2) + + pow(1.0 - x[2], 2) + 10.1 * (pow(x[1] - 1.0, 2) + + pow(x[3] - 1.0, 2)) + + 19.8 * (x[1] - 1.0) * (x[3] - 1.0)) * 1.0e-5 + + def g38(self, x): + dif = [0, 0, 0, 0] + dif[0] = (-400.0 * x[0] * (x[1] - pow(x[0], 2)) - + 2.0 * (1.0 - x[0])) * 1.0e-5 + dif[1] = (200.0 * (x[1] - pow(x[0], 2)) + 20.2 * (x[1] - 1.0) + + 19.8 * (x[3] - 1.0)) * 1.0e-5 + dif[2] = (- 360.0 * x[2] * (x[3] - pow(x[2], 2)) - + 2.0 * (1.0 - x[2])) * 1.0e-5 + dif[3] = (180.0 * (x[3] - pow(x[2], 2)) + 20.2 * (x[3] - 1.0) + + 19.8 * (x[1] - 1.0)) * 1.0e-5 + return dif + + def fg38(self, x): + return self.f38(x), self.g38(x) + + def f45(self, x): + return 2.0 - x[0] * x[1] * x[2] * x[3] * x[4] / 120.0 + + def g45(self, x): + dif = [0] * 5 + dif[0] = - x[1] 
* x[2] * x[3] * x[4] / 120.0 + dif[1] = - x[0] * x[2] * x[3] * x[4] / 120.0 + dif[2] = - x[0] * x[1] * x[3] * x[4] / 120.0 + dif[3] = - x[0] * x[1] * x[2] * x[4] / 120.0 + dif[4] = - x[0] * x[1] * x[2] * x[3] / 120.0 + return dif + + def fg45(self, x): + return self.f45(x), self.g45(x) + + # tests + # minimize with method=TNC + def test_minimize_tnc1(self): + x0, bnds = [-2, 1], ([-np.inf, None], [-1.5, None]) + xopt = [1, 1] + iterx = [] # to test callback + + res = optimize.minimize(self.f1, x0, method='TNC', jac=self.g1, + bounds=bnds, options=self.opts, + callback=iterx.append) + assert_allclose(res.fun, self.f1(xopt), atol=1e-8) + assert_equal(len(iterx), res.nit) + + def test_minimize_tnc1b(self): + x0, bnds = np.array([-2, 1]), ([-np.inf, None], [-1.5, None]) + xopt = [1, 1] + x = optimize.minimize(self.f1, x0, method='TNC', + bounds=bnds, options=self.opts).x + assert_allclose(self.f1(x), self.f1(xopt), atol=1e-4) + + def test_minimize_tnc1c(self): + x0, bnds = [-2, 1], ([-np.inf, None],[-1.5, None]) + xopt = [1, 1] + x = optimize.minimize(self.fg1, x0, method='TNC', + jac=True, bounds=bnds, + options=self.opts).x + assert_allclose(self.f1(x), self.f1(xopt), atol=1e-8) + + def test_minimize_tnc2(self): + x0, bnds = [-2, 1], ([-np.inf, None], [1.5, None]) + xopt = [-1.2210262419616387, 1.5] + x = optimize.minimize(self.f1, x0, method='TNC', + jac=self.g1, bounds=bnds, + options=self.opts).x + assert_allclose(self.f1(x), self.f1(xopt), atol=1e-8) + + def test_minimize_tnc3(self): + x0, bnds = [10, 1], ([-np.inf, None], [0.0, None]) + xopt = [0, 0] + x = optimize.minimize(self.f3, x0, method='TNC', + jac=self.g3, bounds=bnds, + options=self.opts).x + assert_allclose(self.f3(x), self.f3(xopt), atol=1e-8) + + def test_minimize_tnc4(self): + x0,bnds = [1.125, 0.125], [(1, None), (0, None)] + xopt = [1, 0] + x = optimize.minimize(self.f4, x0, method='TNC', + jac=self.g4, bounds=bnds, + options=self.opts).x + assert_allclose(self.f4(x), self.f4(xopt), atol=1e-8) + + def test_minimize_tnc5(self): + x0, bnds = [0, 0], [(-1.5, 4),(-3, 3)] + xopt = [-0.54719755119659763, -1.5471975511965976] + x = optimize.minimize(self.f5, x0, method='TNC', + jac=self.g5, bounds=bnds, + options=self.opts).x + assert_allclose(self.f5(x), self.f5(xopt), atol=1e-8) + + def test_minimize_tnc38(self): + x0, bnds = np.array([-3, -1, -3, -1]), [(-10, 10)]*4 + xopt = [1]*4 + x = optimize.minimize(self.f38, x0, method='TNC', + jac=self.g38, bounds=bnds, + options=self.opts).x + assert_allclose(self.f38(x), self.f38(xopt), atol=1e-8) + + def test_minimize_tnc45(self): + x0, bnds = [2] * 5, [(0, 1), (0, 2), (0, 3), (0, 4), (0, 5)] + xopt = [1, 2, 3, 4, 5] + x = optimize.minimize(self.f45, x0, method='TNC', + jac=self.g45, bounds=bnds, + options=self.opts).x + assert_allclose(self.f45(x), self.f45(xopt), atol=1e-8) + + # fmin_tnc + def test_tnc1(self): + fg, x, bounds = self.fg1, [-2, 1], ([-np.inf, None], [-1.5, None]) + xopt = [1, 1] + + x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds, args=(100.0, ), + messages=optimize._tnc.MSG_NONE, + maxfun=200) + + assert_allclose(self.f1(x), self.f1(xopt), atol=1e-8, + err_msg="TNC failed with status: " + + optimize._tnc.RCSTRINGS[rc]) + + def test_tnc1b(self): + x, bounds = [-2, 1], ([-np.inf, None], [-1.5, None]) + xopt = [1, 1] + + x, nf, rc = optimize.fmin_tnc(self.f1, x, approx_grad=True, + bounds=bounds, + messages=optimize._tnc.MSG_NONE, + maxfun=200) + + assert_allclose(self.f1(x), self.f1(xopt), atol=1e-4, + err_msg="TNC failed with status: " + + 
optimize._tnc.RCSTRINGS[rc]) + + def test_tnc1c(self): + x, bounds = [-2, 1], ([-np.inf, None], [-1.5, None]) + xopt = [1, 1] + + x, nf, rc = optimize.fmin_tnc(self.f1, x, fprime=self.g1, + bounds=bounds, + messages=optimize._tnc.MSG_NONE, + maxfun=200) + + assert_allclose(self.f1(x), self.f1(xopt), atol=1e-8, + err_msg="TNC failed with status: " + + optimize._tnc.RCSTRINGS[rc]) + + def test_tnc2(self): + fg, x, bounds = self.fg1, [-2, 1], ([-np.inf, None], [1.5, None]) + xopt = [-1.2210262419616387, 1.5] + + x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds, + messages=optimize._tnc.MSG_NONE, + maxfun=200) + + assert_allclose(self.f1(x), self.f1(xopt), atol=1e-8, + err_msg="TNC failed with status: " + + optimize._tnc.RCSTRINGS[rc]) + + def test_tnc3(self): + fg, x, bounds = self.fg3, [10, 1], ([-np.inf, None], [0.0, None]) + xopt = [0, 0] + + x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds, + messages=optimize._tnc.MSG_NONE, + maxfun=200) + + assert_allclose(self.f3(x), self.f3(xopt), atol=1e-8, + err_msg="TNC failed with status: " + + optimize._tnc.RCSTRINGS[rc]) + + def test_tnc4(self): + fg, x, bounds = self.fg4, [1.125, 0.125], [(1, None), (0, None)] + xopt = [1, 0] + + x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds, + messages=optimize._tnc.MSG_NONE, + maxfun=200) + + assert_allclose(self.f4(x), self.f4(xopt), atol=1e-8, + err_msg="TNC failed with status: " + + optimize._tnc.RCSTRINGS[rc]) + + def test_tnc5(self): + fg, x, bounds = self.fg5, [0, 0], [(-1.5, 4),(-3, 3)] + xopt = [-0.54719755119659763, -1.5471975511965976] + + x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds, + messages=optimize._tnc.MSG_NONE, + maxfun=200) + + assert_allclose(self.f5(x), self.f5(xopt), atol=1e-8, + err_msg="TNC failed with status: " + + optimize._tnc.RCSTRINGS[rc]) + + def test_tnc38(self): + fg, x, bounds = self.fg38, np.array([-3, -1, -3, -1]), [(-10, 10)]*4 + xopt = [1]*4 + + x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds, + messages=optimize._tnc.MSG_NONE, + maxfun=200) + + assert_allclose(self.f38(x), self.f38(xopt), atol=1e-8, + err_msg="TNC failed with status: " + + optimize._tnc.RCSTRINGS[rc]) + + def test_tnc45(self): + fg, x, bounds = self.fg45, [2] * 5, [(0, 1), (0, 2), (0, 3), + (0, 4), (0, 5)] + xopt = [1, 2, 3, 4, 5] + + x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds, + messages=optimize._tnc.MSG_NONE, + maxfun=200) + + assert_allclose(self.f45(x), self.f45(xopt), atol=1e-8, + err_msg="TNC failed with status: " + + optimize._tnc.RCSTRINGS[rc]) + + def test_raising_exceptions(self): + # tnc was ported to cython from hand-crafted cpython code + # check that Exception handling works. + def myfunc(x): + raise RuntimeError("myfunc") + + def myfunc1(x): + return optimize.rosen(x) + + def callback(x): + raise ValueError("callback") + + with pytest.raises(RuntimeError): + optimize.minimize(myfunc, [0, 1], method="TNC") + + with pytest.raises(ValueError): + optimize.minimize( + myfunc1, [0, 1], method="TNC", callback=callback + ) + + def test_callback_shouldnt_affect_minimization(self): + # gh14879. The output of a TNC minimization was different depending + # on whether a callback was used or not. The two should be equivalent. + # The issue was that TNC was unscaling/scaling x, and this process was + # altering x in the process. Now the callback uses an unscaled + # temporary copy of x. + def callback(x): + pass + + fun = optimize.rosen + bounds = [(0, 10)] * 4 + x0 = [1, 2, 3, 4.] 
+ res = optimize.minimize( + fun, x0, bounds=bounds, method="TNC", options={"maxfun": 1000} + ) + res2 = optimize.minimize( + fun, x0, bounds=bounds, method="TNC", options={"maxfun": 1000}, + callback=callback + ) + assert_allclose(res2.x, res.x) + assert_allclose(res2.fun, res.fun) + assert_equal(res2.nfev, res.nfev) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_trustregion.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_trustregion.py new file mode 100644 index 0000000000000000000000000000000000000000..0439f8125c565d70cbed8c6c41f762fdae06d4ce --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_trustregion.py @@ -0,0 +1,110 @@ +""" +Unit tests for trust-region optimization routines. + +""" +import pytest +import numpy as np +from numpy.testing import assert_, assert_equal, assert_allclose +from scipy.optimize import (minimize, rosen, rosen_der, rosen_hess, + rosen_hess_prod) + + +class Accumulator: + """ This is for testing callbacks.""" + def __init__(self): + self.count = 0 + self.accum = None + + def __call__(self, x): + self.count += 1 + if self.accum is None: + self.accum = np.array(x) + else: + self.accum += x + + +class TestTrustRegionSolvers: + + def setup_method(self): + self.x_opt = [1.0, 1.0] + self.easy_guess = [2.0, 2.0] + self.hard_guess = [-1.2, 1.0] + + def test_dogleg_accuracy(self): + # test the accuracy and the return_all option + x0 = self.hard_guess + r = minimize(rosen, x0, jac=rosen_der, hess=rosen_hess, tol=1e-8, + method='dogleg', options={'return_all': True},) + assert_allclose(x0, r['allvecs'][0]) + assert_allclose(r['x'], r['allvecs'][-1]) + assert_allclose(r['x'], self.x_opt) + + def test_dogleg_callback(self): + # test the callback mechanism and the maxiter and return_all options + accumulator = Accumulator() + maxiter = 5 + r = minimize(rosen, self.hard_guess, jac=rosen_der, hess=rosen_hess, + callback=accumulator, method='dogleg', + options={'return_all': True, 'maxiter': maxiter},) + assert_equal(accumulator.count, maxiter) + assert_equal(len(r['allvecs']), maxiter+1) + assert_allclose(r['x'], r['allvecs'][-1]) + assert_allclose(sum(r['allvecs'][1:]), accumulator.accum) + + @pytest.mark.thread_unsafe + def test_dogleg_user_warning(self): + with pytest.warns(RuntimeWarning, + match=r'Maximum number of iterations'): + minimize(rosen, self.hard_guess, jac=rosen_der, + hess=rosen_hess, method='dogleg', + options={'disp': True, 'maxiter': 1}, ) + + def test_solver_concordance(self): + # Assert that dogleg uses fewer iterations than ncg on the Rosenbrock + # test function, although this does not necessarily mean + # that dogleg is faster or better than ncg even for this function + # and especially not for other test functions. 
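+ # trust-ncg, trust-krylov and trust-exact are run for coverage; only the dogleg vs. newton-cg iteration counts are compared at the end.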
+ f = rosen + g = rosen_der + h = rosen_hess + for x0 in (self.easy_guess, self.hard_guess): + r_dogleg = minimize(f, x0, jac=g, hess=h, tol=1e-8, + method='dogleg', options={'return_all': True}) + r_trust_ncg = minimize(f, x0, jac=g, hess=h, tol=1e-8, + method='trust-ncg', + options={'return_all': True}) + r_trust_krylov = minimize(f, x0, jac=g, hess=h, tol=1e-8, + method='trust-krylov', + options={'return_all': True}) + r_ncg = minimize(f, x0, jac=g, hess=h, tol=1e-8, + method='newton-cg', options={'return_all': True}) + r_iterative = minimize(f, x0, jac=g, hess=h, tol=1e-8, + method='trust-exact', + options={'return_all': True}) + assert_allclose(self.x_opt, r_dogleg['x']) + assert_allclose(self.x_opt, r_trust_ncg['x']) + assert_allclose(self.x_opt, r_trust_krylov['x']) + assert_allclose(self.x_opt, r_ncg['x']) + assert_allclose(self.x_opt, r_iterative['x']) + assert_(len(r_dogleg['allvecs']) < len(r_ncg['allvecs'])) + + def test_trust_ncg_hessp(self): + for x0 in (self.easy_guess, self.hard_guess, self.x_opt): + r = minimize(rosen, x0, jac=rosen_der, hessp=rosen_hess_prod, + tol=1e-8, method='trust-ncg') + assert_allclose(self.x_opt, r['x']) + + def test_trust_ncg_start_in_optimum(self): + r = minimize(rosen, x0=self.x_opt, jac=rosen_der, hess=rosen_hess, + tol=1e-8, method='trust-ncg') + assert_allclose(self.x_opt, r['x']) + + def test_trust_krylov_start_in_optimum(self): + r = minimize(rosen, x0=self.x_opt, jac=rosen_der, hess=rosen_hess, + tol=1e-8, method='trust-krylov') + assert_allclose(self.x_opt, r['x']) + + def test_trust_exact_start_in_optimum(self): + r = minimize(rosen, x0=self.x_opt, jac=rosen_der, hess=rosen_hess, + tol=1e-8, method='trust-exact') + assert_allclose(self.x_opt, r['x']) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_trustregion_exact.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_trustregion_exact.py new file mode 100644 index 0000000000000000000000000000000000000000..020b6d883a5fa59006c142df4fcb8c3e115c46bf --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_trustregion_exact.py @@ -0,0 +1,351 @@ +""" +Unit tests for trust-region iterative subproblem. + +""" +import pytest +import numpy as np +from scipy.optimize._trustregion_exact import ( + estimate_smallest_singular_value, + singular_leading_submatrix, + IterativeSubproblem) +from scipy.linalg import (svd, get_lapack_funcs, det, qr, norm) +from numpy.testing import (assert_array_equal, + assert_equal, assert_array_almost_equal) + + +def random_entry(n, min_eig, max_eig, case): + + # Generate random matrix + rand = np.random.uniform(-1, 1, (n, n)) + + # QR decomposition + Q, _, _ = qr(rand, pivoting='True') + + # Generate random eigenvalues + eigvalues = np.random.uniform(min_eig, max_eig, n) + eigvalues = np.sort(eigvalues)[::-1] + + # Generate matrix + Qaux = np.multiply(eigvalues, Q) + A = np.dot(Qaux, Q.T) + + # Generate gradient vector accordingly + # to the case is being tested. 
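+ # Columns of Q are ordered by decreasing eigenvalue, so leaving the last + # coefficient of g at zero before the rotation g = Q @ g makes g orthogonal + # to the eigenvector of the smallest eigenvalue (the 'hard' case); + # 'jac_equal_zero' simply uses a zero gradient.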
+ if case == 'hard': + g = np.zeros(n) + g[:-1] = np.random.uniform(-1, 1, n-1) + g = np.dot(Q, g) + elif case == 'jac_equal_zero': + g = np.zeros(n) + else: + g = np.random.uniform(-1, 1, n) + + return A, g + + +class TestEstimateSmallestSingularValue: + + def test_for_ill_condiotioned_matrix(self): + + # Ill-conditioned triangular matrix + C = np.array([[1, 2, 3, 4], + [0, 0.05, 60, 7], + [0, 0, 0.8, 9], + [0, 0, 0, 10]]) + + # Get svd decomposition + U, s, Vt = svd(C) + + # Get smallest singular value and correspondent right singular vector. + smin_svd = s[-1] + zmin_svd = Vt[-1, :] + + # Estimate smallest singular value + smin, zmin = estimate_smallest_singular_value(C) + + # Check the estimation + assert_array_almost_equal(smin, smin_svd, decimal=8) + assert_array_almost_equal(abs(zmin), abs(zmin_svd), decimal=8) + + +class TestSingularLeadingSubmatrix: + + def test_for_already_singular_leading_submatrix(self): + + # Define test matrix A. + # Note that the leading 2x2 submatrix is singular. + A = np.array([[1, 2, 3], + [2, 4, 5], + [3, 5, 6]]) + + # Get Cholesky from lapack functions + cholesky, = get_lapack_funcs(('potrf',), (A,)) + + # Compute Cholesky Decomposition + c, k = cholesky(A, lower=False, overwrite_a=False, clean=True) + + delta, v = singular_leading_submatrix(A, c, k) + + A[k-1, k-1] += delta + + # Check if the leading submatrix is singular. + assert_array_almost_equal(det(A[:k, :k]), 0) + + # Check if `v` fulfil the specified properties + quadratic_term = np.dot(v, np.dot(A, v)) + assert_array_almost_equal(quadratic_term, 0) + + def test_for_simetric_indefinite_matrix(self): + + # Define test matrix A. + # Note that the leading 5x5 submatrix is indefinite. + A = np.asarray([[1, 2, 3, 7, 8], + [2, 5, 5, 9, 0], + [3, 5, 11, 1, 2], + [7, 9, 1, 7, 5], + [8, 0, 2, 5, 8]]) + + # Get Cholesky from lapack functions + cholesky, = get_lapack_funcs(('potrf',), (A,)) + + # Compute Cholesky Decomposition + c, k = cholesky(A, lower=False, overwrite_a=False, clean=True) + + delta, v = singular_leading_submatrix(A, c, k) + + A[k-1, k-1] += delta + + # Check if the leading submatrix is singular. + assert_array_almost_equal(det(A[:k, :k]), 0) + + # Check if `v` fulfil the specified properties + quadratic_term = np.dot(v, np.dot(A, v)) + assert_array_almost_equal(quadratic_term, 0) + + def test_for_first_element_equal_to_zero(self): + + # Define test matrix A. + # Note that the leading 2x2 submatrix is singular. + A = np.array([[0, 3, 11], + [3, 12, 5], + [11, 5, 6]]) + + # Get Cholesky from lapack functions + cholesky, = get_lapack_funcs(('potrf',), (A,)) + + # Compute Cholesky Decomposition + c, k = cholesky(A, lower=False, overwrite_a=False, clean=True) + + delta, v = singular_leading_submatrix(A, c, k) + + A[k-1, k-1] += delta + + # Check if the leading submatrix is singular + assert_array_almost_equal(det(A[:k, :k]), 0) + + # Check if `v` fulfil the specified properties + quadratic_term = np.dot(v, np.dot(A, v)) + assert_array_almost_equal(quadratic_term, 0) + + +class TestIterativeSubproblem: + + def test_for_the_easy_case(self): + + # `H` is chosen such that `g` is not orthogonal to the + # eigenvector associated with the smallest eigenvalue `s`. 
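+ # In this 'easy' case the boundary solution can be found from the secular + # equation alone, without the eigenvector correction needed in the hard + # case tested below.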
+ H = [[10, 2, 3, 4], + [2, 1, 7, 1], + [3, 7, 1, 7], + [4, 1, 7, 2]] + g = [1, 1, 1, 1] + + # Trust Radius + trust_radius = 1 + + # Solve Subproblem + subprob = IterativeSubproblem(x=0, + fun=lambda x: 0, + jac=lambda x: np.array(g), + hess=lambda x: np.array(H), + k_easy=1e-10, + k_hard=1e-10) + p, hits_boundary = subprob.solve(trust_radius) + + assert_array_almost_equal(p, [0.00393332, -0.55260862, + 0.67065477, -0.49480341]) + assert_array_almost_equal(hits_boundary, True) + + def test_for_the_hard_case(self): + + # `H` is chosen such that `g` is orthogonal to the + # eigenvector associated with the smallest eigenvalue `s`. + H = [[10, 2, 3, 4], + [2, 1, 7, 1], + [3, 7, 1, 7], + [4, 1, 7, 2]] + g = [6.4852641521327437, 1, 1, 1] + s = -8.2151519874416614 + + # Trust Radius + trust_radius = 1 + + # Solve Subproblem + subprob = IterativeSubproblem(x=0, + fun=lambda x: 0, + jac=lambda x: np.array(g), + hess=lambda x: np.array(H), + k_easy=1e-10, + k_hard=1e-10) + p, hits_boundary = subprob.solve(trust_radius) + + assert_array_almost_equal(-s, subprob.lambda_current) + + def test_for_interior_convergence(self): + + H = [[1.812159, 0.82687265, 0.21838879, -0.52487006, 0.25436988], + [0.82687265, 2.66380283, 0.31508988, -0.40144163, 0.08811588], + [0.21838879, 0.31508988, 2.38020726, -0.3166346, 0.27363867], + [-0.52487006, -0.40144163, -0.3166346, 1.61927182, -0.42140166], + [0.25436988, 0.08811588, 0.27363867, -0.42140166, 1.33243101]] + + g = [0.75798952, 0.01421945, 0.33847612, 0.83725004, -0.47909534] + + # Solve Subproblem + subprob = IterativeSubproblem(x=0, + fun=lambda x: 0, + jac=lambda x: np.array(g), + hess=lambda x: np.array(H)) + p, hits_boundary = subprob.solve(1.1) + + assert_array_almost_equal(p, [-0.68585435, 0.1222621, -0.22090999, + -0.67005053, 0.31586769]) + assert_array_almost_equal(hits_boundary, False) + assert_array_almost_equal(subprob.lambda_current, 0) + assert_array_almost_equal(subprob.niter, 1) + + def test_for_jac_equal_zero(self): + + H = [[0.88547534, 2.90692271, 0.98440885, -0.78911503, -0.28035809], + [2.90692271, -0.04618819, 0.32867263, -0.83737945, 0.17116396], + [0.98440885, 0.32867263, -0.87355957, -0.06521957, -1.43030957], + [-0.78911503, -0.83737945, -0.06521957, -1.645709, -0.33887298], + [-0.28035809, 0.17116396, -1.43030957, -0.33887298, -1.68586978]] + + g = [0, 0, 0, 0, 0] + + # Solve Subproblem + subprob = IterativeSubproblem(x=0, + fun=lambda x: 0, + jac=lambda x: np.array(g), + hess=lambda x: np.array(H), + k_easy=1e-10, + k_hard=1e-10) + p, hits_boundary = subprob.solve(1.1) + + assert_array_almost_equal(p, [0.06910534, -0.01432721, + -0.65311947, -0.23815972, + -0.84954934]) + assert_array_almost_equal(hits_boundary, True) + + def test_for_jac_very_close_to_zero(self): + + H = [[0.88547534, 2.90692271, 0.98440885, -0.78911503, -0.28035809], + [2.90692271, -0.04618819, 0.32867263, -0.83737945, 0.17116396], + [0.98440885, 0.32867263, -0.87355957, -0.06521957, -1.43030957], + [-0.78911503, -0.83737945, -0.06521957, -1.645709, -0.33887298], + [-0.28035809, 0.17116396, -1.43030957, -0.33887298, -1.68586978]] + + g = [0, 0, 0, 0, 1e-15] + + # Solve Subproblem + subprob = IterativeSubproblem(x=0, + fun=lambda x: 0, + jac=lambda x: np.array(g), + hess=lambda x: np.array(H), + k_easy=1e-10, + k_hard=1e-10) + p, hits_boundary = subprob.solve(1.1) + + assert_array_almost_equal(p, [0.06910534, -0.01432721, + -0.65311947, -0.23815972, + -0.84954934]) + assert_array_almost_equal(hits_boundary, True) + + @pytest.mark.fail_slow(10) + def 
test_for_random_entries(self): + # Seed + np.random.seed(1) + + # Dimension + n = 5 + + for case in ('easy', 'hard', 'jac_equal_zero'): + + eig_limits = [(-20, -15), + (-10, -5), + (-10, 0), + (-5, 5), + (-10, 10), + (0, 10), + (5, 10), + (15, 20)] + + for min_eig, max_eig in eig_limits: + # Generate random symmetric matrix H with + # eigenvalues between min_eig and max_eig. + H, g = random_entry(n, min_eig, max_eig, case) + + # Trust radius + trust_radius_list = [0.1, 0.3, 0.6, 0.8, 1, 1.2, 3.3, 5.5, 10] + + for trust_radius in trust_radius_list: + # Solve subproblem with very high accuracy + subprob_ac = IterativeSubproblem(0, + lambda x: 0, + lambda x: g, + lambda x: H, + k_easy=1e-10, + k_hard=1e-10) + + p_ac, hits_boundary_ac = subprob_ac.solve(trust_radius) + + # Compute objective function value + J_ac = 1/2*np.dot(p_ac, np.dot(H, p_ac))+np.dot(g, p_ac) + + stop_criteria = [(0.1, 2), + (0.5, 1.1), + (0.9, 1.01)] + + for k_opt, k_trf in stop_criteria: + + # k_easy and k_hard computed in function + # of k_opt and k_trf accordingly to + # Conn, A. R., Gould, N. I., & Toint, P. L. (2000). + # "Trust region methods". Siam. p. 197. + k_easy = min(k_trf-1, + 1-np.sqrt(k_opt)) + k_hard = 1-k_opt + + # Solve subproblem + subprob = IterativeSubproblem(0, + lambda x: 0, + lambda x: g, + lambda x: H, + k_easy=k_easy, + k_hard=k_hard) + p, hits_boundary = subprob.solve(trust_radius) + + # Compute objective function value + J = 1/2*np.dot(p, np.dot(H, p))+np.dot(g, p) + + # Check if it respect k_trf + if hits_boundary: + assert_array_equal(np.abs(norm(p)-trust_radius) <= + (k_trf-1)*trust_radius, True) + else: + assert_equal(norm(p) <= trust_radius, True) + + # Check if it respect k_opt + assert_equal(J <= k_opt*J_ac, True) + diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_trustregion_krylov.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_trustregion_krylov.py new file mode 100644 index 0000000000000000000000000000000000000000..ee288c1b1348d280141bb62c09aba8fa67027142 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_trustregion_krylov.py @@ -0,0 +1,170 @@ +""" +Unit tests for Krylov space trust-region subproblem solver. + +""" +import pytest +import numpy as np +from scipy.optimize._trlib import (get_trlib_quadratic_subproblem) +from numpy.testing import (assert_, + assert_almost_equal, + assert_equal, assert_array_almost_equal) + +KrylovQP = get_trlib_quadratic_subproblem(tol_rel_i=1e-8, tol_rel_b=1e-6) +KrylovQP_disp = get_trlib_quadratic_subproblem(tol_rel_i=1e-8, tol_rel_b=1e-6, + disp=True) + +class TestKrylovQuadraticSubproblem: + + def test_for_the_easy_case(self): + + # `H` is chosen such that `g` is not orthogonal to the + # eigenvector associated with the smallest eigenvalue. 
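+ # With no gradient component along that eigenvector, the solver has to add + # a contribution from it to reach the trust-region boundary (the classic + # 'hard case').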
+ H = np.array([[1.0, 0.0, 4.0], + [0.0, 2.0, 0.0], + [4.0, 0.0, 3.0]]) + g = np.array([5.0, 0.0, 4.0]) + + # Trust Radius + trust_radius = 1.0 + + # Solve Subproblem + subprob = KrylovQP(x=0, + fun=lambda x: 0, + jac=lambda x: g, + hess=lambda x: None, + hessp=lambda x, y: H.dot(y)) + p, hits_boundary = subprob.solve(trust_radius) + + assert_array_almost_equal(p, np.array([-1.0, 0.0, 0.0])) + assert_equal(hits_boundary, True) + # check kkt satisfaction + assert_almost_equal( + np.linalg.norm(H.dot(p) + subprob.lam * p + g), + 0.0) + # check trust region constraint + assert_almost_equal(np.linalg.norm(p), trust_radius) + + trust_radius = 0.5 + p, hits_boundary = subprob.solve(trust_radius) + + assert_array_almost_equal(p, + np.array([-0.46125446, 0., -0.19298788])) + assert_equal(hits_boundary, True) + # check kkt satisfaction + assert_almost_equal( + np.linalg.norm(H.dot(p) + subprob.lam * p + g), + 0.0) + # check trust region constraint + assert_almost_equal(np.linalg.norm(p), trust_radius) + + def test_for_the_hard_case(self): + + # `H` is chosen such that `g` is orthogonal to the + # eigenvector associated with the smallest eigenvalue. + H = np.array([[1.0, 0.0, 4.0], + [0.0, 2.0, 0.0], + [4.0, 0.0, 3.0]]) + g = np.array([0.0, 2.0, 0.0]) + + # Trust Radius + trust_radius = 1.0 + + # Solve Subproblem + subprob = KrylovQP(x=0, + fun=lambda x: 0, + jac=lambda x: g, + hess=lambda x: None, + hessp=lambda x, y: H.dot(y)) + p, hits_boundary = subprob.solve(trust_radius) + + assert_array_almost_equal(p, np.array([0.0, -1.0, 0.0])) + # check kkt satisfaction + assert_almost_equal( + np.linalg.norm(H.dot(p) + subprob.lam * p + g), + 0.0) + # check trust region constraint + assert_almost_equal(np.linalg.norm(p), trust_radius) + + trust_radius = 0.5 + p, hits_boundary = subprob.solve(trust_radius) + + assert_array_almost_equal(p, np.array([0.0, -0.5, 0.0])) + # check kkt satisfaction + assert_almost_equal( + np.linalg.norm(H.dot(p) + subprob.lam * p + g), + 0.0) + # check trust region constraint + assert_almost_equal(np.linalg.norm(p), trust_radius) + + def test_for_interior_convergence(self): + + H = np.array([[1.812159, 0.82687265, 0.21838879, -0.52487006, 0.25436988], + [0.82687265, 2.66380283, 0.31508988, -0.40144163, 0.08811588], + [0.21838879, 0.31508988, 2.38020726, -0.3166346, 0.27363867], + [-0.52487006, -0.40144163, -0.3166346, 1.61927182, -0.42140166], + [0.25436988, 0.08811588, 0.27363867, -0.42140166, 1.33243101]]) + g = np.array([0.75798952, 0.01421945, 0.33847612, 0.83725004, -0.47909534]) + trust_radius = 1.1 + + # Solve Subproblem + subprob = KrylovQP(x=0, + fun=lambda x: 0, + jac=lambda x: g, + hess=lambda x: None, + hessp=lambda x, y: H.dot(y)) + p, hits_boundary = subprob.solve(trust_radius) + + # check kkt satisfaction + assert_almost_equal( + np.linalg.norm(H.dot(p) + subprob.lam * p + g), + 0.0) + + assert_array_almost_equal(p, [-0.68585435, 0.1222621, -0.22090999, + -0.67005053, 0.31586769]) + assert_array_almost_equal(hits_boundary, False) + + def test_for_very_close_to_zero(self): + + H = np.array([[0.88547534, 2.90692271, 0.98440885, -0.78911503, -0.28035809], + [2.90692271, -0.04618819, 0.32867263, -0.83737945, 0.17116396], + [0.98440885, 0.32867263, -0.87355957, -0.06521957, -1.43030957], + [-0.78911503, -0.83737945, -0.06521957, -1.645709, -0.33887298], + [-0.28035809, 0.17116396, -1.43030957, -0.33887298, -1.68586978]]) + g = np.array([0, 0, 0, 0, 1e-6]) + trust_radius = 1.1 + + # Solve Subproblem + subprob = KrylovQP(x=0, + fun=lambda x: 0, + jac=lambda x: g, + 
hess=lambda x: None, + hessp=lambda x, y: H.dot(y)) + p, hits_boundary = subprob.solve(trust_radius) + + # check kkt satisfaction + assert_almost_equal( + np.linalg.norm(H.dot(p) + subprob.lam * p + g), + 0.0) + # check trust region constraint + assert_almost_equal(np.linalg.norm(p), trust_radius) + + assert_array_almost_equal(p, [0.06910534, -0.01432721, + -0.65311947, -0.23815972, + -0.84954934]) + assert_array_almost_equal(hits_boundary, True) + + @pytest.mark.thread_unsafe + def test_disp(self, capsys): + H = -np.eye(5) + g = np.array([0, 0, 0, 0, 1e-6]) + trust_radius = 1.1 + + subprob = KrylovQP_disp(x=0, + fun=lambda x: 0, + jac=lambda x: g, + hess=lambda x: None, + hessp=lambda x, y: H.dot(y)) + p, hits_boundary = subprob.solve(trust_radius) + out, err = capsys.readouterr() + assert_(out.startswith(' TR Solving trust region problem'), repr(out)) + diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_zeros.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_zeros.py new file mode 100644 index 0000000000000000000000000000000000000000..99fedb181424a3669719f9d4703b739bb53fa8c4 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tests/test_zeros.py @@ -0,0 +1,965 @@ +import pytest + +from functools import lru_cache + +from numpy.testing import (assert_warns, assert_, + assert_allclose, + assert_equal, + assert_array_equal, + suppress_warnings) +import numpy as np +from numpy import finfo, power, nan, isclose, sqrt, exp, sin, cos + +from scipy import optimize +from scipy.optimize import (_zeros_py as zeros, newton, root_scalar, + OptimizeResult) + +from scipy._lib._util import getfullargspec_no_self as _getfullargspec + +# Import testing parameters +from scipy.optimize._tstutils import get_tests, functions as tstutils_functions + +TOL = 4*np.finfo(float).eps # tolerance + +_FLOAT_EPS = finfo(float).eps + +bracket_methods = [zeros.bisect, zeros.ridder, zeros.brentq, zeros.brenth, + zeros.toms748] +gradient_methods = [zeros.newton] +all_methods = bracket_methods + gradient_methods + +# A few test functions used frequently: +# # A simple quadratic, (x-1)^2 - 1 +def f1(x): + return x ** 2 - 2 * x - 1 + + +def f1_1(x): + return 2 * x - 2 + + +def f1_2(x): + return 2.0 + 0 * x + + +def f1_and_p_and_pp(x): + return f1(x), f1_1(x), f1_2(x) + + +# Simple transcendental function +def f2(x): + return exp(x) - cos(x) + + +def f2_1(x): + return exp(x) + sin(x) + + +def f2_2(x): + return exp(x) + cos(x) + + +# lru cached function +@lru_cache +def f_lrucached(x): + return x + + +class TestScalarRootFinders: + # Basic tests for all scalar root finders + + xtol = 4 * np.finfo(float).eps + rtol = 4 * np.finfo(float).eps + + def _run_one_test(self, tc, method, sig_args_keys=None, + sig_kwargs_keys=None, **kwargs): + method_args = [] + for k in sig_args_keys or []: + if k not in tc: + # If a,b not present use x0, x1. 
Similarly for f and func + k = {'a': 'x0', 'b': 'x1', 'func': 'f'}.get(k, k) + method_args.append(tc[k]) + + method_kwargs = dict(**kwargs) + method_kwargs.update({'full_output': True, 'disp': False}) + for k in sig_kwargs_keys or []: + method_kwargs[k] = tc[k] + + root = tc.get('root') + func_args = tc.get('args', ()) + + try: + r, rr = method(*method_args, args=func_args, **method_kwargs) + return root, rr, tc + except Exception: + return root, zeros.RootResults(nan, -1, -1, zeros._EVALUEERR, method), tc + + def run_tests(self, tests, method, name, known_fail=None, **kwargs): + r"""Run test-cases using the specified method and the supplied signature. + + Extract the arguments for the method call from the test case + dictionary using the supplied keys for the method's signature.""" + # The methods have one of two base signatures: + # (f, a, b, **kwargs) # newton + # (func, x0, **kwargs) # bisect/brentq/... + + # FullArgSpec with args, varargs, varkw, defaults, ... + sig = _getfullargspec(method) + assert_(not sig.kwonlyargs) + nDefaults = len(sig.defaults) + nRequired = len(sig.args) - nDefaults + sig_args_keys = sig.args[:nRequired] + sig_kwargs_keys = [] + if name in ['secant', 'newton', 'halley']: + if name in ['newton', 'halley']: + sig_kwargs_keys.append('fprime') + if name in ['halley']: + sig_kwargs_keys.append('fprime2') + kwargs['tol'] = self.xtol + else: + kwargs['xtol'] = self.xtol + kwargs['rtol'] = self.rtol + + results = [list(self._run_one_test( + tc, method, sig_args_keys=sig_args_keys, + sig_kwargs_keys=sig_kwargs_keys, **kwargs)) for tc in tests] + # results= [[true root, full output, tc], ...] + + known_fail = known_fail or [] + notcvgd = [elt for elt in results if not elt[1].converged] + notcvgd = [elt for elt in notcvgd if elt[-1]['ID'] not in known_fail] + notcvged_IDS = [elt[-1]['ID'] for elt in notcvgd] + assert_equal([len(notcvged_IDS), notcvged_IDS], [0, []]) + + # The usable xtol and rtol depend on the test + tols = {'xtol': self.xtol, 'rtol': self.rtol} + tols.update(**kwargs) + rtol = tols['rtol'] + atol = tols.get('tol', tols['xtol']) + + cvgd = [elt for elt in results if elt[1].converged] + approx = [elt[1].root for elt in cvgd] + correct = [elt[0] for elt in cvgd] + # See if the root matches the reference value + notclose = [[a] + elt for a, c, elt in zip(approx, correct, cvgd) if + not isclose(a, c, rtol=rtol, atol=atol) + and elt[-1]['ID'] not in known_fail] + # If not, evaluate the function and see if is 0 at the purported root + fvs = [tc['f'](aroot, *tc.get('args', tuple())) + for aroot, c, fullout, tc in notclose] + notclose = [[fv] + elt for fv, elt in zip(fvs, notclose) if fv != 0] + assert_equal([notclose, len(notclose)], [[], 0]) + method_from_result = [result[1].method for result in results] + expected_method = [name for _ in results] + assert_equal(method_from_result, expected_method) + + def run_collection(self, collection, method, name, smoothness=None, + known_fail=None, **kwargs): + r"""Run a collection of tests using the specified method. 
+ + The name is used to determine some optional arguments.""" + tests = get_tests(collection, smoothness=smoothness) + self.run_tests(tests, method, name, known_fail=known_fail, **kwargs) + + +class TestBracketMethods(TestScalarRootFinders): + @pytest.mark.parametrize('method', bracket_methods) + @pytest.mark.parametrize('function', tstutils_functions) + def test_basic_root_scalar(self, method, function): + # Tests bracketing root finders called via `root_scalar` on a small + # set of simple problems, each of which has a root at `x=1`. Checks for + # converged status and that the root was found. + a, b = .5, sqrt(3) + + r = root_scalar(function, method=method.__name__, bracket=[a, b], x0=a, + xtol=self.xtol, rtol=self.rtol) + assert r.converged + assert_allclose(r.root, 1.0, atol=self.xtol, rtol=self.rtol) + assert r.method == method.__name__ + + @pytest.mark.parametrize('method', bracket_methods) + @pytest.mark.parametrize('function', tstutils_functions) + def test_basic_individual(self, method, function): + # Tests individual bracketing root finders on a small set of simple + # problems, each of which has a root at `x=1`. Checks for converged + # status and that the root was found. + a, b = .5, sqrt(3) + root, r = method(function, a, b, xtol=self.xtol, rtol=self.rtol, + full_output=True) + + assert r.converged + assert_allclose(root, 1.0, atol=self.xtol, rtol=self.rtol) + + @pytest.mark.parametrize('method', bracket_methods) + @pytest.mark.parametrize('function', tstutils_functions) + def test_bracket_is_array(self, method, function): + # Test bracketing root finders called via `root_scalar` on a small set + # of simple problems, each of which has a root at `x=1`. Check that + # passing `bracket` as a `ndarray` is accepted and leads to finding the + # correct root. 
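        # Illustrative sketch (an addition for clarity, not taken from the
        # upstream scipy sources): the behaviour this test exercises --
        # `root_scalar` accepting `bracket` passed as an ndarray -- shown with
        # only the public scipy.optimize API:
        #
        #   >>> import numpy as np
        #   >>> from scipy.optimize import root_scalar
        #   >>> res = root_scalar(lambda x: x**2 - 2, method='brentq',
        #   ...                   bracket=np.array([1.0, 2.0]))
        #   >>> bool(res.converged), round(res.root, 6)
        #   (True, 1.414214)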
+ a, b = .5, sqrt(3) + r = root_scalar(function, method=method.__name__, + bracket=np.array([a, b]), x0=a, xtol=self.xtol, + rtol=self.rtol) + assert r.converged + assert_allclose(r.root, 1.0, atol=self.xtol, rtol=self.rtol) + assert r.method == method.__name__ + + @pytest.mark.parametrize('method', bracket_methods) + def test_aps_collection(self, method): + self.run_collection('aps', method, method.__name__, smoothness=1) + + @pytest.mark.parametrize('method', [zeros.bisect, zeros.ridder, + zeros.toms748]) + def test_chandrupatla_collection(self, method): + known_fail = {'fun7.4'} if method == zeros.ridder else {} + self.run_collection('chandrupatla', method, method.__name__, + known_fail=known_fail) + + @pytest.mark.parametrize('method', bracket_methods) + def test_lru_cached_individual(self, method): + # check that https://github.com/scipy/scipy/issues/10846 is fixed + # (`root_scalar` failed when passed a function that was `@lru_cache`d) + a, b = -1, 1 + root, r = method(f_lrucached, a, b, full_output=True) + assert r.converged + assert_allclose(root, 0) + + +class TestNewton(TestScalarRootFinders): + def test_newton_collections(self): + known_fail = ['aps.13.00'] + known_fail += ['aps.12.05', 'aps.12.17'] # fails under Windows Py27 + for collection in ['aps', 'complex']: + self.run_collection(collection, zeros.newton, 'newton', + smoothness=2, known_fail=known_fail) + + def test_halley_collections(self): + known_fail = ['aps.12.06', 'aps.12.07', 'aps.12.08', 'aps.12.09', + 'aps.12.10', 'aps.12.11', 'aps.12.12', 'aps.12.13', + 'aps.12.14', 'aps.12.15', 'aps.12.16', 'aps.12.17', + 'aps.12.18', 'aps.13.00'] + for collection in ['aps', 'complex']: + self.run_collection(collection, zeros.newton, 'halley', + smoothness=2, known_fail=known_fail) + + def test_newton(self): + for f, f_1, f_2 in [(f1, f1_1, f1_2), (f2, f2_1, f2_2)]: + x = zeros.newton(f, 3, tol=1e-6) + assert_allclose(f(x), 0, atol=1e-6) + x = zeros.newton(f, 3, x1=5, tol=1e-6) # secant, x0 and x1 + assert_allclose(f(x), 0, atol=1e-6) + x = zeros.newton(f, 3, fprime=f_1, tol=1e-6) # newton + assert_allclose(f(x), 0, atol=1e-6) + x = zeros.newton(f, 3, fprime=f_1, fprime2=f_2, tol=1e-6) # halley + assert_allclose(f(x), 0, atol=1e-6) + + def test_newton_by_name(self): + r"""Invoke newton through root_scalar()""" + for f, f_1, f_2 in [(f1, f1_1, f1_2), (f2, f2_1, f2_2)]: + r = root_scalar(f, method='newton', x0=3, fprime=f_1, xtol=1e-6) + assert_allclose(f(r.root), 0, atol=1e-6) + for f, f_1, f_2 in [(f1, f1_1, f1_2), (f2, f2_1, f2_2)]: + r = root_scalar(f, method='newton', x0=3, xtol=1e-6) # without f' + assert_allclose(f(r.root), 0, atol=1e-6) + + def test_secant_by_name(self): + r"""Invoke secant through root_scalar()""" + for f, f_1, f_2 in [(f1, f1_1, f1_2), (f2, f2_1, f2_2)]: + r = root_scalar(f, method='secant', x0=3, x1=2, xtol=1e-6) + assert_allclose(f(r.root), 0, atol=1e-6) + r = root_scalar(f, method='secant', x0=3, x1=5, xtol=1e-6) + assert_allclose(f(r.root), 0, atol=1e-6) + for f, f_1, f_2 in [(f1, f1_1, f1_2), (f2, f2_1, f2_2)]: + r = root_scalar(f, method='secant', x0=3, xtol=1e-6) # without x1 + assert_allclose(f(r.root), 0, atol=1e-6) + + def test_halley_by_name(self): + r"""Invoke halley through root_scalar()""" + for f, f_1, f_2 in [(f1, f1_1, f1_2), (f2, f2_1, f2_2)]: + r = root_scalar(f, method='halley', x0=3, + fprime=f_1, fprime2=f_2, xtol=1e-6) + assert_allclose(f(r.root), 0, atol=1e-6) + + def test_root_scalar_fail(self): + message = 'fprime2 must be specified for halley' + with pytest.raises(ValueError, 
match=message): + root_scalar(f1, method='halley', fprime=f1_1, x0=3, xtol=1e-6) # no fprime2 + message = 'fprime must be specified for halley' + with pytest.raises(ValueError, match=message): + root_scalar(f1, method='halley', fprime2=f1_2, x0=3, xtol=1e-6) # no fprime + + def test_array_newton(self): + """test newton with array""" + + def f1(x, *a): + b = a[0] + x * a[3] + return a[1] - a[2] * (np.exp(b / a[5]) - 1.0) - b / a[4] - x + + def f1_1(x, *a): + b = a[3] / a[5] + return -a[2] * np.exp(a[0] / a[5] + x * b) * b - a[3] / a[4] - 1 + + def f1_2(x, *a): + b = a[3] / a[5] + return -a[2] * np.exp(a[0] / a[5] + x * b) * b**2 + + a0 = np.array([ + 5.32725221, 5.48673747, 5.49539973, + 5.36387202, 4.80237316, 1.43764452, + 5.23063958, 5.46094772, 5.50512718, + 5.42046290 + ]) + a1 = (np.sin(range(10)) + 1.0) * 7.0 + args = (a0, a1, 1e-09, 0.004, 10, 0.27456) + x0 = [7.0] * 10 + x = zeros.newton(f1, x0, f1_1, args) + x_expected = ( + 6.17264965, 11.7702805, 12.2219954, + 7.11017681, 1.18151293, 0.143707955, + 4.31928228, 10.5419107, 12.7552490, + 8.91225749 + ) + assert_allclose(x, x_expected) + # test halley's + x = zeros.newton(f1, x0, f1_1, args, fprime2=f1_2) + assert_allclose(x, x_expected) + # test secant + x = zeros.newton(f1, x0, args=args) + assert_allclose(x, x_expected) + + def test_array_newton_complex(self): + def f(x): + return x + 1+1j + + def fprime(x): + return 1.0 + + t = np.full(4, 1j) + x = zeros.newton(f, t, fprime=fprime) + assert_allclose(f(x), 0.) + + # should work even if x0 is not complex + t = np.ones(4) + x = zeros.newton(f, t, fprime=fprime) + assert_allclose(f(x), 0.) + + x = zeros.newton(f, t) + assert_allclose(f(x), 0.) + + def test_array_secant_active_zero_der(self): + """test secant doesn't continue to iterate zero derivatives""" + x = zeros.newton(lambda x, *a: x*x - a[0], x0=[4.123, 5], + args=[np.array([17, 25])]) + assert_allclose(x, (4.123105625617661, 5.0)) + + def test_array_newton_integers(self): + # test secant with float + x = zeros.newton(lambda y, z: z - y ** 2, [4.0] * 2, + args=([15.0, 17.0],)) + assert_allclose(x, (3.872983346207417, 4.123105625617661)) + # test integer becomes float + x = zeros.newton(lambda y, z: z - y ** 2, [4] * 2, args=([15, 17],)) + assert_allclose(x, (3.872983346207417, 4.123105625617661)) + + @pytest.mark.thread_unsafe + def test_array_newton_zero_der_failures(self): + # test derivative zero warning + assert_warns(RuntimeWarning, zeros.newton, + lambda y: y**2 - 2, [0., 0.], lambda y: 2 * y) + # test failures and zero_der + with pytest.warns(RuntimeWarning): + results = zeros.newton(lambda y: y**2 - 2, [0., 0.], + lambda y: 2*y, full_output=True) + assert_allclose(results.root, 0) + assert results.zero_der.all() + assert not results.converged.any() + + def test_newton_combined(self): + def f1(x): + return x ** 2 - 2 * x - 1 + def f1_1(x): + return 2 * x - 2 + def f1_2(x): + return 2.0 + 0 * x + + def f1_and_p_and_pp(x): + return x**2 - 2*x-1, 2*x-2, 2.0 + + sol0 = root_scalar(f1, method='newton', x0=3, fprime=f1_1) + sol = root_scalar(f1_and_p_and_pp, method='newton', x0=3, fprime=True) + assert_allclose(sol0.root, sol.root, atol=1e-8) + assert_equal(2*sol.function_calls, sol0.function_calls) + + sol0 = root_scalar(f1, method='halley', x0=3, fprime=f1_1, fprime2=f1_2) + sol = root_scalar(f1_and_p_and_pp, method='halley', x0=3, fprime2=True) + assert_allclose(sol0.root, sol.root, atol=1e-8) + assert_equal(3*sol.function_calls, sol0.function_calls) + + def test_newton_full_output(self, capsys): + # Test the full_output 
capability, both when converging and not. + # Use simple polynomials, to avoid hitting platform dependencies + # (e.g., exp & trig) in number of iterations + + x0 = 3 + expected_counts = [(6, 7), (5, 10), (3, 9)] + + for derivs in range(3): + kwargs = {'tol': 1e-6, 'full_output': True, } + for k, v in [['fprime', f1_1], ['fprime2', f1_2]][:derivs]: + kwargs[k] = v + + x, r = zeros.newton(f1, x0, disp=False, **kwargs) + assert_(r.converged) + assert_equal(x, r.root) + assert_equal((r.iterations, r.function_calls), expected_counts[derivs]) + if derivs == 0: + assert r.function_calls <= r.iterations + 1 + else: + assert_equal(r.function_calls, (derivs + 1) * r.iterations) + + # Now repeat, allowing one fewer iteration to force convergence failure + iters = r.iterations - 1 + x, r = zeros.newton(f1, x0, maxiter=iters, disp=False, **kwargs) + assert_(not r.converged) + assert_equal(x, r.root) + assert_equal(r.iterations, iters) + + if derivs == 1: + # Check that the correct Exception is raised and + # validate the start of the message. + msg = 'Failed to converge after %d iterations, value is .*' % (iters) + with pytest.raises(RuntimeError, match=msg): + x, r = zeros.newton(f1, x0, maxiter=iters, disp=True, **kwargs) + + @pytest.mark.thread_unsafe + def test_deriv_zero_warning(self): + def func(x): + return x ** 2 - 2.0 + def dfunc(x): + return 2 * x + assert_warns(RuntimeWarning, zeros.newton, func, 0.0, dfunc, disp=False) + with pytest.raises(RuntimeError, match='Derivative was zero'): + zeros.newton(func, 0.0, dfunc) + + def test_newton_does_not_modify_x0(self): + # https://github.com/scipy/scipy/issues/9964 + x0 = np.array([0.1, 3]) + x0_copy = x0.copy() # Copy to test for equality. + newton(np.sin, x0, np.cos) + assert_array_equal(x0, x0_copy) + + def test_gh17570_defaults(self): + # Previously, when fprime was not specified, root_scalar would default + # to secant. When x1 was not specified, secant failed. + # Check that without fprime, the default is secant if x1 is specified + # and newton otherwise. 
+ # Also confirm that `x` is always a scalar (gh-21148) + def f(x): + assert np.isscalar(x) + return f1(x) + + res_newton_default = root_scalar(f, method='newton', x0=3, xtol=1e-6) + res_secant_default = root_scalar(f, method='secant', x0=3, x1=2, + xtol=1e-6) + # `newton` uses the secant method when `x1` and `x2` are specified + res_secant = newton(f, x0=3, x1=2, tol=1e-6, full_output=True)[1] + + # all three found a root + assert_allclose(f(res_newton_default.root), 0, atol=1e-6) + assert res_newton_default.root.shape == tuple() + assert_allclose(f(res_secant_default.root), 0, atol=1e-6) + assert res_secant_default.root.shape == tuple() + assert_allclose(f(res_secant.root), 0, atol=1e-6) + assert res_secant.root.shape == tuple() + + # Defaults are correct + assert (res_secant_default.root + == res_secant.root + != res_newton_default.iterations) + assert (res_secant_default.iterations + == res_secant_default.function_calls - 1 # true for secant + == res_secant.iterations + != res_newton_default.iterations + == res_newton_default.function_calls/2) # newton 2-point diff + + @pytest.mark.parametrize('kwargs', [dict(), {'method': 'newton'}]) + def test_args_gh19090(self, kwargs): + def f(x, a, b): + assert a == 3 + assert b == 1 + return (x ** a - b) + + res = optimize.root_scalar(f, x0=3, args=(3, 1), **kwargs) + assert res.converged + assert_allclose(res.root, 1) + + @pytest.mark.parametrize('method', ['secant', 'newton']) + def test_int_x0_gh19280(self, method): + # Originally, `newton` ensured that only floats were passed to the + # callable. This was inadvertently changed by gh-17669. Check that + # it has been changed back. + def f(x): + # an integer raised to a negative integer power would fail + return x**-2 - 2 + + res = optimize.root_scalar(f, x0=1, method=method) + assert res.converged + assert_allclose(abs(res.root), 2**-0.5) + assert res.root.dtype == np.dtype(np.float64) + + +def test_gh_5555(): + root = 0.1 + + def f(x): + return x - root + + methods = [zeros.bisect, zeros.ridder] + xtol = rtol = TOL + for method in methods: + res = method(f, -1e8, 1e7, xtol=xtol, rtol=rtol) + assert_allclose(root, res, atol=xtol, rtol=rtol, + err_msg=f'method {method.__name__}') + + +def test_gh_5557(): + # Show that without the changes in 5557 brentq and brenth might + # only achieve a tolerance of 2*(xtol + rtol*|res|). + + # f linearly interpolates (0, -0.1), (0.5, -0.1), and (1, + # 0.4). The important parts are that |f(0)| < |f(1)| (so that + # brent takes 0 as the initial guess), |f(0)| < atol (so that + # brent accepts 0 as the root), and that the exact root of f lies + # more than atol away from 0 (so that brent doesn't achieve the + # desired tolerance). + def f(x): + if x < 0.5: + return -0.1 + else: + return x - 0.6 + + atol = 0.51 + rtol = 4 * _FLOAT_EPS + methods = [zeros.brentq, zeros.brenth] + for method in methods: + res = method(f, 0, 1, xtol=atol, rtol=rtol) + assert_allclose(0.6, res, atol=atol, rtol=rtol) + + +def test_brent_underflow_in_root_bracketing(): + # Testing if an interval [a,b] brackets a zero of a function + # by checking f(a)*f(b) < 0 is not reliable when the product + # underflows/overflows. 
(reported in issue# 13737) + + underflow_scenario = (-450.0, -350.0, -400.0) + overflow_scenario = (350.0, 450.0, 400.0) + + for a, b, root in [underflow_scenario, overflow_scenario]: + c = np.exp(root) + for method in [zeros.brenth, zeros.brentq]: + res = method(lambda x: np.exp(x)-c, a, b) + assert_allclose(root, res) + + +class TestRootResults: + r = zeros.RootResults(root=1.0, iterations=44, function_calls=46, flag=0, + method="newton") + + def test_repr(self): + expected_repr = (" converged: True\n flag: converged" + "\n function_calls: 46\n iterations: 44\n" + " root: 1.0\n method: newton") + assert_equal(repr(self.r), expected_repr) + + def test_type(self): + assert isinstance(self.r, OptimizeResult) + + +def test_complex_halley(): + """Test Halley's works with complex roots""" + def f(x, *a): + return a[0] * x**2 + a[1] * x + a[2] + + def f_1(x, *a): + return 2 * a[0] * x + a[1] + + def f_2(x, *a): + retval = 2 * a[0] + try: + size = len(x) + except TypeError: + return retval + else: + return [retval] * size + + z = complex(1.0, 2.0) + coeffs = (2.0, 3.0, 4.0) + y = zeros.newton(f, z, args=coeffs, fprime=f_1, fprime2=f_2, tol=1e-6) + # (-0.75000000000000078+1.1989578808281789j) + assert_allclose(f(y, *coeffs), 0, atol=1e-6) + z = [z] * 10 + coeffs = (2.0, 3.0, 4.0) + y = zeros.newton(f, z, args=coeffs, fprime=f_1, fprime2=f_2, tol=1e-6) + assert_allclose(f(y, *coeffs), 0, atol=1e-6) + + +@pytest.mark.thread_unsafe +def test_zero_der_nz_dp(capsys): + """Test secant method with a non-zero dp, but an infinite newton step""" + # pick a symmetrical functions and choose a point on the side that with dx + # makes a secant that is a flat line with zero slope, EG: f = (x - 100)**2, + # which has a root at x = 100 and is symmetrical around the line x = 100 + # we have to pick a really big number so that it is consistently true + # now find a point on each side so that the secant has a zero slope + dx = np.finfo(float).eps ** 0.33 + # 100 - p0 = p1 - 100 = p0 * (1 + dx) + dx - 100 + # -> 200 = p0 * (2 + dx) + dx + p0 = (200.0 - dx) / (2.0 + dx) + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "RMS of") + x = zeros.newton(lambda y: (y - 100.0)**2, x0=[p0] * 10) + assert_allclose(x, [100] * 10) + # test scalar cases too + p0 = (2.0 - 1e-4) / (2.0 + 1e-4) + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "Tolerance of") + x = zeros.newton(lambda y: (y - 1.0) ** 2, x0=p0, disp=False) + assert_allclose(x, 1) + with pytest.raises(RuntimeError, match='Tolerance of'): + x = zeros.newton(lambda y: (y - 1.0) ** 2, x0=p0, disp=True) + p0 = (-2.0 + 1e-4) / (2.0 + 1e-4) + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "Tolerance of") + x = zeros.newton(lambda y: (y + 1.0) ** 2, x0=p0, disp=False) + assert_allclose(x, -1) + with pytest.raises(RuntimeError, match='Tolerance of'): + x = zeros.newton(lambda y: (y + 1.0) ** 2, x0=p0, disp=True) + + +@pytest.mark.thread_unsafe +def test_array_newton_failures(): + """Test that array newton fails as expected""" + # p = 0.68 # [MPa] + # dp = -0.068 * 1e6 # [Pa] + # T = 323 # [K] + diameter = 0.10 # [m] + # L = 100 # [m] + roughness = 0.00015 # [m] + rho = 988.1 # [kg/m**3] + mu = 5.4790e-04 # [Pa*s] + u = 2.488 # [m/s] + reynolds_number = rho * u * diameter / mu # Reynolds number + + def colebrook_eqn(darcy_friction, re, dia): + return (1 / np.sqrt(darcy_friction) + + 2 * np.log10(roughness / 3.7 / dia + + 2.51 / re / np.sqrt(darcy_friction))) + + # only some failures + with pytest.warns(RuntimeWarning): + result = 
zeros.newton( + colebrook_eqn, x0=[0.01, 0.2, 0.02223, 0.3], maxiter=2, + args=[reynolds_number, diameter], full_output=True + ) + assert not result.converged.all() + # they all fail + with pytest.raises(RuntimeError): + result = zeros.newton( + colebrook_eqn, x0=[0.01] * 2, maxiter=2, + args=[reynolds_number, diameter], full_output=True + ) + + +# this test should **not** raise a RuntimeWarning +def test_gh8904_zeroder_at_root_fails(): + """Test that Newton or Halley don't warn if zero derivative at root""" + + # a function that has a zero derivative at it's root + def f_zeroder_root(x): + return x**3 - x**2 + + # should work with secant + r = zeros.newton(f_zeroder_root, x0=0) + assert_allclose(r, 0, atol=zeros._xtol, rtol=zeros._rtol) + # test again with array + r = zeros.newton(f_zeroder_root, x0=[0]*10) + assert_allclose(r, 0, atol=zeros._xtol, rtol=zeros._rtol) + + # 1st derivative + def fder(x): + return 3 * x**2 - 2 * x + + # 2nd derivative + def fder2(x): + return 6*x - 2 + + # should work with newton and halley + r = zeros.newton(f_zeroder_root, x0=0, fprime=fder) + assert_allclose(r, 0, atol=zeros._xtol, rtol=zeros._rtol) + r = zeros.newton(f_zeroder_root, x0=0, fprime=fder, + fprime2=fder2) + assert_allclose(r, 0, atol=zeros._xtol, rtol=zeros._rtol) + # test again with array + r = zeros.newton(f_zeroder_root, x0=[0]*10, fprime=fder) + assert_allclose(r, 0, atol=zeros._xtol, rtol=zeros._rtol) + r = zeros.newton(f_zeroder_root, x0=[0]*10, fprime=fder, + fprime2=fder2) + assert_allclose(r, 0, atol=zeros._xtol, rtol=zeros._rtol) + + # also test that if a root is found we do not raise RuntimeWarning even if + # the derivative is zero, EG: at x = 0.5, then fval = -0.125 and + # fder = -0.25 so the next guess is 0.5 - (-0.125/-0.5) = 0 which is the + # root, but if the solver continued with that guess, then it will calculate + # a zero derivative, so it should return the root w/o RuntimeWarning + r = zeros.newton(f_zeroder_root, x0=0.5, fprime=fder) + assert_allclose(r, 0, atol=zeros._xtol, rtol=zeros._rtol) + # test again with array + r = zeros.newton(f_zeroder_root, x0=[0.5]*10, fprime=fder) + assert_allclose(r, 0, atol=zeros._xtol, rtol=zeros._rtol) + # doesn't apply to halley + + +def test_gh_8881(): + r"""Test that Halley's method realizes that the 2nd order adjustment + is too big and drops off to the 1st order adjustment.""" + n = 9 + + def f(x): + return power(x, 1.0/n) - power(n, 1.0/n) + + def fp(x): + return power(x, (1.0-n)/n)/n + + def fpp(x): + return power(x, (1.0-2*n)/n) * (1.0/n) * (1.0-n)/n + + x0 = 0.1 + # The root is at x=9. + # The function has positive slope, x0 < root. + # Newton succeeds in 8 iterations + rt, r = newton(f, x0, fprime=fp, full_output=True) + assert r.converged + # Before the Issue 8881/PR 8882, halley would send x in the wrong direction. + # Check that it now succeeds. 
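    # Illustrative sketch (an addition for clarity, not taken from the upstream
    # scipy sources): how `newton` selects the secant, Newton-Raphson and Halley
    # iterations from its optional derivative arguments, using only the public
    # scipy.optimize API:
    #
    #   >>> from scipy.optimize import newton
    #   >>> f = lambda x: x**2 - 2
    #   >>> fp = lambda x: 2.0 * x
    #   >>> fpp = lambda x: 2.0
    #   >>> newton(f, x0=1.5)                             # secant (no fprime)
    #   >>> newton(f, x0=1.5, fprime=fp)                  # Newton-Raphson
    #   >>> newton(f, x0=1.5, fprime=fp, fprime2=fpp)     # Halley
    #
    # each call converges to sqrt(2) ~= 1.4142135623730951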
+ rt, r = newton(f, x0, fprime=fp, fprime2=fpp, full_output=True) + assert r.converged + + +def test_gh_9608_preserve_array_shape(): + """ + Test that shape is preserved for array inputs even if fprime or fprime2 is + scalar + """ + def f(x): + return x**2 + + def fp(x): + return 2 * x + + def fpp(x): + return 2 + + x0 = np.array([-2], dtype=np.float32) + rt, r = newton(f, x0, fprime=fp, fprime2=fpp, full_output=True) + assert r.converged + + x0_array = np.array([-2, -3], dtype=np.float32) + # This next invocation should fail + with pytest.raises(IndexError): + result = zeros.newton( + f, x0_array, fprime=fp, fprime2=fpp, full_output=True + ) + + def fpp_array(x): + return np.full(np.shape(x), 2, dtype=np.float32) + + result = zeros.newton( + f, x0_array, fprime=fp, fprime2=fpp_array, full_output=True + ) + assert result.converged.all() + + +@pytest.mark.parametrize( + "maximum_iterations,flag_expected", + [(10, zeros.CONVERR), (100, zeros.CONVERGED)]) +def test_gh9254_flag_if_maxiter_exceeded(maximum_iterations, flag_expected): + """ + Test that if the maximum iterations is exceeded that the flag is not + converged. + """ + result = zeros.brentq( + lambda x: ((1.2*x - 2.3)*x + 3.4)*x - 4.5, + -30, 30, (), 1e-6, 1e-6, maximum_iterations, + full_output=True, disp=False) + assert result[1].flag == flag_expected + if flag_expected == zeros.CONVERR: + # didn't converge because exceeded maximum iterations + assert result[1].iterations == maximum_iterations + elif flag_expected == zeros.CONVERGED: + # converged before maximum iterations + assert result[1].iterations < maximum_iterations + + +@pytest.mark.thread_unsafe +def test_gh9551_raise_error_if_disp_true(): + """Test that if disp is true then zero derivative raises RuntimeError""" + + def f(x): + return x*x + 1 + + def f_p(x): + return 2*x + + assert_warns(RuntimeWarning, zeros.newton, f, 1.0, f_p, disp=False) + with pytest.raises( + RuntimeError, + match=r'^Derivative was zero\. Failed to converge after \d+ iterations, ' + r'value is [+-]?\d*\.\d+\.$'): + zeros.newton(f, 1.0, f_p) + root = zeros.newton(f, complex(10.0, 10.0), f_p) + assert_allclose(root, complex(0.0, 1.0)) + + +@pytest.mark.parametrize('solver_name', + ['brentq', 'brenth', 'bisect', 'ridder', 'toms748']) +def test_gh3089_8394(solver_name): + # gh-3089 and gh-8394 reported that bracketing solvers returned incorrect + # results when they encountered NaNs. Check that this is resolved. + def f(x): + return np.nan + + solver = getattr(zeros, solver_name) + with pytest.raises(ValueError, match="The function value at x..."): + solver(f, 0, 1) + + +@pytest.mark.parametrize('method', + ['brentq', 'brenth', 'bisect', 'ridder', 'toms748']) +def test_gh18171(method): + # gh-3089 and gh-8394 reported that bracketing solvers returned incorrect + # results when they encountered NaNs. Check that `root_scalar` returns + # normally but indicates that convergence was unsuccessful. See gh-18171. + def f(x): + f._count += 1 + return np.nan + f._count = 0 + + res = root_scalar(f, bracket=(0, 1), method=method) + assert res.converged is False + assert res.flag.startswith("The function value at x") + assert res.function_calls == f._count + assert str(res.root) in res.flag + + +@pytest.mark.parametrize('solver_name', + ['brentq', 'brenth', 'bisect', 'ridder', 'toms748']) +@pytest.mark.parametrize('rs_interface', [True, False]) +def test_function_calls(solver_name, rs_interface): + # There do not appear to be checks that the bracketing solvers report the + # correct number of function evaluations. 
Check that this is the case. + solver = ((lambda f, a, b, **kwargs: root_scalar(f, bracket=(a, b))) + if rs_interface else getattr(zeros, solver_name)) + + def f(x): + f.calls += 1 + return x**2 - 1 + f.calls = 0 + + res = solver(f, 0, 10, full_output=True) + + if rs_interface: + assert res.function_calls == f.calls + else: + assert res[1].function_calls == f.calls + + +@pytest.mark.thread_unsafe +def test_gh_14486_converged_false(): + """Test that zero slope with secant method results in a converged=False""" + def lhs(x): + return x * np.exp(-x*x) - 0.07 + + with pytest.warns(RuntimeWarning, match='Tolerance of'): + res = root_scalar(lhs, method='secant', x0=-0.15, x1=1.0) + assert not res.converged + assert res.flag == 'convergence error' + + with pytest.warns(RuntimeWarning, match='Tolerance of'): + res = newton(lhs, x0=-0.15, x1=1.0, disp=False, full_output=True)[1] + assert not res.converged + assert res.flag == 'convergence error' + + +@pytest.mark.parametrize('solver_name', + ['brentq', 'brenth', 'bisect', 'ridder', 'toms748']) +@pytest.mark.parametrize('rs_interface', [True, False]) +def test_gh5584(solver_name, rs_interface): + # gh-5584 reported that an underflow can cause sign checks in the algorithm + # to fail. Check that this is resolved. + solver = ((lambda f, a, b, **kwargs: root_scalar(f, bracket=(a, b))) + if rs_interface else getattr(zeros, solver_name)) + + def f(x): + return 1e-200*x + + # Report failure when signs are the same + with pytest.raises(ValueError, match='...must have different signs'): + solver(f, -0.5, -0.4, full_output=True) + + # Solve successfully when signs are different + res = solver(f, -0.5, 0.4, full_output=True) + res = res if rs_interface else res[1] + assert res.converged + assert_allclose(res.root, 0, atol=1e-8) + + # Solve successfully when one side is negative zero + res = solver(f, -0.5, float('-0.0'), full_output=True) + res = res if rs_interface else res[1] + assert res.converged + assert_allclose(res.root, 0, atol=1e-8) + + +def test_gh13407(): + # gh-13407 reported that the message produced by `scipy.optimize.toms748` + # when `rtol < eps` is incorrect, and also that toms748 is unusual in + # accepting `rtol` as low as eps while other solvers raise at 4*eps. Check + # that the error message has been corrected and that `rtol=eps` can produce + # a lower function value than `rtol=4*eps`. + def f(x): + return x**3 - 2*x - 5 + + xtol = 1e-300 + eps = np.finfo(float).eps + x1 = zeros.toms748(f, 1e-10, 1e10, xtol=xtol, rtol=1*eps) + f1 = f(x1) + x4 = zeros.toms748(f, 1e-10, 1e10, xtol=xtol, rtol=4*eps) + f4 = f(x4) + assert f1 < f4 + + # using old-style syntax to get exactly the same message + message = fr"rtol too small \({eps/2:g} < {eps:g}\)" + with pytest.raises(ValueError, match=message): + zeros.toms748(f, 1e-10, 1e10, xtol=xtol, rtol=eps/2) + + +def test_newton_complex_gh10103(): + # gh-10103 reported a problem when `newton` is pass a Python complex x0, + # no `fprime` (secant method), and no `x1` (`x1` must be constructed). + # Check that this is resolved. + def f(z): + return z - 1 + res = newton(f, 1+1j) + assert_allclose(res, 1, atol=1e-12) + + res = root_scalar(f, x0=1+1j, x1=2+1.5j, method='secant') + assert_allclose(res.root, 1, atol=1e-12) + + +@pytest.mark.parametrize('method', all_methods) +def test_maxiter_int_check_gh10236(method): + # gh-10236 reported that the error message when `maxiter` is not an integer + # was difficult to interpret. Check that this was resolved (by gh-10907). 
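    # Illustrative sketch (an addition for clarity, not taken from the upstream
    # scipy sources): the behaviour checked below -- `maxiter` must be an
    # integer -- shown with the public `brentq` interface:
    #
    #   >>> from scipy.optimize import brentq
    #   >>> brentq(lambda x: x - 0.5, 0.0, 1.0, maxiter=100)    # accepted
    #   >>> brentq(lambda x: x - 0.5, 0.0, 1.0, maxiter=10.5)   # raises TypeError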
+ message = "'float' object cannot be interpreted as an integer" + with pytest.raises(TypeError, match=message): + method(f1, 0.0, 1.0, maxiter=72.45) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tnc.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tnc.py new file mode 100644 index 0000000000000000000000000000000000000000..e0f66058bbcc501eb1303eb3075cb55705b93192 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/tnc.py @@ -0,0 +1,22 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.optimize` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = [ # noqa: F822 + 'OptimizeResult', + 'fmin_tnc', + 'zeros', +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="optimize", module="tnc", + private_modules=["_tnc"], all=__all__, + attribute=name) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/zeros.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/zeros.py new file mode 100644 index 0000000000000000000000000000000000000000..907d49d37fc1e7476e81a25dbbc0d3910cbbe004 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/optimize/zeros.py @@ -0,0 +1,26 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.optimize` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = [ # noqa: F822 + 'RootResults', + 'bisect', + 'brenth', + 'brentq', + 'newton', + 'ridder', + 'toms748', +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="optimize", module="zeros", + private_modules=["_zeros_py"], all=__all__, + attribute=name) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/__init__.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a1eaa9243ff83caa8e8078adc0cfb268119a4c72 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/__init__.py @@ -0,0 +1,327 @@ +""" +======================================= +Signal processing (:mod:`scipy.signal`) +======================================= + +Convolution +=========== + +.. autosummary:: + :toctree: generated/ + + convolve -- N-D convolution. + correlate -- N-D correlation. + fftconvolve -- N-D convolution using the FFT. + oaconvolve -- N-D convolution using the overlap-add method. + convolve2d -- 2-D convolution (more options). + correlate2d -- 2-D correlation (more options). + sepfir2d -- Convolve with a 2-D separable FIR filter. + choose_conv_method -- Chooses faster of FFT and direct convolution methods. + correlation_lags -- Determines lag indices for 1D cross-correlation. + +B-splines +========= + +.. autosummary:: + :toctree: generated/ + + gauss_spline -- Gaussian approximation to the B-spline basis function. + cspline1d -- Coefficients for 1-D cubic (3rd order) B-spline. + qspline1d -- Coefficients for 1-D quadratic (2nd order) B-spline. 
+ cspline2d -- Coefficients for 2-D cubic (3rd order) B-spline. + qspline2d -- Coefficients for 2-D quadratic (2nd order) B-spline. + cspline1d_eval -- Evaluate a cubic spline at the given points. + qspline1d_eval -- Evaluate a quadratic spline at the given points. + spline_filter -- Smoothing spline (cubic) filtering of a rank-2 array. + +Filtering +========= + +.. autosummary:: + :toctree: generated/ + + order_filter -- N-D order filter. + medfilt -- N-D median filter. + medfilt2d -- 2-D median filter (faster). + wiener -- N-D Wiener filter. + + symiirorder1 -- 2nd-order IIR filter (cascade of first-order systems). + symiirorder2 -- 4th-order IIR filter (cascade of second-order systems). + lfilter -- 1-D FIR and IIR digital linear filtering. + lfiltic -- Construct initial conditions for `lfilter`. + lfilter_zi -- Compute an initial state zi for the lfilter function that + -- corresponds to the steady state of the step response. + filtfilt -- A forward-backward filter. + savgol_filter -- Filter a signal using the Savitzky-Golay filter. + + deconvolve -- 1-D deconvolution using lfilter. + + sosfilt -- 1-D IIR digital linear filtering using + -- a second-order sections filter representation. + sosfilt_zi -- Compute an initial state zi for the sosfilt function that + -- corresponds to the steady state of the step response. + sosfiltfilt -- A forward-backward filter for second-order sections. + hilbert -- Compute 1-D analytic signal, using the Hilbert transform. + hilbert2 -- Compute 2-D analytic signal, using the Hilbert transform. + envelope -- Compute the envelope of a real- or complex-valued signal. + + decimate -- Downsample a signal. + detrend -- Remove linear and/or constant trends from data. + resample -- Resample using Fourier method. + resample_poly -- Resample using polyphase filtering method. + upfirdn -- Upsample, apply FIR filter, downsample. + +Filter design +============= + +.. autosummary:: + :toctree: generated/ + + bilinear -- Digital filter from an analog filter using + -- the bilinear transform. + bilinear_zpk -- Digital filter from an analog filter using + -- the bilinear transform. + findfreqs -- Find array of frequencies for computing filter response. + firls -- FIR filter design using least-squares error minimization. + firwin -- Windowed FIR filter design, with frequency response + -- defined as pass and stop bands. + firwin2 -- Windowed FIR filter design, with arbitrary frequency + -- response. + freqs -- Analog filter frequency response from TF coefficients. + freqs_zpk -- Analog filter frequency response from ZPK coefficients. + freqz -- Digital filter frequency response from TF coefficients. + freqz_sos -- Digital filter frequency response for SOS format filter. + freqz_zpk -- Digital filter frequency response from ZPK coefficients. + gammatone -- FIR and IIR gammatone filter design. + group_delay -- Digital filter group delay. + iirdesign -- IIR filter design given bands and gains. + iirfilter -- IIR filter design given order and critical frequencies. + kaiser_atten -- Compute the attenuation of a Kaiser FIR filter, given + -- the number of taps and the transition width at + -- discontinuities in the frequency response. + kaiser_beta -- Compute the Kaiser parameter beta, given the desired + -- FIR filter attenuation. + kaiserord -- Design a Kaiser window to limit ripple and width of + -- transition region. + minimum_phase -- Convert a linear phase FIR filter to minimum phase. 
+ savgol_coeffs -- Compute the FIR filter coefficients for a Savitzky-Golay + -- filter. + remez -- Optimal FIR filter design. + + unique_roots -- Unique roots and their multiplicities. + residue -- Partial fraction expansion of b(s) / a(s). + residuez -- Partial fraction expansion of b(z) / a(z). + invres -- Inverse partial fraction expansion for analog filter. + invresz -- Inverse partial fraction expansion for digital filter. + BadCoefficients -- Warning on badly conditioned filter coefficients. + +Lower-level filter design functions: + +.. autosummary:: + :toctree: generated/ + + abcd_normalize -- Check state-space matrices and ensure they are rank-2. + band_stop_obj -- Band Stop Objective Function for order minimization. + besselap -- Return (z,p,k) for analog prototype of Bessel filter. + buttap -- Return (z,p,k) for analog prototype of Butterworth filter. + cheb1ap -- Return (z,p,k) for type I Chebyshev filter. + cheb2ap -- Return (z,p,k) for type II Chebyshev filter. + ellipap -- Return (z,p,k) for analog prototype of elliptic filter. + lp2bp -- Transform a lowpass filter prototype to a bandpass filter. + lp2bp_zpk -- Transform a lowpass filter prototype to a bandpass filter. + lp2bs -- Transform a lowpass filter prototype to a bandstop filter. + lp2bs_zpk -- Transform a lowpass filter prototype to a bandstop filter. + lp2hp -- Transform a lowpass filter prototype to a highpass filter. + lp2hp_zpk -- Transform a lowpass filter prototype to a highpass filter. + lp2lp -- Transform a lowpass filter prototype to a lowpass filter. + lp2lp_zpk -- Transform a lowpass filter prototype to a lowpass filter. + normalize -- Normalize polynomial representation of a transfer function. + + + +Matlab-style IIR filter design +============================== + +.. autosummary:: + :toctree: generated/ + + butter -- Butterworth + buttord + cheby1 -- Chebyshev Type I + cheb1ord + cheby2 -- Chebyshev Type II + cheb2ord + ellip -- Elliptic (Cauer) + ellipord + bessel -- Bessel (no order selection available -- try butterod) + iirnotch -- Design second-order IIR notch digital filter. + iirpeak -- Design second-order IIR peak (resonant) digital filter. + iircomb -- Design IIR comb filter. + +Continuous-time linear systems +============================== + +.. autosummary:: + :toctree: generated/ + + lti -- Continuous-time linear time invariant system base class. + StateSpace -- Linear time invariant system in state space form. + TransferFunction -- Linear time invariant system in transfer function form. + ZerosPolesGain -- Linear time invariant system in zeros, poles, gain form. + lsim -- Continuous-time simulation of output to linear system. + impulse -- Impulse response of linear, time-invariant (LTI) system. + step -- Step response of continuous-time LTI system. + freqresp -- Frequency response of a continuous-time LTI system. + bode -- Bode magnitude and phase data (continuous-time LTI). + +Discrete-time linear systems +============================ + +.. autosummary:: + :toctree: generated/ + + dlti -- Discrete-time linear time invariant system base class. + StateSpace -- Linear time invariant system in state space form. + TransferFunction -- Linear time invariant system in transfer function form. + ZerosPolesGain -- Linear time invariant system in zeros, poles, gain form. + dlsim -- Simulation of output to a discrete-time linear system. + dimpulse -- Impulse response of a discrete-time LTI system. + dstep -- Step response of a discrete-time LTI system. 
+ dfreqresp -- Frequency response of a discrete-time LTI system. + dbode -- Bode magnitude and phase data (discrete-time LTI). + +LTI representations +=================== + +.. autosummary:: + :toctree: generated/ + + tf2zpk -- Transfer function to zero-pole-gain. + tf2sos -- Transfer function to second-order sections. + tf2ss -- Transfer function to state-space. + zpk2tf -- Zero-pole-gain to transfer function. + zpk2sos -- Zero-pole-gain to second-order sections. + zpk2ss -- Zero-pole-gain to state-space. + ss2tf -- State-pace to transfer function. + ss2zpk -- State-space to pole-zero-gain. + sos2zpk -- Second-order sections to zero-pole-gain. + sos2tf -- Second-order sections to transfer function. + cont2discrete -- Continuous-time to discrete-time LTI conversion. + place_poles -- Pole placement. + +Waveforms +========= + +.. autosummary:: + :toctree: generated/ + + chirp -- Frequency swept cosine signal, with several freq functions. + gausspulse -- Gaussian modulated sinusoid. + max_len_seq -- Maximum length sequence. + sawtooth -- Periodic sawtooth. + square -- Square wave. + sweep_poly -- Frequency swept cosine signal; freq is arbitrary polynomial. + unit_impulse -- Discrete unit impulse. + +Window functions +================ + +For window functions, see the `scipy.signal.windows` namespace. + +In the `scipy.signal` namespace, there is a convenience function to +obtain these windows by name: + +.. autosummary:: + :toctree: generated/ + + get_window -- Return a window of a given length and type. + +Peak finding +============ + +.. autosummary:: + :toctree: generated/ + + argrelmin -- Calculate the relative minima of data. + argrelmax -- Calculate the relative maxima of data. + argrelextrema -- Calculate the relative extrema of data. + find_peaks -- Find a subset of peaks inside a signal. + find_peaks_cwt -- Find peaks in a 1-D array with wavelet transformation. + peak_prominences -- Calculate the prominence of each peak in a signal. + peak_widths -- Calculate the width of each peak in a signal. + +Spectral analysis +================= + +.. autosummary:: + :toctree: generated/ + + periodogram -- Compute a (modified) periodogram. + welch -- Compute a periodogram using Welch's method. + csd -- Compute the cross spectral density, using Welch's method. + coherence -- Compute the magnitude squared coherence, using Welch's method. + spectrogram -- Compute the spectrogram (legacy). + lombscargle -- Computes the Lomb-Scargle periodogram. + vectorstrength -- Computes the vector strength. + ShortTimeFFT -- Interface for calculating the \ + :ref:`Short Time Fourier Transform ` and \ + its inverse. + stft -- Compute the Short Time Fourier Transform (legacy). + istft -- Compute the Inverse Short Time Fourier Transform (legacy). + check_COLA -- Check the COLA constraint for iSTFT reconstruction. + check_NOLA -- Check the NOLA constraint for iSTFT reconstruction. + +Chirp Z-transform and Zoom FFT +============================================ + +.. autosummary:: + :toctree: generated/ + + czt - Chirp z-transform convenience function + zoom_fft - Zoom FFT convenience function + CZT - Chirp z-transform function generator + ZoomFFT - Zoom FFT function generator + czt_points - Output the z-plane points sampled by a chirp z-transform + +The functions are simpler to use than the classes, but are less efficient when +using the same transform on many arrays of the same length, since they +repeatedly generate the same chirp signal with every call. 
In these cases, +use the classes to create a reusable function instead. + +""" + +from . import _sigtools, windows +from ._waveforms import * +from ._max_len_seq import max_len_seq +from ._upfirdn import upfirdn + +from ._spline import ( + sepfir2d +) + +from ._spline_filters import * +from ._filter_design import * +from ._fir_filter_design import * +from ._ltisys import * +from ._lti_conversion import * +from ._signaltools import * +from ._savitzky_golay import savgol_coeffs, savgol_filter +from ._spectral_py import * +from ._short_time_fft import * +from ._peak_finding import * +from ._czt import * +from .windows import get_window # keep this one in signal namespace + +# Deprecated namespaces, to be removed in v2.0.0 +from . import ( + bsplines, filter_design, fir_filter_design, lti_conversion, ltisys, + spectral, signaltools, waveforms, wavelets, spline +) + +__all__ = [ + s for s in dir() if not s.startswith("_") +] + +from scipy._lib._testutils import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/_arraytools.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/_arraytools.py new file mode 100644 index 0000000000000000000000000000000000000000..87ce75d8d892a64021da7abc5d149556c22cf983 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/_arraytools.py @@ -0,0 +1,264 @@ +""" +Functions for acting on a axis of an array. +""" +import numpy as np + + +def axis_slice(a, start=None, stop=None, step=None, axis=-1): + """Take a slice along axis 'axis' from 'a'. + + Parameters + ---------- + a : numpy.ndarray + The array to be sliced. + start, stop, step : int or None + The slice parameters. + axis : int, optional + The axis of `a` to be sliced. + + Examples + -------- + >>> import numpy as np + >>> from scipy.signal._arraytools import axis_slice + >>> a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) + >>> axis_slice(a, start=0, stop=1, axis=1) + array([[1], + [4], + [7]]) + >>> axis_slice(a, start=1, axis=0) + array([[4, 5, 6], + [7, 8, 9]]) + + Notes + ----- + The keyword arguments start, stop and step are used by calling + slice(start, stop, step). This implies axis_slice() does not + handle its arguments the exactly the same as indexing. To select + a single index k, for example, use + axis_slice(a, start=k, stop=k+1) + In this case, the length of the axis 'axis' in the result will + be 1; the trivial dimension is not removed. (Use numpy.squeeze() + to remove trivial axes.) + """ + a_slice = [slice(None)] * a.ndim + a_slice[axis] = slice(start, stop, step) + b = a[tuple(a_slice)] + return b + + +def axis_reverse(a, axis=-1): + """Reverse the 1-D slices of `a` along axis `axis`. + + Returns axis_slice(a, step=-1, axis=axis). + """ + return axis_slice(a, step=-1, axis=axis) + + +def odd_ext(x, n, axis=-1): + """ + Odd extension at the boundaries of an array + + Generate a new ndarray by making an odd extension of `x` along an axis. + + Parameters + ---------- + x : ndarray + The array to be extended. + n : int + The number of elements by which to extend `x` at each end of the axis. + axis : int, optional + The axis along which to extend `x`. Default is -1. 
+ + Examples + -------- + >>> import numpy as np + >>> from scipy.signal._arraytools import odd_ext + >>> a = np.array([[1, 2, 3, 4, 5], [0, 1, 4, 9, 16]]) + >>> odd_ext(a, 2) + array([[-1, 0, 1, 2, 3, 4, 5, 6, 7], + [-4, -1, 0, 1, 4, 9, 16, 23, 28]]) + + Odd extension is a "180 degree rotation" at the endpoints of the original + array: + + >>> t = np.linspace(0, 1.5, 100) + >>> a = 0.9 * np.sin(2 * np.pi * t**2) + >>> b = odd_ext(a, 40) + >>> import matplotlib.pyplot as plt + >>> plt.plot(np.arange(-40, 140), b, 'b', lw=1, label='odd extension') + >>> plt.plot(np.arange(100), a, 'r', lw=2, label='original') + >>> plt.legend(loc='best') + >>> plt.show() + """ + if n < 1: + return x + if n > x.shape[axis] - 1: + raise ValueError(("The extension length n (%d) is too big. " + + "It must not exceed x.shape[axis]-1, which is %d.") + % (n, x.shape[axis] - 1)) + left_end = axis_slice(x, start=0, stop=1, axis=axis) + left_ext = axis_slice(x, start=n, stop=0, step=-1, axis=axis) + right_end = axis_slice(x, start=-1, axis=axis) + right_ext = axis_slice(x, start=-2, stop=-(n + 2), step=-1, axis=axis) + ext = np.concatenate((2 * left_end - left_ext, + x, + 2 * right_end - right_ext), + axis=axis) + return ext + + +def even_ext(x, n, axis=-1): + """ + Even extension at the boundaries of an array + + Generate a new ndarray by making an even extension of `x` along an axis. + + Parameters + ---------- + x : ndarray + The array to be extended. + n : int + The number of elements by which to extend `x` at each end of the axis. + axis : int, optional + The axis along which to extend `x`. Default is -1. + + Examples + -------- + >>> import numpy as np + >>> from scipy.signal._arraytools import even_ext + >>> a = np.array([[1, 2, 3, 4, 5], [0, 1, 4, 9, 16]]) + >>> even_ext(a, 2) + array([[ 3, 2, 1, 2, 3, 4, 5, 4, 3], + [ 4, 1, 0, 1, 4, 9, 16, 9, 4]]) + + Even extension is a "mirror image" at the boundaries of the original array: + + >>> t = np.linspace(0, 1.5, 100) + >>> a = 0.9 * np.sin(2 * np.pi * t**2) + >>> b = even_ext(a, 40) + >>> import matplotlib.pyplot as plt + >>> plt.plot(np.arange(-40, 140), b, 'b', lw=1, label='even extension') + >>> plt.plot(np.arange(100), a, 'r', lw=2, label='original') + >>> plt.legend(loc='best') + >>> plt.show() + """ + if n < 1: + return x + if n > x.shape[axis] - 1: + raise ValueError(("The extension length n (%d) is too big. " + + "It must not exceed x.shape[axis]-1, which is %d.") + % (n, x.shape[axis] - 1)) + left_ext = axis_slice(x, start=n, stop=0, step=-1, axis=axis) + right_ext = axis_slice(x, start=-2, stop=-(n + 2), step=-1, axis=axis) + ext = np.concatenate((left_ext, + x, + right_ext), + axis=axis) + return ext + + +def const_ext(x, n, axis=-1): + """ + Constant extension at the boundaries of an array + + Generate a new ndarray that is a constant extension of `x` along an axis. + + The extension repeats the values at the first and last element of + the axis. + + Parameters + ---------- + x : ndarray + The array to be extended. + n : int + The number of elements by which to extend `x` at each end of the axis. + axis : int, optional + The axis along which to extend `x`. Default is -1. 
+ + Examples + -------- + >>> import numpy as np + >>> from scipy.signal._arraytools import const_ext + >>> a = np.array([[1, 2, 3, 4, 5], [0, 1, 4, 9, 16]]) + >>> const_ext(a, 2) + array([[ 1, 1, 1, 2, 3, 4, 5, 5, 5], + [ 0, 0, 0, 1, 4, 9, 16, 16, 16]]) + + Constant extension continues with the same values as the endpoints of the + array: + + >>> t = np.linspace(0, 1.5, 100) + >>> a = 0.9 * np.sin(2 * np.pi * t**2) + >>> b = const_ext(a, 40) + >>> import matplotlib.pyplot as plt + >>> plt.plot(np.arange(-40, 140), b, 'b', lw=1, label='constant extension') + >>> plt.plot(np.arange(100), a, 'r', lw=2, label='original') + >>> plt.legend(loc='best') + >>> plt.show() + """ + if n < 1: + return x + left_end = axis_slice(x, start=0, stop=1, axis=axis) + ones_shape = [1] * x.ndim + ones_shape[axis] = n + ones = np.ones(ones_shape, dtype=x.dtype) + left_ext = ones * left_end + right_end = axis_slice(x, start=-1, axis=axis) + right_ext = ones * right_end + ext = np.concatenate((left_ext, + x, + right_ext), + axis=axis) + return ext + + +def zero_ext(x, n, axis=-1): + """ + Zero padding at the boundaries of an array + + Generate a new ndarray that is a zero-padded extension of `x` along + an axis. + + Parameters + ---------- + x : ndarray + The array to be extended. + n : int + The number of elements by which to extend `x` at each end of the + axis. + axis : int, optional + The axis along which to extend `x`. Default is -1. + + Examples + -------- + >>> import numpy as np + >>> from scipy.signal._arraytools import zero_ext + >>> a = np.array([[1, 2, 3, 4, 5], [0, 1, 4, 9, 16]]) + >>> zero_ext(a, 2) + array([[ 0, 0, 1, 2, 3, 4, 5, 0, 0], + [ 0, 0, 0, 1, 4, 9, 16, 0, 0]]) + """ + if n < 1: + return x + zeros_shape = list(x.shape) + zeros_shape[axis] = n + zeros = np.zeros(zeros_shape, dtype=x.dtype) + ext = np.concatenate((zeros, x, zeros), axis=axis) + return ext + + +def _validate_fs(fs, allow_none=True): + """ + Check if the given sampling frequency is a scalar and raises an exception + otherwise. If allow_none is False, also raises an exception for none + sampling rates. Returns the sampling frequency as float or none if the + input is none. + """ + if fs is None: + if not allow_none: + raise ValueError("Sampling frequency can not be none.") + else: # should be float + if not np.isscalar(fs): + raise ValueError("Sampling frequency fs must be a single scalar.") + fs = float(fs) + return fs diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/_czt.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/_czt.py new file mode 100644 index 0000000000000000000000000000000000000000..c5e5715b460fb2719b68d4694474bc1efc0a9fa0 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/_czt.py @@ -0,0 +1,575 @@ +# This program is public domain +# Authors: Paul Kienzle, Nadav Horesh +""" +Chirp z-transform. + +We provide two interfaces to the chirp z-transform: an object interface +which precalculates part of the transform and can be applied efficiently +to many different data sets, and a functional interface which is applied +only to the given data set. + +Transforms +---------- + +CZT : callable (x, axis=-1) -> array + Define a chirp z-transform that can be applied to different signals. +ZoomFFT : callable (x, axis=-1) -> array + Define a Fourier transform on a range of frequencies. + +Functions +--------- + +czt : array + Compute the chirp z-transform for a signal. 
+zoom_fft : array + Compute the Fourier transform on a range of frequencies. +""" + +import cmath +import numbers +import numpy as np +from numpy import pi, arange +from scipy.fft import fft, ifft, next_fast_len + +__all__ = ['czt', 'zoom_fft', 'CZT', 'ZoomFFT', 'czt_points'] + + +def _validate_sizes(n, m): + if n < 1 or not isinstance(n, numbers.Integral): + raise ValueError('Invalid number of CZT data ' + f'points ({n}) specified. ' + 'n must be positive and integer type.') + + if m is None: + m = n + elif m < 1 or not isinstance(m, numbers.Integral): + raise ValueError('Invalid number of CZT output ' + f'points ({m}) specified. ' + 'm must be positive and integer type.') + + return m + + +def czt_points(m, w=None, a=1+0j): + """ + Return the points at which the chirp z-transform is computed. + + Parameters + ---------- + m : int + The number of points desired. + w : complex, optional + The ratio between points in each step. + Defaults to equally spaced points around the entire unit circle. + a : complex, optional + The starting point in the complex plane. Default is 1+0j. + + Returns + ------- + out : ndarray + The points in the Z plane at which `CZT` samples the z-transform, + when called with arguments `m`, `w`, and `a`, as complex numbers. + + See Also + -------- + CZT : Class that creates a callable chirp z-transform function. + czt : Convenience function for quickly calculating CZT. + + Examples + -------- + Plot the points of a 16-point FFT: + + >>> import numpy as np + >>> from scipy.signal import czt_points + >>> points = czt_points(16) + >>> import matplotlib.pyplot as plt + >>> plt.plot(points.real, points.imag, 'o') + >>> plt.gca().add_patch(plt.Circle((0,0), radius=1, fill=False, alpha=.3)) + >>> plt.axis('equal') + >>> plt.show() + + and a 91-point logarithmic spiral that crosses the unit circle: + + >>> m, w, a = 91, 0.995*np.exp(-1j*np.pi*.05), 0.8*np.exp(1j*np.pi/6) + >>> points = czt_points(m, w, a) + >>> plt.plot(points.real, points.imag, 'o') + >>> plt.gca().add_patch(plt.Circle((0,0), radius=1, fill=False, alpha=.3)) + >>> plt.axis('equal') + >>> plt.show() + """ + m = _validate_sizes(1, m) + + k = arange(m) + + a = 1.0 * a # at least float + + if w is None: + # Nothing specified, default to FFT + return a * np.exp(2j * pi * k / m) + else: + # w specified + w = 1.0 * w # at least float + return a * w**-k + + +class CZT: + """ + Create a callable chirp z-transform function. + + Transform to compute the frequency response around a spiral. + Objects of this class are callables which can compute the + chirp z-transform on their inputs. This object precalculates the constant + chirps used in the given transform. + + Parameters + ---------- + n : int + The size of the signal. + m : int, optional + The number of output points desired. Default is `n`. + w : complex, optional + The ratio between points in each step. This must be precise or the + accumulated error will degrade the tail of the output sequence. + Defaults to equally spaced points around the entire unit circle. + a : complex, optional + The starting point in the complex plane. Default is 1+0j. + + Returns + ------- + f : CZT + Callable object ``f(x, axis=-1)`` for computing the chirp z-transform + on `x`. + + See Also + -------- + czt : Convenience function for quickly calculating CZT. + ZoomFFT : Class that creates a callable partial FFT function. 
+
+    Notes
+    -----
+    The defaults are chosen such that ``f(x)`` is equivalent to
+    ``fft.fft(x)`` and, if ``m > len(x)``, that ``f(x, m)`` is equivalent to
+    ``fft.fft(x, m)``.
+
+    If `w` does not lie on the unit circle, then the transform will be
+    around a spiral with exponentially-increasing radius. Regardless,
+    the angle will increase linearly.
+
+    For transforms that do lie on the unit circle, accuracy is better when
+    using `ZoomFFT`, since any numerical error in `w` is
+    accumulated for long data lengths, drifting away from the unit circle.
+
+    The chirp z-transform can be faster than an equivalent FFT with
+    zero padding. Try it with your own array sizes to see.
+
+    However, the chirp z-transform is considerably less precise than the
+    equivalent zero-padded FFT.
+
+    As this CZT is implemented using the Bluestein algorithm, it can compute
+    large prime-length Fourier transforms in O(N log N) time, rather than the
+    O(N**2) time required by the direct DFT calculation. (`scipy.fft` also
+    uses Bluestein's algorithm.)
+
+    (The name "chirp z-transform" comes from the use of a chirp in the
+    Bluestein algorithm. It does not decompose signals into chirps, like
+    other transforms with "chirp" in the name.)
+
+    References
+    ----------
+    .. [1] Leo I. Bluestein, "A linear filtering approach to the computation
+           of the discrete Fourier transform," Northeast Electronics Research
+           and Engineering Meeting Record 10, 218-219 (1968).
+    .. [2] Rabiner, Schafer, and Rader, "The chirp z-transform algorithm and
+           its application," Bell Syst. Tech. J. 48, 1249-1292 (1969).
+
+    Examples
+    --------
+    Compute multiple prime-length FFTs:
+
+    >>> from scipy.signal import CZT
+    >>> import numpy as np
+    >>> a = np.random.rand(7)
+    >>> b = np.random.rand(7)
+    >>> c = np.random.rand(7)
+    >>> czt_7 = CZT(n=7)
+    >>> A = czt_7(a)
+    >>> B = czt_7(b)
+    >>> C = czt_7(c)
+
+    Display the points at which the FFT is calculated:
+
+    >>> czt_7.points()
+    array([ 1.00000000+0.j        ,  0.62348980+0.78183148j,
+           -0.22252093+0.97492791j, -0.90096887+0.43388374j,
+           -0.90096887-0.43388374j, -0.22252093-0.97492791j,
+            0.62348980-0.78183148j])
+    >>> import matplotlib.pyplot as plt
+    >>> plt.plot(czt_7.points().real, czt_7.points().imag, 'o')
+    >>> plt.gca().add_patch(plt.Circle((0,0), radius=1, fill=False, alpha=.3))
+    >>> plt.axis('equal')
+    >>> plt.show()
+    """
+
+    def __init__(self, n, m=None, w=None, a=1+0j):
+        m = _validate_sizes(n, m)
+
+        k = arange(max(m, n), dtype=np.min_scalar_type(-max(m, n)**2))
+
+        if w is None:
+            # Nothing specified, default to FFT-like
+            w = cmath.exp(-2j*pi/m)
+            wk2 = np.exp(-(1j * pi * ((k**2) % (2*m))) / m)
+        else:
+            # w specified
+            wk2 = w**(k**2/2.)
+
+        a = 1.0 * a  # at least float
+
+        self.w, self.a = w, a
+        self.m, self.n = m, n
+
+        nfft = next_fast_len(n + m - 1)
+        self._Awk2 = a**-k[:n] * wk2[:n]
+        self._nfft = nfft
+        self._Fwk2 = fft(1/np.hstack((wk2[n-1:0:-1], wk2[:m])), nfft)
+        self._wk2 = wk2[:m]
+        self._yidx = slice(n-1, n+m-1)
+
+    def __call__(self, x, *, axis=-1):
+        """
+        Calculate the chirp z-transform of a signal.
+
+        Parameters
+        ----------
+        x : array
+            The signal to transform.
+        axis : int, optional
+            Axis over which to compute the FFT. If not given, the last axis is
+            used.
+
+        Returns
+        -------
+        out : ndarray
+            An array of the same dimensions as `x`, but with the length of the
+            transformed axis set to `m`.
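+
+        Examples
+        --------
+        A minimal sanity check, relying on the documented default behavior
+        that an unmodified `CZT` matches the FFT:
+
+        >>> import numpy as np
+        >>> from scipy.signal import CZT
+        >>> x = np.arange(8.0)
+        >>> czt_8 = CZT(n=8)
+        >>> np.allclose(czt_8(x), np.fft.fft(x))
+        True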
+ """ + x = np.asarray(x) + if x.shape[axis] != self.n: + raise ValueError(f"CZT defined for length {self.n}, not " + f"{x.shape[axis]}") + # Calculate transpose coordinates, to allow operation on any given axis + trnsp = np.arange(x.ndim) + trnsp[[axis, -1]] = [-1, axis] + x = x.transpose(*trnsp) + y = ifft(self._Fwk2 * fft(x*self._Awk2, self._nfft)) + y = y[..., self._yidx] * self._wk2 + return y.transpose(*trnsp) + + def points(self): + """ + Return the points at which the chirp z-transform is computed. + """ + return czt_points(self.m, self.w, self.a) + + +class ZoomFFT(CZT): + """ + Create a callable zoom FFT transform function. + + This is a specialization of the chirp z-transform (`CZT`) for a set of + equally-spaced frequencies around the unit circle, used to calculate a + section of the FFT more efficiently than calculating the entire FFT and + truncating. + + Parameters + ---------- + n : int + The size of the signal. + fn : array_like + A length-2 sequence [`f1`, `f2`] giving the frequency range, or a + scalar, for which the range [0, `fn`] is assumed. + m : int, optional + The number of points to evaluate. Default is `n`. + fs : float, optional + The sampling frequency. If ``fs=10`` represented 10 kHz, for example, + then `f1` and `f2` would also be given in kHz. + The default sampling frequency is 2, so `f1` and `f2` should be + in the range [0, 1] to keep the transform below the Nyquist + frequency. + endpoint : bool, optional + If True, `f2` is the last sample. Otherwise, it is not included. + Default is False. + + Returns + ------- + f : ZoomFFT + Callable object ``f(x, axis=-1)`` for computing the zoom FFT on `x`. + + See Also + -------- + zoom_fft : Convenience function for calculating a zoom FFT. + + Notes + ----- + The defaults are chosen such that ``f(x, 2)`` is equivalent to + ``fft.fft(x)`` and, if ``m > len(x)``, that ``f(x, 2, m)`` is equivalent to + ``fft.fft(x, m)``. + + Sampling frequency is 1/dt, the time step between samples in the + signal `x`. The unit circle corresponds to frequencies from 0 up + to the sampling frequency. The default sampling frequency of 2 + means that `f1`, `f2` values up to the Nyquist frequency are in the + range [0, 1). For `f1`, `f2` values expressed in radians, a sampling + frequency of 2*pi should be used. + + Remember that a zoom FFT can only interpolate the points of the existing + FFT. It cannot help to resolve two separate nearby frequencies. + Frequency resolution can only be increased by increasing acquisition + time. + + These functions are implemented using Bluestein's algorithm (as is + `scipy.fft`). [2]_ + + References + ---------- + .. [1] Steve Alan Shilling, "A study of the chirp z-transform and its + applications", pg 29 (1970) + https://krex.k-state.edu/dspace/bitstream/handle/2097/7844/LD2668R41972S43.pdf + .. [2] Leo I. Bluestein, "A linear filtering approach to the computation + of the discrete Fourier transform," Northeast Electronics Research + and Engineering Meeting Record 10, 218-219 (1968). 
+ + Examples + -------- + To plot the transform results use something like the following: + + >>> import numpy as np + >>> from scipy.signal import ZoomFFT + >>> t = np.linspace(0, 1, 1021) + >>> x = np.cos(2*np.pi*15*t) + np.sin(2*np.pi*17*t) + >>> f1, f2 = 5, 27 + >>> transform = ZoomFFT(len(x), [f1, f2], len(x), fs=1021) + >>> X = transform(x) + >>> f = np.linspace(f1, f2, len(x)) + >>> import matplotlib.pyplot as plt + >>> plt.plot(f, 20*np.log10(np.abs(X))) + >>> plt.show() + """ + + def __init__(self, n, fn, m=None, *, fs=2, endpoint=False): + m = _validate_sizes(n, m) + + k = arange(max(m, n), dtype=np.min_scalar_type(-max(m, n)**2)) + + if np.size(fn) == 2: + f1, f2 = fn + elif np.size(fn) == 1: + f1, f2 = 0.0, fn + else: + raise ValueError('fn must be a scalar or 2-length sequence') + + self.f1, self.f2, self.fs = f1, f2, fs + + if endpoint: + scale = ((f2 - f1) * m) / (fs * (m - 1)) + else: + scale = (f2 - f1) / fs + a = cmath.exp(2j * pi * f1/fs) + wk2 = np.exp(-(1j * pi * scale * k**2) / m) + + self.w = cmath.exp(-2j*pi/m * scale) + self.a = a + self.m, self.n = m, n + + ak = np.exp(-2j * pi * f1/fs * k[:n]) + self._Awk2 = ak * wk2[:n] + + nfft = next_fast_len(n + m - 1) + self._nfft = nfft + self._Fwk2 = fft(1/np.hstack((wk2[n-1:0:-1], wk2[:m])), nfft) + self._wk2 = wk2[:m] + self._yidx = slice(n-1, n+m-1) + + +def czt(x, m=None, w=None, a=1+0j, *, axis=-1): + """ + Compute the frequency response around a spiral in the Z plane. + + Parameters + ---------- + x : array + The signal to transform. + m : int, optional + The number of output points desired. Default is the length of the + input data. + w : complex, optional + The ratio between points in each step. This must be precise or the + accumulated error will degrade the tail of the output sequence. + Defaults to equally spaced points around the entire unit circle. + a : complex, optional + The starting point in the complex plane. Default is 1+0j. + axis : int, optional + Axis over which to compute the FFT. If not given, the last axis is + used. + + Returns + ------- + out : ndarray + An array of the same dimensions as `x`, but with the length of the + transformed axis set to `m`. + + See Also + -------- + CZT : Class that creates a callable chirp z-transform function. + zoom_fft : Convenience function for partial FFT calculations. + + Notes + ----- + The defaults are chosen such that ``signal.czt(x)`` is equivalent to + ``fft.fft(x)`` and, if ``m > len(x)``, that ``signal.czt(x, m)`` is + equivalent to ``fft.fft(x, m)``. + + If the transform needs to be repeated, use `CZT` to construct a + specialized transform function which can be reused without + recomputing constants. + + An example application is in system identification, repeatedly evaluating + small slices of the z-transform of a system, around where a pole is + expected to exist, to refine the estimate of the pole's true location. [1]_ + + References + ---------- + .. 
[1] Steve Alan Shilling, "A study of the chirp z-transform and its + applications", pg 20 (1970) + https://krex.k-state.edu/dspace/bitstream/handle/2097/7844/LD2668R41972S43.pdf + + Examples + -------- + Generate a sinusoid: + + >>> import numpy as np + >>> f1, f2, fs = 8, 10, 200 # Hz + >>> t = np.linspace(0, 1, fs, endpoint=False) + >>> x = np.sin(2*np.pi*t*f2) + >>> import matplotlib.pyplot as plt + >>> plt.plot(t, x) + >>> plt.axis([0, 1, -1.1, 1.1]) + >>> plt.show() + + Its discrete Fourier transform has all of its energy in a single frequency + bin: + + >>> from scipy.fft import rfft, rfftfreq + >>> from scipy.signal import czt, czt_points + >>> plt.plot(rfftfreq(fs, 1/fs), abs(rfft(x))) + >>> plt.margins(0, 0.1) + >>> plt.show() + + However, if the sinusoid is logarithmically-decaying: + + >>> x = np.exp(-t*f1) * np.sin(2*np.pi*t*f2) + >>> plt.plot(t, x) + >>> plt.axis([0, 1, -1.1, 1.1]) + >>> plt.show() + + the DFT will have spectral leakage: + + >>> plt.plot(rfftfreq(fs, 1/fs), abs(rfft(x))) + >>> plt.margins(0, 0.1) + >>> plt.show() + + While the DFT always samples the z-transform around the unit circle, the + chirp z-transform allows us to sample the Z-transform along any + logarithmic spiral, such as a circle with radius smaller than unity: + + >>> M = fs // 2 # Just positive frequencies, like rfft + >>> a = np.exp(-f1/fs) # Starting point of the circle, radius < 1 + >>> w = np.exp(-1j*np.pi/M) # "Step size" of circle + >>> points = czt_points(M + 1, w, a) # M + 1 to include Nyquist + >>> plt.plot(points.real, points.imag, '.') + >>> plt.gca().add_patch(plt.Circle((0,0), radius=1, fill=False, alpha=.3)) + >>> plt.axis('equal'); plt.axis([-1.05, 1.05, -0.05, 1.05]) + >>> plt.show() + + With the correct radius, this transforms the decaying sinusoid (and others + with the same decay rate) without spectral leakage: + + >>> z_vals = czt(x, M + 1, w, a) # Include Nyquist for comparison to rfft + >>> freqs = np.angle(points)*fs/(2*np.pi) # angle = omega, radius = sigma + >>> plt.plot(freqs, abs(z_vals)) + >>> plt.margins(0, 0.1) + >>> plt.show() + """ + x = np.asarray(x) + transform = CZT(x.shape[axis], m=m, w=w, a=a) + return transform(x, axis=axis) + + +def zoom_fft(x, fn, m=None, *, fs=2, endpoint=False, axis=-1): + """ + Compute the DFT of `x` only for frequencies in range `fn`. + + Parameters + ---------- + x : array + The signal to transform. + fn : array_like + A length-2 sequence [`f1`, `f2`] giving the frequency range, or a + scalar, for which the range [0, `fn`] is assumed. + m : int, optional + The number of points to evaluate. The default is the length of `x`. + fs : float, optional + The sampling frequency. If ``fs=10`` represented 10 kHz, for example, + then `f1` and `f2` would also be given in kHz. + The default sampling frequency is 2, so `f1` and `f2` should be + in the range [0, 1] to keep the transform below the Nyquist + frequency. + endpoint : bool, optional + If True, `f2` is the last sample. Otherwise, it is not included. + Default is False. + axis : int, optional + Axis over which to compute the FFT. If not given, the last axis is + used. + + Returns + ------- + out : ndarray + The transformed signal. The Fourier transform will be calculated + at the points f1, f1+df, f1+2df, ..., f2, where df=(f2-f1)/m. + + See Also + -------- + ZoomFFT : Class that creates a callable partial FFT function. 
+
+    Notes
+    -----
+    The defaults are chosen such that ``signal.zoom_fft(x, 2)`` is equivalent
+    to ``fft.fft(x)`` and, if ``m > len(x)``, that ``signal.zoom_fft(x, 2, m)``
+    is equivalent to ``fft.fft(x, m)``.
+
+    To graph the magnitude of the resulting transform, use::
+
+        plot(linspace(f1, f2, m, endpoint=False), abs(zoom_fft(x, [f1, f2], m)))
+
+    If the transform needs to be repeated, use `ZoomFFT` to construct
+    a specialized transform function which can be reused without
+    recomputing constants.
+
+    Examples
+    --------
+    To plot the transform results use something like the following:
+
+    >>> import numpy as np
+    >>> from scipy.signal import zoom_fft
+    >>> t = np.linspace(0, 1, 1021)
+    >>> x = np.cos(2*np.pi*15*t) + np.sin(2*np.pi*17*t)
+    >>> f1, f2 = 5, 27
+    >>> X = zoom_fft(x, [f1, f2], len(x), fs=1021)
+    >>> f = np.linspace(f1, f2, len(x))
+    >>> import matplotlib.pyplot as plt
+    >>> plt.plot(f, 20*np.log10(np.abs(X)))
+    >>> plt.show()
+    """
+    x = np.asarray(x)
+    transform = ZoomFFT(x.shape[axis], fn, m=m, fs=fs, endpoint=endpoint)
+    return transform(x, axis=axis)
diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/_filter_design.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/_filter_design.py
new file mode 100644
index 0000000000000000000000000000000000000000..0f177247c602cd529a064cc043d8052aa6cbc811
--- /dev/null
+++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/_filter_design.py
@@ -0,0 +1,5663 @@
+"""Filter design."""
+import math
+import operator
+import warnings
+
+import numpy as np
+from numpy import (atleast_1d, poly, polyval, roots, real, asarray,
+                   resize, pi, absolute, sqrt, tan, log10,
+                   arcsinh, sin, exp, cosh, arccosh, ceil, conjugate,
+                   zeros, sinh, append, concatenate, prod, ones, full, array,
+                   mintypecode)
+from numpy.polynomial.polynomial import polyval as npp_polyval
+from numpy.polynomial.polynomial import polyvalfromroots
+
+from scipy import special, optimize, fft as sp_fft
+from scipy.special import comb
+from scipy._lib._util import float_factorial
+from scipy.signal._arraytools import _validate_fs
+
+
+__all__ = ['findfreqs', 'freqs', 'freqz', 'tf2zpk', 'zpk2tf', 'normalize',
+           'lp2lp', 'lp2hp', 'lp2bp', 'lp2bs', 'bilinear', 'iirdesign',
+           'iirfilter', 'butter', 'cheby1', 'cheby2', 'ellip', 'bessel',
+           'band_stop_obj', 'buttord', 'cheb1ord', 'cheb2ord', 'ellipord',
+           'buttap', 'cheb1ap', 'cheb2ap', 'ellipap', 'besselap',
+           'BadCoefficients', 'freqs_zpk', 'freqz_zpk',
+           'tf2sos', 'sos2tf', 'zpk2sos', 'sos2zpk', 'group_delay',
+           'sosfreqz', 'freqz_sos', 'iirnotch', 'iirpeak', 'bilinear_zpk',
+           'lp2lp_zpk', 'lp2hp_zpk', 'lp2bp_zpk', 'lp2bs_zpk',
+           'gammatone', 'iircomb']
+
+
+class BadCoefficients(UserWarning):
+    """Warning about badly conditioned filter coefficients"""
+    pass
+
+
+abs = absolute
+
+# Machine-epsilon-scale threshold; `group_delay` below relies on this
+# module-level constant to detect near-singular denominators.
+EPSILON = 2e-16
+
+
+def _is_int_type(x):
+    """
+    Check if input is of a scalar integer type (so ``5`` and ``array(5)`` will
+    pass, while ``5.0`` and ``array([5])`` will fail).
+    """
+    if np.ndim(x) != 0:
+        # Older versions of NumPy did not raise for np.array([1]).__index__()
+        # This is safe to remove when support for those versions is dropped
+        return False
+    try:
+        operator.index(x)
+    except TypeError:
+        return False
+    else:
+        return True
+
+
+def findfreqs(num, den, N, kind='ba'):
+    """
+    Find array of frequencies for computing the response of an analog filter.
+
+    Parameters
+    ----------
+    num, den : array_like, 1-D
+        The polynomial coefficients of the numerator and denominator of the
+        transfer function of the filter or LTI system, where the coefficients
+        are ordered from highest to lowest degree. Or, the roots of the
+        transfer function numerator and denominator (i.e., zeroes and poles).
+    N : int
+        The length of the array to be computed.
+    kind : str {'ba', 'zp'}, optional
+        Specifies whether the numerator and denominator are specified by their
+        polynomial coefficients ('ba'), or their roots ('zp').
+
+    Returns
+    -------
+    w : (N,) ndarray
+        A 1-D array of frequencies, logarithmically spaced.
+
+    Examples
+    --------
+    Find a set of nine frequencies that span the "interesting part" of the
+    frequency response for the filter with the transfer function
+
+        H(s) = s / (s^2 + 8s + 25)
+
+    >>> from scipy import signal
+    >>> signal.findfreqs([1, 0], [1, 8, 25], N=9)
+    array([  1.00000000e-02,   3.16227766e-02,   1.00000000e-01,
+             3.16227766e-01,   1.00000000e+00,   3.16227766e+00,
+             1.00000000e+01,   3.16227766e+01,   1.00000000e+02])
+    """
+    if kind == 'ba':
+        ep = atleast_1d(roots(den)) + 0j
+        tz = atleast_1d(roots(num)) + 0j
+    elif kind == 'zp':
+        ep = atleast_1d(den) + 0j
+        tz = atleast_1d(num) + 0j
+    else:
+        raise ValueError("input must be one of {'ba', 'zp'}")
+
+    if len(ep) == 0:
+        ep = atleast_1d(-1000) + 0j
+
+    ez = np.r_[ep[ep.imag >= 0], tz[(np.abs(tz) < 1e5) & (tz.imag >= 0)]]
+
+    integ = np.abs(ez) < 1e-10
+    hfreq = np.round(np.log10(np.max(3 * np.abs(ez.real + integ) +
+                                     1.5 * ez.imag)) + 0.5)
+    lfreq = np.round(np.log10(0.1 * np.min(np.abs((ez + integ).real) +
+                                           2 * ez.imag)) - 0.5)
+
+    w = np.logspace(lfreq, hfreq, N)
+    return w
+
+
+def freqs(b, a, worN=200, plot=None):
+    """
+    Compute frequency response of analog filter.
+
+    Given the M-order numerator `b` and N-order denominator `a` of an analog
+    filter, compute its frequency response::
+
+             b[0]*(jw)**M + b[1]*(jw)**(M-1) + ... + b[M]
+     H(w) = ----------------------------------------------
+             a[0]*(jw)**N + a[1]*(jw)**(N-1) + ... + a[N]
+
+    Parameters
+    ----------
+    b : array_like
+        Numerator of a linear filter.
+    a : array_like
+        Denominator of a linear filter.
+    worN : {None, int, array_like}, optional
+        If None, then compute at 200 frequencies around the interesting parts
+        of the response curve (determined by pole-zero locations). If a single
+        integer, then compute at that many frequencies. Otherwise, compute the
+        response at the angular frequencies (e.g., rad/s) given in `worN`.
+    plot : callable, optional
+        A callable that takes two arguments. If given, the return parameters
+        `w` and `h` are passed to plot. Useful for plotting the frequency
+        response inside `freqs`.
+
+    Returns
+    -------
+    w : ndarray
+        The angular frequencies at which `h` was computed.
+    h : ndarray
+        The frequency response.
+
+    See Also
+    --------
+    freqz : Compute the frequency response of a digital filter.
+
+    Notes
+    -----
+    Using Matplotlib's "plot" function as the callable for `plot` produces
+    unexpected results, as this plots the real part of the complex transfer
+    function, not the magnitude. Try ``lambda w, h: plot(w, abs(h))``.
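+
+    For instance, a sketch that draws the magnitude response through the
+    callback (assuming ``matplotlib.pyplot`` is imported as ``plt``)::
+
+        freqs(b, a, plot=lambda w, h: plt.semilogx(w, abs(h)))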
+ + Examples + -------- + >>> from scipy.signal import freqs, iirfilter + >>> import numpy as np + + >>> b, a = iirfilter(4, [1, 10], 1, 60, analog=True, ftype='cheby1') + + >>> w, h = freqs(b, a, worN=np.logspace(-1, 2, 1000)) + + >>> import matplotlib.pyplot as plt + >>> plt.semilogx(w, 20 * np.log10(abs(h))) + >>> plt.xlabel('Frequency [rad/s]') + >>> plt.ylabel('Amplitude response [dB]') + >>> plt.grid(True) + >>> plt.show() + + """ + if worN is None: + # For backwards compatibility + w = findfreqs(b, a, 200) + elif _is_int_type(worN): + w = findfreqs(b, a, worN) + else: + w = atleast_1d(worN) + + s = 1j * w + h = polyval(b, s) / polyval(a, s) + if plot is not None: + plot(w, h) + + return w, h + + +def freqs_zpk(z, p, k, worN=200): + """ + Compute frequency response of analog filter. + + Given the zeros `z`, poles `p`, and gain `k` of a filter, compute its + frequency response:: + + (jw-z[0]) * (jw-z[1]) * ... * (jw-z[-1]) + H(w) = k * ---------------------------------------- + (jw-p[0]) * (jw-p[1]) * ... * (jw-p[-1]) + + Parameters + ---------- + z : array_like + Zeroes of a linear filter + p : array_like + Poles of a linear filter + k : scalar + Gain of a linear filter + worN : {None, int, array_like}, optional + If None, then compute at 200 frequencies around the interesting parts + of the response curve (determined by pole-zero locations). If a single + integer, then compute at that many frequencies. Otherwise, compute the + response at the angular frequencies (e.g., rad/s) given in `worN`. + + Returns + ------- + w : ndarray + The angular frequencies at which `h` was computed. + h : ndarray + The frequency response. + + See Also + -------- + freqs : Compute the frequency response of an analog filter in TF form + freqz : Compute the frequency response of a digital filter in TF form + freqz_zpk : Compute the frequency response of a digital filter in ZPK form + + Notes + ----- + .. versionadded:: 0.19.0 + + Examples + -------- + >>> import numpy as np + >>> from scipy.signal import freqs_zpk, iirfilter + + >>> z, p, k = iirfilter(4, [1, 10], 1, 60, analog=True, ftype='cheby1', + ... output='zpk') + + >>> w, h = freqs_zpk(z, p, k, worN=np.logspace(-1, 2, 1000)) + + >>> import matplotlib.pyplot as plt + >>> plt.semilogx(w, 20 * np.log10(abs(h))) + >>> plt.xlabel('Frequency [rad/s]') + >>> plt.ylabel('Amplitude response [dB]') + >>> plt.grid(True) + >>> plt.show() + + """ + k = np.asarray(k) + if k.size > 1: + raise ValueError('k must be a single scalar gain') + + if worN is None: + # For backwards compatibility + w = findfreqs(z, p, 200, kind='zp') + elif _is_int_type(worN): + w = findfreqs(z, p, worN, kind='zp') + else: + w = worN + + w = atleast_1d(w) + s = 1j * w + num = polyvalfromroots(s, z) + den = polyvalfromroots(s, p) + h = k * num/den + return w, h + + +def freqz(b, a=1, worN=512, whole=False, plot=None, fs=2*pi, + include_nyquist=False): + """ + Compute the frequency response of a digital filter. + + Given the M-order numerator `b` and N-order denominator `a` of a digital + filter, compute its frequency response:: + + jw -jw -jwM + jw B(e ) b[0] + b[1]e + ... + b[M]e + H(e ) = ------ = ----------------------------------- + jw -jw -jwN + A(e ) a[0] + a[1]e + ... + a[N]e + + Parameters + ---------- + b : array_like + Numerator of a linear filter. If `b` has dimension greater than 1, + it is assumed that the coefficients are stored in the first dimension, + and ``b.shape[1:]``, ``a.shape[1:]``, and the shape of the frequencies + array must be compatible for broadcasting. 
+ a : array_like + Denominator of a linear filter. If `b` has dimension greater than 1, + it is assumed that the coefficients are stored in the first dimension, + and ``b.shape[1:]``, ``a.shape[1:]``, and the shape of the frequencies + array must be compatible for broadcasting. + worN : {None, int, array_like}, optional + If a single integer, then compute at that many frequencies (default is + N=512). This is a convenient alternative to:: + + np.linspace(0, fs if whole else fs/2, N, endpoint=include_nyquist) + + Using a number that is fast for FFT computations can result in + faster computations (see Notes). + + If an array_like, compute the response at the frequencies given. + These are in the same units as `fs`. + whole : bool, optional + Normally, frequencies are computed from 0 to the Nyquist frequency, + fs/2 (upper-half of unit-circle). If `whole` is True, compute + frequencies from 0 to fs. Ignored if worN is array_like. + plot : callable + A callable that takes two arguments. If given, the return parameters + `w` and `h` are passed to plot. Useful for plotting the frequency + response inside `freqz`. + fs : float, optional + The sampling frequency of the digital system. Defaults to 2*pi + radians/sample (so w is from 0 to pi). + + .. versionadded:: 1.2.0 + include_nyquist : bool, optional + If `whole` is False and `worN` is an integer, setting `include_nyquist` + to True will include the last frequency (Nyquist frequency) and is + otherwise ignored. + + .. versionadded:: 1.5.0 + + Returns + ------- + w : ndarray + The frequencies at which `h` was computed, in the same units as `fs`. + By default, `w` is normalized to the range [0, pi) (radians/sample). + h : ndarray + The frequency response, as complex numbers. + + See Also + -------- + freqz_zpk + freqz_sos + + Notes + ----- + Using Matplotlib's :func:`matplotlib.pyplot.plot` function as the callable + for `plot` produces unexpected results, as this plots the real part of the + complex transfer function, not the magnitude. + Try ``lambda w, h: plot(w, np.abs(h))``. + + A direct computation via (R)FFT is used to compute the frequency response + when the following conditions are met: + + 1. An integer value is given for `worN`. + 2. `worN` is fast to compute via FFT (i.e., + `next_fast_len(worN) ` equals `worN`). + 3. The denominator coefficients are a single value (``a.shape[0] == 1``). + 4. `worN` is at least as long as the numerator coefficients + (``worN >= b.shape[0]``). + 5. If ``b.ndim > 1``, then ``b.shape[-1] == 1``. + + For long FIR filters, the FFT approach can have lower error and be much + faster than the equivalent direct polynomial calculation. + + Examples + -------- + >>> from scipy import signal + >>> import numpy as np + >>> taps, f_c = 80, 1.0 # number of taps and cut-off frequency + >>> b = signal.firwin(taps, f_c, window=('kaiser', 8), fs=2*np.pi) + >>> w, h = signal.freqz(b) + + >>> import matplotlib.pyplot as plt + >>> fig, ax1 = plt.subplots(tight_layout=True) + >>> ax1.set_title(f"Frequency Response of {taps} tap FIR Filter" + + ... 
f"($f_c={f_c}$ rad/sample)") + >>> ax1.axvline(f_c, color='black', linestyle=':', linewidth=0.8) + >>> ax1.plot(w, 20 * np.log10(abs(h)), 'C0') + >>> ax1.set_ylabel("Amplitude in dB", color='C0') + >>> ax1.set(xlabel="Frequency in rad/sample", xlim=(0, np.pi)) + + >>> ax2 = ax1.twinx() + >>> phase = np.unwrap(np.angle(h)) + >>> ax2.plot(w, phase, 'C1') + >>> ax2.set_ylabel('Phase [rad]', color='C1') + >>> ax2.grid(True) + >>> ax2.axis('tight') + >>> plt.show() + + Broadcasting Examples + + Suppose we have two FIR filters whose coefficients are stored in the + rows of an array with shape (2, 25). For this demonstration, we'll + use random data: + + >>> rng = np.random.default_rng() + >>> b = rng.random((2, 25)) + + To compute the frequency response for these two filters with one call + to `freqz`, we must pass in ``b.T``, because `freqz` expects the first + axis to hold the coefficients. We must then extend the shape with a + trivial dimension of length 1 to allow broadcasting with the array + of frequencies. That is, we pass in ``b.T[..., np.newaxis]``, which has + shape (25, 2, 1): + + >>> w, h = signal.freqz(b.T[..., np.newaxis], worN=1024) + >>> w.shape + (1024,) + >>> h.shape + (2, 1024) + + Now, suppose we have two transfer functions, with the same numerator + coefficients ``b = [0.5, 0.5]``. The coefficients for the two denominators + are stored in the first dimension of the 2-D array `a`:: + + a = [ 1 1 ] + [ -0.25, -0.5 ] + + >>> b = np.array([0.5, 0.5]) + >>> a = np.array([[1, 1], [-0.25, -0.5]]) + + Only `a` is more than 1-D. To make it compatible for + broadcasting with the frequencies, we extend it with a trivial dimension + in the call to `freqz`: + + >>> w, h = signal.freqz(b, a[..., np.newaxis], worN=1024) + >>> w.shape + (1024,) + >>> h.shape + (2, 1024) + + """ + b = atleast_1d(b) + a = atleast_1d(a) + + fs = _validate_fs(fs, allow_none=False) + + if worN is None: + # For backwards compatibility + worN = 512 + + h = None + + if _is_int_type(worN): + N = operator.index(worN) + del worN + if N < 0: + raise ValueError(f'worN must be nonnegative, got {N}') + lastpoint = 2 * pi if whole else pi + # if include_nyquist is true and whole is false, w should + # include end point + w = np.linspace(0, lastpoint, N, + endpoint=include_nyquist and not whole) + n_fft = N if whole else 2 * (N - 1) if include_nyquist else 2 * N + if (a.size == 1 and (b.ndim == 1 or (b.shape[-1] == 1)) + and n_fft >= b.shape[0] + and n_fft > 0): # TODO: review threshold acc. to benchmark? + if np.isrealobj(b) and np.isrealobj(a): + fft_func = sp_fft.rfft + else: + fft_func = sp_fft.fft + h = fft_func(b, n=n_fft, axis=0)[:N] + h /= a + if fft_func is sp_fft.rfft and whole: + # exclude DC and maybe Nyquist (no need to use axis_reverse + # here because we can build reversal with the truncation) + stop = -1 if n_fft % 2 == 1 else -2 + h_flip = slice(stop, 0, -1) + h = np.concatenate((h, h[h_flip].conj())) + if b.ndim > 1: + # Last axis of h has length 1, so drop it. + h = h[..., 0] + # Move the first axis of h to the end. + h = np.moveaxis(h, 0, -1) + else: + w = atleast_1d(worN) + del worN + w = 2*pi*w/fs + + if h is None: # still need to compute using freqs w + zm1 = exp(-1j * w) + h = (npp_polyval(zm1, b, tensor=False) / + npp_polyval(zm1, a, tensor=False)) + + w = w*(fs/(2*pi)) + + if plot is not None: + plot(w, h) + + return w, h + + +def freqz_zpk(z, p, k, worN=512, whole=False, fs=2*pi): + r""" + Compute the frequency response of a digital filter in ZPK form. 
+ + Given the Zeros, Poles and Gain of a digital filter, compute its frequency + response: + + :math:`H(z)=k \prod_i (z - Z[i]) / \prod_j (z - P[j])` + + where :math:`k` is the `gain`, :math:`Z` are the `zeros` and :math:`P` are + the `poles`. + + Parameters + ---------- + z : array_like + Zeroes of a linear filter + p : array_like + Poles of a linear filter + k : scalar + Gain of a linear filter + worN : {None, int, array_like}, optional + If a single integer, then compute at that many frequencies (default is + N=512). + + If an array_like, compute the response at the frequencies given. + These are in the same units as `fs`. + whole : bool, optional + Normally, frequencies are computed from 0 to the Nyquist frequency, + fs/2 (upper-half of unit-circle). If `whole` is True, compute + frequencies from 0 to fs. Ignored if w is array_like. + fs : float, optional + The sampling frequency of the digital system. Defaults to 2*pi + radians/sample (so w is from 0 to pi). + + .. versionadded:: 1.2.0 + + Returns + ------- + w : ndarray + The frequencies at which `h` was computed, in the same units as `fs`. + By default, `w` is normalized to the range [0, pi) (radians/sample). + h : ndarray + The frequency response, as complex numbers. + + See Also + -------- + freqs : Compute the frequency response of an analog filter in TF form + freqs_zpk : Compute the frequency response of an analog filter in ZPK form + freqz : Compute the frequency response of a digital filter in TF form + + Notes + ----- + .. versionadded:: 0.19.0 + + Examples + -------- + Design a 4th-order digital Butterworth filter with cut-off of 100 Hz in a + system with sample rate of 1000 Hz, and plot the frequency response: + + >>> import numpy as np + >>> from scipy import signal + >>> z, p, k = signal.butter(4, 100, output='zpk', fs=1000) + >>> w, h = signal.freqz_zpk(z, p, k, fs=1000) + + >>> import matplotlib.pyplot as plt + >>> fig = plt.figure() + >>> ax1 = fig.add_subplot(1, 1, 1) + >>> ax1.set_title('Digital filter frequency response') + + >>> ax1.plot(w, 20 * np.log10(abs(h)), 'b') + >>> ax1.set_ylabel('Amplitude [dB]', color='b') + >>> ax1.set_xlabel('Frequency [Hz]') + >>> ax1.grid(True) + + >>> ax2 = ax1.twinx() + >>> phase = np.unwrap(np.angle(h)) + >>> ax2.plot(w, phase, 'g') + >>> ax2.set_ylabel('Phase [rad]', color='g') + + >>> plt.axis('tight') + >>> plt.show() + + """ + z, p = map(atleast_1d, (z, p)) + + fs = _validate_fs(fs, allow_none=False) + + if whole: + lastpoint = 2 * pi + else: + lastpoint = pi + + if worN is None: + # For backwards compatibility + w = np.linspace(0, lastpoint, 512, endpoint=False) + elif _is_int_type(worN): + w = np.linspace(0, lastpoint, worN, endpoint=False) + else: + w = atleast_1d(worN) + w = 2*pi*w/fs + + zm1 = exp(1j * w) + h = k * polyvalfromroots(zm1, z) / polyvalfromroots(zm1, p) + + w = w*(fs/(2*pi)) + + return w, h + + +def group_delay(system, w=512, whole=False, fs=2*pi): + r"""Compute the group delay of a digital filter. + + The group delay measures by how many samples amplitude envelopes of + various spectral components of a signal are delayed by a filter. + It is formally defined as the derivative of continuous (unwrapped) phase:: + + d jw + D(w) = - -- arg H(e) + dw + + Parameters + ---------- + system : tuple of array_like (b, a) + Numerator and denominator coefficients of a filter transfer function. + w : {None, int, array_like}, optional + If a single integer, then compute at that many frequencies (default is + N=512). 
+
+        If an array_like, compute the delay at the frequencies given. These
+        are in the same units as `fs`.
+    whole : bool, optional
+        Normally, frequencies are computed from 0 to the Nyquist frequency,
+        fs/2 (upper-half of unit-circle). If `whole` is True, compute
+        frequencies from 0 to fs. Ignored if w is array_like.
+    fs : float, optional
+        The sampling frequency of the digital system. Defaults to 2*pi
+        radians/sample (so w is from 0 to pi).
+
+        .. versionadded:: 1.2.0
+
+    Returns
+    -------
+    w : ndarray
+        The frequencies at which group delay was computed, in the same units
+        as `fs`. By default, `w` is normalized to the range [0, pi)
+        (radians/sample).
+    gd : ndarray
+        The group delay.
+
+    See Also
+    --------
+    freqz : Frequency response of a digital filter
+
+    Notes
+    -----
+    The similar function in MATLAB is called `grpdelay`.
+
+    If the transfer function :math:`H(z)` has zeros or poles on the unit
+    circle, the group delay at corresponding frequencies is undefined.
+    When such a case arises, a warning is raised and the group delay
+    is set to 0 at those frequencies.
+
+    For the details of numerical computation of the group delay refer to [1]_
+    or [2]_.
+
+    .. versionadded:: 0.16.0
+
+    References
+    ----------
+    .. [1] Richard G. Lyons, "Understanding Digital Signal Processing,
+           3rd edition", p. 830.
+    .. [2] Julius O. Smith III, "Numerical Computation of Group Delay",
+           in "Introduction to Digital Filters with Audio Applications",
+           online book, 2007,
+           https://ccrma.stanford.edu/~jos/fp/Numerical_Computation_Group_Delay.html
+
+    Examples
+    --------
+    >>> from scipy import signal
+    >>> b, a = signal.iirdesign(0.1, 0.3, 5, 50, ftype='cheby1')
+    >>> w, gd = signal.group_delay((b, a))
+
+    >>> import matplotlib.pyplot as plt
+    >>> plt.title('Digital filter group delay')
+    >>> plt.plot(w, gd)
+    >>> plt.ylabel('Group delay [samples]')
+    >>> plt.xlabel('Frequency [rad/sample]')
+    >>> plt.show()
+
+    """
+    if w is None:
+        # For backwards compatibility
+        w = 512
+
+    fs = _validate_fs(fs, allow_none=False)
+
+    if _is_int_type(w):
+        if whole:
+            w = np.linspace(0, 2 * pi, w, endpoint=False)
+        else:
+            w = np.linspace(0, pi, w, endpoint=False)
+    else:
+        w = np.atleast_1d(w)
+        w = 2*pi*w/fs
+
+    b, a = map(np.atleast_1d, system)
+    c = np.convolve(b, conjugate(a[::-1]))
+    cr = c * np.arange(c.size)
+    z = np.exp(-1j * w)
+    num = np.polyval(cr[::-1], z)
+    den = np.polyval(c[::-1], z)
+    gd = np.real(num / den) - a.size + 1
+    singular = ~np.isfinite(gd)
+    near_singular = np.absolute(den) < 10 * EPSILON
+
+    if np.any(singular):
+        gd[singular] = 0
+        warnings.warn(
+            "The group delay is singular at frequencies "
+            f"[{', '.join(f'{ws:.3f}' for ws in w[singular])}], setting to 0",
+            stacklevel=2
+        )
+
+    elif np.any(near_singular):
+        warnings.warn(
+            "The filter's denominator is extremely small at frequencies "
+            f"[{', '.join(f'{ws:.3f}' for ws in w[near_singular])}], "
+            "around which a singularity may be present",
+            stacklevel=2
+        )
+
+    w = w*(fs/(2*pi))
+
+    return w, gd
+
+
+def _validate_sos(sos):
+    """Helper to validate a SOS input"""
+    sos = np.atleast_2d(sos)
+    if sos.ndim != 2:
+        raise ValueError('sos array must be 2D')
+    n_sections, m = sos.shape
+    if m != 6:
+        raise ValueError('sos array must be shape (n_sections, 6)')
+    if not (sos[:, 3] == 1).all():
+        raise ValueError('sos[:, 3] should be all ones')
+    return sos, n_sections
+
+
+def freqz_sos(sos, worN=512, whole=False, fs=2*pi):
+    r"""
+    Compute the frequency response of a digital filter in SOS format.
+ + Given `sos`, an array with shape (n, 6) of second order sections of + a digital filter, compute the frequency response of the system function:: + + B0(z) B1(z) B{n-1}(z) + H(z) = ----- * ----- * ... * --------- + A0(z) A1(z) A{n-1}(z) + + for z = exp(omega*1j), where B{k}(z) and A{k}(z) are numerator and + denominator of the transfer function of the k-th second order section. + + Parameters + ---------- + sos : array_like + Array of second-order filter coefficients, must have shape + ``(n_sections, 6)``. Each row corresponds to a second-order + section, with the first three columns providing the numerator + coefficients and the last three providing the denominator + coefficients. + worN : {None, int, array_like}, optional + If a single integer, then compute at that many frequencies (default is + N=512). Using a number that is fast for FFT computations can result + in faster computations (see Notes of `freqz`). + + If an array_like, compute the response at the frequencies given (must + be 1-D). These are in the same units as `fs`. + whole : bool, optional + Normally, frequencies are computed from 0 to the Nyquist frequency, + fs/2 (upper-half of unit-circle). If `whole` is True, compute + frequencies from 0 to fs. + fs : float, optional + The sampling frequency of the digital system. Defaults to 2*pi + radians/sample (so w is from 0 to pi). + + .. versionadded:: 1.2.0 + + Returns + ------- + w : ndarray + The frequencies at which `h` was computed, in the same units as `fs`. + By default, `w` is normalized to the range [0, pi) (radians/sample). + h : ndarray + The frequency response, as complex numbers. + + See Also + -------- + freqz, sosfilt + + Notes + ----- + .. versionadded:: 0.19.0 + + Examples + -------- + Design a 15th-order bandpass filter in SOS format. + + >>> from scipy import signal + >>> import numpy as np + >>> sos = signal.ellip(15, 0.5, 60, (0.2, 0.4), btype='bandpass', + ... output='sos') + + Compute the frequency response at 1500 points from DC to Nyquist. + + >>> w, h = signal.freqz_sos(sos, worN=1500) + + Plot the response. + + >>> import matplotlib.pyplot as plt + >>> plt.subplot(2, 1, 1) + >>> db = 20*np.log10(np.maximum(np.abs(h), 1e-5)) + >>> plt.plot(w/np.pi, db) + >>> plt.ylim(-75, 5) + >>> plt.grid(True) + >>> plt.yticks([0, -20, -40, -60]) + >>> plt.ylabel('Gain [dB]') + >>> plt.title('Frequency Response') + >>> plt.subplot(2, 1, 2) + >>> plt.plot(w/np.pi, np.angle(h)) + >>> plt.grid(True) + >>> plt.yticks([-np.pi, -0.5*np.pi, 0, 0.5*np.pi, np.pi], + ... [r'$-\pi$', r'$-\pi/2$', '0', r'$\pi/2$', r'$\pi$']) + >>> plt.ylabel('Phase [rad]') + >>> plt.xlabel('Normalized frequency (1.0 = Nyquist)') + >>> plt.show() + + If the same filter is implemented as a single transfer function, + numerical error corrupts the frequency response: + + >>> b, a = signal.ellip(15, 0.5, 60, (0.2, 0.4), btype='bandpass', + ... output='ba') + >>> w, h = signal.freqz(b, a, worN=1500) + >>> plt.subplot(2, 1, 1) + >>> db = 20*np.log10(np.maximum(np.abs(h), 1e-5)) + >>> plt.plot(w/np.pi, db) + >>> plt.ylim(-75, 5) + >>> plt.grid(True) + >>> plt.yticks([0, -20, -40, -60]) + >>> plt.ylabel('Gain [dB]') + >>> plt.title('Frequency Response') + >>> plt.subplot(2, 1, 2) + >>> plt.plot(w/np.pi, np.angle(h)) + >>> plt.grid(True) + >>> plt.yticks([-np.pi, -0.5*np.pi, 0, 0.5*np.pi, np.pi], + ... 
[r'$-\pi$', r'$-\pi/2$', '0', r'$\pi/2$', r'$\pi$']) + >>> plt.ylabel('Phase [rad]') + >>> plt.xlabel('Normalized frequency (1.0 = Nyquist)') + >>> plt.show() + + """ + fs = _validate_fs(fs, allow_none=False) + + sos, n_sections = _validate_sos(sos) + if n_sections == 0: + raise ValueError('Cannot compute frequencies with no sections') + h = 1. + for row in sos: + w, rowh = freqz(row[:3], row[3:], worN=worN, whole=whole, fs=fs) + h *= rowh + return w, h + + +def sosfreqz(*args, **kwargs): + """ + Compute the frequency response of a digital filter in SOS format. + + .. warning:: This function is an alias, provided for backward + compatibility. New code should use the function + :func:`scipy.signal.freqz_sos`. + """ + return freqz_sos(*args, **kwargs) + + +def _cplxreal(z, tol=None): + """ + Split into complex and real parts, combining conjugate pairs. + + The 1-D input vector `z` is split up into its complex (`zc`) and real (`zr`) + elements. Every complex element must be part of a complex-conjugate pair, + which are combined into a single number (with positive imaginary part) in + the output. Two complex numbers are considered a conjugate pair if their + real and imaginary parts differ in magnitude by less than ``tol * abs(z)``. + + Parameters + ---------- + z : array_like + Vector of complex numbers to be sorted and split + tol : float, optional + Relative tolerance for testing realness and conjugate equality. + Default is ``100 * spacing(1)`` of `z`'s data type (i.e., 2e-14 for + float64) + + Returns + ------- + zc : ndarray + Complex elements of `z`, with each pair represented by a single value + having positive imaginary part, sorted first by real part, and then + by magnitude of imaginary part. The pairs are averaged when combined + to reduce error. + zr : ndarray + Real elements of `z` (those having imaginary part less than + `tol` times their magnitude), sorted by value. + + Raises + ------ + ValueError + If there are any complex numbers in `z` for which a conjugate + cannot be found. + + See Also + -------- + _cplxpair + + Examples + -------- + >>> from scipy.signal._filter_design import _cplxreal + >>> a = [4, 3, 1, 2-2j, 2+2j, 2-1j, 2+1j, 2-1j, 2+1j, 1+1j, 1-1j] + >>> zc, zr = _cplxreal(a) + >>> print(zc) + [ 1.+1.j 2.+1.j 2.+1.j 2.+2.j] + >>> print(zr) + [ 1. 3. 4.] + """ + + z = atleast_1d(z) + if z.size == 0: + return z, z + elif z.ndim != 1: + raise ValueError('_cplxreal only accepts 1-D input') + + if tol is None: + # Get tolerance from dtype of input + tol = 100 * np.finfo((1.0 * z).dtype).eps + + # Sort by real part, magnitude of imaginary part (speed up further sorting) + z = z[np.lexsort((abs(z.imag), z.real))] + + # Split reals from conjugate pairs + real_indices = abs(z.imag) <= tol * abs(z) + zr = z[real_indices].real + + if len(zr) == len(z): + # Input is entirely real + return array([]), zr + + # Split positive and negative halves of conjugates + z = z[~real_indices] + zp = z[z.imag > 0] + zn = z[z.imag < 0] + + if len(zp) != len(zn): + raise ValueError('Array contains complex value with no matching ' + 'conjugate.') + + # Find runs of (approximately) the same real part + same_real = np.diff(zp.real) <= tol * abs(zp[:-1]) + diffs = np.diff(concatenate(([0], same_real, [0]))) + run_starts = np.nonzero(diffs > 0)[0] + run_stops = np.nonzero(diffs < 0)[0] + + # Sort each run by their imaginary parts + for i in range(len(run_starts)): + start = run_starts[i] + stop = run_stops[i] + 1 + for chunk in (zp[start:stop], zn[start:stop]): + chunk[...] 
= chunk[np.lexsort([abs(chunk.imag)])] + + # Check that negatives match positives + if any(abs(zp - zn.conj()) > tol * abs(zn)): + raise ValueError('Array contains complex value with no matching ' + 'conjugate.') + + # Average out numerical inaccuracy in real vs imag parts of pairs + zc = (zp + zn.conj()) / 2 + + return zc, zr + + +def _cplxpair(z, tol=None): + """ + Sort into pairs of complex conjugates. + + Complex conjugates in `z` are sorted by increasing real part. In each + pair, the number with negative imaginary part appears first. + + If pairs have identical real parts, they are sorted by increasing + imaginary magnitude. + + Two complex numbers are considered a conjugate pair if their real and + imaginary parts differ in magnitude by less than ``tol * abs(z)``. The + pairs are forced to be exact complex conjugates by averaging the positive + and negative values. + + Purely real numbers are also sorted, but placed after the complex + conjugate pairs. A number is considered real if its imaginary part is + smaller than `tol` times the magnitude of the number. + + Parameters + ---------- + z : array_like + 1-D input array to be sorted. + tol : float, optional + Relative tolerance for testing realness and conjugate equality. + Default is ``100 * spacing(1)`` of `z`'s data type (i.e., 2e-14 for + float64) + + Returns + ------- + y : ndarray + Complex conjugate pairs followed by real numbers. + + Raises + ------ + ValueError + If there are any complex numbers in `z` for which a conjugate + cannot be found. + + See Also + -------- + _cplxreal + + Examples + -------- + >>> from scipy.signal._filter_design import _cplxpair + >>> a = [4, 3, 1, 2-2j, 2+2j, 2-1j, 2+1j, 2-1j, 2+1j, 1+1j, 1-1j] + >>> z = _cplxpair(a) + >>> print(z) + [ 1.-1.j 1.+1.j 2.-1.j 2.+1.j 2.-1.j 2.+1.j 2.-2.j 2.+2.j 1.+0.j + 3.+0.j 4.+0.j] + """ + + z = atleast_1d(z) + if z.size == 0 or np.isrealobj(z): + return np.sort(z) + + if z.ndim != 1: + raise ValueError('z must be 1-D') + + zc, zr = _cplxreal(z, tol) + + # Interleave complex values and their conjugates, with negative imaginary + # parts first in each pair + zc = np.dstack((zc.conj(), zc)).flatten() + z = np.append(zc, zr) + return z + + +def tf2zpk(b, a): + r"""Return zero, pole, gain (z, p, k) representation from a numerator, + denominator representation of a linear filter. + + Parameters + ---------- + b : array_like + Numerator polynomial coefficients. + a : array_like + Denominator polynomial coefficients. + + Returns + ------- + z : ndarray + Zeros of the transfer function. + p : ndarray + Poles of the transfer function. + k : float + System gain. + + Notes + ----- + If some values of `b` are too close to 0, they are removed. In that case, + a BadCoefficients warning is emitted. + + The `b` and `a` arrays are interpreted as coefficients for positive, + descending powers of the transfer function variable. So the inputs + :math:`b = [b_0, b_1, ..., b_M]` and :math:`a =[a_0, a_1, ..., a_N]` + can represent an analog filter of the form: + + .. math:: + + H(s) = \frac + {b_0 s^M + b_1 s^{(M-1)} + \cdots + b_M} + {a_0 s^N + a_1 s^{(N-1)} + \cdots + a_N} + + or a discrete-time filter of the form: + + .. math:: + + H(z) = \frac + {b_0 z^M + b_1 z^{(M-1)} + \cdots + b_M} + {a_0 z^N + a_1 z^{(N-1)} + \cdots + a_N} + + This "positive powers" form is found more commonly in controls + engineering. 
If `M` and `N` are equal (which is true for all filters + generated by the bilinear transform), then this happens to be equivalent + to the "negative powers" discrete-time form preferred in DSP: + + .. math:: + + H(z) = \frac + {b_0 + b_1 z^{-1} + \cdots + b_M z^{-M}} + {a_0 + a_1 z^{-1} + \cdots + a_N z^{-N}} + + Although this is true for common filters, remember that this is not true + in the general case. If `M` and `N` are not equal, the discrete-time + transfer function coefficients must first be converted to the "positive + powers" form before finding the poles and zeros. + + Examples + -------- + Find the zeroes, poles and gain of + a filter with the transfer function + + .. math:: + + H(s) = \frac{3s^2}{s^2 + 5s + 13} + + >>> from scipy.signal import tf2zpk + >>> tf2zpk([3, 0, 0], [1, 5, 13]) + ( array([ 0. , 0. ]), + array([ -2.5+2.59807621j , -2.5-2.59807621j]), + 3.0) + """ + b, a = normalize(b, a) + b = (b + 0.0) / a[0] + a = (a + 0.0) / a[0] + k = b[0] + b /= b[0] + z = roots(b) + p = roots(a) + return z, p, k + + +def zpk2tf(z, p, k): + r""" + Return polynomial transfer function representation from zeros and poles + + Parameters + ---------- + z : array_like + Zeros of the transfer function. + p : array_like + Poles of the transfer function. + k : float + System gain. + + Returns + ------- + b : ndarray + Numerator polynomial coefficients. + a : ndarray + Denominator polynomial coefficients. + + Examples + -------- + Find the polynomial representation of a transfer function H(s) + using its 'zpk' (Zero-Pole-Gain) representation. + + .. math:: + + H(z) = 5 \frac + { (s - 2)(s - 6) } + { (s - 1)(s - 8) } + + >>> from scipy.signal import zpk2tf + >>> z = [2, 6] + >>> p = [1, 8] + >>> k = 5 + >>> zpk2tf(z, p, k) + ( array([ 5., -40., 60.]), array([ 1., -9., 8.])) + """ + z = atleast_1d(z) + k = atleast_1d(k) + if len(z.shape) > 1: + temp = poly(z[0]) + b = np.empty((z.shape[0], z.shape[1] + 1), temp.dtype.char) + if len(k) == 1: + k = [k[0]] * z.shape[0] + for i in range(z.shape[0]): + b[i] = k[i] * poly(z[i]) + else: + b = k * poly(z) + a = atleast_1d(poly(p)) + + # Use real output if possible. Copied from np.poly, since + # we can't depend on a specific version of numpy. + if issubclass(b.dtype.type, np.complexfloating): + # if complex roots are all complex conjugates, the roots are real. + roots = np.asarray(z, complex) + pos_roots = np.compress(roots.imag > 0, roots) + neg_roots = np.conjugate(np.compress(roots.imag < 0, roots)) + if len(pos_roots) == len(neg_roots): + if np.all(np.sort_complex(neg_roots) == np.sort_complex(pos_roots)): + b = b.real.copy() + + if issubclass(a.dtype.type, np.complexfloating): + # if complex roots are all complex conjugates, the roots are real. + roots = np.asarray(p, complex) + pos_roots = np.compress(roots.imag > 0, roots) + neg_roots = np.conjugate(np.compress(roots.imag < 0, roots)) + if len(pos_roots) == len(neg_roots): + if np.all(np.sort_complex(neg_roots) == np.sort_complex(pos_roots)): + a = a.real.copy() + + return b, a + + +def tf2sos(b, a, pairing=None, *, analog=False): + r""" + Return second-order sections from transfer function representation + + Parameters + ---------- + b : array_like + Numerator polynomial coefficients. + a : array_like + Denominator polynomial coefficients. + pairing : {None, 'nearest', 'keep_odd', 'minimal'}, optional + The method to use to combine pairs of poles and zeros into sections. + See `zpk2sos` for information and restrictions on `pairing` and + `analog` arguments. 
+ analog : bool, optional + If True, system is analog, otherwise discrete. + + .. versionadded:: 1.8.0 + + Returns + ------- + sos : ndarray + Array of second-order filter coefficients, with shape + ``(n_sections, 6)``. See `sosfilt` for the SOS filter format + specification. + + See Also + -------- + zpk2sos, sosfilt + + Notes + ----- + It is generally discouraged to convert from TF to SOS format, since doing + so usually will not improve numerical precision errors. Instead, consider + designing filters in ZPK format and converting directly to SOS. TF is + converted to SOS by first converting to ZPK format, then converting + ZPK to SOS. + + .. versionadded:: 0.16.0 + + Examples + -------- + Find the 'sos' (second-order sections) of the transfer function H(s) + using its polynomial representation. + + .. math:: + + H(s) = \frac{s^2 - 3.5s - 2}{s^4 + 3s^3 - 15s^2 - 19s + 30} + + >>> from scipy.signal import tf2sos + >>> tf2sos([1, -3.5, -2], [1, 3, -15, -19, 30], analog=True) + array([[ 0. , 0. , 1. , 1. , 2. , -15. ], + [ 1. , -3.5, -2. , 1. , 1. , -2. ]]) + """ + return zpk2sos(*tf2zpk(b, a), pairing=pairing, analog=analog) + + +def sos2tf(sos): + r""" + Return a single transfer function from a series of second-order sections + + Parameters + ---------- + sos : array_like + Array of second-order filter coefficients, must have shape + ``(n_sections, 6)``. See `sosfilt` for the SOS filter format + specification. + + Returns + ------- + b : ndarray + Numerator polynomial coefficients. + a : ndarray + Denominator polynomial coefficients. + + Notes + ----- + .. versionadded:: 0.16.0 + + Examples + -------- + Find the polynomial representation of an elliptic filter + using its 'sos' (second-order sections) format. + + >>> from scipy.signal import sos2tf + >>> from scipy import signal + >>> sos = signal.ellip(1, 0.001, 50, 0.1, output='sos') + >>> sos2tf(sos) + ( array([0.91256522, 0.91256522, 0. ]), + array([1. , 0.82513043, 0. ])) + """ + sos = np.asarray(sos) + result_type = sos.dtype + if result_type.kind in 'bui': + result_type = np.float64 + + b = np.array([1], dtype=result_type) + a = np.array([1], dtype=result_type) + n_sections = sos.shape[0] + for section in range(n_sections): + b = np.polymul(b, sos[section, :3]) + a = np.polymul(a, sos[section, 3:]) + return b, a + + +def sos2zpk(sos): + """ + Return zeros, poles, and gain of a series of second-order sections + + Parameters + ---------- + sos : array_like + Array of second-order filter coefficients, must have shape + ``(n_sections, 6)``. See `sosfilt` for the SOS filter format + specification. + + Returns + ------- + z : ndarray + Zeros of the transfer function. + p : ndarray + Poles of the transfer function. + k : float + System gain. + + Notes + ----- + The number of zeros and poles returned will be ``n_sections * 2`` + even if some of these are (effectively) zero. + + .. versionadded:: 0.16.0 + """ + sos = np.asarray(sos) + n_sections = sos.shape[0] + z = np.zeros(n_sections*2, np.complex128) + p = np.zeros(n_sections*2, np.complex128) + k = 1. 
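+    # Each second-order section contributes up to two zeros and two poles;
+    # walk the sections, collecting roots via tf2zpk and accumulating the
+    # overall gain.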
+ for section in range(n_sections): + zpk = tf2zpk(sos[section, :3], sos[section, 3:]) + z[2*section:2*section+len(zpk[0])] = zpk[0] + p[2*section:2*section+len(zpk[1])] = zpk[1] + k *= zpk[2] + return z, p, k + + +def _nearest_real_complex_idx(fro, to, which): + """Get the next closest real or complex element based on distance""" + assert which in ('real', 'complex', 'any') + order = np.argsort(np.abs(fro - to)) + if which == 'any': + return order[0] + else: + mask = np.isreal(fro[order]) + if which == 'complex': + mask = ~mask + return order[np.nonzero(mask)[0][0]] + + +def _single_zpksos(z, p, k): + """Create one second-order section from up to two zeros and poles""" + sos = np.zeros(6) + b, a = zpk2tf(z, p, k) + sos[3-len(b):3] = b + sos[6-len(a):6] = a + return sos + + +def zpk2sos(z, p, k, pairing=None, *, analog=False): + """Return second-order sections from zeros, poles, and gain of a system + + Parameters + ---------- + z : array_like + Zeros of the transfer function. + p : array_like + Poles of the transfer function. + k : float + System gain. + pairing : {None, 'nearest', 'keep_odd', 'minimal'}, optional + The method to use to combine pairs of poles and zeros into sections. + If analog is False and pairing is None, pairing is set to 'nearest'; + if analog is True, pairing must be 'minimal', and is set to that if + it is None. + analog : bool, optional + If True, system is analog, otherwise discrete. + + .. versionadded:: 1.8.0 + + Returns + ------- + sos : ndarray + Array of second-order filter coefficients, with shape + ``(n_sections, 6)``. See `sosfilt` for the SOS filter format + specification. + + See Also + -------- + sosfilt + + Notes + ----- + The algorithm used to convert ZPK to SOS format is designed to + minimize errors due to numerical precision issues. The pairing + algorithm attempts to minimize the peak gain of each biquadratic + section. This is done by pairing poles with the nearest zeros, starting + with the poles closest to the unit circle for discrete-time systems, and + poles closest to the imaginary axis for continuous-time systems. + + ``pairing='minimal'`` outputs may not be suitable for `sosfilt`, + and ``analog=True`` outputs will never be suitable for `sosfilt`. + + *Algorithms* + + The steps in the ``pairing='nearest'``, ``pairing='keep_odd'``, + and ``pairing='minimal'`` algorithms are mostly shared. The + ``'nearest'`` algorithm attempts to minimize the peak gain, while + ``'keep_odd'`` minimizes peak gain under the constraint that + odd-order systems should retain one section as first order. + ``'minimal'`` is similar to ``'keep_odd'``, but no additional + poles or zeros are introduced + + The algorithm steps are as follows: + + As a pre-processing step for ``pairing='nearest'``, + ``pairing='keep_odd'``, add poles or zeros to the origin as + necessary to obtain the same number of poles and zeros for + pairing. If ``pairing == 'nearest'`` and there are an odd number + of poles, add an additional pole and a zero at the origin. + + The following steps are then iterated over until no more poles or + zeros remain: + + 1. Take the (next remaining) pole (complex or real) closest to the + unit circle (or imaginary axis, for ``analog=True``) to + begin a new filter section. + + 2. If the pole is real and there are no other remaining real poles [#]_, + add the closest real zero to the section and leave it as a first + order section. 
Note that after this step we are guaranteed to be
+       left with an even number of real poles, complex poles, real zeros,
+       and complex zeros for subsequent pairing iterations.
+
+    3. Else:
+
+       1. If the pole is complex and the zero is the only remaining real
+          zero, then pair the pole with the *next* closest zero
+          (guaranteed to be complex). This is necessary to ensure that
+          there will be a real zero remaining to eventually create a
+          first-order section (thus keeping the odd order).
+
+       2. Else pair the pole with the closest remaining zero (complex or
+          real).
+
+       3. Proceed to complete the second-order section by adding another
+          pole and zero to the current pole and zero in the section:
+
+          1. If the current pole and zero are both complex, add their
+             conjugates.
+
+          2. Else if the pole is complex and the zero is real, add the
+             conjugate pole and the next closest real zero.
+
+          3. Else if the pole is real and the zero is complex, add the
+             conjugate zero and the real pole closest to those zeros.
+
+          4. Else (we must have a real pole and real zero) add the next
+             real pole closest to the unit circle, and then add the real
+             zero closest to that pole.
+
+    .. [#] This conditional can only be met for specific odd-order inputs
+           with the ``pairing = 'keep_odd'`` or ``'minimal'`` methods.
+
+    .. versionadded:: 0.16.0
+
+    Examples
+    --------
+
+    Design a 6th order low-pass elliptic digital filter for a system with a
+    sampling rate of 8000 Hz that has a pass-band corner frequency of
+    1000 Hz. The ripple in the pass-band should not exceed 0.087 dB, and
+    the attenuation in the stop-band should be at least 90 dB.
+
+    In the following call to `ellip`, we could use ``output='sos'``,
+    but for this example, we'll use ``output='zpk'``, and then convert
+    to SOS format with `zpk2sos`:
+
+    >>> from scipy import signal
+    >>> import numpy as np
+    >>> z, p, k = signal.ellip(6, 0.087, 90, 1000/(0.5*8000), output='zpk')
+
+    Now convert to SOS format.
+
+    >>> sos = signal.zpk2sos(z, p, k)
+
+    The coefficients of the numerators of the sections:
+
+    >>> sos[:, :3]
+    array([[0.0014152 , 0.00248677, 0.0014152 ],
+           [1.        , 0.72976874, 1.        ],
+           [1.        , 0.17607852, 1.        ]])
+
+    The symmetry in the coefficients occurs because all the zeros are on the
+    unit circle.
+
+    The coefficients of the denominators of the sections:
+
+    >>> sos[:, 3:]
+    array([[ 1.        , -1.32544025,  0.46989976],
+           [ 1.        , -1.26118294,  0.62625924],
+           [ 1.        , -1.2570723 ,  0.8619958 ]])
+
+    The next example shows the effect of the `pairing` option. We have a
+    system with three poles and three zeros, so the SOS array will have
+    shape (2, 6). This means there is, in effect, an extra pole and an extra
+    zero at the origin in the SOS representation.
+
+    >>> z1 = np.array([-1, -0.5-0.5j, -0.5+0.5j])
+    >>> p1 = np.array([0.75, 0.8+0.1j, 0.8-0.1j])
+
+    With ``pairing='nearest'`` (the default), we obtain
+
+    >>> signal.zpk2sos(z1, p1, 1)
+    array([[ 1.  ,  1.  ,  0.5 ,  1.  , -0.75,  0.  ],
+           [ 1.  ,  1.  ,  0.  ,  1.  , -1.6 ,  0.65]])
+
+    The first section has the zeros {-0.5-0.5j, -0.5+0.5j} and the poles
+    {0, 0.75}, and the second section has the zeros {-1, 0} and poles
+    {0.8+0.1j, 0.8-0.1j}. Note that the extra pole and zero at the origin
+    have been assigned to different sections.
+
+    With ``pairing='keep_odd'``, we obtain:
+
+    >>> signal.zpk2sos(z1, p1, 1, pairing='keep_odd')
+    array([[ 1.  ,  1.  ,  0.  ,  1.  , -0.75,  0.  ],
+           [ 1.  ,  1.  ,  0.5 ,  1.  , -1.6 ,  0.65]])
+
+    The extra pole and zero at the origin are in the same section.
+ The first section is, in effect, a first-order section. + + With ``pairing='minimal'``, the first-order section doesn't have + the extra pole and zero at the origin: + + >>> signal.zpk2sos(z1, p1, 1, pairing='minimal') + array([[ 0. , 1. , 1. , 0. , 1. , -0.75], + [ 1. , 1. , 0.5 , 1. , -1.6 , 0.65]]) + + """ + # TODO in the near future: + # 1. Add SOS capability to `filtfilt`, `freqz`, etc. somehow (#3259). + # 2. Make `decimate` use `sosfilt` instead of `lfilter`. + # 3. Make sosfilt automatically simplify sections to first order + # when possible. Note this might make `sosfiltfilt` a bit harder (ICs). + # 4. Further optimizations of the section ordering / pole-zero pairing. + # See the wiki for other potential issues. + + if pairing is None: + pairing = 'minimal' if analog else 'nearest' + + valid_pairings = ['nearest', 'keep_odd', 'minimal'] + if pairing not in valid_pairings: + raise ValueError(f'pairing must be one of {valid_pairings}, not {pairing}') + + if analog and pairing != 'minimal': + raise ValueError('for analog zpk2sos conversion, ' + 'pairing must be "minimal"') + + if len(z) == len(p) == 0: + if not analog: + return np.array([[k, 0., 0., 1., 0., 0.]]) + else: + return np.array([[0., 0., k, 0., 0., 1.]]) + + if pairing != 'minimal': + # ensure we have the same number of poles and zeros, and make copies + p = np.concatenate((p, np.zeros(max(len(z) - len(p), 0)))) + z = np.concatenate((z, np.zeros(max(len(p) - len(z), 0)))) + n_sections = (max(len(p), len(z)) + 1) // 2 + + if len(p) % 2 == 1 and pairing == 'nearest': + p = np.concatenate((p, [0.])) + z = np.concatenate((z, [0.])) + assert len(p) == len(z) + else: + if len(p) < len(z): + raise ValueError('for analog zpk2sos conversion, ' + 'must have len(p)>=len(z)') + + n_sections = (len(p) + 1) // 2 + + # Ensure we have complex conjugate pairs + # (note that _cplxreal only gives us one element of each complex pair): + z = np.concatenate(_cplxreal(z)) + p = np.concatenate(_cplxreal(p)) + if not np.isreal(k): + raise ValueError('k must be real') + k = k.real + + if not analog: + # digital: "worst" is the closest to the unit circle + def idx_worst(p): + return np.argmin(np.abs(1 - np.abs(p))) + else: + # analog: "worst" is the closest to the imaginary axis + def idx_worst(p): + return np.argmin(np.abs(np.real(p))) + + sos = np.zeros((n_sections, 6)) + + # Construct the system, reversing order so the "worst" are last + for si in range(n_sections-1, -1, -1): + # Select the next "worst" pole + p1_idx = idx_worst(p) + p1 = p[p1_idx] + p = np.delete(p, p1_idx) + + # Pair that pole with a zero + + if np.isreal(p1) and np.isreal(p).sum() == 0: + # Special case (1): last remaining real pole + if pairing != 'minimal': + z1_idx = _nearest_real_complex_idx(z, p1, 'real') + z1 = z[z1_idx] + z = np.delete(z, z1_idx) + sos[si] = _single_zpksos([z1, 0], [p1, 0], 1) + elif len(z) > 0: + z1_idx = _nearest_real_complex_idx(z, p1, 'real') + z1 = z[z1_idx] + z = np.delete(z, z1_idx) + sos[si] = _single_zpksos([z1], [p1], 1) + else: + sos[si] = _single_zpksos([], [p1], 1) + + elif (len(p) + 1 == len(z) + and not np.isreal(p1) + and np.isreal(p).sum() == 1 + and np.isreal(z).sum() == 1): + + # Special case (2): there's one real pole and one real zero + # left, and an equal number of poles and zeros to pair up. 
+ # We *must* pair with a complex zero + + z1_idx = _nearest_real_complex_idx(z, p1, 'complex') + z1 = z[z1_idx] + z = np.delete(z, z1_idx) + sos[si] = _single_zpksos([z1, z1.conj()], [p1, p1.conj()], 1) + + else: + if np.isreal(p1): + prealidx = np.flatnonzero(np.isreal(p)) + p2_idx = prealidx[idx_worst(p[prealidx])] + p2 = p[p2_idx] + p = np.delete(p, p2_idx) + else: + p2 = p1.conj() + + # find closest zero + if len(z) > 0: + z1_idx = _nearest_real_complex_idx(z, p1, 'any') + z1 = z[z1_idx] + z = np.delete(z, z1_idx) + + if not np.isreal(z1): + sos[si] = _single_zpksos([z1, z1.conj()], [p1, p2], 1) + else: + if len(z) > 0: + z2_idx = _nearest_real_complex_idx(z, p1, 'real') + z2 = z[z2_idx] + assert np.isreal(z2) + z = np.delete(z, z2_idx) + sos[si] = _single_zpksos([z1, z2], [p1, p2], 1) + else: + sos[si] = _single_zpksos([z1], [p1, p2], 1) + else: + # no more zeros + sos[si] = _single_zpksos([], [p1, p2], 1) + + assert len(p) == len(z) == 0 # we've consumed all poles and zeros + del p, z + + # put gain in first sos + sos[0][:3] *= k + return sos + + +def _align_nums(nums): + """Aligns the shapes of multiple numerators. + + Given an array of numerator coefficient arrays [[a_1, a_2,..., + a_n],..., [b_1, b_2,..., b_m]], this function pads shorter numerator + arrays with zero's so that all numerators have the same length. Such + alignment is necessary for functions like 'tf2ss', which needs the + alignment when dealing with SIMO transfer functions. + + Parameters + ---------- + nums: array_like + Numerator or list of numerators. Not necessarily with same length. + + Returns + ------- + nums: array + The numerator. If `nums` input was a list of numerators then a 2-D + array with padded zeros for shorter numerators is returned. Otherwise + returns ``np.asarray(nums)``. + """ + try: + # The statement can throw a ValueError if one + # of the numerators is a single digit and another + # is array-like e.g. if nums = [5, [1, 2, 3]] + nums = asarray(nums) + + if not np.issubdtype(nums.dtype, np.number): + raise ValueError("dtype of numerator is non-numeric") + + return nums + + except ValueError: + nums = [np.atleast_1d(num) for num in nums] + max_width = max(num.size for num in nums) + + # pre-allocate + aligned_nums = np.zeros((len(nums), max_width)) + + # Create numerators with padded zeros + for index, num in enumerate(nums): + aligned_nums[index, -num.size:] = num + + return aligned_nums + + +def normalize(b, a): + """Normalize numerator/denominator of a continuous-time transfer function. + + If values of `b` are too close to 0, they are removed. In that case, a + BadCoefficients warning is emitted. + + Parameters + ---------- + b: array_like + Numerator of the transfer function. Can be a 2-D array to normalize + multiple transfer functions. + a: array_like + Denominator of the transfer function. At most 1-D. + + Returns + ------- + num: array + The numerator of the normalized transfer function. At least a 1-D + array. A 2-D array if the input `num` is a 2-D array. + den: 1-D array + The denominator of the normalized transfer function. + + Notes + ----- + Coefficients for both the numerator and denominator should be specified in + descending exponent order (e.g., ``s^2 + 3s + 5`` would be represented as + ``[1, 3, 5]``). + + Examples + -------- + >>> from scipy.signal import normalize + + Normalize the coefficients of the transfer function + ``(3*s^2 - 2*s + 5) / (2*s^2 + 3*s + 1)``: + + >>> b = [3, -2, 5] + >>> a = [2, 3, 1] + >>> normalize(b, a) + (array([ 1.5, -1. , 2.5]), array([1. 
, 1.5, 0.5])) + + A warning is generated if, for example, the first coefficient of + `b` is 0. In the following example, the result is as expected: + + >>> import warnings + >>> with warnings.catch_warnings(record=True) as w: + ... num, den = normalize([0, 3, 6], [2, -5, 4]) + + >>> num + array([1.5, 3. ]) + >>> den + array([ 1. , -2.5, 2. ]) + + >>> print(w[0].message) + Badly conditioned filter coefficients (numerator): the results may be meaningless + + """ + num, den = b, a + + den = np.atleast_1d(den) + num = np.atleast_2d(_align_nums(num)) + + if den.ndim != 1: + raise ValueError("Denominator polynomial must be rank-1 array.") + if num.ndim > 2: + raise ValueError("Numerator polynomial must be rank-1 or" + " rank-2 array.") + if np.all(den == 0): + raise ValueError("Denominator must have at least on nonzero element.") + + # Trim leading zeros in denominator, leave at least one. + den = np.trim_zeros(den, 'f') + + # Normalize transfer function + num, den = num / den[0], den / den[0] + + # Count numerator columns that are all zero + leading_zeros = 0 + for col in num.T: + if np.allclose(col, 0, atol=1e-14): + leading_zeros += 1 + else: + break + + # Trim leading zeros of numerator + if leading_zeros > 0: + warnings.warn("Badly conditioned filter coefficients (numerator): the " + "results may be meaningless", + BadCoefficients, stacklevel=2) + # Make sure at least one column remains + if leading_zeros == num.shape[1]: + leading_zeros -= 1 + num = num[:, leading_zeros:] + + # Squeeze first dimension if singular + if num.shape[0] == 1: + num = num[0, :] + + return num, den + + +def lp2lp(b, a, wo=1.0): + r""" + Transform a lowpass filter prototype to a different frequency. + + Return an analog low-pass filter with cutoff frequency `wo` + from an analog low-pass filter prototype with unity cutoff frequency, in + transfer function ('ba') representation. + + Parameters + ---------- + b : array_like + Numerator polynomial coefficients. + a : array_like + Denominator polynomial coefficients. + wo : float + Desired cutoff, as angular frequency (e.g. rad/s). + Defaults to no change. + + Returns + ------- + b : array_like + Numerator polynomial coefficients of the transformed low-pass filter. + a : array_like + Denominator polynomial coefficients of the transformed low-pass filter. + + See Also + -------- + lp2hp, lp2bp, lp2bs, bilinear + lp2lp_zpk + + Notes + ----- + This is derived from the s-plane substitution + + .. math:: s \rightarrow \frac{s}{\omega_0} + + Examples + -------- + + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + + >>> lp = signal.lti([1.0], [1.0, 1.0]) + >>> lp2 = signal.lti(*signal.lp2lp(lp.num, lp.den, 2)) + >>> w, mag_lp, p_lp = lp.bode() + >>> w, mag_lp2, p_lp2 = lp2.bode(w) + + >>> plt.plot(w, mag_lp, label='Lowpass') + >>> plt.plot(w, mag_lp2, label='Transformed Lowpass') + >>> plt.semilogx() + >>> plt.grid(True) + >>> plt.xlabel('Frequency [rad/s]') + >>> plt.ylabel('Amplitude [dB]') + >>> plt.legend() + + """ + a, b = map(atleast_1d, (a, b)) + try: + wo = float(wo) + except TypeError: + wo = float(wo[0]) + d = len(a) + n = len(b) + M = max((d, n)) + pwo = pow(wo, np.arange(M - 1, -1, -1)) + start1 = max((n - d, 0)) + start2 = max((d - n, 0)) + b = b * pwo[start1] / pwo[start2:] + a = a * pwo[start1] / pwo[start1:] + return normalize(b, a) + + +def lp2hp(b, a, wo=1.0): + r""" + Transform a lowpass filter prototype to a highpass filter. 
+ + Return an analog high-pass filter with cutoff frequency `wo` + from an analog low-pass filter prototype with unity cutoff frequency, in + transfer function ('ba') representation. + + Parameters + ---------- + b : array_like + Numerator polynomial coefficients. + a : array_like + Denominator polynomial coefficients. + wo : float + Desired cutoff, as angular frequency (e.g., rad/s). + Defaults to no change. + + Returns + ------- + b : array_like + Numerator polynomial coefficients of the transformed high-pass filter. + a : array_like + Denominator polynomial coefficients of the transformed high-pass filter. + + See Also + -------- + lp2lp, lp2bp, lp2bs, bilinear + lp2hp_zpk + + Notes + ----- + This is derived from the s-plane substitution + + .. math:: s \rightarrow \frac{\omega_0}{s} + + This maintains symmetry of the lowpass and highpass responses on a + logarithmic scale. + + Examples + -------- + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + + >>> lp = signal.lti([1.0], [1.0, 1.0]) + >>> hp = signal.lti(*signal.lp2hp(lp.num, lp.den)) + >>> w, mag_lp, p_lp = lp.bode() + >>> w, mag_hp, p_hp = hp.bode(w) + + >>> plt.plot(w, mag_lp, label='Lowpass') + >>> plt.plot(w, mag_hp, label='Highpass') + >>> plt.semilogx() + >>> plt.grid(True) + >>> plt.xlabel('Frequency [rad/s]') + >>> plt.ylabel('Amplitude [dB]') + >>> plt.legend() + + """ + a, b = map(atleast_1d, (a, b)) + try: + wo = float(wo) + except TypeError: + wo = float(wo[0]) + d = len(a) + n = len(b) + if wo != 1: + pwo = pow(wo, np.arange(max((d, n)))) + else: + pwo = np.ones(max((d, n)), b.dtype.char) + if d >= n: + outa = a[::-1] * pwo + outb = resize(b, (d,)) + outb[n:] = 0.0 + outb[:n] = b[::-1] * pwo[:n] + else: + outb = b[::-1] * pwo + outa = resize(a, (n,)) + outa[d:] = 0.0 + outa[:d] = a[::-1] * pwo[:d] + + return normalize(outb, outa) + + +def lp2bp(b, a, wo=1.0, bw=1.0): + r""" + Transform a lowpass filter prototype to a bandpass filter. + + Return an analog band-pass filter with center frequency `wo` and + bandwidth `bw` from an analog low-pass filter prototype with unity + cutoff frequency, in transfer function ('ba') representation. + + Parameters + ---------- + b : array_like + Numerator polynomial coefficients. + a : array_like + Denominator polynomial coefficients. + wo : float + Desired passband center, as angular frequency (e.g., rad/s). + Defaults to no change. + bw : float + Desired passband width, as angular frequency (e.g., rad/s). + Defaults to 1. + + Returns + ------- + b : array_like + Numerator polynomial coefficients of the transformed band-pass filter. + a : array_like + Denominator polynomial coefficients of the transformed band-pass filter. + + See Also + -------- + lp2lp, lp2hp, lp2bs, bilinear + lp2bp_zpk + + Notes + ----- + This is derived from the s-plane substitution + + .. math:: s \rightarrow \frac{s^2 + {\omega_0}^2}{s \cdot \mathrm{BW}} + + This is the "wideband" transformation, producing a passband with + geometric (log frequency) symmetry about `wo`. 
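+
+ For illustration, applying this substitution to the first-order low-pass
+ prototype :math:`H(s) = 1 / (s + 1)` gives the second-order band-pass
+ response
+
+ .. math:: H_{bp}(s) = \frac{s \cdot \mathrm{BW}}{s^2 + s \cdot \mathrm{BW} + {\omega_0}^2}
+
+ so each prototype pole and zero maps to a pair, and the transformed filter
+ has twice the order of the prototype.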
+ + Examples + -------- + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + + >>> lp = signal.lti([1.0], [1.0, 1.0]) + >>> bp = signal.lti(*signal.lp2bp(lp.num, lp.den)) + >>> w, mag_lp, p_lp = lp.bode() + >>> w, mag_bp, p_bp = bp.bode(w) + + >>> plt.plot(w, mag_lp, label='Lowpass') + >>> plt.plot(w, mag_bp, label='Bandpass') + >>> plt.semilogx() + >>> plt.grid(True) + >>> plt.xlabel('Frequency [rad/s]') + >>> plt.ylabel('Amplitude [dB]') + >>> plt.legend() + """ + + a, b = map(atleast_1d, (a, b)) + D = len(a) - 1 + N = len(b) - 1 + artype = mintypecode((a, b)) + ma = max([N, D]) + Np = N + ma + Dp = D + ma + bprime = np.empty(Np + 1, artype) + aprime = np.empty(Dp + 1, artype) + wosq = wo * wo + for j in range(Np + 1): + val = 0.0 + for i in range(0, N + 1): + for k in range(0, i + 1): + if ma - i + 2 * k == j: + val += comb(i, k) * b[N - i] * (wosq) ** (i - k) / bw ** i + bprime[Np - j] = val + for j in range(Dp + 1): + val = 0.0 + for i in range(0, D + 1): + for k in range(0, i + 1): + if ma - i + 2 * k == j: + val += comb(i, k) * a[D - i] * (wosq) ** (i - k) / bw ** i + aprime[Dp - j] = val + + return normalize(bprime, aprime) + + +def lp2bs(b, a, wo=1.0, bw=1.0): + r""" + Transform a lowpass filter prototype to a bandstop filter. + + Return an analog band-stop filter with center frequency `wo` and + bandwidth `bw` from an analog low-pass filter prototype with unity + cutoff frequency, in transfer function ('ba') representation. + + Parameters + ---------- + b : array_like + Numerator polynomial coefficients. + a : array_like + Denominator polynomial coefficients. + wo : float + Desired stopband center, as angular frequency (e.g., rad/s). + Defaults to no change. + bw : float + Desired stopband width, as angular frequency (e.g., rad/s). + Defaults to 1. + + Returns + ------- + b : array_like + Numerator polynomial coefficients of the transformed band-stop filter. + a : array_like + Denominator polynomial coefficients of the transformed band-stop filter. + + See Also + -------- + lp2lp, lp2hp, lp2bp, bilinear + lp2bs_zpk + + Notes + ----- + This is derived from the s-plane substitution + + .. math:: s \rightarrow \frac{s \cdot \mathrm{BW}}{s^2 + {\omega_0}^2} + + This is the "wideband" transformation, producing a stopband with + geometric (log frequency) symmetry about `wo`. 
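+
+ For example, the first-order low-pass prototype :math:`H(s) = 1 / (s + 1)`
+ maps under this substitution to the second-order band-stop response
+
+ .. math:: H_{bs}(s) = \frac{s^2 + {\omega_0}^2}{s^2 + s \cdot \mathrm{BW} + {\omega_0}^2}
+
+ whose zeros lie at :math:`s = \pm j \omega_0`, i.e. at the center of the
+ stopband.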
+ + Examples + -------- + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + + >>> lp = signal.lti([1.0], [1.0, 1.5]) + >>> bs = signal.lti(*signal.lp2bs(lp.num, lp.den)) + >>> w, mag_lp, p_lp = lp.bode() + >>> w, mag_bs, p_bs = bs.bode(w) + >>> plt.plot(w, mag_lp, label='Lowpass') + >>> plt.plot(w, mag_bs, label='Bandstop') + >>> plt.semilogx() + >>> plt.grid(True) + >>> plt.xlabel('Frequency [rad/s]') + >>> plt.ylabel('Amplitude [dB]') + >>> plt.legend() + """ + a, b = map(atleast_1d, (a, b)) + D = len(a) - 1 + N = len(b) - 1 + artype = mintypecode((a, b)) + M = max([N, D]) + Np = M + M + Dp = M + M + bprime = np.empty(Np + 1, artype) + aprime = np.empty(Dp + 1, artype) + wosq = wo * wo + for j in range(Np + 1): + val = 0.0 + for i in range(0, N + 1): + for k in range(0, M - i + 1): + if i + 2 * k == j: + val += (comb(M - i, k) * b[N - i] * + (wosq) ** (M - i - k) * bw ** i) + bprime[Np - j] = val + for j in range(Dp + 1): + val = 0.0 + for i in range(0, D + 1): + for k in range(0, M - i + 1): + if i + 2 * k == j: + val += (comb(M - i, k) * a[D - i] * + (wosq) ** (M - i - k) * bw ** i) + aprime[Dp - j] = val + + return normalize(bprime, aprime) + + +def bilinear(b, a, fs=1.0): + r""" + Return a digital IIR filter from an analog one using a bilinear transform. + + Transform a set of poles and zeros from the analog s-plane to the digital + z-plane using Tustin's method, which substitutes ``2*fs*(z-1) / (z+1)`` for + ``s``, maintaining the shape of the frequency response. + + Parameters + ---------- + b : array_like + Numerator of the analog filter transfer function. + a : array_like + Denominator of the analog filter transfer function. + fs : float + Sample rate, as ordinary frequency (e.g., hertz). No prewarping is + done in this function. + + Returns + ------- + b : ndarray + Numerator of the transformed digital filter transfer function. + a : ndarray + Denominator of the transformed digital filter transfer function. + + See Also + -------- + lp2lp, lp2hp, lp2bp, lp2bs + bilinear_zpk + + Examples + -------- + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + >>> import numpy as np + + >>> fs = 100 + >>> bf = 2 * np.pi * np.array([7, 13]) + >>> filts = signal.lti(*signal.butter(4, bf, btype='bandpass', + ... analog=True)) + >>> filtz = signal.lti(*signal.bilinear(filts.num, filts.den, fs)) + >>> wz, hz = signal.freqz(filtz.num, filtz.den) + >>> ws, hs = signal.freqs(filts.num, filts.den, worN=fs*wz) + + >>> plt.semilogx(wz*fs/(2*np.pi), 20*np.log10(np.abs(hz).clip(1e-15)), + ... label=r'$|H_z(e^{j \omega})|$') + >>> plt.semilogx(wz*fs/(2*np.pi), 20*np.log10(np.abs(hs).clip(1e-15)), + ... 
label=r'$|H(j \omega)|$') + >>> plt.legend() + >>> plt.xlabel('Frequency [Hz]') + >>> plt.ylabel('Amplitude [dB]') + >>> plt.grid(True) + """ + fs = _validate_fs(fs, allow_none=False) + a, b = map(atleast_1d, (a, b)) + D = len(a) - 1 + N = len(b) - 1 + artype = float + M = max([N, D]) + Np = M + Dp = M + bprime = np.empty(Np + 1, artype) + aprime = np.empty(Dp + 1, artype) + for j in range(Np + 1): + val = 0.0 + for i in range(N + 1): + for k in range(i + 1): + for l in range(M - i + 1): + if k + l == j: + val += (comb(i, k) * comb(M - i, l) * b[N - i] * + pow(2 * fs, i) * (-1) ** k) + bprime[j] = real(val) + for j in range(Dp + 1): + val = 0.0 + for i in range(D + 1): + for k in range(i + 1): + for l in range(M - i + 1): + if k + l == j: + val += (comb(i, k) * comb(M - i, l) * a[D - i] * + pow(2 * fs, i) * (-1) ** k) + aprime[j] = real(val) + + return normalize(bprime, aprime) + + +def _validate_gpass_gstop(gpass, gstop): + + if gpass <= 0.0: + raise ValueError("gpass should be larger than 0.0") + elif gstop <= 0.0: + raise ValueError("gstop should be larger than 0.0") + elif gpass > gstop: + raise ValueError("gpass should be smaller than gstop") + + +def iirdesign(wp, ws, gpass, gstop, analog=False, ftype='ellip', output='ba', + fs=None): + """Complete IIR digital and analog filter design. + + Given passband and stopband frequencies and gains, construct an analog or + digital IIR filter of minimum order for a given basic type. Return the + output in numerator, denominator ('ba'), pole-zero ('zpk') or second order + sections ('sos') form. + + Parameters + ---------- + wp, ws : float or array like, shape (2,) + Passband and stopband edge frequencies. Possible values are scalars + (for lowpass and highpass filters) or ranges (for bandpass and bandstop + filters). + For digital filters, these are in the same units as `fs`. By default, + `fs` is 2 half-cycles/sample, so these are normalized from 0 to 1, + where 1 is the Nyquist frequency. For example: + + - Lowpass: wp = 0.2, ws = 0.3 + - Highpass: wp = 0.3, ws = 0.2 + - Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6] + - Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5] + + For analog filters, `wp` and `ws` are angular frequencies (e.g., rad/s). + Note, that for bandpass and bandstop filters passband must lie strictly + inside stopband or vice versa. Also note that the cutoff at the band edges + for IIR filters is defined as half-power, so -3dB, not half-amplitude (-6dB) + like for `scipy.signal.fiwin`. + gpass : float + The maximum loss in the passband (dB). + gstop : float + The minimum attenuation in the stopband (dB). + analog : bool, optional + When True, return an analog filter, otherwise a digital filter is + returned. + ftype : str, optional + The type of IIR filter to design: + + - Butterworth : 'butter' + - Chebyshev I : 'cheby1' + - Chebyshev II : 'cheby2' + - Cauer/elliptic: 'ellip' + + output : {'ba', 'zpk', 'sos'}, optional + Filter form of the output: + + - second-order sections (recommended): 'sos' + - numerator/denominator (default) : 'ba' + - pole-zero : 'zpk' + + In general the second-order sections ('sos') form is + recommended because inferring the coefficients for the + numerator/denominator form ('ba') suffers from numerical + instabilities. For reasons of backward compatibility the default + form is the numerator/denominator form ('ba'), where the 'b' + and the 'a' in 'ba' refer to the commonly used names of the + coefficients used. 
+ + Note: Using the second-order sections form ('sos') is sometimes + associated with additional computational costs: for + data-intense use cases it is therefore recommended to also + investigate the numerator/denominator form ('ba'). + + fs : float, optional + The sampling frequency of the digital system. + + .. versionadded:: 1.2.0 + + Returns + ------- + b, a : ndarray, ndarray + Numerator (`b`) and denominator (`a`) polynomials of the IIR filter. + Only returned if ``output='ba'``. + z, p, k : ndarray, ndarray, float + Zeros, poles, and system gain of the IIR filter transfer + function. Only returned if ``output='zpk'``. + sos : ndarray + Second-order sections representation of the IIR filter. + Only returned if ``output='sos'``. + + See Also + -------- + butter : Filter design using order and critical points + cheby1, cheby2, ellip, bessel + buttord : Find order and critical points from passband and stopband spec + cheb1ord, cheb2ord, ellipord + iirfilter : General filter design using order and critical frequencies + + Notes + ----- + The ``'sos'`` output parameter was added in 0.16.0. + + Examples + -------- + >>> import numpy as np + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + >>> import matplotlib.ticker + + >>> wp = 0.2 + >>> ws = 0.3 + >>> gpass = 1 + >>> gstop = 40 + + >>> system = signal.iirdesign(wp, ws, gpass, gstop) + >>> w, h = signal.freqz(*system) + + >>> fig, ax1 = plt.subplots() + >>> ax1.set_title('Digital filter frequency response') + >>> ax1.plot(w, 20 * np.log10(abs(h)), 'b') + >>> ax1.set_ylabel('Amplitude [dB]', color='b') + >>> ax1.set_xlabel('Frequency [rad/sample]') + >>> ax1.grid(True) + >>> ax1.set_ylim([-120, 20]) + >>> ax2 = ax1.twinx() + >>> phase = np.unwrap(np.angle(h)) + >>> ax2.plot(w, phase, 'g') + >>> ax2.set_ylabel('Phase [rad]', color='g') + >>> ax2.grid(True) + >>> ax2.axis('tight') + >>> ax2.set_ylim([-6, 1]) + >>> nticks = 8 + >>> ax1.yaxis.set_major_locator(matplotlib.ticker.LinearLocator(nticks)) + >>> ax2.yaxis.set_major_locator(matplotlib.ticker.LinearLocator(nticks)) + + """ + try: + ordfunc = filter_dict[ftype][1] + except KeyError as e: + raise ValueError(f"Invalid IIR filter type: {ftype}") from e + except IndexError as e: + raise ValueError(f"{ftype} does not have order selection. 
" + "Use iirfilter function.") from e + + _validate_gpass_gstop(gpass, gstop) + + wp = atleast_1d(wp) + ws = atleast_1d(ws) + + fs = _validate_fs(fs, allow_none=True) + + if wp.shape[0] != ws.shape[0] or wp.shape not in [(1,), (2,)]: + raise ValueError("wp and ws must have one or two elements each, and " + f"the same shape, got {wp.shape} and {ws.shape}") + + if any(wp <= 0) or any(ws <= 0): + raise ValueError("Values for wp, ws must be greater than 0") + + if not analog: + if fs is None: + if any(wp >= 1) or any(ws >= 1): + raise ValueError("Values for wp, ws must be less than 1") + elif any(wp >= fs/2) or any(ws >= fs/2): + raise ValueError("Values for wp, ws must be less than fs/2 " + f"(fs={fs} -> fs/2={fs/2})") + + if wp.shape[0] == 2: + if not ((ws[0] < wp[0] and wp[1] < ws[1]) or + (wp[0] < ws[0] and ws[1] < wp[1])): + raise ValueError("Passband must lie strictly inside stopband " + "or vice versa") + + band_type = 2 * (len(wp) - 1) + band_type += 1 + if wp[0] >= ws[0]: + band_type += 1 + + btype = {1: 'lowpass', 2: 'highpass', + 3: 'bandstop', 4: 'bandpass'}[band_type] + + N, Wn = ordfunc(wp, ws, gpass, gstop, analog=analog, fs=fs) + return iirfilter(N, Wn, rp=gpass, rs=gstop, analog=analog, btype=btype, + ftype=ftype, output=output, fs=fs) + + +def iirfilter(N, Wn, rp=None, rs=None, btype='band', analog=False, + ftype='butter', output='ba', fs=None): + """ + IIR digital and analog filter design given order and critical points. + + Design an Nth-order digital or analog filter and return the filter + coefficients. + + Parameters + ---------- + N : int + The order of the filter. + Wn : array_like + A scalar or length-2 sequence giving the critical frequencies. + + For digital filters, `Wn` are in the same units as `fs`. By default, + `fs` is 2 half-cycles/sample, so these are normalized from 0 to 1, + where 1 is the Nyquist frequency. (`Wn` is thus in + half-cycles / sample.) + + For analog filters, `Wn` is an angular frequency (e.g., rad/s). + + When Wn is a length-2 sequence, ``Wn[0]`` must be less than ``Wn[1]``. + rp : float, optional + For Chebyshev and elliptic filters, provides the maximum ripple + in the passband. (dB) + rs : float, optional + For Chebyshev and elliptic filters, provides the minimum attenuation + in the stop band. (dB) + btype : {'bandpass', 'lowpass', 'highpass', 'bandstop'}, optional + The type of filter. Default is 'bandpass'. + analog : bool, optional + When True, return an analog filter, otherwise a digital filter is + returned. + ftype : str, optional + The type of IIR filter to design: + + - Butterworth : 'butter' + - Chebyshev I : 'cheby1' + - Chebyshev II : 'cheby2' + - Cauer/elliptic: 'ellip' + - Bessel/Thomson: 'bessel' + + output : {'ba', 'zpk', 'sos'}, optional + Filter form of the output: + + - second-order sections (recommended): 'sos' + - numerator/denominator (default) : 'ba' + - pole-zero : 'zpk' + + In general the second-order sections ('sos') form is + recommended because inferring the coefficients for the + numerator/denominator form ('ba') suffers from numerical + instabilities. For reasons of backward compatibility the default + form is the numerator/denominator form ('ba'), where the 'b' + and the 'a' in 'ba' refer to the commonly used names of the + coefficients used. + + Note: Using the second-order sections form ('sos') is sometimes + associated with additional computational costs: for + data-intense use cases it is therefore recommended to also + investigate the numerator/denominator form ('ba'). 
+ + fs : float, optional + The sampling frequency of the digital system. + + .. versionadded:: 1.2.0 + + Returns + ------- + b, a : ndarray, ndarray + Numerator (`b`) and denominator (`a`) polynomials of the IIR filter. + Only returned if ``output='ba'``. + z, p, k : ndarray, ndarray, float + Zeros, poles, and system gain of the IIR filter transfer + function. Only returned if ``output='zpk'``. + sos : ndarray + Second-order sections representation of the IIR filter. + Only returned if ``output='sos'``. + + See Also + -------- + butter : Filter design using order and critical points + cheby1, cheby2, ellip, bessel + buttord : Find order and critical points from passband and stopband spec + cheb1ord, cheb2ord, ellipord + iirdesign : General filter design using passband and stopband spec + + Notes + ----- + The ``'sos'`` output parameter was added in 0.16.0. + + Examples + -------- + Generate a 17th-order Chebyshev II analog bandpass filter from 50 Hz to + 200 Hz and plot the frequency response: + + >>> import numpy as np + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + + >>> b, a = signal.iirfilter(17, [2*np.pi*50, 2*np.pi*200], rs=60, + ... btype='band', analog=True, ftype='cheby2') + >>> w, h = signal.freqs(b, a, 1000) + >>> fig = plt.figure() + >>> ax = fig.add_subplot(1, 1, 1) + >>> ax.semilogx(w / (2*np.pi), 20 * np.log10(np.maximum(abs(h), 1e-5))) + >>> ax.set_title('Chebyshev Type II bandpass frequency response') + >>> ax.set_xlabel('Frequency [Hz]') + >>> ax.set_ylabel('Amplitude [dB]') + >>> ax.axis((10, 1000, -100, 10)) + >>> ax.grid(which='both', axis='both') + >>> plt.show() + + Create a digital filter with the same properties, in a system with + sampling rate of 2000 Hz, and plot the frequency response. (Second-order + sections implementation is required to ensure stability of a filter of + this order): + + >>> sos = signal.iirfilter(17, [50, 200], rs=60, btype='band', + ... analog=False, ftype='cheby2', fs=2000, + ... 
output='sos') + >>> w, h = signal.freqz_sos(sos, 2000, fs=2000) + >>> fig = plt.figure() + >>> ax = fig.add_subplot(1, 1, 1) + >>> ax.semilogx(w, 20 * np.log10(np.maximum(abs(h), 1e-5))) + >>> ax.set_title('Chebyshev Type II bandpass frequency response') + >>> ax.set_xlabel('Frequency [Hz]') + >>> ax.set_ylabel('Amplitude [dB]') + >>> ax.axis((10, 1000, -100, 10)) + >>> ax.grid(which='both', axis='both') + >>> plt.show() + + """ + fs = _validate_fs(fs, allow_none=True) + ftype, btype, output = (x.lower() for x in (ftype, btype, output)) + Wn = asarray(Wn) + if fs is not None: + if analog: + raise ValueError("fs cannot be specified for an analog filter") + Wn = Wn / (fs/2) + + if np.any(Wn <= 0): + raise ValueError("filter critical frequencies must be greater than 0") + + if Wn.size > 1 and not Wn[0] < Wn[1]: + raise ValueError("Wn[0] must be less than Wn[1]") + + try: + btype = band_dict[btype] + except KeyError as e: + raise ValueError(f"'{btype}' is an invalid bandtype for filter.") from e + + try: + typefunc = filter_dict[ftype][0] + except KeyError as e: + raise ValueError(f"'{ftype}' is not a valid basic IIR filter.") from e + + if output not in ['ba', 'zpk', 'sos']: + raise ValueError(f"'{output}' is not a valid output form.") + + if rp is not None and rp < 0: + raise ValueError("passband ripple (rp) must be positive") + + if rs is not None and rs < 0: + raise ValueError("stopband attenuation (rs) must be positive") + + # Get analog lowpass prototype + if typefunc == buttap: + z, p, k = typefunc(N) + elif typefunc == besselap: + z, p, k = typefunc(N, norm=bessel_norms[ftype]) + elif typefunc == cheb1ap: + if rp is None: + raise ValueError("passband ripple (rp) must be provided to " + "design a Chebyshev I filter.") + z, p, k = typefunc(N, rp) + elif typefunc == cheb2ap: + if rs is None: + raise ValueError("stopband attenuation (rs) must be provided to " + "design an Chebyshev II filter.") + z, p, k = typefunc(N, rs) + elif typefunc == ellipap: + if rs is None or rp is None: + raise ValueError("Both rp and rs must be provided to design an " + "elliptic filter.") + z, p, k = typefunc(N, rp, rs) + else: + raise NotImplementedError(f"'{ftype}' not implemented in iirfilter.") + + # Pre-warp frequencies for digital filter design + if not analog: + if np.any(Wn <= 0) or np.any(Wn >= 1): + if fs is not None: + raise ValueError("Digital filter critical frequencies must " + f"be 0 < Wn < fs/2 (fs={fs} -> fs/2={fs/2})") + raise ValueError("Digital filter critical frequencies " + "must be 0 < Wn < 1") + fs = 2.0 + warped = 2 * fs * tan(pi * Wn / fs) + else: + warped = Wn + + # transform to lowpass, bandpass, highpass, or bandstop + if btype in ('lowpass', 'highpass'): + if np.size(Wn) != 1: + raise ValueError('Must specify a single critical frequency Wn ' + 'for lowpass or highpass filter') + + if btype == 'lowpass': + z, p, k = lp2lp_zpk(z, p, k, wo=warped) + elif btype == 'highpass': + z, p, k = lp2hp_zpk(z, p, k, wo=warped) + elif btype in ('bandpass', 'bandstop'): + try: + bw = warped[1] - warped[0] + wo = sqrt(warped[0] * warped[1]) + except IndexError as e: + raise ValueError('Wn must specify start and stop frequencies for ' + 'bandpass or bandstop filter') from e + + if btype == 'bandpass': + z, p, k = lp2bp_zpk(z, p, k, wo=wo, bw=bw) + elif btype == 'bandstop': + z, p, k = lp2bs_zpk(z, p, k, wo=wo, bw=bw) + else: + raise NotImplementedError(f"'{btype}' not implemented in iirfilter.") + + # Find discrete equivalent if necessary + if not analog: + z, p, k = bilinear_zpk(z, p, k, fs=fs) + + 
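+ # Note: everything above (analog prototype, frequency transformation,
+ # bilinear transform) has been carried out in zpk form; the requested
+ # output form is produced only in the final step below, so polynomial
+ # ('ba') round-off error is not accumulated during the design itself.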
# Transform to proper out type (pole-zero, state-space, numer-denom) + if output == 'zpk': + return z, p, k + elif output == 'ba': + return zpk2tf(z, p, k) + elif output == 'sos': + return zpk2sos(z, p, k, analog=analog) + + +def _relative_degree(z, p): + """ + Return relative degree of transfer function from zeros and poles + """ + degree = len(p) - len(z) + if degree < 0: + raise ValueError("Improper transfer function. " + "Must have at least as many poles as zeros.") + else: + return degree + + +def bilinear_zpk(z, p, k, fs): + r""" + Return a digital IIR filter from an analog one using a bilinear transform. + + Transform a set of poles and zeros from the analog s-plane to the digital + z-plane using Tustin's method, which substitutes ``2*fs*(z-1) / (z+1)`` for + ``s``, maintaining the shape of the frequency response. + + Parameters + ---------- + z : array_like + Zeros of the analog filter transfer function. + p : array_like + Poles of the analog filter transfer function. + k : float + System gain of the analog filter transfer function. + fs : float + Sample rate, as ordinary frequency (e.g., hertz). No prewarping is + done in this function. + + Returns + ------- + z : ndarray + Zeros of the transformed digital filter transfer function. + p : ndarray + Poles of the transformed digital filter transfer function. + k : float + System gain of the transformed digital filter. + + See Also + -------- + lp2lp_zpk, lp2hp_zpk, lp2bp_zpk, lp2bs_zpk + bilinear + + Notes + ----- + .. versionadded:: 1.1.0 + + Examples + -------- + >>> import numpy as np + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + + >>> fs = 100 + >>> bf = 2 * np.pi * np.array([7, 13]) + >>> filts = signal.lti(*signal.butter(4, bf, btype='bandpass', analog=True, + ... output='zpk')) + >>> filtz = signal.lti(*signal.bilinear_zpk(filts.zeros, filts.poles, + ... filts.gain, fs)) + >>> wz, hz = signal.freqz_zpk(filtz.zeros, filtz.poles, filtz.gain) + >>> ws, hs = signal.freqs_zpk(filts.zeros, filts.poles, filts.gain, + ... worN=fs*wz) + >>> plt.semilogx(wz*fs/(2*np.pi), 20*np.log10(np.abs(hz).clip(1e-15)), + ... label=r'$|H_z(e^{j \omega})|$') + >>> plt.semilogx(wz*fs/(2*np.pi), 20*np.log10(np.abs(hs).clip(1e-15)), + ... label=r'$|H(j \omega)|$') + >>> plt.legend() + >>> plt.xlabel('Frequency [Hz]') + >>> plt.ylabel('Amplitude [dB]') + >>> plt.grid(True) + """ + z = atleast_1d(z) + p = atleast_1d(p) + + fs = _validate_fs(fs, allow_none=False) + + degree = _relative_degree(z, p) + + fs2 = 2.0*fs + + # Bilinear transform the poles and zeros + z_z = (fs2 + z) / (fs2 - z) + p_z = (fs2 + p) / (fs2 - p) + + # Any zeros that were at infinity get moved to the Nyquist frequency + z_z = append(z_z, -ones(degree)) + + # Compensate for gain change + k_z = k * real(prod(fs2 - z) / prod(fs2 - p)) + + return z_z, p_z, k_z + + +def lp2lp_zpk(z, p, k, wo=1.0): + r""" + Transform a lowpass filter prototype to a different frequency. + + Return an analog low-pass filter with cutoff frequency `wo` + from an analog low-pass filter prototype with unity cutoff frequency, + using zeros, poles, and gain ('zpk') representation. + + Parameters + ---------- + z : array_like + Zeros of the analog filter transfer function. + p : array_like + Poles of the analog filter transfer function. + k : float + System gain of the analog filter transfer function. + wo : float + Desired cutoff, as angular frequency (e.g., rad/s). + Defaults to no change. + + Returns + ------- + z : ndarray + Zeros of the transformed low-pass filter transfer function. 
+ p : ndarray + Poles of the transformed low-pass filter transfer function. + k : float + System gain of the transformed low-pass filter. + + See Also + -------- + lp2hp_zpk, lp2bp_zpk, lp2bs_zpk, bilinear + lp2lp + + Notes + ----- + This is derived from the s-plane substitution + + .. math:: s \rightarrow \frac{s}{\omega_0} + + .. versionadded:: 1.1.0 + + Examples + -------- + Use the 'zpk' (Zero-Pole-Gain) representation of a lowpass filter to + transform it to a new 'zpk' representation associated with a cutoff frequency wo. + + >>> from scipy.signal import lp2lp_zpk + >>> z = [7, 2] + >>> p = [5, 13] + >>> k = 0.8 + >>> wo = 0.4 + >>> lp2lp_zpk(z, p, k, wo) + ( array([2.8, 0.8]), array([2. , 5.2]), 0.8) + """ + z = atleast_1d(z) + p = atleast_1d(p) + wo = float(wo) # Avoid int wraparound + + degree = _relative_degree(z, p) + + # Scale all points radially from origin to shift cutoff frequency + z_lp = wo * z + p_lp = wo * p + + # Each shifted pole decreases gain by wo, each shifted zero increases it. + # Cancel out the net change to keep overall gain the same + k_lp = k * wo**degree + + return z_lp, p_lp, k_lp + + +def lp2hp_zpk(z, p, k, wo=1.0): + r""" + Transform a lowpass filter prototype to a highpass filter. + + Return an analog high-pass filter with cutoff frequency `wo` + from an analog low-pass filter prototype with unity cutoff frequency, + using zeros, poles, and gain ('zpk') representation. + + Parameters + ---------- + z : array_like + Zeros of the analog filter transfer function. + p : array_like + Poles of the analog filter transfer function. + k : float + System gain of the analog filter transfer function. + wo : float + Desired cutoff, as angular frequency (e.g., rad/s). + Defaults to no change. + + Returns + ------- + z : ndarray + Zeros of the transformed high-pass filter transfer function. + p : ndarray + Poles of the transformed high-pass filter transfer function. + k : float + System gain of the transformed high-pass filter. + + See Also + -------- + lp2lp_zpk, lp2bp_zpk, lp2bs_zpk, bilinear + lp2hp + + Notes + ----- + This is derived from the s-plane substitution + + .. math:: s \rightarrow \frac{\omega_0}{s} + + This maintains symmetry of the lowpass and highpass responses on a + logarithmic scale. + + .. versionadded:: 1.1.0 + + Examples + -------- + Use the 'zpk' (Zero-Pole-Gain) representation of a lowpass filter to + transform it to a highpass filter with a cutoff frequency wo. + + >>> from scipy.signal import lp2hp_zpk + >>> z = [ -2 + 3j , -0.5 - 0.8j ] + >>> p = [ -1 , -4 ] + >>> k = 10 + >>> wo = 0.6 + >>> lp2hp_zpk(z, p, k, wo) + ( array([-0.09230769-0.13846154j, -0.33707865+0.53932584j]), + array([-0.6 , -0.15]), + 8.5) + """ + z = atleast_1d(z) + p = atleast_1d(p) + wo = float(wo) + + degree = _relative_degree(z, p) + + # Invert positions radially about unit circle to convert LPF to HPF + # Scale all points radially from origin to shift cutoff frequency + z_hp = wo / z + p_hp = wo / p + + # If lowpass had zeros at infinity, inverting moves them to origin. + z_hp = append(z_hp, zeros(degree)) + + # Cancel out gain change caused by inversion + k_hp = k * real(prod(-z) / prod(-p)) + + return z_hp, p_hp, k_hp + + +def lp2bp_zpk(z, p, k, wo=1.0, bw=1.0): + r""" + Transform a lowpass filter prototype to a bandpass filter. + + Return an analog band-pass filter with center frequency `wo` and + bandwidth `bw` from an analog low-pass filter prototype with unity + cutoff frequency, using zeros, poles, and gain ('zpk') representation. 
+ + Parameters + ---------- + z : array_like + Zeros of the analog filter transfer function. + p : array_like + Poles of the analog filter transfer function. + k : float + System gain of the analog filter transfer function. + wo : float + Desired passband center, as angular frequency (e.g., rad/s). + Defaults to no change. + bw : float + Desired passband width, as angular frequency (e.g., rad/s). + Defaults to 1. + + Returns + ------- + z : ndarray + Zeros of the transformed band-pass filter transfer function. + p : ndarray + Poles of the transformed band-pass filter transfer function. + k : float + System gain of the transformed band-pass filter. + + See Also + -------- + lp2lp_zpk, lp2hp_zpk, lp2bs_zpk, bilinear + lp2bp + + Notes + ----- + This is derived from the s-plane substitution + + .. math:: s \rightarrow \frac{s^2 + {\omega_0}^2}{s \cdot \mathrm{BW}} + + This is the "wideband" transformation, producing a passband with + geometric (log frequency) symmetry about `wo`. + + .. versionadded:: 1.1.0 + + Examples + -------- + Use the 'zpk' (Zero-Pole-Gain) representation of a lowpass filter to + transform it to a bandpass filter with a center frequency wo and + bandwidth bw. + + >>> from scipy.signal import lp2bp_zpk + >>> z = [ 5 + 2j , 5 - 2j ] + >>> p = [ 7 , -16 ] + >>> k = 0.8 + >>> wo = 0.62 + >>> bw = 15 + >>> lp2bp_zpk(z, p, k, wo, bw) + ( array([7.49955815e+01+3.00017676e+01j, 7.49955815e+01-3.00017676e+01j, + 4.41850748e-03-1.76761126e-03j, 4.41850748e-03+1.76761126e-03j]), + array([1.04996339e+02+0.j, -1.60167736e-03+0.j, 3.66108003e-03+0.j, + -2.39998398e+02+0.j]), 0.8) + """ + z = atleast_1d(z) + p = atleast_1d(p) + wo = float(wo) + bw = float(bw) + + degree = _relative_degree(z, p) + + # Scale poles and zeros to desired bandwidth + z_lp = z * bw/2 + p_lp = p * bw/2 + + # Square root needs to produce complex result, not NaN + z_lp = z_lp.astype(complex) + p_lp = p_lp.astype(complex) + + # Duplicate poles and zeros and shift from baseband to +wo and -wo + z_bp = concatenate((z_lp + sqrt(z_lp**2 - wo**2), + z_lp - sqrt(z_lp**2 - wo**2))) + p_bp = concatenate((p_lp + sqrt(p_lp**2 - wo**2), + p_lp - sqrt(p_lp**2 - wo**2))) + + # Move degree zeros to origin, leaving degree zeros at infinity for BPF + z_bp = append(z_bp, zeros(degree)) + + # Cancel out gain change from frequency scaling + k_bp = k * bw**degree + + return z_bp, p_bp, k_bp + + +def lp2bs_zpk(z, p, k, wo=1.0, bw=1.0): + r""" + Transform a lowpass filter prototype to a bandstop filter. + + Return an analog band-stop filter with center frequency `wo` and + stopband width `bw` from an analog low-pass filter prototype with unity + cutoff frequency, using zeros, poles, and gain ('zpk') representation. + + Parameters + ---------- + z : array_like + Zeros of the analog filter transfer function. + p : array_like + Poles of the analog filter transfer function. + k : float + System gain of the analog filter transfer function. + wo : float + Desired stopband center, as angular frequency (e.g., rad/s). + Defaults to no change. + bw : float + Desired stopband width, as angular frequency (e.g., rad/s). + Defaults to 1. + + Returns + ------- + z : ndarray + Zeros of the transformed band-stop filter transfer function. + p : ndarray + Poles of the transformed band-stop filter transfer function. + k : float + System gain of the transformed band-stop filter. + + See Also + -------- + lp2lp_zpk, lp2hp_zpk, lp2bp_zpk, bilinear + lp2bs + + Notes + ----- + This is derived from the s-plane substitution + + .. 
math:: s \rightarrow \frac{s \cdot \mathrm{BW}}{s^2 + {\omega_0}^2} + + This is the "wideband" transformation, producing a stopband with + geometric (log frequency) symmetry about `wo`. + + .. versionadded:: 1.1.0 + + Examples + -------- + Transform a low-pass filter represented in 'zpk' (Zero-Pole-Gain) form + into a bandstop filter represented in 'zpk' form, with a center frequency wo and + bandwidth bw. + + >>> from scipy.signal import lp2bs_zpk + >>> z = [ ] + >>> p = [ 0.7 , -1 ] + >>> k = 9 + >>> wo = 0.5 + >>> bw = 10 + >>> lp2bs_zpk(z, p, k, wo, bw) + ( array([0.+0.5j, 0.+0.5j, 0.-0.5j, 0.-0.5j]), + array([14.2681928 +0.j, -0.02506281+0.j, 0.01752149+0.j, -9.97493719+0.j]), + -12.857142857142858) + """ + z = atleast_1d(z) + p = atleast_1d(p) + wo = float(wo) + bw = float(bw) + + degree = _relative_degree(z, p) + + # Invert to a highpass filter with desired bandwidth + z_hp = (bw/2) / z + p_hp = (bw/2) / p + + # Square root needs to produce complex result, not NaN + z_hp = z_hp.astype(complex) + p_hp = p_hp.astype(complex) + + # Duplicate poles and zeros and shift from baseband to +wo and -wo + z_bs = concatenate((z_hp + sqrt(z_hp**2 - wo**2), + z_hp - sqrt(z_hp**2 - wo**2))) + p_bs = concatenate((p_hp + sqrt(p_hp**2 - wo**2), + p_hp - sqrt(p_hp**2 - wo**2))) + + # Move any zeros that were at infinity to the center of the stopband + z_bs = append(z_bs, full(degree, +1j*wo)) + z_bs = append(z_bs, full(degree, -1j*wo)) + + # Cancel out gain change caused by inversion + k_bs = k * real(prod(-z) / prod(-p)) + + return z_bs, p_bs, k_bs + + +def butter(N, Wn, btype='low', analog=False, output='ba', fs=None): + """ + Butterworth digital and analog filter design. + + Design an Nth-order digital or analog Butterworth filter and return + the filter coefficients. + + Parameters + ---------- + N : int + The order of the filter. For 'bandpass' and 'bandstop' filters, + the resulting order of the final second-order sections ('sos') + matrix is ``2*N``, with `N` the number of biquad sections + of the desired system. + Wn : array_like + The critical frequency or frequencies. For lowpass and highpass + filters, Wn is a scalar; for bandpass and bandstop filters, + Wn is a length-2 sequence. + + For a Butterworth filter, this is the point at which the gain + drops to 1/sqrt(2) that of the passband (the "-3 dB point"). + + For digital filters, if `fs` is not specified, `Wn` units are + normalized from 0 to 1, where 1 is the Nyquist frequency (`Wn` is + thus in half cycles / sample and defined as 2*critical frequencies + / `fs`). If `fs` is specified, `Wn` is in the same units as `fs`. + + For analog filters, `Wn` is an angular frequency (e.g. rad/s). + btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional + The type of filter. Default is 'lowpass'. + analog : bool, optional + When True, return an analog filter, otherwise a digital filter is + returned. + output : {'ba', 'zpk', 'sos'}, optional + Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or + second-order sections ('sos'). Default is 'ba' for backwards + compatibility, but 'sos' should be used for general-purpose filtering. + fs : float, optional + The sampling frequency of the digital system. + + .. versionadded:: 1.2.0 + + Returns + ------- + b, a : ndarray, ndarray + Numerator (`b`) and denominator (`a`) polynomials of the IIR filter. + Only returned if ``output='ba'``. + z, p, k : ndarray, ndarray, float + Zeros, poles, and system gain of the IIR filter transfer + function. Only returned if ``output='zpk'``. 
+ sos : ndarray + Second-order sections representation of the IIR filter. + Only returned if ``output='sos'``. + + See Also + -------- + buttord, buttap + + Notes + ----- + The Butterworth filter has maximally flat frequency response in the + passband. + + The ``'sos'`` output parameter was added in 0.16.0. + + If the transfer function form ``[b, a]`` is requested, numerical + problems can occur since the conversion between roots and + the polynomial coefficients is a numerically sensitive operation, + even for N >= 4. It is recommended to work with the SOS + representation. + + .. warning:: + Designing high-order and narrowband IIR filters in TF form can + result in unstable or incorrect filtering due to floating point + numerical precision issues. Consider inspecting output filter + characteristics `freqz` or designing the filters with second-order + sections via ``output='sos'``. + + Examples + -------- + Design an analog filter and plot its frequency response, showing the + critical points: + + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + >>> import numpy as np + + >>> b, a = signal.butter(4, 100, 'low', analog=True) + >>> w, h = signal.freqs(b, a) + >>> plt.semilogx(w, 20 * np.log10(abs(h))) + >>> plt.title('Butterworth filter frequency response') + >>> plt.xlabel('Frequency [rad/s]') + >>> plt.ylabel('Amplitude [dB]') + >>> plt.margins(0, 0.1) + >>> plt.grid(which='both', axis='both') + >>> plt.axvline(100, color='green') # cutoff frequency + >>> plt.show() + + Generate a signal made up of 10 Hz and 20 Hz, sampled at 1 kHz + + >>> t = np.linspace(0, 1, 1000, False) # 1 second + >>> sig = np.sin(2*np.pi*10*t) + np.sin(2*np.pi*20*t) + >>> fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True) + >>> ax1.plot(t, sig) + >>> ax1.set_title('10 Hz and 20 Hz sinusoids') + >>> ax1.axis([0, 1, -2, 2]) + + Design a digital high-pass filter at 15 Hz to remove the 10 Hz tone, and + apply it to the signal. (It's recommended to use second-order sections + format when filtering, to avoid numerical error with transfer function + (``ba``) format): + + >>> sos = signal.butter(10, 15, 'hp', fs=1000, output='sos') + >>> filtered = signal.sosfilt(sos, sig) + >>> ax2.plot(t, filtered) + >>> ax2.set_title('After 15 Hz high-pass filter') + >>> ax2.axis([0, 1, -2, 2]) + >>> ax2.set_xlabel('Time [s]') + >>> plt.tight_layout() + >>> plt.show() + """ + return iirfilter(N, Wn, btype=btype, analog=analog, + output=output, ftype='butter', fs=fs) + + +def cheby1(N, rp, Wn, btype='low', analog=False, output='ba', fs=None): + """ + Chebyshev type I digital and analog filter design. + + Design an Nth-order digital or analog Chebyshev type I filter and + return the filter coefficients. + + Parameters + ---------- + N : int + The order of the filter. + rp : float + The maximum ripple allowed below unity gain in the passband. + Specified in decibels, as a positive number. + Wn : array_like + A scalar or length-2 sequence giving the critical frequencies. + For Type I filters, this is the point in the transition band at which + the gain first drops below -`rp`. + + For digital filters, `Wn` are in the same units as `fs`. By default, + `fs` is 2 half-cycles/sample, so these are normalized from 0 to 1, + where 1 is the Nyquist frequency. (`Wn` is thus in + half-cycles / sample.) + + For analog filters, `Wn` is an angular frequency (e.g., rad/s). + btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional + The type of filter. Default is 'lowpass'. 
+ analog : bool, optional + When True, return an analog filter, otherwise a digital filter is + returned. + output : {'ba', 'zpk', 'sos'}, optional + Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or + second-order sections ('sos'). Default is 'ba' for backwards + compatibility, but 'sos' should be used for general-purpose filtering. + fs : float, optional + The sampling frequency of the digital system. + + .. versionadded:: 1.2.0 + + Returns + ------- + b, a : ndarray, ndarray + Numerator (`b`) and denominator (`a`) polynomials of the IIR filter. + Only returned if ``output='ba'``. + z, p, k : ndarray, ndarray, float + Zeros, poles, and system gain of the IIR filter transfer + function. Only returned if ``output='zpk'``. + sos : ndarray + Second-order sections representation of the IIR filter. + Only returned if ``output='sos'``. + + See Also + -------- + cheb1ord, cheb1ap + + Notes + ----- + The Chebyshev type I filter maximizes the rate of cutoff between the + frequency response's passband and stopband, at the expense of ripple in + the passband and increased ringing in the step response. + + Type I filters roll off faster than Type II (`cheby2`), but Type II + filters do not have any ripple in the passband. + + The equiripple passband has N maxima or minima (for example, a + 5th-order filter has 3 maxima and 2 minima). Consequently, the DC gain is + unity for odd-order filters, or -rp dB for even-order filters. + + The ``'sos'`` output parameter was added in 0.16.0. + + Examples + -------- + Design an analog filter and plot its frequency response, showing the + critical points: + + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + >>> import numpy as np + + >>> b, a = signal.cheby1(4, 5, 100, 'low', analog=True) + >>> w, h = signal.freqs(b, a) + >>> plt.semilogx(w, 20 * np.log10(abs(h))) + >>> plt.title('Chebyshev Type I frequency response (rp=5)') + >>> plt.xlabel('Frequency [rad/s]') + >>> plt.ylabel('Amplitude [dB]') + >>> plt.margins(0, 0.1) + >>> plt.grid(which='both', axis='both') + >>> plt.axvline(100, color='green') # cutoff frequency + >>> plt.axhline(-5, color='green') # rp + >>> plt.show() + + Generate a signal made up of 10 Hz and 20 Hz, sampled at 1 kHz + + >>> t = np.linspace(0, 1, 1000, False) # 1 second + >>> sig = np.sin(2*np.pi*10*t) + np.sin(2*np.pi*20*t) + >>> fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True) + >>> ax1.plot(t, sig) + >>> ax1.set_title('10 Hz and 20 Hz sinusoids') + >>> ax1.axis([0, 1, -2, 2]) + + Design a digital high-pass filter at 15 Hz to remove the 10 Hz tone, and + apply it to the signal. (It's recommended to use second-order sections + format when filtering, to avoid numerical error with transfer function + (``ba``) format): + + >>> sos = signal.cheby1(10, 1, 15, 'hp', fs=1000, output='sos') + >>> filtered = signal.sosfilt(sos, sig) + >>> ax2.plot(t, filtered) + >>> ax2.set_title('After 15 Hz high-pass filter') + >>> ax2.axis([0, 1, -2, 2]) + >>> ax2.set_xlabel('Time [s]') + >>> plt.tight_layout() + >>> plt.show() + """ + return iirfilter(N, Wn, rp=rp, btype=btype, analog=analog, + output=output, ftype='cheby1', fs=fs) + + +def cheby2(N, rs, Wn, btype='low', analog=False, output='ba', fs=None): + """ + Chebyshev type II digital and analog filter design. + + Design an Nth-order digital or analog Chebyshev type II filter and + return the filter coefficients. + + Parameters + ---------- + N : int + The order of the filter. + rs : float + The minimum attenuation required in the stop band. 
+ Specified in decibels, as a positive number. + Wn : array_like + A scalar or length-2 sequence giving the critical frequencies. + For Type II filters, this is the point in the transition band at which + the gain first reaches -`rs`. + + For digital filters, `Wn` are in the same units as `fs`. By default, + `fs` is 2 half-cycles/sample, so these are normalized from 0 to 1, + where 1 is the Nyquist frequency. (`Wn` is thus in + half-cycles / sample.) + + For analog filters, `Wn` is an angular frequency (e.g., rad/s). + btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional + The type of filter. Default is 'lowpass'. + analog : bool, optional + When True, return an analog filter, otherwise a digital filter is + returned. + output : {'ba', 'zpk', 'sos'}, optional + Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or + second-order sections ('sos'). Default is 'ba' for backwards + compatibility, but 'sos' should be used for general-purpose filtering. + fs : float, optional + The sampling frequency of the digital system. + + .. versionadded:: 1.2.0 + + Returns + ------- + b, a : ndarray, ndarray + Numerator (`b`) and denominator (`a`) polynomials of the IIR filter. + Only returned if ``output='ba'``. + z, p, k : ndarray, ndarray, float + Zeros, poles, and system gain of the IIR filter transfer + function. Only returned if ``output='zpk'``. + sos : ndarray + Second-order sections representation of the IIR filter. + Only returned if ``output='sos'``. + + See Also + -------- + cheb2ord, cheb2ap + + Notes + ----- + The Chebyshev type II filter maximizes the rate of cutoff between the + frequency response's passband and stopband, at the expense of ripple in + the stopband and increased ringing in the step response. + + Type II filters do not roll off as fast as Type I (`cheby1`). + + The ``'sos'`` output parameter was added in 0.16.0. + + Examples + -------- + Design an analog filter and plot its frequency response, showing the + critical points: + + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + >>> import numpy as np + + >>> b, a = signal.cheby2(4, 40, 100, 'low', analog=True) + >>> w, h = signal.freqs(b, a) + >>> plt.semilogx(w, 20 * np.log10(abs(h))) + >>> plt.title('Chebyshev Type II frequency response (rs=40)') + >>> plt.xlabel('Frequency [rad/s]') + >>> plt.ylabel('Amplitude [dB]') + >>> plt.margins(0, 0.1) + >>> plt.grid(which='both', axis='both') + >>> plt.axvline(100, color='green') # cutoff frequency + >>> plt.axhline(-40, color='green') # rs + >>> plt.show() + + Generate a signal made up of 10 Hz and 20 Hz, sampled at 1 kHz + + >>> t = np.linspace(0, 1, 1000, False) # 1 second + >>> sig = np.sin(2*np.pi*10*t) + np.sin(2*np.pi*20*t) + >>> fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True) + >>> ax1.plot(t, sig) + >>> ax1.set_title('10 Hz and 20 Hz sinusoids') + >>> ax1.axis([0, 1, -2, 2]) + + Design a digital high-pass filter at 17 Hz to remove the 10 Hz tone, and + apply it to the signal. 
(It's recommended to use second-order sections + format when filtering, to avoid numerical error with transfer function + (``ba``) format): + + >>> sos = signal.cheby2(12, 20, 17, 'hp', fs=1000, output='sos') + >>> filtered = signal.sosfilt(sos, sig) + >>> ax2.plot(t, filtered) + >>> ax2.set_title('After 17 Hz high-pass filter') + >>> ax2.axis([0, 1, -2, 2]) + >>> ax2.set_xlabel('Time [s]') + >>> plt.show() + """ + return iirfilter(N, Wn, rs=rs, btype=btype, analog=analog, + output=output, ftype='cheby2', fs=fs) + + +def ellip(N, rp, rs, Wn, btype='low', analog=False, output='ba', fs=None): + """ + Elliptic (Cauer) digital and analog filter design. + + Design an Nth-order digital or analog elliptic filter and return + the filter coefficients. + + Parameters + ---------- + N : int + The order of the filter. + rp : float + The maximum ripple allowed below unity gain in the passband. + Specified in decibels, as a positive number. + rs : float + The minimum attenuation required in the stop band. + Specified in decibels, as a positive number. + Wn : array_like + A scalar or length-2 sequence giving the critical frequencies. + For elliptic filters, this is the point in the transition band at + which the gain first drops below -`rp`. + + For digital filters, `Wn` are in the same units as `fs`. By default, + `fs` is 2 half-cycles/sample, so these are normalized from 0 to 1, + where 1 is the Nyquist frequency. (`Wn` is thus in + half-cycles / sample.) + + For analog filters, `Wn` is an angular frequency (e.g., rad/s). + btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional + The type of filter. Default is 'lowpass'. + analog : bool, optional + When True, return an analog filter, otherwise a digital filter is + returned. + output : {'ba', 'zpk', 'sos'}, optional + Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or + second-order sections ('sos'). Default is 'ba' for backwards + compatibility, but 'sos' should be used for general-purpose filtering. + fs : float, optional + The sampling frequency of the digital system. + + .. versionadded:: 1.2.0 + + Returns + ------- + b, a : ndarray, ndarray + Numerator (`b`) and denominator (`a`) polynomials of the IIR filter. + Only returned if ``output='ba'``. + z, p, k : ndarray, ndarray, float + Zeros, poles, and system gain of the IIR filter transfer + function. Only returned if ``output='zpk'``. + sos : ndarray + Second-order sections representation of the IIR filter. + Only returned if ``output='sos'``. + + See Also + -------- + ellipord, ellipap + + Notes + ----- + Also known as Cauer or Zolotarev filters, the elliptical filter maximizes + the rate of transition between the frequency response's passband and + stopband, at the expense of ripple in both, and increased ringing in the + step response. + + As `rp` approaches 0, the elliptical filter becomes a Chebyshev + type II filter (`cheby2`). As `rs` approaches 0, it becomes a Chebyshev + type I filter (`cheby1`). As both approach 0, it becomes a Butterworth + filter (`butter`). + + The equiripple passband has N maxima or minima (for example, a + 5th-order filter has 3 maxima and 2 minima). Consequently, the DC gain is + unity for odd-order filters, or -rp dB for even-order filters. + + The ``'sos'`` output parameter was added in 0.16.0. 
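+
+    A quick numerical check of the DC-gain statement above (a minimal sketch;
+    the order, ripple and cutoff values are illustrative only):
+
+    >>> import numpy as np
+    >>> from scipy import signal
+    >>> b, a = signal.ellip(4, 1, 40, 100, 'low', analog=True)
+    >>> bool(np.isclose(20 * np.log10(abs(b[-1] / a[-1])), -1.0))  # -rp dB at DC
+    True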
+ + Examples + -------- + Design an analog filter and plot its frequency response, showing the + critical points: + + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + >>> import numpy as np + + >>> b, a = signal.ellip(4, 5, 40, 100, 'low', analog=True) + >>> w, h = signal.freqs(b, a) + >>> plt.semilogx(w, 20 * np.log10(abs(h))) + >>> plt.title('Elliptic filter frequency response (rp=5, rs=40)') + >>> plt.xlabel('Frequency [rad/s]') + >>> plt.ylabel('Amplitude [dB]') + >>> plt.margins(0, 0.1) + >>> plt.grid(which='both', axis='both') + >>> plt.axvline(100, color='green') # cutoff frequency + >>> plt.axhline(-40, color='green') # rs + >>> plt.axhline(-5, color='green') # rp + >>> plt.show() + + Generate a signal made up of 10 Hz and 20 Hz, sampled at 1 kHz + + >>> t = np.linspace(0, 1, 1000, False) # 1 second + >>> sig = np.sin(2*np.pi*10*t) + np.sin(2*np.pi*20*t) + >>> fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True) + >>> ax1.plot(t, sig) + >>> ax1.set_title('10 Hz and 20 Hz sinusoids') + >>> ax1.axis([0, 1, -2, 2]) + + Design a digital high-pass filter at 17 Hz to remove the 10 Hz tone, and + apply it to the signal. (It's recommended to use second-order sections + format when filtering, to avoid numerical error with transfer function + (``ba``) format): + + >>> sos = signal.ellip(8, 1, 100, 17, 'hp', fs=1000, output='sos') + >>> filtered = signal.sosfilt(sos, sig) + >>> ax2.plot(t, filtered) + >>> ax2.set_title('After 17 Hz high-pass filter') + >>> ax2.axis([0, 1, -2, 2]) + >>> ax2.set_xlabel('Time [s]') + >>> plt.tight_layout() + >>> plt.show() + """ + return iirfilter(N, Wn, rs=rs, rp=rp, btype=btype, analog=analog, + output=output, ftype='elliptic', fs=fs) + + +def bessel(N, Wn, btype='low', analog=False, output='ba', norm='phase', + fs=None): + """ + Bessel/Thomson digital and analog filter design. + + Design an Nth-order digital or analog Bessel filter and return the + filter coefficients. + + Parameters + ---------- + N : int + The order of the filter. + Wn : array_like + A scalar or length-2 sequence giving the critical frequencies (defined + by the `norm` parameter). + For analog filters, `Wn` is an angular frequency (e.g., rad/s). + + For digital filters, `Wn` are in the same units as `fs`. By default, + `fs` is 2 half-cycles/sample, so these are normalized from 0 to 1, + where 1 is the Nyquist frequency. (`Wn` is thus in + half-cycles / sample.) + btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional + The type of filter. Default is 'lowpass'. + analog : bool, optional + When True, return an analog filter, otherwise a digital filter is + returned. (See Notes.) + output : {'ba', 'zpk', 'sos'}, optional + Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or + second-order sections ('sos'). Default is 'ba'. + norm : {'phase', 'delay', 'mag'}, optional + Critical frequency normalization: + + ``phase`` + The filter is normalized such that the phase response reaches its + midpoint at angular (e.g. rad/s) frequency `Wn`. This happens for + both low-pass and high-pass filters, so this is the + "phase-matched" case. + + The magnitude response asymptotes are the same as a Butterworth + filter of the same order with a cutoff of `Wn`. + + This is the default, and matches MATLAB's implementation. + + ``delay`` + The filter is normalized such that the group delay in the passband + is 1/`Wn` (e.g., seconds). This is the "natural" type obtained by + solving Bessel polynomials. 
+ + ``mag`` + The filter is normalized such that the gain magnitude is -3 dB at + angular frequency `Wn`. + + .. versionadded:: 0.18.0 + fs : float, optional + The sampling frequency of the digital system. + + .. versionadded:: 1.2.0 + + Returns + ------- + b, a : ndarray, ndarray + Numerator (`b`) and denominator (`a`) polynomials of the IIR filter. + Only returned if ``output='ba'``. + z, p, k : ndarray, ndarray, float + Zeros, poles, and system gain of the IIR filter transfer + function. Only returned if ``output='zpk'``. + sos : ndarray + Second-order sections representation of the IIR filter. + Only returned if ``output='sos'``. + + Notes + ----- + Also known as a Thomson filter, the analog Bessel filter has maximally + flat group delay and maximally linear phase response, with very little + ringing in the step response. [1]_ + + The Bessel is inherently an analog filter. This function generates digital + Bessel filters using the bilinear transform, which does not preserve the + phase response of the analog filter. As such, it is only approximately + correct at frequencies below about fs/4. To get maximally-flat group + delay at higher frequencies, the analog Bessel filter must be transformed + using phase-preserving techniques. + + See `besselap` for implementation details and references. + + The ``'sos'`` output parameter was added in 0.16.0. + + References + ---------- + .. [1] Thomson, W.E., "Delay Networks having Maximally Flat Frequency + Characteristics", Proceedings of the Institution of Electrical + Engineers, Part III, November 1949, Vol. 96, No. 44, pp. 487-490. + + Examples + -------- + Plot the phase-normalized frequency response, showing the relationship + to the Butterworth's cutoff frequency (green): + + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + >>> import numpy as np + + >>> b, a = signal.butter(4, 100, 'low', analog=True) + >>> w, h = signal.freqs(b, a) + >>> plt.semilogx(w, 20 * np.log10(np.abs(h)), color='silver', ls='dashed') + >>> b, a = signal.bessel(4, 100, 'low', analog=True, norm='phase') + >>> w, h = signal.freqs(b, a) + >>> plt.semilogx(w, 20 * np.log10(np.abs(h))) + >>> plt.title('Bessel filter magnitude response (with Butterworth)') + >>> plt.xlabel('Frequency [rad/s]') + >>> plt.ylabel('Amplitude [dB]') + >>> plt.margins(0, 0.1) + >>> plt.grid(which='both', axis='both') + >>> plt.axvline(100, color='green') # cutoff frequency + >>> plt.show() + + and the phase midpoint: + + >>> plt.figure() + >>> plt.semilogx(w, np.unwrap(np.angle(h))) + >>> plt.axvline(100, color='green') # cutoff frequency + >>> plt.axhline(-np.pi, color='red') # phase midpoint + >>> plt.title('Bessel filter phase response') + >>> plt.xlabel('Frequency [rad/s]') + >>> plt.ylabel('Phase [rad]') + >>> plt.margins(0, 0.1) + >>> plt.grid(which='both', axis='both') + >>> plt.show() + + Plot the magnitude-normalized frequency response, showing the -3 dB cutoff: + + >>> b, a = signal.bessel(3, 10, 'low', analog=True, norm='mag') + >>> w, h = signal.freqs(b, a) + >>> plt.semilogx(w, 20 * np.log10(np.abs(h))) + >>> plt.axhline(-3, color='red') # -3 dB magnitude + >>> plt.axvline(10, color='green') # cutoff frequency + >>> plt.title('Amplitude-normalized Bessel filter frequency response') + >>> plt.xlabel('Frequency [rad/s]') + >>> plt.ylabel('Amplitude [dB]') + >>> plt.margins(0, 0.1) + >>> plt.grid(which='both', axis='both') + >>> plt.show() + + Plot the delay-normalized filter, showing the maximally-flat group delay + at 0.1 seconds: + + >>> b, a = signal.bessel(5, 
1/0.1, 'low', analog=True, norm='delay') + >>> w, h = signal.freqs(b, a) + >>> plt.figure() + >>> plt.semilogx(w[1:], -np.diff(np.unwrap(np.angle(h)))/np.diff(w)) + >>> plt.axhline(0.1, color='red') # 0.1 seconds group delay + >>> plt.title('Bessel filter group delay') + >>> plt.xlabel('Frequency [rad/s]') + >>> plt.ylabel('Group delay [s]') + >>> plt.margins(0, 0.1) + >>> plt.grid(which='both', axis='both') + >>> plt.show() + + """ + return iirfilter(N, Wn, btype=btype, analog=analog, + output=output, ftype='bessel_'+norm, fs=fs) + + +def maxflat(): + pass + + +def yulewalk(): + pass + + +def band_stop_obj(wp, ind, passb, stopb, gpass, gstop, type): + """ + Band Stop Objective Function for order minimization. + + Returns the non-integer order for an analog band stop filter. + + Parameters + ---------- + wp : scalar + Edge of passband `passb`. + ind : int, {0, 1} + Index specifying which `passb` edge to vary (0 or 1). + passb : ndarray + Two element sequence of fixed passband edges. + stopb : ndarray + Two element sequence of fixed stopband edges. + gstop : float + Amount of attenuation in stopband in dB. + gpass : float + Amount of ripple in the passband in dB. + type : {'butter', 'cheby', 'ellip'} + Type of filter. + + Returns + ------- + n : scalar + Filter order (possibly non-integer). + + Notes + ----- + Band-stop filters are used in applications where certain frequency + components need to be blocked while others are allowed; for instance, + removing noise at specific frequencies while allowing the desired signal + to pass through. The order of a filter often determines its complexity and + accuracy. Determining the right order can be a challenge. This function + aims to provide an appropriate order for an analog band stop filter. + + Examples + -------- + + >>> import numpy as np + >>> from scipy.signal import band_stop_obj + >>> wp = 2 + >>> ind = 1 + >>> passb = np.array([1, 3]) + >>> stopb = np.array([0.5, 4]) + >>> gstop = 30 + >>> gpass = 3 + >>> filter_type = 'butter' + >>> band_stop_obj(wp, ind, passb, stopb, gpass, gstop, filter_type) + np.float64(-2.758504160760643) + + """ + + _validate_gpass_gstop(gpass, gstop) + + passbC = passb.copy() + passbC[ind] = wp + nat = (stopb * (passbC[0] - passbC[1]) / + (stopb ** 2 - passbC[0] * passbC[1])) + nat = min(abs(nat)) + + if type == 'butter': + GSTOP = 10 ** (0.1 * abs(gstop)) + GPASS = 10 ** (0.1 * abs(gpass)) + n = (log10((GSTOP - 1.0) / (GPASS - 1.0)) / (2 * log10(nat))) + elif type == 'cheby': + GSTOP = 10 ** (0.1 * abs(gstop)) + GPASS = 10 ** (0.1 * abs(gpass)) + n = arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))) / arccosh(nat) + elif type == 'ellip': + GSTOP = 10 ** (0.1 * gstop) + GPASS = 10 ** (0.1 * gpass) + arg1 = sqrt((GPASS - 1.0) / (GSTOP - 1.0)) + arg0 = 1.0 / nat + d0 = special.ellipk([arg0 ** 2, 1 - arg0 ** 2]) + d1 = special.ellipk([arg1 ** 2, 1 - arg1 ** 2]) + n = (d0[0] * d1[1] / (d0[1] * d1[0])) + else: + raise ValueError(f"Incorrect type: {type}") + return n + + +def _pre_warp(wp, ws, analog): + # Pre-warp frequencies for digital filter design + if not analog: + passb = np.tan(pi * wp / 2.0) + stopb = np.tan(pi * ws / 2.0) + else: + passb = wp * 1.0 + stopb = ws * 1.0 + return passb, stopb + + +def _validate_wp_ws(wp, ws, fs, analog): + wp = atleast_1d(wp) + ws = atleast_1d(ws) + if fs is not None: + if analog: + raise ValueError("fs cannot be specified for an analog filter") + wp = 2 * wp / fs + ws = 2 * ws / fs + + filter_type = 2 * (len(wp) - 1) + 1 + if wp[0] >= ws[0]: + filter_type += 1 + + return wp, ws, 
filter_type
+
+
+def _find_nat_freq(stopb, passb, gpass, gstop, filter_type, filter_kind):
+    if filter_type == 1:            # low
+        nat = stopb / passb
+    elif filter_type == 2:          # high
+        nat = passb / stopb
+    elif filter_type == 3:          # stop
+        wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12,
+                                 args=(0, passb, stopb, gpass, gstop,
+                                       filter_kind),
+                                 disp=0)
+        passb[0] = wp0
+        wp1 = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1],
+                                 args=(1, passb, stopb, gpass, gstop,
+                                       filter_kind),
+                                 disp=0)
+        passb[1] = wp1
+        nat = ((stopb * (passb[0] - passb[1])) /
+               (stopb ** 2 - passb[0] * passb[1]))
+    elif filter_type == 4:          # pass
+        nat = ((stopb ** 2 - passb[0] * passb[1]) /
+               (stopb * (passb[0] - passb[1])))
+    else:
+        raise ValueError(f"should not happen: {filter_type =}.")
+
+    nat = min(abs(nat))
+    return nat, passb
+
+
+def _postprocess_wn(WN, analog, fs):
+    wn = WN if analog else np.arctan(WN) * 2.0 / pi
+    if len(wn) == 1:
+        wn = wn[0]
+    if fs is not None:
+        wn = wn * fs / 2
+    return wn
+
+
+def buttord(wp, ws, gpass, gstop, analog=False, fs=None):
+    """Butterworth filter order selection.
+
+    Return the order of the lowest order digital or analog Butterworth filter
+    that loses no more than `gpass` dB in the passband and has at least
+    `gstop` dB attenuation in the stopband.
+
+    Parameters
+    ----------
+    wp, ws : float
+        Passband and stopband edge frequencies.
+
+        For digital filters, these are in the same units as `fs`. By default,
+        `fs` is 2 half-cycles/sample, so these are normalized from 0 to 1,
+        where 1 is the Nyquist frequency. (`wp` and `ws` are thus in
+        half-cycles / sample.) For example:
+
+        - Lowpass: wp = 0.2, ws = 0.3
+        - Highpass: wp = 0.3, ws = 0.2
+        - Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
+        - Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
+
+        For analog filters, `wp` and `ws` are angular frequencies (e.g., rad/s).
+    gpass : float
+        The maximum loss in the passband (dB).
+    gstop : float
+        The minimum attenuation in the stopband (dB).
+    analog : bool, optional
+        When True, return an analog filter, otherwise a digital filter is
+        returned.
+    fs : float, optional
+        The sampling frequency of the digital system.
+
+        .. versionadded:: 1.2.0
+
+    Returns
+    -------
+    ord : int
+        The lowest order for a Butterworth filter which meets specs.
+    wn : ndarray or float
+        The Butterworth natural frequency (i.e. the "3dB frequency"). Should
+        be used with `butter` to give filter results. If `fs` is specified,
+        this is in the same units, and `fs` must also be passed to `butter`.
+
+    See Also
+    --------
+    butter : Filter design using order and critical points
+    cheb1ord : Find order and critical points from passband and stopband spec
+    cheb2ord, ellipord
+    iirfilter : General filter design using order and critical frequencies
+    iirdesign : General filter design using passband and stopband spec
+
+    Examples
+    --------
+    Design an analog bandpass filter with passband within 3 dB from 20 to
+    50 rad/s, while rejecting at least -40 dB below 14 and above 60 rad/s.
+    Plot its frequency response, showing the passband and stopband
+    constraints in gray.
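+
+    A minimal digital-design sketch, before the analog example described
+    above (the band edges and ``fs`` here are arbitrary illustrative values):
+
+    >>> from scipy import signal
+    >>> N, Wn = signal.buttord(20, 30, 3, 40, fs=200)  # edges in Hz
+    >>> sos = signal.butter(N, Wn, 'low', fs=200, output='sos')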
+ + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + >>> import numpy as np + + >>> N, Wn = signal.buttord([20, 50], [14, 60], 3, 40, True) + >>> b, a = signal.butter(N, Wn, 'band', True) + >>> w, h = signal.freqs(b, a, np.logspace(1, 2, 500)) + >>> plt.semilogx(w, 20 * np.log10(abs(h))) + >>> plt.title('Butterworth bandpass filter fit to constraints') + >>> plt.xlabel('Frequency [rad/s]') + >>> plt.ylabel('Amplitude [dB]') + >>> plt.grid(which='both', axis='both') + >>> plt.fill([1, 14, 14, 1], [-40, -40, 99, 99], '0.9', lw=0) # stop + >>> plt.fill([20, 20, 50, 50], [-99, -3, -3, -99], '0.9', lw=0) # pass + >>> plt.fill([60, 60, 1e9, 1e9], [99, -40, -40, 99], '0.9', lw=0) # stop + >>> plt.axis([10, 100, -60, 3]) + >>> plt.show() + + """ + _validate_gpass_gstop(gpass, gstop) + fs = _validate_fs(fs, allow_none=True) + wp, ws, filter_type = _validate_wp_ws(wp, ws, fs, analog) + passb, stopb = _pre_warp(wp, ws, analog) + nat, passb = _find_nat_freq(stopb, passb, gpass, gstop, filter_type, 'butter') + + GSTOP = 10 ** (0.1 * abs(gstop)) + GPASS = 10 ** (0.1 * abs(gpass)) + ord = int(ceil(log10((GSTOP - 1.0) / (GPASS - 1.0)) / (2 * log10(nat)))) + + # Find the Butterworth natural frequency WN (or the "3dB" frequency") + # to give exactly gpass at passb. + try: + W0 = (GPASS - 1.0) ** (-1.0 / (2.0 * ord)) + except ZeroDivisionError: + W0 = 1.0 + warnings.warn("Order is zero...check input parameters.", + RuntimeWarning, stacklevel=2) + + # now convert this frequency back from lowpass prototype + # to the original analog filter + + if filter_type == 1: # low + WN = W0 * passb + elif filter_type == 2: # high + WN = passb / W0 + elif filter_type == 3: # stop + WN = np.empty(2, float) + discr = sqrt((passb[1] - passb[0]) ** 2 + + 4 * W0 ** 2 * passb[0] * passb[1]) + WN[0] = ((passb[1] - passb[0]) + discr) / (2 * W0) + WN[1] = ((passb[1] - passb[0]) - discr) / (2 * W0) + WN = np.sort(abs(WN)) + elif filter_type == 4: # pass + W0 = np.array([-W0, W0], float) + WN = (-W0 * (passb[1] - passb[0]) / 2.0 + + sqrt(W0 ** 2 / 4.0 * (passb[1] - passb[0]) ** 2 + + passb[0] * passb[1])) + WN = np.sort(abs(WN)) + else: + raise ValueError(f"Bad type: {filter_type}") + + wn = _postprocess_wn(WN, analog, fs) + + return ord, wn + + +def cheb1ord(wp, ws, gpass, gstop, analog=False, fs=None): + """Chebyshev type I filter order selection. + + Return the order of the lowest order digital or analog Chebyshev Type I + filter that loses no more than `gpass` dB in the passband and has at + least `gstop` dB attenuation in the stopband. + + Parameters + ---------- + wp, ws : float + Passband and stopband edge frequencies. + + For digital filters, these are in the same units as `fs`. By default, + `fs` is 2 half-cycles/sample, so these are normalized from 0 to 1, + where 1 is the Nyquist frequency. (`wp` and `ws` are thus in + half-cycles / sample.) For example: + + - Lowpass: wp = 0.2, ws = 0.3 + - Highpass: wp = 0.3, ws = 0.2 + - Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6] + - Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5] + + For analog filters, `wp` and `ws` are angular frequencies (e.g., rad/s). + gpass : float + The maximum loss in the passband (dB). + gstop : float + The minimum attenuation in the stopband (dB). + analog : bool, optional + When True, return an analog filter, otherwise a digital filter is + returned. + fs : float, optional + The sampling frequency of the digital system. + + .. 
versionadded:: 1.2.0 + + Returns + ------- + ord : int + The lowest order for a Chebyshev type I filter that meets specs. + wn : ndarray or float + The Chebyshev natural frequency (the "3dB frequency") for use with + `cheby1` to give filter results. If `fs` is specified, + this is in the same units, and `fs` must also be passed to `cheby1`. + + See Also + -------- + cheby1 : Filter design using order and critical points + buttord : Find order and critical points from passband and stopband spec + cheb2ord, ellipord + iirfilter : General filter design using order and critical frequencies + iirdesign : General filter design using passband and stopband spec + + Examples + -------- + Design a digital lowpass filter such that the passband is within 3 dB up + to 0.2*(fs/2), while rejecting at least -40 dB above 0.3*(fs/2). Plot its + frequency response, showing the passband and stopband constraints in gray. + + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + >>> import numpy as np + + >>> N, Wn = signal.cheb1ord(0.2, 0.3, 3, 40) + >>> b, a = signal.cheby1(N, 3, Wn, 'low') + >>> w, h = signal.freqz(b, a) + >>> plt.semilogx(w / np.pi, 20 * np.log10(abs(h))) + >>> plt.title('Chebyshev I lowpass filter fit to constraints') + >>> plt.xlabel('Normalized frequency') + >>> plt.ylabel('Amplitude [dB]') + >>> plt.grid(which='both', axis='both') + >>> plt.fill([.01, 0.2, 0.2, .01], [-3, -3, -99, -99], '0.9', lw=0) # stop + >>> plt.fill([0.3, 0.3, 2, 2], [ 9, -40, -40, 9], '0.9', lw=0) # pass + >>> plt.axis([0.08, 1, -60, 3]) + >>> plt.show() + + """ + fs = _validate_fs(fs, allow_none=True) + _validate_gpass_gstop(gpass, gstop) + wp, ws, filter_type = _validate_wp_ws(wp, ws, fs, analog) + passb, stopb = _pre_warp(wp, ws, analog) + nat, passb = _find_nat_freq(stopb, passb, gpass, gstop, filter_type, 'cheby') + + GSTOP = 10 ** (0.1 * abs(gstop)) + GPASS = 10 ** (0.1 * abs(gpass)) + v_pass_stop = np.arccosh(np.sqrt((GSTOP - 1.0) / (GPASS - 1.0))) + ord = int(ceil(v_pass_stop / np.arccosh(nat))) + + # Natural frequencies are just the passband edges + wn = _postprocess_wn(passb, analog, fs) + + return ord, wn + + +def cheb2ord(wp, ws, gpass, gstop, analog=False, fs=None): + """Chebyshev type II filter order selection. + + Return the order of the lowest order digital or analog Chebyshev Type II + filter that loses no more than `gpass` dB in the passband and has at least + `gstop` dB attenuation in the stopband. + + Parameters + ---------- + wp, ws : float + Passband and stopband edge frequencies. + + For digital filters, these are in the same units as `fs`. By default, + `fs` is 2 half-cycles/sample, so these are normalized from 0 to 1, + where 1 is the Nyquist frequency. (`wp` and `ws` are thus in + half-cycles / sample.) For example: + + - Lowpass: wp = 0.2, ws = 0.3 + - Highpass: wp = 0.3, ws = 0.2 + - Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6] + - Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5] + + For analog filters, `wp` and `ws` are angular frequencies (e.g., rad/s). + gpass : float + The maximum loss in the passband (dB). + gstop : float + The minimum attenuation in the stopband (dB). + analog : bool, optional + When True, return an analog filter, otherwise a digital filter is + returned. + fs : float, optional + The sampling frequency of the digital system. + + .. versionadded:: 1.2.0 + + Returns + ------- + ord : int + The lowest order for a Chebyshev type II filter that meets specs. 
+ wn : ndarray or float + The Chebyshev natural frequency (the "3dB frequency") for use with + `cheby2` to give filter results. If `fs` is specified, + this is in the same units, and `fs` must also be passed to `cheby2`. + + See Also + -------- + cheby2 : Filter design using order and critical points + buttord : Find order and critical points from passband and stopband spec + cheb1ord, ellipord + iirfilter : General filter design using order and critical frequencies + iirdesign : General filter design using passband and stopband spec + + Examples + -------- + Design a digital bandstop filter which rejects -60 dB from 0.2*(fs/2) to + 0.5*(fs/2), while staying within 3 dB below 0.1*(fs/2) or above + 0.6*(fs/2). Plot its frequency response, showing the passband and + stopband constraints in gray. + + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + >>> import numpy as np + + >>> N, Wn = signal.cheb2ord([0.1, 0.6], [0.2, 0.5], 3, 60) + >>> b, a = signal.cheby2(N, 60, Wn, 'stop') + >>> w, h = signal.freqz(b, a) + >>> plt.semilogx(w / np.pi, 20 * np.log10(abs(h))) + >>> plt.title('Chebyshev II bandstop filter fit to constraints') + >>> plt.xlabel('Normalized frequency') + >>> plt.ylabel('Amplitude [dB]') + >>> plt.grid(which='both', axis='both') + >>> plt.fill([.01, .1, .1, .01], [-3, -3, -99, -99], '0.9', lw=0) # stop + >>> plt.fill([.2, .2, .5, .5], [ 9, -60, -60, 9], '0.9', lw=0) # pass + >>> plt.fill([.6, .6, 2, 2], [-99, -3, -3, -99], '0.9', lw=0) # stop + >>> plt.axis([0.06, 1, -80, 3]) + >>> plt.show() + + """ + fs = _validate_fs(fs, allow_none=True) + _validate_gpass_gstop(gpass, gstop) + wp, ws, filter_type = _validate_wp_ws(wp, ws, fs, analog) + passb, stopb = _pre_warp(wp, ws, analog) + nat, passb = _find_nat_freq(stopb, passb, gpass, gstop, filter_type, 'cheby') + + GSTOP = 10 ** (0.1 * abs(gstop)) + GPASS = 10 ** (0.1 * abs(gpass)) + v_pass_stop = np.arccosh(np.sqrt((GSTOP - 1.0) / (GPASS - 1.0))) + ord = int(ceil(v_pass_stop / arccosh(nat))) + + # Find frequency where analog response is -gpass dB. + # Then convert back from low-pass prototype to the original filter. + + new_freq = cosh(1.0 / ord * v_pass_stop) + new_freq = 1.0 / new_freq + + if filter_type == 1: + nat = passb / new_freq + elif filter_type == 2: + nat = passb * new_freq + elif filter_type == 3: + nat = np.empty(2, float) + nat[0] = (new_freq / 2.0 * (passb[0] - passb[1]) + + sqrt(new_freq ** 2 * (passb[1] - passb[0]) ** 2 / 4.0 + + passb[1] * passb[0])) + nat[1] = passb[1] * passb[0] / nat[0] + elif filter_type == 4: + nat = np.empty(2, float) + nat[0] = (1.0 / (2.0 * new_freq) * (passb[0] - passb[1]) + + sqrt((passb[1] - passb[0]) ** 2 / (4.0 * new_freq ** 2) + + passb[1] * passb[0])) + nat[1] = passb[0] * passb[1] / nat[0] + + wn = _postprocess_wn(nat, analog, fs) + + return ord, wn + + +_POW10_LOG10 = np.log(10) + + +def _pow10m1(x): + """10 ** x - 1 for x near 0""" + return np.expm1(_POW10_LOG10 * x) + + +def ellipord(wp, ws, gpass, gstop, analog=False, fs=None): + """Elliptic (Cauer) filter order selection. + + Return the order of the lowest order digital or analog elliptic filter + that loses no more than `gpass` dB in the passband and has at least + `gstop` dB attenuation in the stopband. + + Parameters + ---------- + wp, ws : float + Passband and stopband edge frequencies. + + For digital filters, these are in the same units as `fs`. By default, + `fs` is 2 half-cycles/sample, so these are normalized from 0 to 1, + where 1 is the Nyquist frequency. 
(`wp` and `ws` are thus in + half-cycles / sample.) For example: + + - Lowpass: wp = 0.2, ws = 0.3 + - Highpass: wp = 0.3, ws = 0.2 + - Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6] + - Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5] + + For analog filters, `wp` and `ws` are angular frequencies (e.g., rad/s). + gpass : float + The maximum loss in the passband (dB). + gstop : float + The minimum attenuation in the stopband (dB). + analog : bool, optional + When True, return an analog filter, otherwise a digital filter is + returned. + fs : float, optional + The sampling frequency of the digital system. + + .. versionadded:: 1.2.0 + + Returns + ------- + ord : int + The lowest order for an Elliptic (Cauer) filter that meets specs. + wn : ndarray or float + The Chebyshev natural frequency (the "3dB frequency") for use with + `ellip` to give filter results. If `fs` is specified, + this is in the same units, and `fs` must also be passed to `ellip`. + + See Also + -------- + ellip : Filter design using order and critical points + buttord : Find order and critical points from passband and stopband spec + cheb1ord, cheb2ord + iirfilter : General filter design using order and critical frequencies + iirdesign : General filter design using passband and stopband spec + + Examples + -------- + Design an analog highpass filter such that the passband is within 3 dB + above 30 rad/s, while rejecting -60 dB at 10 rad/s. Plot its + frequency response, showing the passband and stopband constraints in gray. + + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + >>> import numpy as np + + >>> N, Wn = signal.ellipord(30, 10, 3, 60, True) + >>> b, a = signal.ellip(N, 3, 60, Wn, 'high', True) + >>> w, h = signal.freqs(b, a, np.logspace(0, 3, 500)) + >>> plt.semilogx(w, 20 * np.log10(abs(h))) + >>> plt.title('Elliptical highpass filter fit to constraints') + >>> plt.xlabel('Frequency [rad/s]') + >>> plt.ylabel('Amplitude [dB]') + >>> plt.grid(which='both', axis='both') + >>> plt.fill([.1, 10, 10, .1], [1e4, 1e4, -60, -60], '0.9', lw=0) # stop + >>> plt.fill([30, 30, 1e9, 1e9], [-99, -3, -3, -99], '0.9', lw=0) # pass + >>> plt.axis([1, 300, -80, 3]) + >>> plt.show() + + """ + fs = _validate_fs(fs, allow_none=True) + _validate_gpass_gstop(gpass, gstop) + wp, ws, filter_type = _validate_wp_ws(wp, ws, fs, analog) + passb, stopb = _pre_warp(wp, ws, analog) + nat, passb = _find_nat_freq(stopb, passb, gpass, gstop, filter_type, 'ellip') + + arg1_sq = _pow10m1(0.1 * gpass) / _pow10m1(0.1 * gstop) + arg0 = 1.0 / nat + d0 = special.ellipk(arg0 ** 2), special.ellipkm1(arg0 ** 2) + d1 = special.ellipk(arg1_sq), special.ellipkm1(arg1_sq) + ord = int(ceil(d0[0] * d1[1] / (d0[1] * d1[0]))) + + wn = _postprocess_wn(passb, analog, fs) + + return ord, wn + + +def buttap(N): + """Return (z,p,k) for analog prototype of Nth-order Butterworth filter. + + The filter will have an angular (e.g., rad/s) cutoff frequency of 1. + + See Also + -------- + butter : Filter design function using this prototype + + """ + if abs(int(N)) != N: + raise ValueError("Filter order must be a nonnegative integer") + z = np.array([]) + m = np.arange(-N+1, N, 2) + # Middle value is 0 to ensure an exactly real pole + p = -np.exp(1j * pi * m / (2 * N)) + k = 1 + return z, p, k + + +def cheb1ap(N, rp): + """ + Return (z,p,k) for Nth-order Chebyshev type I analog lowpass filter. + + The returned filter prototype has `rp` decibels of ripple in the passband. + + The filter's angular (e.g. 
rad/s) cutoff frequency is normalized to 1, + defined as the point at which the gain first drops below ``-rp``. + + See Also + -------- + cheby1 : Filter design function using this prototype + + """ + if abs(int(N)) != N: + raise ValueError("Filter order must be a nonnegative integer") + elif N == 0: + # Avoid divide-by-zero error + # Even order filters have DC gain of -rp dB + return np.array([]), np.array([]), 10**(-rp/20) + z = np.array([]) + + # Ripple factor (epsilon) + eps = np.sqrt(10 ** (0.1 * rp) - 1.0) + mu = 1.0 / N * arcsinh(1 / eps) + + # Arrange poles in an ellipse on the left half of the S-plane + m = np.arange(-N+1, N, 2) + theta = pi * m / (2*N) + p = -sinh(mu + 1j*theta) + + k = np.prod(-p, axis=0).real + if N % 2 == 0: + k = k / sqrt(1 + eps * eps) + + return z, p, k + + +def cheb2ap(N, rs): + """ + Return (z,p,k) for Nth-order Chebyshev type II analog lowpass filter. + + The returned filter prototype has attenuation of at least ``rs`` decibels + in the stopband. + + The filter's angular (e.g. rad/s) cutoff frequency is normalized to 1, + defined as the point at which the attenuation first reaches ``rs``. + + See Also + -------- + cheby2 : Filter design function using this prototype + + """ + if abs(int(N)) != N: + raise ValueError("Filter order must be a nonnegative integer") + elif N == 0: + # Avoid divide-by-zero warning + return np.array([]), np.array([]), 1 + + # Ripple factor (epsilon) + de = 1.0 / sqrt(10 ** (0.1 * rs) - 1) + mu = arcsinh(1.0 / de) / N + + if N % 2: + m = np.concatenate((np.arange(-N+1, 0, 2), np.arange(2, N, 2))) + else: + m = np.arange(-N+1, N, 2) + + z = -conjugate(1j / sin(m * pi / (2.0 * N))) + + # Poles around the unit circle like Butterworth + p = -exp(1j * pi * np.arange(-N+1, N, 2) / (2 * N)) + # Warp into Chebyshev II + p = sinh(mu) * p.real + 1j * cosh(mu) * p.imag + p = 1.0 / p + + k = (np.prod(-p, axis=0) / np.prod(-z, axis=0)).real + return z, p, k + + +EPSILON = 2e-16 + +# number of terms in solving degree equation +_ELLIPDEG_MMAX = 7 + + +def _ellipdeg(n, m1): + """Solve degree equation using nomes + + Given n, m1, solve + n * K(m) / K'(m) = K1(m1) / K1'(m1) + for m + + See [1], Eq. (49) + + References + ---------- + .. [1] Orfanidis, "Lecture Notes on Elliptic Filter Design", + https://www.ece.rutgers.edu/~orfanidi/ece521/notes.pdf + """ + K1 = special.ellipk(m1) + K1p = special.ellipkm1(m1) + + q1 = np.exp(-np.pi * K1p / K1) + q = q1 ** (1/n) + + mnum = np.arange(_ELLIPDEG_MMAX + 1) + mden = np.arange(1, _ELLIPDEG_MMAX + 2) + + num = np.sum(q ** (mnum * (mnum+1))) + den = 1 + 2 * np.sum(q ** (mden**2)) + + return 16 * q * (num / den) ** 4 + + +# Maximum number of iterations in Landen transformation recursion +# sequence. 10 is conservative; unit tests pass with 4, Orfanidis +# (see _arc_jac_cn [1]) suggests 5. +_ARC_JAC_SN_MAXITER = 10 + + +def _arc_jac_sn(w, m): + """Inverse Jacobian elliptic sn + + Solve for z in w = sn(z, m) + + Parameters + ---------- + w : complex scalar + argument + + m : scalar + modulus; in interval [0, 1] + + + See [1], Eq. (56) + + References + ---------- + .. 
[1] Orfanidis, "Lecture Notes on Elliptic Filter Design", + https://www.ece.rutgers.edu/~orfanidi/ece521/notes.pdf + + """ + + def _complement(kx): + # (1-k**2) ** 0.5; the expression below + # works for small kx + return ((1 - kx) * (1 + kx)) ** 0.5 + + k = m ** 0.5 + + if k > 1: + return np.nan + elif k == 1: + return np.arctanh(w) + + ks = [k] + niter = 0 + while ks[-1] != 0: + k_ = ks[-1] + k_p = _complement(k_) + ks.append((1 - k_p) / (1 + k_p)) + niter += 1 + if niter > _ARC_JAC_SN_MAXITER: + raise ValueError('Landen transformation not converging') + + K = np.prod(1 + np.array(ks[1:])) * np.pi/2 + + wns = [w] + + for kn, knext in zip(ks[:-1], ks[1:]): + wn = wns[-1] + wnext = (2 * wn / + ((1 + knext) * (1 + _complement(kn * wn)))) + wns.append(wnext) + + u = 2 / np.pi * np.arcsin(wns[-1]) + + z = K * u + return z + + +def _arc_jac_sc1(w, m): + """Real inverse Jacobian sc, with complementary modulus + + Solve for z in w = sc(z, 1-m) + + w - real scalar + + m - modulus + + From [1], sc(z, m) = -i * sn(i * z, 1 - m) + + References + ---------- + # noqa: E501 + .. [1] https://functions.wolfram.com/EllipticFunctions/JacobiSC/introductions/JacobiPQs/ShowAll.html, + "Representations through other Jacobi functions" + + """ + + zcomplex = _arc_jac_sn(1j * w, m) + if abs(zcomplex.real) > 1e-14: + raise ValueError + + return zcomplex.imag + + +def ellipap(N, rp, rs): + """Return (z,p,k) of Nth-order elliptic analog lowpass filter. + + The filter is a normalized prototype that has `rp` decibels of ripple + in the passband and a stopband `rs` decibels down. + + The filter's angular (e.g., rad/s) cutoff frequency is normalized to 1, + defined as the point at which the gain first drops below ``-rp``. + + See Also + -------- + ellip : Filter design function using this prototype + + References + ---------- + .. [1] Lutovac, Tosic, and Evans, "Filter Design for Signal Processing", + Chapters 5 and 12. + + .. [2] Orfanidis, "Lecture Notes on Elliptic Filter Design", + https://www.ece.rutgers.edu/~orfanidi/ece521/notes.pdf + + """ + if abs(int(N)) != N: + raise ValueError("Filter order must be a nonnegative integer") + elif N == 0: + # Avoid divide-by-zero warning + # Even order filters have DC gain of -rp dB + return np.array([]), np.array([]), 10**(-rp/20) + elif N == 1: + p = -sqrt(1.0 / _pow10m1(0.1 * rp)) + k = -p + z = [] + return asarray(z), asarray(p), k + + eps_sq = _pow10m1(0.1 * rp) + + eps = np.sqrt(eps_sq) + ck1_sq = eps_sq / _pow10m1(0.1 * rs) + if ck1_sq == 0: + raise ValueError("Cannot design a filter with given rp and rs" + " specifications.") + + val = special.ellipk(ck1_sq), special.ellipkm1(ck1_sq) + + m = _ellipdeg(N, ck1_sq) + + capk = special.ellipk(m) + + j = np.arange(1 - N % 2, N, 2) + jj = len(j) + + [s, c, d, phi] = special.ellipj(j * capk / N, m * np.ones(jj)) + snew = np.compress(abs(s) > EPSILON, s, axis=-1) + z = 1.0 / (sqrt(m) * snew) + z = 1j * z + z = np.concatenate((z, conjugate(z))) + + r = _arc_jac_sc1(1. 
/ eps, ck1_sq) + v0 = capk * r / (N * val[0]) + + [sv, cv, dv, phi] = special.ellipj(v0, 1 - m) + p = -(c * d * sv * cv + 1j * s * dv) / (1 - (d * sv) ** 2.0) + + if N % 2: + newp = np.compress( + abs(p.imag) > EPSILON * np.sqrt(np.sum(p * np.conjugate(p), axis=0).real), + p, axis=-1 + ) + p = np.concatenate((p, conjugate(newp))) + else: + p = np.concatenate((p, conjugate(p))) + + k = (np.prod(-p, axis=0) / np.prod(-z, axis=0)).real + if N % 2 == 0: + k = k / np.sqrt(1 + eps_sq) + + return z, p, k + + +# TODO: Make this a real public function scipy.misc.ff +def _falling_factorial(x, n): + r""" + Return the factorial of `x` to the `n` falling. + + This is defined as: + + .. math:: x^\underline n = (x)_n = x (x-1) \cdots (x-n+1) + + This can more efficiently calculate ratios of factorials, since: + + n!/m! == falling_factorial(n, n-m) + + where n >= m + + skipping the factors that cancel out + + the usual factorial n! == ff(n, n) + """ + val = 1 + for k in range(x - n + 1, x + 1): + val *= k + return val + + +def _bessel_poly(n, reverse=False): + """ + Return the coefficients of Bessel polynomial of degree `n` + + If `reverse` is true, a reverse Bessel polynomial is output. + + Output is a list of coefficients: + [1] = 1 + [1, 1] = 1*s + 1 + [1, 3, 3] = 1*s^2 + 3*s + 3 + [1, 6, 15, 15] = 1*s^3 + 6*s^2 + 15*s + 15 + [1, 10, 45, 105, 105] = 1*s^4 + 10*s^3 + 45*s^2 + 105*s + 105 + etc. + + Output is a Python list of arbitrary precision long ints, so n is only + limited by your hardware's memory. + + Sequence is http://oeis.org/A001498, and output can be confirmed to + match http://oeis.org/A001498/b001498.txt : + + >>> from scipy.signal._filter_design import _bessel_poly + >>> i = 0 + >>> for n in range(51): + ... for x in _bessel_poly(n, reverse=True): + ... print(i, x) + ... i += 1 + + """ + if abs(int(n)) != n: + raise ValueError("Polynomial order must be a nonnegative integer") + else: + n = int(n) # np.int32 doesn't work, for instance + + out = [] + for k in range(n + 1): + num = _falling_factorial(2*n - k, n) + den = 2**(n - k) * math.factorial(k) + out.append(num // den) + + if reverse: + return out[::-1] + else: + return out + + +def _campos_zeros(n): + """ + Return approximate zero locations of Bessel polynomials y_n(x) for order + `n` using polynomial fit (Campos-Calderon 2011) + """ + if n == 1: + return asarray([-1+0j]) + + s = npp_polyval(n, [0, 0, 2, 0, -3, 1]) + b3 = npp_polyval(n, [16, -8]) / s + b2 = npp_polyval(n, [-24, -12, 12]) / s + b1 = npp_polyval(n, [8, 24, -12, -2]) / s + b0 = npp_polyval(n, [0, -6, 0, 5, -1]) / s + + r = npp_polyval(n, [0, 0, 2, 1]) + a1 = npp_polyval(n, [-6, -6]) / r + a2 = 6 / r + + k = np.arange(1, n+1) + x = npp_polyval(k, [0, a1, a2]) + y = npp_polyval(k, [b0, b1, b2, b3]) + + return x + 1j*y + + +def _aberth(f, fp, x0, tol=1e-15, maxiter=50): + """ + Given a function `f`, its first derivative `fp`, and a set of initial + guesses `x0`, simultaneously find the roots of the polynomial using the + Aberth-Ehrlich method. + + ``len(x0)`` should equal the number of roots of `f`. + + (This is not a complete implementation of Bini's algorithm.) 
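+
+    Each pass of the loop below applies the Aberth-Ehrlich update (this is a
+    restatement of the code, not an additional requirement): with the Newton
+    step ``alpha_k = -f(x_k) / f'(x_k)`` and the "repulsion" term
+    ``beta_k = sum_{j != k} 1 / (x_k - x_j)``, each root estimate moves by
+    ``x_k += alpha_k / (1 + alpha_k * beta_k)``.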
+ """ + + N = len(x0) + + x = array(x0, complex) + beta = np.empty_like(x0) + + for iteration in range(maxiter): + alpha = -f(x) / fp(x) # Newton's method + + # Model "repulsion" between zeros + for k in range(N): + beta[k] = np.sum(1/(x[k] - x[k+1:])) + beta[k] += np.sum(1/(x[k] - x[:k])) + + x += alpha / (1 + alpha * beta) + + if not all(np.isfinite(x)): + raise RuntimeError('Root-finding calculation failed') + + # Mekwi: The iterative process can be stopped when |hn| has become + # less than the largest error one is willing to permit in the root. + if all(abs(alpha) <= tol): + break + else: + raise Exception('Zeros failed to converge') + + return x + + +def _bessel_zeros(N): + """ + Find zeros of ordinary Bessel polynomial of order `N`, by root-finding of + modified Bessel function of the second kind + """ + if N == 0: + return asarray([]) + + # Generate starting points + x0 = _campos_zeros(N) + + # Zeros are the same for exp(1/x)*K_{N+0.5}(1/x) and Nth-order ordinary + # Bessel polynomial y_N(x) + def f(x): + return special.kve(N+0.5, 1/x) + + # First derivative of above + def fp(x): + return (special.kve(N-0.5, 1/x)/(2*x**2) - + special.kve(N+0.5, 1/x)/(x**2) + + special.kve(N+1.5, 1/x)/(2*x**2)) + + # Starting points converge to true zeros + x = _aberth(f, fp, x0) + + # Improve precision using Newton's method on each + for i in range(len(x)): + x[i] = optimize.newton(f, x[i], fp, tol=1e-15) + + # Average complex conjugates to make them exactly symmetrical + x = np.mean((x, x[::-1].conj()), 0) + + # Zeros should sum to -1 + if abs(np.sum(x) + 1) > 1e-15: + raise RuntimeError('Generated zeros are inaccurate') + + return x + + +def _norm_factor(p, k): + """ + Numerically find frequency shift to apply to delay-normalized filter such + that -3 dB point is at 1 rad/sec. + + `p` is an array_like of polynomial poles + `k` is a float gain + + First 10 values are listed in "Bessel Scale Factors" table, + "Bessel Filters Polynomials, Poles and Circuit Elements 2003, C. Bond." + """ + p = asarray(p, dtype=complex) + + def G(w): + """ + Gain of filter + """ + return abs(k / prod(1j*w - p)) + + def cutoff(w): + """ + When gain = -3 dB, return 0 + """ + return G(w) - 1/np.sqrt(2) + + return optimize.newton(cutoff, 1.5) + + +def besselap(N, norm='phase'): + """ + Return (z,p,k) for analog prototype of an Nth-order Bessel filter. + + Parameters + ---------- + N : int + The order of the filter. + norm : {'phase', 'delay', 'mag'}, optional + Frequency normalization: + + ``phase`` + The filter is normalized such that the phase response reaches its + midpoint at an angular (e.g., rad/s) cutoff frequency of 1. This + happens for both low-pass and high-pass filters, so this is the + "phase-matched" case. [6]_ + + The magnitude response asymptotes are the same as a Butterworth + filter of the same order with a cutoff of `Wn`. + + This is the default, and matches MATLAB's implementation. + + ``delay`` + The filter is normalized such that the group delay in the passband + is 1 (e.g., 1 second). This is the "natural" type obtained by + solving Bessel polynomials + + ``mag`` + The filter is normalized such that the gain magnitude is -3 dB at + angular frequency 1. This is called "frequency normalization" by + Bond. [1]_ + + .. versionadded:: 0.18.0 + + Returns + ------- + z : ndarray + Zeros of the transfer function. Is always an empty array. + p : ndarray + Poles of the transfer function. + k : scalar + Gain of the transfer function. For phase-normalized, this is always 1. 
+ + See Also + -------- + bessel : Filter design function using this prototype + + Notes + ----- + To find the pole locations, approximate starting points are generated [2]_ + for the zeros of the ordinary Bessel polynomial [3]_, then the + Aberth-Ehrlich method [4]_ [5]_ is used on the Kv(x) Bessel function to + calculate more accurate zeros, and these locations are then inverted about + the unit circle. + + References + ---------- + .. [1] C.R. Bond, "Bessel Filter Constants", + http://www.crbond.com/papers/bsf.pdf + .. [2] Campos and Calderon, "Approximate closed-form formulas for the + zeros of the Bessel Polynomials", :arXiv:`1105.0957`. + .. [3] Thomson, W.E., "Delay Networks having Maximally Flat Frequency + Characteristics", Proceedings of the Institution of Electrical + Engineers, Part III, November 1949, Vol. 96, No. 44, pp. 487-490. + .. [4] Aberth, "Iteration Methods for Finding all Zeros of a Polynomial + Simultaneously", Mathematics of Computation, Vol. 27, No. 122, + April 1973 + .. [5] Ehrlich, "A modified Newton method for polynomials", Communications + of the ACM, Vol. 10, Issue 2, pp. 107-108, Feb. 1967, + :DOI:`10.1145/363067.363115` + .. [6] Miller and Bohn, "A Bessel Filter Crossover, and Its Relation to + Others", RaneNote 147, 1998, + https://www.ranecommercial.com/legacy/note147.html + + """ + if abs(int(N)) != N: + raise ValueError("Filter order must be a nonnegative integer") + + N = int(N) # calculation below doesn't always fit in np.int64 + if N == 0: + p = [] + k = 1 + else: + # Find roots of reverse Bessel polynomial + p = 1/_bessel_zeros(N) + + a_last = _falling_factorial(2*N, N) // 2**N + + # Shift them to a different normalization if required + if norm in ('delay', 'mag'): + # Normalized for group delay of 1 + k = a_last + if norm == 'mag': + # -3 dB magnitude point is at 1 rad/sec + norm_factor = _norm_factor(p, k) + p /= norm_factor + k = norm_factor**-N * a_last + elif norm == 'phase': + # Phase-matched (1/2 max phase shift at 1 rad/sec) + # Asymptotes are same as Butterworth filter + p *= 10**(-math.log10(a_last)/N) + k = 1 + else: + raise ValueError('normalization not understood') + + return asarray([]), asarray(p, dtype=complex), float(k) + + +def iirnotch(w0, Q, fs=2.0): + """ + Design second-order IIR notch digital filter. + + A notch filter is a band-stop filter with a narrow bandwidth + (high quality factor). It rejects a narrow frequency band and + leaves the rest of the spectrum little changed. + + Parameters + ---------- + w0 : float + Frequency to remove from a signal. If `fs` is specified, this is in + the same units as `fs`. By default, it is a normalized scalar that must + satisfy ``0 < w0 < 1``, with ``w0 = 1`` corresponding to half of the + sampling frequency. + Q : float + Quality factor. Dimensionless parameter that characterizes + notch filter -3 dB bandwidth ``bw`` relative to its center + frequency, ``Q = w0/bw``. + fs : float, optional + The sampling frequency of the digital system. + + .. versionadded:: 1.2.0 + + Returns + ------- + b, a : ndarray, ndarray + Numerator (``b``) and denominator (``a``) polynomials + of the IIR filter. + + See Also + -------- + iirpeak + + Notes + ----- + .. versionadded:: 0.19.0 + + References + ---------- + .. [1] Sophocles J. 
Orfanidis, "Introduction To Signal Processing", + Prentice-Hall, 1996 + + Examples + -------- + Design and plot filter to remove the 60 Hz component from a + signal sampled at 200 Hz, using a quality factor Q = 30 + + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + >>> import numpy as np + + >>> fs = 200.0 # Sample frequency (Hz) + >>> f0 = 60.0 # Frequency to be removed from signal (Hz) + >>> Q = 30.0 # Quality factor + >>> # Design notch filter + >>> b, a = signal.iirnotch(f0, Q, fs) + + >>> # Frequency response + >>> freq, h = signal.freqz(b, a, fs=fs) + >>> # Plot + >>> fig, ax = plt.subplots(2, 1, figsize=(8, 6)) + >>> ax[0].plot(freq, 20*np.log10(abs(h)), color='blue') + >>> ax[0].set_title("Frequency Response") + >>> ax[0].set_ylabel("Amplitude [dB]", color='blue') + >>> ax[0].set_xlim([0, 100]) + >>> ax[0].set_ylim([-25, 10]) + >>> ax[0].grid(True) + >>> ax[1].plot(freq, np.unwrap(np.angle(h))*180/np.pi, color='green') + >>> ax[1].set_ylabel("Phase [deg]", color='green') + >>> ax[1].set_xlabel("Frequency [Hz]") + >>> ax[1].set_xlim([0, 100]) + >>> ax[1].set_yticks([-90, -60, -30, 0, 30, 60, 90]) + >>> ax[1].set_ylim([-90, 90]) + >>> ax[1].grid(True) + >>> plt.show() + """ + + return _design_notch_peak_filter(w0, Q, "notch", fs) + + +def iirpeak(w0, Q, fs=2.0): + """ + Design second-order IIR peak (resonant) digital filter. + + A peak filter is a band-pass filter with a narrow bandwidth + (high quality factor). It rejects components outside a narrow + frequency band. + + Parameters + ---------- + w0 : float + Frequency to be retained in a signal. If `fs` is specified, this is in + the same units as `fs`. By default, it is a normalized scalar that must + satisfy ``0 < w0 < 1``, with ``w0 = 1`` corresponding to half of the + sampling frequency. + Q : float + Quality factor. Dimensionless parameter that characterizes + peak filter -3 dB bandwidth ``bw`` relative to its center + frequency, ``Q = w0/bw``. + fs : float, optional + The sampling frequency of the digital system. + + .. versionadded:: 1.2.0 + + Returns + ------- + b, a : ndarray, ndarray + Numerator (``b``) and denominator (``a``) polynomials + of the IIR filter. + + See Also + -------- + iirnotch + + Notes + ----- + .. versionadded:: 0.19.0 + + References + ---------- + .. [1] Sophocles J. 
Orfanidis, "Introduction To Signal Processing", + Prentice-Hall, 1996 + + Examples + -------- + Design and plot filter to remove the frequencies other than the 300 Hz + component from a signal sampled at 1000 Hz, using a quality factor Q = 30 + + >>> import numpy as np + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + + >>> fs = 1000.0 # Sample frequency (Hz) + >>> f0 = 300.0 # Frequency to be retained (Hz) + >>> Q = 30.0 # Quality factor + >>> # Design peak filter + >>> b, a = signal.iirpeak(f0, Q, fs) + + >>> # Frequency response + >>> freq, h = signal.freqz(b, a, fs=fs) + >>> # Plot + >>> fig, ax = plt.subplots(2, 1, figsize=(8, 6)) + >>> ax[0].plot(freq, 20*np.log10(np.maximum(abs(h), 1e-5)), color='blue') + >>> ax[0].set_title("Frequency Response") + >>> ax[0].set_ylabel("Amplitude [dB]", color='blue') + >>> ax[0].set_xlim([0, 500]) + >>> ax[0].set_ylim([-50, 10]) + >>> ax[0].grid(True) + >>> ax[1].plot(freq, np.unwrap(np.angle(h))*180/np.pi, color='green') + >>> ax[1].set_ylabel("Phase [deg]", color='green') + >>> ax[1].set_xlabel("Frequency [Hz]") + >>> ax[1].set_xlim([0, 500]) + >>> ax[1].set_yticks([-90, -60, -30, 0, 30, 60, 90]) + >>> ax[1].set_ylim([-90, 90]) + >>> ax[1].grid(True) + >>> plt.show() + """ + + return _design_notch_peak_filter(w0, Q, "peak", fs) + + +def _design_notch_peak_filter(w0, Q, ftype, fs=2.0): + """ + Design notch or peak digital filter. + + Parameters + ---------- + w0 : float + Normalized frequency to remove from a signal. If `fs` is specified, + this is in the same units as `fs`. By default, it is a normalized + scalar that must satisfy ``0 < w0 < 1``, with ``w0 = 1`` + corresponding to half of the sampling frequency. + Q : float + Quality factor. Dimensionless parameter that characterizes + notch filter -3 dB bandwidth ``bw`` relative to its center + frequency, ``Q = w0/bw``. + ftype : str + The type of IIR filter to design: + + - notch filter : ``notch`` + - peak filter : ``peak`` + fs : float, optional + The sampling frequency of the digital system. + + .. versionadded:: 1.2.0: + + Returns + ------- + b, a : ndarray, ndarray + Numerator (``b``) and denominator (``a``) polynomials + of the IIR filter. + """ + fs = _validate_fs(fs, allow_none=False) + + # Guarantee that the inputs are floats + w0 = float(w0) + Q = float(Q) + w0 = 2*w0/fs + + # Checks if w0 is within the range + if w0 > 1.0 or w0 < 0.0: + raise ValueError("w0 should be such that 0 < w0 < 1") + + # Get bandwidth + bw = w0/Q + + # Normalize inputs + bw = bw*np.pi + w0 = w0*np.pi + + if ftype not in ("notch", "peak"): + raise ValueError("Unknown ftype.") + + # Compute beta according to Eqs. 11.3.4 (p.575) and 11.3.19 (p.579) from + # reference [1]. Due to assuming a -3 dB attenuation value, i.e, assuming + # gb = 1 / np.sqrt(2), the following terms simplify to: + # (np.sqrt(1.0 - gb**2.0) / gb) = 1 + # (gb / np.sqrt(1.0 - gb**2.0)) = 1 + beta = np.tan(bw/2.0) + + # Compute gain: formula 11.3.6 (p.575) from reference [1] + gain = 1.0/(1.0+beta) + + # Compute numerator b and denominator a + # formulas 11.3.7 (p.575) and 11.3.21 (p.579) + # from reference [1] + if ftype == "notch": + b = gain*np.array([1.0, -2.0*np.cos(w0), 1.0]) + else: + b = (1.0-gain)*np.array([1.0, 0.0, -1.0]) + a = np.array([1.0, -2.0*gain*np.cos(w0), (2.0*gain-1.0)]) + + return b, a + + +def iircomb(w0, Q, ftype='notch', fs=2.0, *, pass_zero=False): + """ + Design IIR notching or peaking digital comb filter. 
+ + A notching comb filter consists of regularly-spaced band-stop filters with + a narrow bandwidth (high quality factor). Each rejects a narrow frequency + band and leaves the rest of the spectrum little changed. + + A peaking comb filter consists of regularly-spaced band-pass filters with + a narrow bandwidth (high quality factor). Each rejects components outside + a narrow frequency band. + + Parameters + ---------- + w0 : float + The fundamental frequency of the comb filter (the spacing between its + peaks). This must evenly divide the sampling frequency. If `fs` is + specified, this is in the same units as `fs`. By default, it is + a normalized scalar that must satisfy ``0 < w0 < 1``, with + ``w0 = 1`` corresponding to half of the sampling frequency. + Q : float + Quality factor. Dimensionless parameter that characterizes + notch filter -3 dB bandwidth ``bw`` relative to its center + frequency, ``Q = w0/bw``. + ftype : {'notch', 'peak'} + The type of comb filter generated by the function. If 'notch', then + the Q factor applies to the notches. If 'peak', then the Q factor + applies to the peaks. Default is 'notch'. + fs : float, optional + The sampling frequency of the signal. Default is 2.0. + pass_zero : bool, optional + If False (default), the notches (nulls) of the filter are centered on + frequencies [0, w0, 2*w0, ...], and the peaks are centered on the + midpoints [w0/2, 3*w0/2, 5*w0/2, ...]. If True, the peaks are centered + on [0, w0, 2*w0, ...] (passing zero frequency) and vice versa. + + .. versionadded:: 1.9.0 + + Returns + ------- + b, a : ndarray, ndarray + Numerator (``b``) and denominator (``a``) polynomials + of the IIR filter. + + Raises + ------ + ValueError + If `w0` is less than or equal to 0 or greater than or equal to + ``fs/2``, if `fs` is not divisible by `w0`, if `ftype` + is not 'notch' or 'peak' + + See Also + -------- + iirnotch + iirpeak + + Notes + ----- + For implementation details, see [1]_. The TF implementation of the + comb filter is numerically stable even at higher orders due to the + use of a single repeated pole, which won't suffer from precision loss. + + References + ---------- + .. [1] Sophocles J. Orfanidis, "Introduction To Signal Processing", + Prentice-Hall, 1996, ch. 
11, "Digital Filter Design" + + Examples + -------- + Design and plot notching comb filter at 20 Hz for a + signal sampled at 200 Hz, using quality factor Q = 30 + + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + >>> import numpy as np + + >>> fs = 200.0 # Sample frequency (Hz) + >>> f0 = 20.0 # Frequency to be removed from signal (Hz) + >>> Q = 30.0 # Quality factor + >>> # Design notching comb filter + >>> b, a = signal.iircomb(f0, Q, ftype='notch', fs=fs) + + >>> # Frequency response + >>> freq, h = signal.freqz(b, a, fs=fs) + >>> response = abs(h) + >>> # To avoid divide by zero when graphing + >>> response[response == 0] = 1e-20 + >>> # Plot + >>> fig, ax = plt.subplots(2, 1, figsize=(8, 6), sharex=True) + >>> ax[0].plot(freq, 20*np.log10(abs(response)), color='blue') + >>> ax[0].set_title("Frequency Response") + >>> ax[0].set_ylabel("Amplitude [dB]", color='blue') + >>> ax[0].set_xlim([0, 100]) + >>> ax[0].set_ylim([-30, 10]) + >>> ax[0].grid(True) + >>> ax[1].plot(freq, (np.angle(h)*180/np.pi+180)%360 - 180, color='green') + >>> ax[1].set_ylabel("Phase [deg]", color='green') + >>> ax[1].set_xlabel("Frequency [Hz]") + >>> ax[1].set_xlim([0, 100]) + >>> ax[1].set_yticks([-90, -60, -30, 0, 30, 60, 90]) + >>> ax[1].set_ylim([-90, 90]) + >>> ax[1].grid(True) + >>> plt.show() + + Design and plot peaking comb filter at 250 Hz for a + signal sampled at 1000 Hz, using quality factor Q = 30 + + >>> fs = 1000.0 # Sample frequency (Hz) + >>> f0 = 250.0 # Frequency to be retained (Hz) + >>> Q = 30.0 # Quality factor + >>> # Design peaking filter + >>> b, a = signal.iircomb(f0, Q, ftype='peak', fs=fs, pass_zero=True) + + >>> # Frequency response + >>> freq, h = signal.freqz(b, a, fs=fs) + >>> response = abs(h) + >>> # To avoid divide by zero when graphing + >>> response[response == 0] = 1e-20 + >>> # Plot + >>> fig, ax = plt.subplots(2, 1, figsize=(8, 6), sharex=True) + >>> ax[0].plot(freq, 20*np.log10(np.maximum(abs(h), 1e-5)), color='blue') + >>> ax[0].set_title("Frequency Response") + >>> ax[0].set_ylabel("Amplitude [dB]", color='blue') + >>> ax[0].set_xlim([0, 500]) + >>> ax[0].set_ylim([-80, 10]) + >>> ax[0].grid(True) + >>> ax[1].plot(freq, (np.angle(h)*180/np.pi+180)%360 - 180, color='green') + >>> ax[1].set_ylabel("Phase [deg]", color='green') + >>> ax[1].set_xlabel("Frequency [Hz]") + >>> ax[1].set_xlim([0, 500]) + >>> ax[1].set_yticks([-90, -60, -30, 0, 30, 60, 90]) + >>> ax[1].set_ylim([-90, 90]) + >>> ax[1].grid(True) + >>> plt.show() + """ + + # Convert w0, Q, and fs to float + w0 = float(w0) + Q = float(Q) + fs = _validate_fs(fs, allow_none=False) + + # Check for invalid cutoff frequency or filter type + ftype = ftype.lower() + if not 0 < w0 < fs / 2: + raise ValueError(f"w0 must be between 0 and {fs / 2}" + f" (Nyquist), but given {w0}.") + if ftype not in ('notch', 'peak'): + raise ValueError('ftype must be either notch or peak.') + + # Compute the order of the filter + N = round(fs / w0) + + # Check for cutoff frequency divisibility + if abs(w0 - fs/N)/fs > 1e-14: + raise ValueError('fs must be divisible by w0.') + + # Compute frequency in radians and filter bandwidth + # Eq. 11.3.1 (p. 574) from reference [1] + w0 = (2 * np.pi * w0) / fs + w_delta = w0 / Q + + # Define base gain values depending on notch or peak filter + # Compute -3dB attenuation + # Eqs. 11.4.1 and 11.4.2 (p. 582) from reference [1] + if ftype == 'notch': + G0, G = 1, 0 + elif ftype == 'peak': + G0, G = 0, 1 + + # Compute beta according to Eq. 11.5.3 (p. 591) from reference [1]. 
Due to + # assuming a -3 dB attenuation value, i.e, assuming GB = 1 / np.sqrt(2), + # the following term simplifies to: + # np.sqrt((GB**2 - G0**2) / (G**2 - GB**2)) = 1 + beta = np.tan(N * w_delta / 4) + + # Compute filter coefficients + # Eq 11.5.1 (p. 590) variables a, b, c from reference [1] + ax = (1 - beta) / (1 + beta) + bx = (G0 + G * beta) / (1 + beta) + cx = (G0 - G * beta) / (1 + beta) + + # Last coefficients are negative to get peaking comb that passes zero or + # notching comb that doesn't. + negative_coef = ((ftype == 'peak' and pass_zero) or + (ftype == 'notch' and not pass_zero)) + + # Compute numerator coefficients + # Eq 11.5.1 (p. 590) or Eq 11.5.4 (p. 591) from reference [1] + # b - cz^-N or b + cz^-N + b = np.zeros(N + 1) + b[0] = bx + if negative_coef: + b[-1] = -cx + else: + b[-1] = +cx + + # Compute denominator coefficients + # Eq 11.5.1 (p. 590) or Eq 11.5.4 (p. 591) from reference [1] + # 1 - az^-N or 1 + az^-N + a = np.zeros(N + 1) + a[0] = 1 + if negative_coef: + a[-1] = -ax + else: + a[-1] = +ax + + return b, a + + +def _hz_to_erb(hz): + """ + Utility for converting from frequency (Hz) to the + Equivalent Rectangular Bandwidth (ERB) scale + ERB = frequency / EarQ + minBW + """ + EarQ = 9.26449 + minBW = 24.7 + return hz / EarQ + minBW + + +def gammatone(freq, ftype, order=None, numtaps=None, fs=None): + """ + Gammatone filter design. + + This function computes the coefficients of an FIR or IIR gammatone + digital filter [1]_. + + Parameters + ---------- + freq : float + Center frequency of the filter (expressed in the same units + as `fs`). + ftype : {'fir', 'iir'} + The type of filter the function generates. If 'fir', the function + will generate an Nth order FIR gammatone filter. If 'iir', the + function will generate an 8th order digital IIR filter, modeled as + as 4th order gammatone filter. + order : int, optional + The order of the filter. Only used when ``ftype='fir'``. + Default is 4 to model the human auditory system. Must be between + 0 and 24. + numtaps : int, optional + Length of the filter. Only used when ``ftype='fir'``. + Default is ``fs*0.015`` if `fs` is greater than 1000, + 15 if `fs` is less than or equal to 1000. + fs : float, optional + The sampling frequency of the signal. `freq` must be between + 0 and ``fs/2``. Default is 2. + + Returns + ------- + b, a : ndarray, ndarray + Numerator (``b``) and denominator (``a``) polynomials of the filter. + + Raises + ------ + ValueError + If `freq` is less than or equal to 0 or greater than or equal to + ``fs/2``, if `ftype` is not 'fir' or 'iir', if `order` is less than + or equal to 0 or greater than 24 when ``ftype='fir'`` + + See Also + -------- + firwin + iirfilter + + References + ---------- + .. [1] Slaney, Malcolm, "An Efficient Implementation of the + Patterson-Holdsworth Auditory Filter Bank", Apple Computer + Technical Report 35, 1993, pp.3-8, 34-39. 
+ + Examples + -------- + 16-sample 4th order FIR Gammatone filter centered at 440 Hz + + >>> from scipy import signal + >>> signal.gammatone(440, 'fir', numtaps=16, fs=16000) + (array([ 0.00000000e+00, 2.22196719e-07, 1.64942101e-06, 4.99298227e-06, + 1.01993969e-05, 1.63125770e-05, 2.14648940e-05, 2.29947263e-05, + 1.76776931e-05, 2.04980537e-06, -2.72062858e-05, -7.28455299e-05, + -1.36651076e-04, -2.19066855e-04, -3.18905076e-04, -4.33156712e-04]), + [1.0]) + + IIR Gammatone filter centered at 440 Hz + + >>> import matplotlib.pyplot as plt + >>> import numpy as np + + >>> fc, fs = 440, 16000 + >>> b, a = signal.gammatone(fc, 'iir', fs=fs) + >>> w, h = signal.freqz(b, a) + >>> plt.plot(w * fs / (2 * np.pi), 20 * np.log10(abs(h))) + >>> plt.xscale('log') + >>> plt.title('Gammatone filter frequency response') + >>> plt.xlabel('Frequency [Hz]') + >>> plt.ylabel('Amplitude [dB]') + >>> plt.margins(0, 0.1) + >>> plt.grid(which='both', axis='both') + >>> plt.axvline(fc, color='green') # cutoff frequency + >>> plt.show() + """ + # Converts freq to float + freq = float(freq) + + # Set sampling rate if not passed + if fs is None: + fs = 2 + fs = _validate_fs(fs, allow_none=False) + + # Check for invalid cutoff frequency or filter type + ftype = ftype.lower() + filter_types = ['fir', 'iir'] + if not 0 < freq < fs / 2: + raise ValueError(f"The frequency must be between 0 and {fs / 2}" + f" (Nyquist), but given {freq}.") + if ftype not in filter_types: + raise ValueError('ftype must be either fir or iir.') + + # Calculate FIR gammatone filter + if ftype == 'fir': + # Set order and numtaps if not passed + if order is None: + order = 4 + order = operator.index(order) + + if numtaps is None: + numtaps = max(int(fs * 0.015), 15) + numtaps = operator.index(numtaps) + + # Check for invalid order + if not 0 < order <= 24: + raise ValueError("Invalid order: order must be > 0 and <= 24.") + + # Gammatone impulse response settings + t = np.arange(numtaps) / fs + bw = 1.019 * _hz_to_erb(freq) + + # Calculate the FIR gammatone filter + b = (t ** (order - 1)) * np.exp(-2 * np.pi * bw * t) + b *= np.cos(2 * np.pi * freq * t) + + # Scale the FIR filter so the frequency response is 1 at cutoff + scale_factor = 2 * (2 * np.pi * bw) ** (order) + scale_factor /= float_factorial(order - 1) + scale_factor /= fs + b *= scale_factor + a = [1.0] + + # Calculate IIR gammatone filter + elif ftype == 'iir': + # Raise warning if order and/or numtaps is passed + if order is not None: + warnings.warn('order is not used for IIR gammatone filter.', stacklevel=2) + if numtaps is not None: + warnings.warn('numtaps is not used for IIR gammatone filter.', stacklevel=2) + + # Gammatone impulse response settings + T = 1./fs + bw = 2 * np.pi * 1.019 * _hz_to_erb(freq) + fr = 2 * freq * np.pi * T + bwT = bw * T + + # Calculate the gain to normalize the volume at the center frequency + g1 = -2 * np.exp(2j * fr) * T + g2 = 2 * np.exp(-(bwT) + 1j * fr) * T + g3 = np.sqrt(3 + 2 ** (3 / 2)) * np.sin(fr) + g4 = np.sqrt(3 - 2 ** (3 / 2)) * np.sin(fr) + g5 = np.exp(2j * fr) + + g = g1 + g2 * (np.cos(fr) - g4) + g *= (g1 + g2 * (np.cos(fr) + g4)) + g *= (g1 + g2 * (np.cos(fr) - g3)) + g *= (g1 + g2 * (np.cos(fr) + g3)) + g /= ((-2 / np.exp(2 * bwT) - 2 * g5 + 2 * (1 + g5) / np.exp(bwT)) ** 4) + g = np.abs(g) + + # Create empty filter coefficient lists + b = np.empty(5) + a = np.empty(9) + + # Calculate the numerator coefficients + b[0] = (T ** 4) / g + b[1] = -4 * T ** 4 * np.cos(fr) / np.exp(bw * T) / g + b[2] = 6 * T ** 4 * np.cos(2 * fr) / 
np.exp(2 * bw * T) / g + b[3] = -4 * T ** 4 * np.cos(3 * fr) / np.exp(3 * bw * T) / g + b[4] = T ** 4 * np.cos(4 * fr) / np.exp(4 * bw * T) / g + + # Calculate the denominator coefficients + a[0] = 1 + a[1] = -8 * np.cos(fr) / np.exp(bw * T) + a[2] = 4 * (4 + 3 * np.cos(2 * fr)) / np.exp(2 * bw * T) + a[3] = -8 * (6 * np.cos(fr) + np.cos(3 * fr)) + a[3] /= np.exp(3 * bw * T) + a[4] = 2 * (18 + 16 * np.cos(2 * fr) + np.cos(4 * fr)) + a[4] /= np.exp(4 * bw * T) + a[5] = -8 * (6 * np.cos(fr) + np.cos(3 * fr)) + a[5] /= np.exp(5 * bw * T) + a[6] = 4 * (4 + 3 * np.cos(2 * fr)) / np.exp(6 * bw * T) + a[7] = -8 * np.cos(fr) / np.exp(7 * bw * T) + a[8] = np.exp(-8 * bw * T) + + return b, a + + +filter_dict = {'butter': [buttap, buttord], + 'butterworth': [buttap, buttord], + + 'cauer': [ellipap, ellipord], + 'elliptic': [ellipap, ellipord], + 'ellip': [ellipap, ellipord], + + 'bessel': [besselap], + 'bessel_phase': [besselap], + 'bessel_delay': [besselap], + 'bessel_mag': [besselap], + + 'cheby1': [cheb1ap, cheb1ord], + 'chebyshev1': [cheb1ap, cheb1ord], + 'chebyshevi': [cheb1ap, cheb1ord], + + 'cheby2': [cheb2ap, cheb2ord], + 'chebyshev2': [cheb2ap, cheb2ord], + 'chebyshevii': [cheb2ap, cheb2ord], + } + +band_dict = {'band': 'bandpass', + 'bandpass': 'bandpass', + 'pass': 'bandpass', + 'bp': 'bandpass', + + 'bs': 'bandstop', + 'bandstop': 'bandstop', + 'bands': 'bandstop', + 'stop': 'bandstop', + + 'l': 'lowpass', + 'low': 'lowpass', + 'lowpass': 'lowpass', + 'lp': 'lowpass', + + 'high': 'highpass', + 'highpass': 'highpass', + 'h': 'highpass', + 'hp': 'highpass', + } + +bessel_norms = {'bessel': 'phase', + 'bessel_phase': 'phase', + 'bessel_delay': 'delay', + 'bessel_mag': 'mag'} diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/_fir_filter_design.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/_fir_filter_design.py new file mode 100644 index 0000000000000000000000000000000000000000..b46d1bcc72f5fb03965b66cf131c25c2ca2f5585 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/_fir_filter_design.py @@ -0,0 +1,1286 @@ +"""Functions for FIR filter design.""" + +from math import ceil, log +import operator +import warnings +from typing import Literal + +import numpy as np +from numpy.fft import irfft, fft, ifft +from scipy.special import sinc +from scipy.linalg import (toeplitz, hankel, solve, LinAlgError, LinAlgWarning, + lstsq) +from scipy.signal._arraytools import _validate_fs + +from . import _sigtools + +__all__ = ['kaiser_beta', 'kaiser_atten', 'kaiserord', + 'firwin', 'firwin2', 'remez', 'firls', 'minimum_phase'] + + +# Some notes on function parameters: +# +# `cutoff` and `width` are given as numbers between 0 and 1. These are +# relative frequencies, expressed as a fraction of the Nyquist frequency. +# For example, if the Nyquist frequency is 2 KHz, then width=0.15 is a width +# of 300 Hz. +# +# The `order` of a FIR filter is one less than the number of taps. +# This is a potential source of confusion, so in the following code, +# we will always use the number of taps as the parameterization of +# the 'size' of the filter. The "number of taps" means the number +# of coefficients, which is the same as the length of the impulse +# response of the filter. + + +def kaiser_beta(a): + """Compute the Kaiser parameter `beta`, given the attenuation `a`. 
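+
+    The value is computed from Kaiser's empirical formula (see the
+    implementation below)::
+
+        beta = 0.1102 * (a - 8.7)                            for a > 50
+        beta = 0.5842 * (a - 21)**0.4 + 0.07886 * (a - 21)   for 21 < a <= 50
+        beta = 0.0                                           for a <= 21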
+ + Parameters + ---------- + a : float + The desired attenuation in the stopband and maximum ripple in + the passband, in dB. This should be a *positive* number. + + Returns + ------- + beta : float + The `beta` parameter to be used in the formula for a Kaiser window. + + References + ---------- + Oppenheim, Schafer, "Discrete-Time Signal Processing", p.475-476. + + Examples + -------- + Suppose we want to design a lowpass filter, with 65 dB attenuation + in the stop band. The Kaiser window parameter to be used in the + window method is computed by ``kaiser_beta(65)``: + + >>> from scipy.signal import kaiser_beta + >>> kaiser_beta(65) + 6.20426 + + """ + if a > 50: + beta = 0.1102 * (a - 8.7) + elif a > 21: + beta = 0.5842 * (a - 21) ** 0.4 + 0.07886 * (a - 21) + else: + beta = 0.0 + return beta + + +def kaiser_atten(numtaps, width): + """Compute the attenuation of a Kaiser FIR filter. + + Given the number of taps `N` and the transition width `width`, compute the + attenuation `a` in dB, given by Kaiser's formula: + + a = 2.285 * (N - 1) * pi * width + 7.95 + + Parameters + ---------- + numtaps : int + The number of taps in the FIR filter. + width : float + The desired width of the transition region between passband and + stopband (or, in general, at any discontinuity) for the filter, + expressed as a fraction of the Nyquist frequency. + + Returns + ------- + a : float + The attenuation of the ripple, in dB. + + See Also + -------- + kaiserord, kaiser_beta + + Examples + -------- + Suppose we want to design a FIR filter using the Kaiser window method + that will have 211 taps and a transition width of 9 Hz for a signal that + is sampled at 480 Hz. Expressed as a fraction of the Nyquist frequency, + the width is 9/(0.5*480) = 0.0375. The approximate attenuation (in dB) + is computed as follows: + + >>> from scipy.signal import kaiser_atten + >>> kaiser_atten(211, 0.0375) + 64.48099630593983 + + """ + a = 2.285 * (numtaps - 1) * np.pi * width + 7.95 + return a + + +def kaiserord(ripple, width): + """ + Determine the filter window parameters for the Kaiser window method. + + The parameters returned by this function are generally used to create + a finite impulse response filter using the window method, with either + `firwin` or `firwin2`. + + Parameters + ---------- + ripple : float + Upper bound for the deviation (in dB) of the magnitude of the + filter's frequency response from that of the desired filter (not + including frequencies in any transition intervals). That is, if w + is the frequency expressed as a fraction of the Nyquist frequency, + A(w) is the actual frequency response of the filter and D(w) is the + desired frequency response, the design requirement is that:: + + abs(A(w) - D(w))) < 10**(-ripple/20) + + for 0 <= w <= 1 and w not in a transition interval. + width : float + Width of transition region, normalized so that 1 corresponds to pi + radians / sample. That is, the frequency is expressed as a fraction + of the Nyquist frequency. + + Returns + ------- + numtaps : int + The length of the Kaiser window. + beta : float + The beta parameter for the Kaiser window. + + See Also + -------- + kaiser_beta, kaiser_atten + + Notes + ----- + There are several ways to obtain the Kaiser window: + + - ``signal.windows.kaiser(numtaps, beta, sym=True)`` + - ``signal.get_window(beta, numtaps)`` + - ``signal.get_window(('kaiser', beta), numtaps)`` + + The empirical equations discovered by Kaiser are used. 
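+
+    Concretely, the implementation below takes the attenuation as
+    ``A = abs(ripple)``, estimates the window length as
+    ``numtaps = ceil((A - 7.95) / (2.285 * pi * width) + 1)``, and obtains
+    ``beta`` from `kaiser_beta`.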
+ + References + ---------- + Oppenheim, Schafer, "Discrete-Time Signal Processing", pp.475-476. + + Examples + -------- + We will use the Kaiser window method to design a lowpass FIR filter + for a signal that is sampled at 1000 Hz. + + We want at least 65 dB rejection in the stop band, and in the pass + band the gain should vary no more than 0.5%. + + We want a cutoff frequency of 175 Hz, with a transition between the + pass band and the stop band of 24 Hz. That is, in the band [0, 163], + the gain varies no more than 0.5%, and in the band [187, 500], the + signal is attenuated by at least 65 dB. + + >>> import numpy as np + >>> from scipy.signal import kaiserord, firwin, freqz + >>> import matplotlib.pyplot as plt + >>> fs = 1000.0 + >>> cutoff = 175 + >>> width = 24 + + The Kaiser method accepts just a single parameter to control the pass + band ripple and the stop band rejection, so we use the more restrictive + of the two. In this case, the pass band ripple is 0.005, or 46.02 dB, + so we will use 65 dB as the design parameter. + + Use `kaiserord` to determine the length of the filter and the + parameter for the Kaiser window. + + >>> numtaps, beta = kaiserord(65, width/(0.5*fs)) + >>> numtaps + 167 + >>> beta + 6.20426 + + Use `firwin` to create the FIR filter. + + >>> taps = firwin(numtaps, cutoff, window=('kaiser', beta), + ... scale=False, fs=fs) + + Compute the frequency response of the filter. ``w`` is the array of + frequencies, and ``h`` is the corresponding complex array of frequency + responses. + + >>> w, h = freqz(taps, worN=8000) + >>> w *= 0.5*fs/np.pi # Convert w to Hz. + + Compute the deviation of the magnitude of the filter's response from + that of the ideal lowpass filter. Values in the transition region are + set to ``nan``, so they won't appear in the plot. + + >>> ideal = w < cutoff # The "ideal" frequency response. + >>> deviation = np.abs(np.abs(h) - ideal) + >>> deviation[(w > cutoff - 0.5*width) & (w < cutoff + 0.5*width)] = np.nan + + Plot the deviation. A close look at the left end of the stop band shows + that the requirement for 65 dB attenuation is violated in the first lobe + by about 0.125 dB. This is not unusual for the Kaiser window method. + + >>> plt.plot(w, 20*np.log10(np.abs(deviation))) + >>> plt.xlim(0, 0.5*fs) + >>> plt.ylim(-90, -60) + >>> plt.grid(alpha=0.25) + >>> plt.axhline(-65, color='r', ls='--', alpha=0.3) + >>> plt.xlabel('Frequency (Hz)') + >>> plt.ylabel('Deviation from ideal (dB)') + >>> plt.title('Lowpass Filter Frequency Response') + >>> plt.show() + + """ + A = abs(ripple) # in case somebody is confused as to what's meant + if A < 8: + # Formula for N is not valid in this range. + raise ValueError("Requested maximum ripple attenuation " + f"{A:f} is too small for the Kaiser formula.") + beta = kaiser_beta(A) + + # Kaiser's formula (as given in Oppenheim and Schafer) is for the filter + # order, so we have to add 1 to get the number of taps. + numtaps = (A - 7.95) / 2.285 / (np.pi * width) + 1 + + return int(ceil(numtaps)), beta + + +def firwin(numtaps, cutoff, *, width=None, window='hamming', pass_zero=True, + scale=True, fs=None): + """ + FIR filter design using the window method. + + This function computes the coefficients of a finite impulse response + filter. The filter will have linear phase; it will be Type I if + `numtaps` is odd and Type II if `numtaps` is even. 
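+    Both filter types have a symmetric impulse response.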
+ + Type II filters always have zero response at the Nyquist frequency, so a + ValueError exception is raised if firwin is called with `numtaps` even and + having a passband whose right end is at the Nyquist frequency. + + Parameters + ---------- + numtaps : int + Length of the filter (number of coefficients, i.e. the filter + order + 1). `numtaps` must be odd if a passband includes the + Nyquist frequency. + cutoff : float or 1-D array_like + Cutoff frequency of filter (expressed in the same units as `fs`) + OR an array of cutoff frequencies (that is, band edges). In the + former case, as a float, the cutoff frequency should correspond + with the half-amplitude point, where the attenuation will be -6dB. + In the latter case, the frequencies in `cutoff` should be positive + and monotonically increasing between 0 and `fs/2`. The values 0 + and `fs/2` must not be included in `cutoff`. It should be noted + that this is different than the behavior of `scipy.signal.iirdesign`, + where the cutoff is the half-power point (-3dB). + width : float or None, optional + If `width` is not None, then assume it is the approximate width + of the transition region (expressed in the same units as `fs`) + for use in Kaiser FIR filter design. In this case, the `window` + argument is ignored. + window : string or tuple of string and parameter values, optional + Desired window to use. See `scipy.signal.get_window` for a list + of windows and required parameters. + pass_zero : {True, False, 'bandpass', 'lowpass', 'highpass', 'bandstop'}, optional + If True, the gain at the frequency 0 (i.e., the "DC gain") is 1. + If False, the DC gain is 0. Can also be a string argument for the + desired filter type (equivalent to ``btype`` in IIR design functions). + + .. versionadded:: 1.3.0 + Support for string arguments. + scale : bool, optional + Set to True to scale the coefficients so that the frequency + response is exactly unity at a certain frequency. + That frequency is either: + + - 0 (DC) if the first passband starts at 0 (i.e. pass_zero + is True) + - `fs/2` (the Nyquist frequency) if the first passband ends at + `fs/2` (i.e the filter is a single band highpass filter); + center of first passband otherwise + + fs : float, optional + The sampling frequency of the signal. Each frequency in `cutoff` + must be between 0 and ``fs/2``. Default is 2. + + Returns + ------- + h : (numtaps,) ndarray + Coefficients of length `numtaps` FIR filter. + + Raises + ------ + ValueError + If any value in `cutoff` is less than or equal to 0 or greater + than or equal to ``fs/2``, if the values in `cutoff` are not strictly + monotonically increasing, or if `numtaps` is even but a passband + includes the Nyquist frequency. 
+ + See Also + -------- + firwin2 + firls + minimum_phase + remez + + Examples + -------- + Low-pass from 0 to f: + + >>> from scipy import signal + >>> numtaps = 3 + >>> f = 0.1 + >>> signal.firwin(numtaps, f) + array([ 0.06799017, 0.86401967, 0.06799017]) + + Use a specific window function: + + >>> signal.firwin(numtaps, f, window='nuttall') + array([ 3.56607041e-04, 9.99286786e-01, 3.56607041e-04]) + + High-pass ('stop' from 0 to f): + + >>> signal.firwin(numtaps, f, pass_zero=False) + array([-0.00859313, 0.98281375, -0.00859313]) + + Band-pass: + + >>> f1, f2 = 0.1, 0.2 + >>> signal.firwin(numtaps, [f1, f2], pass_zero=False) + array([ 0.06301614, 0.88770441, 0.06301614]) + + Band-stop: + + >>> signal.firwin(numtaps, [f1, f2]) + array([-0.00801395, 1.0160279 , -0.00801395]) + + Multi-band (passbands are [0, f1], [f2, f3] and [f4, 1]): + + >>> f3, f4 = 0.3, 0.4 + >>> signal.firwin(numtaps, [f1, f2, f3, f4]) + array([-0.01376344, 1.02752689, -0.01376344]) + + Multi-band (passbands are [f1, f2] and [f3,f4]): + + >>> signal.firwin(numtaps, [f1, f2, f3, f4], pass_zero=False) + array([ 0.04890915, 0.91284326, 0.04890915]) + + """ + # The major enhancements to this function added in November 2010 were + # developed by Tom Krauss (see ticket #902). + fs = _validate_fs(fs, allow_none=True) + fs = 2 if fs is None else fs + + nyq = 0.5 * fs + + cutoff = np.atleast_1d(cutoff) / float(nyq) + + # Check for invalid input. + if cutoff.ndim > 1: + raise ValueError("The cutoff argument must be at most " + "one-dimensional.") + if cutoff.size == 0: + raise ValueError("At least one cutoff frequency must be given.") + if cutoff.min() <= 0 or cutoff.max() >= 1: + raise ValueError("Invalid cutoff frequency: frequencies must be " + "greater than 0 and less than fs/2.") + if np.any(np.diff(cutoff) <= 0): + raise ValueError("Invalid cutoff frequencies: the frequencies " + "must be strictly increasing.") + + if width is not None: + # A width was given. Find the beta parameter of the Kaiser window + # and set `window`. This overrides the value of `window` passed in. + atten = kaiser_atten(numtaps, float(width) / nyq) + beta = kaiser_beta(atten) + window = ('kaiser', beta) + + if isinstance(pass_zero, str): + if pass_zero in ('bandstop', 'lowpass'): + if pass_zero == 'lowpass': + if cutoff.size != 1: + raise ValueError('cutoff must have one element if ' + f'pass_zero=="lowpass", got {cutoff.shape}') + elif cutoff.size <= 1: + raise ValueError('cutoff must have at least two elements if ' + f'pass_zero=="bandstop", got {cutoff.shape}') + pass_zero = True + elif pass_zero in ('bandpass', 'highpass'): + if pass_zero == 'highpass': + if cutoff.size != 1: + raise ValueError('cutoff must have one element if ' + f'pass_zero=="highpass", got {cutoff.shape}') + elif cutoff.size <= 1: + raise ValueError('cutoff must have at least two elements if ' + f'pass_zero=="bandpass", got {cutoff.shape}') + pass_zero = False + else: + raise ValueError('pass_zero must be True, False, "bandpass", ' + '"lowpass", "highpass", or "bandstop", got ' + f'{pass_zero}') + pass_zero = bool(operator.index(pass_zero)) # ensure bool-like + + pass_nyquist = bool(cutoff.size & 1) ^ pass_zero + if pass_nyquist and numtaps % 2 == 0: + raise ValueError("A filter with an even number of coefficients must " + "have zero response at the Nyquist frequency.") + + # Insert 0 and/or 1 at the ends of cutoff so that the length of cutoff + # is even, and each pair in cutoff corresponds to passband. 
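+    # For example (with the default fs=2): a low-pass specification
+    # cutoff=[0.3] with pass_zero=True becomes [0.0, 0.3], a single passband
+    # starting at DC, while a high-pass specification cutoff=[0.3] with
+    # pass_zero=False becomes [0.3, 1.0], a single passband ending at Nyquist.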
+ cutoff = np.hstack(([0.0] * pass_zero, cutoff, [1.0] * pass_nyquist)) + + # `bands` is a 2-D array; each row gives the left and right edges of + # a passband. + bands = cutoff.reshape(-1, 2) + + # Build up the coefficients. + alpha = 0.5 * (numtaps - 1) + m = np.arange(0, numtaps) - alpha + h = 0 + for left, right in bands: + h += right * sinc(right * m) + h -= left * sinc(left * m) + + # Get and apply the window function. + from .windows import get_window + win = get_window(window, numtaps, fftbins=False) + h *= win + + # Now handle scaling if desired. + if scale: + # Get the first passband. + left, right = bands[0] + if left == 0: + scale_frequency = 0.0 + elif right == 1: + scale_frequency = 1.0 + else: + scale_frequency = 0.5 * (left + right) + c = np.cos(np.pi * m * scale_frequency) + s = np.sum(h * c) + h /= s + + return h + + +# Original version of firwin2 from scipy ticket #457, submitted by "tash". +# +# Rewritten by Warren Weckesser, 2010. +def firwin2(numtaps, freq, gain, *, nfreqs=None, window='hamming', + antisymmetric=False, fs=None): + """ + FIR filter design using the window method. + + From the given frequencies `freq` and corresponding gains `gain`, + this function constructs an FIR filter with linear phase and + (approximately) the given frequency response. + + Parameters + ---------- + numtaps : int + The number of taps in the FIR filter. `numtaps` must be less than + `nfreqs`. + freq : array_like, 1-D + The frequency sampling points. Typically 0.0 to 1.0 with 1.0 being + Nyquist. The Nyquist frequency is half `fs`. + The values in `freq` must be nondecreasing. A value can be repeated + once to implement a discontinuity. The first value in `freq` must + be 0, and the last value must be ``fs/2``. Values 0 and ``fs/2`` must + not be repeated. + gain : array_like + The filter gains at the frequency sampling points. Certain + constraints to gain values, depending on the filter type, are applied, + see Notes for details. + nfreqs : int, optional + The size of the interpolation mesh used to construct the filter. + For most efficient behavior, this should be a power of 2 plus 1 + (e.g, 129, 257, etc). The default is one more than the smallest + power of 2 that is not less than `numtaps`. `nfreqs` must be greater + than `numtaps`. + window : string or (string, float) or float, or None, optional + Window function to use. Default is "hamming". See + `scipy.signal.get_window` for the complete list of possible values. + If None, no window function is applied. + antisymmetric : bool, optional + Whether resulting impulse response is symmetric/antisymmetric. + See Notes for more details. + fs : float, optional + The sampling frequency of the signal. Each frequency in `cutoff` + must be between 0 and ``fs/2``. Default is 2. + + Returns + ------- + taps : ndarray + The filter coefficients of the FIR filter, as a 1-D array of length + `numtaps`. + + See Also + -------- + firls + firwin + minimum_phase + remez + + Notes + ----- + From the given set of frequencies and gains, the desired response is + constructed in the frequency domain. The inverse FFT is applied to the + desired response to create the associated convolution kernel, and the + first `numtaps` coefficients of this kernel, scaled by `window`, are + returned. + + The FIR filter will have linear phase. The type of filter is determined by + the value of 'numtaps` and `antisymmetric` flag. 
+ There are four possible combinations: + + - odd `numtaps`, `antisymmetric` is False, type I filter is produced + - even `numtaps`, `antisymmetric` is False, type II filter is produced + - odd `numtaps`, `antisymmetric` is True, type III filter is produced + - even `numtaps`, `antisymmetric` is True, type IV filter is produced + + Magnitude response of all but type I filters are subjects to following + constraints: + + - type II -- zero at the Nyquist frequency + - type III -- zero at zero and Nyquist frequencies + - type IV -- zero at zero frequency + + .. versionadded:: 0.9.0 + + References + ---------- + .. [1] Oppenheim, A. V. and Schafer, R. W., "Discrete-Time Signal + Processing", Prentice-Hall, Englewood Cliffs, New Jersey (1989). + (See, for example, Section 7.4.) + + .. [2] Smith, Steven W., "The Scientist and Engineer's Guide to Digital + Signal Processing", Ch. 17. http://www.dspguide.com/ch17/1.htm + + Examples + -------- + A lowpass FIR filter with a response that is 1 on [0.0, 0.5], and + that decreases linearly on [0.5, 1.0] from 1 to 0: + + >>> from scipy import signal + >>> taps = signal.firwin2(150, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0]) + >>> print(taps[72:78]) + [-0.02286961 -0.06362756 0.57310236 0.57310236 -0.06362756 -0.02286961] + + """ + fs = _validate_fs(fs, allow_none=True) + fs = 2 if fs is None else fs + nyq = 0.5 * fs + + if len(freq) != len(gain): + raise ValueError('freq and gain must be of same length.') + + if nfreqs is not None and numtaps >= nfreqs: + raise ValueError(('ntaps must be less than nfreqs, but firwin2 was ' + 'called with ntaps=%d and nfreqs=%s') % + (numtaps, nfreqs)) + + if freq[0] != 0 or freq[-1] != nyq: + raise ValueError('freq must start with 0 and end with fs/2.') + d = np.diff(freq) + if (d < 0).any(): + raise ValueError('The values in freq must be nondecreasing.') + d2 = d[:-1] + d[1:] + if (d2 == 0).any(): + raise ValueError('A value in freq must not occur more than twice.') + if freq[1] == 0: + raise ValueError('Value 0 must not be repeated in freq') + if freq[-2] == nyq: + raise ValueError('Value fs/2 must not be repeated in freq') + + if antisymmetric: + if numtaps % 2 == 0: + ftype = 4 + else: + ftype = 3 + else: + if numtaps % 2 == 0: + ftype = 2 + else: + ftype = 1 + + if ftype == 2 and gain[-1] != 0.0: + raise ValueError("A Type II filter must have zero gain at the " + "Nyquist frequency.") + elif ftype == 3 and (gain[0] != 0.0 or gain[-1] != 0.0): + raise ValueError("A Type III filter must have zero gain at zero " + "and Nyquist frequencies.") + elif ftype == 4 and gain[0] != 0.0: + raise ValueError("A Type IV filter must have zero gain at zero " + "frequency.") + + if nfreqs is None: + nfreqs = 1 + 2 ** int(ceil(log(numtaps, 2))) + + if (d == 0).any(): + # Tweak any repeated values in freq so that interp works. + freq = np.array(freq, copy=True) + eps = np.finfo(float).eps * nyq + for k in range(len(freq) - 1): + if freq[k] == freq[k + 1]: + freq[k] = freq[k] - eps + freq[k + 1] = freq[k + 1] + eps + # Check if freq is strictly increasing after tweak + d = np.diff(freq) + if (d <= 0).any(): + raise ValueError("freq cannot contain numbers that are too close " + "(within eps * (fs/2): " + f"{eps}) to a repeated value") + + # Linearly interpolate the desired response on a uniform mesh `x`. + x = np.linspace(0.0, nyq, nfreqs) + fx = np.interp(x, freq, gain) + + # Adjust the phases of the coefficients so that the first `ntaps` of the + # inverse FFT are the desired filter coefficients. + shift = np.exp(-(numtaps - 1) / 2. 
* 1.j * np.pi * x / nyq) + if ftype > 2: + shift *= 1j + + fx2 = fx * shift + + # Use irfft to compute the inverse FFT. + out_full = irfft(fx2) + + if window is not None: + # Create the window to apply to the filter coefficients. + from .windows import get_window + wind = get_window(window, numtaps, fftbins=False) + else: + wind = 1 + + # Keep only the first `numtaps` coefficients in `out`, and multiply by + # the window. + out = out_full[:numtaps] * wind + + if ftype == 3: + out[out.size // 2] = 0.0 + + return out + + +def remez(numtaps, bands, desired, *, weight=None, type='bandpass', + maxiter=25, grid_density=16, fs=None): + """ + Calculate the minimax optimal filter using the Remez exchange algorithm. + + Calculate the filter-coefficients for the finite impulse response + (FIR) filter whose transfer function minimizes the maximum error + between the desired gain and the realized gain in the specified + frequency bands using the Remez exchange algorithm. + + Parameters + ---------- + numtaps : int + The desired number of taps in the filter. The number of taps is + the number of terms in the filter, or the filter order plus one. + bands : array_like + A monotonic sequence containing the band edges. + All elements must be non-negative and less than half the sampling + frequency as given by `fs`. + desired : array_like + A sequence half the size of bands containing the desired gain + in each of the specified bands. + weight : array_like, optional + A relative weighting to give to each band region. The length of + `weight` has to be half the length of `bands`. + type : {'bandpass', 'differentiator', 'hilbert'}, optional + The type of filter: + + * 'bandpass' : flat response in bands. This is the default. + + * 'differentiator' : frequency proportional response in bands. + + * 'hilbert' : filter with odd symmetry, that is, type III + (for even order) or type IV (for odd order) + linear phase filters. + + maxiter : int, optional + Maximum number of iterations of the algorithm. Default is 25. + grid_density : int, optional + Grid density. The dense grid used in `remez` is of size + ``(numtaps + 1) * grid_density``. Default is 16. + fs : float, optional + The sampling frequency of the signal. Default is 1. + + Returns + ------- + out : ndarray + A rank-1 array containing the coefficients of the optimal + (in a minimax sense) filter. + + See Also + -------- + firls + firwin + firwin2 + minimum_phase + + References + ---------- + .. [1] J. H. McClellan and T. W. Parks, "A unified approach to the + design of optimum FIR linear phase digital filters", + IEEE Trans. Circuit Theory, vol. CT-20, pp. 697-701, 1973. + .. [2] J. H. McClellan, T. W. Parks and L. R. Rabiner, "A Computer + Program for Designing Optimum FIR Linear Phase Digital + Filters", IEEE Trans. Audio Electroacoust., vol. AU-21, + pp. 506-525, 1973. + + Examples + -------- + In these examples, `remez` is used to design low-pass, high-pass, + band-pass and band-stop filters. The parameters that define each filter + are the filter order, the band boundaries, the transition widths of the + boundaries, the desired gains in each band, and the sampling frequency. + + We'll use a sample frequency of 22050 Hz in all the examples. In each + example, the desired gain in each band is either 0 (for a stop band) + or 1 (for a pass band). + + `freqz` is used to compute the frequency response of each filter, and + the utility function ``plot_response`` defined below is used to plot + the response. 
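+
+    Note that the transition bands are specified implicitly: each one is the
+    gap left between consecutive band edges (e.g. between ``cutoff`` and
+    ``cutoff + trans_width`` in the low-pass design below), and a desired gain
+    is given only for the bands that are retained in the specification.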
+ + >>> import numpy as np + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + + >>> fs = 22050 # Sample rate, Hz + + >>> def plot_response(w, h, title): + ... "Utility function to plot response functions" + ... fig = plt.figure() + ... ax = fig.add_subplot(111) + ... ax.plot(w, 20*np.log10(np.abs(h))) + ... ax.set_ylim(-40, 5) + ... ax.grid(True) + ... ax.set_xlabel('Frequency (Hz)') + ... ax.set_ylabel('Gain (dB)') + ... ax.set_title(title) + + The first example is a low-pass filter, with cutoff frequency 8 kHz. + The filter length is 325, and the transition width from pass to stop + is 100 Hz. + + >>> cutoff = 8000.0 # Desired cutoff frequency, Hz + >>> trans_width = 100 # Width of transition from pass to stop, Hz + >>> numtaps = 325 # Size of the FIR filter. + >>> taps = signal.remez(numtaps, [0, cutoff, cutoff + trans_width, 0.5*fs], + ... [1, 0], fs=fs) + >>> w, h = signal.freqz(taps, [1], worN=2000, fs=fs) + >>> plot_response(w, h, "Low-pass Filter") + >>> plt.show() + + This example shows a high-pass filter: + + >>> cutoff = 2000.0 # Desired cutoff frequency, Hz + >>> trans_width = 250 # Width of transition from pass to stop, Hz + >>> numtaps = 125 # Size of the FIR filter. + >>> taps = signal.remez(numtaps, [0, cutoff - trans_width, cutoff, 0.5*fs], + ... [0, 1], fs=fs) + >>> w, h = signal.freqz(taps, [1], worN=2000, fs=fs) + >>> plot_response(w, h, "High-pass Filter") + >>> plt.show() + + This example shows a band-pass filter with a pass-band from 2 kHz to + 5 kHz. The transition width is 260 Hz and the length of the filter + is 63, which is smaller than in the other examples: + + >>> band = [2000, 5000] # Desired pass band, Hz + >>> trans_width = 260 # Width of transition from pass to stop, Hz + >>> numtaps = 63 # Size of the FIR filter. + >>> edges = [0, band[0] - trans_width, band[0], band[1], + ... band[1] + trans_width, 0.5*fs] + >>> taps = signal.remez(numtaps, edges, [0, 1, 0], fs=fs) + >>> w, h = signal.freqz(taps, [1], worN=2000, fs=fs) + >>> plot_response(w, h, "Band-pass Filter") + >>> plt.show() + + The low order leads to higher ripple and less steep transitions. + + The next example shows a band-stop filter. + + >>> band = [6000, 8000] # Desired stop band, Hz + >>> trans_width = 200 # Width of transition from pass to stop, Hz + >>> numtaps = 175 # Size of the FIR filter. + >>> edges = [0, band[0] - trans_width, band[0], band[1], + ... band[1] + trans_width, 0.5*fs] + >>> taps = signal.remez(numtaps, edges, [1, 0, 1], fs=fs) + >>> w, h = signal.freqz(taps, [1], worN=2000, fs=fs) + >>> plot_response(w, h, "Band-stop Filter") + >>> plt.show() + + """ + fs = _validate_fs(fs, allow_none=True) + fs = 1.0 if fs is None else fs + + # Convert type + try: + tnum = {'bandpass': 1, 'differentiator': 2, 'hilbert': 3}[type] + except KeyError as e: + raise ValueError("Type must be 'bandpass', 'differentiator', " + "or 'hilbert'") from e + + # Convert weight + if weight is None: + weight = [1] * len(desired) + + bands = np.asarray(bands).copy() + return _sigtools._remez(numtaps, bands, desired, weight, tnum, fs, + maxiter, grid_density) + + +def firls(numtaps, bands, desired, *, weight=None, fs=None): + """ + FIR filter design using least-squares error minimization. 
+ + Calculate the filter coefficients for the linear-phase finite + impulse response (FIR) filter which has the best approximation + to the desired frequency response described by `bands` and + `desired` in the least squares sense (i.e., the integral of the + weighted mean-squared error within the specified bands is + minimized). + + Parameters + ---------- + numtaps : int + The number of taps in the FIR filter. `numtaps` must be odd. + bands : array_like + A monotonic nondecreasing sequence containing the band edges in + Hz. All elements must be non-negative and less than or equal to + the Nyquist frequency given by `nyq`. The bands are specified as + frequency pairs, thus, if using a 1D array, its length must be + even, e.g., `np.array([0, 1, 2, 3, 4, 5])`. Alternatively, the + bands can be specified as an nx2 sized 2D array, where n is the + number of bands, e.g, `np.array([[0, 1], [2, 3], [4, 5]])`. + desired : array_like + A sequence the same size as `bands` containing the desired gain + at the start and end point of each band. + weight : array_like, optional + A relative weighting to give to each band region when solving + the least squares problem. `weight` has to be half the size of + `bands`. + fs : float, optional + The sampling frequency of the signal. Each frequency in `bands` + must be between 0 and ``fs/2`` (inclusive). Default is 2. + + Returns + ------- + coeffs : ndarray + Coefficients of the optimal (in a least squares sense) FIR filter. + + See Also + -------- + firwin + firwin2 + minimum_phase + remez + + Notes + ----- + This implementation follows the algorithm given in [1]_. + As noted there, least squares design has multiple advantages: + + 1. Optimal in a least-squares sense. + 2. Simple, non-iterative method. + 3. The general solution can obtained by solving a linear + system of equations. + 4. Allows the use of a frequency dependent weighting function. + + This function constructs a Type I linear phase FIR filter, which + contains an odd number of `coeffs` satisfying for :math:`n < numtaps`: + + .. math:: coeffs(n) = coeffs(numtaps - 1 - n) + + The odd number of coefficients and filter symmetry avoid boundary + conditions that could otherwise occur at the Nyquist and 0 frequencies + (e.g., for Type II, III, or IV variants). + + .. versionadded:: 0.18 + + References + ---------- + .. [1] Ivan Selesnick, Linear-Phase Fir Filter Design By Least Squares. + OpenStax CNX. Aug 9, 2005. + https://eeweb.engineering.nyu.edu/iselesni/EL713/firls/firls.pdf + + Examples + -------- + We want to construct a band-pass filter. Note that the behavior in the + frequency ranges between our stop bands and pass bands is unspecified, + and thus may overshoot depending on the parameters of our filter: + + >>> import numpy as np + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + >>> fig, axs = plt.subplots(2) + >>> fs = 10.0 # Hz + >>> desired = (0, 0, 1, 1, 0, 0) + >>> for bi, bands in enumerate(((0, 1, 2, 3, 4, 5), (0, 1, 2, 4, 4.5, 5))): + ... fir_firls = signal.firls(73, bands, desired, fs=fs) + ... fir_remez = signal.remez(73, bands, desired[::2], fs=fs) + ... fir_firwin2 = signal.firwin2(73, bands, desired, fs=fs) + ... hs = list() + ... ax = axs[bi] + ... for fir in (fir_firls, fir_remez, fir_firwin2): + ... freq, response = signal.freqz(fir) + ... hs.append(ax.semilogy(0.5*fs*freq/np.pi, np.abs(response))[0]) + ... for band, gains in zip(zip(bands[::2], bands[1::2]), + ... zip(desired[::2], desired[1::2])): + ... 
ax.semilogy(band, np.maximum(gains, 1e-7), 'k--', linewidth=2) + ... if bi == 0: + ... ax.legend(hs, ('firls', 'remez', 'firwin2'), + ... loc='lower center', frameon=False) + ... else: + ... ax.set_xlabel('Frequency (Hz)') + ... ax.grid(True) + ... ax.set(title='Band-pass %d-%d Hz' % bands[2:4], ylabel='Magnitude') + ... + >>> fig.tight_layout() + >>> plt.show() + + """ + fs = _validate_fs(fs, allow_none=True) + fs = 2 if fs is None else fs + nyq = 0.5 * fs + + numtaps = int(numtaps) + if numtaps % 2 == 0 or numtaps < 1: + raise ValueError("numtaps must be odd and >= 1") + M = (numtaps-1) // 2 + + # normalize bands 0->1 and make it 2 columns + nyq = float(nyq) + if nyq <= 0: + raise ValueError(f'nyq must be positive, got {nyq} <= 0.') + bands = np.asarray(bands).flatten() / nyq + if len(bands) % 2 != 0: + raise ValueError("bands must contain frequency pairs.") + if (bands < 0).any() or (bands > 1).any(): + raise ValueError("bands must be between 0 and 1 relative to Nyquist") + bands.shape = (-1, 2) + + # check remaining params + desired = np.asarray(desired).flatten() + if bands.size != desired.size: + raise ValueError( + f"desired must have one entry per frequency, got {desired.size} " + f"gains for {bands.size} frequencies." + ) + desired.shape = (-1, 2) + if (np.diff(bands) <= 0).any() or (np.diff(bands[:, 0]) < 0).any(): + raise ValueError("bands must be monotonically nondecreasing and have " + "width > 0.") + if (bands[:-1, 1] > bands[1:, 0]).any(): + raise ValueError("bands must not overlap.") + if (desired < 0).any(): + raise ValueError("desired must be non-negative.") + if weight is None: + weight = np.ones(len(desired)) + weight = np.asarray(weight).flatten() + if len(weight) != len(desired): + raise ValueError("weight must be the same size as the number of " + f"band pairs ({len(bands)}).") + if (weight < 0).any(): + raise ValueError("weight must be non-negative.") + + # Set up the linear matrix equation to be solved, Qa = b + + # We can express Q(k,n) = 0.5 Q1(k,n) + 0.5 Q2(k,n) + # where Q1(k,n)=q(k-n) and Q2(k,n)=q(k+n), i.e. a Toeplitz plus Hankel. + + # We omit the factor of 0.5 above, instead adding it during coefficient + # calculation. + + # We also omit the 1/π from both Q and b equations, as they cancel + # during solving. + + # We have that: + # q(n) = 1/π ∫W(ω)cos(nω)dω (over 0->π) + # Using our normalization ω=πf and with a constant weight W over each + # interval f1->f2 we get: + # q(n) = W∫cos(πnf)df (0->1) = Wf sin(πnf)/πnf + # integrated over each f1->f2 pair (i.e., value at f2 - value at f1). + n = np.arange(numtaps)[:, np.newaxis, np.newaxis] + q = np.dot(np.diff(np.sinc(bands * n) * bands, axis=2)[:, :, 0], weight) + + # Now we assemble our sum of Toeplitz and Hankel + Q1 = toeplitz(q[:M+1]) + Q2 = hankel(q[:M+1], q[M:]) + Q = Q1 + Q2 + + # Now for b(n) we have that: + # b(n) = 1/π ∫ W(ω)D(ω)cos(nω)dω (over 0->π) + # Using our normalization ω=πf and with a constant weight W over each + # interval and a linear term for D(ω) we get (over each f1->f2 interval): + # b(n) = W ∫ (mf+c)cos(πnf)df + # = f(mf+c)sin(πnf)/πnf + mf**2 cos(nπf)/(πnf)**2 + # integrated over each f1->f2 pair (i.e., value at f2 - value at f1). 
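+    # Note that np.sinc is the normalized sinc, np.sinc(x) = sin(pi*x)/(pi*x),
+    # so terms of the form bands * np.sinc(bands * n) evaluate
+    # f*sin(pi*n*f)/(pi*n*f) at the band edges, matching the expressions above.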
+ n = n[:M + 1] # only need this many coefficients here + # Choose m and c such that we are at the start and end weights + m = (np.diff(desired, axis=1) / np.diff(bands, axis=1)) + c = desired[:, [0]] - bands[:, [0]] * m + b = bands * (m*bands + c) * np.sinc(bands * n) + # Use L'Hospital's rule here for cos(nπf)/(πnf)**2 @ n=0 + b[0] -= m * bands * bands / 2. + b[1:] += m * np.cos(n[1:] * np.pi * bands) / (np.pi * n[1:]) ** 2 + b = np.dot(np.diff(b, axis=2)[:, :, 0], weight) + + # Now we can solve the equation + try: # try the fast way + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + a = solve(Q, b, assume_a="pos", check_finite=False) + for ww in w: + if (ww.category == LinAlgWarning and + str(ww.message).startswith('Ill-conditioned matrix')): + raise LinAlgError(str(ww.message)) + except LinAlgError: # in case Q is rank deficient + # This is faster than pinvh, even though we don't explicitly use + # the symmetry here. gelsy was faster than gelsd and gelss in + # some non-exhaustive tests. + a = lstsq(Q, b, lapack_driver='gelsy')[0] + + # make coefficients symmetric (linear phase) + coeffs = np.hstack((a[:0:-1], 2 * a[0], a[1:])) + return coeffs + + +def _dhtm(mag): + """Compute the modified 1-D discrete Hilbert transform + + Parameters + ---------- + mag : ndarray + The magnitude spectrum. Should be 1-D with an even length, and + preferably a fast length for FFT/IFFT. + """ + # Adapted based on code by Niranjan Damera-Venkata, + # Brian L. Evans and Shawn R. McCaslin (see refs for `minimum_phase`) + sig = np.zeros(len(mag)) + # Leave Nyquist and DC at 0, knowing np.abs(fftfreq(N)[midpt]) == 0.5 + midpt = len(mag) // 2 + sig[1:midpt] = 1 + sig[midpt+1:] = -1 + # eventually if we want to support complex filters, we will need a + # np.abs() on the mag inside the log, and should remove the .real + recon = ifft(mag * np.exp(fft(sig * ifft(np.log(mag))))).real + return recon + + +def minimum_phase(h: np.ndarray, + method: Literal['homomorphic', 'hilbert'] = 'homomorphic', + n_fft: int | None = None, *, half: bool = True) -> np.ndarray: + """Convert a linear-phase FIR filter to minimum phase + + Parameters + ---------- + h : array + Linear-phase FIR filter coefficients. + method : {'hilbert', 'homomorphic'} + The provided methods are: + + 'homomorphic' (default) + This method [4]_ [5]_ works best with filters with an + odd number of taps, and the resulting minimum phase filter + will have a magnitude response that approximates the square + root of the original filter's magnitude response using half + the number of taps when ``half=True`` (default), or the + original magnitude spectrum using the same number of taps + when ``half=False``. + + 'hilbert' + This method [1]_ is designed to be used with equiripple + filters (e.g., from `remez`) with unity or zero gain + regions. + + n_fft : int + The number of points to use for the FFT. Should be at least a + few times larger than the signal length (see Notes). + half : bool + If ``True``, create a filter that is half the length of the original, with a + magnitude spectrum that is the square root of the original. If ``False``, + create a filter that is the same length as the original, with a magnitude + spectrum that is designed to match the original (only supported when + ``method='homomorphic'``). + + .. versionadded:: 1.14.0 + + Returns + ------- + h_minimum : array + The minimum-phase version of the filter, with length + ``(len(h) + 1) // 2`` when ``half is True`` or ``len(h)`` otherwise. 
+ + See Also + -------- + firwin + firwin2 + remez + + Notes + ----- + Both the Hilbert [1]_ or homomorphic [4]_ [5]_ methods require selection + of an FFT length to estimate the complex cepstrum of the filter. + + In the case of the Hilbert method, the deviation from the ideal + spectrum ``epsilon`` is related to the number of stopband zeros + ``n_stop`` and FFT length ``n_fft`` as:: + + epsilon = 2. * n_stop / n_fft + + For example, with 100 stopband zeros and a FFT length of 2048, + ``epsilon = 0.0976``. If we conservatively assume that the number of + stopband zeros is one less than the filter length, we can take the FFT + length to be the next power of 2 that satisfies ``epsilon=0.01`` as:: + + n_fft = 2 ** int(np.ceil(np.log2(2 * (len(h) - 1) / 0.01))) + + This gives reasonable results for both the Hilbert and homomorphic + methods, and gives the value used when ``n_fft=None``. + + Alternative implementations exist for creating minimum-phase filters, + including zero inversion [2]_ and spectral factorization [3]_ [4]_. + For more information, see `this DSPGuru page + `__. + + References + ---------- + .. [1] N. Damera-Venkata and B. L. Evans, "Optimal design of real and + complex minimum phase digital FIR filters," Acoustics, Speech, + and Signal Processing, 1999. Proceedings., 1999 IEEE International + Conference on, Phoenix, AZ, 1999, pp. 1145-1148 vol.3. + :doi:`10.1109/ICASSP.1999.756179` + .. [2] X. Chen and T. W. Parks, "Design of optimal minimum phase FIR + filters by direct factorization," Signal Processing, + vol. 10, no. 4, pp. 369-383, Jun. 1986. + .. [3] T. Saramaki, "Finite Impulse Response Filter Design," in + Handbook for Digital Signal Processing, chapter 4, + New York: Wiley-Interscience, 1993. + .. [4] J. S. Lim, Advanced Topics in Signal Processing. + Englewood Cliffs, N.J.: Prentice Hall, 1988. + .. [5] A. V. Oppenheim, R. W. Schafer, and J. R. Buck, + "Discrete-Time Signal Processing," 3rd edition. + Upper Saddle River, N.J.: Pearson, 2009. + + Examples + -------- + Create an optimal linear-phase low-pass filter `h` with a transition band of + [0.2, 0.3] (assuming a Nyquist frequency of 1): + + >>> import numpy as np + >>> from scipy.signal import remez, minimum_phase, freqz, group_delay + >>> import matplotlib.pyplot as plt + >>> freq = [0, 0.2, 0.3, 1.0] + >>> desired = [1, 0] + >>> h_linear = remez(151, freq, desired, fs=2) + + Convert it to minimum phase: + + >>> h_hil = minimum_phase(h_linear, method='hilbert') + >>> h_hom = minimum_phase(h_linear, method='homomorphic') + >>> h_hom_full = minimum_phase(h_linear, method='homomorphic', half=False) + + Compare the impulse and frequency response of the four filters: + + >>> fig0, ax0 = plt.subplots(figsize=(6, 3), tight_layout=True) + >>> fig1, axs = plt.subplots(3, sharex='all', figsize=(6, 6), tight_layout=True) + >>> ax0.set_title("Impulse response") + >>> ax0.set(xlabel='Samples', ylabel='Amplitude', xlim=(0, len(h_linear) - 1)) + >>> axs[0].set_title("Frequency Response") + >>> axs[0].set(xlim=(0, .65), ylabel="Magnitude / dB") + >>> axs[1].set(ylabel="Phase / rad") + >>> axs[2].set(ylabel="Group Delay / samples", ylim=(-31, 81), + ... xlabel='Normalized Frequency (Nyqist frequency: 1)') + >>> for h, lb in ((h_linear, f'Linear ({len(h_linear)})'), + ... (h_hil, f'Min-Hilbert ({len(h_hil)})'), + ... (h_hom, f'Min-Homomorphic ({len(h_hom)})'), + ... (h_hom_full, f'Min-Homom. Full ({len(h_hom_full)})')): + ... w_H, H = freqz(h, fs=2) + ... w_gd, gd = group_delay((h, 1), fs=2) + ... + ... 
alpha = 1.0 if lb == 'linear' else 0.5 # full opacity for 'linear' line + ... ax0.plot(h, '.-', alpha=alpha, label=lb) + ... axs[0].plot(w_H, 20 * np.log10(np.abs(H)), alpha=alpha) + ... axs[1].plot(w_H, np.unwrap(np.angle(H)), alpha=alpha, label=lb) + ... axs[2].plot(w_gd, gd, alpha=alpha) + >>> ax0.grid(True) + >>> ax0.legend(title='Filter Phase (Order)') + >>> axs[1].legend(title='Filter Phase (Order)', loc='lower right') + >>> for ax_ in axs: # shade transition band: + ... ax_.axvspan(freq[1], freq[2], color='y', alpha=.25) + ... ax_.grid(True) + >>> plt.show() + + The impulse response and group delay plot depict the 75 sample delay of the linear + phase filter `h`. The phase should also be linear in the stop band--due to the small + magnitude, numeric noise dominates there. Furthermore, the plots show that the + minimum phase filters clearly show a reduced (negative) phase slope in the pass and + transition band. The plots also illustrate that the filter with parameters + ``method='homomorphic', half=False`` has same order and magnitude response as the + linear filter `h` whereas the other minimum phase filters have only half the order + and the square root of the magnitude response. + """ + h = np.asarray(h) + if np.iscomplexobj(h): + raise ValueError('Complex filters not supported') + if h.ndim != 1 or h.size <= 2: + raise ValueError('h must be 1-D and at least 2 samples long') + n_half = len(h) // 2 + if not np.allclose(h[-n_half:][::-1], h[:n_half]): + warnings.warn('h does not appear to by symmetric, conversion may fail', + RuntimeWarning, stacklevel=2) + if not isinstance(method, str) or method not in \ + ('homomorphic', 'hilbert',): + raise ValueError(f'method must be "homomorphic" or "hilbert", got {method!r}') + if method == "hilbert" and not half: + raise ValueError("`half=False` is only supported when `method='homomorphic'`") + if n_fft is None: + n_fft = 2 ** int(np.ceil(np.log2(2 * (len(h) - 1) / 0.01))) + n_fft = int(n_fft) + if n_fft < len(h): + raise ValueError(f'n_fft must be at least len(h)=={len(h)}') + if method == 'hilbert': + w = np.arange(n_fft) * (2 * np.pi / n_fft * n_half) + H = np.real(fft(h, n_fft) * np.exp(1j * w)) + dp = max(H) - 1 + ds = 0 - min(H) + S = 4. 
/ (np.sqrt(1+dp+ds) + np.sqrt(1-dp+ds)) ** 2 + H += ds + H *= S + H = np.sqrt(H, out=H) + H += 1e-10 # ensure that the log does not explode + h_minimum = _dhtm(H) + else: # method == 'homomorphic' + # zero-pad; calculate the DFT + h_temp = np.abs(fft(h, n_fft)) + # take 0.25*log(|H|**2) = 0.5*log(|H|) + h_temp += 1e-7 * h_temp[h_temp > 0].min() # don't let log blow up + np.log(h_temp, out=h_temp) + if half: # halving of magnitude spectrum optional + h_temp *= 0.5 + # IDFT + h_temp = ifft(h_temp).real + # multiply pointwise by the homomorphic filter + # lmin[n] = 2u[n] - d[n] + # i.e., double the positive frequencies and zero out the negative ones; + # Oppenheim+Shafer 3rd ed p991 eq13.42b and p1004 fig13.7 + win = np.zeros(n_fft) + win[0] = 1 + stop = n_fft // 2 + win[1:stop] = 2 + if n_fft % 2: + win[stop] = 1 + h_temp *= win + h_temp = ifft(np.exp(fft(h_temp))) + h_minimum = h_temp.real + n_out = (n_half + len(h) % 2) if half else len(h) + return h_minimum[:n_out] diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/_lti_conversion.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/_lti_conversion.py new file mode 100644 index 0000000000000000000000000000000000000000..52c6efbbfa53288934d12918566016db6c742ef1 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/_lti_conversion.py @@ -0,0 +1,533 @@ +""" +ltisys -- a collection of functions to convert linear time invariant systems +from one representation to another. +""" + +import numpy as np +from numpy import (r_, eye, atleast_2d, poly, dot, + asarray, zeros, array, outer) +from scipy import linalg + +from ._filter_design import tf2zpk, zpk2tf, normalize + + +__all__ = ['tf2ss', 'abcd_normalize', 'ss2tf', 'zpk2ss', 'ss2zpk', + 'cont2discrete'] + + +def tf2ss(num, den): + r"""Transfer function to state-space representation. + + Parameters + ---------- + num, den : array_like + Sequences representing the coefficients of the numerator and + denominator polynomials, in order of descending degree. The + denominator needs to be at least as long as the numerator. + + Returns + ------- + A, B, C, D : ndarray + State space representation of the system, in controller canonical + form. + + Examples + -------- + Convert the transfer function: + + .. math:: H(s) = \frac{s^2 + 3s + 3}{s^2 + 2s + 1} + + >>> num = [1, 3, 3] + >>> den = [1, 2, 1] + + to the state-space representation: + + .. math:: + + \dot{\textbf{x}}(t) = + \begin{bmatrix} -2 & -1 \\ 1 & 0 \end{bmatrix} \textbf{x}(t) + + \begin{bmatrix} 1 \\ 0 \end{bmatrix} \textbf{u}(t) \\ + + \textbf{y}(t) = \begin{bmatrix} 1 & 2 \end{bmatrix} \textbf{x}(t) + + \begin{bmatrix} 1 \end{bmatrix} \textbf{u}(t) + + >>> from scipy.signal import tf2ss + >>> A, B, C, D = tf2ss(num, den) + >>> A + array([[-2., -1.], + [ 1., 0.]]) + >>> B + array([[ 1.], + [ 0.]]) + >>> C + array([[ 1., 2.]]) + >>> D + array([[ 1.]]) + """ + # Controller canonical state-space representation. + # if M+1 = len(num) and K+1 = len(den) then we must have M <= K + # states are found by asserting that X(s) = U(s) / D(s) + # then Y(s) = N(s) * X(s) + # + # A, B, C, and D follow quite naturally. + # + num, den = normalize(num, den) # Strips zeros, checks arrays + nn = len(num.shape) + if nn == 1: + num = asarray([num], num.dtype) + M = num.shape[1] + K = len(den) + if M > K: + msg = "Improper transfer function. `num` is longer than `den`." 
+ raise ValueError(msg) + if M == 0 or K == 0: # Null system + return (array([], float), array([], float), array([], float), + array([], float)) + + # pad numerator to have same number of columns has denominator + num = np.hstack((np.zeros((num.shape[0], K - M), dtype=num.dtype), num)) + + if num.shape[-1] > 0: + D = atleast_2d(num[:, 0]) + + else: + # We don't assign it an empty array because this system + # is not 'null'. It just doesn't have a non-zero D + # matrix. Thus, it should have a non-zero shape so that + # it can be operated on by functions like 'ss2tf' + D = array([[0]], float) + + if K == 1: + D = D.reshape(num.shape) + + return (zeros((1, 1)), zeros((1, D.shape[1])), + zeros((D.shape[0], 1)), D) + + frow = -array([den[1:]]) + A = r_[frow, eye(K - 2, K - 1)] + B = eye(K - 1, 1) + C = num[:, 1:] - outer(num[:, 0], den[1:]) + D = D.reshape((C.shape[0], B.shape[1])) + + return A, B, C, D + + +def _none_to_empty_2d(arg): + if arg is None: + return zeros((0, 0)) + else: + return arg + + +def _atleast_2d_or_none(arg): + if arg is not None: + return atleast_2d(arg) + + +def _shape_or_none(M): + if M is not None: + return M.shape + else: + return (None,) * 2 + + +def _choice_not_none(*args): + for arg in args: + if arg is not None: + return arg + + +def _restore(M, shape): + if M.shape == (0, 0): + return zeros(shape) + else: + if M.shape != shape: + raise ValueError("The input arrays have incompatible shapes.") + return M + + +def abcd_normalize(A=None, B=None, C=None, D=None): + """Check state-space matrices and ensure they are 2-D. + + If enough information on the system is provided, that is, enough + properly-shaped arrays are passed to the function, the missing ones + are built from this information, ensuring the correct number of + rows and columns. Otherwise a ValueError is raised. + + Parameters + ---------- + A, B, C, D : array_like, optional + State-space matrices. All of them are None (missing) by default. + See `ss2tf` for format. + + Returns + ------- + A, B, C, D : array + Properly shaped state-space matrices. + + Raises + ------ + ValueError + If not enough information on the system was provided. + + """ + A, B, C, D = map(_atleast_2d_or_none, (A, B, C, D)) + + MA, NA = _shape_or_none(A) + MB, NB = _shape_or_none(B) + MC, NC = _shape_or_none(C) + MD, ND = _shape_or_none(D) + + p = _choice_not_none(MA, MB, NC) + q = _choice_not_none(NB, ND) + r = _choice_not_none(MC, MD) + if p is None or q is None or r is None: + raise ValueError("Not enough information on the system.") + + A, B, C, D = map(_none_to_empty_2d, (A, B, C, D)) + A = _restore(A, (p, p)) + B = _restore(B, (p, q)) + C = _restore(C, (r, p)) + D = _restore(D, (r, q)) + + return A, B, C, D + + +def ss2tf(A, B, C, D, input=0): + r"""State-space to transfer function. + + A, B, C, D defines a linear state-space system with `p` inputs, + `q` outputs, and `n` state variables. + + Parameters + ---------- + A : array_like + State (or system) matrix of shape ``(n, n)`` + B : array_like + Input matrix of shape ``(n, p)`` + C : array_like + Output matrix of shape ``(q, n)`` + D : array_like + Feedthrough (or feedforward) matrix of shape ``(q, p)`` + input : int, optional + For multiple-input systems, the index of the input to use. + + Returns + ------- + num : 2-D ndarray + Numerator(s) of the resulting transfer function(s). `num` has one row + for each of the system's outputs. Each row is a sequence representation + of the numerator polynomial. + den : 1-D ndarray + Denominator of the resulting transfer function(s). 
`den` is a sequence + representation of the denominator polynomial. + + Examples + -------- + Convert the state-space representation: + + .. math:: + + \dot{\textbf{x}}(t) = + \begin{bmatrix} -2 & -1 \\ 1 & 0 \end{bmatrix} \textbf{x}(t) + + \begin{bmatrix} 1 \\ 0 \end{bmatrix} \textbf{u}(t) \\ + + \textbf{y}(t) = \begin{bmatrix} 1 & 2 \end{bmatrix} \textbf{x}(t) + + \begin{bmatrix} 1 \end{bmatrix} \textbf{u}(t) + + >>> A = [[-2, -1], [1, 0]] + >>> B = [[1], [0]] # 2-D column vector + >>> C = [[1, 2]] # 2-D row vector + >>> D = 1 + + to the transfer function: + + .. math:: H(s) = \frac{s^2 + 3s + 3}{s^2 + 2s + 1} + + >>> from scipy.signal import ss2tf + >>> ss2tf(A, B, C, D) + (array([[1., 3., 3.]]), array([ 1., 2., 1.])) + """ + # transfer function is C (sI - A)**(-1) B + D + + # Check consistency and make them all rank-2 arrays + A, B, C, D = abcd_normalize(A, B, C, D) + + nout, nin = D.shape + if input >= nin: + raise ValueError("System does not have the input specified.") + + # make SIMO from possibly MIMO system. + B = B[:, input:input + 1] + D = D[:, input:input + 1] + + try: + den = poly(A) + except ValueError: + den = 1 + + if (B.size == 0) and (C.size == 0): + num = np.ravel(D) + if (D.size == 0) and (A.size == 0): + den = [] + return num, den + + num_states = A.shape[0] + type_test = A[:, 0] + B[:, 0] + C[0, :] + D + 0.0 + num = np.empty((nout, num_states + 1), type_test.dtype) + for k in range(nout): + Ck = atleast_2d(C[k, :]) + num[k] = poly(A - dot(B, Ck)) + (D[k] - 1) * den + + return num, den + + +def zpk2ss(z, p, k): + """Zero-pole-gain representation to state-space representation + + Parameters + ---------- + z, p : sequence + Zeros and poles. + k : float + System gain. + + Returns + ------- + A, B, C, D : ndarray + State space representation of the system, in controller canonical + form. + + """ + return tf2ss(*zpk2tf(z, p, k)) + + +def ss2zpk(A, B, C, D, input=0): + """State-space representation to zero-pole-gain representation. + + A, B, C, D defines a linear state-space system with `p` inputs, + `q` outputs, and `n` state variables. + + Parameters + ---------- + A : array_like + State (or system) matrix of shape ``(n, n)`` + B : array_like + Input matrix of shape ``(n, p)`` + C : array_like + Output matrix of shape ``(q, n)`` + D : array_like + Feedthrough (or feedforward) matrix of shape ``(q, p)`` + input : int, optional + For multiple-input systems, the index of the input to use. + + Returns + ------- + z, p : sequence + Zeros and poles. + k : float + System gain. + + """ + return tf2zpk(*ss2tf(A, B, C, D, input=input)) + + +def cont2discrete(system, dt, method="zoh", alpha=None): + """ + Transform a continuous to a discrete state-space system. + + Parameters + ---------- + system : a tuple describing the system or an instance of `lti` + The following gives the number of elements in the tuple and + the interpretation: + + * 1: (instance of `lti`) + * 2: (num, den) + * 3: (zeros, poles, gain) + * 4: (A, B, C, D) + + dt : float + The discretization time step. 
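A minimal sketch of `abcd_normalize` and `ss2tf` (assuming `scipy.signal` is importable; the matrices reuse the `tf2ss` result shown earlier and are otherwise arbitrary):

import numpy as np
from scipy.signal import abcd_normalize, ss2tf

A = [[-2, -1], [1, 0]]
B = [[1], [0]]
C = [[1, 2]]
# With A, B and C given, the missing D is restored as a (1, 1) zero matrix.
A2, B2, C2, D2 = abcd_normalize(A=A, B=B, C=C)
print(D2)                      # [[0.]]

# Round trip back to the transfer function of the earlier example.
num, den = ss2tf(A, B, C, [[1]])
print(num, den)                # [[1. 3. 3.]] [1. 2. 1.]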
+ method : str, optional + Which method to use: + + * gbt: generalized bilinear transformation + * bilinear: Tustin's approximation ("gbt" with alpha=0.5) + * euler: Euler (or forward differencing) method ("gbt" with alpha=0) + * backward_diff: Backwards differencing ("gbt" with alpha=1.0) + * zoh: zero-order hold (default) + * foh: first-order hold (*versionadded: 1.3.0*) + * impulse: equivalent impulse response (*versionadded: 1.3.0*) + + alpha : float within [0, 1], optional + The generalized bilinear transformation weighting parameter, which + should only be specified with method="gbt", and is ignored otherwise + + Returns + ------- + sysd : tuple containing the discrete system + Based on the input type, the output will be of the form + + * (num, den, dt) for transfer function input + * (zeros, poles, gain, dt) for zeros-poles-gain input + * (A, B, C, D, dt) for state-space system input + + Notes + ----- + By default, the routine uses a Zero-Order Hold (zoh) method to perform + the transformation. Alternatively, a generalized bilinear transformation + may be used, which includes the common Tustin's bilinear approximation, + an Euler's method technique, or a backwards differencing technique. + + The Zero-Order Hold (zoh) method is based on [1]_, the generalized bilinear + approximation is based on [2]_ and [3]_, the First-Order Hold (foh) method + is based on [4]_. + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Discretization#Discretization_of_linear_state_space_models + + .. [2] http://techteach.no/publications/discretetime_signals_systems/discrete.pdf + + .. [3] G. Zhang, X. Chen, and T. Chen, Digital redesign via the generalized + bilinear transformation, Int. J. Control, vol. 82, no. 4, pp. 741-754, + 2009. + (https://www.mypolyuweb.hk/~magzhang/Research/ZCC09_IJC.pdf) + + .. [4] G. F. Franklin, J. D. Powell, and M. L. Workman, Digital control + of dynamic systems, 3rd ed. Menlo Park, Calif: Addison-Wesley, + pp. 204-206, 1998. + + Examples + -------- + We can transform a continuous state-space system to a discrete one: + + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> from scipy.signal import cont2discrete, lti, dlti, dstep + + Define a continuous state-space system. + + >>> A = np.array([[0, 1],[-10., -3]]) + >>> B = np.array([[0],[10.]]) + >>> C = np.array([[1., 0]]) + >>> D = np.array([[0.]]) + >>> l_system = lti(A, B, C, D) + >>> t, x = l_system.step(T=np.linspace(0, 5, 100)) + >>> fig, ax = plt.subplots() + >>> ax.plot(t, x, label='Continuous', linewidth=3) + + Transform it to a discrete state-space system using several methods. + + >>> dt = 0.1 + >>> for method in ['zoh', 'bilinear', 'euler', 'backward_diff', 'foh', 'impulse']: + ... d_system = cont2discrete((A, B, C, D), dt, method=method) + ... s, x_d = dstep(d_system) + ... 
ax.step(s, np.squeeze(x_d), label=method, where='post') + >>> ax.axis([t[0], t[-1], x[0], 1.4]) + >>> ax.legend(loc='best') + >>> fig.tight_layout() + >>> plt.show() + + """ + if len(system) == 1: + return system.to_discrete() + if len(system) == 2: + sysd = cont2discrete(tf2ss(system[0], system[1]), dt, method=method, + alpha=alpha) + return ss2tf(sysd[0], sysd[1], sysd[2], sysd[3]) + (dt,) + elif len(system) == 3: + sysd = cont2discrete(zpk2ss(system[0], system[1], system[2]), dt, + method=method, alpha=alpha) + return ss2zpk(sysd[0], sysd[1], sysd[2], sysd[3]) + (dt,) + elif len(system) == 4: + a, b, c, d = system + else: + raise ValueError("First argument must either be a tuple of 2 (tf), " + "3 (zpk), or 4 (ss) arrays.") + + if method == 'gbt': + if alpha is None: + raise ValueError("Alpha parameter must be specified for the " + "generalized bilinear transform (gbt) method") + elif alpha < 0 or alpha > 1: + raise ValueError("Alpha parameter must be within the interval " + "[0,1] for the gbt method") + + if method == 'gbt': + # This parameter is used repeatedly - compute once here + ima = np.eye(a.shape[0]) - alpha*dt*a + ad = linalg.solve(ima, np.eye(a.shape[0]) + (1.0-alpha)*dt*a) + bd = linalg.solve(ima, dt*b) + + # Similarly solve for the output equation matrices + cd = linalg.solve(ima.transpose(), c.transpose()) + cd = cd.transpose() + dd = d + alpha*np.dot(c, bd) + + elif method == 'bilinear' or method == 'tustin': + return cont2discrete(system, dt, method="gbt", alpha=0.5) + + elif method == 'euler' or method == 'forward_diff': + return cont2discrete(system, dt, method="gbt", alpha=0.0) + + elif method == 'backward_diff': + return cont2discrete(system, dt, method="gbt", alpha=1.0) + + elif method == 'zoh': + # Build an exponential matrix + em_upper = np.hstack((a, b)) + + # Need to stack zeros under the a and b matrices + em_lower = np.hstack((np.zeros((b.shape[1], a.shape[0])), + np.zeros((b.shape[1], b.shape[1])))) + + em = np.vstack((em_upper, em_lower)) + ms = linalg.expm(dt * em) + + # Dispose of the lower rows + ms = ms[:a.shape[0], :] + + ad = ms[:, 0:a.shape[1]] + bd = ms[:, a.shape[1]:] + + cd = c + dd = d + + elif method == 'foh': + # Size parameters for convenience + n = a.shape[0] + m = b.shape[1] + + # Build an exponential matrix similar to 'zoh' method + em_upper = linalg.block_diag(np.block([a, b]) * dt, np.eye(m)) + em_lower = zeros((m, n + 2 * m)) + em = np.block([[em_upper], [em_lower]]) + + ms = linalg.expm(em) + + # Get the three blocks from upper rows + ms11 = ms[:n, 0:n] + ms12 = ms[:n, n:n + m] + ms13 = ms[:n, n + m:] + + ad = ms11 + bd = ms12 - ms13 + ms11 @ ms13 + cd = c + dd = d + c @ ms13 + + elif method == 'impulse': + if not np.allclose(d, 0): + raise ValueError("Impulse method is only applicable " + "to strictly proper systems") + + ad = linalg.expm(a * dt) + bd = ad @ b * dt + cd = c + dd = c @ b * dt + + else: + raise ValueError(f"Unknown transformation method '{method}'") + + return ad, bd, cd, dd, dt diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/_ltisys.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/_ltisys.py new file mode 100644 index 0000000000000000000000000000000000000000..3992797a09a3ceee4be1f052603fcd6593ae6274 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/_ltisys.py @@ -0,0 +1,3519 @@ +""" +ltisys -- a collection of classes and functions for modeling linear +time 
invariant systems. +""" +# +# Author: Travis Oliphant 2001 +# +# Feb 2010: Warren Weckesser +# Rewrote lsim2 and added impulse2. +# Apr 2011: Jeffrey Armstrong +# Added dlsim, dstep, dimpulse, cont2discrete +# Aug 2013: Juan Luis Cano +# Rewrote abcd_normalize. +# Jan 2015: Irvin Probst irvin DOT probst AT ensta-bretagne DOT fr +# Added pole placement +# Mar 2015: Clancy Rowley +# Rewrote lsim +# May 2015: Felix Berkenkamp +# Split lti class into subclasses +# Merged discrete systems and added dlti + +import warnings + +# np.linalg.qr fails on some tests with LinAlgError: zgeqrf returns -7 +# use scipy's qr until this is solved + +from scipy.linalg import qr as s_qr +from scipy import linalg +from scipy.interpolate import make_interp_spline +from ._filter_design import (tf2zpk, zpk2tf, normalize, freqs, freqz, freqs_zpk, + freqz_zpk) +from ._lti_conversion import (tf2ss, abcd_normalize, ss2tf, zpk2ss, ss2zpk, + cont2discrete, _atleast_2d_or_none) + +import numpy as np +from numpy import (real, atleast_1d, squeeze, asarray, zeros, + dot, transpose, ones, linspace) +import copy + +__all__ = ['lti', 'dlti', 'TransferFunction', 'ZerosPolesGain', 'StateSpace', + 'lsim', 'impulse', 'step', 'bode', + 'freqresp', 'place_poles', 'dlsim', 'dstep', 'dimpulse', + 'dfreqresp', 'dbode'] + + +class LinearTimeInvariant: + def __new__(cls, *system, **kwargs): + """Create a new object, don't allow direct instances.""" + if cls is LinearTimeInvariant: + raise NotImplementedError('The LinearTimeInvariant class is not ' + 'meant to be used directly, use `lti` ' + 'or `dlti` instead.') + return super().__new__(cls) + + def __init__(self): + """ + Initialize the `lti` baseclass. + + The heavy lifting is done by the subclasses. + """ + super().__init__() + + self.inputs = None + self.outputs = None + self._dt = None + + @property + def dt(self): + """Return the sampling time of the system, `None` for `lti` systems.""" + return self._dt + + @property + def _dt_dict(self): + if self.dt is None: + return {} + else: + return {'dt': self.dt} + + @property + def zeros(self): + """Zeros of the system.""" + return self.to_zpk().zeros + + @property + def poles(self): + """Poles of the system.""" + return self.to_zpk().poles + + def _as_ss(self): + """Convert to `StateSpace` system, without copying. + + Returns + ------- + sys: StateSpace + The `StateSpace` system. If the class is already an instance of + `StateSpace` then this instance is returned. + """ + if isinstance(self, StateSpace): + return self + else: + return self.to_ss() + + def _as_zpk(self): + """Convert to `ZerosPolesGain` system, without copying. + + Returns + ------- + sys: ZerosPolesGain + The `ZerosPolesGain` system. If the class is already an instance of + `ZerosPolesGain` then this instance is returned. + """ + if isinstance(self, ZerosPolesGain): + return self + else: + return self.to_zpk() + + def _as_tf(self): + """Convert to `TransferFunction` system, without copying. + + Returns + ------- + sys: ZerosPolesGain + The `TransferFunction` system. If the class is already an instance of + `TransferFunction` then this instance is returned. + """ + if isinstance(self, TransferFunction): + return self + else: + return self.to_tf() + + +class lti(LinearTimeInvariant): + r""" + Continuous-time linear time invariant system base class. + + Parameters + ---------- + *system : arguments + The `lti` class can be instantiated with either 2, 3 or 4 arguments. 
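A minimal sketch checking the equivalence stated above, namely that `method='bilinear'` is `gbt` with `alpha=0.5` (assuming `scipy.signal` is importable; the state-space matrices are those of the `cont2discrete` docstring example):

import numpy as np
from scipy.signal import cont2discrete

A = np.array([[0.0, 1.0], [-10.0, -3.0]])
B = np.array([[0.0], [10.0]])
C = np.array([[1.0, 0.0]])
D = np.array([[0.0]])

ad1, bd1, cd1, dd1, dt = cont2discrete((A, B, C, D), 0.1, method='bilinear')
ad2, bd2, cd2, dd2, dt = cont2discrete((A, B, C, D), 0.1, method='gbt', alpha=0.5)
assert np.allclose(ad1, ad2) and np.allclose(bd1, bd2)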
+ The following gives the number of arguments and the corresponding + continuous-time subclass that is created: + + * 2: `TransferFunction`: (numerator, denominator) + * 3: `ZerosPolesGain`: (zeros, poles, gain) + * 4: `StateSpace`: (A, B, C, D) + + Each argument can be an array or a sequence. + + See Also + -------- + ZerosPolesGain, StateSpace, TransferFunction, dlti + + Notes + ----- + `lti` instances do not exist directly. Instead, `lti` creates an instance + of one of its subclasses: `StateSpace`, `TransferFunction` or + `ZerosPolesGain`. + + If (numerator, denominator) is passed in for ``*system``, coefficients for + both the numerator and denominator should be specified in descending + exponent order (e.g., ``s^2 + 3s + 5`` would be represented as ``[1, 3, + 5]``). + + Changing the value of properties that are not directly part of the current + system representation (such as the `zeros` of a `StateSpace` system) is + very inefficient and may lead to numerical inaccuracies. It is better to + convert to the specific system representation first. For example, call + ``sys = sys.to_zpk()`` before accessing/changing the zeros, poles or gain. + + Examples + -------- + >>> from scipy import signal + + >>> signal.lti(1, 2, 3, 4) + StateSpaceContinuous( + array([[1]]), + array([[2]]), + array([[3]]), + array([[4]]), + dt: None + ) + + Construct the transfer function + :math:`H(s) = \frac{5(s - 1)(s - 2)}{(s - 3)(s - 4)}`: + + >>> signal.lti([1, 2], [3, 4], 5) + ZerosPolesGainContinuous( + array([1, 2]), + array([3, 4]), + 5, + dt: None + ) + + Construct the transfer function :math:`H(s) = \frac{3s + 4}{1s + 2}`: + + >>> signal.lti([3, 4], [1, 2]) + TransferFunctionContinuous( + array([3., 4.]), + array([1., 2.]), + dt: None + ) + + """ + def __new__(cls, *system): + """Create an instance of the appropriate subclass.""" + if cls is lti: + N = len(system) + if N == 2: + return TransferFunctionContinuous.__new__( + TransferFunctionContinuous, *system) + elif N == 3: + return ZerosPolesGainContinuous.__new__( + ZerosPolesGainContinuous, *system) + elif N == 4: + return StateSpaceContinuous.__new__(StateSpaceContinuous, + *system) + else: + raise ValueError("`system` needs to be an instance of `lti` " + "or have 2, 3 or 4 arguments.") + # __new__ was called from a subclass, let it call its own functions + return super().__new__(cls) + + def __init__(self, *system): + """ + Initialize the `lti` baseclass. + + The heavy lifting is done by the subclasses. + """ + super().__init__(*system) + + def impulse(self, X0=None, T=None, N=None): + """ + Return the impulse response of a continuous-time system. + See `impulse` for details. + """ + return impulse(self, X0=X0, T=T, N=N) + + def step(self, X0=None, T=None, N=None): + """ + Return the step response of a continuous-time system. + See `step` for details. + """ + return step(self, X0=X0, T=T, N=N) + + def output(self, U, T, X0=None): + """ + Return the response of a continuous-time system to input `U`. + See `lsim` for details. + """ + return lsim(self, U, T, X0=X0) + + def bode(self, w=None, n=100): + """ + Calculate Bode magnitude and phase data of a continuous-time system. + + Returns a 3-tuple containing arrays of frequencies [rad/s], magnitude + [dB] and phase [deg]. See `bode` for details. 
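A minimal sketch of the `lti` factory and the convenience methods above (assuming `scipy.signal` is importable; the first-order system is arbitrary):

from scipy.signal import lti

sys = lti([1], [1, 1])          # two arguments -> TransferFunctionContinuous
t, y = sys.step()               # wraps signal.step(sys)
w, mag, phase = sys.bode()      # wraps signal.bode(sys)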
+ + Examples + -------- + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + + >>> sys = signal.TransferFunction([1], [1, 1]) + >>> w, mag, phase = sys.bode() + + >>> plt.figure() + >>> plt.semilogx(w, mag) # Bode magnitude plot + >>> plt.figure() + >>> plt.semilogx(w, phase) # Bode phase plot + >>> plt.show() + + """ + return bode(self, w=w, n=n) + + def freqresp(self, w=None, n=10000): + """ + Calculate the frequency response of a continuous-time system. + + Returns a 2-tuple containing arrays of frequencies [rad/s] and + complex magnitude. + See `freqresp` for details. + """ + return freqresp(self, w=w, n=n) + + def to_discrete(self, dt, method='zoh', alpha=None): + """Return a discretized version of the current system. + + Parameters: See `cont2discrete` for details. + + Returns + ------- + sys: instance of `dlti` + """ + raise NotImplementedError('to_discrete is not implemented for this ' + 'system class.') + + +class dlti(LinearTimeInvariant): + r""" + Discrete-time linear time invariant system base class. + + Parameters + ---------- + *system: arguments + The `dlti` class can be instantiated with either 2, 3 or 4 arguments. + The following gives the number of arguments and the corresponding + discrete-time subclass that is created: + + * 2: `TransferFunction`: (numerator, denominator) + * 3: `ZerosPolesGain`: (zeros, poles, gain) + * 4: `StateSpace`: (A, B, C, D) + + Each argument can be an array or a sequence. + dt: float, optional + Sampling time [s] of the discrete-time systems. Defaults to ``True`` + (unspecified sampling time). Must be specified as a keyword argument, + for example, ``dt=0.1``. + + See Also + -------- + ZerosPolesGain, StateSpace, TransferFunction, lti + + Notes + ----- + `dlti` instances do not exist directly. Instead, `dlti` creates an instance + of one of its subclasses: `StateSpace`, `TransferFunction` or + `ZerosPolesGain`. + + Changing the value of properties that are not directly part of the current + system representation (such as the `zeros` of a `StateSpace` system) is + very inefficient and may lead to numerical inaccuracies. It is better to + convert to the specific system representation first. For example, call + ``sys = sys.to_zpk()`` before accessing/changing the zeros, poles or gain. + + If (numerator, denominator) is passed in for ``*system``, coefficients for + both the numerator and denominator should be specified in descending + exponent order (e.g., ``z^2 + 3z + 5`` would be represented as ``[1, 3, + 5]``). + + .. 
versionadded:: 0.18.0 + + Examples + -------- + >>> from scipy import signal + + >>> signal.dlti(1, 2, 3, 4) + StateSpaceDiscrete( + array([[1]]), + array([[2]]), + array([[3]]), + array([[4]]), + dt: True + ) + + >>> signal.dlti(1, 2, 3, 4, dt=0.1) + StateSpaceDiscrete( + array([[1]]), + array([[2]]), + array([[3]]), + array([[4]]), + dt: 0.1 + ) + + Construct the transfer function + :math:`H(z) = \frac{5(z - 1)(z - 2)}{(z - 3)(z - 4)}` with a sampling time + of 0.1 seconds: + + >>> signal.dlti([1, 2], [3, 4], 5, dt=0.1) + ZerosPolesGainDiscrete( + array([1, 2]), + array([3, 4]), + 5, + dt: 0.1 + ) + + Construct the transfer function :math:`H(z) = \frac{3z + 4}{1z + 2}` with + a sampling time of 0.1 seconds: + + >>> signal.dlti([3, 4], [1, 2], dt=0.1) + TransferFunctionDiscrete( + array([3., 4.]), + array([1., 2.]), + dt: 0.1 + ) + + """ + def __new__(cls, *system, **kwargs): + """Create an instance of the appropriate subclass.""" + if cls is dlti: + N = len(system) + if N == 2: + return TransferFunctionDiscrete.__new__( + TransferFunctionDiscrete, *system, **kwargs) + elif N == 3: + return ZerosPolesGainDiscrete.__new__(ZerosPolesGainDiscrete, + *system, **kwargs) + elif N == 4: + return StateSpaceDiscrete.__new__(StateSpaceDiscrete, *system, + **kwargs) + else: + raise ValueError("`system` needs to be an instance of `dlti` " + "or have 2, 3 or 4 arguments.") + # __new__ was called from a subclass, let it call its own functions + return super().__new__(cls) + + def __init__(self, *system, **kwargs): + """ + Initialize the `lti` baseclass. + + The heavy lifting is done by the subclasses. + """ + dt = kwargs.pop('dt', True) + super().__init__(*system, **kwargs) + + self.dt = dt + + @property + def dt(self): + """Return the sampling time of the system.""" + return self._dt + + @dt.setter + def dt(self, dt): + self._dt = dt + + def impulse(self, x0=None, t=None, n=None): + """ + Return the impulse response of the discrete-time `dlti` system. + See `dimpulse` for details. + """ + return dimpulse(self, x0=x0, t=t, n=n) + + def step(self, x0=None, t=None, n=None): + """ + Return the step response of the discrete-time `dlti` system. + See `dstep` for details. + """ + return dstep(self, x0=x0, t=t, n=n) + + def output(self, u, t, x0=None): + """ + Return the response of the discrete-time system to input `u`. + See `dlsim` for details. + """ + return dlsim(self, u, t, x0=x0) + + def bode(self, w=None, n=100): + r""" + Calculate Bode magnitude and phase data of a discrete-time system. + + Returns a 3-tuple containing arrays of frequencies [rad/s], magnitude + [dB] and phase [deg]. See `dbode` for details. + + Examples + -------- + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + + Construct the transfer function :math:`H(z) = \frac{1}{z^2 + 2z + 3}` + with sampling time 0.5s: + + >>> sys = signal.TransferFunction([1], [1, 2, 3], dt=0.5) + + Equivalent: signal.dbode(sys) + + >>> w, mag, phase = sys.bode() + + >>> plt.figure() + >>> plt.semilogx(w, mag) # Bode magnitude plot + >>> plt.figure() + >>> plt.semilogx(w, phase) # Bode phase plot + >>> plt.show() + + """ + return dbode(self, w=w, n=n) + + def freqresp(self, w=None, n=10000, whole=False): + """ + Calculate the frequency response of a discrete-time system. + + Returns a 2-tuple containing arrays of frequencies [rad/s] and + complex magnitude. + See `dfreqresp` for details. 
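A minimal sketch of the `dlti` factory and its step-response wrapper (assuming `scipy.signal` is importable; the system and sampling time are arbitrary):

from scipy.signal import dlti

dsys = dlti([1.0], [1.0, -0.5], dt=0.1)   # H(z) = 1 / (z - 0.5), dt = 0.1 s
t, y = dsys.step(n=10)                    # wraps signal.dstep(dsys, n=10)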
+ + """ + return dfreqresp(self, w=w, n=n, whole=whole) + + +class TransferFunction(LinearTimeInvariant): + r"""Linear Time Invariant system class in transfer function form. + + Represents the system as the continuous-time transfer function + :math:`H(s)=\sum_{i=0}^N b[N-i] s^i / \sum_{j=0}^M a[M-j] s^j` or the + discrete-time transfer function + :math:`H(z)=\sum_{i=0}^N b[N-i] z^i / \sum_{j=0}^M a[M-j] z^j`, where + :math:`b` are elements of the numerator `num`, :math:`a` are elements of + the denominator `den`, and ``N == len(b) - 1``, ``M == len(a) - 1``. + `TransferFunction` systems inherit additional + functionality from the `lti`, respectively the `dlti` classes, depending on + which system representation is used. + + Parameters + ---------- + *system: arguments + The `TransferFunction` class can be instantiated with 1 or 2 + arguments. The following gives the number of input arguments and their + interpretation: + + * 1: `lti` or `dlti` system: (`StateSpace`, `TransferFunction` or + `ZerosPolesGain`) + * 2: array_like: (numerator, denominator) + dt: float, optional + Sampling time [s] of the discrete-time systems. Defaults to `None` + (continuous-time). Must be specified as a keyword argument, for + example, ``dt=0.1``. + + See Also + -------- + ZerosPolesGain, StateSpace, lti, dlti + tf2ss, tf2zpk, tf2sos + + Notes + ----- + Changing the value of properties that are not part of the + `TransferFunction` system representation (such as the `A`, `B`, `C`, `D` + state-space matrices) is very inefficient and may lead to numerical + inaccuracies. It is better to convert to the specific system + representation first. For example, call ``sys = sys.to_ss()`` before + accessing/changing the A, B, C, D system matrices. + + If (numerator, denominator) is passed in for ``*system``, coefficients + for both the numerator and denominator should be specified in descending + exponent order (e.g. 
``s^2 + 3s + 5`` or ``z^2 + 3z + 5`` would be + represented as ``[1, 3, 5]``) + + Examples + -------- + Construct the transfer function + :math:`H(s) = \frac{s^2 + 3s + 3}{s^2 + 2s + 1}`: + + >>> from scipy import signal + + >>> num = [1, 3, 3] + >>> den = [1, 2, 1] + + >>> signal.TransferFunction(num, den) + TransferFunctionContinuous( + array([1., 3., 3.]), + array([1., 2., 1.]), + dt: None + ) + + Construct the transfer function + :math:`H(z) = \frac{z^2 + 3z + 3}{z^2 + 2z + 1}` with a sampling time of + 0.1 seconds: + + >>> signal.TransferFunction(num, den, dt=0.1) + TransferFunctionDiscrete( + array([1., 3., 3.]), + array([1., 2., 1.]), + dt: 0.1 + ) + + """ + def __new__(cls, *system, **kwargs): + """Handle object conversion if input is an instance of lti.""" + if len(system) == 1 and isinstance(system[0], LinearTimeInvariant): + return system[0].to_tf() + + # Choose whether to inherit from `lti` or from `dlti` + if cls is TransferFunction: + if kwargs.get('dt') is None: + return TransferFunctionContinuous.__new__( + TransferFunctionContinuous, + *system, + **kwargs) + else: + return TransferFunctionDiscrete.__new__( + TransferFunctionDiscrete, + *system, + **kwargs) + + # No special conversion needed + return super().__new__(cls) + + def __init__(self, *system, **kwargs): + """Initialize the state space LTI system.""" + # Conversion of lti instances is handled in __new__ + if isinstance(system[0], LinearTimeInvariant): + return + + # Remove system arguments, not needed by parents anymore + super().__init__(**kwargs) + + self._num = None + self._den = None + + self.num, self.den = normalize(*system) + + def __repr__(self): + """Return representation of the system's transfer function""" + return ( + f'{self.__class__.__name__}(\n' + f'{repr(self.num)},\n' + f'{repr(self.den)},\n' + f'dt: {repr(self.dt)}\n)' + ) + + @property + def num(self): + """Numerator of the `TransferFunction` system.""" + return self._num + + @num.setter + def num(self, num): + self._num = atleast_1d(num) + + # Update dimensions + if len(self.num.shape) > 1: + self.outputs, self.inputs = self.num.shape + else: + self.outputs = 1 + self.inputs = 1 + + @property + def den(self): + """Denominator of the `TransferFunction` system.""" + return self._den + + @den.setter + def den(self, den): + self._den = atleast_1d(den) + + def _copy(self, system): + """ + Copy the parameters of another `TransferFunction` object + + Parameters + ---------- + system : `TransferFunction` + The `StateSpace` system that is to be copied + + """ + self.num = system.num + self.den = system.den + + def to_tf(self): + """ + Return a copy of the current `TransferFunction` system. + + Returns + ------- + sys : instance of `TransferFunction` + The current system (copy) + + """ + return copy.deepcopy(self) + + def to_zpk(self): + """ + Convert system representation to `ZerosPolesGain`. + + Returns + ------- + sys : instance of `ZerosPolesGain` + Zeros, poles, gain representation of the current system + + """ + return ZerosPolesGain(*tf2zpk(self.num, self.den), + **self._dt_dict) + + def to_ss(self): + """ + Convert system representation to `StateSpace`. + + Returns + ------- + sys : instance of `StateSpace` + State space model of the current system + + """ + return StateSpace(*tf2ss(self.num, self.den), + **self._dt_dict) + + @staticmethod + def _z_to_zinv(num, den): + """Change a transfer function from the variable `z` to `z**-1`. 
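A minimal sketch of the `TransferFunction` conversion methods defined above (assuming `scipy.signal` is importable; the coefficients are the docstring example):

from scipy.signal import TransferFunction

tf = TransferFunction([1, 3, 3], [1, 2, 1])
zpk = tf.to_zpk()    # ZerosPolesGainContinuous, poles at -1 (double)
ss = tf.to_ss()      # StateSpaceContinuous in controller canonical form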
+ + Parameters + ---------- + num, den: 1d array_like + Sequences representing the coefficients of the numerator and + denominator polynomials, in order of descending degree of 'z'. + That is, ``5z**2 + 3z + 2`` is presented as ``[5, 3, 2]``. + + Returns + ------- + num, den: 1d array_like + Sequences representing the coefficients of the numerator and + denominator polynomials, in order of ascending degree of 'z**-1'. + That is, ``5 + 3 z**-1 + 2 z**-2`` is presented as ``[5, 3, 2]``. + """ + diff = len(num) - len(den) + if diff > 0: + den = np.hstack((np.zeros(diff), den)) + elif diff < 0: + num = np.hstack((np.zeros(-diff), num)) + return num, den + + @staticmethod + def _zinv_to_z(num, den): + """Change a transfer function from the variable `z` to `z**-1`. + + Parameters + ---------- + num, den: 1d array_like + Sequences representing the coefficients of the numerator and + denominator polynomials, in order of ascending degree of 'z**-1'. + That is, ``5 + 3 z**-1 + 2 z**-2`` is presented as ``[5, 3, 2]``. + + Returns + ------- + num, den: 1d array_like + Sequences representing the coefficients of the numerator and + denominator polynomials, in order of descending degree of 'z'. + That is, ``5z**2 + 3z + 2`` is presented as ``[5, 3, 2]``. + """ + diff = len(num) - len(den) + if diff > 0: + den = np.hstack((den, np.zeros(diff))) + elif diff < 0: + num = np.hstack((num, np.zeros(-diff))) + return num, den + + +class TransferFunctionContinuous(TransferFunction, lti): + r""" + Continuous-time Linear Time Invariant system in transfer function form. + + Represents the system as the transfer function + :math:`H(s)=\sum_{i=0}^N b[N-i] s^i / \sum_{j=0}^M a[M-j] s^j`, where + :math:`b` are elements of the numerator `num`, :math:`a` are elements of + the denominator `den`, and ``N == len(b) - 1``, ``M == len(a) - 1``. + Continuous-time `TransferFunction` systems inherit additional + functionality from the `lti` class. + + Parameters + ---------- + *system: arguments + The `TransferFunction` class can be instantiated with 1 or 2 + arguments. The following gives the number of input arguments and their + interpretation: + + * 1: `lti` system: (`StateSpace`, `TransferFunction` or + `ZerosPolesGain`) + * 2: array_like: (numerator, denominator) + + See Also + -------- + ZerosPolesGain, StateSpace, lti + tf2ss, tf2zpk, tf2sos + + Notes + ----- + Changing the value of properties that are not part of the + `TransferFunction` system representation (such as the `A`, `B`, `C`, `D` + state-space matrices) is very inefficient and may lead to numerical + inaccuracies. It is better to convert to the specific system + representation first. For example, call ``sys = sys.to_ss()`` before + accessing/changing the A, B, C, D system matrices. + + If (numerator, denominator) is passed in for ``*system``, coefficients + for both the numerator and denominator should be specified in descending + exponent order (e.g. ``s^2 + 3s + 5`` would be represented as + ``[1, 3, 5]``) + + Examples + -------- + Construct the transfer function + :math:`H(s) = \frac{s^2 + 3s + 3}{s^2 + 2s + 1}`: + + >>> from scipy import signal + + >>> num = [1, 3, 3] + >>> den = [1, 2, 1] + + >>> signal.TransferFunction(num, den) + TransferFunctionContinuous( + array([ 1., 3., 3.]), + array([ 1., 2., 1.]), + dt: None + ) + + """ + + def to_discrete(self, dt, method='zoh', alpha=None): + """ + Returns the discretized `TransferFunction` system. + + Parameters: See `cont2discrete` for details. 
+ + Returns + ------- + sys: instance of `dlti` and `StateSpace` + """ + return TransferFunction(*cont2discrete((self.num, self.den), + dt, + method=method, + alpha=alpha)[:-1], + dt=dt) + + +class TransferFunctionDiscrete(TransferFunction, dlti): + r""" + Discrete-time Linear Time Invariant system in transfer function form. + + Represents the system as the transfer function + :math:`H(z)=\sum_{i=0}^N b[N-i] z^i / \sum_{j=0}^M a[M-j] z^j`, where + :math:`b` are elements of the numerator `num`, :math:`a` are elements of + the denominator `den`, and ``N == len(b) - 1``, ``M == len(a) - 1``. + Discrete-time `TransferFunction` systems inherit additional functionality + from the `dlti` class. + + Parameters + ---------- + *system: arguments + The `TransferFunction` class can be instantiated with 1 or 2 + arguments. The following gives the number of input arguments and their + interpretation: + + * 1: `dlti` system: (`StateSpace`, `TransferFunction` or + `ZerosPolesGain`) + * 2: array_like: (numerator, denominator) + dt: float, optional + Sampling time [s] of the discrete-time systems. Defaults to `True` + (unspecified sampling time). Must be specified as a keyword argument, + for example, ``dt=0.1``. + + See Also + -------- + ZerosPolesGain, StateSpace, dlti + tf2ss, tf2zpk, tf2sos + + Notes + ----- + Changing the value of properties that are not part of the + `TransferFunction` system representation (such as the `A`, `B`, `C`, `D` + state-space matrices) is very inefficient and may lead to numerical + inaccuracies. + + If (numerator, denominator) is passed in for ``*system``, coefficients + for both the numerator and denominator should be specified in descending + exponent order (e.g., ``z^2 + 3z + 5`` would be represented as + ``[1, 3, 5]``). + + Examples + -------- + Construct the transfer function + :math:`H(z) = \frac{z^2 + 3z + 3}{z^2 + 2z + 1}` with a sampling time of + 0.5 seconds: + + >>> from scipy import signal + + >>> num = [1, 3, 3] + >>> den = [1, 2, 1] + + >>> signal.TransferFunction(num, den, dt=0.5) + TransferFunctionDiscrete( + array([ 1., 3., 3.]), + array([ 1., 2., 1.]), + dt: 0.5 + ) + + """ + pass + + +class ZerosPolesGain(LinearTimeInvariant): + r""" + Linear Time Invariant system class in zeros, poles, gain form. + + Represents the system as the continuous- or discrete-time transfer function + :math:`H(s)=k \prod_i (s - z[i]) / \prod_j (s - p[j])`, where :math:`k` is + the `gain`, :math:`z` are the `zeros` and :math:`p` are the `poles`. + `ZerosPolesGain` systems inherit additional functionality from the `lti`, + respectively the `dlti` classes, depending on which system representation + is used. + + Parameters + ---------- + *system : arguments + The `ZerosPolesGain` class can be instantiated with 1 or 3 + arguments. The following gives the number of input arguments and their + interpretation: + + * 1: `lti` or `dlti` system: (`StateSpace`, `TransferFunction` or + `ZerosPolesGain`) + * 3: array_like: (zeros, poles, gain) + dt: float, optional + Sampling time [s] of the discrete-time systems. Defaults to `None` + (continuous-time). Must be specified as a keyword argument, for + example, ``dt=0.1``. + + + See Also + -------- + TransferFunction, StateSpace, lti, dlti + zpk2ss, zpk2tf, zpk2sos + + Notes + ----- + Changing the value of properties that are not part of the + `ZerosPolesGain` system representation (such as the `A`, `B`, `C`, `D` + state-space matrices) is very inefficient and may lead to numerical + inaccuracies. 
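A minimal sketch of `TransferFunctionContinuous.to_discrete` (assuming `scipy.signal` is importable; the first-order plant and sampling time are arbitrary):

from scipy.signal import TransferFunction

sys_c = TransferFunction([1.0], [1.0, 1.0])   # H(s) = 1 / (s + 1)
sys_d = sys_c.to_discrete(dt=0.1)             # zero-order hold by default
print(sys_d.dt)                               # 0.1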
It is better to convert to the specific system + representation first. For example, call ``sys = sys.to_ss()`` before + accessing/changing the A, B, C, D system matrices. + + Examples + -------- + Construct the transfer function + :math:`H(s) = \frac{5(s - 1)(s - 2)}{(s - 3)(s - 4)}`: + + >>> from scipy import signal + + >>> signal.ZerosPolesGain([1, 2], [3, 4], 5) + ZerosPolesGainContinuous( + array([1, 2]), + array([3, 4]), + 5, + dt: None + ) + + Construct the transfer function + :math:`H(z) = \frac{5(z - 1)(z - 2)}{(z - 3)(z - 4)}` with a sampling time + of 0.1 seconds: + + >>> signal.ZerosPolesGain([1, 2], [3, 4], 5, dt=0.1) + ZerosPolesGainDiscrete( + array([1, 2]), + array([3, 4]), + 5, + dt: 0.1 + ) + + """ + def __new__(cls, *system, **kwargs): + """Handle object conversion if input is an instance of `lti`""" + if len(system) == 1 and isinstance(system[0], LinearTimeInvariant): + return system[0].to_zpk() + + # Choose whether to inherit from `lti` or from `dlti` + if cls is ZerosPolesGain: + if kwargs.get('dt') is None: + return ZerosPolesGainContinuous.__new__( + ZerosPolesGainContinuous, + *system, + **kwargs) + else: + return ZerosPolesGainDiscrete.__new__( + ZerosPolesGainDiscrete, + *system, + **kwargs + ) + + # No special conversion needed + return super().__new__(cls) + + def __init__(self, *system, **kwargs): + """Initialize the zeros, poles, gain system.""" + # Conversion of lti instances is handled in __new__ + if isinstance(system[0], LinearTimeInvariant): + return + + super().__init__(**kwargs) + + self._zeros = None + self._poles = None + self._gain = None + + self.zeros, self.poles, self.gain = system + + def __repr__(self): + """Return representation of the `ZerosPolesGain` system.""" + return ( + f'{self.__class__.__name__}(\n' + f'{repr(self.zeros)},\n' + f'{repr(self.poles)},\n' + f'{repr(self.gain)},\n' + f'dt: {repr(self.dt)}\n)' + ) + + @property + def zeros(self): + """Zeros of the `ZerosPolesGain` system.""" + return self._zeros + + @zeros.setter + def zeros(self, zeros): + self._zeros = atleast_1d(zeros) + + # Update dimensions + if len(self.zeros.shape) > 1: + self.outputs, self.inputs = self.zeros.shape + else: + self.outputs = 1 + self.inputs = 1 + + @property + def poles(self): + """Poles of the `ZerosPolesGain` system.""" + return self._poles + + @poles.setter + def poles(self, poles): + self._poles = atleast_1d(poles) + + @property + def gain(self): + """Gain of the `ZerosPolesGain` system.""" + return self._gain + + @gain.setter + def gain(self, gain): + self._gain = gain + + def _copy(self, system): + """ + Copy the parameters of another `ZerosPolesGain` system. + + Parameters + ---------- + system : instance of `ZerosPolesGain` + The zeros, poles gain system that is to be copied + + """ + self.poles = system.poles + self.zeros = system.zeros + self.gain = system.gain + + def to_tf(self): + """ + Convert system representation to `TransferFunction`. + + Returns + ------- + sys : instance of `TransferFunction` + Transfer function of the current system + + """ + return TransferFunction(*zpk2tf(self.zeros, self.poles, self.gain), + **self._dt_dict) + + def to_zpk(self): + """ + Return a copy of the current 'ZerosPolesGain' system. + + Returns + ------- + sys : instance of `ZerosPolesGain` + The current system (copy) + + """ + return copy.deepcopy(self) + + def to_ss(self): + """ + Convert system representation to `StateSpace`. 
+ + Returns + ------- + sys : instance of `StateSpace` + State space model of the current system + + """ + return StateSpace(*zpk2ss(self.zeros, self.poles, self.gain), + **self._dt_dict) + + +class ZerosPolesGainContinuous(ZerosPolesGain, lti): + r""" + Continuous-time Linear Time Invariant system in zeros, poles, gain form. + + Represents the system as the continuous time transfer function + :math:`H(s)=k \prod_i (s - z[i]) / \prod_j (s - p[j])`, where :math:`k` is + the `gain`, :math:`z` are the `zeros` and :math:`p` are the `poles`. + Continuous-time `ZerosPolesGain` systems inherit additional functionality + from the `lti` class. + + Parameters + ---------- + *system : arguments + The `ZerosPolesGain` class can be instantiated with 1 or 3 + arguments. The following gives the number of input arguments and their + interpretation: + + * 1: `lti` system: (`StateSpace`, `TransferFunction` or + `ZerosPolesGain`) + * 3: array_like: (zeros, poles, gain) + + See Also + -------- + TransferFunction, StateSpace, lti + zpk2ss, zpk2tf, zpk2sos + + Notes + ----- + Changing the value of properties that are not part of the + `ZerosPolesGain` system representation (such as the `A`, `B`, `C`, `D` + state-space matrices) is very inefficient and may lead to numerical + inaccuracies. It is better to convert to the specific system + representation first. For example, call ``sys = sys.to_ss()`` before + accessing/changing the A, B, C, D system matrices. + + Examples + -------- + Construct the transfer function + :math:`H(s)=\frac{5(s - 1)(s - 2)}{(s - 3)(s - 4)}`: + + >>> from scipy import signal + + >>> signal.ZerosPolesGain([1, 2], [3, 4], 5) + ZerosPolesGainContinuous( + array([1, 2]), + array([3, 4]), + 5, + dt: None + ) + + """ + + def to_discrete(self, dt, method='zoh', alpha=None): + """ + Returns the discretized `ZerosPolesGain` system. + + Parameters: See `cont2discrete` for details. + + Returns + ------- + sys: instance of `dlti` and `ZerosPolesGain` + """ + return ZerosPolesGain( + *cont2discrete((self.zeros, self.poles, self.gain), + dt, + method=method, + alpha=alpha)[:-1], + dt=dt) + + +class ZerosPolesGainDiscrete(ZerosPolesGain, dlti): + r""" + Discrete-time Linear Time Invariant system in zeros, poles, gain form. + + Represents the system as the discrete-time transfer function + :math:`H(z)=k \prod_i (z - q[i]) / \prod_j (z - p[j])`, where :math:`k` is + the `gain`, :math:`q` are the `zeros` and :math:`p` are the `poles`. + Discrete-time `ZerosPolesGain` systems inherit additional functionality + from the `dlti` class. + + Parameters + ---------- + *system : arguments + The `ZerosPolesGain` class can be instantiated with 1 or 3 + arguments. The following gives the number of input arguments and their + interpretation: + + * 1: `dlti` system: (`StateSpace`, `TransferFunction` or + `ZerosPolesGain`) + * 3: array_like: (zeros, poles, gain) + dt: float, optional + Sampling time [s] of the discrete-time systems. Defaults to `True` + (unspecified sampling time). Must be specified as a keyword argument, + for example, ``dt=0.1``. + + See Also + -------- + TransferFunction, StateSpace, dlti + zpk2ss, zpk2tf, zpk2sos + + Notes + ----- + Changing the value of properties that are not part of the + `ZerosPolesGain` system representation (such as the `A`, `B`, `C`, `D` + state-space matrices) is very inefficient and may lead to numerical + inaccuracies. It is better to convert to the specific system + representation first. 
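A minimal sketch of `ZerosPolesGain` and its `to_tf` conversion (assuming `scipy.signal` is importable; the system is the docstring example H(s) = 5(s - 1)(s - 2) / ((s - 3)(s - 4))):

from scipy.signal import ZerosPolesGain

zpk = ZerosPolesGain([1, 2], [3, 4], 5)
tf = zpk.to_tf()
# tf.num == [ 5., -15.,  10.],  tf.den == [ 1., -7.,  12.]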
For example, call ``sys = sys.to_ss()`` before + accessing/changing the A, B, C, D system matrices. + + Examples + -------- + Construct the transfer function + :math:`H(s) = \frac{5(s - 1)(s - 2)}{(s - 3)(s - 4)}`: + + >>> from scipy import signal + + >>> signal.ZerosPolesGain([1, 2], [3, 4], 5) + ZerosPolesGainContinuous( + array([1, 2]), + array([3, 4]), + 5, + dt: None + ) + + Construct the transfer function + :math:`H(z) = \frac{5(z - 1)(z - 2)}{(z - 3)(z - 4)}` with a sampling time + of 0.1 seconds: + + >>> signal.ZerosPolesGain([1, 2], [3, 4], 5, dt=0.1) + ZerosPolesGainDiscrete( + array([1, 2]), + array([3, 4]), + 5, + dt: 0.1 + ) + + """ + pass + + +class StateSpace(LinearTimeInvariant): + r""" + Linear Time Invariant system in state-space form. + + Represents the system as the continuous-time, first order differential + equation :math:`\dot{x} = A x + B u` or the discrete-time difference + equation :math:`x[k+1] = A x[k] + B u[k]`. `StateSpace` systems + inherit additional functionality from the `lti`, respectively the `dlti` + classes, depending on which system representation is used. + + Parameters + ---------- + *system: arguments + The `StateSpace` class can be instantiated with 1 or 4 arguments. + The following gives the number of input arguments and their + interpretation: + + * 1: `lti` or `dlti` system: (`StateSpace`, `TransferFunction` or + `ZerosPolesGain`) + * 4: array_like: (A, B, C, D) + dt: float, optional + Sampling time [s] of the discrete-time systems. Defaults to `None` + (continuous-time). Must be specified as a keyword argument, for + example, ``dt=0.1``. + + See Also + -------- + TransferFunction, ZerosPolesGain, lti, dlti + ss2zpk, ss2tf, zpk2sos + + Notes + ----- + Changing the value of properties that are not part of the + `StateSpace` system representation (such as `zeros` or `poles`) is very + inefficient and may lead to numerical inaccuracies. It is better to + convert to the specific system representation first. For example, call + ``sys = sys.to_zpk()`` before accessing/changing the zeros, poles or gain. + + Examples + -------- + >>> from scipy import signal + >>> import numpy as np + >>> a = np.array([[0, 1], [0, 0]]) + >>> b = np.array([[0], [1]]) + >>> c = np.array([[1, 0]]) + >>> d = np.array([[0]]) + + >>> sys = signal.StateSpace(a, b, c, d) + >>> print(sys) + StateSpaceContinuous( + array([[0, 1], + [0, 0]]), + array([[0], + [1]]), + array([[1, 0]]), + array([[0]]), + dt: None + ) + + >>> sys.to_discrete(0.1) + StateSpaceDiscrete( + array([[1. , 0.1], + [0. , 1. ]]), + array([[0.005], + [0.1 ]]), + array([[1, 0]]), + array([[0]]), + dt: 0.1 + ) + + >>> a = np.array([[1, 0.1], [0, 1]]) + >>> b = np.array([[0.005], [0.1]]) + + >>> signal.StateSpace(a, b, c, d, dt=0.1) + StateSpaceDiscrete( + array([[1. , 0.1], + [0. , 1. 
]]), + array([[0.005], + [0.1 ]]), + array([[1, 0]]), + array([[0]]), + dt: 0.1 + ) + + """ + + # Override NumPy binary operations and ufuncs + __array_priority__ = 100.0 + __array_ufunc__ = None + + def __new__(cls, *system, **kwargs): + """Create new StateSpace object and settle inheritance.""" + # Handle object conversion if input is an instance of `lti` + if len(system) == 1 and isinstance(system[0], LinearTimeInvariant): + return system[0].to_ss() + + # Choose whether to inherit from `lti` or from `dlti` + if cls is StateSpace: + if kwargs.get('dt') is None: + return StateSpaceContinuous.__new__(StateSpaceContinuous, + *system, **kwargs) + else: + return StateSpaceDiscrete.__new__(StateSpaceDiscrete, + *system, **kwargs) + + # No special conversion needed + return super().__new__(cls) + + def __init__(self, *system, **kwargs): + """Initialize the state space lti/dlti system.""" + # Conversion of lti instances is handled in __new__ + if isinstance(system[0], LinearTimeInvariant): + return + + # Remove system arguments, not needed by parents anymore + super().__init__(**kwargs) + + self._A = None + self._B = None + self._C = None + self._D = None + + self.A, self.B, self.C, self.D = abcd_normalize(*system) + + def __repr__(self): + """Return representation of the `StateSpace` system.""" + return ( + f'{self.__class__.__name__}(\n' + f'{repr(self.A)},\n' + f'{repr(self.B)},\n' + f'{repr(self.C)},\n' + f'{repr(self.D)},\n' + f'dt: {repr(self.dt)}\n)' + ) + + def _check_binop_other(self, other): + return isinstance(other, (StateSpace, np.ndarray, float, complex, + np.number, int)) + + def __mul__(self, other): + """ + Post-multiply another system or a scalar + + Handles multiplication of systems in the sense of a frequency domain + multiplication. That means, given two systems E1(s) and E2(s), their + multiplication, H(s) = E1(s) * E2(s), means that applying H(s) to U(s) + is equivalent to first applying E2(s), and then E1(s). + + Notes + ----- + For SISO systems the order of system application does not matter. + However, for MIMO systems, where the two systems are matrices, the + order above ensures standard Matrix multiplication rules apply. + """ + if not self._check_binop_other(other): + return NotImplemented + + if isinstance(other, StateSpace): + # Disallow mix of discrete and continuous systems. 
+ if type(other) is not type(self): + return NotImplemented + + if self.dt != other.dt: + raise TypeError('Cannot multiply systems with different `dt`.') + + n1 = self.A.shape[0] + n2 = other.A.shape[0] + + # Interconnection of systems + # x1' = A1 x1 + B1 u1 + # y1 = C1 x1 + D1 u1 + # x2' = A2 x2 + B2 y1 + # y2 = C2 x2 + D2 y1 + # + # Plugging in with u1 = y2 yields + # [x1'] [A1 B1*C2 ] [x1] [B1*D2] + # [x2'] = [0 A2 ] [x2] + [B2 ] u2 + # [x1] + # y2 = [C1 D1*C2] [x2] + D1*D2 u2 + a = np.vstack((np.hstack((self.A, np.dot(self.B, other.C))), + np.hstack((zeros((n2, n1)), other.A)))) + b = np.vstack((np.dot(self.B, other.D), other.B)) + c = np.hstack((self.C, np.dot(self.D, other.C))) + d = np.dot(self.D, other.D) + else: + # Assume that other is a scalar / matrix + # For post multiplication the input gets scaled + a = self.A + b = np.dot(self.B, other) + c = self.C + d = np.dot(self.D, other) + + common_dtype = np.result_type(a.dtype, b.dtype, c.dtype, d.dtype) + return StateSpace(np.asarray(a, dtype=common_dtype), + np.asarray(b, dtype=common_dtype), + np.asarray(c, dtype=common_dtype), + np.asarray(d, dtype=common_dtype), + **self._dt_dict) + + def __rmul__(self, other): + """Pre-multiply a scalar or matrix (but not StateSpace)""" + if not self._check_binop_other(other) or isinstance(other, StateSpace): + return NotImplemented + + # For pre-multiplication only the output gets scaled + a = self.A + b = self.B + c = np.dot(other, self.C) + d = np.dot(other, self.D) + + common_dtype = np.result_type(a.dtype, b.dtype, c.dtype, d.dtype) + return StateSpace(np.asarray(a, dtype=common_dtype), + np.asarray(b, dtype=common_dtype), + np.asarray(c, dtype=common_dtype), + np.asarray(d, dtype=common_dtype), + **self._dt_dict) + + def __neg__(self): + """Negate the system (equivalent to pre-multiplying by -1).""" + return StateSpace(self.A, self.B, -self.C, -self.D, **self._dt_dict) + + def __add__(self, other): + """ + Adds two systems in the sense of frequency domain addition. + """ + if not self._check_binop_other(other): + return NotImplemented + + if isinstance(other, StateSpace): + # Disallow mix of discrete and continuous systems. 
+ if type(other) is not type(self): + raise TypeError(f'Cannot add {type(self)} and {type(other)}') + + if self.dt != other.dt: + raise TypeError('Cannot add systems with different `dt`.') + # Interconnection of systems + # x1' = A1 x1 + B1 u + # y1 = C1 x1 + D1 u + # x2' = A2 x2 + B2 u + # y2 = C2 x2 + D2 u + # y = y1 + y2 + # + # Plugging in yields + # [x1'] [A1 0 ] [x1] [B1] + # [x2'] = [0 A2] [x2] + [B2] u + # [x1] + # y = [C1 C2] [x2] + [D1 + D2] u + a = linalg.block_diag(self.A, other.A) + b = np.vstack((self.B, other.B)) + c = np.hstack((self.C, other.C)) + d = self.D + other.D + else: + other = np.atleast_2d(other) + if self.D.shape == other.shape: + # A scalar/matrix is really just a static system (A=0, B=0, C=0) + a = self.A + b = self.B + c = self.C + d = self.D + other + else: + raise ValueError("Cannot add systems with incompatible " + f"dimensions ({self.D.shape} and {other.shape})") + + common_dtype = np.result_type(a.dtype, b.dtype, c.dtype, d.dtype) + return StateSpace(np.asarray(a, dtype=common_dtype), + np.asarray(b, dtype=common_dtype), + np.asarray(c, dtype=common_dtype), + np.asarray(d, dtype=common_dtype), + **self._dt_dict) + + def __sub__(self, other): + if not self._check_binop_other(other): + return NotImplemented + + return self.__add__(-other) + + def __radd__(self, other): + if not self._check_binop_other(other): + return NotImplemented + + return self.__add__(other) + + def __rsub__(self, other): + if not self._check_binop_other(other): + return NotImplemented + + return (-self).__add__(other) + + def __truediv__(self, other): + """ + Divide by a scalar + """ + # Division by non-StateSpace scalars + if not self._check_binop_other(other) or isinstance(other, StateSpace): + return NotImplemented + + if isinstance(other, np.ndarray) and other.ndim > 0: + # It's ambiguous what this means, so disallow it + raise ValueError("Cannot divide StateSpace by non-scalar numpy arrays") + + return self.__mul__(1/other) + + @property + def A(self): + """State matrix of the `StateSpace` system.""" + return self._A + + @A.setter + def A(self, A): + self._A = _atleast_2d_or_none(A) + + @property + def B(self): + """Input matrix of the `StateSpace` system.""" + return self._B + + @B.setter + def B(self, B): + self._B = _atleast_2d_or_none(B) + self.inputs = self.B.shape[-1] + + @property + def C(self): + """Output matrix of the `StateSpace` system.""" + return self._C + + @C.setter + def C(self, C): + self._C = _atleast_2d_or_none(C) + self.outputs = self.C.shape[0] + + @property + def D(self): + """Feedthrough matrix of the `StateSpace` system.""" + return self._D + + @D.setter + def D(self, D): + self._D = _atleast_2d_or_none(D) + + def _copy(self, system): + """ + Copy the parameters of another `StateSpace` system. + + Parameters + ---------- + system : instance of `StateSpace` + The state-space system that is to be copied + + """ + self.A = system.A + self.B = system.B + self.C = system.C + self.D = system.D + + def to_tf(self, **kwargs): + """ + Convert system representation to `TransferFunction`. + + Parameters + ---------- + kwargs : dict, optional + Additional keywords passed to `ss2zpk` + + Returns + ------- + sys : instance of `TransferFunction` + Transfer function of the current system + + """ + return TransferFunction(*ss2tf(self._A, self._B, self._C, self._D, + **kwargs), **self._dt_dict) + + def to_zpk(self, **kwargs): + """ + Convert system representation to `ZerosPolesGain`. 
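A minimal sketch of the overloaded `StateSpace` operators defined above: `*` is a series connection in which the right-hand system is applied first, `+` a parallel connection, and scalars act as static gains (assuming `scipy.signal` is importable; the two first-order systems are arbitrary):

from scipy.signal import StateSpace

g1 = StateSpace([[-1.0]], [[1.0]], [[1.0]], [[0.0]])   # 1 / (s + 1)
g2 = StateSpace([[-2.0]], [[1.0]], [[1.0]], [[0.0]])   # 1 / (s + 2)

series = g1 * g2      # 1 / ((s + 1)(s + 2)): g2 applied first, then g1
parallel = g1 + g2    # 1/(s + 1) + 1/(s + 2)
scaled = 2.0 * g1     # pre-multiplication scales the output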
+ + Parameters + ---------- + kwargs : dict, optional + Additional keywords passed to `ss2zpk` + + Returns + ------- + sys : instance of `ZerosPolesGain` + Zeros, poles, gain representation of the current system + + """ + return ZerosPolesGain(*ss2zpk(self._A, self._B, self._C, self._D, + **kwargs), **self._dt_dict) + + def to_ss(self): + """ + Return a copy of the current `StateSpace` system. + + Returns + ------- + sys : instance of `StateSpace` + The current system (copy) + + """ + return copy.deepcopy(self) + + +class StateSpaceContinuous(StateSpace, lti): + r""" + Continuous-time Linear Time Invariant system in state-space form. + + Represents the system as the continuous-time, first order differential + equation :math:`\dot{x} = A x + B u`. + Continuous-time `StateSpace` systems inherit additional functionality + from the `lti` class. + + Parameters + ---------- + *system: arguments + The `StateSpace` class can be instantiated with 1 or 3 arguments. + The following gives the number of input arguments and their + interpretation: + + * 1: `lti` system: (`StateSpace`, `TransferFunction` or + `ZerosPolesGain`) + * 4: array_like: (A, B, C, D) + + See Also + -------- + TransferFunction, ZerosPolesGain, lti + ss2zpk, ss2tf, zpk2sos + + Notes + ----- + Changing the value of properties that are not part of the + `StateSpace` system representation (such as `zeros` or `poles`) is very + inefficient and may lead to numerical inaccuracies. It is better to + convert to the specific system representation first. For example, call + ``sys = sys.to_zpk()`` before accessing/changing the zeros, poles or gain. + + Examples + -------- + >>> import numpy as np + >>> from scipy import signal + + >>> a = np.array([[0, 1], [0, 0]]) + >>> b = np.array([[0], [1]]) + >>> c = np.array([[1, 0]]) + >>> d = np.array([[0]]) + + >>> sys = signal.StateSpace(a, b, c, d) + >>> print(sys) + StateSpaceContinuous( + array([[0, 1], + [0, 0]]), + array([[0], + [1]]), + array([[1, 0]]), + array([[0]]), + dt: None + ) + + """ + + def to_discrete(self, dt, method='zoh', alpha=None): + """ + Returns the discretized `StateSpace` system. + + Parameters: See `cont2discrete` for details. + + Returns + ------- + sys: instance of `dlti` and `StateSpace` + """ + return StateSpace(*cont2discrete((self.A, self.B, self.C, self.D), + dt, + method=method, + alpha=alpha)[:-1], + dt=dt) + + +class StateSpaceDiscrete(StateSpace, dlti): + r""" + Discrete-time Linear Time Invariant system in state-space form. + + Represents the system as the discrete-time difference equation + :math:`x[k+1] = A x[k] + B u[k]`. + `StateSpace` systems inherit additional functionality from the `dlti` + class. + + Parameters + ---------- + *system: arguments + The `StateSpace` class can be instantiated with 1 or 3 arguments. + The following gives the number of input arguments and their + interpretation: + + * 1: `dlti` system: (`StateSpace`, `TransferFunction` or + `ZerosPolesGain`) + * 4: array_like: (A, B, C, D) + dt: float, optional + Sampling time [s] of the discrete-time systems. Defaults to `True` + (unspecified sampling time). Must be specified as a keyword argument, + for example, ``dt=0.1``. + + See Also + -------- + TransferFunction, ZerosPolesGain, dlti + ss2zpk, ss2tf, zpk2sos + + Notes + ----- + Changing the value of properties that are not part of the + `StateSpace` system representation (such as `zeros` or `poles`) is very + inefficient and may lead to numerical inaccuracies. It is better to + convert to the specific system representation first. 
For example, call + ``sys = sys.to_zpk()`` before accessing/changing the zeros, poles or gain. + + Examples + -------- + >>> import numpy as np + >>> from scipy import signal + + >>> a = np.array([[1, 0.1], [0, 1]]) + >>> b = np.array([[0.005], [0.1]]) + >>> c = np.array([[1, 0]]) + >>> d = np.array([[0]]) + + >>> signal.StateSpace(a, b, c, d, dt=0.1) + StateSpaceDiscrete( + array([[ 1. , 0.1], + [ 0. , 1. ]]), + array([[ 0.005], + [ 0.1 ]]), + array([[1, 0]]), + array([[0]]), + dt: 0.1 + ) + + """ + pass + + +def lsim(system, U, T, X0=None, interp=True): + """ + Simulate output of a continuous-time linear system. + + Parameters + ---------- + system : an instance of the LTI class or a tuple describing the system. + The following gives the number of elements in the tuple and + the interpretation: + + * 1: (instance of `lti`) + * 2: (num, den) + * 3: (zeros, poles, gain) + * 4: (A, B, C, D) + + U : array_like + An input array describing the input at each time `T` + (interpolation is assumed between given times). If there are + multiple inputs, then each column of the rank-2 array + represents an input. If U = 0 or None, a zero input is used. + T : array_like + The time steps at which the input is defined and at which the + output is desired. Must be nonnegative, increasing, and equally spaced. + X0 : array_like, optional + The initial conditions on the state vector (zero by default). + interp : bool, optional + Whether to use linear (True, the default) or zero-order-hold (False) + interpolation for the input array. + + Returns + ------- + T : 1D ndarray + Time values for the output. + yout : 1D ndarray + System response. + xout : ndarray + Time evolution of the state vector. + + Notes + ----- + If (num, den) is passed in for ``system``, coefficients for both the + numerator and denominator should be specified in descending exponent + order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``). + + Examples + -------- + We'll use `lsim` to simulate an analog Bessel filter applied to + a signal. + + >>> import numpy as np + >>> from scipy.signal import bessel, lsim + >>> import matplotlib.pyplot as plt + + Create a low-pass Bessel filter with a cutoff of 12 Hz. + + >>> b, a = bessel(N=5, Wn=2*np.pi*12, btype='lowpass', analog=True) + + Generate data to which the filter is applied. + + >>> t = np.linspace(0, 1.25, 500, endpoint=False) + + The input signal is the sum of three sinusoidal curves, with + frequencies 4 Hz, 40 Hz, and 80 Hz. The filter should mostly + eliminate the 40 Hz and 80 Hz components, leaving just the 4 Hz signal. + + >>> u = (np.cos(2*np.pi*4*t) + 0.6*np.sin(2*np.pi*40*t) + + ... 0.5*np.cos(2*np.pi*80*t)) + + Simulate the filter with `lsim`. + + >>> tout, yout, xout = lsim((b, a), U=u, T=t) + + Plot the result. + + >>> plt.plot(t, u, 'r', alpha=0.5, linewidth=1, label='input') + >>> plt.plot(tout, yout, 'k', linewidth=1.5, label='output') + >>> plt.legend(loc='best', shadow=True, framealpha=1) + >>> plt.grid(alpha=0.3) + >>> plt.xlabel('t') + >>> plt.show() + + In a second example, we simulate a double integrator ``y'' = u``, with + a constant input ``u = 1``. We'll use the state space representation + of the integrator. + + >>> from scipy.signal import lti + >>> A = np.array([[0.0, 1.0], [0.0, 0.0]]) + >>> B = np.array([[0.0], [1.0]]) + >>> C = np.array([[1.0, 0.0]]) + >>> D = 0.0 + >>> system = lti(A, B, C, D) + + `t` and `u` define the time and input signal for the system to + be simulated. 
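A minimal sketch of the two input-interpolation modes of `lsim` documented above (assuming `scipy.signal` is importable; the plant and ramp input are arbitrary):

import numpy as np
from scipy.signal import lti, lsim

sys = lti([1.0], [1.0, 1.0])        # H(s) = 1 / (s + 1)
t = np.linspace(0, 5, 51)           # equally spaced, as lsim requires
u = t                               # ramp input

t1, y_lin, _ = lsim(sys, U=u, T=t)                 # piecewise-linear input (default)
t2, y_zoh, _ = lsim(sys, U=u, T=t, interp=False)   # zero-order-hold input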
+ + >>> t = np.linspace(0, 5, num=50) + >>> u = np.ones_like(t) + + Compute the simulation, and then plot `y`. As expected, the plot shows + the curve ``y = 0.5*t**2``. + + >>> tout, y, x = lsim(system, u, t) + >>> plt.plot(t, y) + >>> plt.grid(alpha=0.3) + >>> plt.xlabel('t') + >>> plt.show() + + """ + if isinstance(system, lti): + sys = system._as_ss() + elif isinstance(system, dlti): + raise AttributeError('lsim can only be used with continuous-time ' + 'systems.') + else: + sys = lti(*system)._as_ss() + T = atleast_1d(T) + if len(T.shape) != 1: + raise ValueError("T must be a rank-1 array.") + + A, B, C, D = map(np.asarray, (sys.A, sys.B, sys.C, sys.D)) + n_states = A.shape[0] + n_inputs = B.shape[1] + + n_steps = T.size + if X0 is None: + X0 = zeros(n_states, sys.A.dtype) + xout = np.empty((n_steps, n_states), sys.A.dtype) + + if T[0] == 0: + xout[0] = X0 + elif T[0] > 0: + # step forward to initial time, with zero input + xout[0] = dot(X0, linalg.expm(transpose(A) * T[0])) + else: + raise ValueError("Initial time must be nonnegative") + + no_input = (U is None or + (isinstance(U, (int, float)) and U == 0.) or + not np.any(U)) + + if n_steps == 1: + yout = squeeze(xout @ C.T) + if not no_input: + yout += squeeze(U @ D.T) + return T, yout, squeeze(xout) + + dt = T[1] - T[0] + if not np.allclose(np.diff(T), dt): + raise ValueError("Time steps are not equally spaced.") + + if no_input: + # Zero input: just use matrix exponential + # take transpose because state is a row vector + expAT_dt = linalg.expm(A.T * dt) + for i in range(1, n_steps): + xout[i] = xout[i-1] @ expAT_dt + yout = squeeze(xout @ C.T) + return T, yout, squeeze(xout) + + # Nonzero input + U = atleast_1d(U) + if U.ndim == 1: + U = U[:, np.newaxis] + + if U.shape[0] != n_steps: + raise ValueError("U must have the same number of rows " + "as elements in T.") + + if U.shape[1] != n_inputs: + raise ValueError("System does not define that many inputs.") + + if not interp: + # Zero-order hold + # Algorithm: to integrate from time 0 to time dt, we solve + # xdot = A x + B u, x(0) = x0 + # udot = 0, u(0) = u0. + # + # Solution is + # [ x(dt) ] [ A*dt B*dt ] [ x0 ] + # [ u(dt) ] = exp [ 0 0 ] [ u0 ] + M = np.vstack([np.hstack([A * dt, B * dt]), + np.zeros((n_inputs, n_states + n_inputs))]) + # transpose everything because the state and input are row vectors + expMT = linalg.expm(M.T) + Ad = expMT[:n_states, :n_states] + Bd = expMT[n_states:, :n_states] + for i in range(1, n_steps): + xout[i] = xout[i-1] @ Ad + U[i-1] @ Bd + else: + # Linear interpolation between steps + # Algorithm: to integrate from time 0 to time dt, with linear + # interpolation between inputs u(0) = u0 and u(dt) = u1, we solve + # xdot = A x + B u, x(0) = x0 + # udot = (u1 - u0) / dt, u(0) = u0. 
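+        # (Note: stacking x, u and u1 - u0 into one augmented state turns the
+        #  linearly interpolated input into a constant-coefficient linear
+        #  system, so a single matrix exponential per step gives the exact
+        #  update under that interpolation.)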
+ # + # Solution is + # [ x(dt) ] [ A*dt B*dt 0 ] [ x0 ] + # [ u(dt) ] = exp [ 0 0 I ] [ u0 ] + # [u1 - u0] [ 0 0 0 ] [u1 - u0] + M = np.vstack([np.hstack([A * dt, B * dt, + np.zeros((n_states, n_inputs))]), + np.hstack([np.zeros((n_inputs, n_states + n_inputs)), + np.identity(n_inputs)]), + np.zeros((n_inputs, n_states + 2 * n_inputs))]) + expMT = linalg.expm(M.T) + Ad = expMT[:n_states, :n_states] + Bd1 = expMT[n_states+n_inputs:, :n_states] + Bd0 = expMT[n_states:n_states + n_inputs, :n_states] - Bd1 + for i in range(1, n_steps): + xout[i] = xout[i-1] @ Ad + U[i-1] @ Bd0 + U[i] @ Bd1 + + yout = squeeze(xout @ C.T) + squeeze(U @ D.T) + return T, yout, squeeze(xout) + + +def _default_response_times(A, n): + """Compute a reasonable set of time samples for the response time. + + This function is used by `impulse` and `step` to compute the response time + when the `T` argument to the function is None. + + Parameters + ---------- + A : array_like + The system matrix, which is square. + n : int + The number of time samples to generate. + + Returns + ------- + t : ndarray + The 1-D array of length `n` of time samples at which the response + is to be computed. + """ + # Create a reasonable time interval. + # TODO: This could use some more work. + # For example, what is expected when the system is unstable? + vals = linalg.eigvals(A) + r = min(abs(real(vals))) + if r == 0.0: + r = 1.0 + tc = 1.0 / r + t = linspace(0.0, 7 * tc, n) + return t + + +def impulse(system, X0=None, T=None, N=None): + """Impulse response of continuous-time system. + + Parameters + ---------- + system : an instance of the LTI class or a tuple of array_like + describing the system. + The following gives the number of elements in the tuple and + the interpretation: + + * 1 (instance of `lti`) + * 2 (num, den) + * 3 (zeros, poles, gain) + * 4 (A, B, C, D) + + X0 : array_like, optional + Initial state-vector. Defaults to zero. + T : array_like, optional + Time points. Computed if not given. + N : int, optional + The number of time points to compute (if `T` is not given). + + Returns + ------- + T : ndarray + A 1-D array of time points. + yout : ndarray + A 1-D array containing the impulse response of the system (except for + singularities at zero). + + Notes + ----- + If (num, den) is passed in for ``system``, coefficients for both the + numerator and denominator should be specified in descending exponent + order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``). + + Examples + -------- + Compute the impulse response of a second order system with a repeated + root: ``x''(t) + 2*x'(t) + x(t) = u(t)`` + + >>> from scipy import signal + >>> system = ([1.0], [1.0, 2.0, 1.0]) + >>> t, y = signal.impulse(system) + >>> import matplotlib.pyplot as plt + >>> plt.plot(t, y) + + """ + if isinstance(system, lti): + sys = system._as_ss() + elif isinstance(system, dlti): + raise AttributeError('impulse can only be used with continuous-time ' + 'systems.') + else: + sys = lti(*system)._as_ss() + if X0 is None: + X = squeeze(sys.B) + else: + X = squeeze(sys.B + X0) + if N is None: + N = 100 + if T is None: + T = _default_response_times(sys.A, N) + else: + T = asarray(T) + + _, h, _ = lsim(sys, 0., T, X, interp=False) + return T, h + + +def step(system, X0=None, T=None, N=None): + """Step response of continuous-time system. + + Parameters + ---------- + system : an instance of the LTI class or a tuple of array_like + describing the system. 
+ The following gives the number of elements in the tuple and + the interpretation: + + * 1 (instance of `lti`) + * 2 (num, den) + * 3 (zeros, poles, gain) + * 4 (A, B, C, D) + + X0 : array_like, optional + Initial state-vector (default is zero). + T : array_like, optional + Time points (computed if not given). + N : int, optional + Number of time points to compute if `T` is not given. + + Returns + ------- + T : 1D ndarray + Output time points. + yout : 1D ndarray + Step response of system. + + + Notes + ----- + If (num, den) is passed in for ``system``, coefficients for both the + numerator and denominator should be specified in descending exponent + order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``). + + Examples + -------- + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + >>> lti = signal.lti([1.0], [1.0, 1.0]) + >>> t, y = signal.step(lti) + >>> plt.plot(t, y) + >>> plt.xlabel('Time [s]') + >>> plt.ylabel('Amplitude') + >>> plt.title('Step response for 1. Order Lowpass') + >>> plt.grid() + + """ + if isinstance(system, lti): + sys = system._as_ss() + elif isinstance(system, dlti): + raise AttributeError('step can only be used with continuous-time ' + 'systems.') + else: + sys = lti(*system)._as_ss() + if N is None: + N = 100 + if T is None: + T = _default_response_times(sys.A, N) + else: + T = asarray(T) + U = ones(T.shape, sys.A.dtype) + vals = lsim(sys, U, T, X0=X0, interp=False) + return vals[0], vals[1] + + +def bode(system, w=None, n=100): + """ + Calculate Bode magnitude and phase data of a continuous-time system. + + Parameters + ---------- + system : an instance of the LTI class or a tuple describing the system. + The following gives the number of elements in the tuple and + the interpretation: + + * 1 (instance of `lti`) + * 2 (num, den) + * 3 (zeros, poles, gain) + * 4 (A, B, C, D) + + w : array_like, optional + Array of frequencies (in rad/s). Magnitude and phase data is calculated + for every value in this array. If not given a reasonable set will be + calculated. + n : int, optional + Number of frequency points to compute if `w` is not given. The `n` + frequencies are logarithmically spaced in an interval chosen to + include the influence of the poles and zeros of the system. + + Returns + ------- + w : 1D ndarray + Frequency array [rad/s] + mag : 1D ndarray + Magnitude array [dB] + phase : 1D ndarray + Phase array [deg] + + Notes + ----- + If (num, den) is passed in for ``system``, coefficients for both the + numerator and denominator should be specified in descending exponent + order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``). + + .. versionadded:: 0.11.0 + + Examples + -------- + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + + >>> sys = signal.TransferFunction([1], [1, 1]) + >>> w, mag, phase = signal.bode(sys) + + >>> plt.figure() + >>> plt.semilogx(w, mag) # Bode magnitude plot + >>> plt.figure() + >>> plt.semilogx(w, phase) # Bode phase plot + >>> plt.show() + + """ + w, y = freqresp(system, w=w, n=n) + + mag = 20.0 * np.log10(abs(y)) + phase = np.unwrap(np.arctan2(y.imag, y.real)) * 180.0 / np.pi + + return w, mag, phase + + +def freqresp(system, w=None, n=10000): + r"""Calculate the frequency response of a continuous-time system. + + Parameters + ---------- + system : an instance of the `lti` class or a tuple describing the system. 
+ The following gives the number of elements in the tuple and + the interpretation: + + * 1 (instance of `lti`) + * 2 (num, den) + * 3 (zeros, poles, gain) + * 4 (A, B, C, D) + + w : array_like, optional + Array of frequencies (in rad/s). Magnitude and phase data is + calculated for every value in this array. If not given, a reasonable + set will be calculated. + n : int, optional + Number of frequency points to compute if `w` is not given. The `n` + frequencies are logarithmically spaced in an interval chosen to + include the influence of the poles and zeros of the system. + + Returns + ------- + w : 1D ndarray + Frequency array [rad/s] + H : 1D ndarray + Array of complex magnitude values + + Notes + ----- + If (num, den) is passed in for ``system``, coefficients for both the + numerator and denominator should be specified in descending exponent + order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``). + + Examples + -------- + Generating the Nyquist plot of a transfer function + + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + + Construct the transfer function :math:`H(s) = \frac{5}{(s-1)^3}`: + + >>> s1 = signal.ZerosPolesGain([], [1, 1, 1], [5]) + + >>> w, H = signal.freqresp(s1) + + >>> plt.figure() + >>> plt.plot(H.real, H.imag, "b") + >>> plt.plot(H.real, -H.imag, "r") + >>> plt.show() + """ + if isinstance(system, lti): + if isinstance(system, (TransferFunction, ZerosPolesGain)): + sys = system + else: + sys = system._as_zpk() + elif isinstance(system, dlti): + raise AttributeError('freqresp can only be used with continuous-time ' + 'systems.') + else: + sys = lti(*system)._as_zpk() + + if sys.inputs != 1 or sys.outputs != 1: + raise ValueError("freqresp() requires a SISO (single input, single " + "output) system.") + + if w is not None: + worN = w + else: + worN = n + + if isinstance(sys, TransferFunction): + # In the call to freqs(), sys.num.ravel() is used because there are + # cases where sys.num is a 2-D array with a single row. + w, h = freqs(sys.num.ravel(), sys.den, worN=worN) + + elif isinstance(sys, ZerosPolesGain): + w, h = freqs_zpk(sys.zeros, sys.poles, sys.gain, worN=worN) + + return w, h + + +# This class will be used by place_poles to return its results +# see https://code.activestate.com/recipes/52308/ +class Bunch: + def __init__(self, **kwds): + self.__dict__.update(kwds) + + +def _valid_inputs(A, B, poles, method, rtol, maxiter): + """ + Check the poles come in complex conjugate pairs + Check shapes of A, B and poles are compatible. 
+ Check the method chosen is compatible with provided poles + Return update method to use and ordered poles + + """ + poles = np.asarray(poles) + if poles.ndim > 1: + raise ValueError("Poles must be a 1D array like.") + # Will raise ValueError if poles do not come in complex conjugates pairs + poles = _order_complex_poles(poles) + if A.ndim > 2: + raise ValueError("A must be a 2D array/matrix.") + if B.ndim > 2: + raise ValueError("B must be a 2D array/matrix") + if A.shape[0] != A.shape[1]: + raise ValueError("A must be square") + if len(poles) > A.shape[0]: + raise ValueError("maximum number of poles is %d but you asked for %d" % + (A.shape[0], len(poles))) + if len(poles) < A.shape[0]: + raise ValueError("number of poles is %d but you should provide %d" % + (len(poles), A.shape[0])) + r = np.linalg.matrix_rank(B) + for p in poles: + if sum(p == poles) > r: + raise ValueError("at least one of the requested pole is repeated " + "more than rank(B) times") + # Choose update method + update_loop = _YT_loop + if method not in ('KNV0','YT'): + raise ValueError("The method keyword must be one of 'YT' or 'KNV0'") + + if method == "KNV0": + update_loop = _KNV0_loop + if not all(np.isreal(poles)): + raise ValueError("Complex poles are not supported by KNV0") + + if maxiter < 1: + raise ValueError("maxiter must be at least equal to 1") + + # We do not check rtol <= 0 as the user can use a negative rtol to + # force maxiter iterations + if rtol > 1: + raise ValueError("rtol can not be greater than 1") + + return update_loop, poles + + +def _order_complex_poles(poles): + """ + Check we have complex conjugates pairs and reorder P according to YT, ie + real_poles, complex_i, conjugate complex_i, .... + The lexicographic sort on the complex poles is added to help the user to + compare sets of poles. + """ + ordered_poles = np.sort(poles[np.isreal(poles)]) + im_poles = [] + for p in np.sort(poles[np.imag(poles) < 0]): + if np.conj(p) in poles: + im_poles.extend((p, np.conj(p))) + + ordered_poles = np.hstack((ordered_poles, im_poles)) + + if poles.shape[0] != len(ordered_poles): + raise ValueError("Complex poles must come with their conjugates") + return ordered_poles + + +def _KNV0(B, ker_pole, transfer_matrix, j, poles): + """ + Algorithm "KNV0" Kautsky et Al. Robust pole + assignment in linear state feedback, Int journal of Control + 1985, vol 41 p 1129->1155 + https://la.epfl.ch/files/content/sites/la/files/ + users/105941/public/KautskyNicholsDooren + + """ + # Remove xj form the base + transfer_matrix_not_j = np.delete(transfer_matrix, j, axis=1) + # If we QR this matrix in full mode Q=Q0|Q1 + # then Q1 will be a single column orthogonal to + # Q0, that's what we are looking for ! + + # After merge of gh-4249 great speed improvements could be achieved + # using QR updates instead of full QR in the line below + + # To debug with numpy qr uncomment the line below + # Q, R = np.linalg.qr(transfer_matrix_not_j, mode="complete") + Q, R = s_qr(transfer_matrix_not_j, mode="full") + + mat_ker_pj = np.dot(ker_pole[j], ker_pole[j].T) + yj = np.dot(mat_ker_pj, Q[:, -1]) + + # If Q[:, -1] is "almost" orthogonal to ker_pole[j] its + # projection into ker_pole[j] will yield a vector + # close to 0. As we are looking for a vector in ker_pole[j] + # simply stick with transfer_matrix[:, j] (unless someone provides me with + # a better choice ?) 
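+    # (Note: ker_pole[j] has orthonormal columns, taken from a QR
+    #  decomposition, so ker_pole[j] @ ker_pole[j].T above is the orthogonal
+    #  projector onto its column space and yj is the projection of Q[:, -1]
+    #  onto that space.)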
+ + if not np.allclose(yj, 0): + xj = yj/np.linalg.norm(yj) + transfer_matrix[:, j] = xj + + # KNV does not support complex poles, using YT technique the two lines + # below seem to work 9 out of 10 times but it is not reliable enough: + # transfer_matrix[:, j]=real(xj) + # transfer_matrix[:, j+1]=imag(xj) + + # Add this at the beginning of this function if you wish to test + # complex support: + # if ~np.isreal(P[j]) and (j>=B.shape[0]-1 or P[j]!=np.conj(P[j+1])): + # return + # Problems arise when imag(xj)=>0 I have no idea on how to fix this + + +def _YT_real(ker_pole, Q, transfer_matrix, i, j): + """ + Applies algorithm from YT section 6.1 page 19 related to real pairs + """ + # step 1 page 19 + u = Q[:, -2, np.newaxis] + v = Q[:, -1, np.newaxis] + + # step 2 page 19 + m = np.dot(np.dot(ker_pole[i].T, np.dot(u, v.T) - + np.dot(v, u.T)), ker_pole[j]) + + # step 3 page 19 + um, sm, vm = np.linalg.svd(m) + # mu1, mu2 two first columns of U => 2 first lines of U.T + mu1, mu2 = um.T[:2, :, np.newaxis] + # VM is V.T with numpy we want the first two lines of V.T + nu1, nu2 = vm[:2, :, np.newaxis] + + # what follows is a rough python translation of the formulas + # in section 6.2 page 20 (step 4) + transfer_matrix_j_mo_transfer_matrix_j = np.vstack(( + transfer_matrix[:, i, np.newaxis], + transfer_matrix[:, j, np.newaxis])) + + if not np.allclose(sm[0], sm[1]): + ker_pole_imo_mu1 = np.dot(ker_pole[i], mu1) + ker_pole_i_nu1 = np.dot(ker_pole[j], nu1) + ker_pole_mu_nu = np.vstack((ker_pole_imo_mu1, ker_pole_i_nu1)) + else: + ker_pole_ij = np.vstack(( + np.hstack((ker_pole[i], + np.zeros(ker_pole[i].shape))), + np.hstack((np.zeros(ker_pole[j].shape), + ker_pole[j])) + )) + mu_nu_matrix = np.vstack( + (np.hstack((mu1, mu2)), np.hstack((nu1, nu2))) + ) + ker_pole_mu_nu = np.dot(ker_pole_ij, mu_nu_matrix) + transfer_matrix_ij = np.dot(np.dot(ker_pole_mu_nu, ker_pole_mu_nu.T), + transfer_matrix_j_mo_transfer_matrix_j) + if not np.allclose(transfer_matrix_ij, 0): + transfer_matrix_ij = (np.sqrt(2)*transfer_matrix_ij / + np.linalg.norm(transfer_matrix_ij)) + transfer_matrix[:, i] = transfer_matrix_ij[ + :transfer_matrix[:, i].shape[0], 0 + ] + transfer_matrix[:, j] = transfer_matrix_ij[ + transfer_matrix[:, i].shape[0]:, 0 + ] + else: + # As in knv0 if transfer_matrix_j_mo_transfer_matrix_j is orthogonal to + # Vect{ker_pole_mu_nu} assign transfer_matrixi/transfer_matrix_j to + # ker_pole_mu_nu and iterate. As we are looking for a vector in + # Vect{Matker_pole_MU_NU} (see section 6.1 page 19) this might help + # (that's a guess, not a claim !) 
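+        # (Column 0 of ker_pole_mu_nu is split in two below: its upper half
+        #  becomes the new column i of transfer_matrix and its lower half the
+        #  new column j.)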
+ transfer_matrix[:, i] = ker_pole_mu_nu[ + :transfer_matrix[:, i].shape[0], 0 + ] + transfer_matrix[:, j] = ker_pole_mu_nu[ + transfer_matrix[:, i].shape[0]:, 0 + ] + + +def _YT_complex(ker_pole, Q, transfer_matrix, i, j): + """ + Applies algorithm from YT section 6.2 page 20 related to complex pairs + """ + # step 1 page 20 + ur = np.sqrt(2)*Q[:, -2, np.newaxis] + ui = np.sqrt(2)*Q[:, -1, np.newaxis] + u = ur + 1j*ui + + # step 2 page 20 + ker_pole_ij = ker_pole[i] + m = np.dot(np.dot(np.conj(ker_pole_ij.T), np.dot(u, np.conj(u).T) - + np.dot(np.conj(u), u.T)), ker_pole_ij) + + # step 3 page 20 + e_val, e_vec = np.linalg.eig(m) + # sort eigenvalues according to their module + e_val_idx = np.argsort(np.abs(e_val)) + mu1 = e_vec[:, e_val_idx[-1], np.newaxis] + mu2 = e_vec[:, e_val_idx[-2], np.newaxis] + + # what follows is a rough python translation of the formulas + # in section 6.2 page 20 (step 4) + + # remember transfer_matrix_i has been split as + # transfer_matrix[i]=real(transfer_matrix_i) and + # transfer_matrix[j]=imag(transfer_matrix_i) + transfer_matrix_j_mo_transfer_matrix_j = ( + transfer_matrix[:, i, np.newaxis] + + 1j*transfer_matrix[:, j, np.newaxis] + ) + if not np.allclose(np.abs(e_val[e_val_idx[-1]]), + np.abs(e_val[e_val_idx[-2]])): + ker_pole_mu = np.dot(ker_pole_ij, mu1) + else: + mu1_mu2_matrix = np.hstack((mu1, mu2)) + ker_pole_mu = np.dot(ker_pole_ij, mu1_mu2_matrix) + transfer_matrix_i_j = np.dot(np.dot(ker_pole_mu, np.conj(ker_pole_mu.T)), + transfer_matrix_j_mo_transfer_matrix_j) + + if not np.allclose(transfer_matrix_i_j, 0): + transfer_matrix_i_j = (transfer_matrix_i_j / + np.linalg.norm(transfer_matrix_i_j)) + transfer_matrix[:, i] = np.real(transfer_matrix_i_j[:, 0]) + transfer_matrix[:, j] = np.imag(transfer_matrix_i_j[:, 0]) + else: + # same idea as in YT_real + transfer_matrix[:, i] = np.real(ker_pole_mu[:, 0]) + transfer_matrix[:, j] = np.imag(ker_pole_mu[:, 0]) + + +def _YT_loop(ker_pole, transfer_matrix, poles, B, maxiter, rtol): + """ + Algorithm "YT" Tits, Yang. Globally Convergent + Algorithms for Robust Pole Assignment by State Feedback + https://hdl.handle.net/1903/5598 + The poles P have to be sorted accordingly to section 6.2 page 20 + + """ + # The IEEE edition of the YT paper gives useful information on the + # optimal update order for the real poles in order to minimize the number + # of times we have to loop over all poles, see page 1442 + nb_real = poles[np.isreal(poles)].shape[0] + # hnb => Half Nb Real + hnb = nb_real // 2 + + # Stick to the indices in the paper and then remove one to get numpy array + # index it is a bit easier to link the code to the paper this way even if it + # is not very clean. 
The paper is unclear about what should be done when + # there is only one real pole => use KNV0 on this real pole seem to work + if nb_real > 0: + #update the biggest real pole with the smallest one + update_order = [[nb_real], [1]] + else: + update_order = [[],[]] + + r_comp = np.arange(nb_real+1, len(poles)+1, 2) + # step 1.a + r_p = np.arange(1, hnb+nb_real % 2) + update_order[0].extend(2*r_p) + update_order[1].extend(2*r_p+1) + # step 1.b + update_order[0].extend(r_comp) + update_order[1].extend(r_comp+1) + # step 1.c + r_p = np.arange(1, hnb+1) + update_order[0].extend(2*r_p-1) + update_order[1].extend(2*r_p) + # step 1.d + if hnb == 0 and np.isreal(poles[0]): + update_order[0].append(1) + update_order[1].append(1) + update_order[0].extend(r_comp) + update_order[1].extend(r_comp+1) + # step 2.a + r_j = np.arange(2, hnb+nb_real % 2) + for j in r_j: + for i in range(1, hnb+1): + update_order[0].append(i) + update_order[1].append(i+j) + # step 2.b + if hnb == 0 and np.isreal(poles[0]): + update_order[0].append(1) + update_order[1].append(1) + update_order[0].extend(r_comp) + update_order[1].extend(r_comp+1) + # step 2.c + r_j = np.arange(2, hnb+nb_real % 2) + for j in r_j: + for i in range(hnb+1, nb_real+1): + idx_1 = i+j + if idx_1 > nb_real: + idx_1 = i+j-nb_real + update_order[0].append(i) + update_order[1].append(idx_1) + # step 2.d + if hnb == 0 and np.isreal(poles[0]): + update_order[0].append(1) + update_order[1].append(1) + update_order[0].extend(r_comp) + update_order[1].extend(r_comp+1) + # step 3.a + for i in range(1, hnb+1): + update_order[0].append(i) + update_order[1].append(i+hnb) + # step 3.b + if hnb == 0 and np.isreal(poles[0]): + update_order[0].append(1) + update_order[1].append(1) + update_order[0].extend(r_comp) + update_order[1].extend(r_comp+1) + + update_order = np.array(update_order).T-1 + stop = False + nb_try = 0 + while nb_try < maxiter and not stop: + det_transfer_matrixb = np.abs(np.linalg.det(transfer_matrix)) + for i, j in update_order: + if i == j: + assert i == 0, "i!=0 for KNV call in YT" + assert np.isreal(poles[i]), "calling KNV on a complex pole" + _KNV0(B, ker_pole, transfer_matrix, i, poles) + else: + transfer_matrix_not_i_j = np.delete(transfer_matrix, (i, j), + axis=1) + # after merge of gh-4249 great speed improvements could be + # achieved using QR updates instead of full QR in the line below + + #to debug with numpy qr uncomment the line below + #Q, _ = np.linalg.qr(transfer_matrix_not_i_j, mode="complete") + Q, _ = s_qr(transfer_matrix_not_i_j, mode="full") + + if np.isreal(poles[i]): + assert np.isreal(poles[j]), "mixing real and complex " + \ + "in YT_real" + str(poles) + _YT_real(ker_pole, Q, transfer_matrix, i, j) + else: + assert ~np.isreal(poles[i]), "mixing real and complex " + \ + "in YT_real" + str(poles) + _YT_complex(ker_pole, Q, transfer_matrix, i, j) + + det_transfer_matrix = np.max((np.sqrt(np.spacing(1)), + np.abs(np.linalg.det(transfer_matrix)))) + cur_rtol = np.abs( + (det_transfer_matrix - + det_transfer_matrixb) / + det_transfer_matrix) + if cur_rtol < rtol and det_transfer_matrix > np.sqrt(np.spacing(1)): + # Convergence test from YT page 21 + stop = True + nb_try += 1 + return stop, cur_rtol, nb_try + + +def _KNV0_loop(ker_pole, transfer_matrix, poles, B, maxiter, rtol): + """ + Loop over all poles one by one and apply KNV method 0 algorithm + """ + # This method is useful only because we need to be able to call + # _KNV0 from YT without looping over all poles, otherwise it would + # have been fine to mix _KNV0_loop and 
_KNV0 in a single function + stop = False + nb_try = 0 + while nb_try < maxiter and not stop: + det_transfer_matrixb = np.abs(np.linalg.det(transfer_matrix)) + for j in range(B.shape[0]): + _KNV0(B, ker_pole, transfer_matrix, j, poles) + + det_transfer_matrix = np.max((np.sqrt(np.spacing(1)), + np.abs(np.linalg.det(transfer_matrix)))) + cur_rtol = np.abs((det_transfer_matrix - det_transfer_matrixb) / + det_transfer_matrix) + if cur_rtol < rtol and det_transfer_matrix > np.sqrt(np.spacing(1)): + # Convergence test from YT page 21 + stop = True + + nb_try += 1 + return stop, cur_rtol, nb_try + + +def place_poles(A, B, poles, method="YT", rtol=1e-3, maxiter=30): + """ + Compute K such that eigenvalues (A - dot(B, K))=poles. + + K is the gain matrix such as the plant described by the linear system + ``AX+BU`` will have its closed-loop poles, i.e the eigenvalues ``A - B*K``, + as close as possible to those asked for in poles. + + SISO, MISO and MIMO systems are supported. + + Parameters + ---------- + A, B : ndarray + State-space representation of linear system ``AX + BU``. + poles : array_like + Desired real poles and/or complex conjugates poles. + Complex poles are only supported with ``method="YT"`` (default). + method: {'YT', 'KNV0'}, optional + Which method to choose to find the gain matrix K. One of: + + - 'YT': Yang Tits + - 'KNV0': Kautsky, Nichols, Van Dooren update method 0 + + See References and Notes for details on the algorithms. + rtol: float, optional + After each iteration the determinant of the eigenvectors of + ``A - B*K`` is compared to its previous value, when the relative + error between these two values becomes lower than `rtol` the algorithm + stops. Default is 1e-3. + maxiter: int, optional + Maximum number of iterations to compute the gain matrix. + Default is 30. + + Returns + ------- + full_state_feedback : Bunch object + full_state_feedback is composed of: + gain_matrix : 1-D ndarray + The closed loop matrix K such as the eigenvalues of ``A-BK`` + are as close as possible to the requested poles. + computed_poles : 1-D ndarray + The poles corresponding to ``A-BK`` sorted as first the real + poles in increasing order, then the complex conjugates in + lexicographic order. + requested_poles : 1-D ndarray + The poles the algorithm was asked to place sorted as above, + they may differ from what was achieved. + X : 2-D ndarray + The transfer matrix such as ``X * diag(poles) = (A - B*K)*X`` + (see Notes) + rtol : float + The relative tolerance achieved on ``det(X)`` (see Notes). + `rtol` will be NaN if it is possible to solve the system + ``diag(poles) = (A - B*K)``, or 0 when the optimization + algorithms can't do anything i.e when ``B.shape[1] == 1``. + nb_iter : int + The number of iterations performed before converging. + `nb_iter` will be NaN if it is possible to solve the system + ``diag(poles) = (A - B*K)``, or 0 when the optimization + algorithms can't do anything i.e when ``B.shape[1] == 1``. + + Notes + ----- + The Tits and Yang (YT), [2]_ paper is an update of the original Kautsky et + al. (KNV) paper [1]_. KNV relies on rank-1 updates to find the transfer + matrix X such that ``X * diag(poles) = (A - B*K)*X``, whereas YT uses + rank-2 updates. This yields on average more robust solutions (see [2]_ + pp 21-22), furthermore the YT algorithm supports complex poles whereas KNV + does not in its original version. Only update method 0 proposed by KNV has + been implemented here, hence the name ``'KNV0'``. 
+ + KNV extended to complex poles is used in Matlab's ``place`` function, YT is + distributed under a non-free licence by Slicot under the name ``robpole``. + It is unclear and undocumented how KNV0 has been extended to complex poles + (Tits and Yang claim on page 14 of their paper that their method can not be + used to extend KNV to complex poles), therefore only YT supports them in + this implementation. + + As the solution to the problem of pole placement is not unique for MIMO + systems, both methods start with a tentative transfer matrix which is + altered in various way to increase its determinant. Both methods have been + proven to converge to a stable solution, however depending on the way the + initial transfer matrix is chosen they will converge to different + solutions and therefore there is absolutely no guarantee that using + ``'KNV0'`` will yield results similar to Matlab's or any other + implementation of these algorithms. + + Using the default method ``'YT'`` should be fine in most cases; ``'KNV0'`` + is only provided because it is needed by ``'YT'`` in some specific cases. + Furthermore ``'YT'`` gives on average more robust results than ``'KNV0'`` + when ``abs(det(X))`` is used as a robustness indicator. + + [2]_ is available as a technical report on the following URL: + https://hdl.handle.net/1903/5598 + + References + ---------- + .. [1] J. Kautsky, N.K. Nichols and P. van Dooren, "Robust pole assignment + in linear state feedback", International Journal of Control, Vol. 41 + pp. 1129-1155, 1985. + .. [2] A.L. Tits and Y. Yang, "Globally convergent algorithms for robust + pole assignment by state feedback", IEEE Transactions on Automatic + Control, Vol. 41, pp. 1432-1452, 1996. + + Examples + -------- + A simple example demonstrating real pole placement using both KNV and YT + algorithms. This is example number 1 from section 4 of the reference KNV + publication ([1]_): + + >>> import numpy as np + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + + >>> A = np.array([[ 1.380, -0.2077, 6.715, -5.676 ], + ... [-0.5814, -4.290, 0, 0.6750 ], + ... [ 1.067, 4.273, -6.654, 5.893 ], + ... [ 0.0480, 4.273, 1.343, -2.104 ]]) + >>> B = np.array([[ 0, 5.679 ], + ... [ 1.136, 1.136 ], + ... [ 0, 0, ], + ... [-3.146, 0 ]]) + >>> P = np.array([-0.2, -0.5, -5.0566, -8.6659]) + + Now compute K with KNV method 0, with the default YT method and with the YT + method while forcing 100 iterations of the algorithm and print some results + after each call. + + >>> fsf1 = signal.place_poles(A, B, P, method='KNV0') + >>> fsf1.gain_matrix + array([[ 0.20071427, -0.96665799, 0.24066128, -0.10279785], + [ 0.50587268, 0.57779091, 0.51795763, -0.41991442]]) + + >>> fsf2 = signal.place_poles(A, B, P) # uses YT method + >>> fsf2.computed_poles + array([-8.6659, -5.0566, -0.5 , -0.2 ]) + + >>> fsf3 = signal.place_poles(A, B, P, rtol=-1, maxiter=100) + >>> fsf3.X + array([[ 0.52072442+0.j, -0.08409372+0.j, -0.56847937+0.j, 0.74823657+0.j], + [-0.04977751+0.j, -0.80872954+0.j, 0.13566234+0.j, -0.29322906+0.j], + [-0.82266932+0.j, -0.19168026+0.j, -0.56348322+0.j, -0.43815060+0.j], + [ 0.22267347+0.j, 0.54967577+0.j, -0.58387806+0.j, -0.40271926+0.j]]) + + The absolute value of the determinant of X is a good indicator to check the + robustness of the results, both ``'KNV0'`` and ``'YT'`` aim at maximizing + it. 
Below a comparison of the robustness of the results above: + + >>> abs(np.linalg.det(fsf1.X)) < abs(np.linalg.det(fsf2.X)) + True + >>> abs(np.linalg.det(fsf2.X)) < abs(np.linalg.det(fsf3.X)) + True + + Now a simple example for complex poles: + + >>> A = np.array([[ 0, 7/3., 0, 0 ], + ... [ 0, 0, 0, 7/9. ], + ... [ 0, 0, 0, 0 ], + ... [ 0, 0, 0, 0 ]]) + >>> B = np.array([[ 0, 0 ], + ... [ 0, 0 ], + ... [ 1, 0 ], + ... [ 0, 1 ]]) + >>> P = np.array([-3, -1, -2-1j, -2+1j]) / 3. + >>> fsf = signal.place_poles(A, B, P, method='YT') + + We can plot the desired and computed poles in the complex plane: + + >>> t = np.linspace(0, 2*np.pi, 401) + >>> plt.plot(np.cos(t), np.sin(t), 'k--') # unit circle + >>> plt.plot(fsf.requested_poles.real, fsf.requested_poles.imag, + ... 'wo', label='Desired') + >>> plt.plot(fsf.computed_poles.real, fsf.computed_poles.imag, 'bx', + ... label='Placed') + >>> plt.grid() + >>> plt.axis('image') + >>> plt.axis([-1.1, 1.1, -1.1, 1.1]) + >>> plt.legend(bbox_to_anchor=(1.05, 1), loc=2, numpoints=1) + + """ + # Move away all the inputs checking, it only adds noise to the code + update_loop, poles = _valid_inputs(A, B, poles, method, rtol, maxiter) + + # The current value of the relative tolerance we achieved + cur_rtol = 0 + # The number of iterations needed before converging + nb_iter = 0 + + # Step A: QR decomposition of B page 1132 KN + # to debug with numpy qr uncomment the line below + # u, z = np.linalg.qr(B, mode="complete") + u, z = s_qr(B, mode="full") + rankB = np.linalg.matrix_rank(B) + u0 = u[:, :rankB] + u1 = u[:, rankB:] + z = z[:rankB, :] + + # If we can use the identity matrix as X the solution is obvious + if B.shape[0] == rankB: + # if B is square and full rank there is only one solution + # such as (A+BK)=inv(X)*diag(P)*X with X=eye(A.shape[0]) + # i.e K=inv(B)*(diag(P)-A) + # if B has as many lines as its rank (but not square) there are many + # solutions and we can choose one using least squares + # => use lstsq in both cases. + # In both cases the transfer matrix X will be eye(A.shape[0]) and I + # can hardly think of a better one so there is nothing to optimize + # + # for complex poles we use the following trick + # + # |a -b| has for eigenvalues a+b and a-b + # |b a| + # + # |a+bi 0| has the obvious eigenvalues a+bi and a-bi + # |0 a-bi| + # + # e.g solving the first one in R gives the solution + # for the second one in C + diag_poles = np.zeros(A.shape) + idx = 0 + while idx < poles.shape[0]: + p = poles[idx] + diag_poles[idx, idx] = np.real(p) + if ~np.isreal(p): + diag_poles[idx, idx+1] = -np.imag(p) + diag_poles[idx+1, idx+1] = np.real(p) + diag_poles[idx+1, idx] = np.imag(p) + idx += 1 # skip next one + idx += 1 + gain_matrix = np.linalg.lstsq(B, diag_poles-A, rcond=-1)[0] + transfer_matrix = np.eye(A.shape[0]) + cur_rtol = np.nan + nb_iter = np.nan + else: + # step A (p1144 KNV) and beginning of step F: decompose + # dot(U1.T, A-P[i]*I).T and build our set of transfer_matrix vectors + # in the same loop + ker_pole = [] + + # flag to skip the conjugate of a complex pole + skip_conjugate = False + # select orthonormal base ker_pole for each Pole and vectors for + # transfer_matrix + for j in range(B.shape[0]): + if skip_conjugate: + skip_conjugate = False + continue + pole_space_j = np.dot(u1.T, A-poles[j]*np.eye(B.shape[0])).T + + # after QR Q=Q0|Q1 + # only Q0 is used to reconstruct the qr'ed (dot Q, R) matrix. + # Q1 is orthogonal to Q0 and will be multiplied by the zeros in + # R when using mode "complete". 
In default mode Q1 and the zeros + # in R are not computed + + # To debug with numpy qr uncomment the line below + # Q, _ = np.linalg.qr(pole_space_j, mode="complete") + Q, _ = s_qr(pole_space_j, mode="full") + + ker_pole_j = Q[:, pole_space_j.shape[1]:] + + # We want to select one vector in ker_pole_j to build the transfer + # matrix, however qr returns sometimes vectors with zeros on the + # same line for each pole and this yields very long convergence + # times. + # Or some other times a set of vectors, one with zero imaginary + # part and one (or several) with imaginary parts. After trying + # many ways to select the best possible one (eg ditch vectors + # with zero imaginary part for complex poles) I ended up summing + # all vectors in ker_pole_j, this solves 100% of the problems and + # is a valid choice for transfer_matrix. + # This way for complex poles we are sure to have a non zero + # imaginary part that way, and the problem of lines full of zeros + # in transfer_matrix is solved too as when a vector from + # ker_pole_j has a zero the other one(s) when + # ker_pole_j.shape[1]>1) for sure won't have a zero there. + + transfer_matrix_j = np.sum(ker_pole_j, axis=1)[:, np.newaxis] + transfer_matrix_j = (transfer_matrix_j / + np.linalg.norm(transfer_matrix_j)) + if ~np.isreal(poles[j]): # complex pole + transfer_matrix_j = np.hstack([np.real(transfer_matrix_j), + np.imag(transfer_matrix_j)]) + ker_pole.extend([ker_pole_j, ker_pole_j]) + + # Skip next pole as it is the conjugate + skip_conjugate = True + else: # real pole, nothing to do + ker_pole.append(ker_pole_j) + + if j == 0: + transfer_matrix = transfer_matrix_j + else: + transfer_matrix = np.hstack((transfer_matrix, transfer_matrix_j)) + + if rankB > 1: # otherwise there is nothing we can optimize + stop, cur_rtol, nb_iter = update_loop(ker_pole, transfer_matrix, + poles, B, maxiter, rtol) + if not stop and rtol > 0: + # if rtol<=0 the user has probably done that on purpose, + # don't annoy them + err_msg = ( + "Convergence was not reached after maxiter iterations.\n" + f"You asked for a tolerance of {rtol}, we got {cur_rtol}." + ) + warnings.warn(err_msg, stacklevel=2) + + # reconstruct transfer_matrix to match complex conjugate pairs, + # ie transfer_matrix_j/transfer_matrix_j+1 are + # Re(Complex_pole), Im(Complex_pole) now and will be Re-Im/Re+Im after + transfer_matrix = transfer_matrix.astype(complex) + idx = 0 + while idx < poles.shape[0]-1: + if ~np.isreal(poles[idx]): + rel = transfer_matrix[:, idx].copy() + img = transfer_matrix[:, idx+1] + # rel will be an array referencing a column of transfer_matrix + # if we don't copy() it will changer after the next line and + # and the line after will not yield the correct value + transfer_matrix[:, idx] = rel-1j*img + transfer_matrix[:, idx+1] = rel+1j*img + idx += 1 # skip next one + idx += 1 + + try: + m = np.linalg.solve(transfer_matrix.T, np.dot(np.diag(poles), + transfer_matrix.T)).T + gain_matrix = np.linalg.solve(z, np.dot(u0.T, m-A)) + except np.linalg.LinAlgError as e: + raise ValueError("The poles you've chosen can't be placed. 
" + "Check the controllability matrix and try " + "another set of poles") from e + + # Beware: Kautsky solves A+BK but the usual form is A-BK + gain_matrix = -gain_matrix + # K still contains complex with ~=0j imaginary parts, get rid of them + gain_matrix = np.real(gain_matrix) + + full_state_feedback = Bunch() + full_state_feedback.gain_matrix = gain_matrix + full_state_feedback.computed_poles = _order_complex_poles( + np.linalg.eig(A - np.dot(B, gain_matrix))[0] + ) + full_state_feedback.requested_poles = poles + full_state_feedback.X = transfer_matrix + full_state_feedback.rtol = cur_rtol + full_state_feedback.nb_iter = nb_iter + + return full_state_feedback + + +def dlsim(system, u, t=None, x0=None): + """ + Simulate output of a discrete-time linear system. + + Parameters + ---------- + system : tuple of array_like or instance of `dlti` + A tuple describing the system. + The following gives the number of elements in the tuple and + the interpretation: + + * 1: (instance of `dlti`) + * 3: (num, den, dt) + * 4: (zeros, poles, gain, dt) + * 5: (A, B, C, D, dt) + + u : array_like + An input array describing the input at each time `t` (interpolation is + assumed between given times). If there are multiple inputs, then each + column of the rank-2 array represents an input. + t : array_like, optional + The time steps at which the input is defined. If `t` is given, it + must be the same length as `u`, and the final value in `t` determines + the number of steps returned in the output. + x0 : array_like, optional + The initial conditions on the state vector (zero by default). + + Returns + ------- + tout : ndarray + Time values for the output, as a 1-D array. + yout : ndarray + System response, as a 1-D array. + xout : ndarray, optional + Time-evolution of the state-vector. Only generated if the input is a + `StateSpace` system. 
+ + See Also + -------- + lsim, dstep, dimpulse, cont2discrete + + Examples + -------- + A simple integrator transfer function with a discrete time step of 1.0 + could be implemented as: + + >>> import numpy as np + >>> from scipy import signal + >>> tf = ([1.0,], [1.0, -1.0], 1.0) + >>> t_in = [0.0, 1.0, 2.0, 3.0] + >>> u = np.asarray([0.0, 0.0, 1.0, 1.0]) + >>> t_out, y = signal.dlsim(tf, u, t=t_in) + >>> y.T + array([[ 0., 0., 0., 1.]]) + + """ + # Convert system to dlti-StateSpace + if isinstance(system, lti): + raise AttributeError('dlsim can only be used with discrete-time dlti ' + 'systems.') + elif not isinstance(system, dlti): + system = dlti(*system[:-1], dt=system[-1]) + + # Condition needed to ensure output remains compatible + is_ss_input = isinstance(system, StateSpace) + system = system._as_ss() + + u = np.atleast_1d(u) + + if u.ndim == 1: + u = np.atleast_2d(u).T + + if t is None: + out_samples = len(u) + stoptime = (out_samples - 1) * system.dt + else: + stoptime = t[-1] + out_samples = int(np.floor(stoptime / system.dt)) + 1 + + # Pre-build output arrays + xout = np.zeros((out_samples, system.A.shape[0])) + yout = np.zeros((out_samples, system.C.shape[0])) + tout = np.linspace(0.0, stoptime, num=out_samples) + + # Check initial condition + if x0 is None: + xout[0, :] = np.zeros((system.A.shape[1],)) + else: + xout[0, :] = np.asarray(x0) + + # Pre-interpolate inputs into the desired time steps + if t is None: + u_dt = u + else: + if len(u.shape) == 1: + u = u[:, np.newaxis] + + u_dt = make_interp_spline(t, u, k=1)(tout) + + # Simulate the system + for i in range(0, out_samples - 1): + xout[i+1, :] = (np.dot(system.A, xout[i, :]) + + np.dot(system.B, u_dt[i, :])) + yout[i, :] = (np.dot(system.C, xout[i, :]) + + np.dot(system.D, u_dt[i, :])) + + # Last point + yout[out_samples-1, :] = (np.dot(system.C, xout[out_samples-1, :]) + + np.dot(system.D, u_dt[out_samples-1, :])) + + if is_ss_input: + return tout, yout, xout + else: + return tout, yout + + +def dimpulse(system, x0=None, t=None, n=None): + """ + Impulse response of discrete-time system. + + Parameters + ---------- + system : tuple of array_like or instance of `dlti` + A tuple describing the system. + The following gives the number of elements in the tuple and + the interpretation: + + * 1: (instance of `dlti`) + * 3: (num, den, dt) + * 4: (zeros, poles, gain, dt) + * 5: (A, B, C, D, dt) + + x0 : array_like, optional + Initial state-vector. Defaults to zero. + t : array_like, optional + Time points. Computed if not given. + n : int, optional + The number of time points to compute (if `t` is not given). + + Returns + ------- + tout : ndarray + Time values for the output, as a 1-D array. + yout : tuple of ndarray + Impulse response of system. Each element of the tuple represents + the output of the system based on an impulse in each input. 
+ + See Also + -------- + impulse, dstep, dlsim, cont2discrete + + Examples + -------- + >>> import numpy as np + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + + >>> butter = signal.dlti(*signal.butter(3, 0.5)) + >>> t, y = signal.dimpulse(butter, n=25) + >>> plt.step(t, np.squeeze(y)) + >>> plt.grid() + >>> plt.xlabel('n [samples]') + >>> plt.ylabel('Amplitude') + + """ + # Convert system to dlti-StateSpace + if isinstance(system, dlti): + system = system._as_ss() + elif isinstance(system, lti): + raise AttributeError('dimpulse can only be used with discrete-time ' + 'dlti systems.') + else: + system = dlti(*system[:-1], dt=system[-1])._as_ss() + + # Default to 100 samples if unspecified + if n is None: + n = 100 + + # If time is not specified, use the number of samples + # and system dt + if t is None: + t = np.linspace(0, n * system.dt, n, endpoint=False) + else: + t = np.asarray(t) + + # For each input, implement a step change + yout = None + for i in range(0, system.inputs): + u = np.zeros((t.shape[0], system.inputs)) + u[0, i] = 1.0 + + one_output = dlsim(system, u, t=t, x0=x0) + + if yout is None: + yout = (one_output[1],) + else: + yout = yout + (one_output[1],) + + tout = one_output[0] + + return tout, yout + + +def dstep(system, x0=None, t=None, n=None): + """ + Step response of discrete-time system. + + Parameters + ---------- + system : tuple of array_like + A tuple describing the system. + The following gives the number of elements in the tuple and + the interpretation: + + * 1: (instance of `dlti`) + * 3: (num, den, dt) + * 4: (zeros, poles, gain, dt) + * 5: (A, B, C, D, dt) + + x0 : array_like, optional + Initial state-vector. Defaults to zero. + t : array_like, optional + Time points. Computed if not given. + n : int, optional + The number of time points to compute (if `t` is not given). + + Returns + ------- + tout : ndarray + Output time points, as a 1-D array. + yout : tuple of ndarray + Step response of system. Each element of the tuple represents + the output of the system based on a step response to each input. + + See Also + -------- + step, dimpulse, dlsim, cont2discrete + + Examples + -------- + >>> import numpy as np + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + + >>> butter = signal.dlti(*signal.butter(3, 0.5)) + >>> t, y = signal.dstep(butter, n=25) + >>> plt.step(t, np.squeeze(y)) + >>> plt.grid() + >>> plt.xlabel('n [samples]') + >>> plt.ylabel('Amplitude') + """ + # Convert system to dlti-StateSpace + if isinstance(system, dlti): + system = system._as_ss() + elif isinstance(system, lti): + raise AttributeError('dstep can only be used with discrete-time dlti ' + 'systems.') + else: + system = dlti(*system[:-1], dt=system[-1])._as_ss() + + # Default to 100 samples if unspecified + if n is None: + n = 100 + + # If time is not specified, use the number of samples + # and system dt + if t is None: + t = np.linspace(0, n * system.dt, n, endpoint=False) + else: + t = np.asarray(t) + + # For each input, implement a step change + yout = None + for i in range(0, system.inputs): + u = np.zeros((t.shape[0], system.inputs)) + u[:, i] = np.ones((t.shape[0],)) + + one_output = dlsim(system, u, t=t, x0=x0) + + if yout is None: + yout = (one_output[1],) + else: + yout = yout + (one_output[1],) + + tout = one_output[0] + + return tout, yout + + +def dfreqresp(system, w=None, n=10000, whole=False): + r""" + Calculate the frequency response of a discrete-time system. 
+
+    Parameters
+    ----------
+    system : an instance of the `dlti` class or a tuple describing the system.
+        The following gives the number of elements in the tuple and
+        the interpretation:
+
+            * 1 (instance of `dlti`)
+            * 3 (numerator, denominator, dt)
+            * 4 (zeros, poles, gain, dt)
+            * 5 (A, B, C, D, dt)
+
+    w : array_like, optional
+        Array of frequencies (in radians/sample). Magnitude and phase data is
+        calculated for every value in this array. If not given, a reasonable
+        set will be calculated.
+    n : int, optional
+        Number of frequency points to compute if `w` is not given. The `n`
+        frequencies are logarithmically spaced in an interval chosen to
+        include the influence of the poles and zeros of the system.
+    whole : bool, optional
+        Normally, if 'w' is not given, frequencies are computed from 0 to the
+        Nyquist frequency, pi radians/sample (upper-half of unit-circle). If
+        `whole` is True, compute frequencies from 0 to 2*pi radians/sample.
+
+    Returns
+    -------
+    w : 1D ndarray
+        Frequency array [radians/sample]
+    H : 1D ndarray
+        Array of complex magnitude values
+
+    Notes
+    -----
+    If (num, den) is passed in for ``system``, coefficients for both the
+    numerator and denominator should be specified in descending exponent
+    order (e.g. ``z^2 + 3z + 5`` would be represented as ``[1, 3, 5]``).
+
+    .. versionadded:: 0.18.0
+
+    Examples
+    --------
+    Generating the Nyquist plot of a transfer function
+
+    >>> from scipy import signal
+    >>> import matplotlib.pyplot as plt
+
+    Construct the transfer function
+    :math:`H(z) = \frac{1}{z^2 + 2z + 3}` with a sampling time of 0.05
+    seconds:
+
+    >>> sys = signal.TransferFunction([1], [1, 2, 3], dt=0.05)
+
+    >>> w, H = signal.dfreqresp(sys)
+
+    >>> plt.figure()
+    >>> plt.plot(H.real, H.imag, "b")
+    >>> plt.plot(H.real, -H.imag, "r")
+    >>> plt.show()
+
+    """
+    if not isinstance(system, dlti):
+        if isinstance(system, lti):
+            raise AttributeError('dfreqresp can only be used with '
+                                 'discrete-time systems.')
+
+        system = dlti(*system[:-1], dt=system[-1])
+
+    if isinstance(system, StateSpace):
+        # No SS->ZPK code exists right now, just SS->TF->ZPK
+        system = system._as_tf()
+
+    if not isinstance(system, (TransferFunction, ZerosPolesGain)):
+        raise ValueError('Unknown system type')
+
+    if system.inputs != 1 or system.outputs != 1:
+        raise ValueError("dfreqresp requires a SISO (single input, single "
+                         "output) system.")
+
+    if w is not None:
+        worN = w
+    else:
+        worN = n
+
+    if isinstance(system, TransferFunction):
+        # Convert numerator and denominator from polynomials in the variable
+        # 'z' to polynomials in the variable 'z^-1', as freqz expects.
+        num, den = TransferFunction._z_to_zinv(system.num.ravel(), system.den)
+        w, h = freqz(num, den, worN=worN, whole=whole)
+
+    elif isinstance(system, ZerosPolesGain):
+        w, h = freqz_zpk(system.zeros, system.poles, system.gain, worN=worN,
+                         whole=whole)
+
+    return w, h
+
+
+def dbode(system, w=None, n=100):
+    r"""
+    Calculate Bode magnitude and phase data of a discrete-time system.
+
+    Parameters
+    ----------
+    system :
+        An instance of the LTI class `dlti` or a tuple describing the system.
+        The number of elements in the tuple determines the interpretation, i.e.:
+
+        1. ``(sys_dlti)``: Instance of LTI class `dlti`. Note that derived instances,
+           such as instances of `TransferFunction`, `ZerosPolesGain`, or `StateSpace`,
+           are allowed as well.
+        2. ``(num, den, dt)``: Rational polynomial as described in `TransferFunction`.
+ The coefficients of the polynomials should be specified in descending + exponent order, e.g., z² + 3z + 5 would be represented as ``[1, 3, 5]``. + 3. ``(zeros, poles, gain, dt)``: Zeros, poles, gain form as described + in `ZerosPolesGain`. + 4. ``(A, B, C, D, dt)``: State-space form as described in `StateSpace`. + + w : array_like, optional + Array of frequencies normalized to the Nyquist frequency being π, i.e., + having unit radiant / sample. Magnitude and phase data is calculated for every + value in this array. If not given, a reasonable set will be calculated. + n : int, optional + Number of frequency points to compute if `w` is not given. The `n` + frequencies are logarithmically spaced in an interval chosen to + include the influence of the poles and zeros of the system. + + Returns + ------- + w : 1D ndarray + Array of frequencies normalized to the Nyquist frequency being ``np.pi/dt`` + with ``dt`` being the sampling interval of the `system` parameter. + The unit is rad/s assuming ``dt`` is in seconds. + mag : 1D ndarray + Magnitude array in dB + phase : 1D ndarray + Phase array in degrees + + Notes + ----- + This function is a convenience wrapper around `dfreqresp` for extracting + magnitude and phase from the calculated complex-valued amplitude of the + frequency response. + + .. versionadded:: 0.18.0 + + See Also + -------- + dfreqresp, dlti, TransferFunction, ZerosPolesGain, StateSpace + + + Examples + -------- + The following example shows how to create a Bode plot of a 5-th order + Butterworth lowpass filter with a corner frequency of 100 Hz: + + >>> import matplotlib.pyplot as plt + >>> import numpy as np + >>> from scipy import signal + ... + >>> T = 1e-4 # sampling interval in s + >>> f_c, o = 1e2, 5 # corner frequency in Hz (i.e., -3 dB value) and filter order + >>> bb, aa = signal.butter(o, f_c, 'lowpass', fs=1/T) + ... + >>> w, mag, phase = signal.dbode((bb, aa, T)) + >>> w /= 2*np.pi # convert unit of frequency into Hertz + ... + >>> fg, (ax0, ax1) = plt.subplots(2, 1, sharex='all', figsize=(5, 4), + ... tight_layout=True) + >>> ax0.set_title("Bode Plot of Butterworth Lowpass Filter " + + ... rf"($f_c={f_c:g}\,$Hz, order={o})") + >>> ax0.set_ylabel(r"Magnitude in dB") + >>> ax1.set(ylabel=r"Phase in Degrees", + ... xlabel="Frequency $f$ in Hertz", xlim=(w[1], w[-1])) + >>> ax0.semilogx(w, mag, 'C0-', label=r"$20\,\log_{10}|G(f)|$") # Magnitude plot + >>> ax1.semilogx(w, phase, 'C1-', label=r"$\angle G(f)$") # Phase plot + ... + >>> for ax_ in (ax0, ax1): + ... ax_.axvline(f_c, color='m', alpha=0.25, label=rf"${f_c=:g}\,$Hz") + ... ax_.grid(which='both', axis='x') # plot major & minor vertical grid lines + ... ax_.grid(which='major', axis='y') + ... 
ax_.legend() + >>> plt.show() + """ + w, y = dfreqresp(system, w=w, n=n) + + if isinstance(system, dlti): + dt = system.dt + else: + dt = system[-1] + + mag = 20.0 * np.log10(abs(y)) + phase = np.rad2deg(np.unwrap(np.angle(y))) + + return w / dt, mag, phase diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/_max_len_seq.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/_max_len_seq.py new file mode 100644 index 0000000000000000000000000000000000000000..4d64beaca86d1da50b563668679b6fc52c954ab0 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/_max_len_seq.py @@ -0,0 +1,139 @@ +# Author: Eric Larson +# 2014 + +"""Tools for MLS generation""" + +import numpy as np + +from ._max_len_seq_inner import _max_len_seq_inner + +__all__ = ['max_len_seq'] + + +# These are definitions of linear shift register taps for use in max_len_seq() +_mls_taps = {2: [1], 3: [2], 4: [3], 5: [3], 6: [5], 7: [6], 8: [7, 6, 1], + 9: [5], 10: [7], 11: [9], 12: [11, 10, 4], 13: [12, 11, 8], + 14: [13, 12, 2], 15: [14], 16: [15, 13, 4], 17: [14], + 18: [11], 19: [18, 17, 14], 20: [17], 21: [19], 22: [21], + 23: [18], 24: [23, 22, 17], 25: [22], 26: [25, 24, 20], + 27: [26, 25, 22], 28: [25], 29: [27], 30: [29, 28, 7], + 31: [28], 32: [31, 30, 10]} + +def max_len_seq(nbits, state=None, length=None, taps=None): + """ + Maximum length sequence (MLS) generator. + + Parameters + ---------- + nbits : int + Number of bits to use. Length of the resulting sequence will + be ``(2**nbits) - 1``. Note that generating long sequences + (e.g., greater than ``nbits == 16``) can take a long time. + state : array_like, optional + If array, must be of length ``nbits``, and will be cast to binary + (bool) representation. If None, a seed of ones will be used, + producing a repeatable representation. If ``state`` is all + zeros, an error is raised as this is invalid. Default: None. + length : int, optional + Number of samples to compute. If None, the entire length + ``(2**nbits) - 1`` is computed. + taps : array_like, optional + Polynomial taps to use (e.g., ``[7, 6, 1]`` for an 8-bit sequence). + If None, taps will be automatically selected (for up to + ``nbits == 32``). + + Returns + ------- + seq : array + Resulting MLS sequence of 0's and 1's. + state : array + The final state of the shift register. + + Notes + ----- + The algorithm for MLS generation is generically described in: + + https://en.wikipedia.org/wiki/Maximum_length_sequence + + The default values for taps are specifically taken from the first + option listed for each value of ``nbits`` in: + + https://web.archive.org/web/20181001062252/http://www.newwaveinstruments.com/resources/articles/m_sequence_linear_feedback_shift_register_lfsr.htm + + .. 
versionadded:: 0.15.0 + + Examples + -------- + MLS uses binary convention: + + >>> from scipy.signal import max_len_seq + >>> max_len_seq(4)[0] + array([1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0], dtype=int8) + + MLS has a white spectrum (except for DC): + + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> from numpy.fft import fft, ifft, fftshift, fftfreq + >>> seq = max_len_seq(6)[0]*2-1 # +1 and -1 + >>> spec = fft(seq) + >>> N = len(seq) + >>> plt.plot(fftshift(fftfreq(N)), fftshift(np.abs(spec)), '.-') + >>> plt.margins(0.1, 0.1) + >>> plt.grid(True) + >>> plt.show() + + Circular autocorrelation of MLS is an impulse: + + >>> acorrcirc = ifft(spec * np.conj(spec)).real + >>> plt.figure() + >>> plt.plot(np.arange(-N/2+1, N/2+1), fftshift(acorrcirc), '.-') + >>> plt.margins(0.1, 0.1) + >>> plt.grid(True) + >>> plt.show() + + Linear autocorrelation of MLS is approximately an impulse: + + >>> acorr = np.correlate(seq, seq, 'full') + >>> plt.figure() + >>> plt.plot(np.arange(-N+1, N), acorr, '.-') + >>> plt.margins(0.1, 0.1) + >>> plt.grid(True) + >>> plt.show() + + """ + taps_dtype = np.int32 if np.intp().itemsize == 4 else np.int64 + if taps is None: + if nbits not in _mls_taps: + known_taps = np.array(list(_mls_taps.keys())) + raise ValueError(f'nbits must be between {known_taps.min()} and ' + f'{known_taps.max()} if taps is None') + taps = np.array(_mls_taps[nbits], taps_dtype) + else: + taps = np.unique(np.array(taps, taps_dtype))[::-1] + if np.any(taps < 0) or np.any(taps > nbits) or taps.size < 1: + raise ValueError('taps must be non-empty with values between ' + 'zero and nbits (inclusive)') + taps = np.array(taps) # needed for Cython and Pythran + n_max = (2**nbits) - 1 + if length is None: + length = n_max + else: + length = int(length) + if length < 0: + raise ValueError('length must be greater than or equal to 0') + # We use int8 instead of bool here because NumPy arrays of bools + # don't seem to work nicely with Cython + if state is None: + state = np.ones(nbits, dtype=np.int8, order='c') + else: + # makes a copy if need be, ensuring it's 0's and 1's + state = np.array(state, dtype=bool, order='c').astype(np.int8) + if state.ndim != 1 or state.size != nbits: + raise ValueError('state must be a 1-D array of size nbits') + if np.all(state == 0): + raise ValueError('state must not be all zeros') + + seq = np.empty(length, dtype=np.int8, order='c') + state = _max_len_seq_inner(taps, state, nbits, length, seq) + return seq, state diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/_max_len_seq_inner.cpython-310-x86_64-linux-gnu.so b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/_max_len_seq_inner.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..4eb7c7b2a3b658919f14450c20cad1e612a327d7 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/_max_len_seq_inner.cpython-310-x86_64-linux-gnu.so differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/_peak_finding.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/_peak_finding.py new file mode 100644 index 0000000000000000000000000000000000000000..ccbeca5b7a4839bbc28e9c1cfd1ebd1d028a82cf --- /dev/null +++ 
b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/_peak_finding.py @@ -0,0 +1,1310 @@ +""" +Functions for identifying peaks in signals. +""" +import math +import numpy as np + +from scipy.signal._wavelets import _cwt, _ricker +from scipy.stats import scoreatpercentile + +from ._peak_finding_utils import ( + _local_maxima_1d, + _select_by_peak_distance, + _peak_prominences, + _peak_widths +) + + +__all__ = ['argrelmin', 'argrelmax', 'argrelextrema', 'peak_prominences', + 'peak_widths', 'find_peaks', 'find_peaks_cwt'] + + +def _boolrelextrema(data, comparator, axis=0, order=1, mode='clip'): + """ + Calculate the relative extrema of `data`. + + Relative extrema are calculated by finding locations where + ``comparator(data[n], data[n+1:n+order+1])`` is True. + + Parameters + ---------- + data : ndarray + Array in which to find the relative extrema. + comparator : callable + Function to use to compare two data points. + Should take two arrays as arguments. + axis : int, optional + Axis over which to select from `data`. Default is 0. + order : int, optional + How many points on each side to use for the comparison + to consider ``comparator(n,n+x)`` to be True. + mode : str, optional + How the edges of the vector are treated. 'wrap' (wrap around) or + 'clip' (treat overflow as the same as the last (or first) element). + Default 'clip'. See numpy.take. + + Returns + ------- + extrema : ndarray + Boolean array of the same shape as `data` that is True at an extrema, + False otherwise. + + See also + -------- + argrelmax, argrelmin + + Examples + -------- + >>> import numpy as np + >>> from scipy.signal._peak_finding import _boolrelextrema + >>> testdata = np.array([1,2,3,2,1]) + >>> _boolrelextrema(testdata, np.greater, axis=0) + array([False, False, True, False, False], dtype=bool) + + """ + if (int(order) != order) or (order < 1): + raise ValueError('Order must be an int >= 1') + + datalen = data.shape[axis] + locs = np.arange(0, datalen) + + results = np.ones(data.shape, dtype=bool) + main = data.take(locs, axis=axis, mode=mode) + for shift in range(1, order + 1): + plus = data.take(locs + shift, axis=axis, mode=mode) + minus = data.take(locs - shift, axis=axis, mode=mode) + results &= comparator(main, plus) + results &= comparator(main, minus) + if ~results.any(): + return results + return results + + +def argrelmin(data, axis=0, order=1, mode='clip'): + """ + Calculate the relative minima of `data`. + + Parameters + ---------- + data : ndarray + Array in which to find the relative minima. + axis : int, optional + Axis over which to select from `data`. Default is 0. + order : int, optional + How many points on each side to use for the comparison + to consider ``comparator(n, n+x)`` to be True. + mode : str, optional + How the edges of the vector are treated. + Available options are 'wrap' (wrap around) or 'clip' (treat overflow + as the same as the last (or first) element). + Default 'clip'. See numpy.take. + + Returns + ------- + extrema : tuple of ndarrays + Indices of the minima in arrays of integers. ``extrema[k]`` is + the array of indices of axis `k` of `data`. Note that the + return value is a tuple even when `data` is 1-D. + + See Also + -------- + argrelextrema, argrelmax, find_peaks + + Notes + ----- + This function uses `argrelextrema` with np.less as comparator. Therefore, it + requires a strict inequality on both sides of a value to consider it a + minimum. This means flat minima (more than one sample wide) are not detected. 
+ In case of 1-D `data` `find_peaks` can be used to detect all + local minima, including flat ones, by calling it with negated `data`. + + .. versionadded:: 0.11.0 + + Examples + -------- + >>> import numpy as np + >>> from scipy.signal import argrelmin + >>> x = np.array([2, 1, 2, 3, 2, 0, 1, 0]) + >>> argrelmin(x) + (array([1, 5]),) + >>> y = np.array([[1, 2, 1, 2], + ... [2, 2, 0, 0], + ... [5, 3, 4, 4]]) + ... + >>> argrelmin(y, axis=1) + (array([0, 2]), array([2, 1])) + + """ + return argrelextrema(data, np.less, axis, order, mode) + + +def argrelmax(data, axis=0, order=1, mode='clip'): + """ + Calculate the relative maxima of `data`. + + Parameters + ---------- + data : ndarray + Array in which to find the relative maxima. + axis : int, optional + Axis over which to select from `data`. Default is 0. + order : int, optional + How many points on each side to use for the comparison + to consider ``comparator(n, n+x)`` to be True. + mode : str, optional + How the edges of the vector are treated. + Available options are 'wrap' (wrap around) or 'clip' (treat overflow + as the same as the last (or first) element). + Default 'clip'. See `numpy.take`. + + Returns + ------- + extrema : tuple of ndarrays + Indices of the maxima in arrays of integers. ``extrema[k]`` is + the array of indices of axis `k` of `data`. Note that the + return value is a tuple even when `data` is 1-D. + + See Also + -------- + argrelextrema, argrelmin, find_peaks + + Notes + ----- + This function uses `argrelextrema` with np.greater as comparator. Therefore, + it requires a strict inequality on both sides of a value to consider it a + maximum. This means flat maxima (more than one sample wide) are not detected. + In case of 1-D `data` `find_peaks` can be used to detect all + local maxima, including flat ones. + + .. versionadded:: 0.11.0 + + Examples + -------- + >>> import numpy as np + >>> from scipy.signal import argrelmax + >>> x = np.array([2, 1, 2, 3, 2, 0, 1, 0]) + >>> argrelmax(x) + (array([3, 6]),) + >>> y = np.array([[1, 2, 1, 2], + ... [2, 2, 0, 0], + ... [5, 3, 4, 4]]) + ... + >>> argrelmax(y, axis=1) + (array([0]), array([1])) + """ + return argrelextrema(data, np.greater, axis, order, mode) + + +def argrelextrema(data, comparator, axis=0, order=1, mode='clip'): + """ + Calculate the relative extrema of `data`. + + Parameters + ---------- + data : ndarray + Array in which to find the relative extrema. + comparator : callable + Function to use to compare two data points. + Should take two arrays as arguments. + axis : int, optional + Axis over which to select from `data`. Default is 0. + order : int, optional + How many points on each side to use for the comparison + to consider ``comparator(n, n+x)`` to be True. + mode : str, optional + How the edges of the vector are treated. 'wrap' (wrap around) or + 'clip' (treat overflow as the same as the last (or first) element). + Default is 'clip'. See `numpy.take`. + + Returns + ------- + extrema : tuple of ndarrays + Indices of the maxima in arrays of integers. ``extrema[k]`` is + the array of indices of axis `k` of `data`. Note that the + return value is a tuple even when `data` is 1-D. + + See Also + -------- + argrelmin, argrelmax + + Notes + ----- + + .. versionadded:: 0.11.0 + + Examples + -------- + >>> import numpy as np + >>> from scipy.signal import argrelextrema + >>> x = np.array([2, 1, 2, 3, 2, 0, 1, 0]) + >>> argrelextrema(x, np.greater) + (array([3, 6]),) + >>> y = np.array([[1, 2, 1, 2], + ... [2, 2, 0, 0], + ... [5, 3, 4, 4]]) + ... 
+ >>> argrelextrema(y, np.less, axis=1) + (array([0, 2]), array([2, 1])) + + """ + results = _boolrelextrema(data, comparator, + axis, order, mode) + return np.nonzero(results) + + +def _arg_x_as_expected(value): + """Ensure argument `x` is a 1-D C-contiguous array of dtype('float64'). + + Used in `find_peaks`, `peak_prominences` and `peak_widths` to make `x` + compatible with the signature of the wrapped Cython functions. + + Returns + ------- + value : ndarray + A 1-D C-contiguous array with dtype('float64'). + """ + value = np.asarray(value, order='C', dtype=np.float64) + if value.ndim != 1: + raise ValueError('`x` must be a 1-D array') + return value + + +def _arg_peaks_as_expected(value): + """Ensure argument `peaks` is a 1-D C-contiguous array of dtype('intp'). + + Used in `peak_prominences` and `peak_widths` to make `peaks` compatible + with the signature of the wrapped Cython functions. + + Returns + ------- + value : ndarray + A 1-D C-contiguous array with dtype('intp'). + """ + value = np.asarray(value) + if value.size == 0: + # Empty arrays default to np.float64 but are valid input + value = np.array([], dtype=np.intp) + try: + # Safely convert to C-contiguous array of type np.intp + value = value.astype(np.intp, order='C', casting='safe', + subok=False, copy=False) + except TypeError as e: + raise TypeError("cannot safely cast `peaks` to dtype('intp')") from e + if value.ndim != 1: + raise ValueError('`peaks` must be a 1-D array') + return value + + +def _arg_wlen_as_expected(value): + """Ensure argument `wlen` is of type `np.intp` and larger than 1. + + Used in `peak_prominences` and `peak_widths`. + + Returns + ------- + value : np.intp + The original `value` rounded up to an integer or -1 if `value` was + None. + """ + if value is None: + # _peak_prominences expects an intp; -1 signals that no value was + # supplied by the user + value = -1 + elif 1 < value: + # Round up to a positive integer + if isinstance(value, float): + value = math.ceil(value) + value = np.intp(value) + else: + raise ValueError(f'`wlen` must be larger than 1, was {value}') + return value + + +def peak_prominences(x, peaks, wlen=None): + """ + Calculate the prominence of each peak in a signal. + + The prominence of a peak measures how much a peak stands out from the + surrounding baseline of the signal and is defined as the vertical distance + between the peak and its lowest contour line. + + Parameters + ---------- + x : sequence + A signal with peaks. + peaks : sequence + Indices of peaks in `x`. + wlen : int, optional + A window length in samples that optionally limits the evaluated area for + each peak to a subset of `x`. The peak is always placed in the middle of + the window therefore the given length is rounded up to the next odd + integer. This parameter can speed up the calculation (see Notes). + + Returns + ------- + prominences : ndarray + The calculated prominences for each peak in `peaks`. + left_bases, right_bases : ndarray + The peaks' bases as indices in `x` to the left and right of each peak. + The higher base of each pair is a peak's lowest contour line. + + Raises + ------ + ValueError + If a value in `peaks` is an invalid index for `x`. + + Warns + ----- + PeakPropertyWarning + For indices in `peaks` that don't point to valid local maxima in `x`, + the returned prominence will be 0 and this warning is raised. This + also happens if `wlen` is smaller than the plateau size of a peak. + + Warnings + -------- + This function may return unexpected results for data containing NaNs. 
To + avoid this, NaNs should either be removed or replaced. + + See Also + -------- + find_peaks + Find peaks inside a signal based on peak properties. + peak_widths + Calculate the width of peaks. + + Notes + ----- + Strategy to compute a peak's prominence: + + 1. Extend a horizontal line from the current peak to the left and right + until the line either reaches the window border (see `wlen`) or + intersects the signal again at the slope of a higher peak. An + intersection with a peak of the same height is ignored. + 2. On each side find the minimal signal value within the interval defined + above. These points are the peak's bases. + 3. The higher one of the two bases marks the peak's lowest contour line. The + prominence can then be calculated as the vertical difference between the + peaks height itself and its lowest contour line. + + Searching for the peak's bases can be slow for large `x` with periodic + behavior because large chunks or even the full signal need to be evaluated + for the first algorithmic step. This evaluation area can be limited with the + parameter `wlen` which restricts the algorithm to a window around the + current peak and can shorten the calculation time if the window length is + short in relation to `x`. + However, this may stop the algorithm from finding the true global contour + line if the peak's true bases are outside this window. Instead, a higher + contour line is found within the restricted window leading to a smaller + calculated prominence. In practice, this is only relevant for the highest set + of peaks in `x`. This behavior may even be used intentionally to calculate + "local" prominences. + + .. versionadded:: 1.1.0 + + References + ---------- + .. [1] Wikipedia Article for Topographic Prominence: + https://en.wikipedia.org/wiki/Topographic_prominence + + Examples + -------- + >>> import numpy as np + >>> from scipy.signal import find_peaks, peak_prominences + >>> import matplotlib.pyplot as plt + + Create a test signal with two overlaid harmonics + + >>> x = np.linspace(0, 6 * np.pi, 1000) + >>> x = np.sin(x) + 0.6 * np.sin(2.6 * x) + + Find all peaks and calculate prominences + + >>> peaks, _ = find_peaks(x) + >>> prominences = peak_prominences(x, peaks)[0] + >>> prominences + array([1.24159486, 0.47840168, 0.28470524, 3.10716793, 0.284603 , + 0.47822491, 2.48340261, 0.47822491]) + + Calculate the height of each peak's contour line and plot the results + + >>> contour_heights = x[peaks] - prominences + >>> plt.plot(x) + >>> plt.plot(peaks, x[peaks], "x") + >>> plt.vlines(x=peaks, ymin=contour_heights, ymax=x[peaks]) + >>> plt.show() + + Let's evaluate a second example that demonstrates several edge cases for + one peak at index 5. + + >>> x = np.array([0, 1, 0, 3, 1, 3, 0, 4, 0]) + >>> peaks = np.array([5]) + >>> plt.plot(x) + >>> plt.plot(peaks, x[peaks], "x") + >>> plt.show() + >>> peak_prominences(x, peaks) # -> (prominences, left_bases, right_bases) + (array([3.]), array([2]), array([6])) + + Note how the peak at index 3 of the same height is not considered as a + border while searching for the left base. Instead, two minima at 0 and 2 + are found in which case the one closer to the evaluated peak is always + chosen. On the right side, however, the base must be placed at 6 because the + higher peak represents the right border to the evaluated area. 
+ + >>> peak_prominences(x, peaks, wlen=3.1) + (array([2.]), array([4]), array([6])) + + Here, we restricted the algorithm to a window from 3 to 7 (the length is 5 + samples because `wlen` was rounded up to the next odd integer). Thus, the + only two candidates in the evaluated area are the two neighboring samples + and a smaller prominence is calculated. + """ + x = _arg_x_as_expected(x) + peaks = _arg_peaks_as_expected(peaks) + wlen = _arg_wlen_as_expected(wlen) + return _peak_prominences(x, peaks, wlen) + + +def peak_widths(x, peaks, rel_height=0.5, prominence_data=None, wlen=None): + """ + Calculate the width of each peak in a signal. + + This function calculates the width of a peak in samples at a relative + distance to the peak's height and prominence. + + Parameters + ---------- + x : sequence + A signal with peaks. + peaks : sequence + Indices of peaks in `x`. + rel_height : float, optional + Chooses the relative height at which the peak width is measured as a + percentage of its prominence. 1.0 calculates the width of the peak at + its lowest contour line while 0.5 evaluates at half the prominence + height. Must be at least 0. See notes for further explanation. + prominence_data : tuple, optional + A tuple of three arrays matching the output of `peak_prominences` when + called with the same arguments `x` and `peaks`. This data are calculated + internally if not provided. + wlen : int, optional + A window length in samples passed to `peak_prominences` as an optional + argument for internal calculation of `prominence_data`. This argument + is ignored if `prominence_data` is given. + + Returns + ------- + widths : ndarray + The widths for each peak in samples. + width_heights : ndarray + The height of the contour lines at which the `widths` where evaluated. + left_ips, right_ips : ndarray + Interpolated positions of left and right intersection points of a + horizontal line at the respective evaluation height. + + Raises + ------ + ValueError + If `prominence_data` is supplied but doesn't satisfy the condition + ``0 <= left_base <= peak <= right_base < x.shape[0]`` for each peak, + has the wrong dtype, is not C-contiguous or does not have the same + shape. + + Warns + ----- + PeakPropertyWarning + Raised if any calculated width is 0. This may stem from the supplied + `prominence_data` or if `rel_height` is set to 0. + + Warnings + -------- + This function may return unexpected results for data containing NaNs. To + avoid this, NaNs should either be removed or replaced. + + See Also + -------- + find_peaks + Find peaks inside a signal based on peak properties. + peak_prominences + Calculate the prominence of peaks. + + Notes + ----- + The basic algorithm to calculate a peak's width is as follows: + + * Calculate the evaluation height :math:`h_{eval}` with the formula + :math:`h_{eval} = h_{Peak} - P \\cdot R`, where :math:`h_{Peak}` is the + height of the peak itself, :math:`P` is the peak's prominence and + :math:`R` a positive ratio specified with the argument `rel_height`. + * Draw a horizontal line at the evaluation height to both sides, starting at + the peak's current vertical position until the lines either intersect a + slope, the signal border or cross the vertical position of the peak's + base (see `peak_prominences` for an definition). For the first case, + intersection with the signal, the true intersection point is estimated + with linear interpolation. + * Calculate the width as the horizontal distance between the chosen + endpoints on both sides. 
As a consequence of this the maximal possible + width for each peak is the horizontal distance between its bases. + + As shown above to calculate a peak's width its prominence and bases must be + known. You can supply these yourself with the argument `prominence_data`. + Otherwise, they are internally calculated (see `peak_prominences`). + + .. versionadded:: 1.1.0 + + Examples + -------- + >>> import numpy as np + >>> from scipy.signal import chirp, find_peaks, peak_widths + >>> import matplotlib.pyplot as plt + + Create a test signal with two overlaid harmonics + + >>> x = np.linspace(0, 6 * np.pi, 1000) + >>> x = np.sin(x) + 0.6 * np.sin(2.6 * x) + + Find all peaks and calculate their widths at the relative height of 0.5 + (contour line at half the prominence height) and 1 (at the lowest contour + line at full prominence height). + + >>> peaks, _ = find_peaks(x) + >>> results_half = peak_widths(x, peaks, rel_height=0.5) + >>> results_half[0] # widths + array([ 64.25172825, 41.29465463, 35.46943289, 104.71586081, + 35.46729324, 41.30429622, 181.93835853, 45.37078546]) + >>> results_full = peak_widths(x, peaks, rel_height=1) + >>> results_full[0] # widths + array([181.9396084 , 72.99284945, 61.28657872, 373.84622694, + 61.78404617, 72.48822812, 253.09161876, 79.36860878]) + + Plot signal, peaks and contour lines at which the widths where calculated + + >>> plt.plot(x) + >>> plt.plot(peaks, x[peaks], "x") + >>> plt.hlines(*results_half[1:], color="C2") + >>> plt.hlines(*results_full[1:], color="C3") + >>> plt.show() + """ + x = _arg_x_as_expected(x) + peaks = _arg_peaks_as_expected(peaks) + if prominence_data is None: + # Calculate prominence if not supplied and use wlen if supplied. + wlen = _arg_wlen_as_expected(wlen) + prominence_data = _peak_prominences(x, peaks, wlen) + return _peak_widths(x, peaks, rel_height, *prominence_data) + + +def _unpack_condition_args(interval, x, peaks): + """ + Parse condition arguments for `find_peaks`. + + Parameters + ---------- + interval : number or ndarray or sequence + Either a number or ndarray or a 2-element sequence of the former. The + first value is always interpreted as `imin` and the second, if supplied, + as `imax`. + x : ndarray + The signal with `peaks`. + peaks : ndarray + An array with indices used to reduce `imin` and / or `imax` if those are + arrays. + + Returns + ------- + imin, imax : number or ndarray or None + Minimal and maximal value in `argument`. + + Raises + ------ + ValueError : + If interval border is given as array and its size does not match the size + of `x`. + + Notes + ----- + + .. versionadded:: 1.1.0 + """ + try: + imin, imax = interval + except (TypeError, ValueError): + imin, imax = (interval, None) + + # Reduce arrays if arrays + if isinstance(imin, np.ndarray): + if imin.size != x.size: + raise ValueError('array size of lower interval border must match x') + imin = imin[peaks] + if isinstance(imax, np.ndarray): + if imax.size != x.size: + raise ValueError('array size of upper interval border must match x') + imax = imax[peaks] + + return imin, imax + + +def _select_by_property(peak_properties, pmin, pmax): + """ + Evaluate where the generic property of peaks confirms to an interval. + + Parameters + ---------- + peak_properties : ndarray + An array with properties for each peak. + pmin : None or number or ndarray + Lower interval boundary for `peak_properties`. ``None`` is interpreted as + an open border. + pmax : None or number or ndarray + Upper interval boundary for `peak_properties`. 
``None`` is interpreted as + an open border. + + Returns + ------- + keep : bool + A boolean mask evaluating to true where `peak_properties` confirms to the + interval. + + See Also + -------- + find_peaks + + Notes + ----- + + .. versionadded:: 1.1.0 + """ + keep = np.ones(peak_properties.size, dtype=bool) + if pmin is not None: + keep &= (pmin <= peak_properties) + if pmax is not None: + keep &= (peak_properties <= pmax) + return keep + + +def _select_by_peak_threshold(x, peaks, tmin, tmax): + """ + Evaluate which peaks fulfill the threshold condition. + + Parameters + ---------- + x : ndarray + A 1-D array which is indexable by `peaks`. + peaks : ndarray + Indices of peaks in `x`. + tmin, tmax : scalar or ndarray or None + Minimal and / or maximal required thresholds. If supplied as ndarrays + their size must match `peaks`. ``None`` is interpreted as an open + border. + + Returns + ------- + keep : bool + A boolean mask evaluating to true where `peaks` fulfill the threshold + condition. + left_thresholds, right_thresholds : ndarray + Array matching `peak` containing the thresholds of each peak on + both sides. + + Notes + ----- + + .. versionadded:: 1.1.0 + """ + # Stack thresholds on both sides to make min / max operations easier: + # tmin is compared with the smaller, and tmax with the greater threshold to + # each peak's side + stacked_thresholds = np.vstack([x[peaks] - x[peaks - 1], + x[peaks] - x[peaks + 1]]) + keep = np.ones(peaks.size, dtype=bool) + if tmin is not None: + min_thresholds = np.min(stacked_thresholds, axis=0) + keep &= (tmin <= min_thresholds) + if tmax is not None: + max_thresholds = np.max(stacked_thresholds, axis=0) + keep &= (max_thresholds <= tmax) + + return keep, stacked_thresholds[0], stacked_thresholds[1] + + +def find_peaks(x, height=None, threshold=None, distance=None, + prominence=None, width=None, wlen=None, rel_height=0.5, + plateau_size=None): + """ + Find peaks inside a signal based on peak properties. + + This function takes a 1-D array and finds all local maxima by + simple comparison of neighboring values. Optionally, a subset of these + peaks can be selected by specifying conditions for a peak's properties. + + Parameters + ---------- + x : sequence + A signal with peaks. + height : number or ndarray or sequence, optional + Required height of peaks. Either a number, ``None``, an array matching + `x` or a 2-element sequence of the former. The first element is + always interpreted as the minimal and the second, if supplied, as the + maximal required height. + threshold : number or ndarray or sequence, optional + Required threshold of peaks, the vertical distance to its neighboring + samples. Either a number, ``None``, an array matching `x` or a + 2-element sequence of the former. The first element is always + interpreted as the minimal and the second, if supplied, as the maximal + required threshold. + distance : number, optional + Required minimal horizontal distance (>= 1) in samples between + neighbouring peaks. Smaller peaks are removed first until the condition + is fulfilled for all remaining peaks. + prominence : number or ndarray or sequence, optional + Required prominence of peaks. Either a number, ``None``, an array + matching `x` or a 2-element sequence of the former. The first + element is always interpreted as the minimal and the second, if + supplied, as the maximal required prominence. + width : number or ndarray or sequence, optional + Required width of peaks in samples. 
Either a number, ``None``, an array + matching `x` or a 2-element sequence of the former. The first + element is always interpreted as the minimal and the second, if + supplied, as the maximal required width. + wlen : int, optional + Used for calculation of the peaks prominences, thus it is only used if + one of the arguments `prominence` or `width` is given. See argument + `wlen` in `peak_prominences` for a full description of its effects. + rel_height : float, optional + Used for calculation of the peaks width, thus it is only used if `width` + is given. See argument `rel_height` in `peak_widths` for a full + description of its effects. + plateau_size : number or ndarray or sequence, optional + Required size of the flat top of peaks in samples. Either a number, + ``None``, an array matching `x` or a 2-element sequence of the former. + The first element is always interpreted as the minimal and the second, + if supplied as the maximal required plateau size. + + .. versionadded:: 1.2.0 + + Returns + ------- + peaks : ndarray + Indices of peaks in `x` that satisfy all given conditions. + properties : dict + A dictionary containing properties of the returned peaks which were + calculated as intermediate results during evaluation of the specified + conditions: + + * 'peak_heights' + If `height` is given, the height of each peak in `x`. + * 'left_thresholds', 'right_thresholds' + If `threshold` is given, these keys contain a peaks vertical + distance to its neighbouring samples. + * 'prominences', 'right_bases', 'left_bases' + If `prominence` is given, these keys are accessible. See + `peak_prominences` for a description of their content. + * 'widths', 'width_heights', 'left_ips', 'right_ips' + If `width` is given, these keys are accessible. See `peak_widths` + for a description of their content. + * 'plateau_sizes', left_edges', 'right_edges' + If `plateau_size` is given, these keys are accessible and contain + the indices of a peak's edges (edges are still part of the + plateau) and the calculated plateau sizes. + + .. versionadded:: 1.2.0 + + To calculate and return properties without excluding peaks, provide the + open interval ``(None, None)`` as a value to the appropriate argument + (excluding `distance`). + + Warns + ----- + PeakPropertyWarning + Raised if a peak's properties have unexpected values (see + `peak_prominences` and `peak_widths`). + + Warnings + -------- + This function may return unexpected results for data containing NaNs. To + avoid this, NaNs should either be removed or replaced. + + See Also + -------- + find_peaks_cwt + Find peaks using the wavelet transformation. + peak_prominences + Directly calculate the prominence of peaks. + peak_widths + Directly calculate the width of peaks. + + Notes + ----- + In the context of this function, a peak or local maximum is defined as any + sample whose two direct neighbours have a smaller amplitude. For flat peaks + (more than one sample of equal amplitude wide) the index of the middle + sample is returned (rounded down in case the number of samples is even). + For noisy signals the peak locations can be off because the noise might + change the position of local maxima. In those cases consider smoothing the + signal before searching for peaks or use other peak finding and fitting + methods (like `find_peaks_cwt`). 
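+
+ A brief doctest illustrating the flat-peak convention above (an editorial
+ addition, not part of the upstream docstring); a three-sample plateau
+ yields the index of its middle sample:
+
+ >>> import numpy as np
+ >>> from scipy.signal import find_peaks
+ >>> find_peaks(np.array([0., 1., 1., 1., 0.]))[0]
+ array([2])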
+ + Some additional comments on specifying conditions: + + * Almost all conditions (excluding `distance`) can be given as half-open or + closed intervals, e.g., ``1`` or ``(1, None)`` defines the half-open + interval :math:`[1, \\infty]` while ``(None, 1)`` defines the interval + :math:`[-\\infty, 1]`. The open interval ``(None, None)`` can be specified + as well, which returns the matching properties without exclusion of peaks. + * The border is always included in the interval used to select valid peaks. + * For several conditions the interval borders can be specified with + arrays matching `x` in shape which enables dynamic constrains based on + the sample position. + * The conditions are evaluated in the following order: `plateau_size`, + `height`, `threshold`, `distance`, `prominence`, `width`. In most cases + this order is the fastest one because faster operations are applied first + to reduce the number of peaks that need to be evaluated later. + * While indices in `peaks` are guaranteed to be at least `distance` samples + apart, edges of flat peaks may be closer than the allowed `distance`. + * Use `wlen` to reduce the time it takes to evaluate the conditions for + `prominence` or `width` if `x` is large or has many local maxima + (see `peak_prominences`). + + .. versionadded:: 1.1.0 + + Examples + -------- + To demonstrate this function's usage we use a signal `x` supplied with + SciPy (see `scipy.datasets.electrocardiogram`). Let's find all peaks (local + maxima) in `x` whose amplitude lies above 0. + + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> from scipy.datasets import electrocardiogram + >>> from scipy.signal import find_peaks + >>> x = electrocardiogram()[2000:4000] + >>> peaks, _ = find_peaks(x, height=0) + >>> plt.plot(x) + >>> plt.plot(peaks, x[peaks], "x") + >>> plt.plot(np.zeros_like(x), "--", color="gray") + >>> plt.show() + + We can select peaks below 0 with ``height=(None, 0)`` or use arrays matching + `x` in size to reflect a changing condition for different parts of the + signal. + + >>> border = np.sin(np.linspace(0, 3 * np.pi, x.size)) + >>> peaks, _ = find_peaks(x, height=(-border, border)) + >>> plt.plot(x) + >>> plt.plot(-border, "--", color="gray") + >>> plt.plot(border, ":", color="gray") + >>> plt.plot(peaks, x[peaks], "x") + >>> plt.show() + + Another useful condition for periodic signals can be given with the + `distance` argument. In this case, we can easily select the positions of + QRS complexes within the electrocardiogram (ECG) by demanding a distance of + at least 150 samples. + + >>> peaks, _ = find_peaks(x, distance=150) + >>> np.diff(peaks) + array([186, 180, 177, 171, 177, 169, 167, 164, 158, 162, 172]) + >>> plt.plot(x) + >>> plt.plot(peaks, x[peaks], "x") + >>> plt.show() + + Especially for noisy signals peaks can be easily grouped by their + prominence (see `peak_prominences`). E.g., we can select all peaks except + for the mentioned QRS complexes by limiting the allowed prominence to 0.6. + + >>> peaks, properties = find_peaks(x, prominence=(None, 0.6)) + >>> properties["prominences"].max() + 0.5049999999999999 + >>> plt.plot(x) + >>> plt.plot(peaks, x[peaks], "x") + >>> plt.show() + + And, finally, let's examine a different section of the ECG which contains + beat forms of different shape. To select only the atypical heart beats, we + combine two conditions: a minimal prominence of 1 and width of at least 20 + samples. 
+ + >>> x = electrocardiogram()[17000:18000] + >>> peaks, properties = find_peaks(x, prominence=1, width=20) + >>> properties["prominences"], properties["widths"] + (array([1.495, 2.3 ]), array([36.93773946, 39.32723577])) + >>> plt.plot(x) + >>> plt.plot(peaks, x[peaks], "x") + >>> plt.vlines(x=peaks, ymin=x[peaks] - properties["prominences"], + ... ymax = x[peaks], color = "C1") + >>> plt.hlines(y=properties["width_heights"], xmin=properties["left_ips"], + ... xmax=properties["right_ips"], color = "C1") + >>> plt.show() + """ + # _argmaxima1d expects array of dtype 'float64' + x = _arg_x_as_expected(x) + if distance is not None and distance < 1: + raise ValueError('`distance` must be greater or equal to 1') + + peaks, left_edges, right_edges = _local_maxima_1d(x) + properties = {} + + if plateau_size is not None: + # Evaluate plateau size + plateau_sizes = right_edges - left_edges + 1 + pmin, pmax = _unpack_condition_args(plateau_size, x, peaks) + keep = _select_by_property(plateau_sizes, pmin, pmax) + peaks = peaks[keep] + properties["plateau_sizes"] = plateau_sizes + properties["left_edges"] = left_edges + properties["right_edges"] = right_edges + properties = {key: array[keep] for key, array in properties.items()} + + if height is not None: + # Evaluate height condition + peak_heights = x[peaks] + hmin, hmax = _unpack_condition_args(height, x, peaks) + keep = _select_by_property(peak_heights, hmin, hmax) + peaks = peaks[keep] + properties["peak_heights"] = peak_heights + properties = {key: array[keep] for key, array in properties.items()} + + if threshold is not None: + # Evaluate threshold condition + tmin, tmax = _unpack_condition_args(threshold, x, peaks) + keep, left_thresholds, right_thresholds = _select_by_peak_threshold( + x, peaks, tmin, tmax) + peaks = peaks[keep] + properties["left_thresholds"] = left_thresholds + properties["right_thresholds"] = right_thresholds + properties = {key: array[keep] for key, array in properties.items()} + + if distance is not None: + # Evaluate distance condition + keep = _select_by_peak_distance(peaks, x[peaks], distance) + peaks = peaks[keep] + properties = {key: array[keep] for key, array in properties.items()} + + if prominence is not None or width is not None: + # Calculate prominence (required for both conditions) + wlen = _arg_wlen_as_expected(wlen) + properties.update(zip( + ['prominences', 'left_bases', 'right_bases'], + _peak_prominences(x, peaks, wlen=wlen) + )) + + if prominence is not None: + # Evaluate prominence condition + pmin, pmax = _unpack_condition_args(prominence, x, peaks) + keep = _select_by_property(properties['prominences'], pmin, pmax) + peaks = peaks[keep] + properties = {key: array[keep] for key, array in properties.items()} + + if width is not None: + # Calculate widths + properties.update(zip( + ['widths', 'width_heights', 'left_ips', 'right_ips'], + _peak_widths(x, peaks, rel_height, properties['prominences'], + properties['left_bases'], properties['right_bases']) + )) + # Evaluate width condition + wmin, wmax = _unpack_condition_args(width, x, peaks) + keep = _select_by_property(properties['widths'], wmin, wmax) + peaks = peaks[keep] + properties = {key: array[keep] for key, array in properties.items()} + + return peaks, properties + + +def _identify_ridge_lines(matr, max_distances, gap_thresh): + """ + Identify ridges in the 2-D matrix. + + Expect that the width of the wavelet feature increases with increasing row + number. 
+ + Parameters + ---------- + matr : 2-D ndarray + Matrix in which to identify ridge lines. + max_distances : 1-D sequence + At each row, a ridge line is only connected + if the relative max at row[n] is within + `max_distances`[n] from the relative max at row[n+1]. + gap_thresh : int + If a relative maximum is not found within `max_distances`, + there will be a gap. A ridge line is discontinued if + there are more than `gap_thresh` points without connecting + a new relative maximum. + + Returns + ------- + ridge_lines : tuple + Tuple of 2 1-D sequences. `ridge_lines`[ii][0] are the rows of the + ii-th ridge-line, `ridge_lines`[ii][1] are the columns. Empty if none + found. Each ridge-line will be sorted by row (increasing), but the + order of the ridge lines is not specified. + + References + ---------- + .. [1] Bioinformatics (2006) 22 (17): 2059-2065. + :doi:`10.1093/bioinformatics/btl355` + + Examples + -------- + >>> import numpy as np + >>> from scipy.signal._peak_finding import _identify_ridge_lines + >>> rng = np.random.default_rng() + >>> data = rng.random((5,5)) + >>> max_dist = 3 + >>> max_distances = np.full(20, max_dist) + >>> ridge_lines = _identify_ridge_lines(data, max_distances, 1) + + Notes + ----- + This function is intended to be used in conjunction with `cwt` + as part of `find_peaks_cwt`. + + """ + if len(max_distances) < matr.shape[0]: + raise ValueError('Max_distances must have at least as many rows ' + 'as matr') + + all_max_cols = _boolrelextrema(matr, np.greater, axis=1, order=1) + # Highest row for which there are any relative maxima + has_relmax = np.nonzero(all_max_cols.any(axis=1))[0] + if len(has_relmax) == 0: + return [] + start_row = has_relmax[-1] + # Each ridge line is a 3-tuple: + # rows, cols,Gap number + ridge_lines = [[[start_row], + [col], + 0] for col in np.nonzero(all_max_cols[start_row])[0]] + final_lines = [] + rows = np.arange(start_row - 1, -1, -1) + cols = np.arange(0, matr.shape[1]) + for row in rows: + this_max_cols = cols[all_max_cols[row]] + + # Increment gap number of each line, + # set it to zero later if appropriate + for line in ridge_lines: + line[2] += 1 + + # XXX These should always be all_max_cols[row] + # But the order might be different. Might be an efficiency gain + # to make sure the order is the same and avoid this iteration + prev_ridge_cols = np.array([line[1][-1] for line in ridge_lines]) + # Look through every relative maximum found at current row + # Attempt to connect them with existing ridge lines. + for ind, col in enumerate(this_max_cols): + # If there is a previous ridge line within + # the max_distance to connect to, do so. + # Otherwise start a new one. + line = None + if len(prev_ridge_cols) > 0: + diffs = np.abs(col - prev_ridge_cols) + closest = np.argmin(diffs) + if diffs[closest] <= max_distances[row]: + line = ridge_lines[closest] + if line is not None: + # Found a point close enough, extend current ridge line + line[1].append(col) + line[0].append(row) + line[2] = 0 + else: + new_line = [[row], + [col], + 0] + ridge_lines.append(new_line) + + # Remove the ridge lines with gap_number too high + # XXX Modifying a list while iterating over it. + # Should be safe, since we iterate backwards, but + # still tacky. 
+ for ind in range(len(ridge_lines) - 1, -1, -1): + line = ridge_lines[ind] + if line[2] > gap_thresh: + final_lines.append(line) + del ridge_lines[ind] + + out_lines = [] + for line in (final_lines + ridge_lines): + sortargs = np.array(np.argsort(line[0])) + rows, cols = np.zeros_like(sortargs), np.zeros_like(sortargs) + rows[sortargs] = line[0] + cols[sortargs] = line[1] + out_lines.append([rows, cols]) + + return out_lines + + +def _filter_ridge_lines(cwt, ridge_lines, window_size=None, min_length=None, + min_snr=1, noise_perc=10): + """ + Filter ridge lines according to prescribed criteria. Intended + to be used for finding relative maxima. + + Parameters + ---------- + cwt : 2-D ndarray + Continuous wavelet transform from which the `ridge_lines` were defined. + ridge_lines : 1-D sequence + Each element should contain 2 sequences, the rows and columns + of the ridge line (respectively). + window_size : int, optional + Size of window to use to calculate noise floor. + Default is ``cwt.shape[1] / 20``. + min_length : int, optional + Minimum length a ridge line needs to be acceptable. + Default is ``cwt.shape[0] / 4``, ie 1/4-th the number of widths. + min_snr : float, optional + Minimum SNR ratio. Default 1. The signal is the value of + the cwt matrix at the shortest length scale (``cwt[0, loc]``), the + noise is the `noise_perc`\\ th percentile of datapoints contained within a + window of `window_size` around ``cwt[0, loc]``. + noise_perc : float, optional + When calculating the noise floor, percentile of data points + examined below which to consider noise. Calculated using + scipy.stats.scoreatpercentile. + + References + ---------- + .. [1] Bioinformatics (2006) 22 (17): 2059-2065. + :doi:`10.1093/bioinformatics/btl355` + + """ + num_points = cwt.shape[1] + if min_length is None: + min_length = np.ceil(cwt.shape[0] / 4) + if window_size is None: + window_size = np.ceil(num_points / 20) + + window_size = int(window_size) + hf_window, odd = divmod(window_size, 2) + + # Filter based on SNR + row_one = cwt[0, :] + noises = np.empty_like(row_one) + for ind, val in enumerate(row_one): + window_start = max(ind - hf_window, 0) + window_end = min(ind + hf_window + odd, num_points) + noises[ind] = scoreatpercentile(row_one[window_start:window_end], + per=noise_perc) + + def filt_func(line): + if len(line[0]) < min_length: + return False + snr = abs(cwt[line[0][0], line[1][0]] / noises[line[1][0]]) + if snr < min_snr: + return False + return True + + return list(filter(filt_func, ridge_lines)) + + +def find_peaks_cwt(vector, widths, wavelet=None, max_distances=None, + gap_thresh=None, min_length=None, + min_snr=1, noise_perc=10, window_size=None): + """ + Find peaks in a 1-D array with wavelet transformation. + + The general approach is to smooth `vector` by convolving it with + `wavelet(width)` for each width in `widths`. Relative maxima which + appear at enough length scales, and with sufficiently high SNR, are + accepted. + + Parameters + ---------- + vector : ndarray + 1-D array in which to find the peaks. + widths : float or sequence + Single width or 1-D array-like of widths to use for calculating + the CWT matrix. In general, + this range should cover the expected width of peaks of interest. + wavelet : callable, optional + Should take two parameters and return a 1-D array to convolve + with `vector`. The first parameter determines the number of points + of the returned wavelet array, the second parameter is the scale + (`width`) of the wavelet. Should be normalized and symmetric. 
+ Default is the ricker wavelet. + max_distances : ndarray, optional + At each row, a ridge line is only connected if the relative max at + row[n] is within ``max_distances[n]`` from the relative max at + ``row[n+1]``. Default value is ``widths/4``. + gap_thresh : float, optional + If a relative maximum is not found within `max_distances`, + there will be a gap. A ridge line is discontinued if there are more + than `gap_thresh` points without connecting a new relative maximum. + Default is the first value of the widths array i.e. widths[0]. + min_length : int, optional + Minimum length a ridge line needs to be acceptable. + Default is ``cwt.shape[0] / 4``, ie 1/4-th the number of widths. + min_snr : float, optional + Minimum SNR ratio. Default 1. The signal is the maximum CWT coefficient + on the largest ridge line. The noise is `noise_perc` th percentile of + datapoints contained within the same ridge line. + noise_perc : float, optional + When calculating the noise floor, percentile of data points + examined below which to consider noise. Calculated using + `stats.scoreatpercentile`. Default is 10. + window_size : int, optional + Size of window to use to calculate noise floor. + Default is ``cwt.shape[1] / 20``. + + Returns + ------- + peaks_indices : ndarray + Indices of the locations in the `vector` where peaks were found. + The list is sorted. + + See Also + -------- + find_peaks + Find peaks inside a signal based on peak properties. + + Notes + ----- + This approach was designed for finding sharp peaks among noisy data, + however with proper parameter selection it should function well for + different peak shapes. + + The algorithm is as follows: + 1. Perform a continuous wavelet transform on `vector`, for the supplied + `widths`. This is a convolution of `vector` with `wavelet(width)` for + each width in `widths`. See `cwt`. + 2. Identify "ridge lines" in the cwt matrix. These are relative maxima + at each row, connected across adjacent rows. See identify_ridge_lines + 3. Filter the ridge_lines using filter_ridge_lines. + + .. versionadded:: 0.11.0 + + References + ---------- + .. [1] Bioinformatics (2006) 22 (17): 2059-2065. 
+ :doi:`10.1093/bioinformatics/btl355` + + Examples + -------- + >>> import numpy as np + >>> from scipy import signal + >>> xs = np.arange(0, np.pi, 0.05) + >>> data = np.sin(xs) + >>> peakind = signal.find_peaks_cwt(data, np.arange(1,10)) + >>> peakind, xs[peakind], data[peakind] + ([32], array([ 1.6]), array([ 0.9995736])) + + """ + widths = np.atleast_1d(np.asarray(widths)) + + if gap_thresh is None: + gap_thresh = np.ceil(widths[0]) + if max_distances is None: + max_distances = widths / 4.0 + if wavelet is None: + wavelet = _ricker + + cwt_dat = _cwt(vector, wavelet, widths) + ridge_lines = _identify_ridge_lines(cwt_dat, max_distances, gap_thresh) + filtered = _filter_ridge_lines(cwt_dat, ridge_lines, min_length=min_length, + window_size=window_size, min_snr=min_snr, + noise_perc=noise_perc) + max_locs = np.asarray([x[1][0] for x in filtered]) + max_locs.sort() + + return max_locs diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/_savitzky_golay.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/_savitzky_golay.py new file mode 100644 index 0000000000000000000000000000000000000000..addcbe6951f8df093461c47848f8027dfdd406f2 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/_savitzky_golay.py @@ -0,0 +1,357 @@ +import numpy as np +from scipy.linalg import lstsq +from scipy._lib._util import float_factorial +from scipy.ndimage import convolve1d # type: ignore[attr-defined] +from ._arraytools import axis_slice + + +def savgol_coeffs(window_length, polyorder, deriv=0, delta=1.0, pos=None, + use="conv"): + """Compute the coefficients for a 1-D Savitzky-Golay FIR filter. + + Parameters + ---------- + window_length : int + The length of the filter window (i.e., the number of coefficients). + polyorder : int + The order of the polynomial used to fit the samples. + `polyorder` must be less than `window_length`. + deriv : int, optional + The order of the derivative to compute. This must be a + nonnegative integer. The default is 0, which means to filter + the data without differentiating. + delta : float, optional + The spacing of the samples to which the filter will be applied. + This is only used if deriv > 0. + pos : int or None, optional + If pos is not None, it specifies evaluation position within the + window. The default is the middle of the window. + use : str, optional + Either 'conv' or 'dot'. This argument chooses the order of the + coefficients. The default is 'conv', which means that the + coefficients are ordered to be used in a convolution. With + use='dot', the order is reversed, so the filter is applied by + dotting the coefficients with the data set. + + Returns + ------- + coeffs : 1-D ndarray + The filter coefficients. + + See Also + -------- + savgol_filter + + Notes + ----- + .. versionadded:: 0.14.0 + + References + ---------- + A. Savitzky, M. J. E. Golay, Smoothing and Differentiation of Data by + Simplified Least Squares Procedures. Analytical Chemistry, 1964, 36 (8), + pp 1627-1639. + Jianwen Luo, Kui Ying, and Jing Bai. 2005. Savitzky-Golay smoothing and + differentiation filter for even number data. Signal Process. + 85, 7 (July 2005), 1429-1434. 
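+
+ As an informal cross-check (an editorial sketch, not part of the upstream
+ docstring; the sample signal is illustrative only): away from the edges,
+ convolving a signal with the 'conv'-ordered coefficients reproduces
+ `savgol_filter`:
+
+ >>> import numpy as np
+ >>> from scipy.signal import savgol_coeffs, savgol_filter
+ >>> x = np.arange(10.0) ** 2
+ >>> c = savgol_coeffs(5, 2)
+ >>> np.allclose(np.convolve(x, c, mode='same')[2:-2],
+ ...             savgol_filter(x, 5, 2)[2:-2])
+ True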
+ + Examples + -------- + >>> import numpy as np + >>> from scipy.signal import savgol_coeffs + >>> savgol_coeffs(5, 2) + array([-0.08571429, 0.34285714, 0.48571429, 0.34285714, -0.08571429]) + >>> savgol_coeffs(5, 2, deriv=1) + array([ 2.00000000e-01, 1.00000000e-01, 2.07548111e-16, -1.00000000e-01, + -2.00000000e-01]) + + Note that use='dot' simply reverses the coefficients. + + >>> savgol_coeffs(5, 2, pos=3) + array([ 0.25714286, 0.37142857, 0.34285714, 0.17142857, -0.14285714]) + >>> savgol_coeffs(5, 2, pos=3, use='dot') + array([-0.14285714, 0.17142857, 0.34285714, 0.37142857, 0.25714286]) + >>> savgol_coeffs(4, 2, pos=3, deriv=1, use='dot') + array([0.45, -0.85, -0.65, 1.05]) + + `x` contains data from the parabola x = t**2, sampled at + t = -1, 0, 1, 2, 3. `c` holds the coefficients that will compute the + derivative at the last position. When dotted with `x` the result should + be 6. + + >>> x = np.array([1, 0, 1, 4, 9]) + >>> c = savgol_coeffs(5, 2, pos=4, deriv=1, use='dot') + >>> c.dot(x) + 6.0 + """ + + # An alternative method for finding the coefficients when deriv=0 is + # t = np.arange(window_length) + # unit = (t == pos).astype(int) + # coeffs = np.polyval(np.polyfit(t, unit, polyorder), t) + # The method implemented here is faster. + + # To recreate the table of sample coefficients shown in the chapter on + # the Savitzy-Golay filter in the Numerical Recipes book, use + # window_length = nL + nR + 1 + # pos = nL + 1 + # c = savgol_coeffs(window_length, M, pos=pos, use='dot') + + if polyorder >= window_length: + raise ValueError("polyorder must be less than window_length.") + + halflen, rem = divmod(window_length, 2) + + if pos is None: + if rem == 0: + pos = halflen - 0.5 + else: + pos = halflen + + if not (0 <= pos < window_length): + raise ValueError("pos must be nonnegative and less than " + "window_length.") + + if use not in ['conv', 'dot']: + raise ValueError("`use` must be 'conv' or 'dot'") + + if deriv > polyorder: + coeffs = np.zeros(window_length) + return coeffs + + # Form the design matrix A. The columns of A are powers of the integers + # from -pos to window_length - pos - 1. The powers (i.e., rows) range + # from 0 to polyorder. (That is, A is a vandermonde matrix, but not + # necessarily square.) + x = np.arange(-pos, window_length - pos, dtype=float) + + if use == "conv": + # Reverse so that result can be used in a convolution. + x = x[::-1] + + order = np.arange(polyorder + 1).reshape(-1, 1) + A = x ** order + + # y determines which order derivative is returned. + y = np.zeros(polyorder + 1) + # The coefficient assigned to y[deriv] scales the result to take into + # account the order of the derivative and the sample spacing. + y[deriv] = float_factorial(deriv) / (delta ** deriv) + + # Find the least-squares solution of A*c = y + coeffs, _, _, _ = lstsq(A, y) + + return coeffs + + +def _polyder(p, m): + """Differentiate polynomials represented with coefficients. + + p must be a 1-D or 2-D array. In the 2-D case, each column gives + the coefficients of a polynomial; the first row holds the coefficients + associated with the highest power. m must be a nonnegative integer. + (numpy.polyder doesn't handle the 2-D case.) 
+ """ + + if m == 0: + result = p + else: + n = len(p) + if n <= m: + result = np.zeros_like(p[:1, ...]) + else: + dp = p[:-m].copy() + for k in range(m): + rng = np.arange(n - k - 1, m - k - 1, -1) + dp *= rng.reshape((n - m,) + (1,) * (p.ndim - 1)) + result = dp + return result + + +def _fit_edge(x, window_start, window_stop, interp_start, interp_stop, + axis, polyorder, deriv, delta, y): + """ + Given an N-d array `x` and the specification of a slice of `x` from + `window_start` to `window_stop` along `axis`, create an interpolating + polynomial of each 1-D slice, and evaluate that polynomial in the slice + from `interp_start` to `interp_stop`. Put the result into the + corresponding slice of `y`. + """ + + # Get the edge into a (window_length, -1) array. + x_edge = axis_slice(x, start=window_start, stop=window_stop, axis=axis) + if axis == 0 or axis == -x.ndim: + xx_edge = x_edge + swapped = False + else: + xx_edge = x_edge.swapaxes(axis, 0) + swapped = True + xx_edge = xx_edge.reshape(xx_edge.shape[0], -1) + + # Fit the edges. poly_coeffs has shape (polyorder + 1, -1), + # where '-1' is the same as in xx_edge. + poly_coeffs = np.polyfit(np.arange(0, window_stop - window_start), + xx_edge, polyorder) + + if deriv > 0: + poly_coeffs = _polyder(poly_coeffs, deriv) + + # Compute the interpolated values for the edge. + i = np.arange(interp_start - window_start, interp_stop - window_start) + values = np.polyval(poly_coeffs, i.reshape(-1, 1)) / (delta ** deriv) + + # Now put the values into the appropriate slice of y. + # First reshape values to match y. + shp = list(y.shape) + shp[0], shp[axis] = shp[axis], shp[0] + values = values.reshape(interp_stop - interp_start, *shp[1:]) + if swapped: + values = values.swapaxes(0, axis) + # Get a view of the data to be replaced by values. + y_edge = axis_slice(y, start=interp_start, stop=interp_stop, axis=axis) + y_edge[...] = values + + +def _fit_edges_polyfit(x, window_length, polyorder, deriv, delta, axis, y): + """ + Use polynomial interpolation of x at the low and high ends of the axis + to fill in the halflen values in y. + + This function just calls _fit_edge twice, once for each end of the axis. + """ + halflen = window_length // 2 + _fit_edge(x, 0, window_length, 0, halflen, axis, + polyorder, deriv, delta, y) + n = x.shape[axis] + _fit_edge(x, n - window_length, n, n - halflen, n, axis, + polyorder, deriv, delta, y) + + +def savgol_filter(x, window_length, polyorder, deriv=0, delta=1.0, + axis=-1, mode='interp', cval=0.0): + """ Apply a Savitzky-Golay filter to an array. + + This is a 1-D filter. If `x` has dimension greater than 1, `axis` + determines the axis along which the filter is applied. + + Parameters + ---------- + x : array_like + The data to be filtered. If `x` is not a single or double precision + floating point array, it will be converted to type ``numpy.float64`` + before filtering. + window_length : int + The length of the filter window (i.e., the number of coefficients). + If `mode` is 'interp', `window_length` must be less than or equal + to the size of `x`. + polyorder : int + The order of the polynomial used to fit the samples. + `polyorder` must be less than `window_length`. + deriv : int, optional + The order of the derivative to compute. This must be a + nonnegative integer. The default is 0, which means to filter + the data without differentiating. + delta : float, optional + The spacing of the samples to which the filter will be applied. + This is only used if deriv > 0. Default is 1.0. 
+ axis : int, optional + The axis of the array `x` along which the filter is to be applied. + Default is -1. + mode : str, optional + Must be 'mirror', 'constant', 'nearest', 'wrap' or 'interp'. This + determines the type of extension to use for the padded signal to + which the filter is applied. When `mode` is 'constant', the padding + value is given by `cval`. See the Notes for more details on 'mirror', + 'constant', 'wrap', and 'nearest'. + When the 'interp' mode is selected (the default), no extension + is used. Instead, a degree `polyorder` polynomial is fit to the + last `window_length` values of the edges, and this polynomial is + used to evaluate the last `window_length // 2` output values. + cval : scalar, optional + Value to fill past the edges of the input if `mode` is 'constant'. + Default is 0.0. + + Returns + ------- + y : ndarray, same shape as `x` + The filtered data. + + See Also + -------- + savgol_coeffs + + Notes + ----- + Details on the `mode` options: + + 'mirror': + Repeats the values at the edges in reverse order. The value + closest to the edge is not included. + 'nearest': + The extension contains the nearest input value. + 'constant': + The extension contains the value given by the `cval` argument. + 'wrap': + The extension contains the values from the other end of the array. + + For example, if the input is [1, 2, 3, 4, 5, 6, 7, 8], and + `window_length` is 7, the following shows the extended data for + the various `mode` options (assuming `cval` is 0):: + + mode | Ext | Input | Ext + -----------+---------+------------------------+--------- + 'mirror' | 4 3 2 | 1 2 3 4 5 6 7 8 | 7 6 5 + 'nearest' | 1 1 1 | 1 2 3 4 5 6 7 8 | 8 8 8 + 'constant' | 0 0 0 | 1 2 3 4 5 6 7 8 | 0 0 0 + 'wrap' | 6 7 8 | 1 2 3 4 5 6 7 8 | 1 2 3 + + .. versionadded:: 0.14.0 + + Examples + -------- + >>> import numpy as np + >>> from scipy.signal import savgol_filter + >>> np.set_printoptions(precision=2) # For compact display. + >>> x = np.array([2, 2, 5, 2, 1, 0, 1, 4, 9]) + + Filter with a window length of 5 and a degree 2 polynomial. Use + the defaults for all other parameters. + + >>> savgol_filter(x, 5, 2) + array([1.66, 3.17, 3.54, 2.86, 0.66, 0.17, 1. , 4. , 9. ]) + + Note that the last five values in x are samples of a parabola, so + when mode='interp' (the default) is used with polyorder=2, the last + three values are unchanged. Compare that to, for example, + `mode='nearest'`: + + >>> savgol_filter(x, 5, 2, mode='nearest') + array([1.74, 3.03, 3.54, 2.86, 0.66, 0.17, 1. , 4.6 , 7.97]) + + """ + if mode not in ["mirror", "constant", "nearest", "interp", "wrap"]: + raise ValueError("mode must be 'mirror', 'constant', 'nearest' " + "'wrap' or 'interp'.") + + x = np.asarray(x) + # Ensure that x is either single or double precision floating point. + if x.dtype != np.float64 and x.dtype != np.float32: + x = x.astype(np.float64) + + coeffs = savgol_coeffs(window_length, polyorder, deriv=deriv, delta=delta) + + if mode == "interp": + if window_length > x.shape[axis]: + raise ValueError("If mode is 'interp', window_length must be less " + "than or equal to the size of x.") + + # Do not pad. Instead, for the elements within `window_length // 2` + # of the ends of the sequence, use the polynomial that is fitted to + # the last `window_length` elements. + y = convolve1d(x, coeffs, axis=axis, mode="constant") + _fit_edges_polyfit(x, window_length, polyorder, deriv, delta, axis, y) + else: + # Any mode other than 'interp' is passed on to ndimage.convolve1d. 
+ y = convolve1d(x, coeffs, axis=axis, mode=mode, cval=cval) + + return y diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/_short_time_fft.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/_short_time_fft.py new file mode 100644 index 0000000000000000000000000000000000000000..6c87718dea8b777d23c77d6147b4ca6368204637 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/_short_time_fft.py @@ -0,0 +1,1738 @@ +"""Implementation of an FFT-based Short-time Fourier Transform. """ + +# Implementation Notes for this file (as of 2023-07) +# -------------------------------------------------- +# * MyPy version 1.1.1 does not seem to support decorated property methods +# properly. Hence, applying ``@property`` to methods decorated with `@cache`` +# (as tried with the ``lower_border_end`` method) causes a mypy error when +# accessing it as an index (e.g., ``SFT.lower_border_end[0]``). +# * Since the method `stft` and `istft` have identical names as the legacy +# functions in the signal module, referencing them as HTML link in the +# docstrings has to be done by an explicit `~ShortTimeFFT.stft` instead of an +# ambiguous `stft` (The ``~`` hides the class / module name). +# * The HTML documentation currently renders each method/property on a separate +# page without reference to the parent class. Thus, a link to `ShortTimeFFT` +# was added to the "See Also" section of each method/property. These links +# can be removed, when SciPy updates ``pydata-sphinx-theme`` to >= 0.13.3 +# (currently 0.9). Consult Issue 18512 and PR 16660 for further details. +# + +# Provides typing union operator ``|`` in Python 3.9: +# Linter does not allow to import ``Generator`` from ``typing`` module: +from collections.abc import Generator, Callable +from functools import cache, lru_cache, partial +from typing import get_args, Literal + +import numpy as np + +import scipy.fft as fft_lib +from scipy.signal import detrend +from scipy.signal.windows import get_window + +__all__ = ['ShortTimeFFT'] + + +#: Allowed values for parameter `padding` of method `ShortTimeFFT.stft()`: +PAD_TYPE = Literal['zeros', 'edge', 'even', 'odd'] + +#: Allowed values for property `ShortTimeFFT.fft_mode`: +FFT_MODE_TYPE = Literal['twosided', 'centered', 'onesided', 'onesided2X'] + + +def _calc_dual_canonical_window(win: np.ndarray, hop: int) -> np.ndarray: + """Calculate canonical dual window for 1d window `win` and a time step + of `hop` samples. + + A ``ValueError`` is raised, if the inversion fails. + + This is a separate function not a method, since it is also used in the + class method ``ShortTimeFFT.from_dual()``. + """ + if hop > len(win): + raise ValueError(f"{hop=} is larger than window length of {len(win)}" + + " => STFT not invertible!") + if issubclass(win.dtype.type, np.integer): + raise ValueError("Parameter 'win' cannot be of integer type, but " + + f"{win.dtype=} => STFT not invertible!") + # The calculation of `relative_resolution` does not work for ints. + # Furthermore, `win / DD` casts the integers away, thus an implicit + # cast is avoided, which can always cause confusion when using 32-Bit + # floats. 
+ + w2 = win.real**2 + win.imag**2 # win*win.conj() does not ensure w2 is real + DD = w2.copy() + for k_ in range(hop, len(win), hop): + DD[k_:] += w2[:-k_] + DD[:-k_] += w2[k_:] + + # check DD > 0: + relative_resolution = np.finfo(win.dtype).resolution * max(DD) + if not np.all(DD >= relative_resolution): + raise ValueError("Short-time Fourier Transform not invertible!") + + return win / DD + + +# noinspection PyShadowingNames +class ShortTimeFFT: + r"""Provide a parametrized discrete Short-time Fourier transform (stft) + and its inverse (istft). + + .. currentmodule:: scipy.signal.ShortTimeFFT + + The `~ShortTimeFFT.stft` calculates sequential FFTs by sliding a + window (`win`) over an input signal by `hop` increments. It can be used to + quantify the change of the spectrum over time. + + The `~ShortTimeFFT.stft` is represented by a complex-valued matrix S[q,p] + where the p-th column represents an FFT with the window centered at the + time t[p] = p * `delta_t` = p * `hop` * `T` where `T` is the sampling + interval of the input signal. The q-th row represents the values at the + frequency f[q] = q * `delta_f` with `delta_f` = 1 / (`mfft` * `T`) being + the bin width of the FFT. + + The inverse STFT `~ShortTimeFFT.istft` is calculated by reversing the steps + of the STFT: Take the IFFT of the p-th slice of S[q,p] and multiply the + result with the so-called dual window (see `dual_win`). Shift the result by + p * `delta_t` and add the result to previous shifted results to reconstruct + the signal. If only the dual window is known and the STFT is invertible, + `from_dual` can be used to instantiate this class. + + Due to the convention of time t = 0 being at the first sample of the input + signal, the STFT values typically have negative time slots. Hence, + negative indexes like `p_min` or `k_min` do not indicate counting + backwards from an array's end like in standard Python indexing but being + left of t = 0. + + More detailed information can be found in the :ref:`tutorial_stft` section + of the :ref:`user_guide`. + + Note that all parameters of the initializer, except `scale_to` (which uses + `scaling`) have identical named attributes. + + Parameters + ---------- + win : np.ndarray + The window must be a real- or complex-valued 1d array. + hop : int + The increment in samples, by which the window is shifted in each step. + fs : float + Sampling frequency of input signal and window. Its relation to the + sampling interval `T` is ``T = 1 / fs``. + fft_mode : 'twosided', 'centered', 'onesided', 'onesided2X' + Mode of FFT to be used (default 'onesided'). + See property `fft_mode` for details. + mfft: int | None + Length of the FFT used, if a zero padded FFT is desired. + If ``None`` (default), the length of the window `win` is used. + dual_win : np.ndarray | None + The dual window of `win`. If set to ``None``, it is calculated if + needed. + scale_to : 'magnitude', 'psd' | None + If not ``None`` (default) the window function is scaled, so each STFT + column represents either a 'magnitude' or a power spectral density + ('psd') spectrum. This parameter sets the property `scaling` to the + same value. See method `scale_to` for details. + phase_shift : int | None + If set, add a linear phase `phase_shift` / `mfft` * `f` to each + frequency `f`. The default value 0 ensures that there is no phase shift + on the zeroth slice (in which t=0 is centered). See property + `phase_shift` for more details. 
+ + Examples + -------- + The following example shows the magnitude of the STFT of a sine with + varying frequency :math:`f_i(t)` (marked by a red dashed line in the plot): + + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> from scipy.signal import ShortTimeFFT + >>> from scipy.signal.windows import gaussian + ... + >>> T_x, N = 1 / 20, 1000 # 20 Hz sampling rate for 50 s signal + >>> t_x = np.arange(N) * T_x # time indexes for signal + >>> f_i = 1 * np.arctan((t_x - t_x[N // 2]) / 2) + 5 # varying frequency + >>> x = np.sin(2*np.pi*np.cumsum(f_i)*T_x) # the signal + + The utilized Gaussian window is 50 samples or 2.5 s long. The parameter + ``mfft=200`` in `ShortTimeFFT` causes the spectrum to be oversampled + by a factor of 4: + + >>> g_std = 8 # standard deviation for Gaussian window in samples + >>> w = gaussian(50, std=g_std, sym=True) # symmetric Gaussian window + >>> SFT = ShortTimeFFT(w, hop=10, fs=1/T_x, mfft=200, scale_to='magnitude') + >>> Sx = SFT.stft(x) # perform the STFT + + In the plot, the time extent of the signal `x` is marked by vertical dashed + lines. Note that the SFT produces values outside the time range of `x`. The + shaded areas on the left and the right indicate border effects caused + by the window slices in that area not fully being inside time range of + `x`: + + >>> fig1, ax1 = plt.subplots(figsize=(6., 4.)) # enlarge plot a bit + >>> t_lo, t_hi = SFT.extent(N)[:2] # time range of plot + >>> ax1.set_title(rf"STFT ({SFT.m_num*SFT.T:g}$\,s$ Gaussian window, " + + ... rf"$\sigma_t={g_std*SFT.T}\,$s)") + >>> ax1.set(xlabel=f"Time $t$ in seconds ({SFT.p_num(N)} slices, " + + ... rf"$\Delta t = {SFT.delta_t:g}\,$s)", + ... ylabel=f"Freq. $f$ in Hz ({SFT.f_pts} bins, " + + ... rf"$\Delta f = {SFT.delta_f:g}\,$Hz)", + ... xlim=(t_lo, t_hi)) + ... + >>> im1 = ax1.imshow(abs(Sx), origin='lower', aspect='auto', + ... extent=SFT.extent(N), cmap='viridis') + >>> ax1.plot(t_x, f_i, 'r--', alpha=.5, label='$f_i(t)$') + >>> fig1.colorbar(im1, label="Magnitude $|S_x(t, f)|$") + ... + >>> # Shade areas where window slices stick out to the side: + >>> for t0_, t1_ in [(t_lo, SFT.lower_border_end[0] * SFT.T), + ... (SFT.upper_border_begin(N)[0] * SFT.T, t_hi)]: + ... ax1.axvspan(t0_, t1_, color='w', linewidth=0, alpha=.2) + >>> for t_ in [0, N * SFT.T]: # mark signal borders with vertical line: + ... ax1.axvline(t_, color='y', linestyle='--', alpha=0.5) + >>> ax1.legend() + >>> fig1.tight_layout() + >>> plt.show() + + Reconstructing the signal with the `~ShortTimeFFT.istft` is + straightforward, but note that the length of `x1` should be specified, + since the SFT length increases in `hop` steps: + + >>> SFT.invertible # check if invertible + True + >>> x1 = SFT.istft(Sx, k1=N) + >>> np.allclose(x, x1) + True + + It is possible to calculate the SFT of signal parts: + + >>> N2 = SFT.nearest_k_p(N // 2) + >>> Sx0 = SFT.stft(x[:N2]) + >>> Sx1 = SFT.stft(x[N2:]) + + When assembling sequential STFT parts together, the overlap needs to be + considered: + + >>> p0_ub = SFT.upper_border_begin(N2)[1] - SFT.p_min + >>> p1_le = SFT.lower_border_end[1] - SFT.p_min + >>> Sx01 = np.hstack((Sx0[:, :p0_ub], + ... Sx0[:, p0_ub:] + Sx1[:, :p1_le], + ... 
Sx1[:, p1_le:])) + >>> np.allclose(Sx01, Sx) # Compare with SFT of complete signal + True + + It is also possible to calculate the `itsft` for signal parts: + + >>> y_p = SFT.istft(Sx, N//3, N//2) + >>> np.allclose(y_p, x[N//3:N//2]) + True + + """ + # immutable attributes (only have getters but no setters): + _win: np.ndarray # window + _dual_win: np.ndarray | None = None # canonical dual window + _hop: int # Step of STFT in number of samples + + # mutable attributes: + _fs: float # sampling frequency of input signal and window + _fft_mode: FFT_MODE_TYPE = 'onesided' # Mode of FFT to use + _mfft: int # length of FFT used - defaults to len(win) + _scaling: Literal['magnitude', 'psd'] | None = None # Scaling of _win + _phase_shift: int | None # amount to shift phase of FFT in samples + + # attributes for caching calculated values: + _fac_mag: float | None = None + _fac_psd: float | None = None + _lower_border_end: tuple[int, int] | None = None + + def __init__(self, win: np.ndarray, hop: int, fs: float, *, + fft_mode: FFT_MODE_TYPE = 'onesided', + mfft: int | None = None, + dual_win: np.ndarray | None = None, + scale_to: Literal['magnitude', 'psd'] | None = None, + phase_shift: int | None = 0): + if not (win.ndim == 1 and win.size > 0): + raise ValueError(f"Parameter win must be 1d, but {win.shape=}!") + if not all(np.isfinite(win)): + raise ValueError("Parameter win must have finite entries!") + if not (hop >= 1 and isinstance(hop, int)): + raise ValueError(f"Parameter {hop=} is not an integer >= 1!") + self._win, self._hop, self.fs = win, hop, fs + + self.mfft = len(win) if mfft is None else mfft + + if dual_win is not None: + if dual_win.shape != win.shape: + raise ValueError(f"{dual_win.shape=} must equal {win.shape=}!") + if not all(np.isfinite(dual_win)): + raise ValueError("Parameter dual_win must be a finite array!") + self._dual_win = dual_win # needs to be set before scaling + + if scale_to is not None: # needs to be set before fft_mode + self.scale_to(scale_to) + + self.fft_mode, self.phase_shift = fft_mode, phase_shift + + @classmethod + def from_dual(cls, dual_win: np.ndarray, hop: int, fs: float, *, + fft_mode: FFT_MODE_TYPE = 'onesided', + mfft: int | None = None, + scale_to: Literal['magnitude', 'psd'] | None = None, + phase_shift: int | None = 0): + r"""Instantiate a `ShortTimeFFT` by only providing a dual window. + + If an STFT is invertible, it is possible to calculate the window `win` + from a given dual window `dual_win`. All other parameters have the + same meaning as in the initializer of `ShortTimeFFT`. + + As explained in the :ref:`tutorial_stft` section of the + :ref:`user_guide`, an invertible STFT can be interpreted as series + expansion of time-shifted and frequency modulated dual windows. E.g., + the series coefficient S[q,p] belongs to the term, which shifted + `dual_win` by p * `delta_t` and multiplied it by + exp( 2 * j * pi * t * q * `delta_f`). + + + Examples + -------- + The following example discusses decomposing a signal into time- and + frequency-shifted Gaussians. A Gaussian with standard deviation of + one made up of 51 samples will be used: + + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> from scipy.signal import ShortTimeFFT + >>> from scipy.signal.windows import gaussian + ... + >>> T, N = 0.1, 51 + >>> d_win = gaussian(N, std=1/T, sym=True) # symmetric Gaussian window + >>> t = T * (np.arange(N) - N//2) + ... 
+ >>> fg1, ax1 = plt.subplots() + >>> ax1.set_title(r"Dual Window: Gaussian with $\sigma_t=1$") + >>> ax1.set(xlabel=f"Time $t$ in seconds ({N} samples, $T={T}$ s)", + ... xlim=(t[0], t[-1]), ylim=(0, 1.1*max(d_win))) + >>> ax1.plot(t, d_win, 'C0-') + + The following plot with the overlap of 41, 11 and 2 samples show how + the `hop` interval affects the shape of the window `win`: + + >>> fig2, axx = plt.subplots(3, 1, sharex='all') + ... + >>> axx[0].set_title(r"Windows for hop$\in\{10, 40, 49\}$") + >>> for c_, h_ in enumerate([10, 40, 49]): + ... SFT = ShortTimeFFT.from_dual(d_win, h_, 1/T) + ... axx[c_].plot(t + h_ * T, SFT.win, 'k--', alpha=.3, label=None) + ... axx[c_].plot(t - h_ * T, SFT.win, 'k:', alpha=.3, label=None) + ... axx[c_].plot(t, SFT.win, f'C{c_+1}', + ... label=r"$\Delta t=%0.1f\,$s" % SFT.delta_t) + ... axx[c_].set_ylim(0, 1.1*max(SFT.win)) + ... axx[c_].legend(loc='center') + >>> axx[-1].set(xlabel=f"Time $t$ in seconds ({N} samples, $T={T}$ s)", + ... xlim=(t[0], t[-1])) + >>> plt.show() + + Beside the window `win` centered at t = 0 the previous (t = -`delta_t`) + and following window (t = `delta_t`) are depicted. It can be seen that + for small `hop` intervals, the window is compact and smooth, having a + good time-frequency concentration in the STFT. For the large `hop` + interval of 4.9 s, the window has small values around t = 0, which are + not covered by the overlap of the adjacent windows, which could lead to + numeric inaccuracies. Furthermore, the peaky shape at the beginning and + the end of the window points to a higher bandwidth, resulting in a + poorer time-frequency resolution of the STFT. + Hence, the choice of the `hop` interval will be a compromise between + a time-frequency resolution and memory requirements demanded by small + `hop` sizes. + + See Also + -------- + from_window: Create instance by wrapping `get_window`. + ShortTimeFFT: Create instance using standard initializer. + """ + win = _calc_dual_canonical_window(dual_win, hop) + return cls(win=win, hop=hop, fs=fs, fft_mode=fft_mode, mfft=mfft, + dual_win=dual_win, scale_to=scale_to, + phase_shift=phase_shift) + + @classmethod + def from_window(cls, win_param: str | tuple | float, + fs: float, nperseg: int, noverlap: int, *, + symmetric_win: bool = False, + fft_mode: FFT_MODE_TYPE = 'onesided', + mfft: int | None = None, + scale_to: Literal['magnitude', 'psd'] | None = None, + phase_shift: int | None = 0): + """Instantiate `ShortTimeFFT` by using `get_window`. + + The method `get_window` is used to create a window of length + `nperseg`. The parameter names `noverlap`, and `nperseg` are used here, + since they more inline with other classical STFT libraries. + + Parameters + ---------- + win_param: Union[str, tuple, float], + Parameters passed to `get_window`. For windows with no parameters, + it may be a string (e.g., ``'hann'``), for parametrized windows a + tuple, (e.g., ``('gaussian', 2.)``) or a single float specifying + the shape parameter of a kaiser window (i.e. ``4.`` and + ``('kaiser', 4.)`` are equal. See `get_window` for more details. + fs : float + Sampling frequency of input signal. Its relation to the + sampling interval `T` is ``T = 1 / fs``. + nperseg: int + Window length in samples, which corresponds to the `m_num`. + noverlap: int + Window overlap in samples. It relates to the `hop` increment by + ``hop = npsereg - noverlap``. + symmetric_win: bool + If ``True`` then a symmetric window is generated, else a periodic + window is generated (default). 
Though symmetric windows seem for + most applications to be more sensible, the default of a periodic + windows was chosen to correspond to the default of `get_window`. + fft_mode : 'twosided', 'centered', 'onesided', 'onesided2X' + Mode of FFT to be used (default 'onesided'). + See property `fft_mode` for details. + mfft: int | None + Length of the FFT used, if a zero padded FFT is desired. + If ``None`` (default), the length of the window `win` is used. + scale_to : 'magnitude', 'psd' | None + If not ``None`` (default) the window function is scaled, so each + STFT column represents either a 'magnitude' or a power spectral + density ('psd') spectrum. This parameter sets the property + `scaling` to the same value. See method `scale_to` for details. + phase_shift : int | None + If set, add a linear phase `phase_shift` / `mfft` * `f` to each + frequency `f`. The default value 0 ensures that there is no phase + shift on the zeroth slice (in which t=0 is centered). See property + `phase_shift` for more details. + + Examples + -------- + The following instances ``SFT0`` and ``SFT1`` are equivalent: + + >>> from scipy.signal import ShortTimeFFT, get_window + >>> nperseg = 9 # window length + >>> w = get_window(('gaussian', 2.), nperseg) + >>> fs = 128 # sampling frequency + >>> hop = 3 # increment of STFT time slice + >>> SFT0 = ShortTimeFFT(w, hop, fs=fs) + >>> SFT1 = ShortTimeFFT.from_window(('gaussian', 2.), fs, nperseg, + ... noverlap=nperseg-hop) + + See Also + -------- + scipy.signal.get_window: Return a window of a given length and type. + from_dual: Create instance using dual window. + ShortTimeFFT: Create instance using standard initializer. + """ + win = get_window(win_param, nperseg, fftbins=not symmetric_win) + return cls(win, hop=nperseg-noverlap, fs=fs, fft_mode=fft_mode, + mfft=mfft, scale_to=scale_to, phase_shift=phase_shift) + + @property + def win(self) -> np.ndarray: + """Window function as real- or complex-valued 1d array. + + This attribute is read only, since `dual_win` depends on it. + + See Also + -------- + dual_win: Canonical dual window. + m_num: Number of samples in window `win`. + m_num_mid: Center index of window `win`. + mfft: Length of input for the FFT used - may be larger than `m_num`. + hop: ime increment in signal samples for sliding window. + win: Window function as real- or complex-valued 1d array. + ShortTimeFFT: Class this property belongs to. + """ + return self._win + + @property + def hop(self) -> int: + """Time increment in signal samples for sliding window. + + This attribute is read only, since `dual_win` depends on it. + + See Also + -------- + delta_t: Time increment of STFT (``hop*T``) + m_num: Number of samples in window `win`. + m_num_mid: Center index of window `win`. + mfft: Length of input for the FFT used - may be larger than `m_num`. + T: Sampling interval of input signal and of the window. + win: Window function as real- or complex-valued 1d array. + ShortTimeFFT: Class this property belongs to. + """ + return self._hop + + @property + def T(self) -> float: + """Sampling interval of input signal and of the window. + + A ``ValueError`` is raised if it is set to a non-positive value. + + See Also + -------- + delta_t: Time increment of STFT (``hop*T``) + hop: Time increment in signal samples for sliding window. + fs: Sampling frequency (being ``1/T``) + t: Times of STFT for an input signal with `n` samples. + ShortTimeFFT: Class this property belongs to. 
+ """ + return 1 / self._fs + + @T.setter + def T(self, v: float): + """Sampling interval of input signal and of the window. + + A ``ValueError`` is raised if it is set to a non-positive value. + """ + if not (v > 0): + raise ValueError(f"Sampling interval T={v} must be positive!") + self._fs = 1 / v + + @property + def fs(self) -> float: + """Sampling frequency of input signal and of the window. + + The sampling frequency is the inverse of the sampling interval `T`. + A ``ValueError`` is raised if it is set to a non-positive value. + + See Also + -------- + delta_t: Time increment of STFT (``hop*T``) + hop: Time increment in signal samples for sliding window. + T: Sampling interval of input signal and of the window (``1/fs``). + ShortTimeFFT: Class this property belongs to. + """ + return self._fs + + @fs.setter + def fs(self, v: float): + """Sampling frequency of input signal and of the window. + + The sampling frequency is the inverse of the sampling interval `T`. + A ``ValueError`` is raised if it is set to a non-positive value. + """ + if not (v > 0): + raise ValueError(f"Sampling frequency fs={v} must be positive!") + self._fs = v + + @property + def fft_mode(self) -> FFT_MODE_TYPE: + """Mode of utilized FFT ('twosided', 'centered', 'onesided' or + 'onesided2X'). + + It can have the following values: + + 'twosided': + Two-sided FFT, where values for the negative frequencies are in + upper half of the array. Corresponds to :func:`~scipy.fft.fft()`. + 'centered': + Two-sided FFT with the values being ordered along monotonically + increasing frequencies. Corresponds to applying + :func:`~scipy.fft.fftshift()` to :func:`~scipy.fft.fft()`. + 'onesided': + Calculates only values for non-negative frequency values. + Corresponds to :func:`~scipy.fft.rfft()`. + 'onesided2X': + Like `onesided`, but the non-zero frequencies are doubled if + `scaling` is set to 'magnitude' or multiplied by ``sqrt(2)`` if + set to 'psd'. If `scaling` is ``None``, setting `fft_mode` to + `onesided2X` is not allowed. + If the FFT length `mfft` is even, the last FFT value is not paired, + and thus it is not scaled. + + Note that `onesided` and `onesided2X` do not work for complex-valued signals or + complex-valued windows. Furthermore, the frequency values can be obtained by + reading the `f` property, and the number of samples by accessing the `f_pts` + property. + + See Also + -------- + delta_f: Width of the frequency bins of the STFT. + f: Frequencies values of the STFT. + f_pts: Width of the frequency bins of the STFT. + onesided_fft: True if a one-sided FFT is used. + scaling: Normalization applied to the window function + ShortTimeFFT: Class this property belongs to. + """ + return self._fft_mode + + @fft_mode.setter + def fft_mode(self, t: FFT_MODE_TYPE): + """Set mode of FFT. + + Allowed values are 'twosided', 'centered', 'onesided', 'onesided2X'. + See the property `fft_mode` for more details. + """ + if t not in (fft_mode_types := get_args(FFT_MODE_TYPE)): + raise ValueError(f"fft_mode='{t}' not in {fft_mode_types}!") + + if t in {'onesided', 'onesided2X'} and np.iscomplexobj(self.win): + raise ValueError(f"One-sided spectra, i.e., fft_mode='{t}', " + + "are not allowed for complex-valued windows!") + + if t == 'onesided2X' and self.scaling is None: + raise ValueError(f"For scaling is None, fft_mode='{t}' is invalid!" 
+ "Do scale_to('psd') or scale_to('magnitude')!") + self._fft_mode = t + + @property + def mfft(self) -> int: + """Length of input for the FFT used - may be larger than window + length `m_num`. + + If not set, `mfft` defaults to the window length `m_num`. + + See Also + -------- + f_pts: Number of points along the frequency axis. + f: Frequencies values of the STFT. + m_num: Number of samples in window `win`. + ShortTimeFFT: Class this property belongs to. + """ + return self._mfft + + @mfft.setter + def mfft(self, n_: int): + """Setter for the length of FFT utilized. + + See the property `mfft` for further details. + """ + if not (n_ >= self.m_num): + raise ValueError(f"Attribute mfft={n_} needs to be at least the " + + f"window length m_num={self.m_num}!") + self._mfft = n_ + + @property + def scaling(self) -> Literal['magnitude', 'psd'] | None: + """Normalization applied to the window function + ('magnitude', 'psd' or ``None``). + + If not ``None``, the FFTs can be either interpreted as a magnitude or + a power spectral density spectrum. + + The window function can be scaled by calling the `scale_to` method, + or it is set by the initializer parameter ``scale_to``. + + See Also + -------- + fac_magnitude: Scaling factor for to a magnitude spectrum. + fac_psd: Scaling factor for to a power spectral density spectrum. + fft_mode: Mode of utilized FFT + scale_to: Scale window to obtain 'magnitude' or 'psd' scaling. + ShortTimeFFT: Class this property belongs to. + """ + return self._scaling + + def scale_to(self, scaling: Literal['magnitude', 'psd']): + """Scale window to obtain 'magnitude' or 'psd' scaling for the STFT. + + The window of a 'magnitude' spectrum has an integral of one, i.e., unit + area for non-negative windows. This ensures that absolute the values of + spectrum does not change if the length of the window changes (given + the input signal is stationary). + + To represent the power spectral density ('psd') for varying length + windows the area of the absolute square of the window needs to be + unity. + + The `scaling` property shows the current scaling. The properties + `fac_magnitude` and `fac_psd` show the scaling factors required to + scale the STFT values to a magnitude or a psd spectrum. + + This method is called, if the initializer parameter `scale_to` is set. + + See Also + -------- + fac_magnitude: Scaling factor for to a magnitude spectrum. + fac_psd: Scaling factor for to a power spectral density spectrum. + fft_mode: Mode of utilized FFT + scaling: Normalization applied to the window function. + ShortTimeFFT: Class this method belongs to. + """ + if scaling not in (scaling_values := {'magnitude', 'psd'}): + raise ValueError(f"{scaling=} not in {scaling_values}!") + if self._scaling == scaling: # do nothing + return + + s_fac = self.fac_psd if scaling == 'psd' else self.fac_magnitude + self._win = self._win * s_fac + if self._dual_win is not None: + self._dual_win = self._dual_win / s_fac + self._fac_mag, self._fac_psd = None, None # reset scaling factors + self._scaling = scaling + + @property + def phase_shift(self) -> int | None: + """If set, add linear phase `phase_shift` / `mfft` * `f` to each FFT + slice of frequency `f`. + + Shifting (more precisely `rolling`) an `mfft`-point FFT input by + `phase_shift` samples results in a multiplication of the output by + ``np.exp(2j*np.pi*q*phase_shift/mfft)`` at the frequency q * `delta_f`. + + The default value 0 ensures that there is no phase shift on the + zeroth slice (in which t=0 is centered). 
+ No phase shift (``phase_shift is None``) is equivalent to + ``phase_shift = -mfft//2``. In this case slices are not shifted + before calculating the FFT. + + The absolute value of `phase_shift` is limited to be less than `mfft`. + + See Also + -------- + delta_f: Width of the frequency bins of the STFT. + f: Frequencies values of the STFT. + mfft: Length of input for the FFT used + ShortTimeFFT: Class this property belongs to. + """ + return self._phase_shift + + @phase_shift.setter + def phase_shift(self, v: int | None): + """The absolute value of the phase shift needs to be less than mfft + samples. + + See the `phase_shift` getter method for more details. + """ + if v is None: + self._phase_shift = v + return + if not isinstance(v, int): + raise ValueError(f"phase_shift={v} has the unit samples. Hence " + + "it needs to be an int or it may be None!") + if not (-self.mfft < v < self.mfft): + raise ValueError("-mfft < phase_shift < mfft does not hold " + + f"for mfft={self.mfft}, phase_shift={v}!") + self._phase_shift = v + + def _x_slices(self, x: np.ndarray, k_off: int, p0: int, p1: int, + padding: PAD_TYPE) -> Generator[np.ndarray, None, None]: + """Generate signal slices along last axis of `x`. + + This method is only used by `stft_detrend`. The parameters are + described in `~ShortTimeFFT.stft`. + """ + if padding not in (padding_types := get_args(PAD_TYPE)): + raise ValueError(f"Parameter {padding=} not in {padding_types}!") + pad_kws: dict[str, dict] = { # possible keywords to pass to np.pad: + 'zeros': dict(mode='constant', constant_values=(0, 0)), + 'edge': dict(mode='edge'), + 'even': dict(mode='reflect', reflect_type='even'), + 'odd': dict(mode='reflect', reflect_type='odd'), + } # typing of pad_kws is needed to make mypy happy + + n, n1 = x.shape[-1], (p1 - p0) * self.hop + k0 = p0 * self.hop - self.m_num_mid + k_off # start sample + k1 = k0 + n1 + self.m_num # end sample + + i0, i1 = max(k0, 0), min(k1, n) # indexes to shorten x + # dimensions for padding x: + pad_width = [(0, 0)] * (x.ndim-1) + [(-min(k0, 0), max(k1 - n, 0))] + + x1 = np.pad(x[..., i0:i1], pad_width, **pad_kws[padding]) + for k_ in range(0, n1, self.hop): + yield x1[..., k_:k_ + self.m_num] + + def stft(self, x: np.ndarray, p0: int | None = None, + p1: int | None = None, *, k_offset: int = 0, + padding: PAD_TYPE = 'zeros', axis: int = -1) \ + -> np.ndarray: + """Perform the short-time Fourier transform. + + A two-dimensional matrix with ``p1-p0`` columns is calculated. + The `f_pts` rows represent value at the frequencies `f`. The q-th + column of the windowed FFT with the window `win` is centered at t[q]. + The columns represent the values at the frequencies `f`. + + Parameters + ---------- + x + The input signal as real or complex valued array. For complex values, the + property `fft_mode` must be set to 'twosided' or 'centered'. + p0 + The first element of the range of slices to calculate. If ``None`` + then it is set to :attr:`p_min`, which is the smallest possible + slice. + p1 + The end of the array. If ``None`` then `p_max(n)` is used. + k_offset + Index of first sample (t = 0) in `x`. + padding + Kind of values which are added, when the sliding window sticks out + on either the lower or upper end of the input `x`. Zeros are added + if the default 'zeros' is set. For 'edge' either the first or the + last value of `x` is used. 'even' pads by reflecting the + signal on the first or last sample and 'odd' additionally + multiplies it with -1. + axis + The axis of `x` over which to compute the STFT. 
+ If not given, the last axis is used. + + Returns + ------- + S + A complex array is returned with the dimension always being larger + by one than of `x`. The last axis always represent the time slices + of the STFT. `axis` defines the frequency axis (default second to + last). E.g., for a one-dimensional `x`, a complex 2d array is + returned, with axis 0 representing frequency and axis 1 the time + slices. + + See Also + -------- + delta_f: Width of the frequency bins of the STFT. + delta_t: Time increment of STFT + f: Frequencies values of the STFT. + invertible: Check if STFT is invertible. + :meth:`~ShortTimeFFT.istft`: Inverse short-time Fourier transform. + p_range: Determine and validate slice index range. + stft_detrend: STFT with detrended segments. + t: Times of STFT for an input signal with `n` samples. + :class:`scipy.signal.ShortTimeFFT`: Class this method belongs to. + """ + return self.stft_detrend(x, None, p0, p1, k_offset=k_offset, + padding=padding, axis=axis) + + def stft_detrend(self, x: np.ndarray, + detr: Callable[[np.ndarray], np.ndarray] | Literal['linear', 'constant'] | None, # noqa: E501 + p0: int | None = None, p1: int | None = None, *, + k_offset: int = 0, padding: PAD_TYPE = 'zeros', + axis: int = -1) \ + -> np.ndarray: + """Short-time Fourier transform with a trend being subtracted from each + segment beforehand. + + If `detr` is set to 'constant', the mean is subtracted, if set to + "linear", the linear trend is removed. This is achieved by calling + :func:`scipy.signal.detrend`. If `detr` is a function, `detr` is + applied to each segment. + All other parameters have the same meaning as in `~ShortTimeFFT.stft`. + + Note that due to the detrending, the original signal cannot be + reconstructed by the `~ShortTimeFFT.istft`. + + See Also + -------- + invertible: Check if STFT is invertible. + :meth:`~ShortTimeFFT.istft`: Inverse short-time Fourier transform. + :meth:`~ShortTimeFFT.stft`: Short-time Fourier transform + (without detrending). + :class:`scipy.signal.ShortTimeFFT`: Class this method belongs to. + """ + if self.onesided_fft and np.iscomplexobj(x): + raise ValueError(f"Complex-valued `x` not allowed for {self.fft_mode=}'! 
" + "Set property `fft_mode` to 'twosided' or 'centered'.") + if isinstance(detr, str): + detr = partial(detrend, type=detr) + elif not (detr is None or callable(detr)): + raise ValueError(f"Parameter {detr=} is not a str, function or " + + "None!") + n = x.shape[axis] + if not (n >= (m2p := self.m_num-self.m_num_mid)): + e_str = f'{len(x)=}' if x.ndim == 1 else f'of {axis=} of {x.shape}' + raise ValueError(f"{e_str} must be >= ceil(m_num/2) = {m2p}!") + + if x.ndim > 1: # motivated by the NumPy broadcasting mechanisms: + x = np.moveaxis(x, axis, -1) + # determine slice index range: + p0, p1 = self.p_range(n, p0, p1) + S_shape_1d = (self.f_pts, p1 - p0) + S_shape = x.shape[:-1] + S_shape_1d if x.ndim > 1 else S_shape_1d + S = np.zeros(S_shape, dtype=complex) + for p_, x_ in enumerate(self._x_slices(x, k_offset, p0, p1, padding)): + if detr is not None: + x_ = detr(x_) + S[..., :, p_] = self._fft_func(x_ * self.win.conj()) + if x.ndim > 1: + return np.moveaxis(S, -2, axis if axis >= 0 else axis-1) + return S + + def spectrogram(self, x: np.ndarray, y: np.ndarray | None = None, + detr: Callable[[np.ndarray], np.ndarray] | Literal['linear', 'constant'] | None = None, # noqa: E501 + *, + p0: int | None = None, p1: int | None = None, + k_offset: int = 0, padding: PAD_TYPE = 'zeros', + axis: int = -1) \ + -> np.ndarray: + r"""Calculate spectrogram or cross-spectrogram. + + The spectrogram is the absolute square of the STFT, i.e., it is + ``abs(S[q,p])**2`` for given ``S[q,p]`` and thus is always + non-negative. + For two STFTs ``Sx[q,p], Sy[q,p]``, the cross-spectrogram is defined + as ``Sx[q,p] * np.conj(Sy[q,p])`` and is complex-valued. + This is a convenience function for calling `~ShortTimeFFT.stft` / + `stft_detrend`, hence all parameters are discussed there. If `y` is not + ``None`` it needs to have the same shape as `x`. + + Examples + -------- + The following example shows the spectrogram of a square wave with + varying frequency :math:`f_i(t)` (marked by a green dashed line in the + plot) sampled with 20 Hz: + + >>> import matplotlib.pyplot as plt + >>> import numpy as np + >>> from scipy.signal import square, ShortTimeFFT + >>> from scipy.signal.windows import gaussian + ... + >>> T_x, N = 1 / 20, 1000 # 20 Hz sampling rate for 50 s signal + >>> t_x = np.arange(N) * T_x # time indexes for signal + >>> f_i = 5e-3*(t_x - t_x[N // 3])**2 + 1 # varying frequency + >>> x = square(2*np.pi*np.cumsum(f_i)*T_x) # the signal + + The utilized Gaussian window is 50 samples or 2.5 s long. The + parameter ``mfft=800`` (oversampling factor 16) and the `hop` interval + of 2 in `ShortTimeFFT` was chosen to produce a sufficient number of + points: + + >>> g_std = 12 # standard deviation for Gaussian window in samples + >>> win = gaussian(50, std=g_std, sym=True) # symmetric Gaussian wind. + >>> SFT = ShortTimeFFT(win, hop=2, fs=1/T_x, mfft=800, scale_to='psd') + >>> Sx2 = SFT.spectrogram(x) # calculate absolute square of STFT + + The plot's colormap is logarithmically scaled as the power spectral + density is in dB. The time extent of the signal `x` is marked by + vertical dashed lines and the shaded areas mark the presence of border + effects: + + >>> fig1, ax1 = plt.subplots(figsize=(6., 4.)) # enlarge plot a bit + >>> t_lo, t_hi = SFT.extent(N)[:2] # time range of plot + >>> ax1.set_title(rf"Spectrogram ({SFT.m_num*SFT.T:g}$\,s$ Gaussian " + + ... rf"window, $\sigma_t={g_std*SFT.T:g}\,$s)") + >>> ax1.set(xlabel=f"Time $t$ in seconds ({SFT.p_num(N)} slices, " + + ... 
rf"$\Delta t = {SFT.delta_t:g}\,$s)", + ... ylabel=f"Freq. $f$ in Hz ({SFT.f_pts} bins, " + + ... rf"$\Delta f = {SFT.delta_f:g}\,$Hz)", + ... xlim=(t_lo, t_hi)) + >>> Sx_dB = 10 * np.log10(np.fmax(Sx2, 1e-4)) # limit range to -40 dB + >>> im1 = ax1.imshow(Sx_dB, origin='lower', aspect='auto', + ... extent=SFT.extent(N), cmap='magma') + >>> ax1.plot(t_x, f_i, 'g--', alpha=.5, label='$f_i(t)$') + >>> fig1.colorbar(im1, label='Power Spectral Density ' + + ... r"$20\,\log_{10}|S_x(t, f)|$ in dB") + ... + >>> # Shade areas where window slices stick out to the side: + >>> for t0_, t1_ in [(t_lo, SFT.lower_border_end[0] * SFT.T), + ... (SFT.upper_border_begin(N)[0] * SFT.T, t_hi)]: + ... ax1.axvspan(t0_, t1_, color='w', linewidth=0, alpha=.3) + >>> for t_ in [0, N * SFT.T]: # mark signal borders with vertical line + ... ax1.axvline(t_, color='c', linestyle='--', alpha=0.5) + >>> ax1.legend() + >>> fig1.tight_layout() + >>> plt.show() + + The logarithmic scaling reveals the odd harmonics of the square wave, + which are reflected at the Nyquist frequency of 10 Hz. This aliasing + is also the main source of the noise artifacts in the plot. + + + See Also + -------- + :meth:`~ShortTimeFFT.stft`: Perform the short-time Fourier transform. + stft_detrend: STFT with a trend subtracted from each segment. + :class:`scipy.signal.ShortTimeFFT`: Class this method belongs to. + """ + Sx = self.stft_detrend(x, detr, p0, p1, k_offset=k_offset, + padding=padding, axis=axis) + if y is None or y is x: # do spectrogram: + return Sx.real**2 + Sx.imag**2 + # Cross-spectrogram: + Sy = self.stft_detrend(y, detr, p0, p1, k_offset=k_offset, + padding=padding, axis=axis) + return Sx * Sy.conj() + + @property + def dual_win(self) -> np.ndarray: + """Canonical dual window. + + A STFT can be interpreted as the input signal being expressed as a + weighted sum of modulated and time-shifted dual windows. Note that for + a given window there exist many dual windows. The canonical window is + the one with the minimal energy (i.e., :math:`L_2` norm). + + `dual_win` has same length as `win`, namely `m_num` samples. + + If the dual window cannot be calculated a ``ValueError`` is raised. + This attribute is read only and calculated lazily. + + See Also + -------- + dual_win: Canonical dual window. + m_num: Number of samples in window `win`. + win: Window function as real- or complex-valued 1d array. + ShortTimeFFT: Class this property belongs to. + """ + if self._dual_win is None: + self._dual_win = _calc_dual_canonical_window(self.win, self.hop) + return self._dual_win + + @property + def invertible(self) -> bool: + """Check if STFT is invertible. + + This is achieved by trying to calculate the canonical dual window. + + See Also + -------- + :meth:`~ShortTimeFFT.istft`: Inverse short-time Fourier transform. + m_num: Number of samples in window `win` and `dual_win`. + dual_win: Canonical dual window. + win: Window for STFT. + ShortTimeFFT: Class this property belongs to. + """ + try: + return len(self.dual_win) > 0 # call self.dual_win() + except ValueError: + return False + + def istft(self, S: np.ndarray, k0: int = 0, k1: int | None = None, *, + f_axis: int = -2, t_axis: int = -1) \ + -> np.ndarray: + """Inverse short-time Fourier transform. + + It returns an array of dimension ``S.ndim - 1`` which is real + if `onesided_fft` is set, else complex. If the STFT is not + `invertible`, or the parameters are out of bounds a ``ValueError`` is + raised. 
+ + Parameters + ---------- + S + A complex valued array where `f_axis` denotes the frequency + values and the `t-axis` dimension the temporal values of the + STFT values. + k0, k1 + The start and the end index of the reconstructed signal. The + default (``k0 = 0``, ``k1 = None``) assumes that the maximum length + signal should be reconstructed. + f_axis, t_axis + The axes in `S` denoting the frequency and the time dimension. + + Notes + ----- + It is required that `S` has `f_pts` entries along the `f_axis`. For + the `t_axis` it is assumed that the first entry corresponds to + `p_min` * `delta_t` (being <= 0). The length of `t_axis` needs to be + compatible with `k1`. I.e., ``S.shape[t_axis] >= self.p_max(k1)`` must + hold, if `k1` is not ``None``. Else `k1` is set to `k_max` with:: + + q_max = S.shape[t_range] + self.p_min + k_max = (q_max - 1) * self.hop + self.m_num - self.m_num_mid + + The :ref:`tutorial_stft` section of the :ref:`user_guide` discussed the + slicing behavior by means of an example. + + See Also + -------- + invertible: Check if STFT is invertible. + :meth:`~ShortTimeFFT.stft`: Perform Short-time Fourier transform. + :class:`scipy.signal.ShortTimeFFT`: Class this method belongs to. + """ + if f_axis == t_axis: + raise ValueError(f"{f_axis=} may not be equal to {t_axis=}!") + if S.shape[f_axis] != self.f_pts: + raise ValueError(f"{S.shape[f_axis]=} must be equal to " + + f"{self.f_pts=} ({S.shape=})!") + n_min = self.m_num-self.m_num_mid # minimum signal length + if not (S.shape[t_axis] >= (q_num := self.p_num(n_min))): + raise ValueError(f"{S.shape[t_axis]=} needs to have at least " + + f"{q_num} slices ({S.shape=})!") + if t_axis != S.ndim - 1 or f_axis != S.ndim - 2: + t_axis = S.ndim + t_axis if t_axis < 0 else t_axis + f_axis = S.ndim + f_axis if f_axis < 0 else f_axis + S = np.moveaxis(S, (f_axis, t_axis), (-2, -1)) + + q_max = S.shape[-1] + self.p_min + k_max = (q_max - 1) * self.hop + self.m_num - self.m_num_mid + + k1 = k_max if k1 is None else k1 + if not (self.k_min <= k0 < k1 <= k_max): + raise ValueError(f"({self.k_min=}) <= ({k0=}) < ({k1=}) <= " + + f"({k_max=}) is false!") + if not (num_pts := k1 - k0) >= n_min: + raise ValueError(f"({k1=}) - ({k0=}) = {num_pts} has to be at " + + f"least the half the window length {n_min}!") + + q0 = (k0 // self.hop + self.p_min if k0 >= 0 else # p_min always <= 0 + k0 // self.hop) + q1 = min(self.p_max(k1), q_max) + k_q0, k_q1 = self.nearest_k_p(k0), self.nearest_k_p(k1, left=False) + n_pts = k_q1 - k_q0 + self.m_num - self.m_num_mid + x = np.zeros(S.shape[:-2] + (n_pts,), + dtype=float if self.onesided_fft else complex) + for q_ in range(q0, q1): + xs = self._ifft_func(S[..., :, q_ - self.p_min]) * self.dual_win + i0 = q_ * self.hop - self.m_num_mid + i1 = min(i0 + self.m_num, n_pts+k0) + j0, j1 = 0, i1 - i0 + if i0 < k0: # xs sticks out to the left on x: + j0 += k0 - i0 + i0 = k0 + x[..., i0-k0:i1-k0] += xs[..., j0:j1] + x = x[..., :k1-k0] + if x.ndim > 1: + x = np.moveaxis(x, -1, f_axis if f_axis < x.ndim else t_axis) + return x + + @property + def fac_magnitude(self) -> float: + """Factor to multiply the STFT values by to scale each frequency slice + to a magnitude spectrum. + + It is 1 if attribute ``scaling == 'magnitude'``. + The window can be scaled to a magnitude spectrum by using the method + `scale_to`. + + See Also + -------- + fac_psd: Scaling factor for to a power spectral density spectrum. + scale_to: Scale window to obtain 'magnitude' or 'psd' scaling. 
+ scaling: Normalization applied to the window function. + ShortTimeFFT: Class this property belongs to. + """ + if self.scaling == 'magnitude': + return 1 + if self._fac_mag is None: + self._fac_mag = 1 / abs(sum(self.win)) + return self._fac_mag + + @property + def fac_psd(self) -> float: + """Factor to multiply the STFT values by to scale each frequency slice + to a power spectral density (PSD). + + It is 1 if attribute ``scaling == 'psd'``. + The window can be scaled to a psd spectrum by using the method + `scale_to`. + + See Also + -------- + fac_magnitude: Scaling factor for to a magnitude spectrum. + scale_to: Scale window to obtain 'magnitude' or 'psd' scaling. + scaling: Normalization applied to the window function. + ShortTimeFFT: Class this property belongs to. + """ + if self.scaling == 'psd': + return 1 + if self._fac_psd is None: + self._fac_psd = 1 / np.sqrt( + sum(self.win.real**2+self.win.imag**2) / self.T) + return self._fac_psd + + @property + def m_num(self) -> int: + """Number of samples in window `win`. + + Note that the FFT can be oversampled by zero-padding. This is achieved + by setting the `mfft` property. + + See Also + -------- + m_num_mid: Center index of window `win`. + mfft: Length of input for the FFT used - may be larger than `m_num`. + hop: Time increment in signal samples for sliding window. + win: Window function as real- or complex-valued 1d array. + ShortTimeFFT: Class this property belongs to. + """ + return len(self.win) + + @property + def m_num_mid(self) -> int: + """Center index of window `win`. + + For odd `m_num`, ``(m_num - 1) / 2`` is returned and + for even `m_num` (per definition) ``m_num / 2`` is returned. + + See Also + -------- + m_num: Number of samples in window `win`. + mfft: Length of input for the FFT used - may be larger than `m_num`. + hop: ime increment in signal samples for sliding window. + win: Window function as real- or complex-valued 1d array. + ShortTimeFFT: Class this property belongs to. + """ + return self.m_num // 2 + + @cache + def _pre_padding(self) -> tuple[int, int]: + """Smallest signal index and slice index due to padding. + + Since, per convention, for time t=0, n,q is zero, the returned values + are negative or zero. + """ + w2 = self.win.real**2 + self.win.imag**2 + # move window to the left until the overlap with t >= 0 vanishes: + n0 = -self.m_num_mid + for q_, n_ in enumerate(range(n0, n0-self.m_num-1, -self.hop)): + n_next = n_ - self.hop + if n_next + self.m_num <= 0 or all(w2[n_next:] == 0): + return n_, -q_ + raise RuntimeError("This is code line should not have been reached!") + # If this case is reached, it probably means the first slice should be + # returned, i.e.: return n0, 0 + + @property + def k_min(self) -> int: + """The smallest possible signal index of the STFT. + + `k_min` is the index of the left-most non-zero value of the lowest + slice `p_min`. Since the zeroth slice is centered over the zeroth + sample of the input signal, `k_min` is never positive. + A detailed example is provided in the :ref:`tutorial_stft_sliding_win` + section of the :ref:`user_guide`. + + See Also + -------- + k_max: First sample index after signal end not touched by a time slice. + lower_border_end: Where pre-padding effects end. + p_min: The smallest possible slice index. + p_max: Index of first non-overlapping upper time slice. + p_num: Number of time slices, i.e., `p_max` - `p_min`. + p_range: Determine and validate slice index range. + upper_border_begin: Where post-padding effects start. 
+ ShortTimeFFT: Class this property belongs to. + """ + return self._pre_padding()[0] + + @property + def p_min(self) -> int: + """The smallest possible slice index. + + `p_min` is the index of the left-most slice, where the window still + sticks into the signal, i.e., has non-zero part for t >= 0. + `k_min` is the smallest index where the window function of the slice + `p_min` is non-zero. + + Since, per convention the zeroth slice is centered at t=0, + `p_min` <= 0 always holds. + A detailed example is provided in the :ref:`tutorial_stft_sliding_win` + section of the :ref:`user_guide`. + + See Also + -------- + k_min: The smallest possible signal index. + k_max: First sample index after signal end not touched by a time slice. + p_max: Index of first non-overlapping upper time slice. + p_num: Number of time slices, i.e., `p_max` - `p_min`. + p_range: Determine and validate slice index range. + ShortTimeFFT: Class this property belongs to. + """ + return self._pre_padding()[1] + + @lru_cache(maxsize=256) + def _post_padding(self, n: int) -> tuple[int, int]: + """Largest signal index and slice index due to padding. + + Parameters + ---------- + n : int + Number of samples of input signal (must be ≥ half of the window length). + """ + if not (n >= (m2p := self.m_num - self.m_num_mid)): + raise ValueError(f"Parameter n must be >= ceil(m_num/2) = {m2p}!") + w2 = self.win.real**2 + self.win.imag**2 + # move window to the right until the overlap for t < t[n] vanishes: + q1 = n // self.hop # last slice index with t[p1] <= t[n] + k1 = q1 * self.hop - self.m_num_mid + for q_, k_ in enumerate(range(k1, n+self.m_num, self.hop), start=q1): + n_next = k_ + self.hop + if n_next >= n or all(w2[:n-n_next] == 0): + return k_ + self.m_num, q_ + 1 + raise RuntimeError("This is code line should not have been reached!") + # If this case is reached, it probably means the last slice should be + # returned, i.e.: return k1 + self.m_num - self.m_num_mid, q1 + 1 + + def k_max(self, n: int) -> int: + """First sample index after signal end not touched by a time slice. + + `k_max` - 1 is the largest sample index of the slice `p_max` for a + given input signal of `n` samples. + A detailed example is provided in the :ref:`tutorial_stft_sliding_win` + section of the :ref:`user_guide`. + + Parameters + ---------- + n : int + Number of samples of input signal (must be ≥ half of the window length). + + See Also + -------- + k_min: The smallest possible signal index. + p_min: The smallest possible slice index. + p_max: Index of first non-overlapping upper time slice. + p_num: Number of time slices, i.e., `p_max` - `p_min`. + p_range: Determine and validate slice index range. + ShortTimeFFT: Class this method belongs to. + """ + return self._post_padding(n)[0] + + def p_max(self, n: int) -> int: + """Index of first non-overlapping upper time slice for `n` sample + input. + + Note that center point t[p_max] = (p_max(n)-1) * `delta_t` is typically + larger than last time index t[n-1] == (`n`-1) * `T`. The upper border + of samples indexes covered by the window slices is given by `k_max`. + Furthermore, `p_max` does not denote the number of slices `p_num` since + `p_min` is typically less than zero. + A detailed example is provided in the :ref:`tutorial_stft_sliding_win` + section of the :ref:`user_guide`. + + See Also + -------- + k_min: The smallest possible signal index. + k_max: First sample index after signal end not touched by a time slice. + p_min: The smallest possible slice index. 
+ p_num: Number of time slices, i.e., `p_max` - `p_min`. + p_range: Determine and validate slice index range. + ShortTimeFFT: Class this method belongs to. + """ + return self._post_padding(n)[1] + + def p_num(self, n: int) -> int: + """Number of time slices for an input signal with `n` samples. + + It is given by `p_num` = `p_max` - `p_min` with `p_min` typically + being negative. + A detailed example is provided in the :ref:`tutorial_stft_sliding_win` + section of the :ref:`user_guide`. + + See Also + -------- + k_min: The smallest possible signal index. + k_max: First sample index after signal end not touched by a time slice. + lower_border_end: Where pre-padding effects end. + p_min: The smallest possible slice index. + p_max: Index of first non-overlapping upper time slice. + p_range: Determine and validate slice index range. + upper_border_begin: Where post-padding effects start. + ShortTimeFFT: Class this method belongs to. + """ + return self.p_max(n) - self.p_min + + @property + def lower_border_end(self) -> tuple[int, int]: + """First signal index and first slice index unaffected by pre-padding. + + Describes the point where the window does not stick out to the left + of the signal domain. + A detailed example is provided in the :ref:`tutorial_stft_sliding_win` + section of the :ref:`user_guide`. + + See Also + -------- + k_min: The smallest possible signal index. + k_max: First sample index after signal end not touched by a time slice. + lower_border_end: Where pre-padding effects end. + p_min: The smallest possible slice index. + p_max: Index of first non-overlapping upper time slice. + p_num: Number of time slices, i.e., `p_max` - `p_min`. + p_range: Determine and validate slice index range. + upper_border_begin: Where post-padding effects start. + ShortTimeFFT: Class this property belongs to. + """ + # not using @cache decorator due to MyPy limitations + if self._lower_border_end is not None: + return self._lower_border_end + + # first non-zero element in self.win: + m0 = np.flatnonzero(self.win.real**2 + self.win.imag**2)[0] + + # move window to the right until does not stick out to the left: + k0 = -self.m_num_mid + m0 + for q_, k_ in enumerate(range(k0, self.hop + 1, self.hop)): + if k_ + self.hop >= 0: # next entry does not stick out anymore + self._lower_border_end = (k_ + self.m_num, q_ + 1) + return self._lower_border_end + self._lower_border_end = (0, max(self.p_min, 0)) # ends at first slice + return self._lower_border_end + + @lru_cache(maxsize=256) + def upper_border_begin(self, n: int) -> tuple[int, int]: + """First signal index and first slice index affected by post-padding. + + Describes the point where the window does begin stick out to the right + of the signal domain. + A detailed example is given :ref:`tutorial_stft_sliding_win` section + of the :ref:`user_guide`. + + Parameters + ---------- + n : int + Number of samples of input signal (must be ≥ half of the window length). + + Returns + ------- + k_ub : int + Lowest signal index, where a touching time slice sticks out past the + signal end. + p_ub : int + Lowest index of time slice of which the end sticks out past the signal end. + + See Also + -------- + k_min: The smallest possible signal index. + k_max: First sample index after signal end not touched by a time slice. + lower_border_end: Where pre-padding effects end. + p_min: The smallest possible slice index. + p_max: Index of first non-overlapping upper time slice. + p_num: Number of time slices, i.e., `p_max` - `p_min`. 
+ p_range: Determine and validate slice index range. + ShortTimeFFT: Class this method belongs to. + """ + if not (n >= (m2p := self.m_num - self.m_num_mid)): + raise ValueError(f"Parameter n must be >= ceil(m_num/2) = {m2p}!") + w2 = self.win.real**2 + self.win.imag**2 + q2 = n // self.hop + 1 # first t[q] >= t[n] + q1 = max((n-self.m_num) // self.hop - 1, -1) + # move window left until does not stick out to the right: + for q_ in range(q2, q1, -1): + k_ = q_ * self.hop + (self.m_num - self.m_num_mid) + if k_ <= n or all(w2[n-k_:] == 0): + return (q_ + 1) * self.hop - self.m_num_mid, q_ + 1 + return 0, 0 # border starts at first slice + + @property + def delta_t(self) -> float: + """Time increment of STFT. + + The time increment `delta_t` = `T` * `hop` represents the sample + increment `hop` converted to time based on the sampling interval `T`. + + See Also + -------- + delta_f: Width of the frequency bins of the STFT. + hop: Hop size in signal samples for sliding window. + t: Times of STFT for an input signal with `n` samples. + T: Sampling interval of input signal and window `win`. + ShortTimeFFT: Class this property belongs to + """ + return self.T * self.hop + + def p_range(self, n: int, p0: int | None = None, + p1: int | None = None) -> tuple[int, int]: + """Determine and validate slice index range. + + Parameters + ---------- + n : int + Number of samples of input signal, assuming t[0] = 0. + p0 : int | None + First slice index. If 0 then the first slice is centered at t = 0. + If ``None`` then `p_min` is used. Note that p0 may be < 0 if + slices are left of t = 0. + p1 : int | None + End of interval (last value is p1-1). + If ``None`` then `p_max(n)` is used. + + + Returns + ------- + p0_ : int + The fist slice index + p1_ : int + End of interval (last value is p1-1). + + Notes + ----- + A ``ValueError`` is raised if ``p_min <= p0 < p1 <= p_max(n)`` does not + hold. + + See Also + -------- + k_min: The smallest possible signal index. + k_max: First sample index after signal end not touched by a time slice. + lower_border_end: Where pre-padding effects end. + p_min: The smallest possible slice index. + p_max: Index of first non-overlapping upper time slice. + p_num: Number of time slices, i.e., `p_max` - `p_min`. + upper_border_begin: Where post-padding effects start. + ShortTimeFFT: Class this property belongs to. + """ + p_max = self.p_max(n) # shorthand + p0_ = self.p_min if p0 is None else p0 + p1_ = p_max if p1 is None else p1 + if not (self.p_min <= p0_ < p1_ <= p_max): + raise ValueError(f"Invalid Parameter {p0=}, {p1=}, i.e., " + + f"{self.p_min=} <= p0 < p1 <= {p_max=} " + + f"does not hold for signal length {n=}!") + return p0_, p1_ + + @lru_cache(maxsize=1) + def t(self, n: int, p0: int | None = None, p1: int | None = None, + k_offset: int = 0) -> np.ndarray: + """Times of STFT for an input signal with `n` samples. + + Returns a 1d array with times of the `~ShortTimeFFT.stft` values with + the same parametrization. Note that the slices are + ``delta_t = hop * T`` time units apart. + + Parameters + ---------- + n + Number of sample of the input signal. + p0 + The first element of the range of slices to calculate. If ``None`` + then it is set to :attr:`p_min`, which is the smallest possible + slice. + p1 + The end of the array. If ``None`` then `p_max(n)` is used. + k_offset + Index of first sample (t = 0) in `x`. + + + See Also + -------- + delta_t: Time increment of STFT (``hop*T``) + hop: Time increment in signal samples for sliding window. 
+ nearest_k_p: Nearest sample index k_p for which t[k_p] == t[p] holds. + T: Sampling interval of input signal and of the window (``1/fs``). + fs: Sampling frequency (being ``1/T``) + ShortTimeFFT: Class this method belongs to. + """ + p0, p1 = self.p_range(n, p0, p1) + return np.arange(p0, p1) * self.delta_t + k_offset * self.T + + def nearest_k_p(self, k: int, left: bool = True) -> int: + """Return nearest sample index k_p for which t[k_p] == t[p] holds. + + The nearest next smaller time sample p (where t[p] is the center + position of the window of the p-th slice) is p_k = k // `hop`. + If `hop` is a divisor of `k` than `k` is returned. + If `left` is set than p_k * `hop` is returned else (p_k+1) * `hop`. + + This method can be used to slice an input signal into chunks for + calculating the STFT and iSTFT incrementally. + + See Also + -------- + delta_t: Time increment of STFT (``hop*T``) + hop: Time increment in signal samples for sliding window. + T: Sampling interval of input signal and of the window (``1/fs``). + fs: Sampling frequency (being ``1/T``) + t: Times of STFT for an input signal with `n` samples. + ShortTimeFFT: Class this method belongs to. + """ + p_q, remainder = divmod(k, self.hop) + if remainder == 0: + return k + return p_q * self.hop if left else (p_q + 1) * self.hop + + @property + def delta_f(self) -> float: + """Width of the frequency bins of the STFT. + + Return the frequency interval `delta_f` = 1 / (`mfft` * `T`). + + See Also + -------- + delta_t: Time increment of STFT. + f_pts: Number of points along the frequency axis. + f: Frequencies values of the STFT. + mfft: Length of the input for FFT used. + T: Sampling interval. + t: Times of STFT for an input signal with `n` samples. + ShortTimeFFT: Class this property belongs to. + """ + return 1 / (self.mfft * self.T) + + @property + def f_pts(self) -> int: + """Number of points along the frequency axis. + + See Also + -------- + delta_f: Width of the frequency bins of the STFT. + f: Frequencies values of the STFT. + mfft: Length of the input for FFT used. + ShortTimeFFT: Class this property belongs to. + """ + return self.mfft // 2 + 1 if self.onesided_fft else self.mfft + + @property + def onesided_fft(self) -> bool: + """Return True if a one-sided FFT is used. + + Returns ``True`` if `fft_mode` is either 'onesided' or 'onesided2X'. + + See Also + -------- + fft_mode: Utilized FFT ('twosided', 'centered', 'onesided' or + 'onesided2X') + ShortTimeFFT: Class this property belongs to. + """ + return self.fft_mode in {'onesided', 'onesided2X'} + + @property + def f(self) -> np.ndarray: + """Frequencies values of the STFT. + + A 1d array of length `f_pts` with `delta_f` spaced entries is returned. + + See Also + -------- + delta_f: Width of the frequency bins of the STFT. + f_pts: Number of points along the frequency axis. + mfft: Length of the input for FFT used. + ShortTimeFFT: Class this property belongs to. + """ + if self.fft_mode in {'onesided', 'onesided2X'}: + return fft_lib.rfftfreq(self.mfft, self.T) + elif self.fft_mode == 'twosided': + return fft_lib.fftfreq(self.mfft, self.T) + elif self.fft_mode == 'centered': + return fft_lib.fftshift(fft_lib.fftfreq(self.mfft, self.T)) + # This should never happen but makes the Linters happy: + fft_modes = get_args(FFT_MODE_TYPE) + raise RuntimeError(f"{self.fft_mode=} not in {fft_modes}!") + + def _fft_func(self, x: np.ndarray) -> np.ndarray: + """FFT based on the `fft_mode`, `mfft`, `scaling` and `phase_shift` + attributes. 
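As a side note on the `fft_mode` variants handled here (an editorial sketch using NumPy's FFT helpers; the class itself goes through its `fft_lib` alias, and `mfft`/`T` below are just assumed example values), the three frequency axes returned by the `f` property correspond to:

import numpy as np

mfft, T = 8, 0.5
f_onesided = np.fft.rfftfreq(mfft, T)                  # 'onesided' / 'onesided2X'
f_twosided = np.fft.fftfreq(mfft, T)                   # 'twosided'
f_centered = np.fft.fftshift(np.fft.fftfreq(mfft, T))  # 'centered'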
+
+        For multidimensional arrays the transformation is carried out on the
+        last axis.
+        """
+        if self.phase_shift is not None:
+            if x.shape[-1] < self.mfft:  # zero pad if needed
+                z_shape = list(x.shape)
+                z_shape[-1] = self.mfft - x.shape[-1]
+                x = np.hstack((x, np.zeros(z_shape, dtype=x.dtype)))
+            p_s = (self.phase_shift + self.m_num_mid) % self.m_num
+            x = np.roll(x, -p_s, axis=-1)
+
+        if self.fft_mode == 'twosided':
+            return fft_lib.fft(x, n=self.mfft, axis=-1)
+        if self.fft_mode == 'centered':
+            return fft_lib.fftshift(fft_lib.fft(x, self.mfft, axis=-1), axes=-1)
+        if self.fft_mode == 'onesided':
+            return fft_lib.rfft(x, n=self.mfft, axis=-1)
+        if self.fft_mode == 'onesided2X':
+            X = fft_lib.rfft(x, n=self.mfft, axis=-1)
+            # Either squared magnitude (psd) or magnitude is doubled:
+            fac = np.sqrt(2) if self.scaling == 'psd' else 2
+            # For even input length, the last entry is unpaired:
+            X[..., 1: -1 if self.mfft % 2 == 0 else None] *= fac
+            return X
+        # This should never happen but makes the Linter happy:
+        fft_modes = get_args(FFT_MODE_TYPE)
+        raise RuntimeError(f"{self.fft_mode=} not in {fft_modes}!")
+
+    def _ifft_func(self, X: np.ndarray) -> np.ndarray:
+        """Inverse to `_fft_func`.
+
+        Returned is an array of length `m_num`. If the FFT is `onesided`
+        then a float array is returned else a complex array is returned.
+        For multidimensional arrays the transformation is carried out on the
+        last axis.
+        """
+        if self.fft_mode == 'twosided':
+            x = fft_lib.ifft(X, n=self.mfft, axis=-1)
+        elif self.fft_mode == 'centered':
+            x = fft_lib.ifft(fft_lib.ifftshift(X, axes=-1), n=self.mfft, axis=-1)
+        elif self.fft_mode == 'onesided':
+            x = fft_lib.irfft(X, n=self.mfft, axis=-1)
+        elif self.fft_mode == 'onesided2X':
+            Xc = X.copy()  # we do not want to modify function parameters
+            fac = np.sqrt(2) if self.scaling == 'psd' else 2
+            # For even length X the last value is not paired with a negative
+            # value on the two-sided FFT:
+            q1 = -1 if self.mfft % 2 == 0 else None
+            Xc[..., 1:q1] /= fac
+            x = fft_lib.irfft(Xc, n=self.mfft, axis=-1)
+        else:  # This should never happen but makes the Linter happy:
+            error_str = f"{self.fft_mode=} not in {get_args(FFT_MODE_TYPE)}!"
+            raise RuntimeError(error_str)
+
+        if self.phase_shift is None:
+            return x[..., :self.m_num]
+        p_s = (self.phase_shift + self.m_num_mid) % self.m_num
+        return np.roll(x, p_s, axis=-1)[..., :self.m_num]
+
+    def extent(self, n: int, axes_seq: Literal['tf', 'ft'] = 'tf',
+               center_bins: bool = False) -> tuple[float, float, float, float]:
+        """Return the minimum and maximum time-frequency values.
+
+        A tuple with four floats ``(t0, t1, f0, f1)`` for 'tf' and
+        ``(f0, f1, t0, t1)`` for 'ft' is returned describing the corners
+        of the time-frequency domain of the `~ShortTimeFFT.stft`.
+        That tuple can be passed to `matplotlib.pyplot.imshow` as a parameter
+        with the same name.
+
+        Parameters
+        ----------
+        n : int
+            Number of samples in input signal.
+        axes_seq : {'tf', 'ft'}
+            Return time extent first and then frequency extent or vice-versa.
+        center_bins : bool
+            If set (default ``False``), the values of the time slots and
+            frequency bins are moved from the side to the middle. This is
+            useful when plotting the `~ShortTimeFFT.stft` values as step
+            functions, i.e., with no interpolation.
+
+        See Also
+        --------
+        :func:`matplotlib.pyplot.imshow`: Display data as an image.
+        :class:`scipy.signal.ShortTimeFFT`: Class this method belongs to.
+
+        Examples
+        --------
+        The following two plots illustrate the effect of the parameter `center_bins`:
+        The grid lines represent the three time and the four frequency values of the
+        STFT.
+        The left plot, where ``(t0, t1, f0, f1) = (0, 3, 0, 4)`` is passed as parameter
+        ``extent`` to `~matplotlib.pyplot.imshow`, shows the standard behavior of the
+        time and frequency values being at the lower edge of the corresponding bin.
+        The right plot, with ``(t0, t1, f0, f1) = (-0.5, 2.5, -0.5, 3.5)``, shows that
+        the bins are centered over the respective values when passing
+        ``center_bins=True``.
+
+        >>> import matplotlib.pyplot as plt
+        >>> import numpy as np
+        >>> from scipy.signal import ShortTimeFFT
+        ...
+        >>> n, m = 12, 6
+        >>> SFT = ShortTimeFFT.from_window('hann', fs=m, nperseg=m, noverlap=0)
+        >>> Sxx = SFT.stft(np.cos(np.arange(n)))  # produces a colorful plot
+        ...
+        >>> fig, axx = plt.subplots(1, 2, tight_layout=True, figsize=(6., 4.))
+        >>> for ax_, center_bins in zip(axx, (False, True)):
+        ...     ax_.imshow(abs(Sxx), origin='lower', interpolation=None, aspect='equal',
+        ...                cmap='viridis', extent=SFT.extent(n, 'tf', center_bins))
+        ...     ax_.set_title(f"{center_bins=}")
+        ...     ax_.set_xlabel(f"Time ({SFT.p_num(n)} points, Δt={SFT.delta_t})")
+        ...     ax_.set_ylabel(f"Frequency ({SFT.f_pts} points, Δf={SFT.delta_f})")
+        ...     ax_.set_xticks(SFT.t(n))  # vertical grid lines are timestamps
+        ...     ax_.set_yticks(SFT.f)  # horizontal grid lines are frequency values
+        ...     ax_.grid(True)
+        >>> plt.show()
+
+        Note that the step-like behavior with the constant colors is caused by passing
+        ``interpolation=None`` to `~matplotlib.pyplot.imshow`.
+        """
+        if axes_seq not in ('tf', 'ft'):
+            raise ValueError(f"Parameter {axes_seq=} not in ['tf', 'ft']!")
+
+        if self.onesided_fft:
+            q0, q1 = 0, self.f_pts
+        elif self.fft_mode == 'centered':
+            q0 = -(self.mfft // 2)
+            q1 = self.mfft // 2 if self.mfft % 2 == 0 else self.mfft // 2 + 1
+        else:
+            raise ValueError(f"Attribute fft_mode={self.fft_mode} must be " +
+                             "in ['centered', 'onesided', 'onesided2X']")
+
+        p0, p1 = self.p_min, self.p_max(n)  # shorthand
+        if center_bins:
+            t0, t1 = self.delta_t * (p0 - 0.5), self.delta_t * (p1 - 0.5)
+            f0, f1 = self.delta_f * (q0 - 0.5), self.delta_f * (q1 - 0.5)
+        else:
+            t0, t1 = self.delta_t * p0, self.delta_t * p1
+            f0, f1 = self.delta_f * q0, self.delta_f * q1
+        return (t0, t1, f0, f1) if axes_seq == 'tf' else (f0, f1, t0, t1)
diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/_signaltools.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/_signaltools.py
new file mode 100644
index 0000000000000000000000000000000000000000..340e227760ecd0a4bbf7bf5a135b92acb123a4d3
--- /dev/null
+++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/_signaltools.py
@@ -0,0 +1,4989 @@
+# Author: Travis Oliphant
+# 1999 -- 2002
+
+from __future__ import annotations  # Provides typing union operator `|` in Python 3.9
+import operator
+import math
+from math import prod as _prod
+import timeit
+import warnings
+from typing import Literal
+
+from numpy._typing import ArrayLike
+
+from scipy.spatial import cKDTree
+from .
import _sigtools +from ._ltisys import dlti +from ._upfirdn import upfirdn, _output_len, _upfirdn_modes +from scipy import linalg, fft as sp_fft +from scipy import ndimage +from scipy.fft._helper import _init_nd_shape_and_axes +import numpy as np +from scipy.special import lambertw +from .windows import get_window +from ._arraytools import axis_slice, axis_reverse, odd_ext, even_ext, const_ext +from ._filter_design import cheby1, _validate_sos, zpk2sos +from ._fir_filter_design import firwin +from ._sosfilt import _sosfilt + + +__all__ = ['correlate', 'correlation_lags', 'correlate2d', + 'convolve', 'convolve2d', 'fftconvolve', 'oaconvolve', + 'order_filter', 'medfilt', 'medfilt2d', 'wiener', 'lfilter', + 'lfiltic', 'sosfilt', 'deconvolve', 'hilbert', 'hilbert2', 'envelope', + 'unique_roots', 'invres', 'invresz', 'residue', + 'residuez', 'resample', 'resample_poly', 'detrend', + 'lfilter_zi', 'sosfilt_zi', 'sosfiltfilt', 'choose_conv_method', + 'filtfilt', 'decimate', 'vectorstrength'] + + +_modedict = {'valid': 0, 'same': 1, 'full': 2} + +_boundarydict = {'fill': 0, 'pad': 0, 'wrap': 2, 'circular': 2, 'symm': 1, + 'symmetric': 1, 'reflect': 4} + + +def _valfrommode(mode): + try: + return _modedict[mode] + except KeyError as e: + raise ValueError("Acceptable mode flags are 'valid'," + " 'same', or 'full'.") from e + + +def _bvalfromboundary(boundary): + try: + return _boundarydict[boundary] << 2 + except KeyError as e: + raise ValueError("Acceptable boundary flags are 'fill', 'circular' " + "(or 'wrap'), and 'symmetric' (or 'symm').") from e + + +def _inputs_swap_needed(mode, shape1, shape2, axes=None): + """Determine if inputs arrays need to be swapped in `"valid"` mode. + + If in `"valid"` mode, returns whether or not the input arrays need to be + swapped depending on whether `shape1` is at least as large as `shape2` in + every calculated dimension. + + This is important for some of the correlation and convolution + implementations in this module, where the larger array input needs to come + before the smaller array input when operating in this mode. + + Note that if the mode provided is not 'valid', False is immediately + returned. + + """ + if mode != 'valid': + return False + + if not shape1: + return False + + if axes is None: + axes = range(len(shape1)) + + ok1 = all(shape1[i] >= shape2[i] for i in axes) + ok2 = all(shape2[i] >= shape1[i] for i in axes) + + if not (ok1 or ok2): + raise ValueError("For 'valid' mode, one must be at least " + "as large as the other in every dimension") + + return not ok1 + + +def _reject_objects(arr, name): + """Warn if arr.dtype is object or longdouble. + """ + dt = np.asarray(arr).dtype + if not (np.issubdtype(dt, np.integer) + or dt in [np.bool_, np.float16, np.float32, np.float64, + np.complex64, np.complex128] + ): + msg = ( + f"dtype={dt} is not supported by {name} and will raise an error in " + f"SciPy 1.17.0. Supported dtypes are: boolean, integer, `np.float16`," + f"`np.float32`, `np.float64`, `np.complex64`, `np.complex128`." + ) + warnings.warn(msg, category=DeprecationWarning, stacklevel=3) + + +def correlate(in1, in2, mode='full', method='auto'): + r""" + Cross-correlate two N-dimensional arrays. + + Cross-correlate `in1` and `in2`, with the output size determined by the + `mode` argument. + + Parameters + ---------- + in1 : array_like + First input. + in2 : array_like + Second input. Should have the same number of dimensions as `in1`. 
+ mode : str {'full', 'valid', 'same'}, optional + A string indicating the size of the output: + + ``full`` + The output is the full discrete linear cross-correlation + of the inputs. (Default) + ``valid`` + The output consists only of those elements that do not + rely on the zero-padding. In 'valid' mode, either `in1` or `in2` + must be at least as large as the other in every dimension. + ``same`` + The output is the same size as `in1`, centered + with respect to the 'full' output. + method : str {'auto', 'direct', 'fft'}, optional + A string indicating which method to use to calculate the correlation. + + ``direct`` + The correlation is determined directly from sums, the definition of + correlation. + ``fft`` + The Fast Fourier Transform is used to perform the correlation more + quickly (only available for numerical arrays.) + ``auto`` + Automatically chooses direct or Fourier method based on an estimate + of which is faster (default). See `convolve` Notes for more detail. + + .. versionadded:: 0.19.0 + + Returns + ------- + correlate : array + An N-dimensional array containing a subset of the discrete linear + cross-correlation of `in1` with `in2`. + + See Also + -------- + choose_conv_method : contains more documentation on `method`. + correlation_lags : calculates the lag / displacement indices array for 1D + cross-correlation. + + Notes + ----- + The correlation z of two d-dimensional arrays x and y is defined as:: + + z[...,k,...] = sum[..., i_l, ...] x[..., i_l,...] * conj(y[..., i_l - k,...]) + + This way, if x and y are 1-D arrays and ``z = correlate(x, y, 'full')`` + then + + .. math:: + + z[k] = (x * y)(k - N + 1) + = \sum_{l=0}^{||x||-1}x_l y_{l-k+N-1}^{*} + + for :math:`k = 0, 1, ..., ||x|| + ||y|| - 2` + + where :math:`||x||` is the length of ``x``, :math:`N = \max(||x||,||y||)`, + and :math:`y_m` is 0 when m is outside the range of y. + + ``method='fft'`` only works for numerical arrays as it relies on + `fftconvolve`. In certain cases (i.e., arrays of objects or when + rounding integers can lose precision), ``method='direct'`` is always used. + + When using "same" mode with even-length inputs, the outputs of `correlate` + and `correlate2d` differ: There is a 1-index offset between them. + + Examples + -------- + Implement a matched filter using cross-correlation, to recover a signal + that has passed through a noisy channel. + + >>> import numpy as np + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + >>> rng = np.random.default_rng() + + >>> sig = np.repeat([0., 1., 1., 0., 1., 0., 0., 1.], 128) + >>> sig_noise = sig + rng.standard_normal(len(sig)) + >>> corr = signal.correlate(sig_noise, np.ones(128), mode='same') / 128 + + >>> clock = np.arange(64, len(sig), 128) + >>> fig, (ax_orig, ax_noise, ax_corr) = plt.subplots(3, 1, sharex=True) + >>> ax_orig.plot(sig) + >>> ax_orig.plot(clock, sig[clock], 'ro') + >>> ax_orig.set_title('Original signal') + >>> ax_noise.plot(sig_noise) + >>> ax_noise.set_title('Signal with noise') + >>> ax_corr.plot(corr) + >>> ax_corr.plot(clock, corr[clock], 'ro') + >>> ax_corr.axhline(0.5, ls=':') + >>> ax_corr.set_title('Cross-correlated with rectangular pulse') + >>> ax_orig.margins(0, 0.1) + >>> fig.tight_layout() + >>> plt.show() + + Compute the cross-correlation of a noisy signal with the original signal. 
+ + >>> x = np.arange(128) / 128 + >>> sig = np.sin(2 * np.pi * x) + >>> sig_noise = sig + rng.standard_normal(len(sig)) + >>> corr = signal.correlate(sig_noise, sig) + >>> lags = signal.correlation_lags(len(sig), len(sig_noise)) + >>> corr /= np.max(corr) + + >>> fig, (ax_orig, ax_noise, ax_corr) = plt.subplots(3, 1, figsize=(4.8, 4.8)) + >>> ax_orig.plot(sig) + >>> ax_orig.set_title('Original signal') + >>> ax_orig.set_xlabel('Sample Number') + >>> ax_noise.plot(sig_noise) + >>> ax_noise.set_title('Signal with noise') + >>> ax_noise.set_xlabel('Sample Number') + >>> ax_corr.plot(lags, corr) + >>> ax_corr.set_title('Cross-correlated signal') + >>> ax_corr.set_xlabel('Lag') + >>> ax_orig.margins(0, 0.1) + >>> ax_noise.margins(0, 0.1) + >>> ax_corr.margins(0, 0.1) + >>> fig.tight_layout() + >>> plt.show() + + """ + in1 = np.asarray(in1) + in2 = np.asarray(in2) + _reject_objects(in1, 'correlate') + _reject_objects(in2, 'correlate') + + if in1.ndim == in2.ndim == 0: + return in1 * in2.conj() + elif in1.ndim != in2.ndim: + raise ValueError("in1 and in2 should have the same dimensionality") + + # Don't use _valfrommode, since correlate should not accept numeric modes + try: + val = _modedict[mode] + except KeyError as e: + raise ValueError("Acceptable mode flags are 'valid'," + " 'same', or 'full'.") from e + + # this either calls fftconvolve or this function with method=='direct' + if method in ('fft', 'auto'): + return convolve(in1, _reverse_and_conj(in2), mode, method) + + elif method == 'direct': + # fastpath to faster numpy.correlate for 1d inputs when possible + if _np_conv_ok(in1, in2, mode): + return np.correlate(in1, in2, mode) + + # _correlateND is far slower when in2.size > in1.size, so swap them + # and then undo the effect afterward if mode == 'full'. Also, it fails + # with 'valid' mode if in2 is larger than in1, so swap those, too. + # Don't swap inputs for 'same' mode, since shape of in1 matters. + swapped_inputs = ((mode == 'full') and (in2.size > in1.size) or + _inputs_swap_needed(mode, in1.shape, in2.shape)) + + if swapped_inputs: + in1, in2 = in2, in1 + + if mode == 'valid': + ps = [i - j + 1 for i, j in zip(in1.shape, in2.shape)] + out = np.empty(ps, in1.dtype) + + z = _sigtools._correlateND(in1, in2, out, val) + + else: + ps = [i + j - 1 for i, j in zip(in1.shape, in2.shape)] + + # zero pad input + in1zpadded = np.zeros(ps, in1.dtype) + sc = tuple(slice(0, i) for i in in1.shape) + in1zpadded[sc] = in1.copy() + + if mode == 'full': + out = np.empty(ps, in1.dtype) + elif mode == 'same': + out = np.empty(in1.shape, in1.dtype) + + z = _sigtools._correlateND(in1zpadded, in2, out, val) + + if swapped_inputs: + # Reverse and conjugate to undo the effect of swapping inputs + z = _reverse_and_conj(z) + + return z + + else: + raise ValueError("Acceptable method flags are 'auto'," + " 'direct', or 'fft'.") + + +def correlation_lags(in1_len, in2_len, mode='full'): + r""" + Calculates the lag / displacement indices array for 1D cross-correlation. + + Parameters + ---------- + in1_len : int + First input size. + in2_len : int + Second input size. + mode : str {'full', 'valid', 'same'}, optional + A string indicating the size of the output. + See the documentation `correlate` for more information. + + Returns + ------- + lags : array + Returns an array containing cross-correlation lag/displacement indices. + Indices can be indexed with the np.argmax of the correlation to return + the lag/displacement. + + See Also + -------- + correlate : Compute the N-dimensional cross-correlation. 
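A small editorial check of the input-swapping trick used in the 'direct' branch of `correlate` above (illustrative random arrays, not part of the upstream docstrings): a full cross-correlation satisfies ``corr(a, b)[k] == conj(corr(b, a)[-k])``, so reversing and conjugating the swapped result restores the original ordering.

import numpy as np
from scipy import signal

rng = np.random.default_rng(1)
a, b = rng.standard_normal(7), rng.standard_normal(4)
z_ab = signal.correlate(a, b, mode='full')
z_ba = signal.correlate(b, a, mode='full')
assert np.allclose(z_ab, z_ba[::-1].conj())  # swap + reverse + conjugate is a no-op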
+ + Notes + ----- + Cross-correlation for continuous functions :math:`f` and :math:`g` is + defined as: + + .. math:: + + \left ( f\star g \right )\left ( \tau \right ) + \triangleq \int_{t_0}^{t_0 +T} + \overline{f\left ( t \right )}g\left ( t+\tau \right )dt + + Where :math:`\tau` is defined as the displacement, also known as the lag. + + Cross correlation for discrete functions :math:`f` and :math:`g` is + defined as: + + .. math:: + \left ( f\star g \right )\left [ n \right ] + \triangleq \sum_{-\infty}^{\infty} + \overline{f\left [ m \right ]}g\left [ m+n \right ] + + Where :math:`n` is the lag. + + Examples + -------- + Cross-correlation of a signal with its time-delayed self. + + >>> import numpy as np + >>> from scipy import signal + >>> rng = np.random.default_rng() + >>> x = rng.standard_normal(1000) + >>> y = np.concatenate([rng.standard_normal(100), x]) + >>> correlation = signal.correlate(x, y, mode="full") + >>> lags = signal.correlation_lags(x.size, y.size, mode="full") + >>> lag = lags[np.argmax(correlation)] + """ + + # calculate lag ranges in different modes of operation + if mode == "full": + # the output is the full discrete linear convolution + # of the inputs. (Default) + lags = np.arange(-in2_len + 1, in1_len) + elif mode == "same": + # the output is the same size as `in1`, centered + # with respect to the 'full' output. + # calculate the full output + lags = np.arange(-in2_len + 1, in1_len) + # determine the midpoint in the full output + mid = lags.size // 2 + # determine lag_bound to be used with respect + # to the midpoint + lag_bound = in1_len // 2 + # calculate lag ranges for even and odd scenarios + if in1_len % 2 == 0: + lags = lags[(mid-lag_bound):(mid+lag_bound)] + else: + lags = lags[(mid-lag_bound):(mid+lag_bound)+1] + elif mode == "valid": + # the output consists only of those elements that do not + # rely on the zero-padding. In 'valid' mode, either `in1` or `in2` + # must be at least as large as the other in every dimension. + + # the lag_bound will be either negative or positive + # this let's us infer how to present the lag range + lag_bound = in1_len - in2_len + if lag_bound >= 0: + lags = np.arange(lag_bound + 1) + else: + lags = np.arange(lag_bound, 1) + else: + raise ValueError(f"Mode {mode} is invalid") + return lags + + +def _centered(arr, newshape): + # Return the center newshape portion of the array. + newshape = np.asarray(newshape) + currshape = np.array(arr.shape) + startind = (currshape - newshape) // 2 + endind = startind + newshape + myslice = [slice(startind[k], endind[k]) for k in range(len(endind))] + return arr[tuple(myslice)] + + +def _init_freq_conv_axes(in1, in2, mode, axes, sorted_axes=False): + """Handle the axes argument for frequency-domain convolution. + + Returns the inputs and axes in a standard form, eliminating redundant axes, + swapping the inputs if necessary, and checking for various potential + errors. + + Parameters + ---------- + in1 : array + First input. + in2 : array + Second input. + mode : str {'full', 'valid', 'same'}, optional + A string indicating the size of the output. + See the documentation `fftconvolve` for more information. + axes : list of ints + Axes over which to compute the FFTs. + sorted_axes : bool, optional + If `True`, sort the axes. + Default is `False`, do not sort. + + Returns + ------- + in1 : array + The first input, possible swapped with the second input. + in2 : array + The second input, possible swapped with the first input. + axes : list of ints + Axes over which to compute the FFTs. 
+ + """ + s1 = in1.shape + s2 = in2.shape + noaxes = axes is None + + _, axes = _init_nd_shape_and_axes(in1, shape=None, axes=axes) + + if not noaxes and not len(axes): + raise ValueError("when provided, axes cannot be empty") + + # Axes of length 1 can rely on broadcasting rules for multiply, + # no fft needed. + axes = [a for a in axes if s1[a] != 1 and s2[a] != 1] + + if sorted_axes: + axes.sort() + + if not all(s1[a] == s2[a] or s1[a] == 1 or s2[a] == 1 + for a in range(in1.ndim) if a not in axes): + raise ValueError("incompatible shapes for in1 and in2:" + f" {s1} and {s2}") + + # Check that input sizes are compatible with 'valid' mode. + if _inputs_swap_needed(mode, s1, s2, axes=axes): + # Convolution is commutative; order doesn't have any effect on output. + in1, in2 = in2, in1 + + return in1, in2, axes + + +def _freq_domain_conv(in1, in2, axes, shape, calc_fast_len=False): + """Convolve two arrays in the frequency domain. + + This function implements only base the FFT-related operations. + Specifically, it converts the signals to the frequency domain, multiplies + them, then converts them back to the time domain. Calculations of axes, + shapes, convolution mode, etc. are implemented in higher level-functions, + such as `fftconvolve` and `oaconvolve`. Those functions should be used + instead of this one. + + Parameters + ---------- + in1 : array_like + First input. + in2 : array_like + Second input. Should have the same number of dimensions as `in1`. + axes : array_like of ints + Axes over which to compute the FFTs. + shape : array_like of ints + The sizes of the FFTs. + calc_fast_len : bool, optional + If `True`, set each value of `shape` to the next fast FFT length. + Default is `False`, use `axes` as-is. + + Returns + ------- + out : array + An N-dimensional array containing the discrete linear convolution of + `in1` with `in2`. + + """ + if not len(axes): + return in1 * in2 + + complex_result = (in1.dtype.kind == 'c' or in2.dtype.kind == 'c') + + if calc_fast_len: + # Speed up FFT by padding to optimal size. + fshape = [ + sp_fft.next_fast_len(shape[a], not complex_result) for a in axes] + else: + fshape = shape + + if not complex_result: + fft, ifft = sp_fft.rfftn, sp_fft.irfftn + else: + fft, ifft = sp_fft.fftn, sp_fft.ifftn + + sp1 = fft(in1, fshape, axes=axes) + sp2 = fft(in2, fshape, axes=axes) + + ret = ifft(sp1 * sp2, fshape, axes=axes) + + if calc_fast_len: + fslice = tuple([slice(sz) for sz in shape]) + ret = ret[fslice] + + return ret + + +def _apply_conv_mode(ret, s1, s2, mode, axes): + """Calculate the convolution result shape based on the `mode` argument. + + Returns the result sliced to the correct size for the given mode. + + Parameters + ---------- + ret : array + The result array, with the appropriate shape for the 'full' mode. + s1 : list of int + The shape of the first input. + s2 : list of int + The shape of the second input. + mode : str {'full', 'valid', 'same'} + A string indicating the size of the output. + See the documentation `fftconvolve` for more information. + axes : list of ints + Axes over which to compute the convolution. + + Returns + ------- + ret : array + A copy of `res`, sliced to the correct size for the given `mode`. 
+ + """ + if mode == "full": + return ret.copy() + elif mode == "same": + return _centered(ret, s1).copy() + elif mode == "valid": + shape_valid = [ret.shape[a] if a not in axes else s1[a] - s2[a] + 1 + for a in range(ret.ndim)] + return _centered(ret, shape_valid).copy() + else: + raise ValueError("acceptable mode flags are 'valid'," + " 'same', or 'full'") + + +def fftconvolve(in1, in2, mode="full", axes=None): + """Convolve two N-dimensional arrays using FFT. + + Convolve `in1` and `in2` using the fast Fourier transform method, with + the output size determined by the `mode` argument. + + This is generally much faster than `convolve` for large arrays (n > ~500), + but can be slower when only a few output values are needed, and can only + output float arrays (int or object array inputs will be cast to float). + + As of v0.19, `convolve` automatically chooses this method or the direct + method based on an estimation of which is faster. + + Parameters + ---------- + in1 : array_like + First input. + in2 : array_like + Second input. Should have the same number of dimensions as `in1`. + mode : str {'full', 'valid', 'same'}, optional + A string indicating the size of the output: + + ``full`` + The output is the full discrete linear convolution + of the inputs. (Default) + ``valid`` + The output consists only of those elements that do not + rely on the zero-padding. In 'valid' mode, either `in1` or `in2` + must be at least as large as the other in every dimension. + ``same`` + The output is the same size as `in1`, centered + with respect to the 'full' output. + axes : int or array_like of ints or None, optional + Axes over which to compute the convolution. + The default is over all axes. + + Returns + ------- + out : array + An N-dimensional array containing a subset of the discrete linear + convolution of `in1` with `in2`. + + See Also + -------- + convolve : Uses the direct convolution or FFT convolution algorithm + depending on which is faster. + oaconvolve : Uses the overlap-add method to do convolution, which is + generally faster when the input arrays are large and + significantly different in size. + + Examples + -------- + Autocorrelation of white noise is an impulse. + + >>> import numpy as np + >>> from scipy import signal + >>> rng = np.random.default_rng() + >>> sig = rng.standard_normal(1000) + >>> autocorr = signal.fftconvolve(sig, sig[::-1], mode='full') + + >>> import matplotlib.pyplot as plt + >>> fig, (ax_orig, ax_mag) = plt.subplots(2, 1) + >>> ax_orig.plot(sig) + >>> ax_orig.set_title('White noise') + >>> ax_mag.plot(np.arange(-len(sig)+1,len(sig)), autocorr) + >>> ax_mag.set_title('Autocorrelation') + >>> fig.tight_layout() + >>> fig.show() + + Gaussian blur implemented using FFT convolution. Notice the dark borders + around the image, due to the zero-padding beyond its boundaries. + The `convolve2d` function allows for other types of image boundaries, + but is far slower. + + >>> from scipy import datasets + >>> face = datasets.face(gray=True) + >>> kernel = np.outer(signal.windows.gaussian(70, 8), + ... signal.windows.gaussian(70, 8)) + >>> blurred = signal.fftconvolve(face, kernel, mode='same') + + >>> fig, (ax_orig, ax_kernel, ax_blurred) = plt.subplots(3, 1, + ... 
figsize=(6, 15)) + >>> ax_orig.imshow(face, cmap='gray') + >>> ax_orig.set_title('Original') + >>> ax_orig.set_axis_off() + >>> ax_kernel.imshow(kernel, cmap='gray') + >>> ax_kernel.set_title('Gaussian kernel') + >>> ax_kernel.set_axis_off() + >>> ax_blurred.imshow(blurred, cmap='gray') + >>> ax_blurred.set_title('Blurred') + >>> ax_blurred.set_axis_off() + >>> fig.show() + + """ + in1 = np.asarray(in1) + in2 = np.asarray(in2) + + if in1.ndim == in2.ndim == 0: # scalar inputs + return in1 * in2 + elif in1.ndim != in2.ndim: + raise ValueError("in1 and in2 should have the same dimensionality") + elif in1.size == 0 or in2.size == 0: # empty arrays + return np.array([]) + + in1, in2, axes = _init_freq_conv_axes(in1, in2, mode, axes, + sorted_axes=False) + + s1 = in1.shape + s2 = in2.shape + + shape = [max((s1[i], s2[i])) if i not in axes else s1[i] + s2[i] - 1 + for i in range(in1.ndim)] + + ret = _freq_domain_conv(in1, in2, axes, shape, calc_fast_len=True) + + return _apply_conv_mode(ret, s1, s2, mode, axes) + + +def _calc_oa_lens(s1, s2): + """Calculate the optimal FFT lengths for overlap-add convolution. + + The calculation is done for a single dimension. + + Parameters + ---------- + s1 : int + Size of the dimension for the first array. + s2 : int + Size of the dimension for the second array. + + Returns + ------- + block_size : int + The size of the FFT blocks. + overlap : int + The amount of overlap between two blocks. + in1_step : int + The size of each step for the first array. + in2_step : int + The size of each step for the first array. + + """ + # Set up the arguments for the conventional FFT approach. + fallback = (s1+s2-1, None, s1, s2) + + # Use conventional FFT convolve if sizes are same. + if s1 == s2 or s1 == 1 or s2 == 1: + return fallback + + if s2 > s1: + s1, s2 = s2, s1 + swapped = True + else: + swapped = False + + # There cannot be a useful block size if s2 is more than half of s1. 
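+    # (Editorial note, not part of the upstream source: for example, with
+    #  s1 = 1000 and s2 = 600 the overlap K = s2 - 1 = 599 would consume most
+    #  of every block, so the check on the next line returns the conventional
+    #  single-FFT fallback defined above. When the check passes, the block
+    #  length is instead derived further below as N = -K*W_{-1}(-1/(2*e*K)),
+    #  rounded up to the next fast FFT length.)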
+ if s2 >= s1/2: + return fallback + + # Derivation of optimal block length + # For original formula see: + # https://en.wikipedia.org/wiki/Overlap-add_method + # + # Formula: + # K = overlap = s2-1 + # N = block_size + # C = complexity + # e = exponential, exp(1) + # + # C = (N*(log2(N)+1))/(N-K) + # C = (N*log2(2N))/(N-K) + # C = N/(N-K) * log2(2N) + # C1 = N/(N-K) + # C2 = log2(2N) = ln(2N)/ln(2) + # + # dC1/dN = (1*(N-K)-N)/(N-K)^2 = -K/(N-K)^2 + # dC2/dN = 2/(2*N*ln(2)) = 1/(N*ln(2)) + # + # dC/dN = dC1/dN*C2 + dC2/dN*C1 + # dC/dN = -K*ln(2N)/(ln(2)*(N-K)^2) + N/(N*ln(2)*(N-K)) + # dC/dN = -K*ln(2N)/(ln(2)*(N-K)^2) + 1/(ln(2)*(N-K)) + # dC/dN = -K*ln(2N)/(ln(2)*(N-K)^2) + (N-K)/(ln(2)*(N-K)^2) + # dC/dN = (-K*ln(2N) + (N-K)/(ln(2)*(N-K)^2) + # dC/dN = (N - K*ln(2N) - K)/(ln(2)*(N-K)^2) + # + # Solve for minimum, where dC/dN = 0 + # 0 = (N - K*ln(2N) - K)/(ln(2)*(N-K)^2) + # 0 * ln(2)*(N-K)^2 = N - K*ln(2N) - K + # 0 = N - K*ln(2N) - K + # 0 = N - K*(ln(2N) + 1) + # 0 = N - K*ln(2Ne) + # N = K*ln(2Ne) + # N/K = ln(2Ne) + # + # e^(N/K) = e^ln(2Ne) + # e^(N/K) = 2Ne + # 1/e^(N/K) = 1/(2*N*e) + # e^(N/-K) = 1/(2*N*e) + # e^(N/-K) = K/N*1/(2*K*e) + # N/K*e^(N/-K) = 1/(2*e*K) + # N/-K*e^(N/-K) = -1/(2*e*K) + # + # Using Lambert W function + # https://en.wikipedia.org/wiki/Lambert_W_function + # x = W(y) It is the solution to y = x*e^x + # x = N/-K + # y = -1/(2*e*K) + # + # N/-K = W(-1/(2*e*K)) + # + # N = -K*W(-1/(2*e*K)) + overlap = s2-1 + opt_size = -overlap*lambertw(-1/(2*math.e*overlap), k=-1).real + block_size = sp_fft.next_fast_len(math.ceil(opt_size)) + + # Use conventional FFT convolve if there is only going to be one block. + if block_size >= s1: + return fallback + + if not swapped: + in1_step = block_size-s2+1 + in2_step = s2 + else: + in1_step = s2 + in2_step = block_size-s2+1 + + return block_size, overlap, in1_step, in2_step + + +def oaconvolve(in1, in2, mode="full", axes=None): + """Convolve two N-dimensional arrays using the overlap-add method. + + Convolve `in1` and `in2` using the overlap-add method, with + the output size determined by the `mode` argument. + + This is generally much faster than `convolve` for large arrays (n > ~500), + and generally much faster than `fftconvolve` when one array is much + larger than the other, but can be slower when only a few output values are + needed or when the arrays are very similar in shape, and can only + output float arrays (int or object array inputs will be cast to float). + + Parameters + ---------- + in1 : array_like + First input. + in2 : array_like + Second input. Should have the same number of dimensions as `in1`. + mode : str {'full', 'valid', 'same'}, optional + A string indicating the size of the output: + + ``full`` + The output is the full discrete linear convolution + of the inputs. (Default) + ``valid`` + The output consists only of those elements that do not + rely on the zero-padding. In 'valid' mode, either `in1` or `in2` + must be at least as large as the other in every dimension. + ``same`` + The output is the same size as `in1`, centered + with respect to the 'full' output. + axes : int or array_like of ints or None, optional + Axes over which to compute the convolution. + The default is over all axes. + + Returns + ------- + out : array + An N-dimensional array containing a subset of the discrete linear + convolution of `in1` with `in2`. + + See Also + -------- + convolve : Uses the direct convolution or FFT convolution algorithm + depending on which is faster. 
+ fftconvolve : An implementation of convolution using FFT. + + Notes + ----- + .. versionadded:: 1.4.0 + + References + ---------- + .. [1] Wikipedia, "Overlap-add_method". + https://en.wikipedia.org/wiki/Overlap-add_method + .. [2] Richard G. Lyons. Understanding Digital Signal Processing, + Third Edition, 2011. Chapter 13.10. + ISBN 13: 978-0137-02741-5 + + Examples + -------- + Convolve a 100,000 sample signal with a 512-sample filter. + + >>> import numpy as np + >>> from scipy import signal + >>> rng = np.random.default_rng() + >>> sig = rng.standard_normal(100000) + >>> filt = signal.firwin(512, 0.01) + >>> fsig = signal.oaconvolve(sig, filt) + + >>> import matplotlib.pyplot as plt + >>> fig, (ax_orig, ax_mag) = plt.subplots(2, 1) + >>> ax_orig.plot(sig) + >>> ax_orig.set_title('White noise') + >>> ax_mag.plot(fsig) + >>> ax_mag.set_title('Filtered noise') + >>> fig.tight_layout() + >>> fig.show() + + """ + in1 = np.asarray(in1) + in2 = np.asarray(in2) + + if in1.ndim == in2.ndim == 0: # scalar inputs + return in1 * in2 + elif in1.ndim != in2.ndim: + raise ValueError("in1 and in2 should have the same dimensionality") + elif in1.size == 0 or in2.size == 0: # empty arrays + return np.array([]) + elif in1.shape == in2.shape: # Equivalent to fftconvolve + return fftconvolve(in1, in2, mode=mode, axes=axes) + + in1, in2, axes = _init_freq_conv_axes(in1, in2, mode, axes, + sorted_axes=True) + + s1 = in1.shape + s2 = in2.shape + + if not axes: + ret = in1 * in2 + return _apply_conv_mode(ret, s1, s2, mode, axes) + + # Calculate this now since in1 is changed later + shape_final = [None if i not in axes else + s1[i] + s2[i] - 1 for i in range(in1.ndim)] + + # Calculate the block sizes for the output, steps, first and second inputs. + # It is simpler to calculate them all together than doing them in separate + # loops due to all the special cases that need to be handled. + optimal_sizes = ((-1, -1, s1[i], s2[i]) if i not in axes else + _calc_oa_lens(s1[i], s2[i]) for i in range(in1.ndim)) + block_size, overlaps, \ + in1_step, in2_step = zip(*optimal_sizes) + + # Fall back to fftconvolve if there is only one block in every dimension. + if in1_step == s1 and in2_step == s2: + return fftconvolve(in1, in2, mode=mode, axes=axes) + + # Figure out the number of steps and padding. + # This would get too complicated in a list comprehension. + nsteps1 = [] + nsteps2 = [] + pad_size1 = [] + pad_size2 = [] + for i in range(in1.ndim): + if i not in axes: + pad_size1 += [(0, 0)] + pad_size2 += [(0, 0)] + continue + + if s1[i] > in1_step[i]: + curnstep1 = math.ceil((s1[i]+1)/in1_step[i]) + if (block_size[i] - overlaps[i])*curnstep1 < shape_final[i]: + curnstep1 += 1 + + curpad1 = curnstep1*in1_step[i] - s1[i] + else: + curnstep1 = 1 + curpad1 = 0 + + if s2[i] > in2_step[i]: + curnstep2 = math.ceil((s2[i]+1)/in2_step[i]) + if (block_size[i] - overlaps[i])*curnstep2 < shape_final[i]: + curnstep2 += 1 + + curpad2 = curnstep2*in2_step[i] - s2[i] + else: + curnstep2 = 1 + curpad2 = 0 + + nsteps1 += [curnstep1] + nsteps2 += [curnstep2] + pad_size1 += [(0, curpad1)] + pad_size2 += [(0, curpad2)] + + # Pad the array to a size that can be reshaped to the desired shape + # if necessary. + if not all(curpad == (0, 0) for curpad in pad_size1): + in1 = np.pad(in1, pad_size1, mode='constant', constant_values=0) + + if not all(curpad == (0, 0) for curpad in pad_size2): + in2 = np.pad(in2, pad_size2, mode='constant', constant_values=0) + + # Reshape the overlap-add parts to input block sizes. 
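+    # (Editorial note, not part of the upstream source: in the simplest 1-D
+    #  case, axes == [0] and in1_step == (B,), so the padded input of length
+    #  nsteps1[0]*B is viewed below as shape (nsteps1[0], B) -- one row per
+    #  overlap-add block -- and the FFT is then taken along that per-block axis.)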
+ split_axes = [iax+i for i, iax in enumerate(axes)] + fft_axes = [iax+1 for iax in split_axes] + + # We need to put each new dimension before the corresponding dimension + # being reshaped in order to get the data in the right layout at the end. + reshape_size1 = list(in1_step) + reshape_size2 = list(in2_step) + for i, iax in enumerate(split_axes): + reshape_size1.insert(iax, nsteps1[i]) + reshape_size2.insert(iax, nsteps2[i]) + + in1 = in1.reshape(*reshape_size1) + in2 = in2.reshape(*reshape_size2) + + # Do the convolution. + fft_shape = [block_size[i] for i in axes] + ret = _freq_domain_conv(in1, in2, fft_axes, fft_shape, calc_fast_len=False) + + # Do the overlap-add. + for ax, ax_fft, ax_split in zip(axes, fft_axes, split_axes): + overlap = overlaps[ax] + if overlap is None: + continue + + ret, overpart = np.split(ret, [-overlap], ax_fft) + overpart = np.split(overpart, [-1], ax_split)[0] + + ret_overpart = np.split(ret, [overlap], ax_fft)[0] + ret_overpart = np.split(ret_overpart, [1], ax_split)[1] + ret_overpart += overpart + + # Reshape back to the correct dimensionality. + shape_ret = [ret.shape[i] if i not in fft_axes else + ret.shape[i]*ret.shape[i-1] + for i in range(ret.ndim) if i not in split_axes] + ret = ret.reshape(*shape_ret) + + # Slice to the correct size. + slice_final = tuple([slice(islice) for islice in shape_final]) + ret = ret[slice_final] + + return _apply_conv_mode(ret, s1, s2, mode, axes) + + +def _numeric_arrays(arrays, kinds='buifc'): + """ + See if a list of arrays are all numeric. + + Parameters + ---------- + arrays : array or list of arrays + arrays to check if numeric. + kinds : string-like + The dtypes of the arrays to be checked. If the dtype.kind of + the ndarrays are not in this string the function returns False and + otherwise returns True. + """ + if isinstance(arrays, np.ndarray): + return arrays.dtype.kind in kinds + for array_ in arrays: + if array_.dtype.kind not in kinds: + return False + return True + + +def _conv_ops(x_shape, h_shape, mode): + """ + Find the number of operations required for direct/fft methods of + convolution. The direct operations were recorded by making a dummy class to + record the number of operations by overriding ``__mul__`` and ``__add__``. + The FFT operations rely on the (well-known) computational complexity of the + FFT (and the implementation of ``_freq_domain_conv``). 
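A rough editorial illustration of this cost model for the 1-D 'full' case (raw operation counts only; the hardware-tuned weights applied later in `_fftconv_faster` are omitted, and the lengths are just assumed examples):

import numpy as np

s1, s2 = 10_000, 128
N = s1 + s2 - 1
direct_ops = s1 * s2             # definition-based multiply-adds
fft_ops = 3 * N * np.log(N)      # three FFTs of the 'full' output length
print(direct_ops, round(fft_ops))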
+ + """ + if mode == "full": + out_shape = [n + k - 1 for n, k in zip(x_shape, h_shape)] + elif mode == "valid": + out_shape = [abs(n - k) + 1 for n, k in zip(x_shape, h_shape)] + elif mode == "same": + out_shape = x_shape + else: + raise ValueError("Acceptable mode flags are 'valid'," + f" 'same', or 'full', not mode={mode}") + + s1, s2 = x_shape, h_shape + if len(x_shape) == 1: + s1, s2 = s1[0], s2[0] + if mode == "full": + direct_ops = s1 * s2 + elif mode == "valid": + direct_ops = (s2 - s1 + 1) * s1 if s2 >= s1 else (s1 - s2 + 1) * s2 + elif mode == "same": + direct_ops = (s1 * s2 if s1 < s2 else + s1 * s2 - (s2 // 2) * ((s2 + 1) // 2)) + else: + if mode == "full": + direct_ops = min(_prod(s1), _prod(s2)) * _prod(out_shape) + elif mode == "valid": + direct_ops = min(_prod(s1), _prod(s2)) * _prod(out_shape) + elif mode == "same": + direct_ops = _prod(s1) * _prod(s2) + + full_out_shape = [n + k - 1 for n, k in zip(x_shape, h_shape)] + N = _prod(full_out_shape) + fft_ops = 3 * N * np.log(N) # 3 separate FFTs of size full_out_shape + return fft_ops, direct_ops + + +def _fftconv_faster(x, h, mode): + """ + See if using fftconvolve or convolve is faster. + + Parameters + ---------- + x : np.ndarray + Signal + h : np.ndarray + Kernel + mode : str + Mode passed to convolve + + Returns + ------- + fft_faster : bool + + Notes + ----- + See docstring of `choose_conv_method` for details on tuning hardware. + + See pull request 11031 for more detail: + https://github.com/scipy/scipy/pull/11031. + + """ + fft_ops, direct_ops = _conv_ops(x.shape, h.shape, mode) + offset = -1e-3 if x.ndim == 1 else -1e-4 + constants = { + "valid": (1.89095737e-9, 2.1364985e-10, offset), + "full": (1.7649070e-9, 2.1414831e-10, offset), + "same": (3.2646654e-9, 2.8478277e-10, offset) + if h.size <= x.size + else (3.21635404e-9, 1.1773253e-8, -1e-5), + } if x.ndim == 1 else { + "valid": (1.85927e-9, 2.11242e-8, offset), + "full": (1.99817e-9, 1.66174e-8, offset), + "same": (2.04735e-9, 1.55367e-8, offset), + } + O_fft, O_direct, O_offset = constants[mode] + return O_fft * fft_ops < O_direct * direct_ops + O_offset + + +def _reverse_and_conj(x): + """ + Reverse array `x` in all dimensions and perform the complex conjugate + """ + reverse = (slice(None, None, -1),) * x.ndim + return x[reverse].conj() + + +def _np_conv_ok(volume, kernel, mode): + """ + See if numpy supports convolution of `volume` and `kernel` (i.e. both are + 1D ndarrays and of the appropriate shape). NumPy's 'same' mode uses the + size of the larger input, while SciPy's uses the size of the first input. + + Invalid mode strings will return False and be caught by the calling func. + """ + if volume.ndim == kernel.ndim == 1: + if mode in ('full', 'valid'): + return True + elif mode == 'same': + return volume.size >= kernel.size + else: + return False + + +def _timeit_fast(stmt="pass", setup="pass", repeat=3): + """ + Returns the time the statement/function took, in seconds. + + Faster, less precise version of IPython's timeit. `stmt` can be a statement + written as a string or a callable. + + Will do only 1 loop (like IPython's timeit) with no repetitions + (unlike IPython) for very slow functions. For fast functions, only does + enough loops to take 5 ms, which seems to produce similar results (on + Windows at least), and avoids doing an extraneous cycle that isn't + measured. 
+ + """ + timer = timeit.Timer(stmt, setup) + + # determine number of calls per rep so total time for 1 rep >= 5 ms + x = 0 + for p in range(0, 10): + number = 10**p + x = timer.timeit(number) # seconds + if x >= 5e-3 / 10: # 5 ms for final test, 1/10th that for this one + break + if x > 1: # second + # If it's macroscopic, don't bother with repetitions + best = x + else: + number *= 10 + r = timer.repeat(repeat, number) + best = min(r) + + sec = best / number + return sec + + +def choose_conv_method(in1, in2, mode='full', measure=False): + """ + Find the fastest convolution/correlation method. + + This primarily exists to be called during the ``method='auto'`` option in + `convolve` and `correlate`. It can also be used to determine the value of + ``method`` for many different convolutions of the same dtype/shape. + In addition, it supports timing the convolution to adapt the value of + ``method`` to a particular set of inputs and/or hardware. + + Parameters + ---------- + in1 : array_like + The first argument passed into the convolution function. + in2 : array_like + The second argument passed into the convolution function. + mode : str {'full', 'valid', 'same'}, optional + A string indicating the size of the output: + + ``full`` + The output is the full discrete linear convolution + of the inputs. (Default) + ``valid`` + The output consists only of those elements that do not + rely on the zero-padding. + ``same`` + The output is the same size as `in1`, centered + with respect to the 'full' output. + measure : bool, optional + If True, run and time the convolution of `in1` and `in2` with both + methods and return the fastest. If False (default), predict the fastest + method using precomputed values. + + Returns + ------- + method : str + A string indicating which convolution method is fastest, either + 'direct' or 'fft' + times : dict, optional + A dictionary containing the times (in seconds) needed for each method. + This value is only returned if ``measure=True``. + + See Also + -------- + convolve + correlate + + Notes + ----- + Generally, this method is 99% accurate for 2D signals and 85% accurate + for 1D signals for randomly chosen input sizes. For precision, use + ``measure=True`` to find the fastest method by timing the convolution. + This can be used to avoid the minimal overhead of finding the fastest + ``method`` later, or to adapt the value of ``method`` to a particular set + of inputs. + + Experiments were run on an Amazon EC2 r5a.2xlarge machine to test this + function. These experiments measured the ratio between the time required + when using ``method='auto'`` and the time required for the fastest method + (i.e., ``ratio = time_auto / min(time_fft, time_direct)``). In these + experiments, we found: + + * There is a 95% chance of this ratio being less than 1.5 for 1D signals + and a 99% chance of being less than 2.5 for 2D signals. + * The ratio was always less than 2.5/5 for 1D/2D signals respectively. + * This function is most inaccurate for 1D convolutions that take between 1 + and 10 milliseconds with ``method='direct'``. A good proxy for this + (at least in our experiments) is ``1e6 <= in1.size * in2.size <= 1e7``. + + The 2D results almost certainly generalize to 3D/4D/etc because the + implementation is the same (the 1D implementation is different). + + All the numbers above are specific to the EC2 machine. However, we did find + that this function generalizes fairly decently across hardware. 
The speed + tests were of similar quality (and even slightly better) than the same + tests performed on the machine to tune this function's numbers (a mid-2014 + 15-inch MacBook Pro with 16GB RAM and a 2.5GHz Intel i7 processor). + + There are cases when `fftconvolve` supports the inputs but this function + returns `direct` (e.g., to protect against floating point integer + precision). + + .. versionadded:: 0.19 + + Examples + -------- + Estimate the fastest method for a given input: + + >>> import numpy as np + >>> from scipy import signal + >>> rng = np.random.default_rng() + >>> img = rng.random((32, 32)) + >>> filter = rng.random((8, 8)) + >>> method = signal.choose_conv_method(img, filter, mode='same') + >>> method + 'fft' + + This can then be applied to other arrays of the same dtype and shape: + + >>> img2 = rng.random((32, 32)) + >>> filter2 = rng.random((8, 8)) + >>> corr2 = signal.correlate(img2, filter2, mode='same', method=method) + >>> conv2 = signal.convolve(img2, filter2, mode='same', method=method) + + The output of this function (``method``) works with `correlate` and + `convolve`. + + """ + volume = np.asarray(in1) + kernel = np.asarray(in2) + + _reject_objects(volume, 'choose_conv_method') + _reject_objects(kernel, 'choose_conv_method') + + if measure: + times = {} + for method in ['fft', 'direct']: + times[method] = _timeit_fast(lambda: convolve(volume, kernel, + mode=mode, method=method)) + + chosen_method = 'fft' if times['fft'] < times['direct'] else 'direct' + return chosen_method, times + + # for integer input, + # catch when more precision required than float provides (representing an + # integer as float can lose precision in fftconvolve if larger than 2**52) + if any([_numeric_arrays([x], kinds='ui') for x in [volume, kernel]]): + max_value = int(np.abs(volume).max()) * int(np.abs(kernel).max()) + max_value *= int(min(volume.size, kernel.size)) + if max_value > 2**np.finfo('float').nmant - 1: + return 'direct' + + if _numeric_arrays([volume, kernel], kinds='b'): + return 'direct' + + if _numeric_arrays([volume, kernel]): + if _fftconv_faster(volume, kernel, mode): + return 'fft' + + return 'direct' + + +def convolve(in1, in2, mode='full', method='auto'): + """ + Convolve two N-dimensional arrays. + + Convolve `in1` and `in2`, with the output size determined by the + `mode` argument. + + Parameters + ---------- + in1 : array_like + First input. + in2 : array_like + Second input. Should have the same number of dimensions as `in1`. + mode : str {'full', 'valid', 'same'}, optional + A string indicating the size of the output: + + ``full`` + The output is the full discrete linear convolution + of the inputs. (Default) + ``valid`` + The output consists only of those elements that do not + rely on the zero-padding. In 'valid' mode, either `in1` or `in2` + must be at least as large as the other in every dimension. + ``same`` + The output is the same size as `in1`, centered + with respect to the 'full' output. + method : str {'auto', 'direct', 'fft'}, optional + A string indicating which method to use to calculate the convolution. + + ``direct`` + The convolution is determined directly from sums, the definition of + convolution. + ``fft`` + The Fourier Transform is used to perform the convolution by calling + `fftconvolve`. + ``auto`` + Automatically chooses direct or Fourier method based on an estimate + of which is faster (default). See Notes for more detail. + + .. 
versionadded:: 0.19.0 + + Returns + ------- + convolve : array + An N-dimensional array containing a subset of the discrete linear + convolution of `in1` with `in2`. + + Warns + ----- + RuntimeWarning + Use of the FFT convolution on input containing NAN or INF will lead + to the entire output being NAN or INF. Use method='direct' when your + input contains NAN or INF values. + + See Also + -------- + numpy.polymul : performs polynomial multiplication (same operation, but + also accepts poly1d objects) + choose_conv_method : chooses the fastest appropriate convolution method + fftconvolve : Always uses the FFT method. + oaconvolve : Uses the overlap-add method to do convolution, which is + generally faster when the input arrays are large and + significantly different in size. + + Notes + ----- + By default, `convolve` and `correlate` use ``method='auto'``, which calls + `choose_conv_method` to choose the fastest method using pre-computed + values (`choose_conv_method` can also measure real-world timing with a + keyword argument). Because `fftconvolve` relies on floating point numbers, + there are certain constraints that may force ``method='direct'`` (more detail + in `choose_conv_method` docstring). + + Examples + -------- + Smooth a square pulse using a Hann window: + + >>> import numpy as np + >>> from scipy import signal + >>> sig = np.repeat([0., 1., 0.], 100) + >>> win = signal.windows.hann(50) + >>> filtered = signal.convolve(sig, win, mode='same') / sum(win) + + >>> import matplotlib.pyplot as plt + >>> fig, (ax_orig, ax_win, ax_filt) = plt.subplots(3, 1, sharex=True) + >>> ax_orig.plot(sig) + >>> ax_orig.set_title('Original pulse') + >>> ax_orig.margins(0, 0.1) + >>> ax_win.plot(win) + >>> ax_win.set_title('Filter impulse response') + >>> ax_win.margins(0, 0.1) + >>> ax_filt.plot(filtered) + >>> ax_filt.set_title('Filtered signal') + >>> ax_filt.margins(0, 0.1) + >>> fig.tight_layout() + >>> fig.show() + + """ + volume = np.asarray(in1) + kernel = np.asarray(in2) + + _reject_objects(volume, 'correlate') + _reject_objects(kernel, 'correlate') + + if volume.ndim == kernel.ndim == 0: + return volume * kernel + elif volume.ndim != kernel.ndim: + raise ValueError("volume and kernel should have the same " + "dimensionality") + + if _inputs_swap_needed(mode, volume.shape, kernel.shape): + # Convolution is commutative; order doesn't have any effect on output + volume, kernel = kernel, volume + + if method == 'auto': + method = choose_conv_method(volume, kernel, mode=mode) + + if method == 'fft': + out = fftconvolve(volume, kernel, mode=mode) + result_type = np.result_type(volume, kernel) + if result_type.kind in {'u', 'i'}: + out = np.around(out) + + if np.isnan(out.flat[0]) or np.isinf(out.flat[0]): + warnings.warn("Use of fft convolution on input with NAN or inf" + " results in NAN or inf output. Consider using" + " method='direct' instead.", + category=RuntimeWarning, stacklevel=2) + + return out.astype(result_type) + elif method == 'direct': + # fastpath to faster numpy.convolve for 1d inputs when possible + if _np_conv_ok(volume, kernel, mode): + return np.convolve(volume, kernel, mode) + + return correlate(volume, _reverse_and_conj(kernel), mode, 'direct') + else: + raise ValueError("Acceptable method flags are 'auto'," + " 'direct', or 'fft'.") + + +def order_filter(a, domain, rank): + """ + Perform an order filter on an N-D array. + + Perform an order filter on the array in. The domain argument acts as a + mask centered over each pixel. 
The non-zero elements of domain are + used to select elements surrounding each input pixel which are placed + in a list. The list is sorted, and the output for that pixel is the + element corresponding to rank in the sorted list. + + Parameters + ---------- + a : ndarray + The N-dimensional input array. + domain : array_like + A mask array with the same number of dimensions as `a`. + Each dimension should have an odd number of elements. + rank : int + A non-negative integer which selects the element from the + sorted list (0 corresponds to the smallest element, 1 is the + next smallest element, etc.). + + Returns + ------- + out : ndarray + The results of the order filter in an array with the same + shape as `a`. + + Examples + -------- + >>> import numpy as np + >>> from scipy import signal + >>> x = np.arange(25).reshape(5, 5) + >>> domain = np.identity(3) + >>> x + array([[ 0, 1, 2, 3, 4], + [ 5, 6, 7, 8, 9], + [10, 11, 12, 13, 14], + [15, 16, 17, 18, 19], + [20, 21, 22, 23, 24]]) + >>> signal.order_filter(x, domain, 0) + array([[ 0, 0, 0, 0, 0], + [ 0, 0, 1, 2, 0], + [ 0, 5, 6, 7, 0], + [ 0, 10, 11, 12, 0], + [ 0, 0, 0, 0, 0]]) + >>> signal.order_filter(x, domain, 2) + array([[ 6, 7, 8, 9, 4], + [ 11, 12, 13, 14, 9], + [ 16, 17, 18, 19, 14], + [ 21, 22, 23, 24, 19], + [ 20, 21, 22, 23, 24]]) + + """ + domain = np.asarray(domain) + for dimsize in domain.shape: + if (dimsize % 2) != 1: + raise ValueError("Each dimension of domain argument " + "should have an odd number of elements.") + + a = np.asarray(a) + if not (np.issubdtype(a.dtype, np.integer) + or a.dtype in [np.float32, np.float64]): + raise ValueError(f"dtype={a.dtype} is not supported by order_filter") + + result = ndimage.rank_filter(a, rank, footprint=domain, mode='constant') + return result + + +def medfilt(volume, kernel_size=None): + """ + Perform a median filter on an N-dimensional array. + + Apply a median filter to the input array using a local window-size + given by `kernel_size`. The array will automatically be zero-padded. + + Parameters + ---------- + volume : array_like + An N-dimensional input array. + kernel_size : array_like, optional + A scalar or an N-length list giving the size of the median filter + window in each dimension. Elements of `kernel_size` should be odd. + If `kernel_size` is a scalar, then this scalar is used as the size in + each dimension. Default size is 3 for each dimension. + + Returns + ------- + out : ndarray + An array the same size as input containing the median filtered + result. + + Warns + ----- + UserWarning + If array size is smaller than kernel size along any dimension + + See Also + -------- + scipy.ndimage.median_filter + scipy.signal.medfilt2d + + Notes + ----- + The more general function `scipy.ndimage.median_filter` has a more + efficient implementation of a median filter and therefore runs much faster. + + For 2-dimensional images with ``uint8``, ``float32`` or ``float64`` dtypes, + the specialised function `scipy.signal.medfilt2d` may be faster. 
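An editorial sketch relating `medfilt` to the `scipy.ndimage.median_filter` routine mentioned in the Notes (illustrative 1-D data, not part of the upstream docstring): because `medfilt` zero-pads, it matches `median_filter` called with ``mode='constant'``.

import numpy as np
from scipy import signal, ndimage

x = np.array([2., 6., 5., 4., 0., 3., 5., 7., 9., 2.])
m1 = signal.medfilt(x, kernel_size=3)
m2 = ndimage.median_filter(x, size=3, mode='constant', cval=0.0)
assert np.allclose(m1, m2)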
+ + """ + volume = np.atleast_1d(volume) + if not (np.issubdtype(volume.dtype, np.integer) + or volume.dtype in [np.float32, np.float64]): + raise ValueError(f"dtype={volume.dtype} is not supported by medfilt") + + if kernel_size is None: + kernel_size = [3] * volume.ndim + kernel_size = np.asarray(kernel_size) + if kernel_size.shape == (): + kernel_size = np.repeat(kernel_size.item(), volume.ndim) + + for k in range(volume.ndim): + if (kernel_size[k] % 2) != 1: + raise ValueError("Each element of kernel_size should be odd.") + if any(k > s for k, s in zip(kernel_size, volume.shape)): + warnings.warn('kernel_size exceeds volume extent: the volume will be ' + 'zero-padded.', + stacklevel=2) + + size = math.prod(kernel_size) + result = ndimage.rank_filter(volume, size // 2, size=kernel_size, + mode='constant') + + return result + + +def wiener(im, mysize=None, noise=None): + """ + Perform a Wiener filter on an N-dimensional array. + + Apply a Wiener filter to the N-dimensional array `im`. + + Parameters + ---------- + im : ndarray + An N-dimensional array. + mysize : int or array_like, optional + A scalar or an N-length list giving the size of the Wiener filter + window in each dimension. Elements of mysize should be odd. + If mysize is a scalar, then this scalar is used as the size + in each dimension. + noise : float, optional + The noise-power to use. If None, then noise is estimated as the + average of the local variance of the input. + + Returns + ------- + out : ndarray + Wiener filtered result with the same shape as `im`. + + Notes + ----- + This implementation is similar to wiener2 in Matlab/Octave. + For more details see [1]_ + + References + ---------- + .. [1] Lim, Jae S., Two-Dimensional Signal and Image Processing, + Englewood Cliffs, NJ, Prentice Hall, 1990, p. 548. + + Examples + -------- + >>> from scipy.datasets import face + >>> from scipy.signal import wiener + >>> import matplotlib.pyplot as plt + >>> import numpy as np + >>> rng = np.random.default_rng() + >>> img = rng.random((40, 40)) #Create a random image + >>> filtered_img = wiener(img, (5, 5)) #Filter the image + >>> f, (plot1, plot2) = plt.subplots(1, 2) + >>> plot1.imshow(img) + >>> plot2.imshow(filtered_img) + >>> plt.show() + + """ + im = np.asarray(im) + if mysize is None: + mysize = [3] * im.ndim + mysize = np.asarray(mysize) + if mysize.shape == (): + mysize = np.repeat(mysize.item(), im.ndim) + + # Estimate the local mean + size = math.prod(mysize) + lMean = correlate(im, np.ones(mysize), 'same') / size + + # Estimate the local variance + lVar = (correlate(im ** 2, np.ones(mysize), 'same') / size - lMean ** 2) + + # Estimate the noise power if needed. + if noise is None: + noise = np.mean(np.ravel(lVar), axis=0) + + res = (im - lMean) + res *= (1 - noise / lVar) + res += lMean + out = np.where(lVar < noise, lMean, res) + + return out + + +def convolve2d(in1, in2, mode='full', boundary='fill', fillvalue=0): + """ + Convolve two 2-dimensional arrays. + + Convolve `in1` and `in2` with output size determined by `mode`, and + boundary conditions determined by `boundary` and `fillvalue`. + + Parameters + ---------- + in1 : array_like + First input. + in2 : array_like + Second input. Should have the same number of dimensions as `in1`. + mode : str {'full', 'valid', 'same'}, optional + A string indicating the size of the output: + + ``full`` + The output is the full discrete linear convolution + of the inputs. (Default) + ``valid`` + The output consists only of those elements that do not + rely on the zero-padding. 
In 'valid' mode, either `in1` or `in2` + must be at least as large as the other in every dimension. + ``same`` + The output is the same size as `in1`, centered + with respect to the 'full' output. + boundary : str {'fill', 'wrap', 'symm'}, optional + A flag indicating how to handle boundaries: + + ``fill`` + pad input arrays with fillvalue. (default) + ``wrap`` + circular boundary conditions. + ``symm`` + symmetrical boundary conditions. + + fillvalue : scalar, optional + Value to fill pad input arrays with. Default is 0. + + Returns + ------- + out : ndarray + A 2-dimensional array containing a subset of the discrete linear + convolution of `in1` with `in2`. + + Examples + -------- + Compute the gradient of an image by 2D convolution with a complex Scharr + operator. (Horizontal operator is real, vertical is imaginary.) Use + symmetric boundary condition to avoid creating edges at the image + boundaries. + + >>> import numpy as np + >>> from scipy import signal + >>> from scipy import datasets + >>> ascent = datasets.ascent() + >>> scharr = np.array([[ -3-3j, 0-10j, +3 -3j], + ... [-10+0j, 0+ 0j, +10 +0j], + ... [ -3+3j, 0+10j, +3 +3j]]) # Gx + j*Gy + >>> grad = signal.convolve2d(ascent, scharr, boundary='symm', mode='same') + + >>> import matplotlib.pyplot as plt + >>> fig, (ax_orig, ax_mag, ax_ang) = plt.subplots(3, 1, figsize=(6, 15)) + >>> ax_orig.imshow(ascent, cmap='gray') + >>> ax_orig.set_title('Original') + >>> ax_orig.set_axis_off() + >>> ax_mag.imshow(np.absolute(grad), cmap='gray') + >>> ax_mag.set_title('Gradient magnitude') + >>> ax_mag.set_axis_off() + >>> ax_ang.imshow(np.angle(grad), cmap='hsv') # hsv is cyclic, like angles + >>> ax_ang.set_title('Gradient orientation') + >>> ax_ang.set_axis_off() + >>> fig.show() + + """ + in1 = np.asarray(in1) + in2 = np.asarray(in2) + + if not in1.ndim == in2.ndim == 2: + raise ValueError('convolve2d inputs must both be 2-D arrays') + + if _inputs_swap_needed(mode, in1.shape, in2.shape): + in1, in2 = in2, in1 + + val = _valfrommode(mode) + bval = _bvalfromboundary(boundary) + out = _sigtools._convolve2d(in1, in2, 1, val, bval, fillvalue) + return out + + +def correlate2d(in1, in2, mode='full', boundary='fill', fillvalue=0): + """ + Cross-correlate two 2-dimensional arrays. + + Cross correlate `in1` and `in2` with output size determined by `mode`, and + boundary conditions determined by `boundary` and `fillvalue`. + + Parameters + ---------- + in1 : array_like + First input. + in2 : array_like + Second input. Should have the same number of dimensions as `in1`. + mode : str {'full', 'valid', 'same'}, optional + A string indicating the size of the output: + + ``full`` + The output is the full discrete linear cross-correlation + of the inputs. (Default) + ``valid`` + The output consists only of those elements that do not + rely on the zero-padding. In 'valid' mode, either `in1` or `in2` + must be at least as large as the other in every dimension. + ``same`` + The output is the same size as `in1`, centered + with respect to the 'full' output. + boundary : str {'fill', 'wrap', 'symm'}, optional + A flag indicating how to handle boundaries: + + ``fill`` + pad input arrays with fillvalue. (default) + ``wrap`` + circular boundary conditions. + ``symm`` + symmetrical boundary conditions. + + fillvalue : scalar, optional + Value to fill pad input arrays with. Default is 0. + + Returns + ------- + correlate2d : ndarray + A 2-dimensional array containing a subset of the discrete linear + cross-correlation of `in1` with `in2`. 
+ + Notes + ----- + When using "same" mode with even-length inputs, the outputs of `correlate` + and `correlate2d` differ: There is a 1-index offset between them. + + Examples + -------- + Use 2D cross-correlation to find the location of a template in a noisy + image: + + >>> import numpy as np + >>> from scipy import signal, datasets, ndimage + >>> rng = np.random.default_rng() + >>> face = datasets.face(gray=True) - datasets.face(gray=True).mean() + >>> face = ndimage.zoom(face[30:500, 400:950], 0.5) # extract the face + >>> template = np.copy(face[135:165, 140:175]) # right eye + >>> template -= template.mean() + >>> face = face + rng.standard_normal(face.shape) * 50 # add noise + >>> corr = signal.correlate2d(face, template, boundary='symm', mode='same') + >>> y, x = np.unravel_index(np.argmax(corr), corr.shape) # find the match + + >>> import matplotlib.pyplot as plt + >>> fig, (ax_orig, ax_template, ax_corr) = plt.subplots(3, 1, + ... figsize=(6, 15)) + >>> ax_orig.imshow(face, cmap='gray') + >>> ax_orig.set_title('Original') + >>> ax_orig.set_axis_off() + >>> ax_template.imshow(template, cmap='gray') + >>> ax_template.set_title('Template') + >>> ax_template.set_axis_off() + >>> ax_corr.imshow(corr, cmap='gray') + >>> ax_corr.set_title('Cross-correlation') + >>> ax_corr.set_axis_off() + >>> ax_orig.plot(x, y, 'ro') + >>> fig.show() + + """ + in1 = np.asarray(in1) + in2 = np.asarray(in2) + + if not in1.ndim == in2.ndim == 2: + raise ValueError('correlate2d inputs must both be 2-D arrays') + + swapped_inputs = _inputs_swap_needed(mode, in1.shape, in2.shape) + if swapped_inputs: + in1, in2 = in2, in1 + + val = _valfrommode(mode) + bval = _bvalfromboundary(boundary) + out = _sigtools._convolve2d(in1, in2.conj(), 0, val, bval, fillvalue) + + if swapped_inputs: + out = out[::-1, ::-1] + + return out + + +def medfilt2d(input, kernel_size=3): + """ + Median filter a 2-dimensional array. + + Apply a median filter to the `input` array using a local window-size + given by `kernel_size` (must be odd). The array is zero-padded + automatically. + + Parameters + ---------- + input : array_like + A 2-dimensional input array. + kernel_size : array_like, optional + A scalar or a list of length 2, giving the size of the + median filter window in each dimension. Elements of + `kernel_size` should be odd. If `kernel_size` is a scalar, + then this scalar is used as the size in each dimension. + Default is a kernel of size (3, 3). + + Returns + ------- + out : ndarray + An array the same size as input containing the median filtered + result. + + See Also + -------- + scipy.ndimage.median_filter + + Notes + ----- + This is faster than `medfilt` when the input dtype is ``uint8``, + ``float32``, or ``float64``; for other types, this falls back to + `medfilt`. In some situations, `scipy.ndimage.median_filter` may be + faster than this function. 
+ + Examples + -------- + >>> import numpy as np + >>> from scipy import signal + >>> x = np.arange(25).reshape(5, 5) + >>> x + array([[ 0, 1, 2, 3, 4], + [ 5, 6, 7, 8, 9], + [10, 11, 12, 13, 14], + [15, 16, 17, 18, 19], + [20, 21, 22, 23, 24]]) + + # Replaces i,j with the median out of 5*5 window + + >>> signal.medfilt2d(x, kernel_size=5) + array([[ 0, 0, 2, 0, 0], + [ 0, 3, 7, 4, 0], + [ 2, 8, 12, 9, 4], + [ 0, 8, 12, 9, 0], + [ 0, 0, 12, 0, 0]]) + + # Replaces i,j with the median out of default 3*3 window + + >>> signal.medfilt2d(x) + array([[ 0, 1, 2, 3, 0], + [ 1, 6, 7, 8, 4], + [ 6, 11, 12, 13, 9], + [11, 16, 17, 18, 14], + [ 0, 16, 17, 18, 0]]) + + # Replaces i,j with the median out of default 5*3 window + + >>> signal.medfilt2d(x, kernel_size=[5,3]) + array([[ 0, 1, 2, 3, 0], + [ 0, 6, 7, 8, 3], + [ 5, 11, 12, 13, 8], + [ 5, 11, 12, 13, 8], + [ 0, 11, 12, 13, 0]]) + + # Replaces i,j with the median out of default 3*5 window + + >>> signal.medfilt2d(x, kernel_size=[3,5]) + array([[ 0, 0, 2, 1, 0], + [ 1, 5, 7, 6, 3], + [ 6, 10, 12, 11, 8], + [11, 15, 17, 16, 13], + [ 0, 15, 17, 16, 0]]) + + # As seen in the examples, + # kernel numbers must be odd and not exceed original array dim + + """ + image = np.asarray(input) + + # checking dtype.type, rather than just dtype, is necessary for + # excluding np.longdouble with MS Visual C. + if image.dtype.type not in (np.ubyte, np.float32, np.float64): + return medfilt(image, kernel_size) + + if kernel_size is None: + kernel_size = [3] * 2 + kernel_size = np.asarray(kernel_size) + if kernel_size.shape == (): + kernel_size = np.repeat(kernel_size.item(), 2) + + for size in kernel_size: + if (size % 2) != 1: + raise ValueError("Each element of kernel_size should be odd.") + + return _sigtools._medfilt2d(image, kernel_size) + + +def lfilter(b, a, x, axis=-1, zi=None): + """ + Filter data along one-dimension with an IIR or FIR filter. + + Filter a data sequence, `x`, using a digital filter. This works for many + fundamental data types (including Object type). The filter is a direct + form II transposed implementation of the standard difference equation + (see Notes). + + The function `sosfilt` (and filter design using ``output='sos'``) should be + preferred over `lfilter` for most filtering tasks, as second-order sections + have fewer numerical problems. + + Parameters + ---------- + b : array_like + The numerator coefficient vector in a 1-D sequence. + a : array_like + The denominator coefficient vector in a 1-D sequence. If ``a[0]`` + is not 1, then both `a` and `b` are normalized by ``a[0]``. + x : array_like + An N-dimensional input array. + axis : int, optional + The axis of the input data array along which to apply the + linear filter. The filter is applied to each subarray along + this axis. Default is -1. + zi : array_like, optional + Initial conditions for the filter delays. It is a vector + (or array of vectors for an N-dimensional input) of length + ``max(len(a), len(b)) - 1``. If `zi` is None or is not given then + initial rest is assumed. See `lfiltic` for more information. + + Returns + ------- + y : array + The output of the digital filter. + zf : array, optional + If `zi` is None, this is not returned, otherwise, `zf` holds the + final filter delay values. + + See Also + -------- + lfiltic : Construct initial conditions for `lfilter`. + lfilter_zi : Compute initial state (steady state of step response) for + `lfilter`. + filtfilt : A forward-backward filter, to obtain a filter with zero phase. 
+ savgol_filter : A Savitzky-Golay filter. + sosfilt: Filter data using cascaded second-order sections. + sosfiltfilt: A forward-backward filter using second-order sections. + + Notes + ----- + The filter function is implemented as a direct II transposed structure. + This means that the filter implements:: + + a[0]*y[n] = b[0]*x[n] + b[1]*x[n-1] + ... + b[M]*x[n-M] + - a[1]*y[n-1] - ... - a[N]*y[n-N] + + where `M` is the degree of the numerator, `N` is the degree of the + denominator, and `n` is the sample number. It is implemented using + the following difference equations (assuming M = N):: + + a[0]*y[n] = b[0] * x[n] + d[0][n-1] + d[0][n] = b[1] * x[n] - a[1] * y[n] + d[1][n-1] + d[1][n] = b[2] * x[n] - a[2] * y[n] + d[2][n-1] + ... + d[N-2][n] = b[N-1]*x[n] - a[N-1]*y[n] + d[N-1][n-1] + d[N-1][n] = b[N] * x[n] - a[N] * y[n] + + where `d` are the state variables. + + The rational transfer function describing this filter in the + z-transform domain is:: + + -1 -M + b[0] + b[1]z + ... + b[M] z + Y(z) = -------------------------------- X(z) + -1 -N + a[0] + a[1]z + ... + a[N] z + + Examples + -------- + Generate a noisy signal to be filtered: + + >>> import numpy as np + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + >>> rng = np.random.default_rng() + >>> t = np.linspace(-1, 1, 201) + >>> x = (np.sin(2*np.pi*0.75*t*(1-t) + 2.1) + + ... 0.1*np.sin(2*np.pi*1.25*t + 1) + + ... 0.18*np.cos(2*np.pi*3.85*t)) + >>> xn = x + rng.standard_normal(len(t)) * 0.08 + + Create an order 3 lowpass butterworth filter: + + >>> b, a = signal.butter(3, 0.05) + + Apply the filter to xn. Use lfilter_zi to choose the initial condition of + the filter: + + >>> zi = signal.lfilter_zi(b, a) + >>> z, _ = signal.lfilter(b, a, xn, zi=zi*xn[0]) + + Apply the filter again, to have a result filtered at an order the same as + filtfilt: + + >>> z2, _ = signal.lfilter(b, a, z, zi=zi*z[0]) + + Use filtfilt to apply the filter: + + >>> y = signal.filtfilt(b, a, xn) + + Plot the original signal and the various filtered versions: + + >>> plt.figure + >>> plt.plot(t, xn, 'b', alpha=0.75) + >>> plt.plot(t, z, 'r--', t, z2, 'r', t, y, 'k') + >>> plt.legend(('noisy signal', 'lfilter, once', 'lfilter, twice', + ... 'filtfilt'), loc='best') + >>> plt.grid(True) + >>> plt.show() + + """ + b = np.atleast_1d(b) + a = np.atleast_1d(a) + + _reject_objects(x, 'lfilter') + _reject_objects(a, 'lfilter') + _reject_objects(b, 'lfilter') + + if len(a) == 1: + # This path only supports types fdgFDGO to mirror _linear_filter below. + # Any of b, a, x, or zi can set the dtype, but there is no default + # casting of other types; instead a NotImplementedError is raised. + b = np.asarray(b) + a = np.asarray(a) + if b.ndim != 1 and a.ndim != 1: + raise ValueError('object of too small depth for desired array') + x = _validate_x(x) + inputs = [b, a, x] + if zi is not None: + # _linear_filter does not broadcast zi, but does do expansion of + # singleton dims. 
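+            # The shape handling below mirrors that behaviour in this
+            # pure-NumPy FIR path: axes of zi that already match the expected
+            # initial-condition shape keep their strides, singleton axes get
+            # stride 0 so they broadcast, and any other mismatch raises
+            # ValueError before the convolution is applied.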
+ zi = np.asarray(zi) + if zi.ndim != x.ndim: + raise ValueError('object of too small depth for desired array') + expected_shape = list(x.shape) + expected_shape[axis] = b.shape[0] - 1 + expected_shape = tuple(expected_shape) + # check the trivial case where zi is the right shape first + if zi.shape != expected_shape: + strides = zi.ndim * [None] + if axis < 0: + axis += zi.ndim + for k in range(zi.ndim): + if k == axis and zi.shape[k] == expected_shape[k]: + strides[k] = zi.strides[k] + elif k != axis and zi.shape[k] == expected_shape[k]: + strides[k] = zi.strides[k] + elif k != axis and zi.shape[k] == 1: + strides[k] = 0 + else: + raise ValueError('Unexpected shape for zi: expected ' + f'{expected_shape}, found {zi.shape}.') + zi = np.lib.stride_tricks.as_strided(zi, expected_shape, + strides) + inputs.append(zi) + dtype = np.result_type(*inputs) + + if dtype.char not in 'fdgFDGO': + raise NotImplementedError(f"input type '{dtype}' not supported") + + b = np.array(b, dtype=dtype) + a = np.asarray(a, dtype=dtype) + b /= a[0] + x = np.asarray(x, dtype=dtype) + + out_full = np.apply_along_axis(lambda y: np.convolve(b, y), axis, x) + ind = out_full.ndim * [slice(None)] + if zi is not None: + ind[axis] = slice(zi.shape[axis]) + out_full[tuple(ind)] += zi + + ind[axis] = slice(out_full.shape[axis] - len(b) + 1) + out = out_full[tuple(ind)] + + if zi is None: + return out + else: + ind[axis] = slice(out_full.shape[axis] - len(b) + 1, None) + zf = out_full[tuple(ind)] + return out, zf + else: + if zi is None: + return _sigtools._linear_filter(b, a, x, axis) + else: + return _sigtools._linear_filter(b, a, x, axis, zi) + + +def lfiltic(b, a, y, x=None): + """ + Construct initial conditions for lfilter given input and output vectors. + + Given a linear filter (b, a) and initial conditions on the output `y` + and the input `x`, return the initial conditions on the state vector zi + which is used by `lfilter` to generate the output given the input. + + Parameters + ---------- + b : array_like + Linear filter term. + a : array_like + Linear filter term. + y : array_like + Initial conditions. + + If ``N = len(a) - 1``, then ``y = {y[-1], y[-2], ..., y[-N]}``. + + If `y` is too short, it is padded with zeros. + x : array_like, optional + Initial conditions. + + If ``M = len(b) - 1``, then ``x = {x[-1], x[-2], ..., x[-M]}``. + + If `x` is not given, its initial conditions are assumed zero. + + If `x` is too short, it is padded with zeros. + + Returns + ------- + zi : ndarray + The state vector ``zi = {z_0[-1], z_1[-1], ..., z_K-1[-1]}``, + where ``K = max(M, N)``. + + See Also + -------- + lfilter, lfilter_zi + + """ + N = np.size(a) - 1 + M = np.size(b) - 1 + K = max(M, N) + y = np.asarray(y) + + if x is None: + result_type = np.result_type(np.asarray(b), np.asarray(a), y) + if result_type.kind in 'bui': + result_type = np.float64 + x = np.zeros(M, dtype=result_type) + else: + x = np.asarray(x) + + result_type = np.result_type(np.asarray(b), np.asarray(a), y, x) + if result_type.kind in 'bui': + result_type = np.float64 + x = x.astype(result_type) + + L = np.size(x) + if L < M: + x = np.r_[x, np.zeros(M - L)] + + y = y.astype(result_type) + zi = np.zeros(K, result_type) + + L = np.size(y) + if L < N: + y = np.r_[y, np.zeros(N - L)] + + for m in range(M): + zi[m] = np.sum(b[m + 1:] * x[:M - m], axis=0) + + for m in range(N): + zi[m] -= np.sum(a[m + 1:] * y[:N - m], axis=0) + + return zi + + +def deconvolve(signal, divisor): + """Deconvolves ``divisor`` out of ``signal`` using inverse filtering. 
+ + Returns the quotient and remainder such that + ``signal = convolve(divisor, quotient) + remainder`` + + Parameters + ---------- + signal : (N,) array_like + Signal data, typically a recorded signal + divisor : (N,) array_like + Divisor data, typically an impulse response or filter that was + applied to the original signal + + Returns + ------- + quotient : ndarray + Quotient, typically the recovered original signal + remainder : ndarray + Remainder + + See Also + -------- + numpy.polydiv : performs polynomial division (same operation, but + also accepts poly1d objects) + + Examples + -------- + Deconvolve a signal that's been filtered: + + >>> from scipy import signal + >>> original = [0, 1, 0, 0, 1, 1, 0, 0] + >>> impulse_response = [2, 1] + >>> recorded = signal.convolve(impulse_response, original) + >>> recorded + array([0, 2, 1, 0, 2, 3, 1, 0, 0]) + >>> recovered, remainder = signal.deconvolve(recorded, impulse_response) + >>> recovered + array([ 0., 1., 0., 0., 1., 1., 0., 0.]) + + """ + num = np.atleast_1d(signal) + den = np.atleast_1d(divisor) + if num.ndim > 1: + raise ValueError("signal must be 1-D.") + if den.ndim > 1: + raise ValueError("divisor must be 1-D.") + N = len(num) + D = len(den) + if D > N: + quot = [] + rem = num + else: + input = np.zeros(N - D + 1, float) + input[0] = 1 + quot = lfilter(num, den, input) + rem = num - convolve(den, quot, mode='full') + return quot, rem + + +def hilbert(x, N=None, axis=-1): + r"""FFT-based computation of the analytic signal. + + The analytic signal is calculated by filtering out the negative frequencies and + doubling the amplitudes of the positive frequencies in the FFT domain. + The imaginary part of the result is the hilbert transform of the real-valued input + signal. + + The transformation is done along the last axis by default. + + Parameters + ---------- + x : array_like + Signal data. Must be real. + N : int, optional + Number of Fourier components. Default: ``x.shape[axis]`` + axis : int, optional + Axis along which to do the transformation. Default: -1. + + Returns + ------- + xa : ndarray + Analytic signal of `x`, of each 1-D array along `axis` + + Notes + ----- + The analytic signal ``x_a(t)`` of a real-valued signal ``x(t)`` + can be expressed as [1]_ + + .. math:: x_a = F^{-1}(F(x) 2U) = x + i y\ , + + where `F` is the Fourier transform, `U` the unit step function, + and `y` the Hilbert transform of `x`. [2]_ + + In other words, the negative half of the frequency spectrum is zeroed + out, turning the real-valued signal into a complex-valued signal. The Hilbert + transformed signal can be obtained from ``np.imag(hilbert(x))``, and the + original signal from ``np.real(hilbert(x))``. + + References + ---------- + .. [1] Wikipedia, "Analytic signal". + https://en.wikipedia.org/wiki/Analytic_signal + .. [2] Wikipedia, "Hilbert Transform". + https://en.wikipedia.org/wiki/Hilbert_transform + .. [3] Leon Cohen, "Time-Frequency Analysis", 1995. Chapter 2. + .. [4] Alan V. Oppenheim, Ronald W. Schafer. Discrete-Time Signal + Processing, Third Edition, 2009. Chapter 12. + ISBN 13: 978-1292-02572-8 + + See Also + -------- + envelope: Compute envelope of a real- or complex-valued signal. + + Examples + -------- + In this example we use the Hilbert transform to determine the amplitude + envelope and instantaneous frequency of an amplitude-modulated signal. 
+ + Let's create a chirp of which the frequency increases from 20 Hz to 100 Hz and + apply an amplitude modulation: + + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> from scipy.signal import hilbert, chirp + ... + >>> duration, fs = 1, 400 # 1 s signal with sampling frequency of 400 Hz + >>> t = np.arange(int(fs*duration)) / fs # timestamps of samples + >>> signal = chirp(t, 20.0, t[-1], 100.0) + >>> signal *= (1.0 + 0.5 * np.sin(2.0*np.pi*3.0*t) ) + + The amplitude envelope is given by the magnitude of the analytic signal. The + instantaneous frequency can be obtained by differentiating the + instantaneous phase in respect to time. The instantaneous phase corresponds + to the phase angle of the analytic signal. + + >>> analytic_signal = hilbert(signal) + >>> amplitude_envelope = np.abs(analytic_signal) + >>> instantaneous_phase = np.unwrap(np.angle(analytic_signal)) + >>> instantaneous_frequency = np.diff(instantaneous_phase) / (2.0*np.pi) * fs + ... + >>> fig, (ax0, ax1) = plt.subplots(nrows=2, sharex='all', tight_layout=True) + >>> ax0.set_title("Amplitude-modulated Chirp Signal") + >>> ax0.set_ylabel("Amplitude") + >>> ax0.plot(t, signal, label='Signal') + >>> ax0.plot(t, amplitude_envelope, label='Envelope') + >>> ax0.legend() + >>> ax1.set(xlabel="Time in seconds", ylabel="Phase in rad", ylim=(0, 120)) + >>> ax1.plot(t[1:], instantaneous_frequency, 'C2-', label='Instantaneous Phase') + >>> ax1.legend() + >>> plt.show() + + """ + x = np.asarray(x) + if np.iscomplexobj(x): + raise ValueError("x must be real.") + if N is None: + N = x.shape[axis] + if N <= 0: + raise ValueError("N must be positive.") + + Xf = sp_fft.fft(x, N, axis=axis) + h = np.zeros(N, dtype=Xf.dtype) + if N % 2 == 0: + h[0] = h[N // 2] = 1 + h[1:N // 2] = 2 + else: + h[0] = 1 + h[1:(N + 1) // 2] = 2 + + if x.ndim > 1: + ind = [np.newaxis] * x.ndim + ind[axis] = slice(None) + h = h[tuple(ind)] + x = sp_fft.ifft(Xf * h, axis=axis) + return x + + +def hilbert2(x, N=None): + """ + Compute the '2-D' analytic signal of `x` + + Parameters + ---------- + x : array_like + 2-D signal data. + N : int or tuple of two ints, optional + Number of Fourier components. Default is ``x.shape`` + + Returns + ------- + xa : ndarray + Analytic signal of `x` taken along axes (0,1). + + References + ---------- + .. [1] Wikipedia, "Analytic signal", + https://en.wikipedia.org/wiki/Analytic_signal + + """ + x = np.atleast_2d(x) + if x.ndim > 2: + raise ValueError("x must be 2-D.") + if np.iscomplexobj(x): + raise ValueError("x must be real.") + if N is None: + N = x.shape + elif isinstance(N, int): + if N <= 0: + raise ValueError("N must be positive.") + N = (N, N) + elif len(N) != 2 or np.any(np.asarray(N) <= 0): + raise ValueError("When given as a tuple, N must hold exactly " + "two positive integers") + + Xf = sp_fft.fft2(x, N, axes=(0, 1)) + h1 = np.zeros(N[0], dtype=Xf.dtype) + h2 = np.zeros(N[1], dtype=Xf.dtype) + for h in (h1, h2): + N1 = h.shape[0] + if N1 % 2 == 0: + h[0] = h[N1 // 2] = 1 + h[1:N1 // 2] = 2 + else: + h[0] = 1 + h[1:(N1 + 1) // 2] = 2 + + h = h1[:, np.newaxis] * h2[np.newaxis, :] + k = x.ndim + while k > 2: + h = h[:, np.newaxis] + k -= 1 + x = sp_fft.ifft2(Xf * h, axes=(0, 1)) + return x + + +def envelope(z: np.ndarray, bp_in: tuple[int | None, int | None] = (1, None), *, + n_out: int | None = None, squared: bool = False, + residual: Literal['lowpass', 'all', None] = 'lowpass', + axis: int = -1) -> np.ndarray: + r"""Compute the envelope of a real- or complex-valued signal. 
+ + Parameters + ---------- + z : ndarray + Real- or complex-valued input signal, which is assumed to be made up of ``n`` + samples and having sampling interval ``T``. `z` may also be a multidimensional + array with the time axis being defined by `axis`. + bp_in : tuple[int | None, int | None], optional + 2-tuple defining the frequency band ``bp_in[0]:bp_in[1]`` of the input filter. + The corner frequencies are specified as integer multiples of ``1/(n*T)`` with + ``-n//2 <= bp_in[0] < bp_in[1] <= (n+1)//2`` being the allowed frequency range. + ``None`` entries are replaced with ``-n//2`` or ``(n+1)//2`` respectively. The + default of ``(1, None)`` removes the mean value as well as the negative + frequency components. + n_out : int | None, optional + If not ``None`` the output will be resampled to `n_out` samples. The default + of ``None`` sets the output to the same length as the input `z`. + squared : bool, optional + If set, the square of the envelope is returned. The bandwidth of the squared + envelope is often smaller than the non-squared envelope bandwidth due to the + nonlinear nature of the utilized absolute value function. I.e., the embedded + square root function typically produces additional harmonics. + The default is ``False``. + residual : Literal['lowpass', 'all', None], optional + This option determines what kind of residual, i.e., the signal part which the + input bandpass filter removes, is returned. ``'all'`` returns everything except + the contents of the frequency band ``bp_in[0]:bp_in[1]``, ``'lowpass'`` + returns the contents of the frequency band ``< bp_in[0]``. If ``None`` then + only the envelope is returned. Default: ``'lowpass'``. + axis : int, optional + Axis of `z` over which to compute the envelope. Default is the last axis. + + Returns + ------- + ndarray + If parameter `residual` is ``None`` then an array ``z_env`` with the same shape + as the input `z` is returned, containing its envelope. Otherwise, an array with + shape ``(2, *z.shape)``, containing the arrays ``z_env`` and ``z_res``, stacked + along the first axis, is returned. + It allows unpacking, i.e., ``z_env, z_res = envelope(z, residual='all')``. + The residual ``z_res`` contains the signal part which the input bandpass filter + removed, depending on the parameter `residual`. Note that for real-valued + signals, a real-valued residual is returned. Hence, the negative frequency + components of `bp_in` are ignored. + + Notes + ----- + Any complex-valued signal :math:`z(t)` can be described by a real-valued + instantaneous amplitude :math:`a(t)` and a real-valued instantaneous phase + :math:`\phi(t)`, i.e., :math:`z(t) = a(t) \exp\!\big(j \phi(t)\big)`. The + envelope is defined as the absolute value of the amplitude :math:`|a(t)| = |z(t)|`, + which is at the same time the absolute value of the signal. Hence, :math:`|a(t)|` + "envelopes" the class of all signals with amplitude :math:`a(t)` and arbitrary + phase :math:`\phi(t)`. + For real-valued signals, :math:`x(t) = a(t) \cos\!\big(\phi(t)\big)` is the + analogous formulation. Hence, :math:`|a(t)|` can be determined by converting + :math:`x(t)` into an analytic signal :math:`z_a(t)` by means of a Hilbert + transform, i.e., + :math:`z_a(t) = a(t) \cos\!\big(\phi(t)\big) + j a(t) \sin\!\big(\phi(t) \big)`, + which produces a complex-valued signal with the same envelope :math:`|a(t)|`. + + The implementation is based on computing the FFT of the input signal and then + performing the necessary operations in Fourier space.
Hence, the typical FFT + caveats need to be taken into account: + + * The signal is assumed to be periodic. Discontinuities between signal start and + end can lead to unwanted results due to Gibbs phenomenon. + * The FFT is slow if the signal length is prime or very long. Also, the memory + demands are typically higher than a comparable FIR/IIR filter based + implementation. + * The frequency spacing ``1 / (n*T)`` for corner frequencies of the bandpass filter + corresponds to the frequencies produced by ``scipy.fft.fftfreq(len(z), T)``. + + If the envelope of a complex-valued signal `z` with no bandpass filtering is + desired, i.e., ``bp_in=(None, None)``, then the envelope corresponds to the + absolute value. Hence, it is more efficient to use ``np.abs(z)`` instead of this + function. + + Although computing the envelope based on the analytic signal [1]_ is the natural + method for real-valued signals, other methods are also frequently used. The most + popular alternative is probably the so-called "square-law" envelope detector and + its relatives [2]_. They do not always compute the correct result for all kinds of + signals, but are usually correct and typically computationally more efficient for + most kinds of narrowband signals. The definition for an envelope presented here is + common where instantaneous amplitude and phase are of interest (e.g., as described + in [3]_). There exist also other concepts, which rely on the general mathematical + idea of an envelope [4]_: A pragmatic approach is to determine all upper and lower + signal peaks and use a spline interpolation to determine the curves [5]_. + + + References + ---------- + .. [1] "Analytic Signal", Wikipedia, + https://en.wikipedia.org/wiki/Analytic_signal + .. [2] Lyons, Richard, "Digital envelope detection: The good, the bad, and the + ugly", IEEE Signal Processing Magazine 34.4 (2017): 183-187. + `PDF `__ + .. [3] T.G. Kincaid, "The complex representation of signals.", + TIS R67# MH5, General Electric Co. (1966). + `PDF `__ + .. [4] "Envelope (mathematics)", Wikipedia, + https://en.wikipedia.org/wiki/Envelope_(mathematics) + .. [5] Yang, Yanli. "A signal theoretic approach for envelope analysis of + real-valued signals." IEEE Access 5 (2017): 5623-5630. + `PDF `__ + + + See Also + -------- + hilbert: Compute analytic signal by means of Hilbert transform. + + + Examples + -------- + The following plot illustrates the envelope of a signal with variable frequency and + a low-frequency drift. To separate the drift from the envelope, a 4 Hz highpass + filter is used. The low-pass residuum of the input bandpass filter is utilized to + determine an asymmetric upper and lower bound to enclose the signal. Due to the + smoothness of the resulting envelope, it is down-sampled from 500 to 40 samples. + Note that the instantaneous amplitude ``a_x`` and the computed envelope ``x_env`` + are not perfectly identical. This is due to the signal not being perfectly periodic + as well as the existence of some spectral overlapping of ``x_carrier`` and + ``x_drift``. Hence, they cannot be completely separated by a bandpass filter. + + >>> import matplotlib.pyplot as plt + >>> import numpy as np + >>> from scipy.signal.windows import gaussian + >>> from scipy.signal import envelope + ... 
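+    >>> # Test signal (illustrative): a carrier with varying instantaneous
+    >>> # frequency and a Gaussian amplitude, plus a low-frequency drift.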
+ >>> n, n_out = 500, 40 # number of signal samples and envelope samples + >>> T = 2 / n # sampling interval for 2 s duration + >>> t = np.arange(n) * T # time stamps + >>> a_x = gaussian(len(t), 0.4/T) # instantaneous amplitude + >>> phi_x = 30*np.pi*t + 35*np.cos(2*np.pi*0.25*t) # instantaneous phase + >>> x_carrier = a_x * np.cos(phi_x) + >>> x_drift = 0.3 * gaussian(len(t), 0.4/T) # drift + >>> x = x_carrier + x_drift + ... + >>> bp_in = (int(4 * (n*T)), None) # 4 Hz highpass input filter + >>> x_env, x_res = envelope(x, bp_in, n_out=n_out) + >>> t_out = np.arange(n_out) * (n / n_out) * T + ... + >>> fg0, ax0 = plt.subplots(1, 1, tight_layout=True) + >>> ax0.set_title(r"$4\,$Hz Highpass Envelope of Drifting Signal") + >>> ax0.set(xlabel="Time in seconds", xlim=(0, n*T), ylabel="Amplitude") + >>> ax0.plot(t, x, 'C0-', alpha=0.5, label="Signal") + >>> ax0.plot(t, x_drift, 'C2--', alpha=0.25, label="Drift") + >>> ax0.plot(t_out, x_res+x_env, 'C1.-', alpha=0.5, label="Envelope") + >>> ax0.plot(t_out, x_res-x_env, 'C1.-', alpha=0.5, label=None) + >>> ax0.grid(True) + >>> ax0.legend() + >>> plt.show() + + The second example provides a geometric envelope interpretation of complex-valued + signals: The following two plots show the complex-valued signal as a blue + 3d-trajectory and the envelope as an orange round tube with varying diameter, i.e., + as :math:`|a(t)| \exp(j\rho(t))`, with :math:`\rho(t)\in[-\pi,\pi]`. Also, the + projection into the 2d real and imaginary coordinate planes of trajectory and tube + is depicted. Every point of the complex-valued signal touches the tube's surface. + + The left plot shows an analytic signal, i.e, the phase difference between + imaginary and real part is always 90 degrees, resulting in a spiraling trajectory. + It can be seen that in this case the real part has also the expected envelope, + i.e., representing the absolute value of the instantaneous amplitude. + + The right plot shows the real part of that analytic signal being interpreted + as a complex-vauled signal, i.e., having zero imaginary part. There the resulting + envelope is not as smooth as in the analytic case and the instantaneous amplitude + in the real plane is not recovered. If ``z_re`` had been passed as a real-valued + signal, i.e., as ``z_re = z.real`` instead of ``z_re = z.real + 0j``, the result + would have been identical to the left plot. The reason for this is that real-valued + signals are interpreted as being the real part of a complex-valued analytic signal. + + >>> import matplotlib.pyplot as plt + >>> import numpy as np + >>> from scipy.signal.windows import gaussian + >>> from scipy.signal import envelope + ... + >>> n, T = 1000, 1/1000 # number of samples and sampling interval + >>> t = np.arange(n) * T # time stamps for 1 s duration + >>> f_c = 3 # Carrier frequency for signal + >>> z = gaussian(len(t), 0.3/T) * np.exp(2j*np.pi*f_c*t) # analytic signal + >>> z_re = z.real + 0j # complex signal with zero imaginary part + ... + >>> e_a, e_r = (envelope(z_, (None, None), residual=None) for z_ in (z, z_re)) + ... + >>> # Generate grids to visualize envelopes as 2d and 3d surfaces: + >>> E2d_t, E2_amp = np.meshgrid(t, [-1, 1]) + >>> E2d_1 = np.ones_like(E2_amp) + >>> E3d_t, E3d_phi = np.meshgrid(t, np.linspace(-np.pi, np.pi, 300)) + >>> ma = 1.8 # maximum axis values in real and imaginary direction + ... + >>> fg0 = plt.figure(figsize=(6.2, 4.)) + >>> ax00 = fg0.add_subplot(1, 2, 1, projection='3d') + >>> ax01 = fg0.add_subplot(1, 2, 2, projection='3d', sharex=ax00, + ... 
sharey=ax00, sharez=ax00) + >>> ax00.set_title("Analytic Signal") + >>> ax00.set(xlim=(0, 1), ylim=(-ma, ma), zlim=(-ma, ma)) + >>> ax01.set_title("Real-valued Signal") + >>> for z_, e_, ax_ in zip((z, z.real), (e_a, e_r), (ax00, ax01)): + ... ax_.set(xlabel="Time $t$", ylabel="Real Amp. $x(t)$", + ... zlabel="Imag. Amp. $y(t)$") + ... ax_.plot(t, z_.real, 'C0-', zs=-ma, zdir='z', alpha=0.5, label="Real") + ... ax_.plot_surface(E2d_t, e_*E2_amp, -ma*E2d_1, color='C1', alpha=0.25) + ... ax_.plot(t, z_.imag, 'C0-', zs=+ma, zdir='y', alpha=0.5, label="Imag.") + ... ax_.plot_surface(E2d_t, ma*E2d_1, e_*E2_amp, color='C1', alpha=0.25) + ... ax_.plot(t, z_.real, z_.imag, 'C0-', label="Signal") + ... ax_.plot_surface(E3d_t, e_*np.cos(E3d_phi), e_*np.sin(E3d_phi), + ... color='C1', alpha=0.5, shade=True, label="Envelope") + ... ax_.view_init(elev=22.7, azim=-114.3) + >>> fg0.subplots_adjust(left=0.08, right=0.97, wspace=0.15) + >>> plt.show() + """ + if not (-z.ndim <= axis < z.ndim): + raise ValueError(f"Invalid parameter {axis=} for {z.shape=}!") + if not (z.shape[axis] > 0): + raise ValueError(f"z.shape[axis] not > 0 for {z.shape=}, {axis=}!") + if len(bp_in) != 2 or not all((isinstance(b_, int) or b_ is None) for b_ in bp_in): + raise ValueError(f"{bp_in=} isn't a 2-tuple of type (int | None, int | None)!") + if not ((isinstance(n_out, int) and 0 < n_out) or n_out is None): + raise ValueError(f"{n_out=} is not a positive integer or None!") + if residual not in ('lowpass', 'all', None): + raise ValueError(f"{residual=} not in ['lowpass', 'all', None]!") + + n = z.shape[axis] # number of time samples of input + n_out = n if n_out is None else n_out + fak = n_out / n # scaling factor for resampling + + bp = slice(bp_in[0] if bp_in[0] is not None else -(n//2), + bp_in[1] if bp_in[1] is not None else (n+1)//2) + if not (-n//2 <= bp.start < bp.stop <= (n+1)//2): + raise ValueError("`-n//2 <= bp_in[0] < bp_in[1] <= (n+1)//2` does not hold " + + f"for n={z.shape[axis]=} and {bp_in=}!") + + # moving active axis to end allows to use `...` for indexing: + z = np.moveaxis(z, axis, -1) + + if np.iscomplexobj(z): + Z = sp_fft.fft(z) + else: # avoid calculating negative frequency bins for real signals: + Z = np.zeros_like(z, dtype=sp_fft.rfft(z.flat[:1]).dtype) + Z[..., :n//2 + 1] = sp_fft.rfft(z) + if bp.start > 0: # make signal analytic within bp_in band: + Z[..., bp] *= 2 + elif bp.stop > 0: + Z[..., 1:bp.stop] *= 2 + if not (bp.start <= 0 < bp.stop): # envelope is invariant to freq. shifts. + z_bb = sp_fft.ifft(Z[..., bp], n=n_out) * fak # baseband signal + else: + bp_shift = slice(bp.start + n//2, bp.stop + n//2) + z_bb = sp_fft.ifft(sp_fft.fftshift(Z, axes=-1)[..., bp_shift], n=n_out) * fak + + z_env = np.abs(z_bb) if not squared else z_bb.real ** 2 + z_bb.imag ** 2 + z_env = np.moveaxis(z_env, -1, axis) + + # Calculate the residual from the input bandpass filter: + if residual is None: + return z_env + if not (bp.start <= 0 < bp.stop): + Z[..., bp] = 0 + else: + Z[..., :bp.stop], Z[..., bp.start:] = 0, 0 + if residual == 'lowpass': + if bp.stop > 0: + Z[..., bp.stop:(n+1) // 2] = 0 + else: + Z[..., bp.start:], Z[..., 0:(n + 1) // 2] = 0, 0 + + z_res = fak * (sp_fft.ifft(Z, n=n_out) if np.iscomplexobj(z) else + sp_fft.irfft(Z, n=n_out)) + return np.stack((z_env, np.moveaxis(z_res, -1, axis)), axis=0) + +def _cmplx_sort(p): + """Sort roots based on magnitude. + + Parameters + ---------- + p : array_like + The roots to sort, as a 1-D array. + + Returns + ------- + p_sorted : ndarray + Sorted roots. 
+ indx : ndarray + Array of indices needed to sort the input `p`. + + Examples + -------- + >>> from scipy import signal + >>> vals = [1, 4, 1+1.j, 3] + >>> p_sorted, indx = signal.cmplx_sort(vals) + >>> p_sorted + array([1.+0.j, 1.+1.j, 3.+0.j, 4.+0.j]) + >>> indx + array([0, 2, 3, 1]) + """ + p = np.asarray(p) + indx = np.argsort(abs(p)) + return np.take(p, indx, 0), indx + + +def unique_roots(p, tol=1e-3, rtype='min'): + """Determine unique roots and their multiplicities from a list of roots. + + Parameters + ---------- + p : array_like + The list of roots. + tol : float, optional + The tolerance for two roots to be considered equal in terms of + the distance between them. Default is 1e-3. Refer to Notes about + the details on roots grouping. + rtype : {'max', 'maximum', 'min', 'minimum', 'avg', 'mean'}, optional + How to determine the returned root if multiple roots are within + `tol` of each other. + + - 'max', 'maximum': pick the maximum of those roots + - 'min', 'minimum': pick the minimum of those roots + - 'avg', 'mean': take the average of those roots + + When finding minimum or maximum among complex roots they are compared + first by the real part and then by the imaginary part. + + Returns + ------- + unique : ndarray + The list of unique roots. + multiplicity : ndarray + The multiplicity of each root. + + Notes + ----- + If we have 3 roots ``a``, ``b`` and ``c``, such that ``a`` is close to + ``b`` and ``b`` is close to ``c`` (distance is less than `tol`), then it + doesn't necessarily mean that ``a`` is close to ``c``. It means that roots + grouping is not unique. In this function we use "greedy" grouping going + through the roots in the order they are given in the input `p`. + + This utility function is not specific to roots but can be used for any + sequence of values for which uniqueness and multiplicity has to be + determined. For a more general routine, see `numpy.unique`. + + Examples + -------- + >>> from scipy import signal + >>> vals = [0, 1.3, 1.31, 2.8, 1.25, 2.2, 10.3] + >>> uniq, mult = signal.unique_roots(vals, tol=2e-2, rtype='avg') + + Check which roots have multiplicity larger than 1: + + >>> uniq[mult > 1] + array([ 1.305]) + """ + if rtype in ['max', 'maximum']: + reduce = np.max + elif rtype in ['min', 'minimum']: + reduce = np.min + elif rtype in ['avg', 'mean']: + reduce = np.mean + else: + raise ValueError("`rtype` must be one of " + "{'max', 'maximum', 'min', 'minimum', 'avg', 'mean'}") + + p = np.asarray(p) + + points = np.empty((len(p), 2)) + points[:, 0] = np.real(p) + points[:, 1] = np.imag(p) + tree = cKDTree(points) + + p_unique = [] + p_multiplicity = [] + used = np.zeros(len(p), dtype=bool) + for i in range(len(p)): + if used[i]: + continue + + group = tree.query_ball_point(points[i], tol) + group = [x for x in group if not used[x]] + + p_unique.append(reduce(p[group])) + p_multiplicity.append(len(group)) + + used[group] = True + + return np.asarray(p_unique), np.asarray(p_multiplicity) + + +def invres(r, p, k, tol=1e-3, rtype='avg'): + """Compute b(s) and a(s) from partial fraction expansion. + + If `M` is the degree of numerator `b` and `N` the degree of denominator + `a`:: + + b(s) b[0] s**(M) + b[1] s**(M-1) + ... + b[M] + H(s) = ------ = ------------------------------------------ + a(s) a[0] s**(N) + a[1] s**(N-1) + ... + a[N] + + then the partial-fraction expansion H(s) is defined as:: + + r[0] r[1] r[-1] + = -------- + -------- + ... 
+ --------- + k(s) + (s-p[0]) (s-p[1]) (s-p[-1]) + + If there are any repeated roots (closer together than `tol`), then H(s) + has terms like:: + + r[i] r[i+1] r[i+n-1] + -------- + ----------- + ... + ----------- + (s-p[i]) (s-p[i])**2 (s-p[i])**n + + This function is used for polynomials in positive powers of s or z, + such as analog filters or digital filters in controls engineering. For + negative powers of z (typical for digital filters in DSP), use `invresz`. + + Parameters + ---------- + r : array_like + Residues corresponding to the poles. For repeated poles, the residues + must be ordered to correspond to ascending by power fractions. + p : array_like + Poles. Equal poles must be adjacent. + k : array_like + Coefficients of the direct polynomial term. + tol : float, optional + The tolerance for two roots to be considered equal in terms of + the distance between them. Default is 1e-3. See `unique_roots` + for further details. + rtype : {'avg', 'min', 'max'}, optional + Method for computing a root to represent a group of identical roots. + Default is 'avg'. See `unique_roots` for further details. + + Returns + ------- + b : ndarray + Numerator polynomial coefficients. + a : ndarray + Denominator polynomial coefficients. + + See Also + -------- + residue, invresz, unique_roots + + """ + r = np.atleast_1d(r) + p = np.atleast_1d(p) + k = np.trim_zeros(np.atleast_1d(k), 'f') + + unique_poles, multiplicity = _group_poles(p, tol, rtype) + factors, denominator = _compute_factors(unique_poles, multiplicity, + include_powers=True) + + if len(k) == 0: + numerator = 0 + else: + numerator = np.polymul(k, denominator) + + for residue, factor in zip(r, factors): + numerator = np.polyadd(numerator, residue * factor) + + return numerator, denominator + + +def _compute_factors(roots, multiplicity, include_powers=False): + """Compute the total polynomial divided by factors for each root.""" + current = np.array([1]) + suffixes = [current] + for pole, mult in zip(roots[-1:0:-1], multiplicity[-1:0:-1]): + monomial = np.array([1, -pole]) + for _ in range(mult): + current = np.polymul(current, monomial) + suffixes.append(current) + suffixes = suffixes[::-1] + + factors = [] + current = np.array([1]) + for pole, mult, suffix in zip(roots, multiplicity, suffixes): + monomial = np.array([1, -pole]) + block = [] + for i in range(mult): + if i == 0 or include_powers: + block.append(np.polymul(current, suffix)) + current = np.polymul(current, monomial) + factors.extend(reversed(block)) + + return factors, current + + +def _compute_residues(poles, multiplicity, numerator): + denominator_factors, _ = _compute_factors(poles, multiplicity) + numerator = numerator.astype(poles.dtype) + + residues = [] + for pole, mult, factor in zip(poles, multiplicity, + denominator_factors): + if mult == 1: + residues.append(np.polyval(numerator, pole) / + np.polyval(factor, pole)) + else: + numer = numerator.copy() + monomial = np.array([1, -pole]) + factor, d = np.polydiv(factor, monomial) + + block = [] + for _ in range(mult): + numer, n = np.polydiv(numer, monomial) + r = n[0] / d[0] + numer = np.polysub(numer, r * factor) + block.append(r) + + residues.extend(reversed(block)) + + return np.asarray(residues) + + +def residue(b, a, tol=1e-3, rtype='avg'): + """Compute partial-fraction expansion of b(s) / a(s). + + If `M` is the degree of numerator `b` and `N` the degree of denominator + `a`:: + + b(s) b[0] s**(M) + b[1] s**(M-1) + ... 
+ b[M] + H(s) = ------ = ------------------------------------------ + a(s) a[0] s**(N) + a[1] s**(N-1) + ... + a[N] + + then the partial-fraction expansion H(s) is defined as:: + + r[0] r[1] r[-1] + = -------- + -------- + ... + --------- + k(s) + (s-p[0]) (s-p[1]) (s-p[-1]) + + If there are any repeated roots (closer together than `tol`), then H(s) + has terms like:: + + r[i] r[i+1] r[i+n-1] + -------- + ----------- + ... + ----------- + (s-p[i]) (s-p[i])**2 (s-p[i])**n + + This function is used for polynomials in positive powers of s or z, + such as analog filters or digital filters in controls engineering. For + negative powers of z (typical for digital filters in DSP), use `residuez`. + + See Notes for details about the algorithm. + + Parameters + ---------- + b : array_like + Numerator polynomial coefficients. + a : array_like + Denominator polynomial coefficients. + tol : float, optional + The tolerance for two roots to be considered equal in terms of + the distance between them. Default is 1e-3. See `unique_roots` + for further details. + rtype : {'avg', 'min', 'max'}, optional + Method for computing a root to represent a group of identical roots. + Default is 'avg'. See `unique_roots` for further details. + + Returns + ------- + r : ndarray + Residues corresponding to the poles. For repeated poles, the residues + are ordered to correspond to ascending by power fractions. + p : ndarray + Poles ordered by magnitude in ascending order. + k : ndarray + Coefficients of the direct polynomial term. + + See Also + -------- + invres, residuez, numpy.poly, unique_roots + + Notes + ----- + The "deflation through subtraction" algorithm is used for + computations --- method 6 in [1]_. + + The form of partial fraction expansion depends on poles multiplicity in + the exact mathematical sense. However there is no way to exactly + determine multiplicity of roots of a polynomial in numerical computing. + Thus you should think of the result of `residue` with given `tol` as + partial fraction expansion computed for the denominator composed of the + computed poles with empirically determined multiplicity. The choice of + `tol` can drastically change the result if there are close poles. + + References + ---------- + .. [1] J. F. Mahoney, B. D. Sivazlian, "Partial fractions expansion: a + review of computational methodology and efficiency", Journal of + Computational and Applied Mathematics, Vol. 9, 1983. + """ + b = np.asarray(b) + a = np.asarray(a) + if (np.issubdtype(b.dtype, np.complexfloating) + or np.issubdtype(a.dtype, np.complexfloating)): + b = b.astype(complex) + a = a.astype(complex) + else: + b = b.astype(float) + a = a.astype(float) + + b = np.trim_zeros(np.atleast_1d(b), 'f') + a = np.trim_zeros(np.atleast_1d(a), 'f') + + if a.size == 0: + raise ValueError("Denominator `a` is zero.") + + poles = np.roots(a) + if b.size == 0: + return np.zeros(poles.shape), _cmplx_sort(poles)[0], np.array([]) + + if len(b) < len(a): + k = np.empty(0) + else: + k, b = np.polydiv(b, a) + + unique_poles, multiplicity = unique_roots(poles, tol=tol, rtype=rtype) + unique_poles, order = _cmplx_sort(unique_poles) + multiplicity = multiplicity[order] + + residues = _compute_residues(unique_poles, multiplicity, b) + + index = 0 + for pole, mult in zip(unique_poles, multiplicity): + poles[index:index + mult] = pole + index += mult + + return residues / a[0], poles, k + + +def residuez(b, a, tol=1e-3, rtype='avg'): + """Compute partial-fraction expansion of b(z) / a(z). 
+ + If `M` is the degree of numerator `b` and `N` the degree of denominator + `a`:: + + b(z) b[0] + b[1] z**(-1) + ... + b[M] z**(-M) + H(z) = ------ = ------------------------------------------ + a(z) a[0] + a[1] z**(-1) + ... + a[N] z**(-N) + + then the partial-fraction expansion H(z) is defined as:: + + r[0] r[-1] + = --------------- + ... + ---------------- + k[0] + k[1]z**(-1) ... + (1-p[0]z**(-1)) (1-p[-1]z**(-1)) + + If there are any repeated roots (closer than `tol`), then the partial + fraction expansion has terms like:: + + r[i] r[i+1] r[i+n-1] + -------------- + ------------------ + ... + ------------------ + (1-p[i]z**(-1)) (1-p[i]z**(-1))**2 (1-p[i]z**(-1))**n + + This function is used for polynomials in negative powers of z, + such as digital filters in DSP. For positive powers, use `residue`. + + See Notes of `residue` for details about the algorithm. + + Parameters + ---------- + b : array_like + Numerator polynomial coefficients. + a : array_like + Denominator polynomial coefficients. + tol : float, optional + The tolerance for two roots to be considered equal in terms of + the distance between them. Default is 1e-3. See `unique_roots` + for further details. + rtype : {'avg', 'min', 'max'}, optional + Method for computing a root to represent a group of identical roots. + Default is 'avg'. See `unique_roots` for further details. + + Returns + ------- + r : ndarray + Residues corresponding to the poles. For repeated poles, the residues + are ordered to correspond to ascending by power fractions. + p : ndarray + Poles ordered by magnitude in ascending order. + k : ndarray + Coefficients of the direct polynomial term. + + See Also + -------- + invresz, residue, unique_roots + """ + b = np.asarray(b) + a = np.asarray(a) + if (np.issubdtype(b.dtype, np.complexfloating) + or np.issubdtype(a.dtype, np.complexfloating)): + b = b.astype(complex) + a = a.astype(complex) + else: + b = b.astype(float) + a = a.astype(float) + + b = np.trim_zeros(np.atleast_1d(b), 'b') + a = np.trim_zeros(np.atleast_1d(a), 'b') + + if a.size == 0: + raise ValueError("Denominator `a` is zero.") + elif a[0] == 0: + raise ValueError("First coefficient of determinant `a` must be " + "non-zero.") + + poles = np.roots(a) + if b.size == 0: + return np.zeros(poles.shape), _cmplx_sort(poles)[0], np.array([]) + + b_rev = b[::-1] + a_rev = a[::-1] + + if len(b_rev) < len(a_rev): + k_rev = np.empty(0) + else: + k_rev, b_rev = np.polydiv(b_rev, a_rev) + + unique_poles, multiplicity = unique_roots(poles, tol=tol, rtype=rtype) + unique_poles, order = _cmplx_sort(unique_poles) + multiplicity = multiplicity[order] + + residues = _compute_residues(1 / unique_poles, multiplicity, b_rev) + + index = 0 + powers = np.empty(len(residues), dtype=int) + for pole, mult in zip(unique_poles, multiplicity): + poles[index:index + mult] = pole + powers[index:index + mult] = 1 + np.arange(mult) + index += mult + + residues *= (-poles) ** powers / a_rev[0] + + return residues, poles, k_rev[::-1] + + +def _group_poles(poles, tol, rtype): + if rtype in ['max', 'maximum']: + reduce = np.max + elif rtype in ['min', 'minimum']: + reduce = np.min + elif rtype in ['avg', 'mean']: + reduce = np.mean + else: + raise ValueError("`rtype` must be one of " + "{'max', 'maximum', 'min', 'minimum', 'avg', 'mean'}") + + unique = [] + multiplicity = [] + + pole = poles[0] + block = [pole] + for i in range(1, len(poles)): + if abs(poles[i] - pole) <= tol: + block.append(pole) + else: + unique.append(reduce(block)) + multiplicity.append(len(block)) + pole 
= poles[i] + block = [pole] + + unique.append(reduce(block)) + multiplicity.append(len(block)) + + return np.asarray(unique), np.asarray(multiplicity) + + +def invresz(r, p, k, tol=1e-3, rtype='avg'): + """Compute b(z) and a(z) from partial fraction expansion. + + If `M` is the degree of numerator `b` and `N` the degree of denominator + `a`:: + + b(z) b[0] + b[1] z**(-1) + ... + b[M] z**(-M) + H(z) = ------ = ------------------------------------------ + a(z) a[0] + a[1] z**(-1) + ... + a[N] z**(-N) + + then the partial-fraction expansion H(z) is defined as:: + + r[0] r[-1] + = --------------- + ... + ---------------- + k[0] + k[1]z**(-1) ... + (1-p[0]z**(-1)) (1-p[-1]z**(-1)) + + If there are any repeated roots (closer than `tol`), then the partial + fraction expansion has terms like:: + + r[i] r[i+1] r[i+n-1] + -------------- + ------------------ + ... + ------------------ + (1-p[i]z**(-1)) (1-p[i]z**(-1))**2 (1-p[i]z**(-1))**n + + This function is used for polynomials in negative powers of z, + such as digital filters in DSP. For positive powers, use `invres`. + + Parameters + ---------- + r : array_like + Residues corresponding to the poles. For repeated poles, the residues + must be ordered to correspond to ascending by power fractions. + p : array_like + Poles. Equal poles must be adjacent. + k : array_like + Coefficients of the direct polynomial term. + tol : float, optional + The tolerance for two roots to be considered equal in terms of + the distance between them. Default is 1e-3. See `unique_roots` + for further details. + rtype : {'avg', 'min', 'max'}, optional + Method for computing a root to represent a group of identical roots. + Default is 'avg'. See `unique_roots` for further details. + + Returns + ------- + b : ndarray + Numerator polynomial coefficients. + a : ndarray + Denominator polynomial coefficients. + + See Also + -------- + residuez, unique_roots, invres + + """ + r = np.atleast_1d(r) + p = np.atleast_1d(p) + k = np.trim_zeros(np.atleast_1d(k), 'b') + + unique_poles, multiplicity = _group_poles(p, tol, rtype) + factors, denominator = _compute_factors(unique_poles, multiplicity, + include_powers=True) + + if len(k) == 0: + numerator = 0 + else: + numerator = np.polymul(k[::-1], denominator[::-1]) + + for residue, factor in zip(r, factors): + numerator = np.polyadd(numerator, residue * factor[::-1]) + + return numerator[::-1], denominator + + +def resample(x, num, t=None, axis=0, window=None, domain='time'): + """ + Resample `x` to `num` samples using Fourier method along the given axis. + + The resampled signal starts at the same value as `x` but is sampled + with a spacing of ``len(x) / num * (spacing of x)``. Because a + Fourier method is used, the signal is assumed to be periodic. + + Parameters + ---------- + x : array_like + The data to be resampled. + num : int + The number of samples in the resampled signal. + t : array_like, optional + If `t` is given, it is assumed to be the equally spaced sample + positions associated with the signal data in `x`. + axis : int, optional + The axis of `x` that is resampled. Default is 0. + window : array_like, callable, string, float, or tuple, optional + Specifies the window applied to the signal in the Fourier + domain. See below for details. + domain : string, optional + A string indicating the domain of the input `x`: + ``time`` Consider the input `x` as time-domain (Default), + ``freq`` Consider the input `x` as frequency-domain. 
+ + Returns + ------- + resampled_x or (resampled_x, resampled_t) + Either the resampled array, or, if `t` was given, a tuple + containing the resampled array and the corresponding resampled + positions. + + See Also + -------- + decimate : Downsample the signal after applying an FIR or IIR filter. + resample_poly : Resample using polyphase filtering and an FIR filter. + + Notes + ----- + The argument `window` controls a Fourier-domain window that tapers + the Fourier spectrum before zero-padding to alleviate ringing in + the resampled values for sampled signals you didn't intend to be + interpreted as band-limited. + + If `window` is a function, then it is called with a vector of inputs + indicating the frequency bins (i.e. fftfreq(x.shape[axis]) ). + + If `window` is an array of the same length as `x.shape[axis]` it is + assumed to be the window to be applied directly in the Fourier + domain (with dc and low-frequency first). + + For any other type of `window`, the function `scipy.signal.get_window` + is called to generate the window. + + The first sample of the returned vector is the same as the first + sample of the input vector. The spacing between samples is changed + from ``dx`` to ``dx * len(x) / num``. + + If `t` is not None, then it is used solely to calculate the resampled + positions `resampled_t` + + As noted, `resample` uses FFT transformations, which can be very + slow if the number of input or output samples is large and prime; + see :func:`~scipy.fft.fft`. In such cases, it can be faster to first downsample + a signal of length ``n`` with :func:`~scipy.signal.resample_poly` by a factor of + ``n//num`` before using `resample`. Note that this approach changes the + characteristics of the antialiasing filter. + + Examples + -------- + Note that the end of the resampled data rises to meet the first + sample of the next cycle: + + >>> import numpy as np + >>> from scipy import signal + + >>> x = np.linspace(0, 10, 20, endpoint=False) + >>> y = np.cos(-x**2/6.0) + >>> f = signal.resample(y, 100) + >>> xnew = np.linspace(0, 10, 100, endpoint=False) + + >>> import matplotlib.pyplot as plt + >>> plt.plot(x, y, 'go-', xnew, f, '.-', 10, y[0], 'ro') + >>> plt.legend(['data', 'resampled'], loc='best') + >>> plt.show() + + Consider the following signal ``y`` where ``len(y)`` is a large prime number: + + >>> N = 55949 + >>> freq = 100 + >>> x = np.linspace(0, 1, N) + >>> y = np.cos(2 * np.pi * freq * x) + + Due to ``N`` being prime, + + >>> num = 5000 + >>> f = signal.resample(signal.resample_poly(y, 1, N // num), num) + + runs significantly faster than + + >>> f = signal.resample(y, num) + """ + + if domain not in ('time', 'freq'): + raise ValueError("Acceptable domain flags are 'time' or" + f" 'freq', not domain={domain}") + + x = np.asarray(x) + Nx = x.shape[axis] + + # Check if we can use faster real FFT + real_input = np.isrealobj(x) + + if domain == 'time': + # Forward transform + if real_input: + X = sp_fft.rfft(x, axis=axis) + else: # Full complex FFT + X = sp_fft.fft(x, axis=axis) + else: # domain == 'freq' + X = x + + # Apply window to spectrum + if window is not None: + if callable(window): + W = window(sp_fft.fftfreq(Nx)) + elif isinstance(window, np.ndarray): + if window.shape != (Nx,): + raise ValueError('window must have the same length as data') + W = window + else: + W = sp_fft.ifftshift(get_window(window, Nx)) + + newshape_W = [1] * x.ndim + newshape_W[axis] = X.shape[axis] + if real_input: + # Fold the window back on itself to mimic complex behavior + W_real = 
W.copy() + W_real[1:] += W_real[-1:0:-1] + W_real[1:] *= 0.5 + X *= W_real[:newshape_W[axis]].reshape(newshape_W) + else: + X *= W.reshape(newshape_W) + + # Copy each half of the original spectrum to the output spectrum, either + # truncating high frequencies (downsampling) or zero-padding them + # (upsampling) + + # Placeholder array for output spectrum + newshape = list(x.shape) + if real_input: + newshape[axis] = num // 2 + 1 + else: + newshape[axis] = num + Y = np.zeros(newshape, X.dtype) + + # Copy positive frequency components (and Nyquist, if present) + N = min(num, Nx) + nyq = N // 2 + 1 # Slice index that includes Nyquist if present + sl = [slice(None)] * x.ndim + sl[axis] = slice(0, nyq) + Y[tuple(sl)] = X[tuple(sl)] + if not real_input: + # Copy negative frequency components + if N > 2: # (slice expression doesn't collapse to empty array) + sl[axis] = slice(nyq - N, None) + Y[tuple(sl)] = X[tuple(sl)] + + # Split/join Nyquist component(s) if present + # So far we have set Y[+N/2]=X[+N/2] + if N % 2 == 0: + if num < Nx: # downsampling + if real_input: + sl[axis] = slice(N//2, N//2 + 1) + Y[tuple(sl)] *= 2. + else: + # select the component of Y at frequency +N/2, + # add the component of X at -N/2 + sl[axis] = slice(-N//2, -N//2 + 1) + Y[tuple(sl)] += X[tuple(sl)] + elif Nx < num: # upsampling + # select the component at frequency +N/2 and halve it + sl[axis] = slice(N//2, N//2 + 1) + Y[tuple(sl)] *= 0.5 + if not real_input: + temp = Y[tuple(sl)] + # set the component at -N/2 equal to the component at +N/2 + sl[axis] = slice(num-N//2, num-N//2 + 1) + Y[tuple(sl)] = temp + + # Inverse transform + if real_input: + y = sp_fft.irfft(Y, num, axis=axis) + else: + y = sp_fft.ifft(Y, axis=axis, overwrite_x=True) + + y *= (float(num) / float(Nx)) + + if t is None: + return y + else: + new_t = np.arange(0, num) * (t[1] - t[0]) * Nx / float(num) + t[0] + return y, new_t + + +def resample_poly(x, up, down, axis=0, window=('kaiser', 5.0), + padtype='constant', cval=None): + """ + Resample `x` along the given axis using polyphase filtering. + + The signal `x` is upsampled by the factor `up`, a zero-phase low-pass + FIR filter is applied, and then it is downsampled by the factor `down`. + The resulting sample rate is ``up / down`` times the original sample + rate. By default, values beyond the boundary of the signal are assumed + to be zero during the filtering step. + + Parameters + ---------- + x : array_like + The data to be resampled. + up : int + The upsampling factor. + down : int + The downsampling factor. + axis : int, optional + The axis of `x` that is resampled. Default is 0. + window : string, tuple, or array_like, optional + Desired window to use to design the low-pass filter, or the FIR filter + coefficients to employ. See below for details. + padtype : string, optional + `constant`, `line`, `mean`, `median`, `maximum`, `minimum` or any of + the other signal extension modes supported by `scipy.signal.upfirdn`. + Changes assumptions on values beyond the boundary. If `constant`, + assumed to be `cval` (default zero). If `line` assumed to continue a + linear trend defined by the first and last points. `mean`, `median`, + `maximum` and `minimum` work as in `np.pad` and assume that the values + beyond the boundary are the mean, median, maximum or minimum + respectively of the array along the axis. + + .. versionadded:: 1.4.0 + cval : float, optional + Value to use if `padtype='constant'`. Default is zero. + + .. 
versionadded:: 1.4.0 + + Returns + ------- + resampled_x : array + The resampled array. + + See Also + -------- + decimate : Downsample the signal after applying an FIR or IIR filter. + resample : Resample up or down using the FFT method. + + Notes + ----- + This polyphase method will likely be faster than the Fourier method + in `scipy.signal.resample` when the number of samples is large and + prime, or when the number of samples is large and `up` and `down` + share a large greatest common denominator. The length of the FIR + filter used will depend on ``max(up, down) // gcd(up, down)``, and + the number of operations during polyphase filtering will depend on + the filter length and `down` (see `scipy.signal.upfirdn` for details). + + The argument `window` specifies the FIR low-pass filter design. + + If `window` is an array_like it is assumed to be the FIR filter + coefficients. Note that the FIR filter is applied after the upsampling + step, so it should be designed to operate on a signal at a sampling + frequency higher than the original by a factor of `up//gcd(up, down)`. + This function's output will be centered with respect to this array, so it + is best to pass a symmetric filter with an odd number of samples if, as + is usually the case, a zero-phase filter is desired. + + For any other type of `window`, the functions `scipy.signal.get_window` + and `scipy.signal.firwin` are called to generate the appropriate filter + coefficients. + + The first sample of the returned vector is the same as the first + sample of the input vector. The spacing between samples is changed + from ``dx`` to ``dx * down / float(up)``. + + Examples + -------- + By default, the end of the resampled data rises to meet the first + sample of the next cycle for the FFT method, and gets closer to zero + for the polyphase method: + + >>> import numpy as np + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + + >>> x = np.linspace(0, 10, 20, endpoint=False) + >>> y = np.cos(-x**2/6.0) + >>> f_fft = signal.resample(y, 100) + >>> f_poly = signal.resample_poly(y, 100, 20) + >>> xnew = np.linspace(0, 10, 100, endpoint=False) + + >>> plt.plot(xnew, f_fft, 'b.-', xnew, f_poly, 'r.-') + >>> plt.plot(x, y, 'ko-') + >>> plt.plot(10, y[0], 'bo', 10, 0., 'ro') # boundaries + >>> plt.legend(['resample', 'resamp_poly', 'data'], loc='best') + >>> plt.show() + + This default behaviour can be changed by using the padtype option: + + >>> N = 5 + >>> x = np.linspace(0, 1, N, endpoint=False) + >>> y = 2 + x**2 - 1.7*np.sin(x) + .2*np.cos(11*x) + >>> y2 = 1 + x**3 + 0.1*np.sin(x) + .1*np.cos(11*x) + >>> Y = np.stack([y, y2], axis=-1) + >>> up = 4 + >>> xr = np.linspace(0, 1, N*up, endpoint=False) + + >>> y2 = signal.resample_poly(Y, up, 1, padtype='constant') + >>> y3 = signal.resample_poly(Y, up, 1, padtype='mean') + >>> y4 = signal.resample_poly(Y, up, 1, padtype='line') + + >>> for i in [0,1]: + ... plt.figure() + ... plt.plot(xr, y4[:,i], 'g.', label='line') + ... plt.plot(xr, y3[:,i], 'y.', label='mean') + ... plt.plot(xr, y2[:,i], 'r.', label='constant') + ... plt.plot(x, Y[:,i], 'k-') + ... 
plt.legend() + >>> plt.show() + + """ + x = np.asarray(x) + if up != int(up): + raise ValueError("up must be an integer") + if down != int(down): + raise ValueError("down must be an integer") + up = int(up) + down = int(down) + if up < 1 or down < 1: + raise ValueError('up and down must be >= 1') + if cval is not None and padtype != 'constant': + raise ValueError('cval has no effect when padtype is ', padtype) + + # Determine our up and down factors + # Use a rational approximation to save computation time on really long + # signals + g_ = math.gcd(up, down) + up //= g_ + down //= g_ + if up == down == 1: + return x.copy() + n_in = x.shape[axis] + n_out = n_in * up + n_out = n_out // down + bool(n_out % down) + + if isinstance(window, (list | np.ndarray)): + window = np.array(window) # use array to force a copy (we modify it) + if window.ndim > 1: + raise ValueError('window must be 1-D') + half_len = (window.size - 1) // 2 + h = window + else: + # Design a linear-phase low-pass FIR filter + max_rate = max(up, down) + f_c = 1. / max_rate # cutoff of FIR filter (rel. to Nyquist) + half_len = 10 * max_rate # reasonable cutoff for sinc-like function + if np.issubdtype(x.dtype, np.complexfloating): + h = firwin(2 * half_len + 1, f_c, + window=window).astype(x.dtype) # match dtype of x + elif np.issubdtype(x.dtype, np.floating): + h = firwin(2 * half_len + 1, f_c, + window=window).astype(x.dtype) # match dtype of x + else: + h = firwin(2 * half_len + 1, f_c, + window=window) + h *= up + + # Zero-pad our filter to put the output samples at the center + n_pre_pad = (down - half_len % down) + n_post_pad = 0 + n_pre_remove = (half_len + n_pre_pad) // down + # We should rarely need to do this given our filter lengths... + while _output_len(len(h) + n_pre_pad + n_post_pad, n_in, + up, down) < n_out + n_pre_remove: + n_post_pad += 1 + h = np.concatenate((np.zeros(n_pre_pad, dtype=h.dtype), h, + np.zeros(n_post_pad, dtype=h.dtype))) + n_pre_remove_end = n_pre_remove + n_out + + # Remove background depending on the padtype option + funcs = {'mean': np.mean, 'median': np.median, + 'minimum': np.amin, 'maximum': np.amax} + upfirdn_kwargs = {'mode': 'constant', 'cval': 0} + if padtype in funcs: + background_values = funcs[padtype](x, axis=axis, keepdims=True) + elif padtype in _upfirdn_modes: + upfirdn_kwargs = {'mode': padtype} + if padtype == 'constant': + if cval is None: + cval = 0 + upfirdn_kwargs['cval'] = cval + else: + raise ValueError( + 'padtype must be one of: maximum, mean, median, minimum, ' + + ', '.join(_upfirdn_modes)) + + if padtype in funcs: + x = x - background_values + + # filter then remove excess + y = upfirdn(h, x, up, down, axis=axis, **upfirdn_kwargs) + keep = [slice(None), ]*x.ndim + keep[axis] = slice(n_pre_remove, n_pre_remove_end) + y_keep = y[tuple(keep)] + + # Add background back + if padtype in funcs: + y_keep += background_values + + return y_keep + + +def vectorstrength(events, period): + ''' + Determine the vector strength of the events corresponding to the given + period. + + The vector strength is a measure of phase synchrony, how well the + timing of the events is synchronized to a single period of a periodic + signal. + + If multiple periods are used, calculate the vector strength of each. + This is called the "resonating vector strength". + + Parameters + ---------- + events : 1D array_like + An array of time points containing the timing of the events. + period : float or array_like + The period of the signal that the events should synchronize to. 
+ The period is in the same units as `events`. It can also be an array + of periods, in which case the outputs are arrays of the same length. + + Returns + ------- + strength : float or 1D array + The strength of the synchronization. 1.0 is perfect synchronization + and 0.0 is no synchronization. If `period` is an array, this is also + an array with each element containing the vector strength at the + corresponding period. + phase : float or array + The phase that the events are most strongly synchronized to in radians. + If `period` is an array, this is also an array with each element + containing the phase for the corresponding period. + + References + ---------- + van Hemmen, JL, Longtin, A, and Vollmayr, AN. Testing resonating vector + strength: Auditory system, electric fish, and noise. + Chaos 21, 047508 (2011); + :doi:`10.1063/1.3670512`. + van Hemmen, JL. Vector strength after Goldberg, Brown, and von Mises: + biological and mathematical perspectives. Biol Cybern. + 2013 Aug;107(4):385-96. :doi:`10.1007/s00422-013-0561-7`. + van Hemmen, JL and Vollmayr, AN. Resonating vector strength: what happens + when we vary the "probing" frequency while keeping the spike times + fixed. Biol Cybern. 2013 Aug;107(4):491-94. + :doi:`10.1007/s00422-013-0560-8`. + ''' + events = np.asarray(events) + period = np.asarray(period) + if events.ndim > 1: + raise ValueError('events cannot have dimensions more than 1') + if period.ndim > 1: + raise ValueError('period cannot have dimensions more than 1') + + # we need to know later if period was originally a scalar + scalarperiod = not period.ndim + + events = np.atleast_2d(events) + period = np.atleast_2d(period) + if (period <= 0).any(): + raise ValueError('periods must be positive') + + # this converts the times to vectors + vectors = np.exp(np.dot(2j*np.pi/period.T, events)) + + # the vector strength is just the magnitude of the mean of the vectors + # the vector phase is the angle of the mean of the vectors + vectormean = np.mean(vectors, axis=1) + strength = abs(vectormean) + phase = np.angle(vectormean) + + # if the original period was a scalar, return scalars + if scalarperiod: + strength = strength[0] + phase = phase[0] + return strength, phase + + +def detrend(data: np.ndarray, axis: int = -1, + type: Literal['linear', 'constant'] = 'linear', + bp: ArrayLike | int = 0, overwrite_data: bool = False) -> np.ndarray: + r"""Remove linear or constant trend along axis from data. + + Parameters + ---------- + data : array_like + The input data. + axis : int, optional + The axis along which to detrend the data. By default this is the + last axis (-1). + type : {'linear', 'constant'}, optional + The type of detrending. If ``type == 'linear'`` (default), + the result of a linear least-squares fit to `data` is subtracted + from `data`. + If ``type == 'constant'``, only the mean of `data` is subtracted. + bp : array_like of ints, optional + A sequence of break points. If given, an individual linear fit is + performed for each part of `data` between two break points. + Break points are specified as indices into `data`. This parameter + only has an effect when ``type == 'linear'``. + overwrite_data : bool, optional + If True, perform in place detrending and avoid a copy. Default is False + + Returns + ------- + ret : ndarray + The detrended input data. 
+ + Notes + ----- + Detrending can be interpreted as subtracting a least squares fit polynomial: + Setting the parameter `type` to 'constant' corresponds to fitting a zeroth degree + polynomial, 'linear' to a first degree polynomial. Consult the example below. + + See Also + -------- + numpy.polynomial.polynomial.Polynomial.fit: Create least squares fit polynomial. + + + Examples + -------- + The following example detrends the function :math:`x(t) = \sin(\pi t) + 1/4`: + + >>> import matplotlib.pyplot as plt + >>> import numpy as np + >>> from scipy.signal import detrend + ... + >>> t = np.linspace(-0.5, 0.5, 21) + >>> x = np.sin(np.pi*t) + 1/4 + ... + >>> x_d_const = detrend(x, type='constant') + >>> x_d_linear = detrend(x, type='linear') + ... + >>> fig1, ax1 = plt.subplots() + >>> ax1.set_title(r"Detrending $x(t)=\sin(\pi t) + 1/4$") + >>> ax1.set(xlabel="t", ylabel="$x(t)$", xlim=(t[0], t[-1])) + >>> ax1.axhline(y=0, color='black', linewidth=.5) + >>> ax1.axvline(x=0, color='black', linewidth=.5) + >>> ax1.plot(t, x, 'C0.-', label="No detrending") + >>> ax1.plot(t, x_d_const, 'C1x-', label="type='constant'") + >>> ax1.plot(t, x_d_linear, 'C2+-', label="type='linear'") + >>> ax1.legend() + >>> plt.show() + + Alternatively, NumPy's `~numpy.polynomial.polynomial.Polynomial` can be used for + detrending as well: + + >>> pp0 = np.polynomial.Polynomial.fit(t, x, deg=0) # fit degree 0 polynomial + >>> np.allclose(x_d_const, x - pp0(t)) # compare with constant detrend + True + >>> pp1 = np.polynomial.Polynomial.fit(t, x, deg=1) # fit degree 1 polynomial + >>> np.allclose(x_d_linear, x - pp1(t)) # compare with linear detrend + True + + Note that `~numpy.polynomial.polynomial.Polynomial` also allows fitting higher + degree polynomials. Consult its documentation on how to extract the polynomial + coefficients. + """ + if type not in ['linear', 'l', 'constant', 'c']: + raise ValueError("Trend type must be 'linear' or 'constant'.") + data = np.asarray(data) + dtype = data.dtype.char + if dtype not in 'dfDF': + dtype = 'd' + if type in ['constant', 'c']: + ret = data - np.mean(data, axis, keepdims=True) + return ret + else: + dshape = data.shape + N = dshape[axis] + bp = np.sort(np.unique(np.concatenate(np.atleast_1d(0, bp, N)))) + if np.any(bp > N): + raise ValueError("Breakpoints must be less than length " + "of data along given axis.") + + # Restructure data so that axis is along first dimension and + # all other dimensions are collapsed into second dimension + rnk = len(dshape) + if axis < 0: + axis = axis + rnk + newdata = np.moveaxis(data, axis, 0) + newdata_shape = newdata.shape + newdata = newdata.reshape(N, -1) + + if not overwrite_data: + newdata = newdata.copy() # make sure we have a copy + if newdata.dtype.char not in 'dfDF': + newdata = newdata.astype(dtype) + +# Nreg = len(bp) - 1 + # Find leastsq fit and remove it for each piece + for m in range(len(bp) - 1): + Npts = bp[m + 1] - bp[m] + A = np.ones((Npts, 2), dtype) + A[:, 0] = np.arange(1, Npts + 1, dtype=dtype) / Npts + sl = slice(bp[m], bp[m + 1]) + coef, resids, rank, s = linalg.lstsq(A, newdata[sl]) + newdata[sl] = newdata[sl] - A @ coef + + # Put data back in original shape. + newdata = newdata.reshape(newdata_shape) + ret = np.moveaxis(newdata, 0, axis) + return ret + + +def lfilter_zi(b, a): + """ + Construct initial conditions for lfilter for step response steady-state. + + Compute an initial state `zi` for the `lfilter` function that corresponds + to the steady state of the step response. 
+ + A typical use of this function is to set the initial state so that the + output of the filter starts at the same value as the first element of + the signal to be filtered. + + Parameters + ---------- + b, a : array_like (1-D) + The IIR filter coefficients. See `lfilter` for more + information. + + Returns + ------- + zi : 1-D ndarray + The initial state for the filter. + + See Also + -------- + lfilter, lfiltic, filtfilt + + Notes + ----- + A linear filter with order m has a state space representation (A, B, C, D), + for which the output y of the filter can be expressed as:: + + z(n+1) = A*z(n) + B*x(n) + y(n) = C*z(n) + D*x(n) + + where z(n) is a vector of length m, A has shape (m, m), B has shape + (m, 1), C has shape (1, m) and D has shape (1, 1) (assuming x(n) is + a scalar). lfilter_zi solves:: + + zi = A*zi + B + + In other words, it finds the initial condition for which the response + to an input of all ones is a constant. + + Given the filter coefficients `a` and `b`, the state space matrices + for the transposed direct form II implementation of the linear filter, + which is the implementation used by scipy.signal.lfilter, are:: + + A = scipy.linalg.companion(a).T + B = b[1:] - a[1:]*b[0] + + assuming ``a[0]`` is 1.0; if ``a[0]`` is not 1, `a` and `b` are first + divided by a[0]. + + Examples + -------- + The following code creates a lowpass Butterworth filter. Then it + applies that filter to an array whose values are all 1.0; the + output is also all 1.0, as expected for a lowpass filter. If the + `zi` argument of `lfilter` had not been given, the output would have + shown the transient signal. + + >>> from numpy import array, ones + >>> from scipy.signal import lfilter, lfilter_zi, butter + >>> b, a = butter(5, 0.25) + >>> zi = lfilter_zi(b, a) + >>> y, zo = lfilter(b, a, ones(10), zi=zi) + >>> y + array([1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]) + + Another example: + + >>> x = array([0.5, 0.5, 0.5, 0.0, 0.0, 0.0, 0.0]) + >>> y, zf = lfilter(b, a, x, zi=zi*x[0]) + >>> y + array([ 0.5 , 0.5 , 0.5 , 0.49836039, 0.48610528, + 0.44399389, 0.35505241]) + + Note that the `zi` argument to `lfilter` was computed using + `lfilter_zi` and scaled by ``x[0]``. Then the output `y` has no + transient until the input drops from 0.5 to 0.0. + + """ + + # FIXME: Can this function be replaced with an appropriate + # use of lfiltic? For example, when b,a = butter(N,Wn), + # lfiltic(b, a, y=numpy.ones_like(a), x=numpy.ones_like(b)). + # + + # We could use scipy.signal.normalize, but it uses warnings in + # cases where a ValueError is more appropriate, and it allows + # b to be 2D. + b = np.atleast_1d(b) + if b.ndim != 1: + raise ValueError("Numerator b must be 1-D.") + a = np.atleast_1d(a) + if a.ndim != 1: + raise ValueError("Denominator a must be 1-D.") + + while len(a) > 1 and a[0] == 0.0: + a = a[1:] + if a.size < 1: + raise ValueError("There must be at least one nonzero `a` coefficient.") + + if a[0] != 1.0: + # Normalize the coefficients so a[0] == 1. + b = b / a[0] + a = a / a[0] + + n = max(len(a), len(b)) + + # Pad a or b with zeros so they are the same length. 
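+    # Equal lengths are needed so that the companion matrix of `a` and the
+    # vector B = b[1:] - a[1:]*b[0] have matching dimensions; the steady
+    # state is then obtained below by solving (I - A) @ zi = B.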
+ if len(a) < n: + a = np.r_[a, np.zeros(n - len(a), dtype=a.dtype)] + elif len(b) < n: + b = np.r_[b, np.zeros(n - len(b), dtype=b.dtype)] + + IminusA = np.eye(n - 1, dtype=np.result_type(a, b)) - linalg.companion(a).T + B = b[1:] - a[1:] * b[0] + # Solve zi = A*zi + B + zi = np.linalg.solve(IminusA, B) + + # For future reference: we could also use the following + # explicit formulas to solve the linear system: + # + # zi = np.zeros(n - 1) + # zi[0] = B.sum() / IminusA[:,0].sum() + # asum = 1.0 + # csum = 0.0 + # for k in range(1,n-1): + # asum += a[k] + # csum += b[k] - a[k]*b[0] + # zi[k] = asum*zi[0] - csum + + return zi + + +def sosfilt_zi(sos): + """ + Construct initial conditions for sosfilt for step response steady-state. + + Compute an initial state `zi` for the `sosfilt` function that corresponds + to the steady state of the step response. + + A typical use of this function is to set the initial state so that the + output of the filter starts at the same value as the first element of + the signal to be filtered. + + Parameters + ---------- + sos : array_like + Array of second-order filter coefficients, must have shape + ``(n_sections, 6)``. See `sosfilt` for the SOS filter format + specification. + + Returns + ------- + zi : ndarray + Initial conditions suitable for use with ``sosfilt``, shape + ``(n_sections, 2)``. + + See Also + -------- + sosfilt, zpk2sos + + Notes + ----- + .. versionadded:: 0.16.0 + + Examples + -------- + Filter a rectangular pulse that begins at time 0, with and without + the use of the `zi` argument of `scipy.signal.sosfilt`. + + >>> import numpy as np + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + + >>> sos = signal.butter(9, 0.125, output='sos') + >>> zi = signal.sosfilt_zi(sos) + >>> x = (np.arange(250) < 100).astype(int) + >>> f1 = signal.sosfilt(sos, x) + >>> f2, zo = signal.sosfilt(sos, x, zi=zi) + + >>> plt.plot(x, 'k--', label='x') + >>> plt.plot(f1, 'b', alpha=0.5, linewidth=2, label='filtered') + >>> plt.plot(f2, 'g', alpha=0.25, linewidth=4, label='filtered with zi') + >>> plt.legend(loc='best') + >>> plt.show() + + """ + sos = np.asarray(sos) + if sos.ndim != 2 or sos.shape[1] != 6: + raise ValueError('sos must be shape (n_sections, 6)') + + if sos.dtype.kind in 'bui': + sos = sos.astype(np.float64) + + n_sections = sos.shape[0] + zi = np.empty((n_sections, 2), dtype=sos.dtype) + scale = 1.0 + for section in range(n_sections): + b = sos[section, :3] + a = sos[section, 3:] + zi[section] = scale * lfilter_zi(b, a) + # If H(z) = B(z)/A(z) is this section's transfer function, then + # b.sum()/a.sum() is H(1), the gain at omega=0. That's the steady + # state value of this section's step response. + scale *= b.sum() / a.sum() + + return zi + + +def _filtfilt_gust(b, a, x, axis=-1, irlen=None): + """Forward-backward IIR filter that uses Gustafsson's method. + + Apply the IIR filter defined by ``(b,a)`` to `x` twice, first forward + then backward, using Gustafsson's initial conditions [1]_. + + Let ``y_fb`` be the result of filtering first forward and then backward, + and let ``y_bf`` be the result of filtering first backward then forward. + Gustafsson's method is to compute initial conditions for the forward + pass and the backward pass such that ``y_fb == y_bf``. + + Parameters + ---------- + b : scalar or 1-D ndarray + Numerator coefficients of the filter. + a : scalar or 1-D ndarray + Denominator coefficients of the filter. + x : ndarray + Data to be filtered. + axis : int, optional + Axis of `x` to be filtered. 
Default is -1. + irlen : int or None, optional + The length of the nonnegligible part of the impulse response. + If `irlen` is None, or if the length of the signal is less than + ``2 * irlen``, then no part of the impulse response is ignored. + + Returns + ------- + y : ndarray + The filtered data. + x0 : ndarray + Initial condition for the forward filter. + x1 : ndarray + Initial condition for the backward filter. + + Notes + ----- + Typically the return values `x0` and `x1` are not needed by the + caller. The intended use of these return values is in unit tests. + + References + ---------- + .. [1] F. Gustaffson. Determining the initial states in forward-backward + filtering. Transactions on Signal Processing, 46(4):988-992, 1996. + + """ + # In the comments, "Gustafsson's paper" and [1] refer to the + # paper referenced in the docstring. + + b = np.atleast_1d(b) + a = np.atleast_1d(a) + + order = max(len(b), len(a)) - 1 + if order == 0: + # The filter is just scalar multiplication, with no state. + scale = (b[0] / a[0])**2 + y = scale * x + return y, np.array([]), np.array([]) + + if axis != -1 or axis != x.ndim - 1: + # Move the axis containing the data to the end. + x = np.swapaxes(x, axis, x.ndim - 1) + + # n is the number of samples in the data to be filtered. + n = x.shape[-1] + + if irlen is None or n <= 2*irlen: + m = n + else: + m = irlen + + # Create Obs, the observability matrix (called O in the paper). + # This matrix can be interpreted as the operator that propagates + # an arbitrary initial state to the output, assuming the input is + # zero. + # In Gustafsson's paper, the forward and backward filters are not + # necessarily the same, so he has both O_f and O_b. We use the same + # filter in both directions, so we only need O. The same comment + # applies to S below. + Obs = np.zeros((m, order)) + zi = np.zeros(order) + zi[0] = 1 + Obs[:, 0] = lfilter(b, a, np.zeros(m), zi=zi)[0] + for k in range(1, order): + Obs[k:, k] = Obs[:-k, 0] + + # Obsr is O^R (Gustafsson's notation for row-reversed O) + Obsr = Obs[::-1] + + # Create S. S is the matrix that applies the filter to the reversed + # propagated initial conditions. That is, + # out = S.dot(zi) + # is the same as + # tmp, _ = lfilter(b, a, zeros(), zi=zi) # Propagate ICs. + # out = lfilter(b, a, tmp[::-1]) # Reverse and filter. + + # Equations (5) & (6) of [1] + S = lfilter(b, a, Obs[::-1], axis=0) + + # Sr is S^R (row-reversed S) + Sr = S[::-1] + + # M is [(S^R - O), (O^R - S)] + if m == n: + M = np.hstack((Sr - Obs, Obsr - S)) + else: + # Matrix described in section IV of [1]. + M = np.zeros((2*m, 2*order)) + M[:m, :order] = Sr - Obs + M[m:, order:] = Obsr - S + + # Naive forward-backward and backward-forward filters. + # These have large transients because the filters use zero initial + # conditions. + y_f = lfilter(b, a, x) + y_fb = lfilter(b, a, y_f[..., ::-1])[..., ::-1] + + y_b = lfilter(b, a, x[..., ::-1])[..., ::-1] + y_bf = lfilter(b, a, y_b) + + delta_y_bf_fb = y_bf - y_fb + if m == n: + delta = delta_y_bf_fb + else: + start_m = delta_y_bf_fb[..., :m] + end_m = delta_y_bf_fb[..., -m:] + delta = np.concatenate((start_m, end_m), axis=-1) + + # ic_opt holds the "optimal" initial conditions. + # The following code computes the result shown in the formula + # of the paper between equations (6) and (7). + if delta.ndim == 1: + ic_opt = linalg.lstsq(M, delta)[0] + else: + # Reshape delta so it can be used as an array of multiple + # right-hand-sides in linalg.lstsq. 
+ delta2d = delta.reshape(-1, delta.shape[-1]).T + ic_opt0 = linalg.lstsq(M, delta2d)[0].T + ic_opt = ic_opt0.reshape(delta.shape[:-1] + (M.shape[-1],)) + + # Now compute the filtered signal using equation (7) of [1]. + # First, form [S^R, O^R] and call it W. + if m == n: + W = np.hstack((Sr, Obsr)) + else: + W = np.zeros((2*m, 2*order)) + W[:m, :order] = Sr + W[m:, order:] = Obsr + + # Equation (7) of [1] says + # Y_fb^opt = Y_fb^0 + W * [x_0^opt; x_{N-1}^opt] + # `wic` is (almost) the product on the right. + # W has shape (m, 2*order), and ic_opt has shape (..., 2*order), + # so we can't use W.dot(ic_opt). Instead, we dot ic_opt with W.T, + # so wic has shape (..., m). + wic = ic_opt.dot(W.T) + + # `wic` is "almost" the product of W and the optimal ICs in equation + # (7)--if we're using a truncated impulse response (m < n), `wic` + # contains only the adjustments required for the ends of the signal. + # Here we form y_opt, taking this into account if necessary. + y_opt = y_fb + if m == n: + y_opt += wic + else: + y_opt[..., :m] += wic[..., :m] + y_opt[..., -m:] += wic[..., -m:] + + x0 = ic_opt[..., :order] + x1 = ic_opt[..., -order:] + if axis != -1 or axis != x.ndim - 1: + # Restore the data axis to its original position. + x0 = np.swapaxes(x0, axis, x.ndim - 1) + x1 = np.swapaxes(x1, axis, x.ndim - 1) + y_opt = np.swapaxes(y_opt, axis, x.ndim - 1) + + return y_opt, x0, x1 + + +def filtfilt(b, a, x, axis=-1, padtype='odd', padlen=None, method='pad', + irlen=None): + """ + Apply a digital filter forward and backward to a signal. + + This function applies a linear digital filter twice, once forward and + once backwards. The combined filter has zero phase and a filter order + twice that of the original. + + The function provides options for handling the edges of the signal. + + The function `sosfiltfilt` (and filter design using ``output='sos'``) + should be preferred over `filtfilt` for most filtering tasks, as + second-order sections have fewer numerical problems. + + Parameters + ---------- + b : (N,) array_like + The numerator coefficient vector of the filter. + a : (N,) array_like + The denominator coefficient vector of the filter. If ``a[0]`` + is not 1, then both `a` and `b` are normalized by ``a[0]``. + x : array_like + The array of data to be filtered. + axis : int, optional + The axis of `x` to which the filter is applied. + Default is -1. + padtype : str or None, optional + Must be 'odd', 'even', 'constant', or None. This determines the + type of extension to use for the padded signal to which the filter + is applied. If `padtype` is None, no padding is used. The default + is 'odd'. + padlen : int or None, optional + The number of elements by which to extend `x` at both ends of + `axis` before applying the filter. This value must be less than + ``x.shape[axis] - 1``. ``padlen=0`` implies no padding. + The default value is ``3 * max(len(a), len(b))``. + method : str, optional + Determines the method for handling the edges of the signal, either + "pad" or "gust". When `method` is "pad", the signal is padded; the + type of padding is determined by `padtype` and `padlen`, and `irlen` + is ignored. When `method` is "gust", Gustafsson's method is used, + and `padtype` and `padlen` are ignored. + irlen : int or None, optional + When `method` is "gust", `irlen` specifies the length of the + impulse response of the filter. If `irlen` is None, no part + of the impulse response is ignored. For a long signal, specifying + `irlen` can significantly improve the performance of the filter. 
+ + Returns + ------- + y : ndarray + The filtered output with the same shape as `x`. + + See Also + -------- + sosfiltfilt, lfilter_zi, lfilter, lfiltic, savgol_filter, sosfilt + + Notes + ----- + When `method` is "pad", the function pads the data along the given axis + in one of three ways: odd, even or constant. The odd and even extensions + have the corresponding symmetry about the end point of the data. The + constant extension extends the data with the values at the end points. On + both the forward and backward passes, the initial condition of the + filter is found by using `lfilter_zi` and scaling it by the end point of + the extended data. + + When `method` is "gust", Gustafsson's method [1]_ is used. Initial + conditions are chosen for the forward and backward passes so that the + forward-backward filter gives the same result as the backward-forward + filter. + + The option to use Gustaffson's method was added in scipy version 0.16.0. + + References + ---------- + .. [1] F. Gustaffson, "Determining the initial states in forward-backward + filtering", Transactions on Signal Processing, Vol. 46, pp. 988-992, + 1996. + + Examples + -------- + The examples will use several functions from `scipy.signal`. + + >>> import numpy as np + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + + First we create a one second signal that is the sum of two pure sine + waves, with frequencies 5 Hz and 250 Hz, sampled at 2000 Hz. + + >>> t = np.linspace(0, 1.0, 2001) + >>> xlow = np.sin(2 * np.pi * 5 * t) + >>> xhigh = np.sin(2 * np.pi * 250 * t) + >>> x = xlow + xhigh + + Now create a lowpass Butterworth filter with a cutoff of 0.125 times + the Nyquist frequency, or 125 Hz, and apply it to ``x`` with `filtfilt`. + The result should be approximately ``xlow``, with no phase shift. + + >>> b, a = signal.butter(8, 0.125) + >>> y = signal.filtfilt(b, a, x, padlen=150) + >>> np.abs(y - xlow).max() + 9.1086182074789912e-06 + + We get a fairly clean result for this artificial example because + the odd extension is exact, and with the moderately long padding, + the filter's transients have dissipated by the time the actual data + is reached. In general, transient effects at the edges are + unavoidable. + + The following example demonstrates the option ``method="gust"``. + + First, create a filter. + + >>> b, a = signal.ellip(4, 0.01, 120, 0.125) # Filter to be applied. + + `sig` is a random input signal to be filtered. + + >>> rng = np.random.default_rng() + >>> n = 60 + >>> sig = rng.standard_normal(n)**3 + 3*rng.standard_normal(n).cumsum() + + Apply `filtfilt` to `sig`, once using the Gustafsson method, and + once using padding, and plot the results for comparison. + + >>> fgust = signal.filtfilt(b, a, sig, method="gust") + >>> fpad = signal.filtfilt(b, a, sig, padlen=50) + >>> plt.plot(sig, 'k-', label='input') + >>> plt.plot(fgust, 'b-', linewidth=4, label='gust') + >>> plt.plot(fpad, 'c-', linewidth=1.5, label='pad') + >>> plt.legend(loc='best') + >>> plt.show() + + The `irlen` argument can be used to improve the performance + of Gustafsson's method. + + Estimate the impulse response length of the filter. + + >>> z, p, k = signal.tf2zpk(b, a) + >>> eps = 1e-9 + >>> r = np.max(np.abs(p)) + >>> approx_impulse_len = int(np.ceil(np.log(eps) / np.log(r))) + >>> approx_impulse_len + 137 + + Apply the filter to a longer signal, with and without the `irlen` + argument. The difference between `y1` and `y2` is small. For long + signals, using `irlen` gives a significant performance improvement. 
+ + >>> x = rng.standard_normal(4000) + >>> y1 = signal.filtfilt(b, a, x, method='gust') + >>> y2 = signal.filtfilt(b, a, x, method='gust', irlen=approx_impulse_len) + >>> print(np.max(np.abs(y1 - y2))) + 2.875334415008979e-10 + + """ + b = np.atleast_1d(b) + a = np.atleast_1d(a) + x = np.asarray(x) + + if method not in ["pad", "gust"]: + raise ValueError("method must be 'pad' or 'gust'.") + + if method == "gust": + y, z1, z2 = _filtfilt_gust(b, a, x, axis=axis, irlen=irlen) + return y + + # method == "pad" + edge, ext = _validate_pad(padtype, padlen, x, axis, + ntaps=max(len(a), len(b))) + + # Get the steady state of the filter's step response. + zi = lfilter_zi(b, a) + + # Reshape zi and create x0 so that zi*x0 broadcasts + # to the correct value for the 'zi' keyword argument + # to lfilter. + zi_shape = [1] * x.ndim + zi_shape[axis] = zi.size + zi = np.reshape(zi, zi_shape) + x0 = axis_slice(ext, stop=1, axis=axis) + + # Forward filter. + (y, zf) = lfilter(b, a, ext, axis=axis, zi=zi * x0) + + # Backward filter. + # Create y0 so zi*y0 broadcasts appropriately. + y0 = axis_slice(y, start=-1, axis=axis) + (y, zf) = lfilter(b, a, axis_reverse(y, axis=axis), axis=axis, zi=zi * y0) + + # Reverse y. + y = axis_reverse(y, axis=axis) + + if edge > 0: + # Slice the actual signal from the extended signal. + y = axis_slice(y, start=edge, stop=-edge, axis=axis) + + return y + + +def _validate_pad(padtype, padlen, x, axis, ntaps): + """Helper to validate padding for filtfilt""" + if padtype not in ['even', 'odd', 'constant', None]: + raise ValueError(f"Unknown value '{padtype}' given to padtype. " + "padtype must be 'even', 'odd', 'constant', or None.") + + if padtype is None: + padlen = 0 + + if padlen is None: + # Original padding; preserved for backwards compatibility. + edge = ntaps * 3 + else: + edge = padlen + + # x's 'axis' dimension must be bigger than edge. + if x.shape[axis] <= edge: + raise ValueError("The length of the input vector x must be greater " + "than padlen, which is %d." % edge) + + if padtype is not None and edge > 0: + # Make an extension of length `edge` at each + # end of the input array. + if padtype == 'even': + ext = even_ext(x, edge, axis=axis) + elif padtype == 'odd': + ext = odd_ext(x, edge, axis=axis) + else: + ext = const_ext(x, edge, axis=axis) + else: + ext = x + return edge, ext + + +def _validate_x(x): + x = np.asarray(x) + if x.ndim == 0: + raise ValueError('x must be at least 1-D') + return x + + +def sosfilt(sos, x, axis=-1, zi=None): + """ + Filter data along one dimension using cascaded second-order sections. + + Filter a data sequence, `x`, using a digital IIR filter defined by + `sos`. + + Parameters + ---------- + sos : array_like + Array of second-order filter coefficients, must have shape + ``(n_sections, 6)``. Each row corresponds to a second-order + section, with the first three columns providing the numerator + coefficients and the last three providing the denominator + coefficients. + x : array_like + An N-dimensional input array. + axis : int, optional + The axis of the input data array along which to apply the + linear filter. The filter is applied to each subarray along + this axis. Default is -1. + zi : array_like, optional + Initial conditions for the cascaded filter delays. It is a (at + least 2D) vector of shape ``(n_sections, ..., 2, ...)``, where + ``..., 2, ...`` denotes the shape of `x`, but with ``x.shape[axis]`` + replaced by 2. If `zi` is None or is not given then initial rest + (i.e. all zeros) is assumed. 
+ Note that these initial conditions are *not* the same as the initial + conditions given by `lfiltic` or `lfilter_zi`. + + Returns + ------- + y : ndarray + The output of the digital filter. + zf : ndarray, optional + If `zi` is None, this is not returned, otherwise, `zf` holds the + final filter delay values. + + See Also + -------- + zpk2sos, sos2zpk, sosfilt_zi, sosfiltfilt, freqz_sos + + Notes + ----- + The filter function is implemented as a series of second-order filters + with direct-form II transposed structure. It is designed to minimize + numerical precision errors for high-order filters. + + .. versionadded:: 0.16.0 + + Examples + -------- + Plot a 13th-order filter's impulse response using both `lfilter` and + `sosfilt`, showing the instability that results from trying to do a + 13th-order filter in a single stage (the numerical error pushes some poles + outside of the unit circle): + + >>> import matplotlib.pyplot as plt + >>> from scipy import signal + >>> b, a = signal.ellip(13, 0.009, 80, 0.05, output='ba') + >>> sos = signal.ellip(13, 0.009, 80, 0.05, output='sos') + >>> x = signal.unit_impulse(700) + >>> y_tf = signal.lfilter(b, a, x) + >>> y_sos = signal.sosfilt(sos, x) + >>> plt.plot(y_tf, 'r', label='TF') + >>> plt.plot(y_sos, 'k', label='SOS') + >>> plt.legend(loc='best') + >>> plt.show() + + """ + _reject_objects(sos, 'sosfilt') + _reject_objects(x, 'sosfilt') + if zi is not None: + _reject_objects(zi, 'sosfilt') + + x = _validate_x(x) + sos, n_sections = _validate_sos(sos) + x_zi_shape = list(x.shape) + x_zi_shape[axis] = 2 + x_zi_shape = tuple([n_sections] + x_zi_shape) + inputs = [sos, x] + if zi is not None: + inputs.append(np.asarray(zi)) + dtype = np.result_type(*inputs) + if dtype.char not in 'fdgFDGO': + raise NotImplementedError(f"input type '{dtype}' not supported") + if zi is not None: + zi = np.array(zi, dtype) # make a copy so that we can operate in place + if zi.shape != x_zi_shape: + raise ValueError('Invalid zi shape. With axis=%r, an input with ' + 'shape %r, and an sos array with %d sections, zi ' + 'must have shape %r, got %r.' % + (axis, x.shape, n_sections, x_zi_shape, zi.shape)) + return_zi = True + else: + zi = np.zeros(x_zi_shape, dtype=dtype) + return_zi = False + axis = axis % x.ndim # make positive + x = np.moveaxis(x, axis, -1) + zi = np.moveaxis(zi, [0, axis + 1], [-2, -1]) + x_shape, zi_shape = x.shape, zi.shape + x = np.reshape(x, (-1, x.shape[-1])) + x = np.array(x, dtype, order='C') # make a copy, can modify in place + zi = np.ascontiguousarray(np.reshape(zi, (-1, n_sections, 2))) + sos = sos.astype(dtype, copy=False) + _sosfilt(sos, x, zi) + x.shape = x_shape + x = np.moveaxis(x, -1, axis) + if return_zi: + zi.shape = zi_shape + zi = np.moveaxis(zi, [-2, -1], [0, axis + 1]) + out = (x, zi) + else: + out = x + return out + + +def sosfiltfilt(sos, x, axis=-1, padtype='odd', padlen=None): + """ + A forward-backward digital filter using cascaded second-order sections. + + See `filtfilt` for more complete information about this method. + + Parameters + ---------- + sos : array_like + Array of second-order filter coefficients, must have shape + ``(n_sections, 6)``. Each row corresponds to a second-order + section, with the first three columns providing the numerator + coefficients and the last three providing the denominator + coefficients. + x : array_like + The array of data to be filtered. + axis : int, optional + The axis of `x` to which the filter is applied. + Default is -1. 
+ padtype : str or None, optional + Must be 'odd', 'even', 'constant', or None. This determines the + type of extension to use for the padded signal to which the filter + is applied. If `padtype` is None, no padding is used. The default + is 'odd'. + padlen : int or None, optional + The number of elements by which to extend `x` at both ends of + `axis` before applying the filter. This value must be less than + ``x.shape[axis] - 1``. ``padlen=0`` implies no padding. + The default value is:: + + 3 * (2 * len(sos) + 1 - min((sos[:, 2] == 0).sum(), + (sos[:, 5] == 0).sum())) + + The extra subtraction at the end attempts to compensate for poles + and zeros at the origin (e.g. for odd-order filters) to yield + equivalent estimates of `padlen` to those of `filtfilt` for + second-order section filters built with `scipy.signal` functions. + + Returns + ------- + y : ndarray + The filtered output with the same shape as `x`. + + See Also + -------- + filtfilt, sosfilt, sosfilt_zi, freqz_sos + + Notes + ----- + .. versionadded:: 0.18.0 + + Examples + -------- + >>> import numpy as np + >>> from scipy.signal import sosfiltfilt, butter + >>> import matplotlib.pyplot as plt + >>> rng = np.random.default_rng() + + Create an interesting signal to filter. + + >>> n = 201 + >>> t = np.linspace(0, 1, n) + >>> x = 1 + (t < 0.5) - 0.25*t**2 + 0.05*rng.standard_normal(n) + + Create a lowpass Butterworth filter, and use it to filter `x`. + + >>> sos = butter(4, 0.125, output='sos') + >>> y = sosfiltfilt(sos, x) + + For comparison, apply an 8th order filter using `sosfilt`. The filter + is initialized using the mean of the first four values of `x`. + + >>> from scipy.signal import sosfilt, sosfilt_zi + >>> sos8 = butter(8, 0.125, output='sos') + >>> zi = x[:4].mean() * sosfilt_zi(sos8) + >>> y2, zo = sosfilt(sos8, x, zi=zi) + + Plot the results. Note that the phase of `y` matches the input, while + `y2` has a significant phase delay. + + >>> plt.plot(t, x, alpha=0.5, label='x(t)') + >>> plt.plot(t, y, label='y(t)') + >>> plt.plot(t, y2, label='y2(t)') + >>> plt.legend(framealpha=1, shadow=True) + >>> plt.grid(alpha=0.25) + >>> plt.xlabel('t') + >>> plt.show() + + """ + sos, n_sections = _validate_sos(sos) + x = _validate_x(x) + + # `method` is "pad"... + ntaps = 2 * n_sections + 1 + ntaps -= min((sos[:, 2] == 0).sum(), (sos[:, 5] == 0).sum()) + edge, ext = _validate_pad(padtype, padlen, x, axis, + ntaps=ntaps) + + # These steps follow the same form as filtfilt with modifications + zi = sosfilt_zi(sos) # shape (n_sections, 2) --> (n_sections, ..., 2, ...) + zi_shape = [1] * x.ndim + zi_shape[axis] = 2 + zi.shape = [n_sections] + zi_shape + x_0 = axis_slice(ext, stop=1, axis=axis) + (y, zf) = sosfilt(sos, ext, axis=axis, zi=zi * x_0) + y_0 = axis_slice(y, start=-1, axis=axis) + (y, zf) = sosfilt(sos, axis_reverse(y, axis=axis), axis=axis, zi=zi * y_0) + y = axis_reverse(y, axis=axis) + if edge > 0: + y = axis_slice(y, start=edge, stop=-edge, axis=axis) + return y + + +def decimate(x, q, n=None, ftype='iir', axis=-1, zero_phase=True): + """ + Downsample the signal after applying an anti-aliasing filter. + + By default, an order 8 Chebyshev type I filter is used. A 30 point FIR + filter with Hamming window is used if `ftype` is 'fir'. + + Parameters + ---------- + x : array_like + The signal to be downsampled, as an N-dimensional array. + q : int + The downsampling factor. When using IIR downsampling, it is recommended + to call `decimate` multiple times for downsampling factors higher than + 13. 
+ n : int, optional + The order of the filter (1 less than the length for 'fir'). Defaults to + 8 for 'iir' and 20 times the downsampling factor for 'fir'. + ftype : str {'iir', 'fir'} or ``dlti`` instance, optional + If 'iir' or 'fir', specifies the type of lowpass filter. If an instance + of an `dlti` object, uses that object to filter before downsampling. + axis : int, optional + The axis along which to decimate. + zero_phase : bool, optional + Prevent phase shift by filtering with `filtfilt` instead of `lfilter` + when using an IIR filter, and shifting the outputs back by the filter's + group delay when using an FIR filter. The default value of ``True`` is + recommended, since a phase shift is generally not desired. + + .. versionadded:: 0.18.0 + + Returns + ------- + y : ndarray + The down-sampled signal. + + See Also + -------- + resample : Resample up or down using the FFT method. + resample_poly : Resample using polyphase filtering and an FIR filter. + + Notes + ----- + The ``zero_phase`` keyword was added in 0.18.0. + The possibility to use instances of ``dlti`` as ``ftype`` was added in + 0.18.0. + + Examples + -------- + + >>> import numpy as np + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + + Define wave parameters. + + >>> wave_duration = 3 + >>> sample_rate = 100 + >>> freq = 2 + >>> q = 5 + + Calculate number of samples. + + >>> samples = wave_duration*sample_rate + >>> samples_decimated = int(samples/q) + + Create cosine wave. + + >>> x = np.linspace(0, wave_duration, samples, endpoint=False) + >>> y = np.cos(x*np.pi*freq*2) + + Decimate cosine wave. + + >>> ydem = signal.decimate(y, q) + >>> xnew = np.linspace(0, wave_duration, samples_decimated, endpoint=False) + + Plot original and decimated waves. + + >>> plt.plot(x, y, '.-', xnew, ydem, 'o-') + >>> plt.xlabel('Time, Seconds') + >>> plt.legend(['data', 'decimated'], loc='best') + >>> plt.show() + + """ + + x = np.asarray(x) + q = operator.index(q) + + if n is not None: + n = operator.index(n) + + result_type = x.dtype + if not np.issubdtype(result_type, np.inexact) \ + or result_type.type == np.float16: + # upcast integers and float16 to float64 + result_type = np.float64 + + if ftype == 'fir': + if n is None: + half_len = 10 * q # reasonable cutoff for our sinc-like function + n = 2 * half_len + b, a = firwin(n+1, 1. / q, window='hamming'), 1. 
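+        # The filter is pure FIR, so the denominator is simply 1.0; the casts
+        # below put both coefficient arrays into the working dtype
+        # (float64 when the input is integer or float16).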
+ b = np.asarray(b, dtype=result_type) + a = np.asarray(a, dtype=result_type) + elif ftype == 'iir': + iir_use_sos = True + if n is None: + n = 8 + sos = cheby1(n, 0.05, 0.8 / q, output='sos') + sos = np.asarray(sos, dtype=result_type) + elif isinstance(ftype, dlti): + system = ftype._as_zpk() + if system.poles.shape[0] == 0: + # FIR + system = ftype._as_tf() + b, a = system.num, system.den + ftype = 'fir' + elif (any(np.iscomplex(system.poles)) + or any(np.iscomplex(system.poles)) + or np.iscomplex(system.gain)): + # sosfilt & sosfiltfilt don't handle complex coeffs + iir_use_sos = False + system = ftype._as_tf() + b, a = system.num, system.den + else: + iir_use_sos = True + sos = zpk2sos(system.zeros, system.poles, system.gain) + sos = np.asarray(sos, dtype=result_type) + else: + raise ValueError('invalid ftype') + + sl = [slice(None)] * x.ndim + + if ftype == 'fir': + b = b / a + if zero_phase: + y = resample_poly(x, 1, q, axis=axis, window=b) + else: + # upfirdn is generally faster than lfilter by a factor equal to the + # downsampling factor, since it only calculates the needed outputs + n_out = x.shape[axis] // q + bool(x.shape[axis] % q) + y = upfirdn(b, x, up=1, down=q, axis=axis) + sl[axis] = slice(None, n_out, None) + + else: # IIR case + if zero_phase: + if iir_use_sos: + y = sosfiltfilt(sos, x, axis=axis) + else: + y = filtfilt(b, a, x, axis=axis) + else: + if iir_use_sos: + y = sosfilt(sos, x, axis=axis) + else: + y = lfilter(b, a, x, axis=axis) + + sl[axis] = slice(None, None, q) + + return y[tuple(sl)] diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/_sigtools.cpython-310-x86_64-linux-gnu.so b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/_sigtools.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..9697a8a7dda74e71dadf9522d0ba943d2946a581 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/_sigtools.cpython-310-x86_64-linux-gnu.so differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/_spectral_py.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/_spectral_py.py new file mode 100644 index 0000000000000000000000000000000000000000..5151b2b335172a485b5d13408d35290812cadc43 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/_spectral_py.py @@ -0,0 +1,2291 @@ +"""Tools for spectral analysis. +""" +import numpy as np +import numpy.typing as npt +from scipy import fft as sp_fft +from . import _signaltools +from .windows import get_window +from ._arraytools import const_ext, even_ext, odd_ext, zero_ext +import warnings +from typing import Literal + + +__all__ = ['periodogram', 'welch', 'lombscargle', 'csd', 'coherence', + 'spectrogram', 'stft', 'istft', 'check_COLA', 'check_NOLA'] + + +def lombscargle( + x: npt.ArrayLike, + y: npt.ArrayLike, + freqs: npt.ArrayLike, + precenter: bool = False, + normalize: bool | Literal["power", "normalize", "amplitude"] = False, + *, + weights: npt.NDArray | None = None, + floating_mean: bool = False, +) -> npt.NDArray: + """ + Compute the generalized Lomb-Scargle periodogram. + + The Lomb-Scargle periodogram was developed by Lomb [1]_ and further + extended by Scargle [2]_ to find, and test the significance of weak + periodic signals with uneven temporal sampling. 
The algorithm used + here is based on a weighted least-squares fit of the form + ``y(ω) = a*cos(ω*x) + b*sin(ω*x) + c``, where the fit is calculated for + each frequency independently. This algorithm was developed by Zechmeister + and Kürster which improves the Lomb-Scargle periodogram by enabling + the weighting of individual samples and calculating an unknown y offset + (also called a "floating-mean" model) [3]_. For more details, and practical + considerations, see the excellent reference on the Lomb-Scargle periodogram [4]_. + + When *normalize* is False (or "power") (default) the computed periodogram + is unnormalized, it takes the value ``(A**2) * N/4`` for a harmonic + signal with amplitude A for sufficiently large N. Where N is the length of x or y. + + When *normalize* is True (or "normalize") the computed periodogram is normalized + by the residuals of the data around a constant reference model (at zero). + + When *normalize* is "amplitude" the computed periodogram is the complex + representation of the amplitude and phase. + + Input arrays should be 1-D of a real floating data type, which are converted into + float64 arrays before processing. + + Parameters + ---------- + x : array_like + Sample times. + y : array_like + Measurement values. Values are assumed to have a baseline of ``y = 0``. If + there is a possibility of a y offset, it is recommended to set `floating_mean` + to True. + freqs : array_like + Angular frequencies (e.g., having unit rad/s=2π/s for `x` having unit s) for + output periodogram. Frequencies are normally >= 0, as any peak at ``-freq`` will + also exist at ``+freq``. + precenter : bool, optional + Pre-center measurement values by subtracting the mean, if True. This is + a legacy parameter and unnecessary if `floating_mean` is True. + normalize : bool | str, optional + Compute normalized or complex (amplitude + phase) periodogram. + Valid options are: ``False``/``"power"``, ``True``/``"normalize"``, or + ``"amplitude"``. + weights : array_like, optional + Weights for each sample. Weights must be nonnegative. + floating_mean : bool, optional + Determines a y offset for each frequency independently, if True. + Else the y offset is assumed to be `0`. + + Returns + ------- + pgram : array_like + Lomb-Scargle periodogram. + + Raises + ------ + ValueError + If any of the input arrays x, y, freqs, or weights are not 1D, or if any are + zero length. Or, if the input arrays x, y, and weights do not have the same + shape as each other. + ValueError + If any weight is < 0, or the sum of the weights is <= 0. + ValueError + If the normalize parameter is not one of the allowed options. + + See Also + -------- + periodogram: Power spectral density using a periodogram + welch: Power spectral density by Welch's method + csd: Cross spectral density by Welch's method + + Notes + ----- + The algorithm used will not automatically account for any unknown y offset, unless + floating_mean is True. Therefore, for most use cases, if there is a possibility of + a y offset, it is recommended to set floating_mean to True. If precenter is True, + it performs the operation ``y -= y.mean()``. However, precenter is a legacy + parameter, and unnecessary when floating_mean is True. Furthermore, the mean + removed by precenter does not account for sample weights, nor will it correct for + any bias due to consistently missing observations at peaks and/or troughs. 
When the + normalize parameter is "amplitude", for any frequency in freqs that is below + ``(2*pi)/(x.max() - x.min())``, the predicted amplitude will tend towards infinity. + The concept of a "Nyquist frequency" limit (see Nyquist-Shannon sampling theorem) + is not generally applicable to unevenly sampled data. Therefore, with unevenly + sampled data, valid frequencies in freqs can often be much higher than expected. + + References + ---------- + .. [1] N.R. Lomb "Least-squares frequency analysis of unequally spaced + data", Astrophysics and Space Science, vol 39, pp. 447-462, 1976 + :doi:`10.1007/bf00648343` + + .. [2] J.D. Scargle "Studies in astronomical time series analysis. II - + Statistical aspects of spectral analysis of unevenly spaced data", + The Astrophysical Journal, vol 263, pp. 835-853, 1982 + :doi:`10.1086/160554` + + .. [3] M. Zechmeister and M. Kürster, "The generalised Lomb-Scargle periodogram. + A new formalism for the floating-mean and Keplerian periodograms," + Astronomy and Astrophysics, vol. 496, pp. 577-584, 2009 + :doi:`10.1051/0004-6361:200811296` + + .. [4] J.T. VanderPlas, "Understanding the Lomb-Scargle Periodogram," + The Astrophysical Journal Supplement Series, vol. 236, no. 1, p. 16, + May 2018 + :doi:`10.3847/1538-4365/aab766` + + + Examples + -------- + >>> import numpy as np + >>> rng = np.random.default_rng() + + First define some input parameters for the signal: + + >>> A = 2. # amplitude + >>> c = 2. # offset + >>> w0 = 1. # rad/sec + >>> nin = 150 + >>> nout = 1002 + + Randomly generate sample times: + + >>> x = rng.uniform(0, 10*np.pi, nin) + + Plot a sine wave for the selected times: + + >>> y = A * np.cos(w0*x) + c + + Define the array of frequencies for which to compute the periodogram: + + >>> w = np.linspace(0.25, 10, nout) + + Calculate Lomb-Scargle periodogram for each of the normalize options: + + >>> from scipy.signal import lombscargle + >>> pgram_power = lombscargle(x, y, w, normalize=False) + >>> pgram_norm = lombscargle(x, y, w, normalize=True) + >>> pgram_amp = lombscargle(x, y, w, normalize='amplitude') + ... + >>> pgram_power_f = lombscargle(x, y, w, normalize=False, floating_mean=True) + >>> pgram_norm_f = lombscargle(x, y, w, normalize=True, floating_mean=True) + >>> pgram_amp_f = lombscargle(x, y, w, normalize='amplitude', floating_mean=True) + + Now make a plot of the input data: + + >>> import matplotlib.pyplot as plt + >>> fig, (ax_t, ax_p, ax_n, ax_a) = plt.subplots(4, 1, figsize=(5, 6)) + >>> ax_t.plot(x, y, 'b+') + >>> ax_t.set_xlabel('Time [s]') + >>> ax_t.set_ylabel('Amplitude') + + Then plot the periodogram for each of the normalize options, as well as with and + without floating_mean=True: + + >>> ax_p.plot(w, pgram_power, label='default') + >>> ax_p.plot(w, pgram_power_f, label='floating_mean=True') + >>> ax_p.set_xlabel('Angular frequency [rad/s]') + >>> ax_p.set_ylabel('Power') + >>> ax_p.legend(prop={'size': 7}) + ... + >>> ax_n.plot(w, pgram_norm, label='default') + >>> ax_n.plot(w, pgram_norm_f, label='floating_mean=True') + >>> ax_n.set_xlabel('Angular frequency [rad/s]') + >>> ax_n.set_ylabel('Normalized') + >>> ax_n.legend(prop={'size': 7}) + ... + >>> ax_a.plot(w, np.abs(pgram_amp), label='default') + >>> ax_a.plot(w, np.abs(pgram_amp_f), label='floating_mean=True') + >>> ax_a.set_xlabel('Angular frequency [rad/s]') + >>> ax_a.set_ylabel('Amplitude') + >>> ax_a.legend(prop={'size': 7}) + ... 
+ >>> plt.tight_layout() + >>> plt.show() + + """ + + # if no weights are provided, assume all data points are equally important + if weights is None: + weights = np.ones_like(y, dtype=np.float64) + else: + # if provided, make sure weights is an array and cast to float64 + weights = np.asarray(weights, dtype=np.float64) + + # make sure other inputs are arrays and cast to float64 + # done before validation, in case they were not arrays + x = np.asarray(x, dtype=np.float64) + y = np.asarray(y, dtype=np.float64) + freqs = np.asarray(freqs, dtype=np.float64) + + # validate input shapes + if not (x.ndim == 1 and x.size > 0 and x.shape == y.shape == weights.shape): + raise ValueError("Parameters x, y, weights must be 1-D arrays of " + "equal non-zero length!") + if not (freqs.ndim == 1 and freqs.size > 0): + raise ValueError("Parameter freqs must be a 1-D array of non-zero length!") + + # validate weights + if not (np.all(weights >= 0) and np.sum(weights) > 0): + raise ValueError("Parameter weights must have only non-negative entries " + "which sum to a positive value!") + + # validate normalize parameter + if isinstance(normalize, bool): + # if bool, convert to str literal + normalize = "normalize" if normalize else "power" + + if normalize not in ["power", "normalize", "amplitude"]: + raise ValueError( + "Normalize must be: False (or 'power'), True (or 'normalize'), " + "or 'amplitude'." + ) + + # weight vector must sum to 1 + weights *= 1.0 / weights.sum() + + # if requested, perform precenter + if precenter: + y -= y.mean() + + # transform arrays + # row vector + freqs = freqs.reshape(1, -1) + # column vectors + x = x.reshape(-1, 1) + y = y.reshape(-1, 1) + weights = weights.reshape(-1, 1) + + # store frequent intermediates + weights_y = weights * y + freqst = freqs * x + coswt = np.cos(freqst) + sinwt = np.sin(freqst) + + Y = np.dot(weights.T, y) # Eq. 7 + CC = np.dot(weights.T, coswt * coswt) # Eq. 13 + SS = 1.0 - CC # trig identity: S^2 = 1 - C^2 Eq.14 + CS = np.dot(weights.T, coswt * sinwt) # Eq. 15 + + if floating_mean: + C = np.dot(weights.T, coswt) # Eq. 8 + S = np.dot(weights.T, sinwt) # Eq. 9 + CC -= C * C # Eq. 13 + SS -= S * S # Eq. 14 + CS -= C * S # Eq. 15 + + # calculate tau (phase offset to eliminate CS variable) + tau = 0.5 * np.arctan2(2.0 * CS, CC - SS) # Eq. 19 + freqst_tau = freqst - tau + + # coswt and sinwt are now offset by tau, which eliminates CS + coswt_tau = np.cos(freqst_tau) + sinwt_tau = np.sin(freqst_tau) + + YC = np.dot(weights_y.T, coswt_tau) # Eq. 11 + YS = np.dot(weights_y.T, sinwt_tau) # Eq. 12 + CC = np.dot(weights.T, coswt_tau * coswt_tau) # Eq. 13, CC range is [0.5, 1.0] + SS = 1.0 - CC # trig identity: S^2 = 1 - C^2 Eq. 14, SS range is [0.0, 0.5] + + if floating_mean: + C = np.dot(weights.T, coswt_tau) # Eq. 8 + S = np.dot(weights.T, sinwt_tau) # Eq. 9 + YC -= Y * C # Eq. 11 + YS -= Y * S # Eq. 12 + CC -= C * C # Eq. 13, CC range is now [0.0, 1.0] + SS -= S * S # Eq. 14, SS range is now [0.0, 0.5] + + # to prevent division by zero errors with a and b, as well as correcting for + # numerical precision errors that lead to CC or SS being approximately -0.0, + # make sure CC and SS are both > 0 + epsneg = np.finfo(dtype=y.dtype).epsneg + CC[CC < epsneg] = epsneg + SS[SS < epsneg] = epsneg + + # calculate a and b + # where: y(w) = a*cos(w) + b*sin(w) + c + a = YC / CC # Eq. A.4 and 6, eliminating CS + b = YS / SS # Eq. 
A.4 and 6, eliminating CS + # c = Y - a * C - b * S + + # store final value as power in A^2 (i.e., (y units)^2) + pgram = 2.0 * (a * YC + b * YS) + + # squeeze back to a vector + pgram = np.squeeze(pgram) + + if normalize == "power": # (default) + # return the legacy power units ((A**2) * N/4) + + pgram *= float(x.shape[0]) / 4.0 + + elif normalize == "normalize": + # return the normalized power (power at current frequency wrt the entire signal) + # range will be [0, 1] + + YY = np.dot(weights_y.T, y) # Eq. 10 + if floating_mean: + YY -= Y * Y # Eq. 10 + + pgram *= 0.5 / np.squeeze(YY) # Eq. 20 + + else: # normalize == "amplitude": + # return the complex representation of the best-fit amplitude and phase + + # squeeze back to vectors + a = np.squeeze(a) + b = np.squeeze(b) + tau = np.squeeze(tau) + + # calculate the complex representation, and correct for tau rotation + pgram = (a + 1j * b) * np.exp(1j * tau) + + return pgram + + +def periodogram(x, fs=1.0, window='boxcar', nfft=None, detrend='constant', + return_onesided=True, scaling='density', axis=-1): + """ + Estimate power spectral density using a periodogram. + + Parameters + ---------- + x : array_like + Time series of measurement values + fs : float, optional + Sampling frequency of the `x` time series. Defaults to 1.0. + window : str or tuple or array_like, optional + Desired window to use. If `window` is a string or tuple, it is + passed to `get_window` to generate the window values, which are + DFT-even by default. See `get_window` for a list of windows and + required parameters. If `window` is array_like it will be used + directly as the window and its length must be equal to the length + of the axis over which the periodogram is computed. Defaults + to 'boxcar'. + nfft : int, optional + Length of the FFT used. If `None` the length of `x` will be + used. + detrend : str or function or `False`, optional + Specifies how to detrend each segment. If `detrend` is a + string, it is passed as the `type` argument to the `detrend` + function. If it is a function, it takes a segment and returns a + detrended segment. If `detrend` is `False`, no detrending is + done. Defaults to 'constant'. + return_onesided : bool, optional + If `True`, return a one-sided spectrum for real data. If + `False` return a two-sided spectrum. Defaults to `True`, but for + complex data, a two-sided spectrum is always returned. + scaling : { 'density', 'spectrum' }, optional + Selects between computing the power spectral density ('density') + where `Pxx` has units of V**2/Hz and computing the squared magnitude + spectrum ('spectrum') where `Pxx` has units of V**2, if `x` + is measured in V and `fs` is measured in Hz. Defaults to + 'density' + axis : int, optional + Axis along which the periodogram is computed; the default is + over the last axis (i.e. ``axis=-1``). + + Returns + ------- + f : ndarray + Array of sample frequencies. + Pxx : ndarray + Power spectral density or power spectrum of `x`. + + See Also + -------- + welch: Estimate power spectral density using Welch's method + lombscargle: Lomb-Scargle periodogram for unevenly sampled data + + Notes + ----- + Consult the :ref:`tutorial_SpectralAnalysis` section of the :ref:`user_guide` + for a discussion of the scalings of the power spectral density and + the magnitude (squared) spectrum. + + .. 
versionadded:: 0.12.0 + + Examples + -------- + >>> import numpy as np + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + >>> rng = np.random.default_rng() + + Generate a test signal, a 2 Vrms sine wave at 1234 Hz, corrupted by + 0.001 V**2/Hz of white noise sampled at 10 kHz. + + >>> fs = 10e3 + >>> N = 1e5 + >>> amp = 2*np.sqrt(2) + >>> freq = 1234.0 + >>> noise_power = 0.001 * fs / 2 + >>> time = np.arange(N) / fs + >>> x = amp*np.sin(2*np.pi*freq*time) + >>> x += rng.normal(scale=np.sqrt(noise_power), size=time.shape) + + Compute and plot the power spectral density. + + >>> f, Pxx_den = signal.periodogram(x, fs) + >>> plt.semilogy(f, Pxx_den) + >>> plt.ylim([1e-7, 1e2]) + >>> plt.xlabel('frequency [Hz]') + >>> plt.ylabel('PSD [V**2/Hz]') + >>> plt.show() + + If we average the last half of the spectral density, to exclude the + peak, we can recover the noise power on the signal. + + >>> np.mean(Pxx_den[25000:]) + 0.000985320699252543 + + Now compute and plot the power spectrum. + + >>> f, Pxx_spec = signal.periodogram(x, fs, 'flattop', scaling='spectrum') + >>> plt.figure() + >>> plt.semilogy(f, np.sqrt(Pxx_spec)) + >>> plt.ylim([1e-4, 1e1]) + >>> plt.xlabel('frequency [Hz]') + >>> plt.ylabel('Linear spectrum [V RMS]') + >>> plt.show() + + The peak height in the power spectrum is an estimate of the RMS + amplitude. + + >>> np.sqrt(Pxx_spec.max()) + 2.0077340678640727 + + """ + x = np.asarray(x) + + if x.size == 0: + return np.empty(x.shape), np.empty(x.shape) + + if window is None: + window = 'boxcar' + + if nfft is None: + nperseg = x.shape[axis] + elif nfft == x.shape[axis]: + nperseg = nfft + elif nfft > x.shape[axis]: + nperseg = x.shape[axis] + elif nfft < x.shape[axis]: + s = [np.s_[:]]*len(x.shape) + s[axis] = np.s_[:nfft] + x = x[tuple(s)] + nperseg = nfft + nfft = None + + if hasattr(window, 'size'): + if window.size != nperseg: + raise ValueError('the size of the window must be the same size ' + 'of the input on the specified axis') + + return welch(x, fs=fs, window=window, nperseg=nperseg, noverlap=0, + nfft=nfft, detrend=detrend, return_onesided=return_onesided, + scaling=scaling, axis=axis) + + +def welch(x, fs=1.0, window='hann', nperseg=None, noverlap=None, nfft=None, + detrend='constant', return_onesided=True, scaling='density', + axis=-1, average='mean'): + r""" + Estimate power spectral density using Welch's method. + + Welch's method [1]_ computes an estimate of the power spectral + density by dividing the data into overlapping segments, computing a + modified periodogram for each segment and averaging the + periodograms. + + Parameters + ---------- + x : array_like + Time series of measurement values + fs : float, optional + Sampling frequency of the `x` time series. Defaults to 1.0. + window : str or tuple or array_like, optional + Desired window to use. If `window` is a string or tuple, it is + passed to `get_window` to generate the window values, which are + DFT-even by default. See `get_window` for a list of windows and + required parameters. If `window` is array_like it will be used + directly as the window and its length must be nperseg. Defaults + to a Hann window. + nperseg : int, optional + Length of each segment. Defaults to None, but if window is str or + tuple, is set to 256, and if window is array_like, is set to the + length of the window. + noverlap : int, optional + Number of points to overlap between segments. If `None`, + ``noverlap = nperseg // 2``. Defaults to `None`. 
+ nfft : int, optional + Length of the FFT used, if a zero padded FFT is desired. If + `None`, the FFT length is `nperseg`. Defaults to `None`. + detrend : str or function or `False`, optional + Specifies how to detrend each segment. If `detrend` is a + string, it is passed as the `type` argument to the `detrend` + function. If it is a function, it takes a segment and returns a + detrended segment. If `detrend` is `False`, no detrending is + done. Defaults to 'constant'. + return_onesided : bool, optional + If `True`, return a one-sided spectrum for real data. If + `False` return a two-sided spectrum. Defaults to `True`, but for + complex data, a two-sided spectrum is always returned. + scaling : { 'density', 'spectrum' }, optional + Selects between computing the power spectral density ('density') + where `Pxx` has units of V**2/Hz and computing the squared magnitude + spectrum ('spectrum') where `Pxx` has units of V**2, if `x` + is measured in V and `fs` is measured in Hz. Defaults to + 'density' + axis : int, optional + Axis along which the periodogram is computed; the default is + over the last axis (i.e. ``axis=-1``). + average : { 'mean', 'median' }, optional + Method to use when averaging periodograms. Defaults to 'mean'. + + .. versionadded:: 1.2.0 + + Returns + ------- + f : ndarray + Array of sample frequencies. + Pxx : ndarray + Power spectral density or power spectrum of x. + + See Also + -------- + periodogram: Simple, optionally modified periodogram + lombscargle: Lomb-Scargle periodogram for unevenly sampled data + + Notes + ----- + An appropriate amount of overlap will depend on the choice of window + and on your requirements. For the default Hann window an overlap of + 50% is a reasonable trade off between accurately estimating the + signal power, while not over counting any of the data. Narrower + windows may require a larger overlap. + + If `noverlap` is 0, this method is equivalent to Bartlett's method + [2]_. + + Consult the :ref:`tutorial_SpectralAnalysis` section of the :ref:`user_guide` + for a discussion of the scalings of the power spectral density and + the (squared) magnitude spectrum. + + .. versionadded:: 0.12.0 + + References + ---------- + .. [1] P. Welch, "The use of the fast Fourier transform for the + estimation of power spectra: A method based on time averaging + over short, modified periodograms", IEEE Trans. Audio + Electroacoust. vol. 15, pp. 70-73, 1967. + .. [2] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra", + Biometrika, vol. 37, pp. 1-16, 1950. + + Examples + -------- + >>> import numpy as np + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + >>> rng = np.random.default_rng() + + Generate a test signal, a 2 Vrms sine wave at 1234 Hz, corrupted by + 0.001 V**2/Hz of white noise sampled at 10 kHz. + + >>> fs = 10e3 + >>> N = 1e5 + >>> amp = 2*np.sqrt(2) + >>> freq = 1234.0 + >>> noise_power = 0.001 * fs / 2 + >>> time = np.arange(N) / fs + >>> x = amp*np.sin(2*np.pi*freq*time) + >>> x += rng.normal(scale=np.sqrt(noise_power), size=time.shape) + + Compute and plot the power spectral density. + + >>> f, Pxx_den = signal.welch(x, fs, nperseg=1024) + >>> plt.semilogy(f, Pxx_den) + >>> plt.ylim([0.5e-3, 1]) + >>> plt.xlabel('frequency [Hz]') + >>> plt.ylabel('PSD [V**2/Hz]') + >>> plt.show() + + If we average the last half of the spectral density, to exclude the + peak, we can recover the noise power on the signal. + + >>> np.mean(Pxx_den[256:]) + 0.0009924865443739191 + + Now compute and plot the power spectrum. 
+ + >>> f, Pxx_spec = signal.welch(x, fs, 'flattop', 1024, scaling='spectrum') + >>> plt.figure() + >>> plt.semilogy(f, np.sqrt(Pxx_spec)) + >>> plt.xlabel('frequency [Hz]') + >>> plt.ylabel('Linear spectrum [V RMS]') + >>> plt.show() + + The peak height in the power spectrum is an estimate of the RMS + amplitude. + + >>> np.sqrt(Pxx_spec.max()) + 2.0077340678640727 + + If we now introduce a discontinuity in the signal, by increasing the + amplitude of a small portion of the signal by 50, we can see the + corruption of the mean average power spectral density, but using a + median average better estimates the normal behaviour. + + >>> x[int(N//2):int(N//2)+10] *= 50. + >>> f, Pxx_den = signal.welch(x, fs, nperseg=1024) + >>> f_med, Pxx_den_med = signal.welch(x, fs, nperseg=1024, average='median') + >>> plt.semilogy(f, Pxx_den, label='mean') + >>> plt.semilogy(f_med, Pxx_den_med, label='median') + >>> plt.ylim([0.5e-3, 1]) + >>> plt.xlabel('frequency [Hz]') + >>> plt.ylabel('PSD [V**2/Hz]') + >>> plt.legend() + >>> plt.show() + + """ + freqs, Pxx = csd(x, x, fs=fs, window=window, nperseg=nperseg, + noverlap=noverlap, nfft=nfft, detrend=detrend, + return_onesided=return_onesided, scaling=scaling, + axis=axis, average=average) + + return freqs, Pxx.real + + +def csd(x, y, fs=1.0, window='hann', nperseg=None, noverlap=None, nfft=None, + detrend='constant', return_onesided=True, scaling='density', + axis=-1, average='mean'): + r""" + Estimate the cross power spectral density, Pxy, using Welch's method. + + Parameters + ---------- + x : array_like + Time series of measurement values + y : array_like + Time series of measurement values + fs : float, optional + Sampling frequency of the `x` and `y` time series. Defaults + to 1.0. + window : str or tuple or array_like, optional + Desired window to use. If `window` is a string or tuple, it is + passed to `get_window` to generate the window values, which are + DFT-even by default. See `get_window` for a list of windows and + required parameters. If `window` is array_like it will be used + directly as the window and its length must be nperseg. Defaults + to a Hann window. + nperseg : int, optional + Length of each segment. Defaults to None, but if window is str or + tuple, is set to 256, and if window is array_like, is set to the + length of the window. + noverlap: int, optional + Number of points to overlap between segments. If `None`, + ``noverlap = nperseg // 2``. Defaults to `None`. + nfft : int, optional + Length of the FFT used, if a zero padded FFT is desired. If + `None`, the FFT length is `nperseg`. Defaults to `None`. + detrend : str or function or `False`, optional + Specifies how to detrend each segment. If `detrend` is a + string, it is passed as the `type` argument to the `detrend` + function. If it is a function, it takes a segment and returns a + detrended segment. If `detrend` is `False`, no detrending is + done. Defaults to 'constant'. + return_onesided : bool, optional + If `True`, return a one-sided spectrum for real data. If + `False` return a two-sided spectrum. Defaults to `True`, but for + complex data, a two-sided spectrum is always returned. + scaling : { 'density', 'spectrum' }, optional + Selects between computing the cross spectral density ('density') + where `Pxy` has units of V**2/Hz and computing the cross spectrum + ('spectrum') where `Pxy` has units of V**2, if `x` and `y` are + measured in V and `fs` is measured in Hz. 
Defaults to 'density' + axis : int, optional + Axis along which the CSD is computed for both inputs; the + default is over the last axis (i.e. ``axis=-1``). + average : { 'mean', 'median' }, optional + Method to use when averaging periodograms. If the spectrum is + complex, the average is computed separately for the real and + imaginary parts. Defaults to 'mean'. + + .. versionadded:: 1.2.0 + + Returns + ------- + f : ndarray + Array of sample frequencies. + Pxy : ndarray + Cross spectral density or cross power spectrum of x,y. + + See Also + -------- + periodogram: Simple, optionally modified periodogram + lombscargle: Lomb-Scargle periodogram for unevenly sampled data + welch: Power spectral density by Welch's method. [Equivalent to + csd(x,x)] + coherence: Magnitude squared coherence by Welch's method. + + Notes + ----- + By convention, Pxy is computed with the conjugate FFT of X + multiplied by the FFT of Y. + + If the input series differ in length, the shorter series will be + zero-padded to match. + + An appropriate amount of overlap will depend on the choice of window + and on your requirements. For the default Hann window an overlap of + 50% is a reasonable trade off between accurately estimating the + signal power, while not over counting any of the data. Narrower + windows may require a larger overlap. + + Consult the :ref:`tutorial_SpectralAnalysis` section of the :ref:`user_guide` + for a discussion of the scalings of a spectral density and an (amplitude) spectrum. + + .. versionadded:: 0.16.0 + + References + ---------- + .. [1] P. Welch, "The use of the fast Fourier transform for the + estimation of power spectra: A method based on time averaging + over short, modified periodograms", IEEE Trans. Audio + Electroacoust. vol. 15, pp. 70-73, 1967. + .. [2] Rabiner, Lawrence R., and B. Gold. "Theory and Application of + Digital Signal Processing" Prentice-Hall, pp. 414-419, 1975 + + Examples + -------- + >>> import numpy as np + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + >>> rng = np.random.default_rng() + + Generate two test signals with some common features. + + >>> fs = 10e3 + >>> N = 1e5 + >>> amp = 20 + >>> freq = 1234.0 + >>> noise_power = 0.001 * fs / 2 + >>> time = np.arange(N) / fs + >>> b, a = signal.butter(2, 0.25, 'low') + >>> x = rng.normal(scale=np.sqrt(noise_power), size=time.shape) + >>> y = signal.lfilter(b, a, x) + >>> x += amp*np.sin(2*np.pi*freq*time) + >>> y += rng.normal(scale=0.1*np.sqrt(noise_power), size=time.shape) + + Compute and plot the magnitude of the cross spectral density. + + >>> f, Pxy = signal.csd(x, y, fs, nperseg=1024) + >>> plt.semilogy(f, np.abs(Pxy)) + >>> plt.xlabel('frequency [Hz]') + >>> plt.ylabel('CSD [V**2/Hz]') + >>> plt.show() + + """ + freqs, _, Pxy = _spectral_helper(x, y, fs, window, nperseg, noverlap, + nfft, detrend, return_onesided, scaling, + axis, mode='psd') + + # Average over windows. 
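+ # For 'median' averaging below: individual periodogram estimates are roughly
+ # exponentially distributed, so the median over segments systematically
+ # underestimates the mean PSD; _median_bias() returns the correction factor
+ # that the median-averaged result is divided by to keep it comparable to
+ # 'mean' averaging.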
+ if len(Pxy.shape) >= 2 and Pxy.size > 0: + if Pxy.shape[-1] > 1: + if average == 'median': + # np.median must be passed real arrays for the desired result + bias = _median_bias(Pxy.shape[-1]) + if np.iscomplexobj(Pxy): + Pxy = (np.median(np.real(Pxy), axis=-1) + + 1j * np.median(np.imag(Pxy), axis=-1)) + else: + Pxy = np.median(Pxy, axis=-1) + Pxy /= bias + elif average == 'mean': + Pxy = Pxy.mean(axis=-1) + else: + raise ValueError(f'average must be "median" or "mean", got {average}') + else: + Pxy = np.reshape(Pxy, Pxy.shape[:-1]) + + return freqs, Pxy + + +def spectrogram(x, fs=1.0, window=('tukey', .25), nperseg=None, noverlap=None, + nfft=None, detrend='constant', return_onesided=True, + scaling='density', axis=-1, mode='psd'): + """Compute a spectrogram with consecutive Fourier transforms (legacy function). + + Spectrograms can be used as a way of visualizing the change of a + nonstationary signal's frequency content over time. + + .. legacy:: function + + :class:`ShortTimeFFT` is a newer STFT / ISTFT implementation with more + features also including a :meth:`~ShortTimeFFT.spectrogram` method. + A :ref:`comparison ` between the + implementations can be found in the :ref:`tutorial_stft` section of + the :ref:`user_guide`. + + Parameters + ---------- + x : array_like + Time series of measurement values + fs : float, optional + Sampling frequency of the `x` time series. Defaults to 1.0. + window : str or tuple or array_like, optional + Desired window to use. If `window` is a string or tuple, it is + passed to `get_window` to generate the window values, which are + DFT-even by default. See `get_window` for a list of windows and + required parameters. If `window` is array_like it will be used + directly as the window and its length must be nperseg. + Defaults to a Tukey window with shape parameter of 0.25. + nperseg : int, optional + Length of each segment. Defaults to None, but if window is str or + tuple, is set to 256, and if window is array_like, is set to the + length of the window. + noverlap : int, optional + Number of points to overlap between segments. If `None`, + ``noverlap = nperseg // 8``. Defaults to `None`. + nfft : int, optional + Length of the FFT used, if a zero padded FFT is desired. If + `None`, the FFT length is `nperseg`. Defaults to `None`. + detrend : str or function or `False`, optional + Specifies how to detrend each segment. If `detrend` is a + string, it is passed as the `type` argument to the `detrend` + function. If it is a function, it takes a segment and returns a + detrended segment. If `detrend` is `False`, no detrending is + done. Defaults to 'constant'. + return_onesided : bool, optional + If `True`, return a one-sided spectrum for real data. If + `False` return a two-sided spectrum. Defaults to `True`, but for + complex data, a two-sided spectrum is always returned. + scaling : { 'density', 'spectrum' }, optional + Selects between computing the power spectral density ('density') + where `Sxx` has units of V**2/Hz and computing the power + spectrum ('spectrum') where `Sxx` has units of V**2, if `x` + is measured in V and `fs` is measured in Hz. Defaults to + 'density'. + axis : int, optional + Axis along which the spectrogram is computed; the default is over + the last axis (i.e. ``axis=-1``). + mode : str, optional + Defines what kind of return values are expected. Options are + ['psd', 'complex', 'magnitude', 'angle', 'phase']. 'complex' is + equivalent to the output of `stft` with no padding or boundary + extension. 
'magnitude' returns the absolute magnitude of the + STFT. 'angle' and 'phase' return the complex angle of the STFT, + with and without unwrapping, respectively. + + Returns + ------- + f : ndarray + Array of sample frequencies. + t : ndarray + Array of segment times. + Sxx : ndarray + Spectrogram of x. By default, the last axis of Sxx corresponds + to the segment times. + + See Also + -------- + periodogram: Simple, optionally modified periodogram + lombscargle: Lomb-Scargle periodogram for unevenly sampled data + welch: Power spectral density by Welch's method. + csd: Cross spectral density by Welch's method. + ShortTimeFFT: Newer STFT/ISTFT implementation providing more features, + which also includes a :meth:`~ShortTimeFFT.spectrogram` + method. + + Notes + ----- + An appropriate amount of overlap will depend on the choice of window + and on your requirements. In contrast to welch's method, where the + entire data stream is averaged over, one may wish to use a smaller + overlap (or perhaps none at all) when computing a spectrogram, to + maintain some statistical independence between individual segments. + It is for this reason that the default window is a Tukey window with + 1/8th of a window's length overlap at each end. + + + .. versionadded:: 0.16.0 + + References + ---------- + .. [1] Oppenheim, Alan V., Ronald W. Schafer, John R. Buck + "Discrete-Time Signal Processing", Prentice Hall, 1999. + + Examples + -------- + >>> import numpy as np + >>> from scipy import signal + >>> from scipy.fft import fftshift + >>> import matplotlib.pyplot as plt + >>> rng = np.random.default_rng() + + Generate a test signal, a 2 Vrms sine wave whose frequency is slowly + modulated around 3kHz, corrupted by white noise of exponentially + decreasing magnitude sampled at 10 kHz. + + >>> fs = 10e3 + >>> N = 1e5 + >>> amp = 2 * np.sqrt(2) + >>> noise_power = 0.01 * fs / 2 + >>> time = np.arange(N) / float(fs) + >>> mod = 500*np.cos(2*np.pi*0.25*time) + >>> carrier = amp * np.sin(2*np.pi*3e3*time + mod) + >>> noise = rng.normal(scale=np.sqrt(noise_power), size=time.shape) + >>> noise *= np.exp(-time/5) + >>> x = carrier + noise + + Compute and plot the spectrogram. 
+ + >>> f, t, Sxx = signal.spectrogram(x, fs) + >>> plt.pcolormesh(t, f, Sxx, shading='gouraud') + >>> plt.ylabel('Frequency [Hz]') + >>> plt.xlabel('Time [sec]') + >>> plt.show() + + Note, if using output that is not one sided, then use the following: + + >>> f, t, Sxx = signal.spectrogram(x, fs, return_onesided=False) + >>> plt.pcolormesh(t, fftshift(f), fftshift(Sxx, axes=0), shading='gouraud') + >>> plt.ylabel('Frequency [Hz]') + >>> plt.xlabel('Time [sec]') + >>> plt.show() + + """ + modelist = ['psd', 'complex', 'magnitude', 'angle', 'phase'] + if mode not in modelist: + raise ValueError(f'unknown value for mode {mode}, must be one of {modelist}') + + # need to set default for nperseg before setting default for noverlap below + window, nperseg = _triage_segments(window, nperseg, + input_length=x.shape[axis]) + + # Less overlap than welch, so samples are more statistically independent + if noverlap is None: + noverlap = nperseg // 8 + + if mode == 'psd': + freqs, time, Sxx = _spectral_helper(x, x, fs, window, nperseg, + noverlap, nfft, detrend, + return_onesided, scaling, axis, + mode='psd') + + else: + freqs, time, Sxx = _spectral_helper(x, x, fs, window, nperseg, + noverlap, nfft, detrend, + return_onesided, scaling, axis, + mode='stft') + + if mode == 'magnitude': + Sxx = np.abs(Sxx) + elif mode in ['angle', 'phase']: + Sxx = np.angle(Sxx) + if mode == 'phase': + # Sxx has one additional dimension for time strides + if axis < 0: + axis -= 1 + Sxx = np.unwrap(Sxx, axis=axis) + + # mode =='complex' is same as `stft`, doesn't need modification + + return freqs, time, Sxx + + +def check_COLA(window, nperseg, noverlap, tol=1e-10): + r"""Check whether the Constant OverLap Add (COLA) constraint is met. + + Parameters + ---------- + window : str or tuple or array_like + Desired window to use. If `window` is a string or tuple, it is + passed to `get_window` to generate the window values, which are + DFT-even by default. See `get_window` for a list of windows and + required parameters. If `window` is array_like it will be used + directly as the window and its length must be nperseg. + nperseg : int + Length of each segment. + noverlap : int + Number of points to overlap between segments. + tol : float, optional + The allowed variance of a bin's weighted sum from the median bin + sum. + + Returns + ------- + verdict : bool + `True` if chosen combination satisfies COLA within `tol`, + `False` otherwise + + See Also + -------- + check_NOLA: Check whether the Nonzero Overlap Add (NOLA) constraint is met + stft: Short Time Fourier Transform + istft: Inverse Short Time Fourier Transform + + Notes + ----- + In order to enable inversion of an STFT via the inverse STFT in + `istft`, it is sufficient that the signal windowing obeys the constraint of + "Constant OverLap Add" (COLA). This ensures that every point in the input + data is equally weighted, thereby avoiding aliasing and allowing full + reconstruction. + + Some examples of windows that satisfy COLA: + - Rectangular window at overlap of 0, 1/2, 2/3, 3/4, ... + - Bartlett window at overlap of 1/2, 3/4, 5/6, ... + - Hann window at 1/2, 2/3, 3/4, ... + - Any Blackman family window at 2/3 overlap + - Any window with ``noverlap = nperseg-1`` + + A very comprehensive list of other windows may be found in [2]_, + wherein the COLA condition is satisfied when the "Amplitude + Flatness" is unity. + + .. versionadded:: 0.19.0 + + References + ---------- + .. [1] Julius O. 
Smith III, "Spectral Audio Signal Processing", W3K + Publishing, 2011,ISBN 978-0-9745607-3-1. + .. [2] G. Heinzel, A. Ruediger and R. Schilling, "Spectrum and + spectral density estimation by the Discrete Fourier transform + (DFT), including a comprehensive list of window functions and + some new at-top windows", 2002, + http://hdl.handle.net/11858/00-001M-0000-0013-557A-5 + + Examples + -------- + >>> from scipy import signal + + Confirm COLA condition for rectangular window of 75% (3/4) overlap: + + >>> signal.check_COLA(signal.windows.boxcar(100), 100, 75) + True + + COLA is not true for 25% (1/4) overlap, though: + + >>> signal.check_COLA(signal.windows.boxcar(100), 100, 25) + False + + "Symmetrical" Hann window (for filter design) is not COLA: + + >>> signal.check_COLA(signal.windows.hann(120, sym=True), 120, 60) + False + + "Periodic" or "DFT-even" Hann window (for FFT analysis) is COLA for + overlap of 1/2, 2/3, 3/4, etc.: + + >>> signal.check_COLA(signal.windows.hann(120, sym=False), 120, 60) + True + + >>> signal.check_COLA(signal.windows.hann(120, sym=False), 120, 80) + True + + >>> signal.check_COLA(signal.windows.hann(120, sym=False), 120, 90) + True + + """ + nperseg = int(nperseg) + + if nperseg < 1: + raise ValueError('nperseg must be a positive integer') + + if noverlap >= nperseg: + raise ValueError('noverlap must be less than nperseg.') + noverlap = int(noverlap) + + if isinstance(window, str) or type(window) is tuple: + win = get_window(window, nperseg) + else: + win = np.asarray(window) + if len(win.shape) != 1: + raise ValueError('window must be 1-D') + if win.shape[0] != nperseg: + raise ValueError('window must have length of nperseg') + + step = nperseg - noverlap + binsums = sum(win[ii*step:(ii+1)*step] for ii in range(nperseg//step)) + + if nperseg % step != 0: + binsums[:nperseg % step] += win[-(nperseg % step):] + + deviation = binsums - np.median(binsums) + return np.max(np.abs(deviation)) < tol + + +def check_NOLA(window, nperseg, noverlap, tol=1e-10): + r"""Check whether the Nonzero Overlap Add (NOLA) constraint is met. + + Parameters + ---------- + window : str or tuple or array_like + Desired window to use. If `window` is a string or tuple, it is + passed to `get_window` to generate the window values, which are + DFT-even by default. See `get_window` for a list of windows and + required parameters. If `window` is array_like it will be used + directly as the window and its length must be nperseg. + nperseg : int + Length of each segment. + noverlap : int + Number of points to overlap between segments. + tol : float, optional + The allowed variance of a bin's weighted sum from the median bin + sum. + + Returns + ------- + verdict : bool + `True` if chosen combination satisfies the NOLA constraint within + `tol`, `False` otherwise + + See Also + -------- + check_COLA: Check whether the Constant OverLap Add (COLA) constraint is met + stft: Short Time Fourier Transform + istft: Inverse Short Time Fourier Transform + + Notes + ----- + In order to enable inversion of an STFT via the inverse STFT in + `istft`, the signal windowing must obey the constraint of "nonzero + overlap add" (NOLA): + + .. math:: \sum_{t}w^{2}[n-tH] \ne 0 + + for all :math:`n`, where :math:`w` is the window function, :math:`t` is the + frame index, and :math:`H` is the hop size (:math:`H` = `nperseg` - + `noverlap`). + + This ensures that the normalization factors in the denominator of the + overlap-add inversion equation are not zero. 
Only very pathological windows + will fail the NOLA constraint. + + .. versionadded:: 1.2.0 + + References + ---------- + .. [1] Julius O. Smith III, "Spectral Audio Signal Processing", W3K + Publishing, 2011,ISBN 978-0-9745607-3-1. + .. [2] G. Heinzel, A. Ruediger and R. Schilling, "Spectrum and + spectral density estimation by the Discrete Fourier transform + (DFT), including a comprehensive list of window functions and + some new at-top windows", 2002, + http://hdl.handle.net/11858/00-001M-0000-0013-557A-5 + + Examples + -------- + >>> import numpy as np + >>> from scipy import signal + + Confirm NOLA condition for rectangular window of 75% (3/4) overlap: + + >>> signal.check_NOLA(signal.windows.boxcar(100), 100, 75) + True + + NOLA is also true for 25% (1/4) overlap: + + >>> signal.check_NOLA(signal.windows.boxcar(100), 100, 25) + True + + "Symmetrical" Hann window (for filter design) is also NOLA: + + >>> signal.check_NOLA(signal.windows.hann(120, sym=True), 120, 60) + True + + As long as there is overlap, it takes quite a pathological window to fail + NOLA: + + >>> w = np.ones(64, dtype="float") + >>> w[::2] = 0 + >>> signal.check_NOLA(w, 64, 32) + False + + If there is not enough overlap, a window with zeros at the ends will not + work: + + >>> signal.check_NOLA(signal.windows.hann(64), 64, 0) + False + >>> signal.check_NOLA(signal.windows.hann(64), 64, 1) + False + >>> signal.check_NOLA(signal.windows.hann(64), 64, 2) + True + + """ + nperseg = int(nperseg) + + if nperseg < 1: + raise ValueError('nperseg must be a positive integer') + + if noverlap >= nperseg: + raise ValueError('noverlap must be less than nperseg') + if noverlap < 0: + raise ValueError('noverlap must be a nonnegative integer') + noverlap = int(noverlap) + + if isinstance(window, str) or type(window) is tuple: + win = get_window(window, nperseg) + else: + win = np.asarray(window) + if len(win.shape) != 1: + raise ValueError('window must be 1-D') + if win.shape[0] != nperseg: + raise ValueError('window must have length of nperseg') + + step = nperseg - noverlap + binsums = sum(win[ii*step:(ii+1)*step]**2 for ii in range(nperseg//step)) + + if nperseg % step != 0: + binsums[:nperseg % step] += win[-(nperseg % step):]**2 + + return np.min(binsums) > tol + + +def stft(x, fs=1.0, window='hann', nperseg=256, noverlap=None, nfft=None, + detrend=False, return_onesided=True, boundary='zeros', padded=True, + axis=-1, scaling='spectrum'): + r"""Compute the Short Time Fourier Transform (legacy function). + + STFTs can be used as a way of quantifying the change of a + nonstationary signal's frequency and phase content over time. + + .. legacy:: function + + `ShortTimeFFT` is a newer STFT / ISTFT implementation with more + features. A :ref:`comparison ` between the + implementations can be found in the :ref:`tutorial_stft` section of the + :ref:`user_guide`. + + Parameters + ---------- + x : array_like + Time series of measurement values + fs : float, optional + Sampling frequency of the `x` time series. Defaults to 1.0. + window : str or tuple or array_like, optional + Desired window to use. If `window` is a string or tuple, it is + passed to `get_window` to generate the window values, which are + DFT-even by default. See `get_window` for a list of windows and + required parameters. If `window` is array_like it will be used + directly as the window and its length must be nperseg. Defaults + to a Hann window. + nperseg : int, optional + Length of each segment. Defaults to 256. 
+ noverlap : int, optional + Number of points to overlap between segments. If `None`, + ``noverlap = nperseg // 2``. Defaults to `None`. When + specified, the COLA constraint must be met (see Notes below). + nfft : int, optional + Length of the FFT used, if a zero padded FFT is desired. If + `None`, the FFT length is `nperseg`. Defaults to `None`. + detrend : str or function or `False`, optional + Specifies how to detrend each segment. If `detrend` is a + string, it is passed as the `type` argument to the `detrend` + function. If it is a function, it takes a segment and returns a + detrended segment. If `detrend` is `False`, no detrending is + done. Defaults to `False`. + return_onesided : bool, optional + If `True`, return a one-sided spectrum for real data. If + `False` return a two-sided spectrum. Defaults to `True`, but for + complex data, a two-sided spectrum is always returned. + boundary : str or None, optional + Specifies whether the input signal is extended at both ends, and + how to generate the new values, in order to center the first + windowed segment on the first input point. This has the benefit + of enabling reconstruction of the first input point when the + employed window function starts at zero. Valid options are + ``['even', 'odd', 'constant', 'zeros', None]``. Defaults to + 'zeros', for zero padding extension. I.e. ``[1, 2, 3, 4]`` is + extended to ``[0, 1, 2, 3, 4, 0]`` for ``nperseg=3``. + padded : bool, optional + Specifies whether the input signal is zero-padded at the end to + make the signal fit exactly into an integer number of window + segments, so that all of the signal is included in the output. + Defaults to `True`. Padding occurs after boundary extension, if + `boundary` is not `None`, and `padded` is `True`, as is the + default. + axis : int, optional + Axis along which the STFT is computed; the default is over the + last axis (i.e. ``axis=-1``). + scaling: {'spectrum', 'psd'} + The default 'spectrum' scaling allows each frequency line of `Zxx` to + be interpreted as a magnitude spectrum. The 'psd' option scales each + line to a power spectral density - it allows to calculate the signal's + energy by numerically integrating over ``abs(Zxx)**2``. + + .. versionadded:: 1.9.0 + + Returns + ------- + f : ndarray + Array of sample frequencies. + t : ndarray + Array of segment times. + Zxx : ndarray + STFT of `x`. By default, the last axis of `Zxx` corresponds + to the segment times. + + See Also + -------- + istft: Inverse Short Time Fourier Transform + ShortTimeFFT: Newer STFT/ISTFT implementation providing more features. + check_COLA: Check whether the Constant OverLap Add (COLA) constraint + is met + check_NOLA: Check whether the Nonzero Overlap Add (NOLA) constraint is met + welch: Power spectral density by Welch's method. + spectrogram: Spectrogram by Welch's method. + csd: Cross spectral density by Welch's method. + lombscargle: Lomb-Scargle periodogram for unevenly sampled data + + Notes + ----- + In order to enable inversion of an STFT via the inverse STFT in + `istft`, the signal windowing must obey the constraint of "Nonzero + OverLap Add" (NOLA), and the input signal must have complete + windowing coverage (i.e. ``(x.shape[axis] - nperseg) % + (nperseg-noverlap) == 0``). The `padded` argument may be used to + accomplish this. + + Given a time-domain signal :math:`x[n]`, a window :math:`w[n]`, and a hop + size :math:`H` = `nperseg - noverlap`, the windowed frame at time index + :math:`t` is given by + + .. 
math:: x_{t}[n]=x[n]w[n-tH] + + The overlap-add (OLA) reconstruction equation is given by + + .. math:: x[n]=\frac{\sum_{t}x_{t}[n]w[n-tH]}{\sum_{t}w^{2}[n-tH]} + + The NOLA constraint ensures that every normalization term that appears + in the denominator of the OLA reconstruction equation is nonzero. Whether a + choice of `window`, `nperseg`, and `noverlap` satisfy this constraint can + be tested with `check_NOLA`. + + + .. versionadded:: 0.19.0 + + References + ---------- + .. [1] Oppenheim, Alan V., Ronald W. Schafer, John R. Buck + "Discrete-Time Signal Processing", Prentice Hall, 1999. + .. [2] Daniel W. Griffin, Jae S. Lim "Signal Estimation from + Modified Short-Time Fourier Transform", IEEE 1984, + 10.1109/TASSP.1984.1164317 + + Examples + -------- + >>> import numpy as np + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + >>> rng = np.random.default_rng() + + Generate a test signal, a 2 Vrms sine wave whose frequency is slowly + modulated around 3kHz, corrupted by white noise of exponentially + decreasing magnitude sampled at 10 kHz. + + >>> fs = 10e3 + >>> N = 1e5 + >>> amp = 2 * np.sqrt(2) + >>> noise_power = 0.01 * fs / 2 + >>> time = np.arange(N) / float(fs) + >>> mod = 500*np.cos(2*np.pi*0.25*time) + >>> carrier = amp * np.sin(2*np.pi*3e3*time + mod) + >>> noise = rng.normal(scale=np.sqrt(noise_power), + ... size=time.shape) + >>> noise *= np.exp(-time/5) + >>> x = carrier + noise + + Compute and plot the STFT's magnitude. + + >>> f, t, Zxx = signal.stft(x, fs, nperseg=1000) + >>> plt.pcolormesh(t, f, np.abs(Zxx), vmin=0, vmax=amp, shading='gouraud') + >>> plt.title('STFT Magnitude') + >>> plt.ylabel('Frequency [Hz]') + >>> plt.xlabel('Time [sec]') + >>> plt.show() + + Compare the energy of the signal `x` with the energy of its STFT: + + >>> E_x = sum(x**2) / fs # Energy of x + >>> # Calculate a two-sided STFT with PSD scaling: + >>> f, t, Zxx = signal.stft(x, fs, nperseg=1000, return_onesided=False, + ... scaling='psd') + >>> # Integrate numerically over abs(Zxx)**2: + >>> df, dt = f[1] - f[0], t[1] - t[0] + >>> E_Zxx = sum(np.sum(Zxx.real**2 + Zxx.imag**2, axis=0) * df) * dt + >>> # The energy is the same, but the numerical errors are quite large: + >>> np.isclose(E_x, E_Zxx, rtol=1e-2) + True + + """ + if scaling == 'psd': + scaling = 'density' + elif scaling != 'spectrum': + raise ValueError(f"Parameter {scaling=} not in ['spectrum', 'psd']!") + + freqs, time, Zxx = _spectral_helper(x, x, fs, window, nperseg, noverlap, + nfft, detrend, return_onesided, + scaling=scaling, axis=axis, + mode='stft', boundary=boundary, + padded=padded) + + return freqs, time, Zxx + + +def istft(Zxx, fs=1.0, window='hann', nperseg=None, noverlap=None, nfft=None, + input_onesided=True, boundary=True, time_axis=-1, freq_axis=-2, + scaling='spectrum'): + r"""Perform the inverse Short Time Fourier transform (legacy function). + + .. legacy:: function + + `ShortTimeFFT` is a newer STFT / ISTFT implementation with more + features. A :ref:`comparison ` between the + implementations can be found in the :ref:`tutorial_stft` section of the + :ref:`user_guide`. + + Parameters + ---------- + Zxx : array_like + STFT of the signal to be reconstructed. If a purely real array + is passed, it will be cast to a complex data type. + fs : float, optional + Sampling frequency of the time series. Defaults to 1.0. + window : str or tuple or array_like, optional + Desired window to use. 
If `window` is a string or tuple, it is + passed to `get_window` to generate the window values, which are + DFT-even by default. See `get_window` for a list of windows and + required parameters. If `window` is array_like it will be used + directly as the window and its length must be nperseg. Defaults + to a Hann window. Must match the window used to generate the + STFT for faithful inversion. + nperseg : int, optional + Number of data points corresponding to each STFT segment. This + parameter must be specified if the number of data points per + segment is odd, or if the STFT was padded via ``nfft > + nperseg``. If `None`, the value depends on the shape of + `Zxx` and `input_onesided`. If `input_onesided` is `True`, + ``nperseg=2*(Zxx.shape[freq_axis] - 1)``. Otherwise, + ``nperseg=Zxx.shape[freq_axis]``. Defaults to `None`. + noverlap : int, optional + Number of points to overlap between segments. If `None`, half + of the segment length. Defaults to `None`. When specified, the + COLA constraint must be met (see Notes below), and should match + the parameter used to generate the STFT. Defaults to `None`. + nfft : int, optional + Number of FFT points corresponding to each STFT segment. This + parameter must be specified if the STFT was padded via ``nfft > + nperseg``. If `None`, the default values are the same as for + `nperseg`, detailed above, with one exception: if + `input_onesided` is True and + ``nperseg==2*Zxx.shape[freq_axis] - 1``, `nfft` also takes on + that value. This case allows the proper inversion of an + odd-length unpadded STFT using ``nfft=None``. Defaults to + `None`. + input_onesided : bool, optional + If `True`, interpret the input array as one-sided FFTs, such + as is returned by `stft` with ``return_onesided=True`` and + `numpy.fft.rfft`. If `False`, interpret the input as a a + two-sided FFT. Defaults to `True`. + boundary : bool, optional + Specifies whether the input signal was extended at its + boundaries by supplying a non-`None` ``boundary`` argument to + `stft`. Defaults to `True`. + time_axis : int, optional + Where the time segments of the STFT is located; the default is + the last axis (i.e. ``axis=-1``). + freq_axis : int, optional + Where the frequency axis of the STFT is located; the default is + the penultimate axis (i.e. ``axis=-2``). + scaling: {'spectrum', 'psd'} + The default 'spectrum' scaling allows each frequency line of `Zxx` to + be interpreted as a magnitude spectrum. The 'psd' option scales each + line to a power spectral density - it allows to calculate the signal's + energy by numerically integrating over ``abs(Zxx)**2``. + + Returns + ------- + t : ndarray + Array of output data times. + x : ndarray + iSTFT of `Zxx`. + + See Also + -------- + stft: Short Time Fourier Transform + ShortTimeFFT: Newer STFT/ISTFT implementation providing more features. + check_COLA: Check whether the Constant OverLap Add (COLA) constraint + is met + check_NOLA: Check whether the Nonzero Overlap Add (NOLA) constraint is met + + Notes + ----- + In order to enable inversion of an STFT via the inverse STFT with + `istft`, the signal windowing must obey the constraint of "nonzero + overlap add" (NOLA): + + .. math:: \sum_{t}w^{2}[n-tH] \ne 0 + + This ensures that the normalization factors that appear in the denominator + of the overlap-add reconstruction equation + + .. math:: x[n]=\frac{\sum_{t}x_{t}[n]w[n-tH]}{\sum_{t}w^{2}[n-tH]} + + are not zero. The NOLA constraint can be checked with the `check_NOLA` + function. 
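+ For example (illustrative parameter values), the window, segment length and
+ overlap used to generate the STFT can be verified up front:
+
+ >>> from scipy import signal
+ >>> signal.check_NOLA('hann', 256, 128)   # window, nperseg, noverlap
+ True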
+ + An STFT which has been modified (via masking or otherwise) is not + guaranteed to correspond to an exactly realizable signal. This + function implements the iSTFT via the least-squares estimation + algorithm detailed in [2]_, which produces a signal that minimizes + the mean squared error between the STFT of the returned signal and + the modified STFT. + + + .. versionadded:: 0.19.0 + + References + ---------- + .. [1] Oppenheim, Alan V., Ronald W. Schafer, John R. Buck + "Discrete-Time Signal Processing", Prentice Hall, 1999. + .. [2] Daniel W. Griffin, Jae S. Lim "Signal Estimation from + Modified Short-Time Fourier Transform", IEEE 1984, + 10.1109/TASSP.1984.1164317 + + Examples + -------- + >>> import numpy as np + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + >>> rng = np.random.default_rng() + + Generate a test signal, a 2 Vrms sine wave at 50 Hz corrupted by + 0.001 V**2/Hz of white noise sampled at 1024 Hz. + + >>> fs = 1024 + >>> N = 10*fs + >>> nperseg = 512 + >>> amp = 2 * np.sqrt(2) + >>> noise_power = 0.001 * fs / 2 + >>> time = np.arange(N) / float(fs) + >>> carrier = amp * np.sin(2*np.pi*50*time) + >>> noise = rng.normal(scale=np.sqrt(noise_power), + ... size=time.shape) + >>> x = carrier + noise + + Compute the STFT, and plot its magnitude. + + >>> f, t, Zxx = signal.stft(x, fs=fs, nperseg=nperseg) + >>> plt.figure() + >>> plt.pcolormesh(t, f, np.abs(Zxx), vmin=0, vmax=amp, shading='gouraud') + >>> plt.ylim([f[1], f[-1]]) + >>> plt.title('STFT Magnitude') + >>> plt.ylabel('Frequency [Hz]') + >>> plt.xlabel('Time [sec]') + >>> plt.yscale('log') + >>> plt.show() + + Zero the components that are 10% or less of the carrier magnitude, + then convert back to a time series via inverse STFT. + + >>> Zxx = np.where(np.abs(Zxx) >= amp/10, Zxx, 0) + >>> _, xrec = signal.istft(Zxx, fs) + + Compare the cleaned signal with the original and true carrier signals.
+ + >>> plt.figure() + >>> plt.plot(time, x, time, xrec, time, carrier) + >>> plt.xlim([2, 2.1]) + >>> plt.xlabel('Time [sec]') + >>> plt.ylabel('Signal') + >>> plt.legend(['Carrier + Noise', 'Filtered via STFT', 'True Carrier']) + >>> plt.show() + + Note that the cleaned signal does not start as abruptly as the original, + since some of the coefficients of the transient were also removed: + + >>> plt.figure() + >>> plt.plot(time, x, time, xrec, time, carrier) + >>> plt.xlim([0, 0.1]) + >>> plt.xlabel('Time [sec]') + >>> plt.ylabel('Signal') + >>> plt.legend(['Carrier + Noise', 'Filtered via STFT', 'True Carrier']) + >>> plt.show() + + """ + # Make sure input is an ndarray of appropriate complex dtype + Zxx = np.asarray(Zxx) + 0j + freq_axis = int(freq_axis) + time_axis = int(time_axis) + + if Zxx.ndim < 2: + raise ValueError('Input stft must be at least 2d!') + + if freq_axis == time_axis: + raise ValueError('Must specify differing time and frequency axes!') + + nseg = Zxx.shape[time_axis] + + if input_onesided: + # Assume even segment length + n_default = 2*(Zxx.shape[freq_axis] - 1) + else: + n_default = Zxx.shape[freq_axis] + + # Check windowing parameters + if nperseg is None: + nperseg = n_default + else: + nperseg = int(nperseg) + if nperseg < 1: + raise ValueError('nperseg must be a positive integer') + + if nfft is None: + if (input_onesided) and (nperseg == n_default + 1): + # Odd nperseg, no FFT padding + nfft = nperseg + else: + nfft = n_default + elif nfft < nperseg: + raise ValueError('nfft must be greater than or equal to nperseg.') + else: + nfft = int(nfft) + + if noverlap is None: + noverlap = nperseg//2 + else: + noverlap = int(noverlap) + if noverlap >= nperseg: + raise ValueError('noverlap must be less than nperseg.') + nstep = nperseg - noverlap + + # Rearrange axes if necessary + if time_axis != Zxx.ndim-1 or freq_axis != Zxx.ndim-2: + # Turn negative indices to positive for the call to transpose + if freq_axis < 0: + freq_axis = Zxx.ndim + freq_axis + if time_axis < 0: + time_axis = Zxx.ndim + time_axis + zouter = list(range(Zxx.ndim)) + for ax in sorted([time_axis, freq_axis], reverse=True): + zouter.pop(ax) + Zxx = np.transpose(Zxx, zouter+[freq_axis, time_axis]) + + # Get window as array + if isinstance(window, str) or type(window) is tuple: + win = get_window(window, nperseg) + else: + win = np.asarray(window) + if len(win.shape) != 1: + raise ValueError('window must be 1-D') + if win.shape[0] != nperseg: + raise ValueError(f'window must have length of {nperseg}') + + ifunc = sp_fft.irfft if input_onesided else sp_fft.ifft + xsubs = ifunc(Zxx, axis=-2, n=nfft)[..., :nperseg, :] + + # Initialize output and normalization arrays + outputlength = nperseg + (nseg-1)*nstep + x = np.zeros(list(Zxx.shape[:-2])+[outputlength], dtype=xsubs.dtype) + norm = np.zeros(outputlength, dtype=xsubs.dtype) + + if np.result_type(win, xsubs) != xsubs.dtype: + win = win.astype(xsubs.dtype) + + if scaling == 'spectrum': + xsubs *= win.sum() + elif scaling == 'psd': + xsubs *= np.sqrt(fs * sum(win**2)) + else: + raise ValueError(f"Parameter {scaling=} not in ['spectrum', 'psd']!") + + # Construct the output from the ifft segments + # This loop could perhaps be vectorized/strided somehow... 
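+ # Overlap-add reconstruction: each ifft segment is multiplied by the window
+ # and added into `x` at its hop offset (ii*nstep), while `norm` accumulates
+ # win**2 so the sum can later be divided by sum_t w**2[n - tH].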
+ for ii in range(nseg): + # Window the ifft + x[..., ii*nstep:ii*nstep+nperseg] += xsubs[..., ii] * win + norm[..., ii*nstep:ii*nstep+nperseg] += win**2 + + # Remove extension points + if boundary: + x = x[..., nperseg//2:-(nperseg//2)] + norm = norm[..., nperseg//2:-(nperseg//2)] + + # Divide out normalization where non-tiny + if np.sum(norm > 1e-10) != len(norm): + warnings.warn( + "NOLA condition failed, STFT may not be invertible." + + (" Possibly due to missing boundary" if not boundary else ""), + stacklevel=2 + ) + x /= np.where(norm > 1e-10, norm, 1.0) + + if input_onesided: + x = x.real + + # Put axes back + if x.ndim > 1: + if time_axis != Zxx.ndim-1: + if freq_axis < time_axis: + time_axis -= 1 + x = np.moveaxis(x, -1, time_axis) + + time = np.arange(x.shape[0])/float(fs) + return time, x + + +def coherence(x, y, fs=1.0, window='hann', nperseg=None, noverlap=None, + nfft=None, detrend='constant', axis=-1): + r""" + Estimate the magnitude squared coherence estimate, Cxy, of + discrete-time signals X and Y using Welch's method. + + ``Cxy = abs(Pxy)**2/(Pxx*Pyy)``, where `Pxx` and `Pyy` are power + spectral density estimates of X and Y, and `Pxy` is the cross + spectral density estimate of X and Y. + + Parameters + ---------- + x : array_like + Time series of measurement values + y : array_like + Time series of measurement values + fs : float, optional + Sampling frequency of the `x` and `y` time series. Defaults + to 1.0. + window : str or tuple or array_like, optional + Desired window to use. If `window` is a string or tuple, it is + passed to `get_window` to generate the window values, which are + DFT-even by default. See `get_window` for a list of windows and + required parameters. If `window` is array_like it will be used + directly as the window and its length must be nperseg. Defaults + to a Hann window. + nperseg : int, optional + Length of each segment. Defaults to None, but if window is str or + tuple, is set to 256, and if window is array_like, is set to the + length of the window. + noverlap: int, optional + Number of points to overlap between segments. If `None`, + ``noverlap = nperseg // 2``. Defaults to `None`. + nfft : int, optional + Length of the FFT used, if a zero padded FFT is desired. If + `None`, the FFT length is `nperseg`. Defaults to `None`. + detrend : str or function or `False`, optional + Specifies how to detrend each segment. If `detrend` is a + string, it is passed as the `type` argument to the `detrend` + function. If it is a function, it takes a segment and returns a + detrended segment. If `detrend` is `False`, no detrending is + done. Defaults to 'constant'. + axis : int, optional + Axis along which the coherence is computed for both inputs; the + default is over the last axis (i.e. ``axis=-1``). + + Returns + ------- + f : ndarray + Array of sample frequencies. + Cxy : ndarray + Magnitude squared coherence of x and y. + + See Also + -------- + periodogram: Simple, optionally modified periodogram + lombscargle: Lomb-Scargle periodogram for unevenly sampled data + welch: Power spectral density by Welch's method. + csd: Cross spectral density by Welch's method. + + Notes + ----- + An appropriate amount of overlap will depend on the choice of window + and on your requirements. For the default Hann window an overlap of + 50% is a reasonable trade off between accurately estimating the + signal power, while not over counting any of the data. Narrower + windows may require a larger overlap. + + .. versionadded:: 0.16.0 + + References + ---------- + .. 
[1] P. Welch, "The use of the fast Fourier transform for the + estimation of power spectra: A method based on time averaging + over short, modified periodograms", IEEE Trans. Audio + Electroacoust. vol. 15, pp. 70-73, 1967. + .. [2] Stoica, Petre, and Randolph Moses, "Spectral Analysis of + Signals" Prentice Hall, 2005 + + Examples + -------- + >>> import numpy as np + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + >>> rng = np.random.default_rng() + + Generate two test signals with some common features. + + >>> fs = 10e3 + >>> N = 1e5 + >>> amp = 20 + >>> freq = 1234.0 + >>> noise_power = 0.001 * fs / 2 + >>> time = np.arange(N) / fs + >>> b, a = signal.butter(2, 0.25, 'low') + >>> x = rng.normal(scale=np.sqrt(noise_power), size=time.shape) + >>> y = signal.lfilter(b, a, x) + >>> x += amp*np.sin(2*np.pi*freq*time) + >>> y += rng.normal(scale=0.1*np.sqrt(noise_power), size=time.shape) + + Compute and plot the coherence. + + >>> f, Cxy = signal.coherence(x, y, fs, nperseg=1024) + >>> plt.semilogy(f, Cxy) + >>> plt.xlabel('frequency [Hz]') + >>> plt.ylabel('Coherence') + >>> plt.show() + + """ + freqs, Pxx = welch(x, fs=fs, window=window, nperseg=nperseg, + noverlap=noverlap, nfft=nfft, detrend=detrend, + axis=axis) + _, Pyy = welch(y, fs=fs, window=window, nperseg=nperseg, noverlap=noverlap, + nfft=nfft, detrend=detrend, axis=axis) + _, Pxy = csd(x, y, fs=fs, window=window, nperseg=nperseg, + noverlap=noverlap, nfft=nfft, detrend=detrend, axis=axis) + + Cxy = np.abs(Pxy)**2 / Pxx / Pyy + + return freqs, Cxy + + +def _spectral_helper(x, y, fs=1.0, window='hann', nperseg=None, noverlap=None, + nfft=None, detrend='constant', return_onesided=True, + scaling='density', axis=-1, mode='psd', boundary=None, + padded=False): + """Calculate various forms of windowed FFTs for PSD, CSD, etc. + + This is a helper function that implements the commonality between + the stft, psd, csd, and spectrogram functions. It is not designed to + be called externally. The windows are not averaged over; the result + from each window is returned. + + Parameters + ---------- + x : array_like + Array or sequence containing the data to be analyzed. + y : array_like + Array or sequence containing the data to be analyzed. If this is + the same object in memory as `x` (i.e. ``_spectral_helper(x, + x, ...)``), the extra computations are spared. + fs : float, optional + Sampling frequency of the time series. Defaults to 1.0. + window : str or tuple or array_like, optional + Desired window to use. If `window` is a string or tuple, it is + passed to `get_window` to generate the window values, which are + DFT-even by default. See `get_window` for a list of windows and + required parameters. If `window` is array_like it will be used + directly as the window and its length must be nperseg. Defaults + to a Hann window. + nperseg : int, optional + Length of each segment. Defaults to None, but if window is str or + tuple, is set to 256, and if window is array_like, is set to the + length of the window. + noverlap : int, optional + Number of points to overlap between segments. If `None`, + ``noverlap = nperseg // 2``. Defaults to `None`. + nfft : int, optional + Length of the FFT used, if a zero padded FFT is desired. If + `None`, the FFT length is `nperseg`. Defaults to `None`. + detrend : str or function or `False`, optional + Specifies how to detrend each segment. If `detrend` is a + string, it is passed as the `type` argument to the `detrend` + function. 
If it is a function, it takes a segment and returns a + detrended segment. If `detrend` is `False`, no detrending is + done. Defaults to 'constant'. + return_onesided : bool, optional + If `True`, return a one-sided spectrum for real data. If + `False` return a two-sided spectrum. Defaults to `True`, but for + complex data, a two-sided spectrum is always returned. + scaling : { 'density', 'spectrum' }, optional + Selects between computing the cross spectral density ('density') + where `Pxy` has units of V**2/Hz and computing the cross + spectrum ('spectrum') where `Pxy` has units of V**2, if `x` + and `y` are measured in V and `fs` is measured in Hz. + Defaults to 'density' + axis : int, optional + Axis along which the FFTs are computed; the default is over the + last axis (i.e. ``axis=-1``). + mode: str {'psd', 'stft'}, optional + Defines what kind of return values are expected. Defaults to + 'psd'. + boundary : str or None, optional + Specifies whether the input signal is extended at both ends, and + how to generate the new values, in order to center the first + windowed segment on the first input point. This has the benefit + of enabling reconstruction of the first input point when the + employed window function starts at zero. Valid options are + ``['even', 'odd', 'constant', 'zeros', None]``. Defaults to + `None`. + padded : bool, optional + Specifies whether the input signal is zero-padded at the end to + make the signal fit exactly into an integer number of window + segments, so that all of the signal is included in the output. + Defaults to `False`. Padding occurs after boundary extension, if + `boundary` is not `None`, and `padded` is `True`. + + Returns + ------- + freqs : ndarray + Array of sample frequencies. + t : ndarray + Array of times corresponding to each data segment + result : ndarray + Array of output data, contents dependent on *mode* kwarg. + + Notes + ----- + Adapted from matplotlib.mlab + + .. versionadded:: 0.16.0 + """ + if mode not in ['psd', 'stft']: + raise ValueError(f"Unknown value for mode {mode}, must be one of: " + "{'psd', 'stft'}") + + boundary_funcs = {'even': even_ext, + 'odd': odd_ext, + 'constant': const_ext, + 'zeros': zero_ext, + None: None} + + if boundary not in boundary_funcs: + raise ValueError(f"Unknown boundary option '{boundary}', " + f"must be one of: {list(boundary_funcs.keys())}") + + # If x and y are the same object we can save ourselves some computation. 
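+    # Note this is an identity check, not an equality check: only when x and y
+    # are literally the same object is the second set of windowed FFTs skipped
+    # and an auto-spectrum computed instead of a cross-spectrum.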
+ same_data = y is x + + if not same_data and mode != 'psd': + raise ValueError("x and y must be equal if mode is 'stft'") + + axis = int(axis) + + # Ensure we have np.arrays, get outdtype + x = np.asarray(x) + if not same_data: + y = np.asarray(y) + outdtype = np.result_type(x, y, np.complex64) + else: + outdtype = np.result_type(x, np.complex64) + + if not same_data: + # Check if we can broadcast the outer axes together + xouter = list(x.shape) + youter = list(y.shape) + xouter.pop(axis) + youter.pop(axis) + try: + outershape = np.broadcast(np.empty(xouter), np.empty(youter)).shape + except ValueError as e: + raise ValueError('x and y cannot be broadcast together.') from e + + if same_data: + if x.size == 0: + return np.empty(x.shape), np.empty(x.shape), np.empty(x.shape) + else: + if x.size == 0 or y.size == 0: + outshape = outershape + (min([x.shape[axis], y.shape[axis]]),) + emptyout = np.moveaxis(np.empty(outshape), -1, axis) + return emptyout, emptyout, emptyout + + if x.ndim > 1: + if axis != -1: + x = np.moveaxis(x, axis, -1) + if not same_data and y.ndim > 1: + y = np.moveaxis(y, axis, -1) + + # Check if x and y are the same length, zero-pad if necessary + if not same_data: + if x.shape[-1] != y.shape[-1]: + if x.shape[-1] < y.shape[-1]: + pad_shape = list(x.shape) + pad_shape[-1] = y.shape[-1] - x.shape[-1] + x = np.concatenate((x, np.zeros(pad_shape)), -1) + else: + pad_shape = list(y.shape) + pad_shape[-1] = x.shape[-1] - y.shape[-1] + y = np.concatenate((y, np.zeros(pad_shape)), -1) + + if nperseg is not None: # if specified by user + nperseg = int(nperseg) + if nperseg < 1: + raise ValueError('nperseg must be a positive integer') + + # parse window; if array like, then set nperseg = win.shape + win, nperseg = _triage_segments(window, nperseg, input_length=x.shape[-1]) + + if nfft is None: + nfft = nperseg + elif nfft < nperseg: + raise ValueError('nfft must be greater than or equal to nperseg.') + else: + nfft = int(nfft) + + if noverlap is None: + noverlap = nperseg//2 + else: + noverlap = int(noverlap) + if noverlap >= nperseg: + raise ValueError('noverlap must be less than nperseg.') + nstep = nperseg - noverlap + + # Padding occurs after boundary extension, so that the extended signal ends + # in zeros, instead of introducing an impulse at the end. + # I.e. if x = [..., 3, 2] + # extend then pad -> [..., 3, 2, 2, 3, 0, 0, 0] + # pad then extend -> [..., 3, 2, 0, 0, 0, 2, 3] + + if boundary is not None: + ext_func = boundary_funcs[boundary] + x = ext_func(x, nperseg//2, axis=-1) + if not same_data: + y = ext_func(y, nperseg//2, axis=-1) + + if padded: + # Pad to integer number of windowed segments + # I.e. make x.shape[-1] = nperseg + (nseg-1)*nstep, with integer nseg + nadd = (-(x.shape[-1]-nperseg) % nstep) % nperseg + zeros_shape = list(x.shape[:-1]) + [nadd] + x = np.concatenate((x, np.zeros(zeros_shape)), axis=-1) + if not same_data: + zeros_shape = list(y.shape[:-1]) + [nadd] + y = np.concatenate((y, np.zeros(zeros_shape)), axis=-1) + + # Handle detrending and window functions + if not detrend: + def detrend_func(d): + return d + elif not hasattr(detrend, '__call__'): + def detrend_func(d): + return _signaltools.detrend(d, type=detrend, axis=-1) + elif axis != -1: + # Wrap this function so that it receives a shape that it could + # reasonably expect to receive. 
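+        # The data axis was moved to the last position earlier in this
+        # function, so a user-supplied callable (written for the original
+        # layout) gets the axis moved back before the call and the detrended
+        # result is moved to the end again afterwards.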
+ def detrend_func(d): + d = np.moveaxis(d, -1, axis) + d = detrend(d) + return np.moveaxis(d, axis, -1) + else: + detrend_func = detrend + + if np.result_type(win, np.complex64) != outdtype: + win = win.astype(outdtype) + + if scaling == 'density': + scale = 1.0 / (fs * (win*win).sum()) + elif scaling == 'spectrum': + scale = 1.0 / win.sum()**2 + else: + raise ValueError(f'Unknown scaling: {scaling!r}') + + if mode == 'stft': + scale = np.sqrt(scale) + + if return_onesided: + if np.iscomplexobj(x): + sides = 'twosided' + warnings.warn('Input data is complex, switching to return_onesided=False', + stacklevel=3) + else: + sides = 'onesided' + if not same_data: + if np.iscomplexobj(y): + sides = 'twosided' + warnings.warn('Input data is complex, switching to ' + 'return_onesided=False', + stacklevel=3) + else: + sides = 'twosided' + + if sides == 'twosided': + freqs = sp_fft.fftfreq(nfft, 1/fs) + elif sides == 'onesided': + freqs = sp_fft.rfftfreq(nfft, 1/fs) + + # Perform the windowed FFTs + result = _fft_helper(x, win, detrend_func, nperseg, noverlap, nfft, sides) + + if not same_data: + # All the same operations on the y data + result_y = _fft_helper(y, win, detrend_func, nperseg, noverlap, nfft, + sides) + result = np.conjugate(result) * result_y + elif mode == 'psd': + result = np.conjugate(result) * result + + result *= scale + if sides == 'onesided' and mode == 'psd': + if nfft % 2: + result[..., 1:] *= 2 + else: + # Last point is unpaired Nyquist freq point, don't double + result[..., 1:-1] *= 2 + + time = np.arange(nperseg/2, x.shape[-1] - nperseg/2 + 1, + nperseg - noverlap)/float(fs) + if boundary is not None: + time -= (nperseg/2) / fs + + result = result.astype(outdtype) + + # All imaginary parts are zero anyways + if same_data and mode != 'stft': + result = result.real + + # Output is going to have new last axis for time/window index, so a + # negative axis index shifts down one + if axis < 0: + axis -= 1 + + # Roll frequency axis back to axis where the data came from + result = np.moveaxis(result, -1, axis) + + return freqs, time, result + + +def _fft_helper(x, win, detrend_func, nperseg, noverlap, nfft, sides): + """ + Calculate windowed FFT, for internal use by + `scipy.signal._spectral_helper`. + + This is a helper function that does the main FFT calculation for + `_spectral helper`. All input validation is performed there, and the + data axis is assumed to be the last axis of x. It is not designed to + be called externally. The windows are not averaged over; the result + from each window is returned. + + Returns + ------- + result : ndarray + Array of FFT data + + Notes + ----- + Adapted from matplotlib.mlab + + .. versionadded:: 0.16.0 + """ + # Created sliding window view of array + if nperseg == 1 and noverlap == 0: + result = x[..., np.newaxis] + else: + step = nperseg - noverlap + result = np.lib.stride_tricks.sliding_window_view( + x, window_shape=nperseg, axis=-1, writeable=True + ) + result = result[..., 0::step, :] + + # Detrend each data segment individually + result = detrend_func(result) + + # Apply window by multiplication + result = win * result + + # Perform the fft. Acts on last axis by default. Zero-pads automatically + if sides == 'twosided': + func = sp_fft.fft + else: + result = result.real + func = sp_fft.rfft + result = func(result, n=nfft) + + return result + + +def _triage_segments(window, nperseg, input_length): + """ + Parses window and nperseg arguments for spectrogram and _spectral_helper. 
+ This is a helper function, not meant to be called externally. + + Parameters + ---------- + window : string, tuple, or ndarray + If window is specified by a string or tuple and nperseg is not + specified, nperseg is set to the default of 256 and returns a window of + that length. + If instead the window is array_like and nperseg is not specified, then + nperseg is set to the length of the window. A ValueError is raised if + the user supplies both an array_like window and a value for nperseg but + nperseg does not equal the length of the window. + + nperseg : int + Length of each segment + + input_length: int + Length of input signal, i.e. x.shape[-1]. Used to test for errors. + + Returns + ------- + win : ndarray + window. If function was called with string or tuple than this will hold + the actual array used as a window. + + nperseg : int + Length of each segment. If window is str or tuple, nperseg is set to + 256. If window is array_like, nperseg is set to the length of the + window. + """ + # parse window; if array like, then set nperseg = win.shape + if isinstance(window, str) or isinstance(window, tuple): + # if nperseg not specified + if nperseg is None: + nperseg = 256 # then change to default + if nperseg > input_length: + warnings.warn(f'nperseg = {nperseg:d} is greater than input length ' + f' = {input_length:d}, using nperseg = {input_length:d}', + stacklevel=3) + nperseg = input_length + win = get_window(window, nperseg) + else: + win = np.asarray(window) + if len(win.shape) != 1: + raise ValueError('window must be 1-D') + if input_length < win.shape[-1]: + raise ValueError('window is longer than input signal') + if nperseg is None: + nperseg = win.shape[0] + elif nperseg is not None: + if nperseg != win.shape[0]: + raise ValueError("value specified for nperseg is different" + " from length of window") + return win, nperseg + + +def _median_bias(n): + """ + Returns the bias of the median of a set of periodograms relative to + the mean. + + See Appendix B from [1]_ for details. + + Parameters + ---------- + n : int + Numbers of periodograms being averaged. + + Returns + ------- + bias : float + Calculated bias. + + References + ---------- + .. [1] B. Allen, W.G. Anderson, P.R. Brady, D.A. Brown, J.D.E. Creighton. + "FINDCHIRP: an algorithm for detection of gravitational waves from + inspiraling compact binaries", Physical Review D 85, 2012, + :arxiv:`gr-qc/0509116` + """ + ii_2 = 2 * np.arange(1., (n-1) // 2 + 1) + return 1 + np.sum(1. / (ii_2 + 1) - 1. 
/ ii_2) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/_spline.cpython-310-x86_64-linux-gnu.so b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/_spline.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..c776c84deedabf62573fdee3f6b0bea1089d0092 Binary files /dev/null and b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/_spline.cpython-310-x86_64-linux-gnu.so differ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/_spline.pyi b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/_spline.pyi new file mode 100644 index 0000000000000000000000000000000000000000..c4225577db7ea188a2add225ecec1fbec855de06 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/_spline.pyi @@ -0,0 +1,34 @@ + +import numpy as np +from numpy.typing import NDArray + +FloatingArray = NDArray[np.float32] | NDArray[np.float64] +ComplexArray = NDArray[np.complex64] | NDArray[np.complex128] +FloatingComplexArray = FloatingArray | ComplexArray + + +def symiirorder1_ic(signal: FloatingComplexArray, + c0: float, + z1: float, + precision: float) -> FloatingComplexArray: + ... + + +def symiirorder2_ic_fwd(signal: FloatingArray, + r: float, + omega: float, + precision: float) -> FloatingArray: + ... + + +def symiirorder2_ic_bwd(signal: FloatingArray, + r: float, + omega: float, + precision: float) -> FloatingArray: + ... + + +def sepfir2d(input: FloatingComplexArray, + hrow: FloatingComplexArray, + hcol: FloatingComplexArray) -> FloatingComplexArray: + ... diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/_spline_filters.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/_spline_filters.py new file mode 100644 index 0000000000000000000000000000000000000000..eb7884c5cc5544e9fc6d37476016e7a2d0e81d57 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/_spline_filters.py @@ -0,0 +1,808 @@ +from numpy import (asarray, pi, zeros_like, + array, arctan2, tan, ones, arange, floor, + r_, atleast_1d, sqrt, exp, greater, cos, add, sin, + moveaxis, abs, arctan, complex64, float32) +import numpy as np + +from scipy._lib._util import normalize_axis_index + +# From splinemodule.c +from ._spline import sepfir2d, symiirorder1_ic, symiirorder2_ic_fwd, symiirorder2_ic_bwd +from ._signaltools import lfilter, sosfilt, lfiltic +from ._arraytools import axis_slice, axis_reverse + +from scipy.interpolate import BSpline + + +__all__ = ['spline_filter', 'gauss_spline', + 'cspline1d', 'qspline1d', 'qspline2d', 'cspline2d', + 'cspline1d_eval', 'qspline1d_eval', 'symiirorder1', 'symiirorder2'] + + +def spline_filter(Iin, lmbda=5.0): + """Smoothing spline (cubic) filtering of a rank-2 array. + + Filter an input data set, `Iin`, using a (cubic) smoothing spline of + fall-off `lmbda`. + + Parameters + ---------- + Iin : array_like + input data set + lmbda : float, optional + spline smoothing fall-off value, default is `5.0`. 
+ + Returns + ------- + res : ndarray + filtered input data + + Examples + -------- + We can filter an multi dimensional signal (ex: 2D image) using cubic + B-spline filter: + + >>> import numpy as np + >>> from scipy.signal import spline_filter + >>> import matplotlib.pyplot as plt + >>> orig_img = np.eye(20) # create an image + >>> orig_img[10, :] = 1.0 + >>> sp_filter = spline_filter(orig_img, lmbda=0.1) + >>> f, ax = plt.subplots(1, 2, sharex=True) + >>> for ind, data in enumerate([[orig_img, "original image"], + ... [sp_filter, "spline filter"]]): + ... ax[ind].imshow(data[0], cmap='gray_r') + ... ax[ind].set_title(data[1]) + >>> plt.tight_layout() + >>> plt.show() + + """ + if Iin.dtype not in [np.float32, np.float64, np.complex64, np.complex128]: + raise TypeError(f"Invalid data type for Iin: {Iin.dtype = }") + + # XXX: note that complex-valued computations are done in single precision + # this is historic, and the root reason is unclear, + # see https://github.com/scipy/scipy/issues/9209 + # Attempting to work in complex double precision leads to symiirorder1 + # failing to converge for the boundary conditions. + intype = Iin.dtype + hcol = array([1.0, 4.0, 1.0], np.float32) / 6.0 + if intype == np.complex128: + Iin = Iin.astype(np.complex64) + + ck = cspline2d(Iin, lmbda) + out = sepfir2d(ck, hcol, hcol) + out = out.astype(intype) + return out + + +_splinefunc_cache = {} + + +def gauss_spline(x, n): + r"""Gaussian approximation to B-spline basis function of order n. + + Parameters + ---------- + x : array_like + a knot vector + n : int + The order of the spline. Must be non-negative, i.e., n >= 0 + + Returns + ------- + res : ndarray + B-spline basis function values approximated by a zero-mean Gaussian + function. + + Notes + ----- + The B-spline basis function can be approximated well by a zero-mean + Gaussian function with standard-deviation equal to :math:`\sigma=(n+1)/12` + for large `n` : + + .. math:: \frac{1}{\sqrt {2\pi\sigma^2}}exp(-\frac{x^2}{2\sigma}) + + References + ---------- + .. [1] Bouma H., Vilanova A., Bescos J.O., ter Haar Romeny B.M., Gerritsen + F.A. (2007) Fast and Accurate Gaussian Derivatives Based on B-Splines. In: + Sgallari F., Murli A., Paragios N. (eds) Scale Space and Variational + Methods in Computer Vision. SSVM 2007. Lecture Notes in Computer + Science, vol 4485. Springer, Berlin, Heidelberg + .. 
[2] http://folk.uio.no/inf3330/scripting/doc/python/SciPy/tutorial/old/node24.html + + Examples + -------- + We can calculate B-Spline basis functions approximated by a gaussian + distribution: + + >>> import numpy as np + >>> from scipy.signal import gauss_spline + >>> knots = np.array([-1.0, 0.0, -1.0]) + >>> gauss_spline(knots, 3) + array([0.15418033, 0.6909883, 0.15418033]) # may vary + + """ + x = asarray(x) + signsq = (n + 1) / 12.0 + return 1 / sqrt(2 * pi * signsq) * exp(-x ** 2 / 2 / signsq) + + +def _cubic(x): + x = asarray(x, dtype=float) + b = BSpline.basis_element([-2, -1, 0, 1, 2], extrapolate=False) + out = b(x) + out[(x < -2) | (x > 2)] = 0 + return out + + +def _quadratic(x): + x = abs(asarray(x, dtype=float)) + b = BSpline.basis_element([-1.5, -0.5, 0.5, 1.5], extrapolate=False) + out = b(x) + out[(x < -1.5) | (x > 1.5)] = 0 + return out + + +def _coeff_smooth(lam): + xi = 1 - 96 * lam + 24 * lam * sqrt(3 + 144 * lam) + omeg = arctan2(sqrt(144 * lam - 1), sqrt(xi)) + rho = (24 * lam - 1 - sqrt(xi)) / (24 * lam) + rho = rho * sqrt((48 * lam + 24 * lam * sqrt(3 + 144 * lam)) / xi) + return rho, omeg + + +def _hc(k, cs, rho, omega): + return (cs / sin(omega) * (rho ** k) * sin(omega * (k + 1)) * + greater(k, -1)) + + +def _hs(k, cs, rho, omega): + c0 = (cs * cs * (1 + rho * rho) / (1 - rho * rho) / + (1 - 2 * rho * rho * cos(2 * omega) + rho ** 4)) + gamma = (1 - rho * rho) / (1 + rho * rho) / tan(omega) + ak = abs(k) + return c0 * rho ** ak * (cos(omega * ak) + gamma * sin(omega * ak)) + + +def _cubic_smooth_coeff(signal, lamb): + rho, omega = _coeff_smooth(lamb) + cs = 1 - 2 * rho * cos(omega) + rho * rho + K = len(signal) + k = arange(K) + + zi_2 = (_hc(0, cs, rho, omega) * signal[0] + + add.reduce(_hc(k + 1, cs, rho, omega) * signal)) + zi_1 = (_hc(0, cs, rho, omega) * signal[0] + + _hc(1, cs, rho, omega) * signal[1] + + add.reduce(_hc(k + 2, cs, rho, omega) * signal)) + + # Forward filter: + # for n in range(2, K): + # yp[n] = (cs * signal[n] + 2 * rho * cos(omega) * yp[n - 1] - + # rho * rho * yp[n - 2]) + zi = lfiltic(cs, r_[1, -2 * rho * cos(omega), rho * rho], r_[zi_1, zi_2]) + zi = zi.reshape(1, -1) + + sos = r_[cs, 0, 0, 1, -2 * rho * cos(omega), rho * rho] + sos = sos.reshape(1, -1) + + yp, _ = sosfilt(sos, signal[2:], zi=zi) + yp = r_[zi_2, zi_1, yp] + + # Reverse filter: + # for n in range(K - 3, -1, -1): + # y[n] = (cs * yp[n] + 2 * rho * cos(omega) * y[n + 1] - + # rho * rho * y[n + 2]) + + zi_2 = add.reduce((_hs(k, cs, rho, omega) + + _hs(k + 1, cs, rho, omega)) * signal[::-1]) + zi_1 = add.reduce((_hs(k - 1, cs, rho, omega) + + _hs(k + 2, cs, rho, omega)) * signal[::-1]) + + zi = lfiltic(cs, r_[1, -2 * rho * cos(omega), rho * rho], r_[zi_1, zi_2]) + zi = zi.reshape(1, -1) + y, _ = sosfilt(sos, yp[-3::-1], zi=zi) + y = r_[y[::-1], zi_1, zi_2] + return y + + +def _cubic_coeff(signal): + zi = -2 + sqrt(3) + K = len(signal) + powers = zi ** arange(K) + + if K == 1: + yplus = signal[0] + zi * add.reduce(powers * signal) + output = zi / (zi - 1) * yplus + return atleast_1d(output) + + # Forward filter: + # yplus[0] = signal[0] + zi * add.reduce(powers * signal) + # for k in range(1, K): + # yplus[k] = signal[k] + zi * yplus[k - 1] + + state = lfiltic(1, r_[1, -zi], atleast_1d(add.reduce(powers * signal))) + + b = ones(1) + a = r_[1, -zi] + yplus, _ = lfilter(b, a, signal, zi=state) + + # Reverse filter: + # output[K - 1] = zi / (zi - 1) * yplus[K - 1] + # for k in range(K - 2, -1, -1): + # output[k] = zi * (output[k + 1] - yplus[k]) + out_last = zi / (zi - 1) * 
yplus[K - 1] + state = lfiltic(-zi, r_[1, -zi], atleast_1d(out_last)) + + b = asarray([-zi]) + output, _ = lfilter(b, a, yplus[-2::-1], zi=state) + output = r_[output[::-1], out_last] + return output * 6.0 + + +def _quadratic_coeff(signal): + zi = -3 + 2 * sqrt(2.0) + K = len(signal) + powers = zi ** arange(K) + + if K == 1: + yplus = signal[0] + zi * add.reduce(powers * signal) + output = zi / (zi - 1) * yplus + return atleast_1d(output) + + # Forward filter: + # yplus[0] = signal[0] + zi * add.reduce(powers * signal) + # for k in range(1, K): + # yplus[k] = signal[k] + zi * yplus[k - 1] + + state = lfiltic(1, r_[1, -zi], atleast_1d(add.reduce(powers * signal))) + + b = ones(1) + a = r_[1, -zi] + yplus, _ = lfilter(b, a, signal, zi=state) + + # Reverse filter: + # output[K - 1] = zi / (zi - 1) * yplus[K - 1] + # for k in range(K - 2, -1, -1): + # output[k] = zi * (output[k + 1] - yplus[k]) + out_last = zi / (zi - 1) * yplus[K - 1] + state = lfiltic(-zi, r_[1, -zi], atleast_1d(out_last)) + + b = asarray([-zi]) + output, _ = lfilter(b, a, yplus[-2::-1], zi=state) + output = r_[output[::-1], out_last] + return output * 8.0 + + +def compute_root_from_lambda(lamb): + tmp = sqrt(3 + 144 * lamb) + xi = 1 - 96 * lamb + 24 * lamb * tmp + omega = arctan(sqrt((144 * lamb - 1.0) / xi)) + tmp2 = sqrt(xi) + r = ((24 * lamb - 1 - tmp2) / (24 * lamb) * + sqrt(48*lamb + 24 * lamb * tmp) / tmp2) + return r, omega + + +def cspline1d(signal, lamb=0.0): + """ + Compute cubic spline coefficients for rank-1 array. + + Find the cubic spline coefficients for a 1-D signal assuming + mirror-symmetric boundary conditions. To obtain the signal back from the + spline representation mirror-symmetric-convolve these coefficients with a + length 3 FIR window [1.0, 4.0, 1.0]/ 6.0 . + + Parameters + ---------- + signal : ndarray + A rank-1 array representing samples of a signal. + lamb : float, optional + Smoothing coefficient, default is 0.0. + + Returns + ------- + c : ndarray + Cubic spline coefficients. + + See Also + -------- + cspline1d_eval : Evaluate a cubic spline at the new set of points. + + Examples + -------- + We can filter a signal to reduce and smooth out high-frequency noise with + a cubic spline: + + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> from scipy.signal import cspline1d, cspline1d_eval + >>> rng = np.random.default_rng() + >>> sig = np.repeat([0., 1., 0.], 100) + >>> sig += rng.standard_normal(len(sig))*0.05 # add noise + >>> time = np.linspace(0, len(sig)) + >>> filtered = cspline1d_eval(cspline1d(sig), time) + >>> plt.plot(sig, label="signal") + >>> plt.plot(time, filtered, label="filtered") + >>> plt.legend() + >>> plt.show() + + """ + if lamb != 0.0: + return _cubic_smooth_coeff(signal, lamb) + else: + return _cubic_coeff(signal) + + +def qspline1d(signal, lamb=0.0): + """Compute quadratic spline coefficients for rank-1 array. + + Parameters + ---------- + signal : ndarray + A rank-1 array representing samples of a signal. + lamb : float, optional + Smoothing coefficient (must be zero for now). + + Returns + ------- + c : ndarray + Quadratic spline coefficients. + + See Also + -------- + qspline1d_eval : Evaluate a quadratic spline at the new set of points. + + Notes + ----- + Find the quadratic spline coefficients for a 1-D signal assuming + mirror-symmetric boundary conditions. To obtain the signal back from the + spline representation mirror-symmetric-convolve these coefficients with a + length 3 FIR window [1.0, 6.0, 1.0]/ 8.0 . 
+ + Examples + -------- + We can filter a signal to reduce and smooth out high-frequency noise with + a quadratic spline: + + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> from scipy.signal import qspline1d, qspline1d_eval + >>> rng = np.random.default_rng() + >>> sig = np.repeat([0., 1., 0.], 100) + >>> sig += rng.standard_normal(len(sig))*0.05 # add noise + >>> time = np.linspace(0, len(sig)) + >>> filtered = qspline1d_eval(qspline1d(sig), time) + >>> plt.plot(sig, label="signal") + >>> plt.plot(time, filtered, label="filtered") + >>> plt.legend() + >>> plt.show() + + """ + if lamb != 0.0: + raise ValueError("Smoothing quadratic splines not supported yet.") + else: + return _quadratic_coeff(signal) + + +def collapse_2d(x, axis): + x = moveaxis(x, axis, -1) + x_shape = x.shape + x = x.reshape(-1, x.shape[-1]) + if not x.flags.c_contiguous: + x = x.copy() + return x, x_shape + + +def symiirorder_nd(func, input, *args, axis=-1, **kwargs): + axis = normalize_axis_index(axis, input.ndim) + input_shape = input.shape + input_ndim = input.ndim + if input.ndim > 1: + input, input_shape = collapse_2d(input, axis) + + out = func(input, *args, **kwargs) + + if input_ndim > 1: + out = out.reshape(input_shape) + out = moveaxis(out, -1, axis) + if not out.flags.c_contiguous: + out = out.copy() + return out + + +def qspline2d(signal, lamb=0.0, precision=-1.0): + """ + Coefficients for 2-D quadratic (2nd order) B-spline. + + Return the second-order B-spline coefficients over a regularly spaced + input grid for the two-dimensional input image. + + Parameters + ---------- + input : ndarray + The input signal. + lamb : float + Specifies the amount of smoothing in the transfer function. + precision : float + Specifies the precision for computing the infinite sum needed to apply + mirror-symmetric boundary conditions. + + Returns + ------- + output : ndarray + The filtered signal. + """ + if precision < 0.0 or precision >= 1.0: + if signal.dtype in [float32, complex64]: + precision = 1e-3 + else: + precision = 1e-6 + + if lamb > 0: + raise ValueError('lambda must be negative or zero') + + # normal quadratic spline + r = -3 + 2 * sqrt(2.0) + c0 = -r * 8.0 + z1 = r + + out = symiirorder_nd(symiirorder1, signal, c0, z1, precision, axis=-1) + out = symiirorder_nd(symiirorder1, out, c0, z1, precision, axis=0) + return out + + +def cspline2d(signal, lamb=0.0, precision=-1.0): + """ + Coefficients for 2-D cubic (3rd order) B-spline. + + Return the third-order B-spline coefficients over a regularly spaced + input grid for the two-dimensional input image. + + Parameters + ---------- + input : ndarray + The input signal. + lamb : float + Specifies the amount of smoothing in the transfer function. + precision : float + Specifies the precision for computing the infinite sum needed to apply + mirror-symmetric boundary conditions. + + Returns + ------- + output : ndarray + The filtered signal. 
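+
+    Examples
+    --------
+    A minimal sketch: compute the coefficient array for a random image and
+    mirror-convolve it with the length-3 FIR window ``[1., 4., 1.] / 6.``
+    (via `sepfir2d`) to map the coefficients back to signal values; both
+    arrays keep the shape of the input:
+
+    >>> import numpy as np
+    >>> from scipy.signal import cspline2d, sepfir2d
+    >>> rng = np.random.default_rng()
+    >>> img = rng.standard_normal((64, 64))
+    >>> coeffs = cspline2d(img, lamb=0.0)
+    >>> h = np.array([1., 4., 1.]) / 6.
+    >>> rec = sepfir2d(coeffs, h, h)
+    >>> coeffs.shape == rec.shape == img.shape
+    True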
+ """ + if precision < 0.0 or precision >= 1.0: + if signal.dtype in [float32, complex64]: + precision = 1e-3 + else: + precision = 1e-6 + + if lamb <= 1 / 144.0: + # Normal cubic spline + r = -2 + sqrt(3.0) + out = symiirorder_nd( + symiirorder1, signal, -r * 6.0, r, precision=precision, axis=-1) + out = symiirorder_nd( + symiirorder1, out, -r * 6.0, r, precision=precision, axis=0) + return out + + r, omega = compute_root_from_lambda(lamb) + out = symiirorder_nd(symiirorder2, signal, r, omega, + precision=precision, axis=-1) + out = symiirorder_nd(symiirorder2, out, r, omega, + precision=precision, axis=0) + return out + + +def cspline1d_eval(cj, newx, dx=1.0, x0=0): + """Evaluate a cubic spline at the new set of points. + + `dx` is the old sample-spacing while `x0` was the old origin. In + other-words the old-sample points (knot-points) for which the `cj` + represent spline coefficients were at equally-spaced points of: + + oldx = x0 + j*dx j=0...N-1, with N=len(cj) + + Edges are handled using mirror-symmetric boundary conditions. + + Parameters + ---------- + cj : ndarray + cublic spline coefficients + newx : ndarray + New set of points. + dx : float, optional + Old sample-spacing, the default value is 1.0. + x0 : int, optional + Old origin, the default value is 0. + + Returns + ------- + res : ndarray + Evaluated a cubic spline points. + + See Also + -------- + cspline1d : Compute cubic spline coefficients for rank-1 array. + + Examples + -------- + We can filter a signal to reduce and smooth out high-frequency noise with + a cubic spline: + + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> from scipy.signal import cspline1d, cspline1d_eval + >>> rng = np.random.default_rng() + >>> sig = np.repeat([0., 1., 0.], 100) + >>> sig += rng.standard_normal(len(sig))*0.05 # add noise + >>> time = np.linspace(0, len(sig)) + >>> filtered = cspline1d_eval(cspline1d(sig), time) + >>> plt.plot(sig, label="signal") + >>> plt.plot(time, filtered, label="filtered") + >>> plt.legend() + >>> plt.show() + + """ + newx = (asarray(newx) - x0) / float(dx) + res = zeros_like(newx, dtype=cj.dtype) + if res.size == 0: + return res + N = len(cj) + cond1 = newx < 0 + cond2 = newx > (N - 1) + cond3 = ~(cond1 | cond2) + # handle general mirror-symmetry + res[cond1] = cspline1d_eval(cj, -newx[cond1]) + res[cond2] = cspline1d_eval(cj, 2 * (N - 1) - newx[cond2]) + newx = newx[cond3] + if newx.size == 0: + return res + result = zeros_like(newx, dtype=cj.dtype) + jlower = floor(newx - 2).astype(int) + 1 + for i in range(4): + thisj = jlower + i + indj = thisj.clip(0, N - 1) # handle edge cases + result += cj[indj] * _cubic(newx - thisj) + res[cond3] = result + return res + + +def qspline1d_eval(cj, newx, dx=1.0, x0=0): + """Evaluate a quadratic spline at the new set of points. + + Parameters + ---------- + cj : ndarray + Quadratic spline coefficients + newx : ndarray + New set of points. + dx : float, optional + Old sample-spacing, the default value is 1.0. + x0 : int, optional + Old origin, the default value is 0. + + Returns + ------- + res : ndarray + Evaluated a quadratic spline points. + + See Also + -------- + qspline1d : Compute quadratic spline coefficients for rank-1 array. + + Notes + ----- + `dx` is the old sample-spacing while `x0` was the old origin. 
In + other-words the old-sample points (knot-points) for which the `cj` + represent spline coefficients were at equally-spaced points of:: + + oldx = x0 + j*dx j=0...N-1, with N=len(cj) + + Edges are handled using mirror-symmetric boundary conditions. + + Examples + -------- + We can filter a signal to reduce and smooth out high-frequency noise with + a quadratic spline: + + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> from scipy.signal import qspline1d, qspline1d_eval + >>> rng = np.random.default_rng() + >>> sig = np.repeat([0., 1., 0.], 100) + >>> sig += rng.standard_normal(len(sig))*0.05 # add noise + >>> time = np.linspace(0, len(sig)) + >>> filtered = qspline1d_eval(qspline1d(sig), time) + >>> plt.plot(sig, label="signal") + >>> plt.plot(time, filtered, label="filtered") + >>> plt.legend() + >>> plt.show() + + """ + newx = (asarray(newx) - x0) / dx + res = zeros_like(newx) + if res.size == 0: + return res + N = len(cj) + cond1 = newx < 0 + cond2 = newx > (N - 1) + cond3 = ~(cond1 | cond2) + # handle general mirror-symmetry + res[cond1] = qspline1d_eval(cj, -newx[cond1]) + res[cond2] = qspline1d_eval(cj, 2 * (N - 1) - newx[cond2]) + newx = newx[cond3] + if newx.size == 0: + return res + result = zeros_like(newx) + jlower = floor(newx - 1.5).astype(int) + 1 + for i in range(3): + thisj = jlower + i + indj = thisj.clip(0, N - 1) # handle edge cases + result += cj[indj] * _quadratic(newx - thisj) + res[cond3] = result + return res + + +def symiirorder1(signal, c0, z1, precision=-1.0): + """ + Implement a smoothing IIR filter with mirror-symmetric boundary conditions + using a cascade of first-order sections. + + The second section uses a reversed sequence. This implements a system with + the following transfer function and mirror-symmetric boundary conditions:: + + c0 + H(z) = --------------------- + (1-z1/z) (1 - z1 z) + + The resulting signal will have mirror symmetric boundary conditions + as well. + + Parameters + ---------- + signal : ndarray + The input signal. If 2D, then the filter will be applied in a batched + fashion across the last axis. + c0, z1 : scalar + Parameters in the transfer function. + precision : + Specifies the precision for calculating initial conditions + of the recursive filter based on mirror-symmetric input. + + Returns + ------- + output : ndarray + The filtered signal. + """ + if np.abs(z1) >= 1: + raise ValueError('|z1| must be less than 1.0') + + if signal.ndim > 2: + raise ValueError('Input must be 1D or 2D') + + squeeze_dim = False + if signal.ndim == 1: + signal = signal[None, :] + squeeze_dim = True + + if np.issubdtype(signal.dtype, np.integer): + signal = signal.astype(np.promote_types(signal.dtype, np.float32)) + + y0 = symiirorder1_ic(signal, z1, precision) + + # Apply first the system 1 / (1 - z1 * z^-1) + b = np.ones(1, dtype=signal.dtype) + a = np.r_[1, -z1] + a = a.astype(signal.dtype) + + # Compute the initial state for lfilter. + zii = y0 * z1 + + y1, _ = lfilter(b, a, axis_slice(signal, 1), zi=zii) + y1 = np.c_[y0, y1] + + # Compute backward symmetric condition and apply the system + # c0 / (1 - z1 * z) + b = np.asarray([c0], dtype=signal.dtype) + out_last = -c0 / (z1 - 1.0) * axis_slice(y1, -1) + + # Compute the initial state for lfilter. 
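+    # ``out_last`` is the mirror-symmetric boundary value for the reversed
+    # (anti-causal) section; multiplying it by ``z1`` yields the single filter
+    # state used to seed the backward pass through ``lfilter``.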
+ zii = out_last * z1 + + # Apply the system c0 / (1 - z1 * z) by reversing the output of the previous stage + out, _ = lfilter(b, a, axis_slice(y1, -2, step=-1), zi=zii) + out = np.c_[axis_reverse(out), out_last] + + if squeeze_dim: + out = out[0] + + return out + + +def symiirorder2(input, r, omega, precision=-1.0): + """ + Implement a smoothing IIR filter with mirror-symmetric boundary conditions + using a cascade of second-order sections. + + The second section uses a reversed sequence. This implements the following + transfer function:: + + cs^2 + H(z) = --------------------------------------- + (1 - a2/z - a3/z^2) (1 - a2 z - a3 z^2 ) + + where:: + + a2 = 2 * r * cos(omega) + a3 = - r ** 2 + cs = 1 - 2 * r * cos(omega) + r ** 2 + + Parameters + ---------- + input : ndarray + The input signal. + r, omega : float + Parameters in the transfer function. + precision : float + Specifies the precision for calculating initial conditions + of the recursive filter based on mirror-symmetric input. + + Returns + ------- + output : ndarray + The filtered signal. + """ + if r >= 1.0: + raise ValueError('r must be less than 1.0') + + if input.ndim > 2: + raise ValueError('Input must be 1D or 2D') + + if not input.flags.c_contiguous: + input = input.copy() + + squeeze_dim = False + if input.ndim == 1: + input = input[None, :] + squeeze_dim = True + + if np.issubdtype(input.dtype, np.integer): + input = input.astype(np.promote_types(input.dtype, np.float32)) + + rsq = r * r + a2 = 2 * r * np.cos(omega) + a3 = -rsq + cs = np.atleast_1d(1 - 2 * r * np.cos(omega) + rsq) + sos = np.atleast_2d(np.r_[cs, 0, 0, 1, -a2, -a3]).astype(input.dtype) + + # Find the starting (forward) conditions. + ic_fwd = symiirorder2_ic_fwd(input, r, omega, precision) + + # Apply first the system cs / (1 - a2 * z^-1 - a3 * z^-2) + # Compute the initial conditions in the form expected by sosfilt + # coef = np.asarray([[a3, a2], [0, a3]], dtype=input.dtype) + coef = np.r_[a3, a2, 0, a3].reshape(2, 2).astype(input.dtype) + zi = np.matmul(coef, ic_fwd[:, :, None])[:, :, 0] + + y_fwd, _ = sosfilt(sos, axis_slice(input, 2), zi=zi[None]) + y_fwd = np.c_[ic_fwd, y_fwd] + + # Then compute the symmetric backward starting conditions + ic_bwd = symiirorder2_ic_bwd(input, r, omega, precision) + + # Apply the system cs / (1 - a2 * z^1 - a3 * z^2) + # Compute the initial conditions in the form expected by sosfilt + zi = np.matmul(coef, ic_bwd[:, :, None])[:, :, 0] + y, _ = sosfilt(sos, axis_slice(y_fwd, -3, step=-1), zi=zi[None]) + out = np.c_[axis_reverse(y), axis_reverse(ic_bwd)] + + if squeeze_dim: + out = out[0] + + return out diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/_upfirdn.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/_upfirdn.py new file mode 100644 index 0000000000000000000000000000000000000000..d64cc142ff194b1404e380507289ddbaffab3359 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/_upfirdn.py @@ -0,0 +1,216 @@ +# Code adapted from "upfirdn" python library with permission: +# +# Copyright (c) 2009, Motorola, Inc +# +# All Rights Reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. 
+# +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# * Neither the name of Motorola nor the names of its contributors may be +# used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, +# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR +# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import numpy as np + +from ._upfirdn_apply import _output_len, _apply, mode_enum + +__all__ = ['upfirdn', '_output_len'] + +_upfirdn_modes = [ + 'constant', 'wrap', 'edge', 'smooth', 'symmetric', 'reflect', + 'antisymmetric', 'antireflect', 'line', +] + + +def _pad_h(h, up): + """Store coefficients in a transposed, flipped arrangement. + + For example, suppose upRate is 3, and the + input number of coefficients is 10, represented as h[0], ..., h[9]. + + Then the internal buffer will look like this:: + + h[9], h[6], h[3], h[0], // flipped phase 0 coefs + 0, h[7], h[4], h[1], // flipped phase 1 coefs (zero-padded) + 0, h[8], h[5], h[2], // flipped phase 2 coefs (zero-padded) + + """ + h_padlen = len(h) + (-len(h) % up) + h_full = np.zeros(h_padlen, h.dtype) + h_full[:len(h)] = h + h_full = h_full.reshape(-1, up).T[:, ::-1].ravel() + return h_full + + +def _check_mode(mode): + mode = mode.lower() + enum = mode_enum(mode) + return enum + + +class _UpFIRDn: + """Helper for resampling.""" + + def __init__(self, h, x_dtype, up, down): + h = np.asarray(h) + if h.ndim != 1 or h.size == 0: + raise ValueError('h must be 1-D with non-zero length') + self._output_type = np.result_type(h.dtype, x_dtype, np.float32) + h = np.asarray(h, self._output_type) + self._up = int(up) + self._down = int(down) + if self._up < 1 or self._down < 1: + raise ValueError('Both up and down must be >= 1') + # This both transposes, and "flips" each phase for filtering + self._h_trans_flip = _pad_h(h, self._up) + self._h_trans_flip = np.ascontiguousarray(self._h_trans_flip) + self._h_len_orig = len(h) + + def apply_filter(self, x, axis=-1, mode='constant', cval=0): + """Apply the prepared filter to the specified axis of N-D signal x.""" + output_len = _output_len(self._h_len_orig, x.shape[axis], + self._up, self._down) + # Explicit use of np.int64 for output_shape dtype avoids OverflowError + # when allocating large array on platforms where intp is 32 bits. 
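+        # Judging from the examples in `upfirdn` below, the resampled length
+        # computed above works out to ceil(((n_in - 1) * up + len(h)) / down).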
+ output_shape = np.asarray(x.shape, dtype=np.int64) + output_shape[axis] = output_len + out = np.zeros(output_shape, dtype=self._output_type, order='C') + axis = axis % x.ndim + mode = _check_mode(mode) + _apply(np.asarray(x, self._output_type), + self._h_trans_flip, out, + self._up, self._down, axis, mode, cval) + return out + + +def upfirdn(h, x, up=1, down=1, axis=-1, mode='constant', cval=0): + """Upsample, FIR filter, and downsample. + + Parameters + ---------- + h : array_like + 1-D FIR (finite-impulse response) filter coefficients. + x : array_like + Input signal array. + up : int, optional + Upsampling rate. Default is 1. + down : int, optional + Downsampling rate. Default is 1. + axis : int, optional + The axis of the input data array along which to apply the + linear filter. The filter is applied to each subarray along + this axis. Default is -1. + mode : str, optional + The signal extension mode to use. The set + ``{"constant", "symmetric", "reflect", "edge", "wrap"}`` correspond to + modes provided by `numpy.pad`. ``"smooth"`` implements a smooth + extension by extending based on the slope of the last 2 points at each + end of the array. ``"antireflect"`` and ``"antisymmetric"`` are + anti-symmetric versions of ``"reflect"`` and ``"symmetric"``. The mode + `"line"` extends the signal based on a linear trend defined by the + first and last points along the ``axis``. + + .. versionadded:: 1.4.0 + cval : float, optional + The constant value to use when ``mode == "constant"``. + + .. versionadded:: 1.4.0 + + Returns + ------- + y : ndarray + The output signal array. Dimensions will be the same as `x` except + for along `axis`, which will change size according to the `h`, + `up`, and `down` parameters. + + Notes + ----- + The algorithm is an implementation of the block diagram shown on page 129 + of the Vaidyanathan text [1]_ (Figure 4.3-8d). + + The direct approach of upsampling by factor of P with zero insertion, + FIR filtering of length ``N``, and downsampling by factor of Q is + O(N*Q) per output sample. The polyphase implementation used here is + O(N/P). + + .. versionadded:: 0.18 + + References + ---------- + .. [1] P. P. Vaidyanathan, Multirate Systems and Filter Banks, + Prentice Hall, 1993. + + Examples + -------- + Simple operations: + + >>> import numpy as np + >>> from scipy.signal import upfirdn + >>> upfirdn([1, 1, 1], [1, 1, 1]) # FIR filter + array([ 1., 2., 3., 2., 1.]) + >>> upfirdn([1], [1, 2, 3], 3) # upsampling with zeros insertion + array([ 1., 0., 0., 2., 0., 0., 3.]) + >>> upfirdn([1, 1, 1], [1, 2, 3], 3) # upsampling with sample-and-hold + array([ 1., 1., 1., 2., 2., 2., 3., 3., 3.]) + >>> upfirdn([.5, 1, .5], [1, 1, 1], 2) # linear interpolation + array([ 0.5, 1. , 1. , 1. , 1. , 1. , 0.5]) + >>> upfirdn([1], np.arange(10), 1, 3) # decimation by 3 + array([ 0., 3., 6., 9.]) + >>> upfirdn([.5, 1, .5], np.arange(10), 2, 3) # linear interp, rate 2/3 + array([ 0. , 1. , 2.5, 4. , 5.5, 7. 
, 8.5]) + + Apply a single filter to multiple signals: + + >>> x = np.reshape(np.arange(8), (4, 2)) + >>> x + array([[0, 1], + [2, 3], + [4, 5], + [6, 7]]) + + Apply along the last dimension of ``x``: + + >>> h = [1, 1] + >>> upfirdn(h, x, 2) + array([[ 0., 0., 1., 1.], + [ 2., 2., 3., 3.], + [ 4., 4., 5., 5.], + [ 6., 6., 7., 7.]]) + + Apply along the 0th dimension of ``x``: + + >>> upfirdn(h, x, 2, axis=0) + array([[ 0., 1.], + [ 0., 1.], + [ 2., 3.], + [ 2., 3.], + [ 4., 5.], + [ 4., 5.], + [ 6., 7.], + [ 6., 7.]]) + """ + x = np.asarray(x) + ufd = _UpFIRDn(h, x.dtype, up, down) + # This is equivalent to (but faster than) using np.apply_along_axis + return ufd.apply_filter(x, axis, mode, cval) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/_waveforms.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/_waveforms.py new file mode 100644 index 0000000000000000000000000000000000000000..a6be46cfd38674ee8c3ae89c9762461440c1e620 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/_waveforms.py @@ -0,0 +1,696 @@ +# Author: Travis Oliphant +# 2003 +# +# Feb. 2010: Updated by Warren Weckesser: +# Rewrote much of chirp() +# Added sweep_poly() +import numpy as np +from numpy import asarray, zeros, place, nan, mod, pi, extract, log, sqrt, \ + exp, cos, sin, polyval, polyint + + +__all__ = ['sawtooth', 'square', 'gausspulse', 'chirp', 'sweep_poly', + 'unit_impulse'] + + +def sawtooth(t, width=1): + """ + Return a periodic sawtooth or triangle waveform. + + The sawtooth waveform has a period ``2*pi``, rises from -1 to 1 on the + interval 0 to ``width*2*pi``, then drops from 1 to -1 on the interval + ``width*2*pi`` to ``2*pi``. `width` must be in the interval [0, 1]. + + Note that this is not band-limited. It produces an infinite number + of harmonics, which are aliased back and forth across the frequency + spectrum. + + Parameters + ---------- + t : array_like + Time. + width : array_like, optional + Width of the rising ramp as a proportion of the total cycle. + Default is 1, producing a rising ramp, while 0 produces a falling + ramp. `width` = 0.5 produces a triangle wave. + If an array, causes wave shape to change over time, and must be the + same length as t. + + Returns + ------- + y : ndarray + Output array containing the sawtooth waveform. 
+ + Examples + -------- + A 5 Hz waveform sampled at 500 Hz for 1 second: + + >>> import numpy as np + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + >>> t = np.linspace(0, 1, 500) + >>> plt.plot(t, signal.sawtooth(2 * np.pi * 5 * t)) + + """ + t, w = asarray(t), asarray(width) + w = asarray(w + (t - t)) + t = asarray(t + (w - w)) + if t.dtype.char in ['fFdD']: + ytype = t.dtype.char + else: + ytype = 'd' + y = zeros(t.shape, ytype) + + # width must be between 0 and 1 inclusive + mask1 = (w > 1) | (w < 0) + place(y, mask1, nan) + + # take t modulo 2*pi + tmod = mod(t, 2 * pi) + + # on the interval 0 to width*2*pi function is + # tmod / (pi*w) - 1 + mask2 = (1 - mask1) & (tmod < w * 2 * pi) + tsub = extract(mask2, tmod) + wsub = extract(mask2, w) + place(y, mask2, tsub / (pi * wsub) - 1) + + # on the interval width*2*pi to 2*pi function is + # (pi*(w+1)-tmod) / (pi*(1-w)) + + mask3 = (1 - mask1) & (1 - mask2) + tsub = extract(mask3, tmod) + wsub = extract(mask3, w) + place(y, mask3, (pi * (wsub + 1) - tsub) / (pi * (1 - wsub))) + return y + + +def square(t, duty=0.5): + """ + Return a periodic square-wave waveform. + + The square wave has a period ``2*pi``, has value +1 from 0 to + ``2*pi*duty`` and -1 from ``2*pi*duty`` to ``2*pi``. `duty` must be in + the interval [0,1]. + + Note that this is not band-limited. It produces an infinite number + of harmonics, which are aliased back and forth across the frequency + spectrum. + + Parameters + ---------- + t : array_like + The input time array. + duty : array_like, optional + Duty cycle. Default is 0.5 (50% duty cycle). + If an array, causes wave shape to change over time, and must be the + same length as t. + + Returns + ------- + y : ndarray + Output array containing the square waveform. + + Examples + -------- + A 5 Hz waveform sampled at 500 Hz for 1 second: + + >>> import numpy as np + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + >>> t = np.linspace(0, 1, 500, endpoint=False) + >>> plt.plot(t, signal.square(2 * np.pi * 5 * t)) + >>> plt.ylim(-2, 2) + + A pulse-width modulated sine wave: + + >>> plt.figure() + >>> sig = np.sin(2 * np.pi * t) + >>> pwm = signal.square(2 * np.pi * 30 * t, duty=(sig + 1)/2) + >>> plt.subplot(2, 1, 1) + >>> plt.plot(t, sig) + >>> plt.subplot(2, 1, 2) + >>> plt.plot(t, pwm) + >>> plt.ylim(-1.5, 1.5) + + """ + t, w = asarray(t), asarray(duty) + w = asarray(w + (t - t)) + t = asarray(t + (w - w)) + if t.dtype.char in ['fFdD']: + ytype = t.dtype.char + else: + ytype = 'd' + + y = zeros(t.shape, ytype) + + # width must be between 0 and 1 inclusive + mask1 = (w > 1) | (w < 0) + place(y, mask1, nan) + + # on the interval 0 to duty*2*pi function is 1 + tmod = mod(t, 2 * pi) + mask2 = (1 - mask1) & (tmod < w * 2 * pi) + place(y, mask2, 1) + + # on the interval duty*2*pi to 2*pi function is + # (pi*(w+1)-tmod) / (pi*(1-w)) + mask3 = (1 - mask1) & (1 - mask2) + place(y, mask3, -1) + return y + + +def gausspulse(t, fc=1000, bw=0.5, bwr=-6, tpr=-60, retquad=False, + retenv=False): + """ + Return a Gaussian modulated sinusoid: + + ``exp(-a t^2) exp(1j*2*pi*fc*t).`` + + If `retquad` is True, then return the real and imaginary parts + (in-phase and quadrature). + If `retenv` is True, then return the envelope (unmodulated signal). + Otherwise, return the real part of the modulated sinusoid. + + Parameters + ---------- + t : ndarray or the string 'cutoff' + Input array. + fc : float, optional + Center frequency (e.g. Hz). Default is 1000. 
+ bw : float, optional + Fractional bandwidth in frequency domain of pulse (e.g. Hz). + Default is 0.5. + bwr : float, optional + Reference level at which fractional bandwidth is calculated (dB). + Default is -6. + tpr : float, optional + If `t` is 'cutoff', then the function returns the cutoff + time for when the pulse amplitude falls below `tpr` (in dB). + Default is -60. + retquad : bool, optional + If True, return the quadrature (imaginary) as well as the real part + of the signal. Default is False. + retenv : bool, optional + If True, return the envelope of the signal. Default is False. + + Returns + ------- + yI : ndarray + Real part of signal. Always returned. + yQ : ndarray + Imaginary part of signal. Only returned if `retquad` is True. + yenv : ndarray + Envelope of signal. Only returned if `retenv` is True. + + Examples + -------- + Plot real component, imaginary component, and envelope for a 5 Hz pulse, + sampled at 100 Hz for 2 seconds: + + >>> import numpy as np + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + >>> t = np.linspace(-1, 1, 2 * 100, endpoint=False) + >>> i, q, e = signal.gausspulse(t, fc=5, retquad=True, retenv=True) + >>> plt.plot(t, i, t, q, t, e, '--') + + """ + if fc < 0: + raise ValueError(f"Center frequency (fc={fc:.2f}) must be >=0.") + if bw <= 0: + raise ValueError(f"Fractional bandwidth (bw={bw:.2f}) must be > 0.") + if bwr >= 0: + raise ValueError(f"Reference level for bandwidth (bwr={bwr:.2f}) " + "must be < 0 dB") + + # exp(-a t^2) <-> sqrt(pi/a) exp(-pi^2/a * f^2) = g(f) + + ref = pow(10.0, bwr / 20.0) + # fdel = fc*bw/2: g(fdel) = ref --- solve this for a + # + # pi^2/a * fc^2 * bw^2 /4=-log(ref) + a = -(pi * fc * bw) ** 2 / (4.0 * log(ref)) + + if isinstance(t, str): + if t == 'cutoff': # compute cut_off point + # Solve exp(-a tc**2) = tref for tc + # tc = sqrt(-log(tref) / a) where tref = 10^(tpr/20) + if tpr >= 0: + raise ValueError("Reference level for time cutoff must " + "be < 0 dB") + tref = pow(10.0, tpr / 20.0) + return sqrt(-log(tref) / a) + else: + raise ValueError("If `t` is a string, it must be 'cutoff'") + + yenv = exp(-a * t * t) + yI = yenv * cos(2 * pi * fc * t) + yQ = yenv * sin(2 * pi * fc * t) + if not retquad and not retenv: + return yI + if not retquad and retenv: + return yI, yenv + if retquad and not retenv: + return yI, yQ + if retquad and retenv: + return yI, yQ, yenv + + +def chirp(t, f0, t1, f1, method='linear', phi=0, vertex_zero=True, *, + complex=False): + r"""Frequency-swept cosine generator. + + In the following, 'Hz' should be interpreted as 'cycles per unit'; + there is no requirement here that the unit is one second. The + important distinction is that the units of rotation are cycles, not + radians. Likewise, `t` could be a measurement of space instead of time. + + Parameters + ---------- + t : array_like + Times at which to evaluate the waveform. + f0 : float + Frequency (e.g. Hz) at time t=0. + t1 : float + Time at which `f1` is specified. + f1 : float + Frequency (e.g. Hz) of the waveform at time `t1`. + method : {'linear', 'quadratic', 'logarithmic', 'hyperbolic'}, optional + Kind of frequency sweep. If not given, `linear` is assumed. See + Notes below for more details. + phi : float, optional + Phase offset, in degrees. Default is 0. + vertex_zero : bool, optional + This parameter is only used when `method` is 'quadratic'. + It determines whether the vertex of the parabola that is the graph + of the frequency is at t=0 or t=t1. 
+ complex : bool, optional + This parameter creates a complex-valued analytic signal instead of a + real-valued signal. It allows the use of complex baseband (in communications + domain). Default is False. + + .. versionadded:: 1.15.0 + + Returns + ------- + y : ndarray + A numpy array containing the signal evaluated at `t` with the requested + time-varying frequency. More precisely, the function returns + ``exp(1j*phase + 1j*(pi/180)*phi) if complex else cos(phase + (pi/180)*phi)`` + where `phase` is the integral (from 0 to `t`) of ``2*pi*f(t)``. + The instantaneous frequency ``f(t)`` is defined below. + + See Also + -------- + sweep_poly + + Notes + ----- + There are four possible options for the parameter `method`, which have a (long) + standard form and some allowed abbreviations. The formulas for the instantaneous + frequency :math:`f(t)` of the generated signal are as follows: + + 1. Parameter `method` in ``('linear', 'lin', 'li')``: + + .. math:: + f(t) = f_0 + \beta\, t \quad\text{with}\quad + \beta = \frac{f_1 - f_0}{t_1} + + Frequency :math:`f(t)` varies linearly over time with a constant rate + :math:`\beta`. + + 2. Parameter `method` in ``('quadratic', 'quad', 'q')``: + + .. math:: + f(t) = + \begin{cases} + f_0 + \beta\, t^2 & \text{if vertex_zero is True,}\\ + f_1 + \beta\, (t_1 - t)^2 & \text{otherwise,} + \end{cases} + \quad\text{with}\quad + \beta = \frac{f_1 - f_0}{t_1^2} + + The graph of the frequency f(t) is a parabola through :math:`(0, f_0)` and + :math:`(t_1, f_1)`. By default, the vertex of the parabola is at + :math:`(0, f_0)`. If `vertex_zero` is ``False``, then the vertex is at + :math:`(t_1, f_1)`. + To use a more general quadratic function, or an arbitrary + polynomial, use the function `scipy.signal.sweep_poly`. + + 3. Parameter `method` in ``('logarithmic', 'log', 'lo')``: + + .. math:: + f(t) = f_0 \left(\frac{f_1}{f_0}\right)^{t/t_1} + + :math:`f_0` and :math:`f_1` must be nonzero and have the same sign. + This signal is also known as a geometric or exponential chirp. + + 4. Parameter `method` in ``('hyperbolic', 'hyp')``: + + .. math:: + f(t) = \frac{\alpha}{\beta\, t + \gamma} \quad\text{with}\quad + \alpha = f_0 f_1 t_1, \ \beta = f_0 - f_1, \ \gamma = f_1 t_1 + + :math:`f_0` and :math:`f_1` must be nonzero. + + + Examples + -------- + For the first example, a linear chirp ranging from 6 Hz to 1 Hz over 10 seconds is + plotted: + + >>> import numpy as np + >>> from matplotlib.pyplot import tight_layout + >>> from scipy.signal import chirp, square, ShortTimeFFT + >>> from scipy.signal.windows import gaussian + >>> import matplotlib.pyplot as plt + ... + >>> N, T = 1000, 0.01 # number of samples and sampling interval for 10 s signal + >>> t = np.arange(N) * T # timestamps + ... + >>> x_lin = chirp(t, f0=6, f1=1, t1=10, method='linear') + ... + >>> fg0, ax0 = plt.subplots() + >>> ax0.set_title(r"Linear Chirp from $f(0)=6\,$Hz to $f(10)=1\,$Hz") + >>> ax0.set(xlabel="Time $t$ in Seconds", ylabel=r"Amplitude $x_\text{lin}(t)$") + >>> ax0.plot(t, x_lin) + >>> plt.show() + + The following four plots each show the short-time Fourier transform of a chirp + ranging from 45 Hz to 5 Hz with different values for the parameter `method` + (and `vertex_zero`): + + >>> x_qu0 = chirp(t, f0=45, f1=5, t1=N*T, method='quadratic', vertex_zero=True) + >>> x_qu1 = chirp(t, f0=45, f1=5, t1=N*T, method='quadratic', vertex_zero=False) + >>> x_log = chirp(t, f0=45, f1=5, t1=N*T, method='logarithmic') + >>> x_hyp = chirp(t, f0=45, f1=5, t1=N*T, method='hyperbolic') + ... 
+ >>> win = gaussian(50, std=12, sym=True) + >>> SFT = ShortTimeFFT(win, hop=2, fs=1/T, mfft=800, scale_to='magnitude') + >>> ts = ("'quadratic', vertex_zero=True", "'quadratic', vertex_zero=False", + ... "'logarithmic'", "'hyperbolic'") + >>> fg1, ax1s = plt.subplots(2, 2, sharex='all', sharey='all', + ... figsize=(6, 5), layout="constrained") + >>> for x_, ax_, t_ in zip([x_qu0, x_qu1, x_log, x_hyp], ax1s.ravel(), ts): + ... aSx = abs(SFT.stft(x_)) + ... im_ = ax_.imshow(aSx, origin='lower', aspect='auto', extent=SFT.extent(N), + ... cmap='plasma') + ... ax_.set_title(t_) + ... if t_ == "'hyperbolic'": + ... fg1.colorbar(im_, ax=ax1s, label='Magnitude $|S_z(t,f)|$') + >>> _ = fg1.supxlabel("Time $t$ in Seconds") # `_ =` is needed to pass doctests + >>> _ = fg1.supylabel("Frequency $f$ in Hertz") + >>> plt.show() + + Finally, the short-time Fourier transform of a complex-valued linear chirp + ranging from -30 Hz to 30 Hz is depicted: + + >>> z_lin = chirp(t, f0=-30, f1=30, t1=N*T, method="linear", complex=True) + >>> SFT.fft_mode = 'centered' # needed to work with complex signals + >>> aSz = abs(SFT.stft(z_lin)) + ... + >>> fg2, ax2 = plt.subplots() + >>> ax2.set_title(r"Linear Chirp from $-30\,$Hz to $30\,$Hz") + >>> ax2.set(xlabel="Time $t$ in Seconds", ylabel="Frequency $f$ in Hertz") + >>> im2 = ax2.imshow(aSz, origin='lower', aspect='auto', + ... extent=SFT.extent(N), cmap='viridis') + >>> fg2.colorbar(im2, label='Magnitude $|S_z(t,f)|$') + >>> plt.show() + + Note that using negative frequencies makes only sense with complex-valued signals. + Furthermore, the magnitude of the complex exponential function is one whereas the + magnitude of the real-valued cosine function is only 1/2. + """ + # 'phase' is computed in _chirp_phase, to make testing easier. + phase = _chirp_phase(t, f0, t1, f1, method, vertex_zero) + np.deg2rad(phi) + return np.exp(1j*phase) if complex else np.cos(phase) + + +def _chirp_phase(t, f0, t1, f1, method='linear', vertex_zero=True): + """ + Calculate the phase used by `chirp` to generate its output. + + See `chirp` for a description of the arguments. + + """ + t = asarray(t) + f0 = float(f0) + t1 = float(t1) + f1 = float(f1) + if method in ['linear', 'lin', 'li']: + beta = (f1 - f0) / t1 + phase = 2 * pi * (f0 * t + 0.5 * beta * t * t) + + elif method in ['quadratic', 'quad', 'q']: + beta = (f1 - f0) / (t1 ** 2) + if vertex_zero: + phase = 2 * pi * (f0 * t + beta * t ** 3 / 3) + else: + phase = 2 * pi * (f1 * t + beta * ((t1 - t) ** 3 - t1 ** 3) / 3) + + elif method in ['logarithmic', 'log', 'lo']: + if f0 * f1 <= 0.0: + raise ValueError("For a logarithmic chirp, f0 and f1 must be " + "nonzero and have the same sign.") + if f0 == f1: + phase = 2 * pi * f0 * t + else: + beta = t1 / log(f1 / f0) + phase = 2 * pi * beta * f0 * (pow(f1 / f0, t / t1) - 1.0) + + elif method in ['hyperbolic', 'hyp']: + if f0 == 0 or f1 == 0: + raise ValueError("For a hyperbolic chirp, f0 and f1 must be " + "nonzero.") + if f0 == f1: + # Degenerate case: constant frequency. + phase = 2 * pi * f0 * t + else: + # Singular point: the instantaneous frequency blows up + # when t == sing. + sing = -f1 * t1 / (f0 - f1) + phase = 2 * pi * (-sing * f0) * log(np.abs(1 - t/sing)) + + else: + raise ValueError("method must be 'linear', 'quadratic', 'logarithmic', " + f"or 'hyperbolic', but a value of {method!r} was given.") + + return phase + + +def sweep_poly(t, poly, phi=0): + """ + Frequency-swept cosine generator, with a time-dependent frequency. 
+ + This function generates a sinusoidal function whose instantaneous + frequency varies with time. The frequency at time `t` is given by + the polynomial `poly`. + + Parameters + ---------- + t : ndarray + Times at which to evaluate the waveform. + poly : 1-D array_like or instance of numpy.poly1d + The desired frequency expressed as a polynomial. If `poly` is + a list or ndarray of length n, then the elements of `poly` are + the coefficients of the polynomial, and the instantaneous + frequency is + + ``f(t) = poly[0]*t**(n-1) + poly[1]*t**(n-2) + ... + poly[n-1]`` + + If `poly` is an instance of numpy.poly1d, then the + instantaneous frequency is + + ``f(t) = poly(t)`` + + phi : float, optional + Phase offset, in degrees, Default: 0. + + Returns + ------- + sweep_poly : ndarray + A numpy array containing the signal evaluated at `t` with the + requested time-varying frequency. More precisely, the function + returns ``cos(phase + (pi/180)*phi)``, where `phase` is the integral + (from 0 to t) of ``2 * pi * f(t)``; ``f(t)`` is defined above. + + See Also + -------- + chirp + + Notes + ----- + .. versionadded:: 0.8.0 + + If `poly` is a list or ndarray of length `n`, then the elements of + `poly` are the coefficients of the polynomial, and the instantaneous + frequency is: + + ``f(t) = poly[0]*t**(n-1) + poly[1]*t**(n-2) + ... + poly[n-1]`` + + If `poly` is an instance of `numpy.poly1d`, then the instantaneous + frequency is: + + ``f(t) = poly(t)`` + + Finally, the output `s` is: + + ``cos(phase + (pi/180)*phi)`` + + where `phase` is the integral from 0 to `t` of ``2 * pi * f(t)``, + ``f(t)`` as defined above. + + Examples + -------- + Compute the waveform with instantaneous frequency:: + + f(t) = 0.025*t**3 - 0.36*t**2 + 1.25*t + 2 + + over the interval 0 <= t <= 10. + + >>> import numpy as np + >>> from scipy.signal import sweep_poly + >>> p = np.poly1d([0.025, -0.36, 1.25, 2.0]) + >>> t = np.linspace(0, 10, 5001) + >>> w = sweep_poly(t, p) + + Plot it: + + >>> import matplotlib.pyplot as plt + >>> plt.subplot(2, 1, 1) + >>> plt.plot(t, w) + >>> plt.title("Sweep Poly\\nwith frequency " + + ... "$f(t) = 0.025t^3 - 0.36t^2 + 1.25t + 2$") + >>> plt.subplot(2, 1, 2) + >>> plt.plot(t, p(t), 'r', label='f(t)') + >>> plt.legend() + >>> plt.xlabel('t') + >>> plt.tight_layout() + >>> plt.show() + + """ + # 'phase' is computed in _sweep_poly_phase, to make testing easier. + phase = _sweep_poly_phase(t, poly) + # Convert to radians. + phi *= pi / 180 + return cos(phase + phi) + + +def _sweep_poly_phase(t, poly): + """ + Calculate the phase used by sweep_poly to generate its output. + + See `sweep_poly` for a description of the arguments. + + """ + # polyint handles lists, ndarrays and instances of poly1d automatically. + intpoly = polyint(poly) + phase = 2 * pi * polyval(intpoly, t) + return phase + + +def unit_impulse(shape, idx=None, dtype=float): + r""" + Unit impulse signal (discrete delta function) or unit basis vector. + + Parameters + ---------- + shape : int or tuple of int + Number of samples in the output (1-D), or a tuple that represents the + shape of the output (N-D). + idx : None or int or tuple of int or 'mid', optional + Index at which the value is 1. If None, defaults to the 0th element. + If ``idx='mid'``, the impulse will be centered at ``shape // 2`` in + all dimensions. If an int, the impulse will be at `idx` in all + dimensions. + dtype : data-type, optional + The desired data-type for the array, e.g., ``numpy.int8``. Default is + ``numpy.float64``. 
+ + Returns + ------- + y : ndarray + Output array containing an impulse signal. + + Notes + ----- + In digital signal processing literature the unit impulse signal is often + represented by the Kronecker delta. [1]_ I.e., a signal :math:`u_k[n]`, + which is zero everywhere except being one at the :math:`k`-th sample, + can be expressed as + + .. math:: + + u_k[n] = \delta[n-k] \equiv \delta_{n,k}\ . + + Furthermore, the unit impulse is frequently interpreted as the discrete-time + version of the continuous-time Dirac distribution. [2]_ + + References + ---------- + .. [1] "Kronecker delta", *Wikipedia*, + https://en.wikipedia.org/wiki/Kronecker_delta#Digital_signal_processing + .. [2] "Dirac delta function" *Wikipedia*, + https://en.wikipedia.org/wiki/Dirac_delta_function#Relationship_to_the_Kronecker_delta + + .. versionadded:: 0.19.0 + + Examples + -------- + An impulse at the 0th element (:math:`\\delta[n]`): + + >>> from scipy import signal + >>> signal.unit_impulse(8) + array([ 1., 0., 0., 0., 0., 0., 0., 0.]) + + Impulse offset by 2 samples (:math:`\\delta[n-2]`): + + >>> signal.unit_impulse(7, 2) + array([ 0., 0., 1., 0., 0., 0., 0.]) + + 2-dimensional impulse, centered: + + >>> signal.unit_impulse((3, 3), 'mid') + array([[ 0., 0., 0.], + [ 0., 1., 0.], + [ 0., 0., 0.]]) + + Impulse at (2, 2), using broadcasting: + + >>> signal.unit_impulse((4, 4), 2) + array([[ 0., 0., 0., 0.], + [ 0., 0., 0., 0.], + [ 0., 0., 1., 0.], + [ 0., 0., 0., 0.]]) + + Plot the impulse response of a 4th-order Butterworth lowpass filter: + + >>> imp = signal.unit_impulse(100, 'mid') + >>> b, a = signal.butter(4, 0.2) + >>> response = signal.lfilter(b, a, imp) + + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> plt.plot(np.arange(-50, 50), imp) + >>> plt.plot(np.arange(-50, 50), response) + >>> plt.margins(0.1, 0.1) + >>> plt.xlabel('Time [samples]') + >>> plt.ylabel('Amplitude') + >>> plt.grid(True) + >>> plt.show() + + """ + out = zeros(shape, dtype) + + shape = np.atleast_1d(shape) + + if idx is None: + idx = (0,) * len(shape) + elif idx == 'mid': + idx = tuple(shape // 2) + elif not hasattr(idx, "__iter__"): + idx = (idx,) * len(shape) + + out[idx] = 1 + return out diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/_wavelets.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/_wavelets.py new file mode 100644 index 0000000000000000000000000000000000000000..2b9f8fa32672e3f252f0f4ec4e387e0d474dc21e --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/_wavelets.py @@ -0,0 +1,29 @@ +import numpy as np +from scipy.signal import convolve + + +def _ricker(points, a): + A = 2 / (np.sqrt(3 * a) * (np.pi**0.25)) + wsq = a**2 + vec = np.arange(0, points) - (points - 1.0) / 2 + xsq = vec**2 + mod = (1 - xsq / wsq) + gauss = np.exp(-xsq / (2 * wsq)) + total = A * mod * gauss + return total + + +def _cwt(data, wavelet, widths, dtype=None, **kwargs): + # Determine output type + if dtype is None: + if np.asarray(wavelet(1, widths[0], **kwargs)).dtype.char in 'FDG': + dtype = np.complex128 + else: + dtype = np.float64 + + output = np.empty((len(widths), len(data)), dtype=dtype) + for ind, width in enumerate(widths): + N = np.min([10 * width, len(data)]) + wavelet_data = np.conj(wavelet(N, width, **kwargs)[::-1]) + output[ind] = convolve(data, wavelet_data, mode='same') + return output diff --git 
a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/bsplines.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/bsplines.py new file mode 100644 index 0000000000000000000000000000000000000000..0328d45c107bda78cbdbd374148237ca09ac411d --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/bsplines.py @@ -0,0 +1,21 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.signal` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + +__all__ = [ # noqa: F822 + 'spline_filter', 'gauss_spline', + 'cspline1d', 'qspline1d', 'cspline1d_eval', 'qspline1d_eval', + 'cspline2d', 'sepfir2d' +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="signal", module="bsplines", + private_modules=["_spline_filters"], all=__all__, + attribute=name) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/filter_design.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/filter_design.py new file mode 100644 index 0000000000000000000000000000000000000000..41dc230a7f24a7ac3209f821d8d0f9417130afbd --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/filter_design.py @@ -0,0 +1,28 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.signal` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + +__all__ = [ # noqa: F822 + 'findfreqs', 'freqs', 'freqz', 'tf2zpk', 'zpk2tf', 'normalize', + 'lp2lp', 'lp2hp', 'lp2bp', 'lp2bs', 'bilinear', 'iirdesign', + 'iirfilter', 'butter', 'cheby1', 'cheby2', 'ellip', 'bessel', + 'band_stop_obj', 'buttord', 'cheb1ord', 'cheb2ord', 'ellipord', + 'buttap', 'cheb1ap', 'cheb2ap', 'ellipap', 'besselap', + 'BadCoefficients', 'freqs_zpk', 'freqz_zpk', + 'tf2sos', 'sos2tf', 'zpk2sos', 'sos2zpk', 'group_delay', + 'sosfreqz', 'freqz_sos', 'iirnotch', 'iirpeak', 'bilinear_zpk', + 'lp2lp_zpk', 'lp2hp_zpk', 'lp2bp_zpk', 'lp2bs_zpk', + 'gammatone', 'iircomb', +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="signal", module="filter_design", + private_modules=["_filter_design"], all=__all__, + attribute=name) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/fir_filter_design.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/fir_filter_design.py new file mode 100644 index 0000000000000000000000000000000000000000..2214b82998bdefd2c6d6171cc952adf827269736 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/fir_filter_design.py @@ -0,0 +1,20 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.signal` namespace for importing the functions +# included below. 
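# [editor's note] The shim modules in this diff all follow the same pattern:
# the public names are listed in `__all__`, and a module-level `__getattr__`
# hands every attribute access to `_sub_module_deprecation`, which warns and
# forwards to the private implementation module. A minimal sketch of the
# downstream effect, kept as a comment so the vendored file itself is
# unchanged; the exact warning category/text is an assumption, not taken from
# this file:
#
#     import warnings
#     from scipy.signal import fir_filter_design   # deprecated import path
#
#     with warnings.catch_warnings(record=True) as caught:
#         warnings.simplefilter("always")
#         firwin = fir_filter_design.firwin        # resolved via __getattr__
#     # expected: a deprecation warning pointing at `scipy.signal.firwin`
#
#     from scipy.signal import firwin              # supported, equivalent import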
+ +from scipy._lib.deprecation import _sub_module_deprecation + +__all__ = [ # noqa: F822 + 'kaiser_beta', 'kaiser_atten', 'kaiserord', + 'firwin', 'firwin2', 'remez', 'firls', 'minimum_phase', +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="signal", module="fir_filter_design", + private_modules=["_fir_filter_design"], all=__all__, + attribute=name) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/lti_conversion.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/lti_conversion.py new file mode 100644 index 0000000000000000000000000000000000000000..7080990afc9e23e51e8a45aaa146b64c58dda3cf --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/lti_conversion.py @@ -0,0 +1,20 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.signal` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + +__all__ = [ # noqa: F822 + 'tf2ss', 'abcd_normalize', 'ss2tf', 'zpk2ss', 'ss2zpk', + 'cont2discrete', 'tf2zpk', 'zpk2tf', 'normalize' +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="signal", module="lti_conversion", + private_modules=["_lti_conversion"], all=__all__, + attribute=name) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/ltisys.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/ltisys.py new file mode 100644 index 0000000000000000000000000000000000000000..5123068de559f124bf444c12ef9824c3d14de64f --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/ltisys.py @@ -0,0 +1,25 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.signal` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + +__all__ = [ # noqa: F822 + 'lti', 'dlti', 'TransferFunction', 'ZerosPolesGain', 'StateSpace', + 'lsim', 'impulse', 'step', 'bode', + 'freqresp', 'place_poles', 'dlsim', 'dstep', 'dimpulse', + 'dfreqresp', 'dbode', + 'tf2zpk', 'zpk2tf', 'normalize', 'freqs', + 'freqz', 'freqs_zpk', 'freqz_zpk', 'tf2ss', 'abcd_normalize', + 'ss2tf', 'zpk2ss', 'ss2zpk', 'cont2discrete', +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="signal", module="ltisys", + private_modules=["_ltisys"], all=__all__, + attribute=name) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/signaltools.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/signaltools.py new file mode 100644 index 0000000000000000000000000000000000000000..85d426f5fb2605c639fc6dbd1b4d0284a3f11e1b --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/signaltools.py @@ -0,0 +1,27 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.signal` namespace for importing the functions +# included below. 
+ +from scipy._lib.deprecation import _sub_module_deprecation + +__all__ = [ # noqa: F822 + 'correlate', 'correlation_lags', 'correlate2d', + 'convolve', 'convolve2d', 'fftconvolve', 'oaconvolve', + 'order_filter', 'medfilt', 'medfilt2d', 'wiener', 'lfilter', + 'lfiltic', 'sosfilt', 'deconvolve', 'hilbert', 'hilbert2', + 'unique_roots', 'invres', 'invresz', 'residue', + 'residuez', 'resample', 'resample_poly', 'detrend', + 'lfilter_zi', 'sosfilt_zi', 'sosfiltfilt', 'choose_conv_method', + 'filtfilt', 'decimate', 'vectorstrength', + 'dlti', 'upfirdn', 'get_window', 'cheby1', 'firwin' +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="signal", module="signaltools", + private_modules=["_signaltools"], all=__all__, + attribute=name) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/spectral.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/spectral.py new file mode 100644 index 0000000000000000000000000000000000000000..299ebed781b00a1f1f35e96c54f4c20d9bd9d0fc --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/spectral.py @@ -0,0 +1,21 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.signal` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + +__all__ = [ # noqa: F822 + 'periodogram', 'welch', 'lombscargle', 'csd', 'coherence', + 'spectrogram', 'stft', 'istft', 'check_COLA', 'check_NOLA', + 'get_window', +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="signal", module="spectral", + private_modules=["_spectral_py"], all=__all__, + attribute=name) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/spline.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/spline.py new file mode 100644 index 0000000000000000000000000000000000000000..7afd0d0a14beecd5bc4522050eaf3b195f1a3601 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/spline.py @@ -0,0 +1,25 @@ +# This file is not meant for public use and will be removed in the future +# versions of SciPy. Use the `scipy.signal` namespace for importing the +# functions included below. + +import warnings + +from . import _spline + +__all__ = ['sepfir2d'] # noqa: F822 + + +def __dir__(): + return __all__ + + +def __getattr__(name): + if name not in __all__: + raise AttributeError( + f"scipy.signal.spline is deprecated and has no attribute {name}. 
" + "Try looking in scipy.signal instead.") + + warnings.warn(f"Please use `{name}` from the `scipy.signal` namespace, " + "the `scipy.signal.spline` namespace is deprecated.", + category=DeprecationWarning, stacklevel=2) + return getattr(_spline, name) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/tests/__init__.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/tests/mpsig.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/tests/mpsig.py new file mode 100644 index 0000000000000000000000000000000000000000..d129de74e5df00c22bc0b82c7d3f7b52483941f9 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/tests/mpsig.py @@ -0,0 +1,122 @@ +""" +Some signal functions implemented using mpmath. +""" + +try: + import mpmath +except ImportError: + mpmath = None + + +def _prod(seq): + """Returns the product of the elements in the sequence `seq`.""" + p = 1 + for elem in seq: + p *= elem + return p + + +def _relative_degree(z, p): + """ + Return relative degree of transfer function from zeros and poles. + + This is simply len(p) - len(z), which must be nonnegative. + A ValueError is raised if len(p) < len(z). + """ + degree = len(p) - len(z) + if degree < 0: + raise ValueError("Improper transfer function. " + "Must have at least as many poles as zeros.") + return degree + + +def _zpkbilinear(z, p, k, fs): + """Bilinear transformation to convert a filter from analog to digital.""" + + degree = _relative_degree(z, p) + + fs2 = 2*fs + + # Bilinear transform the poles and zeros + z_z = [(fs2 + z1) / (fs2 - z1) for z1 in z] + p_z = [(fs2 + p1) / (fs2 - p1) for p1 in p] + + # Any zeros that were at infinity get moved to the Nyquist frequency + z_z.extend([-1] * degree) + + # Compensate for gain change + numer = _prod(fs2 - z1 for z1 in z) + denom = _prod(fs2 - p1 for p1 in p) + k_z = k * numer / denom + + return z_z, p_z, k_z.real + + +def _zpklp2lp(z, p, k, wo=1): + """Transform a lowpass filter to a different cutoff frequency.""" + + degree = _relative_degree(z, p) + + # Scale all points radially from origin to shift cutoff frequency + z_lp = [wo * z1 for z1 in z] + p_lp = [wo * p1 for p1 in p] + + # Each shifted pole decreases gain by wo, each shifted zero increases it. + # Cancel out the net change to keep overall gain the same + k_lp = k * wo**degree + + return z_lp, p_lp, k_lp + + +def _butter_analog_poles(n): + """ + Poles of an analog Butterworth lowpass filter. + + This is the same calculation as scipy.signal.buttap(n) or + scipy.signal.butter(n, 1, analog=True, output='zpk'), but mpmath is used, + and only the poles are returned. + """ + poles = [-mpmath.exp(1j*mpmath.pi*k/(2*n)) for k in range(-n+1, n, 2)] + return poles + + +def butter_lp(n, Wn): + """ + Lowpass Butterworth digital filter design. + + This computes the same result as scipy.signal.butter(n, Wn, output='zpk'), + but it uses mpmath, and the results are returned in lists instead of NumPy + arrays. 
+ """ + zeros = [] + poles = _butter_analog_poles(n) + k = 1 + fs = 2 + warped = 2 * fs * mpmath.tan(mpmath.pi * Wn / fs) + z, p, k = _zpklp2lp(zeros, poles, k, wo=warped) + z, p, k = _zpkbilinear(z, p, k, fs=fs) + return z, p, k + + +def zpkfreqz(z, p, k, worN=None): + """ + Frequency response of a filter in zpk format, using mpmath. + + This is the same calculation as scipy.signal.freqz, but the input is in + zpk format, the calculation is performed using mpath, and the results are + returned in lists instead of NumPy arrays. + """ + if worN is None or isinstance(worN, int): + N = worN or 512 + ws = [mpmath.pi * mpmath.mpf(j) / N for j in range(N)] + else: + ws = worN + + h = [] + for wk in ws: + zm1 = mpmath.exp(1j * wk) + numer = _prod([zm1 - t for t in z]) + denom = _prod([zm1 - t for t in p]) + hk = k * numer / denom + h.append(hk) + return ws, h diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/tests/test_cont2discrete.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/tests/test_cont2discrete.py new file mode 100644 index 0000000000000000000000000000000000000000..0b4008afd4728acb5b63a6626440b214f2d90f6f --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/tests/test_cont2discrete.py @@ -0,0 +1,417 @@ +import numpy as np +from scipy._lib._array_api import ( + assert_array_almost_equal, assert_almost_equal, xp_assert_close +) + +import pytest +from scipy.signal import cont2discrete as c2d +from scipy.signal import dlsim, ss2tf, ss2zpk, lsim, lti +from scipy.signal import tf2ss, impulse, dimpulse, step, dstep + +# Author: Jeffrey Armstrong +# March 29, 2011 + + +class TestC2D: + @pytest.mark.thread_unsafe # due to Cython fused types, see cython#6506 + def test_zoh(self): + ac = np.eye(2, dtype=np.float64) + bc = np.full((2, 1), 0.5, dtype=np.float64) + cc = np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]]) + dc = np.array([[0.0], [0.0], [-0.33]]) + + ad_truth = 1.648721270700128 * np.eye(2) + bd_truth = np.full((2, 1), 0.324360635350064) + # c and d in discrete should be equal to their continuous counterparts + dt_requested = 0.5 + + ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested, method='zoh') + + assert_array_almost_equal(ad_truth, ad) + assert_array_almost_equal(bd_truth, bd) + assert_array_almost_equal(cc, cd) + assert_array_almost_equal(dc, dd) + assert_almost_equal(dt_requested, dt) + + def test_foh(self): + ac = np.eye(2) + bc = np.full((2, 1), 0.5) + cc = np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]]) + dc = np.array([[0.0], [0.0], [-0.33]]) + + # True values are verified with Matlab + ad_truth = 1.648721270700128 * np.eye(2) + bd_truth = np.full((2, 1), 0.420839287058789) + cd_truth = cc + dd_truth = np.array([[0.260262223725224], + [0.297442541400256], + [-0.144098411624840]]) + dt_requested = 0.5 + + ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested, method='foh') + + assert_array_almost_equal(ad_truth, ad) + assert_array_almost_equal(bd_truth, bd) + assert_array_almost_equal(cd_truth, cd) + assert_array_almost_equal(dd_truth, dd) + assert_almost_equal(dt_requested, dt) + + def test_impulse(self): + ac = np.eye(2) + bc = np.full((2, 1), 0.5) + cc = np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]]) + dc = np.array([[0.0], [0.0], [0.0]]) + + # True values are verified with Matlab + ad_truth = 1.648721270700128 * np.eye(2) + bd_truth = np.full((2, 1), 0.412180317675032) + cd_truth = cc + dd_truth = 
np.array([[0.4375], [0.5], [0.3125]]) + dt_requested = 0.5 + + ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested, + method='impulse') + + assert_array_almost_equal(ad_truth, ad) + assert_array_almost_equal(bd_truth, bd) + assert_array_almost_equal(cd_truth, cd) + assert_array_almost_equal(dd_truth, dd) + assert_almost_equal(dt_requested, dt) + + def test_gbt(self): + ac = np.eye(2) + bc = np.full((2, 1), 0.5) + cc = np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]]) + dc = np.array([[0.0], [0.0], [-0.33]]) + + dt_requested = 0.5 + alpha = 1.0 / 3.0 + + ad_truth = 1.6 * np.eye(2) + bd_truth = np.full((2, 1), 0.3) + cd_truth = np.array([[0.9, 1.2], + [1.2, 1.2], + [1.2, 0.3]]) + dd_truth = np.array([[0.175], + [0.2], + [-0.205]]) + + ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested, + method='gbt', alpha=alpha) + + assert_array_almost_equal(ad_truth, ad) + assert_array_almost_equal(bd_truth, bd) + assert_array_almost_equal(cd_truth, cd) + assert_array_almost_equal(dd_truth, dd) + + def test_euler(self): + ac = np.eye(2) + bc = np.full((2, 1), 0.5) + cc = np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]]) + dc = np.array([[0.0], [0.0], [-0.33]]) + + dt_requested = 0.5 + + ad_truth = 1.5 * np.eye(2) + bd_truth = np.full((2, 1), 0.25) + cd_truth = np.array([[0.75, 1.0], + [1.0, 1.0], + [1.0, 0.25]]) + dd_truth = dc + + ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested, + method='euler') + + assert_array_almost_equal(ad_truth, ad) + assert_array_almost_equal(bd_truth, bd) + assert_array_almost_equal(cd_truth, cd) + assert_array_almost_equal(dd_truth, dd) + assert_almost_equal(dt_requested, dt) + + def test_backward_diff(self): + ac = np.eye(2) + bc = np.full((2, 1), 0.5) + cc = np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]]) + dc = np.array([[0.0], [0.0], [-0.33]]) + + dt_requested = 0.5 + + ad_truth = 2.0 * np.eye(2) + bd_truth = np.full((2, 1), 0.5) + cd_truth = np.array([[1.5, 2.0], + [2.0, 2.0], + [2.0, 0.5]]) + dd_truth = np.array([[0.875], + [1.0], + [0.295]]) + + ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested, + method='backward_diff') + + assert_array_almost_equal(ad_truth, ad) + assert_array_almost_equal(bd_truth, bd) + assert_array_almost_equal(cd_truth, cd) + assert_array_almost_equal(dd_truth, dd) + + def test_bilinear(self): + ac = np.eye(2) + bc = np.full((2, 1), 0.5) + cc = np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]]) + dc = np.array([[0.0], [0.0], [-0.33]]) + + dt_requested = 0.5 + + ad_truth = (5.0 / 3.0) * np.eye(2) + bd_truth = np.full((2, 1), 1.0 / 3.0) + cd_truth = np.array([[1.0, 4.0 / 3.0], + [4.0 / 3.0, 4.0 / 3.0], + [4.0 / 3.0, 1.0 / 3.0]]) + dd_truth = np.array([[0.291666666666667], + [1.0 / 3.0], + [-0.121666666666667]]) + + ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested, + method='bilinear') + + assert_array_almost_equal(ad_truth, ad) + assert_array_almost_equal(bd_truth, bd) + assert_array_almost_equal(cd_truth, cd) + assert_array_almost_equal(dd_truth, dd) + assert_almost_equal(dt_requested, dt) + + # Same continuous system again, but change sampling rate + + ad_truth = 1.4 * np.eye(2) + bd_truth = np.full((2, 1), 0.2) + cd_truth = np.array([[0.9, 1.2], [1.2, 1.2], [1.2, 0.3]]) + dd_truth = np.array([[0.175], [0.2], [-0.205]]) + + dt_requested = 1.0 / 3.0 + + ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested, + method='bilinear') + + assert_array_almost_equal(ad_truth, ad) + assert_array_almost_equal(bd_truth, bd) + assert_array_almost_equal(cd_truth, cd) + assert_array_almost_equal(dd_truth, dd) + 
assert_almost_equal(dt_requested, dt) + + def test_transferfunction(self): + numc = np.array([0.25, 0.25, 0.5]) + denc = np.array([0.75, 0.75, 1.0]) + + numd = np.array([[1.0 / 3.0, -0.427419169438754, 0.221654141101125]]) + dend = np.array([1.0, -1.351394049721225, 0.606530659712634]) + + dt_requested = 0.5 + + num, den, dt = c2d((numc, denc), dt_requested, method='zoh') + + assert_array_almost_equal(numd, num) + assert_array_almost_equal(dend, den) + assert_almost_equal(dt_requested, dt) + + def test_zerospolesgain(self): + zeros_c = np.array([0.5, -0.5]) + poles_c = np.array([1.j / np.sqrt(2), -1.j / np.sqrt(2)]) + k_c = 1.0 + + zeros_d = [1.23371727305860, 0.735356894461267] + polls_d = [0.938148335039729 + 0.346233593780536j, + 0.938148335039729 - 0.346233593780536j] + k_d = 1.0 + + dt_requested = 0.5 + + zeros, poles, k, dt = c2d((zeros_c, poles_c, k_c), dt_requested, + method='zoh') + + assert_array_almost_equal(zeros_d, zeros) + assert_array_almost_equal(polls_d, poles) + assert_almost_equal(k_d, k) + assert_almost_equal(dt_requested, dt) + + def test_gbt_with_sio_tf_and_zpk(self): + """Test method='gbt' with alpha=0.25 for tf and zpk cases.""" + # State space coefficients for the continuous SIO system. + A = -1.0 + B = 1.0 + C = 1.0 + D = 0.5 + + # The continuous transfer function coefficients. + cnum, cden = ss2tf(A, B, C, D) + + # Continuous zpk representation + cz, cp, ck = ss2zpk(A, B, C, D) + + h = 1.0 + alpha = 0.25 + + # Explicit formulas, in the scalar case. + Ad = (1 + (1 - alpha) * h * A) / (1 - alpha * h * A) + Bd = h * B / (1 - alpha * h * A) + Cd = C / (1 - alpha * h * A) + Dd = D + alpha * C * Bd + + # Convert the explicit solution to tf + dnum, dden = ss2tf(Ad, Bd, Cd, Dd) + + # Compute the discrete tf using cont2discrete. + c2dnum, c2dden, dt = c2d((cnum, cden), h, method='gbt', alpha=alpha) + + xp_assert_close(dnum, c2dnum) + xp_assert_close(dden, c2dden) + + # Convert explicit solution to zpk. + dz, dp, dk = ss2zpk(Ad, Bd, Cd, Dd) + + # Compute the discrete zpk using cont2discrete. + c2dz, c2dp, c2dk, dt = c2d((cz, cp, ck), h, method='gbt', alpha=alpha) + + xp_assert_close(dz, c2dz) + xp_assert_close(dp, c2dp) + xp_assert_close(dk, c2dk) + + def test_discrete_approx(self): + """ + Test that the solution to the discrete approximation of a continuous + system actually approximates the solution to the continuous system. + This is an indirect test of the correctness of the implementation + of cont2discrete. + """ + + def u(t): + return np.sin(2.5 * t) + + a = np.array([[-0.01]]) + b = np.array([[1.0]]) + c = np.array([[1.0]]) + d = np.array([[0.2]]) + x0 = 1.0 + + t = np.linspace(0, 10.0, 101) + dt = t[1] - t[0] + u1 = u(t) + + # Use lsim to compute the solution to the continuous system. + t, yout, xout = lsim((a, b, c, d), T=t, U=u1, X0=x0) + + # Convert the continuous system to a discrete approximation. + dsys = c2d((a, b, c, d), dt, method='bilinear') + + # Use dlsim with the pairwise averaged input to compute the output + # of the discrete system. + u2 = 0.5 * (u1[:-1] + u1[1:]) + t2 = t[:-1] + td2, yd2, xd2 = dlsim(dsys, u=u2.reshape(-1, 1), t=t2, x0=x0) + + # ymid is the average of consecutive terms of the "exact" output + # computed by lsim2. This is what the discrete approximation + # actually approximates. 
+ ymid = 0.5 * (yout[:-1] + yout[1:]) + + xp_assert_close(yd2.ravel(), ymid, rtol=1e-4) + + def test_simo_tf(self): + # See gh-5753 + tf = ([[1, 0], [1, 1]], [1, 1]) + num, den, dt = c2d(tf, 0.01) + + assert dt == 0.01 # sanity check + xp_assert_close(den, [1, -0.990404983], rtol=1e-3) + xp_assert_close(num, [[1, -1], [1, -0.99004983]], rtol=1e-3) + + def test_multioutput(self): + ts = 0.01 # time step + + tf = ([[1, -3], [1, 5]], [1, 1]) + num, den, dt = c2d(tf, ts) + + tf1 = (tf[0][0], tf[1]) + num1, den1, dt1 = c2d(tf1, ts) + + tf2 = (tf[0][1], tf[1]) + num2, den2, dt2 = c2d(tf2, ts) + + # Sanity checks + assert dt == dt1 + assert dt == dt2 + + # Check that we get the same results + xp_assert_close(num, np.vstack((num1, num2)), rtol=1e-13) + + # Single input, so the denominator should + # not be multidimensional like the numerator + xp_assert_close(den, den1, rtol=1e-13) + xp_assert_close(den, den2, rtol=1e-13) + +class TestC2dLti: + def test_c2d_ss(self): + # StateSpace + A = np.array([[-0.3, 0.1], [0.2, -0.7]]) + B = np.array([[0], [1]]) + C = np.array([[1, 0]]) + D = 0 + + A_res = np.array([[0.985136404135682, 0.004876671474795], + [0.009753342949590, 0.965629718236502]]) + B_res = np.array([[0.000122937599964], [0.049135527547844]]) + + sys_ssc = lti(A, B, C, D) + sys_ssd = sys_ssc.to_discrete(0.05) + + xp_assert_close(sys_ssd.A, A_res) + xp_assert_close(sys_ssd.B, B_res) + xp_assert_close(sys_ssd.C, C) + xp_assert_close(sys_ssd.D, np.zeros_like(sys_ssd.D)) + + def test_c2d_tf(self): + + sys = lti([0.5, 0.3], [1.0, 0.4]) + sys = sys.to_discrete(0.005) + + # Matlab results + num_res = np.array([0.5, -0.485149004980066]) + den_res = np.array([1.0, -0.980198673306755]) + + # Somehow a lot of numerical errors + xp_assert_close(sys.den, den_res, atol=0.02) + xp_assert_close(sys.num, num_res, atol=0.02) + + +class TestC2dInvariants: + # Some test cases for checking the invariances. 
+ # Array of triplets: (system, sample time, number of samples) + cases = [ + (tf2ss([1, 1], [1, 1.5, 1]), 0.25, 10), + (tf2ss([1, 2], [1, 1.5, 3, 1]), 0.5, 10), + (tf2ss(0.1, [1, 1, 2, 1]), 0.5, 10), + ] + + # Check that systems discretized with the impulse-invariant + # method really hold the invariant + @pytest.mark.parametrize("sys,sample_time,samples_number", cases) + def test_impulse_invariant(self, sys, sample_time, samples_number): + time = np.arange(samples_number) * sample_time + _, yout_cont = impulse(sys, T=time) + _, yout_disc = dimpulse(c2d(sys, sample_time, method='impulse'), + n=len(time)) + xp_assert_close(sample_time * yout_cont.ravel(), yout_disc[0].ravel()) + + # Step invariant should hold for ZOH discretized systems + @pytest.mark.parametrize("sys,sample_time,samples_number", cases) + def test_step_invariant(self, sys, sample_time, samples_number): + time = np.arange(samples_number) * sample_time + _, yout_cont = step(sys, T=time) + _, yout_disc = dstep(c2d(sys, sample_time, method='zoh'), n=len(time)) + xp_assert_close(yout_cont.ravel(), yout_disc[0].ravel()) + + # Linear invariant should hold for FOH discretized systems + @pytest.mark.parametrize("sys,sample_time,samples_number", cases) + def test_linear_invariant(self, sys, sample_time, samples_number): + time = np.arange(samples_number) * sample_time + _, yout_cont, _ = lsim(sys, T=time, U=time) + _, yout_disc, _ = dlsim(c2d(sys, sample_time, method='foh'), u=time) + xp_assert_close(yout_cont.ravel(), yout_disc.ravel()) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/tests/test_czt.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/tests/test_czt.py new file mode 100644 index 0000000000000000000000000000000000000000..35087d99fec5057131f0735d43e0faa33a74ef82 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/tests/test_czt.py @@ -0,0 +1,221 @@ +# This program is public domain +# Authors: Paul Kienzle, Nadav Horesh +''' +A unit test module for czt.py +''' +import pytest +from scipy._lib._array_api import xp_assert_close +from scipy.fft import fft +from scipy.signal import (czt, zoom_fft, czt_points, CZT, ZoomFFT) +import numpy as np + + +def check_czt(x): + # Check that czt is the equivalent of normal fft + y = fft(x) + y1 = czt(x) + xp_assert_close(y1, y, rtol=1e-13) + + # Check that interpolated czt is the equivalent of normal fft + y = fft(x, 100*len(x)) + y1 = czt(x, 100*len(x)) + xp_assert_close(y1, y, rtol=1e-12) + + +def check_zoom_fft(x): + # Check that zoom_fft is the equivalent of normal fft + y = fft(x) + y1 = zoom_fft(x, [0, 2-2./len(y)], endpoint=True) + xp_assert_close(y1, y, rtol=1e-11, atol=1e-14) + y1 = zoom_fft(x, [0, 2]) + xp_assert_close(y1, y, rtol=1e-11, atol=1e-14) + + # Test fn scalar + y1 = zoom_fft(x, 2-2./len(y), endpoint=True) + xp_assert_close(y1, y, rtol=1e-11, atol=1e-14) + y1 = zoom_fft(x, 2) + xp_assert_close(y1, y, rtol=1e-11, atol=1e-14) + + # Check that zoom_fft with oversampling is equivalent to zero padding + over = 10 + yover = fft(x, over*len(x)) + y2 = zoom_fft(x, [0, 2-2./len(yover)], m=len(yover), endpoint=True) + xp_assert_close(y2, yover, rtol=1e-12, atol=1e-10) + y2 = zoom_fft(x, [0, 2], m=len(yover)) + xp_assert_close(y2, yover, rtol=1e-12, atol=1e-10) + + # Check that zoom_fft works on a subrange + w = np.linspace(0, 2-2./len(x), len(x)) + f1, f2 = w[3], w[6] + y3 = zoom_fft(x, [f1, f2], m=3*over+1, 
endpoint=True) + idx3 = slice(3*over, 6*over+1) + xp_assert_close(y3, yover[idx3], rtol=1e-13) + + +def test_1D(): + # Test of 1D version of the transforms + + rng = np.random.RandomState(0) # Deterministic randomness + + # Random signals + lengths = rng.randint(8, 200, 20) + np.append(lengths, 1) + for length in lengths: + x = rng.random(length) + check_zoom_fft(x) + check_czt(x) + + # Gauss + t = np.linspace(-2, 2, 128) + x = np.exp(-t**2/0.01) + check_zoom_fft(x) + + # Linear + x = [1, 2, 3, 4, 5, 6, 7] + check_zoom_fft(x) + + # Check near powers of two + check_zoom_fft(range(126-31)) + check_zoom_fft(range(127-31)) + check_zoom_fft(range(128-31)) + check_zoom_fft(range(129-31)) + check_zoom_fft(range(130-31)) + + # Check transform on n-D array input + x = np.reshape(np.arange(3*2*28), (3, 2, 28)) + y1 = zoom_fft(x, [0, 2-2./28]) + y2 = zoom_fft(x[2, 0, :], [0, 2-2./28]) + xp_assert_close(y1[2, 0], y2, rtol=1e-13, atol=1e-12) + + y1 = zoom_fft(x, [0, 2], endpoint=False) + y2 = zoom_fft(x[2, 0, :], [0, 2], endpoint=False) + xp_assert_close(y1[2, 0], y2, rtol=1e-13, atol=1e-12) + + # Random (not a test condition) + x = rng.rand(101) + check_zoom_fft(x) + + # Spikes + t = np.linspace(0, 1, 128) + x = np.sin(2*np.pi*t*5)+np.sin(2*np.pi*t*13) + check_zoom_fft(x) + + # Sines + x = np.zeros(100, dtype=complex) + x[[1, 5, 21]] = 1 + check_zoom_fft(x) + + # Sines plus complex component + x += 1j*np.linspace(0, 0.5, x.shape[0]) + check_zoom_fft(x) + + +def test_large_prime_lengths(): + rng = np.random.RandomState(0) # Deterministic randomness + for N in (101, 1009, 10007): + x = rng.rand(N) + y = fft(x) + y1 = czt(x) + xp_assert_close(y, y1, rtol=1e-12) + + +@pytest.mark.slow +def test_czt_vs_fft(): + rng = np.random.RandomState(123) # Deterministic randomness + random_lengths = rng.exponential(100000, size=10).astype('int') + for n in random_lengths: + a = rng.randn(n) + xp_assert_close(czt(a), fft(a), rtol=1e-11) + + +def test_empty_input(): + with pytest.raises(ValueError, match='Invalid number of CZT'): + czt([]) + with pytest.raises(ValueError, match='Invalid number of CZT'): + zoom_fft([], 0.5) + + +def test_0_rank_input(): + with pytest.raises(IndexError, match='tuple index out of range'): + czt(5) + with pytest.raises(IndexError, match='tuple index out of range'): + zoom_fft(5, 0.5) + + +@pytest.mark.parametrize('impulse', ([0, 0, 1], [0, 0, 1, 0, 0], + np.concatenate((np.array([0, 0, 1]), + np.zeros(100))))) +@pytest.mark.parametrize('m', (1, 3, 5, 8, 101, 1021)) +@pytest.mark.parametrize('a', (1, 2, 0.5, 1.1)) +# Step that tests away from the unit circle, but not so far it explodes from +# numerical error +@pytest.mark.parametrize('w', (None, 0.98534 + 0.17055j)) +def test_czt_math(impulse, m, w, a): + # z-transform of an impulse is 1 everywhere + xp_assert_close(czt(impulse[2:], m=m, w=w, a=a), + np.ones(m, dtype=np.complex128), rtol=1e-10) + + # z-transform of a delayed impulse is z**-1 + xp_assert_close(czt(impulse[1:], m=m, w=w, a=a), + czt_points(m=m, w=w, a=a)**-1, rtol=1e-10) + + # z-transform of a 2-delayed impulse is z**-2 + xp_assert_close(czt(impulse, m=m, w=w, a=a), + czt_points(m=m, w=w, a=a)**-2, rtol=1e-10) + + +def test_int_args(): + # Integer argument `a` was producing all 0s + xp_assert_close(abs(czt([0, 1], m=10, a=2)), 0.5*np.ones(10), rtol=1e-15) + xp_assert_close(czt_points(11, w=2), + 1/(2**np.arange(11, dtype=np.complex128)), rtol=1e-30) + + +def test_czt_points(): + for N in (1, 2, 3, 8, 11, 100, 101, 10007): + xp_assert_close(czt_points(N), 
np.exp(2j*np.pi*np.arange(N)/N), + rtol=1e-30) + + xp_assert_close(czt_points(7, w=1), np.ones(7, dtype=np.complex128), rtol=1e-30) + xp_assert_close(czt_points(11, w=2.), + 1/(2**np.arange(11, dtype=np.complex128)), rtol=1e-30) + + func = CZT(12, m=11, w=2., a=1) + xp_assert_close(func.points(), 1/(2**np.arange(11)), rtol=1e-30) + + +@pytest.mark.parametrize('cls, args', [(CZT, (100,)), (ZoomFFT, (100, 0.2))]) +def test_CZT_size_mismatch(cls, args): + # Data size doesn't match function's expected size + myfunc = cls(*args) + with pytest.raises(ValueError, match='CZT defined for'): + myfunc(np.arange(5)) + + +def test_invalid_range(): + with pytest.raises(ValueError, match='2-length sequence'): + ZoomFFT(100, [1, 2, 3]) + + +@pytest.mark.parametrize('m', [0, -11, 5.5, 4.0]) +def test_czt_points_errors(m): + # Invalid number of points + with pytest.raises(ValueError, match='Invalid number of CZT'): + czt_points(m) + + +@pytest.mark.parametrize('size', [0, -5, 3.5, 4.0]) +def test_nonsense_size(size): + # Numpy and Scipy fft() give ValueError for 0 output size, so we do, too + with pytest.raises(ValueError, match='Invalid number of CZT'): + CZT(size, 3) + with pytest.raises(ValueError, match='Invalid number of CZT'): + ZoomFFT(size, 0.2, 3) + with pytest.raises(ValueError, match='Invalid number of CZT'): + CZT(3, size) + with pytest.raises(ValueError, match='Invalid number of CZT'): + ZoomFFT(3, 0.2, size) + with pytest.raises(ValueError, match='Invalid number of CZT'): + czt([1, 2, 3], size) + with pytest.raises(ValueError, match='Invalid number of CZT'): + zoom_fft([1, 2, 3], 0.2, size) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/tests/test_filter_design.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/tests/test_filter_design.py new file mode 100644 index 0000000000000000000000000000000000000000..62613b5bb64ec2980dc8a306e7a2a997b5713d2c --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/tests/test_filter_design.py @@ -0,0 +1,4485 @@ +import warnings + +from scipy._lib import _pep440 +import numpy as np +from numpy.testing import ( + assert_array_almost_equal_nulp, assert_warns, suppress_warnings +) +import pytest +from pytest import raises as assert_raises +from scipy._lib._array_api import ( + xp_assert_close, xp_assert_equal, + assert_array_almost_equal, +) + +from numpy import array, spacing, sin, pi, sort, sqrt +from scipy.signal import (argrelextrema, BadCoefficients, bessel, besselap, bilinear, + buttap, butter, buttord, cheb1ap, cheb1ord, cheb2ap, + cheb2ord, cheby1, cheby2, ellip, ellipap, ellipord, + firwin, freqs_zpk, freqs, freqz, freqz_zpk, + gammatone, group_delay, iircomb, iirdesign, iirfilter, + iirnotch, iirpeak, lp2bp, lp2bs, lp2hp, lp2lp, normalize, + medfilt, order_filter, + sos2tf, sos2zpk, sosfreqz, freqz_sos, tf2sos, tf2zpk, zpk2sos, + zpk2tf, bilinear_zpk, lp2lp_zpk, lp2hp_zpk, lp2bp_zpk, + lp2bs_zpk) +from scipy.signal._filter_design import (_cplxreal, _cplxpair, _norm_factor, + _bessel_poly, _bessel_zeros) + +try: + import mpmath +except ImportError: + mpmath = None + + +def mpmath_check(min_ver): + return pytest.mark.skipif( + mpmath is None + or _pep440.parse(mpmath.__version__) < _pep440.Version(min_ver), + reason=f"mpmath version >= {min_ver} required", + ) + + +class TestCplxPair: + + def test_trivial_input(self): + assert _cplxpair([]).size == 0 + assert _cplxpair(1) == 1 + + def 
test_output_order(self): + xp_assert_close(_cplxpair([1+1j, 1-1j]), [1-1j, 1+1j]) + + a = [1+1j, 1+1j, 1, 1-1j, 1-1j, 2] + b = [1-1j, 1+1j, 1-1j, 1+1j, 1, 2] + xp_assert_close(_cplxpair(a), b) + + # points spaced around the unit circle + z = np.exp(2j*pi*array([4, 3, 5, 2, 6, 1, 0])/7) + z1 = np.copy(z) + np.random.shuffle(z) + xp_assert_close(_cplxpair(z), z1) + np.random.shuffle(z) + xp_assert_close(_cplxpair(z), z1) + np.random.shuffle(z) + xp_assert_close(_cplxpair(z), z1) + + # Should be able to pair up all the conjugates + x = np.random.rand(10000) + 1j * np.random.rand(10000) + y = x.conj() + z = np.random.rand(10000) + x = np.concatenate((x, y, z)) + np.random.shuffle(x) + c = _cplxpair(x) + + # Every other element of head should be conjugates: + xp_assert_close(c[0:20000:2], np.conj(c[1:20000:2])) + # Real parts of head should be in sorted order: + xp_assert_close(c[0:20000:2].real, np.sort(c[0:20000:2].real)) + # Tail should be sorted real numbers: + xp_assert_close(c[20000:], np.sort(c[20000:])) + + def test_real_integer_input(self): + xp_assert_equal(_cplxpair([2, 0, 1]), [0, 1, 2]) + + def test_tolerances(self): + eps = spacing(1) + xp_assert_close(_cplxpair([1j, -1j, 1+1j*eps], tol=2*eps), + [-1j, 1j, 1+1j*eps]) + + # sorting close to 0 + xp_assert_close(_cplxpair([-eps+1j, +eps-1j]), [-1j, +1j]) + xp_assert_close(_cplxpair([+eps+1j, -eps-1j]), [-1j, +1j]) + xp_assert_close(_cplxpair([+1j, -1j]), [-1j, +1j]) + + def test_unmatched_conjugates(self): + # 1+2j is unmatched + assert_raises(ValueError, _cplxpair, [1+3j, 1-3j, 1+2j]) + + # 1+2j and 1-3j are unmatched + assert_raises(ValueError, _cplxpair, [1+3j, 1-3j, 1+2j, 1-3j]) + + # 1+3j is unmatched + assert_raises(ValueError, _cplxpair, [1+3j, 1-3j, 1+3j]) + + # Not conjugates + assert_raises(ValueError, _cplxpair, [4+5j, 4+5j]) + assert_raises(ValueError, _cplxpair, [1-7j, 1-7j]) + + # No pairs + assert_raises(ValueError, _cplxpair, [1+3j]) + assert_raises(ValueError, _cplxpair, [1-3j]) + + +class TestCplxReal: + + def test_trivial_input(self): + assert all(x.size == 0 for x in _cplxreal([])) + + x = _cplxreal(1) + assert x[0].size == 0 + xp_assert_equal(x[1], np.asarray([1])) + + + def test_output_order(self): + zc, zr = _cplxreal(np.roots(array([1, 0, 0, 1]))) + xp_assert_close(np.append(zc, zr), [1/2 + 1j*sin(pi/3), -1]) + + eps = spacing(1) + + a = [0+1j, 0-1j, eps + 1j, eps - 1j, -eps + 1j, -eps - 1j, + 1, 4, 2, 3, 0, 0, + 2+3j, 2-3j, + 1-eps + 1j, 1+2j, 1-2j, 1+eps - 1j, # sorts out of order + 3+1j, 3+1j, 3+1j, 3-1j, 3-1j, 3-1j, + 2-3j, 2+3j] + zc, zr = _cplxreal(a) + xp_assert_close(zc, [1j, 1j, 1j, 1+1j, 1+2j, 2+3j, 2+3j, 3+1j, 3+1j, + 3+1j]) + xp_assert_close(zr, [0.0, 0, 1, 2, 3, 4]) + + z = array([1-eps + 1j, 1+2j, 1-2j, 1+eps - 1j, 1+eps+3j, 1-2*eps-3j, + 0+1j, 0-1j, 2+4j, 2-4j, 2+3j, 2-3j, 3+7j, 3-7j, 4-eps+1j, + 4+eps-2j, 4-1j, 4-eps+2j]) + + zc, zr = _cplxreal(z) + xp_assert_close(zc, [1j, 1+1j, 1+2j, 1+3j, 2+3j, 2+4j, 3+7j, 4+1j, + 4+2j]) + xp_assert_equal(zr, np.asarray([])) + + def test_unmatched_conjugates(self): + # 1+2j is unmatched + assert_raises(ValueError, _cplxreal, [1+3j, 1-3j, 1+2j]) + + # 1+2j and 1-3j are unmatched + assert_raises(ValueError, _cplxreal, [1+3j, 1-3j, 1+2j, 1-3j]) + + # 1+3j is unmatched + assert_raises(ValueError, _cplxreal, [1+3j, 1-3j, 1+3j]) + + # No pairs + assert_raises(ValueError, _cplxreal, [1+3j]) + assert_raises(ValueError, _cplxreal, [1-3j]) + + def test_real_integer_input(self): + zc, zr = _cplxreal([2, 0, 1, 4]) + xp_assert_equal(zc, []) + xp_assert_equal(zr, [0, 1, 
2, 4]) + + +class TestTf2zpk: + + @pytest.mark.parametrize('dt', (np.float64, np.complex128)) + def test_simple(self, dt): + z_r = np.array([0.5, -0.5]) + p_r = np.array([1.j / np.sqrt(2), -1.j / np.sqrt(2)]) + # Sort the zeros/poles so that we don't fail the test if the order + # changes + z_r.sort() + p_r.sort() + b = np.poly(z_r).astype(dt) + a = np.poly(p_r).astype(dt) + + z, p, k = tf2zpk(b, a) + z.sort() + # The real part of `p` is ~0.0, so sort by imaginary part + p = p[np.argsort(p.imag)] + + assert_array_almost_equal(z, z_r) + assert_array_almost_equal(p, p_r) + assert_array_almost_equal(k, 1.) + assert k.dtype == dt + + def test_bad_filter(self): + # Regression test for #651: better handling of badly conditioned + # filter coefficients. + with suppress_warnings(): + warnings.simplefilter("error", BadCoefficients) + assert_raises(BadCoefficients, tf2zpk, [1e-15], [1.0, 1.0]) + + +class TestZpk2Tf: + + def test_identity(self): + """Test the identity transfer function.""" + z = [] + p = [] + k = 1. + b, a = zpk2tf(z, p, k) + b_r = np.array([1.]) # desired result + a_r = np.array([1.]) # desired result + # The test for the *type* of the return values is a regression + # test for ticket #1095. In the case p=[], zpk2tf used to + # return the scalar 1.0 instead of array([1.0]). + xp_assert_equal(b, b_r) + assert isinstance(b, np.ndarray) + xp_assert_equal(a, a_r) + assert isinstance(a, np.ndarray) + + +class TestSos2Zpk: + + def test_basic(self): + sos = [[1, 0, 1, 1, 0, -0.81], + [1, 0, 0, 1, 0, +0.49]] + z, p, k = sos2zpk(sos) + z2 = [1j, -1j, 0, 0] + p2 = [0.9, -0.9, 0.7j, -0.7j] + k2 = 1 + assert_array_almost_equal(sort(z), sort(z2), decimal=4) + assert_array_almost_equal(sort(p), sort(p2), decimal=4) + assert_array_almost_equal(k, k2) + + sos = [[1.00000, +0.61803, 1.0000, 1.00000, +0.60515, 0.95873], + [1.00000, -1.61803, 1.0000, 1.00000, -1.58430, 0.95873], + [1.00000, +1.00000, 0.0000, 1.00000, +0.97915, 0.00000]] + z, p, k = sos2zpk(sos) + z2 = [-0.3090 + 0.9511j, -0.3090 - 0.9511j, 0.8090 + 0.5878j, + 0.8090 - 0.5878j, -1.0000 + 0.0000j, 0] + p2 = [-0.3026 + 0.9312j, -0.3026 - 0.9312j, 0.7922 + 0.5755j, + 0.7922 - 0.5755j, -0.9791 + 0.0000j, 0] + k2 = 1 + assert_array_almost_equal(sort(z), sort(z2), decimal=4) + assert_array_almost_equal(sort(p), sort(p2), decimal=4) + + sos = array([[1, 2, 3, 1, 0.2, 0.3], + [4, 5, 6, 1, 0.4, 0.5]]) + z = array([-1 - 1.41421356237310j, -1 + 1.41421356237310j, + -0.625 - 1.05326872164704j, -0.625 + 1.05326872164704j]) + p = array([-0.2 - 0.678232998312527j, -0.2 + 0.678232998312527j, + -0.1 - 0.538516480713450j, -0.1 + 0.538516480713450j]) + k = 4 + z2, p2, k2 = sos2zpk(sos) + xp_assert_close(_cplxpair(z2), z) + xp_assert_close(_cplxpair(p2), p) + assert k2 == k + + @pytest.mark.thread_unsafe + def test_fewer_zeros(self): + """Test not the expected number of p/z (effectively at origin).""" + sos = butter(3, 0.1, output='sos') + z, p, k = sos2zpk(sos) + assert len(z) == 4 + assert len(p) == 4 + + sos = butter(12, [5., 30.], 'bandpass', fs=1200., analog=False, + output='sos') + with pytest.warns(BadCoefficients, match='Badly conditioned'): + z, p, k = sos2zpk(sos) + assert len(z) == 24 + assert len(p) == 24 + + +class TestSos2Tf: + + def test_basic(self): + sos = [[1, 1, 1, 1, 0, -1], + [-2, 3, 1, 1, 10, 1]] + b, a = sos2tf(sos) + assert_array_almost_equal(b, [-2, 1, 2, 4, 1]) + assert_array_almost_equal(a, [1, 10, 0, -10, -1]) + + +class TestTf2Sos: + + def test_basic(self): + num = [2, 16, 44, 56, 32] + den = [3, 3, -15, 18, -12] + sos = 
tf2sos(num, den) + sos2 = [[0.6667, 4.0000, 5.3333, 1.0000, +2.0000, -4.0000], + [1.0000, 2.0000, 2.0000, 1.0000, -1.0000, +1.0000]] + assert_array_almost_equal(sos, sos2, decimal=4) + + b = [1, -3, 11, -27, 18] + a = [16, 12, 2, -4, -1] + sos = tf2sos(b, a) + sos2 = [[0.0625, -0.1875, 0.1250, 1.0000, -0.2500, -0.1250], + [1.0000, +0.0000, 9.0000, 1.0000, +1.0000, +0.5000]] + # assert_array_almost_equal(sos, sos2, decimal=4) + + @pytest.mark.parametrize('b, a, analog, sos', + [([1], [1], False, [[1., 0., 0., 1., 0., 0.]]), + ([1], [1], True, [[0., 0., 1., 0., 0., 1.]]), + ([1], [1., 0., -1.01, 0, 0.01], False, + [[1., 0., 0., 1., 0., -0.01], + [1., 0., 0., 1., 0., -1]]), + ([1], [1., 0., -1.01, 0, 0.01], True, + [[0., 0., 1., 1., 0., -1], + [0., 0., 1., 1., 0., -0.01]])]) + def test_analog(self, b, a, analog, sos): + sos2 = tf2sos(b, a, analog=analog) + assert_array_almost_equal(sos, sos2, decimal=4) + + +class TestZpk2Sos: + + @pytest.mark.parametrize('dt', 'fdgFDG') + @pytest.mark.parametrize('pairing, analog', + [('nearest', False), + ('keep_odd', False), + ('minimal', False), + ('minimal', True)]) + def test_dtypes(self, dt, pairing, analog): + z = np.array([-1, -1]).astype(dt) + ct = dt.upper() # the poles have to be complex + p = np.array([0.57149 + 0.29360j, 0.57149 - 0.29360j]).astype(ct) + k = np.array(1).astype(dt) + sos = zpk2sos(z, p, k, pairing=pairing, analog=analog) + sos2 = [[1, 2, 1, 1, -1.14298, 0.41280]] # octave & MATLAB + assert_array_almost_equal(sos, sos2, decimal=4) + + def test_basic(self): + for pairing in ('nearest', 'keep_odd'): + # + # Cases that match octave + # + + z = [-1, -1] + p = [0.57149 + 0.29360j, 0.57149 - 0.29360j] + k = 1 + sos = zpk2sos(z, p, k, pairing=pairing) + sos2 = [[1, 2, 1, 1, -1.14298, 0.41280]] # octave & MATLAB + assert_array_almost_equal(sos, sos2, decimal=4) + + z = [1j, -1j] + p = [0.9, -0.9, 0.7j, -0.7j] + k = 1 + sos = zpk2sos(z, p, k, pairing=pairing) + sos2 = [[1, 0, 1, 1, 0, +0.49], + [1, 0, 0, 1, 0, -0.81]] # octave + # sos2 = [[0, 0, 1, 1, -0.9, 0], + # [1, 0, 1, 1, 0.9, 0]] # MATLAB + assert_array_almost_equal(sos, sos2, decimal=4) + + z = [] + p = [0.8, -0.5+0.25j, -0.5-0.25j] + k = 1. 
+ sos = zpk2sos(z, p, k, pairing=pairing) + sos2 = [[1., 0., 0., 1., 1., 0.3125], + [1., 0., 0., 1., -0.8, 0.]] # octave, MATLAB fails + assert_array_almost_equal(sos, sos2, decimal=4) + + z = [1., 1., 0.9j, -0.9j] + p = [0.99+0.01j, 0.99-0.01j, 0.1+0.9j, 0.1-0.9j] + k = 1 + sos = zpk2sos(z, p, k, pairing=pairing) + sos2 = [[1, 0, 0.81, 1, -0.2, 0.82], + [1, -2, 1, 1, -1.98, 0.9802]] # octave + # sos2 = [[1, -2, 1, 1, -0.2, 0.82], + # [1, 0, 0.81, 1, -1.98, 0.9802]] # MATLAB + assert_array_almost_equal(sos, sos2, decimal=4) + + z = [0.9+0.1j, 0.9-0.1j, -0.9] + p = [0.75+0.25j, 0.75-0.25j, 0.9] + k = 1 + sos = zpk2sos(z, p, k, pairing=pairing) + if pairing == 'keep_odd': + sos2 = [[1, -1.8, 0.82, 1, -1.5, 0.625], + [1, 0.9, 0, 1, -0.9, 0]] # octave; MATLAB fails + assert_array_almost_equal(sos, sos2, decimal=4) + else: # pairing == 'nearest' + sos2 = [[1, 0.9, 0, 1, -1.5, 0.625], + [1, -1.8, 0.82, 1, -0.9, 0]] # our algorithm + assert_array_almost_equal(sos, sos2, decimal=4) + + # + # Cases that differ from octave: + # + + z = [-0.3090 + 0.9511j, -0.3090 - 0.9511j, 0.8090 + 0.5878j, + +0.8090 - 0.5878j, -1.0000 + 0.0000j] + p = [-0.3026 + 0.9312j, -0.3026 - 0.9312j, 0.7922 + 0.5755j, + +0.7922 - 0.5755j, -0.9791 + 0.0000j] + k = 1 + sos = zpk2sos(z, p, k, pairing=pairing) + # sos2 = [[1, 0.618, 1, 1, 0.6052, 0.95870], + # [1, -1.618, 1, 1, -1.5844, 0.95878], + # [1, 1, 0, 1, 0.9791, 0]] # octave, MATLAB fails + sos2 = [[1, 1, 0, 1, +0.97915, 0], + [1, 0.61803, 1, 1, +0.60515, 0.95873], + [1, -1.61803, 1, 1, -1.58430, 0.95873]] + assert_array_almost_equal(sos, sos2, decimal=4) + + z = [-1 - 1.4142j, -1 + 1.4142j, + -0.625 - 1.0533j, -0.625 + 1.0533j] + p = [-0.2 - 0.6782j, -0.2 + 0.6782j, + -0.1 - 0.5385j, -0.1 + 0.5385j] + k = 4 + sos = zpk2sos(z, p, k, pairing=pairing) + sos2 = [[4, 8, 12, 1, 0.2, 0.3], + [1, 1.25, 1.5, 1, 0.4, 0.5]] # MATLAB + # sos2 = [[4, 8, 12, 1, 0.4, 0.5], + # [1, 1.25, 1.5, 1, 0.2, 0.3]] # octave + xp_assert_close(sos, sos2, rtol=1e-4, atol=1e-4) + + z = [] + p = [0.2, -0.5+0.25j, -0.5-0.25j] + k = 1. + sos = zpk2sos(z, p, k, pairing=pairing) + sos2 = [[1., 0., 0., 1., -0.2, 0.], + [1., 0., 0., 1., 1., 0.3125]] + # sos2 = [[1., 0., 0., 1., 1., 0.3125], + # [1., 0., 0., 1., -0.2, 0]] # octave, MATLAB fails + assert_array_almost_equal(sos, sos2, decimal=4) + + # The next two examples are adapted from Leland B. Jackson, + # "Digital Filters and Signal Processing (1995) p.400: + # http://books.google.com/books?id=VZ8uabI1pNMC&lpg=PA400&ots=gRD9pi8Jua&dq=Pole%2Fzero%20pairing%20for%20minimum%20roundoff%20noise%20in%20BSF.&pg=PA400#v=onepage&q=Pole%2Fzero%20pairing%20for%20minimum%20roundoff%20noise%20in%20BSF.&f=false + + deg2rad = np.pi / 180. + k = 1. 
+ + # first example + thetas = [22.5, 45, 77.5] + mags = [0.8, 0.6, 0.9] + z = np.array([np.exp(theta * deg2rad * 1j) for theta in thetas]) + z = np.concatenate((z, np.conj(z))) + p = np.array([mag * np.exp(theta * deg2rad * 1j) + for theta, mag in zip(thetas, mags)]) + p = np.concatenate((p, np.conj(p))) + sos = zpk2sos(z, p, k) + # sos2 = [[1, -0.43288, 1, 1, -0.38959, 0.81], # octave, + # [1, -1.41421, 1, 1, -0.84853, 0.36], # MATLAB fails + # [1, -1.84776, 1, 1, -1.47821, 0.64]] + # Note that pole-zero pairing matches, but ordering is different + sos2 = [[1, -1.41421, 1, 1, -0.84853, 0.36], + [1, -1.84776, 1, 1, -1.47821, 0.64], + [1, -0.43288, 1, 1, -0.38959, 0.81]] + assert_array_almost_equal(sos, sos2, decimal=4) + + # second example + z = np.array([np.exp(theta * deg2rad * 1j) + for theta in (85., 10.)]) + z = np.concatenate((z, np.conj(z), [1, -1])) + sos = zpk2sos(z, p, k) + + # sos2 = [[1, -0.17431, 1, 1, -0.38959, 0.81], # octave "wrong", + # [1, -1.96962, 1, 1, -0.84853, 0.36], # MATLAB fails + # [1, 0, -1, 1, -1.47821, 0.64000]] + # Our pole-zero pairing matches the text, Octave does not + sos2 = [[1, 0, -1, 1, -0.84853, 0.36], + [1, -1.96962, 1, 1, -1.47821, 0.64], + [1, -0.17431, 1, 1, -0.38959, 0.81]] + assert_array_almost_equal(sos, sos2, decimal=4) + + # these examples are taken from the doc string, and show the + # effect of the 'pairing' argument + @pytest.mark.parametrize('pairing, sos', + [('nearest', + np.array([[1., 1., 0.5, 1., -0.75, 0.], + [1., 1., 0., 1., -1.6, 0.65]])), + ('keep_odd', + np.array([[1., 1., 0, 1., -0.75, 0.], + [1., 1., 0.5, 1., -1.6, 0.65]])), + ('minimal', + np.array([[0., 1., 1., 0., 1., -0.75], + [1., 1., 0.5, 1., -1.6, 0.65]]))]) + def test_pairing(self, pairing, sos): + z1 = np.array([-1, -0.5-0.5j, -0.5+0.5j]) + p1 = np.array([0.75, 0.8+0.1j, 0.8-0.1j]) + sos2 = zpk2sos(z1, p1, 1, pairing=pairing) + assert_array_almost_equal(sos, sos2, decimal=4) + + @pytest.mark.parametrize('p, sos_dt', + [([-1, 1, -0.1, 0.1], + [[0., 0., 1., 1., 0., -0.01], + [0., 0., 1., 1., 0., -1]]), + ([-0.7071+0.7071j, -0.7071-0.7071j, -0.1j, 0.1j], + [[0., 0., 1., 1., 0., 0.01], + [0., 0., 1., 1., 1.4142, 1.]])]) + def test_analog(self, p, sos_dt): + # test `analog` argument + # for discrete time, poles closest to unit circle should appear last + # for cont. 
time, poles closest to imaginary axis should appear last + sos2_dt = zpk2sos([], p, 1, pairing='minimal', analog=False) + sos2_ct = zpk2sos([], p, 1, pairing='minimal', analog=True) + assert_array_almost_equal(sos_dt, sos2_dt, decimal=4) + assert_array_almost_equal(sos_dt[::-1], sos2_ct, decimal=4) + + def test_bad_args(self): + with pytest.raises(ValueError, match=r'pairing must be one of'): + zpk2sos([1], [2], 1, pairing='no_such_pairing') + + with pytest.raises(ValueError, match=r'.*pairing must be "minimal"'): + zpk2sos([1], [2], 1, pairing='keep_odd', analog=True) + + with pytest.raises(ValueError, + match=r'.*must have len\(p\)>=len\(z\)'): + zpk2sos([1, 1], [2], 1, analog=True) + + with pytest.raises(ValueError, match=r'k must be real'): + zpk2sos([1], [2], k=1j) + + +class TestFreqs: + + def test_basic(self): + _, h = freqs([1.0], [1.0], worN=8) + assert_array_almost_equal(h, np.ones(8)) + + def test_output(self): + # 1st order low-pass filter: H(s) = 1 / (s + 1) + w = [0.1, 1, 10, 100] + num = [1] + den = [1, 1] + w, H = freqs(num, den, worN=w) + s = w * 1j + expected = 1 / (s + 1) + assert_array_almost_equal(H.real, expected.real) + assert_array_almost_equal(H.imag, expected.imag) + + def test_freq_range(self): + # Test that freqresp() finds a reasonable frequency range. + # 1st order low-pass filter: H(s) = 1 / (s + 1) + # Expected range is from 0.01 to 10. + num = [1] + den = [1, 1] + n = 10 + expected_w = np.logspace(-2, 1, n) + w, H = freqs(num, den, worN=n) + assert_array_almost_equal(w, expected_w) + + def test_plot(self): + + def plot(w, h): + assert_array_almost_equal(h, np.ones(8)) + + assert_raises(ZeroDivisionError, freqs, [1.0], [1.0], worN=8, + plot=lambda w, h: 1 / 0) + freqs([1.0], [1.0], worN=8, plot=plot) + + def test_backward_compat(self): + # For backward compatibility, test if None act as a wrapper for default + w1, h1 = freqs([1.0], [1.0]) + w2, h2 = freqs([1.0], [1.0], None) + assert_array_almost_equal(w1, w2) + assert_array_almost_equal(h1, h2) + + def test_w_or_N_types(self): + # Measure at 8 equally-spaced points + for N in (8, np.int8(8), np.int16(8), np.int32(8), np.int64(8), + np.array(8)): + w, h = freqs([1.0], [1.0], worN=N) + assert len(w) == 8 + assert_array_almost_equal(h, np.ones(8)) + + # Measure at frequency 8 rad/sec + for w in (8.0, 8.0+0j): + w_out, h = freqs([1.0], [1.0], worN=w) + assert_array_almost_equal(w_out, [8]) + assert_array_almost_equal(h, [1]) + + +class TestFreqs_zpk: + + def test_basic(self): + _, h = freqs_zpk([1.0], [1.0], [1.0], worN=8) + assert_array_almost_equal(h, np.ones(8)) + + def test_output(self): + # 1st order low-pass filter: H(s) = 1 / (s + 1) + w = [0.1, 1, 10, 100] + z = [] + p = [-1] + k = 1 + w, H = freqs_zpk(z, p, k, worN=w) + s = w * 1j + expected = 1 / (s + 1) + assert_array_almost_equal(H.real, expected.real) + assert_array_almost_equal(H.imag, expected.imag) + + def test_freq_range(self): + # Test that freqresp() finds a reasonable frequency range. + # 1st order low-pass filter: H(s) = 1 / (s + 1) + # Expected range is from 0.01 to 10. 
+ z = [] + p = [-1] + k = 1 + n = 10 + expected_w = np.logspace(-2, 1, n) + w, H = freqs_zpk(z, p, k, worN=n) + assert_array_almost_equal(w, expected_w) + + def test_vs_freqs(self): + b, a = cheby1(4, 5, 100, analog=True, output='ba') + z, p, k = cheby1(4, 5, 100, analog=True, output='zpk') + + w1, h1 = freqs(b, a) + w2, h2 = freqs_zpk(z, p, k) + xp_assert_close(w1, w2) + xp_assert_close(h1, h2, rtol=1e-6) + + def test_backward_compat(self): + # For backward compatibility, test if None act as a wrapper for default + w1, h1 = freqs_zpk([1.0], [1.0], [1.0]) + w2, h2 = freqs_zpk([1.0], [1.0], [1.0], None) + assert_array_almost_equal(w1, w2) + assert_array_almost_equal(h1, h2) + + def test_w_or_N_types(self): + # Measure at 8 equally-spaced points + for N in (8, np.int8(8), np.int16(8), np.int32(8), np.int64(8), + np.array(8)): + w, h = freqs_zpk([], [], 1, worN=N) + assert len(w) == 8 + assert_array_almost_equal(h, np.ones(8)) + + # Measure at frequency 8 rad/sec + for w in (8.0, 8.0+0j): + w_out, h = freqs_zpk([], [], 1, worN=w) + assert_array_almost_equal(w_out, [8]) + assert_array_almost_equal(h, [1]) + + +class TestFreqz: + + def test_ticket1441(self): + """Regression test for ticket 1441.""" + # Because freqz previously used arange instead of linspace, + # when N was large, it would return one more point than + # requested. + N = 100000 + w, h = freqz([1.0], worN=N) + assert w.shape == (N,) + + def test_basic(self): + w, h = freqz([1.0], worN=8) + assert_array_almost_equal(w, np.pi * np.arange(8) / 8.) + assert_array_almost_equal(h, np.ones(8)) + w, h = freqz([1.0], worN=9) + assert_array_almost_equal(w, np.pi * np.arange(9) / 9.) + assert_array_almost_equal(h, np.ones(9)) + + for a in [1, np.ones(2)]: + w, h = freqz(np.ones(2), a, worN=0) + assert w.shape == (0,) + assert h.shape == (0,) + assert h.dtype == np.dtype('complex128') + + t = np.linspace(0, 1, 4, endpoint=False) + for b, a, h_whole in zip( + ([1., 0, 0, 0], np.sin(2 * np.pi * t)), + ([1., 0, 0, 0], [0.5, 0, 0, 0]), + ([1., 1., 1., 1.], [0, -4j, 0, 4j])): + w, h = freqz(b, a, worN=4, whole=True) + expected_w = np.linspace(0, 2 * np.pi, 4, endpoint=False) + assert_array_almost_equal(w, expected_w) + assert_array_almost_equal(h, h_whole) + # simultaneously check int-like support + w, h = freqz(b, a, worN=np.int32(4), whole=True) + assert_array_almost_equal(w, expected_w) + assert_array_almost_equal(h, h_whole) + w, h = freqz(b, a, worN=w, whole=True) + assert_array_almost_equal(w, expected_w) + assert_array_almost_equal(h, h_whole) + + def test_basic_whole(self): + w, h = freqz([1.0], worN=8, whole=True) + assert_array_almost_equal(w, 2 * np.pi * np.arange(8.0) / 8) + assert_array_almost_equal(h, np.ones(8)) + + def test_plot(self): + + def plot(w, h): + assert_array_almost_equal(w, np.pi * np.arange(8.0) / 8) + assert_array_almost_equal(h, np.ones(8)) + + assert_raises(ZeroDivisionError, freqz, [1.0], worN=8, + plot=lambda w, h: 1 / 0) + freqz([1.0], worN=8, plot=plot) + + def test_fft_wrapping(self): + # Some simple real FIR filters + bs = list() # filters + as_ = list() + hs_whole = list() + hs_half = list() + # 3 taps + t = np.linspace(0, 1, 3, endpoint=False) + bs.append(np.sin(2 * np.pi * t)) + as_.append(3.) 
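+ # hs_whole / hs_half below are the exact responses sampled at len(b) points on [0, 2*pi) and [0, pi)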
+ hs_whole.append([0, -0.5j, 0.5j]) + hs_half.append([0, np.sqrt(1./12.), -0.5j]) + # 4 taps + t = np.linspace(0, 1, 4, endpoint=False) + bs.append(np.sin(2 * np.pi * t)) + as_.append(0.5) + hs_whole.append([0, -4j, 0, 4j]) + hs_half.append([0, np.sqrt(8), -4j, -np.sqrt(8)]) + del t + for ii, b in enumerate(bs): + # whole + a = as_[ii] + expected_w = np.linspace(0, 2 * np.pi, len(b), endpoint=False) + w, h = freqz(b, a, worN=expected_w, whole=True) # polyval + err_msg = f'b = {b}, a={a}' + assert_array_almost_equal(w, expected_w, err_msg=err_msg) + assert_array_almost_equal(h, hs_whole[ii], err_msg=err_msg) + w, h = freqz(b, a, worN=len(b), whole=True) # FFT + assert_array_almost_equal(w, expected_w, err_msg=err_msg) + assert_array_almost_equal(h, hs_whole[ii], err_msg=err_msg) + # non-whole + expected_w = np.linspace(0, np.pi, len(b), endpoint=False) + w, h = freqz(b, a, worN=expected_w, whole=False) # polyval + assert_array_almost_equal(w, expected_w, err_msg=err_msg) + assert_array_almost_equal(h, hs_half[ii], err_msg=err_msg) + w, h = freqz(b, a, worN=len(b), whole=False) # FFT + assert_array_almost_equal(w, expected_w, err_msg=err_msg) + assert_array_almost_equal(h, hs_half[ii], err_msg=err_msg) + + # some random FIR filters (real + complex) + # assume polyval is accurate + rng = np.random.RandomState(0) + for ii in range(2, 10): # number of taps + b = rng.randn(ii) + for kk in range(2): + a = rng.randn(1) if kk == 0 else rng.randn(3) + for jj in range(2): + if jj == 1: + b = b + rng.randn(ii) * 1j + # whole + expected_w = np.linspace(0, 2 * np.pi, ii, endpoint=False) + w, expected_h = freqz(b, a, worN=expected_w, whole=True) + assert_array_almost_equal(w, expected_w) + w, h = freqz(b, a, worN=ii, whole=True) + assert_array_almost_equal(w, expected_w) + assert_array_almost_equal(h, expected_h) + # half + expected_w = np.linspace(0, np.pi, ii, endpoint=False) + w, expected_h = freqz(b, a, worN=expected_w, whole=False) + assert_array_almost_equal(w, expected_w) + w, h = freqz(b, a, worN=ii, whole=False) + assert_array_almost_equal(w, expected_w) + assert_array_almost_equal(h, expected_h) + + def test_broadcasting1(self): + # Test broadcasting with worN an integer or a 1-D array, + # b and a are n-dimensional arrays. + np.random.seed(123) + b = np.random.rand(3, 5, 1) + a = np.random.rand(2, 1) + for whole in [False, True]: + # Test with worN being integers (one fast for FFT and one not), + # a 1-D array, and an empty array. + for worN in [16, 17, np.linspace(0, 1, 10), np.array([])]: + w, h = freqz(b, a, worN=worN, whole=whole) + for k in range(b.shape[1]): + bk = b[:, k, 0] + ak = a[:, 0] + ww, hh = freqz(bk, ak, worN=worN, whole=whole) + xp_assert_close(ww, w) + xp_assert_close(hh, h[k]) + + def test_broadcasting2(self): + # Test broadcasting with worN an integer or a 1-D array, + # b is an n-dimensional array, and a is left at the default value. + np.random.seed(123) + b = np.random.rand(3, 5, 1) + for whole in [False, True]: + for worN in [16, 17, np.linspace(0, 1, 10)]: + w, h = freqz(b, worN=worN, whole=whole) + for k in range(b.shape[1]): + bk = b[:, k, 0] + ww, hh = freqz(bk, worN=worN, whole=whole) + xp_assert_close(ww, w) + xp_assert_close(hh, h[k]) + + def test_broadcasting3(self): + # Test broadcasting where b.shape[-1] is the same length + # as worN, and a is left at the default value. 
+ np.random.seed(123) + N = 16 + b = np.random.rand(3, N) + for whole in [False, True]: + for worN in [N, np.linspace(0, 1, N)]: + w, h = freqz(b, worN=worN, whole=whole) + assert w.size == N + for k in range(N): + bk = b[:, k] + ww, hh = freqz(bk, worN=w[k], whole=whole) + xp_assert_close(ww, np.asarray(w[k])[None]) + xp_assert_close(hh, np.asarray(h[k])[None]) + + def test_broadcasting4(self): + # Test broadcasting with worN a 2-D array. + np.random.seed(123) + b = np.random.rand(4, 2, 1, 1) + a = np.random.rand(5, 2, 1, 1) + for whole in [False, True]: + for worN in [np.random.rand(6, 7), np.empty((6, 0))]: + w, h = freqz(b, a, worN=worN, whole=whole) + xp_assert_close(w, worN, rtol=1e-14) + assert h.shape == (2,) + worN.shape + for k in range(2): + ww, hh = freqz(b[:, k, 0, 0], a[:, k, 0, 0], + worN=worN.ravel(), + whole=whole) + xp_assert_close(ww, worN.ravel(), rtol=1e-14) + xp_assert_close(hh, h[k, :, :].ravel()) + + def test_backward_compat(self): + # For backward compatibility, test if None act as a wrapper for default + w1, h1 = freqz([1.0], 1) + w2, h2 = freqz([1.0], 1, None) + assert_array_almost_equal(w1, w2) + assert_array_almost_equal(h1, h2) + + def test_fs_param(self): + fs = 900 + b = [0.039479155677484369, 0.11843746703245311, 0.11843746703245311, + 0.039479155677484369] + a = [1.0, -1.3199152021838287, 0.80341991081938424, + -0.16767146321568049] + + # N = None, whole=False + w1, h1 = freqz(b, a, fs=fs) + w2, h2 = freqz(b, a) + xp_assert_close(h1, h2) + xp_assert_close(w1, np.linspace(0, fs/2, 512, endpoint=False)) + + # N = None, whole=True + w1, h1 = freqz(b, a, whole=True, fs=fs) + w2, h2 = freqz(b, a, whole=True) + xp_assert_close(h1, h2) + xp_assert_close(w1, np.linspace(0, fs, 512, endpoint=False)) + + # N = 5, whole=False + w1, h1 = freqz(b, a, 5, fs=fs) + w2, h2 = freqz(b, a, 5) + xp_assert_close(h1, h2) + xp_assert_close(w1, np.linspace(0, fs/2, 5, endpoint=False)) + + # N = 5, whole=True + w1, h1 = freqz(b, a, 5, whole=True, fs=fs) + w2, h2 = freqz(b, a, 5, whole=True) + xp_assert_close(h1, h2) + xp_assert_close(w1, np.linspace(0, fs, 5, endpoint=False)) + + # w is an array_like + for w in ([123], (123,), np.array([123]), (50, 123, 230), + np.array([50, 123, 230])): + w1, h1 = freqz(b, a, w, fs=fs) + w2, h2 = freqz(b, a, 2*pi*np.array(w)/fs) + xp_assert_close(h1, h2) + xp_assert_close(w, w1, check_dtype=False) + + def test_w_or_N_types(self): + # Measure at 7 (polyval) or 8 (fft) equally-spaced points + for N in (7, np.int8(7), np.int16(7), np.int32(7), np.int64(7), + np.array(7), + 8, np.int8(8), np.int16(8), np.int32(8), np.int64(8), + np.array(8)): + + w, h = freqz([1.0], worN=N) + assert_array_almost_equal(w, np.pi * np.arange(N) / N) + assert_array_almost_equal(h, np.ones(N)) + + w, h = freqz([1.0], worN=N, fs=100) + assert_array_almost_equal(w, np.linspace(0, 50, N, endpoint=False)) + assert_array_almost_equal(h, np.ones(N)) + + # Measure at frequency 8 Hz + for w in (8.0, 8.0+0j): + # Only makes sense when fs is specified + w_out, h = freqz([1.0], worN=w, fs=100) + assert_array_almost_equal(w_out, [8]) + assert_array_almost_equal(h, [1]) + + def test_nyquist(self): + w, h = freqz([1.0], worN=8, include_nyquist=True) + assert_array_almost_equal(w, np.pi * np.arange(8) / 7.) + assert_array_almost_equal(h, np.ones(8)) + w, h = freqz([1.0], worN=9, include_nyquist=True) + assert_array_almost_equal(w, np.pi * np.arange(9) / 8.) 
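+ # include_nyquist=True keeps the endpoint, so the grid spacing is pi/(worN - 1) rather than pi/worN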
+ assert_array_almost_equal(h, np.ones(9)) + + for a in [1, np.ones(2)]: + w, h = freqz(np.ones(2), a, worN=0, include_nyquist=True) + assert w.shape == (0,) + assert h.shape == (0,) + assert h.dtype == np.dtype('complex128') + + w1, h1 = freqz([1.0], worN=8, whole = True, include_nyquist=True) + w2, h2 = freqz([1.0], worN=8, whole = True, include_nyquist=False) + assert_array_almost_equal(w1, w2) + assert_array_almost_equal(h1, h2) + + # https://github.com/scipy/scipy/issues/17289 + # https://github.com/scipy/scipy/issues/15273 + @pytest.mark.parametrize('whole,nyquist,worN', + [(False, False, 32), + (False, True, 32), + (True, False, 32), + (True, True, 32), + (False, False, 257), + (False, True, 257), + (True, False, 257), + (True, True, 257)]) + def test_17289(self, whole, nyquist, worN): + d = [0, 1] + w, Drfft = freqz(d, worN=32, whole=whole, include_nyquist=nyquist) + _, Dpoly = freqz(d, worN=w) + xp_assert_close(Drfft, Dpoly) + + def test_fs_validation(self): + with pytest.raises(ValueError, match="Sampling.*single scalar"): + freqz([1.0], fs=np.array([10, 20])) + + with pytest.raises(ValueError, match="Sampling.*be none."): + freqz([1.0], fs=None) + + +class Testfreqz_sos: + + def test_freqz_sos_basic(self): + # Compare the results of freqz and freqz_sos for a low order + # Butterworth filter. + + N = 500 + + b, a = butter(4, 0.2) + sos = butter(4, 0.2, output='sos') + w, h = freqz(b, a, worN=N) + w2, h2 = freqz_sos(sos, worN=N) + xp_assert_equal(w2, w) + xp_assert_close(h2, h, rtol=1e-10, atol=1e-14) + + b, a = ellip(3, 1, 30, (0.2, 0.3), btype='bandpass') + sos = ellip(3, 1, 30, (0.2, 0.3), btype='bandpass', output='sos') + w, h = freqz(b, a, worN=N) + w2, h2 = freqz_sos(sos, worN=N) + xp_assert_equal(w2, w) + xp_assert_close(h2, h, rtol=1e-10, atol=1e-14) + # must have at least one section + assert_raises(ValueError, freqz_sos, sos[:0]) + + def test_backward_compat(self): + # For backward compatibility, test if None act as a wrapper for default + N = 500 + + sos = butter(4, 0.2, output='sos') + w1, h1 = freqz_sos(sos, worN=N) + w2, h2 = sosfreqz(sos, worN=N) + assert_array_almost_equal(w1, w2) + assert_array_almost_equal(h1, h2) + + def test_freqz_sos_design(self): + # Compare freqz_sos output against expected values for different + # filter types + + # from cheb2ord + N, Wn = cheb2ord([0.1, 0.6], [0.2, 0.5], 3, 60) + sos = cheby2(N, 60, Wn, 'stop', output='sos') + w, h = freqz_sos(sos) + h = np.abs(h) + w /= np.pi + xp_assert_close(20 * np.log10(h[w <= 0.1]), np.asarray(0.), atol=3.01, + check_shape=False) + xp_assert_close(20 * np.log10(h[w >= 0.6]), np.asarray(0.), atol=3.01, + check_shape=False) + xp_assert_close(h[(w >= 0.2) & (w <= 0.5)], + np.asarray(0.), atol=1e-3, + check_shape=False) # <= -60 dB + + N, Wn = cheb2ord([0.1, 0.6], [0.2, 0.5], 3, 150) + sos = cheby2(N, 150, Wn, 'stop', output='sos') + w, h = freqz_sos(sos) + dB = 20*np.log10(np.abs(h)) + w /= np.pi + xp_assert_close(dB[w <= 0.1], np.asarray(0.0), atol=3.01, check_shape=False) + xp_assert_close(dB[w >= 0.6], np.asarray(0.0), atol=3.01, check_shape=False) + assert np.all(dB[(w >= 0.2) & (w <= 0.5)] < -149.9) + + # from cheb1ord + N, Wn = cheb1ord(0.2, 0.3, 3, 40) + sos = cheby1(N, 3, Wn, 'low', output='sos') + w, h = freqz_sos(sos) + h = np.abs(h) + w /= np.pi + xp_assert_close(20 * np.log10(h[w <= 0.2]), np.asarray(0.0), atol=3.01, + check_shape=False) + xp_assert_close(h[w >= 0.3], np.asarray(0.0), atol=1e-2, + check_shape=False) # <= -40 dB + + N, Wn = cheb1ord(0.2, 0.3, 1, 150) + sos = cheby1(N, 1, Wn, 
'low', output='sos') + w, h = freqz_sos(sos) + dB = 20*np.log10(np.abs(h)) + w /= np.pi + xp_assert_close(dB[w <= 0.2], np.asarray(0.0), atol=1.01, + check_shape=False) + assert np.all(dB[w >= 0.3] < -149.9) + + # adapted from ellipord + N, Wn = ellipord(0.3, 0.2, 3, 60) + sos = ellip(N, 0.3, 60, Wn, 'high', output='sos') + w, h = freqz_sos(sos) + h = np.abs(h) + w /= np.pi + xp_assert_close(20 * np.log10(h[w >= 0.3]), np.asarray(0.0), atol=3.01, + check_shape=False) + xp_assert_close(h[w <= 0.1], np.asarray(0.0), atol=1.5e-3, + check_shape=False) # <= -60 dB (approx) + + # adapted from buttord + N, Wn = buttord([0.2, 0.5], [0.14, 0.6], 3, 40) + sos = butter(N, Wn, 'band', output='sos') + w, h = freqz_sos(sos) + h = np.abs(h) + w /= np.pi + + h014 = h[w <= 0.14] + xp_assert_close(h014, np.zeros_like(h014), atol=1e-2) # <= -40 dB + h06 = h[w >= 0.6] + xp_assert_close(h06, np.zeros_like(h06), atol=1e-2) # <= -40 dB + h0205 = 20 * np.log10(h[(w >= 0.2) & (w <= 0.5)]) + xp_assert_close(h0205, np.zeros_like(h0205), atol=3.01) + + N, Wn = buttord([0.2, 0.5], [0.14, 0.6], 3, 100) + sos = butter(N, Wn, 'band', output='sos') + w, h = freqz_sos(sos) + dB = 20*np.log10(np.maximum(np.abs(h), 1e-10)) + w /= np.pi + + assert np.all(dB[(w > 0) & (w <= 0.14)] < -99.9) + assert np.all(dB[w >= 0.6] < -99.9) + db0205 = dB[(w >= 0.2) & (w <= 0.5)] + xp_assert_close(db0205, np.zeros_like(db0205), atol=3.01) + + def test_freqz_sos_design_ellip(self): + N, Wn = ellipord(0.3, 0.1, 3, 60) + sos = ellip(N, 0.3, 60, Wn, 'high', output='sos') + w, h = freqz_sos(sos) + h = np.abs(h) + w /= np.pi + + h03 = 20 * np.log10(h[w >= 0.3]) + xp_assert_close(h03, np.zeros_like(h03), atol=3.01) + h01 = h[w <= 0.1] + xp_assert_close(h01, np.zeros_like(h01), atol=1.5e-3) # <= -60 dB (approx) + + N, Wn = ellipord(0.3, 0.2, .5, 150) + sos = ellip(N, .5, 150, Wn, 'high', output='sos') + w, h = freqz_sos(sos) + dB = 20*np.log10(np.maximum(np.abs(h), 1e-10)) + w /= np.pi + + db03 = dB[w >= 0.3] + xp_assert_close(db03, np.zeros_like(db03), atol=.55) + # Allow some numerical slop in the upper bound -150, so this is + # a check that dB[w <= 0.2] is less than or almost equal to -150. + assert dB[w <= 0.2].max() < -150*(1 - 1e-12) + + @mpmath_check("0.10") + def test_freqz_sos_against_mp(self): + # Compare the result of freqz_sos applied to a high order Butterworth + # filter against the result computed using mpmath. (signal.freqz fails + # miserably with such high order filters.) + from . 
import mpsig + N = 500 + order = 25 + Wn = 0.15 + with mpmath.workdps(80): + z_mp, p_mp, k_mp = mpsig.butter_lp(order, Wn) + w_mp, h_mp = mpsig.zpkfreqz(z_mp, p_mp, k_mp, N) + w_mp = np.array([float(x) for x in w_mp]) + h_mp = np.array([complex(x) for x in h_mp]) + + sos = butter(order, Wn, output='sos') + w, h = freqz_sos(sos, worN=N) + xp_assert_close(w, w_mp, rtol=1e-12, atol=1e-14) + xp_assert_close(h, h_mp, rtol=1e-12, atol=1e-14) + + def test_fs_param(self): + fs = 900 + sos = [[0.03934683014103762, 0.07869366028207524, 0.03934683014103762, + 1.0, -0.37256600288916636, 0.0], + [1.0, 1.0, 0.0, 1.0, -0.9495739996946778, 0.45125966317124144]] + + # N = None, whole=False + w1, h1 = freqz_sos(sos, fs=fs) + w2, h2 = freqz_sos(sos) + xp_assert_close(h1, h2) + xp_assert_close(w1, np.linspace(0, fs/2, 512, endpoint=False)) + + # N = None, whole=True + w1, h1 = freqz_sos(sos, whole=True, fs=fs) + w2, h2 = freqz_sos(sos, whole=True) + xp_assert_close(h1, h2, atol=1e-27) + xp_assert_close(w1, np.linspace(0, fs, 512, endpoint=False)) + + # N = 5, whole=False + w1, h1 = freqz_sos(sos, 5, fs=fs) + w2, h2 = freqz_sos(sos, 5) + xp_assert_close(h1, h2) + xp_assert_close(w1, np.linspace(0, fs/2, 5, endpoint=False)) + + # N = 5, whole=True + w1, h1 = freqz_sos(sos, 5, whole=True, fs=fs) + w2, h2 = freqz_sos(sos, 5, whole=True) + xp_assert_close(h1, h2) + xp_assert_close(w1, np.linspace(0, fs, 5, endpoint=False)) + + # w is an array_like + for w in ([123], (123,), np.array([123]), (50, 123, 230), + np.array([50, 123, 230])): + w1, h1 = freqz_sos(sos, w, fs=fs) + w2, h2 = freqz_sos(sos, 2*pi*np.array(w)/fs) + xp_assert_close(h1, h2) + xp_assert_close(w, w1, check_dtype=False) + + def test_w_or_N_types(self): + # Measure at 7 (polyval) or 8 (fft) equally-spaced points + for N in (7, np.int8(7), np.int16(7), np.int32(7), np.int64(7), + np.array(7), + 8, np.int8(8), np.int16(8), np.int32(8), np.int64(8), + np.array(8)): + + w, h = freqz_sos([1, 0, 0, 1, 0, 0], worN=N) + assert_array_almost_equal(w, np.pi * np.arange(N) / N) + assert_array_almost_equal(h, np.ones(N)) + + w, h = freqz_sos([1, 0, 0, 1, 0, 0], worN=N, fs=100) + assert_array_almost_equal(w, np.linspace(0, 50, N, endpoint=False)) + assert_array_almost_equal(h, np.ones(N)) + + # Measure at frequency 8 Hz + for w in (8.0, 8.0+0j): + # Only makes sense when fs is specified + w_out, h = freqz_sos([1, 0, 0, 1, 0, 0], worN=w, fs=100) + assert_array_almost_equal(w_out, [8]) + assert_array_almost_equal(h, [1]) + + def test_fs_validation(self): + sos = butter(4, 0.2, output='sos') + with pytest.raises(ValueError, match="Sampling.*single scalar"): + freqz_sos(sos, fs=np.array([10, 20])) + + +class TestFreqz_zpk: + + def test_ticket1441(self): + """Regression test for ticket 1441.""" + # Because freqz previously used arange instead of linspace, + # when N was large, it would return one more point than + # requested. 
+ N = 100000 + w, h = freqz_zpk([0.5], [0.5], 1.0, worN=N) + assert w.shape == (N,) + + def test_basic(self): + w, h = freqz_zpk([0.5], [0.5], 1.0, worN=8) + assert_array_almost_equal(w, np.pi * np.arange(8.0) / 8) + assert_array_almost_equal(h, np.ones(8)) + + def test_basic_whole(self): + w, h = freqz_zpk([0.5], [0.5], 1.0, worN=8, whole=True) + assert_array_almost_equal(w, 2 * np.pi * np.arange(8.0) / 8) + assert_array_almost_equal(h, np.ones(8)) + + def test_vs_freqz(self): + b, a = cheby1(4, 5, 0.5, analog=False, output='ba') + z, p, k = cheby1(4, 5, 0.5, analog=False, output='zpk') + + w1, h1 = freqz(b, a) + w2, h2 = freqz_zpk(z, p, k) + xp_assert_close(w1, w2) + xp_assert_close(h1, h2, rtol=1e-6) + + def test_backward_compat(self): + # For backward compatibility, test if None act as a wrapper for default + w1, h1 = freqz_zpk([0.5], [0.5], 1.0) + w2, h2 = freqz_zpk([0.5], [0.5], 1.0, None) + assert_array_almost_equal(w1, w2) + assert_array_almost_equal(h1, h2) + + def test_fs_param(self): + fs = 900 + z = [-1, -1, -1] + p = [0.4747869998473389+0.4752230717749344j, 0.37256600288916636, + 0.4747869998473389-0.4752230717749344j] + k = 0.03934683014103762 + + # N = None, whole=False + w1, h1 = freqz_zpk(z, p, k, whole=False, fs=fs) + w2, h2 = freqz_zpk(z, p, k, whole=False) + xp_assert_close(h1, h2) + xp_assert_close(w1, np.linspace(0, fs/2, 512, endpoint=False)) + + # N = None, whole=True + w1, h1 = freqz_zpk(z, p, k, whole=True, fs=fs) + w2, h2 = freqz_zpk(z, p, k, whole=True) + xp_assert_close(h1, h2) + xp_assert_close(w1, np.linspace(0, fs, 512, endpoint=False)) + + # N = 5, whole=False + w1, h1 = freqz_zpk(z, p, k, 5, fs=fs) + w2, h2 = freqz_zpk(z, p, k, 5) + xp_assert_close(h1, h2) + xp_assert_close(w1, np.linspace(0, fs/2, 5, endpoint=False)) + + # N = 5, whole=True + w1, h1 = freqz_zpk(z, p, k, 5, whole=True, fs=fs) + w2, h2 = freqz_zpk(z, p, k, 5, whole=True) + xp_assert_close(h1, h2) + xp_assert_close(w1, np.linspace(0, fs, 5, endpoint=False)) + + # w is an array_like + for w in ([123], (123,), np.array([123]), (50, 123, 230), + np.array([50, 123, 230])): + w1, h1 = freqz_zpk(z, p, k, w, fs=fs) + w2, h2 = freqz_zpk(z, p, k, 2*pi*np.array(w)/fs) + xp_assert_close(h1, h2) + xp_assert_close(w, w1, check_dtype=False) + + def test_w_or_N_types(self): + # Measure at 8 equally-spaced points + for N in (8, np.int8(8), np.int16(8), np.int32(8), np.int64(8), + np.array(8)): + + w, h = freqz_zpk([], [], 1, worN=N) + assert_array_almost_equal(w, np.pi * np.arange(8) / 8.) + assert_array_almost_equal(h, np.ones(8)) + + w, h = freqz_zpk([], [], 1, worN=N, fs=100) + assert_array_almost_equal(w, np.linspace(0, 50, 8, endpoint=False)) + assert_array_almost_equal(h, np.ones(8)) + + # Measure at frequency 8 Hz + for w in (8.0, 8.0+0j): + # Only makes sense when fs is specified + w_out, h = freqz_zpk([], [], 1, worN=w, fs=100) + assert_array_almost_equal(w_out, [8]) + assert_array_almost_equal(h, [1]) + + def test_fs_validation(self): + with pytest.raises(ValueError, match="Sampling.*single scalar"): + freqz_zpk([1.0], [1.0], [1.0], fs=np.array([10, 20])) + + with pytest.raises(ValueError, match="Sampling.*be none."): + freqz_zpk([1.0], [1.0], [1.0], fs=None) + + +class TestNormalize: + + def test_allclose(self): + """Test for false positive on allclose in normalize() in + filter_design.py""" + # Test to make sure the allclose call within signal.normalize does not + # choose false positives. Then check against a known output from MATLAB + # to make sure the fix doesn't break anything. 
+ + # These are the coefficients returned from + # `[b,a] = cheby1(8, 0.5, 0.048)' + # in MATLAB. There are at least 15 significant figures in each + # coefficient, so it makes sense to test for errors on the order of + # 1e-13 (this can always be relaxed if different platforms have + # different rounding errors) + b_matlab = np.array([2.150733144728282e-11, 1.720586515782626e-10, + 6.022052805239190e-10, 1.204410561047838e-09, + 1.505513201309798e-09, 1.204410561047838e-09, + 6.022052805239190e-10, 1.720586515782626e-10, + 2.150733144728282e-11]) + a_matlab = np.array([1.000000000000000e+00, -7.782402035027959e+00, + 2.654354569747454e+01, -5.182182531666387e+01, + 6.334127355102684e+01, -4.963358186631157e+01, + 2.434862182949389e+01, -6.836925348604676e+00, + 8.412934944449140e-01]) + + # This is the input to signal.normalize after passing through the + # equivalent steps in signal.iirfilter as was done for MATLAB + b_norm_in = np.array([1.5543135865293012e-06, 1.2434508692234413e-05, + 4.3520780422820447e-05, 8.7041560845640893e-05, + 1.0880195105705122e-04, 8.7041560845640975e-05, + 4.3520780422820447e-05, 1.2434508692234413e-05, + 1.5543135865293012e-06]) + a_norm_in = np.array([7.2269025909127173e+04, -5.6242661430467968e+05, + 1.9182761917308895e+06, -3.7451128364682454e+06, + 4.5776121393762771e+06, -3.5869706138592605e+06, + 1.7596511818472347e+06, -4.9409793515707983e+05, + 6.0799461347219651e+04]) + + b_output, a_output = normalize(b_norm_in, a_norm_in) + + # The test on b works for decimal=14 but the one for a does not. For + # the sake of consistency, both of these are decimal=13. If something + # breaks on another platform, it is probably fine to relax this lower. + assert_array_almost_equal(b_matlab, b_output, decimal=13) + assert_array_almost_equal(a_matlab, a_output, decimal=13) + + def test_errors(self): + """Test the error cases.""" + # all zero denominator + assert_raises(ValueError, normalize, [1, 2], 0) + + # denominator not 1 dimensional + assert_raises(ValueError, normalize, [1, 2], [[1]]) + + # numerator too many dimensions + assert_raises(ValueError, normalize, [[[1, 2]]], 1) + + +class TestLp2lp: + + def test_basic(self): + b = [1] + a = [1, np.sqrt(2), 1] + b_lp, a_lp = lp2lp(b, a, 0.38574256627112119) + assert_array_almost_equal(b_lp, [0.1488], decimal=4) + assert_array_almost_equal(a_lp, [1, 0.5455, 0.1488], decimal=4) + + +class TestLp2hp: + + def test_basic(self): + b = [0.25059432325190018] + a = [1, 0.59724041654134863, 0.92834805757524175, 0.25059432325190018] + b_hp, a_hp = lp2hp(b, a, 2*np.pi*5000) + xp_assert_close(b_hp, [1.0, 0, 0, 0]) + xp_assert_close(a_hp, [1, 1.1638e5, 2.3522e9, 1.2373e14], rtol=1e-4) + + +class TestLp2bp: + + def test_basic(self): + b = [1] + a = [1, 2, 2, 1] + b_bp, a_bp = lp2bp(b, a, 2*np.pi*4000, 2*np.pi*2000) + xp_assert_close(b_bp, [1.9844e12, 0, 0, 0], rtol=1e-6) + xp_assert_close(a_bp, [1, 2.5133e4, 2.2108e9, 3.3735e13, + 1.3965e18, 1.0028e22, 2.5202e26], rtol=1e-4) + + +class TestLp2bs: + + def test_basic(self): + b = [1] + a = [1, 1] + b_bs, a_bs = lp2bs(b, a, 0.41722257286366754, 0.18460575326152251) + assert_array_almost_equal(b_bs, [1, 0, 0.17407], decimal=5) + assert_array_almost_equal(a_bs, [1, 0.18461, 0.17407], decimal=5) + + +class TestBilinear: + + def test_basic(self): + b = [0.14879732743343033] + a = [1, 0.54552236880522209, 0.14879732743343033] + b_z, a_z = bilinear(b, a, 0.5) + assert_array_almost_equal(b_z, [0.087821, 0.17564, 0.087821], + decimal=5) + assert_array_almost_equal(a_z, [1, -1.0048, 0.35606], 
decimal=4) + + b = [1, 0, 0.17407467530697837] + a = [1, 0.18460575326152251, 0.17407467530697837] + b_z, a_z = bilinear(b, a, 0.5) + assert_array_almost_equal(b_z, [0.86413, -1.2158, 0.86413], + decimal=4) + assert_array_almost_equal(a_z, [1, -1.2158, 0.72826], + decimal=4) + + def test_fs_validation(self): + b = [0.14879732743343033] + a = [1, 0.54552236880522209, 0.14879732743343033] + with pytest.raises(ValueError, match="Sampling.*single scalar"): + bilinear(b, a, fs=np.array([10, 20])) + + with pytest.raises(ValueError, match="Sampling.*be none"): + bilinear(b, a, fs=None) + + +class TestLp2lp_zpk: + + def test_basic(self): + z = [] + p = [(-1+1j)/np.sqrt(2), (-1-1j)/np.sqrt(2)] + k = 1 + z_lp, p_lp, k_lp = lp2lp_zpk(z, p, k, 5) + xp_assert_equal(z_lp, []) + xp_assert_close(sort(p_lp), sort(p)*5) + xp_assert_close(k_lp, 25.) + + # Pseudo-Chebyshev with both poles and zeros + z = [-2j, +2j] + p = [-0.75, -0.5-0.5j, -0.5+0.5j] + k = 3 + z_lp, p_lp, k_lp = lp2lp_zpk(z, p, k, 20) + xp_assert_close(sort(z_lp), sort([-40j, +40j])) + xp_assert_close(sort(p_lp), sort([-15, -10-10j, -10+10j])) + xp_assert_close(k_lp, 60.) + + def test_fs_validation(self): + z = [-2j, +2j] + p = [-0.75, -0.5 - 0.5j, -0.5 + 0.5j] + k = 3 + + with pytest.raises(ValueError, match="Sampling.*single scalar"): + bilinear_zpk(z, p, k, fs=np.array([10, 20])) + + with pytest.raises(ValueError, match="Sampling.*be none"): + bilinear_zpk(z, p, k, fs=None) + + +class TestLp2hp_zpk: + + def test_basic(self): + z = [] + p = [(-1+1j)/np.sqrt(2), (-1-1j)/np.sqrt(2)] + k = 1 + + z_hp, p_hp, k_hp = lp2hp_zpk(z, p, k, 5) + xp_assert_equal(z_hp, np.asarray([0.0, 0.0])) + xp_assert_close(sort(p_hp), sort(p)*5) + xp_assert_close(k_hp, 1.0) + + z = [-2j, +2j] + p = [-0.75, -0.5-0.5j, -0.5+0.5j] + k = 3 + z_hp, p_hp, k_hp = lp2hp_zpk(z, p, k, 6) + xp_assert_close(sort(z_hp), sort([-3j, 0, +3j])) + xp_assert_close(sort(p_hp), sort([-8, -6-6j, -6+6j])) + xp_assert_close(k_hp, 32.0) + + +class TestLp2bp_zpk: + + def test_basic(self): + z = [-2j, +2j] + p = [-0.75, -0.5-0.5j, -0.5+0.5j] + k = 3 + z_bp, p_bp, k_bp = lp2bp_zpk(z, p, k, 15, 8) + xp_assert_close(sort(z_bp), sort([-25j, -9j, 0, +9j, +25j])) + xp_assert_close(sort(p_bp), sort([-3 + 6j*sqrt(6), + -3 - 6j*sqrt(6), + +2j+sqrt(-8j-225)-2, + -2j+sqrt(+8j-225)-2, + +2j-sqrt(-8j-225)-2, + -2j-sqrt(+8j-225)-2, ])) + xp_assert_close(k_bp, 24.0) + + +class TestLp2bs_zpk: + + def test_basic(self): + z = [-2j, +2j] + p = [-0.75, -0.5-0.5j, -0.5+0.5j] + k = 3 + + z_bs, p_bs, k_bs = lp2bs_zpk(z, p, k, 35, 12) + + xp_assert_close(sort(z_bs), sort([+35j, -35j, + +3j+sqrt(1234)*1j, + -3j+sqrt(1234)*1j, + +3j-sqrt(1234)*1j, + -3j-sqrt(1234)*1j])) + xp_assert_close(sort(p_bs), sort([+3j*sqrt(129) - 8, + -3j*sqrt(129) - 8, + (-6 + 6j) - sqrt(-1225 - 72j), + (-6 - 6j) - sqrt(-1225 + 72j), + (-6 + 6j) + sqrt(-1225 - 72j), + (-6 - 6j) + sqrt(-1225 + 72j), ])) + xp_assert_close(k_bs, 32.0) + + +class TestBilinear_zpk: + + def test_basic(self): + z = [-2j, +2j] + p = [-0.75, -0.5-0.5j, -0.5+0.5j] + k = 3 + + z_d, p_d, k_d = bilinear_zpk(z, p, k, 10) + + xp_assert_close(sort(z_d), sort([(20-2j)/(20+2j), (20+2j)/(20-2j), + -1])) + xp_assert_close(sort(p_d), sort([77/83, + (1j/2 + 39/2) / (41/2 - 1j/2), + (39/2 - 1j/2) / (1j/2 + 41/2), ])) + xp_assert_close(k_d, 9696/69803) + + +class TestPrototypeType: + + def test_output_type(self): + # Prototypes should consistently output arrays, not lists + # https://github.com/scipy/scipy/pull/441 + for func in (buttap, + besselap, + lambda N: cheb1ap(N, 1), + 
lambda N: cheb2ap(N, 20), + lambda N: ellipap(N, 1, 20)): + for N in range(7): + z, p, k = func(N) + assert isinstance(z, np.ndarray) + assert isinstance(p, np.ndarray) + + +def dB(x): + # Return magnitude in decibels, avoiding divide-by-zero warnings + # (and deal with some "not less-ordered" errors when -inf shows up) + return 20 * np.log10(np.maximum(np.abs(x), np.finfo(np.float64).tiny)) + + +class TestButtord: + + def test_lowpass(self): + wp = 0.2 + ws = 0.3 + rp = 3 + rs = 60 + N, Wn = buttord(wp, ws, rp, rs, False) + b, a = butter(N, Wn, 'lowpass', False) + w, h = freqz(b, a) + w /= np.pi + assert np.all(-rp < dB(h[w <= wp])) + assert np.all(dB(h[ws <= w]) < -rs) + + assert N == 16 + xp_assert_close(Wn, + 2.0002776782743284e-01, rtol=1e-15) + + def test_highpass(self): + wp = 0.3 + ws = 0.2 + rp = 3 + rs = 70 + N, Wn = buttord(wp, ws, rp, rs, False) + b, a = butter(N, Wn, 'highpass', False) + w, h = freqz(b, a) + w /= np.pi + assert np.all(-rp < dB(h[wp <= w])) + assert np.all(dB(h[w <= ws]) < -rs) + + assert N == 18 + xp_assert_close(Wn, + 2.9996603079132672e-01, rtol=1e-15) + + def test_bandpass(self): + wp = [0.2, 0.5] + ws = [0.1, 0.6] + rp = 3 + rs = 80 + N, Wn = buttord(wp, ws, rp, rs, False) + b, a = butter(N, Wn, 'bandpass', False) + w, h = freqz(b, a) + w /= np.pi + + assert np.all((-rp - 0.1) < dB(h[np.logical_and(wp[0] <= w, w <= wp[1])])) + + assert np.all(dB(h[np.logical_or(w <= ws[0], ws[1] <= w)]) < (-rs + 0.1)) + + assert N == 18 + xp_assert_close(Wn, [1.9998742411409134e-01, 5.0002139595676276e-01], + rtol=1e-15) + + def test_bandstop(self): + wp = [0.1, 0.6] + ws = [0.2, 0.5] + rp = 3 + rs = 90 + N, Wn = buttord(wp, ws, rp, rs, False) + b, a = butter(N, Wn, 'bandstop', False) + w, h = freqz(b, a) + w /= np.pi + + assert np.all(-rp < dB(h[np.logical_or(w <= wp[0], wp[1] <= w)])) + assert np.all(dB(h[np.logical_and(ws[0] <= w, w <= ws[1])]) < -rs) + + assert N == 20 + xp_assert_close(Wn, [1.4759432329294042e-01, 5.9997365985276407e-01], + rtol=1e-6) + + def test_analog(self): + wp = 200 + ws = 600 + rp = 3 + rs = 60 + N, Wn = buttord(wp, ws, rp, rs, True) + b, a = butter(N, Wn, 'lowpass', True) + w, h = freqs(b, a) + assert np.all(-rp < dB(h[w <= wp])) + assert np.all(dB(h[ws <= w]) < -rs) + + assert N == 7 + xp_assert_close(Wn, 2.0006785355671877e+02, rtol=1e-15) + + n, Wn = buttord(1, 550/450, 1, 26, analog=True) + assert n == 19 + xp_assert_close(Wn, 1.0361980524629517, rtol=1e-15) + + xp_assert_equal(buttord(1, 1.2, 1, 80, analog=True)[0], 55) + + def test_fs_param(self): + wp = [4410, 11025] + ws = [2205, 13230] + rp = 3 + rs = 80 + fs = 44100 + N, Wn = buttord(wp, ws, rp, rs, False, fs=fs) + b, a = butter(N, Wn, 'bandpass', False, fs=fs) + w, h = freqz(b, a, fs=fs) + assert np.all(-rp - 0.1 < dB(h[np.logical_and(wp[0] <= w, w <= wp[1])])) + assert np.all(dB(h[np.logical_or(w <= ws[0], ws[1] <= w)]) < -rs + 0.1) + + assert N == 18 + xp_assert_close(Wn, [4409.722701715714, 11025.47178084662], + rtol=1e-15) + + def test_invalid_input(self): + with pytest.raises(ValueError) as exc_info: + buttord([20, 50], [14, 60], 3, 2) + assert "gpass should be smaller than gstop" in str(exc_info.value) + + with pytest.raises(ValueError) as exc_info: + buttord([20, 50], [14, 60], -1, 2) + assert "gpass should be larger than 0.0" in str(exc_info.value) + + with pytest.raises(ValueError) as exc_info: + buttord([20, 50], [14, 60], 1, -2) + assert "gstop should be larger than 0.0" in str(exc_info.value) + + @pytest.mark.thread_unsafe + def test_runtime_warnings(self): + msg = 
"Order is zero.*|divide by zero encountered" + with pytest.warns(RuntimeWarning, match=msg): + buttord(0.0, 1.0, 3, 60) + + def test_ellip_butter(self): + # The purpose of the test is to compare to some known output from past + # scipy versions. The values to compare to are generated with scipy + # 1.9.1 (there is nothing special about this particular version though) + n, wn = buttord([0.1, 0.6], [0.2, 0.5], 3, 60) + assert n == 14 + + def test_fs_validation(self): + wp = 0.2 + ws = 0.3 + rp = 3 + rs = 60 + + with pytest.raises(ValueError, match="Sampling.*single scalar"): + buttord(wp, ws, rp, rs, False, fs=np.array([10, 20])) + + +class TestCheb1ord: + + def test_lowpass(self): + wp = 0.2 + ws = 0.3 + rp = 3 + rs = 60 + N, Wn = cheb1ord(wp, ws, rp, rs, False) + b, a = cheby1(N, rp, Wn, 'low', False) + w, h = freqz(b, a) + w /= np.pi + assert np.all(-rp - 0.1 < dB(h[w <= wp])) + assert np.all(dB(h[ws <= w]) < -rs + 0.1) + + assert N == 8 + xp_assert_close(Wn, 0.2, rtol=1e-15) + + def test_highpass(self): + wp = 0.3 + ws = 0.2 + rp = 3 + rs = 70 + N, Wn = cheb1ord(wp, ws, rp, rs, False) + b, a = cheby1(N, rp, Wn, 'high', False) + w, h = freqz(b, a) + w /= np.pi + assert np.all(-rp - 0.1 < dB(h[wp <= w])) + assert np.all(dB(h[w <= ws]) < -rs + 0.1) + + assert N == 9 + xp_assert_close(Wn, 0.3, rtol=1e-15) + + def test_bandpass(self): + wp = [0.2, 0.5] + ws = [0.1, 0.6] + rp = 3 + rs = 80 + N, Wn = cheb1ord(wp, ws, rp, rs, False) + b, a = cheby1(N, rp, Wn, 'band', False) + w, h = freqz(b, a) + w /= np.pi + assert np.all(-rp - 0.1 < dB(h[np.logical_and(wp[0] <= w, w <= wp[1])])) + assert np.all(dB(h[np.logical_or(w <= ws[0], ws[1] <= w)]) < -rs + 0.1) + + assert N == 9 + xp_assert_close(Wn, [0.2, 0.5], rtol=1e-15) + + def test_bandstop(self): + wp = [0.1, 0.6] + ws = [0.2, 0.5] + rp = 3 + rs = 90 + N, Wn = cheb1ord(wp, ws, rp, rs, False) + b, a = cheby1(N, rp, Wn, 'stop', False) + w, h = freqz(b, a) + w /= np.pi + assert np.all(-rp - 0.1 < dB(h[np.logical_or(w <= wp[0], wp[1] <= w)])) + assert np.all(dB(h[np.logical_and(ws[0] <= w, w <= ws[1])]) < -rs + 0.1) + + assert N == 10 + xp_assert_close(Wn, [0.14758232569947785, 0.6], rtol=1e-5) + + def test_analog(self): + wp = 700 + ws = 100 + rp = 3 + rs = 70 + N, Wn = cheb1ord(wp, ws, rp, rs, True) + b, a = cheby1(N, rp, Wn, 'high', True) + w, h = freqs(b, a) + assert np.all(-rp - 0.1 < dB(h[wp <= w])) + assert np.all(dB(h[w <= ws]) < -rs + 0.1) + + assert N == 4 + xp_assert_close(Wn, 700.0, rtol=1e-15) + + xp_assert_equal(cheb1ord(1, 1.2, 1, 80, analog=True)[0], 17) + + def test_fs_param(self): + wp = 4800 + ws = 7200 + rp = 3 + rs = 60 + fs = 48000 + N, Wn = cheb1ord(wp, ws, rp, rs, False, fs=fs) + b, a = cheby1(N, rp, Wn, 'low', False, fs=fs) + w, h = freqz(b, a, fs=fs) + assert np.all(-rp - 0.1 < dB(h[w <= wp])) + assert np.all(dB(h[ws <= w]) < -rs + 0.1) + + assert N == 8 + xp_assert_close(Wn, 4800.0, rtol=1e-15) + + def test_invalid_input(self): + with pytest.raises(ValueError) as exc_info: + cheb1ord(0.2, 0.3, 3, 2) + assert "gpass should be smaller than gstop" in str(exc_info.value) + + with pytest.raises(ValueError) as exc_info: + cheb1ord(0.2, 0.3, -1, 2) + assert "gpass should be larger than 0.0" in str(exc_info.value) + + with pytest.raises(ValueError) as exc_info: + cheb1ord(0.2, 0.3, 1, -2) + assert "gstop should be larger than 0.0" in str(exc_info.value) + + def test_ellip_cheb1(self): + # The purpose of the test is to compare to some known output from past + # scipy versions. 
The values to compare to are generated with scipy + # 1.9.1 (there is nothing special about this particular version though) + n, wn = cheb1ord([0.1, 0.6], [0.2, 0.5], 3, 60) + assert n == 7 + + n2, w2 = cheb2ord([0.1, 0.6], [0.2, 0.5], 3, 60) + assert not (wn == w2).all() + + def test_fs_validation(self): + wp = 0.2 + ws = 0.3 + rp = 3 + rs = 60 + + with pytest.raises(ValueError, match="Sampling.*single scalar"): + cheb1ord(wp, ws, rp, rs, False, fs=np.array([10, 20])) + + +class TestCheb2ord: + + def test_lowpass(self): + wp = 0.2 + ws = 0.3 + rp = 3 + rs = 60 + N, Wn = cheb2ord(wp, ws, rp, rs, False) + b, a = cheby2(N, rs, Wn, 'lp', False) + w, h = freqz(b, a) + w /= np.pi + assert np.all(-rp - 0.1 < dB(h[w <= wp])) + assert np.all(dB(h[ws <= w]) < -rs + 0.1) + + assert N == 8 + xp_assert_close(Wn, 0.28647639976553163, rtol=1e-15) + + def test_highpass(self): + wp = 0.3 + ws = 0.2 + rp = 3 + rs = 70 + N, Wn = cheb2ord(wp, ws, rp, rs, False) + b, a = cheby2(N, rs, Wn, 'hp', False) + w, h = freqz(b, a) + w /= np.pi + assert np.all(-rp - 0.1 < dB(h[wp <= w])) + assert np.all(dB(h[w <= ws]) < -rs + 0.1) + + assert N == 9 + xp_assert_close(Wn, 0.20697492182903282, rtol=1e-15) + + def test_bandpass(self): + wp = [0.2, 0.5] + ws = [0.1, 0.6] + rp = 3 + rs = 80 + N, Wn = cheb2ord(wp, ws, rp, rs, False) + b, a = cheby2(N, rs, Wn, 'bp', False) + w, h = freqz(b, a) + w /= np.pi + assert np.all(-rp - 0.1 < dB(h[np.logical_and(wp[0] <= w, w <= wp[1])])) + assert np.all(dB(h[np.logical_or(w <= ws[0], ws[1] <= w)]) < -rs + 0.1) + + assert N == 9 + xp_assert_close(Wn, [0.14876937565923479, 0.59748447842351482], + rtol=1e-15) + + def test_bandstop(self): + wp = [0.1, 0.6] + ws = [0.2, 0.5] + rp = 3 + rs = 90 + N, Wn = cheb2ord(wp, ws, rp, rs, False) + b, a = cheby2(N, rs, Wn, 'bs', False) + w, h = freqz(b, a) + w /= np.pi + assert np.all(-rp - 0.1 < dB(h[np.logical_or(w <= wp[0], wp[1] <= w)])) + assert np.all(dB(h[np.logical_and(ws[0] <= w, w <= ws[1])]) < -rs + 0.1) + + assert N == 10 + xp_assert_close(Wn, [0.19926249974781743, 0.50125246585567362], + rtol=1e-6) + + def test_analog(self): + wp = [20, 50] + ws = [10, 60] + rp = 3 + rs = 80 + N, Wn = cheb2ord(wp, ws, rp, rs, True) + b, a = cheby2(N, rs, Wn, 'bp', True) + w, h = freqs(b, a) + assert np.all(-rp - 0.1 < dB(h[np.logical_and(wp[0] <= w, w <= wp[1])])) + assert np.all(dB(h[np.logical_or(w <= ws[0], ws[1] <= w)]) < -rs + 0.1) + + assert N == 11 + xp_assert_close(Wn, [1.673740595370124e+01, 5.974641487254268e+01], + rtol=1e-15) + + def test_fs_param(self): + wp = 150 + ws = 100 + rp = 3 + rs = 70 + fs = 1000 + N, Wn = cheb2ord(wp, ws, rp, rs, False, fs=fs) + b, a = cheby2(N, rs, Wn, 'hp', False, fs=fs) + w, h = freqz(b, a, fs=fs) + assert np.all(-rp - 0.1 < dB(h[wp <= w])) + assert np.all(dB(h[w <= ws]) < -rs + 0.1) + + assert N == 9 + xp_assert_close(Wn, 103.4874609145164, rtol=1e-15) + + def test_invalid_input(self): + with pytest.raises(ValueError) as exc_info: + cheb2ord([0.1, 0.6], [0.2, 0.5], 3, 2) + assert "gpass should be smaller than gstop" in str(exc_info.value) + + with pytest.raises(ValueError) as exc_info: + cheb2ord([0.1, 0.6], [0.2, 0.5], -1, 2) + assert "gpass should be larger than 0.0" in str(exc_info.value) + + with pytest.raises(ValueError) as exc_info: + cheb2ord([0.1, 0.6], [0.2, 0.5], 1, -2) + assert "gstop should be larger than 0.0" in str(exc_info.value) + + def test_ellip_cheb2(self): + # The purpose of the test is to compare to some known output from past + # scipy versions. 
The values to compare to are generated with scipy + # 1.9.1 (there is nothing special about this particular version though) + n, wn = cheb2ord([0.1, 0.6], [0.2, 0.5], 3, 60) + assert n == 7 + + n1, w1 = cheb1ord([0.1, 0.6], [0.2, 0.5], 3, 60) + assert not (wn == w1).all() + + def test_fs_validation(self): + wp = 0.2 + ws = 0.3 + rp = 3 + rs = 60 + + with pytest.raises(ValueError, match="Sampling.*single scalar"): + cheb2ord(wp, ws, rp, rs, False, fs=np.array([10, 20])) + + +class TestEllipord: + + def test_lowpass(self): + wp = 0.2 + ws = 0.3 + rp = 3 + rs = 60 + N, Wn = ellipord(wp, ws, rp, rs, False) + b, a = ellip(N, rp, rs, Wn, 'lp', False) + w, h = freqz(b, a) + w /= np.pi + assert np.all(-rp - 0.1 < dB(h[w <= wp])) + assert np.all(dB(h[ws <= w]) < -rs + 0.1) + + assert N == 5 + xp_assert_close(Wn, 0.2, rtol=1e-15) + + def test_lowpass_1000dB(self): + # failed when ellipkm1 wasn't used in ellipord and ellipap + wp = 0.2 + ws = 0.3 + rp = 3 + rs = 1000 + N, Wn = ellipord(wp, ws, rp, rs, False) + sos = ellip(N, rp, rs, Wn, 'lp', False, output='sos') + w, h = freqz_sos(sos) + w /= np.pi + assert np.all(-rp - 0.1 < dB(h[w <= wp])) + assert np.all(dB(h[ws <= w]) < -rs + 0.1) + + def test_highpass(self): + wp = 0.3 + ws = 0.2 + rp = 3 + rs = 70 + N, Wn = ellipord(wp, ws, rp, rs, False) + b, a = ellip(N, rp, rs, Wn, 'hp', False) + w, h = freqz(b, a) + w /= np.pi + assert np.all(-rp - 0.1 < dB(h[wp <= w])) + assert np.all(dB(h[w <= ws]) < -rs + 0.1) + + assert N == 6 + xp_assert_close(Wn, 0.3, rtol=1e-15) + + def test_bandpass(self): + wp = [0.2, 0.5] + ws = [0.1, 0.6] + rp = 3 + rs = 80 + N, Wn = ellipord(wp, ws, rp, rs, False) + b, a = ellip(N, rp, rs, Wn, 'bp', False) + w, h = freqz(b, a) + w /= np.pi + assert np.all(-rp - 0.1 < dB(h[np.logical_and(wp[0] <= w, w <= wp[1])])) + assert np.all(dB(h[np.logical_or(w <= ws[0], ws[1] <= w)]) < -rs + 0.1) + + assert N == 6 + xp_assert_close(Wn, [0.2, 0.5], rtol=1e-15) + + def test_bandstop(self): + wp = [0.1, 0.6] + ws = [0.2, 0.5] + rp = 3 + rs = 90 + N, Wn = ellipord(wp, ws, rp, rs, False) + b, a = ellip(N, rp, rs, Wn, 'bs', False) + w, h = freqz(b, a) + w /= np.pi + assert np.all(-rp - 0.1 < dB(h[np.logical_or(w <= wp[0], wp[1] <= w)])) + assert np.all(dB(h[np.logical_and(ws[0] <= w, w <= ws[1])]) < -rs + 0.1) + + assert N == 7 + xp_assert_close(Wn, [0.14758232794342988, 0.6], rtol=1e-5) + + def test_analog(self): + wp = [1000, 6000] + ws = [2000, 5000] + rp = 3 + rs = 90 + N, Wn = ellipord(wp, ws, rp, rs, True) + b, a = ellip(N, rp, rs, Wn, 'bs', True) + w, h = freqs(b, a) + assert np.all(-rp - 0.1 < dB(h[np.logical_or(w <= wp[0], wp[1] <= w)])) + assert np.all(dB(h[np.logical_and(ws[0] <= w, w <= ws[1])]) < -rs + 0.1) + + assert N == 8 + xp_assert_close(Wn, [1666.6666, 6000]) + + assert ellipord(1, 1.2, 1, 80, analog=True)[0] == 9 + + def test_fs_param(self): + wp = [400, 2400] + ws = [800, 2000] + rp = 3 + rs = 90 + fs = 8000 + N, Wn = ellipord(wp, ws, rp, rs, False, fs=fs) + b, a = ellip(N, rp, rs, Wn, 'bs', False, fs=fs) + w, h = freqz(b, a, fs=fs) + assert np.all(-rp - 0.1 < dB(h[np.logical_or(w <= wp[0], wp[1] <= w)])) + assert np.all(dB(h[np.logical_and(ws[0] <= w, w <= ws[1])]) < -rs + 0.1) + + assert N == 7 + xp_assert_close(Wn, [590.3293117737195, 2400], rtol=1e-5) + + def test_invalid_input(self): + with pytest.raises(ValueError) as exc_info: + ellipord(0.2, 0.5, 3, 2) + assert "gpass should be smaller than gstop" in str(exc_info.value) + + with pytest.raises(ValueError) as exc_info: + ellipord(0.2, 0.5, -1, 2) + assert "gpass 
should be larger than 0.0" in str(exc_info.value) + + with pytest.raises(ValueError) as exc_info: + ellipord(0.2, 0.5, 1, -2) + assert "gstop should be larger than 0.0" in str(exc_info.value) + + def test_ellip_butter(self): + # The purpose of the test is to compare to some known output from past + # scipy versions. The values to compare to are generated with scipy + # 1.9.1 (there is nothing special about this particular version though) + n, wn = ellipord([0.1, 0.6], [0.2, 0.5], 3, 60) + assert n == 5 + + def test_fs_validation(self): + wp = 0.2 + ws = 0.3 + rp = 3 + rs = 60 + + with pytest.raises(ValueError, match="Sampling.*single scalar"): + ellipord(wp, ws, rp, rs, False, fs=np.array([10, 20])) + + +class TestBessel: + + def test_degenerate(self): + for norm in ('delay', 'phase', 'mag'): + # 0-order filter is just a passthrough + b, a = bessel(0, 1, analog=True, norm=norm) + xp_assert_equal(b, np.asarray([1.0])) + xp_assert_equal(a, np.asarray([1.0])) + + # 1-order filter is same for all types + b, a = bessel(1, 1, analog=True, norm=norm) + xp_assert_close(b, np.asarray([1.0]), rtol=1e-15) + xp_assert_close(a, np.asarray([1.0, 1]), rtol=1e-15) + + z, p, k = bessel(1, 0.3, analog=True, output='zpk', norm=norm) + xp_assert_equal(z, np.asarray([])) + xp_assert_close(p, np.asarray([-0.3+0j]), rtol=1e-14) + xp_assert_close(k, 0.3, rtol=1e-14) + + def test_high_order(self): + # high even order, 'phase' + z, p, k = bessel(24, 100, analog=True, output='zpk') + z2 = [] + p2 = [ + -9.055312334014323e+01 + 4.844005815403969e+00j, + -8.983105162681878e+01 + 1.454056170018573e+01j, + -8.837357994162065e+01 + 2.426335240122282e+01j, + -8.615278316179575e+01 + 3.403202098404543e+01j, + -8.312326467067703e+01 + 4.386985940217900e+01j, + -7.921695461084202e+01 + 5.380628489700191e+01j, + -7.433392285433246e+01 + 6.388084216250878e+01j, + -6.832565803501586e+01 + 7.415032695116071e+01j, + -6.096221567378025e+01 + 8.470292433074425e+01j, + -5.185914574820616e+01 + 9.569048385258847e+01j, + -4.027853855197555e+01 + 1.074195196518679e+02j, + -2.433481337524861e+01 + 1.207298683731973e+02j, + ] + k2 = 9.999999999999989e+47 + xp_assert_equal(z, z2) + xp_assert_close(sorted(p, key=np.imag), + sorted(np.union1d(p2, np.conj(p2)), key=np.imag)) + xp_assert_close(k, k2, rtol=1e-14) + + # high odd order, 'phase' + z, p, k = bessel(23, 1000, analog=True, output='zpk') + z2 = [] + p2 = [ + -2.497697202208956e+02 + 1.202813187870698e+03j, + -4.126986617510172e+02 + 1.065328794475509e+03j, + -5.304922463809596e+02 + 9.439760364018479e+02j, + -9.027564978975828e+02 + 1.010534334242318e+02j, + -8.909283244406079e+02 + 2.023024699647598e+02j, + -8.709469394347836e+02 + 3.039581994804637e+02j, + -8.423805948131370e+02 + 4.062657947488952e+02j, + -8.045561642249877e+02 + 5.095305912401127e+02j, + -7.564660146766259e+02 + 6.141594859516342e+02j, + -6.965966033906477e+02 + 7.207341374730186e+02j, + -6.225903228776276e+02 + 8.301558302815096e+02j, + -9.066732476324988e+02] + k2 = 9.999999999999983e+68 + xp_assert_equal(z, z2) + xp_assert_close(sorted(p, key=np.imag), + sorted(np.union1d(p2, np.conj(p2)), key=np.imag)) + xp_assert_close(k, k2, rtol=1e-14) + + # high even order, 'delay' (Orchard 1965 "The Roots of the + # Maximally Flat-Delay Polynomials" Table 1) + z, p, k = bessel(31, 1, analog=True, output='zpk', norm='delay') + p2 = [-20.876706, + -20.826543 + 1.735732j, + -20.675502 + 3.473320j, + -20.421895 + 5.214702j, + -20.062802 + 6.961982j, + -19.593895 + 8.717546j, + -19.009148 + 10.484195j, + -18.300400 + 
12.265351j, + -17.456663 + 14.065350j, + -16.463032 + 15.889910j, + -15.298849 + 17.746914j, + -13.934466 + 19.647827j, + -12.324914 + 21.610519j, + -10.395893 + 23.665701j, + - 8.005600 + 25.875019j, + - 4.792045 + 28.406037j, + ] + xp_assert_close(sorted(p, key=np.imag), + sorted(np.union1d(p2, np.conj(p2)), key=np.imag)) + + # high odd order, 'delay' + z, p, k = bessel(30, 1, analog=True, output='zpk', norm='delay') + p2 = [-20.201029 + 0.867750j, + -20.097257 + 2.604235j, + -19.888485 + 4.343721j, + -19.572188 + 6.088363j, + -19.144380 + 7.840570j, + -18.599342 + 9.603147j, + -17.929195 + 11.379494j, + -17.123228 + 13.173901j, + -16.166808 + 14.992008j, + -15.039580 + 16.841580j, + -13.712245 + 18.733902j, + -12.140295 + 20.686563j, + -10.250119 + 22.729808j, + - 7.901170 + 24.924391j, + - 4.734679 + 27.435615j, + ] + xp_assert_close(sorted(p, key=np.imag), + sorted(np.union1d(p2, np.conj(p2)), key=np.imag)) + + def test_refs(self): + # Compare to http://www.crbond.com/papers/bsf2.pdf + # "Delay Normalized Bessel Polynomial Coefficients" + bond_b = np.asarray([10395.0]) + bond_a = np.asarray([1.0, 21, 210, 1260, 4725, 10395, 10395]) + b, a = bessel(6, 1, norm='delay', analog=True) + xp_assert_close(b, bond_b) + xp_assert_close(a, bond_a) + + # "Delay Normalized Bessel Pole Locations" + bond_poles = { + 1: [-1.0000000000], + 2: [-1.5000000000 + 0.8660254038j], + 3: [-1.8389073227 + 1.7543809598j, -2.3221853546], + 4: [-2.1037893972 + 2.6574180419j, -2.8962106028 + 0.8672341289j], + 5: [-2.3246743032 + 3.5710229203j, -3.3519563992 + 1.7426614162j, + -3.6467385953], + 6: [-2.5159322478 + 4.4926729537j, -3.7357083563 + 2.6262723114j, + -4.2483593959 + 0.8675096732j], + 7: [-2.6856768789 + 5.4206941307j, -4.0701391636 + 3.5171740477j, + -4.7582905282 + 1.7392860611j, -4.9717868585], + 8: [-2.8389839489 + 6.3539112986j, -4.3682892172 + 4.4144425005j, + -5.2048407906 + 2.6161751526j, -5.5878860433 + 0.8676144454j], + 9: [-2.9792607982 + 7.2914636883j, -4.6384398872 + 5.3172716754j, + -5.6044218195 + 3.4981569179j, -6.1293679043 + 1.7378483835j, + -6.2970191817], + 10: [-3.1089162336 + 8.2326994591j, -4.8862195669 + 6.2249854825j, + -5.9675283286 + 4.3849471889j, -6.6152909655 + 2.6115679208j, + -6.9220449054 + 0.8676651955j] + } + + for N in range(1, 11): + p1 = np.sort(bond_poles[N]) + p2 = np.sort(np.concatenate(_cplxreal(besselap(N, 'delay')[1]))) + assert_array_almost_equal(p1, p2, decimal=10) + + # "Frequency Normalized Bessel Pole Locations" + bond_poles = { + 1: [-1.0000000000], + 2: [-1.1016013306 + 0.6360098248j], + 3: [-1.0474091610 + 0.9992644363j, -1.3226757999], + 4: [-0.9952087644 + 1.2571057395j, -1.3700678306 + 0.4102497175j], + 5: [-0.9576765486 + 1.4711243207j, -1.3808773259 + 0.7179095876j, + -1.5023162714], + 6: [-0.9306565229 + 1.6618632689j, -1.3818580976 + 0.9714718907j, + -1.5714904036 + 0.3208963742j], + 7: [-0.9098677806 + 1.8364513530j, -1.3789032168 + 1.1915667778j, + -1.6120387662 + 0.5892445069j, -1.6843681793], + 8: [-0.8928697188 + 1.9983258436j, -1.3738412176 + 1.3883565759j, + -1.6369394181 + 0.8227956251j, -1.7574084004 + 0.2728675751j], + 9: [-0.8783992762 + 2.1498005243j, -1.3675883098 + 1.5677337122j, + -1.6523964846 + 1.0313895670j, -1.8071705350 + 0.5123837306j, + -1.8566005012], + 10: [-0.8657569017 + 2.2926048310j, -1.3606922784 + 1.7335057427j, + -1.6618102414 + 1.2211002186j, -1.8421962445 + 0.7272575978j, + -1.9276196914 + 0.2416234710j] + } + + for N in range(1, 11): + p1 = np.sort(bond_poles[N]) + p2 = np.sort(np.concatenate(_cplxreal(besselap(N, 
'mag')[1]))) + assert_array_almost_equal(p1, p2, decimal=10) + + # Compare to https://www.ranecommercial.com/legacy/note147.html + # "Table 1 - Bessel Crossovers of Second, Third, and Fourth-Order" + a = np.asarray([1, 1, 1/3]) + b2, a2 = bessel(2, 1, norm='delay', analog=True) + xp_assert_close(a[::-1], a2/b2) + + a = np.asarray([1, 1, 2/5, 1/15]) + b2, a2 = bessel(3, 1, norm='delay', analog=True) + xp_assert_close(a[::-1], a2/b2) + + a = np.asarray([1, 1, 9/21, 2/21, 1/105]) + b2, a2 = bessel(4, 1, norm='delay', analog=True) + xp_assert_close(a[::-1], a2/b2) + + a = np.asarray([1, np.sqrt(3), 1]) + b2, a2 = bessel(2, 1, norm='phase', analog=True) + xp_assert_close(a[::-1], a2/b2) + + # TODO: Why so inaccurate? Is reference flawed? + a = np.asarray([1, 2.481, 2.463, 1.018]) + b2, a2 = bessel(3, 1, norm='phase', analog=True) + assert_array_almost_equal(a[::-1], a2/b2, decimal=1) + + # TODO: Why so inaccurate? Is reference flawed? + a = np.asarray([1, 3.240, 4.5, 3.240, 1.050]) + b2, a2 = bessel(4, 1, norm='phase', analog=True) + assert_array_almost_equal(a[::-1], a2/b2, decimal=1) + + # Table of -3 dB factors: + N, scale = 2, np.asarray([1.272, 1.272], dtype=np.complex128) + scale2 = besselap(N, 'mag')[1] / besselap(N, 'phase')[1] + assert_array_almost_equal(scale2, scale, decimal=3) + + # TODO: Why so inaccurate? Is reference flawed? + N, scale = 3, np.asarray([1.413, 1.413, 1.413], dtype=np.complex128) + scale2 = besselap(N, 'mag')[1] / besselap(N, 'phase')[1] + assert_array_almost_equal(scale2, scale, decimal=2) + + # TODO: Why so inaccurate? Is reference flawed? + N, scale = 4, np.asarray([1.533]*4, dtype=np.complex128) + scale2 = besselap(N, 'mag')[1] / besselap(N, 'phase')[1] + assert_array_almost_equal(scale, scale2, decimal=1) + + def test_hardcoded(self): + # Compare to values from original hardcoded implementation + originals = { + 0: [], + 1: [-1], + 2: [-.8660254037844386467637229 + .4999999999999999999999996j], + 3: [-.9416000265332067855971980, + -.7456403858480766441810907 + .7113666249728352680992154j], + 4: [-.6572111716718829545787788 + .8301614350048733772399715j, + -.9047587967882449459642624 + .2709187330038746636700926j], + 5: [-.9264420773877602247196260, + -.8515536193688395541722677 + .4427174639443327209850002j, + -.5905759446119191779319432 + .9072067564574549539291747j], + 6: [-.9093906830472271808050953 + .1856964396793046769246397j, + -.7996541858328288520243325 + .5621717346937317988594118j, + -.5385526816693109683073792 + .9616876881954277199245657j], + 7: [-.9194871556490290014311619, + -.8800029341523374639772340 + .3216652762307739398381830j, + -.7527355434093214462291616 + .6504696305522550699212995j, + -.4966917256672316755024763 + 1.002508508454420401230220j], + 8: [-.9096831546652910216327629 + .1412437976671422927888150j, + -.8473250802359334320103023 + .4259017538272934994996429j, + -.7111381808485399250796172 + .7186517314108401705762571j, + -.4621740412532122027072175 + 1.034388681126901058116589j], + 9: [-.9154957797499037686769223, + -.8911217017079759323183848 + .2526580934582164192308115j, + -.8148021112269012975514135 + .5085815689631499483745341j, + -.6743622686854761980403401 + .7730546212691183706919682j, + -.4331415561553618854685942 + 1.060073670135929666774323j], + 10: [-.9091347320900502436826431 + .1139583137335511169927714j, + -.8688459641284764527921864 + .3430008233766309973110589j, + -.7837694413101441082655890 + .5759147538499947070009852j, + -.6417513866988316136190854 + .8175836167191017226233947j, + -.4083220732868861566219785 
+ 1.081274842819124562037210j], + 11: [-.9129067244518981934637318, + -.8963656705721166099815744 + .2080480375071031919692341j, + -.8453044014712962954184557 + .4178696917801248292797448j, + -.7546938934722303128102142 + .6319150050721846494520941j, + -.6126871554915194054182909 + .8547813893314764631518509j, + -.3868149510055090879155425 + 1.099117466763120928733632j], + 12: [-.9084478234140682638817772 + 95506365213450398415258360e-27j, + -.8802534342016826507901575 + .2871779503524226723615457j, + -.8217296939939077285792834 + .4810212115100676440620548j, + -.7276681615395159454547013 + .6792961178764694160048987j, + -.5866369321861477207528215 + .8863772751320727026622149j, + -.3679640085526312839425808 + 1.114373575641546257595657j], + 13: [-.9110914665984182781070663, + -.8991314665475196220910718 + .1768342956161043620980863j, + -.8625094198260548711573628 + .3547413731172988997754038j, + -.7987460692470972510394686 + .5350752120696801938272504j, + -.7026234675721275653944062 + .7199611890171304131266374j, + -.5631559842430199266325818 + .9135900338325109684927731j, + -.3512792323389821669401925 + 1.127591548317705678613239j], + 14: [-.9077932138396487614720659 + 82196399419401501888968130e-27j, + -.8869506674916445312089167 + .2470079178765333183201435j, + -.8441199160909851197897667 + .4131653825102692595237260j, + -.7766591387063623897344648 + .5819170677377608590492434j, + -.6794256425119233117869491 + .7552857305042033418417492j, + -.5418766775112297376541293 + .9373043683516919569183099j, + -.3363868224902037330610040 + 1.139172297839859991370924j], + 15: [-.9097482363849064167228581, + -.9006981694176978324932918 + .1537681197278439351298882j, + -.8731264620834984978337843 + .3082352470564267657715883j, + -.8256631452587146506294553 + .4642348752734325631275134j, + -.7556027168970728127850416 + .6229396358758267198938604j, + -.6579196593110998676999362 + .7862895503722515897065645j, + -.5224954069658330616875186 + .9581787261092526478889345j, + -.3229963059766444287113517 + 1.149416154583629539665297j], + 16: [-.9072099595087001356491337 + 72142113041117326028823950e-27j, + -.8911723070323647674780132 + .2167089659900576449410059j, + -.8584264231521330481755780 + .3621697271802065647661080j, + -.8074790293236003885306146 + .5092933751171800179676218j, + -.7356166304713115980927279 + .6591950877860393745845254j, + -.6379502514039066715773828 + .8137453537108761895522580j, + -.5047606444424766743309967 + .9767137477799090692947061j, + -.3108782755645387813283867 + 1.158552841199330479412225j], + 17: [-.9087141161336397432860029, + -.9016273850787285964692844 + .1360267995173024591237303j, + -.8801100704438627158492165 + .2725347156478803885651973j, + -.8433414495836129204455491 + .4100759282910021624185986j, + -.7897644147799708220288138 + .5493724405281088674296232j, + -.7166893842372349049842743 + .6914936286393609433305754j, + -.6193710717342144521602448 + .8382497252826992979368621j, + -.4884629337672704194973683 + .9932971956316781632345466j, + -.2998489459990082015466971 + 1.166761272925668786676672j], + 18: [-.9067004324162775554189031 + 64279241063930693839360680e-27j, + -.8939764278132455733032155 + .1930374640894758606940586j, + -.8681095503628830078317207 + .3224204925163257604931634j, + -.8281885016242836608829018 + .4529385697815916950149364j, + -.7726285030739558780127746 + .5852778162086640620016316j, + -.6987821445005273020051878 + .7204696509726630531663123j, + -.6020482668090644386627299 + .8602708961893664447167418j, + -.4734268069916151511140032 + 
1.008234300314801077034158j, + -.2897592029880489845789953 + 1.174183010600059128532230j], + 19: [-.9078934217899404528985092, + -.9021937639390660668922536 + .1219568381872026517578164j, + -.8849290585034385274001112 + .2442590757549818229026280j, + -.8555768765618421591093993 + .3672925896399872304734923j, + -.8131725551578197705476160 + .4915365035562459055630005j, + -.7561260971541629355231897 + .6176483917970178919174173j, + -.6818424412912442033411634 + .7466272357947761283262338j, + -.5858613321217832644813602 + .8801817131014566284786759j, + -.4595043449730988600785456 + 1.021768776912671221830298j, + -.2804866851439370027628724 + 1.180931628453291873626003j], + 20: [-.9062570115576771146523497 + 57961780277849516990208850e-27j, + -.8959150941925768608568248 + .1740317175918705058595844j, + -.8749560316673332850673214 + .2905559296567908031706902j, + -.8427907479956670633544106 + .4078917326291934082132821j, + -.7984251191290606875799876 + .5264942388817132427317659j, + -.7402780309646768991232610 + .6469975237605228320268752j, + -.6658120544829934193890626 + .7703721701100763015154510j, + -.5707026806915714094398061 + .8982829066468255593407161j, + -.4465700698205149555701841 + 1.034097702560842962315411j, + -.2719299580251652601727704 + 1.187099379810885886139638j], + 21: [-.9072262653142957028884077, + -.9025428073192696303995083 + .1105252572789856480992275j, + -.8883808106664449854431605 + .2213069215084350419975358j, + -.8643915813643204553970169 + .3326258512522187083009453j, + -.8299435470674444100273463 + .4448177739407956609694059j, + -.7840287980408341576100581 + .5583186348022854707564856j, + -.7250839687106612822281339 + .6737426063024382240549898j, + -.6506315378609463397807996 + .7920349342629491368548074j, + -.5564766488918562465935297 + .9148198405846724121600860j, + -.4345168906815271799687308 + 1.045382255856986531461592j, + -.2640041595834031147954813 + 1.192762031948052470183960j], + 22: [-.9058702269930872551848625 + 52774908289999045189007100e-27j, + -.8972983138153530955952835 + .1584351912289865608659759j, + -.8799661455640176154025352 + .2644363039201535049656450j, + -.8534754036851687233084587 + .3710389319482319823405321j, + -.8171682088462720394344996 + .4785619492202780899653575j, + -.7700332930556816872932937 + .5874255426351153211965601j, + -.7105305456418785989070935 + .6982266265924524000098548j, + -.6362427683267827226840153 + .8118875040246347267248508j, + -.5430983056306302779658129 + .9299947824439872998916657j, + -.4232528745642628461715044 + 1.055755605227545931204656j, + -.2566376987939318038016012 + 1.197982433555213008346532j], + 23: [-.9066732476324988168207439, + -.9027564979912504609412993 + .1010534335314045013252480j, + -.8909283242471251458653994 + .2023024699381223418195228j, + -.8709469395587416239596874 + .3039581993950041588888925j, + -.8423805948021127057054288 + .4062657948237602726779246j, + -.8045561642053176205623187 + .5095305912227258268309528j, + -.7564660146829880581478138 + .6141594859476032127216463j, + -.6965966033912705387505040 + .7207341374753046970247055j, + -.6225903228771341778273152 + .8301558302812980678845563j, + -.5304922463810191698502226 + .9439760364018300083750242j, + -.4126986617510148836149955 + 1.065328794475513585531053j, + -.2497697202208956030229911 + 1.202813187870697831365338j], + 24: [-.9055312363372773709269407 + 48440066540478700874836350e-27j, + -.8983105104397872954053307 + .1454056133873610120105857j, + -.8837358034555706623131950 + .2426335234401383076544239j, + -.8615278304016353651120610 
+ .3403202112618624773397257j, + -.8312326466813240652679563 + .4386985933597305434577492j, + -.7921695462343492518845446 + .5380628490968016700338001j, + -.7433392285088529449175873 + .6388084216222567930378296j, + -.6832565803536521302816011 + .7415032695091650806797753j, + -.6096221567378335562589532 + .8470292433077202380020454j, + -.5185914574820317343536707 + .9569048385259054576937721j, + -.4027853855197518014786978 + 1.074195196518674765143729j, + -.2433481337524869675825448 + 1.207298683731972524975429j], + 25: [-.9062073871811708652496104, + -.9028833390228020537142561 + 93077131185102967450643820e-27j, + -.8928551459883548836774529 + .1863068969804300712287138j, + -.8759497989677857803656239 + .2798521321771408719327250j, + -.8518616886554019782346493 + .3738977875907595009446142j, + -.8201226043936880253962552 + .4686668574656966589020580j, + -.7800496278186497225905443 + .5644441210349710332887354j, + -.7306549271849967721596735 + .6616149647357748681460822j, + -.6704827128029559528610523 + .7607348858167839877987008j, + -.5972898661335557242320528 + .8626676330388028512598538j, + -.5073362861078468845461362 + .9689006305344868494672405j, + -.3934529878191079606023847 + 1.082433927173831581956863j, + -.2373280669322028974199184 + 1.211476658382565356579418j], + } + for N in originals: + p1 = sorted(np.union1d(originals[N], + np.conj(originals[N])), key=np.imag) + p2 = sorted(besselap(N)[1], key=np.imag) + xp_assert_close(p1, + p2, rtol=1e-14, check_dtype=False) + + def test_norm_phase(self): + # Test some orders and frequencies and see that they have the right + # phase at w0 + for N in (1, 2, 3, 4, 5, 51, 72): + for w0 in (1, 100): + b, a = bessel(N, w0, analog=True, norm='phase') + w = np.linspace(0, w0, 100) + w, h = freqs(b, a, w) + phase = np.unwrap(np.angle(h)) + xp_assert_close(phase[[0, -1]], (0, -N*pi/4), rtol=1e-1) + + def test_norm_mag(self): + # Test some orders and frequencies and see that they have the right + # mag at w0 + for N in (1, 2, 3, 4, 5, 51, 72): + for w0 in (1, 100): + b, a = bessel(N, w0, analog=True, norm='mag') + w = (0, w0) + w, h = freqs(b, a, w) + mag = abs(h) + xp_assert_close(mag, (1, 1/np.sqrt(2))) + + def test_norm_delay(self): + # Test some orders and frequencies and see that they have the right + # delay at DC + for N in (1, 2, 3, 4, 5, 51, 72): + for w0 in (1, 100): + b, a = bessel(N, w0, analog=True, norm='delay') + w = np.linspace(0, 10*w0, 1000) + w, h = freqs(b, a, w) + delay = -np.diff(np.unwrap(np.angle(h)))/np.diff(w) + xp_assert_close(delay[0], 1/w0, rtol=1e-4) + + def test_norm_factor(self): + mpmath_values = { + 1: 1.0, 2: 1.361654128716130520, 3: 1.755672368681210649, + 4: 2.113917674904215843, 5: 2.427410702152628137, + 6: 2.703395061202921876, 7: 2.951722147038722771, + 8: 3.179617237510651330, 9: 3.391693138911660101, + 10: 3.590980594569163482, 11: 3.779607416439620092, + 12: 3.959150821144285315, 13: 4.130825499383535980, + 14: 4.295593409533637564, 15: 4.454233021624377494, + 16: 4.607385465472647917, 17: 4.755586548961147727, + 18: 4.899289677284488007, 19: 5.038882681488207605, + 20: 5.174700441742707423, 21: 5.307034531360917274, + 22: 5.436140703250035999, 23: 5.562244783787878196, + 24: 5.685547371295963521, 25: 5.806227623775418541, + 50: 8.268963160013226298, 51: 8.352374541546012058, + } + for N in mpmath_values: + z, p, k = besselap(N, 'delay') + xp_assert_close(mpmath_values[N], _norm_factor(p, k), rtol=1e-13) + + def test_bessel_poly(self): + xp_assert_equal(_bessel_poly(5), [945, 945, 420, 105, 15, 1]) + 
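# Expected values are the well-known Bessel polynomial coefficients for degree 5 + # and, with reverse=True, for degree 4. + 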
xp_assert_equal(_bessel_poly(4, True), [1, 10, 45, 105, 105]) + + def test_bessel_zeros(self): + xp_assert_equal(_bessel_zeros(0), []) + + def test_invalid(self): + assert_raises(ValueError, besselap, 5, 'nonsense') + assert_raises(ValueError, besselap, -5) + assert_raises(ValueError, besselap, 3.2) + assert_raises(ValueError, _bessel_poly, -3) + assert_raises(ValueError, _bessel_poly, 3.3) + + @pytest.mark.fail_slow(10) + def test_fs_param(self): + for norm in ('phase', 'mag', 'delay'): + for fs in (900, 900.1, 1234.567): + for N in (0, 1, 2, 3, 10): + for fc in (100, 100.1, 432.12345): + for btype in ('lp', 'hp'): + ba1 = bessel(N, fc, btype, norm=norm, fs=fs) + ba2 = bessel(N, fc/(fs/2), btype, norm=norm) + for ba1_, ba2_ in zip(ba1, ba2): + xp_assert_close(ba1_, ba2_) + for fc in ((100, 200), (100.1, 200.2), (321.123, 432.123)): + for btype in ('bp', 'bs'): + ba1 = bessel(N, fc, btype, norm=norm, fs=fs) + for seq in (list, tuple, array): + fcnorm = seq([f/(fs/2) for f in fc]) + ba2 = bessel(N, fcnorm, btype, norm=norm) + for ba1_, ba2_ in zip(ba1, ba2): + xp_assert_close(ba1_, ba2_) + + +class TestButter: + + def test_degenerate(self): + # 0-order filter is just a passthrough + b, a = butter(0, 1, analog=True) + xp_assert_equal(b, np.asarray([1.0])) + xp_assert_equal(a, np.asarray([1.0])) + + # 1-order filter is same for all types + b, a = butter(1, 1, analog=True) + assert_array_almost_equal(b, [1]) + assert_array_almost_equal(a, [1, 1]) + + z, p, k = butter(1, 0.3, output='zpk') + xp_assert_equal(z, np.asarray([-1.0])) + xp_assert_close(p, [3.249196962329063e-01 + 0j], rtol=1e-14) + xp_assert_close(k, 3.375401518835469e-01, rtol=1e-14) + + def test_basic(self): + # analog s-plane + for N in range(25): + wn = 0.01 + z, p, k = butter(N, wn, 'low', analog=True, output='zpk') + assert_array_almost_equal([], z) + assert len(p) == N + # All poles should be at distance wn from origin + assert_array_almost_equal(abs(p), np.asarray(wn)) + assert all(np.real(p) <= 0) # No poles in right half of S-plane + assert_array_almost_equal(wn**N, k) + + # digital z-plane + for N in range(25): + wn = 0.01 + z, p, k = butter(N, wn, 'high', analog=False, output='zpk') + xp_assert_equal(np.ones(N), z) # All zeros exactly at DC + assert all(np.abs(p) <= 1) # No poles outside unit circle + + b1, a1 = butter(2, 1, analog=True) + assert_array_almost_equal(b1, [1]) + assert_array_almost_equal(a1, [1, np.sqrt(2), 1]) + + b2, a2 = butter(5, 1, analog=True) + assert_array_almost_equal(b2, [1]) + assert_array_almost_equal(a2, [1, 3.2361, 5.2361, + 5.2361, 3.2361, 1], decimal=4) + + b3, a3 = butter(10, 1, analog=True) + assert_array_almost_equal(b3, [1]) + assert_array_almost_equal(a3, [1, 6.3925, 20.4317, 42.8021, 64.8824, + 74.2334, 64.8824, 42.8021, 20.4317, + 6.3925, 1], decimal=4) + + b2, a2 = butter(19, 1.0441379169150726, analog=True) + assert_array_almost_equal(b2, [2.2720], decimal=4) + assert_array_almost_equal(a2, 1.0e+004 * np.array([ + 0.0001, 0.0013, 0.0080, 0.0335, 0.1045, 0.2570, + 0.5164, 0.8669, 1.2338, 1.5010, 1.5672, 1.4044, + 1.0759, 0.6986, 0.3791, 0.1681, 0.0588, 0.0153, + 0.0026, 0.0002]), decimal=0) + + b, a = butter(5, 0.4) + assert_array_almost_equal(b, [0.0219, 0.1097, 0.2194, + 0.2194, 0.1097, 0.0219], decimal=4) + assert_array_almost_equal(a, [1.0000, -0.9853, 0.9738, + -0.3864, 0.1112, -0.0113], decimal=4) + + def test_highpass(self): + # highpass, high even order + z, p, k = butter(28, 0.43, 'high', output='zpk') + z2 = np.ones(28) + p2 = [ + 2.068257195514592e-01 + 
9.238294351481734e-01j, + 2.068257195514592e-01 - 9.238294351481734e-01j, + 1.874933103892023e-01 + 8.269455076775277e-01j, + 1.874933103892023e-01 - 8.269455076775277e-01j, + 1.717435567330153e-01 + 7.383078571194629e-01j, + 1.717435567330153e-01 - 7.383078571194629e-01j, + 1.588266870755982e-01 + 6.564623730651094e-01j, + 1.588266870755982e-01 - 6.564623730651094e-01j, + 1.481881532502603e-01 + 5.802343458081779e-01j, + 1.481881532502603e-01 - 5.802343458081779e-01j, + 1.394122576319697e-01 + 5.086609000582009e-01j, + 1.394122576319697e-01 - 5.086609000582009e-01j, + 1.321840881809715e-01 + 4.409411734716436e-01j, + 1.321840881809715e-01 - 4.409411734716436e-01j, + 1.262633413354405e-01 + 3.763990035551881e-01j, + 1.262633413354405e-01 - 3.763990035551881e-01j, + 1.214660449478046e-01 + 3.144545234797277e-01j, + 1.214660449478046e-01 - 3.144545234797277e-01j, + 1.104868766650320e-01 + 2.771505404367791e-02j, + 1.104868766650320e-01 - 2.771505404367791e-02j, + 1.111768629525075e-01 + 8.331369153155753e-02j, + 1.111768629525075e-01 - 8.331369153155753e-02j, + 1.125740630842972e-01 + 1.394219509611784e-01j, + 1.125740630842972e-01 - 1.394219509611784e-01j, + 1.147138487992747e-01 + 1.963932363793666e-01j, + 1.147138487992747e-01 - 1.963932363793666e-01j, + 1.176516491045901e-01 + 2.546021573417188e-01j, + 1.176516491045901e-01 - 2.546021573417188e-01j, + ] + k2 = 1.446671081817286e-06 + xp_assert_equal(z, z2) + xp_assert_close(sorted(p, key=np.imag), + sorted(p2, key=np.imag), rtol=1e-7) + xp_assert_close(k, k2, rtol=1e-10) + + # highpass, high odd order + z, p, k = butter(27, 0.56, 'high', output='zpk') + z2 = np.ones(27) + p2 = [ + -1.772572785680147e-01 + 9.276431102995948e-01j, + -1.772572785680147e-01 - 9.276431102995948e-01j, + -1.600766565322114e-01 + 8.264026279893268e-01j, + -1.600766565322114e-01 - 8.264026279893268e-01j, + -1.461948419016121e-01 + 7.341841939120078e-01j, + -1.461948419016121e-01 - 7.341841939120078e-01j, + -1.348975284762046e-01 + 6.493235066053785e-01j, + -1.348975284762046e-01 - 6.493235066053785e-01j, + -1.256628210712206e-01 + 5.704921366889227e-01j, + -1.256628210712206e-01 - 5.704921366889227e-01j, + -1.181038235962314e-01 + 4.966120551231630e-01j, + -1.181038235962314e-01 - 4.966120551231630e-01j, + -1.119304913239356e-01 + 4.267938916403775e-01j, + -1.119304913239356e-01 - 4.267938916403775e-01j, + -1.069237739782691e-01 + 3.602914879527338e-01j, + -1.069237739782691e-01 - 3.602914879527338e-01j, + -1.029178030691416e-01 + 2.964677964142126e-01j, + -1.029178030691416e-01 - 2.964677964142126e-01j, + -9.978747500816100e-02 + 2.347687643085738e-01j, + -9.978747500816100e-02 - 2.347687643085738e-01j, + -9.743974496324025e-02 + 1.747028739092479e-01j, + -9.743974496324025e-02 - 1.747028739092479e-01j, + -9.580754551625957e-02 + 1.158246860771989e-01j, + -9.580754551625957e-02 - 1.158246860771989e-01j, + -9.484562207782568e-02 + 5.772118357151691e-02j, + -9.484562207782568e-02 - 5.772118357151691e-02j, + -9.452783117928215e-02 + ] + k2 = 9.585686688851069e-09 + xp_assert_equal(z, z2) + xp_assert_close(sorted(p, key=np.imag), + sorted(p2, key=np.imag), rtol=1e-8) + xp_assert_close(k, k2) + + def test_bandpass(self): + z, p, k = butter(8, [0.25, 0.33], 'band', output='zpk') + z2 = [1, 1, 1, 1, 1, 1, 1, 1, + -1, -1, -1, -1, -1, -1, -1, -1] + p2 = [ + 4.979909925436156e-01 + 8.367609424799387e-01j, + 4.979909925436156e-01 - 8.367609424799387e-01j, + 4.913338722555539e-01 + 7.866774509868817e-01j, + 4.913338722555539e-01 - 7.866774509868817e-01j, + 
5.035229361778706e-01 + 7.401147376726750e-01j, + 5.035229361778706e-01 - 7.401147376726750e-01j, + 5.307617160406101e-01 + 7.029184459442954e-01j, + 5.307617160406101e-01 - 7.029184459442954e-01j, + 5.680556159453138e-01 + 6.788228792952775e-01j, + 5.680556159453138e-01 - 6.788228792952775e-01j, + 6.100962560818854e-01 + 6.693849403338664e-01j, + 6.100962560818854e-01 - 6.693849403338664e-01j, + 6.904694312740631e-01 + 6.930501690145245e-01j, + 6.904694312740631e-01 - 6.930501690145245e-01j, + 6.521767004237027e-01 + 6.744414640183752e-01j, + 6.521767004237027e-01 - 6.744414640183752e-01j, + ] + k2 = 3.398854055800844e-08 + xp_assert_equal(z, z2, check_dtype=False) + xp_assert_close(sorted(p, key=np.imag), + sorted(p2, key=np.imag), rtol=1e-13) + xp_assert_close(k, k2, rtol=1e-13) + + # bandpass analog + z, p, k = butter(4, [90.5, 110.5], 'bp', analog=True, output='zpk') + z2 = np.zeros(4, dtype=z.dtype) + p2 = [ + -4.179137760733086e+00 + 1.095935899082837e+02j, + -4.179137760733086e+00 - 1.095935899082837e+02j, + -9.593598668443835e+00 + 1.034745398029734e+02j, + -9.593598668443835e+00 - 1.034745398029734e+02j, + -8.883991981781929e+00 + 9.582087115567160e+01j, + -8.883991981781929e+00 - 9.582087115567160e+01j, + -3.474530886568715e+00 + 9.111599925805801e+01j, + -3.474530886568715e+00 - 9.111599925805801e+01j, + ] + k2 = 1.600000000000001e+05 + xp_assert_equal(z, z2) + xp_assert_close(sorted(p, key=np.imag), + sorted(p2, key=np.imag)) + xp_assert_close(k, k2, rtol=1e-15) + + def test_bandstop(self): + z, p, k = butter(7, [0.45, 0.56], 'stop', output='zpk') + z2 = [-1.594474531383421e-02 + 9.998728744679880e-01j, + -1.594474531383421e-02 - 9.998728744679880e-01j, + -1.594474531383421e-02 + 9.998728744679880e-01j, + -1.594474531383421e-02 - 9.998728744679880e-01j, + -1.594474531383421e-02 + 9.998728744679880e-01j, + -1.594474531383421e-02 - 9.998728744679880e-01j, + -1.594474531383421e-02 + 9.998728744679880e-01j, + -1.594474531383421e-02 - 9.998728744679880e-01j, + -1.594474531383421e-02 + 9.998728744679880e-01j, + -1.594474531383421e-02 - 9.998728744679880e-01j, + -1.594474531383421e-02 + 9.998728744679880e-01j, + -1.594474531383421e-02 - 9.998728744679880e-01j, + -1.594474531383421e-02 + 9.998728744679880e-01j, + -1.594474531383421e-02 - 9.998728744679880e-01j] + p2 = [-1.766850742887729e-01 + 9.466951258673900e-01j, + -1.766850742887729e-01 - 9.466951258673900e-01j, + 1.467897662432886e-01 + 9.515917126462422e-01j, + 1.467897662432886e-01 - 9.515917126462422e-01j, + -1.370083529426906e-01 + 8.880376681273993e-01j, + -1.370083529426906e-01 - 8.880376681273993e-01j, + 1.086774544701390e-01 + 8.915240810704319e-01j, + 1.086774544701390e-01 - 8.915240810704319e-01j, + -7.982704457700891e-02 + 8.506056315273435e-01j, + -7.982704457700891e-02 - 8.506056315273435e-01j, + 5.238812787110331e-02 + 8.524011102699969e-01j, + 5.238812787110331e-02 - 8.524011102699969e-01j, + -1.357545000491310e-02 + 8.382287744986582e-01j, + -1.357545000491310e-02 - 8.382287744986582e-01j] + k2 = 4.577122512960063e-01 + xp_assert_close(sorted(z, key=np.imag), + sorted(z2, key=np.imag)) + xp_assert_close(sorted(p, key=np.imag), + sorted(p2, key=np.imag)) + xp_assert_close(k, k2, rtol=1e-14) + + def test_ba_output(self): + b, a = butter(4, [100, 300], 'bandpass', analog=True) + b2 = [1.6e+09, 0, 0, 0, 0] + a2 = [1.000000000000000e+00, 5.226251859505511e+02, + 2.565685424949238e+05, 6.794127417357160e+07, + 1.519411254969542e+10, 2.038238225207147e+12, + 2.309116882454312e+14, 1.411088002066486e+16, + 
8.099999999999991e+17] + xp_assert_close(b, b2, rtol=1e-14) + xp_assert_close(a, a2, rtol=1e-14) + + def test_fs_param(self): + for fs in (900, 900.1, 1234.567): + for N in (0, 1, 2, 3, 10): + for fc in (100, 100.1, 432.12345): + for btype in ('lp', 'hp'): + ba1 = butter(N, fc, btype, fs=fs) + ba2 = butter(N, fc/(fs/2), btype) + for ba1_, ba2_ in zip(ba1, ba2): + xp_assert_close(ba1_, ba2_) + for fc in ((100, 200), (100.1, 200.2), (321.123, 432.123)): + for btype in ('bp', 'bs'): + ba1 = butter(N, fc, btype, fs=fs) + for seq in (list, tuple, array): + fcnorm = seq([f/(fs/2) for f in fc]) + ba2 = butter(N, fcnorm, btype) + for ba1_, ba2_ in zip(ba1, ba2): + xp_assert_close(ba1_, ba2_) + + +class TestCheby1: + + def test_degenerate(self): + # 0-order filter is just a passthrough + # Even-order filters have DC gain of -rp dB + b, a = cheby1(0, 10*np.log10(2), 1, analog=True) + assert_array_almost_equal(b, [1/np.sqrt(2)]) + xp_assert_equal(a, np.asarray([1.0])) + + # 1-order filter is same for all types + b, a = cheby1(1, 10*np.log10(2), 1, analog=True) + assert_array_almost_equal(b, [1]) + assert_array_almost_equal(a, [1, 1]) + + z, p, k = cheby1(1, 0.1, 0.3, output='zpk') + xp_assert_equal(z, np.asarray([-1.0])) + xp_assert_close(p, [-5.390126972799615e-01 + 0j], rtol=1e-14) + xp_assert_close(k, 7.695063486399808e-01, rtol=1e-14) + + def test_basic(self): + for N in range(25): + wn = 0.01 + z, p, k = cheby1(N, 1, wn, 'low', analog=True, output='zpk') + assert_array_almost_equal([], z) + assert len(p) == N + assert all(np.real(p) <= 0) # No poles in right half of S-plane + + for N in range(25): + wn = 0.01 + z, p, k = cheby1(N, 1, wn, 'high', analog=False, output='zpk') + xp_assert_equal(np.ones(N), z) # All zeros exactly at DC + assert all(np.abs(p) <= 1) # No poles outside unit circle + + # Same test as TestNormalize + b, a = cheby1(8, 0.5, 0.048) + assert_array_almost_equal(b, [ + 2.150733144728282e-11, 1.720586515782626e-10, + 6.022052805239190e-10, 1.204410561047838e-09, + 1.505513201309798e-09, 1.204410561047838e-09, + 6.022052805239190e-10, 1.720586515782626e-10, + 2.150733144728282e-11], decimal=14) + assert_array_almost_equal(a, [ + 1.000000000000000e+00, -7.782402035027959e+00, + 2.654354569747454e+01, -5.182182531666387e+01, + 6.334127355102684e+01, -4.963358186631157e+01, + 2.434862182949389e+01, -6.836925348604676e+00, + 8.412934944449140e-01], decimal=14) + + b, a = cheby1(4, 1, [0.4, 0.7], btype='band') + assert_array_almost_equal(b, [0.0084, 0, -0.0335, 0, 0.0502, 0, + -0.0335, 0, 0.0084], decimal=4) + assert_array_almost_equal(a, [1.0, 1.1191, 2.862, 2.2986, 3.4137, + 1.8653, 1.8982, 0.5676, 0.4103], + decimal=4) + + b2, a2 = cheby1(5, 3, 1, analog=True) + assert_array_almost_equal(b2, [0.0626], decimal=4) + assert_array_almost_equal(a2, [1, 0.5745, 1.4150, 0.5489, 0.4080, + 0.0626], decimal=4) + + b, a = cheby1(8, 0.5, 0.1) + assert_array_almost_equal(b, 1.0e-006 * np.array([ + 0.00703924326028, 0.05631394608227, 0.19709881128793, + 0.39419762257586, 0.49274702821983, 0.39419762257586, + 0.19709881128793, 0.05631394608227, 0.00703924326028]), + decimal=13) + assert_array_almost_equal(a, [ + 1.00000000000000, -7.44912258934158, 24.46749067762108, + -46.27560200466141, 55.11160187999928, -42.31640010161038, + 20.45543300484147, -5.69110270561444, 0.69770374759022], + decimal=13) + + b, a = cheby1(8, 0.5, 0.25) + assert_array_almost_equal(b, 1.0e-003 * np.array([ + 0.00895261138923, 0.07162089111382, 0.25067311889837, + 0.50134623779673, 0.62668279724591, 0.50134623779673, + 
0.25067311889837, 0.07162089111382, 0.00895261138923]), + decimal=13) + assert_array_almost_equal(a, [1.00000000000000, -5.97529229188545, + 16.58122329202101, -27.71423273542923, + 30.39509758355313, -22.34729670426879, + 10.74509800434910, -3.08924633697497, + 0.40707685889802], decimal=13) + + def test_highpass(self): + # high even order + z, p, k = cheby1(24, 0.7, 0.2, 'high', output='zpk') + z2 = np.ones(24) + p2 = [-6.136558509657073e-01 + 2.700091504942893e-01j, + -6.136558509657073e-01 - 2.700091504942893e-01j, + -3.303348340927516e-01 + 6.659400861114254e-01j, + -3.303348340927516e-01 - 6.659400861114254e-01j, + 8.779713780557169e-03 + 8.223108447483040e-01j, + 8.779713780557169e-03 - 8.223108447483040e-01j, + 2.742361123006911e-01 + 8.356666951611864e-01j, + 2.742361123006911e-01 - 8.356666951611864e-01j, + 4.562984557158206e-01 + 7.954276912303594e-01j, + 4.562984557158206e-01 - 7.954276912303594e-01j, + 5.777335494123628e-01 + 7.435821817961783e-01j, + 5.777335494123628e-01 - 7.435821817961783e-01j, + 6.593260977749194e-01 + 6.955390907990932e-01j, + 6.593260977749194e-01 - 6.955390907990932e-01j, + 7.149590948466562e-01 + 6.559437858502012e-01j, + 7.149590948466562e-01 - 6.559437858502012e-01j, + 7.532432388188739e-01 + 6.256158042292060e-01j, + 7.532432388188739e-01 - 6.256158042292060e-01j, + 7.794365244268271e-01 + 6.042099234813333e-01j, + 7.794365244268271e-01 - 6.042099234813333e-01j, + 7.967253874772997e-01 + 5.911966597313203e-01j, + 7.967253874772997e-01 - 5.911966597313203e-01j, + 8.069756417293870e-01 + 5.862214589217275e-01j, + 8.069756417293870e-01 - 5.862214589217275e-01j] + k2 = 6.190427617192018e-04 + xp_assert_equal(z, z2) + xp_assert_close(sorted(p, key=np.imag), + sorted(p2, key=np.imag), rtol=1e-10) + xp_assert_close(k, k2, rtol=1e-10) + + # high odd order + z, p, k = cheby1(23, 0.8, 0.3, 'high', output='zpk') + z2 = np.ones(23) + p2 = [-7.676400532011010e-01, + -6.754621070166477e-01 + 3.970502605619561e-01j, + -6.754621070166477e-01 - 3.970502605619561e-01j, + -4.528880018446727e-01 + 6.844061483786332e-01j, + -4.528880018446727e-01 - 6.844061483786332e-01j, + -1.986009130216447e-01 + 8.382285942941594e-01j, + -1.986009130216447e-01 - 8.382285942941594e-01j, + 2.504673931532608e-02 + 8.958137635794080e-01j, + 2.504673931532608e-02 - 8.958137635794080e-01j, + 2.001089429976469e-01 + 9.010678290791480e-01j, + 2.001089429976469e-01 - 9.010678290791480e-01j, + 3.302410157191755e-01 + 8.835444665962544e-01j, + 3.302410157191755e-01 - 8.835444665962544e-01j, + 4.246662537333661e-01 + 8.594054226449009e-01j, + 4.246662537333661e-01 - 8.594054226449009e-01j, + 4.919620928120296e-01 + 8.366772762965786e-01j, + 4.919620928120296e-01 - 8.366772762965786e-01j, + 5.385746917494749e-01 + 8.191616180796720e-01j, + 5.385746917494749e-01 - 8.191616180796720e-01j, + 5.855636993537203e-01 + 8.060680937701062e-01j, + 5.855636993537203e-01 - 8.060680937701062e-01j, + 5.688812849391721e-01 + 8.086497795114683e-01j, + 5.688812849391721e-01 - 8.086497795114683e-01j] + k2 = 1.941697029206324e-05 + xp_assert_equal(z, z2) + xp_assert_close(sorted(p, key=np.imag), + sorted(p2, key=np.imag), rtol=1e-10) + xp_assert_close(k, k2, rtol=1e-10) + + z, p, k = cheby1(10, 1, 1000, 'high', analog=True, output='zpk') + z2 = np.zeros(10) + p2 = [-3.144743169501551e+03 + 3.511680029092744e+03j, + -3.144743169501551e+03 - 3.511680029092744e+03j, + -5.633065604514602e+02 + 2.023615191183945e+03j, + -5.633065604514602e+02 - 2.023615191183945e+03j, + -1.946412183352025e+02 + 1.372309454274755e+03j, 
+ -1.946412183352025e+02 - 1.372309454274755e+03j, + -7.987162953085479e+01 + 1.105207708045358e+03j, + -7.987162953085479e+01 - 1.105207708045358e+03j, + -2.250315039031946e+01 + 1.001723931471477e+03j, + -2.250315039031946e+01 - 1.001723931471477e+03j] + k2 = 8.912509381337453e-01 + xp_assert_equal(z, z2) + xp_assert_close(sorted(p, key=np.imag), + sorted(p2, key=np.imag), rtol=1e-13) + xp_assert_close(k, k2, rtol=1e-15) + + def test_bandpass(self): + z, p, k = cheby1(8, 1, [0.3, 0.4], 'bp', output='zpk') + z2 = [1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1, -1, -1, -1] + p2 = [3.077784854851463e-01 + 9.453307017592942e-01j, + 3.077784854851463e-01 - 9.453307017592942e-01j, + 3.280567400654425e-01 + 9.272377218689016e-01j, + 3.280567400654425e-01 - 9.272377218689016e-01j, + 3.677912763284301e-01 + 9.038008865279966e-01j, + 3.677912763284301e-01 - 9.038008865279966e-01j, + 4.194425632520948e-01 + 8.769407159656157e-01j, + 4.194425632520948e-01 - 8.769407159656157e-01j, + 4.740921994669189e-01 + 8.496508528630974e-01j, + 4.740921994669189e-01 - 8.496508528630974e-01j, + 5.234866481897429e-01 + 8.259608422808477e-01j, + 5.234866481897429e-01 - 8.259608422808477e-01j, + 5.844717632289875e-01 + 8.052901363500210e-01j, + 5.844717632289875e-01 - 8.052901363500210e-01j, + 5.615189063336070e-01 + 8.100667803850766e-01j, + 5.615189063336070e-01 - 8.100667803850766e-01j] + k2 = 5.007028718074307e-09 + xp_assert_equal(z, z2, check_dtype=False) + xp_assert_close(sorted(p, key=np.imag), + sorted(p2, key=np.imag), rtol=1e-13) + xp_assert_close(k, k2, rtol=1e-13) + + def test_bandstop(self): + z, p, k = cheby1(7, 1, [0.5, 0.6], 'stop', output='zpk') + z2 = [-1.583844403245361e-01 + 9.873775210440450e-01j, + -1.583844403245361e-01 - 9.873775210440450e-01j, + -1.583844403245361e-01 + 9.873775210440450e-01j, + -1.583844403245361e-01 - 9.873775210440450e-01j, + -1.583844403245361e-01 + 9.873775210440450e-01j, + -1.583844403245361e-01 - 9.873775210440450e-01j, + -1.583844403245361e-01 + 9.873775210440450e-01j, + -1.583844403245361e-01 - 9.873775210440450e-01j, + -1.583844403245361e-01 + 9.873775210440450e-01j, + -1.583844403245361e-01 - 9.873775210440450e-01j, + -1.583844403245361e-01 + 9.873775210440450e-01j, + -1.583844403245361e-01 - 9.873775210440450e-01j, + -1.583844403245361e-01 + 9.873775210440450e-01j, + -1.583844403245361e-01 - 9.873775210440450e-01j] + p2 = [-8.942974551472813e-02 + 3.482480481185926e-01j, + -8.942974551472813e-02 - 3.482480481185926e-01j, + 1.293775154041798e-01 + 8.753499858081858e-01j, + 1.293775154041798e-01 - 8.753499858081858e-01j, + 3.399741945062013e-02 + 9.690316022705607e-01j, + 3.399741945062013e-02 - 9.690316022705607e-01j, + 4.167225522796539e-04 + 9.927338161087488e-01j, + 4.167225522796539e-04 - 9.927338161087488e-01j, + -3.912966549550960e-01 + 8.046122859255742e-01j, + -3.912966549550960e-01 - 8.046122859255742e-01j, + -3.307805547127368e-01 + 9.133455018206508e-01j, + -3.307805547127368e-01 - 9.133455018206508e-01j, + -3.072658345097743e-01 + 9.443589759799366e-01j, + -3.072658345097743e-01 - 9.443589759799366e-01j] + k2 = 3.619438310405028e-01 + xp_assert_close(sorted(z, key=np.imag), + sorted(z2, key=np.imag), rtol=1e-13) + xp_assert_close(sorted(p, key=np.imag), + sorted(p2, key=np.imag), rtol=1e-13) + xp_assert_close(k, k2, rtol=0, atol=5e-16) + + def test_ba_output(self): + # with transfer function conversion, without digital conversion + b, a = cheby1(5, 0.9, [210, 310], 'stop', analog=True) + b2 = [1.000000000000006e+00, 0, + 3.255000000000020e+05, 0, + 
4.238010000000026e+10, 0, + 2.758944510000017e+15, 0, + 8.980364380050052e+19, 0, + 1.169243442282517e+24 + ] + a2 = [1.000000000000000e+00, 4.630555945694342e+02, + 4.039266454794788e+05, 1.338060988610237e+08, + 5.844333551294591e+10, 1.357346371637638e+13, + 3.804661141892782e+15, 5.670715850340080e+17, + 1.114411200988328e+20, 8.316815934908471e+21, + 1.169243442282517e+24 + ] + xp_assert_close(b, b2, rtol=1e-14) + xp_assert_close(a, a2, rtol=1e-14) + + def test_fs_param(self): + for fs in (900, 900.1, 1234.567): + for N in (0, 1, 2, 3, 10): + for fc in (100, 100.1, 432.12345): + for btype in ('lp', 'hp'): + ba1 = cheby1(N, 1, fc, btype, fs=fs) + ba2 = cheby1(N, 1, fc/(fs/2), btype) + for ba1_, ba2_ in zip(ba1, ba2): + xp_assert_close(ba1_, ba2_) + for fc in ((100, 200), (100.1, 200.2), (321.123, 432.123)): + for btype in ('bp', 'bs'): + ba1 = cheby1(N, 1, fc, btype, fs=fs) + for seq in (list, tuple, array): + fcnorm = seq([f/(fs/2) for f in fc]) + ba2 = cheby1(N, 1, fcnorm, btype) + for ba1_, ba2_ in zip(ba1, ba2): + xp_assert_close(ba1_, ba2_) + +class TestCheby2: + + def test_degenerate(self): + # 0-order filter is just a passthrough + # Stopband ripple factor doesn't matter + b, a = cheby2(0, 123.456, 1, analog=True) + xp_assert_equal(b, np.asarray([1.0])) + xp_assert_equal(a, np.asarray([1.0])) + + # 1-order filter is same for all types + b, a = cheby2(1, 10*np.log10(2), 1, analog=True) + assert_array_almost_equal(b, [1]) + assert_array_almost_equal(a, [1, 1]) + + z, p, k = cheby2(1, 50, 0.3, output='zpk') + xp_assert_equal(z, np.asarray([-1], dtype=np.complex128)) + xp_assert_close(p, [9.967826460175649e-01 + 0j], rtol=1e-14) + xp_assert_close(k, 1.608676991217512e-03, rtol=1e-14) + + def test_basic(self): + for N in range(25): + wn = 0.01 + z, p, k = cheby2(N, 40, wn, 'low', analog=True, output='zpk') + assert len(p) == N + assert all(np.real(p) <= 0) # No poles in right half of S-plane + + for N in range(25): + wn = 0.01 + z, p, k = cheby2(N, 40, wn, 'high', analog=False, output='zpk') + assert all(np.abs(p) <= 1) # No poles outside unit circle + + B, A = cheby2(18, 100, 0.5) + assert_array_almost_equal(B, [ + 0.00167583914216, 0.01249479541868, 0.05282702120282, + 0.15939804265706, 0.37690207631117, 0.73227013789108, + 1.20191856962356, 1.69522872823393, 2.07598674519837, + 2.21972389625291, 2.07598674519838, 1.69522872823395, + 1.20191856962359, 0.73227013789110, 0.37690207631118, + 0.15939804265707, 0.05282702120282, 0.01249479541868, + 0.00167583914216], decimal=13) + assert_array_almost_equal(A, [ + 1.00000000000000, -0.27631970006174, 3.19751214254060, + -0.15685969461355, 4.13926117356269, 0.60689917820044, + 2.95082770636540, 0.89016501910416, 1.32135245849798, + 0.51502467236824, 0.38906643866660, 0.15367372690642, + 0.07255803834919, 0.02422454070134, 0.00756108751837, + 0.00179848550988, 0.00033713574499, 0.00004258794833, + 0.00000281030149], decimal=13) + + def test_highpass(self): + # high even order + z, p, k = cheby2(26, 60, 0.3, 'high', output='zpk') + z2 = [9.981088955489852e-01 + 6.147058341984388e-02j, + 9.981088955489852e-01 - 6.147058341984388e-02j, + 9.832702870387426e-01 + 1.821525257215483e-01j, + 9.832702870387426e-01 - 1.821525257215483e-01j, + 9.550760158089112e-01 + 2.963609353922882e-01j, + 9.550760158089112e-01 - 2.963609353922882e-01j, + 9.162054748821922e-01 + 4.007087817803773e-01j, + 9.162054748821922e-01 - 4.007087817803773e-01j, + 8.700619897368064e-01 + 4.929423232136168e-01j, + 8.700619897368064e-01 - 4.929423232136168e-01j, + 
5.889791753434985e-01 + 8.081482110427953e-01j, + 5.889791753434985e-01 - 8.081482110427953e-01j, + 5.984900456570295e-01 + 8.011302423760501e-01j, + 5.984900456570295e-01 - 8.011302423760501e-01j, + 6.172880888914629e-01 + 7.867371958365343e-01j, + 6.172880888914629e-01 - 7.867371958365343e-01j, + 6.448899971038180e-01 + 7.642754030030161e-01j, + 6.448899971038180e-01 - 7.642754030030161e-01j, + 6.804845629637927e-01 + 7.327624168637228e-01j, + 6.804845629637927e-01 - 7.327624168637228e-01j, + 8.202619107108660e-01 + 5.719881098737678e-01j, + 8.202619107108660e-01 - 5.719881098737678e-01j, + 7.228410452536148e-01 + 6.910143437705678e-01j, + 7.228410452536148e-01 - 6.910143437705678e-01j, + 7.702121399578629e-01 + 6.377877856007792e-01j, + 7.702121399578629e-01 - 6.377877856007792e-01j] + p2 = [7.365546198286450e-01 + 4.842085129329526e-02j, + 7.365546198286450e-01 - 4.842085129329526e-02j, + 7.292038510962885e-01 + 1.442201672097581e-01j, + 7.292038510962885e-01 - 1.442201672097581e-01j, + 7.151293788040354e-01 + 2.369925800458584e-01j, + 7.151293788040354e-01 - 2.369925800458584e-01j, + 6.955051820787286e-01 + 3.250341363856910e-01j, + 6.955051820787286e-01 - 3.250341363856910e-01j, + 6.719122956045220e-01 + 4.070475750638047e-01j, + 6.719122956045220e-01 - 4.070475750638047e-01j, + 6.461722130611300e-01 + 4.821965916689270e-01j, + 6.461722130611300e-01 - 4.821965916689270e-01j, + 5.528045062872224e-01 + 8.162920513838372e-01j, + 5.528045062872224e-01 - 8.162920513838372e-01j, + 5.464847782492791e-01 + 7.869899955967304e-01j, + 5.464847782492791e-01 - 7.869899955967304e-01j, + 5.488033111260949e-01 + 7.520442354055579e-01j, + 5.488033111260949e-01 - 7.520442354055579e-01j, + 6.201874719022955e-01 + 5.500894392527353e-01j, + 6.201874719022955e-01 - 5.500894392527353e-01j, + 5.586478152536709e-01 + 7.112676877332921e-01j, + 5.586478152536709e-01 - 7.112676877332921e-01j, + 5.958145844148228e-01 + 6.107074340842115e-01j, + 5.958145844148228e-01 - 6.107074340842115e-01j, + 5.747812938519067e-01 + 6.643001536914696e-01j, + 5.747812938519067e-01 - 6.643001536914696e-01j] + k2 = 9.932997786497189e-02 + xp_assert_close(sorted(z, key=np.angle), + sorted(z2, key=np.angle), rtol=1e-13) + xp_assert_close(sorted(p, key=np.angle), + sorted(p2, key=np.angle), rtol=1e-12) + xp_assert_close(k, k2, rtol=1e-11) + + # high odd order + z, p, k = cheby2(25, 80, 0.5, 'high', output='zpk') + z2 = [9.690690376586687e-01 + 2.467897896011971e-01j, + 9.690690376586687e-01 - 2.467897896011971e-01j, + 9.999999999999492e-01, + 8.835111277191199e-01 + 4.684101698261429e-01j, + 8.835111277191199e-01 - 4.684101698261429e-01j, + 7.613142857900539e-01 + 6.483830335935022e-01j, + 7.613142857900539e-01 - 6.483830335935022e-01j, + 6.232625173626231e-01 + 7.820126817709752e-01j, + 6.232625173626231e-01 - 7.820126817709752e-01j, + 4.864456563413621e-01 + 8.737108351316745e-01j, + 4.864456563413621e-01 - 8.737108351316745e-01j, + 3.618368136816749e-01 + 9.322414495530347e-01j, + 3.618368136816749e-01 - 9.322414495530347e-01j, + 2.549486883466794e-01 + 9.669545833752675e-01j, + 2.549486883466794e-01 - 9.669545833752675e-01j, + 1.676175432109457e-01 + 9.858520980390212e-01j, + 1.676175432109457e-01 - 9.858520980390212e-01j, + 1.975218468277521e-03 + 9.999980492540941e-01j, + 1.975218468277521e-03 - 9.999980492540941e-01j, + 1.786959496651858e-02 + 9.998403260399917e-01j, + 1.786959496651858e-02 - 9.998403260399917e-01j, + 9.967933660557139e-02 + 9.950196127985684e-01j, + 9.967933660557139e-02 - 9.950196127985684e-01j, + 
5.013970951219547e-02 + 9.987422137518890e-01j, + 5.013970951219547e-02 - 9.987422137518890e-01j] + p2 = [4.218866331906864e-01, + 4.120110200127552e-01 + 1.361290593621978e-01j, + 4.120110200127552e-01 - 1.361290593621978e-01j, + 3.835890113632530e-01 + 2.664910809911026e-01j, + 3.835890113632530e-01 - 2.664910809911026e-01j, + 3.399195570456499e-01 + 3.863983538639875e-01j, + 3.399195570456499e-01 - 3.863983538639875e-01j, + 2.855977834508353e-01 + 4.929444399540688e-01j, + 2.855977834508353e-01 - 4.929444399540688e-01j, + 2.255765441339322e-01 + 5.851631870205766e-01j, + 2.255765441339322e-01 - 5.851631870205766e-01j, + 1.644087535815792e-01 + 6.637356937277153e-01j, + 1.644087535815792e-01 - 6.637356937277153e-01j, + -7.293633845273095e-02 + 9.739218252516307e-01j, + -7.293633845273095e-02 - 9.739218252516307e-01j, + 1.058259206358626e-01 + 7.304739464862978e-01j, + 1.058259206358626e-01 - 7.304739464862978e-01j, + -5.703971947785402e-02 + 9.291057542169088e-01j, + -5.703971947785402e-02 - 9.291057542169088e-01j, + 5.263875132656864e-02 + 7.877974334424453e-01j, + 5.263875132656864e-02 - 7.877974334424453e-01j, + -3.007943405982616e-02 + 8.846331716180016e-01j, + -3.007943405982616e-02 - 8.846331716180016e-01j, + 6.857277464483946e-03 + 8.383275456264492e-01j, + 6.857277464483946e-03 - 8.383275456264492e-01j] + k2 = 6.507068761705037e-03 + xp_assert_close(sorted(z, key=np.angle), + sorted(z2, key=np.angle), rtol=1e-13) + xp_assert_close(sorted(p, key=np.angle), + sorted(p2, key=np.angle), rtol=1e-12) + xp_assert_close(k, k2, rtol=1e-11) + + def test_bandpass(self): + z, p, k = cheby2(9, 40, [0.07, 0.2], 'pass', output='zpk') + z2 = [-9.999999999999999e-01, + 3.676588029658514e-01 + 9.299607543341383e-01j, + 3.676588029658514e-01 - 9.299607543341383e-01j, + 7.009689684982283e-01 + 7.131917730894889e-01j, + 7.009689684982283e-01 - 7.131917730894889e-01j, + 7.815697973765858e-01 + 6.238178033919218e-01j, + 7.815697973765858e-01 - 6.238178033919218e-01j, + 8.063793628819866e-01 + 5.913986160941200e-01j, + 8.063793628819866e-01 - 5.913986160941200e-01j, + 1.000000000000001e+00, + 9.944493019920448e-01 + 1.052168511576739e-01j, + 9.944493019920448e-01 - 1.052168511576739e-01j, + 9.854674703367308e-01 + 1.698642543566085e-01j, + 9.854674703367308e-01 - 1.698642543566085e-01j, + 9.762751735919308e-01 + 2.165335665157851e-01j, + 9.762751735919308e-01 - 2.165335665157851e-01j, + 9.792277171575134e-01 + 2.027636011479496e-01j, + 9.792277171575134e-01 - 2.027636011479496e-01j] + p2 = [8.143803410489621e-01 + 5.411056063397541e-01j, + 8.143803410489621e-01 - 5.411056063397541e-01j, + 7.650769827887418e-01 + 5.195412242095543e-01j, + 7.650769827887418e-01 - 5.195412242095543e-01j, + 6.096241204063443e-01 + 3.568440484659796e-01j, + 6.096241204063443e-01 - 3.568440484659796e-01j, + 6.918192770246239e-01 + 4.770463577106911e-01j, + 6.918192770246239e-01 - 4.770463577106911e-01j, + 6.986241085779207e-01 + 1.146512226180060e-01j, + 6.986241085779207e-01 - 1.146512226180060e-01j, + 8.654645923909734e-01 + 1.604208797063147e-01j, + 8.654645923909734e-01 - 1.604208797063147e-01j, + 9.164831670444591e-01 + 1.969181049384918e-01j, + 9.164831670444591e-01 - 1.969181049384918e-01j, + 9.630425777594550e-01 + 2.317513360702271e-01j, + 9.630425777594550e-01 - 2.317513360702271e-01j, + 9.438104703725529e-01 + 2.193509900269860e-01j, + 9.438104703725529e-01 - 2.193509900269860e-01j] + k2 = 9.345352824659604e-03 + xp_assert_close(sorted(z, key=np.angle), + sorted(z2, key=np.angle), rtol=1e-13) + 
xp_assert_close(sorted(p, key=np.angle), + sorted(p2, key=np.angle), rtol=1e-13) + xp_assert_close(k, k2, rtol=1e-11) + + def test_bandstop(self): + z, p, k = cheby2(6, 55, [0.1, 0.9], 'stop', output='zpk') + z2 = [6.230544895101009e-01 + 7.821784343111114e-01j, + 6.230544895101009e-01 - 7.821784343111114e-01j, + 9.086608545660115e-01 + 4.175349702471991e-01j, + 9.086608545660115e-01 - 4.175349702471991e-01j, + 9.478129721465802e-01 + 3.188268649763867e-01j, + 9.478129721465802e-01 - 3.188268649763867e-01j, + -6.230544895100982e-01 + 7.821784343111109e-01j, + -6.230544895100982e-01 - 7.821784343111109e-01j, + -9.086608545660116e-01 + 4.175349702472088e-01j, + -9.086608545660116e-01 - 4.175349702472088e-01j, + -9.478129721465784e-01 + 3.188268649763897e-01j, + -9.478129721465784e-01 - 3.188268649763897e-01j] + p2 = [-9.464094036167638e-01 + 1.720048695084344e-01j, + -9.464094036167638e-01 - 1.720048695084344e-01j, + -8.715844103386737e-01 + 1.370665039509297e-01j, + -8.715844103386737e-01 - 1.370665039509297e-01j, + -8.078751204586425e-01 + 5.729329866682983e-02j, + -8.078751204586425e-01 - 5.729329866682983e-02j, + 9.464094036167665e-01 + 1.720048695084332e-01j, + 9.464094036167665e-01 - 1.720048695084332e-01j, + 8.078751204586447e-01 + 5.729329866683007e-02j, + 8.078751204586447e-01 - 5.729329866683007e-02j, + 8.715844103386721e-01 + 1.370665039509331e-01j, + 8.715844103386721e-01 - 1.370665039509331e-01j] + k2 = 2.917823332763358e-03 + xp_assert_close(sorted(z, key=np.angle), + sorted(z2, key=np.angle), rtol=1e-13) + xp_assert_close(sorted(p, key=np.angle), + sorted(p2, key=np.angle), rtol=1e-13) + xp_assert_close(k, k2, rtol=1e-11) + + def test_ba_output(self): + # with transfer function conversion, without digital conversion + b, a = cheby2(5, 20, [2010, 2100], 'stop', True) + b2 = [1.000000000000000e+00, 0, # Matlab: 6.683253076978249e-12, + 2.111512500000000e+07, 0, # Matlab: 1.134325604589552e-04, + 1.782966433781250e+14, 0, # Matlab: 7.216787944356781e+02, + 7.525901316990656e+20, 0, # Matlab: 2.039829265789886e+09, + 1.587960565565748e+27, 0, # Matlab: 2.161236218626134e+15, + 1.339913493808585e+33] + a2 = [1.000000000000000e+00, 1.849550755473371e+02, + 2.113222918998538e+07, 3.125114149732283e+09, + 1.785133457155609e+14, 1.979158697776348e+16, + 7.535048322653831e+20, 5.567966191263037e+22, + 1.589246884221346e+27, 5.871210648525566e+28, + 1.339913493808590e+33] + xp_assert_close(b, b2, rtol=1e-14) + xp_assert_close(a, a2, rtol=1e-14) + + def test_fs_param(self): + for fs in (900, 900.1, 1234.567): + for N in (0, 1, 2, 3, 10): + for fc in (100, 100.1, 432.12345): + for btype in ('lp', 'hp'): + ba1 = cheby2(N, 20, fc, btype, fs=fs) + ba2 = cheby2(N, 20, fc/(fs/2), btype) + for ba1_, ba2_ in zip(ba1, ba2): + xp_assert_close(ba1_, ba2_) + for fc in ((100, 200), (100.1, 200.2), (321.123, 432.123)): + for btype in ('bp', 'bs'): + ba1 = cheby2(N, 20, fc, btype, fs=fs) + for seq in (list, tuple, array): + fcnorm = seq([f/(fs/2) for f in fc]) + ba2 = cheby2(N, 20, fcnorm, btype) + for ba1_, ba2_ in zip(ba1, ba2): + xp_assert_close(ba1_, ba2_) + +class TestEllip: + + def test_degenerate(self): + # 0-order filter is just a passthrough + # Even-order filters have DC gain of -rp dB + # Stopband ripple factor doesn't matter + b, a = ellip(0, 10*np.log10(2), 123.456, 1, analog=True) + assert_array_almost_equal(b, [1/np.sqrt(2)]) + xp_assert_equal(a, np.asarray([1.0])) + + # 1-order filter is same for all types + b, a = ellip(1, 10*np.log10(2), 1, 1, analog=True) + 
assert_array_almost_equal(b, [1]) + assert_array_almost_equal(a, [1, 1]) + + z, p, k = ellip(1, 1, 55, 0.3, output='zpk') + xp_assert_close(z, [-9.999999999999998e-01], rtol=1e-14) + xp_assert_close(p, [-6.660721153525525e-04], rtol=1e-10) + xp_assert_close(k, 5.003330360576763e-01, rtol=1e-14) + + def test_basic(self): + for N in range(25): + wn = 0.01 + z, p, k = ellip(N, 1, 40, wn, 'low', analog=True, output='zpk') + assert len(p) == N + assert all(np.real(p) <= 0) # No poles in right half of S-plane + + for N in range(25): + wn = 0.01 + z, p, k = ellip(N, 1, 40, wn, 'high', analog=False, output='zpk') + assert all(np.abs(p) <= 1) # No poles outside unit circle + + b3, a3 = ellip(5, 3, 26, 1, analog=True) + assert_array_almost_equal(b3, [0.1420, 0, 0.3764, 0, + 0.2409], decimal=4) + assert_array_almost_equal(a3, [1, 0.5686, 1.8061, 0.8017, 0.8012, + 0.2409], decimal=4) + + b, a = ellip(3, 1, 60, [0.4, 0.7], 'stop') + assert_array_almost_equal(b, [0.3310, 0.3469, 1.1042, 0.7044, 1.1042, + 0.3469, 0.3310], decimal=4) + assert_array_almost_equal(a, [1.0000, 0.6973, 1.1441, 0.5878, 0.7323, + 0.1131, -0.0060], decimal=4) + + def test_highpass(self): + # high even order + z, p, k = ellip(24, 1, 80, 0.3, 'high', output='zpk') + z2 = [9.761875332501075e-01 + 2.169283290099910e-01j, + 9.761875332501075e-01 - 2.169283290099910e-01j, + 8.413503353963494e-01 + 5.404901600661900e-01j, + 8.413503353963494e-01 - 5.404901600661900e-01j, + 7.160082576305009e-01 + 6.980918098681732e-01j, + 7.160082576305009e-01 - 6.980918098681732e-01j, + 6.456533638965329e-01 + 7.636306264739803e-01j, + 6.456533638965329e-01 - 7.636306264739803e-01j, + 6.127321820971366e-01 + 7.902906256703928e-01j, + 6.127321820971366e-01 - 7.902906256703928e-01j, + 5.983607817490196e-01 + 8.012267936512676e-01j, + 5.983607817490196e-01 - 8.012267936512676e-01j, + 5.922577552594799e-01 + 8.057485658286990e-01j, + 5.922577552594799e-01 - 8.057485658286990e-01j, + 5.896952092563588e-01 + 8.076258788449631e-01j, + 5.896952092563588e-01 - 8.076258788449631e-01j, + 5.886248765538837e-01 + 8.084063054565607e-01j, + 5.886248765538837e-01 - 8.084063054565607e-01j, + 5.881802711123132e-01 + 8.087298490066037e-01j, + 5.881802711123132e-01 - 8.087298490066037e-01j, + 5.879995719101164e-01 + 8.088612386766461e-01j, + 5.879995719101164e-01 - 8.088612386766461e-01j, + 5.879354086709576e-01 + 8.089078780868164e-01j, + 5.879354086709576e-01 - 8.089078780868164e-01j] + p2 = [-3.184805259081650e-01 + 4.206951906775851e-01j, + -3.184805259081650e-01 - 4.206951906775851e-01j, + 1.417279173459985e-01 + 7.903955262836452e-01j, + 1.417279173459985e-01 - 7.903955262836452e-01j, + 4.042881216964651e-01 + 8.309042239116594e-01j, + 4.042881216964651e-01 - 8.309042239116594e-01j, + 5.128964442789670e-01 + 8.229563236799665e-01j, + 5.128964442789670e-01 - 8.229563236799665e-01j, + 5.569614712822724e-01 + 8.155957702908510e-01j, + 5.569614712822724e-01 - 8.155957702908510e-01j, + 5.750478870161392e-01 + 8.118633973883931e-01j, + 5.750478870161392e-01 - 8.118633973883931e-01j, + 5.825314018170804e-01 + 8.101960910679270e-01j, + 5.825314018170804e-01 - 8.101960910679270e-01j, + 5.856397379751872e-01 + 8.094825218722543e-01j, + 5.856397379751872e-01 - 8.094825218722543e-01j, + 5.869326035251949e-01 + 8.091827531557583e-01j, + 5.869326035251949e-01 - 8.091827531557583e-01j, + 5.874697218855733e-01 + 8.090593298213502e-01j, + 5.874697218855733e-01 - 8.090593298213502e-01j, + 5.876904783532237e-01 + 8.090127161018823e-01j, + 5.876904783532237e-01 - 
8.090127161018823e-01j, + 5.877753105317594e-01 + 8.090050577978136e-01j, + 5.877753105317594e-01 - 8.090050577978136e-01j] + k2 = 4.918081266957108e-02 + xp_assert_close(sorted(z, key=np.angle), + sorted(z2, key=np.angle), rtol=1e-4) + xp_assert_close(sorted(p, key=np.angle), + sorted(p2, key=np.angle), rtol=1e-4) + xp_assert_close(k, k2, rtol=1e-3) + + # high odd order + z, p, k = ellip(23, 1, 70, 0.5, 'high', output='zpk') + z2 = [9.999999999998661e-01, + 6.603717261750994e-01 + 7.509388678638675e-01j, + 6.603717261750994e-01 - 7.509388678638675e-01j, + 2.788635267510325e-01 + 9.603307416968041e-01j, + 2.788635267510325e-01 - 9.603307416968041e-01j, + 1.070215532544218e-01 + 9.942567008268131e-01j, + 1.070215532544218e-01 - 9.942567008268131e-01j, + 4.049427369978163e-02 + 9.991797705105507e-01j, + 4.049427369978163e-02 - 9.991797705105507e-01j, + 1.531059368627931e-02 + 9.998827859909265e-01j, + 1.531059368627931e-02 - 9.998827859909265e-01j, + 5.808061438534933e-03 + 9.999831330689181e-01j, + 5.808061438534933e-03 - 9.999831330689181e-01j, + 2.224277847754599e-03 + 9.999975262909676e-01j, + 2.224277847754599e-03 - 9.999975262909676e-01j, + 8.731857107534554e-04 + 9.999996187732845e-01j, + 8.731857107534554e-04 - 9.999996187732845e-01j, + 3.649057346914968e-04 + 9.999999334218996e-01j, + 3.649057346914968e-04 - 9.999999334218996e-01j, + 1.765538109802615e-04 + 9.999999844143768e-01j, + 1.765538109802615e-04 - 9.999999844143768e-01j, + 1.143655290967426e-04 + 9.999999934602630e-01j, + 1.143655290967426e-04 - 9.999999934602630e-01j] + p2 = [-6.322017026545028e-01, + -4.648423756662754e-01 + 5.852407464440732e-01j, + -4.648423756662754e-01 - 5.852407464440732e-01j, + -2.249233374627773e-01 + 8.577853017985717e-01j, + -2.249233374627773e-01 - 8.577853017985717e-01j, + -9.234137570557621e-02 + 9.506548198678851e-01j, + -9.234137570557621e-02 - 9.506548198678851e-01j, + -3.585663561241373e-02 + 9.821494736043981e-01j, + -3.585663561241373e-02 - 9.821494736043981e-01j, + -1.363917242312723e-02 + 9.933844128330656e-01j, + -1.363917242312723e-02 - 9.933844128330656e-01j, + -5.131505238923029e-03 + 9.975221173308673e-01j, + -5.131505238923029e-03 - 9.975221173308673e-01j, + -1.904937999259502e-03 + 9.990680819857982e-01j, + -1.904937999259502e-03 - 9.990680819857982e-01j, + -6.859439885466834e-04 + 9.996492201426826e-01j, + -6.859439885466834e-04 - 9.996492201426826e-01j, + -2.269936267937089e-04 + 9.998686250679161e-01j, + -2.269936267937089e-04 - 9.998686250679161e-01j, + -5.687071588789117e-05 + 9.999527573294513e-01j, + -5.687071588789117e-05 - 9.999527573294513e-01j, + -6.948417068525226e-07 + 9.999882737700173e-01j, + -6.948417068525226e-07 - 9.999882737700173e-01j] + k2 = 1.220910020289434e-02 + xp_assert_close(sorted(z, key=np.angle), + sorted(z2, key=np.angle), rtol=1e-4) + xp_assert_close(sorted(p, key=np.angle), + sorted(p2, key=np.angle), rtol=1e-4) + xp_assert_close(k, k2, rtol=1e-3) + + def test_bandpass(self): + z, p, k = ellip(7, 1, 40, [0.07, 0.2], 'pass', output='zpk') + z2 = [-9.999999999999991e-01, + 6.856610961780020e-01 + 7.279209168501619e-01j, + 6.856610961780020e-01 - 7.279209168501619e-01j, + 7.850346167691289e-01 + 6.194518952058737e-01j, + 7.850346167691289e-01 - 6.194518952058737e-01j, + 7.999038743173071e-01 + 6.001281461922627e-01j, + 7.999038743173071e-01 - 6.001281461922627e-01j, + 9.999999999999999e-01, + 9.862938983554124e-01 + 1.649980183725925e-01j, + 9.862938983554124e-01 - 1.649980183725925e-01j, + 9.788558330548762e-01 + 2.045513580850601e-01j, + 
9.788558330548762e-01 - 2.045513580850601e-01j, + 9.771155231720003e-01 + 2.127093189691258e-01j, + 9.771155231720003e-01 - 2.127093189691258e-01j] + p2 = [8.063992755498643e-01 + 5.858071374778874e-01j, + 8.063992755498643e-01 - 5.858071374778874e-01j, + 8.050395347071724e-01 + 5.639097428109795e-01j, + 8.050395347071724e-01 - 5.639097428109795e-01j, + 8.113124936559144e-01 + 4.855241143973142e-01j, + 8.113124936559144e-01 - 4.855241143973142e-01j, + 8.665595314082394e-01 + 3.334049560919331e-01j, + 8.665595314082394e-01 - 3.334049560919331e-01j, + 9.412369011968871e-01 + 2.457616651325908e-01j, + 9.412369011968871e-01 - 2.457616651325908e-01j, + 9.679465190411238e-01 + 2.228772501848216e-01j, + 9.679465190411238e-01 - 2.228772501848216e-01j, + 9.747235066273385e-01 + 2.178937926146544e-01j, + 9.747235066273385e-01 - 2.178937926146544e-01j] + k2 = 8.354782670263239e-03 + xp_assert_close(sorted(z, key=np.angle), + sorted(z2, key=np.angle), rtol=1e-4) + xp_assert_close(sorted(p, key=np.angle), + sorted(p2, key=np.angle), rtol=1e-4) + xp_assert_close(k, k2, rtol=1e-3) + + z, p, k = ellip(5, 1, 75, [90.5, 110.5], 'pass', True, 'zpk') + z2 = [-5.583607317695175e-14 + 1.433755965989225e+02j, + -5.583607317695175e-14 - 1.433755965989225e+02j, + 5.740106416459296e-14 + 1.261678754570291e+02j, + 5.740106416459296e-14 - 1.261678754570291e+02j, + -2.199676239638652e-14 + 6.974861996895196e+01j, + -2.199676239638652e-14 - 6.974861996895196e+01j, + -3.372595657044283e-14 + 7.926145989044531e+01j, + -3.372595657044283e-14 - 7.926145989044531e+01j, + 0] + p2 = [-8.814960004852743e-01 + 1.104124501436066e+02j, + -8.814960004852743e-01 - 1.104124501436066e+02j, + -2.477372459140184e+00 + 1.065638954516534e+02j, + -2.477372459140184e+00 - 1.065638954516534e+02j, + -3.072156842945799e+00 + 9.995404870405324e+01j, + -3.072156842945799e+00 - 9.995404870405324e+01j, + -2.180456023925693e+00 + 9.379206865455268e+01j, + -2.180456023925693e+00 - 9.379206865455268e+01j, + -7.230484977485752e-01 + 9.056598800801140e+01j, + -7.230484977485752e-01 - 9.056598800801140e+01j] + k2 = 3.774571622827070e-02 + xp_assert_close(sorted(z, key=np.imag), + sorted(z2, key=np.imag), rtol=1e-4) + xp_assert_close(sorted(p, key=np.imag), + sorted(p2, key=np.imag), rtol=1e-6) + xp_assert_close(k, k2, rtol=1e-3) + + def test_bandstop(self): + z, p, k = ellip(8, 1, 65, [0.2, 0.4], 'stop', output='zpk') + z2 = [3.528578094286510e-01 + 9.356769561794296e-01j, + 3.528578094286510e-01 - 9.356769561794296e-01j, + 3.769716042264783e-01 + 9.262248159096587e-01j, + 3.769716042264783e-01 - 9.262248159096587e-01j, + 4.406101783111199e-01 + 8.976985411420985e-01j, + 4.406101783111199e-01 - 8.976985411420985e-01j, + 5.539386470258847e-01 + 8.325574907062760e-01j, + 5.539386470258847e-01 - 8.325574907062760e-01j, + 6.748464963023645e-01 + 7.379581332490555e-01j, + 6.748464963023645e-01 - 7.379581332490555e-01j, + 7.489887970285254e-01 + 6.625826604475596e-01j, + 7.489887970285254e-01 - 6.625826604475596e-01j, + 7.913118471618432e-01 + 6.114127579150699e-01j, + 7.913118471618432e-01 - 6.114127579150699e-01j, + 7.806804740916381e-01 + 6.249303940216475e-01j, + 7.806804740916381e-01 - 6.249303940216475e-01j] + + p2 = [-1.025299146693730e-01 + 5.662682444754943e-01j, + -1.025299146693730e-01 - 5.662682444754943e-01j, + 1.698463595163031e-01 + 8.926678667070186e-01j, + 1.698463595163031e-01 - 8.926678667070186e-01j, + 2.750532687820631e-01 + 9.351020170094005e-01j, + 2.750532687820631e-01 - 9.351020170094005e-01j, + 3.070095178909486e-01 + 
9.457373499553291e-01j, + 3.070095178909486e-01 - 9.457373499553291e-01j, + 7.695332312152288e-01 + 2.792567212705257e-01j, + 7.695332312152288e-01 - 2.792567212705257e-01j, + 8.083818999225620e-01 + 4.990723496863960e-01j, + 8.083818999225620e-01 - 4.990723496863960e-01j, + 8.066158014414928e-01 + 5.649811440393374e-01j, + 8.066158014414928e-01 - 5.649811440393374e-01j, + 8.062787978834571e-01 + 5.855780880424964e-01j, + 8.062787978834571e-01 - 5.855780880424964e-01j] + k2 = 2.068622545291259e-01 + xp_assert_close(sorted(z, key=np.angle), + sorted(z2, key=np.angle), rtol=1e-6) + xp_assert_close(sorted(p, key=np.angle), + sorted(p2, key=np.angle), rtol=1e-5) + xp_assert_close(k, k2, rtol=1e-5) + + def test_ba_output(self): + # with transfer function conversion, without digital conversion + b, a = ellip(5, 1, 40, [201, 240], 'stop', True) + b2 = [ + 1.000000000000000e+00, 0, # Matlab: 1.743506051190569e-13, + 2.426561778314366e+05, 0, # Matlab: 3.459426536825722e-08, + 2.348218683400168e+10, 0, # Matlab: 2.559179747299313e-03, + 1.132780692872241e+15, 0, # Matlab: 8.363229375535731e+01, + 2.724038554089566e+19, 0, # Matlab: 1.018700994113120e+06, + 2.612380874940186e+23 + ] + a2 = [ + 1.000000000000000e+00, 1.337266601804649e+02, + 2.486725353510667e+05, 2.628059713728125e+07, + 2.436169536928770e+10, 1.913554568577315e+12, + 1.175208184614438e+15, 6.115751452473410e+16, + 2.791577695211466e+19, 7.241811142725384e+20, + 2.612380874940182e+23 + ] + xp_assert_close(b, b2, rtol=1e-6) + xp_assert_close(a, a2, rtol=1e-4) + + def test_fs_param(self): + for fs in (900, 900.1, 1234.567): + for N in (0, 1, 2, 3, 10): + for fc in (100, 100.1, 432.12345): + for btype in ('lp', 'hp'): + ba1 = ellip(N, 1, 20, fc, btype, fs=fs) + ba2 = ellip(N, 1, 20, fc/(fs/2), btype) + for ba1_, ba2_ in zip(ba1, ba2): + xp_assert_close(ba1_, ba2_) + for fc in ((100, 200), (100.1, 200.2), (321.123, 432.123)): + for btype in ('bp', 'bs'): + ba1 = ellip(N, 1, 20, fc, btype, fs=fs) + for seq in (list, tuple, array): + fcnorm = seq([f/(fs/2) for f in fc]) + ba2 = ellip(N, 1, 20, fcnorm, btype) + for ba1_, ba2_ in zip(ba1, ba2): + xp_assert_close(ba1_, ba2_) + + def test_fs_validation(self): + with pytest.raises(ValueError, match="Sampling.*single scalar"): + iirnotch(0.06, 30, fs=np.array([10, 20])) + + with pytest.raises(ValueError, match="Sampling.*be none"): + iirnotch(0.06, 30, fs=None) + + +def test_sos_consistency(): + # Consistency checks of output='sos' for the specialized IIR filter + # design functions. 
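+ # For each design function below, the order-2 'sos' output is compared against the + # 'ba' coefficients packed as a single section, and the order-3/4 outputs against + # zpk2sos applied to the corresponding 'zpk' output.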
+    design_funcs = [(bessel, (0.1,)),
+                    (butter, (0.1,)),
+                    (cheby1, (45.0, 0.1)),
+                    (cheby2, (0.087, 0.1)),
+                    (ellip, (0.087, 45, 0.1))]
+    for func, args in design_funcs:
+        name = func.__name__
+
+        b, a = func(2, *args, output='ba')
+        sos = func(2, *args, output='sos')
+        xp_assert_close(sos, [np.hstack((b, a))], err_msg=f"{name}(2,...)")
+
+        zpk = func(3, *args, output='zpk')
+        sos = func(3, *args, output='sos')
+        xp_assert_close(sos, zpk2sos(*zpk), err_msg=f"{name}(3,...)")
+
+        zpk = func(4, *args, output='zpk')
+        sos = func(4, *args, output='sos')
+        xp_assert_close(sos, zpk2sos(*zpk), err_msg=f"{name}(4,...)")
+
+
+class TestIIRNotch:
+
+    def test_ba_output(self):
+        # Compare coefficients with Matlab ones
+        # for the equivalent input:
+        b, a = iirnotch(0.06, 30)
+        b2 = [
+            9.9686824e-01, -1.9584219e+00,
+            9.9686824e-01
+        ]
+        a2 = [
+            1.0000000e+00, -1.9584219e+00,
+            9.9373647e-01
+        ]
+
+        xp_assert_close(b, b2, rtol=1e-8)
+        xp_assert_close(a, a2, rtol=1e-8)
+
+    def test_frequency_response(self):
+        # Get filter coefficients
+        b, a = iirnotch(0.3, 30)
+
+        # Get frequency response
+        w, h = freqz(b, a, 1000)
+
+        # Pick 5 points
+        p = [200,  # w0 = 0.200
+             295,  # w0 = 0.295
+             300,  # w0 = 0.300
+             305,  # w0 = 0.305
+             400]  # w0 = 0.400
+
+        # Get the frequency response corresponding to each of those points
+        hp = h[p]
+
+        # Check that the frequency response fulfills the specifications:
+        # hp[0] and hp[4] correspond to frequencies distant from
+        # w0 = 0.3 and should be close to 1
+        xp_assert_close(abs(hp[0]), np.asarray(1.), rtol=1e-2, check_0d=False)
+        xp_assert_close(abs(hp[4]), np.asarray(1.), rtol=1e-2, check_0d=False)
+
+        # hp[1] and hp[3] correspond to frequencies approximately
+        # on the edges of the passband and should be close to -3dB
+        xp_assert_close(abs(hp[1]), 1/np.sqrt(2), rtol=1e-2)
+        xp_assert_close(abs(hp[3]), 1/np.sqrt(2), rtol=1e-2)
+
+        # hp[2] corresponds to the frequency that should be removed;
+        # the frequency response should be very close to 0
+        xp_assert_close(abs(hp[2]), np.asarray(0.0), atol=1e-10, check_0d=False)
+
+    def test_errors(self):
+        # Exception should be raised if w0 > 1 or w0 < 0
+        assert_raises(ValueError, iirnotch, w0=2, Q=30)
+        assert_raises(ValueError, iirnotch, w0=-1, Q=30)
+
+        # Exception should be raised if any of the parameters
+        # are not float (or cannot be converted to one)
+        assert_raises(ValueError, iirnotch, w0="blabla", Q=30)
+        assert_raises(TypeError, iirnotch, w0=-1, Q=[1, 2, 3])
+
+    def test_fs_param(self):
+        # Get filter coefficients
+        b, a = iirnotch(1500, 30, fs=10000)
+
+        # Get frequency response
+        w, h = freqz(b, a, 1000, fs=10000)
+
+        # Pick 5 points
+        p = [200,  # w0 = 1000
+             295,  # w0 = 1475
+             300,  # w0 = 1500
+             305,  # w0 = 1525
+             400]  # w0 = 2000
+
+        # Get the frequency response corresponding to each of those points
+        hp = h[p]
+
+        # Check that the frequency response fulfills the specifications:
+        # hp[0] and hp[4] correspond to frequencies distant from
+        # w0 = 1500 and should be close to 1
+        xp_assert_close(abs(hp[0]), np.ones_like(abs(hp[0])), rtol=1e-2,
+                        check_0d=False)
+        xp_assert_close(abs(hp[4]), np.ones_like(abs(hp[4])), rtol=1e-2,
+                        check_0d=False)
+
+        # hp[1] and hp[3] correspond to frequencies approximately
+        # on the edges of the passband and should be close to -3dB
+        xp_assert_close(abs(hp[1]), 1/np.sqrt(2), rtol=1e-2)
+        xp_assert_close(abs(hp[3]), 1/np.sqrt(2), rtol=1e-2)
+
+        # hp[2] corresponds to the frequency that should be removed;
+        # the frequency response should be very close to 0
+        xp_assert_close(abs(hp[2]), np.asarray(0.0),
atol=1e-10, check_0d=False) + + +class TestIIRPeak: + + def test_ba_output(self): + # Compare coefficients with Matlab ones + # for the equivalent input: + b, a = iirpeak(0.06, 30) + b2 = [ + 3.131764229e-03, 0, + -3.131764229e-03 + ] + a2 = [ + 1.0000000e+00, -1.958421917e+00, + 9.9373647e-01 + ] + xp_assert_close(b, b2, rtol=1e-8) + xp_assert_close(a, a2, rtol=1e-8) + + def test_frequency_response(self): + # Get filter coefficients + b, a = iirpeak(0.3, 30) + + # Get frequency response + w, h = freqz(b, a, 1000) + + # Pick 5 point + p = [30, # w0 = 0.030 + 295, # w0 = 0.295 + 300, # w0 = 0.300 + 305, # w0 = 0.305 + 800] # w0 = 0.800 + + # Get frequency response correspondent to each of those points + hp = h[p] + + # Check if the frequency response fulfill the specifications: + # hp[0] and hp[4] correspond to frequencies distant from + # w0 = 0.3 and should be close to 0 + xp_assert_close(abs(hp[0]), + np.zeros_like(abs(hp[0])), atol=1e-2, check_0d=False) + xp_assert_close(abs(hp[4]), + np.zeros_like(abs(hp[4])), atol=1e-2, check_0d=False) + + # hp[1] and hp[3] correspond to frequencies approximately + # on the edges of the passband and should be close to 10**(-3/20) + xp_assert_close(abs(hp[1]), 1/np.sqrt(2), rtol=1e-2) + xp_assert_close(abs(hp[3]), 1/np.sqrt(2), rtol=1e-2) + + # hp[2] correspond to the frequency that should be retained and + # the frequency response should be very close to 1 + xp_assert_close(abs(hp[2]), np.asarray(1.0), rtol=1e-10, check_0d=False) + + def test_errors(self): + # Exception should be raised if w0 > 1 or w0 <0 + assert_raises(ValueError, iirpeak, w0=2, Q=30) + assert_raises(ValueError, iirpeak, w0=-1, Q=30) + + # Exception should be raised if any of the parameters + # are not float (or cannot be converted to one) + assert_raises(ValueError, iirpeak, w0="blabla", Q=30) + assert_raises(TypeError, iirpeak, w0=-1, Q=[1, 2, 3]) + + def test_fs_param(self): + # Get filter coefficients + b, a = iirpeak(1200, 30, fs=8000) + + # Get frequency response + w, h = freqz(b, a, 1000, fs=8000) + + # Pick 5 point + p = [30, # w0 = 120 + 295, # w0 = 1180 + 300, # w0 = 1200 + 305, # w0 = 1220 + 800] # w0 = 3200 + + # Get frequency response correspondent to each of those points + hp = h[p] + + # Check if the frequency response fulfill the specifications: + # hp[0] and hp[4] correspond to frequencies distant from + # w0 = 1200 and should be close to 0 + xp_assert_close(abs(hp[0]), + np.zeros_like(abs(hp[0])), atol=1e-2, check_0d=False) + xp_assert_close(abs(hp[4]), + np.zeros_like(abs(hp[4])), atol=1e-2, check_0d=False) + + # hp[1] and hp[3] correspond to frequencies approximately + # on the edges of the passband and should be close to 10**(-3/20) + xp_assert_close(abs(hp[1]), 1/np.sqrt(2), rtol=1e-2) + xp_assert_close(abs(hp[3]), 1/np.sqrt(2), rtol=1e-2) + + # hp[2] correspond to the frequency that should be retained and + # the frequency response should be very close to 1 + xp_assert_close(abs(hp[2]), + np.ones_like(abs(hp[2])), rtol=1e-10, check_0d=False) + + +class TestIIRComb: + # Test erroneous input cases + def test_invalid_input(self): + # w0 is <= 0 or >= fs / 2 + fs = 1000 + for args in [(-fs, 30), (0, 35), (fs / 2, 40), (fs, 35)]: + with pytest.raises(ValueError, match='w0 must be between '): + iircomb(*args, fs=fs) + + # fs is not divisible by w0 + for args in [(120, 30), (157, 35)]: + with pytest.raises(ValueError, match='fs must be divisible '): + iircomb(*args, fs=fs) + + # https://github.com/scipy/scipy/issues/14043#issuecomment-1107349140 + # Previously, 
fs=44100, w0=49.999 was rejected, but fs=2, + # w0=49.999/int(44100/2) was accepted. Now it is rejected, too. + with pytest.raises(ValueError, match='fs must be divisible '): + iircomb(w0=49.999/int(44100/2), Q=30) + + with pytest.raises(ValueError, match='fs must be divisible '): + iircomb(w0=49.999, Q=30, fs=44100) + + # Filter type is not notch or peak + for args in [(0.2, 30, 'natch'), (0.5, 35, 'comb')]: + with pytest.raises(ValueError, match='ftype must be '): + iircomb(*args) + + # Verify that the filter's frequency response contains a + # notch at the cutoff frequency + @pytest.mark.parametrize('ftype', ('notch', 'peak')) + def test_frequency_response(self, ftype): + # Create a notching or peaking comb filter at 1000 Hz + b, a = iircomb(1000, 30, ftype=ftype, fs=10000) + + # Compute the frequency response + freqs, response = freqz(b, a, 1000, fs=10000) + + # Find the notch using argrelextrema + comb_points = argrelextrema(abs(response), np.less)[0] + + # Verify that the first notch sits at 1000 Hz + comb1 = comb_points[0] + xp_assert_close(freqs[comb1], np.asarray(1000.), check_0d=False) + + # Verify pass_zero parameter + @pytest.mark.parametrize('ftype,pass_zero,peak,notch', + [('peak', True, 123.45, 61.725), + ('peak', False, 61.725, 123.45), + ('peak', None, 61.725, 123.45), + ('notch', None, 61.725, 123.45), + ('notch', True, 123.45, 61.725), + ('notch', False, 61.725, 123.45)]) + def test_pass_zero(self, ftype, pass_zero, peak, notch): + # Create a notching or peaking comb filter + b, a = iircomb(123.45, 30, ftype=ftype, fs=1234.5, pass_zero=pass_zero) + + # Compute the frequency response + freqs, response = freqz(b, a, [peak, notch], fs=1234.5) + + # Verify that expected notches are notches and peaks are peaks + assert abs(response[0]) > 0.99 + assert abs(response[1]) < 1e-10 + + # All built-in IIR filters are real, so should have perfectly + # symmetrical poles and zeros. Then ba representation (using + # numpy.poly) will be purely real instead of having negligible + # imaginary parts. + def test_iir_symmetry(self): + b, a = iircomb(400, 30, fs=24000) + z, p, k = tf2zpk(b, a) + xp_assert_equal(sorted(z), sorted(z.conj())) + xp_assert_equal(sorted(p), sorted(p.conj())) + xp_assert_equal(k, np.real(k)) + + assert issubclass(b.dtype.type, np.floating) + assert issubclass(a.dtype.type, np.floating) + + # Verify filter coefficients with MATLAB's iircomb function + def test_ba_output(self): + b_notch, a_notch = iircomb(60, 35, ftype='notch', fs=600) + b_notch2 = [0.957020174408697, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, -0.957020174408697] + a_notch2 = [1.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, -0.914040348817395] + xp_assert_close(b_notch, b_notch2) + xp_assert_close(a_notch, a_notch2) + + b_peak, a_peak = iircomb(60, 35, ftype='peak', fs=600) + b_peak2 = [0.0429798255913026, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, -0.0429798255913026] + a_peak2 = [1.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.914040348817395] + xp_assert_close(b_peak, b_peak2) + xp_assert_close(a_peak, a_peak2) + + # Verify that https://github.com/scipy/scipy/issues/14043 is fixed + def test_nearest_divisor(self): + # Create a notching comb filter + b, a = iircomb(50/int(44100/2), 50.0, ftype='notch') + + # Compute the frequency response at an upper harmonic of 50 + freqs, response = freqz(b, a, [22000], fs=44100) + + # Before bug fix, this would produce N = 881, so that 22 kHz was ~0 dB. 
+ # Now N = 882 correctly and 22 kHz should be a notch <-220 dB + assert abs(response[0]) < 1e-10 + + def test_fs_validation(self): + with pytest.raises(ValueError, match="Sampling.*single scalar"): + iircomb(1000, 30, fs=np.array([10, 20])) + + with pytest.raises(ValueError, match="Sampling.*be none"): + iircomb(1000, 30, fs=None) + + +class TestIIRDesign: + + def test_exceptions(self): + with pytest.raises(ValueError, match="the same shape"): + iirdesign(0.2, [0.1, 0.3], 1, 40) + with pytest.raises(ValueError, match="the same shape"): + iirdesign(np.array([[0.3, 0.6], [0.3, 0.6]]), + np.array([[0.4, 0.5], [0.4, 0.5]]), 1, 40) + + # discrete filter with non-positive frequency + with pytest.raises(ValueError, match="must be greater than 0"): + iirdesign(0, 0.5, 1, 40) + with pytest.raises(ValueError, match="must be greater than 0"): + iirdesign(-0.1, 0.5, 1, 40) + with pytest.raises(ValueError, match="must be greater than 0"): + iirdesign(0.1, 0, 1, 40) + with pytest.raises(ValueError, match="must be greater than 0"): + iirdesign(0.1, -0.5, 1, 40) + with pytest.raises(ValueError, match="must be greater than 0"): + iirdesign([0, 0.3], [0.1, 0.5], 1, 40) + with pytest.raises(ValueError, match="must be greater than 0"): + iirdesign([-0.1, 0.3], [0.1, 0.5], 1, 40) + with pytest.raises(ValueError, match="must be greater than 0"): + iirdesign([0.1, 0], [0.1, 0.5], 1, 40) + with pytest.raises(ValueError, match="must be greater than 0"): + iirdesign([0.1, -0.3], [0.1, 0.5], 1, 40) + with pytest.raises(ValueError, match="must be greater than 0"): + iirdesign([0.1, 0.3], [0, 0.5], 1, 40) + with pytest.raises(ValueError, match="must be greater than 0"): + iirdesign([0.1, 0.3], [-0.1, 0.5], 1, 40) + with pytest.raises(ValueError, match="must be greater than 0"): + iirdesign([0.1, 0.3], [0.1, 0], 1, 40) + with pytest.raises(ValueError, match="must be greater than 0"): + iirdesign([0.1, 0.3], [0.1, -0.5], 1, 40) + + # analog filter with negative frequency + with pytest.raises(ValueError, match="must be greater than 0"): + iirdesign(-0.1, 0.5, 1, 40, analog=True) + with pytest.raises(ValueError, match="must be greater than 0"): + iirdesign(0.1, -0.5, 1, 40, analog=True) + with pytest.raises(ValueError, match="must be greater than 0"): + iirdesign([-0.1, 0.3], [0.1, 0.5], 1, 40, analog=True) + with pytest.raises(ValueError, match="must be greater than 0"): + iirdesign([0.1, -0.3], [0.1, 0.5], 1, 40, analog=True) + with pytest.raises(ValueError, match="must be greater than 0"): + iirdesign([0.1, 0.3], [-0.1, 0.5], 1, 40, analog=True) + with pytest.raises(ValueError, match="must be greater than 0"): + iirdesign([0.1, 0.3], [0.1, -0.5], 1, 40, analog=True) + + # discrete filter with fs=None, freq > 1 + with pytest.raises(ValueError, match="must be less than 1"): + iirdesign(1, 0.5, 1, 40) + with pytest.raises(ValueError, match="must be less than 1"): + iirdesign(1.1, 0.5, 1, 40) + with pytest.raises(ValueError, match="must be less than 1"): + iirdesign(0.1, 1, 1, 40) + with pytest.raises(ValueError, match="must be less than 1"): + iirdesign(0.1, 1.5, 1, 40) + with pytest.raises(ValueError, match="must be less than 1"): + iirdesign([1, 0.3], [0.1, 0.5], 1, 40) + with pytest.raises(ValueError, match="must be less than 1"): + iirdesign([1.1, 0.3], [0.1, 0.5], 1, 40) + with pytest.raises(ValueError, match="must be less than 1"): + iirdesign([0.1, 1], [0.1, 0.5], 1, 40) + with pytest.raises(ValueError, match="must be less than 1"): + iirdesign([0.1, 1.1], [0.1, 0.5], 1, 40) + with pytest.raises(ValueError, 
match="must be less than 1"): + iirdesign([0.1, 0.3], [1, 0.5], 1, 40) + with pytest.raises(ValueError, match="must be less than 1"): + iirdesign([0.1, 0.3], [1.1, 0.5], 1, 40) + with pytest.raises(ValueError, match="must be less than 1"): + iirdesign([0.1, 0.3], [0.1, 1], 1, 40) + with pytest.raises(ValueError, match="must be less than 1"): + iirdesign([0.1, 0.3], [0.1, 1.5], 1, 40) + + # discrete filter with fs>2, wp, ws < fs/2 must pass + iirdesign(100, 500, 1, 40, fs=2000) + iirdesign(500, 100, 1, 40, fs=2000) + iirdesign([200, 400], [100, 500], 1, 40, fs=2000) + iirdesign([100, 500], [200, 400], 1, 40, fs=2000) + + # discrete filter with fs>2, freq > fs/2: this must raise + with pytest.raises(ValueError, match="must be less than fs/2"): + iirdesign(1000, 400, 1, 40, fs=2000) + with pytest.raises(ValueError, match="must be less than fs/2"): + iirdesign(1100, 500, 1, 40, fs=2000) + with pytest.raises(ValueError, match="must be less than fs/2"): + iirdesign(100, 1000, 1, 40, fs=2000) + with pytest.raises(ValueError, match="must be less than fs/2"): + iirdesign(100, 1100, 1, 40, fs=2000) + with pytest.raises(ValueError, match="must be less than fs/2"): + iirdesign([1000, 400], [100, 500], 1, 40, fs=2000) + with pytest.raises(ValueError, match="must be less than fs/2"): + iirdesign([1100, 400], [100, 500], 1, 40, fs=2000) + with pytest.raises(ValueError, match="must be less than fs/2"): + iirdesign([200, 1000], [100, 500], 1, 40, fs=2000) + with pytest.raises(ValueError, match="must be less than fs/2"): + iirdesign([200, 1100], [100, 500], 1, 40, fs=2000) + with pytest.raises(ValueError, match="must be less than fs/2"): + iirdesign([200, 400], [1000, 500], 1, 40, fs=2000) + with pytest.raises(ValueError, match="must be less than fs/2"): + iirdesign([200, 400], [1100, 500], 1, 40, fs=2000) + with pytest.raises(ValueError, match="must be less than fs/2"): + iirdesign([200, 400], [100, 1000], 1, 40, fs=2000) + with pytest.raises(ValueError, match="must be less than fs/2"): + iirdesign([200, 400], [100, 1100], 1, 40, fs=2000) + + with pytest.raises(ValueError, match="strictly inside stopband"): + iirdesign([0.1, 0.4], [0.5, 0.6], 1, 40) + with pytest.raises(ValueError, match="strictly inside stopband"): + iirdesign([0.5, 0.6], [0.1, 0.4], 1, 40) + with pytest.raises(ValueError, match="strictly inside stopband"): + iirdesign([0.3, 0.6], [0.4, 0.7], 1, 40) + with pytest.raises(ValueError, match="strictly inside stopband"): + iirdesign([0.4, 0.7], [0.3, 0.6], 1, 40) + + def test_fs_validation(self): + with pytest.raises(ValueError, match="Sampling.*single scalar"): + iirfilter(1, 1, btype="low", fs=np.array([10, 20])) + + +class TestIIRFilter: + + def test_symmetry(self): + # All built-in IIR filters are real, so should have perfectly + # symmetrical poles and zeros. Then ba representation (using + # numpy.poly) will be purely real instead of having negligible + # imaginary parts. 
+ for N in np.arange(1, 26): + for ftype in ('butter', 'bessel', 'cheby1', 'cheby2', 'ellip'): + z, p, k = iirfilter(N, 1.1, 1, 20, 'low', analog=True, + ftype=ftype, output='zpk') + xp_assert_equal(sorted(z), + sorted(z.conj())) + xp_assert_equal(sorted(p), + sorted(p.conj())) + xp_assert_equal(k, np.real(k)) + + b, a = iirfilter(N, 1.1, 1, 20, 'low', analog=True, + ftype=ftype, output='ba') + assert issubclass(b.dtype.type, np.floating) + assert issubclass(a.dtype.type, np.floating) + + def test_int_inputs(self): + # Using integer frequency arguments and large N should not produce + # numpy integers that wraparound to negative numbers + k = iirfilter(24, 100, btype='low', analog=True, ftype='bessel', + output='zpk')[2] + k2 = 9.999999999999989e+47 + xp_assert_close(np.asarray(k), np.asarray(k2)) + # if fs is specified then the normalization of Wn to have + # 0 <= Wn <= 1 should not cause an integer overflow + # the following line should not raise an exception + iirfilter(20, [1000000000, 1100000000], btype='bp', + analog=False, fs=6250000000) + + def test_invalid_wn_size(self): + # low and high have 1 Wn, band and stop have 2 Wn + assert_raises(ValueError, iirfilter, 1, [0.1, 0.9], btype='low') + assert_raises(ValueError, iirfilter, 1, [0.2, 0.5], btype='high') + assert_raises(ValueError, iirfilter, 1, 0.2, btype='bp') + assert_raises(ValueError, iirfilter, 1, 400, btype='bs', analog=True) + + def test_invalid_wn_range(self): + # For digital filters, 0 <= Wn <= 1 + assert_raises(ValueError, iirfilter, 1, 2, btype='low') + assert_raises(ValueError, iirfilter, 1, [0.5, 1], btype='band') + assert_raises(ValueError, iirfilter, 1, [0., 0.5], btype='band') + assert_raises(ValueError, iirfilter, 1, -1, btype='high') + assert_raises(ValueError, iirfilter, 1, [1, 2], btype='band') + assert_raises(ValueError, iirfilter, 1, [10, 20], btype='stop') + + # analog=True with non-positive critical frequencies + with pytest.raises(ValueError, match="must be greater than 0"): + iirfilter(2, 0, btype='low', analog=True) + with pytest.raises(ValueError, match="must be greater than 0"): + iirfilter(2, -1, btype='low', analog=True) + with pytest.raises(ValueError, match="must be greater than 0"): + iirfilter(2, [0, 100], analog=True) + with pytest.raises(ValueError, match="must be greater than 0"): + iirfilter(2, [-1, 100], analog=True) + with pytest.raises(ValueError, match="must be greater than 0"): + iirfilter(2, [10, 0], analog=True) + with pytest.raises(ValueError, match="must be greater than 0"): + iirfilter(2, [10, -1], analog=True) + + def test_analog_sos(self): + # first order Butterworth filter with Wn = 1 has tf 1/(s+1) + sos = [[0., 0., 1., 0., 1., 1.]] + sos2 = iirfilter(N=1, Wn=1, btype='low', analog=True, output='sos') + assert_array_almost_equal(sos, sos2) + + def test_wn1_ge_wn0(self): + # gh-15773: should raise error if Wn[0] >= Wn[1] + with pytest.raises(ValueError, + match=r"Wn\[0\] must be less than Wn\[1\]"): + iirfilter(2, [0.5, 0.5]) + with pytest.raises(ValueError, + match=r"Wn\[0\] must be less than Wn\[1\]"): + iirfilter(2, [0.6, 0.5]) + + +class TestGroupDelay: + def test_identity_filter(self): + w, gd = group_delay((1, 1)) + assert_array_almost_equal(w, pi * np.arange(512) / 512) + assert_array_almost_equal(gd, np.zeros(512)) + w, gd = group_delay((1, 1), whole=True) + assert_array_almost_equal(w, 2 * pi * np.arange(512) / 512) + assert_array_almost_equal(gd, np.zeros(512)) + + def test_fir(self): + # Let's design linear phase FIR and check that the group delay + # is constant. 
+        N = 100
+        b = firwin(N + 1, 0.1)
+        w, gd = group_delay((b, 1))
+        xp_assert_close(gd, np.ones_like(gd)*(0.5 * N))
+
+    def test_iir(self):
+        # Let's design a Butterworth filter and test the group delay at
+        # some points against the MATLAB answer.
+        b, a = butter(4, 0.1)
+        w = np.linspace(0, pi, num=10, endpoint=False)
+        w, gd = group_delay((b, a), w=w)
+        matlab_gd = np.array([8.249313898506037, 11.958947880907104,
+                              2.452325615326005, 1.048918665702008,
+                              0.611382575635897, 0.418293269460578,
+                              0.317932917836572, 0.261371844762525,
+                              0.229038045801298, 0.212185774208521])
+        assert_array_almost_equal(gd, matlab_gd)
+
+    @pytest.mark.thread_unsafe
+    def test_singular(self):
+        # Let's create a filter with zeros and poles on the unit circle and
+        # check if warnings are raised at those frequencies.
+        z1 = np.exp(1j * 0.1 * pi)
+        z2 = np.exp(1j * 0.25 * pi)
+        p1 = np.exp(1j * 0.5 * pi)
+        p2 = np.exp(1j * 0.8 * pi)
+        b = np.convolve([1, -z1], [1, -z2])
+        a = np.convolve([1, -p1], [1, -p2])
+        w = np.array([0.1 * pi, 0.25 * pi, -0.5 * pi, -0.8 * pi])
+
+        w, gd = assert_warns(UserWarning, group_delay, (b, a), w=w)
+
+    def test_backward_compat(self):
+        # For backward compatibility, test if None acts as a wrapper for default
+        w1, gd1 = group_delay((1, 1))
+        w2, gd2 = group_delay((1, 1), None)
+        assert_array_almost_equal(w1, w2)
+        assert_array_almost_equal(gd1, gd2)
+
+    def test_fs_param(self):
+        # Let's design a Butterworth filter and test the group delay at
+        # some points against the normalized frequency answer.
+        b, a = butter(4, 4800, fs=96000)
+        w = np.linspace(0, 96000/2, num=10, endpoint=False)
+        w, gd = group_delay((b, a), w=w, fs=96000)
+        norm_gd = np.array([8.249313898506037, 11.958947880907104,
+                            2.452325615326005, 1.048918665702008,
+                            0.611382575635897, 0.418293269460578,
+                            0.317932917836572, 0.261371844762525,
+                            0.229038045801298, 0.212185774208521])
+        assert_array_almost_equal(gd, norm_gd)
+
+    def test_w_or_N_types(self):
+        # Measure at 8 equally-spaced points
+        for N in (8, np.int8(8), np.int16(8), np.int32(8), np.int64(8),
+                  np.array(8)):
+            w, gd = group_delay((1, 1), N)
+            assert_array_almost_equal(w, pi * np.arange(8) / 8)
+            assert_array_almost_equal(gd, np.zeros(8))
+
+        # Measure at frequency 8 rad/sec
+        for w in (8.0, 8.0+0j):
+            w_out, gd = group_delay((1, 1), w)
+            assert_array_almost_equal(w_out, [8])
+            assert_array_almost_equal(gd, [0])
+
+    def test_complex_coef(self):
+        # gh-19586: handle complex coef TFs
+        #
+        # for g(z) = (alpha*z+1)/(z+conjugate(alpha)), the group delay is
+        # given by the function below.
+        #
+        # def gd_expr(w, alpha):
+        #     num = 1j*(abs(alpha)**2-1)*np.exp(1j*w)
+        #     den = (alpha*np.exp(1j*w)+1)*(np.exp(1j*w)+np.conj(alpha))
+        #     return -np.imag(num/den)
+
+        # arbitrary non-real alpha
+        alpha = -0.6143077933232609+0.3355978770229421j
+        # 8 points from -pi to pi
+        wref = np.array([-3.141592653589793,
+                         -2.356194490192345,
+                         -1.5707963267948966,
+                         -0.7853981633974483,
+                         0.,
+                         0.7853981633974483,
+                         1.5707963267948966,
+                         2.356194490192345])
+        gdref = array([0.18759548150354619,
+                       0.17999770352712252,
+                       0.23598047471879877,
+                       0.46539443069907194,
+                       1.9511492420564165,
+                       3.478129975138865,
+                       0.6228594960517333,
+                       0.27067831839471224])
+        b = [alpha, 1]
+        a = [1, np.conjugate(alpha)]
+        gdtest = group_delay((b, a), wref)[1]
+        # need nulp=14 for macOS arm64 wheel builds; added 2 for some
+        # robustness on other platforms.
+        assert_array_almost_equal_nulp(gdtest, gdref, nulp=16)
+
+    def test_fs_validation(self):
+        with pytest.raises(ValueError, match="Sampling.*single scalar"):
+            group_delay((1, 1), fs=np.array([10, 20]))
+
+        with pytest.raises(ValueError, match="Sampling.*be none"):
+            group_delay((1, 1), fs=None)
+
+
+class TestGammatone:
+    # Test erroneous input cases.
+    def test_invalid_input(self):
+        # Cutoff frequency is <= 0 or >= fs / 2.
+        fs = 16000
+        for args in [(-fs, 'iir'), (0, 'fir'), (fs / 2, 'iir'), (fs, 'fir')]:
+            with pytest.raises(ValueError, match='The frequency must be '
+                                                 'between '):
+                gammatone(*args, fs=fs)
+
+        # Filter type is not fir or iir
+        for args in [(440, 'fie'), (220, 'it')]:
+            with pytest.raises(ValueError, match='ftype must be '):
+                gammatone(*args, fs=fs)
+
+        # Order is <= 0 or > 24 for FIR filter.
+        for args in [(440, 'fir', -50), (220, 'fir', 0), (110, 'fir', 25),
+                     (55, 'fir', 50)]:
+            with pytest.raises(ValueError, match='Invalid order: '):
+                gammatone(*args, numtaps=None, fs=fs)
+
+    # Verify that the filter's frequency response is approximately
+    # 1 at the cutoff frequency.
+    def test_frequency_response(self):
+        fs = 16000
+        ftypes = ['fir', 'iir']
+        for ftype in ftypes:
+            # Create a gammatone filter centered at 1000 Hz.
+            b, a = gammatone(1000, ftype, fs=fs)
+
+            # Calculate the frequency response.
+            freqs, response = freqz(b, a)
+
+            # Determine peak magnitude of the response
+            # and corresponding frequency.
+            response_max = np.max(np.abs(response))
+            freq_hz = freqs[np.argmax(np.abs(response))] / ((2 * np.pi) / fs)
+
+            # Check that the peak magnitude is 1 and the frequency is 1000 Hz.
+            xp_assert_close(response_max,
+                            np.ones_like(response_max), rtol=1e-2, check_0d=False)
+            xp_assert_close(freq_hz,
+                            1000*np.ones_like(freq_hz), rtol=1e-2, check_0d=False)
+
+    # All built-in IIR filters are real, so should have perfectly
+    # symmetrical poles and zeros. Then ba representation (using
+    # numpy.poly) will be purely real instead of having negligible
+    # imaginary parts.
+ def test_iir_symmetry(self): + b, a = gammatone(440, 'iir', fs=24000) + z, p, k = tf2zpk(b, a) + xp_assert_equal(sorted(z), sorted(z.conj())) + xp_assert_equal(sorted(p), sorted(p.conj())) + xp_assert_equal(k, np.real(k)) + + assert issubclass(b.dtype.type, np.floating) + assert issubclass(a.dtype.type, np.floating) + + # Verify FIR filter coefficients with the paper's + # Mathematica implementation + def test_fir_ba_output(self): + b, _ = gammatone(15, 'fir', fs=1000) + b2 = [0.0, 2.2608075649884e-04, + 1.5077903981357e-03, 4.2033687753998e-03, + 8.1508962726503e-03, 1.2890059089154e-02, + 1.7833890391666e-02, 2.2392613558564e-02, + 2.6055195863104e-02, 2.8435872863284e-02, + 2.9293319149544e-02, 2.852976858014e-02, + 2.6176557156294e-02, 2.2371510270395e-02, + 1.7332485267759e-02] + xp_assert_close(b, b2) + + # Verify IIR filter coefficients with the paper's MATLAB implementation + def test_iir_ba_output(self): + b, a = gammatone(440, 'iir', fs=16000) + b2 = [1.31494461367464e-06, -5.03391196645395e-06, + 7.00649426000897e-06, -4.18951968419854e-06, + 9.02614910412011e-07] + a2 = [1.0, -7.65646235454218, + 25.7584699322366, -49.7319214483238, + 60.2667361289181, -46.9399590980486, + 22.9474798808461, -6.43799381299034, + 0.793651554625368] + xp_assert_close(b, b2) + xp_assert_close(a, a2) + + def test_fs_validation(self): + with pytest.raises(ValueError, match="Sampling.*single scalar"): + gammatone(440, 'iir', fs=np.array([10, 20])) + + +class TestOrderFilter: + def test_doc_example(self): + x = np.arange(25).reshape(5, 5) + domain = np.identity(3) + + # minimum of elements 1,3,9 (zero-padded) on phone pad + # 7,5,3 on numpad + expected = np.array( + [[0., 0., 0., 0., 0.], + [0., 0., 1., 2., 0.], + [0., 5., 6., 7., 0.], + [0., 10., 11., 12., 0.], + [0., 0., 0., 0., 0.]], + ) + xp_assert_close(order_filter(x, domain, 0), expected, check_dtype=False) + + # maximum of elements 1,3,9 (zero-padded) on phone pad + # 7,5,3 on numpad + expected = np.array( + [[6., 7., 8., 9., 4.], + [11., 12., 13., 14., 9.], + [16., 17., 18., 19., 14.], + [21., 22., 23., 24., 19.], + [20., 21., 22., 23., 24.]], + ) + xp_assert_close(order_filter(x, domain, 2), expected, check_dtype=False) + + # and, just to complete the set, median of zero-padded elements + expected = np.array( + [[0, 1, 2, 3, 0], + [5, 6, 7, 8, 3], + [10, 11, 12, 13, 8], + [15, 16, 17, 18, 13], + [0, 15, 16, 17, 18]], + ) + xp_assert_close(order_filter(x, domain, 1), expected) + + def test_medfilt_order_filter(self): + x = np.arange(25).reshape(5, 5) + + # median of zero-padded elements 1,5,9 on phone pad + # 7,5,3 on numpad + expected = np.array( + [[0, 1, 2, 3, 0], + [1, 6, 7, 8, 4], + [6, 11, 12, 13, 9], + [11, 16, 17, 18, 14], + [0, 16, 17, 18, 0]], + ) + xp_assert_close(medfilt(x, 3), expected) + + xp_assert_close( + order_filter(x, np.ones((3, 3)), 4), + expected + ) + + def test_order_filter_asymmetric(self): + x = np.arange(25).reshape(5, 5) + domain = np.array( + [[1, 1, 0], + [0, 1, 0], + [0, 0, 0]], + ) + + expected = np.array( + [[0, 0, 0, 0, 0], + [0, 0, 1, 2, 3], + [0, 5, 6, 7, 8], + [0, 10, 11, 12, 13], + [0, 15, 16, 17, 18]] + ) + xp_assert_close(order_filter(x, domain, 0), expected) + + expected = np.array( + [[0, 0, 0, 0, 0], + [0, 1, 2, 3, 4], + [5, 6, 7, 8, 9], + [10, 11, 12, 13, 14], + [15, 16, 17, 18, 19]] + ) + xp_assert_close(order_filter(x, domain, 1), expected) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/tests/test_fir_filter_design.py 
b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/tests/test_fir_filter_design.py new file mode 100644 index 0000000000000000000000000000000000000000..cd2b60e63cdcb36e39c6775fa99394d123c2956b --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/tests/test_fir_filter_design.py @@ -0,0 +1,654 @@ +import numpy as np +from numpy.testing import assert_warns +from scipy._lib._array_api import ( + xp_assert_close, xp_assert_equal, + assert_almost_equal, assert_array_almost_equal, +) +from pytest import raises as assert_raises +import pytest + +from scipy.fft import fft +from scipy.special import sinc +from scipy.signal import (kaiser_beta, kaiser_atten, kaiserord, + firwin, firwin2, freqz, remez, firls, minimum_phase +) + + +def test_kaiser_beta(): + b = kaiser_beta(58.7) + assert_almost_equal(b, 0.1102 * 50.0) + b = kaiser_beta(22.0) + assert_almost_equal(b, 0.5842 + 0.07886) + b = kaiser_beta(21.0) + assert b == 0.0 + b = kaiser_beta(10.0) + assert b == 0.0 + + +def test_kaiser_atten(): + a = kaiser_atten(1, 1.0) + assert a == 7.95 + a = kaiser_atten(2, 1/np.pi) + assert a == 2.285 + 7.95 + + +def test_kaiserord(): + assert_raises(ValueError, kaiserord, 1.0, 1.0) + numtaps, beta = kaiserord(2.285 + 7.95 - 0.001, 1/np.pi) + assert (numtaps, beta) == (2, 0.0) + + +class TestFirwin: + + def check_response(self, h, expected_response, tol=.05): + N = len(h) + alpha = 0.5 * (N-1) + m = np.arange(0,N) - alpha # time indices of taps + for freq, expected in expected_response: + actual = abs(np.sum(h*np.exp(-1.j*np.pi*m*freq))) + mse = abs(actual-expected)**2 + assert mse < tol, f'response not as expected, mse={mse:g} > {tol:g}' + + def test_response(self): + N = 51 + f = .5 + # increase length just to try even/odd + h = firwin(N, f) # low-pass from 0 to f + self.check_response(h, [(.25,1), (.75,0)]) + + h = firwin(N+1, f, window='nuttall') # specific window + self.check_response(h, [(.25,1), (.75,0)]) + + h = firwin(N+2, f, pass_zero=False) # stop from 0 to f --> high-pass + self.check_response(h, [(.25,0), (.75,1)]) + + f1, f2, f3, f4 = .2, .4, .6, .8 + h = firwin(N+3, [f1, f2], pass_zero=False) # band-pass filter + self.check_response(h, [(.1,0), (.3,1), (.5,0)]) + + h = firwin(N+4, [f1, f2]) # band-stop filter + self.check_response(h, [(.1,1), (.3,0), (.5,1)]) + + h = firwin(N+5, [f1, f2, f3, f4], pass_zero=False, scale=False) + self.check_response(h, [(.1,0), (.3,1), (.5,0), (.7,1), (.9,0)]) + + h = firwin(N+6, [f1, f2, f3, f4]) # multiband filter + self.check_response(h, [(.1,1), (.3,0), (.5,1), (.7,0), (.9,1)]) + + h = firwin(N+7, 0.1, width=.03) # low-pass + self.check_response(h, [(.05,1), (.75,0)]) + + h = firwin(N+8, 0.1, pass_zero=False) # high-pass + self.check_response(h, [(.05,0), (.75,1)]) + + def mse(self, h, bands): + """Compute mean squared error versus ideal response across frequency + band. 
+ h -- coefficients + bands -- list of (left, right) tuples relative to 1==Nyquist of + passbands + """ + w, H = freqz(h, worN=1024) + f = w/np.pi + passIndicator = np.zeros(len(w), bool) + for left, right in bands: + passIndicator |= (f >= left) & (f < right) + Hideal = np.where(passIndicator, 1, 0) + mse = np.mean(abs(abs(H)-Hideal)**2) + return mse + + def test_scaling(self): + """ + For one lowpass, bandpass, and highpass example filter, this test + checks two things: + - the mean squared error over the frequency domain of the unscaled + filter is smaller than the scaled filter (true for rectangular + window) + - the response of the scaled filter is exactly unity at the center + of the first passband + """ + N = 11 + cases = [ + ([.5], True, (0, 1)), + ([0.2, .6], False, (.4, 1)), + ([.5], False, (1, 1)), + ] + for cutoff, pass_zero, expected_response in cases: + h = firwin(N, cutoff, scale=False, pass_zero=pass_zero, window='ones') + hs = firwin(N, cutoff, scale=True, pass_zero=pass_zero, window='ones') + if len(cutoff) == 1: + if pass_zero: + cutoff = [0] + cutoff + else: + cutoff = cutoff + [1] + msg = 'least squares violation' + assert self.mse(h, [cutoff]) < self.mse(hs, [cutoff]), msg + self.check_response(hs, [expected_response], 1e-12) + + def test_fs_validation(self): + with pytest.raises(ValueError, match="Sampling.*single scalar"): + firwin(51, .5, fs=np.array([10, 20])) + + +class TestFirWinMore: + """Different author, different style, different tests...""" + + def test_lowpass(self): + width = 0.04 + ntaps, beta = kaiserord(120, width) + kwargs = dict(cutoff=0.5, window=('kaiser', beta), scale=False) + taps = firwin(ntaps, **kwargs) + + # Check the symmetry of taps. + assert_array_almost_equal(taps[:ntaps//2], taps[ntaps:ntaps-ntaps//2-1:-1]) + + # Check the gain at a few samples where + # we know it should be approximately 0 or 1. + freq_samples = np.array([0.0, 0.25, 0.5-width/2, 0.5+width/2, 0.75, 1.0]) + freqs, response = freqz(taps, worN=np.pi*freq_samples) + assert_array_almost_equal(np.abs(response), + [1.0, 1.0, 1.0, 0.0, 0.0, 0.0], decimal=5) + + taps_str = firwin(ntaps, pass_zero='lowpass', **kwargs) + xp_assert_close(taps, taps_str) + + def test_highpass(self): + width = 0.04 + ntaps, beta = kaiserord(120, width) + + # Ensure that ntaps is odd. + ntaps |= 1 + + kwargs = dict(cutoff=0.5, window=('kaiser', beta), scale=False) + taps = firwin(ntaps, pass_zero=False, **kwargs) + + # Check the symmetry of taps. + assert_array_almost_equal(taps[:ntaps//2], taps[ntaps:ntaps-ntaps//2-1:-1]) + + # Check the gain at a few samples where + # we know it should be approximately 0 or 1. + freq_samples = np.array([0.0, 0.25, 0.5-width/2, 0.5+width/2, 0.75, 1.0]) + freqs, response = freqz(taps, worN=np.pi*freq_samples) + assert_array_almost_equal(np.abs(response), + [0.0, 0.0, 0.0, 1.0, 1.0, 1.0], decimal=5) + + taps_str = firwin(ntaps, pass_zero='highpass', **kwargs) + xp_assert_close(taps, taps_str) + + def test_bandpass(self): + width = 0.04 + ntaps, beta = kaiserord(120, width) + kwargs = dict(cutoff=[0.3, 0.7], window=('kaiser', beta), scale=False) + taps = firwin(ntaps, pass_zero=False, **kwargs) + + # Check the symmetry of taps. + assert_array_almost_equal(taps[:ntaps//2], taps[ntaps:ntaps-ntaps//2-1:-1]) + + # Check the gain at a few samples where + # we know it should be approximately 0 or 1. 
+ freq_samples = np.array([0.0, 0.2, 0.3-width/2, 0.3+width/2, 0.5, + 0.7-width/2, 0.7+width/2, 0.8, 1.0]) + freqs, response = freqz(taps, worN=np.pi*freq_samples) + assert_array_almost_equal(np.abs(response), + [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0], decimal=5) + + taps_str = firwin(ntaps, pass_zero='bandpass', **kwargs) + xp_assert_close(taps, taps_str) + + def test_bandstop_multi(self): + width = 0.04 + ntaps, beta = kaiserord(120, width) + kwargs = dict(cutoff=[0.2, 0.5, 0.8], window=('kaiser', beta), + scale=False) + taps = firwin(ntaps, **kwargs) + + # Check the symmetry of taps. + assert_array_almost_equal(taps[:ntaps//2], taps[ntaps:ntaps-ntaps//2-1:-1]) + + # Check the gain at a few samples where + # we know it should be approximately 0 or 1. + freq_samples = np.array([0.0, 0.1, 0.2-width/2, 0.2+width/2, 0.35, + 0.5-width/2, 0.5+width/2, 0.65, + 0.8-width/2, 0.8+width/2, 0.9, 1.0]) + freqs, response = freqz(taps, worN=np.pi*freq_samples) + assert_array_almost_equal(np.abs(response), + [1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0], + decimal=5) + + taps_str = firwin(ntaps, pass_zero='bandstop', **kwargs) + xp_assert_close(taps, taps_str) + + def test_fs_nyq(self): + """Test the fs and nyq keywords.""" + nyquist = 1000 + width = 40.0 + relative_width = width/nyquist + ntaps, beta = kaiserord(120, relative_width) + taps = firwin(ntaps, cutoff=[300, 700], window=('kaiser', beta), + pass_zero=False, scale=False, fs=2*nyquist) + + # Check the symmetry of taps. + assert_array_almost_equal(taps[:ntaps//2], taps[ntaps:ntaps-ntaps//2-1:-1]) + + # Check the gain at a few samples where + # we know it should be approximately 0 or 1. + freq_samples = np.array([0.0, 200, 300-width/2, 300+width/2, 500, + 700-width/2, 700+width/2, 800, 1000]) + freqs, response = freqz(taps, worN=np.pi*freq_samples/nyquist) + assert_array_almost_equal(np.abs(response), + [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0], decimal=5) + + def test_bad_cutoff(self): + """Test that invalid cutoff argument raises ValueError.""" + # cutoff values must be greater than 0 and less than 1. + assert_raises(ValueError, firwin, 99, -0.5) + assert_raises(ValueError, firwin, 99, 1.5) + # Don't allow 0 or 1 in cutoff. + assert_raises(ValueError, firwin, 99, [0, 0.5]) + assert_raises(ValueError, firwin, 99, [0.5, 1]) + # cutoff values must be strictly increasing. + assert_raises(ValueError, firwin, 99, [0.1, 0.5, 0.2]) + assert_raises(ValueError, firwin, 99, [0.1, 0.5, 0.5]) + # Must have at least one cutoff value. + assert_raises(ValueError, firwin, 99, []) + # 2D array not allowed. + assert_raises(ValueError, firwin, 99, [[0.1, 0.2],[0.3, 0.4]]) + # cutoff values must be less than nyq. + assert_raises(ValueError, firwin, 99, 50.0, fs=80) + assert_raises(ValueError, firwin, 99, [10, 20, 30], fs=50) + + def test_even_highpass_raises_value_error(self): + """Test that attempt to create a highpass filter with an even number + of taps raises a ValueError exception.""" + assert_raises(ValueError, firwin, 40, 0.5, pass_zero=False) + assert_raises(ValueError, firwin, 40, [.25, 0.5]) + + def test_bad_pass_zero(self): + """Test degenerate pass_zero cases.""" + with assert_raises(ValueError, match='pass_zero must be'): + firwin(41, 0.5, pass_zero='foo') + with assert_raises(TypeError, match='cannot be interpreted'): + firwin(41, 0.5, pass_zero=1.) 
+ for pass_zero in ('lowpass', 'highpass'): + with assert_raises(ValueError, match='cutoff must have one'): + firwin(41, [0.5, 0.6], pass_zero=pass_zero) + for pass_zero in ('bandpass', 'bandstop'): + with assert_raises(ValueError, match='must have at least two'): + firwin(41, [0.5], pass_zero=pass_zero) + + def test_fs_validation(self): + with pytest.raises(ValueError, match="Sampling.*single scalar"): + firwin2(51, .5, 1, fs=np.array([10, 20])) + + +class TestFirwin2: + + def test_invalid_args(self): + # `freq` and `gain` have different lengths. + with assert_raises(ValueError, match='must be of same length'): + firwin2(50, [0, 0.5, 1], [0.0, 1.0]) + # `nfreqs` is less than `ntaps`. + with assert_raises(ValueError, match='ntaps must be less than nfreqs'): + firwin2(50, [0, 0.5, 1], [0.0, 1.0, 1.0], nfreqs=33) + # Decreasing value in `freq` + with assert_raises(ValueError, match='must be nondecreasing'): + firwin2(50, [0, 0.5, 0.4, 1.0], [0, .25, .5, 1.0]) + # Value in `freq` repeated more than once. + with assert_raises(ValueError, match='must not occur more than twice'): + firwin2(50, [0, .1, .1, .1, 1.0], [0.0, 0.5, 0.75, 1.0, 1.0]) + # `freq` does not start at 0.0. + with assert_raises(ValueError, match='start with 0'): + firwin2(50, [0.5, 1.0], [0.0, 1.0]) + # `freq` does not end at fs/2. + with assert_raises(ValueError, match='end with fs/2'): + firwin2(50, [0.0, 0.5], [0.0, 1.0]) + # Value 0 is repeated in `freq` + with assert_raises(ValueError, match='0 must not be repeated'): + firwin2(50, [0.0, 0.0, 0.5, 1.0], [1.0, 1.0, 0.0, 0.0]) + # Value fs/2 is repeated in `freq` + with assert_raises(ValueError, match='fs/2 must not be repeated'): + firwin2(50, [0.0, 0.5, 1.0, 1.0], [1.0, 1.0, 0.0, 0.0]) + # Value in `freq` that is too close to a repeated number + with assert_raises(ValueError, match='cannot contain numbers ' + 'that are too close'): + firwin2(50, [0.0, 0.5 - np.finfo(float).eps * 0.5, 0.5, 0.5, 1.0], + [1.0, 1.0, 1.0, 0.0, 0.0]) + + # Type II filter, but the gain at nyquist frequency is not zero. + with assert_raises(ValueError, match='Type II filter'): + firwin2(16, [0.0, 0.5, 1.0], [0.0, 1.0, 1.0]) + + # Type III filter, but the gains at nyquist and zero rate are not zero. + with assert_raises(ValueError, match='Type III filter'): + firwin2(17, [0.0, 0.5, 1.0], [0.0, 1.0, 1.0], antisymmetric=True) + with assert_raises(ValueError, match='Type III filter'): + firwin2(17, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0], antisymmetric=True) + with assert_raises(ValueError, match='Type III filter'): + firwin2(17, [0.0, 0.5, 1.0], [1.0, 1.0, 1.0], antisymmetric=True) + + # Type IV filter, but the gain at zero rate is not zero. + with assert_raises(ValueError, match='Type IV filter'): + firwin2(16, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0], antisymmetric=True) + + def test01(self): + width = 0.04 + beta = 12.0 + ntaps = 400 + # Filter is 1 from w=0 to w=0.5, then decreases linearly from 1 to 0 as w + # increases from w=0.5 to w=1 (w=1 is the Nyquist frequency). + freq = [0.0, 0.5, 1.0] + gain = [1.0, 1.0, 0.0] + taps = firwin2(ntaps, freq, gain, window=('kaiser', beta)) + freq_samples = np.array([0.0, 0.25, 0.5-width/2, 0.5+width/2, + 0.75, 1.0-width/2]) + freqs, response = freqz(taps, worN=np.pi*freq_samples) + assert_array_almost_equal(np.abs(response), + [1.0, 1.0, 1.0, 1.0-width, 0.5, width], decimal=5) + + def test02(self): + width = 0.04 + beta = 12.0 + # ntaps must be odd for positive gain at Nyquist. + ntaps = 401 + # An ideal highpass filter. 
+ freq = [0.0, 0.5, 0.5, 1.0] + gain = [0.0, 0.0, 1.0, 1.0] + taps = firwin2(ntaps, freq, gain, window=('kaiser', beta)) + freq_samples = np.array([0.0, 0.25, 0.5-width, 0.5+width, 0.75, 1.0]) + freqs, response = freqz(taps, worN=np.pi*freq_samples) + assert_array_almost_equal(np.abs(response), + [0.0, 0.0, 0.0, 1.0, 1.0, 1.0], decimal=5) + + def test03(self): + width = 0.02 + ntaps, beta = kaiserord(120, width) + # ntaps must be odd for positive gain at Nyquist. + ntaps = int(ntaps) | 1 + freq = [0.0, 0.4, 0.4, 0.5, 0.5, 1.0] + gain = [1.0, 1.0, 0.0, 0.0, 1.0, 1.0] + taps = firwin2(ntaps, freq, gain, window=('kaiser', beta)) + freq_samples = np.array([0.0, 0.4-width, 0.4+width, 0.45, + 0.5-width, 0.5+width, 0.75, 1.0]) + freqs, response = freqz(taps, worN=np.pi*freq_samples) + assert_array_almost_equal(np.abs(response), + [1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0], decimal=5) + + def test04(self): + """Test firwin2 when window=None.""" + ntaps = 5 + # Ideal lowpass: gain is 1 on [0,0.5], and 0 on [0.5, 1.0] + freq = [0.0, 0.5, 0.5, 1.0] + gain = [1.0, 1.0, 0.0, 0.0] + taps = firwin2(ntaps, freq, gain, window=None, nfreqs=8193) + alpha = 0.5 * (ntaps - 1) + m = np.arange(0, ntaps) - alpha + h = 0.5 * sinc(0.5 * m) + assert_array_almost_equal(h, taps) + + def test05(self): + """Test firwin2 for calculating Type IV filters""" + ntaps = 1500 + + freq = [0.0, 1.0] + gain = [0.0, 1.0] + taps = firwin2(ntaps, freq, gain, window=None, antisymmetric=True) + assert_array_almost_equal(taps[: ntaps // 2], -taps[ntaps // 2:][::-1]) + + freqs, response = freqz(taps, worN=2048) + assert_array_almost_equal(abs(response), freqs / np.pi, decimal=4) + + def test06(self): + """Test firwin2 for calculating Type III filters""" + ntaps = 1501 + + freq = [0.0, 0.5, 0.55, 1.0] + gain = [0.0, 0.5, 0.0, 0.0] + taps = firwin2(ntaps, freq, gain, window=None, antisymmetric=True) + assert taps[ntaps // 2] == 0.0 + assert_array_almost_equal(taps[: ntaps // 2], -taps[ntaps // 2 + 1:][::-1]) + + freqs, response1 = freqz(taps, worN=2048) + response2 = np.interp(freqs / np.pi, freq, gain) + assert_array_almost_equal(abs(response1), response2, decimal=3) + + def test_fs_nyq(self): + taps1 = firwin2(80, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0]) + taps2 = firwin2(80, [0.0, 30.0, 60.0], [1.0, 1.0, 0.0], fs=120.0) + assert_array_almost_equal(taps1, taps2) + + def test_tuple(self): + taps1 = firwin2(150, (0.0, 0.5, 0.5, 1.0), (1.0, 1.0, 0.0, 0.0)) + taps2 = firwin2(150, [0.0, 0.5, 0.5, 1.0], [1.0, 1.0, 0.0, 0.0]) + assert_array_almost_equal(taps1, taps2) + + def test_input_modyfication(self): + freq1 = np.array([0.0, 0.5, 0.5, 1.0]) + freq2 = np.array(freq1) + firwin2(80, freq1, [1.0, 1.0, 0.0, 0.0]) + xp_assert_equal(freq1, freq2) + + +class TestRemez: + + def test_bad_args(self): + assert_raises(ValueError, remez, 11, [0.1, 0.4], [1], type='pooka') + + def test_hilbert(self): + N = 11 # number of taps in the filter + a = 0.1 # width of the transition band + + # design an unity gain hilbert bandpass filter from w to 0.5-w + h = remez(11, [a, 0.5-a], [1], type='hilbert') + + # make sure the filter has correct # of taps + assert len(h) == N, "Number of Taps" + + # make sure it is type III (anti-symmetric tap coefficients) + assert_array_almost_equal(h[:(N-1)//2], -h[:-(N-1)//2-1:-1]) + + # Since the requested response is symmetric, all even coefficients + # should be zero (or in this case really small) + assert (abs(h[1::2]) < 1e-15).all(), "Even Coefficients Equal Zero" + + # now check the frequency response + w, H = freqz(h, 1) + f = 
w/2/np.pi + Hmag = abs(H) + + # should have a zero at 0 and pi (in this case close to zero) + assert (Hmag[[0, -1]] < 0.02).all(), "Zero at zero and pi" + + # check that the pass band is close to unity + idx = np.logical_and(f > a, f < 0.5-a) + assert (abs(Hmag[idx] - 1) < 0.015).all(), "Pass Band Close To Unity" + + def test_compare(self): + # test comparison to MATLAB + k = [0.024590270518440, -0.041314581814658, -0.075943803756711, + -0.003530911231040, 0.193140296954975, 0.373400753484939, + 0.373400753484939, 0.193140296954975, -0.003530911231040, + -0.075943803756711, -0.041314581814658, 0.024590270518440] + h = remez(12, [0, 0.3, 0.5, 1], [1, 0], fs=2.) + xp_assert_close(h, k) + + h = [-0.038976016082299, 0.018704846485491, -0.014644062687875, + 0.002879152556419, 0.016849978528150, -0.043276706138248, + 0.073641298245579, -0.103908158578635, 0.129770906801075, + -0.147163447297124, 0.153302248456347, -0.147163447297124, + 0.129770906801075, -0.103908158578635, 0.073641298245579, + -0.043276706138248, 0.016849978528150, 0.002879152556419, + -0.014644062687875, 0.018704846485491, -0.038976016082299] + xp_assert_close(remez(21, [0, 0.8, 0.9, 1], [0, 1], fs=2.), h) + + def test_fs_validation(self): + with pytest.raises(ValueError, match="Sampling.*single scalar"): + remez(11, .1, 1, fs=np.array([10, 20])) + +class TestFirls: + + def test_bad_args(self): + # even numtaps + assert_raises(ValueError, firls, 10, [0.1, 0.2], [0, 0]) + # odd bands + assert_raises(ValueError, firls, 11, [0.1, 0.2, 0.4], [0, 0, 0]) + # len(bands) != len(desired) + assert_raises(ValueError, firls, 11, [0.1, 0.2, 0.3, 0.4], [0, 0, 0]) + # non-monotonic bands + assert_raises(ValueError, firls, 11, [0.2, 0.1], [0, 0]) + assert_raises(ValueError, firls, 11, [0.1, 0.2, 0.3, 0.3], [0] * 4) + assert_raises(ValueError, firls, 11, [0.3, 0.4, 0.1, 0.2], [0] * 4) + assert_raises(ValueError, firls, 11, [0.1, 0.3, 0.2, 0.4], [0] * 4) + # negative desired + assert_raises(ValueError, firls, 11, [0.1, 0.2], [-1, 1]) + # len(weight) != len(pairs) + assert_raises(ValueError, firls, 11, [0.1, 0.2], [0, 0], weight=[1, 2]) + # negative weight + assert_raises(ValueError, firls, 11, [0.1, 0.2], [0, 0], weight=[-1]) + + def test_firls(self): + N = 11 # number of taps in the filter + a = 0.1 # width of the transition band + + # design a halfband symmetric low-pass filter + h = firls(11, [0, a, 0.5-a, 0.5], [1, 1, 0, 0], fs=1.0) + + # make sure the filter has correct # of taps + assert h.shape[0] == N + + # make sure it is symmetric + midx = (N-1) // 2 + assert_array_almost_equal(h[:midx], h[:-midx-1:-1]) + + # make sure the center tap is 0.5 + assert_almost_equal(h[midx], 0.5) + + # For halfband symmetric, odd coefficients (except the center) + # should be zero (really small) + hodd = np.hstack((h[1:midx:2], h[-midx+1::2])) + assert_array_almost_equal(hodd, np.zeros_like(hodd)) + + # now check the frequency response + w, H = freqz(h, 1) + f = w/2/np.pi + Hmag = np.abs(H) + + # check that the pass band is close to unity + idx = np.logical_and(f > 0, f < a) + assert_array_almost_equal(Hmag[idx], np.ones_like(Hmag[idx]), decimal=3) + + # check that the stop band is close to zero + idx = np.logical_and(f > 0.5-a, f < 0.5) + assert_array_almost_equal(Hmag[idx], np.zeros_like(Hmag[idx]), decimal=3) + + def test_compare(self): + # compare to OCTAVE output + taps = firls(9, [0, 0.5, 0.55, 1], [1, 1, 0, 0], weight=[1, 2]) + # >> taps = firls(8, [0 0.5 0.55 1], [1 1 0 0], [1, 2]); + known_taps = [-6.26930101730182e-04, -1.03354450635036e-01, + 
-9.81576747564301e-03, 3.17271686090449e-01, + 5.11409425599933e-01, 3.17271686090449e-01, + -9.81576747564301e-03, -1.03354450635036e-01, + -6.26930101730182e-04] + xp_assert_close(taps, known_taps) + + # compare to MATLAB output + taps = firls(11, [0, 0.5, 0.5, 1], [1, 1, 0, 0], weight=[1, 2]) + # >> taps = firls(10, [0 0.5 0.5 1], [1 1 0 0], [1, 2]); + known_taps = [ + 0.058545300496815, -0.014233383714318, -0.104688258464392, + 0.012403323025279, 0.317930861136062, 0.488047220029700, + 0.317930861136062, 0.012403323025279, -0.104688258464392, + -0.014233383714318, 0.058545300496815] + xp_assert_close(taps, known_taps) + + # With linear changes: + taps = firls(7, (0, 1, 2, 3, 4, 5), [1, 0, 0, 1, 1, 0], fs=20) + # >> taps = firls(6, [0, 0.1, 0.2, 0.3, 0.4, 0.5], [1, 0, 0, 1, 1, 0]) + known_taps = [ + 1.156090832768218, -4.1385894727395849, 7.5288619164321826, + -8.5530572592947856, 7.5288619164321826, -4.1385894727395849, + 1.156090832768218] + xp_assert_close(taps, known_taps) + + def test_rank_deficient(self): + # solve() runs but warns (only sometimes, so here we don't use match) + x = firls(21, [0, 0.1, 0.9, 1], [1, 1, 0, 0]) + w, h = freqz(x, fs=2.) + absh2 = np.abs(h[:2]) + xp_assert_close(absh2, np.ones_like(absh2), atol=1e-5) + absh2 = np.abs(h[-2:]) + xp_assert_close(absh2, np.zeros_like(absh2), atol=1e-6, rtol=1e-7) + # switch to pinvh (tolerances could be higher with longer + # filters, but using shorter ones is faster computationally and + # the idea is the same) + x = firls(101, [0, 0.01, 0.99, 1], [1, 1, 0, 0]) + w, h = freqz(x, fs=2.) + mask = w < 0.01 + assert mask.sum() > 3 + habs = np.abs(h[mask]) + xp_assert_close(habs, np.ones_like(habs), atol=1e-4) + mask = w > 0.99 + assert mask.sum() > 3 + habs = np.abs(h[mask]) + xp_assert_close(habs, np.zeros_like(habs), atol=1e-4) + + def test_fs_validation(self): + with pytest.raises(ValueError, match="Sampling.*single scalar"): + firls(11, .1, 1, fs=np.array([10, 20])) + +class TestMinimumPhase: + @pytest.mark.thread_unsafe + def test_bad_args(self): + # not enough taps + assert_raises(ValueError, minimum_phase, [1.]) + assert_raises(ValueError, minimum_phase, [1., 1.]) + assert_raises(ValueError, minimum_phase, np.full(10, 1j)) + assert_raises(ValueError, minimum_phase, 'foo') + assert_raises(ValueError, minimum_phase, np.ones(10), n_fft=8) + assert_raises(ValueError, minimum_phase, np.ones(10), method='foo') + assert_warns(RuntimeWarning, minimum_phase, np.arange(3)) + with pytest.raises(ValueError, match="is only supported when"): + minimum_phase(np.ones(3), method='hilbert', half=False) + + def test_homomorphic(self): + # check that it can recover frequency responses of arbitrary + # linear-phase filters + + # for some cases we can get the actual filter back + h = [1, -1] + h_new = minimum_phase(np.convolve(h, h[::-1])) + xp_assert_close(h_new, np.asarray(h, dtype=np.float64), rtol=0.05) + + # but in general we only guarantee we get the magnitude back + rng = np.random.RandomState(0) + for n in (2, 3, 10, 11, 15, 16, 17, 20, 21, 100, 101): + h = rng.randn(n) + h_linear = np.convolve(h, h[::-1]) + h_new = minimum_phase(h_linear) + xp_assert_close(np.abs(fft(h_new)), np.abs(fft(h)), rtol=1e-4) + h_new = minimum_phase(h_linear, half=False) + assert len(h_linear) == len(h_new) + xp_assert_close(np.abs(fft(h_new)), np.abs(fft(h_linear)), rtol=1e-4) + + def test_hilbert(self): + # compare to MATLAB output of reference implementation + + # f=[0 0.3 0.5 1]; + # a=[1 1 0 0]; + # h=remez(11,f,a); + h = remez(12, [0, 0.3, 0.5, 1], 
[1, 0], fs=2.) + k = [0.349585548646686, 0.373552164395447, 0.326082685363438, + 0.077152207480935, -0.129943946349364, -0.059355880509749] + m = minimum_phase(h, 'hilbert') + xp_assert_close(m, k, rtol=5e-3) + + # f=[0 0.8 0.9 1]; + # a=[0 0 1 1]; + # h=remez(20,f,a); + h = remez(21, [0, 0.8, 0.9, 1], [0, 1], fs=2.) + k = [0.232486803906329, -0.133551833687071, 0.151871456867244, + -0.157957283165866, 0.151739294892963, -0.129293146705090, + 0.100787844523204, -0.065832656741252, 0.035361328741024, + -0.014977068692269, -0.158416139047557] + m = minimum_phase(h, 'hilbert', n_fft=2**19) + xp_assert_close(m, k, rtol=2e-3) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/tests/test_ltisys.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/tests/test_ltisys.py new file mode 100644 index 0000000000000000000000000000000000000000..826b39cb0e066b3a6198bbcf1a293f6e0497076b --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/tests/test_ltisys.py @@ -0,0 +1,1225 @@ +import warnings + +import numpy as np +from numpy.testing import suppress_warnings +import pytest +from pytest import raises as assert_raises +from scipy._lib._array_api import( + assert_almost_equal, xp_assert_equal, xp_assert_close +) + +from scipy.signal import (ss2tf, tf2ss, lti, + dlti, bode, freqresp, lsim, impulse, step, + abcd_normalize, place_poles, + TransferFunction, StateSpace, ZerosPolesGain) +from scipy.signal._filter_design import BadCoefficients +import scipy.linalg as linalg + + +def _assert_poles_close(P1,P2, rtol=1e-8, atol=1e-8): + """ + Check each pole in P1 is close to a pole in P2 with a 1e-8 + relative tolerance or 1e-8 absolute tolerance (useful for zero poles). + These tolerances are very strict but the systems tested are known to + accept these poles so we should not be far from what is requested. + """ + P2 = P2.copy() + for p1 in P1: + found = False + for p2_idx in range(P2.shape[0]): + if np.allclose([np.real(p1), np.imag(p1)], + [np.real(P2[p2_idx]), np.imag(P2[p2_idx])], + rtol, atol): + found = True + np.delete(P2, p2_idx) + break + if not found: + raise ValueError("Can't find pole " + str(p1) + " in " + str(P2)) + + +class TestPlacePoles: + + def _check(self, A, B, P, **kwargs): + """ + Perform the most common tests on the poles computed by place_poles + and return the Bunch object for further specific tests + """ + fsf = place_poles(A, B, P, **kwargs) + expected, _ = np.linalg.eig(A - np.dot(B, fsf.gain_matrix)) + _assert_poles_close(expected, fsf.requested_poles) + _assert_poles_close(expected, fsf.computed_poles) + _assert_poles_close(P,fsf.requested_poles) + return fsf + + def test_real(self): + # Test real pole placement using KNV and YT0 algorithm and example 1 in + # section 4 of the reference publication (see place_poles docstring) + A = np.array([1.380, -0.2077, 6.715, -5.676, -0.5814, -4.290, 0, + 0.6750, 1.067, 4.273, -6.654, 5.893, 0.0480, 4.273, + 1.343, -2.104]).reshape(4, 4) + B = np.array([0, 5.679, 1.136, 1.136, 0, 0, -3.146,0]).reshape(4, 2) + P = np.array([-0.2, -0.5, -5.0566, -8.6659]) + + # Check that both KNV and YT compute correct K matrix + self._check(A, B, P, method='KNV0') + self._check(A, B, P, method='YT') + + # Try to reach the specific case in _YT_real where two singular + # values are almost equal. 
This is to improve code coverage but I + # have no way to be sure this code is really reached + + # on some architectures this can lead to a RuntimeWarning invalid + # value in divide (see gh-7590), so suppress it for now + with np.errstate(invalid='ignore'): + self._check(A, B, (2,2,3,3)) + + def test_complex(self): + # Test complex pole placement on a linearized car model, taken from L. + # Jaulin, Automatique pour la robotique, Cours et Exercices, iSTE + # editions p 184/185 + A = np.array([[0, 7, 0, 0], + [0, 0, 0, 7/3.], + [0, 0, 0, 0], + [0, 0, 0, 0]]) + B = np.array([[0, 0], + [0, 0], + [1, 0], + [0, 1]]) + # Test complex poles on YT + P = np.array([-3, -1, -2-1j, -2+1j]) + # on macOS arm64 this can lead to a RuntimeWarning invalid + # value in divide, so suppress it for now + with np.errstate(divide='ignore', invalid='ignore'): + self._check(A, B, P) + + # Try to reach the specific case in _YT_complex where two singular + # values are almost equal. This is to improve code coverage but I + # have no way to be sure this code is really reached + + P = [0-1e-6j,0+1e-6j,-10,10] + with np.errstate(divide='ignore', invalid='ignore'): + self._check(A, B, P, maxiter=1000) + + # Try to reach the specific case in _YT_complex where the rank two + # update yields two null vectors. This test was found via Monte Carlo. + + A = np.array( + [-2148,-2902, -2267, -598, -1722, -1829, -165, -283, -2546, + -167, -754, -2285, -543, -1700, -584, -2978, -925, -1300, + -1583, -984, -386, -2650, -764, -897, -517, -1598, 2, -1709, + -291, -338, -153, -1804, -1106, -1168, -867, -2297] + ).reshape(6,6) + + B = np.array( + [-108, -374, -524, -1285, -1232, -161, -1204, -672, -637, + -15, -483, -23, -931, -780, -1245, -1129, -1290, -1502, + -952, -1374, -62, -964, -930, -939, -792, -756, -1437, + -491, -1543, -686] + ).reshape(6,5) + P = [-25.-29.j, -25.+29.j, 31.-42.j, 31.+42.j, 33.-41.j, 33.+41.j] + self._check(A, B, P) + + # Use a lot of poles to go through all cases for update_order + # in _YT_loop + + big_A = np.ones((11,11))-np.eye(11) + big_B = np.ones((11,10))-np.diag([1]*10,1)[:,1:] + big_A[:6,:6] = A + big_B[:6,:5] = B + + P = [-10,-20,-30,40,50,60,70,-20-5j,-20+5j,5+3j,5-3j] + with np.errstate(divide='ignore', invalid='ignore'): + self._check(big_A, big_B, P) + + #check with only complex poles and only real poles + P = [-10,-20,-30,-40,-50,-60,-70,-80,-90,-100] + self._check(big_A[:-1,:-1], big_B[:-1,:-1], P) + P = [-10+10j,-20+20j,-30+30j,-40+40j,-50+50j, + -10-10j,-20-20j,-30-30j,-40-40j,-50-50j] + self._check(big_A[:-1,:-1], big_B[:-1,:-1], P) + + # need a 5x5 array to ensure YT handles properly when there + # is only one real pole and several complex + A = np.array([0,7,0,0,0,0,0,7/3.,0,0,0,0,0,0,0,0, + 0,0,0,5,0,0,0,0,9]).reshape(5,5) + B = np.array([0,0,0,0,1,0,0,1,2,3]).reshape(5,2) + P = np.array([-2, -3+1j, -3-1j, -1+1j, -1-1j]) + with np.errstate(divide='ignore', invalid='ignore'): + place_poles(A, B, P) + + # same test with an odd number of real poles > 1 + # this is another specific case of YT + P = np.array([-2, -3, -4, -1+1j, -1-1j]) + with np.errstate(divide='ignore', invalid='ignore'): + self._check(A, B, P) + + def test_tricky_B(self): + # check we handle as we should the 1 column B matrices and + # n column B matrices (with n such as shape(A)=(n, n)) + A = np.array([1.380, -0.2077, 6.715, -5.676, -0.5814, -4.290, 0, + 0.6750, 1.067, 4.273, -6.654, 5.893, 0.0480, 4.273, + 1.343, -2.104]).reshape(4, 4) + B = np.array([0, 5.679, 1.136, 1.136, 0, 0, -3.146, 0, 1, 2, 3, 4, + 5, 6, 7, 
8]).reshape(4, 4) + + # KNV or YT are not called here, it's a specific case with only + # one unique solution + P = np.array([-0.2, -0.5, -5.0566, -8.6659]) + fsf = self._check(A, B, P) + # rtol and nb_iter should be set to np.nan as the identity can be + # used as transfer matrix + assert np.isnan(fsf.rtol) + assert np.isnan(fsf.nb_iter) + + # check with complex poles too as they trigger a specific case in + # the specific case :-) + P = np.array((-2+1j,-2-1j,-3,-2)) + fsf = self._check(A, B, P) + assert np.isnan(fsf.rtol) + assert np.isnan(fsf.nb_iter) + + #now test with a B matrix with only one column (no optimisation) + B = B[:,0].reshape(4,1) + P = np.array((-2+1j,-2-1j,-3,-2)) + fsf = self._check(A, B, P) + + # we can't optimize anything, check they are set to 0 as expected + assert fsf.rtol == 0 + assert fsf.nb_iter == 0 + + @pytest.mark.thread_unsafe + def test_errors(self): + # Test input mistakes from user + A = np.array([0,7,0,0,0,0,0,7/3.,0,0,0,0,0,0,0,0]).reshape(4,4) + B = np.array([0,0,0,0,1,0,0,1]).reshape(4,2) + + #should fail as the method keyword is invalid + assert_raises(ValueError, place_poles, A, B, (-2.1,-2.2,-2.3,-2.4), + method="foo") + + #should fail as poles are not 1D array + assert_raises(ValueError, place_poles, A, B, + np.array((-2.1,-2.2,-2.3,-2.4)).reshape(4,1)) + + #should fail as A is not a 2D array + assert_raises(ValueError, place_poles, A[:,:,np.newaxis], B, + (-2.1,-2.2,-2.3,-2.4)) + + #should fail as B is not a 2D array + assert_raises(ValueError, place_poles, A, B[:,:,np.newaxis], + (-2.1,-2.2,-2.3,-2.4)) + + #should fail as there are too many poles + assert_raises(ValueError, place_poles, A, B, (-2.1,-2.2,-2.3,-2.4,-3)) + + #should fail as there are not enough poles + assert_raises(ValueError, place_poles, A, B, (-2.1,-2.2,-2.3)) + + #should fail as the rtol is greater than 1 + assert_raises(ValueError, place_poles, A, B, (-2.1,-2.2,-2.3,-2.4), + rtol=42) + + #should fail as maxiter is smaller than 1 + assert_raises(ValueError, place_poles, A, B, (-2.1,-2.2,-2.3,-2.4), + maxiter=-42) + + # should fail as ndim(B) is two + assert_raises(ValueError, place_poles, A, B, (-2,-2,-2,-2)) + + # uncontrollable system + assert_raises(ValueError, place_poles, np.ones((4,4)), + np.ones((4,2)), (1,2,3,4)) + + # Should not raise ValueError as the poles can be placed but should + # raise a warning as the convergence is not reached + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + fsf = place_poles(A, B, (-1,-2,-3,-4), rtol=1e-16, maxiter=42) + assert len(w) == 1 + assert issubclass(w[-1].category, UserWarning) + assert ("Convergence was not reached after maxiter iterations" + in str(w[-1].message)) + assert fsf.nb_iter == 42 + + # should fail as a complex misses its conjugate + assert_raises(ValueError, place_poles, A, B, (-2+1j,-2-1j,-2+3j,-2)) + + # should fail as A is not square + assert_raises(ValueError, place_poles, A[:,:3], B, (-2,-3,-4,-5)) + + # should fail as B has not the same number of lines as A + assert_raises(ValueError, place_poles, A, B[:3,:], (-2,-3,-4,-5)) + + # should fail as KNV0 does not support complex poles + assert_raises(ValueError, place_poles, A, B, + (-2+1j,-2-1j,-2+3j,-2-3j), method="KNV0") + + +class TestSS2TF: + + def check_matrix_shapes(self, p, q, r): + ss2tf(np.zeros((p, p)), + np.zeros((p, q)), + np.zeros((r, p)), + np.zeros((r, q)), 0) + + def test_shapes(self): + # Each tuple holds: + # number of states, number of inputs, number of outputs + for p, q, r in [(3, 3, 3), (1, 3, 3), (1, 1, 1)]: 
+ self.check_matrix_shapes(p, q, r) + + def test_basic(self): + # Test a round trip through tf2ss and ss2tf. + b = np.array([1.0, 3.0, 5.0]) + a = np.array([1.0, 2.0, 3.0]) + + A, B, C, D = tf2ss(b, a) + xp_assert_close(A, [[-2., -3], [1, 0]], rtol=1e-13) + xp_assert_close(B, [[1.], [0]], rtol=1e-13) + xp_assert_close(C, [[1., 2]], rtol=1e-13) + xp_assert_close(D, [[1.]], rtol=1e-14) + + bb, aa = ss2tf(A, B, C, D) + xp_assert_close(bb[0], b, rtol=1e-13) + xp_assert_close(aa, a, rtol=1e-13) + + def test_zero_order_round_trip(self): + # See gh-5760 + tf = (2, 1) + A, B, C, D = tf2ss(*tf) + xp_assert_close(A, [[0.]], rtol=1e-13) + xp_assert_close(B, [[0.]], rtol=1e-13) + xp_assert_close(C, [[0.]], rtol=1e-13) + xp_assert_close(D, [[2.]], rtol=1e-13) + + num, den = ss2tf(A, B, C, D) + xp_assert_close(num, [[2., 0]], rtol=1e-13) + xp_assert_close(den, [1., 0], rtol=1e-13) + + tf = ([[5], [2]], 1) + A, B, C, D = tf2ss(*tf) + xp_assert_close(A, [[0.]], rtol=1e-13) + xp_assert_close(B, [[0.]], rtol=1e-13) + xp_assert_close(C, [[0.], [0]], rtol=1e-13) + xp_assert_close(D, [[5.], [2]], rtol=1e-13) + + num, den = ss2tf(A, B, C, D) + xp_assert_close(num, [[5., 0], [2, 0]], rtol=1e-13) + xp_assert_close(den, [1., 0], rtol=1e-13) + + def test_simo_round_trip(self): + # See gh-5753 + tf = ([[1, 2], [1, 1]], [1, 2]) + A, B, C, D = tf2ss(*tf) + xp_assert_close(A, [[-2.]], rtol=1e-13) + xp_assert_close(B, [[1.]], rtol=1e-13) + xp_assert_close(C, [[0.], [-1.]], rtol=1e-13) + xp_assert_close(D, [[1.], [1.]], rtol=1e-13) + + num, den = ss2tf(A, B, C, D) + xp_assert_close(num, [[1., 2.], [1., 1.]], rtol=1e-13) + xp_assert_close(den, [1., 2.], rtol=1e-13) + + tf = ([[1, 0, 1], [1, 1, 1]], [1, 1, 1]) + A, B, C, D = tf2ss(*tf) + xp_assert_close(A, [[-1., -1.], [1., 0.]], rtol=1e-13) + xp_assert_close(B, [[1.], [0.]], rtol=1e-13) + xp_assert_close(C, [[-1., 0.], [0., 0.]], rtol=1e-13) + xp_assert_close(D, [[1.], [1.]], rtol=1e-13) + + num, den = ss2tf(A, B, C, D) + xp_assert_close(num, [[1., 0., 1.], [1., 1., 1.]], rtol=1e-13) + xp_assert_close(den, [1., 1., 1.], rtol=1e-13) + + tf = ([[1, 2, 3], [1, 2, 3]], [1, 2, 3, 4]) + A, B, C, D = tf2ss(*tf) + xp_assert_close(A, [[-2., -3, -4], [1, 0, 0], [0, 1, 0]], rtol=1e-13) + xp_assert_close(B, [[1.], [0], [0]], rtol=1e-13) + xp_assert_close(C, [[1., 2, 3], [1, 2, 3]], rtol=1e-13) + xp_assert_close(D, [[0.], [0]], rtol=1e-13) + + num, den = ss2tf(A, B, C, D) + xp_assert_close(num, [[0., 1, 2, 3], [0, 1, 2, 3]], rtol=1e-13) + xp_assert_close(den, [1., 2, 3, 4], rtol=1e-13) + + tf = (np.array([1, [2, 3]], dtype=object), [1, 6]) + A, B, C, D = tf2ss(*tf) + xp_assert_close(A, [[-6.]], rtol=1e-31) + xp_assert_close(B, [[1.]], rtol=1e-31) + xp_assert_close(C, [[1.], [-9]], rtol=1e-31) + xp_assert_close(D, [[0.], [2]], rtol=1e-31) + + num, den = ss2tf(A, B, C, D) + xp_assert_close(num, [[0., 1], [2, 3]], rtol=1e-13) + xp_assert_close(den, [1., 6], rtol=1e-13) + + tf = (np.array([[1, -3], [1, 2, 3]], dtype=object), [1, 6, 5]) + A, B, C, D = tf2ss(*tf) + xp_assert_close(A, [[-6., -5], [1, 0]], rtol=1e-13) + xp_assert_close(B, [[1.], [0]], rtol=1e-13) + xp_assert_close(C, [[1., -3], [-4, -2]], rtol=1e-13) + xp_assert_close(D, [[0.], [1]], rtol=1e-13) + + num, den = ss2tf(A, B, C, D) + xp_assert_close(num, [[0., 1, -3], [1, 2, 3]], rtol=1e-13) + xp_assert_close(den, [1., 6, 5], rtol=1e-13) + + def test_all_int_arrays(self): + A = [[0, 1, 0], [0, 0, 1], [-3, -4, -2]] + B = [[0], [0], [1]] + C = [[5, 1, 0]] + D = [[0]] + num, den = ss2tf(A, B, C, D) + xp_assert_close(num, [[0.0, 0.0, 
1.0, 5.0]], rtol=1e-13, atol=1e-14) + xp_assert_close(den, [1.0, 2.0, 4.0, 3.0], rtol=1e-13) + + def test_multioutput(self): + # Regression test for gh-2669. + + # 4 states + A = np.array([[-1.0, 0.0, 1.0, 0.0], + [-1.0, 0.0, 2.0, 0.0], + [-4.0, 0.0, 3.0, 0.0], + [-8.0, 8.0, 0.0, 4.0]]) + + # 1 input + B = np.array([[0.3], + [0.0], + [7.0], + [0.0]]) + + # 3 outputs + C = np.array([[0.0, 1.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 1.0], + [8.0, 8.0, 0.0, 0.0]]) + + D = np.array([[0.0], + [0.0], + [1.0]]) + + # Get the transfer functions for all the outputs in one call. + b_all, a = ss2tf(A, B, C, D) + + # Get the transfer functions for each output separately. + b0, a0 = ss2tf(A, B, C[0], D[0]) + b1, a1 = ss2tf(A, B, C[1], D[1]) + b2, a2 = ss2tf(A, B, C[2], D[2]) + + # Check that we got the same results. + xp_assert_close(a0, a, rtol=1e-13) + xp_assert_close(a1, a, rtol=1e-13) + xp_assert_close(a2, a, rtol=1e-13) + xp_assert_close(b_all, np.vstack((b0, b1, b2)), rtol=1e-13, atol=1e-14) + + +class TestLsim: + digits_accuracy = 7 + + def lti_nowarn(self, *args): + with suppress_warnings() as sup: + sup.filter(BadCoefficients) + system = lti(*args) + return system + + def test_first_order(self): + # y' = -y + # exact solution is y(t) = exp(-t) + system = self.lti_nowarn(-1.,1.,1.,0.) + t = np.linspace(0,5) + u = np.zeros_like(t) + tout, y, x = lsim(system, u, t, X0=[1.0]) + expected_x = np.exp(-tout) + assert_almost_equal(x, expected_x) + assert_almost_equal(y, expected_x) + + def test_second_order(self): + t = np.linspace(0, 10, 1001) + u = np.zeros_like(t) + # Second order system with a repeated root: x''(t) + 2*x(t) + x(t) = 0. + # With initial conditions x(0)=1.0 and x'(t)=0.0, the exact solution + # is (1-t)*exp(-t). + system = self.lti_nowarn([1.0], [1.0, 2.0, 1.0]) + tout, y, x = lsim(system, u, t, X0=[1.0, 0.0]) + expected_x = (1.0 - tout) * np.exp(-tout) + assert_almost_equal(x[:, 0], expected_x) + + def test_integrator(self): + # integrator: y' = u + system = self.lti_nowarn(0., 1., 1., 0.) + t = np.linspace(0,5) + u = t + tout, y, x = lsim(system, u, t) + expected_x = 0.5 * tout**2 + assert_almost_equal(x, expected_x, decimal=self.digits_accuracy) + assert_almost_equal(y, expected_x, decimal=self.digits_accuracy) + + def test_two_states(self): + # A system with two state variables, two inputs, and one output. + A = np.array([[-1.0, 0.0], [0.0, -2.0]]) + B = np.array([[1.0, 0.0], [0.0, 1.0]]) + C = np.array([1.0, 0.0]) + D = np.zeros((1, 2)) + + system = self.lti_nowarn(A, B, C, D) + + t = np.linspace(0, 10.0, 21) + u = np.zeros((len(t), 2)) + tout, y, x = lsim(system, U=u, T=t, X0=[1.0, 1.0]) + expected_y = np.exp(-tout) + expected_x0 = np.exp(-tout) + expected_x1 = np.exp(-2.0 * tout) + assert_almost_equal(y, expected_y) + assert_almost_equal(x[:, 0], expected_x0) + assert_almost_equal(x[:, 1], expected_x1) + + def test_double_integrator(self): + # double integrator: y'' = 2u + A = np.array([[0., 1.], [0., 0.]]) + B = np.array([[0.], [1.]]) + C = np.array([[2., 0.]]) + system = self.lti_nowarn(A, B, C, 0.) 
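+ # With u(t) = 1 the states integrate to x2(t) = t and x1(t) = t**2 / 2,
+ # so the output y = 2*x1 = t**2; these closed-form expressions are what
+ # lsim is checked against below.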
+ t = np.linspace(0,5) + u = np.ones_like(t) + tout, y, x = lsim(system, u, t) + expected_x = np.transpose(np.array([0.5 * tout**2, tout])) + expected_y = tout**2 + assert_almost_equal(x, expected_x, decimal=self.digits_accuracy) + assert_almost_equal(y, expected_y, decimal=self.digits_accuracy) + + def test_jordan_block(self): + # Non-diagonalizable A matrix + # x1' + x1 = x2 + # x2' + x2 = u + # y = x1 + # Exact solution with u = 0 is y(t) = t exp(-t) + A = np.array([[-1., 1.], [0., -1.]]) + B = np.array([[0.], [1.]]) + C = np.array([[1., 0.]]) + system = self.lti_nowarn(A, B, C, 0.) + t = np.linspace(0,5) + u = np.zeros_like(t) + tout, y, x = lsim(system, u, t, X0=[0.0, 1.0]) + expected_y = tout * np.exp(-tout) + assert_almost_equal(y, expected_y) + + def test_miso(self): + # A system with two state variables, two inputs, and one output. + A = np.array([[-1.0, 0.0], [0.0, -2.0]]) + B = np.array([[1.0, 0.0], [0.0, 1.0]]) + C = np.array([1.0, 0.0]) + D = np.zeros((1,2)) + system = self.lti_nowarn(A, B, C, D) + + t = np.linspace(0, 5.0, 101) + u = np.zeros((len(t), 2)) + tout, y, x = lsim(system, u, t, X0=[1.0, 1.0]) + expected_y = np.exp(-tout) + expected_x0 = np.exp(-tout) + expected_x1 = np.exp(-2.0*tout) + assert_almost_equal(y, expected_y) + assert_almost_equal(x[:,0], expected_x0) + assert_almost_equal(x[:,1], expected_x1) + + def test_nonzero_initial_time(self): + system = self.lti_nowarn(-1.,1.,1.,0.) + t = np.linspace(1,2) + u = np.zeros_like(t) + tout, y, x = lsim(system, u, t, X0=[1.0]) + expected_y = np.exp(-tout) + assert_almost_equal(y, expected_y) + + def test_nonequal_timesteps(self): + t = np.array([0.0, 1.0, 1.0, 3.0]) + u = np.array([0.0, 0.0, 1.0, 1.0]) + # Simple integrator: x'(t) = u(t) + system = ([1.0], [1.0, 0.0]) + with assert_raises(ValueError, + match="Time steps are not equally spaced."): + tout, y, x = lsim(system, u, t, X0=[1.0]) + + +class TestImpulse: + def test_first_order(self): + # First order system: x'(t) + x(t) = u(t) + # Exact impulse response is x(t) = exp(-t). + system = ([1.0], [1.0,1.0]) + tout, y = impulse(system) + expected_y = np.exp(-tout) + assert_almost_equal(y, expected_y) + + def test_first_order_fixed_time(self): + # Specify the desired time values for the output. + + # First order system: x'(t) + x(t) = u(t) + # Exact impulse response is x(t) = exp(-t). + system = ([1.0], [1.0,1.0]) + n = 21 + t = np.linspace(0, 2.0, n) + tout, y = impulse(system, T=t) + assert tout.shape == (n,) + assert_almost_equal(tout, t) + expected_y = np.exp(-t) + assert_almost_equal(y, expected_y) + + def test_first_order_initial(self): + # Specify an initial condition as a scalar. + + # First order system: x'(t) + x(t) = u(t), x(0)=3.0 + # Exact impulse response is x(t) = 4*exp(-t). + system = ([1.0], [1.0,1.0]) + tout, y = impulse(system, X0=3.0) + expected_y = 4.0 * np.exp(-tout) + assert_almost_equal(y, expected_y) + + def test_first_order_initial_list(self): + # Specify an initial condition as a list. + + # First order system: x'(t) + x(t) = u(t), x(0)=3.0 + # Exact impulse response is x(t) = 4*exp(-t). 
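+ # (The unit impulse integrates to a jump of 1 in the state, so the
+ # effective initial condition is 3 + 1 = 4, which then decays as
+ # 4*exp(-t), matching the exact response quoted above.)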
+ system = ([1.0], [1.0,1.0]) + tout, y = impulse(system, X0=[3.0]) + expected_y = 4.0 * np.exp(-tout) + assert_almost_equal(y, expected_y) + + def test_integrator(self): + # Simple integrator: x'(t) = u(t) + system = ([1.0], [1.0,0.0]) + tout, y = impulse(system) + expected_y = np.ones_like(tout) + assert_almost_equal(y, expected_y) + + def test_second_order(self): + # Second order system with a repeated root: + # x''(t) + 2*x(t) + x(t) = u(t) + # The exact impulse response is t*exp(-t). + system = ([1.0], [1.0, 2.0, 1.0]) + tout, y = impulse(system) + expected_y = tout * np.exp(-tout) + assert_almost_equal(y, expected_y) + + def test_array_like(self): + # Test that function can accept sequences, scalars. + system = ([1.0], [1.0, 2.0, 1.0]) + # TODO: add meaningful test where X0 is a list + tout, y = impulse(system, X0=[3], T=[5, 6]) + tout, y = impulse(system, X0=[3], T=[5]) + + def test_array_like2(self): + system = ([1.0], [1.0, 2.0, 1.0]) + tout, y = impulse(system, X0=3, T=5) + + +class TestStep: + def test_first_order(self): + # First order system: x'(t) + x(t) = u(t) + # Exact step response is x(t) = 1 - exp(-t). + system = ([1.0], [1.0,1.0]) + tout, y = step(system) + expected_y = 1.0 - np.exp(-tout) + assert_almost_equal(y, expected_y) + + def test_first_order_fixed_time(self): + # Specify the desired time values for the output. + + # First order system: x'(t) + x(t) = u(t) + # Exact step response is x(t) = 1 - exp(-t). + system = ([1.0], [1.0,1.0]) + n = 21 + t = np.linspace(0, 2.0, n) + tout, y = step(system, T=t) + assert tout.shape == (n,) + assert_almost_equal(tout, t) + expected_y = 1 - np.exp(-t) + assert_almost_equal(y, expected_y) + + def test_first_order_initial(self): + # Specify an initial condition as a scalar. + + # First order system: x'(t) + x(t) = u(t), x(0)=3.0 + # Exact step response is x(t) = 1 + 2*exp(-t). + system = ([1.0], [1.0,1.0]) + tout, y = step(system, X0=3.0) + expected_y = 1 + 2.0*np.exp(-tout) + assert_almost_equal(y, expected_y) + + def test_first_order_initial_list(self): + # Specify an initial condition as a list. + + # First order system: x'(t) + x(t) = u(t), x(0)=3.0 + # Exact step response is x(t) = 1 + 2*exp(-t). + system = ([1.0], [1.0,1.0]) + tout, y = step(system, X0=[3.0]) + expected_y = 1 + 2.0*np.exp(-tout) + assert_almost_equal(y, expected_y) + + def test_integrator(self): + # Simple integrator: x'(t) = u(t) + # Exact step response is x(t) = t. + system = ([1.0],[1.0,0.0]) + tout, y = step(system) + expected_y = tout + assert_almost_equal(y, expected_y) + + def test_second_order(self): + # Second order system with a repeated root: + # x''(t) + 2*x(t) + x(t) = u(t) + # The exact step response is 1 - (1 + t)*exp(-t). + system = ([1.0], [1.0, 2.0, 1.0]) + tout, y = step(system) + expected_y = 1 - (1 + tout) * np.exp(-tout) + assert_almost_equal(y, expected_y) + + def test_array_like(self): + # Test that function can accept sequences, scalars. + system = ([1.0], [1.0, 2.0, 1.0]) + # TODO: add meaningful test where X0 is a list + tout, y = step(system, T=[5, 6]) + + def test_complex_input(self): + # Test that complex input doesn't raise an error. + # `step` doesn't seem to have been designed for complex input, but this + # works and may be used, so add regression test. See gh-2654. + step(([], [-1], 1+0j)) + + +class TestLti: + def test_lti_instantiation(self): + # Test that lti can be instantiated with sequences, scalars. + # See PR-225. 
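+ # lti dispatches on the number of positional arguments: two arguments
+ # give a TransferFunction(num, den), three give a ZerosPolesGain(z, p, k)
+ # and four give a StateSpace(A, B, C, D); each branch is exercised below.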
+ + # TransferFunction + s = lti([1], [-1]) + assert isinstance(s, TransferFunction) + assert isinstance(s, lti) + assert not isinstance(s, dlti) + assert s.dt is None + + # ZerosPolesGain + s = lti(np.array([]), np.array([-1]), 1) + assert isinstance(s, ZerosPolesGain) + assert isinstance(s, lti) + assert not isinstance(s, dlti) + assert s.dt is None + + # StateSpace + s = lti([], [-1], 1) + s = lti([1], [-1], 1, 3) + assert isinstance(s, StateSpace) + assert isinstance(s, lti) + assert not isinstance(s, dlti) + assert s.dt is None + + +class TestStateSpace: + def test_initialization(self): + # Check that all initializations work + StateSpace(1, 1, 1, 1) + StateSpace([1], [2], [3], [4]) + StateSpace(np.array([[1, 2], [3, 4]]), np.array([[1], [2]]), + np.array([[1, 0]]), np.array([[0]])) + + def test_conversion(self): + # Check the conversion functions + s = StateSpace(1, 2, 3, 4) + assert isinstance(s.to_ss(), StateSpace) + assert isinstance(s.to_tf(), TransferFunction) + assert isinstance(s.to_zpk(), ZerosPolesGain) + + # Make sure copies work + assert StateSpace(s) is not s + assert s.to_ss() is not s + + def test_properties(self): + # Test setters/getters for cross class properties. + # This implicitly tests to_tf() and to_zpk() + + # Getters + s = StateSpace(1, 1, 1, 1) + xp_assert_equal(s.poles, [1.]) + xp_assert_equal(s.zeros, [0.]) + assert s.dt is None + + def test_operators(self): + # Test +/-/* operators on systems + + class BadType: + pass + + s1 = StateSpace(np.array([[-0.5, 0.7], [0.3, -0.8]]), + np.array([[1], [0]]), + np.array([[1, 0]]), + np.array([[0]]), + ) + + s2 = StateSpace(np.array([[-0.2, -0.1], [0.4, -0.1]]), + np.array([[1], [0]]), + np.array([[1, 0]]), + np.array([[0]]) + ) + + s_discrete = s1.to_discrete(0.1) + s2_discrete = s2.to_discrete(0.2) + s3_discrete = s2.to_discrete(0.1) + + # Impulse response + t = np.linspace(0, 1, 100) + u = np.zeros_like(t) + u[0] = 1 + + # Test multiplication + for typ in (int, float, complex, np.float32, np.complex128, np.array): + xp_assert_close(lsim(typ(2) * s1, U=u, T=t)[1], + typ(2) * lsim(s1, U=u, T=t)[1]) + + xp_assert_close(lsim(s1 * typ(2), U=u, T=t)[1], + lsim(s1, U=u, T=t)[1] * typ(2)) + + xp_assert_close(lsim(s1 / typ(2), U=u, T=t)[1], + lsim(s1, U=u, T=t)[1] / typ(2)) + + with assert_raises(TypeError): + typ(2) / s1 + + xp_assert_close(lsim(s1 * 2, U=u, T=t)[1], + lsim(s1, U=2 * u, T=t)[1]) + + xp_assert_close(lsim(s1 * s2, U=u, T=t)[1], + lsim(s1, U=lsim(s2, U=u, T=t)[1], T=t)[1], + atol=1e-5) + + with assert_raises(TypeError): + s1 / s1 + + with assert_raises(TypeError): + s1 * s_discrete + + with assert_raises(TypeError): + # Check different discretization constants + s_discrete * s2_discrete + + with assert_raises(TypeError): + s1 * BadType() + + with assert_raises(TypeError): + BadType() * s1 + + with assert_raises(TypeError): + s1 / BadType() + + with assert_raises(TypeError): + BadType() / s1 + + # Test addition + xp_assert_close(lsim(s1 + 2, U=u, T=t)[1], + 2 * u + lsim(s1, U=u, T=t)[1]) + + # Check for dimension mismatch + with assert_raises(ValueError): + s1 + np.array([1, 2]) + + with assert_raises(ValueError): + np.array([1, 2]) + s1 + + with assert_raises(TypeError): + s1 + s_discrete + + with assert_raises(ValueError): + s1 / np.array([[1, 2], [3, 4]]) + + with assert_raises(TypeError): + # Check different discretization constants + s_discrete + s2_discrete + + with assert_raises(TypeError): + s1 + BadType() + + with assert_raises(TypeError): + BadType() + s1 + + xp_assert_close(lsim(s1 + s2, U=u, 
T=t)[1], + lsim(s1, U=u, T=t)[1] + lsim(s2, U=u, T=t)[1]) + + # Test subtraction + xp_assert_close(lsim(s1 - 2, U=u, T=t)[1], + -2 * u + lsim(s1, U=u, T=t)[1]) + + xp_assert_close(lsim(2 - s1, U=u, T=t)[1], + 2 * u + lsim(-s1, U=u, T=t)[1]) + + xp_assert_close(lsim(s1 - s2, U=u, T=t)[1], + lsim(s1, U=u, T=t)[1] - lsim(s2, U=u, T=t)[1]) + + with assert_raises(TypeError): + s1 - BadType() + + with assert_raises(TypeError): + BadType() - s1 + + s = s_discrete + s3_discrete + assert s.dt == 0.1 + + s = s_discrete * s3_discrete + assert s.dt == 0.1 + + s = 3 * s_discrete + assert s.dt == 0.1 + + s = -s_discrete + assert s.dt == 0.1 + +class TestTransferFunction: + def test_initialization(self): + # Check that all initializations work + TransferFunction(1, 1) + TransferFunction([1], [2]) + TransferFunction(np.array([1]), np.array([2])) + + def test_conversion(self): + # Check the conversion functions + s = TransferFunction([1, 0], [1, -1]) + assert isinstance(s.to_ss(), StateSpace) + assert isinstance(s.to_tf(), TransferFunction) + assert isinstance(s.to_zpk(), ZerosPolesGain) + + # Make sure copies work + assert TransferFunction(s) is not s + assert s.to_tf() is not s + + def test_properties(self): + # Test setters/getters for cross class properties. + # This implicitly tests to_ss() and to_zpk() + + # Getters + s = TransferFunction([1, 0], [1, -1]) + xp_assert_equal(s.poles, [1.]) + xp_assert_equal(s.zeros, [0.]) + + +class TestZerosPolesGain: + def test_initialization(self): + # Check that all initializations work + ZerosPolesGain(1, 1, 1) + ZerosPolesGain([1], [2], 1) + ZerosPolesGain(np.array([1]), np.array([2]), 1) + + def test_conversion(self): + #Check the conversion functions + s = ZerosPolesGain(1, 2, 3) + assert isinstance(s.to_ss(), StateSpace) + assert isinstance(s.to_tf(), TransferFunction) + assert isinstance(s.to_zpk(), ZerosPolesGain) + + # Make sure copies work + assert ZerosPolesGain(s) is not s + assert s.to_zpk() is not s + + +class Test_abcd_normalize: + def setup_method(self): + self.A = np.array([[1.0, 2.0], [3.0, 4.0]]) + self.B = np.array([[-1.0], [5.0]]) + self.C = np.array([[4.0, 5.0]]) + self.D = np.array([[2.5]]) + + def test_no_matrix_fails(self): + assert_raises(ValueError, abcd_normalize) + + def test_A_nosquare_fails(self): + assert_raises(ValueError, abcd_normalize, [1, -1], + self.B, self.C, self.D) + + def test_AB_mismatch_fails(self): + assert_raises(ValueError, abcd_normalize, self.A, [-1, 5], + self.C, self.D) + + def test_AC_mismatch_fails(self): + assert_raises(ValueError, abcd_normalize, self.A, self.B, + [[4.0], [5.0]], self.D) + + def test_CD_mismatch_fails(self): + assert_raises(ValueError, abcd_normalize, self.A, self.B, + self.C, [2.5, 0]) + + def test_BD_mismatch_fails(self): + assert_raises(ValueError, abcd_normalize, self.A, [-1, 5], + self.C, self.D) + + def test_normalized_matrices_unchanged(self): + A, B, C, D = abcd_normalize(self.A, self.B, self.C, self.D) + xp_assert_equal(A, self.A) + xp_assert_equal(B, self.B) + xp_assert_equal(C, self.C) + xp_assert_equal(D, self.D) + + def test_shapes(self): + A, B, C, D = abcd_normalize(self.A, self.B, [1, 0], 0) + xp_assert_equal(A.shape[0], A.shape[1]) + xp_assert_equal(A.shape[0], B.shape[0]) + xp_assert_equal(A.shape[0], C.shape[1]) + xp_assert_equal(C.shape[0], D.shape[0]) + xp_assert_equal(B.shape[1], D.shape[1]) + + def test_zero_dimension_is_not_none1(self): + B_ = np.zeros((2, 0)) + D_ = np.zeros((0, 0)) + A, B, C, D = abcd_normalize(A=self.A, B=B_, D=D_) + xp_assert_equal(A, self.A) + 
xp_assert_equal(B, B_) + xp_assert_equal(D, D_) + assert C.shape[0] == D_.shape[0] + assert C.shape[1] == self.A.shape[0] + + def test_zero_dimension_is_not_none2(self): + B_ = np.zeros((2, 0)) + C_ = np.zeros((0, 2)) + A, B, C, D = abcd_normalize(A=self.A, B=B_, C=C_) + xp_assert_equal(A, self.A) + xp_assert_equal(B, B_) + xp_assert_equal(C, C_) + assert D.shape[0] == C_.shape[0] + assert D.shape[1] == B_.shape[1] + + def test_missing_A(self): + A, B, C, D = abcd_normalize(B=self.B, C=self.C, D=self.D) + assert A.shape[0] == A.shape[1] + assert A.shape[0] == B.shape[0] + assert A.shape == (self.B.shape[0], self.B.shape[0]) + + def test_missing_B(self): + A, B, C, D = abcd_normalize(A=self.A, C=self.C, D=self.D) + assert B.shape[0] == A.shape[0] + assert B.shape[1] == D.shape[1] + assert B.shape == (self.A.shape[0], self.D.shape[1]) + + def test_missing_C(self): + A, B, C, D = abcd_normalize(A=self.A, B=self.B, D=self.D) + assert C.shape[0] == D.shape[0] + assert C.shape[1] == A.shape[0] + assert C.shape == (self.D.shape[0], self.A.shape[0]) + + def test_missing_D(self): + A, B, C, D = abcd_normalize(A=self.A, B=self.B, C=self.C) + assert D.shape[0] == C.shape[0] + assert D.shape[1] == B.shape[1] + assert D.shape == (self.C.shape[0], self.B.shape[1]) + + def test_missing_AB(self): + A, B, C, D = abcd_normalize(C=self.C, D=self.D) + assert A.shape[0] == A.shape[1] + assert A.shape[0] == B.shape[0] + assert B.shape[1] == D.shape[1] + assert A.shape == (self.C.shape[1], self.C.shape[1]) + assert B.shape == (self.C.shape[1], self.D.shape[1]) + + def test_missing_AC(self): + A, B, C, D = abcd_normalize(B=self.B, D=self.D) + assert A.shape[0] == A.shape[1] + assert A.shape[0] == B.shape[0] + assert C.shape[0] == D.shape[0] + assert C.shape[1] == A.shape[0] + assert A.shape == (self.B.shape[0], self.B.shape[0]) + assert C.shape == (self.D.shape[0], self.B.shape[0]) + + def test_missing_AD(self): + A, B, C, D = abcd_normalize(B=self.B, C=self.C) + assert A.shape[0] == A.shape[1] + assert A.shape[0] == B.shape[0] + assert D.shape[0] == C.shape[0] + assert D.shape[1] == B.shape[1] + assert A.shape == (self.B.shape[0], self.B.shape[0]) + assert D.shape == (self.C.shape[0], self.B.shape[1]) + + def test_missing_BC(self): + A, B, C, D = abcd_normalize(A=self.A, D=self.D) + assert B.shape[0] == A.shape[0] + assert B.shape[1] == D.shape[1] + assert C.shape[0] == D.shape[0] + assert C.shape[1], A.shape[0] + assert B.shape == (self.A.shape[0], self.D.shape[1]) + assert C.shape == (self.D.shape[0], self.A.shape[0]) + + def test_missing_ABC_fails(self): + assert_raises(ValueError, abcd_normalize, D=self.D) + + def test_missing_BD_fails(self): + assert_raises(ValueError, abcd_normalize, A=self.A, C=self.C) + + def test_missing_CD_fails(self): + assert_raises(ValueError, abcd_normalize, A=self.A, B=self.B) + + +class Test_bode: + + def test_01(self): + # Test bode() magnitude calculation (manual sanity check). + # 1st order low-pass filter: H(s) = 1 / (s + 1), + # cutoff: 1 rad/s, slope: -20 dB/decade + # H(s=0.1) ~= 0 dB + # H(s=1) ~= -3 dB + # H(s=10) ~= -20 dB + # H(s=100) ~= -40 dB + system = lti([1], [1, 1]) + w = [0.1, 1, 10, 100] + w, mag, phase = bode(system, w=w) + expected_mag = [0, -3, -20, -40] + assert_almost_equal(mag, expected_mag, decimal=1) + + def test_02(self): + # Test bode() phase calculation (manual sanity check). 
+ # 1st order low-pass filter: H(s) = 1 / (s + 1), + # angle(H(s=0.1)) ~= -5.7 deg + # angle(H(s=1)) ~= -45 deg + # angle(H(s=10)) ~= -84.3 deg + system = lti([1], [1, 1]) + w = [0.1, 1, 10] + w, mag, phase = bode(system, w=w) + expected_phase = [-5.7, -45, -84.3] + assert_almost_equal(phase, expected_phase, decimal=1) + + def test_03(self): + # Test bode() magnitude calculation. + # 1st order low-pass filter: H(s) = 1 / (s + 1) + system = lti([1], [1, 1]) + w = [0.1, 1, 10, 100] + w, mag, phase = bode(system, w=w) + jw = w * 1j + y = np.polyval(system.num, jw) / np.polyval(system.den, jw) + expected_mag = 20.0 * np.log10(abs(y)) + assert_almost_equal(mag, expected_mag) + + def test_04(self): + # Test bode() phase calculation. + # 1st order low-pass filter: H(s) = 1 / (s + 1) + system = lti([1], [1, 1]) + w = [0.1, 1, 10, 100] + w, mag, phase = bode(system, w=w) + jw = w * 1j + y = np.polyval(system.num, jw) / np.polyval(system.den, jw) + expected_phase = np.arctan2(y.imag, y.real) * 180.0 / np.pi + assert_almost_equal(phase, expected_phase) + + def test_05(self): + # Test that bode() finds a reasonable frequency range. + # 1st order low-pass filter: H(s) = 1 / (s + 1) + system = lti([1], [1, 1]) + n = 10 + # Expected range is from 0.01 to 10. + expected_w = np.logspace(-2, 1, n) + w, mag, phase = bode(system, n=n) + assert_almost_equal(w, expected_w) + + def test_06(self): + # Test that bode() doesn't fail on a system with a pole at 0. + # integrator, pole at zero: H(s) = 1 / s + system = lti([1], [1, 0]) + w, mag, phase = bode(system, n=2) + assert w[0] == 0.01 # a fail would give not-a-number + + def test_07(self): + # bode() should not fail on a system with pure imaginary poles. + # The test passes if bode doesn't raise an exception. + system = lti([1], [1, 0, 100]) + w, mag, phase = bode(system, n=2) + + def test_08(self): + # Test that bode() return continuous phase, issues/2331. + system = lti([], [-10, -30, -40, -60, -70], 1) + w, mag, phase = system.bode(w=np.logspace(-3, 40, 100)) + assert_almost_equal(min(phase), -450, decimal=15) + + def test_from_state_space(self): + # Ensure that bode works with a system that was created from the + # state space representation matrices A, B, C, D. In this case, + # system.num will be a 2-D array with shape (1, n+1), where (n,n) + # is the shape of A. + # A Butterworth lowpass filter is used, so we know the exact + # frequency response. + a = np.array([1.0, 2.0, 2.0, 1.0]) + A = linalg.companion(a).T + B = np.array([[0.0], [0.0], [1.0]]) + C = np.array([[1.0, 0.0, 0.0]]) + D = np.array([[0.0]]) + with suppress_warnings() as sup: + sup.filter(BadCoefficients) + system = lti(A, B, C, D) + w, mag, phase = bode(system, n=100) + + expected_magnitude = 20 * np.log10(np.sqrt(1.0 / (1.0 + w**6))) + assert_almost_equal(mag, expected_magnitude) + + +class Test_freqresp: + + def test_output_manual(self): + # Test freqresp() output calculation (manual sanity check). + # 1st order low-pass filter: H(s) = 1 / (s + 1), + # re(H(s=0.1)) ~= 0.99 + # re(H(s=1)) ~= 0.5 + # re(H(s=10)) ~= 0.0099 + system = lti([1], [1, 1]) + w = [0.1, 1, 10] + w, H = freqresp(system, w=w) + expected_re = [0.99, 0.5, 0.0099] + expected_im = [-0.099, -0.5, -0.099] + assert_almost_equal(H.real, expected_re, decimal=1) + assert_almost_equal(H.imag, expected_im, decimal=1) + + def test_output(self): + # Test freqresp() output calculation. 
+ # 1st order low-pass filter: H(s) = 1 / (s + 1) + system = lti([1], [1, 1]) + w = [0.1, 1, 10, 100] + w, H = freqresp(system, w=w) + s = w * 1j + expected = np.polyval(system.num, s) / np.polyval(system.den, s) + assert_almost_equal(H.real, expected.real) + assert_almost_equal(H.imag, expected.imag) + + def test_freq_range(self): + # Test that freqresp() finds a reasonable frequency range. + # 1st order low-pass filter: H(s) = 1 / (s + 1) + # Expected range is from 0.01 to 10. + system = lti([1], [1, 1]) + n = 10 + expected_w = np.logspace(-2, 1, n) + w, H = freqresp(system, n=n) + assert_almost_equal(w, expected_w) + + def test_pole_zero(self): + # Test that freqresp() doesn't fail on a system with a pole at 0. + # integrator, pole at zero: H(s) = 1 / s + system = lti([1], [1, 0]) + w, H = freqresp(system, n=2) + assert w[0] == 0.01 # a fail would give not-a-number + + def test_from_state_space(self): + # Ensure that freqresp works with a system that was created from the + # state space representation matrices A, B, C, D. In this case, + # system.num will be a 2-D array with shape (1, n+1), where (n,n) is + # the shape of A. + # A Butterworth lowpass filter is used, so we know the exact + # frequency response. + a = np.array([1.0, 2.0, 2.0, 1.0]) + A = linalg.companion(a).T + B = np.array([[0.0],[0.0],[1.0]]) + C = np.array([[1.0, 0.0, 0.0]]) + D = np.array([[0.0]]) + with suppress_warnings() as sup: + sup.filter(BadCoefficients) + system = lti(A, B, C, D) + w, H = freqresp(system, n=100) + s = w * 1j + expected = (1.0 / (1.0 + 2*s + 2*s**2 + s**3)) + assert_almost_equal(H.real, expected.real) + assert_almost_equal(H.imag, expected.imag) + + def test_from_zpk(self): + # 4th order low-pass filter: H(s) = 1 / (s + 1) + system = lti([],[-1]*4,[1]) + w = [0.1, 1, 10, 100] + w, H = freqresp(system, w=w) + s = w * 1j + expected = 1 / (s + 1)**4 + assert_almost_equal(H.real, expected.real) + assert_almost_equal(H.imag, expected.imag) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/tests/test_max_len_seq.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/tests/test_max_len_seq.py new file mode 100644 index 0000000000000000000000000000000000000000..7610b3f898571d10d75a64f00d900168c7142fbe --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/tests/test_max_len_seq.py @@ -0,0 +1,71 @@ +import numpy as np +from pytest import raises as assert_raises +from scipy._lib._array_api import xp_assert_close, xp_assert_equal + +from numpy.fft import fft, ifft + +from scipy.signal import max_len_seq + + +class TestMLS: + + def test_mls_inputs(self): + # can't all be zero state + assert_raises(ValueError, max_len_seq, + 10, state=np.zeros(10)) + # wrong size state + assert_raises(ValueError, max_len_seq, 10, + state=np.ones(3)) + # wrong length + assert_raises(ValueError, max_len_seq, 10, length=-1) + xp_assert_equal(max_len_seq(10, length=0)[0], + np.asarray([], dtype=np.int8) + ) + # unknown taps + assert_raises(ValueError, max_len_seq, 64) + # bad taps + assert_raises(ValueError, max_len_seq, 10, taps=[-1, 1]) + + def test_mls_output(self): + # define some alternate working taps + alt_taps = {2: [1], 3: [2], 4: [3], 5: [4, 3, 2], 6: [5, 4, 1], 7: [4], + 8: [7, 5, 3]} + # assume the other bit levels work, too slow to test higher orders... 
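+ # The loop below checks the defining property of a maximum length sequence:
+ # in the +/-1 representation its circular autocorrelation equals
+ # 2**nbits - 1 at zero lag and -1 at every other lag, evaluated here via
+ # an FFT-based circular cross-correlation.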
+ for nbits in range(2, 8): + for state in [None, np.round(np.random.rand(nbits))]: + for taps in [None, alt_taps[nbits]]: + if state is not None and np.all(state == 0): + state[0] = 1 # they can't all be zero + orig_m = max_len_seq(nbits, state=state, + taps=taps)[0] + m = 2. * orig_m - 1. # convert to +/- 1 representation + # First, make sure we got all 1's or -1 + err_msg = "mls had non binary terms" + xp_assert_equal(np.abs(m), np.ones_like(m), + err_msg=err_msg) + # Test via circular cross-correlation, which is just mult. + # in the frequency domain with one signal conjugated + tester = np.real(ifft(fft(m) * np.conj(fft(m)))) + out_len = 2**nbits - 1 + # impulse amplitude == test_len + err_msg = "mls impulse has incorrect value" + xp_assert_close(tester[0], + float(out_len), + err_msg=err_msg + ) + # steady-state is -1 + err_msg = "mls steady-state has incorrect value" + xp_assert_close(tester[1:], + np.full(out_len - 1, -1, dtype=tester.dtype), + err_msg=err_msg) + # let's do the split thing using a couple options + for n in (1, 2**(nbits - 1)): + m1, s1 = max_len_seq(nbits, state=state, taps=taps, + length=n) + m2, s2 = max_len_seq(nbits, state=s1, taps=taps, + length=1) + m3, s3 = max_len_seq(nbits, state=s2, taps=taps, + length=out_len - n - 1) + new_m = np.concatenate((m1, m2, m3)) + xp_assert_equal(orig_m, new_m) + diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/tests/test_peak_finding.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/tests/test_peak_finding.py new file mode 100644 index 0000000000000000000000000000000000000000..8de5a2379c2d43c0a10b5b3facd0b33be0778a36 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/tests/test_peak_finding.py @@ -0,0 +1,915 @@ +import copy + +import numpy as np +import pytest +from pytest import raises, warns +from scipy._lib._array_api import xp_assert_close, xp_assert_equal + +from scipy.signal._peak_finding import ( + argrelmax, + argrelmin, + peak_prominences, + peak_widths, + _unpack_condition_args, + find_peaks, + find_peaks_cwt, + _identify_ridge_lines +) +from scipy.signal.windows import gaussian +from scipy.signal._peak_finding_utils import _local_maxima_1d, PeakPropertyWarning + + +def _gen_gaussians(center_locs, sigmas, total_length): + xdata = np.arange(0, total_length).astype(float) + out_data = np.zeros(total_length, dtype=float) + for ind, sigma in enumerate(sigmas): + tmp = (xdata - center_locs[ind]) / sigma + out_data += np.exp(-(tmp**2)) + return out_data + + +def _gen_gaussians_even(sigmas, total_length): + num_peaks = len(sigmas) + delta = total_length / (num_peaks + 1) + center_locs = np.linspace(delta, total_length - delta, num=num_peaks).astype(int) + out_data = _gen_gaussians(center_locs, sigmas, total_length) + return out_data, center_locs + + +def _gen_ridge_line(start_locs, max_locs, length, distances, gaps): + """ + Generate coordinates for a ridge line. + + Will be a series of coordinates, starting a start_loc (length 2). + The maximum distance between any adjacent columns will be + `max_distance`, the max distance between adjacent rows + will be `map_gap'. + + `max_locs` should be the size of the intended matrix. The + ending coordinates are guaranteed to be less than `max_locs`, + although they may not approach `max_locs` at all. 
+ """ + + def keep_bounds(num, max_val): + out = max(num, 0) + out = min(out, max_val) + return out + + gaps = copy.deepcopy(gaps) + distances = copy.deepcopy(distances) + + locs = np.zeros([length, 2], dtype=int) + locs[0, :] = start_locs + total_length = max_locs[0] - start_locs[0] - sum(gaps) + if total_length < length: + raise ValueError('Cannot generate ridge line according to constraints') + dist_int = length / len(distances) - 1 + gap_int = length / len(gaps) - 1 + for ind in range(1, length): + nextcol = locs[ind - 1, 1] + nextrow = locs[ind - 1, 0] + 1 + if (ind % dist_int == 0) and (len(distances) > 0): + nextcol += ((-1)**ind)*distances.pop() + if (ind % gap_int == 0) and (len(gaps) > 0): + nextrow += gaps.pop() + nextrow = keep_bounds(nextrow, max_locs[0]) + nextcol = keep_bounds(nextcol, max_locs[1]) + locs[ind, :] = [nextrow, nextcol] + + return [locs[:, 0], locs[:, 1]] + + +class TestLocalMaxima1d: + + def test_empty(self): + """Test with empty signal.""" + x = np.array([], dtype=np.float64) + for array in _local_maxima_1d(x): + xp_assert_equal(array, np.array([]), check_dtype=False) + assert array.base is None + + def test_linear(self): + """Test with linear signal.""" + x = np.linspace(0, 100) + for array in _local_maxima_1d(x): + xp_assert_equal(array, np.array([], dtype=np.intp)) + assert array.base is None + + def test_simple(self): + """Test with simple signal.""" + x = np.linspace(-10, 10, 50) + x[2::3] += 1 + expected = np.arange(2, 50, 3, dtype=np.intp) + for array in _local_maxima_1d(x): + # For plateaus of size 1, the edges are identical with the + # midpoints + xp_assert_equal(array, expected, check_dtype=False) + assert array.base is None + + def test_flat_maxima(self): + """Test if flat maxima are detected correctly.""" + x = np.array([-1.3, 0, 1, 0, 2, 2, 0, 3, 3, 3, 2.99, 4, 4, 4, 4, -10, + -5, -5, -5, -5, -5, -10]) + midpoints, left_edges, right_edges = _local_maxima_1d(x) + xp_assert_equal(midpoints, np.array([2, 4, 8, 12, 18]), check_dtype=False) + xp_assert_equal(left_edges, np.array([2, 4, 7, 11, 16]), check_dtype=False) + xp_assert_equal(right_edges, np.array([2, 5, 9, 14, 20]), check_dtype=False) + + @pytest.mark.parametrize('x', [ + np.array([1., 0, 2]), + np.array([3., 3, 0, 4, 4]), + np.array([5., 5, 5, 0, 6, 6, 6]), + ]) + def test_signal_edges(self, x): + """Test if behavior on signal edges is correct.""" + for array in _local_maxima_1d(x): + xp_assert_equal(array, np.array([], dtype=np.intp)) + assert array.base is None + + def test_exceptions(self): + """Test input validation and raised exceptions.""" + with raises(ValueError, match="wrong number of dimensions"): + _local_maxima_1d(np.ones((1, 1))) + with raises(ValueError, match="expected 'const float64_t'"): + _local_maxima_1d(np.ones(1, dtype=int)) + with raises(TypeError, match="list"): + _local_maxima_1d([1., 2.]) + with raises(TypeError, match="'x' must not be None"): + _local_maxima_1d(None) + + +class TestRidgeLines: + + def test_empty(self): + test_matr = np.zeros([20, 100]) + lines = _identify_ridge_lines(test_matr, np.full(20, 2), 1) + assert len(lines) == 0 + + def test_minimal(self): + test_matr = np.zeros([20, 100]) + test_matr[0, 10] = 1 + lines = _identify_ridge_lines(test_matr, np.full(20, 2), 1) + assert len(lines) == 1 + + test_matr = np.zeros([20, 100]) + test_matr[0:2, 10] = 1 + lines = _identify_ridge_lines(test_matr, np.full(20, 2), 1) + assert len(lines) == 1 + + def test_single_pass(self): + distances = [0, 1, 2, 5] + gaps = [0, 1, 2, 0, 1] + test_matr = np.zeros([20, 
50]) + 1e-12 + length = 12 + line = _gen_ridge_line([0, 25], test_matr.shape, length, distances, gaps) + test_matr[line[0], line[1]] = 1 + max_distances = np.full(20, max(distances)) + identified_lines = _identify_ridge_lines(test_matr, + max_distances, + max(gaps) + 1) + assert len(identified_lines) == 1 + for iline_, line_ in zip(identified_lines[0], line): + xp_assert_equal(iline_, line_, check_dtype=False) + + def test_single_bigdist(self): + distances = [0, 1, 2, 5] + gaps = [0, 1, 2, 4] + test_matr = np.zeros([20, 50]) + length = 12 + line = _gen_ridge_line([0, 25], test_matr.shape, length, distances, gaps) + test_matr[line[0], line[1]] = 1 + max_dist = 3 + max_distances = np.full(20, max_dist) + #This should get 2 lines, since the distance is too large + identified_lines = _identify_ridge_lines(test_matr, + max_distances, + max(gaps) + 1) + assert len(identified_lines) == 2 + + for iline in identified_lines: + adists = np.diff(iline[1]) + np.testing.assert_array_less(np.abs(adists), max_dist) + + agaps = np.diff(iline[0]) + np.testing.assert_array_less(np.abs(agaps), max(gaps) + 0.1) + + def test_single_biggap(self): + distances = [0, 1, 2, 5] + max_gap = 3 + gaps = [0, 4, 2, 1] + test_matr = np.zeros([20, 50]) + length = 12 + line = _gen_ridge_line([0, 25], test_matr.shape, length, distances, gaps) + test_matr[line[0], line[1]] = 1 + max_dist = 6 + max_distances = np.full(20, max_dist) + #This should get 2 lines, since the gap is too large + identified_lines = _identify_ridge_lines(test_matr, max_distances, max_gap) + assert len(identified_lines) == 2 + + for iline in identified_lines: + adists = np.diff(iline[1]) + np.testing.assert_array_less(np.abs(adists), max_dist) + + agaps = np.diff(iline[0]) + np.testing.assert_array_less(np.abs(agaps), max(gaps) + 0.1) + + def test_single_biggaps(self): + distances = [0] + max_gap = 1 + gaps = [3, 6] + test_matr = np.zeros([50, 50]) + length = 30 + line = _gen_ridge_line([0, 25], test_matr.shape, length, distances, gaps) + test_matr[line[0], line[1]] = 1 + max_dist = 1 + max_distances = np.full(50, max_dist) + #This should get 3 lines, since the gaps are too large + identified_lines = _identify_ridge_lines(test_matr, max_distances, max_gap) + assert len(identified_lines) == 3 + + for iline in identified_lines: + adists = np.diff(iline[1]) + np.testing.assert_array_less(np.abs(adists), max_dist) + + agaps = np.diff(iline[0]) + np.testing.assert_array_less(np.abs(agaps), max(gaps) + 0.1) + + +class TestArgrel: + + def test_empty(self): + # Regression test for gh-2832. + # When there are no relative extrema, make sure that + # the number of empty arrays returned matches the + # dimension of the input. + + empty_array = np.array([], dtype=int) + + z1 = np.zeros(5) + + i = argrelmin(z1) + xp_assert_equal(len(i), 1) + xp_assert_equal(i[0], empty_array, check_dtype=False) + + z2 = np.zeros((3, 5)) + + row, col = argrelmin(z2, axis=0) + xp_assert_equal(row, empty_array, check_dtype=False) + xp_assert_equal(col, empty_array, check_dtype=False) + + row, col = argrelmin(z2, axis=1) + xp_assert_equal(row, empty_array, check_dtype=False) + xp_assert_equal(col, empty_array, check_dtype=False) + + def test_basic(self): + # Note: the docstrings for the argrel{min,max,extrema} functions + # do not give a guarantee of the order of the indices, so we'll + # sort them before testing. 
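+ # With axis=0 the comparison runs down each column, e.g. x[2, 0] = 3 is a
+ # relative maximum because it exceeds its vertical neighbours x[1, 0] = 2
+ # and x[3, 0] = 2; axis=1 applies the same test along each row.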
+ + x = np.array([[1, 2, 2, 3, 2], + [2, 1, 2, 2, 3], + [3, 2, 1, 2, 2], + [2, 3, 2, 1, 2], + [1, 2, 3, 2, 1]]) + + row, col = argrelmax(x, axis=0) + order = np.argsort(row) + xp_assert_equal(row[order], [1, 2, 3], check_dtype=False) + xp_assert_equal(col[order], [4, 0, 1], check_dtype=False) + + row, col = argrelmax(x, axis=1) + order = np.argsort(row) + xp_assert_equal(row[order], [0, 3, 4], check_dtype=False) + xp_assert_equal(col[order], [3, 1, 2], check_dtype=False) + + row, col = argrelmin(x, axis=0) + order = np.argsort(row) + xp_assert_equal(row[order], [1, 2, 3], check_dtype=False) + xp_assert_equal(col[order], [1, 2, 3], check_dtype=False) + + row, col = argrelmin(x, axis=1) + order = np.argsort(row) + xp_assert_equal(row[order], [1, 2, 3], check_dtype=False) + xp_assert_equal(col[order], [1, 2, 3], check_dtype=False) + + def test_highorder(self): + order = 2 + sigmas = [1.0, 2.0, 10.0, 5.0, 15.0] + test_data, act_locs = _gen_gaussians_even(sigmas, 500) + test_data[act_locs + order] = test_data[act_locs]*0.99999 + test_data[act_locs - order] = test_data[act_locs]*0.99999 + rel_max_locs = argrelmax(test_data, order=order, mode='clip')[0] + + assert len(rel_max_locs) == len(act_locs) + assert (rel_max_locs == act_locs).all() + + def test_2d_gaussians(self): + sigmas = [1.0, 2.0, 10.0] + test_data, act_locs = _gen_gaussians_even(sigmas, 100) + rot_factor = 20 + rot_range = np.arange(0, len(test_data)) - rot_factor + test_data_2 = np.vstack([test_data, test_data[rot_range]]) + rel_max_rows, rel_max_cols = argrelmax(test_data_2, axis=1, order=1) + + for rw in range(0, test_data_2.shape[0]): + inds = (rel_max_rows == rw) + + assert len(rel_max_cols[inds]) == len(act_locs) + assert (act_locs == (rel_max_cols[inds] - rot_factor*rw)).all() + + +class TestPeakProminences: + + def test_empty(self): + """ + Test if an empty array is returned if no peaks are provided. + """ + out = peak_prominences([1, 2, 3], []) + for arr, dtype in zip(out, [np.float64, np.intp, np.intp]): + assert arr.size == 0 + assert arr.dtype == dtype + + out = peak_prominences([], []) + for arr, dtype in zip(out, [np.float64, np.intp, np.intp]): + assert arr.size == 0 + assert arr.dtype == dtype + + def test_basic(self): + """ + Test if height of prominences is correctly calculated in signal with + rising baseline (peak widths are 1 sample). + """ + # Prepare basic signal + x = np.array([-1, 1.2, 1.2, 1, 3.2, 1.3, 2.88, 2.1]) + peaks = np.array([1, 2, 4, 6]) + lbases = np.array([0, 0, 0, 5]) + rbases = np.array([3, 3, 5, 7]) + proms = x[peaks] - np.max([x[lbases], x[rbases]], axis=0) + # Test if calculation matches handcrafted result + out = peak_prominences(x, peaks) + xp_assert_equal(out[0], proms, check_dtype=False) + xp_assert_equal(out[1], lbases, check_dtype=False) + xp_assert_equal(out[2], rbases, check_dtype=False) + + def test_edge_cases(self): + """ + Test edge cases. 
+ """ + # Peaks have same height, prominence and bases + x = [0, 2, 1, 2, 1, 2, 0] + peaks = [1, 3, 5] + proms, lbases, rbases = peak_prominences(x, peaks) + xp_assert_equal(proms, np.asarray([2.0, 2, 2]), check_dtype=False) + xp_assert_equal(lbases, [0, 0, 0], check_dtype=False) + xp_assert_equal(rbases, [6, 6, 6], check_dtype=False) + + # Peaks have same height & prominence but different bases + x = [0, 1, 0, 1, 0, 1, 0] + peaks = np.array([1, 3, 5]) + proms, lbases, rbases = peak_prominences(x, peaks) + xp_assert_equal(proms, np.asarray([1.0, 1, 1])) + xp_assert_equal(lbases, peaks - 1, check_dtype=False) + xp_assert_equal(rbases, peaks + 1, check_dtype=False) + + def test_non_contiguous(self): + """ + Test with non-C-contiguous input arrays. + """ + x = np.repeat([-9, 9, 9, 0, 3, 1], 2) + peaks = np.repeat([1, 2, 4], 2) + proms, lbases, rbases = peak_prominences(x[::2], peaks[::2]) + xp_assert_equal(proms, np.asarray([9.0, 9, 2])) + xp_assert_equal(lbases, [0, 0, 3], check_dtype=False) + xp_assert_equal(rbases, [3, 3, 5], check_dtype=False) + + def test_wlen(self): + """ + Test if wlen actually shrinks the evaluation range correctly. + """ + x = [0, 1, 2, 3, 1, 0, -1] + peak = [3] + # Test rounding behavior of wlen + proms = peak_prominences(x, peak) + for prom, val in zip(proms, [3.0, 0, 6]): + assert prom == val + + for wlen, i in [(8, 0), (7, 0), (6, 0), (5, 1), (3.2, 1), (3, 2), (1.1, 2)]: + proms = peak_prominences(x, peak, wlen) + for prom, val in zip(proms, [3. - i, 0 + i, 6 - i]): + assert prom == val + + def test_exceptions(self): + """ + Verify that exceptions and warnings are raised. + """ + # x with dimension > 1 + with raises(ValueError, match='1-D array'): + peak_prominences([[0, 1, 1, 0]], [1, 2]) + # peaks with dimension > 1 + with raises(ValueError, match='1-D array'): + peak_prominences([0, 1, 1, 0], [[1, 2]]) + # x with dimension < 1 + with raises(ValueError, match='1-D array'): + peak_prominences(3, [0,]) + + # empty x with supplied + with raises(ValueError, match='not a valid index'): + peak_prominences([], [0]) + # invalid indices with non-empty x + for p in [-100, -1, 3, 1000]: + with raises(ValueError, match='not a valid index'): + peak_prominences([1, 0, 2], [p]) + + # peaks is not cast-able to np.intp + with raises(TypeError, match='cannot safely cast'): + peak_prominences([0, 1, 1, 0], [1.1, 2.3]) + + # wlen < 3 + with raises(ValueError, match='wlen'): + peak_prominences(np.arange(10), [3, 5], wlen=1) + + @pytest.mark.thread_unsafe + def test_warnings(self): + """ + Verify that appropriate warnings are raised. + """ + msg = "some peaks have a prominence of 0" + for p in [0, 1, 2]: + with warns(PeakPropertyWarning, match=msg): + peak_prominences([1, 0, 2], [p,]) + with warns(PeakPropertyWarning, match=msg): + peak_prominences([0, 1, 1, 1, 0], [2], wlen=2) + + +class TestPeakWidths: + + def test_empty(self): + """ + Test if an empty array is returned if no peaks are provided. + """ + widths = peak_widths([], [])[0] + assert isinstance(widths, np.ndarray) + assert widths.size == 0 + widths = peak_widths([1, 2, 3], [])[0] + assert isinstance(widths, np.ndarray) + assert widths.size == 0 + out = peak_widths([], []) + for arr in out: + assert isinstance(arr, np.ndarray) + assert arr.size == 0 + + @pytest.mark.filterwarnings("ignore:some peaks have a width of 0") + def test_basic(self): + """ + Test a simple use case with easy to verify results at different relative + heights. 
+ """ + x = np.array([1, 0, 1, 2, 1, 0, -1]) + prominence = 2 + for rel_height, width_true, lip_true, rip_true in [ + (0., 0., 3., 3.), # raises warning + (0.25, 1., 2.5, 3.5), + (0.5, 2., 2., 4.), + (0.75, 3., 1.5, 4.5), + (1., 4., 1., 5.), + (2., 5., 1., 6.), + (3., 5., 1., 6.) + ]: + width_calc, height, lip_calc, rip_calc = peak_widths( + x, [3], rel_height) + xp_assert_close(width_calc, np.asarray([width_true])) + xp_assert_close(height, np.asarray([2 - rel_height * prominence])) + xp_assert_close(lip_calc, np.asarray([lip_true])) + xp_assert_close(rip_calc, np.asarray([rip_true])) + + def test_non_contiguous(self): + """ + Test with non-C-contiguous input arrays. + """ + x = np.repeat([0, 100, 50], 4) + peaks = np.repeat([1], 3) + result = peak_widths(x[::4], peaks[::3]) + xp_assert_equal(result, + np.asarray([[0.75], [75], [0.75], [1.5]]) + ) + + def test_exceptions(self): + """ + Verify that argument validation works as intended. + """ + with raises(ValueError, match='1-D array'): + # x with dimension > 1 + peak_widths(np.zeros((3, 4)), np.ones(3)) + with raises(ValueError, match='1-D array'): + # x with dimension < 1 + peak_widths(3, [0]) + with raises(ValueError, match='1-D array'): + # peaks with dimension > 1 + peak_widths(np.arange(10), np.ones((3, 2), dtype=np.intp)) + with raises(ValueError, match='1-D array'): + # peaks with dimension < 1 + peak_widths(np.arange(10), 3) + with raises(ValueError, match='not a valid index'): + # peak pos exceeds x.size + peak_widths(np.arange(10), [8, 11]) + with raises(ValueError, match='not a valid index'): + # empty x with peaks supplied + peak_widths([], [1, 2]) + with raises(TypeError, match='cannot safely cast'): + # peak cannot be safely cast to intp + peak_widths(np.arange(10), [1.1, 2.3]) + with raises(ValueError, match='rel_height'): + # rel_height is < 0 + peak_widths([0, 1, 0, 1, 0], [1, 3], rel_height=-1) + with raises(TypeError, match='None'): + # prominence data contains None + peak_widths([1, 2, 1], [1], prominence_data=(None, None, None)) + + @pytest.mark.thread_unsafe + def test_warnings(self): + """ + Verify that appropriate warnings are raised. 
+ """ + msg = "some peaks have a width of 0" + with warns(PeakPropertyWarning, match=msg): + # Case: rel_height is 0 + peak_widths([0, 1, 0], [1], rel_height=0) + with warns(PeakPropertyWarning, match=msg): + # Case: prominence is 0 and bases are identical + peak_widths( + [0, 1, 1, 1, 0], [2], + prominence_data=(np.array([0.], np.float64), + np.array([2], np.intp), + np.array([2], np.intp)) + ) + + def test_mismatching_prominence_data(self): + """Test with mismatching peak and / or prominence data.""" + x = [0, 1, 0] + peak = [1] + for i, (prominences, left_bases, right_bases) in enumerate([ + ((1.,), (-1,), (2,)), # left base not in x + ((1.,), (0,), (3,)), # right base not in x + ((1.,), (2,), (0,)), # swapped bases same as peak + ((1., 1.), (0, 0), (2, 2)), # array shapes don't match peaks + ((1., 1.), (0,), (2,)), # arrays with different shapes + ((1.,), (0, 0), (2,)), # arrays with different shapes + ((1.,), (0,), (2, 2)) # arrays with different shapes + ]): + # Make sure input is matches output of signal.peak_prominences + prominence_data = (np.array(prominences, dtype=np.float64), + np.array(left_bases, dtype=np.intp), + np.array(right_bases, dtype=np.intp)) + # Test for correct exception + if i < 3: + match = "prominence data is invalid for peak" + else: + match = "arrays in `prominence_data` must have the same shape" + with raises(ValueError, match=match): + peak_widths(x, peak, prominence_data=prominence_data) + + @pytest.mark.filterwarnings("ignore:some peaks have a width of 0") + def test_intersection_rules(self): + """Test if x == eval_height counts as an intersection.""" + # Flatt peak with two possible intersection points if evaluated at 1 + x = [0, 1, 2, 1, 3, 3, 3, 1, 2, 1, 0] + # relative height is 0 -> width is 0 as well, raises warning + xp_assert_close(peak_widths(x, peaks=[5], rel_height=0), + [(0.,), (3.,), (5.,), (5.,)]) + # width_height == x counts as intersection -> nearest 1 is chosen + xp_assert_close(peak_widths(x, peaks=[5], rel_height=2/3), + [(4.,), (1.,), (3.,), (7.,)]) + + +def test_unpack_condition_args(): + """ + Verify parsing of condition arguments for `scipy.signal.find_peaks` function. + """ + x = np.arange(10) + amin_true = x + amax_true = amin_true + 10 + peaks = amin_true[1::2] + + # Test unpacking with None or interval + assert (None, None) == _unpack_condition_args((None, None), x, peaks) + assert (1, None) == _unpack_condition_args(1, x, peaks) + assert (1, None) == _unpack_condition_args((1, None), x, peaks) + assert (None, 2) == _unpack_condition_args((None, 2), x, peaks) + assert (3., 4.5) == _unpack_condition_args((3., 4.5), x, peaks) + + # Test if borders are correctly reduced with `peaks` + amin_calc, amax_calc = _unpack_condition_args((amin_true, amax_true), x, peaks) + xp_assert_equal(amin_calc, amin_true[peaks]) + xp_assert_equal(amax_calc, amax_true[peaks]) + + # Test raises if array borders don't match x + with raises(ValueError, match="array size of lower"): + _unpack_condition_args(amin_true, np.arange(11), peaks) + with raises(ValueError, match="array size of upper"): + _unpack_condition_args((None, amin_true), np.arange(11), peaks) + + +class TestFindPeaks: + + # Keys of optionally returned properties + property_keys = {'peak_heights', 'left_thresholds', 'right_thresholds', + 'prominences', 'left_bases', 'right_bases', 'widths', + 'width_heights', 'left_ips', 'right_ips'} + + def test_constant(self): + """ + Test behavior for signal without local maxima. 
+ """ + open_interval = (None, None) + peaks, props = find_peaks(np.ones(10), + height=open_interval, threshold=open_interval, + prominence=open_interval, width=open_interval) + assert peaks.size == 0 + for key in self.property_keys: + assert props[key].size == 0 + + def test_plateau_size(self): + """ + Test plateau size condition for peaks. + """ + # Prepare signal with peaks with peak_height == plateau_size + plateau_sizes = np.array([1, 2, 3, 4, 8, 20, 111]) + x = np.zeros(plateau_sizes.size * 2 + 1) + x[1::2] = plateau_sizes + repeats = np.ones(x.size, dtype=int) + repeats[1::2] = x[1::2] + x = np.repeat(x, repeats) + + # Test full output + peaks, props = find_peaks(x, plateau_size=(None, None)) + xp_assert_equal(peaks, [1, 3, 7, 11, 18, 33, 100], check_dtype=False) + xp_assert_equal(props["plateau_sizes"], plateau_sizes, check_dtype=False) + xp_assert_equal(props["left_edges"], peaks - (plateau_sizes - 1) // 2, + check_dtype=False) + xp_assert_equal(props["right_edges"], peaks + plateau_sizes // 2, + check_dtype=False) + + # Test conditions + xp_assert_equal(find_peaks(x, plateau_size=4)[0], [11, 18, 33, 100], + check_dtype=False) + xp_assert_equal(find_peaks(x, plateau_size=(None, 3.5))[0], [1, 3, 7], + check_dtype=False) + xp_assert_equal(find_peaks(x, plateau_size=(5, 50))[0], [18, 33], + check_dtype=False) + + def test_height_condition(self): + """ + Test height condition for peaks. + """ + x = (0., 1/3, 0., 2.5, 0, 4., 0) + peaks, props = find_peaks(x, height=(None, None)) + xp_assert_equal(peaks, np.array([1, 3, 5]), check_dtype=False) + xp_assert_equal(props['peak_heights'], np.array([1/3, 2.5, 4.]), + check_dtype=False) + xp_assert_equal(find_peaks(x, height=0.5)[0], np.array([3, 5]), + check_dtype=False) + xp_assert_equal(find_peaks(x, height=(None, 3))[0], np.array([1, 3]), + check_dtype=False) + xp_assert_equal(find_peaks(x, height=(2, 3))[0], np.array([3]), + check_dtype=False) + + def test_threshold_condition(self): + """ + Test threshold condition for peaks. + """ + x = (0, 2, 1, 4, -1) + peaks, props = find_peaks(x, threshold=(None, None)) + xp_assert_equal(peaks, np.array([1, 3]), check_dtype=False) + xp_assert_equal(props['left_thresholds'], np.array([2.0, 3.0])) + xp_assert_equal(props['right_thresholds'], np.array([1.0, 5.0])) + xp_assert_equal(find_peaks(x, threshold=2)[0], np.array([3]), + check_dtype=False) + xp_assert_equal(find_peaks(x, threshold=3.5)[0], np.array([], dtype=int), + check_dtype=False) + xp_assert_equal(find_peaks(x, threshold=(None, 5))[0], np.array([1, 3]), + check_dtype=False) + xp_assert_equal(find_peaks(x, threshold=(None, 4))[0], np.array([1]), + check_dtype=False) + xp_assert_equal(find_peaks(x, threshold=(2, 4))[0], np.array([], dtype=int), + check_dtype=False) + + def test_distance_condition(self): + """ + Test distance condition for peaks. 
+ """ + # Peaks of different height with constant distance 3 + peaks_all = np.arange(1, 21, 3) + x = np.zeros(21) + x[peaks_all] += np.linspace(1, 2, peaks_all.size) + + # Test if peaks with "minimal" distance are still selected (distance = 3) + xp_assert_equal(find_peaks(x, distance=3)[0], peaks_all, check_dtype=False) + + # Select every second peak (distance > 3) + peaks_subset = find_peaks(x, distance=3.0001)[0] + # Test if peaks_subset is subset of peaks_all + assert np.setdiff1d(peaks_subset, peaks_all, assume_unique=True).size == 0 + + # Test if every second peak was removed + dfs = np.diff(peaks_subset) + xp_assert_equal(dfs, 6*np.ones_like(dfs)) + + # Test priority of peak removal + x = [-2, 1, -1, 0, -3] + peaks_subset = find_peaks(x, distance=10)[0] # use distance > x size + assert peaks_subset.size == 1 and peaks_subset[0] == 1 + + def test_prominence_condition(self): + """ + Test prominence condition for peaks. + """ + x = np.linspace(0, 10, 100) + peaks_true = np.arange(1, 99, 2) + offset = np.linspace(1, 10, peaks_true.size) + x[peaks_true] += offset + prominences = x[peaks_true] - x[peaks_true + 1] + interval = (3, 9) + keep = np.nonzero( + (interval[0] <= prominences) & (prominences <= interval[1])) + + peaks_calc, properties = find_peaks(x, prominence=interval) + xp_assert_equal(peaks_calc, peaks_true[keep], check_dtype=False) + xp_assert_equal(properties['prominences'], prominences[keep], check_dtype=False) + xp_assert_equal(properties['left_bases'], + np.zeros_like(properties['left_bases'])) + xp_assert_equal(properties['right_bases'], peaks_true[keep] + 1, + check_dtype=False) + + def test_width_condition(self): + """ + Test width condition for peaks. + """ + x = np.array([1, 0, 1, 2, 1, 0, -1, 4, 0]) + peaks, props = find_peaks(x, width=(None, 2), rel_height=0.75) + assert peaks.size == 1 + xp_assert_equal(peaks, 7*np.ones_like(peaks)) + xp_assert_close(props['widths'], np.asarray([1.35])) + xp_assert_close(props['width_heights'], np.asarray([1.])) + xp_assert_close(props['left_ips'], np.asarray([6.4])) + xp_assert_close(props['right_ips'], np.asarray([7.75])) + + def test_properties(self): + """ + Test returned properties. + """ + open_interval = (None, None) + x = [0, 1, 0, 2, 1.5, 0, 3, 0, 5, 9] + peaks, props = find_peaks(x, + height=open_interval, threshold=open_interval, + prominence=open_interval, width=open_interval) + assert len(props) == len(self.property_keys) + for key in self.property_keys: + assert peaks.size == props[key].size + + def test_raises(self): + """ + Test exceptions raised by function. + """ + with raises(ValueError, match="1-D array"): + find_peaks(np.array(1)) + with raises(ValueError, match="1-D array"): + find_peaks(np.ones((2, 2))) + with raises(ValueError, match="distance"): + find_peaks(np.arange(10), distance=-1) + + @pytest.mark.filterwarnings("ignore:some peaks have a prominence of 0", + "ignore:some peaks have a width of 0") + def test_wlen_smaller_plateau(self): + """ + Test behavior of prominence and width calculation if the given window + length is smaller than a peak's plateau size. + + Regression test for gh-9110. 
+ """ + peaks, props = find_peaks([0, 1, 1, 1, 0], prominence=(None, None), + width=(None, None), wlen=2) + xp_assert_equal(peaks, 2 * np.ones_like(peaks)) + xp_assert_equal(props["prominences"], np.zeros_like(props["prominences"])) + xp_assert_equal(props["widths"], np.zeros_like(props["widths"])) + xp_assert_equal(props["width_heights"], np.ones_like(props["width_heights"])) + for key in ("left_bases", "right_bases", "left_ips", "right_ips"): + xp_assert_equal(props[key], peaks, check_dtype=False) + + @pytest.mark.parametrize("kwargs", [ + {}, + {"distance": 3.0}, + {"prominence": (None, None)}, + {"width": (None, 2)}, + + ]) + def test_readonly_array(self, kwargs): + """ + Test readonly arrays are accepted. + """ + x = np.linspace(0, 10, 15) + x_readonly = x.copy() + x_readonly.flags.writeable = False + + peaks, _ = find_peaks(x) + peaks_readonly, _ = find_peaks(x_readonly, **kwargs) + + xp_assert_close(peaks, peaks_readonly) + + +class TestFindPeaksCwt: + + def test_find_peaks_exact(self): + """ + Generate a series of gaussians and attempt to find the peak locations. + """ + sigmas = [5.0, 3.0, 10.0, 20.0, 10.0, 50.0] + num_points = 500 + test_data, act_locs = _gen_gaussians_even(sigmas, num_points) + widths = np.arange(0.1, max(sigmas)) + found_locs = find_peaks_cwt(test_data, widths, gap_thresh=2, min_snr=0, + min_length=None) + xp_assert_equal(found_locs, act_locs, + check_dtype=False, + err_msg="Found maximum locations did not equal those expected" + ) + + def test_find_peaks_withnoise(self): + """ + Verify that peak locations are (approximately) found + for a series of gaussians with added noise. + """ + sigmas = [5.0, 3.0, 10.0, 20.0, 10.0, 50.0] + num_points = 500 + test_data, act_locs = _gen_gaussians_even(sigmas, num_points) + widths = np.arange(0.1, max(sigmas)) + noise_amp = 0.07 + np.random.seed(18181911) + test_data += (np.random.rand(num_points) - 0.5)*(2*noise_amp) + found_locs = find_peaks_cwt(test_data, widths, min_length=15, + gap_thresh=1, min_snr=noise_amp / 5) + + err_msg ='Different number of peaks found than expected' + assert len(found_locs) == len(act_locs), err_msg + diffs = np.abs(found_locs - act_locs) + max_diffs = np.array(sigmas) / 5 + np.testing.assert_array_less(diffs, max_diffs, 'Maximum location differed' + + f'by more than {max_diffs}') + + def test_find_peaks_nopeak(self): + """ + Verify that no peak is found in + data that's just noise. + """ + noise_amp = 1.0 + num_points = 100 + rng = np.random.RandomState(181819141) + test_data = (rng.rand(num_points) - 0.5)*(2*noise_amp) + widths = np.arange(10, 50) + found_locs = find_peaks_cwt(test_data, widths, min_snr=5, noise_perc=30) + assert len(found_locs) == 0 + + def test_find_peaks_with_non_default_wavelets(self): + x = gaussian(200, 2) + widths = np.array([1, 2, 3, 4]) + a = find_peaks_cwt(x, widths, wavelet=gaussian) + + xp_assert_equal(a, np.asarray([100]), check_dtype=False) + + def test_find_peaks_window_size(self): + """ + Verify that window_size is passed correctly to private function and + affects the result. 
+ """ + sigmas = [2.0, 2.0] + num_points = 1000 + test_data, act_locs = _gen_gaussians_even(sigmas, num_points) + widths = np.arange(0.1, max(sigmas), 0.2) + noise_amp = 0.05 + rng = np.random.RandomState(18181911) + test_data += (rng.rand(num_points) - 0.5)*(2*noise_amp) + + # Possibly contrived negative region to throw off peak finding + # when window_size is too large + test_data[250:320] -= 1 + + found_locs = find_peaks_cwt(test_data, widths, gap_thresh=2, min_snr=3, + min_length=None, window_size=None) + with pytest.raises(AssertionError): + assert found_locs.size == act_locs.size + + found_locs = find_peaks_cwt(test_data, widths, gap_thresh=2, min_snr=3, + min_length=None, window_size=20) + assert found_locs.size == act_locs.size + + def test_find_peaks_with_one_width(self): + """ + Verify that the `width` argument + in `find_peaks_cwt` can be a float + """ + xs = np.arange(0, np.pi, 0.05) + test_data = np.sin(xs) + widths = 1 + found_locs = find_peaks_cwt(test_data, widths) + + np.testing.assert_equal(found_locs, 32) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/tests/test_result_type.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/tests/test_result_type.py new file mode 100644 index 0000000000000000000000000000000000000000..a2cadd325a7e36c877df8532ba957712831c2dad --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/tests/test_result_type.py @@ -0,0 +1,51 @@ +# Regressions tests on result types of some signal functions + +import numpy as np + +from scipy.signal import (decimate, + lfilter_zi, + lfiltic, + sos2tf, + sosfilt_zi) + + +def test_decimate(): + ones_f32 = np.ones(32, dtype=np.float32) + assert decimate(ones_f32, 2).dtype == np.float32 + + ones_i64 = np.ones(32, dtype=np.int64) + assert decimate(ones_i64, 2).dtype == np.float64 + + +def test_lfilter_zi(): + b_f32 = np.array([1, 2, 3], dtype=np.float32) + a_f32 = np.array([4, 5, 6], dtype=np.float32) + assert lfilter_zi(b_f32, a_f32).dtype == np.float32 + + +def test_lfiltic(): + # this would return f32 when given a mix of f32 / f64 args + b_f32 = np.array([1, 2, 3], dtype=np.float32) + a_f32 = np.array([4, 5, 6], dtype=np.float32) + x_f32 = np.ones(32, dtype=np.float32) + + b_f64 = b_f32.astype(np.float64) + a_f64 = a_f32.astype(np.float64) + x_f64 = x_f32.astype(np.float64) + + assert lfiltic(b_f64, a_f32, x_f32).dtype == np.float64 + assert lfiltic(b_f32, a_f64, x_f32).dtype == np.float64 + assert lfiltic(b_f32, a_f32, x_f64).dtype == np.float64 + assert lfiltic(b_f32, a_f32, x_f32, x_f64).dtype == np.float64 + + +def test_sos2tf(): + sos_f32 = np.array([[4, 5, 6, 1, 2, 3]], dtype=np.float32) + b, a = sos2tf(sos_f32) + assert b.dtype == np.float32 + assert a.dtype == np.float32 + + +def test_sosfilt_zi(): + sos_f32 = np.array([[4, 5, 6, 1, 2, 3]], dtype=np.float32) + assert sosfilt_zi(sos_f32).dtype == np.float32 diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/tests/test_savitzky_golay.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/tests/test_savitzky_golay.py new file mode 100644 index 0000000000000000000000000000000000000000..61d958e35d91b7d537e0bd3551b6cec3f45a4983 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/tests/test_savitzky_golay.py @@ -0,0 +1,362 @@ +import pytest +import numpy as np 
+from numpy.testing import (assert_equal, + assert_array_equal, +) + +from scipy._lib._array_api import ( + assert_almost_equal, assert_array_almost_equal, xp_assert_close +) + +from scipy.ndimage import convolve1d # type: ignore[attr-defined] + +from scipy.signal import savgol_coeffs, savgol_filter +from scipy.signal._savitzky_golay import _polyder + + +def check_polyder(p, m, expected): + dp = _polyder(p, m) + assert_array_equal(dp, expected) + + +def test_polyder(): + cases = [ + ([5], 0, [5]), + ([5], 1, [0]), + ([3, 2, 1], 0, [3, 2, 1]), + ([3, 2, 1], 1, [6, 2]), + ([3, 2, 1], 2, [6]), + ([3, 2, 1], 3, [0]), + ([[3, 2, 1], [5, 6, 7]], 0, [[3, 2, 1], [5, 6, 7]]), + ([[3, 2, 1], [5, 6, 7]], 1, [[6, 2], [10, 6]]), + ([[3, 2, 1], [5, 6, 7]], 2, [[6], [10]]), + ([[3, 2, 1], [5, 6, 7]], 3, [[0], [0]]), + ] + for p, m, expected in cases: + check_polyder(np.array(p).T, m, np.array(expected).T) + + +#-------------------------------------------------------------------- +# savgol_coeffs tests +#-------------------------------------------------------------------- + +def alt_sg_coeffs(window_length, polyorder, pos): + """This is an alternative implementation of the SG coefficients. + + It uses numpy.polyfit and numpy.polyval. The results should be + equivalent to those of savgol_coeffs(), but this implementation + is slower. + + window_length should be odd. + + """ + if pos is None: + pos = window_length // 2 + t = np.arange(window_length) + unit = (t == pos).astype(int) + h = np.polyval(np.polyfit(t, unit, polyorder), t) + return h + + +def test_sg_coeffs_trivial(): + # Test a trivial case of savgol_coeffs: polyorder = window_length - 1 + h = savgol_coeffs(1, 0) + xp_assert_close(h, [1.0]) + + h = savgol_coeffs(3, 2) + xp_assert_close(h, [0.0, 1, 0], atol=1e-10) + + h = savgol_coeffs(5, 4) + xp_assert_close(h, [0.0, 0, 1, 0, 0], atol=1e-10) + + h = savgol_coeffs(5, 4, pos=1) + xp_assert_close(h, [0.0, 0, 0, 1, 0], atol=1e-10) + + h = savgol_coeffs(5, 4, pos=1, use='dot') + xp_assert_close(h, [0.0, 1, 0, 0, 0], atol=1e-10) + + +def compare_coeffs_to_alt(window_length, order): + # For the given window_length and order, compare the results + # of savgol_coeffs and alt_sg_coeffs for pos from 0 to window_length - 1. + # Also include pos=None. + for pos in [None] + list(range(window_length)): + h1 = savgol_coeffs(window_length, order, pos=pos, use='dot') + h2 = alt_sg_coeffs(window_length, order, pos=pos) + xp_assert_close(h1, h2, atol=1e-10, + err_msg=("window_length = %d, order = %d, pos = %s" % + (window_length, order, pos))) + + +def test_sg_coeffs_compare(): + # Compare savgol_coeffs() to alt_sg_coeffs(). + for window_length in range(1, 8, 2): + for order in range(window_length): + compare_coeffs_to_alt(window_length, order) + + +def test_sg_coeffs_exact(): + polyorder = 4 + window_length = 9 + halflen = window_length // 2 + + x = np.linspace(0, 21, 43) + delta = x[1] - x[0] + + # The data is a cubic polynomial. We'll use an order 4 + # SG filter, so the filtered values should equal the input data + # (except within half window_length of the edges). + y = 0.5 * x ** 3 - x + h = savgol_coeffs(window_length, polyorder) + y0 = convolve1d(y, h) + xp_assert_close(y0[halflen:-halflen], y[halflen:-halflen]) + + # Check the same input, but use deriv=1. dy is the exact result. + dy = 1.5 * x ** 2 - 1 + h = savgol_coeffs(window_length, polyorder, deriv=1, delta=delta) + y1 = convolve1d(y, h) + xp_assert_close(y1[halflen:-halflen], dy[halflen:-halflen]) + + # Check the same input, but use deriv=2. 
d2y is the exact result. + d2y = 3.0 * x + h = savgol_coeffs(window_length, polyorder, deriv=2, delta=delta) + y2 = convolve1d(y, h) + xp_assert_close(y2[halflen:-halflen], d2y[halflen:-halflen]) + + +def test_sg_coeffs_deriv(): + # The data in `x` is a sampled parabola, so using savgol_coeffs with an + # order 2 or higher polynomial should give exact results. + i = np.array([-2.0, 0.0, 2.0, 4.0, 6.0]) + x = i ** 2 / 4 + dx = i / 2 + d2x = np.full_like(i, 0.5) + for pos in range(x.size): + coeffs0 = savgol_coeffs(5, 3, pos=pos, delta=2.0, use='dot') + xp_assert_close(coeffs0.dot(x), x[pos], atol=1e-10) + coeffs1 = savgol_coeffs(5, 3, pos=pos, delta=2.0, use='dot', deriv=1) + xp_assert_close(coeffs1.dot(x), dx[pos], atol=1e-10) + coeffs2 = savgol_coeffs(5, 3, pos=pos, delta=2.0, use='dot', deriv=2) + xp_assert_close(coeffs2.dot(x), d2x[pos], atol=1e-10) + + +def test_sg_coeffs_deriv_gt_polyorder(): + """ + If deriv > polyorder, the coefficients should be all 0. + This is a regression test for a bug where, e.g., + savgol_coeffs(5, polyorder=1, deriv=2) + raised an error. + """ + coeffs = savgol_coeffs(5, polyorder=1, deriv=2) + assert_array_equal(coeffs, np.zeros(5)) + coeffs = savgol_coeffs(7, polyorder=4, deriv=6) + assert_array_equal(coeffs, np.zeros(7)) + + +def test_sg_coeffs_large(): + # Test that for large values of window_length and polyorder the array of + # coefficients returned is symmetric. The aim is to ensure that + # no potential numeric overflow occurs. + coeffs0 = savgol_coeffs(31, 9) + assert_array_almost_equal(coeffs0, coeffs0[::-1]) + coeffs1 = savgol_coeffs(31, 9, deriv=1) + assert_array_almost_equal(coeffs1, -coeffs1[::-1]) + +# -------------------------------------------------------------------- +# savgol_coeffs tests for even window length +# -------------------------------------------------------------------- + + +def test_sg_coeffs_even_window_length(): + # Simple case - deriv=0, polyorder=0, 1 + window_lengths = [4, 6, 8, 10, 12, 14, 16] + for length in window_lengths: + h_p_d = savgol_coeffs(length, 0, 0) + xp_assert_close(h_p_d, np.ones_like(h_p_d) / length) + + # Verify with closed forms + # deriv=1, polyorder=1, 2 + def h_p_d_closed_form_1(k, m): + return 6*(k - 0.5)/((2*m + 1)*m*(2*m - 1)) + + # deriv=2, polyorder=2 + def h_p_d_closed_form_2(k, m): + numer = 15*(-4*m**2 + 1 + 12*(k - 0.5)**2) + denom = 4*(2*m + 1)*(m + 1)*m*(m - 1)*(2*m - 1) + return numer/denom + + for length in window_lengths: + m = length//2 + expected_output = [h_p_d_closed_form_1(k, m) + for k in range(-m + 1, m + 1)][::-1] + actual_output = savgol_coeffs(length, 1, 1) + xp_assert_close(expected_output, actual_output) + actual_output = savgol_coeffs(length, 2, 1) + xp_assert_close(expected_output, actual_output) + + expected_output = [h_p_d_closed_form_2(k, m) + for k in range(-m + 1, m + 1)][::-1] + actual_output = savgol_coeffs(length, 2, 2) + xp_assert_close(expected_output, actual_output) + actual_output = savgol_coeffs(length, 3, 2) + xp_assert_close(expected_output, actual_output) + +#-------------------------------------------------------------------- +# savgol_filter tests +#-------------------------------------------------------------------- + + +def test_sg_filter_trivial(): + """ Test some trivial edge cases for savgol_filter().""" + x = np.array([1.0]) + y = savgol_filter(x, 1, 0) + assert_equal(y, [1.0]) + + # Input is a single value. With a window length of 3 and polyorder 1, + # the value in y is from the straight-line fit of (-1,0), (0,3) and + # (1, 0) at 0. 
This is just the average of the three values, hence 1.0. + x = np.array([3.0]) + y = savgol_filter(x, 3, 1, mode='constant') + assert_almost_equal(y, [1.0], decimal=15) + + x = np.array([3.0]) + y = savgol_filter(x, 3, 1, mode='nearest') + assert_almost_equal(y, [3.0], decimal=15) + + x = np.array([1.0] * 3) + y = savgol_filter(x, 3, 1, mode='wrap') + assert_almost_equal(y, [1.0, 1.0, 1.0], decimal=15) + + +def test_sg_filter_basic(): + # Some basic test cases for savgol_filter(). + x = np.array([1.0, 2.0, 1.0]) + y = savgol_filter(x, 3, 1, mode='constant') + xp_assert_close(y, [1.0, 4.0 / 3, 1.0]) + + y = savgol_filter(x, 3, 1, mode='mirror') + xp_assert_close(y, [5.0 / 3, 4.0 / 3, 5.0 / 3]) + + y = savgol_filter(x, 3, 1, mode='wrap') + xp_assert_close(y, [4.0 / 3, 4.0 / 3, 4.0 / 3]) + + +def test_sg_filter_2d(): + x = np.array([[1.0, 2.0, 1.0], + [2.0, 4.0, 2.0]]) + expected = np.array([[1.0, 4.0 / 3, 1.0], + [2.0, 8.0 / 3, 2.0]]) + y = savgol_filter(x, 3, 1, mode='constant') + xp_assert_close(y, expected) + + y = savgol_filter(x.T, 3, 1, mode='constant', axis=0) + xp_assert_close(y, expected.T) + + +def test_sg_filter_interp_edges(): + # Another test with low degree polynomial data, for which we can easily + # give the exact results. In this test, we use mode='interp', so + # savgol_filter should match the exact solution for the entire data set, + # including the edges. + t = np.linspace(-5, 5, 21) + delta = t[1] - t[0] + # Polynomial test data. + x = np.array([t, + 3 * t ** 2, + t ** 3 - t]) + dx = np.array([np.ones_like(t), + 6 * t, + 3 * t ** 2 - 1.0]) + d2x = np.array([np.zeros_like(t), + np.full_like(t, 6), + 6 * t]) + + window_length = 7 + + y = savgol_filter(x, window_length, 3, axis=-1, mode='interp') + xp_assert_close(y, x, atol=1e-12) + + y1 = savgol_filter(x, window_length, 3, axis=-1, mode='interp', + deriv=1, delta=delta) + xp_assert_close(y1, dx, atol=1e-12) + + y2 = savgol_filter(x, window_length, 3, axis=-1, mode='interp', + deriv=2, delta=delta) + xp_assert_close(y2, d2x, atol=1e-12) + + # Transpose everything, and test again with axis=0. + + x = x.T + dx = dx.T + d2x = d2x.T + + y = savgol_filter(x, window_length, 3, axis=0, mode='interp') + xp_assert_close(y, x, atol=1e-12) + + y1 = savgol_filter(x, window_length, 3, axis=0, mode='interp', + deriv=1, delta=delta) + xp_assert_close(y1, dx, atol=1e-12) + + y2 = savgol_filter(x, window_length, 3, axis=0, mode='interp', + deriv=2, delta=delta) + xp_assert_close(y2, d2x, atol=1e-12) + + +def test_sg_filter_interp_edges_3d(): + # Test mode='interp' with a 3-D array. 
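+ # The stacked signals below are polynomials of degree <= 3, so filtering with
+ # window_length=7 and polyorder=3 in 'interp' mode should reproduce the data
+ # (and its first derivative) essentially exactly, including at the edges.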
+ t = np.linspace(-5, 5, 21) + delta = t[1] - t[0] + x1 = np.array([t, -t]) + x2 = np.array([t ** 2, 3 * t ** 2 + 5]) + x3 = np.array([t ** 3, 2 * t ** 3 + t ** 2 - 0.5 * t]) + dx1 = np.array([np.ones_like(t), -np.ones_like(t)]) + dx2 = np.array([2 * t, 6 * t]) + dx3 = np.array([3 * t ** 2, 6 * t ** 2 + 2 * t - 0.5]) + + # z has shape (3, 2, 21) + z = np.array([x1, x2, x3]) + dz = np.array([dx1, dx2, dx3]) + + y = savgol_filter(z, 7, 3, axis=-1, mode='interp', delta=delta) + xp_assert_close(y, z, atol=1e-10) + + dy = savgol_filter(z, 7, 3, axis=-1, mode='interp', deriv=1, delta=delta) + xp_assert_close(dy, dz, atol=1e-10) + + # z has shape (3, 21, 2) + z = np.array([x1.T, x2.T, x3.T]) + dz = np.array([dx1.T, dx2.T, dx3.T]) + + y = savgol_filter(z, 7, 3, axis=1, mode='interp', delta=delta) + xp_assert_close(y, z, atol=1e-10) + + dy = savgol_filter(z, 7, 3, axis=1, mode='interp', deriv=1, delta=delta) + xp_assert_close(dy, dz, atol=1e-10) + + # z has shape (21, 3, 2) + z = z.swapaxes(0, 1).copy() + dz = dz.swapaxes(0, 1).copy() + + y = savgol_filter(z, 7, 3, axis=0, mode='interp', delta=delta) + xp_assert_close(y, z, atol=1e-10) + + dy = savgol_filter(z, 7, 3, axis=0, mode='interp', deriv=1, delta=delta) + xp_assert_close(dy, dz, atol=1e-10) + + +def test_sg_filter_valid_window_length_3d(): + """Tests that the window_length check is using the correct axis.""" + + x = np.ones((10, 20, 30)) + + savgol_filter(x, window_length=29, polyorder=3, mode='interp') + + with pytest.raises(ValueError, match='window_length must be less than'): + # window_length is more than x.shape[-1]. + savgol_filter(x, window_length=31, polyorder=3, mode='interp') + + savgol_filter(x, window_length=9, polyorder=3, axis=0, mode='interp') + + with pytest.raises(ValueError, match='window_length must be less than'): + # window_length is more than x.shape[0]. 
+ savgol_filter(x, window_length=11, polyorder=3, axis=0, mode='interp') diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/tests/test_signaltools.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/tests/test_signaltools.py new file mode 100644 index 0000000000000000000000000000000000000000..7953fcb5ff7634f1c01d96920c1dd069cacf0c0c --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/tests/test_signaltools.py @@ -0,0 +1,3933 @@ +import sys + +from concurrent.futures import ThreadPoolExecutor, as_completed +from decimal import Decimal +from itertools import product +from math import gcd + +import pytest +from pytest import raises as assert_raises +from numpy.testing import ( + assert_equal, + assert_almost_equal, assert_array_equal, assert_array_almost_equal, + assert_allclose, assert_, assert_array_less, + suppress_warnings) +from numpy import array, arange +import numpy as np + +from scipy import fft as sp_fft +from scipy.ndimage import correlate1d +from scipy.optimize import fmin, linear_sum_assignment +from scipy import signal +from scipy.signal import ( + correlate, correlate2d, correlation_lags, convolve, convolve2d, + fftconvolve, oaconvolve, choose_conv_method, envelope, + hilbert, hilbert2, lfilter, lfilter_zi, filtfilt, butter, zpk2tf, zpk2sos, + invres, invresz, vectorstrength, lfiltic, tf2sos, sosfilt, sosfiltfilt, + sosfilt_zi, tf2zpk, BadCoefficients, detrend, unique_roots, residue, + residuez) +from scipy.signal.windows import hann +from scipy.signal._signaltools import (_filtfilt_gust, _compute_factors, + _group_poles) +from scipy.signal._upfirdn import _upfirdn_modes +from scipy._lib import _testutils +from scipy._lib._array_api import xp_assert_close +from scipy._lib._util import ComplexWarning, np_long, np_ulong + + +class _TestConvolve: + + def test_basic(self): + a = [3, 4, 5, 6, 5, 4] + b = [1, 2, 3] + c = convolve(a, b) + assert_array_equal(c, array([3, 10, 22, 28, 32, 32, 23, 12])) + + def test_same(self): + a = [3, 4, 5] + b = [1, 2, 3, 4] + c = convolve(a, b, mode="same") + assert_array_equal(c, array([10, 22, 34])) + + def test_same_eq(self): + a = [3, 4, 5] + b = [1, 2, 3] + c = convolve(a, b, mode="same") + assert_array_equal(c, array([10, 22, 22])) + + def test_complex(self): + x = array([1 + 1j, 2 + 1j, 3 + 1j]) + y = array([1 + 1j, 2 + 1j]) + z = convolve(x, y) + assert_array_equal(z, array([2j, 2 + 6j, 5 + 8j, 5 + 5j])) + + def test_zero_rank(self): + a = 1289 + b = 4567 + c = convolve(a, b) + assert_equal(c, a * b) + + def test_broadcastable(self): + a = np.arange(27).reshape(3, 3, 3) + b = np.arange(3) + for i in range(3): + b_shape = [1]*3 + b_shape[i] = 3 + x = convolve(a, b.reshape(b_shape), method='direct') + y = convolve(a, b.reshape(b_shape), method='fft') + assert_allclose(x, y) + + def test_single_element(self): + a = array([4967]) + b = array([3920]) + c = convolve(a, b) + assert_equal(c, a * b) + + def test_2d_arrays(self): + a = [[1, 2, 3], [3, 4, 5]] + b = [[2, 3, 4], [4, 5, 6]] + c = convolve(a, b) + d = array([[2, 7, 16, 17, 12], + [10, 30, 62, 58, 38], + [12, 31, 58, 49, 30]]) + assert_array_equal(c, d) + + def test_input_swapping(self): + small = arange(8).reshape(2, 2, 2) + big = 1j * arange(27).reshape(3, 3, 3) + big += arange(27)[::-1].reshape(3, 3, 3) + + out_array = array( + [[[0 + 0j, 26 + 0j, 25 + 1j, 24 + 2j], + [52 + 0j, 151 + 5j, 145 + 11j, 93 + 11j], + [46 + 6j, 133 + 23j, 127 
+ 29j, 81 + 23j], + [40 + 12j, 98 + 32j, 93 + 37j, 54 + 24j]], + + [[104 + 0j, 247 + 13j, 237 + 23j, 135 + 21j], + [282 + 30j, 632 + 96j, 604 + 124j, 330 + 86j], + [246 + 66j, 548 + 180j, 520 + 208j, 282 + 134j], + [142 + 66j, 307 + 161j, 289 + 179j, 153 + 107j]], + + [[68 + 36j, 157 + 103j, 147 + 113j, 81 + 75j], + [174 + 138j, 380 + 348j, 352 + 376j, 186 + 230j], + [138 + 174j, 296 + 432j, 268 + 460j, 138 + 278j], + [70 + 138j, 145 + 323j, 127 + 341j, 63 + 197j]], + + [[32 + 72j, 68 + 166j, 59 + 175j, 30 + 100j], + [68 + 192j, 139 + 433j, 117 + 455j, 57 + 255j], + [38 + 222j, 73 + 499j, 51 + 521j, 21 + 291j], + [12 + 144j, 20 + 318j, 7 + 331j, 0 + 182j]]]) + + assert_array_equal(convolve(small, big, 'full'), out_array) + assert_array_equal(convolve(big, small, 'full'), out_array) + assert_array_equal(convolve(small, big, 'same'), + out_array[1:3, 1:3, 1:3]) + assert_array_equal(convolve(big, small, 'same'), + out_array[0:3, 0:3, 0:3]) + assert_array_equal(convolve(small, big, 'valid'), + out_array[1:3, 1:3, 1:3]) + assert_array_equal(convolve(big, small, 'valid'), + out_array[1:3, 1:3, 1:3]) + + def test_invalid_params(self): + a = [3, 4, 5] + b = [1, 2, 3] + assert_raises(ValueError, convolve, a, b, mode='spam') + assert_raises(ValueError, convolve, a, b, mode='eggs', method='fft') + assert_raises(ValueError, convolve, a, b, mode='ham', method='direct') + assert_raises(ValueError, convolve, a, b, mode='full', method='bacon') + assert_raises(ValueError, convolve, a, b, mode='same', method='bacon') + + +class TestConvolve(_TestConvolve): + + def test_valid_mode2(self): + # See gh-5897 + a = [1, 2, 3, 6, 5, 3] + b = [2, 3, 4, 5, 3, 4, 2, 2, 1] + expected = [70, 78, 73, 65] + + out = convolve(a, b, 'valid') + assert_array_equal(out, expected) + + out = convolve(b, a, 'valid') + assert_array_equal(out, expected) + + a = [1 + 5j, 2 - 1j, 3 + 0j] + b = [2 - 3j, 1 + 0j] + expected = [2 - 3j, 8 - 10j] + + out = convolve(a, b, 'valid') + assert_array_equal(out, expected) + + out = convolve(b, a, 'valid') + assert_array_equal(out, expected) + + def test_same_mode(self): + a = [1, 2, 3, 3, 1, 2] + b = [1, 4, 3, 4, 5, 6, 7, 4, 3, 2, 1, 1, 3] + c = convolve(a, b, 'same') + d = array([57, 61, 63, 57, 45, 36]) + assert_array_equal(c, d) + + def test_invalid_shapes(self): + # By "invalid," we mean that no one + # array has dimensions that are all at + # least as large as the corresponding + # dimensions of the other array. This + # setup should throw a ValueError. 
+ a = np.arange(1, 7).reshape((2, 3)) + b = np.arange(-6, 0).reshape((3, 2)) + + assert_raises(ValueError, convolve, *(a, b), **{'mode': 'valid'}) + assert_raises(ValueError, convolve, *(b, a), **{'mode': 'valid'}) + + def test_convolve_method(self, n=100): + # this set of types was manually encoded instead of being built by + # filtering the soon-to-be-removed np.sctypes + types = {'uint16', 'uint64', 'int64', 'int32', + 'complex128', 'float64', 'float16', + 'complex64', 'float32', 'int16', + 'uint8', 'uint32', 'int8', 'bool'} + args = [(t1, t2, mode) for t1 in types for t2 in types + for mode in ['valid', 'full', 'same']] + + # These are random arrays, which makes the test much stronger than + # simply convolving two np.ones arrays + rng = np.random.RandomState(42) + array_types = {'i': rng.choice([0, 1], size=n), + 'f': rng.randn(n)} + array_types['b'] = array_types['u'] = array_types['i'] + array_types['c'] = array_types['f'] + 0.5j*array_types['f'] + + for t1, t2, mode in args: + x1 = array_types[np.dtype(t1).kind].astype(t1) + x2 = array_types[np.dtype(t2).kind].astype(t2) + + results = {key: convolve(x1, x2, method=key, mode=mode) + for key in ['fft', 'direct']} + + assert_equal(results['fft'].dtype, results['direct'].dtype) + + if 'bool' in t1 and 'bool' in t2: + assert_equal(choose_conv_method(x1, x2), 'direct') + continue + + # Found by experiment: approximately the smallest (rtol, atol) + # thresholds for which the tests pass. + if any([t in {'complex64', 'float32'} for t in [t1, t2]]): + kwargs = {'rtol': 1.0e-4, 'atol': 1e-6} + elif 'float16' in [t1, t2]: + # atol is default for np.allclose + kwargs = {'rtol': 1e-3, 'atol': 1e-3} + else: + # defaults for np.allclose (different from assert_allclose) + kwargs = {'rtol': 1e-5, 'atol': 1e-8} + + assert_allclose(results['fft'], results['direct'], **kwargs) + + def test_convolve_method_large_input(self): + # This is really a test that convolving two large integers goes to the + # direct method even if the fft method is requested.
+ for n in [10, 20, 50, 51, 52, 53, 54, 60, 62]: + z = np.array([2**n], dtype=np.int64) + fft = convolve(z, z, method='fft') + direct = convolve(z, z, method='direct') + + # this is the case when integer precision gets to us + # issue #6076 has more detail, hopefully more tests after resolved + if n < 50: + assert_equal(fft, direct) + assert_equal(fft, 2**(2*n)) + assert_equal(direct, 2**(2*n)) + + def test_mismatched_dims(self): + # Input arrays should have the same number of dimensions + assert_raises(ValueError, convolve, [1], 2, method='direct') + assert_raises(ValueError, convolve, 1, [2], method='direct') + assert_raises(ValueError, convolve, [1], 2, method='fft') + assert_raises(ValueError, convolve, 1, [2], method='fft') + assert_raises(ValueError, convolve, [1], [[2]]) + assert_raises(ValueError, convolve, [3], 2) + + @pytest.mark.thread_unsafe + def test_dtype_deprecation(self): + # gh-21211 + a = np.asarray([1, 2, 3, 6, 5, 3], dtype=object) + b = np.asarray([2, 3, 4, 5, 3, 4, 2, 2, 1], dtype=object) + with pytest.deprecated_call(match="dtype=object is not supported"): + convolve(a, b) + + +class _TestConvolve2d: + + def test_2d_arrays(self): + a = [[1, 2, 3], [3, 4, 5]] + b = [[2, 3, 4], [4, 5, 6]] + d = array([[2, 7, 16, 17, 12], + [10, 30, 62, 58, 38], + [12, 31, 58, 49, 30]]) + e = convolve2d(a, b) + assert_array_equal(e, d) + + def test_valid_mode(self): + e = [[2, 3, 4, 5, 6, 7, 8], [4, 5, 6, 7, 8, 9, 10]] + f = [[1, 2, 3], [3, 4, 5]] + h = array([[62, 80, 98, 116, 134]]) + + g = convolve2d(e, f, 'valid') + assert_array_equal(g, h) + + # See gh-5897 + g = convolve2d(f, e, 'valid') + assert_array_equal(g, h) + + def test_valid_mode_complx(self): + e = [[2, 3, 4, 5, 6, 7, 8], [4, 5, 6, 7, 8, 9, 10]] + f = np.array([[1, 2, 3], [3, 4, 5]], dtype=complex) + 1j + h = array([[62.+24.j, 80.+30.j, 98.+36.j, 116.+42.j, 134.+48.j]]) + + g = convolve2d(e, f, 'valid') + assert_array_almost_equal(g, h) + + # See gh-5897 + g = convolve2d(f, e, 'valid') + assert_array_equal(g, h) + + def test_fillvalue(self): + a = [[1, 2, 3], [3, 4, 5]] + b = [[2, 3, 4], [4, 5, 6]] + fillval = 1 + c = convolve2d(a, b, 'full', 'fill', fillval) + d = array([[24, 26, 31, 34, 32], + [28, 40, 62, 64, 52], + [32, 46, 67, 62, 48]]) + assert_array_equal(c, d) + + def test_fillvalue_errors(self): + msg = "could not cast `fillvalue` directly to the output " + with np.testing.suppress_warnings() as sup: + sup.filter(ComplexWarning, "Casting complex values") + with assert_raises(ValueError, match=msg): + convolve2d([[1]], [[1, 2]], fillvalue=1j) + + msg = "`fillvalue` must be scalar or an array with " + with assert_raises(ValueError, match=msg): + convolve2d([[1]], [[1, 2]], fillvalue=[1, 2]) + + def test_fillvalue_empty(self): + # Check that fillvalue being empty raises an error: + assert_raises(ValueError, convolve2d, [[1]], [[1, 2]], + fillvalue=[]) + + def test_wrap_boundary(self): + a = [[1, 2, 3], [3, 4, 5]] + b = [[2, 3, 4], [4, 5, 6]] + c = convolve2d(a, b, 'full', 'wrap') + d = array([[80, 80, 74, 80, 80], + [68, 68, 62, 68, 68], + [80, 80, 74, 80, 80]]) + assert_array_equal(c, d) + + def test_sym_boundary(self): + a = [[1, 2, 3], [3, 4, 5]] + b = [[2, 3, 4], [4, 5, 6]] + c = convolve2d(a, b, 'full', 'symm') + d = array([[34, 30, 44, 62, 66], + [52, 48, 62, 80, 84], + [82, 78, 92, 110, 114]]) + assert_array_equal(c, d) + + @pytest.mark.parametrize('func', [convolve2d, correlate2d]) + @pytest.mark.parametrize('boundary, expected', + [('symm', [[37.0, 42.0, 44.0, 45.0]]), + ('wrap', [[43.0, 44.0, 42.0, 
39.0]])]) + def test_same_with_boundary(self, func, boundary, expected): + # Test boundary='symm' and boundary='wrap' with a "long" kernel. + # The size of the kernel requires that the values in the "image" + # be extended more than once to handle the requested boundary method. + # This is a regression test for gh-8684 and gh-8814. + image = np.array([[2.0, -1.0, 3.0, 4.0]]) + kernel = np.ones((1, 21)) + result = func(image, kernel, mode='same', boundary=boundary) + # The expected results were calculated "by hand". Because the + # kernel is all ones, the same result is expected for convolve2d + # and correlate2d. + assert_array_equal(result, expected) + + def test_boundary_extension_same(self): + # Regression test for gh-12686. + # Use ndimage.convolve with appropriate arguments to create the + # expected result. + import scipy.ndimage as ndi + a = np.arange(1, 10*3+1, dtype=float).reshape(10, 3) + b = np.arange(1, 10*10+1, dtype=float).reshape(10, 10) + c = convolve2d(a, b, mode='same', boundary='wrap') + assert_array_equal(c, ndi.convolve(a, b, mode='wrap', origin=(-1, -1))) + + def test_boundary_extension_full(self): + # Regression test for gh-12686. + # Use ndimage.convolve with appropriate arguments to create the + # expected result. + import scipy.ndimage as ndi + a = np.arange(1, 3*3+1, dtype=float).reshape(3, 3) + b = np.arange(1, 6*6+1, dtype=float).reshape(6, 6) + c = convolve2d(a, b, mode='full', boundary='wrap') + apad = np.pad(a, ((3, 3), (3, 3)), 'wrap') + assert_array_equal(c, ndi.convolve(apad, b, mode='wrap')[:-1, :-1]) + + def test_invalid_shapes(self): + # By "invalid," we mean that no one + # array has dimensions that are all at + # least as large as the corresponding + # dimensions of the other array. This + # setup should throw a ValueError. + a = np.arange(1, 7).reshape((2, 3)) + b = np.arange(-6, 0).reshape((3, 2)) + + assert_raises(ValueError, convolve2d, *(a, b), **{'mode': 'valid'}) + assert_raises(ValueError, convolve2d, *(b, a), **{'mode': 'valid'}) + + +class TestConvolve2d(_TestConvolve2d): + + def test_same_mode(self): + e = [[1, 2, 3], [3, 4, 5]] + f = [[2, 3, 4, 5, 6, 7, 8], [4, 5, 6, 7, 8, 9, 10]] + g = convolve2d(e, f, 'same') + h = array([[22, 28, 34], + [80, 98, 116]]) + assert_array_equal(g, h) + + def test_valid_mode2(self): + # See gh-5897 + e = [[1, 2, 3], [3, 4, 5]] + f = [[2, 3, 4, 5, 6, 7, 8], [4, 5, 6, 7, 8, 9, 10]] + expected = [[62, 80, 98, 116, 134]] + + out = convolve2d(e, f, 'valid') + assert_array_equal(out, expected) + + out = convolve2d(f, e, 'valid') + assert_array_equal(out, expected) + + e = [[1 + 1j, 2 - 3j], [3 + 1j, 4 + 0j]] + f = [[2 - 1j, 3 + 2j, 4 + 0j], [4 - 0j, 5 + 1j, 6 - 3j]] + expected = [[27 - 1j, 46. 
+ 2j]] + + out = convolve2d(e, f, 'valid') + assert_array_equal(out, expected) + + # See gh-5897 + out = convolve2d(f, e, 'valid') + assert_array_equal(out, expected) + + def test_consistency_convolve_funcs(self): + # Compare np.convolve, signal.convolve, signal.convolve2d + a = np.arange(5) + b = np.array([3.2, 1.4, 3]) + for mode in ['full', 'valid', 'same']: + assert_almost_equal(np.convolve(a, b, mode=mode), + signal.convolve(a, b, mode=mode)) + assert_almost_equal(np.squeeze( + signal.convolve2d([a], [b], mode=mode)), + signal.convolve(a, b, mode=mode)) + + def test_invalid_dims(self): + assert_raises(ValueError, convolve2d, 3, 4) + assert_raises(ValueError, convolve2d, [3], [4]) + assert_raises(ValueError, convolve2d, [[[3]]], [[[4]]]) + + @pytest.mark.slow + @pytest.mark.xfail_on_32bit("Can't create large array for test") + def test_large_array(self): + # Test indexing doesn't overflow an int (gh-10761) + n = 2**31 // (1000 * np.int64().itemsize) + _testutils.check_free_memory(2 * n * 1001 * np.int64().itemsize / 1e6) + + # Create a chequered pattern of 1s and 0s + a = np.zeros(1001 * n, dtype=np.int64) + a[::2] = 1 + a = np.lib.stride_tricks.as_strided(a, shape=(n, 1000), strides=(8008, 8)) + + count = signal.convolve2d(a, [[1, 1]]) + fails = np.where(count > 1) + assert fails[0].size == 0 + + +class TestFFTConvolve: + + @pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]]) + def test_real(self, axes): + a = array([1, 2, 3]) + expected = array([1, 4, 10, 12, 9.]) + + if axes == '': + out = fftconvolve(a, a) + else: + out = fftconvolve(a, a, axes=axes) + + assert_array_almost_equal(out, expected) + + @pytest.mark.parametrize('axes', [1, [1], -1, [-1]]) + def test_real_axes(self, axes): + a = array([1, 2, 3]) + expected = array([1, 4, 10, 12, 9.]) + + a = np.tile(a, [2, 1]) + expected = np.tile(expected, [2, 1]) + + out = fftconvolve(a, a, axes=axes) + assert_array_almost_equal(out, expected) + + @pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]]) + def test_complex(self, axes): + a = array([1 + 1j, 2 + 2j, 3 + 3j]) + expected = array([0 + 2j, 0 + 8j, 0 + 20j, 0 + 24j, 0 + 18j]) + + if axes == '': + out = fftconvolve(a, a) + else: + out = fftconvolve(a, a, axes=axes) + assert_array_almost_equal(out, expected) + + @pytest.mark.parametrize('axes', [1, [1], -1, [-1]]) + def test_complex_axes(self, axes): + a = array([1 + 1j, 2 + 2j, 3 + 3j]) + expected = array([0 + 2j, 0 + 8j, 0 + 20j, 0 + 24j, 0 + 18j]) + + a = np.tile(a, [2, 1]) + expected = np.tile(expected, [2, 1]) + + out = fftconvolve(a, a, axes=axes) + assert_array_almost_equal(out, expected) + + @pytest.mark.parametrize('axes', ['', + None, + [0, 1], + [1, 0], + [0, -1], + [-1, 0], + [-2, 1], + [1, -2], + [-2, -1], + [-1, -2]]) + def test_2d_real_same(self, axes): + a = array([[1, 2, 3], + [4, 5, 6]]) + expected = array([[1, 4, 10, 12, 9], + [8, 26, 56, 54, 36], + [16, 40, 73, 60, 36]]) + + if axes == '': + out = fftconvolve(a, a) + else: + out = fftconvolve(a, a, axes=axes) + assert_array_almost_equal(out, expected) + + @pytest.mark.parametrize('axes', [[1, 2], + [2, 1], + [1, -1], + [-1, 1], + [-2, 2], + [2, -2], + [-2, -1], + [-1, -2]]) + def test_2d_real_same_axes(self, axes): + a = array([[1, 2, 3], + [4, 5, 6]]) + expected = array([[1, 4, 10, 12, 9], + [8, 26, 56, 54, 36], + [16, 40, 73, 60, 36]]) + + a = np.tile(a, [2, 1, 1]) + expected = np.tile(expected, [2, 1, 1]) + + out = fftconvolve(a, a, axes=axes) + assert_array_almost_equal(out, expected) + + @pytest.mark.parametrize('axes', ['', + None, + [0, 
1], + [1, 0], + [0, -1], + [-1, 0], + [-2, 1], + [1, -2], + [-2, -1], + [-1, -2]]) + def test_2d_complex_same(self, axes): + a = array([[1 + 2j, 3 + 4j, 5 + 6j], + [2 + 1j, 4 + 3j, 6 + 5j]]) + expected = array([ + [-3 + 4j, -10 + 20j, -21 + 56j, -18 + 76j, -11 + 60j], + [10j, 44j, 118j, 156j, 122j], + [3 + 4j, 10 + 20j, 21 + 56j, 18 + 76j, 11 + 60j] + ]) + + if axes == '': + out = fftconvolve(a, a) + else: + out = fftconvolve(a, a, axes=axes) + + assert_array_almost_equal(out, expected) + + @pytest.mark.parametrize('axes', [[1, 2], + [2, 1], + [1, -1], + [-1, 1], + [-2, 2], + [2, -2], + [-2, -1], + [-1, -2]]) + def test_2d_complex_same_axes(self, axes): + a = array([[1 + 2j, 3 + 4j, 5 + 6j], + [2 + 1j, 4 + 3j, 6 + 5j]]) + expected = array([ + [-3 + 4j, -10 + 20j, -21 + 56j, -18 + 76j, -11 + 60j], + [10j, 44j, 118j, 156j, 122j], + [3 + 4j, 10 + 20j, 21 + 56j, 18 + 76j, 11 + 60j] + ]) + + a = np.tile(a, [2, 1, 1]) + expected = np.tile(expected, [2, 1, 1]) + + out = fftconvolve(a, a, axes=axes) + assert_array_almost_equal(out, expected) + + @pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]]) + def test_real_same_mode(self, axes): + a = array([1, 2, 3]) + b = array([3, 3, 5, 6, 8, 7, 9, 0, 1]) + expected_1 = array([35., 41., 47.]) + expected_2 = array([9., 20., 25., 35., 41., 47., 39., 28., 2.]) + + if axes == '': + out = fftconvolve(a, b, 'same') + else: + out = fftconvolve(a, b, 'same', axes=axes) + assert_array_almost_equal(out, expected_1) + + if axes == '': + out = fftconvolve(b, a, 'same') + else: + out = fftconvolve(b, a, 'same', axes=axes) + assert_array_almost_equal(out, expected_2) + + @pytest.mark.parametrize('axes', [1, -1, [1], [-1]]) + def test_real_same_mode_axes(self, axes): + a = array([1, 2, 3]) + b = array([3, 3, 5, 6, 8, 7, 9, 0, 1]) + expected_1 = array([35., 41., 47.]) + expected_2 = array([9., 20., 25., 35., 41., 47., 39., 28., 2.]) + + a = np.tile(a, [2, 1]) + b = np.tile(b, [2, 1]) + expected_1 = np.tile(expected_1, [2, 1]) + expected_2 = np.tile(expected_2, [2, 1]) + + out = fftconvolve(a, b, 'same', axes=axes) + assert_array_almost_equal(out, expected_1) + + out = fftconvolve(b, a, 'same', axes=axes) + assert_array_almost_equal(out, expected_2) + + @pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]]) + def test_valid_mode_real(self, axes): + # See gh-5897 + a = array([3, 2, 1]) + b = array([3, 3, 5, 6, 8, 7, 9, 0, 1]) + expected = array([24., 31., 41., 43., 49., 25., 12.]) + + if axes == '': + out = fftconvolve(a, b, 'valid') + else: + out = fftconvolve(a, b, 'valid', axes=axes) + assert_array_almost_equal(out, expected) + + if axes == '': + out = fftconvolve(b, a, 'valid') + else: + out = fftconvolve(b, a, 'valid', axes=axes) + assert_array_almost_equal(out, expected) + + @pytest.mark.parametrize('axes', [1, [1]]) + def test_valid_mode_real_axes(self, axes): + # See gh-5897 + a = array([3, 2, 1]) + b = array([3, 3, 5, 6, 8, 7, 9, 0, 1]) + expected = array([24., 31., 41., 43., 49., 25., 12.]) + + a = np.tile(a, [2, 1]) + b = np.tile(b, [2, 1]) + expected = np.tile(expected, [2, 1]) + + out = fftconvolve(a, b, 'valid', axes=axes) + assert_array_almost_equal(out, expected) + + @pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]]) + def test_valid_mode_complex(self, axes): + a = array([3 - 1j, 2 + 7j, 1 + 0j]) + b = array([3 + 2j, 3 - 3j, 5 + 0j, 6 - 1j, 8 + 0j]) + expected = array([45. + 12.j, 30. 
+ 23.j, 48 + 32.j]) + + if axes == '': + out = fftconvolve(a, b, 'valid') + else: + out = fftconvolve(a, b, 'valid', axes=axes) + assert_array_almost_equal(out, expected) + + if axes == '': + out = fftconvolve(b, a, 'valid') + else: + out = fftconvolve(b, a, 'valid', axes=axes) + assert_array_almost_equal(out, expected) + + @pytest.mark.parametrize('axes', [1, [1], -1, [-1]]) + def test_valid_mode_complex_axes(self, axes): + a = array([3 - 1j, 2 + 7j, 1 + 0j]) + b = array([3 + 2j, 3 - 3j, 5 + 0j, 6 - 1j, 8 + 0j]) + expected = array([45. + 12.j, 30. + 23.j, 48 + 32.j]) + + a = np.tile(a, [2, 1]) + b = np.tile(b, [2, 1]) + expected = np.tile(expected, [2, 1]) + + out = fftconvolve(a, b, 'valid', axes=axes) + assert_array_almost_equal(out, expected) + + out = fftconvolve(b, a, 'valid', axes=axes) + assert_array_almost_equal(out, expected) + + def test_valid_mode_ignore_nonaxes(self): + # See gh-5897 + a = array([3, 2, 1]) + b = array([3, 3, 5, 6, 8, 7, 9, 0, 1]) + expected = array([24., 31., 41., 43., 49., 25., 12.]) + + a = np.tile(a, [2, 1]) + b = np.tile(b, [1, 1]) + expected = np.tile(expected, [2, 1]) + + out = fftconvolve(a, b, 'valid', axes=1) + assert_array_almost_equal(out, expected) + + def test_empty(self): + # Regression test for #1745: crashes with 0-length input. + assert_(fftconvolve([], []).size == 0) + assert_(fftconvolve([5, 6], []).size == 0) + assert_(fftconvolve([], [7]).size == 0) + + def test_zero_rank(self): + a = array(4967) + b = array(3920) + out = fftconvolve(a, b) + assert_equal(out, a * b) + + def test_single_element(self): + a = array([4967]) + b = array([3920]) + out = fftconvolve(a, b) + assert_equal(out, a * b) + + @pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]]) + def test_random_data(self, axes): + np.random.seed(1234) + a = np.random.rand(1233) + 1j * np.random.rand(1233) + b = np.random.rand(1321) + 1j * np.random.rand(1321) + expected = np.convolve(a, b, 'full') + + if axes == '': + out = fftconvolve(a, b, 'full') + else: + out = fftconvolve(a, b, 'full', axes=axes) + assert_(np.allclose(out, expected, rtol=1e-10)) + + @pytest.mark.parametrize('axes', [1, [1], -1, [-1]]) + def test_random_data_axes(self, axes): + np.random.seed(1234) + a = np.random.rand(1233) + 1j * np.random.rand(1233) + b = np.random.rand(1321) + 1j * np.random.rand(1321) + expected = np.convolve(a, b, 'full') + + a = np.tile(a, [2, 1]) + b = np.tile(b, [2, 1]) + expected = np.tile(expected, [2, 1]) + + out = fftconvolve(a, b, 'full', axes=axes) + assert_(np.allclose(out, expected, rtol=1e-10)) + + @pytest.mark.parametrize('axes', [[1, 4], + [4, 1], + [1, -1], + [-1, 1], + [-4, 4], + [4, -4], + [-4, -1], + [-1, -4]]) + def test_random_data_multidim_axes(self, axes): + a_shape, b_shape = (123, 22), (132, 11) + np.random.seed(1234) + a = np.random.rand(*a_shape) + 1j * np.random.rand(*a_shape) + b = np.random.rand(*b_shape) + 1j * np.random.rand(*b_shape) + expected = convolve2d(a, b, 'full') + + a = a[:, :, None, None, None] + b = b[:, :, None, None, None] + expected = expected[:, :, None, None, None] + + a = np.moveaxis(a.swapaxes(0, 2), 1, 4) + b = np.moveaxis(b.swapaxes(0, 2), 1, 4) + expected = np.moveaxis(expected.swapaxes(0, 2), 1, 4) + + # use 1 for dimension 2 in a and 3 in b to test broadcasting + a = np.tile(a, [2, 1, 3, 1, 1]) + b = np.tile(b, [2, 1, 1, 4, 1]) + expected = np.tile(expected, [2, 1, 3, 4, 1]) + + out = fftconvolve(a, b, 'full', axes=axes) + assert_allclose(out, expected, rtol=1e-10, atol=1e-10) + + @pytest.mark.slow + @pytest.mark.parametrize( + 
'n', + list(range(1, 100)) + + list(range(1000, 1500)) + + np.random.RandomState(1234).randint(1001, 10000, 5).tolist()) + def test_many_sizes(self, n): + a = np.random.rand(n) + 1j * np.random.rand(n) + b = np.random.rand(n) + 1j * np.random.rand(n) + expected = np.convolve(a, b, 'full') + + out = fftconvolve(a, b, 'full') + assert_allclose(out, expected, atol=1e-10) + + out = fftconvolve(a, b, 'full', axes=[0]) + assert_allclose(out, expected, atol=1e-10) + + @pytest.mark.thread_unsafe + def test_fft_nan(self): + n = 1000 + rng = np.random.default_rng(43876432987) + sig_nan = rng.standard_normal(n) + + for val in [np.nan, np.inf]: + sig_nan[100] = val + coeffs = signal.firwin(200, 0.2) + + msg = "Use of fft convolution.*|invalid value encountered.*" + with pytest.warns(RuntimeWarning, match=msg): + signal.convolve(sig_nan, coeffs, mode='same', method='fft') + +def fftconvolve_err(*args, **kwargs): + raise RuntimeError('Fell back to fftconvolve') + + +def gen_oa_shapes(sizes): + return [(a, b) for a, b in product(sizes, repeat=2) + if abs(a - b) > 3] + + +def gen_oa_shapes_2d(sizes): + shapes0 = gen_oa_shapes(sizes) + shapes1 = gen_oa_shapes(sizes) + shapes = [ishapes0+ishapes1 for ishapes0, ishapes1 in + zip(shapes0, shapes1)] + + modes = ['full', 'valid', 'same'] + return [ishapes+(imode,) for ishapes, imode in product(shapes, modes) + if imode != 'valid' or + (ishapes[0] > ishapes[1] and ishapes[2] > ishapes[3]) or + (ishapes[0] < ishapes[1] and ishapes[2] < ishapes[3])] + + +def gen_oa_shapes_eq(sizes): + return [(a, b) for a, b in product(sizes, repeat=2) + if a >= b] + + +class TestOAConvolve: + @pytest.mark.slow() + @pytest.mark.parametrize('shape_a_0, shape_b_0', + gen_oa_shapes_eq(list(range(100)) + + list(range(100, 1000, 23))) + ) + def test_real_manylens(self, shape_a_0, shape_b_0): + a = np.random.rand(shape_a_0) + b = np.random.rand(shape_b_0) + + expected = fftconvolve(a, b) + out = oaconvolve(a, b) + + assert_array_almost_equal(out, expected) + + @pytest.mark.parametrize('shape_a_0, shape_b_0', + gen_oa_shapes([50, 47, 6, 4, 1])) + @pytest.mark.parametrize('is_complex', [True, False]) + @pytest.mark.parametrize('mode', ['full', 'valid', 'same']) + def test_1d_noaxes(self, shape_a_0, shape_b_0, + is_complex, mode, monkeypatch): + a = np.random.rand(shape_a_0) + b = np.random.rand(shape_b_0) + if is_complex: + a = a + 1j*np.random.rand(shape_a_0) + b = b + 1j*np.random.rand(shape_b_0) + + expected = fftconvolve(a, b, mode=mode) + + monkeypatch.setattr(signal._signaltools, 'fftconvolve', + fftconvolve_err) + out = oaconvolve(a, b, mode=mode) + + assert_array_almost_equal(out, expected) + + @pytest.mark.parametrize('axes', [0, 1]) + @pytest.mark.parametrize('shape_a_0, shape_b_0', + gen_oa_shapes([50, 47, 6, 4])) + @pytest.mark.parametrize('shape_a_extra', [1, 3]) + @pytest.mark.parametrize('shape_b_extra', [1, 3]) + @pytest.mark.parametrize('is_complex', [True, False]) + @pytest.mark.parametrize('mode', ['full', 'valid', 'same']) + def test_1d_axes(self, axes, shape_a_0, shape_b_0, + shape_a_extra, shape_b_extra, + is_complex, mode, monkeypatch): + ax_a = [shape_a_extra]*2 + ax_b = [shape_b_extra]*2 + ax_a[axes] = shape_a_0 + ax_b[axes] = shape_b_0 + + a = np.random.rand(*ax_a) + b = np.random.rand(*ax_b) + if is_complex: + a = a + 1j*np.random.rand(*ax_a) + b = b + 1j*np.random.rand(*ax_b) + + expected = fftconvolve(a, b, mode=mode, axes=axes) + + monkeypatch.setattr(signal._signaltools, 'fftconvolve', + fftconvolve_err) + out = oaconvolve(a, b, mode=mode, axes=axes) + + 
assert_array_almost_equal(out, expected) + + @pytest.mark.parametrize('shape_a_0, shape_b_0, ' + 'shape_a_1, shape_b_1, mode', + gen_oa_shapes_2d([50, 47, 6, 4])) + @pytest.mark.parametrize('is_complex', [True, False]) + def test_2d_noaxes(self, shape_a_0, shape_b_0, + shape_a_1, shape_b_1, mode, + is_complex, monkeypatch): + a = np.random.rand(shape_a_0, shape_a_1) + b = np.random.rand(shape_b_0, shape_b_1) + if is_complex: + a = a + 1j*np.random.rand(shape_a_0, shape_a_1) + b = b + 1j*np.random.rand(shape_b_0, shape_b_1) + + expected = fftconvolve(a, b, mode=mode) + + monkeypatch.setattr(signal._signaltools, 'fftconvolve', + fftconvolve_err) + out = oaconvolve(a, b, mode=mode) + + assert_array_almost_equal(out, expected) + + @pytest.mark.parametrize('axes', [[0, 1], [0, 2], [1, 2]]) + @pytest.mark.parametrize('shape_a_0, shape_b_0, ' + 'shape_a_1, shape_b_1, mode', + gen_oa_shapes_2d([50, 47, 6, 4])) + @pytest.mark.parametrize('shape_a_extra', [1, 3]) + @pytest.mark.parametrize('shape_b_extra', [1, 3]) + @pytest.mark.parametrize('is_complex', [True, False]) + def test_2d_axes(self, axes, shape_a_0, shape_b_0, + shape_a_1, shape_b_1, mode, + shape_a_extra, shape_b_extra, + is_complex, monkeypatch): + ax_a = [shape_a_extra]*3 + ax_b = [shape_b_extra]*3 + ax_a[axes[0]] = shape_a_0 + ax_b[axes[0]] = shape_b_0 + ax_a[axes[1]] = shape_a_1 + ax_b[axes[1]] = shape_b_1 + + a = np.random.rand(*ax_a) + b = np.random.rand(*ax_b) + if is_complex: + a = a + 1j*np.random.rand(*ax_a) + b = b + 1j*np.random.rand(*ax_b) + + expected = fftconvolve(a, b, mode=mode, axes=axes) + + monkeypatch.setattr(signal._signaltools, 'fftconvolve', + fftconvolve_err) + out = oaconvolve(a, b, mode=mode, axes=axes) + + assert_array_almost_equal(out, expected) + + def test_empty(self): + # Regression test for #1745: crashes with 0-length input. 
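+ # An empty input on either side should simply give an empty output,
+ # mirroring the behaviour checked for fftconvolve above.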
+ assert_(oaconvolve([], []).size == 0) + assert_(oaconvolve([5, 6], []).size == 0) + assert_(oaconvolve([], [7]).size == 0) + + def test_zero_rank(self): + a = array(4967) + b = array(3920) + out = oaconvolve(a, b) + assert_equal(out, a * b) + + def test_single_element(self): + a = array([4967]) + b = array([3920]) + out = oaconvolve(a, b) + assert_equal(out, a * b) + + +class TestAllFreqConvolves: + + @pytest.mark.parametrize('convapproach', + [fftconvolve, oaconvolve]) + def test_invalid_shapes(self, convapproach): + a = np.arange(1, 7).reshape((2, 3)) + b = np.arange(-6, 0).reshape((3, 2)) + with assert_raises(ValueError, + match="For 'valid' mode, one must be at least " + "as large as the other in every dimension"): + convapproach(a, b, mode='valid') + + @pytest.mark.parametrize('convapproach', + [fftconvolve, oaconvolve]) + def test_invalid_shapes_axes(self, convapproach): + a = np.zeros([5, 6, 2, 1]) + b = np.zeros([5, 6, 3, 1]) + with assert_raises(ValueError, + match=r"incompatible shapes for in1 and in2:" + r" \(5L?, 6L?, 2L?, 1L?\) and" + r" \(5L?, 6L?, 3L?, 1L?\)"): + convapproach(a, b, axes=[0, 1]) + + @pytest.mark.parametrize('a,b', + [([1], 2), + (1, [2]), + ([3], [[2]])]) + @pytest.mark.parametrize('convapproach', + [fftconvolve, oaconvolve]) + def test_mismatched_dims(self, a, b, convapproach): + with assert_raises(ValueError, + match="in1 and in2 should have the same" + " dimensionality"): + convapproach(a, b) + + @pytest.mark.parametrize('convapproach', + [fftconvolve, oaconvolve]) + def test_invalid_flags(self, convapproach): + with assert_raises(ValueError, + match="acceptable mode flags are 'valid'," + " 'same', or 'full'"): + convapproach([1], [2], mode='chips') + + with assert_raises(ValueError, + match="when provided, axes cannot be empty"): + convapproach([1], [2], axes=[]) + + with assert_raises(ValueError, match="axes must be a scalar or " + "iterable of integers"): + convapproach([1], [2], axes=[[1, 2], [3, 4]]) + + with assert_raises(ValueError, match="axes must be a scalar or " + "iterable of integers"): + convapproach([1], [2], axes=[1., 2., 3., 4.]) + + with assert_raises(ValueError, + match="axes exceeds dimensionality of input"): + convapproach([1], [2], axes=[1]) + + with assert_raises(ValueError, + match="axes exceeds dimensionality of input"): + convapproach([1], [2], axes=[-2]) + + with assert_raises(ValueError, + match="all axes must be unique"): + convapproach([1], [2], axes=[0, 0]) + + @pytest.mark.filterwarnings('ignore::DeprecationWarning') + @pytest.mark.parametrize('dtype', [np.longdouble, np.clongdouble]) + def test_longdtype_input(self, dtype): + x = np.random.random((27, 27)).astype(dtype) + y = np.random.random((4, 4)).astype(dtype) + if np.iscomplexobj(dtype()): + x += .1j + y -= .1j + + res = fftconvolve(x, y) + assert_allclose(res, convolve(x, y, method='direct')) + assert res.dtype == dtype + + +class TestMedFilt: + + IN = [[50, 50, 50, 50, 50, 92, 18, 27, 65, 46], + [50, 50, 50, 50, 50, 0, 72, 77, 68, 66], + [50, 50, 50, 50, 50, 46, 47, 19, 64, 77], + [50, 50, 50, 50, 50, 42, 15, 29, 95, 35], + [50, 50, 50, 50, 50, 46, 34, 9, 21, 66], + [70, 97, 28, 68, 78, 77, 61, 58, 71, 42], + [64, 53, 44, 29, 68, 32, 19, 68, 24, 84], + [3, 33, 53, 67, 1, 78, 74, 55, 12, 83], + [7, 11, 46, 70, 60, 47, 24, 43, 61, 26], + [32, 61, 88, 7, 39, 4, 92, 64, 45, 61]] + + OUT = [[0, 50, 50, 50, 42, 15, 15, 18, 27, 0], + [0, 50, 50, 50, 50, 42, 19, 21, 29, 0], + [50, 50, 50, 50, 50, 47, 34, 34, 46, 35], + [50, 50, 50, 50, 50, 50, 42, 47, 64, 42], + [50, 50, 
50, 50, 50, 50, 46, 55, 64, 35], + [33, 50, 50, 50, 50, 47, 46, 43, 55, 26], + [32, 50, 50, 50, 50, 47, 46, 45, 55, 26], + [7, 46, 50, 50, 47, 46, 46, 43, 45, 21], + [0, 32, 33, 39, 32, 32, 43, 43, 43, 0], + [0, 7, 11, 7, 4, 4, 19, 19, 24, 0]] + + KERNEL_SIZE = [7,3] + + def test_basic(self): + d = signal.medfilt(self.IN, self.KERNEL_SIZE) + e = signal.medfilt2d(np.array(self.IN, float), self.KERNEL_SIZE) + assert_array_equal(d, self.OUT) + assert_array_equal(d, e) + + @pytest.mark.parametrize('dtype', [np.ubyte, np.byte, np.ushort, np.short, + np_ulong, np_long, np.ulonglong, np.ulonglong, + np.float32, np.float64]) + def test_types(self, dtype): + # volume input and output types match + in_typed = np.array(self.IN, dtype=dtype) + assert_equal(signal.medfilt(in_typed).dtype, dtype) + assert_equal(signal.medfilt2d(in_typed).dtype, dtype) + + @pytest.mark.parametrize('dtype', [np.bool_, np.complex64, np.complex128, + np.clongdouble, np.float16, np.object_, + "float96", "float128"]) + def test_invalid_dtypes(self, dtype): + # We can only test this on platforms that support a native type of float96 or + # float128; comparing to np.longdouble allows us to filter out non-native types + if (dtype in ["float96", "float128"] + and np.finfo(np.longdouble).dtype != dtype): + pytest.skip(f"Platform does not support {dtype}") + + in_typed = np.array(self.IN, dtype=dtype) + with pytest.raises(ValueError, match="not supported"): + signal.medfilt(in_typed) + + with pytest.raises(ValueError, match="not supported"): + signal.medfilt2d(in_typed) + + def test_none(self): + # gh-1651, trac #1124. Ensure this does not segfault. + msg = "dtype=object is not supported by medfilt" + with assert_raises(ValueError, match=msg): + signal.medfilt(None) + + def test_odd_strides(self): + # Avoid a regression with possible contiguous + # numpy arrays that have odd strides. The stride value below gets + # us into wrong memory if used (but it does not need to be used) + dummy = np.arange(10, dtype=np.float64) + a = dummy[5:6] + a.strides = 16 + assert_(signal.medfilt(a, 1) == 5.) + + @pytest.mark.parametrize("dtype", [np.ubyte, np.float32, np.float64]) + def test_medfilt2d_parallel(self, dtype): + in_typed = np.array(self.IN, dtype=dtype) + expected = np.array(self.OUT, dtype=dtype) + + # This is used to simplify the indexing calculations. + assert in_typed.shape == expected.shape + + # We'll do the calculation in four chunks. M1 and N1 are the dimensions + # of the first output chunk. We have to extend the input by half the + # kernel size to be able to calculate the full output chunk. + M1 = expected.shape[0] // 2 + N1 = expected.shape[1] // 2 + offM = self.KERNEL_SIZE[0] // 2 + 1 + offN = self.KERNEL_SIZE[1] // 2 + 1 + + def apply(chunk): + # in = slice of in_typed to use. + # sel = slice of output to crop it to the correct region. + # out = slice of output array to store in. + M, N = chunk + if M == 0: + Min = slice(0, M1 + offM) + Msel = slice(0, -offM) + Mout = slice(0, M1) + else: + Min = slice(M1 - offM, None) + Msel = slice(offM, None) + Mout = slice(M1, None) + if N == 0: + Nin = slice(0, N1 + offN) + Nsel = slice(0, -offN) + Nout = slice(0, N1) + else: + Nin = slice(N1 - offN, None) + Nsel = slice(offN, None) + Nout = slice(N1, None) + + # Do the calculation, but do not write to the output in the threads. + chunk_data = in_typed[Min, Nin] + med = signal.medfilt2d(chunk_data, self.KERNEL_SIZE) + return med[Msel, Nsel], Mout, Nout + + # Give each chunk to a different thread. 
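+ # The four quadrants are filtered concurrently; results are written back in
+ # the main thread as futures complete and then compared with the
+ # single-call reference output.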
+ output = np.zeros_like(expected) + with ThreadPoolExecutor(max_workers=4) as pool: + chunks = {(0, 0), (0, 1), (1, 0), (1, 1)} + futures = {pool.submit(apply, chunk) for chunk in chunks} + + # Store each result in the output as it arrives. + for future in as_completed(futures): + data, Mslice, Nslice = future.result() + output[Mslice, Nslice] = data + + assert_array_equal(output, expected) + + +class TestWiener: + + def test_basic(self): + g = array([[5, 6, 4, 3], + [3, 5, 6, 2], + [2, 3, 5, 6], + [1, 6, 9, 7]], 'd') + h = array([[2.16374269, 3.2222222222, 2.8888888889, 1.6666666667], + [2.666666667, 4.33333333333, 4.44444444444, 2.8888888888], + [2.222222222, 4.4444444444, 5.4444444444, 4.801066874837], + [1.33333333333, 3.92735042735, 6.0712560386, 5.0404040404]]) + assert_array_almost_equal(signal.wiener(g), h, decimal=6) + assert_array_almost_equal(signal.wiener(g, mysize=3), h, decimal=6) + + +padtype_options = ["mean", "median", "minimum", "maximum", "line"] +padtype_options += _upfirdn_modes + + +class TestResample: + def test_basic(self): + # Some basic tests + + # Regression test for issue #3603. + # window.shape must equal to sig.shape[0] + sig = np.arange(128) + num = 256 + win = signal.get_window(('kaiser', 8.0), 160) + assert_raises(ValueError, signal.resample, sig, num, window=win) + + # Other degenerate conditions + assert_raises(ValueError, signal.resample_poly, sig, 'yo', 1) + assert_raises(ValueError, signal.resample_poly, sig, 1, 0) + assert_raises(ValueError, signal.resample_poly, sig, 2, 1, padtype='') + assert_raises(ValueError, signal.resample_poly, sig, 2, 1, + padtype='mean', cval=10) + + # test for issue #6505 - should not modify window.shape when axis ≠ 0 + sig2 = np.tile(np.arange(160), (2, 1)) + signal.resample(sig2, num, axis=-1, window=win) + assert_(win.shape == (160,)) + + @pytest.mark.parametrize('window', (None, 'hamming')) + @pytest.mark.parametrize('N', (20, 19)) + @pytest.mark.parametrize('num', (100, 101, 10, 11)) + def test_rfft(self, N, num, window): + # Make sure the speed up using rfft gives the same result as the normal + # way using fft + x = np.linspace(0, 10, N, endpoint=False) + y = np.cos(-x**2/6.0) + assert_allclose(signal.resample(y, num, window=window), + signal.resample(y + 0j, num, window=window).real) + + y = np.array([np.cos(-x**2/6.0), np.sin(-x**2/6.0)]) + y_complex = y + 0j + assert_allclose( + signal.resample(y, num, axis=1, window=window), + signal.resample(y_complex, num, axis=1, window=window).real, + atol=1e-9) + + def test_input_domain(self): + # Test if both input domain modes produce the same results. 
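+ # ('freq' interprets the input as an FFT of the signal, so resampling the spectrum fsig should match resampling the time series tsig directly.)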
+ tsig = np.arange(256) + 0j + fsig = sp_fft.fft(tsig) + num = 256 + assert_allclose( + signal.resample(fsig, num, domain='freq'), + signal.resample(tsig, num, domain='time'), + atol=1e-9) + + @pytest.mark.parametrize('nx', (1, 2, 3, 5, 8)) + @pytest.mark.parametrize('ny', (1, 2, 3, 5, 8)) + @pytest.mark.parametrize('dtype', ('float', 'complex')) + def test_dc(self, nx, ny, dtype): + x = np.array([1] * nx, dtype) + y = signal.resample(x, ny) + assert_allclose(y, [1] * ny) + + @pytest.mark.thread_unsafe # due to Cython fused types, see cython#6506 + @pytest.mark.parametrize('padtype', padtype_options) + def test_mutable_window(self, padtype): + # Test that a mutable window is not modified + impulse = np.zeros(3) + window = np.random.RandomState(0).randn(2) + window_orig = window.copy() + signal.resample_poly(impulse, 5, 1, window=window, padtype=padtype) + assert_array_equal(window, window_orig) + + @pytest.mark.parametrize('padtype', padtype_options) + def test_output_float32(self, padtype): + # Test that float32 inputs yield a float32 output + x = np.arange(10, dtype=np.float32) + h = np.array([1, 1, 1], dtype=np.float32) + y = signal.resample_poly(x, 1, 2, window=h, padtype=padtype) + assert y.dtype == np.float32 + + @pytest.mark.parametrize('padtype', padtype_options) + @pytest.mark.parametrize('dtype', [np.float32, np.float64]) + def test_output_match_dtype(self, padtype, dtype): + # Test that the dtype of x is preserved per issue #14733 + x = np.arange(10, dtype=dtype) + y = signal.resample_poly(x, 1, 2, padtype=padtype) + assert y.dtype == x.dtype + + @pytest.mark.parametrize( + "method, ext, padtype", + [("fft", False, None)] + + list( + product( + ["polyphase"], [False, True], padtype_options, + ) + ), + ) + def test_resample_methods(self, method, ext, padtype): + # Test resampling of sinusoids and random noise (1-sec) + rate = 100 + rates_to = [49, 50, 51, 99, 100, 101, 199, 200, 201] + + # Sinusoids, windowed to avoid edge artifacts + t = np.arange(rate) / float(rate) + freqs = np.array((1., 10., 40.))[:, np.newaxis] + x = np.sin(2 * np.pi * freqs * t) * hann(rate) + + for rate_to in rates_to: + t_to = np.arange(rate_to) / float(rate_to) + y_tos = np.sin(2 * np.pi * freqs * t_to) * hann(rate_to) + if method == 'fft': + y_resamps = signal.resample(x, rate_to, axis=-1) + else: + if ext and rate_to != rate: + # Match default window design + g = gcd(rate_to, rate) + up = rate_to // g + down = rate // g + max_rate = max(up, down) + f_c = 1. / max_rate + half_len = 10 * max_rate + window = signal.firwin(2 * half_len + 1, f_c, + window=('kaiser', 5.0)) + polyargs = {'window': window, 'padtype': padtype} + else: + polyargs = {'padtype': padtype} + + y_resamps = signal.resample_poly(x, rate_to, rate, axis=-1, + **polyargs) + + for y_to, y_resamp, freq in zip(y_tos, y_resamps, freqs): + if freq >= 0.5 * rate_to: + y_to.fill(0.) 
# mostly low-passed away + if padtype in ['minimum', 'maximum']: + assert_allclose(y_resamp, y_to, atol=3e-1) + else: + assert_allclose(y_resamp, y_to, atol=1e-3) + else: + assert_array_equal(y_to.shape, y_resamp.shape) + corr = np.corrcoef(y_to, y_resamp)[0, 1] + assert_(corr > 0.99, msg=(corr, rate, rate_to)) + + # Random data + rng = np.random.RandomState(0) + x = hann(rate) * np.cumsum(rng.randn(rate)) # low-pass, wind + for rate_to in rates_to: + # random data + t_to = np.arange(rate_to) / float(rate_to) + y_to = np.interp(t_to, t, x) + if method == 'fft': + y_resamp = signal.resample(x, rate_to) + else: + y_resamp = signal.resample_poly(x, rate_to, rate, + padtype=padtype) + assert_array_equal(y_to.shape, y_resamp.shape) + corr = np.corrcoef(y_to, y_resamp)[0, 1] + assert_(corr > 0.99, msg=corr) + + # More tests of fft method (Master 0.18.1 fails these) + if method == 'fft': + x1 = np.array([1.+0.j, 0.+0.j]) + y1_test = signal.resample(x1, 4) + # upsampling a complex array + y1_true = np.array([1.+0.j, 0.5+0.j, 0.+0.j, 0.5+0.j]) + assert_allclose(y1_test, y1_true, atol=1e-12) + x2 = np.array([1., 0.5, 0., 0.5]) + y2_test = signal.resample(x2, 2) # downsampling a real array + y2_true = np.array([1., 0.]) + assert_allclose(y2_test, y2_true, atol=1e-12) + + def test_poly_vs_filtfilt(self): + # Check that up=1.0 gives same answer as filtfilt + slicing + random_state = np.random.RandomState(17) + try_types = (int, np.float32, np.complex64, float, complex) + size = 10000 + down_factors = [2, 11, 79] + + for dtype in try_types: + x = random_state.randn(size).astype(dtype) + if dtype in (np.complex64, np.complex128): + x += 1j * random_state.randn(size) + + # resample_poly assumes zeros outside of signl, whereas filtfilt + # can only constant-pad. Make them equivalent: + x[0] = 0 + x[-1] = 0 + + for down in down_factors: + h = signal.firwin(31, 1. 
/ down, window='hamming') + yf = filtfilt(h, 1.0, x, padtype='constant')[::down] + + # Need to pass convolved version of filter to resample_poly, + # since filtfilt does forward and backward, but resample_poly + # only goes forward + hc = convolve(h, h[::-1]) + y = signal.resample_poly(x, 1, down, window=hc) + assert_allclose(yf, y, atol=1e-7, rtol=1e-7) + + def test_correlate1d(self): + for down in [2, 4]: + for nx in range(1, 40, down): + for nweights in (32, 33): + x = np.random.random((nx,)) + weights = np.random.random((nweights,)) + y_g = correlate1d(x, weights[::-1], mode='constant') + y_s = signal.resample_poly( + x, up=1, down=down, window=weights) + assert_allclose(y_g[::down], y_s) + + @pytest.mark.parametrize('dtype', [np.int32, np.float32]) + def test_gh_15620(self, dtype): + data = np.array([0, 1, 2, 3, 2, 1, 0], dtype=dtype) + actual = signal.resample_poly(data, + up=2, + down=1, + padtype='smooth') + assert np.count_nonzero(actual) > 0 + + +class TestCSpline1DEval: + + def test_basic(self): + y = array([1, 2, 3, 4, 3, 2, 1, 2, 3.0]) + x = arange(len(y)) + dx = x[1] - x[0] + cj = signal.cspline1d(y) + + x2 = arange(len(y) * 10.0) / 10.0 + y2 = signal.cspline1d_eval(cj, x2, dx=dx, x0=x[0]) + + # make sure interpolated values are on knot points + assert_array_almost_equal(y2[::10], y, decimal=5) + + def test_complex(self): + # create some smoothly varying complex signal to interpolate + x = np.arange(2) + y = np.zeros(x.shape, dtype=np.complex64) + T = 10.0 + f = 1.0 / T + y = np.exp(2.0J * np.pi * f * x) + + # get the cspline transform + cy = signal.cspline1d(y) + + # determine new test x value and interpolate + xnew = np.array([0.5]) + ynew = signal.cspline1d_eval(cy, xnew) + + assert_equal(ynew.dtype, y.dtype) + +class TestOrderFilt: + + def test_basic(self): + assert_array_equal(signal.order_filter([1, 2, 3], [1, 0, 1], 1), + [2, 3, 2]) + + +class _TestLinearFilter: + + def generate(self, shape): + x = np.linspace(0, np.prod(shape) - 1, np.prod(shape)).reshape(shape) + return self.convert_dtype(x) + + def convert_dtype(self, arr): + if self.dtype == np.dtype('O'): + arr = np.asarray(arr) + out = np.empty(arr.shape, self.dtype) + iter = np.nditer([arr, out], ['refs_ok','zerosize_ok'], + [['readonly'],['writeonly']]) + for x, y in iter: + y[...] 
= self.type(x[()]) + return out + else: + return np.asarray(arr, dtype=self.dtype) + + def test_rank_1_IIR(self): + x = self.generate((6,)) + b = self.convert_dtype([1, -1]) + a = self.convert_dtype([0.5, -0.5]) + y_r = self.convert_dtype([0, 2, 4, 6, 8, 10.]) + assert_array_almost_equal(lfilter(b, a, x), y_r) + + def test_rank_1_FIR(self): + x = self.generate((6,)) + b = self.convert_dtype([1, 1]) + a = self.convert_dtype([1]) + y_r = self.convert_dtype([0, 1, 3, 5, 7, 9.]) + assert_array_almost_equal(lfilter(b, a, x), y_r) + + def test_rank_1_IIR_init_cond(self): + x = self.generate((6,)) + b = self.convert_dtype([1, 0, -1]) + a = self.convert_dtype([0.5, -0.5]) + zi = self.convert_dtype([1, 2]) + y_r = self.convert_dtype([1, 5, 9, 13, 17, 21]) + zf_r = self.convert_dtype([13, -10]) + y, zf = lfilter(b, a, x, zi=zi) + assert_array_almost_equal(y, y_r) + assert_array_almost_equal(zf, zf_r) + + def test_rank_1_FIR_init_cond(self): + x = self.generate((6,)) + b = self.convert_dtype([1, 1, 1]) + a = self.convert_dtype([1]) + zi = self.convert_dtype([1, 1]) + y_r = self.convert_dtype([1, 2, 3, 6, 9, 12.]) + zf_r = self.convert_dtype([9, 5]) + y, zf = lfilter(b, a, x, zi=zi) + assert_array_almost_equal(y, y_r) + assert_array_almost_equal(zf, zf_r) + + def test_rank_2_IIR_axis_0(self): + x = self.generate((4, 3)) + b = self.convert_dtype([1, -1]) + a = self.convert_dtype([0.5, 0.5]) + y_r2_a0 = self.convert_dtype([[0, 2, 4], [6, 4, 2], [0, 2, 4], + [6, 4, 2]]) + y = lfilter(b, a, x, axis=0) + assert_array_almost_equal(y_r2_a0, y) + + def test_rank_2_IIR_axis_1(self): + x = self.generate((4, 3)) + b = self.convert_dtype([1, -1]) + a = self.convert_dtype([0.5, 0.5]) + y_r2_a1 = self.convert_dtype([[0, 2, 0], [6, -4, 6], [12, -10, 12], + [18, -16, 18]]) + y = lfilter(b, a, x, axis=1) + assert_array_almost_equal(y_r2_a1, y) + + def test_rank_2_IIR_axis_0_init_cond(self): + x = self.generate((4, 3)) + b = self.convert_dtype([1, -1]) + a = self.convert_dtype([0.5, 0.5]) + zi = self.convert_dtype(np.ones((4,1))) + + y_r2_a0_1 = self.convert_dtype([[1, 1, 1], [7, -5, 7], [13, -11, 13], + [19, -17, 19]]) + zf_r = self.convert_dtype([-5, -17, -29, -41])[:, np.newaxis] + y, zf = lfilter(b, a, x, axis=1, zi=zi) + assert_array_almost_equal(y_r2_a0_1, y) + assert_array_almost_equal(zf, zf_r) + + def test_rank_2_IIR_axis_1_init_cond(self): + x = self.generate((4,3)) + b = self.convert_dtype([1, -1]) + a = self.convert_dtype([0.5, 0.5]) + zi = self.convert_dtype(np.ones((1,3))) + + y_r2_a0_0 = self.convert_dtype([[1, 3, 5], [5, 3, 1], + [1, 3, 5], [5, 3, 1]]) + zf_r = self.convert_dtype([[-23, -23, -23]]) + y, zf = lfilter(b, a, x, axis=0, zi=zi) + assert_array_almost_equal(y_r2_a0_0, y) + assert_array_almost_equal(zf, zf_r) + + def test_rank_3_IIR(self): + x = self.generate((4, 3, 2)) + b = self.convert_dtype([1, -1]) + a = self.convert_dtype([0.5, 0.5]) + + for axis in range(x.ndim): + y = lfilter(b, a, x, axis) + y_r = np.apply_along_axis(lambda w: lfilter(b, a, w), axis, x) + assert_array_almost_equal(y, y_r) + + def test_rank_3_IIR_init_cond(self): + x = self.generate((4, 3, 2)) + b = self.convert_dtype([1, -1]) + a = self.convert_dtype([0.5, 0.5]) + + for axis in range(x.ndim): + zi_shape = list(x.shape) + zi_shape[axis] = 1 + zi = self.convert_dtype(np.ones(zi_shape)) + zi1 = self.convert_dtype([1]) + y, zf = lfilter(b, a, x, axis, zi) + def lf0(w): + return lfilter(b, a, w, zi=zi1)[0] + def lf1(w): + return lfilter(b, a, w, zi=zi1)[1] + y_r = np.apply_along_axis(lf0, axis, x) + zf_r = 
np.apply_along_axis(lf1, axis, x) + assert_array_almost_equal(y, y_r) + assert_array_almost_equal(zf, zf_r) + + def test_rank_3_FIR(self): + x = self.generate((4, 3, 2)) + b = self.convert_dtype([1, 0, -1]) + a = self.convert_dtype([1]) + + for axis in range(x.ndim): + y = lfilter(b, a, x, axis) + y_r = np.apply_along_axis(lambda w: lfilter(b, a, w), axis, x) + assert_array_almost_equal(y, y_r) + + def test_rank_3_FIR_init_cond(self): + x = self.generate((4, 3, 2)) + b = self.convert_dtype([1, 0, -1]) + a = self.convert_dtype([1]) + + for axis in range(x.ndim): + zi_shape = list(x.shape) + zi_shape[axis] = 2 + zi = self.convert_dtype(np.ones(zi_shape)) + zi1 = self.convert_dtype([1, 1]) + y, zf = lfilter(b, a, x, axis, zi) + def lf0(w): + return lfilter(b, a, w, zi=zi1)[0] + def lf1(w): + return lfilter(b, a, w, zi=zi1)[1] + y_r = np.apply_along_axis(lf0, axis, x) + zf_r = np.apply_along_axis(lf1, axis, x) + assert_array_almost_equal(y, y_r) + assert_array_almost_equal(zf, zf_r) + + def test_zi_pseudobroadcast(self): + x = self.generate((4, 5, 20)) + b,a = signal.butter(8, 0.2, output='ba') + b = self.convert_dtype(b) + a = self.convert_dtype(a) + zi_size = b.shape[0] - 1 + + # lfilter requires x.ndim == zi.ndim exactly. However, zi can have + # length 1 dimensions. + zi_full = self.convert_dtype(np.ones((4, 5, zi_size))) + zi_sing = self.convert_dtype(np.ones((1, 1, zi_size))) + + y_full, zf_full = lfilter(b, a, x, zi=zi_full) + y_sing, zf_sing = lfilter(b, a, x, zi=zi_sing) + + assert_array_almost_equal(y_sing, y_full) + assert_array_almost_equal(zf_full, zf_sing) + + # lfilter does not prepend ones + assert_raises(ValueError, lfilter, b, a, x, -1, np.ones(zi_size)) + + def test_scalar_a(self): + # a can be a scalar. + x = self.generate(6) + b = self.convert_dtype([1, 0, -1]) + a = self.convert_dtype([1]) + y_r = self.convert_dtype([0, 1, 2, 2, 2, 2]) + + y = lfilter(b, a[0], x) + assert_array_almost_equal(y, y_r) + + def test_zi_some_singleton_dims(self): + # lfilter doesn't really broadcast (no prepending of 1's). But does + # do singleton expansion if x and zi have the same ndim. This was + # broken only if a subset of the axes were singletons (gh-4681). 
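+ # (Here zi has shape (3, 1, 4): singleton only along axis 1, which must be expanded against x's shape (3, 2, 5).)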
+ x = self.convert_dtype(np.zeros((3,2,5), 'l')) + b = self.convert_dtype(np.ones(5, 'l')) + a = self.convert_dtype(np.array([1,0,0])) + zi = np.ones((3,1,4), 'l') + zi[1,:,:] *= 2 + zi[2,:,:] *= 3 + zi = self.convert_dtype(zi) + + zf_expected = self.convert_dtype(np.zeros((3,2,4), 'l')) + y_expected = np.zeros((3,2,5), 'l') + y_expected[:,:,:4] = [[[1]], [[2]], [[3]]] + y_expected = self.convert_dtype(y_expected) + + # IIR + y_iir, zf_iir = lfilter(b, a, x, -1, zi) + assert_array_almost_equal(y_iir, y_expected) + assert_array_almost_equal(zf_iir, zf_expected) + + # FIR + y_fir, zf_fir = lfilter(b, a[0], x, -1, zi) + assert_array_almost_equal(y_fir, y_expected) + assert_array_almost_equal(zf_fir, zf_expected) + + def base_bad_size_zi(self, b, a, x, axis, zi): + b = self.convert_dtype(b) + a = self.convert_dtype(a) + x = self.convert_dtype(x) + zi = self.convert_dtype(zi) + assert_raises(ValueError, lfilter, b, a, x, axis, zi) + + def test_bad_size_zi(self): + # rank 1 + x1 = np.arange(6) + self.base_bad_size_zi([1], [1], x1, -1, [1]) + self.base_bad_size_zi([1, 1], [1], x1, -1, [0, 1]) + self.base_bad_size_zi([1, 1], [1], x1, -1, [[0]]) + self.base_bad_size_zi([1, 1], [1], x1, -1, [0, 1, 2]) + self.base_bad_size_zi([1, 1, 1], [1], x1, -1, [[0]]) + self.base_bad_size_zi([1, 1, 1], [1], x1, -1, [0, 1, 2]) + self.base_bad_size_zi([1], [1, 1], x1, -1, [0, 1]) + self.base_bad_size_zi([1], [1, 1], x1, -1, [[0]]) + self.base_bad_size_zi([1], [1, 1], x1, -1, [0, 1, 2]) + self.base_bad_size_zi([1, 1, 1], [1, 1], x1, -1, [0]) + self.base_bad_size_zi([1, 1, 1], [1, 1], x1, -1, [[0], [1]]) + self.base_bad_size_zi([1, 1, 1], [1, 1], x1, -1, [0, 1, 2]) + self.base_bad_size_zi([1, 1, 1], [1, 1], x1, -1, [0, 1, 2, 3]) + self.base_bad_size_zi([1, 1], [1, 1, 1], x1, -1, [0]) + self.base_bad_size_zi([1, 1], [1, 1, 1], x1, -1, [[0], [1]]) + self.base_bad_size_zi([1, 1], [1, 1, 1], x1, -1, [0, 1, 2]) + self.base_bad_size_zi([1, 1], [1, 1, 1], x1, -1, [0, 1, 2, 3]) + + # rank 2 + x2 = np.arange(12).reshape((4,3)) + # for axis=0 zi.shape should == (max(len(a),len(b))-1, 3) + self.base_bad_size_zi([1], [1], x2, 0, [0]) + + # for each of these there are 5 cases tested (in this order): + # 1. not deep enough, right # elements + # 2. too deep, right # elements + # 3. right depth, right # elements, transposed + # 4. right depth, too few elements + # 5. 
right depth, too many elements + + self.base_bad_size_zi([1, 1], [1], x2, 0, [0,1,2]) + self.base_bad_size_zi([1, 1], [1], x2, 0, [[[0,1,2]]]) + self.base_bad_size_zi([1, 1], [1], x2, 0, [[0], [1], [2]]) + self.base_bad_size_zi([1, 1], [1], x2, 0, [[0,1]]) + self.base_bad_size_zi([1, 1], [1], x2, 0, [[0,1,2,3]]) + + self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [0,1,2,3,4,5]) + self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [[[0,1,2],[3,4,5]]]) + self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [[0,1],[2,3],[4,5]]) + self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [[0,1],[2,3]]) + self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [[0,1,2,3],[4,5,6,7]]) + + self.base_bad_size_zi([1], [1, 1], x2, 0, [0,1,2]) + self.base_bad_size_zi([1], [1, 1], x2, 0, [[[0,1,2]]]) + self.base_bad_size_zi([1], [1, 1], x2, 0, [[0], [1], [2]]) + self.base_bad_size_zi([1], [1, 1], x2, 0, [[0,1]]) + self.base_bad_size_zi([1], [1, 1], x2, 0, [[0,1,2,3]]) + + self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [0,1,2,3,4,5]) + self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [[[0,1,2],[3,4,5]]]) + self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [[0,1],[2,3],[4,5]]) + self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [[0,1],[2,3]]) + self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [[0,1,2,3],[4,5,6,7]]) + + self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [0,1,2,3,4,5]) + self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [[[0,1,2],[3,4,5]]]) + self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [[0,1],[2,3],[4,5]]) + self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [[0,1],[2,3]]) + self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [[0,1,2,3],[4,5,6,7]]) + + # for axis=1 zi.shape should == (4, max(len(a),len(b))-1) + self.base_bad_size_zi([1], [1], x2, 1, [0]) + + self.base_bad_size_zi([1, 1], [1], x2, 1, [0,1,2,3]) + self.base_bad_size_zi([1, 1], [1], x2, 1, [[[0],[1],[2],[3]]]) + self.base_bad_size_zi([1, 1], [1], x2, 1, [[0, 1, 2, 3]]) + self.base_bad_size_zi([1, 1], [1], x2, 1, [[0],[1],[2]]) + self.base_bad_size_zi([1, 1], [1], x2, 1, [[0],[1],[2],[3],[4]]) + + self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [0,1,2,3,4,5,6,7]) + self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [[[0,1],[2,3],[4,5],[6,7]]]) + self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [[0,1,2,3],[4,5,6,7]]) + self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [[0,1],[2,3],[4,5]]) + self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [[0,1],[2,3],[4,5],[6,7],[8,9]]) + + self.base_bad_size_zi([1], [1, 1], x2, 1, [0,1,2,3]) + self.base_bad_size_zi([1], [1, 1], x2, 1, [[[0],[1],[2],[3]]]) + self.base_bad_size_zi([1], [1, 1], x2, 1, [[0, 1, 2, 3]]) + self.base_bad_size_zi([1], [1, 1], x2, 1, [[0],[1],[2]]) + self.base_bad_size_zi([1], [1, 1], x2, 1, [[0],[1],[2],[3],[4]]) + + self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [0,1,2,3,4,5,6,7]) + self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [[[0,1],[2,3],[4,5],[6,7]]]) + self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [[0,1,2,3],[4,5,6,7]]) + self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [[0,1],[2,3],[4,5]]) + self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [[0,1],[2,3],[4,5],[6,7],[8,9]]) + + self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [0,1,2,3,4,5,6,7]) + self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [[[0,1],[2,3],[4,5],[6,7]]]) + self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [[0,1,2,3],[4,5,6,7]]) + self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [[0,1],[2,3],[4,5]]) + self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [[0,1],[2,3],[4,5],[6,7],[8,9]]) + + def test_empty_zi(self): + # Regression test for #880: empty array for zi crashes. 
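+ # (b = a = [1] gives a zeroth-order filter, so the valid zi is empty, the output equals the input, and the returned zf is empty as well.)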
+ x = self.generate((5,)) + a = self.convert_dtype([1]) + b = self.convert_dtype([1]) + zi = self.convert_dtype([]) + y, zf = lfilter(b, a, x, zi=zi) + assert_array_almost_equal(y, x) + assert_equal(zf.dtype, self.dtype) + assert_equal(zf.size, 0) + + def test_lfiltic_bad_zi(self): + # Regression test for #3699: bad initial conditions + a = self.convert_dtype([1]) + b = self.convert_dtype([1]) + # "y" sets the datatype of zi, so it truncates if int + zi = lfiltic(b, a, [1., 0]) + zi_1 = lfiltic(b, a, [1, 0]) + zi_2 = lfiltic(b, a, [True, False]) + assert_array_equal(zi, zi_1) + assert_array_equal(zi, zi_2) + + def test_short_x_FIR(self): + # regression test for #5116 + # x shorter than b, with non None zi fails + a = self.convert_dtype([1]) + b = self.convert_dtype([1, 0, -1]) + zi = self.convert_dtype([2, 7]) + x = self.convert_dtype([72]) + ye = self.convert_dtype([74]) + zfe = self.convert_dtype([7, -72]) + y, zf = lfilter(b, a, x, zi=zi) + assert_array_almost_equal(y, ye) + assert_array_almost_equal(zf, zfe) + + def test_short_x_IIR(self): + # regression test for #5116 + # x shorter than b, with non None zi fails + a = self.convert_dtype([1, 1]) + b = self.convert_dtype([1, 0, -1]) + zi = self.convert_dtype([2, 7]) + x = self.convert_dtype([72]) + ye = self.convert_dtype([74]) + zfe = self.convert_dtype([-67, -72]) + y, zf = lfilter(b, a, x, zi=zi) + assert_array_almost_equal(y, ye) + assert_array_almost_equal(zf, zfe) + + def test_do_not_modify_a_b_IIR(self): + x = self.generate((6,)) + b = self.convert_dtype([1, -1]) + b0 = b.copy() + a = self.convert_dtype([0.5, -0.5]) + a0 = a.copy() + y_r = self.convert_dtype([0, 2, 4, 6, 8, 10.]) + y_f = lfilter(b, a, x) + assert_array_almost_equal(y_f, y_r) + assert_equal(b, b0) + assert_equal(a, a0) + + def test_do_not_modify_a_b_FIR(self): + x = self.generate((6,)) + b = self.convert_dtype([1, 0, 1]) + b0 = b.copy() + a = self.convert_dtype([2]) + a0 = a.copy() + y_r = self.convert_dtype([0, 0.5, 1, 2, 3, 4.]) + y_f = lfilter(b, a, x) + assert_array_almost_equal(y_f, y_r) + assert_equal(b, b0) + assert_equal(a, a0) + + @pytest.mark.parametrize("a", [1.0, [1.0], np.array(1.0)]) + @pytest.mark.parametrize("b", [1.0, [1.0], np.array(1.0)]) + def test_scalar_input(self, a, b): + data = np.random.randn(10) + assert_allclose( + lfilter(np.array([1.0]), np.array([1.0]), data), + lfilter(b, a, data)) + + @pytest.mark.thread_unsafe + def test_dtype_deprecation(self): + # gh-21211 + a = np.asarray([1, 2, 3, 6, 5, 3], dtype=object) + b = np.asarray([2, 3, 4, 5, 3, 4, 2, 2, 1], dtype=object) + with pytest.deprecated_call(match="dtype=object is not supported"): + lfilter(a, b, [1, 2, 3, 4]) + + +class TestLinearFilterFloat32(_TestLinearFilter): + dtype = np.dtype('f') + + +class TestLinearFilterFloat64(_TestLinearFilter): + dtype = np.dtype('d') + + +@pytest.mark.filterwarnings('ignore::DeprecationWarning') +class TestLinearFilterFloatExtended(_TestLinearFilter): + dtype = np.dtype('g') + + +class TestLinearFilterComplex64(_TestLinearFilter): + dtype = np.dtype('F') + + +class TestLinearFilterComplex128(_TestLinearFilter): + dtype = np.dtype('D') + + +@pytest.mark.filterwarnings('ignore::DeprecationWarning') +class TestLinearFilterComplexExtended(_TestLinearFilter): + dtype = np.dtype('G') + + +@pytest.mark.filterwarnings('ignore::DeprecationWarning') +class TestLinearFilterDecimal(_TestLinearFilter): + dtype = np.dtype('O') + + def type(self, x): + return Decimal(str(x)) + + +@pytest.mark.filterwarnings('ignore::DeprecationWarning') +class 
TestLinearFilterObject(_TestLinearFilter): + dtype = np.dtype('O') + type = float + + +@pytest.mark.filterwarnings('ignore::DeprecationWarning') +def test_lfilter_bad_object(): + # lfilter: object arrays with non-numeric objects raise TypeError. + # Regression test for ticket #1452. + if hasattr(sys, 'abiflags') and 'd' in sys.abiflags: + pytest.skip('test is flaky when run with python3-dbg') + assert_raises(TypeError, lfilter, [1.0], [1.0], [1.0, None, 2.0]) + assert_raises(TypeError, lfilter, [1.0], [None], [1.0, 2.0, 3.0]) + assert_raises(TypeError, lfilter, [None], [1.0], [1.0, 2.0, 3.0]) + + +_pmf = pytest.mark.filterwarnings('ignore::DeprecationWarning') + +def test_lfilter_notimplemented_input(): + # Should not crash, gh-7991 + assert_raises(NotImplementedError, lfilter, [2,3], [4,5], [1,2,3,4,5]) + + +@pytest.mark.parametrize('dt', [np.ubyte, np.byte, np.ushort, np.short, + np_ulong, np_long, np.ulonglong, np.ulonglong, + np.float32, np.float64, + pytest.param(np.longdouble, marks=_pmf), + pytest.param(Decimal, marks=_pmf)] +) +class TestCorrelateReal: + def _setup_rank1(self, dt): + a = np.linspace(0, 3, 4).astype(dt) + b = np.linspace(1, 2, 2).astype(dt) + + y_r = np.array([0, 2, 5, 8, 3]).astype(dt) + return a, b, y_r + + def equal_tolerance(self, res_dt): + # default value of keyword + decimal = 6 + try: + dt_info = np.finfo(res_dt) + if hasattr(dt_info, 'resolution'): + decimal = int(-0.5*np.log10(dt_info.resolution)) + except Exception: + pass + return decimal + + def equal_tolerance_fft(self, res_dt): + # FFT implementations convert longdouble arguments down to + # double so don't expect better precision, see gh-9520 + if res_dt == np.longdouble: + return self.equal_tolerance(np.float64) + else: + return self.equal_tolerance(res_dt) + + def test_method(self, dt): + if dt == Decimal: + method = choose_conv_method([Decimal(4)], [Decimal(3)]) + assert_equal(method, 'direct') + else: + a, b, y_r = self._setup_rank3(dt) + y_fft = correlate(a, b, method='fft') + y_direct = correlate(a, b, method='direct') + + assert_array_almost_equal(y_r, + y_fft, + decimal=self.equal_tolerance_fft(y_fft.dtype),) + assert_array_almost_equal(y_r, + y_direct, + decimal=self.equal_tolerance(y_direct.dtype),) + assert_equal(y_fft.dtype, dt) + assert_equal(y_direct.dtype, dt) + + def test_rank1_valid(self, dt): + a, b, y_r = self._setup_rank1(dt) + y = correlate(a, b, 'valid') + assert_array_almost_equal(y, y_r[1:4]) + assert_equal(y.dtype, dt) + + # See gh-5897 + y = correlate(b, a, 'valid') + assert_array_almost_equal(y, y_r[1:4][::-1]) + assert_equal(y.dtype, dt) + + def test_rank1_same(self, dt): + a, b, y_r = self._setup_rank1(dt) + y = correlate(a, b, 'same') + assert_array_almost_equal(y, y_r[:-1]) + assert_equal(y.dtype, dt) + + def test_rank1_full(self, dt): + a, b, y_r = self._setup_rank1(dt) + y = correlate(a, b, 'full') + assert_array_almost_equal(y, y_r) + assert_equal(y.dtype, dt) + + def _setup_rank3(self, dt): + a = np.linspace(0, 39, 40).reshape((2, 4, 5), order='F').astype( + dt) + b = np.linspace(0, 23, 24).reshape((2, 3, 4), order='F').astype( + dt) + + y_r = array([[[0., 184., 504., 912., 1360., 888., 472., 160.], + [46., 432., 1062., 1840., 2672., 1698., 864., 266.], + [134., 736., 1662., 2768., 3920., 2418., 1168., 314.], + [260., 952., 1932., 3056., 4208., 2580., 1240., 332.], + [202., 664., 1290., 1984., 2688., 1590., 712., 150.], + [114., 344., 642., 960., 1280., 726., 296., 38.]], + + [[23., 400., 1035., 1832., 2696., 1737., 904., 293.], + [134., 920., 2166., 3680., 5280., 
3306., 1640., 474.], + [325., 1544., 3369., 5512., 7720., 4683., 2192., 535.], + [571., 1964., 3891., 6064., 8272., 4989., 2324., 565.], + [434., 1360., 2586., 3920., 5264., 3054., 1312., 230.], + [241., 700., 1281., 1888., 2496., 1383., 532., 39.]], + + [[22., 214., 528., 916., 1332., 846., 430., 132.], + [86., 484., 1098., 1832., 2600., 1602., 772., 206.], + [188., 802., 1698., 2732., 3788., 2256., 1018., 218.], + [308., 1006., 1950., 2996., 4052., 2400., 1078., 230.], + [230., 692., 1290., 1928., 2568., 1458., 596., 78.], + [126., 354., 636., 924., 1212., 654., 234., 0.]]], + dtype=np.float64).astype(dt) + + return a, b, y_r + + def test_rank3_valid(self, dt): + a, b, y_r = self._setup_rank3(dt) + y = correlate(a, b, "valid") + assert_array_almost_equal(y, y_r[1:2, 2:4, 3:5]) + assert_equal(y.dtype, dt) + + # See gh-5897 + y = correlate(b, a, "valid") + assert_array_almost_equal(y, y_r[1:2, 2:4, 3:5][::-1, ::-1, ::-1]) + assert_equal(y.dtype, dt) + + def test_rank3_same(self, dt): + a, b, y_r = self._setup_rank3(dt) + y = correlate(a, b, "same") + assert_array_almost_equal(y, y_r[0:-1, 1:-1, 1:-2]) + assert_equal(y.dtype, dt) + + def test_rank3_all(self, dt): + a, b, y_r = self._setup_rank3(dt) + y = correlate(a, b) + assert_array_almost_equal(y, y_r) + assert_equal(y.dtype, dt) + + +class TestCorrelate: + # Tests that don't depend on dtype + + def test_invalid_shapes(self): + # By "invalid," we mean that no one + # array has dimensions that are all at + # least as large as the corresponding + # dimensions of the other array. This + # setup should throw a ValueError. + a = np.arange(1, 7).reshape((2, 3)) + b = np.arange(-6, 0).reshape((3, 2)) + + assert_raises(ValueError, correlate, *(a, b), **{'mode': 'valid'}) + assert_raises(ValueError, correlate, *(b, a), **{'mode': 'valid'}) + + def test_invalid_params(self): + a = [3, 4, 5] + b = [1, 2, 3] + assert_raises(ValueError, correlate, a, b, mode='spam') + assert_raises(ValueError, correlate, a, b, mode='eggs', method='fft') + assert_raises(ValueError, correlate, a, b, mode='ham', method='direct') + assert_raises(ValueError, correlate, a, b, mode='full', method='bacon') + assert_raises(ValueError, correlate, a, b, mode='same', method='bacon') + + def test_mismatched_dims(self): + # Input arrays should have the same number of dimensions + assert_raises(ValueError, correlate, [1], 2, method='direct') + assert_raises(ValueError, correlate, 1, [2], method='direct') + assert_raises(ValueError, correlate, [1], 2, method='fft') + assert_raises(ValueError, correlate, 1, [2], method='fft') + assert_raises(ValueError, correlate, [1], [[2]]) + assert_raises(ValueError, correlate, [3], 2) + + def test_numpy_fastpath(self): + a = [1, 2, 3] + b = [4, 5] + assert_allclose(correlate(a, b, mode='same'), [5, 14, 23]) + + a = [1, 2, 3] + b = [4, 5, 6] + assert_allclose(correlate(a, b, mode='same'), [17, 32, 23]) + assert_allclose(correlate(a, b, mode='full'), [6, 17, 32, 23, 12]) + assert_allclose(correlate(a, b, mode='valid'), [32]) + + @pytest.mark.thread_unsafe + def test_dtype_deprecation(self): + # gh-21211 + a = np.asarray([1, 2, 3, 6, 5, 3], dtype=object) + b = np.asarray([2, 3, 4, 5, 3, 4, 2, 2, 1], dtype=object) + with pytest.deprecated_call(match="dtype=object is not supported"): + correlate(a, b) + + +@pytest.mark.parametrize("mode", ["valid", "same", "full"]) +@pytest.mark.parametrize("behind", [True, False]) +@pytest.mark.parametrize("input_size", [100, 101, 1000, 1001, 10000, 10001]) +def test_correlation_lags(mode, behind, input_size): + # 
generate random data + rng = np.random.RandomState(0) + in1 = rng.standard_normal(input_size) + offset = int(input_size/10) + # generate offset version of array to correlate with + if behind: + # y is behind x + in2 = np.concatenate([rng.standard_normal(offset), in1]) + expected = -offset + else: + # y is ahead of x + in2 = in1[offset:] + expected = offset + # cross correlate, returning lag information + correlation = correlate(in1, in2, mode=mode) + lags = correlation_lags(in1.size, in2.size, mode=mode) + # identify the peak + lag_index = np.argmax(correlation) + # Check as expected + assert_equal(lags[lag_index], expected) + # Correlation and lags shape should match + assert_equal(lags.shape, correlation.shape) + + +def test_correlation_lags_invalid_mode(): + with pytest.raises(ValueError, match="Mode asdfgh is invalid"): + correlation_lags(100, 100, mode="asdfgh") + + +@pytest.mark.parametrize('dt', [np.csingle, np.cdouble, + pytest.param(np.clongdouble, marks=_pmf)]) +class TestCorrelateComplex: + # The decimal precision to be used for comparing results. + # This value will be passed as the 'decimal' keyword argument of + # assert_array_almost_equal(). + # Since correlate may chose to use FFT method which converts + # longdoubles to doubles internally don't expect better precision + # for longdouble than for double (see gh-9520). + + def decimal(self, dt): + if dt == np.clongdouble: + dt = np.cdouble + return int(2 * np.finfo(dt).precision / 3) + + def _setup_rank1(self, dt, mode): + np.random.seed(9) + a = np.random.randn(10).astype(dt) + a += 1j * np.random.randn(10).astype(dt) + b = np.random.randn(8).astype(dt) + b += 1j * np.random.randn(8).astype(dt) + + y_r = (correlate(a.real, b.real, mode=mode) + + correlate(a.imag, b.imag, mode=mode)).astype(dt) + y_r += 1j * (-correlate(a.real, b.imag, mode=mode) + + correlate(a.imag, b.real, mode=mode)) + return a, b, y_r + + def test_rank1_valid(self, dt): + a, b, y_r = self._setup_rank1(dt, 'valid') + y = correlate(a, b, 'valid') + assert_array_almost_equal(y, y_r, decimal=self.decimal(dt)) + assert_equal(y.dtype, dt) + + # See gh-5897 + y = correlate(b, a, 'valid') + assert_array_almost_equal(y, y_r[::-1].conj(), decimal=self.decimal(dt)) + assert_equal(y.dtype, dt) + + def test_rank1_same(self, dt): + a, b, y_r = self._setup_rank1(dt, 'same') + y = correlate(a, b, 'same') + assert_array_almost_equal(y, y_r, decimal=self.decimal(dt)) + assert_equal(y.dtype, dt) + + def test_rank1_full(self, dt): + a, b, y_r = self._setup_rank1(dt, 'full') + y = correlate(a, b, 'full') + assert_array_almost_equal(y, y_r, decimal=self.decimal(dt)) + assert_equal(y.dtype, dt) + + def test_swap_full(self, dt): + d = np.array([0.+0.j, 1.+1.j, 2.+2.j], dtype=dt) + k = np.array([1.+3.j, 2.+4.j, 3.+5.j, 4.+6.j], dtype=dt) + y = correlate(d, k) + assert_equal(y, [0.+0.j, 10.-2.j, 28.-6.j, 22.-6.j, 16.-6.j, 8.-4.j]) + + def test_swap_same(self, dt): + d = [0.+0.j, 1.+1.j, 2.+2.j] + k = [1.+3.j, 2.+4.j, 3.+5.j, 4.+6.j] + y = correlate(d, k, mode="same") + assert_equal(y, [10.-2.j, 28.-6.j, 22.-6.j]) + + def test_rank3(self, dt): + a = np.random.randn(10, 8, 6).astype(dt) + a += 1j * np.random.randn(10, 8, 6).astype(dt) + b = np.random.randn(8, 6, 4).astype(dt) + b += 1j * np.random.randn(8, 6, 4).astype(dt) + + y_r = (correlate(a.real, b.real) + + correlate(a.imag, b.imag)).astype(dt) + y_r += 1j * (-correlate(a.real, b.imag) + correlate(a.imag, b.real)) + + y = correlate(a, b, 'full') + assert_array_almost_equal(y, y_r, decimal=self.decimal(dt) - 1) + 
assert_equal(y.dtype, dt) + + def test_rank0(self, dt): + a = np.array(np.random.randn()).astype(dt) + a += 1j * np.array(np.random.randn()).astype(dt) + b = np.array(np.random.randn()).astype(dt) + b += 1j * np.array(np.random.randn()).astype(dt) + + y_r = (correlate(a.real, b.real) + + correlate(a.imag, b.imag)).astype(dt) + y_r += 1j * np.array(-correlate(a.real, b.imag) + + correlate(a.imag, b.real)) + + y = correlate(a, b, 'full') + assert_array_almost_equal(y, y_r, decimal=self.decimal(dt) - 1) + assert_equal(y.dtype, dt) + + assert_equal(correlate([1], [2j]), correlate(1, 2j)) + assert_equal(correlate([2j], [3j]), correlate(2j, 3j)) + assert_equal(correlate([3j], [4]), correlate(3j, 4)) + + +class TestCorrelate2d: + + def test_consistency_correlate_funcs(self): + # Compare np.correlate, signal.correlate, signal.correlate2d + a = np.arange(5) + b = np.array([3.2, 1.4, 3]) + for mode in ['full', 'valid', 'same']: + assert_almost_equal(np.correlate(a, b, mode=mode), + signal.correlate(a, b, mode=mode)) + assert_almost_equal(np.squeeze(signal.correlate2d([a], [b], + mode=mode)), + signal.correlate(a, b, mode=mode)) + + # See gh-5897 + if mode == 'valid': + assert_almost_equal(np.correlate(b, a, mode=mode), + signal.correlate(b, a, mode=mode)) + assert_almost_equal(np.squeeze(signal.correlate2d([b], [a], + mode=mode)), + signal.correlate(b, a, mode=mode)) + + def test_invalid_shapes(self): + # By "invalid," we mean that no one + # array has dimensions that are all at + # least as large as the corresponding + # dimensions of the other array. This + # setup should throw a ValueError. + a = np.arange(1, 7).reshape((2, 3)) + b = np.arange(-6, 0).reshape((3, 2)) + + assert_raises(ValueError, signal.correlate2d, *(a, b), **{'mode': 'valid'}) + assert_raises(ValueError, signal.correlate2d, *(b, a), **{'mode': 'valid'}) + + def test_complex_input(self): + assert_equal(signal.correlate2d([[1]], [[2j]]), -2j) + assert_equal(signal.correlate2d([[2j]], [[3j]]), 6) + assert_equal(signal.correlate2d([[3j]], [[4]]), 12j) + + +class TestLFilterZI: + + def test_basic(self): + a = np.array([1.0, -1.0, 0.5]) + b = np.array([1.0, 0.0, 2.0]) + zi_expected = np.array([5.0, -1.0]) + zi = lfilter_zi(b, a) + assert_array_almost_equal(zi, zi_expected) + + def test_scale_invariance(self): + # Regression test. There was a bug in which b was not correctly + # rescaled when a[0] was nonzero. + b = np.array([2, 8, 5]) + a = np.array([1, 1, 8]) + zi1 = lfilter_zi(b, a) + zi2 = lfilter_zi(2*b, 2*a) + assert_allclose(zi2, zi1, rtol=1e-12) + + @pytest.mark.parametrize('dtype', [np.float32, np.float64]) + def test_types(self, dtype): + b = np.zeros((8), dtype=dtype) + a = np.array([1], dtype=dtype) + assert_equal(np.real(signal.lfilter_zi(b, a)).dtype, dtype) + + +class TestFiltFilt: + filtfilt_kind = 'tf' + + def filtfilt(self, zpk, x, axis=-1, padtype='odd', padlen=None, + method='pad', irlen=None): + if self.filtfilt_kind == 'tf': + b, a = zpk2tf(*zpk) + return filtfilt(b, a, x, axis, padtype, padlen, method, irlen) + elif self.filtfilt_kind == 'sos': + sos = zpk2sos(*zpk) + return sosfiltfilt(sos, x, axis, padtype, padlen) + + def test_basic(self): + zpk = tf2zpk([1, 2, 3], [1, 2, 3]) + out = self.filtfilt(zpk, np.arange(12)) + assert_allclose(out, arange(12), atol=5.28e-11) + + def test_sine(self): + rate = 2000 + t = np.linspace(0, 1.0, rate + 1) + # A signal with low frequency and a high frequency. 
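+ # (With rate = 2000, the butter(8, 0.125) filter below has a cutoff near 125 Hz, so the 5 Hz component should pass and the 250 Hz component should be removed.)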
+ xlow = np.sin(5 * 2 * np.pi * t) + xhigh = np.sin(250 * 2 * np.pi * t) + x = xlow + xhigh + + zpk = butter(8, 0.125, output='zpk') + # r is the magnitude of the largest pole. + r = np.abs(zpk[1]).max() + eps = 1e-5 + # n estimates the number of steps for the + # transient to decay by a factor of eps. + n = int(np.ceil(np.log(eps) / np.log(r))) + + # High order lowpass filter... + y = self.filtfilt(zpk, x, padlen=n) + # Result should be just xlow. + err = np.abs(y - xlow).max() + assert_(err < 1e-4) + + # A 2D case. + x2d = np.vstack([xlow, xlow + xhigh]) + y2d = self.filtfilt(zpk, x2d, padlen=n, axis=1) + assert_equal(y2d.shape, x2d.shape) + err = np.abs(y2d - xlow).max() + assert_(err < 1e-4) + + # Use the previous result to check the use of the axis keyword. + # (Regression test for ticket #1620) + y2dt = self.filtfilt(zpk, x2d.T, padlen=n, axis=0) + assert_equal(y2d, y2dt.T) + + def test_axis(self): + # Test the 'axis' keyword on a 3D array. + x = np.arange(10.0 * 11.0 * 12.0).reshape(10, 11, 12) + zpk = butter(3, 0.125, output='zpk') + y0 = self.filtfilt(zpk, x, padlen=0, axis=0) + y1 = self.filtfilt(zpk, np.swapaxes(x, 0, 1), padlen=0, axis=1) + assert_array_equal(y0, np.swapaxes(y1, 0, 1)) + y2 = self.filtfilt(zpk, np.swapaxes(x, 0, 2), padlen=0, axis=2) + assert_array_equal(y0, np.swapaxes(y2, 0, 2)) + + def test_acoeff(self): + if self.filtfilt_kind != 'tf': + return # only necessary for TF + # test for 'a' coefficient as single number + out = signal.filtfilt([.5, .5], 1, np.arange(10)) + assert_allclose(out, np.arange(10), rtol=1e-14, atol=1e-14) + + def test_gust_simple(self): + if self.filtfilt_kind != 'tf': + pytest.skip('gust only implemented for TF systems') + # The input array has length 2. The exact solution for this case + # was computed "by hand". + x = np.array([1.0, 2.0]) + b = np.array([0.5]) + a = np.array([1.0, -0.5]) + y, z1, z2 = _filtfilt_gust(b, a, x) + assert_allclose([z1[0], z2[0]], + [0.3*x[0] + 0.2*x[1], 0.2*x[0] + 0.3*x[1]]) + assert_allclose(y, [z1[0] + 0.25*z2[0] + 0.25*x[0] + 0.125*x[1], + 0.25*z1[0] + z2[0] + 0.125*x[0] + 0.25*x[1]]) + + def test_gust_scalars(self): + if self.filtfilt_kind != 'tf': + pytest.skip('gust only implemented for TF systems') + # The filter coefficients are both scalars, so the filter simply + # multiplies its input by b/a. When it is used in filtfilt, the + # factor is (b/a)**2. + x = np.arange(12) + b = 3.0 + a = 2.0 + y = filtfilt(b, a, x, method="gust") + expected = (b/a)**2 * x + assert_allclose(y, expected) + + +class TestSOSFiltFilt(TestFiltFilt): + filtfilt_kind = 'sos' + + def test_equivalence(self): + """Test equivalence between sosfiltfilt and filtfilt""" + x = np.random.RandomState(0).randn(1000) + for order in range(1, 6): + zpk = signal.butter(order, 0.35, output='zpk') + b, a = zpk2tf(*zpk) + sos = zpk2sos(*zpk) + y = filtfilt(b, a, x) + y_sos = sosfiltfilt(sos, x) + assert_allclose(y, y_sos, atol=1e-12, err_msg=f'order={order}') + + +def filtfilt_gust_opt(b, a, x): + """ + An alternative implementation of filtfilt with Gustafsson edges. + + This function computes the same result as + `scipy.signal._signaltools._filtfilt_gust`, but only 1-d arrays + are accepted. The problem is solved using `fmin` from `scipy.optimize`. + `_filtfilt_gust` is significantly faster than this implementation. 
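+ The free parameters are the forward and backward initial conditions (z0f, z0b); they are chosen to minimize the squared difference between forward-backward and backward-forward filtering of x.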
+ """ + def filtfilt_gust_opt_func(ics, b, a, x): + """Objective function used in filtfilt_gust_opt.""" + m = max(len(a), len(b)) - 1 + z0f = ics[:m] + z0b = ics[m:] + y_f = lfilter(b, a, x, zi=z0f)[0] + y_fb = lfilter(b, a, y_f[::-1], zi=z0b)[0][::-1] + + y_b = lfilter(b, a, x[::-1], zi=z0b)[0][::-1] + y_bf = lfilter(b, a, y_b, zi=z0f)[0] + value = np.sum((y_fb - y_bf)**2) + return value + + m = max(len(a), len(b)) - 1 + zi = lfilter_zi(b, a) + ics = np.concatenate((x[:m].mean()*zi, x[-m:].mean()*zi)) + result = fmin(filtfilt_gust_opt_func, ics, args=(b, a, x), + xtol=1e-10, ftol=1e-12, + maxfun=10000, maxiter=10000, + full_output=True, disp=False) + opt, fopt, niter, funcalls, warnflag = result + if warnflag > 0: + raise RuntimeError("minimization failed in filtfilt_gust_opt: " + "warnflag=%d" % warnflag) + z0f = opt[:m] + z0b = opt[m:] + + # Apply the forward-backward filter using the computed initial + # conditions. + y_b = lfilter(b, a, x[::-1], zi=z0b)[0][::-1] + y = lfilter(b, a, y_b, zi=z0f)[0] + + return y, z0f, z0b + + +def check_filtfilt_gust(b, a, shape, axis, irlen=None): + # Generate x, the data to be filtered. + np.random.seed(123) + x = np.random.randn(*shape) + + # Apply filtfilt to x. This is the main calculation to be checked. + y = filtfilt(b, a, x, axis=axis, method="gust", irlen=irlen) + + # Also call the private function so we can test the ICs. + yg, zg1, zg2 = _filtfilt_gust(b, a, x, axis=axis, irlen=irlen) + + # filtfilt_gust_opt is an independent implementation that gives the + # expected result, but it only handles 1-D arrays, so use some looping + # and reshaping shenanigans to create the expected output arrays. + xx = np.swapaxes(x, axis, -1) + out_shape = xx.shape[:-1] + yo = np.empty_like(xx) + m = max(len(a), len(b)) - 1 + zo1 = np.empty(out_shape + (m,)) + zo2 = np.empty(out_shape + (m,)) + for indx in product(*[range(d) for d in out_shape]): + yo[indx], zo1[indx], zo2[indx] = filtfilt_gust_opt(b, a, xx[indx]) + yo = np.swapaxes(yo, -1, axis) + zo1 = np.swapaxes(zo1, -1, axis) + zo2 = np.swapaxes(zo2, -1, axis) + + assert_allclose(y, yo, rtol=1e-8, atol=1e-9) + assert_allclose(yg, yo, rtol=1e-8, atol=1e-9) + assert_allclose(zg1, zo1, rtol=1e-8, atol=1e-9) + assert_allclose(zg2, zo2, rtol=1e-8, atol=1e-9) + + +@pytest.mark.fail_slow(10) +def test_choose_conv_method(): + for mode in ['valid', 'same', 'full']: + for ndim in [1, 2]: + n, k, true_method = 8, 6, 'direct' + x = np.random.randn(*((n,) * ndim)) + h = np.random.randn(*((k,) * ndim)) + + method = choose_conv_method(x, h, mode=mode) + assert_equal(method, true_method) + + method_try, times = choose_conv_method(x, h, mode=mode, measure=True) + assert_(method_try in {'fft', 'direct'}) + assert_(isinstance(times, dict)) + assert_('fft' in times.keys() and 'direct' in times.keys()) + + x = np.array([2**51], dtype=np.int64) + h = x.copy() + assert_equal(choose_conv_method(x, h, mode=mode), 'direct') + + +@pytest.mark.thread_unsafe +def test_choose_conv_dtype_deprecation(): + # gh-21211 + a = np.asarray([1, 2, 3, 6, 5, 3], dtype=object) + b = np.asarray([2, 3, 4, 5, 3, 4, 2, 2, 1], dtype=object) + with pytest.deprecated_call(match="dtype=object is not supported"): + choose_conv_method(a, b) + + +@pytest.mark.filterwarnings('ignore::DeprecationWarning') +def test_choose_conv_method_2(): + for mode in ['valid', 'same', 'full']: + x = [Decimal(3), Decimal(2)] + h = [Decimal(1), Decimal(4)] + assert_equal(choose_conv_method(x, h, mode=mode), 'direct') + + n = 10 + for not_fft_conv_supp in ["complex256", 
"complex192"]: + if hasattr(np, not_fft_conv_supp): + x = np.ones(n, dtype=not_fft_conv_supp) + h = x.copy() + assert_equal(choose_conv_method(x, h, mode=mode), 'direct') + + +@pytest.mark.fail_slow(10) +def test_filtfilt_gust(): + # Design a filter. + z, p, k = signal.ellip(3, 0.01, 120, 0.0875, output='zpk') + + # Find the approximate impulse response length of the filter. + eps = 1e-10 + r = np.max(np.abs(p)) + approx_impulse_len = int(np.ceil(np.log(eps) / np.log(r))) + + np.random.seed(123) + + b, a = zpk2tf(z, p, k) + for irlen in [None, approx_impulse_len]: + signal_len = 5 * approx_impulse_len + + # 1-d test case + check_filtfilt_gust(b, a, (signal_len,), 0, irlen) + + # 3-d test case; test each axis. + for axis in range(3): + shape = [2, 2, 2] + shape[axis] = signal_len + check_filtfilt_gust(b, a, shape, axis, irlen) + + # Test case with length less than 2*approx_impulse_len. + # In this case, `filtfilt_gust` should behave the same as if + # `irlen=None` was given. + length = 2*approx_impulse_len - 50 + check_filtfilt_gust(b, a, (length,), 0, approx_impulse_len) + + +class TestDecimate: + def test_bad_args(self): + x = np.arange(12) + assert_raises(TypeError, signal.decimate, x, q=0.5, n=1) + assert_raises(TypeError, signal.decimate, x, q=2, n=0.5) + + def test_basic_IIR(self): + x = np.arange(12) + y = signal.decimate(x, 2, n=1, ftype='iir', zero_phase=False).round() + assert_array_equal(y, x[::2]) + + def test_basic_FIR(self): + x = np.arange(12) + y = signal.decimate(x, 2, n=1, ftype='fir', zero_phase=False).round() + assert_array_equal(y, x[::2]) + + def test_shape(self): + # Regression test for ticket #1480. + z = np.zeros((30, 30)) + d0 = signal.decimate(z, 2, axis=0, zero_phase=False) + assert_equal(d0.shape, (15, 30)) + d1 = signal.decimate(z, 2, axis=1, zero_phase=False) + assert_equal(d1.shape, (30, 15)) + + def test_phaseshift_FIR(self): + with suppress_warnings() as sup: + sup.filter(BadCoefficients, "Badly conditioned filter") + self._test_phaseshift(method='fir', zero_phase=False) + + def test_zero_phase_FIR(self): + with suppress_warnings() as sup: + sup.filter(BadCoefficients, "Badly conditioned filter") + self._test_phaseshift(method='fir', zero_phase=True) + + def test_phaseshift_IIR(self): + self._test_phaseshift(method='iir', zero_phase=False) + + def test_zero_phase_IIR(self): + self._test_phaseshift(method='iir', zero_phase=True) + + def _test_phaseshift(self, method, zero_phase): + rate = 120 + rates_to = [15, 20, 30, 40] # q = 8, 6, 4, 3 + + t_tot = 100 # Need to let antialiasing filters settle + t = np.arange(rate*t_tot+1) / float(rate) + + # Sinusoids at 0.8*nyquist, windowed to avoid edge artifacts + freqs = np.array(rates_to) * 0.8 / 2 + d = (np.exp(1j * 2 * np.pi * freqs[:, np.newaxis] * t) + * signal.windows.tukey(t.size, 0.1)) + + for rate_to in rates_to: + q = rate // rate_to + t_to = np.arange(rate_to*t_tot+1) / float(rate_to) + d_tos = (np.exp(1j * 2 * np.pi * freqs[:, np.newaxis] * t_to) + * signal.windows.tukey(t_to.size, 0.1)) + + # Set up downsampling filters, match v0.17 defaults + if method == 'fir': + n = 30 + system = signal.dlti(signal.firwin(n + 1, 1. / q, + window='hamming'), 1.) 
+ elif method == 'iir': + n = 8 + wc = 0.8*np.pi/q + system = signal.dlti(*signal.cheby1(n, 0.05, wc/np.pi)) + + # Calculate expected phase response, as unit complex vector + if zero_phase is False: + _, h_resps = signal.freqz(system.num, system.den, + freqs/rate*2*np.pi) + h_resps /= np.abs(h_resps) + else: + h_resps = np.ones_like(freqs) + + y_resamps = signal.decimate(d.real, q, n, ftype=system, + zero_phase=zero_phase) + + # Get phase from complex inner product, like CSD + h_resamps = np.sum(d_tos.conj() * y_resamps, axis=-1) + h_resamps /= np.abs(h_resamps) + subnyq = freqs < 0.5*rate_to + + # Complex vectors should be aligned, only compare below nyquist + assert_allclose(np.angle(h_resps.conj()*h_resamps)[subnyq], 0, + atol=1e-3, rtol=1e-3) + + def test_auto_n(self): + # Test that our value of n is a reasonable choice (depends on + # the downsampling factor) + sfreq = 100. + n = 1000 + t = np.arange(n) / sfreq + # will alias for decimations (>= 15) + x = np.sqrt(2. / n) * np.sin(2 * np.pi * (sfreq / 30.) * t) + assert_allclose(np.linalg.norm(x), 1., rtol=1e-3) + x_out = signal.decimate(x, 30, ftype='fir') + assert_array_less(np.linalg.norm(x_out), 0.01) + + def test_long_float32(self): + # regression: gh-15072. With 32-bit float and either lfilter + # or filtfilt, this is numerically unstable + x = signal.decimate(np.ones(10_000, dtype=np.float32), 10) + assert not any(np.isnan(x)) + + def test_float16_upcast(self): + # float16 must be upcast to float64 + x = signal.decimate(np.ones(100, dtype=np.float16), 10) + assert x.dtype.type == np.float64 + + def test_complex_iir_dlti(self): + # regression: gh-17845 + # centre frequency for filter [Hz] + fcentre = 50 + # filter passband width [Hz] + fwidth = 5 + # sample rate [Hz] + fs = 1e3 + + z, p, k = signal.butter(2, 2*np.pi*fwidth/2, output='zpk', fs=fs) + z = z.astype(complex) * np.exp(2j * np.pi * fcentre/fs) + p = p.astype(complex) * np.exp(2j * np.pi * fcentre/fs) + system = signal.dlti(z, p, k) + + t = np.arange(200) / fs + + # input + u = (np.exp(2j * np.pi * fcentre * t) + + 0.5 * np.exp(-2j * np.pi * fcentre * t)) + + ynzp = signal.decimate(u, 2, ftype=system, zero_phase=False) + ynzpref = signal.lfilter(*signal.zpk2tf(z, p, k), + u)[::2] + + assert_equal(ynzp, ynzpref) + + yzp = signal.decimate(u, 2, ftype=system, zero_phase=True) + yzpref = signal.filtfilt(*signal.zpk2tf(z, p, k), + u)[::2] + + assert_allclose(yzp, yzpref, rtol=1e-10, atol=1e-13) + + def test_complex_fir_dlti(self): + # centre frequency for filter [Hz] + fcentre = 50 + # filter passband width [Hz] + fwidth = 5 + # sample rate [Hz] + fs = 1e3 + numtaps = 20 + + # FIR filter about 0Hz + bbase = signal.firwin(numtaps, fwidth/2, fs=fs) + + # rotate these to desired frequency + zbase = np.roots(bbase) + zrot = zbase * np.exp(2j * np.pi * fcentre/fs) + # FIR filter about 50Hz, maintaining passband gain of 0dB + bz = bbase[0] * np.poly(zrot) + + system = signal.dlti(bz, 1) + + t = np.arange(200) / fs + + # input + u = (np.exp(2j * np.pi * fcentre * t) + + 0.5 * np.exp(-2j * np.pi * fcentre * t)) + + ynzp = signal.decimate(u, 2, ftype=system, zero_phase=False) + ynzpref = signal.upfirdn(bz, u, up=1, down=2)[:100] + + assert_equal(ynzp, ynzpref) + + yzp = signal.decimate(u, 2, ftype=system, zero_phase=True) + yzpref = signal.resample_poly(u, 1, 2, window=bz) + + assert_equal(yzp, yzpref) + + +class TestHilbert: + + def test_bad_args(self): + x = np.array([1.0 + 0.0j]) + assert_raises(ValueError, hilbert, x) + x = np.arange(8.0) + assert_raises(ValueError, hilbert, x, 
N=0) + + def test_hilbert_theoretical(self): + # test cases by Ariel Rokem + decimal = 14 + + pi = np.pi + t = np.arange(0, 2 * pi, pi / 256) + a0 = np.sin(t) + a1 = np.cos(t) + a2 = np.sin(2 * t) + a3 = np.cos(2 * t) + a = np.vstack([a0, a1, a2, a3]) + + h = hilbert(a) + h_abs = np.abs(h) + h_angle = np.angle(h) + h_real = np.real(h) + + # The real part should be equal to the original signals: + assert_almost_equal(h_real, a, decimal) + # The absolute value should be one everywhere, for this input: + assert_almost_equal(h_abs, np.ones(a.shape), decimal) + # For the 'slow' sine - the phase should go from -pi/2 to pi/2 in + # the first 256 bins: + assert_almost_equal(h_angle[0, :256], + np.arange(-pi / 2, pi / 2, pi / 256), + decimal) + # For the 'slow' cosine - the phase should go from 0 to pi in the + # same interval: + assert_almost_equal( + h_angle[1, :256], np.arange(0, pi, pi / 256), decimal) + # The 'fast' sine should make this phase transition in half the time: + assert_almost_equal(h_angle[2, :128], + np.arange(-pi / 2, pi / 2, pi / 128), + decimal) + # Ditto for the 'fast' cosine: + assert_almost_equal( + h_angle[3, :128], np.arange(0, pi, pi / 128), decimal) + + # The imaginary part of hilbert(cos(t)) = sin(t) Wikipedia + assert_almost_equal(h[1].imag, a0, decimal) + + def test_hilbert_axisN(self): + # tests for axis and N arguments + a = np.arange(18).reshape(3, 6) + # test axis + aa = hilbert(a, axis=-1) + assert_equal(hilbert(a.T, axis=0), aa.T) + # test 1d + assert_almost_equal(hilbert(a[0]), aa[0], 14) + + # test N + aan = hilbert(a, N=20, axis=-1) + assert_equal(aan.shape, [3, 20]) + assert_equal(hilbert(a.T, N=20, axis=0).shape, [20, 3]) + # the next test is just a regression test, + # no idea whether numbers make sense + a0hilb = np.array([0.000000000000000e+00 - 1.72015830311905j, + 1.000000000000000e+00 - 2.047794505137069j, + 1.999999999999999e+00 - 2.244055555687583j, + 3.000000000000000e+00 - 1.262750302935009j, + 4.000000000000000e+00 - 1.066489252384493j, + 5.000000000000000e+00 + 2.918022706971047j, + 8.881784197001253e-17 + 3.845658908989067j, + -9.444121133484362e-17 + 0.985044202202061j, + -1.776356839400251e-16 + 1.332257797702019j, + -3.996802888650564e-16 + 0.501905089898885j, + 1.332267629550188e-16 + 0.668696078880782j, + -1.192678053963799e-16 + 0.235487067862679j, + -1.776356839400251e-16 + 0.286439612812121j, + 3.108624468950438e-16 + 0.031676888064907j, + 1.332267629550188e-16 - 0.019275656884536j, + -2.360035624836702e-16 - 0.1652588660287j, + 0.000000000000000e+00 - 0.332049855010597j, + 3.552713678800501e-16 - 0.403810179797771j, + 8.881784197001253e-17 - 0.751023775297729j, + 9.444121133484362e-17 - 0.79252210110103j]) + assert_almost_equal(aan[0], a0hilb, 14, 'N regression') + + @pytest.mark.parametrize('dtype', [np.float32, np.float64]) + def test_hilbert_types(self, dtype): + in_typed = np.zeros(8, dtype=dtype) + assert_equal(np.real(signal.hilbert(in_typed)).dtype, dtype) + + +class TestHilbert2: + + def test_bad_args(self): + # x must be real. + x = np.array([[1.0 + 0.0j]]) + assert_raises(ValueError, hilbert2, x) + + # x must be rank 2. + x = np.arange(24).reshape(2, 3, 4) + assert_raises(ValueError, hilbert2, x) + + # Bad value for N. 
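+ # (N must be a positive int or a tuple of two positive ints, one per axis; each case below violates one of those constraints.)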
+ x = np.arange(16).reshape(4, 4) + assert_raises(ValueError, hilbert2, x, N=0) + assert_raises(ValueError, hilbert2, x, N=(2, 0)) + assert_raises(ValueError, hilbert2, x, N=(2,)) + + @pytest.mark.parametrize('dtype', [np.float32, np.float64]) + def test_hilbert2_types(self, dtype): + in_typed = np.zeros((2, 32), dtype=dtype) + assert_equal(np.real(signal.hilbert2(in_typed)).dtype, dtype) + + +class TestEnvelope: + """Unit tests for function `._signaltools.envelope()`. """ + + @staticmethod + def assert_close(actual, desired, msg): + """Little helper to compare to arrays with proper tolerances""" + xp_assert_close(actual, desired, atol=1e-12, rtol=1e-12, err_msg=msg) + + def test_envelope_invalid_parameters(self): + """For `envelope()` Raise all exceptions that are used to verify function + parameters. """ + with pytest.raises(ValueError, + match=r"Invalid parameter axis=2 for z.shape=.*"): + envelope(np.ones(3), axis=2) + with pytest.raises(ValueError, + match=r"z.shape\[axis\] not > 0 for z.shape=.*"): + envelope(np.ones((3, 0)), axis=1) + for bp_in in [(0, 1, 2), (0, 2.), (None, 2.)]: + ts = ', '.join(map(str, bp_in)) + with pytest.raises(ValueError, + match=rf"bp_in=\({ts}\) isn't a 2-tuple of.*"): + # noinspection PyTypeChecker + envelope(np.ones(4), bp_in=bp_in) + with pytest.raises(ValueError, + match="n_out=10.0 is not a positive integer or.*"): + # noinspection PyTypeChecker + envelope(np.ones(4), n_out=10.) + for bp_in in [(-1, 3), (1, 1), (0, 10)]: + with pytest.raises(ValueError, + match=r"`-n//2 <= bp_in\[0\] < bp_in\[1\] <=.*"): + envelope(np.ones(4), bp_in=bp_in) + with pytest.raises(ValueError, match="residual='undefined' not in .*"): + # noinspection PyTypeChecker + envelope(np.ones(4), residual='undefined') + + def test_envelope_verify_parameters(self): + """Ensure that the various parametrizations produce compatible results. 
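+ Squared and unsquared output, 3x up-sampled output (n_out=3*n), residual='lowpass', residual=None, and the complex analytic-signal input are all checked against one squared-envelope reference.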
""" + Z, Zr_a = [4, 2, 2, 3, 0], [4, 0, 0, 6, 0, 0, 0, 0] + z = sp_fft.irfft(Z) + n = len(z) + + # the reference envelope: + ze2_0, zr_0 = envelope(z, (1, 3), residual='all', squared=True) + self.assert_close(sp_fft.rfft(ze2_0), np.array([4, 2, 0, 0, 0]).astype(complex), + msg="Envelope calculation error") + self.assert_close(sp_fft.rfft(zr_0), np.array([4, 0, 0, 3, 0]).astype(complex), + msg="Residual calculation error") + + ze_1, zr_1 = envelope(z, (1, 3), residual='all', squared=False) + self.assert_close(ze_1**2, ze2_0, + msg="Unsquared versus Squared envelope calculation error") + self.assert_close(zr_1, zr_0, + msg="Unsquared versus Squared residual calculation error") + + ze2_2, zr_2 = envelope(z, (1, 3), residual='all', squared=True, n_out=3*n) + self.assert_close(ze2_2[::3], ze2_0, + msg="3x up-sampled envelope calculation error") + self.assert_close(zr_2[::3], zr_0, + msg="3x up-sampled residual calculation error") + + ze2_3, zr_3 = envelope(z, (1, 3), residual='lowpass', squared=True) + self.assert_close(ze2_3, ze2_0, + msg="`residual='lowpass'` envelope calculation error") + self.assert_close(sp_fft.rfft(zr_3), np.array([4, 0, 0, 0, 0]).astype(complex), + msg="`residual='lowpass'` residual calculation error") + + ze2_4 = envelope(z, (1, 3), residual=None, squared=True) + self.assert_close(ze2_4, ze2_0, + msg="`residual=None` envelope calculation error") + + # compare complex analytic signal to real version + Z_a = np.copy(Z) + Z_a[1:] *= 2 + z_a = sp_fft.ifft(Z_a, n=n) # analytic signal of Z + self.assert_close(z_a.real, z, + msg="Reference analytic signal error") + ze2_a, zr_a = envelope(z_a, (1, 3), residual='all', squared=True) + self.assert_close(ze2_a, ze2_0.astype(complex), # dtypes must match + msg="Complex envelope calculation error") + self.assert_close(sp_fft.fft(zr_a), np.array(Zr_a).astype(complex), + msg="Complex residual calculation error") + + @pytest.mark.parametrize( + " Z, bp_in, Ze2_desired, Zr_desired", + [([1, 0, 2, 2, 0], (1, None), [4, 2, 0, 0, 0], [1, 0, 0, 0, 0]), + ([4, 0, 2, 0, 0], (0, None), [4, 0, 2, 0, 0], [0, 0, 0, 0, 0]), + ([4, 0, 0, 2, 0], (None, None), [4, 0, 0, 2, 0], [0, 0, 0, 0, 0]), + ([0, 0, 2, 2, 0], (1, 3), [2, 0, 0, 0, 0], [0, 0, 0, 2, 0]), + ([4, 0, 2, 2, 0], (-3, 3), [4, 0, 2, 0, 0], [0, 0, 0, 2, 0]), + ([4, 0, 3, 4, 0], (None, 1), [2, 0, 0, 0, 0], [0, 0, 3, 4, 0]), + ([4, 0, 3, 4, 0], (None, 0), [0, 0, 0, 0, 0], [4, 0, 3, 4, 0])]) + def test_envelope_real_signals(self, Z, bp_in, Ze2_desired, Zr_desired): + """Test envelope calculation with real-valued test signals. + + The comparisons are performed in the Fourier space, since it makes evaluating + the bandpass filter behavior straightforward. Note that also the squared + envelope can be easily calculated by hand, if one recalls that coefficients of + a complex-valued Fourier series representing the signal can be directly + determined by an FFT and that the absolute square of a Fourier series is again + a Fourier series. 
+ """ + z = sp_fft.irfft(Z) + ze2, zr = envelope(z, bp_in, residual='all', squared=True) + ze2_lp, zr_lp = envelope(z, bp_in, residual='lowpass', squared=True) + Ze2, Zr, Ze2_lp, Zr_lp = (sp_fft.rfft(z_) for z_ in (ze2, zr, ze2_lp, zr_lp)) + + Ze2_desired = np.array(Ze2_desired).astype(complex) + Zr_desired = np.array(Zr_desired).astype(complex) + self.assert_close(Ze2, Ze2_desired, + msg="Envelope calculation error (residual='all')") + self.assert_close(Zr, Zr_desired, + msg="Residual calculation error (residual='all')") + + if bp_in[1] is not None: + Zr_desired[bp_in[1]:] = 0 + self.assert_close(Ze2_lp, Ze2_desired, + msg="Envelope calculation error (residual='lowpass')") + self.assert_close(Zr_lp, Zr_desired, + msg="Residual calculation error (residual='lowpass')") + + @pytest.mark.parametrize( + " Z, bp_in, Ze2_desired, Zr_desired", + [([0, 5, 0, 5, 0], (None, None), [5, 0, 10, 0, 5], [0, 0, 0, 0, 0]), + ([1, 5, 0, 5, 2], (-1, 2), [5, 0, 10, 0, 5], [1, 0, 0, 0, 2]), + ([1, 2, 6, 0, 6, 3], (-1, 2), [0, 6, 0, 12, 0, 6], [1, 2, 0, 0, 0, 3]) + ]) + def test_envelope_complex_signals(self, Z, bp_in, Ze2_desired, Zr_desired): + """Test envelope calculation with complex-valued test signals. + + We only need to test for the complex envelope here, since the ``Nones``s in the + bandpass filter were already tested in the previous test. + """ + z = sp_fft.ifft(sp_fft.ifftshift(Z)) + ze2, zr = envelope(z, bp_in, residual='all', squared=True) + Ze2, Zr = (sp_fft.fftshift(sp_fft.fft(z_)) for z_ in (ze2, zr)) + + self.assert_close(Ze2, np.array(Ze2_desired).astype(complex), + msg="Envelope calculation error") + self.assert_close(Zr, np.array(Zr_desired).astype(complex), + msg="Residual calculation error") + + def test_envelope_verify_axis_parameter(self): + """Test for multi-channel envelope calculations. """ + z = sp_fft.irfft([[1, 0, 2, 2, 0], [7, 0, 4, 4, 0]]) + Ze2_desired = np.array([[4, 2, 0, 0, 0], [16, 8, 0, 0, 0]], + dtype=complex) + Zr_desired = np.array([[1, 0, 0, 0, 0], [7, 0, 0, 0, 0]], dtype=complex) + + ze2, zr = envelope(z, squared=True, axis=1) + ye2T, yrT = envelope(z.T, squared=True, axis=0) + Ze2, Ye2, Zr, Yr = (sp_fft.rfft(z_) for z_ in (ze2, ye2T.T, zr, yrT.T)) + + self.assert_close(Ze2, Ze2_desired, msg="2d envelope calculation error") + self.assert_close(Zr, Zr_desired, msg="2d residual calculation error") + self.assert_close(Ye2, Ze2_desired, msg="Transposed 2d envelope calc. error") + self.assert_close(Yr, Zr_desired, msg="Transposed 2d residual calc. error") + + def test_envelope_verify_axis_parameter_complex(self): + """Test for multi-channel envelope calculations with complex values. """ + z = sp_fft.ifft(sp_fft.ifftshift([[1, 5, 0, 5, 2], [1, 10, 0, 10, 2]], axes=1)) + Ze2_des = np.array([[5, 0, 10, 0, 5], [20, 0, 40, 0, 20],], + dtype=complex) + Zr_des = np.array([[1, 0, 0, 0, 2], [1, 0, 0, 0, 2]], dtype=complex) + + kw = dict(bp_in=(-1, 2), residual='all', squared=True) + ze2, zr = envelope(z, axis=1, **kw) + ye2T, yrT = envelope(z.T, axis=0, **kw) + Ze2, Ye2, Zr, Yr = (sp_fft.fftshift(sp_fft.fft(z_), axes=1) + for z_ in (ze2, ye2T.T, zr, yrT.T)) + + self.assert_close(Ze2, Ze2_des, msg="2d envelope calculation error") + self.assert_close(Zr, Zr_des, msg="2d residual calculation error") + self.assert_close(Ye2, Ze2_des, msg="Transposed 2d envelope calc. error") + self.assert_close(Yr, Zr_des, msg="Transposed 2d residual calc. 
error") + + @pytest.mark.parametrize('X', [[4, 0, 0, 1, 2], [4, 0, 0, 2, 1, 2]]) + def test_compare_envelope_hilbert(self, X): + """Compare output of `envelope()` and `hilbert()`. """ + x = sp_fft.irfft(X) + e_hil = np.abs(hilbert(x)) + e_env = envelope(x, (None, None), residual=None) + self.assert_close(e_hil, e_env, msg="Hilbert-Envelope comparison error") + + +class TestPartialFractionExpansion: + @staticmethod + def assert_rp_almost_equal(r, p, r_true, p_true, decimal=7): + r_true = np.asarray(r_true) + p_true = np.asarray(p_true) + + distance = np.hypot(abs(p[:, None] - p_true), + abs(r[:, None] - r_true)) + + rows, cols = linear_sum_assignment(distance) + assert_almost_equal(p[rows], p_true[cols], decimal=decimal) + assert_almost_equal(r[rows], r_true[cols], decimal=decimal) + + def test_compute_factors(self): + factors, poly = _compute_factors([1, 2, 3], [3, 2, 1]) + assert_equal(len(factors), 3) + assert_almost_equal(factors[0], np.poly([2, 2, 3])) + assert_almost_equal(factors[1], np.poly([1, 1, 1, 3])) + assert_almost_equal(factors[2], np.poly([1, 1, 1, 2, 2])) + assert_almost_equal(poly, np.poly([1, 1, 1, 2, 2, 3])) + + factors, poly = _compute_factors([1, 2, 3], [3, 2, 1], + include_powers=True) + assert_equal(len(factors), 6) + assert_almost_equal(factors[0], np.poly([1, 1, 2, 2, 3])) + assert_almost_equal(factors[1], np.poly([1, 2, 2, 3])) + assert_almost_equal(factors[2], np.poly([2, 2, 3])) + assert_almost_equal(factors[3], np.poly([1, 1, 1, 2, 3])) + assert_almost_equal(factors[4], np.poly([1, 1, 1, 3])) + assert_almost_equal(factors[5], np.poly([1, 1, 1, 2, 2])) + assert_almost_equal(poly, np.poly([1, 1, 1, 2, 2, 3])) + + def test_group_poles(self): + unique, multiplicity = _group_poles( + [1.0, 1.001, 1.003, 2.0, 2.003, 3.0], 0.1, 'min') + assert_equal(unique, [1.0, 2.0, 3.0]) + assert_equal(multiplicity, [3, 2, 1]) + + def test_residue_general(self): + # Test are taken from issue #4464, note that poles in scipy are + # in increasing by absolute value order, opposite to MATLAB. 
+ r, p, k = residue([5, 3, -2, 7], [-4, 0, 8, 3]) + assert_almost_equal(r, [1.3320, -0.6653, -1.4167], decimal=4) + assert_almost_equal(p, [-0.4093, -1.1644, 1.5737], decimal=4) + assert_almost_equal(k, [-1.2500], decimal=4) + + r, p, k = residue([-4, 8], [1, 6, 8]) + assert_almost_equal(r, [8, -12]) + assert_almost_equal(p, [-2, -4]) + assert_equal(k.size, 0) + + r, p, k = residue([4, 1], [1, -1, -2]) + assert_almost_equal(r, [1, 3]) + assert_almost_equal(p, [-1, 2]) + assert_equal(k.size, 0) + + r, p, k = residue([4, 3], [2, -3.4, 1.98, -0.406]) + self.assert_rp_almost_equal( + r, p, [-18.125 - 13.125j, -18.125 + 13.125j, 36.25], + [0.5 - 0.2j, 0.5 + 0.2j, 0.7]) + assert_equal(k.size, 0) + + r, p, k = residue([2, 1], [1, 5, 8, 4]) + self.assert_rp_almost_equal(r, p, [-1, 1, 3], [-1, -2, -2]) + assert_equal(k.size, 0) + + r, p, k = residue([3, -1.1, 0.88, -2.396, 1.348], + [1, -0.7, -0.14, 0.048]) + assert_almost_equal(r, [-3, 4, 1]) + assert_almost_equal(p, [0.2, -0.3, 0.8]) + assert_almost_equal(k, [3, 1]) + + r, p, k = residue([1], [1, 2, -3]) + assert_almost_equal(r, [0.25, -0.25]) + assert_almost_equal(p, [1, -3]) + assert_equal(k.size, 0) + + r, p, k = residue([1, 0, -5], [1, 0, 0, 0, -1]) + self.assert_rp_almost_equal(r, p, + [1, 1.5j, -1.5j, -1], [-1, -1j, 1j, 1]) + assert_equal(k.size, 0) + + r, p, k = residue([3, 8, 6], [1, 3, 3, 1]) + self.assert_rp_almost_equal(r, p, [1, 2, 3], [-1, -1, -1]) + assert_equal(k.size, 0) + + r, p, k = residue([3, -1], [1, -3, 2]) + assert_almost_equal(r, [-2, 5]) + assert_almost_equal(p, [1, 2]) + assert_equal(k.size, 0) + + r, p, k = residue([2, 3, -1], [1, -3, 2]) + assert_almost_equal(r, [-4, 13]) + assert_almost_equal(p, [1, 2]) + assert_almost_equal(k, [2]) + + r, p, k = residue([7, 2, 3, -1], [1, -3, 2]) + assert_almost_equal(r, [-11, 69]) + assert_almost_equal(p, [1, 2]) + assert_almost_equal(k, [7, 23]) + + r, p, k = residue([2, 3, -1], [1, -3, 4, -2]) + self.assert_rp_almost_equal(r, p, [4, -1 + 3.5j, -1 - 3.5j], + [1, 1 - 1j, 1 + 1j]) + assert_almost_equal(k.size, 0) + + def test_residue_leading_zeros(self): + # Leading zeros in numerator or denominator must not affect the answer. + r0, p0, k0 = residue([5, 3, -2, 7], [-4, 0, 8, 3]) + r1, p1, k1 = residue([0, 5, 3, -2, 7], [-4, 0, 8, 3]) + r2, p2, k2 = residue([5, 3, -2, 7], [0, -4, 0, 8, 3]) + r3, p3, k3 = residue([0, 0, 5, 3, -2, 7], [0, 0, 0, -4, 0, 8, 3]) + assert_almost_equal(r0, r1) + assert_almost_equal(r0, r2) + assert_almost_equal(r0, r3) + assert_almost_equal(p0, p1) + assert_almost_equal(p0, p2) + assert_almost_equal(p0, p3) + assert_almost_equal(k0, k1) + assert_almost_equal(k0, k2) + assert_almost_equal(k0, k3) + + def test_resiude_degenerate(self): + # Several tests for zero numerator and denominator. 
+ r, p, k = residue([0, 0], [1, 6, 8]) + assert_almost_equal(r, [0, 0]) + assert_almost_equal(p, [-2, -4]) + assert_equal(k.size, 0) + + r, p, k = residue(0, 1) + assert_equal(r.size, 0) + assert_equal(p.size, 0) + assert_equal(k.size, 0) + + with pytest.raises(ValueError, match="Denominator `a` is zero."): + residue(1, 0) + + def test_residuez_general(self): + r, p, k = residuez([1, 6, 6, 2], [1, -(2 + 1j), (1 + 2j), -1j]) + self.assert_rp_almost_equal(r, p, [-2+2.5j, 7.5+7.5j, -4.5-12j], + [1j, 1, 1]) + assert_almost_equal(k, [2j]) + + r, p, k = residuez([1, 2, 1], [1, -1, 0.3561]) + self.assert_rp_almost_equal(r, p, + [-0.9041 - 5.9928j, -0.9041 + 5.9928j], + [0.5 + 0.3257j, 0.5 - 0.3257j], + decimal=4) + assert_almost_equal(k, [2.8082], decimal=4) + + r, p, k = residuez([1, -1], [1, -5, 6]) + assert_almost_equal(r, [-1, 2]) + assert_almost_equal(p, [2, 3]) + assert_equal(k.size, 0) + + r, p, k = residuez([2, 3, 4], [1, 3, 3, 1]) + self.assert_rp_almost_equal(r, p, [4, -5, 3], [-1, -1, -1]) + assert_equal(k.size, 0) + + r, p, k = residuez([1, -10, -4, 4], [2, -2, -4]) + assert_almost_equal(r, [0.5, -1.5]) + assert_almost_equal(p, [-1, 2]) + assert_almost_equal(k, [1.5, -1]) + + r, p, k = residuez([18], [18, 3, -4, -1]) + self.assert_rp_almost_equal(r, p, + [0.36, 0.24, 0.4], [0.5, -1/3, -1/3]) + assert_equal(k.size, 0) + + r, p, k = residuez([2, 3], np.polymul([1, -1/2], [1, 1/4])) + assert_almost_equal(r, [-10/3, 16/3]) + assert_almost_equal(p, [-0.25, 0.5]) + assert_equal(k.size, 0) + + r, p, k = residuez([1, -2, 1], [1, -1]) + assert_almost_equal(r, [0]) + assert_almost_equal(p, [1]) + assert_almost_equal(k, [1, -1]) + + r, p, k = residuez(1, [1, -1j]) + assert_almost_equal(r, [1]) + assert_almost_equal(p, [1j]) + assert_equal(k.size, 0) + + r, p, k = residuez(1, [1, -1, 0.25]) + assert_almost_equal(r, [0, 1]) + assert_almost_equal(p, [0.5, 0.5]) + assert_equal(k.size, 0) + + r, p, k = residuez(1, [1, -0.75, .125]) + assert_almost_equal(r, [-1, 2]) + assert_almost_equal(p, [0.25, 0.5]) + assert_equal(k.size, 0) + + r, p, k = residuez([1, 6, 2], [1, -2, 1]) + assert_almost_equal(r, [-10, 9]) + assert_almost_equal(p, [1, 1]) + assert_almost_equal(k, [2]) + + r, p, k = residuez([6, 2], [1, -2, 1]) + assert_almost_equal(r, [-2, 8]) + assert_almost_equal(p, [1, 1]) + assert_equal(k.size, 0) + + r, p, k = residuez([1, 6, 6, 2], [1, -2, 1]) + assert_almost_equal(r, [-24, 15]) + assert_almost_equal(p, [1, 1]) + assert_almost_equal(k, [10, 2]) + + r, p, k = residuez([1, 0, 1], [1, 0, 0, 0, 0, -1]) + self.assert_rp_almost_equal(r, p, + [0.2618 + 0.1902j, 0.2618 - 0.1902j, + 0.4, 0.0382 - 0.1176j, 0.0382 + 0.1176j], + [-0.8090 + 0.5878j, -0.8090 - 0.5878j, + 1.0, 0.3090 + 0.9511j, 0.3090 - 0.9511j], + decimal=4) + assert_equal(k.size, 0) + + def test_residuez_trailing_zeros(self): + # Trailing zeros in numerator or denominator must not affect the + # answer. 
+ r0, p0, k0 = residuez([5, 3, -2, 7], [-4, 0, 8, 3]) + r1, p1, k1 = residuez([5, 3, -2, 7, 0], [-4, 0, 8, 3]) + r2, p2, k2 = residuez([5, 3, -2, 7], [-4, 0, 8, 3, 0]) + r3, p3, k3 = residuez([5, 3, -2, 7, 0, 0], [-4, 0, 8, 3, 0, 0, 0]) + assert_almost_equal(r0, r1) + assert_almost_equal(r0, r2) + assert_almost_equal(r0, r3) + assert_almost_equal(p0, p1) + assert_almost_equal(p0, p2) + assert_almost_equal(p0, p3) + assert_almost_equal(k0, k1) + assert_almost_equal(k0, k2) + assert_almost_equal(k0, k3) + + def test_residuez_degenerate(self): + r, p, k = residuez([0, 0], [1, 6, 8]) + assert_almost_equal(r, [0, 0]) + assert_almost_equal(p, [-2, -4]) + assert_equal(k.size, 0) + + r, p, k = residuez(0, 1) + assert_equal(r.size, 0) + assert_equal(p.size, 0) + assert_equal(k.size, 0) + + with pytest.raises(ValueError, match="Denominator `a` is zero."): + residuez(1, 0) + + with pytest.raises(ValueError, + match="First coefficient of determinant `a` must " + "be non-zero."): + residuez(1, [0, 1, 2, 3]) + + def test_inverse_unique_roots_different_rtypes(self): + # This test was inspired by GitHub issue 2496. + r = [3 / 10, -1 / 6, -2 / 15] + p = [0, -2, -5] + k = [] + b_expected = [0, 1, 3] + a_expected = [1, 7, 10, 0] + + # With the default tolerance, the rtype does not matter + # for this example. + for rtype in ('avg', 'mean', 'min', 'minimum', 'max', 'maximum'): + b, a = invres(r, p, k, rtype=rtype) + assert_allclose(b, b_expected) + assert_allclose(a, a_expected) + + b, a = invresz(r, p, k, rtype=rtype) + assert_allclose(b, b_expected) + assert_allclose(a, a_expected) + + def test_inverse_repeated_roots_different_rtypes(self): + r = [3 / 20, -7 / 36, -1 / 6, 2 / 45] + p = [0, -2, -2, -5] + k = [] + b_expected = [0, 0, 1, 3] + b_expected_z = [-1/6, -2/3, 11/6, 3] + a_expected = [1, 9, 24, 20, 0] + + for rtype in ('avg', 'mean', 'min', 'minimum', 'max', 'maximum'): + b, a = invres(r, p, k, rtype=rtype) + assert_allclose(b, b_expected, atol=1e-14) + assert_allclose(a, a_expected) + + b, a = invresz(r, p, k, rtype=rtype) + assert_allclose(b, b_expected_z, atol=1e-14) + assert_allclose(a, a_expected) + + def test_inverse_bad_rtype(self): + r = [3 / 20, -7 / 36, -1 / 6, 2 / 45] + p = [0, -2, -2, -5] + k = [] + with pytest.raises(ValueError, match="`rtype` must be one of"): + invres(r, p, k, rtype='median') + with pytest.raises(ValueError, match="`rtype` must be one of"): + invresz(r, p, k, rtype='median') + + def test_invresz_one_coefficient_bug(self): + # Regression test for issue in gh-4646. 
+ r = [1] + p = [2] + k = [0] + b, a = invresz(r, p, k) + assert_allclose(b, [1.0]) + assert_allclose(a, [1.0, -2.0]) + + def test_invres(self): + b, a = invres([1], [1], []) + assert_almost_equal(b, [1]) + assert_almost_equal(a, [1, -1]) + + b, a = invres([1 - 1j, 2, 0.5 - 3j], [1, 0.5j, 1 + 1j], []) + assert_almost_equal(b, [3.5 - 4j, -8.5 + 0.25j, 3.5 + 3.25j]) + assert_almost_equal(a, [1, -2 - 1.5j, 0.5 + 2j, 0.5 - 0.5j]) + + b, a = invres([0.5, 1], [1 - 1j, 2 + 2j], [1, 2, 3]) + assert_almost_equal(b, [1, -1 - 1j, 1 - 2j, 0.5 - 3j, 10]) + assert_almost_equal(a, [1, -3 - 1j, 4]) + + b, a = invres([-1, 2, 1j, 3 - 1j, 4, -2], + [-1, 2 - 1j, 2 - 1j, 3, 3, 3], []) + assert_almost_equal(b, [4 - 1j, -28 + 16j, 40 - 62j, 100 + 24j, + -292 + 219j, 192 - 268j]) + assert_almost_equal(a, [1, -12 + 2j, 53 - 20j, -96 + 68j, 27 - 72j, + 108 - 54j, -81 + 108j]) + + b, a = invres([-1, 1j], [1, 1], [1, 2]) + assert_almost_equal(b, [1, 0, -4, 3 + 1j]) + assert_almost_equal(a, [1, -2, 1]) + + def test_invresz(self): + b, a = invresz([1], [1], []) + assert_almost_equal(b, [1]) + assert_almost_equal(a, [1, -1]) + + b, a = invresz([1 - 1j, 2, 0.5 - 3j], [1, 0.5j, 1 + 1j], []) + assert_almost_equal(b, [3.5 - 4j, -8.5 + 0.25j, 3.5 + 3.25j]) + assert_almost_equal(a, [1, -2 - 1.5j, 0.5 + 2j, 0.5 - 0.5j]) + + b, a = invresz([0.5, 1], [1 - 1j, 2 + 2j], [1, 2, 3]) + assert_almost_equal(b, [2.5, -3 - 1j, 1 - 2j, -1 - 3j, 12]) + assert_almost_equal(a, [1, -3 - 1j, 4]) + + b, a = invresz([-1, 2, 1j, 3 - 1j, 4, -2], + [-1, 2 - 1j, 2 - 1j, 3, 3, 3], []) + assert_almost_equal(b, [6, -50 + 11j, 100 - 72j, 80 + 58j, + -354 + 228j, 234 - 297j]) + assert_almost_equal(a, [1, -12 + 2j, 53 - 20j, -96 + 68j, 27 - 72j, + 108 - 54j, -81 + 108j]) + + b, a = invresz([-1, 1j], [1, 1], [1, 2]) + assert_almost_equal(b, [1j, 1, -3, 2]) + assert_almost_equal(a, [1, -2, 1]) + + def test_inverse_scalar_arguments(self): + b, a = invres(1, 1, 1) + assert_almost_equal(b, [1, 0]) + assert_almost_equal(a, [1, -1]) + + b, a = invresz(1, 1, 1) + assert_almost_equal(b, [2, -1]) + assert_almost_equal(a, [1, -1]) + + +class TestVectorstrength: + + def test_single_1dperiod(self): + events = np.array([.5]) + period = 5. + targ_strength = 1. + targ_phase = .1 + + strength, phase = vectorstrength(events, period) + + assert_equal(strength.ndim, 0) + assert_equal(phase.ndim, 0) + assert_almost_equal(strength, targ_strength) + assert_almost_equal(phase, 2 * np.pi * targ_phase) + + def test_single_2dperiod(self): + events = np.array([.5]) + period = [1, 2, 5.] + targ_strength = [1.] * 3 + targ_phase = np.array([.5, .25, .1]) + + strength, phase = vectorstrength(events, period) + + assert_equal(strength.ndim, 1) + assert_equal(phase.ndim, 1) + assert_array_almost_equal(strength, targ_strength) + assert_almost_equal(phase, 2 * np.pi * targ_phase) + + def test_equal_1dperiod(self): + events = np.array([.25, .25, .25, .25, .25, .25]) + period = 2 + targ_strength = 1. + targ_phase = .125 + + strength, phase = vectorstrength(events, period) + + assert_equal(strength.ndim, 0) + assert_equal(phase.ndim, 0) + assert_almost_equal(strength, targ_strength) + assert_almost_equal(phase, 2 * np.pi * targ_phase) + + def test_equal_2dperiod(self): + events = np.array([.25, .25, .25, .25, .25, .25]) + period = [1, 2, ] + targ_strength = [1.] 
* 2 + targ_phase = np.array([.25, .125]) + + strength, phase = vectorstrength(events, period) + + assert_equal(strength.ndim, 1) + assert_equal(phase.ndim, 1) + assert_almost_equal(strength, targ_strength) + assert_almost_equal(phase, 2 * np.pi * targ_phase) + + def test_spaced_1dperiod(self): + events = np.array([.1, 1.1, 2.1, 4.1, 10.1]) + period = 1 + targ_strength = 1. + targ_phase = .1 + + strength, phase = vectorstrength(events, period) + + assert_equal(strength.ndim, 0) + assert_equal(phase.ndim, 0) + assert_almost_equal(strength, targ_strength) + assert_almost_equal(phase, 2 * np.pi * targ_phase) + + def test_spaced_2dperiod(self): + events = np.array([.1, 1.1, 2.1, 4.1, 10.1]) + period = [1, .5] + targ_strength = [1.] * 2 + targ_phase = np.array([.1, .2]) + + strength, phase = vectorstrength(events, period) + + assert_equal(strength.ndim, 1) + assert_equal(phase.ndim, 1) + assert_almost_equal(strength, targ_strength) + assert_almost_equal(phase, 2 * np.pi * targ_phase) + + def test_partial_1dperiod(self): + events = np.array([.25, .5, .75]) + period = 1 + targ_strength = 1. / 3. + targ_phase = .5 + + strength, phase = vectorstrength(events, period) + + assert_equal(strength.ndim, 0) + assert_equal(phase.ndim, 0) + assert_almost_equal(strength, targ_strength) + assert_almost_equal(phase, 2 * np.pi * targ_phase) + + def test_partial_2dperiod(self): + events = np.array([.25, .5, .75]) + period = [1., 1., 1., 1.] + targ_strength = [1. / 3.] * 4 + targ_phase = np.array([.5, .5, .5, .5]) + + strength, phase = vectorstrength(events, period) + + assert_equal(strength.ndim, 1) + assert_equal(phase.ndim, 1) + assert_almost_equal(strength, targ_strength) + assert_almost_equal(phase, 2 * np.pi * targ_phase) + + def test_opposite_1dperiod(self): + events = np.array([0, .25, .5, .75]) + period = 1. + targ_strength = 0 + + strength, phase = vectorstrength(events, period) + + assert_equal(strength.ndim, 0) + assert_equal(phase.ndim, 0) + assert_almost_equal(strength, targ_strength) + + def test_opposite_2dperiod(self): + events = np.array([0, .25, .5, .75]) + period = [1.] * 10 + targ_strength = [0.] * 10 + + strength, phase = vectorstrength(events, period) + + assert_equal(strength.ndim, 1) + assert_equal(phase.ndim, 1) + assert_almost_equal(strength, targ_strength) + + def test_2d_events_ValueError(self): + events = np.array([[1, 2]]) + period = 1. + assert_raises(ValueError, vectorstrength, events, period) + + def test_2d_period_ValueError(self): + events = 1. + period = np.array([[1]]) + assert_raises(ValueError, vectorstrength, events, period) + + def test_zero_period_ValueError(self): + events = 1. + period = 0 + assert_raises(ValueError, vectorstrength, events, period) + + def test_negative_period_ValueError(self): + events = 1. 
+ period = -1 + assert_raises(ValueError, vectorstrength, events, period) + + +def assert_allclose_cast(actual, desired, rtol=1e-7, atol=0): + """Wrap assert_allclose while casting object arrays.""" + if actual.dtype.kind == 'O': + dtype = np.array(actual.flat[0]).dtype + actual, desired = actual.astype(dtype), desired.astype(dtype) + assert_allclose(actual, desired, rtol, atol) + + +@pytest.mark.filterwarnings('ignore::DeprecationWarning') +@pytest.mark.parametrize('func', (sosfilt, lfilter)) +def test_nonnumeric_dtypes(func): + x = [Decimal(1), Decimal(2), Decimal(3)] + b = [Decimal(1), Decimal(2), Decimal(3)] + a = [Decimal(1), Decimal(2), Decimal(3)] + x = np.array(x) + assert x.dtype.kind == 'O' + desired = lfilter(np.array(b, float), np.array(a, float), x.astype(float)) + if func is sosfilt: + actual = sosfilt([b + a], x) + else: + actual = lfilter(b, a, x) + assert all(isinstance(x, Decimal) for x in actual) + assert_allclose(actual.astype(float), desired.astype(float)) + # Degenerate cases + if func is lfilter: + args = [1., 1.] + else: + args = [tf2sos(1., 1.)] + + with pytest.raises(ValueError, match='must be at least 1-D'): + func(*args, x=1.) + + +@pytest.mark.parametrize('dt', 'fdFD') +class TestSOSFilt: + + # The test_rank* tests are pulled from _TestLinearFilter + def test_rank1(self, dt): + x = np.linspace(0, 5, 6).astype(dt) + b = np.array([1, -1]).astype(dt) + a = np.array([0.5, -0.5]).astype(dt) + + # Test simple IIR + y_r = np.array([0, 2, 4, 6, 8, 10.]).astype(dt) + sos = tf2sos(b, a) + assert_array_almost_equal(sosfilt(tf2sos(b, a), x), y_r) + + # Test simple FIR + b = np.array([1, 1]).astype(dt) + # NOTE: This was changed (rel. to TestLinear...) to add a pole @zero: + a = np.array([1, 0]).astype(dt) + y_r = np.array([0, 1, 3, 5, 7, 9.]).astype(dt) + assert_array_almost_equal(sosfilt(tf2sos(b, a), x), y_r) + + b = [1, 1, 0] + a = [1, 0, 0] + x = np.ones(8) + sos = np.concatenate((b, a)) + sos.shape = (1, 6) + y = sosfilt(sos, x) + assert_allclose(y, [1, 2, 2, 2, 2, 2, 2, 2]) + + def test_rank2(self, dt): + shape = (4, 3) + x = np.linspace(0, np.prod(shape) - 1, np.prod(shape)).reshape(shape) + x = x.astype(dt) + + b = np.array([1, -1]).astype(dt) + a = np.array([0.5, 0.5]).astype(dt) + + y_r2_a0 = np.array([[0, 2, 4], [6, 4, 2], [0, 2, 4], [6, 4, 2]], + dtype=dt) + + y_r2_a1 = np.array([[0, 2, 0], [6, -4, 6], [12, -10, 12], + [18, -16, 18]], dtype=dt) + + y = sosfilt(tf2sos(b, a), x, axis=0) + assert_array_almost_equal(y_r2_a0, y) + + y = sosfilt(tf2sos(b, a), x, axis=1) + assert_array_almost_equal(y_r2_a1, y) + + def test_rank3(self, dt): + shape = (4, 3, 2) + x = np.linspace(0, np.prod(shape) - 1, np.prod(shape)).reshape(shape) + + b = np.array([1, -1]).astype(dt) + a = np.array([0.5, 0.5]).astype(dt) + + # Test last axis + y = sosfilt(tf2sos(b, a), x) + for i in range(x.shape[0]): + for j in range(x.shape[1]): + assert_array_almost_equal(y[i, j], lfilter(b, a, x[i, j])) + + def test_initial_conditions(self, dt): + b1, a1 = signal.butter(2, 0.25, 'low') + b2, a2 = signal.butter(2, 0.75, 'low') + b3, a3 = signal.butter(2, 0.75, 'low') + b = np.convolve(np.convolve(b1, b2), b3) + a = np.convolve(np.convolve(a1, a2), a3) + sos = np.array((np.r_[b1, a1], np.r_[b2, a2], np.r_[b3, a3])) + + x = np.random.rand(50).astype(dt) + + # Stopping filtering and continuing + y_true, zi = lfilter(b, a, x[:20], zi=np.zeros(6)) + y_true = np.r_[y_true, lfilter(b, a, x[20:], zi=zi)[0]] + assert_allclose_cast(y_true, lfilter(b, a, x)) + + y_sos, zi = sosfilt(sos, x[:20], zi=np.zeros((3, 
2))) + y_sos = np.r_[y_sos, sosfilt(sos, x[20:], zi=zi)[0]] + assert_allclose_cast(y_true, y_sos) + + # Use a step function + zi = sosfilt_zi(sos) + x = np.ones(8, dt) + y, zf = sosfilt(sos, x, zi=zi) + + assert_allclose_cast(y, np.ones(8)) + assert_allclose_cast(zf, zi) + + # Initial condition shape matching + x.shape = (1, 1) + x.shape # 3D + assert_raises(ValueError, sosfilt, sos, x, zi=zi) + zi_nd = zi.copy() + zi_nd.shape = (zi.shape[0], 1, 1, zi.shape[-1]) + assert_raises(ValueError, sosfilt, sos, x, + zi=zi_nd[:, :, :, [0, 1, 1]]) + y, zf = sosfilt(sos, x, zi=zi_nd) + assert_allclose_cast(y[0, 0], np.ones(8)) + assert_allclose_cast(zf[:, 0, 0, :], zi) + + def test_initial_conditions_3d_axis1(self, dt): + # Test the use of zi when sosfilt is applied to axis 1 of a 3-d input. + + # Input array is x. + x = np.random.RandomState(159).randint(0, 5, size=(2, 15, 3)) + x = x.astype(dt) + + # Design a filter in ZPK format and convert to SOS + zpk = signal.butter(6, 0.35, output='zpk') + sos = zpk2sos(*zpk) + nsections = sos.shape[0] + + # Filter along this axis. + axis = 1 + + # Initial conditions, all zeros. + shp = list(x.shape) + shp[axis] = 2 + shp = [nsections] + shp + z0 = np.zeros(shp) + + # Apply the filter to x. + yf, zf = sosfilt(sos, x, axis=axis, zi=z0) + + # Apply the filter to x in two stages. + y1, z1 = sosfilt(sos, x[:, :5, :], axis=axis, zi=z0) + y2, z2 = sosfilt(sos, x[:, 5:, :], axis=axis, zi=z1) + + # y should equal yf, and z2 should equal zf. + y = np.concatenate((y1, y2), axis=axis) + assert_allclose_cast(y, yf, rtol=1e-10, atol=1e-13) + assert_allclose_cast(z2, zf, rtol=1e-10, atol=1e-13) + + # let's try the "step" initial condition + zi = sosfilt_zi(sos) + zi.shape = [nsections, 1, 2, 1] + zi = zi * x[:, 0:1, :] + y = sosfilt(sos, x, axis=axis, zi=zi)[0] + # check it against the TF form + b, a = zpk2tf(*zpk) + zi = lfilter_zi(b, a) + zi.shape = [1, zi.size, 1] + zi = zi * x[:, 0:1, :] + y_tf = lfilter(b, a, x, axis=axis, zi=zi)[0] + assert_allclose_cast(y, y_tf, rtol=1e-10, atol=1e-13) + + def test_bad_zi_shape(self, dt): + # The shape of zi is checked before using any values in the + # arguments, so np.empty is fine for creating the arguments. + x = np.empty((3, 15, 3), dt) + sos = np.zeros((4, 6)) + zi = np.empty((4, 3, 3, 2)) # Correct shape is (4, 3, 2, 3) + with pytest.raises(ValueError, match='should be all ones'): + sosfilt(sos, x, zi=zi, axis=1) + sos[:, 3] = 1. 
+ with pytest.raises(ValueError, match='Invalid zi shape'): + sosfilt(sos, x, zi=zi, axis=1) + + def test_sosfilt_zi(self, dt): + sos = signal.butter(6, 0.2, output='sos') + zi = sosfilt_zi(sos) + + y, zf = sosfilt(sos, np.ones(40, dt), zi=zi) + assert_allclose_cast(zf, zi, rtol=1e-13) + + # Expected steady state value of the step response of this filter: + ss = np.prod(sos[:, :3].sum(axis=-1) / sos[:, 3:].sum(axis=-1)) + assert_allclose_cast(y, ss, rtol=1e-13) + + # zi as array-like + _, zf = sosfilt(sos, np.ones(40, dt), zi=zi.tolist()) + assert_allclose_cast(zf, zi, rtol=1e-13) + + @pytest.mark.thread_unsafe + def test_dtype_deprecation(self, dt): + # gh-21211 + sos = np.asarray([1, 2, 3, 1, 5, 3], dtype=object).reshape(1, 6) + x = np.asarray([2, 3, 4, 5, 3, 4, 2, 2, 1], dtype=object) + with pytest.deprecated_call(match="dtype=object is not supported"): + sosfilt(sos, x) + + +class TestDeconvolve: + + def test_basic(self): + # From docstring example + original = [0, 1, 0, 0, 1, 1, 0, 0] + impulse_response = [2, 1] + recorded = [0, 2, 1, 0, 2, 3, 1, 0, 0] + recovered, remainder = signal.deconvolve(recorded, impulse_response) + assert_allclose(recovered, original) + + def test_n_dimensional_signal(self): + recorded = [[0, 0], [0, 0]] + impulse_response = [0, 0] + with pytest.raises(ValueError, match="signal must be 1-D."): + quotient, remainder = signal.deconvolve(recorded, impulse_response) + + def test_n_dimensional_divisor(self): + recorded = [0, 0] + impulse_response = [[0, 0], [0, 0]] + with pytest.raises(ValueError, match="divisor must be 1-D."): + quotient, remainder = signal.deconvolve(recorded, impulse_response) + + +class TestDetrend: + + def test_basic(self): + detrended = detrend(array([1, 2, 3])) + detrended_exact = array([0, 0, 0]) + assert_array_almost_equal(detrended, detrended_exact) + + def test_copy(self): + x = array([1, 1.2, 1.5, 1.6, 2.4]) + copy_array = detrend(x, overwrite_data=False) + inplace = detrend(x, overwrite_data=True) + assert_array_almost_equal(copy_array, inplace) + + @pytest.mark.parametrize('kind', ['linear', 'constant']) + @pytest.mark.parametrize('axis', [0, 1, 2]) + def test_axis(self, axis, kind): + data = np.arange(5*6*7).reshape(5, 6, 7) + detrended = detrend(data, type=kind, axis=axis) + assert detrended.shape == data.shape + + def test_bp(self): + data = [0, 1, 2] + [5, 0, -5, -10] + detrended = detrend(data, type='linear', bp=3) + assert_allclose(detrended, 0, atol=1e-14) + + # repeat with ndim > 1 and axis + data = np.asarray(data)[None, :, None] + + detrended = detrend(data, type="linear", bp=3, axis=1) + assert_allclose(detrended, 0, atol=1e-14) + + # breakpoint index > shape[axis]: raises + with assert_raises(ValueError): + detrend(data, type="linear", bp=3) + + @pytest.mark.parametrize('bp', [np.array([0, 2]), [0, 2]]) + def test_detrend_array_bp(self, bp): + # regression test for https://github.com/scipy/scipy/issues/18675 + rng = np.random.RandomState(12345) + x = rng.rand(10) + # bp = np.array([0, 2]) + + res = detrend(x, bp=bp) + res_scipy_191 = np.array([-4.44089210e-16, -2.22044605e-16, + -1.11128506e-01, -1.69470553e-01, 1.14710683e-01, 6.35468419e-02, + 3.53533144e-01, -3.67877935e-02, -2.00417675e-02, -1.94362049e-01]) + + assert_allclose(res, res_scipy_191, atol=1e-14) + + +class TestUniqueRoots: + def test_real_no_repeat(self): + p = [-1.0, -0.5, 0.3, 1.2, 10.0] + unique, multiplicity = unique_roots(p) + assert_almost_equal(unique, p, decimal=15) + assert_equal(multiplicity, np.ones(len(p))) + + def test_real_repeat(self): + p 
= [-1.0, -0.95, -0.89, -0.8, 0.5, 1.0, 1.05] + + unique, multiplicity = unique_roots(p, tol=1e-1, rtype='min') + assert_almost_equal(unique, [-1.0, -0.89, 0.5, 1.0], decimal=15) + assert_equal(multiplicity, [2, 2, 1, 2]) + + unique, multiplicity = unique_roots(p, tol=1e-1, rtype='max') + assert_almost_equal(unique, [-0.95, -0.8, 0.5, 1.05], decimal=15) + assert_equal(multiplicity, [2, 2, 1, 2]) + + unique, multiplicity = unique_roots(p, tol=1e-1, rtype='avg') + assert_almost_equal(unique, [-0.975, -0.845, 0.5, 1.025], decimal=15) + assert_equal(multiplicity, [2, 2, 1, 2]) + + def test_complex_no_repeat(self): + p = [-1.0, 1.0j, 0.5 + 0.5j, -1.0 - 1.0j, 3.0 + 2.0j] + unique, multiplicity = unique_roots(p) + assert_almost_equal(unique, p, decimal=15) + assert_equal(multiplicity, np.ones(len(p))) + + def test_complex_repeat(self): + p = [-1.0, -1.0 + 0.05j, -0.95 + 0.15j, -0.90 + 0.15j, 0.0, + 0.5 + 0.5j, 0.45 + 0.55j] + + unique, multiplicity = unique_roots(p, tol=1e-1, rtype='min') + assert_almost_equal(unique, [-1.0, -0.95 + 0.15j, 0.0, 0.45 + 0.55j], + decimal=15) + assert_equal(multiplicity, [2, 2, 1, 2]) + + unique, multiplicity = unique_roots(p, tol=1e-1, rtype='max') + assert_almost_equal(unique, + [-1.0 + 0.05j, -0.90 + 0.15j, 0.0, 0.5 + 0.5j], + decimal=15) + assert_equal(multiplicity, [2, 2, 1, 2]) + + unique, multiplicity = unique_roots(p, tol=1e-1, rtype='avg') + assert_almost_equal( + unique, [-1.0 + 0.025j, -0.925 + 0.15j, 0.0, 0.475 + 0.525j], + decimal=15) + assert_equal(multiplicity, [2, 2, 1, 2]) + + def test_gh_4915(self): + p = np.roots(np.convolve(np.ones(5), np.ones(5))) + true_roots = [-(-1)**(1/5), (-1)**(4/5), -(-1)**(3/5), (-1)**(2/5)] + + unique, multiplicity = unique_roots(p) + unique = np.sort(unique) + + assert_almost_equal(np.sort(unique), true_roots, decimal=7) + assert_equal(multiplicity, [2, 2, 2, 2]) + + def test_complex_roots_extra(self): + unique, multiplicity = unique_roots([1.0, 1.0j, 1.0]) + assert_almost_equal(unique, [1.0, 1.0j], decimal=15) + assert_equal(multiplicity, [2, 1]) + + unique, multiplicity = unique_roots([1, 1 + 2e-9, 1e-9 + 1j], tol=0.1) + assert_almost_equal(unique, [1.0, 1e-9 + 1.0j], decimal=15) + assert_equal(multiplicity, [2, 1]) + + def test_single_unique_root(self): + p = np.random.rand(100) + 1j * np.random.rand(100) + unique, multiplicity = unique_roots(p, 2) + assert_almost_equal(unique, [np.min(p)], decimal=15) + assert_equal(multiplicity, [100]) + + +def test_gh_22684(): + actual = signal.resample_poly(np.arange(2000, dtype=np.complex64), 6, 4) + assert actual.dtype == np.complex64 diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/tests/test_upfirdn.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/tests/test_upfirdn.py new file mode 100644 index 0000000000000000000000000000000000000000..0aaec38f06ba72a8bcd984d91b596e98eb3bbc54 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/tests/test_upfirdn.py @@ -0,0 +1,288 @@ +# Code adapted from "upfirdn" python library with permission: +# +# Copyright (c) 2009, Motorola, Inc +# +# All Rights Reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. 
+# +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# * Neither the name of Motorola nor the names of its contributors may be +# used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, +# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR +# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +import numpy as np +from itertools import product + +from scipy._lib._array_api import xp_assert_close +from pytest import raises as assert_raises +import pytest + +from scipy.signal import upfirdn, firwin +from scipy.signal._upfirdn import _output_len, _upfirdn_modes +from scipy.signal._upfirdn_apply import _pad_test + + +def upfirdn_naive(x, h, up=1, down=1): + """Naive upfirdn processing in Python. + + Note: arg order (x, h) differs to facilitate apply_along_axis use. + """ + h = np.asarray(h) + out = np.zeros(len(x) * up, x.dtype) + out[::up] = x + out = np.convolve(h, out)[::down][:_output_len(len(h), len(x), up, down)] + return out + + +class UpFIRDnCase: + """Test _UpFIRDn object""" + def __init__(self, up, down, h, x_dtype): + self.up = up + self.down = down + self.h = np.atleast_1d(h) + self.x_dtype = x_dtype + self.rng = np.random.RandomState(17) + + def __call__(self): + # tiny signal + self.scrub(np.ones(1, self.x_dtype)) + # ones + self.scrub(np.ones(10, self.x_dtype)) # ones + # randn + x = self.rng.randn(10).astype(self.x_dtype) + if self.x_dtype in (np.complex64, np.complex128): + x += 1j * self.rng.randn(10) + self.scrub(x) + # ramp + self.scrub(np.arange(10).astype(self.x_dtype)) + # 3D, random + size = (2, 3, 5) + x = self.rng.randn(*size).astype(self.x_dtype) + if self.x_dtype in (np.complex64, np.complex128): + x += 1j * self.rng.randn(*size) + for axis in range(len(size)): + self.scrub(x, axis=axis) + x = x[:, ::2, 1::3].T + for axis in range(len(size)): + self.scrub(x, axis=axis) + + def scrub(self, x, axis=-1): + yr = np.apply_along_axis(upfirdn_naive, axis, x, + self.h, self.up, self.down) + want_len = _output_len(len(self.h), x.shape[axis], self.up, self.down) + assert yr.shape[axis] == want_len + y = upfirdn(self.h, x, self.up, self.down, axis=axis) + assert y.shape[axis] == want_len + assert y.shape == yr.shape + dtypes = (self.h.dtype, x.dtype) + if all(d == np.complex64 for d in dtypes): + assert y.dtype == np.complex64 + elif np.complex64 in dtypes and np.float32 in dtypes: + assert y.dtype == np.complex64 + elif all(d == np.float32 for d in dtypes): + assert y.dtype == np.float32 + elif np.complex128 in dtypes or np.complex64 in dtypes: + assert y.dtype == np.complex128 + else: + assert y.dtype == np.float64 + xp_assert_close(yr.astype(y.dtype), y) + + +_UPFIRDN_TYPES = (int, 
np.float32, np.complex64, float, complex) + + +class TestUpfirdn: + + def test_valid_input(self): + assert_raises(ValueError, upfirdn, [1], [1], 1, 0) # up or down < 1 + assert_raises(ValueError, upfirdn, [], [1], 1, 1) # h.ndim != 1 + assert_raises(ValueError, upfirdn, [[1]], [1], 1, 1) + + @pytest.mark.parametrize('len_h', [1, 2, 3, 4, 5]) + @pytest.mark.parametrize('len_x', [1, 2, 3, 4, 5]) + def test_singleton(self, len_h, len_x): + # gh-9844: lengths producing expected outputs + h = np.zeros(len_h) + h[len_h // 2] = 1. # make h a delta + x = np.ones(len_x) + y = upfirdn(h, x, 1, 1) + want = np.pad(x, (len_h // 2, (len_h - 1) // 2), 'constant') + xp_assert_close(y, want) + + def test_shift_x(self): + # gh-9844: shifted x can change values? + y = upfirdn([1, 1], [1.], 1, 1) + xp_assert_close(y, np.asarray([1.0, 1.0])) # was [0, 1] in the issue + y = upfirdn([1, 1], [0., 1.], 1, 1) + xp_assert_close(y, np.asarray([0.0, 1.0, 1.0])) + + # A bunch of lengths/factors chosen because they exposed differences + # between the "old way" and new way of computing length, and then + # got `expected` from MATLAB + @pytest.mark.parametrize('len_h, len_x, up, down, expected', [ + (2, 2, 5, 2, [1, 0, 0, 0]), + (2, 3, 6, 3, [1, 0, 1, 0, 1]), + (2, 4, 4, 3, [1, 0, 0, 0, 1]), + (3, 2, 6, 2, [1, 0, 0, 1, 0]), + (4, 11, 3, 5, [1, 0, 0, 1, 0, 0, 1]), + ]) + def test_length_factors(self, len_h, len_x, up, down, expected): + # gh-9844: weird factors + h = np.zeros(len_h) + h[0] = 1. + x = np.ones(len_x) + y = upfirdn(h, x, up, down) + expected = np.asarray(expected, dtype=np.float64) + xp_assert_close(y, expected) + + @pytest.mark.parametrize('down, want_len', [ # lengths from MATLAB + (2, 5015), + (11, 912), + (79, 127), + ]) + def test_vs_convolve(self, down, want_len): + # Check that up=1.0 gives same answer as convolve + slicing + random_state = np.random.RandomState(17) + try_types = (int, np.float32, np.complex64, float, complex) + size = 10000 + + for dtype in try_types: + x = random_state.randn(size).astype(dtype) + if dtype in (np.complex64, np.complex128): + x += 1j * random_state.randn(size) + + h = firwin(31, 1. 
/ down, window='hamming') + yl = upfirdn_naive(x, h, 1, down) + y = upfirdn(h, x, up=1, down=down) + assert y.shape == (want_len,) + assert yl.shape[0] == y.shape[0] + xp_assert_close(yl, y, atol=1e-7, rtol=1e-7) + + @pytest.mark.parametrize('x_dtype', _UPFIRDN_TYPES) + @pytest.mark.parametrize('h', (1., 1j)) + @pytest.mark.parametrize('up, down', [(1, 1), (2, 2), (3, 2), (2, 3)]) + def test_vs_naive_delta(self, x_dtype, h, up, down): + UpFIRDnCase(up, down, h, x_dtype)() + + @pytest.mark.parametrize('x_dtype', _UPFIRDN_TYPES) + @pytest.mark.parametrize('h_dtype', _UPFIRDN_TYPES) + @pytest.mark.parametrize('p_max, q_max', + list(product((10, 100), (10, 100)))) + def test_vs_naive(self, x_dtype, h_dtype, p_max, q_max): + tests = self._random_factors(p_max, q_max, h_dtype, x_dtype) + for test in tests: + test() + + def _random_factors(self, p_max, q_max, h_dtype, x_dtype): + n_rep = 3 + longest_h = 25 + random_state = np.random.RandomState(17) + tests = [] + + for _ in range(n_rep): + # Randomize the up/down factors somewhat + p_add = q_max if p_max > q_max else 1 + q_add = p_max if q_max > p_max else 1 + p = random_state.randint(p_max) + p_add + q = random_state.randint(q_max) + q_add + + # Generate random FIR coefficients + len_h = random_state.randint(longest_h) + 1 + h = np.atleast_1d(random_state.randint(len_h)) + h = h.astype(h_dtype) + if h_dtype is complex: + h += 1j * random_state.randint(len_h) + + tests.append(UpFIRDnCase(p, q, h, x_dtype)) + + return tests + + @pytest.mark.parametrize('mode', _upfirdn_modes) + def test_extensions(self, mode): + """Test vs. manually computed results for modes not in numpy's pad.""" + x = np.array([1, 2, 3, 1], dtype=float) + npre, npost = 6, 6 + y = _pad_test(x, npre=npre, npost=npost, mode=mode) + if mode == 'antisymmetric': + y_expected = np.asarray( + [3.0, 1, -1, -3, -2, -1, 1, 2, 3, 1, -1, -3, -2, -1, 1, 2]) + elif mode == 'antireflect': + y_expected = np.asarray( + [1.0, 2, 3, 1, -1, 0, 1, 2, 3, 1, -1, 0, 1, 2, 3, 1]) + elif mode == 'smooth': + y_expected = np.asarray( + [-5.0, -4, -3, -2, -1, 0, 1, 2, 3, 1, -1, -3, -5, -7, -9, -11]) + elif mode == "line": + lin_slope = (x[-1] - x[0]) / (len(x) - 1) + left = x[0] + np.arange(-npre, 0, 1) * lin_slope + right = x[-1] + np.arange(1, npost + 1) * lin_slope + y_expected = np.concatenate((left, x, right)) + else: + y_expected = np.pad(x, (npre, npost), mode=mode) + xp_assert_close(y, y_expected) + + @pytest.mark.parametrize( + 'size, h_len, mode, dtype', + product( + [8], + [4, 5, 26], # include cases with h_len > 2*size + _upfirdn_modes, + [np.float32, np.float64, np.complex64, np.complex128], + ) + ) + def test_modes(self, size, h_len, mode, dtype): + random_state = np.random.RandomState(5) + x = random_state.randn(size).astype(dtype) + if dtype in (np.complex64, np.complex128): + x += 1j * random_state.randn(size) + h = np.arange(1, 1 + h_len, dtype=x.real.dtype) + + y = upfirdn(h, x, up=1, down=1, mode=mode) + # expected result: pad the input, filter with zero padding, then crop + npad = h_len - 1 + if mode in ['antisymmetric', 'antireflect', 'smooth', 'line']: + # use _pad_test test function for modes not supported by np.pad. + xpad = _pad_test(x, npre=npad, npost=npad, mode=mode) + else: + xpad = np.pad(x, npad, mode=mode) + ypad = upfirdn(h, xpad, up=1, down=1, mode='constant') + y_expected = ypad[npad:-npad] + + atol = rtol = np.finfo(dtype).eps * 1e2 + xp_assert_close(y, y_expected, atol=atol, rtol=rtol) + + +def test_output_len_long_input(): + # Regression test for gh-17375. 
On Windows, a large enough input + # that should have been well within the capabilities of 64 bit integers + # would result in a 32 bit overflow because of a bug in Cython 0.29.32. + len_h = 1001 + in_len = 10**8 + up = 320 + down = 441 + out_len = _output_len(len_h, in_len, up, down) + # The expected value was computed "by hand" from the formula + # (((in_len - 1) * up + len_h) - 1) // down + 1 + assert out_len == 72562360 diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/tests/test_waveforms.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/tests/test_waveforms.py new file mode 100644 index 0000000000000000000000000000000000000000..b30f7b9ceba80822f05ecfcabaad76fb87d5f335 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/tests/test_waveforms.py @@ -0,0 +1,380 @@ +import numpy as np +from pytest import raises as assert_raises +from scipy._lib._array_api import ( + assert_almost_equal, xp_assert_equal, xp_assert_close +) + +import scipy.signal._waveforms as waveforms + + +# These chirp_* functions are the instantaneous frequencies of the signals +# returned by chirp(). + +def chirp_linear(t, f0, f1, t1): + f = f0 + (f1 - f0) * t / t1 + return f + + +def chirp_quadratic(t, f0, f1, t1, vertex_zero=True): + if vertex_zero: + f = f0 + (f1 - f0) * t**2 / t1**2 + else: + f = f1 - (f1 - f0) * (t1 - t)**2 / t1**2 + return f + + +def chirp_geometric(t, f0, f1, t1): + f = f0 * (f1/f0)**(t/t1) + return f + + +def chirp_hyperbolic(t, f0, f1, t1): + f = f0*f1*t1 / ((f0 - f1)*t + f1*t1) + return f + + +def compute_frequency(t, theta): + """ + Compute theta'(t)/(2*pi), where theta'(t) is the derivative of theta(t). + """ + # Assume theta and t are 1-D NumPy arrays. + # Assume that t is uniformly spaced. 
+ dt = t[1] - t[0] + f = np.diff(theta)/(2*np.pi) / dt + tf = 0.5*(t[1:] + t[:-1]) + return tf, f + + +class TestChirp: + + def test_linear_at_zero(self): + w = waveforms.chirp(t=0, f0=1.0, f1=2.0, t1=1.0, method='linear') + assert_almost_equal(w, 1.0) + + def test_linear_freq_01(self): + method = 'linear' + f0 = 1.0 + f1 = 2.0 + t1 = 1.0 + t = np.linspace(0, t1, 100) + phase = waveforms._chirp_phase(t, f0, t1, f1, method) + tf, f = compute_frequency(t, phase) + abserr = np.max(np.abs(f - chirp_linear(tf, f0, f1, t1))) + assert abserr < 1e-6 + + def test_linear_freq_02(self): + method = 'linear' + f0 = 200.0 + f1 = 100.0 + t1 = 10.0 + t = np.linspace(0, t1, 100) + phase = waveforms._chirp_phase(t, f0, t1, f1, method) + tf, f = compute_frequency(t, phase) + abserr = np.max(np.abs(f - chirp_linear(tf, f0, f1, t1))) + assert abserr < 1e-6 + + def test_linear_complex_power(self): + method = 'linear' + f0 = 1.0 + f1 = 2.0 + t1 = 1.0 + t = np.linspace(0, t1, 100) + w_real = waveforms.chirp(t, f0, t1, f1, method, complex=False) + w_complex = waveforms.chirp(t, f0, t1, f1, method, complex=True) + w_pwr_r = np.var(w_real) + w_pwr_c = np.var(w_complex) + + # Making sure that power of the real part is not affected with + # complex conversion operation + err = w_pwr_r - np.real(w_pwr_c) + + assert(err < 1e-6) + + def test_linear_complex_at_zero(self): + w = waveforms.chirp(t=0, f0=-10.0, f1=1.0, t1=1.0, method='linear', + complex=True) + xp_assert_close(w, 1.0+0.0j) # dtype must match + + def test_quadratic_at_zero(self): + w = waveforms.chirp(t=0, f0=1.0, f1=2.0, t1=1.0, method='quadratic') + assert_almost_equal(w, 1.0) + + def test_quadratic_at_zero2(self): + w = waveforms.chirp(t=0, f0=1.0, f1=2.0, t1=1.0, method='quadratic', + vertex_zero=False) + assert_almost_equal(w, 1.0) + + def test_quadratic_complex_at_zero(self): + w = waveforms.chirp(t=0, f0=-1.0, f1=2.0, t1=1.0, method='quadratic', + complex=True) + xp_assert_close(w, 1.0+0j) + + def test_quadratic_freq_01(self): + method = 'quadratic' + f0 = 1.0 + f1 = 2.0 + t1 = 1.0 + t = np.linspace(0, t1, 2000) + phase = waveforms._chirp_phase(t, f0, t1, f1, method) + tf, f = compute_frequency(t, phase) + abserr = np.max(np.abs(f - chirp_quadratic(tf, f0, f1, t1))) + assert abserr < 1e-6 + + def test_quadratic_freq_02(self): + method = 'quadratic' + f0 = 20.0 + f1 = 10.0 + t1 = 10.0 + t = np.linspace(0, t1, 2000) + phase = waveforms._chirp_phase(t, f0, t1, f1, method) + tf, f = compute_frequency(t, phase) + abserr = np.max(np.abs(f - chirp_quadratic(tf, f0, f1, t1))) + assert abserr < 1e-6 + + def test_logarithmic_at_zero(self): + w = waveforms.chirp(t=0, f0=1.0, f1=2.0, t1=1.0, method='logarithmic') + assert_almost_equal(w, 1.0) + + def test_logarithmic_freq_01(self): + method = 'logarithmic' + f0 = 1.0 + f1 = 2.0 + t1 = 1.0 + t = np.linspace(0, t1, 10000) + phase = waveforms._chirp_phase(t, f0, t1, f1, method) + tf, f = compute_frequency(t, phase) + abserr = np.max(np.abs(f - chirp_geometric(tf, f0, f1, t1))) + assert abserr < 1e-6 + + def test_logarithmic_freq_02(self): + method = 'logarithmic' + f0 = 200.0 + f1 = 100.0 + t1 = 10.0 + t = np.linspace(0, t1, 10000) + phase = waveforms._chirp_phase(t, f0, t1, f1, method) + tf, f = compute_frequency(t, phase) + abserr = np.max(np.abs(f - chirp_geometric(tf, f0, f1, t1))) + assert abserr < 1e-6 + + def test_logarithmic_freq_03(self): + method = 'logarithmic' + f0 = 100.0 + f1 = 100.0 + t1 = 10.0 + t = np.linspace(0, t1, 10000) + phase = waveforms._chirp_phase(t, f0, t1, f1, method) + tf, f = 
compute_frequency(t, phase) + abserr = np.max(np.abs(f - chirp_geometric(tf, f0, f1, t1))) + assert abserr < 1e-6 + + def test_hyperbolic_at_zero(self): + w = waveforms.chirp(t=0, f0=10.0, f1=1.0, t1=1.0, method='hyperbolic') + assert_almost_equal(w, 1.0) + + def test_hyperbolic_freq_01(self): + method = 'hyperbolic' + t1 = 1.0 + t = np.linspace(0, t1, 10000) + # f0 f1 + cases = [[10.0, 1.0], + [1.0, 10.0], + [-10.0, -1.0], + [-1.0, -10.0]] + for f0, f1 in cases: + phase = waveforms._chirp_phase(t, f0, t1, f1, method) + tf, f = compute_frequency(t, phase) + expected = chirp_hyperbolic(tf, f0, f1, t1) + xp_assert_close(f, expected, atol=1e-7) + + def test_hyperbolic_zero_freq(self): + # f0=0 or f1=0 must raise a ValueError. + method = 'hyperbolic' + t1 = 1.0 + t = np.linspace(0, t1, 5) + assert_raises(ValueError, waveforms.chirp, t, 0, t1, 1, method) + assert_raises(ValueError, waveforms.chirp, t, 1, t1, 0, method) + + def test_unknown_method(self): + method = "foo" + f0 = 10.0 + f1 = 20.0 + t1 = 1.0 + t = np.linspace(0, t1, 10) + assert_raises(ValueError, waveforms.chirp, t, f0, t1, f1, method) + + def test_integer_t1(self): + f0 = 10.0 + f1 = 20.0 + t = np.linspace(-1, 1, 11) + t1 = 3.0 + float_result = waveforms.chirp(t, f0, t1, f1) + t1 = 3 + int_result = waveforms.chirp(t, f0, t1, f1) + err_msg = "Integer input 't1=3' gives wrong result" + xp_assert_equal(int_result, float_result, err_msg=err_msg) + + def test_integer_f0(self): + f1 = 20.0 + t1 = 3.0 + t = np.linspace(-1, 1, 11) + f0 = 10.0 + float_result = waveforms.chirp(t, f0, t1, f1) + f0 = 10 + int_result = waveforms.chirp(t, f0, t1, f1) + err_msg = "Integer input 'f0=10' gives wrong result" + xp_assert_equal(int_result, float_result, err_msg=err_msg) + + def test_integer_f1(self): + f0 = 10.0 + t1 = 3.0 + t = np.linspace(-1, 1, 11) + f1 = 20.0 + float_result = waveforms.chirp(t, f0, t1, f1) + f1 = 20 + int_result = waveforms.chirp(t, f0, t1, f1) + err_msg = "Integer input 'f1=20' gives wrong result" + xp_assert_equal(int_result, float_result, err_msg=err_msg) + + def test_integer_all(self): + f0 = 10 + t1 = 3 + f1 = 20 + t = np.linspace(-1, 1, 11) + float_result = waveforms.chirp(t, float(f0), float(t1), float(f1)) + int_result = waveforms.chirp(t, f0, t1, f1) + err_msg = "Integer input 'f0=10, t1=3, f1=20' gives wrong result" + xp_assert_equal(int_result, float_result, err_msg=err_msg) + + +class TestSweepPoly: + + def test_sweep_poly_quad1(self): + p = np.poly1d([1.0, 0.0, 1.0]) + t = np.linspace(0, 3.0, 10000) + phase = waveforms._sweep_poly_phase(t, p) + tf, f = compute_frequency(t, phase) + expected = p(tf) + abserr = np.max(np.abs(f - expected)) + assert abserr < 1e-6 + + def test_sweep_poly_const(self): + p = np.poly1d(2.0) + t = np.linspace(0, 3.0, 10000) + phase = waveforms._sweep_poly_phase(t, p) + tf, f = compute_frequency(t, phase) + expected = p(tf) + abserr = np.max(np.abs(f - expected)) + assert abserr < 1e-6 + + def test_sweep_poly_linear(self): + p = np.poly1d([-1.0, 10.0]) + t = np.linspace(0, 3.0, 10000) + phase = waveforms._sweep_poly_phase(t, p) + tf, f = compute_frequency(t, phase) + expected = p(tf) + abserr = np.max(np.abs(f - expected)) + assert abserr < 1e-6 + + def test_sweep_poly_quad2(self): + p = np.poly1d([1.0, 0.0, -2.0]) + t = np.linspace(0, 3.0, 10000) + phase = waveforms._sweep_poly_phase(t, p) + tf, f = compute_frequency(t, phase) + expected = p(tf) + abserr = np.max(np.abs(f - expected)) + assert abserr < 1e-6 + + def test_sweep_poly_cubic(self): + p = np.poly1d([2.0, 1.0, 0.0, -2.0]) + t = 
np.linspace(0, 2.0, 10000) + phase = waveforms._sweep_poly_phase(t, p) + tf, f = compute_frequency(t, phase) + expected = p(tf) + abserr = np.max(np.abs(f - expected)) + assert abserr < 1e-6 + + def test_sweep_poly_cubic2(self): + """Use an array of coefficients instead of a poly1d.""" + p = np.array([2.0, 1.0, 0.0, -2.0]) + t = np.linspace(0, 2.0, 10000) + phase = waveforms._sweep_poly_phase(t, p) + tf, f = compute_frequency(t, phase) + expected = np.poly1d(p)(tf) + abserr = np.max(np.abs(f - expected)) + assert abserr < 1e-6 + + def test_sweep_poly_cubic3(self): + """Use a list of coefficients instead of a poly1d.""" + p = [2.0, 1.0, 0.0, -2.0] + t = np.linspace(0, 2.0, 10000) + phase = waveforms._sweep_poly_phase(t, p) + tf, f = compute_frequency(t, phase) + expected = np.poly1d(p)(tf) + abserr = np.max(np.abs(f - expected)) + assert abserr < 1e-6 + + +class TestGaussPulse: + + def test_integer_fc(self): + float_result = waveforms.gausspulse('cutoff', fc=1000.0) + int_result = waveforms.gausspulse('cutoff', fc=1000) + err_msg = "Integer input 'fc=1000' gives wrong result" + xp_assert_equal(int_result, float_result, err_msg=err_msg) + + def test_integer_bw(self): + float_result = waveforms.gausspulse('cutoff', bw=1.0) + int_result = waveforms.gausspulse('cutoff', bw=1) + err_msg = "Integer input 'bw=1' gives wrong result" + xp_assert_equal(int_result, float_result, err_msg=err_msg) + + def test_integer_bwr(self): + float_result = waveforms.gausspulse('cutoff', bwr=-6.0) + int_result = waveforms.gausspulse('cutoff', bwr=-6) + err_msg = "Integer input 'bwr=-6' gives wrong result" + xp_assert_equal(int_result, float_result, err_msg=err_msg) + + def test_integer_tpr(self): + float_result = waveforms.gausspulse('cutoff', tpr=-60.0) + int_result = waveforms.gausspulse('cutoff', tpr=-60) + err_msg = "Integer input 'tpr=-60' gives wrong result" + xp_assert_equal(int_result, float_result, err_msg=err_msg) + + +class TestUnitImpulse: + + def test_no_index(self): + xp_assert_equal(waveforms.unit_impulse(7), + np.asarray([1.0, 0, 0, 0, 0, 0, 0])) + xp_assert_equal(waveforms.unit_impulse((3, 3)), + np.asarray([[1.0, 0, 0], [0, 0, 0], [0, 0, 0]])) + + def test_index(self): + xp_assert_equal(waveforms.unit_impulse(10, 3), + np.asarray([0.0, 0, 0, 1, 0, 0, 0, 0, 0, 0])) + xp_assert_equal(waveforms.unit_impulse((3, 3), (1, 1)), + np.asarray([[0.0, 0, 0], [0, 1, 0], [0, 0, 0]])) + + # Broadcasting + imp = waveforms.unit_impulse((4, 4), 2) + xp_assert_equal(imp, np.asarray([[0.0, 0, 0, 0], + [0.0, 0, 0, 0], + [0.0, 0, 1, 0], + [0.0, 0, 0, 0]])) + + def test_mid(self): + xp_assert_equal(waveforms.unit_impulse((3, 3), 'mid'), + np.asarray([[0.0, 0, 0], [0, 1, 0], [0, 0, 0]])) + xp_assert_equal(waveforms.unit_impulse(9, 'mid'), + np.asarray([0.0, 0, 0, 0, 1, 0, 0, 0, 0])) + + def test_dtype(self): + imp = waveforms.unit_impulse(7) + assert np.issubdtype(imp.dtype, np.floating) + + imp = waveforms.unit_impulse(5, 3, dtype=int) + assert np.issubdtype(imp.dtype, np.integer) + + imp = waveforms.unit_impulse((5, 2), (3, 1), dtype=complex) + assert np.issubdtype(imp.dtype, np.complexfloating) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/tests/test_wavelets.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/tests/test_wavelets.py new file mode 100644 index 0000000000000000000000000000000000000000..7a357d2eaf4a530930d612358b8ca69a18b5248e --- /dev/null +++ 
b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/tests/test_wavelets.py @@ -0,0 +1,59 @@ +import numpy as np +from numpy.testing import assert_array_equal, assert_array_almost_equal + +import scipy.signal._wavelets as wavelets + + +class TestWavelets: + def test_ricker(self): + w = wavelets._ricker(1.0, 1) + expected = 2 / (np.sqrt(3 * 1.0) * (np.pi ** 0.25)) + assert_array_equal(w, expected) + + lengths = [5, 11, 15, 51, 101] + for length in lengths: + w = wavelets._ricker(length, 1.0) + assert len(w) == length + max_loc = np.argmax(w) + assert max_loc == (length // 2) + + points = 100 + w = wavelets._ricker(points, 2.0) + half_vec = np.arange(0, points // 2) + # Wavelet should be symmetric + assert_array_almost_equal(w[half_vec], w[-(half_vec + 1)]) + + # Check zeros + aas = [5, 10, 15, 20, 30] + points = 99 + for a in aas: + w = wavelets._ricker(points, a) + vec = np.arange(0, points) - (points - 1.0) / 2 + exp_zero1 = np.argmin(np.abs(vec - a)) + exp_zero2 = np.argmin(np.abs(vec + a)) + assert_array_almost_equal(w[exp_zero1], 0) + assert_array_almost_equal(w[exp_zero2], 0) + + def test_cwt(self): + widths = [1.0] + def delta_wavelet(s, t): + return np.array([1]) + len_data = 100 + test_data = np.sin(np.pi * np.arange(0, len_data) / 10.0) + + # Test delta function input gives same data as output + cwt_dat = wavelets._cwt(test_data, delta_wavelet, widths) + assert cwt_dat.shape == (len(widths), len_data) + assert_array_almost_equal(test_data, cwt_dat.flatten()) + + # Check proper shape on output + widths = [1, 3, 4, 5, 10] + cwt_dat = wavelets._cwt(test_data, wavelets._ricker, widths) + assert cwt_dat.shape == (len(widths), len_data) + + widths = [len_data * 10] + # Note: this wavelet isn't defined quite right, but is fine for this test + def flat_wavelet(l, w): + return np.full(w, 1 / w) + cwt_dat = wavelets._cwt(test_data, flat_wavelet, widths) + assert_array_almost_equal(cwt_dat, np.mean(test_data)) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/tests/test_windows.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/tests/test_windows.py new file mode 100644 index 0000000000000000000000000000000000000000..75c4da5327f0c1a806f72865687927bc7a380c6d --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/tests/test_windows.py @@ -0,0 +1,846 @@ +import numpy as np +from numpy import array +from numpy.testing import (assert_array_almost_equal, assert_array_equal, + assert_allclose, + assert_equal, assert_, assert_array_less, + suppress_warnings) +from pytest import raises as assert_raises + +from scipy.fft import fft +from scipy.signal import windows, get_window, resample + + +window_funcs = [ + ('boxcar', ()), + ('triang', ()), + ('parzen', ()), + ('bohman', ()), + ('blackman', ()), + ('nuttall', ()), + ('blackmanharris', ()), + ('flattop', ()), + ('bartlett', ()), + ('barthann', ()), + ('hamming', ()), + ('kaiser', (1,)), + ('dpss', (2,)), + ('gaussian', (0.5,)), + ('general_gaussian', (1.5, 2)), + ('chebwin', (1,)), + ('cosine', ()), + ('hann', ()), + ('exponential', ()), + ('taylor', ()), + ('tukey', (0.5,)), + ('lanczos', ()), + ] + + +class TestBartHann: + + def test_basic(self): + assert_allclose(windows.barthann(6, sym=True), + [0, 0.35857354213752, 0.8794264578624801, + 0.8794264578624801, 0.3585735421375199, 0], + rtol=1e-15, atol=1e-15) + assert_allclose(windows.barthann(7), + [0, 
0.27, 0.73, 1.0, 0.73, 0.27, 0], + rtol=1e-15, atol=1e-15) + assert_allclose(windows.barthann(6, False), + [0, 0.27, 0.73, 1.0, 0.73, 0.27], + rtol=1e-15, atol=1e-15) + + +class TestBartlett: + + def test_basic(self): + assert_allclose(windows.bartlett(6), [0, 0.4, 0.8, 0.8, 0.4, 0]) + assert_allclose(windows.bartlett(7), [0, 1/3, 2/3, 1.0, 2/3, 1/3, 0]) + assert_allclose(windows.bartlett(6, False), + [0, 1/3, 2/3, 1.0, 2/3, 1/3]) + + +class TestBlackman: + + def test_basic(self): + assert_allclose(windows.blackman(6, sym=False), + [0, 0.13, 0.63, 1.0, 0.63, 0.13], atol=1e-14) + assert_allclose(windows.blackman(7, sym=False), + [0, 0.09045342435412804, 0.4591829575459636, + 0.9203636180999081, 0.9203636180999081, + 0.4591829575459636, 0.09045342435412804], atol=1e-8) + assert_allclose(windows.blackman(6), + [0, 0.2007701432625305, 0.8492298567374694, + 0.8492298567374694, 0.2007701432625305, 0], + atol=1e-14) + assert_allclose(windows.blackman(7, True), + [0, 0.13, 0.63, 1.0, 0.63, 0.13, 0], atol=1e-14) + + +class TestBlackmanHarris: + + def test_basic(self): + assert_allclose(windows.blackmanharris(6, False), + [6.0e-05, 0.055645, 0.520575, 1.0, 0.520575, 0.055645]) + assert_allclose(windows.blackmanharris(7, sym=False), + [6.0e-05, 0.03339172347815117, 0.332833504298565, + 0.8893697722232837, 0.8893697722232838, + 0.3328335042985652, 0.03339172347815122]) + assert_allclose(windows.blackmanharris(6), + [6.0e-05, 0.1030114893456638, 0.7938335106543362, + 0.7938335106543364, 0.1030114893456638, 6.0e-05]) + assert_allclose(windows.blackmanharris(7, sym=True), + [6.0e-05, 0.055645, 0.520575, 1.0, 0.520575, 0.055645, + 6.0e-05]) + + +class TestTaylor: + + def test_normalized(self): + """Tests windows of small length that are normalized to 1. See the + documentation for the Taylor window for more information on + normalization. + """ + assert_allclose(windows.taylor(1, 2, 15), 1.0) + assert_allclose( + windows.taylor(5, 2, 15), + np.array([0.75803341, 0.90757699, 1.0, 0.90757699, 0.75803341]) + ) + assert_allclose( + windows.taylor(6, 2, 15), + np.array([ + 0.7504082, 0.86624416, 0.98208011, 0.98208011, 0.86624416, + 0.7504082 + ]) + ) + + def test_non_normalized(self): + """Test windows of small length that are not normalized to 1. See + the documentation for the Taylor window for more information on + normalization. + """ + assert_allclose( + windows.taylor(5, 2, 15, norm=False), + np.array([ + 0.87508054, 1.04771499, 1.15440894, 1.04771499, 0.87508054 + ]) + ) + assert_allclose( + windows.taylor(6, 2, 15, norm=False), + np.array([ + 0.86627793, 1.0, 1.13372207, 1.13372207, 1.0, 0.86627793 + ]) + ) + + def test_correctness(self): + """This test ensures the correctness of the implemented Taylor + Windowing function. A Taylor Window of 1024 points is created, its FFT + is taken, and the Peak Sidelobe Level (PSLL) and 3dB and 18dB bandwidth + are found and checked. + + A publication from Sandia National Laboratories was used as reference + for the correctness values [1]_. + + References + ----- + .. [1] Armin Doerry, "Catalog of Window Taper Functions for + Sidelobe Control", 2017. + https://www.researchgate.net/profile/Armin_Doerry/publication/316281181_Catalog_of_Window_Taper_Functions_for_Sidelobe_Control/links/58f92cb2a6fdccb121c9d54d/Catalog-of-Window-Taper-Functions-for-Sidelobe-Control.pdf + """ + M_win = 1024 + N_fft = 131072 + # Set norm=False for correctness as the values obtained from the + # scientific publication do not normalize the values. 
Normalizing + # changes the sidelobe level from the desired value. + w = windows.taylor(M_win, nbar=4, sll=35, norm=False, sym=False) + f = fft(w, N_fft) + spec = 20 * np.log10(np.abs(f / np.amax(f))) + + first_zero = np.argmax(np.diff(spec) > 0) + + PSLL = np.amax(spec[first_zero:-first_zero]) + + BW_3dB = 2*np.argmax(spec <= -3.0102999566398121) / N_fft * M_win + BW_18dB = 2*np.argmax(spec <= -18.061799739838872) / N_fft * M_win + + assert_allclose(PSLL, -35.1672, atol=1) + assert_allclose(BW_3dB, 1.1822, atol=0.1) + assert_allclose(BW_18dB, 2.6112, atol=0.1) + + +class TestBohman: + + def test_basic(self): + assert_allclose(windows.bohman(6), + [0, 0.1791238937062839, 0.8343114522576858, + 0.8343114522576858, 0.1791238937062838, 0]) + assert_allclose(windows.bohman(7, sym=True), + [0, 0.1089977810442293, 0.6089977810442293, 1.0, + 0.6089977810442295, 0.1089977810442293, 0]) + assert_allclose(windows.bohman(6, False), + [0, 0.1089977810442293, 0.6089977810442293, 1.0, + 0.6089977810442295, 0.1089977810442293]) + + +class TestBoxcar: + + def test_basic(self): + assert_allclose(windows.boxcar(6), [1, 1, 1, 1, 1, 1]) + assert_allclose(windows.boxcar(7), [1, 1, 1, 1, 1, 1, 1]) + assert_allclose(windows.boxcar(6, False), [1, 1, 1, 1, 1, 1]) + + +cheb_odd_true = array([0.200938, 0.107729, 0.134941, 0.165348, + 0.198891, 0.235450, 0.274846, 0.316836, + 0.361119, 0.407338, 0.455079, 0.503883, + 0.553248, 0.602637, 0.651489, 0.699227, + 0.745266, 0.789028, 0.829947, 0.867485, + 0.901138, 0.930448, 0.955010, 0.974482, + 0.988591, 0.997138, 1.000000, 0.997138, + 0.988591, 0.974482, 0.955010, 0.930448, + 0.901138, 0.867485, 0.829947, 0.789028, + 0.745266, 0.699227, 0.651489, 0.602637, + 0.553248, 0.503883, 0.455079, 0.407338, + 0.361119, 0.316836, 0.274846, 0.235450, + 0.198891, 0.165348, 0.134941, 0.107729, + 0.200938]) + +cheb_even_true = array([0.203894, 0.107279, 0.133904, + 0.163608, 0.196338, 0.231986, + 0.270385, 0.311313, 0.354493, + 0.399594, 0.446233, 0.493983, + 0.542378, 0.590916, 0.639071, + 0.686302, 0.732055, 0.775783, + 0.816944, 0.855021, 0.889525, + 0.920006, 0.946060, 0.967339, + 0.983557, 0.994494, 1.000000, + 1.000000, 0.994494, 0.983557, + 0.967339, 0.946060, 0.920006, + 0.889525, 0.855021, 0.816944, + 0.775783, 0.732055, 0.686302, + 0.639071, 0.590916, 0.542378, + 0.493983, 0.446233, 0.399594, + 0.354493, 0.311313, 0.270385, + 0.231986, 0.196338, 0.163608, + 0.133904, 0.107279, 0.203894]) + + +class TestChebWin: + + def test_basic(self): + with suppress_warnings() as sup: + sup.filter(UserWarning, "This window is not suitable") + assert_allclose(windows.chebwin(6, 100), + [0.1046401879356917, 0.5075781475823447, 1.0, 1.0, + 0.5075781475823447, 0.1046401879356917]) + assert_allclose(windows.chebwin(7, 100), + [0.05650405062850233, 0.316608530648474, + 0.7601208123539079, 1.0, 0.7601208123539079, + 0.316608530648474, 0.05650405062850233]) + assert_allclose(windows.chebwin(6, 10), + [1.0, 0.6071201674458373, 0.6808391469897297, + 0.6808391469897297, 0.6071201674458373, 1.0]) + assert_allclose(windows.chebwin(7, 10), + [1.0, 0.5190521247588651, 0.5864059018130382, + 0.6101519801307441, 0.5864059018130382, + 0.5190521247588651, 1.0]) + assert_allclose(windows.chebwin(6, 10, False), + [1.0, 0.5190521247588651, 0.5864059018130382, + 0.6101519801307441, 0.5864059018130382, + 0.5190521247588651]) + + def test_cheb_odd_high_attenuation(self): + with suppress_warnings() as sup: + sup.filter(UserWarning, "This window is not suitable") + cheb_odd = windows.chebwin(53, at=-40) + 
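+            # the negative `at` is expected to reproduce the cheb_odd_true reference +            # (the 40 dB taper), i.e. only the attenuation magnitude should matter here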
assert_array_almost_equal(cheb_odd, cheb_odd_true, decimal=4) + + def test_cheb_even_high_attenuation(self): + with suppress_warnings() as sup: + sup.filter(UserWarning, "This window is not suitable") + cheb_even = windows.chebwin(54, at=40) + assert_array_almost_equal(cheb_even, cheb_even_true, decimal=4) + + def test_cheb_odd_low_attenuation(self): + cheb_odd_low_at_true = array([1.000000, 0.519052, 0.586405, + 0.610151, 0.586405, 0.519052, + 1.000000]) + with suppress_warnings() as sup: + sup.filter(UserWarning, "This window is not suitable") + cheb_odd = windows.chebwin(7, at=10) + assert_array_almost_equal(cheb_odd, cheb_odd_low_at_true, decimal=4) + + def test_cheb_even_low_attenuation(self): + cheb_even_low_at_true = array([1.000000, 0.451924, 0.51027, + 0.541338, 0.541338, 0.51027, + 0.451924, 1.000000]) + with suppress_warnings() as sup: + sup.filter(UserWarning, "This window is not suitable") + cheb_even = windows.chebwin(8, at=-10) + assert_array_almost_equal(cheb_even, cheb_even_low_at_true, decimal=4) + + +exponential_data = { + (4, None, 0.2, False): + array([4.53999297624848542e-05, + 6.73794699908546700e-03, 1.00000000000000000e+00, + 6.73794699908546700e-03]), + (4, None, 0.2, True): array([0.00055308437014783, 0.0820849986238988, + 0.0820849986238988, 0.00055308437014783]), + (4, None, 1.0, False): array([0.1353352832366127, 0.36787944117144233, 1., + 0.36787944117144233]), + (4, None, 1.0, True): array([0.22313016014842982, 0.60653065971263342, + 0.60653065971263342, 0.22313016014842982]), + (4, 2, 0.2, False): + array([4.53999297624848542e-05, 6.73794699908546700e-03, + 1.00000000000000000e+00, 6.73794699908546700e-03]), + (4, 2, 0.2, True): None, + (4, 2, 1.0, False): array([0.1353352832366127, 0.36787944117144233, 1., + 0.36787944117144233]), + (4, 2, 1.0, True): None, + (5, None, 0.2, True): + array([4.53999297624848542e-05, + 6.73794699908546700e-03, 1.00000000000000000e+00, + 6.73794699908546700e-03, 4.53999297624848542e-05]), + (5, None, 1.0, True): array([0.1353352832366127, 0.36787944117144233, 1., + 0.36787944117144233, 0.1353352832366127]), + (5, 2, 0.2, True): None, + (5, 2, 1.0, True): None +} + + +def test_exponential(): + for k, v in exponential_data.items(): + if v is None: + assert_raises(ValueError, windows.exponential, *k) + else: + win = windows.exponential(*k) + assert_allclose(win, v, rtol=1e-14) + + +class TestFlatTop: + + def test_basic(self): + assert_allclose(windows.flattop(6, sym=False), + [-0.000421051, -0.051263156, 0.19821053, 1.0, + 0.19821053, -0.051263156]) + assert_allclose(windows.flattop(7, sym=False), + [-0.000421051, -0.03684078115492348, + 0.01070371671615342, 0.7808739149387698, + 0.7808739149387698, 0.01070371671615342, + -0.03684078115492348]) + assert_allclose(windows.flattop(6), + [-0.000421051, -0.0677142520762119, 0.6068721525762117, + 0.6068721525762117, -0.0677142520762119, + -0.000421051]) + assert_allclose(windows.flattop(7, True), + [-0.000421051, -0.051263156, 0.19821053, 1.0, + 0.19821053, -0.051263156, -0.000421051]) + + +class TestGaussian: + + def test_basic(self): + assert_allclose(windows.gaussian(6, 1.0), + [0.04393693362340742, 0.3246524673583497, + 0.8824969025845955, 0.8824969025845955, + 0.3246524673583497, 0.04393693362340742]) + assert_allclose(windows.gaussian(7, 1.2), + [0.04393693362340742, 0.2493522087772962, + 0.7066482778577162, 1.0, 0.7066482778577162, + 0.2493522087772962, 0.04393693362340742]) + assert_allclose(windows.gaussian(7, 3), + [0.6065306597126334, 0.8007374029168081, + 
0.9459594689067654, 1.0, 0.9459594689067654, + 0.8007374029168081, 0.6065306597126334]) + assert_allclose(windows.gaussian(6, 3, False), + [0.6065306597126334, 0.8007374029168081, + 0.9459594689067654, 1.0, 0.9459594689067654, + 0.8007374029168081]) + + +class TestGeneralCosine: + + def test_basic(self): + assert_allclose(windows.general_cosine(5, [0.5, 0.3, 0.2]), + [0.4, 0.3, 1, 0.3, 0.4]) + assert_allclose(windows.general_cosine(4, [0.5, 0.3, 0.2], sym=False), + [0.4, 0.3, 1, 0.3]) + + +class TestGeneralHamming: + + def test_basic(self): + assert_allclose(windows.general_hamming(5, 0.7), + [0.4, 0.7, 1.0, 0.7, 0.4]) + assert_allclose(windows.general_hamming(5, 0.75, sym=False), + [0.5, 0.6727457514, 0.9522542486, + 0.9522542486, 0.6727457514]) + assert_allclose(windows.general_hamming(6, 0.75, sym=True), + [0.5, 0.6727457514, 0.9522542486, + 0.9522542486, 0.6727457514, 0.5]) + + +class TestHamming: + + def test_basic(self): + assert_allclose(windows.hamming(6, False), + [0.08, 0.31, 0.77, 1.0, 0.77, 0.31]) + assert_allclose(windows.hamming(7, sym=False), + [0.08, 0.2531946911449826, 0.6423596296199047, + 0.9544456792351128, 0.9544456792351128, + 0.6423596296199047, 0.2531946911449826]) + assert_allclose(windows.hamming(6), + [0.08, 0.3978521825875242, 0.9121478174124757, + 0.9121478174124757, 0.3978521825875242, 0.08]) + assert_allclose(windows.hamming(7, sym=True), + [0.08, 0.31, 0.77, 1.0, 0.77, 0.31, 0.08]) + + +class TestHann: + + def test_basic(self): + assert_allclose(windows.hann(6, sym=False), + [0, 0.25, 0.75, 1.0, 0.75, 0.25], + rtol=1e-15, atol=1e-15) + assert_allclose(windows.hann(7, sym=False), + [0, 0.1882550990706332, 0.6112604669781572, + 0.9504844339512095, 0.9504844339512095, + 0.6112604669781572, 0.1882550990706332], + rtol=1e-15, atol=1e-15) + assert_allclose(windows.hann(6, True), + [0, 0.3454915028125263, 0.9045084971874737, + 0.9045084971874737, 0.3454915028125263, 0], + rtol=1e-15, atol=1e-15) + assert_allclose(windows.hann(7), + [0, 0.25, 0.75, 1.0, 0.75, 0.25, 0], + rtol=1e-15, atol=1e-15) + + +class TestKaiser: + + def test_basic(self): + assert_allclose(windows.kaiser(6, 0.5), + [0.9403061933191572, 0.9782962393705389, + 0.9975765035372042, 0.9975765035372042, + 0.9782962393705389, 0.9403061933191572]) + assert_allclose(windows.kaiser(7, 0.5), + [0.9403061933191572, 0.9732402256999829, + 0.9932754654413773, 1.0, 0.9932754654413773, + 0.9732402256999829, 0.9403061933191572]) + assert_allclose(windows.kaiser(6, 2.7), + [0.2603047507678832, 0.6648106293528054, + 0.9582099802511439, 0.9582099802511439, + 0.6648106293528054, 0.2603047507678832]) + assert_allclose(windows.kaiser(7, 2.7), + [0.2603047507678832, 0.5985765418119844, + 0.8868495172060835, 1.0, 0.8868495172060835, + 0.5985765418119844, 0.2603047507678832]) + assert_allclose(windows.kaiser(6, 2.7, False), + [0.2603047507678832, 0.5985765418119844, + 0.8868495172060835, 1.0, 0.8868495172060835, + 0.5985765418119844]) + + +class TestKaiserBesselDerived: + + def test_basic(self): + M = 100 + w = windows.kaiser_bessel_derived(M, beta=4.0) + w2 = windows.get_window(('kaiser bessel derived', 4.0), + M, fftbins=False) + assert_allclose(w, w2) + + # Test for Princen-Bradley condition + assert_allclose(w[:M // 2] ** 2 + w[-M // 2:] ** 2, 1.) 
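+        # (the two window halves are power-complementary: w[n]**2 + w[n + M//2]**2 == 1, +        # the property that makes KBD windows suitable for MDCT-style overlap-add)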
+ + # Test actual values from other implementations + # M = 2: sqrt(2) / 2 + # M = 4: 0.518562710536, 0.855039598640 + # M = 6: 0.436168993154, 0.707106781187, 0.899864772847 + # Ref:https://github.com/scipy/scipy/pull/4747#issuecomment-172849418 + assert_allclose(windows.kaiser_bessel_derived(2, beta=np.pi / 2)[:1], + np.sqrt(2) / 2) + + assert_allclose(windows.kaiser_bessel_derived(4, beta=np.pi / 2)[:2], + [0.518562710536, 0.855039598640]) + + assert_allclose(windows.kaiser_bessel_derived(6, beta=np.pi / 2)[:3], + [0.436168993154, 0.707106781187, 0.899864772847]) + + def test_exceptions(self): + M = 100 + # Assert ValueError for odd window length + msg = ("Kaiser-Bessel Derived windows are only defined for even " + "number of points") + with assert_raises(ValueError, match=msg): + windows.kaiser_bessel_derived(M + 1, beta=4.) + + # Assert ValueError for non-symmetric setting + msg = ("Kaiser-Bessel Derived windows are only defined for " + "symmetric shapes") + with assert_raises(ValueError, match=msg): + windows.kaiser_bessel_derived(M + 1, beta=4., sym=False) + + +class TestNuttall: + + def test_basic(self): + assert_allclose(windows.nuttall(6, sym=False), + [0.0003628, 0.0613345, 0.5292298, 1.0, 0.5292298, + 0.0613345]) + assert_allclose(windows.nuttall(7, sym=False), + [0.0003628, 0.03777576895352025, 0.3427276199688195, + 0.8918518610776603, 0.8918518610776603, + 0.3427276199688196, 0.0377757689535203]) + assert_allclose(windows.nuttall(6), + [0.0003628, 0.1105152530498718, 0.7982580969501282, + 0.7982580969501283, 0.1105152530498719, 0.0003628]) + assert_allclose(windows.nuttall(7, True), + [0.0003628, 0.0613345, 0.5292298, 1.0, 0.5292298, + 0.0613345, 0.0003628]) + + +class TestParzen: + + def test_basic(self): + assert_allclose(windows.parzen(6), + [0.009259259259259254, 0.25, 0.8611111111111112, + 0.8611111111111112, 0.25, 0.009259259259259254]) + assert_allclose(windows.parzen(7, sym=True), + [0.00583090379008747, 0.1574344023323616, + 0.6501457725947521, 1.0, 0.6501457725947521, + 0.1574344023323616, 0.00583090379008747]) + assert_allclose(windows.parzen(6, False), + [0.00583090379008747, 0.1574344023323616, + 0.6501457725947521, 1.0, 0.6501457725947521, + 0.1574344023323616]) + + +class TestTriang: + + def test_basic(self): + + assert_allclose(windows.triang(6, True), + [1/6, 1/2, 5/6, 5/6, 1/2, 1/6]) + assert_allclose(windows.triang(7), + [1/4, 1/2, 3/4, 1, 3/4, 1/2, 1/4]) + assert_allclose(windows.triang(6, sym=False), + [1/4, 1/2, 3/4, 1, 3/4, 1/2]) + + +tukey_data = { + (4, 0.5, True): array([0.0, 1.0, 1.0, 0.0]), + (4, 0.9, True): array([0.0, 0.84312081893436686, + 0.84312081893436686, 0.0]), + (4, 1.0, True): array([0.0, 0.75, 0.75, 0.0]), + (4, 0.5, False): array([0.0, 1.0, 1.0, 1.0]), + (4, 0.9, False): array([0.0, 0.58682408883346526, + 1.0, 0.58682408883346526]), + (4, 1.0, False): array([0.0, 0.5, 1.0, 0.5]), + (5, 0.0, True): array([1.0, 1.0, 1.0, 1.0, 1.0]), + (5, 0.8, True): array([0.0, 0.69134171618254492, + 1.0, 0.69134171618254492, 0.0]), + (5, 1.0, True): array([0.0, 0.5, 1.0, 0.5, 0.0]), + + (6, 0): [1, 1, 1, 1, 1, 1], + (7, 0): [1, 1, 1, 1, 1, 1, 1], + (6, .25): [0, 1, 1, 1, 1, 0], + (7, .25): [0, 1, 1, 1, 1, 1, 0], + (6,): [0, 0.9045084971874737, 1.0, 1.0, 0.9045084971874735, 0], + (7,): [0, 0.75, 1.0, 1.0, 1.0, 0.75, 0], + (6, .75): [0, 0.5522642316338269, 1.0, 1.0, 0.5522642316338267, 0], + (7, .75): [0, 0.4131759111665348, 0.9698463103929542, 1.0, + 0.9698463103929542, 0.4131759111665347, 0], + (6, 1): [0, 0.3454915028125263, 0.9045084971874737, 
0.9045084971874737, + 0.3454915028125263, 0], + (7, 1): [0, 0.25, 0.75, 1.0, 0.75, 0.25, 0], +} + + +class TestTukey: + + def test_basic(self): + # Test against hardcoded data + for k, v in tukey_data.items(): + if v is None: + assert_raises(ValueError, windows.tukey, *k) + else: + win = windows.tukey(*k) + assert_allclose(win, v, rtol=1e-15, atol=1e-15) + + def test_extremes(self): + # Test extremes of alpha correspond to boxcar and hann + tuk0 = windows.tukey(100, 0) + box0 = windows.boxcar(100) + assert_array_almost_equal(tuk0, box0) + + tuk1 = windows.tukey(100, 1) + han1 = windows.hann(100) + assert_array_almost_equal(tuk1, han1) + + +dpss_data = { + # All values from MATLAB: + # * taper[1] of (3, 1.4, 3) sign-flipped + # * taper[3] of (5, 1.5, 5) sign-flipped + (4, 0.1, 2): ([[0.497943898, 0.502047681, 0.502047681, 0.497943898], [0.670487993, 0.224601537, -0.224601537, -0.670487993]], [0.197961815, 0.002035474]), # noqa: E501 + (3, 1.4, 3): ([[0.410233151, 0.814504464, 0.410233151], [0.707106781, 0.0, -0.707106781], [0.575941629, -0.580157287, 0.575941629]], [0.999998093, 0.998067480, 0.801934426]), # noqa: E501 + (5, 1.5, 5): ([[0.1745071052, 0.4956749177, 0.669109327, 0.495674917, 0.174507105], [0.4399493348, 0.553574369, 0.0, -0.553574369, -0.439949334], [0.631452756, 0.073280238, -0.437943884, 0.073280238, 0.631452756], [0.553574369, -0.439949334, 0.0, 0.439949334, -0.553574369], [0.266110290, -0.498935248, 0.600414741, -0.498935248, 0.266110290147157]], [0.999728571, 0.983706916, 0.768457889, 0.234159338, 0.013947282907567]), # noqa: E501 + (100, 2, 4): ([[0.0030914414, 0.0041266922, 0.005315076, 0.006665149, 0.008184854, 0.0098814158, 0.011761239, 0.013829809, 0.016091597, 0.018549973, 0.02120712, 0.02406396, 0.027120092, 0.030373728, 0.033821651, 0.037459181, 0.041280145, 0.045276872, 0.049440192, 0.053759447, 0.058222524, 0.062815894, 0.067524661, 0.072332638, 0.077222418, 0.082175473, 0.087172252, 0.092192299, 0.097214376, 0.1022166, 0.10717657, 0.11207154, 0.11687856, 0.12157463, 0.12613686, 0.13054266, 0.13476986, 0.13879691, 0.14260302, 0.14616832, 0.14947401, 0.1525025, 0.15523755, 0.15766438, 0.15976981, 0.16154233, 0.16297223, 0.16405162, 0.16477455, 0.16513702, 0.16513702, 0.16477455, 0.16405162, 0.16297223, 0.16154233, 0.15976981, 0.15766438, 0.15523755, 0.1525025, 0.14947401, 0.14616832, 0.14260302, 0.13879691, 0.13476986, 0.13054266, 0.12613686, 0.12157463, 0.11687856, 0.11207154, 0.10717657, 0.1022166, 0.097214376, 0.092192299, 0.087172252, 0.082175473, 0.077222418, 0.072332638, 0.067524661, 0.062815894, 0.058222524, 0.053759447, 0.049440192, 0.045276872, 0.041280145, 0.037459181, 0.033821651, 0.030373728, 0.027120092, 0.02406396, 0.02120712, 0.018549973, 0.016091597, 0.013829809, 0.011761239, 0.0098814158, 0.008184854, 0.006665149, 0.005315076, 0.0041266922, 0.0030914414], [0.018064449, 0.022040342, 0.026325013, 0.030905288, 0.035764398, 0.040881982, 0.046234148, 0.051793558, 0.057529559, 0.063408356, 0.069393216, 0.075444716, 0.081521022, 0.087578202, 0.093570567, 0.099451049, 0.10517159, 0.11068356, 0.11593818, 0.12088699, 0.12548227, 0.12967752, 0.1334279, 0.13669069, 0.13942569, 0.1415957, 0.14316686, 0.14410905, 0.14439626, 0.14400686, 0.14292389, 0.1411353, 0.13863416, 0.13541876, 0.13149274, 0.12686516, 0.12155045, 0.1155684, 0.10894403, 0.10170748, 0.093893752, 0.08554251, 0.076697768, 0.067407559, 0.057723559, 0.04770068, 0.037396627, 0.026871428, 0.016186944, 0.0054063557, -0.0054063557, -0.016186944, -0.026871428, -0.037396627, -0.04770068, 
-0.057723559, -0.067407559, -0.076697768, -0.08554251, -0.093893752, -0.10170748, -0.10894403, -0.1155684, -0.12155045, -0.12686516, -0.13149274, -0.13541876, -0.13863416, -0.1411353, -0.14292389, -0.14400686, -0.14439626, -0.14410905, -0.14316686, -0.1415957, -0.13942569, -0.13669069, -0.1334279, -0.12967752, -0.12548227, -0.12088699, -0.11593818, -0.11068356, -0.10517159, -0.099451049, -0.093570567, -0.087578202, -0.081521022, -0.075444716, -0.069393216, -0.063408356, -0.057529559, -0.051793558, -0.046234148, -0.040881982, -0.035764398, -0.030905288, -0.026325013, -0.022040342, -0.018064449], [0.064817553, 0.072567801, 0.080292992, 0.087918235, 0.095367076, 0.10256232, 0.10942687, 0.1158846, 0.12186124, 0.12728523, 0.13208858, 0.13620771, 0.13958427, 0.14216587, 0.14390678, 0.14476863, 0.1447209, 0.14374148, 0.14181704, 0.13894336, 0.13512554, 0.13037812, 0.1247251, 0.11819984, 0.11084487, 0.10271159, 0.093859853, 0.084357497, 0.074279719, 0.063708406, 0.052731374, 0.041441525, 0.029935953, 0.018314987, 0.0066811877, -0.0048616765, -0.016209689, -0.027259848, -0.037911124, -0.048065512, -0.05762905, -0.066512804, -0.0746338, -0.081915903, -0.088290621, -0.09369783, -0.098086416, -0.10141482, -0.10365146, -0.10477512, -0.10477512, -0.10365146, -0.10141482, -0.098086416, -0.09369783, -0.088290621, -0.081915903, -0.0746338, -0.066512804, -0.05762905, -0.048065512, -0.037911124, -0.027259848, -0.016209689, -0.0048616765, 0.0066811877, 0.018314987, 0.029935953, 0.041441525, 0.052731374, 0.063708406, 0.074279719, 0.084357497, 0.093859853, 0.10271159, 0.11084487, 0.11819984, 0.1247251, 0.13037812, 0.13512554, 0.13894336, 0.14181704, 0.14374148, 0.1447209, 0.14476863, 0.14390678, 0.14216587, 0.13958427, 0.13620771, 0.13208858, 0.12728523, 0.12186124, 0.1158846, 0.10942687, 0.10256232, 0.095367076, 0.087918235, 0.080292992, 0.072567801, 0.064817553], [0.14985551, 0.15512305, 0.15931467, 0.16236806, 0.16423291, 0.16487165, 0.16426009, 0.1623879, 0.1592589, 0.15489114, 0.14931693, 0.14258255, 0.13474785, 0.1258857, 0.11608124, 0.10543095, 0.094041635, 0.082029213, 0.069517411, 0.056636348, 0.043521028, 0.030309756, 0.017142511, 0.0041592774, -0.0085016282, -0.020705223, -0.032321494, -0.043226982, -0.053306291, -0.062453515, -0.070573544, -0.077583253, -0.083412547, -0.088005244, -0.091319802, -0.093329861, -0.094024602, -0.093408915, -0.091503383, -0.08834406, -0.08398207, -0.078483012, -0.071926192, -0.064403681, -0.056019215, -0.046886954, -0.037130106, -0.026879442, -0.016271713, -0.005448, 0.005448, 0.016271713, 0.026879442, 0.037130106, 0.046886954, 0.056019215, 0.064403681, 0.071926192, 0.078483012, 0.08398207, 0.08834406, 0.091503383, 0.093408915, 0.094024602, 0.093329861, 0.091319802, 0.088005244, 0.083412547, 0.077583253, 0.070573544, 0.062453515, 0.053306291, 0.043226982, 0.032321494, 0.020705223, 0.0085016282, -0.0041592774, -0.017142511, -0.030309756, -0.043521028, -0.056636348, -0.069517411, -0.082029213, -0.094041635, -0.10543095, -0.11608124, -0.1258857, -0.13474785, -0.14258255, -0.14931693, -0.15489114, -0.1592589, -0.1623879, -0.16426009, -0.16487165, -0.16423291, -0.16236806, -0.15931467, -0.15512305, -0.14985551]], [0.999943140, 0.997571533, 0.959465463, 0.721862496]), # noqa: E501 +} + + +class TestDPSS: + + def test_basic(self): + # Test against hardcoded data + for k, v in dpss_data.items(): + win, ratios = windows.dpss(*k, return_ratios=True) + assert_allclose(win, v[0], atol=1e-7, err_msg=k) + assert_allclose(ratios, v[1], rtol=1e-5, atol=1e-7, err_msg=k) + + def 
test_unity(self): + # Test unity value handling (gh-2221) + for M in range(1, 21): + # corrected w/approximation (default) + win = windows.dpss(M, M / 2.1) + expected = M % 2 # one for odd, none for even + assert_equal(np.isclose(win, 1.).sum(), expected, + err_msg=f'{win}') + # corrected w/subsample delay (slower) + win_sub = windows.dpss(M, M / 2.1, norm='subsample') + if M > 2: + # @M=2 the subsample doesn't do anything + assert_equal(np.isclose(win_sub, 1.).sum(), expected, + err_msg=f'{win_sub}') + assert_allclose(win, win_sub, rtol=0.03) # within 3% + # not the same, l2-norm + win_2 = windows.dpss(M, M / 2.1, norm=2) + expected = 1 if M == 1 else 0 + assert_equal(np.isclose(win_2, 1.).sum(), expected, + err_msg=f'{win_2}') + + def test_extremes(self): + # Test extremes of alpha + lam = windows.dpss(31, 6, 4, return_ratios=True)[1] + assert_array_almost_equal(lam, 1.) + lam = windows.dpss(31, 7, 4, return_ratios=True)[1] + assert_array_almost_equal(lam, 1.) + lam = windows.dpss(31, 8, 4, return_ratios=True)[1] + assert_array_almost_equal(lam, 1.) + + def test_degenerate(self): + # Test failures + assert_raises(ValueError, windows.dpss, 4, 1.5, -1) # Bad Kmax + assert_raises(ValueError, windows.dpss, 4, 1.5, -5) + assert_raises(TypeError, windows.dpss, 4, 1.5, 1.1) + assert_raises(ValueError, windows.dpss, 3, 1.5, 3) # NW must be < N/2. + assert_raises(ValueError, windows.dpss, 3, -1, 3) # NW must be pos + assert_raises(ValueError, windows.dpss, 3, 0, 3) + assert_raises(ValueError, windows.dpss, -1, 1, 3) # negative M + + +class TestLanczos: + + def test_basic(self): + # Analytical results: + # sinc(x) = sinc(-x) + # sinc(pi) = 0, sinc(0) = 1 + # Hand computation on WolframAlpha: + # sinc(2 pi / 3) = 0.413496672 + # sinc(pi / 3) = 0.826993343 + # sinc(3 pi / 5) = 0.504551152 + # sinc(pi / 5) = 0.935489284 + assert_allclose(windows.lanczos(6, sym=False), + [0., 0.413496672, + 0.826993343, 1., 0.826993343, + 0.413496672], + atol=1e-9) + assert_allclose(windows.lanczos(6), + [0., 0.504551152, + 0.935489284, 0.935489284, + 0.504551152, 0.], + atol=1e-9) + assert_allclose(windows.lanczos(7, sym=True), + [0., 0.413496672, + 0.826993343, 1., 0.826993343, + 0.413496672, 0.], + atol=1e-9) + + def test_array_size(self): + for n in [0, 10, 11]: + assert_equal(len(windows.lanczos(n, sym=False)), n) + assert_equal(len(windows.lanczos(n, sym=True)), n) + + +class TestGetWindow: + + def test_boxcar(self): + w = windows.get_window('boxcar', 12) + assert_array_equal(w, np.ones_like(w)) + + # window is a tuple of len 1 + w = windows.get_window(('boxcar',), 16) + assert_array_equal(w, np.ones_like(w)) + + def test_cheb_odd(self): + with suppress_warnings() as sup: + sup.filter(UserWarning, "This window is not suitable") + w = windows.get_window(('chebwin', -40), 53, fftbins=False) + assert_array_almost_equal(w, cheb_odd_true, decimal=4) + + def test_cheb_even(self): + with suppress_warnings() as sup: + sup.filter(UserWarning, "This window is not suitable") + w = windows.get_window(('chebwin', 40), 54, fftbins=False) + assert_array_almost_equal(w, cheb_even_true, decimal=4) + + def test_dpss(self): + win1 = windows.get_window(('dpss', 3), 64, fftbins=False) + win2 = windows.dpss(64, 3) + assert_array_almost_equal(win1, win2, decimal=4) + + def test_kaiser_float(self): + win1 = windows.get_window(7.2, 64) + win2 = windows.kaiser(64, 7.2, False) + assert_allclose(win1, win2) + + def test_invalid_inputs(self): + # Window is not a float, tuple, or string + assert_raises(ValueError, windows.get_window, 
set('hann'), 8) + + # Unknown window type error + assert_raises(ValueError, windows.get_window, 'broken', 4) + + def test_array_as_window(self): + # GitHub issue 3603 + osfactor = 128 + sig = np.arange(128) + + win = windows.get_window(('kaiser', 8.0), osfactor // 2) + with assert_raises(ValueError, match='must have the same length'): + resample(sig, len(sig) * osfactor, window=win) + + def test_general_cosine(self): + assert_allclose(get_window(('general_cosine', [0.5, 0.3, 0.2]), 4), + [0.4, 0.3, 1, 0.3]) + assert_allclose(get_window(('general_cosine', [0.5, 0.3, 0.2]), 4, + fftbins=False), + [0.4, 0.55, 0.55, 0.4]) + + def test_general_hamming(self): + assert_allclose(get_window(('general_hamming', 0.7), 5), + [0.4, 0.6072949, 0.9427051, 0.9427051, 0.6072949]) + assert_allclose(get_window(('general_hamming', 0.7), 5, fftbins=False), + [0.4, 0.7, 1.0, 0.7, 0.4]) + + def test_lanczos(self): + assert_allclose(get_window('lanczos', 6), + [0., 0.413496672, 0.826993343, 1., 0.826993343, + 0.413496672], atol=1e-9) + assert_allclose(get_window('lanczos', 6, fftbins=False), + [0., 0.504551152, 0.935489284, 0.935489284, + 0.504551152, 0.], atol=1e-9) + assert_allclose(get_window('lanczos', 6), get_window('sinc', 6)) + + +def test_windowfunc_basics(): + for window_name, params in window_funcs: + window = getattr(windows, window_name) + with suppress_warnings() as sup: + sup.filter(UserWarning, "This window is not suitable") + # Check symmetry for odd and even lengths + w1 = window(8, *params, sym=True) + w2 = window(7, *params, sym=False) + assert_array_almost_equal(w1[:-1], w2) + + w1 = window(9, *params, sym=True) + w2 = window(8, *params, sym=False) + assert_array_almost_equal(w1[:-1], w2) + + # Check that functions run and output lengths are correct + assert_equal(len(window(6, *params, sym=True)), 6) + assert_equal(len(window(6, *params, sym=False)), 6) + assert_equal(len(window(7, *params, sym=True)), 7) + assert_equal(len(window(7, *params, sym=False)), 7) + + # Check invalid lengths + assert_raises(ValueError, window, 5.5, *params) + assert_raises(ValueError, window, -7, *params) + + # Check degenerate cases + assert_array_equal(window(0, *params, sym=True), []) + assert_array_equal(window(0, *params, sym=False), []) + assert_array_equal(window(1, *params, sym=True), [1]) + assert_array_equal(window(1, *params, sym=False), [1]) + + # Check dtype + assert_(window(0, *params, sym=True).dtype == 'float') + assert_(window(0, *params, sym=False).dtype == 'float') + assert_(window(1, *params, sym=True).dtype == 'float') + assert_(window(1, *params, sym=False).dtype == 'float') + assert_(window(6, *params, sym=True).dtype == 'float') + assert_(window(6, *params, sym=False).dtype == 'float') + + # Check normalization + assert_array_less(window(10, *params, sym=True), 1.01) + assert_array_less(window(10, *params, sym=False), 1.01) + assert_array_less(window(9, *params, sym=True), 1.01) + assert_array_less(window(9, *params, sym=False), 1.01) + + # Check that DFT-even spectrum is purely real for odd and even + assert_allclose(fft(window(10, *params, sym=False)).imag, + 0, atol=1e-14) + assert_allclose(fft(window(11, *params, sym=False)).imag, + 0, atol=1e-14) + + +def test_needs_params(): + for winstr in ['kaiser', 'ksr', 'kaiser_bessel_derived', 'kbd', + 'gaussian', 'gauss', 'gss', + 'general gaussian', 'general_gaussian', + 'general gauss', 'general_gauss', 'ggs', + 'dss', 'dpss', 'general cosine', 'general_cosine', + 'chebwin', 'cheb', 'general hamming', 'general_hamming', + ]: + 
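+        # each name above refers to a window that takes at least one required shape +        # parameter, so get_window must raise when given only the bare string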
assert_raises(ValueError, get_window, winstr, 7) + + +def test_not_needs_params(): + for winstr in ['barthann', + 'bartlett', + 'blackman', + 'blackmanharris', + 'bohman', + 'boxcar', + 'cosine', + 'flattop', + 'hamming', + 'nuttall', + 'parzen', + 'taylor', + 'exponential', + 'poisson', + 'tukey', + 'tuk', + 'triangle', + 'lanczos', + 'sinc', + ]: + win = get_window(winstr, 7) + assert_equal(len(win), 7) + + +def test_symmetric(): + + for win in [windows.lanczos]: + # Even sampling points + w = win(4096) + error = np.max(np.abs(w-np.flip(w))) + assert_equal(error, 0.0) + + # Odd sampling points + w = win(4097) + error = np.max(np.abs(w-np.flip(w))) + assert_equal(error, 0.0) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/waveforms.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/waveforms.py new file mode 100644 index 0000000000000000000000000000000000000000..30e71348d04276a66470a4053d97cefc60f7136e --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/waveforms.py @@ -0,0 +1,20 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.signal` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + +__all__ = [ # noqa: F822 + 'sawtooth', 'square', 'gausspulse', 'chirp', 'sweep_poly', + 'unit_impulse', +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="signal", module="waveforms", + private_modules=["_waveforms"], all=__all__, + attribute=name) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/wavelets.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/wavelets.py new file mode 100644 index 0000000000000000000000000000000000000000..fc897a2483536df7e995faaa29af621e25fe38c7 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/signal/wavelets.py @@ -0,0 +1,17 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.signal` namespace for importing the functions +# included below. 
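+# NOTE: unlike the waveforms shim above, nothing is re-exported here; __all__ below +# is empty and the module appears to remain only as a deprecation placeholder.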
+ +from scipy._lib.deprecation import _sub_module_deprecation + +__all__: list[str] = [] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="signal", module="wavelets", + private_modules=["_wavelets"], all=__all__, + attribute=name) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/version.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/version.py new file mode 100644 index 0000000000000000000000000000000000000000..0d4dc33a36ac814b2573bf47db1011fb30c4f9b1 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/scipy/version.py @@ -0,0 +1,12 @@ + +""" +Module to expose more detailed version info for the installed `scipy` +""" +version = "1.15.3" +full_version = version +short_version = version.split('.dev')[0] +git_revision = "e29dcb65a2040f04819b426a04b60d44a8f69c04" +release = 'dev' not in version and '+' not in version + +if not release: + version = full_version diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/cyextension/__init__.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/cyextension/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..cb8dc2c6ec397db2525d54341f1fef5221877aeb --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/cyextension/__init__.py @@ -0,0 +1,6 @@ +# cyextension/__init__.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/cyextension/collections.pyx b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/cyextension/collections.pyx new file mode 100644 index 0000000000000000000000000000000000000000..86d24852b3ffed16ef025ae65965dc0b7e0467ed --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/cyextension/collections.pyx @@ -0,0 +1,409 @@ +# cyextension/collections.pyx +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php +cimport cython +from cpython.long cimport PyLong_FromLongLong +from cpython.set cimport PySet_Add + +from collections.abc import Collection +from itertools import filterfalse + +cdef bint add_not_present(set seen, object item, hashfunc): + hash_value = hashfunc(item) + if hash_value not in seen: + PySet_Add(seen, hash_value) + return True + else: + return False + +cdef list cunique_list(seq, hashfunc=None): + cdef set seen = set() + if not hashfunc: + return [x for x in seq if x not in seen and not PySet_Add(seen, x)] + else: + return [x for x in seq if add_not_present(seen, x, hashfunc)] + +def unique_list(seq, hashfunc=None): + return cunique_list(seq, hashfunc) + +cdef class OrderedSet(set): + + cdef list _list + + @classmethod + def __class_getitem__(cls, key): + return cls + + def __init__(self, d=None): + set.__init__(self) + if d is not None: + self._list = cunique_list(d) + set.update(self, self._list) + else: + self._list = [] + + cpdef OrderedSet copy(self): + cdef OrderedSet cp = 
OrderedSet.__new__(OrderedSet) + cp._list = list(self._list) + set.update(cp, cp._list) + return cp + + @cython.final + cdef OrderedSet _from_list(self, list new_list): + cdef OrderedSet new = OrderedSet.__new__(OrderedSet) + new._list = new_list + set.update(new, new_list) + return new + + def add(self, element): + if element not in self: + self._list.append(element) + PySet_Add(self, element) + + def remove(self, element): + # set.remove will raise if element is not in self + set.remove(self, element) + self._list.remove(element) + + def pop(self): + try: + value = self._list.pop() + except IndexError: + raise KeyError("pop from an empty set") from None + set.remove(self, value) + return value + + def insert(self, Py_ssize_t pos, element): + if element not in self: + self._list.insert(pos, element) + PySet_Add(self, element) + + def discard(self, element): + if element in self: + set.remove(self, element) + self._list.remove(element) + + def clear(self): + set.clear(self) + self._list = [] + + def __getitem__(self, key): + return self._list[key] + + def __iter__(self): + return iter(self._list) + + def __add__(self, other): + return self.union(other) + + def __repr__(self): + return "%s(%r)" % (self.__class__.__name__, self._list) + + __str__ = __repr__ + + def update(self, *iterables): + for iterable in iterables: + for e in iterable: + if e not in self: + self._list.append(e) + set.add(self, e) + + def __ior__(self, iterable): + self.update(iterable) + return self + + def union(self, *other): + result = self.copy() + result.update(*other) + return result + + def __or__(self, other): + return self.union(other) + + def intersection(self, *other): + cdef set other_set = set.intersection(self, *other) + return self._from_list([a for a in self._list if a in other_set]) + + def __and__(self, other): + return self.intersection(other) + + def symmetric_difference(self, other): + cdef set other_set + if isinstance(other, set): + other_set = other + collection = other_set + elif isinstance(other, Collection): + collection = other + other_set = set(other) + else: + collection = list(other) + other_set = set(collection) + result = self._from_list([a for a in self._list if a not in other_set]) + result.update(a for a in collection if a not in self) + return result + + def __xor__(self, other): + return self.symmetric_difference(other) + + def difference(self, *other): + cdef set other_set = set.difference(self, *other) + return self._from_list([a for a in self._list if a in other_set]) + + def __sub__(self, other): + return self.difference(other) + + def intersection_update(self, *other): + set.intersection_update(self, *other) + self._list = [a for a in self._list if a in self] + + def __iand__(self, other): + self.intersection_update(other) + return self + + cpdef symmetric_difference_update(self, other): + collection = other if isinstance(other, Collection) else list(other) + set.symmetric_difference_update(self, collection) + self._list = [a for a in self._list if a in self] + self._list += [a for a in collection if a in self] + + def __ixor__(self, other): + self.symmetric_difference_update(other) + return self + + def difference_update(self, *other): + set.difference_update(self, *other) + self._list = [a for a in self._list if a in self] + + def __isub__(self, other): + self.difference_update(other) + return self + +cdef object cy_id(object item): + return PyLong_FromLongLong( (item)) + +# NOTE: cython 0.x will call __add__, __sub__, etc with the parameter swapped +# instead of the 
__rmeth__, so they need to check that also self is of the +# correct type. This is fixed in cython 3.x. See: +# https://docs.cython.org/en/latest/src/userguide/special_methods.html#arithmetic-methods +cdef class IdentitySet: + """A set that considers only object id() for uniqueness. + + This strategy has edge cases for builtin types- it's possible to have + two 'foo' strings in one of these sets, for example. Use sparingly. + + """ + + cdef dict _members + + def __init__(self, iterable=None): + self._members = {} + if iterable: + self.update(iterable) + + def add(self, value): + self._members[cy_id(value)] = value + + def __contains__(self, value): + return cy_id(value) in self._members + + cpdef remove(self, value): + del self._members[cy_id(value)] + + def discard(self, value): + try: + self.remove(value) + except KeyError: + pass + + def pop(self): + cdef tuple pair + try: + pair = self._members.popitem() + return pair[1] + except KeyError: + raise KeyError("pop from an empty set") + + def clear(self): + self._members.clear() + + def __eq__(self, other): + cdef IdentitySet other_ + if isinstance(other, IdentitySet): + other_ = other + return self._members == other_._members + else: + return False + + def __ne__(self, other): + cdef IdentitySet other_ + if isinstance(other, IdentitySet): + other_ = other + return self._members != other_._members + else: + return True + + cpdef issubset(self, iterable): + cdef IdentitySet other + if isinstance(iterable, self.__class__): + other = iterable + else: + other = self.__class__(iterable) + + if len(self) > len(other): + return False + for m in filterfalse(other._members.__contains__, self._members): + return False + return True + + def __le__(self, other): + if not isinstance(other, IdentitySet): + return NotImplemented + return self.issubset(other) + + def __lt__(self, other): + if not isinstance(other, IdentitySet): + return NotImplemented + return len(self) < len(other) and self.issubset(other) + + cpdef issuperset(self, iterable): + cdef IdentitySet other + if isinstance(iterable, self.__class__): + other = iterable + else: + other = self.__class__(iterable) + + if len(self) < len(other): + return False + for m in filterfalse(self._members.__contains__, other._members): + return False + return True + + def __ge__(self, other): + if not isinstance(other, IdentitySet): + return NotImplemented + return self.issuperset(other) + + def __gt__(self, other): + if not isinstance(other, IdentitySet): + return NotImplemented + return len(self) > len(other) and self.issuperset(other) + + cpdef IdentitySet union(self, iterable): + cdef IdentitySet result = self.__class__() + result._members.update(self._members) + result.update(iterable) + return result + + def __or__(self, other): + if not isinstance(other, IdentitySet) or not isinstance(self, IdentitySet): + return NotImplemented + return self.union(other) + + cpdef update(self, iterable): + for obj in iterable: + self._members[cy_id(obj)] = obj + + def __ior__(self, other): + if not isinstance(other, IdentitySet): + return NotImplemented + self.update(other) + return self + + cpdef IdentitySet difference(self, iterable): + cdef IdentitySet result = self.__new__(self.__class__) + if isinstance(iterable, self.__class__): + other = (iterable)._members + else: + other = {cy_id(obj) for obj in iterable} + result._members = {k:v for k, v in self._members.items() if k not in other} + return result + + def __sub__(self, other): + if not isinstance(other, IdentitySet) or not isinstance(self, IdentitySet): + 
return NotImplemented + return self.difference(other) + + cpdef difference_update(self, iterable): + cdef IdentitySet other = self.difference(iterable) + self._members = other._members + + def __isub__(self, other): + if not isinstance(other, IdentitySet): + return NotImplemented + self.difference_update(other) + return self + + cpdef IdentitySet intersection(self, iterable): + cdef IdentitySet result = self.__new__(self.__class__) + if isinstance(iterable, self.__class__): + other = (iterable)._members + else: + other = {cy_id(obj) for obj in iterable} + result._members = {k: v for k, v in self._members.items() if k in other} + return result + + def __and__(self, other): + if not isinstance(other, IdentitySet) or not isinstance(self, IdentitySet): + return NotImplemented + return self.intersection(other) + + cpdef intersection_update(self, iterable): + cdef IdentitySet other = self.intersection(iterable) + self._members = other._members + + def __iand__(self, other): + if not isinstance(other, IdentitySet): + return NotImplemented + self.intersection_update(other) + return self + + cpdef IdentitySet symmetric_difference(self, iterable): + cdef IdentitySet result = self.__new__(self.__class__) + cdef dict other + if isinstance(iterable, self.__class__): + other = (iterable)._members + else: + other = {cy_id(obj): obj for obj in iterable} + result._members = {k: v for k, v in self._members.items() if k not in other} + result._members.update( + [(k, v) for k, v in other.items() if k not in self._members] + ) + return result + + def __xor__(self, other): + if not isinstance(other, IdentitySet) or not isinstance(self, IdentitySet): + return NotImplemented + return self.symmetric_difference(other) + + cpdef symmetric_difference_update(self, iterable): + cdef IdentitySet other = self.symmetric_difference(iterable) + self._members = other._members + + def __ixor__(self, other): + if not isinstance(other, IdentitySet): + return NotImplemented + self.symmetric_difference(other) + return self + + cpdef IdentitySet copy(self): + cdef IdentitySet cp = self.__new__(self.__class__) + cp._members = self._members.copy() + return cp + + def __copy__(self): + return self.copy() + + def __len__(self): + return len(self._members) + + def __iter__(self): + return iter(self._members.values()) + + def __hash__(self): + raise TypeError("set objects are unhashable") + + def __repr__(self): + return "%s(%r)" % (type(self).__name__, list(self._members.values())) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/cyextension/immutabledict.pxd b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/cyextension/immutabledict.pxd new file mode 100644 index 0000000000000000000000000000000000000000..76f22893168891ecf32cb39be90c3b4765a79bd0 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/cyextension/immutabledict.pxd @@ -0,0 +1,8 @@ +# cyextension/immutabledict.pxd +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php +cdef class immutabledict(dict): + pass diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/cyextension/immutabledict.pyx b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/cyextension/immutabledict.pyx new file mode 100644 index 
0000000000000000000000000000000000000000..b37eccc4c39c3cc5f114e13bb58d1b9b8c06697a --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/cyextension/immutabledict.pyx @@ -0,0 +1,133 @@ +# cyextension/immutabledict.pyx +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php +from cpython.dict cimport PyDict_New, PyDict_Update, PyDict_Size + + +def _readonly_fn(obj): + raise TypeError( + "%s object is immutable and/or readonly" % obj.__class__.__name__) + + +def _immutable_fn(obj): + raise TypeError( + "%s object is immutable" % obj.__class__.__name__) + + +class ReadOnlyContainer: + + __slots__ = () + + def _readonly(self, *a,**kw): + _readonly_fn(self) + + __delitem__ = __setitem__ = __setattr__ = _readonly + + +class ImmutableDictBase(dict): + def _immutable(self, *a,**kw): + _immutable_fn(self) + + @classmethod + def __class_getitem__(cls, key): + return cls + + __delitem__ = __setitem__ = __setattr__ = _immutable + clear = pop = popitem = setdefault = update = _immutable + + +cdef class immutabledict(dict): + def __repr__(self): + return f"immutabledict({dict.__repr__(self)})" + + @classmethod + def __class_getitem__(cls, key): + return cls + + def union(self, *args, **kw): + cdef dict to_merge = None + cdef immutabledict result + cdef Py_ssize_t args_len = len(args) + if args_len > 1: + raise TypeError( + f'union expected at most 1 argument, got {args_len}' + ) + if args_len == 1: + attribute = args[0] + if isinstance(attribute, dict): + to_merge = attribute + if to_merge is None: + to_merge = dict(*args, **kw) + + if PyDict_Size(to_merge) == 0: + return self + + # new + update is faster than immutabledict(self) + result = immutabledict() + PyDict_Update(result, self) + PyDict_Update(result, to_merge) + return result + + def merge_with(self, *other): + cdef immutabledict result = None + cdef object d + cdef bint update = False + if not other: + return self + for d in other: + if d: + if update == False: + update = True + # new + update is faster than immutabledict(self) + result = immutabledict() + PyDict_Update(result, self) + PyDict_Update( + result, (d if isinstance(d, dict) else dict(d)) + ) + + return self if update == False else result + + def copy(self): + return self + + def __reduce__(self): + return immutabledict, (dict(self), ) + + def __delitem__(self, k): + _immutable_fn(self) + + def __setitem__(self, k, v): + _immutable_fn(self) + + def __setattr__(self, k, v): + _immutable_fn(self) + + def clear(self, *args, **kw): + _immutable_fn(self) + + def pop(self, *args, **kw): + _immutable_fn(self) + + def popitem(self, *args, **kw): + _immutable_fn(self) + + def setdefault(self, *args, **kw): + _immutable_fn(self) + + def update(self, *args, **kw): + _immutable_fn(self) + + # PEP 584 + def __ior__(self, other): + _immutable_fn(self) + + def __or__(self, other): + return immutabledict(dict.__or__(self, other)) + + def __ror__(self, other): + # NOTE: this is used only in cython 3.x; + # version 0.x will call __or__ with args inversed + return immutabledict(dict.__ror__(self, other)) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/cyextension/processors.pyx b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/cyextension/processors.pyx new file mode 100644 index 
0000000000000000000000000000000000000000..3d714569fa0bbee1ce4c0f6e57c6d2cbcba5696b --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/cyextension/processors.pyx @@ -0,0 +1,68 @@ +# cyextension/processors.pyx +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php +import datetime +from datetime import datetime as datetime_cls +from datetime import time as time_cls +from datetime import date as date_cls +import re + +from cpython.object cimport PyObject_Str +from cpython.unicode cimport PyUnicode_AsASCIIString, PyUnicode_Check, PyUnicode_Decode +from libc.stdio cimport sscanf + + +def int_to_boolean(value): + if value is None: + return None + return True if value else False + +def to_str(value): + return PyObject_Str(value) if value is not None else None + +def to_float(value): + return float(value) if value is not None else None + +cdef inline bytes to_bytes(object value, str type_name): + try: + return PyUnicode_AsASCIIString(value) + except Exception as e: + raise ValueError( + f"Couldn't parse {type_name} string '{value!r}' " + "- value is not a string." + ) from e + +def str_to_datetime(value): + if value is not None: + value = datetime_cls.fromisoformat(value) + return value + +def str_to_time(value): + if value is not None: + value = time_cls.fromisoformat(value) + return value + + +def str_to_date(value): + if value is not None: + value = date_cls.fromisoformat(value) + return value + + + +cdef class DecimalResultProcessor: + cdef object type_ + cdef str format_ + + def __cinit__(self, type_, format_): + self.type_ = type_ + self.format_ = format_ + + def process(self, object value): + if value is None: + return None + else: + return self.type_(self.format_ % value) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/cyextension/resultproxy.pyx b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/cyextension/resultproxy.pyx new file mode 100644 index 0000000000000000000000000000000000000000..b6e357a1f355f72d35a356feef7e982743625508 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/cyextension/resultproxy.pyx @@ -0,0 +1,102 @@ +# cyextension/resultproxy.pyx +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php +import operator + +cdef class BaseRow: + cdef readonly object _parent + cdef readonly dict _key_to_index + cdef readonly tuple _data + + def __init__(self, object parent, object processors, dict key_to_index, object data): + """Row objects are constructed by CursorResult objects.""" + + self._parent = parent + + self._key_to_index = key_to_index + + if processors: + self._data = _apply_processors(processors, data) + else: + self._data = tuple(data) + + def __reduce__(self): + return ( + rowproxy_reconstructor, + (self.__class__, self.__getstate__()), + ) + + def __getstate__(self): + return {"_parent": self._parent, "_data": self._data} + + def __setstate__(self, dict state): + parent = state["_parent"] + self._parent = parent + self._data = state["_data"] + self._key_to_index = parent._key_to_index + + def _values_impl(self): + return list(self) + + def __iter__(self): + return 
iter(self._data) + + def __len__(self): + return len(self._data) + + def __hash__(self): + return hash(self._data) + + def __getitem__(self, index): + return self._data[index] + + def _get_by_key_impl_mapping(self, key): + return self._get_by_key_impl(key, 0) + + cdef _get_by_key_impl(self, object key, int attr_err): + index = self._key_to_index.get(key) + if index is not None: + return self._data[index] + self._parent._key_not_found(key, attr_err != 0) + + def __getattr__(self, name): + return self._get_by_key_impl(name, 1) + + def _to_tuple_instance(self): + return self._data + + +cdef tuple _apply_processors(proc, data): + res = [] + for i in range(len(proc)): + p = proc[i] + if p is None: + res.append(data[i]) + else: + res.append(p(data[i])) + return tuple(res) + + +def rowproxy_reconstructor(cls, state): + obj = cls.__new__(cls) + obj.__setstate__(state) + return obj + + +cdef int is_contiguous(tuple indexes): + cdef int i + for i in range(1, len(indexes)): + if indexes[i-1] != indexes[i] -1: + return 0 + return 1 + + +def tuplegetter(*indexes): + if len(indexes) == 1 or is_contiguous(indexes) != 0: + # slice form is faster but returns a list if input is list + return operator.itemgetter(slice(indexes[0], indexes[-1] + 1)) + else: + return operator.itemgetter(*indexes) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/cyextension/util.pyx b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/cyextension/util.pyx new file mode 100644 index 0000000000000000000000000000000000000000..cb17acd69c08eb0c4d18bcb22a2979443fb2c170 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/cyextension/util.pyx @@ -0,0 +1,91 @@ +# cyextension/util.pyx +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php +from collections.abc import Mapping + +from sqlalchemy import exc + +cdef tuple _Empty_Tuple = () + +cdef inline bint _mapping_or_tuple(object value): + return isinstance(value, dict) or isinstance(value, tuple) or isinstance(value, Mapping) + +cdef inline bint _check_item(object params) except 0: + cdef object item + cdef bint ret = 1 + if params: + item = params[0] + if not _mapping_or_tuple(item): + ret = 0 + raise exc.ArgumentError( + "List argument must consist only of tuples or dictionaries" + ) + return ret + +def _distill_params_20(object params): + if params is None: + return _Empty_Tuple + elif isinstance(params, list) or isinstance(params, tuple): + _check_item(params) + return params + elif isinstance(params, dict) or isinstance(params, Mapping): + return [params] + else: + raise exc.ArgumentError("mapping or list expected for parameters") + + +def _distill_raw_params(object params): + if params is None: + return _Empty_Tuple + elif isinstance(params, list): + _check_item(params) + return params + elif _mapping_or_tuple(params): + return [params] + else: + raise exc.ArgumentError("mapping or sequence expected for parameters") + +cdef class prefix_anon_map(dict): + def __missing__(self, str key): + cdef str derived + cdef int anonymous_counter + cdef dict self_dict = self + + derived = key.split(" ", 1)[1] + + anonymous_counter = self_dict.get(derived, 1) + self_dict[derived] = anonymous_counter + 1 + value = f"{derived}_{anonymous_counter}" + self_dict[key] = value + return value + + +cdef class 
cache_anon_map(dict): + cdef int _index + + def __init__(self): + self._index = 0 + + def get_anon(self, obj): + cdef long long idself + cdef str id_ + cdef dict self_dict = self + + idself = id(obj) + if idself in self_dict: + return self_dict[idself], True + else: + id_ = self.__missing__(idself) + return id_, False + + def __missing__(self, key): + cdef str val + cdef dict self_dict = self + + self_dict[key] = val = str(self._index) + self._index += 1 + return val + diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/dialects/mysql/__init__.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/dialects/mysql/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9174c54413a00922e07ddd41e9084b28672b9612 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/dialects/mysql/__init__.py @@ -0,0 +1,104 @@ +# dialects/mysql/__init__.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php +# mypy: ignore-errors + + +from . import aiomysql # noqa +from . import asyncmy # noqa +from . import base # noqa +from . import cymysql # noqa +from . import mariadbconnector # noqa +from . import mysqlconnector # noqa +from . import mysqldb # noqa +from . import pymysql # noqa +from . import pyodbc # noqa +from .base import BIGINT +from .base import BINARY +from .base import BIT +from .base import BLOB +from .base import BOOLEAN +from .base import CHAR +from .base import DATE +from .base import DATETIME +from .base import DECIMAL +from .base import DOUBLE +from .base import ENUM +from .base import FLOAT +from .base import INTEGER +from .base import JSON +from .base import LONGBLOB +from .base import LONGTEXT +from .base import MEDIUMBLOB +from .base import MEDIUMINT +from .base import MEDIUMTEXT +from .base import NCHAR +from .base import NUMERIC +from .base import NVARCHAR +from .base import REAL +from .base import SET +from .base import SMALLINT +from .base import TEXT +from .base import TIME +from .base import TIMESTAMP +from .base import TINYBLOB +from .base import TINYINT +from .base import TINYTEXT +from .base import VARBINARY +from .base import VARCHAR +from .base import YEAR +from .dml import Insert +from .dml import insert +from .expression import match +from .mariadb import INET4 +from .mariadb import INET6 + +# default dialect +base.dialect = dialect = mysqldb.dialect + +__all__ = ( + "BIGINT", + "BINARY", + "BIT", + "BLOB", + "BOOLEAN", + "CHAR", + "DATE", + "DATETIME", + "DECIMAL", + "DOUBLE", + "ENUM", + "FLOAT", + "INET4", + "INET6", + "INTEGER", + "INTEGER", + "JSON", + "LONGBLOB", + "LONGTEXT", + "MEDIUMBLOB", + "MEDIUMINT", + "MEDIUMTEXT", + "NCHAR", + "NVARCHAR", + "NUMERIC", + "SET", + "SMALLINT", + "REAL", + "TEXT", + "TIME", + "TIMESTAMP", + "TINYBLOB", + "TINYINT", + "TINYTEXT", + "VARBINARY", + "VARCHAR", + "YEAR", + "dialect", + "insert", + "Insert", + "match", +) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/dialects/mysql/aiomysql.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/dialects/mysql/aiomysql.py new file mode 100644 index 0000000000000000000000000000000000000000..bd5e7de6b4fcb2d2d0ce9f8e0d77176f5c151cc8 --- /dev/null +++ 
b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/dialects/mysql/aiomysql.py @@ -0,0 +1,335 @@ +# dialects/mysql/aiomysql.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php +# mypy: ignore-errors + +r""" +.. dialect:: mysql+aiomysql + :name: aiomysql + :dbapi: aiomysql + :connectstring: mysql+aiomysql://user:password@host:port/dbname[?key=value&key=value...] + :url: https://github.com/aio-libs/aiomysql + +The aiomysql dialect is SQLAlchemy's second Python asyncio dialect. + +Using a special asyncio mediation layer, the aiomysql dialect is usable +as the backend for the :ref:`SQLAlchemy asyncio ` +extension package. + +This dialect should normally be used only with the +:func:`_asyncio.create_async_engine` engine creation function:: + + from sqlalchemy.ext.asyncio import create_async_engine + + engine = create_async_engine( + "mysql+aiomysql://user:pass@hostname/dbname?charset=utf8mb4" + ) + +""" # noqa +from collections import deque + +from .pymysql import MySQLDialect_pymysql +from ... import pool +from ... import util +from ...engine import AdaptedConnection +from ...util.concurrency import asyncio +from ...util.concurrency import await_fallback +from ...util.concurrency import await_only + + +class AsyncAdapt_aiomysql_cursor: + # TODO: base on connectors/asyncio.py + # see #10415 + server_side = False + __slots__ = ( + "_adapt_connection", + "_connection", + "await_", + "_cursor", + "_rows", + ) + + def __init__(self, adapt_connection): + self._adapt_connection = adapt_connection + self._connection = adapt_connection._connection + self.await_ = adapt_connection.await_ + + cursor = self._connection.cursor(adapt_connection.dbapi.Cursor) + + # see https://github.com/aio-libs/aiomysql/issues/543 + self._cursor = self.await_(cursor.__aenter__()) + self._rows = deque() + + @property + def description(self): + return self._cursor.description + + @property + def rowcount(self): + return self._cursor.rowcount + + @property + def arraysize(self): + return self._cursor.arraysize + + @arraysize.setter + def arraysize(self, value): + self._cursor.arraysize = value + + @property + def lastrowid(self): + return self._cursor.lastrowid + + def close(self): + # note we aren't actually closing the cursor here, + # we are just letting GC do it. to allow this to be async + # we would need the Result to change how it does "Safe close cursor". + # MySQL "cursors" don't actually have state to be "closed" besides + # exhausting rows, which we already have done for sync cursor. + # another option would be to emulate aiosqlite dialect and assign + # cursor only if we are doing server side cursor operation. + self._rows.clear() + + def execute(self, operation, parameters=None): + return self.await_(self._execute_async(operation, parameters)) + + def executemany(self, operation, seq_of_parameters): + return self.await_( + self._executemany_async(operation, seq_of_parameters) + ) + + async def _execute_async(self, operation, parameters): + async with self._adapt_connection._execute_mutex: + result = await self._cursor.execute(operation, parameters) + + if not self.server_side: + # aiomysql has a "fake" async result, so we have to pull it out + # of that here since our default result is not async. + # we could just as easily grab "_rows" here and be done with it + # but this is safer. 
+ self._rows = deque(await self._cursor.fetchall()) + return result + + async def _executemany_async(self, operation, seq_of_parameters): + async with self._adapt_connection._execute_mutex: + return await self._cursor.executemany(operation, seq_of_parameters) + + def setinputsizes(self, *inputsizes): + pass + + def __iter__(self): + while self._rows: + yield self._rows.popleft() + + def fetchone(self): + if self._rows: + return self._rows.popleft() + else: + return None + + def fetchmany(self, size=None): + if size is None: + size = self.arraysize + + rr = self._rows + return [rr.popleft() for _ in range(min(size, len(rr)))] + + def fetchall(self): + retval = list(self._rows) + self._rows.clear() + return retval + + +class AsyncAdapt_aiomysql_ss_cursor(AsyncAdapt_aiomysql_cursor): + # TODO: base on connectors/asyncio.py + # see #10415 + __slots__ = () + server_side = True + + def __init__(self, adapt_connection): + self._adapt_connection = adapt_connection + self._connection = adapt_connection._connection + self.await_ = adapt_connection.await_ + + cursor = self._connection.cursor(adapt_connection.dbapi.SSCursor) + + self._cursor = self.await_(cursor.__aenter__()) + + def close(self): + if self._cursor is not None: + self.await_(self._cursor.close()) + self._cursor = None + + def fetchone(self): + return self.await_(self._cursor.fetchone()) + + def fetchmany(self, size=None): + return self.await_(self._cursor.fetchmany(size=size)) + + def fetchall(self): + return self.await_(self._cursor.fetchall()) + + +class AsyncAdapt_aiomysql_connection(AdaptedConnection): + # TODO: base on connectors/asyncio.py + # see #10415 + await_ = staticmethod(await_only) + __slots__ = ("dbapi", "_execute_mutex") + + def __init__(self, dbapi, connection): + self.dbapi = dbapi + self._connection = connection + self._execute_mutex = asyncio.Lock() + + def ping(self, reconnect): + return self.await_(self._connection.ping(reconnect)) + + def character_set_name(self): + return self._connection.character_set_name() + + def autocommit(self, value): + self.await_(self._connection.autocommit(value)) + + def cursor(self, server_side=False): + if server_side: + return AsyncAdapt_aiomysql_ss_cursor(self) + else: + return AsyncAdapt_aiomysql_cursor(self) + + def rollback(self): + self.await_(self._connection.rollback()) + + def commit(self): + self.await_(self._connection.commit()) + + def terminate(self): + # it's not awaitable. 
+ self._connection.close() + + def close(self) -> None: + self.await_(self._connection.ensure_closed()) + + +class AsyncAdaptFallback_aiomysql_connection(AsyncAdapt_aiomysql_connection): + # TODO: base on connectors/asyncio.py + # see #10415 + __slots__ = () + + await_ = staticmethod(await_fallback) + + +class AsyncAdapt_aiomysql_dbapi: + def __init__(self, aiomysql, pymysql): + self.aiomysql = aiomysql + self.pymysql = pymysql + self.paramstyle = "format" + self._init_dbapi_attributes() + self.Cursor, self.SSCursor = self._init_cursors_subclasses() + + def _init_dbapi_attributes(self): + for name in ( + "Warning", + "Error", + "InterfaceError", + "DataError", + "DatabaseError", + "OperationalError", + "InterfaceError", + "IntegrityError", + "ProgrammingError", + "InternalError", + "NotSupportedError", + ): + setattr(self, name, getattr(self.aiomysql, name)) + + for name in ( + "NUMBER", + "STRING", + "DATETIME", + "BINARY", + "TIMESTAMP", + "Binary", + ): + setattr(self, name, getattr(self.pymysql, name)) + + def connect(self, *arg, **kw): + async_fallback = kw.pop("async_fallback", False) + creator_fn = kw.pop("async_creator_fn", self.aiomysql.connect) + + if util.asbool(async_fallback): + return AsyncAdaptFallback_aiomysql_connection( + self, + await_fallback(creator_fn(*arg, **kw)), + ) + else: + return AsyncAdapt_aiomysql_connection( + self, + await_only(creator_fn(*arg, **kw)), + ) + + def _init_cursors_subclasses(self): + # suppress unconditional warning emitted by aiomysql + class Cursor(self.aiomysql.Cursor): + async def _show_warnings(self, conn): + pass + + class SSCursor(self.aiomysql.SSCursor): + async def _show_warnings(self, conn): + pass + + return Cursor, SSCursor + + +class MySQLDialect_aiomysql(MySQLDialect_pymysql): + driver = "aiomysql" + supports_statement_cache = True + + supports_server_side_cursors = True + _sscursor = AsyncAdapt_aiomysql_ss_cursor + + is_async = True + has_terminate = True + + @classmethod + def import_dbapi(cls): + return AsyncAdapt_aiomysql_dbapi( + __import__("aiomysql"), __import__("pymysql") + ) + + @classmethod + def get_pool_class(cls, url): + async_fallback = url.query.get("async_fallback", False) + + if util.asbool(async_fallback): + return pool.FallbackAsyncAdaptedQueuePool + else: + return pool.AsyncAdaptedQueuePool + + def do_terminate(self, dbapi_connection) -> None: + dbapi_connection.terminate() + + def create_connect_args(self, url): + return super().create_connect_args( + url, _translate_args=dict(username="user", database="db") + ) + + def is_disconnect(self, e, connection, cursor): + if super().is_disconnect(e, connection, cursor): + return True + else: + str_e = str(e).lower() + return "not connected" in str_e + + def _found_rows_client_flag(self): + from pymysql.constants import CLIENT + + return CLIENT.FOUND_ROWS + + def get_driver_connection(self, connection): + return connection._connection + + +dialect = MySQLDialect_aiomysql diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/dialects/mysql/asyncmy.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/dialects/mysql/asyncmy.py new file mode 100644 index 0000000000000000000000000000000000000000..9ec54e694da067d91825c7f4ad35967060e1c1fc --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/dialects/mysql/asyncmy.py @@ -0,0 +1,339 @@ +# dialects/mysql/asyncmy.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# 
+# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php +# mypy: ignore-errors + +r""" +.. dialect:: mysql+asyncmy + :name: asyncmy + :dbapi: asyncmy + :connectstring: mysql+asyncmy://user:password@host:port/dbname[?key=value&key=value...] + :url: https://github.com/long2ice/asyncmy + +Using a special asyncio mediation layer, the asyncmy dialect is usable +as the backend for the :ref:`SQLAlchemy asyncio ` +extension package. + +This dialect should normally be used only with the +:func:`_asyncio.create_async_engine` engine creation function:: + + from sqlalchemy.ext.asyncio import create_async_engine + + engine = create_async_engine( + "mysql+asyncmy://user:pass@hostname/dbname?charset=utf8mb4" + ) + +""" # noqa +from collections import deque +from contextlib import asynccontextmanager + +from .pymysql import MySQLDialect_pymysql +from ... import pool +from ... import util +from ...engine import AdaptedConnection +from ...util.concurrency import asyncio +from ...util.concurrency import await_fallback +from ...util.concurrency import await_only + + +class AsyncAdapt_asyncmy_cursor: + # TODO: base on connectors/asyncio.py + # see #10415 + server_side = False + __slots__ = ( + "_adapt_connection", + "_connection", + "await_", + "_cursor", + "_rows", + ) + + def __init__(self, adapt_connection): + self._adapt_connection = adapt_connection + self._connection = adapt_connection._connection + self.await_ = adapt_connection.await_ + + cursor = self._connection.cursor() + + self._cursor = self.await_(cursor.__aenter__()) + self._rows = deque() + + @property + def description(self): + return self._cursor.description + + @property + def rowcount(self): + return self._cursor.rowcount + + @property + def arraysize(self): + return self._cursor.arraysize + + @arraysize.setter + def arraysize(self, value): + self._cursor.arraysize = value + + @property + def lastrowid(self): + return self._cursor.lastrowid + + def close(self): + # note we aren't actually closing the cursor here, + # we are just letting GC do it. to allow this to be async + # we would need the Result to change how it does "Safe close cursor". + # MySQL "cursors" don't actually have state to be "closed" besides + # exhausting rows, which we already have done for sync cursor. + # another option would be to emulate aiosqlite dialect and assign + # cursor only if we are doing server side cursor operation. + self._rows.clear() + + def execute(self, operation, parameters=None): + return self.await_(self._execute_async(operation, parameters)) + + def executemany(self, operation, seq_of_parameters): + return self.await_( + self._executemany_async(operation, seq_of_parameters) + ) + + async def _execute_async(self, operation, parameters): + async with self._adapt_connection._mutex_and_adapt_errors(): + if parameters is None: + result = await self._cursor.execute(operation) + else: + result = await self._cursor.execute(operation, parameters) + + if not self.server_side: + # asyncmy has a "fake" async result, so we have to pull it out + # of that here since our default result is not async. + # we could just as easily grab "_rows" here and be done with it + # but this is safer. 
+ self._rows = deque(await self._cursor.fetchall()) + return result + + async def _executemany_async(self, operation, seq_of_parameters): + async with self._adapt_connection._mutex_and_adapt_errors(): + return await self._cursor.executemany(operation, seq_of_parameters) + + def setinputsizes(self, *inputsizes): + pass + + def __iter__(self): + while self._rows: + yield self._rows.popleft() + + def fetchone(self): + if self._rows: + return self._rows.popleft() + else: + return None + + def fetchmany(self, size=None): + if size is None: + size = self.arraysize + + rr = self._rows + return [rr.popleft() for _ in range(min(size, len(rr)))] + + def fetchall(self): + retval = list(self._rows) + self._rows.clear() + return retval + + +class AsyncAdapt_asyncmy_ss_cursor(AsyncAdapt_asyncmy_cursor): + # TODO: base on connectors/asyncio.py + # see #10415 + __slots__ = () + server_side = True + + def __init__(self, adapt_connection): + self._adapt_connection = adapt_connection + self._connection = adapt_connection._connection + self.await_ = adapt_connection.await_ + + cursor = self._connection.cursor( + adapt_connection.dbapi.asyncmy.cursors.SSCursor + ) + + self._cursor = self.await_(cursor.__aenter__()) + + def close(self): + if self._cursor is not None: + self.await_(self._cursor.close()) + self._cursor = None + + def fetchone(self): + return self.await_(self._cursor.fetchone()) + + def fetchmany(self, size=None): + return self.await_(self._cursor.fetchmany(size=size)) + + def fetchall(self): + return self.await_(self._cursor.fetchall()) + + +class AsyncAdapt_asyncmy_connection(AdaptedConnection): + # TODO: base on connectors/asyncio.py + # see #10415 + await_ = staticmethod(await_only) + __slots__ = ("dbapi", "_execute_mutex") + + def __init__(self, dbapi, connection): + self.dbapi = dbapi + self._connection = connection + self._execute_mutex = asyncio.Lock() + + @asynccontextmanager + async def _mutex_and_adapt_errors(self): + async with self._execute_mutex: + try: + yield + except AttributeError: + raise self.dbapi.InternalError( + "network operation failed due to asyncmy attribute error" + ) + + def ping(self, reconnect): + assert not reconnect + return self.await_(self._do_ping()) + + async def _do_ping(self): + async with self._mutex_and_adapt_errors(): + return await self._connection.ping(False) + + def character_set_name(self): + return self._connection.character_set_name() + + def autocommit(self, value): + self.await_(self._connection.autocommit(value)) + + def cursor(self, server_side=False): + if server_side: + return AsyncAdapt_asyncmy_ss_cursor(self) + else: + return AsyncAdapt_asyncmy_cursor(self) + + def rollback(self): + self.await_(self._connection.rollback()) + + def commit(self): + self.await_(self._connection.commit()) + + def terminate(self): + # it's not awaitable. 
+ self._connection.close() + + def close(self) -> None: + self.await_(self._connection.ensure_closed()) + + +class AsyncAdaptFallback_asyncmy_connection(AsyncAdapt_asyncmy_connection): + __slots__ = () + + await_ = staticmethod(await_fallback) + + +def _Binary(x): + """Return x as a binary type.""" + return bytes(x) + + +class AsyncAdapt_asyncmy_dbapi: + def __init__(self, asyncmy): + self.asyncmy = asyncmy + self.paramstyle = "format" + self._init_dbapi_attributes() + + def _init_dbapi_attributes(self): + for name in ( + "Warning", + "Error", + "InterfaceError", + "DataError", + "DatabaseError", + "OperationalError", + "InterfaceError", + "IntegrityError", + "ProgrammingError", + "InternalError", + "NotSupportedError", + ): + setattr(self, name, getattr(self.asyncmy.errors, name)) + + STRING = util.symbol("STRING") + NUMBER = util.symbol("NUMBER") + BINARY = util.symbol("BINARY") + DATETIME = util.symbol("DATETIME") + TIMESTAMP = util.symbol("TIMESTAMP") + Binary = staticmethod(_Binary) + + def connect(self, *arg, **kw): + async_fallback = kw.pop("async_fallback", False) + creator_fn = kw.pop("async_creator_fn", self.asyncmy.connect) + + if util.asbool(async_fallback): + return AsyncAdaptFallback_asyncmy_connection( + self, + await_fallback(creator_fn(*arg, **kw)), + ) + else: + return AsyncAdapt_asyncmy_connection( + self, + await_only(creator_fn(*arg, **kw)), + ) + + +class MySQLDialect_asyncmy(MySQLDialect_pymysql): + driver = "asyncmy" + supports_statement_cache = True + + supports_server_side_cursors = True + _sscursor = AsyncAdapt_asyncmy_ss_cursor + + is_async = True + has_terminate = True + + @classmethod + def import_dbapi(cls): + return AsyncAdapt_asyncmy_dbapi(__import__("asyncmy")) + + @classmethod + def get_pool_class(cls, url): + async_fallback = url.query.get("async_fallback", False) + + if util.asbool(async_fallback): + return pool.FallbackAsyncAdaptedQueuePool + else: + return pool.AsyncAdaptedQueuePool + + def do_terminate(self, dbapi_connection) -> None: + dbapi_connection.terminate() + + def create_connect_args(self, url): + return super().create_connect_args( + url, _translate_args=dict(username="user", database="db") + ) + + def is_disconnect(self, e, connection, cursor): + if super().is_disconnect(e, connection, cursor): + return True + else: + str_e = str(e).lower() + return ( + "not connected" in str_e or "network operation failed" in str_e + ) + + def _found_rows_client_flag(self): + from asyncmy.constants import CLIENT + + return CLIENT.FOUND_ROWS + + def get_driver_connection(self, connection): + return connection._connection + + +dialect = MySQLDialect_asyncmy diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/dialects/mysql/base.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/dialects/mysql/base.py new file mode 100644 index 0000000000000000000000000000000000000000..a78c4e0f747f9d440847a44f4c5a17e4fa87d56d --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/dialects/mysql/base.py @@ -0,0 +1,3581 @@ +# dialects/mysql/base.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php +# mypy: ignore-errors + + +r""" + +.. 
dialect:: mysql + :name: MySQL / MariaDB + :normal_support: 5.6+ / 10+ + :best_effort: 5.0.2+ / 5.0.2+ + +Supported Versions and Features +------------------------------- + +SQLAlchemy supports MySQL starting with version 5.0.2 through modern releases, +as well as all modern versions of MariaDB. See the official MySQL +documentation for detailed information about features supported in any given +server release. + +.. versionchanged:: 1.4 minimum MySQL version supported is now 5.0.2. + +MariaDB Support +~~~~~~~~~~~~~~~ + +The MariaDB variant of MySQL retains fundamental compatibility with MySQL's +protocols however the development of these two products continues to diverge. +Within the realm of SQLAlchemy, the two databases have a small number of +syntactical and behavioral differences that SQLAlchemy accommodates automatically. +To connect to a MariaDB database, no changes to the database URL are required:: + + + engine = create_engine( + "mysql+pymysql://user:pass@some_mariadb/dbname?charset=utf8mb4" + ) + +Upon first connect, the SQLAlchemy dialect employs a +server version detection scheme that determines if the +backing database reports as MariaDB. Based on this flag, the dialect +can make different choices in those of areas where its behavior +must be different. + +.. _mysql_mariadb_only_mode: + +MariaDB-Only Mode +~~~~~~~~~~~~~~~~~ + +The dialect also supports an **optional** "MariaDB-only" mode of connection, which may be +useful for the case where an application makes use of MariaDB-specific features +and is not compatible with a MySQL database. To use this mode of operation, +replace the "mysql" token in the above URL with "mariadb":: + + engine = create_engine( + "mariadb+pymysql://user:pass@some_mariadb/dbname?charset=utf8mb4" + ) + +The above engine, upon first connect, will raise an error if the server version +detection detects that the backing database is not MariaDB. + +When using an engine with ``"mariadb"`` as the dialect name, **all mysql-specific options +that include the name "mysql" in them are now named with "mariadb"**. This means +options like ``mysql_engine`` should be named ``mariadb_engine``, etc. Both +"mysql" and "mariadb" options can be used simultaneously for applications that +use URLs with both "mysql" and "mariadb" dialects:: + + my_table = Table( + "mytable", + metadata, + Column("id", Integer, primary_key=True), + Column("textdata", String(50)), + mariadb_engine="InnoDB", + mysql_engine="InnoDB", + ) + + Index( + "textdata_ix", + my_table.c.textdata, + mysql_prefix="FULLTEXT", + mariadb_prefix="FULLTEXT", + ) + +Similar behavior will occur when the above structures are reflected, i.e. the +"mariadb" prefix will be present in the option names when the database URL +is based on the "mariadb" name. + +.. versionadded:: 1.4 Added "mariadb" dialect name supporting "MariaDB-only mode" + for the MySQL dialect. + +.. _mysql_connection_timeouts: + +Connection Timeouts and Disconnects +----------------------------------- + +MySQL / MariaDB feature an automatic connection close behavior, for connections that +have been idle for a fixed period of time, defaulting to eight hours. 
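+The eight hour figure corresponds to the server's ``wait_timeout`` variable,
+which defaults to 28800 seconds.  As a quick way to see what value a
+particular server actually enforces, the variable can be queried directly;
+a minimal sketch, assuming a reachable server and the PyMySQL driver::
+
+    from sqlalchemy import create_engine, text
+
+    engine = create_engine("mysql+pymysql://scott:tiger@localhost/test")
+
+    with engine.connect() as conn:
+        # wait_timeout is the number of seconds the server waits for
+        # activity on a non-interactive connection before closing it
+        name, value = conn.execute(
+            text("SHOW VARIABLES LIKE 'wait_timeout'")
+        ).one()
+        print(f"{name} = {value} seconds")
+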
+To circumvent having this issue, use +the :paramref:`_sa.create_engine.pool_recycle` option which ensures that +a connection will be discarded and replaced with a new one if it has been +present in the pool for a fixed number of seconds:: + + engine = create_engine("mysql+mysqldb://...", pool_recycle=3600) + +For more comprehensive disconnect detection of pooled connections, including +accommodation of server restarts and network issues, a pre-ping approach may +be employed. See :ref:`pool_disconnects` for current approaches. + +.. seealso:: + + :ref:`pool_disconnects` - Background on several techniques for dealing + with timed out connections as well as database restarts. + +.. _mysql_storage_engines: + +CREATE TABLE arguments including Storage Engines +------------------------------------------------ + +Both MySQL's and MariaDB's CREATE TABLE syntax includes a wide array of special options, +including ``ENGINE``, ``CHARSET``, ``MAX_ROWS``, ``ROW_FORMAT``, +``INSERT_METHOD``, and many more. +To accommodate the rendering of these arguments, specify the form +``mysql_argument_name="value"``. For example, to specify a table with +``ENGINE`` of ``InnoDB``, ``CHARSET`` of ``utf8mb4``, and ``KEY_BLOCK_SIZE`` +of ``1024``:: + + Table( + "mytable", + metadata, + Column("data", String(32)), + mysql_engine="InnoDB", + mysql_charset="utf8mb4", + mysql_key_block_size="1024", + ) + +When supporting :ref:`mysql_mariadb_only_mode` mode, similar keys against +the "mariadb" prefix must be included as well. The values can of course +vary independently so that different settings on MySQL vs. MariaDB may +be maintained:: + + # support both "mysql" and "mariadb-only" engine URLs + + Table( + "mytable", + metadata, + Column("data", String(32)), + mysql_engine="InnoDB", + mariadb_engine="InnoDB", + mysql_charset="utf8mb4", + mariadb_charset="utf8", + mysql_key_block_size="1024", + mariadb_key_block_size="1024", + ) + +The MySQL / MariaDB dialects will normally transfer any keyword specified as +``mysql_keyword_name`` to be rendered as ``KEYWORD_NAME`` in the +``CREATE TABLE`` statement. A handful of these names will render with a space +instead of an underscore; to support this, the MySQL dialect has awareness of +these particular names, which include ``DATA DIRECTORY`` +(e.g. ``mysql_data_directory``), ``CHARACTER SET`` (e.g. +``mysql_character_set``) and ``INDEX DIRECTORY`` (e.g. +``mysql_index_directory``). + +The most common argument is ``mysql_engine``, which refers to the storage +engine for the table. Historically, MySQL server installations would default +to ``MyISAM`` for this value, although newer versions may be defaulting +to ``InnoDB``. The ``InnoDB`` engine is typically preferred for its support +of transactions and foreign keys. + +A :class:`_schema.Table` +that is created in a MySQL / MariaDB database with a storage engine +of ``MyISAM`` will be essentially non-transactional, meaning any +INSERT/UPDATE/DELETE statement referring to this table will be invoked as +autocommit. It also will have no support for foreign key constraints; while +the ``CREATE TABLE`` statement accepts foreign key options, when using the +``MyISAM`` storage engine these arguments are discarded. Reflecting such a +table will also produce no foreign key constraint information. + +For fully atomic transactions as well as support for foreign key +constraints, all participating ``CREATE TABLE`` statements must specify a +transactional engine, which in the vast majority of cases is ``InnoDB``. 
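+To verify which storage engine an existing table actually uses, the
+reflection API can report the ``CREATE TABLE`` options that were applied;
+a minimal sketch, assuming a table named ``mytable`` already exists in the
+target database::
+
+    from sqlalchemy import create_engine, inspect
+
+    engine = create_engine("mysql+pymysql://scott:tiger@localhost/test")
+
+    insp = inspect(engine)
+    # on MySQL / MariaDB this dictionary includes the engine in use,
+    # e.g. {"mysql_engine": "InnoDB", ...} (or "mariadb_engine" when
+    # connecting with the "mariadb" dialect name)
+    print(insp.get_table_options("mytable"))
+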
+ +Partitioning can similarly be specified using similar options. +In the example below the create table will specify ``PARTITION_BY``, +``PARTITIONS``, ``SUBPARTITIONS`` and ``SUBPARTITION_BY``:: + + # can also use mariadb_* prefix + Table( + "testtable", + MetaData(), + Column("id", Integer(), primary_key=True, autoincrement=True), + Column("other_id", Integer(), primary_key=True, autoincrement=False), + mysql_partitions="2", + mysql_partition_by="KEY(other_id)", + mysql_subpartition_by="HASH(some_expr)", + mysql_subpartitions="2", + ) + +This will render: + +.. sourcecode:: sql + + CREATE TABLE testtable ( + id INTEGER NOT NULL AUTO_INCREMENT, + other_id INTEGER NOT NULL, + PRIMARY KEY (id, other_id) + )PARTITION BY KEY(other_id) PARTITIONS 2 SUBPARTITION BY HASH(some_expr) SUBPARTITIONS 2 + +Case Sensitivity and Table Reflection +------------------------------------- + +Both MySQL and MariaDB have inconsistent support for case-sensitive identifier +names, basing support on specific details of the underlying +operating system. However, it has been observed that no matter +what case sensitivity behavior is present, the names of tables in +foreign key declarations are *always* received from the database +as all-lower case, making it impossible to accurately reflect a +schema where inter-related tables use mixed-case identifier names. + +Therefore it is strongly advised that table names be declared as +all lower case both within SQLAlchemy as well as on the MySQL / MariaDB +database itself, especially if database reflection features are +to be used. + +.. _mysql_isolation_level: + +Transaction Isolation Level +--------------------------- + +All MySQL / MariaDB dialects support setting of transaction isolation level both via a +dialect-specific parameter :paramref:`_sa.create_engine.isolation_level` +accepted +by :func:`_sa.create_engine`, as well as the +:paramref:`.Connection.execution_options.isolation_level` argument as passed to +:meth:`_engine.Connection.execution_options`. +This feature works by issuing the +command ``SET SESSION TRANSACTION ISOLATION LEVEL `` for each new +connection. For the special AUTOCOMMIT isolation level, DBAPI-specific +techniques are used. + +To set isolation level using :func:`_sa.create_engine`:: + + engine = create_engine( + "mysql+mysqldb://scott:tiger@localhost/test", + isolation_level="READ UNCOMMITTED", + ) + +To set using per-connection execution options:: + + connection = engine.connect() + connection = connection.execution_options(isolation_level="READ COMMITTED") + +Valid values for ``isolation_level`` include: + +* ``READ COMMITTED`` +* ``READ UNCOMMITTED`` +* ``REPEATABLE READ`` +* ``SERIALIZABLE`` +* ``AUTOCOMMIT`` + +The special ``AUTOCOMMIT`` value makes use of the various "autocommit" +attributes provided by specific DBAPIs, and is currently supported by +MySQLdb, MySQL-Client, MySQL-Connector Python, and PyMySQL. Using it, +the database connection will return true for the value of +``SELECT @@autocommit;``. + +There are also more options for isolation level configurations, such as +"sub-engine" objects linked to a main :class:`_engine.Engine` which each apply +different isolation level settings. See the discussion at +:ref:`dbapi_autocommit` for background. + +.. 
seealso:: + + :ref:`dbapi_autocommit` + +AUTO_INCREMENT Behavior +----------------------- + +When creating tables, SQLAlchemy will automatically set ``AUTO_INCREMENT`` on +the first :class:`.Integer` primary key column which is not marked as a +foreign key:: + + >>> t = Table( + ... "mytable", metadata, Column("mytable_id", Integer, primary_key=True) + ... ) + >>> t.create() + CREATE TABLE mytable ( + id INTEGER NOT NULL AUTO_INCREMENT, + PRIMARY KEY (id) + ) + +You can disable this behavior by passing ``False`` to the +:paramref:`_schema.Column.autoincrement` argument of :class:`_schema.Column`. +This flag +can also be used to enable auto-increment on a secondary column in a +multi-column key for some storage engines:: + + Table( + "mytable", + metadata, + Column("gid", Integer, primary_key=True, autoincrement=False), + Column("id", Integer, primary_key=True), + ) + +.. _mysql_ss_cursors: + +Server Side Cursors +------------------- + +Server-side cursor support is available for the mysqlclient, PyMySQL, +mariadbconnector dialects and may also be available in others. This makes use +of either the "buffered=True/False" flag if available or by using a class such +as ``MySQLdb.cursors.SSCursor`` or ``pymysql.cursors.SSCursor`` internally. + + +Server side cursors are enabled on a per-statement basis by using the +:paramref:`.Connection.execution_options.stream_results` connection execution +option:: + + with engine.connect() as conn: + result = conn.execution_options(stream_results=True).execute( + text("select * from table") + ) + +Note that some kinds of SQL statements may not be supported with +server side cursors; generally, only SQL statements that return rows should be +used with this option. + +.. deprecated:: 1.4 The dialect-level server_side_cursors flag is deprecated + and will be removed in a future release. Please use the + :paramref:`_engine.Connection.stream_results` execution option for + unbuffered cursor support. + +.. seealso:: + + :ref:`engine_stream_results` + +.. _mysql_unicode: + +Unicode +------- + +Charset Selection +~~~~~~~~~~~~~~~~~ + +Most MySQL / MariaDB DBAPIs offer the option to set the client character set for +a connection. This is typically delivered using the ``charset`` parameter +in the URL, such as:: + + e = create_engine( + "mysql+pymysql://scott:tiger@localhost/test?charset=utf8mb4" + ) + +This charset is the **client character set** for the connection. Some +MySQL DBAPIs will default this to a value such as ``latin1``, and some +will make use of the ``default-character-set`` setting in the ``my.cnf`` +file as well. Documentation for the DBAPI in use should be consulted +for specific behavior. + +The encoding used for Unicode has traditionally been ``'utf8'``. However, for +MySQL versions 5.5.3 and MariaDB 5.5 on forward, a new MySQL-specific encoding +``'utf8mb4'`` has been introduced, and as of MySQL 8.0 a warning is emitted by +the server if plain ``utf8`` is specified within any server-side directives, +replaced with ``utf8mb3``. The rationale for this new encoding is due to the +fact that MySQL's legacy utf-8 encoding only supports codepoints up to three +bytes instead of four. Therefore, when communicating with a MySQL or MariaDB +database that includes codepoints more than three bytes in size, this new +charset is preferred, if supported by both the database as well as the client +DBAPI, as in:: + + e = create_engine( + "mysql+pymysql://scott:tiger@localhost/test?charset=utf8mb4" + ) + +All modern DBAPIs should support the ``utf8mb4`` charset. 
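+A quick way to confirm which character set a connection has actually
+negotiated is to query the session variables directly; a minimal sketch,
+assuming the PyMySQL driver and a reachable server::
+
+    from sqlalchemy import create_engine, text
+
+    engine = create_engine(
+        "mysql+pymysql://scott:tiger@localhost/test?charset=utf8mb4"
+    )
+
+    with engine.connect() as conn:
+        # these session variables should all report utf8mb4 once the
+        # charset parameter on the URL has taken effect
+        row = conn.execute(
+            text(
+                "SELECT @@character_set_client, "
+                "@@character_set_connection, @@character_set_results"
+            )
+        ).one()
+        print(row)
+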
+ +In order to use ``utf8mb4`` encoding for a schema that was created with legacy +``utf8``, changes to the MySQL/MariaDB schema and/or server configuration may be +required. + +.. seealso:: + + `The utf8mb4 Character Set \ + `_ - \ + in the MySQL documentation + +.. _mysql_binary_introducer: + +Dealing with Binary Data Warnings and Unicode +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +MySQL versions 5.6, 5.7 and later (not MariaDB at the time of this writing) now +emit a warning when attempting to pass binary data to the database, while a +character set encoding is also in place, when the binary data itself is not +valid for that encoding: + +.. sourcecode:: text + + default.py:509: Warning: (1300, "Invalid utf8mb4 character string: + 'F9876A'") + cursor.execute(statement, parameters) + +This warning is due to the fact that the MySQL client library is attempting to +interpret the binary string as a unicode object even if a datatype such +as :class:`.LargeBinary` is in use. To resolve this, the SQL statement requires +a binary "character set introducer" be present before any non-NULL value +that renders like this: + +.. sourcecode:: sql + + INSERT INTO table (data) VALUES (_binary %s) + +These character set introducers are provided by the DBAPI driver, assuming the +use of mysqlclient or PyMySQL (both of which are recommended). Add the query +string parameter ``binary_prefix=true`` to the URL to repair this warning:: + + # mysqlclient + engine = create_engine( + "mysql+mysqldb://scott:tiger@localhost/test?charset=utf8mb4&binary_prefix=true" + ) + + # PyMySQL + engine = create_engine( + "mysql+pymysql://scott:tiger@localhost/test?charset=utf8mb4&binary_prefix=true" + ) + +The ``binary_prefix`` flag may or may not be supported by other MySQL drivers. + +SQLAlchemy itself cannot render this ``_binary`` prefix reliably, as it does +not work with the NULL value, which is valid to be sent as a bound parameter. +As the MySQL driver renders parameters directly into the SQL string, it's the +most efficient place for this additional keyword to be passed. + +.. seealso:: + + `Character set introducers `_ - on the MySQL website + + +ANSI Quoting Style +------------------ + +MySQL / MariaDB feature two varieties of identifier "quoting style", one using +backticks and the other using quotes, e.g. ```some_identifier``` vs. +``"some_identifier"``. All MySQL dialects detect which version +is in use by checking the value of :ref:`sql_mode` when a connection is first +established with a particular :class:`_engine.Engine`. +This quoting style comes +into play when rendering table and column names as well as when reflecting +existing database structures. The detection is entirely automatic and +no special configuration is needed to use either quoting style. + + +.. _mysql_sql_mode: + +Changing the sql_mode +--------------------- + +MySQL supports operating in multiple +`Server SQL Modes `_ for +both Servers and Clients. To change the ``sql_mode`` for a given application, a +developer can leverage SQLAlchemy's Events system. 
+ +In the following example, the event system is used to set the ``sql_mode`` on +the ``first_connect`` and ``connect`` events:: + + from sqlalchemy import create_engine, event + + eng = create_engine( + "mysql+mysqldb://scott:tiger@localhost/test", echo="debug" + ) + + + # `insert=True` will ensure this is the very first listener to run + @event.listens_for(eng, "connect", insert=True) + def connect(dbapi_connection, connection_record): + cursor = dbapi_connection.cursor() + cursor.execute("SET sql_mode = 'STRICT_ALL_TABLES'") + + + conn = eng.connect() + +In the example illustrated above, the "connect" event will invoke the "SET" +statement on the connection at the moment a particular DBAPI connection is +first created for a given Pool, before the connection is made available to the +connection pool. Additionally, because the function was registered with +``insert=True``, it will be prepended to the internal list of registered +functions. + + +MySQL / MariaDB SQL Extensions +------------------------------ + +Many of the MySQL / MariaDB SQL extensions are handled through SQLAlchemy's generic +function and operator support:: + + table.select(table.c.password == func.md5("plaintext")) + table.select(table.c.username.op("regexp")("^[a-d]")) + +And of course any valid SQL statement can be executed as a string as well. + +Some limited direct support for MySQL / MariaDB extensions to SQL is currently +available. + +* INSERT..ON DUPLICATE KEY UPDATE: See + :ref:`mysql_insert_on_duplicate_key_update` + +* SELECT pragma, use :meth:`_expression.Select.prefix_with` and + :meth:`_query.Query.prefix_with`:: + + select(...).prefix_with(["HIGH_PRIORITY", "SQL_SMALL_RESULT"]) + +* UPDATE with LIMIT:: + + update(...).with_dialect_options(mysql_limit=10, mariadb_limit=10) + +* DELETE + with LIMIT:: + + delete(...).with_dialect_options(mysql_limit=10, mariadb_limit=10) + + .. versionadded:: 2.0.37 Added delete with limit + +* optimizer hints, use :meth:`_expression.Select.prefix_with` and + :meth:`_query.Query.prefix_with`:: + + select(...).prefix_with("/*+ NO_RANGE_OPTIMIZATION(t4 PRIMARY) */") + +* index hints, use :meth:`_expression.Select.with_hint` and + :meth:`_query.Query.with_hint`:: + + select(...).with_hint(some_table, "USE INDEX xyz") + +* MATCH + operator support:: + + from sqlalchemy.dialects.mysql import match + + select(...).where(match(col1, col2, against="some expr").in_boolean_mode()) + + .. seealso:: + + :class:`_mysql.match` + +INSERT/DELETE...RETURNING +------------------------- + +The MariaDB dialect supports 10.5+'s ``INSERT..RETURNING`` and +``DELETE..RETURNING`` (10.0+) syntaxes. ``INSERT..RETURNING`` may be used +automatically in some cases in order to fetch newly generated identifiers in +place of the traditional approach of using ``cursor.lastrowid``, however +``cursor.lastrowid`` is currently still preferred for simple single-statement +cases for its better performance. + +To specify an explicit ``RETURNING`` clause, use the +:meth:`._UpdateBase.returning` method on a per-statement basis:: + + # INSERT..RETURNING + result = connection.execute( + table.insert().values(name="foo").returning(table.c.col1, table.c.col2) + ) + print(result.all()) + + # DELETE..RETURNING + result = connection.execute( + table.delete() + .where(table.c.name == "foo") + .returning(table.c.col1, table.c.col2) + ) + print(result.all()) + +.. versionadded:: 2.0 Added support for MariaDB RETURNING + +.. 
_mysql_insert_on_duplicate_key_update: + +INSERT...ON DUPLICATE KEY UPDATE (Upsert) +------------------------------------------ + +MySQL / MariaDB allow "upserts" (update or insert) +of rows into a table via the ``ON DUPLICATE KEY UPDATE`` clause of the +``INSERT`` statement. A candidate row will only be inserted if that row does +not match an existing primary or unique key in the table; otherwise, an UPDATE +will be performed. The statement allows for separate specification of the +values to INSERT versus the values for UPDATE. + +SQLAlchemy provides ``ON DUPLICATE KEY UPDATE`` support via the MySQL-specific +:func:`.mysql.insert()` function, which provides +the generative method :meth:`~.mysql.Insert.on_duplicate_key_update`: + +.. sourcecode:: pycon+sql + + >>> from sqlalchemy.dialects.mysql import insert + + >>> insert_stmt = insert(my_table).values( + ... id="some_existing_id", data="inserted value" + ... ) + + >>> on_duplicate_key_stmt = insert_stmt.on_duplicate_key_update( + ... data=insert_stmt.inserted.data, status="U" + ... ) + >>> print(on_duplicate_key_stmt) + {printsql}INSERT INTO my_table (id, data) VALUES (%s, %s) + ON DUPLICATE KEY UPDATE data = VALUES(data), status = %s + + +Unlike PostgreSQL's "ON CONFLICT" phrase, the "ON DUPLICATE KEY UPDATE" +phrase will always match on any primary key or unique key, and will always +perform an UPDATE if there's a match; there are no options for it to raise +an error or to skip performing an UPDATE. + +``ON DUPLICATE KEY UPDATE`` is used to perform an update of the already +existing row, using any combination of new values as well as values +from the proposed insertion. These values are normally specified using +keyword arguments passed to the +:meth:`_mysql.Insert.on_duplicate_key_update` +given column key values (usually the name of the column, unless it +specifies :paramref:`_schema.Column.key` +) as keys and literal or SQL expressions +as values: + +.. sourcecode:: pycon+sql + + >>> insert_stmt = insert(my_table).values( + ... id="some_existing_id", data="inserted value" + ... ) + + >>> on_duplicate_key_stmt = insert_stmt.on_duplicate_key_update( + ... data="some data", + ... updated_at=func.current_timestamp(), + ... ) + + >>> print(on_duplicate_key_stmt) + {printsql}INSERT INTO my_table (id, data) VALUES (%s, %s) + ON DUPLICATE KEY UPDATE data = %s, updated_at = CURRENT_TIMESTAMP + +In a manner similar to that of :meth:`.UpdateBase.values`, other parameter +forms are accepted, including a single dictionary: + +.. sourcecode:: pycon+sql + + >>> on_duplicate_key_stmt = insert_stmt.on_duplicate_key_update( + ... {"data": "some data", "updated_at": func.current_timestamp()}, + ... ) + +as well as a list of 2-tuples, which will automatically provide +a parameter-ordered UPDATE statement in a manner similar to that described +at :ref:`tutorial_parameter_ordered_updates`. Unlike the :class:`_expression.Update` +object, +no special flag is needed to specify the intent since the argument form is +this context is unambiguous: + +.. sourcecode:: pycon+sql + + >>> on_duplicate_key_stmt = insert_stmt.on_duplicate_key_update( + ... [ + ... ("data", "some data"), + ... ("updated_at", func.current_timestamp()), + ... ] + ... ) + + >>> print(on_duplicate_key_stmt) + {printsql}INSERT INTO my_table (id, data) VALUES (%s, %s) + ON DUPLICATE KEY UPDATE data = %s, updated_at = CURRENT_TIMESTAMP + +.. versionchanged:: 1.3 support for parameter-ordered UPDATE clause within + MySQL ON DUPLICATE KEY UPDATE + +.. 
warning:: + + The :meth:`_mysql.Insert.on_duplicate_key_update` + method does **not** take into + account Python-side default UPDATE values or generation functions, e.g. + e.g. those specified using :paramref:`_schema.Column.onupdate`. + These values will not be exercised for an ON DUPLICATE KEY style of UPDATE, + unless they are manually specified explicitly in the parameters. + + + +In order to refer to the proposed insertion row, the special alias +:attr:`_mysql.Insert.inserted` is available as an attribute on +the :class:`_mysql.Insert` object; this object is a +:class:`_expression.ColumnCollection` which contains all columns of the target +table: + +.. sourcecode:: pycon+sql + + >>> stmt = insert(my_table).values( + ... id="some_id", data="inserted value", author="jlh" + ... ) + + >>> do_update_stmt = stmt.on_duplicate_key_update( + ... data="updated value", author=stmt.inserted.author + ... ) + + >>> print(do_update_stmt) + {printsql}INSERT INTO my_table (id, data, author) VALUES (%s, %s, %s) + ON DUPLICATE KEY UPDATE data = %s, author = VALUES(author) + +When rendered, the "inserted" namespace will produce the expression +``VALUES()``. + +.. versionadded:: 1.2 Added support for MySQL ON DUPLICATE KEY UPDATE clause + + + +rowcount Support +---------------- + +SQLAlchemy standardizes the DBAPI ``cursor.rowcount`` attribute to be the +usual definition of "number of rows matched by an UPDATE or DELETE" statement. +This is in contradiction to the default setting on most MySQL DBAPI drivers, +which is "number of rows actually modified/deleted". For this reason, the +SQLAlchemy MySQL dialects always add the ``constants.CLIENT.FOUND_ROWS`` +flag, or whatever is equivalent for the target dialect, upon connection. +This setting is currently hardcoded. + +.. seealso:: + + :attr:`_engine.CursorResult.rowcount` + + +.. _mysql_indexes: + +MySQL / MariaDB- Specific Index Options +----------------------------------------- + +MySQL and MariaDB-specific extensions to the :class:`.Index` construct are available. + +Index Length +~~~~~~~~~~~~~ + +MySQL and MariaDB both provide an option to create index entries with a certain length, where +"length" refers to the number of characters or bytes in each value which will +become part of the index. SQLAlchemy provides this feature via the +``mysql_length`` and/or ``mariadb_length`` parameters:: + + Index("my_index", my_table.c.data, mysql_length=10, mariadb_length=10) + + Index("a_b_idx", my_table.c.a, my_table.c.b, mysql_length={"a": 4, "b": 9}) + + Index( + "a_b_idx", my_table.c.a, my_table.c.b, mariadb_length={"a": 4, "b": 9} + ) + +Prefix lengths are given in characters for nonbinary string types and in bytes +for binary string types. The value passed to the keyword argument *must* be +either an integer (and, thus, specify the same prefix length value for all +columns of the index) or a dict in which keys are column names and values are +prefix length values for corresponding columns. MySQL and MariaDB only allow a +length for a column of an index if it is for a CHAR, VARCHAR, TEXT, BINARY, +VARBINARY and BLOB. + +Index Prefixes +~~~~~~~~~~~~~~ + +MySQL storage engines permit you to specify an index prefix when creating +an index. SQLAlchemy provides this feature via the +``mysql_prefix`` parameter on :class:`.Index`:: + + Index("my_index", my_table.c.data, mysql_prefix="FULLTEXT") + +The value passed to the keyword argument will be simply passed through to the +underlying CREATE INDEX, so it *must* be a valid index prefix for your MySQL +storage engine. 
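+The effect of the prefix on the generated DDL can be previewed without a
+database connection by compiling the :class:`.CreateIndex` construct against
+the MySQL dialect; a minimal sketch using hypothetical table and index names::
+
+    from sqlalchemy import Column, Index, MetaData, String, Table
+    from sqlalchemy.dialects import mysql
+    from sqlalchemy.schema import CreateIndex
+
+    metadata = MetaData()
+    my_table = Table("mytable", metadata, Column("data", String(255)))
+    idx = Index("my_index", my_table.c.data, mysql_prefix="FULLTEXT")
+
+    # renders: CREATE FULLTEXT INDEX my_index ON mytable (data)
+    print(CreateIndex(idx).compile(dialect=mysql.dialect()))
+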
+ +.. seealso:: + + `CREATE INDEX `_ - MySQL documentation + +Index Types +~~~~~~~~~~~~~ + +Some MySQL storage engines permit you to specify an index type when creating +an index or primary key constraint. SQLAlchemy provides this feature via the +``mysql_using`` parameter on :class:`.Index`:: + + Index( + "my_index", my_table.c.data, mysql_using="hash", mariadb_using="hash" + ) + +As well as the ``mysql_using`` parameter on :class:`.PrimaryKeyConstraint`:: + + PrimaryKeyConstraint("data", mysql_using="hash", mariadb_using="hash") + +The value passed to the keyword argument will be simply passed through to the +underlying CREATE INDEX or PRIMARY KEY clause, so it *must* be a valid index +type for your MySQL storage engine. + +More information can be found at: + +https://dev.mysql.com/doc/refman/5.0/en/create-index.html + +https://dev.mysql.com/doc/refman/5.0/en/create-table.html + +Index Parsers +~~~~~~~~~~~~~ + +CREATE FULLTEXT INDEX in MySQL also supports a "WITH PARSER" option. This +is available using the keyword argument ``mysql_with_parser``:: + + Index( + "my_index", + my_table.c.data, + mysql_prefix="FULLTEXT", + mysql_with_parser="ngram", + mariadb_prefix="FULLTEXT", + mariadb_with_parser="ngram", + ) + +.. versionadded:: 1.3 + + +.. _mysql_foreign_keys: + +MySQL / MariaDB Foreign Keys +----------------------------- + +MySQL and MariaDB's behavior regarding foreign keys has some important caveats. + +Foreign Key Arguments to Avoid +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Neither MySQL nor MariaDB support the foreign key arguments "DEFERRABLE", "INITIALLY", +or "MATCH". Using the ``deferrable`` or ``initially`` keyword argument with +:class:`_schema.ForeignKeyConstraint` or :class:`_schema.ForeignKey` +will have the effect of +these keywords being rendered in a DDL expression, which will then raise an +error on MySQL or MariaDB. In order to use these keywords on a foreign key while having +them ignored on a MySQL / MariaDB backend, use a custom compile rule:: + + from sqlalchemy.ext.compiler import compiles + from sqlalchemy.schema import ForeignKeyConstraint + + + @compiles(ForeignKeyConstraint, "mysql", "mariadb") + def process(element, compiler, **kw): + element.deferrable = element.initially = None + return compiler.visit_foreign_key_constraint(element, **kw) + +The "MATCH" keyword is in fact more insidious, and is explicitly disallowed +by SQLAlchemy in conjunction with the MySQL or MariaDB backends. This argument is +silently ignored by MySQL / MariaDB, but in addition has the effect of ON UPDATE and ON +DELETE options also being ignored by the backend. Therefore MATCH should +never be used with the MySQL / MariaDB backends; as is the case with DEFERRABLE and +INITIALLY, custom compilation rules can be used to correct a +ForeignKeyConstraint at DDL definition time. + +Reflection of Foreign Key Constraints +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Not all MySQL / MariaDB storage engines support foreign keys. When using the +very common ``MyISAM`` MySQL storage engine, the information loaded by table +reflection will not include foreign keys. For these tables, you may supply a +:class:`~sqlalchemy.ForeignKeyConstraint` at reflection time:: + + Table( + "mytable", + metadata, + ForeignKeyConstraint(["other_id"], ["othertable.other_id"]), + autoload_with=engine, + ) + +.. seealso:: + + :ref:`mysql_storage_engines` + +.. 
_mysql_unique_constraints: + +MySQL / MariaDB Unique Constraints and Reflection +---------------------------------------------------- + +SQLAlchemy supports both the :class:`.Index` construct with the +flag ``unique=True``, indicating a UNIQUE index, as well as the +:class:`.UniqueConstraint` construct, representing a UNIQUE constraint. +Both objects/syntaxes are supported by MySQL / MariaDB when emitting DDL to create +these constraints. However, MySQL / MariaDB does not have a unique constraint +construct that is separate from a unique index; that is, the "UNIQUE" +constraint on MySQL / MariaDB is equivalent to creating a "UNIQUE INDEX". + +When reflecting these constructs, the +:meth:`_reflection.Inspector.get_indexes` +and the :meth:`_reflection.Inspector.get_unique_constraints` +methods will **both** +return an entry for a UNIQUE index in MySQL / MariaDB. However, when performing +full table reflection using ``Table(..., autoload_with=engine)``, +the :class:`.UniqueConstraint` construct is +**not** part of the fully reflected :class:`_schema.Table` construct under any +circumstances; this construct is always represented by a :class:`.Index` +with the ``unique=True`` setting present in the :attr:`_schema.Table.indexes` +collection. + + +TIMESTAMP / DATETIME issues +--------------------------- + +.. _mysql_timestamp_onupdate: + +Rendering ON UPDATE CURRENT TIMESTAMP for MySQL / MariaDB's explicit_defaults_for_timestamp +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +MySQL / MariaDB have historically expanded the DDL for the :class:`_types.TIMESTAMP` +datatype into the phrase "TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE +CURRENT_TIMESTAMP", which includes non-standard SQL that automatically updates +the column with the current timestamp when an UPDATE occurs, eliminating the +usual need to use a trigger in such a case where server-side update changes are +desired. + +MySQL 5.6 introduced a new flag `explicit_defaults_for_timestamp +`_ which disables the above behavior, +and in MySQL 8 this flag defaults to true, meaning in order to get a MySQL +"on update timestamp" without changing this flag, the above DDL must be +rendered explicitly. Additionally, the same DDL is valid for use of the +``DATETIME`` datatype as well. + +SQLAlchemy's MySQL dialect does not yet have an option to generate +MySQL's "ON UPDATE CURRENT_TIMESTAMP" clause, noting that this is not a general +purpose "ON UPDATE" as there is no such syntax in standard SQL. SQLAlchemy's +:paramref:`_schema.Column.server_onupdate` parameter is currently not related +to this special MySQL behavior. 
+ +To generate this DDL, make use of the :paramref:`_schema.Column.server_default` +parameter and pass a textual clause that also includes the ON UPDATE clause:: + + from sqlalchemy import Table, MetaData, Column, Integer, String, TIMESTAMP + from sqlalchemy import text + + metadata = MetaData() + + mytable = Table( + "mytable", + metadata, + Column("id", Integer, primary_key=True), + Column("data", String(50)), + Column( + "last_updated", + TIMESTAMP, + server_default=text( + "CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP" + ), + ), + ) + +The same instructions apply to use of the :class:`_types.DateTime` and +:class:`_types.DATETIME` datatypes:: + + from sqlalchemy import DateTime + + mytable = Table( + "mytable", + metadata, + Column("id", Integer, primary_key=True), + Column("data", String(50)), + Column( + "last_updated", + DateTime, + server_default=text( + "CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP" + ), + ), + ) + +Even though the :paramref:`_schema.Column.server_onupdate` feature does not +generate this DDL, it still may be desirable to signal to the ORM that this +updated value should be fetched. This syntax looks like the following:: + + from sqlalchemy.schema import FetchedValue + + + class MyClass(Base): + __tablename__ = "mytable" + + id = Column(Integer, primary_key=True) + data = Column(String(50)) + last_updated = Column( + TIMESTAMP, + server_default=text( + "CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP" + ), + server_onupdate=FetchedValue(), + ) + +.. _mysql_timestamp_null: + +TIMESTAMP Columns and NULL +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +MySQL historically enforces that a column which specifies the +TIMESTAMP datatype implicitly includes a default value of +CURRENT_TIMESTAMP, even though this is not stated, and additionally +sets the column as NOT NULL, the opposite behavior vs. that of all +other datatypes: + +.. sourcecode:: text + + mysql> CREATE TABLE ts_test ( + -> a INTEGER, + -> b INTEGER NOT NULL, + -> c TIMESTAMP, + -> d TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + -> e TIMESTAMP NULL); + Query OK, 0 rows affected (0.03 sec) + + mysql> SHOW CREATE TABLE ts_test; + +---------+----------------------------------------------------- + | Table | Create Table + +---------+----------------------------------------------------- + | ts_test | CREATE TABLE `ts_test` ( + `a` int(11) DEFAULT NULL, + `b` int(11) NOT NULL, + `c` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, + `d` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, + `e` timestamp NULL DEFAULT NULL + ) ENGINE=MyISAM DEFAULT CHARSET=latin1 + +Above, we see that an INTEGER column defaults to NULL, unless it is specified +with NOT NULL. But when the column is of type TIMESTAMP, an implicit +default of CURRENT_TIMESTAMP is generated which also coerces the column +to be a NOT NULL, even though we did not specify it as such. + +This behavior of MySQL can be changed on the MySQL side using the +`explicit_defaults_for_timestamp +`_ configuration flag introduced in +MySQL 5.6. With this server setting enabled, TIMESTAMP columns behave like +any other datatype on the MySQL side with regards to defaults and nullability. + +However, to accommodate the vast majority of MySQL databases that do not +specify this new flag, SQLAlchemy emits the "NULL" specifier explicitly with +any TIMESTAMP column that does not specify ``nullable=False``. 
In order to +accommodate newer databases that specify ``explicit_defaults_for_timestamp``, +SQLAlchemy also emits NOT NULL for TIMESTAMP columns that do specify +``nullable=False``. The following example illustrates:: + + from sqlalchemy import MetaData, Integer, Table, Column, text + from sqlalchemy.dialects.mysql import TIMESTAMP + + m = MetaData() + t = Table( + "ts_test", + m, + Column("a", Integer), + Column("b", Integer, nullable=False), + Column("c", TIMESTAMP), + Column("d", TIMESTAMP, nullable=False), + ) + + + from sqlalchemy import create_engine + + e = create_engine("mysql+mysqldb://scott:tiger@localhost/test", echo=True) + m.create_all(e) + +output: + +.. sourcecode:: sql + + CREATE TABLE ts_test ( + a INTEGER, + b INTEGER NOT NULL, + c TIMESTAMP NULL, + d TIMESTAMP NOT NULL + ) + +""" # noqa +from __future__ import annotations + +from array import array as _array +from collections import defaultdict +from itertools import compress +import re +from typing import cast + +from . import reflection as _reflection +from .enumerated import ENUM +from .enumerated import SET +from .json import JSON +from .json import JSONIndexType +from .json import JSONPathType +from .reserved_words import RESERVED_WORDS_MARIADB +from .reserved_words import RESERVED_WORDS_MYSQL +from .types import _FloatType +from .types import _IntegerType +from .types import _MatchType +from .types import _NumericType +from .types import _StringType +from .types import BIGINT +from .types import BIT +from .types import CHAR +from .types import DATETIME +from .types import DECIMAL +from .types import DOUBLE +from .types import FLOAT +from .types import INTEGER +from .types import LONGBLOB +from .types import LONGTEXT +from .types import MEDIUMBLOB +from .types import MEDIUMINT +from .types import MEDIUMTEXT +from .types import NCHAR +from .types import NUMERIC +from .types import NVARCHAR +from .types import REAL +from .types import SMALLINT +from .types import TEXT +from .types import TIME +from .types import TIMESTAMP +from .types import TINYBLOB +from .types import TINYINT +from .types import TINYTEXT +from .types import VARCHAR +from .types import YEAR +from ... import exc +from ... import literal_column +from ... import log +from ... import schema as sa_schema +from ... import sql +from ... 
import util +from ...engine import cursor as _cursor +from ...engine import default +from ...engine import reflection +from ...engine.reflection import ReflectionDefaults +from ...sql import coercions +from ...sql import compiler +from ...sql import elements +from ...sql import functions +from ...sql import operators +from ...sql import roles +from ...sql import sqltypes +from ...sql import util as sql_util +from ...sql import visitors +from ...sql.compiler import InsertmanyvaluesSentinelOpts +from ...sql.compiler import SQLCompiler +from ...sql.schema import SchemaConst +from ...types import BINARY +from ...types import BLOB +from ...types import BOOLEAN +from ...types import DATE +from ...types import UUID +from ...types import VARBINARY +from ...util import topological + + +SET_RE = re.compile( + r"\s*SET\s+(?:(?:GLOBAL|SESSION)\s+)?\w", re.I | re.UNICODE +) + +# old names +MSTime = TIME +MSSet = SET +MSEnum = ENUM +MSLongBlob = LONGBLOB +MSMediumBlob = MEDIUMBLOB +MSTinyBlob = TINYBLOB +MSBlob = BLOB +MSBinary = BINARY +MSVarBinary = VARBINARY +MSNChar = NCHAR +MSNVarChar = NVARCHAR +MSChar = CHAR +MSString = VARCHAR +MSLongText = LONGTEXT +MSMediumText = MEDIUMTEXT +MSTinyText = TINYTEXT +MSText = TEXT +MSYear = YEAR +MSTimeStamp = TIMESTAMP +MSBit = BIT +MSSmallInteger = SMALLINT +MSTinyInteger = TINYINT +MSMediumInteger = MEDIUMINT +MSBigInteger = BIGINT +MSNumeric = NUMERIC +MSDecimal = DECIMAL +MSDouble = DOUBLE +MSReal = REAL +MSFloat = FLOAT +MSInteger = INTEGER + +colspecs = { + _IntegerType: _IntegerType, + _NumericType: _NumericType, + _FloatType: _FloatType, + sqltypes.Numeric: NUMERIC, + sqltypes.Float: FLOAT, + sqltypes.Double: DOUBLE, + sqltypes.Time: TIME, + sqltypes.Enum: ENUM, + sqltypes.MatchType: _MatchType, + sqltypes.JSON: JSON, + sqltypes.JSON.JSONIndexType: JSONIndexType, + sqltypes.JSON.JSONPathType: JSONPathType, +} + +# Everything 3.23 through 5.1 excepting OpenGIS types. +ischema_names = { + "bigint": BIGINT, + "binary": BINARY, + "bit": BIT, + "blob": BLOB, + "boolean": BOOLEAN, + "char": CHAR, + "date": DATE, + "datetime": DATETIME, + "decimal": DECIMAL, + "double": DOUBLE, + "enum": ENUM, + "fixed": DECIMAL, + "float": FLOAT, + "int": INTEGER, + "integer": INTEGER, + "json": JSON, + "longblob": LONGBLOB, + "longtext": LONGTEXT, + "mediumblob": MEDIUMBLOB, + "mediumint": MEDIUMINT, + "mediumtext": MEDIUMTEXT, + "nchar": NCHAR, + "nvarchar": NVARCHAR, + "numeric": NUMERIC, + "set": SET, + "smallint": SMALLINT, + "text": TEXT, + "time": TIME, + "timestamp": TIMESTAMP, + "tinyblob": TINYBLOB, + "tinyint": TINYINT, + "tinytext": TINYTEXT, + "uuid": UUID, + "varbinary": VARBINARY, + "varchar": VARCHAR, + "year": YEAR, +} + + +class MySQLExecutionContext(default.DefaultExecutionContext): + def post_exec(self): + if ( + self.isdelete + and cast(SQLCompiler, self.compiled).effective_returning + and not self.cursor.description + ): + # All MySQL/mariadb drivers appear to not include + # cursor.description for DELETE..RETURNING with no rows if the + # WHERE criteria is a straight "false" condition such as our EMPTY + # IN condition. 
manufacture an empty result in this case (issue + # #10505) + # + # taken from cx_Oracle implementation + self.cursor_fetch_strategy = ( + _cursor.FullyBufferedCursorFetchStrategy( + self.cursor, + [ + (entry.keyname, None) + for entry in cast( + SQLCompiler, self.compiled + )._result_columns + ], + [], + ) + ) + + def create_server_side_cursor(self): + if self.dialect.supports_server_side_cursors: + return self._dbapi_connection.cursor(self.dialect._sscursor) + else: + raise NotImplementedError() + + def fire_sequence(self, seq, type_): + return self._execute_scalar( + ( + "select nextval(%s)" + % self.identifier_preparer.format_sequence(seq) + ), + type_, + ) + + +class MySQLCompiler(compiler.SQLCompiler): + render_table_with_column_in_update_from = True + """Overridden from base SQLCompiler value""" + + extract_map = compiler.SQLCompiler.extract_map.copy() + extract_map.update({"milliseconds": "millisecond"}) + + def default_from(self): + """Called when a ``SELECT`` statement has no froms, + and no ``FROM`` clause is to be appended. + + """ + if self.stack: + stmt = self.stack[-1]["selectable"] + if stmt._where_criteria: + return " FROM DUAL" + + return "" + + def visit_random_func(self, fn, **kw): + return "rand%s" % self.function_argspec(fn) + + def visit_rollup_func(self, fn, **kw): + clause = ", ".join( + elem._compiler_dispatch(self, **kw) for elem in fn.clauses + ) + return f"{clause} WITH ROLLUP" + + def visit_aggregate_strings_func(self, fn, **kw): + expr, delimeter = ( + elem._compiler_dispatch(self, **kw) for elem in fn.clauses + ) + return f"group_concat({expr} SEPARATOR {delimeter})" + + def visit_sequence(self, seq, **kw): + return "nextval(%s)" % self.preparer.format_sequence(seq) + + def visit_sysdate_func(self, fn, **kw): + return "SYSDATE()" + + def _render_json_extract_from_binary(self, binary, operator, **kw): + # note we are intentionally calling upon the process() calls in the + # order in which they appear in the SQL String as this is used + # by positional parameter rendering + + if binary.type._type_affinity is sqltypes.JSON: + return "JSON_EXTRACT(%s, %s)" % ( + self.process(binary.left, **kw), + self.process(binary.right, **kw), + ) + + # for non-JSON, MySQL doesn't handle JSON null at all so it has to + # be explicit + case_expression = "CASE JSON_EXTRACT(%s, %s) WHEN 'null' THEN NULL" % ( + self.process(binary.left, **kw), + self.process(binary.right, **kw), + ) + + if binary.type._type_affinity is sqltypes.Integer: + type_expression = ( + "ELSE CAST(JSON_EXTRACT(%s, %s) AS SIGNED INTEGER)" + % ( + self.process(binary.left, **kw), + self.process(binary.right, **kw), + ) + ) + elif binary.type._type_affinity is sqltypes.Numeric: + if ( + binary.type.scale is not None + and binary.type.precision is not None + ): + # using DECIMAL here because MySQL does not recognize NUMERIC + type_expression = ( + "ELSE CAST(JSON_EXTRACT(%s, %s) AS DECIMAL(%s, %s))" + % ( + self.process(binary.left, **kw), + self.process(binary.right, **kw), + binary.type.precision, + binary.type.scale, + ) + ) + else: + # FLOAT / REAL not added in MySQL til 8.0.17 + type_expression = ( + "ELSE JSON_EXTRACT(%s, %s)+0.0000000000000000000000" + % ( + self.process(binary.left, **kw), + self.process(binary.right, **kw), + ) + ) + elif binary.type._type_affinity is sqltypes.Boolean: + # the NULL handling is particularly weird with boolean, so + # explicitly return true/false constants + type_expression = "WHEN true THEN true ELSE false" + elif binary.type._type_affinity is sqltypes.String: + # 
(gord): this fails with a JSON value that's a four byte unicode + # string. SQLite has the same problem at the moment + # (zzzeek): I'm not really sure. let's take a look at a test case + # that hits each backend and maybe make a requires rule for it? + type_expression = "ELSE JSON_UNQUOTE(JSON_EXTRACT(%s, %s))" % ( + self.process(binary.left, **kw), + self.process(binary.right, **kw), + ) + else: + # other affinity....this is not expected right now + type_expression = "ELSE JSON_EXTRACT(%s, %s)" % ( + self.process(binary.left, **kw), + self.process(binary.right, **kw), + ) + + return case_expression + " " + type_expression + " END" + + def visit_json_getitem_op_binary(self, binary, operator, **kw): + return self._render_json_extract_from_binary(binary, operator, **kw) + + def visit_json_path_getitem_op_binary(self, binary, operator, **kw): + return self._render_json_extract_from_binary(binary, operator, **kw) + + def visit_on_duplicate_key_update(self, on_duplicate, **kw): + statement = self.current_executable + + if on_duplicate._parameter_ordering: + parameter_ordering = [ + coercions.expect(roles.DMLColumnRole, key) + for key in on_duplicate._parameter_ordering + ] + ordered_keys = set(parameter_ordering) + cols = [ + statement.table.c[key] + for key in parameter_ordering + if key in statement.table.c + ] + [c for c in statement.table.c if c.key not in ordered_keys] + else: + cols = statement.table.c + + clauses = [] + + requires_mysql8_alias = statement.select is None and ( + self.dialect._requires_alias_for_on_duplicate_key + ) + + if requires_mysql8_alias: + if statement.table.name.lower() == "new": + _on_dup_alias_name = "new_1" + else: + _on_dup_alias_name = "new" + + on_duplicate_update = { + coercions.expect_as_key(roles.DMLColumnRole, key): value + for key, value in on_duplicate.update.items() + } + + # traverses through all table columns to preserve table column order + for column in (col for col in cols if col.key in on_duplicate_update): + val = on_duplicate_update[column.key] + + # TODO: this coercion should be up front. we can't cache + # SQL constructs with non-bound literals buried in them + if coercions._is_literal(val): + val = elements.BindParameter(None, val, type_=column.type) + value_text = self.process(val.self_group(), use_schema=False) + else: + + def replace(obj): + if ( + isinstance(obj, elements.BindParameter) + and obj.type._isnull + ): + obj = obj._clone() + obj.type = column.type + return obj + elif ( + isinstance(obj, elements.ColumnClause) + and obj.table is on_duplicate.inserted_alias + ): + if requires_mysql8_alias: + column_literal_clause = ( + f"{_on_dup_alias_name}." 
+ f"{self.preparer.quote(obj.name)}" + ) + else: + column_literal_clause = ( + f"VALUES({self.preparer.quote(obj.name)})" + ) + return literal_column(column_literal_clause) + else: + # element is not replaced + return None + + val = visitors.replacement_traverse(val, {}, replace) + value_text = self.process(val.self_group(), use_schema=False) + + name_text = self.preparer.quote(column.name) + clauses.append("%s = %s" % (name_text, value_text)) + + non_matching = set(on_duplicate_update) - {c.key for c in cols} + if non_matching: + util.warn( + "Additional column names not matching " + "any column keys in table '%s': %s" + % ( + self.statement.table.name, + (", ".join("'%s'" % c for c in non_matching)), + ) + ) + + if requires_mysql8_alias: + return ( + f"AS {_on_dup_alias_name} " + f"ON DUPLICATE KEY UPDATE {', '.join(clauses)}" + ) + else: + return f"ON DUPLICATE KEY UPDATE {', '.join(clauses)}" + + def visit_concat_op_expression_clauselist( + self, clauselist, operator, **kw + ): + return "concat(%s)" % ( + ", ".join(self.process(elem, **kw) for elem in clauselist.clauses) + ) + + def visit_concat_op_binary(self, binary, operator, **kw): + return "concat(%s, %s)" % ( + self.process(binary.left, **kw), + self.process(binary.right, **kw), + ) + + _match_valid_flag_combinations = frozenset( + ( + # (boolean_mode, natural_language, query_expansion) + (False, False, False), + (True, False, False), + (False, True, False), + (False, False, True), + (False, True, True), + ) + ) + + _match_flag_expressions = ( + "IN BOOLEAN MODE", + "IN NATURAL LANGUAGE MODE", + "WITH QUERY EXPANSION", + ) + + def visit_mysql_match(self, element, **kw): + return self.visit_match_op_binary(element, element.operator, **kw) + + def visit_match_op_binary(self, binary, operator, **kw): + """ + Note that `mysql_boolean_mode` is enabled by default because of + backward compatibility + """ + + modifiers = binary.modifiers + + boolean_mode = modifiers.get("mysql_boolean_mode", True) + natural_language = modifiers.get("mysql_natural_language", False) + query_expansion = modifiers.get("mysql_query_expansion", False) + + flag_combination = (boolean_mode, natural_language, query_expansion) + + if flag_combination not in self._match_valid_flag_combinations: + flags = ( + "in_boolean_mode=%s" % boolean_mode, + "in_natural_language_mode=%s" % natural_language, + "with_query_expansion=%s" % query_expansion, + ) + + flags = ", ".join(flags) + + raise exc.CompileError("Invalid MySQL match flags: %s" % flags) + + match_clause = binary.left + match_clause = self.process(match_clause, **kw) + against_clause = self.process(binary.right, **kw) + + if any(flag_combination): + flag_expressions = compress( + self._match_flag_expressions, + flag_combination, + ) + + against_clause = [against_clause] + against_clause.extend(flag_expressions) + + against_clause = " ".join(against_clause) + + return "MATCH (%s) AGAINST (%s)" % (match_clause, against_clause) + + def get_from_hint_text(self, table, text): + return text + + def visit_typeclause(self, typeclause, type_=None, **kw): + if type_ is None: + type_ = typeclause.type.dialect_impl(self.dialect) + if isinstance(type_, sqltypes.TypeDecorator): + return self.visit_typeclause(typeclause, type_.impl, **kw) + elif isinstance(type_, sqltypes.Integer): + if getattr(type_, "unsigned", False): + return "UNSIGNED INTEGER" + else: + return "SIGNED INTEGER" + elif isinstance(type_, sqltypes.TIMESTAMP): + return "DATETIME" + elif isinstance( + type_, + ( + sqltypes.DECIMAL, + sqltypes.DateTime, + 
sqltypes.Date, + sqltypes.Time, + ), + ): + return self.dialect.type_compiler_instance.process(type_) + elif isinstance(type_, sqltypes.String) and not isinstance( + type_, (ENUM, SET) + ): + adapted = CHAR._adapt_string_for_cast(type_) + return self.dialect.type_compiler_instance.process(adapted) + elif isinstance(type_, sqltypes._Binary): + return "BINARY" + elif isinstance(type_, sqltypes.JSON): + return "JSON" + elif isinstance(type_, sqltypes.NUMERIC): + return self.dialect.type_compiler_instance.process(type_).replace( + "NUMERIC", "DECIMAL" + ) + elif ( + isinstance(type_, sqltypes.Float) + and self.dialect._support_float_cast + ): + return self.dialect.type_compiler_instance.process(type_) + else: + return None + + def visit_cast(self, cast, **kw): + type_ = self.process(cast.typeclause) + if type_ is None: + util.warn( + "Datatype %s does not support CAST on MySQL/MariaDb; " + "the CAST will be skipped." + % self.dialect.type_compiler_instance.process( + cast.typeclause.type + ) + ) + return self.process(cast.clause.self_group(), **kw) + + return "CAST(%s AS %s)" % (self.process(cast.clause, **kw), type_) + + def render_literal_value(self, value, type_): + value = super().render_literal_value(value, type_) + if self.dialect._backslash_escapes: + value = value.replace("\\", "\\\\") + return value + + # override native_boolean=False behavior here, as + # MySQL still supports native boolean + def visit_true(self, element, **kw): + return "true" + + def visit_false(self, element, **kw): + return "false" + + def get_select_precolumns(self, select, **kw): + """Add special MySQL keywords in place of DISTINCT. + + .. deprecated:: 1.4 This usage is deprecated. + :meth:`_expression.Select.prefix_with` should be used for special + keywords at the start of a SELECT. + + """ + if isinstance(select._distinct, str): + util.warn_deprecated( + "Sending string values for 'distinct' is deprecated in the " + "MySQL dialect and will be removed in a future release. 
" + "Please use :meth:`.Select.prefix_with` for special keywords " + "at the start of a SELECT statement", + version="1.4", + ) + return select._distinct.upper() + " " + + return super().get_select_precolumns(select, **kw) + + def visit_join(self, join, asfrom=False, from_linter=None, **kwargs): + if from_linter: + from_linter.edges.add((join.left, join.right)) + + if join.full: + join_type = " FULL OUTER JOIN " + elif join.isouter: + join_type = " LEFT OUTER JOIN " + else: + join_type = " INNER JOIN " + + return "".join( + ( + self.process( + join.left, asfrom=True, from_linter=from_linter, **kwargs + ), + join_type, + self.process( + join.right, asfrom=True, from_linter=from_linter, **kwargs + ), + " ON ", + self.process(join.onclause, from_linter=from_linter, **kwargs), + ) + ) + + def for_update_clause(self, select, **kw): + if select._for_update_arg.read: + tmp = " LOCK IN SHARE MODE" + else: + tmp = " FOR UPDATE" + + if select._for_update_arg.of and self.dialect.supports_for_update_of: + tables = util.OrderedSet() + for c in select._for_update_arg.of: + tables.update(sql_util.surface_selectables_only(c)) + + tmp += " OF " + ", ".join( + self.process(table, ashint=True, use_schema=False, **kw) + for table in tables + ) + + if select._for_update_arg.nowait: + tmp += " NOWAIT" + + if select._for_update_arg.skip_locked: + tmp += " SKIP LOCKED" + + return tmp + + def limit_clause(self, select, **kw): + # MySQL supports: + # LIMIT + # LIMIT , + # and in server versions > 3.3: + # LIMIT OFFSET + # The latter is more readable for offsets but we're stuck with the + # former until we can refine dialects by server revision. + + limit_clause, offset_clause = ( + select._limit_clause, + select._offset_clause, + ) + + if limit_clause is None and offset_clause is None: + return "" + elif offset_clause is not None: + # As suggested by the MySQL docs, need to apply an + # artificial limit if one wasn't provided + # https://dev.mysql.com/doc/refman/5.0/en/select.html + if limit_clause is None: + # TODO: remove ?? + # hardwire the upper limit. Currently + # needed consistent with the usage of the upper + # bound as part of MySQL's "syntax" for OFFSET with + # no LIMIT. 
+ return " \n LIMIT %s, %s" % ( + self.process(offset_clause, **kw), + "18446744073709551615", + ) + else: + return " \n LIMIT %s, %s" % ( + self.process(offset_clause, **kw), + self.process(limit_clause, **kw), + ) + else: + # No offset provided, so just use the limit + return " \n LIMIT %s" % (self.process(limit_clause, **kw),) + + def update_limit_clause(self, update_stmt): + limit = update_stmt.kwargs.get("%s_limit" % self.dialect.name, None) + if limit is not None: + return f"LIMIT {int(limit)}" + else: + return None + + def delete_limit_clause(self, delete_stmt): + limit = delete_stmt.kwargs.get("%s_limit" % self.dialect.name, None) + if limit is not None: + return f"LIMIT {int(limit)}" + else: + return None + + def update_tables_clause(self, update_stmt, from_table, extra_froms, **kw): + kw["asfrom"] = True + return ", ".join( + t._compiler_dispatch(self, **kw) + for t in [from_table] + list(extra_froms) + ) + + def update_from_clause( + self, update_stmt, from_table, extra_froms, from_hints, **kw + ): + return None + + def delete_table_clause(self, delete_stmt, from_table, extra_froms, **kw): + """If we have extra froms make sure we render any alias as hint.""" + ashint = False + if extra_froms: + ashint = True + return from_table._compiler_dispatch( + self, asfrom=True, iscrud=True, ashint=ashint, **kw + ) + + def delete_extra_from_clause( + self, delete_stmt, from_table, extra_froms, from_hints, **kw + ): + """Render the DELETE .. USING clause specific to MySQL.""" + kw["asfrom"] = True + return "USING " + ", ".join( + t._compiler_dispatch(self, fromhints=from_hints, **kw) + for t in [from_table] + extra_froms + ) + + def visit_empty_set_expr(self, element_types, **kw): + return ( + "SELECT %(outer)s FROM (SELECT %(inner)s) " + "as _empty_set WHERE 1!=1" + % { + "inner": ", ".join( + "1 AS _in_%s" % idx + for idx, type_ in enumerate(element_types) + ), + "outer": ", ".join( + "_in_%s" % idx for idx, type_ in enumerate(element_types) + ), + } + ) + + def visit_is_distinct_from_binary(self, binary, operator, **kw): + return "NOT (%s <=> %s)" % ( + self.process(binary.left), + self.process(binary.right), + ) + + def visit_is_not_distinct_from_binary(self, binary, operator, **kw): + return "%s <=> %s" % ( + self.process(binary.left), + self.process(binary.right), + ) + + def _mariadb_regexp_flags(self, flags, pattern, **kw): + return "CONCAT('(?', %s, ')', %s)" % ( + self.render_literal_value(flags, sqltypes.STRINGTYPE), + self.process(pattern, **kw), + ) + + def _regexp_match(self, op_string, binary, operator, **kw): + flags = binary.modifiers["flags"] + if flags is None: + return self._generate_generic_binary(binary, op_string, **kw) + elif self.dialect.is_mariadb: + return "%s%s%s" % ( + self.process(binary.left, **kw), + op_string, + self._mariadb_regexp_flags(flags, binary.right), + ) + else: + text = "REGEXP_LIKE(%s, %s, %s)" % ( + self.process(binary.left, **kw), + self.process(binary.right, **kw), + self.render_literal_value(flags, sqltypes.STRINGTYPE), + ) + if op_string == " NOT REGEXP ": + return "NOT %s" % text + else: + return text + + def visit_regexp_match_op_binary(self, binary, operator, **kw): + return self._regexp_match(" REGEXP ", binary, operator, **kw) + + def visit_not_regexp_match_op_binary(self, binary, operator, **kw): + return self._regexp_match(" NOT REGEXP ", binary, operator, **kw) + + def visit_regexp_replace_op_binary(self, binary, operator, **kw): + flags = binary.modifiers["flags"] + if flags is None: + return "REGEXP_REPLACE(%s, %s)" % ( + 
self.process(binary.left, **kw), + self.process(binary.right, **kw), + ) + elif self.dialect.is_mariadb: + return "REGEXP_REPLACE(%s, %s, %s)" % ( + self.process(binary.left, **kw), + self._mariadb_regexp_flags(flags, binary.right.clauses[0]), + self.process(binary.right.clauses[1], **kw), + ) + else: + return "REGEXP_REPLACE(%s, %s, %s)" % ( + self.process(binary.left, **kw), + self.process(binary.right, **kw), + self.render_literal_value(flags, sqltypes.STRINGTYPE), + ) + + +class MySQLDDLCompiler(compiler.DDLCompiler): + def get_column_specification(self, column, **kw): + """Builds column DDL.""" + if ( + self.dialect.is_mariadb is True + and column.computed is not None + and column._user_defined_nullable is SchemaConst.NULL_UNSPECIFIED + ): + column.nullable = True + colspec = [ + self.preparer.format_column(column), + self.dialect.type_compiler_instance.process( + column.type, type_expression=column + ), + ] + + if column.computed is not None: + colspec.append(self.process(column.computed)) + + is_timestamp = isinstance( + column.type._unwrapped_dialect_impl(self.dialect), + sqltypes.TIMESTAMP, + ) + + if not column.nullable: + colspec.append("NOT NULL") + + # see: https://docs.sqlalchemy.org/en/latest/dialects/mysql.html#mysql_timestamp_null # noqa + elif column.nullable and is_timestamp: + colspec.append("NULL") + + comment = column.comment + if comment is not None: + literal = self.sql_compiler.render_literal_value( + comment, sqltypes.String() + ) + colspec.append("COMMENT " + literal) + + if ( + column.table is not None + and column is column.table._autoincrement_column + and ( + column.server_default is None + or isinstance(column.server_default, sa_schema.Identity) + ) + and not ( + self.dialect.supports_sequences + and isinstance(column.default, sa_schema.Sequence) + and not column.default.optional + ) + ): + colspec.append("AUTO_INCREMENT") + else: + default = self.get_column_default_string(column) + + if default is not None: + if ( + self.dialect._support_default_function + and not re.match(r"^\s*[\'\"\(]", default) + and not re.search(r"ON +UPDATE", default, re.I) + and re.match(r".*\W.*", default) + ): + colspec.append(f"DEFAULT ({default})") + else: + colspec.append("DEFAULT " + default) + return " ".join(colspec) + + def post_create_table(self, table): + """Build table-level CREATE options like ENGINE and COLLATE.""" + + table_opts = [] + + opts = { + k[len(self.dialect.name) + 1 :].upper(): v + for k, v in table.kwargs.items() + if k.startswith("%s_" % self.dialect.name) + } + + if table.comment is not None: + opts["COMMENT"] = table.comment + + partition_options = [ + "PARTITION_BY", + "PARTITIONS", + "SUBPARTITIONS", + "SUBPARTITION_BY", + ] + + nonpart_options = set(opts).difference(partition_options) + part_options = set(opts).intersection(partition_options) + + for opt in topological.sort( + [ + ("DEFAULT_CHARSET", "COLLATE"), + ("DEFAULT_CHARACTER_SET", "COLLATE"), + ("CHARSET", "COLLATE"), + ("CHARACTER_SET", "COLLATE"), + ], + nonpart_options, + ): + arg = opts[opt] + if opt in _reflection._options_of_type_string: + arg = self.sql_compiler.render_literal_value( + arg, sqltypes.String() + ) + + if opt in ( + "DATA_DIRECTORY", + "INDEX_DIRECTORY", + "DEFAULT_CHARACTER_SET", + "CHARACTER_SET", + "DEFAULT_CHARSET", + "DEFAULT_COLLATE", + ): + opt = opt.replace("_", " ") + + joiner = "=" + if opt in ( + "TABLESPACE", + "DEFAULT CHARACTER SET", + "CHARACTER SET", + "COLLATE", + ): + joiner = " " + + table_opts.append(joiner.join((opt, arg))) + + for opt in 
topological.sort( + [ + ("PARTITION_BY", "PARTITIONS"), + ("PARTITION_BY", "SUBPARTITION_BY"), + ("PARTITION_BY", "SUBPARTITIONS"), + ("PARTITIONS", "SUBPARTITIONS"), + ("PARTITIONS", "SUBPARTITION_BY"), + ("SUBPARTITION_BY", "SUBPARTITIONS"), + ], + part_options, + ): + arg = opts[opt] + if opt in _reflection._options_of_type_string: + arg = self.sql_compiler.render_literal_value( + arg, sqltypes.String() + ) + + opt = opt.replace("_", " ") + joiner = " " + + table_opts.append(joiner.join((opt, arg))) + + return " ".join(table_opts) + + def visit_create_index(self, create, **kw): + index = create.element + self._verify_index_table(index) + preparer = self.preparer + table = preparer.format_table(index.table) + + columns = [ + self.sql_compiler.process( + ( + elements.Grouping(expr) + if ( + isinstance(expr, elements.BinaryExpression) + or ( + isinstance(expr, elements.UnaryExpression) + and expr.modifier + not in (operators.desc_op, operators.asc_op) + ) + or isinstance(expr, functions.FunctionElement) + ) + else expr + ), + include_table=False, + literal_binds=True, + ) + for expr in index.expressions + ] + + name = self._prepared_index_name(index) + + text = "CREATE " + if index.unique: + text += "UNIQUE " + + index_prefix = index.kwargs.get("%s_prefix" % self.dialect.name, None) + if index_prefix: + text += index_prefix + " " + + text += "INDEX " + if create.if_not_exists: + text += "IF NOT EXISTS " + text += "%s ON %s " % (name, table) + + length = index.dialect_options[self.dialect.name]["length"] + if length is not None: + if isinstance(length, dict): + # length value can be a (column_name --> integer value) + # mapping specifying the prefix length for each column of the + # index + columns = ", ".join( + ( + "%s(%d)" % (expr, length[col.name]) + if col.name in length + else ( + "%s(%d)" % (expr, length[expr]) + if expr in length + else "%s" % expr + ) + ) + for col, expr in zip(index.expressions, columns) + ) + else: + # or can be an integer value specifying the same + # prefix length for all columns of the index + columns = ", ".join( + "%s(%d)" % (col, length) for col in columns + ) + else: + columns = ", ".join(columns) + text += "(%s)" % columns + + parser = index.dialect_options["mysql"]["with_parser"] + if parser is not None: + text += " WITH PARSER %s" % (parser,) + + using = index.dialect_options["mysql"]["using"] + if using is not None: + text += " USING %s" % (preparer.quote(using)) + + return text + + def visit_primary_key_constraint(self, constraint, **kw): + text = super().visit_primary_key_constraint(constraint) + using = constraint.dialect_options["mysql"]["using"] + if using: + text += " USING %s" % (self.preparer.quote(using)) + return text + + def visit_drop_index(self, drop, **kw): + index = drop.element + text = "\nDROP INDEX " + if drop.if_exists: + text += "IF EXISTS " + + return text + "%s ON %s" % ( + self._prepared_index_name(index, include_schema=False), + self.preparer.format_table(index.table), + ) + + def visit_drop_constraint(self, drop, **kw): + constraint = drop.element + if isinstance(constraint, sa_schema.ForeignKeyConstraint): + qual = "FOREIGN KEY " + const = self.preparer.format_constraint(constraint) + elif isinstance(constraint, sa_schema.PrimaryKeyConstraint): + qual = "PRIMARY KEY " + const = "" + elif isinstance(constraint, sa_schema.UniqueConstraint): + qual = "INDEX " + const = self.preparer.format_constraint(constraint) + elif isinstance(constraint, sa_schema.CheckConstraint): + if self.dialect.is_mariadb: + qual = "CONSTRAINT " + else: 
+ qual = "CHECK " + const = self.preparer.format_constraint(constraint) + else: + qual = "" + const = self.preparer.format_constraint(constraint) + return "ALTER TABLE %s DROP %s%s" % ( + self.preparer.format_table(constraint.table), + qual, + const, + ) + + def define_constraint_match(self, constraint): + if constraint.match is not None: + raise exc.CompileError( + "MySQL ignores the 'MATCH' keyword while at the same time " + "causes ON UPDATE/ON DELETE clauses to be ignored." + ) + return "" + + def visit_set_table_comment(self, create, **kw): + return "ALTER TABLE %s COMMENT %s" % ( + self.preparer.format_table(create.element), + self.sql_compiler.render_literal_value( + create.element.comment, sqltypes.String() + ), + ) + + def visit_drop_table_comment(self, create, **kw): + return "ALTER TABLE %s COMMENT ''" % ( + self.preparer.format_table(create.element) + ) + + def visit_set_column_comment(self, create, **kw): + return "ALTER TABLE %s CHANGE %s %s" % ( + self.preparer.format_table(create.element.table), + self.preparer.format_column(create.element), + self.get_column_specification(create.element), + ) + + +class MySQLTypeCompiler(compiler.GenericTypeCompiler): + def _extend_numeric(self, type_, spec): + "Extend a numeric-type declaration with MySQL specific extensions." + + if not self._mysql_type(type_): + return spec + + if type_.unsigned: + spec += " UNSIGNED" + if type_.zerofill: + spec += " ZEROFILL" + return spec + + def _extend_string(self, type_, defaults, spec): + """Extend a string-type declaration with standard SQL CHARACTER SET / + COLLATE annotations and MySQL specific extensions. + + """ + + def attr(name): + return getattr(type_, name, defaults.get(name)) + + if attr("charset"): + charset = "CHARACTER SET %s" % attr("charset") + elif attr("ascii"): + charset = "ASCII" + elif attr("unicode"): + charset = "UNICODE" + else: + charset = None + + if attr("collation"): + collation = "COLLATE %s" % type_.collation + elif attr("binary"): + collation = "BINARY" + else: + collation = None + + if attr("national"): + # NATIONAL (aka NCHAR/NVARCHAR) trumps charsets. 
+ return " ".join( + [c for c in ("NATIONAL", spec, collation) if c is not None] + ) + return " ".join( + [c for c in (spec, charset, collation) if c is not None] + ) + + def _mysql_type(self, type_): + return isinstance(type_, (_StringType, _NumericType)) + + def visit_NUMERIC(self, type_, **kw): + if type_.precision is None: + return self._extend_numeric(type_, "NUMERIC") + elif type_.scale is None: + return self._extend_numeric( + type_, + "NUMERIC(%(precision)s)" % {"precision": type_.precision}, + ) + else: + return self._extend_numeric( + type_, + "NUMERIC(%(precision)s, %(scale)s)" + % {"precision": type_.precision, "scale": type_.scale}, + ) + + def visit_DECIMAL(self, type_, **kw): + if type_.precision is None: + return self._extend_numeric(type_, "DECIMAL") + elif type_.scale is None: + return self._extend_numeric( + type_, + "DECIMAL(%(precision)s)" % {"precision": type_.precision}, + ) + else: + return self._extend_numeric( + type_, + "DECIMAL(%(precision)s, %(scale)s)" + % {"precision": type_.precision, "scale": type_.scale}, + ) + + def visit_DOUBLE(self, type_, **kw): + if type_.precision is not None and type_.scale is not None: + return self._extend_numeric( + type_, + "DOUBLE(%(precision)s, %(scale)s)" + % {"precision": type_.precision, "scale": type_.scale}, + ) + else: + return self._extend_numeric(type_, "DOUBLE") + + def visit_REAL(self, type_, **kw): + if type_.precision is not None and type_.scale is not None: + return self._extend_numeric( + type_, + "REAL(%(precision)s, %(scale)s)" + % {"precision": type_.precision, "scale": type_.scale}, + ) + else: + return self._extend_numeric(type_, "REAL") + + def visit_FLOAT(self, type_, **kw): + if ( + self._mysql_type(type_) + and type_.scale is not None + and type_.precision is not None + ): + return self._extend_numeric( + type_, "FLOAT(%s, %s)" % (type_.precision, type_.scale) + ) + elif type_.precision is not None: + return self._extend_numeric( + type_, "FLOAT(%s)" % (type_.precision,) + ) + else: + return self._extend_numeric(type_, "FLOAT") + + def visit_INTEGER(self, type_, **kw): + if self._mysql_type(type_) and type_.display_width is not None: + return self._extend_numeric( + type_, + "INTEGER(%(display_width)s)" + % {"display_width": type_.display_width}, + ) + else: + return self._extend_numeric(type_, "INTEGER") + + def visit_BIGINT(self, type_, **kw): + if self._mysql_type(type_) and type_.display_width is not None: + return self._extend_numeric( + type_, + "BIGINT(%(display_width)s)" + % {"display_width": type_.display_width}, + ) + else: + return self._extend_numeric(type_, "BIGINT") + + def visit_MEDIUMINT(self, type_, **kw): + if self._mysql_type(type_) and type_.display_width is not None: + return self._extend_numeric( + type_, + "MEDIUMINT(%(display_width)s)" + % {"display_width": type_.display_width}, + ) + else: + return self._extend_numeric(type_, "MEDIUMINT") + + def visit_TINYINT(self, type_, **kw): + if self._mysql_type(type_) and type_.display_width is not None: + return self._extend_numeric( + type_, "TINYINT(%s)" % type_.display_width + ) + else: + return self._extend_numeric(type_, "TINYINT") + + def visit_SMALLINT(self, type_, **kw): + if self._mysql_type(type_) and type_.display_width is not None: + return self._extend_numeric( + type_, + "SMALLINT(%(display_width)s)" + % {"display_width": type_.display_width}, + ) + else: + return self._extend_numeric(type_, "SMALLINT") + + def visit_BIT(self, type_, **kw): + if type_.length is not None: + return "BIT(%s)" % type_.length + else: + return 
"BIT" + + def visit_DATETIME(self, type_, **kw): + if getattr(type_, "fsp", None): + return "DATETIME(%d)" % type_.fsp + else: + return "DATETIME" + + def visit_DATE(self, type_, **kw): + return "DATE" + + def visit_TIME(self, type_, **kw): + if getattr(type_, "fsp", None): + return "TIME(%d)" % type_.fsp + else: + return "TIME" + + def visit_TIMESTAMP(self, type_, **kw): + if getattr(type_, "fsp", None): + return "TIMESTAMP(%d)" % type_.fsp + else: + return "TIMESTAMP" + + def visit_YEAR(self, type_, **kw): + if type_.display_width is None: + return "YEAR" + else: + return "YEAR(%s)" % type_.display_width + + def visit_TEXT(self, type_, **kw): + if type_.length is not None: + return self._extend_string(type_, {}, "TEXT(%d)" % type_.length) + else: + return self._extend_string(type_, {}, "TEXT") + + def visit_TINYTEXT(self, type_, **kw): + return self._extend_string(type_, {}, "TINYTEXT") + + def visit_MEDIUMTEXT(self, type_, **kw): + return self._extend_string(type_, {}, "MEDIUMTEXT") + + def visit_LONGTEXT(self, type_, **kw): + return self._extend_string(type_, {}, "LONGTEXT") + + def visit_VARCHAR(self, type_, **kw): + if type_.length is not None: + return self._extend_string(type_, {}, "VARCHAR(%d)" % type_.length) + else: + raise exc.CompileError( + "VARCHAR requires a length on dialect %s" % self.dialect.name + ) + + def visit_CHAR(self, type_, **kw): + if type_.length is not None: + return self._extend_string( + type_, {}, "CHAR(%(length)s)" % {"length": type_.length} + ) + else: + return self._extend_string(type_, {}, "CHAR") + + def visit_NVARCHAR(self, type_, **kw): + # We'll actually generate the equiv. "NATIONAL VARCHAR" instead + # of "NVARCHAR". + if type_.length is not None: + return self._extend_string( + type_, + {"national": True}, + "VARCHAR(%(length)s)" % {"length": type_.length}, + ) + else: + raise exc.CompileError( + "NVARCHAR requires a length on dialect %s" % self.dialect.name + ) + + def visit_NCHAR(self, type_, **kw): + # We'll actually generate the equiv. + # "NATIONAL CHAR" instead of "NCHAR". 
+ if type_.length is not None: + return self._extend_string( + type_, + {"national": True}, + "CHAR(%(length)s)" % {"length": type_.length}, + ) + else: + return self._extend_string(type_, {"national": True}, "CHAR") + + def visit_UUID(self, type_, **kw): + return "UUID" + + def visit_VARBINARY(self, type_, **kw): + return "VARBINARY(%d)" % type_.length + + def visit_JSON(self, type_, **kw): + return "JSON" + + def visit_large_binary(self, type_, **kw): + return self.visit_BLOB(type_) + + def visit_enum(self, type_, **kw): + if not type_.native_enum: + return super().visit_enum(type_) + else: + return self._visit_enumerated_values("ENUM", type_, type_.enums) + + def visit_BLOB(self, type_, **kw): + if type_.length is not None: + return "BLOB(%d)" % type_.length + else: + return "BLOB" + + def visit_TINYBLOB(self, type_, **kw): + return "TINYBLOB" + + def visit_MEDIUMBLOB(self, type_, **kw): + return "MEDIUMBLOB" + + def visit_LONGBLOB(self, type_, **kw): + return "LONGBLOB" + + def _visit_enumerated_values(self, name, type_, enumerated_values): + quoted_enums = [] + for e in enumerated_values: + if self.dialect.identifier_preparer._double_percents: + e = e.replace("%", "%%") + quoted_enums.append("'%s'" % e.replace("'", "''")) + return self._extend_string( + type_, {}, "%s(%s)" % (name, ",".join(quoted_enums)) + ) + + def visit_ENUM(self, type_, **kw): + return self._visit_enumerated_values("ENUM", type_, type_.enums) + + def visit_SET(self, type_, **kw): + return self._visit_enumerated_values("SET", type_, type_.values) + + def visit_BOOLEAN(self, type_, **kw): + return "BOOL" + + +class MySQLIdentifierPreparer(compiler.IdentifierPreparer): + reserved_words = RESERVED_WORDS_MYSQL + + def __init__(self, dialect, server_ansiquotes=False, **kw): + if not server_ansiquotes: + quote = "`" + else: + quote = '"' + + super().__init__(dialect, initial_quote=quote, escape_quote=quote) + + def _quote_free_identifiers(self, *ids): + """Unilaterally identifier-quote any number of strings.""" + + return tuple([self.quote_identifier(i) for i in ids if i is not None]) + + +class MariaDBIdentifierPreparer(MySQLIdentifierPreparer): + reserved_words = RESERVED_WORDS_MARIADB + + +@log.class_logger +class MySQLDialect(default.DefaultDialect): + """Details of the MySQL dialect. + Not used directly in application code. + """ + + name = "mysql" + supports_statement_cache = True + + supports_alter = True + + # MySQL has no true "boolean" type; we + # allow for the "true" and "false" keywords, however + supports_native_boolean = False + + # support for BIT type; mysqlconnector coerces result values automatically, + # all other MySQL DBAPIs require a conversion routine + supports_native_bit = False + + # identifiers are 64, however aliases can be 255... + max_identifier_length = 255 + max_index_name_length = 64 + max_constraint_name_length = 64 + + div_is_floordiv = False + + supports_native_enum = True + + returns_native_bytes = True + + supports_sequences = False # default for MySQL ... + # ... may be updated to True for MariaDB 10.3+ in initialize() + + sequences_optional = False + + supports_for_update_of = False # default for MySQL ... + # ... may be updated to True for MySQL 8+ in initialize() + + _requires_alias_for_on_duplicate_key = False # Only available ... + # ... 
in MySQL 8+ + + # MySQL doesn't support "DEFAULT VALUES" but *does* support + # "VALUES (DEFAULT)" + supports_default_values = False + supports_default_metavalue = True + + use_insertmanyvalues: bool = True + insertmanyvalues_implicit_sentinel = ( + InsertmanyvaluesSentinelOpts.ANY_AUTOINCREMENT + ) + + supports_sane_rowcount = True + supports_sane_multi_rowcount = False + supports_multivalues_insert = True + insert_null_pk_still_autoincrements = True + + supports_comments = True + inline_comments = True + default_paramstyle = "format" + colspecs = colspecs + + cte_follows_insert = True + + statement_compiler = MySQLCompiler + ddl_compiler = MySQLDDLCompiler + type_compiler_cls = MySQLTypeCompiler + ischema_names = ischema_names + preparer = MySQLIdentifierPreparer + + is_mariadb = False + _mariadb_normalized_version_info = None + + # default SQL compilation settings - + # these are modified upon initialize(), + # i.e. first connect + _backslash_escapes = True + _server_ansiquotes = False + + construct_arguments = [ + (sa_schema.Table, {"*": None}), + (sql.Update, {"limit": None}), + (sql.Delete, {"limit": None}), + (sa_schema.PrimaryKeyConstraint, {"using": None}), + ( + sa_schema.Index, + { + "using": None, + "length": None, + "prefix": None, + "with_parser": None, + }, + ), + ] + + def __init__( + self, + json_serializer=None, + json_deserializer=None, + is_mariadb=None, + **kwargs, + ): + kwargs.pop("use_ansiquotes", None) # legacy + default.DefaultDialect.__init__(self, **kwargs) + self._json_serializer = json_serializer + self._json_deserializer = json_deserializer + self._set_mariadb(is_mariadb, None) + + def get_isolation_level_values(self, dbapi_conn): + return ( + "SERIALIZABLE", + "READ UNCOMMITTED", + "READ COMMITTED", + "REPEATABLE READ", + ) + + def set_isolation_level(self, dbapi_connection, level): + cursor = dbapi_connection.cursor() + cursor.execute(f"SET SESSION TRANSACTION ISOLATION LEVEL {level}") + cursor.execute("COMMIT") + cursor.close() + + def get_isolation_level(self, dbapi_connection): + cursor = dbapi_connection.cursor() + if self._is_mysql and self.server_version_info >= (5, 7, 20): + cursor.execute("SELECT @@transaction_isolation") + else: + cursor.execute("SELECT @@tx_isolation") + row = cursor.fetchone() + if row is None: + util.warn( + "Could not retrieve transaction isolation level for MySQL " + "connection." 
+ ) + raise NotImplementedError() + val = row[0] + cursor.close() + if isinstance(val, bytes): + val = val.decode() + return val.upper().replace("-", " ") + + @classmethod + def _is_mariadb_from_url(cls, url): + dbapi = cls.import_dbapi() + dialect = cls(dbapi=dbapi) + + cargs, cparams = dialect.create_connect_args(url) + conn = dialect.connect(*cargs, **cparams) + try: + cursor = conn.cursor() + cursor.execute("SELECT VERSION() LIKE '%MariaDB%'") + val = cursor.fetchone()[0] + except: + raise + else: + return bool(val) + finally: + conn.close() + + def _get_server_version_info(self, connection): + # get database server version info explicitly over the wire + # to avoid proxy servers like MaxScale getting in the + # way with their own values, see #4205 + dbapi_con = connection.connection + cursor = dbapi_con.cursor() + cursor.execute("SELECT VERSION()") + val = cursor.fetchone()[0] + cursor.close() + if isinstance(val, bytes): + val = val.decode() + + return self._parse_server_version(val) + + def _parse_server_version(self, val): + version = [] + is_mariadb = False + + r = re.compile(r"[.\-+]") + tokens = r.split(val) + for token in tokens: + parsed_token = re.match( + r"^(?:(\d+)(?:a|b|c)?|(MariaDB\w*))$", token + ) + if not parsed_token: + continue + elif parsed_token.group(2): + self._mariadb_normalized_version_info = tuple(version[-3:]) + is_mariadb = True + else: + digit = int(parsed_token.group(1)) + version.append(digit) + + server_version_info = tuple(version) + + self._set_mariadb( + server_version_info and is_mariadb, server_version_info + ) + + if not is_mariadb: + self._mariadb_normalized_version_info = server_version_info + + if server_version_info < (5, 0, 2): + raise NotImplementedError( + "the MySQL/MariaDB dialect supports server " + "version info 5.0.2 and above." + ) + + # setting it here to help w the test suite + self.server_version_info = server_version_info + return server_version_info + + def _set_mariadb(self, is_mariadb, server_version_info): + if is_mariadb is None: + return + + if not is_mariadb and self.is_mariadb: + raise exc.InvalidRequestError( + "MySQL version %s is not a MariaDB variant." 
+ % (".".join(map(str, server_version_info)),) + ) + if is_mariadb: + + if not issubclass(self.preparer, MariaDBIdentifierPreparer): + self.preparer = MariaDBIdentifierPreparer + # this would have been set by the default dialect already, + # so set it again + self.identifier_preparer = self.preparer(self) + + # this will be updated on first connect in initialize() + # if using older mariadb version + self.delete_returning = True + self.insert_returning = True + + self.is_mariadb = is_mariadb + + def do_begin_twophase(self, connection, xid): + connection.execute(sql.text("XA BEGIN :xid"), dict(xid=xid)) + + def do_prepare_twophase(self, connection, xid): + connection.execute(sql.text("XA END :xid"), dict(xid=xid)) + connection.execute(sql.text("XA PREPARE :xid"), dict(xid=xid)) + + def do_rollback_twophase( + self, connection, xid, is_prepared=True, recover=False + ): + if not is_prepared: + connection.execute(sql.text("XA END :xid"), dict(xid=xid)) + connection.execute(sql.text("XA ROLLBACK :xid"), dict(xid=xid)) + + def do_commit_twophase( + self, connection, xid, is_prepared=True, recover=False + ): + if not is_prepared: + self.do_prepare_twophase(connection, xid) + connection.execute(sql.text("XA COMMIT :xid"), dict(xid=xid)) + + def do_recover_twophase(self, connection): + resultset = connection.exec_driver_sql("XA RECOVER") + return [row["data"][0 : row["gtrid_length"]] for row in resultset] + + def is_disconnect(self, e, connection, cursor): + if isinstance( + e, + ( + self.dbapi.OperationalError, + self.dbapi.ProgrammingError, + self.dbapi.InterfaceError, + ), + ) and self._extract_error_code(e) in ( + 1927, + 2006, + 2013, + 2014, + 2045, + 2055, + 4031, + ): + return True + elif isinstance( + e, (self.dbapi.InterfaceError, self.dbapi.InternalError) + ): + # if underlying connection is closed, + # this is the error you get + return "(0, '')" in str(e) + else: + return False + + def _compat_fetchall(self, rp, charset=None): + """Proxy result rows to smooth over MySQL-Python driver + inconsistencies.""" + + return [_DecodingRow(row, charset) for row in rp.fetchall()] + + def _compat_fetchone(self, rp, charset=None): + """Proxy a result row to smooth over MySQL-Python driver + inconsistencies.""" + + row = rp.fetchone() + if row: + return _DecodingRow(row, charset) + else: + return None + + def _compat_first(self, rp, charset=None): + """Proxy a result row to smooth over MySQL-Python driver + inconsistencies.""" + + row = rp.first() + if row: + return _DecodingRow(row, charset) + else: + return None + + def _extract_error_code(self, exception): + raise NotImplementedError() + + def _get_default_schema_name(self, connection): + return connection.exec_driver_sql("SELECT DATABASE()").scalar() + + @reflection.cache + def has_table(self, connection, table_name, schema=None, **kw): + self._ensure_has_table_connection(connection) + + if schema is None: + schema = self.default_schema_name + + assert schema is not None + + full_name = ".".join( + self.identifier_preparer._quote_free_identifiers( + schema, table_name + ) + ) + + # DESCRIBE *must* be used because there is no information schema + # table that returns information on temp tables that is consistently + # available on MariaDB / MySQL / engine-agnostic etc. + # therefore we have no choice but to use DESCRIBE and an error catch + # to detect "False". 
See issue #9058 + + try: + with connection.exec_driver_sql( + f"DESCRIBE {full_name}", + execution_options={"skip_user_error_events": True}, + ) as rs: + return rs.fetchone() is not None + except exc.DBAPIError as e: + # https://dev.mysql.com/doc/mysql-errors/8.0/en/server-error-reference.html # noqa: E501 + # there are a lot of codes that *may* pop up here at some point + # but we continue to be fairly conservative. We include: + # 1146: Table '%s.%s' doesn't exist - what every MySQL has emitted + # for decades + # + # mysql 8 suddenly started emitting: + # 1049: Unknown database '%s' - for nonexistent schema + # + # also added: + # 1051: Unknown table '%s' - not known to emit + # + # there's more "doesn't exist" kinds of messages but they are + # less clear if mysql 8 would suddenly start using one of those + if self._extract_error_code(e.orig) in (1146, 1049, 1051): + return False + raise + + @reflection.cache + def has_sequence(self, connection, sequence_name, schema=None, **kw): + if not self.supports_sequences: + self._sequences_not_supported() + if not schema: + schema = self.default_schema_name + # MariaDB implements sequences as a special type of table + # + cursor = connection.execute( + sql.text( + "SELECT TABLE_NAME FROM INFORMATION_SCHEMA.TABLES " + "WHERE TABLE_TYPE='SEQUENCE' and TABLE_NAME=:name AND " + "TABLE_SCHEMA=:schema_name" + ), + dict( + name=str(sequence_name), + schema_name=str(schema), + ), + ) + return cursor.first() is not None + + def _sequences_not_supported(self): + raise NotImplementedError( + "Sequences are supported only by the " + "MariaDB series 10.3 or greater" + ) + + @reflection.cache + def get_sequence_names(self, connection, schema=None, **kw): + if not self.supports_sequences: + self._sequences_not_supported() + if not schema: + schema = self.default_schema_name + # MariaDB implements sequences as a special type of table + cursor = connection.execute( + sql.text( + "SELECT TABLE_NAME FROM INFORMATION_SCHEMA.TABLES " + "WHERE TABLE_TYPE='SEQUENCE' and TABLE_SCHEMA=:schema_name" + ), + dict(schema_name=schema), + ) + return [ + row[0] + for row in self._compat_fetchall( + cursor, charset=self._connection_charset + ) + ] + + def initialize(self, connection): + # this is driver-based, does not need server version info + # and is fairly critical for even basic SQL operations + self._connection_charset = self._detect_charset(connection) + + # call super().initialize() because we need to have + # server_version_info set up. 
in 1.4 under python 2 only this does the + # "check unicode returns" thing, which is the one area that some + # SQL gets compiled within initialize() currently + default.DefaultDialect.initialize(self, connection) + + self._detect_sql_mode(connection) + self._detect_ansiquotes(connection) # depends on sql mode + self._detect_casing(connection) + if self._server_ansiquotes: + # if ansiquotes == True, build a new IdentifierPreparer + # with the new setting + self.identifier_preparer = self.preparer( + self, server_ansiquotes=self._server_ansiquotes + ) + + self.supports_sequences = ( + self.is_mariadb and self.server_version_info >= (10, 3) + ) + + self.supports_for_update_of = ( + self._is_mysql and self.server_version_info >= (8,) + ) + + self._needs_correct_for_88718_96365 = ( + not self.is_mariadb and self.server_version_info >= (8,) + ) + + self.delete_returning = ( + self.is_mariadb and self.server_version_info >= (10, 0, 5) + ) + + self.insert_returning = ( + self.is_mariadb and self.server_version_info >= (10, 5) + ) + + self._requires_alias_for_on_duplicate_key = ( + self._is_mysql and self.server_version_info >= (8, 0, 20) + ) + + self._warn_for_known_db_issues() + + def _warn_for_known_db_issues(self): + if self.is_mariadb: + mdb_version = self._mariadb_normalized_version_info + if mdb_version > (10, 2) and mdb_version < (10, 2, 9): + util.warn( + "MariaDB %r before 10.2.9 has known issues regarding " + "CHECK constraints, which impact handling of NULL values " + "with SQLAlchemy's boolean datatype (MDEV-13596). An " + "additional issue prevents proper migrations of columns " + "with CHECK constraints (MDEV-11114). Please upgrade to " + "MariaDB 10.2.9 or greater, or use the MariaDB 10.1 " + "series, to avoid these issues." % (mdb_version,) + ) + + @property + def _support_float_cast(self): + if not self.server_version_info: + return False + elif self.is_mariadb: + # ref https://mariadb.com/kb/en/mariadb-1045-release-notes/ + return self.server_version_info >= (10, 4, 5) + else: + # ref https://dev.mysql.com/doc/relnotes/mysql/8.0/en/news-8-0-17.html#mysqld-8-0-17-feature # noqa + return self.server_version_info >= (8, 0, 17) + + @property + def _support_default_function(self): + if not self.server_version_info: + return False + elif self.is_mariadb: + # ref https://mariadb.com/kb/en/mariadb-1021-release-notes/ + return self.server_version_info >= (10, 2, 1) + else: + # ref https://dev.mysql.com/doc/refman/8.0/en/data-type-defaults.html # noqa + return self.server_version_info >= (8, 0, 13) + + @property + def _is_mariadb(self): + return self.is_mariadb + + @property + def _is_mysql(self): + return not self.is_mariadb + + @property + def _is_mariadb_102(self): + return self.is_mariadb and self._mariadb_normalized_version_info > ( + 10, + 2, + ) + + @reflection.cache + def get_schema_names(self, connection, **kw): + rp = connection.exec_driver_sql("SHOW schemas") + return [r[0] for r in rp] + + @reflection.cache + def get_table_names(self, connection, schema=None, **kw): + """Return a Unicode SHOW TABLES from a given schema.""" + if schema is not None: + current_schema = schema + else: + current_schema = self.default_schema_name + + charset = self._connection_charset + + rp = connection.exec_driver_sql( + "SHOW FULL TABLES FROM %s" + % self.identifier_preparer.quote_identifier(current_schema) + ) + + return [ + row[0] + for row in self._compat_fetchall(rp, charset=charset) + if row[1] == "BASE TABLE" + ] + + @reflection.cache + def get_view_names(self, connection, schema=None, 
**kw): + if schema is None: + schema = self.default_schema_name + charset = self._connection_charset + rp = connection.exec_driver_sql( + "SHOW FULL TABLES FROM %s" + % self.identifier_preparer.quote_identifier(schema) + ) + return [ + row[0] + for row in self._compat_fetchall(rp, charset=charset) + if row[1] in ("VIEW", "SYSTEM VIEW") + ] + + @reflection.cache + def get_table_options(self, connection, table_name, schema=None, **kw): + parsed_state = self._parsed_state_or_create( + connection, table_name, schema, **kw + ) + if parsed_state.table_options: + return parsed_state.table_options + else: + return ReflectionDefaults.table_options() + + @reflection.cache + def get_columns(self, connection, table_name, schema=None, **kw): + parsed_state = self._parsed_state_or_create( + connection, table_name, schema, **kw + ) + if parsed_state.columns: + return parsed_state.columns + else: + return ReflectionDefaults.columns() + + @reflection.cache + def get_pk_constraint(self, connection, table_name, schema=None, **kw): + parsed_state = self._parsed_state_or_create( + connection, table_name, schema, **kw + ) + for key in parsed_state.keys: + if key["type"] == "PRIMARY": + # There can be only one. + cols = [s[0] for s in key["columns"]] + return {"constrained_columns": cols, "name": None} + return ReflectionDefaults.pk_constraint() + + @reflection.cache + def get_foreign_keys(self, connection, table_name, schema=None, **kw): + parsed_state = self._parsed_state_or_create( + connection, table_name, schema, **kw + ) + default_schema = None + + fkeys = [] + + for spec in parsed_state.fk_constraints: + ref_name = spec["table"][-1] + ref_schema = len(spec["table"]) > 1 and spec["table"][-2] or schema + + if not ref_schema: + if default_schema is None: + default_schema = connection.dialect.default_schema_name + if schema == default_schema: + ref_schema = schema + + loc_names = spec["local"] + ref_names = spec["foreign"] + + con_kw = {} + for opt in ("onupdate", "ondelete"): + if spec.get(opt, False) not in ("NO ACTION", None): + con_kw[opt] = spec[opt] + + fkey_d = { + "name": spec["name"], + "constrained_columns": loc_names, + "referred_schema": ref_schema, + "referred_table": ref_name, + "referred_columns": ref_names, + "options": con_kw, + } + fkeys.append(fkey_d) + + if self._needs_correct_for_88718_96365: + self._correct_for_mysql_bugs_88718_96365(fkeys, connection) + + return fkeys if fkeys else ReflectionDefaults.foreign_keys() + + def _correct_for_mysql_bugs_88718_96365(self, fkeys, connection): + # Foreign key is always in lower case (MySQL 8.0) + # https://bugs.mysql.com/bug.php?id=88718 + # issue #4344 for SQLAlchemy + + # table name also for MySQL 8.0 + # https://bugs.mysql.com/bug.php?id=96365 + # issue #4751 for SQLAlchemy + + # for lower_case_table_names=2, information_schema.columns + # preserves the original table/schema casing, but SHOW CREATE + # TABLE does not. this problem is not in lower_case_table_names=1, + # but use case-insensitive matching for these two modes in any case. + + if self._casing in (1, 2): + + def lower(s): + return s.lower() + + else: + # if on case sensitive, there can be two tables referenced + # with the same name different casing, so we need to use + # case-sensitive matching. + def lower(s): + return s + + default_schema_name = connection.dialect.default_schema_name + + # NOTE: using (table_schema, table_name, lower(column_name)) in (...) + # is very slow since mysql does not seem able to properly use indexse. + # Unpack the where condition instead. 
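+        # The block below groups the referred foreign-key columns into a
+        # nested mapping of schema -> table -> [column names], then expands
+        # that mapping into an OR-of-ANDs filter against
+        # information_schema.columns, so each schema/table pair is matched
+        # explicitly instead of via a composite IN (...) tuple comparison.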
+ schema_by_table_by_column = defaultdict(lambda: defaultdict(list)) + for rec in fkeys: + sch = lower(rec["referred_schema"] or default_schema_name) + tbl = lower(rec["referred_table"]) + for col_name in rec["referred_columns"]: + schema_by_table_by_column[sch][tbl].append(col_name) + + if schema_by_table_by_column: + + condition = sql.or_( + *( + sql.and_( + _info_columns.c.table_schema == schema, + sql.or_( + *( + sql.and_( + _info_columns.c.table_name == table, + sql.func.lower( + _info_columns.c.column_name + ).in_(columns), + ) + for table, columns in tables.items() + ) + ), + ) + for schema, tables in schema_by_table_by_column.items() + ) + ) + + select = sql.select( + _info_columns.c.table_schema, + _info_columns.c.table_name, + _info_columns.c.column_name, + ).where(condition) + + correct_for_wrong_fk_case = connection.execute(select) + + # in casing=0, table name and schema name come back in their + # exact case. + # in casing=1, table name and schema name come back in lower + # case. + # in casing=2, table name and schema name come back from the + # information_schema.columns view in the case + # that was used in CREATE DATABASE and CREATE TABLE, but + # SHOW CREATE TABLE converts them to *lower case*, therefore + # not matching. So for this case, case-insensitive lookup + # is necessary + d = defaultdict(dict) + for schema, tname, cname in correct_for_wrong_fk_case: + d[(lower(schema), lower(tname))]["SCHEMANAME"] = schema + d[(lower(schema), lower(tname))]["TABLENAME"] = tname + d[(lower(schema), lower(tname))][cname.lower()] = cname + + for fkey in fkeys: + rec = d[ + ( + lower(fkey["referred_schema"] or default_schema_name), + lower(fkey["referred_table"]), + ) + ] + + fkey["referred_table"] = rec["TABLENAME"] + if fkey["referred_schema"] is not None: + fkey["referred_schema"] = rec["SCHEMANAME"] + + fkey["referred_columns"] = [ + rec[col.lower()] for col in fkey["referred_columns"] + ] + + @reflection.cache + def get_check_constraints(self, connection, table_name, schema=None, **kw): + parsed_state = self._parsed_state_or_create( + connection, table_name, schema, **kw + ) + + cks = [ + {"name": spec["name"], "sqltext": spec["sqltext"]} + for spec in parsed_state.ck_constraints + ] + cks.sort(key=lambda d: d["name"] or "~") # sort None as last + return cks if cks else ReflectionDefaults.check_constraints() + + @reflection.cache + def get_table_comment(self, connection, table_name, schema=None, **kw): + parsed_state = self._parsed_state_or_create( + connection, table_name, schema, **kw + ) + comment = parsed_state.table_options.get(f"{self.name}_comment", None) + if comment is not None: + return {"text": comment} + else: + return ReflectionDefaults.table_comment() + + @reflection.cache + def get_indexes(self, connection, table_name, schema=None, **kw): + parsed_state = self._parsed_state_or_create( + connection, table_name, schema, **kw + ) + + indexes = [] + + for spec in parsed_state.keys: + dialect_options = {} + unique = False + flavor = spec["type"] + if flavor == "PRIMARY": + continue + if flavor == "UNIQUE": + unique = True + elif flavor in ("FULLTEXT", "SPATIAL"): + dialect_options["%s_prefix" % self.name] = flavor + elif flavor is None: + pass + else: + self.logger.info( + "Converting unknown KEY type %s to a plain KEY", flavor + ) + pass + + if spec["parser"]: + dialect_options["%s_with_parser" % (self.name)] = spec[ + "parser" + ] + + index_d = {} + + index_d["name"] = spec["name"] + index_d["column_names"] = [s[0] for s in spec["columns"]] + mysql_length = { + 
s[0]: s[1] for s in spec["columns"] if s[1] is not None + } + if mysql_length: + dialect_options["%s_length" % self.name] = mysql_length + + index_d["unique"] = unique + if flavor: + index_d["type"] = flavor + + if dialect_options: + index_d["dialect_options"] = dialect_options + + indexes.append(index_d) + indexes.sort(key=lambda d: d["name"] or "~") # sort None as last + return indexes if indexes else ReflectionDefaults.indexes() + + @reflection.cache + def get_unique_constraints( + self, connection, table_name, schema=None, **kw + ): + parsed_state = self._parsed_state_or_create( + connection, table_name, schema, **kw + ) + + ucs = [ + { + "name": key["name"], + "column_names": [col[0] for col in key["columns"]], + "duplicates_index": key["name"], + } + for key in parsed_state.keys + if key["type"] == "UNIQUE" + ] + ucs.sort(key=lambda d: d["name"] or "~") # sort None as last + if ucs: + return ucs + else: + return ReflectionDefaults.unique_constraints() + + @reflection.cache + def get_view_definition(self, connection, view_name, schema=None, **kw): + charset = self._connection_charset + full_name = ".".join( + self.identifier_preparer._quote_free_identifiers(schema, view_name) + ) + sql = self._show_create_table( + connection, None, charset, full_name=full_name + ) + if sql.upper().startswith("CREATE TABLE"): + # it's a table, not a view + raise exc.NoSuchTableError(full_name) + return sql + + def _parsed_state_or_create( + self, connection, table_name, schema=None, **kw + ): + return self._setup_parser( + connection, + table_name, + schema, + info_cache=kw.get("info_cache", None), + ) + + @util.memoized_property + def _tabledef_parser(self): + """return the MySQLTableDefinitionParser, generate if needed. + + The deferred creation ensures that the dialect has + retrieved server version information first. + + """ + preparer = self.identifier_preparer + return _reflection.MySQLTableDefinitionParser(self, preparer) + + @reflection.cache + def _setup_parser(self, connection, table_name, schema=None, **kw): + charset = self._connection_charset + parser = self._tabledef_parser + full_name = ".".join( + self.identifier_preparer._quote_free_identifiers( + schema, table_name + ) + ) + sql = self._show_create_table( + connection, None, charset, full_name=full_name + ) + if parser._check_view(sql): + # Adapt views to something table-like. + columns = self._describe_table( + connection, None, charset, full_name=full_name + ) + sql = parser._describe_to_create(table_name, columns) + return parser.parse(sql, charset) + + def _fetch_setting(self, connection, setting_name): + charset = self._connection_charset + + if self.server_version_info and self.server_version_info < (5, 6): + sql = "SHOW VARIABLES LIKE '%s'" % setting_name + fetch_col = 1 + else: + sql = "SELECT @@%s" % setting_name + fetch_col = 0 + + show_var = connection.exec_driver_sql(sql) + row = self._compat_first(show_var, charset=charset) + if not row: + return None + else: + return row[fetch_col] + + def _detect_charset(self, connection): + raise NotImplementedError() + + def _detect_casing(self, connection): + """Sniff out identifier case sensitivity. + + Cached per-connection. This value can not change without a server + restart. + + """ + # https://dev.mysql.com/doc/refman/en/identifier-case-sensitivity.html + + setting = self._fetch_setting(connection, "lower_case_table_names") + if setting is None: + cs = 0 + else: + # 4.0.15 returns OFF or ON according to [ticket:489] + # 3.23 doesn't, 4.0.27 doesn't.. 
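+            # Normalize the reported value onto the numeric
+            # lower_case_table_names convention: older servers report the
+            # words OFF / ON, which map to 0 / 1; any other value is taken
+            # as the integer setting itself.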
+ if setting == "OFF": + cs = 0 + elif setting == "ON": + cs = 1 + else: + cs = int(setting) + self._casing = cs + return cs + + def _detect_collations(self, connection): + """Pull the active COLLATIONS list from the server. + + Cached per-connection. + """ + + collations = {} + charset = self._connection_charset + rs = connection.exec_driver_sql("SHOW COLLATION") + for row in self._compat_fetchall(rs, charset): + collations[row[0]] = row[1] + return collations + + def _detect_sql_mode(self, connection): + setting = self._fetch_setting(connection, "sql_mode") + + if setting is None: + util.warn( + "Could not retrieve SQL_MODE; please ensure the " + "MySQL user has permissions to SHOW VARIABLES" + ) + self._sql_mode = "" + else: + self._sql_mode = setting or "" + + def _detect_ansiquotes(self, connection): + """Detect and adjust for the ANSI_QUOTES sql mode.""" + + mode = self._sql_mode + if not mode: + mode = "" + elif mode.isdigit(): + mode_no = int(mode) + mode = (mode_no | 4 == mode_no) and "ANSI_QUOTES" or "" + + self._server_ansiquotes = "ANSI_QUOTES" in mode + + # as of MySQL 5.0.1 + self._backslash_escapes = "NO_BACKSLASH_ESCAPES" not in mode + + def _show_create_table( + self, connection, table, charset=None, full_name=None + ): + """Run SHOW CREATE TABLE for a ``Table``.""" + + if full_name is None: + full_name = self.identifier_preparer.format_table(table) + st = "SHOW CREATE TABLE %s" % full_name + + try: + rp = connection.execution_options( + skip_user_error_events=True + ).exec_driver_sql(st) + except exc.DBAPIError as e: + if self._extract_error_code(e.orig) == 1146: + raise exc.NoSuchTableError(full_name) from e + else: + raise + row = self._compat_first(rp, charset=charset) + if not row: + raise exc.NoSuchTableError(full_name) + return row[1].strip() + + def _describe_table(self, connection, table, charset=None, full_name=None): + """Run DESCRIBE for a ``Table`` and return processed rows.""" + + if full_name is None: + full_name = self.identifier_preparer.format_table(table) + st = "DESCRIBE %s" % full_name + + rp, rows = None, None + try: + try: + rp = connection.execution_options( + skip_user_error_events=True + ).exec_driver_sql(st) + except exc.DBAPIError as e: + code = self._extract_error_code(e.orig) + if code == 1146: + raise exc.NoSuchTableError(full_name) from e + + elif code == 1356: + raise exc.UnreflectableTableError( + "Table or view named %s could not be " + "reflected: %s" % (full_name, e) + ) from e + + else: + raise + rows = self._compat_fetchall(rp, charset=charset) + finally: + if rp: + rp.close() + return rows + + +class _DecodingRow: + """Return unicode-decoded values based on type inspection. + + Smooth over data type issues (esp. with alpha driver versions) and + normalize strings as Unicode regardless of user-configured driver + encoding settings. + + """ + + # Some MySQL-python versions can return some columns as + # sets.Set(['value']) (seriously) but thankfully that doesn't + # seem to come up in DDL queries. 
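+    # Mapping of MySQL character set names to the Python codec names used
+    # for decoding; only charsets whose MySQL name differs from, or is not
+    # registered in, Python's codec registry need an entry here.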
+ + _encoding_compat = { + "koi8r": "koi8_r", + "koi8u": "koi8_u", + "utf16": "utf-16-be", # MySQL's uft16 is always bigendian + "utf8mb4": "utf8", # real utf8 + "utf8mb3": "utf8", # real utf8; saw this happen on CI but I cannot + # reproduce, possibly mariadb10.6 related + "eucjpms": "ujis", + } + + def __init__(self, rowproxy, charset): + self.rowproxy = rowproxy + self.charset = self._encoding_compat.get(charset, charset) + + def __getitem__(self, index): + item = self.rowproxy[index] + if isinstance(item, _array): + item = item.tostring() + + if self.charset and isinstance(item, bytes): + return item.decode(self.charset) + else: + return item + + def __getattr__(self, attr): + item = getattr(self.rowproxy, attr) + if isinstance(item, _array): + item = item.tostring() + if self.charset and isinstance(item, bytes): + return item.decode(self.charset) + else: + return item + + +_info_columns = sql.table( + "columns", + sql.column("table_schema", VARCHAR(64)), + sql.column("table_name", VARCHAR(64)), + sql.column("column_name", VARCHAR(64)), + schema="information_schema", +) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/dialects/mysql/cymysql.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/dialects/mysql/cymysql.py new file mode 100644 index 0000000000000000000000000000000000000000..5c00ada9f9400a42e220be0ad96c8bb3623615fa --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/dialects/mysql/cymysql.py @@ -0,0 +1,84 @@ +# dialects/mysql/cymysql.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php +# mypy: ignore-errors + +r""" + +.. dialect:: mysql+cymysql + :name: CyMySQL + :dbapi: cymysql + :connectstring: mysql+cymysql://:@/[?] + :url: https://github.com/nakagami/CyMySQL + +.. note:: + + The CyMySQL dialect is **not tested as part of SQLAlchemy's continuous + integration** and may have unresolved issues. The recommended MySQL + dialects are mysqlclient and PyMySQL. + +""" # noqa + +from .base import BIT +from .base import MySQLDialect +from .mysqldb import MySQLDialect_mysqldb +from ... 
import util + + +class _cymysqlBIT(BIT): + def result_processor(self, dialect, coltype): + """Convert MySQL's 64 bit, variable length binary string to a long.""" + + def process(value): + if value is not None: + v = 0 + for i in iter(value): + v = v << 8 | i + return v + return value + + return process + + +class MySQLDialect_cymysql(MySQLDialect_mysqldb): + driver = "cymysql" + supports_statement_cache = True + + description_encoding = None + supports_sane_rowcount = True + supports_sane_multi_rowcount = False + supports_unicode_statements = True + + colspecs = util.update_copy(MySQLDialect.colspecs, {BIT: _cymysqlBIT}) + + @classmethod + def import_dbapi(cls): + return __import__("cymysql") + + def _detect_charset(self, connection): + return connection.connection.charset + + def _extract_error_code(self, exception): + return exception.errno + + def is_disconnect(self, e, connection, cursor): + if isinstance(e, self.dbapi.OperationalError): + return self._extract_error_code(e) in ( + 2006, + 2013, + 2014, + 2045, + 2055, + ) + elif isinstance(e, self.dbapi.InterfaceError): + # if underlying connection is closed, + # this is the error you get + return True + else: + return False + + +dialect = MySQLDialect_cymysql diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/dialects/mysql/dml.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/dialects/mysql/dml.py new file mode 100644 index 0000000000000000000000000000000000000000..cceb0818f9b1fb308e52dee3ea9b53bd50815fba --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/dialects/mysql/dml.py @@ -0,0 +1,225 @@ +# dialects/mysql/dml.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php +from __future__ import annotations + +from typing import Any +from typing import Dict +from typing import List +from typing import Mapping +from typing import Optional +from typing import Tuple +from typing import Union + +from ... import exc +from ... import util +from ...sql._typing import _DMLTableArgument +from ...sql.base import _exclusive_against +from ...sql.base import _generative +from ...sql.base import ColumnCollection +from ...sql.base import ReadOnlyColumnCollection +from ...sql.dml import Insert as StandardInsert +from ...sql.elements import ClauseElement +from ...sql.elements import KeyedColumnElement +from ...sql.expression import alias +from ...sql.selectable import NamedFromClause +from ...util.typing import Self + + +__all__ = ("Insert", "insert") + + +def insert(table: _DMLTableArgument) -> Insert: + """Construct a MySQL/MariaDB-specific variant :class:`_mysql.Insert` + construct. + + .. container:: inherited_member + + The :func:`sqlalchemy.dialects.mysql.insert` function creates + a :class:`sqlalchemy.dialects.mysql.Insert`. This class is based + on the dialect-agnostic :class:`_sql.Insert` construct which may + be constructed using the :func:`_sql.insert` function in + SQLAlchemy Core. + + The :class:`_mysql.Insert` construct includes additional methods + :meth:`_mysql.Insert.on_duplicate_key_update`. + + """ + return Insert(table) + + +class Insert(StandardInsert): + """MySQL-specific implementation of INSERT. + + Adds methods for MySQL-specific syntaxes such as ON DUPLICATE KEY UPDATE. 
+ + The :class:`~.mysql.Insert` object is created using the + :func:`sqlalchemy.dialects.mysql.insert` function. + + .. versionadded:: 1.2 + + """ + + stringify_dialect = "mysql" + inherit_cache = False + + @property + def inserted( + self, + ) -> ReadOnlyColumnCollection[str, KeyedColumnElement[Any]]: + """Provide the "inserted" namespace for an ON DUPLICATE KEY UPDATE + statement + + MySQL's ON DUPLICATE KEY UPDATE clause allows reference to the row + that would be inserted, via a special function called ``VALUES()``. + This attribute provides all columns in this row to be referenceable + such that they will render within a ``VALUES()`` function inside the + ON DUPLICATE KEY UPDATE clause. The attribute is named ``.inserted`` + so as not to conflict with the existing + :meth:`_expression.Insert.values` method. + + .. tip:: The :attr:`_mysql.Insert.inserted` attribute is an instance + of :class:`_expression.ColumnCollection`, which provides an + interface the same as that of the :attr:`_schema.Table.c` + collection described at :ref:`metadata_tables_and_columns`. + With this collection, ordinary names are accessible like attributes + (e.g. ``stmt.inserted.some_column``), but special names and + dictionary method names should be accessed using indexed access, + such as ``stmt.inserted["column name"]`` or + ``stmt.inserted["values"]``. See the docstring for + :class:`_expression.ColumnCollection` for further examples. + + .. seealso:: + + :ref:`mysql_insert_on_duplicate_key_update` - example of how + to use :attr:`_expression.Insert.inserted` + + """ + return self.inserted_alias.columns + + @util.memoized_property + def inserted_alias(self) -> NamedFromClause: + return alias(self.table, name="inserted") + + @_generative + @_exclusive_against( + "_post_values_clause", + msgs={ + "_post_values_clause": "This Insert construct already " + "has an ON DUPLICATE KEY clause present" + }, + ) + def on_duplicate_key_update(self, *args: _UpdateArg, **kw: Any) -> Self: + r""" + Specifies the ON DUPLICATE KEY UPDATE clause. + + :param \**kw: Column keys linked to UPDATE values. The + values may be any SQL expression or supported literal Python + values. + + .. warning:: This dictionary does **not** take into account + Python-specified default UPDATE values or generation functions, + e.g. those specified using :paramref:`_schema.Column.onupdate`. + These values will not be exercised for an ON DUPLICATE KEY UPDATE + style of UPDATE, unless values are manually specified here. + + :param \*args: As an alternative to passing key/value parameters, + a dictionary or list of 2-tuples can be passed as a single positional + argument. + + Passing a single dictionary is equivalent to the keyword argument + form:: + + insert().on_duplicate_key_update({"name": "some name"}) + + Passing a list of 2-tuples indicates that the parameter assignments + in the UPDATE clause should be ordered as sent, in a manner similar + to that described for the :class:`_expression.Update` + construct overall + in :ref:`tutorial_parameter_ordered_updates`:: + + insert().on_duplicate_key_update( + [ + ("name", "some name"), + ("value", "some value"), + ] + ) + + .. versionchanged:: 1.3 parameters can be specified as a dictionary + or list of 2-tuples; the latter form provides for parameter + ordering. + + + .. versionadded:: 1.2 + + .. 
seealso:: + + :ref:`mysql_insert_on_duplicate_key_update` + + """ + if args and kw: + raise exc.ArgumentError( + "Can't pass kwargs and positional arguments simultaneously" + ) + + if args: + if len(args) > 1: + raise exc.ArgumentError( + "Only a single dictionary or list of tuples " + "is accepted positionally." + ) + values = args[0] + else: + values = kw + + self._post_values_clause = OnDuplicateClause( + self.inserted_alias, values + ) + return self + + +class OnDuplicateClause(ClauseElement): + __visit_name__ = "on_duplicate_key_update" + + _parameter_ordering: Optional[List[str]] = None + + update: Dict[str, Any] + stringify_dialect = "mysql" + + def __init__( + self, inserted_alias: NamedFromClause, update: _UpdateArg + ) -> None: + self.inserted_alias = inserted_alias + + # auto-detect that parameters should be ordered. This is copied from + # Update._proces_colparams(), however we don't look for a special flag + # in this case since we are not disambiguating from other use cases as + # we are in Update.values(). + if isinstance(update, list) and ( + update and isinstance(update[0], tuple) + ): + self._parameter_ordering = [key for key, value in update] + update = dict(update) + + if isinstance(update, dict): + if not update: + raise ValueError( + "update parameter dictionary must not be empty" + ) + elif isinstance(update, ColumnCollection): + update = dict(update) + else: + raise ValueError( + "update parameter must be a non-empty dictionary " + "or a ColumnCollection such as the `.c.` collection " + "of a Table object" + ) + self.update = update + + +_UpdateArg = Union[ + Mapping[Any, Any], List[Tuple[str, Any]], ColumnCollection[Any, Any] +] diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/dialects/mysql/enumerated.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/dialects/mysql/enumerated.py new file mode 100644 index 0000000000000000000000000000000000000000..6745cae55e7594ceb37d30d2c53028b5d32a8ac4 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/dialects/mysql/enumerated.py @@ -0,0 +1,243 @@ +# dialects/mysql/enumerated.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php +# mypy: ignore-errors + + +import re + +from .types import _StringType +from ... import exc +from ... import sql +from ... import util +from ...sql import sqltypes + + +class ENUM(sqltypes.NativeForEmulated, sqltypes.Enum, _StringType): + """MySQL ENUM type.""" + + __visit_name__ = "ENUM" + + native_enum = True + + def __init__(self, *enums, **kw): + """Construct an ENUM. + + E.g.:: + + Column("myenum", ENUM("foo", "bar", "baz")) + + :param enums: The range of valid values for this ENUM. Values in + enums are not quoted, they will be escaped and surrounded by single + quotes when generating the schema. This object may also be a + PEP-435-compliant enumerated type. + + .. versionadded: 1.1 added support for PEP-435-compliant enumerated + types. + + :param strict: This flag has no effect. + + .. versionchanged:: The MySQL ENUM type as well as the base Enum + type now validates all Python data values. + + :param charset: Optional, a column-level character set for this string + value. Takes precedence to 'ascii' or 'unicode' short-hand. 
+ + :param collation: Optional, a column-level collation for this string + value. Takes precedence to 'binary' short-hand. + + :param ascii: Defaults to False: short-hand for the ``latin1`` + character set, generates ASCII in schema. + + :param unicode: Defaults to False: short-hand for the ``ucs2`` + character set, generates UNICODE in schema. + + :param binary: Defaults to False: short-hand, pick the binary + collation type that matches the column's character set. Generates + BINARY in schema. This does not affect the type of data stored, + only the collation of character data. + + """ + kw.pop("strict", None) + self._enum_init(enums, kw) + _StringType.__init__(self, length=self.length, **kw) + + @classmethod + def adapt_emulated_to_native(cls, impl, **kw): + """Produce a MySQL native :class:`.mysql.ENUM` from plain + :class:`.Enum`. + + """ + kw.setdefault("validate_strings", impl.validate_strings) + kw.setdefault("values_callable", impl.values_callable) + kw.setdefault("omit_aliases", impl._omit_aliases) + return cls(**kw) + + def _object_value_for_elem(self, elem): + # mysql sends back a blank string for any value that + # was persisted that was not in the enums; that is, it does no + # validation on the incoming data, it "truncates" it to be + # the blank string. Return it straight. + if elem == "": + return elem + else: + return super()._object_value_for_elem(elem) + + def __repr__(self): + return util.generic_repr( + self, to_inspect=[ENUM, _StringType, sqltypes.Enum] + ) + + +class SET(_StringType): + """MySQL SET type.""" + + __visit_name__ = "SET" + + def __init__(self, *values, **kw): + """Construct a SET. + + E.g.:: + + Column("myset", SET("foo", "bar", "baz")) + + The list of potential values is required in the case that this + set will be used to generate DDL for a table, or if the + :paramref:`.SET.retrieve_as_bitwise` flag is set to True. + + :param values: The range of valid values for this SET. The values + are not quoted, they will be escaped and surrounded by single + quotes when generating the schema. + + :param convert_unicode: Same flag as that of + :paramref:`.String.convert_unicode`. + + :param collation: same as that of :paramref:`.String.collation` + + :param charset: same as that of :paramref:`.VARCHAR.charset`. + + :param ascii: same as that of :paramref:`.VARCHAR.ascii`. + + :param unicode: same as that of :paramref:`.VARCHAR.unicode`. + + :param binary: same as that of :paramref:`.VARCHAR.binary`. + + :param retrieve_as_bitwise: if True, the data for the set type will be + persisted and selected using an integer value, where a set is coerced + into a bitwise mask for persistence. MySQL allows this mode which + has the advantage of being able to store values unambiguously, + such as the blank string ``''``. The datatype will appear + as the expression ``col + 0`` in a SELECT statement, so that the + value is coerced into an integer value in result sets. + This flag is required if one wishes + to persist a set that can store the blank string ``''`` as a value. + + .. warning:: + + When using :paramref:`.mysql.SET.retrieve_as_bitwise`, it is + essential that the list of set values is expressed in the + **exact same order** as exists on the MySQL database. 
+ + """ + self.retrieve_as_bitwise = kw.pop("retrieve_as_bitwise", False) + self.values = tuple(values) + if not self.retrieve_as_bitwise and "" in values: + raise exc.ArgumentError( + "Can't use the blank value '' in a SET without " + "setting retrieve_as_bitwise=True" + ) + if self.retrieve_as_bitwise: + self._bitmap = { + value: 2**idx for idx, value in enumerate(self.values) + } + self._bitmap.update( + (2**idx, value) for idx, value in enumerate(self.values) + ) + length = max([len(v) for v in values] + [0]) + kw.setdefault("length", length) + super().__init__(**kw) + + def column_expression(self, colexpr): + if self.retrieve_as_bitwise: + return sql.type_coerce( + sql.type_coerce(colexpr, sqltypes.Integer) + 0, self + ) + else: + return colexpr + + def result_processor(self, dialect, coltype): + if self.retrieve_as_bitwise: + + def process(value): + if value is not None: + value = int(value) + + return set(util.map_bits(self._bitmap.__getitem__, value)) + else: + return None + + else: + super_convert = super().result_processor(dialect, coltype) + + def process(value): + if isinstance(value, str): + # MySQLdb returns a string, let's parse + if super_convert: + value = super_convert(value) + return set(re.findall(r"[^,]+", value)) + else: + # mysql-connector-python does a naive + # split(",") which throws in an empty string + if value is not None: + value.discard("") + return value + + return process + + def bind_processor(self, dialect): + super_convert = super().bind_processor(dialect) + if self.retrieve_as_bitwise: + + def process(value): + if value is None: + return None + elif isinstance(value, (int, str)): + if super_convert: + return super_convert(value) + else: + return value + else: + int_value = 0 + for v in value: + int_value |= self._bitmap[v] + return int_value + + else: + + def process(value): + # accept strings and int (actually bitflag) values directly + if value is not None and not isinstance(value, (int, str)): + value = ",".join(value) + + if super_convert: + return super_convert(value) + else: + return value + + return process + + def adapt(self, impltype, **kw): + kw["retrieve_as_bitwise"] = self.retrieve_as_bitwise + return util.constructor_copy(self, impltype, *self.values, **kw) + + def __repr__(self): + return util.generic_repr( + self, + to_inspect=[SET, _StringType], + additional_kw=[ + ("retrieve_as_bitwise", False), + ], + ) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/dialects/mysql/expression.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/dialects/mysql/expression.py new file mode 100644 index 0000000000000000000000000000000000000000..b60a0888517fd3aab0a75a9bd5ba3f03c386af4f --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/dialects/mysql/expression.py @@ -0,0 +1,143 @@ +# dialects/mysql/expression.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php +# mypy: ignore-errors + + +from ... import exc +from ... 
import util +from ...sql import coercions +from ...sql import elements +from ...sql import operators +from ...sql import roles +from ...sql.base import _generative +from ...sql.base import Generative +from ...util.typing import Self + + +class match(Generative, elements.BinaryExpression): + """Produce a ``MATCH (X, Y) AGAINST ('TEXT')`` clause. + + E.g.:: + + from sqlalchemy import desc + from sqlalchemy.dialects.mysql import match + + match_expr = match( + users_table.c.firstname, + users_table.c.lastname, + against="Firstname Lastname", + ) + + stmt = ( + select(users_table) + .where(match_expr.in_boolean_mode()) + .order_by(desc(match_expr)) + ) + + Would produce SQL resembling: + + .. sourcecode:: sql + + SELECT id, firstname, lastname + FROM user + WHERE MATCH(firstname, lastname) AGAINST (:param_1 IN BOOLEAN MODE) + ORDER BY MATCH(firstname, lastname) AGAINST (:param_2) DESC + + The :func:`_mysql.match` function is a standalone version of the + :meth:`_sql.ColumnElement.match` method available on all + SQL expressions, as when :meth:`_expression.ColumnElement.match` is + used, but allows to pass multiple columns + + :param cols: column expressions to match against + + :param against: expression to be compared towards + + :param in_boolean_mode: boolean, set "boolean mode" to true + + :param in_natural_language_mode: boolean , set "natural language" to true + + :param with_query_expansion: boolean, set "query expansion" to true + + .. versionadded:: 1.4.19 + + .. seealso:: + + :meth:`_expression.ColumnElement.match` + + """ + + __visit_name__ = "mysql_match" + + inherit_cache = True + + def __init__(self, *cols, **kw): + if not cols: + raise exc.ArgumentError("columns are required") + + against = kw.pop("against", None) + + if against is None: + raise exc.ArgumentError("against is required") + against = coercions.expect( + roles.ExpressionElementRole, + against, + ) + + left = elements.BooleanClauseList._construct_raw( + operators.comma_op, + clauses=cols, + ) + left.group = False + + flags = util.immutabledict( + { + "mysql_boolean_mode": kw.pop("in_boolean_mode", False), + "mysql_natural_language": kw.pop( + "in_natural_language_mode", False + ), + "mysql_query_expansion": kw.pop("with_query_expansion", False), + } + ) + + if kw: + raise exc.ArgumentError("unknown arguments: %s" % (", ".join(kw))) + + super().__init__(left, against, operators.match_op, modifiers=flags) + + @_generative + def in_boolean_mode(self) -> Self: + """Apply the "IN BOOLEAN MODE" modifier to the MATCH expression. + + :return: a new :class:`_mysql.match` instance with modifications + applied. + """ + + self.modifiers = self.modifiers.union({"mysql_boolean_mode": True}) + return self + + @_generative + def in_natural_language_mode(self) -> Self: + """Apply the "IN NATURAL LANGUAGE MODE" modifier to the MATCH + expression. + + :return: a new :class:`_mysql.match` instance with modifications + applied. + """ + + self.modifiers = self.modifiers.union({"mysql_natural_language": True}) + return self + + @_generative + def with_query_expansion(self) -> Self: + """Apply the "WITH QUERY EXPANSION" modifier to the MATCH expression. + + :return: a new :class:`_mysql.match` instance with modifications + applied. 
+ """ + + self.modifiers = self.modifiers.union({"mysql_query_expansion": True}) + return self diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/dialects/mysql/json.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/dialects/mysql/json.py new file mode 100644 index 0000000000000000000000000000000000000000..8912af36631f93470d7882493cd2b85d59c148f8 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/dialects/mysql/json.py @@ -0,0 +1,81 @@ +# dialects/mysql/json.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php +# mypy: ignore-errors + +from ... import types as sqltypes + + +class JSON(sqltypes.JSON): + """MySQL JSON type. + + MySQL supports JSON as of version 5.7. + MariaDB supports JSON (as an alias for LONGTEXT) as of version 10.2. + + :class:`_mysql.JSON` is used automatically whenever the base + :class:`_types.JSON` datatype is used against a MySQL or MariaDB backend. + + .. seealso:: + + :class:`_types.JSON` - main documentation for the generic + cross-platform JSON datatype. + + The :class:`.mysql.JSON` type supports persistence of JSON values + as well as the core index operations provided by :class:`_types.JSON` + datatype, by adapting the operations to render the ``JSON_EXTRACT`` + function at the database level. + + """ + + pass + + +class _FormatTypeMixin: + def _format_value(self, value): + raise NotImplementedError() + + def bind_processor(self, dialect): + super_proc = self.string_bind_processor(dialect) + + def process(value): + value = self._format_value(value) + if super_proc: + value = super_proc(value) + return value + + return process + + def literal_processor(self, dialect): + super_proc = self.string_literal_processor(dialect) + + def process(value): + value = self._format_value(value) + if super_proc: + value = super_proc(value) + return value + + return process + + +class JSONIndexType(_FormatTypeMixin, sqltypes.JSON.JSONIndexType): + def _format_value(self, value): + if isinstance(value, int): + value = "$[%s]" % value + else: + value = '$."%s"' % value + return value + + +class JSONPathType(_FormatTypeMixin, sqltypes.JSON.JSONPathType): + def _format_value(self, value): + return "$%s" % ( + "".join( + [ + "[%s]" % elem if isinstance(elem, int) else '."%s"' % elem + for elem in value + ] + ) + ) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/dialects/mysql/mariadb.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/dialects/mysql/mariadb.py new file mode 100644 index 0000000000000000000000000000000000000000..b84dee37a7bb02e1add23141cad6e3fdf83e3ed8 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/dialects/mysql/mariadb.py @@ -0,0 +1,67 @@ +# dialects/mysql/mariadb.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php +# mypy: ignore-errors +from .base import MariaDBIdentifierPreparer +from .base import MySQLDialect +from .base import MySQLTypeCompiler +from ...sql import sqltypes + + +class INET4(sqltypes.TypeEngine[str]): + """INET4 column type for MariaDB + 
+ .. versionadded:: 2.0.37 + """ + + __visit_name__ = "INET4" + + +class INET6(sqltypes.TypeEngine[str]): + """INET6 column type for MariaDB + + .. versionadded:: 2.0.37 + """ + + __visit_name__ = "INET6" + + +class MariaDBTypeCompiler(MySQLTypeCompiler): + def visit_INET4(self, type_, **kwargs) -> str: + return "INET4" + + def visit_INET6(self, type_, **kwargs) -> str: + return "INET6" + + +class MariaDBDialect(MySQLDialect): + is_mariadb = True + supports_statement_cache = True + name = "mariadb" + preparer = MariaDBIdentifierPreparer + type_compiler_cls = MariaDBTypeCompiler + + +def loader(driver): + dialect_mod = __import__( + "sqlalchemy.dialects.mysql.%s" % driver + ).dialects.mysql + + driver_mod = getattr(dialect_mod, driver) + if hasattr(driver_mod, "mariadb_dialect"): + driver_cls = driver_mod.mariadb_dialect + return driver_cls + else: + driver_cls = driver_mod.dialect + + return type( + "MariaDBDialect_%s" % driver, + ( + MariaDBDialect, + driver_cls, + ), + {"supports_statement_cache": True}, + ) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/dialects/mysql/mariadbconnector.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/dialects/mysql/mariadbconnector.py new file mode 100644 index 0000000000000000000000000000000000000000..2d2ad1997103a56d28e9289f82d69f2fa08de551 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/dialects/mysql/mariadbconnector.py @@ -0,0 +1,277 @@ +# dialects/mysql/mariadbconnector.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php +# mypy: ignore-errors + + +""" + +.. dialect:: mysql+mariadbconnector + :name: MariaDB Connector/Python + :dbapi: mariadb + :connectstring: mariadb+mariadbconnector://:@[:]/ + :url: https://pypi.org/project/mariadb/ + +Driver Status +------------- + +MariaDB Connector/Python enables Python programs to access MariaDB and MySQL +databases using an API which is compliant with the Python DB API 2.0 (PEP-249). +It is written in C and uses MariaDB Connector/C client library for client server +communication. + +Note that the default driver for a ``mariadb://`` connection URI continues to +be ``mysqldb``. ``mariadb+mariadbconnector://`` is required to use this driver. + +.. mariadb: https://github.com/mariadb-corporation/mariadb-connector-python + +""" # noqa +import re +from uuid import UUID as _python_UUID + +from .base import MySQLCompiler +from .base import MySQLDialect +from .base import MySQLExecutionContext +from ... import sql +from ... import util +from ...sql import sqltypes + + +mariadb_cpy_minimum_version = (1, 0, 1) + + +class _MariaDBUUID(sqltypes.UUID[sqltypes._UUID_RETURN]): + # work around JIRA issue + # https://jira.mariadb.org/browse/CONPY-270. When that issue is fixed, + # this type can be removed. 
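+    # Both processor branches below first decode byte values returned by the
+    # driver to ASCII text and then construct a uuid.UUID from it; when
+    # as_uuid is False the UUID is returned in its canonical string form.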
+ def result_processor(self, dialect, coltype): + if self.as_uuid: + + def process(value): + if value is not None: + if hasattr(value, "decode"): + value = value.decode("ascii") + value = _python_UUID(value) + return value + + return process + else: + + def process(value): + if value is not None: + if hasattr(value, "decode"): + value = value.decode("ascii") + value = str(_python_UUID(value)) + return value + + return process + + +class MySQLExecutionContext_mariadbconnector(MySQLExecutionContext): + _lastrowid = None + + def create_server_side_cursor(self): + return self._dbapi_connection.cursor(buffered=False) + + def create_default_cursor(self): + return self._dbapi_connection.cursor(buffered=True) + + def post_exec(self): + super().post_exec() + + self._rowcount = self.cursor.rowcount + + if self.isinsert and self.compiled.postfetch_lastrowid: + self._lastrowid = self.cursor.lastrowid + + def get_lastrowid(self): + return self._lastrowid + + +class MySQLCompiler_mariadbconnector(MySQLCompiler): + pass + + +class MySQLDialect_mariadbconnector(MySQLDialect): + driver = "mariadbconnector" + supports_statement_cache = True + + # set this to True at the module level to prevent the driver from running + # against a backend that server detects as MySQL. currently this appears to + # be unnecessary as MariaDB client libraries have always worked against + # MySQL databases. However, if this changes at some point, this can be + # adjusted, but PLEASE ADD A TEST in test/dialect/mysql/test_dialect.py if + # this change is made at some point to ensure the correct exception + # is raised at the correct point when running the driver against + # a MySQL backend. + # is_mariadb = True + + supports_unicode_statements = True + encoding = "utf8mb4" + convert_unicode = True + supports_sane_rowcount = True + supports_sane_multi_rowcount = True + supports_native_decimal = True + default_paramstyle = "qmark" + execution_ctx_cls = MySQLExecutionContext_mariadbconnector + statement_compiler = MySQLCompiler_mariadbconnector + + supports_server_side_cursors = True + + colspecs = util.update_copy( + MySQLDialect.colspecs, {sqltypes.Uuid: _MariaDBUUID} + ) + + @util.memoized_property + def _dbapi_version(self): + if self.dbapi and hasattr(self.dbapi, "__version__"): + return tuple( + [ + int(x) + for x in re.findall( + r"(\d+)(?:[-\.]?|$)", self.dbapi.__version__ + ) + ] + ) + else: + return (99, 99, 99) + + def __init__(self, **kwargs): + super().__init__(**kwargs) + self.paramstyle = "qmark" + if self.dbapi is not None: + if self._dbapi_version < mariadb_cpy_minimum_version: + raise NotImplementedError( + "The minimum required version for MariaDB " + "Connector/Python is %s" + % ".".join(str(x) for x in mariadb_cpy_minimum_version) + ) + + @classmethod + def import_dbapi(cls): + return __import__("mariadb") + + def is_disconnect(self, e, connection, cursor): + if super().is_disconnect(e, connection, cursor): + return True + elif isinstance(e, self.dbapi.Error): + str_e = str(e).lower() + return "not connected" in str_e or "isn't valid" in str_e + else: + return False + + def create_connect_args(self, url): + opts = url.translate_connect_args() + opts.update(url.query) + + int_params = [ + "connect_timeout", + "read_timeout", + "write_timeout", + "client_flag", + "port", + "pool_size", + ] + bool_params = [ + "local_infile", + "ssl_verify_cert", + "ssl", + "pool_reset_connection", + "compress", + ] + + for key in int_params: + util.coerce_kw_type(opts, key, int) + for key in bool_params: + 
util.coerce_kw_type(opts, key, bool) + + # FOUND_ROWS must be set in CLIENT_FLAGS to enable + # supports_sane_rowcount. + client_flag = opts.get("client_flag", 0) + if self.dbapi is not None: + try: + CLIENT_FLAGS = __import__( + self.dbapi.__name__ + ".constants.CLIENT" + ).constants.CLIENT + client_flag |= CLIENT_FLAGS.FOUND_ROWS + except (AttributeError, ImportError): + self.supports_sane_rowcount = False + opts["client_flag"] = client_flag + return [[], opts] + + def _extract_error_code(self, exception): + try: + rc = exception.errno + except: + rc = -1 + return rc + + def _detect_charset(self, connection): + return "utf8mb4" + + def get_isolation_level_values(self, dbapi_connection): + return ( + "SERIALIZABLE", + "READ UNCOMMITTED", + "READ COMMITTED", + "REPEATABLE READ", + "AUTOCOMMIT", + ) + + def set_isolation_level(self, connection, level): + if level == "AUTOCOMMIT": + connection.autocommit = True + else: + connection.autocommit = False + super().set_isolation_level(connection, level) + + def do_begin_twophase(self, connection, xid): + connection.execute( + sql.text("XA BEGIN :xid").bindparams( + sql.bindparam("xid", xid, literal_execute=True) + ) + ) + + def do_prepare_twophase(self, connection, xid): + connection.execute( + sql.text("XA END :xid").bindparams( + sql.bindparam("xid", xid, literal_execute=True) + ) + ) + connection.execute( + sql.text("XA PREPARE :xid").bindparams( + sql.bindparam("xid", xid, literal_execute=True) + ) + ) + + def do_rollback_twophase( + self, connection, xid, is_prepared=True, recover=False + ): + if not is_prepared: + connection.execute( + sql.text("XA END :xid").bindparams( + sql.bindparam("xid", xid, literal_execute=True) + ) + ) + connection.execute( + sql.text("XA ROLLBACK :xid").bindparams( + sql.bindparam("xid", xid, literal_execute=True) + ) + ) + + def do_commit_twophase( + self, connection, xid, is_prepared=True, recover=False + ): + if not is_prepared: + self.do_prepare_twophase(connection, xid) + connection.execute( + sql.text("XA COMMIT :xid").bindparams( + sql.bindparam("xid", xid, literal_execute=True) + ) + ) + + +dialect = MySQLDialect_mariadbconnector diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/dialects/mysql/mysqlconnector.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/dialects/mysql/mysqlconnector.py new file mode 100644 index 0000000000000000000000000000000000000000..faeae16abd58b915504f2ab8920ac2da5f8d3764 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/dialects/mysql/mysqlconnector.py @@ -0,0 +1,253 @@ +# dialects/mysql/mysqlconnector.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php +# mypy: ignore-errors + + +r""" +.. dialect:: mysql+mysqlconnector + :name: MySQL Connector/Python + :dbapi: myconnpy + :connectstring: mysql+mysqlconnector://:@[:]/ + :url: https://pypi.org/project/mysql-connector-python/ + +Driver Status +------------- + +MySQL Connector/Python is supported as of SQLAlchemy 2.0.39 to the +degree which the driver is functional. There are still ongoing issues +with features such as server side cursors which remain disabled until +upstream issues are repaired. + +.. 
warning:: The MySQL Connector/Python driver published by Oracle is subject + to frequent, major regressions of essential functionality such as being able + to correctly persist simple binary strings which indicate it is not well + tested. The SQLAlchemy project is not able to maintain this dialect fully as + regressions in the driver prevent it from being included in continuous + integration. + +.. versionchanged:: 2.0.39 + + The MySQL Connector/Python dialect has been updated to support the + latest version of this DBAPI. Previously, MySQL Connector/Python + was not fully supported. However, support remains limited due to ongoing + regressions introduced in this driver. + +Connecting to MariaDB with MySQL Connector/Python +-------------------------------------------------- + +MySQL Connector/Python may attempt to pass an incompatible collation to the +database when connecting to MariaDB. Experimentation has shown that using +``?charset=utf8mb4&collation=utfmb4_general_ci`` or similar MariaDB-compatible +charset/collation will allow connectivity. + + +""" # noqa + +import re + +from .base import BIT +from .base import MariaDBIdentifierPreparer +from .base import MySQLCompiler +from .base import MySQLDialect +from .base import MySQLExecutionContext +from .base import MySQLIdentifierPreparer +from .mariadb import MariaDBDialect +from ... import util + + +class MySQLExecutionContext_mysqlconnector(MySQLExecutionContext): + def create_server_side_cursor(self): + return self._dbapi_connection.cursor(buffered=False) + + def create_default_cursor(self): + return self._dbapi_connection.cursor(buffered=True) + + +class MySQLCompiler_mysqlconnector(MySQLCompiler): + def visit_mod_binary(self, binary, operator, **kw): + return ( + self.process(binary.left, **kw) + + " % " + + self.process(binary.right, **kw) + ) + + +class IdentifierPreparerCommon_mysqlconnector: + @property + def _double_percents(self): + return False + + @_double_percents.setter + def _double_percents(self, value): + pass + + def _escape_identifier(self, value): + value = value.replace(self.escape_quote, self.escape_to_quote) + return value + + +class MySQLIdentifierPreparer_mysqlconnector( + IdentifierPreparerCommon_mysqlconnector, MySQLIdentifierPreparer +): + pass + + +class MariaDBIdentifierPreparer_mysqlconnector( + IdentifierPreparerCommon_mysqlconnector, MariaDBIdentifierPreparer +): + pass + + +class _myconnpyBIT(BIT): + def result_processor(self, dialect, coltype): + """MySQL-connector already converts mysql bits, so.""" + + return None + + +class MySQLDialect_mysqlconnector(MySQLDialect): + driver = "mysqlconnector" + supports_statement_cache = True + + supports_sane_rowcount = True + supports_sane_multi_rowcount = True + + supports_native_decimal = True + + supports_native_bit = True + + # not until https://bugs.mysql.com/bug.php?id=117548 + supports_server_side_cursors = False + + default_paramstyle = "format" + statement_compiler = MySQLCompiler_mysqlconnector + + execution_ctx_cls = MySQLExecutionContext_mysqlconnector + + preparer = MySQLIdentifierPreparer_mysqlconnector + + colspecs = util.update_copy(MySQLDialect.colspecs, {BIT: _myconnpyBIT}) + + @classmethod + def import_dbapi(cls): + from mysql import connector + + return connector + + def do_ping(self, dbapi_connection): + dbapi_connection.ping(False) + return True + + def create_connect_args(self, url): + opts = url.translate_connect_args(username="user") + + opts.update(url.query) + + util.coerce_kw_type(opts, "allow_local_infile", bool) + 
util.coerce_kw_type(opts, "autocommit", bool) + util.coerce_kw_type(opts, "buffered", bool) + util.coerce_kw_type(opts, "client_flag", int) + util.coerce_kw_type(opts, "compress", bool) + util.coerce_kw_type(opts, "connection_timeout", int) + util.coerce_kw_type(opts, "connect_timeout", int) + util.coerce_kw_type(opts, "consume_results", bool) + util.coerce_kw_type(opts, "force_ipv6", bool) + util.coerce_kw_type(opts, "get_warnings", bool) + util.coerce_kw_type(opts, "pool_reset_session", bool) + util.coerce_kw_type(opts, "pool_size", int) + util.coerce_kw_type(opts, "raise_on_warnings", bool) + util.coerce_kw_type(opts, "raw", bool) + util.coerce_kw_type(opts, "ssl_verify_cert", bool) + util.coerce_kw_type(opts, "use_pure", bool) + util.coerce_kw_type(opts, "use_unicode", bool) + + # note that "buffered" is set to False by default in MySQL/connector + # python. If you set it to True, then there is no way to get a server + # side cursor because the logic is written to disallow that. + + # leaving this at True until + # https://bugs.mysql.com/bug.php?id=117548 can be fixed + opts["buffered"] = True + + # FOUND_ROWS must be set in ClientFlag to enable + # supports_sane_rowcount. + if self.dbapi is not None: + try: + from mysql.connector.constants import ClientFlag + + client_flags = opts.get( + "client_flags", ClientFlag.get_default() + ) + client_flags |= ClientFlag.FOUND_ROWS + opts["client_flags"] = client_flags + except Exception: + pass + + return [[], opts] + + @util.memoized_property + def _mysqlconnector_version_info(self): + if self.dbapi and hasattr(self.dbapi, "__version__"): + m = re.match(r"(\d+)\.(\d+)(?:\.(\d+))?", self.dbapi.__version__) + if m: + return tuple(int(x) for x in m.group(1, 2, 3) if x is not None) + + def _detect_charset(self, connection): + return connection.connection.charset + + def _extract_error_code(self, exception): + return exception.errno + + def is_disconnect(self, e, connection, cursor): + errnos = (2006, 2013, 2014, 2045, 2055, 2048) + exceptions = ( + self.dbapi.OperationalError, + self.dbapi.InterfaceError, + self.dbapi.ProgrammingError, + ) + if isinstance(e, exceptions): + return ( + e.errno in errnos + or "MySQL Connection not available." 
in str(e) + or "Connection to MySQL is not available" in str(e) + ) + else: + return False + + def _compat_fetchall(self, rp, charset=None): + return rp.fetchall() + + def _compat_fetchone(self, rp, charset=None): + return rp.fetchone() + + def get_isolation_level_values(self, dbapi_connection): + return ( + "SERIALIZABLE", + "READ UNCOMMITTED", + "READ COMMITTED", + "REPEATABLE READ", + "AUTOCOMMIT", + ) + + def set_isolation_level(self, connection, level): + if level == "AUTOCOMMIT": + connection.autocommit = True + else: + connection.autocommit = False + super().set_isolation_level(connection, level) + + +class MariaDBDialect_mysqlconnector( + MariaDBDialect, MySQLDialect_mysqlconnector +): + supports_statement_cache = True + _allows_uuid_binds = False + preparer = MariaDBIdentifierPreparer_mysqlconnector + + +dialect = MySQLDialect_mysqlconnector +mariadb_dialect = MariaDBDialect_mysqlconnector diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/dialects/mysql/mysqldb.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/dialects/mysql/mysqldb.py new file mode 100644 index 0000000000000000000000000000000000000000..3cf56c1fd0942122193202550ffe8167b308630f --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/dialects/mysql/mysqldb.py @@ -0,0 +1,305 @@ +# dialects/mysql/mysqldb.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php +# mypy: ignore-errors + + +""" + +.. dialect:: mysql+mysqldb + :name: mysqlclient (maintained fork of MySQL-Python) + :dbapi: mysqldb + :connectstring: mysql+mysqldb://:@[:]/ + :url: https://pypi.org/project/mysqlclient/ + +Driver Status +------------- + +The mysqlclient DBAPI is a maintained fork of the +`MySQL-Python `_ DBAPI +that is no longer maintained. `mysqlclient`_ supports Python 2 and Python 3 +and is very stable. + +.. _mysqlclient: https://github.com/PyMySQL/mysqlclient-python + +.. _mysqldb_unicode: + +Unicode +------- + +Please see :ref:`mysql_unicode` for current recommendations on unicode +handling. + +.. _mysqldb_ssl: + +SSL Connections +---------------- + +The mysqlclient and PyMySQL DBAPIs accept an additional dictionary under the +key "ssl", which may be specified using the +:paramref:`_sa.create_engine.connect_args` dictionary:: + + engine = create_engine( + "mysql+mysqldb://scott:tiger@192.168.0.134/test", + connect_args={ + "ssl": { + "ca": "/home/gord/client-ssl/ca.pem", + "cert": "/home/gord/client-ssl/client-cert.pem", + "key": "/home/gord/client-ssl/client-key.pem", + } + }, + ) + +For convenience, the following keys may also be specified inline within the URL +where they will be interpreted into the "ssl" dictionary automatically: +"ssl_ca", "ssl_cert", "ssl_key", "ssl_capath", "ssl_cipher", +"ssl_check_hostname". An example is as follows:: + + connection_uri = ( + "mysql+mysqldb://scott:tiger@192.168.0.134/test" + "?ssl_ca=/home/gord/client-ssl/ca.pem" + "&ssl_cert=/home/gord/client-ssl/client-cert.pem" + "&ssl_key=/home/gord/client-ssl/client-key.pem" + ) + +.. seealso:: + + :ref:`pymysql_ssl` in the PyMySQL dialect + + +Using MySQLdb with Google Cloud SQL +----------------------------------- + +Google Cloud SQL now recommends use of the MySQLdb dialect. Connect +using a URL like the following: + +.. 
sourcecode:: text + + mysql+mysqldb://root@/?unix_socket=/cloudsql/: + +Server Side Cursors +------------------- + +The mysqldb dialect supports server-side cursors. See :ref:`mysql_ss_cursors`. + +""" + +import re + +from .base import MySQLCompiler +from .base import MySQLDialect +from .base import MySQLExecutionContext +from .base import MySQLIdentifierPreparer +from .base import TEXT +from ... import sql +from ... import util + + +class MySQLExecutionContext_mysqldb(MySQLExecutionContext): + pass + + +class MySQLCompiler_mysqldb(MySQLCompiler): + pass + + +class MySQLDialect_mysqldb(MySQLDialect): + driver = "mysqldb" + supports_statement_cache = True + supports_unicode_statements = True + supports_sane_rowcount = True + supports_sane_multi_rowcount = True + + supports_native_decimal = True + + default_paramstyle = "format" + execution_ctx_cls = MySQLExecutionContext_mysqldb + statement_compiler = MySQLCompiler_mysqldb + preparer = MySQLIdentifierPreparer + + def __init__(self, **kwargs): + super().__init__(**kwargs) + self._mysql_dbapi_version = ( + self._parse_dbapi_version(self.dbapi.__version__) + if self.dbapi is not None and hasattr(self.dbapi, "__version__") + else (0, 0, 0) + ) + + def _parse_dbapi_version(self, version): + m = re.match(r"(\d+)\.(\d+)(?:\.(\d+))?", version) + if m: + return tuple(int(x) for x in m.group(1, 2, 3) if x is not None) + else: + return (0, 0, 0) + + @util.langhelpers.memoized_property + def supports_server_side_cursors(self): + try: + cursors = __import__("MySQLdb.cursors").cursors + self._sscursor = cursors.SSCursor + return True + except (ImportError, AttributeError): + return False + + @classmethod + def import_dbapi(cls): + return __import__("MySQLdb") + + def on_connect(self): + super_ = super().on_connect() + + def on_connect(conn): + if super_ is not None: + super_(conn) + + charset_name = conn.character_set_name() + + if charset_name is not None: + cursor = conn.cursor() + cursor.execute("SET NAMES %s" % charset_name) + cursor.close() + + return on_connect + + def do_ping(self, dbapi_connection): + dbapi_connection.ping() + return True + + def do_executemany(self, cursor, statement, parameters, context=None): + rowcount = cursor.executemany(statement, parameters) + if context is not None: + context._rowcount = rowcount + + def _check_unicode_returns(self, connection): + # work around issue fixed in + # https://github.com/farcepest/MySQLdb1/commit/cd44524fef63bd3fcb71947392326e9742d520e8 + # specific issue w/ the utf8mb4_bin collation and unicode returns + + collation = connection.exec_driver_sql( + "show collation where %s = 'utf8mb4' and %s = 'utf8mb4_bin'" + % ( + self.identifier_preparer.quote("Charset"), + self.identifier_preparer.quote("Collation"), + ) + ).scalar() + has_utf8mb4_bin = self.server_version_info > (5,) and collation + if has_utf8mb4_bin: + additional_tests = [ + sql.collate( + sql.cast( + sql.literal_column("'test collated returns'"), + TEXT(charset="utf8mb4"), + ), + "utf8mb4_bin", + ) + ] + else: + additional_tests = [] + return super()._check_unicode_returns(connection, additional_tests) + + def create_connect_args(self, url, _translate_args=None): + if _translate_args is None: + _translate_args = dict( + database="db", username="user", password="passwd" + ) + + opts = url.translate_connect_args(**_translate_args) + opts.update(url.query) + + util.coerce_kw_type(opts, "compress", bool) + util.coerce_kw_type(opts, "connect_timeout", int) + util.coerce_kw_type(opts, "read_timeout", int) + util.coerce_kw_type(opts, 
"write_timeout", int) + util.coerce_kw_type(opts, "client_flag", int) + util.coerce_kw_type(opts, "local_infile", bool) + # Note: using either of the below will cause all strings to be + # returned as Unicode, both in raw SQL operations and with column + # types like String and MSString. + util.coerce_kw_type(opts, "use_unicode", bool) + util.coerce_kw_type(opts, "charset", str) + + # Rich values 'cursorclass' and 'conv' are not supported via + # query string. + + ssl = {} + keys = [ + ("ssl_ca", str), + ("ssl_key", str), + ("ssl_cert", str), + ("ssl_capath", str), + ("ssl_cipher", str), + ("ssl_check_hostname", bool), + ] + for key, kw_type in keys: + if key in opts: + ssl[key[4:]] = opts[key] + util.coerce_kw_type(ssl, key[4:], kw_type) + del opts[key] + if ssl: + opts["ssl"] = ssl + + # FOUND_ROWS must be set in CLIENT_FLAGS to enable + # supports_sane_rowcount. + client_flag = opts.get("client_flag", 0) + + client_flag_found_rows = self._found_rows_client_flag() + if client_flag_found_rows is not None: + client_flag |= client_flag_found_rows + opts["client_flag"] = client_flag + return [[], opts] + + def _found_rows_client_flag(self): + if self.dbapi is not None: + try: + CLIENT_FLAGS = __import__( + self.dbapi.__name__ + ".constants.CLIENT" + ).constants.CLIENT + except (AttributeError, ImportError): + return None + else: + return CLIENT_FLAGS.FOUND_ROWS + else: + return None + + def _extract_error_code(self, exception): + return exception.args[0] + + def _detect_charset(self, connection): + """Sniff out the character set in use for connection results.""" + + try: + # note: the SQL here would be + # "SHOW VARIABLES LIKE 'character_set%%'" + cset_name = connection.connection.character_set_name + except AttributeError: + util.warn( + "No 'character_set_name' can be detected with " + "this MySQL-Python version; " + "please upgrade to a recent version of MySQL-Python. " + "Assuming latin1." + ) + return "latin1" + else: + return cset_name() + + def get_isolation_level_values(self, dbapi_connection): + return ( + "SERIALIZABLE", + "READ UNCOMMITTED", + "READ COMMITTED", + "REPEATABLE READ", + "AUTOCOMMIT", + ) + + def set_isolation_level(self, dbapi_connection, level): + if level == "AUTOCOMMIT": + dbapi_connection.autocommit(True) + else: + dbapi_connection.autocommit(False) + super().set_isolation_level(dbapi_connection, level) + + +dialect = MySQLDialect_mysqldb diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/dialects/mysql/provision.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/dialects/mysql/provision.py new file mode 100644 index 0000000000000000000000000000000000000000..46070848cb11743e5874366e2498ff8afa869b42 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/dialects/mysql/provision.py @@ -0,0 +1,114 @@ +# dialects/mysql/provision.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php +# mypy: ignore-errors + +from ... 
import exc +from ...testing.provision import configure_follower +from ...testing.provision import create_db +from ...testing.provision import drop_db +from ...testing.provision import generate_driver_url +from ...testing.provision import temp_table_keyword_args +from ...testing.provision import upsert + + +@generate_driver_url.for_db("mysql", "mariadb") +def generate_driver_url(url, driver, query_str): + backend = url.get_backend_name() + + # NOTE: at the moment, tests are running mariadbconnector + # against both mariadb and mysql backends. if we want this to be + # limited, do the decision making here to reject a "mysql+mariadbconnector" + # URL. Optionally also re-enable the module level + # MySQLDialect_mariadbconnector.is_mysql flag as well, which must include + # a unit and/or functional test. + + # all the Jenkins tests have been running mysqlclient Python library + # built against mariadb client drivers for years against all MySQL / + # MariaDB versions going back to MySQL 5.6, currently they can talk + # to MySQL databases without problems. + + if backend == "mysql": + dialect_cls = url.get_dialect() + if dialect_cls._is_mariadb_from_url(url): + backend = "mariadb" + + new_url = url.set( + drivername="%s+%s" % (backend, driver) + ).update_query_string(query_str) + + if driver == "mariadbconnector": + new_url = new_url.difference_update_query(["charset"]) + elif driver == "mysqlconnector": + new_url = new_url.update_query_pairs( + [("collation", "utf8mb4_general_ci")] + ) + + try: + new_url.get_dialect() + except exc.NoSuchModuleError: + return None + else: + return new_url + + +@create_db.for_db("mysql", "mariadb") +def _mysql_create_db(cfg, eng, ident): + with eng.begin() as conn: + try: + _mysql_drop_db(cfg, conn, ident) + except Exception: + pass + + with eng.begin() as conn: + conn.exec_driver_sql( + "CREATE DATABASE %s CHARACTER SET utf8mb4" % ident + ) + conn.exec_driver_sql( + "CREATE DATABASE %s_test_schema CHARACTER SET utf8mb4" % ident + ) + conn.exec_driver_sql( + "CREATE DATABASE %s_test_schema_2 CHARACTER SET utf8mb4" % ident + ) + + +@configure_follower.for_db("mysql", "mariadb") +def _mysql_configure_follower(config, ident): + config.test_schema = "%s_test_schema" % ident + config.test_schema_2 = "%s_test_schema_2" % ident + + +@drop_db.for_db("mysql", "mariadb") +def _mysql_drop_db(cfg, eng, ident): + with eng.begin() as conn: + conn.exec_driver_sql("DROP DATABASE %s_test_schema" % ident) + conn.exec_driver_sql("DROP DATABASE %s_test_schema_2" % ident) + conn.exec_driver_sql("DROP DATABASE %s" % ident) + + +@temp_table_keyword_args.for_db("mysql", "mariadb") +def _mysql_temp_table_keyword_args(cfg, eng): + return {"prefixes": ["TEMPORARY"]} + + +@upsert.for_db("mariadb") +def _upsert( + cfg, table, returning, *, set_lambda=None, sort_by_parameter_order=False +): + from sqlalchemy.dialects.mysql import insert + + stmt = insert(table) + + if set_lambda: + stmt = stmt.on_duplicate_key_update(**set_lambda(stmt.inserted)) + else: + pk1 = table.primary_key.c[0] + stmt = stmt.on_duplicate_key_update({pk1.key: pk1}) + + stmt = stmt.returning( + *returning, sort_by_parameter_order=sort_by_parameter_order + ) + return stmt diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/dialects/mysql/pymysql.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/dialects/mysql/pymysql.py new file mode 100644 index 0000000000000000000000000000000000000000..67cb4cdd766c40661d89d4532a23c17c7126bf3d --- 
/dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/dialects/mysql/pymysql.py @@ -0,0 +1,136 @@ +# dialects/mysql/pymysql.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php +# mypy: ignore-errors + + +r""" + +.. dialect:: mysql+pymysql + :name: PyMySQL + :dbapi: pymysql + :connectstring: mysql+pymysql://:@/[?] + :url: https://pymysql.readthedocs.io/ + +Unicode +------- + +Please see :ref:`mysql_unicode` for current recommendations on unicode +handling. + +.. _pymysql_ssl: + +SSL Connections +------------------ + +The PyMySQL DBAPI accepts the same SSL arguments as that of MySQLdb, +described at :ref:`mysqldb_ssl`. See that section for additional examples. + +If the server uses an automatically-generated certificate that is self-signed +or does not match the host name (as seen from the client), it may also be +necessary to indicate ``ssl_check_hostname=false`` in PyMySQL:: + + connection_uri = ( + "mysql+pymysql://scott:tiger@192.168.0.134/test" + "?ssl_ca=/home/gord/client-ssl/ca.pem" + "&ssl_cert=/home/gord/client-ssl/client-cert.pem" + "&ssl_key=/home/gord/client-ssl/client-key.pem" + "&ssl_check_hostname=false" + ) + +MySQL-Python Compatibility +-------------------------- + +The pymysql DBAPI is a pure Python port of the MySQL-python (MySQLdb) driver, +and targets 100% compatibility. Most behavioral notes for MySQL-python apply +to the pymysql driver as well. + +""" # noqa + +from .mysqldb import MySQLDialect_mysqldb +from ...util import langhelpers + + +class MySQLDialect_pymysql(MySQLDialect_mysqldb): + driver = "pymysql" + supports_statement_cache = True + + description_encoding = None + + @langhelpers.memoized_property + def supports_server_side_cursors(self): + try: + cursors = __import__("pymysql.cursors").cursors + self._sscursor = cursors.SSCursor + return True + except (ImportError, AttributeError): + return False + + @classmethod + def import_dbapi(cls): + return __import__("pymysql") + + @langhelpers.memoized_property + def _send_false_to_ping(self): + """determine if pymysql has deprecated, changed the default of, + or removed the 'reconnect' argument of connection.ping(). + + See #10492 and + https://github.com/PyMySQL/mysqlclient/discussions/651#discussioncomment-7308971 + for background. 
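The ping probing discussed above matters mainly for connection-pool liveness checks. As a rough illustration only (the DSN below is a placeholder, not part of this module), enabling SQLAlchemy's ``pool_pre_ping`` option causes ``do_ping()`` to run on each connection checkout, reconnecting transparently if the server has gone away::

    from sqlalchemy import create_engine, text

    # hypothetical credentials/host; pool_pre_ping makes the pool call
    # the dialect's do_ping() before handing out each connection
    engine = create_engine(
        "mysql+pymysql://scott:tiger@localhost/test",
        pool_pre_ping=True,
    )

    with engine.connect() as conn:
        print(conn.scalar(text("SELECT VERSION()")))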
+ + """ # noqa: E501 + + try: + Connection = __import__( + "pymysql.connections" + ).connections.Connection + except (ImportError, AttributeError): + return True + else: + insp = langhelpers.get_callable_argspec(Connection.ping) + try: + reconnect_arg = insp.args[1] + except IndexError: + return False + else: + return reconnect_arg == "reconnect" and ( + not insp.defaults or insp.defaults[0] is not False + ) + + def do_ping(self, dbapi_connection): + if self._send_false_to_ping: + dbapi_connection.ping(False) + else: + dbapi_connection.ping() + + return True + + def create_connect_args(self, url, _translate_args=None): + if _translate_args is None: + _translate_args = dict(username="user") + return super().create_connect_args( + url, _translate_args=_translate_args + ) + + def is_disconnect(self, e, connection, cursor): + if super().is_disconnect(e, connection, cursor): + return True + elif isinstance(e, self.dbapi.Error): + str_e = str(e).lower() + return ( + "already closed" in str_e or "connection was killed" in str_e + ) + else: + return False + + def _extract_error_code(self, exception): + if isinstance(exception.args[0], Exception): + exception = exception.args[0] + return exception.args[0] + + +dialect = MySQLDialect_pymysql diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/dialects/mysql/pyodbc.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/dialects/mysql/pyodbc.py new file mode 100644 index 0000000000000000000000000000000000000000..6d44bd3837067715424398a5ebcbf7bcb0d55569 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/dialects/mysql/pyodbc.py @@ -0,0 +1,139 @@ +# dialects/mysql/pyodbc.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php +# mypy: ignore-errors + + +r""" + + +.. dialect:: mysql+pyodbc + :name: PyODBC + :dbapi: pyodbc + :connectstring: mysql+pyodbc://:@ + :url: https://pypi.org/project/pyodbc/ + +.. note:: + + The PyODBC for MySQL dialect is **not tested as part of + SQLAlchemy's continuous integration**. + The recommended MySQL dialects are mysqlclient and PyMySQL. + However, if you want to use the mysql+pyodbc dialect and require + full support for ``utf8mb4`` characters (including supplementary + characters like emoji) be sure to use a current release of + MySQL Connector/ODBC and specify the "ANSI" (**not** "Unicode") + version of the driver in your DSN or connection string. + +Pass through exact pyodbc connection string:: + + import urllib + + connection_string = ( + "DRIVER=MySQL ODBC 8.0 ANSI Driver;" + "SERVER=localhost;" + "PORT=3307;" + "DATABASE=mydb;" + "UID=root;" + "PWD=(whatever);" + "charset=utf8mb4;" + ) + params = urllib.parse.quote_plus(connection_string) + connection_uri = "mysql+pyodbc:///?odbc_connect=%s" % params + +""" # noqa + +import re + +from .base import MySQLDialect +from .base import MySQLExecutionContext +from .types import TIME +from ... import exc +from ... 
import util +from ...connectors.pyodbc import PyODBCConnector +from ...sql.sqltypes import Time + + +class _pyodbcTIME(TIME): + def result_processor(self, dialect, coltype): + def process(value): + # pyodbc returns a datetime.time object; no need to convert + return value + + return process + + +class MySQLExecutionContext_pyodbc(MySQLExecutionContext): + def get_lastrowid(self): + cursor = self.create_cursor() + cursor.execute("SELECT LAST_INSERT_ID()") + lastrowid = cursor.fetchone()[0] + cursor.close() + return lastrowid + + +class MySQLDialect_pyodbc(PyODBCConnector, MySQLDialect): + supports_statement_cache = True + colspecs = util.update_copy(MySQLDialect.colspecs, {Time: _pyodbcTIME}) + supports_unicode_statements = True + execution_ctx_cls = MySQLExecutionContext_pyodbc + + pyodbc_driver_name = "MySQL" + + def _detect_charset(self, connection): + """Sniff out the character set in use for connection results.""" + + # Prefer 'character_set_results' for the current connection over the + # value in the driver. SET NAMES or individual variable SETs will + # change the charset without updating the driver's view of the world. + # + # If it's decided that issuing that sort of SQL leaves you SOL, then + # this can prefer the driver value. + + # set this to None as _fetch_setting attempts to use it (None is OK) + self._connection_charset = None + try: + value = self._fetch_setting(connection, "character_set_client") + if value: + return value + except exc.DBAPIError: + pass + + util.warn( + "Could not detect the connection character set. " + "Assuming latin1." + ) + return "latin1" + + def _get_server_version_info(self, connection): + return MySQLDialect._get_server_version_info(self, connection) + + def _extract_error_code(self, exception): + m = re.compile(r"\((\d+)\)").search(str(exception.args)) + c = m.group(1) + if c: + return int(c) + else: + return None + + def on_connect(self): + super_ = super().on_connect() + + def on_connect(conn): + if super_ is not None: + super_(conn) + + # declare Unicode encoding for pyodbc as per + # https://github.com/mkleehammer/pyodbc/wiki/Unicode + pyodbc_SQL_CHAR = 1 # pyodbc.SQL_CHAR + pyodbc_SQL_WCHAR = -8 # pyodbc.SQL_WCHAR + conn.setdecoding(pyodbc_SQL_CHAR, encoding="utf-8") + conn.setdecoding(pyodbc_SQL_WCHAR, encoding="utf-8") + conn.setencoding(encoding="utf-8") + + return on_connect + + +dialect = MySQLDialect_pyodbc diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/dialects/mysql/reflection.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/dialects/mysql/reflection.py new file mode 100644 index 0000000000000000000000000000000000000000..d62390bb8457d63da4eb3e02fff96bc1e499001b --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/dialects/mysql/reflection.py @@ -0,0 +1,677 @@ +# dialects/mysql/reflection.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php +# mypy: ignore-errors + + +import re + +from .enumerated import ENUM +from .enumerated import SET +from .types import DATETIME +from .types import TIME +from .types import TIMESTAMP +from ... import log +from ... import types as sqltypes +from ... 
import util + + +class ReflectedState: + """Stores raw information about a SHOW CREATE TABLE statement.""" + + def __init__(self): + self.columns = [] + self.table_options = {} + self.table_name = None + self.keys = [] + self.fk_constraints = [] + self.ck_constraints = [] + + +@log.class_logger +class MySQLTableDefinitionParser: + """Parses the results of a SHOW CREATE TABLE statement.""" + + def __init__(self, dialect, preparer): + self.dialect = dialect + self.preparer = preparer + self._prep_regexes() + + def parse(self, show_create, charset): + state = ReflectedState() + state.charset = charset + for line in re.split(r"\r?\n", show_create): + if line.startswith(" " + self.preparer.initial_quote): + self._parse_column(line, state) + # a regular table options line + elif line.startswith(") "): + self._parse_table_options(line, state) + # an ANSI-mode table options line + elif line == ")": + pass + elif line.startswith("CREATE "): + self._parse_table_name(line, state) + elif "PARTITION" in line: + self._parse_partition_options(line, state) + # Not present in real reflection, but may be if + # loading from a file. + elif not line: + pass + else: + type_, spec = self._parse_constraints(line) + if type_ is None: + util.warn("Unknown schema content: %r" % line) + elif type_ == "key": + state.keys.append(spec) + elif type_ == "fk_constraint": + state.fk_constraints.append(spec) + elif type_ == "ck_constraint": + state.ck_constraints.append(spec) + else: + pass + return state + + def _check_view(self, sql: str) -> bool: + return bool(self._re_is_view.match(sql)) + + def _parse_constraints(self, line): + """Parse a KEY or CONSTRAINT line. + + :param line: A line of SHOW CREATE TABLE output + """ + + # KEY + m = self._re_key.match(line) + if m: + spec = m.groupdict() + # convert columns into name, length pairs + # NOTE: we may want to consider SHOW INDEX as the + # format of indexes in MySQL becomes more complex + spec["columns"] = self._parse_keyexprs(spec["columns"]) + if spec["version_sql"]: + m2 = self._re_key_version_sql.match(spec["version_sql"]) + if m2 and m2.groupdict()["parser"]: + spec["parser"] = m2.groupdict()["parser"] + if spec["parser"]: + spec["parser"] = self.preparer.unformat_identifiers( + spec["parser"] + )[0] + return "key", spec + + # FOREIGN KEY CONSTRAINT + m = self._re_fk_constraint.match(line) + if m: + spec = m.groupdict() + spec["table"] = self.preparer.unformat_identifiers(spec["table"]) + spec["local"] = [c[0] for c in self._parse_keyexprs(spec["local"])] + spec["foreign"] = [ + c[0] for c in self._parse_keyexprs(spec["foreign"]) + ] + return "fk_constraint", spec + + # CHECK constraint + m = self._re_ck_constraint.match(line) + if m: + spec = m.groupdict() + return "ck_constraint", spec + + # PARTITION and SUBPARTITION + m = self._re_partition.match(line) + if m: + # Punt! + return "partition", line + + # No match. + return (None, line) + + def _parse_table_name(self, line, state): + """Extract the table name. + + :param line: The first line of SHOW CREATE TABLE + """ + + regex, cleanup = self._pr_name + m = regex.match(line) + if m: + state.table_name = cleanup(m.group("name")) + + def _parse_table_options(self, line, state): + """Build a dictionary of all reflected table-level options. + + :param line: The final line of SHOW CREATE TABLE output. 
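To see how the line classification above plays out, the parser can be driven directly with a SHOW CREATE TABLE string. This is a non-public, internal API and the sketch below (table DDL and names are made up) only illustrates the control flow; real reflection goes through the dialect::

    from sqlalchemy.dialects.mysql.base import MySQLDialect
    from sqlalchemy.dialects.mysql.reflection import MySQLTableDefinitionParser

    dialect = MySQLDialect()
    parser = MySQLTableDefinitionParser(dialect, dialect.identifier_preparer)

    ddl = (
        "CREATE TABLE `t` (\n"
        "  `id` int(11) NOT NULL AUTO_INCREMENT,\n"
        "  `name` varchar(30) DEFAULT NULL,\n"
        "  PRIMARY KEY (`id`)\n"
        ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4"
    )

    state = parser.parse(ddl, "utf8mb4")
    print(state.table_name)                         # parsed table name
    print([col["name"] for col in state.columns])   # column dicts
    print(state.keys, state.table_options)          # index specs, options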
+ """ + + options = {} + + if line and line != ")": + rest_of_line = line + for regex, cleanup in self._pr_options: + m = regex.search(rest_of_line) + if not m: + continue + directive, value = m.group("directive"), m.group("val") + if cleanup: + value = cleanup(value) + options[directive.lower()] = value + rest_of_line = regex.sub("", rest_of_line) + + for nope in ("auto_increment", "data directory", "index directory"): + options.pop(nope, None) + + for opt, val in options.items(): + state.table_options["%s_%s" % (self.dialect.name, opt)] = val + + def _parse_partition_options(self, line, state): + options = {} + new_line = line[:] + + while new_line.startswith("(") or new_line.startswith(" "): + new_line = new_line[1:] + + for regex, cleanup in self._pr_options: + m = regex.search(new_line) + if not m or "PARTITION" not in regex.pattern: + continue + + directive = m.group("directive") + directive = directive.lower() + is_subpartition = directive == "subpartition" + + if directive == "partition" or is_subpartition: + new_line = new_line.replace(") */", "") + new_line = new_line.replace(",", "") + if is_subpartition and new_line.endswith(")"): + new_line = new_line[:-1] + if self.dialect.name == "mariadb" and new_line.endswith(")"): + if ( + "MAXVALUE" in new_line + or "MINVALUE" in new_line + or "ENGINE" in new_line + ): + # final line of MariaDB partition endswith ")" + new_line = new_line[:-1] + + defs = "%s_%s_definitions" % (self.dialect.name, directive) + options[defs] = new_line + + else: + directive = directive.replace(" ", "_") + value = m.group("val") + if cleanup: + value = cleanup(value) + options[directive] = value + break + + for opt, val in options.items(): + part_def = "%s_partition_definitions" % (self.dialect.name) + subpart_def = "%s_subpartition_definitions" % (self.dialect.name) + if opt == part_def or opt == subpart_def: + # builds a string of definitions + if opt not in state.table_options: + state.table_options[opt] = val + else: + state.table_options[opt] = "%s, %s" % ( + state.table_options[opt], + val, + ) + else: + state.table_options["%s_%s" % (self.dialect.name, opt)] = val + + def _parse_column(self, line, state): + """Extract column details. + + Falls back to a 'minimal support' variant if full parse fails. + + :param line: Any column-bearing line from SHOW CREATE TABLE + """ + + spec = None + m = self._re_column.match(line) + if m: + spec = m.groupdict() + spec["full"] = True + else: + m = self._re_column_loose.match(line) + if m: + spec = m.groupdict() + spec["full"] = False + if not spec: + util.warn("Unknown column definition %r" % line) + return + if not spec["full"]: + util.warn("Incomplete reflection of column definition %r" % line) + + name, type_, args = spec["name"], spec["coltype"], spec["arg"] + + try: + col_type = self.dialect.ischema_names[type_] + except KeyError: + util.warn( + "Did not recognize type '%s' of column '%s'" % (type_, name) + ) + col_type = sqltypes.NullType + + # Column type positional arguments eg. 
varchar(32) + if args is None or args == "": + type_args = [] + elif args[0] == "'" and args[-1] == "'": + type_args = self._re_csv_str.findall(args) + else: + type_args = [int(v) for v in self._re_csv_int.findall(args)] + + # Column type keyword options + type_kw = {} + + if issubclass(col_type, (DATETIME, TIME, TIMESTAMP)): + if type_args: + type_kw["fsp"] = type_args.pop(0) + + for kw in ("unsigned", "zerofill"): + if spec.get(kw, False): + type_kw[kw] = True + for kw in ("charset", "collate"): + if spec.get(kw, False): + type_kw[kw] = spec[kw] + if issubclass(col_type, (ENUM, SET)): + type_args = _strip_values(type_args) + + if issubclass(col_type, SET) and "" in type_args: + type_kw["retrieve_as_bitwise"] = True + + type_instance = col_type(*type_args, **type_kw) + + col_kw = {} + + # NOT NULL + col_kw["nullable"] = True + # this can be "NULL" in the case of TIMESTAMP + if spec.get("notnull", False) == "NOT NULL": + col_kw["nullable"] = False + # For generated columns, the nullability is marked in a different place + if spec.get("notnull_generated", False) == "NOT NULL": + col_kw["nullable"] = False + + # AUTO_INCREMENT + if spec.get("autoincr", False): + col_kw["autoincrement"] = True + elif issubclass(col_type, sqltypes.Integer): + col_kw["autoincrement"] = False + + # DEFAULT + default = spec.get("default", None) + + if default == "NULL": + # eliminates the need to deal with this later. + default = None + + comment = spec.get("comment", None) + + if comment is not None: + comment = cleanup_text(comment) + + sqltext = spec.get("generated") + if sqltext is not None: + computed = dict(sqltext=sqltext) + persisted = spec.get("persistence") + if persisted is not None: + computed["persisted"] = persisted == "STORED" + col_kw["computed"] = computed + + col_d = dict( + name=name, type=type_instance, default=default, comment=comment + ) + col_d.update(col_kw) + state.columns.append(col_d) + + def _describe_to_create(self, table_name, columns): + """Re-format DESCRIBE output as a SHOW CREATE TABLE string. + + DESCRIBE is a much simpler reflection and is sufficient for + reflecting views for runtime use. This method formats DDL + for columns only- keys are omitted. + + :param columns: A sequence of DESCRIBE or SHOW COLUMNS 6-tuples. + SHOW FULL COLUMNS FROM rows must be rearranged for use with + this function. 
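All of this parsing ultimately feeds SQLAlchemy's ordinary reflection API; in practice the details above are reached through ``inspect()``. A minimal sketch against a hypothetical database and table::

    from sqlalchemy import create_engine, inspect

    # placeholder DSN and table name
    engine = create_engine("mysql+pymysql://scott:tiger@localhost/test")
    insp = inspect(engine)

    for col in insp.get_columns("foo"):
        print(col["name"], col["type"], col["nullable"], col["default"])

    print(insp.get_indexes("foo"))
    print(insp.get_foreign_keys("foo"))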
+ """ + + buffer = [] + for row in columns: + (name, col_type, nullable, default, extra) = ( + row[i] for i in (0, 1, 2, 4, 5) + ) + + line = [" "] + line.append(self.preparer.quote_identifier(name)) + line.append(col_type) + if not nullable: + line.append("NOT NULL") + if default: + if "auto_increment" in default: + pass + elif col_type.startswith("timestamp") and default.startswith( + "C" + ): + line.append("DEFAULT") + line.append(default) + elif default == "NULL": + line.append("DEFAULT") + line.append(default) + else: + line.append("DEFAULT") + line.append("'%s'" % default.replace("'", "''")) + if extra: + line.append(extra) + + buffer.append(" ".join(line)) + + return "".join( + [ + ( + "CREATE TABLE %s (\n" + % self.preparer.quote_identifier(table_name) + ), + ",\n".join(buffer), + "\n) ", + ] + ) + + def _parse_keyexprs(self, identifiers): + """Unpack '"col"(2),"col" ASC'-ish strings into components.""" + + return [ + (colname, int(length) if length else None, modifiers) + for colname, length, modifiers in self._re_keyexprs.findall( + identifiers + ) + ] + + def _prep_regexes(self): + """Pre-compile regular expressions.""" + + self._re_columns = [] + self._pr_options = [] + + _final = self.preparer.final_quote + + quotes = dict( + zip( + ("iq", "fq", "esc_fq"), + [ + re.escape(s) + for s in ( + self.preparer.initial_quote, + _final, + self.preparer._escape_identifier(_final), + ) + ], + ) + ) + + self._pr_name = _pr_compile( + r"^CREATE (?:\w+ +)?TABLE +" + r"%(iq)s(?P(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +\($" % quotes, + self.preparer._unescape_identifier, + ) + + self._re_is_view = _re_compile(r"^CREATE(?! TABLE)(\s.*)?\sVIEW") + + # `col`,`col2`(32),`col3`(15) DESC + # + self._re_keyexprs = _re_compile( + r"(?:" + r"(?:%(iq)s((?:%(esc_fq)s|[^%(fq)s])+)%(fq)s)" + r"(?:\((\d+)\))?(?: +(ASC|DESC))?(?=\,|$))+" % quotes + ) + + # 'foo' or 'foo','bar' or 'fo,o','ba''a''r' + self._re_csv_str = _re_compile(r"\x27(?:\x27\x27|[^\x27])*\x27") + + # 123 or 123,456 + self._re_csv_int = _re_compile(r"\d+") + + # `colname` [type opts] + # (NOT NULL | NULL) + # DEFAULT ('value' | CURRENT_TIMESTAMP...) + # COMMENT 'comment' + # COLUMN_FORMAT (FIXED|DYNAMIC|DEFAULT) + # STORAGE (DISK|MEMORY) + self._re_column = _re_compile( + r" " + r"%(iq)s(?P(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +" + r"(?P\w+)" + r"(?:\((?P(?:\d+|\d+,\d+|" + r"(?:'(?:''|[^'])*',?)+))\))?" + r"(?: +(?PUNSIGNED))?" + r"(?: +(?PZEROFILL))?" + r"(?: +CHARACTER SET +(?P[\w_]+))?" + r"(?: +COLLATE +(?P[\w_]+))?" + r"(?: +(?P(?:NOT )?NULL))?" + r"(?: +DEFAULT +(?P" + r"(?:NULL|'(?:''|[^'])*'|\(.+?\)|[\-\w\.\(\)]+" + r"(?: +ON UPDATE [\-\w\.\(\)]+)?)" + r"))?" + r"(?: +(?:GENERATED ALWAYS)? ?AS +(?P\(" + r".*\))? ?(?PVIRTUAL|STORED)?" + r"(?: +(?P(?:NOT )?NULL))?" + r")?" + r"(?: +(?PAUTO_INCREMENT))?" + r"(?: +COMMENT +'(?P(?:''|[^'])*)')?" + r"(?: +COLUMN_FORMAT +(?P\w+))?" + r"(?: +STORAGE +(?P\w+))?" + r"(?: +(?P.*))?" + r",?$" % quotes + ) + + # Fallback, try to parse as little as possible + self._re_column_loose = _re_compile( + r" " + r"%(iq)s(?P(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +" + r"(?P\w+)" + r"(?:\((?P(?:\d+|\d+,\d+|\x27(?:\x27\x27|[^\x27])+\x27))\))?" + r".*?(?P(?:NOT )NULL)?" % quotes + ) + + # (PRIMARY|UNIQUE|FULLTEXT|SPATIAL) INDEX `name` (USING (BTREE|HASH))? + # (`col` (ASC|DESC)?, `col` (ASC|DESC)?) + # KEY_BLOCK_SIZE size | WITH PARSER name /*!50100 WITH PARSER name */ + self._re_key = _re_compile( + r" " + r"(?:(?P\S+) )?KEY" + r"(?: +%(iq)s(?P(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s)?" + r"(?: +USING +(?P\S+))?" 
+ r" +\((?P.+?)\)" + r"(?: +USING +(?P\S+))?" + r"(?: +KEY_BLOCK_SIZE *[ =]? *(?P\S+))?" + r"(?: +WITH PARSER +(?P\S+))?" + r"(?: +COMMENT +(?P(\x27\x27|\x27([^\x27])*?\x27)+))?" + r"(?: +/\*(?P.+)\*/ *)?" + r",?$" % quotes + ) + + # https://forums.mysql.com/read.php?20,567102,567111#msg-567111 + # It means if the MySQL version >= \d+, execute what's in the comment + self._re_key_version_sql = _re_compile( + r"\!\d+ " r"(?: *WITH PARSER +(?P\S+) *)?" + ) + + # CONSTRAINT `name` FOREIGN KEY (`local_col`) + # REFERENCES `remote` (`remote_col`) + # MATCH FULL | MATCH PARTIAL | MATCH SIMPLE + # ON DELETE CASCADE ON UPDATE RESTRICT + # + # unique constraints come back as KEYs + kw = quotes.copy() + kw["on"] = "RESTRICT|CASCADE|SET NULL|NO ACTION|SET DEFAULT" + self._re_fk_constraint = _re_compile( + r" " + r"CONSTRAINT +" + r"%(iq)s(?P(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +" + r"FOREIGN KEY +" + r"\((?P[^\)]+?)\) REFERENCES +" + r"(?P%(iq)s[^%(fq)s]+%(fq)s" + r"(?:\.%(iq)s[^%(fq)s]+%(fq)s)?) +" + r"\((?P(?:%(iq)s[^%(fq)s]+%(fq)s(?: *, *)?)+)\)" + r"(?: +(?PMATCH \w+))?" + r"(?: +ON DELETE (?P%(on)s))?" + r"(?: +ON UPDATE (?P%(on)s))?" % kw + ) + + # CONSTRAINT `CONSTRAINT_1` CHECK (`x` > 5)' + # testing on MariaDB 10.2 shows that the CHECK constraint + # is returned on a line by itself, so to match without worrying + # about parenthesis in the expression we go to the end of the line + self._re_ck_constraint = _re_compile( + r" " + r"CONSTRAINT +" + r"%(iq)s(?P(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +" + r"CHECK +" + r"\((?P.+)\),?" % kw + ) + + # PARTITION + # + # punt! + self._re_partition = _re_compile(r"(?:.*)(?:SUB)?PARTITION(?:.*)") + + # Table-level options (COLLATE, ENGINE, etc.) + # Do the string options first, since they have quoted + # strings we need to get rid of. + for option in _options_of_type_string: + self._add_option_string(option) + + for option in ( + "ENGINE", + "TYPE", + "AUTO_INCREMENT", + "AVG_ROW_LENGTH", + "CHARACTER SET", + "DEFAULT CHARSET", + "CHECKSUM", + "COLLATE", + "DELAY_KEY_WRITE", + "INSERT_METHOD", + "MAX_ROWS", + "MIN_ROWS", + "PACK_KEYS", + "ROW_FORMAT", + "KEY_BLOCK_SIZE", + "STATS_SAMPLE_PAGES", + ): + self._add_option_word(option) + + for option in ( + "PARTITION BY", + "SUBPARTITION BY", + "PARTITIONS", + "SUBPARTITIONS", + "PARTITION", + "SUBPARTITION", + ): + self._add_partition_option_word(option) + + self._add_option_regex("UNION", r"\([^\)]+\)") + self._add_option_regex("TABLESPACE", r".*? 
STORAGE DISK") + self._add_option_regex( + "RAID_TYPE", + r"\w+\s+RAID_CHUNKS\s*\=\s*\w+RAID_CHUNKSIZE\s*=\s*\w+", + ) + + _optional_equals = r"(?:\s*(?:=\s*)|\s+)" + + def _add_option_string(self, directive): + regex = r"(?P%s)%s" r"'(?P(?:[^']|'')*?)'(?!')" % ( + re.escape(directive), + self._optional_equals, + ) + self._pr_options.append(_pr_compile(regex, cleanup_text)) + + def _add_option_word(self, directive): + regex = r"(?P%s)%s" r"(?P\w+)" % ( + re.escape(directive), + self._optional_equals, + ) + self._pr_options.append(_pr_compile(regex)) + + def _add_partition_option_word(self, directive): + if directive == "PARTITION BY" or directive == "SUBPARTITION BY": + regex = r"(?%s)%s" r"(?P\w+.*)" % ( + re.escape(directive), + self._optional_equals, + ) + elif directive == "SUBPARTITIONS" or directive == "PARTITIONS": + regex = r"(?%s)%s" r"(?P\d+)" % ( + re.escape(directive), + self._optional_equals, + ) + else: + regex = r"(?%s)(?!\S)" % (re.escape(directive),) + self._pr_options.append(_pr_compile(regex)) + + def _add_option_regex(self, directive, regex): + regex = r"(?P%s)%s" r"(?P%s)" % ( + re.escape(directive), + self._optional_equals, + regex, + ) + self._pr_options.append(_pr_compile(regex)) + + +_options_of_type_string = ( + "COMMENT", + "DATA DIRECTORY", + "INDEX DIRECTORY", + "PASSWORD", + "CONNECTION", +) + + +def _pr_compile(regex, cleanup=None): + """Prepare a 2-tuple of compiled regex and callable.""" + + return (_re_compile(regex), cleanup) + + +def _re_compile(regex): + """Compile a string to regex, I and UNICODE.""" + + return re.compile(regex, re.I | re.UNICODE) + + +def _strip_values(values): + "Strip reflected values quotes" + strip_values = [] + for a in values: + if a[0:1] == '"' or a[0:1] == "'": + # strip enclosing quotes and unquote interior + a = a[1:-1].replace(a[0] * 2, a[0]) + strip_values.append(a) + return strip_values + + +def cleanup_text(raw_text: str) -> str: + if "\\" in raw_text: + raw_text = re.sub( + _control_char_regexp, lambda s: _control_char_map[s[0]], raw_text + ) + return raw_text.replace("''", "'") + + +_control_char_map = { + "\\\\": "\\", + "\\0": "\0", + "\\a": "\a", + "\\b": "\b", + "\\t": "\t", + "\\n": "\n", + "\\v": "\v", + "\\f": "\f", + "\\r": "\r", + # '\\e':'\e', +} +_control_char_regexp = re.compile( + "|".join(re.escape(k) for k in _control_char_map) +) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/dialects/mysql/reserved_words.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/dialects/mysql/reserved_words.py new file mode 100644 index 0000000000000000000000000000000000000000..34fecf42724bdbe796425aed50db7aec2e2ba635 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/dialects/mysql/reserved_words.py @@ -0,0 +1,571 @@ +# dialects/mysql/reserved_words.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php + +# generated using: +# https://gist.github.com/kkirsche/4f31f2153ed7a3248be1ec44ca6ddbc9 +# +# https://mariadb.com/kb/en/reserved-words/ +# includes: Reserved Words, Oracle Mode (separate set unioned) +# excludes: Exceptions, Function Names +# mypy: ignore-errors + +RESERVED_WORDS_MARIADB = { + "accessible", + "add", + "all", + "alter", + "analyze", + "and", + "as", + "asc", + "asensitive", + "before", + "between", + 
"bigint", + "binary", + "blob", + "both", + "by", + "call", + "cascade", + "case", + "change", + "char", + "character", + "check", + "collate", + "column", + "condition", + "constraint", + "continue", + "convert", + "create", + "cross", + "current_date", + "current_role", + "current_time", + "current_timestamp", + "current_user", + "cursor", + "database", + "databases", + "day_hour", + "day_microsecond", + "day_minute", + "day_second", + "dec", + "decimal", + "declare", + "default", + "delayed", + "delete", + "desc", + "describe", + "deterministic", + "distinct", + "distinctrow", + "div", + "do_domain_ids", + "double", + "drop", + "dual", + "each", + "else", + "elseif", + "enclosed", + "escaped", + "except", + "exists", + "exit", + "explain", + "false", + "fetch", + "float", + "float4", + "float8", + "for", + "force", + "foreign", + "from", + "fulltext", + "general", + "grant", + "group", + "having", + "high_priority", + "hour_microsecond", + "hour_minute", + "hour_second", + "if", + "ignore", + "ignore_domain_ids", + "ignore_server_ids", + "in", + "index", + "infile", + "inner", + "inout", + "insensitive", + "insert", + "int", + "int1", + "int2", + "int3", + "int4", + "int8", + "integer", + "intersect", + "interval", + "into", + "is", + "iterate", + "join", + "key", + "keys", + "kill", + "leading", + "leave", + "left", + "like", + "limit", + "linear", + "lines", + "load", + "localtime", + "localtimestamp", + "lock", + "long", + "longblob", + "longtext", + "loop", + "low_priority", + "master_heartbeat_period", + "master_ssl_verify_server_cert", + "match", + "maxvalue", + "mediumblob", + "mediumint", + "mediumtext", + "middleint", + "minute_microsecond", + "minute_second", + "mod", + "modifies", + "natural", + "no_write_to_binlog", + "not", + "null", + "numeric", + "offset", + "on", + "optimize", + "option", + "optionally", + "or", + "order", + "out", + "outer", + "outfile", + "over", + "page_checksum", + "parse_vcol_expr", + "partition", + "position", + "precision", + "primary", + "procedure", + "purge", + "range", + "read", + "read_write", + "reads", + "real", + "recursive", + "ref_system_id", + "references", + "regexp", + "release", + "rename", + "repeat", + "replace", + "require", + "resignal", + "restrict", + "return", + "returning", + "revoke", + "right", + "rlike", + "rows", + "row_number", + "schema", + "schemas", + "second_microsecond", + "select", + "sensitive", + "separator", + "set", + "show", + "signal", + "slow", + "smallint", + "spatial", + "specific", + "sql", + "sql_big_result", + "sql_calc_found_rows", + "sql_small_result", + "sqlexception", + "sqlstate", + "sqlwarning", + "ssl", + "starting", + "stats_auto_recalc", + "stats_persistent", + "stats_sample_pages", + "straight_join", + "table", + "terminated", + "then", + "tinyblob", + "tinyint", + "tinytext", + "to", + "trailing", + "trigger", + "true", + "undo", + "union", + "unique", + "unlock", + "unsigned", + "update", + "usage", + "use", + "using", + "utc_date", + "utc_time", + "utc_timestamp", + "values", + "varbinary", + "varchar", + "varcharacter", + "varying", + "when", + "where", + "while", + "window", + "with", + "write", + "xor", + "year_month", + "zerofill", +}.union( + { + "body", + "elsif", + "goto", + "history", + "others", + "package", + "period", + "raise", + "rowtype", + "system", + "system_time", + "versioning", + "without", + } +) + +# https://dev.mysql.com/doc/refman/8.3/en/keywords.html +# https://dev.mysql.com/doc/refman/8.0/en/keywords.html +# https://dev.mysql.com/doc/refman/5.7/en/keywords.html +# 
https://dev.mysql.com/doc/refman/5.6/en/keywords.html +# includes: MySQL x.0 Keywords and Reserved Words +# excludes: MySQL x.0 New Keywords and Reserved Words, +# MySQL x.0 Removed Keywords and Reserved Words +RESERVED_WORDS_MYSQL = { + "accessible", + "add", + "admin", + "all", + "alter", + "analyze", + "and", + "array", + "as", + "asc", + "asensitive", + "before", + "between", + "bigint", + "binary", + "blob", + "both", + "by", + "call", + "cascade", + "case", + "change", + "char", + "character", + "check", + "collate", + "column", + "condition", + "constraint", + "continue", + "convert", + "create", + "cross", + "cube", + "cume_dist", + "current_date", + "current_time", + "current_timestamp", + "current_user", + "cursor", + "database", + "databases", + "day_hour", + "day_microsecond", + "day_minute", + "day_second", + "dec", + "decimal", + "declare", + "default", + "delayed", + "delete", + "dense_rank", + "desc", + "describe", + "deterministic", + "distinct", + "distinctrow", + "div", + "double", + "drop", + "dual", + "each", + "else", + "elseif", + "empty", + "enclosed", + "escaped", + "except", + "exists", + "exit", + "explain", + "false", + "fetch", + "first_value", + "float", + "float4", + "float8", + "for", + "force", + "foreign", + "from", + "fulltext", + "function", + "general", + "generated", + "get", + "get_master_public_key", + "grant", + "group", + "grouping", + "groups", + "having", + "high_priority", + "hour_microsecond", + "hour_minute", + "hour_second", + "if", + "ignore", + "ignore_server_ids", + "in", + "index", + "infile", + "inner", + "inout", + "insensitive", + "insert", + "int", + "int1", + "int2", + "int3", + "int4", + "int8", + "integer", + "intersect", + "interval", + "into", + "io_after_gtids", + "io_before_gtids", + "is", + "iterate", + "join", + "json_table", + "key", + "keys", + "kill", + "lag", + "last_value", + "lateral", + "lead", + "leading", + "leave", + "left", + "like", + "limit", + "linear", + "lines", + "load", + "localtime", + "localtimestamp", + "lock", + "long", + "longblob", + "longtext", + "loop", + "low_priority", + "master_bind", + "master_heartbeat_period", + "master_ssl_verify_server_cert", + "match", + "maxvalue", + "mediumblob", + "mediumint", + "mediumtext", + "member", + "middleint", + "minute_microsecond", + "minute_second", + "mod", + "modifies", + "natural", + "no_write_to_binlog", + "not", + "nth_value", + "ntile", + "null", + "numeric", + "of", + "on", + "optimize", + "optimizer_costs", + "option", + "optionally", + "or", + "order", + "out", + "outer", + "outfile", + "over", + "parse_gcol_expr", + "parallel", + "partition", + "percent_rank", + "persist", + "persist_only", + "precision", + "primary", + "procedure", + "purge", + "qualify", + "range", + "rank", + "read", + "read_write", + "reads", + "real", + "recursive", + "references", + "regexp", + "release", + "rename", + "repeat", + "replace", + "require", + "resignal", + "restrict", + "return", + "revoke", + "right", + "rlike", + "role", + "row", + "row_number", + "rows", + "schema", + "schemas", + "second_microsecond", + "select", + "sensitive", + "separator", + "set", + "show", + "signal", + "slow", + "smallint", + "spatial", + "specific", + "sql", + "sql_after_gtids", + "sql_before_gtids", + "sql_big_result", + "sql_calc_found_rows", + "sql_small_result", + "sqlexception", + "sqlstate", + "sqlwarning", + "ssl", + "starting", + "stored", + "straight_join", + "system", + "table", + "terminated", + "then", + "tinyblob", + "tinyint", + "tinytext", + "to", + "trailing", + 
"trigger", + "true", + "undo", + "union", + "unique", + "unlock", + "unsigned", + "update", + "usage", + "use", + "using", + "utc_date", + "utc_time", + "utc_timestamp", + "values", + "varbinary", + "varchar", + "varcharacter", + "varying", + "virtual", + "when", + "where", + "while", + "window", + "with", + "write", + "xor", + "year_month", + "zerofill", +} diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/dialects/mysql/types.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/dialects/mysql/types.py new file mode 100644 index 0000000000000000000000000000000000000000..ace6824a7405cc11ec254d52785d915e1acadc0d --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/dialects/mysql/types.py @@ -0,0 +1,773 @@ +# dialects/mysql/types.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php +# mypy: ignore-errors + + +import datetime + +from ... import exc +from ... import util +from ...sql import sqltypes + + +class _NumericType: + """Base for MySQL numeric types. + + This is the base both for NUMERIC as well as INTEGER, hence + it's a mixin. + + """ + + def __init__(self, unsigned=False, zerofill=False, **kw): + self.unsigned = unsigned + self.zerofill = zerofill + super().__init__(**kw) + + def __repr__(self): + return util.generic_repr( + self, to_inspect=[_NumericType, sqltypes.Numeric] + ) + + +class _FloatType(_NumericType, sqltypes.Float): + def __init__(self, precision=None, scale=None, asdecimal=True, **kw): + if isinstance(self, (REAL, DOUBLE)) and ( + (precision is None and scale is not None) + or (precision is not None and scale is None) + ): + raise exc.ArgumentError( + "You must specify both precision and scale or omit " + "both altogether." + ) + super().__init__(precision=precision, asdecimal=asdecimal, **kw) + self.scale = scale + + def __repr__(self): + return util.generic_repr( + self, to_inspect=[_FloatType, _NumericType, sqltypes.Float] + ) + + +class _IntegerType(_NumericType, sqltypes.Integer): + def __init__(self, display_width=None, **kw): + self.display_width = display_width + super().__init__(**kw) + + def __repr__(self): + return util.generic_repr( + self, to_inspect=[_IntegerType, _NumericType, sqltypes.Integer] + ) + + +class _StringType(sqltypes.String): + """Base for MySQL string types.""" + + def __init__( + self, + charset=None, + collation=None, + ascii=False, # noqa + binary=False, + unicode=False, + national=False, + **kw, + ): + self.charset = charset + + # allow collate= or collation= + kw.setdefault("collation", kw.pop("collate", collation)) + + self.ascii = ascii + self.unicode = unicode + self.binary = binary + self.national = national + super().__init__(**kw) + + def __repr__(self): + return util.generic_repr( + self, to_inspect=[_StringType, sqltypes.String] + ) + + +class _MatchType(sqltypes.Float, sqltypes.MatchType): + def __init__(self, **kw): + # TODO: float arguments? + sqltypes.Float.__init__(self) + sqltypes.MatchType.__init__(self) + + +class NUMERIC(_NumericType, sqltypes.NUMERIC): + """MySQL NUMERIC type.""" + + __visit_name__ = "NUMERIC" + + def __init__(self, precision=None, scale=None, asdecimal=True, **kw): + """Construct a NUMERIC. + + :param precision: Total digits in this number. 
If scale and precision + are both None, values are stored to limits allowed by the server. + + :param scale: The number of digits after the decimal point. + + :param unsigned: a boolean, optional. + + :param zerofill: Optional. If true, values will be stored as strings + left-padded with zeros. Note that this does not effect the values + returned by the underlying database API, which continue to be + numeric. + + """ + super().__init__( + precision=precision, scale=scale, asdecimal=asdecimal, **kw + ) + + +class DECIMAL(_NumericType, sqltypes.DECIMAL): + """MySQL DECIMAL type.""" + + __visit_name__ = "DECIMAL" + + def __init__(self, precision=None, scale=None, asdecimal=True, **kw): + """Construct a DECIMAL. + + :param precision: Total digits in this number. If scale and precision + are both None, values are stored to limits allowed by the server. + + :param scale: The number of digits after the decimal point. + + :param unsigned: a boolean, optional. + + :param zerofill: Optional. If true, values will be stored as strings + left-padded with zeros. Note that this does not effect the values + returned by the underlying database API, which continue to be + numeric. + + """ + super().__init__( + precision=precision, scale=scale, asdecimal=asdecimal, **kw + ) + + +class DOUBLE(_FloatType, sqltypes.DOUBLE): + """MySQL DOUBLE type.""" + + __visit_name__ = "DOUBLE" + + def __init__(self, precision=None, scale=None, asdecimal=True, **kw): + """Construct a DOUBLE. + + .. note:: + + The :class:`.DOUBLE` type by default converts from float + to Decimal, using a truncation that defaults to 10 digits. + Specify either ``scale=n`` or ``decimal_return_scale=n`` in order + to change this scale, or ``asdecimal=False`` to return values + directly as Python floating points. + + :param precision: Total digits in this number. If scale and precision + are both None, values are stored to limits allowed by the server. + + :param scale: The number of digits after the decimal point. + + :param unsigned: a boolean, optional. + + :param zerofill: Optional. If true, values will be stored as strings + left-padded with zeros. Note that this does not effect the values + returned by the underlying database API, which continue to be + numeric. + + """ + super().__init__( + precision=precision, scale=scale, asdecimal=asdecimal, **kw + ) + + +class REAL(_FloatType, sqltypes.REAL): + """MySQL REAL type.""" + + __visit_name__ = "REAL" + + def __init__(self, precision=None, scale=None, asdecimal=True, **kw): + """Construct a REAL. + + .. note:: + + The :class:`.REAL` type by default converts from float + to Decimal, using a truncation that defaults to 10 digits. + Specify either ``scale=n`` or ``decimal_return_scale=n`` in order + to change this scale, or ``asdecimal=False`` to return values + directly as Python floating points. + + :param precision: Total digits in this number. If scale and precision + are both None, values are stored to limits allowed by the server. + + :param scale: The number of digits after the decimal point. + + :param unsigned: a boolean, optional. + + :param zerofill: Optional. If true, values will be stored as strings + left-padded with zeros. Note that this does not effect the values + returned by the underlying database API, which continue to be + numeric. 
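As a usage sketch (table and column names below are illustrative), these MySQL-specific numeric types take ``unsigned``, ``zerofill`` and ``display_width`` directly in a table definition::

    from sqlalchemy import Column, MetaData, Table
    from sqlalchemy.dialects.mysql import BIGINT, DOUBLE, INTEGER

    metadata = MetaData()

    measurement = Table(
        "measurement",
        metadata,
        Column("id", BIGINT(unsigned=True), primary_key=True),
        Column("sensor_id", INTEGER(display_width=11)),
        # DOUBLE given both precision and scale, per the rule above
        Column("value", DOUBLE(precision=10, scale=4)),
    )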
+ + """ + super().__init__( + precision=precision, scale=scale, asdecimal=asdecimal, **kw + ) + + +class FLOAT(_FloatType, sqltypes.FLOAT): + """MySQL FLOAT type.""" + + __visit_name__ = "FLOAT" + + def __init__(self, precision=None, scale=None, asdecimal=False, **kw): + """Construct a FLOAT. + + :param precision: Total digits in this number. If scale and precision + are both None, values are stored to limits allowed by the server. + + :param scale: The number of digits after the decimal point. + + :param unsigned: a boolean, optional. + + :param zerofill: Optional. If true, values will be stored as strings + left-padded with zeros. Note that this does not effect the values + returned by the underlying database API, which continue to be + numeric. + + """ + super().__init__( + precision=precision, scale=scale, asdecimal=asdecimal, **kw + ) + + def bind_processor(self, dialect): + return None + + +class INTEGER(_IntegerType, sqltypes.INTEGER): + """MySQL INTEGER type.""" + + __visit_name__ = "INTEGER" + + def __init__(self, display_width=None, **kw): + """Construct an INTEGER. + + :param display_width: Optional, maximum display width for this number. + + :param unsigned: a boolean, optional. + + :param zerofill: Optional. If true, values will be stored as strings + left-padded with zeros. Note that this does not effect the values + returned by the underlying database API, which continue to be + numeric. + + """ + super().__init__(display_width=display_width, **kw) + + +class BIGINT(_IntegerType, sqltypes.BIGINT): + """MySQL BIGINTEGER type.""" + + __visit_name__ = "BIGINT" + + def __init__(self, display_width=None, **kw): + """Construct a BIGINTEGER. + + :param display_width: Optional, maximum display width for this number. + + :param unsigned: a boolean, optional. + + :param zerofill: Optional. If true, values will be stored as strings + left-padded with zeros. Note that this does not effect the values + returned by the underlying database API, which continue to be + numeric. + + """ + super().__init__(display_width=display_width, **kw) + + +class MEDIUMINT(_IntegerType): + """MySQL MEDIUMINTEGER type.""" + + __visit_name__ = "MEDIUMINT" + + def __init__(self, display_width=None, **kw): + """Construct a MEDIUMINTEGER + + :param display_width: Optional, maximum display width for this number. + + :param unsigned: a boolean, optional. + + :param zerofill: Optional. If true, values will be stored as strings + left-padded with zeros. Note that this does not effect the values + returned by the underlying database API, which continue to be + numeric. + + """ + super().__init__(display_width=display_width, **kw) + + +class TINYINT(_IntegerType): + """MySQL TINYINT type.""" + + __visit_name__ = "TINYINT" + + def __init__(self, display_width=None, **kw): + """Construct a TINYINT. + + :param display_width: Optional, maximum display width for this number. + + :param unsigned: a boolean, optional. + + :param zerofill: Optional. If true, values will be stored as strings + left-padded with zeros. Note that this does not effect the values + returned by the underlying database API, which continue to be + numeric. + + """ + super().__init__(display_width=display_width, **kw) + + +class SMALLINT(_IntegerType, sqltypes.SMALLINT): + """MySQL SMALLINTEGER type.""" + + __visit_name__ = "SMALLINT" + + def __init__(self, display_width=None, **kw): + """Construct a SMALLINTEGER. + + :param display_width: Optional, maximum display width for this number. + + :param unsigned: a boolean, optional. 
+ + :param zerofill: Optional. If true, values will be stored as strings + left-padded with zeros. Note that this does not effect the values + returned by the underlying database API, which continue to be + numeric. + + """ + super().__init__(display_width=display_width, **kw) + + +class BIT(sqltypes.TypeEngine): + """MySQL BIT type. + + This type is for MySQL 5.0.3 or greater for MyISAM, and 5.0.5 or greater + for MyISAM, MEMORY, InnoDB and BDB. For older versions, use a + MSTinyInteger() type. + + """ + + __visit_name__ = "BIT" + + def __init__(self, length=None): + """Construct a BIT. + + :param length: Optional, number of bits. + + """ + self.length = length + + def result_processor(self, dialect, coltype): + """Convert a MySQL's 64 bit, variable length binary string to a + long.""" + + if dialect.supports_native_bit: + return None + + def process(value): + if value is not None: + v = 0 + for i in value: + if not isinstance(i, int): + i = ord(i) # convert byte to int on Python 2 + v = v << 8 | i + return v + return value + + return process + + +class TIME(sqltypes.TIME): + """MySQL TIME type.""" + + __visit_name__ = "TIME" + + def __init__(self, timezone=False, fsp=None): + """Construct a MySQL TIME type. + + :param timezone: not used by the MySQL dialect. + :param fsp: fractional seconds precision value. + MySQL 5.6 supports storage of fractional seconds; + this parameter will be used when emitting DDL + for the TIME type. + + .. note:: + + DBAPI driver support for fractional seconds may + be limited; current support includes + MySQL Connector/Python. + + """ + super().__init__(timezone=timezone) + self.fsp = fsp + + def result_processor(self, dialect, coltype): + time = datetime.time + + def process(value): + # convert from a timedelta value + if value is not None: + microseconds = value.microseconds + seconds = value.seconds + minutes = seconds // 60 + return time( + minutes // 60, + minutes % 60, + seconds - minutes * 60, + microsecond=microseconds, + ) + else: + return None + + return process + + +class TIMESTAMP(sqltypes.TIMESTAMP): + """MySQL TIMESTAMP type.""" + + __visit_name__ = "TIMESTAMP" + + def __init__(self, timezone=False, fsp=None): + """Construct a MySQL TIMESTAMP type. + + :param timezone: not used by the MySQL dialect. + :param fsp: fractional seconds precision value. + MySQL 5.6.4 supports storage of fractional seconds; + this parameter will be used when emitting DDL + for the TIMESTAMP type. + + .. note:: + + DBAPI driver support for fractional seconds may + be limited; current support includes + MySQL Connector/Python. + + """ + super().__init__(timezone=timezone) + self.fsp = fsp + + +class DATETIME(sqltypes.DATETIME): + """MySQL DATETIME type.""" + + __visit_name__ = "DATETIME" + + def __init__(self, timezone=False, fsp=None): + """Construct a MySQL DATETIME type. + + :param timezone: not used by the MySQL dialect. + :param fsp: fractional seconds precision value. + MySQL 5.6.4 supports storage of fractional seconds; + this parameter will be used when emitting DDL + for the DATETIME type. + + .. note:: + + DBAPI driver support for fractional seconds may + be limited; current support includes + MySQL Connector/Python. 
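For illustration (column names are made up), the fractional seconds precision is passed the same way to the DATETIME, TIMESTAMP and TIME variants and renders as e.g. ``DATETIME(6)`` in DDL on servers that support it::

    from sqlalchemy import Column, Integer, MetaData, Table
    from sqlalchemy.dialects.mysql import DATETIME, TIMESTAMP

    metadata = MetaData()

    event = Table(
        "event",
        metadata,
        Column("id", Integer, primary_key=True),
        # microsecond precision; requires MySQL 5.6.4+ server support
        Column("created_at", DATETIME(fsp=6)),
        Column("updated_at", TIMESTAMP(fsp=6)),
    )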
+ + """ + super().__init__(timezone=timezone) + self.fsp = fsp + + +class YEAR(sqltypes.TypeEngine): + """MySQL YEAR type, for single byte storage of years 1901-2155.""" + + __visit_name__ = "YEAR" + + def __init__(self, display_width=None): + self.display_width = display_width + + +class TEXT(_StringType, sqltypes.TEXT): + """MySQL TEXT type, for character storage encoded up to 2^16 bytes.""" + + __visit_name__ = "TEXT" + + def __init__(self, length=None, **kw): + """Construct a TEXT. + + :param length: Optional, if provided the server may optimize storage + by substituting the smallest TEXT type sufficient to store + ``length`` bytes of characters. + + :param charset: Optional, a column-level character set for this string + value. Takes precedence to 'ascii' or 'unicode' short-hand. + + :param collation: Optional, a column-level collation for this string + value. Takes precedence to 'binary' short-hand. + + :param ascii: Defaults to False: short-hand for the ``latin1`` + character set, generates ASCII in schema. + + :param unicode: Defaults to False: short-hand for the ``ucs2`` + character set, generates UNICODE in schema. + + :param national: Optional. If true, use the server's configured + national character set. + + :param binary: Defaults to False: short-hand, pick the binary + collation type that matches the column's character set. Generates + BINARY in schema. This does not affect the type of data stored, + only the collation of character data. + + """ + super().__init__(length=length, **kw) + + +class TINYTEXT(_StringType): + """MySQL TINYTEXT type, for character storage encoded up to 2^8 bytes.""" + + __visit_name__ = "TINYTEXT" + + def __init__(self, **kwargs): + """Construct a TINYTEXT. + + :param charset: Optional, a column-level character set for this string + value. Takes precedence to 'ascii' or 'unicode' short-hand. + + :param collation: Optional, a column-level collation for this string + value. Takes precedence to 'binary' short-hand. + + :param ascii: Defaults to False: short-hand for the ``latin1`` + character set, generates ASCII in schema. + + :param unicode: Defaults to False: short-hand for the ``ucs2`` + character set, generates UNICODE in schema. + + :param national: Optional. If true, use the server's configured + national character set. + + :param binary: Defaults to False: short-hand, pick the binary + collation type that matches the column's character set. Generates + BINARY in schema. This does not affect the type of data stored, + only the collation of character data. + + """ + super().__init__(**kwargs) + + +class MEDIUMTEXT(_StringType): + """MySQL MEDIUMTEXT type, for character storage encoded up + to 2^24 bytes.""" + + __visit_name__ = "MEDIUMTEXT" + + def __init__(self, **kwargs): + """Construct a MEDIUMTEXT. + + :param charset: Optional, a column-level character set for this string + value. Takes precedence to 'ascii' or 'unicode' short-hand. + + :param collation: Optional, a column-level collation for this string + value. Takes precedence to 'binary' short-hand. + + :param ascii: Defaults to False: short-hand for the ``latin1`` + character set, generates ASCII in schema. + + :param unicode: Defaults to False: short-hand for the ``ucs2`` + character set, generates UNICODE in schema. + + :param national: Optional. If true, use the server's configured + national character set. + + :param binary: Defaults to False: short-hand, pick the binary + collation type that matches the column's character set. Generates + BINARY in schema. 
This does not affect the type of data stored, + only the collation of character data. + + """ + super().__init__(**kwargs) + + +class LONGTEXT(_StringType): + """MySQL LONGTEXT type, for character storage encoded up to 2^32 bytes.""" + + __visit_name__ = "LONGTEXT" + + def __init__(self, **kwargs): + """Construct a LONGTEXT. + + :param charset: Optional, a column-level character set for this string + value. Takes precedence to 'ascii' or 'unicode' short-hand. + + :param collation: Optional, a column-level collation for this string + value. Takes precedence to 'binary' short-hand. + + :param ascii: Defaults to False: short-hand for the ``latin1`` + character set, generates ASCII in schema. + + :param unicode: Defaults to False: short-hand for the ``ucs2`` + character set, generates UNICODE in schema. + + :param national: Optional. If true, use the server's configured + national character set. + + :param binary: Defaults to False: short-hand, pick the binary + collation type that matches the column's character set. Generates + BINARY in schema. This does not affect the type of data stored, + only the collation of character data. + + """ + super().__init__(**kwargs) + + +class VARCHAR(_StringType, sqltypes.VARCHAR): + """MySQL VARCHAR type, for variable-length character data.""" + + __visit_name__ = "VARCHAR" + + def __init__(self, length=None, **kwargs): + """Construct a VARCHAR. + + :param charset: Optional, a column-level character set for this string + value. Takes precedence to 'ascii' or 'unicode' short-hand. + + :param collation: Optional, a column-level collation for this string + value. Takes precedence to 'binary' short-hand. + + :param ascii: Defaults to False: short-hand for the ``latin1`` + character set, generates ASCII in schema. + + :param unicode: Defaults to False: short-hand for the ``ucs2`` + character set, generates UNICODE in schema. + + :param national: Optional. If true, use the server's configured + national character set. + + :param binary: Defaults to False: short-hand, pick the binary + collation type that matches the column's character set. Generates + BINARY in schema. This does not affect the type of data stored, + only the collation of character data. + + """ + super().__init__(length=length, **kwargs) + + +class CHAR(_StringType, sqltypes.CHAR): + """MySQL CHAR type, for fixed-length character data.""" + + __visit_name__ = "CHAR" + + def __init__(self, length=None, **kwargs): + """Construct a CHAR. + + :param length: Maximum data length, in characters. + + :param binary: Optional, use the default binary collation for the + national character set. This does not affect the type of data + stored, use a BINARY type for binary data. + + :param collation: Optional, request a particular collation. Must be + compatible with the national character set. + + """ + super().__init__(length=length, **kwargs) + + @classmethod + def _adapt_string_for_cast(cls, type_): + # copy the given string type into a CHAR + # for the purposes of rendering a CAST expression + type_ = sqltypes.to_instance(type_) + if isinstance(type_, sqltypes.CHAR): + return type_ + elif isinstance(type_, _StringType): + return CHAR( + length=type_.length, + charset=type_.charset, + collation=type_.collation, + ascii=type_.ascii, + binary=type_.binary, + unicode=type_.unicode, + national=False, # not supported in CAST + ) + else: + return CHAR(length=type_.length) + + +class NVARCHAR(_StringType, sqltypes.NVARCHAR): + """MySQL NVARCHAR type. 
+ + For variable-length character data in the server's configured national + character set. + """ + + __visit_name__ = "NVARCHAR" + + def __init__(self, length=None, **kwargs): + """Construct an NVARCHAR. + + :param length: Maximum data length, in characters. + + :param binary: Optional, use the default binary collation for the + national character set. This does not affect the type of data + stored, use a BINARY type for binary data. + + :param collation: Optional, request a particular collation. Must be + compatible with the national character set. + + """ + kwargs["national"] = True + super().__init__(length=length, **kwargs) + + +class NCHAR(_StringType, sqltypes.NCHAR): + """MySQL NCHAR type. + + For fixed-length character data in the server's configured national + character set. + """ + + __visit_name__ = "NCHAR" + + def __init__(self, length=None, **kwargs): + """Construct an NCHAR. + + :param length: Maximum data length, in characters. + + :param binary: Optional, use the default binary collation for the + national character set. This does not affect the type of data + stored, use a BINARY type for binary data. + + :param collation: Optional, request a particular collation. Must be + compatible with the national character set. + + """ + kwargs["national"] = True + super().__init__(length=length, **kwargs) + + +class TINYBLOB(sqltypes._Binary): + """MySQL TINYBLOB type, for binary data up to 2^8 bytes.""" + + __visit_name__ = "TINYBLOB" + + +class MEDIUMBLOB(sqltypes._Binary): + """MySQL MEDIUMBLOB type, for binary data up to 2^24 bytes.""" + + __visit_name__ = "MEDIUMBLOB" + + +class LONGBLOB(sqltypes._Binary): + """MySQL LONGBLOB type, for binary data up to 2^32 bytes.""" + + __visit_name__ = "LONGBLOB" diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/dialects/oracle/base.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/dialects/oracle/base.py new file mode 100644 index 0000000000000000000000000000000000000000..1d882def8d60a3d46119cf93a7284c55e9ef95e8 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/dialects/oracle/base.py @@ -0,0 +1,3747 @@ +# dialects/oracle/base.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php +# mypy: ignore-errors + + +r""" +.. dialect:: oracle + :name: Oracle Database + :normal_support: 11+ + :best_effort: 9+ + + +Auto Increment Behavior +----------------------- + +SQLAlchemy Table objects which include integer primary keys are usually assumed +to have "autoincrementing" behavior, meaning they can generate their own +primary key values upon INSERT. For use within Oracle Database, two options are +available, which are the use of IDENTITY columns (Oracle Database 12 and above +only) or the association of a SEQUENCE with the column. + +Specifying GENERATED AS IDENTITY (Oracle Database 12 and above) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Starting from version 12, Oracle Database can make use of identity columns +using the :class:`_sql.Identity` to specify the autoincrementing behavior:: + + t = Table( + "mytable", + metadata, + Column("id", Integer, Identity(start=3), primary_key=True), + Column(...), + ..., + ) + +The CREATE TABLE for the above :class:`_schema.Table` object would be: + +.. 
sourcecode:: sql + + CREATE TABLE mytable ( + id INTEGER GENERATED BY DEFAULT AS IDENTITY (START WITH 3), + ..., + PRIMARY KEY (id) + ) + +The :class:`_schema.Identity` object support many options to control the +"autoincrementing" behavior of the column, like the starting value, the +incrementing value, etc. In addition to the standard options, Oracle Database +supports setting :paramref:`_schema.Identity.always` to ``None`` to use the +default generated mode, rendering GENERATED AS IDENTITY in the DDL. It also supports +setting :paramref:`_schema.Identity.on_null` to ``True`` to specify ON NULL +in conjunction with a 'BY DEFAULT' identity column. + +Using a SEQUENCE (all Oracle Database versions) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Older version of Oracle Database had no "autoincrement" feature: SQLAlchemy +relies upon sequences to produce these values. With the older Oracle Database +versions, *a sequence must always be explicitly specified to enable +autoincrement*. This is divergent with the majority of documentation examples +which assume the usage of an autoincrement-capable database. To specify +sequences, use the sqlalchemy.schema.Sequence object which is passed to a +Column construct:: + + t = Table( + "mytable", + metadata, + Column("id", Integer, Sequence("id_seq", start=1), primary_key=True), + Column(...), + ..., + ) + +This step is also required when using table reflection, i.e. autoload_with=engine:: + + t = Table( + "mytable", + metadata, + Column("id", Integer, Sequence("id_seq", start=1), primary_key=True), + autoload_with=engine, + ) + +.. versionchanged:: 1.4 Added :class:`_schema.Identity` construct + in a :class:`_schema.Column` to specify the option of an autoincrementing + column. + +.. _oracle_isolation_level: + +Transaction Isolation Level / Autocommit +---------------------------------------- + +Oracle Database supports "READ COMMITTED" and "SERIALIZABLE" modes of +isolation. The AUTOCOMMIT isolation level is also supported by the +python-oracledb and cx_Oracle dialects. + +To set using per-connection execution options:: + + connection = engine.connect() + connection = connection.execution_options(isolation_level="AUTOCOMMIT") + +For ``READ COMMITTED`` and ``SERIALIZABLE``, the Oracle Database dialects sets +the level at the session level using ``ALTER SESSION``, which is reverted back +to its default setting when the connection is returned to the connection pool. + +Valid values for ``isolation_level`` include: + +* ``READ COMMITTED`` +* ``AUTOCOMMIT`` +* ``SERIALIZABLE`` + +.. note:: The implementation for the + :meth:`_engine.Connection.get_isolation_level` method as implemented by the + Oracle Database dialects necessarily force the start of a transaction using the + Oracle Database DBMS_TRANSACTION.LOCAL_TRANSACTION_ID function; otherwise no + level is normally readable. + + Additionally, the :meth:`_engine.Connection.get_isolation_level` method will + raise an exception if the ``v$transaction`` view is not available due to + permissions or other reasons, which is a common occurrence in Oracle Database + installations. + + The python-oracledb and cx_Oracle dialects attempt to call the + :meth:`_engine.Connection.get_isolation_level` method when the dialect makes + its first connection to the database in order to acquire the + "default"isolation level. This default level is necessary so that the level + can be reset on a connection after it has been temporarily modified using + :meth:`_engine.Connection.execution_options` method. 
In the common event + that the :meth:`_engine.Connection.get_isolation_level` method raises an + exception due to ``v$transaction`` not being readable as well as any other + database-related failure, the level is assumed to be "READ COMMITTED". No + warning is emitted for this initial first-connect condition as it is + expected to be a common restriction on Oracle databases. + +.. versionadded:: 1.3.16 added support for AUTOCOMMIT to the cx_Oracle dialect + as well as the notion of a default isolation level + +.. versionadded:: 1.3.21 Added support for SERIALIZABLE as well as live + reading of the isolation level. + +.. versionchanged:: 1.3.22 In the event that the default isolation + level cannot be read due to permissions on the v$transaction view as + is common in Oracle installations, the default isolation level is hardcoded + to "READ COMMITTED" which was the behavior prior to 1.3.21. + +.. seealso:: + + :ref:`dbapi_autocommit` + +Identifier Casing +----------------- + +In Oracle Database, the data dictionary represents all case insensitive +identifier names using UPPERCASE text. This is in contradiction to the +expectations of SQLAlchemy, which assume a case insensitive name is represented +as lowercase text. + +As an example of case insensitive identifier names, consider the following table: + +.. sourcecode:: sql + + CREATE TABLE MyTable (Identifier INTEGER PRIMARY KEY) + +If you were to ask Oracle Database for information about this table, the +table name would be reported as ``MYTABLE`` and the column name would +be reported as ``IDENTIFIER``. Compare to most other databases such as +PostgreSQL and MySQL which would report these names as ``mytable`` and +``identifier``. The names are **not quoted, therefore are case insensitive**. +The special casing of ``MyTable`` and ``Identifier`` would only be maintained +if they were quoted in the table definition: + +.. sourcecode:: sql + + CREATE TABLE "MyTable" ("Identifier" INTEGER PRIMARY KEY) + +When constructing a SQLAlchemy :class:`.Table` object, **an all lowercase name +is considered to be case insensitive**. So the following table assumes +case insensitive names:: + + Table("mytable", metadata, Column("identifier", Integer, primary_key=True)) + +Whereas when mixed case or UPPERCASE names are used, case sensitivity is +assumed:: + + Table("MyTable", metadata, Column("Identifier", Integer, primary_key=True)) + +A similar situation occurs at the database driver level when emitting a +textual SQL SELECT statement and looking at column names in the DBAPI +``cursor.description`` attribute. A database like PostgreSQL will normalize +case insensitive names to be lowercase:: + + >>> pg_engine = create_engine("postgresql://scott:tiger@localhost/test") + >>> pg_connection = pg_engine.connect() + >>> result = pg_connection.exec_driver_sql("SELECT 1 AS SomeName") + >>> result.cursor.description + (Column(name='somename', type_code=23),) + +Whereas Oracle normalizes them to UPPERCASE:: + + >>> oracle_engine = create_engine("oracle+oracledb://scott:tiger@oracle18c/xe") + >>> oracle_connection = oracle_engine.connect() + >>> result = oracle_connection.exec_driver_sql( + ... "SELECT 1 AS SomeName FROM DUAL" + ... ) + >>> result.cursor.description + [('SOMENAME', , 127, None, 0, -127, True)] + +In order to achieve cross-database parity for the two cases of a. table +reflection and b. textual-only SQL statement round trips, SQLAlchemy performs a step +called **name normalization** when using the Oracle dialect. 
This process may +also apply to other third party dialects that have similar UPPERCASE handling +of case insensitive names. + +When using name normalization, SQLAlchemy attempts to detect if a name is +case insensitive by checking if all characters are UPPERCASE letters only; +if so, then it assumes this is a case insensitive name and is delivered as +a lowercase name. + +For table reflection, a tablename that is seen represented as all UPPERCASE +in Oracle Database's catalog tables will be assumed to have a case insensitive +name. This is what allows the ``Table`` definition to use lower case names +and be equally compatible from a reflection point of view on Oracle Database +and all other databases such as PostgreSQL and MySQL:: + + # matches a table created with CREATE TABLE mytable + Table("mytable", metadata, autoload_with=some_engine) + +Above, the all lowercase name ``"mytable"`` is case insensitive; it will match +a table reported by PostgreSQL as ``"mytable"`` and a table reported by +Oracle as ``"MYTABLE"``. If name normalization were not present, it would +not be possible for the above :class:`.Table` definition to be introspectable +in a cross-database way, since we are dealing with a case insensitive name +that is not reported by each database in the same way. + +Case sensitivity can be forced on in this case, such as if we wanted to represent +the quoted tablename ``"MYTABLE"`` with that exact casing, most simply by using +that casing directly, which will be seen as a case sensitive name:: + + # matches a table created with CREATE TABLE "MYTABLE" + Table("MYTABLE", metadata, autoload_with=some_engine) + +For the unusual case of a quoted all-lowercase name, the :class:`.quoted_name` +construct may be used:: + + from sqlalchemy import quoted_name + + # matches a table created with CREATE TABLE "mytable" + Table( + quoted_name("mytable", quote=True), metadata, autoload_with=some_engine + ) + +Name normalization also takes place when handling result sets from **purely +textual SQL strings**, that have no other :class:`.Table` or :class:`.Column` +metadata associated with them. This includes SQL strings executed using +:meth:`.Connection.exec_driver_sql` and SQL strings executed using the +:func:`.text` construct which do not include :class:`.Column` metadata. + +Returning to the Oracle Database SELECT statement, we see that even though +``cursor.description`` reports the column name as ``SOMENAME``, SQLAlchemy +name normalizes this to ``somename``:: + + >>> oracle_engine = create_engine("oracle+oracledb://scott:tiger@oracle18c/xe") + >>> oracle_connection = oracle_engine.connect() + >>> result = oracle_connection.exec_driver_sql( + ... "SELECT 1 AS SomeName FROM DUAL" + ... ) + >>> result.cursor.description + [('SOMENAME', , 127, None, 0, -127, True)] + >>> result.keys() + RMKeyView(['somename']) + +The single scenario where the above behavior produces inaccurate results +is when using an all-uppercase, quoted name. SQLAlchemy has no way to determine +that a particular name in ``cursor.description`` was quoted, and is therefore +case sensitive, or was not quoted, and should be name normalized:: + + >>> result = oracle_connection.exec_driver_sql( + ... 'SELECT 1 AS "SOMENAME" FROM DUAL' + ... ) + >>> result.cursor.description + [('SOMENAME', , 127, None, 0, -127, True)] + >>> result.keys() + RMKeyView(['somename']) + +For this case, a new feature will be available in SQLAlchemy 2.1 to disable +the name normalization behavior in specific cases. + + +.. 
_oracle_max_identifier_lengths: + +Maximum Identifier Lengths +-------------------------- + +SQLAlchemy is sensitive to the maximum identifier length supported by Oracle +Database. This affects generated SQL label names as well as the generation of +constraint names, particularly in the case where the constraint naming +convention feature described at :ref:`constraint_naming_conventions` is being +used. + +Oracle Database 12.2 increased the default maximum identifier length from 30 to +128. As of SQLAlchemy 1.4, the default maximum identifier length for the Oracle +dialects is 128 characters. Upon first connection, the maximum length actually +supported by the database is obtained. In all cases, setting the +:paramref:`_sa.create_engine.max_identifier_length` parameter will bypass this +change and the value given will be used as is:: + + engine = create_engine( + "oracle+oracledb://scott:tiger@localhost:1521?service_name=freepdb1", + max_identifier_length=30, + ) + +If :paramref:`_sa.create_engine.max_identifier_length` is not set, the oracledb +dialect internally uses the ``max_identifier_length`` attribute available on +driver connections since python-oracledb version 2.5. When using an older +driver version, or using the cx_Oracle dialect, SQLAlchemy will instead attempt +to use the query ``SELECT value FROM v$parameter WHERE name = 'compatible'`` +upon first connect in order to determine the effective compatibility version of +the database. The "compatibility" version is a version number that is +independent of the actual database version. It is used to assist database +migration. It is configured by an Oracle Database initialization parameter. The +compatibility version then determines the maximum allowed identifier length for +the database. If the V$ view is not available, the database version information +is used instead. + +The maximum identifier length comes into play both when generating anonymized +SQL labels in SELECT statements, but more crucially when generating constraint +names from a naming convention. It is this area that has created the need for +SQLAlchemy to change this default conservatively. For example, the following +naming convention produces two very different constraint names based on the +identifier length:: + + from sqlalchemy import Column + from sqlalchemy import Index + from sqlalchemy import Integer + from sqlalchemy import MetaData + from sqlalchemy import Table + from sqlalchemy.dialects import oracle + from sqlalchemy.schema import CreateIndex + + m = MetaData(naming_convention={"ix": "ix_%(column_0N_name)s"}) + + t = Table( + "t", + m, + Column("some_column_name_1", Integer), + Column("some_column_name_2", Integer), + Column("some_column_name_3", Integer), + ) + + ix = Index( + None, + t.c.some_column_name_1, + t.c.some_column_name_2, + t.c.some_column_name_3, + ) + + oracle_dialect = oracle.dialect(max_identifier_length=30) + print(CreateIndex(ix).compile(dialect=oracle_dialect)) + +With an identifier length of 30, the above CREATE INDEX looks like: + +.. sourcecode:: sql + + CREATE INDEX ix_some_column_name_1s_70cd ON t + (some_column_name_1, some_column_name_2, some_column_name_3) + +However with length of 128, it becomes:: + +.. 
sourcecode:: sql + + CREATE INDEX ix_some_column_name_1some_column_name_2some_column_name_3 ON t + (some_column_name_1, some_column_name_2, some_column_name_3) + +Applications which have run versions of SQLAlchemy prior to 1.4 on Oracle +Database version 12.2 or greater are therefore subject to the scenario of a +database migration that wishes to "DROP CONSTRAINT" on a name that was +previously generated with the shorter length. This migration will fail when +the identifier length is changed without the name of the index or constraint +first being adjusted. Such applications are strongly advised to make use of +:paramref:`_sa.create_engine.max_identifier_length` in order to maintain +control of the generation of truncated names, and to fully review and test all +database migrations in a staging environment when changing this value to ensure +that the impact of this change has been mitigated. + +.. versionchanged:: 1.4 the default max_identifier_length for Oracle Database + is 128 characters, which is adjusted down to 30 upon first connect if the + Oracle Database, or its compatibility setting, are lower than version 12.2. + + +LIMIT/OFFSET/FETCH Support +-------------------------- + +Methods like :meth:`_sql.Select.limit` and :meth:`_sql.Select.offset` make use +of ``FETCH FIRST N ROW / OFFSET N ROWS`` syntax assuming Oracle Database 12c or +above, and assuming the SELECT statement is not embedded within a compound +statement like UNION. This syntax is also available directly by using the +:meth:`_sql.Select.fetch` method. + +.. versionchanged:: 2.0 the Oracle Database dialects now use ``FETCH FIRST N + ROW / OFFSET N ROWS`` for all :meth:`_sql.Select.limit` and + :meth:`_sql.Select.offset` usage including within the ORM and legacy + :class:`_orm.Query`. To force the legacy behavior using window functions, + specify the ``enable_offset_fetch=False`` dialect parameter to + :func:`_sa.create_engine`. + +The use of ``FETCH FIRST / OFFSET`` may be disabled on any Oracle Database +version by passing ``enable_offset_fetch=False`` to :func:`_sa.create_engine`, +which will force the use of "legacy" mode that makes use of window functions. +This mode is also selected automatically when using a version of Oracle +Database prior to 12c. + +When using legacy mode, or when a :class:`.Select` statement with limit/offset +is embedded in a compound statement, an emulated approach for LIMIT / OFFSET +based on window functions is used, which involves creation of a subquery using +``ROW_NUMBER`` that is prone to performance issues as well as SQL construction +issues for complex statements. However, this approach is supported by all +Oracle Database versions. See notes below. + +Notes on LIMIT / OFFSET emulation (when fetch() method cannot be used) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +If using :meth:`_sql.Select.limit` and :meth:`_sql.Select.offset`, or with the +ORM the :meth:`_orm.Query.limit` and :meth:`_orm.Query.offset` methods on an +Oracle Database version prior to 12c, the following notes apply: + +* SQLAlchemy currently makes use of ROWNUM to achieve + LIMIT/OFFSET; the exact methodology is taken from + https://blogs.oracle.com/oraclemagazine/on-rownum-and-limiting-results . + +* the "FIRST_ROWS()" optimization keyword is not used by default. To enable + the usage of this optimization directive, specify ``optimize_limits=True`` + to :func:`_sa.create_engine`. + + .. 
versionchanged:: 1.4 + + The Oracle Database dialect renders limit/offset integer values using a + "post compile" scheme which renders the integer directly before passing + the statement to the cursor for execution. The ``use_binds_for_limits`` + flag no longer has an effect. + + .. seealso:: + + :ref:`change_4808`. + +.. _oracle_returning: + +RETURNING Support +----------------- + +Oracle Database supports RETURNING fully for INSERT, UPDATE and DELETE +statements that are invoked with a single collection of bound parameters (that +is, a ``cursor.execute()`` style statement; SQLAlchemy does not generally +support RETURNING with :term:`executemany` statements). Multiple rows may be +returned as well. + +.. versionchanged:: 2.0 the Oracle Database backend has full support for + RETURNING on parity with other backends. + + +ON UPDATE CASCADE +----------------- + +Oracle Database doesn't have native ON UPDATE CASCADE functionality. A trigger +based solution is available at +https://web.archive.org/web/20090317041251/https://asktom.oracle.com/tkyte/update_cascade/index.html + +When using the SQLAlchemy ORM, the ORM has limited ability to manually issue +cascading updates - specify ForeignKey objects using the +"deferrable=True, initially='deferred'" keyword arguments, +and specify "passive_updates=False" on each relationship(). + +Oracle Database 8 Compatibility +------------------------------- + +.. warning:: The status of Oracle Database 8 compatibility is not known for + SQLAlchemy 2.0. + +When Oracle Database 8 is detected, the dialect internally configures itself to +the following behaviors: + +* the use_ansi flag is set to False. This has the effect of converting all + JOIN phrases into the WHERE clause, and in the case of LEFT OUTER JOIN + makes use of Oracle's (+) operator. + +* the NVARCHAR2 and NCLOB datatypes are no longer generated as DDL when + the :class:`~sqlalchemy.types.Unicode` is used - VARCHAR2 and CLOB are issued + instead. This because these types don't seem to work correctly on Oracle 8 + even though they are available. The :class:`~sqlalchemy.types.NVARCHAR` and + :class:`~sqlalchemy.dialects.oracle.NCLOB` types will always generate + NVARCHAR2 and NCLOB. + + +Synonym/DBLINK Reflection +------------------------- + +When using reflection with Table objects, the dialect can optionally search +for tables indicated by synonyms, either in local or remote schemas or +accessed over DBLINK, by passing the flag ``oracle_resolve_synonyms=True`` as +a keyword argument to the :class:`_schema.Table` construct:: + + some_table = Table( + "some_table", autoload_with=some_engine, oracle_resolve_synonyms=True + ) + +When this flag is set, the given name (such as ``some_table`` above) will be +searched not just in the ``ALL_TABLES`` view, but also within the +``ALL_SYNONYMS`` view to see if this name is actually a synonym to another +name. If the synonym is located and refers to a DBLINK, the Oracle Database +dialects know how to locate the table's information using DBLINK syntax(e.g. +``@dblink``). + +``oracle_resolve_synonyms`` is accepted wherever reflection arguments are +accepted, including methods such as :meth:`_schema.MetaData.reflect` and +:meth:`_reflection.Inspector.get_columns`. + +If synonyms are not in use, this flag should be left disabled. + +.. _oracle_constraint_reflection: + +Constraint Reflection +--------------------- + +The Oracle Database dialects can return information about foreign key, unique, +and CHECK constraints, as well as indexes on tables. 
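+
+As a quick illustration (a minimal sketch; the connection URL and the
+``some_table`` name below are placeholders, not part of the original
+documentation), this information can be gathered at runtime through the
+inspection interface::
+
+    from sqlalchemy import create_engine, inspect
+
+    engine = create_engine(
+        "oracle+oracledb://scott:tiger@localhost:1521?service_name=freepdb1"
+    )
+    inspector = inspect(engine)
+
+    # each call returns a list of dictionaries describing the reflected
+    # constraints / indexes for the given table
+    foreign_keys = inspector.get_foreign_keys("some_table")
+    unique_constraints = inspector.get_unique_constraints("some_table")
+    check_constraints = inspector.get_check_constraints("some_table")
+    indexes = inspector.get_indexes("some_table")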
+ +Raw information regarding these constraints can be acquired using +:meth:`_reflection.Inspector.get_foreign_keys`, +:meth:`_reflection.Inspector.get_unique_constraints`, +:meth:`_reflection.Inspector.get_check_constraints`, and +:meth:`_reflection.Inspector.get_indexes`. + +.. versionchanged:: 1.2 The Oracle Database dialect can now reflect UNIQUE and + CHECK constraints. + +When using reflection at the :class:`_schema.Table` level, the +:class:`_schema.Table` +will also include these constraints. + +Note the following caveats: + +* When using the :meth:`_reflection.Inspector.get_check_constraints` method, + Oracle Database builds a special "IS NOT NULL" constraint for columns that + specify "NOT NULL". This constraint is **not** returned by default; to + include the "IS NOT NULL" constraints, pass the flag ``include_all=True``:: + + from sqlalchemy import create_engine, inspect + + engine = create_engine( + "oracle+oracledb://scott:tiger@localhost:1521?service_name=freepdb1" + ) + inspector = inspect(engine) + all_check_constraints = inspector.get_check_constraints( + "some_table", include_all=True + ) + +* in most cases, when reflecting a :class:`_schema.Table`, a UNIQUE constraint + will **not** be available as a :class:`.UniqueConstraint` object, as Oracle + Database mirrors unique constraints with a UNIQUE index in most cases (the + exception seems to be when two or more unique constraints represent the same + columns); the :class:`_schema.Table` will instead represent these using + :class:`.Index` with the ``unique=True`` flag set. + +* Oracle Database creates an implicit index for the primary key of a table; + this index is **excluded** from all index results. + +* the list of columns reflected for an index will not include column names + that start with SYS_NC. + +Table names with SYSTEM/SYSAUX tablespaces +------------------------------------------- + +The :meth:`_reflection.Inspector.get_table_names` and +:meth:`_reflection.Inspector.get_temp_table_names` +methods each return a list of table names for the current engine. These methods +are also part of the reflection which occurs within an operation such as +:meth:`_schema.MetaData.reflect`. By default, +these operations exclude the ``SYSTEM`` +and ``SYSAUX`` tablespaces from the operation. In order to change this, the +default list of tablespaces excluded can be changed at the engine level using +the ``exclude_tablespaces`` parameter:: + + # exclude SYSAUX and SOME_TABLESPACE, but not SYSTEM + e = create_engine( + "oracle+oracledb://scott:tiger@localhost:1521/?service_name=freepdb1", + exclude_tablespaces=["SYSAUX", "SOME_TABLESPACE"], + ) + +.. _oracle_float_support: + +FLOAT / DOUBLE Support and Behaviors +------------------------------------ + +The SQLAlchemy :class:`.Float` and :class:`.Double` datatypes are generic +datatypes that resolve to the "least surprising" datatype for a given backend. +For Oracle Database, this means they resolve to the ``FLOAT`` and ``DOUBLE`` +types:: + + >>> from sqlalchemy import cast, literal, Float + >>> from sqlalchemy.dialects import oracle + >>> float_datatype = Float() + >>> print(cast(literal(5.0), float_datatype).compile(dialect=oracle.dialect())) + CAST(:param_1 AS FLOAT) + +Oracle's ``FLOAT`` / ``DOUBLE`` datatypes are aliases for ``NUMBER``. Oracle +Database stores ``NUMBER`` values with full precision, not floating point +precision, which means that ``FLOAT`` / ``DOUBLE`` do not actually behave like +native FP values. 
Oracle Database instead offers special datatypes +``BINARY_FLOAT`` and ``BINARY_DOUBLE`` to deliver real 4- and 8- byte FP +values. + +SQLAlchemy supports these datatypes directly using :class:`.BINARY_FLOAT` and +:class:`.BINARY_DOUBLE`. To use the :class:`.Float` or :class:`.Double` +datatypes in a database agnostic way, while allowing Oracle backends to utilize +one of these types, use the :meth:`.TypeEngine.with_variant` method to set up a +variant:: + + >>> from sqlalchemy import cast, literal, Float + >>> from sqlalchemy.dialects import oracle + >>> float_datatype = Float().with_variant(oracle.BINARY_FLOAT(), "oracle") + >>> print(cast(literal(5.0), float_datatype).compile(dialect=oracle.dialect())) + CAST(:param_1 AS BINARY_FLOAT) + +E.g. to use this datatype in a :class:`.Table` definition:: + + my_table = Table( + "my_table", + metadata, + Column( + "fp_data", Float().with_variant(oracle.BINARY_FLOAT(), "oracle") + ), + ) + +DateTime Compatibility +---------------------- + +Oracle Database has no datatype known as ``DATETIME``, it instead has only +``DATE``, which can actually store a date and time value. For this reason, the +Oracle Database dialects provide a type :class:`_oracle.DATE` which is a +subclass of :class:`.DateTime`. This type has no special behavior, and is only +present as a "marker" for this type; additionally, when a database column is +reflected and the type is reported as ``DATE``, the time-supporting +:class:`_oracle.DATE` type is used. + +.. _oracle_table_options: + +Oracle Database Table Options +----------------------------- + +The CREATE TABLE phrase supports the following options with Oracle Database +dialects in conjunction with the :class:`_schema.Table` construct: + + +* ``ON COMMIT``:: + + Table( + "some_table", + metadata, + ..., + prefixes=["GLOBAL TEMPORARY"], + oracle_on_commit="PRESERVE ROWS", + ) + +* + ``COMPRESS``:: + + Table( + "mytable", metadata, Column("data", String(32)), oracle_compress=True + ) + + Table("mytable", metadata, Column("data", String(32)), oracle_compress=6) + + The ``oracle_compress`` parameter accepts either an integer compression + level, or ``True`` to use the default compression level. + +* + ``TABLESPACE``:: + + Table("mytable", metadata, ..., oracle_tablespace="EXAMPLE_TABLESPACE") + + The ``oracle_tablespace`` parameter specifies the tablespace in which the + table is to be created. This is useful when you want to create a table in a + tablespace other than the default tablespace of the user. + + .. versionadded:: 2.0.37 + +.. _oracle_index_options: + +Oracle Database Specific Index Options +-------------------------------------- + +Bitmap Indexes +~~~~~~~~~~~~~~ + +You can specify the ``oracle_bitmap`` parameter to create a bitmap index +instead of a B-tree index:: + + Index("my_index", my_table.c.data, oracle_bitmap=True) + +Bitmap indexes cannot be unique and cannot be compressed. SQLAlchemy will not +check for such limitations, only the database will. + +Index compression +~~~~~~~~~~~~~~~~~ + +Oracle Database has a more efficient storage mode for indexes containing lots +of repeated values. 
Use the ``oracle_compress`` parameter to turn on key +compression:: + + Index("my_index", my_table.c.data, oracle_compress=True) + + Index( + "my_index", + my_table.c.data1, + my_table.c.data2, + unique=True, + oracle_compress=1, + ) + +The ``oracle_compress`` parameter accepts either an integer specifying the +number of prefix columns to compress, or ``True`` to use the default (all +columns for non-unique indexes, all but the last column for unique indexes). + +.. _oracle_vector_datatype: + +VECTOR Datatype +--------------- + +Oracle Database 23ai introduced a new VECTOR datatype for artificial intelligence +and machine learning search operations. The VECTOR datatype is a homogeneous array +of 8-bit signed integers, 8-bit unsigned integers (binary), 32-bit floating-point numbers, +or 64-bit floating-point numbers. + +.. seealso:: + + `Using VECTOR Data + `_ - in the documentation + for the :ref:`oracledb` driver. + +.. versionadded:: 2.0.41 + +CREATE TABLE support for VECTOR +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +With the :class:`.VECTOR` datatype, you can specify the dimension for the data +and the storage format. Valid values for storage format are enum values from +:class:`.VectorStorageFormat`. To create a table that includes a +:class:`.VECTOR` column:: + + from sqlalchemy.dialects.oracle import VECTOR, VectorStorageFormat + + t = Table( + "t1", + metadata, + Column("id", Integer, primary_key=True), + Column( + "embedding", + VECTOR(dim=3, storage_format=VectorStorageFormat.FLOAT32), + ), + Column(...), + ..., + ) + +Vectors can also be defined with an arbitrary number of dimensions and formats. +This allows you to specify vectors of different dimensions with the various +storage formats mentioned above. + +**Examples** + +* In this case, the storage format is flexible, allowing any vector type data to be inserted, + such as INT8 or BINARY etc:: + + vector_col: Mapped[array.array] = mapped_column(VECTOR(dim=3)) + +* The dimension is flexible in this case, meaning that any dimension vector can be used:: + + vector_col: Mapped[array.array] = mapped_column( + VECTOR(storage_format=VectorStorageType.INT8) + ) + +* Both the dimensions and the storage format are flexible:: + + vector_col: Mapped[array.array] = mapped_column(VECTOR) + +Python Datatypes for VECTOR +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +VECTOR data can be inserted using Python list or Python ``array.array()`` objects. +Python arrays of type FLOAT (32-bit), DOUBLE (64-bit), or INT (8-bit signed integer) +are used as bind values when inserting VECTOR columns:: + + from sqlalchemy import insert, select + + with engine.begin() as conn: + conn.execute( + insert(t1), + {"id": 1, "embedding": [1, 2, 3]}, + ) + +VECTOR Indexes +~~~~~~~~~~~~~~ + +The VECTOR feature supports an Oracle-specific parameter ``oracle_vector`` +on the :class:`.Index` construct, which allows the construction of VECTOR +indexes. + +To utilize VECTOR indexing, set the ``oracle_vector`` parameter to True to use +the default values provided by Oracle. 
HNSW is the default indexing method:: + + from sqlalchemy import Index + + Index( + "vector_index", + t1.c.embedding, + oracle_vector=True, + ) + +The full range of parameters for vector indexes are available by using the +:class:`.VectorIndexConfig` dataclass in place of a boolean; this dataclass +allows full configuration of the index:: + + Index( + "hnsw_vector_index", + t1.c.embedding, + oracle_vector=VectorIndexConfig( + index_type=VectorIndexType.HNSW, + distance=VectorDistanceType.COSINE, + accuracy=90, + hnsw_neighbors=5, + hnsw_efconstruction=20, + parallel=10, + ), + ) + + Index( + "ivf_vector_index", + t1.c.embedding, + oracle_vector=VectorIndexConfig( + index_type=VectorIndexType.IVF, + distance=VectorDistanceType.DOT, + accuracy=90, + ivf_neighbor_partitions=5, + ), + ) + +For complete explanation of these parameters, see the Oracle documentation linked +below. + +.. seealso:: + + `CREATE VECTOR INDEX `_ - in the Oracle documentation + + + +Similarity Searching +~~~~~~~~~~~~~~~~~~~~ + +When using the :class:`_oracle.VECTOR` datatype with a :class:`.Column` or similar +ORM mapped construct, additional comparison functions are available, including: + +* ``l2_distance`` +* ``cosine_distance`` +* ``inner_product`` + +Example Usage:: + + result_vector = connection.scalars( + select(t1).order_by(t1.embedding.l2_distance([2, 3, 4])).limit(3) + ) + + for user in vector: + print(user.id, user.embedding) + +FETCH APPROXIMATE support +~~~~~~~~~~~~~~~~~~~~~~~~~ + +Approximate vector search can only be performed when all syntax and semantic +rules are satisfied, the corresponding vector index is available, and the +query optimizer determines to perform it. If any of these conditions are +unmet, then an approximate search is not performed. In this case the query +returns exact results. + +To enable approximate searching during similarity searches on VECTORS, the +``oracle_fetch_approximate`` parameter may be used with the :meth:`.Select.fetch` +clause to add ``FETCH APPROX`` to the SELECT statement:: + + select(users_table).fetch(5, oracle_fetch_approximate=True) + +""" # noqa + +from __future__ import annotations + +from collections import defaultdict +from dataclasses import fields +from functools import lru_cache +from functools import wraps +import re + +from . import dictionary +from .types import _OracleBoolean +from .types import _OracleDate +from .types import BFILE +from .types import BINARY_DOUBLE +from .types import BINARY_FLOAT +from .types import DATE +from .types import FLOAT +from .types import INTERVAL +from .types import LONG +from .types import NCLOB +from .types import NUMBER +from .types import NVARCHAR2 # noqa +from .types import OracleRaw # noqa +from .types import RAW +from .types import ROWID # noqa +from .types import TIMESTAMP +from .types import VARCHAR2 # noqa +from .vector import VECTOR +from .vector import VectorIndexConfig +from .vector import VectorIndexType +from ... import Computed +from ... import exc +from ... import schema as sa_schema +from ... import sql +from ... 
import util +from ...engine import default +from ...engine import ObjectKind +from ...engine import ObjectScope +from ...engine import reflection +from ...engine.reflection import ReflectionDefaults +from ...sql import and_ +from ...sql import bindparam +from ...sql import compiler +from ...sql import expression +from ...sql import func +from ...sql import null +from ...sql import or_ +from ...sql import select +from ...sql import selectable as sa_selectable +from ...sql import sqltypes +from ...sql import util as sql_util +from ...sql import visitors +from ...sql.visitors import InternalTraversal +from ...types import BLOB +from ...types import CHAR +from ...types import CLOB +from ...types import DOUBLE_PRECISION +from ...types import INTEGER +from ...types import NCHAR +from ...types import NVARCHAR +from ...types import REAL +from ...types import VARCHAR + +RESERVED_WORDS = set( + "SHARE RAW DROP BETWEEN FROM DESC OPTION PRIOR LONG THEN " + "DEFAULT ALTER IS INTO MINUS INTEGER NUMBER GRANT IDENTIFIED " + "ALL TO ORDER ON FLOAT DATE HAVING CLUSTER NOWAIT RESOURCE " + "ANY TABLE INDEX FOR UPDATE WHERE CHECK SMALLINT WITH DELETE " + "BY ASC REVOKE LIKE SIZE RENAME NOCOMPRESS NULL GROUP VALUES " + "AS IN VIEW EXCLUSIVE COMPRESS SYNONYM SELECT INSERT EXISTS " + "NOT TRIGGER ELSE CREATE INTERSECT PCTFREE DISTINCT USER " + "CONNECT SET MODE OF UNIQUE VARCHAR2 VARCHAR LOCK OR CHAR " + "DECIMAL UNION PUBLIC AND START UID COMMENT CURRENT LEVEL".split() +) + +NO_ARG_FNS = set( + "UID CURRENT_DATE SYSDATE USER CURRENT_TIME CURRENT_TIMESTAMP".split() +) + + +colspecs = { + sqltypes.Boolean: _OracleBoolean, + sqltypes.Interval: INTERVAL, + sqltypes.DateTime: DATE, + sqltypes.Date: _OracleDate, +} + +ischema_names = { + "VARCHAR2": VARCHAR, + "NVARCHAR2": NVARCHAR, + "CHAR": CHAR, + "NCHAR": NCHAR, + "DATE": DATE, + "NUMBER": NUMBER, + "BLOB": BLOB, + "BFILE": BFILE, + "CLOB": CLOB, + "NCLOB": NCLOB, + "TIMESTAMP": TIMESTAMP, + "TIMESTAMP WITH TIME ZONE": TIMESTAMP, + "TIMESTAMP WITH LOCAL TIME ZONE": TIMESTAMP, + "INTERVAL DAY TO SECOND": INTERVAL, + "RAW": RAW, + "FLOAT": FLOAT, + "DOUBLE PRECISION": DOUBLE_PRECISION, + "REAL": REAL, + "LONG": LONG, + "BINARY_DOUBLE": BINARY_DOUBLE, + "BINARY_FLOAT": BINARY_FLOAT, + "ROWID": ROWID, + "VECTOR": VECTOR, +} + + +class OracleTypeCompiler(compiler.GenericTypeCompiler): + # Note: + # Oracle DATE == DATETIME + # Oracle does not allow milliseconds in DATE + # Oracle does not support TIME columns + + def visit_datetime(self, type_, **kw): + return self.visit_DATE(type_, **kw) + + def visit_float(self, type_, **kw): + return self.visit_FLOAT(type_, **kw) + + def visit_double(self, type_, **kw): + return self.visit_DOUBLE_PRECISION(type_, **kw) + + def visit_unicode(self, type_, **kw): + if self.dialect._use_nchar_for_unicode: + return self.visit_NVARCHAR2(type_, **kw) + else: + return self.visit_VARCHAR2(type_, **kw) + + def visit_INTERVAL(self, type_, **kw): + return "INTERVAL DAY%s TO SECOND%s" % ( + type_.day_precision is not None + and "(%d)" % type_.day_precision + or "", + type_.second_precision is not None + and "(%d)" % type_.second_precision + or "", + ) + + def visit_LONG(self, type_, **kw): + return "LONG" + + def visit_TIMESTAMP(self, type_, **kw): + if getattr(type_, "local_timezone", False): + return "TIMESTAMP WITH LOCAL TIME ZONE" + elif type_.timezone: + return "TIMESTAMP WITH TIME ZONE" + else: + return "TIMESTAMP" + + def visit_DOUBLE_PRECISION(self, type_, **kw): + return self._generate_numeric(type_, "DOUBLE PRECISION", **kw) + + def 
visit_BINARY_DOUBLE(self, type_, **kw): + return self._generate_numeric(type_, "BINARY_DOUBLE", **kw) + + def visit_BINARY_FLOAT(self, type_, **kw): + return self._generate_numeric(type_, "BINARY_FLOAT", **kw) + + def visit_FLOAT(self, type_, **kw): + kw["_requires_binary_precision"] = True + return self._generate_numeric(type_, "FLOAT", **kw) + + def visit_NUMBER(self, type_, **kw): + return self._generate_numeric(type_, "NUMBER", **kw) + + def _generate_numeric( + self, + type_, + name, + precision=None, + scale=None, + _requires_binary_precision=False, + **kw, + ): + if precision is None: + precision = getattr(type_, "precision", None) + + if _requires_binary_precision: + binary_precision = getattr(type_, "binary_precision", None) + + if precision and binary_precision is None: + # https://www.oracletutorial.com/oracle-basics/oracle-float/ + estimated_binary_precision = int(precision / 0.30103) + raise exc.ArgumentError( + "Oracle Database FLOAT types use 'binary precision', " + "which does not convert cleanly from decimal " + "'precision'. Please specify " + "this type with a separate Oracle Database variant, such " + f"as {type_.__class__.__name__}(precision={precision})." + f"with_variant(oracle.FLOAT" + f"(binary_precision=" + f"{estimated_binary_precision}), 'oracle'), so that the " + "Oracle Database specific 'binary_precision' may be " + "specified accurately." + ) + else: + precision = binary_precision + + if scale is None: + scale = getattr(type_, "scale", None) + + if precision is None: + return name + elif scale is None: + n = "%(name)s(%(precision)s)" + return n % {"name": name, "precision": precision} + else: + n = "%(name)s(%(precision)s, %(scale)s)" + return n % {"name": name, "precision": precision, "scale": scale} + + def visit_string(self, type_, **kw): + return self.visit_VARCHAR2(type_, **kw) + + def visit_VARCHAR2(self, type_, **kw): + return self._visit_varchar(type_, "", "2") + + def visit_NVARCHAR2(self, type_, **kw): + return self._visit_varchar(type_, "N", "2") + + visit_NVARCHAR = visit_NVARCHAR2 + + def visit_VARCHAR(self, type_, **kw): + return self._visit_varchar(type_, "", "") + + def _visit_varchar(self, type_, n, num): + if not type_.length: + return "%(n)sVARCHAR%(two)s" % {"two": num, "n": n} + elif not n and self.dialect._supports_char_length: + varchar = "VARCHAR%(two)s(%(length)s CHAR)" + return varchar % {"length": type_.length, "two": num} + else: + varchar = "%(n)sVARCHAR%(two)s(%(length)s)" + return varchar % {"length": type_.length, "two": num, "n": n} + + def visit_text(self, type_, **kw): + return self.visit_CLOB(type_, **kw) + + def visit_unicode_text(self, type_, **kw): + if self.dialect._use_nchar_for_unicode: + return self.visit_NCLOB(type_, **kw) + else: + return self.visit_CLOB(type_, **kw) + + def visit_large_binary(self, type_, **kw): + return self.visit_BLOB(type_, **kw) + + def visit_big_integer(self, type_, **kw): + return self.visit_NUMBER(type_, precision=19, **kw) + + def visit_boolean(self, type_, **kw): + return self.visit_SMALLINT(type_, **kw) + + def visit_RAW(self, type_, **kw): + if type_.length: + return "RAW(%(length)s)" % {"length": type_.length} + else: + return "RAW" + + def visit_ROWID(self, type_, **kw): + return "ROWID" + + def visit_VECTOR(self, type_, **kw): + if type_.dim is None and type_.storage_format is None: + return "VECTOR(*,*)" + elif type_.storage_format is None: + return f"VECTOR({type_.dim},*)" + elif type_.dim is None: + return f"VECTOR(*,{type_.storage_format.value})" + else: + return 
f"VECTOR({type_.dim},{type_.storage_format.value})" + + +class OracleCompiler(compiler.SQLCompiler): + """Oracle compiler modifies the lexical structure of Select + statements to work under non-ANSI configured Oracle databases, if + the use_ansi flag is False. + """ + + compound_keywords = util.update_copy( + compiler.SQLCompiler.compound_keywords, + {expression.CompoundSelect.EXCEPT: "MINUS"}, + ) + + def __init__(self, *args, **kwargs): + self.__wheres = {} + super().__init__(*args, **kwargs) + + def visit_mod_binary(self, binary, operator, **kw): + return "mod(%s, %s)" % ( + self.process(binary.left, **kw), + self.process(binary.right, **kw), + ) + + def visit_now_func(self, fn, **kw): + return "CURRENT_TIMESTAMP" + + def visit_char_length_func(self, fn, **kw): + return "LENGTH" + self.function_argspec(fn, **kw) + + def visit_match_op_binary(self, binary, operator, **kw): + return "CONTAINS (%s, %s)" % ( + self.process(binary.left), + self.process(binary.right), + ) + + def visit_true(self, expr, **kw): + return "1" + + def visit_false(self, expr, **kw): + return "0" + + def get_cte_preamble(self, recursive): + return "WITH" + + def get_select_hint_text(self, byfroms): + return " ".join("/*+ %s */" % text for table, text in byfroms.items()) + + def function_argspec(self, fn, **kw): + if len(fn.clauses) > 0 or fn.name.upper() not in NO_ARG_FNS: + return compiler.SQLCompiler.function_argspec(self, fn, **kw) + else: + return "" + + def visit_function(self, func, **kw): + text = super().visit_function(func, **kw) + if kw.get("asfrom", False) and func.name.lower() != "table": + text = "TABLE (%s)" % text + return text + + def visit_table_valued_column(self, element, **kw): + text = super().visit_table_valued_column(element, **kw) + text = text + ".COLUMN_VALUE" + return text + + def default_from(self): + """Called when a ``SELECT`` statement has no froms, + and no ``FROM`` clause is to be appended. + + The Oracle compiler tacks a "FROM DUAL" to the statement. 
+ """ + + return " FROM DUAL" + + def visit_join(self, join, from_linter=None, **kwargs): + if self.dialect.use_ansi: + return compiler.SQLCompiler.visit_join( + self, join, from_linter=from_linter, **kwargs + ) + else: + if from_linter: + from_linter.edges.add((join.left, join.right)) + + kwargs["asfrom"] = True + if isinstance(join.right, expression.FromGrouping): + right = join.right.element + else: + right = join.right + return ( + self.process(join.left, from_linter=from_linter, **kwargs) + + ", " + + self.process(right, from_linter=from_linter, **kwargs) + ) + + def _get_nonansi_join_whereclause(self, froms): + clauses = [] + + def visit_join(join): + if join.isouter: + # https://docs.oracle.com/database/121/SQLRF/queries006.htm#SQLRF52354 + # "apply the outer join operator (+) to all columns of B in + # the join condition in the WHERE clause" - that is, + # unconditionally regardless of operator or the other side + def visit_binary(binary): + if isinstance( + binary.left, expression.ColumnClause + ) and join.right.is_derived_from(binary.left.table): + binary.left = _OuterJoinColumn(binary.left) + elif isinstance( + binary.right, expression.ColumnClause + ) and join.right.is_derived_from(binary.right.table): + binary.right = _OuterJoinColumn(binary.right) + + clauses.append( + visitors.cloned_traverse( + join.onclause, {}, {"binary": visit_binary} + ) + ) + else: + clauses.append(join.onclause) + + for j in join.left, join.right: + if isinstance(j, expression.Join): + visit_join(j) + elif isinstance(j, expression.FromGrouping): + visit_join(j.element) + + for f in froms: + if isinstance(f, expression.Join): + visit_join(f) + + if not clauses: + return None + else: + return sql.and_(*clauses) + + def visit_outer_join_column(self, vc, **kw): + return self.process(vc.column, **kw) + "(+)" + + def visit_sequence(self, seq, **kw): + return self.preparer.format_sequence(seq) + ".nextval" + + def get_render_as_alias_suffix(self, alias_name_text): + """Oracle doesn't like ``FROM table AS alias``""" + + return " " + alias_name_text + + def returning_clause( + self, stmt, returning_cols, *, populate_result_map, **kw + ): + columns = [] + binds = [] + + for i, column in enumerate( + expression._select_iterables(returning_cols) + ): + if ( + self.isupdate + and isinstance(column, sa_schema.Column) + and isinstance(column.server_default, Computed) + and not self.dialect._supports_update_returning_computed_cols + ): + util.warn( + "Computed columns don't work with Oracle Database UPDATE " + "statements that use RETURNING; the value of the column " + "*before* the UPDATE takes place is returned. It is " + "advised to not use RETURNING with an Oracle Database " + "computed column. Consider setting implicit_returning " + "to False on the Table object in order to avoid implicit " + "RETURNING clauses from being generated for this Table." + ) + if column.type._has_column_expression: + col_expr = column.type.column_expression(column) + else: + col_expr = column + + outparam = sql.outparam("ret_%d" % i, type_=column.type) + self.binds[outparam.key] = outparam + binds.append( + self.bindparam_string(self._truncate_bindparam(outparam)) + ) + + # has_out_parameters would in a normal case be set to True + # as a result of the compiler visiting an outparam() object. + # in this case, the above outparam() objects are not being + # visited. Ensure the statement itself didn't have other + # outparam() objects independently. 
+ # technically, this could be supported, but as it would be + # a very strange use case without a clear rationale, disallow it + if self.has_out_parameters: + raise exc.InvalidRequestError( + "Using explicit outparam() objects with " + "UpdateBase.returning() in the same Core DML statement " + "is not supported in the Oracle Database dialects." + ) + + self._oracle_returning = True + + columns.append(self.process(col_expr, within_columns_clause=False)) + if populate_result_map: + self._add_to_result_map( + getattr(col_expr, "name", col_expr._anon_name_label), + getattr(col_expr, "name", col_expr._anon_name_label), + ( + column, + getattr(column, "name", None), + getattr(column, "key", None), + ), + column.type, + ) + + return "RETURNING " + ", ".join(columns) + " INTO " + ", ".join(binds) + + def _row_limit_clause(self, select, **kw): + """Oracle Database 12c supports OFFSET/FETCH operators + Use it instead subquery with row_number + + """ + + if ( + select._fetch_clause is not None + or not self.dialect._supports_offset_fetch + ): + return super()._row_limit_clause( + select, use_literal_execute_for_simple_int=True, **kw + ) + else: + return self.fetch_clause( + select, + fetch_clause=self._get_limit_or_fetch(select), + use_literal_execute_for_simple_int=True, + **kw, + ) + + def _get_limit_or_fetch(self, select): + if select._fetch_clause is None: + return select._limit_clause + else: + return select._fetch_clause + + def fetch_clause( + self, + select, + fetch_clause=None, + require_offset=False, + use_literal_execute_for_simple_int=False, + **kw, + ): + text = super().fetch_clause( + select, + fetch_clause=fetch_clause, + require_offset=require_offset, + use_literal_execute_for_simple_int=( + use_literal_execute_for_simple_int + ), + **kw, + ) + + if select.dialect_options["oracle"]["fetch_approximate"]: + text = re.sub("FETCH FIRST", "FETCH APPROX FIRST", text) + + return text + + def translate_select_structure(self, select_stmt, **kwargs): + select = select_stmt + + if not getattr(select, "_oracle_visit", None): + if not self.dialect.use_ansi: + froms = self._display_froms_for_select( + select, kwargs.get("asfrom", False) + ) + whereclause = self._get_nonansi_join_whereclause(froms) + if whereclause is not None: + select = select.where(whereclause) + select._oracle_visit = True + + # if fetch is used this is not needed + if ( + select._has_row_limiting_clause + and not self.dialect._supports_offset_fetch + and select._fetch_clause is None + ): + limit_clause = select._limit_clause + offset_clause = select._offset_clause + + if select._simple_int_clause(limit_clause): + limit_clause = limit_clause.render_literal_execute() + + if select._simple_int_clause(offset_clause): + offset_clause = offset_clause.render_literal_execute() + + # currently using form at: + # https://blogs.oracle.com/oraclemagazine/\ + # on-rownum-and-limiting-results + + orig_select = select + select = select._generate() + select._oracle_visit = True + + # add expressions to accommodate FOR UPDATE OF + for_update = select._for_update_arg + if for_update is not None and for_update.of: + for_update = for_update._clone() + for_update._copy_internals() + + for elem in for_update.of: + if not select.selected_columns.contains_column(elem): + select = select.add_columns(elem) + + # Wrap the middle select and add the hint + inner_subquery = select.alias() + limitselect = sql.select( + *[ + c + for c in inner_subquery.c + if orig_select.selected_columns.corresponding_column(c) + is not None + ] + ) + + if ( + limit_clause 
is not None + and self.dialect.optimize_limits + and select._simple_int_clause(limit_clause) + ): + limitselect = limitselect.prefix_with( + expression.text( + "/*+ FIRST_ROWS(%s) */" + % self.process(limit_clause, **kwargs) + ) + ) + + limitselect._oracle_visit = True + limitselect._is_wrapper = True + + # add expressions to accommodate FOR UPDATE OF + if for_update is not None and for_update.of: + adapter = sql_util.ClauseAdapter(inner_subquery) + for_update.of = [ + adapter.traverse(elem) for elem in for_update.of + ] + + # If needed, add the limiting clause + if limit_clause is not None: + if select._simple_int_clause(limit_clause) and ( + offset_clause is None + or select._simple_int_clause(offset_clause) + ): + max_row = limit_clause + + if offset_clause is not None: + max_row = max_row + offset_clause + + else: + max_row = limit_clause + + if offset_clause is not None: + max_row = max_row + offset_clause + limitselect = limitselect.where( + sql.literal_column("ROWNUM") <= max_row + ) + + # If needed, add the ora_rn, and wrap again with offset. + if offset_clause is None: + limitselect._for_update_arg = for_update + select = limitselect + else: + limitselect = limitselect.add_columns( + sql.literal_column("ROWNUM").label("ora_rn") + ) + limitselect._oracle_visit = True + limitselect._is_wrapper = True + + if for_update is not None and for_update.of: + limitselect_cols = limitselect.selected_columns + for elem in for_update.of: + if ( + limitselect_cols.corresponding_column(elem) + is None + ): + limitselect = limitselect.add_columns(elem) + + limit_subquery = limitselect.alias() + origselect_cols = orig_select.selected_columns + offsetselect = sql.select( + *[ + c + for c in limit_subquery.c + if origselect_cols.corresponding_column(c) + is not None + ] + ) + + offsetselect._oracle_visit = True + offsetselect._is_wrapper = True + + if for_update is not None and for_update.of: + adapter = sql_util.ClauseAdapter(limit_subquery) + for_update.of = [ + adapter.traverse(elem) for elem in for_update.of + ] + + offsetselect = offsetselect.where( + sql.literal_column("ora_rn") > offset_clause + ) + + offsetselect._for_update_arg = for_update + select = offsetselect + + return select + + def limit_clause(self, select, **kw): + return "" + + def visit_empty_set_expr(self, type_, **kw): + return "SELECT 1 FROM DUAL WHERE 1!=1" + + def for_update_clause(self, select, **kw): + if self.is_subquery(): + return "" + + tmp = " FOR UPDATE" + + if select._for_update_arg.of: + tmp += " OF " + ", ".join( + self.process(elem, **kw) for elem in select._for_update_arg.of + ) + + if select._for_update_arg.nowait: + tmp += " NOWAIT" + if select._for_update_arg.skip_locked: + tmp += " SKIP LOCKED" + + return tmp + + def visit_is_distinct_from_binary(self, binary, operator, **kw): + return "DECODE(%s, %s, 0, 1) = 1" % ( + self.process(binary.left), + self.process(binary.right), + ) + + def visit_is_not_distinct_from_binary(self, binary, operator, **kw): + return "DECODE(%s, %s, 0, 1) = 0" % ( + self.process(binary.left), + self.process(binary.right), + ) + + def visit_regexp_match_op_binary(self, binary, operator, **kw): + string = self.process(binary.left, **kw) + pattern = self.process(binary.right, **kw) + flags = binary.modifiers["flags"] + if flags is None: + return "REGEXP_LIKE(%s, %s)" % (string, pattern) + else: + return "REGEXP_LIKE(%s, %s, %s)" % ( + string, + pattern, + self.render_literal_value(flags, sqltypes.STRINGTYPE), + ) + + def visit_not_regexp_match_op_binary(self, binary, operator, **kw): + 
return "NOT %s" % self.visit_regexp_match_op_binary( + binary, operator, **kw + ) + + def visit_regexp_replace_op_binary(self, binary, operator, **kw): + string = self.process(binary.left, **kw) + pattern_replace = self.process(binary.right, **kw) + flags = binary.modifiers["flags"] + if flags is None: + return "REGEXP_REPLACE(%s, %s)" % ( + string, + pattern_replace, + ) + else: + return "REGEXP_REPLACE(%s, %s, %s)" % ( + string, + pattern_replace, + self.render_literal_value(flags, sqltypes.STRINGTYPE), + ) + + def visit_aggregate_strings_func(self, fn, **kw): + return "LISTAGG%s" % self.function_argspec(fn, **kw) + + def _visit_bitwise(self, binary, fn_name, custom_right=None, **kw): + left = self.process(binary.left, **kw) + right = self.process( + custom_right if custom_right is not None else binary.right, **kw + ) + return f"{fn_name}({left}, {right})" + + def visit_bitwise_xor_op_binary(self, binary, operator, **kw): + return self._visit_bitwise(binary, "BITXOR", **kw) + + def visit_bitwise_or_op_binary(self, binary, operator, **kw): + return self._visit_bitwise(binary, "BITOR", **kw) + + def visit_bitwise_and_op_binary(self, binary, operator, **kw): + return self._visit_bitwise(binary, "BITAND", **kw) + + def visit_bitwise_rshift_op_binary(self, binary, operator, **kw): + raise exc.CompileError("Cannot compile bitwise_rshift in oracle") + + def visit_bitwise_lshift_op_binary(self, binary, operator, **kw): + raise exc.CompileError("Cannot compile bitwise_lshift in oracle") + + def visit_bitwise_not_op_unary_operator(self, element, operator, **kw): + raise exc.CompileError("Cannot compile bitwise_not in oracle") + + +class OracleDDLCompiler(compiler.DDLCompiler): + + def _build_vector_index_config( + self, vector_index_config: VectorIndexConfig + ) -> str: + parts = [] + sql_param_name = { + "hnsw_neighbors": "neighbors", + "hnsw_efconstruction": "efconstruction", + "ivf_neighbor_partitions": "neighbor partitions", + "ivf_sample_per_partition": "sample_per_partition", + "ivf_min_vectors_per_partition": "min_vectors_per_partition", + } + if vector_index_config.index_type == VectorIndexType.HNSW: + parts.append("ORGANIZATION INMEMORY NEIGHBOR GRAPH") + elif vector_index_config.index_type == VectorIndexType.IVF: + parts.append("ORGANIZATION NEIGHBOR PARTITIONS") + if vector_index_config.distance is not None: + parts.append(f"DISTANCE {vector_index_config.distance.value}") + + if vector_index_config.accuracy is not None: + parts.append( + f"WITH TARGET ACCURACY {vector_index_config.accuracy}" + ) + + parameters_str = [f"type {vector_index_config.index_type.name}"] + prefix = vector_index_config.index_type.name.lower() + "_" + + for field in fields(vector_index_config): + if field.name.startswith(prefix): + key = sql_param_name.get(field.name) + value = getattr(vector_index_config, field.name) + if value is not None: + parameters_str.append(f"{key} {value}") + + parameters_str = ", ".join(parameters_str) + parts.append(f"PARAMETERS ({parameters_str})") + + if vector_index_config.parallel is not None: + parts.append(f"PARALLEL {vector_index_config.parallel}") + + return " ".join(parts) + + def define_constraint_cascades(self, constraint): + text = "" + if constraint.ondelete is not None: + text += " ON DELETE %s" % constraint.ondelete + + # oracle has no ON UPDATE CASCADE - + # its only available via triggers + # https://web.archive.org/web/20090317041251/https://asktom.oracle.com/tkyte/update_cascade/index.html + if constraint.onupdate is not None: + util.warn( + "Oracle Database does not 
contain native UPDATE CASCADE " + "functionality - onupdates will not be rendered for foreign " + "keys. Consider using deferrable=True, initially='deferred' " + "or triggers." + ) + + return text + + def visit_drop_table_comment(self, drop, **kw): + return "COMMENT ON TABLE %s IS ''" % self.preparer.format_table( + drop.element + ) + + def visit_create_index(self, create, **kw): + index = create.element + self._verify_index_table(index) + preparer = self.preparer + text = "CREATE " + if index.unique: + text += "UNIQUE " + if index.dialect_options["oracle"]["bitmap"]: + text += "BITMAP " + vector_options = index.dialect_options["oracle"]["vector"] + if vector_options: + text += "VECTOR " + text += "INDEX %s ON %s (%s)" % ( + self._prepared_index_name(index, include_schema=True), + preparer.format_table(index.table, use_schema=True), + ", ".join( + self.sql_compiler.process( + expr, include_table=False, literal_binds=True + ) + for expr in index.expressions + ), + ) + if index.dialect_options["oracle"]["compress"] is not False: + if index.dialect_options["oracle"]["compress"] is True: + text += " COMPRESS" + else: + text += " COMPRESS %d" % ( + index.dialect_options["oracle"]["compress"] + ) + if vector_options: + if vector_options is True: + vector_options = VectorIndexConfig() + + text += " " + self._build_vector_index_config(vector_options) + return text + + def post_create_table(self, table): + table_opts = [] + opts = table.dialect_options["oracle"] + + if opts["on_commit"]: + on_commit_options = opts["on_commit"].replace("_", " ").upper() + table_opts.append("\n ON COMMIT %s" % on_commit_options) + + if opts["compress"]: + if opts["compress"] is True: + table_opts.append("\n COMPRESS") + else: + table_opts.append("\n COMPRESS FOR %s" % (opts["compress"])) + if opts["tablespace"]: + table_opts.append( + "\n TABLESPACE %s" % self.preparer.quote(opts["tablespace"]) + ) + return "".join(table_opts) + + def get_identity_options(self, identity_options): + text = super().get_identity_options(identity_options) + text = text.replace("NO MINVALUE", "NOMINVALUE") + text = text.replace("NO MAXVALUE", "NOMAXVALUE") + text = text.replace("NO CYCLE", "NOCYCLE") + if identity_options.order is not None: + text += " ORDER" if identity_options.order else " NOORDER" + return text.strip() + + def visit_computed_column(self, generated, **kw): + text = "GENERATED ALWAYS AS (%s)" % self.sql_compiler.process( + generated.sqltext, include_table=False, literal_binds=True + ) + if generated.persisted is True: + raise exc.CompileError( + "Oracle Database computed columns do not support 'stored' " + "persistence; set the 'persisted' flag to None or False for " + "Oracle Database support." 
+ ) + elif generated.persisted is False: + text += " VIRTUAL" + return text + + def visit_identity_column(self, identity, **kw): + if identity.always is None: + kind = "" + else: + kind = "ALWAYS" if identity.always else "BY DEFAULT" + text = "GENERATED %s" % kind + if identity.on_null: + text += " ON NULL" + text += " AS IDENTITY" + options = self.get_identity_options(identity) + if options: + text += " (%s)" % options + return text + + +class OracleIdentifierPreparer(compiler.IdentifierPreparer): + reserved_words = {x.lower() for x in RESERVED_WORDS} + illegal_initial_characters = {str(dig) for dig in range(0, 10)}.union( + ["_", "$"] + ) + + def _bindparam_requires_quotes(self, value): + """Return True if the given identifier requires quoting.""" + lc_value = value.lower() + return ( + lc_value in self.reserved_words + or value[0] in self.illegal_initial_characters + or not self.legal_characters.match(str(value)) + ) + + def format_savepoint(self, savepoint): + name = savepoint.ident.lstrip("_") + return super().format_savepoint(savepoint, name) + + +class OracleExecutionContext(default.DefaultExecutionContext): + def fire_sequence(self, seq, type_): + return self._execute_scalar( + "SELECT " + + self.identifier_preparer.format_sequence(seq) + + ".nextval FROM DUAL", + type_, + ) + + def pre_exec(self): + if self.statement and "_oracle_dblink" in self.execution_options: + self.statement = self.statement.replace( + dictionary.DB_LINK_PLACEHOLDER, + self.execution_options["_oracle_dblink"], + ) + + +class OracleDialect(default.DefaultDialect): + name = "oracle" + supports_statement_cache = True + supports_alter = True + max_identifier_length = 128 + + _supports_offset_fetch = True + + insert_returning = True + update_returning = True + delete_returning = True + + div_is_floordiv = False + + supports_simple_order_by_label = False + cte_follows_insert = True + returns_native_bytes = True + + supports_sequences = True + sequences_optional = False + postfetch_lastrowid = False + + default_paramstyle = "named" + colspecs = colspecs + ischema_names = ischema_names + requires_name_normalize = True + + supports_comments = True + + supports_default_values = False + supports_default_metavalue = True + supports_empty_insert = False + supports_identity_columns = True + + statement_compiler = OracleCompiler + ddl_compiler = OracleDDLCompiler + type_compiler_cls = OracleTypeCompiler + preparer = OracleIdentifierPreparer + execution_ctx_cls = OracleExecutionContext + + reflection_options = ("oracle_resolve_synonyms",) + + _use_nchar_for_unicode = False + + construct_arguments = [ + ( + sa_schema.Table, + { + "resolve_synonyms": False, + "on_commit": None, + "compress": False, + "tablespace": None, + }, + ), + ( + sa_schema.Index, + { + "bitmap": False, + "compress": False, + "vector": False, + }, + ), + (sa_selectable.Select, {"fetch_approximate": False}), + (sa_selectable.CompoundSelect, {"fetch_approximate": False}), + ] + + @util.deprecated_params( + use_binds_for_limits=( + "1.4", + "The ``use_binds_for_limits`` Oracle Database dialect parameter " + "is deprecated. 
The dialect now renders LIMIT / OFFSET integers " + "inline in all cases using a post-compilation hook, so that the " + "value is still represented by a 'bound parameter' on the Core " + "Expression side.", + ) + ) + def __init__( + self, + use_ansi=True, + optimize_limits=False, + use_binds_for_limits=None, + use_nchar_for_unicode=False, + exclude_tablespaces=("SYSTEM", "SYSAUX"), + enable_offset_fetch=True, + **kwargs, + ): + default.DefaultDialect.__init__(self, **kwargs) + self._use_nchar_for_unicode = use_nchar_for_unicode + self.use_ansi = use_ansi + self.optimize_limits = optimize_limits + self.exclude_tablespaces = exclude_tablespaces + self.enable_offset_fetch = self._supports_offset_fetch = ( + enable_offset_fetch + ) + + def initialize(self, connection): + super().initialize(connection) + + # Oracle 8i has RETURNING: + # https://docs.oracle.com/cd/A87860_01/doc/index.htm + + # so does Oracle8: + # https://docs.oracle.com/cd/A64702_01/doc/index.htm + + if self._is_oracle_8: + self.colspecs = self.colspecs.copy() + self.colspecs.pop(sqltypes.Interval) + self.use_ansi = False + + self.supports_identity_columns = self.server_version_info >= (12,) + self._supports_offset_fetch = ( + self.enable_offset_fetch and self.server_version_info >= (12,) + ) + + def _get_effective_compat_server_version_info(self, connection): + # dialect does not need compat levels below 12.2, so don't query + # in those cases + + if self.server_version_info < (12, 2): + return self.server_version_info + try: + compat = connection.exec_driver_sql( + "SELECT value FROM v$parameter WHERE name = 'compatible'" + ).scalar() + except exc.DBAPIError: + compat = None + + if compat: + try: + return tuple(int(x) for x in compat.split(".")) + except: + return self.server_version_info + else: + return self.server_version_info + + @property + def _is_oracle_8(self): + return self.server_version_info and self.server_version_info < (9,) + + @property + def _supports_table_compression(self): + return self.server_version_info and self.server_version_info >= (10, 1) + + @property + def _supports_table_compress_for(self): + return self.server_version_info and self.server_version_info >= (11,) + + @property + def _supports_char_length(self): + return not self._is_oracle_8 + + @property + def _supports_update_returning_computed_cols(self): + # on version 18 this error is no longet present while it happens on 11 + # it may work also on versions before the 18 + return self.server_version_info and self.server_version_info >= (18,) + + @property + def _supports_except_all(self): + return self.server_version_info and self.server_version_info >= (21,) + + def do_release_savepoint(self, connection, name): + # Oracle does not support RELEASE SAVEPOINT + pass + + def _check_max_identifier_length(self, connection): + if self._get_effective_compat_server_version_info(connection) < ( + 12, + 2, + ): + return 30 + else: + # use the default + return None + + def get_isolation_level_values(self, dbapi_connection): + return ["READ COMMITTED", "SERIALIZABLE"] + + def get_default_isolation_level(self, dbapi_conn): + try: + return self.get_isolation_level(dbapi_conn) + except NotImplementedError: + raise + except: + return "READ COMMITTED" + + def _execute_reflection( + self, connection, query, dblink, returns_long, params=None + ): + if dblink and not dblink.startswith("@"): + dblink = f"@{dblink}" + execution_options = { + # handle db links + "_oracle_dblink": dblink or "", + # override any schema translate map + "schema_translate_map": None, + } 
+ + if dblink and returns_long: + # Oracle seems to error with + # "ORA-00997: illegal use of LONG datatype" when returning + # LONG columns via a dblink in a query with bind params + # This type seems to be very hard to cast into something else + # so it seems easier to just use bind param in this case + def visit_bindparam(bindparam): + bindparam.literal_execute = True + + query = visitors.cloned_traverse( + query, {}, {"bindparam": visit_bindparam} + ) + return connection.execute( + query, params, execution_options=execution_options + ) + + @util.memoized_property + def _has_table_query(self): + # materialized views are returned by all_tables + tables = ( + select( + dictionary.all_tables.c.table_name, + dictionary.all_tables.c.owner, + ) + .union_all( + select( + dictionary.all_views.c.view_name.label("table_name"), + dictionary.all_views.c.owner, + ) + ) + .subquery("tables_and_views") + ) + + query = select(tables.c.table_name).where( + tables.c.table_name == bindparam("table_name"), + tables.c.owner == bindparam("owner"), + ) + return query + + @reflection.cache + def has_table( + self, connection, table_name, schema=None, dblink=None, **kw + ): + """Supported kw arguments are: ``dblink`` to reflect via a db link.""" + self._ensure_has_table_connection(connection) + + if not schema: + schema = self.default_schema_name + + params = { + "table_name": self.denormalize_name(table_name), + "owner": self.denormalize_schema_name(schema), + } + cursor = self._execute_reflection( + connection, + self._has_table_query, + dblink, + returns_long=False, + params=params, + ) + return bool(cursor.scalar()) + + @reflection.cache + def has_sequence( + self, connection, sequence_name, schema=None, dblink=None, **kw + ): + """Supported kw arguments are: ``dblink`` to reflect via a db link.""" + if not schema: + schema = self.default_schema_name + + query = select(dictionary.all_sequences.c.sequence_name).where( + dictionary.all_sequences.c.sequence_name + == self.denormalize_schema_name(sequence_name), + dictionary.all_sequences.c.sequence_owner + == self.denormalize_schema_name(schema), + ) + + cursor = self._execute_reflection( + connection, query, dblink, returns_long=False + ) + return bool(cursor.scalar()) + + def _get_default_schema_name(self, connection): + return self.normalize_name( + connection.exec_driver_sql( + "select sys_context( 'userenv', 'current_schema' ) from dual" + ).scalar() + ) + + def denormalize_schema_name(self, name): + # look for quoted_name + force = getattr(name, "quote", None) + if force is None and name == "public": + # look for case insensitive, no quoting specified, "public" + return "PUBLIC" + return super().denormalize_name(name) + + @reflection.flexi_cache( + ("schema", InternalTraversal.dp_string), + ("filter_names", InternalTraversal.dp_string_list), + ("dblink", InternalTraversal.dp_string), + ) + def _get_synonyms(self, connection, schema, filter_names, dblink, **kw): + owner = self.denormalize_schema_name( + schema or self.default_schema_name + ) + + has_filter_names, params = self._prepare_filter_names(filter_names) + query = select( + dictionary.all_synonyms.c.synonym_name, + dictionary.all_synonyms.c.table_name, + dictionary.all_synonyms.c.table_owner, + dictionary.all_synonyms.c.db_link, + ).where(dictionary.all_synonyms.c.owner == owner) + if has_filter_names: + query = query.where( + dictionary.all_synonyms.c.synonym_name.in_( + params["filter_names"] + ) + ) + result = self._execute_reflection( + connection, query, dblink, returns_long=False + 
).mappings() + return result.all() + + @lru_cache() + def _all_objects_query( + self, owner, scope, kind, has_filter_names, has_mat_views + ): + query = ( + select(dictionary.all_objects.c.object_name) + .select_from(dictionary.all_objects) + .where(dictionary.all_objects.c.owner == owner) + ) + + # NOTE: materialized views are listed in all_objects twice; + # once as MATERIALIZE VIEW and once as TABLE + if kind is ObjectKind.ANY: + # materilaized view are listed also as tables so there is no + # need to add them to the in_. + query = query.where( + dictionary.all_objects.c.object_type.in_(("TABLE", "VIEW")) + ) + else: + object_type = [] + if ObjectKind.VIEW in kind: + object_type.append("VIEW") + if ( + ObjectKind.MATERIALIZED_VIEW in kind + and ObjectKind.TABLE not in kind + ): + # materilaized view are listed also as tables so there is no + # need to add them to the in_ if also selecting tables. + object_type.append("MATERIALIZED VIEW") + if ObjectKind.TABLE in kind: + object_type.append("TABLE") + if has_mat_views and ObjectKind.MATERIALIZED_VIEW not in kind: + # materialized view are listed also as tables, + # so they need to be filtered out + # EXCEPT ALL / MINUS profiles as faster than using + # NOT EXISTS or NOT IN with a subquery, but it's in + # general faster to get the mat view names and exclude + # them only when needed + query = query.where( + dictionary.all_objects.c.object_name.not_in( + bindparam("mat_views") + ) + ) + query = query.where( + dictionary.all_objects.c.object_type.in_(object_type) + ) + + # handles scope + if scope is ObjectScope.DEFAULT: + query = query.where(dictionary.all_objects.c.temporary == "N") + elif scope is ObjectScope.TEMPORARY: + query = query.where(dictionary.all_objects.c.temporary == "Y") + + if has_filter_names: + query = query.where( + dictionary.all_objects.c.object_name.in_( + bindparam("filter_names") + ) + ) + return query + + @reflection.flexi_cache( + ("schema", InternalTraversal.dp_string), + ("scope", InternalTraversal.dp_plain_obj), + ("kind", InternalTraversal.dp_plain_obj), + ("filter_names", InternalTraversal.dp_string_list), + ("dblink", InternalTraversal.dp_string), + ) + def _get_all_objects( + self, connection, schema, scope, kind, filter_names, dblink, **kw + ): + owner = self.denormalize_schema_name( + schema or self.default_schema_name + ) + + has_filter_names, params = self._prepare_filter_names(filter_names) + has_mat_views = False + if ( + ObjectKind.TABLE in kind + and ObjectKind.MATERIALIZED_VIEW not in kind + ): + # see note in _all_objects_query + mat_views = self.get_materialized_view_names( + connection, schema, dblink, _normalize=False, **kw + ) + if mat_views: + params["mat_views"] = mat_views + has_mat_views = True + + query = self._all_objects_query( + owner, scope, kind, has_filter_names, has_mat_views + ) + + result = self._execute_reflection( + connection, query, dblink, returns_long=False, params=params + ).scalars() + + return result.all() + + def _handle_synonyms_decorator(fn): + @wraps(fn) + def wrapper(self, *args, **kwargs): + return self._handle_synonyms(fn, *args, **kwargs) + + return wrapper + + def _handle_synonyms(self, fn, connection, *args, **kwargs): + if not kwargs.get("oracle_resolve_synonyms", False): + return fn(self, connection, *args, **kwargs) + + original_kw = kwargs.copy() + schema = kwargs.pop("schema", None) + result = self._get_synonyms( + connection, + schema=schema, + filter_names=kwargs.pop("filter_names", None), + dblink=kwargs.pop("dblink", None), + 
info_cache=kwargs.get("info_cache", None), + ) + + dblinks_owners = defaultdict(dict) + for row in result: + key = row["db_link"], row["table_owner"] + tn = self.normalize_name(row["table_name"]) + dblinks_owners[key][tn] = row["synonym_name"] + + if not dblinks_owners: + # No synonym, do the plain thing + return fn(self, connection, *args, **original_kw) + + data = {} + for (dblink, table_owner), mapping in dblinks_owners.items(): + call_kw = { + **original_kw, + "schema": table_owner, + "dblink": self.normalize_name(dblink), + "filter_names": mapping.keys(), + } + call_result = fn(self, connection, *args, **call_kw) + for (_, tn), value in call_result: + synonym_name = self.normalize_name(mapping[tn]) + data[(schema, synonym_name)] = value + return data.items() + + @reflection.cache + def get_schema_names(self, connection, dblink=None, **kw): + """Supported kw arguments are: ``dblink`` to reflect via a db link.""" + query = select(dictionary.all_users.c.username).order_by( + dictionary.all_users.c.username + ) + result = self._execute_reflection( + connection, query, dblink, returns_long=False + ).scalars() + return [self.normalize_name(row) for row in result] + + @reflection.cache + def get_table_names(self, connection, schema=None, dblink=None, **kw): + """Supported kw arguments are: ``dblink`` to reflect via a db link.""" + # note that table_names() isn't loading DBLINKed or synonym'ed tables + if schema is None: + schema = self.default_schema_name + + den_schema = self.denormalize_schema_name(schema) + if kw.get("oracle_resolve_synonyms", False): + tables = ( + select( + dictionary.all_tables.c.table_name, + dictionary.all_tables.c.owner, + dictionary.all_tables.c.iot_name, + dictionary.all_tables.c.duration, + dictionary.all_tables.c.tablespace_name, + ) + .union_all( + select( + dictionary.all_synonyms.c.synonym_name.label( + "table_name" + ), + dictionary.all_synonyms.c.owner, + dictionary.all_tables.c.iot_name, + dictionary.all_tables.c.duration, + dictionary.all_tables.c.tablespace_name, + ) + .select_from(dictionary.all_tables) + .join( + dictionary.all_synonyms, + and_( + dictionary.all_tables.c.table_name + == dictionary.all_synonyms.c.table_name, + dictionary.all_tables.c.owner + == func.coalesce( + dictionary.all_synonyms.c.table_owner, + dictionary.all_synonyms.c.owner, + ), + ), + ) + ) + .subquery("available_tables") + ) + else: + tables = dictionary.all_tables + + query = select(tables.c.table_name) + if self.exclude_tablespaces: + query = query.where( + func.coalesce( + tables.c.tablespace_name, "no tablespace" + ).not_in(self.exclude_tablespaces) + ) + query = query.where( + tables.c.owner == den_schema, + tables.c.iot_name.is_(null()), + tables.c.duration.is_(null()), + ) + + # remove materialized views + mat_query = select( + dictionary.all_mviews.c.mview_name.label("table_name") + ).where(dictionary.all_mviews.c.owner == den_schema) + + query = ( + query.except_all(mat_query) + if self._supports_except_all + else query.except_(mat_query) + ) + + result = self._execute_reflection( + connection, query, dblink, returns_long=False + ).scalars() + return [self.normalize_name(row) for row in result] + + @reflection.cache + def get_temp_table_names(self, connection, dblink=None, **kw): + """Supported kw arguments are: ``dblink`` to reflect via a db link.""" + schema = self.denormalize_schema_name(self.default_schema_name) + + query = select(dictionary.all_tables.c.table_name) + if self.exclude_tablespaces: + query = query.where( + func.coalesce( + 
dictionary.all_tables.c.tablespace_name, "no tablespace" + ).not_in(self.exclude_tablespaces) + ) + query = query.where( + dictionary.all_tables.c.owner == schema, + dictionary.all_tables.c.iot_name.is_(null()), + dictionary.all_tables.c.duration.is_not(null()), + ) + + result = self._execute_reflection( + connection, query, dblink, returns_long=False + ).scalars() + return [self.normalize_name(row) for row in result] + + @reflection.cache + def get_materialized_view_names( + self, connection, schema=None, dblink=None, _normalize=True, **kw + ): + """Supported kw arguments are: ``dblink`` to reflect via a db link.""" + if not schema: + schema = self.default_schema_name + + query = select(dictionary.all_mviews.c.mview_name).where( + dictionary.all_mviews.c.owner + == self.denormalize_schema_name(schema) + ) + result = self._execute_reflection( + connection, query, dblink, returns_long=False + ).scalars() + if _normalize: + return [self.normalize_name(row) for row in result] + else: + return result.all() + + @reflection.cache + def get_view_names(self, connection, schema=None, dblink=None, **kw): + """Supported kw arguments are: ``dblink`` to reflect via a db link.""" + if not schema: + schema = self.default_schema_name + + query = select(dictionary.all_views.c.view_name).where( + dictionary.all_views.c.owner + == self.denormalize_schema_name(schema) + ) + result = self._execute_reflection( + connection, query, dblink, returns_long=False + ).scalars() + return [self.normalize_name(row) for row in result] + + @reflection.cache + def get_sequence_names(self, connection, schema=None, dblink=None, **kw): + """Supported kw arguments are: ``dblink`` to reflect via a db link.""" + if not schema: + schema = self.default_schema_name + query = select(dictionary.all_sequences.c.sequence_name).where( + dictionary.all_sequences.c.sequence_owner + == self.denormalize_schema_name(schema) + ) + + result = self._execute_reflection( + connection, query, dblink, returns_long=False + ).scalars() + return [self.normalize_name(row) for row in result] + + def _value_or_raise(self, data, table, schema): + table = self.normalize_name(str(table)) + try: + return dict(data)[(schema, table)] + except KeyError: + raise exc.NoSuchTableError( + f"{schema}.{table}" if schema else table + ) from None + + def _prepare_filter_names(self, filter_names): + if filter_names: + fn = [self.denormalize_name(name) for name in filter_names] + return True, {"filter_names": fn} + else: + return False, {} + + @reflection.cache + def get_table_options(self, connection, table_name, schema=None, **kw): + """Supported kw arguments are: ``dblink`` to reflect via a db link; + ``oracle_resolve_synonyms`` to resolve names to synonyms + """ + data = self.get_multi_table_options( + connection, + schema=schema, + filter_names=[table_name], + scope=ObjectScope.ANY, + kind=ObjectKind.ANY, + **kw, + ) + return self._value_or_raise(data, table_name, schema) + + @lru_cache() + def _table_options_query( + self, owner, scope, kind, has_filter_names, has_mat_views + ): + query = select( + dictionary.all_tables.c.table_name, + ( + dictionary.all_tables.c.compression + if self._supports_table_compression + else sql.null().label("compression") + ), + ( + dictionary.all_tables.c.compress_for + if self._supports_table_compress_for + else sql.null().label("compress_for") + ), + dictionary.all_tables.c.tablespace_name, + ).where(dictionary.all_tables.c.owner == owner) + if has_filter_names: + query = query.where( + dictionary.all_tables.c.table_name.in_( + 
bindparam("filter_names") + ) + ) + if scope is ObjectScope.DEFAULT: + query = query.where(dictionary.all_tables.c.duration.is_(null())) + elif scope is ObjectScope.TEMPORARY: + query = query.where( + dictionary.all_tables.c.duration.is_not(null()) + ) + + if ( + has_mat_views + and ObjectKind.TABLE in kind + and ObjectKind.MATERIALIZED_VIEW not in kind + ): + # cant use EXCEPT ALL / MINUS here because we don't have an + # excludable row vs. the query above + # outerjoin + where null works better on oracle 21 but 11 does + # not like it at all. this is the next best thing + + query = query.where( + dictionary.all_tables.c.table_name.not_in( + bindparam("mat_views") + ) + ) + elif ( + ObjectKind.TABLE not in kind + and ObjectKind.MATERIALIZED_VIEW in kind + ): + query = query.where( + dictionary.all_tables.c.table_name.in_(bindparam("mat_views")) + ) + return query + + @_handle_synonyms_decorator + def get_multi_table_options( + self, + connection, + *, + schema, + filter_names, + scope, + kind, + dblink=None, + **kw, + ): + """Supported kw arguments are: ``dblink`` to reflect via a db link; + ``oracle_resolve_synonyms`` to resolve names to synonyms + """ + owner = self.denormalize_schema_name( + schema or self.default_schema_name + ) + + has_filter_names, params = self._prepare_filter_names(filter_names) + has_mat_views = False + + if ( + ObjectKind.TABLE in kind + and ObjectKind.MATERIALIZED_VIEW not in kind + ): + # see note in _table_options_query + mat_views = self.get_materialized_view_names( + connection, schema, dblink, _normalize=False, **kw + ) + if mat_views: + params["mat_views"] = mat_views + has_mat_views = True + elif ( + ObjectKind.TABLE not in kind + and ObjectKind.MATERIALIZED_VIEW in kind + ): + mat_views = self.get_materialized_view_names( + connection, schema, dblink, _normalize=False, **kw + ) + params["mat_views"] = mat_views + + options = {} + default = ReflectionDefaults.table_options + + if ObjectKind.TABLE in kind or ObjectKind.MATERIALIZED_VIEW in kind: + query = self._table_options_query( + owner, scope, kind, has_filter_names, has_mat_views + ) + result = self._execute_reflection( + connection, query, dblink, returns_long=False, params=params + ) + + for table, compression, compress_for, tablespace in result: + data = default() + if compression == "ENABLED": + data["oracle_compress"] = compress_for + if tablespace: + data["oracle_tablespace"] = tablespace + options[(schema, self.normalize_name(table))] = data + if ObjectKind.VIEW in kind and ObjectScope.DEFAULT in scope: + # add the views (no temporary views) + for view in self.get_view_names(connection, schema, dblink, **kw): + if not filter_names or view in filter_names: + options[(schema, view)] = default() + + return options.items() + + @reflection.cache + def get_columns(self, connection, table_name, schema=None, **kw): + """Supported kw arguments are: ``dblink`` to reflect via a db link; + ``oracle_resolve_synonyms`` to resolve names to synonyms + """ + + data = self.get_multi_columns( + connection, + schema=schema, + filter_names=[table_name], + scope=ObjectScope.ANY, + kind=ObjectKind.ANY, + **kw, + ) + return self._value_or_raise(data, table_name, schema) + + def _run_batches( + self, connection, query, dblink, returns_long, mappings, all_objects + ): + each_batch = 500 + batches = list(all_objects) + while batches: + batch = batches[0:each_batch] + batches[0:each_batch] = [] + + result = self._execute_reflection( + connection, + query, + dblink, + returns_long=returns_long, + params={"all_objects": 
batch}, + ) + if mappings: + yield from result.mappings() + else: + yield from result + + @lru_cache() + def _column_query(self, owner): + all_cols = dictionary.all_tab_cols + all_comments = dictionary.all_col_comments + all_ids = dictionary.all_tab_identity_cols + + if self.server_version_info >= (12,): + add_cols = ( + all_cols.c.default_on_null, + sql.case( + (all_ids.c.table_name.is_(None), sql.null()), + else_=all_ids.c.generation_type + + "," + + all_ids.c.identity_options, + ).label("identity_options"), + ) + join_identity_cols = True + else: + add_cols = ( + sql.null().label("default_on_null"), + sql.null().label("identity_options"), + ) + join_identity_cols = False + + # NOTE: on oracle cannot create tables/views without columns and + # a table cannot have all column hidden: + # ORA-54039: table must have at least one column that is not invisible + # all_tab_cols returns data for tables/views/mat-views. + # all_tab_cols does not return recycled tables + + query = ( + select( + all_cols.c.table_name, + all_cols.c.column_name, + all_cols.c.data_type, + all_cols.c.char_length, + all_cols.c.data_precision, + all_cols.c.data_scale, + all_cols.c.nullable, + all_cols.c.data_default, + all_comments.c.comments, + all_cols.c.virtual_column, + *add_cols, + ).select_from(all_cols) + # NOTE: all_col_comments has a row for each column even if no + # comment is present, so a join could be performed, but there + # seems to be no difference compared to an outer join + .outerjoin( + all_comments, + and_( + all_cols.c.table_name == all_comments.c.table_name, + all_cols.c.column_name == all_comments.c.column_name, + all_cols.c.owner == all_comments.c.owner, + ), + ) + ) + if join_identity_cols: + query = query.outerjoin( + all_ids, + and_( + all_cols.c.table_name == all_ids.c.table_name, + all_cols.c.column_name == all_ids.c.column_name, + all_cols.c.owner == all_ids.c.owner, + ), + ) + + query = query.where( + all_cols.c.table_name.in_(bindparam("all_objects")), + all_cols.c.hidden_column == "NO", + all_cols.c.owner == owner, + ).order_by(all_cols.c.table_name, all_cols.c.column_id) + return query + + @_handle_synonyms_decorator + def get_multi_columns( + self, + connection, + *, + schema, + filter_names, + scope, + kind, + dblink=None, + **kw, + ): + """Supported kw arguments are: ``dblink`` to reflect via a db link; + ``oracle_resolve_synonyms`` to resolve names to synonyms + """ + owner = self.denormalize_schema_name( + schema or self.default_schema_name + ) + query = self._column_query(owner) + + if ( + filter_names + and kind is ObjectKind.ANY + and scope is ObjectScope.ANY + ): + all_objects = [self.denormalize_name(n) for n in filter_names] + else: + all_objects = self._get_all_objects( + connection, schema, scope, kind, filter_names, dblink, **kw + ) + + columns = defaultdict(list) + + # all_tab_cols.data_default is LONG + result = self._run_batches( + connection, + query, + dblink, + returns_long=True, + mappings=True, + all_objects=all_objects, + ) + + def maybe_int(value): + if isinstance(value, float) and value.is_integer(): + return int(value) + else: + return value + + remove_size = re.compile(r"\(\d+\)") + + for row_dict in result: + table_name = self.normalize_name(row_dict["table_name"]) + orig_colname = row_dict["column_name"] + colname = self.normalize_name(orig_colname) + coltype = row_dict["data_type"] + precision = maybe_int(row_dict["data_precision"]) + + if coltype == "NUMBER": + scale = maybe_int(row_dict["data_scale"]) + if precision is None and scale == 0: + coltype = 
INTEGER() + else: + coltype = NUMBER(precision, scale) + elif coltype == "FLOAT": + # https://docs.oracle.com/cd/B14117_01/server.101/b10758/sqlqr06.htm + if precision == 126: + # The DOUBLE PRECISION datatype is a floating-point + # number with binary precision 126. + coltype = DOUBLE_PRECISION() + elif precision == 63: + # The REAL datatype is a floating-point number with a + # binary precision of 63, or 18 decimal. + coltype = REAL() + else: + # non standard precision + coltype = FLOAT(binary_precision=precision) + + elif coltype in ("VARCHAR2", "NVARCHAR2", "CHAR", "NCHAR"): + char_length = maybe_int(row_dict["char_length"]) + coltype = self.ischema_names.get(coltype)(char_length) + elif "WITH TIME ZONE" in coltype: + coltype = TIMESTAMP(timezone=True) + elif "WITH LOCAL TIME ZONE" in coltype: + coltype = TIMESTAMP(local_timezone=True) + else: + coltype = re.sub(remove_size, "", coltype) + try: + coltype = self.ischema_names[coltype] + except KeyError: + util.warn( + "Did not recognize type '%s' of column '%s'" + % (coltype, colname) + ) + coltype = sqltypes.NULLTYPE + + default = row_dict["data_default"] + if row_dict["virtual_column"] == "YES": + computed = dict(sqltext=default) + default = None + else: + computed = None + + identity_options = row_dict["identity_options"] + if identity_options is not None: + identity = self._parse_identity_options( + identity_options, row_dict["default_on_null"] + ) + default = None + else: + identity = None + + cdict = { + "name": colname, + "type": coltype, + "nullable": row_dict["nullable"] == "Y", + "default": default, + "comment": row_dict["comments"], + } + if orig_colname.lower() == orig_colname: + cdict["quote"] = True + if computed is not None: + cdict["computed"] = computed + if identity is not None: + cdict["identity"] = identity + + columns[(schema, table_name)].append(cdict) + + # NOTE: default not needed since all tables have columns + # default = ReflectionDefaults.columns + # return ( + # (key, value if value else default()) + # for key, value in columns.items() + # ) + return columns.items() + + def _parse_identity_options(self, identity_options, default_on_null): + # identity_options is a string that starts with 'ALWAYS,' or + # 'BY DEFAULT,' and continues with + # START WITH: 1, INCREMENT BY: 1, MAX_VALUE: 123, MIN_VALUE: 1, + # CYCLE_FLAG: N, CACHE_SIZE: 1, ORDER_FLAG: N, SCALE_FLAG: N, + # EXTEND_FLAG: N, SESSION_FLAG: N, KEEP_VALUE: N + parts = [p.strip() for p in identity_options.split(",")] + identity = { + "always": parts[0] == "ALWAYS", + "on_null": default_on_null == "YES", + } + + for part in parts[1:]: + option, value = part.split(":") + value = value.strip() + + if "START WITH" in option: + identity["start"] = int(value) + elif "INCREMENT BY" in option: + identity["increment"] = int(value) + elif "MAX_VALUE" in option: + identity["maxvalue"] = int(value) + elif "MIN_VALUE" in option: + identity["minvalue"] = int(value) + elif "CYCLE_FLAG" in option: + identity["cycle"] = value == "Y" + elif "CACHE_SIZE" in option: + identity["cache"] = int(value) + elif "ORDER_FLAG" in option: + identity["order"] = value == "Y" + return identity + + @reflection.cache + def get_table_comment(self, connection, table_name, schema=None, **kw): + """Supported kw arguments are: ``dblink`` to reflect via a db link; + ``oracle_resolve_synonyms`` to resolve names to synonyms + """ + data = self.get_multi_table_comment( + connection, + schema=schema, + filter_names=[table_name], + scope=ObjectScope.ANY, + kind=ObjectKind.ANY, + **kw, + ) + return 
self._value_or_raise(data, table_name, schema) + + @lru_cache() + def _comment_query(self, owner, scope, kind, has_filter_names): + # NOTE: all_tab_comments / all_mview_comments have a row for all + # object even if they don't have comments + queries = [] + if ObjectKind.TABLE in kind or ObjectKind.VIEW in kind: + # all_tab_comments returns also plain views + tbl_view = select( + dictionary.all_tab_comments.c.table_name, + dictionary.all_tab_comments.c.comments, + ).where( + dictionary.all_tab_comments.c.owner == owner, + dictionary.all_tab_comments.c.table_name.not_like("BIN$%"), + ) + if ObjectKind.VIEW not in kind: + tbl_view = tbl_view.where( + dictionary.all_tab_comments.c.table_type == "TABLE" + ) + elif ObjectKind.TABLE not in kind: + tbl_view = tbl_view.where( + dictionary.all_tab_comments.c.table_type == "VIEW" + ) + queries.append(tbl_view) + if ObjectKind.MATERIALIZED_VIEW in kind: + mat_view = select( + dictionary.all_mview_comments.c.mview_name.label("table_name"), + dictionary.all_mview_comments.c.comments, + ).where( + dictionary.all_mview_comments.c.owner == owner, + dictionary.all_mview_comments.c.mview_name.not_like("BIN$%"), + ) + queries.append(mat_view) + if len(queries) == 1: + query = queries[0] + else: + union = sql.union_all(*queries).subquery("tables_and_views") + query = select(union.c.table_name, union.c.comments) + + name_col = query.selected_columns.table_name + + if scope in (ObjectScope.DEFAULT, ObjectScope.TEMPORARY): + temp = "Y" if scope is ObjectScope.TEMPORARY else "N" + # need distinct since materialized view are listed also + # as tables in all_objects + query = query.distinct().join( + dictionary.all_objects, + and_( + dictionary.all_objects.c.owner == owner, + dictionary.all_objects.c.object_name == name_col, + dictionary.all_objects.c.temporary == temp, + ), + ) + if has_filter_names: + query = query.where(name_col.in_(bindparam("filter_names"))) + return query + + @_handle_synonyms_decorator + def get_multi_table_comment( + self, + connection, + *, + schema, + filter_names, + scope, + kind, + dblink=None, + **kw, + ): + """Supported kw arguments are: ``dblink`` to reflect via a db link; + ``oracle_resolve_synonyms`` to resolve names to synonyms + """ + owner = self.denormalize_schema_name( + schema or self.default_schema_name + ) + has_filter_names, params = self._prepare_filter_names(filter_names) + query = self._comment_query(owner, scope, kind, has_filter_names) + + result = self._execute_reflection( + connection, query, dblink, returns_long=False, params=params + ) + default = ReflectionDefaults.table_comment + # materialized views by default seem to have a comment like + # "snapshot table for snapshot owner.mat_view_name" + ignore_mat_view = "snapshot table for snapshot " + return ( + ( + (schema, self.normalize_name(table)), + ( + {"text": comment} + if comment is not None + and not comment.startswith(ignore_mat_view) + else default() + ), + ) + for table, comment in result + ) + + @reflection.cache + def get_indexes(self, connection, table_name, schema=None, **kw): + """Supported kw arguments are: ``dblink`` to reflect via a db link; + ``oracle_resolve_synonyms`` to resolve names to synonyms + """ + data = self.get_multi_indexes( + connection, + schema=schema, + filter_names=[table_name], + scope=ObjectScope.ANY, + kind=ObjectKind.ANY, + **kw, + ) + return self._value_or_raise(data, table_name, schema) + + @lru_cache() + def _index_query(self, owner): + return ( + select( + dictionary.all_ind_columns.c.table_name, + 
dictionary.all_ind_columns.c.index_name, + dictionary.all_ind_columns.c.column_name, + dictionary.all_indexes.c.index_type, + dictionary.all_indexes.c.uniqueness, + dictionary.all_indexes.c.compression, + dictionary.all_indexes.c.prefix_length, + dictionary.all_ind_columns.c.descend, + dictionary.all_ind_expressions.c.column_expression, + ) + .select_from(dictionary.all_ind_columns) + .join( + dictionary.all_indexes, + sql.and_( + dictionary.all_ind_columns.c.index_name + == dictionary.all_indexes.c.index_name, + dictionary.all_ind_columns.c.index_owner + == dictionary.all_indexes.c.owner, + ), + ) + .outerjoin( + # NOTE: this adds about 20% to the query time. Using a + # case expression with a scalar subquery only when needed + # with the assumption that most indexes are not expression + # would be faster but oracle does not like that with + # LONG datatype. It errors with: + # ORA-00997: illegal use of LONG datatype + dictionary.all_ind_expressions, + sql.and_( + dictionary.all_ind_expressions.c.index_name + == dictionary.all_ind_columns.c.index_name, + dictionary.all_ind_expressions.c.index_owner + == dictionary.all_ind_columns.c.index_owner, + dictionary.all_ind_expressions.c.column_position + == dictionary.all_ind_columns.c.column_position, + ), + ) + .where( + dictionary.all_indexes.c.table_owner == owner, + dictionary.all_indexes.c.table_name.in_( + bindparam("all_objects") + ), + ) + .order_by( + dictionary.all_ind_columns.c.index_name, + dictionary.all_ind_columns.c.column_position, + ) + ) + + @reflection.flexi_cache( + ("schema", InternalTraversal.dp_string), + ("dblink", InternalTraversal.dp_string), + ("all_objects", InternalTraversal.dp_string_list), + ) + def _get_indexes_rows(self, connection, schema, dblink, all_objects, **kw): + owner = self.denormalize_schema_name( + schema or self.default_schema_name + ) + + query = self._index_query(owner) + + pks = { + row_dict["constraint_name"] + for row_dict in self._get_all_constraint_rows( + connection, schema, dblink, all_objects, **kw + ) + if row_dict["constraint_type"] == "P" + } + + # all_ind_expressions.column_expression is LONG + result = self._run_batches( + connection, + query, + dblink, + returns_long=True, + mappings=True, + all_objects=all_objects, + ) + + return [ + row_dict + for row_dict in result + if row_dict["index_name"] not in pks + ] + + @_handle_synonyms_decorator + def get_multi_indexes( + self, + connection, + *, + schema, + filter_names, + scope, + kind, + dblink=None, + **kw, + ): + """Supported kw arguments are: ``dblink`` to reflect via a db link; + ``oracle_resolve_synonyms`` to resolve names to synonyms + """ + all_objects = self._get_all_objects( + connection, schema, scope, kind, filter_names, dblink, **kw + ) + + uniqueness = {"NONUNIQUE": False, "UNIQUE": True} + enabled = {"DISABLED": False, "ENABLED": True} + is_bitmap = {"BITMAP", "FUNCTION-BASED BITMAP"} + + indexes = defaultdict(dict) + + for row_dict in self._get_indexes_rows( + connection, schema, dblink, all_objects, **kw + ): + index_name = self.normalize_name(row_dict["index_name"]) + table_name = self.normalize_name(row_dict["table_name"]) + table_indexes = indexes[(schema, table_name)] + + if index_name not in table_indexes: + table_indexes[index_name] = index_dict = { + "name": index_name, + "column_names": [], + "dialect_options": {}, + "unique": uniqueness.get(row_dict["uniqueness"], False), + } + do = index_dict["dialect_options"] + if row_dict["index_type"] in is_bitmap: + do["oracle_bitmap"] = True + if 
enabled.get(row_dict["compression"], False): + do["oracle_compress"] = row_dict["prefix_length"] + + else: + index_dict = table_indexes[index_name] + + expr = row_dict["column_expression"] + if expr is not None: + index_dict["column_names"].append(None) + if "expressions" in index_dict: + index_dict["expressions"].append(expr) + else: + index_dict["expressions"] = index_dict["column_names"][:-1] + index_dict["expressions"].append(expr) + + if row_dict["descend"].lower() != "asc": + assert row_dict["descend"].lower() == "desc" + cs = index_dict.setdefault("column_sorting", {}) + cs[expr] = ("desc",) + else: + assert row_dict["descend"].lower() == "asc" + cn = self.normalize_name(row_dict["column_name"]) + index_dict["column_names"].append(cn) + if "expressions" in index_dict: + index_dict["expressions"].append(cn) + + default = ReflectionDefaults.indexes + + return ( + (key, list(indexes[key].values()) if key in indexes else default()) + for key in ( + (schema, self.normalize_name(obj_name)) + for obj_name in all_objects + ) + ) + + @reflection.cache + def get_pk_constraint(self, connection, table_name, schema=None, **kw): + """Supported kw arguments are: ``dblink`` to reflect via a db link; + ``oracle_resolve_synonyms`` to resolve names to synonyms + """ + data = self.get_multi_pk_constraint( + connection, + schema=schema, + filter_names=[table_name], + scope=ObjectScope.ANY, + kind=ObjectKind.ANY, + **kw, + ) + return self._value_or_raise(data, table_name, schema) + + @lru_cache() + def _constraint_query(self, owner): + local = dictionary.all_cons_columns.alias("local") + remote = dictionary.all_cons_columns.alias("remote") + return ( + select( + dictionary.all_constraints.c.table_name, + dictionary.all_constraints.c.constraint_type, + dictionary.all_constraints.c.constraint_name, + local.c.column_name.label("local_column"), + remote.c.table_name.label("remote_table"), + remote.c.column_name.label("remote_column"), + remote.c.owner.label("remote_owner"), + dictionary.all_constraints.c.search_condition, + dictionary.all_constraints.c.delete_rule, + ) + .select_from(dictionary.all_constraints) + .join( + local, + and_( + local.c.owner == dictionary.all_constraints.c.owner, + dictionary.all_constraints.c.constraint_name + == local.c.constraint_name, + ), + ) + .outerjoin( + remote, + and_( + dictionary.all_constraints.c.r_owner == remote.c.owner, + dictionary.all_constraints.c.r_constraint_name + == remote.c.constraint_name, + or_( + remote.c.position.is_(sql.null()), + local.c.position == remote.c.position, + ), + ), + ) + .where( + dictionary.all_constraints.c.owner == owner, + dictionary.all_constraints.c.table_name.in_( + bindparam("all_objects") + ), + dictionary.all_constraints.c.constraint_type.in_( + ("R", "P", "U", "C") + ), + ) + .order_by( + dictionary.all_constraints.c.constraint_name, local.c.position + ) + ) + + @reflection.flexi_cache( + ("schema", InternalTraversal.dp_string), + ("dblink", InternalTraversal.dp_string), + ("all_objects", InternalTraversal.dp_string_list), + ) + def _get_all_constraint_rows( + self, connection, schema, dblink, all_objects, **kw + ): + owner = self.denormalize_schema_name( + schema or self.default_schema_name + ) + query = self._constraint_query(owner) + + # since the result is cached a list must be created + values = list( + self._run_batches( + connection, + query, + dblink, + returns_long=False, + mappings=True, + all_objects=all_objects, + ) + ) + return values + + @_handle_synonyms_decorator + def get_multi_pk_constraint( + self, + 
connection, + *, + scope, + schema, + filter_names, + kind, + dblink=None, + **kw, + ): + """Supported kw arguments are: ``dblink`` to reflect via a db link; + ``oracle_resolve_synonyms`` to resolve names to synonyms + """ + all_objects = self._get_all_objects( + connection, schema, scope, kind, filter_names, dblink, **kw + ) + + primary_keys = defaultdict(dict) + default = ReflectionDefaults.pk_constraint + + for row_dict in self._get_all_constraint_rows( + connection, schema, dblink, all_objects, **kw + ): + if row_dict["constraint_type"] != "P": + continue + table_name = self.normalize_name(row_dict["table_name"]) + constraint_name = self.normalize_name(row_dict["constraint_name"]) + column_name = self.normalize_name(row_dict["local_column"]) + + table_pk = primary_keys[(schema, table_name)] + if not table_pk: + table_pk["name"] = constraint_name + table_pk["constrained_columns"] = [column_name] + else: + table_pk["constrained_columns"].append(column_name) + + return ( + (key, primary_keys[key] if key in primary_keys else default()) + for key in ( + (schema, self.normalize_name(obj_name)) + for obj_name in all_objects + ) + ) + + @reflection.cache + def get_foreign_keys( + self, + connection, + table_name, + schema=None, + **kw, + ): + """Supported kw arguments are: ``dblink`` to reflect via a db link; + ``oracle_resolve_synonyms`` to resolve names to synonyms + """ + data = self.get_multi_foreign_keys( + connection, + schema=schema, + filter_names=[table_name], + scope=ObjectScope.ANY, + kind=ObjectKind.ANY, + **kw, + ) + return self._value_or_raise(data, table_name, schema) + + @_handle_synonyms_decorator + def get_multi_foreign_keys( + self, + connection, + *, + scope, + schema, + filter_names, + kind, + dblink=None, + **kw, + ): + """Supported kw arguments are: ``dblink`` to reflect via a db link; + ``oracle_resolve_synonyms`` to resolve names to synonyms + """ + all_objects = self._get_all_objects( + connection, schema, scope, kind, filter_names, dblink, **kw + ) + + resolve_synonyms = kw.get("oracle_resolve_synonyms", False) + + owner = self.denormalize_schema_name( + schema or self.default_schema_name + ) + + all_remote_owners = set() + fkeys = defaultdict(dict) + + for row_dict in self._get_all_constraint_rows( + connection, schema, dblink, all_objects, **kw + ): + if row_dict["constraint_type"] != "R": + continue + + table_name = self.normalize_name(row_dict["table_name"]) + constraint_name = self.normalize_name(row_dict["constraint_name"]) + table_fkey = fkeys[(schema, table_name)] + + assert constraint_name is not None + + local_column = self.normalize_name(row_dict["local_column"]) + remote_table = self.normalize_name(row_dict["remote_table"]) + remote_column = self.normalize_name(row_dict["remote_column"]) + remote_owner_orig = row_dict["remote_owner"] + remote_owner = self.normalize_name(remote_owner_orig) + if remote_owner_orig is not None: + all_remote_owners.add(remote_owner_orig) + + if remote_table is None: + # ticket 363 + if dblink and not dblink.startswith("@"): + dblink = f"@{dblink}" + util.warn( + "Got 'None' querying 'table_name' from " + f"all_cons_columns{dblink or ''} - does the user have " + "proper rights to the table?" 
+ ) + continue + + if constraint_name not in table_fkey: + table_fkey[constraint_name] = fkey = { + "name": constraint_name, + "constrained_columns": [], + "referred_schema": None, + "referred_table": remote_table, + "referred_columns": [], + "options": {}, + } + + if resolve_synonyms: + # will be removed below + fkey["_ref_schema"] = remote_owner + + if schema is not None or remote_owner_orig != owner: + fkey["referred_schema"] = remote_owner + + delete_rule = row_dict["delete_rule"] + if delete_rule != "NO ACTION": + fkey["options"]["ondelete"] = delete_rule + + else: + fkey = table_fkey[constraint_name] + + fkey["constrained_columns"].append(local_column) + fkey["referred_columns"].append(remote_column) + + if resolve_synonyms and all_remote_owners: + query = select( + dictionary.all_synonyms.c.owner, + dictionary.all_synonyms.c.table_name, + dictionary.all_synonyms.c.table_owner, + dictionary.all_synonyms.c.synonym_name, + ).where(dictionary.all_synonyms.c.owner.in_(all_remote_owners)) + + result = self._execute_reflection( + connection, query, dblink, returns_long=False + ).mappings() + + remote_owners_lut = {} + for row in result: + synonym_owner = self.normalize_name(row["owner"]) + table_name = self.normalize_name(row["table_name"]) + + remote_owners_lut[(synonym_owner, table_name)] = ( + row["table_owner"], + row["synonym_name"], + ) + + empty = (None, None) + for table_fkeys in fkeys.values(): + for table_fkey in table_fkeys.values(): + key = ( + table_fkey.pop("_ref_schema"), + table_fkey["referred_table"], + ) + remote_owner, syn_name = remote_owners_lut.get(key, empty) + if syn_name: + sn = self.normalize_name(syn_name) + table_fkey["referred_table"] = sn + if schema is not None or remote_owner != owner: + ro = self.normalize_name(remote_owner) + table_fkey["referred_schema"] = ro + else: + table_fkey["referred_schema"] = None + default = ReflectionDefaults.foreign_keys + + return ( + (key, list(fkeys[key].values()) if key in fkeys else default()) + for key in ( + (schema, self.normalize_name(obj_name)) + for obj_name in all_objects + ) + ) + + @reflection.cache + def get_unique_constraints( + self, connection, table_name, schema=None, **kw + ): + """Supported kw arguments are: ``dblink`` to reflect via a db link; + ``oracle_resolve_synonyms`` to resolve names to synonyms + """ + data = self.get_multi_unique_constraints( + connection, + schema=schema, + filter_names=[table_name], + scope=ObjectScope.ANY, + kind=ObjectKind.ANY, + **kw, + ) + return self._value_or_raise(data, table_name, schema) + + @_handle_synonyms_decorator + def get_multi_unique_constraints( + self, + connection, + *, + scope, + schema, + filter_names, + kind, + dblink=None, + **kw, + ): + """Supported kw arguments are: ``dblink`` to reflect via a db link; + ``oracle_resolve_synonyms`` to resolve names to synonyms + """ + all_objects = self._get_all_objects( + connection, schema, scope, kind, filter_names, dblink, **kw + ) + + unique_cons = defaultdict(dict) + + index_names = { + row_dict["index_name"] + for row_dict in self._get_indexes_rows( + connection, schema, dblink, all_objects, **kw + ) + } + + for row_dict in self._get_all_constraint_rows( + connection, schema, dblink, all_objects, **kw + ): + if row_dict["constraint_type"] != "U": + continue + table_name = self.normalize_name(row_dict["table_name"]) + constraint_name_orig = row_dict["constraint_name"] + constraint_name = self.normalize_name(constraint_name_orig) + column_name = self.normalize_name(row_dict["local_column"]) + table_uc = 
unique_cons[(schema, table_name)] + + assert constraint_name is not None + + if constraint_name not in table_uc: + table_uc[constraint_name] = uc = { + "name": constraint_name, + "column_names": [], + "duplicates_index": ( + constraint_name + if constraint_name_orig in index_names + else None + ), + } + else: + uc = table_uc[constraint_name] + + uc["column_names"].append(column_name) + + default = ReflectionDefaults.unique_constraints + + return ( + ( + key, + ( + list(unique_cons[key].values()) + if key in unique_cons + else default() + ), + ) + for key in ( + (schema, self.normalize_name(obj_name)) + for obj_name in all_objects + ) + ) + + @reflection.cache + def get_view_definition( + self, + connection, + view_name, + schema=None, + dblink=None, + **kw, + ): + """Supported kw arguments are: ``dblink`` to reflect via a db link; + ``oracle_resolve_synonyms`` to resolve names to synonyms + """ + if kw.get("oracle_resolve_synonyms", False): + synonyms = self._get_synonyms( + connection, schema, filter_names=[view_name], dblink=dblink + ) + if synonyms: + assert len(synonyms) == 1 + row_dict = synonyms[0] + dblink = self.normalize_name(row_dict["db_link"]) + schema = row_dict["table_owner"] + view_name = row_dict["table_name"] + + name = self.denormalize_name(view_name) + owner = self.denormalize_schema_name( + schema or self.default_schema_name + ) + query = ( + select(dictionary.all_views.c.text) + .where( + dictionary.all_views.c.view_name == name, + dictionary.all_views.c.owner == owner, + ) + .union_all( + select(dictionary.all_mviews.c.query).where( + dictionary.all_mviews.c.mview_name == name, + dictionary.all_mviews.c.owner == owner, + ) + ) + ) + + rp = self._execute_reflection( + connection, query, dblink, returns_long=False + ).scalar() + if rp is None: + raise exc.NoSuchTableError( + f"{schema}.{view_name}" if schema else view_name + ) + else: + return rp + + @reflection.cache + def get_check_constraints( + self, connection, table_name, schema=None, include_all=False, **kw + ): + """Supported kw arguments are: ``dblink`` to reflect via a db link; + ``oracle_resolve_synonyms`` to resolve names to synonyms + """ + data = self.get_multi_check_constraints( + connection, + schema=schema, + filter_names=[table_name], + scope=ObjectScope.ANY, + include_all=include_all, + kind=ObjectKind.ANY, + **kw, + ) + return self._value_or_raise(data, table_name, schema) + + @_handle_synonyms_decorator + def get_multi_check_constraints( + self, + connection, + *, + schema, + filter_names, + dblink=None, + scope, + kind, + include_all=False, + **kw, + ): + """Supported kw arguments are: ``dblink`` to reflect via a db link; + ``oracle_resolve_synonyms`` to resolve names to synonyms + """ + all_objects = self._get_all_objects( + connection, schema, scope, kind, filter_names, dblink, **kw + ) + + not_null = re.compile(r"..+?. 
IS NOT NULL$") + + check_constraints = defaultdict(list) + + for row_dict in self._get_all_constraint_rows( + connection, schema, dblink, all_objects, **kw + ): + if row_dict["constraint_type"] != "C": + continue + table_name = self.normalize_name(row_dict["table_name"]) + constraint_name = self.normalize_name(row_dict["constraint_name"]) + search_condition = row_dict["search_condition"] + + table_checks = check_constraints[(schema, table_name)] + if constraint_name is not None and ( + include_all or not not_null.match(search_condition) + ): + table_checks.append( + {"name": constraint_name, "sqltext": search_condition} + ) + + default = ReflectionDefaults.check_constraints + + return ( + ( + key, + ( + check_constraints[key] + if key in check_constraints + else default() + ), + ) + for key in ( + (schema, self.normalize_name(obj_name)) + for obj_name in all_objects + ) + ) + + def _list_dblinks(self, connection, dblink=None): + query = select(dictionary.all_db_links.c.db_link) + links = self._execute_reflection( + connection, query, dblink, returns_long=False + ).scalars() + return [self.normalize_name(link) for link in links] + + +class _OuterJoinColumn(sql.ClauseElement): + __visit_name__ = "outer_join_column" + + def __init__(self, column): + self.column = column diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/dialects/oracle/dictionary.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/dialects/oracle/dictionary.py new file mode 100644 index 0000000000000000000000000000000000000000..f785a66ef71e25aef7227cf755a2389d0ab3bf59 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/dialects/oracle/dictionary.py @@ -0,0 +1,507 @@ +# dialects/oracle/dictionary.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php +# mypy: ignore-errors + +from .types import DATE +from .types import LONG +from .types import NUMBER +from .types import RAW +from .types import VARCHAR2 +from ... import Column +from ... import MetaData +from ... import Table +from ... 
import table +from ...sql.sqltypes import CHAR + +# constants +DB_LINK_PLACEHOLDER = "__$sa_dblink$__" +# tables +dual = table("dual") +dictionary_meta = MetaData() + +# NOTE: all the dictionary_meta are aliases because oracle does not like +# using the full table@dblink for every column in query, and complains with +# ORA-00960: ambiguous column naming in select list +all_tables = Table( + "all_tables" + DB_LINK_PLACEHOLDER, + dictionary_meta, + Column("owner", VARCHAR2(128), nullable=False), + Column("table_name", VARCHAR2(128), nullable=False), + Column("tablespace_name", VARCHAR2(30)), + Column("cluster_name", VARCHAR2(128)), + Column("iot_name", VARCHAR2(128)), + Column("status", VARCHAR2(8)), + Column("pct_free", NUMBER), + Column("pct_used", NUMBER), + Column("ini_trans", NUMBER), + Column("max_trans", NUMBER), + Column("initial_extent", NUMBER), + Column("next_extent", NUMBER), + Column("min_extents", NUMBER), + Column("max_extents", NUMBER), + Column("pct_increase", NUMBER), + Column("freelists", NUMBER), + Column("freelist_groups", NUMBER), + Column("logging", VARCHAR2(3)), + Column("backed_up", VARCHAR2(1)), + Column("num_rows", NUMBER), + Column("blocks", NUMBER), + Column("empty_blocks", NUMBER), + Column("avg_space", NUMBER), + Column("chain_cnt", NUMBER), + Column("avg_row_len", NUMBER), + Column("avg_space_freelist_blocks", NUMBER), + Column("num_freelist_blocks", NUMBER), + Column("degree", VARCHAR2(10)), + Column("instances", VARCHAR2(10)), + Column("cache", VARCHAR2(5)), + Column("table_lock", VARCHAR2(8)), + Column("sample_size", NUMBER), + Column("last_analyzed", DATE), + Column("partitioned", VARCHAR2(3)), + Column("iot_type", VARCHAR2(12)), + Column("temporary", VARCHAR2(1)), + Column("secondary", VARCHAR2(1)), + Column("nested", VARCHAR2(3)), + Column("buffer_pool", VARCHAR2(7)), + Column("flash_cache", VARCHAR2(7)), + Column("cell_flash_cache", VARCHAR2(7)), + Column("row_movement", VARCHAR2(8)), + Column("global_stats", VARCHAR2(3)), + Column("user_stats", VARCHAR2(3)), + Column("duration", VARCHAR2(15)), + Column("skip_corrupt", VARCHAR2(8)), + Column("monitoring", VARCHAR2(3)), + Column("cluster_owner", VARCHAR2(128)), + Column("dependencies", VARCHAR2(8)), + Column("compression", VARCHAR2(8)), + Column("compress_for", VARCHAR2(30)), + Column("dropped", VARCHAR2(3)), + Column("read_only", VARCHAR2(3)), + Column("segment_created", VARCHAR2(3)), + Column("result_cache", VARCHAR2(7)), + Column("clustering", VARCHAR2(3)), + Column("activity_tracking", VARCHAR2(23)), + Column("dml_timestamp", VARCHAR2(25)), + Column("has_identity", VARCHAR2(3)), + Column("container_data", VARCHAR2(3)), + Column("inmemory", VARCHAR2(8)), + Column("inmemory_priority", VARCHAR2(8)), + Column("inmemory_distribute", VARCHAR2(15)), + Column("inmemory_compression", VARCHAR2(17)), + Column("inmemory_duplicate", VARCHAR2(13)), + Column("default_collation", VARCHAR2(100)), + Column("duplicated", VARCHAR2(1)), + Column("sharded", VARCHAR2(1)), + Column("externally_sharded", VARCHAR2(1)), + Column("externally_duplicated", VARCHAR2(1)), + Column("external", VARCHAR2(3)), + Column("hybrid", VARCHAR2(3)), + Column("cellmemory", VARCHAR2(24)), + Column("containers_default", VARCHAR2(3)), + Column("container_map", VARCHAR2(3)), + Column("extended_data_link", VARCHAR2(3)), + Column("extended_data_link_map", VARCHAR2(3)), + Column("inmemory_service", VARCHAR2(12)), + Column("inmemory_service_name", VARCHAR2(1000)), + Column("container_map_object", VARCHAR2(3)), + Column("memoptimize_read", 
VARCHAR2(8)), + Column("memoptimize_write", VARCHAR2(8)), + Column("has_sensitive_column", VARCHAR2(3)), + Column("admit_null", VARCHAR2(3)), + Column("data_link_dml_enabled", VARCHAR2(3)), + Column("logical_replication", VARCHAR2(8)), +).alias("a_tables") + +all_views = Table( + "all_views" + DB_LINK_PLACEHOLDER, + dictionary_meta, + Column("owner", VARCHAR2(128), nullable=False), + Column("view_name", VARCHAR2(128), nullable=False), + Column("text_length", NUMBER), + Column("text", LONG), + Column("text_vc", VARCHAR2(4000)), + Column("type_text_length", NUMBER), + Column("type_text", VARCHAR2(4000)), + Column("oid_text_length", NUMBER), + Column("oid_text", VARCHAR2(4000)), + Column("view_type_owner", VARCHAR2(128)), + Column("view_type", VARCHAR2(128)), + Column("superview_name", VARCHAR2(128)), + Column("editioning_view", VARCHAR2(1)), + Column("read_only", VARCHAR2(1)), + Column("container_data", VARCHAR2(1)), + Column("bequeath", VARCHAR2(12)), + Column("origin_con_id", VARCHAR2(256)), + Column("default_collation", VARCHAR2(100)), + Column("containers_default", VARCHAR2(3)), + Column("container_map", VARCHAR2(3)), + Column("extended_data_link", VARCHAR2(3)), + Column("extended_data_link_map", VARCHAR2(3)), + Column("has_sensitive_column", VARCHAR2(3)), + Column("admit_null", VARCHAR2(3)), + Column("pdb_local_only", VARCHAR2(3)), +).alias("a_views") + +all_sequences = Table( + "all_sequences" + DB_LINK_PLACEHOLDER, + dictionary_meta, + Column("sequence_owner", VARCHAR2(128), nullable=False), + Column("sequence_name", VARCHAR2(128), nullable=False), + Column("min_value", NUMBER), + Column("max_value", NUMBER), + Column("increment_by", NUMBER, nullable=False), + Column("cycle_flag", VARCHAR2(1)), + Column("order_flag", VARCHAR2(1)), + Column("cache_size", NUMBER, nullable=False), + Column("last_number", NUMBER, nullable=False), + Column("scale_flag", VARCHAR2(1)), + Column("extend_flag", VARCHAR2(1)), + Column("sharded_flag", VARCHAR2(1)), + Column("session_flag", VARCHAR2(1)), + Column("keep_value", VARCHAR2(1)), +).alias("a_sequences") + +all_users = Table( + "all_users" + DB_LINK_PLACEHOLDER, + dictionary_meta, + Column("username", VARCHAR2(128), nullable=False), + Column("user_id", NUMBER, nullable=False), + Column("created", DATE, nullable=False), + Column("common", VARCHAR2(3)), + Column("oracle_maintained", VARCHAR2(1)), + Column("inherited", VARCHAR2(3)), + Column("default_collation", VARCHAR2(100)), + Column("implicit", VARCHAR2(3)), + Column("all_shard", VARCHAR2(3)), + Column("external_shard", VARCHAR2(3)), +).alias("a_users") + +all_mviews = Table( + "all_mviews" + DB_LINK_PLACEHOLDER, + dictionary_meta, + Column("owner", VARCHAR2(128), nullable=False), + Column("mview_name", VARCHAR2(128), nullable=False), + Column("container_name", VARCHAR2(128), nullable=False), + Column("query", LONG), + Column("query_len", NUMBER(38)), + Column("updatable", VARCHAR2(1)), + Column("update_log", VARCHAR2(128)), + Column("master_rollback_seg", VARCHAR2(128)), + Column("master_link", VARCHAR2(128)), + Column("rewrite_enabled", VARCHAR2(1)), + Column("rewrite_capability", VARCHAR2(9)), + Column("refresh_mode", VARCHAR2(6)), + Column("refresh_method", VARCHAR2(8)), + Column("build_mode", VARCHAR2(9)), + Column("fast_refreshable", VARCHAR2(18)), + Column("last_refresh_type", VARCHAR2(8)), + Column("last_refresh_date", DATE), + Column("last_refresh_end_time", DATE), + Column("staleness", VARCHAR2(19)), + Column("after_fast_refresh", VARCHAR2(19)), + Column("unknown_prebuilt", VARCHAR2(1)), + 
Column("unknown_plsql_func", VARCHAR2(1)), + Column("unknown_external_table", VARCHAR2(1)), + Column("unknown_consider_fresh", VARCHAR2(1)), + Column("unknown_import", VARCHAR2(1)), + Column("unknown_trusted_fd", VARCHAR2(1)), + Column("compile_state", VARCHAR2(19)), + Column("use_no_index", VARCHAR2(1)), + Column("stale_since", DATE), + Column("num_pct_tables", NUMBER), + Column("num_fresh_pct_regions", NUMBER), + Column("num_stale_pct_regions", NUMBER), + Column("segment_created", VARCHAR2(3)), + Column("evaluation_edition", VARCHAR2(128)), + Column("unusable_before", VARCHAR2(128)), + Column("unusable_beginning", VARCHAR2(128)), + Column("default_collation", VARCHAR2(100)), + Column("on_query_computation", VARCHAR2(1)), + Column("auto", VARCHAR2(3)), +).alias("a_mviews") + +all_tab_identity_cols = Table( + "all_tab_identity_cols" + DB_LINK_PLACEHOLDER, + dictionary_meta, + Column("owner", VARCHAR2(128), nullable=False), + Column("table_name", VARCHAR2(128), nullable=False), + Column("column_name", VARCHAR2(128), nullable=False), + Column("generation_type", VARCHAR2(10)), + Column("sequence_name", VARCHAR2(128), nullable=False), + Column("identity_options", VARCHAR2(298)), +).alias("a_tab_identity_cols") + +all_tab_cols = Table( + "all_tab_cols" + DB_LINK_PLACEHOLDER, + dictionary_meta, + Column("owner", VARCHAR2(128), nullable=False), + Column("table_name", VARCHAR2(128), nullable=False), + Column("column_name", VARCHAR2(128), nullable=False), + Column("data_type", VARCHAR2(128)), + Column("data_type_mod", VARCHAR2(3)), + Column("data_type_owner", VARCHAR2(128)), + Column("data_length", NUMBER, nullable=False), + Column("data_precision", NUMBER), + Column("data_scale", NUMBER), + Column("nullable", VARCHAR2(1)), + Column("column_id", NUMBER), + Column("default_length", NUMBER), + Column("data_default", LONG), + Column("num_distinct", NUMBER), + Column("low_value", RAW(1000)), + Column("high_value", RAW(1000)), + Column("density", NUMBER), + Column("num_nulls", NUMBER), + Column("num_buckets", NUMBER), + Column("last_analyzed", DATE), + Column("sample_size", NUMBER), + Column("character_set_name", VARCHAR2(44)), + Column("char_col_decl_length", NUMBER), + Column("global_stats", VARCHAR2(3)), + Column("user_stats", VARCHAR2(3)), + Column("avg_col_len", NUMBER), + Column("char_length", NUMBER), + Column("char_used", VARCHAR2(1)), + Column("v80_fmt_image", VARCHAR2(3)), + Column("data_upgraded", VARCHAR2(3)), + Column("hidden_column", VARCHAR2(3)), + Column("virtual_column", VARCHAR2(3)), + Column("segment_column_id", NUMBER), + Column("internal_column_id", NUMBER, nullable=False), + Column("histogram", VARCHAR2(15)), + Column("qualified_col_name", VARCHAR2(4000)), + Column("user_generated", VARCHAR2(3)), + Column("default_on_null", VARCHAR2(3)), + Column("identity_column", VARCHAR2(3)), + Column("evaluation_edition", VARCHAR2(128)), + Column("unusable_before", VARCHAR2(128)), + Column("unusable_beginning", VARCHAR2(128)), + Column("collation", VARCHAR2(100)), + Column("collated_column_id", NUMBER), +).alias("a_tab_cols") + +all_tab_comments = Table( + "all_tab_comments" + DB_LINK_PLACEHOLDER, + dictionary_meta, + Column("owner", VARCHAR2(128), nullable=False), + Column("table_name", VARCHAR2(128), nullable=False), + Column("table_type", VARCHAR2(11)), + Column("comments", VARCHAR2(4000)), + Column("origin_con_id", NUMBER), +).alias("a_tab_comments") + +all_col_comments = Table( + "all_col_comments" + DB_LINK_PLACEHOLDER, + dictionary_meta, + Column("owner", VARCHAR2(128), nullable=False), 
+ Column("table_name", VARCHAR2(128), nullable=False), + Column("column_name", VARCHAR2(128), nullable=False), + Column("comments", VARCHAR2(4000)), + Column("origin_con_id", NUMBER), +).alias("a_col_comments") + +all_mview_comments = Table( + "all_mview_comments" + DB_LINK_PLACEHOLDER, + dictionary_meta, + Column("owner", VARCHAR2(128), nullable=False), + Column("mview_name", VARCHAR2(128), nullable=False), + Column("comments", VARCHAR2(4000)), +).alias("a_mview_comments") + +all_ind_columns = Table( + "all_ind_columns" + DB_LINK_PLACEHOLDER, + dictionary_meta, + Column("index_owner", VARCHAR2(128), nullable=False), + Column("index_name", VARCHAR2(128), nullable=False), + Column("table_owner", VARCHAR2(128), nullable=False), + Column("table_name", VARCHAR2(128), nullable=False), + Column("column_name", VARCHAR2(4000)), + Column("column_position", NUMBER, nullable=False), + Column("column_length", NUMBER, nullable=False), + Column("char_length", NUMBER), + Column("descend", VARCHAR2(4)), + Column("collated_column_id", NUMBER), +).alias("a_ind_columns") + +all_indexes = Table( + "all_indexes" + DB_LINK_PLACEHOLDER, + dictionary_meta, + Column("owner", VARCHAR2(128), nullable=False), + Column("index_name", VARCHAR2(128), nullable=False), + Column("index_type", VARCHAR2(27)), + Column("table_owner", VARCHAR2(128), nullable=False), + Column("table_name", VARCHAR2(128), nullable=False), + Column("table_type", CHAR(11)), + Column("uniqueness", VARCHAR2(9)), + Column("compression", VARCHAR2(13)), + Column("prefix_length", NUMBER), + Column("tablespace_name", VARCHAR2(30)), + Column("ini_trans", NUMBER), + Column("max_trans", NUMBER), + Column("initial_extent", NUMBER), + Column("next_extent", NUMBER), + Column("min_extents", NUMBER), + Column("max_extents", NUMBER), + Column("pct_increase", NUMBER), + Column("pct_threshold", NUMBER), + Column("include_column", NUMBER), + Column("freelists", NUMBER), + Column("freelist_groups", NUMBER), + Column("pct_free", NUMBER), + Column("logging", VARCHAR2(3)), + Column("blevel", NUMBER), + Column("leaf_blocks", NUMBER), + Column("distinct_keys", NUMBER), + Column("avg_leaf_blocks_per_key", NUMBER), + Column("avg_data_blocks_per_key", NUMBER), + Column("clustering_factor", NUMBER), + Column("status", VARCHAR2(8)), + Column("num_rows", NUMBER), + Column("sample_size", NUMBER), + Column("last_analyzed", DATE), + Column("degree", VARCHAR2(40)), + Column("instances", VARCHAR2(40)), + Column("partitioned", VARCHAR2(3)), + Column("temporary", VARCHAR2(1)), + Column("generated", VARCHAR2(1)), + Column("secondary", VARCHAR2(1)), + Column("buffer_pool", VARCHAR2(7)), + Column("flash_cache", VARCHAR2(7)), + Column("cell_flash_cache", VARCHAR2(7)), + Column("user_stats", VARCHAR2(3)), + Column("duration", VARCHAR2(15)), + Column("pct_direct_access", NUMBER), + Column("ityp_owner", VARCHAR2(128)), + Column("ityp_name", VARCHAR2(128)), + Column("parameters", VARCHAR2(1000)), + Column("global_stats", VARCHAR2(3)), + Column("domidx_status", VARCHAR2(12)), + Column("domidx_opstatus", VARCHAR2(6)), + Column("funcidx_status", VARCHAR2(8)), + Column("join_index", VARCHAR2(3)), + Column("iot_redundant_pkey_elim", VARCHAR2(3)), + Column("dropped", VARCHAR2(3)), + Column("visibility", VARCHAR2(9)), + Column("domidx_management", VARCHAR2(14)), + Column("segment_created", VARCHAR2(3)), + Column("orphaned_entries", VARCHAR2(3)), + Column("indexing", VARCHAR2(7)), + Column("auto", VARCHAR2(3)), +).alias("a_indexes") + +all_ind_expressions = Table( + "all_ind_expressions" + 
DB_LINK_PLACEHOLDER, + dictionary_meta, + Column("index_owner", VARCHAR2(128), nullable=False), + Column("index_name", VARCHAR2(128), nullable=False), + Column("table_owner", VARCHAR2(128), nullable=False), + Column("table_name", VARCHAR2(128), nullable=False), + Column("column_expression", LONG), + Column("column_position", NUMBER, nullable=False), +).alias("a_ind_expressions") + +all_constraints = Table( + "all_constraints" + DB_LINK_PLACEHOLDER, + dictionary_meta, + Column("owner", VARCHAR2(128)), + Column("constraint_name", VARCHAR2(128)), + Column("constraint_type", VARCHAR2(1)), + Column("table_name", VARCHAR2(128)), + Column("search_condition", LONG), + Column("search_condition_vc", VARCHAR2(4000)), + Column("r_owner", VARCHAR2(128)), + Column("r_constraint_name", VARCHAR2(128)), + Column("delete_rule", VARCHAR2(9)), + Column("status", VARCHAR2(8)), + Column("deferrable", VARCHAR2(14)), + Column("deferred", VARCHAR2(9)), + Column("validated", VARCHAR2(13)), + Column("generated", VARCHAR2(14)), + Column("bad", VARCHAR2(3)), + Column("rely", VARCHAR2(4)), + Column("last_change", DATE), + Column("index_owner", VARCHAR2(128)), + Column("index_name", VARCHAR2(128)), + Column("invalid", VARCHAR2(7)), + Column("view_related", VARCHAR2(14)), + Column("origin_con_id", VARCHAR2(256)), +).alias("a_constraints") + +all_cons_columns = Table( + "all_cons_columns" + DB_LINK_PLACEHOLDER, + dictionary_meta, + Column("owner", VARCHAR2(128), nullable=False), + Column("constraint_name", VARCHAR2(128), nullable=False), + Column("table_name", VARCHAR2(128), nullable=False), + Column("column_name", VARCHAR2(4000)), + Column("position", NUMBER), +).alias("a_cons_columns") + +# TODO figure out if it's still relevant, since there is no mention from here +# https://docs.oracle.com/en/database/oracle/oracle-database/21/refrn/ALL_DB_LINKS.html +# original note: +# using user_db_links here since all_db_links appears +# to have more restricted permissions. +# https://docs.oracle.com/cd/B28359_01/server.111/b28310/ds_admin005.htm +# will need to hear from more users if we are doing +# the right thing here. 
See [ticket:2619] +all_db_links = Table( + "all_db_links" + DB_LINK_PLACEHOLDER, + dictionary_meta, + Column("owner", VARCHAR2(128), nullable=False), + Column("db_link", VARCHAR2(128), nullable=False), + Column("username", VARCHAR2(128)), + Column("host", VARCHAR2(2000)), + Column("created", DATE, nullable=False), + Column("hidden", VARCHAR2(3)), + Column("shard_internal", VARCHAR2(3)), + Column("valid", VARCHAR2(3)), + Column("intra_cdb", VARCHAR2(3)), +).alias("a_db_links") + +all_synonyms = Table( + "all_synonyms" + DB_LINK_PLACEHOLDER, + dictionary_meta, + Column("owner", VARCHAR2(128)), + Column("synonym_name", VARCHAR2(128)), + Column("table_owner", VARCHAR2(128)), + Column("table_name", VARCHAR2(128)), + Column("db_link", VARCHAR2(128)), + Column("origin_con_id", VARCHAR2(256)), +).alias("a_synonyms") + +all_objects = Table( + "all_objects" + DB_LINK_PLACEHOLDER, + dictionary_meta, + Column("owner", VARCHAR2(128), nullable=False), + Column("object_name", VARCHAR2(128), nullable=False), + Column("subobject_name", VARCHAR2(128)), + Column("object_id", NUMBER, nullable=False), + Column("data_object_id", NUMBER), + Column("object_type", VARCHAR2(23)), + Column("created", DATE, nullable=False), + Column("last_ddl_time", DATE, nullable=False), + Column("timestamp", VARCHAR2(19)), + Column("status", VARCHAR2(7)), + Column("temporary", VARCHAR2(1)), + Column("generated", VARCHAR2(1)), + Column("secondary", VARCHAR2(1)), + Column("namespace", NUMBER, nullable=False), + Column("edition_name", VARCHAR2(128)), + Column("sharing", VARCHAR2(13)), + Column("editionable", VARCHAR2(1)), + Column("oracle_maintained", VARCHAR2(1)), + Column("application", VARCHAR2(1)), + Column("default_collation", VARCHAR2(100)), + Column("duplicated", VARCHAR2(1)), + Column("sharded", VARCHAR2(1)), + Column("created_appid", NUMBER), + Column("created_vsnid", NUMBER), + Column("modified_appid", NUMBER), + Column("modified_vsnid", NUMBER), +).alias("a_objects") diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/dialects/oracle/oracledb.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/dialects/oracle/oracledb.py new file mode 100644 index 0000000000000000000000000000000000000000..c09d2bae0df6a0590770dee3103a75370e39aec7 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/dialects/oracle/oracledb.py @@ -0,0 +1,947 @@ +# dialects/oracle/oracledb.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php +# mypy: ignore-errors + +r""".. dialect:: oracle+oracledb + :name: python-oracledb + :dbapi: oracledb + :connectstring: oracle+oracledb://user:pass@hostname:port[/dbname][?service_name=[&key=value&key=value...]] + :url: https://oracle.github.io/python-oracledb/ + +Description +----------- + +Python-oracledb is the Oracle Database driver for Python. It features a default +"thin" client mode that requires no dependencies, and an optional "thick" mode +that uses Oracle Client libraries. It supports SQLAlchemy features including +two phase transactions and Asyncio. + +Python-oracle is the renamed, updated cx_Oracle driver. Oracle is no longer +doing any releases in the cx_Oracle namespace. + +The SQLAlchemy ``oracledb`` dialect provides both a sync and an async +implementation under the same dialect name. 
The proper version is +selected depending on how the engine is created: + +* calling :func:`_sa.create_engine` with ``oracle+oracledb://...`` will + automatically select the sync version:: + + from sqlalchemy import create_engine + + sync_engine = create_engine( + "oracle+oracledb://scott:tiger@localhost?service_name=FREEPDB1" + ) + +* calling :func:`_asyncio.create_async_engine` with ``oracle+oracledb://...`` + will automatically select the async version:: + + from sqlalchemy.ext.asyncio import create_async_engine + + asyncio_engine = create_async_engine( + "oracle+oracledb://scott:tiger@localhost?service_name=FREEPDB1" + ) + + The asyncio version of the dialect may also be specified explicitly using the + ``oracledb_async`` suffix:: + + from sqlalchemy.ext.asyncio import create_async_engine + + asyncio_engine = create_async_engine( + "oracle+oracledb_async://scott:tiger@localhost?service_name=FREEPDB1" + ) + +.. versionadded:: 2.0.25 added support for the async version of oracledb. + +Thick mode support +------------------ + +By default, the python-oracledb driver runs in a "thin" mode that does not +require Oracle Client libraries to be installed. The driver also supports a +"thick" mode that uses Oracle Client libraries to get functionality such as +Oracle Application Continuity. + +To enable thick mode, call `oracledb.init_oracle_client() +`_ +explicitly, or pass the parameter ``thick_mode=True`` to +:func:`_sa.create_engine`. To pass custom arguments to +``init_oracle_client()``, like the ``lib_dir`` path, a dict may be passed, for +example:: + + engine = sa.create_engine( + "oracle+oracledb://...", + thick_mode={ + "lib_dir": "/path/to/oracle/client/lib", + "config_dir": "/path/to/network_config_file_directory", + "driver_name": "my-app : 1.0.0", + }, + ) + +Note that passing a ``lib_dir`` path should only be done on macOS or +Windows. On Linux it does not behave as you might expect. + +.. seealso:: + + python-oracledb documentation `Enabling python-oracledb Thick mode + `_ + +Connecting to Oracle Database +----------------------------- + +python-oracledb provides several methods of indicating the target database. +The dialect translates from a series of different URL forms. + +Given the hostname, port and service name of the target database, you can +connect in SQLAlchemy using the ``service_name`` query string parameter:: + + engine = create_engine( + "oracle+oracledb://scott:tiger@hostname:port?service_name=myservice" + ) + +Connecting with Easy Connect strings +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +You can pass any valid python-oracledb connection string as the ``dsn`` key +value in a :paramref:`_sa.create_engine.connect_args` dictionary. See +python-oracledb documentation `Oracle Net Services Connection Strings +`_. + +For example to use an `Easy Connect string +`_ +with a timeout to prevent connection establishment from hanging if the network +transport to the database cannot be establishd in 30 seconds, and also setting +a keep-alive time of 60 seconds to stop idle network connections from being +terminated by a firewall:: + + e = create_engine( + "oracle+oracledb://@", + connect_args={ + "user": "scott", + "password": "tiger", + "dsn": "hostname:port/myservice?transport_connect_timeout=30&expire_time=60", + }, + ) + +The Easy Connect syntax has been enhanced during the life of Oracle Database. +Review the documentation for your database version. The current documentation +is at `Understanding the Easy Connect Naming Method +`_. + +The general syntax is similar to: + +.. 
sourcecode:: text + + [[protocol:]//]host[:port][/[service_name]][?parameter_name=value{¶meter_name=value}] + +Note that although the SQLAlchemy URL syntax ``hostname:port/dbname`` looks +like Oracle's Easy Connect syntax, it is different. SQLAlchemy's URL requires a +system identifier (SID) for the ``dbname`` component:: + + engine = create_engine("oracle+oracledb://scott:tiger@hostname:port/sid") + +Easy Connect syntax does not support SIDs. It uses services names, which are +the preferred choice for connecting to Oracle Database. + +Passing python-oracledb connect arguments +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Other python-oracledb driver `connection options +`_ +can be passed in ``connect_args``. For example:: + + e = create_engine( + "oracle+oracledb://@", + connect_args={ + "user": "scott", + "password": "tiger", + "dsn": "hostname:port/myservice", + "events": True, + "mode": oracledb.AUTH_MODE_SYSDBA, + }, + ) + +Connecting with tnsnames.ora TNS aliases +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +If no port, database name, or service name is provided, the dialect will use an +Oracle Database DSN "connection string". This takes the "hostname" portion of +the URL as the data source name. For example, if the ``tnsnames.ora`` file +contains a `TNS Alias +`_ +of ``myalias`` as below: + +.. sourcecode:: text + + myalias = + (DESCRIPTION = + (ADDRESS = (PROTOCOL = TCP)(HOST = mymachine.example.com)(PORT = 1521)) + (CONNECT_DATA = + (SERVER = DEDICATED) + (SERVICE_NAME = orclpdb1) + ) + ) + +The python-oracledb dialect connects to this database service when ``myalias`` is the +hostname portion of the URL, without specifying a port, database name or +``service_name``:: + + engine = create_engine("oracle+oracledb://scott:tiger@myalias") + +Connecting to Oracle Autonomous Database +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Users of Oracle Autonomous Database should use either use the TNS Alias URL +shown above, or pass the TNS Alias as the ``dsn`` key value in a +:paramref:`_sa.create_engine.connect_args` dictionary. + +If Oracle Autonomous Database is configured for mutual TLS ("mTLS") +connections, then additional configuration is required as shown in `Connecting +to Oracle Cloud Autonomous Databases +`_. In +summary, Thick mode users should configure file locations and set the wallet +path in ``sqlnet.ora`` appropriately:: + + e = create_engine( + "oracle+oracledb://@", + thick_mode={ + # directory containing tnsnames.ora and cwallet.so + "config_dir": "/opt/oracle/wallet_dir", + }, + connect_args={ + "user": "scott", + "password": "tiger", + "dsn": "mydb_high", + }, + ) + +Thin mode users of mTLS should pass the appropriate directories and PEM wallet +password when creating the engine, similar to:: + + e = create_engine( + "oracle+oracledb://@", + connect_args={ + "user": "scott", + "password": "tiger", + "dsn": "mydb_high", + "config_dir": "/opt/oracle/wallet_dir", # directory containing tnsnames.ora + "wallet_location": "/opt/oracle/wallet_dir", # directory containing ewallet.pem + "wallet_password": "top secret", # password for the PEM file + }, + ) + +Typically ``config_dir`` and ``wallet_location`` are the same directory, which +is where the Oracle Autonomous Database wallet zip file was extracted. Note +this directory should be protected. + +Connection Pooling +------------------ + +Applications with multiple concurrent users should use connection pooling. 
A +minimal sized connection pool is also beneficial for long-running, single-user +applications that do not frequently use a connection. + +The python-oracledb driver provides its own connection pool implementation that +may be used in place of SQLAlchemy's pooling functionality. The driver pool +gives support for high availability features such as dead connection detection, +connection draining for planned database downtime, support for Oracle +Application Continuity and Transparent Application Continuity, and gives +support for `Database Resident Connection Pooling (DRCP) +`_. + +To take advantage of python-oracledb's pool, use the +:paramref:`_sa.create_engine.creator` parameter to provide a function that +returns a new connection, along with setting +:paramref:`_sa.create_engine.pool_class` to ``NullPool`` to disable +SQLAlchemy's pooling:: + + import oracledb + from sqlalchemy import create_engine + from sqlalchemy import text + from sqlalchemy.pool import NullPool + + # Uncomment to use the optional python-oracledb Thick mode. + # Review the python-oracledb doc for the appropriate parameters + # oracledb.init_oracle_client() + + pool = oracledb.create_pool( + user="scott", + password="tiger", + dsn="localhost:1521/freepdb1", + min=1, + max=4, + increment=1, + ) + engine = create_engine( + "oracle+oracledb://", creator=pool.acquire, poolclass=NullPool + ) + +The above engine may then be used normally. Internally, python-oracledb handles +connection pooling:: + + with engine.connect() as conn: + print(conn.scalar(text("select 1 from dual"))) + +Refer to the python-oracledb documentation for `oracledb.create_pool() +`_ +for the arguments that can be used when creating a connection pool. + +.. _drcp: + +Using Oracle Database Resident Connection Pooling (DRCP) +-------------------------------------------------------- + +When using Oracle Database's Database Resident Connection Pooling (DRCP), the +best practice is to specify a connection class and "purity". Refer to the +`python-oracledb documentation on DRCP +`_. +For example:: + + import oracledb + from sqlalchemy import create_engine + from sqlalchemy import text + from sqlalchemy.pool import NullPool + + # Uncomment to use the optional python-oracledb Thick mode. + # Review the python-oracledb doc for the appropriate parameters + # oracledb.init_oracle_client() + + pool = oracledb.create_pool( + user="scott", + password="tiger", + dsn="localhost:1521/freepdb1", + min=1, + max=4, + increment=1, + cclass="MYCLASS", + purity=oracledb.PURITY_SELF, + ) + engine = create_engine( + "oracle+oracledb://", creator=pool.acquire, poolclass=NullPool + ) + +The above engine may then be used normally where python-oracledb handles +application connection pooling and Oracle Database additionally uses DRCP:: + + with engine.connect() as conn: + print(conn.scalar(text("select 1 from dual"))) + +If you wish to use different connection classes or purities for different +connections, then wrap ``pool.acquire()``:: + + import oracledb + from sqlalchemy import create_engine + from sqlalchemy import text + from sqlalchemy.pool import NullPool + + # Uncomment to use python-oracledb Thick mode. 
+ # Review the python-oracledb doc for the appropriate parameters + # oracledb.init_oracle_client() + + pool = oracledb.create_pool( + user="scott", + password="tiger", + dsn="localhost:1521/freepdb1", + min=1, + max=4, + increment=1, + cclass="MYCLASS", + purity=oracledb.PURITY_SELF, + ) + + + def creator(): + return pool.acquire(cclass="MYOTHERCLASS", purity=oracledb.PURITY_NEW) + + + engine = create_engine( + "oracle+oracledb://", creator=creator, poolclass=NullPool + ) + +Engine Options consumed by the SQLAlchemy oracledb dialect outside of the driver +-------------------------------------------------------------------------------- + +There are also options that are consumed by the SQLAlchemy oracledb dialect +itself. These options are always passed directly to :func:`_sa.create_engine`, +such as:: + + e = create_engine("oracle+oracledb://user:pass@tnsalias", arraysize=500) + +The parameters accepted by the oracledb dialect are as follows: + +* ``arraysize`` - set the driver cursor.arraysize value. It defaults to + ``None``, indicating that the driver default value of 100 should be used. + This setting controls how many rows are buffered when fetching rows, and can + have a significant effect on performance if increased for queries that return + large numbers of rows. + + .. versionchanged:: 2.0.26 - changed the default value from 50 to None, + to use the default value of the driver itself. + +* ``auto_convert_lobs`` - defaults to True; See :ref:`oracledb_lob`. + +* ``coerce_to_decimal`` - see :ref:`oracledb_numeric` for detail. + +* ``encoding_errors`` - see :ref:`oracledb_unicode_encoding_errors` for detail. + +.. _oracledb_unicode: + +Unicode +------- + +As is the case for all DBAPIs under Python 3, all strings are inherently +Unicode strings. + +Ensuring the Correct Client Encoding +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +In python-oracledb, the encoding used for all character data is "UTF-8". + +Unicode-specific Column datatypes +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The Core expression language handles unicode data by use of the +:class:`.Unicode` and :class:`.UnicodeText` datatypes. These types correspond +to the VARCHAR2 and CLOB Oracle Database datatypes by default. When using +these datatypes with Unicode data, it is expected that the database is +configured with a Unicode-aware character set so that the VARCHAR2 and CLOB +datatypes can accommodate the data. + +In the case that Oracle Database is not configured with a Unicode character +set, the two options are to use the :class:`_types.NCHAR` and +:class:`_oracle.NCLOB` datatypes explicitly, or to pass the flag +``use_nchar_for_unicode=True`` to :func:`_sa.create_engine`, which will cause +the SQLAlchemy dialect to use NCHAR/NCLOB for the :class:`.Unicode` / +:class:`.UnicodeText` datatypes instead of VARCHAR/CLOB. + +.. versionchanged:: 1.3 The :class:`.Unicode` and :class:`.UnicodeText` + datatypes now correspond to the ``VARCHAR2`` and ``CLOB`` Oracle Database + datatypes unless the ``use_nchar_for_unicode=True`` is passed to the dialect + when :func:`_sa.create_engine` is called. + + +.. _oracledb_unicode_encoding_errors: + +Encoding Errors +^^^^^^^^^^^^^^^ + +For the unusual case that data in Oracle Database is present with a broken +encoding, the dialect accepts a parameter ``encoding_errors`` which will be +passed to Unicode decoding functions in order to affect how decoding errors are +handled. 
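+For example, a minimal illustrative sketch; ``"replace"`` is simply one of the
+standard Python codec error handlers and is shown here only as an assumed
+value, not a recommendation::
+
+    engine = create_engine(
+        "oracle+oracledb://scott:tiger@localhost:1521?service_name=freepdb1",
+        encoding_errors="replace",
+    )
+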
The value is ultimately consumed by the Python `decode +`_ function, and +is passed both via python-oracledb's ``encodingErrors`` parameter consumed by +``Cursor.var()``, as well as SQLAlchemy's own decoding function, as the +python-oracledb dialect makes use of both under different circumstances. + +.. versionadded:: 1.3.11 + + +.. _oracledb_setinputsizes: + +Fine grained control over python-oracledb data binding with setinputsizes +------------------------------------------------------------------------- + +The python-oracle DBAPI has a deep and fundamental reliance upon the usage of +the DBAPI ``setinputsizes()`` call. The purpose of this call is to establish +the datatypes that are bound to a SQL statement for Python values being passed +as parameters. While virtually no other DBAPI assigns any use to the +``setinputsizes()`` call, the python-oracledb DBAPI relies upon it heavily in +its interactions with the Oracle Database, and in some scenarios it is not +possible for SQLAlchemy to know exactly how data should be bound, as some +settings can cause profoundly different performance characteristics, while +altering the type coercion behavior at the same time. + +Users of the oracledb dialect are **strongly encouraged** to read through +python-oracledb's list of built-in datatype symbols at `Database Types +`_ +Note that in some cases, significant performance degradation can occur when +using these types vs. not. + +On the SQLAlchemy side, the :meth:`.DialectEvents.do_setinputsizes` event can +be used both for runtime visibility (e.g. logging) of the setinputsizes step as +well as to fully control how ``setinputsizes()`` is used on a per-statement +basis. + +.. versionadded:: 1.2.9 Added :meth:`.DialectEvents.setinputsizes` + + +Example 1 - logging all setinputsizes calls +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The following example illustrates how to log the intermediary values from a +SQLAlchemy perspective before they are converted to the raw ``setinputsizes()`` +parameter dictionary. The keys of the dictionary are :class:`.BindParameter` +objects which have a ``.key`` and a ``.type`` attribute:: + + from sqlalchemy import create_engine, event + + engine = create_engine( + "oracle+oracledb://scott:tiger@localhost:1521?service_name=freepdb1" + ) + + + @event.listens_for(engine, "do_setinputsizes") + def _log_setinputsizes(inputsizes, cursor, statement, parameters, context): + for bindparam, dbapitype in inputsizes.items(): + log.info( + "Bound parameter name: %s SQLAlchemy type: %r DBAPI object: %s", + bindparam.key, + bindparam.type, + dbapitype, + ) + +Example 2 - remove all bindings to CLOB +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +For performance, fetching LOB datatypes from Oracle Database is set by default +for the ``Text`` type within SQLAlchemy. This setting can be modified as +follows:: + + + from sqlalchemy import create_engine, event + from oracledb import CLOB + + engine = create_engine( + "oracle+oracledb://scott:tiger@localhost:1521?service_name=freepdb1" + ) + + + @event.listens_for(engine, "do_setinputsizes") + def _remove_clob(inputsizes, cursor, statement, parameters, context): + for bindparam, dbapitype in list(inputsizes.items()): + if dbapitype is CLOB: + del inputsizes[bindparam] + +.. _oracledb_lob: + +LOB Datatypes +-------------- + +LOB datatypes refer to the "large object" datatypes such as CLOB, NCLOB and +BLOB. Oracle Database can efficiently return these datatypes as a single +buffer. 
SQLAlchemy makes use of type handlers to do this by default. + +To disable the use of the type handlers and deliver LOB objects as classic +buffered objects with a ``read()`` method, the parameter +``auto_convert_lobs=False`` may be passed to :func:`_sa.create_engine`. + +.. _oracledb_returning: + +RETURNING Support +----------------- + +The oracledb dialect implements RETURNING using OUT parameters. The dialect +supports RETURNING fully. + +Two Phase Transaction Support +----------------------------- + +Two phase transactions are fully supported with python-oracledb. (Thin mode +requires python-oracledb 2.3). APIs for two phase transactions are provided at +the Core level via :meth:`_engine.Connection.begin_twophase` and +:paramref:`_orm.Session.twophase` for transparent ORM use. + +.. versionchanged:: 2.0.32 added support for two phase transactions + +.. _oracledb_numeric: + +Precision Numerics +------------------ + +SQLAlchemy's numeric types can handle receiving and returning values as Python +``Decimal`` objects or float objects. When a :class:`.Numeric` object, or a +subclass such as :class:`.Float`, :class:`_oracle.DOUBLE_PRECISION` etc. is in +use, the :paramref:`.Numeric.asdecimal` flag determines if values should be +coerced to ``Decimal`` upon return, or returned as float objects. To make +matters more complicated under Oracle Database, the ``NUMBER`` type can also +represent integer values if the "scale" is zero, so the Oracle +Database-specific :class:`_oracle.NUMBER` type takes this into account as well. + +The oracledb dialect makes extensive use of connection- and cursor-level +"outputtypehandler" callables in order to coerce numeric values as requested. +These callables are specific to the specific flavor of :class:`.Numeric` in +use, as well as if no SQLAlchemy typing objects are present. There are +observed scenarios where Oracle Database may send incomplete or ambiguous +information about the numeric types being returned, such as a query where the +numeric types are buried under multiple levels of subquery. The type handlers +do their best to make the right decision in all cases, deferring to the +underlying python-oracledb DBAPI for all those cases where the driver can make +the best decision. + +When no typing objects are present, as when executing plain SQL strings, a +default "outputtypehandler" is present which will generally return numeric +values which specify precision and scale as Python ``Decimal`` objects. To +disable this coercion to decimal for performance reasons, pass the flag +``coerce_to_decimal=False`` to :func:`_sa.create_engine`:: + + engine = create_engine( + "oracle+oracledb://scott:tiger@tnsalias", coerce_to_decimal=False + ) + +The ``coerce_to_decimal`` flag only impacts the results of plain string +SQL statements that are not otherwise associated with a :class:`.Numeric` +SQLAlchemy type (or a subclass of such). + +.. versionchanged:: 1.2 The numeric handling system for the oracle dialects has + been reworked to take advantage of newer driver features as well as better + integration of outputtypehandlers. + +.. versionadded:: 2.0.0 added support for the python-oracledb driver. + +""" # noqa +from __future__ import annotations + +import collections +import re +from typing import Any +from typing import TYPE_CHECKING + +from . import cx_oracle as _cx_oracle +from ... import exc +from ... 
import pool +from ...connectors.asyncio import AsyncAdapt_dbapi_connection +from ...connectors.asyncio import AsyncAdapt_dbapi_cursor +from ...connectors.asyncio import AsyncAdapt_dbapi_ss_cursor +from ...connectors.asyncio import AsyncAdaptFallback_dbapi_connection +from ...engine import default +from ...util import asbool +from ...util import await_fallback +from ...util import await_only + +if TYPE_CHECKING: + from oracledb import AsyncConnection + from oracledb import AsyncCursor + + +class OracleExecutionContext_oracledb( + _cx_oracle.OracleExecutionContext_cx_oracle +): + pass + + +class OracleDialect_oracledb(_cx_oracle.OracleDialect_cx_oracle): + supports_statement_cache = True + execution_ctx_cls = OracleExecutionContext_oracledb + + driver = "oracledb" + _min_version = (1,) + + def __init__( + self, + auto_convert_lobs=True, + coerce_to_decimal=True, + arraysize=None, + encoding_errors=None, + thick_mode=None, + **kwargs, + ): + super().__init__( + auto_convert_lobs, + coerce_to_decimal, + arraysize, + encoding_errors, + **kwargs, + ) + + if self.dbapi is not None and ( + thick_mode or isinstance(thick_mode, dict) + ): + kw = thick_mode if isinstance(thick_mode, dict) else {} + self.dbapi.init_oracle_client(**kw) + + @classmethod + def import_dbapi(cls): + import oracledb + + return oracledb + + @classmethod + def is_thin_mode(cls, connection): + return connection.connection.dbapi_connection.thin + + @classmethod + def get_async_dialect_cls(cls, url): + return OracleDialectAsync_oracledb + + def _load_version(self, dbapi_module): + version = (0, 0, 0) + if dbapi_module is not None: + m = re.match(r"(\d+)\.(\d+)(?:\.(\d+))?", dbapi_module.version) + if m: + version = tuple( + int(x) for x in m.group(1, 2, 3) if x is not None + ) + self.oracledb_ver = version + if ( + self.oracledb_ver > (0, 0, 0) + and self.oracledb_ver < self._min_version + ): + raise exc.InvalidRequestError( + f"oracledb version {self._min_version} and above are supported" + ) + + def do_begin_twophase(self, connection, xid): + conn_xis = connection.connection.xid(*xid) + connection.connection.tpc_begin(conn_xis) + connection.connection.info["oracledb_xid"] = conn_xis + + def do_prepare_twophase(self, connection, xid): + should_commit = connection.connection.tpc_prepare() + connection.info["oracledb_should_commit"] = should_commit + + def do_rollback_twophase( + self, connection, xid, is_prepared=True, recover=False + ): + if recover: + conn_xid = connection.connection.xid(*xid) + else: + conn_xid = None + connection.connection.tpc_rollback(conn_xid) + + def do_commit_twophase( + self, connection, xid, is_prepared=True, recover=False + ): + conn_xid = None + if not is_prepared: + should_commit = connection.connection.tpc_prepare() + elif recover: + conn_xid = connection.connection.xid(*xid) + should_commit = True + else: + should_commit = connection.info["oracledb_should_commit"] + if should_commit: + connection.connection.tpc_commit(conn_xid) + + def do_recover_twophase(self, connection): + return [ + # oracledb seems to return bytes + ( + fi, + gti.decode() if isinstance(gti, bytes) else gti, + bq.decode() if isinstance(bq, bytes) else bq, + ) + for fi, gti, bq in connection.connection.tpc_recover() + ] + + def _check_max_identifier_length(self, connection): + if self.oracledb_ver >= (2, 5): + max_len = connection.connection.max_identifier_length + if max_len is not None: + return max_len + return super()._check_max_identifier_length(connection) + + +class AsyncAdapt_oracledb_cursor(AsyncAdapt_dbapi_cursor): + 
_cursor: AsyncCursor + __slots__ = () + + @property + def outputtypehandler(self): + return self._cursor.outputtypehandler + + @outputtypehandler.setter + def outputtypehandler(self, value): + self._cursor.outputtypehandler = value + + def var(self, *args, **kwargs): + return self._cursor.var(*args, **kwargs) + + def close(self): + self._rows.clear() + self._cursor.close() + + def setinputsizes(self, *args: Any, **kwargs: Any) -> Any: + return self._cursor.setinputsizes(*args, **kwargs) + + def _aenter_cursor(self, cursor: AsyncCursor) -> AsyncCursor: + try: + return cursor.__enter__() + except Exception as error: + self._adapt_connection._handle_exception(error) + + async def _execute_async(self, operation, parameters): + # override to not use mutex, oracledb already has a mutex + + if parameters is None: + result = await self._cursor.execute(operation) + else: + result = await self._cursor.execute(operation, parameters) + + if self._cursor.description and not self.server_side: + self._rows = collections.deque(await self._cursor.fetchall()) + return result + + async def _executemany_async( + self, + operation, + seq_of_parameters, + ): + # override to not use mutex, oracledb already has a mutex + return await self._cursor.executemany(operation, seq_of_parameters) + + def __enter__(self): + return self + + def __exit__(self, type_: Any, value: Any, traceback: Any) -> None: + self.close() + + +class AsyncAdapt_oracledb_ss_cursor( + AsyncAdapt_dbapi_ss_cursor, AsyncAdapt_oracledb_cursor +): + __slots__ = () + + def close(self) -> None: + if self._cursor is not None: + self._cursor.close() + self._cursor = None # type: ignore + + +class AsyncAdapt_oracledb_connection(AsyncAdapt_dbapi_connection): + _connection: AsyncConnection + __slots__ = () + + thin = True + + _cursor_cls = AsyncAdapt_oracledb_cursor + _ss_cursor_cls = None + + @property + def autocommit(self): + return self._connection.autocommit + + @autocommit.setter + def autocommit(self, value): + self._connection.autocommit = value + + @property + def outputtypehandler(self): + return self._connection.outputtypehandler + + @outputtypehandler.setter + def outputtypehandler(self, value): + self._connection.outputtypehandler = value + + @property + def version(self): + return self._connection.version + + @property + def stmtcachesize(self): + return self._connection.stmtcachesize + + @stmtcachesize.setter + def stmtcachesize(self, value): + self._connection.stmtcachesize = value + + @property + def max_identifier_length(self): + return self._connection.max_identifier_length + + def cursor(self): + return AsyncAdapt_oracledb_cursor(self) + + def ss_cursor(self): + return AsyncAdapt_oracledb_ss_cursor(self) + + def xid(self, *args: Any, **kwargs: Any) -> Any: + return self._connection.xid(*args, **kwargs) + + def tpc_begin(self, *args: Any, **kwargs: Any) -> Any: + return self.await_(self._connection.tpc_begin(*args, **kwargs)) + + def tpc_commit(self, *args: Any, **kwargs: Any) -> Any: + return self.await_(self._connection.tpc_commit(*args, **kwargs)) + + def tpc_prepare(self, *args: Any, **kwargs: Any) -> Any: + return self.await_(self._connection.tpc_prepare(*args, **kwargs)) + + def tpc_recover(self, *args: Any, **kwargs: Any) -> Any: + return self.await_(self._connection.tpc_recover(*args, **kwargs)) + + def tpc_rollback(self, *args: Any, **kwargs: Any) -> Any: + return self.await_(self._connection.tpc_rollback(*args, **kwargs)) + + +class AsyncAdaptFallback_oracledb_connection( + AsyncAdaptFallback_dbapi_connection, 
AsyncAdapt_oracledb_connection +): + __slots__ = () + + +class OracledbAdaptDBAPI: + def __init__(self, oracledb) -> None: + self.oracledb = oracledb + + for k, v in self.oracledb.__dict__.items(): + if k != "connect": + self.__dict__[k] = v + + def connect(self, *arg, **kw): + async_fallback = kw.pop("async_fallback", False) + creator_fn = kw.pop("async_creator_fn", self.oracledb.connect_async) + + if asbool(async_fallback): + return AsyncAdaptFallback_oracledb_connection( + self, await_fallback(creator_fn(*arg, **kw)) + ) + + else: + return AsyncAdapt_oracledb_connection( + self, await_only(creator_fn(*arg, **kw)) + ) + + +class OracleExecutionContextAsync_oracledb(OracleExecutionContext_oracledb): + # restore default create cursor + create_cursor = default.DefaultExecutionContext.create_cursor + + def create_default_cursor(self): + # copy of OracleExecutionContext_cx_oracle.create_cursor + c = self._dbapi_connection.cursor() + if self.dialect.arraysize: + c.arraysize = self.dialect.arraysize + + return c + + def create_server_side_cursor(self): + c = self._dbapi_connection.ss_cursor() + if self.dialect.arraysize: + c.arraysize = self.dialect.arraysize + + return c + + +class OracleDialectAsync_oracledb(OracleDialect_oracledb): + is_async = True + supports_server_side_cursors = True + supports_statement_cache = True + execution_ctx_cls = OracleExecutionContextAsync_oracledb + + _min_version = (2,) + + # thick_mode mode is not supported by asyncio, oracledb will raise + @classmethod + def import_dbapi(cls): + import oracledb + + return OracledbAdaptDBAPI(oracledb) + + @classmethod + def get_pool_class(cls, url): + async_fallback = url.query.get("async_fallback", False) + + if asbool(async_fallback): + return pool.FallbackAsyncAdaptedQueuePool + else: + return pool.AsyncAdaptedQueuePool + + def get_driver_connection(self, connection): + return connection._connection + + +dialect = OracleDialect_oracledb +dialect_async = OracleDialectAsync_oracledb diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/dialects/oracle/provision.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/dialects/oracle/provision.py new file mode 100644 index 0000000000000000000000000000000000000000..3587de9d011db55cc0eb13dcdfaab25ad7c87494 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/dialects/oracle/provision.py @@ -0,0 +1,220 @@ +# dialects/oracle/provision.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php +# mypy: ignore-errors + +from ... import create_engine +from ... import exc +from ... 
import inspect +from ...engine import url as sa_url +from ...testing.provision import configure_follower +from ...testing.provision import create_db +from ...testing.provision import drop_all_schema_objects_post_tables +from ...testing.provision import drop_all_schema_objects_pre_tables +from ...testing.provision import drop_db +from ...testing.provision import follower_url_from_main +from ...testing.provision import log +from ...testing.provision import post_configure_engine +from ...testing.provision import run_reap_dbs +from ...testing.provision import set_default_schema_on_connection +from ...testing.provision import stop_test_class_outside_fixtures +from ...testing.provision import temp_table_keyword_args +from ...testing.provision import update_db_opts + + +@create_db.for_db("oracle") +def _oracle_create_db(cfg, eng, ident): + # NOTE: make sure you've run "ALTER DATABASE default tablespace users" or + # similar, so that the default tablespace is not "system"; reflection will + # fail otherwise + with eng.begin() as conn: + conn.exec_driver_sql("create user %s identified by xe" % ident) + conn.exec_driver_sql("create user %s_ts1 identified by xe" % ident) + conn.exec_driver_sql("create user %s_ts2 identified by xe" % ident) + conn.exec_driver_sql("grant dba to %s" % (ident,)) + conn.exec_driver_sql("grant unlimited tablespace to %s" % ident) + conn.exec_driver_sql("grant unlimited tablespace to %s_ts1" % ident) + conn.exec_driver_sql("grant unlimited tablespace to %s_ts2" % ident) + # these are needed to create materialized views + conn.exec_driver_sql("grant create table to %s" % ident) + conn.exec_driver_sql("grant create table to %s_ts1" % ident) + conn.exec_driver_sql("grant create table to %s_ts2" % ident) + + +@configure_follower.for_db("oracle") +def _oracle_configure_follower(config, ident): + config.test_schema = "%s_ts1" % ident + config.test_schema_2 = "%s_ts2" % ident + + +def _ora_drop_ignore(conn, dbname): + try: + conn.exec_driver_sql("drop user %s cascade" % dbname) + log.info("Reaped db: %s", dbname) + return True + except exc.DatabaseError as err: + log.warning("couldn't drop db: %s", err) + return False + + +@drop_all_schema_objects_pre_tables.for_db("oracle") +def _ora_drop_all_schema_objects_pre_tables(cfg, eng): + _purge_recyclebin(eng) + _purge_recyclebin(eng, cfg.test_schema) + + +@drop_all_schema_objects_post_tables.for_db("oracle") +def _ora_drop_all_schema_objects_post_tables(cfg, eng): + with eng.begin() as conn: + for syn in conn.dialect._get_synonyms(conn, None, None, None): + conn.exec_driver_sql(f"drop synonym {syn['synonym_name']}") + + for syn in conn.dialect._get_synonyms( + conn, cfg.test_schema, None, None + ): + conn.exec_driver_sql( + f"drop synonym {cfg.test_schema}.{syn['synonym_name']}" + ) + + for tmp_table in inspect(conn).get_temp_table_names(): + conn.exec_driver_sql(f"drop table {tmp_table}") + + +@drop_db.for_db("oracle") +def _oracle_drop_db(cfg, eng, ident): + with eng.begin() as conn: + # cx_Oracle seems to occasionally leak open connections when a large + # suite it run, even if we confirm we have zero references to + # connection objects. + # while there is a "kill session" command in Oracle Database, + # it unfortunately does not release the connection sufficiently. 
+ _ora_drop_ignore(conn, ident) + _ora_drop_ignore(conn, "%s_ts1" % ident) + _ora_drop_ignore(conn, "%s_ts2" % ident) + + +@stop_test_class_outside_fixtures.for_db("oracle") +def _ora_stop_test_class_outside_fixtures(config, db, cls): + try: + _purge_recyclebin(db) + except exc.DatabaseError as err: + log.warning("purge recyclebin command failed: %s", err) + + # clear statement cache on all connections that were used + # https://github.com/oracle/python-cx_Oracle/issues/519 + + for cx_oracle_conn in _all_conns: + try: + sc = cx_oracle_conn.stmtcachesize + except db.dialect.dbapi.InterfaceError: + # connection closed + pass + else: + cx_oracle_conn.stmtcachesize = 0 + cx_oracle_conn.stmtcachesize = sc + _all_conns.clear() + + +def _purge_recyclebin(eng, schema=None): + with eng.begin() as conn: + if schema is None: + # run magic command to get rid of identity sequences + # https://floo.bar/2019/11/29/drop-the-underlying-sequence-of-an-identity-column/ # noqa: E501 + conn.exec_driver_sql("purge recyclebin") + else: + # per user: https://community.oracle.com/tech/developers/discussion/2255402/how-to-clear-dba-recyclebin-for-a-particular-user # noqa: E501 + for owner, object_name, type_ in conn.exec_driver_sql( + "select owner, object_name,type from " + "dba_recyclebin where owner=:schema and type='TABLE'", + {"schema": conn.dialect.denormalize_name(schema)}, + ).all(): + conn.exec_driver_sql(f'purge {type_} {owner}."{object_name}"') + + +_all_conns = set() + + +@post_configure_engine.for_db("oracle") +def _oracle_post_configure_engine(url, engine, follower_ident): + from sqlalchemy import event + + @event.listens_for(engine, "checkout") + def checkout(dbapi_con, con_record, con_proxy): + _all_conns.add(dbapi_con) + + @event.listens_for(engine, "checkin") + def checkin(dbapi_connection, connection_record): + # work around cx_Oracle issue: + # https://github.com/oracle/python-cx_Oracle/issues/530 + # invalidate oracle connections that had 2pc set up + if "cx_oracle_xid" in connection_record.info: + connection_record.invalidate() + + +@run_reap_dbs.for_db("oracle") +def _reap_oracle_dbs(url, idents): + log.info("db reaper connecting to %r", url) + eng = create_engine(url) + with eng.begin() as conn: + log.info("identifiers in file: %s", ", ".join(idents)) + + to_reap = conn.exec_driver_sql( + "select u.username from all_users u where username " + "like 'TEST_%' and not exists (select username " + "from v$session where username=u.username)" + ) + all_names = {username.lower() for (username,) in to_reap} + to_drop = set() + for name in all_names: + if name.endswith("_ts1") or name.endswith("_ts2"): + continue + elif name in idents: + to_drop.add(name) + if "%s_ts1" % name in all_names: + to_drop.add("%s_ts1" % name) + if "%s_ts2" % name in all_names: + to_drop.add("%s_ts2" % name) + + dropped = total = 0 + for total, username in enumerate(to_drop, 1): + if _ora_drop_ignore(conn, username): + dropped += 1 + log.info( + "Dropped %d out of %d stale databases detected", dropped, total + ) + + +@follower_url_from_main.for_db("oracle") +def _oracle_follower_url_from_main(url, ident): + url = sa_url.make_url(url) + return url.set(username=ident, password="xe") + + +@temp_table_keyword_args.for_db("oracle") +def _oracle_temp_table_keyword_args(cfg, eng): + return { + "prefixes": ["GLOBAL TEMPORARY"], + "oracle_on_commit": "PRESERVE ROWS", + } + + +@set_default_schema_on_connection.for_db("oracle") +def _oracle_set_default_schema_on_connection( + cfg, dbapi_connection, schema_name +): + cursor = 
dbapi_connection.cursor() + cursor.execute("ALTER SESSION SET CURRENT_SCHEMA=%s" % schema_name) + cursor.close() + + +@update_db_opts.for_db("oracle") +def _update_db_opts(db_url, db_opts, options): + """Set database options (db_opts) for a test database that we created.""" + if ( + options.oracledb_thick_mode + and sa_url.make_url(db_url).get_driver_name() == "oracledb" + ): + db_opts["thick_mode"] = True diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/dialects/oracle/vector.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/dialects/oracle/vector.py new file mode 100644 index 0000000000000000000000000000000000000000..dae89d3418d2c965e233fae971ad5bdcebdc0f46 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/dialects/oracle/vector.py @@ -0,0 +1,266 @@ +# dialects/oracle/vector.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php +# mypy: ignore-errors + + +from __future__ import annotations + +import array +from dataclasses import dataclass +from enum import Enum +from typing import Optional + +import sqlalchemy.types as types +from sqlalchemy.types import Float + + +class VectorIndexType(Enum): + """Enum representing different types of VECTOR index structures. + + See :ref:`oracle_vector_datatype` for background. + + .. versionadded:: 2.0.41 + + """ + + HNSW = "HNSW" + """ + The HNSW (Hierarchical Navigable Small World) index type. + """ + IVF = "IVF" + """ + The IVF (Inverted File Index) index type + """ + + +class VectorDistanceType(Enum): + """Enum representing different types of vector distance metrics. + + See :ref:`oracle_vector_datatype` for background. + + .. versionadded:: 2.0.41 + + """ + + EUCLIDEAN = "EUCLIDEAN" + """Euclidean distance (L2 norm). + + Measures the straight-line distance between two vectors in space. + """ + DOT = "DOT" + """Dot product similarity. + + Measures the algebraic similarity between two vectors. + """ + COSINE = "COSINE" + """Cosine similarity. + + Measures the cosine of the angle between two vectors. + """ + MANHATTAN = "MANHATTAN" + """Manhattan distance (L1 norm). + + Calculates the sum of absolute differences across dimensions. + """ + + +class VectorStorageFormat(Enum): + """Enum representing the data format used to store vector components. + + See :ref:`oracle_vector_datatype` for background. + + .. versionadded:: 2.0.41 + + """ + + INT8 = "INT8" + """ + 8-bit integer format. + """ + BINARY = "BINARY" + """ + Binary format. + """ + FLOAT32 = "FLOAT32" + """ + 32-bit floating-point format. + """ + FLOAT64 = "FLOAT64" + """ + 64-bit floating-point format. + """ + + +@dataclass +class VectorIndexConfig: + """Define the configuration for Oracle VECTOR Index. + + See :ref:`oracle_vector_datatype` for background. + + .. versionadded:: 2.0.41 + + :param index_type: Enum value from :class:`.VectorIndexType` + Specifies the indexing method. For HNSW, this must be + :attr:`.VectorIndexType.HNSW`. + + :param distance: Enum value from :class:`.VectorDistanceType` + specifies the metric for calculating distance between VECTORS. + + :param accuracy: interger. Should be in the range 0 to 100 + Specifies the accuracy of the nearest neighbor search during + query execution. + + :param parallel: integer. Specifies degree of parallelism. 
+ + :param hnsw_neighbors: interger. Should be in the range 0 to + 2048. Specifies the number of nearest neighbors considered + during the search. The attribute :attr:`.VectorIndexConfig.hnsw_neighbors` + is HNSW index specific. + + :param hnsw_efconstruction: integer. Should be in the range 0 + to 65535. Controls the trade-off between indexing speed and + recall quality during index construction. The attribute + :attr:`.VectorIndexConfig.hnsw_efconstruction` is HNSW index + specific. + + :param ivf_neighbor_partitions: integer. Should be in the range + 0 to 10,000,000. Specifies the number of partitions used to + divide the dataset. The attribute + :attr:`.VectorIndexConfig.ivf_neighbor_partitions` is IVF index + specific. + + :param ivf_sample_per_partition: integer. Should be between 1 + and ``num_vectors / neighbor partitions``. Specifies the + number of samples used per partition. The attribute + :attr:`.VectorIndexConfig.ivf_sample_per_partition` is IVF index + specific. + + :param ivf_min_vectors_per_partition: integer. From 0 (no trimming) + to the total number of vectors (results in 1 partition). Specifies + the minimum number of vectors per partition. The attribute + :attr:`.VectorIndexConfig.ivf_min_vectors_per_partition` + is IVF index specific. + + """ + + index_type: VectorIndexType = VectorIndexType.HNSW + distance: Optional[VectorDistanceType] = None + accuracy: Optional[int] = None + hnsw_neighbors: Optional[int] = None + hnsw_efconstruction: Optional[int] = None + ivf_neighbor_partitions: Optional[int] = None + ivf_sample_per_partition: Optional[int] = None + ivf_min_vectors_per_partition: Optional[int] = None + parallel: Optional[int] = None + + def __post_init__(self): + self.index_type = VectorIndexType(self.index_type) + for field in [ + "hnsw_neighbors", + "hnsw_efconstruction", + "ivf_neighbor_partitions", + "ivf_sample_per_partition", + "ivf_min_vectors_per_partition", + "parallel", + "accuracy", + ]: + value = getattr(self, field) + if value is not None and not isinstance(value, int): + raise TypeError( + f"{field} must be an integer if" + f"provided, got {type(value).__name__}" + ) + + +class VECTOR(types.TypeEngine): + """Oracle VECTOR datatype. + + For complete background on using this type, see + :ref:`oracle_vector_datatype`. + + .. versionadded:: 2.0.41 + + """ + + cache_ok = True + __visit_name__ = "VECTOR" + + _typecode_map = { + VectorStorageFormat.INT8: "b", # Signed int + VectorStorageFormat.BINARY: "B", # Unsigned int + VectorStorageFormat.FLOAT32: "f", # Float + VectorStorageFormat.FLOAT64: "d", # Double + } + + def __init__(self, dim=None, storage_format=None): + """Construct a VECTOR. + + :param dim: integer. The dimension of the VECTOR datatype. This + should be an integer value. + + :param storage_format: VectorStorageFormat. The VECTOR storage + type format. This may be Enum values form + :class:`.VectorStorageFormat` INT8, BINARY, FLOAT32, or FLOAT64. + + """ + if dim is not None and not isinstance(dim, int): + raise TypeError("dim must be an interger") + if storage_format is not None and not isinstance( + storage_format, VectorStorageFormat + ): + raise TypeError( + "storage_format must be an enum of type VectorStorageFormat" + ) + self.dim = dim + self.storage_format = storage_format + + def _cached_bind_processor(self, dialect): + """ + Convert a list to a array.array before binding it to the database. 
+ """ + + def process(value): + if value is None or isinstance(value, array.array): + return value + + # Convert list to a array.array + elif isinstance(value, list): + typecode = self._array_typecode(self.storage_format) + value = array.array(typecode, value) + return value + + else: + raise TypeError("VECTOR accepts list or array.array()") + + return process + + def _cached_result_processor(self, dialect, coltype): + """ + Convert a array.array to list before binding it to the database. + """ + + def process(value): + if isinstance(value, array.array): + return list(value) + + return process + + def _array_typecode(self, typecode): + """ + Map storage format to array typecode. + """ + return self._typecode_map.get(typecode, "d") + + class comparator_factory(types.TypeEngine.Comparator): + def l2_distance(self, other): + return self.op("<->", return_type=Float)(other) + + def inner_product(self, other): + return self.op("<#>", return_type=Float)(other) + + def cosine_distance(self, other): + return self.op("<=>", return_type=Float)(other) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/dialects/sqlite/base.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/dialects/sqlite/base.py new file mode 100644 index 0000000000000000000000000000000000000000..cc43a826f5ac59a68f6989179e22d3f18b38c11a --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/dialects/sqlite/base.py @@ -0,0 +1,2945 @@ +# dialects/sqlite/base.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php +# mypy: ignore-errors + + +r''' +.. dialect:: sqlite + :name: SQLite + :normal_support: 3.12+ + :best_effort: 3.7.16+ + +.. _sqlite_datetime: + +Date and Time Types +------------------- + +SQLite does not have built-in DATE, TIME, or DATETIME types, and pysqlite does +not provide out of the box functionality for translating values between Python +`datetime` objects and a SQLite-supported format. SQLAlchemy's own +:class:`~sqlalchemy.types.DateTime` and related types provide date formatting +and parsing functionality when SQLite is used. The implementation classes are +:class:`_sqlite.DATETIME`, :class:`_sqlite.DATE` and :class:`_sqlite.TIME`. +These types represent dates and times as ISO formatted strings, which also +nicely support ordering. There's no reliance on typical "libc" internals for +these functions so historical dates are fully supported. + +Ensuring Text affinity +^^^^^^^^^^^^^^^^^^^^^^ + +The DDL rendered for these types is the standard ``DATE``, ``TIME`` +and ``DATETIME`` indicators. However, custom storage formats can also be +applied to these types. When the +storage format is detected as containing no alpha characters, the DDL for +these types is rendered as ``DATE_CHAR``, ``TIME_CHAR``, and ``DATETIME_CHAR``, +so that the column continues to have textual affinity. + +.. seealso:: + + `Type Affinity `_ - + in the SQLite documentation + +.. 
_sqlite_autoincrement: + +SQLite Auto Incrementing Behavior +---------------------------------- + +Background on SQLite's autoincrement is at: https://sqlite.org/autoinc.html + +Key concepts: + +* SQLite has an implicit "auto increment" feature that takes place for any + non-composite primary-key column that is specifically created using + "INTEGER PRIMARY KEY" for the type + primary key. + +* SQLite also has an explicit "AUTOINCREMENT" keyword, that is **not** + equivalent to the implicit autoincrement feature; this keyword is not + recommended for general use. SQLAlchemy does not render this keyword + unless a special SQLite-specific directive is used (see below). However, + it still requires that the column's type is named "INTEGER". + +Using the AUTOINCREMENT Keyword +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +To specifically render the AUTOINCREMENT keyword on the primary key column +when rendering DDL, add the flag ``sqlite_autoincrement=True`` to the Table +construct:: + + Table( + "sometable", + metadata, + Column("id", Integer, primary_key=True), + sqlite_autoincrement=True, + ) + +Allowing autoincrement behavior SQLAlchemy types other than Integer/INTEGER +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +SQLite's typing model is based on naming conventions. Among other things, this +means that any type name which contains the substring ``"INT"`` will be +determined to be of "integer affinity". A type named ``"BIGINT"``, +``"SPECIAL_INT"`` or even ``"XYZINTQPR"``, will be considered by SQLite to be +of "integer" affinity. However, **the SQLite autoincrement feature, whether +implicitly or explicitly enabled, requires that the name of the column's type +is exactly the string "INTEGER"**. Therefore, if an application uses a type +like :class:`.BigInteger` for a primary key, on SQLite this type will need to +be rendered as the name ``"INTEGER"`` when emitting the initial ``CREATE +TABLE`` statement in order for the autoincrement behavior to be available. + +One approach to achieve this is to use :class:`.Integer` on SQLite +only using :meth:`.TypeEngine.with_variant`:: + + table = Table( + "my_table", + metadata, + Column( + "id", + BigInteger().with_variant(Integer, "sqlite"), + primary_key=True, + ), + ) + +Another is to use a subclass of :class:`.BigInteger` that overrides its DDL +name to be ``INTEGER`` when compiled against SQLite:: + + from sqlalchemy import BigInteger + from sqlalchemy.ext.compiler import compiles + + + class SLBigInteger(BigInteger): + pass + + + @compiles(SLBigInteger, "sqlite") + def bi_c(element, compiler, **kw): + return "INTEGER" + + + @compiles(SLBigInteger) + def bi_c(element, compiler, **kw): + return compiler.visit_BIGINT(element, **kw) + + + table = Table( + "my_table", metadata, Column("id", SLBigInteger(), primary_key=True) + ) + +.. seealso:: + + :meth:`.TypeEngine.with_variant` + + :ref:`sqlalchemy.ext.compiler_toplevel` + + `Datatypes In SQLite Version 3 `_ + +.. _sqlite_transactions: + +Transactions with SQLite and the sqlite3 driver +----------------------------------------------- + +As a file-based database, SQLite's approach to transactions differs from +traditional databases in many ways. Additionally, the ``sqlite3`` driver +standard with Python (as well as the async version ``aiosqlite`` which builds +on top of it) has several quirks, workarounds, and API features in the +area of transaction control, all of which generally need to be addressed when +constructing a SQLAlchemy application that uses SQLite. 
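As a quick standard-library-only illustration of the driver behavior
described in the following subsections (a minimal sketch assuming the
driver's default legacy settings; the table name and values are
illustrative only)::

    import sqlite3

    conn = sqlite3.connect(":memory:")
    conn.execute("create table t (x integer)")

    conn.execute("select x from t")
    print(conn.in_transaction)  # False - no BEGIN was emitted for the SELECT

    conn.execute("insert into t (x) values (1)")
    print(conn.in_transaction)  # True - the DML statement implied a BEGIN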
+ +Legacy Transaction Mode with the sqlite3 driver +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The most important aspect of transaction handling with the sqlite3 driver is +that it defaults (which will continue through Python 3.15 before being +removed in Python 3.16) to legacy transactional behavior which does +not strictly follow :pep:`249`. The way in which the driver diverges from the +PEP is that it does not "begin" a transaction automatically as dictated by +:pep:`249` except in the case of DML statements, e.g. INSERT, UPDATE, and +DELETE. Normally, :pep:`249` dictates that a BEGIN must be emitted upon +the first SQL statement of any kind, so that all subsequent operations will +be established within a transaction until ``connection.commit()`` has been +called. The ``sqlite3`` driver, in an effort to be easier to use in +highly concurrent environments, skips this step for DQL (e.g. SELECT) statements, +and also skips it for DDL (e.g. CREATE TABLE etc.) statements for more legacy +reasons. Statements such as SAVEPOINT are also skipped. + +In modern versions of the ``sqlite3`` driver as of Python 3.12, this legacy +mode of operation is referred to as +`"legacy transaction control" `_, and is in +effect by default due to the ``Connection.autocommit`` parameter being set to +the constant ``sqlite3.LEGACY_TRANSACTION_CONTROL``. Prior to Python 3.12, +the ``Connection.autocommit`` attribute did not exist. + +The implications of legacy transaction mode include: + +* **Incorrect support for transactional DDL** - statements like CREATE TABLE, ALTER TABLE, + CREATE INDEX etc. will not automatically BEGIN a transaction if one were not + started already, leading to the changes by each statement being + "autocommitted" immediately unless BEGIN were otherwise emitted first. Very + old (pre Python 3.6) versions of SQLite would also force a COMMIT for these + operations even if a transaction were present, however this is no longer the + case. +* **SERIALIZABLE behavior not fully functional** - SQLite's transaction isolation + behavior is normally consistent with SERIALIZABLE isolation, as it is a file- + based system that locks the database file entirely for write operations, + preventing COMMIT until all reader transactions (and associated file locks) + have completed. However, sqlite3's legacy transaction mode fails to emit BEGIN for SELECT + statements, which causes these SELECT statements to no longer be "repeatable", + failing one of the consistency guarantees of SERIALIZABLE. +* **Incorrect behavior for SAVEPOINT** - as the SAVEPOINT statement does not + imply a BEGIN, a new SAVEPOINT emitted before a BEGIN will function on its + own but fails to participate in the enclosing transaction, meaning a ROLLBACK + of the transaction will not rollback elements that were part of a released + savepoint. + +Legacy transaction mode first existed in order to faciliate working around +SQLite's file locks. Because SQLite relies upon whole-file locks, it is easy to +get "database is locked" errors, particularly when newer features like "write +ahead logging" are disabled. This is a key reason why ``sqlite3``'s legacy +transaction mode is still the default mode of operation; disabling it will +produce behavior that is more susceptible to locked database errors. However +note that **legacy transaction mode will no longer be the default** in a future +Python version (3.16 as of this writing). + +.. 
_sqlite_enabling_transactions: + +Enabling Non-Legacy SQLite Transactional Modes with the sqlite3 or aiosqlite driver +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Current SQLAlchemy support allows either for setting the +``.Connection.autocommit`` attribute, most directly by using a +:func:`._sa.create_engine` parameter, or if on an older version of Python where +the attribute is not available, using event hooks to control the behavior of +BEGIN. + +* **Enabling modern sqlite3 transaction control via the autocommit connect parameter** (Python 3.12 and above) + + To use SQLite in the mode described at `Transaction control via the autocommit attribute `_, + the most straightforward approach is to set the attribute to its recommended value + of ``False`` at the connect level using :paramref:`_sa.create_engine.connect_args``:: + + from sqlalchemy import create_engine + + engine = create_engine( + "sqlite:///myfile.db", connect_args={"autocommit": False} + ) + + This parameter is also passed through when using the aiosqlite driver:: + + from sqlalchemy.ext.asyncio import create_async_engine + + engine = create_async_engine( + "sqlite+aiosqlite:///myfile.db", connect_args={"autocommit": False} + ) + + The parameter can also be set at the attribute level using the :meth:`.PoolEvents.connect` + event hook, however this will only work for sqlite3, as aiosqlite does not yet expose this + attribute on its ``Connection`` object:: + + from sqlalchemy import create_engine, event + + engine = create_engine("sqlite:///myfile.db") + + + @event.listens_for(engine, "connect") + def do_connect(dbapi_connection, connection_record): + # enable autocommit=False mode + dbapi_connection.autocommit = False + +* **Using SQLAlchemy to emit BEGIN in lieu of SQLite's transaction control** (all Python versions, sqlite3 and aiosqlite) + + For older versions of ``sqlite3`` or for cross-compatiblity with older and + newer versions, SQLAlchemy can also take over the job of transaction control. + This is achieved by using the :meth:`.ConnectionEvents.begin` hook + to emit the "BEGIN" command directly, while also disabling SQLite's control + of this command using the :meth:`.PoolEvents.connect` event hook to set the + ``Connection.isolation_level`` attribute to ``None``:: + + + from sqlalchemy import create_engine, event + + engine = create_engine("sqlite:///myfile.db") + + + @event.listens_for(engine, "connect") + def do_connect(dbapi_connection, connection_record): + # disable sqlite3's emitting of the BEGIN statement entirely. + dbapi_connection.isolation_level = None + + + @event.listens_for(engine, "begin") + def do_begin(conn): + # emit our own BEGIN. sqlite3 still emits COMMIT/ROLLBACK correctly + conn.exec_driver_sql("BEGIN") + + When using the asyncio variant ``aiosqlite``, refer to ``engine.sync_engine`` + as in the example below:: + + from sqlalchemy import create_engine, event + from sqlalchemy.ext.asyncio import create_async_engine + + engine = create_async_engine("sqlite+aiosqlite:///myfile.db") + + + @event.listens_for(engine.sync_engine, "connect") + def do_connect(dbapi_connection, connection_record): + # disable aiosqlite's emitting of the BEGIN statement entirely. + dbapi_connection.isolation_level = None + + + @event.listens_for(engine.sync_engine, "begin") + def do_begin(conn): + # emit our own BEGIN. aiosqlite still emits COMMIT/ROLLBACK correctly + conn.exec_driver_sql("BEGIN") + +.. 
_sqlite_isolation_level: + +Using SQLAlchemy's Driver Level AUTOCOMMIT Feature with SQLite +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +SQLAlchemy has a comprehensive database isolation feature with optional +autocommit support that is introduced in the section :ref:`dbapi_autocommit`. + +For the ``sqlite3`` and ``aiosqlite`` drivers, SQLAlchemy only includes +built-in support for "AUTOCOMMIT". Note that this mode is currently incompatible +with the non-legacy isolation mode hooks documented in the previous +section at :ref:`sqlite_enabling_transactions`. + +To use the ``sqlite3`` driver with SQLAlchemy driver-level autocommit, +create an engine setting the :paramref:`_sa.create_engine.isolation_level` +parameter to "AUTOCOMMIT":: + + eng = create_engine("sqlite:///myfile.db", isolation_level="AUTOCOMMIT") + +When using the above mode, any event hooks that set the sqlite3 ``Connection.autocommit`` +parameter away from its default of ``sqlite3.LEGACY_TRANSACTION_CONTROL`` +as well as hooks that emit ``BEGIN`` should be disabled. + +Additional Reading for SQLite / sqlite3 transaction control +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Links with important information on SQLite, the sqlite3 driver, +as well as long historical conversations on how things got to their current state: + +* `Isolation in SQLite `_ - on the SQLite website +* `Transaction control `_ - describes the sqlite3 autocommit attribute as well + as the legacy isolation_level attribute. +* `sqlite3 SELECT does not BEGIN a transaction, but should according to spec `_ - imported Python standard library issue on github +* `sqlite3 module breaks transactions and potentially corrupts data `_ - imported Python standard library issue on github + + +INSERT/UPDATE/DELETE...RETURNING +--------------------------------- + +The SQLite dialect supports SQLite 3.35's ``INSERT|UPDATE|DELETE..RETURNING`` +syntax. ``INSERT..RETURNING`` may be used +automatically in some cases in order to fetch newly generated identifiers in +place of the traditional approach of using ``cursor.lastrowid``, however +``cursor.lastrowid`` is currently still preferred for simple single-statement +cases for its better performance. + +To specify an explicit ``RETURNING`` clause, use the +:meth:`._UpdateBase.returning` method on a per-statement basis:: + + # INSERT..RETURNING + result = connection.execute( + table.insert().values(name="foo").returning(table.c.col1, table.c.col2) + ) + print(result.all()) + + # UPDATE..RETURNING + result = connection.execute( + table.update() + .where(table.c.name == "foo") + .values(name="bar") + .returning(table.c.col1, table.c.col2) + ) + print(result.all()) + + # DELETE..RETURNING + result = connection.execute( + table.delete() + .where(table.c.name == "foo") + .returning(table.c.col1, table.c.col2) + ) + print(result.all()) + +.. versionadded:: 2.0 Added support for SQLite RETURNING + + +.. _sqlite_foreign_keys: + +Foreign Key Support +------------------- + +SQLite supports FOREIGN KEY syntax when emitting CREATE statements for tables, +however by default these constraints have no effect on the operation of the +table. + +Constraint checking on SQLite has three prerequisites: + +* At least version 3.6.19 of SQLite must be in use +* The SQLite library must be compiled *without* the SQLITE_OMIT_FOREIGN_KEY + or SQLITE_OMIT_TRIGGER symbols enabled. 
+* The ``PRAGMA foreign_keys = ON`` statement must be emitted on all + connections before use -- including the initial call to + :meth:`sqlalchemy.schema.MetaData.create_all`. + +SQLAlchemy allows for the ``PRAGMA`` statement to be emitted automatically for +new connections through the usage of events:: + + from sqlalchemy.engine import Engine + from sqlalchemy import event + + + @event.listens_for(Engine, "connect") + def set_sqlite_pragma(dbapi_connection, connection_record): + cursor = dbapi_connection.cursor() + cursor.execute("PRAGMA foreign_keys=ON") + cursor.close() + +.. warning:: + + When SQLite foreign keys are enabled, it is **not possible** + to emit CREATE or DROP statements for tables that contain + mutually-dependent foreign key constraints; + to emit the DDL for these tables requires that ALTER TABLE be used to + create or drop these constraints separately, for which SQLite has + no support. + +.. seealso:: + + `SQLite Foreign Key Support `_ + - on the SQLite web site. + + :ref:`event_toplevel` - SQLAlchemy event API. + + :ref:`use_alter` - more information on SQLAlchemy's facilities for handling + mutually-dependent foreign key constraints. + +.. _sqlite_on_conflict_ddl: + +ON CONFLICT support for constraints +----------------------------------- + +.. seealso:: This section describes the :term:`DDL` version of "ON CONFLICT" for + SQLite, which occurs within a CREATE TABLE statement. For "ON CONFLICT" as + applied to an INSERT statement, see :ref:`sqlite_on_conflict_insert`. + +SQLite supports a non-standard DDL clause known as ON CONFLICT which can be applied +to primary key, unique, check, and not null constraints. In DDL, it is +rendered either within the "CONSTRAINT" clause or within the column definition +itself depending on the location of the target constraint. To render this +clause within DDL, the extension parameter ``sqlite_on_conflict`` can be +specified with a string conflict resolution algorithm within the +:class:`.PrimaryKeyConstraint`, :class:`.UniqueConstraint`, +:class:`.CheckConstraint` objects. Within the :class:`_schema.Column` object, +there +are individual parameters ``sqlite_on_conflict_not_null``, +``sqlite_on_conflict_primary_key``, ``sqlite_on_conflict_unique`` which each +correspond to the three types of relevant constraint types that can be +indicated from a :class:`_schema.Column` object. + +.. seealso:: + + `ON CONFLICT `_ - in the SQLite + documentation + +.. versionadded:: 1.3 + + +The ``sqlite_on_conflict`` parameters accept a string argument which is just +the resolution name to be chosen, which on SQLite can be one of ROLLBACK, +ABORT, FAIL, IGNORE, and REPLACE. For example, to add a UNIQUE constraint +that specifies the IGNORE algorithm:: + + some_table = Table( + "some_table", + metadata, + Column("id", Integer, primary_key=True), + Column("data", Integer), + UniqueConstraint("id", "data", sqlite_on_conflict="IGNORE"), + ) + +The above renders CREATE TABLE DDL as: + +.. 
sourcecode:: sql + + CREATE TABLE some_table ( + id INTEGER NOT NULL, + data INTEGER, + PRIMARY KEY (id), + UNIQUE (id, data) ON CONFLICT IGNORE + ) + + +When using the :paramref:`_schema.Column.unique` +flag to add a UNIQUE constraint +to a single column, the ``sqlite_on_conflict_unique`` parameter can +be added to the :class:`_schema.Column` as well, which will be added to the +UNIQUE constraint in the DDL:: + + some_table = Table( + "some_table", + metadata, + Column("id", Integer, primary_key=True), + Column( + "data", Integer, unique=True, sqlite_on_conflict_unique="IGNORE" + ), + ) + +rendering: + +.. sourcecode:: sql + + CREATE TABLE some_table ( + id INTEGER NOT NULL, + data INTEGER, + PRIMARY KEY (id), + UNIQUE (data) ON CONFLICT IGNORE + ) + +To apply the FAIL algorithm for a NOT NULL constraint, +``sqlite_on_conflict_not_null`` is used:: + + some_table = Table( + "some_table", + metadata, + Column("id", Integer, primary_key=True), + Column( + "data", Integer, nullable=False, sqlite_on_conflict_not_null="FAIL" + ), + ) + +this renders the column inline ON CONFLICT phrase: + +.. sourcecode:: sql + + CREATE TABLE some_table ( + id INTEGER NOT NULL, + data INTEGER NOT NULL ON CONFLICT FAIL, + PRIMARY KEY (id) + ) + + +Similarly, for an inline primary key, use ``sqlite_on_conflict_primary_key``:: + + some_table = Table( + "some_table", + metadata, + Column( + "id", + Integer, + primary_key=True, + sqlite_on_conflict_primary_key="FAIL", + ), + ) + +SQLAlchemy renders the PRIMARY KEY constraint separately, so the conflict +resolution algorithm is applied to the constraint itself: + +.. sourcecode:: sql + + CREATE TABLE some_table ( + id INTEGER NOT NULL, + PRIMARY KEY (id) ON CONFLICT FAIL + ) + +.. _sqlite_on_conflict_insert: + +INSERT...ON CONFLICT (Upsert) +----------------------------- + +.. seealso:: This section describes the :term:`DML` version of "ON CONFLICT" for + SQLite, which occurs within an INSERT statement. For "ON CONFLICT" as + applied to a CREATE TABLE statement, see :ref:`sqlite_on_conflict_ddl`. + +From version 3.24.0 onwards, SQLite supports "upserts" (update or insert) +of rows into a table via the ``ON CONFLICT`` clause of the ``INSERT`` +statement. A candidate row will only be inserted if that row does not violate +any unique or primary key constraints. In the case of a unique constraint violation, a +secondary action can occur which can be either "DO UPDATE", indicating that +the data in the target row should be updated, or "DO NOTHING", which indicates +to silently skip this row. + +Conflicts are determined using columns that are part of existing unique +constraints and indexes. These constraints are identified by stating the +columns and conditions that comprise the indexes. + +SQLAlchemy provides ``ON CONFLICT`` support via the SQLite-specific +:func:`_sqlite.insert()` function, which provides +the generative methods :meth:`_sqlite.Insert.on_conflict_do_update` +and :meth:`_sqlite.Insert.on_conflict_do_nothing`: + +.. sourcecode:: pycon+sql + + >>> from sqlalchemy.dialects.sqlite import insert + + >>> insert_stmt = insert(my_table).values( + ... id="some_existing_id", data="inserted value" + ... ) + + >>> do_update_stmt = insert_stmt.on_conflict_do_update( + ... index_elements=["id"], set_=dict(data="updated value") + ... ) + + >>> print(do_update_stmt) + {printsql}INSERT INTO my_table (id, data) VALUES (?, ?) 
+ ON CONFLICT (id) DO UPDATE SET data = ?{stop} + + >>> do_nothing_stmt = insert_stmt.on_conflict_do_nothing(index_elements=["id"]) + + >>> print(do_nothing_stmt) + {printsql}INSERT INTO my_table (id, data) VALUES (?, ?) + ON CONFLICT (id) DO NOTHING + +.. versionadded:: 1.4 + +.. seealso:: + + `Upsert + `_ + - in the SQLite documentation. + + +Specifying the Target +^^^^^^^^^^^^^^^^^^^^^ + +Both methods supply the "target" of the conflict using column inference: + +* The :paramref:`_sqlite.Insert.on_conflict_do_update.index_elements` argument + specifies a sequence containing string column names, :class:`_schema.Column` + objects, and/or SQL expression elements, which would identify a unique index + or unique constraint. + +* When using :paramref:`_sqlite.Insert.on_conflict_do_update.index_elements` + to infer an index, a partial index can be inferred by also specifying the + :paramref:`_sqlite.Insert.on_conflict_do_update.index_where` parameter: + + .. sourcecode:: pycon+sql + + >>> stmt = insert(my_table).values(user_email="a@b.com", data="inserted data") + + >>> do_update_stmt = stmt.on_conflict_do_update( + ... index_elements=[my_table.c.user_email], + ... index_where=my_table.c.user_email.like("%@gmail.com"), + ... set_=dict(data=stmt.excluded.data), + ... ) + + >>> print(do_update_stmt) + {printsql}INSERT INTO my_table (data, user_email) VALUES (?, ?) + ON CONFLICT (user_email) + WHERE user_email LIKE '%@gmail.com' + DO UPDATE SET data = excluded.data + +The SET Clause +^^^^^^^^^^^^^^^ + +``ON CONFLICT...DO UPDATE`` is used to perform an update of the already +existing row, using any combination of new values as well as values +from the proposed insertion. These values are specified using the +:paramref:`_sqlite.Insert.on_conflict_do_update.set_` parameter. This +parameter accepts a dictionary which consists of direct values +for UPDATE: + +.. sourcecode:: pycon+sql + + >>> stmt = insert(my_table).values(id="some_id", data="inserted value") + + >>> do_update_stmt = stmt.on_conflict_do_update( + ... index_elements=["id"], set_=dict(data="updated value") + ... ) + + >>> print(do_update_stmt) + {printsql}INSERT INTO my_table (id, data) VALUES (?, ?) + ON CONFLICT (id) DO UPDATE SET data = ? + +.. warning:: + + The :meth:`_sqlite.Insert.on_conflict_do_update` method does **not** take + into account Python-side default UPDATE values or generation functions, + e.g. those specified using :paramref:`_schema.Column.onupdate`. These + values will not be exercised for an ON CONFLICT style of UPDATE, unless + they are manually specified in the + :paramref:`_sqlite.Insert.on_conflict_do_update.set_` dictionary. + +Updating using the Excluded INSERT Values +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +In order to refer to the proposed insertion row, the special alias +:attr:`~.sqlite.Insert.excluded` is available as an attribute on +the :class:`_sqlite.Insert` object; this object creates an "excluded." prefix +on a column, that informs the DO UPDATE to update the row with the value that +would have been inserted had the constraint not failed: + +.. sourcecode:: pycon+sql + + >>> stmt = insert(my_table).values( + ... id="some_id", data="inserted value", author="jlh" + ... ) + + >>> do_update_stmt = stmt.on_conflict_do_update( + ... index_elements=["id"], + ... set_=dict(data="updated value", author=stmt.excluded.author), + ... ) + + >>> print(do_update_stmt) + {printsql}INSERT INTO my_table (id, data, author) VALUES (?, ?, ?) 
+ ON CONFLICT (id) DO UPDATE SET data = ?, author = excluded.author + +Additional WHERE Criteria +^^^^^^^^^^^^^^^^^^^^^^^^^ + +The :meth:`_sqlite.Insert.on_conflict_do_update` method also accepts +a WHERE clause using the :paramref:`_sqlite.Insert.on_conflict_do_update.where` +parameter, which will limit those rows which receive an UPDATE: + +.. sourcecode:: pycon+sql + + >>> stmt = insert(my_table).values( + ... id="some_id", data="inserted value", author="jlh" + ... ) + + >>> on_update_stmt = stmt.on_conflict_do_update( + ... index_elements=["id"], + ... set_=dict(data="updated value", author=stmt.excluded.author), + ... where=(my_table.c.status == 2), + ... ) + >>> print(on_update_stmt) + {printsql}INSERT INTO my_table (id, data, author) VALUES (?, ?, ?) + ON CONFLICT (id) DO UPDATE SET data = ?, author = excluded.author + WHERE my_table.status = ? + + +Skipping Rows with DO NOTHING +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +``ON CONFLICT`` may be used to skip inserting a row entirely +if any conflict with a unique constraint occurs; below this is illustrated +using the :meth:`_sqlite.Insert.on_conflict_do_nothing` method: + +.. sourcecode:: pycon+sql + + >>> stmt = insert(my_table).values(id="some_id", data="inserted value") + >>> stmt = stmt.on_conflict_do_nothing(index_elements=["id"]) + >>> print(stmt) + {printsql}INSERT INTO my_table (id, data) VALUES (?, ?) ON CONFLICT (id) DO NOTHING + + +If ``DO NOTHING`` is used without specifying any columns or constraint, +it has the effect of skipping the INSERT for any unique violation which +occurs: + +.. sourcecode:: pycon+sql + + >>> stmt = insert(my_table).values(id="some_id", data="inserted value") + >>> stmt = stmt.on_conflict_do_nothing() + >>> print(stmt) + {printsql}INSERT INTO my_table (id, data) VALUES (?, ?) ON CONFLICT DO NOTHING + +.. _sqlite_type_reflection: + +Type Reflection +--------------- + +SQLite types are unlike those of most other database backends, in that +the string name of the type usually does not correspond to a "type" in a +one-to-one fashion. Instead, SQLite links per-column typing behavior +to one of five so-called "type affinities" based on a string matching +pattern for the type. + +SQLAlchemy's reflection process, when inspecting types, uses a simple +lookup table to link the keywords returned to provided SQLAlchemy types. +This lookup table is present within the SQLite dialect as it is for all +other dialects. However, the SQLite dialect has a different "fallback" +routine for when a particular type name is not located in the lookup map; +it instead implements the SQLite "type affinity" scheme located at +https://www.sqlite.org/datatype3.html section 2.1. 
+ +The provided typemap will make direct associations from an exact string +name match for the following types: + +:class:`_types.BIGINT`, :class:`_types.BLOB`, +:class:`_types.BOOLEAN`, :class:`_types.BOOLEAN`, +:class:`_types.CHAR`, :class:`_types.DATE`, +:class:`_types.DATETIME`, :class:`_types.FLOAT`, +:class:`_types.DECIMAL`, :class:`_types.FLOAT`, +:class:`_types.INTEGER`, :class:`_types.INTEGER`, +:class:`_types.NUMERIC`, :class:`_types.REAL`, +:class:`_types.SMALLINT`, :class:`_types.TEXT`, +:class:`_types.TIME`, :class:`_types.TIMESTAMP`, +:class:`_types.VARCHAR`, :class:`_types.NVARCHAR`, +:class:`_types.NCHAR` + +When a type name does not match one of the above types, the "type affinity" +lookup is used instead: + +* :class:`_types.INTEGER` is returned if the type name includes the + string ``INT`` +* :class:`_types.TEXT` is returned if the type name includes the + string ``CHAR``, ``CLOB`` or ``TEXT`` +* :class:`_types.NullType` is returned if the type name includes the + string ``BLOB`` +* :class:`_types.REAL` is returned if the type name includes the string + ``REAL``, ``FLOA`` or ``DOUB``. +* Otherwise, the :class:`_types.NUMERIC` type is used. + +.. _sqlite_partial_index: + +Partial Indexes +--------------- + +A partial index, e.g. one which uses a WHERE clause, can be specified +with the DDL system using the argument ``sqlite_where``:: + + tbl = Table("testtbl", m, Column("data", Integer)) + idx = Index( + "test_idx1", + tbl.c.data, + sqlite_where=and_(tbl.c.data > 5, tbl.c.data < 10), + ) + +The index will be rendered at create time as: + +.. sourcecode:: sql + + CREATE INDEX test_idx1 ON testtbl (data) + WHERE data > 5 AND data < 10 + +.. _sqlite_dotted_column_names: + +Dotted Column Names +------------------- + +Using table or column names that explicitly have periods in them is +**not recommended**. While this is generally a bad idea for relational +databases in general, as the dot is a syntactically significant character, +the SQLite driver up until version **3.10.0** of SQLite has a bug which +requires that SQLAlchemy filter out these dots in result sets. + +The bug, entirely outside of SQLAlchemy, can be illustrated thusly:: + + import sqlite3 + + assert sqlite3.sqlite_version_info < ( + 3, + 10, + 0, + ), "bug is fixed in this version" + + conn = sqlite3.connect(":memory:") + cursor = conn.cursor() + + cursor.execute("create table x (a integer, b integer)") + cursor.execute("insert into x (a, b) values (1, 1)") + cursor.execute("insert into x (a, b) values (2, 2)") + + cursor.execute("select x.a, x.b from x") + assert [c[0] for c in cursor.description] == ["a", "b"] + + cursor.execute( + """ + select x.a, x.b from x where a=1 + union + select x.a, x.b from x where a=2 + """ + ) + assert [c[0] for c in cursor.description] == ["a", "b"], [ + c[0] for c in cursor.description + ] + +The second assertion fails: + +.. sourcecode:: text + + Traceback (most recent call last): + File "test.py", line 19, in + [c[0] for c in cursor.description] + AssertionError: ['x.a', 'x.b'] + +Where above, the driver incorrectly reports the names of the columns +including the name of the table, which is entirely inconsistent vs. +when the UNION is not present. 
+ +SQLAlchemy relies upon column names being predictable in how they match +to the original statement, so the SQLAlchemy dialect has no choice but +to filter these out:: + + + from sqlalchemy import create_engine + + eng = create_engine("sqlite://") + conn = eng.connect() + + conn.exec_driver_sql("create table x (a integer, b integer)") + conn.exec_driver_sql("insert into x (a, b) values (1, 1)") + conn.exec_driver_sql("insert into x (a, b) values (2, 2)") + + result = conn.exec_driver_sql("select x.a, x.b from x") + assert result.keys() == ["a", "b"] + + result = conn.exec_driver_sql( + """ + select x.a, x.b from x where a=1 + union + select x.a, x.b from x where a=2 + """ + ) + assert result.keys() == ["a", "b"] + +Note that above, even though SQLAlchemy filters out the dots, *both +names are still addressable*:: + + >>> row = result.first() + >>> row["a"] + 1 + >>> row["x.a"] + 1 + >>> row["b"] + 1 + >>> row["x.b"] + 1 + +Therefore, the workaround applied by SQLAlchemy only impacts +:meth:`_engine.CursorResult.keys` and :meth:`.Row.keys()` in the public API. In +the very specific case where an application is forced to use column names that +contain dots, and the functionality of :meth:`_engine.CursorResult.keys` and +:meth:`.Row.keys()` is required to return these dotted names unmodified, +the ``sqlite_raw_colnames`` execution option may be provided, either on a +per-:class:`_engine.Connection` basis:: + + result = conn.execution_options(sqlite_raw_colnames=True).exec_driver_sql( + """ + select x.a, x.b from x where a=1 + union + select x.a, x.b from x where a=2 + """ + ) + assert result.keys() == ["x.a", "x.b"] + +or on a per-:class:`_engine.Engine` basis:: + + engine = create_engine( + "sqlite://", execution_options={"sqlite_raw_colnames": True} + ) + +When using the per-:class:`_engine.Engine` execution option, note that +**Core and ORM queries that use UNION may not function properly**. + +SQLite-specific table options +----------------------------- + +One option for CREATE TABLE is supported directly by the SQLite +dialect in conjunction with the :class:`_schema.Table` construct: + +* ``WITHOUT ROWID``:: + + Table("some_table", metadata, ..., sqlite_with_rowid=False) + +* + ``STRICT``:: + + Table("some_table", metadata, ..., sqlite_strict=True) + + .. versionadded:: 2.0.37 + +.. seealso:: + + `SQLite CREATE TABLE options + `_ + +.. _sqlite_include_internal: + +Reflecting internal schema tables +---------------------------------- + +Reflection methods that return lists of tables will omit so-called +"SQLite internal schema object" names, which are considered by SQLite +as any object name that is prefixed with ``sqlite_``. An example of +such an object is the ``sqlite_sequence`` table that's generated when +the ``AUTOINCREMENT`` column parameter is used. In order to return +these objects, the parameter ``sqlite_include_internal=True`` may be +passed to methods such as :meth:`_schema.MetaData.reflect` or +:meth:`.Inspector.get_table_names`. + +.. versionadded:: 2.0 Added the ``sqlite_include_internal=True`` parameter. + Previously, these tables were not ignored by SQLAlchemy reflection + methods. + +.. note:: + + The ``sqlite_include_internal`` parameter does not refer to the + "system" tables that are present in schemas such as ``sqlite_master``. + +.. seealso:: + + `SQLite Internal Schema Objects `_ - in the SQLite + documentation. 
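As a short illustration (a sketch only; the database URL is a placeholder),
the parameter may be passed directly to :meth:`.Inspector.get_table_names`::

    from sqlalchemy import create_engine, inspect

    engine = create_engine("sqlite:///myfile.db")
    insp = inspect(engine)

    # internal names such as "sqlite_sequence" are omitted by default
    user_tables = insp.get_table_names()

    # pass sqlite_include_internal=True to include them as well
    all_tables = insp.get_table_names(sqlite_include_internal=True)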
+ +''' # noqa +from __future__ import annotations + +import datetime +import numbers +import re +from typing import Optional + +from .json import JSON +from .json import JSONIndexType +from .json import JSONPathType +from ... import exc +from ... import schema as sa_schema +from ... import sql +from ... import text +from ... import types as sqltypes +from ... import util +from ...engine import default +from ...engine import processors +from ...engine import reflection +from ...engine.reflection import ReflectionDefaults +from ...sql import coercions +from ...sql import compiler +from ...sql import elements +from ...sql import roles +from ...sql import schema +from ...types import BLOB # noqa +from ...types import BOOLEAN # noqa +from ...types import CHAR # noqa +from ...types import DECIMAL # noqa +from ...types import FLOAT # noqa +from ...types import INTEGER # noqa +from ...types import NUMERIC # noqa +from ...types import REAL # noqa +from ...types import SMALLINT # noqa +from ...types import TEXT # noqa +from ...types import TIMESTAMP # noqa +from ...types import VARCHAR # noqa + + +class _SQliteJson(JSON): + def result_processor(self, dialect, coltype): + default_processor = super().result_processor(dialect, coltype) + + def process(value): + try: + return default_processor(value) + except TypeError: + if isinstance(value, numbers.Number): + return value + else: + raise + + return process + + +class _DateTimeMixin: + _reg = None + _storage_format = None + + def __init__(self, storage_format=None, regexp=None, **kw): + super().__init__(**kw) + if regexp is not None: + self._reg = re.compile(regexp) + if storage_format is not None: + self._storage_format = storage_format + + @property + def format_is_text_affinity(self): + """return True if the storage format will automatically imply + a TEXT affinity. + + If the storage format contains no non-numeric characters, + it will imply a NUMERIC storage format on SQLite; in this case, + the type will generate its DDL as DATE_CHAR, DATETIME_CHAR, + TIME_CHAR. + + """ + spec = self._storage_format % { + "year": 0, + "month": 0, + "day": 0, + "hour": 0, + "minute": 0, + "second": 0, + "microsecond": 0, + } + return bool(re.search(r"[^0-9]", spec)) + + def adapt(self, cls, **kw): + if issubclass(cls, _DateTimeMixin): + if self._storage_format: + kw["storage_format"] = self._storage_format + if self._reg: + kw["regexp"] = self._reg + return super().adapt(cls, **kw) + + def literal_processor(self, dialect): + bp = self.bind_processor(dialect) + + def process(value): + return "'%s'" % bp(value) + + return process + + +class DATETIME(_DateTimeMixin, sqltypes.DateTime): + r"""Represent a Python datetime object in SQLite using a string. + + The default string storage format is:: + + "%(year)04d-%(month)02d-%(day)02d %(hour)02d:%(minute)02d:%(second)02d.%(microsecond)06d" + + e.g.: + + .. sourcecode:: text + + 2021-03-15 12:05:57.105542 + + The incoming storage format is by default parsed using the + Python ``datetime.fromisoformat()`` function. + + .. versionchanged:: 2.0 ``datetime.fromisoformat()`` is used for default + datetime string parsing. 
+ + The storage format can be customized to some degree using the + ``storage_format`` and ``regexp`` parameters, such as:: + + import re + from sqlalchemy.dialects.sqlite import DATETIME + + dt = DATETIME( + storage_format=( + "%(year)04d/%(month)02d/%(day)02d %(hour)02d:%(minute)02d:%(second)02d" + ), + regexp=r"(\d+)/(\d+)/(\d+) (\d+)-(\d+)-(\d+)", + ) + + :param truncate_microseconds: when ``True`` microseconds will be truncated + from the datetime. Can't be specified together with ``storage_format`` + or ``regexp``. + + :param storage_format: format string which will be applied to the dict + with keys year, month, day, hour, minute, second, and microsecond. + + :param regexp: regular expression which will be applied to incoming result + rows, replacing the use of ``datetime.fromisoformat()`` to parse incoming + strings. If the regexp contains named groups, the resulting match dict is + applied to the Python datetime() constructor as keyword arguments. + Otherwise, if positional groups are used, the datetime() constructor + is called with positional arguments via + ``*map(int, match_obj.groups(0))``. + + """ # noqa + + _storage_format = ( + "%(year)04d-%(month)02d-%(day)02d " + "%(hour)02d:%(minute)02d:%(second)02d.%(microsecond)06d" + ) + + def __init__(self, *args, **kwargs): + truncate_microseconds = kwargs.pop("truncate_microseconds", False) + super().__init__(*args, **kwargs) + if truncate_microseconds: + assert "storage_format" not in kwargs, ( + "You can specify only " + "one of truncate_microseconds or storage_format." + ) + assert "regexp" not in kwargs, ( + "You can specify only one of " + "truncate_microseconds or regexp." + ) + self._storage_format = ( + "%(year)04d-%(month)02d-%(day)02d " + "%(hour)02d:%(minute)02d:%(second)02d" + ) + + def bind_processor(self, dialect): + datetime_datetime = datetime.datetime + datetime_date = datetime.date + format_ = self._storage_format + + def process(value): + if value is None: + return None + elif isinstance(value, datetime_datetime): + return format_ % { + "year": value.year, + "month": value.month, + "day": value.day, + "hour": value.hour, + "minute": value.minute, + "second": value.second, + "microsecond": value.microsecond, + } + elif isinstance(value, datetime_date): + return format_ % { + "year": value.year, + "month": value.month, + "day": value.day, + "hour": 0, + "minute": 0, + "second": 0, + "microsecond": 0, + } + else: + raise TypeError( + "SQLite DateTime type only accepts Python " + "datetime and date objects as input." + ) + + return process + + def result_processor(self, dialect, coltype): + if self._reg: + return processors.str_to_datetime_processor_factory( + self._reg, datetime.datetime + ) + else: + return processors.str_to_datetime + + +class DATE(_DateTimeMixin, sqltypes.Date): + r"""Represent a Python date object in SQLite using a string. + + The default string storage format is:: + + "%(year)04d-%(month)02d-%(day)02d" + + e.g.: + + .. sourcecode:: text + + 2011-03-15 + + The incoming storage format is by default parsed using the + Python ``date.fromisoformat()`` function. + + .. versionchanged:: 2.0 ``date.fromisoformat()`` is used for default + date string parsing. 
+ + + The storage format can be customized to some degree using the + ``storage_format`` and ``regexp`` parameters, such as:: + + import re + from sqlalchemy.dialects.sqlite import DATE + + d = DATE( + storage_format="%(month)02d/%(day)02d/%(year)04d", + regexp=re.compile("(?P\d+)/(?P\d+)/(?P\d+)"), + ) + + :param storage_format: format string which will be applied to the + dict with keys year, month, and day. + + :param regexp: regular expression which will be applied to + incoming result rows, replacing the use of ``date.fromisoformat()`` to + parse incoming strings. If the regexp contains named groups, the resulting + match dict is applied to the Python date() constructor as keyword + arguments. Otherwise, if positional groups are used, the date() + constructor is called with positional arguments via + ``*map(int, match_obj.groups(0))``. + + """ + + _storage_format = "%(year)04d-%(month)02d-%(day)02d" + + def bind_processor(self, dialect): + datetime_date = datetime.date + format_ = self._storage_format + + def process(value): + if value is None: + return None + elif isinstance(value, datetime_date): + return format_ % { + "year": value.year, + "month": value.month, + "day": value.day, + } + else: + raise TypeError( + "SQLite Date type only accepts Python " + "date objects as input." + ) + + return process + + def result_processor(self, dialect, coltype): + if self._reg: + return processors.str_to_datetime_processor_factory( + self._reg, datetime.date + ) + else: + return processors.str_to_date + + +class TIME(_DateTimeMixin, sqltypes.Time): + r"""Represent a Python time object in SQLite using a string. + + The default string storage format is:: + + "%(hour)02d:%(minute)02d:%(second)02d.%(microsecond)06d" + + e.g.: + + .. sourcecode:: text + + 12:05:57.10558 + + The incoming storage format is by default parsed using the + Python ``time.fromisoformat()`` function. + + .. versionchanged:: 2.0 ``time.fromisoformat()`` is used for default + time string parsing. + + The storage format can be customized to some degree using the + ``storage_format`` and ``regexp`` parameters, such as:: + + import re + from sqlalchemy.dialects.sqlite import TIME + + t = TIME( + storage_format="%(hour)02d-%(minute)02d-%(second)02d-%(microsecond)06d", + regexp=re.compile("(\d+)-(\d+)-(\d+)-(?:-(\d+))?"), + ) + + :param truncate_microseconds: when ``True`` microseconds will be truncated + from the time. Can't be specified together with ``storage_format`` + or ``regexp``. + + :param storage_format: format string which will be applied to the dict + with keys hour, minute, second, and microsecond. + + :param regexp: regular expression which will be applied to incoming result + rows, replacing the use of ``datetime.fromisoformat()`` to parse incoming + strings. If the regexp contains named groups, the resulting match dict is + applied to the Python time() constructor as keyword arguments. Otherwise, + if positional groups are used, the time() constructor is called with + positional arguments via ``*map(int, match_obj.groups(0))``. + + """ + + _storage_format = "%(hour)02d:%(minute)02d:%(second)02d.%(microsecond)06d" + + def __init__(self, *args, **kwargs): + truncate_microseconds = kwargs.pop("truncate_microseconds", False) + super().__init__(*args, **kwargs) + if truncate_microseconds: + assert "storage_format" not in kwargs, ( + "You can specify only " + "one of truncate_microseconds or storage_format." + ) + assert "regexp" not in kwargs, ( + "You can specify only one of " + "truncate_microseconds or regexp." 
+ ) + self._storage_format = "%(hour)02d:%(minute)02d:%(second)02d" + + def bind_processor(self, dialect): + datetime_time = datetime.time + format_ = self._storage_format + + def process(value): + if value is None: + return None + elif isinstance(value, datetime_time): + return format_ % { + "hour": value.hour, + "minute": value.minute, + "second": value.second, + "microsecond": value.microsecond, + } + else: + raise TypeError( + "SQLite Time type only accepts Python " + "time objects as input." + ) + + return process + + def result_processor(self, dialect, coltype): + if self._reg: + return processors.str_to_datetime_processor_factory( + self._reg, datetime.time + ) + else: + return processors.str_to_time + + +colspecs = { + sqltypes.Date: DATE, + sqltypes.DateTime: DATETIME, + sqltypes.JSON: _SQliteJson, + sqltypes.JSON.JSONIndexType: JSONIndexType, + sqltypes.JSON.JSONPathType: JSONPathType, + sqltypes.Time: TIME, +} + +ischema_names = { + "BIGINT": sqltypes.BIGINT, + "BLOB": sqltypes.BLOB, + "BOOL": sqltypes.BOOLEAN, + "BOOLEAN": sqltypes.BOOLEAN, + "CHAR": sqltypes.CHAR, + "DATE": sqltypes.DATE, + "DATE_CHAR": sqltypes.DATE, + "DATETIME": sqltypes.DATETIME, + "DATETIME_CHAR": sqltypes.DATETIME, + "DOUBLE": sqltypes.DOUBLE, + "DECIMAL": sqltypes.DECIMAL, + "FLOAT": sqltypes.FLOAT, + "INT": sqltypes.INTEGER, + "INTEGER": sqltypes.INTEGER, + "JSON": JSON, + "NUMERIC": sqltypes.NUMERIC, + "REAL": sqltypes.REAL, + "SMALLINT": sqltypes.SMALLINT, + "TEXT": sqltypes.TEXT, + "TIME": sqltypes.TIME, + "TIME_CHAR": sqltypes.TIME, + "TIMESTAMP": sqltypes.TIMESTAMP, + "VARCHAR": sqltypes.VARCHAR, + "NVARCHAR": sqltypes.NVARCHAR, + "NCHAR": sqltypes.NCHAR, +} + + +class SQLiteCompiler(compiler.SQLCompiler): + extract_map = util.update_copy( + compiler.SQLCompiler.extract_map, + { + "month": "%m", + "day": "%d", + "year": "%Y", + "second": "%S", + "hour": "%H", + "doy": "%j", + "minute": "%M", + "epoch": "%s", + "dow": "%w", + "week": "%W", + }, + ) + + def visit_truediv_binary(self, binary, operator, **kw): + return ( + self.process(binary.left, **kw) + + " / " + + "(%s + 0.0)" % self.process(binary.right, **kw) + ) + + def visit_now_func(self, fn, **kw): + return "CURRENT_TIMESTAMP" + + def visit_localtimestamp_func(self, func, **kw): + return "DATETIME(CURRENT_TIMESTAMP, 'localtime')" + + def visit_true(self, expr, **kw): + return "1" + + def visit_false(self, expr, **kw): + return "0" + + def visit_char_length_func(self, fn, **kw): + return "length%s" % self.function_argspec(fn) + + def visit_aggregate_strings_func(self, fn, **kw): + return "group_concat%s" % self.function_argspec(fn) + + def visit_cast(self, cast, **kwargs): + if self.dialect.supports_cast: + return super().visit_cast(cast, **kwargs) + else: + return self.process(cast.clause, **kwargs) + + def visit_extract(self, extract, **kw): + try: + return "CAST(STRFTIME('%s', %s) AS INTEGER)" % ( + self.extract_map[extract.field], + self.process(extract.expr, **kw), + ) + except KeyError as err: + raise exc.CompileError( + "%s is not a valid extract argument." 
% extract.field + ) from err + + def returning_clause( + self, + stmt, + returning_cols, + *, + populate_result_map, + **kw, + ): + kw["include_table"] = False + return super().returning_clause( + stmt, returning_cols, populate_result_map=populate_result_map, **kw + ) + + def limit_clause(self, select, **kw): + text = "" + if select._limit_clause is not None: + text += "\n LIMIT " + self.process(select._limit_clause, **kw) + if select._offset_clause is not None: + if select._limit_clause is None: + text += "\n LIMIT " + self.process(sql.literal(-1)) + text += " OFFSET " + self.process(select._offset_clause, **kw) + else: + text += " OFFSET " + self.process(sql.literal(0), **kw) + return text + + def for_update_clause(self, select, **kw): + # sqlite has no "FOR UPDATE" AFAICT + return "" + + def update_from_clause( + self, update_stmt, from_table, extra_froms, from_hints, **kw + ): + kw["asfrom"] = True + return "FROM " + ", ".join( + t._compiler_dispatch(self, fromhints=from_hints, **kw) + for t in extra_froms + ) + + def visit_is_distinct_from_binary(self, binary, operator, **kw): + return "%s IS NOT %s" % ( + self.process(binary.left), + self.process(binary.right), + ) + + def visit_is_not_distinct_from_binary(self, binary, operator, **kw): + return "%s IS %s" % ( + self.process(binary.left), + self.process(binary.right), + ) + + def visit_json_getitem_op_binary(self, binary, operator, **kw): + if binary.type._type_affinity is sqltypes.JSON: + expr = "JSON_QUOTE(JSON_EXTRACT(%s, %s))" + else: + expr = "JSON_EXTRACT(%s, %s)" + + return expr % ( + self.process(binary.left, **kw), + self.process(binary.right, **kw), + ) + + def visit_json_path_getitem_op_binary(self, binary, operator, **kw): + if binary.type._type_affinity is sqltypes.JSON: + expr = "JSON_QUOTE(JSON_EXTRACT(%s, %s))" + else: + expr = "JSON_EXTRACT(%s, %s)" + + return expr % ( + self.process(binary.left, **kw), + self.process(binary.right, **kw), + ) + + def visit_empty_set_op_expr(self, type_, expand_op, **kw): + # slightly old SQLite versions don't seem to be able to handle + # the empty set impl + return self.visit_empty_set_expr(type_) + + def visit_empty_set_expr(self, element_types, **kw): + return "SELECT %s FROM (SELECT %s) WHERE 1!=1" % ( + ", ".join("1" for type_ in element_types or [INTEGER()]), + ", ".join("1" for type_ in element_types or [INTEGER()]), + ) + + def visit_regexp_match_op_binary(self, binary, operator, **kw): + return self._generate_generic_binary(binary, " REGEXP ", **kw) + + def visit_not_regexp_match_op_binary(self, binary, operator, **kw): + return self._generate_generic_binary(binary, " NOT REGEXP ", **kw) + + def _on_conflict_target(self, clause, **kw): + if clause.inferred_target_elements is not None: + target_text = "(%s)" % ", ".join( + ( + self.preparer.quote(c) + if isinstance(c, str) + else self.process(c, include_table=False, use_schema=False) + ) + for c in clause.inferred_target_elements + ) + if clause.inferred_target_whereclause is not None: + target_text += " WHERE %s" % self.process( + clause.inferred_target_whereclause, + include_table=False, + use_schema=False, + literal_execute=True, + ) + + else: + target_text = "" + + return target_text + + def visit_on_conflict_do_nothing(self, on_conflict, **kw): + target_text = self._on_conflict_target(on_conflict, **kw) + + if target_text: + return "ON CONFLICT %s DO NOTHING" % target_text + else: + return "ON CONFLICT DO NOTHING" + + def visit_on_conflict_do_update(self, on_conflict, **kw): + clause = on_conflict + + target_text = 
self._on_conflict_target(on_conflict, **kw) + + action_set_ops = [] + + set_parameters = dict(clause.update_values_to_set) + # create a list of column assignment clauses as tuples + + insert_statement = self.stack[-1]["selectable"] + cols = insert_statement.table.c + for c in cols: + col_key = c.key + + if col_key in set_parameters: + value = set_parameters.pop(col_key) + elif c in set_parameters: + value = set_parameters.pop(c) + else: + continue + + if coercions._is_literal(value): + value = elements.BindParameter(None, value, type_=c.type) + + else: + if ( + isinstance(value, elements.BindParameter) + and value.type._isnull + ): + value = value._clone() + value.type = c.type + value_text = self.process(value.self_group(), use_schema=False) + + key_text = self.preparer.quote(c.name) + action_set_ops.append("%s = %s" % (key_text, value_text)) + + # check for names that don't match columns + if set_parameters: + util.warn( + "Additional column names not matching " + "any column keys in table '%s': %s" + % ( + self.current_executable.table.name, + (", ".join("'%s'" % c for c in set_parameters)), + ) + ) + for k, v in set_parameters.items(): + key_text = ( + self.preparer.quote(k) + if isinstance(k, str) + else self.process(k, use_schema=False) + ) + value_text = self.process( + coercions.expect(roles.ExpressionElementRole, v), + use_schema=False, + ) + action_set_ops.append("%s = %s" % (key_text, value_text)) + + action_text = ", ".join(action_set_ops) + if clause.update_whereclause is not None: + action_text += " WHERE %s" % self.process( + clause.update_whereclause, include_table=True, use_schema=False + ) + + return "ON CONFLICT %s DO UPDATE SET %s" % (target_text, action_text) + + def visit_bitwise_xor_op_binary(self, binary, operator, **kw): + # sqlite has no xor. Use "a XOR b" = "(a | b) - (a & b)". 
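+        # Editor's note (illustrative, not part of the upstream source): the
+        # identity can be spot-checked with small integers, e.g. a=5, b=3:
+        # (5 | 3) - (5 & 3) = 7 - 1 = 6, which equals 5 ^ 3.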
+ kw["eager_grouping"] = True + or_ = self._generate_generic_binary(binary, " | ", **kw) + and_ = self._generate_generic_binary(binary, " & ", **kw) + return f"({or_} - {and_})" + + +class SQLiteDDLCompiler(compiler.DDLCompiler): + def get_column_specification(self, column, **kwargs): + coltype = self.dialect.type_compiler_instance.process( + column.type, type_expression=column + ) + colspec = self.preparer.format_column(column) + " " + coltype + default = self.get_column_default_string(column) + if default is not None: + + if not re.match(r"""^\s*[\'\"\(]""", default) and re.match( + r".*\W.*", default + ): + colspec += f" DEFAULT ({default})" + else: + colspec += f" DEFAULT {default}" + + if not column.nullable: + colspec += " NOT NULL" + + on_conflict_clause = column.dialect_options["sqlite"][ + "on_conflict_not_null" + ] + if on_conflict_clause is not None: + colspec += " ON CONFLICT " + on_conflict_clause + + if column.primary_key: + if ( + column.autoincrement is True + and len(column.table.primary_key.columns) != 1 + ): + raise exc.CompileError( + "SQLite does not support autoincrement for " + "composite primary keys" + ) + + if ( + column.table.dialect_options["sqlite"]["autoincrement"] + and len(column.table.primary_key.columns) == 1 + and issubclass(column.type._type_affinity, sqltypes.Integer) + and not column.foreign_keys + ): + colspec += " PRIMARY KEY" + + on_conflict_clause = column.dialect_options["sqlite"][ + "on_conflict_primary_key" + ] + if on_conflict_clause is not None: + colspec += " ON CONFLICT " + on_conflict_clause + + colspec += " AUTOINCREMENT" + + if column.computed is not None: + colspec += " " + self.process(column.computed) + + return colspec + + def visit_primary_key_constraint(self, constraint, **kw): + # for columns with sqlite_autoincrement=True, + # the PRIMARY KEY constraint can only be inline + # with the column itself. 
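+        # Editor's note (illustrative, not part of the upstream source): in
+        # that case get_column_specification() above renders something like
+        # "id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT" inline, so this
+        # method returns None to suppress a separate "PRIMARY KEY (id)"
+        # table-level clause.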
+ if len(constraint.columns) == 1: + c = list(constraint)[0] + if ( + c.primary_key + and c.table.dialect_options["sqlite"]["autoincrement"] + and issubclass(c.type._type_affinity, sqltypes.Integer) + and not c.foreign_keys + ): + return None + + text = super().visit_primary_key_constraint(constraint) + + on_conflict_clause = constraint.dialect_options["sqlite"][ + "on_conflict" + ] + if on_conflict_clause is None and len(constraint.columns) == 1: + on_conflict_clause = list(constraint)[0].dialect_options["sqlite"][ + "on_conflict_primary_key" + ] + + if on_conflict_clause is not None: + text += " ON CONFLICT " + on_conflict_clause + + return text + + def visit_unique_constraint(self, constraint, **kw): + text = super().visit_unique_constraint(constraint) + + on_conflict_clause = constraint.dialect_options["sqlite"][ + "on_conflict" + ] + if on_conflict_clause is None and len(constraint.columns) == 1: + col1 = list(constraint)[0] + if isinstance(col1, schema.SchemaItem): + on_conflict_clause = list(constraint)[0].dialect_options[ + "sqlite" + ]["on_conflict_unique"] + + if on_conflict_clause is not None: + text += " ON CONFLICT " + on_conflict_clause + + return text + + def visit_check_constraint(self, constraint, **kw): + text = super().visit_check_constraint(constraint) + + on_conflict_clause = constraint.dialect_options["sqlite"][ + "on_conflict" + ] + + if on_conflict_clause is not None: + text += " ON CONFLICT " + on_conflict_clause + + return text + + def visit_column_check_constraint(self, constraint, **kw): + text = super().visit_column_check_constraint(constraint) + + if constraint.dialect_options["sqlite"]["on_conflict"] is not None: + raise exc.CompileError( + "SQLite does not support on conflict clause for " + "column check constraint" + ) + + return text + + def visit_foreign_key_constraint(self, constraint, **kw): + local_table = constraint.elements[0].parent.table + remote_table = constraint.elements[0].column.table + + if local_table.schema != remote_table.schema: + return None + else: + return super().visit_foreign_key_constraint(constraint) + + def define_constraint_remote_table(self, constraint, table, preparer): + """Format the remote table clause of a CREATE CONSTRAINT clause.""" + + return preparer.format_table(table, use_schema=False) + + def visit_create_index( + self, create, include_schema=False, include_table_schema=True, **kw + ): + index = create.element + self._verify_index_table(index) + preparer = self.preparer + text = "CREATE " + if index.unique: + text += "UNIQUE " + + text += "INDEX " + + if create.if_not_exists: + text += "IF NOT EXISTS " + + text += "%s ON %s (%s)" % ( + self._prepared_index_name(index, include_schema=True), + preparer.format_table(index.table, use_schema=False), + ", ".join( + self.sql_compiler.process( + expr, include_table=False, literal_binds=True + ) + for expr in index.expressions + ), + ) + + whereclause = index.dialect_options["sqlite"]["where"] + if whereclause is not None: + where_compiled = self.sql_compiler.process( + whereclause, include_table=False, literal_binds=True + ) + text += " WHERE " + where_compiled + + return text + + def post_create_table(self, table): + table_options = [] + + if not table.dialect_options["sqlite"]["with_rowid"]: + table_options.append("WITHOUT ROWID") + + if table.dialect_options["sqlite"]["strict"]: + table_options.append("STRICT") + + if table_options: + return "\n " + ",\n ".join(table_options) + else: + return "" + + +class SQLiteTypeCompiler(compiler.GenericTypeCompiler): + def 
visit_large_binary(self, type_, **kw): + return self.visit_BLOB(type_) + + def visit_DATETIME(self, type_, **kw): + if ( + not isinstance(type_, _DateTimeMixin) + or type_.format_is_text_affinity + ): + return super().visit_DATETIME(type_) + else: + return "DATETIME_CHAR" + + def visit_DATE(self, type_, **kw): + if ( + not isinstance(type_, _DateTimeMixin) + or type_.format_is_text_affinity + ): + return super().visit_DATE(type_) + else: + return "DATE_CHAR" + + def visit_TIME(self, type_, **kw): + if ( + not isinstance(type_, _DateTimeMixin) + or type_.format_is_text_affinity + ): + return super().visit_TIME(type_) + else: + return "TIME_CHAR" + + def visit_JSON(self, type_, **kw): + # note this name provides NUMERIC affinity, not TEXT. + # should not be an issue unless the JSON value consists of a single + # numeric value. JSONTEXT can be used if this case is required. + return "JSON" + + +class SQLiteIdentifierPreparer(compiler.IdentifierPreparer): + reserved_words = { + "add", + "after", + "all", + "alter", + "analyze", + "and", + "as", + "asc", + "attach", + "autoincrement", + "before", + "begin", + "between", + "by", + "cascade", + "case", + "cast", + "check", + "collate", + "column", + "commit", + "conflict", + "constraint", + "create", + "cross", + "current_date", + "current_time", + "current_timestamp", + "database", + "default", + "deferrable", + "deferred", + "delete", + "desc", + "detach", + "distinct", + "drop", + "each", + "else", + "end", + "escape", + "except", + "exclusive", + "exists", + "explain", + "false", + "fail", + "for", + "foreign", + "from", + "full", + "glob", + "group", + "having", + "if", + "ignore", + "immediate", + "in", + "index", + "indexed", + "initially", + "inner", + "insert", + "instead", + "intersect", + "into", + "is", + "isnull", + "join", + "key", + "left", + "like", + "limit", + "match", + "natural", + "not", + "notnull", + "null", + "of", + "offset", + "on", + "or", + "order", + "outer", + "plan", + "pragma", + "primary", + "query", + "raise", + "references", + "reindex", + "rename", + "replace", + "restrict", + "right", + "rollback", + "row", + "select", + "set", + "table", + "temp", + "temporary", + "then", + "to", + "transaction", + "trigger", + "true", + "union", + "unique", + "update", + "using", + "vacuum", + "values", + "view", + "virtual", + "when", + "where", + } + + +class SQLiteExecutionContext(default.DefaultExecutionContext): + @util.memoized_property + def _preserve_raw_colnames(self): + return ( + not self.dialect._broken_dotted_colnames + or self.execution_options.get("sqlite_raw_colnames", False) + ) + + def _translate_colname(self, colname): + # TODO: detect SQLite version 3.10.0 or greater; + # see [ticket:3633] + + # adjust for dotted column names. SQLite + # in the case of UNION may store col names as + # "tablename.colname", or if using an attached database, + # "database.tablename.colname", in cursor.description + if not self._preserve_raw_colnames and "." 
in colname: + return colname.split(".")[-1], colname + else: + return colname, None + + +class SQLiteDialect(default.DefaultDialect): + name = "sqlite" + supports_alter = False + + # SQlite supports "DEFAULT VALUES" but *does not* support + # "VALUES (DEFAULT)" + supports_default_values = True + supports_default_metavalue = False + + # sqlite issue: + # https://github.com/python/cpython/issues/93421 + # note this parameter is no longer used by the ORM or default dialect + # see #9414 + supports_sane_rowcount_returning = False + + supports_empty_insert = False + supports_cast = True + supports_multivalues_insert = True + use_insertmanyvalues = True + tuple_in_values = True + supports_statement_cache = True + insert_null_pk_still_autoincrements = True + insert_returning = True + update_returning = True + update_returning_multifrom = True + delete_returning = True + update_returning_multifrom = True + + supports_default_metavalue = True + """dialect supports INSERT... VALUES (DEFAULT) syntax""" + + default_metavalue_token = "NULL" + """for INSERT... VALUES (DEFAULT) syntax, the token to put in the + parenthesis.""" + + default_paramstyle = "qmark" + execution_ctx_cls = SQLiteExecutionContext + statement_compiler = SQLiteCompiler + ddl_compiler = SQLiteDDLCompiler + type_compiler_cls = SQLiteTypeCompiler + preparer = SQLiteIdentifierPreparer + ischema_names = ischema_names + colspecs = colspecs + + construct_arguments = [ + ( + sa_schema.Table, + { + "autoincrement": False, + "with_rowid": True, + "strict": False, + }, + ), + (sa_schema.Index, {"where": None}), + ( + sa_schema.Column, + { + "on_conflict_primary_key": None, + "on_conflict_not_null": None, + "on_conflict_unique": None, + }, + ), + (sa_schema.Constraint, {"on_conflict": None}), + ] + + _broken_fk_pragma_quotes = False + _broken_dotted_colnames = False + + @util.deprecated_params( + _json_serializer=( + "1.3.7", + "The _json_serializer argument to the SQLite dialect has " + "been renamed to the correct name of json_serializer. The old " + "argument name will be removed in a future release.", + ), + _json_deserializer=( + "1.3.7", + "The _json_deserializer argument to the SQLite dialect has " + "been renamed to the correct name of json_deserializer. The old " + "argument name will be removed in a future release.", + ), + ) + def __init__( + self, + native_datetime=False, + json_serializer=None, + json_deserializer=None, + _json_serializer=None, + _json_deserializer=None, + **kwargs, + ): + default.DefaultDialect.__init__(self, **kwargs) + + if _json_serializer: + json_serializer = _json_serializer + if _json_deserializer: + json_deserializer = _json_deserializer + self._json_serializer = json_serializer + self._json_deserializer = json_deserializer + + # this flag used by pysqlite dialect, and perhaps others in the + # future, to indicate the driver is handling date/timestamp + # conversions (and perhaps datetime/time as well on some hypothetical + # driver ?) + self.native_datetime = native_datetime + + if self.dbapi is not None: + if self.dbapi.sqlite_version_info < (3, 7, 16): + util.warn( + "SQLite version %s is older than 3.7.16, and will not " + "support right nested joins, as are sometimes used in " + "more complex ORM scenarios. SQLAlchemy 1.4 and above " + "no longer tries to rewrite these joins." + % (self.dbapi.sqlite_version_info,) + ) + + # NOTE: python 3.7 on fedora for me has SQLite 3.34.1. These + # version checks are getting very stale. 
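+            # Editor's note (illustrative, not part of the upstream source):
+            # the checks below toggle dotted-column-name handling,
+            # DEFAULT VALUES, CAST, multi-row VALUES, quoted-FK pragma
+            # workarounds, RETURNING support and the insertmanyvalues
+            # parameter limit according to the runtime sqlite_version_info.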
+ self._broken_dotted_colnames = self.dbapi.sqlite_version_info < ( + 3, + 10, + 0, + ) + self.supports_default_values = self.dbapi.sqlite_version_info >= ( + 3, + 3, + 8, + ) + self.supports_cast = self.dbapi.sqlite_version_info >= (3, 2, 3) + self.supports_multivalues_insert = ( + # https://www.sqlite.org/releaselog/3_7_11.html + self.dbapi.sqlite_version_info + >= (3, 7, 11) + ) + # see https://www.sqlalchemy.org/trac/ticket/2568 + # as well as https://www.sqlite.org/src/info/600482d161 + self._broken_fk_pragma_quotes = self.dbapi.sqlite_version_info < ( + 3, + 6, + 14, + ) + + if self.dbapi.sqlite_version_info < (3, 35) or util.pypy: + self.update_returning = self.delete_returning = ( + self.insert_returning + ) = False + + if self.dbapi.sqlite_version_info < (3, 32, 0): + # https://www.sqlite.org/limits.html + self.insertmanyvalues_max_parameters = 999 + + _isolation_lookup = util.immutabledict( + {"READ UNCOMMITTED": 1, "SERIALIZABLE": 0} + ) + + def get_isolation_level_values(self, dbapi_connection): + return list(self._isolation_lookup) + + def set_isolation_level(self, dbapi_connection, level): + isolation_level = self._isolation_lookup[level] + + cursor = dbapi_connection.cursor() + cursor.execute(f"PRAGMA read_uncommitted = {isolation_level}") + cursor.close() + + def get_isolation_level(self, dbapi_connection): + cursor = dbapi_connection.cursor() + cursor.execute("PRAGMA read_uncommitted") + res = cursor.fetchone() + if res: + value = res[0] + else: + # https://www.sqlite.org/changes.html#version_3_3_3 + # "Optional READ UNCOMMITTED isolation (instead of the + # default isolation level of SERIALIZABLE) and + # table level locking when database connections + # share a common cache."" + # pre-SQLite 3.3.0 default to 0 + value = 0 + cursor.close() + if value == 0: + return "SERIALIZABLE" + elif value == 1: + return "READ UNCOMMITTED" + else: + assert False, "Unknown isolation level %s" % value + + @reflection.cache + def get_schema_names(self, connection, **kw): + s = "PRAGMA database_list" + dl = connection.exec_driver_sql(s) + + return [db[1] for db in dl if db[1] != "temp"] + + def _format_schema(self, schema, table_name): + if schema is not None: + qschema = self.identifier_preparer.quote_identifier(schema) + name = f"{qschema}.{table_name}" + else: + name = table_name + return name + + def _sqlite_main_query( + self, + table: str, + type_: str, + schema: Optional[str], + sqlite_include_internal: bool, + ): + main = self._format_schema(schema, table) + if not sqlite_include_internal: + filter_table = " AND name NOT LIKE 'sqlite~_%' ESCAPE '~'" + else: + filter_table = "" + query = ( + f"SELECT name FROM {main} " + f"WHERE type='{type_}'{filter_table} " + "ORDER BY name" + ) + return query + + @reflection.cache + def get_table_names( + self, connection, schema=None, sqlite_include_internal=False, **kw + ): + query = self._sqlite_main_query( + "sqlite_master", "table", schema, sqlite_include_internal + ) + names = connection.exec_driver_sql(query).scalars().all() + return names + + @reflection.cache + def get_temp_table_names( + self, connection, sqlite_include_internal=False, **kw + ): + query = self._sqlite_main_query( + "sqlite_temp_master", "table", None, sqlite_include_internal + ) + names = connection.exec_driver_sql(query).scalars().all() + return names + + @reflection.cache + def get_temp_view_names( + self, connection, sqlite_include_internal=False, **kw + ): + query = self._sqlite_main_query( + "sqlite_temp_master", "view", None, sqlite_include_internal + ) + names = 
connection.exec_driver_sql(query).scalars().all() + return names + + @reflection.cache + def has_table(self, connection, table_name, schema=None, **kw): + self._ensure_has_table_connection(connection) + + if schema is not None and schema not in self.get_schema_names( + connection, **kw + ): + return False + + info = self._get_table_pragma( + connection, "table_info", table_name, schema=schema + ) + return bool(info) + + def _get_default_schema_name(self, connection): + return "main" + + @reflection.cache + def get_view_names( + self, connection, schema=None, sqlite_include_internal=False, **kw + ): + query = self._sqlite_main_query( + "sqlite_master", "view", schema, sqlite_include_internal + ) + names = connection.exec_driver_sql(query).scalars().all() + return names + + @reflection.cache + def get_view_definition(self, connection, view_name, schema=None, **kw): + if schema is not None: + qschema = self.identifier_preparer.quote_identifier(schema) + master = f"{qschema}.sqlite_master" + s = ("SELECT sql FROM %s WHERE name = ? AND type='view'") % ( + master, + ) + rs = connection.exec_driver_sql(s, (view_name,)) + else: + try: + s = ( + "SELECT sql FROM " + " (SELECT * FROM sqlite_master UNION ALL " + " SELECT * FROM sqlite_temp_master) " + "WHERE name = ? " + "AND type='view'" + ) + rs = connection.exec_driver_sql(s, (view_name,)) + except exc.DBAPIError: + s = ( + "SELECT sql FROM sqlite_master WHERE name = ? " + "AND type='view'" + ) + rs = connection.exec_driver_sql(s, (view_name,)) + + result = rs.fetchall() + if result: + return result[0].sql + else: + raise exc.NoSuchTableError( + f"{schema}.{view_name}" if schema else view_name + ) + + @reflection.cache + def get_columns(self, connection, table_name, schema=None, **kw): + pragma = "table_info" + # computed columns are threaded as hidden, they require table_xinfo + if self.server_version_info >= (3, 31): + pragma = "table_xinfo" + info = self._get_table_pragma( + connection, pragma, table_name, schema=schema + ) + columns = [] + tablesql = None + for row in info: + name = row[1] + type_ = row[2].upper() + nullable = not row[3] + default = row[4] + primary_key = row[5] + hidden = row[6] if pragma == "table_xinfo" else 0 + + # hidden has value 0 for normal columns, 1 for hidden columns, + # 2 for computed virtual columns and 3 for computed stored columns + # https://www.sqlite.org/src/info/069351b85f9a706f60d3e98fbc8aaf40c374356b967c0464aede30ead3d9d18b + if hidden == 1: + continue + + generated = bool(hidden) + persisted = hidden == 3 + + if tablesql is None and generated: + tablesql = self._get_table_sql( + connection, table_name, schema, **kw + ) + # remove create table + match = re.match( + r"create table .*?\((.*)\)$", + tablesql.strip(), + re.DOTALL | re.IGNORECASE, + ) + assert match, f"create table not found in {tablesql}" + tablesql = match.group(1).strip() + + columns.append( + self._get_column_info( + name, + type_, + nullable, + default, + primary_key, + generated, + persisted, + tablesql, + ) + ) + if columns: + return columns + elif not self.has_table(connection, table_name, schema): + raise exc.NoSuchTableError( + f"{schema}.{table_name}" if schema else table_name + ) + else: + return ReflectionDefaults.columns() + + def _get_column_info( + self, + name, + type_, + nullable, + default, + primary_key, + generated, + persisted, + tablesql, + ): + if generated: + # the type of a column "cc INTEGER GENERATED ALWAYS AS (1 + 42)" + # somehow is "INTEGER GENERATED ALWAYS" + type_ = re.sub("generated", "", type_, 
flags=re.IGNORECASE) + type_ = re.sub("always", "", type_, flags=re.IGNORECASE).strip() + + coltype = self._resolve_type_affinity(type_) + + if default is not None: + default = str(default) + + colspec = { + "name": name, + "type": coltype, + "nullable": nullable, + "default": default, + "primary_key": primary_key, + } + if generated: + sqltext = "" + if tablesql: + pattern = ( + r"[^,]*\s+GENERATED\s+ALWAYS\s+AS" + r"\s+\((.*)\)\s*(?:virtual|stored)?" + ) + match = re.search( + re.escape(name) + pattern, tablesql, re.IGNORECASE + ) + if match: + sqltext = match.group(1) + colspec["computed"] = {"sqltext": sqltext, "persisted": persisted} + return colspec + + def _resolve_type_affinity(self, type_): + """Return a data type from a reflected column, using affinity rules. + + SQLite's goal for universal compatibility introduces some complexity + during reflection, as a column's defined type might not actually be a + type that SQLite understands - or indeed, my not be defined *at all*. + Internally, SQLite handles this with a 'data type affinity' for each + column definition, mapping to one of 'TEXT', 'NUMERIC', 'INTEGER', + 'REAL', or 'NONE' (raw bits). The algorithm that determines this is + listed in https://www.sqlite.org/datatype3.html section 2.1. + + This method allows SQLAlchemy to support that algorithm, while still + providing access to smarter reflection utilities by recognizing + column definitions that SQLite only supports through affinity (like + DATE and DOUBLE). + + """ + match = re.match(r"([\w ]+)(\(.*?\))?", type_) + if match: + coltype = match.group(1) + args = match.group(2) + else: + coltype = "" + args = "" + + if coltype in self.ischema_names: + coltype = self.ischema_names[coltype] + elif "INT" in coltype: + coltype = sqltypes.INTEGER + elif "CHAR" in coltype or "CLOB" in coltype or "TEXT" in coltype: + coltype = sqltypes.TEXT + elif "BLOB" in coltype or not coltype: + coltype = sqltypes.NullType + elif "REAL" in coltype or "FLOA" in coltype or "DOUB" in coltype: + coltype = sqltypes.REAL + else: + coltype = sqltypes.NUMERIC + + if args is not None: + args = re.findall(r"(\d+)", args) + try: + coltype = coltype(*[int(a) for a in args]) + except TypeError: + util.warn( + "Could not instantiate type %s with " + "reflected arguments %s; using no arguments." + % (coltype, args) + ) + coltype = coltype() + else: + coltype = coltype() + + return coltype + + @reflection.cache + def get_pk_constraint(self, connection, table_name, schema=None, **kw): + constraint_name = None + table_data = self._get_table_sql(connection, table_name, schema=schema) + if table_data: + PK_PATTERN = r"CONSTRAINT (\w+) PRIMARY KEY" + result = re.search(PK_PATTERN, table_data, re.I) + constraint_name = result.group(1) if result else None + + cols = self.get_columns(connection, table_name, schema, **kw) + # consider only pk columns. This also avoids sorting the cached + # value returned by get_columns + cols = [col for col in cols if col.get("primary_key", 0) > 0] + cols.sort(key=lambda col: col.get("primary_key")) + pkeys = [col["name"] for col in cols] + + if pkeys: + return {"constrained_columns": pkeys, "name": constraint_name} + else: + return ReflectionDefaults.pk_constraint() + + @reflection.cache + def get_foreign_keys(self, connection, table_name, schema=None, **kw): + # sqlite makes this *extremely difficult*. + # First, use the pragma to get the actual FKs. 
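+        # Editor's note (illustrative, not part of the upstream source):
+        # PRAGMA foreign_key_list returns rows of the form
+        # (id, seq, table, from, to, on_update, on_delete, match); the loop
+        # below reads row[0] (constraint id), row[2] (referred table),
+        # row[3] (local column) and row[4] (referred column).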
+ pragma_fks = self._get_table_pragma( + connection, "foreign_key_list", table_name, schema=schema + ) + + fks = {} + + for row in pragma_fks: + (numerical_id, rtbl, lcol, rcol) = (row[0], row[2], row[3], row[4]) + + if not rcol: + # no referred column, which means it was not named in the + # original DDL. The referred columns of the foreign key + # constraint are therefore the primary key of the referred + # table. + try: + referred_pk = self.get_pk_constraint( + connection, rtbl, schema=schema, **kw + ) + referred_columns = referred_pk["constrained_columns"] + except exc.NoSuchTableError: + # ignore not existing parents + referred_columns = [] + else: + # note we use this list only if this is the first column + # in the constraint. for subsequent columns we ignore the + # list and append "rcol" if present. + referred_columns = [] + + if self._broken_fk_pragma_quotes: + rtbl = re.sub(r"^[\"\[`\']|[\"\]`\']$", "", rtbl) + + if numerical_id in fks: + fk = fks[numerical_id] + else: + fk = fks[numerical_id] = { + "name": None, + "constrained_columns": [], + "referred_schema": schema, + "referred_table": rtbl, + "referred_columns": referred_columns, + "options": {}, + } + fks[numerical_id] = fk + + fk["constrained_columns"].append(lcol) + + if rcol: + fk["referred_columns"].append(rcol) + + def fk_sig(constrained_columns, referred_table, referred_columns): + return ( + tuple(constrained_columns) + + (referred_table,) + + tuple(referred_columns) + ) + + # then, parse the actual SQL and attempt to find DDL that matches + # the names as well. SQLite saves the DDL in whatever format + # it was typed in as, so need to be liberal here. + + keys_by_signature = { + fk_sig( + fk["constrained_columns"], + fk["referred_table"], + fk["referred_columns"], + ): fk + for fk in fks.values() + } + + table_data = self._get_table_sql(connection, table_name, schema=schema) + + def parse_fks(): + if table_data is None: + # system tables, etc. + return + + # note that we already have the FKs from PRAGMA above. This whole + # regexp thing is trying to locate additional detail about the + # FKs, namely the name of the constraint and other options. + # so parsing the columns is really about matching it up to what + # we already have. + FK_PATTERN = ( + r"(?:CONSTRAINT (\w+) +)?" + r"FOREIGN KEY *\( *(.+?) *\) +" + r'REFERENCES +(?:(?:"(.+?)")|([a-z0-9_]+)) *\( *((?:(?:"[^"]+"|[a-z0-9_]+) *(?:, *)?)+)\) *' # noqa: E501 + r"((?:ON (?:DELETE|UPDATE) " + r"(?:SET NULL|SET DEFAULT|CASCADE|RESTRICT|NO ACTION) *)*)" + r"((?:NOT +)?DEFERRABLE)?" + r"(?: +INITIALLY +(DEFERRED|IMMEDIATE))?" 
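+                # Editor's note (illustrative, not part of the upstream
+                # source): groups 1-8 capture the constraint name, the
+                # constrained columns, the quoted or unquoted referred table,
+                # the referred columns, the ON UPDATE/DELETE actions,
+                # DEFERRABLE and INITIALLY, matching the unpacking below.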
+ ) + for match in re.finditer(FK_PATTERN, table_data, re.I): + ( + constraint_name, + constrained_columns, + referred_quoted_name, + referred_name, + referred_columns, + onupdatedelete, + deferrable, + initially, + ) = match.group(1, 2, 3, 4, 5, 6, 7, 8) + constrained_columns = list( + self._find_cols_in_sig(constrained_columns) + ) + if not referred_columns: + referred_columns = constrained_columns + else: + referred_columns = list( + self._find_cols_in_sig(referred_columns) + ) + referred_name = referred_quoted_name or referred_name + options = {} + + for token in re.split(r" *\bON\b *", onupdatedelete.upper()): + if token.startswith("DELETE"): + ondelete = token[6:].strip() + if ondelete and ondelete != "NO ACTION": + options["ondelete"] = ondelete + elif token.startswith("UPDATE"): + onupdate = token[6:].strip() + if onupdate and onupdate != "NO ACTION": + options["onupdate"] = onupdate + + if deferrable: + options["deferrable"] = "NOT" not in deferrable.upper() + if initially: + options["initially"] = initially.upper() + + yield ( + constraint_name, + constrained_columns, + referred_name, + referred_columns, + options, + ) + + fkeys = [] + + for ( + constraint_name, + constrained_columns, + referred_name, + referred_columns, + options, + ) in parse_fks(): + sig = fk_sig(constrained_columns, referred_name, referred_columns) + if sig not in keys_by_signature: + util.warn( + "WARNING: SQL-parsed foreign key constraint " + "'%s' could not be located in PRAGMA " + "foreign_keys for table %s" % (sig, table_name) + ) + continue + key = keys_by_signature.pop(sig) + key["name"] = constraint_name + key["options"] = options + fkeys.append(key) + # assume the remainders are the unnamed, inline constraints, just + # use them as is as it's extremely difficult to parse inline + # constraints + fkeys.extend(keys_by_signature.values()) + if fkeys: + return fkeys + else: + return ReflectionDefaults.foreign_keys() + + def _find_cols_in_sig(self, sig): + for match in re.finditer(r'(?:"(.+?)")|([a-z0-9_]+)', sig, re.I): + yield match.group(1) or match.group(2) + + @reflection.cache + def get_unique_constraints( + self, connection, table_name, schema=None, **kw + ): + auto_index_by_sig = {} + for idx in self.get_indexes( + connection, + table_name, + schema=schema, + include_auto_indexes=True, + **kw, + ): + if not idx["name"].startswith("sqlite_autoindex"): + continue + sig = tuple(idx["column_names"]) + auto_index_by_sig[sig] = idx + + table_data = self._get_table_sql( + connection, table_name, schema=schema, **kw + ) + unique_constraints = [] + + def parse_uqs(): + if table_data is None: + return + UNIQUE_PATTERN = r'(?:CONSTRAINT "?(.+?)"? 
+)?UNIQUE *\((.+?)\)' + INLINE_UNIQUE_PATTERN = ( + r'(?:(".+?")|(?:[\[`])?([a-z0-9_]+)(?:[\]`])?)[\t ]' + r"+[a-z0-9_ ]+?[\t ]+UNIQUE" + ) + + for match in re.finditer(UNIQUE_PATTERN, table_data, re.I): + name, cols = match.group(1, 2) + yield name, list(self._find_cols_in_sig(cols)) + + # we need to match inlines as well, as we seek to differentiate + # a UNIQUE constraint from a UNIQUE INDEX, even though these + # are kind of the same thing :) + for match in re.finditer(INLINE_UNIQUE_PATTERN, table_data, re.I): + cols = list( + self._find_cols_in_sig(match.group(1) or match.group(2)) + ) + yield None, cols + + for name, cols in parse_uqs(): + sig = tuple(cols) + if sig in auto_index_by_sig: + auto_index_by_sig.pop(sig) + parsed_constraint = {"name": name, "column_names": cols} + unique_constraints.append(parsed_constraint) + # NOTE: auto_index_by_sig might not be empty here, + # the PRIMARY KEY may have an entry. + if unique_constraints: + return unique_constraints + else: + return ReflectionDefaults.unique_constraints() + + @reflection.cache + def get_check_constraints(self, connection, table_name, schema=None, **kw): + table_data = self._get_table_sql( + connection, table_name, schema=schema, **kw + ) + + # NOTE NOTE NOTE + # DO NOT CHANGE THIS REGULAR EXPRESSION. There is no known way + # to parse CHECK constraints that contain newlines themselves using + # regular expressions, and the approach here relies upon each + # individual + # CHECK constraint being on a single line by itself. This + # necessarily makes assumptions as to how the CREATE TABLE + # was emitted. A more comprehensive DDL parsing solution would be + # needed to improve upon the current situation. See #11840 for + # background + CHECK_PATTERN = r"(?:CONSTRAINT (.+) +)?CHECK *\( *(.+) *\),? *" + cks = [] + + for match in re.finditer(CHECK_PATTERN, table_data or "", re.I): + + name = match.group(1) + + if name: + name = re.sub(r'^"|"$', "", name) + + cks.append({"sqltext": match.group(2), "name": name}) + cks.sort(key=lambda d: d["name"] or "~") # sort None as last + if cks: + return cks + else: + return ReflectionDefaults.check_constraints() + + @reflection.cache + def get_indexes(self, connection, table_name, schema=None, **kw): + pragma_indexes = self._get_table_pragma( + connection, "index_list", table_name, schema=schema + ) + indexes = [] + + # regular expression to extract the filter predicate of a partial + # index. this could fail to extract the predicate correctly on + # indexes created like + # CREATE INDEX i ON t (col || ') where') WHERE col <> '' + # but as this function does not support expression-based indexes + # this case does not occur. + partial_pred_re = re.compile(r"\)\s+where\s+(.+)", re.IGNORECASE) + + if schema: + schema_expr = "%s." % self.identifier_preparer.quote_identifier( + schema + ) + else: + schema_expr = "" + + include_auto_indexes = kw.pop("include_auto_indexes", False) + for row in pragma_indexes: + # ignore implicit primary key index. + # https://www.mail-archive.com/sqlite-users@sqlite.org/msg30517.html + if not include_auto_indexes and row[1].startswith( + "sqlite_autoindex" + ): + continue + indexes.append( + dict( + name=row[1], + column_names=[], + unique=row[2], + dialect_options={}, + ) + ) + + # check partial indexes + if len(row) >= 5 and row[4]: + s = ( + "SELECT sql FROM %(schema)ssqlite_master " + "WHERE name = ? 
" + "AND type = 'index'" % {"schema": schema_expr} + ) + rs = connection.exec_driver_sql(s, (row[1],)) + index_sql = rs.scalar() + predicate_match = partial_pred_re.search(index_sql) + if predicate_match is None: + # unless the regex is broken this case shouldn't happen + # because we know this is a partial index, so the + # definition sql should match the regex + util.warn( + "Failed to look up filter predicate of " + "partial index %s" % row[1] + ) + else: + predicate = predicate_match.group(1) + indexes[-1]["dialect_options"]["sqlite_where"] = text( + predicate + ) + + # loop thru unique indexes to get the column names. + for idx in list(indexes): + pragma_index = self._get_table_pragma( + connection, "index_info", idx["name"], schema=schema + ) + + for row in pragma_index: + if row[2] is None: + util.warn( + "Skipped unsupported reflection of " + "expression-based index %s" % idx["name"] + ) + indexes.remove(idx) + break + else: + idx["column_names"].append(row[2]) + + indexes.sort(key=lambda d: d["name"] or "~") # sort None as last + if indexes: + return indexes + elif not self.has_table(connection, table_name, schema): + raise exc.NoSuchTableError( + f"{schema}.{table_name}" if schema else table_name + ) + else: + return ReflectionDefaults.indexes() + + def _is_sys_table(self, table_name): + return table_name in { + "sqlite_schema", + "sqlite_master", + "sqlite_temp_schema", + "sqlite_temp_master", + } + + @reflection.cache + def _get_table_sql(self, connection, table_name, schema=None, **kw): + if schema: + schema_expr = "%s." % ( + self.identifier_preparer.quote_identifier(schema) + ) + else: + schema_expr = "" + try: + s = ( + "SELECT sql FROM " + " (SELECT * FROM %(schema)ssqlite_master UNION ALL " + " SELECT * FROM %(schema)ssqlite_temp_master) " + "WHERE name = ? " + "AND type in ('table', 'view')" % {"schema": schema_expr} + ) + rs = connection.exec_driver_sql(s, (table_name,)) + except exc.DBAPIError: + s = ( + "SELECT sql FROM %(schema)ssqlite_master " + "WHERE name = ? 
" + "AND type in ('table', 'view')" % {"schema": schema_expr} + ) + rs = connection.exec_driver_sql(s, (table_name,)) + value = rs.scalar() + if value is None and not self._is_sys_table(table_name): + raise exc.NoSuchTableError(f"{schema_expr}{table_name}") + return value + + def _get_table_pragma(self, connection, pragma, table_name, schema=None): + quote = self.identifier_preparer.quote_identifier + if schema is not None: + statements = [f"PRAGMA {quote(schema)}."] + else: + # because PRAGMA looks in all attached databases if no schema + # given, need to specify "main" schema, however since we want + # 'temp' tables in the same namespace as 'main', need to run + # the PRAGMA twice + statements = ["PRAGMA main.", "PRAGMA temp."] + + qtable = quote(table_name) + for statement in statements: + statement = f"{statement}{pragma}({qtable})" + cursor = connection.exec_driver_sql(statement) + if not cursor._soft_closed: + # work around SQLite issue whereby cursor.description + # is blank when PRAGMA returns no rows: + # https://www.sqlite.org/cvstrac/tktview?tn=1884 + result = cursor.fetchall() + else: + result = [] + if result: + return result + else: + return [] diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/engine/__init__.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/engine/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f4205d892600bde3f96321ae03084f166a26ca2d --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/engine/__init__.py @@ -0,0 +1,62 @@ +# engine/__init__.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php + +"""SQL connections, SQL execution and high-level DB-API interface. + +The engine package defines the basic components used to interface +DB-API modules with higher-level statement construction, +connection-management, execution and result contexts. The primary +"entry point" class into this package is the Engine and its public +constructor ``create_engine()``. + +""" + +from . import events as events +from . 
import util as util +from .base import Connection as Connection +from .base import Engine as Engine +from .base import NestedTransaction as NestedTransaction +from .base import RootTransaction as RootTransaction +from .base import Transaction as Transaction +from .base import TwoPhaseTransaction as TwoPhaseTransaction +from .create import create_engine as create_engine +from .create import create_pool_from_url as create_pool_from_url +from .create import engine_from_config as engine_from_config +from .cursor import CursorResult as CursorResult +from .cursor import ResultProxy as ResultProxy +from .interfaces import AdaptedConnection as AdaptedConnection +from .interfaces import BindTyping as BindTyping +from .interfaces import Compiled as Compiled +from .interfaces import Connectable as Connectable +from .interfaces import ConnectArgsType as ConnectArgsType +from .interfaces import ConnectionEventsTarget as ConnectionEventsTarget +from .interfaces import CreateEnginePlugin as CreateEnginePlugin +from .interfaces import Dialect as Dialect +from .interfaces import ExceptionContext as ExceptionContext +from .interfaces import ExecutionContext as ExecutionContext +from .interfaces import TypeCompiler as TypeCompiler +from .mock import create_mock_engine as create_mock_engine +from .reflection import Inspector as Inspector +from .reflection import ObjectKind as ObjectKind +from .reflection import ObjectScope as ObjectScope +from .result import ChunkedIteratorResult as ChunkedIteratorResult +from .result import FilterResult as FilterResult +from .result import FrozenResult as FrozenResult +from .result import IteratorResult as IteratorResult +from .result import MappingResult as MappingResult +from .result import MergedResult as MergedResult +from .result import Result as Result +from .result import result_tuple as result_tuple +from .result import ScalarResult as ScalarResult +from .result import TupleResult as TupleResult +from .row import BaseRow as BaseRow +from .row import Row as Row +from .row import RowMapping as RowMapping +from .url import make_url as make_url +from .url import URL as URL +from .util import connection_memoize as connection_memoize +from ..sql import ddl as ddl diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/engine/_py_processors.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/engine/_py_processors.py new file mode 100644 index 0000000000000000000000000000000000000000..8536d53d77942a2f88959c52e17186435528a144 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/engine/_py_processors.py @@ -0,0 +1,136 @@ +# engine/_py_processors.py +# Copyright (C) 2010-2025 the SQLAlchemy authors and contributors +# +# Copyright (C) 2010 Gaetan de Menten gdementen@gmail.com +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php + +"""defines generic type conversion functions, as used in bind and result +processors. + +They all share one common characteristic: None is passed through unchanged. 
+ +""" + +from __future__ import annotations + +import datetime +from datetime import date as date_cls +from datetime import datetime as datetime_cls +from datetime import time as time_cls +from decimal import Decimal +import typing +from typing import Any +from typing import Callable +from typing import Optional +from typing import Type +from typing import TypeVar +from typing import Union + + +_DT = TypeVar( + "_DT", bound=Union[datetime.datetime, datetime.time, datetime.date] +) + + +def str_to_datetime_processor_factory( + regexp: typing.Pattern[str], type_: Callable[..., _DT] +) -> Callable[[Optional[str]], Optional[_DT]]: + rmatch = regexp.match + # Even on python2.6 datetime.strptime is both slower than this code + # and it does not support microseconds. + has_named_groups = bool(regexp.groupindex) + + def process(value: Optional[str]) -> Optional[_DT]: + if value is None: + return None + else: + try: + m = rmatch(value) + except TypeError as err: + raise ValueError( + "Couldn't parse %s string '%r' " + "- value is not a string." % (type_.__name__, value) + ) from err + + if m is None: + raise ValueError( + "Couldn't parse %s string: " + "'%s'" % (type_.__name__, value) + ) + if has_named_groups: + groups = m.groupdict(0) + return type_( + **dict( + list( + zip( + iter(groups.keys()), + list(map(int, iter(groups.values()))), + ) + ) + ) + ) + else: + return type_(*list(map(int, m.groups(0)))) + + return process + + +def to_decimal_processor_factory( + target_class: Type[Decimal], scale: int +) -> Callable[[Optional[float]], Optional[Decimal]]: + fstring = "%%.%df" % scale + + def process(value: Optional[float]) -> Optional[Decimal]: + if value is None: + return None + else: + return target_class(fstring % value) + + return process + + +def to_float(value: Optional[Union[int, float]]) -> Optional[float]: + if value is None: + return None + else: + return float(value) + + +def to_str(value: Optional[Any]) -> Optional[str]: + if value is None: + return None + else: + return str(value) + + +def int_to_boolean(value: Optional[int]) -> Optional[bool]: + if value is None: + return None + else: + return bool(value) + + +def str_to_datetime(value: Optional[str]) -> Optional[datetime.datetime]: + if value is not None: + dt_value = datetime_cls.fromisoformat(value) + else: + dt_value = None + return dt_value + + +def str_to_time(value: Optional[str]) -> Optional[datetime.time]: + if value is not None: + dt_value = time_cls.fromisoformat(value) + else: + dt_value = None + return dt_value + + +def str_to_date(value: Optional[str]) -> Optional[datetime.date]: + if value is not None: + dt_value = date_cls.fromisoformat(value) + else: + dt_value = None + return dt_value diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/engine/_py_row.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/engine/_py_row.py new file mode 100644 index 0000000000000000000000000000000000000000..38c60fcd2765a4ebe3d61a117e1c83fbf6232a2a --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/engine/_py_row.py @@ -0,0 +1,128 @@ +# engine/_py_row.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php +from __future__ import annotations + +import operator +import typing +from typing import Any +from typing import Callable +from typing 
import Dict +from typing import Iterator +from typing import List +from typing import Mapping +from typing import Optional +from typing import Tuple +from typing import Type + +if typing.TYPE_CHECKING: + from .result import _KeyType + from .result import _ProcessorsType + from .result import _RawRowType + from .result import _TupleGetterType + from .result import ResultMetaData + +MD_INDEX = 0 # integer index in cursor.description + + +class BaseRow: + __slots__ = ("_parent", "_data", "_key_to_index") + + _parent: ResultMetaData + _key_to_index: Mapping[_KeyType, int] + _data: _RawRowType + + def __init__( + self, + parent: ResultMetaData, + processors: Optional[_ProcessorsType], + key_to_index: Mapping[_KeyType, int], + data: _RawRowType, + ): + """Row objects are constructed by CursorResult objects.""" + object.__setattr__(self, "_parent", parent) + + object.__setattr__(self, "_key_to_index", key_to_index) + + if processors: + object.__setattr__( + self, + "_data", + tuple( + [ + proc(value) if proc else value + for proc, value in zip(processors, data) + ] + ), + ) + else: + object.__setattr__(self, "_data", tuple(data)) + + def __reduce__(self) -> Tuple[Callable[..., BaseRow], Tuple[Any, ...]]: + return ( + rowproxy_reconstructor, + (self.__class__, self.__getstate__()), + ) + + def __getstate__(self) -> Dict[str, Any]: + return {"_parent": self._parent, "_data": self._data} + + def __setstate__(self, state: Dict[str, Any]) -> None: + parent = state["_parent"] + object.__setattr__(self, "_parent", parent) + object.__setattr__(self, "_data", state["_data"]) + object.__setattr__(self, "_key_to_index", parent._key_to_index) + + def _values_impl(self) -> List[Any]: + return list(self) + + def __iter__(self) -> Iterator[Any]: + return iter(self._data) + + def __len__(self) -> int: + return len(self._data) + + def __hash__(self) -> int: + return hash(self._data) + + def __getitem__(self, key: Any) -> Any: + return self._data[key] + + def _get_by_key_impl_mapping(self, key: str) -> Any: + try: + return self._data[self._key_to_index[key]] + except KeyError: + pass + self._parent._key_not_found(key, False) + + def __getattr__(self, name: str) -> Any: + try: + return self._data[self._key_to_index[name]] + except KeyError: + pass + self._parent._key_not_found(name, True) + + def _to_tuple_instance(self) -> Tuple[Any, ...]: + return self._data + + +# This reconstructor is necessary so that pickles with the Cy extension or +# without use the same Binary format. 
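+# Editor's note (illustrative, not part of the upstream source):
+# BaseRow.__reduce__ returns (rowproxy_reconstructor, (cls, state)), so a
+# round trip such as pickle.loads(pickle.dumps(row)) calls this function to
+# rebuild the row from its (_parent, _data) state, whichever implementation
+# produced the pickle.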
+def rowproxy_reconstructor( + cls: Type[BaseRow], state: Dict[str, Any] +) -> BaseRow: + obj = cls.__new__(cls) + obj.__setstate__(state) + return obj + + +def tuplegetter(*indexes: int) -> _TupleGetterType: + if len(indexes) != 1: + for i in range(1, len(indexes)): + if indexes[i - 1] != indexes[i] - 1: + return operator.itemgetter(*indexes) + # slice form is faster but returns a list if input is list + return operator.itemgetter(slice(indexes[0], indexes[-1] + 1)) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/engine/_py_util.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/engine/_py_util.py new file mode 100644 index 0000000000000000000000000000000000000000..50badea2a9482232462bfbdbf0a6f0204e1dcde6 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/engine/_py_util.py @@ -0,0 +1,74 @@ +# engine/_py_util.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php +from __future__ import annotations + +import typing +from typing import Any +from typing import Mapping +from typing import Optional +from typing import Tuple + +from .. import exc + +if typing.TYPE_CHECKING: + from .interfaces import _CoreAnyExecuteParams + from .interfaces import _CoreMultiExecuteParams + from .interfaces import _DBAPIAnyExecuteParams + from .interfaces import _DBAPIMultiExecuteParams + + +_no_tuple: Tuple[Any, ...] = () + + +def _distill_params_20( + params: Optional[_CoreAnyExecuteParams], +) -> _CoreMultiExecuteParams: + if params is None: + return _no_tuple + # Assume list is more likely than tuple + elif isinstance(params, list) or isinstance(params, tuple): + # collections_abc.MutableSequence): # avoid abc.__instancecheck__ + if params and not isinstance(params[0], (tuple, Mapping)): + raise exc.ArgumentError( + "List argument must consist only of tuples or dictionaries" + ) + + return params + elif isinstance(params, dict) or isinstance( + # only do immutabledict or abc.__instancecheck__ for Mapping after + # we've checked for plain dictionaries and would otherwise raise + params, + Mapping, + ): + return [params] + else: + raise exc.ArgumentError("mapping or list expected for parameters") + + +def _distill_raw_params( + params: Optional[_DBAPIAnyExecuteParams], +) -> _DBAPIMultiExecuteParams: + if params is None: + return _no_tuple + elif isinstance(params, list): + # collections_abc.MutableSequence): # avoid abc.__instancecheck__ + if params and not isinstance(params[0], (tuple, Mapping)): + raise exc.ArgumentError( + "List argument must consist only of tuples or dictionaries" + ) + + return params + elif isinstance(params, (tuple, dict)) or isinstance( + # only do abc.__instancecheck__ for Mapping after we've checked + # for plain dictionaries and would otherwise raise + params, + Mapping, + ): + # cast("Union[List[Mapping[str, Any]], Tuple[Any, ...]]", [params]) + return [params] # type: ignore + else: + raise exc.ArgumentError("mapping or sequence expected for parameters") diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/engine/base.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/engine/base.py new file mode 100644 index 
0000000000000000000000000000000000000000..4292ed6d100c19b3349416e9cca7aa8e88298d39 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/engine/base.py @@ -0,0 +1,3370 @@ +# engine/base.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php +"""Defines :class:`_engine.Connection` and :class:`_engine.Engine`. + +""" +from __future__ import annotations + +import contextlib +import sys +import typing +from typing import Any +from typing import Callable +from typing import cast +from typing import Iterable +from typing import Iterator +from typing import List +from typing import Mapping +from typing import NoReturn +from typing import Optional +from typing import overload +from typing import Tuple +from typing import Type +from typing import TypeVar +from typing import Union + +from .interfaces import BindTyping +from .interfaces import ConnectionEventsTarget +from .interfaces import DBAPICursor +from .interfaces import ExceptionContext +from .interfaces import ExecuteStyle +from .interfaces import ExecutionContext +from .interfaces import IsolationLevel +from .util import _distill_params_20 +from .util import _distill_raw_params +from .util import TransactionalContext +from .. import exc +from .. import inspection +from .. import log +from .. import util +from ..sql import compiler +from ..sql import util as sql_util + +if typing.TYPE_CHECKING: + from . import CursorResult + from . import ScalarResult + from .interfaces import _AnyExecuteParams + from .interfaces import _AnyMultiExecuteParams + from .interfaces import _CoreAnyExecuteParams + from .interfaces import _CoreMultiExecuteParams + from .interfaces import _CoreSingleExecuteParams + from .interfaces import _DBAPIAnyExecuteParams + from .interfaces import _DBAPISingleExecuteParams + from .interfaces import _ExecuteOptions + from .interfaces import CompiledCacheType + from .interfaces import CoreExecuteOptionsParameter + from .interfaces import Dialect + from .interfaces import SchemaTranslateMapType + from .reflection import Inspector # noqa + from .url import URL + from ..event import dispatcher + from ..log import _EchoFlagType + from ..pool import _ConnectionFairy + from ..pool import Pool + from ..pool import PoolProxiedConnection + from ..sql import Executable + from ..sql._typing import _InfoType + from ..sql.compiler import Compiled + from ..sql.ddl import ExecutableDDLElement + from ..sql.ddl import InvokeDDLBase + from ..sql.functions import FunctionElement + from ..sql.schema import DefaultGenerator + from ..sql.schema import HasSchemaAttr + from ..sql.schema import SchemaVisitable + from ..sql.selectable import TypedReturnsRows + + +_T = TypeVar("_T", bound=Any) +_EMPTY_EXECUTION_OPTS: _ExecuteOptions = util.EMPTY_DICT +NO_OPTIONS: Mapping[str, Any] = util.EMPTY_DICT + + +class Connection(ConnectionEventsTarget, inspection.Inspectable["Inspector"]): + """Provides high-level functionality for a wrapped DB-API connection. + + The :class:`_engine.Connection` object is procured by calling the + :meth:`_engine.Engine.connect` method of the :class:`_engine.Engine` + object, and provides services for execution of SQL statements as well + as transaction control. + + The Connection object is **not** thread-safe. 
While a Connection can be + shared among threads using properly synchronized access, it is still + possible that the underlying DBAPI connection may not support shared + access between threads. Check the DBAPI documentation for details. + + The Connection object represents a single DBAPI connection checked out + from the connection pool. In this state, the connection pool has no + effect upon the connection, including its expiration or timeout state. + For the connection pool to properly manage connections, connections + should be returned to the connection pool (i.e. ``connection.close()``) + whenever the connection is not in use. + + .. index:: + single: thread safety; Connection + + """ + + dialect: Dialect + dispatch: dispatcher[ConnectionEventsTarget] + + _sqla_logger_namespace = "sqlalchemy.engine.Connection" + + # used by sqlalchemy.engine.util.TransactionalContext + _trans_context_manager: Optional[TransactionalContext] = None + + # legacy as of 2.0, should be eventually deprecated and + # removed. was used in the "pre_ping" recipe that's been in the docs + # a long time + should_close_with_result = False + + _dbapi_connection: Optional[PoolProxiedConnection] + + _execution_options: _ExecuteOptions + + _transaction: Optional[RootTransaction] + _nested_transaction: Optional[NestedTransaction] + + def __init__( + self, + engine: Engine, + connection: Optional[PoolProxiedConnection] = None, + _has_events: Optional[bool] = None, + _allow_revalidate: bool = True, + _allow_autobegin: bool = True, + ): + """Construct a new Connection.""" + self.engine = engine + self.dialect = dialect = engine.dialect + + if connection is None: + try: + self._dbapi_connection = engine.raw_connection() + except dialect.loaded_dbapi.Error as err: + Connection._handle_dbapi_exception_noconnection( + err, dialect, engine + ) + raise + else: + self._dbapi_connection = connection + + self._transaction = self._nested_transaction = None + self.__savepoint_seq = 0 + self.__in_begin = False + + self.__can_reconnect = _allow_revalidate + self._allow_autobegin = _allow_autobegin + self._echo = self.engine._should_log_info() + + if _has_events is None: + # if _has_events is sent explicitly as False, + # then don't join the dispatch of the engine; we don't + # want to handle any of the engine's events in that case.
+ self.dispatch = self.dispatch._join(engine.dispatch) + self._has_events = _has_events or ( + _has_events is None and engine._has_events + ) + + self._execution_options = engine._execution_options + + if self._has_events or self.engine._has_events: + self.dispatch.engine_connect(self) + + # this can be assigned differently via + # characteristics.LoggingTokenCharacteristic + _message_formatter: Any = None + + def _log_info(self, message: str, *arg: Any, **kw: Any) -> None: + fmt = self._message_formatter + + if fmt: + message = fmt(message) + + if log.STACKLEVEL: + kw["stacklevel"] = 1 + log.STACKLEVEL_OFFSET + + self.engine.logger.info(message, *arg, **kw) + + def _log_debug(self, message: str, *arg: Any, **kw: Any) -> None: + fmt = self._message_formatter + + if fmt: + message = fmt(message) + + if log.STACKLEVEL: + kw["stacklevel"] = 1 + log.STACKLEVEL_OFFSET + + self.engine.logger.debug(message, *arg, **kw) + + @property + def _schema_translate_map(self) -> Optional[SchemaTranslateMapType]: + schema_translate_map: Optional[SchemaTranslateMapType] = ( + self._execution_options.get("schema_translate_map", None) + ) + + return schema_translate_map + + def schema_for_object(self, obj: HasSchemaAttr) -> Optional[str]: + """Return the schema name for the given schema item taking into + account current schema translate map. + + """ + + name = obj.schema + schema_translate_map: Optional[SchemaTranslateMapType] = ( + self._execution_options.get("schema_translate_map", None) + ) + + if ( + schema_translate_map + and name in schema_translate_map + and obj._use_schema_map + ): + return schema_translate_map[name] + else: + return name + + def __enter__(self) -> Connection: + return self + + def __exit__(self, type_: Any, value: Any, traceback: Any) -> None: + self.close() + + @overload + def execution_options( + self, + *, + compiled_cache: Optional[CompiledCacheType] = ..., + logging_token: str = ..., + isolation_level: IsolationLevel = ..., + no_parameters: bool = False, + stream_results: bool = False, + max_row_buffer: int = ..., + yield_per: int = ..., + insertmanyvalues_page_size: int = ..., + schema_translate_map: Optional[SchemaTranslateMapType] = ..., + preserve_rowcount: bool = False, + **opt: Any, + ) -> Connection: ... + + @overload + def execution_options(self, **opt: Any) -> Connection: ... + + def execution_options(self, **opt: Any) -> Connection: + r"""Set non-SQL options for the connection which take effect + during execution. + + This method modifies this :class:`_engine.Connection` **in-place**; + the return value is the same :class:`_engine.Connection` object + upon which the method is called. Note that this is in contrast + to the behavior of the ``execution_options`` methods on other + objects such as :meth:`_engine.Engine.execution_options` and + :meth:`_sql.Executable.execution_options`. The rationale is that many + such execution options necessarily modify the state of the base + DBAPI connection in any case so there is no feasible means of + keeping the effect of such an option localized to a "sub" connection. + + .. versionchanged:: 2.0 The :meth:`_engine.Connection.execution_options` + method, in contrast to other objects with this method, modifies + the connection in-place without creating copy of it. + + As discussed elsewhere, the :meth:`_engine.Connection.execution_options` + method accepts any arbitrary parameters including user defined names. 
+ All parameters given are consumable in a number of ways including + by using the :meth:`_engine.Connection.get_execution_options` method. + See the examples at :meth:`_sql.Executable.execution_options` + and :meth:`_engine.Engine.execution_options`. + + The keywords that are currently recognized by SQLAlchemy itself + include all those listed under :meth:`.Executable.execution_options`, + as well as others that are specific to :class:`_engine.Connection`. + + :param compiled_cache: Available on: :class:`_engine.Connection`, + :class:`_engine.Engine`. + + A dictionary where :class:`.Compiled` objects + will be cached when the :class:`_engine.Connection` + compiles a clause + expression into a :class:`.Compiled` object. This dictionary will + supersede the statement cache that may be configured on the + :class:`_engine.Engine` itself. If set to None, caching + is disabled, even if the engine has a configured cache size. + + Note that the ORM makes use of its own "compiled" caches for + some operations, including flush operations. The caching + used by the ORM internally supersedes a cache dictionary + specified here. + + :param logging_token: Available on: :class:`_engine.Connection`, + :class:`_engine.Engine`, :class:`_sql.Executable`. + + Adds the specified string token surrounded by brackets in log + messages logged by the connection, i.e. the logging that's enabled + either via the :paramref:`_sa.create_engine.echo` flag or via the + ``logging.getLogger("sqlalchemy.engine")`` logger. This allows a + per-connection or per-sub-engine token to be available which is + useful for debugging concurrent connection scenarios. + + .. versionadded:: 1.4.0b2 + + .. seealso:: + + :ref:`dbengine_logging_tokens` - usage example + + :paramref:`_sa.create_engine.logging_name` - adds a name to the + name used by the Python logger object itself. + + :param isolation_level: Available on: :class:`_engine.Connection`, + :class:`_engine.Engine`. + + Set the transaction isolation level for the lifespan of this + :class:`_engine.Connection` object. + Valid values include those string + values accepted by the :paramref:`_sa.create_engine.isolation_level` + parameter passed to :func:`_sa.create_engine`. These levels are + semi-database specific; see individual dialect documentation for + valid levels. + + The isolation level option applies the isolation level by emitting + statements on the DBAPI connection, and **necessarily affects the + original Connection object overall**. The isolation level will remain + at the given setting until explicitly changed, or when the DBAPI + connection itself is :term:`released` to the connection pool, i.e. the + :meth:`_engine.Connection.close` method is called, at which time an + event handler will emit additional statements on the DBAPI connection + in order to revert the isolation level change. + + .. note:: The ``isolation_level`` execution option may only be + established before the :meth:`_engine.Connection.begin` method is + called, as well as before any SQL statements are emitted which + would otherwise trigger "autobegin", or directly after a call to + :meth:`_engine.Connection.commit` or + :meth:`_engine.Connection.rollback`. A database cannot change the + isolation level on a transaction in progress. + + .. note:: The ``isolation_level`` execution option is implicitly + reset if the :class:`_engine.Connection` is invalidated, e.g. via + the :meth:`_engine.Connection.invalidate` method, or if a + disconnection error occurs. 
The new connection produced after the + invalidation will **not** have the selected isolation level + re-applied to it automatically. + + .. seealso:: + + :ref:`dbapi_autocommit` + + :meth:`_engine.Connection.get_isolation_level` + - view current actual level + + :param no_parameters: Available on: :class:`_engine.Connection`, + :class:`_sql.Executable`. + + When ``True``, if the final parameter + list or dictionary is totally empty, will invoke the + statement on the cursor as ``cursor.execute(statement)``, + not passing the parameter collection at all. + Some DBAPIs such as psycopg2 and mysql-python consider + percent signs as significant only when parameters are + present; this option allows code to generate SQL + containing percent signs (and possibly other characters) + that is neutral regarding whether it's executed by the DBAPI + or piped into a script that's later invoked by + command line tools. + + :param stream_results: Available on: :class:`_engine.Connection`, + :class:`_sql.Executable`. + + Indicate to the dialect that results should be "streamed" and not + pre-buffered, if possible. For backends such as PostgreSQL, MySQL + and MariaDB, this indicates the use of a "server side cursor" as + opposed to a client side cursor. Other backends such as that of + Oracle Database may already use server side cursors by default. + + The usage of + :paramref:`_engine.Connection.execution_options.stream_results` is + usually combined with setting a fixed number of rows to be fetched + in batches, to allow for efficient iteration of database rows while + at the same time not loading all result rows into memory at once; + this can be configured on a :class:`_engine.Result` object using the + :meth:`_engine.Result.yield_per` method, after execution has + returned a new :class:`_engine.Result`. If + :meth:`_engine.Result.yield_per` is not used, + the :paramref:`_engine.Connection.execution_options.stream_results` + mode of operation will instead use a dynamically sized buffer + which buffers sets of rows at a time, growing on each batch + based on a fixed growth size up until a limit which may + be configured using the + :paramref:`_engine.Connection.execution_options.max_row_buffer` + parameter. + + When using the ORM to fetch ORM mapped objects from a result, + :meth:`_engine.Result.yield_per` should always be used with + :paramref:`_engine.Connection.execution_options.stream_results`, + so that the ORM does not fetch all rows into new ORM objects at once. + + For typical use, the + :paramref:`_engine.Connection.execution_options.yield_per` execution + option should be preferred, which sets up both + :paramref:`_engine.Connection.execution_options.stream_results` and + :meth:`_engine.Result.yield_per` at once. This option is supported + both at a core level by :class:`_engine.Connection` as well as by the + ORM :class:`_engine.Session`; the latter is described at + :ref:`orm_queryguide_yield_per`. + + .. seealso:: + + :ref:`engine_stream_results` - background on + :paramref:`_engine.Connection.execution_options.stream_results` + + :paramref:`_engine.Connection.execution_options.max_row_buffer` + + :paramref:`_engine.Connection.execution_options.yield_per` + + :ref:`orm_queryguide_yield_per` - in the :ref:`queryguide_toplevel` + describing the ORM version of ``yield_per`` + + :param max_row_buffer: Available on: :class:`_engine.Connection`, + :class:`_sql.Executable`.
Sets a maximum + buffer size to use when the + :paramref:`_engine.Connection.execution_options.stream_results` + execution option is used on a backend that supports server side + cursors. The default value if not specified is 1000. + + .. seealso:: + + :paramref:`_engine.Connection.execution_options.stream_results` + + :ref:`engine_stream_results` + + + :param yield_per: Available on: :class:`_engine.Connection`, + :class:`_sql.Executable`. Integer value applied which will + set the :paramref:`_engine.Connection.execution_options.stream_results` + execution option and invoke :meth:`_engine.Result.yield_per` + automatically at once. Allows equivalent functionality as + is present when using this parameter with the ORM. + + .. versionadded:: 1.4.40 + + .. seealso:: + + :ref:`engine_stream_results` - background and examples + on using server side cursors with Core. + + :ref:`orm_queryguide_yield_per` - in the :ref:`queryguide_toplevel` + describing the ORM version of ``yield_per`` + + :param insertmanyvalues_page_size: Available on: :class:`_engine.Connection`, + :class:`_engine.Engine`. Number of rows to format into an + INSERT statement when the statement uses "insertmanyvalues" mode, + which is a paged form of bulk insert that is used for many backends + when using :term:`executemany` execution typically in conjunction + with RETURNING. Defaults to 1000. May also be modified on a + per-engine basis using the + :paramref:`_sa.create_engine.insertmanyvalues_page_size` parameter. + + .. versionadded:: 2.0 + + .. seealso:: + + :ref:`engine_insertmanyvalues` + + :param schema_translate_map: Available on: :class:`_engine.Connection`, + :class:`_engine.Engine`, :class:`_sql.Executable`. + + A dictionary mapping schema names to schema names, that will be + applied to the :paramref:`_schema.Table.schema` element of each + :class:`_schema.Table` + encountered when SQL or DDL expression elements + are compiled into strings; the resulting schema name will be + converted based on presence in the map of the original name. + + .. seealso:: + + :ref:`schema_translating` + + :param preserve_rowcount: Boolean; when True, the ``cursor.rowcount`` + attribute will be unconditionally memoized within the result and + made available via the :attr:`.CursorResult.rowcount` attribute. + Normally, this attribute is only preserved for UPDATE and DELETE + statements. Using this option, the DBAPIs rowcount value can + be accessed for other kinds of statements such as INSERT and SELECT, + to the degree that the DBAPI supports these statements. See + :attr:`.CursorResult.rowcount` for notes regarding the behavior + of this attribute. + + .. versionadded:: 2.0.28 + + .. seealso:: + + :meth:`_engine.Engine.execution_options` + + :meth:`.Executable.execution_options` + + :meth:`_engine.Connection.get_execution_options` + + :ref:`orm_queryguide_execution_options` - documentation on all + ORM-specific execution options + + """ # noqa + if self._has_events or self.engine._has_events: + self.dispatch.set_connection_execution_options(self, opt) + self._execution_options = self._execution_options.union(opt) + self.dialect.set_connection_execution_options(self, opt) + return self + + def get_execution_options(self) -> _ExecuteOptions: + """Get the non-SQL options which will take effect during execution. + + .. versionadded:: 1.3 + + .. 
seealso:: + + :meth:`_engine.Connection.execution_options` + """ + return self._execution_options + + @property + def _still_open_and_dbapi_connection_is_valid(self) -> bool: + pool_proxied_connection = self._dbapi_connection + return ( + pool_proxied_connection is not None + and pool_proxied_connection.is_valid + ) + + @property + def closed(self) -> bool: + """Return True if this connection is closed.""" + + return self._dbapi_connection is None and not self.__can_reconnect + + @property + def invalidated(self) -> bool: + """Return True if this connection was invalidated. + + This does not indicate whether or not the connection was + invalidated at the pool level, however + + """ + + # prior to 1.4, "invalid" was stored as a state independent of + # "closed", meaning an invalidated connection could be "closed", + # the _dbapi_connection would be None and closed=True, yet the + # "invalid" flag would stay True. This meant that there were + # three separate states (open/valid, closed/valid, closed/invalid) + # when there is really no reason for that; a connection that's + # "closed" does not need to be "invalid". So the state is now + # represented by the two facts alone. + + pool_proxied_connection = self._dbapi_connection + return pool_proxied_connection is None and self.__can_reconnect + + @property + def connection(self) -> PoolProxiedConnection: + """The underlying DB-API connection managed by this Connection. + + This is a SQLAlchemy connection-pool proxied connection + which then has the attribute + :attr:`_pool._ConnectionFairy.dbapi_connection` that refers to the + actual driver connection. + + .. seealso:: + + + :ref:`dbapi_connections` + + """ + + if self._dbapi_connection is None: + try: + return self._revalidate_connection() + except (exc.PendingRollbackError, exc.ResourceClosedError): + raise + except BaseException as e: + self._handle_dbapi_exception(e, None, None, None, None) + else: + return self._dbapi_connection + + def get_isolation_level(self) -> IsolationLevel: + """Return the current **actual** isolation level that's present on + the database within the scope of this connection. + + This attribute will perform a live SQL operation against the database + in order to procure the current isolation level, so the value returned + is the actual level on the underlying DBAPI connection regardless of + how this state was set. This will be one of the four actual isolation + modes ``READ UNCOMMITTED``, ``READ COMMITTED``, ``REPEATABLE READ``, + ``SERIALIZABLE``. It will **not** include the ``AUTOCOMMIT`` isolation + level setting. Third party dialects may also feature additional + isolation level settings. + + .. note:: This method **will not report** on the ``AUTOCOMMIT`` + isolation level, which is a separate :term:`dbapi` setting that's + independent of **actual** isolation level. When ``AUTOCOMMIT`` is + in use, the database connection still has a "traditional" isolation + mode in effect, that is typically one of the four values + ``READ UNCOMMITTED``, ``READ COMMITTED``, ``REPEATABLE READ``, + ``SERIALIZABLE``. + + Compare to the :attr:`_engine.Connection.default_isolation_level` + accessor which returns the isolation level that is present on the + database at initial connection time. + + .. 
seealso:: + + :attr:`_engine.Connection.default_isolation_level` + - view default level + + :paramref:`_sa.create_engine.isolation_level` + - set per :class:`_engine.Engine` isolation level + + :paramref:`.Connection.execution_options.isolation_level` + - set per :class:`_engine.Connection` isolation level + + """ + dbapi_connection = self.connection.dbapi_connection + assert dbapi_connection is not None + try: + return self.dialect.get_isolation_level(dbapi_connection) + except BaseException as e: + self._handle_dbapi_exception(e, None, None, None, None) + + @property + def default_isolation_level(self) -> Optional[IsolationLevel]: + """The initial-connection time isolation level associated with the + :class:`_engine.Dialect` in use. + + This value is independent of the + :paramref:`.Connection.execution_options.isolation_level` and + :paramref:`.Engine.execution_options.isolation_level` execution + options, and is determined by the :class:`_engine.Dialect` when the + first connection is created, by performing a SQL query against the + database for the current isolation level before any additional commands + have been emitted. + + Calling this accessor does not invoke any new SQL queries. + + .. seealso:: + + :meth:`_engine.Connection.get_isolation_level` + - view current actual isolation level + + :paramref:`_sa.create_engine.isolation_level` + - set per :class:`_engine.Engine` isolation level + + :paramref:`.Connection.execution_options.isolation_level` + - set per :class:`_engine.Connection` isolation level + + """ + return self.dialect.default_isolation_level + + def _invalid_transaction(self) -> NoReturn: + raise exc.PendingRollbackError( + "Can't reconnect until invalid %stransaction is rolled " + "back. Please rollback() fully before proceeding" + % ("savepoint " if self._nested_transaction is not None else ""), + code="8s2b", + ) + + def _revalidate_connection(self) -> PoolProxiedConnection: + if self.__can_reconnect and self.invalidated: + if self._transaction is not None: + self._invalid_transaction() + self._dbapi_connection = self.engine.raw_connection() + return self._dbapi_connection + raise exc.ResourceClosedError("This Connection is closed") + + @property + def info(self) -> _InfoType: + """Info dictionary associated with the underlying DBAPI connection + referred to by this :class:`_engine.Connection`, allowing user-defined + data to be associated with the connection. + + The data here will follow along with the DBAPI connection including + after it is returned to the connection pool and used again + in subsequent instances of :class:`_engine.Connection`. + + """ + + return self.connection.info + + def invalidate(self, exception: Optional[BaseException] = None) -> None: + """Invalidate the underlying DBAPI connection associated with + this :class:`_engine.Connection`. + + An attempt will be made to close the underlying DBAPI connection + immediately; however if this operation fails, the error is logged + but not raised. The connection is then discarded whether or not + close() succeeded. + + Upon the next use (where "use" typically means using the + :meth:`_engine.Connection.execute` method or similar), + this :class:`_engine.Connection` will attempt to + procure a new DBAPI connection using the services of the + :class:`_pool.Pool` as a source of connectivity (e.g. + a "reconnection"). + + If a transaction was in progress (e.g. 
the + :meth:`_engine.Connection.begin` method has been called) when the + :meth:`_engine.Connection.invalidate` method is called, at the DBAPI + level all state associated with this transaction is lost, as + the DBAPI connection is closed. The :class:`_engine.Connection` + will not allow a reconnection to proceed until the + :class:`.Transaction` object is ended, by calling the + :meth:`.Transaction.rollback` method; until that point, any attempt at + continuing to use the :class:`_engine.Connection` will raise an + :class:`~sqlalchemy.exc.InvalidRequestError`. + This is to prevent applications from accidentally + continuing an ongoing transactional operation despite the + fact that the transaction has been lost due to an + invalidation. + + The :meth:`_engine.Connection.invalidate` method, + just like auto-invalidation, + will at the connection pool level invoke the + :meth:`_events.PoolEvents.invalidate` event. + + :param exception: an optional ``Exception`` instance that's the + reason for the invalidation. It is passed along to event handlers + and logging functions. + + .. seealso:: + + :ref:`pool_connection_invalidation` + + """ + + if self.invalidated: + return + + if self.closed: + raise exc.ResourceClosedError("This Connection is closed") + + if self._still_open_and_dbapi_connection_is_valid: + pool_proxied_connection = self._dbapi_connection + assert pool_proxied_connection is not None + pool_proxied_connection.invalidate(exception) + + self._dbapi_connection = None + + def detach(self) -> None: + """Detach the underlying DB-API connection from its connection pool. + + E.g.:: + + with engine.connect() as conn: + conn.detach() + conn.execute(text("SET search_path TO schema1, schema2")) + + # work with connection + + # connection is fully closed (since we used "with:", can + # also call .close()) + + This :class:`_engine.Connection` instance will remain usable. + When closed + (or exited from a context manager block as above), + the DB-API connection will be literally closed and not + returned to its originating pool. + + This method can be used to insulate the rest of an application + from a modified state on a connection (such as a transaction + isolation level or similar). + + """ + + if self.closed: + raise exc.ResourceClosedError("This Connection is closed") + + pool_proxied_connection = self._dbapi_connection + if pool_proxied_connection is None: + raise exc.InvalidRequestError( + "Can't detach an invalidated Connection" + ) + pool_proxied_connection.detach() + + def _autobegin(self) -> None: + if self._allow_autobegin and not self.__in_begin: + self.begin() + + def begin(self) -> RootTransaction: + """Begin a transaction prior to autobegin occurring. + + E.g.:: + + with engine.connect() as conn: + with conn.begin() as trans: + conn.execute(table.insert(), {"username": "sandy"}) + + The returned object is an instance of :class:`_engine.RootTransaction`. + This object represents the "scope" of the transaction, + which completes when either the :meth:`_engine.Transaction.rollback` + or :meth:`_engine.Transaction.commit` method is called; the object + also works as a context manager as illustrated above. + + The :meth:`_engine.Connection.begin` method begins a + transaction that normally will be begun in any case when the connection + is first used to execute a statement. The reason this method might be
The reason this method might be + used would be to invoke the :meth:`_events.ConnectionEvents.begin` + event at a specific time, or to organize code within the scope of a + connection checkout in terms of context managed blocks, such as:: + + with engine.connect() as conn: + with conn.begin(): + conn.execute(...) + conn.execute(...) + + with conn.begin(): + conn.execute(...) + conn.execute(...) + + The above code is not fundamentally any different in its behavior than + the following code which does not use + :meth:`_engine.Connection.begin`; the below style is known + as "commit as you go" style:: + + with engine.connect() as conn: + conn.execute(...) + conn.execute(...) + conn.commit() + + conn.execute(...) + conn.execute(...) + conn.commit() + + From a database point of view, the :meth:`_engine.Connection.begin` + method does not emit any SQL or change the state of the underlying + DBAPI connection in any way; the Python DBAPI does not have any + concept of explicit transaction begin. + + .. seealso:: + + :ref:`tutorial_working_with_transactions` - in the + :ref:`unified_tutorial` + + :meth:`_engine.Connection.begin_nested` - use a SAVEPOINT + + :meth:`_engine.Connection.begin_twophase` - + use a two phase /XID transaction + + :meth:`_engine.Engine.begin` - context manager available from + :class:`_engine.Engine` + + """ + if self._transaction is None: + self._transaction = RootTransaction(self) + return self._transaction + else: + raise exc.InvalidRequestError( + "This connection has already initialized a SQLAlchemy " + "Transaction() object via begin() or autobegin; can't " + "call begin() here unless rollback() or commit() " + "is called first." + ) + + def begin_nested(self) -> NestedTransaction: + """Begin a nested transaction (i.e. SAVEPOINT) and return a transaction + handle that controls the scope of the SAVEPOINT. + + E.g.:: + + with engine.begin() as connection: + with connection.begin_nested(): + connection.execute(table.insert(), {"username": "sandy"}) + + The returned object is an instance of + :class:`_engine.NestedTransaction`, which includes transactional + methods :meth:`_engine.NestedTransaction.commit` and + :meth:`_engine.NestedTransaction.rollback`; for a nested transaction, + these methods correspond to the operations "RELEASE SAVEPOINT " + and "ROLLBACK TO SAVEPOINT ". The name of the savepoint is local + to the :class:`_engine.NestedTransaction` object and is generated + automatically. Like any other :class:`_engine.Transaction`, the + :class:`_engine.NestedTransaction` may be used as a context manager as + illustrated above which will "release" or "rollback" corresponding to + if the operation within the block were successful or raised an + exception. + + Nested transactions require SAVEPOINT support in the underlying + database, else the behavior is undefined. SAVEPOINT is commonly used to + run operations within a transaction that may fail, while continuing the + outer transaction. E.g.:: + + from sqlalchemy import exc + + with engine.begin() as connection: + trans = connection.begin_nested() + try: + connection.execute(table.insert(), {"username": "sandy"}) + trans.commit() + except exc.IntegrityError: # catch for duplicate username + trans.rollback() # rollback to savepoint + + # outer transaction continues + connection.execute(...) 
+ + If :meth:`_engine.Connection.begin_nested` is called without first + calling :meth:`_engine.Connection.begin` or + :meth:`_engine.Engine.begin`, the :class:`_engine.Connection` object + will "autobegin" the outer transaction first. This outer transaction + may be committed using "commit-as-you-go" style, e.g.:: + + with engine.connect() as connection: # begin() wasn't called + + with connection.begin_nested(): # will auto-"begin()" first + connection.execute(...) + # savepoint is released + + connection.execute(...) + + # explicitly commit outer transaction + connection.commit() + + # can continue working with connection here + + .. versionchanged:: 2.0 + + :meth:`_engine.Connection.begin_nested` will now participate + in the connection "autobegin" behavior that is new as of + 2.0 / "future" style connections in 1.4. + + .. seealso:: + + :meth:`_engine.Connection.begin` + + :ref:`session_begin_nested` - ORM support for SAVEPOINT + + """ + if self._transaction is None: + self._autobegin() + + return NestedTransaction(self) + + def begin_twophase(self, xid: Optional[Any] = None) -> TwoPhaseTransaction: + """Begin a two-phase or XA transaction and return a transaction + handle. + + The returned object is an instance of :class:`.TwoPhaseTransaction`, + which in addition to the methods provided by + :class:`.Transaction`, also provides a + :meth:`~.TwoPhaseTransaction.prepare` method. + + :param xid: the two phase transaction id. If not supplied, a + random id will be generated. + + .. seealso:: + + :meth:`_engine.Connection.begin` + + :meth:`_engine.Connection.begin_twophase` + + """ + + if self._transaction is not None: + raise exc.InvalidRequestError( + "Cannot start a two phase transaction when a transaction " + "is already in progress." + ) + if xid is None: + xid = self.engine.dialect.create_xid() + return TwoPhaseTransaction(self, xid) + + def commit(self) -> None: + """Commit the transaction that is currently in progress. + + This method commits the current transaction if one has been started. + If no transaction was started, the method has no effect, assuming + the connection is in a non-invalidated state. + + A transaction is begun on a :class:`_engine.Connection` automatically + whenever a statement is first executed, or when the + :meth:`_engine.Connection.begin` method is called. + + .. note:: The :meth:`_engine.Connection.commit` method only acts upon + the primary database transaction that is linked to the + :class:`_engine.Connection` object. It does not operate upon a + SAVEPOINT that would have been invoked from the + :meth:`_engine.Connection.begin_nested` method; for control of a + SAVEPOINT, call :meth:`_engine.NestedTransaction.commit` on the + :class:`_engine.NestedTransaction` that is returned by the + :meth:`_engine.Connection.begin_nested` method itself. + + + """ + if self._transaction: + self._transaction.commit() + + def rollback(self) -> None: + """Roll back the transaction that is currently in progress. + + This method rolls back the current transaction if one has been started. + If no transaction was started, the method has no effect. If a + transaction was started and the connection is in an invalidated state, + the transaction is cleared using this method. + + A transaction is begun on a :class:`_engine.Connection` automatically + whenever a statement is first executed, or when the + :meth:`_engine.Connection.begin` method is called. + + .. 
note:: The :meth:`_engine.Connection.rollback` method only acts + upon the primary database transaction that is linked to the + :class:`_engine.Connection` object. It does not operate upon a + SAVEPOINT that would have been invoked from the + :meth:`_engine.Connection.begin_nested` method; for control of a + SAVEPOINT, call :meth:`_engine.NestedTransaction.rollback` on the + :class:`_engine.NestedTransaction` that is returned by the + :meth:`_engine.Connection.begin_nested` method itself. + + + """ + if self._transaction: + self._transaction.rollback() + + def recover_twophase(self) -> List[Any]: + return self.engine.dialect.do_recover_twophase(self) + + def rollback_prepared(self, xid: Any, recover: bool = False) -> None: + self.engine.dialect.do_rollback_twophase(self, xid, recover=recover) + + def commit_prepared(self, xid: Any, recover: bool = False) -> None: + self.engine.dialect.do_commit_twophase(self, xid, recover=recover) + + def in_transaction(self) -> bool: + """Return True if a transaction is in progress.""" + return self._transaction is not None and self._transaction.is_active + + def in_nested_transaction(self) -> bool: + """Return True if a transaction is in progress.""" + return ( + self._nested_transaction is not None + and self._nested_transaction.is_active + ) + + def _is_autocommit_isolation(self) -> bool: + opt_iso = self._execution_options.get("isolation_level", None) + return bool( + opt_iso == "AUTOCOMMIT" + or ( + opt_iso is None + and self.engine.dialect._on_connect_isolation_level + == "AUTOCOMMIT" + ) + ) + + def _get_required_transaction(self) -> RootTransaction: + trans = self._transaction + if trans is None: + raise exc.InvalidRequestError("connection is not in a transaction") + return trans + + def _get_required_nested_transaction(self) -> NestedTransaction: + trans = self._nested_transaction + if trans is None: + raise exc.InvalidRequestError( + "connection is not in a nested transaction" + ) + return trans + + def get_transaction(self) -> Optional[RootTransaction]: + """Return the current root transaction in progress, if any. + + .. versionadded:: 1.4 + + """ + + return self._transaction + + def get_nested_transaction(self) -> Optional[NestedTransaction]: + """Return the current nested transaction in progress, if any. + + .. 
versionadded:: 1.4 + + """ + return self._nested_transaction + + def _begin_impl(self, transaction: RootTransaction) -> None: + if self._echo: + if self._is_autocommit_isolation(): + self._log_info( + "BEGIN (implicit; DBAPI should not BEGIN due to " + "autocommit mode)" + ) + else: + self._log_info("BEGIN (implicit)") + + self.__in_begin = True + + if self._has_events or self.engine._has_events: + self.dispatch.begin(self) + + try: + self.engine.dialect.do_begin(self.connection) + except BaseException as e: + self._handle_dbapi_exception(e, None, None, None, None) + finally: + self.__in_begin = False + + def _rollback_impl(self) -> None: + if self._has_events or self.engine._has_events: + self.dispatch.rollback(self) + + if self._still_open_and_dbapi_connection_is_valid: + if self._echo: + if self._is_autocommit_isolation(): + self._log_info( + "ROLLBACK using DBAPI connection.rollback(), " + "DBAPI should ignore due to autocommit mode" + ) + else: + self._log_info("ROLLBACK") + try: + self.engine.dialect.do_rollback(self.connection) + except BaseException as e: + self._handle_dbapi_exception(e, None, None, None, None) + + def _commit_impl(self) -> None: + if self._has_events or self.engine._has_events: + self.dispatch.commit(self) + + if self._echo: + if self._is_autocommit_isolation(): + self._log_info( + "COMMIT using DBAPI connection.commit(), " + "DBAPI should ignore due to autocommit mode" + ) + else: + self._log_info("COMMIT") + try: + self.engine.dialect.do_commit(self.connection) + except BaseException as e: + self._handle_dbapi_exception(e, None, None, None, None) + + def _savepoint_impl(self, name: Optional[str] = None) -> str: + if self._has_events or self.engine._has_events: + self.dispatch.savepoint(self, name) + + if name is None: + self.__savepoint_seq += 1 + name = "sa_savepoint_%s" % self.__savepoint_seq + self.engine.dialect.do_savepoint(self, name) + return name + + def _rollback_to_savepoint_impl(self, name: str) -> None: + if self._has_events or self.engine._has_events: + self.dispatch.rollback_savepoint(self, name, None) + + if self._still_open_and_dbapi_connection_is_valid: + self.engine.dialect.do_rollback_to_savepoint(self, name) + + def _release_savepoint_impl(self, name: str) -> None: + if self._has_events or self.engine._has_events: + self.dispatch.release_savepoint(self, name, None) + + self.engine.dialect.do_release_savepoint(self, name) + + def _begin_twophase_impl(self, transaction: TwoPhaseTransaction) -> None: + if self._echo: + self._log_info("BEGIN TWOPHASE (implicit)") + if self._has_events or self.engine._has_events: + self.dispatch.begin_twophase(self, transaction.xid) + + self.__in_begin = True + try: + self.engine.dialect.do_begin_twophase(self, transaction.xid) + except BaseException as e: + self._handle_dbapi_exception(e, None, None, None, None) + finally: + self.__in_begin = False + + def _prepare_twophase_impl(self, xid: Any) -> None: + if self._has_events or self.engine._has_events: + self.dispatch.prepare_twophase(self, xid) + + assert isinstance(self._transaction, TwoPhaseTransaction) + try: + self.engine.dialect.do_prepare_twophase(self, xid) + except BaseException as e: + self._handle_dbapi_exception(e, None, None, None, None) + + def _rollback_twophase_impl(self, xid: Any, is_prepared: bool) -> None: + if self._has_events or self.engine._has_events: + self.dispatch.rollback_twophase(self, xid, is_prepared) + + if self._still_open_and_dbapi_connection_is_valid: + assert isinstance(self._transaction, TwoPhaseTransaction) + try: + 
self.engine.dialect.do_rollback_twophase( + self, xid, is_prepared + ) + except BaseException as e: + self._handle_dbapi_exception(e, None, None, None, None) + + def _commit_twophase_impl(self, xid: Any, is_prepared: bool) -> None: + if self._has_events or self.engine._has_events: + self.dispatch.commit_twophase(self, xid, is_prepared) + + assert isinstance(self._transaction, TwoPhaseTransaction) + try: + self.engine.dialect.do_commit_twophase(self, xid, is_prepared) + except BaseException as e: + self._handle_dbapi_exception(e, None, None, None, None) + + def close(self) -> None: + """Close this :class:`_engine.Connection`. + + This results in a release of the underlying database + resources, that is, the DBAPI connection referenced + internally. The DBAPI connection is typically restored + back to the connection-holding :class:`_pool.Pool` referenced + by the :class:`_engine.Engine` that produced this + :class:`_engine.Connection`. Any transactional state present on + the DBAPI connection is also unconditionally released via + the DBAPI connection's ``rollback()`` method, regardless + of any :class:`.Transaction` object that may be + outstanding with regards to this :class:`_engine.Connection`. + + This has the effect of also calling :meth:`_engine.Connection.rollback` + if any transaction is in place. + + After :meth:`_engine.Connection.close` is called, the + :class:`_engine.Connection` is permanently in a closed state, + and will allow no further operations. + + """ + + if self._transaction: + self._transaction.close() + skip_reset = True + else: + skip_reset = False + + if self._dbapi_connection is not None: + conn = self._dbapi_connection + + # as we just closed the transaction, close the connection + # pool connection without doing an additional reset + if skip_reset: + cast("_ConnectionFairy", conn)._close_special( + transaction_reset=True + ) + else: + conn.close() + + # There is a slight chance that conn.close() may have + # triggered an invalidation here in which case + # _dbapi_connection would already be None, however usually + # it will be non-None here and in a "closed" state. + self._dbapi_connection = None + self.__can_reconnect = False + + @overload + def scalar( + self, + statement: TypedReturnsRows[Tuple[_T]], + parameters: Optional[_CoreSingleExecuteParams] = None, + *, + execution_options: Optional[CoreExecuteOptionsParameter] = None, + ) -> Optional[_T]: ... + + @overload + def scalar( + self, + statement: Executable, + parameters: Optional[_CoreSingleExecuteParams] = None, + *, + execution_options: Optional[CoreExecuteOptionsParameter] = None, + ) -> Any: ... + + def scalar( + self, + statement: Executable, + parameters: Optional[_CoreSingleExecuteParams] = None, + *, + execution_options: Optional[CoreExecuteOptionsParameter] = None, + ) -> Any: + r"""Executes a SQL statement construct and returns a scalar object. + + This method is shorthand for invoking the + :meth:`_engine.Result.scalar` method after invoking the + :meth:`_engine.Connection.execute` method. Parameters are equivalent. + + :return: a scalar Python value representing the first column of the + first row returned. 
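E.g., a minimal sketch assuming a hypothetical ``users_table`` :class:`_schema.Table` and an :class:`_engine.Engine` named ``engine``::

    from sqlalchemy import func, select

    with engine.connect() as conn:
        # first column of the first row; None if no rows are returned
        user_count = conn.scalar(
            select(func.count()).select_from(users_table)
        )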
+ + """ + distilled_parameters = _distill_params_20(parameters) + try: + meth = statement._execute_on_scalar + except AttributeError as err: + raise exc.ObjectNotExecutableError(statement) from err + else: + return meth( + self, + distilled_parameters, + execution_options or NO_OPTIONS, + ) + + @overload + def scalars( + self, + statement: TypedReturnsRows[Tuple[_T]], + parameters: Optional[_CoreAnyExecuteParams] = None, + *, + execution_options: Optional[CoreExecuteOptionsParameter] = None, + ) -> ScalarResult[_T]: ... + + @overload + def scalars( + self, + statement: Executable, + parameters: Optional[_CoreAnyExecuteParams] = None, + *, + execution_options: Optional[CoreExecuteOptionsParameter] = None, + ) -> ScalarResult[Any]: ... + + def scalars( + self, + statement: Executable, + parameters: Optional[_CoreAnyExecuteParams] = None, + *, + execution_options: Optional[CoreExecuteOptionsParameter] = None, + ) -> ScalarResult[Any]: + """Executes and returns a scalar result set, which yields scalar values + from the first column of each row. + + This method is equivalent to calling :meth:`_engine.Connection.execute` + to receive a :class:`_result.Result` object, then invoking the + :meth:`_result.Result.scalars` method to produce a + :class:`_result.ScalarResult` instance. + + :return: a :class:`_result.ScalarResult` + + .. versionadded:: 1.4.24 + + """ + + return self.execute( + statement, parameters, execution_options=execution_options + ).scalars() + + @overload + def execute( + self, + statement: TypedReturnsRows[_T], + parameters: Optional[_CoreAnyExecuteParams] = None, + *, + execution_options: Optional[CoreExecuteOptionsParameter] = None, + ) -> CursorResult[_T]: ... + + @overload + def execute( + self, + statement: Executable, + parameters: Optional[_CoreAnyExecuteParams] = None, + *, + execution_options: Optional[CoreExecuteOptionsParameter] = None, + ) -> CursorResult[Any]: ... + + def execute( + self, + statement: Executable, + parameters: Optional[_CoreAnyExecuteParams] = None, + *, + execution_options: Optional[CoreExecuteOptionsParameter] = None, + ) -> CursorResult[Any]: + r"""Executes a SQL statement construct and returns a + :class:`_engine.CursorResult`. + + :param statement: The statement to be executed. This is always + an object that is in both the :class:`_expression.ClauseElement` and + :class:`_expression.Executable` hierarchies, including: + + * :class:`_expression.Select` + * :class:`_expression.Insert`, :class:`_expression.Update`, + :class:`_expression.Delete` + * :class:`_expression.TextClause` and + :class:`_expression.TextualSelect` + * :class:`_schema.DDL` and objects which inherit from + :class:`_schema.ExecutableDDLElement` + + :param parameters: parameters which will be bound into the statement. + This may be either a dictionary of parameter names to values, + or a mutable sequence (e.g. a list) of dictionaries. When a + list of dictionaries is passed, the underlying statement execution + will make use of the DBAPI ``cursor.executemany()`` method. + When a single dictionary is passed, the DBAPI ``cursor.execute()`` + method will be used. + + :param execution_options: optional dictionary of execution options, + which will be associated with the statement execution. This + dictionary can provide a subset of the options that are accepted + by :meth:`_engine.Connection.execution_options`. + + :return: a :class:`_engine.Result` object. 
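E.g., a minimal sketch assuming a hypothetical ``users_table`` :class:`_schema.Table` and an :class:`_engine.Engine` named ``engine``::

    with engine.connect() as conn:
        # a single dictionary uses the DBAPI cursor.execute() path
        conn.execute(users_table.insert(), {"name": "sandy"})

        # a list of dictionaries uses the DBAPI cursor.executemany() path
        conn.execute(
            users_table.insert(),
            [{"name": "spongebob"}, {"name": "patrick"}],
        )
        conn.commit()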
+ + """ + distilled_parameters = _distill_params_20(parameters) + try: + meth = statement._execute_on_connection + except AttributeError as err: + raise exc.ObjectNotExecutableError(statement) from err + else: + return meth( + self, + distilled_parameters, + execution_options or NO_OPTIONS, + ) + + def _execute_function( + self, + func: FunctionElement[Any], + distilled_parameters: _CoreMultiExecuteParams, + execution_options: CoreExecuteOptionsParameter, + ) -> CursorResult[Any]: + """Execute a sql.FunctionElement object.""" + + return self._execute_clauseelement( + func.select(), distilled_parameters, execution_options + ) + + def _execute_default( + self, + default: DefaultGenerator, + distilled_parameters: _CoreMultiExecuteParams, + execution_options: CoreExecuteOptionsParameter, + ) -> Any: + """Execute a schema.ColumnDefault object.""" + + execution_options = self._execution_options.merge_with( + execution_options + ) + + event_multiparams: Optional[_CoreMultiExecuteParams] + event_params: Optional[_CoreAnyExecuteParams] + + # note for event handlers, the "distilled parameters" which is always + # a list of dicts is broken out into separate "multiparams" and + # "params" collections, which allows the handler to distinguish + # between an executemany and execute style set of parameters. + if self._has_events or self.engine._has_events: + ( + default, + distilled_parameters, + event_multiparams, + event_params, + ) = self._invoke_before_exec_event( + default, distilled_parameters, execution_options + ) + else: + event_multiparams = event_params = None + + try: + conn = self._dbapi_connection + if conn is None: + conn = self._revalidate_connection() + + dialect = self.dialect + ctx = dialect.execution_ctx_cls._init_default( + dialect, self, conn, execution_options + ) + except (exc.PendingRollbackError, exc.ResourceClosedError): + raise + except BaseException as e: + self._handle_dbapi_exception(e, None, None, None, None) + + ret = ctx._exec_default(None, default, None) + + if self._has_events or self.engine._has_events: + self.dispatch.after_execute( + self, + default, + event_multiparams, + event_params, + execution_options, + ret, + ) + + return ret + + def _execute_ddl( + self, + ddl: ExecutableDDLElement, + distilled_parameters: _CoreMultiExecuteParams, + execution_options: CoreExecuteOptionsParameter, + ) -> CursorResult[Any]: + """Execute a schema.DDL object.""" + + exec_opts = ddl._execution_options.merge_with( + self._execution_options, execution_options + ) + + event_multiparams: Optional[_CoreMultiExecuteParams] + event_params: Optional[_CoreSingleExecuteParams] + + if self._has_events or self.engine._has_events: + ( + ddl, + distilled_parameters, + event_multiparams, + event_params, + ) = self._invoke_before_exec_event( + ddl, distilled_parameters, exec_opts + ) + else: + event_multiparams = event_params = None + + schema_translate_map = exec_opts.get("schema_translate_map", None) + + dialect = self.dialect + + compiled = ddl.compile( + dialect=dialect, schema_translate_map=schema_translate_map + ) + ret = self._execute_context( + dialect, + dialect.execution_ctx_cls._init_ddl, + compiled, + None, + exec_opts, + compiled, + ) + if self._has_events or self.engine._has_events: + self.dispatch.after_execute( + self, + ddl, + event_multiparams, + event_params, + exec_opts, + ret, + ) + return ret + + def _invoke_before_exec_event( + self, + elem: Any, + distilled_params: _CoreMultiExecuteParams, + execution_options: _ExecuteOptions, + ) -> Tuple[ + Any, + 
_CoreMultiExecuteParams, + _CoreMultiExecuteParams, + _CoreSingleExecuteParams, + ]: + event_multiparams: _CoreMultiExecuteParams + event_params: _CoreSingleExecuteParams + + if len(distilled_params) == 1: + event_multiparams, event_params = [], distilled_params[0] + else: + event_multiparams, event_params = distilled_params, {} + + for fn in self.dispatch.before_execute: + elem, event_multiparams, event_params = fn( + self, + elem, + event_multiparams, + event_params, + execution_options, + ) + + if event_multiparams: + distilled_params = list(event_multiparams) + if event_params: + raise exc.InvalidRequestError( + "Event handler can't return non-empty multiparams " + "and params at the same time" + ) + elif event_params: + distilled_params = [event_params] + else: + distilled_params = [] + + return elem, distilled_params, event_multiparams, event_params + + def _execute_clauseelement( + self, + elem: Executable, + distilled_parameters: _CoreMultiExecuteParams, + execution_options: CoreExecuteOptionsParameter, + ) -> CursorResult[Any]: + """Execute a sql.ClauseElement object.""" + + execution_options = elem._execution_options.merge_with( + self._execution_options, execution_options + ) + + has_events = self._has_events or self.engine._has_events + if has_events: + ( + elem, + distilled_parameters, + event_multiparams, + event_params, + ) = self._invoke_before_exec_event( + elem, distilled_parameters, execution_options + ) + + if distilled_parameters: + # ensure we don't retain a link to the view object for keys() + # which links to the values, which we don't want to cache + keys = sorted(distilled_parameters[0]) + for_executemany = len(distilled_parameters) > 1 + else: + keys = [] + for_executemany = False + + dialect = self.dialect + + schema_translate_map = execution_options.get( + "schema_translate_map", None + ) + + compiled_cache: Optional[CompiledCacheType] = execution_options.get( + "compiled_cache", self.engine._compiled_cache + ) + + compiled_sql, extracted_params, cache_hit = elem._compile_w_cache( + dialect=dialect, + compiled_cache=compiled_cache, + column_keys=keys, + for_executemany=for_executemany, + schema_translate_map=schema_translate_map, + linting=self.dialect.compiler_linting | compiler.WARN_LINTING, + ) + ret = self._execute_context( + dialect, + dialect.execution_ctx_cls._init_compiled, + compiled_sql, + distilled_parameters, + execution_options, + compiled_sql, + distilled_parameters, + elem, + extracted_params, + cache_hit=cache_hit, + ) + if has_events: + self.dispatch.after_execute( + self, + elem, + event_multiparams, + event_params, + execution_options, + ret, + ) + return ret + + def _execute_compiled( + self, + compiled: Compiled, + distilled_parameters: _CoreMultiExecuteParams, + execution_options: CoreExecuteOptionsParameter = _EMPTY_EXECUTION_OPTS, + ) -> CursorResult[Any]: + """Execute a sql.Compiled object. + + TODO: why do we have this? 
likely deprecate or remove + + """ + + execution_options = compiled.execution_options.merge_with( + self._execution_options, execution_options + ) + + if self._has_events or self.engine._has_events: + ( + compiled, + distilled_parameters, + event_multiparams, + event_params, + ) = self._invoke_before_exec_event( + compiled, distilled_parameters, execution_options + ) + + dialect = self.dialect + + ret = self._execute_context( + dialect, + dialect.execution_ctx_cls._init_compiled, + compiled, + distilled_parameters, + execution_options, + compiled, + distilled_parameters, + None, + None, + ) + if self._has_events or self.engine._has_events: + self.dispatch.after_execute( + self, + compiled, + event_multiparams, + event_params, + execution_options, + ret, + ) + return ret + + def exec_driver_sql( + self, + statement: str, + parameters: Optional[_DBAPIAnyExecuteParams] = None, + execution_options: Optional[CoreExecuteOptionsParameter] = None, + ) -> CursorResult[Any]: + r"""Executes a string SQL statement on the DBAPI cursor directly, + without any SQL compilation steps. + + This can be used to pass any string directly to the + ``cursor.execute()`` method of the DBAPI in use. + + :param statement: The statement str to be executed. Bound parameters + must use the underlying DBAPI's paramstyle, such as "qmark", + "pyformat", "format", etc. + + :param parameters: represent bound parameter values to be used in the + execution. The format is one of: a dictionary of named parameters, + a tuple of positional parameters, or a list containing either + dictionaries or tuples for multiple-execute support. + + :return: a :class:`_engine.CursorResult`. + + E.g. multiple dictionaries:: + + + conn.exec_driver_sql( + "INSERT INTO table (id, value) VALUES (%(id)s, %(value)s)", + [{"id": 1, "value": "v1"}, {"id": 2, "value": "v2"}], + ) + + Single dictionary:: + + conn.exec_driver_sql( + "INSERT INTO table (id, value) VALUES (%(id)s, %(value)s)", + dict(id=1, value="v1"), + ) + + Single tuple:: + + conn.exec_driver_sql( + "INSERT INTO table (id, value) VALUES (?, ?)", (1, "v1") + ) + + .. note:: The :meth:`_engine.Connection.exec_driver_sql` method does + not participate in the + :meth:`_events.ConnectionEvents.before_execute` and + :meth:`_events.ConnectionEvents.after_execute` events. To + intercept calls to :meth:`_engine.Connection.exec_driver_sql`, use + :meth:`_events.ConnectionEvents.before_cursor_execute` and + :meth:`_events.ConnectionEvents.after_cursor_execute`. + + .. 
seealso:: + + :pep:`249` + + """ + + distilled_parameters = _distill_raw_params(parameters) + + execution_options = self._execution_options.merge_with( + execution_options + ) + + dialect = self.dialect + ret = self._execute_context( + dialect, + dialect.execution_ctx_cls._init_statement, + statement, + None, + execution_options, + statement, + distilled_parameters, + ) + + return ret + + def _execute_context( + self, + dialect: Dialect, + constructor: Callable[..., ExecutionContext], + statement: Union[str, Compiled], + parameters: Optional[_AnyMultiExecuteParams], + execution_options: _ExecuteOptions, + *args: Any, + **kw: Any, + ) -> CursorResult[Any]: + """Create an :class:`.ExecutionContext` and execute, returning + a :class:`_engine.CursorResult`.""" + + if execution_options: + yp = execution_options.get("yield_per", None) + if yp: + execution_options = execution_options.union( + {"stream_results": True, "max_row_buffer": yp} + ) + try: + conn = self._dbapi_connection + if conn is None: + conn = self._revalidate_connection() + + context = constructor( + dialect, self, conn, execution_options, *args, **kw + ) + except (exc.PendingRollbackError, exc.ResourceClosedError): + raise + except BaseException as e: + self._handle_dbapi_exception( + e, str(statement), parameters, None, None + ) + + if ( + self._transaction + and not self._transaction.is_active + or ( + self._nested_transaction + and not self._nested_transaction.is_active + ) + ): + self._invalid_transaction() + + elif self._trans_context_manager: + TransactionalContext._trans_ctx_check(self) + + if self._transaction is None: + self._autobegin() + + context.pre_exec() + + if context.execute_style is ExecuteStyle.INSERTMANYVALUES: + return self._exec_insertmany_context(dialect, context) + else: + return self._exec_single_context( + dialect, context, statement, parameters + ) + + def _exec_single_context( + self, + dialect: Dialect, + context: ExecutionContext, + statement: Union[str, Compiled], + parameters: Optional[_AnyMultiExecuteParams], + ) -> CursorResult[Any]: + """continue the _execute_context() method for a single DBAPI + cursor.execute() or cursor.executemany() call. 
+ + """ + if dialect.bind_typing is BindTyping.SETINPUTSIZES: + generic_setinputsizes = context._prepare_set_input_sizes() + + if generic_setinputsizes: + try: + dialect.do_set_input_sizes( + context.cursor, generic_setinputsizes, context + ) + except BaseException as e: + self._handle_dbapi_exception( + e, str(statement), parameters, None, context + ) + + cursor, str_statement, parameters = ( + context.cursor, + context.statement, + context.parameters, + ) + + effective_parameters: Optional[_AnyExecuteParams] + + if not context.executemany: + effective_parameters = parameters[0] + else: + effective_parameters = parameters + + if self._has_events or self.engine._has_events: + for fn in self.dispatch.before_cursor_execute: + str_statement, effective_parameters = fn( + self, + cursor, + str_statement, + effective_parameters, + context, + context.executemany, + ) + + if self._echo: + self._log_info(str_statement) + + stats = context._get_cache_stats() + + if not self.engine.hide_parameters: + self._log_info( + "[%s] %r", + stats, + sql_util._repr_params( + effective_parameters, + batches=10, + ismulti=context.executemany, + ), + ) + else: + self._log_info( + "[%s] [SQL parameters hidden due to hide_parameters=True]", + stats, + ) + + evt_handled: bool = False + try: + if context.execute_style is ExecuteStyle.EXECUTEMANY: + effective_parameters = cast( + "_CoreMultiExecuteParams", effective_parameters + ) + if self.dialect._has_events: + for fn in self.dialect.dispatch.do_executemany: + if fn( + cursor, + str_statement, + effective_parameters, + context, + ): + evt_handled = True + break + if not evt_handled: + self.dialect.do_executemany( + cursor, + str_statement, + effective_parameters, + context, + ) + elif not effective_parameters and context.no_parameters: + if self.dialect._has_events: + for fn in self.dialect.dispatch.do_execute_no_params: + if fn(cursor, str_statement, context): + evt_handled = True + break + if not evt_handled: + self.dialect.do_execute_no_params( + cursor, str_statement, context + ) + else: + effective_parameters = cast( + "_CoreSingleExecuteParams", effective_parameters + ) + if self.dialect._has_events: + for fn in self.dialect.dispatch.do_execute: + if fn( + cursor, + str_statement, + effective_parameters, + context, + ): + evt_handled = True + break + if not evt_handled: + self.dialect.do_execute( + cursor, str_statement, effective_parameters, context + ) + + if self._has_events or self.engine._has_events: + self.dispatch.after_cursor_execute( + self, + cursor, + str_statement, + effective_parameters, + context, + context.executemany, + ) + + context.post_exec() + + result = context._setup_result_proxy() + + except BaseException as e: + self._handle_dbapi_exception( + e, str_statement, effective_parameters, cursor, context + ) + + return result + + def _exec_insertmany_context( + self, + dialect: Dialect, + context: ExecutionContext, + ) -> CursorResult[Any]: + """continue the _execute_context() method for an "insertmanyvalues" + operation, which will invoke DBAPI + cursor.execute() one or more times with individual log and + event hook calls. 
+ + """ + + if dialect.bind_typing is BindTyping.SETINPUTSIZES: + generic_setinputsizes = context._prepare_set_input_sizes() + else: + generic_setinputsizes = None + + cursor, str_statement, parameters = ( + context.cursor, + context.statement, + context.parameters, + ) + + effective_parameters = parameters + + engine_events = self._has_events or self.engine._has_events + if self.dialect._has_events: + do_execute_dispatch: Iterable[Any] = ( + self.dialect.dispatch.do_execute + ) + else: + do_execute_dispatch = () + + if self._echo: + stats = context._get_cache_stats() + " (insertmanyvalues)" + + preserve_rowcount = context.execution_options.get( + "preserve_rowcount", False + ) + rowcount = 0 + + for imv_batch in dialect._deliver_insertmanyvalues_batches( + self, + cursor, + str_statement, + effective_parameters, + generic_setinputsizes, + context, + ): + if imv_batch.processed_setinputsizes: + try: + dialect.do_set_input_sizes( + context.cursor, + imv_batch.processed_setinputsizes, + context, + ) + except BaseException as e: + self._handle_dbapi_exception( + e, + sql_util._long_statement(imv_batch.replaced_statement), + imv_batch.replaced_parameters, + None, + context, + is_sub_exec=True, + ) + + sub_stmt = imv_batch.replaced_statement + sub_params = imv_batch.replaced_parameters + + if engine_events: + for fn in self.dispatch.before_cursor_execute: + sub_stmt, sub_params = fn( + self, + cursor, + sub_stmt, + sub_params, + context, + True, + ) + + if self._echo: + self._log_info(sql_util._long_statement(sub_stmt)) + + imv_stats = f""" {imv_batch.batchnum}/{ + imv_batch.total_batches + } ({ + 'ordered' + if imv_batch.rows_sorted else 'unordered' + }{ + '; batch not supported' + if imv_batch.is_downgraded + else '' + })""" + + if imv_batch.batchnum == 1: + stats += imv_stats + else: + stats = f"insertmanyvalues{imv_stats}" + + if not self.engine.hide_parameters: + self._log_info( + "[%s] %r", + stats, + sql_util._repr_params( + sub_params, + batches=10, + ismulti=False, + ), + ) + else: + self._log_info( + "[%s] [SQL parameters hidden due to " + "hide_parameters=True]", + stats, + ) + + try: + for fn in do_execute_dispatch: + if fn( + cursor, + sub_stmt, + sub_params, + context, + ): + break + else: + dialect.do_execute( + cursor, + sub_stmt, + sub_params, + context, + ) + + except BaseException as e: + self._handle_dbapi_exception( + e, + sql_util._long_statement(sub_stmt), + sub_params, + cursor, + context, + is_sub_exec=True, + ) + + if engine_events: + self.dispatch.after_cursor_execute( + self, + cursor, + str_statement, + effective_parameters, + context, + context.executemany, + ) + + if preserve_rowcount: + rowcount += imv_batch.current_batch_size + + try: + context.post_exec() + + if preserve_rowcount: + context._rowcount = rowcount # type: ignore[attr-defined] + + result = context._setup_result_proxy() + + except BaseException as e: + self._handle_dbapi_exception( + e, str_statement, effective_parameters, cursor, context + ) + + return result + + def _cursor_execute( + self, + cursor: DBAPICursor, + statement: str, + parameters: _DBAPISingleExecuteParams, + context: Optional[ExecutionContext] = None, + ) -> None: + """Execute a statement + params on the given cursor. + + Adds appropriate logging and exception handling. + + This method is used by DefaultDialect for special-case + executions, such as for sequences and column defaults. + The path of statement execution in the majority of cases + terminates at _execute_context(). 
+ + """ + if self._has_events or self.engine._has_events: + for fn in self.dispatch.before_cursor_execute: + statement, parameters = fn( + self, cursor, statement, parameters, context, False + ) + + if self._echo: + self._log_info(statement) + self._log_info("[raw sql] %r", parameters) + try: + for fn in ( + () + if not self.dialect._has_events + else self.dialect.dispatch.do_execute + ): + if fn(cursor, statement, parameters, context): + break + else: + self.dialect.do_execute(cursor, statement, parameters, context) + except BaseException as e: + self._handle_dbapi_exception( + e, statement, parameters, cursor, context + ) + + if self._has_events or self.engine._has_events: + self.dispatch.after_cursor_execute( + self, cursor, statement, parameters, context, False + ) + + def _safe_close_cursor(self, cursor: DBAPICursor) -> None: + """Close the given cursor, catching exceptions + and turning into log warnings. + + """ + try: + cursor.close() + except Exception: + # log the error through the connection pool's logger. + self.engine.pool.logger.error( + "Error closing cursor", exc_info=True + ) + + _reentrant_error = False + _is_disconnect = False + + def _handle_dbapi_exception( + self, + e: BaseException, + statement: Optional[str], + parameters: Optional[_AnyExecuteParams], + cursor: Optional[DBAPICursor], + context: Optional[ExecutionContext], + is_sub_exec: bool = False, + ) -> NoReturn: + exc_info = sys.exc_info() + + is_exit_exception = util.is_exit_exception(e) + + if not self._is_disconnect: + self._is_disconnect = ( + isinstance(e, self.dialect.loaded_dbapi.Error) + and not self.closed + and self.dialect.is_disconnect( + e, + self._dbapi_connection if not self.invalidated else None, + cursor, + ) + ) or (is_exit_exception and not self.closed) + + invalidate_pool_on_disconnect = not is_exit_exception + + ismulti: bool = ( + not is_sub_exec and context.executemany + if context is not None + else False + ) + if self._reentrant_error: + raise exc.DBAPIError.instance( + statement, + parameters, + e, + self.dialect.loaded_dbapi.Error, + hide_parameters=self.engine.hide_parameters, + dialect=self.dialect, + ismulti=ismulti, + ).with_traceback(exc_info[2]) from e + self._reentrant_error = True + try: + # non-DBAPI error - if we already got a context, + # or there's no string statement, don't wrap it + should_wrap = isinstance(e, self.dialect.loaded_dbapi.Error) or ( + statement is not None + and context is None + and not is_exit_exception + ) + + if should_wrap: + sqlalchemy_exception = exc.DBAPIError.instance( + statement, + parameters, + cast(Exception, e), + self.dialect.loaded_dbapi.Error, + hide_parameters=self.engine.hide_parameters, + connection_invalidated=self._is_disconnect, + dialect=self.dialect, + ismulti=ismulti, + ) + else: + sqlalchemy_exception = None + + newraise = None + + if (self.dialect._has_events) and not self._execution_options.get( + "skip_user_error_events", False + ): + ctx = ExceptionContextImpl( + e, + sqlalchemy_exception, + self.engine, + self.dialect, + self, + cursor, + statement, + parameters, + context, + self._is_disconnect, + invalidate_pool_on_disconnect, + False, + ) + + for fn in self.dialect.dispatch.handle_error: + try: + # handler returns an exception; + # call next handler in a chain + per_fn = fn(ctx) + if per_fn is not None: + ctx.chained_exception = newraise = per_fn + except Exception as _raised: + # handler raises an exception - stop processing + newraise = _raised + break + + if self._is_disconnect != ctx.is_disconnect: + 
self._is_disconnect = ctx.is_disconnect + if sqlalchemy_exception: + sqlalchemy_exception.connection_invalidated = ( + ctx.is_disconnect + ) + + # set up potentially user-defined value for + # invalidate pool. + invalidate_pool_on_disconnect = ( + ctx.invalidate_pool_on_disconnect + ) + + if should_wrap and context: + context.handle_dbapi_exception(e) + + if not self._is_disconnect: + if cursor: + self._safe_close_cursor(cursor) + # "autorollback" was mostly relevant in 1.x series. + # It's very unlikely to reach here, as the connection + # does autobegin so when we are here, we are usually + # in an explicit / semi-explicit transaction. + # however we have a test which manufactures this + # scenario in any case using an event handler. + # test/engine/test_execute.py-> test_actual_autorollback + if not self.in_transaction(): + self._rollback_impl() + + if newraise: + raise newraise.with_traceback(exc_info[2]) from e + elif should_wrap: + assert sqlalchemy_exception is not None + raise sqlalchemy_exception.with_traceback(exc_info[2]) from e + else: + assert exc_info[1] is not None + raise exc_info[1].with_traceback(exc_info[2]) + finally: + del self._reentrant_error + if self._is_disconnect: + del self._is_disconnect + if not self.invalidated: + dbapi_conn_wrapper = self._dbapi_connection + assert dbapi_conn_wrapper is not None + if invalidate_pool_on_disconnect: + self.engine.pool._invalidate(dbapi_conn_wrapper, e) + self.invalidate(e) + + @classmethod + def _handle_dbapi_exception_noconnection( + cls, + e: BaseException, + dialect: Dialect, + engine: Optional[Engine] = None, + is_disconnect: Optional[bool] = None, + invalidate_pool_on_disconnect: bool = True, + is_pre_ping: bool = False, + ) -> NoReturn: + exc_info = sys.exc_info() + + if is_disconnect is None: + is_disconnect = isinstance( + e, dialect.loaded_dbapi.Error + ) and dialect.is_disconnect(e, None, None) + + should_wrap = isinstance(e, dialect.loaded_dbapi.Error) + + if should_wrap: + sqlalchemy_exception = exc.DBAPIError.instance( + None, + None, + cast(Exception, e), + dialect.loaded_dbapi.Error, + hide_parameters=( + engine.hide_parameters if engine is not None else False + ), + connection_invalidated=is_disconnect, + dialect=dialect, + ) + else: + sqlalchemy_exception = None + + newraise = None + + if dialect._has_events: + ctx = ExceptionContextImpl( + e, + sqlalchemy_exception, + engine, + dialect, + None, + None, + None, + None, + None, + is_disconnect, + invalidate_pool_on_disconnect, + is_pre_ping, + ) + for fn in dialect.dispatch.handle_error: + try: + # handler returns an exception; + # call next handler in a chain + per_fn = fn(ctx) + if per_fn is not None: + ctx.chained_exception = newraise = per_fn + except Exception as _raised: + # handler raises an exception - stop processing + newraise = _raised + break + + if sqlalchemy_exception and is_disconnect != ctx.is_disconnect: + sqlalchemy_exception.connection_invalidated = ctx.is_disconnect + + if newraise: + raise newraise.with_traceback(exc_info[2]) from e + elif should_wrap: + assert sqlalchemy_exception is not None + raise sqlalchemy_exception.with_traceback(exc_info[2]) from e + else: + assert exc_info[1] is not None + raise exc_info[1].with_traceback(exc_info[2]) + + def _run_ddl_visitor( + self, + visitorcallable: Type[InvokeDDLBase], + element: SchemaVisitable, + **kwargs: Any, + ) -> None: + """run a DDL visitor. + + This method is only here so that the MockConnection can change the + options given to the visitor so that "checkfirst" is skipped. 
+ + """ + visitorcallable( + dialect=self.dialect, connection=self, **kwargs + ).traverse_single(element) + + +class ExceptionContextImpl(ExceptionContext): + """Implement the :class:`.ExceptionContext` interface.""" + + __slots__ = ( + "connection", + "engine", + "dialect", + "cursor", + "statement", + "parameters", + "original_exception", + "sqlalchemy_exception", + "chained_exception", + "execution_context", + "is_disconnect", + "invalidate_pool_on_disconnect", + "is_pre_ping", + ) + + def __init__( + self, + exception: BaseException, + sqlalchemy_exception: Optional[exc.StatementError], + engine: Optional[Engine], + dialect: Dialect, + connection: Optional[Connection], + cursor: Optional[DBAPICursor], + statement: Optional[str], + parameters: Optional[_DBAPIAnyExecuteParams], + context: Optional[ExecutionContext], + is_disconnect: bool, + invalidate_pool_on_disconnect: bool, + is_pre_ping: bool, + ): + self.engine = engine + self.dialect = dialect + self.connection = connection + self.sqlalchemy_exception = sqlalchemy_exception + self.original_exception = exception + self.execution_context = context + self.statement = statement + self.parameters = parameters + self.is_disconnect = is_disconnect + self.invalidate_pool_on_disconnect = invalidate_pool_on_disconnect + self.is_pre_ping = is_pre_ping + + +class Transaction(TransactionalContext): + """Represent a database transaction in progress. + + The :class:`.Transaction` object is procured by + calling the :meth:`_engine.Connection.begin` method of + :class:`_engine.Connection`:: + + from sqlalchemy import create_engine + + engine = create_engine("postgresql+psycopg2://scott:tiger@localhost/test") + connection = engine.connect() + trans = connection.begin() + connection.execute(text("insert into x (a, b) values (1, 2)")) + trans.commit() + + The object provides :meth:`.rollback` and :meth:`.commit` + methods in order to control transaction boundaries. It + also implements a context manager interface so that + the Python ``with`` statement can be used with the + :meth:`_engine.Connection.begin` method:: + + with connection.begin(): + connection.execute(text("insert into x (a, b) values (1, 2)")) + + The Transaction object is **not** threadsafe. + + .. seealso:: + + :meth:`_engine.Connection.begin` + + :meth:`_engine.Connection.begin_twophase` + + :meth:`_engine.Connection.begin_nested` + + .. index:: + single: thread safety; Transaction + """ # noqa + + __slots__ = () + + _is_root: bool = False + is_active: bool + connection: Connection + + def __init__(self, connection: Connection): + raise NotImplementedError() + + @property + def _deactivated_from_connection(self) -> bool: + """True if this transaction is totally deactivated from the connection + and therefore can no longer affect its state. + + """ + raise NotImplementedError() + + def _do_close(self) -> None: + raise NotImplementedError() + + def _do_rollback(self) -> None: + raise NotImplementedError() + + def _do_commit(self) -> None: + raise NotImplementedError() + + @property + def is_valid(self) -> bool: + return self.is_active and not self.connection.invalidated + + def close(self) -> None: + """Close this :class:`.Transaction`. + + If this transaction is the base transaction in a begin/commit + nesting, the transaction will rollback(). Otherwise, the + method returns. + + This is used to cancel a Transaction without affecting the scope of + an enclosing transaction. 
+ + """ + try: + self._do_close() + finally: + assert not self.is_active + + def rollback(self) -> None: + """Roll back this :class:`.Transaction`. + + The implementation of this may vary based on the type of transaction in + use: + + * For a simple database transaction (e.g. :class:`.RootTransaction`), + it corresponds to a ROLLBACK. + + * For a :class:`.NestedTransaction`, it corresponds to a + "ROLLBACK TO SAVEPOINT" operation. + + * For a :class:`.TwoPhaseTransaction`, DBAPI-specific methods for two + phase transactions may be used. + + + """ + try: + self._do_rollback() + finally: + assert not self.is_active + + def commit(self) -> None: + """Commit this :class:`.Transaction`. + + The implementation of this may vary based on the type of transaction in + use: + + * For a simple database transaction (e.g. :class:`.RootTransaction`), + it corresponds to a COMMIT. + + * For a :class:`.NestedTransaction`, it corresponds to a + "RELEASE SAVEPOINT" operation. + + * For a :class:`.TwoPhaseTransaction`, DBAPI-specific methods for two + phase transactions may be used. + + """ + try: + self._do_commit() + finally: + assert not self.is_active + + def _get_subject(self) -> Connection: + return self.connection + + def _transaction_is_active(self) -> bool: + return self.is_active + + def _transaction_is_closed(self) -> bool: + return not self._deactivated_from_connection + + def _rollback_can_be_called(self) -> bool: + # for RootTransaction / NestedTransaction, it's safe to call + # rollback() even if the transaction is deactive and no warnings + # will be emitted. tested in + # test_transaction.py -> test_no_rollback_in_deactive(?:_savepoint)? + return True + + +class RootTransaction(Transaction): + """Represent the "root" transaction on a :class:`_engine.Connection`. + + This corresponds to the current "BEGIN/COMMIT/ROLLBACK" that's occurring + for the :class:`_engine.Connection`. The :class:`_engine.RootTransaction` + is created by calling upon the :meth:`_engine.Connection.begin` method, and + remains associated with the :class:`_engine.Connection` throughout its + active span. The current :class:`_engine.RootTransaction` in use is + accessible via the :attr:`_engine.Connection.get_transaction` method of + :class:`_engine.Connection`. + + In :term:`2.0 style` use, the :class:`_engine.Connection` also employs + "autobegin" behavior that will create a new + :class:`_engine.RootTransaction` whenever a connection in a + non-transactional state is used to emit commands on the DBAPI connection. + The scope of the :class:`_engine.RootTransaction` in 2.0 style + use can be controlled using the :meth:`_engine.Connection.commit` and + :meth:`_engine.Connection.rollback` methods. 
+ + + """ + + _is_root = True + + __slots__ = ("connection", "is_active") + + def __init__(self, connection: Connection): + assert connection._transaction is None + if connection._trans_context_manager: + TransactionalContext._trans_ctx_check(connection) + self.connection = connection + self._connection_begin_impl() + connection._transaction = self + + self.is_active = True + + def _deactivate_from_connection(self) -> None: + if self.is_active: + assert self.connection._transaction is self + self.is_active = False + + elif self.connection._transaction is not self: + util.warn("transaction already deassociated from connection") + + @property + def _deactivated_from_connection(self) -> bool: + return self.connection._transaction is not self + + def _connection_begin_impl(self) -> None: + self.connection._begin_impl(self) + + def _connection_rollback_impl(self) -> None: + self.connection._rollback_impl() + + def _connection_commit_impl(self) -> None: + self.connection._commit_impl() + + def _close_impl(self, try_deactivate: bool = False) -> None: + try: + if self.is_active: + self._connection_rollback_impl() + + if self.connection._nested_transaction: + self.connection._nested_transaction._cancel() + finally: + if self.is_active or try_deactivate: + self._deactivate_from_connection() + if self.connection._transaction is self: + self.connection._transaction = None + + assert not self.is_active + assert self.connection._transaction is not self + + def _do_close(self) -> None: + self._close_impl() + + def _do_rollback(self) -> None: + self._close_impl(try_deactivate=True) + + def _do_commit(self) -> None: + if self.is_active: + assert self.connection._transaction is self + + try: + self._connection_commit_impl() + finally: + # whether or not commit succeeds, cancel any + # nested transactions, make this transaction "inactive" + # and remove it as a reset agent + if self.connection._nested_transaction: + self.connection._nested_transaction._cancel() + + self._deactivate_from_connection() + + # ...however only remove as the connection's current transaction + # if commit succeeded. otherwise it stays on so that a rollback + # needs to occur. + self.connection._transaction = None + else: + if self.connection._transaction is self: + self.connection._invalid_transaction() + else: + raise exc.InvalidRequestError("This transaction is inactive") + + assert not self.is_active + assert self.connection._transaction is not self + + +class NestedTransaction(Transaction): + """Represent a 'nested', or SAVEPOINT transaction. + + The :class:`.NestedTransaction` object is created by calling the + :meth:`_engine.Connection.begin_nested` method of + :class:`_engine.Connection`. + + When using :class:`.NestedTransaction`, the semantics of "begin" / + "commit" / "rollback" are as follows: + + * the "begin" operation corresponds to the "BEGIN SAVEPOINT" command, where + the savepoint is given an explicit name that is part of the state + of this object. + + * The :meth:`.NestedTransaction.commit` method corresponds to a + "RELEASE SAVEPOINT" operation, using the savepoint identifier associated + with this :class:`.NestedTransaction`. + + * The :meth:`.NestedTransaction.rollback` method corresponds to a + "ROLLBACK TO SAVEPOINT" operation, using the savepoint identifier + associated with this :class:`.NestedTransaction`. + + The rationale for mimicking the semantics of an outer transaction in + terms of savepoints so that code may deal with a "savepoint" transaction + and an "outer" transaction in an agnostic way. 
+ + .. seealso:: + + :ref:`session_begin_nested` - ORM version of the SAVEPOINT API. + + """ + + __slots__ = ("connection", "is_active", "_savepoint", "_previous_nested") + + _savepoint: str + + def __init__(self, connection: Connection): + assert connection._transaction is not None + if connection._trans_context_manager: + TransactionalContext._trans_ctx_check(connection) + self.connection = connection + self._savepoint = self.connection._savepoint_impl() + self.is_active = True + self._previous_nested = connection._nested_transaction + connection._nested_transaction = self + + def _deactivate_from_connection(self, warn: bool = True) -> None: + if self.connection._nested_transaction is self: + self.connection._nested_transaction = self._previous_nested + elif warn: + util.warn( + "nested transaction already deassociated from connection" + ) + + @property + def _deactivated_from_connection(self) -> bool: + return self.connection._nested_transaction is not self + + def _cancel(self) -> None: + # called by RootTransaction when the outer transaction is + # committed, rolled back, or closed to cancel all savepoints + # without any action being taken + self.is_active = False + self._deactivate_from_connection() + if self._previous_nested: + self._previous_nested._cancel() + + def _close_impl( + self, deactivate_from_connection: bool, warn_already_deactive: bool + ) -> None: + try: + if ( + self.is_active + and self.connection._transaction + and self.connection._transaction.is_active + ): + self.connection._rollback_to_savepoint_impl(self._savepoint) + finally: + self.is_active = False + + if deactivate_from_connection: + self._deactivate_from_connection(warn=warn_already_deactive) + + assert not self.is_active + if deactivate_from_connection: + assert self.connection._nested_transaction is not self + + def _do_close(self) -> None: + self._close_impl(True, False) + + def _do_rollback(self) -> None: + self._close_impl(True, True) + + def _do_commit(self) -> None: + if self.is_active: + try: + self.connection._release_savepoint_impl(self._savepoint) + finally: + # nested trans becomes inactive on failed release + # unconditionally. this prevents it from trying to + # emit SQL when it rolls back. + self.is_active = False + + # but only de-associate from connection if it succeeded + self._deactivate_from_connection() + else: + if self.connection._nested_transaction is self: + self.connection._invalid_transaction() + else: + raise exc.InvalidRequestError( + "This nested transaction is inactive" + ) + + +class TwoPhaseTransaction(RootTransaction): + """Represent a two-phase transaction. + + A new :class:`.TwoPhaseTransaction` object may be procured + using the :meth:`_engine.Connection.begin_twophase` method. + + The interface is the same as that of :class:`.Transaction` + with the addition of the :meth:`prepare` method. + + """ + + __slots__ = ("xid", "_is_prepared") + + xid: Any + + def __init__(self, connection: Connection, xid: Any): + self._is_prepared = False + self.xid = xid + super().__init__(connection) + + def prepare(self) -> None: + """Prepare this :class:`.TwoPhaseTransaction`. + + After a PREPARE, the transaction can be committed. 
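A minimal sketch of the full two-phase sequence (assuming an existing ``connection`` on a backend with two-phase support, and ``text`` imported from ``sqlalchemy``)::

    trans = connection.begin_twophase()  # an xid is generated by the dialect
    connection.execute(text("insert into x (a, b) values (1, 2)"))
    trans.prepare()  # first phase: PREPARE
    trans.commit()   # second phase: commit the prepared transaction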
+ + """ + if not self.is_active: + raise exc.InvalidRequestError("This transaction is inactive") + self.connection._prepare_twophase_impl(self.xid) + self._is_prepared = True + + def _connection_begin_impl(self) -> None: + self.connection._begin_twophase_impl(self) + + def _connection_rollback_impl(self) -> None: + self.connection._rollback_twophase_impl(self.xid, self._is_prepared) + + def _connection_commit_impl(self) -> None: + self.connection._commit_twophase_impl(self.xid, self._is_prepared) + + +class Engine( + ConnectionEventsTarget, log.Identified, inspection.Inspectable["Inspector"] +): + """ + Connects a :class:`~sqlalchemy.pool.Pool` and + :class:`~sqlalchemy.engine.interfaces.Dialect` together to provide a + source of database connectivity and behavior. + + An :class:`_engine.Engine` object is instantiated publicly using the + :func:`~sqlalchemy.create_engine` function. + + .. seealso:: + + :doc:`/core/engines` + + :ref:`connections_toplevel` + + """ + + dispatch: dispatcher[ConnectionEventsTarget] + + _compiled_cache: Optional[CompiledCacheType] + + _execution_options: _ExecuteOptions = _EMPTY_EXECUTION_OPTS + _has_events: bool = False + _connection_cls: Type[Connection] = Connection + _sqla_logger_namespace: str = "sqlalchemy.engine.Engine" + _is_future: bool = False + + _schema_translate_map: Optional[SchemaTranslateMapType] = None + _option_cls: Type[OptionEngine] + + dialect: Dialect + pool: Pool + url: URL + hide_parameters: bool + + def __init__( + self, + pool: Pool, + dialect: Dialect, + url: URL, + logging_name: Optional[str] = None, + echo: Optional[_EchoFlagType] = None, + query_cache_size: int = 500, + execution_options: Optional[Mapping[str, Any]] = None, + hide_parameters: bool = False, + ): + self.pool = pool + self.url = url + self.dialect = dialect + if logging_name: + self.logging_name = logging_name + self.echo = echo + self.hide_parameters = hide_parameters + if query_cache_size != 0: + self._compiled_cache = util.LRUCache( + query_cache_size, size_alert=self._lru_size_alert + ) + else: + self._compiled_cache = None + log.instance_logger(self, echoflag=echo) + if execution_options: + self.update_execution_options(**execution_options) + + def _lru_size_alert(self, cache: util.LRUCache[Any, Any]) -> None: + if self._should_log_info(): + self.logger.info( + "Compiled cache size pruning from %d items to %d. " + "Increase cache size to reduce the frequency of pruning.", + len(cache), + cache.capacity, + ) + + @property + def engine(self) -> Engine: + """Returns this :class:`.Engine`. + + Used for legacy schemes that accept :class:`.Connection` / + :class:`.Engine` objects within the same variable. + + """ + return self + + def clear_compiled_cache(self) -> None: + """Clear the compiled cache associated with the dialect. + + This applies **only** to the built-in cache that is established + via the :paramref:`_engine.create_engine.query_cache_size` parameter. + It will not impact any dictionary caches that were passed via the + :paramref:`.Connection.execution_options.compiled_cache` parameter. + + .. versionadded:: 1.4 + + """ + if self._compiled_cache: + self._compiled_cache.clear() + + def update_execution_options(self, **opt: Any) -> None: + r"""Update the default execution_options dictionary + of this :class:`_engine.Engine`. + + The given keys/values in \**opt are added to the + default execution options that will be used for + all connections. 
The initial contents of this dictionary + can be sent via the ``execution_options`` parameter + to :func:`_sa.create_engine`. + + .. seealso:: + + :meth:`_engine.Connection.execution_options` + + :meth:`_engine.Engine.execution_options` + + """ + self.dispatch.set_engine_execution_options(self, opt) + self._execution_options = self._execution_options.union(opt) + self.dialect.set_engine_execution_options(self, opt) + + @overload + def execution_options( + self, + *, + compiled_cache: Optional[CompiledCacheType] = ..., + logging_token: str = ..., + isolation_level: IsolationLevel = ..., + insertmanyvalues_page_size: int = ..., + schema_translate_map: Optional[SchemaTranslateMapType] = ..., + **opt: Any, + ) -> OptionEngine: ... + + @overload + def execution_options(self, **opt: Any) -> OptionEngine: ... + + def execution_options(self, **opt: Any) -> OptionEngine: + """Return a new :class:`_engine.Engine` that will provide + :class:`_engine.Connection` objects with the given execution options. + + The returned :class:`_engine.Engine` remains related to the original + :class:`_engine.Engine` in that it shares the same connection pool and + other state: + + * The :class:`_pool.Pool` used by the new :class:`_engine.Engine` + is the + same instance. The :meth:`_engine.Engine.dispose` + method will replace + the connection pool instance for the parent engine as well + as this one. + * Event listeners are "cascaded" - meaning, the new + :class:`_engine.Engine` + inherits the events of the parent, and new events can be associated + with the new :class:`_engine.Engine` individually. + * The logging configuration and logging_name is copied from the parent + :class:`_engine.Engine`. + + The intent of the :meth:`_engine.Engine.execution_options` method is + to implement schemes where multiple :class:`_engine.Engine` + objects refer to the same connection pool, but are differentiated + by options that affect some execution-level behavior for each + engine. One such example is breaking into separate "reader" and + "writer" :class:`_engine.Engine` instances, where one + :class:`_engine.Engine` + has a lower :term:`isolation level` setting configured or is even + transaction-disabled using "autocommit". An example of this + configuration is at :ref:`dbapi_autocommit_multiple`. + + Another example is one that + uses a custom option ``shard_id`` which is consumed by an event + to change the current schema on a database connection:: + + from sqlalchemy import event + from sqlalchemy.engine import Engine + + primary_engine = create_engine("mysql+mysqldb://") + shard1 = primary_engine.execution_options(shard_id="shard1") + shard2 = primary_engine.execution_options(shard_id="shard2") + + shards = {"default": "base", "shard_1": "db1", "shard_2": "db2"} + + + @event.listens_for(Engine, "before_cursor_execute") + def _switch_shard(conn, cursor, stmt, params, context, executemany): + shard_id = conn.get_execution_options().get("shard_id", "default") + current_shard = conn.info.get("current_shard", None) + + if current_shard != shard_id: + cursor.execute("use %s" % shards[shard_id]) + conn.info["current_shard"] = shard_id + + The above recipe illustrates two :class:`_engine.Engine` objects that + will each serve as factories for :class:`_engine.Connection` objects + that have pre-established "shard_id" execution options present. 
A + :meth:`_events.ConnectionEvents.before_cursor_execute` event handler + then interprets this execution option to emit a MySQL ``use`` statement + to switch databases before a statement execution, while at the same + time keeping track of which database we've established using the + :attr:`_engine.Connection.info` dictionary. + + .. seealso:: + + :meth:`_engine.Connection.execution_options` + - update execution options + on a :class:`_engine.Connection` object. + + :meth:`_engine.Engine.update_execution_options` + - update the execution + options for a given :class:`_engine.Engine` in place. + + :meth:`_engine.Engine.get_execution_options` + + + """ # noqa: E501 + return self._option_cls(self, opt) + + def get_execution_options(self) -> _ExecuteOptions: + """Get the non-SQL options which will take effect during execution. + + .. versionadded: 1.3 + + .. seealso:: + + :meth:`_engine.Engine.execution_options` + """ + return self._execution_options + + @property + def name(self) -> str: + """String name of the :class:`~sqlalchemy.engine.interfaces.Dialect` + in use by this :class:`Engine`. + + """ + + return self.dialect.name + + @property + def driver(self) -> str: + """Driver name of the :class:`~sqlalchemy.engine.interfaces.Dialect` + in use by this :class:`Engine`. + + """ + + return self.dialect.driver + + echo = log.echo_property() + + def __repr__(self) -> str: + return "Engine(%r)" % (self.url,) + + def dispose(self, close: bool = True) -> None: + """Dispose of the connection pool used by this + :class:`_engine.Engine`. + + A new connection pool is created immediately after the old one has been + disposed. The previous connection pool is disposed either actively, by + closing out all currently checked-in connections in that pool, or + passively, by losing references to it but otherwise not closing any + connections. The latter strategy is more appropriate for an initializer + in a forked Python process. + + :param close: if left at its default of ``True``, has the + effect of fully closing all **currently checked in** + database connections. Connections that are still checked out + will **not** be closed, however they will no longer be associated + with this :class:`_engine.Engine`, + so when they are closed individually, eventually the + :class:`_pool.Pool` which they are associated with will + be garbage collected and they will be closed out fully, if + not already closed on checkin. + + If set to ``False``, the previous connection pool is de-referenced, + and otherwise not touched in any way. + + .. versionadded:: 1.4.33 Added the :paramref:`.Engine.dispose.close` + parameter to allow the replacement of a connection pool in a child + process without interfering with the connections used by the parent + process. + + + .. seealso:: + + :ref:`engine_disposal` + + :ref:`pooling_multiprocessing` + + """ + if close: + self.pool.dispose() + self.pool = self.pool.recreate() + self.dispatch.engine_disposed(self) + + @contextlib.contextmanager + def _optional_conn_ctx_manager( + self, connection: Optional[Connection] = None + ) -> Iterator[Connection]: + if connection is None: + with self.connect() as conn: + yield conn + else: + yield connection + + @contextlib.contextmanager + def begin(self) -> Iterator[Connection]: + """Return a context manager delivering a :class:`_engine.Connection` + with a :class:`.Transaction` established. 
+ + E.g.:: + + with engine.begin() as conn: + conn.execute(text("insert into table (x, y, z) values (1, 2, 3)")) + conn.execute(text("my_special_procedure(5)")) + + Upon successful operation, the :class:`.Transaction` + is committed. If an error is raised, the :class:`.Transaction` + is rolled back. + + .. seealso:: + + :meth:`_engine.Engine.connect` - procure a + :class:`_engine.Connection` from + an :class:`_engine.Engine`. + + :meth:`_engine.Connection.begin` - start a :class:`.Transaction` + for a particular :class:`_engine.Connection`. + + """ # noqa: E501 + with self.connect() as conn: + with conn.begin(): + yield conn + + def _run_ddl_visitor( + self, + visitorcallable: Type[InvokeDDLBase], + element: SchemaVisitable, + **kwargs: Any, + ) -> None: + with self.begin() as conn: + conn._run_ddl_visitor(visitorcallable, element, **kwargs) + + def connect(self) -> Connection: + """Return a new :class:`_engine.Connection` object. + + The :class:`_engine.Connection` acts as a Python context manager, so + the typical use of this method looks like:: + + with engine.connect() as connection: + connection.execute(text("insert into table values ('foo')")) + connection.commit() + + Where above, after the block is completed, the connection is "closed" + and its underlying DBAPI resources are returned to the connection pool. + This also has the effect of rolling back any transaction that + was explicitly begun or was begun via autobegin, and will + emit the :meth:`_events.ConnectionEvents.rollback` event if one was + started and is still in progress. + + .. seealso:: + + :meth:`_engine.Engine.begin` + + """ + + return self._connection_cls(self) + + def raw_connection(self) -> PoolProxiedConnection: + """Return a "raw" DBAPI connection from the connection pool. + + The returned object is a proxied version of the DBAPI + connection object used by the underlying driver in use. + The object will have all the same behavior as the real DBAPI + connection, except that its ``close()`` method will result in the + connection being returned to the pool, rather than being closed + for real. + + This method provides direct DBAPI connection access for + special situations when the API provided by + :class:`_engine.Connection` + is not needed. When a :class:`_engine.Connection` object is already + present, the DBAPI connection is available using + the :attr:`_engine.Connection.connection` accessor. + + .. seealso:: + + :ref:`dbapi_connections` + + """ + return self.pool.connect() + + +class OptionEngineMixin(log.Identified): + _sa_propagate_class_events = False + + dispatch: dispatcher[ConnectionEventsTarget] + _compiled_cache: Optional[CompiledCacheType] + dialect: Dialect + pool: Pool + url: URL + hide_parameters: bool + echo: log.echo_property + + def __init__( + self, proxied: Engine, execution_options: CoreExecuteOptionsParameter + ): + self._proxied = proxied + self.url = proxied.url + self.dialect = proxied.dialect + self.logging_name = proxied.logging_name + self.echo = proxied.echo + self._compiled_cache = proxied._compiled_cache + self.hide_parameters = proxied.hide_parameters + log.instance_logger(self, echoflag=self.echo) + + # note: this will propagate events that are assigned to the parent + # engine after this OptionEngine is created. Since we share + # the events of the parent we also disallow class-level events + # to apply to the OptionEngine class directly. 
+ # + # the other way this can work would be to transfer existing + # events only, using: + # self.dispatch._update(proxied.dispatch) + # + # that might be more appropriate however it would be a behavioral + # change for logic that assigns events to the parent engine and + # would like it to take effect for the already-created sub-engine. + self.dispatch = self.dispatch._join(proxied.dispatch) + + self._execution_options = proxied._execution_options + self.update_execution_options(**execution_options) + + def update_execution_options(self, **opt: Any) -> None: + raise NotImplementedError() + + if not typing.TYPE_CHECKING: + # https://github.com/python/typing/discussions/1095 + + @property + def pool(self) -> Pool: + return self._proxied.pool + + @pool.setter + def pool(self, pool: Pool) -> None: + self._proxied.pool = pool + + @property + def _has_events(self) -> bool: + return self._proxied._has_events or self.__dict__.get( + "_has_events", False + ) + + @_has_events.setter + def _has_events(self, value: bool) -> None: + self.__dict__["_has_events"] = value + + +class OptionEngine(OptionEngineMixin, Engine): + def update_execution_options(self, **opt: Any) -> None: + Engine.update_execution_options(self, **opt) + + +Engine._option_cls = OptionEngine diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/engine/characteristics.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/engine/characteristics.py new file mode 100644 index 0000000000000000000000000000000000000000..322c28b5aa714a80e3821aa67effa0e5ed44cd2b --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/engine/characteristics.py @@ -0,0 +1,155 @@ +# engine/characteristics.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php +from __future__ import annotations + +import abc +import typing +from typing import Any +from typing import ClassVar + +if typing.TYPE_CHECKING: + from .base import Connection + from .interfaces import DBAPIConnection + from .interfaces import Dialect + + +class ConnectionCharacteristic(abc.ABC): + """An abstract base for an object that can set, get and reset a + per-connection characteristic, typically one that gets reset when the + connection is returned to the connection pool. + + transaction isolation is the canonical example, and the + ``IsolationLevelCharacteristic`` implementation provides this for the + ``DefaultDialect``. + + The ``ConnectionCharacteristic`` class should call upon the ``Dialect`` for + the implementation of each method. The object exists strictly to serve as + a dialect visitor that can be placed into the + ``DefaultDialect.connection_characteristics`` dictionary where it will take + effect for calls to :meth:`_engine.Connection.execution_options` and + related APIs. + + .. 
versionadded:: 1.4 + + """ + + __slots__ = () + + transactional: ClassVar[bool] = False + + @abc.abstractmethod + def reset_characteristic( + self, dialect: Dialect, dbapi_conn: DBAPIConnection + ) -> None: + """Reset the characteristic on the DBAPI connection to its default + value.""" + + @abc.abstractmethod + def set_characteristic( + self, dialect: Dialect, dbapi_conn: DBAPIConnection, value: Any + ) -> None: + """set characteristic on the DBAPI connection to a given value.""" + + def set_connection_characteristic( + self, + dialect: Dialect, + conn: Connection, + dbapi_conn: DBAPIConnection, + value: Any, + ) -> None: + """set characteristic on the :class:`_engine.Connection` to a given + value. + + .. versionadded:: 2.0.30 - added to support elements that are local + to the :class:`_engine.Connection` itself. + + """ + self.set_characteristic(dialect, dbapi_conn, value) + + @abc.abstractmethod + def get_characteristic( + self, dialect: Dialect, dbapi_conn: DBAPIConnection + ) -> Any: + """Given a DBAPI connection, get the current value of the + characteristic. + + """ + + def get_connection_characteristic( + self, dialect: Dialect, conn: Connection, dbapi_conn: DBAPIConnection + ) -> Any: + """Given a :class:`_engine.Connection`, get the current value of the + characteristic. + + .. versionadded:: 2.0.30 - added to support elements that are local + to the :class:`_engine.Connection` itself. + + """ + return self.get_characteristic(dialect, dbapi_conn) + + +class IsolationLevelCharacteristic(ConnectionCharacteristic): + """Manage the isolation level on a DBAPI connection""" + + transactional: ClassVar[bool] = True + + def reset_characteristic( + self, dialect: Dialect, dbapi_conn: DBAPIConnection + ) -> None: + dialect.reset_isolation_level(dbapi_conn) + + def set_characteristic( + self, dialect: Dialect, dbapi_conn: DBAPIConnection, value: Any + ) -> None: + dialect._assert_and_set_isolation_level(dbapi_conn, value) + + def get_characteristic( + self, dialect: Dialect, dbapi_conn: DBAPIConnection + ) -> Any: + return dialect.get_isolation_level(dbapi_conn) + + +class LoggingTokenCharacteristic(ConnectionCharacteristic): + """Manage the 'logging_token' option of a :class:`_engine.Connection`. + + .. 
versionadded:: 2.0.30 + + """ + + transactional: ClassVar[bool] = False + + def reset_characteristic( + self, dialect: Dialect, dbapi_conn: DBAPIConnection + ) -> None: + pass + + def set_characteristic( + self, dialect: Dialect, dbapi_conn: DBAPIConnection, value: Any + ) -> None: + raise NotImplementedError() + + def set_connection_characteristic( + self, + dialect: Dialect, + conn: Connection, + dbapi_conn: DBAPIConnection, + value: Any, + ) -> None: + if value: + conn._message_formatter = lambda msg: "[%s] %s" % (value, msg) + else: + del conn._message_formatter + + def get_characteristic( + self, dialect: Dialect, dbapi_conn: DBAPIConnection + ) -> Any: + raise NotImplementedError() + + def get_connection_characteristic( + self, dialect: Dialect, conn: Connection, dbapi_conn: DBAPIConnection + ) -> Any: + return conn._execution_options.get("logging_token", None) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/engine/create.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/engine/create.py new file mode 100644 index 0000000000000000000000000000000000000000..920f620bd481d5fa604ff154ed1317bdf665bfa3 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/engine/create.py @@ -0,0 +1,878 @@ +# engine/create.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php + +from __future__ import annotations + +import inspect +import typing +from typing import Any +from typing import Callable +from typing import cast +from typing import Dict +from typing import List +from typing import Optional +from typing import overload +from typing import Type +from typing import Union + +from . import base +from . import url as _url +from .interfaces import DBAPIConnection +from .mock import create_mock_engine +from .. import event +from .. import exc +from .. 
import util +from ..pool import _AdhocProxiedConnection +from ..pool import ConnectionPoolEntry +from ..sql import compiler +from ..util import immutabledict + +if typing.TYPE_CHECKING: + from .base import Engine + from .interfaces import _ExecuteOptions + from .interfaces import _ParamStyle + from .interfaces import IsolationLevel + from .url import URL + from ..log import _EchoFlagType + from ..pool import _CreatorFnType + from ..pool import _CreatorWRecFnType + from ..pool import _ResetStyleArgType + from ..pool import Pool + from ..util.typing import Literal + + +@overload +def create_engine( + url: Union[str, URL], + *, + connect_args: Dict[Any, Any] = ..., + convert_unicode: bool = ..., + creator: Union[_CreatorFnType, _CreatorWRecFnType] = ..., + echo: _EchoFlagType = ..., + echo_pool: _EchoFlagType = ..., + enable_from_linting: bool = ..., + execution_options: _ExecuteOptions = ..., + future: Literal[True], + hide_parameters: bool = ..., + implicit_returning: Literal[True] = ..., + insertmanyvalues_page_size: int = ..., + isolation_level: IsolationLevel = ..., + json_deserializer: Callable[..., Any] = ..., + json_serializer: Callable[..., Any] = ..., + label_length: Optional[int] = ..., + logging_name: str = ..., + max_identifier_length: Optional[int] = ..., + max_overflow: int = ..., + module: Optional[Any] = ..., + paramstyle: Optional[_ParamStyle] = ..., + pool: Optional[Pool] = ..., + poolclass: Optional[Type[Pool]] = ..., + pool_logging_name: str = ..., + pool_pre_ping: bool = ..., + pool_size: int = ..., + pool_recycle: int = ..., + pool_reset_on_return: Optional[_ResetStyleArgType] = ..., + pool_timeout: float = ..., + pool_use_lifo: bool = ..., + plugins: List[str] = ..., + query_cache_size: int = ..., + use_insertmanyvalues: bool = ..., + **kwargs: Any, +) -> Engine: ... + + +@overload +def create_engine(url: Union[str, URL], **kwargs: Any) -> Engine: ... + + +@util.deprecated_params( + strategy=( + "1.4", + "The :paramref:`_sa.create_engine.strategy` keyword is deprecated, " + "and the only argument accepted is 'mock'; please use " + ":func:`.create_mock_engine` going forward. For general " + "customization of create_engine which may have been accomplished " + "using strategies, see :class:`.CreateEnginePlugin`.", + ), + empty_in_strategy=( + "1.4", + "The :paramref:`_sa.create_engine.empty_in_strategy` keyword is " + "deprecated, and no longer has any effect. All IN expressions " + "are now rendered using " + 'the "expanding parameter" strategy which renders a set of bound' + 'expressions, or an "empty set" SELECT, at statement execution' + "time.", + ), + implicit_returning=( + "2.0", + "The :paramref:`_sa.create_engine.implicit_returning` parameter " + "is deprecated and will be removed in a future release. ", + ), +) +def create_engine(url: Union[str, _url.URL], **kwargs: Any) -> Engine: + """Create a new :class:`_engine.Engine` instance. + + The standard calling form is to send the :ref:`URL ` as the + first positional argument, usually a string + that indicates database dialect and connection arguments:: + + engine = create_engine("postgresql+psycopg2://scott:tiger@localhost/test") + + .. note:: + + Please review :ref:`database_urls` for general guidelines in composing + URL strings. In particular, special characters, such as those often + part of passwords, must be URL encoded to be properly parsed. 
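As a sketch of one way to avoid manual escaping (the credentials shown are hypothetical), the URL may also be built programmatically with :meth:`_engine.URL.create`, which applies the appropriate quoting::

    from sqlalchemy import URL, create_engine

    url_object = URL.create(
        "postgresql+psycopg2",
        username="scott",
        password="p@ss w0rd/with:chars",  # given as plain, unescaped text
        host="localhost",
        database="test",
    )
    engine = create_engine(url_object)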
+ + Additional keyword arguments may then follow it which + establish various options on the resulting :class:`_engine.Engine` + and its underlying :class:`.Dialect` and :class:`_pool.Pool` + constructs:: + + engine = create_engine( + "mysql+mysqldb://scott:tiger@hostname/dbname", + pool_recycle=3600, + echo=True, + ) + + The string form of the URL is + ``dialect[+driver]://user:password@host/dbname[?key=value..]``, where + ``dialect`` is a database name such as ``mysql``, ``oracle``, + ``postgresql``, etc., and ``driver`` the name of a DBAPI, such as + ``psycopg2``, ``pyodbc``, ``cx_oracle``, etc. Alternatively, + the URL can be an instance of :class:`~sqlalchemy.engine.url.URL`. + + ``**kwargs`` takes a wide variety of options which are routed + towards their appropriate components. Arguments may be specific to + the :class:`_engine.Engine`, the underlying :class:`.Dialect`, + as well as the + :class:`_pool.Pool`. Specific dialects also accept keyword arguments that + are unique to that dialect. Here, we describe the parameters + that are common to most :func:`_sa.create_engine()` usage. + + Once established, the newly resulting :class:`_engine.Engine` will + request a connection from the underlying :class:`_pool.Pool` once + :meth:`_engine.Engine.connect` is called, or a method which depends on it + such as :meth:`_engine.Engine.execute` is invoked. The + :class:`_pool.Pool` in turn + will establish the first actual DBAPI connection when this request + is received. The :func:`_sa.create_engine` call itself does **not** + establish any actual DBAPI connections directly. + + .. seealso:: + + :doc:`/core/engines` + + :doc:`/dialects/index` + + :ref:`connections_toplevel` + + :param connect_args: a dictionary of options which will be + passed directly to the DBAPI's ``connect()`` method as + additional keyword arguments. See the example + at :ref:`custom_dbapi_args`. + + :param creator: a callable which returns a DBAPI connection. + This creation function will be passed to the underlying + connection pool and will be used to create all new database + connections. Usage of this function causes connection + parameters specified in the URL argument to be bypassed. + + This hook is not as flexible as the newer + :meth:`_events.DialectEvents.do_connect` hook which allows complete + control over how a connection is made to the database, given the full + set of URL arguments and state beforehand. + + .. seealso:: + + :meth:`_events.DialectEvents.do_connect` - event hook that allows + full control over DBAPI connection mechanics. + + :ref:`custom_dbapi_args` + + :param echo=False: if True, the Engine will log all statements + as well as a ``repr()`` of their parameter lists to the default log + handler, which defaults to ``sys.stdout`` for output. If set to the + string ``"debug"``, result rows will be printed to the standard output + as well. The ``echo`` attribute of ``Engine`` can be modified at any + time to turn logging on and off; direct control of logging is also + available using the standard Python ``logging`` module. + + .. seealso:: + + :ref:`dbengine_logging` - further detail on how to configure + logging. + + + :param echo_pool=False: if True, the connection pool will log + informational output such as when connections are invalidated + as well as when connections are recycled to the default log handler, + which defaults to ``sys.stdout`` for output. If set to the string + ``"debug"``, the logging will include pool checkouts and checkins. 
+ Direct control of logging is also available using the standard Python + ``logging`` module. + + .. seealso:: + + :ref:`dbengine_logging` - further detail on how to configure + logging. + + + :param empty_in_strategy: No longer used; SQLAlchemy now uses + "empty set" behavior for IN in all cases. + + :param enable_from_linting: defaults to True. Will emit a warning + if a given SELECT statement is found to have un-linked FROM elements + which would cause a cartesian product. + + .. versionadded:: 1.4 + + .. seealso:: + + :ref:`change_4737` + + :param execution_options: Dictionary execution options which will + be applied to all connections. See + :meth:`~sqlalchemy.engine.Connection.execution_options` + + :param future: Use the 2.0 style :class:`_engine.Engine` and + :class:`_engine.Connection` API. + + As of SQLAlchemy 2.0, this parameter is present for backwards + compatibility only and must remain at its default value of ``True``. + + The :paramref:`_sa.create_engine.future` parameter will be + deprecated in a subsequent 2.x release and eventually removed. + + .. versionadded:: 1.4 + + .. versionchanged:: 2.0 All :class:`_engine.Engine` objects are + "future" style engines and there is no longer a ``future=False`` + mode of operation. + + .. seealso:: + + :ref:`migration_20_toplevel` + + :param hide_parameters: Boolean, when set to True, SQL statement parameters + will not be displayed in INFO logging nor will they be formatted into + the string representation of :class:`.StatementError` objects. + + .. versionadded:: 1.3.8 + + .. seealso:: + + :ref:`dbengine_logging` - further detail on how to configure + logging. + + :param implicit_returning=True: Legacy parameter that may only be set + to True. In SQLAlchemy 2.0, this parameter does nothing. In order to + disable "implicit returning" for statements invoked by the ORM, + configure this on a per-table basis using the + :paramref:`.Table.implicit_returning` parameter. + + + :param insertmanyvalues_page_size: number of rows to format into an + INSERT statement when the statement uses "insertmanyvalues" mode, which is + a paged form of bulk insert that is used for many backends when using + :term:`executemany` execution typically in conjunction with RETURNING. + Defaults to 1000, but may also be subject to dialect-specific limiting + factors which may override this value on a per-statement basis. + + .. versionadded:: 2.0 + + .. seealso:: + + :ref:`engine_insertmanyvalues` + + :ref:`engine_insertmanyvalues_page_size` + + :paramref:`_engine.Connection.execution_options.insertmanyvalues_page_size` + + :param isolation_level: optional string name of an isolation level + which will be set on all new connections unconditionally. + Isolation levels are typically some subset of the string names + ``"SERIALIZABLE"``, ``"REPEATABLE READ"``, + ``"READ COMMITTED"``, ``"READ UNCOMMITTED"`` and ``"AUTOCOMMIT"`` + based on backend. + + The :paramref:`_sa.create_engine.isolation_level` parameter is + in contrast to the + :paramref:`.Connection.execution_options.isolation_level` + execution option, which may be set on an individual + :class:`.Connection`, as well as the same parameter passed to + :meth:`.Engine.execution_options`, where it may be used to create + multiple engines with different isolation levels that share a common + connection pool and dialect. + + .. 
versionchanged:: 2.0 The + :paramref:`_sa.create_engine.isolation_level` + parameter has been generalized to work on all dialects which support + the concept of isolation level, and is provided as a more succinct, + up front configuration switch in contrast to the execution option + which is more of an ad-hoc programmatic option. + + .. seealso:: + + :ref:`dbapi_autocommit` + + :param json_deserializer: for dialects that support the + :class:`_types.JSON` + datatype, this is a Python callable that will convert a JSON string + to a Python object. By default, the Python ``json.loads`` function is + used. + + .. versionchanged:: 1.3.7 The SQLite dialect renamed this from + ``_json_deserializer``. + + :param json_serializer: for dialects that support the :class:`_types.JSON` + datatype, this is a Python callable that will render a given object + as JSON. By default, the Python ``json.dumps`` function is used. + + .. versionchanged:: 1.3.7 The SQLite dialect renamed this from + ``_json_serializer``. + + + :param label_length=None: optional integer value which limits + the size of dynamically generated column labels to that many + characters. If less than 6, labels are generated as + "_(counter)". If ``None``, the value of + ``dialect.max_identifier_length``, which may be affected via the + :paramref:`_sa.create_engine.max_identifier_length` parameter, + is used instead. The value of + :paramref:`_sa.create_engine.label_length` + may not be larger than that of + :paramref:`_sa.create_engine.max_identfier_length`. + + .. seealso:: + + :paramref:`_sa.create_engine.max_identifier_length` + + :param logging_name: String identifier which will be used within + the "name" field of logging records generated within the + "sqlalchemy.engine" logger. Defaults to a hexstring of the + object's id. + + .. seealso:: + + :ref:`dbengine_logging` - further detail on how to configure + logging. + + :paramref:`_engine.Connection.execution_options.logging_token` + + :param max_identifier_length: integer; override the max_identifier_length + determined by the dialect. if ``None`` or zero, has no effect. This + is the database's configured maximum number of characters that may be + used in a SQL identifier such as a table name, column name, or label + name. All dialects determine this value automatically, however in the + case of a new database version for which this value has changed but + SQLAlchemy's dialect has not been adjusted, the value may be passed + here. + + .. versionadded:: 1.3.9 + + .. seealso:: + + :paramref:`_sa.create_engine.label_length` + + :param max_overflow=10: the number of connections to allow in + connection pool "overflow", that is connections that can be + opened above and beyond the pool_size setting, which defaults + to five. this is only used with :class:`~sqlalchemy.pool.QueuePool`. + + :param module=None: reference to a Python module object (the module + itself, not its string name). Specifies an alternate DBAPI module to + be used by the engine's dialect. Each sub-dialect references a + specific DBAPI which will be imported before first connect. This + parameter causes the import to be bypassed, and the given module to + be used instead. Can be used for testing of DBAPIs as well as to + inject "mock" DBAPI implementations into the :class:`_engine.Engine`. + + :param paramstyle=None: The `paramstyle `_ + to use when rendering bound parameters. This style defaults to the + one recommended by the DBAPI itself, which is retrieved from the + ``.paramstyle`` attribute of the DBAPI. 
However, most DBAPIs accept + more than one paramstyle, and in particular it may be desirable + to change a "named" paramstyle into a "positional" one, or vice versa. + When this attribute is passed, it should be one of the values + ``"qmark"``, ``"numeric"``, ``"named"``, ``"format"`` or + ``"pyformat"``, and should correspond to a parameter style known + to be supported by the DBAPI in use. + + :param pool=None: an already-constructed instance of + :class:`~sqlalchemy.pool.Pool`, such as a + :class:`~sqlalchemy.pool.QueuePool` instance. If non-None, this + pool will be used directly as the underlying connection pool + for the engine, bypassing whatever connection parameters are + present in the URL argument. For information on constructing + connection pools manually, see :ref:`pooling_toplevel`. + + :param poolclass=None: a :class:`~sqlalchemy.pool.Pool` + subclass, which will be used to create a connection pool + instance using the connection parameters given in the URL. Note + this differs from ``pool`` in that you don't actually + instantiate the pool in this case, you just indicate what type + of pool to be used. + + :param pool_logging_name: String identifier which will be used within + the "name" field of logging records generated within the + "sqlalchemy.pool" logger. Defaults to a hexstring of the object's + id. + + .. seealso:: + + :ref:`dbengine_logging` - further detail on how to configure + logging. + + :param pool_pre_ping: boolean, if True will enable the connection pool + "pre-ping" feature that tests connections for liveness upon + each checkout. + + .. versionadded:: 1.2 + + .. seealso:: + + :ref:`pool_disconnects_pessimistic` + + :param pool_size=5: the number of connections to keep open + inside the connection pool. This used with + :class:`~sqlalchemy.pool.QueuePool` as + well as :class:`~sqlalchemy.pool.SingletonThreadPool`. With + :class:`~sqlalchemy.pool.QueuePool`, a ``pool_size`` setting + of 0 indicates no limit; to disable pooling, set ``poolclass`` to + :class:`~sqlalchemy.pool.NullPool` instead. + + :param pool_recycle=-1: this setting causes the pool to recycle + connections after the given number of seconds has passed. It + defaults to -1, or no timeout. For example, setting to 3600 + means connections will be recycled after one hour. Note that + MySQL in particular will disconnect automatically if no + activity is detected on a connection for eight hours (although + this is configurable with the MySQLDB connection itself and the + server configuration as well). + + .. seealso:: + + :ref:`pool_setting_recycle` + + :param pool_reset_on_return='rollback': set the + :paramref:`_pool.Pool.reset_on_return` parameter of the underlying + :class:`_pool.Pool` object, which can be set to the values + ``"rollback"``, ``"commit"``, or ``None``. + + .. seealso:: + + :ref:`pool_reset_on_return` + + :param pool_timeout=30: number of seconds to wait before giving + up on getting a connection from the pool. This is only used + with :class:`~sqlalchemy.pool.QueuePool`. This can be a float but is + subject to the limitations of Python time functions which may not be + reliable in the tens of milliseconds. + + .. note: don't use 30.0 above, it seems to break with the :param tag + + :param pool_use_lifo=False: use LIFO (last-in-first-out) when retrieving + connections from :class:`.QueuePool` instead of FIFO + (first-in-first-out). Using LIFO, a server-side timeout scheme can + reduce the number of connections used during non- peak periods of + use. 
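+
+      For example (editor's sketch; the database URL and values shown are
+      illustrative only), LIFO retrieval is typically combined with a
+      pre-ping or recycle setting so that stale connections are detected::
+
+          from sqlalchemy import create_engine
+
+          engine = create_engine(
+              "postgresql+psycopg2://scott:tiger@localhost/test",
+              pool_use_lifo=True,
+              pool_pre_ping=True,
+              pool_recycle=3600,
+          )
+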
When planning for server-side timeouts, ensure that a recycle or + pre-ping strategy is in use to gracefully handle stale connections. + + .. versionadded:: 1.3 + + .. seealso:: + + :ref:`pool_use_lifo` + + :ref:`pool_disconnects` + + :param plugins: string list of plugin names to load. See + :class:`.CreateEnginePlugin` for background. + + .. versionadded:: 1.2.3 + + :param query_cache_size: size of the cache used to cache the SQL string + form of queries. Set to zero to disable caching. + + The cache is pruned of its least recently used items when its size reaches + N * 1.5. Defaults to 500, meaning the cache will always store at least + 500 SQL statements when filled, and will grow up to 750 items at which + point it is pruned back down to 500 by removing the 250 least recently + used items. + + Caching is accomplished on a per-statement basis by generating a + cache key that represents the statement's structure, then generating + string SQL for the current dialect only if that key is not present + in the cache. All statements support caching, however some features + such as an INSERT with a large set of parameters will intentionally + bypass the cache. SQL logging will indicate statistics for each + statement whether or not it were pull from the cache. + + .. note:: some ORM functions related to unit-of-work persistence as well + as some attribute loading strategies will make use of individual + per-mapper caches outside of the main cache. + + + .. seealso:: + + :ref:`sql_caching` + + .. versionadded:: 1.4 + + :param use_insertmanyvalues: True by default, use the "insertmanyvalues" + execution style for INSERT..RETURNING statements by default. + + .. versionadded:: 2.0 + + .. seealso:: + + :ref:`engine_insertmanyvalues` + + """ # noqa + + if "strategy" in kwargs: + strat = kwargs.pop("strategy") + if strat == "mock": + # this case is deprecated + return create_mock_engine(url, **kwargs) # type: ignore + else: + raise exc.ArgumentError("unknown strategy: %r" % strat) + + kwargs.pop("empty_in_strategy", None) + + # create url.URL object + u = _url.make_url(url) + + u, plugins, kwargs = u._instantiate_plugins(kwargs) + + entrypoint = u._get_entrypoint() + _is_async = kwargs.pop("_is_async", False) + if _is_async: + dialect_cls = entrypoint.get_async_dialect_cls(u) + else: + dialect_cls = entrypoint.get_dialect_cls(u) + + if kwargs.pop("_coerce_config", False): + + def pop_kwarg(key: str, default: Optional[Any] = None) -> Any: + value = kwargs.pop(key, default) + if key in dialect_cls.engine_config_types: + value = dialect_cls.engine_config_types[key](value) + return value + + else: + pop_kwarg = kwargs.pop # type: ignore + + dialect_args = {} + # consume dialect arguments from kwargs + for k in util.get_cls_kwargs(dialect_cls): + if k in kwargs: + dialect_args[k] = pop_kwarg(k) + + dbapi = kwargs.pop("module", None) + if dbapi is None: + dbapi_args = {} + + if "import_dbapi" in dialect_cls.__dict__: + dbapi_meth = dialect_cls.import_dbapi + + elif hasattr(dialect_cls, "dbapi") and inspect.ismethod( + dialect_cls.dbapi + ): + util.warn_deprecated( + "The dbapi() classmethod on dialect classes has been " + "renamed to import_dbapi(). 
Implement an import_dbapi() " + f"classmethod directly on class {dialect_cls} to remove this " + "warning; the old .dbapi() classmethod may be maintained for " + "backwards compatibility.", + "2.0", + ) + dbapi_meth = dialect_cls.dbapi + else: + dbapi_meth = dialect_cls.import_dbapi + + for k in util.get_func_kwargs(dbapi_meth): + if k in kwargs: + dbapi_args[k] = pop_kwarg(k) + dbapi = dbapi_meth(**dbapi_args) + + dialect_args["dbapi"] = dbapi + + dialect_args.setdefault("compiler_linting", compiler.NO_LINTING) + enable_from_linting = kwargs.pop("enable_from_linting", True) + if enable_from_linting: + dialect_args["compiler_linting"] ^= compiler.COLLECT_CARTESIAN_PRODUCTS + + for plugin in plugins: + plugin.handle_dialect_kwargs(dialect_cls, dialect_args) + + # create dialect + dialect = dialect_cls(**dialect_args) + + # assemble connection arguments + (cargs_tup, cparams) = dialect.create_connect_args(u) + cparams.update(pop_kwarg("connect_args", {})) + + if "async_fallback" in cparams and util.asbool(cparams["async_fallback"]): + util.warn_deprecated( + "The async_fallback dialect argument is deprecated and will be " + "removed in SQLAlchemy 2.1.", + "2.0", + ) + + cargs = list(cargs_tup) # allow mutability + + # look for existing pool or create + pool = pop_kwarg("pool", None) + if pool is None: + + def connect( + connection_record: Optional[ConnectionPoolEntry] = None, + ) -> DBAPIConnection: + if dialect._has_events: + for fn in dialect.dispatch.do_connect: + connection = cast( + DBAPIConnection, + fn(dialect, connection_record, cargs, cparams), + ) + if connection is not None: + return connection + + return dialect.connect(*cargs, **cparams) + + creator = pop_kwarg("creator", connect) + + poolclass = pop_kwarg("poolclass", None) + if poolclass is None: + poolclass = dialect.get_dialect_pool_class(u) + pool_args = {"dialect": dialect} + + # consume pool arguments from kwargs, translating a few of + # the arguments + for k in util.get_cls_kwargs(poolclass): + tk = _pool_translate_kwargs.get(k, k) + if tk in kwargs: + pool_args[k] = pop_kwarg(tk) + + for plugin in plugins: + plugin.handle_pool_kwargs(poolclass, pool_args) + + pool = poolclass(creator, **pool_args) + else: + pool._dialect = dialect + + if ( + hasattr(pool, "_is_asyncio") + and pool._is_asyncio is not dialect.is_async + ): + raise exc.ArgumentError( + f"Pool class {pool.__class__.__name__} cannot be " + f"used with {'non-' if not dialect.is_async else ''}" + "asyncio engine", + code="pcls", + ) + + # create engine. + if not pop_kwarg("future", True): + raise exc.ArgumentError( + "The 'future' parameter passed to " + "create_engine() may only be set to True." + ) + + engineclass = base.Engine + + engine_args = {} + for k in util.get_cls_kwargs(engineclass): + if k in kwargs: + engine_args[k] = pop_kwarg(k) + + # internal flags used by the test suite for instrumenting / proxying + # engines with mocks etc. + _initialize = kwargs.pop("_initialize", True) + + # all kwargs should be consumed + if kwargs: + raise TypeError( + "Invalid argument(s) %s sent to create_engine(), " + "using configuration %s/%s/%s. Please check that the " + "keyword arguments are appropriate for this combination " + "of components." 
+ % ( + ",".join("'%s'" % k for k in kwargs), + dialect.__class__.__name__, + pool.__class__.__name__, + engineclass.__name__, + ) + ) + + engine = engineclass(pool, dialect, u, **engine_args) + + if _initialize: + do_on_connect = dialect.on_connect_url(u) + if do_on_connect: + + def on_connect( + dbapi_connection: DBAPIConnection, + connection_record: ConnectionPoolEntry, + ) -> None: + assert do_on_connect is not None + do_on_connect(dbapi_connection) + + event.listen(pool, "connect", on_connect) + + builtin_on_connect = dialect._builtin_onconnect() + if builtin_on_connect: + event.listen(pool, "connect", builtin_on_connect) + + def first_connect( + dbapi_connection: DBAPIConnection, + connection_record: ConnectionPoolEntry, + ) -> None: + c = base.Connection( + engine, + connection=_AdhocProxiedConnection( + dbapi_connection, connection_record + ), + _has_events=False, + # reconnecting will be a reentrant condition, so if the + # connection goes away, Connection is then closed + _allow_revalidate=False, + # dont trigger the autobegin sequence + # within the up front dialect checks + _allow_autobegin=False, + ) + c._execution_options = util.EMPTY_DICT + + try: + dialect.initialize(c) + finally: + # note that "invalidated" and "closed" are mutually + # exclusive in 1.4 Connection. + if not c.invalidated and not c.closed: + # transaction is rolled back otherwise, tested by + # test/dialect/postgresql/test_dialect.py + # ::MiscBackendTest::test_initial_transaction_state + dialect.do_rollback(c.connection) + + # previously, the "first_connect" event was used here, which was then + # scaled back if the "on_connect" handler were present. now, + # since "on_connect" is virtually always present, just use + # "connect" event with once_unless_exception in all cases so that + # the connection event flow is consistent in all cases. + event.listen( + pool, "connect", first_connect, _once_unless_exception=True + ) + + dialect_cls.engine_created(engine) + if entrypoint is not dialect_cls: + entrypoint.engine_created(engine) + + for plugin in plugins: + plugin.engine_created(engine) + + return engine + + +def engine_from_config( + configuration: Dict[str, Any], prefix: str = "sqlalchemy.", **kwargs: Any +) -> Engine: + """Create a new Engine instance using a configuration dictionary. + + The dictionary is typically produced from a config file. + + The keys of interest to ``engine_from_config()`` should be prefixed, e.g. + ``sqlalchemy.url``, ``sqlalchemy.echo``, etc. The 'prefix' argument + indicates the prefix to be searched for. Each matching key (after the + prefix is stripped) is treated as though it were the corresponding keyword + argument to a :func:`_sa.create_engine` call. + + The only required key is (assuming the default prefix) ``sqlalchemy.url``, + which provides the :ref:`database URL `. + + A select set of keyword arguments will be "coerced" to their + expected type based on string values. The set of arguments + is extensible per-dialect using the ``engine_config_types`` accessor. + + :param configuration: A dictionary (typically produced from a config file, + but this is not a requirement). Items whose keys start with the value + of 'prefix' will have that prefix stripped, and will then be passed to + :func:`_sa.create_engine`. + + :param prefix: Prefix to match and then strip from keys + in 'configuration'. + + :param kwargs: Each keyword argument to ``engine_from_config()`` itself + overrides the corresponding item taken from the 'configuration' + dictionary. 
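+
+      For example (editor's sketch; the dictionary contents are illustrative
+      only), a keyword argument takes precedence over the corresponding
+      dictionary item::
+
+          from sqlalchemy import engine_from_config
+
+          configuration = {
+              "sqlalchemy.url": "sqlite:///example.db",
+              "sqlalchemy.echo": "true",
+              "sqlalchemy.pool_recycle": "3600",
+          }
+          # pool_recycle=1800 overrides "sqlalchemy.pool_recycle" above
+          engine = engine_from_config(
+              configuration, prefix="sqlalchemy.", pool_recycle=1800
+          )
+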
Keyword arguments should *not* be prefixed. + + """ + + options = { + key[len(prefix) :]: configuration[key] + for key in configuration + if key.startswith(prefix) + } + options["_coerce_config"] = True + options.update(kwargs) + url = options.pop("url") + return create_engine(url, **options) + + +@overload +def create_pool_from_url( + url: Union[str, URL], + *, + poolclass: Optional[Type[Pool]] = ..., + logging_name: str = ..., + pre_ping: bool = ..., + size: int = ..., + recycle: int = ..., + reset_on_return: Optional[_ResetStyleArgType] = ..., + timeout: float = ..., + use_lifo: bool = ..., + **kwargs: Any, +) -> Pool: ... + + +@overload +def create_pool_from_url(url: Union[str, URL], **kwargs: Any) -> Pool: ... + + +def create_pool_from_url(url: Union[str, URL], **kwargs: Any) -> Pool: + """Create a pool instance from the given url. + + If ``poolclass`` is not provided the pool class used + is selected using the dialect specified in the URL. + + The arguments passed to :func:`_sa.create_pool_from_url` are + identical to the pool argument passed to the :func:`_sa.create_engine` + function. + + .. versionadded:: 2.0.10 + """ + + for key in _pool_translate_kwargs: + if key in kwargs: + kwargs[_pool_translate_kwargs[key]] = kwargs.pop(key) + + engine = create_engine(url, **kwargs, _initialize=False) + return engine.pool + + +_pool_translate_kwargs = immutabledict( + { + "logging_name": "pool_logging_name", + "echo": "echo_pool", + "timeout": "pool_timeout", + "recycle": "pool_recycle", + "events": "pool_events", # deprecated + "reset_on_return": "pool_reset_on_return", + "pre_ping": "pool_pre_ping", + "use_lifo": "pool_use_lifo", + } +) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/engine/cursor.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/engine/cursor.py new file mode 100644 index 0000000000000000000000000000000000000000..54e9784e0c427132f74519cf3f9ec803246430ba --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/engine/cursor.py @@ -0,0 +1,2181 @@ +# engine/cursor.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php +# mypy: allow-untyped-defs, allow-untyped-calls + +"""Define cursor-specific result set constructs including +:class:`.CursorResult`.""" + + +from __future__ import annotations + +import collections +import functools +import operator +import typing +from typing import Any +from typing import cast +from typing import ClassVar +from typing import Dict +from typing import Iterable +from typing import Iterator +from typing import List +from typing import Mapping +from typing import NoReturn +from typing import Optional +from typing import Sequence +from typing import Tuple +from typing import TYPE_CHECKING +from typing import TypeVar +from typing import Union + +from .result import IteratorResult +from .result import MergedResult +from .result import Result +from .result import ResultMetaData +from .result import SimpleResultMetaData +from .result import tuplegetter +from .row import Row +from .. import exc +from .. 
import util +from ..sql import elements +from ..sql import sqltypes +from ..sql import util as sql_util +from ..sql.base import _generative +from ..sql.compiler import ResultColumnsEntry +from ..sql.compiler import RM_NAME +from ..sql.compiler import RM_OBJECTS +from ..sql.compiler import RM_RENDERED_NAME +from ..sql.compiler import RM_TYPE +from ..sql.type_api import TypeEngine +from ..util import compat +from ..util.typing import Literal +from ..util.typing import Self + + +if typing.TYPE_CHECKING: + from .base import Connection + from .default import DefaultExecutionContext + from .interfaces import _DBAPICursorDescription + from .interfaces import DBAPICursor + from .interfaces import Dialect + from .interfaces import ExecutionContext + from .result import _KeyIndexType + from .result import _KeyMapRecType + from .result import _KeyMapType + from .result import _KeyType + from .result import _ProcessorsType + from .result import _TupleGetterType + from ..sql.type_api import _ResultProcessorType + + +_T = TypeVar("_T", bound=Any) + + +# metadata entry tuple indexes. +# using raw tuple is faster than namedtuple. +# these match up to the positions in +# _CursorKeyMapRecType +MD_INDEX: Literal[0] = 0 +"""integer index in cursor.description + +""" + +MD_RESULT_MAP_INDEX: Literal[1] = 1 +"""integer index in compiled._result_columns""" + +MD_OBJECTS: Literal[2] = 2 +"""other string keys and ColumnElement obj that can match. + +This comes from compiler.RM_OBJECTS / compiler.ResultColumnsEntry.objects + +""" + +MD_LOOKUP_KEY: Literal[3] = 3 +"""string key we usually expect for key-based lookup + +this comes from compiler.RM_NAME / compiler.ResultColumnsEntry.name +""" + + +MD_RENDERED_NAME: Literal[4] = 4 +"""name that is usually in cursor.description + +this comes from compiler.RENDERED_NAME / compiler.ResultColumnsEntry.keyname +""" + + +MD_PROCESSOR: Literal[5] = 5 +"""callable to process a result value into a row""" + +MD_UNTRANSLATED: Literal[6] = 6 +"""raw name from cursor.description""" + + +_CursorKeyMapRecType = Tuple[ + Optional[int], # MD_INDEX, None means the record is ambiguously named + int, # MD_RESULT_MAP_INDEX + List[Any], # MD_OBJECTS + str, # MD_LOOKUP_KEY + str, # MD_RENDERED_NAME + Optional["_ResultProcessorType[Any]"], # MD_PROCESSOR + Optional[str], # MD_UNTRANSLATED +] + +_CursorKeyMapType = Mapping["_KeyType", _CursorKeyMapRecType] + +# same as _CursorKeyMapRecType except the MD_INDEX value is definitely +# not None +_NonAmbigCursorKeyMapRecType = Tuple[ + int, + int, + List[Any], + str, + str, + Optional["_ResultProcessorType[Any]"], + str, +] + + +class CursorResultMetaData(ResultMetaData): + """Result metadata for DBAPI cursors.""" + + __slots__ = ( + "_keymap", + "_processors", + "_keys", + "_keymap_by_result_column_idx", + "_tuplefilter", + "_translated_indexes", + "_safe_for_cache", + "_unpickled", + "_key_to_index", + # don't need _unique_filters support here for now. Can be added + # if a need arises. 
+ ) + + _keymap: _CursorKeyMapType + _processors: _ProcessorsType + _keymap_by_result_column_idx: Optional[Dict[int, _KeyMapRecType]] + _unpickled: bool + _safe_for_cache: bool + _translated_indexes: Optional[List[int]] + + returns_rows: ClassVar[bool] = True + + def _has_key(self, key: Any) -> bool: + return key in self._keymap + + def _for_freeze(self) -> ResultMetaData: + return SimpleResultMetaData( + self._keys, + extra=[self._keymap[key][MD_OBJECTS] for key in self._keys], + ) + + def _make_new_metadata( + self, + *, + unpickled: bool, + processors: _ProcessorsType, + keys: Sequence[str], + keymap: _KeyMapType, + tuplefilter: Optional[_TupleGetterType], + translated_indexes: Optional[List[int]], + safe_for_cache: bool, + keymap_by_result_column_idx: Any, + ) -> CursorResultMetaData: + new_obj = self.__class__.__new__(self.__class__) + new_obj._unpickled = unpickled + new_obj._processors = processors + new_obj._keys = keys + new_obj._keymap = keymap + new_obj._tuplefilter = tuplefilter + new_obj._translated_indexes = translated_indexes + new_obj._safe_for_cache = safe_for_cache + new_obj._keymap_by_result_column_idx = keymap_by_result_column_idx + new_obj._key_to_index = self._make_key_to_index(keymap, MD_INDEX) + return new_obj + + def _remove_processors(self) -> CursorResultMetaData: + assert not self._tuplefilter + return self._make_new_metadata( + unpickled=self._unpickled, + processors=[None] * len(self._processors), + tuplefilter=None, + translated_indexes=None, + keymap={ + key: value[0:5] + (None,) + value[6:] + for key, value in self._keymap.items() + }, + keys=self._keys, + safe_for_cache=self._safe_for_cache, + keymap_by_result_column_idx=self._keymap_by_result_column_idx, + ) + + def _splice_horizontally( + self, other: CursorResultMetaData + ) -> CursorResultMetaData: + assert not self._tuplefilter + + keymap = dict(self._keymap) + offset = len(self._keys) + keymap.update( + { + key: ( + # int index should be None for ambiguous key + ( + value[0] + offset + if value[0] is not None and key not in keymap + else None + ), + value[1] + offset, + *value[2:], + ) + for key, value in other._keymap.items() + } + ) + return self._make_new_metadata( + unpickled=self._unpickled, + processors=self._processors + other._processors, # type: ignore + tuplefilter=None, + translated_indexes=None, + keys=self._keys + other._keys, # type: ignore + keymap=keymap, + safe_for_cache=self._safe_for_cache, + keymap_by_result_column_idx={ + metadata_entry[MD_RESULT_MAP_INDEX]: metadata_entry + for metadata_entry in keymap.values() + }, + ) + + def _reduce(self, keys: Sequence[_KeyIndexType]) -> ResultMetaData: + recs = list(self._metadata_for_keys(keys)) + + indexes = [rec[MD_INDEX] for rec in recs] + new_keys: List[str] = [rec[MD_LOOKUP_KEY] for rec in recs] + + if self._translated_indexes: + indexes = [self._translated_indexes[idx] for idx in indexes] + tup = tuplegetter(*indexes) + new_recs = [(index,) + rec[1:] for index, rec in enumerate(recs)] + + keymap = {rec[MD_LOOKUP_KEY]: rec for rec in new_recs} + # TODO: need unit test for: + # result = connection.execute("raw sql, no columns").scalars() + # without the "or ()" it's failing because MD_OBJECTS is None + keymap.update( + (e, new_rec) + for new_rec in new_recs + for e in new_rec[MD_OBJECTS] or () + ) + + return self._make_new_metadata( + unpickled=self._unpickled, + processors=self._processors, + keys=new_keys, + tuplefilter=tup, + translated_indexes=indexes, + keymap=keymap, # type: ignore[arg-type] + safe_for_cache=self._safe_for_cache, 
+ keymap_by_result_column_idx=self._keymap_by_result_column_idx, + ) + + def _adapt_to_context(self, context: ExecutionContext) -> ResultMetaData: + """When using a cached Compiled construct that has a _result_map, + for a new statement that used the cached Compiled, we need to ensure + the keymap has the Column objects from our new statement as keys. + So here we rewrite keymap with new entries for the new columns + as matched to those of the cached statement. + + """ + + if not context.compiled or not context.compiled._result_columns: + return self + + compiled_statement = context.compiled.statement + invoked_statement = context.invoked_statement + + if TYPE_CHECKING: + assert isinstance(invoked_statement, elements.ClauseElement) + + if compiled_statement is invoked_statement: + return self + + assert invoked_statement is not None + + # this is the most common path for Core statements when + # caching is used. In ORM use, this codepath is not really used + # as the _result_disable_adapt_to_context execution option is + # set by the ORM. + + # make a copy and add the columns from the invoked statement + # to the result map. + + keymap_by_position = self._keymap_by_result_column_idx + + if keymap_by_position is None: + # first retrival from cache, this map will not be set up yet, + # initialize lazily + keymap_by_position = self._keymap_by_result_column_idx = { + metadata_entry[MD_RESULT_MAP_INDEX]: metadata_entry + for metadata_entry in self._keymap.values() + } + + assert not self._tuplefilter + return self._make_new_metadata( + keymap=compat.dict_union( + self._keymap, + { + new: keymap_by_position[idx] + for idx, new in enumerate( + invoked_statement._all_selected_columns + ) + if idx in keymap_by_position + }, + ), + unpickled=self._unpickled, + processors=self._processors, + tuplefilter=None, + translated_indexes=None, + keys=self._keys, + safe_for_cache=self._safe_for_cache, + keymap_by_result_column_idx=self._keymap_by_result_column_idx, + ) + + def __init__( + self, + parent: CursorResult[Any], + cursor_description: _DBAPICursorDescription, + ): + context = parent.context + self._tuplefilter = None + self._translated_indexes = None + self._safe_for_cache = self._unpickled = False + + if context.result_column_struct: + ( + result_columns, + cols_are_ordered, + textual_ordered, + ad_hoc_textual, + loose_column_name_matching, + ) = context.result_column_struct + num_ctx_cols = len(result_columns) + else: + result_columns = cols_are_ordered = ( # type: ignore + num_ctx_cols + ) = ad_hoc_textual = loose_column_name_matching = ( + textual_ordered + ) = False + + # merge cursor.description with the column info + # present in the compiled structure, if any + raw = self._merge_cursor_description( + context, + cursor_description, + result_columns, + num_ctx_cols, + cols_are_ordered, + textual_ordered, + ad_hoc_textual, + loose_column_name_matching, + ) + + # processors in key order which are used when building up + # a row + self._processors = [ + metadata_entry[MD_PROCESSOR] for metadata_entry in raw + ] + + # this is used when using this ResultMetaData in a Core-only cache + # retrieval context. it's initialized on first cache retrieval + # when the _result_disable_adapt_to_context execution option + # (which the ORM generally sets) is not set. 
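+        # (editor's note, illustrative): it is then populated lazily by
+        # _adapt_to_context() above, roughly as
+        #     {metadata_entry[MD_RESULT_MAP_INDEX]: metadata_entry}
+        # so that cached metadata can be re-keyed against the Column objects
+        # of a newly invoked, equivalent statement.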
+ self._keymap_by_result_column_idx = None + + # for compiled SQL constructs, copy additional lookup keys into + # the key lookup map, such as Column objects, labels, + # column keys and other names + if num_ctx_cols: + # keymap by primary string... + by_key = { + metadata_entry[MD_LOOKUP_KEY]: metadata_entry + for metadata_entry in raw + } + + if len(by_key) != num_ctx_cols: + # if by-primary-string dictionary smaller than + # number of columns, assume we have dupes; (this check + # is also in place if string dictionary is bigger, as + # can occur when '*' was used as one of the compiled columns, + # which may or may not be suggestive of dupes), rewrite + # dupe records with "None" for index which results in + # ambiguous column exception when accessed. + # + # this is considered to be the less common case as it is not + # common to have dupe column keys in a SELECT statement. + # + # new in 1.4: get the complete set of all possible keys, + # strings, objects, whatever, that are dupes across two + # different records, first. + index_by_key: Dict[Any, Any] = {} + dupes = set() + for metadata_entry in raw: + for key in (metadata_entry[MD_RENDERED_NAME],) + ( + metadata_entry[MD_OBJECTS] or () + ): + idx = metadata_entry[MD_INDEX] + # if this key has been associated with more than one + # positional index, it's a dupe + if index_by_key.setdefault(key, idx) != idx: + dupes.add(key) + + # then put everything we have into the keymap excluding only + # those keys that are dupes. + self._keymap = { + obj_elem: metadata_entry + for metadata_entry in raw + if metadata_entry[MD_OBJECTS] + for obj_elem in metadata_entry[MD_OBJECTS] + if obj_elem not in dupes + } + + # then for the dupe keys, put the "ambiguous column" + # record into by_key. + by_key.update( + { + key: (None, None, [], key, key, None, None) + for key in dupes + } + ) + + else: + # no dupes - copy secondary elements from compiled + # columns into self._keymap. this is the most common + # codepath for Core / ORM statement executions before the + # result metadata is cached + self._keymap = { + obj_elem: metadata_entry + for metadata_entry in raw + if metadata_entry[MD_OBJECTS] + for obj_elem in metadata_entry[MD_OBJECTS] + } + # update keymap with primary string names taking + # precedence + self._keymap.update(by_key) + else: + # no compiled objects to map, just create keymap by primary string + self._keymap = { + metadata_entry[MD_LOOKUP_KEY]: metadata_entry + for metadata_entry in raw + } + + # update keymap with "translated" names. In SQLAlchemy this is a + # sqlite only thing, and in fact impacting only extremely old SQLite + # versions unlikely to be present in modern Python versions. + # however, the pyhive third party dialect is + # also using this hook, which means others still might use it as well. + # I dislike having this awkward hook here but as long as we need + # to use names in cursor.description in some cases we need to have + # some hook to accomplish this. 
+ if not num_ctx_cols and context._translate_colname: + self._keymap.update( + { + metadata_entry[MD_UNTRANSLATED]: self._keymap[ + metadata_entry[MD_LOOKUP_KEY] + ] + for metadata_entry in raw + if metadata_entry[MD_UNTRANSLATED] + } + ) + + self._key_to_index = self._make_key_to_index(self._keymap, MD_INDEX) + + def _merge_cursor_description( + self, + context, + cursor_description, + result_columns, + num_ctx_cols, + cols_are_ordered, + textual_ordered, + ad_hoc_textual, + loose_column_name_matching, + ): + """Merge a cursor.description with compiled result column information. + + There are at least four separate strategies used here, selected + depending on the type of SQL construct used to start with. + + The most common case is that of the compiled SQL expression construct, + which generated the column names present in the raw SQL string and + which has the identical number of columns as were reported by + cursor.description. In this case, we assume a 1-1 positional mapping + between the entries in cursor.description and the compiled object. + This is also the most performant case as we disregard extracting / + decoding the column names present in cursor.description since we + already have the desired name we generated in the compiled SQL + construct. + + The next common case is that of the completely raw string SQL, + such as passed to connection.execute(). In this case we have no + compiled construct to work with, so we extract and decode the + names from cursor.description and index those as the primary + result row target keys. + + The remaining fairly common case is that of the textual SQL + that includes at least partial column information; this is when + we use a :class:`_expression.TextualSelect` construct. + This construct may have + unordered or ordered column information. In the ordered case, we + merge the cursor.description and the compiled construct's information + positionally, and warn if there are additional description names + present, however we still decode the names in cursor.description + as we don't have a guarantee that the names in the columns match + on these. In the unordered case, we match names in cursor.description + to that of the compiled construct based on name matching. + In both of these cases, the cursor.description names and the column + expression objects and names are indexed as result row target keys. + + The final case is much less common, where we have a compiled + non-textual SQL expression construct, but the number of columns + in cursor.description doesn't match what's in the compiled + construct. We make the guess here that there might be textual + column expressions in the compiled construct that themselves include + a comma in them causing them to split. We do the same name-matching + as with textual non-ordered columns. + + The name-matched system of merging is the same as that used by + SQLAlchemy for all cases up through the 0.9 series. Positional + matching for compiled SQL expressions was introduced in 1.0 as a + major performance feature, and positional matching for textual + :class:`_expression.TextualSelect` objects in 1.1. + As name matching is no longer + a common case, it was acceptable to factor it into smaller generator- + oriented methods that are easier to understand, but incur slightly + more performance overhead. 
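+
+        For example (editor's illustration): ``select(t.c.id, t.c.name)``
+        compiles to SQL whose cursor.description contains exactly two
+        entries in the same order, so the pure positional path is taken,
+        whereas ``connection.execute(text("select id, name from t"))``
+        carries no compiled column information and the names are read from
+        cursor.description alone.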
+ + """ + + if ( + num_ctx_cols + and cols_are_ordered + and not textual_ordered + and num_ctx_cols == len(cursor_description) + ): + self._keys = [elem[0] for elem in result_columns] + # pure positional 1-1 case; doesn't need to read + # the names from cursor.description + + # most common case for Core and ORM + + # this metadata is safe to cache because we are guaranteed + # to have the columns in the same order for new executions + self._safe_for_cache = True + return [ + ( + idx, + idx, + rmap_entry[RM_OBJECTS], + rmap_entry[RM_NAME], + rmap_entry[RM_RENDERED_NAME], + context.get_result_processor( + rmap_entry[RM_TYPE], + rmap_entry[RM_RENDERED_NAME], + cursor_description[idx][1], + ), + None, + ) + for idx, rmap_entry in enumerate(result_columns) + ] + else: + # name-based or text-positional cases, where we need + # to read cursor.description names + + if textual_ordered or ( + ad_hoc_textual and len(cursor_description) == num_ctx_cols + ): + self._safe_for_cache = True + # textual positional case + raw_iterator = self._merge_textual_cols_by_position( + context, cursor_description, result_columns + ) + elif num_ctx_cols: + # compiled SQL with a mismatch of description cols + # vs. compiled cols, or textual w/ unordered columns + # the order of columns can change if the query is + # against a "select *", so not safe to cache + self._safe_for_cache = False + raw_iterator = self._merge_cols_by_name( + context, + cursor_description, + result_columns, + loose_column_name_matching, + ) + else: + # no compiled SQL, just a raw string, order of columns + # can change for "select *" + self._safe_for_cache = False + raw_iterator = self._merge_cols_by_none( + context, cursor_description + ) + + return [ + ( + idx, + ridx, + obj, + cursor_colname, + cursor_colname, + context.get_result_processor( + mapped_type, cursor_colname, coltype + ), + untranslated, + ) + for ( + idx, + ridx, + cursor_colname, + mapped_type, + coltype, + obj, + untranslated, + ) in raw_iterator + ] + + def _colnames_from_description(self, context, cursor_description): + """Extract column names and data types from a cursor.description. + + Applies unicode decoding, column translation, "normalization", + and case sensitivity rules to the names based on the dialect. 
+ + """ + + dialect = context.dialect + translate_colname = context._translate_colname + normalize_name = ( + dialect.normalize_name if dialect.requires_name_normalize else None + ) + untranslated = None + + self._keys = [] + + for idx, rec in enumerate(cursor_description): + colname = rec[0] + coltype = rec[1] + + if translate_colname: + colname, untranslated = translate_colname(colname) + + if normalize_name: + colname = normalize_name(colname) + + self._keys.append(colname) + + yield idx, colname, untranslated, coltype + + def _merge_textual_cols_by_position( + self, context, cursor_description, result_columns + ): + num_ctx_cols = len(result_columns) + + if num_ctx_cols > len(cursor_description): + util.warn( + "Number of columns in textual SQL (%d) is " + "smaller than number of columns requested (%d)" + % (num_ctx_cols, len(cursor_description)) + ) + seen = set() + + for ( + idx, + colname, + untranslated, + coltype, + ) in self._colnames_from_description(context, cursor_description): + if idx < num_ctx_cols: + ctx_rec = result_columns[idx] + obj = ctx_rec[RM_OBJECTS] + ridx = idx + mapped_type = ctx_rec[RM_TYPE] + if obj[0] in seen: + raise exc.InvalidRequestError( + "Duplicate column expression requested " + "in textual SQL: %r" % obj[0] + ) + seen.add(obj[0]) + else: + mapped_type = sqltypes.NULLTYPE + obj = None + ridx = None + yield idx, ridx, colname, mapped_type, coltype, obj, untranslated + + def _merge_cols_by_name( + self, + context, + cursor_description, + result_columns, + loose_column_name_matching, + ): + match_map = self._create_description_match_map( + result_columns, loose_column_name_matching + ) + mapped_type: TypeEngine[Any] + + for ( + idx, + colname, + untranslated, + coltype, + ) in self._colnames_from_description(context, cursor_description): + try: + ctx_rec = match_map[colname] + except KeyError: + mapped_type = sqltypes.NULLTYPE + obj = None + result_columns_idx = None + else: + obj = ctx_rec[1] + mapped_type = ctx_rec[2] + result_columns_idx = ctx_rec[3] + yield ( + idx, + result_columns_idx, + colname, + mapped_type, + coltype, + obj, + untranslated, + ) + + @classmethod + def _create_description_match_map( + cls, + result_columns: List[ResultColumnsEntry], + loose_column_name_matching: bool = False, + ) -> Dict[ + Union[str, object], Tuple[str, Tuple[Any, ...], TypeEngine[Any], int] + ]: + """when matching cursor.description to a set of names that are present + in a Compiled object, as is the case with TextualSelect, get all the + names we expect might match those in cursor.description. + """ + + d: Dict[ + Union[str, object], + Tuple[str, Tuple[Any, ...], TypeEngine[Any], int], + ] = {} + for ridx, elem in enumerate(result_columns): + key = elem[RM_RENDERED_NAME] + if key in d: + # conflicting keyname - just add the column-linked objects + # to the existing record. if there is a duplicate column + # name in the cursor description, this will allow all of those + # objects to raise an ambiguous column error + e_name, e_obj, e_type, e_ridx = d[key] + d[key] = e_name, e_obj + elem[RM_OBJECTS], e_type, ridx + else: + d[key] = (elem[RM_NAME], elem[RM_OBJECTS], elem[RM_TYPE], ridx) + + if loose_column_name_matching: + # when using a textual statement with an unordered set + # of columns that line up, we are expecting the user + # to be using label names in the SQL that match to the column + # expressions. Enable more liberal matching for this case; + # duplicate keys that are ambiguous will be fixed later. 
+ for r_key in elem[RM_OBJECTS]: + d.setdefault( + r_key, + (elem[RM_NAME], elem[RM_OBJECTS], elem[RM_TYPE], ridx), + ) + return d + + def _merge_cols_by_none(self, context, cursor_description): + for ( + idx, + colname, + untranslated, + coltype, + ) in self._colnames_from_description(context, cursor_description): + yield ( + idx, + None, + colname, + sqltypes.NULLTYPE, + coltype, + None, + untranslated, + ) + + if not TYPE_CHECKING: + + def _key_fallback( + self, key: Any, err: Optional[Exception], raiseerr: bool = True + ) -> Optional[NoReturn]: + if raiseerr: + if self._unpickled and isinstance(key, elements.ColumnElement): + raise exc.NoSuchColumnError( + "Row was unpickled; lookup by ColumnElement " + "is unsupported" + ) from err + else: + raise exc.NoSuchColumnError( + "Could not locate column in row for column '%s'" + % util.string_or_unprintable(key) + ) from err + else: + return None + + def _raise_for_ambiguous_column_name(self, rec): + raise exc.InvalidRequestError( + "Ambiguous column name '%s' in " + "result set column descriptions" % rec[MD_LOOKUP_KEY] + ) + + def _index_for_key(self, key: Any, raiseerr: bool = True) -> Optional[int]: + # TODO: can consider pre-loading ints and negative ints + # into _keymap - also no coverage here + if isinstance(key, int): + key = self._keys[key] + + try: + rec = self._keymap[key] + except KeyError as ke: + x = self._key_fallback(key, ke, raiseerr) + assert x is None + return None + + index = rec[0] + + if index is None: + self._raise_for_ambiguous_column_name(rec) + return index + + def _indexes_for_keys(self, keys): + try: + return [self._keymap[key][0] for key in keys] + except KeyError as ke: + # ensure it raises + CursorResultMetaData._key_fallback(self, ke.args[0], ke) + + def _metadata_for_keys( + self, keys: Sequence[Any] + ) -> Iterator[_NonAmbigCursorKeyMapRecType]: + for key in keys: + if int in key.__class__.__mro__: + key = self._keys[key] + + try: + rec = self._keymap[key] + except KeyError as ke: + # ensure it raises + CursorResultMetaData._key_fallback(self, ke.args[0], ke) + + index = rec[MD_INDEX] + + if index is None: + self._raise_for_ambiguous_column_name(rec) + + yield cast(_NonAmbigCursorKeyMapRecType, rec) + + def __getstate__(self): + # TODO: consider serializing this as SimpleResultMetaData + return { + "_keymap": { + key: ( + rec[MD_INDEX], + rec[MD_RESULT_MAP_INDEX], + [], + key, + rec[MD_RENDERED_NAME], + None, + None, + ) + for key, rec in self._keymap.items() + if isinstance(key, (str, int)) + }, + "_keys": self._keys, + "_translated_indexes": self._translated_indexes, + } + + def __setstate__(self, state): + self._processors = [None for _ in range(len(state["_keys"]))] + self._keymap = state["_keymap"] + self._keymap_by_result_column_idx = None + self._key_to_index = self._make_key_to_index(self._keymap, MD_INDEX) + self._keys = state["_keys"] + self._unpickled = True + if state["_translated_indexes"]: + self._translated_indexes = cast( + "List[int]", state["_translated_indexes"] + ) + self._tuplefilter = tuplegetter(*self._translated_indexes) + else: + self._translated_indexes = self._tuplefilter = None + + +class ResultFetchStrategy: + """Define a fetching strategy for a result object. + + + .. 
versionadded:: 1.4 + + """ + + __slots__ = () + + alternate_cursor_description: Optional[_DBAPICursorDescription] = None + + def soft_close( + self, result: CursorResult[Any], dbapi_cursor: Optional[DBAPICursor] + ) -> None: + raise NotImplementedError() + + def hard_close( + self, result: CursorResult[Any], dbapi_cursor: Optional[DBAPICursor] + ) -> None: + raise NotImplementedError() + + def yield_per( + self, + result: CursorResult[Any], + dbapi_cursor: Optional[DBAPICursor], + num: int, + ) -> None: + return + + def fetchone( + self, + result: CursorResult[Any], + dbapi_cursor: DBAPICursor, + hard_close: bool = False, + ) -> Any: + raise NotImplementedError() + + def fetchmany( + self, + result: CursorResult[Any], + dbapi_cursor: DBAPICursor, + size: Optional[int] = None, + ) -> Any: + raise NotImplementedError() + + def fetchall( + self, + result: CursorResult[Any], + dbapi_cursor: DBAPICursor, + ) -> Any: + raise NotImplementedError() + + def handle_exception( + self, + result: CursorResult[Any], + dbapi_cursor: Optional[DBAPICursor], + err: BaseException, + ) -> NoReturn: + raise err + + +class NoCursorFetchStrategy(ResultFetchStrategy): + """Cursor strategy for a result that has no open cursor. + + There are two varieties of this strategy, one for DQL and one for + DML (and also DDL), each of which represent a result that had a cursor + but no longer has one. + + """ + + __slots__ = () + + def soft_close(self, result, dbapi_cursor): + pass + + def hard_close(self, result, dbapi_cursor): + pass + + def fetchone(self, result, dbapi_cursor, hard_close=False): + return self._non_result(result, None) + + def fetchmany(self, result, dbapi_cursor, size=None): + return self._non_result(result, []) + + def fetchall(self, result, dbapi_cursor): + return self._non_result(result, []) + + def _non_result(self, result, default, err=None): + raise NotImplementedError() + + +class NoCursorDQLFetchStrategy(NoCursorFetchStrategy): + """Cursor strategy for a DQL result that has no open cursor. + + This is a result set that can return rows, i.e. for a SELECT, or for an + INSERT, UPDATE, DELETE that includes RETURNING. However it is in the state + where the cursor is closed and no rows remain available. The owning result + object may or may not be "hard closed", which determines if the fetch + methods send empty results or raise for closed result. + + """ + + __slots__ = () + + def _non_result(self, result, default, err=None): + if result.closed: + raise exc.ResourceClosedError( + "This result object is closed." + ) from err + else: + return default + + +_NO_CURSOR_DQL = NoCursorDQLFetchStrategy() + + +class NoCursorDMLFetchStrategy(NoCursorFetchStrategy): + """Cursor strategy for a DML result that has no open cursor. + + This is a result set that does not return rows, i.e. for an INSERT, + UPDATE, DELETE that does not include RETURNING. + + """ + + __slots__ = () + + def _non_result(self, result, default, err=None): + # we only expect to have a _NoResultMetaData() here right now. + assert not result._metadata.returns_rows + result._metadata._we_dont_return_rows(err) + + +_NO_CURSOR_DML = NoCursorDMLFetchStrategy() + + +class CursorFetchStrategy(ResultFetchStrategy): + """Call fetch methods from a DBAPI cursor. + + Alternate versions of this class may instead buffer the rows from + cursors or not use cursors at all. 
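+
+    For example (editor's sketch; the connection, statement and sizes are
+    illustrative), the default instance is swapped for a buffered strategy
+    when ``yield_per`` is used::
+
+        result = conn.execution_options(stream_results=True).execute(stmt)
+        result.yield_per(100)  # installs BufferedRowCursorFetchStrategy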
+ + """ + + __slots__ = () + + def soft_close( + self, result: CursorResult[Any], dbapi_cursor: Optional[DBAPICursor] + ) -> None: + result.cursor_strategy = _NO_CURSOR_DQL + + def hard_close( + self, result: CursorResult[Any], dbapi_cursor: Optional[DBAPICursor] + ) -> None: + result.cursor_strategy = _NO_CURSOR_DQL + + def handle_exception( + self, + result: CursorResult[Any], + dbapi_cursor: Optional[DBAPICursor], + err: BaseException, + ) -> NoReturn: + result.connection._handle_dbapi_exception( + err, None, None, dbapi_cursor, result.context + ) + + def yield_per( + self, + result: CursorResult[Any], + dbapi_cursor: Optional[DBAPICursor], + num: int, + ) -> None: + result.cursor_strategy = BufferedRowCursorFetchStrategy( + dbapi_cursor, + {"max_row_buffer": num}, + initial_buffer=collections.deque(), + growth_factor=0, + ) + + def fetchone( + self, + result: CursorResult[Any], + dbapi_cursor: DBAPICursor, + hard_close: bool = False, + ) -> Any: + try: + row = dbapi_cursor.fetchone() + if row is None: + result._soft_close(hard=hard_close) + return row + except BaseException as e: + self.handle_exception(result, dbapi_cursor, e) + + def fetchmany( + self, + result: CursorResult[Any], + dbapi_cursor: DBAPICursor, + size: Optional[int] = None, + ) -> Any: + try: + if size is None: + l = dbapi_cursor.fetchmany() + else: + l = dbapi_cursor.fetchmany(size) + + if not l: + result._soft_close() + return l + except BaseException as e: + self.handle_exception(result, dbapi_cursor, e) + + def fetchall( + self, + result: CursorResult[Any], + dbapi_cursor: DBAPICursor, + ) -> Any: + try: + rows = dbapi_cursor.fetchall() + result._soft_close() + return rows + except BaseException as e: + self.handle_exception(result, dbapi_cursor, e) + + +_DEFAULT_FETCH = CursorFetchStrategy() + + +class BufferedRowCursorFetchStrategy(CursorFetchStrategy): + """A cursor fetch strategy with row buffering behavior. + + This strategy buffers the contents of a selection of rows + before ``fetchone()`` is called. This is to allow the results of + ``cursor.description`` to be available immediately, when + interfacing with a DB-API that requires rows to be consumed before + this information is available (currently psycopg2, when used with + server-side cursors). + + The pre-fetching behavior fetches only one row initially, and then + grows its buffer size by a fixed amount with each successive need + for additional rows up the ``max_row_buffer`` size, which defaults + to 1000:: + + with psycopg2_engine.connect() as conn: + + result = conn.execution_options( + stream_results=True, max_row_buffer=50 + ).execute(text("select * from table")) + + .. versionadded:: 1.4 ``max_row_buffer`` may now exceed 1000 rows. + + .. 
seealso:: + + :ref:`psycopg2_execution_options` + """ + + __slots__ = ("_max_row_buffer", "_rowbuffer", "_bufsize", "_growth_factor") + + def __init__( + self, + dbapi_cursor, + execution_options, + growth_factor=5, + initial_buffer=None, + ): + self._max_row_buffer = execution_options.get("max_row_buffer", 1000) + + if initial_buffer is not None: + self._rowbuffer = initial_buffer + else: + self._rowbuffer = collections.deque(dbapi_cursor.fetchmany(1)) + self._growth_factor = growth_factor + + if growth_factor: + self._bufsize = min(self._max_row_buffer, self._growth_factor) + else: + self._bufsize = self._max_row_buffer + + @classmethod + def create(cls, result): + return BufferedRowCursorFetchStrategy( + result.cursor, + result.context.execution_options, + ) + + def _buffer_rows(self, result, dbapi_cursor): + """this is currently used only by fetchone().""" + + size = self._bufsize + try: + if size < 1: + new_rows = dbapi_cursor.fetchall() + else: + new_rows = dbapi_cursor.fetchmany(size) + except BaseException as e: + self.handle_exception(result, dbapi_cursor, e) + + if not new_rows: + return + self._rowbuffer = collections.deque(new_rows) + if self._growth_factor and size < self._max_row_buffer: + self._bufsize = min( + self._max_row_buffer, size * self._growth_factor + ) + + def yield_per(self, result, dbapi_cursor, num): + self._growth_factor = 0 + self._max_row_buffer = self._bufsize = num + + def soft_close(self, result, dbapi_cursor): + self._rowbuffer.clear() + super().soft_close(result, dbapi_cursor) + + def hard_close(self, result, dbapi_cursor): + self._rowbuffer.clear() + super().hard_close(result, dbapi_cursor) + + def fetchone(self, result, dbapi_cursor, hard_close=False): + if not self._rowbuffer: + self._buffer_rows(result, dbapi_cursor) + if not self._rowbuffer: + try: + result._soft_close(hard=hard_close) + except BaseException as e: + self.handle_exception(result, dbapi_cursor, e) + return None + return self._rowbuffer.popleft() + + def fetchmany(self, result, dbapi_cursor, size=None): + if size is None: + return self.fetchall(result, dbapi_cursor) + + rb = self._rowbuffer + lb = len(rb) + close = False + if size > lb: + try: + new = dbapi_cursor.fetchmany(size - lb) + except BaseException as e: + self.handle_exception(result, dbapi_cursor, e) + else: + if not new: + # defer closing since it may clear the row buffer + close = True + else: + rb.extend(new) + + res = [rb.popleft() for _ in range(min(size, len(rb)))] + if close: + result._soft_close() + return res + + def fetchall(self, result, dbapi_cursor): + try: + ret = list(self._rowbuffer) + list(dbapi_cursor.fetchall()) + self._rowbuffer.clear() + result._soft_close() + return ret + except BaseException as e: + self.handle_exception(result, dbapi_cursor, e) + + +class FullyBufferedCursorFetchStrategy(CursorFetchStrategy): + """A cursor strategy that buffers rows fully upon creation. + + Used for operations where a result is to be delivered + after the database conversation can not be continued, + such as MSSQL INSERT...OUTPUT after an autocommit. 
+ + """ + + __slots__ = ("_rowbuffer", "alternate_cursor_description") + + def __init__( + self, + dbapi_cursor: Optional[DBAPICursor], + alternate_description: Optional[_DBAPICursorDescription] = None, + initial_buffer: Optional[Iterable[Any]] = None, + ): + self.alternate_cursor_description = alternate_description + if initial_buffer is not None: + self._rowbuffer = collections.deque(initial_buffer) + else: + assert dbapi_cursor is not None + self._rowbuffer = collections.deque(dbapi_cursor.fetchall()) + + def yield_per(self, result, dbapi_cursor, num): + pass + + def soft_close(self, result, dbapi_cursor): + self._rowbuffer.clear() + super().soft_close(result, dbapi_cursor) + + def hard_close(self, result, dbapi_cursor): + self._rowbuffer.clear() + super().hard_close(result, dbapi_cursor) + + def fetchone(self, result, dbapi_cursor, hard_close=False): + if self._rowbuffer: + return self._rowbuffer.popleft() + else: + result._soft_close(hard=hard_close) + return None + + def fetchmany(self, result, dbapi_cursor, size=None): + if size is None: + return self.fetchall(result, dbapi_cursor) + + rb = self._rowbuffer + rows = [rb.popleft() for _ in range(min(size, len(rb)))] + if not rows: + result._soft_close() + return rows + + def fetchall(self, result, dbapi_cursor): + ret = self._rowbuffer + self._rowbuffer = collections.deque() + result._soft_close() + return ret + + +class _NoResultMetaData(ResultMetaData): + __slots__ = () + + returns_rows = False + + def _we_dont_return_rows(self, err=None): + raise exc.ResourceClosedError( + "This result object does not return rows. " + "It has been closed automatically." + ) from err + + def _index_for_key(self, keys, raiseerr): + self._we_dont_return_rows() + + def _metadata_for_keys(self, key): + self._we_dont_return_rows() + + def _reduce(self, keys): + self._we_dont_return_rows() + + @property + def _keymap(self): + self._we_dont_return_rows() + + @property + def _key_to_index(self): + self._we_dont_return_rows() + + @property + def _processors(self): + self._we_dont_return_rows() + + @property + def keys(self): + self._we_dont_return_rows() + + +_NO_RESULT_METADATA = _NoResultMetaData() + + +def null_dml_result() -> IteratorResult[Any]: + it: IteratorResult[Any] = IteratorResult(_NoResultMetaData(), iter([])) + it._soft_close() + return it + + +class CursorResult(Result[_T]): + """A Result that is representing state from a DBAPI cursor. + + .. versionchanged:: 1.4 The :class:`.CursorResult`` + class replaces the previous :class:`.ResultProxy` interface. + This classes are based on the :class:`.Result` calling API + which provides an updated usage model and calling facade for + SQLAlchemy Core and SQLAlchemy ORM. + + Returns database rows via the :class:`.Row` class, which provides + additional API features and behaviors on top of the raw data returned by + the DBAPI. Through the use of filters such as the :meth:`.Result.scalars` + method, other kinds of objects may also be returned. + + .. seealso:: + + :ref:`tutorial_selecting_data` - introductory material for accessing + :class:`_engine.CursorResult` and :class:`.Row` objects. 
+ + """ + + __slots__ = ( + "context", + "dialect", + "cursor", + "cursor_strategy", + "_echo", + "connection", + ) + + _metadata: Union[CursorResultMetaData, _NoResultMetaData] + _no_result_metadata = _NO_RESULT_METADATA + _soft_closed: bool = False + closed: bool = False + _is_cursor = True + + context: DefaultExecutionContext + dialect: Dialect + cursor_strategy: ResultFetchStrategy + connection: Connection + + def __init__( + self, + context: DefaultExecutionContext, + cursor_strategy: ResultFetchStrategy, + cursor_description: Optional[_DBAPICursorDescription], + ): + self.context = context + self.dialect = context.dialect + self.cursor = context.cursor + self.cursor_strategy = cursor_strategy + self.connection = context.root_connection + self._echo = echo = ( + self.connection._echo and context.engine._should_log_debug() + ) + + if cursor_description is not None: + # inline of Result._row_getter(), set up an initial row + # getter assuming no transformations will be called as this + # is the most common case + + metadata = self._init_metadata(context, cursor_description) + + _make_row: Any + _make_row = functools.partial( + Row, + metadata, + metadata._effective_processors, + metadata._key_to_index, + ) + + if context._num_sentinel_cols: + sentinel_filter = operator.itemgetter( + slice(-context._num_sentinel_cols) + ) + + def _sliced_row(raw_data): + return _make_row(sentinel_filter(raw_data)) + + sliced_row = _sliced_row + else: + sliced_row = _make_row + + if echo: + log = self.context.connection._log_debug + + def _log_row(row): + log("Row %r", sql_util._repr_row(row)) + return row + + self._row_logging_fn = _log_row + + def _make_row_2(row): + return _log_row(sliced_row(row)) + + make_row = _make_row_2 + else: + make_row = sliced_row + self._set_memoized_attribute("_row_getter", make_row) + + else: + assert context._num_sentinel_cols == 0 + self._metadata = self._no_result_metadata + + def _init_metadata(self, context, cursor_description): + if context.compiled: + compiled = context.compiled + + if compiled._cached_metadata: + metadata = compiled._cached_metadata + else: + metadata = CursorResultMetaData(self, cursor_description) + if metadata._safe_for_cache: + compiled._cached_metadata = metadata + + # result rewrite/ adapt step. this is to suit the case + # when we are invoked against a cached Compiled object, we want + # to rewrite the ResultMetaData to reflect the Column objects + # that are in our current SQL statement object, not the one + # that is associated with the cached Compiled object. + # the Compiled object may also tell us to not + # actually do this step; this is to support the ORM where + # it is to produce a new Result object in any case, and will + # be using the cached Column objects against this database result + # so we don't want to rewrite them. + # + # Basically this step suits the use case where the end user + # is using Core SQL expressions and is accessing columns in the + # result row using row._mapping[table.c.column]. 
+ if ( + not context.execution_options.get( + "_result_disable_adapt_to_context", False + ) + and compiled._result_columns + and context.cache_hit is context.dialect.CACHE_HIT + and compiled.statement is not context.invoked_statement + ): + metadata = metadata._adapt_to_context(context) + + self._metadata = metadata + + else: + self._metadata = metadata = CursorResultMetaData( + self, cursor_description + ) + if self._echo: + context.connection._log_debug( + "Col %r", tuple(x[0] for x in cursor_description) + ) + return metadata + + def _soft_close(self, hard=False): + """Soft close this :class:`_engine.CursorResult`. + + This releases all DBAPI cursor resources, but leaves the + CursorResult "open" from a semantic perspective, meaning the + fetchXXX() methods will continue to return empty results. + + This method is called automatically when: + + * all result rows are exhausted using the fetchXXX() methods. + * cursor.description is None. + + This method is **not public**, but is documented in order to clarify + the "autoclose" process used. + + .. seealso:: + + :meth:`_engine.CursorResult.close` + + + """ + + if (not hard and self._soft_closed) or (hard and self.closed): + return + + if hard: + self.closed = True + self.cursor_strategy.hard_close(self, self.cursor) + else: + self.cursor_strategy.soft_close(self, self.cursor) + + if not self._soft_closed: + cursor = self.cursor + self.cursor = None # type: ignore + self.connection._safe_close_cursor(cursor) + self._soft_closed = True + + @property + def inserted_primary_key_rows(self): + """Return the value of + :attr:`_engine.CursorResult.inserted_primary_key` + as a row contained within a list; some dialects may support a + multiple row form as well. + + .. note:: As indicated below, in current SQLAlchemy versions this + accessor is only useful beyond what's already supplied by + :attr:`_engine.CursorResult.inserted_primary_key` when using the + :ref:`postgresql_psycopg2` dialect. Future versions hope to + generalize this feature to more dialects. + + This accessor is added to support dialects that offer the feature + that is currently implemented by the :ref:`psycopg2_executemany_mode` + feature, currently **only the psycopg2 dialect**, which provides + for many rows to be INSERTed at once while still retaining the + behavior of being able to return server-generated primary key values. + + * **When using the psycopg2 dialect, or other dialects that may support + "fast executemany" style inserts in upcoming releases** : When + invoking an INSERT statement while passing a list of rows as the + second argument to :meth:`_engine.Connection.execute`, this accessor + will then provide a list of rows, where each row contains the primary + key value for each row that was INSERTed. + + * **When using all other dialects / backends that don't yet support + this feature**: This accessor is only useful for **single row INSERT + statements**, and returns the same information as that of the + :attr:`_engine.CursorResult.inserted_primary_key` within a + single-element list. When an INSERT statement is executed in + conjunction with a list of rows to be INSERTed, the list will contain + one row per row inserted in the statement, however it will contain + ``None`` for any server-generated values. + + Future releases of SQLAlchemy will further generalize the + "fast execution helper" feature of psycopg2 to suit other dialects, + thus allowing this accessor to be of more general use. + + .. versionadded:: 1.4 + + .. 
seealso:: + + :attr:`_engine.CursorResult.inserted_primary_key` + + """ + if not self.context.compiled: + raise exc.InvalidRequestError( + "Statement is not a compiled expression construct." + ) + elif not self.context.isinsert: + raise exc.InvalidRequestError( + "Statement is not an insert() expression construct." + ) + elif self.context._is_explicit_returning: + raise exc.InvalidRequestError( + "Can't call inserted_primary_key " + "when returning() " + "is used." + ) + return self.context.inserted_primary_key_rows + + @property + def inserted_primary_key(self): + """Return the primary key for the row just inserted. + + The return value is a :class:`_result.Row` object representing + a named tuple of primary key values in the order in which the + primary key columns are configured in the source + :class:`_schema.Table`. + + .. versionchanged:: 1.4.8 - the + :attr:`_engine.CursorResult.inserted_primary_key` + value is now a named tuple via the :class:`_result.Row` class, + rather than a plain tuple. + + This accessor only applies to single row :func:`_expression.insert` + constructs which did not explicitly specify + :meth:`_expression.Insert.returning`. Support for multirow inserts, + while not yet available for most backends, would be accessed using + the :attr:`_engine.CursorResult.inserted_primary_key_rows` accessor. + + Note that primary key columns which specify a server_default clause, or + otherwise do not qualify as "autoincrement" columns (see the notes at + :class:`_schema.Column`), and were generated using the database-side + default, will appear in this list as ``None`` unless the backend + supports "returning" and the insert statement executed with the + "implicit returning" enabled. + + Raises :class:`~sqlalchemy.exc.InvalidRequestError` if the executed + statement is not a compiled expression construct + or is not an insert() construct. + + """ + + if self.context.executemany: + raise exc.InvalidRequestError( + "This statement was an executemany call; if primary key " + "returning is supported, please " + "use .inserted_primary_key_rows." + ) + + ikp = self.inserted_primary_key_rows + if ikp: + return ikp[0] + else: + return None + + def last_updated_params(self): + """Return the collection of updated parameters from this + execution. + + Raises :class:`~sqlalchemy.exc.InvalidRequestError` if the executed + statement is not a compiled expression construct + or is not an update() construct. + + """ + if not self.context.compiled: + raise exc.InvalidRequestError( + "Statement is not a compiled expression construct." + ) + elif not self.context.isupdate: + raise exc.InvalidRequestError( + "Statement is not an update() expression construct." + ) + elif self.context.executemany: + return self.context.compiled_parameters + else: + return self.context.compiled_parameters[0] + + def last_inserted_params(self): + """Return the collection of inserted parameters from this + execution. + + Raises :class:`~sqlalchemy.exc.InvalidRequestError` if the executed + statement is not a compiled expression construct + or is not an insert() construct. + + """ + if not self.context.compiled: + raise exc.InvalidRequestError( + "Statement is not a compiled expression construct." + ) + elif not self.context.isinsert: + raise exc.InvalidRequestError( + "Statement is not an insert() expression construct." 
+ ) + elif self.context.executemany: + return self.context.compiled_parameters + else: + return self.context.compiled_parameters[0] + + @property + def returned_defaults_rows(self): + """Return a list of rows each containing the values of default + columns that were fetched using + the :meth:`.ValuesBase.return_defaults` feature. + + The return value is a list of :class:`.Row` objects. + + .. versionadded:: 1.4 + + """ + return self.context.returned_default_rows + + def splice_horizontally(self, other): + """Return a new :class:`.CursorResult` that "horizontally splices" + together the rows of this :class:`.CursorResult` with that of another + :class:`.CursorResult`. + + .. tip:: This method is for the benefit of the SQLAlchemy ORM and is + not intended for general use. + + "horizontally splices" means that for each row in the first and second + result sets, a new row that concatenates the two rows together is + produced, which then becomes the new row. The incoming + :class:`.CursorResult` must have the identical number of rows. It is + typically expected that the two result sets come from the same sort + order as well, as the result rows are spliced together based on their + position in the result. + + The expected use case here is so that multiple INSERT..RETURNING + statements (which definitely need to be sorted) against different + tables can produce a single result that looks like a JOIN of those two + tables. + + E.g.:: + + r1 = connection.execute( + users.insert().returning( + users.c.user_name, users.c.user_id, sort_by_parameter_order=True + ), + user_values, + ) + + r2 = connection.execute( + addresses.insert().returning( + addresses.c.address_id, + addresses.c.address, + addresses.c.user_id, + sort_by_parameter_order=True, + ), + address_values, + ) + + rows = r1.splice_horizontally(r2).all() + assert rows == [ + ("john", 1, 1, "foo@bar.com", 1), + ("jack", 2, 2, "bar@bat.com", 2), + ] + + .. versionadded:: 2.0 + + .. seealso:: + + :meth:`.CursorResult.splice_vertically` + + + """ # noqa: E501 + + clone = self._generate() + total_rows = [ + tuple(r1) + tuple(r2) + for r1, r2 in zip( + list(self._raw_row_iterator()), + list(other._raw_row_iterator()), + ) + ] + + clone._metadata = clone._metadata._splice_horizontally(other._metadata) + + clone.cursor_strategy = FullyBufferedCursorFetchStrategy( + None, + initial_buffer=total_rows, + ) + clone._reset_memoizations() + return clone + + def splice_vertically(self, other): + """Return a new :class:`.CursorResult` that "vertically splices", + i.e. "extends", the rows of this :class:`.CursorResult` with that of + another :class:`.CursorResult`. + + .. tip:: This method is for the benefit of the SQLAlchemy ORM and is + not intended for general use. + + "vertically splices" means the rows of the given result are appended to + the rows of this cursor result. The incoming :class:`.CursorResult` + must have rows that represent the identical list of columns in the + identical order as they are in this :class:`.CursorResult`. + + .. versionadded:: 2.0 + + .. seealso:: + + :meth:`.CursorResult.splice_horizontally` + + """ + clone = self._generate() + total_rows = list(self._raw_row_iterator()) + list( + other._raw_row_iterator() + ) + + clone.cursor_strategy = FullyBufferedCursorFetchStrategy( + None, + initial_buffer=total_rows, + ) + clone._reset_memoizations() + return clone + + def _rewind(self, rows): + """rewind this result back to the given rowset. 
+ + this is used internally for the case where an :class:`.Insert` + construct combines the use of + :meth:`.Insert.return_defaults` along with the + "supplemental columns" feature. + + """ + + if self._echo: + self.context.connection._log_debug( + "CursorResult rewound %d row(s)", len(rows) + ) + + # the rows given are expected to be Row objects, so we + # have to clear out processors which have already run on these + # rows + self._metadata = cast( + CursorResultMetaData, self._metadata + )._remove_processors() + + self.cursor_strategy = FullyBufferedCursorFetchStrategy( + None, + # TODO: if these are Row objects, can we save on not having to + # re-make new Row objects out of them a second time? is that + # what's actually happening right now? maybe look into this + initial_buffer=rows, + ) + self._reset_memoizations() + return self + + @property + def returned_defaults(self): + """Return the values of default columns that were fetched using + the :meth:`.ValuesBase.return_defaults` feature. + + The value is an instance of :class:`.Row`, or ``None`` + if :meth:`.ValuesBase.return_defaults` was not used or if the + backend does not support RETURNING. + + .. seealso:: + + :meth:`.ValuesBase.return_defaults` + + """ + + if self.context.executemany: + raise exc.InvalidRequestError( + "This statement was an executemany call; if return defaults " + "is supported, please use .returned_defaults_rows." + ) + + rows = self.context.returned_default_rows + if rows: + return rows[0] + else: + return None + + def lastrow_has_defaults(self): + """Return ``lastrow_has_defaults()`` from the underlying + :class:`.ExecutionContext`. + + See :class:`.ExecutionContext` for details. + + """ + + return self.context.lastrow_has_defaults() + + def postfetch_cols(self): + """Return ``postfetch_cols()`` from the underlying + :class:`.ExecutionContext`. + + See :class:`.ExecutionContext` for details. + + Raises :class:`~sqlalchemy.exc.InvalidRequestError` if the executed + statement is not a compiled expression construct + or is not an insert() or update() construct. + + """ + + if not self.context.compiled: + raise exc.InvalidRequestError( + "Statement is not a compiled expression construct." + ) + elif not self.context.isinsert and not self.context.isupdate: + raise exc.InvalidRequestError( + "Statement is not an insert() or update() " + "expression construct." + ) + return self.context.postfetch_cols + + def prefetch_cols(self): + """Return ``prefetch_cols()`` from the underlying + :class:`.ExecutionContext`. + + See :class:`.ExecutionContext` for details. + + Raises :class:`~sqlalchemy.exc.InvalidRequestError` if the executed + statement is not a compiled expression construct + or is not an insert() or update() construct. + + """ + + if not self.context.compiled: + raise exc.InvalidRequestError( + "Statement is not a compiled expression construct." + ) + elif not self.context.isinsert and not self.context.isupdate: + raise exc.InvalidRequestError( + "Statement is not an insert() or update() " + "expression construct." + ) + return self.context.prefetch_cols + + def supports_sane_rowcount(self): + """Return ``supports_sane_rowcount`` from the dialect. + + See :attr:`_engine.CursorResult.rowcount` for background. + + """ + + return self.dialect.supports_sane_rowcount + + def supports_sane_multi_rowcount(self): + """Return ``supports_sane_multi_rowcount`` from the dialect. + + See :attr:`_engine.CursorResult.rowcount` for background. 
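+
+        A sketch of a typical defensive use (``conn``, ``update_stmt`` and
+        ``param_list`` are assumed names, with each parameter set expected to
+        match exactly one row)::
+
+            result = conn.execute(update_stmt, param_list)
+            if result.supports_sane_multi_rowcount():
+                assert result.rowcount == len(param_list)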
+ + """ + + return self.dialect.supports_sane_multi_rowcount + + @util.memoized_property + def rowcount(self) -> int: + """Return the 'rowcount' for this result. + + The primary purpose of 'rowcount' is to report the number of rows + matched by the WHERE criterion of an UPDATE or DELETE statement + executed once (i.e. for a single parameter set), which may then be + compared to the number of rows expected to be updated or deleted as a + means of asserting data integrity. + + This attribute is transferred from the ``cursor.rowcount`` attribute + of the DBAPI before the cursor is closed, to support DBAPIs that + don't make this value available after cursor close. Some DBAPIs may + offer meaningful values for other kinds of statements, such as INSERT + and SELECT statements as well. In order to retrieve ``cursor.rowcount`` + for these statements, set the + :paramref:`.Connection.execution_options.preserve_rowcount` + execution option to True, which will cause the ``cursor.rowcount`` + value to be unconditionally memoized before any results are returned + or the cursor is closed, regardless of statement type. + + For cases where the DBAPI does not support rowcount for a particular + kind of statement and/or execution, the returned value will be ``-1``, + which is delivered directly from the DBAPI and is part of :pep:`249`. + All DBAPIs should support rowcount for single-parameter-set + UPDATE and DELETE statements, however. + + .. note:: + + Notes regarding :attr:`_engine.CursorResult.rowcount`: + + + * This attribute returns the number of rows *matched*, + which is not necessarily the same as the number of rows + that were actually *modified*. For example, an UPDATE statement + may have no net change on a given row if the SET values + given are the same as those present in the row already. + Such a row would be matched but not modified. + On backends that feature both styles, such as MySQL, + rowcount is configured to return the match + count in all cases. + + * :attr:`_engine.CursorResult.rowcount` in the default case is + *only* useful in conjunction with an UPDATE or DELETE statement, + and only with a single set of parameters. For other kinds of + statements, SQLAlchemy will not attempt to pre-memoize the value + unless the + :paramref:`.Connection.execution_options.preserve_rowcount` + execution option is used. Note that contrary to :pep:`249`, many + DBAPIs do not support rowcount values for statements that are not + UPDATE or DELETE, particularly when rows are being returned which + are not fully pre-buffered. DBAPIs that dont support rowcount + for a particular kind of statement should return the value ``-1`` + for such statements. + + * :attr:`_engine.CursorResult.rowcount` may not be meaningful + when executing a single statement with multiple parameter sets + (i.e. an :term:`executemany`). Most DBAPIs do not sum "rowcount" + values across multiple parameter sets and will return ``-1`` + when accessed. + + * SQLAlchemy's :ref:`engine_insertmanyvalues` feature does support + a correct population of :attr:`_engine.CursorResult.rowcount` + when the :paramref:`.Connection.execution_options.preserve_rowcount` + execution option is set to True. + + * Statements that use RETURNING may not support rowcount, returning + a ``-1`` value instead. + + .. 
seealso:: + + :ref:`tutorial_update_delete_rowcount` - in the :ref:`unified_tutorial` + + :paramref:`.Connection.execution_options.preserve_rowcount` + + """ # noqa: E501 + try: + return self.context.rowcount + except BaseException as e: + self.cursor_strategy.handle_exception(self, self.cursor, e) + raise # not called + + @property + def lastrowid(self): + """Return the 'lastrowid' accessor on the DBAPI cursor. + + This is a DBAPI specific method and is only functional + for those backends which support it, for statements + where it is appropriate. It's behavior is not + consistent across backends. + + Usage of this method is normally unnecessary when + using insert() expression constructs; the + :attr:`~CursorResult.inserted_primary_key` attribute provides a + tuple of primary key values for a newly inserted row, + regardless of database backend. + + """ + try: + return self.context.get_lastrowid() + except BaseException as e: + self.cursor_strategy.handle_exception(self, self.cursor, e) + + @property + def returns_rows(self): + """True if this :class:`_engine.CursorResult` returns zero or more + rows. + + I.e. if it is legal to call the methods + :meth:`_engine.CursorResult.fetchone`, + :meth:`_engine.CursorResult.fetchmany` + :meth:`_engine.CursorResult.fetchall`. + + Overall, the value of :attr:`_engine.CursorResult.returns_rows` should + always be synonymous with whether or not the DBAPI cursor had a + ``.description`` attribute, indicating the presence of result columns, + noting that a cursor that returns zero rows still has a + ``.description`` if a row-returning statement was emitted. + + This attribute should be True for all results that are against + SELECT statements, as well as for DML statements INSERT/UPDATE/DELETE + that use RETURNING. For INSERT/UPDATE/DELETE statements that were + not using RETURNING, the value will usually be False, however + there are some dialect-specific exceptions to this, such as when + using the MSSQL / pyodbc dialect a SELECT is emitted inline in + order to retrieve an inserted primary key value. + + + """ + return self._metadata.returns_rows + + @property + def is_insert(self): + """True if this :class:`_engine.CursorResult` is the result + of a executing an expression language compiled + :func:`_expression.insert` construct. + + When True, this implies that the + :attr:`inserted_primary_key` attribute is accessible, + assuming the statement did not include + a user defined "returning" construct. + + """ + return self.context.isinsert + + def _fetchiter_impl(self): + fetchone = self.cursor_strategy.fetchone + + while True: + row = fetchone(self, self.cursor) + if row is None: + break + yield row + + def _fetchone_impl(self, hard_close=False): + return self.cursor_strategy.fetchone(self, self.cursor, hard_close) + + def _fetchall_impl(self): + return self.cursor_strategy.fetchall(self, self.cursor) + + def _fetchmany_impl(self, size=None): + return self.cursor_strategy.fetchmany(self, self.cursor, size) + + def _raw_row_iterator(self): + return self._fetchiter_impl() + + def merge(self, *others: Result[Any]) -> MergedResult[Any]: + merged_result = super().merge(*others) + if self.context._has_rowcount: + merged_result.rowcount = sum( + cast("CursorResult[Any]", result).rowcount + for result in (self,) + others + ) + return merged_result + + def close(self) -> Any: + """Close this :class:`_engine.CursorResult`. + + This closes out the underlying DBAPI cursor corresponding to the + statement execution, if one is still present. 
Note that the DBAPI + cursor is automatically released when the :class:`_engine.CursorResult` + exhausts all available rows. :meth:`_engine.CursorResult.close` is + generally an optional method except in the case when discarding a + :class:`_engine.CursorResult` that still has additional rows pending + for fetch. + + After this method is called, it is no longer valid to call upon + the fetch methods, which will raise a :class:`.ResourceClosedError` + on subsequent use. + + .. seealso:: + + :ref:`connections_toplevel` + + """ + self._soft_close(hard=True) + + @_generative + def yield_per(self, num: int) -> Self: + self._yield_per = num + self.cursor_strategy.yield_per(self, self.cursor, num) + return self + + +ResultProxy = CursorResult diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/engine/default.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/engine/default.py new file mode 100644 index 0000000000000000000000000000000000000000..69c6dc1b623359e5e25b6fa2429de95847e03005 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/engine/default.py @@ -0,0 +1,2380 @@ +# engine/default.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php +# mypy: allow-untyped-defs, allow-untyped-calls + +"""Default implementations of per-dialect sqlalchemy.engine classes. + +These are semi-private implementation classes which are only of importance +to database dialect authors; dialects will usually use the classes here +as the base class for their own corresponding classes. + +""" + +from __future__ import annotations + +import functools +import operator +import random +import re +from time import perf_counter +import typing +from typing import Any +from typing import Callable +from typing import cast +from typing import Dict +from typing import List +from typing import Mapping +from typing import MutableMapping +from typing import MutableSequence +from typing import Optional +from typing import Sequence +from typing import Set +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING +from typing import Union +import weakref + +from . import characteristics +from . import cursor as _cursor +from . import interfaces +from .base import Connection +from .interfaces import CacheStats +from .interfaces import DBAPICursor +from .interfaces import Dialect +from .interfaces import ExecuteStyle +from .interfaces import ExecutionContext +from .reflection import ObjectKind +from .reflection import ObjectScope +from .. import event +from .. import exc +from .. import pool +from .. 
import util +from ..sql import compiler +from ..sql import dml +from ..sql import expression +from ..sql import type_api +from ..sql import util as sql_util +from ..sql._typing import is_tuple_type +from ..sql.base import _NoArg +from ..sql.compiler import DDLCompiler +from ..sql.compiler import InsertmanyvaluesSentinelOpts +from ..sql.compiler import SQLCompiler +from ..sql.elements import quoted_name +from ..util.typing import Final +from ..util.typing import Literal + +if typing.TYPE_CHECKING: + from types import ModuleType + + from .base import Engine + from .cursor import ResultFetchStrategy + from .interfaces import _CoreMultiExecuteParams + from .interfaces import _CoreSingleExecuteParams + from .interfaces import _DBAPICursorDescription + from .interfaces import _DBAPIMultiExecuteParams + from .interfaces import _DBAPISingleExecuteParams + from .interfaces import _ExecuteOptions + from .interfaces import _MutableCoreSingleExecuteParams + from .interfaces import _ParamStyle + from .interfaces import ConnectArgsType + from .interfaces import DBAPIConnection + from .interfaces import IsolationLevel + from .row import Row + from .url import URL + from ..event import _ListenerFnType + from ..pool import Pool + from ..pool import PoolProxiedConnection + from ..sql import Executable + from ..sql.compiler import Compiled + from ..sql.compiler import Linting + from ..sql.compiler import ResultColumnsEntry + from ..sql.dml import DMLState + from ..sql.dml import UpdateBase + from ..sql.elements import BindParameter + from ..sql.schema import Column + from ..sql.type_api import _BindProcessorType + from ..sql.type_api import _ResultProcessorType + from ..sql.type_api import TypeEngine + + +# When we're handed literal SQL, ensure it's a SELECT query +SERVER_SIDE_CURSOR_RE = re.compile(r"\s*SELECT", re.I | re.UNICODE) + + +( + CACHE_HIT, + CACHE_MISS, + CACHING_DISABLED, + NO_CACHE_KEY, + NO_DIALECT_SUPPORT, +) = list(CacheStats) + + +class DefaultDialect(Dialect): + """Default implementation of Dialect""" + + statement_compiler = compiler.SQLCompiler + ddl_compiler = compiler.DDLCompiler + type_compiler_cls = compiler.GenericTypeCompiler + + preparer = compiler.IdentifierPreparer + supports_alter = True + supports_comments = False + supports_constraint_comments = False + inline_comments = False + supports_statement_cache = True + + div_is_floordiv = True + + bind_typing = interfaces.BindTyping.NONE + + include_set_input_sizes: Optional[Set[Any]] = None + exclude_set_input_sizes: Optional[Set[Any]] = None + + # the first value we'd get for an autoincrement column. + default_sequence_base = 1 + + # most DBAPIs happy with this for execute(). + # not cx_oracle. 
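+    # (the cx_Oracle dialect, for example, overrides this to ``list``)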
+ execute_sequence_format = tuple + + supports_schemas = True + supports_views = True + supports_sequences = False + sequences_optional = False + preexecute_autoincrement_sequences = False + supports_identity_columns = False + postfetch_lastrowid = True + favor_returning_over_lastrowid = False + insert_null_pk_still_autoincrements = False + update_returning = False + delete_returning = False + update_returning_multifrom = False + delete_returning_multifrom = False + insert_returning = False + + cte_follows_insert = False + + supports_native_enum = False + supports_native_boolean = False + supports_native_uuid = False + returns_native_bytes = False + + non_native_boolean_check_constraint = True + + supports_simple_order_by_label = True + + tuple_in_values = False + + connection_characteristics = util.immutabledict( + { + "isolation_level": characteristics.IsolationLevelCharacteristic(), + "logging_token": characteristics.LoggingTokenCharacteristic(), + } + ) + + engine_config_types: Mapping[str, Any] = util.immutabledict( + { + "pool_timeout": util.asint, + "echo": util.bool_or_str("debug"), + "echo_pool": util.bool_or_str("debug"), + "pool_recycle": util.asint, + "pool_size": util.asint, + "max_overflow": util.asint, + "future": util.asbool, + } + ) + + # if the NUMERIC type + # returns decimal.Decimal. + # *not* the FLOAT type however. + supports_native_decimal = False + + name = "default" + + # length at which to truncate + # any identifier. + max_identifier_length = 9999 + _user_defined_max_identifier_length: Optional[int] = None + + isolation_level: Optional[str] = None + + # sub-categories of max_identifier_length. + # currently these accommodate for MySQL which allows alias names + # of 255 but DDL names only of 64. + max_index_name_length: Optional[int] = None + max_constraint_name_length: Optional[int] = None + + supports_sane_rowcount = True + supports_sane_multi_rowcount = True + colspecs: MutableMapping[Type[TypeEngine[Any]], Type[TypeEngine[Any]]] = {} + default_paramstyle = "named" + + supports_default_values = False + """dialect supports INSERT... DEFAULT VALUES syntax""" + + supports_default_metavalue = False + """dialect supports INSERT... VALUES (DEFAULT) syntax""" + + default_metavalue_token = "DEFAULT" + """for INSERT... VALUES (DEFAULT) syntax, the token to put in the + parenthesis.""" + + # not sure if this is a real thing but the compiler will deliver it + # if this is the only flag enabled. + supports_empty_insert = True + """dialect supports INSERT () VALUES ()""" + + supports_multivalues_insert = False + + use_insertmanyvalues: bool = False + + use_insertmanyvalues_wo_returning: bool = False + + insertmanyvalues_implicit_sentinel: InsertmanyvaluesSentinelOpts = ( + InsertmanyvaluesSentinelOpts.NOT_SUPPORTED + ) + + insertmanyvalues_page_size: int = 1000 + insertmanyvalues_max_parameters = 32700 + + supports_is_distinct_from = True + + supports_server_side_cursors = False + + server_side_cursors = False + + # extra record-level locking features (#4860) + supports_for_update_of = False + + server_version_info = None + + default_schema_name: Optional[str] = None + + # indicates symbol names are + # UPPERCASED if they are case insensitive + # within the database. + # if this is True, the methods normalize_name() + # and denormalize_name() must be provided. + requires_name_normalize = False + + is_async = False + + has_terminate = False + + # TODO: this is not to be part of 2.0. 
implement rudimentary binary + # literals for SQLite, PostgreSQL, MySQL only within + # _Binary.literal_processor + _legacy_binary_type_literal_encoding = "utf-8" + + @util.deprecated_params( + empty_in_strategy=( + "1.4", + "The :paramref:`_sa.create_engine.empty_in_strategy` keyword is " + "deprecated, and no longer has any effect. All IN expressions " + "are now rendered using " + 'the "expanding parameter" strategy which renders a set of bound' + 'expressions, or an "empty set" SELECT, at statement execution' + "time.", + ), + server_side_cursors=( + "1.4", + "The :paramref:`_sa.create_engine.server_side_cursors` parameter " + "is deprecated and will be removed in a future release. Please " + "use the " + ":paramref:`_engine.Connection.execution_options.stream_results` " + "parameter.", + ), + ) + def __init__( + self, + paramstyle: Optional[_ParamStyle] = None, + isolation_level: Optional[IsolationLevel] = None, + dbapi: Optional[ModuleType] = None, + implicit_returning: Literal[True] = True, + supports_native_boolean: Optional[bool] = None, + max_identifier_length: Optional[int] = None, + label_length: Optional[int] = None, + insertmanyvalues_page_size: Union[_NoArg, int] = _NoArg.NO_ARG, + use_insertmanyvalues: Optional[bool] = None, + # util.deprecated_params decorator cannot render the + # Linting.NO_LINTING constant + compiler_linting: Linting = int(compiler.NO_LINTING), # type: ignore + server_side_cursors: bool = False, + **kwargs: Any, + ): + if server_side_cursors: + if not self.supports_server_side_cursors: + raise exc.ArgumentError( + "Dialect %s does not support server side cursors" % self + ) + else: + self.server_side_cursors = True + + if getattr(self, "use_setinputsizes", False): + util.warn_deprecated( + "The dialect-level use_setinputsizes attribute is " + "deprecated. 
Please use " + "bind_typing = BindTyping.SETINPUTSIZES", + "2.0", + ) + self.bind_typing = interfaces.BindTyping.SETINPUTSIZES + + self.positional = False + self._ischema = None + + self.dbapi = dbapi + + if paramstyle is not None: + self.paramstyle = paramstyle + elif self.dbapi is not None: + self.paramstyle = self.dbapi.paramstyle + else: + self.paramstyle = self.default_paramstyle + self.positional = self.paramstyle in ( + "qmark", + "format", + "numeric", + "numeric_dollar", + ) + self.identifier_preparer = self.preparer(self) + self._on_connect_isolation_level = isolation_level + + legacy_tt_callable = getattr(self, "type_compiler", None) + if legacy_tt_callable is not None: + tt_callable = cast( + Type[compiler.GenericTypeCompiler], + self.type_compiler, + ) + else: + tt_callable = self.type_compiler_cls + + self.type_compiler_instance = self.type_compiler = tt_callable(self) + + if supports_native_boolean is not None: + self.supports_native_boolean = supports_native_boolean + + self._user_defined_max_identifier_length = max_identifier_length + if self._user_defined_max_identifier_length: + self.max_identifier_length = ( + self._user_defined_max_identifier_length + ) + self.label_length = label_length + self.compiler_linting = compiler_linting + + if use_insertmanyvalues is not None: + self.use_insertmanyvalues = use_insertmanyvalues + + if insertmanyvalues_page_size is not _NoArg.NO_ARG: + self.insertmanyvalues_page_size = insertmanyvalues_page_size + + @property + @util.deprecated( + "2.0", + "full_returning is deprecated, please use insert_returning, " + "update_returning, delete_returning", + ) + def full_returning(self): + return ( + self.insert_returning + and self.update_returning + and self.delete_returning + ) + + @util.memoized_property + def insert_executemany_returning(self): + """Default implementation for insert_executemany_returning, if not + otherwise overridden by the specific dialect. + + The default dialect determines "insert_executemany_returning" is + available if the dialect in use has opted into using the + "use_insertmanyvalues" feature. If they haven't opted into that, then + this attribute is False, unless the dialect in question overrides this + and provides some other implementation (such as the Oracle Database + dialects). + + """ + return self.insert_returning and self.use_insertmanyvalues + + @util.memoized_property + def insert_executemany_returning_sort_by_parameter_order(self): + """Default implementation for + insert_executemany_returning_deterministic_order, if not otherwise + overridden by the specific dialect. + + The default dialect determines "insert_executemany_returning" can have + deterministic order only if the dialect in use has opted into using the + "use_insertmanyvalues" feature, which implements deterministic ordering + using client side sentinel columns only by default. The + "insertmanyvalues" feature also features alternate forms that can + use server-generated PK values as "sentinels", but those are only + used if the :attr:`.Dialect.insertmanyvalues_implicit_sentinel` + bitflag enables those alternate SQL forms, which are disabled + by default. + + If the dialect in use hasn't opted into that, then this attribute is + False, unless the dialect in question overrides this and provides some + other implementation (such as the Oracle Database dialects). 
+ + """ + return self.insert_returning and self.use_insertmanyvalues + + update_executemany_returning = False + delete_executemany_returning = False + + @util.memoized_property + def loaded_dbapi(self) -> ModuleType: + if self.dbapi is None: + raise exc.InvalidRequestError( + f"Dialect {self} does not have a Python DBAPI established " + "and cannot be used for actual database interaction" + ) + return self.dbapi + + @util.memoized_property + def _bind_typing_render_casts(self): + return self.bind_typing is interfaces.BindTyping.RENDER_CASTS + + def _ensure_has_table_connection(self, arg: Connection) -> None: + if not isinstance(arg, Connection): + raise exc.ArgumentError( + "The argument passed to Dialect.has_table() should be a " + "%s, got %s. " + "Additionally, the Dialect.has_table() method is for " + "internal dialect " + "use only; please use " + "``inspect(some_engine).has_table(>)`` " + "for public API use." % (Connection, type(arg)) + ) + + @util.memoized_property + def _supports_statement_cache(self): + ssc = self.__class__.__dict__.get("supports_statement_cache", None) + if ssc is None: + util.warn( + "Dialect %s:%s will not make use of SQL compilation caching " + "as it does not set the 'supports_statement_cache' attribute " + "to ``True``. This can have " + "significant performance implications including some " + "performance degradations in comparison to prior SQLAlchemy " + "versions. Dialect maintainers should seek to set this " + "attribute to True after appropriate development and testing " + "for SQLAlchemy 1.4 caching support. Alternatively, this " + "attribute may be set to False which will disable this " + "warning." % (self.name, self.driver), + code="cprf", + ) + + return bool(ssc) + + @util.memoized_property + def _type_memos(self): + return weakref.WeakKeyDictionary() + + @property + def dialect_description(self): + return self.name + "+" + self.driver + + @property + def supports_sane_rowcount_returning(self): + """True if this dialect supports sane rowcount even if RETURNING is + in use. + + For dialects that don't support RETURNING, this is synonymous with + ``supports_sane_rowcount``. 
+ + """ + return self.supports_sane_rowcount + + @classmethod + def get_pool_class(cls, url: URL) -> Type[Pool]: + return getattr(cls, "poolclass", pool.QueuePool) + + def get_dialect_pool_class(self, url: URL) -> Type[Pool]: + return self.get_pool_class(url) + + @classmethod + def load_provisioning(cls): + package = ".".join(cls.__module__.split(".")[0:-1]) + try: + __import__(package + ".provision") + except ImportError: + pass + + def _builtin_onconnect(self) -> Optional[_ListenerFnType]: + if self._on_connect_isolation_level is not None: + + def builtin_connect(dbapi_conn, conn_rec): + self._assert_and_set_isolation_level( + dbapi_conn, self._on_connect_isolation_level + ) + + return builtin_connect + else: + return None + + def initialize(self, connection: Connection) -> None: + try: + self.server_version_info = self._get_server_version_info( + connection + ) + except NotImplementedError: + self.server_version_info = None + try: + self.default_schema_name = self._get_default_schema_name( + connection + ) + except NotImplementedError: + self.default_schema_name = None + + try: + self.default_isolation_level = self.get_default_isolation_level( + connection.connection.dbapi_connection + ) + except NotImplementedError: + self.default_isolation_level = None + + if not self._user_defined_max_identifier_length: + max_ident_length = self._check_max_identifier_length(connection) + if max_ident_length: + self.max_identifier_length = max_ident_length + + if ( + self.label_length + and self.label_length > self.max_identifier_length + ): + raise exc.ArgumentError( + "Label length of %d is greater than this dialect's" + " maximum identifier length of %d" + % (self.label_length, self.max_identifier_length) + ) + + def on_connect(self) -> Optional[Callable[[Any], Any]]: + # inherits the docstring from interfaces.Dialect.on_connect + return None + + def _check_max_identifier_length(self, connection): + """Perform a connection / server version specific check to determine + the max_identifier_length. + + If the dialect's class level max_identifier_length should be used, + can return None. + + .. versionadded:: 1.3.9 + + """ + return None + + def get_default_isolation_level(self, dbapi_conn): + """Given a DBAPI connection, return its isolation level, or + a default isolation level if one cannot be retrieved. + + May be overridden by subclasses in order to provide a + "fallback" isolation level for databases that cannot reliably + retrieve the actual isolation level. + + By default, calls the :meth:`_engine.Interfaces.get_isolation_level` + method, propagating any exceptions raised. + + .. versionadded:: 1.3.22 + + """ + return self.get_isolation_level(dbapi_conn) + + def type_descriptor(self, typeobj): + """Provide a database-specific :class:`.TypeEngine` object, given + the generic object which comes from the types module. + + This method looks for a dictionary called + ``colspecs`` as a class or instance-level variable, + and passes on to :func:`_types.adapt_type`. 
+ + """ + return type_api.adapt_type(typeobj, self.colspecs) + + def has_index(self, connection, table_name, index_name, schema=None, **kw): + if not self.has_table(connection, table_name, schema=schema, **kw): + return False + for idx in self.get_indexes( + connection, table_name, schema=schema, **kw + ): + if idx["name"] == index_name: + return True + else: + return False + + def has_schema( + self, connection: Connection, schema_name: str, **kw: Any + ) -> bool: + return schema_name in self.get_schema_names(connection, **kw) + + def validate_identifier(self, ident: str) -> None: + if len(ident) > self.max_identifier_length: + raise exc.IdentifierError( + "Identifier '%s' exceeds maximum length of %d characters" + % (ident, self.max_identifier_length) + ) + + def connect(self, *cargs: Any, **cparams: Any) -> DBAPIConnection: + # inherits the docstring from interfaces.Dialect.connect + return self.loaded_dbapi.connect(*cargs, **cparams) # type: ignore[no-any-return] # NOQA: E501 + + def create_connect_args(self, url: URL) -> ConnectArgsType: + # inherits the docstring from interfaces.Dialect.create_connect_args + opts = url.translate_connect_args() + opts.update(url.query) + return ([], opts) + + def set_engine_execution_options( + self, engine: Engine, opts: Mapping[str, Any] + ) -> None: + supported_names = set(self.connection_characteristics).intersection( + opts + ) + if supported_names: + characteristics: Mapping[str, Any] = util.immutabledict( + (name, opts[name]) for name in supported_names + ) + + @event.listens_for(engine, "engine_connect") + def set_connection_characteristics(connection): + self._set_connection_characteristics( + connection, characteristics + ) + + def set_connection_execution_options( + self, connection: Connection, opts: Mapping[str, Any] + ) -> None: + supported_names = set(self.connection_characteristics).intersection( + opts + ) + if supported_names: + characteristics: Mapping[str, Any] = util.immutabledict( + (name, opts[name]) for name in supported_names + ) + self._set_connection_characteristics(connection, characteristics) + + def _set_connection_characteristics(self, connection, characteristics): + characteristic_values = [ + (name, self.connection_characteristics[name], value) + for name, value in characteristics.items() + ] + + if connection.in_transaction(): + trans_objs = [ + (name, obj) + for name, obj, _ in characteristic_values + if obj.transactional + ] + if trans_objs: + raise exc.InvalidRequestError( + "This connection has already initialized a SQLAlchemy " + "Transaction() object via begin() or autobegin; " + "%s may not be altered unless rollback() or commit() " + "is called first." 
+ % (", ".join(name for name, obj in trans_objs)) + ) + + dbapi_connection = connection.connection.dbapi_connection + for _, characteristic, value in characteristic_values: + characteristic.set_connection_characteristic( + self, connection, dbapi_connection, value + ) + connection.connection._connection_record.finalize_callback.append( + functools.partial(self._reset_characteristics, characteristics) + ) + + def _reset_characteristics(self, characteristics, dbapi_connection): + for characteristic_name in characteristics: + characteristic = self.connection_characteristics[ + characteristic_name + ] + characteristic.reset_characteristic(self, dbapi_connection) + + def do_begin(self, dbapi_connection): + pass + + def do_rollback(self, dbapi_connection): + dbapi_connection.rollback() + + def do_commit(self, dbapi_connection): + dbapi_connection.commit() + + def do_terminate(self, dbapi_connection): + self.do_close(dbapi_connection) + + def do_close(self, dbapi_connection): + dbapi_connection.close() + + @util.memoized_property + def _dialect_specific_select_one(self): + return str(expression.select(1).compile(dialect=self)) + + def _do_ping_w_event(self, dbapi_connection: DBAPIConnection) -> bool: + try: + return self.do_ping(dbapi_connection) + except self.loaded_dbapi.Error as err: + is_disconnect = self.is_disconnect(err, dbapi_connection, None) + + if self._has_events: + try: + Connection._handle_dbapi_exception_noconnection( + err, + self, + is_disconnect=is_disconnect, + invalidate_pool_on_disconnect=False, + is_pre_ping=True, + ) + except exc.StatementError as new_err: + is_disconnect = new_err.connection_invalidated + + if is_disconnect: + return False + else: + raise + + def do_ping(self, dbapi_connection: DBAPIConnection) -> bool: + cursor = dbapi_connection.cursor() + try: + cursor.execute(self._dialect_specific_select_one) + finally: + cursor.close() + return True + + def create_xid(self): + """Create a random two-phase transaction ID. + + This id will be passed to do_begin_twophase(), do_rollback_twophase(), + do_commit_twophase(). Its format is unspecified. 
+ """ + + return "_sa_%032x" % random.randint(0, 2**128) + + def do_savepoint(self, connection, name): + connection.execute(expression.SavepointClause(name)) + + def do_rollback_to_savepoint(self, connection, name): + connection.execute(expression.RollbackToSavepointClause(name)) + + def do_release_savepoint(self, connection, name): + connection.execute(expression.ReleaseSavepointClause(name)) + + def _deliver_insertmanyvalues_batches( + self, + connection, + cursor, + statement, + parameters, + generic_setinputsizes, + context, + ): + context = cast(DefaultExecutionContext, context) + compiled = cast(SQLCompiler, context.compiled) + + _composite_sentinel_proc: Sequence[ + Optional[_ResultProcessorType[Any]] + ] = () + _scalar_sentinel_proc: Optional[_ResultProcessorType[Any]] = None + _sentinel_proc_initialized: bool = False + + compiled_parameters = context.compiled_parameters + + imv = compiled._insertmanyvalues + assert imv is not None + + is_returning: Final[bool] = bool(compiled.effective_returning) + batch_size = context.execution_options.get( + "insertmanyvalues_page_size", self.insertmanyvalues_page_size + ) + + if compiled.schema_translate_map: + schema_translate_map = context.execution_options.get( + "schema_translate_map", {} + ) + else: + schema_translate_map = None + + if is_returning: + result: Optional[List[Any]] = [] + context._insertmanyvalues_rows = result + + sort_by_parameter_order = imv.sort_by_parameter_order + + else: + sort_by_parameter_order = False + result = None + + for imv_batch in compiled._deliver_insertmanyvalues_batches( + statement, + parameters, + compiled_parameters, + generic_setinputsizes, + batch_size, + sort_by_parameter_order, + schema_translate_map, + ): + yield imv_batch + + if is_returning: + + try: + rows = context.fetchall_for_returning(cursor) + except BaseException as be: + connection._handle_dbapi_exception( + be, + sql_util._long_statement(imv_batch.replaced_statement), + imv_batch.replaced_parameters, + None, + context, + is_sub_exec=True, + ) + + # I would have thought "is_returning: Final[bool]" + # would have assured this but pylance thinks not + assert result is not None + + if imv.num_sentinel_columns and not imv_batch.is_downgraded: + composite_sentinel = imv.num_sentinel_columns > 1 + if imv.implicit_sentinel: + # for implicit sentinel, which is currently single-col + # integer autoincrement, do a simple sort. 
+ assert not composite_sentinel + result.extend( + sorted(rows, key=operator.itemgetter(-1)) + ) + continue + + # otherwise, create dictionaries to match up batches + # with parameters + assert imv.sentinel_param_keys + assert imv.sentinel_columns + + _nsc = imv.num_sentinel_columns + + if not _sentinel_proc_initialized: + if composite_sentinel: + _composite_sentinel_proc = [ + col.type._cached_result_processor( + self, cursor_desc[1] + ) + for col, cursor_desc in zip( + imv.sentinel_columns, + cursor.description[-_nsc:], + ) + ] + else: + _scalar_sentinel_proc = ( + imv.sentinel_columns[0] + ).type._cached_result_processor( + self, cursor.description[-1][1] + ) + _sentinel_proc_initialized = True + + rows_by_sentinel: Union[ + Dict[Tuple[Any, ...], Any], + Dict[Any, Any], + ] + if composite_sentinel: + rows_by_sentinel = { + tuple( + (proc(val) if proc else val) + for val, proc in zip( + row[-_nsc:], _composite_sentinel_proc + ) + ): row + for row in rows + } + elif _scalar_sentinel_proc: + rows_by_sentinel = { + _scalar_sentinel_proc(row[-1]): row for row in rows + } + else: + rows_by_sentinel = {row[-1]: row for row in rows} + + if len(rows_by_sentinel) != len(imv_batch.batch): + # see test_insert_exec.py:: + # IMVSentinelTest::test_sentinel_incorrect_rowcount + # for coverage / demonstration + raise exc.InvalidRequestError( + f"Sentinel-keyed result set did not produce " + f"correct number of rows {len(imv_batch.batch)}; " + "produced " + f"{len(rows_by_sentinel)}. Please ensure the " + "sentinel column is fully unique and populated in " + "all cases." + ) + + try: + ordered_rows = [ + rows_by_sentinel[sentinel_keys] + for sentinel_keys in imv_batch.sentinel_values + ] + except KeyError as ke: + # see test_insert_exec.py:: + # IMVSentinelTest::test_sentinel_cant_match_keys + # for coverage / demonstration + raise exc.InvalidRequestError( + f"Can't match sentinel values in result set to " + f"parameter sets; key {ke.args[0]!r} was not " + "found. " + "There may be a mismatch between the datatype " + "passed to the DBAPI driver vs. that which it " + "returns in a result row. Ensure the given " + "Python value matches the expected result type " + "*exactly*, taking care to not rely upon implicit " + "conversions which may occur such as when using " + "strings in place of UUID or integer values, etc. 
" + ) from ke + + result.extend(ordered_rows) + + else: + result.extend(rows) + + def do_executemany(self, cursor, statement, parameters, context=None): + cursor.executemany(statement, parameters) + + def do_execute(self, cursor, statement, parameters, context=None): + cursor.execute(statement, parameters) + + def do_execute_no_params(self, cursor, statement, context=None): + cursor.execute(statement) + + def is_disconnect( + self, + e: Exception, + connection: Union[ + pool.PoolProxiedConnection, interfaces.DBAPIConnection, None + ], + cursor: Optional[interfaces.DBAPICursor], + ) -> bool: + return False + + @util.memoized_instancemethod + def _gen_allowed_isolation_levels(self, dbapi_conn): + try: + raw_levels = list(self.get_isolation_level_values(dbapi_conn)) + except NotImplementedError: + return None + else: + normalized_levels = [ + level.replace("_", " ").upper() for level in raw_levels + ] + if raw_levels != normalized_levels: + raise ValueError( + f"Dialect {self.name!r} get_isolation_level_values() " + f"method should return names as UPPERCASE using spaces, " + f"not underscores; got " + f"{sorted(set(raw_levels).difference(normalized_levels))}" + ) + return tuple(normalized_levels) + + def _assert_and_set_isolation_level(self, dbapi_conn, level): + level = level.replace("_", " ").upper() + + _allowed_isolation_levels = self._gen_allowed_isolation_levels( + dbapi_conn + ) + if ( + _allowed_isolation_levels + and level not in _allowed_isolation_levels + ): + raise exc.ArgumentError( + f"Invalid value {level!r} for isolation_level. " + f"Valid isolation levels for {self.name!r} are " + f"{', '.join(_allowed_isolation_levels)}" + ) + + self.set_isolation_level(dbapi_conn, level) + + def reset_isolation_level(self, dbapi_conn): + if self._on_connect_isolation_level is not None: + assert ( + self._on_connect_isolation_level == "AUTOCOMMIT" + or self._on_connect_isolation_level + == self.default_isolation_level + ) + self._assert_and_set_isolation_level( + dbapi_conn, self._on_connect_isolation_level + ) + else: + assert self.default_isolation_level is not None + self._assert_and_set_isolation_level( + dbapi_conn, + self.default_isolation_level, + ) + + def normalize_name(self, name): + if name is None: + return None + + name_lower = name.lower() + name_upper = name.upper() + + if name_upper == name_lower: + # name has no upper/lower conversion, e.g. non-european characters. + # return unchanged + return name + elif name_upper == name and not ( + self.identifier_preparer._requires_quotes + )(name_lower): + # name is all uppercase and doesn't require quoting; normalize + # to all lower case + return name_lower + elif name_lower == name: + # name is all lower case, which if denormalized means we need to + # force quoting on it + return quoted_name(name, quote=True) + else: + # name is mixed case, means it will be quoted in SQL when used + # later, no normalizes + return name + + def denormalize_name(self, name): + if name is None: + return None + + name_lower = name.lower() + name_upper = name.upper() + + if name_upper == name_lower: + # name has no upper/lower conversion, e.g. non-european characters. 
+ # return unchanged + return name + elif name_lower == name and not ( + self.identifier_preparer._requires_quotes + )(name_lower): + name = name_upper + return name + + def get_driver_connection(self, connection): + return connection + + def _overrides_default(self, method): + return ( + getattr(type(self), method).__code__ + is not getattr(DefaultDialect, method).__code__ + ) + + def _default_multi_reflect( + self, + single_tbl_method, + connection, + kind, + schema, + filter_names, + scope, + **kw, + ): + names_fns = [] + temp_names_fns = [] + if ObjectKind.TABLE in kind: + names_fns.append(self.get_table_names) + temp_names_fns.append(self.get_temp_table_names) + if ObjectKind.VIEW in kind: + names_fns.append(self.get_view_names) + temp_names_fns.append(self.get_temp_view_names) + if ObjectKind.MATERIALIZED_VIEW in kind: + names_fns.append(self.get_materialized_view_names) + # no temp materialized view at the moment + # temp_names_fns.append(self.get_temp_materialized_view_names) + + unreflectable = kw.pop("unreflectable", {}) + + if ( + filter_names + and scope is ObjectScope.ANY + and kind is ObjectKind.ANY + ): + # if names are given and no qualification on type of table + # (i.e. the Table(..., autoload) case), take the names as given, + # don't run names queries. If a table does not exit + # NoSuchTableError is raised and it's skipped + + # this also suits the case for mssql where we can reflect + # individual temp tables but there's no temp_names_fn + names = filter_names + else: + names = [] + name_kw = {"schema": schema, **kw} + fns = [] + if ObjectScope.DEFAULT in scope: + fns.extend(names_fns) + if ObjectScope.TEMPORARY in scope: + fns.extend(temp_names_fns) + + for fn in fns: + try: + names.extend(fn(connection, **name_kw)) + except NotImplementedError: + pass + + if filter_names: + filter_names = set(filter_names) + + # iterate over all the tables/views and call the single table method + for table in names: + if not filter_names or table in filter_names: + key = (schema, table) + try: + yield ( + key, + single_tbl_method( + connection, table, schema=schema, **kw + ), + ) + except exc.UnreflectableTableError as err: + if key not in unreflectable: + unreflectable[key] = err + except exc.NoSuchTableError: + pass + + def get_multi_table_options(self, connection, **kw): + return self._default_multi_reflect( + self.get_table_options, connection, **kw + ) + + def get_multi_columns(self, connection, **kw): + return self._default_multi_reflect(self.get_columns, connection, **kw) + + def get_multi_pk_constraint(self, connection, **kw): + return self._default_multi_reflect( + self.get_pk_constraint, connection, **kw + ) + + def get_multi_foreign_keys(self, connection, **kw): + return self._default_multi_reflect( + self.get_foreign_keys, connection, **kw + ) + + def get_multi_indexes(self, connection, **kw): + return self._default_multi_reflect(self.get_indexes, connection, **kw) + + def get_multi_unique_constraints(self, connection, **kw): + return self._default_multi_reflect( + self.get_unique_constraints, connection, **kw + ) + + def get_multi_check_constraints(self, connection, **kw): + return self._default_multi_reflect( + self.get_check_constraints, connection, **kw + ) + + def get_multi_table_comment(self, connection, **kw): + return self._default_multi_reflect( + self.get_table_comment, connection, **kw + ) + + +class StrCompileDialect(DefaultDialect): + statement_compiler = compiler.StrSQLCompiler + ddl_compiler = compiler.DDLCompiler + type_compiler_cls = 
compiler.StrSQLTypeCompiler + preparer = compiler.IdentifierPreparer + + insert_returning = True + update_returning = True + delete_returning = True + + supports_statement_cache = True + + supports_identity_columns = True + + supports_sequences = True + sequences_optional = True + preexecute_autoincrement_sequences = False + + supports_native_boolean = True + + supports_multivalues_insert = True + supports_simple_order_by_label = True + + +class DefaultExecutionContext(ExecutionContext): + isinsert = False + isupdate = False + isdelete = False + is_crud = False + is_text = False + isddl = False + + execute_style: ExecuteStyle = ExecuteStyle.EXECUTE + + compiled: Optional[Compiled] = None + result_column_struct: Optional[ + Tuple[List[ResultColumnsEntry], bool, bool, bool, bool] + ] = None + returned_default_rows: Optional[Sequence[Row[Any]]] = None + + execution_options: _ExecuteOptions = util.EMPTY_DICT + + cursor_fetch_strategy = _cursor._DEFAULT_FETCH + + invoked_statement: Optional[Executable] = None + + _is_implicit_returning = False + _is_explicit_returning = False + _is_supplemental_returning = False + _is_server_side = False + + _soft_closed = False + + _rowcount: Optional[int] = None + + # a hook for SQLite's translation of + # result column names + # NOTE: pyhive is using this hook, can't remove it :( + _translate_colname: Optional[Callable[[str], str]] = None + + _expanded_parameters: Mapping[str, List[str]] = util.immutabledict() + """used by set_input_sizes(). + + This collection comes from ``ExpandedState.parameter_expansion``. + + """ + + cache_hit = NO_CACHE_KEY + + root_connection: Connection + _dbapi_connection: PoolProxiedConnection + dialect: Dialect + unicode_statement: str + cursor: DBAPICursor + compiled_parameters: List[_MutableCoreSingleExecuteParams] + parameters: _DBAPIMultiExecuteParams + extracted_parameters: Optional[Sequence[BindParameter[Any]]] + + _empty_dict_params = cast("Mapping[str, Any]", util.EMPTY_DICT) + + _insertmanyvalues_rows: Optional[List[Tuple[Any, ...]]] = None + _num_sentinel_cols: int = 0 + + @classmethod + def _init_ddl( + cls, + dialect: Dialect, + connection: Connection, + dbapi_connection: PoolProxiedConnection, + execution_options: _ExecuteOptions, + compiled_ddl: DDLCompiler, + ) -> ExecutionContext: + """Initialize execution context for an ExecutableDDLElement + construct.""" + + self = cls.__new__(cls) + self.root_connection = connection + self._dbapi_connection = dbapi_connection + self.dialect = connection.dialect + + self.compiled = compiled = compiled_ddl + self.isddl = True + + self.execution_options = execution_options + + self.unicode_statement = str(compiled) + if compiled.schema_translate_map: + schema_translate_map = self.execution_options.get( + "schema_translate_map", {} + ) + + rst = compiled.preparer._render_schema_translates + self.unicode_statement = rst( + self.unicode_statement, schema_translate_map + ) + + self.statement = self.unicode_statement + + self.cursor = self.create_cursor() + self.compiled_parameters = [] + + if dialect.positional: + self.parameters = [dialect.execute_sequence_format()] + else: + self.parameters = [self._empty_dict_params] + + return self + + @classmethod + def _init_compiled( + cls, + dialect: Dialect, + connection: Connection, + dbapi_connection: PoolProxiedConnection, + execution_options: _ExecuteOptions, + compiled: SQLCompiler, + parameters: _CoreMultiExecuteParams, + invoked_statement: Executable, + extracted_parameters: Optional[Sequence[BindParameter[Any]]], + cache_hit: 
CacheStats = CacheStats.CACHING_DISABLED, + ) -> ExecutionContext: + """Initialize execution context for a Compiled construct.""" + + self = cls.__new__(cls) + self.root_connection = connection + self._dbapi_connection = dbapi_connection + self.dialect = connection.dialect + self.extracted_parameters = extracted_parameters + self.invoked_statement = invoked_statement + self.compiled = compiled + self.cache_hit = cache_hit + + self.execution_options = execution_options + + self.result_column_struct = ( + compiled._result_columns, + compiled._ordered_columns, + compiled._textual_ordered_columns, + compiled._ad_hoc_textual, + compiled._loose_column_name_matching, + ) + + self.isinsert = ii = compiled.isinsert + self.isupdate = iu = compiled.isupdate + self.isdelete = id_ = compiled.isdelete + self.is_text = compiled.isplaintext + + if ii or iu or id_: + dml_statement = compiled.compile_state.statement # type: ignore + if TYPE_CHECKING: + assert isinstance(dml_statement, UpdateBase) + self.is_crud = True + self._is_explicit_returning = ier = bool(dml_statement._returning) + self._is_implicit_returning = iir = bool( + compiled.implicit_returning + ) + if iir and dml_statement._supplemental_returning: + self._is_supplemental_returning = True + + # dont mix implicit and explicit returning + assert not (iir and ier) + + if (ier or iir) and compiled.for_executemany: + if ii and not self.dialect.insert_executemany_returning: + raise exc.InvalidRequestError( + f"Dialect {self.dialect.dialect_description} with " + f"current server capabilities does not support " + "INSERT..RETURNING when executemany is used" + ) + elif ( + ii + and dml_statement._sort_by_parameter_order + and not self.dialect.insert_executemany_returning_sort_by_parameter_order # noqa: E501 + ): + raise exc.InvalidRequestError( + f"Dialect {self.dialect.dialect_description} with " + f"current server capabilities does not support " + "INSERT..RETURNING with deterministic row ordering " + "when executemany is used" + ) + elif ( + ii + and self.dialect.use_insertmanyvalues + and not compiled._insertmanyvalues + ): + raise exc.InvalidRequestError( + 'Statement does not have "insertmanyvalues" ' + "enabled, can't use INSERT..RETURNING with " + "executemany in this case." 
+ ) + elif iu and not self.dialect.update_executemany_returning: + raise exc.InvalidRequestError( + f"Dialect {self.dialect.dialect_description} with " + f"current server capabilities does not support " + "UPDATE..RETURNING when executemany is used" + ) + elif id_ and not self.dialect.delete_executemany_returning: + raise exc.InvalidRequestError( + f"Dialect {self.dialect.dialect_description} with " + f"current server capabilities does not support " + "DELETE..RETURNING when executemany is used" + ) + + if not parameters: + self.compiled_parameters = [ + compiled.construct_params( + extracted_parameters=extracted_parameters, + escape_names=False, + ) + ] + else: + self.compiled_parameters = [ + compiled.construct_params( + m, + escape_names=False, + _group_number=grp, + extracted_parameters=extracted_parameters, + ) + for grp, m in enumerate(parameters) + ] + + if len(parameters) > 1: + if self.isinsert and compiled._insertmanyvalues: + self.execute_style = ExecuteStyle.INSERTMANYVALUES + + imv = compiled._insertmanyvalues + if imv.sentinel_columns is not None: + self._num_sentinel_cols = imv.num_sentinel_columns + else: + self.execute_style = ExecuteStyle.EXECUTEMANY + + self.unicode_statement = compiled.string + + self.cursor = self.create_cursor() + + if self.compiled.insert_prefetch or self.compiled.update_prefetch: + self._process_execute_defaults() + + processors = compiled._bind_processors + + flattened_processors: Mapping[ + str, _BindProcessorType[Any] + ] = processors # type: ignore[assignment] + + if compiled.literal_execute_params or compiled.post_compile_params: + if self.executemany: + raise exc.InvalidRequestError( + "'literal_execute' or 'expanding' parameters can't be " + "used with executemany()" + ) + + expanded_state = compiled._process_parameters_for_postcompile( + self.compiled_parameters[0] + ) + + # re-assign self.unicode_statement + self.unicode_statement = expanded_state.statement + + self._expanded_parameters = expanded_state.parameter_expansion + + flattened_processors = dict(processors) # type: ignore + flattened_processors.update(expanded_state.processors) + positiontup = expanded_state.positiontup + elif compiled.positional: + positiontup = self.compiled.positiontup + else: + positiontup = None + + if compiled.schema_translate_map: + schema_translate_map = self.execution_options.get( + "schema_translate_map", {} + ) + rst = compiled.preparer._render_schema_translates + self.unicode_statement = rst( + self.unicode_statement, schema_translate_map + ) + + # final self.unicode_statement is now assigned, encode if needed + # by dialect + self.statement = self.unicode_statement + + # Convert the dictionary of bind parameter values + # into a dict or list to be sent to the DBAPI's + # execute() or executemany() method. + + if compiled.positional: + core_positional_parameters: MutableSequence[Sequence[Any]] = [] + assert positiontup is not None + for compiled_params in self.compiled_parameters: + l_param: List[Any] = [ + ( + flattened_processors[key](compiled_params[key]) + if key in flattened_processors + else compiled_params[key] + ) + for key in positiontup + ] + core_positional_parameters.append( + dialect.execute_sequence_format(l_param) + ) + + self.parameters = core_positional_parameters + else: + core_dict_parameters: MutableSequence[Dict[str, Any]] = [] + escaped_names = compiled.escaped_bind_names + + # note that currently, "expanded" parameters will be present + # in self.compiled_parameters in their quoted form. 
This is + # slightly inconsistent with the approach taken as of + # #8056 where self.compiled_parameters is meant to contain unquoted + # param names. + d_param: Dict[str, Any] + for compiled_params in self.compiled_parameters: + if escaped_names: + d_param = { + escaped_names.get(key, key): ( + flattened_processors[key](compiled_params[key]) + if key in flattened_processors + else compiled_params[key] + ) + for key in compiled_params + } + else: + d_param = { + key: ( + flattened_processors[key](compiled_params[key]) + if key in flattened_processors + else compiled_params[key] + ) + for key in compiled_params + } + + core_dict_parameters.append(d_param) + + self.parameters = core_dict_parameters + + return self + + @classmethod + def _init_statement( + cls, + dialect: Dialect, + connection: Connection, + dbapi_connection: PoolProxiedConnection, + execution_options: _ExecuteOptions, + statement: str, + parameters: _DBAPIMultiExecuteParams, + ) -> ExecutionContext: + """Initialize execution context for a string SQL statement.""" + + self = cls.__new__(cls) + self.root_connection = connection + self._dbapi_connection = dbapi_connection + self.dialect = connection.dialect + self.is_text = True + + self.execution_options = execution_options + + if not parameters: + if self.dialect.positional: + self.parameters = [dialect.execute_sequence_format()] + else: + self.parameters = [self._empty_dict_params] + elif isinstance(parameters[0], dialect.execute_sequence_format): + self.parameters = parameters + elif isinstance(parameters[0], dict): + self.parameters = parameters + else: + self.parameters = [ + dialect.execute_sequence_format(p) for p in parameters + ] + + if len(parameters) > 1: + self.execute_style = ExecuteStyle.EXECUTEMANY + + self.statement = self.unicode_statement = statement + + self.cursor = self.create_cursor() + return self + + @classmethod + def _init_default( + cls, + dialect: Dialect, + connection: Connection, + dbapi_connection: PoolProxiedConnection, + execution_options: _ExecuteOptions, + ) -> ExecutionContext: + """Initialize execution context for a ColumnDefault construct.""" + + self = cls.__new__(cls) + self.root_connection = connection + self._dbapi_connection = dbapi_connection + self.dialect = connection.dialect + + self.execution_options = execution_options + + self.cursor = self.create_cursor() + return self + + def _get_cache_stats(self) -> str: + if self.compiled is None: + return "raw sql" + + now = perf_counter() + + ch = self.cache_hit + + gen_time = self.compiled._gen_time + assert gen_time is not None + + if ch is NO_CACHE_KEY: + return "no key %.5fs" % (now - gen_time,) + elif ch is CACHE_HIT: + return "cached since %.4gs ago" % (now - gen_time,) + elif ch is CACHE_MISS: + return "generated in %.5fs" % (now - gen_time,) + elif ch is CACHING_DISABLED: + if "_cache_disable_reason" in self.execution_options: + return "caching disabled (%s) %.5fs " % ( + self.execution_options["_cache_disable_reason"], + now - gen_time, + ) + else: + return "caching disabled %.5fs" % (now - gen_time,) + elif ch is NO_DIALECT_SUPPORT: + return "dialect %s+%s does not support caching %.5fs" % ( + self.dialect.name, + self.dialect.driver, + now - gen_time, + ) + else: + return "unknown" + + @property + def executemany(self): + return self.execute_style in ( + ExecuteStyle.EXECUTEMANY, + ExecuteStyle.INSERTMANYVALUES, + ) + + @util.memoized_property + def identifier_preparer(self): + if self.compiled: + return self.compiled.preparer + elif "schema_translate_map" in 
self.execution_options: + return self.dialect.identifier_preparer._with_schema_translate( + self.execution_options["schema_translate_map"] + ) + else: + return self.dialect.identifier_preparer + + @util.memoized_property + def engine(self): + return self.root_connection.engine + + @util.memoized_property + def postfetch_cols(self) -> Optional[Sequence[Column[Any]]]: + if TYPE_CHECKING: + assert isinstance(self.compiled, SQLCompiler) + return self.compiled.postfetch + + @util.memoized_property + def prefetch_cols(self) -> Optional[Sequence[Column[Any]]]: + if TYPE_CHECKING: + assert isinstance(self.compiled, SQLCompiler) + if self.isinsert: + return self.compiled.insert_prefetch + elif self.isupdate: + return self.compiled.update_prefetch + else: + return () + + @util.memoized_property + def no_parameters(self): + return self.execution_options.get("no_parameters", False) + + def _execute_scalar( + self, + stmt: str, + type_: Optional[TypeEngine[Any]], + parameters: Optional[_DBAPISingleExecuteParams] = None, + ) -> Any: + """Execute a string statement on the current cursor, returning a + scalar result. + + Used to fire off sequences, default phrases, and "select lastrowid" + types of statements individually or in the context of a parent INSERT + or UPDATE statement. + + """ + + conn = self.root_connection + + if "schema_translate_map" in self.execution_options: + schema_translate_map = self.execution_options.get( + "schema_translate_map", {} + ) + + rst = self.identifier_preparer._render_schema_translates + stmt = rst(stmt, schema_translate_map) + + if not parameters: + if self.dialect.positional: + parameters = self.dialect.execute_sequence_format() + else: + parameters = {} + + conn._cursor_execute(self.cursor, stmt, parameters, context=self) + row = self.cursor.fetchone() + if row is not None: + r = row[0] + else: + r = None + if type_ is not None: + # apply type post processors to the result + proc = type_._cached_result_processor( + self.dialect, self.cursor.description[0][1] + ) + if proc: + return proc(r) + return r + + @util.memoized_property + def connection(self): + return self.root_connection + + def _use_server_side_cursor(self): + if not self.dialect.supports_server_side_cursors: + return False + + if self.dialect.server_side_cursors: + # this is deprecated + use_server_side = self.execution_options.get( + "stream_results", True + ) and ( + self.compiled + and isinstance(self.compiled.statement, expression.Selectable) + or ( + ( + not self.compiled + or isinstance( + self.compiled.statement, expression.TextClause + ) + ) + and self.unicode_statement + and SERVER_SIDE_CURSOR_RE.match(self.unicode_statement) + ) + ) + else: + use_server_side = self.execution_options.get( + "stream_results", False + ) + + return use_server_side + + def create_cursor(self) -> DBAPICursor: + if ( + # inlining initial preference checks for SS cursors + self.dialect.supports_server_side_cursors + and ( + self.execution_options.get("stream_results", False) + or ( + self.dialect.server_side_cursors + and self._use_server_side_cursor() + ) + ) + ): + self._is_server_side = True + return self.create_server_side_cursor() + else: + self._is_server_side = False + return self.create_default_cursor() + + def fetchall_for_returning(self, cursor): + return cursor.fetchall() + + def create_default_cursor(self) -> DBAPICursor: + return self._dbapi_connection.cursor() + + def create_server_side_cursor(self) -> DBAPICursor: + raise NotImplementedError() + + def pre_exec(self): + pass + + def 
get_out_parameter_values(self, names): + raise NotImplementedError( + "This dialect does not support OUT parameters" + ) + + def post_exec(self): + pass + + def get_result_processor(self, type_, colname, coltype): + """Return a 'result processor' for a given type as present in + cursor.description. + + This has a default implementation that dialects can override + for context-sensitive result type handling. + + """ + return type_._cached_result_processor(self.dialect, coltype) + + def get_lastrowid(self): + """return self.cursor.lastrowid, or equivalent, after an INSERT. + + This may involve calling special cursor functions, issuing a new SELECT + on the cursor (or a new one), or returning a stored value that was + calculated within post_exec(). + + This function will only be called for dialects which support "implicit" + primary key generation, keep preexecute_autoincrement_sequences set to + False, and when no explicit id value was bound to the statement. + + The function is called once for an INSERT statement that would need to + return the last inserted primary key for those dialects that make use + of the lastrowid concept. In these cases, it is called directly after + :meth:`.ExecutionContext.post_exec`. + + """ + return self.cursor.lastrowid + + def handle_dbapi_exception(self, e): + pass + + @util.non_memoized_property + def rowcount(self) -> int: + if self._rowcount is not None: + return self._rowcount + else: + return self.cursor.rowcount + + @property + def _has_rowcount(self): + return self._rowcount is not None + + def supports_sane_rowcount(self): + return self.dialect.supports_sane_rowcount + + def supports_sane_multi_rowcount(self): + return self.dialect.supports_sane_multi_rowcount + + def _setup_result_proxy(self): + exec_opt = self.execution_options + + if self._rowcount is None and exec_opt.get("preserve_rowcount", False): + self._rowcount = self.cursor.rowcount + + if self.is_crud or self.is_text: + result = self._setup_dml_or_text_result() + yp = False + else: + yp = exec_opt.get("yield_per", None) + sr = self._is_server_side or exec_opt.get("stream_results", False) + strategy = self.cursor_fetch_strategy + if sr and strategy is _cursor._DEFAULT_FETCH: + strategy = _cursor.BufferedRowCursorFetchStrategy( + self.cursor, self.execution_options + ) + cursor_description: _DBAPICursorDescription = ( + strategy.alternate_cursor_description + or self.cursor.description + ) + if cursor_description is None: + strategy = _cursor._NO_CURSOR_DQL + + result = _cursor.CursorResult(self, strategy, cursor_description) + + compiled = self.compiled + + if ( + compiled + and not self.isddl + and cast(SQLCompiler, compiled).has_out_parameters + ): + self._setup_out_parameters(result) + + self._soft_closed = result._soft_closed + + if yp: + result = result.yield_per(yp) + + return result + + def _setup_out_parameters(self, result): + compiled = cast(SQLCompiler, self.compiled) + + out_bindparams = [ + (param, name) + for param, name in compiled.bind_names.items() + if param.isoutparam + ] + out_parameters = {} + + for bindparam, raw_value in zip( + [param for param, name in out_bindparams], + self.get_out_parameter_values( + [name for param, name in out_bindparams] + ), + ): + type_ = bindparam.type + impl_type = type_.dialect_impl(self.dialect) + dbapi_type = impl_type.get_dbapi_type(self.dialect.loaded_dbapi) + result_processor = impl_type.result_processor( + self.dialect, dbapi_type + ) + if result_processor is not None: + raw_value = result_processor(raw_value) + 
out_parameters[bindparam.key] = raw_value + + result.out_parameters = out_parameters + + def _setup_dml_or_text_result(self): + compiled = cast(SQLCompiler, self.compiled) + + strategy: ResultFetchStrategy = self.cursor_fetch_strategy + + if self.isinsert: + if ( + self.execute_style is ExecuteStyle.INSERTMANYVALUES + and compiled.effective_returning + ): + strategy = _cursor.FullyBufferedCursorFetchStrategy( + self.cursor, + initial_buffer=self._insertmanyvalues_rows, + # maintain alt cursor description if set by the + # dialect, e.g. mssql preserves it + alternate_description=( + strategy.alternate_cursor_description + ), + ) + + if compiled.postfetch_lastrowid: + self.inserted_primary_key_rows = ( + self._setup_ins_pk_from_lastrowid() + ) + # else if not self._is_implicit_returning, + # the default inserted_primary_key_rows accessor will + # return an "empty" primary key collection when accessed. + + if self._is_server_side and strategy is _cursor._DEFAULT_FETCH: + strategy = _cursor.BufferedRowCursorFetchStrategy( + self.cursor, self.execution_options + ) + + if strategy is _cursor._NO_CURSOR_DML: + cursor_description = None + else: + cursor_description = ( + strategy.alternate_cursor_description + or self.cursor.description + ) + + if cursor_description is None: + strategy = _cursor._NO_CURSOR_DML + elif self._num_sentinel_cols: + assert self.execute_style is ExecuteStyle.INSERTMANYVALUES + # strip out the sentinel columns from cursor description + # a similar logic is done to the rows only in CursorResult + cursor_description = cursor_description[ + 0 : -self._num_sentinel_cols + ] + + result: _cursor.CursorResult[Any] = _cursor.CursorResult( + self, strategy, cursor_description + ) + + if self.isinsert: + if self._is_implicit_returning: + rows = result.all() + + self.returned_default_rows = rows + + self.inserted_primary_key_rows = ( + self._setup_ins_pk_from_implicit_returning(result, rows) + ) + + # test that it has a cursor metadata that is accurate. the + # first row will have been fetched and current assumptions + # are that the result has only one row, until executemany() + # support is added here. + assert result._metadata.returns_rows + + # Insert statement has both return_defaults() and + # returning(). rewind the result on the list of rows + # we just used. + if self._is_supplemental_returning: + result._rewind(rows) + else: + result._soft_close() + elif not self._is_explicit_returning: + result._soft_close() + + # we assume here the result does not return any rows. + # *usually*, this will be true. However, some dialects + # such as that of MSSQL/pyodbc need to SELECT a post fetch + # function so this is not necessarily true. + # assert not result.returns_rows + + elif self._is_implicit_returning: + rows = result.all() + + if rows: + self.returned_default_rows = rows + self._rowcount = len(rows) + + if self._is_supplemental_returning: + result._rewind(rows) + else: + result._soft_close() + + # test that it has a cursor metadata that is accurate. + # the rows have all been fetched however. 
+ assert result._metadata.returns_rows + + elif not result._metadata.returns_rows: + # no results, get rowcount + # (which requires open cursor on some drivers) + if self._rowcount is None: + self._rowcount = self.cursor.rowcount + result._soft_close() + elif self.isupdate or self.isdelete: + if self._rowcount is None: + self._rowcount = self.cursor.rowcount + return result + + @util.memoized_property + def inserted_primary_key_rows(self): + # if no specific "get primary key" strategy was set up + # during execution, return a "default" primary key based + # on what's in the compiled_parameters and nothing else. + return self._setup_ins_pk_from_empty() + + def _setup_ins_pk_from_lastrowid(self): + getter = cast( + SQLCompiler, self.compiled + )._inserted_primary_key_from_lastrowid_getter + lastrowid = self.get_lastrowid() + return [getter(lastrowid, self.compiled_parameters[0])] + + def _setup_ins_pk_from_empty(self): + getter = cast( + SQLCompiler, self.compiled + )._inserted_primary_key_from_lastrowid_getter + return [getter(None, param) for param in self.compiled_parameters] + + def _setup_ins_pk_from_implicit_returning(self, result, rows): + if not rows: + return [] + + getter = cast( + SQLCompiler, self.compiled + )._inserted_primary_key_from_returning_getter + compiled_params = self.compiled_parameters + + return [ + getter(row, param) for row, param in zip(rows, compiled_params) + ] + + def lastrow_has_defaults(self): + return (self.isinsert or self.isupdate) and bool( + cast(SQLCompiler, self.compiled).postfetch + ) + + def _prepare_set_input_sizes( + self, + ) -> Optional[List[Tuple[str, Any, TypeEngine[Any]]]]: + """Given a cursor and ClauseParameters, prepare arguments + in order to call the appropriate + style of ``setinputsizes()`` on the cursor, using DB-API types + from the bind parameter's ``TypeEngine`` objects. + + This method only called by those dialects which set the + :attr:`.Dialect.bind_typing` attribute to + :attr:`.BindTyping.SETINPUTSIZES`. Python-oracledb and cx_Oracle are + the only DBAPIs that requires setinputsizes(); pyodbc offers it as an + option. + + Prior to SQLAlchemy 2.0, the setinputsizes() approach was also used + for pg8000 and asyncpg, which has been changed to inline rendering + of casts. + + """ + if self.isddl or self.is_text: + return None + + compiled = cast(SQLCompiler, self.compiled) + + inputsizes = compiled._get_set_input_sizes_lookup() + + if inputsizes is None: + return None + + dialect = self.dialect + + # all of the rest of this... cython? 
+ + if dialect._has_events: + inputsizes = dict(inputsizes) + dialect.dispatch.do_setinputsizes( + inputsizes, self.cursor, self.statement, self.parameters, self + ) + + if compiled.escaped_bind_names: + escaped_bind_names = compiled.escaped_bind_names + else: + escaped_bind_names = None + + if dialect.positional: + items = [ + (key, compiled.binds[key]) + for key in compiled.positiontup or () + ] + else: + items = [ + (key, bindparam) + for bindparam, key in compiled.bind_names.items() + ] + + generic_inputsizes: List[Tuple[str, Any, TypeEngine[Any]]] = [] + for key, bindparam in items: + if bindparam in compiled.literal_execute_params: + continue + + if key in self._expanded_parameters: + if is_tuple_type(bindparam.type): + num = len(bindparam.type.types) + dbtypes = inputsizes[bindparam] + generic_inputsizes.extend( + ( + ( + escaped_bind_names.get(paramname, paramname) + if escaped_bind_names is not None + else paramname + ), + dbtypes[idx % num], + bindparam.type.types[idx % num], + ) + for idx, paramname in enumerate( + self._expanded_parameters[key] + ) + ) + else: + dbtype = inputsizes.get(bindparam, None) + generic_inputsizes.extend( + ( + ( + escaped_bind_names.get(paramname, paramname) + if escaped_bind_names is not None + else paramname + ), + dbtype, + bindparam.type, + ) + for paramname in self._expanded_parameters[key] + ) + else: + dbtype = inputsizes.get(bindparam, None) + + escaped_name = ( + escaped_bind_names.get(key, key) + if escaped_bind_names is not None + else key + ) + + generic_inputsizes.append( + (escaped_name, dbtype, bindparam.type) + ) + + return generic_inputsizes + + def _exec_default(self, column, default, type_): + if default.is_sequence: + return self.fire_sequence(default, type_) + elif default.is_callable: + # this codepath is not normally used as it's inlined + # into _process_execute_defaults + self.current_column = column + return default.arg(self) + elif default.is_clause_element: + return self._exec_default_clause_element(column, default, type_) + else: + # this codepath is not normally used as it's inlined + # into _process_execute_defaults + return default.arg + + def _exec_default_clause_element(self, column, default, type_): + # execute a default that's a complete clause element. Here, we have + # to re-implement a miniature version of the compile->parameters-> + # cursor.execute() sequence, since we don't want to modify the state + # of the connection / result in progress or create new connection/ + # result objects etc. + # .. versionchanged:: 1.4 + + if not default._arg_is_typed: + default_arg = expression.type_coerce(default.arg, type_) + else: + default_arg = default.arg + compiled = expression.select(default_arg).compile(dialect=self.dialect) + compiled_params = compiled.construct_params() + processors = compiled._bind_processors + if compiled.positional: + parameters = self.dialect.execute_sequence_format( + [ + ( + processors[key](compiled_params[key]) # type: ignore + if key in processors + else compiled_params[key] + ) + for key in compiled.positiontup or () + ] + ) + else: + parameters = { + key: ( + processors[key](compiled_params[key]) # type: ignore + if key in processors + else compiled_params[key] + ) + for key in compiled_params + } + return self._execute_scalar( + str(compiled), type_, parameters=parameters + ) + + current_parameters: Optional[_CoreSingleExecuteParams] = None + """A dictionary of parameters applied to the current row. 
+ + This attribute is only available in the context of a user-defined default + generation function, e.g. as described at :ref:`context_default_functions`. + It consists of a dictionary which includes entries for each column/value + pair that is to be part of the INSERT or UPDATE statement. The keys of the + dictionary will be the key value of each :class:`_schema.Column`, + which is usually + synonymous with the name. + + Note that the :attr:`.DefaultExecutionContext.current_parameters` attribute + does not accommodate for the "multi-values" feature of the + :meth:`_expression.Insert.values` method. The + :meth:`.DefaultExecutionContext.get_current_parameters` method should be + preferred. + + .. seealso:: + + :meth:`.DefaultExecutionContext.get_current_parameters` + + :ref:`context_default_functions` + + """ + + def get_current_parameters(self, isolate_multiinsert_groups=True): + """Return a dictionary of parameters applied to the current row. + + This method can only be used in the context of a user-defined default + generation function, e.g. as described at + :ref:`context_default_functions`. When invoked, a dictionary is + returned which includes entries for each column/value pair that is part + of the INSERT or UPDATE statement. The keys of the dictionary will be + the key value of each :class:`_schema.Column`, + which is usually synonymous + with the name. + + :param isolate_multiinsert_groups=True: indicates that multi-valued + INSERT constructs created using :meth:`_expression.Insert.values` + should be + handled by returning only the subset of parameters that are local + to the current column default invocation. When ``False``, the + raw parameters of the statement are returned including the + naming convention used in the case of multi-valued INSERT. + + .. versionadded:: 1.2 added + :meth:`.DefaultExecutionContext.get_current_parameters` + which provides more functionality over the existing + :attr:`.DefaultExecutionContext.current_parameters` + attribute. + + .. 
seealso:: + + :attr:`.DefaultExecutionContext.current_parameters` + + :ref:`context_default_functions` + + """ + try: + parameters = self.current_parameters + column = self.current_column + except AttributeError: + raise exc.InvalidRequestError( + "get_current_parameters() can only be invoked in the " + "context of a Python side column default function" + ) + else: + assert column is not None + assert parameters is not None + compile_state = cast( + "DMLState", cast(SQLCompiler, self.compiled).compile_state + ) + assert compile_state is not None + if ( + isolate_multiinsert_groups + and dml.isinsert(compile_state) + and compile_state._has_multi_parameters + ): + if column._is_multiparam_column: + index = column.index + 1 + d = {column.original.key: parameters[column.key]} + else: + d = {column.key: parameters[column.key]} + index = 0 + assert compile_state._dict_parameters is not None + keys = compile_state._dict_parameters.keys() + d.update( + (key, parameters["%s_m%d" % (key, index)]) for key in keys + ) + return d + else: + return parameters + + def get_insert_default(self, column): + if column.default is None: + return None + else: + return self._exec_default(column, column.default, column.type) + + def get_update_default(self, column): + if column.onupdate is None: + return None + else: + return self._exec_default(column, column.onupdate, column.type) + + def _process_execute_defaults(self): + compiled = cast(SQLCompiler, self.compiled) + + key_getter = compiled._within_exec_param_key_getter + + sentinel_counter = 0 + + if compiled.insert_prefetch: + prefetch_recs = [ + ( + c, + key_getter(c), + c._default_description_tuple, + self.get_insert_default, + ) + for c in compiled.insert_prefetch + ] + elif compiled.update_prefetch: + prefetch_recs = [ + ( + c, + key_getter(c), + c._onupdate_description_tuple, + self.get_update_default, + ) + for c in compiled.update_prefetch + ] + else: + prefetch_recs = [] + + for param in self.compiled_parameters: + self.current_parameters = param + + for ( + c, + param_key, + (arg, is_scalar, is_callable, is_sentinel), + fallback, + ) in prefetch_recs: + if is_sentinel: + param[param_key] = sentinel_counter + sentinel_counter += 1 + elif is_scalar: + param[param_key] = arg + elif is_callable: + self.current_column = c + param[param_key] = arg(self) + else: + val = fallback(c) + if val is not None: + param[param_key] = val + + del self.current_parameters + + +DefaultDialect.execution_ctx_cls = DefaultExecutionContext diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/engine/events.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/engine/events.py new file mode 100644 index 0000000000000000000000000000000000000000..b759382cb277da36ff076321ea38dcf1d3363e2f --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/engine/events.py @@ -0,0 +1,965 @@ +# engine/events.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php + + +from __future__ import annotations + +import typing +from typing import Any +from typing import Dict +from typing import Optional +from typing import Tuple +from typing import Type +from typing import Union + +from .base import Connection +from .base import Engine +from .interfaces import ConnectionEventsTarget +from .interfaces import 
DBAPIConnection +from .interfaces import DBAPICursor +from .interfaces import Dialect +from .. import event +from .. import exc +from ..util.typing import Literal + +if typing.TYPE_CHECKING: + from .interfaces import _CoreMultiExecuteParams + from .interfaces import _CoreSingleExecuteParams + from .interfaces import _DBAPIAnyExecuteParams + from .interfaces import _DBAPIMultiExecuteParams + from .interfaces import _DBAPISingleExecuteParams + from .interfaces import _ExecuteOptions + from .interfaces import ExceptionContext + from .interfaces import ExecutionContext + from .result import Result + from ..pool import ConnectionPoolEntry + from ..sql import Executable + from ..sql.elements import BindParameter + + +class ConnectionEvents(event.Events[ConnectionEventsTarget]): + """Available events for + :class:`_engine.Connection` and :class:`_engine.Engine`. + + The methods here define the name of an event as well as the names of + members that are passed to listener functions. + + An event listener can be associated with any + :class:`_engine.Connection` or :class:`_engine.Engine` + class or instance, such as an :class:`_engine.Engine`, e.g.:: + + from sqlalchemy import event, create_engine + + + def before_cursor_execute( + conn, cursor, statement, parameters, context, executemany + ): + log.info("Received statement: %s", statement) + + + engine = create_engine("postgresql+psycopg2://scott:tiger@localhost/test") + event.listen(engine, "before_cursor_execute", before_cursor_execute) + + or with a specific :class:`_engine.Connection`:: + + with engine.begin() as conn: + + @event.listens_for(conn, "before_cursor_execute") + def before_cursor_execute( + conn, cursor, statement, parameters, context, executemany + ): + log.info("Received statement: %s", statement) + + When the methods are called with a `statement` parameter, such as in + :meth:`.after_cursor_execute` or :meth:`.before_cursor_execute`, + the statement is the exact SQL string that was prepared for transmission + to the DBAPI ``cursor`` in the connection's :class:`.Dialect`. + + The :meth:`.before_execute` and :meth:`.before_cursor_execute` + events can also be established with the ``retval=True`` flag, which + allows modification of the statement and parameters to be sent + to the database. The :meth:`.before_cursor_execute` event is + particularly useful here to add ad-hoc string transformations, such + as comments, to all executions:: + + from sqlalchemy.engine import Engine + from sqlalchemy import event + + + @event.listens_for(Engine, "before_cursor_execute", retval=True) + def comment_sql_calls( + conn, cursor, statement, parameters, context, executemany + ): + statement = statement + " -- some comment" + return statement, parameters + + .. note:: :class:`_events.ConnectionEvents` can be established on any + combination of :class:`_engine.Engine`, :class:`_engine.Connection`, + as well + as instances of each of those classes. Events across all + four scopes will fire off for a given instance of + :class:`_engine.Connection`. However, for performance reasons, the + :class:`_engine.Connection` object determines at instantiation time + whether or not its parent :class:`_engine.Engine` has event listeners + established. Event listeners added to the :class:`_engine.Engine` + class or to an instance of :class:`_engine.Engine` + *after* the instantiation + of a dependent :class:`_engine.Connection` instance will usually + *not* be available on that :class:`_engine.Connection` instance. 
+ The newly + added listeners will instead take effect for + :class:`_engine.Connection` + instances created subsequent to those event listeners being + established on the parent :class:`_engine.Engine` class or instance. + + :param retval=False: Applies to the :meth:`.before_execute` and + :meth:`.before_cursor_execute` events only. When True, the + user-defined event function must have a return value, which + is a tuple of parameters that replace the given statement + and parameters. See those methods for a description of + specific return arguments. + + """ # noqa + + _target_class_doc = "SomeEngine" + _dispatch_target = ConnectionEventsTarget + + @classmethod + def _accept_with( + cls, + target: Union[ConnectionEventsTarget, Type[ConnectionEventsTarget]], + identifier: str, + ) -> Optional[Union[ConnectionEventsTarget, Type[ConnectionEventsTarget]]]: + default_dispatch = super()._accept_with(target, identifier) + if default_dispatch is None and hasattr( + target, "_no_async_engine_events" + ): + target._no_async_engine_events() + + return default_dispatch + + @classmethod + def _listen( + cls, + event_key: event._EventKey[ConnectionEventsTarget], + *, + retval: bool = False, + **kw: Any, + ) -> None: + target, identifier, fn = ( + event_key.dispatch_target, + event_key.identifier, + event_key._listen_fn, + ) + target._has_events = True + + if not retval: + if identifier == "before_execute": + orig_fn = fn + + def wrap_before_execute( # type: ignore + conn, clauseelement, multiparams, params, execution_options + ): + orig_fn( + conn, + clauseelement, + multiparams, + params, + execution_options, + ) + return clauseelement, multiparams, params + + fn = wrap_before_execute + elif identifier == "before_cursor_execute": + orig_fn = fn + + def wrap_before_cursor_execute( # type: ignore + conn, cursor, statement, parameters, context, executemany + ): + orig_fn( + conn, + cursor, + statement, + parameters, + context, + executemany, + ) + return statement, parameters + + fn = wrap_before_cursor_execute + elif retval and identifier not in ( + "before_execute", + "before_cursor_execute", + ): + raise exc.ArgumentError( + "Only the 'before_execute', " + "'before_cursor_execute' and 'handle_error' engine " + "event listeners accept the 'retval=True' " + "argument." + ) + event_key.with_wrapper(fn).base_listen() + + @event._legacy_signature( + "1.4", + ["conn", "clauseelement", "multiparams", "params"], + lambda conn, clauseelement, multiparams, params, execution_options: ( + conn, + clauseelement, + multiparams, + params, + ), + ) + def before_execute( + self, + conn: Connection, + clauseelement: Executable, + multiparams: _CoreMultiExecuteParams, + params: _CoreSingleExecuteParams, + execution_options: _ExecuteOptions, + ) -> Optional[ + Tuple[Executable, _CoreMultiExecuteParams, _CoreSingleExecuteParams] + ]: + """Intercept high level execute() events, receiving uncompiled + SQL constructs and other objects prior to rendering into SQL. + + This event is good for debugging SQL compilation issues as well + as early manipulation of the parameters being sent to the database, + as the parameter lists will be in a consistent format here. + + This event can be optionally established with the ``retval=True`` + flag. 
The ``clauseelement``, ``multiparams``, and ``params`` + arguments should be returned as a three-tuple in this case:: + + @event.listens_for(Engine, "before_execute", retval=True) + def before_execute(conn, clauseelement, multiparams, params): + # do something with clauseelement, multiparams, params + return clauseelement, multiparams, params + + :param conn: :class:`_engine.Connection` object + :param clauseelement: SQL expression construct, :class:`.Compiled` + instance, or string statement passed to + :meth:`_engine.Connection.execute`. + :param multiparams: Multiple parameter sets, a list of dictionaries. + :param params: Single parameter set, a single dictionary. + :param execution_options: dictionary of execution + options passed along with the statement, if any. This is a merge + of all options that will be used, including those of the statement, + the connection, and those passed in to the method itself for + the 2.0 style of execution. + + .. versionadded: 1.4 + + .. seealso:: + + :meth:`.before_cursor_execute` + + """ + + @event._legacy_signature( + "1.4", + ["conn", "clauseelement", "multiparams", "params", "result"], + lambda conn, clauseelement, multiparams, params, execution_options, result: ( # noqa + conn, + clauseelement, + multiparams, + params, + result, + ), + ) + def after_execute( + self, + conn: Connection, + clauseelement: Executable, + multiparams: _CoreMultiExecuteParams, + params: _CoreSingleExecuteParams, + execution_options: _ExecuteOptions, + result: Result[Any], + ) -> None: + """Intercept high level execute() events after execute. + + + :param conn: :class:`_engine.Connection` object + :param clauseelement: SQL expression construct, :class:`.Compiled` + instance, or string statement passed to + :meth:`_engine.Connection.execute`. + :param multiparams: Multiple parameter sets, a list of dictionaries. + :param params: Single parameter set, a single dictionary. + :param execution_options: dictionary of execution + options passed along with the statement, if any. This is a merge + of all options that will be used, including those of the statement, + the connection, and those passed in to the method itself for + the 2.0 style of execution. + + .. versionadded: 1.4 + + :param result: :class:`_engine.CursorResult` generated by the + execution. + + """ + + def before_cursor_execute( + self, + conn: Connection, + cursor: DBAPICursor, + statement: str, + parameters: _DBAPIAnyExecuteParams, + context: Optional[ExecutionContext], + executemany: bool, + ) -> Optional[Tuple[str, _DBAPIAnyExecuteParams]]: + """Intercept low-level cursor execute() events before execution, + receiving the string SQL statement and DBAPI-specific parameter list to + be invoked against a cursor. + + This event is a good choice for logging as well as late modifications + to the SQL string. It's less ideal for parameter modifications except + for those which are specific to a target backend. + + This event can be optionally established with the ``retval=True`` + flag. The ``statement`` and ``parameters`` arguments should be + returned as a two-tuple in this case:: + + @event.listens_for(Engine, "before_cursor_execute", retval=True) + def before_cursor_execute( + conn, cursor, statement, parameters, context, executemany + ): + # do something with statement, parameters + return statement, parameters + + See the example at :class:`_events.ConnectionEvents`. 
+ + :param conn: :class:`_engine.Connection` object + :param cursor: DBAPI cursor object + :param statement: string SQL statement, as to be passed to the DBAPI + :param parameters: Dictionary, tuple, or list of parameters being + passed to the ``execute()`` or ``executemany()`` method of the + DBAPI ``cursor``. In some cases may be ``None``. + :param context: :class:`.ExecutionContext` object in use. May + be ``None``. + :param executemany: boolean, if ``True``, this is an ``executemany()`` + call, if ``False``, this is an ``execute()`` call. + + .. seealso:: + + :meth:`.before_execute` + + :meth:`.after_cursor_execute` + + """ + + def after_cursor_execute( + self, + conn: Connection, + cursor: DBAPICursor, + statement: str, + parameters: _DBAPIAnyExecuteParams, + context: Optional[ExecutionContext], + executemany: bool, + ) -> None: + """Intercept low-level cursor execute() events after execution. + + :param conn: :class:`_engine.Connection` object + :param cursor: DBAPI cursor object. Will have results pending + if the statement was a SELECT, but these should not be consumed + as they will be needed by the :class:`_engine.CursorResult`. + :param statement: string SQL statement, as passed to the DBAPI + :param parameters: Dictionary, tuple, or list of parameters being + passed to the ``execute()`` or ``executemany()`` method of the + DBAPI ``cursor``. In some cases may be ``None``. + :param context: :class:`.ExecutionContext` object in use. May + be ``None``. + :param executemany: boolean, if ``True``, this is an ``executemany()`` + call, if ``False``, this is an ``execute()`` call. + + """ + + @event._legacy_signature( + "2.0", ["conn", "branch"], converter=lambda conn: (conn, False) + ) + def engine_connect(self, conn: Connection) -> None: + """Intercept the creation of a new :class:`_engine.Connection`. + + This event is called typically as the direct result of calling + the :meth:`_engine.Engine.connect` method. + + It differs from the :meth:`_events.PoolEvents.connect` method, which + refers to the actual connection to a database at the DBAPI level; + a DBAPI connection may be pooled and reused for many operations. + In contrast, this event refers only to the production of a higher level + :class:`_engine.Connection` wrapper around such a DBAPI connection. + + It also differs from the :meth:`_events.PoolEvents.checkout` event + in that it is specific to the :class:`_engine.Connection` object, + not the + DBAPI connection that :meth:`_events.PoolEvents.checkout` deals with, + although + this DBAPI connection is available here via the + :attr:`_engine.Connection.connection` attribute. + But note there can in fact + be multiple :meth:`_events.PoolEvents.checkout` + events within the lifespan + of a single :class:`_engine.Connection` object, if that + :class:`_engine.Connection` + is invalidated and re-established. + + :param conn: :class:`_engine.Connection` object. + + .. seealso:: + + :meth:`_events.PoolEvents.checkout` + the lower-level pool checkout event + for an individual DBAPI connection + + """ + + def set_connection_execution_options( + self, conn: Connection, opts: Dict[str, Any] + ) -> None: + """Intercept when the :meth:`_engine.Connection.execution_options` + method is called. + + This method is called after the new :class:`_engine.Connection` + has been + produced, with the newly updated execution options collection, but + before the :class:`.Dialect` has acted upon any of those new options. 
+ + Note that this method is not called when a new + :class:`_engine.Connection` + is produced which is inheriting execution options from its parent + :class:`_engine.Engine`; to intercept this condition, use the + :meth:`_events.ConnectionEvents.engine_connect` event. + + :param conn: The newly copied :class:`_engine.Connection` object + + :param opts: dictionary of options that were passed to the + :meth:`_engine.Connection.execution_options` method. + This dictionary may be modified in place to affect the ultimate + options which take effect. + + .. versionadded:: 2.0 the ``opts`` dictionary may be modified + in place. + + + .. seealso:: + + :meth:`_events.ConnectionEvents.set_engine_execution_options` + - event + which is called when :meth:`_engine.Engine.execution_options` + is called. + + + """ + + def set_engine_execution_options( + self, engine: Engine, opts: Dict[str, Any] + ) -> None: + """Intercept when the :meth:`_engine.Engine.execution_options` + method is called. + + The :meth:`_engine.Engine.execution_options` method produces a shallow + copy of the :class:`_engine.Engine` which stores the new options. + That new + :class:`_engine.Engine` is passed here. + A particular application of this + method is to add a :meth:`_events.ConnectionEvents.engine_connect` + event + handler to the given :class:`_engine.Engine` + which will perform some per- + :class:`_engine.Connection` task specific to these execution options. + + :param engine: The newly copied :class:`_engine.Engine` object + + :param opts: dictionary of options that were passed to the + :meth:`_engine.Connection.execution_options` method. + This dictionary may be modified in place to affect the ultimate + options which take effect. + + .. versionadded:: 2.0 the ``opts`` dictionary may be modified + in place. + + .. seealso:: + + :meth:`_events.ConnectionEvents.set_connection_execution_options` + - event + which is called when :meth:`_engine.Connection.execution_options` + is + called. + + """ + + def engine_disposed(self, engine: Engine) -> None: + """Intercept when the :meth:`_engine.Engine.dispose` method is called. + + The :meth:`_engine.Engine.dispose` method instructs the engine to + "dispose" of its connection pool (e.g. :class:`_pool.Pool`), and + replaces it with a new one. Disposing of the old pool has the + effect that existing checked-in connections are closed. The new + pool does not establish any new connections until it is first used. + + This event can be used to indicate that resources related to the + :class:`_engine.Engine` should also be cleaned up, + keeping in mind that the + :class:`_engine.Engine` + can still be used for new requests in which case + it re-acquires connection resources. + + """ + + def begin(self, conn: Connection) -> None: + """Intercept begin() events. + + :param conn: :class:`_engine.Connection` object + + """ + + def rollback(self, conn: Connection) -> None: + """Intercept rollback() events, as initiated by a + :class:`.Transaction`. + + Note that the :class:`_pool.Pool` also "auto-rolls back" + a DBAPI connection upon checkin, if the ``reset_on_return`` + flag is set to its default value of ``'rollback'``. + To intercept this + rollback, use the :meth:`_events.PoolEvents.reset` hook. + + :param conn: :class:`_engine.Connection` object + + .. seealso:: + + :meth:`_events.PoolEvents.reset` + + """ + + def commit(self, conn: Connection) -> None: + """Intercept commit() events, as initiated by a + :class:`.Transaction`. 
+ + Note that the :class:`_pool.Pool` may also "auto-commit" + a DBAPI connection upon checkin, if the ``reset_on_return`` + flag is set to the value ``'commit'``. To intercept this + commit, use the :meth:`_events.PoolEvents.reset` hook. + + :param conn: :class:`_engine.Connection` object + """ + + def savepoint(self, conn: Connection, name: str) -> None: + """Intercept savepoint() events. + + :param conn: :class:`_engine.Connection` object + :param name: specified name used for the savepoint. + + """ + + def rollback_savepoint( + self, conn: Connection, name: str, context: None + ) -> None: + """Intercept rollback_savepoint() events. + + :param conn: :class:`_engine.Connection` object + :param name: specified name used for the savepoint. + :param context: not used + + """ + # TODO: deprecate "context" + + def release_savepoint( + self, conn: Connection, name: str, context: None + ) -> None: + """Intercept release_savepoint() events. + + :param conn: :class:`_engine.Connection` object + :param name: specified name used for the savepoint. + :param context: not used + + """ + # TODO: deprecate "context" + + def begin_twophase(self, conn: Connection, xid: Any) -> None: + """Intercept begin_twophase() events. + + :param conn: :class:`_engine.Connection` object + :param xid: two-phase XID identifier + + """ + + def prepare_twophase(self, conn: Connection, xid: Any) -> None: + """Intercept prepare_twophase() events. + + :param conn: :class:`_engine.Connection` object + :param xid: two-phase XID identifier + """ + + def rollback_twophase( + self, conn: Connection, xid: Any, is_prepared: bool + ) -> None: + """Intercept rollback_twophase() events. + + :param conn: :class:`_engine.Connection` object + :param xid: two-phase XID identifier + :param is_prepared: boolean, indicates if + :meth:`.TwoPhaseTransaction.prepare` was called. + + """ + + def commit_twophase( + self, conn: Connection, xid: Any, is_prepared: bool + ) -> None: + """Intercept commit_twophase() events. + + :param conn: :class:`_engine.Connection` object + :param xid: two-phase XID identifier + :param is_prepared: boolean, indicates if + :meth:`.TwoPhaseTransaction.prepare` was called. + + """ + + +class DialectEvents(event.Events[Dialect]): + """event interface for execution-replacement functions. + + These events allow direct instrumentation and replacement + of key dialect functions which interact with the DBAPI. + + .. note:: + + :class:`.DialectEvents` hooks should be considered **semi-public** + and experimental. + These hooks are not for general use and are only for those situations + where intricate re-statement of DBAPI mechanics must be injected onto + an existing dialect. For general-use statement-interception events, + please use the :class:`_events.ConnectionEvents` interface. + + .. 
seealso:: + + :meth:`_events.ConnectionEvents.before_cursor_execute` + + :meth:`_events.ConnectionEvents.before_execute` + + :meth:`_events.ConnectionEvents.after_cursor_execute` + + :meth:`_events.ConnectionEvents.after_execute` + + """ + + _target_class_doc = "SomeEngine" + _dispatch_target = Dialect + + @classmethod + def _listen( + cls, + event_key: event._EventKey[Dialect], + *, + retval: bool = False, + **kw: Any, + ) -> None: + target = event_key.dispatch_target + + target._has_events = True + event_key.base_listen() + + @classmethod + def _accept_with( + cls, + target: Union[Engine, Type[Engine], Dialect, Type[Dialect]], + identifier: str, + ) -> Optional[Union[Dialect, Type[Dialect]]]: + if isinstance(target, type): + if issubclass(target, Engine): + return Dialect + elif issubclass(target, Dialect): + return target + elif isinstance(target, Engine): + return target.dialect + elif isinstance(target, Dialect): + return target + elif isinstance(target, Connection) and identifier == "handle_error": + raise exc.InvalidRequestError( + "The handle_error() event hook as of SQLAlchemy 2.0 is " + "established on the Dialect, and may only be applied to the " + "Engine as a whole or to a specific Dialect as a whole, " + "not on a per-Connection basis." + ) + elif hasattr(target, "_no_async_engine_events"): + target._no_async_engine_events() + else: + return None + + def handle_error( + self, exception_context: ExceptionContext + ) -> Optional[BaseException]: + r"""Intercept all exceptions processed by the + :class:`_engine.Dialect`, typically but not limited to those + emitted within the scope of a :class:`_engine.Connection`. + + .. versionchanged:: 2.0 the :meth:`.DialectEvents.handle_error` event + is moved to the :class:`.DialectEvents` class, moved from the + :class:`.ConnectionEvents` class, so that it may also participate in + the "pre ping" operation configured with the + :paramref:`_sa.create_engine.pool_pre_ping` parameter. The event + remains registered by using the :class:`_engine.Engine` as the event + target, however note that using the :class:`_engine.Connection` as + an event target for :meth:`.DialectEvents.handle_error` is no longer + supported. + + This includes all exceptions emitted by the DBAPI as well as + within SQLAlchemy's statement invocation process, including + encoding errors and other statement validation errors. Other areas + in which the event is invoked include transaction begin and end, + result row fetching, cursor creation. + + Note that :meth:`.handle_error` may support new kinds of exceptions + and new calling scenarios at *any time*. Code which uses this + event must expect new calling patterns to be present in minor + releases. + + To support the wide variety of members that correspond to an exception, + as well as to allow extensibility of the event without backwards + incompatibility, the sole argument received is an instance of + :class:`.ExceptionContext`. This object contains data members + representing detail about the exception. 
+ + Use cases supported by this hook include: + + * read-only, low-level exception handling for logging and + debugging purposes + * Establishing whether a DBAPI connection error message indicates + that the database connection needs to be reconnected, including + for the "pre_ping" handler used by **some** dialects + * Establishing or disabling whether a connection or the owning + connection pool is invalidated or expired in response to a + specific exception + * exception re-writing + + The hook is called while the cursor from the failed operation + (if any) is still open and accessible. Special cleanup operations + can be called on this cursor; SQLAlchemy will attempt to close + this cursor subsequent to this hook being invoked. + + As of SQLAlchemy 2.0, the "pre_ping" handler enabled using the + :paramref:`_sa.create_engine.pool_pre_ping` parameter will also + participate in the :meth:`.handle_error` process, **for those dialects + that rely upon disconnect codes to detect database liveness**. Note + that some dialects such as psycopg, psycopg2, and most MySQL dialects + make use of a native ``ping()`` method supplied by the DBAPI which does + not make use of disconnect codes. + + .. versionchanged:: 2.0.0 The :meth:`.DialectEvents.handle_error` + event hook participates in connection pool "pre-ping" operations. + Within this usage, the :attr:`.ExceptionContext.engine` attribute + will be ``None``, however the :class:`.Dialect` in use is always + available via the :attr:`.ExceptionContext.dialect` attribute. + + .. versionchanged:: 2.0.5 Added :attr:`.ExceptionContext.is_pre_ping` + attribute which will be set to ``True`` when the + :meth:`.DialectEvents.handle_error` event hook is triggered within + a connection pool pre-ping operation. + + .. versionchanged:: 2.0.5 An issue was repaired that allows for the + PostgreSQL ``psycopg`` and ``psycopg2`` drivers, as well as all + MySQL drivers, to properly participate in the + :meth:`.DialectEvents.handle_error` event hook during + connection pool "pre-ping" operations; previously, the + implementation was non-working for these drivers. + + + A handler function has two options for replacing + the SQLAlchemy-constructed exception with one that is user + defined. It can either raise this new exception directly, in + which case all further event listeners are bypassed and the + exception will be raised, after appropriate cleanup has taken + place:: + + @event.listens_for(Engine, "handle_error") + def handle_exception(context): + if isinstance( + context.original_exception, psycopg2.OperationalError + ) and "failed" in str(context.original_exception): + raise MySpecialException("failed operation") + + .. warning:: Because the + :meth:`_events.DialectEvents.handle_error` + event specifically provides for exceptions to be re-thrown as + the ultimate exception raised by the failed statement, + **stack traces will be misleading** if the user-defined event + handler itself fails and throws an unexpected exception; + the stack trace may not illustrate the actual code line that + failed! It is advised to code carefully here and use + logging and/or inline debugging if unexpected exceptions are + occurring. + + Alternatively, a "chained" style of event handling can be + used, by configuring the handler with the ``retval=True`` + modifier and returning the new exception instance from the + function. In this case, event handling will continue onto the + next handler. 
The "chained" exception is available using + :attr:`.ExceptionContext.chained_exception`:: + + @event.listens_for(Engine, "handle_error", retval=True) + def handle_exception(context): + if ( + context.chained_exception is not None + and "special" in context.chained_exception.message + ): + return MySpecialException( + "failed", cause=context.chained_exception + ) + + Handlers that return ``None`` may be used within the chain; when + a handler returns ``None``, the previous exception instance, + if any, is maintained as the current exception that is passed onto the + next handler. + + When a custom exception is raised or returned, SQLAlchemy raises + this new exception as-is, it is not wrapped by any SQLAlchemy + object. If the exception is not a subclass of + :class:`sqlalchemy.exc.StatementError`, + certain features may not be available; currently this includes + the ORM's feature of adding a detail hint about "autoflush" to + exceptions raised within the autoflush process. + + :param context: an :class:`.ExceptionContext` object. See this + class for details on all available members. + + + .. seealso:: + + :ref:`pool_new_disconnect_codes` + + """ + + def do_connect( + self, + dialect: Dialect, + conn_rec: ConnectionPoolEntry, + cargs: Tuple[Any, ...], + cparams: Dict[str, Any], + ) -> Optional[DBAPIConnection]: + """Receive connection arguments before a connection is made. + + This event is useful in that it allows the handler to manipulate the + cargs and/or cparams collections that control how the DBAPI + ``connect()`` function will be called. ``cargs`` will always be a + Python list that can be mutated in-place, and ``cparams`` a Python + dictionary that may also be mutated:: + + e = create_engine("postgresql+psycopg2://user@host/dbname") + + + @event.listens_for(e, "do_connect") + def receive_do_connect(dialect, conn_rec, cargs, cparams): + cparams["password"] = "some_password" + + The event hook may also be used to override the call to ``connect()`` + entirely, by returning a non-``None`` DBAPI connection object:: + + e = create_engine("postgresql+psycopg2://user@host/dbname") + + + @event.listens_for(e, "do_connect") + def receive_do_connect(dialect, conn_rec, cargs, cparams): + return psycopg2.connect(*cargs, **cparams) + + .. seealso:: + + :ref:`custom_dbapi_args` + + """ + + def do_executemany( + self, + cursor: DBAPICursor, + statement: str, + parameters: _DBAPIMultiExecuteParams, + context: ExecutionContext, + ) -> Optional[Literal[True]]: + """Receive a cursor to have executemany() called. + + Return the value True to halt further events from invoking, + and to indicate that the cursor execution has already taken + place within the event handler. + + """ + + def do_execute_no_params( + self, cursor: DBAPICursor, statement: str, context: ExecutionContext + ) -> Optional[Literal[True]]: + """Receive a cursor to have execute() with no parameters called. + + Return the value True to halt further events from invoking, + and to indicate that the cursor execution has already taken + place within the event handler. + + """ + + def do_execute( + self, + cursor: DBAPICursor, + statement: str, + parameters: _DBAPISingleExecuteParams, + context: ExecutionContext, + ) -> Optional[Literal[True]]: + """Receive a cursor to have execute() called. + + Return the value True to halt further events from invoking, + and to indicate that the cursor execution has already taken + place within the event handler. 
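        For example (a sketch only; ``some_engine`` is a placeholder for an
        :class:`_engine.Engine`), a handler that performs the cursor
        execution itself and bypasses the default behavior returns
        ``True``::

            from sqlalchemy import event

            @event.listens_for(some_engine, "do_execute")
            def do_execute_directly(cursor, statement, parameters, context):
                cursor.execute(statement, parameters)
                return True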
+ + """ + + def do_setinputsizes( + self, + inputsizes: Dict[BindParameter[Any], Any], + cursor: DBAPICursor, + statement: str, + parameters: _DBAPIAnyExecuteParams, + context: ExecutionContext, + ) -> None: + """Receive the setinputsizes dictionary for possible modification. + + This event is emitted in the case where the dialect makes use of the + DBAPI ``cursor.setinputsizes()`` method which passes information about + parameter binding for a particular statement. The given + ``inputsizes`` dictionary will contain :class:`.BindParameter` objects + as keys, linked to DBAPI-specific type objects as values; for + parameters that are not bound, they are added to the dictionary with + ``None`` as the value, which means the parameter will not be included + in the ultimate setinputsizes call. The event may be used to inspect + and/or log the datatypes that are being bound, as well as to modify the + dictionary in place. Parameters can be added, modified, or removed + from this dictionary. Callers will typically want to inspect the + :attr:`.BindParameter.type` attribute of the given bind objects in + order to make decisions about the DBAPI object. + + After the event, the ``inputsizes`` dictionary is converted into + an appropriate datastructure to be passed to ``cursor.setinputsizes``; + either a list for a positional bound parameter execution style, + or a dictionary of string parameter keys to DBAPI type objects for + a named bound parameter execution style. + + The setinputsizes hook overall is only used for dialects which include + the flag ``use_setinputsizes=True``. Dialects which use this + include python-oracledb, cx_Oracle, pg8000, asyncpg, and pyodbc + dialects. + + .. note:: + + For use with pyodbc, the ``use_setinputsizes`` flag + must be passed to the dialect, e.g.:: + + create_engine("mssql+pyodbc://...", use_setinputsizes=True) + + .. seealso:: + + :ref:`mssql_pyodbc_setinputsizes` + + .. versionadded:: 1.2.9 + + .. seealso:: + + :ref:`cx_oracle_setinputsizes` + + """ + pass diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/engine/interfaces.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/engine/interfaces.py new file mode 100644 index 0000000000000000000000000000000000000000..1823c97fc31a578e39034ccc271593aad92e602f --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/engine/interfaces.py @@ -0,0 +1,3413 @@ +# engine/interfaces.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php + +"""Define core interfaces used by the engine system.""" + +from __future__ import annotations + +from enum import Enum +from types import ModuleType +from typing import Any +from typing import Awaitable +from typing import Callable +from typing import ClassVar +from typing import Collection +from typing import Dict +from typing import Iterable +from typing import Iterator +from typing import List +from typing import Mapping +from typing import MutableMapping +from typing import Optional +from typing import Sequence +from typing import Set +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING +from typing import TypeVar +from typing import Union + +from .. 
import util +from ..event import EventTarget +from ..pool import Pool +from ..pool import PoolProxiedConnection +from ..sql.compiler import Compiled as Compiled +from ..sql.compiler import Compiled # noqa +from ..sql.compiler import TypeCompiler as TypeCompiler +from ..sql.compiler import TypeCompiler # noqa +from ..util import immutabledict +from ..util.concurrency import await_only +from ..util.typing import Literal +from ..util.typing import NotRequired +from ..util.typing import Protocol +from ..util.typing import TypedDict + +if TYPE_CHECKING: + from .base import Connection + from .base import Engine + from .cursor import CursorResult + from .url import URL + from ..event import _ListenerFnType + from ..event import dispatcher + from ..exc import StatementError + from ..sql import Executable + from ..sql.compiler import _InsertManyValuesBatch + from ..sql.compiler import DDLCompiler + from ..sql.compiler import IdentifierPreparer + from ..sql.compiler import InsertmanyvaluesSentinelOpts + from ..sql.compiler import Linting + from ..sql.compiler import SQLCompiler + from ..sql.elements import BindParameter + from ..sql.elements import ClauseElement + from ..sql.schema import Column + from ..sql.schema import DefaultGenerator + from ..sql.schema import SchemaItem + from ..sql.schema import Sequence as Sequence_SchemaItem + from ..sql.sqltypes import Integer + from ..sql.type_api import _TypeMemoDict + from ..sql.type_api import TypeEngine + +ConnectArgsType = Tuple[Sequence[str], MutableMapping[str, Any]] + +_T = TypeVar("_T", bound="Any") + + +class CacheStats(Enum): + CACHE_HIT = 0 + CACHE_MISS = 1 + CACHING_DISABLED = 2 + NO_CACHE_KEY = 3 + NO_DIALECT_SUPPORT = 4 + + +class ExecuteStyle(Enum): + """indicates the :term:`DBAPI` cursor method that will be used to invoke + a statement.""" + + EXECUTE = 0 + """indicates cursor.execute() will be used""" + + EXECUTEMANY = 1 + """indicates cursor.executemany() will be used.""" + + INSERTMANYVALUES = 2 + """indicates cursor.execute() will be used with an INSERT where the + VALUES expression will be expanded to accommodate for multiple + parameter sets + + .. seealso:: + + :ref:`engine_insertmanyvalues` + + """ + + +class DBAPIConnection(Protocol): + """protocol representing a :pep:`249` database connection. + + .. versionadded:: 2.0 + + .. seealso:: + + `Connection Objects `_ + - in :pep:`249` + + """ # noqa: E501 + + def close(self) -> None: ... + + def commit(self) -> None: ... + + def cursor(self, *args: Any, **kwargs: Any) -> DBAPICursor: ... + + def rollback(self) -> None: ... + + autocommit: bool + + +class DBAPIType(Protocol): + """protocol representing a :pep:`249` database type. + + .. versionadded:: 2.0 + + .. seealso:: + + `Type Objects `_ + - in :pep:`249` + + """ # noqa: E501 + + +class DBAPICursor(Protocol): + """protocol representing a :pep:`249` database cursor. + + .. versionadded:: 2.0 + + .. seealso:: + + `Cursor Objects `_ + - in :pep:`249` + + """ # noqa: E501 + + @property + def description( + self, + ) -> _DBAPICursorDescription: + """The description attribute of the Cursor. + + .. seealso:: + + `cursor.description `_ + - in :pep:`249` + + + """ # noqa: E501 + ... + + @property + def rowcount(self) -> int: ... + + arraysize: int + + lastrowid: int + + def close(self) -> None: ... + + def execute( + self, + operation: Any, + parameters: Optional[_DBAPISingleExecuteParams] = None, + ) -> Any: ... + + def executemany( + self, + operation: Any, + parameters: _DBAPIMultiExecuteParams, + ) -> Any: ... 
+ + def fetchone(self) -> Optional[Any]: ... + + def fetchmany(self, size: int = ...) -> Sequence[Any]: ... + + def fetchall(self) -> Sequence[Any]: ... + + def setinputsizes(self, sizes: Sequence[Any]) -> None: ... + + def setoutputsize(self, size: Any, column: Any) -> None: ... + + def callproc( + self, procname: str, parameters: Sequence[Any] = ... + ) -> Any: ... + + def nextset(self) -> Optional[bool]: ... + + def __getattr__(self, key: str) -> Any: ... + + +_CoreSingleExecuteParams = Mapping[str, Any] +_MutableCoreSingleExecuteParams = MutableMapping[str, Any] +_CoreMultiExecuteParams = Sequence[_CoreSingleExecuteParams] +_CoreAnyExecuteParams = Union[ + _CoreMultiExecuteParams, _CoreSingleExecuteParams +] + +_DBAPISingleExecuteParams = Union[Sequence[Any], _CoreSingleExecuteParams] + +_DBAPIMultiExecuteParams = Union[ + Sequence[Sequence[Any]], _CoreMultiExecuteParams +] +_DBAPIAnyExecuteParams = Union[ + _DBAPIMultiExecuteParams, _DBAPISingleExecuteParams +] +_DBAPICursorDescription = Sequence[ + Tuple[ + str, + "DBAPIType", + Optional[int], + Optional[int], + Optional[int], + Optional[int], + Optional[bool], + ] +] + +_AnySingleExecuteParams = _DBAPISingleExecuteParams +_AnyMultiExecuteParams = _DBAPIMultiExecuteParams +_AnyExecuteParams = _DBAPIAnyExecuteParams + +CompiledCacheType = MutableMapping[Any, "Compiled"] +SchemaTranslateMapType = Mapping[Optional[str], Optional[str]] + +_ImmutableExecuteOptions = immutabledict[str, Any] + +_ParamStyle = Literal[ + "qmark", "numeric", "named", "format", "pyformat", "numeric_dollar" +] + +_GenericSetInputSizesType = List[Tuple[str, Any, "TypeEngine[Any]"]] + +IsolationLevel = Literal[ + "SERIALIZABLE", + "REPEATABLE READ", + "READ COMMITTED", + "READ UNCOMMITTED", + "AUTOCOMMIT", +] + + +class _CoreKnownExecutionOptions(TypedDict, total=False): + compiled_cache: Optional[CompiledCacheType] + logging_token: str + isolation_level: IsolationLevel + no_parameters: bool + stream_results: bool + max_row_buffer: int + yield_per: int + insertmanyvalues_page_size: int + schema_translate_map: Optional[SchemaTranslateMapType] + preserve_rowcount: bool + + +_ExecuteOptions = immutabledict[str, Any] +CoreExecuteOptionsParameter = Union[ + _CoreKnownExecutionOptions, Mapping[str, Any] +] + + +class ReflectedIdentity(TypedDict): + """represent the reflected IDENTITY structure of a column, corresponding + to the :class:`_schema.Identity` construct. + + The :class:`.ReflectedIdentity` structure is part of the + :class:`.ReflectedColumn` structure, which is returned by the + :meth:`.Inspector.get_columns` method. + + """ + + always: bool + """type of identity column""" + + on_null: bool + """indicates ON NULL""" + + start: int + """starting index of the sequence""" + + increment: int + """increment value of the sequence""" + + minvalue: int + """the minimum value of the sequence.""" + + maxvalue: int + """the maximum value of the sequence.""" + + nominvalue: bool + """no minimum value of the sequence.""" + + nomaxvalue: bool + """no maximum value of the sequence.""" + + cycle: bool + """allows the sequence to wrap around when the maxvalue + or minvalue has been reached.""" + + cache: Optional[int] + """number of future values in the + sequence which are calculated in advance.""" + + order: bool + """if true, renders the ORDER keyword.""" + + +class ReflectedComputed(TypedDict): + """Represent the reflected elements of a computed column, corresponding + to the :class:`_schema.Computed` construct. 
+ + The :class:`.ReflectedComputed` structure is part of the + :class:`.ReflectedColumn` structure, which is returned by the + :meth:`.Inspector.get_columns` method. + + """ + + sqltext: str + """the expression used to generate this column returned + as a string SQL expression""" + + persisted: NotRequired[bool] + """indicates if the value is stored in the table or computed on demand""" + + +class ReflectedColumn(TypedDict): + """Dictionary representing the reflected elements corresponding to + a :class:`_schema.Column` object. + + The :class:`.ReflectedColumn` structure is returned by the + :class:`.Inspector.get_columns` method. + + """ + + name: str + """column name""" + + type: TypeEngine[Any] + """column type represented as a :class:`.TypeEngine` instance.""" + + nullable: bool + """boolean flag if the column is NULL or NOT NULL""" + + default: Optional[str] + """column default expression as a SQL string""" + + autoincrement: NotRequired[bool] + """database-dependent autoincrement flag. + + This flag indicates if the column has a database-side "autoincrement" + flag of some kind. Within SQLAlchemy, other kinds of columns may + also act as an "autoincrement" column without necessarily having + such a flag on them. + + See :paramref:`_schema.Column.autoincrement` for more background on + "autoincrement". + + """ + + comment: NotRequired[Optional[str]] + """comment for the column, if present. + Only some dialects return this key + """ + + computed: NotRequired[ReflectedComputed] + """indicates that this column is computed by the database. + Only some dialects return this key. + + .. versionadded:: 1.3.16 - added support for computed reflection. + """ + + identity: NotRequired[ReflectedIdentity] + """indicates this column is an IDENTITY column. + Only some dialects return this key. + + .. versionadded:: 1.4 - added support for identity column reflection. + """ + + dialect_options: NotRequired[Dict[str, Any]] + """Additional dialect-specific options detected for this reflected + object""" + + +class ReflectedConstraint(TypedDict): + """Dictionary representing the reflected elements corresponding to + :class:`.Constraint` + + A base class for all constraints + """ + + name: Optional[str] + """constraint name""" + + comment: NotRequired[Optional[str]] + """comment for the constraint, if present""" + + +class ReflectedCheckConstraint(ReflectedConstraint): + """Dictionary representing the reflected elements corresponding to + :class:`.CheckConstraint`. + + The :class:`.ReflectedCheckConstraint` structure is returned by the + :meth:`.Inspector.get_check_constraints` method. + + """ + + sqltext: str + """the check constraint's SQL expression""" + + dialect_options: NotRequired[Dict[str, Any]] + """Additional dialect-specific options detected for this check constraint + + .. versionadded:: 1.3.8 + """ + + +class ReflectedUniqueConstraint(ReflectedConstraint): + """Dictionary representing the reflected elements corresponding to + :class:`.UniqueConstraint`. + + The :class:`.ReflectedUniqueConstraint` structure is returned by the + :meth:`.Inspector.get_unique_constraints` method. 
+ + """ + + column_names: List[str] + """column names which comprise the unique constraint""" + + duplicates_index: NotRequired[Optional[str]] + "Indicates if this unique constraint duplicates an index with this name" + + dialect_options: NotRequired[Dict[str, Any]] + """Additional dialect-specific options detected for this unique + constraint""" + + +class ReflectedPrimaryKeyConstraint(ReflectedConstraint): + """Dictionary representing the reflected elements corresponding to + :class:`.PrimaryKeyConstraint`. + + The :class:`.ReflectedPrimaryKeyConstraint` structure is returned by the + :meth:`.Inspector.get_pk_constraint` method. + + """ + + constrained_columns: List[str] + """column names which comprise the primary key""" + + dialect_options: NotRequired[Dict[str, Any]] + """Additional dialect-specific options detected for this primary key""" + + +class ReflectedForeignKeyConstraint(ReflectedConstraint): + """Dictionary representing the reflected elements corresponding to + :class:`.ForeignKeyConstraint`. + + The :class:`.ReflectedForeignKeyConstraint` structure is returned by + the :meth:`.Inspector.get_foreign_keys` method. + + """ + + constrained_columns: List[str] + """local column names which comprise the foreign key""" + + referred_schema: Optional[str] + """schema name of the table being referred""" + + referred_table: str + """name of the table being referred""" + + referred_columns: List[str] + """referred column names that correspond to ``constrained_columns``""" + + options: NotRequired[Dict[str, Any]] + """Additional options detected for this foreign key constraint""" + + +class ReflectedIndex(TypedDict): + """Dictionary representing the reflected elements corresponding to + :class:`.Index`. + + The :class:`.ReflectedIndex` structure is returned by the + :meth:`.Inspector.get_indexes` method. + + """ + + name: Optional[str] + """index name""" + + column_names: List[Optional[str]] + """column names which the index references. + An element of this list is ``None`` if it's an expression and is + returned in the ``expressions`` list. + """ + + expressions: NotRequired[List[str]] + """Expressions that compose the index. This list, when present, contains + both plain column names (that are also in ``column_names``) and + expressions (that are ``None`` in ``column_names``). + """ + + unique: bool + """whether or not the index has a unique flag""" + + duplicates_constraint: NotRequired[Optional[str]] + "Indicates if this index mirrors a constraint with this name" + + include_columns: NotRequired[List[str]] + """columns to include in the INCLUDE clause for supporting databases. + + .. deprecated:: 2.0 + + Legacy value, will be replaced with + ``index_dict["dialect_options"]["_include"]`` + + """ + + column_sorting: NotRequired[Dict[str, Tuple[str]]] + """optional dict mapping column names or expressions to tuple of sort + keywords, which may include ``asc``, ``desc``, ``nulls_first``, + ``nulls_last``. + + .. versionadded:: 1.3.5 + """ + + dialect_options: NotRequired[Dict[str, Any]] + """Additional dialect-specific options detected for this index""" + + +class ReflectedTableComment(TypedDict): + """Dictionary representing the reflected comment corresponding to + the :attr:`_schema.Table.comment` attribute. + + The :class:`.ReflectedTableComment` structure is returned by the + :meth:`.Inspector.get_table_comment` method. 
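    For example (a sketch; the URL and table name are placeholders), these
    reflection dictionaries are obtained through the :class:`.Inspector`
    interface rather than by calling dialect methods directly::

        from sqlalchemy import create_engine, inspect

        engine = create_engine("postgresql+psycopg2://scott:tiger@localhost/test")
        insp = inspect(engine)

        print(insp.get_table_comment("orders")["text"])

        for col in insp.get_columns("orders"):
            print(col["name"], col["type"], col["nullable"])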
+ + """ + + text: Optional[str] + """text of the comment""" + + +class BindTyping(Enum): + """Define different methods of passing typing information for + bound parameters in a statement to the database driver. + + .. versionadded:: 2.0 + + """ + + NONE = 1 + """No steps are taken to pass typing information to the database driver. + + This is the default behavior for databases such as SQLite, MySQL / MariaDB, + SQL Server. + + """ + + SETINPUTSIZES = 2 + """Use the pep-249 setinputsizes method. + + This is only implemented for DBAPIs that support this method and for which + the SQLAlchemy dialect has the appropriate infrastructure for that dialect + set up. Current dialects include python-oracledb, cx_Oracle as well as + optional support for SQL Server using pyodbc. + + When using setinputsizes, dialects also have a means of only using the + method for certain datatypes using include/exclude lists. + + When SETINPUTSIZES is used, the :meth:`.Dialect.do_set_input_sizes` method + is called for each statement executed which has bound parameters. + + """ + + RENDER_CASTS = 3 + """Render casts or other directives in the SQL string. + + This method is used for all PostgreSQL dialects, including asyncpg, + pg8000, psycopg, psycopg2. Dialects which implement this can choose + which kinds of datatypes are explicitly cast in SQL statements and which + aren't. + + When RENDER_CASTS is used, the compiler will invoke the + :meth:`.SQLCompiler.render_bind_cast` method for the rendered + string representation of each :class:`.BindParameter` object whose + dialect-level type sets the :attr:`.TypeEngine.render_bind_cast` attribute. + + The :meth:`.SQLCompiler.render_bind_cast` is also used to render casts + for one form of "insertmanyvalues" query, when both + :attr:`.InsertmanyvaluesSentinelOpts.USE_INSERT_FROM_SELECT` and + :attr:`.InsertmanyvaluesSentinelOpts.RENDER_SELECT_COL_CASTS` are set, + where the casts are applied to the intermediary columns e.g. + "INSERT INTO t (a, b, c) SELECT p0::TYP, p1::TYP, p2::TYP " + "FROM (VALUES (?, ?), (?, ?), ...)". + + .. versionadded:: 2.0.10 - :meth:`.SQLCompiler.render_bind_cast` is now + used within some elements of the "insertmanyvalues" implementation. + + + """ + + +VersionInfoType = Tuple[Union[int, str], ...] +TableKey = Tuple[Optional[str], str] + + +class Dialect(EventTarget): + """Define the behavior of a specific database and DB-API combination. + + Any aspect of metadata definition, SQL query generation, + execution, result-set handling, or anything else which varies + between databases is defined under the general category of the + Dialect. The Dialect acts as a factory for other + database-specific object implementations including + ExecutionContext, Compiled, DefaultGenerator, and TypeEngine. + + .. note:: Third party dialects should not subclass :class:`.Dialect` + directly. Instead, subclass :class:`.default.DefaultDialect` or + descendant class. + + """ + + CACHE_HIT = CacheStats.CACHE_HIT + CACHE_MISS = CacheStats.CACHE_MISS + CACHING_DISABLED = CacheStats.CACHING_DISABLED + NO_CACHE_KEY = CacheStats.NO_CACHE_KEY + NO_DIALECT_SUPPORT = CacheStats.NO_DIALECT_SUPPORT + + dispatch: dispatcher[Dialect] + + name: str + """identifying name for the dialect from a DBAPI-neutral point of view + (i.e. 'sqlite') + """ + + driver: str + """identifying name for the dialect's DBAPI""" + + dialect_description: str + + dbapi: Optional[ModuleType] + """A reference to the DBAPI module object itself. 
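    For example (illustrative only), once an :class:`.Engine` has been
    constructed the imported module is available from the dialect
    instance::

        from sqlalchemy import create_engine

        engine = create_engine("sqlite://")
        engine.dialect.dbapi  # the ``sqlite3`` DBAPI module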
+ + SQLAlchemy dialects import DBAPI modules using the classmethod + :meth:`.Dialect.import_dbapi`. The rationale is so that any dialect + module can be imported and used to generate SQL statements without the + need for the actual DBAPI driver to be installed. Only when an + :class:`.Engine` is constructed using :func:`.create_engine` does the + DBAPI get imported; at that point, the creation process will assign + the DBAPI module to this attribute. + + Dialects should therefore implement :meth:`.Dialect.import_dbapi` + which will import the necessary module and return it, and then refer + to ``self.dbapi`` in dialect code in order to refer to the DBAPI module + contents. + + .. versionchanged:: The :attr:`.Dialect.dbapi` attribute is exclusively + used as the per-:class:`.Dialect`-instance reference to the DBAPI + module. The previous not-fully-documented ``.Dialect.dbapi()`` + classmethod is deprecated and replaced by :meth:`.Dialect.import_dbapi`. + + """ + + @util.non_memoized_property + def loaded_dbapi(self) -> ModuleType: + """same as .dbapi, but is never None; will raise an error if no + DBAPI was set up. + + .. versionadded:: 2.0 + + """ + raise NotImplementedError() + + positional: bool + """True if the paramstyle for this Dialect is positional.""" + + paramstyle: str + """the paramstyle to be used (some DB-APIs support multiple + paramstyles). + """ + + compiler_linting: Linting + + statement_compiler: Type[SQLCompiler] + """a :class:`.Compiled` class used to compile SQL statements""" + + ddl_compiler: Type[DDLCompiler] + """a :class:`.Compiled` class used to compile DDL statements""" + + type_compiler_cls: ClassVar[Type[TypeCompiler]] + """a :class:`.Compiled` class used to compile SQL type objects + + .. versionadded:: 2.0 + + """ + + type_compiler_instance: TypeCompiler + """instance of a :class:`.Compiled` class used to compile SQL type + objects + + .. versionadded:: 2.0 + + """ + + type_compiler: Any + """legacy; this is a TypeCompiler class at the class level, a + TypeCompiler instance at the instance level. + + Refer to type_compiler_instance instead. + + """ + + preparer: Type[IdentifierPreparer] + """a :class:`.IdentifierPreparer` class used to + quote identifiers. + """ + + identifier_preparer: IdentifierPreparer + """This element will refer to an instance of :class:`.IdentifierPreparer` + once a :class:`.DefaultDialect` has been constructed. + + """ + + server_version_info: Optional[Tuple[Any, ...]] + """a tuple containing a version number for the DB backend in use. + + This value is only available for supporting dialects, and is + typically populated during the initial connection to the database. + """ + + default_schema_name: Optional[str] + """the name of the default schema. This value is only available for + supporting dialects, and is typically populated during the + initial connection to the database. + + """ + + # NOTE: this does not take into effect engine-level isolation level. 
+ # not clear if this should be changed, seems like it should + default_isolation_level: Optional[IsolationLevel] + """the isolation that is implicitly present on new connections""" + + # create_engine() -> isolation_level currently goes here + _on_connect_isolation_level: Optional[IsolationLevel] + + execution_ctx_cls: Type[ExecutionContext] + """a :class:`.ExecutionContext` class used to handle statement execution""" + + execute_sequence_format: Union[ + Type[Tuple[Any, ...]], Type[Tuple[List[Any]]] + ] + """either the 'tuple' or 'list' type, depending on what cursor.execute() + accepts for the second argument (they vary).""" + + supports_alter: bool + """``True`` if the database supports ``ALTER TABLE`` - used only for + generating foreign key constraints in certain circumstances + """ + + max_identifier_length: int + """The maximum length of identifier names.""" + max_index_name_length: Optional[int] + """The maximum length of index names if different from + ``max_identifier_length``.""" + max_constraint_name_length: Optional[int] + """The maximum length of constraint names if different from + ``max_identifier_length``.""" + + supports_server_side_cursors: bool + """indicates if the dialect supports server side cursors""" + + server_side_cursors: bool + """deprecated; indicates if the dialect should attempt to use server + side cursors by default""" + + supports_sane_rowcount: bool + """Indicate whether the dialect properly implements rowcount for + ``UPDATE`` and ``DELETE`` statements. + """ + + supports_sane_multi_rowcount: bool + """Indicate whether the dialect properly implements rowcount for + ``UPDATE`` and ``DELETE`` statements when executed via + executemany. + """ + + supports_empty_insert: bool + """dialect supports INSERT () VALUES (), i.e. a plain INSERT with no + columns in it. + + This is not usually supported; an "empty" insert is typically + suited using either "INSERT..DEFAULT VALUES" or + "INSERT ... (col) VALUES (DEFAULT)". + + """ + + supports_default_values: bool + """dialect supports INSERT... DEFAULT VALUES syntax""" + + supports_default_metavalue: bool + """dialect supports INSERT...(col) VALUES (DEFAULT) syntax. + + Most databases support this in some way, e.g. SQLite supports it using + ``VALUES (NULL)``. MS SQL Server supports the syntax also however + is the only included dialect where we have this disabled, as + MSSQL does not support the field for the IDENTITY column, which is + usually where we like to make use of the feature. + + """ + + default_metavalue_token: str = "DEFAULT" + """for INSERT... VALUES (DEFAULT) syntax, the token to put in the + parenthesis. + + E.g. for SQLite this is the keyword "NULL". + + """ + + supports_multivalues_insert: bool + """Target database supports INSERT...VALUES with multiple value + sets, i.e. INSERT INTO table (cols) VALUES (...), (...), (...), ... + + """ + + insert_executemany_returning: bool + """dialect / driver / database supports some means of providing + INSERT...RETURNING support when dialect.do_executemany() is used. + + """ + + insert_executemany_returning_sort_by_parameter_order: bool + """dialect / driver / database supports some means of providing + INSERT...RETURNING support when dialect.do_executemany() is used + along with the :paramref:`_dml.Insert.returning.sort_by_parameter_order` + parameter being set. 
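    For example (a sketch; ``connection``, ``table`` and the parameter sets
    are placeholders), this capability corresponds to statements of the
    form::

        result = connection.execute(
            table.insert().returning(
                table.c.id, sort_by_parameter_order=True
            ),
            [{"data": "d1"}, {"data": "d2"}, {"data": "d3"}],
        )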
+ + """ + + update_executemany_returning: bool + """dialect supports UPDATE..RETURNING with executemany.""" + + delete_executemany_returning: bool + """dialect supports DELETE..RETURNING with executemany.""" + + use_insertmanyvalues: bool + """if True, indicates "insertmanyvalues" functionality should be used + to allow for ``insert_executemany_returning`` behavior, if possible. + + In practice, setting this to True means: + + if ``supports_multivalues_insert``, ``insert_returning`` and + ``use_insertmanyvalues`` are all True, the SQL compiler will produce + an INSERT that will be interpreted by the :class:`.DefaultDialect` + as an :attr:`.ExecuteStyle.INSERTMANYVALUES` execution that allows + for INSERT of many rows with RETURNING by rewriting a single-row + INSERT statement to have multiple VALUES clauses, also executing + the statement multiple times for a series of batches when large numbers + of rows are given. + + The parameter is False for the default dialect, and is set to True for + SQLAlchemy internal dialects SQLite, MySQL/MariaDB, PostgreSQL, SQL Server. + It remains at False for Oracle Database, which provides native "executemany + with RETURNING" support and also does not support + ``supports_multivalues_insert``. For MySQL/MariaDB, those MySQL dialects + that don't support RETURNING will not report + ``insert_executemany_returning`` as True. + + .. versionadded:: 2.0 + + .. seealso:: + + :ref:`engine_insertmanyvalues` + + """ + + use_insertmanyvalues_wo_returning: bool + """if True, and use_insertmanyvalues is also True, INSERT statements + that don't include RETURNING will also use "insertmanyvalues". + + .. versionadded:: 2.0 + + .. seealso:: + + :ref:`engine_insertmanyvalues` + + """ + + insertmanyvalues_implicit_sentinel: InsertmanyvaluesSentinelOpts + """Options indicating the database supports a form of bulk INSERT where + the autoincrement integer primary key can be reliably used as an ordering + for INSERTed rows. + + .. versionadded:: 2.0.10 + + .. seealso:: + + :ref:`engine_insertmanyvalues_returning_order` + + """ + + insertmanyvalues_page_size: int + """Number of rows to render into an individual INSERT..VALUES() statement + for :attr:`.ExecuteStyle.INSERTMANYVALUES` executions. + + The default dialect defaults this to 1000. + + .. versionadded:: 2.0 + + .. seealso:: + + :paramref:`_engine.Connection.execution_options.insertmanyvalues_page_size` - + execution option available on :class:`_engine.Connection`, statements + + """ # noqa: E501 + + insertmanyvalues_max_parameters: int + """Alternate to insertmanyvalues_page_size, will additionally limit + page size based on number of parameters total in the statement. + + + """ + + preexecute_autoincrement_sequences: bool + """True if 'implicit' primary key functions must be executed separately + in order to get their value, if RETURNING is not used. + + This is currently oriented towards PostgreSQL when the + ``implicit_returning=False`` parameter is used on a :class:`.Table` + object. + + """ + + insert_returning: bool + """if the dialect supports RETURNING with INSERT + + .. versionadded:: 2.0 + + """ + + update_returning: bool + """if the dialect supports RETURNING with UPDATE + + .. versionadded:: 2.0 + + """ + + update_returning_multifrom: bool + """if the dialect supports RETURNING with UPDATE..FROM + + .. versionadded:: 2.0 + + """ + + delete_returning: bool + """if the dialect supports RETURNING with DELETE + + .. 
versionadded:: 2.0 + + """ + + delete_returning_multifrom: bool + """if the dialect supports RETURNING with DELETE..FROM + + .. versionadded:: 2.0 + + """ + + favor_returning_over_lastrowid: bool + """for backends that support both a lastrowid and a RETURNING insert + strategy, favor RETURNING for simple single-int pk inserts. + + cursor.lastrowid tends to be more performant on most backends. + + """ + + supports_identity_columns: bool + """target database supports IDENTITY""" + + cte_follows_insert: bool + """target database, when given a CTE with an INSERT statement, needs + the CTE to be below the INSERT""" + + colspecs: MutableMapping[Type[TypeEngine[Any]], Type[TypeEngine[Any]]] + """A dictionary of TypeEngine classes from sqlalchemy.types mapped + to subclasses that are specific to the dialect class. This + dictionary is class-level only and is not accessed from the + dialect instance itself. + """ + + supports_sequences: bool + """Indicates if the dialect supports CREATE SEQUENCE or similar.""" + + sequences_optional: bool + """If True, indicates if the :paramref:`_schema.Sequence.optional` + parameter on the :class:`_schema.Sequence` construct + should signal to not generate a CREATE SEQUENCE. Applies only to + dialects that support sequences. Currently used only to allow PostgreSQL + SERIAL to be used on a column that specifies Sequence() for usage on + other backends. + """ + + default_sequence_base: int + """the default value that will be rendered as the "START WITH" portion of + a CREATE SEQUENCE DDL statement. + + """ + + supports_native_enum: bool + """Indicates if the dialect supports a native ENUM construct. + This will prevent :class:`_types.Enum` from generating a CHECK + constraint when that type is used in "native" mode. + """ + + supports_native_boolean: bool + """Indicates if the dialect supports a native boolean construct. + This will prevent :class:`_types.Boolean` from generating a CHECK + constraint when that type is used. + """ + + supports_native_decimal: bool + """indicates if Decimal objects are handled and returned for precision + numeric types, or if floats are returned""" + + supports_native_uuid: bool + """indicates if Python UUID() objects are handled natively by the + driver for SQL UUID datatypes. + + .. versionadded:: 2.0 + + """ + + returns_native_bytes: bool + """indicates if Python bytes() objects are returned natively by the + driver for SQL "binary" datatypes. + + .. versionadded:: 2.0.11 + + """ + + construct_arguments: Optional[ + List[Tuple[Type[Union[SchemaItem, ClauseElement]], Mapping[str, Any]]] + ] = None + """Optional set of argument specifiers for various SQLAlchemy + constructs, typically schema items. + + To implement, establish as a series of tuples, as in:: + + construct_arguments = [ + (schema.Index, {"using": False, "where": None, "ops": None}), + ] + + If the above construct is established on the PostgreSQL dialect, + the :class:`.Index` construct will now accept the keyword arguments + ``postgresql_using``, ``postgresql_where``, nad ``postgresql_ops``. + Any other argument specified to the constructor of :class:`.Index` + which is prefixed with ``postgresql_`` will raise :class:`.ArgumentError`. + + A dialect which does not include a ``construct_arguments`` member will + not participate in the argument validation system. For such a dialect, + any argument name is accepted by all participating constructs, within + the namespace of arguments prefixed with that dialect name. 
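    For example (illustrative; ``sometable`` is a placeholder), dialect-prefixed
    arguments are passed directly to the construct, e.g. for the PostgreSQL
    dialect::

        from sqlalchemy import Index

        Index("ix_data", sometable.c.data, postgresql_using="gin")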
The rationale + here is so that third-party dialects that haven't yet implemented this + feature continue to function in the old way. + + .. seealso:: + + :class:`.DialectKWArgs` - implementing base class which consumes + :attr:`.DefaultDialect.construct_arguments` + + + """ + + reflection_options: Sequence[str] = () + """Sequence of string names indicating keyword arguments that can be + established on a :class:`.Table` object which will be passed as + "reflection options" when using :paramref:`.Table.autoload_with`. + + Current example is "oracle_resolve_synonyms" in the Oracle Database + dialects. + + """ + + dbapi_exception_translation_map: Mapping[str, str] = util.EMPTY_DICT + """A dictionary of names that will contain as values the names of + pep-249 exceptions ("IntegrityError", "OperationalError", etc) + keyed to alternate class names, to support the case where a + DBAPI has exception classes that aren't named as they are + referred to (e.g. IntegrityError = MyException). In the vast + majority of cases this dictionary is empty. + """ + + supports_comments: bool + """Indicates the dialect supports comment DDL on tables and columns.""" + + inline_comments: bool + """Indicates the dialect supports comment DDL that's inline with the + definition of a Table or Column. If False, this implies that ALTER must + be used to set table and column comments.""" + + supports_constraint_comments: bool + """Indicates if the dialect supports comment DDL on constraints. + + .. versionadded:: 2.0 + """ + + _has_events = False + + supports_statement_cache: bool = True + """indicates if this dialect supports caching. + + All dialects that are compatible with statement caching should set this + flag to True directly on each dialect class and subclass that supports + it. SQLAlchemy tests that this flag is locally present on each dialect + subclass before it will use statement caching. This is to provide + safety for legacy or new dialects that are not yet fully tested to be + compliant with SQL statement caching. + + .. versionadded:: 1.4.5 + + .. seealso:: + + :ref:`engine_thirdparty_caching` + + """ + + _supports_statement_cache: bool + """internal evaluation for supports_statement_cache""" + + bind_typing = BindTyping.NONE + """define a means of passing typing information to the database and/or + driver for bound parameters. + + See :class:`.BindTyping` for values. + + .. versionadded:: 2.0 + + """ + + is_async: bool + """Whether or not this dialect is intended for asyncio use.""" + + has_terminate: bool + """Whether or not this dialect has a separate "terminate" implementation + that does not block or require awaiting.""" + + engine_config_types: Mapping[str, Any] + """a mapping of string keys that can be in an engine config linked to + type conversion functions. + + """ + + label_length: Optional[int] + """optional user-defined max length for SQL labels""" + + include_set_input_sizes: Optional[Set[Any]] + """set of DBAPI type objects that should be included in + automatic cursor.setinputsizes() calls. + + This is only used if bind_typing is BindTyping.SET_INPUT_SIZES + + """ + + exclude_set_input_sizes: Optional[Set[Any]] + """set of DBAPI type objects that should be excluded in + automatic cursor.setinputsizes() calls. 
+ + This is only used if bind_typing is BindTyping.SET_INPUT_SIZES + + """ + + supports_simple_order_by_label: bool + """target database supports ORDER BY , where + refers to a label in the columns clause of the SELECT""" + + div_is_floordiv: bool + """target database treats the / division operator as "floor division" """ + + tuple_in_values: bool + """target database supports tuple IN, i.e. (x, y) IN ((q, p), (r, z))""" + + _bind_typing_render_casts: bool + + _type_memos: MutableMapping[TypeEngine[Any], _TypeMemoDict] + + def _builtin_onconnect(self) -> Optional[_ListenerFnType]: + raise NotImplementedError() + + def create_connect_args(self, url: URL) -> ConnectArgsType: + """Build DB-API compatible connection arguments. + + Given a :class:`.URL` object, returns a tuple + consisting of a ``(*args, **kwargs)`` suitable to send directly + to the dbapi's connect function. The arguments are sent to the + :meth:`.Dialect.connect` method which then runs the DBAPI-level + ``connect()`` function. + + The method typically makes use of the + :meth:`.URL.translate_connect_args` + method in order to generate a dictionary of options. + + The default implementation is:: + + def create_connect_args(self, url): + opts = url.translate_connect_args() + opts.update(url.query) + return ([], opts) + + :param url: a :class:`.URL` object + + :return: a tuple of ``(*args, **kwargs)`` which will be passed to the + :meth:`.Dialect.connect` method. + + .. seealso:: + + :meth:`.URL.translate_connect_args` + + """ + + raise NotImplementedError() + + @classmethod + def import_dbapi(cls) -> ModuleType: + """Import the DBAPI module that is used by this dialect. + + The Python module object returned here will be assigned as an + instance variable to a constructed dialect under the name + ``.dbapi``. + + .. versionchanged:: 2.0 The :meth:`.Dialect.import_dbapi` class + method is renamed from the previous method ``.Dialect.dbapi()``, + which would be replaced at dialect instantiation time by the + DBAPI module itself, thus using the same name in two different ways. + If a ``.Dialect.dbapi()`` classmethod is present on a third-party + dialect, it will be used and a deprecation warning will be emitted. + + """ + raise NotImplementedError() + + def type_descriptor(self, typeobj: TypeEngine[_T]) -> TypeEngine[_T]: + """Transform a generic type to a dialect-specific type. + + Dialect classes will usually use the + :func:`_types.adapt_type` function in the types module to + accomplish this. + + The returned result is cached *per dialect class* so can + contain no dialect-instance state. + + """ + + raise NotImplementedError() + + def initialize(self, connection: Connection) -> None: + """Called during strategized creation of the dialect with a + connection. + + Allows dialects to configure options based on server version info or + other properties. + + The connection passed here is a SQLAlchemy Connection object, + with full capabilities. + + The initialize() method of the base dialect should be called via + super(). + + .. note:: as of SQLAlchemy 1.4, this method is called **before** + any :meth:`_engine.Dialect.on_connect` hooks are called. + + """ + + if TYPE_CHECKING: + + def _overrides_default(self, method_name: str) -> bool: ... + + def get_columns( + self, + connection: Connection, + table_name: str, + schema: Optional[str] = None, + **kw: Any, + ) -> List[ReflectedColumn]: + """Return information about columns in ``table_name``. 
+ + Given a :class:`_engine.Connection`, a string + ``table_name``, and an optional string ``schema``, return column + information as a list of dictionaries + corresponding to the :class:`.ReflectedColumn` dictionary. + + This is an internal dialect method. Applications should use + :meth:`.Inspector.get_columns`. + + """ + + raise NotImplementedError() + + def get_multi_columns( + self, + connection: Connection, + *, + schema: Optional[str] = None, + filter_names: Optional[Collection[str]] = None, + **kw: Any, + ) -> Iterable[Tuple[TableKey, List[ReflectedColumn]]]: + """Return information about columns in all tables in the + given ``schema``. + + This is an internal dialect method. Applications should use + :meth:`.Inspector.get_multi_columns`. + + .. note:: The :class:`_engine.DefaultDialect` provides a default + implementation that will call the single table method for + each object returned by :meth:`Dialect.get_table_names`, + :meth:`Dialect.get_view_names` or + :meth:`Dialect.get_materialized_view_names` depending on the + provided ``kind``. Dialects that want to support a faster + implementation should implement this method. + + .. versionadded:: 2.0 + + """ + + raise NotImplementedError() + + def get_pk_constraint( + self, + connection: Connection, + table_name: str, + schema: Optional[str] = None, + **kw: Any, + ) -> ReflectedPrimaryKeyConstraint: + """Return information about the primary key constraint on + table_name`. + + Given a :class:`_engine.Connection`, a string + ``table_name``, and an optional string ``schema``, return primary + key information as a dictionary corresponding to the + :class:`.ReflectedPrimaryKeyConstraint` dictionary. + + This is an internal dialect method. Applications should use + :meth:`.Inspector.get_pk_constraint`. + + """ + raise NotImplementedError() + + def get_multi_pk_constraint( + self, + connection: Connection, + *, + schema: Optional[str] = None, + filter_names: Optional[Collection[str]] = None, + **kw: Any, + ) -> Iterable[Tuple[TableKey, ReflectedPrimaryKeyConstraint]]: + """Return information about primary key constraints in + all tables in the given ``schema``. + + This is an internal dialect method. Applications should use + :meth:`.Inspector.get_multi_pk_constraint`. + + .. note:: The :class:`_engine.DefaultDialect` provides a default + implementation that will call the single table method for + each object returned by :meth:`Dialect.get_table_names`, + :meth:`Dialect.get_view_names` or + :meth:`Dialect.get_materialized_view_names` depending on the + provided ``kind``. Dialects that want to support a faster + implementation should implement this method. + + .. versionadded:: 2.0 + + """ + raise NotImplementedError() + + def get_foreign_keys( + self, + connection: Connection, + table_name: str, + schema: Optional[str] = None, + **kw: Any, + ) -> List[ReflectedForeignKeyConstraint]: + """Return information about foreign_keys in ``table_name``. + + Given a :class:`_engine.Connection`, a string + ``table_name``, and an optional string ``schema``, return foreign + key information as a list of dicts corresponding to the + :class:`.ReflectedForeignKeyConstraint` dictionary. + + This is an internal dialect method. Applications should use + :meth:`_engine.Inspector.get_foreign_keys`. 
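        For example (a sketch; the table name is a placeholder), the
        corresponding public API is used as follows::

            from sqlalchemy import inspect

            insp = inspect(engine)
            for fk in insp.get_foreign_keys("order_items"):
                print(
                    fk["constrained_columns"],
                    "->",
                    fk["referred_table"],
                    fk["referred_columns"],
                )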
+ """ + + raise NotImplementedError() + + def get_multi_foreign_keys( + self, + connection: Connection, + *, + schema: Optional[str] = None, + filter_names: Optional[Collection[str]] = None, + **kw: Any, + ) -> Iterable[Tuple[TableKey, List[ReflectedForeignKeyConstraint]]]: + """Return information about foreign_keys in all tables + in the given ``schema``. + + This is an internal dialect method. Applications should use + :meth:`_engine.Inspector.get_multi_foreign_keys`. + + .. note:: The :class:`_engine.DefaultDialect` provides a default + implementation that will call the single table method for + each object returned by :meth:`Dialect.get_table_names`, + :meth:`Dialect.get_view_names` or + :meth:`Dialect.get_materialized_view_names` depending on the + provided ``kind``. Dialects that want to support a faster + implementation should implement this method. + + .. versionadded:: 2.0 + + """ + + raise NotImplementedError() + + def get_table_names( + self, connection: Connection, schema: Optional[str] = None, **kw: Any + ) -> List[str]: + """Return a list of table names for ``schema``. + + This is an internal dialect method. Applications should use + :meth:`_engine.Inspector.get_table_names`. + + """ + + raise NotImplementedError() + + def get_temp_table_names( + self, connection: Connection, schema: Optional[str] = None, **kw: Any + ) -> List[str]: + """Return a list of temporary table names on the given connection, + if supported by the underlying backend. + + This is an internal dialect method. Applications should use + :meth:`_engine.Inspector.get_temp_table_names`. + + """ + + raise NotImplementedError() + + def get_view_names( + self, connection: Connection, schema: Optional[str] = None, **kw: Any + ) -> List[str]: + """Return a list of all non-materialized view names available in the + database. + + This is an internal dialect method. Applications should use + :meth:`_engine.Inspector.get_view_names`. + + :param schema: schema name to query, if not the default schema. + + """ + + raise NotImplementedError() + + def get_materialized_view_names( + self, connection: Connection, schema: Optional[str] = None, **kw: Any + ) -> List[str]: + """Return a list of all materialized view names available in the + database. + + This is an internal dialect method. Applications should use + :meth:`_engine.Inspector.get_materialized_view_names`. + + :param schema: schema name to query, if not the default schema. + + .. versionadded:: 2.0 + + """ + + raise NotImplementedError() + + def get_sequence_names( + self, connection: Connection, schema: Optional[str] = None, **kw: Any + ) -> List[str]: + """Return a list of all sequence names available in the database. + + This is an internal dialect method. Applications should use + :meth:`_engine.Inspector.get_sequence_names`. + + :param schema: schema name to query, if not the default schema. + + .. versionadded:: 1.4 + """ + + raise NotImplementedError() + + def get_temp_view_names( + self, connection: Connection, schema: Optional[str] = None, **kw: Any + ) -> List[str]: + """Return a list of temporary view names on the given connection, + if supported by the underlying backend. + + This is an internal dialect method. Applications should use + :meth:`_engine.Inspector.get_temp_view_names`. + + """ + + raise NotImplementedError() + + def get_schema_names(self, connection: Connection, **kw: Any) -> List[str]: + """Return a list of all schema names available in the database. + + This is an internal dialect method. 
Applications should use + :meth:`_engine.Inspector.get_schema_names`. + """ + raise NotImplementedError() + + def get_view_definition( + self, + connection: Connection, + view_name: str, + schema: Optional[str] = None, + **kw: Any, + ) -> str: + """Return plain or materialized view definition. + + This is an internal dialect method. Applications should use + :meth:`_engine.Inspector.get_view_definition`. + + Given a :class:`_engine.Connection`, a string + ``view_name``, and an optional string ``schema``, return the view + definition. + """ + + raise NotImplementedError() + + def get_indexes( + self, + connection: Connection, + table_name: str, + schema: Optional[str] = None, + **kw: Any, + ) -> List[ReflectedIndex]: + """Return information about indexes in ``table_name``. + + Given a :class:`_engine.Connection`, a string + ``table_name`` and an optional string ``schema``, return index + information as a list of dictionaries corresponding to the + :class:`.ReflectedIndex` dictionary. + + This is an internal dialect method. Applications should use + :meth:`.Inspector.get_indexes`. + """ + + raise NotImplementedError() + + def get_multi_indexes( + self, + connection: Connection, + *, + schema: Optional[str] = None, + filter_names: Optional[Collection[str]] = None, + **kw: Any, + ) -> Iterable[Tuple[TableKey, List[ReflectedIndex]]]: + """Return information about indexes in in all tables + in the given ``schema``. + + This is an internal dialect method. Applications should use + :meth:`.Inspector.get_multi_indexes`. + + .. note:: The :class:`_engine.DefaultDialect` provides a default + implementation that will call the single table method for + each object returned by :meth:`Dialect.get_table_names`, + :meth:`Dialect.get_view_names` or + :meth:`Dialect.get_materialized_view_names` depending on the + provided ``kind``. Dialects that want to support a faster + implementation should implement this method. + + .. versionadded:: 2.0 + + """ + + raise NotImplementedError() + + def get_unique_constraints( + self, + connection: Connection, + table_name: str, + schema: Optional[str] = None, + **kw: Any, + ) -> List[ReflectedUniqueConstraint]: + r"""Return information about unique constraints in ``table_name``. + + Given a string ``table_name`` and an optional string ``schema``, return + unique constraint information as a list of dicts corresponding + to the :class:`.ReflectedUniqueConstraint` dictionary. + + This is an internal dialect method. Applications should use + :meth:`.Inspector.get_unique_constraints`. + """ + + raise NotImplementedError() + + def get_multi_unique_constraints( + self, + connection: Connection, + *, + schema: Optional[str] = None, + filter_names: Optional[Collection[str]] = None, + **kw: Any, + ) -> Iterable[Tuple[TableKey, List[ReflectedUniqueConstraint]]]: + """Return information about unique constraints in all tables + in the given ``schema``. + + This is an internal dialect method. Applications should use + :meth:`.Inspector.get_multi_unique_constraints`. + + .. note:: The :class:`_engine.DefaultDialect` provides a default + implementation that will call the single table method for + each object returned by :meth:`Dialect.get_table_names`, + :meth:`Dialect.get_view_names` or + :meth:`Dialect.get_materialized_view_names` depending on the + provided ``kind``. Dialects that want to support a faster + implementation should implement this method. + + .. 
versionadded:: 2.0 + + """ + + raise NotImplementedError() + + def get_check_constraints( + self, + connection: Connection, + table_name: str, + schema: Optional[str] = None, + **kw: Any, + ) -> List[ReflectedCheckConstraint]: + r"""Return information about check constraints in ``table_name``. + + Given a string ``table_name`` and an optional string ``schema``, return + check constraint information as a list of dicts corresponding + to the :class:`.ReflectedCheckConstraint` dictionary. + + This is an internal dialect method. Applications should use + :meth:`.Inspector.get_check_constraints`. + + """ + + raise NotImplementedError() + + def get_multi_check_constraints( + self, + connection: Connection, + *, + schema: Optional[str] = None, + filter_names: Optional[Collection[str]] = None, + **kw: Any, + ) -> Iterable[Tuple[TableKey, List[ReflectedCheckConstraint]]]: + """Return information about check constraints in all tables + in the given ``schema``. + + This is an internal dialect method. Applications should use + :meth:`.Inspector.get_multi_check_constraints`. + + .. note:: The :class:`_engine.DefaultDialect` provides a default + implementation that will call the single table method for + each object returned by :meth:`Dialect.get_table_names`, + :meth:`Dialect.get_view_names` or + :meth:`Dialect.get_materialized_view_names` depending on the + provided ``kind``. Dialects that want to support a faster + implementation should implement this method. + + .. versionadded:: 2.0 + + """ + + raise NotImplementedError() + + def get_table_options( + self, + connection: Connection, + table_name: str, + schema: Optional[str] = None, + **kw: Any, + ) -> Dict[str, Any]: + """Return a dictionary of options specified when ``table_name`` + was created. + + This is an internal dialect method. Applications should use + :meth:`_engine.Inspector.get_table_options`. + """ + raise NotImplementedError() + + def get_multi_table_options( + self, + connection: Connection, + *, + schema: Optional[str] = None, + filter_names: Optional[Collection[str]] = None, + **kw: Any, + ) -> Iterable[Tuple[TableKey, Dict[str, Any]]]: + """Return a dictionary of options specified when the tables in the + given schema were created. + + This is an internal dialect method. Applications should use + :meth:`_engine.Inspector.get_multi_table_options`. + + .. note:: The :class:`_engine.DefaultDialect` provides a default + implementation that will call the single table method for + each object returned by :meth:`Dialect.get_table_names`, + :meth:`Dialect.get_view_names` or + :meth:`Dialect.get_materialized_view_names` depending on the + provided ``kind``. Dialects that want to support a faster + implementation should implement this method. + + .. versionadded:: 2.0 + + """ + raise NotImplementedError() + + def get_table_comment( + self, + connection: Connection, + table_name: str, + schema: Optional[str] = None, + **kw: Any, + ) -> ReflectedTableComment: + r"""Return the "comment" for the table identified by ``table_name``. + + Given a string ``table_name`` and an optional string ``schema``, return + table comment information as a dictionary corresponding to the + :class:`.ReflectedTableComment` dictionary. + + This is an internal dialect method. Applications should use + :meth:`.Inspector.get_table_comment`. + + :raise: ``NotImplementedError`` for dialects that don't support + comments. + + .. 
versionadded:: 1.2 + + """ + + raise NotImplementedError() + + def get_multi_table_comment( + self, + connection: Connection, + *, + schema: Optional[str] = None, + filter_names: Optional[Collection[str]] = None, + **kw: Any, + ) -> Iterable[Tuple[TableKey, ReflectedTableComment]]: + """Return information about the table comment in all tables + in the given ``schema``. + + This is an internal dialect method. Applications should use + :meth:`_engine.Inspector.get_multi_table_comment`. + + .. note:: The :class:`_engine.DefaultDialect` provides a default + implementation that will call the single table method for + each object returned by :meth:`Dialect.get_table_names`, + :meth:`Dialect.get_view_names` or + :meth:`Dialect.get_materialized_view_names` depending on the + provided ``kind``. Dialects that want to support a faster + implementation should implement this method. + + .. versionadded:: 2.0 + + """ + + raise NotImplementedError() + + def normalize_name(self, name: str) -> str: + """convert the given name to lowercase if it is detected as + case insensitive. + + This method is only used if the dialect defines + requires_name_normalize=True. + + """ + raise NotImplementedError() + + def denormalize_name(self, name: str) -> str: + """convert the given name to a case insensitive identifier + for the backend if it is an all-lowercase name. + + This method is only used if the dialect defines + requires_name_normalize=True. + + """ + raise NotImplementedError() + + def has_table( + self, + connection: Connection, + table_name: str, + schema: Optional[str] = None, + **kw: Any, + ) -> bool: + """For internal dialect use, check the existence of a particular table + or view in the database. + + Given a :class:`_engine.Connection` object, a string table_name and + optional schema name, return True if the given table exists in the + database, False otherwise. + + This method serves as the underlying implementation of the + public facing :meth:`.Inspector.has_table` method, and is also used + internally to implement the "checkfirst" behavior for methods like + :meth:`_schema.Table.create` and :meth:`_schema.MetaData.create_all`. + + .. note:: This method is used internally by SQLAlchemy, and is + published so that third-party dialects may provide an + implementation. It is **not** the public API for checking for table + presence. Please use the :meth:`.Inspector.has_table` method. + + .. versionchanged:: 2.0:: :meth:`_engine.Dialect.has_table` now + formally supports checking for additional table-like objects: + + * any type of views (plain or materialized) + * temporary tables of any kind + + Previously, these two checks were not formally specified and + different dialects would vary in their behavior. The dialect + testing suite now includes tests for all of these object types, + and dialects to the degree that the backing database supports views + or temporary tables should seek to support locating these objects + for full compliance. + + """ + + raise NotImplementedError() + + def has_index( + self, + connection: Connection, + table_name: str, + index_name: str, + schema: Optional[str] = None, + **kw: Any, + ) -> bool: + """Check the existence of a particular index name in the database. + + Given a :class:`_engine.Connection` object, a string + ``table_name`` and string index name, return ``True`` if an index of + the given name on the given table exists, ``False`` otherwise. 
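# A minimal usage sketch of the public counterparts of the existence checks
# described above: applications go through the Inspector rather than calling
# Dialect.has_table() / Dialect.has_index() directly.  An in-memory SQLite
# database is used purely for illustration.
from sqlalchemy import Column, Index, Integer, MetaData, String, Table, create_engine, inspect

engine = create_engine("sqlite://")
metadata = MetaData()
accounts = Table(
    "user_account",
    metadata,
    Column("id", Integer, primary_key=True),
    Column("email", String(100)),
)
Index("ix_user_account_email", accounts.c.email)
metadata.create_all(engine)

insp = inspect(engine)
print(insp.has_table("user_account"))                           # True
print(insp.has_index("user_account", "ix_user_account_email"))  # True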
+ + The :class:`.DefaultDialect` implements this in terms of the + :meth:`.Dialect.has_table` and :meth:`.Dialect.get_indexes` methods, + however dialects can implement a more performant version. + + This is an internal dialect method. Applications should use + :meth:`_engine.Inspector.has_index`. + + .. versionadded:: 1.4 + + """ + + raise NotImplementedError() + + def has_sequence( + self, + connection: Connection, + sequence_name: str, + schema: Optional[str] = None, + **kw: Any, + ) -> bool: + """Check the existence of a particular sequence in the database. + + Given a :class:`_engine.Connection` object and a string + `sequence_name`, return ``True`` if the given sequence exists in + the database, ``False`` otherwise. + + This is an internal dialect method. Applications should use + :meth:`_engine.Inspector.has_sequence`. + """ + + raise NotImplementedError() + + def has_schema( + self, connection: Connection, schema_name: str, **kw: Any + ) -> bool: + """Check the existence of a particular schema name in the database. + + Given a :class:`_engine.Connection` object, a string + ``schema_name``, return ``True`` if a schema of the + given exists, ``False`` otherwise. + + The :class:`.DefaultDialect` implements this by checking + the presence of ``schema_name`` among the schemas returned by + :meth:`.Dialect.get_schema_names`, + however dialects can implement a more performant version. + + This is an internal dialect method. Applications should use + :meth:`_engine.Inspector.has_schema`. + + .. versionadded:: 2.0 + + """ + + raise NotImplementedError() + + def _get_server_version_info(self, connection: Connection) -> Any: + """Retrieve the server version info from the given connection. + + This is used by the default implementation to populate the + "server_version_info" attribute and is called exactly + once upon first connect. + + """ + + raise NotImplementedError() + + def _get_default_schema_name(self, connection: Connection) -> str: + """Return the string name of the currently selected schema from + the given connection. + + This is used by the default implementation to populate the + "default_schema_name" attribute and is called exactly + once upon first connect. + + """ + + raise NotImplementedError() + + def do_begin(self, dbapi_connection: PoolProxiedConnection) -> None: + """Provide an implementation of ``connection.begin()``, given a + DB-API connection. + + The DBAPI has no dedicated "begin" method and it is expected + that transactions are implicit. This hook is provided for those + DBAPIs that might need additional help in this area. + + :param dbapi_connection: a DBAPI connection, typically + proxied within a :class:`.ConnectionFairy`. + + """ + + raise NotImplementedError() + + def do_rollback(self, dbapi_connection: PoolProxiedConnection) -> None: + """Provide an implementation of ``connection.rollback()``, given + a DB-API connection. + + :param dbapi_connection: a DBAPI connection, typically + proxied within a :class:`.ConnectionFairy`. + + """ + + raise NotImplementedError() + + def do_commit(self, dbapi_connection: PoolProxiedConnection) -> None: + """Provide an implementation of ``connection.commit()``, given a + DB-API connection. + + :param dbapi_connection: a DBAPI connection, typically + proxied within a :class:`.ConnectionFairy`. 
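# A minimal sketch of the fallback strategy described for has_schema(): test
# the requested name against the reflected schema list.  "MyDialect" is a
# hypothetical dialect class; a production dialect would usually issue a
# targeted catalog query instead of reflecting all schema names.
from sqlalchemy.engine.default import DefaultDialect


class MyDialect(DefaultDialect):
    name = "mydialect"

    def has_schema(self, connection, schema_name, **kw):
        # membership test against get_schema_names(), mirroring the
        # DefaultDialect behavior noted above
        return schema_name in self.get_schema_names(connection, **kw)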
+ + """ + + raise NotImplementedError() + + def do_terminate(self, dbapi_connection: DBAPIConnection) -> None: + """Provide an implementation of ``connection.close()`` that tries as + much as possible to not block, given a DBAPI + connection. + + In the vast majority of cases this just calls .close(), however + for some asyncio dialects may call upon different API features. + + This hook is called by the :class:`_pool.Pool` + when a connection is being recycled or has been invalidated. + + .. versionadded:: 1.4.41 + + """ + + raise NotImplementedError() + + def do_close(self, dbapi_connection: DBAPIConnection) -> None: + """Provide an implementation of ``connection.close()``, given a DBAPI + connection. + + This hook is called by the :class:`_pool.Pool` + when a connection has been + detached from the pool, or is being returned beyond the normal + capacity of the pool. + + """ + + raise NotImplementedError() + + def _do_ping_w_event(self, dbapi_connection: DBAPIConnection) -> bool: + raise NotImplementedError() + + def do_ping(self, dbapi_connection: DBAPIConnection) -> bool: + """ping the DBAPI connection and return True if the connection is + usable.""" + raise NotImplementedError() + + def do_set_input_sizes( + self, + cursor: DBAPICursor, + list_of_tuples: _GenericSetInputSizesType, + context: ExecutionContext, + ) -> Any: + """invoke the cursor.setinputsizes() method with appropriate arguments + + This hook is called if the :attr:`.Dialect.bind_typing` attribute is + set to the + :attr:`.BindTyping.SETINPUTSIZES` value. + Parameter data is passed in a list of tuples (paramname, dbtype, + sqltype), where ``paramname`` is the key of the parameter in the + statement, ``dbtype`` is the DBAPI datatype and ``sqltype`` is the + SQLAlchemy type. The order of tuples is in the correct parameter order. + + .. versionadded:: 1.4 + + .. versionchanged:: 2.0 - setinputsizes mode is now enabled by + setting :attr:`.Dialect.bind_typing` to + :attr:`.BindTyping.SETINPUTSIZES`. Dialects which accept + a ``use_setinputsizes`` parameter should set this value + appropriately. + + + """ + raise NotImplementedError() + + def create_xid(self) -> Any: + """Create a two-phase transaction ID. + + This id will be passed to do_begin_twophase(), + do_rollback_twophase(), do_commit_twophase(). Its format is + unspecified. + """ + + raise NotImplementedError() + + def do_savepoint(self, connection: Connection, name: str) -> None: + """Create a savepoint with the given name. + + :param connection: a :class:`_engine.Connection`. + :param name: savepoint name. + + """ + + raise NotImplementedError() + + def do_rollback_to_savepoint( + self, connection: Connection, name: str + ) -> None: + """Rollback a connection to the named savepoint. + + :param connection: a :class:`_engine.Connection`. + :param name: savepoint name. + + """ + + raise NotImplementedError() + + def do_release_savepoint(self, connection: Connection, name: str) -> None: + """Release the named savepoint on a connection. + + :param connection: a :class:`_engine.Connection`. + :param name: savepoint name. + """ + + raise NotImplementedError() + + def do_begin_twophase(self, connection: Connection, xid: Any) -> None: + """Begin a two phase transaction on the given connection. + + :param connection: a :class:`_engine.Connection`. + :param xid: xid + + """ + + raise NotImplementedError() + + def do_prepare_twophase(self, connection: Connection, xid: Any) -> None: + """Prepare a two phase transaction on the given connection. 
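# A sketch, in the spirit of the default implementation, of the savepoint
# hooks documented above: each hook emits the corresponding SAVEPOINT
# statement on the given Connection.  "MyDialect" is hypothetical, and real
# dialects additionally quote the savepoint identifier via their preparer.
from sqlalchemy.engine.default import DefaultDialect


class MyDialect(DefaultDialect):
    name = "mydialect"

    def do_savepoint(self, connection, name):
        connection.exec_driver_sql(f"SAVEPOINT {name}")

    def do_rollback_to_savepoint(self, connection, name):
        connection.exec_driver_sql(f"ROLLBACK TO SAVEPOINT {name}")

    def do_release_savepoint(self, connection, name):
        connection.exec_driver_sql(f"RELEASE SAVEPOINT {name}")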
+ + :param connection: a :class:`_engine.Connection`. + :param xid: xid + + """ + + raise NotImplementedError() + + def do_rollback_twophase( + self, + connection: Connection, + xid: Any, + is_prepared: bool = True, + recover: bool = False, + ) -> None: + """Rollback a two phase transaction on the given connection. + + :param connection: a :class:`_engine.Connection`. + :param xid: xid + :param is_prepared: whether or not + :meth:`.TwoPhaseTransaction.prepare` was called. + :param recover: if the recover flag was passed. + + """ + + raise NotImplementedError() + + def do_commit_twophase( + self, + connection: Connection, + xid: Any, + is_prepared: bool = True, + recover: bool = False, + ) -> None: + """Commit a two phase transaction on the given connection. + + + :param connection: a :class:`_engine.Connection`. + :param xid: xid + :param is_prepared: whether or not + :meth:`.TwoPhaseTransaction.prepare` was called. + :param recover: if the recover flag was passed. + + """ + + raise NotImplementedError() + + def do_recover_twophase(self, connection: Connection) -> List[Any]: + """Recover list of uncommitted prepared two phase transaction + identifiers on the given connection. + + :param connection: a :class:`_engine.Connection`. + + """ + + raise NotImplementedError() + + def _deliver_insertmanyvalues_batches( + self, + connection: Connection, + cursor: DBAPICursor, + statement: str, + parameters: _DBAPIMultiExecuteParams, + generic_setinputsizes: Optional[_GenericSetInputSizesType], + context: ExecutionContext, + ) -> Iterator[_InsertManyValuesBatch]: + """convert executemany parameters for an INSERT into an iterator + of statement/single execute values, used by the insertmanyvalues + feature. + + """ + raise NotImplementedError() + + def do_executemany( + self, + cursor: DBAPICursor, + statement: str, + parameters: _DBAPIMultiExecuteParams, + context: Optional[ExecutionContext] = None, + ) -> None: + """Provide an implementation of ``cursor.executemany(statement, + parameters)``.""" + + raise NotImplementedError() + + def do_execute( + self, + cursor: DBAPICursor, + statement: str, + parameters: Optional[_DBAPISingleExecuteParams], + context: Optional[ExecutionContext] = None, + ) -> None: + """Provide an implementation of ``cursor.execute(statement, + parameters)``.""" + + raise NotImplementedError() + + def do_execute_no_params( + self, + cursor: DBAPICursor, + statement: str, + context: Optional[ExecutionContext] = None, + ) -> None: + """Provide an implementation of ``cursor.execute(statement)``. + + The parameter collection should not be sent. + + """ + + raise NotImplementedError() + + def is_disconnect( + self, + e: Exception, + connection: Optional[Union[PoolProxiedConnection, DBAPIConnection]], + cursor: Optional[DBAPICursor], + ) -> bool: + """Return True if the given DB-API error indicates an invalid + connection""" + + raise NotImplementedError() + + def connect(self, *cargs: Any, **cparams: Any) -> DBAPIConnection: + r"""Establish a connection using this dialect's DBAPI. + + The default implementation of this method is:: + + def connect(self, *cargs, **cparams): + return self.dbapi.connect(*cargs, **cparams) + + The ``*cargs, **cparams`` parameters are generated directly + from this dialect's :meth:`.Dialect.create_connect_args` method. + + This method may be used for dialects that need to perform programmatic + per-connection steps when a new connection is procured from the + DBAPI. 
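# A sketch of the per-connection hook described for connect(): a hypothetical
# dialect that runs one extra setup step each time a raw DBAPI connection is
# procured.  It assumes the dialect's DBAPI has already been imported; the
# setup statement is a placeholder.
from sqlalchemy.engine.default import DefaultDialect


class MyDialect(DefaultDialect):
    name = "mydialect"

    def connect(self, *cargs, **cparams):
        dbapi_conn = self.dbapi.connect(*cargs, **cparams)
        cursor = dbapi_conn.cursor()
        cursor.execute("SET search_path TO my_schema")  # hypothetical setup statement
        cursor.close()
        return dbapi_conn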
+ + + :param \*cargs: positional parameters returned from the + :meth:`.Dialect.create_connect_args` method + + :param \*\*cparams: keyword parameters returned from the + :meth:`.Dialect.create_connect_args` method. + + :return: a DBAPI connection, typically from the :pep:`249` module + level ``.connect()`` function. + + .. seealso:: + + :meth:`.Dialect.create_connect_args` + + :meth:`.Dialect.on_connect` + + """ + raise NotImplementedError() + + def on_connect_url(self, url: URL) -> Optional[Callable[[Any], Any]]: + """return a callable which sets up a newly created DBAPI connection. + + This method is a new hook that supersedes the + :meth:`_engine.Dialect.on_connect` method when implemented by a + dialect. When not implemented by a dialect, it invokes the + :meth:`_engine.Dialect.on_connect` method directly to maintain + compatibility with existing dialects. There is no deprecation + for :meth:`_engine.Dialect.on_connect` expected. + + The callable should accept a single argument "conn" which is the + DBAPI connection itself. The inner callable has no + return value. + + E.g.:: + + class MyDialect(default.DefaultDialect): + # ... + + def on_connect_url(self, url): + def do_on_connect(connection): + connection.execute("SET SPECIAL FLAGS etc") + + return do_on_connect + + This is used to set dialect-wide per-connection options such as + isolation modes, Unicode modes, etc. + + This method differs from :meth:`_engine.Dialect.on_connect` in that + it is passed the :class:`_engine.URL` object that's relevant to the + connect args. Normally the only way to get this is from the + :meth:`_engine.Dialect.on_connect` hook is to look on the + :class:`_engine.Engine` itself, however this URL object may have been + replaced by plugins. + + .. note:: + + The default implementation of + :meth:`_engine.Dialect.on_connect_url` is to invoke the + :meth:`_engine.Dialect.on_connect` method. Therefore if a dialect + implements this method, the :meth:`_engine.Dialect.on_connect` + method **will not be called** unless the overriding dialect calls + it directly from here. + + .. versionadded:: 1.4.3 added :meth:`_engine.Dialect.on_connect_url` + which normally calls into :meth:`_engine.Dialect.on_connect`. + + :param url: a :class:`_engine.URL` object representing the + :class:`_engine.URL` that was passed to the + :meth:`_engine.Dialect.create_connect_args` method. + + :return: a callable that accepts a single DBAPI connection as an + argument, or None. + + .. seealso:: + + :meth:`_engine.Dialect.on_connect` + + """ + return self.on_connect() + + def on_connect(self) -> Optional[Callable[[Any], Any]]: + """return a callable which sets up a newly created DBAPI connection. + + The callable should accept a single argument "conn" which is the + DBAPI connection itself. The inner callable has no + return value. + + E.g.:: + + class MyDialect(default.DefaultDialect): + # ... + + def on_connect(self): + def do_on_connect(connection): + connection.execute("SET SPECIAL FLAGS etc") + + return do_on_connect + + This is used to set dialect-wide per-connection options such as + isolation modes, Unicode modes, etc. + + The "do_on_connect" callable is invoked by using the + :meth:`_events.PoolEvents.connect` event + hook, then unwrapping the DBAPI connection and passing it into the + callable. + + .. versionchanged:: 1.4 the on_connect hook is no longer called twice + for the first connection of a dialect. The on_connect hook is still + called before the :meth:`_engine.Dialect.initialize` method however. + + .. 
versionchanged:: 1.4.3 the on_connect hook is invoked from a new + method on_connect_url that passes the URL that was used to create + the connect args. Dialects can implement on_connect_url instead + of on_connect if they need the URL object that was used for the + connection in order to get additional context. + + If None is returned, no event listener is generated. + + :return: a callable that accepts a single DBAPI connection as an + argument, or None. + + .. seealso:: + + :meth:`.Dialect.connect` - allows the DBAPI ``connect()`` sequence + itself to be controlled. + + :meth:`.Dialect.on_connect_url` - supersedes + :meth:`.Dialect.on_connect` to also receive the + :class:`_engine.URL` object in context. + + """ + return None + + def reset_isolation_level(self, dbapi_connection: DBAPIConnection) -> None: + """Given a DBAPI connection, revert its isolation to the default. + + Note that this is a dialect-level method which is used as part + of the implementation of the :class:`_engine.Connection` and + :class:`_engine.Engine` + isolation level facilities; these APIs should be preferred for + most typical use cases. + + .. seealso:: + + :meth:`_engine.Connection.get_isolation_level` + - view current level + + :attr:`_engine.Connection.default_isolation_level` + - view default level + + :paramref:`.Connection.execution_options.isolation_level` - + set per :class:`_engine.Connection` isolation level + + :paramref:`_sa.create_engine.isolation_level` - + set per :class:`_engine.Engine` isolation level + + """ + + raise NotImplementedError() + + def set_isolation_level( + self, dbapi_connection: DBAPIConnection, level: IsolationLevel + ) -> None: + """Given a DBAPI connection, set its isolation level. + + Note that this is a dialect-level method which is used as part + of the implementation of the :class:`_engine.Connection` and + :class:`_engine.Engine` + isolation level facilities; these APIs should be preferred for + most typical use cases. + + If the dialect also implements the + :meth:`.Dialect.get_isolation_level_values` method, then the given + level is guaranteed to be one of the string names within that sequence, + and the method will not need to anticipate a lookup failure. + + .. seealso:: + + :meth:`_engine.Connection.get_isolation_level` + - view current level + + :attr:`_engine.Connection.default_isolation_level` + - view default level + + :paramref:`.Connection.execution_options.isolation_level` - + set per :class:`_engine.Connection` isolation level + + :paramref:`_sa.create_engine.isolation_level` - + set per :class:`_engine.Engine` isolation level + + """ + + raise NotImplementedError() + + def get_isolation_level( + self, dbapi_connection: DBAPIConnection + ) -> IsolationLevel: + """Given a DBAPI connection, return its isolation level. + + When working with a :class:`_engine.Connection` object, + the corresponding + DBAPI connection may be procured using the + :attr:`_engine.Connection.connection` accessor. + + Note that this is a dialect-level method which is used as part + of the implementation of the :class:`_engine.Connection` and + :class:`_engine.Engine` isolation level facilities; + these APIs should be preferred for most typical use cases. + + + .. 
seealso:: + + :meth:`_engine.Connection.get_isolation_level` + - view current level + + :attr:`_engine.Connection.default_isolation_level` + - view default level + + :paramref:`.Connection.execution_options.isolation_level` - + set per :class:`_engine.Connection` isolation level + + :paramref:`_sa.create_engine.isolation_level` - + set per :class:`_engine.Engine` isolation level + + + """ + + raise NotImplementedError() + + def get_default_isolation_level( + self, dbapi_conn: DBAPIConnection + ) -> IsolationLevel: + """Given a DBAPI connection, return its isolation level, or + a default isolation level if one cannot be retrieved. + + This method may only raise NotImplementedError and + **must not raise any other exception**, as it is used implicitly upon + first connect. + + The method **must return a value** for a dialect that supports + isolation level settings, as this level is what will be reverted + towards when a per-connection isolation level change is made. + + The method defaults to using the :meth:`.Dialect.get_isolation_level` + method unless overridden by a dialect. + + .. versionadded:: 1.3.22 + + """ + raise NotImplementedError() + + def get_isolation_level_values( + self, dbapi_conn: DBAPIConnection + ) -> Sequence[IsolationLevel]: + """return a sequence of string isolation level names that are accepted + by this dialect. + + The available names should use the following conventions: + + * use UPPERCASE names. isolation level methods will accept lowercase + names but these are normalized into UPPERCASE before being passed + along to the dialect. + * separate words should be separated by spaces, not underscores, e.g. + ``REPEATABLE READ``. isolation level names will have underscores + converted to spaces before being passed along to the dialect. + * The names for the four standard isolation names to the extent that + they are supported by the backend should be ``READ UNCOMMITTED``, + ``READ COMMITTED``, ``REPEATABLE READ``, ``SERIALIZABLE`` + * if the dialect supports an autocommit option it should be provided + using the isolation level name ``AUTOCOMMIT``. + * Other isolation modes may also be present, provided that they + are named in UPPERCASE and use spaces not underscores. + + This function is used so that the default dialect can check that + a given isolation level parameter is valid, else raises an + :class:`_exc.ArgumentError`. + + A DBAPI connection is passed to the method, in the unlikely event that + the dialect needs to interrogate the connection itself to determine + this list, however it is expected that most backends will return + a hardcoded list of values. If the dialect supports "AUTOCOMMIT", + that value should also be present in the sequence returned. + + The method raises ``NotImplementedError`` by default. If a dialect + does not implement this method, then the default dialect will not + perform any checking on a given isolation level value before passing + it onto the :meth:`.Dialect.set_isolation_level` method. This is + to allow backwards-compatibility with third party dialects that may + not yet be implementing this method. + + .. versionadded:: 2.0 + + """ + raise NotImplementedError() + + def _assert_and_set_isolation_level( + self, dbapi_conn: DBAPIConnection, level: IsolationLevel + ) -> None: + raise NotImplementedError() + + @classmethod + def get_dialect_cls(cls, url: URL) -> Type[Dialect]: + """Given a URL, return the :class:`.Dialect` that will be used. 
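# Usage-level illustration of the isolation-level plumbing described above:
# the public entry points are execution_options() and create_engine(), which
# end up calling the dialect's set_isolation_level() with one of the names
# returned by get_isolation_level_values().  The URL is a placeholder for a
# real database.
from sqlalchemy import create_engine, text

engine = create_engine("postgresql+psycopg2://scott:tiger@localhost/test")

# per-connection isolation level
with engine.connect().execution_options(isolation_level="REPEATABLE READ") as conn:
    conn.execute(text("select 1"))

# engine-wide default isolation level
serializable_engine = create_engine(
    "postgresql+psycopg2://scott:tiger@localhost/test",
    isolation_level="SERIALIZABLE",
)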
+ + This is a hook that allows an external plugin to provide functionality + around an existing dialect, by allowing the plugin to be loaded + from the url based on an entrypoint, and then the plugin returns + the actual dialect to be used. + + By default this just returns the cls. + + """ + return cls + + @classmethod + def get_async_dialect_cls(cls, url: URL) -> Type[Dialect]: + """Given a URL, return the :class:`.Dialect` that will be used by + an async engine. + + By default this is an alias of :meth:`.Dialect.get_dialect_cls` and + just returns the cls. It may be used if a dialect provides + both a sync and async version under the same name, like the + ``psycopg`` driver. + + .. versionadded:: 2 + + .. seealso:: + + :meth:`.Dialect.get_dialect_cls` + + """ + return cls.get_dialect_cls(url) + + @classmethod + def load_provisioning(cls) -> None: + """set up the provision.py module for this dialect. + + For dialects that include a provision.py module that sets up + provisioning followers, this method should initiate that process. + + A typical implementation would be:: + + @classmethod + def load_provisioning(cls): + __import__("mydialect.provision") + + The default method assumes a module named ``provision.py`` inside + the owning package of the current dialect, based on the ``__module__`` + attribute:: + + @classmethod + def load_provisioning(cls): + package = ".".join(cls.__module__.split(".")[0:-1]) + try: + __import__(package + ".provision") + except ImportError: + pass + + .. versionadded:: 1.3.14 + + """ + + @classmethod + def engine_created(cls, engine: Engine) -> None: + """A convenience hook called before returning the final + :class:`_engine.Engine`. + + If the dialect returned a different class from the + :meth:`.get_dialect_cls` + method, then the hook is called on both classes, first on + the dialect class returned by the :meth:`.get_dialect_cls` method and + then on the class on which the method was called. + + The hook should be used by dialects and/or wrappers to apply special + events to the engine or its components. In particular, it allows + a dialect-wrapping class to apply dialect-level events. + + """ + + def get_driver_connection(self, connection: DBAPIConnection) -> Any: + """Returns the connection object as returned by the external driver + package. + + For normal dialects that use a DBAPI compliant driver this call + will just return the ``connection`` passed as argument. + For dialects that instead adapt a non DBAPI compliant driver, like + when adapting an asyncio driver, this call will return the + connection-like object as returned by the driver. + + .. versionadded:: 1.4.24 + + """ + raise NotImplementedError() + + def set_engine_execution_options( + self, engine: Engine, opts: CoreExecuteOptionsParameter + ) -> None: + """Establish execution options for a given engine. + + This is implemented by :class:`.DefaultDialect` to establish + event hooks for new :class:`.Connection` instances created + by the given :class:`.Engine` which will then invoke the + :meth:`.Dialect.set_connection_execution_options` method for that + connection. + + """ + raise NotImplementedError() + + def set_connection_execution_options( + self, connection: Connection, opts: CoreExecuteOptionsParameter + ) -> None: + """Establish execution options for a given connection. + + This is implemented by :class:`.DefaultDialect` in order to implement + the :paramref:`_engine.Connection.execution_options.isolation_level` + execution option. 
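# A sketch of how a third-party dialect becomes discoverable so that the
# get_dialect_cls() lookup above can resolve it.  Distribution normally uses a
# "sqlalchemy.dialects" entry point; the in-process registry shown here is an
# alternative.  "mypackage.mydialect" and "MyDialect" are hypothetical names.
from sqlalchemy.dialects import registry

registry.register("mydialect", "mypackage.mydialect", "MyDialect")
registry.register("mydialect.somedriver", "mypackage.mydialect", "MyDialect")

# afterwards a URL such as "mydialect+somedriver://..." passed to
# create_engine() will import and instantiate the registered class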
Dialects can intercept various execution options + which may need to modify state on a particular DBAPI connection. + + .. versionadded:: 1.4 + + """ + raise NotImplementedError() + + def get_dialect_pool_class(self, url: URL) -> Type[Pool]: + """return a Pool class to use for a given URL""" + raise NotImplementedError() + + def validate_identifier(self, ident: str) -> None: + """Validates an identifier name, raising an exception if invalid""" + + +class CreateEnginePlugin: + """A set of hooks intended to augment the construction of an + :class:`_engine.Engine` object based on entrypoint names in a URL. + + The purpose of :class:`_engine.CreateEnginePlugin` is to allow third-party + systems to apply engine, pool and dialect level event listeners without + the need for the target application to be modified; instead, the plugin + names can be added to the database URL. Target applications for + :class:`_engine.CreateEnginePlugin` include: + + * connection and SQL performance tools, e.g. which use events to track + number of checkouts and/or time spent with statements + + * connectivity plugins such as proxies + + A rudimentary :class:`_engine.CreateEnginePlugin` that attaches a logger + to an :class:`_engine.Engine` object might look like:: + + + import logging + + from sqlalchemy.engine import CreateEnginePlugin + from sqlalchemy import event + + + class LogCursorEventsPlugin(CreateEnginePlugin): + def __init__(self, url, kwargs): + # consume the parameter "log_cursor_logging_name" from the + # URL query + logging_name = url.query.get( + "log_cursor_logging_name", "log_cursor" + ) + + self.log = logging.getLogger(logging_name) + + def update_url(self, url): + "update the URL to one that no longer includes our parameters" + return url.difference_update_query(["log_cursor_logging_name"]) + + def engine_created(self, engine): + "attach an event listener after the new Engine is constructed" + event.listen(engine, "before_cursor_execute", self._log_event) + + def _log_event( + self, + conn, + cursor, + statement, + parameters, + context, + executemany, + ): + + self.log.info("Plugin logged cursor event: %s", statement) + + Plugins are registered using entry points in a similar way as that + of dialects:: + + entry_points = { + "sqlalchemy.plugins": [ + "log_cursor_plugin = myapp.plugins:LogCursorEventsPlugin" + ] + } + + A plugin that uses the above names would be invoked from a database + URL as in:: + + from sqlalchemy import create_engine + + engine = create_engine( + "mysql+pymysql://scott:tiger@localhost/test?" + "plugin=log_cursor_plugin&log_cursor_logging_name=mylogger" + ) + + The ``plugin`` URL parameter supports multiple instances, so that a URL + may specify multiple plugins; they are loaded in the order stated + in the URL:: + + engine = create_engine( + "mysql+pymysql://scott:tiger@localhost/test?" + "plugin=plugin_one&plugin=plugin_twp&plugin=plugin_three" + ) + + The plugin names may also be passed directly to :func:`_sa.create_engine` + using the :paramref:`_sa.create_engine.plugins` argument:: + + engine = create_engine( + "mysql+pymysql://scott:tiger@localhost/test", plugins=["myplugin"] + ) + + .. versionadded:: 1.2.3 plugin names can also be specified + to :func:`_sa.create_engine` as a list + + A plugin may consume plugin-specific arguments from the + :class:`_engine.URL` object as well as the ``kwargs`` dictionary, which is + the dictionary of arguments passed to the :func:`_sa.create_engine` + call. 
"Consuming" these arguments includes that they must be removed + when the plugin initializes, so that the arguments are not passed along + to the :class:`_engine.Dialect` constructor, where they will raise an + :class:`_exc.ArgumentError` because they are not known by the dialect. + + As of version 1.4 of SQLAlchemy, arguments should continue to be consumed + from the ``kwargs`` dictionary directly, by removing the values with a + method such as ``dict.pop``. Arguments from the :class:`_engine.URL` object + should be consumed by implementing the + :meth:`_engine.CreateEnginePlugin.update_url` method, returning a new copy + of the :class:`_engine.URL` with plugin-specific parameters removed:: + + class MyPlugin(CreateEnginePlugin): + def __init__(self, url, kwargs): + self.my_argument_one = url.query["my_argument_one"] + self.my_argument_two = url.query["my_argument_two"] + self.my_argument_three = kwargs.pop("my_argument_three", None) + + def update_url(self, url): + return url.difference_update_query( + ["my_argument_one", "my_argument_two"] + ) + + Arguments like those illustrated above would be consumed from a + :func:`_sa.create_engine` call such as:: + + from sqlalchemy import create_engine + + engine = create_engine( + "mysql+pymysql://scott:tiger@localhost/test?" + "plugin=myplugin&my_argument_one=foo&my_argument_two=bar", + my_argument_three="bat", + ) + + .. versionchanged:: 1.4 + + The :class:`_engine.URL` object is now immutable; a + :class:`_engine.CreateEnginePlugin` that needs to alter the + :class:`_engine.URL` should implement the newly added + :meth:`_engine.CreateEnginePlugin.update_url` method, which + is invoked after the plugin is constructed. + + For migration, construct the plugin in the following way, checking + for the existence of the :meth:`_engine.CreateEnginePlugin.update_url` + method to detect which version is running:: + + class MyPlugin(CreateEnginePlugin): + def __init__(self, url, kwargs): + if hasattr(CreateEnginePlugin, "update_url"): + # detect the 1.4 API + self.my_argument_one = url.query["my_argument_one"] + self.my_argument_two = url.query["my_argument_two"] + else: + # detect the 1.3 and earlier API - mutate the + # URL directly + self.my_argument_one = url.query.pop("my_argument_one") + self.my_argument_two = url.query.pop("my_argument_two") + + self.my_argument_three = kwargs.pop("my_argument_three", None) + + def update_url(self, url): + # this method is only called in the 1.4 version + return url.difference_update_query( + ["my_argument_one", "my_argument_two"] + ) + + .. seealso:: + + :ref:`change_5526` - overview of the :class:`_engine.URL` change which + also includes notes regarding :class:`_engine.CreateEnginePlugin`. + + + When the engine creation process completes and produces the + :class:`_engine.Engine` object, it is again passed to the plugin via the + :meth:`_engine.CreateEnginePlugin.engine_created` hook. In this hook, additional + changes can be made to the engine, most typically involving setup of + events (e.g. those defined in :ref:`core_event_toplevel`). + + """ # noqa: E501 + + def __init__(self, url: URL, kwargs: Dict[str, Any]): + """Construct a new :class:`.CreateEnginePlugin`. + + The plugin object is instantiated individually for each call + to :func:`_sa.create_engine`. A single :class:`_engine. + Engine` will be + passed to the :meth:`.CreateEnginePlugin.engine_created` method + corresponding to this URL. + + :param url: the :class:`_engine.URL` object. The plugin may inspect + the :class:`_engine.URL` for arguments. 
Arguments used by the + plugin should be removed, by returning an updated :class:`_engine.URL` + from the :meth:`_engine.CreateEnginePlugin.update_url` method. + + .. versionchanged:: 1.4 + + The :class:`_engine.URL` object is now immutable, so a + :class:`_engine.CreateEnginePlugin` that needs to alter the + :class:`_engine.URL` object should implement the + :meth:`_engine.CreateEnginePlugin.update_url` method. + + :param kwargs: The keyword arguments passed to + :func:`_sa.create_engine`. + + """ + self.url = url + + def update_url(self, url: URL) -> URL: + """Update the :class:`_engine.URL`. + + A new :class:`_engine.URL` should be returned. This method is + typically used to consume configuration arguments from the + :class:`_engine.URL` which must be removed, as they will not be + recognized by the dialect. The + :meth:`_engine.URL.difference_update_query` method is available + to remove these arguments. See the docstring at + :class:`_engine.CreateEnginePlugin` for an example. + + + .. versionadded:: 1.4 + + """ + raise NotImplementedError() + + def handle_dialect_kwargs( + self, dialect_cls: Type[Dialect], dialect_args: Dict[str, Any] + ) -> None: + """parse and modify dialect kwargs""" + + def handle_pool_kwargs( + self, pool_cls: Type[Pool], pool_args: Dict[str, Any] + ) -> None: + """parse and modify pool kwargs""" + + def engine_created(self, engine: Engine) -> None: + """Receive the :class:`_engine.Engine` + object when it is fully constructed. + + The plugin may make additional changes to the engine, such as + registering engine or connection pool events. + + """ + + +class ExecutionContext: + """A messenger object for a Dialect that corresponds to a single + execution. + + """ + + engine: Engine + """engine which the Connection is associated with""" + + connection: Connection + """Connection object which can be freely used by default value + generators to execute SQL. This Connection should reference the + same underlying connection/transactional resources of + root_connection.""" + + root_connection: Connection + """Connection object which is the source of this ExecutionContext.""" + + dialect: Dialect + """dialect which created this ExecutionContext.""" + + cursor: DBAPICursor + """DB-API cursor procured from the connection""" + + compiled: Optional[Compiled] + """if passed to constructor, sqlalchemy.engine.base.Compiled object + being executed""" + + statement: str + """string version of the statement to be executed. Is either + passed to the constructor, or must be created from the + sql.Compiled object by the time pre_exec() has completed.""" + + invoked_statement: Optional[Executable] + """The Executable statement object that was given in the first place. + + This should be structurally equivalent to compiled.statement, but not + necessarily the same object as in a caching scenario the compiled form + will have been extracted from the cache. + + """ + + parameters: _AnyMultiExecuteParams + """bind parameters passed to the execute() or exec_driver_sql() methods. + + These are always stored as a list of parameter entries. A single-element + list corresponds to a ``cursor.execute()`` call and a multiple-element + list corresponds to ``cursor.executemany()``, except in the case + of :attr:`.ExecuteStyle.INSERTMANYVALUES` which will use + ``cursor.execute()`` one or more times. 
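# Illustration of where the ExecutionContext described above surfaces in
# public-facing code: it is delivered as the "context" argument of the cursor
# execute events.  In-memory SQLite is used purely for demonstration.
from sqlalchemy import create_engine, event, text

engine = create_engine("sqlite://")


@event.listens_for(engine, "before_cursor_execute")
def log_context(conn, cursor, statement, parameters, context, executemany):
    # context is the ExecutionContext for this statement execution
    print("executemany:", context.executemany, "| statement:", statement)


with engine.connect() as conn:
    conn.execute(text("select 1"))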
+ + """ + + no_parameters: bool + """True if the execution style does not use parameters""" + + isinsert: bool + """True if the statement is an INSERT.""" + + isupdate: bool + """True if the statement is an UPDATE.""" + + execute_style: ExecuteStyle + """the style of DBAPI cursor method that will be used to execute + a statement. + + .. versionadded:: 2.0 + + """ + + executemany: bool + """True if the context has a list of more than one parameter set. + + Historically this attribute links to whether ``cursor.execute()`` or + ``cursor.executemany()`` will be used. It also can now mean that + "insertmanyvalues" may be used which indicates one or more + ``cursor.execute()`` calls. + + """ + + prefetch_cols: util.generic_fn_descriptor[Optional[Sequence[Column[Any]]]] + """a list of Column objects for which a client-side default + was fired off. Applies to inserts and updates.""" + + postfetch_cols: util.generic_fn_descriptor[Optional[Sequence[Column[Any]]]] + """a list of Column objects for which a server-side default or + inline SQL expression value was fired off. Applies to inserts + and updates.""" + + execution_options: _ExecuteOptions + """Execution options associated with the current statement execution""" + + @classmethod + def _init_ddl( + cls, + dialect: Dialect, + connection: Connection, + dbapi_connection: PoolProxiedConnection, + execution_options: _ExecuteOptions, + compiled_ddl: DDLCompiler, + ) -> ExecutionContext: + raise NotImplementedError() + + @classmethod + def _init_compiled( + cls, + dialect: Dialect, + connection: Connection, + dbapi_connection: PoolProxiedConnection, + execution_options: _ExecuteOptions, + compiled: SQLCompiler, + parameters: _CoreMultiExecuteParams, + invoked_statement: Executable, + extracted_parameters: Optional[Sequence[BindParameter[Any]]], + cache_hit: CacheStats = CacheStats.CACHING_DISABLED, + ) -> ExecutionContext: + raise NotImplementedError() + + @classmethod + def _init_statement( + cls, + dialect: Dialect, + connection: Connection, + dbapi_connection: PoolProxiedConnection, + execution_options: _ExecuteOptions, + statement: str, + parameters: _DBAPIMultiExecuteParams, + ) -> ExecutionContext: + raise NotImplementedError() + + @classmethod + def _init_default( + cls, + dialect: Dialect, + connection: Connection, + dbapi_connection: PoolProxiedConnection, + execution_options: _ExecuteOptions, + ) -> ExecutionContext: + raise NotImplementedError() + + def _exec_default( + self, + column: Optional[Column[Any]], + default: DefaultGenerator, + type_: Optional[TypeEngine[Any]], + ) -> Any: + raise NotImplementedError() + + def _prepare_set_input_sizes( + self, + ) -> Optional[List[Tuple[str, Any, TypeEngine[Any]]]]: + raise NotImplementedError() + + def _get_cache_stats(self) -> str: + raise NotImplementedError() + + def _setup_result_proxy(self) -> CursorResult[Any]: + raise NotImplementedError() + + def fire_sequence(self, seq: Sequence_SchemaItem, type_: Integer) -> int: + """given a :class:`.Sequence`, invoke it and return the next int + value""" + raise NotImplementedError() + + def create_cursor(self) -> DBAPICursor: + """Return a new cursor generated from this ExecutionContext's + connection. + + Some dialects may wish to change the behavior of + connection.cursor(), such as postgresql which may return a PG + "server side" cursor. + """ + + raise NotImplementedError() + + def pre_exec(self) -> None: + """Called before an execution of a compiled statement. 
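# The server-side cursor behavior mentioned for create_cursor() is normally
# requested through execution options rather than by overriding the dialect;
# a sketch with a placeholder URL and table name, assuming a backend that
# supports streaming results.
from sqlalchemy import create_engine, text

engine = create_engine("postgresql+psycopg2://scott:tiger@localhost/test")

with engine.connect() as conn:
    result = conn.execution_options(stream_results=True).execute(
        text("select * from big_table")  # hypothetical table
    )
    for chunk in result.partitions(1000):
        print(len(chunk))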
+ + If a compiled statement was passed to this ExecutionContext, + the `statement` and `parameters` datamembers must be + initialized after this statement is complete. + """ + + raise NotImplementedError() + + def get_out_parameter_values( + self, out_param_names: Sequence[str] + ) -> Sequence[Any]: + """Return a sequence of OUT parameter values from a cursor. + + For dialects that support OUT parameters, this method will be called + when there is a :class:`.SQLCompiler` object which has the + :attr:`.SQLCompiler.has_out_parameters` flag set. This flag in turn + will be set to True if the statement itself has :class:`.BindParameter` + objects that have the ``.isoutparam`` flag set which are consumed by + the :meth:`.SQLCompiler.visit_bindparam` method. If the dialect + compiler produces :class:`.BindParameter` objects with ``.isoutparam`` + set which are not handled by :meth:`.SQLCompiler.visit_bindparam`, it + should set this flag explicitly. + + The list of names that were rendered for each bound parameter + is passed to the method. The method should then return a sequence of + values corresponding to the list of parameter objects. Unlike in + previous SQLAlchemy versions, the values can be the **raw values** from + the DBAPI; the execution context will apply the appropriate type + handler based on what's present in self.compiled.binds and update the + values. The processed dictionary will then be made available via the + ``.out_parameters`` collection on the result object. Note that + SQLAlchemy 1.4 has multiple kinds of result object as part of the 2.0 + transition. + + .. versionadded:: 1.4 - added + :meth:`.ExecutionContext.get_out_parameter_values`, which is invoked + automatically by the :class:`.DefaultExecutionContext` when there + are :class:`.BindParameter` objects with the ``.isoutparam`` flag + set. This replaces the practice of setting out parameters within + the now-removed ``get_result_proxy()`` method. + + """ + raise NotImplementedError() + + def post_exec(self) -> None: + """Called after the execution of a compiled statement. + + If a compiled statement was passed to this ExecutionContext, + the `last_insert_ids`, `last_inserted_params`, etc. + datamembers should be available after this method completes. + """ + + raise NotImplementedError() + + def handle_dbapi_exception(self, e: BaseException) -> None: + """Receive a DBAPI exception which occurred upon execute, result + fetch, etc.""" + + raise NotImplementedError() + + def lastrow_has_defaults(self) -> bool: + """Return True if the last INSERT or UPDATE row contained + inlined or database-side defaults. + """ + + raise NotImplementedError() + + def get_rowcount(self) -> Optional[int]: + """Return the DBAPI ``cursor.rowcount`` value, or in some + cases an interpreted value. + + See :attr:`_engine.CursorResult.rowcount` for details on this. + + """ + + raise NotImplementedError() + + def fetchall_for_returning(self, cursor: DBAPICursor) -> Sequence[Any]: + """For a RETURNING result, deliver cursor.fetchall() from the + DBAPI cursor. + + This is a dialect-specific hook for dialects that have special + considerations when calling upon the rows delivered for a + "RETURNING" statement. Default implementation is + ``cursor.fetchall()``. + + This hook is currently used only by the :term:`insertmanyvalues` + feature. Dialects that don't set ``use_insertmanyvalues=True`` + don't need to consider this hook. + + .. 
versionadded:: 2.0.10 + + """ + raise NotImplementedError() + + +class ConnectionEventsTarget(EventTarget): + """An object which can accept events from :class:`.ConnectionEvents`. + + Includes :class:`_engine.Connection` and :class:`_engine.Engine`. + + .. versionadded:: 2.0 + + """ + + dispatch: dispatcher[ConnectionEventsTarget] + + +Connectable = ConnectionEventsTarget + + +class ExceptionContext: + """Encapsulate information about an error condition in progress. + + This object exists solely to be passed to the + :meth:`_events.DialectEvents.handle_error` event, + supporting an interface that + can be extended without backwards-incompatibility. + + + """ + + __slots__ = () + + dialect: Dialect + """The :class:`_engine.Dialect` in use. + + This member is present for all invocations of the event hook. + + .. versionadded:: 2.0 + + """ + + connection: Optional[Connection] + """The :class:`_engine.Connection` in use during the exception. + + This member is present, except in the case of a failure when + first connecting. + + .. seealso:: + + :attr:`.ExceptionContext.engine` + + + """ + + engine: Optional[Engine] + """The :class:`_engine.Engine` in use during the exception. + + This member is present in all cases except for when handling an error + within the connection pool "pre-ping" process. + + """ + + cursor: Optional[DBAPICursor] + """The DBAPI cursor object. + + May be None. + + """ + + statement: Optional[str] + """String SQL statement that was emitted directly to the DBAPI. + + May be None. + + """ + + parameters: Optional[_DBAPIAnyExecuteParams] + """Parameter collection that was emitted directly to the DBAPI. + + May be None. + + """ + + original_exception: BaseException + """The exception object which was caught. + + This member is always present. + + """ + + sqlalchemy_exception: Optional[StatementError] + """The :class:`sqlalchemy.exc.StatementError` which wraps the original, + and will be raised if exception handling is not circumvented by the event. + + May be None, as not all exception types are wrapped by SQLAlchemy. + For DBAPI-level exceptions that subclass the dbapi's Error class, this + field will always be present. + + """ + + chained_exception: Optional[BaseException] + """The exception that was returned by the previous handler in the + exception chain, if any. + + If present, this exception will be the one ultimately raised by + SQLAlchemy unless a subsequent handler replaces it. + + May be None. + + """ + + execution_context: Optional[ExecutionContext] + """The :class:`.ExecutionContext` corresponding to the execution + operation in progress. + + This is present for statement execution operations, but not for + operations such as transaction begin/end. It also is not present when + the exception was raised before the :class:`.ExecutionContext` + could be constructed. + + Note that the :attr:`.ExceptionContext.statement` and + :attr:`.ExceptionContext.parameters` members may represent a + different value than that of the :class:`.ExecutionContext`, + potentially in the case where a + :meth:`_events.ConnectionEvents.before_cursor_execute` event or similar + modified the statement/parameters to be sent. + + May be None. + + """ + + is_disconnect: bool + """Represent whether the exception as occurred represents a "disconnect" + condition. + + This flag will always be True or False within the scope of the + :meth:`_events.DialectEvents.handle_error` handler. 
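# A sketch of the is_disconnect handshake described above, using the public
# handle_error event: a handler may mark an otherwise-unrecognized driver
# error as a disconnect so the pool invalidates the connection.  The matched
# message text is a hypothetical driver-specific string.
from sqlalchemy import create_engine, event

engine = create_engine("sqlite://")  # any engine; SQLite used for illustration


@event.listens_for(engine, "handle_error")
def flag_disconnects(exception_context):
    msg = str(exception_context.original_exception)
    if "connection has been closed" in msg:  # hypothetical driver message
        exception_context.is_disconnect = True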
+ + SQLAlchemy will defer to this flag in order to determine whether or not + the connection should be invalidated subsequently. That is, by + assigning to this flag, a "disconnect" event which then results in + a connection and pool invalidation can be invoked or prevented by + changing this flag. + + + .. note:: The pool "pre_ping" handler enabled using the + :paramref:`_sa.create_engine.pool_pre_ping` parameter does **not** + consult this event before deciding if the "ping" returned false, + as opposed to receiving an unhandled error. For this use case, the + :ref:`legacy recipe based on engine_connect() may be used + `. A future API allow more + comprehensive customization of the "disconnect" detection mechanism + across all functions. + + """ + + invalidate_pool_on_disconnect: bool + """Represent whether all connections in the pool should be invalidated + when a "disconnect" condition is in effect. + + Setting this flag to False within the scope of the + :meth:`_events.DialectEvents.handle_error` + event will have the effect such + that the full collection of connections in the pool will not be + invalidated during a disconnect; only the current connection that is the + subject of the error will actually be invalidated. + + The purpose of this flag is for custom disconnect-handling schemes where + the invalidation of other connections in the pool is to be performed + based on other conditions, or even on a per-connection basis. + + """ + + is_pre_ping: bool + """Indicates if this error is occurring within the "pre-ping" step + performed when :paramref:`_sa.create_engine.pool_pre_ping` is set to + ``True``. In this mode, the :attr:`.ExceptionContext.engine` attribute + will be ``None``. The dialect in use is accessible via the + :attr:`.ExceptionContext.dialect` attribute. + + .. versionadded:: 2.0.5 + + """ + + +class AdaptedConnection: + """Interface of an adapted connection object to support the DBAPI protocol. + + Used by asyncio dialects to provide a sync-style pep-249 facade on top + of the asyncio connection/cursor API provided by the driver. + + .. versionadded:: 1.4.24 + + """ + + __slots__ = ("_connection",) + + _connection: Any + + @property + def driver_connection(self) -> Any: + """The connection object as returned by the driver after a connect.""" + return self._connection + + def run_async(self, fn: Callable[[Any], Awaitable[_T]]) -> _T: + """Run the awaitable returned by the given function, which is passed + the raw asyncio driver connection. + + This is used to invoke awaitable-only methods on the driver connection + within the context of a "synchronous" method, like a connection + pool event handler. + + E.g.:: + + engine = create_async_engine(...) + + + @event.listens_for(engine.sync_engine, "connect") + def register_custom_types( + dbapi_connection, # ... + ): + dbapi_connection.run_async( + lambda connection: connection.set_type_codec( + "MyCustomType", encoder, decoder, ... + ) + ) + + .. versionadded:: 1.4.30 + + .. 
seealso:: + + :ref:`asyncio_events_run_async` + + """ + return await_only(fn(self._connection)) + + def __repr__(self) -> str: + return "" % self._connection diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/engine/mock.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/engine/mock.py new file mode 100644 index 0000000000000000000000000000000000000000..a96af36ccda5b530c688bef08d04e9c9713c053f --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/engine/mock.py @@ -0,0 +1,134 @@ +# engine/mock.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php + +from __future__ import annotations + +from operator import attrgetter +import typing +from typing import Any +from typing import Callable +from typing import cast +from typing import Optional +from typing import Type +from typing import Union + +from . import url as _url +from .. import util + + +if typing.TYPE_CHECKING: + from .base import Engine + from .interfaces import _CoreAnyExecuteParams + from .interfaces import CoreExecuteOptionsParameter + from .interfaces import Dialect + from .url import URL + from ..sql.base import Executable + from ..sql.ddl import InvokeDDLBase + from ..sql.schema import HasSchemaAttr + from ..sql.visitors import Visitable + + +class MockConnection: + def __init__(self, dialect: Dialect, execute: Callable[..., Any]): + self._dialect = dialect + self._execute_impl = execute + + engine: Engine = cast(Any, property(lambda s: s)) + dialect: Dialect = cast(Any, property(attrgetter("_dialect"))) + name: str = cast(Any, property(lambda s: s._dialect.name)) + + def connect(self, **kwargs: Any) -> MockConnection: + return self + + def schema_for_object(self, obj: HasSchemaAttr) -> Optional[str]: + return obj.schema + + def execution_options(self, **kw: Any) -> MockConnection: + return self + + def _run_ddl_visitor( + self, + visitorcallable: Type[InvokeDDLBase], + element: Visitable, + **kwargs: Any, + ) -> None: + kwargs["checkfirst"] = False + visitorcallable( + dialect=self.dialect, connection=self, **kwargs + ).traverse_single(element) + + def execute( + self, + obj: Executable, + parameters: Optional[_CoreAnyExecuteParams] = None, + execution_options: Optional[CoreExecuteOptionsParameter] = None, + ) -> Any: + return self._execute_impl(obj, parameters) + + +def create_mock_engine( + url: Union[str, URL], executor: Any, **kw: Any +) -> MockConnection: + """Create a "mock" engine used for echoing DDL. + + This is a utility function used for debugging or storing the output of DDL + sequences as generated by :meth:`_schema.MetaData.create_all` + and related methods. + + The function accepts a URL which is used only to determine the kind of + dialect to be used, as well as an "executor" callable function which + will receive a SQL expression object and parameters, which can then be + echoed or otherwise printed. The executor's return value is not handled, + nor does the engine allow regular string statements to be invoked, and + is therefore only useful for DDL that is sent to the database without + receiving any results. 
+ + E.g.:: + + from sqlalchemy import create_mock_engine + + + def dump(sql, *multiparams, **params): + print(sql.compile(dialect=engine.dialect)) + + + engine = create_mock_engine("postgresql+psycopg2://", dump) + metadata.create_all(engine, checkfirst=False) + + :param url: A string URL which typically needs to contain only the + database backend name. + + :param executor: a callable which receives the arguments ``sql``, + ``*multiparams`` and ``**params``. The ``sql`` parameter is typically + an instance of :class:`.ExecutableDDLElement`, which can then be compiled + into a string using :meth:`.ExecutableDDLElement.compile`. + + .. versionadded:: 1.4 - the :func:`.create_mock_engine` function replaces + the previous "mock" engine strategy used with + :func:`_sa.create_engine`. + + .. seealso:: + + :ref:`faq_ddl_as_string` + + """ + + # create url.URL object + u = _url.make_url(url) + + dialect_cls = u.get_dialect() + + dialect_args = {} + # consume dialect arguments from kwargs + for k in util.get_cls_kwargs(dialect_cls): + if k in kw: + dialect_args[k] = kw.pop(k) + + # create dialect + dialect = dialect_cls(**dialect_args) + + return MockConnection(dialect, executor) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/engine/processors.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/engine/processors.py new file mode 100644 index 0000000000000000000000000000000000000000..b3f9330842d18e354599550dbd006edb68684e1d --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/engine/processors.py @@ -0,0 +1,61 @@ +# engine/processors.py +# Copyright (C) 2010-2025 the SQLAlchemy authors and contributors +# +# Copyright (C) 2010 Gaetan de Menten gdementen@gmail.com +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php + +"""defines generic type conversion functions, as used in bind and result +processors. + +They all share one common characteristic: None is passed through unchanged. 
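# A small demonstration of the conversion helpers re-exported by this module,
# relying on the documented behavior that None passes through unchanged.
from decimal import Decimal

from sqlalchemy.engine import processors

print(processors.str_to_date(None))          # None is passed through unchanged
print(processors.str_to_date("2024-05-17"))  # datetime.date(2024, 5, 17)

to_dec = processors.to_decimal_processor_factory(Decimal, 2)
print(to_dec(3.14159))                       # Decimal('3.14')
print(to_dec(None))                          # None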
+ +""" +from __future__ import annotations + +import typing + +from ._py_processors import str_to_datetime_processor_factory # noqa +from ..util._has_cy import HAS_CYEXTENSION + +if typing.TYPE_CHECKING or not HAS_CYEXTENSION: + from ._py_processors import int_to_boolean as int_to_boolean + from ._py_processors import str_to_date as str_to_date + from ._py_processors import str_to_datetime as str_to_datetime + from ._py_processors import str_to_time as str_to_time + from ._py_processors import ( + to_decimal_processor_factory as to_decimal_processor_factory, + ) + from ._py_processors import to_float as to_float + from ._py_processors import to_str as to_str +else: + from sqlalchemy.cyextension.processors import ( + DecimalResultProcessor, + ) + from sqlalchemy.cyextension.processors import ( # noqa: F401 + int_to_boolean as int_to_boolean, + ) + from sqlalchemy.cyextension.processors import ( # noqa: F401,E501 + str_to_date as str_to_date, + ) + from sqlalchemy.cyextension.processors import ( # noqa: F401 + str_to_datetime as str_to_datetime, + ) + from sqlalchemy.cyextension.processors import ( # noqa: F401,E501 + str_to_time as str_to_time, + ) + from sqlalchemy.cyextension.processors import ( # noqa: F401,E501 + to_float as to_float, + ) + from sqlalchemy.cyextension.processors import ( # noqa: F401,E501 + to_str as to_str, + ) + + def to_decimal_processor_factory(target_class, scale): + # Note that the scale argument is not taken into account for integer + # values in the C implementation while it is in the Python one. + # For example, the Python implementation might return + # Decimal('5.00000') whereas the C implementation will + # return Decimal('5'). These are equivalent of course. + return DecimalResultProcessor(target_class, "%%.%df" % scale).process diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/engine/reflection.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/engine/reflection.py new file mode 100644 index 0000000000000000000000000000000000000000..23009c64a4cc5d4179c9d0c88ca9b65c91a7ce8c --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/engine/reflection.py @@ -0,0 +1,2102 @@ +# engine/reflection.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php + +"""Provides an abstraction for obtaining database schema information. + +Usage Notes: + +Here are some general conventions when accessing the low level inspector +methods such as get_table_names, get_columns, etc. + +1. Inspector methods return lists of dicts in most cases for the following + reasons: + + * They're both standard types that can be serialized. + * Using a dict instead of a tuple allows easy expansion of attributes. + * Using a list for the outer structure maintains order and is easy to work + with (e.g. list comprehension [d['name'] for d in cols]). + +2. Records that contain a name, such as the column name in a column record + use the key 'name'. So for most return values, each record will have a + 'name' attribute.. 
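# A concrete instance of the conventions just described: reflection methods
# return lists of dicts keyed by 'name'.  In-memory SQLite for illustration.
from sqlalchemy import Column, Integer, MetaData, String, Table, create_engine, inspect

engine = create_engine("sqlite://")
metadata = MetaData()
Table(
    "user_account",
    metadata,
    Column("id", Integer, primary_key=True),
    Column("email", String(100)),
)
metadata.create_all(engine)

insp = inspect(engine)
cols = insp.get_columns("user_account")
print([d["name"] for d in cols])  # ['id', 'email']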
+""" +from __future__ import annotations + +import contextlib +from dataclasses import dataclass +from enum import auto +from enum import Flag +from enum import unique +from typing import Any +from typing import Callable +from typing import Collection +from typing import Dict +from typing import Generator +from typing import Iterable +from typing import List +from typing import Optional +from typing import Sequence +from typing import Set +from typing import Tuple +from typing import TYPE_CHECKING +from typing import TypeVar +from typing import Union + +from .base import Connection +from .base import Engine +from .. import exc +from .. import inspection +from .. import sql +from .. import util +from ..sql import operators +from ..sql import schema as sa_schema +from ..sql.cache_key import _ad_hoc_cache_key_from_args +from ..sql.elements import quoted_name +from ..sql.elements import TextClause +from ..sql.type_api import TypeEngine +from ..sql.visitors import InternalTraversal +from ..util import topological +from ..util.typing import final + +if TYPE_CHECKING: + from .interfaces import Dialect + from .interfaces import ReflectedCheckConstraint + from .interfaces import ReflectedColumn + from .interfaces import ReflectedForeignKeyConstraint + from .interfaces import ReflectedIndex + from .interfaces import ReflectedPrimaryKeyConstraint + from .interfaces import ReflectedTableComment + from .interfaces import ReflectedUniqueConstraint + from .interfaces import TableKey + +_R = TypeVar("_R") + + +@util.decorator +def cache( + fn: Callable[..., _R], + self: Dialect, + con: Connection, + *args: Any, + **kw: Any, +) -> _R: + info_cache = kw.get("info_cache", None) + if info_cache is None: + return fn(self, con, *args, **kw) + exclude = {"info_cache", "unreflectable"} + key = ( + fn.__name__, + tuple( + (str(a), a.quote) if isinstance(a, quoted_name) else a + for a in args + if isinstance(a, str) + ), + tuple( + (k, (str(v), v.quote) if isinstance(v, quoted_name) else v) + for k, v in kw.items() + if k not in exclude + ), + ) + ret: _R = info_cache.get(key) + if ret is None: + ret = fn(self, con, *args, **kw) + info_cache[key] = ret + return ret + + +def flexi_cache( + *traverse_args: Tuple[str, InternalTraversal] +) -> Callable[[Callable[..., _R]], Callable[..., _R]]: + @util.decorator + def go( + fn: Callable[..., _R], + self: Dialect, + con: Connection, + *args: Any, + **kw: Any, + ) -> _R: + info_cache = kw.get("info_cache", None) + if info_cache is None: + return fn(self, con, *args, **kw) + key = _ad_hoc_cache_key_from_args((fn.__name__,), traverse_args, args) + ret: _R = info_cache.get(key) + if ret is None: + ret = fn(self, con, *args, **kw) + info_cache[key] = ret + return ret + + return go + + +@unique +class ObjectKind(Flag): + """Enumerator that indicates which kind of object to return when calling + the ``get_multi`` methods. + + This is a Flag enum, so custom combinations can be passed. For example, + to reflect tables and plain views ``ObjectKind.TABLE | ObjectKind.VIEW`` + may be used. + + .. note:: + Not all dialect may support all kind of object. If a dialect does + not support a particular object an empty dict is returned. + In case a dialect supports an object, but the requested method + is not applicable for the specified kind the default value + will be returned for each reflected object. For example reflecting + check constraints of view return a dict with all the views with + empty lists as values. 
+ """ + + TABLE = auto() + "Reflect table objects" + VIEW = auto() + "Reflect plain view objects" + MATERIALIZED_VIEW = auto() + "Reflect materialized view object" + + ANY_VIEW = VIEW | MATERIALIZED_VIEW + "Reflect any kind of view objects" + ANY = TABLE | VIEW | MATERIALIZED_VIEW + "Reflect all type of objects" + + +@unique +class ObjectScope(Flag): + """Enumerator that indicates which scope to use when calling + the ``get_multi`` methods. + """ + + DEFAULT = auto() + "Include default scope" + TEMPORARY = auto() + "Include only temp scope" + ANY = DEFAULT | TEMPORARY + "Include both default and temp scope" + + +@inspection._self_inspects +class Inspector(inspection.Inspectable["Inspector"]): + """Performs database schema inspection. + + The Inspector acts as a proxy to the reflection methods of the + :class:`~sqlalchemy.engine.interfaces.Dialect`, providing a + consistent interface as well as caching support for previously + fetched metadata. + + A :class:`_reflection.Inspector` object is usually created via the + :func:`_sa.inspect` function, which may be passed an + :class:`_engine.Engine` + or a :class:`_engine.Connection`:: + + from sqlalchemy import inspect, create_engine + + engine = create_engine("...") + insp = inspect(engine) + + Where above, the :class:`~sqlalchemy.engine.interfaces.Dialect` associated + with the engine may opt to return an :class:`_reflection.Inspector` + subclass that + provides additional methods specific to the dialect's target database. + + """ + + bind: Union[Engine, Connection] + engine: Engine + _op_context_requires_connect: bool + dialect: Dialect + info_cache: Dict[Any, Any] + + @util.deprecated( + "1.4", + "The __init__() method on :class:`_reflection.Inspector` " + "is deprecated and " + "will be removed in a future release. Please use the " + ":func:`.sqlalchemy.inspect` " + "function on an :class:`_engine.Engine` or " + ":class:`_engine.Connection` " + "in order to " + "acquire an :class:`_reflection.Inspector`.", + ) + def __init__(self, bind: Union[Engine, Connection]): + """Initialize a new :class:`_reflection.Inspector`. + + :param bind: a :class:`~sqlalchemy.engine.Connection`, + which is typically an instance of + :class:`~sqlalchemy.engine.Engine` or + :class:`~sqlalchemy.engine.Connection`. + + For a dialect-specific instance of :class:`_reflection.Inspector`, see + :meth:`_reflection.Inspector.from_engine` + + """ + self._init_legacy(bind) + + @classmethod + def _construct( + cls, init: Callable[..., Any], bind: Union[Engine, Connection] + ) -> Inspector: + if hasattr(bind.dialect, "inspector"): + cls = bind.dialect.inspector + + self = cls.__new__(cls) + init(self, bind) + return self + + def _init_legacy(self, bind: Union[Engine, Connection]) -> None: + if hasattr(bind, "exec_driver_sql"): + self._init_connection(bind) # type: ignore[arg-type] + else: + self._init_engine(bind) + + def _init_engine(self, engine: Engine) -> None: + self.bind = self.engine = engine + engine.connect().close() + self._op_context_requires_connect = True + self.dialect = self.engine.dialect + self.info_cache = {} + + def _init_connection(self, connection: Connection) -> None: + self.bind = connection + self.engine = connection.engine + self._op_context_requires_connect = False + self.dialect = self.engine.dialect + self.info_cache = {} + + def clear_cache(self) -> None: + """reset the cache for this :class:`.Inspector`. + + Inspection methods that have data cached will emit SQL queries + when next called to get new data. + + .. 
versionadded:: 2.0 + + """ + self.info_cache.clear() + + @classmethod + @util.deprecated( + "1.4", + "The from_engine() method on :class:`_reflection.Inspector` " + "is deprecated and " + "will be removed in a future release. Please use the " + ":func:`.sqlalchemy.inspect` " + "function on an :class:`_engine.Engine` or " + ":class:`_engine.Connection` " + "in order to " + "acquire an :class:`_reflection.Inspector`.", + ) + def from_engine(cls, bind: Engine) -> Inspector: + """Construct a new dialect-specific Inspector object from the given + engine or connection. + + :param bind: a :class:`~sqlalchemy.engine.Connection` + or :class:`~sqlalchemy.engine.Engine`. + + This method differs from direct a direct constructor call of + :class:`_reflection.Inspector` in that the + :class:`~sqlalchemy.engine.interfaces.Dialect` is given a chance to + provide a dialect-specific :class:`_reflection.Inspector` instance, + which may + provide additional methods. + + See the example at :class:`_reflection.Inspector`. + + """ + return cls._construct(cls._init_legacy, bind) + + @inspection._inspects(Engine) + def _engine_insp(bind: Engine) -> Inspector: # type: ignore[misc] + return Inspector._construct(Inspector._init_engine, bind) + + @inspection._inspects(Connection) + def _connection_insp(bind: Connection) -> Inspector: # type: ignore[misc] + return Inspector._construct(Inspector._init_connection, bind) + + @contextlib.contextmanager + def _operation_context(self) -> Generator[Connection, None, None]: + """Return a context that optimizes for multiple operations on a single + transaction. + + This essentially allows connect()/close() to be called if we detected + that we're against an :class:`_engine.Engine` and not a + :class:`_engine.Connection`. + + """ + conn: Connection + if self._op_context_requires_connect: + conn = self.bind.connect() # type: ignore[union-attr] + else: + conn = self.bind # type: ignore[assignment] + try: + yield conn + finally: + if self._op_context_requires_connect: + conn.close() + + @contextlib.contextmanager + def _inspection_context(self) -> Generator[Inspector, None, None]: + """Return an :class:`_reflection.Inspector` + from this one that will run all + operations on a single connection. + + """ + + with self._operation_context() as conn: + sub_insp = self._construct(self.__class__._init_connection, conn) + sub_insp.info_cache = self.info_cache + yield sub_insp + + @property + def default_schema_name(self) -> Optional[str]: + """Return the default schema name presented by the dialect + for the current engine's database user. + + E.g. this is typically ``public`` for PostgreSQL and ``dbo`` + for SQL Server. + + """ + return self.dialect.default_schema_name + + def get_schema_names(self, **kw: Any) -> List[str]: + r"""Return all schema names. + + :param \**kw: Additional keyword argument to pass to the dialect + specific implementation. See the documentation of the dialect + in use for more information. + """ + + with self._operation_context() as conn: + return self.dialect.get_schema_names( + conn, info_cache=self.info_cache, **kw + ) + + def get_table_names( + self, schema: Optional[str] = None, **kw: Any + ) -> List[str]: + r"""Return all table names within a particular schema. + + The names are expected to be real tables only, not views. + Views are instead returned using the + :meth:`_reflection.Inspector.get_view_names` and/or + :meth:`_reflection.Inspector.get_materialized_view_names` + methods. + + :param schema: Schema name. 
If ``schema`` is left at ``None``, the + database's default schema is + used, else the named schema is searched. If the database does not + support named schemas, behavior is undefined if ``schema`` is not + passed as ``None``. For special quoting, use :class:`.quoted_name`. + :param \**kw: Additional keyword argument to pass to the dialect + specific implementation. See the documentation of the dialect + in use for more information. + + .. seealso:: + + :meth:`_reflection.Inspector.get_sorted_table_and_fkc_names` + + :attr:`_schema.MetaData.sorted_tables` + + """ + + with self._operation_context() as conn: + return self.dialect.get_table_names( + conn, schema, info_cache=self.info_cache, **kw + ) + + def has_table( + self, table_name: str, schema: Optional[str] = None, **kw: Any + ) -> bool: + r"""Return True if the backend has a table, view, or temporary + table of the given name. + + :param table_name: name of the table to check + :param schema: schema name to query, if not the default schema. + :param \**kw: Additional keyword argument to pass to the dialect + specific implementation. See the documentation of the dialect + in use for more information. + + .. versionadded:: 1.4 - the :meth:`.Inspector.has_table` method + replaces the :meth:`_engine.Engine.has_table` method. + + .. versionchanged:: 2.0:: :meth:`.Inspector.has_table` now formally + supports checking for additional table-like objects: + + * any type of views (plain or materialized) + * temporary tables of any kind + + Previously, these two checks were not formally specified and + different dialects would vary in their behavior. The dialect + testing suite now includes tests for all of these object types + and should be supported by all SQLAlchemy-included dialects. + Support among third party dialects may be lagging, however. + + """ + with self._operation_context() as conn: + return self.dialect.has_table( + conn, table_name, schema, info_cache=self.info_cache, **kw + ) + + def has_sequence( + self, sequence_name: str, schema: Optional[str] = None, **kw: Any + ) -> bool: + r"""Return True if the backend has a sequence with the given name. + + :param sequence_name: name of the sequence to check + :param schema: schema name to query, if not the default schema. + :param \**kw: Additional keyword argument to pass to the dialect + specific implementation. See the documentation of the dialect + in use for more information. + + .. versionadded:: 1.4 + + """ + with self._operation_context() as conn: + return self.dialect.has_sequence( + conn, sequence_name, schema, info_cache=self.info_cache, **kw + ) + + def has_index( + self, + table_name: str, + index_name: str, + schema: Optional[str] = None, + **kw: Any, + ) -> bool: + r"""Check the existence of a particular index name in the database. + + :param table_name: the name of the table the index belongs to + :param index_name: the name of the index to check + :param schema: schema name to query, if not the default schema. + :param \**kw: Additional keyword argument to pass to the dialect + specific implementation. See the documentation of the dialect + in use for more information. + + .. versionadded:: 2.0 + + """ + with self._operation_context() as conn: + return self.dialect.has_index( + conn, + table_name, + index_name, + schema, + info_cache=self.info_cache, + **kw, + ) + + def has_schema(self, schema_name: str, **kw: Any) -> bool: + r"""Return True if the backend has a schema with the given name. 
+ + :param schema_name: name of the schema to check + :param \**kw: Additional keyword argument to pass to the dialect + specific implementation. See the documentation of the dialect + in use for more information. + + .. versionadded:: 2.0 + + """ + with self._operation_context() as conn: + return self.dialect.has_schema( + conn, schema_name, info_cache=self.info_cache, **kw + ) + + def get_sorted_table_and_fkc_names( + self, + schema: Optional[str] = None, + **kw: Any, + ) -> List[Tuple[Optional[str], List[Tuple[str, Optional[str]]]]]: + r"""Return dependency-sorted table and foreign key constraint names in + referred to within a particular schema. + + This will yield 2-tuples of + ``(tablename, [(tname, fkname), (tname, fkname), ...])`` + consisting of table names in CREATE order grouped with the foreign key + constraint names that are not detected as belonging to a cycle. + The final element + will be ``(None, [(tname, fkname), (tname, fkname), ..])`` + which will consist of remaining + foreign key constraint names that would require a separate CREATE + step after-the-fact, based on dependencies between tables. + + :param schema: schema name to query, if not the default schema. + :param \**kw: Additional keyword argument to pass to the dialect + specific implementation. See the documentation of the dialect + in use for more information. + + .. seealso:: + + :meth:`_reflection.Inspector.get_table_names` + + :func:`.sort_tables_and_constraints` - similar method which works + with an already-given :class:`_schema.MetaData`. + + """ + + return [ + ( + table_key[1] if table_key else None, + [(tname, fks) for (_, tname), fks in fk_collection], + ) + for ( + table_key, + fk_collection, + ) in self.sort_tables_on_foreign_key_dependency( + consider_schemas=(schema,) + ) + ] + + def sort_tables_on_foreign_key_dependency( + self, + consider_schemas: Collection[Optional[str]] = (None,), + **kw: Any, + ) -> List[ + Tuple[ + Optional[Tuple[Optional[str], str]], + List[Tuple[Tuple[Optional[str], str], Optional[str]]], + ] + ]: + r"""Return dependency-sorted table and foreign key constraint names + referred to within multiple schemas. + + This method may be compared to + :meth:`.Inspector.get_sorted_table_and_fkc_names`, which + works on one schema at a time; here, the method is a generalization + that will consider multiple schemas at once including that it will + resolve for cross-schema foreign keys. + + .. 
versionadded:: 2.0 + + """ + SchemaTab = Tuple[Optional[str], str] + + tuples: Set[Tuple[SchemaTab, SchemaTab]] = set() + remaining_fkcs: Set[Tuple[SchemaTab, Optional[str]]] = set() + fknames_for_table: Dict[SchemaTab, Set[Optional[str]]] = {} + tnames: List[SchemaTab] = [] + + for schname in consider_schemas: + schema_fkeys = self.get_multi_foreign_keys(schname, **kw) + tnames.extend(schema_fkeys) + for (_, tname), fkeys in schema_fkeys.items(): + fknames_for_table[(schname, tname)] = { + fk["name"] for fk in fkeys + } + for fkey in fkeys: + if ( + tname != fkey["referred_table"] + or schname != fkey["referred_schema"] + ): + tuples.add( + ( + ( + fkey["referred_schema"], + fkey["referred_table"], + ), + (schname, tname), + ) + ) + try: + candidate_sort = list(topological.sort(tuples, tnames)) + except exc.CircularDependencyError as err: + edge: Tuple[SchemaTab, SchemaTab] + for edge in err.edges: + tuples.remove(edge) + remaining_fkcs.update( + (edge[1], fkc) for fkc in fknames_for_table[edge[1]] + ) + + candidate_sort = list(topological.sort(tuples, tnames)) + ret: List[ + Tuple[Optional[SchemaTab], List[Tuple[SchemaTab, Optional[str]]]] + ] + ret = [ + ( + (schname, tname), + [ + ((schname, tname), fk) + for fk in fknames_for_table[(schname, tname)].difference( + name for _, name in remaining_fkcs + ) + ], + ) + for (schname, tname) in candidate_sort + ] + return ret + [(None, list(remaining_fkcs))] + + def get_temp_table_names(self, **kw: Any) -> List[str]: + r"""Return a list of temporary table names for the current bind. + + This method is unsupported by most dialects; currently + only Oracle Database, PostgreSQL and SQLite implements it. + + :param \**kw: Additional keyword argument to pass to the dialect + specific implementation. See the documentation of the dialect + in use for more information. + + """ + + with self._operation_context() as conn: + return self.dialect.get_temp_table_names( + conn, info_cache=self.info_cache, **kw + ) + + def get_temp_view_names(self, **kw: Any) -> List[str]: + r"""Return a list of temporary view names for the current bind. + + This method is unsupported by most dialects; currently + only PostgreSQL and SQLite implements it. + + :param \**kw: Additional keyword argument to pass to the dialect + specific implementation. See the documentation of the dialect + in use for more information. + + """ + with self._operation_context() as conn: + return self.dialect.get_temp_view_names( + conn, info_cache=self.info_cache, **kw + ) + + def get_table_options( + self, table_name: str, schema: Optional[str] = None, **kw: Any + ) -> Dict[str, Any]: + r"""Return a dictionary of options specified when the table of the + given name was created. + + This currently includes some options that apply to MySQL and Oracle + Database tables. + + :param table_name: string name of the table. For special quoting, + use :class:`.quoted_name`. + + :param schema: string schema name; if omitted, uses the default schema + of the database connection. For special quoting, + use :class:`.quoted_name`. + + :param \**kw: Additional keyword argument to pass to the dialect + specific implementation. See the documentation of the dialect + in use for more information. + + :return: a dict with the table options. The returned keys depend on the + dialect in use. Each one is prefixed with the dialect name. + + .. 
seealso:: :meth:`Inspector.get_multi_table_options` + + """ + with self._operation_context() as conn: + return self.dialect.get_table_options( + conn, table_name, schema, info_cache=self.info_cache, **kw + ) + + def get_multi_table_options( + self, + schema: Optional[str] = None, + filter_names: Optional[Sequence[str]] = None, + kind: ObjectKind = ObjectKind.TABLE, + scope: ObjectScope = ObjectScope.DEFAULT, + **kw: Any, + ) -> Dict[TableKey, Dict[str, Any]]: + r"""Return a dictionary of options specified when the tables in the + given schema were created. + + The tables can be filtered by passing the names to use to + ``filter_names``. + + This currently includes some options that apply to MySQL and Oracle + tables. + + :param schema: string schema name; if omitted, uses the default schema + of the database connection. For special quoting, + use :class:`.quoted_name`. + + :param filter_names: optionally return information only for the + objects listed here. + + :param kind: a :class:`.ObjectKind` that specifies the type of objects + to reflect. Defaults to ``ObjectKind.TABLE``. + + :param scope: a :class:`.ObjectScope` that specifies if options of + default, temporary or any tables should be reflected. + Defaults to ``ObjectScope.DEFAULT``. + + :param \**kw: Additional keyword argument to pass to the dialect + specific implementation. See the documentation of the dialect + in use for more information. + + :return: a dictionary where the keys are two-tuple schema,table-name + and the values are dictionaries with the table options. + The returned keys in each dict depend on the + dialect in use. Each one is prefixed with the dialect name. + The schema is ``None`` if no schema is provided. + + .. versionadded:: 2.0 + + .. seealso:: :meth:`Inspector.get_table_options` + """ + with self._operation_context() as conn: + res = self.dialect.get_multi_table_options( + conn, + schema=schema, + filter_names=filter_names, + kind=kind, + scope=scope, + info_cache=self.info_cache, + **kw, + ) + return dict(res) + + def get_view_names( + self, schema: Optional[str] = None, **kw: Any + ) -> List[str]: + r"""Return all non-materialized view names in `schema`. + + :param schema: Optional, retrieve names from a non-default schema. + For special quoting, use :class:`.quoted_name`. + :param \**kw: Additional keyword argument to pass to the dialect + specific implementation. See the documentation of the dialect + in use for more information. + + + .. versionchanged:: 2.0 For those dialects that previously included + the names of materialized views in this list (currently PostgreSQL), + this method no longer returns the names of materialized views. + the :meth:`.Inspector.get_materialized_view_names` method should + be used instead. + + .. seealso:: + + :meth:`.Inspector.get_materialized_view_names` + + """ + + with self._operation_context() as conn: + return self.dialect.get_view_names( + conn, schema, info_cache=self.info_cache, **kw + ) + + def get_materialized_view_names( + self, schema: Optional[str] = None, **kw: Any + ) -> List[str]: + r"""Return all materialized view names in `schema`. + + :param schema: Optional, retrieve names from a non-default schema. + For special quoting, use :class:`.quoted_name`. + :param \**kw: Additional keyword argument to pass to the dialect + specific implementation. See the documentation of the dialect + in use for more information. + + .. versionadded:: 2.0 + + .. 
seealso:: + + :meth:`.Inspector.get_view_names` + + """ + + with self._operation_context() as conn: + return self.dialect.get_materialized_view_names( + conn, schema, info_cache=self.info_cache, **kw + ) + + def get_sequence_names( + self, schema: Optional[str] = None, **kw: Any + ) -> List[str]: + r"""Return all sequence names in `schema`. + + :param schema: Optional, retrieve names from a non-default schema. + For special quoting, use :class:`.quoted_name`. + :param \**kw: Additional keyword argument to pass to the dialect + specific implementation. See the documentation of the dialect + in use for more information. + + """ + + with self._operation_context() as conn: + return self.dialect.get_sequence_names( + conn, schema, info_cache=self.info_cache, **kw + ) + + def get_view_definition( + self, view_name: str, schema: Optional[str] = None, **kw: Any + ) -> str: + r"""Return definition for the plain or materialized view called + ``view_name``. + + :param view_name: Name of the view. + :param schema: Optional, retrieve names from a non-default schema. + For special quoting, use :class:`.quoted_name`. + :param \**kw: Additional keyword argument to pass to the dialect + specific implementation. See the documentation of the dialect + in use for more information. + + """ + + with self._operation_context() as conn: + return self.dialect.get_view_definition( + conn, view_name, schema, info_cache=self.info_cache, **kw + ) + + def get_columns( + self, table_name: str, schema: Optional[str] = None, **kw: Any + ) -> List[ReflectedColumn]: + r"""Return information about columns in ``table_name``. + + Given a string ``table_name`` and an optional string ``schema``, + return column information as a list of :class:`.ReflectedColumn`. + + :param table_name: string name of the table. For special quoting, + use :class:`.quoted_name`. + + :param schema: string schema name; if omitted, uses the default schema + of the database connection. For special quoting, + use :class:`.quoted_name`. + + :param \**kw: Additional keyword argument to pass to the dialect + specific implementation. See the documentation of the dialect + in use for more information. + + :return: list of dictionaries, each representing the definition of + a database column. + + .. seealso:: :meth:`Inspector.get_multi_columns`. + + """ + + with self._operation_context() as conn: + col_defs = self.dialect.get_columns( + conn, table_name, schema, info_cache=self.info_cache, **kw + ) + if col_defs: + self._instantiate_types([col_defs]) + return col_defs + + def _instantiate_types( + self, data: Iterable[List[ReflectedColumn]] + ) -> None: + # make this easy and only return instances for coltype + for col_defs in data: + for col_def in col_defs: + coltype = col_def["type"] + if not isinstance(coltype, TypeEngine): + col_def["type"] = coltype() + + def get_multi_columns( + self, + schema: Optional[str] = None, + filter_names: Optional[Sequence[str]] = None, + kind: ObjectKind = ObjectKind.TABLE, + scope: ObjectScope = ObjectScope.DEFAULT, + **kw: Any, + ) -> Dict[TableKey, List[ReflectedColumn]]: + r"""Return information about columns in all objects in the given + schema. + + The objects can be filtered by passing the names to use to + ``filter_names``. + + For each table the value is a list of :class:`.ReflectedColumn`. + + :param schema: string schema name; if omitted, uses the default schema + of the database connection. For special quoting, + use :class:`.quoted_name`. 
+ + :param filter_names: optionally return information only for the + objects listed here. + + :param kind: a :class:`.ObjectKind` that specifies the type of objects + to reflect. Defaults to ``ObjectKind.TABLE``. + + :param scope: a :class:`.ObjectScope` that specifies if columns of + default, temporary or any tables should be reflected. + Defaults to ``ObjectScope.DEFAULT``. + + :param \**kw: Additional keyword argument to pass to the dialect + specific implementation. See the documentation of the dialect + in use for more information. + + :return: a dictionary where the keys are two-tuple schema,table-name + and the values are list of dictionaries, each representing the + definition of a database column. + The schema is ``None`` if no schema is provided. + + .. versionadded:: 2.0 + + .. seealso:: :meth:`Inspector.get_columns` + """ + + with self._operation_context() as conn: + table_col_defs = dict( + self.dialect.get_multi_columns( + conn, + schema=schema, + filter_names=filter_names, + kind=kind, + scope=scope, + info_cache=self.info_cache, + **kw, + ) + ) + self._instantiate_types(table_col_defs.values()) + return table_col_defs + + def get_pk_constraint( + self, table_name: str, schema: Optional[str] = None, **kw: Any + ) -> ReflectedPrimaryKeyConstraint: + r"""Return information about primary key constraint in ``table_name``. + + Given a string ``table_name``, and an optional string `schema`, return + primary key information as a :class:`.ReflectedPrimaryKeyConstraint`. + + :param table_name: string name of the table. For special quoting, + use :class:`.quoted_name`. + + :param schema: string schema name; if omitted, uses the default schema + of the database connection. For special quoting, + use :class:`.quoted_name`. + + :param \**kw: Additional keyword argument to pass to the dialect + specific implementation. See the documentation of the dialect + in use for more information. + + :return: a dictionary representing the definition of + a primary key constraint. + + .. seealso:: :meth:`Inspector.get_multi_pk_constraint` + """ + with self._operation_context() as conn: + return self.dialect.get_pk_constraint( + conn, table_name, schema, info_cache=self.info_cache, **kw + ) + + def get_multi_pk_constraint( + self, + schema: Optional[str] = None, + filter_names: Optional[Sequence[str]] = None, + kind: ObjectKind = ObjectKind.TABLE, + scope: ObjectScope = ObjectScope.DEFAULT, + **kw: Any, + ) -> Dict[TableKey, ReflectedPrimaryKeyConstraint]: + r"""Return information about primary key constraints in + all tables in the given schema. + + The tables can be filtered by passing the names to use to + ``filter_names``. + + For each table the value is a :class:`.ReflectedPrimaryKeyConstraint`. + + :param schema: string schema name; if omitted, uses the default schema + of the database connection. For special quoting, + use :class:`.quoted_name`. + + :param filter_names: optionally return information only for the + objects listed here. + + :param kind: a :class:`.ObjectKind` that specifies the type of objects + to reflect. Defaults to ``ObjectKind.TABLE``. + + :param scope: a :class:`.ObjectScope` that specifies if primary keys of + default, temporary or any tables should be reflected. + Defaults to ``ObjectScope.DEFAULT``. + + :param \**kw: Additional keyword argument to pass to the dialect + specific implementation. See the documentation of the dialect + in use for more information. 
+ + :return: a dictionary where the keys are two-tuple schema,table-name + and the values are dictionaries, each representing the + definition of a primary key constraint. + The schema is ``None`` if no schema is provided. + + .. versionadded:: 2.0 + + .. seealso:: :meth:`Inspector.get_pk_constraint` + """ + with self._operation_context() as conn: + return dict( + self.dialect.get_multi_pk_constraint( + conn, + schema=schema, + filter_names=filter_names, + kind=kind, + scope=scope, + info_cache=self.info_cache, + **kw, + ) + ) + + def get_foreign_keys( + self, table_name: str, schema: Optional[str] = None, **kw: Any + ) -> List[ReflectedForeignKeyConstraint]: + r"""Return information about foreign_keys in ``table_name``. + + Given a string ``table_name``, and an optional string `schema`, return + foreign key information as a list of + :class:`.ReflectedForeignKeyConstraint`. + + :param table_name: string name of the table. For special quoting, + use :class:`.quoted_name`. + + :param schema: string schema name; if omitted, uses the default schema + of the database connection. For special quoting, + use :class:`.quoted_name`. + + :param \**kw: Additional keyword argument to pass to the dialect + specific implementation. See the documentation of the dialect + in use for more information. + + :return: a list of dictionaries, each representing the + a foreign key definition. + + .. seealso:: :meth:`Inspector.get_multi_foreign_keys` + """ + + with self._operation_context() as conn: + return self.dialect.get_foreign_keys( + conn, table_name, schema, info_cache=self.info_cache, **kw + ) + + def get_multi_foreign_keys( + self, + schema: Optional[str] = None, + filter_names: Optional[Sequence[str]] = None, + kind: ObjectKind = ObjectKind.TABLE, + scope: ObjectScope = ObjectScope.DEFAULT, + **kw: Any, + ) -> Dict[TableKey, List[ReflectedForeignKeyConstraint]]: + r"""Return information about foreign_keys in all tables + in the given schema. + + The tables can be filtered by passing the names to use to + ``filter_names``. + + For each table the value is a list of + :class:`.ReflectedForeignKeyConstraint`. + + :param schema: string schema name; if omitted, uses the default schema + of the database connection. For special quoting, + use :class:`.quoted_name`. + + :param filter_names: optionally return information only for the + objects listed here. + + :param kind: a :class:`.ObjectKind` that specifies the type of objects + to reflect. Defaults to ``ObjectKind.TABLE``. + + :param scope: a :class:`.ObjectScope` that specifies if foreign keys of + default, temporary or any tables should be reflected. + Defaults to ``ObjectScope.DEFAULT``. + + :param \**kw: Additional keyword argument to pass to the dialect + specific implementation. See the documentation of the dialect + in use for more information. + + :return: a dictionary where the keys are two-tuple schema,table-name + and the values are list of dictionaries, each representing + a foreign key definition. + The schema is ``None`` if no schema is provided. + + .. versionadded:: 2.0 + + .. seealso:: :meth:`Inspector.get_foreign_keys` + """ + + with self._operation_context() as conn: + return dict( + self.dialect.get_multi_foreign_keys( + conn, + schema=schema, + filter_names=filter_names, + kind=kind, + scope=scope, + info_cache=self.info_cache, + **kw, + ) + ) + + def get_indexes( + self, table_name: str, schema: Optional[str] = None, **kw: Any + ) -> List[ReflectedIndex]: + r"""Return information about indexes in ``table_name``. 
+ + Given a string ``table_name`` and an optional string `schema`, return + index information as a list of :class:`.ReflectedIndex`. + + :param table_name: string name of the table. For special quoting, + use :class:`.quoted_name`. + + :param schema: string schema name; if omitted, uses the default schema + of the database connection. For special quoting, + use :class:`.quoted_name`. + + :param \**kw: Additional keyword argument to pass to the dialect + specific implementation. See the documentation of the dialect + in use for more information. + + :return: a list of dictionaries, each representing the + definition of an index. + + .. seealso:: :meth:`Inspector.get_multi_indexes` + """ + + with self._operation_context() as conn: + return self.dialect.get_indexes( + conn, table_name, schema, info_cache=self.info_cache, **kw + ) + + def get_multi_indexes( + self, + schema: Optional[str] = None, + filter_names: Optional[Sequence[str]] = None, + kind: ObjectKind = ObjectKind.TABLE, + scope: ObjectScope = ObjectScope.DEFAULT, + **kw: Any, + ) -> Dict[TableKey, List[ReflectedIndex]]: + r"""Return information about indexes in in all objects + in the given schema. + + The objects can be filtered by passing the names to use to + ``filter_names``. + + For each table the value is a list of :class:`.ReflectedIndex`. + + :param schema: string schema name; if omitted, uses the default schema + of the database connection. For special quoting, + use :class:`.quoted_name`. + + :param filter_names: optionally return information only for the + objects listed here. + + :param kind: a :class:`.ObjectKind` that specifies the type of objects + to reflect. Defaults to ``ObjectKind.TABLE``. + + :param scope: a :class:`.ObjectScope` that specifies if indexes of + default, temporary or any tables should be reflected. + Defaults to ``ObjectScope.DEFAULT``. + + :param \**kw: Additional keyword argument to pass to the dialect + specific implementation. See the documentation of the dialect + in use for more information. + + :return: a dictionary where the keys are two-tuple schema,table-name + and the values are list of dictionaries, each representing the + definition of an index. + The schema is ``None`` if no schema is provided. + + .. versionadded:: 2.0 + + .. seealso:: :meth:`Inspector.get_indexes` + """ + + with self._operation_context() as conn: + return dict( + self.dialect.get_multi_indexes( + conn, + schema=schema, + filter_names=filter_names, + kind=kind, + scope=scope, + info_cache=self.info_cache, + **kw, + ) + ) + + def get_unique_constraints( + self, table_name: str, schema: Optional[str] = None, **kw: Any + ) -> List[ReflectedUniqueConstraint]: + r"""Return information about unique constraints in ``table_name``. + + Given a string ``table_name`` and an optional string `schema`, return + unique constraint information as a list of + :class:`.ReflectedUniqueConstraint`. + + :param table_name: string name of the table. For special quoting, + use :class:`.quoted_name`. + + :param schema: string schema name; if omitted, uses the default schema + of the database connection. For special quoting, + use :class:`.quoted_name`. + + :param \**kw: Additional keyword argument to pass to the dialect + specific implementation. See the documentation of the dialect + in use for more information. + + :return: a list of dictionaries, each representing the + definition of an unique constraint. + + .. 
seealso:: :meth:`Inspector.get_multi_unique_constraints` + """ + + with self._operation_context() as conn: + return self.dialect.get_unique_constraints( + conn, table_name, schema, info_cache=self.info_cache, **kw + ) + + def get_multi_unique_constraints( + self, + schema: Optional[str] = None, + filter_names: Optional[Sequence[str]] = None, + kind: ObjectKind = ObjectKind.TABLE, + scope: ObjectScope = ObjectScope.DEFAULT, + **kw: Any, + ) -> Dict[TableKey, List[ReflectedUniqueConstraint]]: + r"""Return information about unique constraints in all tables + in the given schema. + + The tables can be filtered by passing the names to use to + ``filter_names``. + + For each table the value is a list of + :class:`.ReflectedUniqueConstraint`. + + :param schema: string schema name; if omitted, uses the default schema + of the database connection. For special quoting, + use :class:`.quoted_name`. + + :param filter_names: optionally return information only for the + objects listed here. + + :param kind: a :class:`.ObjectKind` that specifies the type of objects + to reflect. Defaults to ``ObjectKind.TABLE``. + + :param scope: a :class:`.ObjectScope` that specifies if constraints of + default, temporary or any tables should be reflected. + Defaults to ``ObjectScope.DEFAULT``. + + :param \**kw: Additional keyword argument to pass to the dialect + specific implementation. See the documentation of the dialect + in use for more information. + + :return: a dictionary where the keys are two-tuple schema,table-name + and the values are list of dictionaries, each representing the + definition of an unique constraint. + The schema is ``None`` if no schema is provided. + + .. versionadded:: 2.0 + + .. seealso:: :meth:`Inspector.get_unique_constraints` + """ + + with self._operation_context() as conn: + return dict( + self.dialect.get_multi_unique_constraints( + conn, + schema=schema, + filter_names=filter_names, + kind=kind, + scope=scope, + info_cache=self.info_cache, + **kw, + ) + ) + + def get_table_comment( + self, table_name: str, schema: Optional[str] = None, **kw: Any + ) -> ReflectedTableComment: + r"""Return information about the table comment for ``table_name``. + + Given a string ``table_name`` and an optional string ``schema``, + return table comment information as a :class:`.ReflectedTableComment`. + + Raises ``NotImplementedError`` for a dialect that does not support + comments. + + :param table_name: string name of the table. For special quoting, + use :class:`.quoted_name`. + + :param schema: string schema name; if omitted, uses the default schema + of the database connection. For special quoting, + use :class:`.quoted_name`. + + :param \**kw: Additional keyword argument to pass to the dialect + specific implementation. See the documentation of the dialect + in use for more information. + + :return: a dictionary, with the table comment. + + .. versionadded:: 1.2 + + .. seealso:: :meth:`Inspector.get_multi_table_comment` + """ + + with self._operation_context() as conn: + return self.dialect.get_table_comment( + conn, table_name, schema, info_cache=self.info_cache, **kw + ) + + def get_multi_table_comment( + self, + schema: Optional[str] = None, + filter_names: Optional[Sequence[str]] = None, + kind: ObjectKind = ObjectKind.TABLE, + scope: ObjectScope = ObjectScope.DEFAULT, + **kw: Any, + ) -> Dict[TableKey, ReflectedTableComment]: + r"""Return information about the table comment in all objects + in the given schema. 
+ + The objects can be filtered by passing the names to use to + ``filter_names``. + + For each table the value is a :class:`.ReflectedTableComment`. + + Raises ``NotImplementedError`` for a dialect that does not support + comments. + + :param schema: string schema name; if omitted, uses the default schema + of the database connection. For special quoting, + use :class:`.quoted_name`. + + :param filter_names: optionally return information only for the + objects listed here. + + :param kind: a :class:`.ObjectKind` that specifies the type of objects + to reflect. Defaults to ``ObjectKind.TABLE``. + + :param scope: a :class:`.ObjectScope` that specifies if comments of + default, temporary or any tables should be reflected. + Defaults to ``ObjectScope.DEFAULT``. + + :param \**kw: Additional keyword argument to pass to the dialect + specific implementation. See the documentation of the dialect + in use for more information. + + :return: a dictionary where the keys are two-tuple schema,table-name + and the values are dictionaries, representing the + table comments. + The schema is ``None`` if no schema is provided. + + .. versionadded:: 2.0 + + .. seealso:: :meth:`Inspector.get_table_comment` + """ + + with self._operation_context() as conn: + return dict( + self.dialect.get_multi_table_comment( + conn, + schema=schema, + filter_names=filter_names, + kind=kind, + scope=scope, + info_cache=self.info_cache, + **kw, + ) + ) + + def get_check_constraints( + self, table_name: str, schema: Optional[str] = None, **kw: Any + ) -> List[ReflectedCheckConstraint]: + r"""Return information about check constraints in ``table_name``. + + Given a string ``table_name`` and an optional string `schema`, return + check constraint information as a list of + :class:`.ReflectedCheckConstraint`. + + :param table_name: string name of the table. For special quoting, + use :class:`.quoted_name`. + + :param schema: string schema name; if omitted, uses the default schema + of the database connection. For special quoting, + use :class:`.quoted_name`. + + :param \**kw: Additional keyword argument to pass to the dialect + specific implementation. See the documentation of the dialect + in use for more information. + + :return: a list of dictionaries, each representing the + definition of a check constraints. + + .. seealso:: :meth:`Inspector.get_multi_check_constraints` + """ + + with self._operation_context() as conn: + return self.dialect.get_check_constraints( + conn, table_name, schema, info_cache=self.info_cache, **kw + ) + + def get_multi_check_constraints( + self, + schema: Optional[str] = None, + filter_names: Optional[Sequence[str]] = None, + kind: ObjectKind = ObjectKind.TABLE, + scope: ObjectScope = ObjectScope.DEFAULT, + **kw: Any, + ) -> Dict[TableKey, List[ReflectedCheckConstraint]]: + r"""Return information about check constraints in all tables + in the given schema. + + The tables can be filtered by passing the names to use to + ``filter_names``. + + For each table the value is a list of + :class:`.ReflectedCheckConstraint`. + + :param schema: string schema name; if omitted, uses the default schema + of the database connection. For special quoting, + use :class:`.quoted_name`. + + :param filter_names: optionally return information only for the + objects listed here. + + :param kind: a :class:`.ObjectKind` that specifies the type of objects + to reflect. Defaults to ``ObjectKind.TABLE``. 
+ + :param scope: a :class:`.ObjectScope` that specifies if constraints of + default, temporary or any tables should be reflected. + Defaults to ``ObjectScope.DEFAULT``. + + :param \**kw: Additional keyword argument to pass to the dialect + specific implementation. See the documentation of the dialect + in use for more information. + + :return: a dictionary where the keys are two-tuple schema,table-name + and the values are list of dictionaries, each representing the + definition of a check constraints. + The schema is ``None`` if no schema is provided. + + .. versionadded:: 2.0 + + .. seealso:: :meth:`Inspector.get_check_constraints` + """ + + with self._operation_context() as conn: + return dict( + self.dialect.get_multi_check_constraints( + conn, + schema=schema, + filter_names=filter_names, + kind=kind, + scope=scope, + info_cache=self.info_cache, + **kw, + ) + ) + + def reflect_table( + self, + table: sa_schema.Table, + include_columns: Optional[Collection[str]], + exclude_columns: Collection[str] = (), + resolve_fks: bool = True, + _extend_on: Optional[Set[sa_schema.Table]] = None, + _reflect_info: Optional[_ReflectionInfo] = None, + ) -> None: + """Given a :class:`_schema.Table` object, load its internal + constructs based on introspection. + + This is the underlying method used by most dialects to produce + table reflection. Direct usage is like:: + + from sqlalchemy import create_engine, MetaData, Table + from sqlalchemy import inspect + + engine = create_engine("...") + meta = MetaData() + user_table = Table("user", meta) + insp = inspect(engine) + insp.reflect_table(user_table, None) + + .. versionchanged:: 1.4 Renamed from ``reflecttable`` to + ``reflect_table`` + + :param table: a :class:`~sqlalchemy.schema.Table` instance. + :param include_columns: a list of string column names to include + in the reflection process. If ``None``, all columns are reflected. + + """ + + if _extend_on is not None: + if table in _extend_on: + return + else: + _extend_on.add(table) + + dialect = self.bind.dialect + + with self._operation_context() as conn: + schema = conn.schema_for_object(table) + + table_name = table.name + + # get table-level arguments that are specifically + # intended for reflection, e.g. oracle_resolve_synonyms. 
+ # these are unconditionally passed to related Table + # objects + reflection_options = { + k: table.dialect_kwargs.get(k) + for k in dialect.reflection_options + if k in table.dialect_kwargs + } + + table_key = (schema, table_name) + if _reflect_info is None or table_key not in _reflect_info.columns: + _reflect_info = self._get_reflection_info( + schema, + filter_names=[table_name], + kind=ObjectKind.ANY, + scope=ObjectScope.ANY, + _reflect_info=_reflect_info, + **table.dialect_kwargs, + ) + if table_key in _reflect_info.unreflectable: + raise _reflect_info.unreflectable[table_key] + + if table_key not in _reflect_info.columns: + raise exc.NoSuchTableError(table_name) + + # reflect table options, like mysql_engine + if _reflect_info.table_options: + tbl_opts = _reflect_info.table_options.get(table_key) + if tbl_opts: + # add additional kwargs to the Table if the dialect + # returned them + table._validate_dialect_kwargs(tbl_opts) + + found_table = False + cols_by_orig_name: Dict[str, sa_schema.Column[Any]] = {} + + for col_d in _reflect_info.columns[table_key]: + found_table = True + + self._reflect_column( + table, + col_d, + include_columns, + exclude_columns, + cols_by_orig_name, + ) + + # NOTE: support tables/views with no columns + if not found_table and not self.has_table(table_name, schema): + raise exc.NoSuchTableError(table_name) + + self._reflect_pk( + _reflect_info, table_key, table, cols_by_orig_name, exclude_columns + ) + + self._reflect_fk( + _reflect_info, + table_key, + table, + cols_by_orig_name, + include_columns, + exclude_columns, + resolve_fks, + _extend_on, + reflection_options, + ) + + self._reflect_indexes( + _reflect_info, + table_key, + table, + cols_by_orig_name, + include_columns, + exclude_columns, + reflection_options, + ) + + self._reflect_unique_constraints( + _reflect_info, + table_key, + table, + cols_by_orig_name, + include_columns, + exclude_columns, + reflection_options, + ) + + self._reflect_check_constraints( + _reflect_info, + table_key, + table, + cols_by_orig_name, + include_columns, + exclude_columns, + reflection_options, + ) + + self._reflect_table_comment( + _reflect_info, + table_key, + table, + reflection_options, + ) + + def _reflect_column( + self, + table: sa_schema.Table, + col_d: ReflectedColumn, + include_columns: Optional[Collection[str]], + exclude_columns: Collection[str], + cols_by_orig_name: Dict[str, sa_schema.Column[Any]], + ) -> None: + orig_name = col_d["name"] + + table.metadata.dispatch.column_reflect(self, table, col_d) + table.dispatch.column_reflect(self, table, col_d) + + # fetch name again as column_reflect is allowed to + # change it + name = col_d["name"] + if (include_columns and name not in include_columns) or ( + exclude_columns and name in exclude_columns + ): + return + + coltype = col_d["type"] + + col_kw = { + k: col_d[k] # type: ignore[literal-required] + for k in [ + "nullable", + "autoincrement", + "quote", + "info", + "key", + "comment", + ] + if k in col_d + } + + if "dialect_options" in col_d: + col_kw.update(col_d["dialect_options"]) + + colargs = [] + default: Any + if col_d.get("default") is not None: + default_text = col_d["default"] + assert default_text is not None + if isinstance(default_text, TextClause): + default = sa_schema.DefaultClause( + default_text, _reflected=True + ) + elif not isinstance(default_text, sa_schema.FetchedValue): + default = sa_schema.DefaultClause( + sql.text(default_text), _reflected=True + ) + else: + default = default_text + colargs.append(default) + + if "computed" in 
col_d: + computed = sa_schema.Computed(**col_d["computed"]) + colargs.append(computed) + + if "identity" in col_d: + identity = sa_schema.Identity(**col_d["identity"]) + colargs.append(identity) + + cols_by_orig_name[orig_name] = col = sa_schema.Column( + name, coltype, *colargs, **col_kw + ) + + if col.key in table.primary_key: + col.primary_key = True + table.append_column(col, replace_existing=True) + + def _reflect_pk( + self, + _reflect_info: _ReflectionInfo, + table_key: TableKey, + table: sa_schema.Table, + cols_by_orig_name: Dict[str, sa_schema.Column[Any]], + exclude_columns: Collection[str], + ) -> None: + pk_cons = _reflect_info.pk_constraint.get(table_key) + if pk_cons: + pk_cols = [ + cols_by_orig_name[pk] + for pk in pk_cons["constrained_columns"] + if pk in cols_by_orig_name and pk not in exclude_columns + ] + + # update pk constraint name, comment and dialect_kwargs + table.primary_key.name = pk_cons.get("name") + table.primary_key.comment = pk_cons.get("comment", None) + dialect_options = pk_cons.get("dialect_options") + if dialect_options: + table.primary_key.dialect_kwargs.update(dialect_options) + + # tell the PKConstraint to re-initialize + # its column collection + table.primary_key._reload(pk_cols) + + def _reflect_fk( + self, + _reflect_info: _ReflectionInfo, + table_key: TableKey, + table: sa_schema.Table, + cols_by_orig_name: Dict[str, sa_schema.Column[Any]], + include_columns: Optional[Collection[str]], + exclude_columns: Collection[str], + resolve_fks: bool, + _extend_on: Optional[Set[sa_schema.Table]], + reflection_options: Dict[str, Any], + ) -> None: + fkeys = _reflect_info.foreign_keys.get(table_key, []) + for fkey_d in fkeys: + conname = fkey_d["name"] + # look for columns by orig name in cols_by_orig_name, + # but support columns that are in-Python only as fallback + constrained_columns = [ + cols_by_orig_name[c].key if c in cols_by_orig_name else c + for c in fkey_d["constrained_columns"] + ] + + if ( + exclude_columns + and set(constrained_columns).intersection(exclude_columns) + or ( + include_columns + and set(constrained_columns).difference(include_columns) + ) + ): + continue + + referred_schema = fkey_d["referred_schema"] + referred_table = fkey_d["referred_table"] + referred_columns = fkey_d["referred_columns"] + refspec = [] + if referred_schema is not None: + if resolve_fks: + sa_schema.Table( + referred_table, + table.metadata, + schema=referred_schema, + autoload_with=self.bind, + _extend_on=_extend_on, + _reflect_info=_reflect_info, + **reflection_options, + ) + for column in referred_columns: + refspec.append( + ".".join([referred_schema, referred_table, column]) + ) + else: + if resolve_fks: + sa_schema.Table( + referred_table, + table.metadata, + autoload_with=self.bind, + schema=sa_schema.BLANK_SCHEMA, + _extend_on=_extend_on, + _reflect_info=_reflect_info, + **reflection_options, + ) + for column in referred_columns: + refspec.append(".".join([referred_table, column])) + if "options" in fkey_d: + options = fkey_d["options"] + else: + options = {} + + try: + table.append_constraint( + sa_schema.ForeignKeyConstraint( + constrained_columns, + refspec, + conname, + link_to_name=True, + comment=fkey_d.get("comment"), + **options, + ) + ) + except exc.ConstraintColumnNotFoundError: + util.warn( + f"On reflected table {table.name}, skipping reflection of " + "foreign key constraint " + f"{conname}; one or more subject columns within " + f"name(s) {', '.join(constrained_columns)} are not " + "present in the table" + ) + + _index_sort_exprs = { + 
"asc": operators.asc_op, + "desc": operators.desc_op, + "nulls_first": operators.nulls_first_op, + "nulls_last": operators.nulls_last_op, + } + + def _reflect_indexes( + self, + _reflect_info: _ReflectionInfo, + table_key: TableKey, + table: sa_schema.Table, + cols_by_orig_name: Dict[str, sa_schema.Column[Any]], + include_columns: Optional[Collection[str]], + exclude_columns: Collection[str], + reflection_options: Dict[str, Any], + ) -> None: + # Indexes + indexes = _reflect_info.indexes.get(table_key, []) + for index_d in indexes: + name = index_d["name"] + columns = index_d["column_names"] + expressions = index_d.get("expressions") + column_sorting = index_d.get("column_sorting", {}) + unique = index_d["unique"] + flavor = index_d.get("type", "index") + dialect_options = index_d.get("dialect_options", {}) + + duplicates = index_d.get("duplicates_constraint") + if include_columns and not set(columns).issubset(include_columns): + continue + if duplicates: + continue + # look for columns by orig name in cols_by_orig_name, + # but support columns that are in-Python only as fallback + idx_element: Any + idx_elements = [] + for index, c in enumerate(columns): + if c is None: + if not expressions: + util.warn( + f"Skipping {flavor} {name!r} because key " + f"{index + 1} reflected as None but no " + "'expressions' were returned" + ) + break + idx_element = sql.text(expressions[index]) + else: + try: + if c in cols_by_orig_name: + idx_element = cols_by_orig_name[c] + else: + idx_element = table.c[c] + except KeyError: + util.warn( + f"{flavor} key {c!r} was not located in " + f"columns for table {table.name!r}" + ) + continue + for option in column_sorting.get(c, ()): + if option in self._index_sort_exprs: + op = self._index_sort_exprs[option] + idx_element = op(idx_element) + idx_elements.append(idx_element) + else: + sa_schema.Index( + name, + *idx_elements, + _table=table, + unique=unique, + **dialect_options, + ) + + def _reflect_unique_constraints( + self, + _reflect_info: _ReflectionInfo, + table_key: TableKey, + table: sa_schema.Table, + cols_by_orig_name: Dict[str, sa_schema.Column[Any]], + include_columns: Optional[Collection[str]], + exclude_columns: Collection[str], + reflection_options: Dict[str, Any], + ) -> None: + constraints = _reflect_info.unique_constraints.get(table_key, []) + # Unique Constraints + for const_d in constraints: + conname = const_d["name"] + columns = const_d["column_names"] + comment = const_d.get("comment") + duplicates = const_d.get("duplicates_index") + dialect_options = const_d.get("dialect_options", {}) + if include_columns and not set(columns).issubset(include_columns): + continue + if duplicates: + continue + # look for columns by orig name in cols_by_orig_name, + # but support columns that are in-Python only as fallback + constrained_cols = [] + for c in columns: + try: + constrained_col = ( + cols_by_orig_name[c] + if c in cols_by_orig_name + else table.c[c] + ) + except KeyError: + util.warn( + "unique constraint key '%s' was not located in " + "columns for table '%s'" % (c, table.name) + ) + else: + constrained_cols.append(constrained_col) + table.append_constraint( + sa_schema.UniqueConstraint( + *constrained_cols, + name=conname, + comment=comment, + **dialect_options, + ) + ) + + def _reflect_check_constraints( + self, + _reflect_info: _ReflectionInfo, + table_key: TableKey, + table: sa_schema.Table, + cols_by_orig_name: Dict[str, sa_schema.Column[Any]], + include_columns: Optional[Collection[str]], + exclude_columns: Collection[str], + 
reflection_options: Dict[str, Any], + ) -> None: + constraints = _reflect_info.check_constraints.get(table_key, []) + for const_d in constraints: + table.append_constraint(sa_schema.CheckConstraint(**const_d)) + + def _reflect_table_comment( + self, + _reflect_info: _ReflectionInfo, + table_key: TableKey, + table: sa_schema.Table, + reflection_options: Dict[str, Any], + ) -> None: + comment_dict = _reflect_info.table_comment.get(table_key) + if comment_dict: + table.comment = comment_dict["text"] + + def _get_reflection_info( + self, + schema: Optional[str] = None, + filter_names: Optional[Collection[str]] = None, + available: Optional[Collection[str]] = None, + _reflect_info: Optional[_ReflectionInfo] = None, + **kw: Any, + ) -> _ReflectionInfo: + kw["schema"] = schema + + if filter_names and available and len(filter_names) > 100: + fraction = len(filter_names) / len(available) + else: + fraction = None + + unreflectable: Dict[TableKey, exc.UnreflectableTableError] + kw["unreflectable"] = unreflectable = {} + + has_result: bool = True + + def run( + meth: Any, + *, + optional: bool = False, + check_filter_names_from_meth: bool = False, + ) -> Any: + nonlocal has_result + # simple heuristic to improve reflection performance if a + # dialect implements multi_reflection: + # if more than 50% of the tables in the db are in filter_names + # load all the tables, since it's most likely faster to avoid + # a filter on that many tables. + if ( + fraction is None + or fraction <= 0.5 + or not self.dialect._overrides_default(meth.__name__) + ): + _fn = filter_names + else: + _fn = None + try: + if has_result: + res = meth(filter_names=_fn, **kw) + if check_filter_names_from_meth and not res: + # method returned no result data. + # skip any future call methods + has_result = False + else: + res = {} + except NotImplementedError: + if not optional: + raise + res = {} + return res + + info = _ReflectionInfo( + columns=run( + self.get_multi_columns, check_filter_names_from_meth=True + ), + pk_constraint=run(self.get_multi_pk_constraint), + foreign_keys=run(self.get_multi_foreign_keys), + indexes=run(self.get_multi_indexes), + unique_constraints=run( + self.get_multi_unique_constraints, optional=True + ), + table_comment=run(self.get_multi_table_comment, optional=True), + check_constraints=run( + self.get_multi_check_constraints, optional=True + ), + table_options=run(self.get_multi_table_options, optional=True), + unreflectable=unreflectable, + ) + if _reflect_info: + _reflect_info.update(info) + return _reflect_info + else: + return info + + +@final +class ReflectionDefaults: + """provides blank default values for reflection methods.""" + + @classmethod + def columns(cls) -> List[ReflectedColumn]: + return [] + + @classmethod + def pk_constraint(cls) -> ReflectedPrimaryKeyConstraint: + return { + "name": None, + "constrained_columns": [], + } + + @classmethod + def foreign_keys(cls) -> List[ReflectedForeignKeyConstraint]: + return [] + + @classmethod + def indexes(cls) -> List[ReflectedIndex]: + return [] + + @classmethod + def unique_constraints(cls) -> List[ReflectedUniqueConstraint]: + return [] + + @classmethod + def check_constraints(cls) -> List[ReflectedCheckConstraint]: + return [] + + @classmethod + def table_options(cls) -> Dict[str, Any]: + return {} + + @classmethod + def table_comment(cls) -> ReflectedTableComment: + return {"text": None} + + +@dataclass +class _ReflectionInfo: + columns: Dict[TableKey, List[ReflectedColumn]] + pk_constraint: Dict[TableKey, 
Optional[ReflectedPrimaryKeyConstraint]] + foreign_keys: Dict[TableKey, List[ReflectedForeignKeyConstraint]] + indexes: Dict[TableKey, List[ReflectedIndex]] + # optionals + unique_constraints: Dict[TableKey, List[ReflectedUniqueConstraint]] + table_comment: Dict[TableKey, Optional[ReflectedTableComment]] + check_constraints: Dict[TableKey, List[ReflectedCheckConstraint]] + table_options: Dict[TableKey, Dict[str, Any]] + unreflectable: Dict[TableKey, exc.UnreflectableTableError] + + def update(self, other: _ReflectionInfo) -> None: + for k, v in self.__dict__.items(): + ov = getattr(other, k) + if ov is not None: + if v is None: + setattr(self, k, ov) + else: + v.update(ov) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/engine/result.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/engine/result.py new file mode 100644 index 0000000000000000000000000000000000000000..b84fb3d1cb5e45ffe789b603a6caf5292a3a1bfc --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/engine/result.py @@ -0,0 +1,2387 @@ +# engine/result.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php + +"""Define generic result set constructs.""" + +from __future__ import annotations + +from enum import Enum +import functools +import itertools +import operator +import typing +from typing import Any +from typing import Callable +from typing import cast +from typing import Dict +from typing import Generic +from typing import Iterable +from typing import Iterator +from typing import List +from typing import Mapping +from typing import NoReturn +from typing import Optional +from typing import overload +from typing import Sequence +from typing import Set +from typing import Tuple +from typing import TYPE_CHECKING +from typing import TypeVar +from typing import Union + +from .row import Row +from .row import RowMapping +from .. import exc +from .. import util +from ..sql.base import _generative +from ..sql.base import HasMemoized +from ..sql.base import InPlaceGenerative +from ..util import HasMemoized_ro_memoized_attribute +from ..util import NONE_SET +from ..util._has_cy import HAS_CYEXTENSION +from ..util.typing import Literal +from ..util.typing import Self + +if typing.TYPE_CHECKING or not HAS_CYEXTENSION: + from ._py_row import tuplegetter as tuplegetter +else: + from sqlalchemy.cyextension.resultproxy import tuplegetter as tuplegetter + +if typing.TYPE_CHECKING: + from ..sql.elements import SQLCoreOperations + from ..sql.type_api import _ResultProcessorType + +_KeyType = Union[str, "SQLCoreOperations[Any]"] +_KeyIndexType = Union[_KeyType, int] + +# is overridden in cursor using _CursorKeyMapRecType +_KeyMapRecType = Any + +_KeyMapType = Mapping[_KeyType, _KeyMapRecType] + + +_RowData = Union[Row[Any], RowMapping, Any] +"""A generic form of "row" that accommodates for the different kinds of +"rows" that different result objects return, including row, row mapping, and +scalar values""" + +_RawRowType = Tuple[Any, ...] 
+"""represents the kind of row we get from a DBAPI cursor""" + +_R = TypeVar("_R", bound=_RowData) +_T = TypeVar("_T", bound=Any) +_TP = TypeVar("_TP", bound=Tuple[Any, ...]) + +_InterimRowType = Union[_R, _RawRowType] +"""a catchall "anything" kind of return type that can be applied +across all the result types + +""" + +_InterimSupportsScalarsRowType = Union[Row[Any], Any] + +_ProcessorsType = Sequence[Optional["_ResultProcessorType[Any]"]] +_TupleGetterType = Callable[[Sequence[Any]], Sequence[Any]] +_UniqueFilterType = Callable[[Any], Any] +_UniqueFilterStateType = Tuple[Set[Any], Optional[_UniqueFilterType]] + + +class ResultMetaData: + """Base for metadata about result rows.""" + + __slots__ = () + + _tuplefilter: Optional[_TupleGetterType] = None + _translated_indexes: Optional[Sequence[int]] = None + _unique_filters: Optional[Sequence[Callable[[Any], Any]]] = None + _keymap: _KeyMapType + _keys: Sequence[str] + _processors: Optional[_ProcessorsType] + _key_to_index: Mapping[_KeyType, int] + + @property + def keys(self) -> RMKeyView: + return RMKeyView(self) + + def _has_key(self, key: object) -> bool: + raise NotImplementedError() + + def _for_freeze(self) -> ResultMetaData: + raise NotImplementedError() + + @overload + def _key_fallback( + self, key: Any, err: Optional[Exception], raiseerr: Literal[True] = ... + ) -> NoReturn: ... + + @overload + def _key_fallback( + self, + key: Any, + err: Optional[Exception], + raiseerr: Literal[False] = ..., + ) -> None: ... + + @overload + def _key_fallback( + self, key: Any, err: Optional[Exception], raiseerr: bool = ... + ) -> Optional[NoReturn]: ... + + def _key_fallback( + self, key: Any, err: Optional[Exception], raiseerr: bool = True + ) -> Optional[NoReturn]: + assert raiseerr + raise KeyError(key) from err + + def _raise_for_ambiguous_column_name( + self, rec: _KeyMapRecType + ) -> NoReturn: + raise NotImplementedError( + "ambiguous column name logic is implemented for " + "CursorResultMetaData" + ) + + def _index_for_key( + self, key: _KeyIndexType, raiseerr: bool + ) -> Optional[int]: + raise NotImplementedError() + + def _indexes_for_keys( + self, keys: Sequence[_KeyIndexType] + ) -> Sequence[int]: + raise NotImplementedError() + + def _metadata_for_keys( + self, keys: Sequence[_KeyIndexType] + ) -> Iterator[_KeyMapRecType]: + raise NotImplementedError() + + def _reduce(self, keys: Sequence[_KeyIndexType]) -> ResultMetaData: + raise NotImplementedError() + + def _getter( + self, key: Any, raiseerr: bool = True + ) -> Optional[Callable[[Row[Any]], Any]]: + index = self._index_for_key(key, raiseerr) + + if index is not None: + return operator.itemgetter(index) + else: + return None + + def _row_as_tuple_getter( + self, keys: Sequence[_KeyIndexType] + ) -> _TupleGetterType: + indexes = self._indexes_for_keys(keys) + return tuplegetter(*indexes) + + def _make_key_to_index( + self, keymap: Mapping[_KeyType, Sequence[Any]], index: int + ) -> Mapping[_KeyType, int]: + return { + key: rec[index] + for key, rec in keymap.items() + if rec[index] is not None + } + + def _key_not_found(self, key: Any, attr_error: bool) -> NoReturn: + if key in self._keymap: + # the index must be none in this case + self._raise_for_ambiguous_column_name(self._keymap[key]) + else: + # unknown key + if attr_error: + try: + self._key_fallback(key, None) + except KeyError as ke: + raise AttributeError(ke.args[0]) from ke + else: + self._key_fallback(key, None) + + @property + def _effective_processors(self) -> Optional[_ProcessorsType]: + if not self._processors or 
NONE_SET.issuperset(self._processors): + return None + else: + return self._processors + + +class RMKeyView(typing.KeysView[Any]): + __slots__ = ("_parent", "_keys") + + _parent: ResultMetaData + _keys: Sequence[str] + + def __init__(self, parent: ResultMetaData): + self._parent = parent + self._keys = [k for k in parent._keys if k is not None] + + def __len__(self) -> int: + return len(self._keys) + + def __repr__(self) -> str: + return "{0.__class__.__name__}({0._keys!r})".format(self) + + def __iter__(self) -> Iterator[str]: + return iter(self._keys) + + def __contains__(self, item: Any) -> bool: + if isinstance(item, int): + return False + + # note this also includes special key fallback behaviors + # which also don't seem to be tested in test_resultset right now + return self._parent._has_key(item) + + def __eq__(self, other: Any) -> bool: + return list(other) == list(self) + + def __ne__(self, other: Any) -> bool: + return list(other) != list(self) + + +class SimpleResultMetaData(ResultMetaData): + """result metadata for in-memory collections.""" + + __slots__ = ( + "_keys", + "_keymap", + "_processors", + "_tuplefilter", + "_translated_indexes", + "_unique_filters", + "_key_to_index", + ) + + _keys: Sequence[str] + + def __init__( + self, + keys: Sequence[str], + extra: Optional[Sequence[Any]] = None, + _processors: Optional[_ProcessorsType] = None, + _tuplefilter: Optional[_TupleGetterType] = None, + _translated_indexes: Optional[Sequence[int]] = None, + _unique_filters: Optional[Sequence[Callable[[Any], Any]]] = None, + ): + self._keys = list(keys) + self._tuplefilter = _tuplefilter + self._translated_indexes = _translated_indexes + self._unique_filters = _unique_filters + if extra: + recs_names = [ + ( + (name,) + (extras if extras else ()), + (index, name, extras), + ) + for index, (name, extras) in enumerate(zip(self._keys, extra)) + ] + else: + recs_names = [ + ((name,), (index, name, ())) + for index, name in enumerate(self._keys) + ] + + self._keymap = {key: rec for keys, rec in recs_names for key in keys} + + self._processors = _processors + + self._key_to_index = self._make_key_to_index(self._keymap, 0) + + def _has_key(self, key: object) -> bool: + return key in self._keymap + + def _for_freeze(self) -> ResultMetaData: + unique_filters = self._unique_filters + if unique_filters and self._tuplefilter: + unique_filters = self._tuplefilter(unique_filters) + + # TODO: are we freezing the result with or without uniqueness + # applied? 
+ return SimpleResultMetaData( + self._keys, + extra=[self._keymap[key][2] for key in self._keys], + _unique_filters=unique_filters, + ) + + def __getstate__(self) -> Dict[str, Any]: + return { + "_keys": self._keys, + "_translated_indexes": self._translated_indexes, + } + + def __setstate__(self, state: Dict[str, Any]) -> None: + if state["_translated_indexes"]: + _translated_indexes = state["_translated_indexes"] + _tuplefilter = tuplegetter(*_translated_indexes) + else: + _translated_indexes = _tuplefilter = None + self.__init__( # type: ignore + state["_keys"], + _translated_indexes=_translated_indexes, + _tuplefilter=_tuplefilter, + ) + + def _index_for_key(self, key: Any, raiseerr: bool = True) -> int: + if int in key.__class__.__mro__: + key = self._keys[key] + try: + rec = self._keymap[key] + except KeyError as ke: + rec = self._key_fallback(key, ke, raiseerr) + + return rec[0] # type: ignore[no-any-return] + + def _indexes_for_keys(self, keys: Sequence[Any]) -> Sequence[int]: + return [self._keymap[key][0] for key in keys] + + def _metadata_for_keys( + self, keys: Sequence[Any] + ) -> Iterator[_KeyMapRecType]: + for key in keys: + if int in key.__class__.__mro__: + key = self._keys[key] + + try: + rec = self._keymap[key] + except KeyError as ke: + rec = self._key_fallback(key, ke, True) + + yield rec + + def _reduce(self, keys: Sequence[Any]) -> ResultMetaData: + try: + metadata_for_keys = [ + self._keymap[ + self._keys[key] if int in key.__class__.__mro__ else key + ] + for key in keys + ] + except KeyError as ke: + self._key_fallback(ke.args[0], ke, True) + + indexes: Sequence[int] + new_keys: Sequence[str] + extra: Sequence[Any] + indexes, new_keys, extra = zip(*metadata_for_keys) + + if self._translated_indexes: + indexes = [self._translated_indexes[idx] for idx in indexes] + + tup = tuplegetter(*indexes) + + new_metadata = SimpleResultMetaData( + new_keys, + extra=extra, + _tuplefilter=tup, + _translated_indexes=indexes, + _processors=self._processors, + _unique_filters=self._unique_filters, + ) + + return new_metadata + + +def result_tuple( + fields: Sequence[str], extra: Optional[Any] = None +) -> Callable[[Iterable[Any]], Row[Any]]: + parent = SimpleResultMetaData(fields, extra) + return functools.partial( + Row, parent, parent._effective_processors, parent._key_to_index + ) + + +# a symbol that indicates to internal Result methods that +# "no row is returned". We can't use None for those cases where a scalar +# filter is applied to rows. 
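+# (For example, a result filtered with scalars() over a nullable column can
+# yield None as a legitimate value, so a distinct sentinel is needed to
+# signal "no more rows".)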
+class _NoRow(Enum): + _NO_ROW = 0 + + +_NO_ROW = _NoRow._NO_ROW + + +class ResultInternal(InPlaceGenerative, Generic[_R]): + __slots__ = () + + _real_result: Optional[Result[Any]] = None + _generate_rows: bool = True + _row_logging_fn: Optional[Callable[[Any], Any]] + + _unique_filter_state: Optional[_UniqueFilterStateType] = None + _post_creational_filter: Optional[Callable[[Any], Any]] = None + _is_cursor = False + + _metadata: ResultMetaData + + _source_supports_scalars: bool + + def _fetchiter_impl(self) -> Iterator[_InterimRowType[Row[Any]]]: + raise NotImplementedError() + + def _fetchone_impl( + self, hard_close: bool = False + ) -> Optional[_InterimRowType[Row[Any]]]: + raise NotImplementedError() + + def _fetchmany_impl( + self, size: Optional[int] = None + ) -> List[_InterimRowType[Row[Any]]]: + raise NotImplementedError() + + def _fetchall_impl(self) -> List[_InterimRowType[Row[Any]]]: + raise NotImplementedError() + + def _soft_close(self, hard: bool = False) -> None: + raise NotImplementedError() + + @HasMemoized_ro_memoized_attribute + def _row_getter(self) -> Optional[Callable[..., _R]]: + real_result: Result[Any] = ( + self._real_result + if self._real_result + else cast("Result[Any]", self) + ) + + if real_result._source_supports_scalars: + if not self._generate_rows: + return None + else: + _proc = Row + + def process_row( + metadata: ResultMetaData, + processors: Optional[_ProcessorsType], + key_to_index: Mapping[_KeyType, int], + scalar_obj: Any, + ) -> Row[Any]: + return _proc( + metadata, processors, key_to_index, (scalar_obj,) + ) + + else: + process_row = Row # type: ignore + + metadata = self._metadata + + key_to_index = metadata._key_to_index + processors = metadata._effective_processors + tf = metadata._tuplefilter + + if tf and not real_result._source_supports_scalars: + if processors: + processors = tf(processors) + + _make_row_orig: Callable[..., _R] = functools.partial( # type: ignore # noqa E501 + process_row, metadata, processors, key_to_index + ) + + fixed_tf = tf + + def make_row(row: _InterimRowType[Row[Any]]) -> _R: + return _make_row_orig(fixed_tf(row)) + + else: + make_row = functools.partial( # type: ignore + process_row, metadata, processors, key_to_index + ) + + if real_result._row_logging_fn: + _log_row = real_result._row_logging_fn + _make_row = make_row + + def make_row(row: _InterimRowType[Row[Any]]) -> _R: + return _log_row(_make_row(row)) # type: ignore + + return make_row + + @HasMemoized_ro_memoized_attribute + def _iterator_getter(self) -> Callable[..., Iterator[_R]]: + make_row = self._row_getter + + post_creational_filter = self._post_creational_filter + + if self._unique_filter_state: + uniques, strategy = self._unique_strategy + + def iterrows(self: Result[Any]) -> Iterator[_R]: + for raw_row in self._fetchiter_impl(): + obj: _InterimRowType[Any] = ( + make_row(raw_row) if make_row else raw_row + ) + hashed = strategy(obj) if strategy else obj + if hashed in uniques: + continue + uniques.add(hashed) + if post_creational_filter: + obj = post_creational_filter(obj) + yield obj # type: ignore + + else: + + def iterrows(self: Result[Any]) -> Iterator[_R]: + for raw_row in self._fetchiter_impl(): + row: _InterimRowType[Any] = ( + make_row(raw_row) if make_row else raw_row + ) + if post_creational_filter: + row = post_creational_filter(row) + yield row # type: ignore + + return iterrows + + def _raw_all_rows(self) -> List[_R]: + make_row = self._row_getter + assert make_row is not None + rows = self._fetchall_impl() + return [make_row(row) 
for row in rows] + + def _allrows(self) -> List[_R]: + post_creational_filter = self._post_creational_filter + + make_row = self._row_getter + + rows = self._fetchall_impl() + made_rows: List[_InterimRowType[_R]] + if make_row: + made_rows = [make_row(row) for row in rows] + else: + made_rows = rows # type: ignore + + interim_rows: List[_R] + + if self._unique_filter_state: + uniques, strategy = self._unique_strategy + + interim_rows = [ + made_row # type: ignore + for made_row, sig_row in [ + ( + made_row, + strategy(made_row) if strategy else made_row, + ) + for made_row in made_rows + ] + if sig_row not in uniques and not uniques.add(sig_row) # type: ignore # noqa: E501 + ] + else: + interim_rows = made_rows # type: ignore + + if post_creational_filter: + interim_rows = [ + post_creational_filter(row) for row in interim_rows + ] + return interim_rows + + @HasMemoized_ro_memoized_attribute + def _onerow_getter( + self, + ) -> Callable[..., Union[Literal[_NoRow._NO_ROW], _R]]: + make_row = self._row_getter + + post_creational_filter = self._post_creational_filter + + if self._unique_filter_state: + uniques, strategy = self._unique_strategy + + def onerow(self: Result[Any]) -> Union[_NoRow, _R]: + _onerow = self._fetchone_impl + while True: + row = _onerow() + if row is None: + return _NO_ROW + else: + obj: _InterimRowType[Any] = ( + make_row(row) if make_row else row + ) + hashed = strategy(obj) if strategy else obj + if hashed in uniques: + continue + else: + uniques.add(hashed) + if post_creational_filter: + obj = post_creational_filter(obj) + return obj # type: ignore + + else: + + def onerow(self: Result[Any]) -> Union[_NoRow, _R]: + row = self._fetchone_impl() + if row is None: + return _NO_ROW + else: + interim_row: _InterimRowType[Any] = ( + make_row(row) if make_row else row + ) + if post_creational_filter: + interim_row = post_creational_filter(interim_row) + return interim_row # type: ignore + + return onerow + + @HasMemoized_ro_memoized_attribute + def _manyrow_getter(self) -> Callable[..., List[_R]]: + make_row = self._row_getter + + post_creational_filter = self._post_creational_filter + + if self._unique_filter_state: + uniques, strategy = self._unique_strategy + + def filterrows( + make_row: Optional[Callable[..., _R]], + rows: List[Any], + strategy: Optional[Callable[[List[Any]], Any]], + uniques: Set[Any], + ) -> List[_R]: + if make_row: + rows = [make_row(row) for row in rows] + + if strategy: + made_rows = ( + (made_row, strategy(made_row)) for made_row in rows + ) + else: + made_rows = ((made_row, made_row) for made_row in rows) + return [ + made_row + for made_row, sig_row in made_rows + if sig_row not in uniques and not uniques.add(sig_row) # type: ignore # noqa: E501 + ] + + def manyrows( + self: ResultInternal[_R], num: Optional[int] + ) -> List[_R]: + collect: List[_R] = [] + + _manyrows = self._fetchmany_impl + + if num is None: + # if None is passed, we don't know the default + # manyrows number, DBAPI has this as cursor.arraysize + # different DBAPIs / fetch strategies may be different. + # do a fetch to find what the number is. if there are + # only fewer rows left, then it doesn't matter. 
+ real_result = ( + self._real_result + if self._real_result + else cast("Result[Any]", self) + ) + if real_result._yield_per: + num_required = num = real_result._yield_per + else: + rows = _manyrows(num) + num = len(rows) + assert make_row is not None + collect.extend( + filterrows(make_row, rows, strategy, uniques) + ) + num_required = num - len(collect) + else: + num_required = num + + assert num is not None + + while num_required: + rows = _manyrows(num_required) + if not rows: + break + + collect.extend( + filterrows(make_row, rows, strategy, uniques) + ) + num_required = num - len(collect) + + if post_creational_filter: + collect = [post_creational_filter(row) for row in collect] + return collect + + else: + + def manyrows( + self: ResultInternal[_R], num: Optional[int] + ) -> List[_R]: + if num is None: + real_result = ( + self._real_result + if self._real_result + else cast("Result[Any]", self) + ) + num = real_result._yield_per + + rows: List[_InterimRowType[Any]] = self._fetchmany_impl(num) + if make_row: + rows = [make_row(row) for row in rows] + if post_creational_filter: + rows = [post_creational_filter(row) for row in rows] + return rows # type: ignore + + return manyrows + + @overload + def _only_one_row( + self: ResultInternal[Row[Any]], + raise_for_second_row: bool, + raise_for_none: bool, + scalar: Literal[True], + ) -> Any: ... + + @overload + def _only_one_row( + self, + raise_for_second_row: bool, + raise_for_none: Literal[True], + scalar: bool, + ) -> _R: ... + + @overload + def _only_one_row( + self, + raise_for_second_row: bool, + raise_for_none: bool, + scalar: bool, + ) -> Optional[_R]: ... + + def _only_one_row( + self, + raise_for_second_row: bool, + raise_for_none: bool, + scalar: bool, + ) -> Optional[_R]: + onerow = self._fetchone_impl + + row: Optional[_InterimRowType[Any]] = onerow(hard_close=True) + if row is None: + if raise_for_none: + raise exc.NoResultFound( + "No row was found when one was required" + ) + else: + return None + + if scalar and self._source_supports_scalars: + self._generate_rows = False + make_row = None + else: + make_row = self._row_getter + + try: + row = make_row(row) if make_row else row + except: + self._soft_close(hard=True) + raise + + if raise_for_second_row: + if self._unique_filter_state: + # for no second row but uniqueness, need to essentially + # consume the entire result :( + uniques, strategy = self._unique_strategy + + existing_row_hash = strategy(row) if strategy else row + + while True: + next_row: Any = onerow(hard_close=True) + if next_row is None: + next_row = _NO_ROW + break + + try: + next_row = make_row(next_row) if make_row else next_row + + if strategy: + assert next_row is not _NO_ROW + if existing_row_hash == strategy(next_row): + continue + elif row == next_row: + continue + # here, we have a row and it's different + break + except: + self._soft_close(hard=True) + raise + else: + next_row = onerow(hard_close=True) + if next_row is None: + next_row = _NO_ROW + + if next_row is not _NO_ROW: + self._soft_close(hard=True) + raise exc.MultipleResultsFound( + "Multiple rows were found when exactly one was required" + if raise_for_none + else "Multiple rows were found when one or none " + "was required" + ) + else: + # if we checked for second row then that would have + # closed us :) + self._soft_close(hard=True) + + if not scalar: + post_creational_filter = self._post_creational_filter + if post_creational_filter: + row = post_creational_filter(row) + + if scalar and make_row: + return row[0] # type: ignore + 
else: + return row # type: ignore + + def _iter_impl(self) -> Iterator[_R]: + return self._iterator_getter(self) + + def _next_impl(self) -> _R: + row = self._onerow_getter(self) + if row is _NO_ROW: + raise StopIteration() + else: + return row + + @_generative + def _column_slices(self, indexes: Sequence[_KeyIndexType]) -> Self: + real_result = ( + self._real_result + if self._real_result + else cast("Result[Any]", self) + ) + + if not real_result._source_supports_scalars or len(indexes) != 1: + self._metadata = self._metadata._reduce(indexes) + + assert self._generate_rows + + return self + + @HasMemoized.memoized_attribute + def _unique_strategy(self) -> _UniqueFilterStateType: + assert self._unique_filter_state is not None + uniques, strategy = self._unique_filter_state + + real_result = ( + self._real_result + if self._real_result is not None + else cast("Result[Any]", self) + ) + + if not strategy and self._metadata._unique_filters: + if ( + real_result._source_supports_scalars + and not self._generate_rows + ): + strategy = self._metadata._unique_filters[0] + else: + filters = self._metadata._unique_filters + if self._metadata._tuplefilter: + filters = self._metadata._tuplefilter(filters) + + strategy = operator.methodcaller("_filter_on_values", filters) + return uniques, strategy + + +class _WithKeys: + __slots__ = () + + _metadata: ResultMetaData + + # used mainly to share documentation on the keys method. + def keys(self) -> RMKeyView: + """Return an iterable view which yields the string keys that would + be represented by each :class:`_engine.Row`. + + The keys can represent the labels of the columns returned by a core + statement or the names of the orm classes returned by an orm + execution. + + The view also can be tested for key containment using the Python + ``in`` operator, which will test both for the string keys represented + in the view, as well as for alternate keys such as column objects. + + .. versionchanged:: 1.4 a key view object is returned rather than a + plain list. + + + """ + return self._metadata.keys + + +class Result(_WithKeys, ResultInternal[Row[_TP]]): + """Represent a set of database results. + + .. versionadded:: 1.4 The :class:`_engine.Result` object provides a + completely updated usage model and calling facade for SQLAlchemy + Core and SQLAlchemy ORM. In Core, it forms the basis of the + :class:`_engine.CursorResult` object which replaces the previous + :class:`_engine.ResultProxy` interface. When using the ORM, a + higher level object called :class:`_engine.ChunkedIteratorResult` + is normally used. + + .. note:: In SQLAlchemy 1.4 and above, this object is + used for ORM results returned by :meth:`_orm.Session.execute`, which can + yield instances of ORM mapped objects either individually or within + tuple-like rows. Note that the :class:`_engine.Result` object does not + deduplicate instances or rows automatically as is the case with the + legacy :class:`_orm.Query` object. For in-Python de-duplication of + instances or rows, use the :meth:`_engine.Result.unique` modifier + method. + + .. 
seealso:: + + :ref:`tutorial_fetching_rows` - in the :doc:`/tutorial/index` + + """ + + __slots__ = ("_metadata", "__dict__") + + _row_logging_fn: Optional[Callable[[Row[Any]], Row[Any]]] = None + + _source_supports_scalars: bool = False + + _yield_per: Optional[int] = None + + _attributes: util.immutabledict[Any, Any] = util.immutabledict() + + def __init__(self, cursor_metadata: ResultMetaData): + self._metadata = cursor_metadata + + def __enter__(self) -> Self: + return self + + def __exit__(self, type_: Any, value: Any, traceback: Any) -> None: + self.close() + + def close(self) -> None: + """close this :class:`_engine.Result`. + + The behavior of this method is implementation specific, and is + not implemented by default. The method should generally end + the resources in use by the result object and also cause any + subsequent iteration or row fetching to raise + :class:`.ResourceClosedError`. + + .. versionadded:: 1.4.27 - ``.close()`` was previously not generally + available for all :class:`_engine.Result` classes, instead only + being available on the :class:`_engine.CursorResult` returned for + Core statement executions. As most other result objects, namely the + ones used by the ORM, are proxying a :class:`_engine.CursorResult` + in any case, this allows the underlying cursor result to be closed + from the outside facade for the case when the ORM query is using + the ``yield_per`` execution option where it does not immediately + exhaust and autoclose the database cursor. + + """ + self._soft_close(hard=True) + + @property + def _soft_closed(self) -> bool: + raise NotImplementedError() + + @property + def closed(self) -> bool: + """return ``True`` if this :class:`_engine.Result` reports .closed + + .. versionadded:: 1.4.43 + + """ + raise NotImplementedError() + + @_generative + def yield_per(self, num: int) -> Self: + """Configure the row-fetching strategy to fetch ``num`` rows at a time. + + This impacts the underlying behavior of the result when iterating over + the result object, or otherwise making use of methods such as + :meth:`_engine.Result.fetchone` that return one row at a time. Data + from the underlying cursor or other data source will be buffered up to + this many rows in memory, and the buffered collection will then be + yielded out one row at a time or as many rows are requested. Each time + the buffer clears, it will be refreshed to this many rows or as many + rows remain if fewer remain. + + The :meth:`_engine.Result.yield_per` method is generally used in + conjunction with the + :paramref:`_engine.Connection.execution_options.stream_results` + execution option, which will allow the database dialect in use to make + use of a server side cursor, if the DBAPI supports a specific "server + side cursor" mode separate from its default mode of operation. + + .. tip:: + + Consider using the + :paramref:`_engine.Connection.execution_options.yield_per` + execution option, which will simultaneously set + :paramref:`_engine.Connection.execution_options.stream_results` + to ensure the use of server side cursors, as well as automatically + invoke the :meth:`_engine.Result.yield_per` method to establish + a fixed row buffer size at once. + + The :paramref:`_engine.Connection.execution_options.yield_per` + execution option is available for ORM operations, with + :class:`_orm.Session`-oriented use described at + :ref:`orm_queryguide_yield_per`. The Core-only version which works + with :class:`_engine.Connection` is new as of SQLAlchemy 1.4.40. + + .. 
versionadded:: 1.4 + + :param num: number of rows to fetch each time the buffer is refilled. + If set to a value below 1, fetches all rows for the next buffer. + + .. seealso:: + + :ref:`engine_stream_results` - describes Core behavior for + :meth:`_engine.Result.yield_per` + + :ref:`orm_queryguide_yield_per` - in the :ref:`queryguide_toplevel` + + """ + self._yield_per = num + return self + + @_generative + def unique(self, strategy: Optional[_UniqueFilterType] = None) -> Self: + """Apply unique filtering to the objects returned by this + :class:`_engine.Result`. + + When this filter is applied with no arguments, the rows or objects + returned will be filtered such that each row is returned uniquely. The + algorithm used to determine this uniqueness is by default the Python + hashing identity of the whole tuple. In some cases a specialized + per-entity hashing scheme may be used, such as when using the ORM, a + scheme is applied which works against the primary key identity of + returned objects. + + The unique filter is applied **after all other filters**, which means + if the columns returned have been refined using a method such as the + :meth:`_engine.Result.columns` or :meth:`_engine.Result.scalars` + method, the uniquing is applied to **only the column or columns + returned**. This occurs regardless of the order in which these + methods have been called upon the :class:`_engine.Result` object. + + The unique filter also changes the calculus used for methods like + :meth:`_engine.Result.fetchmany` and :meth:`_engine.Result.partitions`. + When using :meth:`_engine.Result.unique`, these methods will continue + to yield the number of rows or objects requested, after uniquing + has been applied. However, this necessarily impacts the buffering + behavior of the underlying cursor or datasource, such that multiple + underlying calls to ``cursor.fetchmany()`` may be necessary in order + to accumulate enough objects in order to provide a unique collection + of the requested size. + + :param strategy: a callable that will be applied to rows or objects + being iterated, which should return an object that represents the + unique value of the row. A Python ``set()`` is used to store + these identities. If not passed, a default uniqueness strategy + is used which may have been assembled by the source of this + :class:`_engine.Result` object. + + """ + self._unique_filter_state = (set(), strategy) + return self + + def columns(self, *col_expressions: _KeyIndexType) -> Self: + r"""Establish the columns that should be returned in each row. + + This method may be used to limit the columns returned as well + as to reorder them. The given list of expressions are normally + a series of integers or string key names. They may also be + appropriate :class:`.ColumnElement` objects which correspond to + a given statement construct. + + .. versionchanged:: 2.0 Due to a bug in 1.4, the + :meth:`_engine.Result.columns` method had an incorrect behavior + where calling upon the method with just one index would cause the + :class:`_engine.Result` object to yield scalar values rather than + :class:`_engine.Row` objects. In version 2.0, this behavior + has been corrected such that calling upon + :meth:`_engine.Result.columns` with a single index will + produce a :class:`_engine.Result` object that continues + to yield :class:`_engine.Row` objects, which include + only a single column.
+ + E.g.:: + + statement = select(table.c.x, table.c.y, table.c.z) + result = connection.execute(statement) + + for z, y in result.columns("z", "y"): + ... + + Example of using the column objects from the statement itself:: + + for z, y in result.columns( + statement.selected_columns.c.z, statement.selected_columns.c.y + ): + ... + + .. versionadded:: 1.4 + + :param \*col_expressions: indicates columns to be returned. Elements + may be integer row indexes, string column names, or appropriate + :class:`.ColumnElement` objects corresponding to a select construct. + + :return: this :class:`_engine.Result` object with the modifications + given. + + """ + return self._column_slices(col_expressions) + + @overload + def scalars(self: Result[Tuple[_T]]) -> ScalarResult[_T]: ... + + @overload + def scalars( + self: Result[Tuple[_T]], index: Literal[0] + ) -> ScalarResult[_T]: ... + + @overload + def scalars(self, index: _KeyIndexType = 0) -> ScalarResult[Any]: ... + + def scalars(self, index: _KeyIndexType = 0) -> ScalarResult[Any]: + """Return a :class:`_engine.ScalarResult` filtering object which + will return single elements rather than :class:`_row.Row` objects. + + E.g.:: + + >>> result = conn.execute(text("select int_id from table")) + >>> result.scalars().all() + [1, 2, 3] + + When results are fetched from the :class:`_engine.ScalarResult` + filtering object, the single column-row that would be returned by the + :class:`_engine.Result` is instead returned as the column's value. + + .. versionadded:: 1.4 + + :param index: integer or row key indicating the column to be fetched + from each row, defaults to ``0`` indicating the first column. + + :return: a new :class:`_engine.ScalarResult` filtering object referring + to this :class:`_engine.Result` object. + + """ + return ScalarResult(self, index) + + def _getter( + self, key: _KeyIndexType, raiseerr: bool = True + ) -> Optional[Callable[[Row[Any]], Any]]: + """return a callable that will retrieve the given key from a + :class:`_engine.Row`. + + """ + if self._source_supports_scalars: + raise NotImplementedError( + "can't use this function in 'only scalars' mode" + ) + return self._metadata._getter(key, raiseerr) + + def _tuple_getter(self, keys: Sequence[_KeyIndexType]) -> _TupleGetterType: + """return a callable that will retrieve the given keys from a + :class:`_engine.Row`. + + """ + if self._source_supports_scalars: + raise NotImplementedError( + "can't use this function in 'only scalars' mode" + ) + return self._metadata._row_as_tuple_getter(keys) + + def mappings(self) -> MappingResult: + """Apply a mappings filter to returned rows, returning an instance of + :class:`_engine.MappingResult`. + + When this filter is applied, fetching rows will return + :class:`_engine.RowMapping` objects instead of :class:`_engine.Row` + objects. + + .. versionadded:: 1.4 + + :return: a new :class:`_engine.MappingResult` filtering object + referring to this :class:`_engine.Result` object. + + """ + + return MappingResult(self) + + @property + def t(self) -> TupleResult[_TP]: + """Apply a "typed tuple" typing filter to returned rows. + + The :attr:`_engine.Result.t` attribute is a synonym for + calling the :meth:`_engine.Result.tuples` method. + + .. versionadded:: 2.0 + + """ + return self # type: ignore + + def tuples(self) -> TupleResult[_TP]: + """Apply a "typed tuple" typing filter to returned rows. 
+ + This method returns the same :class:`_engine.Result` object + at runtime, + however it is annotated as returning a :class:`_engine.TupleResult` object + that will indicate to :pep:`484` typing tools that plain typed + ``Tuple`` instances are returned rather than rows. This allows + tuple unpacking and ``__getitem__`` access of :class:`_engine.Row` + objects to be typed, for those cases where the statement invoked + itself included typing information. + + .. versionadded:: 2.0 + + :return: the :class:`_engine.TupleResult` type at typing time. + + .. seealso:: + + :attr:`_engine.Result.t` - shorter synonym + + :attr:`_engine.Row._t` - :class:`_engine.Row` version + + """ + + return self # type: ignore + + def _raw_row_iterator(self) -> Iterator[_RowData]: + """Return a safe iterator that yields raw row data. + + This is used by the :meth:`_engine.Result.merge` method + to merge multiple compatible results together. + + """ + raise NotImplementedError() + + def __iter__(self) -> Iterator[Row[_TP]]: + return self._iter_impl() + + def __next__(self) -> Row[_TP]: + return self._next_impl() + + def partitions( + self, size: Optional[int] = None + ) -> Iterator[Sequence[Row[_TP]]]: + """Iterate through sub-lists of rows of the size given. + + Each list will be of the size given, excluding the last list to + be yielded, which may have a small number of rows. No empty + lists will be yielded. + + The result object is automatically closed when the iterator + is fully consumed. + + Note that the backend driver will usually buffer the entire result + ahead of time unless the + :paramref:`.Connection.execution_options.stream_results` execution + option is used indicating that the driver should not pre-buffer + results, if possible. Not all drivers support this option and + the option is silently ignored for those who do not. + + When using the ORM, the :meth:`_engine.Result.partitions` method + is typically more effective from a memory perspective when it is + combined with use of the + :ref:`yield_per execution option <orm_queryguide_yield_per>`, + which instructs both the DBAPI driver to use server side cursors, + if available, as well as instructs the ORM loading internals to only + build a certain amount of ORM objects from a result at a time before + yielding them out. + + .. versionadded:: 1.4 + + :param size: indicate the maximum number of rows to be present + in each list yielded. If None, makes use of the value set by + the :meth:`_engine.Result.yield_per` method, if it were called, + or the :paramref:`_engine.Connection.execution_options.yield_per` + execution option, which is equivalent in this regard. If + yield_per weren't set, it makes use of the + :meth:`_engine.Result.fetchmany` default, which may be backend + specific and not well defined. + + :return: iterator of lists + + .. seealso:: + + :ref:`engine_stream_results` + + :ref:`orm_queryguide_yield_per` - in the :ref:`queryguide_toplevel` + + """ + + getter = self._manyrow_getter + + while True: + partition = getter(self, size) + if partition: + yield partition + else: + break + + def fetchall(self) -> Sequence[Row[_TP]]: + """A synonym for the :meth:`_engine.Result.all` method.""" + + return self._allrows() + + def fetchone(self) -> Optional[Row[_TP]]: + """Fetch one row. + + When all rows are exhausted, returns None. + + This method is provided for backwards compatibility with + SQLAlchemy 1.x.x. + + To fetch the first row of a result only, use the + :meth:`_engine.Result.first` method.
To iterate through all + rows, iterate the :class:`_engine.Result` object directly. + + :return: a :class:`_engine.Row` object if no filters are applied, + or ``None`` if no rows remain. + + """ + row = self._onerow_getter(self) + if row is _NO_ROW: + return None + else: + return row + + def fetchmany(self, size: Optional[int] = None) -> Sequence[Row[_TP]]: + """Fetch many rows. + + When all rows are exhausted, returns an empty sequence. + + This method is provided for backwards compatibility with + SQLAlchemy 1.x.x. + + To fetch rows in groups, use the :meth:`_engine.Result.partitions` + method. + + :return: a sequence of :class:`_engine.Row` objects. + + .. seealso:: + + :meth:`_engine.Result.partitions` + + """ + + return self._manyrow_getter(self, size) + + def all(self) -> Sequence[Row[_TP]]: + """Return all rows in a sequence. + + Closes the result set after invocation. Subsequent invocations + will return an empty sequence. + + .. versionadded:: 1.4 + + :return: a sequence of :class:`_engine.Row` objects. + + .. seealso:: + + :ref:`engine_stream_results` - How to stream a large result set + without loading it completely in python. + + """ + + return self._allrows() + + def first(self) -> Optional[Row[_TP]]: + """Fetch the first row or ``None`` if no row is present. + + Closes the result set and discards remaining rows. + + .. note:: This method returns one **row**, e.g. tuple, by default. + To return exactly one single scalar value, that is, the first + column of the first row, use the + :meth:`_engine.Result.scalar` method, + or combine :meth:`_engine.Result.scalars` and + :meth:`_engine.Result.first`. + + Additionally, in contrast to the behavior of the legacy ORM + :meth:`_orm.Query.first` method, **no limit is applied** to the + SQL query which was invoked to produce this + :class:`_engine.Result`; + for a DBAPI driver that buffers results in memory before yielding + rows, all rows will be sent to the Python process and all but + the first row will be discarded. + + .. seealso:: + + :ref:`migration_20_unify_select` + + :return: a :class:`_engine.Row` object, or None + if no rows remain. + + .. seealso:: + + :meth:`_engine.Result.scalar` + + :meth:`_engine.Result.one` + + """ + + return self._only_one_row( + raise_for_second_row=False, raise_for_none=False, scalar=False + ) + + def one_or_none(self) -> Optional[Row[_TP]]: + """Return at most one result or raise an exception. + + Returns ``None`` if the result has no rows. + Raises :class:`.MultipleResultsFound` + if multiple rows are returned. + + .. versionadded:: 1.4 + + :return: The first :class:`_engine.Row` or ``None`` if no row + is available. + + :raises: :class:`.MultipleResultsFound` + + .. seealso:: + + :meth:`_engine.Result.first` + + :meth:`_engine.Result.one` + + """ + return self._only_one_row( + raise_for_second_row=True, raise_for_none=False, scalar=False + ) + + @overload + def scalar_one(self: Result[Tuple[_T]]) -> _T: ... + + @overload + def scalar_one(self) -> Any: ... + + def scalar_one(self) -> Any: + """Return exactly one scalar result or raise an exception. + + This is equivalent to calling :meth:`_engine.Result.scalars` and + then :meth:`_engine.ScalarResult.one`. + + .. seealso:: + + :meth:`_engine.ScalarResult.one` + + :meth:`_engine.Result.scalars` + + """ + return self._only_one_row( + raise_for_second_row=True, raise_for_none=True, scalar=True + ) + + @overload + def scalar_one_or_none(self: Result[Tuple[_T]]) -> Optional[_T]: ... + + @overload + def scalar_one_or_none(self) -> Optional[Any]: ... 
+ + def scalar_one_or_none(self) -> Optional[Any]: + """Return exactly one scalar result or ``None``. + + This is equivalent to calling :meth:`_engine.Result.scalars` and + then :meth:`_engine.ScalarResult.one_or_none`. + + .. seealso:: + + :meth:`_engine.ScalarResult.one_or_none` + + :meth:`_engine.Result.scalars` + + """ + return self._only_one_row( + raise_for_second_row=True, raise_for_none=False, scalar=True + ) + + def one(self) -> Row[_TP]: + """Return exactly one row or raise an exception. + + Raises :class:`_exc.NoResultFound` if the result returns no + rows, or :class:`_exc.MultipleResultsFound` if multiple rows + would be returned. + + .. note:: This method returns one **row**, e.g. tuple, by default. + To return exactly one single scalar value, that is, the first + column of the first row, use the + :meth:`_engine.Result.scalar_one` method, or combine + :meth:`_engine.Result.scalars` and + :meth:`_engine.Result.one`. + + .. versionadded:: 1.4 + + :return: The first :class:`_engine.Row`. + + :raises: :class:`.MultipleResultsFound`, :class:`.NoResultFound` + + .. seealso:: + + :meth:`_engine.Result.first` + + :meth:`_engine.Result.one_or_none` + + :meth:`_engine.Result.scalar_one` + + """ + return self._only_one_row( + raise_for_second_row=True, raise_for_none=True, scalar=False + ) + + @overload + def scalar(self: Result[Tuple[_T]]) -> Optional[_T]: ... + + @overload + def scalar(self) -> Any: ... + + def scalar(self) -> Any: + """Fetch the first column of the first row, and close the result set. + + Returns ``None`` if there are no rows to fetch. + + No validation is performed to test if additional rows remain. + + After calling this method, the object is fully closed, + e.g. the :meth:`_engine.CursorResult.close` + method will have been called. + + :return: a Python scalar value, or ``None`` if no rows remain. + + """ + return self._only_one_row( + raise_for_second_row=False, raise_for_none=False, scalar=True + ) + + def freeze(self) -> FrozenResult[_TP]: + """Return a callable object that will produce copies of this + :class:`_engine.Result` when invoked. + + The callable object returned is an instance of + :class:`_engine.FrozenResult`. + + This is used for result set caching. The method must be called + on the result when it has been unconsumed, and calling the method + will consume the result fully. When the :class:`_engine.FrozenResult` + is retrieved from a cache, it can be called any number of times where + it will produce a new :class:`_engine.Result` object each time + against its stored set of rows. + + .. seealso:: + + :ref:`do_orm_execute_re_executing` - example usage within the + ORM to implement a result-set cache. + + """ + + return FrozenResult(self) + + def merge(self, *others: Result[Any]) -> MergedResult[_TP]: + """Merge this :class:`_engine.Result` with other compatible result + objects. + + The object returned is an instance of :class:`_engine.MergedResult`, + which will be composed of iterators from the given result + objects. + + The new result will use the metadata from this result object. + The subsequent result objects must be against an identical + set of result / cursor metadata, otherwise the behavior is + undefined. + + """ + return MergedResult(self._metadata, (self,) + others) + + +class FilterResult(ResultInternal[_R]): + """A wrapper for a :class:`_engine.Result` that returns objects other than + :class:`_engine.Row` objects, such as dictionaries or scalar objects. 
+ + :class:`_engine.FilterResult` is the common base for additional result + APIs including :class:`_engine.MappingResult`, + :class:`_engine.ScalarResult` and :class:`_engine.AsyncResult`. + + """ + + __slots__ = ( + "_real_result", + "_post_creational_filter", + "_metadata", + "_unique_filter_state", + "__dict__", + ) + + _post_creational_filter: Optional[Callable[[Any], Any]] + + _real_result: Result[Any] + + def __enter__(self) -> Self: + return self + + def __exit__(self, type_: Any, value: Any, traceback: Any) -> None: + self._real_result.__exit__(type_, value, traceback) + + @_generative + def yield_per(self, num: int) -> Self: + """Configure the row-fetching strategy to fetch ``num`` rows at a time. + + The :meth:`_engine.FilterResult.yield_per` method is a pass through + to the :meth:`_engine.Result.yield_per` method. See that method's + documentation for usage notes. + + .. versionadded:: 1.4.40 - added :meth:`_engine.FilterResult.yield_per` + so that the method is available on all result set implementations + + .. seealso:: + + :ref:`engine_stream_results` - describes Core behavior for + :meth:`_engine.Result.yield_per` + + :ref:`orm_queryguide_yield_per` - in the :ref:`queryguide_toplevel` + + """ + self._real_result = self._real_result.yield_per(num) + return self + + def _soft_close(self, hard: bool = False) -> None: + self._real_result._soft_close(hard=hard) + + @property + def _soft_closed(self) -> bool: + return self._real_result._soft_closed + + @property + def closed(self) -> bool: + """Return ``True`` if the underlying :class:`_engine.Result` reports + closed + + .. versionadded:: 1.4.43 + + """ + return self._real_result.closed + + def close(self) -> None: + """Close this :class:`_engine.FilterResult`. + + .. versionadded:: 1.4.43 + + """ + self._real_result.close() + + @property + def _attributes(self) -> Dict[Any, Any]: + return self._real_result._attributes + + def _fetchiter_impl(self) -> Iterator[_InterimRowType[Row[Any]]]: + return self._real_result._fetchiter_impl() + + def _fetchone_impl( + self, hard_close: bool = False + ) -> Optional[_InterimRowType[Row[Any]]]: + return self._real_result._fetchone_impl(hard_close=hard_close) + + def _fetchall_impl(self) -> List[_InterimRowType[Row[Any]]]: + return self._real_result._fetchall_impl() + + def _fetchmany_impl( + self, size: Optional[int] = None + ) -> List[_InterimRowType[Row[Any]]]: + return self._real_result._fetchmany_impl(size=size) + + +class ScalarResult(FilterResult[_R]): + """A wrapper for a :class:`_engine.Result` that returns scalar values + rather than :class:`_row.Row` values. + + The :class:`_engine.ScalarResult` object is acquired by calling the + :meth:`_engine.Result.scalars` method. + + A special limitation of :class:`_engine.ScalarResult` is that it has + no ``fetchone()`` method; since the semantics of ``fetchone()`` are that + the ``None`` value indicates no more results, this is not compatible + with :class:`_engine.ScalarResult` since there is no way to distinguish + between ``None`` as a row value versus ``None`` as an indicator. Use + ``next(result)`` to receive values individually. 
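+
+ For illustration, a minimal sketch of iterating scalar values (``conn`` is
+ assumed to be an existing :class:`_engine.Connection`; the query text is a
+ placeholder)::
+
+     result = conn.execute(text("SELECT name FROM user_account"))
+     for name in result.scalars():
+         print(name)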
+ + """ + + __slots__ = () + + _generate_rows = False + + _post_creational_filter: Optional[Callable[[Any], Any]] + + def __init__(self, real_result: Result[Any], index: _KeyIndexType): + self._real_result = real_result + + if real_result._source_supports_scalars: + self._metadata = real_result._metadata + self._post_creational_filter = None + else: + self._metadata = real_result._metadata._reduce([index]) + self._post_creational_filter = operator.itemgetter(0) + + self._unique_filter_state = real_result._unique_filter_state + + def unique(self, strategy: Optional[_UniqueFilterType] = None) -> Self: + """Apply unique filtering to the objects returned by this + :class:`_engine.ScalarResult`. + + See :meth:`_engine.Result.unique` for usage details. + + """ + self._unique_filter_state = (set(), strategy) + return self + + def partitions(self, size: Optional[int] = None) -> Iterator[Sequence[_R]]: + """Iterate through sub-lists of elements of the size given. + + Equivalent to :meth:`_engine.Result.partitions` except that + scalar values, rather than :class:`_engine.Row` objects, + are returned. + + """ + + getter = self._manyrow_getter + + while True: + partition = getter(self, size) + if partition: + yield partition + else: + break + + def fetchall(self) -> Sequence[_R]: + """A synonym for the :meth:`_engine.ScalarResult.all` method.""" + + return self._allrows() + + def fetchmany(self, size: Optional[int] = None) -> Sequence[_R]: + """Fetch many objects. + + Equivalent to :meth:`_engine.Result.fetchmany` except that + scalar values, rather than :class:`_engine.Row` objects, + are returned. + + """ + return self._manyrow_getter(self, size) + + def all(self) -> Sequence[_R]: + """Return all scalar values in a sequence. + + Equivalent to :meth:`_engine.Result.all` except that + scalar values, rather than :class:`_engine.Row` objects, + are returned. + + """ + return self._allrows() + + def __iter__(self) -> Iterator[_R]: + return self._iter_impl() + + def __next__(self) -> _R: + return self._next_impl() + + def first(self) -> Optional[_R]: + """Fetch the first object or ``None`` if no object is present. + + Equivalent to :meth:`_engine.Result.first` except that + scalar values, rather than :class:`_engine.Row` objects, + are returned. + + + """ + return self._only_one_row( + raise_for_second_row=False, raise_for_none=False, scalar=False + ) + + def one_or_none(self) -> Optional[_R]: + """Return at most one object or raise an exception. + + Equivalent to :meth:`_engine.Result.one_or_none` except that + scalar values, rather than :class:`_engine.Row` objects, + are returned. + + """ + return self._only_one_row( + raise_for_second_row=True, raise_for_none=False, scalar=False + ) + + def one(self) -> _R: + """Return exactly one object or raise an exception. + + Equivalent to :meth:`_engine.Result.one` except that + scalar values, rather than :class:`_engine.Row` objects, + are returned. + + """ + return self._only_one_row( + raise_for_second_row=True, raise_for_none=True, scalar=False + ) + + +class TupleResult(FilterResult[_R], util.TypingOnly): + """A :class:`_engine.Result` that's typed as returning plain + Python tuples instead of rows. + + Since :class:`_engine.Row` acts like a tuple in every way already, + this class is a typing only class, regular :class:`_engine.Result` is + still used at runtime. + + """ + + __slots__ = () + + if TYPE_CHECKING: + + def partitions( + self, size: Optional[int] = None + ) -> Iterator[Sequence[_R]]: + """Iterate through sub-lists of elements of the size given. 
+ + Equivalent to :meth:`_engine.Result.partitions` except that + tuple values, rather than :class:`_engine.Row` objects, + are returned. + + """ + ... + + def fetchone(self) -> Optional[_R]: + """Fetch one tuple. + + Equivalent to :meth:`_engine.Result.fetchone` except that + tuple values, rather than :class:`_engine.Row` + objects, are returned. + + """ + ... + + def fetchall(self) -> Sequence[_R]: + """A synonym for the :meth:`_engine.ScalarResult.all` method.""" + ... + + def fetchmany(self, size: Optional[int] = None) -> Sequence[_R]: + """Fetch many objects. + + Equivalent to :meth:`_engine.Result.fetchmany` except that + tuple values, rather than :class:`_engine.Row` objects, + are returned. + + """ + ... + + def all(self) -> Sequence[_R]: # noqa: A001 + """Return all scalar values in a sequence. + + Equivalent to :meth:`_engine.Result.all` except that + tuple values, rather than :class:`_engine.Row` objects, + are returned. + + """ + ... + + def __iter__(self) -> Iterator[_R]: ... + + def __next__(self) -> _R: ... + + def first(self) -> Optional[_R]: + """Fetch the first object or ``None`` if no object is present. + + Equivalent to :meth:`_engine.Result.first` except that + tuple values, rather than :class:`_engine.Row` objects, + are returned. + + + """ + ... + + def one_or_none(self) -> Optional[_R]: + """Return at most one object or raise an exception. + + Equivalent to :meth:`_engine.Result.one_or_none` except that + tuple values, rather than :class:`_engine.Row` objects, + are returned. + + """ + ... + + def one(self) -> _R: + """Return exactly one object or raise an exception. + + Equivalent to :meth:`_engine.Result.one` except that + tuple values, rather than :class:`_engine.Row` objects, + are returned. + + """ + ... + + @overload + def scalar_one(self: TupleResult[Tuple[_T]]) -> _T: ... + + @overload + def scalar_one(self) -> Any: ... + + def scalar_one(self) -> Any: + """Return exactly one scalar result or raise an exception. + + This is equivalent to calling :meth:`_engine.Result.scalars` + and then :meth:`_engine.ScalarResult.one`. + + .. seealso:: + + :meth:`_engine.ScalarResult.one` + + :meth:`_engine.Result.scalars` + + """ + ... + + @overload + def scalar_one_or_none( + self: TupleResult[Tuple[_T]], + ) -> Optional[_T]: ... + + @overload + def scalar_one_or_none(self) -> Optional[Any]: ... + + def scalar_one_or_none(self) -> Optional[Any]: + """Return exactly one or no scalar result. + + This is equivalent to calling :meth:`_engine.Result.scalars` + and then :meth:`_engine.ScalarResult.one_or_none`. + + .. seealso:: + + :meth:`_engine.ScalarResult.one_or_none` + + :meth:`_engine.Result.scalars` + + """ + ... + + @overload + def scalar(self: TupleResult[Tuple[_T]]) -> Optional[_T]: ... + + @overload + def scalar(self) -> Any: ... + + def scalar(self) -> Any: + """Fetch the first column of the first row, and close the result + set. + + Returns ``None`` if there are no rows to fetch. + + No validation is performed to test if additional rows remain. + + After calling this method, the object is fully closed, + e.g. the :meth:`_engine.CursorResult.close` + method will have been called. + + :return: a Python scalar value , or ``None`` if no rows remain. + + """ + ... + + +class MappingResult(_WithKeys, FilterResult[RowMapping]): + """A wrapper for a :class:`_engine.Result` that returns dictionary values + rather than :class:`_engine.Row` values. + + The :class:`_engine.MappingResult` object is acquired by calling the + :meth:`_engine.Result.mappings` method. 
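+
+ An illustrative sketch of iterating the mappings filter and accessing
+ columns by name (``conn`` and the queried table are assumed, not defined
+ here)::
+
+     result = conn.execute(text("SELECT id, name FROM user_account"))
+     for row_map in result.mappings():
+         print(row_map["id"], row_map["name"])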
+ + """ + + __slots__ = () + + _generate_rows = True + + _post_creational_filter = operator.attrgetter("_mapping") + + def __init__(self, result: Result[Any]): + self._real_result = result + self._unique_filter_state = result._unique_filter_state + self._metadata = result._metadata + if result._source_supports_scalars: + self._metadata = self._metadata._reduce([0]) + + def unique(self, strategy: Optional[_UniqueFilterType] = None) -> Self: + """Apply unique filtering to the objects returned by this + :class:`_engine.MappingResult`. + + See :meth:`_engine.Result.unique` for usage details. + + """ + self._unique_filter_state = (set(), strategy) + return self + + def columns(self, *col_expressions: _KeyIndexType) -> Self: + r"""Establish the columns that should be returned in each row.""" + return self._column_slices(col_expressions) + + def partitions( + self, size: Optional[int] = None + ) -> Iterator[Sequence[RowMapping]]: + """Iterate through sub-lists of elements of the size given. + + Equivalent to :meth:`_engine.Result.partitions` except that + :class:`_engine.RowMapping` values, rather than :class:`_engine.Row` + objects, are returned. + + """ + + getter = self._manyrow_getter + + while True: + partition = getter(self, size) + if partition: + yield partition + else: + break + + def fetchall(self) -> Sequence[RowMapping]: + """A synonym for the :meth:`_engine.MappingResult.all` method.""" + + return self._allrows() + + def fetchone(self) -> Optional[RowMapping]: + """Fetch one object. + + Equivalent to :meth:`_engine.Result.fetchone` except that + :class:`_engine.RowMapping` values, rather than :class:`_engine.Row` + objects, are returned. + + """ + + row = self._onerow_getter(self) + if row is _NO_ROW: + return None + else: + return row + + def fetchmany(self, size: Optional[int] = None) -> Sequence[RowMapping]: + """Fetch many objects. + + Equivalent to :meth:`_engine.Result.fetchmany` except that + :class:`_engine.RowMapping` values, rather than :class:`_engine.Row` + objects, are returned. + + """ + + return self._manyrow_getter(self, size) + + def all(self) -> Sequence[RowMapping]: + """Return all scalar values in a sequence. + + Equivalent to :meth:`_engine.Result.all` except that + :class:`_engine.RowMapping` values, rather than :class:`_engine.Row` + objects, are returned. + + """ + + return self._allrows() + + def __iter__(self) -> Iterator[RowMapping]: + return self._iter_impl() + + def __next__(self) -> RowMapping: + return self._next_impl() + + def first(self) -> Optional[RowMapping]: + """Fetch the first object or ``None`` if no object is present. + + Equivalent to :meth:`_engine.Result.first` except that + :class:`_engine.RowMapping` values, rather than :class:`_engine.Row` + objects, are returned. + + + """ + return self._only_one_row( + raise_for_second_row=False, raise_for_none=False, scalar=False + ) + + def one_or_none(self) -> Optional[RowMapping]: + """Return at most one object or raise an exception. + + Equivalent to :meth:`_engine.Result.one_or_none` except that + :class:`_engine.RowMapping` values, rather than :class:`_engine.Row` + objects, are returned. + + """ + return self._only_one_row( + raise_for_second_row=True, raise_for_none=False, scalar=False + ) + + def one(self) -> RowMapping: + """Return exactly one object or raise an exception. + + Equivalent to :meth:`_engine.Result.one` except that + :class:`_engine.RowMapping` values, rather than :class:`_engine.Row` + objects, are returned. 
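+
+        For example (a sketch; ``conn`` and ``stmt`` are assumed, and the
+        statement is expected to return exactly one row)::
+
+            row = conn.execute(stmt).mappings().one()
+            value = row["id"]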
+ + """ + return self._only_one_row( + raise_for_second_row=True, raise_for_none=True, scalar=False + ) + + +class FrozenResult(Generic[_TP]): + """Represents a :class:`_engine.Result` object in a "frozen" state suitable + for caching. + + The :class:`_engine.FrozenResult` object is returned from the + :meth:`_engine.Result.freeze` method of any :class:`_engine.Result` + object. + + A new iterable :class:`_engine.Result` object is generated from a fixed + set of data each time the :class:`_engine.FrozenResult` is invoked as + a callable:: + + + result = connection.execute(query) + + frozen = result.freeze() + + unfrozen_result_one = frozen() + + for row in unfrozen_result_one: + print(row) + + unfrozen_result_two = frozen() + rows = unfrozen_result_two.all() + + # ... etc + + .. versionadded:: 1.4 + + .. seealso:: + + :ref:`do_orm_execute_re_executing` - example usage within the + ORM to implement a result-set cache. + + :func:`_orm.loading.merge_frozen_result` - ORM function to merge + a frozen result back into a :class:`_orm.Session`. + + """ + + data: Sequence[Any] + + def __init__(self, result: Result[_TP]): + self.metadata = result._metadata._for_freeze() + self._source_supports_scalars = result._source_supports_scalars + self._attributes = result._attributes + + if self._source_supports_scalars: + self.data = list(result._raw_row_iterator()) + else: + self.data = result.fetchall() + + def rewrite_rows(self) -> Sequence[Sequence[Any]]: + if self._source_supports_scalars: + return [[elem] for elem in self.data] + else: + return [list(row) for row in self.data] + + def with_new_rows( + self, tuple_data: Sequence[Row[_TP]] + ) -> FrozenResult[_TP]: + fr = FrozenResult.__new__(FrozenResult) + fr.metadata = self.metadata + fr._attributes = self._attributes + fr._source_supports_scalars = self._source_supports_scalars + + if self._source_supports_scalars: + fr.data = [d[0] for d in tuple_data] + else: + fr.data = tuple_data + return fr + + def __call__(self) -> Result[_TP]: + result: IteratorResult[_TP] = IteratorResult( + self.metadata, iter(self.data) + ) + result._attributes = self._attributes + result._source_supports_scalars = self._source_supports_scalars + return result + + +class IteratorResult(Result[_TP]): + """A :class:`_engine.Result` that gets data from a Python iterator of + :class:`_engine.Row` objects or similar row-like data. + + .. versionadded:: 1.4 + + """ + + _hard_closed = False + _soft_closed = False + + def __init__( + self, + cursor_metadata: ResultMetaData, + iterator: Iterator[_InterimSupportsScalarsRowType], + raw: Optional[Result[Any]] = None, + _source_supports_scalars: bool = False, + ): + self._metadata = cursor_metadata + self.iterator = iterator + self.raw = raw + self._source_supports_scalars = _source_supports_scalars + + @property + def closed(self) -> bool: + """Return ``True`` if this :class:`_engine.IteratorResult` has + been closed + + .. 
versionadded:: 1.4.43 + + """ + return self._hard_closed + + def _soft_close(self, hard: bool = False, **kw: Any) -> None: + if hard: + self._hard_closed = True + if self.raw is not None: + self.raw._soft_close(hard=hard, **kw) + self.iterator = iter([]) + self._reset_memoizations() + self._soft_closed = True + + def _raise_hard_closed(self) -> NoReturn: + raise exc.ResourceClosedError("This result object is closed.") + + def _raw_row_iterator(self) -> Iterator[_RowData]: + return self.iterator + + def _fetchiter_impl(self) -> Iterator[_InterimSupportsScalarsRowType]: + if self._hard_closed: + self._raise_hard_closed() + return self.iterator + + def _fetchone_impl( + self, hard_close: bool = False + ) -> Optional[_InterimRowType[Row[Any]]]: + if self._hard_closed: + self._raise_hard_closed() + + row = next(self.iterator, _NO_ROW) + if row is _NO_ROW: + self._soft_close(hard=hard_close) + return None + else: + return row + + def _fetchall_impl(self) -> List[_InterimRowType[Row[Any]]]: + if self._hard_closed: + self._raise_hard_closed() + try: + return list(self.iterator) + finally: + self._soft_close() + + def _fetchmany_impl( + self, size: Optional[int] = None + ) -> List[_InterimRowType[Row[Any]]]: + if self._hard_closed: + self._raise_hard_closed() + + return list(itertools.islice(self.iterator, 0, size)) + + +def null_result() -> IteratorResult[Any]: + return IteratorResult(SimpleResultMetaData([]), iter([])) + + +class ChunkedIteratorResult(IteratorResult[_TP]): + """An :class:`_engine.IteratorResult` that works from an + iterator-producing callable. + + The given ``chunks`` argument is a function that is given a number of rows + to return in each chunk, or ``None`` for all rows. The function should + then return an un-consumed iterator of lists, each list of the requested + size. + + The function can be called at any time again, in which case it should + continue from the same result set but adjust the chunk size as given. + + .. versionadded:: 1.4 + + """ + + def __init__( + self, + cursor_metadata: ResultMetaData, + chunks: Callable[ + [Optional[int]], Iterator[Sequence[_InterimRowType[_R]]] + ], + source_supports_scalars: bool = False, + raw: Optional[Result[Any]] = None, + dynamic_yield_per: bool = False, + ): + self._metadata = cursor_metadata + self.chunks = chunks + self._source_supports_scalars = source_supports_scalars + self.raw = raw + self.iterator = itertools.chain.from_iterable(self.chunks(None)) + self.dynamic_yield_per = dynamic_yield_per + + @_generative + def yield_per(self, num: int) -> Self: + # TODO: this throws away the iterator which may be holding + # onto a chunk. the yield_per cannot be changed once any + # rows have been fetched. either find a way to enforce this, + # or we can't use itertools.chain and will instead have to + # keep track. + + self._yield_per = num + self.iterator = itertools.chain.from_iterable(self.chunks(num)) + return self + + def _soft_close(self, hard: bool = False, **kw: Any) -> None: + super()._soft_close(hard=hard, **kw) + self.chunks = lambda size: [] # type: ignore + + def _fetchmany_impl( + self, size: Optional[int] = None + ) -> List[_InterimRowType[Row[Any]]]: + if self.dynamic_yield_per: + self.iterator = itertools.chain.from_iterable(self.chunks(size)) + return super()._fetchmany_impl(size=size) + + +class MergedResult(IteratorResult[_TP]): + """A :class:`_engine.Result` that is merged from any number of + :class:`_engine.Result` objects. + + Returned by the :meth:`_engine.Result.merge` method. + + .. 
versionadded:: 1.4 + + """ + + closed = False + rowcount: Optional[int] + + def __init__( + self, cursor_metadata: ResultMetaData, results: Sequence[Result[_TP]] + ): + self._results = results + super().__init__( + cursor_metadata, + itertools.chain.from_iterable( + r._raw_row_iterator() for r in results + ), + ) + + self._unique_filter_state = results[0]._unique_filter_state + self._yield_per = results[0]._yield_per + + # going to try something w/ this in next rev + self._source_supports_scalars = results[0]._source_supports_scalars + + self._attributes = self._attributes.merge_with( + *[r._attributes for r in results] + ) + + def _soft_close(self, hard: bool = False, **kw: Any) -> None: + for r in self._results: + r._soft_close(hard=hard, **kw) + if hard: + self.closed = True diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/engine/row.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/engine/row.py new file mode 100644 index 0000000000000000000000000000000000000000..da7ae9af2779d692c82049a1f2aa75c9a75dff9a --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/engine/row.py @@ -0,0 +1,400 @@ +# engine/row.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php + +"""Define row constructs including :class:`.Row`.""" + +from __future__ import annotations + +from abc import ABC +import collections.abc as collections_abc +import operator +import typing +from typing import Any +from typing import Callable +from typing import Dict +from typing import Generic +from typing import Iterator +from typing import List +from typing import Mapping +from typing import NoReturn +from typing import Optional +from typing import overload +from typing import Sequence +from typing import Tuple +from typing import TYPE_CHECKING +from typing import TypeVar +from typing import Union + +from ..sql import util as sql_util +from ..util import deprecated +from ..util._has_cy import HAS_CYEXTENSION + +if TYPE_CHECKING or not HAS_CYEXTENSION: + from ._py_row import BaseRow as BaseRow +else: + from sqlalchemy.cyextension.resultproxy import BaseRow as BaseRow + +if TYPE_CHECKING: + from .result import _KeyType + from .result import _ProcessorsType + from .result import RMKeyView + +_T = TypeVar("_T", bound=Any) +_TP = TypeVar("_TP", bound=Tuple[Any, ...]) + + +class Row(BaseRow, Sequence[Any], Generic[_TP]): + """Represent a single result row. + + The :class:`.Row` object represents a row of a database result. It is + typically associated in the 1.x series of SQLAlchemy with the + :class:`_engine.CursorResult` object, however is also used by the ORM for + tuple-like results as of SQLAlchemy 1.4. + + The :class:`.Row` object seeks to act as much like a Python named + tuple as possible. For mapping (i.e. dictionary) behavior on a row, + such as testing for containment of keys, refer to the :attr:`.Row._mapping` + attribute. + + .. seealso:: + + :ref:`tutorial_selecting_data` - includes examples of selecting + rows from SELECT statements. + + .. versionchanged:: 1.4 + + Renamed ``RowProxy`` to :class:`.Row`. :class:`.Row` is no longer a + "proxy" object in that it contains the final form of data within it, + and now acts mostly like a named tuple. Mapping-like functionality is + moved to the :attr:`.Row._mapping` attribute. 
See + :ref:`change_4710_core` for background on this change. + + """ + + __slots__ = () + + def __setattr__(self, name: str, value: Any) -> NoReturn: + raise AttributeError("can't set attribute") + + def __delattr__(self, name: str) -> NoReturn: + raise AttributeError("can't delete attribute") + + def _tuple(self) -> _TP: + """Return a 'tuple' form of this :class:`.Row`. + + At runtime, this method returns "self"; the :class:`.Row` object is + already a named tuple. However, at the typing level, if this + :class:`.Row` is typed, the "tuple" return type will be a :pep:`484` + ``Tuple`` datatype that contains typing information about individual + elements, supporting typed unpacking and attribute access. + + .. versionadded:: 2.0.19 - The :meth:`.Row._tuple` method supersedes + the previous :meth:`.Row.tuple` method, which is now underscored + to avoid name conflicts with column names in the same way as other + named-tuple methods on :class:`.Row`. + + .. seealso:: + + :attr:`.Row._t` - shorthand attribute notation + + :meth:`.Result.tuples` + + + """ + return self # type: ignore + + @deprecated( + "2.0.19", + "The :meth:`.Row.tuple` method is deprecated in favor of " + ":meth:`.Row._tuple`; all :class:`.Row` " + "methods and library-level attributes are intended to be underscored " + "to avoid name conflicts. Please use :meth:`Row._tuple`.", + ) + def tuple(self) -> _TP: + """Return a 'tuple' form of this :class:`.Row`. + + .. versionadded:: 2.0 + + """ + return self._tuple() + + @property + def _t(self) -> _TP: + """A synonym for :meth:`.Row._tuple`. + + .. versionadded:: 2.0.19 - The :attr:`.Row._t` attribute supersedes + the previous :attr:`.Row.t` attribute, which is now underscored + to avoid name conflicts with column names in the same way as other + named-tuple methods on :class:`.Row`. + + .. seealso:: + + :attr:`.Result.t` + """ + return self # type: ignore + + @property + @deprecated( + "2.0.19", + "The :attr:`.Row.t` attribute is deprecated in favor of " + ":attr:`.Row._t`; all :class:`.Row` " + "methods and library-level attributes are intended to be underscored " + "to avoid name conflicts. Please use :attr:`Row._t`.", + ) + def t(self) -> _TP: + """A synonym for :meth:`.Row._tuple`. + + .. versionadded:: 2.0 + + """ + return self._t + + @property + def _mapping(self) -> RowMapping: + """Return a :class:`.RowMapping` for this :class:`.Row`. + + This object provides a consistent Python mapping (i.e. dictionary) + interface for the data contained within the row. The :class:`.Row` + by itself behaves like a named tuple. + + .. seealso:: + + :attr:`.Row._fields` + + .. 
versionadded:: 1.4 + + """ + return RowMapping(self._parent, None, self._key_to_index, self._data) + + def _filter_on_values( + self, processor: Optional[_ProcessorsType] + ) -> Row[Any]: + return Row(self._parent, processor, self._key_to_index, self._data) + + if not TYPE_CHECKING: + + def _special_name_accessor(name: str) -> Any: + """Handle ambiguous names such as "count" and "index" """ + + @property + def go(self: Row) -> Any: + if self._parent._has_key(name): + return self.__getattr__(name) + else: + + def meth(*arg: Any, **kw: Any) -> Any: + return getattr(collections_abc.Sequence, name)( + self, *arg, **kw + ) + + return meth + + return go + + count = _special_name_accessor("count") + index = _special_name_accessor("index") + + def __contains__(self, key: Any) -> bool: + return key in self._data + + def _op(self, other: Any, op: Callable[[Any, Any], bool]) -> bool: + return ( + op(self._to_tuple_instance(), other._to_tuple_instance()) + if isinstance(other, Row) + else op(self._to_tuple_instance(), other) + ) + + __hash__ = BaseRow.__hash__ + + if TYPE_CHECKING: + + @overload + def __getitem__(self, index: int) -> Any: ... + + @overload + def __getitem__(self, index: slice) -> Sequence[Any]: ... + + def __getitem__(self, index: Union[int, slice]) -> Any: ... + + def __lt__(self, other: Any) -> bool: + return self._op(other, operator.lt) + + def __le__(self, other: Any) -> bool: + return self._op(other, operator.le) + + def __ge__(self, other: Any) -> bool: + return self._op(other, operator.ge) + + def __gt__(self, other: Any) -> bool: + return self._op(other, operator.gt) + + def __eq__(self, other: Any) -> bool: + return self._op(other, operator.eq) + + def __ne__(self, other: Any) -> bool: + return self._op(other, operator.ne) + + def __repr__(self) -> str: + return repr(sql_util._repr_row(self)) + + @property + def _fields(self) -> Tuple[str, ...]: + """Return a tuple of string keys as represented by this + :class:`.Row`. + + The keys can represent the labels of the columns returned by a core + statement or the names of the orm classes returned by an orm + execution. + + This attribute is analogous to the Python named tuple ``._fields`` + attribute. + + .. versionadded:: 1.4 + + .. seealso:: + + :attr:`.Row._mapping` + + """ + return tuple([k for k in self._parent.keys if k is not None]) + + def _asdict(self) -> Dict[str, Any]: + """Return a new dict which maps field names to their corresponding + values. + + This method is analogous to the Python named tuple ``._asdict()`` + method, and works by applying the ``dict()`` constructor to the + :attr:`.Row._mapping` attribute. + + .. versionadded:: 1.4 + + .. 
seealso:: + + :attr:`.Row._mapping` + + """ + return dict(self._mapping) + + +BaseRowProxy = BaseRow +RowProxy = Row + + +class ROMappingView(ABC): + __slots__ = () + + _items: Sequence[Any] + _mapping: Mapping["_KeyType", Any] + + def __init__( + self, mapping: Mapping["_KeyType", Any], items: Sequence[Any] + ): + self._mapping = mapping # type: ignore[misc] + self._items = items # type: ignore[misc] + + def __len__(self) -> int: + return len(self._items) + + def __repr__(self) -> str: + return "{0.__class__.__name__}({0._mapping!r})".format(self) + + def __iter__(self) -> Iterator[Any]: + return iter(self._items) + + def __contains__(self, item: Any) -> bool: + return item in self._items + + def __eq__(self, other: Any) -> bool: + return list(other) == list(self) + + def __ne__(self, other: Any) -> bool: + return list(other) != list(self) + + +class ROMappingKeysValuesView( + ROMappingView, typing.KeysView["_KeyType"], typing.ValuesView[Any] +): + __slots__ = ("_items",) # mapping slot is provided by KeysView + + +class ROMappingItemsView(ROMappingView, typing.ItemsView["_KeyType", Any]): + __slots__ = ("_items",) # mapping slot is provided by ItemsView + + +class RowMapping(BaseRow, typing.Mapping["_KeyType", Any]): + """A ``Mapping`` that maps column names and objects to :class:`.Row` + values. + + The :class:`.RowMapping` is available from a :class:`.Row` via the + :attr:`.Row._mapping` attribute, as well as from the iterable interface + provided by the :class:`.MappingResult` object returned by the + :meth:`_engine.Result.mappings` method. + + :class:`.RowMapping` supplies Python mapping (i.e. dictionary) access to + the contents of the row. This includes support for testing of + containment of specific keys (string column names or objects), as well + as iteration of keys, values, and items:: + + for row in result: + if "a" in row._mapping: + print("Column 'a': %s" % row._mapping["a"]) + + print("Column b: %s" % row._mapping[table.c.b]) + + .. versionadded:: 1.4 The :class:`.RowMapping` object replaces the + mapping-like access previously provided by a database result row, + which now seeks to behave mostly like a named tuple. + + """ + + __slots__ = () + + if TYPE_CHECKING: + + def __getitem__(self, key: _KeyType) -> Any: ... + + else: + __getitem__ = BaseRow._get_by_key_impl_mapping + + def _values_impl(self) -> List[Any]: + return list(self._data) + + def __iter__(self) -> Iterator[str]: + return (k for k in self._parent.keys if k is not None) + + def __len__(self) -> int: + return len(self._data) + + def __contains__(self, key: object) -> bool: + return self._parent._has_key(key) + + def __repr__(self) -> str: + return repr(dict(self)) + + def items(self) -> ROMappingItemsView: + """Return a view of key/value tuples for the elements in the + underlying :class:`.Row`. + + """ + return ROMappingItemsView( + self, [(key, self[key]) for key in self.keys()] + ) + + def keys(self) -> RMKeyView: + """Return a view of 'keys' for string column names represented + by the underlying :class:`.Row`. + + """ + + return self._parent.keys + + def values(self) -> ROMappingKeysValuesView: + """Return a view of values for the values represented in the + underlying :class:`.Row`. 
+ + """ + return ROMappingKeysValuesView(self, self._values_impl()) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/engine/strategies.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/engine/strategies.py new file mode 100644 index 0000000000000000000000000000000000000000..5dd7bca9a49e9a8d77766633b79bbeb97917abea --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/engine/strategies.py @@ -0,0 +1,19 @@ +# engine/strategies.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php + +"""Deprecated mock engine strategy used by Alembic. + + +""" + +from __future__ import annotations + +from .mock import MockConnection # noqa + + +class MockEngineStrategy: + MockConnection = MockConnection diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/engine/url.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/engine/url.py new file mode 100644 index 0000000000000000000000000000000000000000..20079a6b535cc417b21c3f4325afec168d87094e --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/engine/url.py @@ -0,0 +1,924 @@ +# engine/url.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php + +"""Provides the :class:`~sqlalchemy.engine.url.URL` class which encapsulates +information about a database connection specification. + +The URL object is created automatically when +:func:`~sqlalchemy.engine.create_engine` is called with a string +argument; alternatively, the URL is a public-facing construct which can +be used directly and is also accepted directly by ``create_engine()``. +""" + +from __future__ import annotations + +import collections.abc as collections_abc +import re +from typing import Any +from typing import cast +from typing import Dict +from typing import Iterable +from typing import List +from typing import Mapping +from typing import NamedTuple +from typing import Optional +from typing import overload +from typing import Sequence +from typing import Tuple +from typing import Type +from typing import Union +from urllib.parse import parse_qsl +from urllib.parse import quote +from urllib.parse import quote_plus +from urllib.parse import unquote + +from .interfaces import Dialect +from .. import exc +from .. import util +from ..dialects import plugins +from ..dialects import registry + + +class URL(NamedTuple): + """ + Represent the components of a URL used to connect to a database. + + URLs are typically constructed from a fully formatted URL string, where the + :func:`.make_url` function is used internally by the + :func:`_sa.create_engine` function in order to parse the URL string into + its individual components, which are then used to construct a new + :class:`.URL` object. When parsing from a formatted URL string, the parsing + format generally follows + `RFC-1738 `_, with some exceptions. 
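+
+    For example (a sketch; the connection values shown are placeholders)::
+
+        >>> from sqlalchemy.engine import make_url
+        >>> url = make_url("postgresql+psycopg2://scott:tiger@localhost:5432/mydb")
+        >>> url.drivername, url.host, url.port, url.database
+        ('postgresql+psycopg2', 'localhost', 5432, 'mydb')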
+ + A :class:`_engine.URL` object may also be produced directly, either by + using the :func:`.make_url` function with a fully formed URL string, or + by using the :meth:`_engine.URL.create` constructor in order + to construct a :class:`_engine.URL` programmatically given individual + fields. The resulting :class:`.URL` object may be passed directly to + :func:`_sa.create_engine` in place of a string argument, which will bypass + the usage of :func:`.make_url` within the engine's creation process. + + .. versionchanged:: 1.4 + + The :class:`_engine.URL` object is now an immutable object. To + create a URL, use the :func:`_engine.make_url` or + :meth:`_engine.URL.create` function / method. To modify + a :class:`_engine.URL`, use methods like + :meth:`_engine.URL.set` and + :meth:`_engine.URL.update_query_dict` to return a new + :class:`_engine.URL` object with modifications. See notes for this + change at :ref:`change_5526`. + + .. seealso:: + + :ref:`database_urls` + + :class:`_engine.URL` contains the following attributes: + + * :attr:`_engine.URL.drivername`: database backend and driver name, such as + ``postgresql+psycopg2`` + * :attr:`_engine.URL.username`: username string + * :attr:`_engine.URL.password`: password string + * :attr:`_engine.URL.host`: string hostname + * :attr:`_engine.URL.port`: integer port number + * :attr:`_engine.URL.database`: string database name + * :attr:`_engine.URL.query`: an immutable mapping representing the query + string. contains strings for keys and either strings or tuples of + strings for values. + + + """ + + drivername: str + """database backend and driver name, such as + ``postgresql+psycopg2`` + + """ + + username: Optional[str] + "username string" + + password: Optional[str] + """password, which is normally a string but may also be any + object that has a ``__str__()`` method.""" + + host: Optional[str] + """hostname or IP number. May also be a data source name for some + drivers.""" + + port: Optional[int] + """integer port number""" + + database: Optional[str] + """database name""" + + query: util.immutabledict[str, Union[Tuple[str, ...], str]] + """an immutable mapping representing the query string. contains strings + for keys and either strings or tuples of strings for values, e.g.:: + + >>> from sqlalchemy.engine import make_url + >>> url = make_url( + ... "postgresql+psycopg2://user:pass@host/dbname?alt_host=host1&alt_host=host2&ssl_cipher=%2Fpath%2Fto%2Fcrt" + ... ) + >>> url.query + immutabledict({'alt_host': ('host1', 'host2'), 'ssl_cipher': '/path/to/crt'}) + + To create a mutable copy of this mapping, use the ``dict`` constructor:: + + mutable_query_opts = dict(url.query) + + .. seealso:: + + :attr:`_engine.URL.normalized_query` - normalizes all values into sequences + for consistent processing + + Methods for altering the contents of :attr:`_engine.URL.query`: + + :meth:`_engine.URL.update_query_dict` + + :meth:`_engine.URL.update_query_string` + + :meth:`_engine.URL.update_query_pairs` + + :meth:`_engine.URL.difference_update_query` + + """ # noqa: E501 + + @classmethod + def create( + cls, + drivername: str, + username: Optional[str] = None, + password: Optional[str] = None, + host: Optional[str] = None, + port: Optional[int] = None, + database: Optional[str] = None, + query: Mapping[str, Union[Sequence[str], str]] = util.EMPTY_DICT, + ) -> URL: + """Create a new :class:`_engine.URL` object. + + .. seealso:: + + :ref:`database_urls` + + :param drivername: the name of the database backend. 
This name will + correspond to a module in sqlalchemy/databases or a third party + plug-in. + :param username: The user name. + :param password: database password. Is typically a string, but may + also be an object that can be stringified with ``str()``. + + .. note:: The password string should **not** be URL encoded when + passed as an argument to :meth:`_engine.URL.create`; the string + should contain the password characters exactly as they would be + typed. + + .. note:: A password-producing object will be stringified only + **once** per :class:`_engine.Engine` object. For dynamic password + generation per connect, see :ref:`engines_dynamic_tokens`. + + :param host: The name of the host. + :param port: The port number. + :param database: The database name. + :param query: A dictionary of string keys to string values to be passed + to the dialect and/or the DBAPI upon connect. To specify non-string + parameters to a Python DBAPI directly, use the + :paramref:`_sa.create_engine.connect_args` parameter to + :func:`_sa.create_engine`. See also + :attr:`_engine.URL.normalized_query` for a dictionary that is + consistently string->list of string. + :return: new :class:`_engine.URL` object. + + .. versionadded:: 1.4 + + The :class:`_engine.URL` object is now an **immutable named + tuple**. In addition, the ``query`` dictionary is also immutable. + To create a URL, use the :func:`_engine.url.make_url` or + :meth:`_engine.URL.create` function/ method. To modify a + :class:`_engine.URL`, use the :meth:`_engine.URL.set` and + :meth:`_engine.URL.update_query` methods. + + """ + + return cls( + cls._assert_str(drivername, "drivername"), + cls._assert_none_str(username, "username"), + password, + cls._assert_none_str(host, "host"), + cls._assert_port(port), + cls._assert_none_str(database, "database"), + cls._str_dict(query), + ) + + @classmethod + def _assert_port(cls, port: Optional[int]) -> Optional[int]: + if port is None: + return None + try: + return int(port) + except TypeError: + raise TypeError("Port argument must be an integer or None") + + @classmethod + def _assert_str(cls, v: str, paramname: str) -> str: + if not isinstance(v, str): + raise TypeError("%s must be a string" % paramname) + return v + + @classmethod + def _assert_none_str( + cls, v: Optional[str], paramname: str + ) -> Optional[str]: + if v is None: + return v + + return cls._assert_str(v, paramname) + + @classmethod + def _str_dict( + cls, + dict_: Optional[ + Union[ + Sequence[Tuple[str, Union[Sequence[str], str]]], + Mapping[str, Union[Sequence[str], str]], + ] + ], + ) -> util.immutabledict[str, Union[Tuple[str, ...], str]]: + if dict_ is None: + return util.EMPTY_DICT + + @overload + def _assert_value( + val: str, + ) -> str: ... + + @overload + def _assert_value( + val: Sequence[str], + ) -> Union[str, Tuple[str, ...]]: ... 
+ + def _assert_value( + val: Union[str, Sequence[str]], + ) -> Union[str, Tuple[str, ...]]: + if isinstance(val, str): + return val + elif isinstance(val, collections_abc.Sequence): + return tuple(_assert_value(elem) for elem in val) + else: + raise TypeError( + "Query dictionary values must be strings or " + "sequences of strings" + ) + + def _assert_str(v: str) -> str: + if not isinstance(v, str): + raise TypeError("Query dictionary keys must be strings") + return v + + dict_items: Iterable[Tuple[str, Union[Sequence[str], str]]] + if isinstance(dict_, collections_abc.Sequence): + dict_items = dict_ + else: + dict_items = dict_.items() + + return util.immutabledict( + { + _assert_str(key): _assert_value( + value, + ) + for key, value in dict_items + } + ) + + def set( + self, + drivername: Optional[str] = None, + username: Optional[str] = None, + password: Optional[str] = None, + host: Optional[str] = None, + port: Optional[int] = None, + database: Optional[str] = None, + query: Optional[Mapping[str, Union[Sequence[str], str]]] = None, + ) -> URL: + """return a new :class:`_engine.URL` object with modifications. + + Values are used if they are non-None. To set a value to ``None`` + explicitly, use the :meth:`_engine.URL._replace` method adapted + from ``namedtuple``. + + :param drivername: new drivername + :param username: new username + :param password: new password + :param host: new hostname + :param port: new port + :param query: new query parameters, passed a dict of string keys + referring to string or sequence of string values. Fully + replaces the previous list of arguments. + + :return: new :class:`_engine.URL` object. + + .. versionadded:: 1.4 + + .. seealso:: + + :meth:`_engine.URL.update_query_dict` + + """ + + kw: Dict[str, Any] = {} + if drivername is not None: + kw["drivername"] = drivername + if username is not None: + kw["username"] = username + if password is not None: + kw["password"] = password + if host is not None: + kw["host"] = host + if port is not None: + kw["port"] = port + if database is not None: + kw["database"] = database + if query is not None: + kw["query"] = query + + return self._assert_replace(**kw) + + def _assert_replace(self, **kw: Any) -> URL: + """argument checks before calling _replace()""" + + if "drivername" in kw: + self._assert_str(kw["drivername"], "drivername") + for name in "username", "host", "database": + if name in kw: + self._assert_none_str(kw[name], name) + if "port" in kw: + self._assert_port(kw["port"]) + if "query" in kw: + kw["query"] = self._str_dict(kw["query"]) + + return self._replace(**kw) + + def update_query_string( + self, query_string: str, append: bool = False + ) -> URL: + """Return a new :class:`_engine.URL` object with the :attr:`_engine.URL.query` + parameter dictionary updated by the given query string. + + E.g.:: + + >>> from sqlalchemy.engine import make_url + >>> url = make_url("postgresql+psycopg2://user:pass@host/dbname") + >>> url = url.update_query_string( + ... "alt_host=host1&alt_host=host2&ssl_cipher=%2Fpath%2Fto%2Fcrt" + ... ) + >>> str(url) + 'postgresql+psycopg2://user:pass@host/dbname?alt_host=host1&alt_host=host2&ssl_cipher=%2Fpath%2Fto%2Fcrt' + + :param query_string: a URL escaped query string, not including the + question mark. + + :param append: if True, parameters in the existing query string will + not be removed; new parameters will be in addition to those present. + If left at its default of False, keys present in the given query + parameters will replace those of the existing query string. 
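+
+        As a sketch of the ``append`` behavior (the URL and parameter values
+        shown are placeholders)::
+
+            >>> url = make_url("postgresql://scott@localhost/test?opt=a")
+            >>> str(url.update_query_string("opt=b"))
+            'postgresql://scott@localhost/test?opt=b'
+            >>> str(url.update_query_string("opt=b", append=True))
+            'postgresql://scott@localhost/test?opt=a&opt=b'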
+ + .. versionadded:: 1.4 + + .. seealso:: + + :attr:`_engine.URL.query` + + :meth:`_engine.URL.update_query_dict` + + """ # noqa: E501 + return self.update_query_pairs(parse_qsl(query_string), append=append) + + def update_query_pairs( + self, + key_value_pairs: Iterable[Tuple[str, Union[str, List[str]]]], + append: bool = False, + ) -> URL: + """Return a new :class:`_engine.URL` object with the + :attr:`_engine.URL.query` + parameter dictionary updated by the given sequence of key/value pairs + + E.g.:: + + >>> from sqlalchemy.engine import make_url + >>> url = make_url("postgresql+psycopg2://user:pass@host/dbname") + >>> url = url.update_query_pairs( + ... [ + ... ("alt_host", "host1"), + ... ("alt_host", "host2"), + ... ("ssl_cipher", "/path/to/crt"), + ... ] + ... ) + >>> str(url) + 'postgresql+psycopg2://user:pass@host/dbname?alt_host=host1&alt_host=host2&ssl_cipher=%2Fpath%2Fto%2Fcrt' + + :param key_value_pairs: A sequence of tuples containing two strings + each. + + :param append: if True, parameters in the existing query string will + not be removed; new parameters will be in addition to those present. + If left at its default of False, keys present in the given query + parameters will replace those of the existing query string. + + .. versionadded:: 1.4 + + .. seealso:: + + :attr:`_engine.URL.query` + + :meth:`_engine.URL.difference_update_query` + + :meth:`_engine.URL.set` + + """ # noqa: E501 + + existing_query = self.query + new_keys: Dict[str, Union[str, List[str]]] = {} + + for key, value in key_value_pairs: + if key in new_keys: + new_keys[key] = util.to_list(new_keys[key]) + cast("List[str]", new_keys[key]).append(cast(str, value)) + else: + new_keys[key] = ( + list(value) if isinstance(value, (list, tuple)) else value + ) + + new_query: Mapping[str, Union[str, Sequence[str]]] + if append: + new_query = {} + + for k in new_keys: + if k in existing_query: + new_query[k] = tuple( + util.to_list(existing_query[k]) + + util.to_list(new_keys[k]) + ) + else: + new_query[k] = new_keys[k] + + new_query.update( + { + k: existing_query[k] + for k in set(existing_query).difference(new_keys) + } + ) + else: + new_query = self.query.union( + { + k: tuple(v) if isinstance(v, list) else v + for k, v in new_keys.items() + } + ) + return self.set(query=new_query) + + def update_query_dict( + self, + query_parameters: Mapping[str, Union[str, List[str]]], + append: bool = False, + ) -> URL: + """Return a new :class:`_engine.URL` object with the + :attr:`_engine.URL.query` parameter dictionary updated by the given + dictionary. + + The dictionary typically contains string keys and string values. + In order to represent a query parameter that is expressed multiple + times, pass a sequence of string values. + + E.g.:: + + + >>> from sqlalchemy.engine import make_url + >>> url = make_url("postgresql+psycopg2://user:pass@host/dbname") + >>> url = url.update_query_dict( + ... {"alt_host": ["host1", "host2"], "ssl_cipher": "/path/to/crt"} + ... ) + >>> str(url) + 'postgresql+psycopg2://user:pass@host/dbname?alt_host=host1&alt_host=host2&ssl_cipher=%2Fpath%2Fto%2Fcrt' + + + :param query_parameters: A dictionary with string keys and values + that are either strings, or sequences of strings. + + :param append: if True, parameters in the existing query string will + not be removed; new parameters will be in addition to those present. + If left at its default of False, keys present in the given query + parameters will replace those of the existing query string. + + + .. versionadded:: 1.4 + + .. 
seealso:: + + :attr:`_engine.URL.query` + + :meth:`_engine.URL.update_query_string` + + :meth:`_engine.URL.update_query_pairs` + + :meth:`_engine.URL.difference_update_query` + + :meth:`_engine.URL.set` + + """ # noqa: E501 + return self.update_query_pairs(query_parameters.items(), append=append) + + def difference_update_query(self, names: Iterable[str]) -> URL: + """ + Remove the given names from the :attr:`_engine.URL.query` dictionary, + returning the new :class:`_engine.URL`. + + E.g.:: + + url = url.difference_update_query(["foo", "bar"]) + + Equivalent to using :meth:`_engine.URL.set` as follows:: + + url = url.set( + query={ + key: url.query[key] + for key in set(url.query).difference(["foo", "bar"]) + } + ) + + .. versionadded:: 1.4 + + .. seealso:: + + :attr:`_engine.URL.query` + + :meth:`_engine.URL.update_query_dict` + + :meth:`_engine.URL.set` + + """ + + if not set(names).intersection(self.query): + return self + + return URL( + self.drivername, + self.username, + self.password, + self.host, + self.port, + self.database, + util.immutabledict( + { + key: self.query[key] + for key in set(self.query).difference(names) + } + ), + ) + + @property + def normalized_query(self) -> Mapping[str, Sequence[str]]: + """Return the :attr:`_engine.URL.query` dictionary with values normalized + into sequences. + + As the :attr:`_engine.URL.query` dictionary may contain either + string values or sequences of string values to differentiate between + parameters that are specified multiple times in the query string, + code that needs to handle multiple parameters generically will wish + to use this attribute so that all parameters present are presented + as sequences. Inspiration is from Python's ``urllib.parse.parse_qs`` + function. E.g.:: + + + >>> from sqlalchemy.engine import make_url + >>> url = make_url( + ... "postgresql+psycopg2://user:pass@host/dbname?alt_host=host1&alt_host=host2&ssl_cipher=%2Fpath%2Fto%2Fcrt" + ... ) + >>> url.query + immutabledict({'alt_host': ('host1', 'host2'), 'ssl_cipher': '/path/to/crt'}) + >>> url.normalized_query + immutabledict({'alt_host': ('host1', 'host2'), 'ssl_cipher': ('/path/to/crt',)}) + + """ # noqa: E501 + + return util.immutabledict( + { + k: (v,) if not isinstance(v, tuple) else v + for k, v in self.query.items() + } + ) + + @util.deprecated( + "1.4", + "The :meth:`_engine.URL.__to_string__ method is deprecated and will " + "be removed in a future release. Please use the " + ":meth:`_engine.URL.render_as_string` method.", + ) + def __to_string__(self, hide_password: bool = True) -> str: + """Render this :class:`_engine.URL` object as a string. + + :param hide_password: Defaults to True. The password is not shown + in the string unless this is set to False. + + """ + return self.render_as_string(hide_password=hide_password) + + def render_as_string(self, hide_password: bool = True) -> str: + """Render this :class:`_engine.URL` object as a string. + + This method is used when the ``__str__()`` or ``__repr__()`` + methods are used. The method directly includes additional options. + + :param hide_password: Defaults to True. The password is not shown + in the string unless this is set to False. 
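+
+        For example (a sketch; the credentials shown are placeholders)::
+
+            >>> from sqlalchemy.engine import make_url
+            >>> url = make_url("postgresql://scott:tiger@localhost/test")
+            >>> url.render_as_string()
+            'postgresql://scott:***@localhost/test'
+            >>> url.render_as_string(hide_password=False)
+            'postgresql://scott:tiger@localhost/test'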
+ + """ + s = self.drivername + "://" + if self.username is not None: + s += quote(self.username, safe=" +") + if self.password is not None: + s += ":" + ( + "***" + if hide_password + else quote(str(self.password), safe=" +") + ) + s += "@" + if self.host is not None: + if ":" in self.host: + s += f"[{self.host}]" + else: + s += self.host + if self.port is not None: + s += ":" + str(self.port) + if self.database is not None: + s += "/" + self.database + if self.query: + keys = list(self.query) + keys.sort() + s += "?" + "&".join( + f"{quote_plus(k)}={quote_plus(element)}" + for k in keys + for element in util.to_list(self.query[k]) + ) + return s + + def __repr__(self) -> str: + return self.render_as_string() + + def __copy__(self) -> URL: + return self.__class__.create( + self.drivername, + self.username, + self.password, + self.host, + self.port, + self.database, + # note this is an immutabledict of str-> str / tuple of str, + # also fully immutable. does not require deepcopy + self.query, + ) + + def __deepcopy__(self, memo: Any) -> URL: + return self.__copy__() + + def __hash__(self) -> int: + return hash(str(self)) + + def __eq__(self, other: Any) -> bool: + return ( + isinstance(other, URL) + and self.drivername == other.drivername + and self.username == other.username + and self.password == other.password + and self.host == other.host + and self.database == other.database + and self.query == other.query + and self.port == other.port + ) + + def __ne__(self, other: Any) -> bool: + return not self == other + + def get_backend_name(self) -> str: + """Return the backend name. + + This is the name that corresponds to the database backend in + use, and is the portion of the :attr:`_engine.URL.drivername` + that is to the left of the plus sign. + + """ + if "+" not in self.drivername: + return self.drivername + else: + return self.drivername.split("+")[0] + + def get_driver_name(self) -> str: + """Return the backend name. + + This is the name that corresponds to the DBAPI driver in + use, and is the portion of the :attr:`_engine.URL.drivername` + that is to the right of the plus sign. + + If the :attr:`_engine.URL.drivername` does not include a plus sign, + then the default :class:`_engine.Dialect` for this :class:`_engine.URL` + is imported in order to get the driver name. + + """ + + if "+" not in self.drivername: + return self.get_dialect().driver + else: + return self.drivername.split("+")[1] + + def _instantiate_plugins( + self, kwargs: Mapping[str, Any] + ) -> Tuple[URL, List[Any], Dict[str, Any]]: + plugin_names = util.to_list(self.query.get("plugin", ())) + plugin_names += kwargs.get("plugins", []) + + kwargs = dict(kwargs) + + loaded_plugins = [ + plugins.load(plugin_name)(self, kwargs) + for plugin_name in plugin_names + ] + + u = self.difference_update_query(["plugin", "plugins"]) + + for plugin in loaded_plugins: + new_u = plugin.update_url(u) + if new_u is not None: + u = new_u + + kwargs.pop("plugins", None) + + return u, loaded_plugins, kwargs + + def _get_entrypoint(self) -> Type[Dialect]: + """Return the "entry point" dialect class. + + This is normally the dialect itself except in the case when the + returned class implements the get_dialect_cls() method. 
+ + """ + if "+" not in self.drivername: + name = self.drivername + else: + name = self.drivername.replace("+", ".") + cls = registry.load(name) + # check for legacy dialects that + # would return a module with 'dialect' as the + # actual class + if ( + hasattr(cls, "dialect") + and isinstance(cls.dialect, type) + and issubclass(cls.dialect, Dialect) + ): + return cls.dialect + else: + return cast("Type[Dialect]", cls) + + def get_dialect(self, _is_async: bool = False) -> Type[Dialect]: + """Return the SQLAlchemy :class:`_engine.Dialect` class corresponding + to this URL's driver name. + + """ + entrypoint = self._get_entrypoint() + if _is_async: + dialect_cls = entrypoint.get_async_dialect_cls(self) + else: + dialect_cls = entrypoint.get_dialect_cls(self) + return dialect_cls + + def translate_connect_args( + self, names: Optional[List[str]] = None, **kw: Any + ) -> Dict[str, Any]: + r"""Translate url attributes into a dictionary of connection arguments. + + Returns attributes of this url (`host`, `database`, `username`, + `password`, `port`) as a plain dictionary. The attribute names are + used as the keys by default. Unset or false attributes are omitted + from the final dictionary. + + :param \**kw: Optional, alternate key names for url attributes. + + :param names: Deprecated. Same purpose as the keyword-based alternate + names, but correlates the name to the original positionally. + """ + + if names is not None: + util.warn_deprecated( + "The `URL.translate_connect_args.name`s parameter is " + "deprecated. Please pass the " + "alternate names as kw arguments.", + "1.4", + ) + + translated = {} + attribute_names = ["host", "database", "username", "password", "port"] + for sname in attribute_names: + if names: + name = names.pop(0) + elif sname in kw: + name = kw[sname] + else: + name = sname + if name is not None and getattr(self, sname, False): + if sname == "password": + translated[name] = str(getattr(self, sname)) + else: + translated[name] = getattr(self, sname) + + return translated + + +def make_url(name_or_url: Union[str, URL]) -> URL: + """Given a string, produce a new URL instance. + + The format of the URL generally follows `RFC-1738 + `_, with some exceptions, including + that underscores, and not dashes or periods, are accepted within the + "scheme" portion. + + If a :class:`.URL` object is passed, it is returned as is. + + .. seealso:: + + :ref:`database_urls` + + """ + + if isinstance(name_or_url, str): + return _parse_url(name_or_url) + elif not isinstance(name_or_url, URL) and not hasattr( + name_or_url, "_sqla_is_testing_if_this_is_a_mock_object" + ): + raise exc.ArgumentError( + f"Expected string or URL object, got {name_or_url!r}" + ) + else: + return name_or_url + + +def _parse_url(name: str) -> URL: + pattern = re.compile( + r""" + (?P[\w\+]+):// + (?: + (?P[^:/]*) + (?::(?P[^@]*))? + @)? + (?: + (?: + \[(?P[^/\?]+)\] | + (?P[^/:\?]+) + )? + (?::(?P[^/\?]*))? + )? + (?:/(?P[^\?]*))? + (?:\?(?P.*))? 
+ """, + re.X, + ) + + m = pattern.match(name) + if m is not None: + components = m.groupdict() + query: Optional[Dict[str, Union[str, List[str]]]] + if components["query"] is not None: + query = {} + + for key, value in parse_qsl(components["query"]): + if key in query: + query[key] = util.to_list(query[key]) + cast("List[str]", query[key]).append(value) + else: + query[key] = value + else: + query = None + components["query"] = query + + if components["username"] is not None: + components["username"] = unquote(components["username"]) + + if components["password"] is not None: + components["password"] = unquote(components["password"]) + + ipv4host = components.pop("ipv4host") + ipv6host = components.pop("ipv6host") + components["host"] = ipv4host or ipv6host + name = components.pop("name") + + if components["port"]: + components["port"] = int(components["port"]) + + return URL.create(name, **components) # type: ignore + + else: + raise exc.ArgumentError( + "Could not parse SQLAlchemy URL from given URL string" + ) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/engine/util.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/engine/util.py new file mode 100644 index 0000000000000000000000000000000000000000..e499efa91aaf6adb3930aa3c962bb2bd17efe66d --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/engine/util.py @@ -0,0 +1,167 @@ +# engine/util.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php + +from __future__ import annotations + +import typing +from typing import Any +from typing import Callable +from typing import Optional +from typing import TypeVar + +from .. import exc +from .. import util +from ..util._has_cy import HAS_CYEXTENSION +from ..util.typing import Protocol +from ..util.typing import Self + +if typing.TYPE_CHECKING or not HAS_CYEXTENSION: + from ._py_util import _distill_params_20 as _distill_params_20 + from ._py_util import _distill_raw_params as _distill_raw_params +else: + from sqlalchemy.cyextension.util import ( # noqa: F401 + _distill_params_20 as _distill_params_20, + ) + from sqlalchemy.cyextension.util import ( # noqa: F401 + _distill_raw_params as _distill_raw_params, + ) + +_C = TypeVar("_C", bound=Callable[[], Any]) + + +def connection_memoize(key: str) -> Callable[[_C], _C]: + """Decorator, memoize a function in a connection.info stash. + + Only applicable to functions which take no arguments other than a + connection. The memo will be stored in ``connection.info[key]``. + """ + + @util.decorator + def decorated(fn, self, connection): # type: ignore + connection = connection.connect() + try: + return connection.info[key] + except KeyError: + connection.info[key] = val = fn(self, connection) + return val + + return decorated + + +class _TConsSubject(Protocol): + _trans_context_manager: Optional[TransactionalContext] + + +class TransactionalContext: + """Apply Python context manager behavior to transaction objects. + + Performs validation to ensure the subject of the transaction is not + used if the transaction were ended prematurely. 
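+
+    At the public API level this is what makes patterns like the following
+    work (a sketch; ``engine`` and ``table`` are assumed)::
+
+        with engine.begin() as conn:
+            conn.execute(table.insert().values(x=1))
+        # commit() is emitted on normal exit of the block,
+        # rollback() if an exception propagates out of it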
+ + """ + + __slots__ = ("_outer_trans_ctx", "_trans_subject", "__weakref__") + + _trans_subject: Optional[_TConsSubject] + + def _transaction_is_active(self) -> bool: + raise NotImplementedError() + + def _transaction_is_closed(self) -> bool: + raise NotImplementedError() + + def _rollback_can_be_called(self) -> bool: + """indicates the object is in a state that is known to be acceptable + for rollback() to be called. + + This does not necessarily mean rollback() will succeed or not raise + an error, just that there is currently no state detected that indicates + rollback() would fail or emit warnings. + + It also does not mean that there's a transaction in progress, as + it is usually safe to call rollback() even if no transaction is + present. + + .. versionadded:: 1.4.28 + + """ + raise NotImplementedError() + + def _get_subject(self) -> _TConsSubject: + raise NotImplementedError() + + def commit(self) -> None: + raise NotImplementedError() + + def rollback(self) -> None: + raise NotImplementedError() + + def close(self) -> None: + raise NotImplementedError() + + @classmethod + def _trans_ctx_check(cls, subject: _TConsSubject) -> None: + trans_context = subject._trans_context_manager + if trans_context: + if not trans_context._transaction_is_active(): + raise exc.InvalidRequestError( + "Can't operate on closed transaction inside context " + "manager. Please complete the context manager " + "before emitting further commands." + ) + + def __enter__(self) -> Self: + subject = self._get_subject() + + # none for outer transaction, may be non-None for nested + # savepoint, legacy nesting cases + trans_context = subject._trans_context_manager + self._outer_trans_ctx = trans_context + + self._trans_subject = subject + subject._trans_context_manager = self + return self + + def __exit__(self, type_: Any, value: Any, traceback: Any) -> None: + subject = getattr(self, "_trans_subject", None) + + # simplistically we could assume that + # "subject._trans_context_manager is self". However, any calling + # code that is manipulating __exit__ directly would break this + # assumption. alembic context manager + # is an example of partial use that just calls __exit__ and + # not __enter__ at the moment. 
it's safe to assume this is being done + # in the wild also + out_of_band_exit = ( + subject is None or subject._trans_context_manager is not self + ) + + if type_ is None and self._transaction_is_active(): + try: + self.commit() + except: + with util.safe_reraise(): + if self._rollback_can_be_called(): + self.rollback() + finally: + if not out_of_band_exit: + assert subject is not None + subject._trans_context_manager = self._outer_trans_ctx + self._trans_subject = self._outer_trans_ctx = None + else: + try: + if not self._transaction_is_active(): + if not self._transaction_is_closed(): + self.close() + else: + if self._rollback_can_be_called(): + self.rollback() + finally: + if not out_of_band_exit: + assert subject is not None + subject._trans_context_manager = self._outer_trans_ctx + self._trans_subject = self._outer_trans_ctx = None diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/ext/__init__.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/ext/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2751bcf938a6e75da0fa9947c331d6fececc42e6 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/ext/__init__.py @@ -0,0 +1,11 @@ +# ext/__init__.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php + +from .. import util as _sa_util + + +_sa_util.preloaded.import_prefix("sqlalchemy.ext") diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/ext/associationproxy.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/ext/associationproxy.py new file mode 100644 index 0000000000000000000000000000000000000000..8f2c19b8764a599b109202cc7c3912873ebf1055 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/ext/associationproxy.py @@ -0,0 +1,2013 @@ +# ext/associationproxy.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php + +"""Contain the ``AssociationProxy`` class. + +The ``AssociationProxy`` is a Python property object which provides +transparent proxied access to the endpoint of an association object. + +See the example ``examples/association/proxied_association.py``. + +""" +from __future__ import annotations + +import operator +import typing +from typing import AbstractSet +from typing import Any +from typing import Callable +from typing import cast +from typing import Collection +from typing import Dict +from typing import Generic +from typing import ItemsView +from typing import Iterable +from typing import Iterator +from typing import KeysView +from typing import List +from typing import Mapping +from typing import MutableMapping +from typing import MutableSequence +from typing import MutableSet +from typing import NoReturn +from typing import Optional +from typing import overload +from typing import Set +from typing import Tuple +from typing import Type +from typing import TypeVar +from typing import Union +from typing import ValuesView + +from .. import ColumnElement +from .. import exc +from .. import inspect +from .. import orm +from .. 
import util +from ..orm import collections +from ..orm import InspectionAttrExtensionType +from ..orm import interfaces +from ..orm import ORMDescriptor +from ..orm.base import SQLORMOperations +from ..orm.interfaces import _AttributeOptions +from ..orm.interfaces import _DCAttributeOptions +from ..orm.interfaces import _DEFAULT_ATTRIBUTE_OPTIONS +from ..sql import operators +from ..sql import or_ +from ..sql.base import _NoArg +from ..util.typing import Literal +from ..util.typing import Protocol +from ..util.typing import Self +from ..util.typing import SupportsIndex +from ..util.typing import SupportsKeysAndGetItem + +if typing.TYPE_CHECKING: + from ..orm.interfaces import MapperProperty + from ..orm.interfaces import PropComparator + from ..orm.mapper import Mapper + from ..sql._typing import _ColumnExpressionArgument + from ..sql._typing import _InfoType + + +_T = TypeVar("_T", bound=Any) +_T_co = TypeVar("_T_co", bound=Any, covariant=True) +_T_con = TypeVar("_T_con", bound=Any, contravariant=True) +_S = TypeVar("_S", bound=Any) +_KT = TypeVar("_KT", bound=Any) +_VT = TypeVar("_VT", bound=Any) + + +def association_proxy( + target_collection: str, + attr: str, + *, + creator: Optional[_CreatorProtocol] = None, + getset_factory: Optional[_GetSetFactoryProtocol] = None, + proxy_factory: Optional[_ProxyFactoryProtocol] = None, + proxy_bulk_set: Optional[_ProxyBulkSetProtocol] = None, + info: Optional[_InfoType] = None, + cascade_scalar_deletes: bool = False, + create_on_none_assignment: bool = False, + init: Union[_NoArg, bool] = _NoArg.NO_ARG, + repr: Union[_NoArg, bool] = _NoArg.NO_ARG, # noqa: A002 + default: Optional[Any] = _NoArg.NO_ARG, + default_factory: Union[_NoArg, Callable[[], _T]] = _NoArg.NO_ARG, + compare: Union[_NoArg, bool] = _NoArg.NO_ARG, + kw_only: Union[_NoArg, bool] = _NoArg.NO_ARG, + hash: Union[_NoArg, bool, None] = _NoArg.NO_ARG, # noqa: A002 +) -> AssociationProxy[Any]: + r"""Return a Python property implementing a view of a target + attribute which references an attribute on members of the + target. + + The returned value is an instance of :class:`.AssociationProxy`. + + Implements a Python property representing a relationship as a collection + of simpler values, or a scalar value. The proxied property will mimic + the collection type of the target (list, dict or set), or, in the case of + a one to one relationship, a simple scalar value. + + :param target_collection: Name of the attribute that is the immediate + target. This attribute is typically mapped by + :func:`~sqlalchemy.orm.relationship` to link to a target collection, but + can also be a many-to-one or non-scalar relationship. + + :param attr: Attribute on the associated instance or instances that + are available on instances of the target object. + + :param creator: optional. + + Defines custom behavior when new items are added to the proxied + collection. + + By default, adding new items to the collection will trigger a + construction of an instance of the target object, passing the given + item as a positional argument to the target constructor. For cases + where this isn't sufficient, :paramref:`.association_proxy.creator` + can supply a callable that will construct the object in the + appropriate way, given the item that was passed. + + For list- and set- oriented collections, a single argument is + passed to the callable. For dictionary oriented collections, two + arguments are passed, corresponding to the key and value. 
+ + The :paramref:`.association_proxy.creator` callable is also invoked + for scalar (i.e. many-to-one, one-to-one) relationships. If the + current value of the target relationship attribute is ``None``, the + callable is used to construct a new object. If an object value already + exists, the given attribute value is populated onto that object. + + .. seealso:: + + :ref:`associationproxy_creator` + + :param cascade_scalar_deletes: when True, indicates that setting + the proxied value to ``None``, or deleting it via ``del``, should + also remove the source object. Only applies to scalar attributes. + Normally, removing the proxied target will not remove the proxy + source, as this object may have other state that is still to be + kept. + + .. versionadded:: 1.3 + + .. seealso:: + + :ref:`cascade_scalar_deletes` - complete usage example + + :param create_on_none_assignment: when True, indicates that setting + the proxied value to ``None`` should **create** the source object + if it does not exist, using the creator. Only applies to scalar + attributes. This is mutually exclusive + vs. the :paramref:`.assocation_proxy.cascade_scalar_deletes`. + + .. versionadded:: 2.0.18 + + :param init: Specific to :ref:`orm_declarative_native_dataclasses`, + specifies if the mapped attribute should be part of the ``__init__()`` + method as generated by the dataclass process. + + .. versionadded:: 2.0.0b4 + + :param repr: Specific to :ref:`orm_declarative_native_dataclasses`, + specifies if the attribute established by this :class:`.AssociationProxy` + should be part of the ``__repr__()`` method as generated by the dataclass + process. + + .. versionadded:: 2.0.0b4 + + :param default_factory: Specific to + :ref:`orm_declarative_native_dataclasses`, specifies a default-value + generation function that will take place as part of the ``__init__()`` + method as generated by the dataclass process. + + .. versionadded:: 2.0.0b4 + + :param compare: Specific to + :ref:`orm_declarative_native_dataclasses`, indicates if this field + should be included in comparison operations when generating the + ``__eq__()`` and ``__ne__()`` methods for the mapped class. + + .. versionadded:: 2.0.0b4 + + :param kw_only: Specific to :ref:`orm_declarative_native_dataclasses`, + indicates if this field should be marked as keyword-only when generating + the ``__init__()`` method as generated by the dataclass process. + + .. versionadded:: 2.0.0b4 + + :param hash: Specific to + :ref:`orm_declarative_native_dataclasses`, controls if this field + is included when generating the ``__hash__()`` method for the mapped + class. + + .. versionadded:: 2.0.36 + + :param info: optional, will be assigned to + :attr:`.AssociationProxy.info` if present. + + + The following additional parameters involve injection of custom behaviors + within the :class:`.AssociationProxy` object and are for advanced use + only: + + :param getset_factory: Optional. Proxied attribute access is + automatically handled by routines that get and set values based on + the `attr` argument for this proxy. + + If you would like to customize this behavior, you may supply a + `getset_factory` callable that produces a tuple of `getter` and + `setter` functions. The factory is called with two arguments, the + abstract type of the underlying collection and this proxy instance. + + :param proxy_factory: Optional. The type of collection to emulate is + determined by sniffing the target collection. 
If your collection + type can't be determined by duck typing or you'd like to use a + different collection implementation, you may supply a factory + function to produce those collections. Only applicable to + non-scalar relationships. + + :param proxy_bulk_set: Optional, use with proxy_factory. + + + """ + return AssociationProxy( + target_collection, + attr, + creator=creator, + getset_factory=getset_factory, + proxy_factory=proxy_factory, + proxy_bulk_set=proxy_bulk_set, + info=info, + cascade_scalar_deletes=cascade_scalar_deletes, + create_on_none_assignment=create_on_none_assignment, + attribute_options=_AttributeOptions( + init, repr, default, default_factory, compare, kw_only, hash + ), + ) + + +class AssociationProxyExtensionType(InspectionAttrExtensionType): + ASSOCIATION_PROXY = "ASSOCIATION_PROXY" + """Symbol indicating an :class:`.InspectionAttr` that's + of type :class:`.AssociationProxy`. + + Is assigned to the :attr:`.InspectionAttr.extension_type` + attribute. + + """ + + +class _GetterProtocol(Protocol[_T_co]): + def __call__(self, instance: Any) -> _T_co: ... + + +# mypy 0.990 we are no longer allowed to make this Protocol[_T_con] +class _SetterProtocol(Protocol): ... + + +class _PlainSetterProtocol(_SetterProtocol, Protocol[_T_con]): + def __call__(self, instance: Any, value: _T_con) -> None: ... + + +class _DictSetterProtocol(_SetterProtocol, Protocol[_T_con]): + def __call__(self, instance: Any, key: Any, value: _T_con) -> None: ... + + +# mypy 0.990 we are no longer allowed to make this Protocol[_T_con] +class _CreatorProtocol(Protocol): ... + + +class _PlainCreatorProtocol(_CreatorProtocol, Protocol[_T_con]): + def __call__(self, value: _T_con) -> Any: ... + + +class _KeyCreatorProtocol(_CreatorProtocol, Protocol[_T_con]): + def __call__(self, key: Any, value: Optional[_T_con]) -> Any: ... + + +class _LazyCollectionProtocol(Protocol[_T]): + def __call__( + self, + ) -> Union[ + MutableSet[_T], MutableMapping[Any, _T], MutableSequence[_T] + ]: ... + + +class _GetSetFactoryProtocol(Protocol): + def __call__( + self, + collection_class: Optional[Type[Any]], + assoc_instance: AssociationProxyInstance[Any], + ) -> Tuple[_GetterProtocol[Any], _SetterProtocol]: ... + + +class _ProxyFactoryProtocol(Protocol): + def __call__( + self, + lazy_collection: _LazyCollectionProtocol[Any], + creator: _CreatorProtocol, + value_attr: str, + parent: AssociationProxyInstance[Any], + ) -> Any: ... + + +class _ProxyBulkSetProtocol(Protocol): + def __call__( + self, proxy: _AssociationCollection[Any], collection: Iterable[Any] + ) -> None: ... + + +class _AssociationProxyProtocol(Protocol[_T]): + """describes the interface of :class:`.AssociationProxy` + without including descriptor methods in the interface.""" + + creator: Optional[_CreatorProtocol] + key: str + target_collection: str + value_attr: str + cascade_scalar_deletes: bool + create_on_none_assignment: bool + getset_factory: Optional[_GetSetFactoryProtocol] + proxy_factory: Optional[_ProxyFactoryProtocol] + proxy_bulk_set: Optional[_ProxyBulkSetProtocol] + + @util.ro_memoized_property + def info(self) -> _InfoType: ... + + def for_class( + self, class_: Type[Any], obj: Optional[object] = None + ) -> AssociationProxyInstance[_T]: ... + + def _default_getset( + self, collection_class: Any + ) -> Tuple[_GetterProtocol[Any], _SetterProtocol]: ... 
+ + +class AssociationProxy( + interfaces.InspectionAttrInfo, + ORMDescriptor[_T], + _DCAttributeOptions, + _AssociationProxyProtocol[_T], +): + """A descriptor that presents a read/write view of an object attribute.""" + + is_attribute = True + extension_type = AssociationProxyExtensionType.ASSOCIATION_PROXY + + def __init__( + self, + target_collection: str, + attr: str, + *, + creator: Optional[_CreatorProtocol] = None, + getset_factory: Optional[_GetSetFactoryProtocol] = None, + proxy_factory: Optional[_ProxyFactoryProtocol] = None, + proxy_bulk_set: Optional[_ProxyBulkSetProtocol] = None, + info: Optional[_InfoType] = None, + cascade_scalar_deletes: bool = False, + create_on_none_assignment: bool = False, + attribute_options: Optional[_AttributeOptions] = None, + ): + """Construct a new :class:`.AssociationProxy`. + + The :class:`.AssociationProxy` object is typically constructed using + the :func:`.association_proxy` constructor function. See the + description of :func:`.association_proxy` for a description of all + parameters. + + + """ + self.target_collection = target_collection + self.value_attr = attr + self.creator = creator + self.getset_factory = getset_factory + self.proxy_factory = proxy_factory + self.proxy_bulk_set = proxy_bulk_set + + if cascade_scalar_deletes and create_on_none_assignment: + raise exc.ArgumentError( + "The cascade_scalar_deletes and create_on_none_assignment " + "parameters are mutually exclusive." + ) + self.cascade_scalar_deletes = cascade_scalar_deletes + self.create_on_none_assignment = create_on_none_assignment + + self.key = "_%s_%s_%s" % ( + type(self).__name__, + target_collection, + id(self), + ) + if info: + self.info = info # type: ignore + + if ( + attribute_options + and attribute_options != _DEFAULT_ATTRIBUTE_OPTIONS + ): + self._has_dataclass_arguments = True + self._attribute_options = attribute_options + else: + self._has_dataclass_arguments = False + self._attribute_options = _DEFAULT_ATTRIBUTE_OPTIONS + + @overload + def __get__( + self, instance: Literal[None], owner: Literal[None] + ) -> Self: ... + + @overload + def __get__( + self, instance: Literal[None], owner: Any + ) -> AssociationProxyInstance[_T]: ... + + @overload + def __get__(self, instance: object, owner: Any) -> _T: ... + + def __get__( + self, instance: object, owner: Any + ) -> Union[AssociationProxyInstance[_T], _T, AssociationProxy[_T]]: + if owner is None: + return self + inst = self._as_instance(owner, instance) + if inst: + return inst.get(instance) + + assert instance is None + + return self + + def __set__(self, instance: object, values: _T) -> None: + class_ = type(instance) + self._as_instance(class_, instance).set(instance, values) + + def __delete__(self, instance: object) -> None: + class_ = type(instance) + self._as_instance(class_, instance).delete(instance) + + def for_class( + self, class_: Type[Any], obj: Optional[object] = None + ) -> AssociationProxyInstance[_T]: + r"""Return the internal state local to a specific mapped class. + + E.g., given a class ``User``:: + + class User(Base): + # ... + + keywords = association_proxy("kws", "keyword") + + If we access this :class:`.AssociationProxy` from + :attr:`_orm.Mapper.all_orm_descriptors`, and we want to view the + target class for this proxy as mapped by ``User``:: + + inspect(User).all_orm_descriptors["keywords"].for_class(User).target_class + + This returns an instance of :class:`.AssociationProxyInstance` that + is specific to the ``User`` class. 
The :class:`.AssociationProxy` + object remains agnostic of its parent class. + + :param class\_: the class that we are returning state for. + + :param obj: optional, an instance of the class that is required + if the attribute refers to a polymorphic target, e.g. where we have + to look at the type of the actual destination object to get the + complete path. + + .. versionadded:: 1.3 - :class:`.AssociationProxy` no longer stores + any state specific to a particular parent class; the state is now + stored in per-class :class:`.AssociationProxyInstance` objects. + + + """ + return self._as_instance(class_, obj) + + def _as_instance( + self, class_: Any, obj: Any + ) -> AssociationProxyInstance[_T]: + try: + inst = class_.__dict__[self.key + "_inst"] + except KeyError: + inst = None + + # avoid exception context + if inst is None: + owner = self._calc_owner(class_) + if owner is not None: + inst = AssociationProxyInstance.for_proxy(self, owner, obj) + setattr(class_, self.key + "_inst", inst) + else: + inst = None + + if inst is not None and not inst._is_canonical: + # the AssociationProxyInstance can't be generalized + # since the proxied attribute is not on the targeted + # class, only on subclasses of it, which might be + # different. only return for the specific + # object's current value + return inst._non_canonical_get_for_object(obj) # type: ignore + else: + return inst # type: ignore # TODO + + def _calc_owner(self, target_cls: Any) -> Any: + # we might be getting invoked for a subclass + # that is not mapped yet, in some declarative situations. + # save until we are mapped + try: + insp = inspect(target_cls) + except exc.NoInspectionAvailable: + # can't find a mapper, don't set owner. if we are a not-yet-mapped + # subclass, we can also scan through __mro__ to find a mapped + # class, but instead just wait for us to be called again against a + # mapped class normally. + return None + else: + return insp.mapper.class_manager.class_ + + def _default_getset( + self, collection_class: Any + ) -> Tuple[_GetterProtocol[Any], _SetterProtocol]: + attr = self.value_attr + _getter = operator.attrgetter(attr) + + def getter(instance: Any) -> Optional[Any]: + return _getter(instance) if instance is not None else None + + if collection_class is dict: + + def dict_setter(instance: Any, k: Any, value: Any) -> None: + setattr(instance, attr, value) + + return getter, dict_setter + + else: + + def plain_setter(o: Any, v: Any) -> None: + setattr(o, attr, v) + + return getter, plain_setter + + def __repr__(self) -> str: + return "AssociationProxy(%r, %r)" % ( + self.target_collection, + self.value_attr, + ) + + +# the pep-673 Self type does not work in Mypy for a "hybrid" +# style method that returns type or Self, so for one specific case +# we still need to use the pre-pep-673 workaround. +_Self = TypeVar("_Self", bound="AssociationProxyInstance[Any]") + + +class AssociationProxyInstance(SQLORMOperations[_T]): + """A per-class object that serves class- and object-specific results. + + This is used by :class:`.AssociationProxy` when it is invoked + in terms of a specific class or instance of a class, i.e. when it is + used as a regular Python descriptor. + + When referring to the :class:`.AssociationProxy` as a normal Python + descriptor, the :class:`.AssociationProxyInstance` is the object that + actually serves the information. 
Under normal circumstances, its presence + is transparent:: + + >>> User.keywords.scalar + False + + In the special case that the :class:`.AssociationProxy` object is being + accessed directly, in order to get an explicit handle to the + :class:`.AssociationProxyInstance`, use the + :meth:`.AssociationProxy.for_class` method:: + + proxy_state = inspect(User).all_orm_descriptors["keywords"].for_class(User) + + # view if proxy object is scalar or not + >>> proxy_state.scalar + False + + .. versionadded:: 1.3 + + """ # noqa + + collection_class: Optional[Type[Any]] + parent: _AssociationProxyProtocol[_T] + + def __init__( + self, + parent: _AssociationProxyProtocol[_T], + owning_class: Type[Any], + target_class: Type[Any], + value_attr: str, + ): + self.parent = parent + self.key = parent.key + self.owning_class = owning_class + self.target_collection = parent.target_collection + self.collection_class = None + self.target_class = target_class + self.value_attr = value_attr + + target_class: Type[Any] + """The intermediary class handled by this + :class:`.AssociationProxyInstance`. + + Intercepted append/set/assignment events will result + in the generation of new instances of this class. + + """ + + @classmethod + def for_proxy( + cls, + parent: AssociationProxy[_T], + owning_class: Type[Any], + parent_instance: Any, + ) -> AssociationProxyInstance[_T]: + target_collection = parent.target_collection + value_attr = parent.value_attr + prop = cast( + "orm.RelationshipProperty[_T]", + orm.class_mapper(owning_class).get_property(target_collection), + ) + + # this was never asserted before but this should be made clear. + if not isinstance(prop, orm.RelationshipProperty): + raise NotImplementedError( + "association proxy to a non-relationship " + "intermediary is not supported" + ) from None + + target_class = prop.mapper.class_ + + try: + target_assoc = cast( + "AssociationProxyInstance[_T]", + cls._cls_unwrap_target_assoc_proxy(target_class, value_attr), + ) + except AttributeError: + # the proxied attribute doesn't exist on the target class; + # return an "ambiguous" instance that will work on a per-object + # basis + return AmbiguousAssociationProxyInstance( + parent, owning_class, target_class, value_attr + ) + except Exception as err: + raise exc.InvalidRequestError( + f"Association proxy received an unexpected error when " + f"trying to retreive attribute " + f'"{target_class.__name__}.{parent.value_attr}" from ' + f'class "{target_class.__name__}": {err}' + ) from err + else: + return cls._construct_for_assoc( + target_assoc, parent, owning_class, target_class, value_attr + ) + + @classmethod + def _construct_for_assoc( + cls, + target_assoc: Optional[AssociationProxyInstance[_T]], + parent: _AssociationProxyProtocol[_T], + owning_class: Type[Any], + target_class: Type[Any], + value_attr: str, + ) -> AssociationProxyInstance[_T]: + if target_assoc is not None: + return ObjectAssociationProxyInstance( + parent, owning_class, target_class, value_attr + ) + + attr = getattr(target_class, value_attr) + if not hasattr(attr, "_is_internal_proxy"): + return AmbiguousAssociationProxyInstance( + parent, owning_class, target_class, value_attr + ) + is_object = attr._impl_uses_objects + if is_object: + return ObjectAssociationProxyInstance( + parent, owning_class, target_class, value_attr + ) + else: + return ColumnAssociationProxyInstance( + parent, owning_class, target_class, value_attr + ) + + def _get_property(self) -> MapperProperty[Any]: + return 
orm.class_mapper(self.owning_class).get_property( + self.target_collection + ) + + @property + def _comparator(self) -> PropComparator[Any]: + return getattr( # type: ignore + self.owning_class, self.target_collection + ).comparator + + def __clause_element__(self) -> NoReturn: + raise NotImplementedError( + "The association proxy can't be used as a plain column " + "expression; it only works inside of a comparison expression" + ) + + @classmethod + def _cls_unwrap_target_assoc_proxy( + cls, target_class: Any, value_attr: str + ) -> Optional[AssociationProxyInstance[_T]]: + attr = getattr(target_class, value_attr) + assert not isinstance(attr, AssociationProxy) + if isinstance(attr, AssociationProxyInstance): + return attr + return None + + @util.memoized_property + def _unwrap_target_assoc_proxy( + self, + ) -> Optional[AssociationProxyInstance[_T]]: + return self._cls_unwrap_target_assoc_proxy( + self.target_class, self.value_attr + ) + + @property + def remote_attr(self) -> SQLORMOperations[_T]: + """The 'remote' class attribute referenced by this + :class:`.AssociationProxyInstance`. + + .. seealso:: + + :attr:`.AssociationProxyInstance.attr` + + :attr:`.AssociationProxyInstance.local_attr` + + """ + return cast( + "SQLORMOperations[_T]", getattr(self.target_class, self.value_attr) + ) + + @property + def local_attr(self) -> SQLORMOperations[Any]: + """The 'local' class attribute referenced by this + :class:`.AssociationProxyInstance`. + + .. seealso:: + + :attr:`.AssociationProxyInstance.attr` + + :attr:`.AssociationProxyInstance.remote_attr` + + """ + return cast( + "SQLORMOperations[Any]", + getattr(self.owning_class, self.target_collection), + ) + + @property + def attr(self) -> Tuple[SQLORMOperations[Any], SQLORMOperations[_T]]: + """Return a tuple of ``(local_attr, remote_attr)``. + + This attribute was originally intended to facilitate using the + :meth:`_query.Query.join` method to join across the two relationships + at once, however this makes use of a deprecated calling style. + + To use :meth:`_sql.select.join` or :meth:`_orm.Query.join` with + an association proxy, the current method is to make use of the + :attr:`.AssociationProxyInstance.local_attr` and + :attr:`.AssociationProxyInstance.remote_attr` attributes separately:: + + stmt = ( + select(Parent) + .join(Parent.proxied.local_attr) + .join(Parent.proxied.remote_attr) + ) + + A future release may seek to provide a more succinct join pattern + for association proxy attributes. + + .. 
seealso:: + + :attr:`.AssociationProxyInstance.local_attr` + + :attr:`.AssociationProxyInstance.remote_attr` + + """ + return (self.local_attr, self.remote_attr) + + @util.memoized_property + def scalar(self) -> bool: + """Return ``True`` if this :class:`.AssociationProxyInstance` + proxies a scalar relationship on the local side.""" + + scalar = not self._get_property().uselist + if scalar: + self._initialize_scalar_accessors() + return scalar + + @util.memoized_property + def _value_is_scalar(self) -> bool: + return ( + not self._get_property() + .mapper.get_property(self.value_attr) + .uselist + ) + + @property + def _target_is_object(self) -> bool: + raise NotImplementedError() + + _scalar_get: _GetterProtocol[_T] + _scalar_set: _PlainSetterProtocol[_T] + + def _initialize_scalar_accessors(self) -> None: + if self.parent.getset_factory: + get, set_ = self.parent.getset_factory(None, self) + else: + get, set_ = self.parent._default_getset(None) + self._scalar_get, self._scalar_set = get, cast( + "_PlainSetterProtocol[_T]", set_ + ) + + def _default_getset( + self, collection_class: Any + ) -> Tuple[_GetterProtocol[Any], _SetterProtocol]: + attr = self.value_attr + _getter = operator.attrgetter(attr) + + def getter(instance: Any) -> Optional[_T]: + return _getter(instance) if instance is not None else None + + if collection_class is dict: + + def dict_setter(instance: Any, k: Any, value: _T) -> None: + setattr(instance, attr, value) + + return getter, dict_setter + else: + + def plain_setter(o: Any, v: _T) -> None: + setattr(o, attr, v) + + return getter, plain_setter + + @util.ro_non_memoized_property + def info(self) -> _InfoType: + return self.parent.info + + @overload + def get(self: _Self, obj: Literal[None]) -> _Self: ... + + @overload + def get(self, obj: Any) -> _T: ... + + def get( + self, obj: Any + ) -> Union[Optional[_T], AssociationProxyInstance[_T]]: + if obj is None: + return self + + proxy: _T + + if self.scalar: + target = getattr(obj, self.target_collection) + return self._scalar_get(target) + else: + try: + # If the owning instance is reborn (orm session resurrect, + # etc.), refresh the proxy cache. 
+ creator_id, self_id, proxy = cast( + "Tuple[int, int, _T]", getattr(obj, self.key) + ) + except AttributeError: + pass + else: + if id(obj) == creator_id and id(self) == self_id: + assert self.collection_class is not None + return proxy + + self.collection_class, proxy = self._new( + _lazy_collection(obj, self.target_collection) + ) + setattr(obj, self.key, (id(obj), id(self), proxy)) + return proxy + + def set(self, obj: Any, values: _T) -> None: + if self.scalar: + creator = cast( + "_PlainCreatorProtocol[_T]", + ( + self.parent.creator + if self.parent.creator + else self.target_class + ), + ) + target = getattr(obj, self.target_collection) + if target is None: + if ( + values is None + and not self.parent.create_on_none_assignment + ): + return + setattr(obj, self.target_collection, creator(values)) + else: + self._scalar_set(target, values) + if values is None and self.parent.cascade_scalar_deletes: + setattr(obj, self.target_collection, None) + else: + proxy = self.get(obj) + assert self.collection_class is not None + if proxy is not values: + proxy._bulk_replace(self, values) + + def delete(self, obj: Any) -> None: + if self.owning_class is None: + self._calc_owner(obj, None) + + if self.scalar: + target = getattr(obj, self.target_collection) + if target is not None: + delattr(target, self.value_attr) + delattr(obj, self.target_collection) + + def _new( + self, lazy_collection: _LazyCollectionProtocol[_T] + ) -> Tuple[Type[Any], _T]: + creator = ( + self.parent.creator + if self.parent.creator is not None + else cast("_CreatorProtocol", self.target_class) + ) + collection_class = util.duck_type_collection(lazy_collection()) + + if collection_class is None: + raise exc.InvalidRequestError( + f"lazy collection factory did not return a " + f"valid collection type, got {collection_class}" + ) + if self.parent.proxy_factory: + return ( + collection_class, + self.parent.proxy_factory( + lazy_collection, creator, self.value_attr, self + ), + ) + + if self.parent.getset_factory: + getter, setter = self.parent.getset_factory(collection_class, self) + else: + getter, setter = self.parent._default_getset(collection_class) + + if collection_class is list: + return ( + collection_class, + cast( + _T, + _AssociationList( + lazy_collection, creator, getter, setter, self + ), + ), + ) + elif collection_class is dict: + return ( + collection_class, + cast( + _T, + _AssociationDict( + lazy_collection, creator, getter, setter, self + ), + ), + ) + elif collection_class is set: + return ( + collection_class, + cast( + _T, + _AssociationSet( + lazy_collection, creator, getter, setter, self + ), + ), + ) + else: + raise exc.ArgumentError( + "could not guess which interface to use for " + 'collection_class "%s" backing "%s"; specify a ' + "proxy_factory and proxy_bulk_set manually" + % (self.collection_class, self.target_collection) + ) + + def _set( + self, proxy: _AssociationCollection[Any], values: Iterable[Any] + ) -> None: + if self.parent.proxy_bulk_set: + self.parent.proxy_bulk_set(proxy, values) + elif self.collection_class is list: + cast("_AssociationList[Any]", proxy).extend(values) + elif self.collection_class is dict: + cast("_AssociationDict[Any, Any]", proxy).update(values) + elif self.collection_class is set: + cast("_AssociationSet[Any]", proxy).update(values) + else: + raise exc.ArgumentError( + "no proxy_bulk_set supplied for custom " + "collection_class implementation" + ) + + def _inflate(self, proxy: _AssociationCollection[Any]) -> None: + creator = ( + self.parent.creator + and 
self.parent.creator + or cast(_CreatorProtocol, self.target_class) + ) + + if self.parent.getset_factory: + getter, setter = self.parent.getset_factory( + self.collection_class, self + ) + else: + getter, setter = self.parent._default_getset(self.collection_class) + + proxy.creator = creator + proxy.getter = getter + proxy.setter = setter + + def _criterion_exists( + self, + criterion: Optional[_ColumnExpressionArgument[bool]] = None, + **kwargs: Any, + ) -> ColumnElement[bool]: + is_has = kwargs.pop("is_has", None) + + target_assoc = self._unwrap_target_assoc_proxy + if target_assoc is not None: + inner = target_assoc._criterion_exists( + criterion=criterion, **kwargs + ) + return self._comparator._criterion_exists(inner) + + if self._target_is_object: + attr = getattr(self.target_class, self.value_attr) + value_expr = attr.comparator._criterion_exists(criterion, **kwargs) + else: + if kwargs: + raise exc.ArgumentError( + "Can't apply keyword arguments to column-targeted " + "association proxy; use ==" + ) + elif is_has and criterion is not None: + raise exc.ArgumentError( + "Non-empty has() not allowed for " + "column-targeted association proxy; use ==" + ) + + value_expr = criterion + + return self._comparator._criterion_exists(value_expr) + + def any( + self, + criterion: Optional[_ColumnExpressionArgument[bool]] = None, + **kwargs: Any, + ) -> ColumnElement[bool]: + """Produce a proxied 'any' expression using EXISTS. + + This expression will be a composed product + using the :meth:`.Relationship.Comparator.any` + and/or :meth:`.Relationship.Comparator.has` + operators of the underlying proxied attributes. + + """ + if self._unwrap_target_assoc_proxy is None and ( + self.scalar + and (not self._target_is_object or self._value_is_scalar) + ): + raise exc.InvalidRequestError( + "'any()' not implemented for scalar attributes. Use has()." + ) + return self._criterion_exists( + criterion=criterion, is_has=False, **kwargs + ) + + def has( + self, + criterion: Optional[_ColumnExpressionArgument[bool]] = None, + **kwargs: Any, + ) -> ColumnElement[bool]: + """Produce a proxied 'has' expression using EXISTS. + + This expression will be a composed product + using the :meth:`.Relationship.Comparator.any` + and/or :meth:`.Relationship.Comparator.has` + operators of the underlying proxied attributes. + + """ + if self._unwrap_target_assoc_proxy is None and ( + not self.scalar + or (self._target_is_object and not self._value_is_scalar) + ): + raise exc.InvalidRequestError( + "'has()' not implemented for collections. Use any()." + ) + return self._criterion_exists( + criterion=criterion, is_has=True, **kwargs + ) + + def __repr__(self) -> str: + return "%s(%r)" % (self.__class__.__name__, self.parent) + + +class AmbiguousAssociationProxyInstance(AssociationProxyInstance[_T]): + """an :class:`.AssociationProxyInstance` where we cannot determine + the type of target object. 
+ """ + + _is_canonical = False + + def _ambiguous(self) -> NoReturn: + raise AttributeError( + "Association proxy %s.%s refers to an attribute '%s' that is not " + "directly mapped on class %s; therefore this operation cannot " + "proceed since we don't know what type of object is referred " + "towards" + % ( + self.owning_class.__name__, + self.target_collection, + self.value_attr, + self.target_class, + ) + ) + + def get(self, obj: Any) -> Any: + if obj is None: + return self + else: + return super().get(obj) + + def __eq__(self, obj: object) -> NoReturn: + self._ambiguous() + + def __ne__(self, obj: object) -> NoReturn: + self._ambiguous() + + def any( + self, + criterion: Optional[_ColumnExpressionArgument[bool]] = None, + **kwargs: Any, + ) -> NoReturn: + self._ambiguous() + + def has( + self, + criterion: Optional[_ColumnExpressionArgument[bool]] = None, + **kwargs: Any, + ) -> NoReturn: + self._ambiguous() + + @util.memoized_property + def _lookup_cache(self) -> Dict[Type[Any], AssociationProxyInstance[_T]]: + # mapping of ->AssociationProxyInstance. + # e.g. proxy is A-> A.b -> B -> B.b_attr, but B.b_attr doesn't exist; + # only B1(B) and B2(B) have "b_attr", keys in here would be B1, B2 + return {} + + def _non_canonical_get_for_object( + self, parent_instance: Any + ) -> AssociationProxyInstance[_T]: + if parent_instance is not None: + actual_obj = getattr(parent_instance, self.target_collection) + if actual_obj is not None: + try: + insp = inspect(actual_obj) + except exc.NoInspectionAvailable: + pass + else: + mapper = insp.mapper + instance_class = mapper.class_ + if instance_class not in self._lookup_cache: + self._populate_cache(instance_class, mapper) + + try: + return self._lookup_cache[instance_class] + except KeyError: + pass + + # no object or ambiguous object given, so return "self", which + # is a proxy with generally only instance-level functionality + return self + + def _populate_cache( + self, instance_class: Any, mapper: Mapper[Any] + ) -> None: + prop = orm.class_mapper(self.owning_class).get_property( + self.target_collection + ) + + if mapper.isa(prop.mapper): + target_class = instance_class + try: + target_assoc = self._cls_unwrap_target_assoc_proxy( + target_class, self.value_attr + ) + except AttributeError: + pass + else: + self._lookup_cache[instance_class] = self._construct_for_assoc( + cast("AssociationProxyInstance[_T]", target_assoc), + self.parent, + self.owning_class, + target_class, + self.value_attr, + ) + + +class ObjectAssociationProxyInstance(AssociationProxyInstance[_T]): + """an :class:`.AssociationProxyInstance` that has an object as a target.""" + + _target_is_object: bool = True + _is_canonical = True + + def contains(self, other: Any, **kw: Any) -> ColumnElement[bool]: + """Produce a proxied 'contains' expression using EXISTS. + + This expression will be a composed product + using the :meth:`.Relationship.Comparator.any`, + :meth:`.Relationship.Comparator.has`, + and/or :meth:`.Relationship.Comparator.contains` + operators of the underlying proxied attributes. 
+ """ + + target_assoc = self._unwrap_target_assoc_proxy + if target_assoc is not None: + return self._comparator._criterion_exists( + target_assoc.contains(other) + if not target_assoc.scalar + else target_assoc == other + ) + elif ( + self._target_is_object + and self.scalar + and not self._value_is_scalar + ): + return self._comparator.has( + getattr(self.target_class, self.value_attr).contains(other) + ) + elif self._target_is_object and self.scalar and self._value_is_scalar: + raise exc.InvalidRequestError( + "contains() doesn't apply to a scalar object endpoint; use ==" + ) + else: + return self._comparator._criterion_exists( + **{self.value_attr: other} + ) + + def __eq__(self, obj: Any) -> ColumnElement[bool]: # type: ignore[override] # noqa: E501 + # note the has() here will fail for collections; eq_() + # is only allowed with a scalar. + if obj is None: + return or_( + self._comparator.has(**{self.value_attr: obj}), + self._comparator == None, + ) + else: + return self._comparator.has(**{self.value_attr: obj}) + + def __ne__(self, obj: Any) -> ColumnElement[bool]: # type: ignore[override] # noqa: E501 + # note the has() here will fail for collections; eq_() + # is only allowed with a scalar. + return self._comparator.has( + getattr(self.target_class, self.value_attr) != obj + ) + + +class ColumnAssociationProxyInstance(AssociationProxyInstance[_T]): + """an :class:`.AssociationProxyInstance` that has a database column as a + target. + """ + + _target_is_object: bool = False + _is_canonical = True + + def __eq__(self, other: Any) -> ColumnElement[bool]: # type: ignore[override] # noqa: E501 + # special case "is None" to check for no related row as well + expr = self._criterion_exists( + self.remote_attr.operate(operators.eq, other) + ) + if other is None: + return or_(expr, self._comparator == None) + else: + return expr + + def operate( + self, op: operators.OperatorType, *other: Any, **kwargs: Any + ) -> ColumnElement[Any]: + return self._criterion_exists( + self.remote_attr.operate(op, *other, **kwargs) + ) + + +class _lazy_collection(_LazyCollectionProtocol[_T]): + def __init__(self, obj: Any, target: str): + self.parent = obj + self.target = target + + def __call__( + self, + ) -> Union[MutableSet[_T], MutableMapping[Any, _T], MutableSequence[_T]]: + return getattr(self.parent, self.target) # type: ignore[no-any-return] + + def __getstate__(self) -> Any: + return {"obj": self.parent, "target": self.target} + + def __setstate__(self, state: Any) -> None: + self.parent = state["obj"] + self.target = state["target"] + + +_IT = TypeVar("_IT", bound="Any") +"""instance type - this is the type of object inside a collection. + +this is not the same as the _T of AssociationProxy and +AssociationProxyInstance itself, which will often refer to the +collection[_IT] type. + +""" + + +class _AssociationCollection(Generic[_IT]): + getter: _GetterProtocol[_IT] + """A function. Given an associated object, return the 'value'.""" + + creator: _CreatorProtocol + """ + A function that creates new target entities. Given one parameter: + value. This assertion is assumed:: + + obj = creator(somevalue) + assert getter(obj) == somevalue + """ + + parent: AssociationProxyInstance[_IT] + setter: _SetterProtocol + """A function. Given an associated object and a value, store that + value on the object. 
+ """ + + lazy_collection: _LazyCollectionProtocol[_IT] + """A callable returning a list-based collection of entities (usually an + object attribute managed by a SQLAlchemy relationship())""" + + def __init__( + self, + lazy_collection: _LazyCollectionProtocol[_IT], + creator: _CreatorProtocol, + getter: _GetterProtocol[_IT], + setter: _SetterProtocol, + parent: AssociationProxyInstance[_IT], + ): + """Constructs an _AssociationCollection. + + This will always be a subclass of either _AssociationList, + _AssociationSet, or _AssociationDict. + + """ + self.lazy_collection = lazy_collection + self.creator = creator + self.getter = getter + self.setter = setter + self.parent = parent + + if typing.TYPE_CHECKING: + col: Collection[_IT] + else: + col = property(lambda self: self.lazy_collection()) + + def __len__(self) -> int: + return len(self.col) + + def __bool__(self) -> bool: + return bool(self.col) + + def __getstate__(self) -> Any: + return {"parent": self.parent, "lazy_collection": self.lazy_collection} + + def __setstate__(self, state: Any) -> None: + self.parent = state["parent"] + self.lazy_collection = state["lazy_collection"] + self.parent._inflate(self) + + def clear(self) -> None: + raise NotImplementedError() + + +class _AssociationSingleItem(_AssociationCollection[_T]): + setter: _PlainSetterProtocol[_T] + creator: _PlainCreatorProtocol[_T] + + def _create(self, value: _T) -> Any: + return self.creator(value) + + def _get(self, object_: Any) -> _T: + return self.getter(object_) + + def _bulk_replace( + self, assoc_proxy: AssociationProxyInstance[Any], values: Iterable[_IT] + ) -> None: + self.clear() + assoc_proxy._set(self, values) + + +class _AssociationList(_AssociationSingleItem[_T], MutableSequence[_T]): + """Generic, converting, list-to-list proxy.""" + + col: MutableSequence[_T] + + def _set(self, object_: Any, value: _T) -> None: + self.setter(object_, value) + + @overload + def __getitem__(self, index: int) -> _T: ... + + @overload + def __getitem__(self, index: slice) -> MutableSequence[_T]: ... + + def __getitem__( + self, index: Union[int, slice] + ) -> Union[_T, MutableSequence[_T]]: + if not isinstance(index, slice): + return self._get(self.col[index]) + else: + return [self._get(member) for member in self.col[index]] + + @overload + def __setitem__(self, index: int, value: _T) -> None: ... + + @overload + def __setitem__(self, index: slice, value: Iterable[_T]) -> None: ... + + def __setitem__( + self, index: Union[int, slice], value: Union[_T, Iterable[_T]] + ) -> None: + if not isinstance(index, slice): + self._set(self.col[index], cast("_T", value)) + else: + if index.stop is None: + stop = len(self) + elif index.stop < 0: + stop = len(self) + index.stop + else: + stop = index.stop + step = index.step or 1 + + start = index.start or 0 + rng = list(range(index.start or 0, stop, step)) + + sized_value = list(value) + + if step == 1: + for i in rng: + del self[start] + i = start + for item in sized_value: + self.insert(i, item) + i += 1 + else: + if len(sized_value) != len(rng): + raise ValueError( + "attempt to assign sequence of size %s to " + "extended slice of size %s" + % (len(sized_value), len(rng)) + ) + for i, item in zip(rng, value): + self._set(self.col[i], item) + + @overload + def __delitem__(self, index: int) -> None: ... + + @overload + def __delitem__(self, index: slice) -> None: ... 
+ + def __delitem__(self, index: Union[slice, int]) -> None: + del self.col[index] + + def __contains__(self, value: object) -> bool: + for member in self.col: + # testlib.pragma exempt:__eq__ + if self._get(member) == value: + return True + return False + + def __iter__(self) -> Iterator[_T]: + """Iterate over proxied values. + + For the actual domain objects, iterate over .col instead or + just use the underlying collection directly from its property + on the parent. + """ + + for member in self.col: + yield self._get(member) + return + + def append(self, value: _T) -> None: + col = self.col + item = self._create(value) + col.append(item) + + def count(self, value: Any) -> int: + count = 0 + for v in self: + if v == value: + count += 1 + return count + + def extend(self, values: Iterable[_T]) -> None: + for v in values: + self.append(v) + + def insert(self, index: int, value: _T) -> None: + self.col[index:index] = [self._create(value)] + + def pop(self, index: int = -1) -> _T: + return self.getter(self.col.pop(index)) + + def remove(self, value: _T) -> None: + for i, val in enumerate(self): + if val == value: + del self.col[i] + return + raise ValueError("value not in list") + + def reverse(self) -> NoReturn: + """Not supported, use reversed(mylist)""" + + raise NotImplementedError() + + def sort(self) -> NoReturn: + """Not supported, use sorted(mylist)""" + + raise NotImplementedError() + + def clear(self) -> None: + del self.col[0 : len(self.col)] + + def __eq__(self, other: object) -> bool: + return list(self) == other + + def __ne__(self, other: object) -> bool: + return list(self) != other + + def __lt__(self, other: List[_T]) -> bool: + return list(self) < other + + def __le__(self, other: List[_T]) -> bool: + return list(self) <= other + + def __gt__(self, other: List[_T]) -> bool: + return list(self) > other + + def __ge__(self, other: List[_T]) -> bool: + return list(self) >= other + + def __add__(self, other: List[_T]) -> List[_T]: + try: + other = list(other) + except TypeError: + return NotImplemented + return list(self) + other + + def __radd__(self, other: List[_T]) -> List[_T]: + try: + other = list(other) + except TypeError: + return NotImplemented + return other + list(self) + + def __mul__(self, n: SupportsIndex) -> List[_T]: + if not isinstance(n, int): + return NotImplemented + return list(self) * n + + def __rmul__(self, n: SupportsIndex) -> List[_T]: + if not isinstance(n, int): + return NotImplemented + return n * list(self) + + def __iadd__(self, iterable: Iterable[_T]) -> Self: + self.extend(iterable) + return self + + def __imul__(self, n: SupportsIndex) -> Self: + # unlike a regular list *=, proxied __imul__ will generate unique + # backing objects for each copy. *= on proxied lists is a bit of + # a stretch anyhow, and this interpretation of the __imul__ contract + # is more plausibly useful than copying the backing objects. + if not isinstance(n, int): + raise NotImplementedError() + if n == 0: + self.clear() + elif n > 1: + self.extend(list(self) * (n - 1)) + return self + + if typing.TYPE_CHECKING: + # TODO: no idea how to do this without separate "stub" + def index( + self, value: Any, start: int = ..., stop: int = ... + ) -> int: ... 
+ + else: + + def index(self, value: Any, *arg) -> int: + ls = list(self) + return ls.index(value, *arg) + + def copy(self) -> List[_T]: + return list(self) + + def __repr__(self) -> str: + return repr(list(self)) + + def __hash__(self) -> NoReturn: + raise TypeError("%s objects are unhashable" % type(self).__name__) + + if not typing.TYPE_CHECKING: + for func_name, func in list(locals().items()): + if ( + callable(func) + and func.__name__ == func_name + and not func.__doc__ + and hasattr(list, func_name) + ): + func.__doc__ = getattr(list, func_name).__doc__ + del func_name, func + + +class _AssociationDict(_AssociationCollection[_VT], MutableMapping[_KT, _VT]): + """Generic, converting, dict-to-dict proxy.""" + + setter: _DictSetterProtocol[_VT] + creator: _KeyCreatorProtocol[_VT] + col: MutableMapping[_KT, Optional[_VT]] + + def _create(self, key: _KT, value: Optional[_VT]) -> Any: + return self.creator(key, value) + + def _get(self, object_: Any) -> _VT: + return self.getter(object_) + + def _set(self, object_: Any, key: _KT, value: _VT) -> None: + return self.setter(object_, key, value) + + def __getitem__(self, key: _KT) -> _VT: + return self._get(self.col[key]) + + def __setitem__(self, key: _KT, value: _VT) -> None: + if key in self.col: + self._set(self.col[key], key, value) + else: + self.col[key] = self._create(key, value) + + def __delitem__(self, key: _KT) -> None: + del self.col[key] + + def __contains__(self, key: object) -> bool: + return key in self.col + + def __iter__(self) -> Iterator[_KT]: + return iter(self.col.keys()) + + def clear(self) -> None: + self.col.clear() + + def __eq__(self, other: object) -> bool: + return dict(self) == other + + def __ne__(self, other: object) -> bool: + return dict(self) != other + + def __repr__(self) -> str: + return repr(dict(self)) + + @overload + def get(self, __key: _KT) -> Optional[_VT]: ... + + @overload + def get(self, __key: _KT, default: Union[_VT, _T]) -> Union[_VT, _T]: ... + + def get( + self, key: _KT, default: Optional[Union[_VT, _T]] = None + ) -> Union[_VT, _T, None]: + try: + return self[key] + except KeyError: + return default + + def setdefault(self, key: _KT, default: Optional[_VT] = None) -> _VT: + # TODO: again, no idea how to create an actual MutableMapping. + # default must allow None, return type can't include None, + # the stub explicitly allows for default of None with a cryptic message + # "This overload should be allowed only if the value type is + # compatible with None.". + if key not in self.col: + self.col[key] = self._create(key, default) + return default # type: ignore + else: + return self[key] + + def keys(self) -> KeysView[_KT]: + return self.col.keys() + + def items(self) -> ItemsView[_KT, _VT]: + return ItemsView(self) + + def values(self) -> ValuesView[_VT]: + return ValuesView(self) + + @overload + def pop(self, __key: _KT) -> _VT: ... + + @overload + def pop( + self, __key: _KT, default: Union[_VT, _T] = ... + ) -> Union[_VT, _T]: ... + + def pop(self, __key: _KT, *arg: Any, **kw: Any) -> Union[_VT, _T]: + member = self.col.pop(__key, *arg, **kw) + return self._get(member) + + def popitem(self) -> Tuple[_KT, _VT]: + item = self.col.popitem() + return (item[0], self._get(item[1])) + + @overload + def update( + self, __m: SupportsKeysAndGetItem[_KT, _VT], **kwargs: _VT + ) -> None: ... + + @overload + def update( + self, __m: Iterable[tuple[_KT, _VT]], **kwargs: _VT + ) -> None: ... + + @overload + def update(self, **kwargs: _VT) -> None: ... 
+ + def update(self, *a: Any, **kw: Any) -> None: + up: Dict[_KT, _VT] = {} + up.update(*a, **kw) + + for key, value in up.items(): + self[key] = value + + def _bulk_replace( + self, + assoc_proxy: AssociationProxyInstance[Any], + values: Mapping[_KT, _VT], + ) -> None: + existing = set(self) + constants = existing.intersection(values or ()) + additions = set(values or ()).difference(constants) + removals = existing.difference(constants) + + for key, member in values.items() or (): + if key in additions: + self[key] = member + elif key in constants: + self[key] = member + + for key in removals: + del self[key] + + def copy(self) -> Dict[_KT, _VT]: + return dict(self.items()) + + def __hash__(self) -> NoReturn: + raise TypeError("%s objects are unhashable" % type(self).__name__) + + if not typing.TYPE_CHECKING: + for func_name, func in list(locals().items()): + if ( + callable(func) + and func.__name__ == func_name + and not func.__doc__ + and hasattr(dict, func_name) + ): + func.__doc__ = getattr(dict, func_name).__doc__ + del func_name, func + + +class _AssociationSet(_AssociationSingleItem[_T], MutableSet[_T]): + """Generic, converting, set-to-set proxy.""" + + col: MutableSet[_T] + + def __len__(self) -> int: + return len(self.col) + + def __bool__(self) -> bool: + if self.col: + return True + else: + return False + + def __contains__(self, __o: object) -> bool: + for member in self.col: + if self._get(member) == __o: + return True + return False + + def __iter__(self) -> Iterator[_T]: + """Iterate over proxied values. + + For the actual domain objects, iterate over .col instead or just use + the underlying collection directly from its property on the parent. + + """ + for member in self.col: + yield self._get(member) + return + + def add(self, __element: _T) -> None: + if __element not in self: + self.col.add(self._create(__element)) + + # for discard and remove, choosing a more expensive check strategy rather + # than call self.creator() + def discard(self, __element: _T) -> None: + for member in self.col: + if self._get(member) == __element: + self.col.discard(member) + break + + def remove(self, __element: _T) -> None: + for member in self.col: + if self._get(member) == __element: + self.col.discard(member) + return + raise KeyError(__element) + + def pop(self) -> _T: + if not self.col: + raise KeyError("pop from an empty set") + member = self.col.pop() + return self._get(member) + + def update(self, *s: Iterable[_T]) -> None: + for iterable in s: + for value in iterable: + self.add(value) + + def _bulk_replace(self, assoc_proxy: Any, values: Iterable[_T]) -> None: + existing = set(self) + constants = existing.intersection(values or ()) + additions = set(values or ()).difference(constants) + removals = existing.difference(constants) + + appender = self.add + remover = self.remove + + for member in values or (): + if member in additions: + appender(member) + elif member in constants: + appender(member) + + for member in removals: + remover(member) + + def __ior__( # type: ignore + self, other: AbstractSet[_S] + ) -> MutableSet[Union[_T, _S]]: + if not collections._set_binops_check_strict(self, other): + raise NotImplementedError() + for value in other: + self.add(value) + return self + + def _set(self) -> Set[_T]: + return set(iter(self)) + + def union(self, *s: Iterable[_S]) -> MutableSet[Union[_T, _S]]: + return set(self).union(*s) + + def __or__(self, __s: AbstractSet[_S]) -> MutableSet[Union[_T, _S]]: + return self.union(__s) + + def difference(self, *s: Iterable[Any]) -> 
MutableSet[_T]: + return set(self).difference(*s) + + def __sub__(self, s: AbstractSet[Any]) -> MutableSet[_T]: + return self.difference(s) + + def difference_update(self, *s: Iterable[Any]) -> None: + for other in s: + for value in other: + self.discard(value) + + def __isub__(self, s: AbstractSet[Any]) -> Self: + if not collections._set_binops_check_strict(self, s): + raise NotImplementedError() + for value in s: + self.discard(value) + return self + + def intersection(self, *s: Iterable[Any]) -> MutableSet[_T]: + return set(self).intersection(*s) + + def __and__(self, s: AbstractSet[Any]) -> MutableSet[_T]: + return self.intersection(s) + + def intersection_update(self, *s: Iterable[Any]) -> None: + for other in s: + want, have = self.intersection(other), set(self) + + remove, add = have - want, want - have + + for value in remove: + self.remove(value) + for value in add: + self.add(value) + + def __iand__(self, s: AbstractSet[Any]) -> Self: + if not collections._set_binops_check_strict(self, s): + raise NotImplementedError() + want = self.intersection(s) + have: Set[_T] = set(self) + + remove, add = have - want, want - have + + for value in remove: + self.remove(value) + for value in add: + self.add(value) + return self + + def symmetric_difference(self, __s: Iterable[_T]) -> MutableSet[_T]: + return set(self).symmetric_difference(__s) + + def __xor__(self, s: AbstractSet[_S]) -> MutableSet[Union[_T, _S]]: + return self.symmetric_difference(s) + + def symmetric_difference_update(self, other: Iterable[Any]) -> None: + want, have = self.symmetric_difference(other), set(self) + + remove, add = have - want, want - have + + for value in remove: + self.remove(value) + for value in add: + self.add(value) + + def __ixor__(self, other: AbstractSet[_S]) -> MutableSet[Union[_T, _S]]: # type: ignore # noqa: E501 + if not collections._set_binops_check_strict(self, other): + raise NotImplementedError() + + self.symmetric_difference_update(other) + return self + + def issubset(self, __s: Iterable[Any]) -> bool: + return set(self).issubset(__s) + + def issuperset(self, __s: Iterable[Any]) -> bool: + return set(self).issuperset(__s) + + def clear(self) -> None: + self.col.clear() + + def copy(self) -> AbstractSet[_T]: + return set(self) + + def __eq__(self, other: object) -> bool: + return set(self) == other + + def __ne__(self, other: object) -> bool: + return set(self) != other + + def __lt__(self, other: AbstractSet[Any]) -> bool: + return set(self) < other + + def __le__(self, other: AbstractSet[Any]) -> bool: + return set(self) <= other + + def __gt__(self, other: AbstractSet[Any]) -> bool: + return set(self) > other + + def __ge__(self, other: AbstractSet[Any]) -> bool: + return set(self) >= other + + def __repr__(self) -> str: + return repr(set(self)) + + def __hash__(self) -> NoReturn: + raise TypeError("%s objects are unhashable" % type(self).__name__) + + if not typing.TYPE_CHECKING: + for func_name, func in list(locals().items()): + if ( + callable(func) + and func.__name__ == func_name + and not func.__doc__ + and hasattr(set, func_name) + ): + func.__doc__ = getattr(set, func_name).__doc__ + del func_name, func diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/ext/asyncio/__init__.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/ext/asyncio/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7d8a04bd789bf8792bda61d3ff4227ff8f1fe5be --- /dev/null +++ 
b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/ext/asyncio/__init__.py @@ -0,0 +1,25 @@ +# ext/asyncio/__init__.py +# Copyright (C) 2020-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php + +from .engine import async_engine_from_config as async_engine_from_config +from .engine import AsyncConnection as AsyncConnection +from .engine import AsyncEngine as AsyncEngine +from .engine import AsyncTransaction as AsyncTransaction +from .engine import create_async_engine as create_async_engine +from .engine import create_async_pool_from_url as create_async_pool_from_url +from .result import AsyncMappingResult as AsyncMappingResult +from .result import AsyncResult as AsyncResult +from .result import AsyncScalarResult as AsyncScalarResult +from .result import AsyncTupleResult as AsyncTupleResult +from .scoping import async_scoped_session as async_scoped_session +from .session import async_object_session as async_object_session +from .session import async_session as async_session +from .session import async_sessionmaker as async_sessionmaker +from .session import AsyncAttrs as AsyncAttrs +from .session import AsyncSession as AsyncSession +from .session import AsyncSessionTransaction as AsyncSessionTransaction +from .session import close_all_sessions as close_all_sessions diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/ext/asyncio/base.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/ext/asyncio/base.py new file mode 100644 index 0000000000000000000000000000000000000000..ce2c439f1609670f4ac0b010c7be129b99b33db3 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/ext/asyncio/base.py @@ -0,0 +1,281 @@ +# ext/asyncio/base.py +# Copyright (C) 2020-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php + +from __future__ import annotations + +import abc +import functools +from typing import Any +from typing import AsyncGenerator +from typing import AsyncIterator +from typing import Awaitable +from typing import Callable +from typing import ClassVar +from typing import Dict +from typing import Generator +from typing import Generic +from typing import NoReturn +from typing import Optional +from typing import overload +from typing import Tuple +from typing import TypeVar +import weakref + +from . import exc as async_exc +from ... import util +from ...util.typing import Literal +from ...util.typing import Self + +_T = TypeVar("_T", bound=Any) +_T_co = TypeVar("_T_co", bound=Any, covariant=True) + + +_PT = TypeVar("_PT", bound=Any) + + +class ReversibleProxy(Generic[_PT]): + _proxy_objects: ClassVar[ + Dict[weakref.ref[Any], weakref.ref[ReversibleProxy[Any]]] + ] = {} + __slots__ = ("__weakref__",) + + @overload + def _assign_proxied(self, target: _PT) -> _PT: ... + + @overload + def _assign_proxied(self, target: None) -> None: ... 
+ + def _assign_proxied(self, target: Optional[_PT]) -> Optional[_PT]: + if target is not None: + target_ref: weakref.ref[_PT] = weakref.ref( + target, ReversibleProxy._target_gced + ) + proxy_ref = weakref.ref( + self, + functools.partial(ReversibleProxy._target_gced, target_ref), + ) + ReversibleProxy._proxy_objects[target_ref] = proxy_ref + + return target + + @classmethod + def _target_gced( + cls, + ref: weakref.ref[_PT], + proxy_ref: Optional[weakref.ref[Self]] = None, # noqa: U100 + ) -> None: + cls._proxy_objects.pop(ref, None) + + @classmethod + def _regenerate_proxy_for_target( + cls, target: _PT, **additional_kw: Any + ) -> Self: + raise NotImplementedError() + + @overload + @classmethod + def _retrieve_proxy_for_target( + cls, target: _PT, regenerate: Literal[True] = ..., **additional_kw: Any + ) -> Self: ... + + @overload + @classmethod + def _retrieve_proxy_for_target( + cls, target: _PT, regenerate: bool = True, **additional_kw: Any + ) -> Optional[Self]: ... + + @classmethod + def _retrieve_proxy_for_target( + cls, target: _PT, regenerate: bool = True, **additional_kw: Any + ) -> Optional[Self]: + try: + proxy_ref = cls._proxy_objects[weakref.ref(target)] + except KeyError: + pass + else: + proxy = proxy_ref() + if proxy is not None: + return proxy # type: ignore + + if regenerate: + return cls._regenerate_proxy_for_target(target, **additional_kw) + else: + return None + + +class StartableContext(Awaitable[_T_co], abc.ABC): + __slots__ = () + + @abc.abstractmethod + async def start(self, is_ctxmanager: bool = False) -> _T_co: + raise NotImplementedError() + + def __await__(self) -> Generator[Any, Any, _T_co]: + return self.start().__await__() + + async def __aenter__(self) -> _T_co: + return await self.start(is_ctxmanager=True) + + @abc.abstractmethod + async def __aexit__( + self, type_: Any, value: Any, traceback: Any + ) -> Optional[bool]: + pass + + def _raise_for_not_started(self) -> NoReturn: + raise async_exc.AsyncContextNotStarted( + "%s context has not been started and object has not been awaited." + % (self.__class__.__name__) + ) + + +class GeneratorStartableContext(StartableContext[_T_co]): + __slots__ = ("gen",) + + gen: AsyncGenerator[_T_co, Any] + + def __init__( + self, + func: Callable[..., AsyncIterator[_T_co]], + args: Tuple[Any, ...], + kwds: Dict[str, Any], + ): + self.gen = func(*args, **kwds) # type: ignore + + async def start(self, is_ctxmanager: bool = False) -> _T_co: + try: + start_value = await util.anext_(self.gen) + except StopAsyncIteration: + raise RuntimeError("generator didn't yield") from None + + # if not a context manager, then interrupt the generator, don't + # let it complete. this step is technically not needed, as the + # generator will close in any case at gc time. not clear if having + # this here is a good idea or not (though it helps for clarity IMO) + if not is_ctxmanager: + await self.gen.aclose() + + return start_value + + async def __aexit__( + self, typ: Any, value: Any, traceback: Any + ) -> Optional[bool]: + # vendored from contextlib.py + if typ is None: + try: + await util.anext_(self.gen) + except StopAsyncIteration: + return False + else: + raise RuntimeError("generator didn't stop") + else: + if value is None: + # Need to force instantiation so we can reliably + # tell if we get the same exception back + value = typ() + try: + await self.gen.athrow(value) + except StopAsyncIteration as exc: + # Suppress StopIteration *unless* it's the same exception that + # was passed to throw(). 
This prevents a StopIteration + # raised inside the "with" statement from being suppressed. + return exc is not value + except RuntimeError as exc: + # Don't re-raise the passed in exception. (issue27122) + if exc is value: + return False + # Avoid suppressing if a Stop(Async)Iteration exception + # was passed to athrow() and later wrapped into a RuntimeError + # (see PEP 479 for sync generators; async generators also + # have this behavior). But do this only if the exception + # wrapped by the RuntimeError is actually Stop(Async)Iteration + # (see issue29692). + if ( + isinstance(value, (StopIteration, StopAsyncIteration)) + and exc.__cause__ is value + ): + return False + raise + except BaseException as exc: + # only re-raise if it's *not* the exception that was + # passed to throw(), because __exit__() must not raise + # an exception unless __exit__() itself failed. But throw() + # has to raise the exception to signal propagation, so this + # fixes the impedance mismatch between the throw() protocol + # and the __exit__() protocol. + if exc is not value: + raise + return False + raise RuntimeError("generator didn't stop after athrow()") + + +def asyncstartablecontext( + func: Callable[..., AsyncIterator[_T_co]] +) -> Callable[..., GeneratorStartableContext[_T_co]]: + """@asyncstartablecontext decorator. + + the decorated function can be called either as ``async with fn()``, **or** + ``await fn()``. This is decidedly different from what + ``@contextlib.asynccontextmanager`` supports, and the usage pattern + is different as well. + + Typical usage: + + .. sourcecode:: text + + @asyncstartablecontext + async def some_async_generator(<arguments>): + <setup> + try: + yield <value> + except GeneratorExit: + # return value was awaited, no context manager is present + # and caller will .close() the resource explicitly + pass + else: + <context manager cleanup> + + Above, ``GeneratorExit`` is caught if the function were used as an + ``await``. In this case, it's essential that the cleanup does **not** + occur, so there should not be a ``finally`` block. + + If ``GeneratorExit`` is not invoked, this means we're in ``__aexit__`` + and we were invoked as a context manager, and cleanup should proceed.
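An illustrative sketch (not part of the vendored file) of the two calling styles this decorator enables, using ``AsyncConnection.stream`` which appears later in this diff; the engine URL and ``some_table`` are assumptions::

    from sqlalchemy import select
    from sqlalchemy.ext.asyncio import create_async_engine

    async def demo(some_table):
        engine = create_async_engine("postgresql+asyncpg://user:pass@host/dbname")
        async with engine.connect() as conn:
            # context-manager style: the AsyncResult is closed unconditionally
            async with conn.stream(select(some_table)) as result:
                async for row in result:
                    print(row)

            # plain-await style: the caller closes the result explicitly
            result = await conn.stream(select(some_table))
            async for row in result:
                print(row)
            await result.close()
        await engine.dispose()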
+ + + """ + + @functools.wraps(func) + def helper(*args: Any, **kwds: Any) -> GeneratorStartableContext[_T_co]: + return GeneratorStartableContext(func, args, kwds) + + return helper + + +class ProxyComparable(ReversibleProxy[_PT]): + __slots__ = () + + @util.ro_non_memoized_property + def _proxied(self) -> _PT: + raise NotImplementedError() + + def __hash__(self) -> int: + return id(self) + + def __eq__(self, other: Any) -> bool: + return ( + isinstance(other, self.__class__) + and self._proxied == other._proxied + ) + + def __ne__(self, other: Any) -> bool: + return ( + not isinstance(other, self.__class__) + or self._proxied != other._proxied + ) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/ext/asyncio/engine.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/ext/asyncio/engine.py new file mode 100644 index 0000000000000000000000000000000000000000..65c019954c214e7c9a120d42d09e20f22b554662 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/ext/asyncio/engine.py @@ -0,0 +1,1469 @@ +# ext/asyncio/engine.py +# Copyright (C) 2020-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php +from __future__ import annotations + +import asyncio +import contextlib +from typing import Any +from typing import AsyncIterator +from typing import Callable +from typing import Dict +from typing import Generator +from typing import NoReturn +from typing import Optional +from typing import overload +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING +from typing import TypeVar +from typing import Union + +from . import exc as async_exc +from .base import asyncstartablecontext +from .base import GeneratorStartableContext +from .base import ProxyComparable +from .base import StartableContext +from .result import _ensure_sync_result +from .result import AsyncResult +from .result import AsyncScalarResult +from ... import exc +from ... import inspection +from ... 
import util +from ...engine import Connection +from ...engine import create_engine as _create_engine +from ...engine import create_pool_from_url as _create_pool_from_url +from ...engine import Engine +from ...engine.base import NestedTransaction +from ...engine.base import Transaction +from ...exc import ArgumentError +from ...util.concurrency import greenlet_spawn +from ...util.typing import Concatenate +from ...util.typing import ParamSpec + +if TYPE_CHECKING: + from ...engine.cursor import CursorResult + from ...engine.interfaces import _CoreAnyExecuteParams + from ...engine.interfaces import _CoreSingleExecuteParams + from ...engine.interfaces import _DBAPIAnyExecuteParams + from ...engine.interfaces import _ExecuteOptions + from ...engine.interfaces import CompiledCacheType + from ...engine.interfaces import CoreExecuteOptionsParameter + from ...engine.interfaces import Dialect + from ...engine.interfaces import IsolationLevel + from ...engine.interfaces import SchemaTranslateMapType + from ...engine.result import ScalarResult + from ...engine.url import URL + from ...pool import Pool + from ...pool import PoolProxiedConnection + from ...sql._typing import _InfoType + from ...sql.base import Executable + from ...sql.selectable import TypedReturnsRows + +_P = ParamSpec("_P") +_T = TypeVar("_T", bound=Any) + + +def create_async_engine(url: Union[str, URL], **kw: Any) -> AsyncEngine: + """Create a new async engine instance. + + Arguments passed to :func:`_asyncio.create_async_engine` are mostly + identical to those passed to the :func:`_sa.create_engine` function. + The specified dialect must be an asyncio-compatible dialect + such as :ref:`dialect-postgresql-asyncpg`. + + .. versionadded:: 1.4 + + :param async_creator: an async callable which returns a driver-level + asyncio connection. If given, the function should take no arguments, + and return a new asyncio connection from the underlying asyncio + database driver; the connection will be wrapped in the appropriate + structures to be used with the :class:`.AsyncEngine`. Note that the + parameters specified in the URL are not applied here, and the creator + function should use its own connection parameters. + + This parameter is the asyncio equivalent of the + :paramref:`_sa.create_engine.creator` parameter of the + :func:`_sa.create_engine` function. + + .. versionadded:: 2.0.16 + + """ + + if kw.get("server_side_cursors", False): + raise async_exc.AsyncMethodRequired( + "Can't set server_side_cursors for async engine globally; " + "use the connection.stream() method for an async " + "streaming result set" + ) + kw["_is_async"] = True + async_creator = kw.pop("async_creator", None) + if async_creator: + if kw.get("creator", None): + raise ArgumentError( + "Can only specify one of 'async_creator' or 'creator', " + "not both." + ) + + def creator() -> Any: + # note that to send adapted arguments like + # prepared_statement_cache_size, user would use + # "creator" and emulate this form here + return sync_engine.dialect.dbapi.connect( # type: ignore + async_creator_fn=async_creator + ) + + kw["creator"] = creator + sync_engine = _create_engine(url, **kw) + return AsyncEngine(sync_engine) + + +def async_engine_from_config( + configuration: Dict[str, Any], prefix: str = "sqlalchemy.", **kwargs: Any +) -> AsyncEngine: + """Create a new AsyncEngine instance using a configuration dictionary. 
+ + This function is analogous to the :func:`_sa.engine_from_config` function + in SQLAlchemy Core, except that the requested dialect must be an + asyncio-compatible dialect such as :ref:`dialect-postgresql-asyncpg`. + The argument signature of the function is identical to that + of :func:`_sa.engine_from_config`. + + .. versionadded:: 1.4.29 + + """ + options = { + key[len(prefix) :]: value + for key, value in configuration.items() + if key.startswith(prefix) + } + options["_coerce_config"] = True + options.update(kwargs) + url = options.pop("url") + return create_async_engine(url, **options) + + +def create_async_pool_from_url(url: Union[str, URL], **kwargs: Any) -> Pool: + """Create a new async engine instance. + + Arguments passed to :func:`_asyncio.create_async_pool_from_url` are mostly + identical to those passed to the :func:`_sa.create_pool_from_url` function. + The specified dialect must be an asyncio-compatible dialect + such as :ref:`dialect-postgresql-asyncpg`. + + .. versionadded:: 2.0.10 + + """ + kwargs["_is_async"] = True + return _create_pool_from_url(url, **kwargs) + + +class AsyncConnectable: + __slots__ = "_slots_dispatch", "__weakref__" + + @classmethod + def _no_async_engine_events(cls) -> NoReturn: + raise NotImplementedError( + "asynchronous events are not implemented at this time. Apply " + "synchronous listeners to the AsyncEngine.sync_engine or " + "AsyncConnection.sync_connection attributes." + ) + + +@util.create_proxy_methods( + Connection, + ":class:`_engine.Connection`", + ":class:`_asyncio.AsyncConnection`", + classmethods=[], + methods=[], + attributes=[ + "closed", + "invalidated", + "dialect", + "default_isolation_level", + ], +) +class AsyncConnection( + ProxyComparable[Connection], + StartableContext["AsyncConnection"], + AsyncConnectable, +): + """An asyncio proxy for a :class:`_engine.Connection`. + + :class:`_asyncio.AsyncConnection` is acquired using the + :meth:`_asyncio.AsyncEngine.connect` + method of :class:`_asyncio.AsyncEngine`:: + + from sqlalchemy.ext.asyncio import create_async_engine + + engine = create_async_engine("postgresql+asyncpg://user:pass@host/dbname") + + async with engine.connect() as conn: + result = await conn.execute(select(table)) + + .. versionadded:: 1.4 + + """ # noqa + + # AsyncConnection is a thin proxy; no state should be added here + # that is not retrievable from the "sync" engine / connection, e.g. + # current transaction, info, etc. It should be possible to + # create a new AsyncConnection that matches this one given only the + # "sync" elements. + __slots__ = ( + "engine", + "sync_engine", + "sync_connection", + ) + + def __init__( + self, + async_engine: AsyncEngine, + sync_connection: Optional[Connection] = None, + ): + self.engine = async_engine + self.sync_engine = async_engine.sync_engine + self.sync_connection = self._assign_proxied(sync_connection) + + sync_connection: Optional[Connection] + """Reference to the sync-style :class:`_engine.Connection` this + :class:`_asyncio.AsyncConnection` proxies requests towards. + + This instance can be used as an event target. + + .. seealso:: + + :ref:`asyncio_events` + + """ + + sync_engine: Engine + """Reference to the sync-style :class:`_engine.Engine` this + :class:`_asyncio.AsyncConnection` is associated with via its underlying + :class:`_engine.Connection`. + + This instance can be used as an event target. + + .. 
seealso:: + + :ref:`asyncio_events` + + """ + + @classmethod + def _regenerate_proxy_for_target( + cls, target: Connection, **additional_kw: Any # noqa: U100 + ) -> AsyncConnection: + return AsyncConnection( + AsyncEngine._retrieve_proxy_for_target(target.engine), target + ) + + async def start( + self, is_ctxmanager: bool = False # noqa: U100 + ) -> AsyncConnection: + """Start this :class:`_asyncio.AsyncConnection` object's context + outside of using a Python ``with:`` block. + + """ + if self.sync_connection: + raise exc.InvalidRequestError("connection is already started") + self.sync_connection = self._assign_proxied( + await greenlet_spawn(self.sync_engine.connect) + ) + return self + + @property + def connection(self) -> NoReturn: + """Not implemented for async; call + :meth:`_asyncio.AsyncConnection.get_raw_connection`. + """ + raise exc.InvalidRequestError( + "AsyncConnection.connection accessor is not implemented as the " + "attribute may need to reconnect on an invalidated connection. " + "Use the get_raw_connection() method." + ) + + async def get_raw_connection(self) -> PoolProxiedConnection: + """Return the pooled DBAPI-level connection in use by this + :class:`_asyncio.AsyncConnection`. + + This is a SQLAlchemy connection-pool proxied connection + which then has the attribute + :attr:`_pool._ConnectionFairy.driver_connection` that refers to the + actual driver connection. Its + :attr:`_pool._ConnectionFairy.dbapi_connection` refers instead + to an :class:`_engine.AdaptedConnection` instance that + adapts the driver connection to the DBAPI protocol. + + """ + + return await greenlet_spawn(getattr, self._proxied, "connection") + + @util.ro_non_memoized_property + def info(self) -> _InfoType: + """Return the :attr:`_engine.Connection.info` dictionary of the + underlying :class:`_engine.Connection`. + + This dictionary is freely writable for user-defined state to be + associated with the database connection. + + This attribute is only available if the :class:`.AsyncConnection` is + currently connected. If the :attr:`.AsyncConnection.closed` attribute + is ``True``, then accessing this attribute will raise + :class:`.ResourceClosedError`. + + .. versionadded:: 1.4.0b2 + + """ + return self._proxied.info + + @util.ro_non_memoized_property + def _proxied(self) -> Connection: + if not self.sync_connection: + self._raise_for_not_started() + return self.sync_connection + + def begin(self) -> AsyncTransaction: + """Begin a transaction prior to autobegin occurring.""" + assert self._proxied + return AsyncTransaction(self) + + def begin_nested(self) -> AsyncTransaction: + """Begin a nested transaction and return a transaction handle.""" + assert self._proxied + return AsyncTransaction(self, nested=True) + + async def invalidate( + self, exception: Optional[BaseException] = None + ) -> None: + """Invalidate the underlying DBAPI connection associated with + this :class:`_engine.Connection`. + + See the method :meth:`_engine.Connection.invalidate` for full + detail on this method. + + """ + + return await greenlet_spawn( + self._proxied.invalidate, exception=exception + ) + + async def get_isolation_level(self) -> IsolationLevel: + return await greenlet_spawn(self._proxied.get_isolation_level) + + def in_transaction(self) -> bool: + """Return True if a transaction is in progress.""" + + return self._proxied.in_transaction() + + def in_nested_transaction(self) -> bool: + """Return True if a transaction is in progress. + + .. 
versionadded:: 1.4.0b2 + + """ + return self._proxied.in_nested_transaction() + + def get_transaction(self) -> Optional[AsyncTransaction]: + """Return an :class:`.AsyncTransaction` representing the current + transaction, if any. + + This makes use of the underlying synchronous connection's + :meth:`_engine.Connection.get_transaction` method to get the current + :class:`_engine.Transaction`, which is then proxied in a new + :class:`.AsyncTransaction` object. + + .. versionadded:: 1.4.0b2 + + """ + + trans = self._proxied.get_transaction() + if trans is not None: + return AsyncTransaction._retrieve_proxy_for_target(trans) + else: + return None + + def get_nested_transaction(self) -> Optional[AsyncTransaction]: + """Return an :class:`.AsyncTransaction` representing the current + nested (savepoint) transaction, if any. + + This makes use of the underlying synchronous connection's + :meth:`_engine.Connection.get_nested_transaction` method to get the + current :class:`_engine.Transaction`, which is then proxied in a new + :class:`.AsyncTransaction` object. + + .. versionadded:: 1.4.0b2 + + """ + + trans = self._proxied.get_nested_transaction() + if trans is not None: + return AsyncTransaction._retrieve_proxy_for_target(trans) + else: + return None + + @overload + async def execution_options( + self, + *, + compiled_cache: Optional[CompiledCacheType] = ..., + logging_token: str = ..., + isolation_level: IsolationLevel = ..., + no_parameters: bool = False, + stream_results: bool = False, + max_row_buffer: int = ..., + yield_per: int = ..., + insertmanyvalues_page_size: int = ..., + schema_translate_map: Optional[SchemaTranslateMapType] = ..., + preserve_rowcount: bool = False, + **opt: Any, + ) -> AsyncConnection: ... + + @overload + async def execution_options(self, **opt: Any) -> AsyncConnection: ... + + async def execution_options(self, **opt: Any) -> AsyncConnection: + r"""Set non-SQL options for the connection which take effect + during execution. + + This returns this :class:`_asyncio.AsyncConnection` object with + the new options added. + + See :meth:`_engine.Connection.execution_options` for full details + on this method. + + """ + + conn = self._proxied + c2 = await greenlet_spawn(conn.execution_options, **opt) + assert c2 is conn + return self + + async def commit(self) -> None: + """Commit the transaction that is currently in progress. + + This method commits the current transaction if one has been started. + If no transaction was started, the method has no effect, assuming + the connection is in a non-invalidated state. + + A transaction is begun on a :class:`_engine.Connection` automatically + whenever a statement is first executed, or when the + :meth:`_engine.Connection.begin` method is called. + + """ + await greenlet_spawn(self._proxied.commit) + + async def rollback(self) -> None: + """Roll back the transaction that is currently in progress. + + This method rolls back the current transaction if one has been started. + If no transaction was started, the method has no effect. If a + transaction was started and the connection is in an invalidated state, + the transaction is cleared using this method. + + A transaction is begun on a :class:`_engine.Connection` automatically + whenever a statement is first executed, or when the + :meth:`_engine.Connection.begin` method is called. + + + """ + await greenlet_spawn(self._proxied.rollback) + + async def close(self) -> None: + """Close this :class:`_asyncio.AsyncConnection`. 
+ + This has the effect of also rolling back the transaction if one + is in place. + + """ + await greenlet_spawn(self._proxied.close) + + async def aclose(self) -> None: + """A synonym for :meth:`_asyncio.AsyncConnection.close`. + + The :meth:`_asyncio.AsyncConnection.aclose` name is specifically + to support the Python standard library ``@contextlib.aclosing`` + context manager function. + + .. versionadded:: 2.0.20 + + """ + await self.close() + + async def exec_driver_sql( + self, + statement: str, + parameters: Optional[_DBAPIAnyExecuteParams] = None, + execution_options: Optional[CoreExecuteOptionsParameter] = None, + ) -> CursorResult[Any]: + r"""Executes a driver-level SQL string and return buffered + :class:`_engine.Result`. + + """ + + result = await greenlet_spawn( + self._proxied.exec_driver_sql, + statement, + parameters, + execution_options, + _require_await=True, + ) + + return await _ensure_sync_result(result, self.exec_driver_sql) + + @overload + def stream( + self, + statement: TypedReturnsRows[_T], + parameters: Optional[_CoreAnyExecuteParams] = None, + *, + execution_options: Optional[CoreExecuteOptionsParameter] = None, + ) -> GeneratorStartableContext[AsyncResult[_T]]: ... + + @overload + def stream( + self, + statement: Executable, + parameters: Optional[_CoreAnyExecuteParams] = None, + *, + execution_options: Optional[CoreExecuteOptionsParameter] = None, + ) -> GeneratorStartableContext[AsyncResult[Any]]: ... + + @asyncstartablecontext + async def stream( + self, + statement: Executable, + parameters: Optional[_CoreAnyExecuteParams] = None, + *, + execution_options: Optional[CoreExecuteOptionsParameter] = None, + ) -> AsyncIterator[AsyncResult[Any]]: + """Execute a statement and return an awaitable yielding a + :class:`_asyncio.AsyncResult` object. + + E.g.:: + + result = await conn.stream(stmt) + async for row in result: + print(f"{row}") + + The :meth:`.AsyncConnection.stream` + method supports optional context manager use against the + :class:`.AsyncResult` object, as in:: + + async with conn.stream(stmt) as result: + async for row in result: + print(f"{row}") + + In the above pattern, the :meth:`.AsyncResult.close` method is + invoked unconditionally, even if the iterator is interrupted by an + exception throw. Context manager use remains optional, however, + and the function may be called in either an ``async with fn():`` or + ``await fn()`` style. + + .. versionadded:: 2.0.0b3 added context manager support + + + :return: an awaitable object that will yield an + :class:`_asyncio.AsyncResult` object. + + .. seealso:: + + :meth:`.AsyncConnection.stream_scalars` + + """ + if not self.dialect.supports_server_side_cursors: + raise exc.InvalidRequestError( + "Cant use `stream` or `stream_scalars` with the current " + "dialect since it does not support server side cursors." + ) + + result = await greenlet_spawn( + self._proxied.execute, + statement, + parameters, + execution_options=util.EMPTY_DICT.merge_with( + execution_options, {"stream_results": True} + ), + _require_await=True, + ) + assert result.context._is_server_side + ar = AsyncResult(result) + try: + yield ar + except GeneratorExit: + pass + else: + task = asyncio.create_task(ar.close()) + await asyncio.shield(task) + + @overload + async def execute( + self, + statement: TypedReturnsRows[_T], + parameters: Optional[_CoreAnyExecuteParams] = None, + *, + execution_options: Optional[CoreExecuteOptionsParameter] = None, + ) -> CursorResult[_T]: ... 
+ + @overload + async def execute( + self, + statement: Executable, + parameters: Optional[_CoreAnyExecuteParams] = None, + *, + execution_options: Optional[CoreExecuteOptionsParameter] = None, + ) -> CursorResult[Any]: ... + + async def execute( + self, + statement: Executable, + parameters: Optional[_CoreAnyExecuteParams] = None, + *, + execution_options: Optional[CoreExecuteOptionsParameter] = None, + ) -> CursorResult[Any]: + r"""Executes a SQL statement construct and return a buffered + :class:`_engine.Result`. + + :param object: The statement to be executed. This is always + an object that is in both the :class:`_expression.ClauseElement` and + :class:`_expression.Executable` hierarchies, including: + + * :class:`_expression.Select` + * :class:`_expression.Insert`, :class:`_expression.Update`, + :class:`_expression.Delete` + * :class:`_expression.TextClause` and + :class:`_expression.TextualSelect` + * :class:`_schema.DDL` and objects which inherit from + :class:`_schema.ExecutableDDLElement` + + :param parameters: parameters which will be bound into the statement. + This may be either a dictionary of parameter names to values, + or a mutable sequence (e.g. a list) of dictionaries. When a + list of dictionaries is passed, the underlying statement execution + will make use of the DBAPI ``cursor.executemany()`` method. + When a single dictionary is passed, the DBAPI ``cursor.execute()`` + method will be used. + + :param execution_options: optional dictionary of execution options, + which will be associated with the statement execution. This + dictionary can provide a subset of the options that are accepted + by :meth:`_engine.Connection.execution_options`. + + :return: a :class:`_engine.Result` object. + + """ + result = await greenlet_spawn( + self._proxied.execute, + statement, + parameters, + execution_options=execution_options, + _require_await=True, + ) + return await _ensure_sync_result(result, self.execute) + + @overload + async def scalar( + self, + statement: TypedReturnsRows[Tuple[_T]], + parameters: Optional[_CoreSingleExecuteParams] = None, + *, + execution_options: Optional[CoreExecuteOptionsParameter] = None, + ) -> Optional[_T]: ... + + @overload + async def scalar( + self, + statement: Executable, + parameters: Optional[_CoreSingleExecuteParams] = None, + *, + execution_options: Optional[CoreExecuteOptionsParameter] = None, + ) -> Any: ... + + async def scalar( + self, + statement: Executable, + parameters: Optional[_CoreSingleExecuteParams] = None, + *, + execution_options: Optional[CoreExecuteOptionsParameter] = None, + ) -> Any: + r"""Executes a SQL statement construct and returns a scalar object. + + This method is shorthand for invoking the + :meth:`_engine.Result.scalar` method after invoking the + :meth:`_engine.Connection.execute` method. Parameters are equivalent. + + :return: a scalar Python value representing the first column of the + first row returned. + + """ + result = await self.execute( + statement, parameters, execution_options=execution_options + ) + return result.scalar() + + @overload + async def scalars( + self, + statement: TypedReturnsRows[Tuple[_T]], + parameters: Optional[_CoreAnyExecuteParams] = None, + *, + execution_options: Optional[CoreExecuteOptionsParameter] = None, + ) -> ScalarResult[_T]: ... + + @overload + async def scalars( + self, + statement: Executable, + parameters: Optional[_CoreAnyExecuteParams] = None, + *, + execution_options: Optional[CoreExecuteOptionsParameter] = None, + ) -> ScalarResult[Any]: ... 
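A minimal sketch (illustrative only, not part of the vendored module) of how the buffered ``execute()`` / ``scalar()`` / ``scalars()`` methods above are typically combined; ``user_table`` and the engine URL are assumptions::

    from sqlalchemy import func, select
    from sqlalchemy.ext.asyncio import create_async_engine

    async def report(user_table):
        engine = create_async_engine("postgresql+asyncpg://user:pass@host/dbname")
        async with engine.connect() as conn:
            # execute() returns a fully buffered CursorResult
            rows = (await conn.execute(select(user_table))).all()

            # scalar(): first column of the first row, or None
            total = await conn.scalar(
                select(func.count()).select_from(user_table)
            )

            # scalars(): a buffered ScalarResult of single column values
            names = (await conn.scalars(select(user_table.c.name))).all()
        await engine.dispose()
        return rows, total, names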
+ + async def scalars( + self, + statement: Executable, + parameters: Optional[_CoreAnyExecuteParams] = None, + *, + execution_options: Optional[CoreExecuteOptionsParameter] = None, + ) -> ScalarResult[Any]: + r"""Executes a SQL statement construct and returns a scalar objects. + + This method is shorthand for invoking the + :meth:`_engine.Result.scalars` method after invoking the + :meth:`_engine.Connection.execute` method. Parameters are equivalent. + + :return: a :class:`_engine.ScalarResult` object. + + .. versionadded:: 1.4.24 + + """ + result = await self.execute( + statement, parameters, execution_options=execution_options + ) + return result.scalars() + + @overload + def stream_scalars( + self, + statement: TypedReturnsRows[Tuple[_T]], + parameters: Optional[_CoreSingleExecuteParams] = None, + *, + execution_options: Optional[CoreExecuteOptionsParameter] = None, + ) -> GeneratorStartableContext[AsyncScalarResult[_T]]: ... + + @overload + def stream_scalars( + self, + statement: Executable, + parameters: Optional[_CoreSingleExecuteParams] = None, + *, + execution_options: Optional[CoreExecuteOptionsParameter] = None, + ) -> GeneratorStartableContext[AsyncScalarResult[Any]]: ... + + @asyncstartablecontext + async def stream_scalars( + self, + statement: Executable, + parameters: Optional[_CoreSingleExecuteParams] = None, + *, + execution_options: Optional[CoreExecuteOptionsParameter] = None, + ) -> AsyncIterator[AsyncScalarResult[Any]]: + r"""Execute a statement and return an awaitable yielding a + :class:`_asyncio.AsyncScalarResult` object. + + E.g.:: + + result = await conn.stream_scalars(stmt) + async for scalar in result: + print(f"{scalar}") + + This method is shorthand for invoking the + :meth:`_engine.AsyncResult.scalars` method after invoking the + :meth:`_engine.Connection.stream` method. Parameters are equivalent. + + The :meth:`.AsyncConnection.stream_scalars` + method supports optional context manager use against the + :class:`.AsyncScalarResult` object, as in:: + + async with conn.stream_scalars(stmt) as result: + async for scalar in result: + print(f"{scalar}") + + In the above pattern, the :meth:`.AsyncScalarResult.close` method is + invoked unconditionally, even if the iterator is interrupted by an + exception throw. Context manager use remains optional, however, + and the function may be called in either an ``async with fn():`` or + ``await fn()`` style. + + .. versionadded:: 2.0.0b3 added context manager support + + :return: an awaitable object that will yield an + :class:`_asyncio.AsyncScalarResult` object. + + .. versionadded:: 1.4.24 + + .. seealso:: + + :meth:`.AsyncConnection.stream` + + """ + + async with self.stream( + statement, parameters, execution_options=execution_options + ) as result: + yield result.scalars() + + async def run_sync( + self, + fn: Callable[Concatenate[Connection, _P], _T], + *arg: _P.args, + **kw: _P.kwargs, + ) -> _T: + '''Invoke the given synchronous (i.e. not async) callable, + passing a synchronous-style :class:`_engine.Connection` as the first + argument. + + This method allows traditional synchronous SQLAlchemy functions to + run within the context of an asyncio application. 
+ + E.g.:: + + def do_something_with_core(conn: Connection, arg1: int, arg2: str) -> str: + """A synchronous function that does not require awaiting + + :param conn: a Core SQLAlchemy Connection, used synchronously + + :return: an optional return value is supported + + """ + conn.execute(some_table.insert().values(int_col=arg1, str_col=arg2)) + return "success" + + + async def do_something_async(async_engine: AsyncEngine) -> None: + """an async function that uses awaiting""" + + async with async_engine.begin() as async_conn: + # run do_something_with_core() with a sync-style + # Connection, proxied into an awaitable + return_code = await async_conn.run_sync( + do_something_with_core, 5, "strval" + ) + print(return_code) + + This method maintains the asyncio event loop all the way through + to the database connection by running the given callable in a + specially instrumented greenlet. + + The most rudimentary use of :meth:`.AsyncConnection.run_sync` is to + invoke methods such as :meth:`_schema.MetaData.create_all`, given + an :class:`.AsyncConnection` that needs to be provided to + :meth:`_schema.MetaData.create_all` as a :class:`_engine.Connection` + object:: + + # run metadata.create_all(conn) with a sync-style Connection, + # proxied into an awaitable + with async_engine.begin() as conn: + await conn.run_sync(metadata.create_all) + + .. note:: + + The provided callable is invoked inline within the asyncio event + loop, and will block on traditional IO calls. IO within this + callable should only call into SQLAlchemy's asyncio database + APIs which will be properly adapted to the greenlet context. + + .. seealso:: + + :meth:`.AsyncSession.run_sync` + + :ref:`session_run_sync` + + ''' # noqa: E501 + + return await greenlet_spawn( + fn, self._proxied, *arg, _require_await=False, **kw + ) + + def __await__(self) -> Generator[Any, None, AsyncConnection]: + return self.start().__await__() + + async def __aexit__(self, type_: Any, value: Any, traceback: Any) -> None: + task = asyncio.create_task(self.close()) + await asyncio.shield(task) + + # START PROXY METHODS AsyncConnection + + # code within this block is **programmatically, + # statically generated** by tools/generate_proxy_methods.py + + @property + def closed(self) -> Any: + r"""Return True if this connection is closed. + + .. container:: class_bases + + Proxied for the :class:`_engine.Connection` class + on behalf of the :class:`_asyncio.AsyncConnection` class. + + """ # noqa: E501 + + return self._proxied.closed + + @property + def invalidated(self) -> Any: + r"""Return True if this connection was invalidated. + + .. container:: class_bases + + Proxied for the :class:`_engine.Connection` class + on behalf of the :class:`_asyncio.AsyncConnection` class. + + This does not indicate whether or not the connection was + invalidated at the pool level, however + + + """ # noqa: E501 + + return self._proxied.invalidated + + @property + def dialect(self) -> Dialect: + r"""Proxy for the :attr:`_engine.Connection.dialect` attribute + on behalf of the :class:`_asyncio.AsyncConnection` class. + + """ # noqa: E501 + + return self._proxied.dialect + + @dialect.setter + def dialect(self, attr: Dialect) -> None: + self._proxied.dialect = attr + + @property + def default_isolation_level(self) -> Any: + r"""The initial-connection time isolation level associated with the + :class:`_engine.Dialect` in use. + + .. container:: class_bases + + Proxied for the :class:`_engine.Connection` class + on behalf of the :class:`_asyncio.AsyncConnection` class. 
+ + This value is independent of the + :paramref:`.Connection.execution_options.isolation_level` and + :paramref:`.Engine.execution_options.isolation_level` execution + options, and is determined by the :class:`_engine.Dialect` when the + first connection is created, by performing a SQL query against the + database for the current isolation level before any additional commands + have been emitted. + + Calling this accessor does not invoke any new SQL queries. + + .. seealso:: + + :meth:`_engine.Connection.get_isolation_level` + - view current actual isolation level + + :paramref:`_sa.create_engine.isolation_level` + - set per :class:`_engine.Engine` isolation level + + :paramref:`.Connection.execution_options.isolation_level` + - set per :class:`_engine.Connection` isolation level + + + """ # noqa: E501 + + return self._proxied.default_isolation_level + + # END PROXY METHODS AsyncConnection + + +@util.create_proxy_methods( + Engine, + ":class:`_engine.Engine`", + ":class:`_asyncio.AsyncEngine`", + classmethods=[], + methods=[ + "clear_compiled_cache", + "update_execution_options", + "get_execution_options", + ], + attributes=["url", "pool", "dialect", "engine", "name", "driver", "echo"], +) +class AsyncEngine(ProxyComparable[Engine], AsyncConnectable): + """An asyncio proxy for a :class:`_engine.Engine`. + + :class:`_asyncio.AsyncEngine` is acquired using the + :func:`_asyncio.create_async_engine` function:: + + from sqlalchemy.ext.asyncio import create_async_engine + + engine = create_async_engine("postgresql+asyncpg://user:pass@host/dbname") + + .. versionadded:: 1.4 + + """ # noqa + + # AsyncEngine is a thin proxy; no state should be added here + # that is not retrievable from the "sync" engine / connection, e.g. + # current transaction, info, etc. It should be possible to + # create a new AsyncEngine that matches this one given only the + # "sync" elements. + __slots__ = "sync_engine" + + _connection_cls: Type[AsyncConnection] = AsyncConnection + + sync_engine: Engine + """Reference to the sync-style :class:`_engine.Engine` this + :class:`_asyncio.AsyncEngine` proxies requests towards. + + This instance can be used as an event target. + + .. seealso:: + + :ref:`asyncio_events` + """ + + def __init__(self, sync_engine: Engine): + if not sync_engine.dialect.is_async: + raise exc.InvalidRequestError( + "The asyncio extension requires an async driver to be used. " + f"The loaded {sync_engine.dialect.driver!r} is not async." + ) + self.sync_engine = self._assign_proxied(sync_engine) + + @util.ro_non_memoized_property + def _proxied(self) -> Engine: + return self.sync_engine + + @classmethod + def _regenerate_proxy_for_target( + cls, target: Engine, **additional_kw: Any # noqa: U100 + ) -> AsyncEngine: + return AsyncEngine(target) + + @contextlib.asynccontextmanager + async def begin(self) -> AsyncIterator[AsyncConnection]: + """Return a context manager which when entered will deliver an + :class:`_asyncio.AsyncConnection` with an + :class:`_asyncio.AsyncTransaction` established. + + E.g.:: + + async with async_engine.begin() as conn: + await conn.execute( + text("insert into table (x, y, z) values (1, 2, 3)") + ) + await conn.execute(text("my_special_procedure(5)")) + + """ + conn = self.connect() + + async with conn: + async with conn.begin(): + yield conn + + def connect(self) -> AsyncConnection: + """Return an :class:`_asyncio.AsyncConnection` object. 
+ + The :class:`_asyncio.AsyncConnection` will procure a database + connection from the underlying connection pool when it is entered + as an async context manager:: + + async with async_engine.connect() as conn: + result = await conn.execute(select(user_table)) + + The :class:`_asyncio.AsyncConnection` may also be started outside of a + context manager by invoking its :meth:`_asyncio.AsyncConnection.start` + method. + + """ + + return self._connection_cls(self) + + async def raw_connection(self) -> PoolProxiedConnection: + """Return a "raw" DBAPI connection from the connection pool. + + .. seealso:: + + :ref:`dbapi_connections` + + """ + return await greenlet_spawn(self.sync_engine.raw_connection) + + @overload + def execution_options( + self, + *, + compiled_cache: Optional[CompiledCacheType] = ..., + logging_token: str = ..., + isolation_level: IsolationLevel = ..., + insertmanyvalues_page_size: int = ..., + schema_translate_map: Optional[SchemaTranslateMapType] = ..., + **opt: Any, + ) -> AsyncEngine: ... + + @overload + def execution_options(self, **opt: Any) -> AsyncEngine: ... + + def execution_options(self, **opt: Any) -> AsyncEngine: + """Return a new :class:`_asyncio.AsyncEngine` that will provide + :class:`_asyncio.AsyncConnection` objects with the given execution + options. + + Proxied from :meth:`_engine.Engine.execution_options`. See that + method for details. + + """ + + return AsyncEngine(self.sync_engine.execution_options(**opt)) + + async def dispose(self, close: bool = True) -> None: + """Dispose of the connection pool used by this + :class:`_asyncio.AsyncEngine`. + + :param close: if left at its default of ``True``, has the + effect of fully closing all **currently checked in** + database connections. Connections that are still checked out + will **not** be closed, however they will no longer be associated + with this :class:`_engine.Engine`, + so when they are closed individually, eventually the + :class:`_pool.Pool` which they are associated with will + be garbage collected and they will be closed out fully, if + not already closed on checkin. + + If set to ``False``, the previous connection pool is de-referenced, + and otherwise not touched in any way. + + .. seealso:: + + :meth:`_engine.Engine.dispose` + + """ + + await greenlet_spawn(self.sync_engine.dispose, close=close) + + # START PROXY METHODS AsyncEngine + + # code within this block is **programmatically, + # statically generated** by tools/generate_proxy_methods.py + + def clear_compiled_cache(self) -> None: + r"""Clear the compiled cache associated with the dialect. + + .. container:: class_bases + + Proxied for the :class:`_engine.Engine` class on + behalf of the :class:`_asyncio.AsyncEngine` class. + + This applies **only** to the built-in cache that is established + via the :paramref:`_engine.create_engine.query_cache_size` parameter. + It will not impact any dictionary caches that were passed via the + :paramref:`.Connection.execution_options.compiled_cache` parameter. + + .. versionadded:: 1.4 + + + """ # noqa: E501 + + return self._proxied.clear_compiled_cache() + + def update_execution_options(self, **opt: Any) -> None: + r"""Update the default execution_options dictionary + of this :class:`_engine.Engine`. + + .. container:: class_bases + + Proxied for the :class:`_engine.Engine` class on + behalf of the :class:`_asyncio.AsyncEngine` class. + + The given keys/values in \**opt are added to the + default execution options that will be used for + all connections. 
The initial contents of this dictionary + can be sent via the ``execution_options`` parameter + to :func:`_sa.create_engine`. + + .. seealso:: + + :meth:`_engine.Connection.execution_options` + + :meth:`_engine.Engine.execution_options` + + + """ # noqa: E501 + + return self._proxied.update_execution_options(**opt) + + def get_execution_options(self) -> _ExecuteOptions: + r"""Get the non-SQL options which will take effect during execution. + + .. container:: class_bases + + Proxied for the :class:`_engine.Engine` class on + behalf of the :class:`_asyncio.AsyncEngine` class. + + .. versionadded: 1.3 + + .. seealso:: + + :meth:`_engine.Engine.execution_options` + + """ # noqa: E501 + + return self._proxied.get_execution_options() + + @property + def url(self) -> URL: + r"""Proxy for the :attr:`_engine.Engine.url` attribute + on behalf of the :class:`_asyncio.AsyncEngine` class. + + """ # noqa: E501 + + return self._proxied.url + + @url.setter + def url(self, attr: URL) -> None: + self._proxied.url = attr + + @property + def pool(self) -> Pool: + r"""Proxy for the :attr:`_engine.Engine.pool` attribute + on behalf of the :class:`_asyncio.AsyncEngine` class. + + """ # noqa: E501 + + return self._proxied.pool + + @pool.setter + def pool(self, attr: Pool) -> None: + self._proxied.pool = attr + + @property + def dialect(self) -> Dialect: + r"""Proxy for the :attr:`_engine.Engine.dialect` attribute + on behalf of the :class:`_asyncio.AsyncEngine` class. + + """ # noqa: E501 + + return self._proxied.dialect + + @dialect.setter + def dialect(self, attr: Dialect) -> None: + self._proxied.dialect = attr + + @property + def engine(self) -> Any: + r"""Returns this :class:`.Engine`. + + .. container:: class_bases + + Proxied for the :class:`_engine.Engine` class + on behalf of the :class:`_asyncio.AsyncEngine` class. + + Used for legacy schemes that accept :class:`.Connection` / + :class:`.Engine` objects within the same variable. + + + """ # noqa: E501 + + return self._proxied.engine + + @property + def name(self) -> Any: + r"""String name of the :class:`~sqlalchemy.engine.interfaces.Dialect` + in use by this :class:`Engine`. + + .. container:: class_bases + + Proxied for the :class:`_engine.Engine` class + on behalf of the :class:`_asyncio.AsyncEngine` class. + + + """ # noqa: E501 + + return self._proxied.name + + @property + def driver(self) -> Any: + r"""Driver name of the :class:`~sqlalchemy.engine.interfaces.Dialect` + in use by this :class:`Engine`. + + .. container:: class_bases + + Proxied for the :class:`_engine.Engine` class + on behalf of the :class:`_asyncio.AsyncEngine` class. + + + """ # noqa: E501 + + return self._proxied.driver + + @property + def echo(self) -> Any: + r"""When ``True``, enable log output for this element. + + .. container:: class_bases + + Proxied for the :class:`_engine.Engine` class + on behalf of the :class:`_asyncio.AsyncEngine` class. + + This has the effect of setting the Python logging level for the namespace + of this element's class and object reference. A value of boolean ``True`` + indicates that the loglevel ``logging.INFO`` will be set for the logger, + whereas the string value ``debug`` will set the loglevel to + ``logging.DEBUG``. 
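Tying the engine-level pieces above together, a brief sketch (illustrative only; the URL and ``metadata`` contents are assumptions) of the create / begin / run_sync / dispose lifecycle::

    import asyncio

    from sqlalchemy import MetaData
    from sqlalchemy.ext.asyncio import create_async_engine

    metadata = MetaData()  # Table objects would be declared against this

    async def bootstrap() -> None:
        engine = create_async_engine("postgresql+asyncpg://user:pass@host/dbname")
        async with engine.begin() as conn:
            # run the synchronous create_all() through the greenlet adapter
            await conn.run_sync(metadata.create_all)
        # close checked-in pooled connections when done
        await engine.dispose()

    asyncio.run(bootstrap())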
+ + """ # noqa: E501 + + return self._proxied.echo + + @echo.setter + def echo(self, attr: Any) -> None: + self._proxied.echo = attr + + # END PROXY METHODS AsyncEngine + + +class AsyncTransaction( + ProxyComparable[Transaction], StartableContext["AsyncTransaction"] +): + """An asyncio proxy for a :class:`_engine.Transaction`.""" + + __slots__ = ("connection", "sync_transaction", "nested") + + sync_transaction: Optional[Transaction] + connection: AsyncConnection + nested: bool + + def __init__(self, connection: AsyncConnection, nested: bool = False): + self.connection = connection + self.sync_transaction = None + self.nested = nested + + @classmethod + def _regenerate_proxy_for_target( + cls, target: Transaction, **additional_kw: Any # noqa: U100 + ) -> AsyncTransaction: + sync_connection = target.connection + sync_transaction = target + nested = isinstance(target, NestedTransaction) + + async_connection = AsyncConnection._retrieve_proxy_for_target( + sync_connection + ) + assert async_connection is not None + + obj = cls.__new__(cls) + obj.connection = async_connection + obj.sync_transaction = obj._assign_proxied(sync_transaction) + obj.nested = nested + return obj + + @util.ro_non_memoized_property + def _proxied(self) -> Transaction: + if not self.sync_transaction: + self._raise_for_not_started() + return self.sync_transaction + + @property + def is_valid(self) -> bool: + return self._proxied.is_valid + + @property + def is_active(self) -> bool: + return self._proxied.is_active + + async def close(self) -> None: + """Close this :class:`.AsyncTransaction`. + + If this transaction is the base transaction in a begin/commit + nesting, the transaction will rollback(). Otherwise, the + method returns. + + This is used to cancel a Transaction without affecting the scope of + an enclosing transaction. + + """ + await greenlet_spawn(self._proxied.close) + + async def rollback(self) -> None: + """Roll back this :class:`.AsyncTransaction`.""" + await greenlet_spawn(self._proxied.rollback) + + async def commit(self) -> None: + """Commit this :class:`.AsyncTransaction`.""" + + await greenlet_spawn(self._proxied.commit) + + async def start(self, is_ctxmanager: bool = False) -> AsyncTransaction: + """Start this :class:`_asyncio.AsyncTransaction` object's context + outside of using a Python ``with:`` block. + + """ + + self.sync_transaction = self._assign_proxied( + await greenlet_spawn( + self.connection._proxied.begin_nested + if self.nested + else self.connection._proxied.begin + ) + ) + if is_ctxmanager: + self.sync_transaction.__enter__() + return self + + async def __aexit__(self, type_: Any, value: Any, traceback: Any) -> None: + await greenlet_spawn(self._proxied.__exit__, type_, value, traceback) + + +@overload +def _get_sync_engine_or_connection(async_engine: AsyncEngine) -> Engine: ... + + +@overload +def _get_sync_engine_or_connection( + async_engine: AsyncConnection, +) -> Connection: ... + + +def _get_sync_engine_or_connection( + async_engine: Union[AsyncEngine, AsyncConnection] +) -> Union[Engine, Connection]: + if isinstance(async_engine, AsyncConnection): + return async_engine._proxied + + try: + return async_engine.sync_engine + except AttributeError as e: + raise exc.ArgumentError( + "AsyncEngine expected, got %r" % async_engine + ) from e + + +@inspection._inspects(AsyncConnection) +def _no_insp_for_async_conn_yet( + subject: AsyncConnection, # noqa: U100 +) -> NoReturn: + raise exc.NoInspectionAvailable( + "Inspection on an AsyncConnection is currently not supported. 
" + "Please use ``run_sync`` to pass a callable where it's possible " + "to call ``inspect`` on the passed connection.", + code="xd3s", + ) + + +@inspection._inspects(AsyncEngine) +def _no_insp_for_async_engine_xyet( + subject: AsyncEngine, # noqa: U100 +) -> NoReturn: + raise exc.NoInspectionAvailable( + "Inspection on an AsyncEngine is currently not supported. " + "Please obtain a connection then use ``conn.run_sync`` to pass a " + "callable where it's possible to call ``inspect`` on the " + "passed connection.", + code="xd3s", + ) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/ext/asyncio/exc.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/ext/asyncio/exc.py new file mode 100644 index 0000000000000000000000000000000000000000..558187c0b417140abd9b07dbfa5eb96c0fe10ef1 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/ext/asyncio/exc.py @@ -0,0 +1,21 @@ +# ext/asyncio/exc.py +# Copyright (C) 2020-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php + +from ... import exc + + +class AsyncMethodRequired(exc.InvalidRequestError): + """an API can't be used because its result would not be + compatible with async""" + + +class AsyncContextNotStarted(exc.InvalidRequestError): + """a startable context manager has not been started.""" + + +class AsyncContextAlreadyStarted(exc.InvalidRequestError): + """a startable context manager is already started.""" diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/ext/asyncio/result.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/ext/asyncio/result.py new file mode 100644 index 0000000000000000000000000000000000000000..8003f66afe29320db9014b0559a1a380657c4212 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/ext/asyncio/result.py @@ -0,0 +1,962 @@ +# ext/asyncio/result.py +# Copyright (C) 2020-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php +from __future__ import annotations + +import operator +from typing import Any +from typing import AsyncIterator +from typing import Optional +from typing import overload +from typing import Sequence +from typing import Tuple +from typing import TYPE_CHECKING +from typing import TypeVar + +from . import exc as async_exc +from ... 
import util +from ...engine import Result +from ...engine.result import _NO_ROW +from ...engine.result import _R +from ...engine.result import _WithKeys +from ...engine.result import FilterResult +from ...engine.result import FrozenResult +from ...engine.result import ResultMetaData +from ...engine.row import Row +from ...engine.row import RowMapping +from ...sql.base import _generative +from ...util.concurrency import greenlet_spawn +from ...util.typing import Literal +from ...util.typing import Self + +if TYPE_CHECKING: + from ...engine import CursorResult + from ...engine.result import _KeyIndexType + from ...engine.result import _UniqueFilterType + +_T = TypeVar("_T", bound=Any) +_TP = TypeVar("_TP", bound=Tuple[Any, ...]) + + +class AsyncCommon(FilterResult[_R]): + __slots__ = () + + _real_result: Result[Any] + _metadata: ResultMetaData + + async def close(self) -> None: # type: ignore[override] + """Close this result.""" + + await greenlet_spawn(self._real_result.close) + + @property + def closed(self) -> bool: + """proxies the .closed attribute of the underlying result object, + if any, else raises ``AttributeError``. + + .. versionadded:: 2.0.0b3 + + """ + return self._real_result.closed + + +class AsyncResult(_WithKeys, AsyncCommon[Row[_TP]]): + """An asyncio wrapper around a :class:`_result.Result` object. + + The :class:`_asyncio.AsyncResult` only applies to statement executions that + use a server-side cursor. It is returned only from the + :meth:`_asyncio.AsyncConnection.stream` and + :meth:`_asyncio.AsyncSession.stream` methods. + + .. note:: As is the case with :class:`_engine.Result`, this object is + used for ORM results returned by :meth:`_asyncio.AsyncSession.execute`, + which can yield instances of ORM mapped objects either individually or + within tuple-like rows. Note that these result objects do not + deduplicate instances or rows automatically as is the case with the + legacy :class:`_orm.Query` object. For in-Python de-duplication of + instances or rows, use the :meth:`_asyncio.AsyncResult.unique` modifier + method. + + .. versionadded:: 1.4 + + """ + + __slots__ = () + + _real_result: Result[_TP] + + def __init__(self, real_result: Result[_TP]): + self._real_result = real_result + + self._metadata = real_result._metadata + self._unique_filter_state = real_result._unique_filter_state + self._source_supports_scalars = real_result._source_supports_scalars + self._post_creational_filter = None + + # BaseCursorResult pre-generates the "_row_getter". Use that + # if available rather than building a second one + if "_row_getter" in real_result.__dict__: + self._set_memoized_attribute( + "_row_getter", real_result.__dict__["_row_getter"] + ) + + @property + def t(self) -> AsyncTupleResult[_TP]: + """Apply a "typed tuple" typing filter to returned rows. + + The :attr:`_asyncio.AsyncResult.t` attribute is a synonym for + calling the :meth:`_asyncio.AsyncResult.tuples` method. + + .. versionadded:: 2.0 + + """ + return self # type: ignore + + def tuples(self) -> AsyncTupleResult[_TP]: + """Apply a "typed tuple" typing filter to returned rows. + + This method returns the same :class:`_asyncio.AsyncResult` object + at runtime, + however annotates as returning a :class:`_asyncio.AsyncTupleResult` + object that will indicate to :pep:`484` typing tools that plain typed + ``Tuple`` instances are returned rather than rows. 
This allows + tuple unpacking and ``__getitem__`` access of :class:`_engine.Row` + objects to by typed, for those cases where the statement invoked + itself included typing information. + + .. versionadded:: 2.0 + + :return: the :class:`_result.AsyncTupleResult` type at typing time. + + .. seealso:: + + :attr:`_asyncio.AsyncResult.t` - shorter synonym + + :attr:`_engine.Row.t` - :class:`_engine.Row` version + + """ + + return self # type: ignore + + @_generative + def unique(self, strategy: Optional[_UniqueFilterType] = None) -> Self: + """Apply unique filtering to the objects returned by this + :class:`_asyncio.AsyncResult`. + + Refer to :meth:`_engine.Result.unique` in the synchronous + SQLAlchemy API for a complete behavioral description. + + """ + self._unique_filter_state = (set(), strategy) + return self + + def columns(self, *col_expressions: _KeyIndexType) -> Self: + r"""Establish the columns that should be returned in each row. + + Refer to :meth:`_engine.Result.columns` in the synchronous + SQLAlchemy API for a complete behavioral description. + + """ + return self._column_slices(col_expressions) + + async def partitions( + self, size: Optional[int] = None + ) -> AsyncIterator[Sequence[Row[_TP]]]: + """Iterate through sub-lists of rows of the size given. + + An async iterator is returned:: + + async def scroll_results(connection): + result = await connection.stream(select(users_table)) + + async for partition in result.partitions(100): + print("list of rows: %s" % partition) + + Refer to :meth:`_engine.Result.partitions` in the synchronous + SQLAlchemy API for a complete behavioral description. + + """ + + getter = self._manyrow_getter + + while True: + partition = await greenlet_spawn(getter, self, size) + if partition: + yield partition + else: + break + + async def fetchall(self) -> Sequence[Row[_TP]]: + """A synonym for the :meth:`_asyncio.AsyncResult.all` method. + + .. versionadded:: 2.0 + + """ + + return await greenlet_spawn(self._allrows) + + async def fetchone(self) -> Optional[Row[_TP]]: + """Fetch one row. + + When all rows are exhausted, returns None. + + This method is provided for backwards compatibility with + SQLAlchemy 1.x.x. + + To fetch the first row of a result only, use the + :meth:`_asyncio.AsyncResult.first` method. To iterate through all + rows, iterate the :class:`_asyncio.AsyncResult` object directly. + + :return: a :class:`_engine.Row` object if no filters are applied, + or ``None`` if no rows remain. + + """ + row = await greenlet_spawn(self._onerow_getter, self) + if row is _NO_ROW: + return None + else: + return row + + async def fetchmany( + self, size: Optional[int] = None + ) -> Sequence[Row[_TP]]: + """Fetch many rows. + + When all rows are exhausted, returns an empty list. + + This method is provided for backwards compatibility with + SQLAlchemy 1.x.x. + + To fetch rows in groups, use the + :meth:`._asyncio.AsyncResult.partitions` method. + + :return: a list of :class:`_engine.Row` objects. + + .. seealso:: + + :meth:`_asyncio.AsyncResult.partitions` + + """ + + return await greenlet_spawn(self._manyrow_getter, self, size) + + async def all(self) -> Sequence[Row[_TP]]: + """Return all rows in a list. + + Closes the result set after invocation. Subsequent invocations + will return an empty list. + + :return: a list of :class:`_engine.Row` objects. 
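To illustrate the fetch methods documented above against a streamed result (a sketch only, not part of the vendored file; ``users_table`` and the URL are assumptions)::

    from sqlalchemy import select
    from sqlalchemy.ext.asyncio import create_async_engine

    async def scroll(users_table):
        engine = create_async_engine("postgresql+asyncpg://user:pass@host/dbname")
        async with engine.connect() as conn:
            # chunked iteration without buffering the whole result set
            async with conn.stream(select(users_table)) as result:
                async for partition in result.partitions(100):
                    print("fetched %d rows" % len(partition))

            # row-at-a-time fetching, 1.x style
            result = await conn.stream(select(users_table))
            while (row := await result.fetchone()) is not None:
                print(row)
            await result.close()
        await engine.dispose()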
+ + """ + + return await greenlet_spawn(self._allrows) + + def __aiter__(self) -> AsyncResult[_TP]: + return self + + async def __anext__(self) -> Row[_TP]: + row = await greenlet_spawn(self._onerow_getter, self) + if row is _NO_ROW: + raise StopAsyncIteration() + else: + return row + + async def first(self) -> Optional[Row[_TP]]: + """Fetch the first row or ``None`` if no row is present. + + Closes the result set and discards remaining rows. + + .. note:: This method returns one **row**, e.g. tuple, by default. + To return exactly one single scalar value, that is, the first + column of the first row, use the + :meth:`_asyncio.AsyncResult.scalar` method, + or combine :meth:`_asyncio.AsyncResult.scalars` and + :meth:`_asyncio.AsyncResult.first`. + + Additionally, in contrast to the behavior of the legacy ORM + :meth:`_orm.Query.first` method, **no limit is applied** to the + SQL query which was invoked to produce this + :class:`_asyncio.AsyncResult`; + for a DBAPI driver that buffers results in memory before yielding + rows, all rows will be sent to the Python process and all but + the first row will be discarded. + + .. seealso:: + + :ref:`migration_20_unify_select` + + :return: a :class:`_engine.Row` object, or None + if no rows remain. + + .. seealso:: + + :meth:`_asyncio.AsyncResult.scalar` + + :meth:`_asyncio.AsyncResult.one` + + """ + return await greenlet_spawn(self._only_one_row, False, False, False) + + async def one_or_none(self) -> Optional[Row[_TP]]: + """Return at most one result or raise an exception. + + Returns ``None`` if the result has no rows. + Raises :class:`.MultipleResultsFound` + if multiple rows are returned. + + .. versionadded:: 1.4 + + :return: The first :class:`_engine.Row` or ``None`` if no row + is available. + + :raises: :class:`.MultipleResultsFound` + + .. seealso:: + + :meth:`_asyncio.AsyncResult.first` + + :meth:`_asyncio.AsyncResult.one` + + """ + return await greenlet_spawn(self._only_one_row, True, False, False) + + @overload + async def scalar_one(self: AsyncResult[Tuple[_T]]) -> _T: ... + + @overload + async def scalar_one(self) -> Any: ... + + async def scalar_one(self) -> Any: + """Return exactly one scalar result or raise an exception. + + This is equivalent to calling :meth:`_asyncio.AsyncResult.scalars` and + then :meth:`_asyncio.AsyncScalarResult.one`. + + .. seealso:: + + :meth:`_asyncio.AsyncScalarResult.one` + + :meth:`_asyncio.AsyncResult.scalars` + + """ + return await greenlet_spawn(self._only_one_row, True, True, True) + + @overload + async def scalar_one_or_none( + self: AsyncResult[Tuple[_T]], + ) -> Optional[_T]: ... + + @overload + async def scalar_one_or_none(self) -> Optional[Any]: ... + + async def scalar_one_or_none(self) -> Optional[Any]: + """Return exactly one scalar result or ``None``. + + This is equivalent to calling :meth:`_asyncio.AsyncResult.scalars` and + then :meth:`_asyncio.AsyncScalarResult.one_or_none`. + + .. seealso:: + + :meth:`_asyncio.AsyncScalarResult.one_or_none` + + :meth:`_asyncio.AsyncResult.scalars` + + """ + return await greenlet_spawn(self._only_one_row, True, False, True) + + async def one(self) -> Row[_TP]: + """Return exactly one row or raise an exception. + + Raises :class:`.NoResultFound` if the result returns no + rows, or :class:`.MultipleResultsFound` if multiple rows + would be returned. + + .. note:: This method returns one **row**, e.g. tuple, by default. 
+ To return exactly one single scalar value, that is, the first + column of the first row, use the + :meth:`_asyncio.AsyncResult.scalar_one` method, or combine + :meth:`_asyncio.AsyncResult.scalars` and + :meth:`_asyncio.AsyncResult.one`. + + .. versionadded:: 1.4 + + :return: The first :class:`_engine.Row`. + + :raises: :class:`.MultipleResultsFound`, :class:`.NoResultFound` + + .. seealso:: + + :meth:`_asyncio.AsyncResult.first` + + :meth:`_asyncio.AsyncResult.one_or_none` + + :meth:`_asyncio.AsyncResult.scalar_one` + + """ + return await greenlet_spawn(self._only_one_row, True, True, False) + + @overload + async def scalar(self: AsyncResult[Tuple[_T]]) -> Optional[_T]: ... + + @overload + async def scalar(self) -> Any: ... + + async def scalar(self) -> Any: + """Fetch the first column of the first row, and close the result set. + + Returns ``None`` if there are no rows to fetch. + + No validation is performed to test if additional rows remain. + + After calling this method, the object is fully closed, + e.g. the :meth:`_engine.CursorResult.close` + method will have been called. + + :return: a Python scalar value, or ``None`` if no rows remain. + + """ + return await greenlet_spawn(self._only_one_row, False, False, True) + + async def freeze(self) -> FrozenResult[_TP]: + """Return a callable object that will produce copies of this + :class:`_asyncio.AsyncResult` when invoked. + + The callable object returned is an instance of + :class:`_engine.FrozenResult`. + + This is used for result set caching. The method must be called + on the result when it has been unconsumed, and calling the method + will consume the result fully. When the :class:`_engine.FrozenResult` + is retrieved from a cache, it can be called any number of times where + it will produce a new :class:`_engine.Result` object each time + against its stored set of rows. + + .. seealso:: + + :ref:`do_orm_execute_re_executing` - example usage within the + ORM to implement a result-set cache. + + """ + + return await greenlet_spawn(FrozenResult, self) + + @overload + def scalars( + self: AsyncResult[Tuple[_T]], index: Literal[0] + ) -> AsyncScalarResult[_T]: ... + + @overload + def scalars(self: AsyncResult[Tuple[_T]]) -> AsyncScalarResult[_T]: ... + + @overload + def scalars(self, index: _KeyIndexType = 0) -> AsyncScalarResult[Any]: ... + + def scalars(self, index: _KeyIndexType = 0) -> AsyncScalarResult[Any]: + """Return an :class:`_asyncio.AsyncScalarResult` filtering object which + will return single elements rather than :class:`_row.Row` objects. + + Refer to :meth:`_result.Result.scalars` in the synchronous + SQLAlchemy API for a complete behavioral description. + + :param index: integer or row key indicating the column to be fetched + from each row, defaults to ``0`` indicating the first column. + + :return: a new :class:`_asyncio.AsyncScalarResult` filtering object + referring to this :class:`_asyncio.AsyncResult` object. + + """ + return AsyncScalarResult(self._real_result, index) + + def mappings(self) -> AsyncMappingResult: + """Apply a mappings filter to returned rows, returning an instance of + :class:`_asyncio.AsyncMappingResult`. + + When this filter is applied, fetching rows will return + :class:`_engine.RowMapping` objects instead of :class:`_engine.Row` + objects. + + :return: a new :class:`_asyncio.AsyncMappingResult` filtering object + referring to the underlying :class:`_result.Result` object. 
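A short sketch of consuming the mappings filter, again assuming a hypothetical ``users_table`` with a ``name`` column, might look like::

    from sqlalchemy import select


    async def read_names(conn):
        result = await conn.stream(select(users_table))

        # each element is a RowMapping, keyed by column name
        async for row in result.mappings():
            print(row["name"])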
+ + """ + + return AsyncMappingResult(self._real_result) + + +class AsyncScalarResult(AsyncCommon[_R]): + """A wrapper for a :class:`_asyncio.AsyncResult` that returns scalar values + rather than :class:`_row.Row` values. + + The :class:`_asyncio.AsyncScalarResult` object is acquired by calling the + :meth:`_asyncio.AsyncResult.scalars` method. + + Refer to the :class:`_result.ScalarResult` object in the synchronous + SQLAlchemy API for a complete behavioral description. + + .. versionadded:: 1.4 + + """ + + __slots__ = () + + _generate_rows = False + + def __init__(self, real_result: Result[Any], index: _KeyIndexType): + self._real_result = real_result + + if real_result._source_supports_scalars: + self._metadata = real_result._metadata + self._post_creational_filter = None + else: + self._metadata = real_result._metadata._reduce([index]) + self._post_creational_filter = operator.itemgetter(0) + + self._unique_filter_state = real_result._unique_filter_state + + def unique( + self, + strategy: Optional[_UniqueFilterType] = None, + ) -> Self: + """Apply unique filtering to the objects returned by this + :class:`_asyncio.AsyncScalarResult`. + + See :meth:`_asyncio.AsyncResult.unique` for usage details. + + """ + self._unique_filter_state = (set(), strategy) + return self + + async def partitions( + self, size: Optional[int] = None + ) -> AsyncIterator[Sequence[_R]]: + """Iterate through sub-lists of elements of the size given. + + Equivalent to :meth:`_asyncio.AsyncResult.partitions` except that + scalar values, rather than :class:`_engine.Row` objects, + are returned. + + """ + + getter = self._manyrow_getter + + while True: + partition = await greenlet_spawn(getter, self, size) + if partition: + yield partition + else: + break + + async def fetchall(self) -> Sequence[_R]: + """A synonym for the :meth:`_asyncio.AsyncScalarResult.all` method.""" + + return await greenlet_spawn(self._allrows) + + async def fetchmany(self, size: Optional[int] = None) -> Sequence[_R]: + """Fetch many objects. + + Equivalent to :meth:`_asyncio.AsyncResult.fetchmany` except that + scalar values, rather than :class:`_engine.Row` objects, + are returned. + + """ + return await greenlet_spawn(self._manyrow_getter, self, size) + + async def all(self) -> Sequence[_R]: + """Return all scalar values in a list. + + Equivalent to :meth:`_asyncio.AsyncResult.all` except that + scalar values, rather than :class:`_engine.Row` objects, + are returned. + + """ + return await greenlet_spawn(self._allrows) + + def __aiter__(self) -> AsyncScalarResult[_R]: + return self + + async def __anext__(self) -> _R: + row = await greenlet_spawn(self._onerow_getter, self) + if row is _NO_ROW: + raise StopAsyncIteration() + else: + return row + + async def first(self) -> Optional[_R]: + """Fetch the first object or ``None`` if no object is present. + + Equivalent to :meth:`_asyncio.AsyncResult.first` except that + scalar values, rather than :class:`_engine.Row` objects, + are returned. + + """ + return await greenlet_spawn(self._only_one_row, False, False, False) + + async def one_or_none(self) -> Optional[_R]: + """Return at most one object or raise an exception. + + Equivalent to :meth:`_asyncio.AsyncResult.one_or_none` except that + scalar values, rather than :class:`_engine.Row` objects, + are returned. + + """ + return await greenlet_spawn(self._only_one_row, True, False, False) + + async def one(self) -> _R: + """Return exactly one object or raise an exception. 
+ + Equivalent to :meth:`_asyncio.AsyncResult.one` except that + scalar values, rather than :class:`_engine.Row` objects, + are returned. + + """ + return await greenlet_spawn(self._only_one_row, True, True, False) + + +class AsyncMappingResult(_WithKeys, AsyncCommon[RowMapping]): + """A wrapper for a :class:`_asyncio.AsyncResult` that returns dictionary + values rather than :class:`_engine.Row` values. + + The :class:`_asyncio.AsyncMappingResult` object is acquired by calling the + :meth:`_asyncio.AsyncResult.mappings` method. + + Refer to the :class:`_result.MappingResult` object in the synchronous + SQLAlchemy API for a complete behavioral description. + + .. versionadded:: 1.4 + + """ + + __slots__ = () + + _generate_rows = True + + _post_creational_filter = operator.attrgetter("_mapping") + + def __init__(self, result: Result[Any]): + self._real_result = result + self._unique_filter_state = result._unique_filter_state + self._metadata = result._metadata + if result._source_supports_scalars: + self._metadata = self._metadata._reduce([0]) + + def unique( + self, + strategy: Optional[_UniqueFilterType] = None, + ) -> Self: + """Apply unique filtering to the objects returned by this + :class:`_asyncio.AsyncMappingResult`. + + See :meth:`_asyncio.AsyncResult.unique` for usage details. + + """ + self._unique_filter_state = (set(), strategy) + return self + + def columns(self, *col_expressions: _KeyIndexType) -> Self: + r"""Establish the columns that should be returned in each row.""" + return self._column_slices(col_expressions) + + async def partitions( + self, size: Optional[int] = None + ) -> AsyncIterator[Sequence[RowMapping]]: + """Iterate through sub-lists of elements of the size given. + + Equivalent to :meth:`_asyncio.AsyncResult.partitions` except that + :class:`_engine.RowMapping` values, rather than :class:`_engine.Row` + objects, are returned. + + """ + + getter = self._manyrow_getter + + while True: + partition = await greenlet_spawn(getter, self, size) + if partition: + yield partition + else: + break + + async def fetchall(self) -> Sequence[RowMapping]: + """A synonym for the :meth:`_asyncio.AsyncMappingResult.all` method.""" + + return await greenlet_spawn(self._allrows) + + async def fetchone(self) -> Optional[RowMapping]: + """Fetch one object. + + Equivalent to :meth:`_asyncio.AsyncResult.fetchone` except that + :class:`_engine.RowMapping` values, rather than :class:`_engine.Row` + objects, are returned. + + """ + + row = await greenlet_spawn(self._onerow_getter, self) + if row is _NO_ROW: + return None + else: + return row + + async def fetchmany( + self, size: Optional[int] = None + ) -> Sequence[RowMapping]: + """Fetch many rows. + + Equivalent to :meth:`_asyncio.AsyncResult.fetchmany` except that + :class:`_engine.RowMapping` values, rather than :class:`_engine.Row` + objects, are returned. + + """ + + return await greenlet_spawn(self._manyrow_getter, self, size) + + async def all(self) -> Sequence[RowMapping]: + """Return all rows in a list. + + Equivalent to :meth:`_asyncio.AsyncResult.all` except that + :class:`_engine.RowMapping` values, rather than :class:`_engine.Row` + objects, are returned. 
+ + """ + + return await greenlet_spawn(self._allrows) + + def __aiter__(self) -> AsyncMappingResult: + return self + + async def __anext__(self) -> RowMapping: + row = await greenlet_spawn(self._onerow_getter, self) + if row is _NO_ROW: + raise StopAsyncIteration() + else: + return row + + async def first(self) -> Optional[RowMapping]: + """Fetch the first object or ``None`` if no object is present. + + Equivalent to :meth:`_asyncio.AsyncResult.first` except that + :class:`_engine.RowMapping` values, rather than :class:`_engine.Row` + objects, are returned. + + """ + return await greenlet_spawn(self._only_one_row, False, False, False) + + async def one_or_none(self) -> Optional[RowMapping]: + """Return at most one object or raise an exception. + + Equivalent to :meth:`_asyncio.AsyncResult.one_or_none` except that + :class:`_engine.RowMapping` values, rather than :class:`_engine.Row` + objects, are returned. + + """ + return await greenlet_spawn(self._only_one_row, True, False, False) + + async def one(self) -> RowMapping: + """Return exactly one object or raise an exception. + + Equivalent to :meth:`_asyncio.AsyncResult.one` except that + :class:`_engine.RowMapping` values, rather than :class:`_engine.Row` + objects, are returned. + + """ + return await greenlet_spawn(self._only_one_row, True, True, False) + + +class AsyncTupleResult(AsyncCommon[_R], util.TypingOnly): + """A :class:`_asyncio.AsyncResult` that's typed as returning plain + Python tuples instead of rows. + + Since :class:`_engine.Row` acts like a tuple in every way already, + this class is a typing only class, regular :class:`_asyncio.AsyncResult` is + still used at runtime. + + """ + + __slots__ = () + + if TYPE_CHECKING: + + async def partitions( + self, size: Optional[int] = None + ) -> AsyncIterator[Sequence[_R]]: + """Iterate through sub-lists of elements of the size given. + + Equivalent to :meth:`_result.Result.partitions` except that + tuple values, rather than :class:`_engine.Row` objects, + are returned. + + """ + ... + + async def fetchone(self) -> Optional[_R]: + """Fetch one tuple. + + Equivalent to :meth:`_result.Result.fetchone` except that + tuple values, rather than :class:`_engine.Row` + objects, are returned. + + """ + ... + + async def fetchall(self) -> Sequence[_R]: + """A synonym for the :meth:`_engine.ScalarResult.all` method.""" + ... + + async def fetchmany(self, size: Optional[int] = None) -> Sequence[_R]: + """Fetch many objects. + + Equivalent to :meth:`_result.Result.fetchmany` except that + tuple values, rather than :class:`_engine.Row` objects, + are returned. + + """ + ... + + async def all(self) -> Sequence[_R]: # noqa: A001 + """Return all scalar values in a list. + + Equivalent to :meth:`_result.Result.all` except that + tuple values, rather than :class:`_engine.Row` objects, + are returned. + + """ + ... + + async def __aiter__(self) -> AsyncIterator[_R]: ... + + async def __anext__(self) -> _R: ... + + async def first(self) -> Optional[_R]: + """Fetch the first object or ``None`` if no object is present. + + Equivalent to :meth:`_result.Result.first` except that + tuple values, rather than :class:`_engine.Row` objects, + are returned. + + + """ + ... + + async def one_or_none(self) -> Optional[_R]: + """Return at most one object or raise an exception. + + Equivalent to :meth:`_result.Result.one_or_none` except that + tuple values, rather than :class:`_engine.Row` objects, + are returned. + + """ + ... + + async def one(self) -> _R: + """Return exactly one object or raise an exception. 
+ + Equivalent to :meth:`_result.Result.one` except that + tuple values, rather than :class:`_engine.Row` objects, + are returned. + + """ + ... + + @overload + async def scalar_one(self: AsyncTupleResult[Tuple[_T]]) -> _T: ... + + @overload + async def scalar_one(self) -> Any: ... + + async def scalar_one(self) -> Any: + """Return exactly one scalar result or raise an exception. + + This is equivalent to calling :meth:`_engine.Result.scalars` + and then :meth:`_engine.AsyncScalarResult.one`. + + .. seealso:: + + :meth:`_engine.AsyncScalarResult.one` + + :meth:`_engine.Result.scalars` + + """ + ... + + @overload + async def scalar_one_or_none( + self: AsyncTupleResult[Tuple[_T]], + ) -> Optional[_T]: ... + + @overload + async def scalar_one_or_none(self) -> Optional[Any]: ... + + async def scalar_one_or_none(self) -> Optional[Any]: + """Return exactly one or no scalar result. + + This is equivalent to calling :meth:`_engine.Result.scalars` + and then :meth:`_engine.AsyncScalarResult.one_or_none`. + + .. seealso:: + + :meth:`_engine.AsyncScalarResult.one_or_none` + + :meth:`_engine.Result.scalars` + + """ + ... + + @overload + async def scalar( + self: AsyncTupleResult[Tuple[_T]], + ) -> Optional[_T]: ... + + @overload + async def scalar(self) -> Any: ... + + async def scalar(self) -> Any: + """Fetch the first column of the first row, and close the result + set. + + Returns ``None`` if there are no rows to fetch. + + No validation is performed to test if additional rows remain. + + After calling this method, the object is fully closed, + e.g. the :meth:`_engine.CursorResult.close` + method will have been called. + + :return: a Python scalar value , or ``None`` if no rows remain. + + """ + ... + + +_RT = TypeVar("_RT", bound="Result[Any]") + + +async def _ensure_sync_result(result: _RT, calling_method: Any) -> _RT: + cursor_result: CursorResult[Any] + + try: + is_cursor = result._is_cursor + except AttributeError: + # legacy execute(DefaultGenerator) case + return result + + if not is_cursor: + cursor_result = getattr(result, "raw", None) # type: ignore + else: + cursor_result = result # type: ignore + if cursor_result and cursor_result.context._is_server_side: + await greenlet_spawn(cursor_result.close) + raise async_exc.AsyncMethodRequired( + "Can't use the %s.%s() method with a " + "server-side cursor. " + "Use the %s.stream() method for an async " + "streaming result set." 
+ % ( + calling_method.__self__.__class__.__name__, + calling_method.__name__, + calling_method.__self__.__class__.__name__, + ) + ) + return result diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/ext/asyncio/scoping.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/ext/asyncio/scoping.py new file mode 100644 index 0000000000000000000000000000000000000000..d2a9a51b231817a6a486ea221a45857da80a072e --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/ext/asyncio/scoping.py @@ -0,0 +1,1613 @@ +# ext/asyncio/scoping.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php + +from __future__ import annotations + +from typing import Any +from typing import Callable +from typing import Generic +from typing import Iterable +from typing import Iterator +from typing import Optional +from typing import overload +from typing import Sequence +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING +from typing import TypeVar +from typing import Union + +from .session import _AS +from .session import async_sessionmaker +from .session import AsyncSession +from ... import exc as sa_exc +from ... import util +from ...orm.session import Session +from ...util import create_proxy_methods +from ...util import ScopedRegistry +from ...util import warn +from ...util import warn_deprecated + +if TYPE_CHECKING: + from .engine import AsyncConnection + from .result import AsyncResult + from .result import AsyncScalarResult + from .session import AsyncSessionTransaction + from ...engine import Connection + from ...engine import CursorResult + from ...engine import Engine + from ...engine import Result + from ...engine import Row + from ...engine import RowMapping + from ...engine.interfaces import _CoreAnyExecuteParams + from ...engine.interfaces import CoreExecuteOptionsParameter + from ...engine.result import ScalarResult + from ...orm._typing import _IdentityKeyType + from ...orm._typing import _O + from ...orm._typing import OrmExecuteOptionsParameter + from ...orm.interfaces import ORMOption + from ...orm.session import _BindArguments + from ...orm.session import _EntityBindKey + from ...orm.session import _PKIdentityArgument + from ...orm.session import _SessionBind + from ...sql.base import Executable + from ...sql.dml import UpdateBase + from ...sql.elements import ClauseElement + from ...sql.selectable import ForUpdateParameter + from ...sql.selectable import TypedReturnsRows + +_T = TypeVar("_T", bound=Any) + + +@create_proxy_methods( + AsyncSession, + ":class:`_asyncio.AsyncSession`", + ":class:`_asyncio.scoping.async_scoped_session`", + classmethods=["close_all", "object_session", "identity_key"], + methods=[ + "__contains__", + "__iter__", + "aclose", + "add", + "add_all", + "begin", + "begin_nested", + "close", + "reset", + "commit", + "connection", + "delete", + "execute", + "expire", + "expire_all", + "expunge", + "expunge_all", + "flush", + "get_bind", + "is_modified", + "invalidate", + "merge", + "refresh", + "rollback", + "scalar", + "scalars", + "get", + "get_one", + "stream", + "stream_scalars", + ], + attributes=[ + "bind", + "dirty", + "deleted", + "new", + "identity_map", + "is_active", + "autoflush", + "no_autoflush", + "info", + ], + 
use_intermediate_variable=["get"], +) +class async_scoped_session(Generic[_AS]): + """Provides scoped management of :class:`.AsyncSession` objects. + + See the section :ref:`asyncio_scoped_session` for usage details. + + .. versionadded:: 1.4.19 + + + """ + + _support_async = True + + session_factory: async_sessionmaker[_AS] + """The `session_factory` provided to `__init__` is stored in this + attribute and may be accessed at a later time. This can be useful when + a new non-scoped :class:`.AsyncSession` is needed.""" + + registry: ScopedRegistry[_AS] + + def __init__( + self, + session_factory: async_sessionmaker[_AS], + scopefunc: Callable[[], Any], + ): + """Construct a new :class:`_asyncio.async_scoped_session`. + + :param session_factory: a factory to create new :class:`_asyncio.AsyncSession` + instances. This is usually, but not necessarily, an instance + of :class:`_asyncio.async_sessionmaker`. + + :param scopefunc: function which defines + the current scope. A function such as ``asyncio.current_task`` + may be useful here. + + """ # noqa: E501 + + self.session_factory = session_factory + self.registry = ScopedRegistry(session_factory, scopefunc) + + @property + def _proxied(self) -> _AS: + return self.registry() + + def __call__(self, **kw: Any) -> _AS: + r"""Return the current :class:`.AsyncSession`, creating it + using the :attr:`.scoped_session.session_factory` if not present. + + :param \**kw: Keyword arguments will be passed to the + :attr:`.scoped_session.session_factory` callable, if an existing + :class:`.AsyncSession` is not present. If the + :class:`.AsyncSession` is present + and keyword arguments have been passed, + :exc:`~sqlalchemy.exc.InvalidRequestError` is raised. + + """ + if kw: + if self.registry.has(): + raise sa_exc.InvalidRequestError( + "Scoped session is already present; " + "no new arguments may be specified." + ) + else: + sess = self.session_factory(**kw) + self.registry.set(sess) + else: + sess = self.registry() + if not self._support_async and sess._is_asyncio: + warn_deprecated( + "Using `scoped_session` with asyncio is deprecated and " + "will raise an error in a future version. " + "Please use `async_scoped_session` instead.", + "1.4.23", + ) + return sess + + def configure(self, **kwargs: Any) -> None: + """reconfigure the :class:`.sessionmaker` used by this + :class:`.scoped_session`. + + See :meth:`.sessionmaker.configure`. + + """ + + if self.registry.has(): + warn( + "At least one scoped session is already present. " + " configure() can not affect sessions that have " + "already been created." + ) + + self.session_factory.configure(**kwargs) + + async def remove(self) -> None: + """Dispose of the current :class:`.AsyncSession`, if present. + + Different from scoped_session's remove method, this method would use + await to wait for the close method of AsyncSession. + + """ + + if self.registry.has(): + await self.registry().close() + self.registry.clear() + + # START PROXY METHODS async_scoped_session + + # code within this block is **programmatically, + # statically generated** by tools/generate_proxy_methods.py + + def __contains__(self, instance: object) -> bool: + r"""Return True if the instance is associated with this session. + + .. container:: class_bases + + Proxied for the :class:`_asyncio.AsyncSession` class on + behalf of the :class:`_asyncio.scoping.async_scoped_session` class. + + .. container:: class_bases + + Proxied for the :class:`_orm.Session` class on + behalf of the :class:`_asyncio.AsyncSession` class. 
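Before the individual proxy methods, a minimal construction sketch, assuming an aiosqlite database URL and ``asyncio.current_task`` as the scope function, might look like::

    from asyncio import current_task

    from sqlalchemy.ext.asyncio import async_scoped_session
    from sqlalchemy.ext.asyncio import async_sessionmaker
    from sqlalchemy.ext.asyncio import create_async_engine

    engine = create_async_engine("sqlite+aiosqlite:///example.db")
    factory = async_sessionmaker(engine, expire_on_commit=False)

    # one AsyncSession per asyncio task
    AsyncScopedSession = async_scoped_session(factory, scopefunc=current_task)

    # repeated calls within the same task return the same AsyncSession
    session = AsyncScopedSession()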
+ + The instance may be pending or persistent within the Session for a + result of True. + + + + """ # noqa: E501 + + return self._proxied.__contains__(instance) + + def __iter__(self) -> Iterator[object]: + r"""Iterate over all pending or persistent instances within this + Session. + + .. container:: class_bases + + Proxied for the :class:`_asyncio.AsyncSession` class on + behalf of the :class:`_asyncio.scoping.async_scoped_session` class. + + .. container:: class_bases + + Proxied for the :class:`_orm.Session` class on + behalf of the :class:`_asyncio.AsyncSession` class. + + + + """ # noqa: E501 + + return self._proxied.__iter__() + + async def aclose(self) -> None: + r"""A synonym for :meth:`_asyncio.AsyncSession.close`. + + .. container:: class_bases + + Proxied for the :class:`_asyncio.AsyncSession` class on + behalf of the :class:`_asyncio.scoping.async_scoped_session` class. + + The :meth:`_asyncio.AsyncSession.aclose` name is specifically + to support the Python standard library ``@contextlib.aclosing`` + context manager function. + + .. versionadded:: 2.0.20 + + + """ # noqa: E501 + + return await self._proxied.aclose() + + def add(self, instance: object, _warn: bool = True) -> None: + r"""Place an object into this :class:`_orm.Session`. + + .. container:: class_bases + + Proxied for the :class:`_asyncio.AsyncSession` class on + behalf of the :class:`_asyncio.scoping.async_scoped_session` class. + + .. container:: class_bases + + Proxied for the :class:`_orm.Session` class on + behalf of the :class:`_asyncio.AsyncSession` class. + + Objects that are in the :term:`transient` state when passed to the + :meth:`_orm.Session.add` method will move to the + :term:`pending` state, until the next flush, at which point they + will move to the :term:`persistent` state. + + Objects that are in the :term:`detached` state when passed to the + :meth:`_orm.Session.add` method will move to the :term:`persistent` + state directly. + + If the transaction used by the :class:`_orm.Session` is rolled back, + objects which were transient when they were passed to + :meth:`_orm.Session.add` will be moved back to the + :term:`transient` state, and will no longer be present within this + :class:`_orm.Session`. + + .. seealso:: + + :meth:`_orm.Session.add_all` + + :ref:`session_adding` - at :ref:`session_basics` + + + + """ # noqa: E501 + + return self._proxied.add(instance, _warn=_warn) + + def add_all(self, instances: Iterable[object]) -> None: + r"""Add the given collection of instances to this :class:`_orm.Session`. + + .. container:: class_bases + + Proxied for the :class:`_asyncio.AsyncSession` class on + behalf of the :class:`_asyncio.scoping.async_scoped_session` class. + + .. container:: class_bases + + Proxied for the :class:`_orm.Session` class on + behalf of the :class:`_asyncio.AsyncSession` class. + + See the documentation for :meth:`_orm.Session.add` for a general + behavioral description. + + .. seealso:: + + :meth:`_orm.Session.add` + + :ref:`session_adding` - at :ref:`session_basics` + + + + """ # noqa: E501 + + return self._proxied.add_all(instances) + + def begin(self) -> AsyncSessionTransaction: + r"""Return an :class:`_asyncio.AsyncSessionTransaction` object. + + .. container:: class_bases + + Proxied for the :class:`_asyncio.AsyncSession` class on + behalf of the :class:`_asyncio.scoping.async_scoped_session` class. 
+ + The underlying :class:`_orm.Session` will perform the + "begin" action when the :class:`_asyncio.AsyncSessionTransaction` + object is entered:: + + async with async_session.begin(): + ... # ORM transaction is begun + + Note that database IO will not normally occur when the session-level + transaction is begun, as database transactions begin on an + on-demand basis. However, the begin block is async to accommodate + for a :meth:`_orm.SessionEvents.after_transaction_create` + event hook that may perform IO. + + For a general description of ORM begin, see + :meth:`_orm.Session.begin`. + + + """ # noqa: E501 + + return self._proxied.begin() + + def begin_nested(self) -> AsyncSessionTransaction: + r"""Return an :class:`_asyncio.AsyncSessionTransaction` object + which will begin a "nested" transaction, e.g. SAVEPOINT. + + .. container:: class_bases + + Proxied for the :class:`_asyncio.AsyncSession` class on + behalf of the :class:`_asyncio.scoping.async_scoped_session` class. + + Behavior is the same as that of :meth:`_asyncio.AsyncSession.begin`. + + For a general description of ORM begin nested, see + :meth:`_orm.Session.begin_nested`. + + .. seealso:: + + :ref:`aiosqlite_serializable` - special workarounds required + with the SQLite asyncio driver in order for SAVEPOINT to work + correctly. + + + """ # noqa: E501 + + return self._proxied.begin_nested() + + async def close(self) -> None: + r"""Close out the transactional resources and ORM objects used by this + :class:`_asyncio.AsyncSession`. + + .. container:: class_bases + + Proxied for the :class:`_asyncio.AsyncSession` class on + behalf of the :class:`_asyncio.scoping.async_scoped_session` class. + + .. seealso:: + + :meth:`_orm.Session.close` - main documentation for + "close" + + :ref:`session_closing` - detail on the semantics of + :meth:`_asyncio.AsyncSession.close` and + :meth:`_asyncio.AsyncSession.reset`. + + + """ # noqa: E501 + + return await self._proxied.close() + + async def reset(self) -> None: + r"""Close out the transactional resources and ORM objects used by this + :class:`_orm.Session`, resetting the session to its initial state. + + .. container:: class_bases + + Proxied for the :class:`_asyncio.AsyncSession` class on + behalf of the :class:`_asyncio.scoping.async_scoped_session` class. + + .. versionadded:: 2.0.22 + + .. seealso:: + + :meth:`_orm.Session.reset` - main documentation for + "reset" + + :ref:`session_closing` - detail on the semantics of + :meth:`_asyncio.AsyncSession.close` and + :meth:`_asyncio.AsyncSession.reset`. + + + """ # noqa: E501 + + return await self._proxied.reset() + + async def commit(self) -> None: + r"""Commit the current transaction in progress. + + .. container:: class_bases + + Proxied for the :class:`_asyncio.AsyncSession` class on + behalf of the :class:`_asyncio.scoping.async_scoped_session` class. + + .. seealso:: + + :meth:`_orm.Session.commit` - main documentation for + "commit" + + """ # noqa: E501 + + return await self._proxied.commit() + + async def connection( + self, + bind_arguments: Optional[_BindArguments] = None, + execution_options: Optional[CoreExecuteOptionsParameter] = None, + **kw: Any, + ) -> AsyncConnection: + r"""Return a :class:`_asyncio.AsyncConnection` object corresponding to + this :class:`.Session` object's transactional state. + + .. container:: class_bases + + Proxied for the :class:`_asyncio.AsyncSession` class on + behalf of the :class:`_asyncio.scoping.async_scoped_session` class. 
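A small sketch of the basic call, assuming a session obtained from a registry such as the one above, might look like::

    from sqlalchemy import text


    async def ping(session):
        # AsyncConnection tied to the session's current transaction
        conn = await session.connection()
        await conn.execute(text("SELECT 1"))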
+ + This method may also be used to establish execution options for the + database connection used by the current transaction. + + .. versionadded:: 1.4.24 Added \**kw arguments which are passed + through to the underlying :meth:`_orm.Session.connection` method. + + .. seealso:: + + :meth:`_orm.Session.connection` - main documentation for + "connection" + + + """ # noqa: E501 + + return await self._proxied.connection( + bind_arguments=bind_arguments, + execution_options=execution_options, + **kw, + ) + + async def delete(self, instance: object) -> None: + r"""Mark an instance as deleted. + + .. container:: class_bases + + Proxied for the :class:`_asyncio.AsyncSession` class on + behalf of the :class:`_asyncio.scoping.async_scoped_session` class. + + The database delete operation occurs upon ``flush()``. + + As this operation may need to cascade along unloaded relationships, + it is awaitable to allow for those queries to take place. + + .. seealso:: + + :meth:`_orm.Session.delete` - main documentation for delete + + + """ # noqa: E501 + + return await self._proxied.delete(instance) + + @overload + async def execute( + self, + statement: TypedReturnsRows[_T], + params: Optional[_CoreAnyExecuteParams] = None, + *, + execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT, + bind_arguments: Optional[_BindArguments] = None, + _parent_execute_state: Optional[Any] = None, + _add_event: Optional[Any] = None, + ) -> Result[_T]: ... + + @overload + async def execute( + self, + statement: UpdateBase, + params: Optional[_CoreAnyExecuteParams] = None, + *, + execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT, + bind_arguments: Optional[_BindArguments] = None, + _parent_execute_state: Optional[Any] = None, + _add_event: Optional[Any] = None, + ) -> CursorResult[Any]: ... + + @overload + async def execute( + self, + statement: Executable, + params: Optional[_CoreAnyExecuteParams] = None, + *, + execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT, + bind_arguments: Optional[_BindArguments] = None, + _parent_execute_state: Optional[Any] = None, + _add_event: Optional[Any] = None, + ) -> Result[Any]: ... + + async def execute( + self, + statement: Executable, + params: Optional[_CoreAnyExecuteParams] = None, + *, + execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT, + bind_arguments: Optional[_BindArguments] = None, + **kw: Any, + ) -> Result[Any]: + r"""Execute a statement and return a buffered + :class:`_engine.Result` object. + + .. container:: class_bases + + Proxied for the :class:`_asyncio.AsyncSession` class on + behalf of the :class:`_asyncio.scoping.async_scoped_session` class. + + .. seealso:: + + :meth:`_orm.Session.execute` - main documentation for execute + + + """ # noqa: E501 + + return await self._proxied.execute( + statement, + params=params, + execution_options=execution_options, + bind_arguments=bind_arguments, + **kw, + ) + + def expire( + self, instance: object, attribute_names: Optional[Iterable[str]] = None + ) -> None: + r"""Expire the attributes on an instance. + + .. container:: class_bases + + Proxied for the :class:`_asyncio.AsyncSession` class on + behalf of the :class:`_asyncio.scoping.async_scoped_session` class. + + .. container:: class_bases + + Proxied for the :class:`_orm.Session` class on + behalf of the :class:`_asyncio.AsyncSession` class. + + Marks the attributes of an instance as out of date. 
When an expired + attribute is next accessed, a query will be issued to the + :class:`.Session` object's current transactional context in order to + load all expired attributes for the given instance. Note that + a highly isolated transaction will return the same values as were + previously read in that same transaction, regardless of changes + in database state outside of that transaction. + + To expire all objects in the :class:`.Session` simultaneously, + use :meth:`Session.expire_all`. + + The :class:`.Session` object's default behavior is to + expire all state whenever the :meth:`Session.rollback` + or :meth:`Session.commit` methods are called, so that new + state can be loaded for the new transaction. For this reason, + calling :meth:`Session.expire` only makes sense for the specific + case that a non-ORM SQL statement was emitted in the current + transaction. + + :param instance: The instance to be refreshed. + :param attribute_names: optional list of string attribute names + indicating a subset of attributes to be expired. + + .. seealso:: + + :ref:`session_expire` - introductory material + + :meth:`.Session.expire` + + :meth:`.Session.refresh` + + :meth:`_orm.Query.populate_existing` + + + + """ # noqa: E501 + + return self._proxied.expire(instance, attribute_names=attribute_names) + + def expire_all(self) -> None: + r"""Expires all persistent instances within this Session. + + .. container:: class_bases + + Proxied for the :class:`_asyncio.AsyncSession` class on + behalf of the :class:`_asyncio.scoping.async_scoped_session` class. + + .. container:: class_bases + + Proxied for the :class:`_orm.Session` class on + behalf of the :class:`_asyncio.AsyncSession` class. + + When any attributes on a persistent instance is next accessed, + a query will be issued using the + :class:`.Session` object's current transactional context in order to + load all expired attributes for the given instance. Note that + a highly isolated transaction will return the same values as were + previously read in that same transaction, regardless of changes + in database state outside of that transaction. + + To expire individual objects and individual attributes + on those objects, use :meth:`Session.expire`. + + The :class:`.Session` object's default behavior is to + expire all state whenever the :meth:`Session.rollback` + or :meth:`Session.commit` methods are called, so that new + state can be loaded for the new transaction. For this reason, + calling :meth:`Session.expire_all` is not usually needed, + assuming the transaction is isolated. + + .. seealso:: + + :ref:`session_expire` - introductory material + + :meth:`.Session.expire` + + :meth:`.Session.refresh` + + :meth:`_orm.Query.populate_existing` + + + + """ # noqa: E501 + + return self._proxied.expire_all() + + def expunge(self, instance: object) -> None: + r"""Remove the `instance` from this ``Session``. + + .. container:: class_bases + + Proxied for the :class:`_asyncio.AsyncSession` class on + behalf of the :class:`_asyncio.scoping.async_scoped_session` class. + + .. container:: class_bases + + Proxied for the :class:`_orm.Session` class on + behalf of the :class:`_asyncio.AsyncSession` class. + + This will free all internal references to the instance. Cascading + will be applied according to the *expunge* cascade rule. + + + + """ # noqa: E501 + + return self._proxied.expunge(instance) + + def expunge_all(self) -> None: + r"""Remove all object instances from this ``Session``. + + .. 
container:: class_bases + + Proxied for the :class:`_asyncio.AsyncSession` class on + behalf of the :class:`_asyncio.scoping.async_scoped_session` class. + + .. container:: class_bases + + Proxied for the :class:`_orm.Session` class on + behalf of the :class:`_asyncio.AsyncSession` class. + + This is equivalent to calling ``expunge(obj)`` on all objects in this + ``Session``. + + + + """ # noqa: E501 + + return self._proxied.expunge_all() + + async def flush(self, objects: Optional[Sequence[Any]] = None) -> None: + r"""Flush all the object changes to the database. + + .. container:: class_bases + + Proxied for the :class:`_asyncio.AsyncSession` class on + behalf of the :class:`_asyncio.scoping.async_scoped_session` class. + + .. seealso:: + + :meth:`_orm.Session.flush` - main documentation for flush + + + """ # noqa: E501 + + return await self._proxied.flush(objects=objects) + + def get_bind( + self, + mapper: Optional[_EntityBindKey[_O]] = None, + clause: Optional[ClauseElement] = None, + bind: Optional[_SessionBind] = None, + **kw: Any, + ) -> Union[Engine, Connection]: + r"""Return a "bind" to which the synchronous proxied :class:`_orm.Session` + is bound. + + .. container:: class_bases + + Proxied for the :class:`_asyncio.AsyncSession` class on + behalf of the :class:`_asyncio.scoping.async_scoped_session` class. + + Unlike the :meth:`_orm.Session.get_bind` method, this method is + currently **not** used by this :class:`.AsyncSession` in any way + in order to resolve engines for requests. + + .. note:: + + This method proxies directly to the :meth:`_orm.Session.get_bind` + method, however is currently **not** useful as an override target, + in contrast to that of the :meth:`_orm.Session.get_bind` method. + The example below illustrates how to implement custom + :meth:`_orm.Session.get_bind` schemes that work with + :class:`.AsyncSession` and :class:`.AsyncEngine`. + + The pattern introduced at :ref:`session_custom_partitioning` + illustrates how to apply a custom bind-lookup scheme to a + :class:`_orm.Session` given a set of :class:`_engine.Engine` objects. + To apply a corresponding :meth:`_orm.Session.get_bind` implementation + for use with a :class:`.AsyncSession` and :class:`.AsyncEngine` + objects, continue to subclass :class:`_orm.Session` and apply it to + :class:`.AsyncSession` using + :paramref:`.AsyncSession.sync_session_class`. 
The inner method must + continue to return :class:`_engine.Engine` instances, which can be + acquired from a :class:`_asyncio.AsyncEngine` using the + :attr:`_asyncio.AsyncEngine.sync_engine` attribute:: + + # using example from "Custom Vertical Partitioning" + + + import random + + from sqlalchemy.ext.asyncio import AsyncSession + from sqlalchemy.ext.asyncio import create_async_engine + from sqlalchemy.ext.asyncio import async_sessionmaker + from sqlalchemy.orm import Session + + # construct async engines w/ async drivers + engines = { + "leader": create_async_engine("sqlite+aiosqlite:///leader.db"), + "other": create_async_engine("sqlite+aiosqlite:///other.db"), + "follower1": create_async_engine("sqlite+aiosqlite:///follower1.db"), + "follower2": create_async_engine("sqlite+aiosqlite:///follower2.db"), + } + + + class RoutingSession(Session): + def get_bind(self, mapper=None, clause=None, **kw): + # within get_bind(), return sync engines + if mapper and issubclass(mapper.class_, MyOtherClass): + return engines["other"].sync_engine + elif self._flushing or isinstance(clause, (Update, Delete)): + return engines["leader"].sync_engine + else: + return engines[ + random.choice(["follower1", "follower2"]) + ].sync_engine + + + # apply to AsyncSession using sync_session_class + AsyncSessionMaker = async_sessionmaker(sync_session_class=RoutingSession) + + The :meth:`_orm.Session.get_bind` method is called in a non-asyncio, + implicitly non-blocking context in the same manner as ORM event hooks + and functions that are invoked via :meth:`.AsyncSession.run_sync`, so + routines that wish to run SQL commands inside of + :meth:`_orm.Session.get_bind` can continue to do so using + blocking-style code, which will be translated to implicitly async calls + at the point of invoking IO on the database drivers. + + + """ # noqa: E501 + + return self._proxied.get_bind( + mapper=mapper, clause=clause, bind=bind, **kw + ) + + def is_modified( + self, instance: object, include_collections: bool = True + ) -> bool: + r"""Return ``True`` if the given instance has locally + modified attributes. + + .. container:: class_bases + + Proxied for the :class:`_asyncio.AsyncSession` class on + behalf of the :class:`_asyncio.scoping.async_scoped_session` class. + + .. container:: class_bases + + Proxied for the :class:`_orm.Session` class on + behalf of the :class:`_asyncio.AsyncSession` class. + + This method retrieves the history for each instrumented + attribute on the instance and performs a comparison of the current + value to its previously flushed or committed value, if any. + + It is in effect a more expensive and accurate + version of checking for the given instance in the + :attr:`.Session.dirty` collection; a full test for + each attribute's net "dirty" status is performed. + + E.g.:: + + return session.is_modified(someobject) + + A few caveats to this method apply: + + * Instances present in the :attr:`.Session.dirty` collection may + report ``False`` when tested with this method. This is because + the object may have received change events via attribute mutation, + thus placing it in :attr:`.Session.dirty`, but ultimately the state + is the same as that loaded from the database, resulting in no net + change here. 
+ * Scalar attributes may not have recorded the previously set + value when a new value was applied, if the attribute was not loaded, + or was expired, at the time the new value was received - in these + cases, the attribute is assumed to have a change, even if there is + ultimately no net change against its database value. SQLAlchemy in + most cases does not need the "old" value when a set event occurs, so + it skips the expense of a SQL call if the old value isn't present, + based on the assumption that an UPDATE of the scalar value is + usually needed, and in those few cases where it isn't, is less + expensive on average than issuing a defensive SELECT. + + The "old" value is fetched unconditionally upon set only if the + attribute container has the ``active_history`` flag set to ``True``. + This flag is set typically for primary key attributes and scalar + object references that are not a simple many-to-one. To set this + flag for any arbitrary mapped column, use the ``active_history`` + argument with :func:`.column_property`. + + :param instance: mapped instance to be tested for pending changes. + :param include_collections: Indicates if multivalued collections + should be included in the operation. Setting this to ``False`` is a + way to detect only local-column based properties (i.e. scalar columns + or many-to-one foreign keys) that would result in an UPDATE for this + instance upon flush. + + + + """ # noqa: E501 + + return self._proxied.is_modified( + instance, include_collections=include_collections + ) + + async def invalidate(self) -> None: + r"""Close this Session, using connection invalidation. + + .. container:: class_bases + + Proxied for the :class:`_asyncio.AsyncSession` class on + behalf of the :class:`_asyncio.scoping.async_scoped_session` class. + + For a complete description, see :meth:`_orm.Session.invalidate`. + + """ # noqa: E501 + + return await self._proxied.invalidate() + + async def merge( + self, + instance: _O, + *, + load: bool = True, + options: Optional[Sequence[ORMOption]] = None, + ) -> _O: + r"""Copy the state of a given instance into a corresponding instance + within this :class:`_asyncio.AsyncSession`. + + .. container:: class_bases + + Proxied for the :class:`_asyncio.AsyncSession` class on + behalf of the :class:`_asyncio.scoping.async_scoped_session` class. + + .. seealso:: + + :meth:`_orm.Session.merge` - main documentation for merge + + + """ # noqa: E501 + + return await self._proxied.merge(instance, load=load, options=options) + + async def refresh( + self, + instance: object, + attribute_names: Optional[Iterable[str]] = None, + with_for_update: ForUpdateParameter = None, + ) -> None: + r"""Expire and refresh the attributes on the given instance. + + .. container:: class_bases + + Proxied for the :class:`_asyncio.AsyncSession` class on + behalf of the :class:`_asyncio.scoping.async_scoped_session` class. + + A query will be issued to the database and all attributes will be + refreshed with their current database value. + + This is the async version of the :meth:`_orm.Session.refresh` method. + See that method for a complete description of all options. + + .. seealso:: + + :meth:`_orm.Session.refresh` - main documentation for refresh + + + """ # noqa: E501 + + return await self._proxied.refresh( + instance, + attribute_names=attribute_names, + with_for_update=with_for_update, + ) + + async def rollback(self) -> None: + r"""Rollback the current transaction in progress. + + .. 
container:: class_bases + + Proxied for the :class:`_asyncio.AsyncSession` class on + behalf of the :class:`_asyncio.scoping.async_scoped_session` class. + + .. seealso:: + + :meth:`_orm.Session.rollback` - main documentation for + "rollback" + + """ # noqa: E501 + + return await self._proxied.rollback() + + @overload + async def scalar( + self, + statement: TypedReturnsRows[Tuple[_T]], + params: Optional[_CoreAnyExecuteParams] = None, + *, + execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT, + bind_arguments: Optional[_BindArguments] = None, + **kw: Any, + ) -> Optional[_T]: ... + + @overload + async def scalar( + self, + statement: Executable, + params: Optional[_CoreAnyExecuteParams] = None, + *, + execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT, + bind_arguments: Optional[_BindArguments] = None, + **kw: Any, + ) -> Any: ... + + async def scalar( + self, + statement: Executable, + params: Optional[_CoreAnyExecuteParams] = None, + *, + execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT, + bind_arguments: Optional[_BindArguments] = None, + **kw: Any, + ) -> Any: + r"""Execute a statement and return a scalar result. + + .. container:: class_bases + + Proxied for the :class:`_asyncio.AsyncSession` class on + behalf of the :class:`_asyncio.scoping.async_scoped_session` class. + + .. seealso:: + + :meth:`_orm.Session.scalar` - main documentation for scalar + + + """ # noqa: E501 + + return await self._proxied.scalar( + statement, + params=params, + execution_options=execution_options, + bind_arguments=bind_arguments, + **kw, + ) + + @overload + async def scalars( + self, + statement: TypedReturnsRows[Tuple[_T]], + params: Optional[_CoreAnyExecuteParams] = None, + *, + execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT, + bind_arguments: Optional[_BindArguments] = None, + **kw: Any, + ) -> ScalarResult[_T]: ... + + @overload + async def scalars( + self, + statement: Executable, + params: Optional[_CoreAnyExecuteParams] = None, + *, + execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT, + bind_arguments: Optional[_BindArguments] = None, + **kw: Any, + ) -> ScalarResult[Any]: ... + + async def scalars( + self, + statement: Executable, + params: Optional[_CoreAnyExecuteParams] = None, + *, + execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT, + bind_arguments: Optional[_BindArguments] = None, + **kw: Any, + ) -> ScalarResult[Any]: + r"""Execute a statement and return scalar results. + + .. container:: class_bases + + Proxied for the :class:`_asyncio.AsyncSession` class on + behalf of the :class:`_asyncio.scoping.async_scoped_session` class. + + :return: a :class:`_result.ScalarResult` object + + .. versionadded:: 1.4.24 Added :meth:`_asyncio.AsyncSession.scalars` + + .. versionadded:: 1.4.26 Added + :meth:`_asyncio.async_scoped_session.scalars` + + .. 
seealso:: + + :meth:`_orm.Session.scalars` - main documentation for scalars + + :meth:`_asyncio.AsyncSession.stream_scalars` - streaming version + + + """ # noqa: E501 + + return await self._proxied.scalars( + statement, + params=params, + execution_options=execution_options, + bind_arguments=bind_arguments, + **kw, + ) + + async def get( + self, + entity: _EntityBindKey[_O], + ident: _PKIdentityArgument, + *, + options: Optional[Sequence[ORMOption]] = None, + populate_existing: bool = False, + with_for_update: ForUpdateParameter = None, + identity_token: Optional[Any] = None, + execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT, + ) -> Union[_O, None]: + r"""Return an instance based on the given primary key identifier, + or ``None`` if not found. + + .. container:: class_bases + + Proxied for the :class:`_asyncio.AsyncSession` class on + behalf of the :class:`_asyncio.scoping.async_scoped_session` class. + + .. seealso:: + + :meth:`_orm.Session.get` - main documentation for get + + + + """ # noqa: E501 + + result = await self._proxied.get( + entity, + ident, + options=options, + populate_existing=populate_existing, + with_for_update=with_for_update, + identity_token=identity_token, + execution_options=execution_options, + ) + return result + + async def get_one( + self, + entity: _EntityBindKey[_O], + ident: _PKIdentityArgument, + *, + options: Optional[Sequence[ORMOption]] = None, + populate_existing: bool = False, + with_for_update: ForUpdateParameter = None, + identity_token: Optional[Any] = None, + execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT, + ) -> _O: + r"""Return an instance based on the given primary key identifier, + or raise an exception if not found. + + .. container:: class_bases + + Proxied for the :class:`_asyncio.AsyncSession` class on + behalf of the :class:`_asyncio.scoping.async_scoped_session` class. + + Raises :class:`_exc.NoResultFound` if the query selects no rows. + + ..versionadded: 2.0.22 + + .. seealso:: + + :meth:`_orm.Session.get_one` - main documentation for get_one + + + """ # noqa: E501 + + return await self._proxied.get_one( + entity, + ident, + options=options, + populate_existing=populate_existing, + with_for_update=with_for_update, + identity_token=identity_token, + execution_options=execution_options, + ) + + @overload + async def stream( + self, + statement: TypedReturnsRows[_T], + params: Optional[_CoreAnyExecuteParams] = None, + *, + execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT, + bind_arguments: Optional[_BindArguments] = None, + **kw: Any, + ) -> AsyncResult[_T]: ... + + @overload + async def stream( + self, + statement: Executable, + params: Optional[_CoreAnyExecuteParams] = None, + *, + execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT, + bind_arguments: Optional[_BindArguments] = None, + **kw: Any, + ) -> AsyncResult[Any]: ... + + async def stream( + self, + statement: Executable, + params: Optional[_CoreAnyExecuteParams] = None, + *, + execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT, + bind_arguments: Optional[_BindArguments] = None, + **kw: Any, + ) -> AsyncResult[Any]: + r"""Execute a statement and return a streaming + :class:`_asyncio.AsyncResult` object. + + .. container:: class_bases + + Proxied for the :class:`_asyncio.AsyncSession` class on + behalf of the :class:`_asyncio.scoping.async_scoped_session` class. 
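A streaming ORM sketch, with the same hypothetical ``User`` class as above, might look like::

    from sqlalchemy import select


    async def iterate_users(session):
        result = await session.stream(select(User).order_by(User.id))

        # ORM instances are streamed rather than buffered up front
        async for user in result.scalars():
            print(user.name)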
+ + + """ # noqa: E501 + + return await self._proxied.stream( + statement, + params=params, + execution_options=execution_options, + bind_arguments=bind_arguments, + **kw, + ) + + @overload + async def stream_scalars( + self, + statement: TypedReturnsRows[Tuple[_T]], + params: Optional[_CoreAnyExecuteParams] = None, + *, + execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT, + bind_arguments: Optional[_BindArguments] = None, + **kw: Any, + ) -> AsyncScalarResult[_T]: ... + + @overload + async def stream_scalars( + self, + statement: Executable, + params: Optional[_CoreAnyExecuteParams] = None, + *, + execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT, + bind_arguments: Optional[_BindArguments] = None, + **kw: Any, + ) -> AsyncScalarResult[Any]: ... + + async def stream_scalars( + self, + statement: Executable, + params: Optional[_CoreAnyExecuteParams] = None, + *, + execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT, + bind_arguments: Optional[_BindArguments] = None, + **kw: Any, + ) -> AsyncScalarResult[Any]: + r"""Execute a statement and return a stream of scalar results. + + .. container:: class_bases + + Proxied for the :class:`_asyncio.AsyncSession` class on + behalf of the :class:`_asyncio.scoping.async_scoped_session` class. + + :return: an :class:`_asyncio.AsyncScalarResult` object + + .. versionadded:: 1.4.24 + + .. seealso:: + + :meth:`_orm.Session.scalars` - main documentation for scalars + + :meth:`_asyncio.AsyncSession.scalars` - non streaming version + + + """ # noqa: E501 + + return await self._proxied.stream_scalars( + statement, + params=params, + execution_options=execution_options, + bind_arguments=bind_arguments, + **kw, + ) + + @property + def bind(self) -> Any: + r"""Proxy for the :attr:`_asyncio.AsyncSession.bind` attribute + on behalf of the :class:`_asyncio.scoping.async_scoped_session` class. + + """ # noqa: E501 + + return self._proxied.bind + + @bind.setter + def bind(self, attr: Any) -> None: + self._proxied.bind = attr + + @property + def dirty(self) -> Any: + r"""The set of all persistent instances considered dirty. + + .. container:: class_bases + + Proxied for the :class:`_asyncio.AsyncSession` class + on behalf of the :class:`_asyncio.scoping.async_scoped_session` class. + + .. container:: class_bases + + Proxied for the :class:`_orm.Session` class + on behalf of the :class:`_asyncio.AsyncSession` class. + + E.g.:: + + some_mapped_object in session.dirty + + Instances are considered dirty when they were modified but not + deleted. + + Note that this 'dirty' calculation is 'optimistic'; most + attribute-setting or collection modification operations will + mark an instance as 'dirty' and place it in this set, even if + there is no net change to the attribute's value. At flush + time, the value of each attribute is compared to its + previously saved value, and if there's no net change, no SQL + operation will occur (this is a more expensive operation so + it's only done at flush time). + + To check if an instance has actionable net changes to its + attributes, use the :meth:`.Session.is_modified` method. + + + + """ # noqa: E501 + + return self._proxied.dirty + + @property + def deleted(self) -> Any: + r"""The set of all instances marked as 'deleted' within this ``Session`` + + .. container:: class_bases + + Proxied for the :class:`_asyncio.AsyncSession` class + on behalf of the :class:`_asyncio.scoping.async_scoped_session` class. + + .. 
container:: class_bases + + Proxied for the :class:`_orm.Session` class + on behalf of the :class:`_asyncio.AsyncSession` class. + + + """ # noqa: E501 + + return self._proxied.deleted + + @property + def new(self) -> Any: + r"""The set of all instances marked as 'new' within this ``Session``. + + .. container:: class_bases + + Proxied for the :class:`_asyncio.AsyncSession` class + on behalf of the :class:`_asyncio.scoping.async_scoped_session` class. + + .. container:: class_bases + + Proxied for the :class:`_orm.Session` class + on behalf of the :class:`_asyncio.AsyncSession` class. + + + """ # noqa: E501 + + return self._proxied.new + + @property + def identity_map(self) -> Any: + r"""Proxy for the :attr:`_orm.Session.identity_map` attribute + on behalf of the :class:`_asyncio.AsyncSession` class. + + .. container:: class_bases + + Proxied for the :class:`_asyncio.AsyncSession` class + on behalf of the :class:`_asyncio.scoping.async_scoped_session` class. + + + """ # noqa: E501 + + return self._proxied.identity_map + + @identity_map.setter + def identity_map(self, attr: Any) -> None: + self._proxied.identity_map = attr + + @property + def is_active(self) -> Any: + r"""True if this :class:`.Session` not in "partial rollback" state. + + .. container:: class_bases + + Proxied for the :class:`_asyncio.AsyncSession` class + on behalf of the :class:`_asyncio.scoping.async_scoped_session` class. + + .. container:: class_bases + + Proxied for the :class:`_orm.Session` class + on behalf of the :class:`_asyncio.AsyncSession` class. + + .. versionchanged:: 1.4 The :class:`_orm.Session` no longer begins + a new transaction immediately, so this attribute will be False + when the :class:`_orm.Session` is first instantiated. + + "partial rollback" state typically indicates that the flush process + of the :class:`_orm.Session` has failed, and that the + :meth:`_orm.Session.rollback` method must be emitted in order to + fully roll back the transaction. + + If this :class:`_orm.Session` is not in a transaction at all, the + :class:`_orm.Session` will autobegin when it is first used, so in this + case :attr:`_orm.Session.is_active` will return True. + + Otherwise, if this :class:`_orm.Session` is within a transaction, + and that transaction has not been rolled back internally, the + :attr:`_orm.Session.is_active` will also return True. + + .. seealso:: + + :ref:`faq_session_rollback` + + :meth:`_orm.Session.in_transaction` + + + + """ # noqa: E501 + + return self._proxied.is_active + + @property + def autoflush(self) -> Any: + r"""Proxy for the :attr:`_orm.Session.autoflush` attribute + on behalf of the :class:`_asyncio.AsyncSession` class. + + .. container:: class_bases + + Proxied for the :class:`_asyncio.AsyncSession` class + on behalf of the :class:`_asyncio.scoping.async_scoped_session` class. + + + """ # noqa: E501 + + return self._proxied.autoflush + + @autoflush.setter + def autoflush(self, attr: Any) -> None: + self._proxied.autoflush = attr + + @property + def no_autoflush(self) -> Any: + r"""Return a context manager that disables autoflush. + + .. container:: class_bases + + Proxied for the :class:`_asyncio.AsyncSession` class + on behalf of the :class:`_asyncio.scoping.async_scoped_session` class. + + .. container:: class_bases + + Proxied for the :class:`_orm.Session` class + on behalf of the :class:`_asyncio.AsyncSession` class. 
+ + e.g.:: + + with session.no_autoflush: + + some_object = SomeClass() + session.add(some_object) + # won't autoflush + some_object.related_thing = session.query(SomeRelated).first() + + Operations that proceed within the ``with:`` block + will not be subject to flushes occurring upon query + access. This is useful when initializing a series + of objects which involve existing database queries, + where the uncompleted object should not yet be flushed. + + + + """ # noqa: E501 + + return self._proxied.no_autoflush + + @property + def info(self) -> Any: + r"""A user-modifiable dictionary. + + .. container:: class_bases + + Proxied for the :class:`_asyncio.AsyncSession` class + on behalf of the :class:`_asyncio.scoping.async_scoped_session` class. + + .. container:: class_bases + + Proxied for the :class:`_orm.Session` class + on behalf of the :class:`_asyncio.AsyncSession` class. + + The initial value of this dictionary can be populated using the + ``info`` argument to the :class:`.Session` constructor or + :class:`.sessionmaker` constructor or factory methods. The dictionary + here is always local to this :class:`.Session` and can be modified + independently of all other :class:`.Session` objects. + + + + """ # noqa: E501 + + return self._proxied.info + + @classmethod + async def close_all(cls) -> None: + r"""Close all :class:`_asyncio.AsyncSession` sessions. + + .. container:: class_bases + + Proxied for the :class:`_asyncio.AsyncSession` class on + behalf of the :class:`_asyncio.scoping.async_scoped_session` class. + + .. deprecated:: 2.0 The :meth:`.AsyncSession.close_all` method is deprecated and will be removed in a future release. Please refer to :func:`_asyncio.close_all_sessions`. + + """ # noqa: E501 + + return await AsyncSession.close_all() + + @classmethod + def object_session(cls, instance: object) -> Optional[Session]: + r"""Return the :class:`.Session` to which an object belongs. + + .. container:: class_bases + + Proxied for the :class:`_asyncio.AsyncSession` class on + behalf of the :class:`_asyncio.scoping.async_scoped_session` class. + + .. container:: class_bases + + Proxied for the :class:`_orm.Session` class on + behalf of the :class:`_asyncio.AsyncSession` class. + + This is an alias of :func:`.object_session`. + + + + """ # noqa: E501 + + return AsyncSession.object_session(instance) + + @classmethod + def identity_key( + cls, + class_: Optional[Type[Any]] = None, + ident: Union[Any, Tuple[Any, ...]] = None, + *, + instance: Optional[Any] = None, + row: Optional[Union[Row[Any], RowMapping]] = None, + identity_token: Optional[Any] = None, + ) -> _IdentityKeyType[Any]: + r"""Return an identity key. + + .. container:: class_bases + + Proxied for the :class:`_asyncio.AsyncSession` class on + behalf of the :class:`_asyncio.scoping.async_scoped_session` class. + + .. container:: class_bases + + Proxied for the :class:`_orm.Session` class on + behalf of the :class:`_asyncio.AsyncSession` class. + + This is an alias of :func:`.util.identity_key`. 
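# Hedged usage sketch for async_scoped_session, whose generated proxy methods
# appear above: scopefunc=asyncio.current_task yields one AsyncSession per task.
# The sqlite+aiosqlite URL is only a placeholder assumption.
import asyncio

from sqlalchemy import text
from sqlalchemy.ext.asyncio import (
    async_scoped_session,
    async_sessionmaker,
    create_async_engine,
)

engine = create_async_engine("sqlite+aiosqlite:///:memory:")
TaskSession = async_scoped_session(
    async_sessionmaker(engine), scopefunc=asyncio.current_task
)


async def per_task_work() -> None:
    session = TaskSession()          # same AsyncSession for the current task
    print(await session.scalar(text("select 1")))
    await TaskSession.remove()       # discard the task-local session


# asyncio.run(per_task_work())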
+ + + + """ # noqa: E501 + + return AsyncSession.identity_key( + class_=class_, + ident=ident, + instance=instance, + row=row, + identity_token=identity_token, + ) + + # END PROXY METHODS async_scoped_session diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/ext/asyncio/session.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/ext/asyncio/session.py new file mode 100644 index 0000000000000000000000000000000000000000..68cbb59bfd62fa9c20a930990434aac46e1b2c46 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/ext/asyncio/session.py @@ -0,0 +1,1961 @@ +# ext/asyncio/session.py +# Copyright (C) 2020-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php +from __future__ import annotations + +import asyncio +from typing import Any +from typing import Awaitable +from typing import Callable +from typing import cast +from typing import Dict +from typing import Generic +from typing import Iterable +from typing import Iterator +from typing import NoReturn +from typing import Optional +from typing import overload +from typing import Sequence +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING +from typing import TypeVar +from typing import Union + +from . import engine +from .base import ReversibleProxy +from .base import StartableContext +from .result import _ensure_sync_result +from .result import AsyncResult +from .result import AsyncScalarResult +from ... import util +from ...orm import close_all_sessions as _sync_close_all_sessions +from ...orm import object_session +from ...orm import Session +from ...orm import SessionTransaction +from ...orm import state as _instance_state +from ...util.concurrency import greenlet_spawn +from ...util.typing import Concatenate +from ...util.typing import ParamSpec + + +if TYPE_CHECKING: + from .engine import AsyncConnection + from .engine import AsyncEngine + from ...engine import Connection + from ...engine import CursorResult + from ...engine import Engine + from ...engine import Result + from ...engine import Row + from ...engine import RowMapping + from ...engine import ScalarResult + from ...engine.interfaces import _CoreAnyExecuteParams + from ...engine.interfaces import CoreExecuteOptionsParameter + from ...event import dispatcher + from ...orm._typing import _IdentityKeyType + from ...orm._typing import _O + from ...orm._typing import OrmExecuteOptionsParameter + from ...orm.identity import IdentityMap + from ...orm.interfaces import ORMOption + from ...orm.session import _BindArguments + from ...orm.session import _EntityBindKey + from ...orm.session import _PKIdentityArgument + from ...orm.session import _SessionBind + from ...orm.session import _SessionBindKey + from ...sql._typing import _InfoType + from ...sql.base import Executable + from ...sql.dml import UpdateBase + from ...sql.elements import ClauseElement + from ...sql.selectable import ForUpdateParameter + from ...sql.selectable import TypedReturnsRows + +_AsyncSessionBind = Union["AsyncEngine", "AsyncConnection"] + +_P = ParamSpec("_P") +_T = TypeVar("_T", bound=Any) + + +_EXECUTE_OPTIONS = util.immutabledict({"prebuffer_rows": True}) +_STREAM_OPTIONS = util.immutabledict({"stream_results": True}) + + +class AsyncAttrs: + """Mixin class which provides an awaitable accessor for all 
attributes. + + E.g.:: + + from __future__ import annotations + + from typing import List + + from sqlalchemy import ForeignKey + from sqlalchemy import func + from sqlalchemy.ext.asyncio import AsyncAttrs + from sqlalchemy.orm import DeclarativeBase + from sqlalchemy.orm import Mapped + from sqlalchemy.orm import mapped_column + from sqlalchemy.orm import relationship + + + class Base(AsyncAttrs, DeclarativeBase): + pass + + + class A(Base): + __tablename__ = "a" + + id: Mapped[int] = mapped_column(primary_key=True) + data: Mapped[str] + bs: Mapped[List[B]] = relationship() + + + class B(Base): + __tablename__ = "b" + id: Mapped[int] = mapped_column(primary_key=True) + a_id: Mapped[int] = mapped_column(ForeignKey("a.id")) + data: Mapped[str] + + In the above example, the :class:`_asyncio.AsyncAttrs` mixin is applied to + the declarative ``Base`` class where it takes effect for all subclasses. + This mixin adds a single new attribute + :attr:`_asyncio.AsyncAttrs.awaitable_attrs` to all classes, which will + yield the value of any attribute as an awaitable. This allows attributes + which may be subject to lazy loading or deferred / unexpiry loading to be + accessed such that IO can still be emitted:: + + a1 = (await async_session.scalars(select(A).where(A.id == 5))).one() + + # use the lazy loader on ``a1.bs`` via the ``.awaitable_attrs`` + # interface, so that it may be awaited + for b1 in await a1.awaitable_attrs.bs: + print(b1) + + The :attr:`_asyncio.AsyncAttrs.awaitable_attrs` performs a call against the + attribute that is approximately equivalent to using the + :meth:`_asyncio.AsyncSession.run_sync` method, e.g.:: + + for b1 in await async_session.run_sync(lambda sess: a1.bs): + print(b1) + + .. versionadded:: 2.0.13 + + .. seealso:: + + :ref:`asyncio_orm_avoid_lazyloads` + + """ + + class _AsyncAttrGetitem: + __slots__ = "_instance" + + def __init__(self, _instance: Any): + self._instance = _instance + + def __getattr__(self, name: str) -> Awaitable[Any]: + return greenlet_spawn(getattr, self._instance, name) + + @property + def awaitable_attrs(self) -> AsyncAttrs._AsyncAttrGetitem: + """provide a namespace of all attributes on this object wrapped + as awaitables. + + e.g.:: + + + a1 = (await async_session.scalars(select(A).where(A.id == 5))).one() + + some_attribute = await a1.awaitable_attrs.some_deferred_attribute + some_collection = await a1.awaitable_attrs.some_collection + + """ # noqa: E501 + + return AsyncAttrs._AsyncAttrGetitem(self) + + +@util.create_proxy_methods( + Session, + ":class:`_orm.Session`", + ":class:`_asyncio.AsyncSession`", + classmethods=["object_session", "identity_key"], + methods=[ + "__contains__", + "__iter__", + "add", + "add_all", + "expire", + "expire_all", + "expunge", + "expunge_all", + "is_modified", + "in_transaction", + "in_nested_transaction", + ], + attributes=[ + "dirty", + "deleted", + "new", + "identity_map", + "is_active", + "autoflush", + "no_autoflush", + "info", + ], +) +class AsyncSession(ReversibleProxy[Session]): + """Asyncio version of :class:`_orm.Session`. + + The :class:`_asyncio.AsyncSession` is a proxy for a traditional + :class:`_orm.Session` instance. + + The :class:`_asyncio.AsyncSession` is **not safe for use in concurrent + tasks.**. See :ref:`session_faq_threadsafe` for background. + + .. versionadded:: 1.4 + + To use an :class:`_asyncio.AsyncSession` with custom :class:`_orm.Session` + implementations, see the + :paramref:`_asyncio.AsyncSession.sync_session_class` parameter. 
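# Hedged usage sketch for the AsyncAttrs mixin documented above: it assumes the
# Base/A/B mapped classes from the docstring example and an aiosqlite-style
# async engine; adjust the URL to your own database.
import asyncio

from sqlalchemy import select
from sqlalchemy.ext.asyncio import async_sessionmaker, create_async_engine


async def demo_awaitable_attrs() -> None:
    engine = create_async_engine("sqlite+aiosqlite:///:memory:")
    async with engine.begin() as conn:
        # assumes Base is the AsyncAttrs/DeclarativeBase subclass shown above
        await conn.run_sync(Base.metadata.create_all)

    async with async_sessionmaker(engine)() as session:
        session.add(A(data="a1", bs=[B(data="b1")]))
        await session.commit()

        a1 = (await session.scalars(select(A))).one()
        # the lazy-loaded collection is awaited instead of raising an error
        for b in await a1.awaitable_attrs.bs:
            print(b.data)

    await engine.dispose()


# asyncio.run(demo_awaitable_attrs())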
+ + + """ + + _is_asyncio = True + + dispatch: dispatcher[Session] + + def __init__( + self, + bind: Optional[_AsyncSessionBind] = None, + *, + binds: Optional[Dict[_SessionBindKey, _AsyncSessionBind]] = None, + sync_session_class: Optional[Type[Session]] = None, + **kw: Any, + ): + r"""Construct a new :class:`_asyncio.AsyncSession`. + + All parameters other than ``sync_session_class`` are passed to the + ``sync_session_class`` callable directly to instantiate a new + :class:`_orm.Session`. Refer to :meth:`_orm.Session.__init__` for + parameter documentation. + + :param sync_session_class: + A :class:`_orm.Session` subclass or other callable which will be used + to construct the :class:`_orm.Session` which will be proxied. This + parameter may be used to provide custom :class:`_orm.Session` + subclasses. Defaults to the + :attr:`_asyncio.AsyncSession.sync_session_class` class-level + attribute. + + .. versionadded:: 1.4.24 + + """ + sync_bind = sync_binds = None + + if bind: + self.bind = bind + sync_bind = engine._get_sync_engine_or_connection(bind) + + if binds: + self.binds = binds + sync_binds = { + key: engine._get_sync_engine_or_connection(b) + for key, b in binds.items() + } + + if sync_session_class: + self.sync_session_class = sync_session_class + + self.sync_session = self._proxied = self._assign_proxied( + self.sync_session_class(bind=sync_bind, binds=sync_binds, **kw) + ) + + sync_session_class: Type[Session] = Session + """The class or callable that provides the + underlying :class:`_orm.Session` instance for a particular + :class:`_asyncio.AsyncSession`. + + At the class level, this attribute is the default value for the + :paramref:`_asyncio.AsyncSession.sync_session_class` parameter. Custom + subclasses of :class:`_asyncio.AsyncSession` can override this. + + At the instance level, this attribute indicates the current class or + callable that was used to provide the :class:`_orm.Session` instance for + this :class:`_asyncio.AsyncSession` instance. + + .. versionadded:: 1.4.24 + + """ + + sync_session: Session + """Reference to the underlying :class:`_orm.Session` this + :class:`_asyncio.AsyncSession` proxies requests towards. + + This instance can be used as an event target. + + .. seealso:: + + :ref:`asyncio_events` + + """ + + @classmethod + def _no_async_engine_events(cls) -> NoReturn: + raise NotImplementedError( + "asynchronous events are not implemented at this time. Apply " + "synchronous listeners to the AsyncSession.sync_session." + ) + + async def refresh( + self, + instance: object, + attribute_names: Optional[Iterable[str]] = None, + with_for_update: ForUpdateParameter = None, + ) -> None: + """Expire and refresh the attributes on the given instance. + + A query will be issued to the database and all attributes will be + refreshed with their current database value. + + This is the async version of the :meth:`_orm.Session.refresh` method. + See that method for a complete description of all options. + + .. seealso:: + + :meth:`_orm.Session.refresh` - main documentation for refresh + + """ + + await greenlet_spawn( + self.sync_session.refresh, + instance, + attribute_names=attribute_names, + with_for_update=with_for_update, + ) + + async def run_sync( + self, + fn: Callable[Concatenate[Session, _P], _T], + *arg: _P.args, + **kw: _P.kwargs, + ) -> _T: + '''Invoke the given synchronous (i.e. not async) callable, + passing a synchronous-style :class:`_orm.Session` as the first + argument. 
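# Sketch of constructing AsyncSession directly (rather than via async_sessionmaker)
# and using refresh(), per the __init__/refresh docstrings above. The async engine
# and the mapped class A are assumed to exist (see the AsyncAttrs example earlier).
from sqlalchemy.ext.asyncio import AsyncEngine, AsyncSession


async def add_and_refresh(engine: AsyncEngine) -> None:
    async with AsyncSession(engine, expire_on_commit=False) as session:
        obj = A(data="hello")
        session.add(obj)
        await session.commit()
        await session.refresh(obj)   # re-load current database state
        print(obj.id, obj.data)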
+ + This method allows traditional synchronous SQLAlchemy functions to + run within the context of an asyncio application. + + E.g.:: + + def some_business_method(session: Session, param: str) -> str: + """A synchronous function that does not require awaiting + + :param session: a SQLAlchemy Session, used synchronously + + :return: an optional return value is supported + + """ + session.add(MyObject(param=param)) + session.flush() + return "success" + + + async def do_something_async(async_engine: AsyncEngine) -> None: + """an async function that uses awaiting""" + + with AsyncSession(async_engine) as async_session: + # run some_business_method() with a sync-style + # Session, proxied into an awaitable + return_code = await async_session.run_sync( + some_business_method, param="param1" + ) + print(return_code) + + This method maintains the asyncio event loop all the way through + to the database connection by running the given callable in a + specially instrumented greenlet. + + .. tip:: + + The provided callable is invoked inline within the asyncio event + loop, and will block on traditional IO calls. IO within this + callable should only call into SQLAlchemy's asyncio database + APIs which will be properly adapted to the greenlet context. + + .. seealso:: + + :class:`.AsyncAttrs` - a mixin for ORM mapped classes that provides + a similar feature more succinctly on a per-attribute basis + + :meth:`.AsyncConnection.run_sync` + + :ref:`session_run_sync` + ''' # noqa: E501 + + return await greenlet_spawn( + fn, self.sync_session, *arg, _require_await=False, **kw + ) + + @overload + async def execute( + self, + statement: TypedReturnsRows[_T], + params: Optional[_CoreAnyExecuteParams] = None, + *, + execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT, + bind_arguments: Optional[_BindArguments] = None, + _parent_execute_state: Optional[Any] = None, + _add_event: Optional[Any] = None, + ) -> Result[_T]: ... + + @overload + async def execute( + self, + statement: UpdateBase, + params: Optional[_CoreAnyExecuteParams] = None, + *, + execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT, + bind_arguments: Optional[_BindArguments] = None, + _parent_execute_state: Optional[Any] = None, + _add_event: Optional[Any] = None, + ) -> CursorResult[Any]: ... + + @overload + async def execute( + self, + statement: Executable, + params: Optional[_CoreAnyExecuteParams] = None, + *, + execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT, + bind_arguments: Optional[_BindArguments] = None, + _parent_execute_state: Optional[Any] = None, + _add_event: Optional[Any] = None, + ) -> Result[Any]: ... + + async def execute( + self, + statement: Executable, + params: Optional[_CoreAnyExecuteParams] = None, + *, + execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT, + bind_arguments: Optional[_BindArguments] = None, + **kw: Any, + ) -> Result[Any]: + """Execute a statement and return a buffered + :class:`_engine.Result` object. + + .. 
seealso:: + + :meth:`_orm.Session.execute` - main documentation for execute + + """ + + if execution_options: + execution_options = util.immutabledict(execution_options).union( + _EXECUTE_OPTIONS + ) + else: + execution_options = _EXECUTE_OPTIONS + + result = await greenlet_spawn( + self.sync_session.execute, + statement, + params=params, + execution_options=execution_options, + bind_arguments=bind_arguments, + **kw, + ) + return await _ensure_sync_result(result, self.execute) + + @overload + async def scalar( + self, + statement: TypedReturnsRows[Tuple[_T]], + params: Optional[_CoreAnyExecuteParams] = None, + *, + execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT, + bind_arguments: Optional[_BindArguments] = None, + **kw: Any, + ) -> Optional[_T]: ... + + @overload + async def scalar( + self, + statement: Executable, + params: Optional[_CoreAnyExecuteParams] = None, + *, + execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT, + bind_arguments: Optional[_BindArguments] = None, + **kw: Any, + ) -> Any: ... + + async def scalar( + self, + statement: Executable, + params: Optional[_CoreAnyExecuteParams] = None, + *, + execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT, + bind_arguments: Optional[_BindArguments] = None, + **kw: Any, + ) -> Any: + """Execute a statement and return a scalar result. + + .. seealso:: + + :meth:`_orm.Session.scalar` - main documentation for scalar + + """ + + if execution_options: + execution_options = util.immutabledict(execution_options).union( + _EXECUTE_OPTIONS + ) + else: + execution_options = _EXECUTE_OPTIONS + + return await greenlet_spawn( + self.sync_session.scalar, + statement, + params=params, + execution_options=execution_options, + bind_arguments=bind_arguments, + **kw, + ) + + @overload + async def scalars( + self, + statement: TypedReturnsRows[Tuple[_T]], + params: Optional[_CoreAnyExecuteParams] = None, + *, + execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT, + bind_arguments: Optional[_BindArguments] = None, + **kw: Any, + ) -> ScalarResult[_T]: ... + + @overload + async def scalars( + self, + statement: Executable, + params: Optional[_CoreAnyExecuteParams] = None, + *, + execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT, + bind_arguments: Optional[_BindArguments] = None, + **kw: Any, + ) -> ScalarResult[Any]: ... + + async def scalars( + self, + statement: Executable, + params: Optional[_CoreAnyExecuteParams] = None, + *, + execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT, + bind_arguments: Optional[_BindArguments] = None, + **kw: Any, + ) -> ScalarResult[Any]: + """Execute a statement and return scalar results. + + :return: a :class:`_result.ScalarResult` object + + .. versionadded:: 1.4.24 Added :meth:`_asyncio.AsyncSession.scalars` + + .. versionadded:: 1.4.26 Added + :meth:`_asyncio.async_scoped_session.scalars` + + .. 
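# Sketch of the buffered execute()/scalar() calls documented above; statements use
# the assumed mapped class A from the earlier example.
from sqlalchemy import func, select
from sqlalchemy.ext.asyncio import AsyncSession


async def count_and_list(session: AsyncSession) -> None:
    # scalar() returns the first column of the first row, or None
    total = await session.scalar(select(func.count()).select_from(A))

    # execute() returns a pre-buffered Result; iteration needs no further awaiting
    result = await session.execute(select(A).order_by(A.id))
    for a in result.scalars():
        print(a.data)

    print("total rows:", total)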
seealso:: + + :meth:`_orm.Session.scalars` - main documentation for scalars + + :meth:`_asyncio.AsyncSession.stream_scalars` - streaming version + + """ + + result = await self.execute( + statement, + params=params, + execution_options=execution_options, + bind_arguments=bind_arguments, + **kw, + ) + return result.scalars() + + async def get( + self, + entity: _EntityBindKey[_O], + ident: _PKIdentityArgument, + *, + options: Optional[Sequence[ORMOption]] = None, + populate_existing: bool = False, + with_for_update: ForUpdateParameter = None, + identity_token: Optional[Any] = None, + execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT, + ) -> Union[_O, None]: + """Return an instance based on the given primary key identifier, + or ``None`` if not found. + + .. seealso:: + + :meth:`_orm.Session.get` - main documentation for get + + + """ + + return await greenlet_spawn( + cast("Callable[..., _O]", self.sync_session.get), + entity, + ident, + options=options, + populate_existing=populate_existing, + with_for_update=with_for_update, + identity_token=identity_token, + execution_options=execution_options, + ) + + async def get_one( + self, + entity: _EntityBindKey[_O], + ident: _PKIdentityArgument, + *, + options: Optional[Sequence[ORMOption]] = None, + populate_existing: bool = False, + with_for_update: ForUpdateParameter = None, + identity_token: Optional[Any] = None, + execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT, + ) -> _O: + """Return an instance based on the given primary key identifier, + or raise an exception if not found. + + Raises :class:`_exc.NoResultFound` if the query selects no rows. + + ..versionadded: 2.0.22 + + .. seealso:: + + :meth:`_orm.Session.get_one` - main documentation for get_one + + """ + + return await greenlet_spawn( + cast("Callable[..., _O]", self.sync_session.get_one), + entity, + ident, + options=options, + populate_existing=populate_existing, + with_for_update=with_for_update, + identity_token=identity_token, + execution_options=execution_options, + ) + + @overload + async def stream( + self, + statement: TypedReturnsRows[_T], + params: Optional[_CoreAnyExecuteParams] = None, + *, + execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT, + bind_arguments: Optional[_BindArguments] = None, + **kw: Any, + ) -> AsyncResult[_T]: ... + + @overload + async def stream( + self, + statement: Executable, + params: Optional[_CoreAnyExecuteParams] = None, + *, + execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT, + bind_arguments: Optional[_BindArguments] = None, + **kw: Any, + ) -> AsyncResult[Any]: ... + + async def stream( + self, + statement: Executable, + params: Optional[_CoreAnyExecuteParams] = None, + *, + execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT, + bind_arguments: Optional[_BindArguments] = None, + **kw: Any, + ) -> AsyncResult[Any]: + """Execute a statement and return a streaming + :class:`_asyncio.AsyncResult` object. 
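# Sketch contrasting get() and get_one(), per the docstrings above: get() returns
# None for a missing identity, get_one() raises NoResultFound. A is assumed.
from sqlalchemy.exc import NoResultFound
from sqlalchemy.ext.asyncio import AsyncSession


async def load_by_pk(session: AsyncSession) -> None:
    maybe_a = await session.get(A, 1)       # A or None
    if maybe_a is None:
        print("no row with primary key 1")

    try:
        a = await session.get_one(A, 1)     # A, or raises
        print(a.data)
    except NoResultFound:
        print("get_one() raised because primary key 1 does not exist")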
+ + """ + + if execution_options: + execution_options = util.immutabledict(execution_options).union( + _STREAM_OPTIONS + ) + else: + execution_options = _STREAM_OPTIONS + + result = await greenlet_spawn( + self.sync_session.execute, + statement, + params=params, + execution_options=execution_options, + bind_arguments=bind_arguments, + **kw, + ) + return AsyncResult(result) + + @overload + async def stream_scalars( + self, + statement: TypedReturnsRows[Tuple[_T]], + params: Optional[_CoreAnyExecuteParams] = None, + *, + execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT, + bind_arguments: Optional[_BindArguments] = None, + **kw: Any, + ) -> AsyncScalarResult[_T]: ... + + @overload + async def stream_scalars( + self, + statement: Executable, + params: Optional[_CoreAnyExecuteParams] = None, + *, + execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT, + bind_arguments: Optional[_BindArguments] = None, + **kw: Any, + ) -> AsyncScalarResult[Any]: ... + + async def stream_scalars( + self, + statement: Executable, + params: Optional[_CoreAnyExecuteParams] = None, + *, + execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT, + bind_arguments: Optional[_BindArguments] = None, + **kw: Any, + ) -> AsyncScalarResult[Any]: + """Execute a statement and return a stream of scalar results. + + :return: an :class:`_asyncio.AsyncScalarResult` object + + .. versionadded:: 1.4.24 + + .. seealso:: + + :meth:`_orm.Session.scalars` - main documentation for scalars + + :meth:`_asyncio.AsyncSession.scalars` - non streaming version + + """ + + result = await self.stream( + statement, + params=params, + execution_options=execution_options, + bind_arguments=bind_arguments, + **kw, + ) + return result.scalars() + + async def delete(self, instance: object) -> None: + """Mark an instance as deleted. + + The database delete operation occurs upon ``flush()``. + + As this operation may need to cascade along unloaded relationships, + it is awaitable to allow for those queries to take place. + + .. seealso:: + + :meth:`_orm.Session.delete` - main documentation for delete + + """ + await greenlet_spawn(self.sync_session.delete, instance) + + async def merge( + self, + instance: _O, + *, + load: bool = True, + options: Optional[Sequence[ORMOption]] = None, + ) -> _O: + """Copy the state of a given instance into a corresponding instance + within this :class:`_asyncio.AsyncSession`. + + .. seealso:: + + :meth:`_orm.Session.merge` - main documentation for merge + + """ + return await greenlet_spawn( + self.sync_session.merge, instance, load=load, options=options + ) + + async def flush(self, objects: Optional[Sequence[Any]] = None) -> None: + """Flush all the object changes to the database. + + .. seealso:: + + :meth:`_orm.Session.flush` - main documentation for flush + + """ + await greenlet_spawn(self.sync_session.flush, objects=objects) + + def get_transaction(self) -> Optional[AsyncSessionTransaction]: + """Return the current root transaction in progress, if any. + + :return: an :class:`_asyncio.AsyncSessionTransaction` object, or + ``None``. + + .. versionadded:: 1.4.18 + + """ + trans = self.sync_session.get_transaction() + if trans is not None: + return AsyncSessionTransaction._retrieve_proxy_for_target( + trans, async_session=self + ) + else: + return None + + def get_nested_transaction(self) -> Optional[AsyncSessionTransaction]: + """Return the current nested transaction in progress, if any. + + :return: an :class:`_asyncio.AsyncSessionTransaction` object, or + ``None``. + + .. 
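# Sketch of the streaming APIs above: stream_scalars() and stream() apply
# stream_results=True so rows can be consumed with `async for` without buffering
# the full result. A is the assumed mapped class.
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession


async def iterate_streaming(session: AsyncSession) -> None:
    scalars = await session.stream_scalars(select(A))
    async for a in scalars:
        print(a.data)
    await scalars.close()

    # partitions() batches a large streamed result
    result = await session.stream(select(A))
    async for batch in result.partitions(100):
        print("fetched", len(batch), "rows")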
versionadded:: 1.4.18 + + """ + + trans = self.sync_session.get_nested_transaction() + if trans is not None: + return AsyncSessionTransaction._retrieve_proxy_for_target( + trans, async_session=self + ) + else: + return None + + def get_bind( + self, + mapper: Optional[_EntityBindKey[_O]] = None, + clause: Optional[ClauseElement] = None, + bind: Optional[_SessionBind] = None, + **kw: Any, + ) -> Union[Engine, Connection]: + """Return a "bind" to which the synchronous proxied :class:`_orm.Session` + is bound. + + Unlike the :meth:`_orm.Session.get_bind` method, this method is + currently **not** used by this :class:`.AsyncSession` in any way + in order to resolve engines for requests. + + .. note:: + + This method proxies directly to the :meth:`_orm.Session.get_bind` + method, however is currently **not** useful as an override target, + in contrast to that of the :meth:`_orm.Session.get_bind` method. + The example below illustrates how to implement custom + :meth:`_orm.Session.get_bind` schemes that work with + :class:`.AsyncSession` and :class:`.AsyncEngine`. + + The pattern introduced at :ref:`session_custom_partitioning` + illustrates how to apply a custom bind-lookup scheme to a + :class:`_orm.Session` given a set of :class:`_engine.Engine` objects. + To apply a corresponding :meth:`_orm.Session.get_bind` implementation + for use with a :class:`.AsyncSession` and :class:`.AsyncEngine` + objects, continue to subclass :class:`_orm.Session` and apply it to + :class:`.AsyncSession` using + :paramref:`.AsyncSession.sync_session_class`. The inner method must + continue to return :class:`_engine.Engine` instances, which can be + acquired from a :class:`_asyncio.AsyncEngine` using the + :attr:`_asyncio.AsyncEngine.sync_engine` attribute:: + + # using example from "Custom Vertical Partitioning" + + + import random + + from sqlalchemy.ext.asyncio import AsyncSession + from sqlalchemy.ext.asyncio import create_async_engine + from sqlalchemy.ext.asyncio import async_sessionmaker + from sqlalchemy.orm import Session + + # construct async engines w/ async drivers + engines = { + "leader": create_async_engine("sqlite+aiosqlite:///leader.db"), + "other": create_async_engine("sqlite+aiosqlite:///other.db"), + "follower1": create_async_engine("sqlite+aiosqlite:///follower1.db"), + "follower2": create_async_engine("sqlite+aiosqlite:///follower2.db"), + } + + + class RoutingSession(Session): + def get_bind(self, mapper=None, clause=None, **kw): + # within get_bind(), return sync engines + if mapper and issubclass(mapper.class_, MyOtherClass): + return engines["other"].sync_engine + elif self._flushing or isinstance(clause, (Update, Delete)): + return engines["leader"].sync_engine + else: + return engines[ + random.choice(["follower1", "follower2"]) + ].sync_engine + + + # apply to AsyncSession using sync_session_class + AsyncSessionMaker = async_sessionmaker(sync_session_class=RoutingSession) + + The :meth:`_orm.Session.get_bind` method is called in a non-asyncio, + implicitly non-blocking context in the same manner as ORM event hooks + and functions that are invoked via :meth:`.AsyncSession.run_sync`, so + routines that wish to run SQL commands inside of + :meth:`_orm.Session.get_bind` can continue to do so using + blocking-style code, which will be translated to implicitly async calls + at the point of invoking IO on the database drivers. 
+ + """ # noqa: E501 + + return self.sync_session.get_bind( + mapper=mapper, clause=clause, bind=bind, **kw + ) + + async def connection( + self, + bind_arguments: Optional[_BindArguments] = None, + execution_options: Optional[CoreExecuteOptionsParameter] = None, + **kw: Any, + ) -> AsyncConnection: + r"""Return a :class:`_asyncio.AsyncConnection` object corresponding to + this :class:`.Session` object's transactional state. + + This method may also be used to establish execution options for the + database connection used by the current transaction. + + .. versionadded:: 1.4.24 Added \**kw arguments which are passed + through to the underlying :meth:`_orm.Session.connection` method. + + .. seealso:: + + :meth:`_orm.Session.connection` - main documentation for + "connection" + + """ + + sync_connection = await greenlet_spawn( + self.sync_session.connection, + bind_arguments=bind_arguments, + execution_options=execution_options, + **kw, + ) + return engine.AsyncConnection._retrieve_proxy_for_target( + sync_connection + ) + + def begin(self) -> AsyncSessionTransaction: + """Return an :class:`_asyncio.AsyncSessionTransaction` object. + + The underlying :class:`_orm.Session` will perform the + "begin" action when the :class:`_asyncio.AsyncSessionTransaction` + object is entered:: + + async with async_session.begin(): + ... # ORM transaction is begun + + Note that database IO will not normally occur when the session-level + transaction is begun, as database transactions begin on an + on-demand basis. However, the begin block is async to accommodate + for a :meth:`_orm.SessionEvents.after_transaction_create` + event hook that may perform IO. + + For a general description of ORM begin, see + :meth:`_orm.Session.begin`. + + """ + + return AsyncSessionTransaction(self) + + def begin_nested(self) -> AsyncSessionTransaction: + """Return an :class:`_asyncio.AsyncSessionTransaction` object + which will begin a "nested" transaction, e.g. SAVEPOINT. + + Behavior is the same as that of :meth:`_asyncio.AsyncSession.begin`. + + For a general description of ORM begin nested, see + :meth:`_orm.Session.begin_nested`. + + .. seealso:: + + :ref:`aiosqlite_serializable` - special workarounds required + with the SQLite asyncio driver in order for SAVEPOINT to work + correctly. + + """ + + return AsyncSessionTransaction(self, nested=True) + + async def rollback(self) -> None: + """Rollback the current transaction in progress. + + .. seealso:: + + :meth:`_orm.Session.rollback` - main documentation for + "rollback" + """ + await greenlet_spawn(self.sync_session.rollback) + + async def commit(self) -> None: + """Commit the current transaction in progress. + + .. seealso:: + + :meth:`_orm.Session.commit` - main documentation for + "commit" + """ + await greenlet_spawn(self.sync_session.commit) + + async def close(self) -> None: + """Close out the transactional resources and ORM objects used by this + :class:`_asyncio.AsyncSession`. + + .. seealso:: + + :meth:`_orm.Session.close` - main documentation for + "close" + + :ref:`session_closing` - detail on the semantics of + :meth:`_asyncio.AsyncSession.close` and + :meth:`_asyncio.AsyncSession.reset`. + + """ + await greenlet_spawn(self.sync_session.close) + + async def reset(self) -> None: + """Close out the transactional resources and ORM objects used by this + :class:`_orm.Session`, resetting the session to its initial state. + + .. versionadded:: 2.0.22 + + .. 
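# Sketch of the begin()/begin_nested() blocks documented above: the outer block
# commits on success while the inner SAVEPOINT rolls back independently. The
# session factory (an async_sessionmaker) and mapped class A are assumed.
async def transactional_work(session_factory) -> None:
    async with session_factory() as session:
        async with session.begin():                  # outer transaction
            session.add(A(data="kept"))

            try:
                async with session.begin_nested():   # SAVEPOINT
                    session.add(A(data="discarded"))
                    raise RuntimeError("force SAVEPOINT rollback")
            except RuntimeError:
                pass
        # outer transaction committed on exiting begin(); only "kept" persists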
seealso:: + + :meth:`_orm.Session.reset` - main documentation for + "reset" + + :ref:`session_closing` - detail on the semantics of + :meth:`_asyncio.AsyncSession.close` and + :meth:`_asyncio.AsyncSession.reset`. + + """ + await greenlet_spawn(self.sync_session.reset) + + async def aclose(self) -> None: + """A synonym for :meth:`_asyncio.AsyncSession.close`. + + The :meth:`_asyncio.AsyncSession.aclose` name is specifically + to support the Python standard library ``@contextlib.aclosing`` + context manager function. + + .. versionadded:: 2.0.20 + + """ + await self.close() + + async def invalidate(self) -> None: + """Close this Session, using connection invalidation. + + For a complete description, see :meth:`_orm.Session.invalidate`. + """ + await greenlet_spawn(self.sync_session.invalidate) + + @classmethod + @util.deprecated( + "2.0", + "The :meth:`.AsyncSession.close_all` method is deprecated and will be " + "removed in a future release. Please refer to " + ":func:`_asyncio.close_all_sessions`.", + ) + async def close_all(cls) -> None: + """Close all :class:`_asyncio.AsyncSession` sessions.""" + await close_all_sessions() + + async def __aenter__(self: _AS) -> _AS: + return self + + async def __aexit__(self, type_: Any, value: Any, traceback: Any) -> None: + task = asyncio.create_task(self.close()) + await asyncio.shield(task) + + def _maker_context_manager(self: _AS) -> _AsyncSessionContextManager[_AS]: + return _AsyncSessionContextManager(self) + + # START PROXY METHODS AsyncSession + + # code within this block is **programmatically, + # statically generated** by tools/generate_proxy_methods.py + + def __contains__(self, instance: object) -> bool: + r"""Return True if the instance is associated with this session. + + .. container:: class_bases + + Proxied for the :class:`_orm.Session` class on + behalf of the :class:`_asyncio.AsyncSession` class. + + The instance may be pending or persistent within the Session for a + result of True. + + + """ # noqa: E501 + + return self._proxied.__contains__(instance) + + def __iter__(self) -> Iterator[object]: + r"""Iterate over all pending or persistent instances within this + Session. + + .. container:: class_bases + + Proxied for the :class:`_orm.Session` class on + behalf of the :class:`_asyncio.AsyncSession` class. + + + """ # noqa: E501 + + return self._proxied.__iter__() + + def add(self, instance: object, _warn: bool = True) -> None: + r"""Place an object into this :class:`_orm.Session`. + + .. container:: class_bases + + Proxied for the :class:`_orm.Session` class on + behalf of the :class:`_asyncio.AsyncSession` class. + + Objects that are in the :term:`transient` state when passed to the + :meth:`_orm.Session.add` method will move to the + :term:`pending` state, until the next flush, at which point they + will move to the :term:`persistent` state. + + Objects that are in the :term:`detached` state when passed to the + :meth:`_orm.Session.add` method will move to the :term:`persistent` + state directly. + + If the transaction used by the :class:`_orm.Session` is rolled back, + objects which were transient when they were passed to + :meth:`_orm.Session.add` will be moved back to the + :term:`transient` state, and will no longer be present within this + :class:`_orm.Session`. + + .. 
seealso:: + + :meth:`_orm.Session.add_all` + + :ref:`session_adding` - at :ref:`session_basics` + + + """ # noqa: E501 + + return self._proxied.add(instance, _warn=_warn) + + def add_all(self, instances: Iterable[object]) -> None: + r"""Add the given collection of instances to this :class:`_orm.Session`. + + .. container:: class_bases + + Proxied for the :class:`_orm.Session` class on + behalf of the :class:`_asyncio.AsyncSession` class. + + See the documentation for :meth:`_orm.Session.add` for a general + behavioral description. + + .. seealso:: + + :meth:`_orm.Session.add` + + :ref:`session_adding` - at :ref:`session_basics` + + + """ # noqa: E501 + + return self._proxied.add_all(instances) + + def expire( + self, instance: object, attribute_names: Optional[Iterable[str]] = None + ) -> None: + r"""Expire the attributes on an instance. + + .. container:: class_bases + + Proxied for the :class:`_orm.Session` class on + behalf of the :class:`_asyncio.AsyncSession` class. + + Marks the attributes of an instance as out of date. When an expired + attribute is next accessed, a query will be issued to the + :class:`.Session` object's current transactional context in order to + load all expired attributes for the given instance. Note that + a highly isolated transaction will return the same values as were + previously read in that same transaction, regardless of changes + in database state outside of that transaction. + + To expire all objects in the :class:`.Session` simultaneously, + use :meth:`Session.expire_all`. + + The :class:`.Session` object's default behavior is to + expire all state whenever the :meth:`Session.rollback` + or :meth:`Session.commit` methods are called, so that new + state can be loaded for the new transaction. For this reason, + calling :meth:`Session.expire` only makes sense for the specific + case that a non-ORM SQL statement was emitted in the current + transaction. + + :param instance: The instance to be refreshed. + :param attribute_names: optional list of string attribute names + indicating a subset of attributes to be expired. + + .. seealso:: + + :ref:`session_expire` - introductory material + + :meth:`.Session.expire` + + :meth:`.Session.refresh` + + :meth:`_orm.Query.populate_existing` + + + """ # noqa: E501 + + return self._proxied.expire(instance, attribute_names=attribute_names) + + def expire_all(self) -> None: + r"""Expires all persistent instances within this Session. + + .. container:: class_bases + + Proxied for the :class:`_orm.Session` class on + behalf of the :class:`_asyncio.AsyncSession` class. + + When any attributes on a persistent instance is next accessed, + a query will be issued using the + :class:`.Session` object's current transactional context in order to + load all expired attributes for the given instance. Note that + a highly isolated transaction will return the same values as were + previously read in that same transaction, regardless of changes + in database state outside of that transaction. + + To expire individual objects and individual attributes + on those objects, use :meth:`Session.expire`. + + The :class:`.Session` object's default behavior is to + expire all state whenever the :meth:`Session.rollback` + or :meth:`Session.commit` methods are called, so that new + state can be loaded for the new transaction. For this reason, + calling :meth:`Session.expire_all` is not usually needed, + assuming the transaction is isolated. + + .. 
seealso:: + + :ref:`session_expire` - introductory material + + :meth:`.Session.expire` + + :meth:`.Session.refresh` + + :meth:`_orm.Query.populate_existing` + + + """ # noqa: E501 + + return self._proxied.expire_all() + + def expunge(self, instance: object) -> None: + r"""Remove the `instance` from this ``Session``. + + .. container:: class_bases + + Proxied for the :class:`_orm.Session` class on + behalf of the :class:`_asyncio.AsyncSession` class. + + This will free all internal references to the instance. Cascading + will be applied according to the *expunge* cascade rule. + + + """ # noqa: E501 + + return self._proxied.expunge(instance) + + def expunge_all(self) -> None: + r"""Remove all object instances from this ``Session``. + + .. container:: class_bases + + Proxied for the :class:`_orm.Session` class on + behalf of the :class:`_asyncio.AsyncSession` class. + + This is equivalent to calling ``expunge(obj)`` on all objects in this + ``Session``. + + + """ # noqa: E501 + + return self._proxied.expunge_all() + + def is_modified( + self, instance: object, include_collections: bool = True + ) -> bool: + r"""Return ``True`` if the given instance has locally + modified attributes. + + .. container:: class_bases + + Proxied for the :class:`_orm.Session` class on + behalf of the :class:`_asyncio.AsyncSession` class. + + This method retrieves the history for each instrumented + attribute on the instance and performs a comparison of the current + value to its previously flushed or committed value, if any. + + It is in effect a more expensive and accurate + version of checking for the given instance in the + :attr:`.Session.dirty` collection; a full test for + each attribute's net "dirty" status is performed. + + E.g.:: + + return session.is_modified(someobject) + + A few caveats to this method apply: + + * Instances present in the :attr:`.Session.dirty` collection may + report ``False`` when tested with this method. This is because + the object may have received change events via attribute mutation, + thus placing it in :attr:`.Session.dirty`, but ultimately the state + is the same as that loaded from the database, resulting in no net + change here. + * Scalar attributes may not have recorded the previously set + value when a new value was applied, if the attribute was not loaded, + or was expired, at the time the new value was received - in these + cases, the attribute is assumed to have a change, even if there is + ultimately no net change against its database value. SQLAlchemy in + most cases does not need the "old" value when a set event occurs, so + it skips the expense of a SQL call if the old value isn't present, + based on the assumption that an UPDATE of the scalar value is + usually needed, and in those few cases where it isn't, is less + expensive on average than issuing a defensive SELECT. + + The "old" value is fetched unconditionally upon set only if the + attribute container has the ``active_history`` flag set to ``True``. + This flag is set typically for primary key attributes and scalar + object references that are not a simple many-to-one. To set this + flag for any arbitrary mapped column, use the ``active_history`` + argument with :func:`.column_property`. + + :param instance: mapped instance to be tested for pending changes. + :param include_collections: Indicates if multivalued collections + should be included in the operation. Setting this to ``False`` is a + way to detect only local-column based properties (i.e. 
scalar columns + or many-to-one foreign keys) that would result in an UPDATE for this + instance upon flush. + + + """ # noqa: E501 + + return self._proxied.is_modified( + instance, include_collections=include_collections + ) + + def in_transaction(self) -> bool: + r"""Return True if this :class:`_orm.Session` has begun a transaction. + + .. container:: class_bases + + Proxied for the :class:`_orm.Session` class on + behalf of the :class:`_asyncio.AsyncSession` class. + + .. versionadded:: 1.4 + + .. seealso:: + + :attr:`_orm.Session.is_active` + + + + """ # noqa: E501 + + return self._proxied.in_transaction() + + def in_nested_transaction(self) -> bool: + r"""Return True if this :class:`_orm.Session` has begun a nested + transaction, e.g. SAVEPOINT. + + .. container:: class_bases + + Proxied for the :class:`_orm.Session` class on + behalf of the :class:`_asyncio.AsyncSession` class. + + .. versionadded:: 1.4 + + + """ # noqa: E501 + + return self._proxied.in_nested_transaction() + + @property + def dirty(self) -> Any: + r"""The set of all persistent instances considered dirty. + + .. container:: class_bases + + Proxied for the :class:`_orm.Session` class + on behalf of the :class:`_asyncio.AsyncSession` class. + + E.g.:: + + some_mapped_object in session.dirty + + Instances are considered dirty when they were modified but not + deleted. + + Note that this 'dirty' calculation is 'optimistic'; most + attribute-setting or collection modification operations will + mark an instance as 'dirty' and place it in this set, even if + there is no net change to the attribute's value. At flush + time, the value of each attribute is compared to its + previously saved value, and if there's no net change, no SQL + operation will occur (this is a more expensive operation so + it's only done at flush time). + + To check if an instance has actionable net changes to its + attributes, use the :meth:`.Session.is_modified` method. + + + """ # noqa: E501 + + return self._proxied.dirty + + @property + def deleted(self) -> Any: + r"""The set of all instances marked as 'deleted' within this ``Session`` + + .. container:: class_bases + + Proxied for the :class:`_orm.Session` class + on behalf of the :class:`_asyncio.AsyncSession` class. + + """ # noqa: E501 + + return self._proxied.deleted + + @property + def new(self) -> Any: + r"""The set of all instances marked as 'new' within this ``Session``. + + .. container:: class_bases + + Proxied for the :class:`_orm.Session` class + on behalf of the :class:`_asyncio.AsyncSession` class. + + """ # noqa: E501 + + return self._proxied.new + + @property + def identity_map(self) -> IdentityMap: + r"""Proxy for the :attr:`_orm.Session.identity_map` attribute + on behalf of the :class:`_asyncio.AsyncSession` class. + + """ # noqa: E501 + + return self._proxied.identity_map + + @identity_map.setter + def identity_map(self, attr: IdentityMap) -> None: + self._proxied.identity_map = attr + + @property + def is_active(self) -> Any: + r"""True if this :class:`.Session` not in "partial rollback" state. + + .. container:: class_bases + + Proxied for the :class:`_orm.Session` class + on behalf of the :class:`_asyncio.AsyncSession` class. + + .. versionchanged:: 1.4 The :class:`_orm.Session` no longer begins + a new transaction immediately, so this attribute will be False + when the :class:`_orm.Session` is first instantiated. 
+ + "partial rollback" state typically indicates that the flush process + of the :class:`_orm.Session` has failed, and that the + :meth:`_orm.Session.rollback` method must be emitted in order to + fully roll back the transaction. + + If this :class:`_orm.Session` is not in a transaction at all, the + :class:`_orm.Session` will autobegin when it is first used, so in this + case :attr:`_orm.Session.is_active` will return True. + + Otherwise, if this :class:`_orm.Session` is within a transaction, + and that transaction has not been rolled back internally, the + :attr:`_orm.Session.is_active` will also return True. + + .. seealso:: + + :ref:`faq_session_rollback` + + :meth:`_orm.Session.in_transaction` + + + """ # noqa: E501 + + return self._proxied.is_active + + @property + def autoflush(self) -> bool: + r"""Proxy for the :attr:`_orm.Session.autoflush` attribute + on behalf of the :class:`_asyncio.AsyncSession` class. + + """ # noqa: E501 + + return self._proxied.autoflush + + @autoflush.setter + def autoflush(self, attr: bool) -> None: + self._proxied.autoflush = attr + + @property + def no_autoflush(self) -> Any: + r"""Return a context manager that disables autoflush. + + .. container:: class_bases + + Proxied for the :class:`_orm.Session` class + on behalf of the :class:`_asyncio.AsyncSession` class. + + e.g.:: + + with session.no_autoflush: + + some_object = SomeClass() + session.add(some_object) + # won't autoflush + some_object.related_thing = session.query(SomeRelated).first() + + Operations that proceed within the ``with:`` block + will not be subject to flushes occurring upon query + access. This is useful when initializing a series + of objects which involve existing database queries, + where the uncompleted object should not yet be flushed. + + + """ # noqa: E501 + + return self._proxied.no_autoflush + + @property + def info(self) -> Any: + r"""A user-modifiable dictionary. + + .. container:: class_bases + + Proxied for the :class:`_orm.Session` class + on behalf of the :class:`_asyncio.AsyncSession` class. + + The initial value of this dictionary can be populated using the + ``info`` argument to the :class:`.Session` constructor or + :class:`.sessionmaker` constructor or factory methods. The dictionary + here is always local to this :class:`.Session` and can be modified + independently of all other :class:`.Session` objects. + + + """ # noqa: E501 + + return self._proxied.info + + @classmethod + def object_session(cls, instance: object) -> Optional[Session]: + r"""Return the :class:`.Session` to which an object belongs. + + .. container:: class_bases + + Proxied for the :class:`_orm.Session` class on + behalf of the :class:`_asyncio.AsyncSession` class. + + This is an alias of :func:`.object_session`. + + + """ # noqa: E501 + + return Session.object_session(instance) + + @classmethod + def identity_key( + cls, + class_: Optional[Type[Any]] = None, + ident: Union[Any, Tuple[Any, ...]] = None, + *, + instance: Optional[Any] = None, + row: Optional[Union[Row[Any], RowMapping]] = None, + identity_token: Optional[Any] = None, + ) -> _IdentityKeyType[Any]: + r"""Return an identity key. + + .. container:: class_bases + + Proxied for the :class:`_orm.Session` class on + behalf of the :class:`_asyncio.AsyncSession` class. + + This is an alias of :func:`.util.identity_key`. 
+ + + """ # noqa: E501 + + return Session.identity_key( + class_=class_, + ident=ident, + instance=instance, + row=row, + identity_token=identity_token, + ) + + # END PROXY METHODS AsyncSession + + +_AS = TypeVar("_AS", bound="AsyncSession") + + +class async_sessionmaker(Generic[_AS]): + """A configurable :class:`.AsyncSession` factory. + + The :class:`.async_sessionmaker` factory works in the same way as the + :class:`.sessionmaker` factory, to generate new :class:`.AsyncSession` + objects when called, creating them given + the configurational arguments established here. + + e.g.:: + + from sqlalchemy.ext.asyncio import create_async_engine + from sqlalchemy.ext.asyncio import AsyncSession + from sqlalchemy.ext.asyncio import async_sessionmaker + + + async def run_some_sql( + async_session: async_sessionmaker[AsyncSession], + ) -> None: + async with async_session() as session: + session.add(SomeObject(data="object")) + session.add(SomeOtherObject(name="other object")) + await session.commit() + + + async def main() -> None: + # an AsyncEngine, which the AsyncSession will use for connection + # resources + engine = create_async_engine( + "postgresql+asyncpg://scott:tiger@localhost/" + ) + + # create a reusable factory for new AsyncSession instances + async_session = async_sessionmaker(engine) + + await run_some_sql(async_session) + + await engine.dispose() + + The :class:`.async_sessionmaker` is useful so that different parts + of a program can create new :class:`.AsyncSession` objects with a + fixed configuration established up front. Note that :class:`.AsyncSession` + objects may also be instantiated directly when not using + :class:`.async_sessionmaker`. + + .. versionadded:: 2.0 :class:`.async_sessionmaker` provides a + :class:`.sessionmaker` class that's dedicated to the + :class:`.AsyncSession` object, including pep-484 typing support. + + .. seealso:: + + :ref:`asyncio_orm` - shows example use + + :class:`.sessionmaker` - general overview of the + :class:`.sessionmaker` architecture + + + :ref:`session_getting` - introductory text on creating + sessions using :class:`.sessionmaker`. + + """ # noqa E501 + + class_: Type[_AS] + + @overload + def __init__( + self, + bind: Optional[_AsyncSessionBind] = ..., + *, + class_: Type[_AS], + autoflush: bool = ..., + expire_on_commit: bool = ..., + info: Optional[_InfoType] = ..., + **kw: Any, + ): ... + + @overload + def __init__( + self: "async_sessionmaker[AsyncSession]", + bind: Optional[_AsyncSessionBind] = ..., + *, + autoflush: bool = ..., + expire_on_commit: bool = ..., + info: Optional[_InfoType] = ..., + **kw: Any, + ): ... + + def __init__( + self, + bind: Optional[_AsyncSessionBind] = None, + *, + class_: Type[_AS] = AsyncSession, # type: ignore + autoflush: bool = True, + expire_on_commit: bool = True, + info: Optional[_InfoType] = None, + **kw: Any, + ): + r"""Construct a new :class:`.async_sessionmaker`. + + All arguments here except for ``class_`` correspond to arguments + accepted by :class:`.Session` directly. See the + :meth:`.AsyncSession.__init__` docstring for more details on + parameters. + + + """ + kw["bind"] = bind + kw["autoflush"] = autoflush + kw["expire_on_commit"] = expire_on_commit + if info is not None: + kw["info"] = info + self.kw = kw + self.class_ = class_ + + def begin(self) -> _AsyncSessionContextManager[_AS]: + """Produce a context manager that both provides a new + :class:`_orm.AsyncSession` as well as a transaction that commits. 
+ + + e.g.:: + + async def main(): + Session = async_sessionmaker(some_engine) + + async with Session.begin() as session: + session.add(some_object) + + # commits transaction, closes session + + """ + + session = self() + return session._maker_context_manager() + + def __call__(self, **local_kw: Any) -> _AS: + """Produce a new :class:`.AsyncSession` object using the configuration + established in this :class:`.async_sessionmaker`. + + In Python, the ``__call__`` method is invoked on an object when + it is "called" in the same way as a function:: + + AsyncSession = async_sessionmaker(async_engine, expire_on_commit=False) + session = AsyncSession() # invokes sessionmaker.__call__() + + """ # noqa E501 + for k, v in self.kw.items(): + if k == "info" and "info" in local_kw: + d = v.copy() + d.update(local_kw["info"]) + local_kw["info"] = d + else: + local_kw.setdefault(k, v) + return self.class_(**local_kw) + + def configure(self, **new_kw: Any) -> None: + """(Re)configure the arguments for this async_sessionmaker. + + e.g.:: + + AsyncSession = async_sessionmaker(some_engine) + + AsyncSession.configure(bind=create_async_engine("sqlite+aiosqlite://")) + """ # noqa E501 + + self.kw.update(new_kw) + + def __repr__(self) -> str: + return "%s(class_=%r, %s)" % ( + self.__class__.__name__, + self.class_.__name__, + ", ".join("%s=%r" % (k, v) for k, v in self.kw.items()), + ) + + +class _AsyncSessionContextManager(Generic[_AS]): + __slots__ = ("async_session", "trans") + + async_session: _AS + trans: AsyncSessionTransaction + + def __init__(self, async_session: _AS): + self.async_session = async_session + + async def __aenter__(self) -> _AS: + self.trans = self.async_session.begin() + await self.trans.__aenter__() + return self.async_session + + async def __aexit__(self, type_: Any, value: Any, traceback: Any) -> None: + async def go() -> None: + await self.trans.__aexit__(type_, value, traceback) + await self.async_session.__aexit__(type_, value, traceback) + + task = asyncio.create_task(go()) + await asyncio.shield(task) + + +class AsyncSessionTransaction( + ReversibleProxy[SessionTransaction], + StartableContext["AsyncSessionTransaction"], +): + """A wrapper for the ORM :class:`_orm.SessionTransaction` object. + + This object is provided so that a transaction-holding object + for the :meth:`_asyncio.AsyncSession.begin` may be returned. + + The object supports both explicit calls to + :meth:`_asyncio.AsyncSessionTransaction.commit` and + :meth:`_asyncio.AsyncSessionTransaction.rollback`, as well as use as an + async context manager. + + + .. 
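# Sketch of driving AsyncSessionTransaction explicitly instead of via
# `async with`, as the docstring above notes explicit commit()/rollback() are
# supported; awaiting the begin() result starts the underlying transaction.
# The AsyncSession `session` and mapped class A are assumed.
from sqlalchemy.ext.asyncio import AsyncSession


async def explicit_transaction(session: AsyncSession) -> None:
    trans = await session.begin()
    try:
        session.add(A(data="explicit"))
        await trans.commit()
    except Exception:
        await trans.rollback()
        raise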
versionadded:: 1.4 + + """ + + __slots__ = ("session", "sync_transaction", "nested") + + session: AsyncSession + sync_transaction: Optional[SessionTransaction] + + def __init__(self, session: AsyncSession, nested: bool = False): + self.session = session + self.nested = nested + self.sync_transaction = None + + @property + def is_active(self) -> bool: + return ( + self._sync_transaction() is not None + and self._sync_transaction().is_active + ) + + def _sync_transaction(self) -> SessionTransaction: + if not self.sync_transaction: + self._raise_for_not_started() + return self.sync_transaction + + async def rollback(self) -> None: + """Roll back this :class:`_asyncio.AsyncTransaction`.""" + await greenlet_spawn(self._sync_transaction().rollback) + + async def commit(self) -> None: + """Commit this :class:`_asyncio.AsyncTransaction`.""" + + await greenlet_spawn(self._sync_transaction().commit) + + @classmethod + def _regenerate_proxy_for_target( # type: ignore[override] + cls, + target: SessionTransaction, + async_session: AsyncSession, + **additional_kw: Any, # noqa: U100 + ) -> AsyncSessionTransaction: + sync_transaction = target + nested = target.nested + obj = cls.__new__(cls) + obj.session = async_session + obj.sync_transaction = obj._assign_proxied(sync_transaction) + obj.nested = nested + return obj + + async def start( + self, is_ctxmanager: bool = False + ) -> AsyncSessionTransaction: + self.sync_transaction = self._assign_proxied( + await greenlet_spawn( + self.session.sync_session.begin_nested + if self.nested + else self.session.sync_session.begin + ) + ) + if is_ctxmanager: + self.sync_transaction.__enter__() + return self + + async def __aexit__(self, type_: Any, value: Any, traceback: Any) -> None: + await greenlet_spawn( + self._sync_transaction().__exit__, type_, value, traceback + ) + + +def async_object_session(instance: object) -> Optional[AsyncSession]: + """Return the :class:`_asyncio.AsyncSession` to which the given instance + belongs. + + This function makes use of the sync-API function + :class:`_orm.object_session` to retrieve the :class:`_orm.Session` which + refers to the given instance, and from there links it to the original + :class:`_asyncio.AsyncSession`. + + If the :class:`_asyncio.AsyncSession` has been garbage collected, the + return value is ``None``. + + This functionality is also available from the + :attr:`_orm.InstanceState.async_session` accessor. + + :param instance: an ORM mapped instance + :return: an :class:`_asyncio.AsyncSession` object, or ``None``. + + .. versionadded:: 1.4.18 + + """ + + session = object_session(instance) + if session is not None: + return async_session(session) + else: + return None + + +def async_session(session: Session) -> Optional[AsyncSession]: + """Return the :class:`_asyncio.AsyncSession` which is proxying the given + :class:`_orm.Session` object, if any. + + :param session: a :class:`_orm.Session` instance. + :return: a :class:`_asyncio.AsyncSession` instance, or ``None``. + + .. versionadded:: 1.4.18 + + """ + return AsyncSession._retrieve_proxy_for_target(session, regenerate=False) + + +async def close_all_sessions() -> None: + """Close all :class:`_asyncio.AsyncSession` sessions. + + .. versionadded:: 2.0.23 + + .. 
seealso:: + + :func:`.session.close_all_sessions` + + """ + await greenlet_spawn(_sync_close_all_sessions) + + +_instance_state._async_provider = async_session # type: ignore diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/ext/automap.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/ext/automap.py new file mode 100644 index 0000000000000000000000000000000000000000..817f91d267b1830abd4bf251402b5d04b0c126aa --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/ext/automap.py @@ -0,0 +1,1701 @@ +# ext/automap.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php + +r"""Define an extension to the :mod:`sqlalchemy.ext.declarative` system +which automatically generates mapped classes and relationships from a database +schema, typically though not necessarily one which is reflected. + +It is hoped that the :class:`.AutomapBase` system provides a quick +and modernized solution to the problem that the very famous +`SQLSoup `_ +also tries to solve, that of generating a quick and rudimentary object +model from an existing database on the fly. By addressing the issue strictly +at the mapper configuration level, and integrating fully with existing +Declarative class techniques, :class:`.AutomapBase` seeks to provide +a well-integrated approach to the issue of expediently auto-generating ad-hoc +mappings. + +.. tip:: The :ref:`automap_toplevel` extension is geared towards a + "zero declaration" approach, where a complete ORM model including classes + and pre-named relationships can be generated on the fly from a database + schema. For applications that still want to use explicit class declarations + including explicit relationship definitions in conjunction with reflection + of tables, the :class:`.DeferredReflection` class, described at + :ref:`orm_declarative_reflected_deferred_reflection`, is a better choice. + +.. _automap_basic_use: + +Basic Use +========= + +The simplest usage is to reflect an existing database into a new model. +We create a new :class:`.AutomapBase` class in a similar manner as to how +we create a declarative base class, using :func:`.automap_base`. +We then call :meth:`.AutomapBase.prepare` on the resulting base class, +asking it to reflect the schema and produce mappings:: + + from sqlalchemy.ext.automap import automap_base + from sqlalchemy.orm import Session + from sqlalchemy import create_engine + + Base = automap_base() + + # engine, suppose it has two tables 'user' and 'address' set up + engine = create_engine("sqlite:///mydatabase.db") + + # reflect the tables + Base.prepare(autoload_with=engine) + + # mapped classes are now created with names by default + # matching that of the table name. 
+ User = Base.classes.user + Address = Base.classes.address + + session = Session(engine) + + # rudimentary relationships are produced + session.add(Address(email_address="foo@bar.com", user=User(name="foo"))) + session.commit() + + # collection-based relationships are by default named + # "_collection" + u1 = session.query(User).first() + print(u1.address_collection) + +Above, calling :meth:`.AutomapBase.prepare` while passing along the +:paramref:`.AutomapBase.prepare.reflect` parameter indicates that the +:meth:`_schema.MetaData.reflect` +method will be called on this declarative base +classes' :class:`_schema.MetaData` collection; then, each **viable** +:class:`_schema.Table` within the :class:`_schema.MetaData` +will get a new mapped class +generated automatically. The :class:`_schema.ForeignKeyConstraint` +objects which +link the various tables together will be used to produce new, bidirectional +:func:`_orm.relationship` objects between classes. +The classes and relationships +follow along a default naming scheme that we can customize. At this point, +our basic mapping consisting of related ``User`` and ``Address`` classes is +ready to use in the traditional way. + +.. note:: By **viable**, we mean that for a table to be mapped, it must + specify a primary key. Additionally, if the table is detected as being + a pure association table between two other tables, it will not be directly + mapped and will instead be configured as a many-to-many table between + the mappings for the two referring tables. + +Generating Mappings from an Existing MetaData +============================================= + +We can pass a pre-declared :class:`_schema.MetaData` object to +:func:`.automap_base`. +This object can be constructed in any way, including programmatically, from +a serialized file, or from itself being reflected using +:meth:`_schema.MetaData.reflect`. +Below we illustrate a combination of reflection and +explicit table declaration:: + + from sqlalchemy import create_engine, MetaData, Table, Column, ForeignKey + from sqlalchemy.ext.automap import automap_base + + engine = create_engine("sqlite:///mydatabase.db") + + # produce our own MetaData object + metadata = MetaData() + + # we can reflect it ourselves from a database, using options + # such as 'only' to limit what tables we look at... + metadata.reflect(engine, only=["user", "address"]) + + # ... or just define our own Table objects with it (or combine both) + Table( + "user_order", + metadata, + Column("id", Integer, primary_key=True), + Column("user_id", ForeignKey("user.id")), + ) + + # we can then produce a set of mappings from this MetaData. + Base = automap_base(metadata=metadata) + + # calling prepare() just sets up mapped classes and relationships. + Base.prepare() + + # mapped classes are ready + User = Base.classes.user + Address = Base.classes.address + Order = Base.classes.user_order + +.. _automap_by_module: + +Generating Mappings from Multiple Schemas +========================================= + +The :meth:`.AutomapBase.prepare` method when used with reflection may reflect +tables from one schema at a time at most, using the +:paramref:`.AutomapBase.prepare.schema` parameter to indicate the name of a +schema to be reflected from. In order to populate the :class:`.AutomapBase` +with tables from multiple schemas, :meth:`.AutomapBase.prepare` may be invoked +multiple times, each time passing a different name to the +:paramref:`.AutomapBase.prepare.schema` parameter. 
The +:meth:`.AutomapBase.prepare` method keeps an internal list of +:class:`_schema.Table` objects that have already been mapped, and will add new +mappings only for those :class:`_schema.Table` objects that are new since the +last time :meth:`.AutomapBase.prepare` was run:: + + e = create_engine("postgresql://scott:tiger@localhost/test") + + Base.metadata.create_all(e) + + Base = automap_base() + + Base.prepare(e) + Base.prepare(e, schema="test_schema") + Base.prepare(e, schema="test_schema_2") + +.. versionadded:: 2.0 The :meth:`.AutomapBase.prepare` method may be called + any number of times; only newly added tables will be mapped + on each run. Previously in version 1.4 and earlier, multiple calls would + cause errors as it would attempt to re-map an already mapped class. + The previous workaround approach of invoking + :meth:`_schema.MetaData.reflect` directly remains available as well. + +Automapping same-named tables across multiple schemas +----------------------------------------------------- + +For the common case where multiple schemas may have same-named tables and +therefore would generate same-named classes, conflicts can be resolved either +through use of the :paramref:`.AutomapBase.prepare.classname_for_table` hook to +apply different classnames on a per-schema basis, or by using the +:paramref:`.AutomapBase.prepare.modulename_for_table` hook, which allows +disambiguation of same-named classes by changing their effective ``__module__`` +attribute. In the example below, this hook is used to create a ``__module__`` +attribute for all classes that is of the form ``mymodule.``, where +the schema name ``default`` is used if no schema is present:: + + e = create_engine("postgresql://scott:tiger@localhost/test") + + Base.metadata.create_all(e) + + + def module_name_for_table(cls, tablename, table): + if table.schema is not None: + return f"mymodule.{table.schema}" + else: + return f"mymodule.default" + + + Base = automap_base() + + Base.prepare(e, modulename_for_table=module_name_for_table) + Base.prepare( + e, schema="test_schema", modulename_for_table=module_name_for_table + ) + Base.prepare( + e, schema="test_schema_2", modulename_for_table=module_name_for_table + ) + +The same named-classes are organized into a hierarchical collection available +at :attr:`.AutomapBase.by_module`. This collection is traversed using the +dot-separated name of a particular package/module down into the desired +class name. + +.. note:: When using the :paramref:`.AutomapBase.prepare.modulename_for_table` + hook to return a new ``__module__`` that is not ``None``, the class is + **not** placed into the :attr:`.AutomapBase.classes` collection; only + classes that were not given an explicit modulename are placed here, as the + collection cannot represent same-named classes individually. + +In the example above, if the database contained a table named ``accounts`` in +all three of the default schema, the ``test_schema`` schema, and the +``test_schema_2`` schema, three separate classes will be available as:: + + Base.by_module.mymodule.default.accounts + Base.by_module.mymodule.test_schema.accounts + Base.by_module.mymodule.test_schema_2.accounts + +The default module namespace generated for all :class:`.AutomapBase` classes is +``sqlalchemy.ext.automap``. If no +:paramref:`.AutomapBase.prepare.modulename_for_table` hook is used, the +contents of :attr:`.AutomapBase.by_module` will be entirely within the +``sqlalchemy.ext.automap`` namespace (e.g. 
+``MyBase.by_module.sqlalchemy.ext.automap.<classname>``), which would contain +the same series of classes as what would be seen in +:attr:`.AutomapBase.classes`. Therefore it's generally only necessary to use +:attr:`.AutomapBase.by_module` when explicit ``__module__`` conventions are +present. + +.. versionadded:: 2.0 + + Added the :attr:`.AutomapBase.by_module` collection, which stores + classes within a named hierarchy based on dot-separated module names, + as well as the :paramref:`.AutomapBase.prepare.modulename_for_table` parameter + which allows for custom ``__module__`` schemes for automapped + classes. + + + +Specifying Classes Explicitly +============================= + +.. tip:: If explicit classes are expected to be prominent in an application, + consider using :class:`.DeferredReflection` instead. + +The :mod:`.sqlalchemy.ext.automap` extension allows classes to be defined +explicitly, in a way similar to that of the :class:`.DeferredReflection` class. +Classes that extend from :class:`.AutomapBase` act like regular declarative +classes, but are not immediately mapped after their construction, and are +instead mapped when we call :meth:`.AutomapBase.prepare`. The +:meth:`.AutomapBase.prepare` method will make use of the classes we've +established based on the table name we use. If our schema contains tables +``user`` and ``address``, we can define one or both of the classes to be used:: + + from sqlalchemy.ext.automap import automap_base + from sqlalchemy import create_engine + + # automap base + Base = automap_base() + + + # pre-declare User for the 'user' table + class User(Base): + __tablename__ = "user" + + # override schema elements like Columns + user_name = Column("name", String) + + # override relationships too, if desired. + # we must use the same name that automap would use for the + # relationship, and also must refer to the class name that automap will + # generate for "address" + address_collection = relationship("address", collection_class=set) + + + # reflect + engine = create_engine("sqlite:///mydatabase.db") + Base.prepare(autoload_with=engine) + + # we still have Address generated from the tablename "address", + # but User is the same as Base.classes.User now + + Address = Base.classes.address + + u1 = session.query(User).first() + print(u1.address_collection) + + # the backref is still there: + a1 = session.query(Address).first() + print(a1.user) + +Above, one of the more intricate details is that we illustrated overriding +one of the :func:`_orm.relationship` objects that automap would have created. +To do this, we needed to make sure the names match up with what automap +would normally generate, in that the relationship name would be +``User.address_collection`` and the name of the class referred to, from +automap's perspective, is called ``address``, even though we are referring to +it as ``Address`` within our usage of this class. + +Overriding Naming Schemes +========================= + +:mod:`.sqlalchemy.ext.automap` is tasked with producing mapped classes and +relationship names based on a schema, which means it has decision points in how +these names are determined. These three decision points are provided using +functions which can be passed to the :meth:`.AutomapBase.prepare` method, and +are known as :func:`.classname_for_table`, +:func:`.name_for_scalar_relationship`, +and :func:`.name_for_collection_relationship`. 
Any or all of these +functions are provided as in the example below, where we use a "camel case" +scheme for class names and a "pluralizer" for collection names using the +`Inflect `_ package:: + + import re + import inflect + + + def camelize_classname(base, tablename, table): + "Produce a 'camelized' class name, e.g." + "'words_and_underscores' -> 'WordsAndUnderscores'" + + return str( + tablename[0].upper() + + re.sub( + r"_([a-z])", + lambda m: m.group(1).upper(), + tablename[1:], + ) + ) + + + _pluralizer = inflect.engine() + + + def pluralize_collection(base, local_cls, referred_cls, constraint): + "Produce an 'uncamelized', 'pluralized' class name, e.g." + "'SomeTerm' -> 'some_terms'" + + referred_name = referred_cls.__name__ + uncamelized = re.sub( + r"[A-Z]", + lambda m: "_%s" % m.group(0).lower(), + referred_name, + )[1:] + pluralized = _pluralizer.plural(uncamelized) + return pluralized + + + from sqlalchemy.ext.automap import automap_base + + Base = automap_base() + + engine = create_engine("sqlite:///mydatabase.db") + + Base.prepare( + autoload_with=engine, + classname_for_table=camelize_classname, + name_for_collection_relationship=pluralize_collection, + ) + +From the above mapping, we would now have classes ``User`` and ``Address``, +where the collection from ``User`` to ``Address`` is called +``User.addresses``:: + + User, Address = Base.classes.User, Base.classes.Address + + u1 = User(addresses=[Address(email="foo@bar.com")]) + +Relationship Detection +====================== + +The vast majority of what automap accomplishes is the generation of +:func:`_orm.relationship` structures based on foreign keys. The mechanism +by which this works for many-to-one and one-to-many relationships is as +follows: + +1. A given :class:`_schema.Table`, known to be mapped to a particular class, + is examined for :class:`_schema.ForeignKeyConstraint` objects. + +2. From each :class:`_schema.ForeignKeyConstraint`, the remote + :class:`_schema.Table` + object present is matched up to the class to which it is to be mapped, + if any, else it is skipped. + +3. As the :class:`_schema.ForeignKeyConstraint` + we are examining corresponds to a + reference from the immediate mapped class, the relationship will be set up + as a many-to-one referring to the referred class; a corresponding + one-to-many backref will be created on the referred class referring + to this class. + +4. If any of the columns that are part of the + :class:`_schema.ForeignKeyConstraint` + are not nullable (e.g. ``nullable=False``), a + :paramref:`_orm.relationship.cascade` keyword argument + of ``all, delete-orphan`` will be added to the keyword arguments to + be passed to the relationship or backref. If the + :class:`_schema.ForeignKeyConstraint` reports that + :paramref:`_schema.ForeignKeyConstraint.ondelete` + is set to ``CASCADE`` for a not null or ``SET NULL`` for a nullable + set of columns, the option :paramref:`_orm.relationship.passive_deletes` + flag is set to ``True`` in the set of relationship keyword arguments. + Note that not all backends support reflection of ON DELETE. + +5. The names of the relationships are determined using the + :paramref:`.AutomapBase.prepare.name_for_scalar_relationship` and + :paramref:`.AutomapBase.prepare.name_for_collection_relationship` + callable functions. It is important to note that the default relationship + naming derives the name from the **the actual class name**. 
If you've + given a particular class an explicit name by declaring it, or specified an + alternate class naming scheme, that's the name from which the relationship + name will be derived. + +6. The classes are inspected for an existing mapped property matching these + names. If one is detected on one side, but none on the other side, + :class:`.AutomapBase` attempts to create a relationship on the missing side, + then uses the :paramref:`_orm.relationship.back_populates` + parameter in order to + point the new relationship to the other side. + +7. In the usual case where no relationship is on either side, + :meth:`.AutomapBase.prepare` produces a :func:`_orm.relationship` on the + "many-to-one" side and matches it to the other using the + :paramref:`_orm.relationship.backref` parameter. + +8. Production of the :func:`_orm.relationship` and optionally the + :func:`.backref` + is handed off to the :paramref:`.AutomapBase.prepare.generate_relationship` + function, which can be supplied by the end-user in order to augment + the arguments passed to :func:`_orm.relationship` or :func:`.backref` or to + make use of custom implementations of these functions. + +Custom Relationship Arguments +----------------------------- + +The :paramref:`.AutomapBase.prepare.generate_relationship` hook can be used +to add parameters to relationships. For most cases, we can make use of the +existing :func:`.automap.generate_relationship` function to return +the object, after augmenting the given keyword dictionary with our own +arguments. + +Below is an illustration of how to send +:paramref:`_orm.relationship.cascade` and +:paramref:`_orm.relationship.passive_deletes` +options along to all one-to-many relationships:: + + from sqlalchemy.ext.automap import generate_relationship + from sqlalchemy.orm import interfaces + + + def _gen_relationship( + base, direction, return_fn, attrname, local_cls, referred_cls, **kw + ): + if direction is interfaces.ONETOMANY: + kw["cascade"] = "all, delete-orphan" + kw["passive_deletes"] = True + # make use of the built-in function to actually return + # the result. + return generate_relationship( + base, direction, return_fn, attrname, local_cls, referred_cls, **kw + ) + + + from sqlalchemy.ext.automap import automap_base + from sqlalchemy import create_engine + + # automap base + Base = automap_base() + + engine = create_engine("sqlite:///mydatabase.db") + Base.prepare(autoload_with=engine, generate_relationship=_gen_relationship) + +Many-to-Many relationships +-------------------------- + +:mod:`.sqlalchemy.ext.automap` will generate many-to-many relationships, e.g. +those which contain a ``secondary`` argument. The process for producing these +is as follows: + +1. A given :class:`_schema.Table` is examined for + :class:`_schema.ForeignKeyConstraint` + objects, before any mapped class has been assigned to it. + +2. If the table contains two and exactly two + :class:`_schema.ForeignKeyConstraint` + objects, and all columns within this table are members of these two + :class:`_schema.ForeignKeyConstraint` objects, the table is assumed to be a + "secondary" table, and will **not be mapped directly**. + +3. The two (or one, for self-referential) external tables to which the + :class:`_schema.Table` + refers to are matched to the classes to which they will be + mapped, if any. + +4. If mapped classes for both sides are located, a many-to-many bi-directional + :func:`_orm.relationship` / :func:`.backref` + pair is created between the two + classes. + +5. 
The override logic for many-to-many works the same as that of one-to-many/ + many-to-one; the :func:`.generate_relationship` function is called upon + to generate the structures and existing attributes will be maintained. + +Relationships with Inheritance +------------------------------ + +:mod:`.sqlalchemy.ext.automap` will not generate any relationships between +two classes that are in an inheritance relationship. That is, with two +classes given as follows:: + + class Employee(Base): + __tablename__ = "employee" + id = Column(Integer, primary_key=True) + type = Column(String(50)) + __mapper_args__ = { + "polymorphic_identity": "employee", + "polymorphic_on": type, + } + + + class Engineer(Employee): + __tablename__ = "engineer" + id = Column(Integer, ForeignKey("employee.id"), primary_key=True) + __mapper_args__ = { + "polymorphic_identity": "engineer", + } + +The foreign key from ``Engineer`` to ``Employee`` is used not for a +relationship, but to establish joined inheritance between the two classes. + +Note that this means automap will not generate *any* relationships +for foreign keys that link from a subclass to a superclass. If a mapping +has actual relationships from subclass to superclass as well, those +need to be explicit. Below, as we have two separate foreign keys +from ``Engineer`` to ``Employee``, we need to set up both the relationship +we want as well as the ``inherit_condition``, as these are not things +SQLAlchemy can guess:: + + class Employee(Base): + __tablename__ = "employee" + id = Column(Integer, primary_key=True) + type = Column(String(50)) + + __mapper_args__ = { + "polymorphic_identity": "employee", + "polymorphic_on": type, + } + + + class Engineer(Employee): + __tablename__ = "engineer" + id = Column(Integer, ForeignKey("employee.id"), primary_key=True) + favorite_employee_id = Column(Integer, ForeignKey("employee.id")) + + favorite_employee = relationship( + Employee, foreign_keys=favorite_employee_id + ) + + __mapper_args__ = { + "polymorphic_identity": "engineer", + "inherit_condition": id == Employee.id, + } + +Handling Simple Naming Conflicts +-------------------------------- + +In the case of naming conflicts during mapping, override any of +:func:`.classname_for_table`, :func:`.name_for_scalar_relationship`, +and :func:`.name_for_collection_relationship` as needed. For example, if +automap is attempting to name a many-to-one relationship the same as an +existing column, an alternate convention can be conditionally selected. Given +a schema: + +.. sourcecode:: sql + + CREATE TABLE table_a ( + id INTEGER PRIMARY KEY + ); + + CREATE TABLE table_b ( + id INTEGER PRIMARY KEY, + table_a INTEGER, + FOREIGN KEY(table_a) REFERENCES table_a(id) + ); + +The above schema will first automap the ``table_a`` table as a class named +``table_a``; it will then automap a relationship onto the class for ``table_b`` +with the same name as this related class, e.g. ``table_a``. This +relationship name conflicts with the mapping column ``table_b.table_a``, +and will emit an error on mapping. + +We can resolve this conflict by using an underscore as follows:: + + def name_for_scalar_relationship( + base, local_cls, referred_cls, constraint + ): + name = referred_cls.__name__.lower() + local_table = local_cls.__table__ + if name in local_table.columns: + newname = name + "_" + warnings.warn( + "Already detected name %s present. 
using %s" % (name, newname) + ) + return newname + return name + + + Base.prepare( + autoload_with=engine, + name_for_scalar_relationship=name_for_scalar_relationship, + ) + +Alternatively, we can change the name on the column side. The columns +that are mapped can be modified using the technique described at +:ref:`mapper_column_distinct_names`, by assigning the column explicitly +to a new name:: + + Base = automap_base() + + + class TableB(Base): + __tablename__ = "table_b" + _table_a = Column("table_a", ForeignKey("table_a.id")) + + + Base.prepare(autoload_with=engine) + +Using Automap with Explicit Declarations +======================================== + +As noted previously, automap has no dependency on reflection, and can make +use of any collection of :class:`_schema.Table` objects within a +:class:`_schema.MetaData` +collection. From this, it follows that automap can also be used to +generate missing relationships given an otherwise complete model that fully +defines table metadata:: + + from sqlalchemy.ext.automap import automap_base + from sqlalchemy import Column, Integer, String, ForeignKey + + Base = automap_base() + + + class User(Base): + __tablename__ = "user" + + id = Column(Integer, primary_key=True) + name = Column(String) + + + class Address(Base): + __tablename__ = "address" + + id = Column(Integer, primary_key=True) + email = Column(String) + user_id = Column(ForeignKey("user.id")) + + + # produce relationships + Base.prepare() + + # mapping is complete, with "address_collection" and + # "user" relationships + a1 = Address(email="u1") + a2 = Address(email="u2") + u1 = User(address_collection=[a1, a2]) + assert a1.user is u1 + +Above, given mostly complete ``User`` and ``Address`` mappings, the +:class:`_schema.ForeignKey` which we defined on ``Address.user_id`` allowed a +bidirectional relationship pair ``Address.user`` and +``User.address_collection`` to be generated on the mapped classes. + +Note that when subclassing :class:`.AutomapBase`, +the :meth:`.AutomapBase.prepare` method is required; if not called, the classes +we've declared are in an un-mapped state. + + +.. _automap_intercepting_columns: + +Intercepting Column Definitions +=============================== + +The :class:`_schema.MetaData` and :class:`_schema.Table` objects support an +event hook :meth:`_events.DDLEvents.column_reflect` that may be used to intercept +the information reflected about a database column before the :class:`_schema.Column` +object is constructed. For example if we wanted to map columns using a +naming convention such as ``"attr_<columnname>"``, the event could +be applied as:: + + @event.listens_for(Base.metadata, "column_reflect") + def column_reflect(inspector, table, column_info): + # set column.key = "attr_<columnname>" + column_info["key"] = "attr_%s" % column_info["name"].lower() + + + # run reflection + Base.prepare(autoload_with=engine) + +.. versionadded:: 1.4.0b2 the :meth:`_events.DDLEvents.column_reflect` event + may be applied to a :class:`_schema.MetaData` object. + +.. 
seealso:: + + :meth:`_events.DDLEvents.column_reflect` + + :ref:`mapper_automated_reflection_schemes` - in the ORM mapping documentation + + +""" # noqa +from __future__ import annotations + +import dataclasses +from typing import Any +from typing import Callable +from typing import cast +from typing import ClassVar +from typing import Dict +from typing import List +from typing import NoReturn +from typing import Optional +from typing import overload +from typing import Set +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING +from typing import TypeVar +from typing import Union + +from .. import util +from ..orm import backref +from ..orm import declarative_base as _declarative_base +from ..orm import exc as orm_exc +from ..orm import interfaces +from ..orm import relationship +from ..orm.decl_base import _DeferredMapperConfig +from ..orm.mapper import _CONFIGURE_MUTEX +from ..schema import ForeignKeyConstraint +from ..sql import and_ +from ..util import Properties +from ..util.typing import Protocol + +if TYPE_CHECKING: + from ..engine.base import Engine + from ..orm.base import RelationshipDirection + from ..orm.relationships import ORMBackrefArgument + from ..orm.relationships import Relationship + from ..sql.schema import Column + from ..sql.schema import MetaData + from ..sql.schema import Table + from ..util import immutabledict + + +_KT = TypeVar("_KT", bound=Any) +_VT = TypeVar("_VT", bound=Any) + + +class PythonNameForTableType(Protocol): + def __call__( + self, base: Type[Any], tablename: str, table: Table + ) -> str: ... + + +def classname_for_table( + base: Type[Any], + tablename: str, + table: Table, +) -> str: + """Return the class name that should be used, given the name + of a table. + + The default implementation is:: + + return str(tablename) + + Alternate implementations can be specified using the + :paramref:`.AutomapBase.prepare.classname_for_table` + parameter. + + :param base: the :class:`.AutomapBase` class doing the prepare. + + :param tablename: string name of the :class:`_schema.Table`. + + :param table: the :class:`_schema.Table` object itself. + + :return: a string class name. + + .. note:: + + In Python 2, the string used for the class name **must** be a + non-Unicode object, e.g. a ``str()`` object. The ``.name`` attribute + of :class:`_schema.Table` is typically a Python unicode subclass, + so the + ``str()`` function should be applied to this name, after accounting for + any non-ASCII characters. + + """ + return str(tablename) + + +class NameForScalarRelationshipType(Protocol): + def __call__( + self, + base: Type[Any], + local_cls: Type[Any], + referred_cls: Type[Any], + constraint: ForeignKeyConstraint, + ) -> str: ... + + +def name_for_scalar_relationship( + base: Type[Any], + local_cls: Type[Any], + referred_cls: Type[Any], + constraint: ForeignKeyConstraint, +) -> str: + """Return the attribute name that should be used to refer from one + class to another, for a scalar object reference. + + The default implementation is:: + + return referred_cls.__name__.lower() + + Alternate implementations can be specified using the + :paramref:`.AutomapBase.prepare.name_for_scalar_relationship` + parameter. + + :param base: the :class:`.AutomapBase` class doing the prepare. + + :param local_cls: the class to be mapped on the local side. + + :param referred_cls: the class to be mapped on the referring side. + + :param constraint: the :class:`_schema.ForeignKeyConstraint` that is being + inspected to produce this relationship. 
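+
+    For illustration only (a sketch, not part of the stock API), a custom
+    implementation passed to
+    :paramref:`.AutomapBase.prepare.name_for_scalar_relationship` might
+    append a suffix when the default name would collide with a mapped
+    column; the ``_ref`` suffix below is an arbitrary choice, and ``Base``
+    and ``engine`` are assumed to be set up as in the module-level
+    examples::
+
+        def scalar_name_with_suffix(base, local_cls, referred_cls, constraint):
+            # start from the default scheme
+            name = referred_cls.__name__.lower()
+            # disambiguate if the name would shadow an existing column
+            if name in local_cls.__table__.columns:
+                name += "_ref"
+            return name
+
+        Base.prepare(
+            autoload_with=engine,
+            name_for_scalar_relationship=scalar_name_with_suffix,
+        )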
+ + """ + return referred_cls.__name__.lower() + + +class NameForCollectionRelationshipType(Protocol): + def __call__( + self, + base: Type[Any], + local_cls: Type[Any], + referred_cls: Type[Any], + constraint: ForeignKeyConstraint, + ) -> str: ... + + +def name_for_collection_relationship( + base: Type[Any], + local_cls: Type[Any], + referred_cls: Type[Any], + constraint: ForeignKeyConstraint, +) -> str: + """Return the attribute name that should be used to refer from one + class to another, for a collection reference. + + The default implementation is:: + + return referred_cls.__name__.lower() + "_collection" + + Alternate implementations + can be specified using the + :paramref:`.AutomapBase.prepare.name_for_collection_relationship` + parameter. + + :param base: the :class:`.AutomapBase` class doing the prepare. + + :param local_cls: the class to be mapped on the local side. + + :param referred_cls: the class to be mapped on the referring side. + + :param constraint: the :class:`_schema.ForeignKeyConstraint` that is being + inspected to produce this relationship. + + """ + return referred_cls.__name__.lower() + "_collection" + + +class GenerateRelationshipType(Protocol): + @overload + def __call__( + self, + base: Type[Any], + direction: RelationshipDirection, + return_fn: Callable[..., Relationship[Any]], + attrname: str, + local_cls: Type[Any], + referred_cls: Type[Any], + **kw: Any, + ) -> Relationship[Any]: ... + + @overload + def __call__( + self, + base: Type[Any], + direction: RelationshipDirection, + return_fn: Callable[..., ORMBackrefArgument], + attrname: str, + local_cls: Type[Any], + referred_cls: Type[Any], + **kw: Any, + ) -> ORMBackrefArgument: ... + + def __call__( + self, + base: Type[Any], + direction: RelationshipDirection, + return_fn: Union[ + Callable[..., Relationship[Any]], Callable[..., ORMBackrefArgument] + ], + attrname: str, + local_cls: Type[Any], + referred_cls: Type[Any], + **kw: Any, + ) -> Union[ORMBackrefArgument, Relationship[Any]]: ... + + +@overload +def generate_relationship( + base: Type[Any], + direction: RelationshipDirection, + return_fn: Callable[..., Relationship[Any]], + attrname: str, + local_cls: Type[Any], + referred_cls: Type[Any], + **kw: Any, +) -> Relationship[Any]: ... + + +@overload +def generate_relationship( + base: Type[Any], + direction: RelationshipDirection, + return_fn: Callable[..., ORMBackrefArgument], + attrname: str, + local_cls: Type[Any], + referred_cls: Type[Any], + **kw: Any, +) -> ORMBackrefArgument: ... + + +def generate_relationship( + base: Type[Any], + direction: RelationshipDirection, + return_fn: Union[ + Callable[..., Relationship[Any]], Callable[..., ORMBackrefArgument] + ], + attrname: str, + local_cls: Type[Any], + referred_cls: Type[Any], + **kw: Any, +) -> Union[Relationship[Any], ORMBackrefArgument]: + r"""Generate a :func:`_orm.relationship` or :func:`.backref` + on behalf of two + mapped classes. + + An alternate implementation of this function can be specified using the + :paramref:`.AutomapBase.prepare.generate_relationship` parameter. + + The default implementation of this function is as follows:: + + if return_fn is backref: + return return_fn(attrname, **kw) + elif return_fn is relationship: + return return_fn(referred_cls, **kw) + else: + raise TypeError("Unknown relationship function: %s" % return_fn) + + :param base: the :class:`.AutomapBase` class doing the prepare. 
+ + :param direction: indicate the "direction" of the relationship; this will + be one of :data:`.ONETOMANY`, :data:`.MANYTOONE`, :data:`.MANYTOMANY`. + + :param return_fn: the function that is used by default to create the + relationship. This will be either :func:`_orm.relationship` or + :func:`.backref`. The :func:`.backref` function's result will be used to + produce a new :func:`_orm.relationship` in a second step, + so it is critical + that user-defined implementations correctly differentiate between the two + functions, if a custom relationship function is being used. + + :param attrname: the attribute name to which this relationship is being + assigned. If the value of :paramref:`.generate_relationship.return_fn` is + the :func:`.backref` function, then this name is the name that is being + assigned to the backref. + + :param local_cls: the "local" class to which this relationship or backref + will be locally present. + + :param referred_cls: the "referred" class to which the relationship or + backref refers to. + + :param \**kw: all additional keyword arguments are passed along to the + function. + + :return: a :func:`_orm.relationship` or :func:`.backref` construct, + as dictated + by the :paramref:`.generate_relationship.return_fn` parameter. + + """ + + if return_fn is backref: + return return_fn(attrname, **kw) + elif return_fn is relationship: + return return_fn(referred_cls, **kw) + else: + raise TypeError("Unknown relationship function: %s" % return_fn) + + +ByModuleProperties = Properties[Union["ByModuleProperties", Type[Any]]] + + +class AutomapBase: + """Base class for an "automap" schema. + + The :class:`.AutomapBase` class can be compared to the "declarative base" + class that is produced by the :func:`.declarative.declarative_base` + function. In practice, the :class:`.AutomapBase` class is always used + as a mixin along with an actual declarative base. + + A new subclassable :class:`.AutomapBase` is typically instantiated + using the :func:`.automap_base` function. + + .. seealso:: + + :ref:`automap_toplevel` + + """ + + __abstract__ = True + + classes: ClassVar[Properties[Type[Any]]] + """An instance of :class:`.util.Properties` containing classes. + + This object behaves much like the ``.c`` collection on a table. Classes + are present under the name they were given, e.g.:: + + Base = automap_base() + Base.prepare(autoload_with=some_engine) + + User, Address = Base.classes.User, Base.classes.Address + + For class names that overlap with a method name of + :class:`.util.Properties`, such as ``items()``, the getitem form + is also supported:: + + Item = Base.classes["items"] + + """ + + by_module: ClassVar[ByModuleProperties] + """An instance of :class:`.util.Properties` containing a hierarchal + structure of dot-separated module names linked to classes. + + This collection is an alternative to the :attr:`.AutomapBase.classes` + collection that is useful when making use of the + :paramref:`.AutomapBase.prepare.modulename_for_table` parameter, which will + apply distinct ``__module__`` attributes to generated classes. + + The default ``__module__`` an automap-generated class is + ``sqlalchemy.ext.automap``; to access this namespace using + :attr:`.AutomapBase.by_module` looks like:: + + User = Base.by_module.sqlalchemy.ext.automap.User + + If a class had a ``__module__`` of ``mymodule.account``, accessing + this namespace looks like:: + + MyClass = Base.by_module.mymodule.account.MyClass + + .. versionadded:: 2.0 + + .. 
seealso:: + + :ref:`automap_by_module` + + """ + + metadata: ClassVar[MetaData] + """Refers to the :class:`_schema.MetaData` collection that will be used + for new :class:`_schema.Table` objects. + + .. seealso:: + + :ref:`orm_declarative_metadata` + + """ + + _sa_automapbase_bookkeeping: ClassVar[_Bookkeeping] + + @classmethod + @util.deprecated_params( + engine=( + "2.0", + "The :paramref:`_automap.AutomapBase.prepare.engine` parameter " + "is deprecated and will be removed in a future release. " + "Please use the " + ":paramref:`_automap.AutomapBase.prepare.autoload_with` " + "parameter.", + ), + reflect=( + "2.0", + "The :paramref:`_automap.AutomapBase.prepare.reflect` " + "parameter is deprecated and will be removed in a future " + "release. Reflection is enabled when " + ":paramref:`_automap.AutomapBase.prepare.autoload_with` " + "is passed.", + ), + ) + def prepare( + cls: Type[AutomapBase], + autoload_with: Optional[Engine] = None, + engine: Optional[Any] = None, + reflect: bool = False, + schema: Optional[str] = None, + classname_for_table: Optional[PythonNameForTableType] = None, + modulename_for_table: Optional[PythonNameForTableType] = None, + collection_class: Optional[Any] = None, + name_for_scalar_relationship: Optional[ + NameForScalarRelationshipType + ] = None, + name_for_collection_relationship: Optional[ + NameForCollectionRelationshipType + ] = None, + generate_relationship: Optional[GenerateRelationshipType] = None, + reflection_options: Union[ + Dict[_KT, _VT], immutabledict[_KT, _VT] + ] = util.EMPTY_DICT, + ) -> None: + """Extract mapped classes and relationships from the + :class:`_schema.MetaData` and perform mappings. + + For full documentation and examples see + :ref:`automap_basic_use`. + + :param autoload_with: an :class:`_engine.Engine` or + :class:`_engine.Connection` with which + to perform schema reflection; when specified, the + :meth:`_schema.MetaData.reflect` method will be invoked within + the scope of this method. + + :param engine: legacy; use :paramref:`.AutomapBase.autoload_with`. + Used to indicate the :class:`_engine.Engine` or + :class:`_engine.Connection` with which to reflect tables with, + if :paramref:`.AutomapBase.reflect` is True. + + :param reflect: legacy; use :paramref:`.AutomapBase.autoload_with`. + Indicates that :meth:`_schema.MetaData.reflect` should be invoked. + + :param classname_for_table: callable function which will be used to + produce new class names, given a table name. Defaults to + :func:`.classname_for_table`. + + :param modulename_for_table: callable function which will be used to + produce the effective ``__module__`` for an internally generated + class, to allow for multiple classes of the same name in a single + automap base which would be in different "modules". + + Defaults to ``None``, which will indicate that ``__module__`` will not + be set explicitly; the Python runtime will use the value + ``sqlalchemy.ext.automap`` for these classes. + + When assigning ``__module__`` to generated classes, they can be + accessed based on dot-separated module names using the + :attr:`.AutomapBase.by_module` collection. Classes that have + an explicit ``__module_`` assigned using this hook do **not** get + placed into the :attr:`.AutomapBase.classes` collection, only + into :attr:`.AutomapBase.by_module`. + + .. versionadded:: 2.0 + + .. seealso:: + + :ref:`automap_by_module` + + :param name_for_scalar_relationship: callable function which will be + used to produce relationship names for scalar relationships. 
Defaults + to :func:`.name_for_scalar_relationship`. + + :param name_for_collection_relationship: callable function which will + be used to produce relationship names for collection-oriented + relationships. Defaults to :func:`.name_for_collection_relationship`. + + :param generate_relationship: callable function which will be used to + actually generate :func:`_orm.relationship` and :func:`.backref` + constructs. Defaults to :func:`.generate_relationship`. + + :param collection_class: the Python collection class that will be used + when a new :func:`_orm.relationship` + object is created that represents a + collection. Defaults to ``list``. + + :param schema: Schema name to reflect when reflecting tables using + the :paramref:`.AutomapBase.prepare.autoload_with` parameter. The name + is passed to the :paramref:`_schema.MetaData.reflect.schema` parameter + of :meth:`_schema.MetaData.reflect`. When omitted, the default schema + in use by the database connection is used. + + .. note:: The :paramref:`.AutomapBase.prepare.schema` + parameter supports reflection of a single schema at a time. + In order to include tables from many schemas, use + multiple calls to :meth:`.AutomapBase.prepare`. + + For an overview of multiple-schema automap including the use + of additional naming conventions to resolve table name + conflicts, see the section :ref:`automap_by_module`. + + .. versionadded:: 2.0 :meth:`.AutomapBase.prepare` supports being + directly invoked any number of times, keeping track of tables + that have already been processed to avoid processing them + a second time. + + :param reflection_options: When present, this dictionary of options + will be passed to :meth:`_schema.MetaData.reflect` + to supply general reflection-specific options like ``only`` and/or + dialect-specific options like ``oracle_resolve_synonyms``. + + .. 
versionadded:: 1.4 + + """ + + for mr in cls.__mro__: + if "_sa_automapbase_bookkeeping" in mr.__dict__: + automap_base = cast("Type[AutomapBase]", mr) + break + else: + assert False, "Can't locate automap base in class hierarchy" + + glbls = globals() + if classname_for_table is None: + classname_for_table = glbls["classname_for_table"] + if name_for_scalar_relationship is None: + name_for_scalar_relationship = glbls[ + "name_for_scalar_relationship" + ] + if name_for_collection_relationship is None: + name_for_collection_relationship = glbls[ + "name_for_collection_relationship" + ] + if generate_relationship is None: + generate_relationship = glbls["generate_relationship"] + if collection_class is None: + collection_class = list + + if autoload_with: + reflect = True + + if engine: + autoload_with = engine + + if reflect: + assert autoload_with + opts = dict( + schema=schema, + extend_existing=True, + autoload_replace=False, + ) + if reflection_options: + opts.update(reflection_options) + cls.metadata.reflect(autoload_with, **opts) # type: ignore[arg-type] # noqa: E501 + + with _CONFIGURE_MUTEX: + table_to_map_config: Union[ + Dict[Optional[Table], _DeferredMapperConfig], + Dict[Table, _DeferredMapperConfig], + ] = { + cast("Table", m.local_table): m + for m in _DeferredMapperConfig.classes_for_base( + cls, sort=False + ) + } + + many_to_many: List[ + Tuple[Table, Table, List[ForeignKeyConstraint], Table] + ] + many_to_many = [] + + bookkeeping = automap_base._sa_automapbase_bookkeeping + metadata_tables = cls.metadata.tables + + for table_key in set(metadata_tables).difference( + bookkeeping.table_keys + ): + table = metadata_tables[table_key] + bookkeeping.table_keys.add(table_key) + + lcl_m2m, rem_m2m, m2m_const = _is_many_to_many(cls, table) + if lcl_m2m is not None: + assert rem_m2m is not None + assert m2m_const is not None + many_to_many.append((lcl_m2m, rem_m2m, m2m_const, table)) + elif not table.primary_key: + continue + elif table not in table_to_map_config: + clsdict: Dict[str, Any] = {"__table__": table} + if modulename_for_table is not None: + new_module = modulename_for_table( + cls, table.name, table + ) + if new_module is not None: + clsdict["__module__"] = new_module + else: + new_module = None + + newname = classname_for_table(cls, table.name, table) + if new_module is None and newname in cls.classes: + util.warn( + "Ignoring duplicate class name " + f"'{newname}' " + "received in automap base for table " + f"{table.key} without " + "``__module__`` being set; consider using the " + "``modulename_for_table`` hook" + ) + continue + + mapped_cls = type( + newname, + (automap_base,), + clsdict, + ) + map_config = _DeferredMapperConfig.config_for_cls( + mapped_cls + ) + assert map_config.cls.__name__ == newname + if new_module is None: + cls.classes[newname] = mapped_cls + + by_module_properties: ByModuleProperties = cls.by_module + for token in map_config.cls.__module__.split("."): + if token not in by_module_properties: + by_module_properties[token] = util.Properties({}) + + props = by_module_properties[token] + + # we can assert this because the clsregistry + # module would have raised if there was a mismatch + # between modules/classes already. 
+ # see test_cls_schema_name_conflict + assert isinstance(props, Properties) + by_module_properties = props + + by_module_properties[map_config.cls.__name__] = mapped_cls + + table_to_map_config[table] = map_config + + for map_config in table_to_map_config.values(): + _relationships_for_fks( + automap_base, + map_config, + table_to_map_config, + collection_class, + name_for_scalar_relationship, + name_for_collection_relationship, + generate_relationship, + ) + + for lcl_m2m, rem_m2m, m2m_const, table in many_to_many: + _m2m_relationship( + automap_base, + lcl_m2m, + rem_m2m, + m2m_const, + table, + table_to_map_config, + collection_class, + name_for_scalar_relationship, + name_for_collection_relationship, + generate_relationship, + ) + + for map_config in _DeferredMapperConfig.classes_for_base( + automap_base + ): + map_config.map() + + _sa_decl_prepare = True + """Indicate that the mapping of classes should be deferred. + + The presence of this attribute name indicates to declarative + that the call to mapper() should not occur immediately; instead, + information about the table and attributes to be mapped are gathered + into an internal structure called _DeferredMapperConfig. These + objects can be collected later using classes_for_base(), additional + mapping decisions can be made, and then the map() method will actually + apply the mapping. + + The only real reason this deferral of the whole + thing is needed is to support primary key columns that aren't reflected + yet when the class is declared; everything else can theoretically be + added to the mapper later. However, the _DeferredMapperConfig is a + nice interface in any case which exists at that not usually exposed point + at which declarative has the class and the Table but hasn't called + mapper() yet. + + """ + + @classmethod + def _sa_raise_deferred_config(cls) -> NoReturn: + raise orm_exc.UnmappedClassError( + cls, + msg="Class %s is a subclass of AutomapBase. " + "Mappings are not produced until the .prepare() " + "method is called on the class hierarchy." + % orm_exc._safe_cls_name(cls), + ) + + +@dataclasses.dataclass +class _Bookkeeping: + __slots__ = ("table_keys",) + + table_keys: Set[str] + + +def automap_base( + declarative_base: Optional[Type[Any]] = None, **kw: Any +) -> Any: + r"""Produce a declarative automap base. + + This function produces a new base class that is a product of the + :class:`.AutomapBase` class as well a declarative base produced by + :func:`.declarative.declarative_base`. + + All parameters other than ``declarative_base`` are keyword arguments + that are passed directly to the :func:`.declarative.declarative_base` + function. + + :param declarative_base: an existing class produced by + :func:`.declarative.declarative_base`. When this is passed, the function + no longer invokes :func:`.declarative.declarative_base` itself, and all + other keyword arguments are ignored. + + :param \**kw: keyword arguments are passed along to + :func:`.declarative.declarative_base`. 
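+
+    A minimal usage sketch (the SQLite URL and the ``user`` table name are
+    placeholders; see the module-level documentation above for complete
+    examples)::
+
+        from sqlalchemy import create_engine
+        from sqlalchemy.ext.automap import automap_base
+
+        Base = automap_base()
+        engine = create_engine("sqlite:///mydatabase.db")
+
+        # reflect the schema and generate mapped classes plus relationships
+        Base.prepare(autoload_with=engine)
+
+        # access a generated class, assuming a table named "user" exists
+        User = Base.classes.user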
+ + """ + if declarative_base is None: + Base = _declarative_base(**kw) + else: + Base = declarative_base + + return type( + Base.__name__, + (AutomapBase, Base), + { + "__abstract__": True, + "classes": util.Properties({}), + "by_module": util.Properties({}), + "_sa_automapbase_bookkeeping": _Bookkeeping(set()), + }, + ) + + +def _is_many_to_many( + automap_base: Type[Any], table: Table +) -> Tuple[ + Optional[Table], Optional[Table], Optional[list[ForeignKeyConstraint]] +]: + fk_constraints = [ + const + for const in table.constraints + if isinstance(const, ForeignKeyConstraint) + ] + if len(fk_constraints) != 2: + return None, None, None + + cols: List[Column[Any]] = sum( + [ + [fk.parent for fk in fk_constraint.elements] + for fk_constraint in fk_constraints + ], + [], + ) + + if set(cols) != set(table.c): + return None, None, None + + return ( + fk_constraints[0].elements[0].column.table, + fk_constraints[1].elements[0].column.table, + fk_constraints, + ) + + +def _relationships_for_fks( + automap_base: Type[Any], + map_config: _DeferredMapperConfig, + table_to_map_config: Union[ + Dict[Optional[Table], _DeferredMapperConfig], + Dict[Table, _DeferredMapperConfig], + ], + collection_class: type, + name_for_scalar_relationship: NameForScalarRelationshipType, + name_for_collection_relationship: NameForCollectionRelationshipType, + generate_relationship: GenerateRelationshipType, +) -> None: + local_table = cast("Optional[Table]", map_config.local_table) + local_cls = cast( + "Optional[Type[Any]]", map_config.cls + ) # derived from a weakref, may be None + + if local_table is None or local_cls is None: + return + for constraint in local_table.constraints: + if isinstance(constraint, ForeignKeyConstraint): + fks = constraint.elements + referred_table = fks[0].column.table + referred_cfg = table_to_map_config.get(referred_table, None) + if referred_cfg is None: + continue + referred_cls = referred_cfg.cls + + if local_cls is not referred_cls and issubclass( + local_cls, referred_cls + ): + continue + + relationship_name = name_for_scalar_relationship( + automap_base, local_cls, referred_cls, constraint + ) + backref_name = name_for_collection_relationship( + automap_base, referred_cls, local_cls, constraint + ) + + o2m_kws: Dict[str, Union[str, bool]] = {} + nullable = False not in {fk.parent.nullable for fk in fks} + if not nullable: + o2m_kws["cascade"] = "all, delete-orphan" + + if ( + constraint.ondelete + and constraint.ondelete.lower() == "cascade" + ): + o2m_kws["passive_deletes"] = True + else: + if ( + constraint.ondelete + and constraint.ondelete.lower() == "set null" + ): + o2m_kws["passive_deletes"] = True + + create_backref = backref_name not in referred_cfg.properties + + if relationship_name not in map_config.properties: + if create_backref: + backref_obj = generate_relationship( + automap_base, + interfaces.ONETOMANY, + backref, + backref_name, + referred_cls, + local_cls, + collection_class=collection_class, + **o2m_kws, + ) + else: + backref_obj = None + rel = generate_relationship( + automap_base, + interfaces.MANYTOONE, + relationship, + relationship_name, + local_cls, + referred_cls, + foreign_keys=[fk.parent for fk in constraint.elements], + backref=backref_obj, + remote_side=[fk.column for fk in constraint.elements], + ) + if rel is not None: + map_config.properties[relationship_name] = rel + if not create_backref: + referred_cfg.properties[ + backref_name + ].back_populates = relationship_name # type: ignore[union-attr] # noqa: E501 + elif create_backref: + rel = 
generate_relationship( + automap_base, + interfaces.ONETOMANY, + relationship, + backref_name, + referred_cls, + local_cls, + foreign_keys=[fk.parent for fk in constraint.elements], + back_populates=relationship_name, + collection_class=collection_class, + **o2m_kws, + ) + if rel is not None: + referred_cfg.properties[backref_name] = rel + map_config.properties[ + relationship_name + ].back_populates = backref_name # type: ignore[union-attr] + + +def _m2m_relationship( + automap_base: Type[Any], + lcl_m2m: Table, + rem_m2m: Table, + m2m_const: List[ForeignKeyConstraint], + table: Table, + table_to_map_config: Union[ + Dict[Optional[Table], _DeferredMapperConfig], + Dict[Table, _DeferredMapperConfig], + ], + collection_class: type, + name_for_scalar_relationship: NameForCollectionRelationshipType, + name_for_collection_relationship: NameForCollectionRelationshipType, + generate_relationship: GenerateRelationshipType, +) -> None: + map_config = table_to_map_config.get(lcl_m2m, None) + referred_cfg = table_to_map_config.get(rem_m2m, None) + if map_config is None or referred_cfg is None: + return + + local_cls = map_config.cls + referred_cls = referred_cfg.cls + + relationship_name = name_for_collection_relationship( + automap_base, local_cls, referred_cls, m2m_const[0] + ) + backref_name = name_for_collection_relationship( + automap_base, referred_cls, local_cls, m2m_const[1] + ) + + create_backref = backref_name not in referred_cfg.properties + + if table in table_to_map_config: + overlaps = "__*" + else: + overlaps = None + + if relationship_name not in map_config.properties: + if create_backref: + backref_obj = generate_relationship( + automap_base, + interfaces.MANYTOMANY, + backref, + backref_name, + referred_cls, + local_cls, + collection_class=collection_class, + overlaps=overlaps, + ) + else: + backref_obj = None + + rel = generate_relationship( + automap_base, + interfaces.MANYTOMANY, + relationship, + relationship_name, + local_cls, + referred_cls, + overlaps=overlaps, + secondary=table, + primaryjoin=and_( + fk.column == fk.parent for fk in m2m_const[0].elements + ), # type: ignore [arg-type] + secondaryjoin=and_( + fk.column == fk.parent for fk in m2m_const[1].elements + ), # type: ignore [arg-type] + backref=backref_obj, + collection_class=collection_class, + ) + if rel is not None: + map_config.properties[relationship_name] = rel + + if not create_backref: + referred_cfg.properties[ + backref_name + ].back_populates = relationship_name # type: ignore[union-attr] # noqa: E501 + elif create_backref: + rel = generate_relationship( + automap_base, + interfaces.MANYTOMANY, + relationship, + backref_name, + referred_cls, + local_cls, + overlaps=overlaps, + secondary=table, + primaryjoin=and_( + fk.column == fk.parent for fk in m2m_const[1].elements + ), # type: ignore [arg-type] + secondaryjoin=and_( + fk.column == fk.parent for fk in m2m_const[0].elements + ), # type: ignore [arg-type] + back_populates=relationship_name, + collection_class=collection_class, + ) + if rel is not None: + referred_cfg.properties[backref_name] = rel + map_config.properties[ + relationship_name + ].back_populates = backref_name # type: ignore[union-attr] diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/ext/baked.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/ext/baked.py new file mode 100644 index 0000000000000000000000000000000000000000..cd3e087931e843b4fb822a2741f72bade0bc823a --- /dev/null +++ 
b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/ext/baked.py @@ -0,0 +1,570 @@ +# ext/baked.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php +# mypy: ignore-errors + + +"""Baked query extension. + +Provides a creational pattern for the :class:`.query.Query` object which +allows the fully constructed object, Core select statement, and string +compiled result to be fully cached. + + +""" + +import collections.abc as collections_abc +import logging + +from .. import exc as sa_exc +from .. import util +from ..orm import exc as orm_exc +from ..orm.query import Query +from ..orm.session import Session +from ..sql import func +from ..sql import literal_column +from ..sql import util as sql_util + + +log = logging.getLogger(__name__) + + +class Bakery: + """Callable which returns a :class:`.BakedQuery`. + + This object is returned by the class method + :meth:`.BakedQuery.bakery`. It exists as an object + so that the "cache" can be easily inspected. + + .. versionadded:: 1.2 + + + """ + + __slots__ = "cls", "cache" + + def __init__(self, cls_, cache): + self.cls = cls_ + self.cache = cache + + def __call__(self, initial_fn, *args): + return self.cls(self.cache, initial_fn, args) + + +class BakedQuery: + """A builder object for :class:`.query.Query` objects.""" + + __slots__ = "steps", "_bakery", "_cache_key", "_spoiled" + + def __init__(self, bakery, initial_fn, args=()): + self._cache_key = () + self._update_cache_key(initial_fn, args) + self.steps = [initial_fn] + self._spoiled = False + self._bakery = bakery + + @classmethod + def bakery(cls, size=200, _size_alert=None): + """Construct a new bakery. + + :return: an instance of :class:`.Bakery` + + """ + + return Bakery(cls, util.LRUCache(size, size_alert=_size_alert)) + + def _clone(self): + b1 = BakedQuery.__new__(BakedQuery) + b1._cache_key = self._cache_key + b1.steps = list(self.steps) + b1._bakery = self._bakery + b1._spoiled = self._spoiled + return b1 + + def _update_cache_key(self, fn, args=()): + self._cache_key += (fn.__code__,) + args + + def __iadd__(self, other): + if isinstance(other, tuple): + self.add_criteria(*other) + else: + self.add_criteria(other) + return self + + def __add__(self, other): + if isinstance(other, tuple): + return self.with_criteria(*other) + else: + return self.with_criteria(other) + + def add_criteria(self, fn, *args): + """Add a criteria function to this :class:`.BakedQuery`. + + This is equivalent to using the ``+=`` operator to + modify a :class:`.BakedQuery` in-place. + + """ + self._update_cache_key(fn, args) + self.steps.append(fn) + return self + + def with_criteria(self, fn, *args): + """Add a criteria function to a :class:`.BakedQuery` cloned from this + one. + + This is equivalent to using the ``+`` operator to + produce a new :class:`.BakedQuery` with modifications. + + """ + return self._clone().add_criteria(fn, *args) + + def for_session(self, session): + """Return a :class:`_baked.Result` object for this + :class:`.BakedQuery`. + + This is equivalent to calling the :class:`.BakedQuery` as a + Python callable, e.g. ``result = my_baked_query(session)``. + + """ + return Result(self, session) + + def __call__(self, session): + return self.for_session(session) + + def spoil(self, full=False): + """Cancel any query caching that will occur on this BakedQuery object. 
+ + The BakedQuery can continue to be used normally, however additional + creational functions will not be cached; they will be called + on every invocation. + + This is to support the case where a particular step in constructing + a baked query disqualifies the query from being cacheable, such + as a variant that relies upon some uncacheable value. + + :param full: if False, only functions added to this + :class:`.BakedQuery` object subsequent to the spoil step will be + non-cached; the state of the :class:`.BakedQuery` up until + this point will be pulled from the cache. If True, then the + entire :class:`_query.Query` object is built from scratch each + time, with all creational functions being called on each + invocation. + + """ + if not full and not self._spoiled: + _spoil_point = self._clone() + _spoil_point._cache_key += ("_query_only",) + self.steps = [_spoil_point._retrieve_baked_query] + self._spoiled = True + return self + + def _effective_key(self, session): + """Return the key that actually goes into the cache dictionary for + this :class:`.BakedQuery`, taking into account the given + :class:`.Session`. + + This basically means we also will include the session's query_class, + as the actual :class:`_query.Query` object is part of what's cached + and needs to match the type of :class:`_query.Query` that a later + session will want to use. + + """ + return self._cache_key + (session._query_cls,) + + def _with_lazyload_options(self, options, effective_path, cache_path=None): + """Cloning version of _add_lazyload_options.""" + q = self._clone() + q._add_lazyload_options(options, effective_path, cache_path=cache_path) + return q + + def _add_lazyload_options(self, options, effective_path, cache_path=None): + """Used by per-state lazy loaders to add options to the + "lazy load" query from a parent query. + + Creates a cache key based on given load path and query options; + if a repeatable cache key cannot be generated, the query is + "spoiled" so that it won't use caching. + + """ + + key = () + + if not cache_path: + cache_path = effective_path + + for opt in options: + if opt._is_legacy_option or opt._is_compile_state: + ck = opt._generate_cache_key() + if ck is None: + self.spoil(full=True) + else: + assert not ck[1], ( + "loader options with variable bound parameters " + "not supported with baked queries. Please " + "use new-style select() statements for cached " + "ORM queries." + ) + key += ck[0] + + self.add_criteria( + lambda q: q._with_current_path(effective_path).options(*options), + cache_path.path, + key, + ) + + def _retrieve_baked_query(self, session): + query = self._bakery.get(self._effective_key(session), None) + if query is None: + query = self._as_query(session) + self._bakery[self._effective_key(session)] = query.with_session( + None + ) + return query.with_session(session) + + def _bake(self, session): + query = self._as_query(session) + query.session = None + + # in 1.4, this is where before_compile() event is + # invoked + statement = query._statement_20() + + # if the query is not safe to cache, we still do everything as though + # we did cache it, since the receiver of _bake() assumes subqueryload + # context was set up, etc. 
+ # + # note also we want to cache the statement itself because this + # allows the statement itself to hold onto its cache key that is + # used by the Connection, which in itself is more expensive to + # generate than what BakedQuery was able to provide in 1.3 and prior + + if statement._compile_options._bake_ok: + self._bakery[self._effective_key(session)] = ( + query, + statement, + ) + + return query, statement + + def to_query(self, query_or_session): + """Return the :class:`_query.Query` object for use as a subquery. + + This method should be used within the lambda callable being used + to generate a step of an enclosing :class:`.BakedQuery`. The + parameter should normally be the :class:`_query.Query` object that + is passed to the lambda:: + + sub_bq = self.bakery(lambda s: s.query(User.name)) + sub_bq += lambda q: q.filter(User.id == Address.user_id).correlate(Address) + + main_bq = self.bakery(lambda s: s.query(Address)) + main_bq += lambda q: q.filter(sub_bq.to_query(q).exists()) + + In the case where the subquery is used in the first callable against + a :class:`.Session`, the :class:`.Session` is also accepted:: + + sub_bq = self.bakery(lambda s: s.query(User.name)) + sub_bq += lambda q: q.filter(User.id == Address.user_id).correlate(Address) + + main_bq = self.bakery( + lambda s: s.query(Address.id, sub_bq.to_query(q).scalar_subquery()) + ) + + :param query_or_session: a :class:`_query.Query` object or a class + :class:`.Session` object, that is assumed to be within the context + of an enclosing :class:`.BakedQuery` callable. + + + .. versionadded:: 1.3 + + + """ # noqa: E501 + + if isinstance(query_or_session, Session): + session = query_or_session + elif isinstance(query_or_session, Query): + session = query_or_session.session + if session is None: + raise sa_exc.ArgumentError( + "Given Query needs to be associated with a Session" + ) + else: + raise TypeError( + "Query or Session object expected, got %r." + % type(query_or_session) + ) + return self._as_query(session) + + def _as_query(self, session): + query = self.steps[0](session) + + for step in self.steps[1:]: + query = step(query) + + return query + + +class Result: + """Invokes a :class:`.BakedQuery` against a :class:`.Session`. + + The :class:`_baked.Result` object is where the actual :class:`.query.Query` + object gets created, or retrieved from the cache, + against a target :class:`.Session`, and is then invoked for results. + + """ + + __slots__ = "bq", "session", "_params", "_post_criteria" + + def __init__(self, bq, session): + self.bq = bq + self.session = session + self._params = {} + self._post_criteria = [] + + def params(self, *args, **kw): + """Specify parameters to be replaced into the string SQL statement.""" + + if len(args) == 1: + kw.update(args[0]) + elif len(args) > 0: + raise sa_exc.ArgumentError( + "params() takes zero or one positional argument, " + "which is a dictionary." + ) + self._params.update(kw) + return self + + def _using_post_criteria(self, fns): + if fns: + self._post_criteria.extend(fns) + return self + + def with_post_criteria(self, fn): + """Add a criteria function that will be applied post-cache. + + This adds a function that will be run against the + :class:`_query.Query` object after it is retrieved from the + cache. This currently includes **only** the + :meth:`_query.Query.params` and :meth:`_query.Query.execution_options` + methods. + + .. 
warning:: :meth:`_baked.Result.with_post_criteria` + functions are applied + to the :class:`_query.Query` + object **after** the query's SQL statement + object has been retrieved from the cache. Only + :meth:`_query.Query.params` and + :meth:`_query.Query.execution_options` + methods should be used. + + + .. versionadded:: 1.2 + + + """ + return self._using_post_criteria([fn]) + + def _as_query(self): + q = self.bq._as_query(self.session).params(self._params) + for fn in self._post_criteria: + q = fn(q) + return q + + def __str__(self): + return str(self._as_query()) + + def __iter__(self): + return self._iter().__iter__() + + def _iter(self): + bq = self.bq + + if not self.session.enable_baked_queries or bq._spoiled: + return self._as_query()._iter() + + query, statement = bq._bakery.get( + bq._effective_key(self.session), (None, None) + ) + if query is None: + query, statement = bq._bake(self.session) + + if self._params: + q = query.params(self._params) + else: + q = query + for fn in self._post_criteria: + q = fn(q) + + params = q._params + execution_options = dict(q._execution_options) + execution_options.update( + { + "_sa_orm_load_options": q.load_options, + "compiled_cache": bq._bakery, + } + ) + + result = self.session.execute( + statement, params, execution_options=execution_options + ) + if result._attributes.get("is_single_entity", False): + result = result.scalars() + + if result._attributes.get("filtered", False): + result = result.unique() + + return result + + def count(self): + """return the 'count'. + + Equivalent to :meth:`_query.Query.count`. + + Note this uses a subquery to ensure an accurate count regardless + of the structure of the original statement. + + """ + + col = func.count(literal_column("*")) + bq = self.bq.with_criteria(lambda q: q._legacy_from_self(col)) + return bq.for_session(self.session).params(self._params).scalar() + + def scalar(self): + """Return the first element of the first result or None + if no rows present. If multiple rows are returned, + raises MultipleResultsFound. + + Equivalent to :meth:`_query.Query.scalar`. + + """ + try: + ret = self.one() + if not isinstance(ret, collections_abc.Sequence): + return ret + return ret[0] + except orm_exc.NoResultFound: + return None + + def first(self): + """Return the first row. + + Equivalent to :meth:`_query.Query.first`. + + """ + + bq = self.bq.with_criteria(lambda q: q.slice(0, 1)) + return ( + bq.for_session(self.session) + .params(self._params) + ._using_post_criteria(self._post_criteria) + ._iter() + .first() + ) + + def one(self): + """Return exactly one result or raise an exception. + + Equivalent to :meth:`_query.Query.one`. + + """ + return self._iter().one() + + def one_or_none(self): + """Return one or zero results, or raise an exception for multiple + rows. + + Equivalent to :meth:`_query.Query.one_or_none`. + + """ + return self._iter().one_or_none() + + def all(self): + """Return all rows. + + Equivalent to :meth:`_query.Query.all`. + + """ + return self._iter().all() + + def get(self, ident): + """Retrieve an object based on identity. + + Equivalent to :meth:`_query.Query.get`. 
+ + """ + + query = self.bq.steps[0](self.session) + return query._get_impl(ident, self._load_on_pk_identity) + + def _load_on_pk_identity(self, session, query, primary_key_identity, **kw): + """Load the given primary key identity from the database.""" + + mapper = query._raw_columns[0]._annotations["parententity"] + + _get_clause, _get_params = mapper._get_clause + + def setup(query): + _lcl_get_clause = _get_clause + q = query._clone() + q._get_condition() + q._order_by = None + + # None present in ident - turn those comparisons + # into "IS NULL" + if None in primary_key_identity: + nones = { + _get_params[col].key + for col, value in zip( + mapper.primary_key, primary_key_identity + ) + if value is None + } + _lcl_get_clause = sql_util.adapt_criterion_to_null( + _lcl_get_clause, nones + ) + + # TODO: can mapper._get_clause be pre-adapted? + q._where_criteria = ( + sql_util._deep_annotate(_lcl_get_clause, {"_orm_adapt": True}), + ) + + for fn in self._post_criteria: + q = fn(q) + return q + + # cache the query against a key that includes + # which positions in the primary key are NULL + # (remember, we can map to an OUTER JOIN) + bq = self.bq + + # add the clause we got from mapper._get_clause to the cache + # key so that if a race causes multiple calls to _get_clause, + # we've cached on ours + bq = bq._clone() + bq._cache_key += (_get_clause,) + + bq = bq.with_criteria( + setup, tuple(elem is None for elem in primary_key_identity) + ) + + params = { + _get_params[primary_key].key: id_val + for id_val, primary_key in zip( + primary_key_identity, mapper.primary_key + ) + } + + result = list(bq.for_session(self.session).params(**params)) + l = len(result) + if l > 1: + raise orm_exc.MultipleResultsFound() + elif l: + return result[0] + else: + return None + + +bakery = BakedQuery.bakery diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/ext/compiler.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/ext/compiler.py new file mode 100644 index 0000000000000000000000000000000000000000..cc64477ed47633f6a1d9655512e187e18147315e --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/ext/compiler.py @@ -0,0 +1,600 @@ +# ext/compiler.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php + +r"""Provides an API for creation of custom ClauseElements and compilers. + +Synopsis +======== + +Usage involves the creation of one or more +:class:`~sqlalchemy.sql.expression.ClauseElement` subclasses and one or +more callables defining its compilation:: + + from sqlalchemy.ext.compiler import compiles + from sqlalchemy.sql.expression import ColumnClause + + + class MyColumn(ColumnClause): + inherit_cache = True + + + @compiles(MyColumn) + def compile_mycolumn(element, compiler, **kw): + return "[%s]" % element.name + +Above, ``MyColumn`` extends :class:`~sqlalchemy.sql.expression.ColumnClause`, +the base expression element for named column objects. The ``compiles`` +decorator registers itself with the ``MyColumn`` class so that it is invoked +when the object is compiled to a string:: + + from sqlalchemy import select + + s = select(MyColumn("x"), MyColumn("y")) + print(str(s)) + +Produces: + +.. 
sourcecode:: sql + + SELECT [x], [y] + +Dialect-specific compilation rules +================================== + +Compilers can also be made dialect-specific. The appropriate compiler will be +invoked for the dialect in use:: + + from sqlalchemy.schema import DDLElement + + + class AlterColumn(DDLElement): + inherit_cache = False + + def __init__(self, column, cmd): + self.column = column + self.cmd = cmd + + + @compiles(AlterColumn) + def visit_alter_column(element, compiler, **kw): + return "ALTER COLUMN %s ..." % element.column.name + + + @compiles(AlterColumn, "postgresql") + def visit_alter_column(element, compiler, **kw): + return "ALTER TABLE %s ALTER COLUMN %s ..." % ( + element.table.name, + element.column.name, + ) + +The second ``visit_alter_table`` will be invoked when any ``postgresql`` +dialect is used. + +.. _compilerext_compiling_subelements: + +Compiling sub-elements of a custom expression construct +======================================================= + +The ``compiler`` argument is the +:class:`~sqlalchemy.engine.interfaces.Compiled` object in use. This object +can be inspected for any information about the in-progress compilation, +including ``compiler.dialect``, ``compiler.statement`` etc. The +:class:`~sqlalchemy.sql.compiler.SQLCompiler` and +:class:`~sqlalchemy.sql.compiler.DDLCompiler` both include a ``process()`` +method which can be used for compilation of embedded attributes:: + + from sqlalchemy.sql.expression import Executable, ClauseElement + + + class InsertFromSelect(Executable, ClauseElement): + inherit_cache = False + + def __init__(self, table, select): + self.table = table + self.select = select + + + @compiles(InsertFromSelect) + def visit_insert_from_select(element, compiler, **kw): + return "INSERT INTO %s (%s)" % ( + compiler.process(element.table, asfrom=True, **kw), + compiler.process(element.select, **kw), + ) + + + insert = InsertFromSelect(t1, select(t1).where(t1.c.x > 5)) + print(insert) + +Produces (formatted for readability): + +.. sourcecode:: sql + + INSERT INTO mytable ( + SELECT mytable.x, mytable.y, mytable.z + FROM mytable + WHERE mytable.x > :x_1 + ) + +.. note:: + + The above ``InsertFromSelect`` construct is only an example, this actual + functionality is already available using the + :meth:`_expression.Insert.from_select` method. + + +Cross Compiling between SQL and DDL compilers +--------------------------------------------- + +SQL and DDL constructs are each compiled using different base compilers - +``SQLCompiler`` and ``DDLCompiler``. A common need is to access the +compilation rules of SQL expressions from within a DDL expression. The +``DDLCompiler`` includes an accessor ``sql_compiler`` for this reason, such as +below where we generate a CHECK constraint that embeds a SQL expression:: + + @compiles(MyConstraint) + def compile_my_constraint(constraint, ddlcompiler, **kw): + kw["literal_binds"] = True + return "CONSTRAINT %s CHECK (%s)" % ( + constraint.name, + ddlcompiler.sql_compiler.process(constraint.expression, **kw), + ) + +Above, we add an additional flag to the process step as called by +:meth:`.SQLCompiler.process`, which is the ``literal_binds`` flag. This +indicates that any SQL expression which refers to a :class:`.BindParameter` +object or other "literal" object such as those which refer to strings or +integers should be rendered **in-place**, rather than being referred to as +a bound parameter; when emitting DDL, bound parameters are typically not +supported. 
+ + +Changing the default compilation of existing constructs +======================================================= + +The compiler extension applies just as well to the existing constructs. When +overriding the compilation of a built in SQL construct, the @compiles +decorator is invoked upon the appropriate class (be sure to use the class, +i.e. ``Insert`` or ``Select``, instead of the creation function such +as ``insert()`` or ``select()``). + +Within the new compilation function, to get at the "original" compilation +routine, use the appropriate visit_XXX method - this +because compiler.process() will call upon the overriding routine and cause +an endless loop. Such as, to add "prefix" to all insert statements:: + + from sqlalchemy.sql.expression import Insert + + + @compiles(Insert) + def prefix_inserts(insert, compiler, **kw): + return compiler.visit_insert(insert.prefix_with("some prefix"), **kw) + +The above compiler will prefix all INSERT statements with "some prefix" when +compiled. + +.. _type_compilation_extension: + +Changing Compilation of Types +============================= + +``compiler`` works for types, too, such as below where we implement the +MS-SQL specific 'max' keyword for ``String``/``VARCHAR``:: + + @compiles(String, "mssql") + @compiles(VARCHAR, "mssql") + def compile_varchar(element, compiler, **kw): + if element.length == "max": + return "VARCHAR('max')" + else: + return compiler.visit_VARCHAR(element, **kw) + + + foo = Table("foo", metadata, Column("data", VARCHAR("max"))) + +Subclassing Guidelines +====================== + +A big part of using the compiler extension is subclassing SQLAlchemy +expression constructs. To make this easier, the expression and +schema packages feature a set of "bases" intended for common tasks. +A synopsis is as follows: + +* :class:`~sqlalchemy.sql.expression.ClauseElement` - This is the root + expression class. Any SQL expression can be derived from this base, and is + probably the best choice for longer constructs such as specialized INSERT + statements. + +* :class:`~sqlalchemy.sql.expression.ColumnElement` - The root of all + "column-like" elements. Anything that you'd place in the "columns" clause of + a SELECT statement (as well as order by and group by) can derive from this - + the object will automatically have Python "comparison" behavior. + + :class:`~sqlalchemy.sql.expression.ColumnElement` classes want to have a + ``type`` member which is expression's return type. This can be established + at the instance level in the constructor, or at the class level if its + generally constant:: + + class timestamp(ColumnElement): + type = TIMESTAMP() + inherit_cache = True + +* :class:`~sqlalchemy.sql.functions.FunctionElement` - This is a hybrid of a + ``ColumnElement`` and a "from clause" like object, and represents a SQL + function or stored procedure type of call. 
Since most databases support + statements along the line of "SELECT FROM " + ``FunctionElement`` adds in the ability to be used in the FROM clause of a + ``select()`` construct:: + + from sqlalchemy.sql.expression import FunctionElement + + + class coalesce(FunctionElement): + name = "coalesce" + inherit_cache = True + + + @compiles(coalesce) + def compile(element, compiler, **kw): + return "coalesce(%s)" % compiler.process(element.clauses, **kw) + + + @compiles(coalesce, "oracle") + def compile(element, compiler, **kw): + if len(element.clauses) > 2: + raise TypeError( + "coalesce only supports two arguments on " "Oracle Database" + ) + return "nvl(%s)" % compiler.process(element.clauses, **kw) + +* :class:`.ExecutableDDLElement` - The root of all DDL expressions, + like CREATE TABLE, ALTER TABLE, etc. Compilation of + :class:`.ExecutableDDLElement` subclasses is issued by a + :class:`.DDLCompiler` instead of a :class:`.SQLCompiler`. + :class:`.ExecutableDDLElement` can also be used as an event hook in + conjunction with event hooks like :meth:`.DDLEvents.before_create` and + :meth:`.DDLEvents.after_create`, allowing the construct to be invoked + automatically during CREATE TABLE and DROP TABLE sequences. + + .. seealso:: + + :ref:`metadata_ddl_toplevel` - contains examples of associating + :class:`.DDL` objects (which are themselves :class:`.ExecutableDDLElement` + instances) with :class:`.DDLEvents` event hooks. + +* :class:`~sqlalchemy.sql.expression.Executable` - This is a mixin which + should be used with any expression class that represents a "standalone" + SQL statement that can be passed directly to an ``execute()`` method. It + is already implicit within ``DDLElement`` and ``FunctionElement``. + +Most of the above constructs also respond to SQL statement caching. A +subclassed construct will want to define the caching behavior for the object, +which usually means setting the flag ``inherit_cache`` to the value of +``False`` or ``True``. See the next section :ref:`compilerext_caching` +for background. + + +.. _compilerext_caching: + +Enabling Caching Support for Custom Constructs +============================================== + +SQLAlchemy as of version 1.4 includes a +:ref:`SQL compilation caching facility ` which will allow +equivalent SQL constructs to cache their stringified form, along with other +structural information used to fetch results from the statement. + +For reasons discussed at :ref:`caching_caveats`, the implementation of this +caching system takes a conservative approach towards including custom SQL +constructs and/or subclasses within the caching system. This includes that +any user-defined SQL constructs, including all the examples for this +extension, will not participate in caching by default unless they positively +assert that they are able to do so. The :attr:`.HasCacheKey.inherit_cache` +attribute when set to ``True`` at the class level of a specific subclass +will indicate that instances of this class may be safely cached, using the +cache key generation scheme of the immediate superclass. 
This applies +for example to the "synopsis" example indicated previously:: + + class MyColumn(ColumnClause): + inherit_cache = True + + + @compiles(MyColumn) + def compile_mycolumn(element, compiler, **kw): + return "[%s]" % element.name + +Above, the ``MyColumn`` class does not include any new state that +affects its SQL compilation; the cache key of ``MyColumn`` instances will +make use of that of the ``ColumnClause`` superclass, meaning it will take +into account the class of the object (``MyColumn``), the string name and +datatype of the object:: + + >>> MyColumn("some_name", String())._generate_cache_key() + CacheKey( + key=('0', , + 'name', 'some_name', + 'type', (, + ('length', None), ('collation', None)) + ), bindparams=[]) + +For objects that are likely to be **used liberally as components within many +larger statements**, such as :class:`_schema.Column` subclasses and custom SQL +datatypes, it's important that **caching be enabled as much as possible**, as +this may otherwise negatively affect performance. + +An example of an object that **does** contain state which affects its SQL +compilation is the one illustrated at :ref:`compilerext_compiling_subelements`; +this is an "INSERT FROM SELECT" construct that combines together a +:class:`_schema.Table` as well as a :class:`_sql.Select` construct, each of +which independently affect the SQL string generation of the construct. For +this class, the example illustrates that it simply does not participate in +caching:: + + class InsertFromSelect(Executable, ClauseElement): + inherit_cache = False + + def __init__(self, table, select): + self.table = table + self.select = select + + + @compiles(InsertFromSelect) + def visit_insert_from_select(element, compiler, **kw): + return "INSERT INTO %s (%s)" % ( + compiler.process(element.table, asfrom=True, **kw), + compiler.process(element.select, **kw), + ) + +While it is also possible that the above ``InsertFromSelect`` could be made to +produce a cache key that is composed of that of the :class:`_schema.Table` and +:class:`_sql.Select` components together, the API for this is not at the moment +fully public. However, for an "INSERT FROM SELECT" construct, which is only +used by itself for specific operations, caching is not as critical as in the +previous example. + +For objects that are **used in relative isolation and are generally +standalone**, such as custom :term:`DML` constructs like an "INSERT FROM +SELECT", **caching is generally less critical** as the lack of caching for such +a construct will have only localized implications for that specific operation. + + +Further Examples +================ + +"UTC timestamp" function +------------------------- + +A function that works like "CURRENT_TIMESTAMP" except applies the +appropriate conversions so that the time is in UTC time. Timestamps are best +stored in relational databases as UTC, without time zones. UTC so that your +database doesn't think time has gone backwards in the hour when daylight +savings ends, without timezones because timezones are like character +encodings - they're best applied only at the endpoints of an application +(i.e. convert to UTC upon user input, re-apply desired timezone upon display). 
+ +For PostgreSQL and Microsoft SQL Server:: + + from sqlalchemy.sql import expression + from sqlalchemy.ext.compiler import compiles + from sqlalchemy.types import DateTime + + + class utcnow(expression.FunctionElement): + type = DateTime() + inherit_cache = True + + + @compiles(utcnow, "postgresql") + def pg_utcnow(element, compiler, **kw): + return "TIMEZONE('utc', CURRENT_TIMESTAMP)" + + + @compiles(utcnow, "mssql") + def ms_utcnow(element, compiler, **kw): + return "GETUTCDATE()" + +Example usage:: + + from sqlalchemy import Table, Column, Integer, String, DateTime, MetaData + + metadata = MetaData() + event = Table( + "event", + metadata, + Column("id", Integer, primary_key=True), + Column("description", String(50), nullable=False), + Column("timestamp", DateTime, server_default=utcnow()), + ) + +"GREATEST" function +------------------- + +The "GREATEST" function is given any number of arguments and returns the one +that is of the highest value - its equivalent to Python's ``max`` +function. A SQL standard version versus a CASE based version which only +accommodates two arguments:: + + from sqlalchemy.sql import expression, case + from sqlalchemy.ext.compiler import compiles + from sqlalchemy.types import Numeric + + + class greatest(expression.FunctionElement): + type = Numeric() + name = "greatest" + inherit_cache = True + + + @compiles(greatest) + def default_greatest(element, compiler, **kw): + return compiler.visit_function(element) + + + @compiles(greatest, "sqlite") + @compiles(greatest, "mssql") + @compiles(greatest, "oracle") + def case_greatest(element, compiler, **kw): + arg1, arg2 = list(element.clauses) + return compiler.process(case((arg1 > arg2, arg1), else_=arg2), **kw) + +Example usage:: + + Session.query(Account).filter( + greatest(Account.checking_balance, Account.savings_balance) > 10000 + ) + +"false" expression +------------------ + +Render a "false" constant expression, rendering as "0" on platforms that +don't have a "false" constant:: + + from sqlalchemy.sql import expression + from sqlalchemy.ext.compiler import compiles + + + class sql_false(expression.ColumnElement): + inherit_cache = True + + + @compiles(sql_false) + def default_false(element, compiler, **kw): + return "false" + + + @compiles(sql_false, "mssql") + @compiles(sql_false, "mysql") + @compiles(sql_false, "oracle") + def int_false(element, compiler, **kw): + return "0" + +Example usage:: + + from sqlalchemy import select, union_all + + exp = union_all( + select(users.c.name, sql_false().label("enrolled")), + select(customers.c.name, customers.c.enrolled), + ) + +""" +from __future__ import annotations + +from typing import Any +from typing import Callable +from typing import Dict +from typing import Type +from typing import TYPE_CHECKING +from typing import TypeVar + +from .. import exc +from ..sql import sqltypes + +if TYPE_CHECKING: + from ..sql.compiler import SQLCompiler + +_F = TypeVar("_F", bound=Callable[..., Any]) + + +def compiles(class_: Type[Any], *specs: str) -> Callable[[_F], _F]: + """Register a function as a compiler for a + given :class:`_expression.ClauseElement` type.""" + + def decorate(fn: _F) -> _F: + # get an existing @compiles handler + existing = class_.__dict__.get("_compiler_dispatcher", None) + + # get the original handler. All ClauseElement classes have one + # of these, but some TypeEngine classes will not. 
+ existing_dispatch = getattr(class_, "_compiler_dispatch", None) + + if not existing: + existing = _dispatcher() + + if existing_dispatch: + + def _wrap_existing_dispatch( + element: Any, compiler: SQLCompiler, **kw: Any + ) -> Any: + try: + return existing_dispatch(element, compiler, **kw) + except exc.UnsupportedCompilationError as uce: + raise exc.UnsupportedCompilationError( + compiler, + type(element), + message="%s construct has no default " + "compilation handler." % type(element), + ) from uce + + existing.specs["default"] = _wrap_existing_dispatch + + # TODO: why is the lambda needed ? + setattr( + class_, + "_compiler_dispatch", + lambda *arg, **kw: existing(*arg, **kw), + ) + setattr(class_, "_compiler_dispatcher", existing) + + if specs: + for s in specs: + existing.specs[s] = fn + + else: + existing.specs["default"] = fn + return fn + + return decorate + + +def deregister(class_: Type[Any]) -> None: + """Remove all custom compilers associated with a given + :class:`_expression.ClauseElement` type. + + """ + + if hasattr(class_, "_compiler_dispatcher"): + class_._compiler_dispatch = class_._original_compiler_dispatch + del class_._compiler_dispatcher + + +class _dispatcher: + def __init__(self) -> None: + self.specs: Dict[str, Callable[..., Any]] = {} + + def __call__(self, element: Any, compiler: SQLCompiler, **kw: Any) -> Any: + # TODO: yes, this could also switch off of DBAPI in use. + fn = self.specs.get(compiler.dialect.name, None) + if not fn: + try: + fn = self.specs["default"] + except KeyError as ke: + raise exc.UnsupportedCompilationError( + compiler, + type(element), + message="%s construct has no default " + "compilation handler." % type(element), + ) from ke + + # if compilation includes add_to_result_map, collect add_to_result_map + # arguments from the user-defined callable, which are probably none + # because this is not public API. if it wasn't called, then call it + # ourselves. + arm = kw.get("add_to_result_map", None) + if arm: + arm_collection = [] + kw["add_to_result_map"] = lambda *args: arm_collection.append(args) + + expr = fn(element, compiler, **kw) + + if arm: + if not arm_collection: + arm_collection.append( + (None, None, (element,), sqltypes.NULLTYPE) + ) + for tup in arm_collection: + arm(*tup) + return expr diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/ext/declarative/__init__.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/ext/declarative/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..0383f9d34f8b77d392270d5db4d4f3f476d7cd12 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/ext/declarative/__init__.py @@ -0,0 +1,65 @@ +# ext/declarative/__init__.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php +# mypy: ignore-errors + + +from .extensions import AbstractConcreteBase +from .extensions import ConcreteBase +from .extensions import DeferredReflection +from ... 
import util +from ...orm.decl_api import as_declarative as _as_declarative +from ...orm.decl_api import declarative_base as _declarative_base +from ...orm.decl_api import DeclarativeMeta +from ...orm.decl_api import declared_attr +from ...orm.decl_api import has_inherited_table as _has_inherited_table +from ...orm.decl_api import synonym_for as _synonym_for + + +@util.moved_20( + "The ``declarative_base()`` function is now available as " + ":func:`sqlalchemy.orm.declarative_base`." +) +def declarative_base(*arg, **kw): + return _declarative_base(*arg, **kw) + + +@util.moved_20( + "The ``as_declarative()`` function is now available as " + ":func:`sqlalchemy.orm.as_declarative`" +) +def as_declarative(*arg, **kw): + return _as_declarative(*arg, **kw) + + +@util.moved_20( + "The ``has_inherited_table()`` function is now available as " + ":func:`sqlalchemy.orm.has_inherited_table`." +) +def has_inherited_table(*arg, **kw): + return _has_inherited_table(*arg, **kw) + + +@util.moved_20( + "The ``synonym_for()`` function is now available as " + ":func:`sqlalchemy.orm.synonym_for`" +) +def synonym_for(*arg, **kw): + return _synonym_for(*arg, **kw) + + +__all__ = [ + "declarative_base", + "synonym_for", + "has_inherited_table", + "instrument_declarative", + "declared_attr", + "as_declarative", + "ConcreteBase", + "AbstractConcreteBase", + "DeclarativeMeta", + "DeferredReflection", +] diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/ext/declarative/extensions.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/ext/declarative/extensions.py new file mode 100644 index 0000000000000000000000000000000000000000..3dc6bf698c4a7357ccb057574c6afc9742a0948d --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/ext/declarative/extensions.py @@ -0,0 +1,564 @@ +# ext/declarative/extensions.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php +# mypy: ignore-errors + + +"""Public API functions and helpers for declarative.""" +from __future__ import annotations + +import collections +import contextlib +from typing import Any +from typing import Callable +from typing import TYPE_CHECKING +from typing import Union + +from ... import exc as sa_exc +from ...engine import Connection +from ...engine import Engine +from ...orm import exc as orm_exc +from ...orm import relationships +from ...orm.base import _mapper_or_none +from ...orm.clsregistry import _resolver +from ...orm.decl_base import _DeferredMapperConfig +from ...orm.util import polymorphic_union +from ...schema import Table +from ...util import OrderedDict + +if TYPE_CHECKING: + from ...sql.schema import MetaData + + +class ConcreteBase: + """A helper class for 'concrete' declarative mappings. + + :class:`.ConcreteBase` will use the :func:`.polymorphic_union` + function automatically, against all tables mapped as a subclass + to this class. The function is called via the + ``__declare_last__()`` function, which is essentially + a hook for the :meth:`.after_configured` event. + + :class:`.ConcreteBase` produces a mapped + table for the class itself. Compare to :class:`.AbstractConcreteBase`, + which does not. 
+ + Example:: + + from sqlalchemy.ext.declarative import ConcreteBase + + + class Employee(ConcreteBase, Base): + __tablename__ = "employee" + employee_id = Column(Integer, primary_key=True) + name = Column(String(50)) + __mapper_args__ = { + "polymorphic_identity": "employee", + "concrete": True, + } + + + class Manager(Employee): + __tablename__ = "manager" + employee_id = Column(Integer, primary_key=True) + name = Column(String(50)) + manager_data = Column(String(40)) + __mapper_args__ = { + "polymorphic_identity": "manager", + "concrete": True, + } + + The name of the discriminator column used by :func:`.polymorphic_union` + defaults to the name ``type``. To suit the use case of a mapping where an + actual column in a mapped table is already named ``type``, the + discriminator name can be configured by setting the + ``_concrete_discriminator_name`` attribute:: + + class Employee(ConcreteBase, Base): + _concrete_discriminator_name = "_concrete_discriminator" + + .. versionadded:: 1.3.19 Added the ``_concrete_discriminator_name`` + attribute to :class:`_declarative.ConcreteBase` so that the + virtual discriminator column name can be customized. + + .. versionchanged:: 1.4.2 The ``_concrete_discriminator_name`` attribute + need only be placed on the basemost class to take correct effect for + all subclasses. An explicit error message is now raised if the + mapped column names conflict with the discriminator name, whereas + in the 1.3.x series there would be some warnings and then a non-useful + query would be generated. + + .. seealso:: + + :class:`.AbstractConcreteBase` + + :ref:`concrete_inheritance` + + + """ + + @classmethod + def _create_polymorphic_union(cls, mappers, discriminator_name): + return polymorphic_union( + OrderedDict( + (mp.polymorphic_identity, mp.local_table) for mp in mappers + ), + discriminator_name, + "pjoin", + ) + + @classmethod + def __declare_first__(cls): + m = cls.__mapper__ + if m.with_polymorphic: + return + + discriminator_name = ( + getattr(cls, "_concrete_discriminator_name", None) or "type" + ) + + mappers = list(m.self_and_descendants) + pjoin = cls._create_polymorphic_union(mappers, discriminator_name) + m._set_with_polymorphic(("*", pjoin)) + m._set_polymorphic_on(pjoin.c[discriminator_name]) + + +class AbstractConcreteBase(ConcreteBase): + """A helper class for 'concrete' declarative mappings. + + :class:`.AbstractConcreteBase` will use the :func:`.polymorphic_union` + function automatically, against all tables mapped as a subclass + to this class. The function is called via the + ``__declare_first__()`` function, which is essentially + a hook for the :meth:`.before_configured` event. + + :class:`.AbstractConcreteBase` applies :class:`_orm.Mapper` for its + immediately inheriting class, as would occur for any other + declarative mapped class. However, the :class:`_orm.Mapper` is not + mapped to any particular :class:`.Table` object. Instead, it's + mapped directly to the "polymorphic" selectable produced by + :func:`.polymorphic_union`, and performs no persistence operations on its + own. Compare to :class:`.ConcreteBase`, which maps its + immediately inheriting class to an actual + :class:`.Table` that stores rows directly. + + .. note:: + + The :class:`.AbstractConcreteBase` delays the mapper creation of the + base class until all the subclasses have been defined, + as it needs to create a mapping against a selectable that will include + all subclass tables. 
In order to achieve this, it waits for the + **mapper configuration event** to occur, at which point it scans + through all the configured subclasses and sets up a mapping that will + query against all subclasses at once. + + While this event is normally invoked automatically, in the case of + :class:`.AbstractConcreteBase`, it may be necessary to invoke it + explicitly after **all** subclass mappings are defined, if the first + operation is to be a query against this base class. To do so, once all + the desired classes have been configured, the + :meth:`_orm.registry.configure` method on the :class:`_orm.registry` + in use can be invoked, which is available in relation to a particular + declarative base class:: + + Base.registry.configure() + + Example:: + + from sqlalchemy.orm import DeclarativeBase + from sqlalchemy.ext.declarative import AbstractConcreteBase + + + class Base(DeclarativeBase): + pass + + + class Employee(AbstractConcreteBase, Base): + pass + + + class Manager(Employee): + __tablename__ = "manager" + employee_id = Column(Integer, primary_key=True) + name = Column(String(50)) + manager_data = Column(String(40)) + + __mapper_args__ = { + "polymorphic_identity": "manager", + "concrete": True, + } + + + Base.registry.configure() + + The abstract base class is handled by declarative in a special way; + at class configuration time, it behaves like a declarative mixin + or an ``__abstract__`` base class. Once classes are configured + and mappings are produced, it then gets mapped itself, but + after all of its descendants. This is a very unique system of mapping + not found in any other SQLAlchemy API feature. + + Using this approach, we can specify columns and properties + that will take place on mapped subclasses, in the way that + we normally do as in :ref:`declarative_mixins`:: + + from sqlalchemy.ext.declarative import AbstractConcreteBase + + + class Company(Base): + __tablename__ = "company" + id = Column(Integer, primary_key=True) + + + class Employee(AbstractConcreteBase, Base): + strict_attrs = True + + employee_id = Column(Integer, primary_key=True) + + @declared_attr + def company_id(cls): + return Column(ForeignKey("company.id")) + + @declared_attr + def company(cls): + return relationship("Company") + + + class Manager(Employee): + __tablename__ = "manager" + + name = Column(String(50)) + manager_data = Column(String(40)) + + __mapper_args__ = { + "polymorphic_identity": "manager", + "concrete": True, + } + + + Base.registry.configure() + + When we make use of our mappings however, both ``Manager`` and + ``Employee`` will have an independently usable ``.company`` attribute:: + + session.execute(select(Employee).filter(Employee.company.has(id=5))) + + :param strict_attrs: when specified on the base class, "strict" attribute + mode is enabled which attempts to limit ORM mapped attributes on the + base class to only those that are immediately present, while still + preserving "polymorphic" loading behavior. + + .. versionadded:: 2.0 + + .. seealso:: + + :class:`.ConcreteBase` + + :ref:`concrete_inheritance` + + :ref:`abstract_concrete_base` + + """ + + __no_table__ = True + + @classmethod + def __declare_first__(cls): + cls._sa_decl_prepare_nocascade() + + @classmethod + def _sa_decl_prepare_nocascade(cls): + if getattr(cls, "__mapper__", None): + return + + to_map = _DeferredMapperConfig.config_for_cls(cls) + + # can't rely on 'self_and_descendants' here + # since technically an immediate subclass + # might not be mapped, but a subclass + # may be. 
+ mappers = [] + stack = list(cls.__subclasses__()) + while stack: + klass = stack.pop() + stack.extend(klass.__subclasses__()) + mn = _mapper_or_none(klass) + if mn is not None: + mappers.append(mn) + + discriminator_name = ( + getattr(cls, "_concrete_discriminator_name", None) or "type" + ) + pjoin = cls._create_polymorphic_union(mappers, discriminator_name) + + # For columns that were declared on the class, these + # are normally ignored with the "__no_table__" mapping, + # unless they have a different attribute key vs. col name + # and are in the properties argument. + # In that case, ensure we update the properties entry + # to the correct column from the pjoin target table. + declared_cols = set(to_map.declared_columns) + declared_col_keys = {c.key for c in declared_cols} + for k, v in list(to_map.properties.items()): + if v in declared_cols: + to_map.properties[k] = pjoin.c[v.key] + declared_col_keys.remove(v.key) + + to_map.local_table = pjoin + + strict_attrs = cls.__dict__.get("strict_attrs", False) + + m_args = to_map.mapper_args_fn or dict + + def mapper_args(): + args = m_args() + args["polymorphic_on"] = pjoin.c[discriminator_name] + args["polymorphic_abstract"] = True + if strict_attrs: + args["include_properties"] = ( + set(pjoin.primary_key) + | declared_col_keys + | {discriminator_name} + ) + args["with_polymorphic"] = ("*", pjoin) + return args + + to_map.mapper_args_fn = mapper_args + + to_map.map() + + stack = [cls] + while stack: + scls = stack.pop(0) + stack.extend(scls.__subclasses__()) + sm = _mapper_or_none(scls) + if sm and sm.concrete and sm.inherits is None: + for sup_ in scls.__mro__[1:]: + sup_sm = _mapper_or_none(sup_) + if sup_sm: + sm._set_concrete_base(sup_sm) + break + + @classmethod + def _sa_raise_deferred_config(cls): + raise orm_exc.UnmappedClassError( + cls, + msg="Class %s is a subclass of AbstractConcreteBase and " + "has a mapping pending until all subclasses are defined. " + "Call the sqlalchemy.orm.configure_mappers() function after " + "all subclasses have been defined to " + "complete the mapping of this class." + % orm_exc._safe_cls_name(cls), + ) + + +class DeferredReflection: + """A helper class for construction of mappings based on + a deferred reflection step. + + Normally, declarative can be used with reflection by + setting a :class:`_schema.Table` object using autoload_with=engine + as the ``__table__`` attribute on a declarative class. + The caveat is that the :class:`_schema.Table` must be fully + reflected, or at the very least have a primary key column, + at the point at which a normal declarative mapping is + constructed, meaning the :class:`_engine.Engine` must be available + at class declaration time. + + The :class:`.DeferredReflection` mixin moves the construction + of mappers to be at a later point, after a specific + method is called which first reflects all :class:`_schema.Table` + objects created so far. Classes can define it as such:: + + from sqlalchemy.ext.declarative import declarative_base + from sqlalchemy.ext.declarative import DeferredReflection + + Base = declarative_base() + + + class MyClass(DeferredReflection, Base): + __tablename__ = "mytable" + + Above, ``MyClass`` is not yet mapped. 
After a series of + classes have been defined in the above fashion, all tables + can be reflected and mappings created using + :meth:`.prepare`:: + + engine = create_engine("someengine://...") + DeferredReflection.prepare(engine) + + The :class:`.DeferredReflection` mixin can be applied to individual + classes, used as the base for the declarative base itself, + or used in a custom abstract class. Using an abstract base + allows that only a subset of classes to be prepared for a + particular prepare step, which is necessary for applications + that use more than one engine. For example, if an application + has two engines, you might use two bases, and prepare each + separately, e.g.:: + + class ReflectedOne(DeferredReflection, Base): + __abstract__ = True + + + class ReflectedTwo(DeferredReflection, Base): + __abstract__ = True + + + class MyClass(ReflectedOne): + __tablename__ = "mytable" + + + class MyOtherClass(ReflectedOne): + __tablename__ = "myothertable" + + + class YetAnotherClass(ReflectedTwo): + __tablename__ = "yetanothertable" + + + # ... etc. + + Above, the class hierarchies for ``ReflectedOne`` and + ``ReflectedTwo`` can be configured separately:: + + ReflectedOne.prepare(engine_one) + ReflectedTwo.prepare(engine_two) + + .. seealso:: + + :ref:`orm_declarative_reflected_deferred_reflection` - in the + :ref:`orm_declarative_table_config_toplevel` section. + + """ + + @classmethod + def prepare( + cls, bind: Union[Engine, Connection], **reflect_kw: Any + ) -> None: + r"""Reflect all :class:`_schema.Table` objects for all current + :class:`.DeferredReflection` subclasses + + :param bind: :class:`_engine.Engine` or :class:`_engine.Connection` + instance + + ..versionchanged:: 2.0.16 a :class:`_engine.Connection` is also + accepted. + + :param \**reflect_kw: additional keyword arguments passed to + :meth:`_schema.MetaData.reflect`, such as + :paramref:`_schema.MetaData.reflect.views`. + + .. 
versionadded:: 2.0.16 + + """ + + to_map = _DeferredMapperConfig.classes_for_base(cls) + + metadata_to_table = collections.defaultdict(set) + + # first collect the primary __table__ for each class into a + # collection of metadata/schemaname -> table names + for thingy in to_map: + if thingy.local_table is not None: + metadata_to_table[ + (thingy.local_table.metadata, thingy.local_table.schema) + ].add(thingy.local_table.name) + + # then reflect all those tables into their metadatas + + if isinstance(bind, Connection): + conn = bind + ctx = contextlib.nullcontext(enter_result=conn) + elif isinstance(bind, Engine): + ctx = bind.connect() + else: + raise sa_exc.ArgumentError( + f"Expected Engine or Connection, got {bind!r}" + ) + + with ctx as conn: + for (metadata, schema), table_names in metadata_to_table.items(): + metadata.reflect( + conn, + only=table_names, + schema=schema, + extend_existing=True, + autoload_replace=False, + **reflect_kw, + ) + + metadata_to_table.clear() + + # .map() each class, then go through relationships and look + # for secondary + for thingy in to_map: + thingy.map() + + mapper = thingy.cls.__mapper__ + metadata = mapper.class_.metadata + + for rel in mapper._props.values(): + if ( + isinstance(rel, relationships.RelationshipProperty) + and rel._init_args.secondary._is_populated() + ): + secondary_arg = rel._init_args.secondary + + if isinstance(secondary_arg.argument, Table): + secondary_table = secondary_arg.argument + metadata_to_table[ + ( + secondary_table.metadata, + secondary_table.schema, + ) + ].add(secondary_table.name) + elif isinstance(secondary_arg.argument, str): + _, resolve_arg = _resolver(rel.parent.class_, rel) + + resolver = resolve_arg( + secondary_arg.argument, True + ) + metadata_to_table[ + (metadata, thingy.local_table.schema) + ].add(secondary_arg.argument) + + resolver._resolvers += ( + cls._sa_deferred_table_resolver(metadata), + ) + + secondary_arg.argument = resolver() + + for (metadata, schema), table_names in metadata_to_table.items(): + metadata.reflect( + conn, + only=table_names, + schema=schema, + extend_existing=True, + autoload_replace=False, + ) + + @classmethod + def _sa_deferred_table_resolver( + cls, metadata: MetaData + ) -> Callable[[str], Table]: + def _resolve(key: str) -> Table: + # reflection has already occurred so this Table would have + # its contents already + return Table(key, metadata) + + return _resolve + + _sa_decl_prepare = True + + @classmethod + def _sa_raise_deferred_config(cls): + raise orm_exc.UnmappedClassError( + cls, + msg="Class %s is a subclass of DeferredReflection. " + "Mappings are not produced until the .prepare() " + "method is called on the class hierarchy." + % orm_exc._safe_cls_name(cls), + ) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/ext/horizontal_shard.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/ext/horizontal_shard.py new file mode 100644 index 0000000000000000000000000000000000000000..3ea3304eb308940b6077f5bc802a5bad197076fa --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/ext/horizontal_shard.py @@ -0,0 +1,478 @@ +# ext/horizontal_shard.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php + +"""Horizontal sharding support. 
+ +Defines a rudimental 'horizontal sharding' system which allows a Session to +distribute queries and persistence operations across multiple databases. + +For a usage example, see the :ref:`examples_sharding` example included in +the source distribution. + +.. deepalchemy:: The horizontal sharding extension is an advanced feature, + involving a complex statement -> database interaction as well as + use of semi-public APIs for non-trivial cases. Simpler approaches to + refering to multiple database "shards", most commonly using a distinct + :class:`_orm.Session` per "shard", should always be considered first + before using this more complex and less-production-tested system. + + + +""" +from __future__ import annotations + +from typing import Any +from typing import Callable +from typing import Dict +from typing import Iterable +from typing import Optional +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING +from typing import TypeVar +from typing import Union + +from .. import event +from .. import exc +from .. import inspect +from .. import util +from ..orm import PassiveFlag +from ..orm._typing import OrmExecuteOptionsParameter +from ..orm.interfaces import ORMOption +from ..orm.mapper import Mapper +from ..orm.query import Query +from ..orm.session import _BindArguments +from ..orm.session import _PKIdentityArgument +from ..orm.session import Session +from ..util.typing import Protocol +from ..util.typing import Self + +if TYPE_CHECKING: + from ..engine.base import Connection + from ..engine.base import Engine + from ..engine.base import OptionEngine + from ..engine.result import IteratorResult + from ..engine.result import Result + from ..orm import LoaderCallableStatus + from ..orm._typing import _O + from ..orm.bulk_persistence import BulkUDCompileState + from ..orm.context import QueryContext + from ..orm.session import _EntityBindKey + from ..orm.session import _SessionBind + from ..orm.session import ORMExecuteState + from ..orm.state import InstanceState + from ..sql import Executable + from ..sql._typing import _TP + from ..sql.elements import ClauseElement + +__all__ = ["ShardedSession", "ShardedQuery"] + +_T = TypeVar("_T", bound=Any) + + +ShardIdentifier = str + + +class ShardChooser(Protocol): + def __call__( + self, + mapper: Optional[Mapper[_T]], + instance: Any, + clause: Optional[ClauseElement], + ) -> Any: ... + + +class IdentityChooser(Protocol): + def __call__( + self, + mapper: Mapper[_T], + primary_key: _PKIdentityArgument, + *, + lazy_loaded_from: Optional[InstanceState[Any]], + execution_options: OrmExecuteOptionsParameter, + bind_arguments: _BindArguments, + **kw: Any, + ) -> Any: ... + + +class ShardedQuery(Query[_T]): + """Query class used with :class:`.ShardedSession`. + + .. legacy:: The :class:`.ShardedQuery` is a subclass of the legacy + :class:`.Query` class. The :class:`.ShardedSession` now supports + 2.0 style execution via the :meth:`.ShardedSession.execute` method. + + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + assert isinstance(self.session, ShardedSession) + + self.identity_chooser = self.session.identity_chooser + self.execute_chooser = self.session.execute_chooser + self._shard_id = None + + def set_shard(self, shard_id: ShardIdentifier) -> Self: + """Return a new query, limited to a single shard ID. + + All subsequent operations with the returned query will + be against the single shard regardless of other state. 
+ + The shard_id can be passed for a 2.0 style execution to the + bind_arguments dictionary of :meth:`.Session.execute`:: + + results = session.execute(stmt, bind_arguments={"shard_id": "my_shard"}) + + """ # noqa: E501 + return self.execution_options(_sa_shard_id=shard_id) + + +class ShardedSession(Session): + shard_chooser: ShardChooser + identity_chooser: IdentityChooser + execute_chooser: Callable[[ORMExecuteState], Iterable[Any]] + + def __init__( + self, + shard_chooser: ShardChooser, + identity_chooser: Optional[IdentityChooser] = None, + execute_chooser: Optional[ + Callable[[ORMExecuteState], Iterable[Any]] + ] = None, + shards: Optional[Dict[str, Any]] = None, + query_cls: Type[Query[_T]] = ShardedQuery, + *, + id_chooser: Optional[ + Callable[[Query[_T], Iterable[_T]], Iterable[Any]] + ] = None, + query_chooser: Optional[Callable[[Executable], Iterable[Any]]] = None, + **kwargs: Any, + ) -> None: + """Construct a ShardedSession. + + :param shard_chooser: A callable which, passed a Mapper, a mapped + instance, and possibly a SQL clause, returns a shard ID. This id + may be based off of the attributes present within the object, or on + some round-robin scheme. If the scheme is based on a selection, it + should set whatever state on the instance to mark it in the future as + participating in that shard. + + :param identity_chooser: A callable, passed a Mapper and primary key + argument, which should return a list of shard ids where this + primary key might reside. + + .. versionchanged:: 2.0 The ``identity_chooser`` parameter + supersedes the ``id_chooser`` parameter. + + :param execute_chooser: For a given :class:`.ORMExecuteState`, + returns the list of shard_ids + where the query should be issued. Results from all shards returned + will be combined together into a single listing. + + .. versionchanged:: 1.4 The ``execute_chooser`` parameter + supersedes the ``query_chooser`` parameter. + + :param shards: A dictionary of string shard names + to :class:`~sqlalchemy.engine.Engine` objects. + + """ + super().__init__(query_cls=query_cls, **kwargs) + + event.listen( + self, "do_orm_execute", execute_and_instances, retval=True + ) + self.shard_chooser = shard_chooser + + if id_chooser: + _id_chooser = id_chooser + util.warn_deprecated( + "The ``id_chooser`` parameter is deprecated; " + "please use ``identity_chooser``.", + "2.0", + ) + + def _legacy_identity_chooser( + mapper: Mapper[_T], + primary_key: _PKIdentityArgument, + *, + lazy_loaded_from: Optional[InstanceState[Any]], + execution_options: OrmExecuteOptionsParameter, + bind_arguments: _BindArguments, + **kw: Any, + ) -> Any: + q = self.query(mapper) + if lazy_loaded_from: + q = q._set_lazyload_from(lazy_loaded_from) + return _id_chooser(q, primary_key) + + self.identity_chooser = _legacy_identity_chooser + elif identity_chooser: + self.identity_chooser = identity_chooser + else: + raise exc.ArgumentError( + "identity_chooser or id_chooser is required" + ) + + if query_chooser: + _query_chooser = query_chooser + util.warn_deprecated( + "The ``query_chooser`` parameter is deprecated; " + "please use ``execute_chooser``.", + "1.4", + ) + if execute_chooser: + raise exc.ArgumentError( + "Can't pass query_chooser and execute_chooser " + "at the same time." 
+ ) + + def _default_execute_chooser( + orm_context: ORMExecuteState, + ) -> Iterable[Any]: + return _query_chooser(orm_context.statement) + + if execute_chooser is None: + execute_chooser = _default_execute_chooser + + if execute_chooser is None: + raise exc.ArgumentError( + "execute_chooser or query_chooser is required" + ) + self.execute_chooser = execute_chooser + self.__shards: Dict[ShardIdentifier, _SessionBind] = {} + if shards is not None: + for k in shards: + self.bind_shard(k, shards[k]) + + def _identity_lookup( + self, + mapper: Mapper[_O], + primary_key_identity: Union[Any, Tuple[Any, ...]], + identity_token: Optional[Any] = None, + passive: PassiveFlag = PassiveFlag.PASSIVE_OFF, + lazy_loaded_from: Optional[InstanceState[Any]] = None, + execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT, + bind_arguments: Optional[_BindArguments] = None, + **kw: Any, + ) -> Union[Optional[_O], LoaderCallableStatus]: + """override the default :meth:`.Session._identity_lookup` method so + that we search for a given non-token primary key identity across all + possible identity tokens (e.g. shard ids). + + .. versionchanged:: 1.4 Moved :meth:`.Session._identity_lookup` from + the :class:`_query.Query` object to the :class:`.Session`. + + """ + + if identity_token is not None: + obj = super()._identity_lookup( + mapper, + primary_key_identity, + identity_token=identity_token, + **kw, + ) + + return obj + else: + for shard_id in self.identity_chooser( + mapper, + primary_key_identity, + lazy_loaded_from=lazy_loaded_from, + execution_options=execution_options, + bind_arguments=dict(bind_arguments) if bind_arguments else {}, + ): + obj2 = super()._identity_lookup( + mapper, + primary_key_identity, + identity_token=shard_id, + lazy_loaded_from=lazy_loaded_from, + **kw, + ) + if obj2 is not None: + return obj2 + + return None + + def _choose_shard_and_assign( + self, + mapper: Optional[_EntityBindKey[_O]], + instance: Any, + **kw: Any, + ) -> Any: + if instance is not None: + state = inspect(instance) + if state.key: + token = state.key[2] + assert token is not None + return token + elif state.identity_token: + return state.identity_token + + assert isinstance(mapper, Mapper) + shard_id = self.shard_chooser(mapper, instance, **kw) + if instance is not None: + state.identity_token = shard_id + return shard_id + + def connection_callable( + self, + mapper: Optional[Mapper[_T]] = None, + instance: Optional[Any] = None, + shard_id: Optional[ShardIdentifier] = None, + **kw: Any, + ) -> Connection: + """Provide a :class:`_engine.Connection` to use in the unit of work + flush process. 
+ + """ + + if shard_id is None: + shard_id = self._choose_shard_and_assign(mapper, instance) + + if self.in_transaction(): + trans = self.get_transaction() + assert trans is not None + return trans.connection(mapper, shard_id=shard_id) + else: + bind = self.get_bind( + mapper=mapper, shard_id=shard_id, instance=instance + ) + + if isinstance(bind, Engine): + return bind.connect(**kw) + else: + assert isinstance(bind, Connection) + return bind + + def get_bind( + self, + mapper: Optional[_EntityBindKey[_O]] = None, + *, + shard_id: Optional[ShardIdentifier] = None, + instance: Optional[Any] = None, + clause: Optional[ClauseElement] = None, + **kw: Any, + ) -> _SessionBind: + if shard_id is None: + shard_id = self._choose_shard_and_assign( + mapper, instance=instance, clause=clause + ) + assert shard_id is not None + return self.__shards[shard_id] + + def bind_shard( + self, shard_id: ShardIdentifier, bind: Union[Engine, OptionEngine] + ) -> None: + self.__shards[shard_id] = bind + + +class set_shard_id(ORMOption): + """a loader option for statements to apply a specific shard id to the + primary query as well as for additional relationship and column + loaders. + + The :class:`_horizontal.set_shard_id` option may be applied using + the :meth:`_sql.Executable.options` method of any executable statement:: + + stmt = ( + select(MyObject) + .where(MyObject.name == "some name") + .options(set_shard_id("shard1")) + ) + + Above, the statement when invoked will limit to the "shard1" shard + identifier for the primary query as well as for all relationship and + column loading strategies, including eager loaders such as + :func:`_orm.selectinload`, deferred column loaders like :func:`_orm.defer`, + and the lazy relationship loader :func:`_orm.lazyload`. + + In this way, the :class:`_horizontal.set_shard_id` option has much wider + scope than using the "shard_id" argument within the + :paramref:`_orm.Session.execute.bind_arguments` dictionary. + + + .. versionadded:: 2.0.0 + + """ + + __slots__ = ("shard_id", "propagate_to_loaders") + + def __init__( + self, shard_id: ShardIdentifier, propagate_to_loaders: bool = True + ): + """Construct a :class:`_horizontal.set_shard_id` option. + + :param shard_id: shard identifier + :param propagate_to_loaders: if left at its default of ``True``, the + shard option will take place for lazy loaders such as + :func:`_orm.lazyload` and :func:`_orm.defer`; if False, the option + will not be propagated to loaded objects. Note that :func:`_orm.defer` + always limits to the shard_id of the parent row in any case, so the + parameter only has a net effect on the behavior of the + :func:`_orm.lazyload` strategy. 
+ + """ + self.shard_id = shard_id + self.propagate_to_loaders = propagate_to_loaders + + +def execute_and_instances( + orm_context: ORMExecuteState, +) -> Union[Result[_T], IteratorResult[_TP]]: + active_options: Union[ + None, + QueryContext.default_load_options, + Type[QueryContext.default_load_options], + BulkUDCompileState.default_update_options, + Type[BulkUDCompileState.default_update_options], + ] + + if orm_context.is_select: + active_options = orm_context.load_options + + elif orm_context.is_update or orm_context.is_delete: + active_options = orm_context.update_delete_options + else: + active_options = None + + session = orm_context.session + assert isinstance(session, ShardedSession) + + def iter_for_shard( + shard_id: ShardIdentifier, + ) -> Union[Result[_T], IteratorResult[_TP]]: + bind_arguments = dict(orm_context.bind_arguments) + bind_arguments["shard_id"] = shard_id + + orm_context.update_execution_options(identity_token=shard_id) + return orm_context.invoke_statement(bind_arguments=bind_arguments) + + for orm_opt in orm_context._non_compile_orm_options: + # TODO: if we had an ORMOption that gets applied at ORM statement + # execution time, that would allow this to be more generalized. + # for now just iterate and look for our options + if isinstance(orm_opt, set_shard_id): + shard_id = orm_opt.shard_id + break + else: + if active_options and active_options._identity_token is not None: + shard_id = active_options._identity_token + elif "_sa_shard_id" in orm_context.execution_options: + shard_id = orm_context.execution_options["_sa_shard_id"] + elif "shard_id" in orm_context.bind_arguments: + shard_id = orm_context.bind_arguments["shard_id"] + else: + shard_id = None + + if shard_id is not None: + return iter_for_shard(shard_id) + else: + partial = [] + for shard_id in session.execute_chooser(orm_context): + result_ = iter_for_shard(shard_id) + partial.append(result_) + return partial[0].merge(*partial[1:]) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/ext/hybrid.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/ext/hybrid.py new file mode 100644 index 0000000000000000000000000000000000000000..c1c46e7c5f597d65d7cf78d623826c60f24759f7 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/ext/hybrid.py @@ -0,0 +1,1533 @@ +# ext/hybrid.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php + +r"""Define attributes on ORM-mapped classes that have "hybrid" behavior. + +"hybrid" means the attribute has distinct behaviors defined at the +class level and at the instance level. + +The :mod:`~sqlalchemy.ext.hybrid` extension provides a special form of +method decorator and has minimal dependencies on the rest of SQLAlchemy. +Its basic theory of operation can work with any descriptor-based expression +system. + +Consider a mapping ``Interval``, representing integer ``start`` and ``end`` +values. We can define higher level functions on mapped classes that produce SQL +expressions at the class level, and Python expression evaluation at the +instance level. 
Below, each function decorated with :class:`.hybrid_method` or +:class:`.hybrid_property` may receive ``self`` as an instance of the class, or +may receive the class directly, depending on context:: + + from __future__ import annotations + + from sqlalchemy.ext.hybrid import hybrid_method + from sqlalchemy.ext.hybrid import hybrid_property + from sqlalchemy.orm import DeclarativeBase + from sqlalchemy.orm import Mapped + from sqlalchemy.orm import mapped_column + + + class Base(DeclarativeBase): + pass + + + class Interval(Base): + __tablename__ = "interval" + + id: Mapped[int] = mapped_column(primary_key=True) + start: Mapped[int] + end: Mapped[int] + + def __init__(self, start: int, end: int): + self.start = start + self.end = end + + @hybrid_property + def length(self) -> int: + return self.end - self.start + + @hybrid_method + def contains(self, point: int) -> bool: + return (self.start <= point) & (point <= self.end) + + @hybrid_method + def intersects(self, other: Interval) -> bool: + return self.contains(other.start) | self.contains(other.end) + +Above, the ``length`` property returns the difference between the +``end`` and ``start`` attributes. With an instance of ``Interval``, +this subtraction occurs in Python, using normal Python descriptor +mechanics:: + + >>> i1 = Interval(5, 10) + >>> i1.length + 5 + +When dealing with the ``Interval`` class itself, the :class:`.hybrid_property` +descriptor evaluates the function body given the ``Interval`` class as +the argument, which when evaluated with SQLAlchemy expression mechanics +returns a new SQL expression: + +.. sourcecode:: pycon+sql + + >>> from sqlalchemy import select + >>> print(select(Interval.length)) + {printsql}SELECT interval."end" - interval.start AS length + FROM interval{stop} + + + >>> print(select(Interval).filter(Interval.length > 10)) + {printsql}SELECT interval.id, interval.start, interval."end" + FROM interval + WHERE interval."end" - interval.start > :param_1 + +Filtering methods such as :meth:`.Select.filter_by` are supported +with hybrid attributes as well: + +.. sourcecode:: pycon+sql + + >>> print(select(Interval).filter_by(length=5)) + {printsql}SELECT interval.id, interval.start, interval."end" + FROM interval + WHERE interval."end" - interval.start = :param_1 + +The ``Interval`` class example also illustrates two methods, +``contains()`` and ``intersects()``, decorated with +:class:`.hybrid_method`. This decorator applies the same idea to +methods that :class:`.hybrid_property` applies to attributes. The +methods return boolean values, and take advantage of the Python ``|`` +and ``&`` bitwise operators to produce equivalent instance-level and +SQL expression-level boolean behavior: + +.. 
sourcecode:: pycon+sql + + >>> i1.contains(6) + True + >>> i1.contains(15) + False + >>> i1.intersects(Interval(7, 18)) + True + >>> i1.intersects(Interval(25, 29)) + False + + >>> print(select(Interval).filter(Interval.contains(15))) + {printsql}SELECT interval.id, interval.start, interval."end" + FROM interval + WHERE interval.start <= :start_1 AND interval."end" > :end_1{stop} + + >>> ia = aliased(Interval) + >>> print(select(Interval, ia).filter(Interval.intersects(ia))) + {printsql}SELECT interval.id, interval.start, + interval."end", interval_1.id AS interval_1_id, + interval_1.start AS interval_1_start, interval_1."end" AS interval_1_end + FROM interval, interval AS interval_1 + WHERE interval.start <= interval_1.start + AND interval."end" > interval_1.start + OR interval.start <= interval_1."end" + AND interval."end" > interval_1."end"{stop} + +.. _hybrid_distinct_expression: + +Defining Expression Behavior Distinct from Attribute Behavior +-------------------------------------------------------------- + +In the previous section, our usage of the ``&`` and ``|`` bitwise operators +within the ``Interval.contains`` and ``Interval.intersects`` methods was +fortunate, considering our functions operated on two boolean values to return a +new one. In many cases, the construction of an in-Python function and a +SQLAlchemy SQL expression have enough differences that two separate Python +expressions should be defined. The :mod:`~sqlalchemy.ext.hybrid` decorator +defines a **modifier** :meth:`.hybrid_property.expression` for this purpose. As an +example we'll define the radius of the interval, which requires the usage of +the absolute value function:: + + from sqlalchemy import ColumnElement + from sqlalchemy import Float + from sqlalchemy import func + from sqlalchemy import type_coerce + + + class Interval(Base): + # ... + + @hybrid_property + def radius(self) -> float: + return abs(self.length) / 2 + + @radius.inplace.expression + @classmethod + def _radius_expression(cls) -> ColumnElement[float]: + return type_coerce(func.abs(cls.length) / 2, Float) + +In the above example, the :class:`.hybrid_property` first assigned to the +name ``Interval.radius`` is amended by a subsequent method called +``Interval._radius_expression``, using the decorator +``@radius.inplace.expression``, which chains together two modifiers +:attr:`.hybrid_property.inplace` and :attr:`.hybrid_property.expression`. +The use of :attr:`.hybrid_property.inplace` indicates that the +:meth:`.hybrid_property.expression` modifier should mutate the +existing hybrid object at ``Interval.radius`` in place, without creating a +new object. Notes on this modifier and its +rationale are discussed in the next section :ref:`hybrid_pep484_naming`. +The use of ``@classmethod`` is optional, and is strictly to give typing +tools a hint that ``cls`` in this case is expected to be the ``Interval`` +class, and not an instance of ``Interval``. + +.. note:: :attr:`.hybrid_property.inplace` as well as the use of ``@classmethod`` + for proper typing support are available as of SQLAlchemy 2.0.4, and will + not work in earlier versions. + +With ``Interval.radius`` now including an expression element, the SQL +function ``ABS()`` is returned when accessing ``Interval.radius`` +at the class level: + +.. 
sourcecode:: pycon+sql + + >>> from sqlalchemy import select + >>> print(select(Interval).filter(Interval.radius > 5)) + {printsql}SELECT interval.id, interval.start, interval."end" + FROM interval + WHERE abs(interval."end" - interval.start) / :abs_1 > :param_1 + + +.. _hybrid_pep484_naming: + +Using ``inplace`` to create pep-484 compliant hybrid properties +--------------------------------------------------------------- + +In the previous section, a :class:`.hybrid_property` decorator is illustrated +which includes two separate method-level functions being decorated, both +to produce a single object attribute referenced as ``Interval.radius``. +There are actually several different modifiers we can use for +:class:`.hybrid_property` including :meth:`.hybrid_property.expression`, +:meth:`.hybrid_property.setter` and :meth:`.hybrid_property.update_expression`. + +SQLAlchemy's :class:`.hybrid_property` decorator intends that adding on these +methods may be done in the identical manner as Python's built-in +``@property`` decorator, where idiomatic use is to continue to redefine the +attribute repeatedly, using the **same attribute name** each time, as in the +example below that illustrates the use of :meth:`.hybrid_property.setter` and +:meth:`.hybrid_property.expression` for the ``Interval.radius`` descriptor:: + + # correct use, however is not accepted by pep-484 tooling + + + class Interval(Base): + # ... + + @hybrid_property + def radius(self): + return abs(self.length) / 2 + + @radius.setter + def radius(self, value): + self.length = value * 2 + + @radius.expression + def radius(cls): + return type_coerce(func.abs(cls.length) / 2, Float) + +Above, there are three ``Interval.radius`` methods, but as each are decorated, +first by the :class:`.hybrid_property` decorator and then by the +``@radius`` name itself, the end effect is that ``Interval.radius`` is +a single attribute with three different functions contained within it. +This style of use is taken from `Python's documented use of @property +`_. +It is important to note that the way both ``@property`` as well as +:class:`.hybrid_property` work, a **copy of the descriptor is made each time**. +That is, each call to ``@radius.expression``, ``@radius.setter`` etc. +make a new object entirely. This allows the attribute to be re-defined in +subclasses without issue (see :ref:`hybrid_reuse_subclass` later in this +section for how this is used). + +However, the above approach is not compatible with typing tools such as +mypy and pyright. Python's own ``@property`` decorator does not have this +limitation only because +`these tools hardcode the behavior of @property +`_, meaning this syntax +is not available to SQLAlchemy under :pep:`484` compliance. + +In order to produce a reasonable syntax while remaining typing compliant, +the :attr:`.hybrid_property.inplace` decorator allows the same +decorator to be re-used with different method names, while still producing +a single decorator under one name:: + + # correct use which is also accepted by pep-484 tooling + + + class Interval(Base): + # ... 
+ + @hybrid_property + def radius(self) -> float: + return abs(self.length) / 2 + + @radius.inplace.setter + def _radius_setter(self, value: float) -> None: + # for example only + self.length = value * 2 + + @radius.inplace.expression + @classmethod + def _radius_expression(cls) -> ColumnElement[float]: + return type_coerce(func.abs(cls.length) / 2, Float) + +Using :attr:`.hybrid_property.inplace` further qualifies the use of the +decorator that a new copy should not be made, thereby maintaining the +``Interval.radius`` name while allowing additional methods +``Interval._radius_setter`` and ``Interval._radius_expression`` to be +differently named. + + +.. versionadded:: 2.0.4 Added :attr:`.hybrid_property.inplace` to allow + less verbose construction of composite :class:`.hybrid_property` objects + while not having to use repeated method names. Additionally allowed the + use of ``@classmethod`` within :attr:`.hybrid_property.expression`, + :attr:`.hybrid_property.update_expression`, and + :attr:`.hybrid_property.comparator` to allow typing tools to identify + ``cls`` as a class and not an instance in the method signature. + + +Defining Setters +---------------- + +The :meth:`.hybrid_property.setter` modifier allows the construction of a +custom setter method, that can modify values on the object:: + + class Interval(Base): + # ... + + @hybrid_property + def length(self) -> int: + return self.end - self.start + + @length.inplace.setter + def _length_setter(self, value: int) -> None: + self.end = self.start + value + +The ``length(self, value)`` method is now called upon set:: + + >>> i1 = Interval(5, 10) + >>> i1.length + 5 + >>> i1.length = 12 + >>> i1.end + 17 + +.. _hybrid_bulk_update: + +Allowing Bulk ORM Update +------------------------ + +A hybrid can define a custom "UPDATE" handler for when using +ORM-enabled updates, allowing the hybrid to be used in the +SET clause of the update. + +Normally, when using a hybrid with :func:`_sql.update`, the SQL +expression is used as the column that's the target of the SET. If our +``Interval`` class had a hybrid ``start_point`` that linked to +``Interval.start``, this could be substituted directly:: + + from sqlalchemy import update + + stmt = update(Interval).values({Interval.start_point: 10}) + +However, when using a composite hybrid like ``Interval.length``, this +hybrid represents more than one column. We can set up a handler that will +accommodate a value passed in the VALUES expression which can affect +this, using the :meth:`.hybrid_property.update_expression` decorator. +A handler that works similarly to our setter would be:: + + from typing import List, Tuple, Any + + + class Interval(Base): + # ... + + @hybrid_property + def length(self) -> int: + return self.end - self.start + + @length.inplace.setter + def _length_setter(self, value: int) -> None: + self.end = self.start + value + + @length.inplace.update_expression + def _length_update_expression( + cls, value: Any + ) -> List[Tuple[Any, Any]]: + return [(cls.end, cls.start + value)] + +Above, if we use ``Interval.length`` in an UPDATE expression, we get +a hybrid SET expression: + +.. sourcecode:: pycon+sql + + + >>> from sqlalchemy import update + >>> print(update(Interval).values({Interval.length: 25})) + {printsql}UPDATE interval SET "end"=(interval.start + :start_1) + +This SET expression is accommodated by the ORM automatically. + +.. 
seealso:: + + :ref:`orm_expression_update_delete` - includes background on ORM-enabled + UPDATE statements + + +Working with Relationships +-------------------------- + +There's no essential difference when creating hybrids that work with +related objects as opposed to column-based data. The need for distinct +expressions tends to be greater. The two variants we'll illustrate +are the "join-dependent" hybrid, and the "correlated subquery" hybrid. + +Join-Dependent Relationship Hybrid +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Consider the following declarative +mapping which relates a ``User`` to a ``SavingsAccount``:: + + from __future__ import annotations + + from decimal import Decimal + from typing import cast + from typing import List + from typing import Optional + + from sqlalchemy import ForeignKey + from sqlalchemy import Numeric + from sqlalchemy import String + from sqlalchemy import SQLColumnExpression + from sqlalchemy.ext.hybrid import hybrid_property + from sqlalchemy.orm import DeclarativeBase + from sqlalchemy.orm import Mapped + from sqlalchemy.orm import mapped_column + from sqlalchemy.orm import relationship + + + class Base(DeclarativeBase): + pass + + + class SavingsAccount(Base): + __tablename__ = "account" + id: Mapped[int] = mapped_column(primary_key=True) + user_id: Mapped[int] = mapped_column(ForeignKey("user.id")) + balance: Mapped[Decimal] = mapped_column(Numeric(15, 5)) + + owner: Mapped[User] = relationship(back_populates="accounts") + + + class User(Base): + __tablename__ = "user" + id: Mapped[int] = mapped_column(primary_key=True) + name: Mapped[str] = mapped_column(String(100)) + + accounts: Mapped[List[SavingsAccount]] = relationship( + back_populates="owner", lazy="selectin" + ) + + @hybrid_property + def balance(self) -> Optional[Decimal]: + if self.accounts: + return self.accounts[0].balance + else: + return None + + @balance.inplace.setter + def _balance_setter(self, value: Optional[Decimal]) -> None: + assert value is not None + + if not self.accounts: + account = SavingsAccount(owner=self) + else: + account = self.accounts[0] + account.balance = value + + @balance.inplace.expression + @classmethod + def _balance_expression(cls) -> SQLColumnExpression[Optional[Decimal]]: + return cast( + "SQLColumnExpression[Optional[Decimal]]", + SavingsAccount.balance, + ) + +The above hybrid property ``balance`` works with the first +``SavingsAccount`` entry in the list of accounts for this user. The +in-Python getter/setter methods can treat ``accounts`` as a Python +list available on ``self``. + +.. tip:: The ``User.balance`` getter in the above example accesses the + ``self.accounts`` collection, which will normally be loaded via the + :func:`.selectinload` loader strategy configured on the ``User.accounts`` + :func:`_orm.relationship`. The default loader strategy when not otherwise + stated on :func:`_orm.relationship` is :func:`.lazyload`, which emits SQL on + demand. When using asyncio, on-demand loaders such as :func:`.lazyload` are + not supported, so care should be taken to ensure the ``self.accounts`` + collection is accessible to this hybrid accessor when using asyncio. + +At the expression level, it's expected that the ``User`` class will + be used in an appropriate context such that an appropriate join to +``SavingsAccount`` will be present: + +.. sourcecode:: pycon+sql + + >>> from sqlalchemy import select + >>> print( + ... select(User, User.balance) + ... .join(User.accounts) + ... .filter(User.balance > 5000) + ... 
) + {printsql}SELECT "user".id AS user_id, "user".name AS user_name, + account.balance AS account_balance + FROM "user" JOIN account ON "user".id = account.user_id + WHERE account.balance > :balance_1 + +Note however, that while the instance level accessors need to worry +about whether ``self.accounts`` is even present, this issue expresses +itself differently at the SQL expression level, where we basically +would use an outer join: + +.. sourcecode:: pycon+sql + + >>> from sqlalchemy import select + >>> from sqlalchemy import or_ + >>> print( + ... select(User, User.balance) + ... .outerjoin(User.accounts) + ... .filter(or_(User.balance < 5000, User.balance == None)) + ... ) + {printsql}SELECT "user".id AS user_id, "user".name AS user_name, + account.balance AS account_balance + FROM "user" LEFT OUTER JOIN account ON "user".id = account.user_id + WHERE account.balance < :balance_1 OR account.balance IS NULL + +Correlated Subquery Relationship Hybrid +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +We can, of course, forego being dependent on the enclosing query's usage +of joins in favor of the correlated subquery, which can portably be packed +into a single column expression. A correlated subquery is more portable, but +often performs more poorly at the SQL level. Using the same technique +illustrated at :ref:`mapper_column_property_sql_expressions`, +we can adjust our ``SavingsAccount`` example to aggregate the balances for +*all* accounts, and use a correlated subquery for the column expression:: + + from __future__ import annotations + + from decimal import Decimal + from typing import List + + from sqlalchemy import ForeignKey + from sqlalchemy import func + from sqlalchemy import Numeric + from sqlalchemy import select + from sqlalchemy import SQLColumnExpression + from sqlalchemy import String + from sqlalchemy.ext.hybrid import hybrid_property + from sqlalchemy.orm import DeclarativeBase + from sqlalchemy.orm import Mapped + from sqlalchemy.orm import mapped_column + from sqlalchemy.orm import relationship + + + class Base(DeclarativeBase): + pass + + + class SavingsAccount(Base): + __tablename__ = "account" + id: Mapped[int] = mapped_column(primary_key=True) + user_id: Mapped[int] = mapped_column(ForeignKey("user.id")) + balance: Mapped[Decimal] = mapped_column(Numeric(15, 5)) + + owner: Mapped[User] = relationship(back_populates="accounts") + + + class User(Base): + __tablename__ = "user" + id: Mapped[int] = mapped_column(primary_key=True) + name: Mapped[str] = mapped_column(String(100)) + + accounts: Mapped[List[SavingsAccount]] = relationship( + back_populates="owner", lazy="selectin" + ) + + @hybrid_property + def balance(self) -> Decimal: + return sum( + (acc.balance for acc in self.accounts), start=Decimal("0") + ) + + @balance.inplace.expression + @classmethod + def _balance_expression(cls) -> SQLColumnExpression[Decimal]: + return ( + select(func.sum(SavingsAccount.balance)) + .where(SavingsAccount.user_id == cls.id) + .label("total_balance") + ) + +The above recipe will give us the ``balance`` column which renders +a correlated SELECT: + +.. sourcecode:: pycon+sql + + >>> from sqlalchemy import select + >>> print(select(User).filter(User.balance > 400)) + {printsql}SELECT "user".id, "user".name + FROM "user" + WHERE ( + SELECT sum(account.balance) AS sum_1 FROM account + WHERE account.user_id = "user".id + ) > :param_1 + + +.. 
_hybrid_custom_comparators: + +Building Custom Comparators +--------------------------- + +The hybrid property also includes a helper that allows construction of +custom comparators. A comparator object allows one to customize the +behavior of each SQLAlchemy expression operator individually. They +are useful when creating custom types that have some highly +idiosyncratic behavior on the SQL side. + +.. note:: The :meth:`.hybrid_property.comparator` decorator introduced + in this section **replaces** the use of the + :meth:`.hybrid_property.expression` decorator. + They cannot be used together. + +The example class below allows case-insensitive comparisons on the attribute +named ``word_insensitive``:: + + from __future__ import annotations + + from typing import Any + + from sqlalchemy import ColumnElement + from sqlalchemy import func + from sqlalchemy.ext.hybrid import Comparator + from sqlalchemy.ext.hybrid import hybrid_property + from sqlalchemy.orm import DeclarativeBase + from sqlalchemy.orm import Mapped + from sqlalchemy.orm import mapped_column + + + class Base(DeclarativeBase): + pass + + + class CaseInsensitiveComparator(Comparator[str]): + def __eq__(self, other: Any) -> ColumnElement[bool]: # type: ignore[override] # noqa: E501 + return func.lower(self.__clause_element__()) == func.lower(other) + + + class SearchWord(Base): + __tablename__ = "searchword" + + id: Mapped[int] = mapped_column(primary_key=True) + word: Mapped[str] + + @hybrid_property + def word_insensitive(self) -> str: + return self.word.lower() + + @word_insensitive.inplace.comparator + @classmethod + def _word_insensitive_comparator(cls) -> CaseInsensitiveComparator: + return CaseInsensitiveComparator(cls.word) + +Above, SQL expressions against ``word_insensitive`` will apply the ``LOWER()`` +SQL function to both sides: + +.. sourcecode:: pycon+sql + + >>> from sqlalchemy import select + >>> print(select(SearchWord).filter_by(word_insensitive="Trucks")) + {printsql}SELECT searchword.id, searchword.word + FROM searchword + WHERE lower(searchword.word) = lower(:lower_1) + + +The ``CaseInsensitiveComparator`` above implements part of the +:class:`.ColumnOperators` interface. A "coercion" operation like +lowercasing can be applied to all comparison operations (i.e. ``eq``, +``lt``, ``gt``, etc.) using :meth:`.Operators.operate`:: + + class CaseInsensitiveComparator(Comparator): + def operate(self, op, other, **kwargs): + return op( + func.lower(self.__clause_element__()), + func.lower(other), + **kwargs, + ) + +.. _hybrid_reuse_subclass: + +Reusing Hybrid Properties across Subclasses +------------------------------------------- + +A hybrid can be referred to from a superclass, to allow modifying +methods like :meth:`.hybrid_property.getter`, :meth:`.hybrid_property.setter` +to be used to redefine those methods on a subclass. This is similar to +how the standard Python ``@property`` object works:: + + class FirstNameOnly(Base): + # ... + + first_name: Mapped[str] + + @hybrid_property + def name(self) -> str: + return self.first_name + + @name.inplace.setter + def _name_setter(self, value: str) -> None: + self.first_name = value + + + class FirstNameLastName(FirstNameOnly): + # ... 
+ + last_name: Mapped[str] + + # 'inplace' is not used here; calling getter creates a copy + # of FirstNameOnly.name that is local to FirstNameLastName + @FirstNameOnly.name.getter + def name(self) -> str: + return self.first_name + " " + self.last_name + + @name.inplace.setter + def _name_setter(self, value: str) -> None: + self.first_name, self.last_name = value.split(" ", 1) + +Above, the ``FirstNameLastName`` class refers to the hybrid from +``FirstNameOnly.name`` to repurpose its getter and setter for the subclass. + +When overriding :meth:`.hybrid_property.expression` and +:meth:`.hybrid_property.comparator` alone as the first reference to the +superclass, these names conflict with the same-named accessors on the class- +level :class:`.QueryableAttribute` object returned at the class level. To +override these methods when referring directly to the parent class descriptor, +add the special qualifier :attr:`.hybrid_property.overrides`, which will de- +reference the instrumented attribute back to the hybrid object:: + + class FirstNameLastName(FirstNameOnly): + # ... + + last_name: Mapped[str] + + @FirstNameOnly.name.overrides.expression + @classmethod + def name(cls): + return func.concat(cls.first_name, " ", cls.last_name) + +Hybrid Value Objects +-------------------- + +Note in our previous example, if we were to compare the ``word_insensitive`` +attribute of a ``SearchWord`` instance to a plain Python string, the plain +Python string would not be coerced to lower case - the +``CaseInsensitiveComparator`` we built, being returned by +``@word_insensitive.comparator``, only applies to the SQL side. + +A more comprehensive form of the custom comparator is to construct a *Hybrid +Value Object*. This technique applies the target value or expression to a value +object which is then returned by the accessor in all cases. The value object +allows control of all operations upon the value as well as how compared values +are treated, both on the SQL expression side as well as the Python value side. +Replacing the previous ``CaseInsensitiveComparator`` class with a new +``CaseInsensitiveWord`` class:: + + class CaseInsensitiveWord(Comparator): + "Hybrid value representing a lower case representation of a word." + + def __init__(self, word): + if isinstance(word, str): + self.word = word.lower() + elif isinstance(word, CaseInsensitiveWord): + self.word = word.word + else: + self.word = func.lower(word) + + def operate(self, op, other, **kwargs): + if not isinstance(other, CaseInsensitiveWord): + other = CaseInsensitiveWord(other) + return op(self.word, other.word, **kwargs) + + def __clause_element__(self): + return self.word + + def __str__(self): + return self.word + + key = "word" + "Label to apply to Query tuple results" + +Above, the ``CaseInsensitiveWord`` object represents ``self.word``, which may +be a SQL function, or may be a Python native. By overriding ``operate()`` and +``__clause_element__()`` to work in terms of ``self.word``, all comparison +operations will work against the "converted" form of ``word``, whether it be +SQL side or Python side.
Our ``SearchWord`` class can now deliver the +``CaseInsensitiveWord`` object unconditionally from a single hybrid call:: + + class SearchWord(Base): + __tablename__ = "searchword" + id: Mapped[int] = mapped_column(primary_key=True) + word: Mapped[str] + + @hybrid_property + def word_insensitive(self) -> CaseInsensitiveWord: + return CaseInsensitiveWord(self.word) + +The ``word_insensitive`` attribute now has case-insensitive comparison behavior +universally, including SQL expression vs. Python expression (note the Python +value is converted to lower case on the Python side here): + +.. sourcecode:: pycon+sql + + >>> print(select(SearchWord).filter_by(word_insensitive="Trucks")) + {printsql}SELECT searchword.id AS searchword_id, searchword.word AS searchword_word + FROM searchword + WHERE lower(searchword.word) = :lower_1 + +SQL expression versus SQL expression: + +.. sourcecode:: pycon+sql + + >>> from sqlalchemy.orm import aliased + >>> sw1 = aliased(SearchWord) + >>> sw2 = aliased(SearchWord) + >>> print( + ... select(sw1.word_insensitive, sw2.word_insensitive).filter( + ... sw1.word_insensitive > sw2.word_insensitive + ... ) + ... ) + {printsql}SELECT lower(searchword_1.word) AS lower_1, + lower(searchword_2.word) AS lower_2 + FROM searchword AS searchword_1, searchword AS searchword_2 + WHERE lower(searchword_1.word) > lower(searchword_2.word) + +Python only expression:: + + >>> ws1 = SearchWord(word="SomeWord") + >>> ws1.word_insensitive == "sOmEwOrD" + True + >>> ws1.word_insensitive == "XOmEwOrX" + False + >>> print(ws1.word_insensitive) + someword + +The Hybrid Value pattern is very useful for any kind of value that may have +multiple representations, such as timestamps, time deltas, units of +measurement, currencies and encrypted passwords. + +.. seealso:: + + `Hybrids and Value Agnostic Types + `_ + - on the techspot.zzzeek.org blog + + `Value Agnostic Types, Part II + `_ - + on the techspot.zzzeek.org blog + + +""" # noqa + +from __future__ import annotations + +from typing import Any +from typing import Callable +from typing import cast +from typing import Generic +from typing import List +from typing import Optional +from typing import overload +from typing import Sequence +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING +from typing import TypeVar +from typing import Union + +from .. 
import util +from ..orm import attributes +from ..orm import InspectionAttrExtensionType +from ..orm import interfaces +from ..orm import ORMDescriptor +from ..orm.attributes import QueryableAttribute +from ..sql import roles +from ..sql._typing import is_has_clause_element +from ..sql.elements import ColumnElement +from ..sql.elements import SQLCoreOperations +from ..util.typing import Concatenate +from ..util.typing import Literal +from ..util.typing import ParamSpec +from ..util.typing import Protocol +from ..util.typing import Self + +if TYPE_CHECKING: + from ..orm.interfaces import MapperProperty + from ..orm.util import AliasedInsp + from ..sql import SQLColumnExpression + from ..sql._typing import _ColumnExpressionArgument + from ..sql._typing import _DMLColumnArgument + from ..sql._typing import _HasClauseElement + from ..sql._typing import _InfoType + from ..sql.operators import OperatorType + +_P = ParamSpec("_P") +_R = TypeVar("_R") +_T = TypeVar("_T", bound=Any) +_TE = TypeVar("_TE", bound=Any) +_T_co = TypeVar("_T_co", bound=Any, covariant=True) +_T_con = TypeVar("_T_con", bound=Any, contravariant=True) + + +class HybridExtensionType(InspectionAttrExtensionType): + HYBRID_METHOD = "HYBRID_METHOD" + """Symbol indicating an :class:`InspectionAttr` that's + of type :class:`.hybrid_method`. + + Is assigned to the :attr:`.InspectionAttr.extension_type` + attribute. + + .. seealso:: + + :attr:`_orm.Mapper.all_orm_attributes` + + """ + + HYBRID_PROPERTY = "HYBRID_PROPERTY" + """Symbol indicating an :class:`InspectionAttr` that's + of type :class:`.hybrid_property`. + + Is assigned to the :attr:`.InspectionAttr.extension_type` + attribute. + + .. seealso:: + + :attr:`_orm.Mapper.all_orm_attributes` + + """ + + +class _HybridGetterType(Protocol[_T_co]): + def __call__(s, self: Any) -> _T_co: ... + + +class _HybridSetterType(Protocol[_T_con]): + def __call__(s, self: Any, value: _T_con) -> None: ... + + +class _HybridUpdaterType(Protocol[_T_con]): + def __call__( + s, + cls: Any, + value: Union[_T_con, _ColumnExpressionArgument[_T_con]], + ) -> List[Tuple[_DMLColumnArgument, Any]]: ... + + +class _HybridDeleterType(Protocol[_T_co]): + def __call__(s, self: Any) -> None: ... + + +class _HybridExprCallableType(Protocol[_T_co]): + def __call__( + s, cls: Any + ) -> Union[_HasClauseElement[_T_co], SQLColumnExpression[_T_co]]: ... + + +class _HybridComparatorCallableType(Protocol[_T]): + def __call__(self, cls: Any) -> Comparator[_T]: ... + + +class _HybridClassLevelAccessor(QueryableAttribute[_T]): + """Describe the object returned by a hybrid_property() when + called as a class-level descriptor. + + """ + + if TYPE_CHECKING: + + def getter( + self, fget: _HybridGetterType[_T] + ) -> hybrid_property[_T]: ... + + def setter( + self, fset: _HybridSetterType[_T] + ) -> hybrid_property[_T]: ... + + def deleter( + self, fdel: _HybridDeleterType[_T] + ) -> hybrid_property[_T]: ... + + @property + def overrides(self) -> hybrid_property[_T]: ... + + def update_expression( + self, meth: _HybridUpdaterType[_T] + ) -> hybrid_property[_T]: ... + + +class hybrid_method(interfaces.InspectionAttrInfo, Generic[_P, _R]): + """A decorator which allows definition of a Python object method with both + instance-level and class-level behavior.
+ + """ + + is_attribute = True + extension_type = HybridExtensionType.HYBRID_METHOD + + def __init__( + self, + func: Callable[Concatenate[Any, _P], _R], + expr: Optional[ + Callable[Concatenate[Any, _P], SQLCoreOperations[_R]] + ] = None, + ): + """Create a new :class:`.hybrid_method`. + + Usage is typically via decorator:: + + from sqlalchemy.ext.hybrid import hybrid_method + + + class SomeClass: + @hybrid_method + def value(self, x, y): + return self._value + x + y + + @value.expression + @classmethod + def value(cls, x, y): + return func.some_function(cls._value, x, y) + + """ + self.func = func + if expr is not None: + self.expression(expr) + else: + self.expression(func) # type: ignore + + @property + def inplace(self) -> Self: + """Return the inplace mutator for this :class:`.hybrid_method`. + + The :class:`.hybrid_method` class already performs "in place" mutation + when the :meth:`.hybrid_method.expression` decorator is called, + so this attribute returns Self. + + .. versionadded:: 2.0.4 + + .. seealso:: + + :ref:`hybrid_pep484_naming` + + """ + return self + + @overload + def __get__( + self, instance: Literal[None], owner: Type[object] + ) -> Callable[_P, SQLCoreOperations[_R]]: ... + + @overload + def __get__( + self, instance: object, owner: Type[object] + ) -> Callable[_P, _R]: ... + + def __get__( + self, instance: Optional[object], owner: Type[object] + ) -> Union[Callable[_P, _R], Callable[_P, SQLCoreOperations[_R]]]: + if instance is None: + return self.expr.__get__(owner, owner) # type: ignore + else: + return self.func.__get__(instance, owner) # type: ignore + + def expression( + self, expr: Callable[Concatenate[Any, _P], SQLCoreOperations[_R]] + ) -> hybrid_method[_P, _R]: + """Provide a modifying decorator that defines a + SQL-expression producing method.""" + + self.expr = expr + if not self.expr.__doc__: + self.expr.__doc__ = self.func.__doc__ + return self + + +def _unwrap_classmethod(meth: _T) -> _T: + if isinstance(meth, classmethod): + return meth.__func__ # type: ignore + else: + return meth + + +class hybrid_property(interfaces.InspectionAttrInfo, ORMDescriptor[_T]): + """A decorator which allows definition of a Python descriptor with both + instance-level and class-level behavior. + + """ + + is_attribute = True + extension_type = HybridExtensionType.HYBRID_PROPERTY + + __name__: str + + def __init__( + self, + fget: _HybridGetterType[_T], + fset: Optional[_HybridSetterType[_T]] = None, + fdel: Optional[_HybridDeleterType[_T]] = None, + expr: Optional[_HybridExprCallableType[_T]] = None, + custom_comparator: Optional[Comparator[_T]] = None, + update_expr: Optional[_HybridUpdaterType[_T]] = None, + ): + """Create a new :class:`.hybrid_property`. + + Usage is typically via decorator:: + + from sqlalchemy.ext.hybrid import hybrid_property + + + class SomeClass: + @hybrid_property + def value(self): + return self._value + + @value.setter + def value(self, value): + self._value = value + + """ + self.fget = fget + self.fset = fset + self.fdel = fdel + self.expr = _unwrap_classmethod(expr) + self.custom_comparator = _unwrap_classmethod(custom_comparator) + self.update_expr = _unwrap_classmethod(update_expr) + util.update_wrapper(self, fget) # type: ignore[arg-type] + + @overload + def __get__(self, instance: Any, owner: Literal[None]) -> Self: ... + + @overload + def __get__( + self, instance: Literal[None], owner: Type[object] + ) -> _HybridClassLevelAccessor[_T]: ... + + @overload + def __get__(self, instance: object, owner: Type[object]) -> _T: ... 
+ + def __get__( + self, instance: Optional[object], owner: Optional[Type[object]] + ) -> Union[hybrid_property[_T], _HybridClassLevelAccessor[_T], _T]: + if owner is None: + return self + elif instance is None: + return self._expr_comparator(owner) + else: + return self.fget(instance) + + def __set__(self, instance: object, value: Any) -> None: + if self.fset is None: + raise AttributeError("can't set attribute") + self.fset(instance, value) + + def __delete__(self, instance: object) -> None: + if self.fdel is None: + raise AttributeError("can't delete attribute") + self.fdel(instance) + + def _copy(self, **kw: Any) -> hybrid_property[_T]: + defaults = { + key: value + for key, value in self.__dict__.items() + if not key.startswith("_") + } + defaults.update(**kw) + return type(self)(**defaults) + + @property + def overrides(self) -> Self: + """Prefix for a method that is overriding an existing attribute. + + The :attr:`.hybrid_property.overrides` accessor just returns + this hybrid object, which when called at the class level from + a parent class, will de-reference the "instrumented attribute" + normally returned at this level, and allow modifying decorators + like :meth:`.hybrid_property.expression` and + :meth:`.hybrid_property.comparator` + to be used without conflicting with the same-named attributes + normally present on the :class:`.QueryableAttribute`:: + + class SuperClass: + # ... + + @hybrid_property + def foobar(self): + return self._foobar + + + class SubClass(SuperClass): + # ... + + @SuperClass.foobar.overrides.expression + def foobar(cls): + return func.subfoobar(self._foobar) + + .. versionadded:: 1.2 + + .. seealso:: + + :ref:`hybrid_reuse_subclass` + + """ + return self + + class _InPlace(Generic[_TE]): + """A builder helper for .hybrid_property. + + .. versionadded:: 2.0.4 + + """ + + __slots__ = ("attr",) + + def __init__(self, attr: hybrid_property[_TE]): + self.attr = attr + + def _set(self, **kw: Any) -> hybrid_property[_TE]: + for k, v in kw.items(): + setattr(self.attr, k, _unwrap_classmethod(v)) + return self.attr + + def getter(self, fget: _HybridGetterType[_TE]) -> hybrid_property[_TE]: + return self._set(fget=fget) + + def setter(self, fset: _HybridSetterType[_TE]) -> hybrid_property[_TE]: + return self._set(fset=fset) + + def deleter( + self, fdel: _HybridDeleterType[_TE] + ) -> hybrid_property[_TE]: + return self._set(fdel=fdel) + + def expression( + self, expr: _HybridExprCallableType[_TE] + ) -> hybrid_property[_TE]: + return self._set(expr=expr) + + def comparator( + self, comparator: _HybridComparatorCallableType[_TE] + ) -> hybrid_property[_TE]: + return self._set(custom_comparator=comparator) + + def update_expression( + self, meth: _HybridUpdaterType[_TE] + ) -> hybrid_property[_TE]: + return self._set(update_expr=meth) + + @property + def inplace(self) -> _InPlace[_T]: + """Return the inplace mutator for this :class:`.hybrid_property`. + + This is to allow in-place mutation of the hybrid, allowing the first + hybrid method of a certain name to be re-used in order to add + more methods without having to name those methods the same, e.g.:: + + class Interval(Base): + # ... + + @hybrid_property + def radius(self) -> float: + return abs(self.length) / 2 + + @radius.inplace.setter + def _radius_setter(self, value: float) -> None: + self.length = value * 2 + + @radius.inplace.expression + def _radius_expression(cls) -> ColumnElement[float]: + return type_coerce(func.abs(cls.length) / 2, Float) + + .. versionadded:: 2.0.4 + + .. 
seealso:: + + :ref:`hybrid_pep484_naming` + + """ + return hybrid_property._InPlace(self) + + def getter(self, fget: _HybridGetterType[_T]) -> hybrid_property[_T]: + """Provide a modifying decorator that defines a getter method. + + .. versionadded:: 1.2 + + """ + + return self._copy(fget=fget) + + def setter(self, fset: _HybridSetterType[_T]) -> hybrid_property[_T]: + """Provide a modifying decorator that defines a setter method.""" + + return self._copy(fset=fset) + + def deleter(self, fdel: _HybridDeleterType[_T]) -> hybrid_property[_T]: + """Provide a modifying decorator that defines a deletion method.""" + + return self._copy(fdel=fdel) + + def expression( + self, expr: _HybridExprCallableType[_T] + ) -> hybrid_property[_T]: + """Provide a modifying decorator that defines a SQL-expression + producing method. + + When a hybrid is invoked at the class level, the SQL expression given + here is wrapped inside of a specialized :class:`.QueryableAttribute`, + which is the same kind of object used by the ORM to represent other + mapped attributes. The reason for this is so that other class-level + attributes such as docstrings and a reference to the hybrid itself may + be maintained within the structure that's returned, without any + modifications to the original SQL expression passed in. + + .. note:: + + When referring to a hybrid property from an owning class (e.g. + ``SomeClass.some_hybrid``), an instance of + :class:`.QueryableAttribute` is returned, representing the + expression or comparator object as well as this hybrid object. + However, that object itself has accessors called ``expression`` and + ``comparator``; so when attempting to override these decorators on a + subclass, it may be necessary to qualify it using the + :attr:`.hybrid_property.overrides` modifier first. See that + modifier for details. + + .. seealso:: + + :ref:`hybrid_distinct_expression` + + """ + + return self._copy(expr=expr) + + def comparator( + self, comparator: _HybridComparatorCallableType[_T] + ) -> hybrid_property[_T]: + """Provide a modifying decorator that defines a custom + comparator producing method. + + The return value of the decorated method should be an instance of + :class:`~.hybrid.Comparator`. + + .. note:: The :meth:`.hybrid_property.comparator` decorator + **replaces** the use of the :meth:`.hybrid_property.expression` + decorator. They cannot be used together. + + When a hybrid is invoked at the class level, the + :class:`~.hybrid.Comparator` object given here is wrapped inside of a + specialized :class:`.QueryableAttribute`, which is the same kind of + object used by the ORM to represent other mapped attributes. The + reason for this is so that other class-level attributes such as + docstrings and a reference to the hybrid itself may be maintained + within the structure that's returned, without any modifications to the + original comparator object passed in. + + .. note:: + + When referring to a hybrid property from an owning class (e.g. + ``SomeClass.some_hybrid``), an instance of + :class:`.QueryableAttribute` is returned, representing the + expression or comparator object as this hybrid object. However, + that object itself has accessors called ``expression`` and + ``comparator``; so when attempting to override these decorators on a + subclass, it may be necessary to qualify it using the + :attr:`.hybrid_property.overrides` modifier first. See that + modifier for details. 
+ + """ + return self._copy(custom_comparator=comparator) + + def update_expression( + self, meth: _HybridUpdaterType[_T] + ) -> hybrid_property[_T]: + """Provide a modifying decorator that defines an UPDATE tuple + producing method. + + The method accepts a single value, which is the value to be + rendered into the SET clause of an UPDATE statement. The method + should then process this value into individual column expressions + that fit into the ultimate SET clause, and return them as a + sequence of 2-tuples. Each tuple + contains a column expression as the key and a value to be rendered. + + E.g.:: + + class Person(Base): + # ... + + first_name = Column(String) + last_name = Column(String) + + @hybrid_property + def fullname(self): + return first_name + " " + last_name + + @fullname.update_expression + def fullname(cls, value): + fname, lname = value.split(" ", 1) + return [(cls.first_name, fname), (cls.last_name, lname)] + + .. versionadded:: 1.2 + + """ + return self._copy(update_expr=meth) + + @util.memoized_property + def _expr_comparator( + self, + ) -> Callable[[Any], _HybridClassLevelAccessor[_T]]: + if self.custom_comparator is not None: + return self._get_comparator(self.custom_comparator) + elif self.expr is not None: + return self._get_expr(self.expr) + else: + return self._get_expr(cast(_HybridExprCallableType[_T], self.fget)) + + def _get_expr( + self, expr: _HybridExprCallableType[_T] + ) -> Callable[[Any], _HybridClassLevelAccessor[_T]]: + def _expr(cls: Any) -> ExprComparator[_T]: + return ExprComparator(cls, expr(cls), self) + + util.update_wrapper(_expr, expr) + + return self._get_comparator(_expr) + + def _get_comparator( + self, comparator: Any + ) -> Callable[[Any], _HybridClassLevelAccessor[_T]]: + proxy_attr = attributes.create_proxied_attribute(self) + + def expr_comparator( + owner: Type[object], + ) -> _HybridClassLevelAccessor[_T]: + # because this is the descriptor protocol, we don't really know + # what our attribute name is. so search for it through the + # MRO. + for lookup in owner.__mro__: + if self.__name__ in lookup.__dict__: + if lookup.__dict__[self.__name__] is self: + name = self.__name__ + break + else: + name = attributes._UNKNOWN_ATTR_KEY # type: ignore[assignment] + + return cast( + "_HybridClassLevelAccessor[_T]", + proxy_attr( + owner, + name, + self, + comparator(owner), + doc=comparator.__doc__ or self.__doc__, + ), + ) + + return expr_comparator + + +class Comparator(interfaces.PropComparator[_T]): + """A helper class that allows easy construction of custom + :class:`~.orm.interfaces.PropComparator` + classes for usage with hybrids.""" + + def __init__( + self, expression: Union[_HasClauseElement[_T], SQLColumnExpression[_T]] + ): + self.expression = expression + + def __clause_element__(self) -> roles.ColumnsClauseRole: + expr = self.expression + if is_has_clause_element(expr): + ret_expr = expr.__clause_element__() + else: + if TYPE_CHECKING: + assert isinstance(expr, ColumnElement) + ret_expr = expr + + if TYPE_CHECKING: + # see test_hybrid->test_expression_isnt_clause_element + # that exercises the usual place this is caught if not + # true + assert isinstance(ret_expr, ColumnElement) + return ret_expr + + @util.non_memoized_property + def property(self) -> interfaces.MapperProperty[_T]: + raise NotImplementedError() + + def adapt_to_entity( + self, adapt_to_entity: AliasedInsp[Any] + ) -> Comparator[_T]: + # interesting.... 
+ return self + + +class ExprComparator(Comparator[_T]): + def __init__( + self, + cls: Type[Any], + expression: Union[_HasClauseElement[_T], SQLColumnExpression[_T]], + hybrid: hybrid_property[_T], + ): + self.cls = cls + self.expression = expression + self.hybrid = hybrid + + def __getattr__(self, key: str) -> Any: + return getattr(self.expression, key) + + @util.ro_non_memoized_property + def info(self) -> _InfoType: + return self.hybrid.info + + def _bulk_update_tuples( + self, value: Any + ) -> Sequence[Tuple[_DMLColumnArgument, Any]]: + if isinstance(self.expression, attributes.QueryableAttribute): + return self.expression._bulk_update_tuples(value) + elif self.hybrid.update_expr is not None: + return self.hybrid.update_expr(self.cls, value) + else: + return [(self.expression, value)] + + @util.non_memoized_property + def property(self) -> MapperProperty[_T]: + # this accessor is not normally used, however is accessed by things + # like ORM synonyms if the hybrid is used in this context; the + # .property attribute is not necessarily accessible + return self.expression.property # type: ignore + + def operate( + self, op: OperatorType, *other: Any, **kwargs: Any + ) -> ColumnElement[Any]: + return op(self.expression, *other, **kwargs) + + def reverse_operate( + self, op: OperatorType, other: Any, **kwargs: Any + ) -> ColumnElement[Any]: + return op(other, self.expression, **kwargs) # type: ignore diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/ext/indexable.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/ext/indexable.py new file mode 100644 index 0000000000000000000000000000000000000000..886069ce000b528aa7bb69fce72f177c42893c49 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/ext/indexable.py @@ -0,0 +1,345 @@ +# ext/indexable.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php +# mypy: ignore-errors + +"""Define attributes on ORM-mapped classes that have "index" attributes for +columns with :class:`_types.Indexable` types. + +"index" means the attribute is associated with an element of an +:class:`_types.Indexable` column with the predefined index to access it. +The :class:`_types.Indexable` types include types such as +:class:`_types.ARRAY`, :class:`_types.JSON` and +:class:`_postgresql.HSTORE`. + + + +The :mod:`~sqlalchemy.ext.indexable` extension provides +:class:`_schema.Column`-like interface for any element of an +:class:`_types.Indexable` typed column. In simple cases, it can be +treated as a :class:`_schema.Column` - mapped attribute. + +Synopsis +======== + +Given ``Person`` as a model with a primary key and JSON data field. +While this field may have any number of elements encoded within it, +we would like to refer to the element called ``name`` individually +as a dedicated attribute which behaves like a standalone column:: + + from sqlalchemy import Column, JSON, Integer + from sqlalchemy.ext.declarative import declarative_base + from sqlalchemy.ext.indexable import index_property + + Base = declarative_base() + + + class Person(Base): + __tablename__ = "person" + + id = Column(Integer, primary_key=True) + data = Column(JSON) + + name = index_property("data", "name") + +Above, the ``name`` attribute now behaves like a mapped column. 
We +can compose a new ``Person`` and set the value of ``name``:: + + >>> person = Person(name="Alchemist") + +The value is now accessible:: + + >>> person.name + 'Alchemist' + +Behind the scenes, the JSON field was initialized to a new blank dictionary +and the field was set:: + + >>> person.data + {'name': 'Alchemist'} + +The field is mutable in place:: + + >>> person.name = "Renamed" + >>> person.name + 'Renamed' + >>> person.data + {'name': 'Renamed'} + +When using :class:`.index_property`, the change that we make to the indexable +structure is also automatically tracked as history; we no longer need +to use :class:`~.mutable.MutableDict` in order to track this change +for the unit of work. + +Deletions work normally as well:: + + >>> del person.name + >>> person.data + {} + +Above, deletion of ``person.name`` deletes the value from the dictionary, +but not the dictionary itself. + +A missing key will produce ``AttributeError``:: + + >>> person = Person() + >>> person.name + AttributeError: 'name' + +Unless you set a default value:: + + >>> class Person(Base): + ... __tablename__ = "person" + ... + ... id = Column(Integer, primary_key=True) + ... data = Column(JSON) + ... + ... name = index_property("data", "name", default=None) # See default + + >>> person = Person() + >>> print(person.name) + None + + +The attributes are also accessible at the class level. +Below, we illustrate ``Person.name`` used to generate +an indexed SQL criteria:: + + >>> from sqlalchemy.orm import Session + >>> session = Session() + >>> query = session.query(Person).filter(Person.name == "Alchemist") + +The above query is equivalent to:: + + >>> query = session.query(Person).filter(Person.data["name"] == "Alchemist") + +Multiple :class:`.index_property` objects can be chained to produce +multiple levels of indexing:: + + from sqlalchemy import Column, JSON, Integer + from sqlalchemy.ext.declarative import declarative_base + from sqlalchemy.ext.indexable import index_property + + Base = declarative_base() + + + class Person(Base): + __tablename__ = "person" + + id = Column(Integer, primary_key=True) + data = Column(JSON) + + birthday = index_property("data", "birthday") + year = index_property("birthday", "year") + month = index_property("birthday", "month") + day = index_property("birthday", "day") + +Above, a query such as:: + + q = session.query(Person).filter(Person.year == "1980") + +On a PostgreSQL backend, the above query will render as: + +.. sourcecode:: sql + + SELECT person.id, person.data + FROM person + WHERE person.data -> %(data_1)s -> %(param_1)s = %(param_2)s + +Default Values +============== + +:class:`.index_property` includes special behaviors for when the indexed +data structure does not exist, and a set operation is called: + +* For an :class:`.index_property` that is given an integer index value, + the default data structure will be a Python list of ``None`` values, + at least as long as the index value; the value is then set at its + place in the list. This means for an index value of zero, the list + will be initialized to ``[None]`` before setting the given value, + and for an index value of five, the list will be initialized to + ``[None, None, None, None, None]`` before setting the fifth element + to the given value. Note that an existing list is **not** extended + in place to receive a value. + +* for an :class:`.index_property` that is given any other kind of index + value (e.g. strings usually), a Python dictionary is used as the + default data structure. 
+ +* The default data structure can be set to any Python callable using the + :paramref:`.index_property.datatype` parameter, overriding the previous + rules. + + +Subclassing +=========== + +:class:`.index_property` can be subclassed, in particular for the common +use case of providing coercion of values or SQL expressions as they are +accessed. Below is a common recipe for use with a PostgreSQL JSON type, +where we want to also include automatic casting plus ``astext()``:: + + class pg_json_property(index_property): + def __init__(self, attr_name, index, cast_type): + super(pg_json_property, self).__init__(attr_name, index) + self.cast_type = cast_type + + def expr(self, model): + expr = super(pg_json_property, self).expr(model) + return expr.astext.cast(self.cast_type) + +The above subclass can be used with the PostgreSQL-specific +version of :class:`_postgresql.JSON`:: + + from sqlalchemy import Column, Integer + from sqlalchemy.ext.declarative import declarative_base + from sqlalchemy.dialects.postgresql import JSON + + Base = declarative_base() + + + class Person(Base): + __tablename__ = "person" + + id = Column(Integer, primary_key=True) + data = Column(JSON) + + age = pg_json_property("data", "age", Integer) + +The ``age`` attribute at the instance level works as before; however +when rendering SQL, PostgreSQL's ``->>`` operator will be used +for indexed access, instead of the usual index operator of ``->``:: + + >>> query = session.query(Person).filter(Person.age < 20) + +The above query will render: +.. sourcecode:: sql + + SELECT person.id, person.data + FROM person + WHERE CAST(person.data ->> %(data_1)s AS INTEGER) < %(param_1)s + +""" # noqa +from .. import inspect +from ..ext.hybrid import hybrid_property +from ..orm.attributes import flag_modified + + +__all__ = ["index_property"] + + +class index_property(hybrid_property): # noqa + """A property generator. The generated property describes an object + attribute that corresponds to an :class:`_types.Indexable` + column. + + .. seealso:: + + :mod:`sqlalchemy.ext.indexable` + + """ + + _NO_DEFAULT_ARGUMENT = object() + + def __init__( + self, + attr_name, + index, + default=_NO_DEFAULT_ARGUMENT, + datatype=None, + mutable=True, + onebased=True, + ): + """Create a new :class:`.index_property`. + + :param attr_name: + An attribute name of an `Indexable` typed column, or other + attribute that returns an indexable structure. + :param index: + The index to be used for getting and setting this value. This + should be the Python-side index value for integers. + :param default: + A value which will be returned instead of `AttributeError` + when there is not a value at given index. + :param datatype: default datatype to use when the field is empty. + By default, this is derived from the type of index used; a + Python list for an integer index, or a Python dictionary for + any other style of index. For a list, the list will be + initialized to a list of None values that is at least + ``index`` elements long. + :param mutable: if False, writes and deletes to the attribute will + be disallowed. + :param onebased: assume the SQL representation of this value is + one-based; that is, the first index in SQL is 1, not zero. 
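        For illustration, a minimal usage sketch, assuming a declarative
        ``Base`` and a JSON-typed ``data`` column on a hypothetical
        ``Person`` class::

            class Person(Base):
                __tablename__ = "person"

                id = Column(Integer, primary_key=True)
                data = Column(JSON)

                # string index: a dict is created on first write; a missing
                # key returns None here instead of raising AttributeError
                nickname = index_property("data", "nickname", default=None)

                # mutable=False: reads work as above, but writes and deletes
                # to the attribute are disallowed
                label = index_property("data", "nickname", mutable=False)

        An integer index would instead default the underlying structure to a
        list of ``None`` values at least ``index`` elements long, as described
        for the ``datatype`` parameter above.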
+ """ + + if mutable: + super().__init__(self.fget, self.fset, self.fdel, self.expr) + else: + super().__init__(self.fget, None, None, self.expr) + self.attr_name = attr_name + self.index = index + self.default = default + is_numeric = isinstance(index, int) + onebased = is_numeric and onebased + + if datatype is not None: + self.datatype = datatype + else: + if is_numeric: + self.datatype = lambda: [None for x in range(index + 1)] + else: + self.datatype = dict + self.onebased = onebased + + def _fget_default(self, err=None): + if self.default == self._NO_DEFAULT_ARGUMENT: + raise AttributeError(self.attr_name) from err + else: + return self.default + + def fget(self, instance): + attr_name = self.attr_name + column_value = getattr(instance, attr_name) + if column_value is None: + return self._fget_default() + try: + value = column_value[self.index] + except (KeyError, IndexError) as err: + return self._fget_default(err) + else: + return value + + def fset(self, instance, value): + attr_name = self.attr_name + column_value = getattr(instance, attr_name, None) + if column_value is None: + column_value = self.datatype() + setattr(instance, attr_name, column_value) + column_value[self.index] = value + setattr(instance, attr_name, column_value) + if attr_name in inspect(instance).mapper.attrs: + flag_modified(instance, attr_name) + + def fdel(self, instance): + attr_name = self.attr_name + column_value = getattr(instance, attr_name) + if column_value is None: + raise AttributeError(self.attr_name) + try: + del column_value[self.index] + except KeyError as err: + raise AttributeError(self.attr_name) from err + else: + setattr(instance, attr_name, column_value) + flag_modified(instance, attr_name) + + def expr(self, model): + column = getattr(model, self.attr_name) + index = self.index + if self.onebased: + index += 1 + return column[index] diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/ext/instrumentation.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/ext/instrumentation.py new file mode 100644 index 0000000000000000000000000000000000000000..8bb01985ecc5bdb06148bda91a3962679e629c65 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/ext/instrumentation.py @@ -0,0 +1,450 @@ +# ext/instrumentation.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php +# mypy: ignore-errors + +"""Extensible class instrumentation. + +The :mod:`sqlalchemy.ext.instrumentation` package provides for alternate +systems of class instrumentation within the ORM. Class instrumentation +refers to how the ORM places attributes on the class which maintain +data and track changes to that data, as well as event hooks installed +on the class. + +.. note:: + The extension package is provided for the benefit of integration + with other object management packages, which already perform + their own instrumentation. It is not intended for general use. + +For examples of how the instrumentation extension is used, +see the example :ref:`examples_instrumentation`. + +""" +import weakref + +from .. 
import util +from ..orm import attributes +from ..orm import base as orm_base +from ..orm import collections +from ..orm import exc as orm_exc +from ..orm import instrumentation as orm_instrumentation +from ..orm import util as orm_util +from ..orm.instrumentation import _default_dict_getter +from ..orm.instrumentation import _default_manager_getter +from ..orm.instrumentation import _default_opt_manager_getter +from ..orm.instrumentation import _default_state_getter +from ..orm.instrumentation import ClassManager +from ..orm.instrumentation import InstrumentationFactory + + +INSTRUMENTATION_MANAGER = "__sa_instrumentation_manager__" +"""Attribute, elects custom instrumentation when present on a mapped class. + +Allows a class to specify a slightly or wildly different technique for +tracking changes made to mapped attributes and collections. + +Only one instrumentation implementation is allowed in a given object +inheritance hierarchy. + +The value of this attribute must be a callable and will be passed a class +object. The callable must return one of: + + - An instance of an :class:`.InstrumentationManager` or subclass + - An object implementing all or some of InstrumentationManager (TODO) + - A dictionary of callables, implementing all or some of the above (TODO) + - An instance of a :class:`.ClassManager` or subclass + +This attribute is consulted by SQLAlchemy instrumentation +resolution, once the :mod:`sqlalchemy.ext.instrumentation` module +has been imported. If custom finders are installed in the global +instrumentation_finders list, they may or may not choose to honor this +attribute. + +""" + + +def find_native_user_instrumentation_hook(cls): + """Find user-specified instrumentation management for a class.""" + return getattr(cls, INSTRUMENTATION_MANAGER, None) + + +instrumentation_finders = [find_native_user_instrumentation_hook] +"""An extensible sequence of callables which return instrumentation +implementations + +When a class is registered, each callable will be passed a class object. +If None is returned, the +next finder in the sequence is consulted. Otherwise the return must be an +instrumentation factory that follows the same guidelines as +sqlalchemy.ext.instrumentation.INSTRUMENTATION_MANAGER. + +By default, the only finder is find_native_user_instrumentation_hook, which +searches for INSTRUMENTATION_MANAGER. If all finders return None, standard +ClassManager instrumentation is used. + +""" + + +class ExtendedInstrumentationRegistry(InstrumentationFactory): + """Extends :class:`.InstrumentationFactory` with additional + bookkeeping, to accommodate multiple types of + class managers. 
+ + """ + + _manager_finders = weakref.WeakKeyDictionary() + _state_finders = weakref.WeakKeyDictionary() + _dict_finders = weakref.WeakKeyDictionary() + _extended = False + + def _locate_extended_factory(self, class_): + for finder in instrumentation_finders: + factory = finder(class_) + if factory is not None: + manager = self._extended_class_manager(class_, factory) + return manager, factory + else: + return None, None + + def _check_conflicts(self, class_, factory): + existing_factories = self._collect_management_factories_for( + class_ + ).difference([factory]) + if existing_factories: + raise TypeError( + "multiple instrumentation implementations specified " + "in %s inheritance hierarchy: %r" + % (class_.__name__, list(existing_factories)) + ) + + def _extended_class_manager(self, class_, factory): + manager = factory(class_) + if not isinstance(manager, ClassManager): + manager = _ClassInstrumentationAdapter(class_, manager) + + if factory != ClassManager and not self._extended: + # somebody invoked a custom ClassManager. + # reinstall global "getter" functions with the more + # expensive ones. + self._extended = True + _install_instrumented_lookups() + + self._manager_finders[class_] = manager.manager_getter() + self._state_finders[class_] = manager.state_getter() + self._dict_finders[class_] = manager.dict_getter() + return manager + + def _collect_management_factories_for(self, cls): + """Return a collection of factories in play or specified for a + hierarchy. + + Traverses the entire inheritance graph of a cls and returns a + collection of instrumentation factories for those classes. Factories + are extracted from active ClassManagers, if available, otherwise + instrumentation_finders is consulted. + + """ + hierarchy = util.class_hierarchy(cls) + factories = set() + for member in hierarchy: + manager = self.opt_manager_of_class(member) + if manager is not None: + factories.add(manager.factory) + else: + for finder in instrumentation_finders: + factory = finder(member) + if factory is not None: + break + else: + factory = None + factories.add(factory) + factories.discard(None) + return factories + + def unregister(self, class_): + super().unregister(class_) + if class_ in self._manager_finders: + del self._manager_finders[class_] + del self._state_finders[class_] + del self._dict_finders[class_] + + def opt_manager_of_class(self, cls): + try: + finder = self._manager_finders.get( + cls, _default_opt_manager_getter + ) + except TypeError: + # due to weakref lookup on invalid object + return None + else: + return finder(cls) + + def manager_of_class(self, cls): + try: + finder = self._manager_finders.get(cls, _default_manager_getter) + except TypeError: + # due to weakref lookup on invalid object + raise orm_exc.UnmappedClassError( + cls, f"Can't locate an instrumentation manager for class {cls}" + ) + else: + manager = finder(cls) + if manager is None: + raise orm_exc.UnmappedClassError( + cls, + f"Can't locate an instrumentation manager for class {cls}", + ) + return manager + + def state_of(self, instance): + if instance is None: + raise AttributeError("None has no persistent state.") + return self._state_finders.get( + instance.__class__, _default_state_getter + )(instance) + + def dict_of(self, instance): + if instance is None: + raise AttributeError("None has no persistent state.") + return self._dict_finders.get( + instance.__class__, _default_dict_getter + )(instance) + + +orm_instrumentation._instrumentation_factory = _instrumentation_factory = ( + 
ExtendedInstrumentationRegistry() +) +orm_instrumentation.instrumentation_finders = instrumentation_finders + + +class InstrumentationManager: + """User-defined class instrumentation extension. + + :class:`.InstrumentationManager` can be subclassed in order + to change + how class instrumentation proceeds. This class exists for + the purposes of integration with other object management + frameworks which would like to entirely modify the + instrumentation methodology of the ORM, and is not intended + for regular usage. For interception of class instrumentation + events, see :class:`.InstrumentationEvents`. + + The API for this class should be considered as semi-stable, + and may change slightly with new releases. + + """ + + # r4361 added a mandatory (cls) constructor to this interface. + # given that, perhaps class_ should be dropped from all of these + # signatures. + + def __init__(self, class_): + pass + + def manage(self, class_, manager): + setattr(class_, "_default_class_manager", manager) + + def unregister(self, class_, manager): + delattr(class_, "_default_class_manager") + + def manager_getter(self, class_): + def get(cls): + return cls._default_class_manager + + return get + + def instrument_attribute(self, class_, key, inst): + pass + + def post_configure_attribute(self, class_, key, inst): + pass + + def install_descriptor(self, class_, key, inst): + setattr(class_, key, inst) + + def uninstall_descriptor(self, class_, key): + delattr(class_, key) + + def install_member(self, class_, key, implementation): + setattr(class_, key, implementation) + + def uninstall_member(self, class_, key): + delattr(class_, key) + + def instrument_collection_class(self, class_, key, collection_class): + return collections.prepare_instrumentation(collection_class) + + def get_instance_dict(self, class_, instance): + return instance.__dict__ + + def initialize_instance_dict(self, class_, instance): + pass + + def install_state(self, class_, instance, state): + setattr(instance, "_default_state", state) + + def remove_state(self, class_, instance): + delattr(instance, "_default_state") + + def state_getter(self, class_): + return lambda instance: getattr(instance, "_default_state") + + def dict_getter(self, class_): + return lambda inst: self.get_instance_dict(class_, inst) + + +class _ClassInstrumentationAdapter(ClassManager): + """Adapts a user-defined InstrumentationManager to a ClassManager.""" + + def __init__(self, class_, override): + self._adapted = override + self._get_state = self._adapted.state_getter(class_) + self._get_dict = self._adapted.dict_getter(class_) + + ClassManager.__init__(self, class_) + + def manage(self): + self._adapted.manage(self.class_, self) + + def unregister(self): + self._adapted.unregister(self.class_, self) + + def manager_getter(self): + return self._adapted.manager_getter(self.class_) + + def instrument_attribute(self, key, inst, propagated=False): + ClassManager.instrument_attribute(self, key, inst, propagated) + if not propagated: + self._adapted.instrument_attribute(self.class_, key, inst) + + def post_configure_attribute(self, key): + super().post_configure_attribute(key) + self._adapted.post_configure_attribute(self.class_, key, self[key]) + + def install_descriptor(self, key, inst): + self._adapted.install_descriptor(self.class_, key, inst) + + def uninstall_descriptor(self, key): + self._adapted.uninstall_descriptor(self.class_, key) + + def install_member(self, key, implementation): + self._adapted.install_member(self.class_, key, implementation) + + 
def uninstall_member(self, key): + self._adapted.uninstall_member(self.class_, key) + + def instrument_collection_class(self, key, collection_class): + return self._adapted.instrument_collection_class( + self.class_, key, collection_class + ) + + def initialize_collection(self, key, state, factory): + delegate = getattr(self._adapted, "initialize_collection", None) + if delegate: + return delegate(key, state, factory) + else: + return ClassManager.initialize_collection( + self, key, state, factory + ) + + def new_instance(self, state=None): + instance = self.class_.__new__(self.class_) + self.setup_instance(instance, state) + return instance + + def _new_state_if_none(self, instance): + """Install a default InstanceState if none is present. + + A private convenience method used by the __init__ decorator. + """ + if self.has_state(instance): + return False + else: + return self.setup_instance(instance) + + def setup_instance(self, instance, state=None): + self._adapted.initialize_instance_dict(self.class_, instance) + + if state is None: + state = self._state_constructor(instance, self) + + # the given instance is assumed to have no state + self._adapted.install_state(self.class_, instance, state) + return state + + def teardown_instance(self, instance): + self._adapted.remove_state(self.class_, instance) + + def has_state(self, instance): + try: + self._get_state(instance) + except orm_exc.NO_STATE: + return False + else: + return True + + def state_getter(self): + return self._get_state + + def dict_getter(self): + return self._get_dict + + +def _install_instrumented_lookups(): + """Replace global class/object management functions + with ExtendedInstrumentationRegistry implementations, which + allow multiple types of class managers to be present, + at the cost of performance. + + This function is called only by ExtendedInstrumentationRegistry + and unit tests specific to this behavior. + + The _reinstall_default_lookups() function can be called + after this one to re-establish the default functions. 
+ + """ + _install_lookups( + dict( + instance_state=_instrumentation_factory.state_of, + instance_dict=_instrumentation_factory.dict_of, + manager_of_class=_instrumentation_factory.manager_of_class, + opt_manager_of_class=_instrumentation_factory.opt_manager_of_class, + ) + ) + + +def _reinstall_default_lookups(): + """Restore simplified lookups.""" + _install_lookups( + dict( + instance_state=_default_state_getter, + instance_dict=_default_dict_getter, + manager_of_class=_default_manager_getter, + opt_manager_of_class=_default_opt_manager_getter, + ) + ) + _instrumentation_factory._extended = False + + +def _install_lookups(lookups): + global instance_state, instance_dict + global manager_of_class, opt_manager_of_class + instance_state = lookups["instance_state"] + instance_dict = lookups["instance_dict"] + manager_of_class = lookups["manager_of_class"] + opt_manager_of_class = lookups["opt_manager_of_class"] + orm_base.instance_state = attributes.instance_state = ( + orm_instrumentation.instance_state + ) = instance_state + orm_base.instance_dict = attributes.instance_dict = ( + orm_instrumentation.instance_dict + ) = instance_dict + orm_base.manager_of_class = attributes.manager_of_class = ( + orm_instrumentation.manager_of_class + ) = manager_of_class + orm_base.opt_manager_of_class = orm_util.opt_manager_of_class = ( + attributes.opt_manager_of_class + ) = orm_instrumentation.opt_manager_of_class = opt_manager_of_class diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/ext/mutable.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/ext/mutable.py new file mode 100644 index 0000000000000000000000000000000000000000..ed618c3819df923487c36daeda593d376ffefcaf --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/ext/mutable.py @@ -0,0 +1,1095 @@ +# ext/mutable.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php + +r"""Provide support for tracking of in-place changes to scalar values, +which are propagated into ORM change events on owning parent objects. + +.. _mutable_scalars: + +Establishing Mutability on Scalar Column Values +=============================================== + +A typical example of a "mutable" structure is a Python dictionary. +Following the example introduced in :ref:`types_toplevel`, we +begin with a custom type that marshals Python dictionaries into +JSON strings before being persisted:: + + from sqlalchemy.types import TypeDecorator, VARCHAR + import json + + + class JSONEncodedDict(TypeDecorator): + "Represents an immutable structure as a json-encoded string." + + impl = VARCHAR + + def process_bind_param(self, value, dialect): + if value is not None: + value = json.dumps(value) + return value + + def process_result_value(self, value, dialect): + if value is not None: + value = json.loads(value) + return value + +The usage of ``json`` is only for the purposes of example. The +:mod:`sqlalchemy.ext.mutable` extension can be used +with any type whose target Python type may be mutable, including +:class:`.PickleType`, :class:`_postgresql.ARRAY`, etc. + +When using the :mod:`sqlalchemy.ext.mutable` extension, the value itself +tracks all parents which reference it. 
Below, we illustrate a simple +version of the :class:`.MutableDict` dictionary object, which applies +the :class:`.Mutable` mixin to a plain Python dictionary:: + + from sqlalchemy.ext.mutable import Mutable + + + class MutableDict(Mutable, dict): + @classmethod + def coerce(cls, key, value): + "Convert plain dictionaries to MutableDict." + + if not isinstance(value, MutableDict): + if isinstance(value, dict): + return MutableDict(value) + + # this call will raise ValueError + return Mutable.coerce(key, value) + else: + return value + + def __setitem__(self, key, value): + "Detect dictionary set events and emit change events." + + dict.__setitem__(self, key, value) + self.changed() + + def __delitem__(self, key): + "Detect dictionary del events and emit change events." + + dict.__delitem__(self, key) + self.changed() + +The above dictionary class takes the approach of subclassing the Python +built-in ``dict`` to produce a dict +subclass which routes all mutation events through ``__setitem__``. There are +variants on this approach, such as subclassing ``UserDict.UserDict`` or +``collections.MutableMapping``; the part that's important to this example is +that the :meth:`.Mutable.changed` method is called whenever an in-place +change to the datastructure takes place. + +We also redefine the :meth:`.Mutable.coerce` method which will be used to +convert any values that are not instances of ``MutableDict``, such +as the plain dictionaries returned by the ``json`` module, into the +appropriate type. Defining this method is optional; we could just as well +created our ``JSONEncodedDict`` such that it always returns an instance +of ``MutableDict``, and additionally ensured that all calling code +uses ``MutableDict`` explicitly. When :meth:`.Mutable.coerce` is not +overridden, any values applied to a parent object which are not instances +of the mutable type will raise a ``ValueError``. + +Our new ``MutableDict`` type offers a class method +:meth:`~.Mutable.as_mutable` which we can use within column metadata +to associate with types. This method grabs the given type object or +class and associates a listener that will detect all future mappings +of this type, applying event listening instrumentation to the mapped +attribute. Such as, with classical table metadata:: + + from sqlalchemy import Table, Column, Integer + + my_data = Table( + "my_data", + metadata, + Column("id", Integer, primary_key=True), + Column("data", MutableDict.as_mutable(JSONEncodedDict)), + ) + +Above, :meth:`~.Mutable.as_mutable` returns an instance of ``JSONEncodedDict`` +(if the type object was not an instance already), which will intercept any +attributes which are mapped against this type. Below we establish a simple +mapping against the ``my_data`` table:: + + from sqlalchemy.orm import DeclarativeBase + from sqlalchemy.orm import Mapped + from sqlalchemy.orm import mapped_column + + + class Base(DeclarativeBase): + pass + + + class MyDataClass(Base): + __tablename__ = "my_data" + id: Mapped[int] = mapped_column(primary_key=True) + data: Mapped[dict[str, str]] = mapped_column( + MutableDict.as_mutable(JSONEncodedDict) + ) + +The ``MyDataClass.data`` member will now be notified of in place changes +to its value. 
+ +Any in-place changes to the ``MyDataClass.data`` member +will flag the attribute as "dirty" on the parent object:: + + >>> from sqlalchemy.orm import Session + + >>> sess = Session(some_engine) + >>> m1 = MyDataClass(data={"value1": "foo"}) + >>> sess.add(m1) + >>> sess.commit() + + >>> m1.data["value1"] = "bar" + >>> assert m1 in sess.dirty + True + +The ``MutableDict`` can be associated with all future instances +of ``JSONEncodedDict`` in one step, using +:meth:`~.Mutable.associate_with`. This is similar to +:meth:`~.Mutable.as_mutable` except it will intercept all occurrences +of ``MutableDict`` in all mappings unconditionally, without +the need to declare it individually:: + + from sqlalchemy.orm import DeclarativeBase + from sqlalchemy.orm import Mapped + from sqlalchemy.orm import mapped_column + + MutableDict.associate_with(JSONEncodedDict) + + + class Base(DeclarativeBase): + pass + + + class MyDataClass(Base): + __tablename__ = "my_data" + id: Mapped[int] = mapped_column(primary_key=True) + data: Mapped[dict[str, str]] = mapped_column(JSONEncodedDict) + +Supporting Pickling +-------------------- + +The key to the :mod:`sqlalchemy.ext.mutable` extension relies upon the +placement of a ``weakref.WeakKeyDictionary`` upon the value object, which +stores a mapping of parent mapped objects keyed to the attribute name under +which they are associated with this value. ``WeakKeyDictionary`` objects are +not picklable, due to the fact that they contain weakrefs and function +callbacks. In our case, this is a good thing, since if this dictionary were +picklable, it could lead to an excessively large pickle size for our value +objects that are pickled by themselves outside of the context of the parent. +The developer responsibility here is only to provide a ``__getstate__`` method +that excludes the :meth:`~MutableBase._parents` collection from the pickle +stream:: + + class MyMutableType(Mutable): + def __getstate__(self): + d = self.__dict__.copy() + d.pop("_parents", None) + return d + +With our dictionary example, we need to return the contents of the dict itself +(and also restore them on __setstate__):: + + class MutableDict(Mutable, dict): + # .... + + def __getstate__(self): + return dict(self) + + def __setstate__(self, state): + self.update(state) + +In the case that our mutable value object is pickled as it is attached to one +or more parent objects that are also part of the pickle, the :class:`.Mutable` +mixin will re-establish the :attr:`.Mutable._parents` collection on each value +object as the owning parents themselves are unpickled. + +Receiving Events +---------------- + +The :meth:`.AttributeEvents.modified` event handler may be used to receive +an event when a mutable scalar emits a change event. This event handler +is called when the :func:`.attributes.flag_modified` function is called +from within the mutable extension:: + + from sqlalchemy.orm import DeclarativeBase + from sqlalchemy.orm import Mapped + from sqlalchemy.orm import mapped_column + from sqlalchemy import event + + + class Base(DeclarativeBase): + pass + + + class MyDataClass(Base): + __tablename__ = "my_data" + id: Mapped[int] = mapped_column(primary_key=True) + data: Mapped[dict[str, str]] = mapped_column( + MutableDict.as_mutable(JSONEncodedDict) + ) + + + @event.listens_for(MyDataClass.data, "modified") + def modified_json(instance, initiator): + print("json value modified:", instance.data) + +.. 
_mutable_composites: + +Establishing Mutability on Composites +===================================== + +Composites are a special ORM feature which allow a single scalar attribute to +be assigned an object value which represents information "composed" from one +or more columns from the underlying mapped table. The usual example is that of +a geometric "point", and is introduced in :ref:`mapper_composite`. + +As is the case with :class:`.Mutable`, the user-defined composite class +subclasses :class:`.MutableComposite` as a mixin, and detects and delivers +change events to its parents via the :meth:`.MutableComposite.changed` method. +In the case of a composite class, the detection is usually via the usage of the +special Python method ``__setattr__()``. In the example below, we expand upon the ``Point`` +class introduced in :ref:`mapper_composite` to include +:class:`.MutableComposite` in its bases and to route attribute set events via +``__setattr__`` to the :meth:`.MutableComposite.changed` method:: + + import dataclasses + from sqlalchemy.ext.mutable import MutableComposite + + + @dataclasses.dataclass + class Point(MutableComposite): + x: int + y: int + + def __setattr__(self, key, value): + "Intercept set events" + + # set the attribute + object.__setattr__(self, key, value) + + # alert all parents to the change + self.changed() + +The :class:`.MutableComposite` class makes use of class mapping events to +automatically establish listeners for any usage of :func:`_orm.composite` that +specifies our ``Point`` type. Below, when ``Point`` is mapped to the ``Vertex`` +class, listeners are established which will route change events from ``Point`` +objects to each of the ``Vertex.start`` and ``Vertex.end`` attributes:: + + from sqlalchemy.orm import DeclarativeBase, Mapped + from sqlalchemy.orm import composite, mapped_column + + + class Base(DeclarativeBase): + pass + + + class Vertex(Base): + __tablename__ = "vertices" + + id: Mapped[int] = mapped_column(primary_key=True) + + start: Mapped[Point] = composite( + mapped_column("x1"), mapped_column("y1") + ) + end: Mapped[Point] = composite( + mapped_column("x2"), mapped_column("y2") + ) + + def __repr__(self): + return f"Vertex(start={self.start}, end={self.end})" + +Any in-place changes to the ``Vertex.start`` or ``Vertex.end`` members +will flag the attribute as "dirty" on the parent object: + +.. sourcecode:: python+sql + + >>> from sqlalchemy.orm import Session + >>> sess = Session(engine) + >>> v1 = Vertex(start=Point(3, 4), end=Point(12, 15)) + >>> sess.add(v1) + {sql}>>> sess.flush() + BEGIN (implicit) + INSERT INTO vertices (x1, y1, x2, y2) VALUES (?, ?, ?, ?) + [...] (3, 4, 12, 15) + + {stop}>>> v1.end.x = 8 + >>> assert v1 in sess.dirty + True + {sql}>>> sess.commit() + UPDATE vertices SET x2=? WHERE vertices.id = ? + [...] (8, 1) + COMMIT + +Coercing Mutable Composites +--------------------------- + +The :meth:`.MutableBase.coerce` method is also supported on composite types. +In the case of :class:`.MutableComposite`, the :meth:`.MutableBase.coerce` +method is only called for attribute set operations, not load operations. +Overriding the :meth:`.MutableBase.coerce` method is essentially equivalent +to using a :func:`.validates` validation routine for all attributes which +make use of the custom composite type:: + + @dataclasses.dataclass + class Point(MutableComposite): + # other Point methods + # ... 
+ + def coerce(cls, key, value): + if isinstance(value, tuple): + value = Point(*value) + elif not isinstance(value, Point): + raise ValueError("tuple or Point expected") + return value + +Supporting Pickling +-------------------- + +As is the case with :class:`.Mutable`, the :class:`.MutableComposite` helper +class uses a ``weakref.WeakKeyDictionary`` available via the +:meth:`MutableBase._parents` attribute which isn't picklable. If we need to +pickle instances of ``Point`` or its owning class ``Vertex``, we at least need +to define a ``__getstate__`` that doesn't include the ``_parents`` dictionary. +Below we define both a ``__getstate__`` and a ``__setstate__`` that package up +the minimal form of our ``Point`` class:: + + @dataclasses.dataclass + class Point(MutableComposite): + # ... + + def __getstate__(self): + return self.x, self.y + + def __setstate__(self, state): + self.x, self.y = state + +As with :class:`.Mutable`, the :class:`.MutableComposite` augments the +pickling process of the parent's object-relational state so that the +:meth:`MutableBase._parents` collection is restored to all ``Point`` objects. + +""" # noqa: E501 + +from __future__ import annotations + +from collections import defaultdict +from typing import AbstractSet +from typing import Any +from typing import Dict +from typing import Iterable +from typing import List +from typing import Optional +from typing import overload +from typing import Set +from typing import Tuple +from typing import TYPE_CHECKING +from typing import TypeVar +from typing import Union +import weakref +from weakref import WeakKeyDictionary + +from .. import event +from .. import inspect +from .. import types +from .. import util +from ..orm import Mapper +from ..orm._typing import _ExternalEntityType +from ..orm._typing import _O +from ..orm._typing import _T +from ..orm.attributes import AttributeEventToken +from ..orm.attributes import flag_modified +from ..orm.attributes import InstrumentedAttribute +from ..orm.attributes import QueryableAttribute +from ..orm.context import QueryContext +from ..orm.decl_api import DeclarativeAttributeIntercept +from ..orm.state import InstanceState +from ..orm.unitofwork import UOWTransaction +from ..sql._typing import _TypeEngineArgument +from ..sql.base import SchemaEventTarget +from ..sql.schema import Column +from ..sql.type_api import TypeEngine +from ..util import memoized_property +from ..util.typing import SupportsIndex +from ..util.typing import TypeGuard + +_KT = TypeVar("_KT") # Key type. +_VT = TypeVar("_VT") # Value type. + + +class MutableBase: + """Common base class to :class:`.Mutable` + and :class:`.MutableComposite`. + + """ + + @memoized_property + def _parents(self) -> WeakKeyDictionary[Any, Any]: + """Dictionary of parent object's :class:`.InstanceState`->attribute + name on the parent. + + This attribute is a so-called "memoized" property. It initializes + itself with a new ``weakref.WeakKeyDictionary`` the first time + it is accessed, returning the same object upon subsequent access. + + .. versionchanged:: 1.4 the :class:`.InstanceState` is now used + as the key in the weak dictionary rather than the instance + itself. + + """ + + return weakref.WeakKeyDictionary() + + @classmethod + def coerce(cls, key: str, value: Any) -> Optional[Any]: + """Given a value, coerce it into the target type. + + Can be overridden by custom subclasses to coerce incoming + data into a particular type. + + By default, raises ``ValueError``. 
+ + This method is called in different scenarios depending on if + the parent class is of type :class:`.Mutable` or of type + :class:`.MutableComposite`. In the case of the former, it is called + for both attribute-set operations as well as during ORM loading + operations. For the latter, it is only called during attribute-set + operations; the mechanics of the :func:`.composite` construct + handle coercion during load operations. + + + :param key: string name of the ORM-mapped attribute being set. + :param value: the incoming value. + :return: the method should return the coerced value, or raise + ``ValueError`` if the coercion cannot be completed. + + """ + if value is None: + return None + msg = "Attribute '%s' does not accept objects of type %s" + raise ValueError(msg % (key, type(value))) + + @classmethod + def _get_listen_keys(cls, attribute: QueryableAttribute[Any]) -> Set[str]: + """Given a descriptor attribute, return a ``set()`` of the attribute + keys which indicate a change in the state of this attribute. + + This is normally just ``set([attribute.key])``, but can be overridden + to provide for additional keys. E.g. a :class:`.MutableComposite` + augments this set with the attribute keys associated with the columns + that comprise the composite value. + + This collection is consulted in the case of intercepting the + :meth:`.InstanceEvents.refresh` and + :meth:`.InstanceEvents.refresh_flush` events, which pass along a list + of attribute names that have been refreshed; the list is compared + against this set to determine if action needs to be taken. + + """ + return {attribute.key} + + @classmethod + def _listen_on_attribute( + cls, + attribute: QueryableAttribute[Any], + coerce: bool, + parent_cls: _ExternalEntityType[Any], + ) -> None: + """Establish this type as a mutation listener for the given + mapped descriptor. + + """ + key = attribute.key + if parent_cls is not attribute.class_: + return + + # rely on "propagate" here + parent_cls = attribute.class_ + + listen_keys = cls._get_listen_keys(attribute) + + def load(state: InstanceState[_O], *args: Any) -> None: + """Listen for objects loaded or refreshed. + + Wrap the target data member's value with + ``Mutable``. + + """ + val = state.dict.get(key, None) + if val is not None: + if coerce: + val = cls.coerce(key, val) + state.dict[key] = val + val._parents[state] = key + + def load_attrs( + state: InstanceState[_O], + ctx: Union[object, QueryContext, UOWTransaction], + attrs: Iterable[Any], + ) -> None: + if not attrs or listen_keys.intersection(attrs): + load(state) + + def set_( + target: InstanceState[_O], + value: MutableBase | None, + oldvalue: MutableBase | None, + initiator: AttributeEventToken, + ) -> MutableBase | None: + """Listen for set/replace events on the target + data member. + + Establish a weak reference to the parent object + on the incoming value, remove it for the one + outgoing. 
+ + """ + if value is oldvalue: + return value + + if not isinstance(value, cls): + value = cls.coerce(key, value) + if value is not None: + value._parents[target] = key + if isinstance(oldvalue, cls): + oldvalue._parents.pop(inspect(target), None) + return value + + def pickle( + state: InstanceState[_O], state_dict: Dict[str, Any] + ) -> None: + val = state.dict.get(key, None) + if val is not None: + if "ext.mutable.values" not in state_dict: + state_dict["ext.mutable.values"] = defaultdict(list) + state_dict["ext.mutable.values"][key].append(val) + + def unpickle( + state: InstanceState[_O], state_dict: Dict[str, Any] + ) -> None: + if "ext.mutable.values" in state_dict: + collection = state_dict["ext.mutable.values"] + if isinstance(collection, list): + # legacy format + for val in collection: + val._parents[state] = key + else: + for val in state_dict["ext.mutable.values"][key]: + val._parents[state] = key + + event.listen( + parent_cls, + "_sa_event_merge_wo_load", + load, + raw=True, + propagate=True, + ) + + event.listen(parent_cls, "load", load, raw=True, propagate=True) + event.listen( + parent_cls, "refresh", load_attrs, raw=True, propagate=True + ) + event.listen( + parent_cls, "refresh_flush", load_attrs, raw=True, propagate=True + ) + event.listen( + attribute, "set", set_, raw=True, retval=True, propagate=True + ) + event.listen(parent_cls, "pickle", pickle, raw=True, propagate=True) + event.listen( + parent_cls, "unpickle", unpickle, raw=True, propagate=True + ) + + +class Mutable(MutableBase): + """Mixin that defines transparent propagation of change + events to a parent object. + + See the example in :ref:`mutable_scalars` for usage information. + + """ + + def changed(self) -> None: + """Subclasses should call this method whenever change events occur.""" + + for parent, key in self._parents.items(): + flag_modified(parent.obj(), key) + + @classmethod + def associate_with_attribute( + cls, attribute: InstrumentedAttribute[_O] + ) -> None: + """Establish this type as a mutation listener for the given + mapped descriptor. + + """ + cls._listen_on_attribute(attribute, True, attribute.class_) + + @classmethod + def associate_with(cls, sqltype: type) -> None: + """Associate this wrapper with all future mapped columns + of the given type. + + This is a convenience method that calls + ``associate_with_attribute`` automatically. + + .. warning:: + + The listeners established by this method are *global* + to all mappers, and are *not* garbage collected. Only use + :meth:`.associate_with` for types that are permanent to an + application, not with ad-hoc types else this will cause unbounded + growth in memory usage. + + """ + + def listen_for_type(mapper: Mapper[_O], class_: type) -> None: + if mapper.non_primary: + return + for prop in mapper.column_attrs: + if isinstance(prop.columns[0].type, sqltype): + cls.associate_with_attribute(getattr(class_, prop.key)) + + event.listen(Mapper, "mapper_configured", listen_for_type) + + @classmethod + def as_mutable(cls, sqltype: _TypeEngineArgument[_T]) -> TypeEngine[_T]: + """Associate a SQL type with this mutable Python type. + + This establishes listeners that will detect ORM mappings against + the given type, adding mutation event trackers to those mappings. 
+ + The type is returned, unconditionally as an instance, so that + :meth:`.as_mutable` can be used inline:: + + Table( + "mytable", + metadata, + Column("id", Integer, primary_key=True), + Column("data", MyMutableType.as_mutable(PickleType)), + ) + + Note that the returned type is always an instance, even if a class + is given, and that only columns which are declared specifically with + that type instance receive additional instrumentation. + + To associate a particular mutable type with all occurrences of a + particular type, use the :meth:`.Mutable.associate_with` classmethod + of the particular :class:`.Mutable` subclass to establish a global + association. + + .. warning:: + + The listeners established by this method are *global* + to all mappers, and are *not* garbage collected. Only use + :meth:`.as_mutable` for types that are permanent to an application, + not with ad-hoc types else this will cause unbounded growth + in memory usage. + + """ + sqltype = types.to_instance(sqltype) + + # a SchemaType will be copied when the Column is copied, + # and we'll lose our ability to link that type back to the original. + # so track our original type w/ columns + if isinstance(sqltype, SchemaEventTarget): + + @event.listens_for(sqltype, "before_parent_attach") + def _add_column_memo( + sqltyp: TypeEngine[Any], + parent: Column[_T], + ) -> None: + parent.info["_ext_mutable_orig_type"] = sqltyp + + schema_event_check = True + else: + schema_event_check = False + + def listen_for_type( + mapper: Mapper[_T], + class_: Union[DeclarativeAttributeIntercept, type], + ) -> None: + if mapper.non_primary: + return + _APPLIED_KEY = "_ext_mutable_listener_applied" + + for prop in mapper.column_attrs: + if ( + # all Mutable types refer to a Column that's mapped, + # since this is the only kind of Core target the ORM can + # "mutate" + isinstance(prop.expression, Column) + and ( + ( + schema_event_check + and prop.expression.info.get( + "_ext_mutable_orig_type" + ) + is sqltype + ) + or prop.expression.type is sqltype + ) + ): + if not prop.expression.info.get(_APPLIED_KEY, False): + prop.expression.info[_APPLIED_KEY] = True + cls.associate_with_attribute(getattr(class_, prop.key)) + + event.listen(Mapper, "mapper_configured", listen_for_type) + + return sqltype + + +class MutableComposite(MutableBase): + """Mixin that defines transparent propagation of change + events on a SQLAlchemy "composite" object to its + owning parent or parents. + + See the example in :ref:`mutable_composites` for usage information. 
+ + """ + + @classmethod + def _get_listen_keys(cls, attribute: QueryableAttribute[_O]) -> Set[str]: + return {attribute.key}.union(attribute.property._attribute_keys) + + def changed(self) -> None: + """Subclasses should call this method whenever change events occur.""" + + for parent, key in self._parents.items(): + prop = parent.mapper.get_property(key) + for value, attr_name in zip( + prop._composite_values_from_instance(self), + prop._attribute_keys, + ): + setattr(parent.obj(), attr_name, value) + + +def _setup_composite_listener() -> None: + def _listen_for_type(mapper: Mapper[_T], class_: type) -> None: + for prop in mapper.iterate_properties: + if ( + hasattr(prop, "composite_class") + and isinstance(prop.composite_class, type) + and issubclass(prop.composite_class, MutableComposite) + ): + prop.composite_class._listen_on_attribute( + getattr(class_, prop.key), False, class_ + ) + + if not event.contains(Mapper, "mapper_configured", _listen_for_type): + event.listen(Mapper, "mapper_configured", _listen_for_type) + + +_setup_composite_listener() + + +class MutableDict(Mutable, Dict[_KT, _VT]): + """A dictionary type that implements :class:`.Mutable`. + + The :class:`.MutableDict` object implements a dictionary that will + emit change events to the underlying mapping when the contents of + the dictionary are altered, including when values are added or removed. + + Note that :class:`.MutableDict` does **not** apply mutable tracking to the + *values themselves* inside the dictionary. Therefore it is not a sufficient + solution for the use case of tracking deep changes to a *recursive* + dictionary structure, such as a JSON structure. To support this use case, + build a subclass of :class:`.MutableDict` that provides appropriate + coercion to the values placed in the dictionary so that they too are + "mutable", and emit events up to their parent structure. + + .. seealso:: + + :class:`.MutableList` + + :class:`.MutableSet` + + """ + + def __setitem__(self, key: _KT, value: _VT) -> None: + """Detect dictionary set events and emit change events.""" + super().__setitem__(key, value) + self.changed() + + if TYPE_CHECKING: + # from https://github.com/python/mypy/issues/14858 + + @overload + def setdefault( + self: MutableDict[_KT, Optional[_T]], key: _KT, value: None = None + ) -> Optional[_T]: ... + + @overload + def setdefault(self, key: _KT, value: _VT) -> _VT: ... + + def setdefault(self, key: _KT, value: object = None) -> object: ... + + else: + + def setdefault(self, *arg): # noqa: F811 + result = super().setdefault(*arg) + self.changed() + return result + + def __delitem__(self, key: _KT) -> None: + """Detect dictionary del events and emit change events.""" + super().__delitem__(key) + self.changed() + + def update(self, *a: Any, **kw: _VT) -> None: + super().update(*a, **kw) + self.changed() + + if TYPE_CHECKING: + + @overload + def pop(self, __key: _KT) -> _VT: ... + + @overload + def pop(self, __key: _KT, __default: _VT | _T) -> _VT | _T: ... + + def pop( + self, __key: _KT, __default: _VT | _T | None = None + ) -> _VT | _T: ... 
+ + else: + + def pop(self, *arg): # noqa: F811 + result = super().pop(*arg) + self.changed() + return result + + def popitem(self) -> Tuple[_KT, _VT]: + result = super().popitem() + self.changed() + return result + + def clear(self) -> None: + super().clear() + self.changed() + + @classmethod + def coerce(cls, key: str, value: Any) -> MutableDict[_KT, _VT] | None: + """Convert plain dictionary to instance of this class.""" + if not isinstance(value, cls): + if isinstance(value, dict): + return cls(value) + return Mutable.coerce(key, value) + else: + return value + + def __getstate__(self) -> Dict[_KT, _VT]: + return dict(self) + + def __setstate__( + self, state: Union[Dict[str, int], Dict[str, str]] + ) -> None: + self.update(state) + + +class MutableList(Mutable, List[_T]): + """A list type that implements :class:`.Mutable`. + + The :class:`.MutableList` object implements a list that will + emit change events to the underlying mapping when the contents of + the list are altered, including when values are added or removed. + + Note that :class:`.MutableList` does **not** apply mutable tracking to the + *values themselves* inside the list. Therefore it is not a sufficient + solution for the use case of tracking deep changes to a *recursive* + mutable structure, such as a JSON structure. To support this use case, + build a subclass of :class:`.MutableList` that provides appropriate + coercion to the values placed in the dictionary so that they too are + "mutable", and emit events up to their parent structure. + + .. seealso:: + + :class:`.MutableDict` + + :class:`.MutableSet` + + """ + + def __reduce_ex__( + self, proto: SupportsIndex + ) -> Tuple[type, Tuple[List[int]]]: + return (self.__class__, (list(self),)) + + # needed for backwards compatibility with + # older pickles + def __setstate__(self, state: Iterable[_T]) -> None: + self[:] = state + + def is_scalar(self, value: _T | Iterable[_T]) -> TypeGuard[_T]: + return not util.is_non_string_iterable(value) + + def is_iterable(self, value: _T | Iterable[_T]) -> TypeGuard[Iterable[_T]]: + return util.is_non_string_iterable(value) + + def __setitem__( + self, index: SupportsIndex | slice, value: _T | Iterable[_T] + ) -> None: + """Detect list set events and emit change events.""" + if isinstance(index, SupportsIndex) and self.is_scalar(value): + super().__setitem__(index, value) + elif isinstance(index, slice) and self.is_iterable(value): + super().__setitem__(index, value) + self.changed() + + def __delitem__(self, index: SupportsIndex | slice) -> None: + """Detect list del events and emit change events.""" + super().__delitem__(index) + self.changed() + + def pop(self, *arg: SupportsIndex) -> _T: + result = super().pop(*arg) + self.changed() + return result + + def append(self, x: _T) -> None: + super().append(x) + self.changed() + + def extend(self, x: Iterable[_T]) -> None: + super().extend(x) + self.changed() + + def __iadd__(self, x: Iterable[_T]) -> MutableList[_T]: # type: ignore[override,misc] # noqa: E501 + self.extend(x) + return self + + def insert(self, i: SupportsIndex, x: _T) -> None: + super().insert(i, x) + self.changed() + + def remove(self, i: _T) -> None: + super().remove(i) + self.changed() + + def clear(self) -> None: + super().clear() + self.changed() + + def sort(self, **kw: Any) -> None: + super().sort(**kw) + self.changed() + + def reverse(self) -> None: + super().reverse() + self.changed() + + @classmethod + def coerce( + cls, key: str, value: MutableList[_T] | _T + ) -> Optional[MutableList[_T]]: + """Convert 
plain list to instance of this class.""" + if not isinstance(value, cls): + if isinstance(value, list): + return cls(value) + return Mutable.coerce(key, value) + else: + return value + + +class MutableSet(Mutable, Set[_T]): + """A set type that implements :class:`.Mutable`. + + The :class:`.MutableSet` object implements a set that will + emit change events to the underlying mapping when the contents of + the set are altered, including when values are added or removed. + + Note that :class:`.MutableSet` does **not** apply mutable tracking to the + *values themselves* inside the set. Therefore it is not a sufficient + solution for the use case of tracking deep changes to a *recursive* + mutable structure. To support this use case, + build a subclass of :class:`.MutableSet` that provides appropriate + coercion to the values placed in the dictionary so that they too are + "mutable", and emit events up to their parent structure. + + .. seealso:: + + :class:`.MutableDict` + + :class:`.MutableList` + + + """ + + def update(self, *arg: Iterable[_T]) -> None: + super().update(*arg) + self.changed() + + def intersection_update(self, *arg: Iterable[Any]) -> None: + super().intersection_update(*arg) + self.changed() + + def difference_update(self, *arg: Iterable[Any]) -> None: + super().difference_update(*arg) + self.changed() + + def symmetric_difference_update(self, *arg: Iterable[_T]) -> None: + super().symmetric_difference_update(*arg) + self.changed() + + def __ior__(self, other: AbstractSet[_T]) -> MutableSet[_T]: # type: ignore[override,misc] # noqa: E501 + self.update(other) + return self + + def __iand__(self, other: AbstractSet[object]) -> MutableSet[_T]: + self.intersection_update(other) + return self + + def __ixor__(self, other: AbstractSet[_T]) -> MutableSet[_T]: # type: ignore[override,misc] # noqa: E501 + self.symmetric_difference_update(other) + return self + + def __isub__(self, other: AbstractSet[object]) -> MutableSet[_T]: # type: ignore[misc] # noqa: E501 + self.difference_update(other) + return self + + def add(self, elem: _T) -> None: + super().add(elem) + self.changed() + + def remove(self, elem: _T) -> None: + super().remove(elem) + self.changed() + + def discard(self, elem: _T) -> None: + super().discard(elem) + self.changed() + + def pop(self, *arg: Any) -> _T: + result = super().pop(*arg) + self.changed() + return result + + def clear(self) -> None: + super().clear() + self.changed() + + @classmethod + def coerce(cls, index: str, value: Any) -> Optional[MutableSet[_T]]: + """Convert plain set to instance of this class.""" + if not isinstance(value, cls): + if isinstance(value, set): + return cls(value) + return Mutable.coerce(index, value) + else: + return value + + def __getstate__(self) -> Set[_T]: + return set(self) + + def __setstate__(self, state: Iterable[_T]) -> None: + self.update(state) + + def __reduce_ex__( + self, proto: SupportsIndex + ) -> Tuple[type, Tuple[List[int]]]: + return (self.__class__, (list(self),)) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/ext/mypy/__init__.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/ext/mypy/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b5827cb8d369364b05c1711b7396001688ba6618 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/ext/mypy/__init__.py @@ -0,0 +1,6 @@ +# ext/mypy/__init__.py +# Copyright (C) 2005-2025 the SQLAlchemy 
authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/ext/mypy/apply.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/ext/mypy/apply.py new file mode 100644 index 0000000000000000000000000000000000000000..02908cc14b4362f506b7a2ab062b66ffe13461c3 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/ext/mypy/apply.py @@ -0,0 +1,324 @@ +# ext/mypy/apply.py +# Copyright (C) 2021-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php + +from __future__ import annotations + +from typing import List +from typing import Optional +from typing import Union + +from mypy.nodes import ARG_NAMED_OPT +from mypy.nodes import Argument +from mypy.nodes import AssignmentStmt +from mypy.nodes import CallExpr +from mypy.nodes import ClassDef +from mypy.nodes import MDEF +from mypy.nodes import MemberExpr +from mypy.nodes import NameExpr +from mypy.nodes import RefExpr +from mypy.nodes import StrExpr +from mypy.nodes import SymbolTableNode +from mypy.nodes import TempNode +from mypy.nodes import TypeInfo +from mypy.nodes import Var +from mypy.plugin import SemanticAnalyzerPluginInterface +from mypy.plugins.common import add_method_to_class +from mypy.types import AnyType +from mypy.types import get_proper_type +from mypy.types import Instance +from mypy.types import NoneTyp +from mypy.types import ProperType +from mypy.types import TypeOfAny +from mypy.types import UnboundType +from mypy.types import UnionType + +from . import infer +from . import util +from .names import expr_to_mapped_constructor +from .names import NAMED_TYPE_SQLA_MAPPED + + +def apply_mypy_mapped_attr( + cls: ClassDef, + api: SemanticAnalyzerPluginInterface, + item: Union[NameExpr, StrExpr], + attributes: List[util.SQLAlchemyAttribute], +) -> None: + if isinstance(item, NameExpr): + name = item.name + elif isinstance(item, StrExpr): + name = item.value + else: + return None + + for stmt in cls.defs.body: + if ( + isinstance(stmt, AssignmentStmt) + and isinstance(stmt.lvalues[0], NameExpr) + and stmt.lvalues[0].name == name + ): + break + else: + util.fail(api, f"Can't find mapped attribute {name}", cls) + return None + + if stmt.type is None: + util.fail( + api, + "Statement linked from _mypy_mapped_attrs has no " + "typing information", + stmt, + ) + return None + + left_hand_explicit_type = get_proper_type(stmt.type) + assert isinstance( + left_hand_explicit_type, (Instance, UnionType, UnboundType) + ) + + attributes.append( + util.SQLAlchemyAttribute( + name=name, + line=item.line, + column=item.column, + typ=left_hand_explicit_type, + info=cls.info, + ) + ) + + apply_type_to_mapped_statement( + api, stmt, stmt.lvalues[0], left_hand_explicit_type, None + ) + + +def re_apply_declarative_assignments( + cls: ClassDef, + api: SemanticAnalyzerPluginInterface, + attributes: List[util.SQLAlchemyAttribute], +) -> None: + """For multiple class passes, re-apply our left-hand side types as mypy + seems to reset them in place. 
+ + """ + mapped_attr_lookup = {attr.name: attr for attr in attributes} + update_cls_metadata = False + + for stmt in cls.defs.body: + # for a re-apply, all of our statements are AssignmentStmt; + # @declared_attr calls will have been converted and this + # currently seems to be preserved by mypy (but who knows if this + # will change). + if ( + isinstance(stmt, AssignmentStmt) + and isinstance(stmt.lvalues[0], NameExpr) + and stmt.lvalues[0].name in mapped_attr_lookup + and isinstance(stmt.lvalues[0].node, Var) + ): + left_node = stmt.lvalues[0].node + + python_type_for_type = mapped_attr_lookup[ + stmt.lvalues[0].name + ].type + + left_node_proper_type = get_proper_type(left_node.type) + + # if we have scanned an UnboundType and now there's a more + # specific type than UnboundType, call the re-scan so we + # can get that set up correctly + if ( + isinstance(python_type_for_type, UnboundType) + and not isinstance(left_node_proper_type, UnboundType) + and ( + isinstance(stmt.rvalue, CallExpr) + and isinstance(stmt.rvalue.callee, MemberExpr) + and isinstance(stmt.rvalue.callee.expr, NameExpr) + and stmt.rvalue.callee.expr.node is not None + and stmt.rvalue.callee.expr.node.fullname + == NAMED_TYPE_SQLA_MAPPED + and stmt.rvalue.callee.name == "_empty_constructor" + and isinstance(stmt.rvalue.args[0], CallExpr) + and isinstance(stmt.rvalue.args[0].callee, RefExpr) + ) + ): + new_python_type_for_type = ( + infer.infer_type_from_right_hand_nameexpr( + api, + stmt, + left_node, + left_node_proper_type, + stmt.rvalue.args[0].callee, + ) + ) + + if new_python_type_for_type is not None and not isinstance( + new_python_type_for_type, UnboundType + ): + python_type_for_type = new_python_type_for_type + + # update the SQLAlchemyAttribute with the better + # information + mapped_attr_lookup[stmt.lvalues[0].name].type = ( + python_type_for_type + ) + + update_cls_metadata = True + + if ( + not isinstance(left_node.type, Instance) + or left_node.type.type.fullname != NAMED_TYPE_SQLA_MAPPED + ): + assert python_type_for_type is not None + left_node.type = api.named_type( + NAMED_TYPE_SQLA_MAPPED, [python_type_for_type] + ) + + if update_cls_metadata: + util.set_mapped_attributes(cls.info, attributes) + + +def apply_type_to_mapped_statement( + api: SemanticAnalyzerPluginInterface, + stmt: AssignmentStmt, + lvalue: NameExpr, + left_hand_explicit_type: Optional[ProperType], + python_type_for_type: Optional[ProperType], +) -> None: + """Apply the Mapped[] annotation and right hand object to a + declarative assignment statement. + + This converts a Python declarative class statement such as:: + + class User(Base): + # ... + + attrname = Column(Integer) + + To one that describes the final Python behavior to Mypy:: + + ... format: off + + class User(Base): + # ... + + attrname : Mapped[Optional[int]] = + + ... format: on + + """ + left_node = lvalue.node + assert isinstance(left_node, Var) + + # to be completely honest I have no idea what the difference between + # left_node.type and stmt.type is, what it means if these are different + # vs. the same, why in order to get tests to pass I have to assign + # to stmt.type for the second case and not the first. this is complete + # trying every combination until it works stuff. 
+ + if left_hand_explicit_type is not None: + lvalue.is_inferred_def = False + left_node.type = api.named_type( + NAMED_TYPE_SQLA_MAPPED, [left_hand_explicit_type] + ) + else: + lvalue.is_inferred_def = False + left_node.type = api.named_type( + NAMED_TYPE_SQLA_MAPPED, + ( + [AnyType(TypeOfAny.special_form)] + if python_type_for_type is None + else [python_type_for_type] + ), + ) + + # so to have it skip the right side totally, we can do this: + # stmt.rvalue = TempNode(AnyType(TypeOfAny.special_form)) + + # however, if we instead manufacture a new node that uses the old + # one, then we can still get type checking for the call itself, + # e.g. the Column, relationship() call, etc. + + # rewrite the node as: + # : Mapped[] = + # _sa_Mapped._empty_constructor() + # the original right-hand side is maintained so it gets type checked + # internally + stmt.rvalue = expr_to_mapped_constructor(stmt.rvalue) + + if stmt.type is not None and python_type_for_type is not None: + stmt.type = python_type_for_type + + +def add_additional_orm_attributes( + cls: ClassDef, + api: SemanticAnalyzerPluginInterface, + attributes: List[util.SQLAlchemyAttribute], +) -> None: + """Apply __init__, __table__ and other attributes to the mapped class.""" + + info = util.info_for_cls(cls, api) + + if info is None: + return + + is_base = util.get_is_base(info) + + if "__init__" not in info.names and not is_base: + mapped_attr_names = {attr.name: attr.type for attr in attributes} + + for base in info.mro[1:-1]: + if "sqlalchemy" not in info.metadata: + continue + + base_cls_attributes = util.get_mapped_attributes(base, api) + if base_cls_attributes is None: + continue + + for attr in base_cls_attributes: + mapped_attr_names.setdefault(attr.name, attr.type) + + arguments = [] + for name, typ in mapped_attr_names.items(): + if typ is None: + typ = AnyType(TypeOfAny.special_form) + arguments.append( + Argument( + variable=Var(name, typ), + type_annotation=typ, + initializer=TempNode(typ), + kind=ARG_NAMED_OPT, + ) + ) + + add_method_to_class(api, cls, "__init__", arguments, NoneTyp()) + + if "__table__" not in info.names and util.get_has_table(info): + _apply_placeholder_attr_to_class( + api, cls, "sqlalchemy.sql.schema.Table", "__table__" + ) + if not is_base: + _apply_placeholder_attr_to_class( + api, cls, "sqlalchemy.orm.mapper.Mapper", "__mapper__" + ) + + +def _apply_placeholder_attr_to_class( + api: SemanticAnalyzerPluginInterface, + cls: ClassDef, + qualified_name: str, + attrname: str, +) -> None: + sym = api.lookup_fully_qualified_or_none(qualified_name) + if sym: + assert isinstance(sym.node, TypeInfo) + type_: ProperType = Instance(sym.node, []) + else: + type_ = AnyType(TypeOfAny.special_form) + var = Var(attrname) + var._fullname = cls.fullname + "." 
+ attrname + var.info = cls.info + var.type = type_ + cls.info.names[attrname] = SymbolTableNode(MDEF, var) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/ext/mypy/decl_class.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/ext/mypy/decl_class.py new file mode 100644 index 0000000000000000000000000000000000000000..2ce7ad56ccc965c03a6d2f89de5de02d59a7ba04 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/ext/mypy/decl_class.py @@ -0,0 +1,515 @@ +# ext/mypy/decl_class.py +# Copyright (C) 2021-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php + +from __future__ import annotations + +from typing import List +from typing import Optional +from typing import Union + +from mypy.nodes import AssignmentStmt +from mypy.nodes import CallExpr +from mypy.nodes import ClassDef +from mypy.nodes import Decorator +from mypy.nodes import LambdaExpr +from mypy.nodes import ListExpr +from mypy.nodes import MemberExpr +from mypy.nodes import NameExpr +from mypy.nodes import PlaceholderNode +from mypy.nodes import RefExpr +from mypy.nodes import StrExpr +from mypy.nodes import SymbolNode +from mypy.nodes import SymbolTableNode +from mypy.nodes import TempNode +from mypy.nodes import TypeInfo +from mypy.nodes import Var +from mypy.plugin import SemanticAnalyzerPluginInterface +from mypy.types import AnyType +from mypy.types import CallableType +from mypy.types import get_proper_type +from mypy.types import Instance +from mypy.types import NoneType +from mypy.types import ProperType +from mypy.types import Type +from mypy.types import TypeOfAny +from mypy.types import UnboundType +from mypy.types import UnionType + +from . import apply +from . import infer +from . import names +from . import util + + +def scan_declarative_assignments_and_apply_types( + cls: ClassDef, + api: SemanticAnalyzerPluginInterface, + is_mixin_scan: bool = False, +) -> Optional[List[util.SQLAlchemyAttribute]]: + info = util.info_for_cls(cls, api) + + if info is None: + # this can occur during cached passes + return None + elif cls.fullname.startswith("builtins"): + return None + + mapped_attributes: Optional[List[util.SQLAlchemyAttribute]] = ( + util.get_mapped_attributes(info, api) + ) + + # used by assign.add_additional_orm_attributes among others + util.establish_as_sqlalchemy(info) + + if mapped_attributes is not None: + # ensure that a class that's mapped is always picked up by + # its mapped() decorator or declarative metaclass before + # it would be detected as an unmapped mixin class + + if not is_mixin_scan: + # mypy can call us more than once. it then *may* have reset the + # left hand side of everything, but not the right that we removed, + # removing our ability to re-scan. but we have the types + # here, so lets re-apply them, or if we have an UnboundType, + # we can re-scan + + apply.re_apply_declarative_assignments(cls, api, mapped_attributes) + + return mapped_attributes + + mapped_attributes = [] + + if not cls.defs.body: + # when we get a mixin class from another file, the body is + # empty (!) but the names are in the symbol table. so use that. 
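+        # each symbol-table entry is inspected for Mapped / relationship /
+        # Column etc. constructs; anything recognized is appended to
+        # ``mapped_attributes``.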
+ + for sym_name, sym in info.names.items(): + _scan_symbol_table_entry( + cls, api, sym_name, sym, mapped_attributes + ) + else: + for stmt in util.flatten_typechecking(cls.defs.body): + if isinstance(stmt, AssignmentStmt): + _scan_declarative_assignment_stmt( + cls, api, stmt, mapped_attributes + ) + elif isinstance(stmt, Decorator): + _scan_declarative_decorator_stmt( + cls, api, stmt, mapped_attributes + ) + _scan_for_mapped_bases(cls, api) + + if not is_mixin_scan: + apply.add_additional_orm_attributes(cls, api, mapped_attributes) + + util.set_mapped_attributes(info, mapped_attributes) + + return mapped_attributes + + +def _scan_symbol_table_entry( + cls: ClassDef, + api: SemanticAnalyzerPluginInterface, + name: str, + value: SymbolTableNode, + attributes: List[util.SQLAlchemyAttribute], +) -> None: + """Extract mapping information from a SymbolTableNode that's in the + type.names dictionary. + + """ + value_type = get_proper_type(value.type) + if not isinstance(value_type, Instance): + return + + left_hand_explicit_type = None + type_id = names.type_id_for_named_node(value_type.type) + # type_id = names._type_id_for_unbound_type(value.type.type, cls, api) + + err = False + + # TODO: this is nearly the same logic as that of + # _scan_declarative_decorator_stmt, likely can be merged + if type_id in { + names.MAPPED, + names.RELATIONSHIP, + names.COMPOSITE_PROPERTY, + names.MAPPER_PROPERTY, + names.SYNONYM_PROPERTY, + names.COLUMN_PROPERTY, + }: + if value_type.args: + left_hand_explicit_type = get_proper_type(value_type.args[0]) + else: + err = True + elif type_id is names.COLUMN: + if not value_type.args: + err = True + else: + typeengine_arg: Union[ProperType, TypeInfo] = get_proper_type( + value_type.args[0] + ) + if isinstance(typeengine_arg, Instance): + typeengine_arg = typeengine_arg.type + + if isinstance(typeengine_arg, (UnboundType, TypeInfo)): + sym = api.lookup_qualified(typeengine_arg.name, typeengine_arg) + if sym is not None and isinstance(sym.node, TypeInfo): + if names.has_base_type_id(sym.node, names.TYPEENGINE): + left_hand_explicit_type = UnionType( + [ + infer.extract_python_type_from_typeengine( + api, sym.node, [] + ), + NoneType(), + ] + ) + else: + util.fail( + api, + "Column type should be a TypeEngine " + "subclass not '{}'".format(sym.node.fullname), + value_type, + ) + + if err: + msg = ( + "Can't infer type from attribute {} on class {}. " + "please specify a return type from this function that is " + "one of: Mapped[], relationship[], " + "Column[], MapperProperty[]" + ) + util.fail(api, msg.format(name, cls.name), cls) + + left_hand_explicit_type = AnyType(TypeOfAny.special_form) + + if left_hand_explicit_type is not None: + assert value.node is not None + attributes.append( + util.SQLAlchemyAttribute( + name=name, + line=value.node.line, + column=value.node.column, + typ=left_hand_explicit_type, + info=cls.info, + ) + ) + + +def _scan_declarative_decorator_stmt( + cls: ClassDef, + api: SemanticAnalyzerPluginInterface, + stmt: Decorator, + attributes: List[util.SQLAlchemyAttribute], +) -> None: + """Extract mapping information from a @declared_attr in a declarative + class. + + E.g.:: + + @reg.mapped + class MyClass: + # ... + + @declared_attr + def updated_at(cls) -> Column[DateTime]: + return Column(DateTime) + + Will resolve in mypy as:: + + @reg.mapped + class MyClass: + # ... 
+ + updated_at: Mapped[Optional[datetime.datetime]] + + """ + for dec in stmt.decorators: + if ( + isinstance(dec, (NameExpr, MemberExpr, SymbolNode)) + and names.type_id_for_named_node(dec) is names.DECLARED_ATTR + ): + break + else: + return + + dec_index = cls.defs.body.index(stmt) + + left_hand_explicit_type: Optional[ProperType] = None + + if util.name_is_dunder(stmt.name): + # for dunder names like __table_args__, __tablename__, + # __mapper_args__ etc., rewrite these as simple assignment + # statements; otherwise mypy doesn't like if the decorated + # function has an annotation like ``cls: Type[Foo]`` because + # it isn't @classmethod + any_ = AnyType(TypeOfAny.special_form) + left_node = NameExpr(stmt.var.name) + left_node.node = stmt.var + new_stmt = AssignmentStmt([left_node], TempNode(any_)) + new_stmt.type = left_node.node.type + cls.defs.body[dec_index] = new_stmt + return + elif isinstance(stmt.func.type, CallableType): + func_type = stmt.func.type.ret_type + if isinstance(func_type, UnboundType): + type_id = names.type_id_for_unbound_type(func_type, cls, api) + else: + # this does not seem to occur unless the type argument is + # incorrect + return + + if ( + type_id + in { + names.MAPPED, + names.RELATIONSHIP, + names.COMPOSITE_PROPERTY, + names.MAPPER_PROPERTY, + names.SYNONYM_PROPERTY, + names.COLUMN_PROPERTY, + } + and func_type.args + ): + left_hand_explicit_type = get_proper_type(func_type.args[0]) + elif type_id is names.COLUMN and func_type.args: + typeengine_arg = func_type.args[0] + if isinstance(typeengine_arg, UnboundType): + sym = api.lookup_qualified(typeengine_arg.name, typeengine_arg) + if sym is not None and isinstance(sym.node, TypeInfo): + if names.has_base_type_id(sym.node, names.TYPEENGINE): + left_hand_explicit_type = UnionType( + [ + infer.extract_python_type_from_typeengine( + api, sym.node, [] + ), + NoneType(), + ] + ) + else: + util.fail( + api, + "Column type should be a TypeEngine " + "subclass not '{}'".format(sym.node.fullname), + func_type, + ) + + if left_hand_explicit_type is None: + # no type on the decorated function. our option here is to + # dig into the function body and get the return type, but they + # should just have an annotation. + msg = ( + "Can't infer type from @declared_attr on function '{}'; " + "please specify a return type from this function that is " + "one of: Mapped[], relationship[], " + "Column[], MapperProperty[]" + ) + util.fail(api, msg.format(stmt.var.name), stmt) + + left_hand_explicit_type = AnyType(TypeOfAny.special_form) + + left_node = NameExpr(stmt.var.name) + left_node.node = stmt.var + + # totally feeling around in the dark here as I don't totally understand + # the significance of UnboundType. It seems to be something that is + # not going to do what's expected when it is applied as the type of + # an AssignmentStatement. So do a feeling-around-in-the-dark version + # of converting it to the regular Instance/TypeInfo/UnionType structures + # we see everywhere else. 
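+    # i.e. if the annotation is still an UnboundType at this point, resolve
+    # it through the semantic analyzer into a proper Instance / UnionType
+    # before it is handed to ``Mapped[]`` below.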
+ if isinstance(left_hand_explicit_type, UnboundType): + left_hand_explicit_type = get_proper_type( + util.unbound_to_instance(api, left_hand_explicit_type) + ) + + left_node.node.type = api.named_type( + names.NAMED_TYPE_SQLA_MAPPED, [left_hand_explicit_type] + ) + + # this will ignore the rvalue entirely + # rvalue = TempNode(AnyType(TypeOfAny.special_form)) + + # rewrite the node as: + # : Mapped[] = + # _sa_Mapped._empty_constructor(lambda: ) + # the function body is maintained so it gets type checked internally + rvalue = names.expr_to_mapped_constructor( + LambdaExpr(stmt.func.arguments, stmt.func.body) + ) + + new_stmt = AssignmentStmt([left_node], rvalue) + new_stmt.type = left_node.node.type + + attributes.append( + util.SQLAlchemyAttribute( + name=left_node.name, + line=stmt.line, + column=stmt.column, + typ=left_hand_explicit_type, + info=cls.info, + ) + ) + cls.defs.body[dec_index] = new_stmt + + +def _scan_declarative_assignment_stmt( + cls: ClassDef, + api: SemanticAnalyzerPluginInterface, + stmt: AssignmentStmt, + attributes: List[util.SQLAlchemyAttribute], +) -> None: + """Extract mapping information from an assignment statement in a + declarative class. + + """ + lvalue = stmt.lvalues[0] + if not isinstance(lvalue, NameExpr): + return + + sym = cls.info.names.get(lvalue.name) + + # this establishes that semantic analysis has taken place, which + # means the nodes are populated and we are called from an appropriate + # hook. + assert sym is not None + node = sym.node + + if isinstance(node, PlaceholderNode): + return + + assert node is lvalue.node + assert isinstance(node, Var) + + if node.name == "__abstract__": + if api.parse_bool(stmt.rvalue) is True: + util.set_is_base(cls.info) + return + elif node.name == "__tablename__": + util.set_has_table(cls.info) + elif node.name.startswith("__"): + return + elif node.name == "_mypy_mapped_attrs": + if not isinstance(stmt.rvalue, ListExpr): + util.fail(api, "_mypy_mapped_attrs is expected to be a list", stmt) + else: + for item in stmt.rvalue.items: + if isinstance(item, (NameExpr, StrExpr)): + apply.apply_mypy_mapped_attr(cls, api, item, attributes) + + left_hand_mapped_type: Optional[Type] = None + left_hand_explicit_type: Optional[ProperType] = None + + if node.is_inferred or node.type is None: + if isinstance(stmt.type, UnboundType): + # look for an explicit Mapped[] type annotation on the left + # side with nothing on the right + + # print(stmt.type) + # Mapped?[Optional?[A?]] + + left_hand_explicit_type = stmt.type + + if stmt.type.name == "Mapped": + mapped_sym = api.lookup_qualified("Mapped", cls) + if ( + mapped_sym is not None + and mapped_sym.node is not None + and names.type_id_for_named_node(mapped_sym.node) + is names.MAPPED + ): + left_hand_explicit_type = get_proper_type( + stmt.type.args[0] + ) + left_hand_mapped_type = stmt.type + + # TODO: do we need to convert from unbound for this case? 
+ # left_hand_explicit_type = util._unbound_to_instance( + # api, left_hand_explicit_type + # ) + else: + node_type = get_proper_type(node.type) + if ( + isinstance(node_type, Instance) + and names.type_id_for_named_node(node_type.type) is names.MAPPED + ): + # print(node.type) + # sqlalchemy.orm.attributes.Mapped[] + left_hand_explicit_type = get_proper_type(node_type.args[0]) + left_hand_mapped_type = node_type + else: + # print(node.type) + # + left_hand_explicit_type = node_type + left_hand_mapped_type = None + + if isinstance(stmt.rvalue, TempNode) and left_hand_mapped_type is not None: + # annotation without assignment and Mapped is present + # as type annotation + # equivalent to using _infer_type_from_left_hand_type_only. + + python_type_for_type = left_hand_explicit_type + elif isinstance(stmt.rvalue, CallExpr) and isinstance( + stmt.rvalue.callee, RefExpr + ): + python_type_for_type = infer.infer_type_from_right_hand_nameexpr( + api, stmt, node, left_hand_explicit_type, stmt.rvalue.callee + ) + + if python_type_for_type is None: + return + + else: + return + + assert python_type_for_type is not None + + attributes.append( + util.SQLAlchemyAttribute( + name=node.name, + line=stmt.line, + column=stmt.column, + typ=python_type_for_type, + info=cls.info, + ) + ) + + apply.apply_type_to_mapped_statement( + api, + stmt, + lvalue, + left_hand_explicit_type, + python_type_for_type, + ) + + +def _scan_for_mapped_bases( + cls: ClassDef, + api: SemanticAnalyzerPluginInterface, +) -> None: + """Given a class, iterate through its superclass hierarchy to find + all other classes that are considered as ORM-significant. + + Locates non-mapped mixins and scans them for mapped attributes to be + applied to subclasses. + + """ + + info = util.info_for_cls(cls, api) + + if info is None: + return + + for base_info in info.mro[1:-1]: + if base_info.fullname.startswith("builtins"): + continue + + # scan each base for mapped attributes. 
if they are not already + # scanned (but have all their type info), that means they are unmapped + # mixins + scan_declarative_assignments_and_apply_types( + base_info.defn, api, is_mixin_scan=True + ) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/ext/mypy/infer.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/ext/mypy/infer.py new file mode 100644 index 0000000000000000000000000000000000000000..26a83cca8363a4c227167a78e3711401e6133978 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/ext/mypy/infer.py @@ -0,0 +1,590 @@ +# ext/mypy/infer.py +# Copyright (C) 2021-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php + +from __future__ import annotations + +from typing import Optional +from typing import Sequence + +from mypy.maptype import map_instance_to_supertype +from mypy.nodes import AssignmentStmt +from mypy.nodes import CallExpr +from mypy.nodes import Expression +from mypy.nodes import FuncDef +from mypy.nodes import LambdaExpr +from mypy.nodes import MemberExpr +from mypy.nodes import NameExpr +from mypy.nodes import RefExpr +from mypy.nodes import StrExpr +from mypy.nodes import TypeInfo +from mypy.nodes import Var +from mypy.plugin import SemanticAnalyzerPluginInterface +from mypy.subtypes import is_subtype +from mypy.types import AnyType +from mypy.types import CallableType +from mypy.types import get_proper_type +from mypy.types import Instance +from mypy.types import NoneType +from mypy.types import ProperType +from mypy.types import TypeOfAny +from mypy.types import UnionType + +from . import names +from . import util + + +def infer_type_from_right_hand_nameexpr( + api: SemanticAnalyzerPluginInterface, + stmt: AssignmentStmt, + node: Var, + left_hand_explicit_type: Optional[ProperType], + infer_from_right_side: RefExpr, +) -> Optional[ProperType]: + type_id = names.type_id_for_callee(infer_from_right_side) + if type_id is None: + return None + elif type_id is names.MAPPED: + python_type_for_type = _infer_type_from_mapped( + api, stmt, node, left_hand_explicit_type, infer_from_right_side + ) + elif type_id is names.COLUMN: + python_type_for_type = _infer_type_from_decl_column( + api, stmt, node, left_hand_explicit_type + ) + elif type_id is names.RELATIONSHIP: + python_type_for_type = _infer_type_from_relationship( + api, stmt, node, left_hand_explicit_type + ) + elif type_id is names.COLUMN_PROPERTY: + python_type_for_type = _infer_type_from_decl_column_property( + api, stmt, node, left_hand_explicit_type + ) + elif type_id is names.SYNONYM_PROPERTY: + python_type_for_type = infer_type_from_left_hand_type_only( + api, node, left_hand_explicit_type + ) + elif type_id is names.COMPOSITE_PROPERTY: + python_type_for_type = _infer_type_from_decl_composite_property( + api, stmt, node, left_hand_explicit_type + ) + else: + return None + + return python_type_for_type + + +def _infer_type_from_relationship( + api: SemanticAnalyzerPluginInterface, + stmt: AssignmentStmt, + node: Var, + left_hand_explicit_type: Optional[ProperType], +) -> Optional[ProperType]: + """Infer the type of mapping from a relationship. + + E.g.:: + + @reg.mapped + class MyClass: + # ... 
+ + addresses = relationship(Address, uselist=True) + + order: Mapped["Order"] = relationship("Order") + + Will resolve in mypy as:: + + @reg.mapped + class MyClass: + # ... + + addresses: Mapped[List[Address]] + + order: Mapped["Order"] + + """ + + assert isinstance(stmt.rvalue, CallExpr) + target_cls_arg = stmt.rvalue.args[0] + python_type_for_type: Optional[ProperType] = None + + if isinstance(target_cls_arg, NameExpr) and isinstance( + target_cls_arg.node, TypeInfo + ): + # type + related_object_type = target_cls_arg.node + python_type_for_type = Instance(related_object_type, []) + + # other cases not covered - an error message directs the user + # to set an explicit type annotation + # + # node.type == str, it's a string + # if isinstance(target_cls_arg, NameExpr) and isinstance( + # target_cls_arg.node, Var + # ) + # points to a type + # isinstance(target_cls_arg, NameExpr) and isinstance( + # target_cls_arg.node, TypeAlias + # ) + # string expression + # isinstance(target_cls_arg, StrExpr) + + uselist_arg = util.get_callexpr_kwarg(stmt.rvalue, "uselist") + collection_cls_arg: Optional[Expression] = util.get_callexpr_kwarg( + stmt.rvalue, "collection_class" + ) + type_is_a_collection = False + + # this can be used to determine Optional for a many-to-one + # in the same way nullable=False could be used, if we start supporting + # that. + # innerjoin_arg = util.get_callexpr_kwarg(stmt.rvalue, "innerjoin") + + if ( + uselist_arg is not None + and api.parse_bool(uselist_arg) is True + and collection_cls_arg is None + ): + type_is_a_collection = True + if python_type_for_type is not None: + python_type_for_type = api.named_type( + names.NAMED_TYPE_BUILTINS_LIST, [python_type_for_type] + ) + elif ( + uselist_arg is None or api.parse_bool(uselist_arg) is True + ) and collection_cls_arg is not None: + type_is_a_collection = True + if isinstance(collection_cls_arg, CallExpr): + collection_cls_arg = collection_cls_arg.callee + + if isinstance(collection_cls_arg, NameExpr) and isinstance( + collection_cls_arg.node, TypeInfo + ): + if python_type_for_type is not None: + # this can still be overridden by the left hand side + # within _infer_Type_from_left_and_inferred_right + python_type_for_type = Instance( + collection_cls_arg.node, [python_type_for_type] + ) + elif ( + isinstance(collection_cls_arg, NameExpr) + and isinstance(collection_cls_arg.node, FuncDef) + and collection_cls_arg.node.type is not None + ): + if python_type_for_type is not None: + # this can still be overridden by the left hand side + # within _infer_Type_from_left_and_inferred_right + + # TODO: handle mypy.types.Overloaded + if isinstance(collection_cls_arg.node.type, CallableType): + rt = get_proper_type(collection_cls_arg.node.type.ret_type) + + if isinstance(rt, CallableType): + callable_ret_type = get_proper_type(rt.ret_type) + if isinstance(callable_ret_type, Instance): + python_type_for_type = Instance( + callable_ret_type.type, + [python_type_for_type], + ) + else: + util.fail( + api, + "Expected Python collection type for " + "collection_class parameter", + stmt.rvalue, + ) + python_type_for_type = None + elif uselist_arg is not None and api.parse_bool(uselist_arg) is False: + if collection_cls_arg is not None: + util.fail( + api, + "Sending uselist=False and collection_class at the same time " + "does not make sense", + stmt.rvalue, + ) + if python_type_for_type is not None: + python_type_for_type = UnionType( + [python_type_for_type, NoneType()] + ) + + else: + if left_hand_explicit_type is None: + msg = ( + 
"Can't infer scalar or collection for ORM mapped expression " + "assigned to attribute '{}' if both 'uselist' and " + "'collection_class' arguments are absent from the " + "relationship(); please specify a " + "type annotation on the left hand side." + ) + util.fail(api, msg.format(node.name), node) + + if python_type_for_type is None: + return infer_type_from_left_hand_type_only( + api, node, left_hand_explicit_type + ) + elif left_hand_explicit_type is not None: + if type_is_a_collection: + assert isinstance(left_hand_explicit_type, Instance) + assert isinstance(python_type_for_type, Instance) + return _infer_collection_type_from_left_and_inferred_right( + api, node, left_hand_explicit_type, python_type_for_type + ) + else: + return _infer_type_from_left_and_inferred_right( + api, + node, + left_hand_explicit_type, + python_type_for_type, + ) + else: + return python_type_for_type + + +def _infer_type_from_decl_composite_property( + api: SemanticAnalyzerPluginInterface, + stmt: AssignmentStmt, + node: Var, + left_hand_explicit_type: Optional[ProperType], +) -> Optional[ProperType]: + """Infer the type of mapping from a Composite.""" + + assert isinstance(stmt.rvalue, CallExpr) + target_cls_arg = stmt.rvalue.args[0] + python_type_for_type = None + + if isinstance(target_cls_arg, NameExpr) and isinstance( + target_cls_arg.node, TypeInfo + ): + related_object_type = target_cls_arg.node + python_type_for_type = Instance(related_object_type, []) + else: + python_type_for_type = None + + if python_type_for_type is None: + return infer_type_from_left_hand_type_only( + api, node, left_hand_explicit_type + ) + elif left_hand_explicit_type is not None: + return _infer_type_from_left_and_inferred_right( + api, node, left_hand_explicit_type, python_type_for_type + ) + else: + return python_type_for_type + + +def _infer_type_from_mapped( + api: SemanticAnalyzerPluginInterface, + stmt: AssignmentStmt, + node: Var, + left_hand_explicit_type: Optional[ProperType], + infer_from_right_side: RefExpr, +) -> Optional[ProperType]: + """Infer the type of mapping from a right side expression + that returns Mapped. + + + """ + assert isinstance(stmt.rvalue, CallExpr) + + # (Pdb) print(stmt.rvalue.callee) + # NameExpr(query_expression [sqlalchemy.orm._orm_constructors.query_expression]) # noqa: E501 + # (Pdb) stmt.rvalue.callee.node + # + # (Pdb) stmt.rvalue.callee.node.type + # def [_T] (default_expr: sqlalchemy.sql.elements.ColumnElement[_T`-1] =) -> sqlalchemy.orm.base.Mapped[_T`-1] # noqa: E501 + # sqlalchemy.orm.base.Mapped[_T`-1] + # the_mapped_type = stmt.rvalue.callee.node.type.ret_type + + # TODO: look at generic ref and either use that, + # or reconcile w/ what's present, etc. + the_mapped_type = util.type_for_callee(infer_from_right_side) # noqa + + return infer_type_from_left_hand_type_only( + api, node, left_hand_explicit_type + ) + + +def _infer_type_from_decl_column_property( + api: SemanticAnalyzerPluginInterface, + stmt: AssignmentStmt, + node: Var, + left_hand_explicit_type: Optional[ProperType], +) -> Optional[ProperType]: + """Infer the type of mapping from a ColumnProperty. + + This includes mappings against ``column_property()`` as well as the + ``deferred()`` function. 
+ + """ + assert isinstance(stmt.rvalue, CallExpr) + + if stmt.rvalue.args: + first_prop_arg = stmt.rvalue.args[0] + + if isinstance(first_prop_arg, CallExpr): + type_id = names.type_id_for_callee(first_prop_arg.callee) + + # look for column_property() / deferred() etc with Column as first + # argument + if type_id is names.COLUMN: + return _infer_type_from_decl_column( + api, + stmt, + node, + left_hand_explicit_type, + right_hand_expression=first_prop_arg, + ) + + if isinstance(stmt.rvalue, CallExpr): + type_id = names.type_id_for_callee(stmt.rvalue.callee) + # this is probably not strictly necessary as we have to use the left + # hand type for query expression in any case. any other no-arg + # column prop objects would go here also + if type_id is names.QUERY_EXPRESSION: + return _infer_type_from_decl_column( + api, + stmt, + node, + left_hand_explicit_type, + ) + + return infer_type_from_left_hand_type_only( + api, node, left_hand_explicit_type + ) + + +def _infer_type_from_decl_column( + api: SemanticAnalyzerPluginInterface, + stmt: AssignmentStmt, + node: Var, + left_hand_explicit_type: Optional[ProperType], + right_hand_expression: Optional[CallExpr] = None, +) -> Optional[ProperType]: + """Infer the type of mapping from a Column. + + E.g.:: + + @reg.mapped + class MyClass: + # ... + + a = Column(Integer) + + b = Column("b", String) + + c: Mapped[int] = Column(Integer) + + d: bool = Column(Boolean) + + Will resolve in MyPy as:: + + @reg.mapped + class MyClass: + # ... + + a: Mapped[int] + + b: Mapped[str] + + c: Mapped[int] + + d: Mapped[bool] + + """ + assert isinstance(node, Var) + + callee = None + + if right_hand_expression is None: + if not isinstance(stmt.rvalue, CallExpr): + return None + + right_hand_expression = stmt.rvalue + + for column_arg in right_hand_expression.args[0:2]: + if isinstance(column_arg, CallExpr): + if isinstance(column_arg.callee, RefExpr): + # x = Column(String(50)) + callee = column_arg.callee + type_args: Sequence[Expression] = column_arg.args + break + elif isinstance(column_arg, (NameExpr, MemberExpr)): + if isinstance(column_arg.node, TypeInfo): + # x = Column(String) + callee = column_arg + type_args = () + break + else: + # x = Column(some_name, String), go to next argument + continue + elif isinstance(column_arg, (StrExpr,)): + # x = Column("name", String), go to next argument + continue + elif isinstance(column_arg, (LambdaExpr,)): + # x = Column("name", String, default=lambda: uuid.uuid4()) + # go to next argument + continue + else: + assert False + + if callee is None: + return None + + if isinstance(callee.node, TypeInfo) and names.mro_has_id( + callee.node.mro, names.TYPEENGINE + ): + python_type_for_type = extract_python_type_from_typeengine( + api, callee.node, type_args + ) + + if left_hand_explicit_type is not None: + return _infer_type_from_left_and_inferred_right( + api, node, left_hand_explicit_type, python_type_for_type + ) + + else: + return UnionType([python_type_for_type, NoneType()]) + else: + # it's not TypeEngine, it's typically implicitly typed + # like ForeignKey. we can't infer from the right side. 
+ return infer_type_from_left_hand_type_only( + api, node, left_hand_explicit_type + ) + + +def _infer_type_from_left_and_inferred_right( + api: SemanticAnalyzerPluginInterface, + node: Var, + left_hand_explicit_type: ProperType, + python_type_for_type: ProperType, + orig_left_hand_type: Optional[ProperType] = None, + orig_python_type_for_type: Optional[ProperType] = None, +) -> Optional[ProperType]: + """Validate type when a left hand annotation is present and we also + could infer the right hand side:: + + attrname: SomeType = Column(SomeDBType) + + """ + + if orig_left_hand_type is None: + orig_left_hand_type = left_hand_explicit_type + if orig_python_type_for_type is None: + orig_python_type_for_type = python_type_for_type + + if not is_subtype(left_hand_explicit_type, python_type_for_type): + effective_type = api.named_type( + names.NAMED_TYPE_SQLA_MAPPED, [orig_python_type_for_type] + ) + + msg = ( + "Left hand assignment '{}: {}' not compatible " + "with ORM mapped expression of type {}" + ) + util.fail( + api, + msg.format( + node.name, + util.format_type(orig_left_hand_type, api.options), + util.format_type(effective_type, api.options), + ), + node, + ) + + return orig_left_hand_type + + +def _infer_collection_type_from_left_and_inferred_right( + api: SemanticAnalyzerPluginInterface, + node: Var, + left_hand_explicit_type: Instance, + python_type_for_type: Instance, +) -> Optional[ProperType]: + orig_left_hand_type = left_hand_explicit_type + orig_python_type_for_type = python_type_for_type + + if left_hand_explicit_type.args: + left_hand_arg = get_proper_type(left_hand_explicit_type.args[0]) + python_type_arg = get_proper_type(python_type_for_type.args[0]) + else: + left_hand_arg = left_hand_explicit_type + python_type_arg = python_type_for_type + + assert isinstance(left_hand_arg, (Instance, UnionType)) + assert isinstance(python_type_arg, (Instance, UnionType)) + + return _infer_type_from_left_and_inferred_right( + api, + node, + left_hand_arg, + python_type_arg, + orig_left_hand_type=orig_left_hand_type, + orig_python_type_for_type=orig_python_type_for_type, + ) + + +def infer_type_from_left_hand_type_only( + api: SemanticAnalyzerPluginInterface, + node: Var, + left_hand_explicit_type: Optional[ProperType], +) -> Optional[ProperType]: + """Determine the type based on explicit annotation only. + + if no annotation were present, note that we need one there to know + the type. + + """ + if left_hand_explicit_type is None: + msg = ( + "Can't infer type from ORM mapped expression " + "assigned to attribute '{}'; please specify a " + "Python type or " + "Mapped[] on the left hand side." 
+ ) + util.fail(api, msg.format(node.name), node) + + return api.named_type( + names.NAMED_TYPE_SQLA_MAPPED, [AnyType(TypeOfAny.special_form)] + ) + + else: + # use type from the left hand side + return left_hand_explicit_type + + +def extract_python_type_from_typeengine( + api: SemanticAnalyzerPluginInterface, + node: TypeInfo, + type_args: Sequence[Expression], +) -> ProperType: + if node.fullname == "sqlalchemy.sql.sqltypes.Enum" and type_args: + first_arg = type_args[0] + if isinstance(first_arg, RefExpr) and isinstance( + first_arg.node, TypeInfo + ): + for base_ in first_arg.node.mro: + if base_.fullname == "enum.Enum": + return Instance(first_arg.node, []) + # TODO: support other pep-435 types here + else: + return api.named_type(names.NAMED_TYPE_BUILTINS_STR, []) + + assert node.has_base("sqlalchemy.sql.type_api.TypeEngine"), ( + "could not extract Python type from node: %s" % node + ) + + type_engine_sym = api.lookup_fully_qualified_or_none( + "sqlalchemy.sql.type_api.TypeEngine" + ) + + assert type_engine_sym is not None and isinstance( + type_engine_sym.node, TypeInfo + ) + type_engine = map_instance_to_supertype( + Instance(node, []), + type_engine_sym.node, + ) + return get_proper_type(type_engine.args[-1]) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/ext/mypy/names.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/ext/mypy/names.py new file mode 100644 index 0000000000000000000000000000000000000000..319786288fdb43ed7b70bfa48be84b18a6013556 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/ext/mypy/names.py @@ -0,0 +1,335 @@ +# ext/mypy/names.py +# Copyright (C) 2021-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php + +from __future__ import annotations + +from typing import Dict +from typing import List +from typing import Optional +from typing import Set +from typing import Tuple +from typing import Union + +from mypy.nodes import ARG_POS +from mypy.nodes import CallExpr +from mypy.nodes import ClassDef +from mypy.nodes import Decorator +from mypy.nodes import Expression +from mypy.nodes import FuncDef +from mypy.nodes import MemberExpr +from mypy.nodes import NameExpr +from mypy.nodes import OverloadedFuncDef +from mypy.nodes import SymbolNode +from mypy.nodes import TypeAlias +from mypy.nodes import TypeInfo +from mypy.plugin import SemanticAnalyzerPluginInterface +from mypy.types import CallableType +from mypy.types import get_proper_type +from mypy.types import Instance +from mypy.types import UnboundType + +from ... 
import util + +COLUMN: int = util.symbol("COLUMN") +RELATIONSHIP: int = util.symbol("RELATIONSHIP") +REGISTRY: int = util.symbol("REGISTRY") +COLUMN_PROPERTY: int = util.symbol("COLUMN_PROPERTY") +TYPEENGINE: int = util.symbol("TYPEENGNE") +MAPPED: int = util.symbol("MAPPED") +DECLARATIVE_BASE: int = util.symbol("DECLARATIVE_BASE") +DECLARATIVE_META: int = util.symbol("DECLARATIVE_META") +MAPPED_DECORATOR: int = util.symbol("MAPPED_DECORATOR") +SYNONYM_PROPERTY: int = util.symbol("SYNONYM_PROPERTY") +COMPOSITE_PROPERTY: int = util.symbol("COMPOSITE_PROPERTY") +DECLARED_ATTR: int = util.symbol("DECLARED_ATTR") +MAPPER_PROPERTY: int = util.symbol("MAPPER_PROPERTY") +AS_DECLARATIVE: int = util.symbol("AS_DECLARATIVE") +AS_DECLARATIVE_BASE: int = util.symbol("AS_DECLARATIVE_BASE") +DECLARATIVE_MIXIN: int = util.symbol("DECLARATIVE_MIXIN") +QUERY_EXPRESSION: int = util.symbol("QUERY_EXPRESSION") + +# names that must succeed with mypy.api.named_type +NAMED_TYPE_BUILTINS_OBJECT = "builtins.object" +NAMED_TYPE_BUILTINS_STR = "builtins.str" +NAMED_TYPE_BUILTINS_LIST = "builtins.list" +NAMED_TYPE_SQLA_MAPPED = "sqlalchemy.orm.base.Mapped" + +_RelFullNames = { + "sqlalchemy.orm.relationships.Relationship", + "sqlalchemy.orm.relationships.RelationshipProperty", + "sqlalchemy.orm.relationships._RelationshipDeclared", + "sqlalchemy.orm.Relationship", + "sqlalchemy.orm.RelationshipProperty", +} + +_lookup: Dict[str, Tuple[int, Set[str]]] = { + "Column": ( + COLUMN, + { + "sqlalchemy.sql.schema.Column", + "sqlalchemy.sql.Column", + }, + ), + "Relationship": (RELATIONSHIP, _RelFullNames), + "RelationshipProperty": (RELATIONSHIP, _RelFullNames), + "_RelationshipDeclared": (RELATIONSHIP, _RelFullNames), + "registry": ( + REGISTRY, + { + "sqlalchemy.orm.decl_api.registry", + "sqlalchemy.orm.registry", + }, + ), + "ColumnProperty": ( + COLUMN_PROPERTY, + { + "sqlalchemy.orm.properties.MappedSQLExpression", + "sqlalchemy.orm.MappedSQLExpression", + "sqlalchemy.orm.properties.ColumnProperty", + "sqlalchemy.orm.ColumnProperty", + }, + ), + "MappedSQLExpression": ( + COLUMN_PROPERTY, + { + "sqlalchemy.orm.properties.MappedSQLExpression", + "sqlalchemy.orm.MappedSQLExpression", + "sqlalchemy.orm.properties.ColumnProperty", + "sqlalchemy.orm.ColumnProperty", + }, + ), + "Synonym": ( + SYNONYM_PROPERTY, + { + "sqlalchemy.orm.descriptor_props.Synonym", + "sqlalchemy.orm.Synonym", + "sqlalchemy.orm.descriptor_props.SynonymProperty", + "sqlalchemy.orm.SynonymProperty", + }, + ), + "SynonymProperty": ( + SYNONYM_PROPERTY, + { + "sqlalchemy.orm.descriptor_props.Synonym", + "sqlalchemy.orm.Synonym", + "sqlalchemy.orm.descriptor_props.SynonymProperty", + "sqlalchemy.orm.SynonymProperty", + }, + ), + "Composite": ( + COMPOSITE_PROPERTY, + { + "sqlalchemy.orm.descriptor_props.Composite", + "sqlalchemy.orm.Composite", + "sqlalchemy.orm.descriptor_props.CompositeProperty", + "sqlalchemy.orm.CompositeProperty", + }, + ), + "CompositeProperty": ( + COMPOSITE_PROPERTY, + { + "sqlalchemy.orm.descriptor_props.Composite", + "sqlalchemy.orm.Composite", + "sqlalchemy.orm.descriptor_props.CompositeProperty", + "sqlalchemy.orm.CompositeProperty", + }, + ), + "MapperProperty": ( + MAPPER_PROPERTY, + { + "sqlalchemy.orm.interfaces.MapperProperty", + "sqlalchemy.orm.MapperProperty", + }, + ), + "TypeEngine": (TYPEENGINE, {"sqlalchemy.sql.type_api.TypeEngine"}), + "Mapped": (MAPPED, {NAMED_TYPE_SQLA_MAPPED}), + "declarative_base": ( + DECLARATIVE_BASE, + { + "sqlalchemy.ext.declarative.declarative_base", + "sqlalchemy.orm.declarative_base", 
+ "sqlalchemy.orm.decl_api.declarative_base", + }, + ), + "DeclarativeMeta": ( + DECLARATIVE_META, + { + "sqlalchemy.ext.declarative.DeclarativeMeta", + "sqlalchemy.orm.DeclarativeMeta", + "sqlalchemy.orm.decl_api.DeclarativeMeta", + }, + ), + "mapped": ( + MAPPED_DECORATOR, + { + "sqlalchemy.orm.decl_api.registry.mapped", + "sqlalchemy.orm.registry.mapped", + }, + ), + "as_declarative": ( + AS_DECLARATIVE, + { + "sqlalchemy.ext.declarative.as_declarative", + "sqlalchemy.orm.decl_api.as_declarative", + "sqlalchemy.orm.as_declarative", + }, + ), + "as_declarative_base": ( + AS_DECLARATIVE_BASE, + { + "sqlalchemy.orm.decl_api.registry.as_declarative_base", + "sqlalchemy.orm.registry.as_declarative_base", + }, + ), + "declared_attr": ( + DECLARED_ATTR, + { + "sqlalchemy.orm.decl_api.declared_attr", + "sqlalchemy.orm.declared_attr", + }, + ), + "declarative_mixin": ( + DECLARATIVE_MIXIN, + { + "sqlalchemy.orm.decl_api.declarative_mixin", + "sqlalchemy.orm.declarative_mixin", + }, + ), + "query_expression": ( + QUERY_EXPRESSION, + { + "sqlalchemy.orm.query_expression", + "sqlalchemy.orm._orm_constructors.query_expression", + }, + ), +} + + +def has_base_type_id(info: TypeInfo, type_id: int) -> bool: + for mr in info.mro: + check_type_id, fullnames = _lookup.get(mr.name, (None, None)) + if check_type_id == type_id: + break + else: + return False + + if fullnames is None: + return False + + return mr.fullname in fullnames + + +def mro_has_id(mro: List[TypeInfo], type_id: int) -> bool: + for mr in mro: + check_type_id, fullnames = _lookup.get(mr.name, (None, None)) + if check_type_id == type_id: + break + else: + return False + + if fullnames is None: + return False + + return mr.fullname in fullnames + + +def type_id_for_unbound_type( + type_: UnboundType, cls: ClassDef, api: SemanticAnalyzerPluginInterface +) -> Optional[int]: + sym = api.lookup_qualified(type_.name, type_) + if sym is not None: + if isinstance(sym.node, TypeAlias): + target_type = get_proper_type(sym.node.target) + if isinstance(target_type, Instance): + return type_id_for_named_node(target_type.type) + elif isinstance(sym.node, TypeInfo): + return type_id_for_named_node(sym.node) + + return None + + +def type_id_for_callee(callee: Expression) -> Optional[int]: + if isinstance(callee, (MemberExpr, NameExpr)): + if isinstance(callee.node, Decorator) and isinstance( + callee.node.func, FuncDef + ): + if callee.node.func.type and isinstance( + callee.node.func.type, CallableType + ): + ret_type = get_proper_type(callee.node.func.type.ret_type) + + if isinstance(ret_type, Instance): + return type_id_for_fullname(ret_type.type.fullname) + + return None + + elif isinstance(callee.node, OverloadedFuncDef): + if ( + callee.node.impl + and callee.node.impl.type + and isinstance(callee.node.impl.type, CallableType) + ): + ret_type = get_proper_type(callee.node.impl.type.ret_type) + + if isinstance(ret_type, Instance): + return type_id_for_fullname(ret_type.type.fullname) + + return None + elif isinstance(callee.node, FuncDef): + if callee.node.type and isinstance(callee.node.type, CallableType): + ret_type = get_proper_type(callee.node.type.ret_type) + + if isinstance(ret_type, Instance): + return type_id_for_fullname(ret_type.type.fullname) + + return None + elif isinstance(callee.node, TypeAlias): + target_type = get_proper_type(callee.node.target) + if isinstance(target_type, Instance): + return type_id_for_fullname(target_type.type.fullname) + elif isinstance(callee.node, TypeInfo): + return type_id_for_named_node(callee) + return 
None + + +def type_id_for_named_node( + node: Union[NameExpr, MemberExpr, SymbolNode] +) -> Optional[int]: + type_id, fullnames = _lookup.get(node.name, (None, None)) + + if type_id is None or fullnames is None: + return None + elif node.fullname in fullnames: + return type_id + else: + return None + + +def type_id_for_fullname(fullname: str) -> Optional[int]: + tokens = fullname.split(".") + immediate = tokens[-1] + + type_id, fullnames = _lookup.get(immediate, (None, None)) + + if type_id is None or fullnames is None: + return None + elif fullname in fullnames: + return type_id + else: + return None + + +def expr_to_mapped_constructor(expr: Expression) -> CallExpr: + column_descriptor = NameExpr("__sa_Mapped") + column_descriptor.fullname = NAMED_TYPE_SQLA_MAPPED + member_expr = MemberExpr(column_descriptor, "_empty_constructor") + return CallExpr( + member_expr, + [expr], + [ARG_POS], + ["arg1"], + ) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/ext/mypy/plugin.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/ext/mypy/plugin.py new file mode 100644 index 0000000000000000000000000000000000000000..1ec2c02b9cf3ea2d2ba3652056196ddba5bc48d9 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/ext/mypy/plugin.py @@ -0,0 +1,303 @@ +# ext/mypy/plugin.py +# Copyright (C) 2021-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php + +""" +Mypy plugin for SQLAlchemy ORM. + +""" +from __future__ import annotations + +from typing import Callable +from typing import List +from typing import Optional +from typing import Tuple +from typing import Type as TypingType +from typing import Union + +from mypy import nodes +from mypy.mro import calculate_mro +from mypy.mro import MroError +from mypy.nodes import Block +from mypy.nodes import ClassDef +from mypy.nodes import GDEF +from mypy.nodes import MypyFile +from mypy.nodes import NameExpr +from mypy.nodes import SymbolTable +from mypy.nodes import SymbolTableNode +from mypy.nodes import TypeInfo +from mypy.plugin import AttributeContext +from mypy.plugin import ClassDefContext +from mypy.plugin import DynamicClassDefContext +from mypy.plugin import Plugin +from mypy.plugin import SemanticAnalyzerPluginInterface +from mypy.types import get_proper_type +from mypy.types import Instance +from mypy.types import Type + +from . import decl_class +from . import names +from . import util + +try: + __import__("sqlalchemy-stubs") +except ImportError: + pass +else: + raise ImportError( + "The SQLAlchemy mypy plugin in SQLAlchemy " + "2.0 does not work with sqlalchemy-stubs or " + "sqlalchemy2-stubs installed, as well as with any other third party " + "SQLAlchemy stubs. Please uninstall all SQLAlchemy stubs " + "packages." 
+ ) + + +class SQLAlchemyPlugin(Plugin): + def get_dynamic_class_hook( + self, fullname: str + ) -> Optional[Callable[[DynamicClassDefContext], None]]: + if names.type_id_for_fullname(fullname) is names.DECLARATIVE_BASE: + return _dynamic_class_hook + return None + + def get_customize_class_mro_hook( + self, fullname: str + ) -> Optional[Callable[[ClassDefContext], None]]: + return _fill_in_decorators + + def get_class_decorator_hook( + self, fullname: str + ) -> Optional[Callable[[ClassDefContext], None]]: + sym = self.lookup_fully_qualified(fullname) + + if sym is not None and sym.node is not None: + type_id = names.type_id_for_named_node(sym.node) + if type_id is names.MAPPED_DECORATOR: + return _cls_decorator_hook + elif type_id in ( + names.AS_DECLARATIVE, + names.AS_DECLARATIVE_BASE, + ): + return _base_cls_decorator_hook + elif type_id is names.DECLARATIVE_MIXIN: + return _declarative_mixin_hook + + return None + + def get_metaclass_hook( + self, fullname: str + ) -> Optional[Callable[[ClassDefContext], None]]: + if names.type_id_for_fullname(fullname) is names.DECLARATIVE_META: + # Set any classes that explicitly have metaclass=DeclarativeMeta + # as declarative so the check in `get_base_class_hook()` works + return _metaclass_cls_hook + + return None + + def get_base_class_hook( + self, fullname: str + ) -> Optional[Callable[[ClassDefContext], None]]: + sym = self.lookup_fully_qualified(fullname) + + if ( + sym + and isinstance(sym.node, TypeInfo) + and util.has_declarative_base(sym.node) + ): + return _base_cls_hook + + return None + + def get_attribute_hook( + self, fullname: str + ) -> Optional[Callable[[AttributeContext], Type]]: + if fullname.startswith( + "sqlalchemy.orm.attributes.QueryableAttribute." + ): + return _queryable_getattr_hook + + return None + + def get_additional_deps( + self, file: MypyFile + ) -> List[Tuple[int, str, int]]: + return [ + # + (10, "sqlalchemy.orm", -1), + (10, "sqlalchemy.orm.attributes", -1), + (10, "sqlalchemy.orm.decl_api", -1), + ] + + +def plugin(version: str) -> TypingType[SQLAlchemyPlugin]: + return SQLAlchemyPlugin + + +def _dynamic_class_hook(ctx: DynamicClassDefContext) -> None: + """Generate a declarative Base class when the declarative_base() function + is encountered.""" + + _add_globals(ctx) + + cls = ClassDef(ctx.name, Block([])) + cls.fullname = ctx.api.qualified_name(ctx.name) + + info = TypeInfo(SymbolTable(), cls, ctx.api.cur_mod_id) + cls.info = info + _set_declarative_metaclass(ctx.api, cls) + + cls_arg = util.get_callexpr_kwarg(ctx.call, "cls", expr_types=(NameExpr,)) + if cls_arg is not None and isinstance(cls_arg.node, TypeInfo): + util.set_is_base(cls_arg.node) + decl_class.scan_declarative_assignments_and_apply_types( + cls_arg.node.defn, ctx.api, is_mixin_scan=True + ) + info.bases = [Instance(cls_arg.node, [])] + else: + obj = ctx.api.named_type(names.NAMED_TYPE_BUILTINS_OBJECT) + + info.bases = [obj] + + try: + calculate_mro(info) + except MroError: + util.fail( + ctx.api, "Not able to calculate MRO for declarative base", ctx.call + ) + obj = ctx.api.named_type(names.NAMED_TYPE_BUILTINS_OBJECT) + info.bases = [obj] + info.fallback_to_any = True + + ctx.api.add_symbol_table_node(ctx.name, SymbolTableNode(GDEF, info)) + util.set_is_base(info) + + +def _fill_in_decorators(ctx: ClassDefContext) -> None: + for decorator in ctx.cls.decorators: + # set the ".fullname" attribute of a class decorator + # that is a MemberExpr. 
This causes the logic in + # semanal.py->apply_class_plugin_hooks to invoke the + # get_class_decorator_hook for our "registry.map_class()" + # and "registry.as_declarative_base()" methods. + # this seems like a bug in mypy that these decorators are otherwise + # skipped. + + if ( + isinstance(decorator, nodes.CallExpr) + and isinstance(decorator.callee, nodes.MemberExpr) + and decorator.callee.name == "as_declarative_base" + ): + target = decorator.callee + elif ( + isinstance(decorator, nodes.MemberExpr) + and decorator.name == "mapped" + ): + target = decorator + else: + continue + + if isinstance(target.expr, NameExpr): + sym = ctx.api.lookup_qualified( + target.expr.name, target, suppress_errors=True + ) + else: + continue + + if sym and sym.node: + sym_type = get_proper_type(sym.type) + if isinstance(sym_type, Instance): + target.fullname = f"{sym_type.type.fullname}.{target.name}" + else: + # if the registry is in the same file as where the + # decorator is used, it might not have semantic + # symbols applied and we can't get a fully qualified + # name or an inferred type, so we are actually going to + # flag an error in this case that they need to annotate + # it. The "registry" is declared just + # once (or few times), so they have to just not use + # type inference for its assignment in this one case. + util.fail( + ctx.api, + "Class decorator called %s(), but we can't " + "tell if it's from an ORM registry. Please " + "annotate the registry assignment, e.g. " + "my_registry: registry = registry()" % target.name, + sym.node, + ) + + +def _cls_decorator_hook(ctx: ClassDefContext) -> None: + _add_globals(ctx) + assert isinstance(ctx.reason, nodes.MemberExpr) + expr = ctx.reason.expr + + assert isinstance(expr, nodes.RefExpr) and isinstance(expr.node, nodes.Var) + + node_type = get_proper_type(expr.node.type) + + assert ( + isinstance(node_type, Instance) + and names.type_id_for_named_node(node_type.type) is names.REGISTRY + ) + + decl_class.scan_declarative_assignments_and_apply_types(ctx.cls, ctx.api) + + +def _base_cls_decorator_hook(ctx: ClassDefContext) -> None: + _add_globals(ctx) + + cls = ctx.cls + + _set_declarative_metaclass(ctx.api, cls) + + util.set_is_base(ctx.cls.info) + decl_class.scan_declarative_assignments_and_apply_types( + cls, ctx.api, is_mixin_scan=True + ) + + +def _declarative_mixin_hook(ctx: ClassDefContext) -> None: + _add_globals(ctx) + util.set_is_base(ctx.cls.info) + decl_class.scan_declarative_assignments_and_apply_types( + ctx.cls, ctx.api, is_mixin_scan=True + ) + + +def _metaclass_cls_hook(ctx: ClassDefContext) -> None: + util.set_is_base(ctx.cls.info) + + +def _base_cls_hook(ctx: ClassDefContext) -> None: + _add_globals(ctx) + decl_class.scan_declarative_assignments_and_apply_types(ctx.cls, ctx.api) + + +def _queryable_getattr_hook(ctx: AttributeContext) -> Type: + # how do I....tell it it has no attribute of a certain name? 
+ # can't find any Type that seems to match that + return ctx.default_attr_type + + +def _add_globals(ctx: Union[ClassDefContext, DynamicClassDefContext]) -> None: + """Add __sa_DeclarativeMeta and __sa_Mapped symbol to the global space + for all class defs + + """ + + util.add_global(ctx, "sqlalchemy.orm", "Mapped", "__sa_Mapped") + + +def _set_declarative_metaclass( + api: SemanticAnalyzerPluginInterface, target_cls: ClassDef +) -> None: + info = target_cls.info + sym = api.lookup_fully_qualified_or_none( + "sqlalchemy.orm.decl_api.DeclarativeMeta" + ) + assert sym is not None and isinstance(sym.node, TypeInfo) + info.declared_metaclass = info.metaclass_type = Instance(sym.node, []) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/ext/mypy/util.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/ext/mypy/util.py new file mode 100644 index 0000000000000000000000000000000000000000..16761b9ab395c1b8d07307f4f06779f0f3e2a7ad --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/ext/mypy/util.py @@ -0,0 +1,357 @@ +# ext/mypy/util.py +# Copyright (C) 2021-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php + +from __future__ import annotations + +import re +from typing import Any +from typing import Iterable +from typing import Iterator +from typing import List +from typing import Optional +from typing import overload +from typing import Tuple +from typing import Type as TypingType +from typing import TypeVar +from typing import Union + +from mypy import version +from mypy.messages import format_type as _mypy_format_type +from mypy.nodes import CallExpr +from mypy.nodes import ClassDef +from mypy.nodes import CLASSDEF_NO_INFO +from mypy.nodes import Context +from mypy.nodes import Expression +from mypy.nodes import FuncDef +from mypy.nodes import IfStmt +from mypy.nodes import JsonDict +from mypy.nodes import MemberExpr +from mypy.nodes import NameExpr +from mypy.nodes import Statement +from mypy.nodes import SymbolTableNode +from mypy.nodes import TypeAlias +from mypy.nodes import TypeInfo +from mypy.options import Options +from mypy.plugin import ClassDefContext +from mypy.plugin import DynamicClassDefContext +from mypy.plugin import SemanticAnalyzerPluginInterface +from mypy.plugins.common import deserialize_and_fixup_type +from mypy.typeops import map_type_from_supertype +from mypy.types import CallableType +from mypy.types import get_proper_type +from mypy.types import Instance +from mypy.types import NoneType +from mypy.types import Type +from mypy.types import TypeVarType +from mypy.types import UnboundType +from mypy.types import UnionType + +_vers = tuple( + [int(x) for x in version.__version__.split(".") if re.match(r"^\d+$", x)] +) +mypy_14 = _vers >= (1, 4) + + +_TArgType = TypeVar("_TArgType", bound=Union[CallExpr, NameExpr]) + + +class SQLAlchemyAttribute: + def __init__( + self, + name: str, + line: int, + column: int, + typ: Optional[Type], + info: TypeInfo, + ) -> None: + self.name = name + self.line = line + self.column = column + self.type = typ + self.info = info + + def serialize(self) -> JsonDict: + assert self.type + return { + "name": self.name, + "line": self.line, + "column": self.column, + "type": serialize_type(self.type), + } + + def expand_typevar_from_subtype(self, sub_type: TypeInfo) -> 
None: + """Expands type vars in the context of a subtype when an attribute is + inherited from a generic super type. + """ + if not isinstance(self.type, TypeVarType): + return + + self.type = map_type_from_supertype(self.type, sub_type, self.info) + + @classmethod + def deserialize( + cls, + info: TypeInfo, + data: JsonDict, + api: SemanticAnalyzerPluginInterface, + ) -> SQLAlchemyAttribute: + data = data.copy() + typ = deserialize_and_fixup_type(data.pop("type"), api) + return cls(typ=typ, info=info, **data) + + +def name_is_dunder(name: str) -> bool: + return bool(re.match(r"^__.+?__$", name)) + + +def _set_info_metadata(info: TypeInfo, key: str, data: Any) -> None: + info.metadata.setdefault("sqlalchemy", {})[key] = data + + +def _get_info_metadata(info: TypeInfo, key: str) -> Optional[Any]: + return info.metadata.get("sqlalchemy", {}).get(key, None) + + +def _get_info_mro_metadata(info: TypeInfo, key: str) -> Optional[Any]: + if info.mro: + for base in info.mro: + metadata = _get_info_metadata(base, key) + if metadata is not None: + return metadata + return None + + +def establish_as_sqlalchemy(info: TypeInfo) -> None: + info.metadata.setdefault("sqlalchemy", {}) + + +def set_is_base(info: TypeInfo) -> None: + _set_info_metadata(info, "is_base", True) + + +def get_is_base(info: TypeInfo) -> bool: + is_base = _get_info_metadata(info, "is_base") + return is_base is True + + +def has_declarative_base(info: TypeInfo) -> bool: + is_base = _get_info_mro_metadata(info, "is_base") + return is_base is True + + +def set_has_table(info: TypeInfo) -> None: + _set_info_metadata(info, "has_table", True) + + +def get_has_table(info: TypeInfo) -> bool: + is_base = _get_info_metadata(info, "has_table") + return is_base is True + + +def get_mapped_attributes( + info: TypeInfo, api: SemanticAnalyzerPluginInterface +) -> Optional[List[SQLAlchemyAttribute]]: + mapped_attributes: Optional[List[JsonDict]] = _get_info_metadata( + info, "mapped_attributes" + ) + if mapped_attributes is None: + return None + + attributes: List[SQLAlchemyAttribute] = [] + + for data in mapped_attributes: + attr = SQLAlchemyAttribute.deserialize(info, data, api) + attr.expand_typevar_from_subtype(info) + attributes.append(attr) + + return attributes + + +def format_type(typ_: Type, options: Options) -> str: + if mypy_14: + return _mypy_format_type(typ_, options) + else: + return _mypy_format_type(typ_) # type: ignore + + +def set_mapped_attributes( + info: TypeInfo, attributes: List[SQLAlchemyAttribute] +) -> None: + _set_info_metadata( + info, + "mapped_attributes", + [attribute.serialize() for attribute in attributes], + ) + + +def fail(api: SemanticAnalyzerPluginInterface, msg: str, ctx: Context) -> None: + msg = "[SQLAlchemy Mypy plugin] %s" % msg + return api.fail(msg, ctx) + + +def add_global( + ctx: Union[ClassDefContext, DynamicClassDefContext], + module: str, + symbol_name: str, + asname: str, +) -> None: + module_globals = ctx.api.modules[ctx.api.cur_mod_id].names + + if asname not in module_globals: + lookup_sym: SymbolTableNode = ctx.api.modules[module].names[ + symbol_name + ] + + module_globals[asname] = lookup_sym + + +@overload +def get_callexpr_kwarg( + callexpr: CallExpr, name: str, *, expr_types: None = ... +) -> Optional[Union[CallExpr, NameExpr]]: ... + + +@overload +def get_callexpr_kwarg( + callexpr: CallExpr, + name: str, + *, + expr_types: Tuple[TypingType[_TArgType], ...], +) -> Optional[_TArgType]: ... 
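+
+# Note: the overloads above only narrow the static return type. At runtime the
+# implementation below looks up the keyword argument by name, filters its node
+# by ``expr_types`` (defaulting to ``(NameExpr, CallExpr)``), and returns
+# ``None`` when the keyword is absent or not of an accepted node type.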
+ + +def get_callexpr_kwarg( + callexpr: CallExpr, + name: str, + *, + expr_types: Optional[Tuple[TypingType[Any], ...]] = None, +) -> Optional[Any]: + try: + arg_idx = callexpr.arg_names.index(name) + except ValueError: + return None + + kwarg = callexpr.args[arg_idx] + if isinstance( + kwarg, expr_types if expr_types is not None else (NameExpr, CallExpr) + ): + return kwarg + + return None + + +def flatten_typechecking(stmts: Iterable[Statement]) -> Iterator[Statement]: + for stmt in stmts: + if ( + isinstance(stmt, IfStmt) + and isinstance(stmt.expr[0], NameExpr) + and stmt.expr[0].fullname == "typing.TYPE_CHECKING" + ): + yield from stmt.body[0].body + else: + yield stmt + + +def type_for_callee(callee: Expression) -> Optional[Union[Instance, TypeInfo]]: + if isinstance(callee, (MemberExpr, NameExpr)): + if isinstance(callee.node, FuncDef): + if callee.node.type and isinstance(callee.node.type, CallableType): + ret_type = get_proper_type(callee.node.type.ret_type) + + if isinstance(ret_type, Instance): + return ret_type + + return None + elif isinstance(callee.node, TypeAlias): + target_type = get_proper_type(callee.node.target) + if isinstance(target_type, Instance): + return target_type + elif isinstance(callee.node, TypeInfo): + return callee.node + return None + + +def unbound_to_instance( + api: SemanticAnalyzerPluginInterface, typ: Type +) -> Type: + """Take the UnboundType that we seem to get as the ret_type from a FuncDef + and convert it into an Instance/TypeInfo kind of structure that seems + to work as the left-hand type of an AssignmentStatement. + + """ + + if not isinstance(typ, UnboundType): + return typ + + # TODO: figure out a more robust way to check this. The node is some + # kind of _SpecialForm, there's a typing.Optional that's _SpecialForm, + # but I can't figure out how to get them to match up + if typ.name == "Optional": + # convert from "Optional?" 
to the more familiar + # UnionType[..., NoneType()] + return unbound_to_instance( + api, + UnionType( + [unbound_to_instance(api, typ_arg) for typ_arg in typ.args] + + [NoneType()] + ), + ) + + node = api.lookup_qualified(typ.name, typ) + + if ( + node is not None + and isinstance(node, SymbolTableNode) + and isinstance(node.node, TypeInfo) + ): + bound_type = node.node + + return Instance( + bound_type, + [ + ( + unbound_to_instance(api, arg) + if isinstance(arg, UnboundType) + else arg + ) + for arg in typ.args + ], + ) + else: + return typ + + +def info_for_cls( + cls: ClassDef, api: SemanticAnalyzerPluginInterface +) -> Optional[TypeInfo]: + if cls.info is CLASSDEF_NO_INFO: + sym = api.lookup_qualified(cls.name, cls) + if sym is None: + return None + assert sym and isinstance(sym.node, TypeInfo) + return sym.node + + return cls.info + + +def serialize_type(typ: Type) -> Union[str, JsonDict]: + try: + return typ.serialize() + except Exception: + pass + if hasattr(typ, "args"): + typ.args = tuple( + ( + a.resolve_string_annotation() + if hasattr(a, "resolve_string_annotation") + else a + ) + for a in typ.args + ) + elif hasattr(typ, "resolve_string_annotation"): + typ = typ.resolve_string_annotation() + return typ.serialize() diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/ext/orderinglist.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/ext/orderinglist.py new file mode 100644 index 0000000000000000000000000000000000000000..3cc67b189649eae8a98454f8f894717dd967e9f6 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/ext/orderinglist.py @@ -0,0 +1,427 @@ +# ext/orderinglist.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php +# mypy: ignore-errors + +"""A custom list that manages index/position information for contained +elements. + +:author: Jason Kirtland + +``orderinglist`` is a helper for mutable ordered relationships. It will +intercept list operations performed on a :func:`_orm.relationship`-managed +collection and +automatically synchronize changes in list position onto a target scalar +attribute. + +Example: A ``slide`` table, where each row refers to zero or more entries +in a related ``bullet`` table. The bullets within a slide are +displayed in order based on the value of the ``position`` column in the +``bullet`` table. As entries are reordered in memory, the value of the +``position`` attribute should be updated to reflect the new sort order:: + + + Base = declarative_base() + + + class Slide(Base): + __tablename__ = "slide" + + id = Column(Integer, primary_key=True) + name = Column(String) + + bullets = relationship("Bullet", order_by="Bullet.position") + + + class Bullet(Base): + __tablename__ = "bullet" + id = Column(Integer, primary_key=True) + slide_id = Column(Integer, ForeignKey("slide.id")) + position = Column(Integer) + text = Column(String) + +The standard relationship mapping will produce a list-like attribute on each +``Slide`` containing all related ``Bullet`` objects, +but coping with changes in ordering is not handled automatically. +When appending a ``Bullet`` into ``Slide.bullets``, the ``Bullet.position`` +attribute will remain unset until manually assigned. 
When the ``Bullet`` +is inserted into the middle of the list, the following ``Bullet`` objects +will also need to be renumbered. + +The :class:`.OrderingList` object automates this task, managing the +``position`` attribute on all ``Bullet`` objects in the collection. It is +constructed using the :func:`.ordering_list` factory:: + + from sqlalchemy.ext.orderinglist import ordering_list + + Base = declarative_base() + + + class Slide(Base): + __tablename__ = "slide" + + id = Column(Integer, primary_key=True) + name = Column(String) + + bullets = relationship( + "Bullet", + order_by="Bullet.position", + collection_class=ordering_list("position"), + ) + + + class Bullet(Base): + __tablename__ = "bullet" + id = Column(Integer, primary_key=True) + slide_id = Column(Integer, ForeignKey("slide.id")) + position = Column(Integer) + text = Column(String) + +With the above mapping the ``Bullet.position`` attribute is managed:: + + s = Slide() + s.bullets.append(Bullet()) + s.bullets.append(Bullet()) + s.bullets[1].position + >>> 1 + s.bullets.insert(1, Bullet()) + s.bullets[2].position + >>> 2 + +The :class:`.OrderingList` construct only works with **changes** to a +collection, and not the initial load from the database, and requires that the +list be sorted when loaded. Therefore, be sure to specify ``order_by`` on the +:func:`_orm.relationship` against the target ordering attribute, so that the +ordering is correct when first loaded. + +.. warning:: + + :class:`.OrderingList` only provides limited functionality when a primary + key column or unique column is the target of the sort. Operations + that are unsupported or are problematic include: + + * two entries must trade values. This is not supported directly in the + case of a primary key or unique constraint because it means at least + one row would need to be temporarily removed first, or changed to + a third, neutral value while the switch occurs. + + * an entry must be deleted in order to make room for a new entry. + SQLAlchemy's unit of work performs all INSERTs before DELETEs within a + single flush. In the case of a primary key, it will trade + an INSERT/DELETE of the same primary key for an UPDATE statement in order + to lessen the impact of this limitation, however this does not take place + for a UNIQUE column. + A future feature will allow the "DELETE before INSERT" behavior to be + possible, alleviating this limitation, though this feature will require + explicit configuration at the mapper level for sets of columns that + are to be handled in this way. + +:func:`.ordering_list` takes the name of the related object's ordering +attribute as an argument. By default, the zero-based integer index of the +object's position in the :func:`.ordering_list` is synchronized with the +ordering attribute: index 0 will get position 0, index 1 position 1, etc. To +start numbering at 1 or some other integer, provide ``count_from=1``. 
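+
+For example, a 1-based numbering could be requested like this (a minimal
+sketch, reusing the ``Slide`` / ``Bullet`` mapping shown above)::
+
+    bullets = relationship(
+        "Bullet",
+        order_by="Bullet.position",
+        collection_class=ordering_list("position", count_from=1),
+    )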
+ + +""" +from __future__ import annotations + +from typing import Callable +from typing import List +from typing import Optional +from typing import Sequence +from typing import TypeVar + +from ..orm.collections import collection +from ..orm.collections import collection_adapter + +_T = TypeVar("_T") +OrderingFunc = Callable[[int, Sequence[_T]], int] + + +__all__ = ["ordering_list"] + + +def ordering_list( + attr: str, + count_from: Optional[int] = None, + ordering_func: Optional[OrderingFunc] = None, + reorder_on_append: bool = False, +) -> Callable[[], OrderingList]: + """Prepares an :class:`OrderingList` factory for use in mapper definitions. + + Returns an object suitable for use as an argument to a Mapper + relationship's ``collection_class`` option. e.g.:: + + from sqlalchemy.ext.orderinglist import ordering_list + + + class Slide(Base): + __tablename__ = "slide" + + id = Column(Integer, primary_key=True) + name = Column(String) + + bullets = relationship( + "Bullet", + order_by="Bullet.position", + collection_class=ordering_list("position"), + ) + + :param attr: + Name of the mapped attribute to use for storage and retrieval of + ordering information + + :param count_from: + Set up an integer-based ordering, starting at ``count_from``. For + example, ``ordering_list('pos', count_from=1)`` would create a 1-based + list in SQL, storing the value in the 'pos' column. Ignored if + ``ordering_func`` is supplied. + + Additional arguments are passed to the :class:`.OrderingList` constructor. + + """ + + kw = _unsugar_count_from( + count_from=count_from, + ordering_func=ordering_func, + reorder_on_append=reorder_on_append, + ) + return lambda: OrderingList(attr, **kw) + + +# Ordering utility functions + + +def count_from_0(index, collection): + """Numbering function: consecutive integers starting at 0.""" + + return index + + +def count_from_1(index, collection): + """Numbering function: consecutive integers starting at 1.""" + + return index + 1 + + +def count_from_n_factory(start): + """Numbering function: consecutive integers starting at arbitrary start.""" + + def f(index, collection): + return index + start + + try: + f.__name__ = "count_from_%i" % start + except TypeError: + pass + return f + + +def _unsugar_count_from(**kw): + """Builds counting functions from keyword arguments. + + Keyword argument filter, prepares a simple ``ordering_func`` from a + ``count_from`` argument, otherwise passes ``ordering_func`` on unchanged. + """ + + count_from = kw.pop("count_from", None) + if kw.get("ordering_func", None) is None and count_from is not None: + if count_from == 0: + kw["ordering_func"] = count_from_0 + elif count_from == 1: + kw["ordering_func"] = count_from_1 + else: + kw["ordering_func"] = count_from_n_factory(count_from) + return kw + + +class OrderingList(List[_T]): + """A custom list that manages position information for its children. + + The :class:`.OrderingList` object is normally set up using the + :func:`.ordering_list` factory function, used in conjunction with + the :func:`_orm.relationship` function. + + """ + + ordering_attr: str + ordering_func: OrderingFunc + reorder_on_append: bool + + def __init__( + self, + ordering_attr: Optional[str] = None, + ordering_func: Optional[OrderingFunc] = None, + reorder_on_append: bool = False, + ): + """A custom list that manages position information for its children. + + ``OrderingList`` is a ``collection_class`` list implementation that + syncs position in a Python list with a position attribute on the + mapped objects. 
+ + This implementation relies on the list starting in the proper order, + so be **sure** to put an ``order_by`` on your relationship. + + :param ordering_attr: + Name of the attribute that stores the object's order in the + relationship. + + :param ordering_func: Optional. A function that maps the position in + the Python list to a value to store in the + ``ordering_attr``. Values returned are usually (but need not be!) + integers. + + An ``ordering_func`` is called with two positional parameters: the + index of the element in the list, and the list itself. + + If omitted, Python list indexes are used for the attribute values. + Two basic pre-built numbering functions are provided in this module: + ``count_from_0`` and ``count_from_1``. For more exotic examples + like stepped numbering, alphabetical and Fibonacci numbering, see + the unit tests. + + :param reorder_on_append: + Default False. When appending an object with an existing (non-None) + ordering value, that value will be left untouched unless + ``reorder_on_append`` is true. This is an optimization to avoid a + variety of dangerous unexpected database writes. + + SQLAlchemy will add instances to the list via append() when your + object loads. If for some reason the result set from the database + skips a step in the ordering (say, row '1' is missing but you get + '2', '3', and '4'), reorder_on_append=True would immediately + renumber the items to '1', '2', '3'. If you have multiple sessions + making changes, any of whom happen to load this collection even in + passing, all of the sessions would try to "clean up" the numbering + in their commits, possibly causing all but one to fail with a + concurrent modification error. + + Recommend leaving this with the default of False, and just call + ``reorder()`` if you're doing ``append()`` operations with + previously ordered instances or when doing some housekeeping after + manual sql operations. + + """ + self.ordering_attr = ordering_attr + if ordering_func is None: + ordering_func = count_from_0 + self.ordering_func = ordering_func + self.reorder_on_append = reorder_on_append + + # More complex serialization schemes (multi column, e.g.) are possible by + # subclassing and reimplementing these two methods. + def _get_order_value(self, entity): + return getattr(entity, self.ordering_attr) + + def _set_order_value(self, entity, value): + setattr(entity, self.ordering_attr, value) + + def reorder(self) -> None: + """Synchronize ordering for the entire collection. + + Sweeps through the list and ensures that each object has accurate + ordering information set. 
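+
+        A brief usage sketch (assuming the ``Slide`` / ``Bullet`` mapping from
+        the module docstring with its default 0-based numbering, and ``slide``
+        an already-loaded ``Slide``)::
+
+            slide.bullets.sort(key=lambda b: b.text)  # plain list re-sort
+            slide.bullets.reorder()  # rewrites Bullet.position as 0, 1, 2, ...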
+ + """ + for index, entity in enumerate(self): + self._order_entity(index, entity, True) + + # As of 0.5, _reorder is no longer semi-private + _reorder = reorder + + def _order_entity(self, index, entity, reorder=True): + have = self._get_order_value(entity) + + # Don't disturb existing ordering if reorder is False + if have is not None and not reorder: + return + + should_be = self.ordering_func(index, self) + if have != should_be: + self._set_order_value(entity, should_be) + + def append(self, entity): + super().append(entity) + self._order_entity(len(self) - 1, entity, self.reorder_on_append) + + def _raw_append(self, entity): + """Append without any ordering behavior.""" + + super().append(entity) + + _raw_append = collection.adds(1)(_raw_append) + + def insert(self, index, entity): + super().insert(index, entity) + self._reorder() + + def remove(self, entity): + super().remove(entity) + + adapter = collection_adapter(self) + if adapter and adapter._referenced_by_owner: + self._reorder() + + def pop(self, index=-1): + entity = super().pop(index) + self._reorder() + return entity + + def __setitem__(self, index, entity): + if isinstance(index, slice): + step = index.step or 1 + start = index.start or 0 + if start < 0: + start += len(self) + stop = index.stop or len(self) + if stop < 0: + stop += len(self) + + for i in range(start, stop, step): + self.__setitem__(i, entity[i]) + else: + self._order_entity(index, entity, True) + super().__setitem__(index, entity) + + def __delitem__(self, index): + super().__delitem__(index) + self._reorder() + + def __setslice__(self, start, end, values): + super().__setslice__(start, end, values) + self._reorder() + + def __delslice__(self, start, end): + super().__delslice__(start, end) + self._reorder() + + def __reduce__(self): + return _reconstitute, (self.__class__, self.__dict__, list(self)) + + for func_name, func in list(locals().items()): + if ( + callable(func) + and func.__name__ == func_name + and not func.__doc__ + and hasattr(list, func_name) + ): + func.__doc__ = getattr(list, func_name).__doc__ + del func_name, func + + +def _reconstitute(cls, dict_, items): + """Reconstitute an :class:`.OrderingList`. + + This is the adjoint to :meth:`.OrderingList.__reduce__`. It is used for + unpickling :class:`.OrderingList` objects. + + """ + obj = cls.__new__(cls) + obj.__dict__.update(dict_) + list.extend(obj, items) + return obj diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/ext/serializer.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/ext/serializer.py new file mode 100644 index 0000000000000000000000000000000000000000..b7032b6595955851f655cb95da095289b03a6119 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/ext/serializer.py @@ -0,0 +1,185 @@ +# ext/serializer.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php +# mypy: ignore-errors + +"""Serializer/Deserializer objects for usage with SQLAlchemy query structures, +allowing "contextual" deserialization. + +.. legacy:: + + The serializer extension is **legacy** and should not be used for + new development. + +Any SQLAlchemy query structure, either based on sqlalchemy.sql.* +or sqlalchemy.orm.* can be used. The mappers, Tables, Columns, Session +etc. 
which are referenced by the structure are not persisted in serialized +form, but are instead re-associated with the query structure +when it is deserialized. + +.. warning:: The serializer extension uses pickle to serialize and + deserialize objects, so the same security considerations mentioned + in the `python documentation + <https://docs.python.org/3/library/pickle.html>`_ apply. + +Usage is nearly the same as that of the standard Python pickle module:: + + from sqlalchemy.ext.serializer import loads, dumps + + metadata = MetaData(bind=some_engine) + Session = scoped_session(sessionmaker()) + + # ... define mappers + + query = ( + Session.query(MyClass) + .filter(MyClass.somedata == "foo") + .order_by(MyClass.sortkey) + ) + + # pickle the query + serialized = dumps(query) + + # unpickle. Pass in metadata + scoped_session + query2 = loads(serialized, metadata, Session) + + print(query2.all()) + +Similar restrictions as when using raw pickle apply; mapped classes must +themselves be pickleable, meaning they are importable from a module-level +namespace. + +The serializer module is only appropriate for query structures. It is not +needed for: + +* instances of user-defined classes. These contain no references to engines, + sessions or expression constructs in the typical case and can be serialized + directly. + +* Table metadata that is to be loaded entirely from the serialized structure + (i.e. is not already declared in the application). Regular + pickle.loads()/dumps() can be used to fully dump any ``MetaData`` object, + typically one which was reflected from an existing database at some previous + point in time. The serializer module is specifically for the opposite case, + where the Table metadata is already present in memory. + +""" + +from io import BytesIO +import pickle +import re + +from .. import Column +from .. 
import Table +from ..engine import Engine +from ..orm import class_mapper +from ..orm.interfaces import MapperProperty +from ..orm.mapper import Mapper +from ..orm.session import Session +from ..util import b64decode +from ..util import b64encode + + +__all__ = ["Serializer", "Deserializer", "dumps", "loads"] + + +class Serializer(pickle.Pickler): + + def persistent_id(self, obj): + # print "serializing:", repr(obj) + if isinstance(obj, Mapper) and not obj.non_primary: + id_ = "mapper:" + b64encode(pickle.dumps(obj.class_)) + elif isinstance(obj, MapperProperty) and not obj.parent.non_primary: + id_ = ( + "mapperprop:" + + b64encode(pickle.dumps(obj.parent.class_)) + + ":" + + obj.key + ) + elif isinstance(obj, Table): + if "parententity" in obj._annotations: + id_ = "mapper_selectable:" + b64encode( + pickle.dumps(obj._annotations["parententity"].class_) + ) + else: + id_ = f"table:{obj.key}" + elif isinstance(obj, Column) and isinstance(obj.table, Table): + id_ = f"column:{obj.table.key}:{obj.key}" + elif isinstance(obj, Session): + id_ = "session:" + elif isinstance(obj, Engine): + id_ = "engine:" + else: + return None + return id_ + + +our_ids = re.compile( + r"(mapperprop|mapper|mapper_selectable|table|column|" + r"session|attribute|engine):(.*)" +) + + +class Deserializer(pickle.Unpickler): + + def __init__(self, file, metadata=None, scoped_session=None, engine=None): + super().__init__(file) + self.metadata = metadata + self.scoped_session = scoped_session + self.engine = engine + + def get_engine(self): + if self.engine: + return self.engine + elif self.scoped_session and self.scoped_session().bind: + return self.scoped_session().bind + else: + return None + + def persistent_load(self, id_): + m = our_ids.match(str(id_)) + if not m: + return None + else: + type_, args = m.group(1, 2) + if type_ == "attribute": + key, clsarg = args.split(":") + cls = pickle.loads(b64decode(clsarg)) + return getattr(cls, key) + elif type_ == "mapper": + cls = pickle.loads(b64decode(args)) + return class_mapper(cls) + elif type_ == "mapper_selectable": + cls = pickle.loads(b64decode(args)) + return class_mapper(cls).__clause_element__() + elif type_ == "mapperprop": + mapper, keyname = args.split(":") + cls = pickle.loads(b64decode(mapper)) + return class_mapper(cls).attrs[keyname] + elif type_ == "table": + return self.metadata.tables[args] + elif type_ == "column": + table, colname = args.split(":") + return self.metadata.tables[table].c[colname] + elif type_ == "session": + return self.scoped_session() + elif type_ == "engine": + return self.get_engine() + else: + raise Exception("Unknown token: %s" % type_) + + +def dumps(obj, protocol=pickle.HIGHEST_PROTOCOL): + buf = BytesIO() + pickler = Serializer(buf, protocol) + pickler.dump(obj) + return buf.getvalue() + + +def loads(data, metadata=None, scoped_session=None, engine=None): + buf = BytesIO(data) + unpickler = Deserializer(buf, metadata, scoped_session, engine) + return unpickler.load() diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/future/__init__.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/future/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ef9afb1a52bade15a1bd800dd7b359cfca5fbe09 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/future/__init__.py @@ -0,0 +1,16 @@ +# future/__init__.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and 
contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php + +"""2.0 API features. + +this module is legacy as 2.0 APIs are now standard. + +""" +from .engine import Connection as Connection +from .engine import create_engine as create_engine +from .engine import Engine as Engine +from ..sql._selectable_constructors import select as select diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/future/engine.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/future/engine.py new file mode 100644 index 0000000000000000000000000000000000000000..0449c3d9f31c448c6efd4d19de3bc26d3d98a76d --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/future/engine.py @@ -0,0 +1,15 @@ +# future/engine.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php +"""2.0 API features. + +this module is legacy as 2.0 APIs are now standard. + +""" + +from ..engine import Connection as Connection # noqa: F401 +from ..engine import create_engine as create_engine # noqa: F401 +from ..engine import Engine as Engine # noqa: F401 diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/__init__.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7771de47eb2dd9df0dc766f70c69e8fa38a77af5 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/__init__.py @@ -0,0 +1,170 @@ +# orm/__init__.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php + +""" +Functional constructs for ORM configuration. + +See the SQLAlchemy object relational tutorial and mapper configuration +documentation for an overview of how this module is used. + +""" + +from __future__ import annotations + +from typing import Any + +from . import exc as exc +from . import mapper as mapperlib +from . 
import strategy_options as strategy_options +from ._orm_constructors import _mapper_fn as mapper +from ._orm_constructors import aliased as aliased +from ._orm_constructors import backref as backref +from ._orm_constructors import clear_mappers as clear_mappers +from ._orm_constructors import column_property as column_property +from ._orm_constructors import composite as composite +from ._orm_constructors import contains_alias as contains_alias +from ._orm_constructors import create_session as create_session +from ._orm_constructors import deferred as deferred +from ._orm_constructors import dynamic_loader as dynamic_loader +from ._orm_constructors import join as join +from ._orm_constructors import mapped_column as mapped_column +from ._orm_constructors import orm_insert_sentinel as orm_insert_sentinel +from ._orm_constructors import outerjoin as outerjoin +from ._orm_constructors import query_expression as query_expression +from ._orm_constructors import relationship as relationship +from ._orm_constructors import synonym as synonym +from ._orm_constructors import with_loader_criteria as with_loader_criteria +from ._orm_constructors import with_polymorphic as with_polymorphic +from .attributes import AttributeEventToken as AttributeEventToken +from .attributes import InstrumentedAttribute as InstrumentedAttribute +from .attributes import QueryableAttribute as QueryableAttribute +from .base import class_mapper as class_mapper +from .base import DynamicMapped as DynamicMapped +from .base import InspectionAttrExtensionType as InspectionAttrExtensionType +from .base import LoaderCallableStatus as LoaderCallableStatus +from .base import Mapped as Mapped +from .base import NotExtension as NotExtension +from .base import ORMDescriptor as ORMDescriptor +from .base import PassiveFlag as PassiveFlag +from .base import SQLORMExpression as SQLORMExpression +from .base import WriteOnlyMapped as WriteOnlyMapped +from .context import FromStatement as FromStatement +from .context import QueryContext as QueryContext +from .decl_api import add_mapped_attribute as add_mapped_attribute +from .decl_api import as_declarative as as_declarative +from .decl_api import declarative_base as declarative_base +from .decl_api import declarative_mixin as declarative_mixin +from .decl_api import DeclarativeBase as DeclarativeBase +from .decl_api import DeclarativeBaseNoMeta as DeclarativeBaseNoMeta +from .decl_api import DeclarativeMeta as DeclarativeMeta +from .decl_api import declared_attr as declared_attr +from .decl_api import has_inherited_table as has_inherited_table +from .decl_api import MappedAsDataclass as MappedAsDataclass +from .decl_api import registry as registry +from .decl_api import synonym_for as synonym_for +from .decl_base import MappedClassProtocol as MappedClassProtocol +from .descriptor_props import Composite as Composite +from .descriptor_props import CompositeProperty as CompositeProperty +from .descriptor_props import Synonym as Synonym +from .descriptor_props import SynonymProperty as SynonymProperty +from .dynamic import AppenderQuery as AppenderQuery +from .events import AttributeEvents as AttributeEvents +from .events import InstanceEvents as InstanceEvents +from .events import InstrumentationEvents as InstrumentationEvents +from .events import MapperEvents as MapperEvents +from .events import QueryEvents as QueryEvents +from .events import SessionEvents as SessionEvents +from .identity import IdentityMap as IdentityMap +from .instrumentation import ClassManager as ClassManager +from 
.interfaces import EXT_CONTINUE as EXT_CONTINUE +from .interfaces import EXT_SKIP as EXT_SKIP +from .interfaces import EXT_STOP as EXT_STOP +from .interfaces import InspectionAttr as InspectionAttr +from .interfaces import InspectionAttrInfo as InspectionAttrInfo +from .interfaces import MANYTOMANY as MANYTOMANY +from .interfaces import MANYTOONE as MANYTOONE +from .interfaces import MapperProperty as MapperProperty +from .interfaces import NO_KEY as NO_KEY +from .interfaces import NO_VALUE as NO_VALUE +from .interfaces import ONETOMANY as ONETOMANY +from .interfaces import PropComparator as PropComparator +from .interfaces import RelationshipDirection as RelationshipDirection +from .interfaces import UserDefinedOption as UserDefinedOption +from .loading import merge_frozen_result as merge_frozen_result +from .loading import merge_result as merge_result +from .mapped_collection import attribute_keyed_dict as attribute_keyed_dict +from .mapped_collection import ( + attribute_mapped_collection as attribute_mapped_collection, +) +from .mapped_collection import column_keyed_dict as column_keyed_dict +from .mapped_collection import ( + column_mapped_collection as column_mapped_collection, +) +from .mapped_collection import keyfunc_mapping as keyfunc_mapping +from .mapped_collection import KeyFuncDict as KeyFuncDict +from .mapped_collection import mapped_collection as mapped_collection +from .mapped_collection import MappedCollection as MappedCollection +from .mapper import configure_mappers as configure_mappers +from .mapper import Mapper as Mapper +from .mapper import reconstructor as reconstructor +from .mapper import validates as validates +from .properties import ColumnProperty as ColumnProperty +from .properties import MappedColumn as MappedColumn +from .properties import MappedSQLExpression as MappedSQLExpression +from .query import AliasOption as AliasOption +from .query import Query as Query +from .relationships import foreign as foreign +from .relationships import Relationship as Relationship +from .relationships import RelationshipProperty as RelationshipProperty +from .relationships import remote as remote +from .scoping import QueryPropertyDescriptor as QueryPropertyDescriptor +from .scoping import scoped_session as scoped_session +from .session import close_all_sessions as close_all_sessions +from .session import make_transient as make_transient +from .session import make_transient_to_detached as make_transient_to_detached +from .session import object_session as object_session +from .session import ORMExecuteState as ORMExecuteState +from .session import Session as Session +from .session import sessionmaker as sessionmaker +from .session import SessionTransaction as SessionTransaction +from .session import SessionTransactionOrigin as SessionTransactionOrigin +from .state import AttributeState as AttributeState +from .state import InstanceState as InstanceState +from .strategy_options import contains_eager as contains_eager +from .strategy_options import defaultload as defaultload +from .strategy_options import defer as defer +from .strategy_options import immediateload as immediateload +from .strategy_options import joinedload as joinedload +from .strategy_options import lazyload as lazyload +from .strategy_options import Load as Load +from .strategy_options import load_only as load_only +from .strategy_options import noload as noload +from .strategy_options import raiseload as raiseload +from .strategy_options import selectin_polymorphic as selectin_polymorphic +from 
.strategy_options import selectinload as selectinload +from .strategy_options import subqueryload as subqueryload +from .strategy_options import undefer as undefer +from .strategy_options import undefer_group as undefer_group +from .strategy_options import with_expression as with_expression +from .unitofwork import UOWTransaction as UOWTransaction +from .util import Bundle as Bundle +from .util import CascadeOptions as CascadeOptions +from .util import LoaderCriteriaOption as LoaderCriteriaOption +from .util import object_mapper as object_mapper +from .util import polymorphic_union as polymorphic_union +from .util import was_deleted as was_deleted +from .util import with_parent as with_parent +from .writeonly import WriteOnlyCollection as WriteOnlyCollection +from .. import util as _sa_util + + +def __go(lcls: Any) -> None: + _sa_util.preloaded.import_prefix("sqlalchemy.orm") + _sa_util.preloaded.import_prefix("sqlalchemy.ext") + + +__go(locals()) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/_orm_constructors.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/_orm_constructors.py new file mode 100644 index 0000000000000000000000000000000000000000..d9e3ec37ba2d8aea62634e19a912e9dc6f178619 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/_orm_constructors.py @@ -0,0 +1,2590 @@ +# orm/_orm_constructors.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php + +from __future__ import annotations + +import typing +from typing import Any +from typing import Callable +from typing import Collection +from typing import Iterable +from typing import NoReturn +from typing import Optional +from typing import overload +from typing import Type +from typing import TYPE_CHECKING +from typing import Union + +from . import mapperlib as mapperlib +from ._typing import _O +from .descriptor_props import Composite +from .descriptor_props import Synonym +from .interfaces import _AttributeOptions +from .properties import MappedColumn +from .properties import MappedSQLExpression +from .query import AliasOption +from .relationships import _RelationshipArgumentType +from .relationships import _RelationshipDeclared +from .relationships import _RelationshipSecondaryArgument +from .relationships import RelationshipProperty +from .session import Session +from .util import _ORMJoin +from .util import AliasedClass +from .util import AliasedInsp +from .util import LoaderCriteriaOption +from .. import sql +from .. 
import util +from ..exc import InvalidRequestError +from ..sql._typing import _no_kw +from ..sql.base import _NoArg +from ..sql.base import SchemaEventTarget +from ..sql.schema import _InsertSentinelColumnDefault +from ..sql.schema import SchemaConst +from ..sql.selectable import FromClause +from ..util.typing import Annotated +from ..util.typing import Literal + +if TYPE_CHECKING: + from ._typing import _EntityType + from ._typing import _ORMColumnExprArgument + from .descriptor_props import _CC + from .descriptor_props import _CompositeAttrType + from .interfaces import PropComparator + from .mapper import Mapper + from .query import Query + from .relationships import _LazyLoadArgumentType + from .relationships import _ORMColCollectionArgument + from .relationships import _ORMOrderByArgument + from .relationships import _RelationshipJoinConditionArgument + from .relationships import ORMBackrefArgument + from .session import _SessionBind + from ..sql._typing import _AutoIncrementType + from ..sql._typing import _ColumnExpressionArgument + from ..sql._typing import _FromClauseArgument + from ..sql._typing import _InfoType + from ..sql._typing import _OnClauseArgument + from ..sql._typing import _TypeEngineArgument + from ..sql.elements import ColumnElement + from ..sql.schema import _ServerDefaultArgument + from ..sql.schema import _ServerOnUpdateArgument + from ..sql.selectable import Alias + from ..sql.selectable import Subquery + + +_T = typing.TypeVar("_T") + + +@util.deprecated( + "1.4", + "The :class:`.AliasOption` object is not necessary " + "for entities to be matched up to a query that is established " + "via :meth:`.Query.from_statement` and now does nothing.", + enable_warnings=False, # AliasOption itself warns +) +def contains_alias(alias: Union[Alias, Subquery]) -> AliasOption: + r"""Return a :class:`.MapperOption` that will indicate to the + :class:`_query.Query` + that the main table has been aliased. 
+ + """ + return AliasOption(alias) + + +def mapped_column( + __name_pos: Optional[ + Union[str, _TypeEngineArgument[Any], SchemaEventTarget] + ] = None, + __type_pos: Optional[ + Union[_TypeEngineArgument[Any], SchemaEventTarget] + ] = None, + *args: SchemaEventTarget, + init: Union[_NoArg, bool] = _NoArg.NO_ARG, + repr: Union[_NoArg, bool] = _NoArg.NO_ARG, # noqa: A002 + default: Optional[Any] = _NoArg.NO_ARG, + default_factory: Union[_NoArg, Callable[[], _T]] = _NoArg.NO_ARG, + compare: Union[_NoArg, bool] = _NoArg.NO_ARG, + kw_only: Union[_NoArg, bool] = _NoArg.NO_ARG, + hash: Union[_NoArg, bool, None] = _NoArg.NO_ARG, # noqa: A002 + nullable: Optional[ + Union[bool, Literal[SchemaConst.NULL_UNSPECIFIED]] + ] = SchemaConst.NULL_UNSPECIFIED, + primary_key: Optional[bool] = False, + deferred: Union[_NoArg, bool] = _NoArg.NO_ARG, + deferred_group: Optional[str] = None, + deferred_raiseload: Optional[bool] = None, + use_existing_column: bool = False, + name: Optional[str] = None, + type_: Optional[_TypeEngineArgument[Any]] = None, + autoincrement: _AutoIncrementType = "auto", + doc: Optional[str] = None, + key: Optional[str] = None, + index: Optional[bool] = None, + unique: Optional[bool] = None, + info: Optional[_InfoType] = None, + onupdate: Optional[Any] = None, + insert_default: Optional[Any] = _NoArg.NO_ARG, + server_default: Optional[_ServerDefaultArgument] = None, + server_onupdate: Optional[_ServerOnUpdateArgument] = None, + active_history: bool = False, + quote: Optional[bool] = None, + system: bool = False, + comment: Optional[str] = None, + sort_order: Union[_NoArg, int] = _NoArg.NO_ARG, + **kw: Any, +) -> MappedColumn[Any]: + r"""declare a new ORM-mapped :class:`_schema.Column` construct + for use within :ref:`Declarative Table ` + configuration. + + The :func:`_orm.mapped_column` function provides an ORM-aware and + Python-typing-compatible construct which is used with + :ref:`declarative ` mappings to indicate an + attribute that's mapped to a Core :class:`_schema.Column` object. It + provides the equivalent feature as mapping an attribute to a + :class:`_schema.Column` object directly when using Declarative, + specifically when using :ref:`Declarative Table ` + configuration. + + .. versionadded:: 2.0 + + :func:`_orm.mapped_column` is normally used with explicit typing along with + the :class:`_orm.Mapped` annotation type, where it can derive the SQL + type and nullability for the column based on what's present within the + :class:`_orm.Mapped` annotation. It also may be used without annotations + as a drop-in replacement for how :class:`_schema.Column` is used in + Declarative mappings in SQLAlchemy 1.x style. + + For usage examples of :func:`_orm.mapped_column`, see the documentation + at :ref:`orm_declarative_table`. + + .. seealso:: + + :ref:`orm_declarative_table` - complete documentation + + :ref:`whatsnew_20_orm_declarative_typing` - migration notes for + Declarative mappings using 1.x style mappings + + :param __name: String name to give to the :class:`_schema.Column`. This + is an optional, positional only argument that if present must be the + first positional argument passed. If omitted, the attribute name to + which the :func:`_orm.mapped_column` is mapped will be used as the SQL + column name. + :param __type: :class:`_types.TypeEngine` type or instance which will + indicate the datatype to be associated with the :class:`_schema.Column`. 
+ This is an optional, positional-only argument that if present must + immediately follow the ``__name`` parameter if present also, or otherwise + be the first positional parameter. If omitted, the ultimate type for + the column may be derived either from the annotated type, or if a + :class:`_schema.ForeignKey` is present, from the datatype of the + referenced column. + :param \*args: Additional positional arguments include constructs such + as :class:`_schema.ForeignKey`, :class:`_schema.CheckConstraint`, + and :class:`_schema.Identity`, which are passed through to the constructed + :class:`_schema.Column`. + :param nullable: Optional bool, whether the column should be "NULL" or + "NOT NULL". If omitted, the nullability is derived from the type + annotation based on whether or not ``typing.Optional`` is present. + ``nullable`` defaults to ``True`` otherwise for non-primary key columns, + and ``False`` for primary key columns. + :param primary_key: optional bool, indicates the :class:`_schema.Column` + would be part of the table's primary key or not. + :param deferred: Optional bool - this keyword argument is consumed by the + ORM declarative process, and is not part of the :class:`_schema.Column` + itself; instead, it indicates that this column should be "deferred" for + loading as though mapped by :func:`_orm.deferred`. + + .. seealso:: + + :ref:`orm_queryguide_deferred_declarative` + + :param deferred_group: Implies :paramref:`_orm.mapped_column.deferred` + to ``True``, and set the :paramref:`_orm.deferred.group` parameter. + + .. seealso:: + + :ref:`orm_queryguide_deferred_group` + + :param deferred_raiseload: Implies :paramref:`_orm.mapped_column.deferred` + to ``True``, and set the :paramref:`_orm.deferred.raiseload` parameter. + + .. seealso:: + + :ref:`orm_queryguide_deferred_raiseload` + + :param use_existing_column: if True, will attempt to locate the given + column name on an inherited superclass (typically single inheriting + superclass), and if present, will not produce a new column, mapping + to the superclass column as though it were omitted from this class. + This is used for mixins that add new columns to an inherited superclass. + + .. seealso:: + + :ref:`orm_inheritance_column_conflicts` + + .. versionadded:: 2.0.0b4 + + :param default: Passed directly to the + :paramref:`_schema.Column.default` parameter if the + :paramref:`_orm.mapped_column.insert_default` parameter is not present. + Additionally, when used with :ref:`orm_declarative_native_dataclasses`, + indicates a default Python value that should be applied to the keyword + constructor within the generated ``__init__()`` method. + + Note that in the case of dataclass generation when + :paramref:`_orm.mapped_column.insert_default` is not present, this means + the :paramref:`_orm.mapped_column.default` value is used in **two** + places, both the ``__init__()`` method as well as the + :paramref:`_schema.Column.default` parameter. While this behavior may + change in a future release, for the moment this tends to "work out"; a + default of ``None`` will mean that the :class:`_schema.Column` gets no + default generator, whereas a default that refers to a non-``None`` Python + or SQL expression value will be assigned up front on the object when + ``__init__()`` is called, which is the same value that the Core + :class:`_sql.Insert` construct would use in any case, leading to the same + end result. + + .. 
note:: When using Core level column defaults that are callables to + be interpreted by the underlying :class:`_schema.Column` in conjunction + with :ref:`ORM-mapped dataclasses + `, especially those that are + :ref:`context-aware default functions `, + **the** :paramref:`_orm.mapped_column.insert_default` **parameter must + be used instead**. This is necessary to disambiguate the callable from + being interpreted as a dataclass level default. + + .. seealso:: + + :ref:`defaults_default_factory_insert_default` + + :paramref:`_orm.mapped_column.insert_default` + + :paramref:`_orm.mapped_column.default_factory` + + :param insert_default: Passed directly to the + :paramref:`_schema.Column.default` parameter; will supersede the value + of :paramref:`_orm.mapped_column.default` when present, however + :paramref:`_orm.mapped_column.default` will always apply to the + constructor default for a dataclasses mapping. + + .. seealso:: + + :ref:`defaults_default_factory_insert_default` + + :paramref:`_orm.mapped_column.default` + + :paramref:`_orm.mapped_column.default_factory` + + :param sort_order: An integer that indicates how this mapped column + should be sorted compared to the others when the ORM is creating a + :class:`_schema.Table`. Among mapped columns that have the same + value the default ordering is used, placing first the mapped columns + defined in the main class, then the ones in the super classes. + Defaults to 0. The sort is ascending. + + .. versionadded:: 2.0.4 + + :param active_history=False: + + When ``True``, indicates that the "previous" value for a + scalar attribute should be loaded when replaced, if not + already loaded. Normally, history tracking logic for + simple non-primary-key scalar values only needs to be + aware of the "new" value in order to perform a flush. This + flag is available for applications that make use of + :func:`.attributes.get_history` or :meth:`.Session.is_modified` + which also need to know the "previous" value of the attribute. + + .. versionadded:: 2.0.10 + + + :param init: Specific to :ref:`orm_declarative_native_dataclasses`, + specifies if the mapped attribute should be part of the ``__init__()`` + method as generated by the dataclass process. + :param repr: Specific to :ref:`orm_declarative_native_dataclasses`, + specifies if the mapped attribute should be part of the ``__repr__()`` + method as generated by the dataclass process. + :param default_factory: Specific to + :ref:`orm_declarative_native_dataclasses`, + specifies a default-value generation function that will take place + as part of the ``__init__()`` + method as generated by the dataclass process. + + .. seealso:: + + :ref:`defaults_default_factory_insert_default` + + :paramref:`_orm.mapped_column.default` + + :paramref:`_orm.mapped_column.insert_default` + + :param compare: Specific to + :ref:`orm_declarative_native_dataclasses`, indicates if this field + should be included in comparison operations when generating the + ``__eq__()`` and ``__ne__()`` methods for the mapped class. + + .. versionadded:: 2.0.0b4 + + :param kw_only: Specific to + :ref:`orm_declarative_native_dataclasses`, indicates if this field + should be marked as keyword-only when generating the ``__init__()``. + + :param hash: Specific to + :ref:`orm_declarative_native_dataclasses`, controls if this field + is included when generating the ``__hash__()`` method for the mapped + class. + + .. 
versionadded:: 2.0.36 + + :param \**kw: All remaining keyword arguments are passed through to the + constructor for the :class:`_schema.Column`. + + """ + + return MappedColumn( + __name_pos, + __type_pos, + *args, + name=name, + type_=type_, + autoincrement=autoincrement, + insert_default=insert_default, + attribute_options=_AttributeOptions( + init, repr, default, default_factory, compare, kw_only, hash + ), + doc=doc, + key=key, + index=index, + unique=unique, + info=info, + active_history=active_history, + nullable=nullable, + onupdate=onupdate, + primary_key=primary_key, + server_default=server_default, + server_onupdate=server_onupdate, + use_existing_column=use_existing_column, + quote=quote, + comment=comment, + system=system, + deferred=deferred, + deferred_group=deferred_group, + deferred_raiseload=deferred_raiseload, + sort_order=sort_order, + **kw, + ) + + +def orm_insert_sentinel( + name: Optional[str] = None, + type_: Optional[_TypeEngineArgument[Any]] = None, + *, + default: Optional[Any] = None, + omit_from_statements: bool = True, +) -> MappedColumn[Any]: + """Provides a surrogate :func:`_orm.mapped_column` that generates + a so-called :term:`sentinel` column, allowing efficient bulk + inserts with deterministic RETURNING sorting for tables that don't + otherwise have qualifying primary key configurations. + + Use of :func:`_orm.orm_insert_sentinel` is analogous to the use of the + :func:`_schema.insert_sentinel` construct within a Core + :class:`_schema.Table` construct. + + Guidelines for adding this construct to a Declarative mapped class + are the same as that of the :func:`_schema.insert_sentinel` construct; + the database table itself also needs to have a column with this name + present. + + For background on how this object is used, see the section + :ref:`engine_insertmanyvalues_sentinel_columns` as part of the + section :ref:`engine_insertmanyvalues`. + + .. seealso:: + + :func:`_schema.insert_sentinel` + + :ref:`engine_insertmanyvalues` + + :ref:`engine_insertmanyvalues_sentinel_columns` + + + .. versionadded:: 2.0.10 + + """ + + return mapped_column( + name=name, + default=( + default if default is not None else _InsertSentinelColumnDefault() + ), + _omit_from_statements=omit_from_statements, + insert_sentinel=True, + use_existing_column=True, + nullable=True, + ) + + +@util.deprecated_params( + **{ + arg: ( + "2.0", + f"The :paramref:`_orm.column_property.{arg}` parameter is " + "deprecated for :func:`_orm.column_property`. 
This parameter " + "applies to a writeable-attribute in a Declarative Dataclasses " + "configuration only, and :func:`_orm.column_property` is treated " + "as a read-only attribute in this context.", + ) + for arg in ("init", "kw_only", "default", "default_factory") + } +) +def column_property( + column: _ORMColumnExprArgument[_T], + *additional_columns: _ORMColumnExprArgument[Any], + group: Optional[str] = None, + deferred: bool = False, + raiseload: bool = False, + comparator_factory: Optional[Type[PropComparator[_T]]] = None, + init: Union[_NoArg, bool] = _NoArg.NO_ARG, + repr: Union[_NoArg, bool] = _NoArg.NO_ARG, # noqa: A002 + default: Optional[Any] = _NoArg.NO_ARG, + default_factory: Union[_NoArg, Callable[[], _T]] = _NoArg.NO_ARG, + compare: Union[_NoArg, bool] = _NoArg.NO_ARG, + kw_only: Union[_NoArg, bool] = _NoArg.NO_ARG, + hash: Union[_NoArg, bool, None] = _NoArg.NO_ARG, # noqa: A002 + active_history: bool = False, + expire_on_flush: bool = True, + info: Optional[_InfoType] = None, + doc: Optional[str] = None, +) -> MappedSQLExpression[_T]: + r"""Provide a column-level property for use with a mapping. + + With Declarative mappings, :func:`_orm.column_property` is used to + map read-only SQL expressions to a mapped class. + + When using Imperative mappings, :func:`_orm.column_property` also + takes on the role of mapping table columns with additional features. + When using fully Declarative mappings, the :func:`_orm.mapped_column` + construct should be used for this purpose. + + With Declarative Dataclass mappings, :func:`_orm.column_property` + is considered to be **read only**, and will not be included in the + Dataclass ``__init__()`` constructor. + + The :func:`_orm.column_property` function returns an instance of + :class:`.ColumnProperty`. + + .. seealso:: + + :ref:`mapper_column_property_sql_expressions` - general use of + :func:`_orm.column_property` to map SQL expressions + + :ref:`orm_imperative_table_column_options` - usage of + :func:`_orm.column_property` with Imperative Table mappings to apply + additional options to a plain :class:`_schema.Column` object + + :param \*cols: + list of Column objects to be mapped. + + :param active_history=False: + + Used only for Imperative Table mappings, or legacy-style Declarative + mappings (i.e. which have not been upgraded to + :func:`_orm.mapped_column`), for column-based attributes that are + expected to be writeable; use :func:`_orm.mapped_column` with + :paramref:`_orm.mapped_column.active_history` for Declarative mappings. + See that parameter for functional details. + + :param comparator_factory: a class which extends + :class:`.ColumnProperty.Comparator` which provides custom SQL + clause generation for comparison operations. + + :param group: + a group name for this property when marked as deferred. + + :param deferred: + when True, the column property is "deferred", meaning that + it does not load immediately, and is instead loaded when the + attribute is first accessed on an instance. See also + :func:`~sqlalchemy.orm.deferred`. + + :param doc: + optional string that will be applied as the doc on the + class-bound descriptor. + + :param expire_on_flush=True: + Disable expiry on flush. A column_property() which refers + to a SQL expression (and not a single table-bound column) + is considered to be a "read only" property; populating it + has no effect on the state of data, and it can only return + database state. 
For this reason a column_property()'s value + is expired whenever the parent object is involved in a + flush, that is, has any kind of "dirty" state within a flush. + Setting this parameter to ``False`` will have the effect of + leaving any existing value present after the flush proceeds. + Note that the :class:`.Session` with default expiration + settings still expires + all attributes after a :meth:`.Session.commit` call, however. + + :param info: Optional data dictionary which will be populated into the + :attr:`.MapperProperty.info` attribute of this object. + + :param raiseload: if True, indicates the column should raise an error + when undeferred, rather than loading the value. This can be + altered at query time by using the :func:`.deferred` option with + raiseload=False. + + .. versionadded:: 1.4 + + .. seealso:: + + :ref:`orm_queryguide_deferred_raiseload` + + :param init: Specific to :ref:`orm_declarative_native_dataclasses`, + specifies if the mapped attribute should be part of the ``__init__()`` + method as generated by the dataclass process. + :param repr: Specific to :ref:`orm_declarative_native_dataclasses`, + specifies if the mapped attribute should be part of the ``__repr__()`` + method as generated by the dataclass process. + :param default_factory: Specific to + :ref:`orm_declarative_native_dataclasses`, + specifies a default-value generation function that will take place + as part of the ``__init__()`` + method as generated by the dataclass process. + + .. seealso:: + + :ref:`defaults_default_factory_insert_default` + + :paramref:`_orm.mapped_column.default` + + :paramref:`_orm.mapped_column.insert_default` + + :param compare: Specific to + :ref:`orm_declarative_native_dataclasses`, indicates if this field + should be included in comparison operations when generating the + ``__eq__()`` and ``__ne__()`` methods for the mapped class. + + .. versionadded:: 2.0.0b4 + + :param kw_only: Specific to + :ref:`orm_declarative_native_dataclasses`, indicates if this field + should be marked as keyword-only when generating the ``__init__()``. + + :param hash: Specific to + :ref:`orm_declarative_native_dataclasses`, controls if this field + is included when generating the ``__hash__()`` method for the mapped + class. + + .. versionadded:: 2.0.36 + + """ + return MappedSQLExpression( + column, + *additional_columns, + attribute_options=_AttributeOptions( + False if init is _NoArg.NO_ARG else init, + repr, + default, + default_factory, + compare, + kw_only, + hash, + ), + group=group, + deferred=deferred, + raiseload=raiseload, + comparator_factory=comparator_factory, + active_history=active_history, + expire_on_flush=expire_on_flush, + info=info, + doc=doc, + _assume_readonly_dc_attributes=True, + ) + + +@overload +def composite( + _class_or_attr: _CompositeAttrType[Any], + *attrs: _CompositeAttrType[Any], + group: Optional[str] = None, + deferred: bool = False, + raiseload: bool = False, + comparator_factory: Optional[Type[Composite.Comparator[_T]]] = None, + active_history: bool = False, + init: Union[_NoArg, bool] = _NoArg.NO_ARG, + repr: Union[_NoArg, bool] = _NoArg.NO_ARG, # noqa: A002 + default: Optional[Any] = _NoArg.NO_ARG, + default_factory: Union[_NoArg, Callable[[], _T]] = _NoArg.NO_ARG, + compare: Union[_NoArg, bool] = _NoArg.NO_ARG, + kw_only: Union[_NoArg, bool] = _NoArg.NO_ARG, + hash: Union[_NoArg, bool, None] = _NoArg.NO_ARG, # noqa: A002 + info: Optional[_InfoType] = None, + doc: Optional[str] = None, + **__kw: Any, +) -> Composite[Any]: ... 
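+
+# Illustrative sketch only, not part of the upstream API reference: assuming
+# a ``Point`` dataclass with ``x`` / ``y`` attributes and a Declarative
+# ``Vertex`` mapping (both hypothetical names), ``composite()`` is typically
+# used against pairs of ``mapped_column()`` constructs, e.g.:
+#
+#     class Vertex(Base):
+#         __tablename__ = "vertices"
+#
+#         id: Mapped[int] = mapped_column(primary_key=True)
+#         start: Mapped[Point] = composite(
+#             mapped_column("x1"), mapped_column("y1")
+#         )
+#         end: Mapped[Point] = composite(
+#             mapped_column("x2"), mapped_column("y2")
+#         )
+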
+ + +@overload +def composite( + _class_or_attr: Type[_CC], + *attrs: _CompositeAttrType[Any], + group: Optional[str] = None, + deferred: bool = False, + raiseload: bool = False, + comparator_factory: Optional[Type[Composite.Comparator[_T]]] = None, + active_history: bool = False, + init: Union[_NoArg, bool] = _NoArg.NO_ARG, + repr: Union[_NoArg, bool] = _NoArg.NO_ARG, # noqa: A002 + default: Optional[Any] = _NoArg.NO_ARG, + default_factory: Union[_NoArg, Callable[[], _T]] = _NoArg.NO_ARG, + compare: Union[_NoArg, bool] = _NoArg.NO_ARG, + kw_only: Union[_NoArg, bool] = _NoArg.NO_ARG, + hash: Union[_NoArg, bool, None] = _NoArg.NO_ARG, # noqa: A002 + info: Optional[_InfoType] = None, + doc: Optional[str] = None, + **__kw: Any, +) -> Composite[_CC]: ... + + +@overload +def composite( + _class_or_attr: Callable[..., _CC], + *attrs: _CompositeAttrType[Any], + group: Optional[str] = None, + deferred: bool = False, + raiseload: bool = False, + comparator_factory: Optional[Type[Composite.Comparator[_T]]] = None, + active_history: bool = False, + init: Union[_NoArg, bool] = _NoArg.NO_ARG, + repr: Union[_NoArg, bool] = _NoArg.NO_ARG, # noqa: A002 + default: Optional[Any] = _NoArg.NO_ARG, + default_factory: Union[_NoArg, Callable[[], _T]] = _NoArg.NO_ARG, + compare: Union[_NoArg, bool] = _NoArg.NO_ARG, + kw_only: Union[_NoArg, bool] = _NoArg.NO_ARG, + hash: Union[_NoArg, bool, None] = _NoArg.NO_ARG, # noqa: A002 + info: Optional[_InfoType] = None, + doc: Optional[str] = None, + **__kw: Any, +) -> Composite[_CC]: ... + + +def composite( + _class_or_attr: Union[ + None, Type[_CC], Callable[..., _CC], _CompositeAttrType[Any] + ] = None, + *attrs: _CompositeAttrType[Any], + group: Optional[str] = None, + deferred: bool = False, + raiseload: bool = False, + comparator_factory: Optional[Type[Composite.Comparator[_T]]] = None, + active_history: bool = False, + init: Union[_NoArg, bool] = _NoArg.NO_ARG, + repr: Union[_NoArg, bool] = _NoArg.NO_ARG, # noqa: A002 + default: Optional[Any] = _NoArg.NO_ARG, + default_factory: Union[_NoArg, Callable[[], _T]] = _NoArg.NO_ARG, + compare: Union[_NoArg, bool] = _NoArg.NO_ARG, + kw_only: Union[_NoArg, bool] = _NoArg.NO_ARG, + hash: Union[_NoArg, bool, None] = _NoArg.NO_ARG, # noqa: A002 + info: Optional[_InfoType] = None, + doc: Optional[str] = None, + **__kw: Any, +) -> Composite[Any]: + r"""Return a composite column-based property for use with a Mapper. + + See the mapping documentation section :ref:`mapper_composite` for a + full usage example. + + The :class:`.MapperProperty` returned by :func:`.composite` + is the :class:`.Composite`. + + :param class\_: + The "composite type" class, or any classmethod or callable which + will produce a new instance of the composite object given the + column values in order. + + :param \*attrs: + List of elements to be mapped, which may include: + + * :class:`_schema.Column` objects + * :func:`_orm.mapped_column` constructs + * string names of other attributes on the mapped class, which may be + any other SQL or object-mapped attribute. This can for + example allow a composite that refers to a many-to-one relationship + + :param active_history=False: + When ``True``, indicates that the "previous" value for a + scalar attribute should be loaded when replaced, if not + already loaded. See the same flag on :func:`.column_property`. + + :param group: + A group name for this property when marked as deferred. 
+ + :param deferred: + When True, the column property is "deferred", meaning that it does + not load immediately, and is instead loaded when the attribute is + first accessed on an instance. See also + :func:`~sqlalchemy.orm.deferred`. + + :param comparator_factory: a class which extends + :class:`.Composite.Comparator` which provides custom SQL + clause generation for comparison operations. + + :param doc: + optional string that will be applied as the doc on the + class-bound descriptor. + + :param info: Optional data dictionary which will be populated into the + :attr:`.MapperProperty.info` attribute of this object. + + :param init: Specific to :ref:`orm_declarative_native_dataclasses`, + specifies if the mapped attribute should be part of the ``__init__()`` + method as generated by the dataclass process. + :param repr: Specific to :ref:`orm_declarative_native_dataclasses`, + specifies if the mapped attribute should be part of the ``__repr__()`` + method as generated by the dataclass process. + :param default_factory: Specific to + :ref:`orm_declarative_native_dataclasses`, + specifies a default-value generation function that will take place + as part of the ``__init__()`` + method as generated by the dataclass process. + + :param compare: Specific to + :ref:`orm_declarative_native_dataclasses`, indicates if this field + should be included in comparison operations when generating the + ``__eq__()`` and ``__ne__()`` methods for the mapped class. + + .. versionadded:: 2.0.0b4 + + :param kw_only: Specific to + :ref:`orm_declarative_native_dataclasses`, indicates if this field + should be marked as keyword-only when generating the ``__init__()``. + + :param hash: Specific to + :ref:`orm_declarative_native_dataclasses`, controls if this field + is included when generating the ``__hash__()`` method for the mapped + class. + + .. versionadded:: 2.0.36 + """ + if __kw: + raise _no_kw() + + return Composite( + _class_or_attr, + *attrs, + attribute_options=_AttributeOptions( + init, repr, default, default_factory, compare, kw_only, hash + ), + group=group, + deferred=deferred, + raiseload=raiseload, + comparator_factory=comparator_factory, + active_history=active_history, + info=info, + doc=doc, + ) + + +def with_loader_criteria( + entity_or_base: _EntityType[Any], + where_criteria: Union[ + _ColumnExpressionArgument[bool], + Callable[[Any], _ColumnExpressionArgument[bool]], + ], + loader_only: bool = False, + include_aliases: bool = False, + propagate_to_loaders: bool = True, + track_closure_variables: bool = True, +) -> LoaderCriteriaOption: + """Add additional WHERE criteria to the load for all occurrences of + a particular entity. + + .. versionadded:: 1.4 + + The :func:`_orm.with_loader_criteria` option is intended to add + limiting criteria to a particular kind of entity in a query, + **globally**, meaning it will apply to the entity as it appears + in the SELECT query as well as within any subqueries, join + conditions, and relationship loads, including both eager and lazy + loaders, without the need for it to be specified in any particular + part of the query. The rendering logic uses the same system used by + single table inheritance to ensure a certain discriminator is applied + to a table. 
+
+    E.g., using :term:`2.0-style` queries, we can limit the way the
+    ``User.addresses`` collection is loaded, regardless of the kind
+    of loading used::
+
+        from sqlalchemy.orm import with_loader_criteria
+
+        stmt = select(User).options(
+            selectinload(User.addresses),
+            with_loader_criteria(Address, Address.email_address != "foo"),
+        )
+
+    Above, the "selectinload" for ``User.addresses`` will apply the
+    given filtering criteria to the WHERE clause.
+
+    Another example, where the filtering will be applied to the
+    ON clause of the join, in this example using :term:`1.x style`
+    queries::
+
+        q = (
+            session.query(User)
+            .outerjoin(User.addresses)
+            .options(with_loader_criteria(Address, Address.email_address != "foo"))
+        )
+
+    The primary purpose of :func:`_orm.with_loader_criteria` is to use
+    it in the :meth:`_orm.SessionEvents.do_orm_execute` event handler
+    to ensure that all occurrences of a particular entity are filtered
+    in a certain way, such as filtering for access control roles.  It
+    also can be used to apply criteria to relationship loads.  In the
+    example below, we can apply a certain set of rules to all queries
+    emitted by a particular :class:`_orm.Session`::
+
+        session = Session(bind=engine)
+
+
+        @event.listens_for(session, "do_orm_execute")
+        def _add_filtering_criteria(execute_state):
+            if (
+                execute_state.is_select
+                and not execute_state.is_column_load
+                and not execute_state.is_relationship_load
+            ):
+                execute_state.statement = execute_state.statement.options(
+                    with_loader_criteria(
+                        SecurityRole,
+                        lambda cls: cls.role.in_(["some_role"]),
+                        include_aliases=True,
+                    )
+                )
+
+    In the above example, the :meth:`_orm.SessionEvents.do_orm_execute`
+    event will intercept all queries emitted using the
+    :class:`_orm.Session`. For those queries which are SELECT statements
+    and are not attribute or relationship loads, a custom
+    :func:`_orm.with_loader_criteria` option is added to the query. The
+    :func:`_orm.with_loader_criteria` option will be used in the given
+    statement and will also be automatically propagated to all relationship
+    loads that descend from this query.
+
+    The criteria argument given is a ``lambda`` that accepts a ``cls``
+    argument. The given class will expand to include all mapped subclasses
+    and need not itself be a mapped class.
+
+    .. tip::
+
+        When using the :func:`_orm.with_loader_criteria` option in
+        conjunction with the :func:`_orm.contains_eager` loader option,
+        it's important to note that :func:`_orm.with_loader_criteria` only
+        affects the part of the query that determines what SQL is rendered
+        in terms of the WHERE and FROM clauses. The
+        :func:`_orm.contains_eager` option does not affect the rendering of
+        the SELECT statement outside of the columns clause, so does not have
+        any interaction with the :func:`_orm.with_loader_criteria` option.
+        However, the way things "work" is that :func:`_orm.contains_eager`
+        is meant to be used with a query that is already selecting from the
+        additional entities in some way, where
+        :func:`_orm.with_loader_criteria` can apply its additional
+        criteria.
+ + In the example below, assuming a mapping relationship as + ``A -> A.bs -> B``, the given :func:`_orm.with_loader_criteria` + option will affect the way in which the JOIN is rendered:: + + stmt = ( + select(A) + .join(A.bs) + .options(contains_eager(A.bs), with_loader_criteria(B, B.flag == 1)) + ) + + Above, the given :func:`_orm.with_loader_criteria` option will + affect the ON clause of the JOIN that is specified by + ``.join(A.bs)``, so is applied as expected. The + :func:`_orm.contains_eager` option has the effect that columns from + ``B`` are added to the columns clause: + + .. sourcecode:: sql + + SELECT + b.id, b.a_id, b.data, b.flag, + a.id AS id_1, + a.data AS data_1 + FROM a JOIN b ON a.id = b.a_id AND b.flag = :flag_1 + + + The use of the :func:`_orm.contains_eager` option within the above + statement has no effect on the behavior of the + :func:`_orm.with_loader_criteria` option. If the + :func:`_orm.contains_eager` option were omitted, the SQL would be + the same as regards the FROM and WHERE clauses, where + :func:`_orm.with_loader_criteria` continues to add its criteria to + the ON clause of the JOIN. The addition of + :func:`_orm.contains_eager` only affects the columns clause, in that + additional columns against ``b`` are added which are then consumed + by the ORM to produce ``B`` instances. + + .. warning:: The use of a lambda inside of the call to + :func:`_orm.with_loader_criteria` is only invoked **once per unique + class**. Custom functions should not be invoked within this lambda. + See :ref:`engine_lambda_caching` for an overview of the "lambda SQL" + feature, which is for advanced use only. + + :param entity_or_base: a mapped class, or a class that is a super + class of a particular set of mapped classes, to which the rule + will apply. + + :param where_criteria: a Core SQL expression that applies limiting + criteria. This may also be a "lambda:" or Python function that + accepts a target class as an argument, when the given class is + a base with many different mapped subclasses. + + .. note:: To support pickling, use a module-level Python function to + produce the SQL expression instead of a lambda or a fixed SQL + expression, which tend to not be picklable. + + :param include_aliases: if True, apply the rule to :func:`_orm.aliased` + constructs as well. + + :param propagate_to_loaders: defaults to True, apply to relationship + loaders such as lazy loaders. This indicates that the + option object itself including SQL expression is carried along with + each loaded instance. Set to ``False`` to prevent the object from + being assigned to individual instances. + + + .. seealso:: + + :ref:`examples_session_orm_events` - includes examples of using + :func:`_orm.with_loader_criteria`. + + :ref:`do_orm_execute_global_criteria` - basic example on how to + combine :func:`_orm.with_loader_criteria` with the + :meth:`_orm.SessionEvents.do_orm_execute` event. + + :param track_closure_variables: when False, closure variables inside + of a lambda expression will not be used as part of + any cache key. This allows more complex expressions to be used + inside of a lambda expression but requires that the lambda ensures + it returns the identical SQL every time given a particular class. + + .. 
versionadded:: 1.4.0b2 + + """ # noqa: E501 + return LoaderCriteriaOption( + entity_or_base, + where_criteria, + loader_only, + include_aliases, + propagate_to_loaders, + track_closure_variables, + ) + + +def relationship( + argument: Optional[_RelationshipArgumentType[Any]] = None, + secondary: Optional[_RelationshipSecondaryArgument] = None, + *, + uselist: Optional[bool] = None, + collection_class: Optional[ + Union[Type[Collection[Any]], Callable[[], Collection[Any]]] + ] = None, + primaryjoin: Optional[_RelationshipJoinConditionArgument] = None, + secondaryjoin: Optional[_RelationshipJoinConditionArgument] = None, + back_populates: Optional[str] = None, + order_by: _ORMOrderByArgument = False, + backref: Optional[ORMBackrefArgument] = None, + overlaps: Optional[str] = None, + post_update: bool = False, + cascade: str = "save-update, merge", + viewonly: bool = False, + init: Union[_NoArg, bool] = _NoArg.NO_ARG, + repr: Union[_NoArg, bool] = _NoArg.NO_ARG, # noqa: A002 + default: Union[_NoArg, _T] = _NoArg.NO_ARG, + default_factory: Union[_NoArg, Callable[[], _T]] = _NoArg.NO_ARG, + compare: Union[_NoArg, bool] = _NoArg.NO_ARG, + kw_only: Union[_NoArg, bool] = _NoArg.NO_ARG, + hash: Union[_NoArg, bool, None] = _NoArg.NO_ARG, # noqa: A002 + lazy: _LazyLoadArgumentType = "select", + passive_deletes: Union[Literal["all"], bool] = False, + passive_updates: bool = True, + active_history: bool = False, + enable_typechecks: bool = True, + foreign_keys: Optional[_ORMColCollectionArgument] = None, + remote_side: Optional[_ORMColCollectionArgument] = None, + join_depth: Optional[int] = None, + comparator_factory: Optional[ + Type[RelationshipProperty.Comparator[Any]] + ] = None, + single_parent: bool = False, + innerjoin: bool = False, + distinct_target_key: Optional[bool] = None, + load_on_pending: bool = False, + query_class: Optional[Type[Query[Any]]] = None, + info: Optional[_InfoType] = None, + omit_join: Literal[None, False] = None, + sync_backref: Optional[bool] = None, + **kw: Any, +) -> _RelationshipDeclared[Any]: + """Provide a relationship between two mapped classes. + + This corresponds to a parent-child or associative table relationship. + The constructed class is an instance of :class:`.Relationship`. + + .. seealso:: + + :ref:`tutorial_orm_related_objects` - tutorial introduction + to :func:`_orm.relationship` in the :ref:`unified_tutorial` + + :ref:`relationship_config_toplevel` - narrative documentation + + :param argument: + This parameter refers to the class that is to be related. It + accepts several forms, including a direct reference to the target + class itself, the :class:`_orm.Mapper` instance for the target class, + a Python callable / lambda that will return a reference to the + class or :class:`_orm.Mapper` when called, and finally a string + name for the class, which will be resolved from the + :class:`_orm.registry` in use in order to locate the class, e.g.:: + + class SomeClass(Base): + # ... + + related = relationship("RelatedClass") + + The :paramref:`_orm.relationship.argument` may also be omitted from the + :func:`_orm.relationship` construct entirely, and instead placed inside + a :class:`_orm.Mapped` annotation on the left side, which should + include a Python collection type if the relationship is expected + to be a collection, such as:: + + class SomeClass(Base): + # ... + + related_items: Mapped[List["RelatedItem"]] = relationship() + + Or for a many-to-one or one-to-one relationship:: + + class SomeClass(Base): + # ... 
+ + related_item: Mapped["RelatedItem"] = relationship() + + .. seealso:: + + :ref:`orm_declarative_properties` - further detail + on relationship configuration when using Declarative. + + :param secondary: + For a many-to-many relationship, specifies the intermediary + table, and is typically an instance of :class:`_schema.Table`. + In less common circumstances, the argument may also be specified + as an :class:`_expression.Alias` construct, or even a + :class:`_expression.Join` construct. + + :paramref:`_orm.relationship.secondary` may + also be passed as a callable function which is evaluated at + mapper initialization time. When using Declarative, it may also + be a string argument noting the name of a :class:`_schema.Table` + that is + present in the :class:`_schema.MetaData` + collection associated with the + parent-mapped :class:`_schema.Table`. + + .. warning:: When passed as a Python-evaluable string, the + argument is interpreted using Python's ``eval()`` function. + **DO NOT PASS UNTRUSTED INPUT TO THIS STRING**. + See :ref:`declarative_relationship_eval` for details on + declarative evaluation of :func:`_orm.relationship` arguments. + + The :paramref:`_orm.relationship.secondary` keyword argument is + typically applied in the case where the intermediary + :class:`_schema.Table` + is not otherwise expressed in any direct class mapping. If the + "secondary" table is also explicitly mapped elsewhere (e.g. as in + :ref:`association_pattern`), one should consider applying the + :paramref:`_orm.relationship.viewonly` flag so that this + :func:`_orm.relationship` + is not used for persistence operations which + may conflict with those of the association object pattern. + + .. seealso:: + + :ref:`relationships_many_to_many` - Reference example of "many + to many". + + :ref:`self_referential_many_to_many` - Specifics on using + many-to-many in a self-referential case. + + :ref:`declarative_many_to_many` - Additional options when using + Declarative. + + :ref:`association_pattern` - an alternative to + :paramref:`_orm.relationship.secondary` + when composing association + table relationships, allowing additional attributes to be + specified on the association table. + + :ref:`composite_secondary_join` - a lesser-used pattern which + in some cases can enable complex :func:`_orm.relationship` SQL + conditions to be used. + + :param active_history=False: + When ``True``, indicates that the "previous" value for a + many-to-one reference should be loaded when replaced, if + not already loaded. Normally, history tracking logic for + simple many-to-ones only needs to be aware of the "new" + value in order to perform a flush. This flag is available + for applications that make use of + :func:`.attributes.get_history` which also need to know + the "previous" value of the attribute. + + :param backref: + A reference to a string relationship name, or a :func:`_orm.backref` + construct, which will be used to automatically generate a new + :func:`_orm.relationship` on the related class, which then refers to this + one using a bi-directional :paramref:`_orm.relationship.back_populates` + configuration. + + In modern Python, explicit use of :func:`_orm.relationship` + with :paramref:`_orm.relationship.back_populates` should be preferred, + as it is more robust in terms of mapper configuration as well as + more conceptually straightforward. It also integrates with + new :pep:`484` typing features introduced in SQLAlchemy 2.0 which + is not possible with dynamically generated attributes. + + .. 
seealso::
+
+        :ref:`relationships_backref` - notes on using
+        :paramref:`_orm.relationship.backref`
+
+        :ref:`tutorial_orm_related_objects` - in the :ref:`unified_tutorial`,
+        presents an overview of bi-directional relationship configuration
+        and behaviors using :paramref:`_orm.relationship.back_populates`
+
+        :func:`.backref` - allows control over :func:`_orm.relationship`
+        configuration when using :paramref:`_orm.relationship.backref`.
+
+
+    :param back_populates:
+      Indicates the name of a :func:`_orm.relationship` on the related
+      class that will be synchronized with this one.  It is usually
+      expected that the :func:`_orm.relationship` on the related class
+      also refer to this one.  This allows objects on both sides of
+      each :func:`_orm.relationship` to synchronize in-Python state
+      changes and also provides directives to the :term:`unit of work`
+      flush process as to how changes along these relationships should
+      be persisted.
+
+      .. seealso::
+
+        :ref:`tutorial_orm_related_objects` - in the :ref:`unified_tutorial`,
+        presents an overview of bi-directional relationship configuration
+        and behaviors.
+
+        :ref:`relationship_patterns` - includes many examples of
+        :paramref:`_orm.relationship.back_populates`.
+
+        :paramref:`_orm.relationship.backref` - legacy form which allows
+        more succinct configuration, but does not support explicit typing
+
+    :param overlaps:
+      A string name or comma-delimited set of names of other relationships
+      on either this mapper, a descendant mapper, or a target mapper with
+      which this relationship may write to the same foreign keys upon
+      persistence.  The only effect this has is to eliminate the
+      warning that this relationship will conflict with another upon
+      persistence.  This is used for such relationships that are truly
+      capable of conflicting with each other on write, but the application
+      will ensure that no such conflicts occur.
+
+      .. versionadded:: 1.4
+
+      .. seealso::
+
+        :ref:`error_qzyx` - usage example
+
+    :param cascade:
+      A comma-separated list of cascade rules which determines how
+      Session operations should be "cascaded" from parent to child.
+      This defaults to ``"save-update, merge"``, which is the cascade
+      applied when the parameter is not otherwise set.
+
+      The available cascades are ``save-update``, ``merge``,
+      ``expunge``, ``delete``, ``delete-orphan``, and ``refresh-expire``.
+      An additional option, ``all`` indicates shorthand for
+      ``"save-update, merge, refresh-expire,
+      expunge, delete"``, and is often used as in ``"all, delete-orphan"``
+      to indicate that related objects should follow along with the
+      parent object in all cases, and be deleted when de-associated.
+
+      .. seealso::
+
+        :ref:`unitofwork_cascades` - Full detail on each of the available
+        cascade options.
+
+    :param cascade_backrefs=False:
+      Legacy; this flag is always False.
+
+      .. versionchanged:: 2.0 "cascade_backrefs" functionality has been
+         removed.
+
+    :param collection_class:
+      A class or callable that returns a new list-holding object, which
+      will be used in place of a plain list for storing elements.
+
+      .. seealso::
+
+        :ref:`custom_collections` - Introductory documentation and
+        examples.
+
+    :param comparator_factory:
+      A class which extends :class:`.Relationship.Comparator`
+      which provides custom SQL clause generation for comparison
+      operations.
+
+      .. seealso::
+
+        :class:`.PropComparator` - some detail on redefining comparators
+        at this level.
+
+        :ref:`custom_comparators` - Brief intro to this feature.
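+
+    For illustration only, a minimal sketch of the bi-directional
+    :paramref:`_orm.relationship.back_populates` pattern described above,
+    assuming a Declarative ``Base`` and hypothetical ``parent`` /
+    ``child`` tables::
+
+        class Parent(Base):
+            __tablename__ = "parent"
+
+            id: Mapped[int] = mapped_column(primary_key=True)
+            children: Mapped[List["Child"]] = relationship(
+                back_populates="parent"
+            )
+
+
+        class Child(Base):
+            __tablename__ = "child"
+
+            id: Mapped[int] = mapped_column(primary_key=True)
+            parent_id: Mapped[int] = mapped_column(ForeignKey("parent.id"))
+            parent: Mapped["Parent"] = relationship(back_populates="children")
+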
+ + + :param distinct_target_key=None: + Indicate if a "subquery" eager load should apply the DISTINCT + keyword to the innermost SELECT statement. When left as ``None``, + the DISTINCT keyword will be applied in those cases when the target + columns do not comprise the full primary key of the target table. + When set to ``True``, the DISTINCT keyword is applied to the + innermost SELECT unconditionally. + + It may be desirable to set this flag to False when the DISTINCT is + reducing performance of the innermost subquery beyond that of what + duplicate innermost rows may be causing. + + .. seealso:: + + :ref:`loading_toplevel` - includes an introduction to subquery + eager loading. + + :param doc: + Docstring which will be applied to the resulting descriptor. + + :param foreign_keys: + + A list of columns which are to be used as "foreign key" + columns, or columns which refer to the value in a remote + column, within the context of this :func:`_orm.relationship` + object's :paramref:`_orm.relationship.primaryjoin` condition. + That is, if the :paramref:`_orm.relationship.primaryjoin` + condition of this :func:`_orm.relationship` is ``a.id == + b.a_id``, and the values in ``b.a_id`` are required to be + present in ``a.id``, then the "foreign key" column of this + :func:`_orm.relationship` is ``b.a_id``. + + In normal cases, the :paramref:`_orm.relationship.foreign_keys` + parameter is **not required.** :func:`_orm.relationship` will + automatically determine which columns in the + :paramref:`_orm.relationship.primaryjoin` condition are to be + considered "foreign key" columns based on those + :class:`_schema.Column` objects that specify + :class:`_schema.ForeignKey`, + or are otherwise listed as referencing columns in a + :class:`_schema.ForeignKeyConstraint` construct. + :paramref:`_orm.relationship.foreign_keys` is only needed when: + + 1. There is more than one way to construct a join from the local + table to the remote table, as there are multiple foreign key + references present. Setting ``foreign_keys`` will limit the + :func:`_orm.relationship` + to consider just those columns specified + here as "foreign". + + 2. The :class:`_schema.Table` being mapped does not actually have + :class:`_schema.ForeignKey` or + :class:`_schema.ForeignKeyConstraint` + constructs present, often because the table + was reflected from a database that does not support foreign key + reflection (MySQL MyISAM). + + 3. The :paramref:`_orm.relationship.primaryjoin` + argument is used to + construct a non-standard join condition, which makes use of + columns or expressions that do not normally refer to their + "parent" column, such as a join condition expressed by a + complex comparison using a SQL function. + + The :func:`_orm.relationship` construct will raise informative + error messages that suggest the use of the + :paramref:`_orm.relationship.foreign_keys` parameter when + presented with an ambiguous condition. In typical cases, + if :func:`_orm.relationship` doesn't raise any exceptions, the + :paramref:`_orm.relationship.foreign_keys` parameter is usually + not needed. + + :paramref:`_orm.relationship.foreign_keys` may also be passed as a + callable function which is evaluated at mapper initialization time, + and may be passed as a Python-evaluable string when using + Declarative. + + .. warning:: When passed as a Python-evaluable string, the + argument is interpreted using Python's ``eval()`` function. + **DO NOT PASS UNTRUSTED INPUT TO THIS STRING**. 
+ See :ref:`declarative_relationship_eval` for details on + declarative evaluation of :func:`_orm.relationship` arguments. + + .. seealso:: + + :ref:`relationship_foreign_keys` + + :ref:`relationship_custom_foreign` + + :func:`.foreign` - allows direct annotation of the "foreign" + columns within a :paramref:`_orm.relationship.primaryjoin` + condition. + + :param info: Optional data dictionary which will be populated into the + :attr:`.MapperProperty.info` attribute of this object. + + :param innerjoin=False: + When ``True``, joined eager loads will use an inner join to join + against related tables instead of an outer join. The purpose + of this option is generally one of performance, as inner joins + generally perform better than outer joins. + + This flag can be set to ``True`` when the relationship references an + object via many-to-one using local foreign keys that are not + nullable, or when the reference is one-to-one or a collection that + is guaranteed to have one or at least one entry. + + The option supports the same "nested" and "unnested" options as + that of :paramref:`_orm.joinedload.innerjoin`. See that flag + for details on nested / unnested behaviors. + + .. seealso:: + + :paramref:`_orm.joinedload.innerjoin` - the option as specified by + loader option, including detail on nesting behavior. + + :ref:`what_kind_of_loading` - Discussion of some details of + various loader options. + + + :param join_depth: + When non-``None``, an integer value indicating how many levels + deep "eager" loaders should join on a self-referring or cyclical + relationship. The number counts how many times the same Mapper + shall be present in the loading condition along a particular join + branch. When left at its default of ``None``, eager loaders + will stop chaining when they encounter a the same target mapper + which is already higher up in the chain. This option applies + both to joined- and subquery- eager loaders. + + .. seealso:: + + :ref:`self_referential_eager_loading` - Introductory documentation + and examples. + + :param lazy='select': specifies + How the related items should be loaded. Default value is + ``select``. Values include: + + * ``select`` - items should be loaded lazily when the property is + first accessed, using a separate SELECT statement, or identity map + fetch for simple many-to-one references. + + * ``immediate`` - items should be loaded as the parents are loaded, + using a separate SELECT statement, or identity map fetch for + simple many-to-one references. + + * ``joined`` - items should be loaded "eagerly" in the same query as + that of the parent, using a JOIN or LEFT OUTER JOIN. Whether + the join is "outer" or not is determined by the + :paramref:`_orm.relationship.innerjoin` parameter. + + * ``subquery`` - items should be loaded "eagerly" as the parents are + loaded, using one additional SQL statement, which issues a JOIN to + a subquery of the original statement, for each collection + requested. + + * ``selectin`` - items should be loaded "eagerly" as the parents + are loaded, using one or more additional SQL statements, which + issues a JOIN to the immediate parent object, specifying primary + key identifiers using an IN clause. + + * ``noload`` - no loading should occur at any time. The related + collection will remain empty. The ``noload`` strategy is not + recommended for general use. 
For a general use "never load" + approach, see :ref:`write_only_relationship` + + * ``raise`` - lazy loading is disallowed; accessing + the attribute, if its value were not already loaded via eager + loading, will raise an :exc:`~sqlalchemy.exc.InvalidRequestError`. + This strategy can be used when objects are to be detached from + their attached :class:`.Session` after they are loaded. + + * ``raise_on_sql`` - lazy loading that emits SQL is disallowed; + accessing the attribute, if its value were not already loaded via + eager loading, will raise an + :exc:`~sqlalchemy.exc.InvalidRequestError`, **if the lazy load + needs to emit SQL**. If the lazy load can pull the related value + from the identity map or determine that it should be None, the + value is loaded. This strategy can be used when objects will + remain associated with the attached :class:`.Session`, however + additional SELECT statements should be blocked. + + * ``write_only`` - the attribute will be configured with a special + "virtual collection" that may receive + :meth:`_orm.WriteOnlyCollection.add` and + :meth:`_orm.WriteOnlyCollection.remove` commands to add or remove + individual objects, but will not under any circumstances load or + iterate the full set of objects from the database directly. Instead, + methods such as :meth:`_orm.WriteOnlyCollection.select`, + :meth:`_orm.WriteOnlyCollection.insert`, + :meth:`_orm.WriteOnlyCollection.update` and + :meth:`_orm.WriteOnlyCollection.delete` are provided which generate SQL + constructs that may be used to load and modify rows in bulk. Used for + large collections that are never appropriate to load at once into + memory. + + The ``write_only`` loader style is configured automatically when + the :class:`_orm.WriteOnlyMapped` annotation is provided on the + left hand side within a Declarative mapping. See the section + :ref:`write_only_relationship` for examples. + + .. versionadded:: 2.0 + + .. seealso:: + + :ref:`write_only_relationship` - in the :ref:`queryguide_toplevel` + + * ``dynamic`` - the attribute will return a pre-configured + :class:`_query.Query` object for all read + operations, onto which further filtering operations can be + applied before iterating the results. + + The ``dynamic`` loader style is configured automatically when + the :class:`_orm.DynamicMapped` annotation is provided on the + left hand side within a Declarative mapping. See the section + :ref:`dynamic_relationship` for examples. + + .. legacy:: The "dynamic" lazy loader strategy is the legacy form of + what is now the "write_only" strategy described in the section + :ref:`write_only_relationship`. + + .. seealso:: + + :ref:`dynamic_relationship` - in the :ref:`queryguide_toplevel` + + :ref:`write_only_relationship` - more generally useful approach + for large collections that should not fully load into memory + + * True - a synonym for 'select' + + * False - a synonym for 'joined' + + * None - a synonym for 'noload' + + .. seealso:: + + :ref:`orm_queryguide_relationship_loaders` - Full documentation on + relationship loader configuration in the :ref:`queryguide_toplevel`. + + + :param load_on_pending=False: + Indicates loading behavior for transient or pending parent objects. + + When set to ``True``, causes the lazy-loader to + issue a query for a parent object that is not persistent, meaning it + has never been flushed. 
This may take effect for a pending object
+      when autoflush is disabled, or for a transient object that has been
+      "attached" to a :class:`.Session` but is not part of its pending
+      collection.
+
+      The :paramref:`_orm.relationship.load_on_pending`
+      flag does not improve
+      behavior when the ORM is used normally - object references should be
+      constructed at the object level, not at the foreign key level, so
+      that they are present in an ordinary way before a flush proceeds.
+      This flag is not intended for general use.
+
+      .. seealso::
+
+          :meth:`.Session.enable_relationship_loading` - this method
+          establishes "load on pending" behavior for the whole object, and
+          also allows loading on objects that remain transient or
+          detached.
+
+    :param order_by:
+      Indicates the ordering that should be applied when loading these
+      items.  :paramref:`_orm.relationship.order_by`
+      is expected to refer to
+      one of the :class:`_schema.Column`
+      objects to which the target class is
+      mapped, or the attribute itself bound to the target class which
+      refers to the column.
+
+      :paramref:`_orm.relationship.order_by`
+      may also be passed as a callable
+      function which is evaluated at mapper initialization time, and may
+      be passed as a Python-evaluable string when using Declarative.
+
+      .. warning:: When passed as a Python-evaluable string, the
+         argument is interpreted using Python's ``eval()`` function.
+         **DO NOT PASS UNTRUSTED INPUT TO THIS STRING**.
+         See :ref:`declarative_relationship_eval` for details on
+         declarative evaluation of :func:`_orm.relationship` arguments.
+
+    :param passive_deletes=False:
+       Indicates loading behavior during delete operations.
+
+       A value of True indicates that unloaded child items should not
+       be loaded during a delete operation on the parent.  Normally,
+       when a parent item is deleted, all child items are loaded so
+       that they can either be marked as deleted, or have their
+       foreign key to the parent set to NULL.  Marking this flag as
+       True usually implies an ON DELETE rule is in
+       place which will handle updating/deleting child rows on the
+       database side.
+
+       Additionally, setting the flag to the string value 'all' will
+       disable the "nulling out" of the child foreign keys, when the parent
+       object is deleted and there is no delete or delete-orphan cascade
+       enabled.  This is typically used when a triggering or error raise
+       scenario is in place on the database side.  Note that the foreign
+       key attributes on in-session child objects will not be changed after
+       a flush occurs, so this is a very special use-case setting.
+       Additionally, the "nulling out" will still occur if the child
+       object is de-associated with the parent.
+
+       .. seealso::
+
+           :ref:`passive_deletes` - Introductory documentation
+           and examples.
+
+    :param passive_updates=True:
+      Indicates the persistence behavior to take when a referenced
+      primary key value changes in place, indicating that the referencing
+      foreign key columns will also need their value changed.
+
+      When True, it is assumed that ``ON UPDATE CASCADE`` is configured on
+      the foreign key in the database, and that the database will
+      handle propagation of an UPDATE from a source column to
+      dependent rows.  When False, the SQLAlchemy
+      :func:`_orm.relationship`
+      construct will attempt to emit its own UPDATE statements to
+      modify related targets.  However, note that SQLAlchemy **cannot**
+      emit an UPDATE for more than one level of cascade.
Also, + setting this flag to False is not compatible in the case where + the database is in fact enforcing referential integrity, unless + those constraints are explicitly "deferred", if the target backend + supports it. + + It is highly advised that an application which is employing + mutable primary keys keeps ``passive_updates`` set to True, + and instead uses the referential integrity features of the database + itself in order to handle the change efficiently and fully. + + .. seealso:: + + :ref:`passive_updates` - Introductory documentation and + examples. + + :paramref:`.mapper.passive_updates` - a similar flag which + takes effect for joined-table inheritance mappings. + + :param post_update: + This indicates that the relationship should be handled by a + second UPDATE statement after an INSERT or before a + DELETE. This flag is used to handle saving bi-directional + dependencies between two individual rows (i.e. each row + references the other), where it would otherwise be impossible to + INSERT or DELETE both rows fully since one row exists before the + other. Use this flag when a particular mapping arrangement will + incur two rows that are dependent on each other, such as a table + that has a one-to-many relationship to a set of child rows, and + also has a column that references a single child row within that + list (i.e. both tables contain a foreign key to each other). If + a flush operation returns an error that a "cyclical + dependency" was detected, this is a cue that you might want to + use :paramref:`_orm.relationship.post_update` to "break" the cycle. + + .. seealso:: + + :ref:`post_update` - Introductory documentation and examples. + + :param primaryjoin: + A SQL expression that will be used as the primary + join of the child object against the parent object, or in a + many-to-many relationship the join of the parent object to the + association table. By default, this value is computed based on the + foreign key relationships of the parent and child tables (or + association table). + + :paramref:`_orm.relationship.primaryjoin` may also be passed as a + callable function which is evaluated at mapper initialization time, + and may be passed as a Python-evaluable string when using + Declarative. + + .. warning:: When passed as a Python-evaluable string, the + argument is interpreted using Python's ``eval()`` function. + **DO NOT PASS UNTRUSTED INPUT TO THIS STRING**. + See :ref:`declarative_relationship_eval` for details on + declarative evaluation of :func:`_orm.relationship` arguments. + + .. seealso:: + + :ref:`relationship_primaryjoin` + + :param remote_side: + Used for self-referential relationships, indicates the column or + list of columns that form the "remote side" of the relationship. + + :paramref:`_orm.relationship.remote_side` may also be passed as a + callable function which is evaluated at mapper initialization time, + and may be passed as a Python-evaluable string when using + Declarative. + + .. warning:: When passed as a Python-evaluable string, the + argument is interpreted using Python's ``eval()`` function. + **DO NOT PASS UNTRUSTED INPUT TO THIS STRING**. + See :ref:`declarative_relationship_eval` for details on + declarative evaluation of :func:`_orm.relationship` arguments. + + .. seealso:: + + :ref:`self_referential` - in-depth explanation of how + :paramref:`_orm.relationship.remote_side` + is used to configure self-referential relationships. 
+ + :func:`.remote` - an annotation function that accomplishes the + same purpose as :paramref:`_orm.relationship.remote_side`, + typically + when a custom :paramref:`_orm.relationship.primaryjoin` condition + is used. + + :param query_class: + A :class:`_query.Query` + subclass that will be used internally by the + ``AppenderQuery`` returned by a "dynamic" relationship, that + is, a relationship that specifies ``lazy="dynamic"`` or was + otherwise constructed using the :func:`_orm.dynamic_loader` + function. + + .. seealso:: + + :ref:`dynamic_relationship` - Introduction to "dynamic" + relationship loaders. + + :param secondaryjoin: + A SQL expression that will be used as the join of + an association table to the child object. By default, this value is + computed based on the foreign key relationships of the association + and child tables. + + :paramref:`_orm.relationship.secondaryjoin` may also be passed as a + callable function which is evaluated at mapper initialization time, + and may be passed as a Python-evaluable string when using + Declarative. + + .. warning:: When passed as a Python-evaluable string, the + argument is interpreted using Python's ``eval()`` function. + **DO NOT PASS UNTRUSTED INPUT TO THIS STRING**. + See :ref:`declarative_relationship_eval` for details on + declarative evaluation of :func:`_orm.relationship` arguments. + + .. seealso:: + + :ref:`relationship_primaryjoin` + + :param single_parent: + When True, installs a validator which will prevent objects + from being associated with more than one parent at a time. + This is used for many-to-one or many-to-many relationships that + should be treated either as one-to-one or one-to-many. Its usage + is optional, except for :func:`_orm.relationship` constructs which + are many-to-one or many-to-many and also + specify the ``delete-orphan`` cascade option. The + :func:`_orm.relationship` construct itself will raise an error + instructing when this option is required. + + .. seealso:: + + :ref:`unitofwork_cascades` - includes detail on when the + :paramref:`_orm.relationship.single_parent` + flag may be appropriate. + + :param uselist: + A boolean that indicates if this property should be loaded as a + list or a scalar. In most cases, this value is determined + automatically by :func:`_orm.relationship` at mapper configuration + time. When using explicit :class:`_orm.Mapped` annotations, + :paramref:`_orm.relationship.uselist` may be derived from the + whether or not the annotation within :class:`_orm.Mapped` contains + a collection class. + Otherwise, :paramref:`_orm.relationship.uselist` may be derived from + the type and direction + of the relationship - one to many forms a list, many to one + forms a scalar, many to many is a list. If a scalar is desired + where normally a list would be present, such as a bi-directional + one-to-one relationship, use an appropriate :class:`_orm.Mapped` + annotation or set :paramref:`_orm.relationship.uselist` to False. + + The :paramref:`_orm.relationship.uselist` + flag is also available on an + existing :func:`_orm.relationship` + construct as a read-only attribute, + which can be used to determine if this :func:`_orm.relationship` + deals + with collections or scalar attributes:: + + >>> User.addresses.property.uselist + True + + .. seealso:: + + :ref:`relationships_one_to_one` - Introduction to the "one to + one" relationship pattern, which is typically when an alternate + setting for :paramref:`_orm.relationship.uselist` is involved. 
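+
+    For illustration only (a sketch assuming a variant of the hypothetical
+    ``Parent`` / ``Child`` mapping shown earlier in this docstring), a
+    scalar one-to-one form may be expressed either through the annotation
+    alone, or with this flag in non-annotated configurations::
+
+        # scalar form derived from the ``Mapped`` annotation
+        child: Mapped["Child"] = relationship(back_populates="parent")
+
+        # legacy / non-annotated form
+        child = relationship("Child", back_populates="parent", uselist=False)
+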
+
+    :param viewonly=False:
+      When set to ``True``, the relationship is used only for loading
+      objects, and not for any persistence operation.  A
+      :func:`_orm.relationship` which specifies
+      :paramref:`_orm.relationship.viewonly` can work
+      with a wider range of SQL operations within the
+      :paramref:`_orm.relationship.primaryjoin` condition, including
+      operations that feature the use of a variety of comparison operators
+      as well as SQL functions such as :func:`_expression.cast`.  The
+      :paramref:`_orm.relationship.viewonly`
+      flag is also of general use when defining any kind of
+      :func:`_orm.relationship` that doesn't represent
+      the full set of related objects, to prevent modifications of the
+      collection from resulting in persistence operations.
+
+      .. seealso::
+
+        :ref:`relationship_viewonly_notes` - more details on best practices
+        when using :paramref:`_orm.relationship.viewonly`.
+
+    :param sync_backref:
+      A boolean that enables the events used to synchronize the in-Python
+      attributes when this relationship is the target of either
+      :paramref:`_orm.relationship.backref` or
+      :paramref:`_orm.relationship.back_populates`.
+
+      Defaults to ``None``, which indicates that an automatic value should
+      be selected based on the value of the
+      :paramref:`_orm.relationship.viewonly` flag.  When left at its
+      default, changes in state will be back-populated only if neither
+      side of the relationship is viewonly.
+
+      .. versionadded:: 1.3.17
+
+      .. versionchanged:: 1.4 - A relationship that specifies
+         :paramref:`_orm.relationship.viewonly` automatically implies
+         that :paramref:`_orm.relationship.sync_backref` is ``False``.
+
+      .. seealso::
+
+        :paramref:`_orm.relationship.viewonly`
+
+    :param omit_join:
+      Allows manual control over the "selectin" automatic join
+      optimization.  Set to ``False`` to disable the "omit join" feature
+      added in SQLAlchemy 1.3; or leave as ``None`` to leave automatic
+      optimization in place.
+
+      .. note:: This flag may only be set to ``False``.  It is not
+         necessary to set it to ``True`` as the "omit_join" optimization is
+         automatically detected; if it is not detected, then the
+         optimization is not supported.
+
+      .. versionchanged:: 1.3.11  setting ``omit_join`` to True will now
+         emit a warning as this was not the intended use of this flag.
+
+      .. versionadded:: 1.3
+
+    :param init: Specific to :ref:`orm_declarative_native_dataclasses`,
+     specifies if the mapped attribute should be part of the ``__init__()``
+     method as generated by the dataclass process.
+    :param repr: Specific to :ref:`orm_declarative_native_dataclasses`,
+     specifies if the mapped attribute should be part of the ``__repr__()``
+     method as generated by the dataclass process.
+    :param default_factory: Specific to
+     :ref:`orm_declarative_native_dataclasses`,
+     specifies a default-value generation function that will take place
+     as part of the ``__init__()``
+     method as generated by the dataclass process.
+    :param compare: Specific to
+     :ref:`orm_declarative_native_dataclasses`, indicates if this field
+     should be included in comparison operations when generating the
+     ``__eq__()`` and ``__ne__()`` methods for the mapped class.
+
+     .. versionadded:: 2.0.0b4
+
+    :param kw_only: Specific to
+     :ref:`orm_declarative_native_dataclasses`, indicates if this field
+     should be marked as keyword-only when generating the ``__init__()``.
+
+    :param hash: Specific to
+     :ref:`orm_declarative_native_dataclasses`, controls if this field
+     is included when generating the ``__hash__()`` method for the mapped
+     class.
+ + .. versionadded:: 2.0.36 + """ + + return _RelationshipDeclared( + argument, + secondary=secondary, + uselist=uselist, + collection_class=collection_class, + primaryjoin=primaryjoin, + secondaryjoin=secondaryjoin, + back_populates=back_populates, + order_by=order_by, + backref=backref, + overlaps=overlaps, + post_update=post_update, + cascade=cascade, + viewonly=viewonly, + attribute_options=_AttributeOptions( + init, repr, default, default_factory, compare, kw_only, hash + ), + lazy=lazy, + passive_deletes=passive_deletes, + passive_updates=passive_updates, + active_history=active_history, + enable_typechecks=enable_typechecks, + foreign_keys=foreign_keys, + remote_side=remote_side, + join_depth=join_depth, + comparator_factory=comparator_factory, + single_parent=single_parent, + innerjoin=innerjoin, + distinct_target_key=distinct_target_key, + load_on_pending=load_on_pending, + query_class=query_class, + info=info, + omit_join=omit_join, + sync_backref=sync_backref, + **kw, + ) + + +def synonym( + name: str, + *, + map_column: Optional[bool] = None, + descriptor: Optional[Any] = None, + comparator_factory: Optional[Type[PropComparator[_T]]] = None, + init: Union[_NoArg, bool] = _NoArg.NO_ARG, + repr: Union[_NoArg, bool] = _NoArg.NO_ARG, # noqa: A002 + default: Union[_NoArg, _T] = _NoArg.NO_ARG, + default_factory: Union[_NoArg, Callable[[], _T]] = _NoArg.NO_ARG, + compare: Union[_NoArg, bool] = _NoArg.NO_ARG, + kw_only: Union[_NoArg, bool] = _NoArg.NO_ARG, + hash: Union[_NoArg, bool, None] = _NoArg.NO_ARG, # noqa: A002 + info: Optional[_InfoType] = None, + doc: Optional[str] = None, +) -> Synonym[Any]: + """Denote an attribute name as a synonym to a mapped property, + in that the attribute will mirror the value and expression behavior + of another attribute. + + e.g.:: + + class MyClass(Base): + __tablename__ = "my_table" + + id = Column(Integer, primary_key=True) + job_status = Column(String(50)) + + status = synonym("job_status") + + :param name: the name of the existing mapped property. This + can refer to the string name ORM-mapped attribute + configured on the class, including column-bound attributes + and relationships. + + :param descriptor: a Python :term:`descriptor` that will be used + as a getter (and potentially a setter) when this attribute is + accessed at the instance level. + + :param map_column: **For classical mappings and mappings against + an existing Table object only**. if ``True``, the :func:`.synonym` + construct will locate the :class:`_schema.Column` + object upon the mapped + table that would normally be associated with the attribute name of + this synonym, and produce a new :class:`.ColumnProperty` that instead + maps this :class:`_schema.Column` + to the alternate name given as the "name" + argument of the synonym; in this way, the usual step of redefining + the mapping of the :class:`_schema.Column` + to be under a different name is + unnecessary. 
This is usually intended to be used when a + :class:`_schema.Column` + is to be replaced with an attribute that also uses a + descriptor, that is, in conjunction with the + :paramref:`.synonym.descriptor` parameter:: + + my_table = Table( + "my_table", + metadata, + Column("id", Integer, primary_key=True), + Column("job_status", String(50)), + ) + + + class MyClass: + @property + def _job_status_descriptor(self): + return "Status: %s" % self._job_status + + + mapper( + MyClass, + my_table, + properties={ + "job_status": synonym( + "_job_status", + map_column=True, + descriptor=MyClass._job_status_descriptor, + ) + }, + ) + + Above, the attribute named ``_job_status`` is automatically + mapped to the ``job_status`` column:: + + >>> j1 = MyClass() + >>> j1._job_status = "employed" + >>> j1.job_status + Status: employed + + When using Declarative, in order to provide a descriptor in + conjunction with a synonym, use the + :func:`sqlalchemy.ext.declarative.synonym_for` helper. However, + note that the :ref:`hybrid properties ` feature + should usually be preferred, particularly when redefining attribute + behavior. + + :param info: Optional data dictionary which will be populated into the + :attr:`.InspectionAttr.info` attribute of this object. + + :param comparator_factory: A subclass of :class:`.PropComparator` + that will provide custom comparison behavior at the SQL expression + level. + + .. note:: + + For the use case of providing an attribute which redefines both + Python-level and SQL-expression level behavior of an attribute, + please refer to the Hybrid attribute introduced at + :ref:`mapper_hybrids` for a more effective technique. + + .. seealso:: + + :ref:`synonyms` - Overview of synonyms + + :func:`.synonym_for` - a helper oriented towards Declarative + + :ref:`mapper_hybrids` - The Hybrid Attribute extension provides an + updated approach to augmenting attribute behavior more flexibly + than can be achieved with synonyms. + + """ + return Synonym( + name, + map_column=map_column, + descriptor=descriptor, + comparator_factory=comparator_factory, + attribute_options=_AttributeOptions( + init, repr, default, default_factory, compare, kw_only, hash + ), + doc=doc, + info=info, + ) + + +def create_session( + bind: Optional[_SessionBind] = None, **kwargs: Any +) -> Session: + r"""Create a new :class:`.Session` + with no automation enabled by default. + + This function is used primarily for testing. The usual + route to :class:`.Session` creation is via its constructor + or the :func:`.sessionmaker` function. + + :param bind: optional, a single Connectable to use for all + database access in the created + :class:`~sqlalchemy.orm.session.Session`. + + :param \*\*kwargs: optional, passed through to the + :class:`.Session` constructor. + + :returns: an :class:`~sqlalchemy.orm.session.Session` instance + + The defaults of create_session() are the opposite of that of + :func:`sessionmaker`; ``autoflush`` and ``expire_on_commit`` are + False. + + Usage:: + + >>> from sqlalchemy.orm import create_session + >>> session = create_session() + + It is recommended to use :func:`sessionmaker` instead of + create_session(). + + """ + + kwargs.setdefault("autoflush", False) + kwargs.setdefault("expire_on_commit", False) + return Session(bind=bind, **kwargs) + + +def _mapper_fn(*arg: Any, **kw: Any) -> NoReturn: + """Placeholder for the now-removed ``mapper()`` function. + + Classical mappings should be performed using the + :meth:`_orm.registry.map_imperatively` method. 
+ + This symbol remains in SQLAlchemy 2.0 to suit the deprecated use case + of using the ``mapper()`` function as a target for ORM event listeners, + which failed to be marked as deprecated in the 1.4 series. + + Global ORM mapper listeners should instead use the :class:`_orm.Mapper` + class as the target. + + .. versionchanged:: 2.0 The ``mapper()`` function was removed; the + symbol remains temporarily as a placeholder for the event listening + use case. + + """ + raise InvalidRequestError( + "The 'sqlalchemy.orm.mapper()' function is removed as of " + "SQLAlchemy 2.0. Use the " + "'sqlalchemy.orm.registry.map_imperatively()` " + "method of the ``sqlalchemy.orm.registry`` class to perform " + "classical mapping." + ) + + +def dynamic_loader( + argument: Optional[_RelationshipArgumentType[Any]] = None, **kw: Any +) -> RelationshipProperty[Any]: + """Construct a dynamically-loading mapper property. + + This is essentially the same as + using the ``lazy='dynamic'`` argument with :func:`relationship`:: + + dynamic_loader(SomeClass) + + # is the same as + + relationship(SomeClass, lazy="dynamic") + + See the section :ref:`dynamic_relationship` for more details + on dynamic loading. + + """ + kw["lazy"] = "dynamic" + return relationship(argument, **kw) + + +def backref(name: str, **kwargs: Any) -> ORMBackrefArgument: + """When using the :paramref:`_orm.relationship.backref` parameter, + provides specific parameters to be used when the new + :func:`_orm.relationship` is generated. + + E.g.:: + + "items": relationship(SomeItem, backref=backref("parent", lazy="subquery")) + + The :paramref:`_orm.relationship.backref` parameter is generally + considered to be legacy; for modern applications, using + explicit :func:`_orm.relationship` constructs linked together using + the :paramref:`_orm.relationship.back_populates` parameter should be + preferred. + + .. seealso:: + + :ref:`relationships_backref` - background on backrefs + + """ # noqa: E501 + + return (name, kwargs) + + +def deferred( + column: _ORMColumnExprArgument[_T], + *additional_columns: _ORMColumnExprArgument[Any], + group: Optional[str] = None, + raiseload: bool = False, + comparator_factory: Optional[Type[PropComparator[_T]]] = None, + init: Union[_NoArg, bool] = _NoArg.NO_ARG, + repr: Union[_NoArg, bool] = _NoArg.NO_ARG, # noqa: A002 + default: Optional[Any] = _NoArg.NO_ARG, + default_factory: Union[_NoArg, Callable[[], _T]] = _NoArg.NO_ARG, + compare: Union[_NoArg, bool] = _NoArg.NO_ARG, + kw_only: Union[_NoArg, bool] = _NoArg.NO_ARG, + hash: Union[_NoArg, bool, None] = _NoArg.NO_ARG, # noqa: A002 + active_history: bool = False, + expire_on_flush: bool = True, + info: Optional[_InfoType] = None, + doc: Optional[str] = None, +) -> MappedSQLExpression[_T]: + r"""Indicate a column-based mapped attribute that by default will + not load unless accessed. + + When using :func:`_orm.mapped_column`, the same functionality as + that of :func:`_orm.deferred` construct is provided by using the + :paramref:`_orm.mapped_column.deferred` parameter. + + :param \*columns: columns to be mapped. This is typically a single + :class:`_schema.Column` object, + however a collection is supported in order + to support multiple columns mapped under the same attribute. + + :param raiseload: boolean, if True, indicates an exception should be raised + if the load operation is to take place. + + .. versionadded:: 1.4 + + + Additional arguments are the same as that of :func:`_orm.column_property`. + + .. 
seealso:: + + :ref:`orm_queryguide_deferred_imperative` + + """ + return MappedSQLExpression( + column, + *additional_columns, + attribute_options=_AttributeOptions( + init, repr, default, default_factory, compare, kw_only, hash + ), + group=group, + deferred=True, + raiseload=raiseload, + comparator_factory=comparator_factory, + active_history=active_history, + expire_on_flush=expire_on_flush, + info=info, + doc=doc, + ) + + +def query_expression( + default_expr: _ORMColumnExprArgument[_T] = sql.null(), + *, + repr: Union[_NoArg, bool] = _NoArg.NO_ARG, # noqa: A002 + compare: Union[_NoArg, bool] = _NoArg.NO_ARG, # noqa: A002 + expire_on_flush: bool = True, + info: Optional[_InfoType] = None, + doc: Optional[str] = None, +) -> MappedSQLExpression[_T]: + """Indicate an attribute that populates from a query-time SQL expression. + + :param default_expr: Optional SQL expression object that will be used in + all cases if not assigned later with :func:`_orm.with_expression`. + + .. versionadded:: 1.2 + + .. seealso:: + + :ref:`orm_queryguide_with_expression` - background and usage examples + + """ + prop = MappedSQLExpression( + default_expr, + attribute_options=_AttributeOptions( + False, + repr, + _NoArg.NO_ARG, + _NoArg.NO_ARG, + compare, + _NoArg.NO_ARG, + _NoArg.NO_ARG, + ), + expire_on_flush=expire_on_flush, + info=info, + doc=doc, + _assume_readonly_dc_attributes=True, + ) + + prop.strategy_key = (("query_expression", True),) + return prop + + +def clear_mappers() -> None: + """Remove all mappers from all classes. + + .. versionchanged:: 1.4 This function now locates all + :class:`_orm.registry` objects and calls upon the + :meth:`_orm.registry.dispose` method of each. + + This function removes all instrumentation from classes and disposes + of their associated mappers. Once called, the classes are unmapped + and can be later re-mapped with new mappers. + + :func:`.clear_mappers` is *not* for normal use, as there is literally no + valid usage for it outside of very specific testing scenarios. Normally, + mappers are permanent structural components of user-defined classes, and + are never discarded independently of their class. If a mapped class + itself is garbage collected, its mapper is automatically disposed of as + well. As such, :func:`.clear_mappers` is only for usage in test suites + that re-use the same classes with different mappings, which is itself an + extremely rare use case - the only such use case is in fact SQLAlchemy's + own test suite, and possibly the test suites of other ORM extension + libraries which intend to test various combinations of mapper construction + upon a fixed set of classes. + + """ + + mapperlib._dispose_registries(mapperlib._all_registries(), False) + + +# I would really like a way to get the Type[] here that shows up +# in a different way in typing tools, however there is no current method +# that is accepted by mypy (subclass of Type[_O] works in pylance, rejected +# by mypy). +AliasedType = Annotated[Type[_O], "aliased"] + + +@overload +def aliased( + element: Type[_O], + alias: Optional[FromClause] = None, + name: Optional[str] = None, + flat: bool = False, + adapt_on_names: bool = False, +) -> AliasedType[_O]: ... + + +@overload +def aliased( + element: Union[AliasedClass[_O], Mapper[_O], AliasedInsp[_O]], + alias: Optional[FromClause] = None, + name: Optional[str] = None, + flat: bool = False, + adapt_on_names: bool = False, +) -> AliasedClass[_O]: ... 
+ + +@overload +def aliased( + element: FromClause, + alias: None = None, + name: Optional[str] = None, + flat: bool = False, + adapt_on_names: bool = False, +) -> FromClause: ... + + +def aliased( + element: Union[_EntityType[_O], FromClause], + alias: Optional[FromClause] = None, + name: Optional[str] = None, + flat: bool = False, + adapt_on_names: bool = False, +) -> Union[AliasedClass[_O], FromClause, AliasedType[_O]]: + """Produce an alias of the given element, usually an :class:`.AliasedClass` + instance. + + E.g.:: + + my_alias = aliased(MyClass) + + stmt = select(MyClass, my_alias).filter(MyClass.id > my_alias.id) + result = session.execute(stmt) + + The :func:`.aliased` function is used to create an ad-hoc mapping of a + mapped class to a new selectable. By default, a selectable is generated + from the normally mapped selectable (typically a :class:`_schema.Table` + ) using the + :meth:`_expression.FromClause.alias` method. However, :func:`.aliased` + can also be + used to link the class to a new :func:`_expression.select` statement. + Also, the :func:`.with_polymorphic` function is a variant of + :func:`.aliased` that is intended to specify a so-called "polymorphic + selectable", that corresponds to the union of several joined-inheritance + subclasses at once. + + For convenience, the :func:`.aliased` function also accepts plain + :class:`_expression.FromClause` constructs, such as a + :class:`_schema.Table` or + :func:`_expression.select` construct. In those cases, the + :meth:`_expression.FromClause.alias` + method is called on the object and the new + :class:`_expression.Alias` object returned. The returned + :class:`_expression.Alias` is not + ORM-mapped in this case. + + .. seealso:: + + :ref:`tutorial_orm_entity_aliases` - in the :ref:`unified_tutorial` + + :ref:`orm_queryguide_orm_aliases` - in the :ref:`queryguide_toplevel` + + :param element: element to be aliased. Is normally a mapped class, + but for convenience can also be a :class:`_expression.FromClause` + element. + + :param alias: Optional selectable unit to map the element to. This is + usually used to link the object to a subquery, and should be an aliased + select construct as one would produce from the + :meth:`_query.Query.subquery` method or + the :meth:`_expression.Select.subquery` or + :meth:`_expression.Select.alias` methods of the :func:`_expression.select` + construct. + + :param name: optional string name to use for the alias, if not specified + by the ``alias`` parameter. The name, among other things, forms the + attribute name that will be accessible via tuples returned by a + :class:`_query.Query` object. Not supported when creating aliases + of :class:`_sql.Join` objects. + + :param flat: Boolean, will be passed through to the + :meth:`_expression.FromClause.alias` call so that aliases of + :class:`_expression.Join` objects will alias the individual tables + inside the join, rather than creating a subquery. This is generally + supported by all modern databases with regards to right-nested joins + and generally produces more efficient queries. + + When :paramref:`_orm.aliased.flat` is combined with + :paramref:`_orm.aliased.name`, the resulting joins will alias individual + tables using a naming scheme similar to ``_``. This + naming scheme is for visibility / debugging purposes only and the + specific scheme is subject to change without notice. + + .. versionadded:: 2.0.32 added support for combining + :paramref:`_orm.aliased.name` with :paramref:`_orm.aliased.flat`. 
+ Previously, this would raise ``NotImplementedError``. + + :param adapt_on_names: if True, more liberal "matching" will be used when + mapping the mapped columns of the ORM entity to those of the + given selectable - a name-based match will be performed if the + given selectable doesn't otherwise have a column that corresponds + to one on the entity. The use case for this is when associating + an entity with some derived selectable such as one that uses + aggregate functions:: + + class UnitPrice(Base): + __tablename__ = "unit_price" + ... + unit_id = Column(Integer) + price = Column(Numeric) + + + aggregated_unit_price = ( + Session.query(func.sum(UnitPrice.price).label("price")) + .group_by(UnitPrice.unit_id) + .subquery() + ) + + aggregated_unit_price = aliased( + UnitPrice, alias=aggregated_unit_price, adapt_on_names=True + ) + + Above, functions on ``aggregated_unit_price`` which refer to + ``.price`` will return the + ``func.sum(UnitPrice.price).label('price')`` column, as it is + matched on the name "price". Ordinarily, the "price" function + wouldn't have any "column correspondence" to the actual + ``UnitPrice.price`` column as it is not a proxy of the original. + + """ + return AliasedInsp._alias_factory( + element, + alias=alias, + name=name, + flat=flat, + adapt_on_names=adapt_on_names, + ) + + +def with_polymorphic( + base: Union[Type[_O], Mapper[_O]], + classes: Union[Literal["*"], Iterable[Type[Any]]], + selectable: Union[Literal[False, None], FromClause] = False, + flat: bool = False, + polymorphic_on: Optional[ColumnElement[Any]] = None, + aliased: bool = False, + innerjoin: bool = False, + adapt_on_names: bool = False, + name: Optional[str] = None, + _use_mapper_path: bool = False, +) -> AliasedClass[_O]: + """Produce an :class:`.AliasedClass` construct which specifies + columns for descendant mappers of the given base. + + Using this method will ensure that each descendant mapper's + tables are included in the FROM clause, and will allow filter() + criterion to be used against those tables. The resulting + instances will also have those columns already loaded so that + no "post fetch" of those columns will be required. + + .. seealso:: + + :ref:`with_polymorphic` - full discussion of + :func:`_orm.with_polymorphic`. + + :param base: Base class to be aliased. + + :param classes: a single class or mapper, or list of + class/mappers, which inherit from the base class. + Alternatively, it may also be the string ``'*'``, in which case + all descending mapped classes will be added to the FROM clause. + + :param aliased: when True, the selectable will be aliased. For a + JOIN, this means the JOIN will be SELECTed from inside of a subquery + unless the :paramref:`_orm.with_polymorphic.flat` flag is set to + True, which is recommended for simpler use cases. + + :param flat: Boolean, will be passed through to the + :meth:`_expression.FromClause.alias` call so that aliases of + :class:`_expression.Join` objects will alias the individual tables + inside the join, rather than creating a subquery. This is generally + supported by all modern databases with regards to right-nested joins + and generally produces more efficient queries. Setting this flag is + recommended as long as the resulting SQL is functional. + + :param selectable: a table or subquery that will + be used in place of the generated FROM clause. This argument is + required if any of the desired classes use concrete table + inheritance, since SQLAlchemy currently cannot generate UNIONs + among tables automatically. 
If used, the ``selectable`` argument + must represent the full set of tables and columns mapped by every + mapped class. Otherwise, the unaccounted mapped columns will + result in their table being appended directly to the FROM clause + which will usually lead to incorrect results. + + When left at its default value of ``False``, the polymorphic + selectable assigned to the base mapper is used for selecting rows. + However, it may also be passed as ``None``, which will bypass the + configured polymorphic selectable and instead construct an ad-hoc + selectable for the target classes given; for joined table inheritance + this will be a join that includes all target mappers and their + subclasses. + + :param polymorphic_on: a column to be used as the "discriminator" + column for the given selectable. If not given, the polymorphic_on + attribute of the base classes' mapper will be used, if any. This + is useful for mappings that don't have polymorphic loading + behavior by default. + + :param innerjoin: if True, an INNER JOIN will be used. This should + only be specified if querying for one specific subtype only + + :param adapt_on_names: Passes through the + :paramref:`_orm.aliased.adapt_on_names` + parameter to the aliased object. This may be useful in situations where + the given selectable is not directly related to the existing mapped + selectable. + + .. versionadded:: 1.4.33 + + :param name: Name given to the generated :class:`.AliasedClass`. + + .. versionadded:: 2.0.31 + + """ + return AliasedInsp._with_polymorphic_factory( + base, + classes, + selectable=selectable, + flat=flat, + polymorphic_on=polymorphic_on, + adapt_on_names=adapt_on_names, + aliased=aliased, + innerjoin=innerjoin, + name=name, + _use_mapper_path=_use_mapper_path, + ) + + +def join( + left: _FromClauseArgument, + right: _FromClauseArgument, + onclause: Optional[_OnClauseArgument] = None, + isouter: bool = False, + full: bool = False, +) -> _ORMJoin: + r"""Produce an inner join between left and right clauses. + + :func:`_orm.join` is an extension to the core join interface + provided by :func:`_expression.join()`, where the + left and right selectable may be not only core selectable + objects such as :class:`_schema.Table`, but also mapped classes or + :class:`.AliasedClass` instances. The "on" clause can + be a SQL expression or an ORM mapped attribute + referencing a configured :func:`_orm.relationship`. + + :func:`_orm.join` is not commonly needed in modern usage, + as its functionality is encapsulated within that of the + :meth:`_sql.Select.join` and :meth:`_query.Query.join` + methods. which feature a + significant amount of automation beyond :func:`_orm.join` + by itself. Explicit use of :func:`_orm.join` + with ORM-enabled SELECT statements involves use of the + :meth:`_sql.Select.select_from` method, as in:: + + from sqlalchemy.orm import join + + stmt = ( + select(User) + .select_from(join(User, Address, User.addresses)) + .filter(Address.email_address == "foo@bar.com") + ) + + In modern SQLAlchemy the above join can be written more + succinctly as:: + + stmt = ( + select(User) + .join(User.addresses) + .filter(Address.email_address == "foo@bar.com") + ) + + .. warning:: using :func:`_orm.join` directly may not work properly + with modern ORM options such as :func:`_orm.with_loader_criteria`. + It is strongly recommended to use the idiomatic join patterns + provided by methods such as :meth:`.Select.join` and + :meth:`.Select.join_from` when creating ORM joins. + + .. 
seealso:: + + :ref:`orm_queryguide_joins` - in the :ref:`queryguide_toplevel` for + background on idiomatic ORM join patterns + + """ + return _ORMJoin(left, right, onclause, isouter, full) + + +def outerjoin( + left: _FromClauseArgument, + right: _FromClauseArgument, + onclause: Optional[_OnClauseArgument] = None, + full: bool = False, +) -> _ORMJoin: + """Produce a left outer join between left and right clauses. + + This is the "outer join" version of the :func:`_orm.join` function, + featuring the same behavior except that an OUTER JOIN is generated. + See that function's documentation for other usage details. + + """ + return _ORMJoin(left, right, onclause, True, full) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/_typing.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/_typing.py new file mode 100644 index 0000000000000000000000000000000000000000..ccb8413b5243ab08eec0fdeaad8f1e247cc7e281 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/_typing.py @@ -0,0 +1,179 @@ +# orm/_typing.py +# Copyright (C) 2022-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php + +from __future__ import annotations + +import operator +from typing import Any +from typing import Dict +from typing import Mapping +from typing import Optional +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING +from typing import TypeVar +from typing import Union + +from ..engine.interfaces import _CoreKnownExecutionOptions +from ..sql import roles +from ..sql._orm_types import DMLStrategyArgument as DMLStrategyArgument +from ..sql._orm_types import ( + SynchronizeSessionArgument as SynchronizeSessionArgument, +) +from ..sql._typing import _HasClauseElement +from ..sql.elements import ColumnElement +from ..util.typing import Protocol +from ..util.typing import TypeGuard + +if TYPE_CHECKING: + from .attributes import AttributeImpl + from .attributes import CollectionAttributeImpl + from .attributes import HasCollectionAdapter + from .attributes import QueryableAttribute + from .base import PassiveFlag + from .decl_api import registry as _registry_type + from .interfaces import InspectionAttr + from .interfaces import MapperProperty + from .interfaces import ORMOption + from .interfaces import UserDefinedOption + from .mapper import Mapper + from .relationships import RelationshipProperty + from .state import InstanceState + from .util import AliasedClass + from .util import AliasedInsp + from ..sql._typing import _CE + from ..sql.base import ExecutableOption + +_T = TypeVar("_T", bound=Any) + + +_T_co = TypeVar("_T_co", bound=Any, covariant=True) + +_O = TypeVar("_O", bound=object) +"""The 'ORM mapped object' type. 
+ +""" + + +if TYPE_CHECKING: + _RegistryType = _registry_type + +_InternalEntityType = Union["Mapper[_T]", "AliasedInsp[_T]"] + +_ExternalEntityType = Union[Type[_T], "AliasedClass[_T]"] + +_EntityType = Union[ + Type[_T], "AliasedClass[_T]", "Mapper[_T]", "AliasedInsp[_T]" +] + + +_ClassDict = Mapping[str, Any] +_InstanceDict = Dict[str, Any] + +_IdentityKeyType = Tuple[Type[_T], Tuple[Any, ...], Optional[Any]] + +_ORMColumnExprArgument = Union[ + ColumnElement[_T], + _HasClauseElement[_T], + roles.ExpressionElementRole[_T], +] + + +_ORMCOLEXPR = TypeVar("_ORMCOLEXPR", bound=ColumnElement[Any]) + + +class _OrmKnownExecutionOptions(_CoreKnownExecutionOptions, total=False): + populate_existing: bool + autoflush: bool + synchronize_session: SynchronizeSessionArgument + dml_strategy: DMLStrategyArgument + is_delete_using: bool + is_update_from: bool + render_nulls: bool + + +OrmExecuteOptionsParameter = Union[ + _OrmKnownExecutionOptions, Mapping[str, Any] +] + + +class _ORMAdapterProto(Protocol): + """protocol for the :class:`.AliasedInsp._orm_adapt_element` method + which is a synonym for :class:`.AliasedInsp._adapt_element`. + + + """ + + def __call__(self, obj: _CE, key: Optional[str] = None) -> _CE: ... + + +class _LoaderCallable(Protocol): + def __call__( + self, state: InstanceState[Any], passive: PassiveFlag + ) -> Any: ... + + +def is_orm_option( + opt: ExecutableOption, +) -> TypeGuard[ORMOption]: + return not opt._is_core + + +def is_user_defined_option( + opt: ExecutableOption, +) -> TypeGuard[UserDefinedOption]: + return not opt._is_core and opt._is_user_defined # type: ignore + + +def is_composite_class(obj: Any) -> bool: + # inlining is_dataclass(obj) + return hasattr(obj, "__composite_values__") or hasattr( + obj, "__dataclass_fields__" + ) + + +if TYPE_CHECKING: + + def insp_is_mapper_property( + obj: Any, + ) -> TypeGuard[MapperProperty[Any]]: ... + + def insp_is_mapper(obj: Any) -> TypeGuard[Mapper[Any]]: ... + + def insp_is_aliased_class(obj: Any) -> TypeGuard[AliasedInsp[Any]]: ... + + def insp_is_attribute( + obj: InspectionAttr, + ) -> TypeGuard[QueryableAttribute[Any]]: ... + + def attr_is_internal_proxy( + obj: InspectionAttr, + ) -> TypeGuard[QueryableAttribute[Any]]: ... + + def prop_is_relationship( + prop: MapperProperty[Any], + ) -> TypeGuard[RelationshipProperty[Any]]: ... + + def is_collection_impl( + impl: AttributeImpl, + ) -> TypeGuard[CollectionAttributeImpl]: ... + + def is_has_collection_adapter( + impl: AttributeImpl, + ) -> TypeGuard[HasCollectionAdapter]: ... 
+ +else: + insp_is_mapper_property = operator.attrgetter("is_property") + insp_is_mapper = operator.attrgetter("is_mapper") + insp_is_aliased_class = operator.attrgetter("is_aliased_class") + insp_is_attribute = operator.attrgetter("is_attribute") + attr_is_internal_proxy = operator.attrgetter("_is_internal_proxy") + is_collection_impl = operator.attrgetter("collection") + prop_is_relationship = operator.attrgetter("_is_relationship") + is_has_collection_adapter = operator.attrgetter( + "_is_has_collection_adapter" + ) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/attributes.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/attributes.py new file mode 100644 index 0000000000000000000000000000000000000000..8207b4cace2ffbaaa4323e5f665bada438aa19bd --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/attributes.py @@ -0,0 +1,2835 @@ +# orm/attributes.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php +# mypy: allow-untyped-defs, allow-untyped-calls + +"""Defines instrumentation for class attributes and their interaction +with instances. + +This module is usually not directly visible to user applications, but +defines a large part of the ORM's interactivity. + + +""" + +from __future__ import annotations + +import dataclasses +import operator +from typing import Any +from typing import Callable +from typing import cast +from typing import ClassVar +from typing import Dict +from typing import Iterable +from typing import List +from typing import NamedTuple +from typing import Optional +from typing import overload +from typing import Sequence +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING +from typing import TypeVar +from typing import Union + +from . import collections +from . import exc as orm_exc +from . import interfaces +from ._typing import insp_is_aliased_class +from .base import _DeclarativeMapped +from .base import ATTR_EMPTY +from .base import ATTR_WAS_SET +from .base import CALLABLES_OK +from .base import DEFERRED_HISTORY_LOAD +from .base import INCLUDE_PENDING_MUTATIONS # noqa +from .base import INIT_OK +from .base import instance_dict as instance_dict +from .base import instance_state as instance_state +from .base import instance_str +from .base import LOAD_AGAINST_COMMITTED +from .base import LoaderCallableStatus +from .base import manager_of_class as manager_of_class +from .base import Mapped as Mapped # noqa +from .base import NEVER_SET # noqa +from .base import NO_AUTOFLUSH +from .base import NO_CHANGE # noqa +from .base import NO_KEY +from .base import NO_RAISE +from .base import NO_VALUE +from .base import NON_PERSISTENT_OK # noqa +from .base import opt_manager_of_class as opt_manager_of_class +from .base import PASSIVE_CLASS_MISMATCH # noqa +from .base import PASSIVE_NO_FETCH +from .base import PASSIVE_NO_FETCH_RELATED # noqa +from .base import PASSIVE_NO_INITIALIZE +from .base import PASSIVE_NO_RESULT +from .base import PASSIVE_OFF +from .base import PASSIVE_ONLY_PERSISTENT +from .base import PASSIVE_RETURN_NO_VALUE +from .base import PassiveFlag +from .base import RELATED_OBJECT_OK # noqa +from .base import SQL_OK # noqa +from .base import SQLORMExpression +from .base import state_str +from .. import event +from .. 
import exc +from .. import inspection +from .. import util +from ..event import dispatcher +from ..event import EventTarget +from ..sql import base as sql_base +from ..sql import cache_key +from ..sql import coercions +from ..sql import roles +from ..sql import visitors +from ..sql.cache_key import HasCacheKey +from ..sql.visitors import _TraverseInternalsType +from ..sql.visitors import InternalTraversal +from ..util.typing import Literal +from ..util.typing import Self +from ..util.typing import TypeGuard + +if TYPE_CHECKING: + from ._typing import _EntityType + from ._typing import _ExternalEntityType + from ._typing import _InstanceDict + from ._typing import _InternalEntityType + from ._typing import _LoaderCallable + from ._typing import _O + from .collections import _AdaptedCollectionProtocol + from .collections import CollectionAdapter + from .interfaces import MapperProperty + from .relationships import RelationshipProperty + from .state import InstanceState + from .util import AliasedInsp + from .writeonly import WriteOnlyAttributeImpl + from ..event.base import _Dispatch + from ..sql._typing import _ColumnExpressionArgument + from ..sql._typing import _DMLColumnArgument + from ..sql._typing import _InfoType + from ..sql._typing import _PropagateAttrsType + from ..sql.annotation import _AnnotationDict + from ..sql.elements import ColumnElement + from ..sql.elements import Label + from ..sql.operators import OperatorType + from ..sql.selectable import FromClause + + +_T = TypeVar("_T") +_T_co = TypeVar("_T_co", bound=Any, covariant=True) + + +_AllPendingType = Sequence[ + Tuple[Optional["InstanceState[Any]"], Optional[object]] +] + + +_UNKNOWN_ATTR_KEY = object() + + +@inspection._self_inspects +class QueryableAttribute( + _DeclarativeMapped[_T_co], + SQLORMExpression[_T_co], + interfaces.InspectionAttr, + interfaces.PropComparator[_T_co], + roles.JoinTargetRole, + roles.OnClauseRole, + sql_base.Immutable, + cache_key.SlotsMemoizedHasCacheKey, + util.MemoizedSlots, + EventTarget, +): + """Base class for :term:`descriptor` objects that intercept + attribute events on behalf of a :class:`.MapperProperty` + object. The actual :class:`.MapperProperty` is accessible + via the :attr:`.QueryableAttribute.property` + attribute. + + + .. seealso:: + + :class:`.InstrumentedAttribute` + + :class:`.MapperProperty` + + :attr:`_orm.Mapper.all_orm_descriptors` + + :attr:`_orm.Mapper.attrs` + """ + + __slots__ = ( + "class_", + "key", + "impl", + "comparator", + "property", + "parent", + "expression", + "_of_type", + "_extra_criteria", + "_slots_dispatch", + "_propagate_attrs", + "_doc", + ) + + is_attribute = True + + dispatch: dispatcher[QueryableAttribute[_T_co]] + + class_: _ExternalEntityType[Any] + key: str + parententity: _InternalEntityType[Any] + impl: AttributeImpl + comparator: interfaces.PropComparator[_T_co] + _of_type: Optional[_InternalEntityType[Any]] + _extra_criteria: Tuple[ColumnElement[bool], ...] + _doc: Optional[str] + + # PropComparator has a __visit_name__ to participate within + # traversals. Disambiguate the attribute vs. a comparator. + __visit_name__ = "orm_instrumented_attribute" + + def __init__( + self, + class_: _ExternalEntityType[_O], + key: str, + parententity: _InternalEntityType[_O], + comparator: interfaces.PropComparator[_T_co], + impl: Optional[AttributeImpl] = None, + of_type: Optional[_InternalEntityType[Any]] = None, + extra_criteria: Tuple[ColumnElement[bool], ...] 
= (), + ): + self.class_ = class_ + self.key = key + + self._parententity = self.parent = parententity + + # this attribute is non-None after mappers are set up, however in the + # interim class manager setup, there's a check for None to see if it + # needs to be populated, so we assign None here leaving the attribute + # in a temporarily not-type-correct state + self.impl = impl # type: ignore + + assert comparator is not None + self.comparator = comparator + self._of_type = of_type + self._extra_criteria = extra_criteria + self._doc = None + + manager = opt_manager_of_class(class_) + # manager is None in the case of AliasedClass + if manager: + # propagate existing event listeners from + # immediate superclass + for base in manager._bases: + if key in base: + self.dispatch._update(base[key].dispatch) + if base[key].dispatch._active_history: + self.dispatch._active_history = True # type: ignore + + _cache_key_traversal = [ + ("key", visitors.ExtendedInternalTraversal.dp_string), + ("_parententity", visitors.ExtendedInternalTraversal.dp_multi), + ("_of_type", visitors.ExtendedInternalTraversal.dp_multi), + ("_extra_criteria", visitors.InternalTraversal.dp_clauseelement_list), + ] + + def __reduce__(self) -> Any: + # this method is only used in terms of the + # sqlalchemy.ext.serializer extension + return ( + _queryable_attribute_unreduce, + ( + self.key, + self._parententity.mapper.class_, + self._parententity, + self._parententity.entity, + ), + ) + + @property + def _impl_uses_objects(self) -> bool: + return self.impl.uses_objects + + def get_history( + self, instance: Any, passive: PassiveFlag = PASSIVE_OFF + ) -> History: + return self.impl.get_history( + instance_state(instance), instance_dict(instance), passive + ) + + @property + def info(self) -> _InfoType: + """Return the 'info' dictionary for the underlying SQL element. + + The behavior here is as follows: + + * If the attribute is a column-mapped property, i.e. + :class:`.ColumnProperty`, which is mapped directly + to a schema-level :class:`_schema.Column` object, this attribute + will return the :attr:`.SchemaItem.info` dictionary associated + with the core-level :class:`_schema.Column` object. + + * If the attribute is a :class:`.ColumnProperty` but is mapped to + any other kind of SQL expression other than a + :class:`_schema.Column`, + the attribute will refer to the :attr:`.MapperProperty.info` + dictionary associated directly with the :class:`.ColumnProperty`, + assuming the SQL expression itself does not have its own ``.info`` + attribute (which should be the case, unless a user-defined SQL + construct has defined one). + + * If the attribute refers to any other kind of + :class:`.MapperProperty`, including :class:`.Relationship`, + the attribute will refer to the :attr:`.MapperProperty.info` + dictionary associated with that :class:`.MapperProperty`. + + * To access the :attr:`.MapperProperty.info` dictionary of the + :class:`.MapperProperty` unconditionally, including for a + :class:`.ColumnProperty` that's associated directly with a + :class:`_schema.Column`, the attribute can be referred to using + :attr:`.QueryableAttribute.property` attribute, as + ``MyClass.someattribute.property.info``. + + .. seealso:: + + :attr:`.SchemaItem.info` + + :attr:`.MapperProperty.info` + + """ + return self.comparator.info + + parent: _InternalEntityType[Any] + """Return an inspection instance representing the parent. 
+ + This will be either an instance of :class:`_orm.Mapper` + or :class:`.AliasedInsp`, depending upon the nature + of the parent entity which this attribute is associated + with. + + """ + + expression: ColumnElement[_T_co] + """The SQL expression object represented by this + :class:`.QueryableAttribute`. + + This will typically be an instance of a :class:`_sql.ColumnElement` + subclass representing a column expression. + + """ + + def _memoized_attr_expression(self) -> ColumnElement[_T]: + annotations: _AnnotationDict + + # applies only to Proxy() as used by hybrid. + # currently is an exception to typing rather than feeding through + # non-string keys. + # ideally Proxy() would have a separate set of methods to deal + # with this case. + entity_namespace = self._entity_namespace + assert isinstance(entity_namespace, HasCacheKey) + + if self.key is _UNKNOWN_ATTR_KEY: + annotations = {"entity_namespace": entity_namespace} + else: + annotations = { + "proxy_key": self.key, + "proxy_owner": self._parententity, + "entity_namespace": entity_namespace, + } + + ce = self.comparator.__clause_element__() + try: + if TYPE_CHECKING: + assert isinstance(ce, ColumnElement) + anno = ce._annotate + except AttributeError as ae: + raise exc.InvalidRequestError( + 'When interpreting attribute "%s" as a SQL expression, ' + "expected __clause_element__() to return " + "a ClauseElement object, got: %r" % (self, ce) + ) from ae + else: + return anno(annotations) + + def _memoized_attr__propagate_attrs(self) -> _PropagateAttrsType: + # this suits the case in coercions where we don't actually + # call ``__clause_element__()`` but still need to get + # resolved._propagate_attrs. See #6558. + return util.immutabledict( + { + "compile_state_plugin": "orm", + "plugin_subject": self._parentmapper, + } + ) + + @property + def _entity_namespace(self) -> _InternalEntityType[Any]: + return self._parententity + + @property + def _annotations(self) -> _AnnotationDict: + return self.__clause_element__()._annotations + + def __clause_element__(self) -> ColumnElement[_T_co]: + return self.expression + + @property + def _from_objects(self) -> List[FromClause]: + return self.expression._from_objects + + def _bulk_update_tuples( + self, value: Any + ) -> Sequence[Tuple[_DMLColumnArgument, Any]]: + """Return setter tuples for a bulk UPDATE.""" + + return self.comparator._bulk_update_tuples(value) + + def adapt_to_entity(self, adapt_to_entity: AliasedInsp[Any]) -> Self: + assert not self._of_type + return self.__class__( + adapt_to_entity.entity, + self.key, + impl=self.impl, + comparator=self.comparator.adapt_to_entity(adapt_to_entity), + parententity=adapt_to_entity, + ) + + def of_type(self, entity: _EntityType[_T]) -> QueryableAttribute[_T]: + return QueryableAttribute( + self.class_, + self.key, + self._parententity, + impl=self.impl, + comparator=self.comparator.of_type(entity), + of_type=inspection.inspect(entity), + extra_criteria=self._extra_criteria, + ) + + def and_( + self, *clauses: _ColumnExpressionArgument[bool] + ) -> QueryableAttribute[bool]: + if TYPE_CHECKING: + assert isinstance(self.comparator, RelationshipProperty.Comparator) + + exprs = tuple( + coercions.expect(roles.WhereHavingRole, clause) + for clause in util.coerce_generator_arg(clauses) + ) + + return QueryableAttribute( + self.class_, + self.key, + self._parententity, + impl=self.impl, + comparator=self.comparator.and_(*exprs), + of_type=self._of_type, + extra_criteria=self._extra_criteria + exprs, + ) + + def _clone(self, **kw: Any) -> 
QueryableAttribute[_T]: + return QueryableAttribute( + self.class_, + self.key, + self._parententity, + impl=self.impl, + comparator=self.comparator, + of_type=self._of_type, + extra_criteria=self._extra_criteria, + ) + + def label(self, name: Optional[str]) -> Label[_T_co]: + return self.__clause_element__().label(name) + + def operate( + self, op: OperatorType, *other: Any, **kwargs: Any + ) -> ColumnElement[Any]: + return op(self.comparator, *other, **kwargs) # type: ignore[no-any-return] # noqa: E501 + + def reverse_operate( + self, op: OperatorType, other: Any, **kwargs: Any + ) -> ColumnElement[Any]: + return op(other, self.comparator, **kwargs) # type: ignore[no-any-return] # noqa: E501 + + def hasparent( + self, state: InstanceState[Any], optimistic: bool = False + ) -> bool: + return self.impl.hasparent(state, optimistic=optimistic) is not False + + def __getattr__(self, key: str) -> Any: + try: + return util.MemoizedSlots.__getattr__(self, key) + except AttributeError: + pass + + try: + return getattr(self.comparator, key) + except AttributeError as err: + raise AttributeError( + "Neither %r object nor %r object associated with %s " + "has an attribute %r" + % ( + type(self).__name__, + type(self.comparator).__name__, + self, + key, + ) + ) from err + + def __str__(self) -> str: + return f"{self.class_.__name__}.{self.key}" + + def _memoized_attr_property(self) -> Optional[MapperProperty[Any]]: + return self.comparator.property + + +def _queryable_attribute_unreduce( + key: str, + mapped_class: Type[_O], + parententity: _InternalEntityType[_O], + entity: _ExternalEntityType[Any], +) -> Any: + # this method is only used in terms of the + # sqlalchemy.ext.serializer extension + if insp_is_aliased_class(parententity): + return entity._get_from_serialized(key, mapped_class, parententity) + else: + return getattr(entity, key) + + +class InstrumentedAttribute(QueryableAttribute[_T_co]): + """Class bound instrumented attribute which adds basic + :term:`descriptor` methods. + + See :class:`.QueryableAttribute` for a description of most features. + + + """ + + __slots__ = () + + inherit_cache = True + """:meta private:""" + + # hack to make __doc__ writeable on instances of + # InstrumentedAttribute, while still keeping classlevel + # __doc__ correct + + @util.rw_hybridproperty + def __doc__(self) -> Optional[str]: + return self._doc + + @__doc__.setter # type: ignore + def __doc__(self, value: Optional[str]) -> None: + self._doc = value + + @__doc__.classlevel # type: ignore + def __doc__(cls) -> Optional[str]: + return super().__doc__ + + def __set__(self, instance: object, value: Any) -> None: + self.impl.set( + instance_state(instance), instance_dict(instance), value, None + ) + + def __delete__(self, instance: object) -> None: + self.impl.delete(instance_state(instance), instance_dict(instance)) + + @overload + def __get__( + self, instance: None, owner: Any + ) -> InstrumentedAttribute[_T_co]: ... + + @overload + def __get__(self, instance: object, owner: Any) -> _T_co: ... 
+ + def __get__( + self, instance: Optional[object], owner: Any + ) -> Union[InstrumentedAttribute[_T_co], _T_co]: + if instance is None: + return self + + dict_ = instance_dict(instance) + if self.impl.supports_population and self.key in dict_: + return dict_[self.key] # type: ignore[no-any-return] + else: + try: + state = instance_state(instance) + except AttributeError as err: + raise orm_exc.UnmappedInstanceError(instance) from err + return self.impl.get(state, dict_) # type: ignore[no-any-return] + + +@dataclasses.dataclass(frozen=True) +class AdHocHasEntityNamespace(HasCacheKey): + _traverse_internals: ClassVar[_TraverseInternalsType] = [ + ("_entity_namespace", InternalTraversal.dp_has_cache_key), + ] + + # py37 compat, no slots=True on dataclass + __slots__ = ("_entity_namespace",) + _entity_namespace: _InternalEntityType[Any] + is_mapper: ClassVar[bool] = False + is_aliased_class: ClassVar[bool] = False + + @property + def entity_namespace(self): + return self._entity_namespace.entity_namespace + + +def create_proxied_attribute( + descriptor: Any, +) -> Callable[..., QueryableAttribute[Any]]: + """Create an QueryableAttribute / user descriptor hybrid. + + Returns a new QueryableAttribute type that delegates descriptor + behavior and getattr() to the given descriptor. + """ + + # TODO: can move this to descriptor_props if the need for this + # function is removed from ext/hybrid.py + + class Proxy(QueryableAttribute[Any]): + """Presents the :class:`.QueryableAttribute` interface as a + proxy on top of a Python descriptor / :class:`.PropComparator` + combination. + + """ + + _extra_criteria = () + + # the attribute error catches inside of __getattr__ basically create a + # singularity if you try putting slots on this too + # __slots__ = ("descriptor", "original_property", "_comparator") + + def __init__( + self, + class_, + key, + descriptor, + comparator, + adapt_to_entity=None, + doc=None, + original_property=None, + ): + self.class_ = class_ + self.key = key + self.descriptor = descriptor + self.original_property = original_property + self._comparator = comparator + self._adapt_to_entity = adapt_to_entity + self._doc = self.__doc__ = doc + + @property + def _parententity(self): + return inspection.inspect(self.class_, raiseerr=False) + + @property + def parent(self): + return inspection.inspect(self.class_, raiseerr=False) + + _is_internal_proxy = True + + _cache_key_traversal = [ + ("key", visitors.ExtendedInternalTraversal.dp_string), + ("_parententity", visitors.ExtendedInternalTraversal.dp_multi), + ] + + @property + def _impl_uses_objects(self): + return ( + self.original_property is not None + and getattr(self.class_, self.key).impl.uses_objects + ) + + @property + def _entity_namespace(self): + if hasattr(self._comparator, "_parententity"): + return self._comparator._parententity + else: + # used by hybrid attributes which try to remain + # agnostic of any ORM concepts like mappers + return AdHocHasEntityNamespace(self._parententity) + + @property + def property(self): + return self.comparator.property + + @util.memoized_property + def comparator(self): + if callable(self._comparator): + self._comparator = self._comparator() + if self._adapt_to_entity: + self._comparator = self._comparator.adapt_to_entity( + self._adapt_to_entity + ) + return self._comparator + + def adapt_to_entity(self, adapt_to_entity): + return self.__class__( + adapt_to_entity.entity, + self.key, + self.descriptor, + self._comparator, + adapt_to_entity, + ) + + def _clone(self, **kw): + return 
self.__class__( + self.class_, + self.key, + self.descriptor, + self._comparator, + adapt_to_entity=self._adapt_to_entity, + original_property=self.original_property, + ) + + def __get__(self, instance, owner): + retval = self.descriptor.__get__(instance, owner) + # detect if this is a plain Python @property, which just returns + # itself for class level access. If so, then return us. + # Otherwise, return the object returned by the descriptor. + if retval is self.descriptor and instance is None: + return self + else: + return retval + + def __str__(self) -> str: + return f"{self.class_.__name__}.{self.key}" + + def __getattr__(self, attribute): + """Delegate __getattr__ to the original descriptor and/or + comparator.""" + + # this is unfortunately very complicated, and is easily prone + # to recursion overflows when implementations of related + # __getattr__ schemes are changed + + try: + return util.MemoizedSlots.__getattr__(self, attribute) + except AttributeError: + pass + + try: + return getattr(descriptor, attribute) + except AttributeError as err: + if attribute == "comparator": + raise AttributeError("comparator") from err + try: + # comparator itself might be unreachable + comparator = self.comparator + except AttributeError as err2: + raise AttributeError( + "Neither %r object nor unconfigured comparator " + "object associated with %s has an attribute %r" + % (type(descriptor).__name__, self, attribute) + ) from err2 + else: + try: + return getattr(comparator, attribute) + except AttributeError as err3: + raise AttributeError( + "Neither %r object nor %r object " + "associated with %s has an attribute %r" + % ( + type(descriptor).__name__, + type(comparator).__name__, + self, + attribute, + ) + ) from err3 + + Proxy.__name__ = type(descriptor).__name__ + "Proxy" + + util.monkeypatch_proxied_specials( + Proxy, type(descriptor), name="descriptor", from_instance=descriptor + ) + return Proxy + + +OP_REMOVE = util.symbol("REMOVE") +OP_APPEND = util.symbol("APPEND") +OP_REPLACE = util.symbol("REPLACE") +OP_BULK_REPLACE = util.symbol("BULK_REPLACE") +OP_MODIFIED = util.symbol("MODIFIED") + + +class AttributeEventToken: + """A token propagated throughout the course of a chain of attribute + events. + + Serves as an indicator of the source of the event and also provides + a means of controlling propagation across a chain of attribute + operations. + + The :class:`.Event` object is sent as the ``initiator`` argument + when dealing with events such as :meth:`.AttributeEvents.append`, + :meth:`.AttributeEvents.set`, + and :meth:`.AttributeEvents.remove`. + + The :class:`.Event` object is currently interpreted by the backref + event handlers, and is used to control the propagation of operations + across two mutually-dependent attributes. + + .. versionchanged:: 2.0 Changed the name from ``AttributeEvent`` + to ``AttributeEventToken``. + + :attribute impl: The :class:`.AttributeImpl` which is the current event + initiator. + + :attribute op: The symbol :attr:`.OP_APPEND`, :attr:`.OP_REMOVE`, + :attr:`.OP_REPLACE`, or :attr:`.OP_BULK_REPLACE`, indicating the + source operation. 
+ + """ + + __slots__ = "impl", "op", "parent_token" + + def __init__(self, attribute_impl: AttributeImpl, op: util.symbol): + self.impl = attribute_impl + self.op = op + self.parent_token = self.impl.parent_token + + def __eq__(self, other): + return ( + isinstance(other, AttributeEventToken) + and other.impl is self.impl + and other.op == self.op + ) + + @property + def key(self): + return self.impl.key + + def hasparent(self, state): + return self.impl.hasparent(state) + + +AttributeEvent = AttributeEventToken # legacy +Event = AttributeEventToken # legacy + + +class AttributeImpl: + """internal implementation for instrumented attributes.""" + + collection: bool + default_accepts_scalar_loader: bool + uses_objects: bool + supports_population: bool + dynamic: bool + + _is_has_collection_adapter = False + + _replace_token: AttributeEventToken + _remove_token: AttributeEventToken + _append_token: AttributeEventToken + + def __init__( + self, + class_: _ExternalEntityType[_O], + key: str, + callable_: Optional[_LoaderCallable], + dispatch: _Dispatch[QueryableAttribute[Any]], + trackparent: bool = False, + compare_function: Optional[Callable[..., bool]] = None, + active_history: bool = False, + parent_token: Optional[AttributeEventToken] = None, + load_on_unexpire: bool = True, + send_modified_events: bool = True, + accepts_scalar_loader: Optional[bool] = None, + **kwargs: Any, + ): + r"""Construct an AttributeImpl. + + :param \class_: associated class + + :param key: string name of the attribute + + :param \callable_: + optional function which generates a callable based on a parent + instance, which produces the "default" values for a scalar or + collection attribute when it's first accessed, if not present + already. + + :param trackparent: + if True, attempt to track if an instance has a parent attached + to it via this attribute. + + :param compare_function: + a function that compares two values which are normally + assignable to this attribute. + + :param active_history: + indicates that get_history() should always return the "old" value, + even if it means executing a lazy callable upon attribute change. + + :param parent_token: + Usually references the MapperProperty, used as a key for + the hasparent() function to identify an "owning" attribute. + Allows multiple AttributeImpls to all match a single + owner attribute. + + :param load_on_unexpire: + if False, don't include this attribute in a load-on-expired + operation, i.e. the "expired_attribute_loader" process. + The attribute can still be in the "expired" list and be + considered to be "expired". Previously, this flag was called + "expire_missing" and is only used by a deferred column + attribute. + + :param send_modified_events: + if False, the InstanceState._modified_event method will have no + effect; this means the attribute will never show up as changed in a + history entry. 
+ + """ + self.class_ = class_ + self.key = key + self.callable_ = callable_ + self.dispatch = dispatch + self.trackparent = trackparent + self.parent_token = parent_token or self + self.send_modified_events = send_modified_events + if compare_function is None: + self.is_equal = operator.eq + else: + self.is_equal = compare_function + + if accepts_scalar_loader is not None: + self.accepts_scalar_loader = accepts_scalar_loader + else: + self.accepts_scalar_loader = self.default_accepts_scalar_loader + + _deferred_history = kwargs.pop("_deferred_history", False) + self._deferred_history = _deferred_history + + if active_history: + self.dispatch._active_history = True + + self.load_on_unexpire = load_on_unexpire + self._modified_token = AttributeEventToken(self, OP_MODIFIED) + + __slots__ = ( + "class_", + "key", + "callable_", + "dispatch", + "trackparent", + "parent_token", + "send_modified_events", + "is_equal", + "load_on_unexpire", + "_modified_token", + "accepts_scalar_loader", + "_deferred_history", + ) + + def __str__(self) -> str: + return f"{self.class_.__name__}.{self.key}" + + def _get_active_history(self): + """Backwards compat for impl.active_history""" + + return self.dispatch._active_history + + def _set_active_history(self, value): + self.dispatch._active_history = value + + active_history = property(_get_active_history, _set_active_history) + + def hasparent( + self, state: InstanceState[Any], optimistic: bool = False + ) -> bool: + """Return the boolean value of a `hasparent` flag attached to + the given state. + + The `optimistic` flag determines what the default return value + should be if no `hasparent` flag can be located. + + As this function is used to determine if an instance is an + *orphan*, instances that were loaded from storage should be + assumed to not be orphans, until a True/False value for this + flag is set. + + An instance attribute that is loaded by a callable function + will also not have a `hasparent` flag. + + """ + msg = "This AttributeImpl is not configured to track parents." + assert self.trackparent, msg + + return ( + state.parents.get(id(self.parent_token), optimistic) is not False + ) + + def sethasparent( + self, + state: InstanceState[Any], + parent_state: InstanceState[Any], + value: bool, + ) -> None: + """Set a boolean flag on the given item corresponding to + whether or not it is attached to a parent object via the + attribute represented by this ``InstrumentedAttribute``. + + """ + msg = "This AttributeImpl is not configured to track parents." + assert self.trackparent, msg + + id_ = id(self.parent_token) + if value: + state.parents[id_] = parent_state + else: + if id_ in state.parents: + last_parent = state.parents[id_] + + if ( + last_parent is not False + and last_parent.key != parent_state.key + ): + if last_parent.obj() is None: + raise orm_exc.StaleDataError( + "Removing state %s from parent " + "state %s along attribute '%s', " + "but the parent record " + "has gone stale, can't be sure this " + "is the most recent parent." 
+ % ( + state_str(state), + state_str(parent_state), + self.key, + ) + ) + + return + + state.parents[id_] = False + + def get_history( + self, + state: InstanceState[Any], + dict_: _InstanceDict, + passive: PassiveFlag = PASSIVE_OFF, + ) -> History: + raise NotImplementedError() + + def get_all_pending( + self, + state: InstanceState[Any], + dict_: _InstanceDict, + passive: PassiveFlag = PASSIVE_NO_INITIALIZE, + ) -> _AllPendingType: + """Return a list of tuples of (state, obj) + for all objects in this attribute's current state + + history. + + Only applies to object-based attributes. + + This is an inlining of existing functionality + which roughly corresponds to: + + get_state_history( + state, + key, + passive=PASSIVE_NO_INITIALIZE).sum() + + """ + raise NotImplementedError() + + def _default_value( + self, state: InstanceState[Any], dict_: _InstanceDict + ) -> Any: + """Produce an empty value for an uninitialized scalar attribute.""" + + assert self.key not in dict_, ( + "_default_value should only be invoked for an " + "uninitialized or expired attribute" + ) + + value = None + for fn in self.dispatch.init_scalar: + ret = fn(state, value, dict_) + if ret is not ATTR_EMPTY: + value = ret + + return value + + def get( + self, + state: InstanceState[Any], + dict_: _InstanceDict, + passive: PassiveFlag = PASSIVE_OFF, + ) -> Any: + """Retrieve a value from the given object. + If a callable is assembled on this object's attribute, and + passive is False, the callable will be executed and the + resulting value will be set as the new value for this attribute. + """ + if self.key in dict_: + return dict_[self.key] + else: + # if history present, don't load + key = self.key + if ( + key not in state.committed_state + or state.committed_state[key] is NO_VALUE + ): + if not passive & CALLABLES_OK: + return PASSIVE_NO_RESULT + + value = self._fire_loader_callables(state, key, passive) + + if value is PASSIVE_NO_RESULT or value is NO_VALUE: + return value + elif value is ATTR_WAS_SET: + try: + return dict_[key] + except KeyError as err: + # TODO: no test coverage here. 
+ raise KeyError( + "Deferred loader for attribute " + "%r failed to populate " + "correctly" % key + ) from err + elif value is not ATTR_EMPTY: + return self.set_committed_value(state, dict_, value) + + if not passive & INIT_OK: + return NO_VALUE + else: + return self._default_value(state, dict_) + + def _fire_loader_callables( + self, state: InstanceState[Any], key: str, passive: PassiveFlag + ) -> Any: + if ( + self.accepts_scalar_loader + and self.load_on_unexpire + and key in state.expired_attributes + ): + return state._load_expired(state, passive) + elif key in state.callables: + callable_ = state.callables[key] + return callable_(state, passive) + elif self.callable_: + return self.callable_(state, passive) + else: + return ATTR_EMPTY + + def append( + self, + state: InstanceState[Any], + dict_: _InstanceDict, + value: Any, + initiator: Optional[AttributeEventToken], + passive: PassiveFlag = PASSIVE_OFF, + ) -> None: + self.set(state, dict_, value, initiator, passive=passive) + + def remove( + self, + state: InstanceState[Any], + dict_: _InstanceDict, + value: Any, + initiator: Optional[AttributeEventToken], + passive: PassiveFlag = PASSIVE_OFF, + ) -> None: + self.set( + state, dict_, None, initiator, passive=passive, check_old=value + ) + + def pop( + self, + state: InstanceState[Any], + dict_: _InstanceDict, + value: Any, + initiator: Optional[AttributeEventToken], + passive: PassiveFlag = PASSIVE_OFF, + ) -> None: + self.set( + state, + dict_, + None, + initiator, + passive=passive, + check_old=value, + pop=True, + ) + + def set( + self, + state: InstanceState[Any], + dict_: _InstanceDict, + value: Any, + initiator: Optional[AttributeEventToken] = None, + passive: PassiveFlag = PASSIVE_OFF, + check_old: Any = None, + pop: bool = False, + ) -> None: + raise NotImplementedError() + + def delete(self, state: InstanceState[Any], dict_: _InstanceDict) -> None: + raise NotImplementedError() + + def get_committed_value( + self, + state: InstanceState[Any], + dict_: _InstanceDict, + passive: PassiveFlag = PASSIVE_OFF, + ) -> Any: + """return the unchanged value of this attribute""" + + if self.key in state.committed_state: + value = state.committed_state[self.key] + if value is NO_VALUE: + return None + else: + return value + else: + return self.get(state, dict_, passive=passive) + + def set_committed_value(self, state, dict_, value): + """set an attribute value on the given instance and 'commit' it.""" + + dict_[self.key] = value + state._commit(dict_, [self.key]) + return value + + +class ScalarAttributeImpl(AttributeImpl): + """represents a scalar value-holding InstrumentedAttribute.""" + + default_accepts_scalar_loader = True + uses_objects = False + supports_population = True + collection = False + dynamic = False + + __slots__ = "_replace_token", "_append_token", "_remove_token" + + def __init__(self, *arg, **kw): + super().__init__(*arg, **kw) + self._replace_token = self._append_token = AttributeEventToken( + self, OP_REPLACE + ) + self._remove_token = AttributeEventToken(self, OP_REMOVE) + + def delete(self, state: InstanceState[Any], dict_: _InstanceDict) -> None: + if self.dispatch._active_history: + old = self.get(state, dict_, PASSIVE_RETURN_NO_VALUE) + else: + old = dict_.get(self.key, NO_VALUE) + + if self.dispatch.remove: + self.fire_remove_event(state, dict_, old, self._remove_token) + state._modified_event(dict_, self, old) + + existing = dict_.pop(self.key, NO_VALUE) + if ( + existing is NO_VALUE + and old is NO_VALUE + and not state.expired + and self.key not in 
state.expired_attributes + ): + raise AttributeError("%s object does not have a value" % self) + + def get_history( + self, + state: InstanceState[Any], + dict_: Dict[str, Any], + passive: PassiveFlag = PASSIVE_OFF, + ) -> History: + if self.key in dict_: + return History.from_scalar_attribute(self, state, dict_[self.key]) + elif self.key in state.committed_state: + return History.from_scalar_attribute(self, state, NO_VALUE) + else: + if passive & INIT_OK: + passive ^= INIT_OK + current = self.get(state, dict_, passive=passive) + if current is PASSIVE_NO_RESULT: + return HISTORY_BLANK + else: + return History.from_scalar_attribute(self, state, current) + + def set( + self, + state: InstanceState[Any], + dict_: Dict[str, Any], + value: Any, + initiator: Optional[AttributeEventToken] = None, + passive: PassiveFlag = PASSIVE_OFF, + check_old: Optional[object] = None, + pop: bool = False, + ) -> None: + if self.dispatch._active_history: + old = self.get(state, dict_, PASSIVE_RETURN_NO_VALUE) + else: + old = dict_.get(self.key, NO_VALUE) + + if self.dispatch.set: + value = self.fire_replace_event( + state, dict_, value, old, initiator + ) + state._modified_event(dict_, self, old) + dict_[self.key] = value + + def fire_replace_event( + self, + state: InstanceState[Any], + dict_: _InstanceDict, + value: _T, + previous: Any, + initiator: Optional[AttributeEventToken], + ) -> _T: + for fn in self.dispatch.set: + value = fn( + state, value, previous, initiator or self._replace_token + ) + return value + + def fire_remove_event( + self, + state: InstanceState[Any], + dict_: _InstanceDict, + value: Any, + initiator: Optional[AttributeEventToken], + ) -> None: + for fn in self.dispatch.remove: + fn(state, value, initiator or self._remove_token) + + +class ScalarObjectAttributeImpl(ScalarAttributeImpl): + """represents a scalar-holding InstrumentedAttribute, + where the target object is also instrumented. + + Adds events to delete/set operations. + + """ + + default_accepts_scalar_loader = False + uses_objects = True + supports_population = True + collection = False + + __slots__ = () + + def delete(self, state: InstanceState[Any], dict_: _InstanceDict) -> None: + if self.dispatch._active_history: + old = self.get( + state, + dict_, + passive=PASSIVE_ONLY_PERSISTENT + | NO_AUTOFLUSH + | LOAD_AGAINST_COMMITTED, + ) + else: + old = self.get( + state, + dict_, + passive=PASSIVE_NO_FETCH ^ INIT_OK + | LOAD_AGAINST_COMMITTED + | NO_RAISE, + ) + + self.fire_remove_event(state, dict_, old, self._remove_token) + + existing = dict_.pop(self.key, NO_VALUE) + + # if the attribute is expired, we currently have no way to tell + # that an object-attribute was expired vs. not loaded. So + # for this test, we look to see if the object has a DB identity. 
+ if ( + existing is NO_VALUE + and old is not PASSIVE_NO_RESULT + and state.key is None + ): + raise AttributeError("%s object does not have a value" % self) + + def get_history( + self, + state: InstanceState[Any], + dict_: _InstanceDict, + passive: PassiveFlag = PASSIVE_OFF, + ) -> History: + if self.key in dict_: + current = dict_[self.key] + else: + if passive & INIT_OK: + passive ^= INIT_OK + current = self.get(state, dict_, passive=passive) + if current is PASSIVE_NO_RESULT: + return HISTORY_BLANK + + if not self._deferred_history: + return History.from_object_attribute(self, state, current) + else: + original = state.committed_state.get(self.key, _NO_HISTORY) + if original is PASSIVE_NO_RESULT: + loader_passive = passive | ( + PASSIVE_ONLY_PERSISTENT + | NO_AUTOFLUSH + | LOAD_AGAINST_COMMITTED + | NO_RAISE + | DEFERRED_HISTORY_LOAD + ) + original = self._fire_loader_callables( + state, self.key, loader_passive + ) + return History.from_object_attribute( + self, state, current, original=original + ) + + def get_all_pending( + self, + state: InstanceState[Any], + dict_: _InstanceDict, + passive: PassiveFlag = PASSIVE_NO_INITIALIZE, + ) -> _AllPendingType: + if self.key in dict_: + current = dict_[self.key] + elif passive & CALLABLES_OK: + current = self.get(state, dict_, passive=passive) + else: + return [] + + ret: _AllPendingType + + # can't use __hash__(), can't use __eq__() here + if ( + current is not None + and current is not PASSIVE_NO_RESULT + and current is not NO_VALUE + ): + ret = [(instance_state(current), current)] + else: + ret = [(None, None)] + + if self.key in state.committed_state: + original = state.committed_state[self.key] + if ( + original is not None + and original is not PASSIVE_NO_RESULT + and original is not NO_VALUE + and original is not current + ): + ret.append((instance_state(original), original)) + return ret + + def set( + self, + state: InstanceState[Any], + dict_: _InstanceDict, + value: Any, + initiator: Optional[AttributeEventToken] = None, + passive: PassiveFlag = PASSIVE_OFF, + check_old: Any = None, + pop: bool = False, + ) -> None: + """Set a value on the given InstanceState.""" + + if self.dispatch._active_history: + old = self.get( + state, + dict_, + passive=PASSIVE_ONLY_PERSISTENT + | NO_AUTOFLUSH + | LOAD_AGAINST_COMMITTED, + ) + else: + old = self.get( + state, + dict_, + passive=PASSIVE_NO_FETCH ^ INIT_OK + | LOAD_AGAINST_COMMITTED + | NO_RAISE, + ) + + if ( + check_old is not None + and old is not PASSIVE_NO_RESULT + and check_old is not old + ): + if pop: + return + else: + raise ValueError( + "Object %s not associated with %s on attribute '%s'" + % (instance_str(check_old), state_str(state), self.key) + ) + + value = self.fire_replace_event(state, dict_, value, old, initiator) + dict_[self.key] = value + + def fire_remove_event( + self, + state: InstanceState[Any], + dict_: _InstanceDict, + value: Any, + initiator: Optional[AttributeEventToken], + ) -> None: + if self.trackparent and value not in ( + None, + PASSIVE_NO_RESULT, + NO_VALUE, + ): + self.sethasparent(instance_state(value), state, False) + + for fn in self.dispatch.remove: + fn(state, value, initiator or self._remove_token) + + state._modified_event(dict_, self, value) + + def fire_replace_event( + self, + state: InstanceState[Any], + dict_: _InstanceDict, + value: _T, + previous: Any, + initiator: Optional[AttributeEventToken], + ) -> _T: + if self.trackparent: + if previous is not value and previous not in ( + None, + PASSIVE_NO_RESULT, + NO_VALUE, + ): + 
self.sethasparent(instance_state(previous), state, False) + + for fn in self.dispatch.set: + value = fn( + state, value, previous, initiator or self._replace_token + ) + + state._modified_event(dict_, self, previous) + + if self.trackparent: + if value is not None: + self.sethasparent(instance_state(value), state, True) + + return value + + +class HasCollectionAdapter: + __slots__ = () + + collection: bool + _is_has_collection_adapter = True + + def _dispose_previous_collection( + self, + state: InstanceState[Any], + collection: _AdaptedCollectionProtocol, + adapter: CollectionAdapter, + fire_event: bool, + ) -> None: + raise NotImplementedError() + + @overload + def get_collection( + self, + state: InstanceState[Any], + dict_: _InstanceDict, + user_data: Literal[None] = ..., + passive: Literal[PassiveFlag.PASSIVE_OFF] = ..., + ) -> CollectionAdapter: ... + + @overload + def get_collection( + self, + state: InstanceState[Any], + dict_: _InstanceDict, + user_data: _AdaptedCollectionProtocol = ..., + passive: PassiveFlag = ..., + ) -> CollectionAdapter: ... + + @overload + def get_collection( + self, + state: InstanceState[Any], + dict_: _InstanceDict, + user_data: Optional[_AdaptedCollectionProtocol] = ..., + passive: PassiveFlag = ..., + ) -> Union[ + Literal[LoaderCallableStatus.PASSIVE_NO_RESULT], CollectionAdapter + ]: ... + + def get_collection( + self, + state: InstanceState[Any], + dict_: _InstanceDict, + user_data: Optional[_AdaptedCollectionProtocol] = None, + passive: PassiveFlag = PassiveFlag.PASSIVE_OFF, + ) -> Union[ + Literal[LoaderCallableStatus.PASSIVE_NO_RESULT], CollectionAdapter + ]: + raise NotImplementedError() + + def set( + self, + state: InstanceState[Any], + dict_: _InstanceDict, + value: Any, + initiator: Optional[AttributeEventToken] = None, + passive: PassiveFlag = PassiveFlag.PASSIVE_OFF, + check_old: Any = None, + pop: bool = False, + _adapt: bool = True, + ) -> None: + raise NotImplementedError() + + +if TYPE_CHECKING: + + def _is_collection_attribute_impl( + impl: AttributeImpl, + ) -> TypeGuard[CollectionAttributeImpl]: ... + +else: + _is_collection_attribute_impl = operator.attrgetter("collection") + + +class CollectionAttributeImpl(HasCollectionAdapter, AttributeImpl): + """A collection-holding attribute that instruments changes in membership. + + Only handles collections of instrumented objects. + + InstrumentedCollectionAttribute holds an arbitrary, user-specified + container object (defaulting to a list) and brokers access to the + CollectionAdapter, a "view" onto that object that presents consistent bag + semantics to the orm layer independent of the user data implementation. 
+ + """ + + uses_objects = True + collection = True + default_accepts_scalar_loader = False + supports_population = True + dynamic = False + + _bulk_replace_token: AttributeEventToken + + __slots__ = ( + "copy", + "collection_factory", + "_append_token", + "_remove_token", + "_bulk_replace_token", + "_duck_typed_as", + ) + + def __init__( + self, + class_, + key, + callable_, + dispatch, + typecallable=None, + trackparent=False, + copy_function=None, + compare_function=None, + **kwargs, + ): + super().__init__( + class_, + key, + callable_, + dispatch, + trackparent=trackparent, + compare_function=compare_function, + **kwargs, + ) + + if copy_function is None: + copy_function = self.__copy + self.copy = copy_function + self.collection_factory = typecallable + self._append_token = AttributeEventToken(self, OP_APPEND) + self._remove_token = AttributeEventToken(self, OP_REMOVE) + self._bulk_replace_token = AttributeEventToken(self, OP_BULK_REPLACE) + self._duck_typed_as = util.duck_type_collection( + self.collection_factory() + ) + + if getattr(self.collection_factory, "_sa_linker", None): + + @event.listens_for(self, "init_collection") + def link(target, collection, collection_adapter): + collection._sa_linker(collection_adapter) + + @event.listens_for(self, "dispose_collection") + def unlink(target, collection, collection_adapter): + collection._sa_linker(None) + + def __copy(self, item): + return [y for y in collections.collection_adapter(item)] + + def get_history( + self, + state: InstanceState[Any], + dict_: _InstanceDict, + passive: PassiveFlag = PASSIVE_OFF, + ) -> History: + current = self.get(state, dict_, passive=passive) + + if current is PASSIVE_NO_RESULT: + if ( + passive & PassiveFlag.INCLUDE_PENDING_MUTATIONS + and self.key in state._pending_mutations + ): + pending = state._pending_mutations[self.key] + return pending.merge_with_history(HISTORY_BLANK) + else: + return HISTORY_BLANK + else: + if passive & PassiveFlag.INCLUDE_PENDING_MUTATIONS: + # this collection is loaded / present. 
should not be any + # pending mutations + assert self.key not in state._pending_mutations + + return History.from_collection(self, state, current) + + def get_all_pending( + self, + state: InstanceState[Any], + dict_: _InstanceDict, + passive: PassiveFlag = PASSIVE_NO_INITIALIZE, + ) -> _AllPendingType: + # NOTE: passive is ignored here at the moment + + if self.key not in dict_: + return [] + + current = dict_[self.key] + current = getattr(current, "_sa_adapter") + + if self.key in state.committed_state: + original = state.committed_state[self.key] + if original is not NO_VALUE: + current_states = [ + ((c is not None) and instance_state(c) or None, c) + for c in current + ] + original_states = [ + ((c is not None) and instance_state(c) or None, c) + for c in original + ] + + current_set = dict(current_states) + original_set = dict(original_states) + + return ( + [ + (s, o) + for s, o in current_states + if s not in original_set + ] + + [(s, o) for s, o in current_states if s in original_set] + + [ + (s, o) + for s, o in original_states + if s not in current_set + ] + ) + + return [(instance_state(o), o) for o in current] + + def fire_append_event( + self, + state: InstanceState[Any], + dict_: _InstanceDict, + value: _T, + initiator: Optional[AttributeEventToken], + key: Optional[Any], + ) -> _T: + for fn in self.dispatch.append: + value = fn(state, value, initiator or self._append_token, key=key) + + state._modified_event(dict_, self, NO_VALUE, True) + + if self.trackparent and value is not None: + self.sethasparent(instance_state(value), state, True) + + return value + + def fire_append_wo_mutation_event( + self, + state: InstanceState[Any], + dict_: _InstanceDict, + value: _T, + initiator: Optional[AttributeEventToken], + key: Optional[Any], + ) -> _T: + for fn in self.dispatch.append_wo_mutation: + value = fn(state, value, initiator or self._append_token, key=key) + + return value + + def fire_pre_remove_event( + self, + state: InstanceState[Any], + dict_: _InstanceDict, + initiator: Optional[AttributeEventToken], + key: Optional[Any], + ) -> None: + """A special event used for pop() operations. + + The "remove" event needs to have the item to be removed passed to + it, which in the case of pop from a set, we don't have a way to access + the item before the operation. the event is used for all pop() + operations (even though set.pop is the one where it is really needed). + + """ + state._modified_event(dict_, self, NO_VALUE, True) + + def fire_remove_event( + self, + state: InstanceState[Any], + dict_: _InstanceDict, + value: Any, + initiator: Optional[AttributeEventToken], + key: Optional[Any], + ) -> None: + if self.trackparent and value is not None: + self.sethasparent(instance_state(value), state, False) + + for fn in self.dispatch.remove: + fn(state, value, initiator or self._remove_token, key=key) + + state._modified_event(dict_, self, NO_VALUE, True) + + def delete(self, state: InstanceState[Any], dict_: _InstanceDict) -> None: + if self.key not in dict_: + return + + state._modified_event(dict_, self, NO_VALUE, True) + + collection = self.get_collection(state, state.dict) + collection.clear_with_event() + + # key is always present because we checked above. e.g. + # del is a no-op if collection not present. 
+ del dict_[self.key] + + def _default_value( + self, state: InstanceState[Any], dict_: _InstanceDict + ) -> _AdaptedCollectionProtocol: + """Produce an empty collection for an un-initialized attribute""" + + assert self.key not in dict_, ( + "_default_value should only be invoked for an " + "uninitialized or expired attribute" + ) + + if self.key in state._empty_collections: + return state._empty_collections[self.key] + + adapter, user_data = self._initialize_collection(state) + adapter._set_empty(user_data) + return user_data + + def _initialize_collection( + self, state: InstanceState[Any] + ) -> Tuple[CollectionAdapter, _AdaptedCollectionProtocol]: + adapter, collection = state.manager.initialize_collection( + self.key, state, self.collection_factory + ) + + self.dispatch.init_collection(state, collection, adapter) + + return adapter, collection + + def append( + self, + state: InstanceState[Any], + dict_: _InstanceDict, + value: Any, + initiator: Optional[AttributeEventToken], + passive: PassiveFlag = PASSIVE_OFF, + ) -> None: + collection = self.get_collection( + state, dict_, user_data=None, passive=passive + ) + if collection is PASSIVE_NO_RESULT: + value = self.fire_append_event( + state, dict_, value, initiator, key=NO_KEY + ) + assert ( + self.key not in dict_ + ), "Collection was loaded during event handling." + state._get_pending_mutation(self.key).append(value) + else: + if TYPE_CHECKING: + assert isinstance(collection, CollectionAdapter) + collection.append_with_event(value, initiator) + + def remove( + self, + state: InstanceState[Any], + dict_: _InstanceDict, + value: Any, + initiator: Optional[AttributeEventToken], + passive: PassiveFlag = PASSIVE_OFF, + ) -> None: + collection = self.get_collection( + state, state.dict, user_data=None, passive=passive + ) + if collection is PASSIVE_NO_RESULT: + self.fire_remove_event(state, dict_, value, initiator, key=NO_KEY) + assert ( + self.key not in dict_ + ), "Collection was loaded during event handling." + state._get_pending_mutation(self.key).remove(value) + else: + if TYPE_CHECKING: + assert isinstance(collection, CollectionAdapter) + collection.remove_with_event(value, initiator) + + def pop( + self, + state: InstanceState[Any], + dict_: _InstanceDict, + value: Any, + initiator: Optional[AttributeEventToken], + passive: PassiveFlag = PASSIVE_OFF, + ) -> None: + try: + # TODO: better solution here would be to add + # a "popper" role to collections.py to complement + # "remover". + self.remove(state, dict_, value, initiator, passive=passive) + except (ValueError, KeyError, IndexError): + pass + + def set( + self, + state: InstanceState[Any], + dict_: _InstanceDict, + value: Any, + initiator: Optional[AttributeEventToken] = None, + passive: PassiveFlag = PassiveFlag.PASSIVE_OFF, + check_old: Any = None, + pop: bool = False, + _adapt: bool = True, + ) -> None: + iterable = orig_iterable = value + new_keys = None + + # pulling a new collection first so that an adaptation exception does + # not trigger a lazy load of the old collection. 
+ new_collection, user_data = self._initialize_collection(state) + if _adapt: + if new_collection._converter is not None: + iterable = new_collection._converter(iterable) + else: + setting_type = util.duck_type_collection(iterable) + receiving_type = self._duck_typed_as + + if setting_type is not receiving_type: + given = ( + iterable is None + and "None" + or iterable.__class__.__name__ + ) + wanted = self._duck_typed_as.__name__ + raise TypeError( + "Incompatible collection type: %s is not %s-like" + % (given, wanted) + ) + + # If the object is an adapted collection, return the (iterable) + # adapter. + if hasattr(iterable, "_sa_iterator"): + iterable = iterable._sa_iterator() + elif setting_type is dict: + new_keys = list(iterable) + iterable = iterable.values() + else: + iterable = iter(iterable) + elif util.duck_type_collection(iterable) is dict: + new_keys = list(value) + + new_values = list(iterable) + + evt = self._bulk_replace_token + + self.dispatch.bulk_replace(state, new_values, evt, keys=new_keys) + + # propagate NO_RAISE in passive through to the get() for the + # existing object (ticket #8862) + old = self.get( + state, + dict_, + passive=PASSIVE_ONLY_PERSISTENT ^ (passive & PassiveFlag.NO_RAISE), + ) + if old is PASSIVE_NO_RESULT: + old = self._default_value(state, dict_) + elif old is orig_iterable: + # ignore re-assignment of the current collection, as happens + # implicitly with in-place operators (foo.collection |= other) + return + + # place a copy of "old" in state.committed_state + state._modified_event(dict_, self, old, True) + + old_collection = old._sa_adapter + + dict_[self.key] = user_data + + collections.bulk_replace( + new_values, old_collection, new_collection, initiator=evt + ) + + self._dispose_previous_collection(state, old, old_collection, True) + + def _dispose_previous_collection( + self, + state: InstanceState[Any], + collection: _AdaptedCollectionProtocol, + adapter: CollectionAdapter, + fire_event: bool, + ) -> None: + del collection._sa_adapter + + # discarding old collection make sure it is not referenced in empty + # collections. + state._empty_collections.pop(self.key, None) + if fire_event: + self.dispatch.dispose_collection(state, collection, adapter) + + def _invalidate_collection( + self, collection: _AdaptedCollectionProtocol + ) -> None: + adapter = getattr(collection, "_sa_adapter") + adapter.invalidated = True + + def set_committed_value( + self, state: InstanceState[Any], dict_: _InstanceDict, value: Any + ) -> _AdaptedCollectionProtocol: + """Set an attribute value on the given instance and 'commit' it.""" + + collection, user_data = self._initialize_collection(state) + + if value: + collection.append_multiple_without_event(value) + + state.dict[self.key] = user_data + + state._commit(dict_, [self.key]) + + if self.key in state._pending_mutations: + # pending items exist. issue a modified event, + # add/remove new items. + state._modified_event(dict_, self, user_data, True) + + pending = state._pending_mutations.pop(self.key) + added = pending.added_items + removed = pending.deleted_items + for item in added: + collection.append_without_event(item) + for item in removed: + collection.remove_without_event(item) + + return user_data + + @overload + def get_collection( + self, + state: InstanceState[Any], + dict_: _InstanceDict, + user_data: Literal[None] = ..., + passive: Literal[PassiveFlag.PASSIVE_OFF] = ..., + ) -> CollectionAdapter: ... 
+ + @overload + def get_collection( + self, + state: InstanceState[Any], + dict_: _InstanceDict, + user_data: _AdaptedCollectionProtocol = ..., + passive: PassiveFlag = ..., + ) -> CollectionAdapter: ... + + @overload + def get_collection( + self, + state: InstanceState[Any], + dict_: _InstanceDict, + user_data: Optional[_AdaptedCollectionProtocol] = ..., + passive: PassiveFlag = PASSIVE_OFF, + ) -> Union[ + Literal[LoaderCallableStatus.PASSIVE_NO_RESULT], CollectionAdapter + ]: ... + + def get_collection( + self, + state: InstanceState[Any], + dict_: _InstanceDict, + user_data: Optional[_AdaptedCollectionProtocol] = None, + passive: PassiveFlag = PASSIVE_OFF, + ) -> Union[ + Literal[LoaderCallableStatus.PASSIVE_NO_RESULT], CollectionAdapter + ]: + """Retrieve the CollectionAdapter associated with the given state. + + if user_data is None, retrieves it from the state using normal + "get()" rules, which will fire lazy callables or return the "empty" + collection value. + + """ + if user_data is None: + fetch_user_data = self.get(state, dict_, passive=passive) + if fetch_user_data is LoaderCallableStatus.PASSIVE_NO_RESULT: + return fetch_user_data + else: + user_data = cast("_AdaptedCollectionProtocol", fetch_user_data) + + return user_data._sa_adapter + + +def backref_listeners( + attribute: QueryableAttribute[Any], key: str, uselist: bool +) -> None: + """Apply listeners to synchronize a two-way relationship.""" + + # use easily recognizable names for stack traces. + + # in the sections marked "tokens to test for a recursive loop", + # this is somewhat brittle and very performance-sensitive logic + # that is specific to how we might arrive at each event. a marker + # that can target us directly to arguments being invoked against + # the impl might be simpler, but could interfere with other systems. + + parent_token = attribute.impl.parent_token + parent_impl = attribute.impl + + def _acceptable_key_err(child_state, initiator, child_impl): + raise ValueError( + "Bidirectional attribute conflict detected: " + 'Passing object %s to attribute "%s" ' + 'triggers a modify event on attribute "%s" ' + 'via the backref "%s".' + % ( + state_str(child_state), + initiator.parent_token, + child_impl.parent_token, + attribute.impl.parent_token, + ) + ) + + def emit_backref_from_scalar_set_event( + state, child, oldchild, initiator, **kw + ): + if oldchild is child: + return child + if ( + oldchild is not None + and oldchild is not PASSIVE_NO_RESULT + and oldchild is not NO_VALUE + ): + # With lazy=None, there's no guarantee that the full collection is + # present when updating via a backref. + old_state, old_dict = ( + instance_state(oldchild), + instance_dict(oldchild), + ) + impl = old_state.manager[key].impl + + # tokens to test for a recursive loop. + if not impl.collection and not impl.dynamic: + check_recursive_token = impl._replace_token + else: + check_recursive_token = impl._remove_token + + if initiator is not check_recursive_token: + impl.pop( + old_state, + old_dict, + state.obj(), + parent_impl._append_token, + passive=PASSIVE_NO_FETCH, + ) + + if child is not None: + child_state, child_dict = ( + instance_state(child), + instance_dict(child), + ) + child_impl = child_state.manager[key].impl + + if ( + initiator.parent_token is not parent_token + and initiator.parent_token is not child_impl.parent_token + ): + _acceptable_key_err(state, initiator, child_impl) + + # tokens to test for a recursive loop. 
+ check_append_token = child_impl._append_token + check_bulk_replace_token = ( + child_impl._bulk_replace_token + if _is_collection_attribute_impl(child_impl) + else None + ) + + if ( + initiator is not check_append_token + and initiator is not check_bulk_replace_token + ): + child_impl.append( + child_state, + child_dict, + state.obj(), + initiator, + passive=PASSIVE_NO_FETCH, + ) + return child + + def emit_backref_from_collection_append_event( + state, child, initiator, **kw + ): + if child is None: + return + + child_state, child_dict = instance_state(child), instance_dict(child) + child_impl = child_state.manager[key].impl + + if ( + initiator.parent_token is not parent_token + and initiator.parent_token is not child_impl.parent_token + ): + _acceptable_key_err(state, initiator, child_impl) + + # tokens to test for a recursive loop. + check_append_token = child_impl._append_token + check_bulk_replace_token = ( + child_impl._bulk_replace_token + if _is_collection_attribute_impl(child_impl) + else None + ) + + if ( + initiator is not check_append_token + and initiator is not check_bulk_replace_token + ): + child_impl.append( + child_state, + child_dict, + state.obj(), + initiator, + passive=PASSIVE_NO_FETCH, + ) + return child + + def emit_backref_from_collection_remove_event( + state, child, initiator, **kw + ): + if ( + child is not None + and child is not PASSIVE_NO_RESULT + and child is not NO_VALUE + ): + child_state, child_dict = ( + instance_state(child), + instance_dict(child), + ) + child_impl = child_state.manager[key].impl + + check_replace_token: Optional[AttributeEventToken] + + # tokens to test for a recursive loop. + if not child_impl.collection and not child_impl.dynamic: + check_remove_token = child_impl._remove_token + check_replace_token = child_impl._replace_token + check_for_dupes_on_remove = uselist and not parent_impl.dynamic + else: + check_remove_token = child_impl._remove_token + check_replace_token = ( + child_impl._bulk_replace_token + if _is_collection_attribute_impl(child_impl) + else None + ) + check_for_dupes_on_remove = False + + if ( + initiator is not check_remove_token + and initiator is not check_replace_token + ): + if not check_for_dupes_on_remove or not util.has_dupes( + # when this event is called, the item is usually + # present in the list, except for a pop() operation. + state.dict[parent_impl.key], + child, + ): + child_impl.pop( + child_state, + child_dict, + state.obj(), + initiator, + passive=PASSIVE_NO_FETCH, + ) + + if uselist: + event.listen( + attribute, + "append", + emit_backref_from_collection_append_event, + retval=True, + raw=True, + include_key=True, + ) + else: + event.listen( + attribute, + "set", + emit_backref_from_scalar_set_event, + retval=True, + raw=True, + include_key=True, + ) + # TODO: need coverage in test/orm/ of remove event + event.listen( + attribute, + "remove", + emit_backref_from_collection_remove_event, + retval=True, + raw=True, + include_key=True, + ) + + +_NO_HISTORY = util.symbol("NO_HISTORY") +_NO_STATE_SYMBOLS = frozenset([id(PASSIVE_NO_RESULT), id(NO_VALUE)]) + + +class History(NamedTuple): + """A 3-tuple of added, unchanged and deleted values, + representing the changes which have occurred on an instrumented + attribute. 
+ + The easiest way to get a :class:`.History` object for a particular + attribute on an object is to use the :func:`_sa.inspect` function:: + + from sqlalchemy import inspect + + hist = inspect(myobject).attrs.myattribute.history + + Each tuple member is an iterable sequence: + + * ``added`` - the collection of items added to the attribute (the first + tuple element). + + * ``unchanged`` - the collection of items that have not changed on the + attribute (the second tuple element). + + * ``deleted`` - the collection of items that have been removed from the + attribute (the third tuple element). + + """ + + added: Union[Tuple[()], List[Any]] + unchanged: Union[Tuple[()], List[Any]] + deleted: Union[Tuple[()], List[Any]] + + def __bool__(self) -> bool: + return self != HISTORY_BLANK + + def empty(self) -> bool: + """Return True if this :class:`.History` has no changes + and no existing, unchanged state. + + """ + + return not bool((self.added or self.deleted) or self.unchanged) + + def sum(self) -> Sequence[Any]: + """Return a collection of added + unchanged + deleted.""" + + return ( + (self.added or []) + (self.unchanged or []) + (self.deleted or []) + ) + + def non_deleted(self) -> Sequence[Any]: + """Return a collection of added + unchanged.""" + + return (self.added or []) + (self.unchanged or []) + + def non_added(self) -> Sequence[Any]: + """Return a collection of unchanged + deleted.""" + + return (self.unchanged or []) + (self.deleted or []) + + def has_changes(self) -> bool: + """Return True if this :class:`.History` has changes.""" + + return bool(self.added or self.deleted) + + def _merge(self, added: Iterable[Any], deleted: Iterable[Any]) -> History: + return History( + list(self.added) + list(added), + self.unchanged, + list(self.deleted) + list(deleted), + ) + + def as_state(self) -> History: + return History( + [ + (c is not None) and instance_state(c) or None + for c in self.added + ], + [ + (c is not None) and instance_state(c) or None + for c in self.unchanged + ], + [ + (c is not None) and instance_state(c) or None + for c in self.deleted + ], + ) + + @classmethod + def from_scalar_attribute( + cls, + attribute: ScalarAttributeImpl, + state: InstanceState[Any], + current: Any, + ) -> History: + original = state.committed_state.get(attribute.key, _NO_HISTORY) + + deleted: Union[Tuple[()], List[Any]] + + if original is _NO_HISTORY: + if current is NO_VALUE: + return cls((), (), ()) + else: + return cls((), [current], ()) + # don't let ClauseElement expressions here trip things up + elif ( + current is not NO_VALUE + and attribute.is_equal(current, original) is True + ): + return cls((), [current], ()) + else: + # current convention on native scalars is to not + # include information + # about missing previous value in "deleted", but + # we do include None, which helps in some primary + # key situations + if id(original) in _NO_STATE_SYMBOLS: + deleted = () + # indicate a "del" operation occurred when we don't have + # the previous value as: ([None], (), ()) + if id(current) in _NO_STATE_SYMBOLS: + current = None + else: + deleted = [original] + if current is NO_VALUE: + return cls((), (), deleted) + else: + return cls([current], (), deleted) + + @classmethod + def from_object_attribute( + cls, + attribute: ScalarObjectAttributeImpl, + state: InstanceState[Any], + current: Any, + original: Any = _NO_HISTORY, + ) -> History: + deleted: Union[Tuple[()], List[Any]] + + if original is _NO_HISTORY: + original = state.committed_state.get(attribute.key, _NO_HISTORY) + + if original 
is _NO_HISTORY: + if current is NO_VALUE: + return cls((), (), ()) + else: + return cls((), [current], ()) + elif current is original and current is not NO_VALUE: + return cls((), [current], ()) + else: + # current convention on related objects is to not + # include information + # about missing previous value in "deleted", and + # to also not include None - the dependency.py rules + # ignore the None in any case. + if id(original) in _NO_STATE_SYMBOLS or original is None: + deleted = () + # indicate a "del" operation occurred when we don't have + # the previous value as: ([None], (), ()) + if id(current) in _NO_STATE_SYMBOLS: + current = None + else: + deleted = [original] + if current is NO_VALUE: + return cls((), (), deleted) + else: + return cls([current], (), deleted) + + @classmethod + def from_collection( + cls, + attribute: CollectionAttributeImpl, + state: InstanceState[Any], + current: Any, + ) -> History: + original = state.committed_state.get(attribute.key, _NO_HISTORY) + if current is NO_VALUE: + return cls((), (), ()) + + current = getattr(current, "_sa_adapter") + if original is NO_VALUE: + return cls(list(current), (), ()) + elif original is _NO_HISTORY: + return cls((), list(current), ()) + else: + current_states = [ + ((c is not None) and instance_state(c) or None, c) + for c in current + ] + original_states = [ + ((c is not None) and instance_state(c) or None, c) + for c in original + ] + + current_set = dict(current_states) + original_set = dict(original_states) + + return cls( + [o for s, o in current_states if s not in original_set], + [o for s, o in current_states if s in original_set], + [o for s, o in original_states if s not in current_set], + ) + + +HISTORY_BLANK = History((), (), ()) + + +def get_history( + obj: object, key: str, passive: PassiveFlag = PASSIVE_OFF +) -> History: + """Return a :class:`.History` record for the given object + and attribute key. + + This is the **pre-flush** history for a given attribute, which is + reset each time the :class:`.Session` flushes changes to the + current database transaction. + + .. note:: + + Prefer to use the :attr:`.AttributeState.history` and + :meth:`.AttributeState.load_history` accessors to retrieve the + :class:`.History` for instance attributes. + + + :param obj: an object whose class is instrumented by the + attributes package. + + :param key: string attribute name. + + :param passive: indicates loading behavior for the attribute + if the value is not already present. This is a + bitflag attribute, which defaults to the symbol + :attr:`.PASSIVE_OFF` indicating all necessary SQL + should be emitted. + + .. seealso:: + + :attr:`.AttributeState.history` + + :meth:`.AttributeState.load_history` - retrieve history + using loader callables if the value is not locally present. 
+ + """ + + return get_state_history(instance_state(obj), key, passive) + + +def get_state_history( + state: InstanceState[Any], key: str, passive: PassiveFlag = PASSIVE_OFF +) -> History: + return state.get_history(key, passive) + + +def has_parent( + cls: Type[_O], obj: _O, key: str, optimistic: bool = False +) -> bool: + """TODO""" + manager = manager_of_class(cls) + state = instance_state(obj) + return manager.has_parent(state, key, optimistic) + + +def register_attribute( + class_: Type[_O], + key: str, + *, + comparator: interfaces.PropComparator[_T], + parententity: _InternalEntityType[_O], + doc: Optional[str] = None, + **kw: Any, +) -> InstrumentedAttribute[_T]: + desc = register_descriptor( + class_, key, comparator=comparator, parententity=parententity, doc=doc + ) + register_attribute_impl(class_, key, **kw) + return desc + + +def register_attribute_impl( + class_: Type[_O], + key: str, + uselist: bool = False, + callable_: Optional[_LoaderCallable] = None, + useobject: bool = False, + impl_class: Optional[Type[AttributeImpl]] = None, + backref: Optional[str] = None, + **kw: Any, +) -> QueryableAttribute[Any]: + manager = manager_of_class(class_) + if uselist: + factory = kw.pop("typecallable", None) + typecallable = manager.instrument_collection_class( + key, factory or list + ) + else: + typecallable = kw.pop("typecallable", None) + + dispatch = cast( + "_Dispatch[QueryableAttribute[Any]]", manager[key].dispatch + ) # noqa: E501 + + impl: AttributeImpl + + if impl_class: + # TODO: this appears to be the WriteOnlyAttributeImpl / + # DynamicAttributeImpl constructor which is hardcoded + impl = cast("Type[WriteOnlyAttributeImpl]", impl_class)( + class_, key, dispatch, **kw + ) + elif uselist: + impl = CollectionAttributeImpl( + class_, key, callable_, dispatch, typecallable=typecallable, **kw + ) + elif useobject: + impl = ScalarObjectAttributeImpl( + class_, key, callable_, dispatch, **kw + ) + else: + impl = ScalarAttributeImpl(class_, key, callable_, dispatch, **kw) + + manager[key].impl = impl + + if backref: + backref_listeners(manager[key], backref, uselist) + + manager.post_configure_attribute(key) + return manager[key] + + +def register_descriptor( + class_: Type[Any], + key: str, + *, + comparator: interfaces.PropComparator[_T], + parententity: _InternalEntityType[Any], + doc: Optional[str] = None, +) -> InstrumentedAttribute[_T]: + manager = manager_of_class(class_) + + descriptor = InstrumentedAttribute( + class_, key, comparator=comparator, parententity=parententity + ) + + descriptor.__doc__ = doc # type: ignore + + manager.instrument_attribute(key, descriptor) + return descriptor + + +def unregister_attribute(class_: Type[Any], key: str) -> None: + manager_of_class(class_).uninstrument_attribute(key) + + +def init_collection(obj: object, key: str) -> CollectionAdapter: + """Initialize a collection attribute and return the collection adapter. + + This function is used to provide direct access to collection internals + for a previously unloaded attribute. e.g.:: + + collection_adapter = init_collection(someobject, "elements") + for elem in values: + collection_adapter.append_without_event(elem) + + For an easier way to do the above, see + :func:`~sqlalchemy.orm.attributes.set_committed_value`. + + :param obj: a mapped object + + :param key: string attribute name where the collection is located. 
+ + """ + state = instance_state(obj) + dict_ = state.dict + return init_state_collection(state, dict_, key) + + +def init_state_collection( + state: InstanceState[Any], dict_: _InstanceDict, key: str +) -> CollectionAdapter: + """Initialize a collection attribute and return the collection adapter. + + Discards any existing collection which may be there. + + """ + attr = state.manager[key].impl + + if TYPE_CHECKING: + assert isinstance(attr, HasCollectionAdapter) + + old = dict_.pop(key, None) # discard old collection + if old is not None: + old_collection = old._sa_adapter + attr._dispose_previous_collection(state, old, old_collection, False) + + user_data = attr._default_value(state, dict_) + adapter: CollectionAdapter = attr.get_collection( + state, dict_, user_data, passive=PassiveFlag.PASSIVE_NO_FETCH + ) + adapter._reset_empty() + + return adapter + + +def set_committed_value(instance, key, value): + """Set the value of an attribute with no history events. + + Cancels any previous history present. The value should be + a scalar value for scalar-holding attributes, or + an iterable for any collection-holding attribute. + + This is the same underlying method used when a lazy loader + fires off and loads additional data from the database. + In particular, this method can be used by application code + which has loaded additional attributes or collections through + separate queries, which can then be attached to an instance + as though it were part of its original loaded state. + + """ + state, dict_ = instance_state(instance), instance_dict(instance) + state.manager[key].impl.set_committed_value(state, dict_, value) + + +def set_attribute( + instance: object, + key: str, + value: Any, + initiator: Optional[AttributeEventToken] = None, +) -> None: + """Set the value of an attribute, firing history events. + + This function may be used regardless of instrumentation + applied directly to the class, i.e. no descriptors are required. + Custom attribute management schemes will need to make usage + of this method to establish attribute state as understood + by SQLAlchemy. + + :param instance: the object that will be modified + + :param key: string name of the attribute + + :param value: value to assign + + :param initiator: an instance of :class:`.Event` that would have + been propagated from a previous event listener. This argument + is used when the :func:`.set_attribute` function is being used within + an existing event listening function where an :class:`.Event` object + is being supplied; the object may be used to track the origin of the + chain of events. + + .. versionadded:: 1.2.3 + + """ + state, dict_ = instance_state(instance), instance_dict(instance) + state.manager[key].impl.set(state, dict_, value, initiator) + + +def get_attribute(instance: object, key: str) -> Any: + """Get the value of an attribute, firing any callables required. + + This function may be used regardless of instrumentation + applied directly to the class, i.e. no descriptors are required. + Custom attribute management schemes will need to make usage + of this method to make usage of attribute state as understood + by SQLAlchemy. + + """ + state, dict_ = instance_state(instance), instance_dict(instance) + return state.manager[key].impl.get(state, dict_) + + +def del_attribute(instance: object, key: str) -> None: + """Delete the value of an attribute, firing history events. + + This function may be used regardless of instrumentation + applied directly to the class, i.e. no descriptors are required. 
+ Custom attribute management schemes will need to make usage + of this method to establish attribute state as understood + by SQLAlchemy. + + """ + state, dict_ = instance_state(instance), instance_dict(instance) + state.manager[key].impl.delete(state, dict_) + + +def flag_modified(instance: object, key: str) -> None: + """Mark an attribute on an instance as 'modified'. + + This sets the 'modified' flag on the instance and + establishes an unconditional change event for the given attribute. + The attribute must have a value present, else an + :class:`.InvalidRequestError` is raised. + + To mark an object "dirty" without referring to any specific attribute + so that it is considered within a flush, use the + :func:`.attributes.flag_dirty` call. + + .. seealso:: + + :func:`.attributes.flag_dirty` + + """ + state, dict_ = instance_state(instance), instance_dict(instance) + impl = state.manager[key].impl + impl.dispatch.modified(state, impl._modified_token) + state._modified_event(dict_, impl, NO_VALUE, is_userland=True) + + +def flag_dirty(instance: object) -> None: + """Mark an instance as 'dirty' without any specific attribute mentioned. + + This is a special operation that will allow the object to travel through + the flush process for interception by events such as + :meth:`.SessionEvents.before_flush`. Note that no SQL will be emitted in + the flush process for an object that has no changes, even if marked dirty + via this method. However, a :meth:`.SessionEvents.before_flush` handler + will be able to see the object in the :attr:`.Session.dirty` collection and + may establish changes on it, which will then be included in the SQL + emitted. + + .. versionadded:: 1.2 + + .. seealso:: + + :func:`.attributes.flag_modified` + + """ + + state, dict_ = instance_state(instance), instance_dict(instance) + state._modified_event(dict_, None, NO_VALUE, is_userland=True) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/base.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/base.py new file mode 100644 index 0000000000000000000000000000000000000000..ae0ba1029d14577a1bc25818008ff5ae4d4198b2 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/base.py @@ -0,0 +1,973 @@ +# orm/base.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php + +"""Constants and rudimental functions used throughout the ORM. + +""" + +from __future__ import annotations + +from enum import Enum +import operator +import typing +from typing import Any +from typing import Callable +from typing import Dict +from typing import Generic +from typing import no_type_check +from typing import Optional +from typing import overload +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING +from typing import TypeVar +from typing import Union + +from . import exc +from ._typing import insp_is_mapper +from .. import exc as sa_exc +from .. import inspection +from .. 
import util +from ..sql import roles +from ..sql.elements import SQLColumnExpression +from ..sql.elements import SQLCoreOperations +from ..util import FastIntFlag +from ..util.langhelpers import TypingOnly +from ..util.typing import Literal + +if typing.TYPE_CHECKING: + from ._typing import _EntityType + from ._typing import _ExternalEntityType + from ._typing import _InternalEntityType + from .attributes import InstrumentedAttribute + from .dynamic import AppenderQuery + from .instrumentation import ClassManager + from .interfaces import PropComparator + from .mapper import Mapper + from .state import InstanceState + from .util import AliasedClass + from .writeonly import WriteOnlyCollection + from ..sql._typing import _ColumnExpressionArgument + from ..sql._typing import _InfoType + from ..sql.elements import ColumnElement + from ..sql.operators import OperatorType + +_T = TypeVar("_T", bound=Any) +_T_co = TypeVar("_T_co", bound=Any, covariant=True) + +_O = TypeVar("_O", bound=object) + + +class LoaderCallableStatus(Enum): + PASSIVE_NO_RESULT = 0 + """Symbol returned by a loader callable or other attribute/history + retrieval operation when a value could not be determined, based + on loader callable flags. + """ + + PASSIVE_CLASS_MISMATCH = 1 + """Symbol indicating that an object is locally present for a given + primary key identity but it is not of the requested class. The + return value is therefore None and no SQL should be emitted.""" + + ATTR_WAS_SET = 2 + """Symbol returned by a loader callable to indicate the + retrieved value, or values, were assigned to their attributes + on the target object. + """ + + ATTR_EMPTY = 3 + """Symbol used internally to indicate an attribute had no callable.""" + + NO_VALUE = 4 + """Symbol which may be placed as the 'previous' value of an attribute, + indicating no value was loaded for an attribute when it was modified, + and flags indicated we were not to load it. + """ + + NEVER_SET = NO_VALUE + """ + Synonymous with NO_VALUE + + .. versionchanged:: 1.4 NEVER_SET was merged with NO_VALUE + + """ + + +( + PASSIVE_NO_RESULT, + PASSIVE_CLASS_MISMATCH, + ATTR_WAS_SET, + ATTR_EMPTY, + NO_VALUE, +) = tuple(LoaderCallableStatus) + +NEVER_SET = NO_VALUE + + +class PassiveFlag(FastIntFlag): + """Bitflag interface that passes options onto loader callables""" + + NO_CHANGE = 0 + """No callables or SQL should be emitted on attribute access + and no state should change + """ + + CALLABLES_OK = 1 + """Loader callables can be fired off if a value + is not present. + """ + + SQL_OK = 2 + """Loader callables can emit SQL at least on scalar value attributes.""" + + RELATED_OBJECT_OK = 4 + """Callables can use SQL to load related objects as well + as scalar value attributes. + """ + + INIT_OK = 8 + """Attributes should be initialized with a blank + value (None or an empty collection) upon get, if no other + value can be obtained. + """ + + NON_PERSISTENT_OK = 16 + """Callables can be emitted if the parent is not persistent.""" + + LOAD_AGAINST_COMMITTED = 32 + """Callables should use committed values as primary/foreign keys during a + load. 
+ """ + + NO_AUTOFLUSH = 64 + """Loader callables should disable autoflush.""" + + NO_RAISE = 128 + """Loader callables should not raise any assertions""" + + DEFERRED_HISTORY_LOAD = 256 + """indicates special load of the previous value of an attribute""" + + INCLUDE_PENDING_MUTATIONS = 512 + + # pre-packaged sets of flags used as inputs + PASSIVE_OFF = ( + RELATED_OBJECT_OK | NON_PERSISTENT_OK | INIT_OK | CALLABLES_OK | SQL_OK + ) + "Callables can be emitted in all cases." + + PASSIVE_RETURN_NO_VALUE = PASSIVE_OFF ^ INIT_OK + """PASSIVE_OFF ^ INIT_OK""" + + PASSIVE_NO_INITIALIZE = PASSIVE_RETURN_NO_VALUE ^ CALLABLES_OK + "PASSIVE_RETURN_NO_VALUE ^ CALLABLES_OK" + + PASSIVE_NO_FETCH = PASSIVE_OFF ^ SQL_OK + "PASSIVE_OFF ^ SQL_OK" + + PASSIVE_NO_FETCH_RELATED = PASSIVE_OFF ^ RELATED_OBJECT_OK + "PASSIVE_OFF ^ RELATED_OBJECT_OK" + + PASSIVE_ONLY_PERSISTENT = PASSIVE_OFF ^ NON_PERSISTENT_OK + "PASSIVE_OFF ^ NON_PERSISTENT_OK" + + PASSIVE_MERGE = PASSIVE_OFF | NO_RAISE + """PASSIVE_OFF | NO_RAISE + + Symbol used specifically for session.merge() and similar cases + + """ + + +( + NO_CHANGE, + CALLABLES_OK, + SQL_OK, + RELATED_OBJECT_OK, + INIT_OK, + NON_PERSISTENT_OK, + LOAD_AGAINST_COMMITTED, + NO_AUTOFLUSH, + NO_RAISE, + DEFERRED_HISTORY_LOAD, + INCLUDE_PENDING_MUTATIONS, + PASSIVE_OFF, + PASSIVE_RETURN_NO_VALUE, + PASSIVE_NO_INITIALIZE, + PASSIVE_NO_FETCH, + PASSIVE_NO_FETCH_RELATED, + PASSIVE_ONLY_PERSISTENT, + PASSIVE_MERGE, +) = PassiveFlag.__members__.values() + +DEFAULT_MANAGER_ATTR = "_sa_class_manager" +DEFAULT_STATE_ATTR = "_sa_instance_state" + + +class EventConstants(Enum): + EXT_CONTINUE = 1 + EXT_STOP = 2 + EXT_SKIP = 3 + NO_KEY = 4 + """indicates an :class:`.AttributeEvent` event that did not have any + key argument. + + .. versionadded:: 2.0 + + """ + + +EXT_CONTINUE, EXT_STOP, EXT_SKIP, NO_KEY = tuple(EventConstants) + + +class RelationshipDirection(Enum): + """enumeration which indicates the 'direction' of a + :class:`_orm.RelationshipProperty`. + + :class:`.RelationshipDirection` is accessible from the + :attr:`_orm.Relationship.direction` attribute of + :class:`_orm.RelationshipProperty`. + + """ + + ONETOMANY = 1 + """Indicates the one-to-many direction for a :func:`_orm.relationship`. + + This symbol is typically used by the internals but may be exposed within + certain API features. + + """ + + MANYTOONE = 2 + """Indicates the many-to-one direction for a :func:`_orm.relationship`. + + This symbol is typically used by the internals but may be exposed within + certain API features. + + """ + + MANYTOMANY = 3 + """Indicates the many-to-many direction for a :func:`_orm.relationship`. + + This symbol is typically used by the internals but may be exposed within + certain API features. + + """ + + +ONETOMANY, MANYTOONE, MANYTOMANY = tuple(RelationshipDirection) + + +class InspectionAttrExtensionType(Enum): + """Symbols indicating the type of extension that a + :class:`.InspectionAttr` is part of.""" + + +class NotExtension(InspectionAttrExtensionType): + NOT_EXTENSION = "not_extension" + """Symbol indicating an :class:`InspectionAttr` that's + not part of sqlalchemy.ext. + + Is assigned to the :attr:`.InspectionAttr.extension_type` + attribute. 
+ + """ + + +_never_set = frozenset([NEVER_SET]) + +_none_set = frozenset([None, NEVER_SET, PASSIVE_NO_RESULT]) + +_none_only_set = frozenset([None]) + +_SET_DEFERRED_EXPIRED = util.symbol("SET_DEFERRED_EXPIRED") + +_DEFER_FOR_STATE = util.symbol("DEFER_FOR_STATE") + +_RAISE_FOR_STATE = util.symbol("RAISE_FOR_STATE") + + +_F = TypeVar("_F", bound=Callable[..., Any]) +_Self = TypeVar("_Self") + + +def _assertions( + *assertions: Any, +) -> Callable[[_F], _F]: + @util.decorator + def generate(fn: _F, self: _Self, *args: Any, **kw: Any) -> _Self: + for assertion in assertions: + assertion(self, fn.__name__) + fn(self, *args, **kw) + return self + + return generate + + +if TYPE_CHECKING: + + def manager_of_class(cls: Type[_O]) -> ClassManager[_O]: ... + + @overload + def opt_manager_of_class(cls: AliasedClass[Any]) -> None: ... + + @overload + def opt_manager_of_class( + cls: _ExternalEntityType[_O], + ) -> Optional[ClassManager[_O]]: ... + + def opt_manager_of_class( + cls: _ExternalEntityType[_O], + ) -> Optional[ClassManager[_O]]: ... + + def instance_state(instance: _O) -> InstanceState[_O]: ... + + def instance_dict(instance: object) -> Dict[str, Any]: ... + +else: + # these can be replaced by sqlalchemy.ext.instrumentation + # if augmented class instrumentation is enabled. + + def manager_of_class(cls): + try: + return cls.__dict__[DEFAULT_MANAGER_ATTR] + except KeyError as ke: + raise exc.UnmappedClassError( + cls, f"Can't locate an instrumentation manager for class {cls}" + ) from ke + + def opt_manager_of_class(cls): + return cls.__dict__.get(DEFAULT_MANAGER_ATTR) + + instance_state = operator.attrgetter(DEFAULT_STATE_ATTR) + + instance_dict = operator.attrgetter("__dict__") + + +def instance_str(instance: object) -> str: + """Return a string describing an instance.""" + + return state_str(instance_state(instance)) + + +def state_str(state: InstanceState[Any]) -> str: + """Return a string describing an instance via its InstanceState.""" + + if state is None: + return "None" + else: + return "<%s at 0x%x>" % (state.class_.__name__, id(state.obj())) + + +def state_class_str(state: InstanceState[Any]) -> str: + """Return a string describing an instance's class via its + InstanceState. + """ + + if state is None: + return "None" + else: + return "<%s>" % (state.class_.__name__,) + + +def attribute_str(instance: object, attribute: str) -> str: + return instance_str(instance) + "." + attribute + + +def state_attribute_str(state: InstanceState[Any], attribute: str) -> str: + return state_str(state) + "." + attribute + + +def object_mapper(instance: _T) -> Mapper[_T]: + """Given an object, return the primary Mapper associated with the object + instance. + + Raises :class:`sqlalchemy.orm.exc.UnmappedInstanceError` + if no mapping is configured. + + This function is available via the inspection system as:: + + inspect(instance).mapper + + Using the inspection system will raise + :class:`sqlalchemy.exc.NoInspectionAvailable` if the instance is + not part of a mapping. + + """ + return object_state(instance).mapper + + +def object_state(instance: _T) -> InstanceState[_T]: + """Given an object, return the :class:`.InstanceState` + associated with the object. + + Raises :class:`sqlalchemy.orm.exc.UnmappedInstanceError` + if no mapping is configured. + + Equivalent functionality is available via the :func:`_sa.inspect` + function as:: + + inspect(instance) + + Using the inspection system will raise + :class:`sqlalchemy.exc.NoInspectionAvailable` if the instance is + not part of a mapping. 
+ + """ + state = _inspect_mapped_object(instance) + if state is None: + raise exc.UnmappedInstanceError(instance) + else: + return state + + +@inspection._inspects(object) +def _inspect_mapped_object(instance: _T) -> Optional[InstanceState[_T]]: + try: + return instance_state(instance) + except (exc.UnmappedClassError,) + exc.NO_STATE: + return None + + +def _class_to_mapper( + class_or_mapper: Union[Mapper[_T], Type[_T]] +) -> Mapper[_T]: + # can't get mypy to see an overload for this + insp = inspection.inspect(class_or_mapper, False) + if insp is not None: + return insp.mapper # type: ignore + else: + assert isinstance(class_or_mapper, type) + raise exc.UnmappedClassError(class_or_mapper) + + +def _mapper_or_none( + entity: Union[Type[_T], _InternalEntityType[_T]] +) -> Optional[Mapper[_T]]: + """Return the :class:`_orm.Mapper` for the given class or None if the + class is not mapped. + """ + + # can't get mypy to see an overload for this + insp = inspection.inspect(entity, False) + if insp is not None: + return insp.mapper # type: ignore + else: + return None + + +def _is_mapped_class(entity: Any) -> bool: + """Return True if the given object is a mapped class, + :class:`_orm.Mapper`, or :class:`.AliasedClass`. + """ + + insp = inspection.inspect(entity, False) + return ( + insp is not None + and not insp.is_clause_element + and (insp.is_mapper or insp.is_aliased_class) + ) + + +def _is_aliased_class(entity: Any) -> bool: + insp = inspection.inspect(entity, False) + return insp is not None and getattr(insp, "is_aliased_class", False) + + +@no_type_check +def _entity_descriptor(entity: _EntityType[Any], key: str) -> Any: + """Return a class attribute given an entity and string name. + + May return :class:`.InstrumentedAttribute` or user-defined + attribute. + + """ + insp = inspection.inspect(entity) + if insp.is_selectable: + description = entity + entity = insp.c + elif insp.is_aliased_class: + entity = insp.entity + description = entity + elif hasattr(insp, "mapper"): + description = entity = insp.mapper.class_ + else: + description = entity + + try: + return getattr(entity, key) + except AttributeError as err: + raise sa_exc.InvalidRequestError( + "Entity '%s' has no property '%s'" % (description, key) + ) from err + + +if TYPE_CHECKING: + + def _state_mapper(state: InstanceState[_O]) -> Mapper[_O]: ... + +else: + _state_mapper = util.dottedgetter("manager.mapper") + + +def _inspect_mapped_class( + class_: Type[_O], configure: bool = False +) -> Optional[Mapper[_O]]: + try: + class_manager = opt_manager_of_class(class_) + if class_manager is None or not class_manager.is_mapped: + return None + mapper = class_manager.mapper + except exc.NO_STATE: + return None + else: + if configure: + mapper._check_configure() + return mapper + + +def _parse_mapper_argument(arg: Union[Mapper[_O], Type[_O]]) -> Mapper[_O]: + insp = inspection.inspect(arg, raiseerr=False) + if insp_is_mapper(insp): + return insp + + raise sa_exc.ArgumentError(f"Mapper or mapped class expected, got {arg!r}") + + +def class_mapper(class_: Type[_O], configure: bool = True) -> Mapper[_O]: + """Given a class, return the primary :class:`_orm.Mapper` associated + with the key. + + Raises :exc:`.UnmappedClassError` if no mapping is configured + on the given class, or :exc:`.ArgumentError` if a non-class + object is passed. 
+ + Equivalent functionality is available via the :func:`_sa.inspect` + function as:: + + inspect(some_mapped_class) + + Using the inspection system will raise + :class:`sqlalchemy.exc.NoInspectionAvailable` if the class is not mapped. + + """ + mapper = _inspect_mapped_class(class_, configure=configure) + if mapper is None: + if not isinstance(class_, type): + raise sa_exc.ArgumentError( + "Class object expected, got '%r'." % (class_,) + ) + raise exc.UnmappedClassError(class_) + else: + return mapper + + +class InspectionAttr: + """A base class applied to all ORM objects and attributes that are + related to things that can be returned by the :func:`_sa.inspect` function. + + The attributes defined here allow the usage of simple boolean + checks to test basic facts about the object returned. + + While the boolean checks here are basically the same as using + the Python isinstance() function, the flags here can be used without + the need to import all of these classes, and also such that + the SQLAlchemy class system can change while leaving the flags + here intact for forwards-compatibility. + + """ + + __slots__: Tuple[str, ...] = () + + is_selectable = False + """Return True if this object is an instance of + :class:`_expression.Selectable`.""" + + is_aliased_class = False + """True if this object is an instance of :class:`.AliasedClass`.""" + + is_instance = False + """True if this object is an instance of :class:`.InstanceState`.""" + + is_mapper = False + """True if this object is an instance of :class:`_orm.Mapper`.""" + + is_bundle = False + """True if this object is an instance of :class:`.Bundle`.""" + + is_property = False + """True if this object is an instance of :class:`.MapperProperty`.""" + + is_attribute = False + """True if this object is a Python :term:`descriptor`. + + This can refer to one of many types. Usually a + :class:`.QueryableAttribute` which handles attributes events on behalf + of a :class:`.MapperProperty`. But can also be an extension type + such as :class:`.AssociationProxy` or :class:`.hybrid_property`. + The :attr:`.InspectionAttr.extension_type` will refer to a constant + identifying the specific subtype. + + .. seealso:: + + :attr:`_orm.Mapper.all_orm_descriptors` + + """ + + _is_internal_proxy = False + """True if this object is an internal proxy object. + + .. versionadded:: 1.2.12 + + """ + + is_clause_element = False + """True if this object is an instance of + :class:`_expression.ClauseElement`.""" + + extension_type: InspectionAttrExtensionType = NotExtension.NOT_EXTENSION + """The extension type, if any. + Defaults to :attr:`.interfaces.NotExtension.NOT_EXTENSION` + + .. seealso:: + + :class:`.HybridExtensionType` + + :class:`.AssociationProxyExtensionType` + + """ + + +class InspectionAttrInfo(InspectionAttr): + """Adds the ``.info`` attribute to :class:`.InspectionAttr`. + + The rationale for :class:`.InspectionAttr` vs. :class:`.InspectionAttrInfo` + is that the former is compatible as a mixin for classes that specify + ``__slots__``; this is essentially an implementation artifact. + + """ + + __slots__ = () + + @util.ro_memoized_property + def info(self) -> _InfoType: + """Info dictionary associated with the object, allowing user-defined + data to be associated with this :class:`.InspectionAttr`. + + The dictionary is generated when first accessed. Alternatively, + it can be specified as a constructor argument to the + :func:`.column_property`, :func:`_orm.relationship`, or + :func:`.composite` + functions. + + .. 
seealso:: + + :attr:`.QueryableAttribute.info` + + :attr:`.SchemaItem.info` + + """ + return {} + + +class SQLORMOperations(SQLCoreOperations[_T_co], TypingOnly): + __slots__ = () + + if typing.TYPE_CHECKING: + + def of_type( + self, class_: _EntityType[Any] + ) -> PropComparator[_T_co]: ... + + def and_( + self, *criteria: _ColumnExpressionArgument[bool] + ) -> PropComparator[bool]: ... + + def any( # noqa: A001 + self, + criterion: Optional[_ColumnExpressionArgument[bool]] = None, + **kwargs: Any, + ) -> ColumnElement[bool]: ... + + def has( + self, + criterion: Optional[_ColumnExpressionArgument[bool]] = None, + **kwargs: Any, + ) -> ColumnElement[bool]: ... + + +class ORMDescriptor(Generic[_T_co], TypingOnly): + """Represent any Python descriptor that provides a SQL expression + construct at the class level.""" + + __slots__ = () + + if typing.TYPE_CHECKING: + + @overload + def __get__( + self, instance: Any, owner: Literal[None] + ) -> ORMDescriptor[_T_co]: ... + + @overload + def __get__( + self, instance: Literal[None], owner: Any + ) -> SQLCoreOperations[_T_co]: ... + + @overload + def __get__(self, instance: object, owner: Any) -> _T_co: ... + + def __get__( + self, instance: object, owner: Any + ) -> Union[ORMDescriptor[_T_co], SQLCoreOperations[_T_co], _T_co]: ... + + +class _MappedAnnotationBase(Generic[_T_co], TypingOnly): + """common class for Mapped and similar ORM container classes. + + these are classes that can appear on the left side of an ORM declarative + mapping, containing a mapped class or in some cases a collection + surrounding a mapped class. + + """ + + __slots__ = () + + +class SQLORMExpression( + SQLORMOperations[_T_co], SQLColumnExpression[_T_co], TypingOnly +): + """A type that may be used to indicate any ORM-level attribute or + object that acts in place of one, in the context of SQL expression + construction. + + :class:`.SQLORMExpression` extends from the Core + :class:`.SQLColumnExpression` to add additional SQL methods that are ORM + specific, such as :meth:`.PropComparator.of_type`, and is part of the bases + for :class:`.InstrumentedAttribute`. It may be used in :pep:`484` typing to + indicate arguments or return values that should behave as ORM-level + attribute expressions. + + .. versionadded:: 2.0.0b4 + + + """ + + __slots__ = () + + +class Mapped( + SQLORMExpression[_T_co], + ORMDescriptor[_T_co], + _MappedAnnotationBase[_T_co], + roles.DDLConstraintColumnRole, +): + """Represent an ORM mapped attribute on a mapped class. + + This class represents the complete descriptor interface for any class + attribute that will have been :term:`instrumented` by the ORM + :class:`_orm.Mapper` class. Provides appropriate information to type + checkers such as pylance and mypy so that ORM-mapped attributes + are correctly typed. + + The most prominent use of :class:`_orm.Mapped` is in + the :ref:`Declarative Mapping ` form + of :class:`_orm.Mapper` configuration, where used explicitly it drives + the configuration of ORM attributes such as :func:`_orm.mapped_class` + and :func:`_orm.relationship`. + + .. seealso:: + + :ref:`orm_explicit_declarative_base` + + :ref:`orm_declarative_table` + + .. tip:: + + The :class:`_orm.Mapped` class represents attributes that are handled + directly by the :class:`_orm.Mapper` class. It does not include other + Python descriptor classes that are provided as extensions, including + :ref:`hybrids_toplevel` and the :ref:`associationproxy_toplevel`. 
+ While these systems still make use of ORM-specific superclasses + and structures, they are not :term:`instrumented` by the + :class:`_orm.Mapper` and instead provide their own functionality + when they are accessed on a class. + + .. versionadded:: 1.4 + + + """ + + __slots__ = () + + if typing.TYPE_CHECKING: + + @overload + def __get__( + self, instance: None, owner: Any + ) -> InstrumentedAttribute[_T_co]: ... + + @overload + def __get__(self, instance: object, owner: Any) -> _T_co: ... + + def __get__( + self, instance: Optional[object], owner: Any + ) -> Union[InstrumentedAttribute[_T_co], _T_co]: ... + + @classmethod + def _empty_constructor(cls, arg1: Any) -> Mapped[_T_co]: ... + + def __set__( + self, instance: Any, value: Union[SQLCoreOperations[_T_co], _T_co] + ) -> None: ... + + def __delete__(self, instance: Any) -> None: ... + + +class _MappedAttribute(Generic[_T_co], TypingOnly): + """Mixin for attributes which should be replaced by mapper-assigned + attributes. + + """ + + __slots__ = () + + +class _DeclarativeMapped(Mapped[_T_co], _MappedAttribute[_T_co]): + """Mixin for :class:`.MapperProperty` subclasses that allows them to + be compatible with ORM-annotated declarative mappings. + + """ + + __slots__ = () + + # MappedSQLExpression, Relationship, Composite etc. dont actually do + # SQL expression behavior. yet there is code that compares them with + # __eq__(), __ne__(), etc. Since #8847 made Mapped even more full + # featured including ColumnOperators, we need to have those methods + # be no-ops for these objects, so return NotImplemented to fall back + # to normal comparison behavior. + def operate(self, op: OperatorType, *other: Any, **kwargs: Any) -> Any: + return NotImplemented + + __sa_operate__ = operate + + def reverse_operate( + self, op: OperatorType, other: Any, **kwargs: Any + ) -> Any: + return NotImplemented + + +class DynamicMapped(_MappedAnnotationBase[_T_co]): + """Represent the ORM mapped attribute type for a "dynamic" relationship. + + The :class:`_orm.DynamicMapped` type annotation may be used in an + :ref:`Annotated Declarative Table ` mapping + to indicate that the ``lazy="dynamic"`` loader strategy should be used + for a particular :func:`_orm.relationship`. + + .. legacy:: The "dynamic" lazy loader strategy is the legacy form of what + is now the "write_only" strategy described in the section + :ref:`write_only_relationship`. + + E.g.:: + + class User(Base): + __tablename__ = "user" + id: Mapped[int] = mapped_column(primary_key=True) + addresses: DynamicMapped[Address] = relationship( + cascade="all,delete-orphan" + ) + + See the section :ref:`dynamic_relationship` for background. + + .. versionadded:: 2.0 + + .. seealso:: + + :ref:`dynamic_relationship` - complete background + + :class:`.WriteOnlyMapped` - fully 2.0 style version + + """ + + __slots__ = () + + if TYPE_CHECKING: + + @overload + def __get__( + self, instance: None, owner: Any + ) -> InstrumentedAttribute[_T_co]: ... + + @overload + def __get__( + self, instance: object, owner: Any + ) -> AppenderQuery[_T_co]: ... + + def __get__( + self, instance: Optional[object], owner: Any + ) -> Union[InstrumentedAttribute[_T_co], AppenderQuery[_T_co]]: ... + + def __set__( + self, instance: Any, value: typing.Collection[_T_co] + ) -> None: ... + + +class WriteOnlyMapped(_MappedAnnotationBase[_T_co]): + """Represent the ORM mapped attribute type for a "write only" relationship. 
+ + The :class:`_orm.WriteOnlyMapped` type annotation may be used in an + :ref:`Annotated Declarative Table ` mapping + to indicate that the ``lazy="write_only"`` loader strategy should be used + for a particular :func:`_orm.relationship`. + + E.g.:: + + class User(Base): + __tablename__ = "user" + id: Mapped[int] = mapped_column(primary_key=True) + addresses: WriteOnlyMapped[Address] = relationship( + cascade="all,delete-orphan" + ) + + See the section :ref:`write_only_relationship` for background. + + .. versionadded:: 2.0 + + .. seealso:: + + :ref:`write_only_relationship` - complete background + + :class:`.DynamicMapped` - includes legacy :class:`_orm.Query` support + + """ + + __slots__ = () + + if TYPE_CHECKING: + + @overload + def __get__( + self, instance: None, owner: Any + ) -> InstrumentedAttribute[_T_co]: ... + + @overload + def __get__( + self, instance: object, owner: Any + ) -> WriteOnlyCollection[_T_co]: ... + + def __get__( + self, instance: Optional[object], owner: Any + ) -> Union[ + InstrumentedAttribute[_T_co], WriteOnlyCollection[_T_co] + ]: ... + + def __set__( + self, instance: Any, value: typing.Collection[_T_co] + ) -> None: ... diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/bulk_persistence.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/bulk_persistence.py new file mode 100644 index 0000000000000000000000000000000000000000..402d7bede6dc9c763adb6b0a19e48e54f44c7896 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/bulk_persistence.py @@ -0,0 +1,2123 @@ +# orm/bulk_persistence.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php +# mypy: ignore-errors + + +"""additional ORM persistence classes related to "bulk" operations, +specifically outside of the flush() process. + +""" + +from __future__ import annotations + +from typing import Any +from typing import cast +from typing import Dict +from typing import Iterable +from typing import Optional +from typing import overload +from typing import TYPE_CHECKING +from typing import TypeVar +from typing import Union + +from . import attributes +from . import context +from . import evaluator +from . import exc as orm_exc +from . import loading +from . import persistence +from .base import NO_VALUE +from .context import AbstractORMCompileState +from .context import FromStatement +from .context import ORMFromStatementCompileState +from .context import QueryContext +from .. import exc as sa_exc +from .. 
import util +from ..engine import Dialect +from ..engine import result as _result +from ..sql import coercions +from ..sql import dml +from ..sql import expression +from ..sql import roles +from ..sql import select +from ..sql import sqltypes +from ..sql.base import _entity_namespace_key +from ..sql.base import CompileState +from ..sql.base import Options +from ..sql.dml import DeleteDMLState +from ..sql.dml import InsertDMLState +from ..sql.dml import UpdateDMLState +from ..util import EMPTY_DICT +from ..util.typing import Literal + +if TYPE_CHECKING: + from ._typing import DMLStrategyArgument + from ._typing import OrmExecuteOptionsParameter + from ._typing import SynchronizeSessionArgument + from .mapper import Mapper + from .session import _BindArguments + from .session import ORMExecuteState + from .session import Session + from .session import SessionTransaction + from .state import InstanceState + from ..engine import Connection + from ..engine import cursor + from ..engine.interfaces import _CoreAnyExecuteParams + +_O = TypeVar("_O", bound=object) + + +@overload +def _bulk_insert( + mapper: Mapper[_O], + mappings: Union[Iterable[InstanceState[_O]], Iterable[Dict[str, Any]]], + session_transaction: SessionTransaction, + *, + isstates: bool, + return_defaults: bool, + render_nulls: bool, + use_orm_insert_stmt: Literal[None] = ..., + execution_options: Optional[OrmExecuteOptionsParameter] = ..., +) -> None: ... + + +@overload +def _bulk_insert( + mapper: Mapper[_O], + mappings: Union[Iterable[InstanceState[_O]], Iterable[Dict[str, Any]]], + session_transaction: SessionTransaction, + *, + isstates: bool, + return_defaults: bool, + render_nulls: bool, + use_orm_insert_stmt: Optional[dml.Insert] = ..., + execution_options: Optional[OrmExecuteOptionsParameter] = ..., +) -> cursor.CursorResult[Any]: ... + + +def _bulk_insert( + mapper: Mapper[_O], + mappings: Union[Iterable[InstanceState[_O]], Iterable[Dict[str, Any]]], + session_transaction: SessionTransaction, + *, + isstates: bool, + return_defaults: bool, + render_nulls: bool, + use_orm_insert_stmt: Optional[dml.Insert] = None, + execution_options: Optional[OrmExecuteOptionsParameter] = None, +) -> Optional[cursor.CursorResult[Any]]: + base_mapper = mapper.base_mapper + + if session_transaction.session.connection_callable: + raise NotImplementedError( + "connection_callable / per-instance sharding " + "not supported in bulk_insert()" + ) + + if isstates: + if TYPE_CHECKING: + mappings = cast(Iterable[InstanceState[_O]], mappings) + + if return_defaults: + # list of states allows us to attach .key for return_defaults case + states = [(state, state.dict) for state in mappings] + mappings = [dict_ for (state, dict_) in states] + else: + mappings = [state.dict for state in mappings] + else: + if TYPE_CHECKING: + mappings = cast(Iterable[Dict[str, Any]], mappings) + + if return_defaults: + # use dictionaries given, so that newly populated defaults + # can be delivered back to the caller (see #11661). This is **not** + # compatible with other use cases such as a session-executed + # insert() construct, as this will confuse the case of + # insert-per-subclass for joined inheritance cases (see + # test_bulk_statements.py::BulkDMLReturningJoinedInhTest). 
+ # + # So in this conditional, we have **only** called + # session.bulk_insert_mappings() which does not have this + # requirement + mappings = list(mappings) + else: + # for all other cases we need to establish a local dictionary + # so that the incoming dictionaries aren't mutated + mappings = [dict(m) for m in mappings] + _expand_composites(mapper, mappings) + + connection = session_transaction.connection(base_mapper) + + return_result: Optional[cursor.CursorResult[Any]] = None + + mappers_to_run = [ + (table, mp) + for table, mp in base_mapper._sorted_tables.items() + if table in mapper._pks_by_table + ] + + if return_defaults: + # not used by new-style bulk inserts, only used for legacy + bookkeeping = True + elif len(mappers_to_run) > 1: + # if we have more than one table, mapper to run where we will be + # either horizontally splicing, or copying values between tables, + # we need the "bookkeeping" / deterministic returning order + bookkeeping = True + else: + bookkeeping = False + + for table, super_mapper in mappers_to_run: + # find bindparams in the statement. For bulk, we don't really know if + # a key in the params applies to a different table since we are + # potentially inserting for multiple tables here; looking at the + # bindparam() is a lot more direct. in most cases this will + # use _generate_cache_key() which is memoized, although in practice + # the ultimate statement that's executed is probably not the same + # object so that memoization might not matter much. + extra_bp_names = ( + [ + b.key + for b in use_orm_insert_stmt._get_embedded_bindparams() + if b.key in mappings[0] + ] + if use_orm_insert_stmt is not None + else () + ) + + records = ( + ( + None, + state_dict, + params, + mapper, + connection, + value_params, + has_all_pks, + has_all_defaults, + ) + for ( + state, + state_dict, + params, + mp, + conn, + value_params, + has_all_pks, + has_all_defaults, + ) in persistence._collect_insert_commands( + table, + ((None, mapping, mapper, connection) for mapping in mappings), + bulk=True, + return_defaults=bookkeeping, + render_nulls=render_nulls, + include_bulk_keys=extra_bp_names, + ) + ) + + result = persistence._emit_insert_statements( + base_mapper, + None, + super_mapper, + table, + records, + bookkeeping=bookkeeping, + use_orm_insert_stmt=use_orm_insert_stmt, + execution_options=execution_options, + ) + if use_orm_insert_stmt is not None: + if not use_orm_insert_stmt._returning or return_result is None: + return_result = result + elif result.returns_rows: + assert bookkeeping + return_result = return_result.splice_horizontally(result) + + if return_defaults and isstates: + identity_cls = mapper._identity_class + identity_props = [p.key for p in mapper._identity_key_props] + for state, dict_ in states: + state.key = ( + identity_cls, + tuple([dict_[key] for key in identity_props]), + None, + ) + + if use_orm_insert_stmt is not None: + assert return_result is not None + return return_result + + +@overload +def _bulk_update( + mapper: Mapper[Any], + mappings: Union[Iterable[InstanceState[_O]], Iterable[Dict[str, Any]]], + session_transaction: SessionTransaction, + *, + isstates: bool, + update_changed_only: bool, + use_orm_update_stmt: Literal[None] = ..., + enable_check_rowcount: bool = True, +) -> None: ... 
+ + +@overload +def _bulk_update( + mapper: Mapper[Any], + mappings: Union[Iterable[InstanceState[_O]], Iterable[Dict[str, Any]]], + session_transaction: SessionTransaction, + *, + isstates: bool, + update_changed_only: bool, + use_orm_update_stmt: Optional[dml.Update] = ..., + enable_check_rowcount: bool = True, +) -> _result.Result[Any]: ... + + +def _bulk_update( + mapper: Mapper[Any], + mappings: Union[Iterable[InstanceState[_O]], Iterable[Dict[str, Any]]], + session_transaction: SessionTransaction, + *, + isstates: bool, + update_changed_only: bool, + use_orm_update_stmt: Optional[dml.Update] = None, + enable_check_rowcount: bool = True, +) -> Optional[_result.Result[Any]]: + base_mapper = mapper.base_mapper + + search_keys = mapper._primary_key_propkeys + if mapper._version_id_prop: + search_keys = {mapper._version_id_prop.key}.union(search_keys) + + def _changed_dict(mapper, state): + return { + k: v + for k, v in state.dict.items() + if k in state.committed_state or k in search_keys + } + + if isstates: + if update_changed_only: + mappings = [_changed_dict(mapper, state) for state in mappings] + else: + mappings = [state.dict for state in mappings] + else: + mappings = [dict(m) for m in mappings] + _expand_composites(mapper, mappings) + + if session_transaction.session.connection_callable: + raise NotImplementedError( + "connection_callable / per-instance sharding " + "not supported in bulk_update()" + ) + + connection = session_transaction.connection(base_mapper) + + # find bindparams in the statement. see _bulk_insert for similar + # notes for the insert case + extra_bp_names = ( + [ + b.key + for b in use_orm_update_stmt._get_embedded_bindparams() + if b.key in mappings[0] + ] + if use_orm_update_stmt is not None + else () + ) + + for table, super_mapper in base_mapper._sorted_tables.items(): + if not mapper.isa(super_mapper) or table not in mapper._pks_by_table: + continue + + records = persistence._collect_update_commands( + None, + table, + ( + ( + None, + mapping, + mapper, + connection, + ( + mapping[mapper._version_id_prop.key] + if mapper._version_id_prop + else None + ), + ) + for mapping in mappings + ), + bulk=True, + use_orm_update_stmt=use_orm_update_stmt, + include_bulk_keys=extra_bp_names, + ) + persistence._emit_update_statements( + base_mapper, + None, + super_mapper, + table, + records, + bookkeeping=False, + use_orm_update_stmt=use_orm_update_stmt, + enable_check_rowcount=enable_check_rowcount, + ) + + if use_orm_update_stmt is not None: + return _result.null_result() + + +def _expand_composites(mapper, mappings): + composite_attrs = mapper.composites + if not composite_attrs: + return + + composite_keys = set(composite_attrs.keys()) + populators = { + key: composite_attrs[key]._populate_composite_bulk_save_mappings_fn() + for key in composite_keys + } + for mapping in mappings: + for key in composite_keys.intersection(mapping): + populators[key](mapping) + + +class ORMDMLState(AbstractORMCompileState): + is_dml_returning = True + from_statement_ctx: Optional[ORMFromStatementCompileState] = None + + @classmethod + def _get_orm_crud_kv_pairs( + cls, mapper, statement, kv_iterator, needs_to_be_cacheable + ): + core_get_crud_kv_pairs = UpdateDMLState._get_crud_kv_pairs + + for k, v in kv_iterator: + k = coercions.expect(roles.DMLColumnRole, k) + + if isinstance(k, str): + desc = _entity_namespace_key(mapper, k, default=NO_VALUE) + if desc is NO_VALUE: + yield ( + coercions.expect(roles.DMLColumnRole, k), + ( + coercions.expect( + roles.ExpressionElementRole, + v, 
+ type_=sqltypes.NullType(), + is_crud=True, + ) + if needs_to_be_cacheable + else v + ), + ) + else: + yield from core_get_crud_kv_pairs( + statement, + desc._bulk_update_tuples(v), + needs_to_be_cacheable, + ) + elif "entity_namespace" in k._annotations: + k_anno = k._annotations + attr = _entity_namespace_key( + k_anno["entity_namespace"], k_anno["proxy_key"] + ) + yield from core_get_crud_kv_pairs( + statement, + attr._bulk_update_tuples(v), + needs_to_be_cacheable, + ) + else: + yield ( + k, + ( + v + if not needs_to_be_cacheable + else coercions.expect( + roles.ExpressionElementRole, + v, + type_=sqltypes.NullType(), + is_crud=True, + ) + ), + ) + + @classmethod + def _get_multi_crud_kv_pairs(cls, statement, kv_iterator): + plugin_subject = statement._propagate_attrs["plugin_subject"] + + if not plugin_subject or not plugin_subject.mapper: + return UpdateDMLState._get_multi_crud_kv_pairs( + statement, kv_iterator + ) + + return [ + dict( + cls._get_orm_crud_kv_pairs( + plugin_subject.mapper, statement, value_dict.items(), False + ) + ) + for value_dict in kv_iterator + ] + + @classmethod + def _get_crud_kv_pairs(cls, statement, kv_iterator, needs_to_be_cacheable): + assert ( + needs_to_be_cacheable + ), "no test coverage for needs_to_be_cacheable=False" + + plugin_subject = statement._propagate_attrs["plugin_subject"] + + if not plugin_subject or not plugin_subject.mapper: + return UpdateDMLState._get_crud_kv_pairs( + statement, kv_iterator, needs_to_be_cacheable + ) + + return list( + cls._get_orm_crud_kv_pairs( + plugin_subject.mapper, + statement, + kv_iterator, + needs_to_be_cacheable, + ) + ) + + @classmethod + def get_entity_description(cls, statement): + ext_info = statement.table._annotations["parententity"] + mapper = ext_info.mapper + if ext_info.is_aliased_class: + _label_name = ext_info.name + else: + _label_name = mapper.class_.__name__ + + return { + "name": _label_name, + "type": mapper.class_, + "expr": ext_info.entity, + "entity": ext_info.entity, + "table": mapper.local_table, + } + + @classmethod + def get_returning_column_descriptions(cls, statement): + def _ent_for_col(c): + return c._annotations.get("parententity", None) + + def _attr_for_col(c, ent): + if ent is None: + return c + proxy_key = c._annotations.get("proxy_key", None) + if not proxy_key: + return c + else: + return getattr(ent.entity, proxy_key, c) + + return [ + { + "name": c.key, + "type": c.type, + "expr": _attr_for_col(c, ent), + "aliased": ent.is_aliased_class, + "entity": ent.entity, + } + for c, ent in [ + (c, _ent_for_col(c)) for c in statement._all_selected_columns + ] + ] + + def _setup_orm_returning( + self, + compiler, + orm_level_statement, + dml_level_statement, + dml_mapper, + *, + use_supplemental_cols=True, + ): + """establish ORM column handlers for an INSERT, UPDATE, or DELETE + which uses explicit returning(). + + called within compilation level create_for_statement. + + The _return_orm_returning() method then receives the Result + after the statement was executed, and applies ORM loading to the + state that we first established here. 
+ + """ + + if orm_level_statement._returning: + fs = FromStatement( + orm_level_statement._returning, + dml_level_statement, + _adapt_on_names=False, + ) + fs = fs.execution_options(**orm_level_statement._execution_options) + fs = fs.options(*orm_level_statement._with_options) + self.select_statement = fs + self.from_statement_ctx = fsc = ( + ORMFromStatementCompileState.create_for_statement(fs, compiler) + ) + fsc.setup_dml_returning_compile_state(dml_mapper) + + dml_level_statement = dml_level_statement._generate() + dml_level_statement._returning = () + + cols_to_return = [c for c in fsc.primary_columns if c is not None] + + # since we are splicing result sets together, make sure there + # are columns of some kind returned in each result set + if not cols_to_return: + cols_to_return.extend(dml_mapper.primary_key) + + if use_supplemental_cols: + dml_level_statement = dml_level_statement.return_defaults( + # this is a little weird looking, but by passing + # primary key as the main list of cols, this tells + # return_defaults to omit server-default cols (and + # actually all cols, due to some weird thing we should + # clean up in crud.py). + # Since we have cols_to_return, just return what we asked + # for (plus primary key, which ORM persistence needs since + # we likely set bookkeeping=True here, which is another + # whole thing...). We dont want to clutter the + # statement up with lots of other cols the user didn't + # ask for. see #9685 + *dml_mapper.primary_key, + supplemental_cols=cols_to_return, + ) + else: + dml_level_statement = dml_level_statement.returning( + *cols_to_return + ) + + return dml_level_statement + + @classmethod + def _return_orm_returning( + cls, + session, + statement, + params, + execution_options, + bind_arguments, + result, + ): + execution_context = result.context + compile_state = execution_context.compiled.compile_state + + if ( + compile_state.from_statement_ctx + and not compile_state.from_statement_ctx.compile_options._is_star + ): + load_options = execution_options.get( + "_sa_orm_load_options", QueryContext.default_load_options + ) + + querycontext = QueryContext( + compile_state.from_statement_ctx, + compile_state.select_statement, + statement, + params, + session, + load_options, + execution_options, + bind_arguments, + ) + return loading.instances(result, querycontext) + else: + return result + + +class BulkUDCompileState(ORMDMLState): + class default_update_options(Options): + _dml_strategy: DMLStrategyArgument = "auto" + _synchronize_session: SynchronizeSessionArgument = "auto" + _can_use_returning: bool = False + _is_delete_using: bool = False + _is_update_from: bool = False + _autoflush: bool = True + _subject_mapper: Optional[Mapper[Any]] = None + _resolved_values = EMPTY_DICT + _eval_condition = None + _matched_rows = None + _identity_token = None + _populate_existing: bool = False + + @classmethod + def can_use_returning( + cls, + dialect: Dialect, + mapper: Mapper[Any], + *, + is_multitable: bool = False, + is_update_from: bool = False, + is_delete_using: bool = False, + is_executemany: bool = False, + ) -> bool: + raise NotImplementedError() + + @classmethod + def orm_pre_session_exec( + cls, + session, + statement, + params, + execution_options, + bind_arguments, + is_pre_event, + ): + ( + update_options, + execution_options, + ) = BulkUDCompileState.default_update_options.from_execution_options( + "_sa_orm_update_options", + { + "synchronize_session", + "autoflush", + "populate_existing", + "identity_token", + "is_delete_using", + 
"is_update_from", + "dml_strategy", + }, + execution_options, + statement._execution_options, + ) + bind_arguments["clause"] = statement + try: + plugin_subject = statement._propagate_attrs["plugin_subject"] + except KeyError: + assert False, "statement had 'orm' plugin but no plugin_subject" + else: + if plugin_subject: + bind_arguments["mapper"] = plugin_subject.mapper + update_options += {"_subject_mapper": plugin_subject.mapper} + + if "parententity" not in statement.table._annotations: + update_options += {"_dml_strategy": "core_only"} + elif not isinstance(params, list): + if update_options._dml_strategy == "auto": + update_options += {"_dml_strategy": "orm"} + elif update_options._dml_strategy == "bulk": + raise sa_exc.InvalidRequestError( + 'Can\'t use "bulk" ORM insert strategy without ' + "passing separate parameters" + ) + else: + if update_options._dml_strategy == "auto": + update_options += {"_dml_strategy": "bulk"} + + sync = update_options._synchronize_session + if sync is not None: + if sync not in ("auto", "evaluate", "fetch", False): + raise sa_exc.ArgumentError( + "Valid strategies for session synchronization " + "are 'auto', 'evaluate', 'fetch', False" + ) + if update_options._dml_strategy == "bulk" and sync == "fetch": + raise sa_exc.InvalidRequestError( + "The 'fetch' synchronization strategy is not available " + "for 'bulk' ORM updates (i.e. multiple parameter sets)" + ) + + if not is_pre_event: + if update_options._autoflush: + session._autoflush() + + if update_options._dml_strategy == "orm": + if update_options._synchronize_session == "auto": + update_options = cls._do_pre_synchronize_auto( + session, + statement, + params, + execution_options, + bind_arguments, + update_options, + ) + elif update_options._synchronize_session == "evaluate": + update_options = cls._do_pre_synchronize_evaluate( + session, + statement, + params, + execution_options, + bind_arguments, + update_options, + ) + elif update_options._synchronize_session == "fetch": + update_options = cls._do_pre_synchronize_fetch( + session, + statement, + params, + execution_options, + bind_arguments, + update_options, + ) + elif update_options._dml_strategy == "bulk": + if update_options._synchronize_session == "auto": + update_options += {"_synchronize_session": "evaluate"} + + # indicators from the "pre exec" step that are then + # added to the DML statement, which will also be part of the cache + # key. The compile level create_for_statement() method will then + # consume these at compiler time. + statement = statement._annotate( + { + "synchronize_session": update_options._synchronize_session, + "is_delete_using": update_options._is_delete_using, + "is_update_from": update_options._is_update_from, + "dml_strategy": update_options._dml_strategy, + "can_use_returning": update_options._can_use_returning, + } + ) + + return ( + statement, + util.immutabledict(execution_options).union( + {"_sa_orm_update_options": update_options} + ), + ) + + @classmethod + def orm_setup_cursor_result( + cls, + session, + statement, + params, + execution_options, + bind_arguments, + result, + ): + # this stage of the execution is called after the + # do_orm_execute event hook. meaning for an extension like + # horizontal sharding, this step happens *within* the horizontal + # sharding event handler which calls session.execute() re-entrantly + # and will occur for each backend individually. + # the sharding extension then returns its own merged result from the + # individual ones we return here. 
+ + update_options = execution_options["_sa_orm_update_options"] + if update_options._dml_strategy == "orm": + if update_options._synchronize_session == "evaluate": + cls._do_post_synchronize_evaluate( + session, statement, result, update_options + ) + elif update_options._synchronize_session == "fetch": + cls._do_post_synchronize_fetch( + session, statement, result, update_options + ) + elif update_options._dml_strategy == "bulk": + if update_options._synchronize_session == "evaluate": + cls._do_post_synchronize_bulk_evaluate( + session, params, result, update_options + ) + return result + + return cls._return_orm_returning( + session, + statement, + params, + execution_options, + bind_arguments, + result, + ) + + @classmethod + def _adjust_for_extra_criteria(cls, global_attributes, ext_info): + """Apply extra criteria filtering. + + For all distinct single-table-inheritance mappers represented in the + table being updated or deleted, produce additional WHERE criteria such + that only the appropriate subtypes are selected from the total results. + + Additionally, add WHERE criteria originating from LoaderCriteriaOptions + collected from the statement. + + """ + + return_crit = () + + adapter = ext_info._adapter if ext_info.is_aliased_class else None + + if ( + "additional_entity_criteria", + ext_info.mapper, + ) in global_attributes: + return_crit += tuple( + ae._resolve_where_criteria(ext_info) + for ae in global_attributes[ + ("additional_entity_criteria", ext_info.mapper) + ] + if ae.include_aliases or ae.entity is ext_info + ) + + if ext_info.mapper._single_table_criterion is not None: + return_crit += (ext_info.mapper._single_table_criterion,) + + if adapter: + return_crit = tuple(adapter.traverse(crit) for crit in return_crit) + + return return_crit + + @classmethod + def _interpret_returning_rows(cls, result, mapper, rows): + """return rows that indicate PK cols in mapper.primary_key position + for RETURNING rows. + + Prior to 2.0.36, this method seemed to be written for some kind of + inheritance scenario but the scenario was unused for actual joined + inheritance, and the function instead seemed to perform some kind of + partial translation that would remove non-PK cols if the PK cols + happened to be first in the row, but not otherwise. The joined + inheritance walk feature here seems to have never been used as it was + always skipped by the "local_table" check. + + As of 2.0.36 the function strips away non-PK cols and provides the + PK cols for the table in mapper PK order. 
+ + """ + + try: + if mapper.local_table is not mapper.base_mapper.local_table: + # TODO: dive more into how a local table PK is used for fetch + # sync, not clear if this is correct as it depends on the + # downstream routine to fetch rows using + # local_table.primary_key order + pk_keys = result._tuple_getter(mapper.local_table.primary_key) + else: + pk_keys = result._tuple_getter(mapper.primary_key) + except KeyError: + # can't use these rows, they don't have PK cols in them + # this is an unusual case where the user would have used + # .return_defaults() + return [] + + return [pk_keys(row) for row in rows] + + @classmethod + def _get_matched_objects_on_criteria(cls, update_options, states): + mapper = update_options._subject_mapper + eval_condition = update_options._eval_condition + + raw_data = [ + (state.obj(), state, state.dict) + for state in states + if state.mapper.isa(mapper) and not state.expired + ] + + identity_token = update_options._identity_token + if identity_token is not None: + raw_data = [ + (obj, state, dict_) + for obj, state, dict_ in raw_data + if state.identity_token == identity_token + ] + + result = [] + for obj, state, dict_ in raw_data: + evaled_condition = eval_condition(obj) + + # caution: don't use "in ()" or == here, _EXPIRE_OBJECT + # evaluates as True for all comparisons + if ( + evaled_condition is True + or evaled_condition is evaluator._EXPIRED_OBJECT + ): + result.append( + ( + obj, + state, + dict_, + evaled_condition is evaluator._EXPIRED_OBJECT, + ) + ) + return result + + @classmethod + def _eval_condition_from_statement(cls, update_options, statement): + mapper = update_options._subject_mapper + target_cls = mapper.class_ + + evaluator_compiler = evaluator._EvaluatorCompiler(target_cls) + crit = () + if statement._where_criteria: + crit += statement._where_criteria + + global_attributes = {} + for opt in statement._with_options: + if opt._is_criteria_option: + opt.get_global_criteria(global_attributes) + + if global_attributes: + crit += cls._adjust_for_extra_criteria(global_attributes, mapper) + + if crit: + eval_condition = evaluator_compiler.process(*crit) + else: + # workaround for mypy https://github.com/python/mypy/issues/14027 + def _eval_condition(obj): + return True + + eval_condition = _eval_condition + + return eval_condition + + @classmethod + def _do_pre_synchronize_auto( + cls, + session, + statement, + params, + execution_options, + bind_arguments, + update_options, + ): + """setup auto sync strategy + + + "auto" checks if we can use "evaluate" first, then falls back + to "fetch" + + evaluate is vastly more efficient for the common case + where session is empty, only has a few objects, and the UPDATE + statement can potentially match thousands/millions of rows. + + OTOH more complex criteria that fails to work with "evaluate" + we would hope usually correlates with fewer net rows. 
+ + """ + + try: + eval_condition = cls._eval_condition_from_statement( + update_options, statement + ) + + except evaluator.UnevaluatableError: + pass + else: + return update_options + { + "_eval_condition": eval_condition, + "_synchronize_session": "evaluate", + } + + update_options += {"_synchronize_session": "fetch"} + return cls._do_pre_synchronize_fetch( + session, + statement, + params, + execution_options, + bind_arguments, + update_options, + ) + + @classmethod + def _do_pre_synchronize_evaluate( + cls, + session, + statement, + params, + execution_options, + bind_arguments, + update_options, + ): + try: + eval_condition = cls._eval_condition_from_statement( + update_options, statement + ) + + except evaluator.UnevaluatableError as err: + raise sa_exc.InvalidRequestError( + 'Could not evaluate current criteria in Python: "%s". ' + "Specify 'fetch' or False for the " + "synchronize_session execution option." % err + ) from err + + return update_options + { + "_eval_condition": eval_condition, + } + + @classmethod + def _get_resolved_values(cls, mapper, statement): + if statement._multi_values: + return [] + elif statement._ordered_values: + return list(statement._ordered_values) + elif statement._values: + return list(statement._values.items()) + else: + return [] + + @classmethod + def _resolved_keys_as_propnames(cls, mapper, resolved_values): + values = [] + for k, v in resolved_values: + if mapper and isinstance(k, expression.ColumnElement): + try: + attr = mapper._columntoproperty[k] + except orm_exc.UnmappedColumnError: + pass + else: + values.append((attr.key, v)) + else: + raise sa_exc.InvalidRequestError( + "Attribute name not found, can't be " + "synchronized back to objects: %r" % k + ) + return values + + @classmethod + def _do_pre_synchronize_fetch( + cls, + session, + statement, + params, + execution_options, + bind_arguments, + update_options, + ): + mapper = update_options._subject_mapper + + select_stmt = ( + select(*(mapper.primary_key + (mapper.select_identity_token,))) + .select_from(mapper) + .options(*statement._with_options) + ) + select_stmt._where_criteria = statement._where_criteria + + # conditionally run the SELECT statement for pre-fetch, testing the + # "bind" for if we can use RETURNING or not using the do_orm_execute + # event. If RETURNING is available, the do_orm_execute event + # will cancel the SELECT from being actually run. + # + # The way this is organized seems strange, why don't we just + # call can_use_returning() before invoking the statement and get + # answer?, why does this go through the whole execute phase using an + # event? Answer: because we are integrating with extensions such + # as the horizontal sharding extention that "multiplexes" an individual + # statement run through multiple engines, and it uses + # do_orm_execute() to do that. 
+ + can_use_returning = None + + def skip_for_returning(orm_context: ORMExecuteState) -> Any: + bind = orm_context.session.get_bind(**orm_context.bind_arguments) + nonlocal can_use_returning + + per_bind_result = cls.can_use_returning( + bind.dialect, + mapper, + is_update_from=update_options._is_update_from, + is_delete_using=update_options._is_delete_using, + is_executemany=orm_context.is_executemany, + ) + + if can_use_returning is not None: + if can_use_returning != per_bind_result: + raise sa_exc.InvalidRequestError( + "For synchronize_session='fetch', can't mix multiple " + "backends where some support RETURNING and others " + "don't" + ) + elif orm_context.is_executemany and not per_bind_result: + raise sa_exc.InvalidRequestError( + "For synchronize_session='fetch', can't use multiple " + "parameter sets in ORM mode, which this backend does not " + "support with RETURNING" + ) + else: + can_use_returning = per_bind_result + + if per_bind_result: + return _result.null_result() + else: + return None + + result = session.execute( + select_stmt, + params, + execution_options=execution_options, + bind_arguments=bind_arguments, + _add_event=skip_for_returning, + ) + matched_rows = result.fetchall() + + return update_options + { + "_matched_rows": matched_rows, + "_can_use_returning": can_use_returning, + } + + +@CompileState.plugin_for("orm", "insert") +class BulkORMInsert(ORMDMLState, InsertDMLState): + class default_insert_options(Options): + _dml_strategy: DMLStrategyArgument = "auto" + _render_nulls: bool = False + _return_defaults: bool = False + _subject_mapper: Optional[Mapper[Any]] = None + _autoflush: bool = True + _populate_existing: bool = False + + select_statement: Optional[FromStatement] = None + + @classmethod + def orm_pre_session_exec( + cls, + session, + statement, + params, + execution_options, + bind_arguments, + is_pre_event, + ): + ( + insert_options, + execution_options, + ) = BulkORMInsert.default_insert_options.from_execution_options( + "_sa_orm_insert_options", + {"dml_strategy", "autoflush", "populate_existing", "render_nulls"}, + execution_options, + statement._execution_options, + ) + bind_arguments["clause"] = statement + try: + plugin_subject = statement._propagate_attrs["plugin_subject"] + except KeyError: + assert False, "statement had 'orm' plugin but no plugin_subject" + else: + if plugin_subject: + bind_arguments["mapper"] = plugin_subject.mapper + insert_options += {"_subject_mapper": plugin_subject.mapper} + + if not params: + if insert_options._dml_strategy == "auto": + insert_options += {"_dml_strategy": "orm"} + elif insert_options._dml_strategy == "bulk": + raise sa_exc.InvalidRequestError( + 'Can\'t use "bulk" ORM insert strategy without ' + "passing separate parameters" + ) + else: + if insert_options._dml_strategy == "auto": + insert_options += {"_dml_strategy": "bulk"} + + if insert_options._dml_strategy != "raw": + # for ORM object loading, like ORMContext, we have to disable + # result set adapt_to_context, because we will be generating a + # new statement with specific columns that's cached inside of + # an ORMFromStatementCompileState, which we will re-use for + # each result. 
+ if not execution_options: + execution_options = context._orm_load_exec_options + else: + execution_options = execution_options.union( + context._orm_load_exec_options + ) + + if not is_pre_event and insert_options._autoflush: + session._autoflush() + + statement = statement._annotate( + {"dml_strategy": insert_options._dml_strategy} + ) + + return ( + statement, + util.immutabledict(execution_options).union( + {"_sa_orm_insert_options": insert_options} + ), + ) + + @classmethod + def orm_execute_statement( + cls, + session: Session, + statement: dml.Insert, + params: _CoreAnyExecuteParams, + execution_options: OrmExecuteOptionsParameter, + bind_arguments: _BindArguments, + conn: Connection, + ) -> _result.Result: + insert_options = execution_options.get( + "_sa_orm_insert_options", cls.default_insert_options + ) + + if insert_options._dml_strategy not in ( + "raw", + "bulk", + "orm", + "auto", + ): + raise sa_exc.ArgumentError( + "Valid strategies for ORM insert strategy " + "are 'raw', 'orm', 'bulk', 'auto" + ) + + result: _result.Result[Any] + + if insert_options._dml_strategy == "raw": + result = conn.execute( + statement, params or {}, execution_options=execution_options + ) + return result + + if insert_options._dml_strategy == "bulk": + mapper = insert_options._subject_mapper + + if ( + statement._post_values_clause is not None + and mapper._multiple_persistence_tables + ): + raise sa_exc.InvalidRequestError( + "bulk INSERT with a 'post values' clause " + "(typically upsert) not supported for multi-table " + f"mapper {mapper}" + ) + + assert mapper is not None + assert session._transaction is not None + result = _bulk_insert( + mapper, + cast( + "Iterable[Dict[str, Any]]", + [params] if isinstance(params, dict) else params, + ), + session._transaction, + isstates=False, + return_defaults=insert_options._return_defaults, + render_nulls=insert_options._render_nulls, + use_orm_insert_stmt=statement, + execution_options=execution_options, + ) + elif insert_options._dml_strategy == "orm": + result = conn.execute( + statement, params or {}, execution_options=execution_options + ) + else: + raise AssertionError() + + if not bool(statement._returning): + return result + + if insert_options._populate_existing: + load_options = execution_options.get( + "_sa_orm_load_options", QueryContext.default_load_options + ) + load_options += {"_populate_existing": True} + execution_options = execution_options.union( + {"_sa_orm_load_options": load_options} + ) + + return cls._return_orm_returning( + session, + statement, + params, + execution_options, + bind_arguments, + result, + ) + + @classmethod + def create_for_statement(cls, statement, compiler, **kw) -> BulkORMInsert: + self = cast( + BulkORMInsert, + super().create_for_statement(statement, compiler, **kw), + ) + + if compiler is not None: + toplevel = not compiler.stack + else: + toplevel = True + if not toplevel: + return self + + mapper = statement._propagate_attrs["plugin_subject"] + dml_strategy = statement._annotations.get("dml_strategy", "raw") + if dml_strategy == "bulk": + self._setup_for_bulk_insert(compiler) + elif dml_strategy == "orm": + self._setup_for_orm_insert(compiler, mapper) + + return self + + @classmethod + def _resolved_keys_as_col_keys(cls, mapper, resolved_value_dict): + return { + col.key if col is not None else k: v + for col, k, v in ( + (mapper.c.get(k), k, v) for k, v in resolved_value_dict.items() + ) + } + + def _setup_for_orm_insert(self, compiler, mapper): + statement = orm_level_statement = cast(dml.Insert, 
self.statement) + + statement = self._setup_orm_returning( + compiler, + orm_level_statement, + statement, + dml_mapper=mapper, + use_supplemental_cols=False, + ) + self.statement = statement + + def _setup_for_bulk_insert(self, compiler): + """establish an INSERT statement within the context of + bulk insert. + + This method will be within the "conn.execute()" call that is invoked + by persistence._emit_insert_statement(). + + """ + statement = orm_level_statement = cast(dml.Insert, self.statement) + an = statement._annotations + + emit_insert_table, emit_insert_mapper = ( + an["_emit_insert_table"], + an["_emit_insert_mapper"], + ) + + statement = statement._clone() + + statement.table = emit_insert_table + if self._dict_parameters: + self._dict_parameters = { + col: val + for col, val in self._dict_parameters.items() + if col.table is emit_insert_table + } + + statement = self._setup_orm_returning( + compiler, + orm_level_statement, + statement, + dml_mapper=emit_insert_mapper, + use_supplemental_cols=True, + ) + + if ( + self.from_statement_ctx is not None + and self.from_statement_ctx.compile_options._is_star + ): + raise sa_exc.CompileError( + "Can't use RETURNING * with bulk ORM INSERT. " + "Please use a different INSERT form, such as INSERT..VALUES " + "or INSERT with a Core Connection" + ) + + self.statement = statement + + +@CompileState.plugin_for("orm", "update") +class BulkORMUpdate(BulkUDCompileState, UpdateDMLState): + @classmethod + def create_for_statement(cls, statement, compiler, **kw): + self = cls.__new__(cls) + + dml_strategy = statement._annotations.get( + "dml_strategy", "unspecified" + ) + + toplevel = not compiler.stack + + if toplevel and dml_strategy == "bulk": + self._setup_for_bulk_update(statement, compiler) + elif ( + dml_strategy == "core_only" + or dml_strategy == "unspecified" + and "parententity" not in statement.table._annotations + ): + UpdateDMLState.__init__(self, statement, compiler, **kw) + elif not toplevel or dml_strategy in ("orm", "unspecified"): + self._setup_for_orm_update(statement, compiler) + + return self + + def _setup_for_orm_update(self, statement, compiler, **kw): + orm_level_statement = statement + + toplevel = not compiler.stack + + ext_info = statement.table._annotations["parententity"] + + self.mapper = mapper = ext_info.mapper + + self._resolved_values = self._get_resolved_values(mapper, statement) + + self._init_global_attributes( + statement, + compiler, + toplevel=toplevel, + process_criteria_for_toplevel=toplevel, + ) + + if statement._values: + self._resolved_values = dict(self._resolved_values) + + new_stmt = statement._clone() + + if new_stmt.table._annotations["parententity"] is mapper: + new_stmt.table = mapper.local_table + + # note if the statement has _multi_values, these + # are passed through to the new statement, which will then raise + # InvalidRequestError because UPDATE doesn't support multi_values + # right now. 
+ if statement._ordered_values: + new_stmt._ordered_values = self._resolved_values + elif statement._values: + new_stmt._values = self._resolved_values + + new_crit = self._adjust_for_extra_criteria( + self.global_attributes, mapper + ) + if new_crit: + new_stmt = new_stmt.where(*new_crit) + + # if we are against a lambda statement we might not be the + # topmost object that received per-execute annotations + + # do this first as we need to determine if there is + # UPDATE..FROM + + UpdateDMLState.__init__(self, new_stmt, compiler, **kw) + + use_supplemental_cols = False + + if not toplevel: + synchronize_session = None + else: + synchronize_session = compiler._annotations.get( + "synchronize_session", None + ) + can_use_returning = compiler._annotations.get( + "can_use_returning", None + ) + if can_use_returning is not False: + # even though pre_exec has determined basic + # can_use_returning for the dialect, if we are to use + # RETURNING we need to run can_use_returning() at this level + # unconditionally because is_delete_using was not known + # at the pre_exec level + can_use_returning = ( + synchronize_session == "fetch" + and self.can_use_returning( + compiler.dialect, mapper, is_multitable=self.is_multitable + ) + ) + + if synchronize_session == "fetch" and can_use_returning: + use_supplemental_cols = True + + # NOTE: we might want to RETURNING the actual columns to be + # synchronized also. however this is complicated and difficult + # to align against the behavior of "evaluate". Additionally, + # in a large number (if not the majority) of cases, we have the + # "evaluate" answer, usually a fixed value, in memory already and + # there's no need to re-fetch the same value + # over and over again. so perhaps if it could be RETURNING just + # the elements that were based on a SQL expression and not + # a constant. For now it doesn't quite seem worth it + new_stmt = new_stmt.return_defaults(*new_stmt.table.primary_key) + + if toplevel: + new_stmt = self._setup_orm_returning( + compiler, + orm_level_statement, + new_stmt, + dml_mapper=mapper, + use_supplemental_cols=use_supplemental_cols, + ) + + self.statement = new_stmt + + def _setup_for_bulk_update(self, statement, compiler, **kw): + """establish an UPDATE statement within the context of + bulk insert. + + This method will be within the "conn.execute()" call that is invoked + by persistence._emit_update_statement(). + + """ + statement = cast(dml.Update, statement) + an = statement._annotations + + emit_update_table, _ = ( + an["_emit_update_table"], + an["_emit_update_mapper"], + ) + + statement = statement._clone() + statement.table = emit_update_table + + UpdateDMLState.__init__(self, statement, compiler, **kw) + + if self._ordered_values: + raise sa_exc.InvalidRequestError( + "bulk ORM UPDATE does not support ordered_values() for " + "custom UPDATE statements with bulk parameter sets. Use a " + "non-bulk UPDATE statement or use values()." 
+ ) + + if self._dict_parameters: + self._dict_parameters = { + col: val + for col, val in self._dict_parameters.items() + if col.table is emit_update_table + } + self.statement = statement + + @classmethod + def orm_execute_statement( + cls, + session: Session, + statement: dml.Update, + params: _CoreAnyExecuteParams, + execution_options: OrmExecuteOptionsParameter, + bind_arguments: _BindArguments, + conn: Connection, + ) -> _result.Result: + + update_options = execution_options.get( + "_sa_orm_update_options", cls.default_update_options + ) + + if update_options._populate_existing: + load_options = execution_options.get( + "_sa_orm_load_options", QueryContext.default_load_options + ) + load_options += {"_populate_existing": True} + execution_options = execution_options.union( + {"_sa_orm_load_options": load_options} + ) + + if update_options._dml_strategy not in ( + "orm", + "auto", + "bulk", + "core_only", + ): + raise sa_exc.ArgumentError( + "Valid strategies for ORM UPDATE strategy " + "are 'orm', 'auto', 'bulk', 'core_only'" + ) + + result: _result.Result[Any] + + if update_options._dml_strategy == "bulk": + enable_check_rowcount = not statement._where_criteria + + assert update_options._synchronize_session != "fetch" + + if ( + statement._where_criteria + and update_options._synchronize_session == "evaluate" + ): + raise sa_exc.InvalidRequestError( + "bulk synchronize of persistent objects not supported " + "when using bulk update with additional WHERE " + "criteria right now. add synchronize_session=None " + "execution option to bypass synchronize of persistent " + "objects." + ) + mapper = update_options._subject_mapper + assert mapper is not None + assert session._transaction is not None + result = _bulk_update( + mapper, + cast( + "Iterable[Dict[str, Any]]", + [params] if isinstance(params, dict) else params, + ), + session._transaction, + isstates=False, + update_changed_only=False, + use_orm_update_stmt=statement, + enable_check_rowcount=enable_check_rowcount, + ) + return cls.orm_setup_cursor_result( + session, + statement, + params, + execution_options, + bind_arguments, + result, + ) + else: + return super().orm_execute_statement( + session, + statement, + params, + execution_options, + bind_arguments, + conn, + ) + + @classmethod + def can_use_returning( + cls, + dialect: Dialect, + mapper: Mapper[Any], + *, + is_multitable: bool = False, + is_update_from: bool = False, + is_delete_using: bool = False, + is_executemany: bool = False, + ) -> bool: + # normal answer for "should we use RETURNING" at all. + normal_answer = ( + dialect.update_returning and mapper.local_table.implicit_returning + ) + if not normal_answer: + return False + + if is_executemany: + return dialect.update_executemany_returning + + # these workarounds are currently hypothetical for UPDATE, + # unlike DELETE where they impact MariaDB + if is_update_from: + return dialect.update_returning_multifrom + + elif is_multitable and not dialect.update_returning_multifrom: + raise sa_exc.CompileError( + f'Dialect "{dialect.name}" does not support RETURNING ' + "with UPDATE..FROM; for synchronize_session='fetch', " + "please add the additional execution option " + "'is_update_from=True' to the statement to indicate that " + "a separate SELECT should be used for this backend." 
+ ) + + return True + + @classmethod + def _do_post_synchronize_bulk_evaluate( + cls, session, params, result, update_options + ): + if not params: + return + + mapper = update_options._subject_mapper + pk_keys = [prop.key for prop in mapper._identity_key_props] + + identity_map = session.identity_map + + for param in params: + identity_key = mapper.identity_key_from_primary_key( + (param[key] for key in pk_keys), + update_options._identity_token, + ) + state = identity_map.fast_get_state(identity_key) + if not state: + continue + + evaluated_keys = set(param).difference(pk_keys) + + dict_ = state.dict + # only evaluate unmodified attributes + to_evaluate = state.unmodified.intersection(evaluated_keys) + for key in to_evaluate: + if key in dict_: + dict_[key] = param[key] + + state.manager.dispatch.refresh(state, None, to_evaluate) + + state._commit(dict_, list(to_evaluate)) + + # attributes that were formerly modified instead get expired. + # this only gets hit if the session had pending changes + # and autoflush were set to False. + to_expire = evaluated_keys.intersection(dict_).difference( + to_evaluate + ) + if to_expire: + state._expire_attributes(dict_, to_expire) + + @classmethod + def _do_post_synchronize_evaluate( + cls, session, statement, result, update_options + ): + matched_objects = cls._get_matched_objects_on_criteria( + update_options, + session.identity_map.all_states(), + ) + + cls._apply_update_set_values_to_objects( + session, + update_options, + statement, + result.context.compiled_parameters[0], + [(obj, state, dict_) for obj, state, dict_, _ in matched_objects], + result.prefetch_cols(), + result.postfetch_cols(), + ) + + @classmethod + def _do_post_synchronize_fetch( + cls, session, statement, result, update_options + ): + target_mapper = update_options._subject_mapper + + returned_defaults_rows = result.returned_defaults_rows + if returned_defaults_rows: + pk_rows = cls._interpret_returning_rows( + result, target_mapper, returned_defaults_rows + ) + matched_rows = [ + tuple(row) + (update_options._identity_token,) + for row in pk_rows + ] + else: + matched_rows = update_options._matched_rows + + objs = [ + session.identity_map[identity_key] + for identity_key in [ + target_mapper.identity_key_from_primary_key( + list(primary_key), + identity_token=identity_token, + ) + for primary_key, identity_token in [ + (row[0:-1], row[-1]) for row in matched_rows + ] + if update_options._identity_token is None + or identity_token == update_options._identity_token + ] + if identity_key in session.identity_map + ] + + if not objs: + return + + cls._apply_update_set_values_to_objects( + session, + update_options, + statement, + result.context.compiled_parameters[0], + [ + ( + obj, + attributes.instance_state(obj), + attributes.instance_dict(obj), + ) + for obj in objs + ], + result.prefetch_cols(), + result.postfetch_cols(), + ) + + @classmethod + def _apply_update_set_values_to_objects( + cls, + session, + update_options, + statement, + effective_params, + matched_objects, + prefetch_cols, + postfetch_cols, + ): + """apply values to objects derived from an update statement, e.g. 
+ UPDATE..SET + + """ + + mapper = update_options._subject_mapper + target_cls = mapper.class_ + evaluator_compiler = evaluator._EvaluatorCompiler(target_cls) + resolved_values = cls._get_resolved_values(mapper, statement) + resolved_keys_as_propnames = cls._resolved_keys_as_propnames( + mapper, resolved_values + ) + value_evaluators = {} + for key, value in resolved_keys_as_propnames: + try: + _evaluator = evaluator_compiler.process( + coercions.expect(roles.ExpressionElementRole, value) + ) + except evaluator.UnevaluatableError: + pass + else: + value_evaluators[key] = _evaluator + + evaluated_keys = list(value_evaluators.keys()) + attrib = {k for k, v in resolved_keys_as_propnames} + + states = set() + + to_prefetch = { + c + for c in prefetch_cols + if c.key in effective_params + and c in mapper._columntoproperty + and c.key not in evaluated_keys + } + to_expire = { + mapper._columntoproperty[c].key + for c in postfetch_cols + if c in mapper._columntoproperty + }.difference(evaluated_keys) + + prefetch_transfer = [ + (mapper._columntoproperty[c].key, c.key) for c in to_prefetch + ] + + for obj, state, dict_ in matched_objects: + + dict_.update( + { + col_to_prop: effective_params[c_key] + for col_to_prop, c_key in prefetch_transfer + } + ) + + state._expire_attributes(state.dict, to_expire) + + to_evaluate = state.unmodified.intersection(evaluated_keys) + + for key in to_evaluate: + if key in dict_: + # only run eval for attributes that are present. + dict_[key] = value_evaluators[key](obj) + + state.manager.dispatch.refresh(state, None, to_evaluate) + + state._commit(dict_, list(to_evaluate)) + + # attributes that were formerly modified instead get expired. + # this only gets hit if the session had pending changes + # and autoflush were set to False. 
+ to_expire = attrib.intersection(dict_).difference(to_evaluate) + if to_expire: + state._expire_attributes(dict_, to_expire) + + states.add(state) + session._register_altered(states) + + +@CompileState.plugin_for("orm", "delete") +class BulkORMDelete(BulkUDCompileState, DeleteDMLState): + @classmethod + def create_for_statement(cls, statement, compiler, **kw): + self = cls.__new__(cls) + + dml_strategy = statement._annotations.get( + "dml_strategy", "unspecified" + ) + + if ( + dml_strategy == "core_only" + or dml_strategy == "unspecified" + and "parententity" not in statement.table._annotations + ): + DeleteDMLState.__init__(self, statement, compiler, **kw) + return self + + toplevel = not compiler.stack + + orm_level_statement = statement + + ext_info = statement.table._annotations["parententity"] + self.mapper = mapper = ext_info.mapper + + self._init_global_attributes( + statement, + compiler, + toplevel=toplevel, + process_criteria_for_toplevel=toplevel, + ) + + new_stmt = statement._clone() + + if new_stmt.table._annotations["parententity"] is mapper: + new_stmt.table = mapper.local_table + + new_crit = cls._adjust_for_extra_criteria( + self.global_attributes, mapper + ) + if new_crit: + new_stmt = new_stmt.where(*new_crit) + + # do this first as we need to determine if there is + # DELETE..FROM + DeleteDMLState.__init__(self, new_stmt, compiler, **kw) + + use_supplemental_cols = False + + if not toplevel: + synchronize_session = None + else: + synchronize_session = compiler._annotations.get( + "synchronize_session", None + ) + can_use_returning = compiler._annotations.get( + "can_use_returning", None + ) + if can_use_returning is not False: + # even though pre_exec has determined basic + # can_use_returning for the dialect, if we are to use + # RETURNING we need to run can_use_returning() at this level + # unconditionally because is_delete_using was not known + # at the pre_exec level + can_use_returning = ( + synchronize_session == "fetch" + and self.can_use_returning( + compiler.dialect, + mapper, + is_multitable=self.is_multitable, + is_delete_using=compiler._annotations.get( + "is_delete_using", False + ), + ) + ) + + if can_use_returning: + use_supplemental_cols = True + + new_stmt = new_stmt.return_defaults(*new_stmt.table.primary_key) + + if toplevel: + new_stmt = self._setup_orm_returning( + compiler, + orm_level_statement, + new_stmt, + dml_mapper=mapper, + use_supplemental_cols=use_supplemental_cols, + ) + + self.statement = new_stmt + + return self + + @classmethod + def orm_execute_statement( + cls, + session: Session, + statement: dml.Delete, + params: _CoreAnyExecuteParams, + execution_options: OrmExecuteOptionsParameter, + bind_arguments: _BindArguments, + conn: Connection, + ) -> _result.Result: + update_options = execution_options.get( + "_sa_orm_update_options", cls.default_update_options + ) + + if update_options._dml_strategy == "bulk": + raise sa_exc.InvalidRequestError( + "Bulk ORM DELETE not supported right now. 
" + "Statement may be invoked at the " + "Core level using " + "session.connection().execute(stmt, parameters)" + ) + + if update_options._dml_strategy not in ("orm", "auto", "core_only"): + raise sa_exc.ArgumentError( + "Valid strategies for ORM DELETE strategy are 'orm', 'auto', " + "'core_only'" + ) + + return super().orm_execute_statement( + session, statement, params, execution_options, bind_arguments, conn + ) + + @classmethod + def can_use_returning( + cls, + dialect: Dialect, + mapper: Mapper[Any], + *, + is_multitable: bool = False, + is_update_from: bool = False, + is_delete_using: bool = False, + is_executemany: bool = False, + ) -> bool: + # normal answer for "should we use RETURNING" at all. + normal_answer = ( + dialect.delete_returning and mapper.local_table.implicit_returning + ) + if not normal_answer: + return False + + # now get into special workarounds because MariaDB supports + # DELETE...RETURNING but not DELETE...USING...RETURNING. + if is_delete_using: + # is_delete_using hint was passed. use + # additional dialect feature (True for PG, False for MariaDB) + return dialect.delete_returning_multifrom + + elif is_multitable and not dialect.delete_returning_multifrom: + # is_delete_using hint was not passed, but we determined + # at compile time that this is in fact a DELETE..USING. + # it's too late to continue since we did not pre-SELECT. + # raise that we need that hint up front. + + raise sa_exc.CompileError( + f'Dialect "{dialect.name}" does not support RETURNING ' + "with DELETE..USING; for synchronize_session='fetch', " + "please add the additional execution option " + "'is_delete_using=True' to the statement to indicate that " + "a separate SELECT should be used for this backend." + ) + + return True + + @classmethod + def _do_post_synchronize_evaluate( + cls, session, statement, result, update_options + ): + matched_objects = cls._get_matched_objects_on_criteria( + update_options, + session.identity_map.all_states(), + ) + + to_delete = [] + + for _, state, dict_, is_partially_expired in matched_objects: + if is_partially_expired: + state._expire(dict_, session.identity_map._modified) + else: + to_delete.append(state) + + if to_delete: + session._remove_newly_deleted(to_delete) + + @classmethod + def _do_post_synchronize_fetch( + cls, session, statement, result, update_options + ): + target_mapper = update_options._subject_mapper + + returned_defaults_rows = result.returned_defaults_rows + + if returned_defaults_rows: + pk_rows = cls._interpret_returning_rows( + result, target_mapper, returned_defaults_rows + ) + + matched_rows = [ + tuple(row) + (update_options._identity_token,) + for row in pk_rows + ] + else: + matched_rows = update_options._matched_rows + + for row in matched_rows: + primary_key = row[0:-1] + identity_token = row[-1] + + # TODO: inline this and call remove_newly_deleted + # once + identity_key = target_mapper.identity_key_from_primary_key( + list(primary_key), + identity_token=identity_token, + ) + if identity_key in session.identity_map: + session._remove_newly_deleted( + [ + attributes.instance_state( + session.identity_map[identity_key] + ) + ] + ) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/clsregistry.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/clsregistry.py new file mode 100644 index 0000000000000000000000000000000000000000..fd4828e8559614049d38ba01f1cbd973b12ac286 --- /dev/null +++ 
b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/clsregistry.py @@ -0,0 +1,571 @@ +# orm/clsregistry.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php + +"""Routines to handle the string class registry used by declarative. + +This system allows specification of classes and expressions used in +:func:`_orm.relationship` using strings. + +""" + +from __future__ import annotations + +import re +from typing import Any +from typing import Callable +from typing import cast +from typing import Dict +from typing import Generator +from typing import Iterable +from typing import List +from typing import Mapping +from typing import MutableMapping +from typing import NoReturn +from typing import Optional +from typing import Set +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING +from typing import TypeVar +from typing import Union +import weakref + +from . import attributes +from . import interfaces +from .descriptor_props import SynonymProperty +from .properties import ColumnProperty +from .util import class_mapper +from .. import exc +from .. import inspection +from .. import util +from ..sql.schema import _get_table_key +from ..util.typing import CallableReference + +if TYPE_CHECKING: + from .relationships import RelationshipProperty + from ..sql.schema import MetaData + from ..sql.schema import Table + +_T = TypeVar("_T", bound=Any) + +_ClsRegistryType = MutableMapping[str, Union[type, "ClsRegistryToken"]] + +# strong references to registries which we place in +# the _decl_class_registry, which is usually weak referencing. +# the internal registries here link to classes with weakrefs and remove +# themselves when all references to contained classes are removed. +_registries: Set[ClsRegistryToken] = set() + + +def add_class( + classname: str, cls: Type[_T], decl_class_registry: _ClsRegistryType +) -> None: + """Add a class to the _decl_class_registry associated with the + given declarative class. + + """ + if classname in decl_class_registry: + # class already exists. + existing = decl_class_registry[classname] + if not isinstance(existing, _MultipleClassMarker): + decl_class_registry[classname] = _MultipleClassMarker( + [cls, cast("Type[Any]", existing)] + ) + else: + decl_class_registry[classname] = cls + + try: + root_module = cast( + _ModuleMarker, decl_class_registry["_sa_module_registry"] + ) + except KeyError: + decl_class_registry["_sa_module_registry"] = root_module = ( + _ModuleMarker("_sa_module_registry", None) + ) + + tokens = cls.__module__.split(".") + + # build up a tree like this: + # modulename: myapp.snacks.nuts + # + # myapp->snack->nuts->(classes) + # snack->nuts->(classes) + # nuts->(classes) + # + # this allows partial token paths to be used. 
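+    # each pass of the loop below pops the leading token and registers the
+    # class under the remaining suffix of the module path, so every partial
+    # path ("myapp.snacks.nuts", "snacks.nuts", "nuts") resolves to it.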
+ while tokens: + token = tokens.pop(0) + module = root_module.get_module(token) + for token in tokens: + module = module.get_module(token) + + try: + module.add_class(classname, cls) + except AttributeError as ae: + if not isinstance(module, _ModuleMarker): + raise exc.InvalidRequestError( + f'name "{classname}" matches both a ' + "class name and a module name" + ) from ae + else: + raise + + +def remove_class( + classname: str, cls: Type[Any], decl_class_registry: _ClsRegistryType +) -> None: + if classname in decl_class_registry: + existing = decl_class_registry[classname] + if isinstance(existing, _MultipleClassMarker): + existing.remove_item(cls) + else: + del decl_class_registry[classname] + + try: + root_module = cast( + _ModuleMarker, decl_class_registry["_sa_module_registry"] + ) + except KeyError: + return + + tokens = cls.__module__.split(".") + + while tokens: + token = tokens.pop(0) + module = root_module.get_module(token) + for token in tokens: + module = module.get_module(token) + try: + module.remove_class(classname, cls) + except AttributeError: + if not isinstance(module, _ModuleMarker): + pass + else: + raise + + +def _key_is_empty( + key: str, + decl_class_registry: _ClsRegistryType, + test: Callable[[Any], bool], +) -> bool: + """test if a key is empty of a certain object. + + used for unit tests against the registry to see if garbage collection + is working. + + "test" is a callable that will be passed an object should return True + if the given object is the one we were looking for. + + We can't pass the actual object itself b.c. this is for testing garbage + collection; the caller will have to have removed references to the + object itself. + + """ + if key not in decl_class_registry: + return True + + thing = decl_class_registry[key] + if isinstance(thing, _MultipleClassMarker): + for sub_thing in thing.contents: + if test(sub_thing): + return False + else: + raise NotImplementedError("unknown codepath") + else: + return not test(thing) + + +class ClsRegistryToken: + """an object that can be in the registry._class_registry as a value.""" + + __slots__ = () + + +class _MultipleClassMarker(ClsRegistryToken): + """refers to multiple classes of the same name + within _decl_class_registry. + + """ + + __slots__ = "on_remove", "contents", "__weakref__" + + contents: Set[weakref.ref[Type[Any]]] + on_remove: CallableReference[Optional[Callable[[], None]]] + + def __init__( + self, + classes: Iterable[Type[Any]], + on_remove: Optional[Callable[[], None]] = None, + ): + self.on_remove = on_remove + self.contents = { + weakref.ref(item, self._remove_item) for item in classes + } + _registries.add(self) + + def remove_item(self, cls: Type[Any]) -> None: + self._remove_item(weakref.ref(cls)) + + def __iter__(self) -> Generator[Optional[Type[Any]], None, None]: + return (ref() for ref in self.contents) + + def attempt_get(self, path: List[str], key: str) -> Type[Any]: + if len(self.contents) > 1: + raise exc.InvalidRequestError( + 'Multiple classes found for path "%s" ' + "in the registry of this declarative " + "base. Please use a fully module-qualified path." 
+ % (".".join(path + [key])) + ) + else: + ref = list(self.contents)[0] + cls = ref() + if cls is None: + raise NameError(key) + return cls + + def _remove_item(self, ref: weakref.ref[Type[Any]]) -> None: + self.contents.discard(ref) + if not self.contents: + _registries.discard(self) + if self.on_remove: + self.on_remove() + + def add_item(self, item: Type[Any]) -> None: + # protect against class registration race condition against + # asynchronous garbage collection calling _remove_item, + # [ticket:3208] and [ticket:10782] + modules = { + cls.__module__ + for cls in [ref() for ref in list(self.contents)] + if cls is not None + } + if item.__module__ in modules: + util.warn( + "This declarative base already contains a class with the " + "same class name and module name as %s.%s, and will " + "be replaced in the string-lookup table." + % (item.__module__, item.__name__) + ) + self.contents.add(weakref.ref(item, self._remove_item)) + + +class _ModuleMarker(ClsRegistryToken): + """Refers to a module name within + _decl_class_registry. + + """ + + __slots__ = "parent", "name", "contents", "mod_ns", "path", "__weakref__" + + parent: Optional[_ModuleMarker] + contents: Dict[str, Union[_ModuleMarker, _MultipleClassMarker]] + mod_ns: _ModNS + path: List[str] + + def __init__(self, name: str, parent: Optional[_ModuleMarker]): + self.parent = parent + self.name = name + self.contents = {} + self.mod_ns = _ModNS(self) + if self.parent: + self.path = self.parent.path + [self.name] + else: + self.path = [] + _registries.add(self) + + def __contains__(self, name: str) -> bool: + return name in self.contents + + def __getitem__(self, name: str) -> ClsRegistryToken: + return self.contents[name] + + def _remove_item(self, name: str) -> None: + self.contents.pop(name, None) + if not self.contents: + if self.parent is not None: + self.parent._remove_item(self.name) + _registries.discard(self) + + def resolve_attr(self, key: str) -> Union[_ModNS, Type[Any]]: + return self.mod_ns.__getattr__(key) + + def get_module(self, name: str) -> _ModuleMarker: + if name not in self.contents: + marker = _ModuleMarker(name, self) + self.contents[name] = marker + else: + marker = cast(_ModuleMarker, self.contents[name]) + return marker + + def add_class(self, name: str, cls: Type[Any]) -> None: + if name in self.contents: + existing = cast(_MultipleClassMarker, self.contents[name]) + try: + existing.add_item(cls) + except AttributeError as ae: + if not isinstance(existing, _MultipleClassMarker): + raise exc.InvalidRequestError( + f'name "{name}" matches both a ' + "class name and a module name" + ) from ae + else: + raise + else: + self.contents[name] = _MultipleClassMarker( + [cls], on_remove=lambda: self._remove_item(name) + ) + + def remove_class(self, name: str, cls: Type[Any]) -> None: + if name in self.contents: + existing = cast(_MultipleClassMarker, self.contents[name]) + existing.remove_item(cls) + + +class _ModNS: + __slots__ = ("__parent",) + + __parent: _ModuleMarker + + def __init__(self, parent: _ModuleMarker): + self.__parent = parent + + def __getattr__(self, key: str) -> Union[_ModNS, Type[Any]]: + try: + value = self.__parent.contents[key] + except KeyError: + pass + else: + if value is not None: + if isinstance(value, _ModuleMarker): + return value.mod_ns + else: + assert isinstance(value, _MultipleClassMarker) + return value.attempt_get(self.__parent.path, key) + raise NameError( + "Module %r has no mapped classes " + "registered under the name %r" % (self.__parent.name, key) + ) + + +class 
_GetColumns: + __slots__ = ("cls",) + + cls: Type[Any] + + def __init__(self, cls: Type[Any]): + self.cls = cls + + def __getattr__(self, key: str) -> Any: + mp = class_mapper(self.cls, configure=False) + if mp: + if key not in mp.all_orm_descriptors: + raise AttributeError( + "Class %r does not have a mapped column named %r" + % (self.cls, key) + ) + + desc = mp.all_orm_descriptors[key] + if desc.extension_type is interfaces.NotExtension.NOT_EXTENSION: + assert isinstance(desc, attributes.QueryableAttribute) + prop = desc.property + if isinstance(prop, SynonymProperty): + key = prop.name + elif not isinstance(prop, ColumnProperty): + raise exc.InvalidRequestError( + "Property %r is not an instance of" + " ColumnProperty (i.e. does not correspond" + " directly to a Column)." % key + ) + return getattr(self.cls, key) + + +inspection._inspects(_GetColumns)( + lambda target: inspection.inspect(target.cls) +) + + +class _GetTable: + __slots__ = "key", "metadata" + + key: str + metadata: MetaData + + def __init__(self, key: str, metadata: MetaData): + self.key = key + self.metadata = metadata + + def __getattr__(self, key: str) -> Table: + return self.metadata.tables[_get_table_key(key, self.key)] + + +def _determine_container(key: str, value: Any) -> _GetColumns: + if isinstance(value, _MultipleClassMarker): + value = value.attempt_get([], key) + return _GetColumns(value) + + +class _class_resolver: + __slots__ = ( + "cls", + "prop", + "arg", + "fallback", + "_dict", + "_resolvers", + "favor_tables", + ) + + cls: Type[Any] + prop: RelationshipProperty[Any] + fallback: Mapping[str, Any] + arg: str + favor_tables: bool + _resolvers: Tuple[Callable[[str], Any], ...] + + def __init__( + self, + cls: Type[Any], + prop: RelationshipProperty[Any], + fallback: Mapping[str, Any], + arg: str, + favor_tables: bool = False, + ): + self.cls = cls + self.prop = prop + self.arg = arg + self.fallback = fallback + self._dict = util.PopulateDict(self._access_cls) + self._resolvers = () + self.favor_tables = favor_tables + + def _access_cls(self, key: str) -> Any: + cls = self.cls + + manager = attributes.manager_of_class(cls) + decl_base = manager.registry + assert decl_base is not None + decl_class_registry = decl_base._class_registry + metadata = decl_base.metadata + + if self.favor_tables: + if key in metadata.tables: + return metadata.tables[key] + elif key in metadata._schemas: + return _GetTable(key, getattr(cls, "metadata", metadata)) + + if key in decl_class_registry: + return _determine_container(key, decl_class_registry[key]) + + if not self.favor_tables: + if key in metadata.tables: + return metadata.tables[key] + elif key in metadata._schemas: + return _GetTable(key, getattr(cls, "metadata", metadata)) + + if "_sa_module_registry" in decl_class_registry and key in cast( + _ModuleMarker, decl_class_registry["_sa_module_registry"] + ): + registry = cast( + _ModuleMarker, decl_class_registry["_sa_module_registry"] + ) + return registry.resolve_attr(key) + elif self._resolvers: + for resolv in self._resolvers: + value = resolv(key) + if value is not None: + return value + + return self.fallback[key] + + def _raise_for_name(self, name: str, err: Exception) -> NoReturn: + generic_match = re.match(r"(.+)\[(.+)\]", name) + + if generic_match: + clsarg = generic_match.group(2).strip("'") + raise exc.InvalidRequestError( + f"When initializing mapper {self.prop.parent}, " + f'expression "relationship({self.arg!r})" seems to be ' + "using a generic class as the argument to relationship(); " + "please state the 
generic argument " + "using an annotation, e.g. " + f'"{self.prop.key}: Mapped[{generic_match.group(1)}' + f"['{clsarg}']] = relationship()\"" + ) from err + else: + raise exc.InvalidRequestError( + "When initializing mapper %s, expression %r failed to " + "locate a name (%r). If this is a class name, consider " + "adding this relationship() to the %r class after " + "both dependent classes have been defined." + % (self.prop.parent, self.arg, name, self.cls) + ) from err + + def _resolve_name(self) -> Union[Table, Type[Any], _ModNS]: + name = self.arg + d = self._dict + rval = None + try: + for token in name.split("."): + if rval is None: + rval = d[token] + else: + rval = getattr(rval, token) + except KeyError as err: + self._raise_for_name(name, err) + except NameError as n: + self._raise_for_name(n.args[0], n) + else: + if isinstance(rval, _GetColumns): + return rval.cls + else: + if TYPE_CHECKING: + assert isinstance(rval, (type, Table, _ModNS)) + return rval + + def __call__(self) -> Any: + try: + x = eval(self.arg, globals(), self._dict) + + if isinstance(x, _GetColumns): + return x.cls + else: + return x + except NameError as n: + self._raise_for_name(n.args[0], n) + + +_fallback_dict: Mapping[str, Any] = None # type: ignore + + +def _resolver(cls: Type[Any], prop: RelationshipProperty[Any]) -> Tuple[ + Callable[[str], Callable[[], Union[Type[Any], Table, _ModNS]]], + Callable[[str, bool], _class_resolver], +]: + global _fallback_dict + + if _fallback_dict is None: + import sqlalchemy + from . import foreign + from . import remote + + _fallback_dict = util.immutabledict(sqlalchemy.__dict__).union( + {"foreign": foreign, "remote": remote} + ) + + def resolve_arg(arg: str, favor_tables: bool = False) -> _class_resolver: + return _class_resolver( + cls, prop, _fallback_dict, arg, favor_tables=favor_tables + ) + + def resolve_name( + arg: str, + ) -> Callable[[], Union[Type[Any], Table, _ModNS]]: + return _class_resolver(cls, prop, _fallback_dict, arg)._resolve_name + + return resolve_name, resolve_arg diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/collections.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/collections.py new file mode 100644 index 0000000000000000000000000000000000000000..336b1133d99823231d7bedc2fd1fa1b786464204 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/collections.py @@ -0,0 +1,1627 @@ +# orm/collections.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php +# mypy: allow-untyped-defs, allow-untyped-calls + +"""Support for collections of mapped entities. + +The collections package supplies the machinery used to inform the ORM of +collection membership changes. An instrumentation via decoration approach is +used, allowing arbitrary types (including built-ins) to be used as entity +collections without requiring inheritance from a base class. + +Instrumentation decoration relays membership change events to the +:class:`.CollectionAttributeImpl` that is currently managing the collection. +The decorators observe function call arguments and return values, tracking +entities entering or leaving the collection. Two decorator approaches are +provided. 
One is a bundle of generic decorators that map function arguments +and return values to events:: + + from sqlalchemy.orm.collections import collection + + + class MyClass: + # ... + + @collection.adds(1) + def store(self, item): + self.data.append(item) + + @collection.removes_return() + def pop(self): + return self.data.pop() + +The second approach is a bundle of targeted decorators that wrap appropriate +append and remove notifiers around the mutation methods present in the +standard Python ``list``, ``set`` and ``dict`` interfaces. These could be +specified in terms of generic decorator recipes, but are instead hand-tooled +for increased efficiency. The targeted decorators occasionally implement +adapter-like behavior, such as mapping bulk-set methods (``extend``, +``update``, ``__setslice__``, etc.) into the series of atomic mutation events +that the ORM requires. + +The targeted decorators are used internally for automatic instrumentation of +entity collection classes. Every collection class goes through a +transformation process roughly like so: + +1. If the class is a built-in, substitute a trivial sub-class +2. Is this class already instrumented? +3. Add in generic decorators +4. Sniff out the collection interface through duck-typing +5. Add targeted decoration to any undecorated interface method + +This process modifies the class at runtime, decorating methods and adding some +bookkeeping properties. This isn't possible (or desirable) for built-in +classes like ``list``, so trivial sub-classes are substituted to hold +decoration:: + + class InstrumentedList(list): + pass + +Collection classes can be specified in ``relationship(collection_class=)`` as +types or a function that returns an instance. Collection classes are +inspected and instrumented during the mapper compilation phase. The +collection_class callable will be executed once to produce a specimen +instance, and the type of that specimen will be instrumented. Functions that +return built-in types like ``lists`` will be adapted to produce instrumented +instances. + +When extending a known type like ``list``, additional decorations are not +generally not needed. Odds are, the extension method will delegate to a +method that's already instrumented. For example:: + + class QueueIsh(list): + def push(self, item): + self.append(item) + + def shift(self): + return self.pop(0) + +There's no need to decorate these methods. ``append`` and ``pop`` are already +instrumented as part of the ``list`` interface. Decorating them would fire +duplicate events, which should be avoided. + +The targeted decoration tries not to rely on other methods in the underlying +collection class, but some are unavoidable. Many depend on 'read' methods +being present to properly instrument a 'write', for example, ``__setitem__`` +needs ``__getitem__``. "Bulk" methods like ``update`` and ``extend`` may also +reimplemented in terms of atomic appends and removes, so the ``extend`` +decoration will actually perform many ``append`` operations and not call the +underlying method at all. + +Tight control over bulk operation and the firing of events is also possible by +implementing the instrumentation internally in your methods. The basic +instrumentation package works under the general assumption that collection +mutation will not raise unusual exceptions. If you want to closely +orchestrate append and remove events with exception management, internal +instrumentation may be the answer. 
Within your method, +``collection_adapter(self)`` will retrieve an object that you can use for +explicit control over triggering append and remove events. + +The owning object and :class:`.CollectionAttributeImpl` are also reachable +through the adapter, allowing for some very sophisticated behavior. + +""" +from __future__ import annotations + +import operator +import threading +import typing +from typing import Any +from typing import Callable +from typing import cast +from typing import Collection +from typing import Dict +from typing import Iterable +from typing import List +from typing import NoReturn +from typing import Optional +from typing import Set +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING +from typing import TypeVar +from typing import Union +import weakref + +from .base import NO_KEY +from .. import exc as sa_exc +from .. import util +from ..sql.base import NO_ARG +from ..util.compat import inspect_getfullargspec +from ..util.typing import Protocol + +if typing.TYPE_CHECKING: + from .attributes import AttributeEventToken + from .attributes import CollectionAttributeImpl + from .mapped_collection import attribute_keyed_dict + from .mapped_collection import column_keyed_dict + from .mapped_collection import keyfunc_mapping + from .mapped_collection import KeyFuncDict # noqa: F401 + from .state import InstanceState + + +__all__ = [ + "collection", + "collection_adapter", + "keyfunc_mapping", + "column_keyed_dict", + "attribute_keyed_dict", + "KeyFuncDict", + # old names in < 2.0 + "mapped_collection", + "column_mapped_collection", + "attribute_mapped_collection", + "MappedCollection", +] + +__instrumentation_mutex = threading.Lock() + + +_CollectionFactoryType = Callable[[], "_AdaptedCollectionProtocol"] + +_T = TypeVar("_T", bound=Any) +_KT = TypeVar("_KT", bound=Any) +_VT = TypeVar("_VT", bound=Any) +_COL = TypeVar("_COL", bound="Collection[Any]") +_FN = TypeVar("_FN", bound="Callable[..., Any]") + + +class _CollectionConverterProtocol(Protocol): + def __call__(self, collection: _COL) -> _COL: ... + + +class _AdaptedCollectionProtocol(Protocol): + _sa_adapter: CollectionAdapter + _sa_appender: Callable[..., Any] + _sa_remover: Callable[..., Any] + _sa_iterator: Callable[..., Iterable[Any]] + _sa_converter: _CollectionConverterProtocol + + +class collection: + """Decorators for entity collection classes. + + The decorators fall into two groups: annotations and interception recipes. + + The annotating decorators (appender, remover, iterator, converter, + internally_instrumented) indicate the method's purpose and take no + arguments. They are not written with parens:: + + @collection.appender + def append(self, append): ... + + The recipe decorators all require parens, even those that take no + arguments:: + + @collection.adds("entity") + def insert(self, position, entity): ... + + + @collection.removes_return() + def popitem(self): ... + + """ + + # Bundled as a class solely for ease of use: packaging, doc strings, + # importability. + + @staticmethod + def appender(fn): + """Tag the method as the collection appender. + + The appender method is called with one positional argument: the value + to append. The method will be automatically decorated with 'adds(1)' + if not already decorated:: + + @collection.appender + def add(self, append): ... + + + # or, equivalently + @collection.appender + @collection.adds(1) + def add(self, append): ... + + + # for mapping type, an 'append' may kick out a previous value + # that occupies that slot. 
consider d['a'] = 'foo'- any previous + # value in d['a'] is discarded. + @collection.appender + @collection.replaces(1) + def add(self, entity): + key = some_key_func(entity) + previous = None + if key in self: + previous = self[key] + self[key] = entity + return previous + + If the value to append is not allowed in the collection, you may + raise an exception. Something to remember is that the appender + will be called for each object mapped by a database query. If the + database contains rows that violate your collection semantics, you + will need to get creative to fix the problem, as access via the + collection will not work. + + If the appender method is internally instrumented, you must also + receive the keyword argument '_sa_initiator' and ensure its + promulgation to collection events. + + """ + fn._sa_instrument_role = "appender" + return fn + + @staticmethod + def remover(fn): + """Tag the method as the collection remover. + + The remover method is called with one positional argument: the value + to remove. The method will be automatically decorated with + :meth:`removes_return` if not already decorated:: + + @collection.remover + def zap(self, entity): ... + + + # or, equivalently + @collection.remover + @collection.removes_return() + def zap(self): ... + + If the value to remove is not present in the collection, you may + raise an exception or return None to ignore the error. + + If the remove method is internally instrumented, you must also + receive the keyword argument '_sa_initiator' and ensure its + promulgation to collection events. + + """ + fn._sa_instrument_role = "remover" + return fn + + @staticmethod + def iterator(fn): + """Tag the method as the collection remover. + + The iterator method is called with no arguments. It is expected to + return an iterator over all collection members:: + + @collection.iterator + def __iter__(self): ... + + """ + fn._sa_instrument_role = "iterator" + return fn + + @staticmethod + def internally_instrumented(fn): + """Tag the method as instrumented. + + This tag will prevent any decoration from being applied to the + method. Use this if you are orchestrating your own calls to + :func:`.collection_adapter` in one of the basic SQLAlchemy + interface methods, or to prevent an automatic ABC method + decoration from wrapping your implementation:: + + # normally an 'extend' method on a list-like class would be + # automatically intercepted and re-implemented in terms of + # SQLAlchemy events and append(). your implementation will + # never be called, unless: + @collection.internally_instrumented + def extend(self, items): ... + + """ + fn._sa_instrumented = True + return fn + + @staticmethod + @util.deprecated( + "1.3", + "The :meth:`.collection.converter` handler is deprecated and will " + "be removed in a future release. Please refer to the " + ":class:`.AttributeEvents.bulk_replace` listener interface in " + "conjunction with the :func:`.event.listen` function.", + ) + def converter(fn): + """Tag the method as the collection converter. + + This optional method will be called when a collection is being + replaced entirely, as in:: + + myobj.acollection = [newvalue1, newvalue2] + + The converter method will receive the object being assigned and should + return an iterable of values suitable for use by the ``appender`` + method. A converter must not assign values or mutate the collection, + its sole job is to adapt the value the user provides into an iterable + of values for the ORM's use. 
+ + The default converter implementation will use duck-typing to do the + conversion. A dict-like collection will be convert into an iterable + of dictionary values, and other types will simply be iterated:: + + @collection.converter + def convert(self, other): ... + + If the duck-typing of the object does not match the type of this + collection, a TypeError is raised. + + Supply an implementation of this method if you want to expand the + range of possible types that can be assigned in bulk or perform + validation on the values about to be assigned. + + """ + fn._sa_instrument_role = "converter" + return fn + + @staticmethod + def adds(arg): + """Mark the method as adding an entity to the collection. + + Adds "add to collection" handling to the method. The decorator + argument indicates which method argument holds the SQLAlchemy-relevant + value. Arguments can be specified positionally (i.e. integer) or by + name:: + + @collection.adds(1) + def push(self, item): ... + + + @collection.adds("entity") + def do_stuff(self, thing, entity=None): ... + + """ + + def decorator(fn): + fn._sa_instrument_before = ("fire_append_event", arg) + return fn + + return decorator + + @staticmethod + def replaces(arg): + """Mark the method as replacing an entity in the collection. + + Adds "add to collection" and "remove from collection" handling to + the method. The decorator argument indicates which method argument + holds the SQLAlchemy-relevant value to be added, and return value, if + any will be considered the value to remove. + + Arguments can be specified positionally (i.e. integer) or by name:: + + @collection.replaces(2) + def __setitem__(self, index, item): ... + + """ + + def decorator(fn): + fn._sa_instrument_before = ("fire_append_event", arg) + fn._sa_instrument_after = "fire_remove_event" + return fn + + return decorator + + @staticmethod + def removes(arg): + """Mark the method as removing an entity in the collection. + + Adds "remove from collection" handling to the method. The decorator + argument indicates which method argument holds the SQLAlchemy-relevant + value to be removed. Arguments can be specified positionally (i.e. + integer) or by name:: + + @collection.removes(1) + def zap(self, item): ... + + For methods where the value to remove is not known at call-time, use + collection.removes_return. + + """ + + def decorator(fn): + fn._sa_instrument_before = ("fire_remove_event", arg) + return fn + + return decorator + + @staticmethod + def removes_return(): + """Mark the method as removing an entity in the collection. + + Adds "remove from collection" handling to the method. The return + value of the method, if any, is considered the value to remove. The + method arguments are not inspected:: + + @collection.removes_return() + def pop(self): ... + + For methods where the value to remove is known at call-time, use + collection.remove. + + """ + + def decorator(fn): + fn._sa_instrument_after = "fire_remove_event" + return fn + + return decorator + + +if TYPE_CHECKING: + + def collection_adapter(collection: Collection[Any]) -> CollectionAdapter: + """Fetch the :class:`.CollectionAdapter` for a collection.""" + +else: + collection_adapter = operator.attrgetter("_sa_adapter") + + +class CollectionAdapter: + """Bridges between the ORM and arbitrary Python collections. + + Proxies base-level collection operations (append, remove, iterate) + to the underlying Python collection, and emits add/remove events for + entities entering or leaving the collection. 
+ + The ORM uses :class:`.CollectionAdapter` exclusively for interaction with + entity collections. + + + """ + + __slots__ = ( + "attr", + "_key", + "_data", + "owner_state", + "_converter", + "invalidated", + "empty", + ) + + attr: CollectionAttributeImpl + _key: str + + # this is actually a weakref; see note in constructor + _data: Callable[..., _AdaptedCollectionProtocol] + + owner_state: InstanceState[Any] + _converter: _CollectionConverterProtocol + invalidated: bool + empty: bool + + def __init__( + self, + attr: CollectionAttributeImpl, + owner_state: InstanceState[Any], + data: _AdaptedCollectionProtocol, + ): + self.attr = attr + self._key = attr.key + + # this weakref stays referenced throughout the lifespan of + # CollectionAdapter. so while the weakref can return None, this + # is realistically only during garbage collection of this object, so + # we type this as a callable that returns _AdaptedCollectionProtocol + # in all cases. + self._data = weakref.ref(data) # type: ignore + + self.owner_state = owner_state + data._sa_adapter = self + self._converter = data._sa_converter + self.invalidated = False + self.empty = False + + def _warn_invalidated(self) -> None: + util.warn("This collection has been invalidated.") + + @property + def data(self) -> _AdaptedCollectionProtocol: + "The entity collection being adapted." + return self._data() + + @property + def _referenced_by_owner(self) -> bool: + """return True if the owner state still refers to this collection. + + This will return False within a bulk replace operation, + where this collection is the one being replaced. + + """ + return self.owner_state.dict[self._key] is self._data() + + def bulk_appender(self): + return self._data()._sa_appender + + def append_with_event( + self, item: Any, initiator: Optional[AttributeEventToken] = None + ) -> None: + """Add an entity to the collection, firing mutation events.""" + + self._data()._sa_appender(item, _sa_initiator=initiator) + + def _set_empty(self, user_data): + assert ( + not self.empty + ), "This collection adapter is already in the 'empty' state" + self.empty = True + self.owner_state._empty_collections[self._key] = user_data + + def _reset_empty(self) -> None: + assert ( + self.empty + ), "This collection adapter is not in the 'empty' state" + self.empty = False + self.owner_state.dict[self._key] = ( + self.owner_state._empty_collections.pop(self._key) + ) + + def _refuse_empty(self) -> NoReturn: + raise sa_exc.InvalidRequestError( + "This is a special 'empty' collection which cannot accommodate " + "internal mutation operations" + ) + + def append_without_event(self, item: Any) -> None: + """Add or restore an entity to the collection, firing no events.""" + + if self.empty: + self._refuse_empty() + self._data()._sa_appender(item, _sa_initiator=False) + + def append_multiple_without_event(self, items: Iterable[Any]) -> None: + """Add or restore an entity to the collection, firing no events.""" + if self.empty: + self._refuse_empty() + appender = self._data()._sa_appender + for item in items: + appender(item, _sa_initiator=False) + + def bulk_remover(self): + return self._data()._sa_remover + + def remove_with_event( + self, item: Any, initiator: Optional[AttributeEventToken] = None + ) -> None: + """Remove an entity from the collection, firing mutation events.""" + self._data()._sa_remover(item, _sa_initiator=initiator) + + def remove_without_event(self, item: Any) -> None: + """Remove an entity from the collection, firing no events.""" + if self.empty: + 
self._refuse_empty() + self._data()._sa_remover(item, _sa_initiator=False) + + def clear_with_event( + self, initiator: Optional[AttributeEventToken] = None + ) -> None: + """Empty the collection, firing a mutation event for each entity.""" + + if self.empty: + self._refuse_empty() + remover = self._data()._sa_remover + for item in list(self): + remover(item, _sa_initiator=initiator) + + def clear_without_event(self) -> None: + """Empty the collection, firing no events.""" + + if self.empty: + self._refuse_empty() + remover = self._data()._sa_remover + for item in list(self): + remover(item, _sa_initiator=False) + + def __iter__(self): + """Iterate over entities in the collection.""" + + return iter(self._data()._sa_iterator()) + + def __len__(self): + """Count entities in the collection.""" + return len(list(self._data()._sa_iterator())) + + def __bool__(self): + return True + + def _fire_append_wo_mutation_event_bulk( + self, items, initiator=None, key=NO_KEY + ): + if not items: + return + + if initiator is not False: + if self.invalidated: + self._warn_invalidated() + + if self.empty: + self._reset_empty() + + for item in items: + self.attr.fire_append_wo_mutation_event( + self.owner_state, + self.owner_state.dict, + item, + initiator, + key, + ) + + def fire_append_wo_mutation_event(self, item, initiator=None, key=NO_KEY): + """Notify that a entity is entering the collection but is already + present. + + + Initiator is a token owned by the InstrumentedAttribute that + initiated the membership mutation, and should be left as None + unless you are passing along an initiator value from a chained + operation. + + .. versionadded:: 1.4.15 + + """ + if initiator is not False: + if self.invalidated: + self._warn_invalidated() + + if self.empty: + self._reset_empty() + + return self.attr.fire_append_wo_mutation_event( + self.owner_state, self.owner_state.dict, item, initiator, key + ) + else: + return item + + def fire_append_event(self, item, initiator=None, key=NO_KEY): + """Notify that a entity has entered the collection. + + Initiator is a token owned by the InstrumentedAttribute that + initiated the membership mutation, and should be left as None + unless you are passing along an initiator value from a chained + operation. + + """ + if initiator is not False: + if self.invalidated: + self._warn_invalidated() + + if self.empty: + self._reset_empty() + + return self.attr.fire_append_event( + self.owner_state, self.owner_state.dict, item, initiator, key + ) + else: + return item + + def _fire_remove_event_bulk(self, items, initiator=None, key=NO_KEY): + if not items: + return + + if initiator is not False: + if self.invalidated: + self._warn_invalidated() + + if self.empty: + self._reset_empty() + + for item in items: + self.attr.fire_remove_event( + self.owner_state, + self.owner_state.dict, + item, + initiator, + key, + ) + + def fire_remove_event(self, item, initiator=None, key=NO_KEY): + """Notify that a entity has been removed from the collection. + + Initiator is the InstrumentedAttribute that initiated the membership + mutation, and should be left as None unless you are passing along + an initiator value from a chained operation. 
+ + """ + if initiator is not False: + if self.invalidated: + self._warn_invalidated() + + if self.empty: + self._reset_empty() + + self.attr.fire_remove_event( + self.owner_state, self.owner_state.dict, item, initiator, key + ) + + def fire_pre_remove_event(self, initiator=None, key=NO_KEY): + """Notify that an entity is about to be removed from the collection. + + Only called if the entity cannot be removed after calling + fire_remove_event(). + + """ + if self.invalidated: + self._warn_invalidated() + self.attr.fire_pre_remove_event( + self.owner_state, + self.owner_state.dict, + initiator=initiator, + key=key, + ) + + def __getstate__(self): + return { + "key": self._key, + "owner_state": self.owner_state, + "owner_cls": self.owner_state.class_, + "data": self.data, + "invalidated": self.invalidated, + "empty": self.empty, + } + + def __setstate__(self, d): + self._key = d["key"] + self.owner_state = d["owner_state"] + + # see note in constructor regarding this type: ignore + self._data = weakref.ref(d["data"]) # type: ignore + + self._converter = d["data"]._sa_converter + d["data"]._sa_adapter = self + self.invalidated = d["invalidated"] + self.attr = getattr(d["owner_cls"], self._key).impl + self.empty = d.get("empty", False) + + +def bulk_replace(values, existing_adapter, new_adapter, initiator=None): + """Load a new collection, firing events based on prior like membership. + + Appends instances in ``values`` onto the ``new_adapter``. Events will be + fired for any instance not present in the ``existing_adapter``. Any + instances in ``existing_adapter`` not present in ``values`` will have + remove events fired upon them. + + :param values: An iterable of collection member instances + + :param existing_adapter: A :class:`.CollectionAdapter` of + instances to be replaced + + :param new_adapter: An empty :class:`.CollectionAdapter` + to load with ``values`` + + + """ + + assert isinstance(values, list) + + idset = util.IdentitySet + existing_idset = idset(existing_adapter or ()) + constants = existing_idset.intersection(values or ()) + additions = idset(values or ()).difference(constants) + removals = existing_idset.difference(constants) + + appender = new_adapter.bulk_appender() + + for member in values or (): + if member in additions: + appender(member, _sa_initiator=initiator) + elif member in constants: + appender(member, _sa_initiator=False) + + if existing_adapter: + existing_adapter._fire_append_wo_mutation_event_bulk( + constants, initiator=initiator + ) + existing_adapter._fire_remove_event_bulk(removals, initiator=initiator) + + +def prepare_instrumentation( + factory: Union[Type[Collection[Any]], _CollectionFactoryType], +) -> _CollectionFactoryType: + """Prepare a callable for future use as a collection class factory. + + Given a collection class factory (either a type or no-arg callable), + return another factory that will produce compatible instances when + called. + + This function is responsible for converting collection_class=list + into the run-time behavior of collection_class=InstrumentedList. + + """ + + impl_factory: _CollectionFactoryType + + # Convert a builtin to 'Instrumented*' + if factory in __canned_instrumentation: + impl_factory = __canned_instrumentation[factory] + else: + impl_factory = cast(_CollectionFactoryType, factory) + + cls: Union[_CollectionFactoryType, Type[Collection[Any]]] + + # Create a specimen + cls = type(impl_factory()) + + # Did factory callable return a builtin? + if cls in __canned_instrumentation: + # if so, just convert. 
+ # in previous major releases, this codepath wasn't working and was + # not covered by tests. prior to that it supplied a "wrapper" + # function that would return the class, though the rationale for this + # case is not known + impl_factory = __canned_instrumentation[cls] + cls = type(impl_factory()) + + # Instrument the class if needed. + if __instrumentation_mutex.acquire(): + try: + if getattr(cls, "_sa_instrumented", None) != id(cls): + _instrument_class(cls) + finally: + __instrumentation_mutex.release() + + return impl_factory + + +def _instrument_class(cls): + """Modify methods in a class and install instrumentation.""" + + # In the normal call flow, a request for any of the 3 basic collection + # types is transformed into one of our trivial subclasses + # (e.g. InstrumentedList). Catch anything else that sneaks in here... + if cls.__module__ == "__builtin__": + raise sa_exc.ArgumentError( + "Can not instrument a built-in type. Use a " + "subclass, even a trivial one." + ) + + roles, methods = _locate_roles_and_methods(cls) + + _setup_canned_roles(cls, roles, methods) + + _assert_required_roles(cls, roles, methods) + + _set_collection_attributes(cls, roles, methods) + + +def _locate_roles_and_methods(cls): + """search for _sa_instrument_role-decorated methods in + method resolution order, assign to roles. + + """ + + roles: Dict[str, str] = {} + methods: Dict[str, Tuple[Optional[str], Optional[int], Optional[str]]] = {} + + for supercls in cls.__mro__: + for name, method in vars(supercls).items(): + if not callable(method): + continue + + # note role declarations + if hasattr(method, "_sa_instrument_role"): + role = method._sa_instrument_role + assert role in ( + "appender", + "remover", + "iterator", + "converter", + ) + roles.setdefault(role, name) + + # transfer instrumentation requests from decorated function + # to the combined queue + before: Optional[Tuple[str, int]] = None + after: Optional[str] = None + + if hasattr(method, "_sa_instrument_before"): + op, argument = method._sa_instrument_before + assert op in ("fire_append_event", "fire_remove_event") + before = op, argument + if hasattr(method, "_sa_instrument_after"): + op = method._sa_instrument_after + assert op in ("fire_append_event", "fire_remove_event") + after = op + if before: + methods[name] = before + (after,) + elif after: + methods[name] = None, None, after + return roles, methods + + +def _setup_canned_roles(cls, roles, methods): + """see if this class has "canned" roles based on a known + collection type (dict, set, list). 
Apply those roles + as needed to the "roles" dictionary, and also + prepare "decorator" methods + + """ + collection_type = util.duck_type_collection(cls) + if collection_type in __interfaces: + assert collection_type is not None + canned_roles, decorators = __interfaces[collection_type] + for role, name in canned_roles.items(): + roles.setdefault(role, name) + + # apply ABC auto-decoration to methods that need it + for method, decorator in decorators.items(): + fn = getattr(cls, method, None) + if ( + fn + and method not in methods + and not hasattr(fn, "_sa_instrumented") + ): + setattr(cls, method, decorator(fn)) + + +def _assert_required_roles(cls, roles, methods): + """ensure all roles are present, and apply implicit instrumentation if + needed + + """ + if "appender" not in roles or not hasattr(cls, roles["appender"]): + raise sa_exc.ArgumentError( + "Type %s must elect an appender method to be " + "a collection class" % cls.__name__ + ) + elif roles["appender"] not in methods and not hasattr( + getattr(cls, roles["appender"]), "_sa_instrumented" + ): + methods[roles["appender"]] = ("fire_append_event", 1, None) + + if "remover" not in roles or not hasattr(cls, roles["remover"]): + raise sa_exc.ArgumentError( + "Type %s must elect a remover method to be " + "a collection class" % cls.__name__ + ) + elif roles["remover"] not in methods and not hasattr( + getattr(cls, roles["remover"]), "_sa_instrumented" + ): + methods[roles["remover"]] = ("fire_remove_event", 1, None) + + if "iterator" not in roles or not hasattr(cls, roles["iterator"]): + raise sa_exc.ArgumentError( + "Type %s must elect an iterator method to be " + "a collection class" % cls.__name__ + ) + + +def _set_collection_attributes(cls, roles, methods): + """apply ad-hoc instrumentation from decorators, class-level defaults + and implicit role declarations + + """ + for method_name, (before, argument, after) in methods.items(): + setattr( + cls, + method_name, + _instrument_membership_mutator( + getattr(cls, method_name), before, argument, after + ), + ) + # intern the role map + for role, method_name in roles.items(): + setattr(cls, "_sa_%s" % role, getattr(cls, method_name)) + + cls._sa_adapter = None + + if not hasattr(cls, "_sa_converter"): + cls._sa_converter = None + cls._sa_instrumented = id(cls) + + +def _instrument_membership_mutator(method, before, argument, after): + """Route method args and/or return value through the collection + adapter.""" + # This isn't smart enough to handle @adds(1) for 'def fn(self, (a, b))' + if before: + fn_args = list( + util.flatten_iterator(inspect_getfullargspec(method)[0]) + ) + if isinstance(argument, int): + pos_arg = argument + named_arg = len(fn_args) > argument and fn_args[argument] or None + else: + if argument in fn_args: + pos_arg = fn_args.index(argument) + else: + pos_arg = None + named_arg = argument + del fn_args + + def wrapper(*args, **kw): + if before: + if pos_arg is None: + if named_arg not in kw: + raise sa_exc.ArgumentError( + "Missing argument %s" % argument + ) + value = kw[named_arg] + else: + if len(args) > pos_arg: + value = args[pos_arg] + elif named_arg in kw: + value = kw[named_arg] + else: + raise sa_exc.ArgumentError( + "Missing argument %s" % argument + ) + + initiator = kw.pop("_sa_initiator", None) + if initiator is False: + executor = None + else: + executor = args[0]._sa_adapter + + if before and executor: + getattr(executor, before)(value, initiator) + + if not after or not executor: + return method(*args, **kw) + else: + res = method(*args, **kw) 
+ if res is not None: + getattr(executor, after)(res, initiator) + return res + + wrapper._sa_instrumented = True # type: ignore[attr-defined] + if hasattr(method, "_sa_instrument_role"): + wrapper._sa_instrument_role = method._sa_instrument_role # type: ignore[attr-defined] # noqa: E501 + wrapper.__name__ = method.__name__ + wrapper.__doc__ = method.__doc__ + return wrapper + + +def __set_wo_mutation(collection, item, _sa_initiator=None): + """Run set wo mutation events. + + The collection is not mutated. + + """ + if _sa_initiator is not False: + executor = collection._sa_adapter + if executor: + executor.fire_append_wo_mutation_event( + item, _sa_initiator, key=None + ) + + +def __set(collection, item, _sa_initiator, key): + """Run set events. + + This event always occurs before the collection is actually mutated. + + """ + + if _sa_initiator is not False: + executor = collection._sa_adapter + if executor: + item = executor.fire_append_event(item, _sa_initiator, key=key) + return item + + +def __del(collection, item, _sa_initiator, key): + """Run del events. + + This event occurs before the collection is actually mutated, *except* + in the case of a pop operation, in which case it occurs afterwards. + For pop operations, the __before_pop hook is called before the + operation occurs. + + """ + if _sa_initiator is not False: + executor = collection._sa_adapter + if executor: + executor.fire_remove_event(item, _sa_initiator, key=key) + + +def __before_pop(collection, _sa_initiator=None): + """An event which occurs on a before a pop() operation occurs.""" + executor = collection._sa_adapter + if executor: + executor.fire_pre_remove_event(_sa_initiator) + + +def _list_decorators() -> Dict[str, Callable[[_FN], _FN]]: + """Tailored instrumentation wrappers for any list-like class.""" + + def _tidy(fn): + fn._sa_instrumented = True + fn.__doc__ = getattr(list, fn.__name__).__doc__ + + def append(fn): + def append(self, item, _sa_initiator=None): + item = __set(self, item, _sa_initiator, NO_KEY) + fn(self, item) + + _tidy(append) + return append + + def remove(fn): + def remove(self, value, _sa_initiator=None): + __del(self, value, _sa_initiator, NO_KEY) + # testlib.pragma exempt:__eq__ + fn(self, value) + + _tidy(remove) + return remove + + def insert(fn): + def insert(self, index, value): + value = __set(self, value, None, index) + fn(self, index, value) + + _tidy(insert) + return insert + + def __setitem__(fn): + def __setitem__(self, index, value): + if not isinstance(index, slice): + existing = self[index] + if existing is not None: + __del(self, existing, None, index) + value = __set(self, value, None, index) + fn(self, index, value) + else: + # slice assignment requires __delitem__, insert, __len__ + step = index.step or 1 + start = index.start or 0 + if start < 0: + start += len(self) + if index.stop is not None: + stop = index.stop + else: + stop = len(self) + if stop < 0: + stop += len(self) + + if step == 1: + if value is self: + return + for i in range(start, stop, step): + if len(self) > start: + del self[start] + + for i, item in enumerate(value): + self.insert(i + start, item) + else: + rng = list(range(start, stop, step)) + if len(value) != len(rng): + raise ValueError( + "attempt to assign sequence of size %s to " + "extended slice of size %s" + % (len(value), len(rng)) + ) + for i, item in zip(rng, value): + self.__setitem__(i, item) + + _tidy(__setitem__) + return __setitem__ + + def __delitem__(fn): + def __delitem__(self, index): + if not isinstance(index, slice): + item = 
self[index] + __del(self, item, None, index) + fn(self, index) + else: + # slice deletion requires __getslice__ and a slice-groking + # __getitem__ for stepped deletion + # note: not breaking this into atomic dels + for item in self[index]: + __del(self, item, None, index) + fn(self, index) + + _tidy(__delitem__) + return __delitem__ + + def extend(fn): + def extend(self, iterable): + for value in list(iterable): + self.append(value) + + _tidy(extend) + return extend + + def __iadd__(fn): + def __iadd__(self, iterable): + # list.__iadd__ takes any iterable and seems to let TypeError + # raise as-is instead of returning NotImplemented + for value in list(iterable): + self.append(value) + return self + + _tidy(__iadd__) + return __iadd__ + + def pop(fn): + def pop(self, index=-1): + __before_pop(self) + item = fn(self, index) + __del(self, item, None, index) + return item + + _tidy(pop) + return pop + + def clear(fn): + def clear(self, index=-1): + for item in self: + __del(self, item, None, index) + fn(self) + + _tidy(clear) + return clear + + # __imul__ : not wrapping this. all members of the collection are already + # present, so no need to fire appends... wrapping it with an explicit + # decorator is still possible, so events on *= can be had if they're + # desired. hard to imagine a use case for __imul__, though. + + l = locals().copy() + l.pop("_tidy") + return l + + +def _dict_decorators() -> Dict[str, Callable[[_FN], _FN]]: + """Tailored instrumentation wrappers for any dict-like mapping class.""" + + def _tidy(fn): + fn._sa_instrumented = True + fn.__doc__ = getattr(dict, fn.__name__).__doc__ + + def __setitem__(fn): + def __setitem__(self, key, value, _sa_initiator=None): + if key in self: + __del(self, self[key], _sa_initiator, key) + value = __set(self, value, _sa_initiator, key) + fn(self, key, value) + + _tidy(__setitem__) + return __setitem__ + + def __delitem__(fn): + def __delitem__(self, key, _sa_initiator=None): + if key in self: + __del(self, self[key], _sa_initiator, key) + fn(self, key) + + _tidy(__delitem__) + return __delitem__ + + def clear(fn): + def clear(self): + for key in self: + __del(self, self[key], None, key) + fn(self) + + _tidy(clear) + return clear + + def pop(fn): + def pop(self, key, default=NO_ARG): + __before_pop(self) + _to_del = key in self + if default is NO_ARG: + item = fn(self, key) + else: + item = fn(self, key, default) + if _to_del: + __del(self, item, None, key) + return item + + _tidy(pop) + return pop + + def popitem(fn): + def popitem(self): + __before_pop(self) + item = fn(self) + __del(self, item[1], None, 1) + return item + + _tidy(popitem) + return popitem + + def setdefault(fn): + def setdefault(self, key, default=None): + if key not in self: + self.__setitem__(key, default) + return default + else: + value = self.__getitem__(key) + if value is default: + __set_wo_mutation(self, value, None) + + return value + + _tidy(setdefault) + return setdefault + + def update(fn): + def update(self, __other=NO_ARG, **kw): + if __other is not NO_ARG: + if hasattr(__other, "keys"): + for key in list(__other): + if key not in self or self[key] is not __other[key]: + self[key] = __other[key] + else: + __set_wo_mutation(self, __other[key], None) + else: + for key, value in __other: + if key not in self or self[key] is not value: + self[key] = value + else: + __set_wo_mutation(self, value, None) + for key in kw: + if key not in self or self[key] is not kw[key]: + self[key] = kw[key] + else: + __set_wo_mutation(self, kw[key], None) + + _tidy(update) + 
return update + + l = locals().copy() + l.pop("_tidy") + return l + + +_set_binop_bases = (set, frozenset) + + +def _set_binops_check_strict(self: Any, obj: Any) -> bool: + """Allow only set, frozenset and self.__class__-derived + objects in binops.""" + return isinstance(obj, _set_binop_bases + (self.__class__,)) + + +def _set_binops_check_loose(self: Any, obj: Any) -> bool: + """Allow anything set-like to participate in set binops.""" + return ( + isinstance(obj, _set_binop_bases + (self.__class__,)) + or util.duck_type_collection(obj) == set + ) + + +def _set_decorators() -> Dict[str, Callable[[_FN], _FN]]: + """Tailored instrumentation wrappers for any set-like class.""" + + def _tidy(fn): + fn._sa_instrumented = True + fn.__doc__ = getattr(set, fn.__name__).__doc__ + + def add(fn): + def add(self, value, _sa_initiator=None): + if value not in self: + value = __set(self, value, _sa_initiator, NO_KEY) + else: + __set_wo_mutation(self, value, _sa_initiator) + # testlib.pragma exempt:__hash__ + fn(self, value) + + _tidy(add) + return add + + def discard(fn): + def discard(self, value, _sa_initiator=None): + # testlib.pragma exempt:__hash__ + if value in self: + __del(self, value, _sa_initiator, NO_KEY) + # testlib.pragma exempt:__hash__ + fn(self, value) + + _tidy(discard) + return discard + + def remove(fn): + def remove(self, value, _sa_initiator=None): + # testlib.pragma exempt:__hash__ + if value in self: + __del(self, value, _sa_initiator, NO_KEY) + # testlib.pragma exempt:__hash__ + fn(self, value) + + _tidy(remove) + return remove + + def pop(fn): + def pop(self): + __before_pop(self) + item = fn(self) + # for set in particular, we have no way to access the item + # that will be popped before pop is called. + __del(self, item, None, NO_KEY) + return item + + _tidy(pop) + return pop + + def clear(fn): + def clear(self): + for item in list(self): + self.remove(item) + + _tidy(clear) + return clear + + def update(fn): + def update(self, value): + for item in value: + self.add(item) + + _tidy(update) + return update + + def __ior__(fn): + def __ior__(self, value): + if not _set_binops_check_strict(self, value): + return NotImplemented + for item in value: + self.add(item) + return self + + _tidy(__ior__) + return __ior__ + + def difference_update(fn): + def difference_update(self, value): + for item in value: + self.discard(item) + + _tidy(difference_update) + return difference_update + + def __isub__(fn): + def __isub__(self, value): + if not _set_binops_check_strict(self, value): + return NotImplemented + for item in value: + self.discard(item) + return self + + _tidy(__isub__) + return __isub__ + + def intersection_update(fn): + def intersection_update(self, other): + want, have = self.intersection(other), set(self) + remove, add = have - want, want - have + + for item in remove: + self.remove(item) + for item in add: + self.add(item) + + _tidy(intersection_update) + return intersection_update + + def __iand__(fn): + def __iand__(self, other): + if not _set_binops_check_strict(self, other): + return NotImplemented + want, have = self.intersection(other), set(self) + remove, add = have - want, want - have + + for item in remove: + self.remove(item) + for item in add: + self.add(item) + return self + + _tidy(__iand__) + return __iand__ + + def symmetric_difference_update(fn): + def symmetric_difference_update(self, other): + want, have = self.symmetric_difference(other), set(self) + remove, add = have - want, want - have + + for item in remove: + self.remove(item) + for item in add: + 
self.add(item) + + _tidy(symmetric_difference_update) + return symmetric_difference_update + + def __ixor__(fn): + def __ixor__(self, other): + if not _set_binops_check_strict(self, other): + return NotImplemented + want, have = self.symmetric_difference(other), set(self) + remove, add = have - want, want - have + + for item in remove: + self.remove(item) + for item in add: + self.add(item) + return self + + _tidy(__ixor__) + return __ixor__ + + l = locals().copy() + l.pop("_tidy") + return l + + +class InstrumentedList(List[_T]): + """An instrumented version of the built-in list.""" + + +class InstrumentedSet(Set[_T]): + """An instrumented version of the built-in set.""" + + +class InstrumentedDict(Dict[_KT, _VT]): + """An instrumented version of the built-in dict.""" + + +__canned_instrumentation: util.immutabledict[Any, _CollectionFactoryType] = ( + util.immutabledict( + { + list: InstrumentedList, + set: InstrumentedSet, + dict: InstrumentedDict, + } + ) +) + +__interfaces: util.immutabledict[ + Any, + Tuple[ + Dict[str, str], + Dict[str, Callable[..., Any]], + ], +] = util.immutabledict( + { + list: ( + { + "appender": "append", + "remover": "remove", + "iterator": "__iter__", + }, + _list_decorators(), + ), + set: ( + {"appender": "add", "remover": "remove", "iterator": "__iter__"}, + _set_decorators(), + ), + # decorators are required for dicts and object collections. + dict: ({"iterator": "values"}, _dict_decorators()), + } +) + + +def __go(lcls): + global keyfunc_mapping, mapped_collection + global column_keyed_dict, column_mapped_collection + global MappedCollection, KeyFuncDict + global attribute_keyed_dict, attribute_mapped_collection + + from .mapped_collection import keyfunc_mapping + from .mapped_collection import column_keyed_dict + from .mapped_collection import attribute_keyed_dict + from .mapped_collection import KeyFuncDict + + from .mapped_collection import mapped_collection + from .mapped_collection import column_mapped_collection + from .mapped_collection import attribute_mapped_collection + from .mapped_collection import MappedCollection + + # ensure instrumentation is associated with + # these built-in classes; if a user-defined class + # subclasses these and uses @internally_instrumented, + # the superclass is otherwise not instrumented. + # see [ticket:2406]. 
+ _instrument_class(InstrumentedList) + _instrument_class(InstrumentedSet) + _instrument_class(KeyFuncDict) + + +__go(locals()) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/context.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/context.py new file mode 100644 index 0000000000000000000000000000000000000000..d5ed61de53f41334b42c3c6d144aec286f1b987e --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/context.py @@ -0,0 +1,3334 @@ +# orm/context.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php +# mypy: ignore-errors + +from __future__ import annotations + +import itertools +from typing import Any +from typing import cast +from typing import Dict +from typing import Iterable +from typing import List +from typing import Optional +from typing import Set +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING +from typing import TypeVar +from typing import Union + +from . import attributes +from . import interfaces +from . import loading +from .base import _is_aliased_class +from .interfaces import ORMColumnDescription +from .interfaces import ORMColumnsClauseRole +from .path_registry import PathRegistry +from .util import _entity_corresponds_to +from .util import _ORMJoin +from .util import _TraceAdaptRole +from .util import AliasedClass +from .util import Bundle +from .util import ORMAdapter +from .util import ORMStatementAdapter +from .. import exc as sa_exc +from .. import future +from .. import inspect +from .. import sql +from .. 
import util +from ..sql import coercions +from ..sql import expression +from ..sql import roles +from ..sql import util as sql_util +from ..sql import visitors +from ..sql._typing import _TP +from ..sql._typing import is_dml +from ..sql._typing import is_insert_update +from ..sql._typing import is_select_base +from ..sql.base import _select_iterables +from ..sql.base import CacheableOptions +from ..sql.base import CompileState +from ..sql.base import Executable +from ..sql.base import Generative +from ..sql.base import Options +from ..sql.dml import UpdateBase +from ..sql.elements import GroupedElement +from ..sql.elements import TextClause +from ..sql.selectable import CompoundSelectState +from ..sql.selectable import LABEL_STYLE_DISAMBIGUATE_ONLY +from ..sql.selectable import LABEL_STYLE_NONE +from ..sql.selectable import LABEL_STYLE_TABLENAME_PLUS_COL +from ..sql.selectable import Select +from ..sql.selectable import SelectLabelStyle +from ..sql.selectable import SelectState +from ..sql.selectable import TypedReturnsRows +from ..sql.visitors import InternalTraversal + +if TYPE_CHECKING: + from ._typing import _InternalEntityType + from ._typing import OrmExecuteOptionsParameter + from .loading import PostLoad + from .mapper import Mapper + from .query import Query + from .session import _BindArguments + from .session import Session + from ..engine import Result + from ..engine.interfaces import _CoreSingleExecuteParams + from ..sql._typing import _ColumnsClauseArgument + from ..sql.compiler import SQLCompiler + from ..sql.dml import _DMLTableElement + from ..sql.elements import ColumnElement + from ..sql.selectable import _JoinTargetElement + from ..sql.selectable import _LabelConventionCallable + from ..sql.selectable import _SetupJoinsElement + from ..sql.selectable import ExecutableReturnsRows + from ..sql.selectable import SelectBase + from ..sql.type_api import TypeEngine + +_T = TypeVar("_T", bound=Any) +_path_registry = PathRegistry.root + +_EMPTY_DICT = util.immutabledict() + + +LABEL_STYLE_LEGACY_ORM = SelectLabelStyle.LABEL_STYLE_LEGACY_ORM + + +class QueryContext: + __slots__ = ( + "top_level_context", + "compile_state", + "query", + "user_passed_query", + "params", + "load_options", + "bind_arguments", + "execution_options", + "session", + "autoflush", + "populate_existing", + "invoke_all_eagers", + "version_check", + "refresh_state", + "create_eager_joins", + "propagated_loader_options", + "attributes", + "runid", + "partials", + "post_load_paths", + "identity_token", + "yield_per", + "loaders_require_buffering", + "loaders_require_uniquing", + ) + + runid: int + post_load_paths: Dict[PathRegistry, PostLoad] + compile_state: ORMCompileState + + class default_load_options(Options): + _only_return_tuples = False + _populate_existing = False + _version_check = False + _invoke_all_eagers = True + _autoflush = True + _identity_token = None + _yield_per = None + _refresh_state = None + _lazy_loaded_from = None + _legacy_uniquing = False + _sa_top_level_orm_context = None + _is_user_refresh = False + + def __init__( + self, + compile_state: CompileState, + statement: Union[Select[Any], FromStatement[Any], UpdateBase], + user_passed_query: Union[ + Select[Any], + FromStatement[Any], + UpdateBase, + ], + params: _CoreSingleExecuteParams, + session: Session, + load_options: Union[ + Type[QueryContext.default_load_options], + QueryContext.default_load_options, + ], + execution_options: Optional[OrmExecuteOptionsParameter] = None, + bind_arguments: Optional[_BindArguments] = None, + ): 
+ self.load_options = load_options + self.execution_options = execution_options or _EMPTY_DICT + self.bind_arguments = bind_arguments or _EMPTY_DICT + self.compile_state = compile_state + self.query = statement + + # the query that the end user passed to Session.execute() or similar. + # this is usually the same as .query, except in the bulk_persistence + # routines where a separate FromStatement is manufactured in the + # compile stage; this allows differentiation in that case. + self.user_passed_query = user_passed_query + + self.session = session + self.loaders_require_buffering = False + self.loaders_require_uniquing = False + self.params = params + self.top_level_context = load_options._sa_top_level_orm_context + + cached_options = compile_state.select_statement._with_options + uncached_options = user_passed_query._with_options + + # see issue #7447 , #8399 for some background + # propagated loader options will be present on loaded InstanceState + # objects under state.load_options and are typically used by + # LazyLoader to apply options to the SELECT statement it emits. + # For compile state options (i.e. loader strategy options), these + # need to line up with the ".load_path" attribute which in + # loader.py is pulled from context.compile_state.current_path. + # so, this means these options have to be the ones from the + # *cached* statement that's travelling with compile_state, not the + # *current* statement which won't match up for an ad-hoc + # AliasedClass + self.propagated_loader_options = tuple( + opt._adapt_cached_option_to_uncached_option(self, uncached_opt) + for opt, uncached_opt in zip(cached_options, uncached_options) + if opt.propagate_to_loaders + ) + + self.attributes = dict(compile_state.attributes) + + self.autoflush = load_options._autoflush + self.populate_existing = load_options._populate_existing + self.invoke_all_eagers = load_options._invoke_all_eagers + self.version_check = load_options._version_check + self.refresh_state = load_options._refresh_state + self.yield_per = load_options._yield_per + self.identity_token = load_options._identity_token + + def _get_top_level_context(self) -> QueryContext: + return self.top_level_context or self + + +_orm_load_exec_options = util.immutabledict( + {"_result_disable_adapt_to_context": True} +) + + +class AbstractORMCompileState(CompileState): + is_dml_returning = False + + def _init_global_attributes( + self, statement, compiler, *, toplevel, process_criteria_for_toplevel + ): + self.attributes = {} + + if compiler is None: + # this is the legacy / testing only ORM _compile_state() use case. + # there is no need to apply criteria options for this. + self.global_attributes = {} + assert toplevel + return + else: + self.global_attributes = ga = compiler._global_attributes + + if toplevel: + ga["toplevel_orm"] = True + + if process_criteria_for_toplevel: + for opt in statement._with_options: + if opt._is_criteria_option: + opt.process_compile_state(self) + + return + elif ga.get("toplevel_orm", False): + return + + stack_0 = compiler.stack[0] + + try: + toplevel_stmt = stack_0["selectable"] + except KeyError: + pass + else: + for opt in toplevel_stmt._with_options: + if opt._is_compile_state and opt._is_criteria_option: + opt.process_compile_state(self) + + ga["toplevel_orm"] = True + + @classmethod + def create_for_statement( + cls, + statement: Executable, + compiler: SQLCompiler, + **kw: Any, + ) -> CompileState: + """Create a context for a statement given a :class:`.Compiler`. 
+ + This method is always invoked in the context of SQLCompiler.process(). + + For a Select object, this would be invoked from + SQLCompiler.visit_select(). For the special FromStatement object used + by Query to indicate "Query.from_statement()", this is called by + FromStatement._compiler_dispatch() that would be called by + SQLCompiler.process(). + """ + return super().create_for_statement(statement, compiler, **kw) + + @classmethod + def orm_pre_session_exec( + cls, + session, + statement, + params, + execution_options, + bind_arguments, + is_pre_event, + ): + raise NotImplementedError() + + @classmethod + def orm_execute_statement( + cls, + session, + statement, + params, + execution_options, + bind_arguments, + conn, + ) -> Result: + result = conn.execute( + statement, params or {}, execution_options=execution_options + ) + return cls.orm_setup_cursor_result( + session, + statement, + params, + execution_options, + bind_arguments, + result, + ) + + @classmethod + def orm_setup_cursor_result( + cls, + session, + statement, + params, + execution_options, + bind_arguments, + result, + ): + raise NotImplementedError() + + +class AutoflushOnlyORMCompileState(AbstractORMCompileState): + """ORM compile state that is a passthrough, except for autoflush.""" + + @classmethod + def orm_pre_session_exec( + cls, + session, + statement, + params, + execution_options, + bind_arguments, + is_pre_event, + ): + # consume result-level load_options. These may have been set up + # in an ORMExecuteState hook + ( + load_options, + execution_options, + ) = QueryContext.default_load_options.from_execution_options( + "_sa_orm_load_options", + { + "autoflush", + }, + execution_options, + statement._execution_options, + ) + + if not is_pre_event and load_options._autoflush: + session._autoflush() + + return statement, execution_options + + @classmethod + def orm_setup_cursor_result( + cls, + session, + statement, + params, + execution_options, + bind_arguments, + result, + ): + return result + + +class ORMCompileState(AbstractORMCompileState): + class default_compile_options(CacheableOptions): + _cache_key_traversal = [ + ("_use_legacy_query_style", InternalTraversal.dp_boolean), + ("_for_statement", InternalTraversal.dp_boolean), + ("_bake_ok", InternalTraversal.dp_boolean), + ("_current_path", InternalTraversal.dp_has_cache_key), + ("_enable_single_crit", InternalTraversal.dp_boolean), + ("_enable_eagerloads", InternalTraversal.dp_boolean), + ("_only_load_props", InternalTraversal.dp_plain_obj), + ("_set_base_alias", InternalTraversal.dp_boolean), + ("_for_refresh_state", InternalTraversal.dp_boolean), + ("_render_for_subquery", InternalTraversal.dp_boolean), + ("_is_star", InternalTraversal.dp_boolean), + ] + + # set to True by default from Query._statement_20(), to indicate + # the rendered query should look like a legacy ORM query. right + # now this basically indicates we should use tablename_columnname + # style labels. Generally indicates the statement originated + # from a Query object. + _use_legacy_query_style = False + + # set *only* when we are coming from the Query.statement + # accessor, or a Query-level equivalent such as + # query.subquery(). this supersedes "toplevel". 
+ _for_statement = False + + _bake_ok = True + _current_path = _path_registry + _enable_single_crit = True + _enable_eagerloads = True + _only_load_props = None + _set_base_alias = False + _for_refresh_state = False + _render_for_subquery = False + _is_star = False + + attributes: Dict[Any, Any] + global_attributes: Dict[Any, Any] + + statement: Union[Select[Any], FromStatement[Any], UpdateBase] + select_statement: Union[Select[Any], FromStatement[Any], UpdateBase] + _entities: List[_QueryEntity] + _polymorphic_adapters: Dict[_InternalEntityType, ORMAdapter] + compile_options: Union[ + Type[default_compile_options], default_compile_options + ] + _primary_entity: Optional[_QueryEntity] + use_legacy_query_style: bool + _label_convention: _LabelConventionCallable + primary_columns: List[ColumnElement[Any]] + secondary_columns: List[ColumnElement[Any]] + dedupe_columns: Set[ColumnElement[Any]] + create_eager_joins: List[ + # TODO: this structure is set up by JoinedLoader + Tuple[Any, ...] + ] + current_path: PathRegistry = _path_registry + _has_mapper_entities = False + + def __init__(self, *arg, **kw): + raise NotImplementedError() + + @classmethod + def create_for_statement( + cls, + statement: Executable, + compiler: SQLCompiler, + **kw: Any, + ) -> ORMCompileState: + return cls._create_orm_context( + cast("Union[Select, FromStatement]", statement), + toplevel=not compiler.stack, + compiler=compiler, + **kw, + ) + + @classmethod + def _create_orm_context( + cls, + statement: Union[Select, FromStatement], + *, + toplevel: bool, + compiler: Optional[SQLCompiler], + **kw: Any, + ) -> ORMCompileState: + raise NotImplementedError() + + def _append_dedupe_col_collection(self, obj, col_collection): + dedupe = self.dedupe_columns + if obj not in dedupe: + dedupe.add(obj) + col_collection.append(obj) + + @classmethod + def _column_naming_convention( + cls, label_style: SelectLabelStyle, legacy: bool + ) -> _LabelConventionCallable: + if legacy: + + def name(col, col_name=None): + if col_name: + return col_name + else: + return getattr(col, "key") + + return name + else: + return SelectState._column_naming_convention(label_style) + + @classmethod + def get_column_descriptions(cls, statement): + return _column_descriptions(statement) + + @classmethod + def orm_pre_session_exec( + cls, + session, + statement, + params, + execution_options, + bind_arguments, + is_pre_event, + ): + # consume result-level load_options. These may have been set up + # in an ORMExecuteState hook + ( + load_options, + execution_options, + ) = QueryContext.default_load_options.from_execution_options( + "_sa_orm_load_options", + { + "populate_existing", + "autoflush", + "yield_per", + "identity_token", + "sa_top_level_orm_context", + }, + execution_options, + statement._execution_options, + ) + + # default execution options for ORM results: + # 1. _result_disable_adapt_to_context=True + # this will disable the ResultSetMetadata._adapt_to_context() + # step which we don't need, as we have result processors cached + # against the original SELECT statement before caching. 
+ + if "sa_top_level_orm_context" in execution_options: + ctx = execution_options["sa_top_level_orm_context"] + execution_options = ctx.query._execution_options.merge_with( + ctx.execution_options, execution_options + ) + + if not execution_options: + execution_options = _orm_load_exec_options + else: + execution_options = execution_options.union(_orm_load_exec_options) + + # would have been placed here by legacy Query only + if load_options._yield_per: + execution_options = execution_options.union( + {"yield_per": load_options._yield_per} + ) + + if ( + getattr(statement._compile_options, "_current_path", None) + and len(statement._compile_options._current_path) > 10 + and execution_options.get("compiled_cache", True) is not None + ): + execution_options: util.immutabledict[str, Any] = ( + execution_options.union( + { + "compiled_cache": None, + "_cache_disable_reason": "excess depth for " + "ORM loader options", + } + ) + ) + + bind_arguments["clause"] = statement + + # new in 1.4 - the coercions system is leveraged to allow the + # "subject" mapper of a statement be propagated to the top + # as the statement is built. "subject" mapper is the generally + # standard object used as an identifier for multi-database schemes. + + # we are here based on the fact that _propagate_attrs contains + # "compile_state_plugin": "orm". The "plugin_subject" + # needs to be present as well. + + try: + plugin_subject = statement._propagate_attrs["plugin_subject"] + except KeyError: + assert False, "statement had 'orm' plugin but no plugin_subject" + else: + if plugin_subject: + bind_arguments["mapper"] = plugin_subject.mapper + + if not is_pre_event and load_options._autoflush: + session._autoflush() + + return statement, execution_options + + @classmethod + def orm_setup_cursor_result( + cls, + session, + statement, + params, + execution_options, + bind_arguments, + result, + ): + execution_context = result.context + compile_state = execution_context.compiled.compile_state + + # cover edge case where ORM entities used in legacy select + # were passed to session.execute: + # session.execute(legacy_select([User.id, User.name])) + # see test_query->test_legacy_tuple_old_select + + load_options = execution_options.get( + "_sa_orm_load_options", QueryContext.default_load_options + ) + + if compile_state.compile_options._is_star: + return result + + querycontext = QueryContext( + compile_state, + statement, + statement, + params, + session, + load_options, + execution_options, + bind_arguments, + ) + return loading.instances(result, querycontext) + + @property + def _lead_mapper_entities(self): + """return all _MapperEntity objects in the lead entities collection. + + Does **not** include entities that have been replaced by + with_entities(), with_only_columns() + + """ + return [ + ent for ent in self._entities if isinstance(ent, _MapperEntity) + ] + + def _create_with_polymorphic_adapter(self, ext_info, selectable): + """given MapperEntity or ORMColumnEntity, setup polymorphic loading + if called for by the Mapper. + + As of #8168 in 2.0.0rc1, polymorphic adapters, which greatly increase + the complexity of the query creation process, are not used at all + except in the quasi-legacy cases of with_polymorphic referring to an + alias and/or subquery. This would apply to concrete polymorphic + loading, and joined inheritance where a subquery is + passed to with_polymorphic (which is completely unnecessary in modern + use). 
+ + """ + if ( + not ext_info.is_aliased_class + and ext_info.mapper.persist_selectable + not in self._polymorphic_adapters + ): + for mp in ext_info.mapper.iterate_to_root(): + self._mapper_loads_polymorphically_with( + mp, + ORMAdapter( + _TraceAdaptRole.WITH_POLYMORPHIC_ADAPTER, + mp, + equivalents=mp._equivalent_columns, + selectable=selectable, + ), + ) + + def _mapper_loads_polymorphically_with(self, mapper, adapter): + for m2 in mapper._with_polymorphic_mappers or [mapper]: + self._polymorphic_adapters[m2] = adapter + + for m in m2.iterate_to_root(): + self._polymorphic_adapters[m.local_table] = adapter + + @classmethod + def _create_entities_collection(cls, query, legacy): + raise NotImplementedError( + "this method only works for ORMSelectCompileState" + ) + + +class _DMLReturningColFilter: + """a base for an adapter used for the DML RETURNING cases + + Has a subset of the interface used by + :class:`.ORMAdapter` and is used for :class:`._QueryEntity` + instances to set up their columns as used in RETURNING for a + DML statement. + + """ + + __slots__ = ("mapper", "columns", "__weakref__") + + def __init__(self, target_mapper, immediate_dml_mapper): + if ( + immediate_dml_mapper is not None + and target_mapper.local_table + is not immediate_dml_mapper.local_table + ): + # joined inh, or in theory other kinds of multi-table mappings + self.mapper = immediate_dml_mapper + else: + # single inh, normal mappings, etc. + self.mapper = target_mapper + self.columns = self.columns = util.WeakPopulateDict( + self.adapt_check_present # type: ignore + ) + + def __call__(self, col, as_filter): + for cc in sql_util._find_columns(col): + c2 = self.adapt_check_present(cc) + if c2 is not None: + return col + else: + return None + + def adapt_check_present(self, col): + raise NotImplementedError() + + +class _DMLBulkInsertReturningColFilter(_DMLReturningColFilter): + """an adapter used for the DML RETURNING case specifically + for ORM bulk insert (or any hypothetical DML that is splitting out a class + hierarchy among multiple DML statements....ORM bulk insert is the only + example right now) + + its main job is to limit the columns in a RETURNING to only a specific + mapped table in a hierarchy. + + """ + + def adapt_check_present(self, col): + mapper = self.mapper + prop = mapper._columntoproperty.get(col, None) + if prop is None: + return None + return mapper.local_table.c.corresponding_column(col) + + +class _DMLUpdateDeleteReturningColFilter(_DMLReturningColFilter): + """an adapter used for the DML RETURNING case specifically + for ORM enabled UPDATE/DELETE + + its main job is to limit the columns in a RETURNING to include + only direct persisted columns from the immediate selectable, not + expressions like column_property(), or to also allow columns from other + mappers for the UPDATE..FROM use case. 
+ + """ + + def adapt_check_present(self, col): + mapper = self.mapper + prop = mapper._columntoproperty.get(col, None) + if prop is not None: + # if the col is from the immediate mapper, only return a persisted + # column, not any kind of column_property expression + return mapper.persist_selectable.c.corresponding_column(col) + + # if the col is from some other mapper, just return it, assume the + # user knows what they are doing + return col + + +@sql.base.CompileState.plugin_for("orm", "orm_from_statement") +class ORMFromStatementCompileState(ORMCompileState): + _from_obj_alias = None + _has_mapper_entities = False + + statement_container: FromStatement + requested_statement: Union[SelectBase, TextClause, UpdateBase] + dml_table: Optional[_DMLTableElement] = None + + _has_orm_entities = False + multi_row_eager_loaders = False + eager_adding_joins = False + compound_eager_adapter = None + + extra_criteria_entities = _EMPTY_DICT + eager_joins = _EMPTY_DICT + + @classmethod + def _create_orm_context( + cls, + statement: Union[Select, FromStatement], + *, + toplevel: bool, + compiler: Optional[SQLCompiler], + **kw: Any, + ) -> ORMFromStatementCompileState: + statement_container = statement + + assert isinstance(statement_container, FromStatement) + + if compiler is not None and compiler.stack: + raise sa_exc.CompileError( + "The ORM FromStatement construct only supports being " + "invoked as the topmost statement, as it is only intended to " + "define how result rows should be returned." + ) + + self = cls.__new__(cls) + self._primary_entity = None + + self.use_legacy_query_style = ( + statement_container._compile_options._use_legacy_query_style + ) + self.statement_container = self.select_statement = statement_container + self.requested_statement = statement = statement_container.element + + if statement.is_dml: + self.dml_table = statement.table + self.is_dml_returning = True + + self._entities = [] + self._polymorphic_adapters = {} + + self.compile_options = statement_container._compile_options + + if ( + self.use_legacy_query_style + and isinstance(statement, expression.SelectBase) + and not statement._is_textual + and not statement.is_dml + and statement._label_style is LABEL_STYLE_NONE + ): + self.statement = statement.set_label_style( + LABEL_STYLE_TABLENAME_PLUS_COL + ) + else: + self.statement = statement + + self._label_convention = self._column_naming_convention( + ( + statement._label_style + if not statement._is_textual and not statement.is_dml + else LABEL_STYLE_NONE + ), + self.use_legacy_query_style, + ) + + _QueryEntity.to_compile_state( + self, + statement_container._raw_columns, + self._entities, + is_current_entities=True, + ) + + self.current_path = statement_container._compile_options._current_path + + self._init_global_attributes( + statement_container, + compiler, + process_criteria_for_toplevel=False, + toplevel=True, + ) + + if statement_container._with_options: + for opt in statement_container._with_options: + if opt._is_compile_state: + opt.process_compile_state(self) + + if statement_container._with_context_options: + for fn, key in statement_container._with_context_options: + fn(self) + + self.primary_columns = [] + self.secondary_columns = [] + self.dedupe_columns = set() + self.create_eager_joins = [] + self._fallback_from_clauses = [] + + self.order_by = None + + if isinstance(self.statement, expression.TextClause): + # TextClause has no "column" objects at all. 
for this case, + # we generate columns from our _QueryEntity objects, then + # flip on all the "please match no matter what" parameters. + self.extra_criteria_entities = {} + + for entity in self._entities: + entity.setup_compile_state(self) + + compiler._ordered_columns = compiler._textual_ordered_columns = ( + False + ) + + # enable looser result column matching. this is shown to be + # needed by test_query.py::TextTest + compiler._loose_column_name_matching = True + + for c in self.primary_columns: + compiler.process( + c, + within_columns_clause=True, + add_to_result_map=compiler._add_to_result_map, + ) + else: + # for everyone else, Select, Insert, Update, TextualSelect, they + # have column objects already. After much + # experimentation here, the best approach seems to be, use + # those columns completely, don't interfere with the compiler + # at all; just in ORM land, use an adapter to convert from + # our ORM columns to whatever columns are in the statement, + # before we look in the result row. Adapt on names + # to accept cases such as issue #9217, however also allow + # this to be overridden for cases such as #9273. + self._from_obj_alias = ORMStatementAdapter( + _TraceAdaptRole.ADAPT_FROM_STATEMENT, + self.statement, + adapt_on_names=statement_container._adapt_on_names, + ) + + return self + + def _adapt_col_list(self, cols, current_adapter): + return cols + + def _get_current_adapter(self): + return None + + def setup_dml_returning_compile_state(self, dml_mapper): + """used by BulkORMInsert, Update, Delete to set up a handler + for RETURNING to return ORM objects and expressions + + """ + target_mapper = self.statement._propagate_attrs.get( + "plugin_subject", None + ) + + if self.statement.is_insert: + adapter = _DMLBulkInsertReturningColFilter( + target_mapper, dml_mapper + ) + elif self.statement.is_update or self.statement.is_delete: + adapter = _DMLUpdateDeleteReturningColFilter( + target_mapper, dml_mapper + ) + else: + adapter = None + + if self.compile_options._is_star and (len(self._entities) != 1): + raise sa_exc.CompileError( + "Can't generate ORM query that includes multiple expressions " + "at the same time as '*'; query for '*' alone if present" + ) + + for entity in self._entities: + entity.setup_dml_returning_compile_state(self, adapter) + + +class FromStatement(GroupedElement, Generative, TypedReturnsRows[_TP]): + """Core construct that represents a load of ORM objects from various + :class:`.ReturnsRows` and other classes including: + + :class:`.Select`, :class:`.TextClause`, :class:`.TextualSelect`, + :class:`.CompoundSelect`, :class`.Insert`, :class:`.Update`, + and in theory, :class:`.Delete`. 
+ + """ + + __visit_name__ = "orm_from_statement" + + _compile_options = ORMFromStatementCompileState.default_compile_options + + _compile_state_factory = ORMFromStatementCompileState.create_for_statement + + _for_update_arg = None + + element: Union[ExecutableReturnsRows, TextClause] + + _adapt_on_names: bool + + _traverse_internals = [ + ("_raw_columns", InternalTraversal.dp_clauseelement_list), + ("element", InternalTraversal.dp_clauseelement), + ] + Executable._executable_traverse_internals + + _cache_key_traversal = _traverse_internals + [ + ("_compile_options", InternalTraversal.dp_has_cache_key) + ] + + is_from_statement = True + + def __init__( + self, + entities: Iterable[_ColumnsClauseArgument[Any]], + element: Union[ExecutableReturnsRows, TextClause], + _adapt_on_names: bool = True, + ): + self._raw_columns = [ + coercions.expect( + roles.ColumnsClauseRole, + ent, + apply_propagate_attrs=self, + post_inspect=True, + ) + for ent in util.to_list(entities) + ] + self.element = element + self.is_dml = element.is_dml + self.is_select = element.is_select + self.is_delete = element.is_delete + self.is_insert = element.is_insert + self.is_update = element.is_update + self._label_style = ( + element._label_style if is_select_base(element) else None + ) + self._adapt_on_names = _adapt_on_names + + def _compiler_dispatch(self, compiler, **kw): + """provide a fixed _compiler_dispatch method. + + This is roughly similar to using the sqlalchemy.ext.compiler + ``@compiles`` extension. + + """ + + compile_state = self._compile_state_factory(self, compiler, **kw) + + toplevel = not compiler.stack + + if toplevel: + compiler.compile_state = compile_state + + return compiler.process(compile_state.statement, **kw) + + @property + def column_descriptions(self): + """Return a :term:`plugin-enabled` 'column descriptions' structure + referring to the columns which are SELECTed by this statement. + + See the section :ref:`queryguide_inspection` for an overview + of this feature. + + .. 
seealso:: + + :ref:`queryguide_inspection` - ORM background + + """ + meth = cast( + ORMSelectCompileState, SelectState.get_plugin_class(self) + ).get_column_descriptions + return meth(self) + + def _ensure_disambiguated_names(self): + return self + + def get_children(self, **kw): + yield from itertools.chain.from_iterable( + element._from_objects for element in self._raw_columns + ) + yield from super().get_children(**kw) + + @property + def _all_selected_columns(self): + return self.element._all_selected_columns + + @property + def _return_defaults(self): + return self.element._return_defaults if is_dml(self.element) else None + + @property + def _returning(self): + return self.element._returning if is_dml(self.element) else None + + @property + def _inline(self): + return self.element._inline if is_insert_update(self.element) else None + + +@sql.base.CompileState.plugin_for("orm", "compound_select") +class CompoundSelectCompileState( + AutoflushOnlyORMCompileState, CompoundSelectState +): + pass + + +@sql.base.CompileState.plugin_for("orm", "select") +class ORMSelectCompileState(ORMCompileState, SelectState): + _already_joined_edges = () + + _memoized_entities = _EMPTY_DICT + + _from_obj_alias = None + _has_mapper_entities = False + + _has_orm_entities = False + multi_row_eager_loaders = False + eager_adding_joins = False + compound_eager_adapter = None + + correlate = None + correlate_except = None + _where_criteria = () + _having_criteria = () + + @classmethod + def _create_orm_context( + cls, + statement: Union[Select, FromStatement], + *, + toplevel: bool, + compiler: Optional[SQLCompiler], + **kw: Any, + ) -> ORMSelectCompileState: + + self = cls.__new__(cls) + + select_statement = statement + + # if we are a select() that was never a legacy Query, we won't + # have ORM level compile options. + statement._compile_options = cls.default_compile_options.safe_merge( + statement._compile_options + ) + + if select_statement._execution_options: + # execution options should not impact the compilation of a + # query, and at the moment subqueryloader is putting some things + # in here that we explicitly don't want stuck in a cache. + self.select_statement = select_statement._clone() + self.select_statement._execution_options = util.immutabledict() + else: + self.select_statement = select_statement + + # indicates this select() came from Query.statement + self.for_statement = select_statement._compile_options._for_statement + + # generally if we are from Query or directly from a select() + self.use_legacy_query_style = ( + select_statement._compile_options._use_legacy_query_style + ) + + self._entities = [] + self._primary_entity = None + self._polymorphic_adapters = {} + + self.compile_options = select_statement._compile_options + + if not toplevel: + # for subqueries, turn off eagerloads and set + # "render_for_subquery". + self.compile_options += { + "_enable_eagerloads": False, + "_render_for_subquery": True, + } + + # determine label style. we can make different decisions here. + # at the moment, trying to see if we can always use DISAMBIGUATE_ONLY + # rather than LABEL_STYLE_NONE, and if we can use disambiguate style + # for new style ORM selects too. 
+ if ( + self.use_legacy_query_style + and self.select_statement._label_style is LABEL_STYLE_LEGACY_ORM + ): + if not self.for_statement: + self.label_style = LABEL_STYLE_TABLENAME_PLUS_COL + else: + self.label_style = LABEL_STYLE_DISAMBIGUATE_ONLY + else: + self.label_style = self.select_statement._label_style + + if select_statement._memoized_select_entities: + self._memoized_entities = { + memoized_entities: _QueryEntity.to_compile_state( + self, + memoized_entities._raw_columns, + [], + is_current_entities=False, + ) + for memoized_entities in ( + select_statement._memoized_select_entities + ) + } + + # label_convention is stateful and will yield deduping keys if it + # sees the same key twice. therefore it's important that it is not + # invoked for the above "memoized" entities that aren't actually + # in the columns clause + self._label_convention = self._column_naming_convention( + statement._label_style, self.use_legacy_query_style + ) + + _QueryEntity.to_compile_state( + self, + select_statement._raw_columns, + self._entities, + is_current_entities=True, + ) + + self.current_path = select_statement._compile_options._current_path + + self.eager_order_by = () + + self._init_global_attributes( + select_statement, + compiler, + toplevel=toplevel, + process_criteria_for_toplevel=False, + ) + + if toplevel and ( + select_statement._with_options + or select_statement._memoized_select_entities + ): + for ( + memoized_entities + ) in select_statement._memoized_select_entities: + for opt in memoized_entities._with_options: + if opt._is_compile_state: + opt.process_compile_state_replaced_entities( + self, + [ + ent + for ent in self._memoized_entities[ + memoized_entities + ] + if isinstance(ent, _MapperEntity) + ], + ) + + for opt in self.select_statement._with_options: + if opt._is_compile_state: + opt.process_compile_state(self) + + # uncomment to print out the context.attributes structure + # after it's been set up above + # self._dump_option_struct() + + if select_statement._with_context_options: + for fn, key in select_statement._with_context_options: + fn(self) + + self.primary_columns = [] + self.secondary_columns = [] + self.dedupe_columns = set() + self.eager_joins = {} + self.extra_criteria_entities = {} + self.create_eager_joins = [] + self._fallback_from_clauses = [] + + # normalize the FROM clauses early by themselves, as this makes + # it an easier job when we need to assemble a JOIN onto these, + # for select.join() as well as joinedload(). As of 1.4 there are now + # potentially more complex sets of FROM objects here as the use + # of lambda statements for lazyload, load_on_pk etc. uses more + # cloning of the select() construct. 
See #6495 + self.from_clauses = self._normalize_froms( + info.selectable for info in select_statement._from_obj + ) + + # this is a fairly arbitrary break into a second method, + # so it might be nicer to break up create_for_statement() + # and _setup_for_generate into three or four logical sections + self._setup_for_generate() + + SelectState.__init__(self, self.statement, compiler, **kw) + return self + + def _dump_option_struct(self): + print("\n---------------------------------------------------\n") + print(f"current path: {self.current_path}") + for key in self.attributes: + if isinstance(key, tuple) and key[0] == "loader": + print(f"\nLoader: {PathRegistry.coerce(key[1])}") + print(f" {self.attributes[key]}") + print(f" {self.attributes[key].__dict__}") + elif isinstance(key, tuple) and key[0] == "path_with_polymorphic": + print(f"\nWith Polymorphic: {PathRegistry.coerce(key[1])}") + print(f" {self.attributes[key]}") + + def _setup_for_generate(self): + query = self.select_statement + + self.statement = None + self._join_entities = () + + if self.compile_options._set_base_alias: + # legacy Query only + self._set_select_from_alias() + + for memoized_entities in query._memoized_select_entities: + if memoized_entities._setup_joins: + self._join( + memoized_entities._setup_joins, + self._memoized_entities[memoized_entities], + ) + + if query._setup_joins: + self._join(query._setup_joins, self._entities) + + current_adapter = self._get_current_adapter() + + if query._where_criteria: + self._where_criteria = query._where_criteria + + if current_adapter: + self._where_criteria = tuple( + current_adapter(crit, True) + for crit in self._where_criteria + ) + + # TODO: some complexity with order_by here was due to mapper.order_by. + # now that this is removed we can hopefully make order_by / + # group_by act identically to how they are in Core select. + self.order_by = ( + self._adapt_col_list(query._order_by_clauses, current_adapter) + if current_adapter and query._order_by_clauses not in (None, False) + else query._order_by_clauses + ) + + if query._having_criteria: + self._having_criteria = tuple( + current_adapter(crit, True) if current_adapter else crit + for crit in query._having_criteria + ) + + self.group_by = ( + self._adapt_col_list( + util.flatten_iterator(query._group_by_clauses), current_adapter + ) + if current_adapter and query._group_by_clauses not in (None, False) + else query._group_by_clauses or None + ) + + if self.eager_order_by: + adapter = self.from_clauses[0]._target_adapter + self.eager_order_by = adapter.copy_and_process(self.eager_order_by) + + if query._distinct_on: + self.distinct_on = self._adapt_col_list( + query._distinct_on, current_adapter + ) + else: + self.distinct_on = () + + self.distinct = query._distinct + + if query._correlate: + # ORM mapped entities that are mapped to joins can be passed + # to .correlate, so here they are broken into their component + # tables. 
+ self.correlate = tuple( + util.flatten_iterator( + sql_util.surface_selectables(s) if s is not None else None + for s in query._correlate + ) + ) + elif query._correlate_except is not None: + self.correlate_except = tuple( + util.flatten_iterator( + sql_util.surface_selectables(s) if s is not None else None + for s in query._correlate_except + ) + ) + elif not query._auto_correlate: + self.correlate = (None,) + + # PART II + + self._for_update_arg = query._for_update_arg + + if self.compile_options._is_star and (len(self._entities) != 1): + raise sa_exc.CompileError( + "Can't generate ORM query that includes multiple expressions " + "at the same time as '*'; query for '*' alone if present" + ) + for entity in self._entities: + entity.setup_compile_state(self) + + for rec in self.create_eager_joins: + strategy = rec[0] + strategy(self, *rec[1:]) + + # else "load from discrete FROMs" mode, + # i.e. when each _MappedEntity has its own FROM + + if self.compile_options._enable_single_crit: + self._adjust_for_extra_criteria() + + if not self.primary_columns: + if self.compile_options._only_load_props: + assert False, "no columns were included in _only_load_props" + + raise sa_exc.InvalidRequestError( + "Query contains no columns with which to SELECT from." + ) + + if not self.from_clauses: + self.from_clauses = list(self._fallback_from_clauses) + + if self.order_by is False: + self.order_by = None + + if ( + self.multi_row_eager_loaders + and self.eager_adding_joins + and self._should_nest_selectable + ): + self.statement = self._compound_eager_statement() + else: + self.statement = self._simple_statement() + + if self.for_statement: + ezero = self._mapper_zero() + if ezero is not None: + # TODO: this goes away once we get rid of the deep entity + # thing + self.statement = self.statement._annotate( + {"deepentity": ezero} + ) + + @classmethod + def _create_entities_collection(cls, query, legacy): + """Creates a partial ORMSelectCompileState that includes + the full collection of _MapperEntity and other _QueryEntity objects. + + Supports a few remaining use cases that are pre-compilation + but still need to gather some of the column / adaption information. 
+ + """ + self = cls.__new__(cls) + + self._entities = [] + self._primary_entity = None + self._polymorphic_adapters = {} + + self._label_convention = self._column_naming_convention( + query._label_style, legacy + ) + + # entities will also set up polymorphic adapters for mappers + # that have with_polymorphic configured + _QueryEntity.to_compile_state( + self, query._raw_columns, self._entities, is_current_entities=True + ) + return self + + @classmethod + def determine_last_joined_entity(cls, statement): + setup_joins = statement._setup_joins + + return _determine_last_joined_entity(setup_joins, None) + + @classmethod + def all_selected_columns(cls, statement): + for element in statement._raw_columns: + if ( + element.is_selectable + and "entity_namespace" in element._annotations + ): + ens = element._annotations["entity_namespace"] + if not ens.is_mapper and not ens.is_aliased_class: + yield from _select_iterables([element]) + else: + yield from _select_iterables(ens._all_column_expressions) + else: + yield from _select_iterables([element]) + + @classmethod + def get_columns_clause_froms(cls, statement): + return cls._normalize_froms( + itertools.chain.from_iterable( + ( + element._from_objects + if "parententity" not in element._annotations + else [ + element._annotations[ + "parententity" + ].__clause_element__() + ] + ) + for element in statement._raw_columns + ) + ) + + @classmethod + def from_statement(cls, statement, from_statement): + from_statement = coercions.expect( + roles.ReturnsRowsRole, + from_statement, + apply_propagate_attrs=statement, + ) + + stmt = FromStatement(statement._raw_columns, from_statement) + + stmt.__dict__.update( + _with_options=statement._with_options, + _with_context_options=statement._with_context_options, + _execution_options=statement._execution_options, + _propagate_attrs=statement._propagate_attrs, + ) + return stmt + + def _set_select_from_alias(self): + """used only for legacy Query cases""" + + query = self.select_statement # query + + assert self.compile_options._set_base_alias + assert len(query._from_obj) == 1 + + adapter = self._get_select_from_alias_from_obj(query._from_obj[0]) + if adapter: + self.compile_options += {"_enable_single_crit": False} + self._from_obj_alias = adapter + + def _get_select_from_alias_from_obj(self, from_obj): + """used only for legacy Query cases""" + + info = from_obj + + if "parententity" in info._annotations: + info = info._annotations["parententity"] + + if hasattr(info, "mapper"): + if not info.is_aliased_class: + raise sa_exc.ArgumentError( + "A selectable (FromClause) instance is " + "expected when the base alias is being set." 
+ ) + else: + return info._adapter + + elif isinstance(info.selectable, sql.selectable.AliasedReturnsRows): + equivs = self._all_equivs() + assert info is info.selectable + return ORMStatementAdapter( + _TraceAdaptRole.LEGACY_SELECT_FROM_ALIAS, + info.selectable, + equivalents=equivs, + ) + else: + return None + + def _mapper_zero(self): + """return the Mapper associated with the first QueryEntity.""" + return self._entities[0].mapper + + def _entity_zero(self): + """Return the 'entity' (mapper or AliasedClass) associated + with the first QueryEntity, or alternatively the 'select from' + entity if specified.""" + + for ent in self.from_clauses: + if "parententity" in ent._annotations: + return ent._annotations["parententity"] + for qent in self._entities: + if qent.entity_zero: + return qent.entity_zero + + return None + + def _only_full_mapper_zero(self, methname): + if self._entities != [self._primary_entity]: + raise sa_exc.InvalidRequestError( + "%s() can only be used against " + "a single mapped class." % methname + ) + return self._primary_entity.entity_zero + + def _only_entity_zero(self, rationale=None): + if len(self._entities) > 1: + raise sa_exc.InvalidRequestError( + rationale + or "This operation requires a Query " + "against a single mapper." + ) + return self._entity_zero() + + def _all_equivs(self): + equivs = {} + + for memoized_entities in self._memoized_entities.values(): + for ent in [ + ent + for ent in memoized_entities + if isinstance(ent, _MapperEntity) + ]: + equivs.update(ent.mapper._equivalent_columns) + + for ent in [ + ent for ent in self._entities if isinstance(ent, _MapperEntity) + ]: + equivs.update(ent.mapper._equivalent_columns) + return equivs + + def _compound_eager_statement(self): + # for eager joins present and LIMIT/OFFSET/DISTINCT, + # wrap the query inside a select, + # then append eager joins onto that + + if self.order_by: + # the default coercion for ORDER BY is now the OrderByRole, + # which adds an additional post coercion to ByOfRole in that + # elements are converted into label references. For the + # eager load / subquery wrapping case, we need to un-coerce + # the original expressions outside of the label references + # in order to have them render. + unwrapped_order_by = [ + ( + elem.element + if isinstance(elem, sql.elements._label_reference) + else elem + ) + for elem in self.order_by + ] + + order_by_col_expr = sql_util.expand_column_list_from_order_by( + self.primary_columns, unwrapped_order_by + ) + else: + order_by_col_expr = [] + unwrapped_order_by = None + + # put FOR UPDATE on the inner query, where MySQL will honor it, + # as well as if it has an OF so PostgreSQL can use it. 
+ inner = self._select_statement( + self.primary_columns + + [c for c in order_by_col_expr if c not in self.dedupe_columns], + self.from_clauses, + self._where_criteria, + self._having_criteria, + self.label_style, + self.order_by, + for_update=self._for_update_arg, + hints=self.select_statement._hints, + statement_hints=self.select_statement._statement_hints, + correlate=self.correlate, + correlate_except=self.correlate_except, + **self._select_args, + ) + + inner = inner.alias() + + equivs = self._all_equivs() + + self.compound_eager_adapter = ORMStatementAdapter( + _TraceAdaptRole.COMPOUND_EAGER_STATEMENT, inner, equivalents=equivs + ) + + statement = future.select( + *([inner] + self.secondary_columns) # use_labels=self.labels + ) + statement._label_style = self.label_style + + # Oracle Database however does not allow FOR UPDATE on the subquery, + # and the Oracle Database dialects ignore it, plus for PostgreSQL, + # MySQL we expect that all elements of the row are locked, so also put + # it on the outside (except in the case of PG when OF is used) + if ( + self._for_update_arg is not None + and self._for_update_arg.of is None + ): + statement._for_update_arg = self._for_update_arg + + from_clause = inner + for eager_join in self.eager_joins.values(): + # EagerLoader places a 'stop_on' attribute on the join, + # giving us a marker as to where the "splice point" of + # the join should be + from_clause = sql_util.splice_joins( + from_clause, eager_join, eager_join.stop_on + ) + + statement.select_from.non_generative(statement, from_clause) + + if unwrapped_order_by: + statement.order_by.non_generative( + statement, + *self.compound_eager_adapter.copy_and_process( + unwrapped_order_by + ), + ) + + statement.order_by.non_generative(statement, *self.eager_order_by) + return statement + + def _simple_statement(self): + statement = self._select_statement( + self.primary_columns + self.secondary_columns, + tuple(self.from_clauses) + tuple(self.eager_joins.values()), + self._where_criteria, + self._having_criteria, + self.label_style, + self.order_by, + for_update=self._for_update_arg, + hints=self.select_statement._hints, + statement_hints=self.select_statement._statement_hints, + correlate=self.correlate, + correlate_except=self.correlate_except, + **self._select_args, + ) + + if self.eager_order_by: + statement.order_by.non_generative(statement, *self.eager_order_by) + return statement + + def _select_statement( + self, + raw_columns, + from_obj, + where_criteria, + having_criteria, + label_style, + order_by, + for_update, + hints, + statement_hints, + correlate, + correlate_except, + limit_clause, + offset_clause, + fetch_clause, + fetch_clause_options, + distinct, + distinct_on, + prefixes, + suffixes, + group_by, + independent_ctes, + independent_ctes_opts, + ): + statement = Select._create_raw_select( + _raw_columns=raw_columns, + _from_obj=from_obj, + _label_style=label_style, + ) + + if where_criteria: + statement._where_criteria = where_criteria + if having_criteria: + statement._having_criteria = having_criteria + + if order_by: + statement._order_by_clauses += tuple(order_by) + + if distinct_on: + statement.distinct.non_generative(statement, *distinct_on) + elif distinct: + statement.distinct.non_generative(statement) + + if group_by: + statement._group_by_clauses += tuple(group_by) + + statement._limit_clause = limit_clause + statement._offset_clause = offset_clause + statement._fetch_clause = fetch_clause + statement._fetch_clause_options = fetch_clause_options + 
statement._independent_ctes = independent_ctes + statement._independent_ctes_opts = independent_ctes_opts + + if prefixes: + statement._prefixes = prefixes + + if suffixes: + statement._suffixes = suffixes + + statement._for_update_arg = for_update + + if hints: + statement._hints = hints + if statement_hints: + statement._statement_hints = statement_hints + + if correlate: + statement.correlate.non_generative(statement, *correlate) + + if correlate_except is not None: + statement.correlate_except.non_generative( + statement, *correlate_except + ) + + return statement + + def _adapt_polymorphic_element(self, element): + if "parententity" in element._annotations: + search = element._annotations["parententity"] + alias = self._polymorphic_adapters.get(search, None) + if alias: + return alias.adapt_clause(element) + + if isinstance(element, expression.FromClause): + search = element + elif hasattr(element, "table"): + search = element.table + else: + return None + + alias = self._polymorphic_adapters.get(search, None) + if alias: + return alias.adapt_clause(element) + + def _adapt_col_list(self, cols, current_adapter): + if current_adapter: + return [current_adapter(o, True) for o in cols] + else: + return cols + + def _get_current_adapter(self): + adapters = [] + + if self._from_obj_alias: + # used for legacy going forward for query set_ops, e.g. + # union(), union_all(), etc. + # 1.4 and previously, also used for from_self(), + # select_entity_from() + # + # for the "from obj" alias, apply extra rule to the + # 'ORM only' check, if this query were generated from a + # subquery of itself, i.e. _from_selectable(), apply adaption + # to all SQL constructs. + adapters.append( + ( + True, + self._from_obj_alias.replace, + ) + ) + + # this was *hopefully* the only adapter we were going to need + # going forward...however, we unfortunately need _from_obj_alias + # for query.union(), which we can't drop + if self._polymorphic_adapters: + adapters.append((False, self._adapt_polymorphic_element)) + + if not adapters: + return None + + def _adapt_clause(clause, as_filter): + # do we adapt all expression elements or only those + # tagged as 'ORM' constructs ? + + def replace(elem): + is_orm_adapt = ( + "_orm_adapt" in elem._annotations + or "parententity" in elem._annotations + ) + for always_adapt, adapter in adapters: + if is_orm_adapt or always_adapt: + e = adapter(elem) + if e is not None: + return e + + return visitors.replacement_traverse(clause, {}, replace) + + return _adapt_clause + + def _join(self, args, entities_collection): + for right, onclause, from_, flags in args: + isouter = flags["isouter"] + full = flags["full"] + + right = inspect(right) + if onclause is not None: + onclause = inspect(onclause) + + if isinstance(right, interfaces.PropComparator): + if onclause is not None: + raise sa_exc.InvalidRequestError( + "No 'on clause' argument may be passed when joining " + "to a relationship path as a target" + ) + + onclause = right + right = None + elif "parententity" in right._annotations: + right = right._annotations["parententity"] + + if onclause is None: + if not right.is_selectable and not hasattr(right, "mapper"): + raise sa_exc.ArgumentError( + "Expected mapped entity or " + "selectable/table as join target" + ) + + if isinstance(onclause, interfaces.PropComparator): + # descriptor/property given (or determined); this tells us + # explicitly what the expected "left" side of the join is. 
+ + of_type = getattr(onclause, "_of_type", None) + + if right is None: + if of_type: + right = of_type + else: + right = onclause.property + + try: + right = right.entity + except AttributeError as err: + raise sa_exc.ArgumentError( + "Join target %s does not refer to a " + "mapped entity" % right + ) from err + + left = onclause._parententity + + prop = onclause.property + if not isinstance(onclause, attributes.QueryableAttribute): + onclause = prop + + # check for this path already present. don't render in that + # case. + if (left, right, prop.key) in self._already_joined_edges: + continue + + if from_ is not None: + if ( + from_ is not left + and from_._annotations.get("parententity", None) + is not left + ): + raise sa_exc.InvalidRequestError( + "explicit from clause %s does not match left side " + "of relationship attribute %s" + % ( + from_._annotations.get("parententity", from_), + onclause, + ) + ) + elif from_ is not None: + prop = None + left = from_ + else: + # no descriptor/property given; we will need to figure out + # what the effective "left" side is + prop = left = None + + # figure out the final "left" and "right" sides and create an + # ORMJoin to add to our _from_obj tuple + self._join_left_to_right( + entities_collection, + left, + right, + onclause, + prop, + isouter, + full, + ) + + def _join_left_to_right( + self, + entities_collection, + left, + right, + onclause, + prop, + outerjoin, + full, + ): + """given raw "left", "right", "onclause" parameters consumed from + a particular key within _join(), add a real ORMJoin object to + our _from_obj list (or augment an existing one) + + """ + + if left is None: + # left not given (e.g. no relationship object/name specified) + # figure out the best "left" side based on our existing froms / + # entities + assert prop is None + ( + left, + replace_from_obj_index, + use_entity_index, + ) = self._join_determine_implicit_left_side( + entities_collection, left, right, onclause + ) + else: + # left is given via a relationship/name, or as explicit left side. + # Determine where in our + # "froms" list it should be spliced/appended as well as what + # existing entity it corresponds to. + ( + replace_from_obj_index, + use_entity_index, + ) = self._join_place_explicit_left_side(entities_collection, left) + + if left is right: + raise sa_exc.InvalidRequestError( + "Can't construct a join from %s to %s, they " + "are the same entity" % (left, right) + ) + + # the right side as given often needs to be adapted. additionally + # a lot of things can be wrong with it. 
handle all that and + # get back the new effective "right" side + r_info, right, onclause = self._join_check_and_adapt_right_side( + left, right, onclause, prop + ) + + if not r_info.is_selectable: + extra_criteria = self._get_extra_criteria(r_info) + else: + extra_criteria = () + + if replace_from_obj_index is not None: + # splice into an existing element in the + # self._from_obj list + left_clause = self.from_clauses[replace_from_obj_index] + + self.from_clauses = ( + self.from_clauses[:replace_from_obj_index] + + [ + _ORMJoin( + left_clause, + right, + onclause, + isouter=outerjoin, + full=full, + _extra_criteria=extra_criteria, + ) + ] + + self.from_clauses[replace_from_obj_index + 1 :] + ) + else: + # add a new element to the self._from_obj list + if use_entity_index is not None: + # make use of _MapperEntity selectable, which is usually + # entity_zero.selectable, but if with_polymorphic() were used + # might be distinct + assert isinstance( + entities_collection[use_entity_index], _MapperEntity + ) + left_clause = entities_collection[use_entity_index].selectable + else: + left_clause = left + + self.from_clauses = self.from_clauses + [ + _ORMJoin( + left_clause, + r_info, + onclause, + isouter=outerjoin, + full=full, + _extra_criteria=extra_criteria, + ) + ] + + def _join_determine_implicit_left_side( + self, entities_collection, left, right, onclause + ): + """When join conditions don't express the left side explicitly, + determine if an existing FROM or entity in this query + can serve as the left hand side. + + """ + + # when we are here, it means join() was called without an ORM- + # specific way of telling us what the "left" side is, e.g.: + # + # join(RightEntity) + # + # or + # + # join(RightEntity, RightEntity.foo == LeftEntity.bar) + # + + r_info = inspect(right) + + replace_from_obj_index = use_entity_index = None + + if self.from_clauses: + # we have a list of FROMs already. So by definition this + # join has to connect to one of those FROMs. + + indexes = sql_util.find_left_clause_to_join_from( + self.from_clauses, r_info.selectable, onclause + ) + + if len(indexes) == 1: + replace_from_obj_index = indexes[0] + left = self.from_clauses[replace_from_obj_index] + elif len(indexes) > 1: + raise sa_exc.InvalidRequestError( + "Can't determine which FROM clause to join " + "from, there are multiple FROMS which can " + "join to this entity. Please use the .select_from() " + "method to establish an explicit left side, as well as " + "providing an explicit ON clause if not present already " + "to help resolve the ambiguity." + ) + else: + raise sa_exc.InvalidRequestError( + "Don't know how to join to %r. " + "Please use the .select_from() " + "method to establish an explicit left side, as well as " + "providing an explicit ON clause if not present already " + "to help resolve the ambiguity." % (right,) + ) + + elif entities_collection: + # we have no explicit FROMs, so the implicit left has to + # come from our list of entities. 
+ + potential = {} + for entity_index, ent in enumerate(entities_collection): + entity = ent.entity_zero_or_selectable + if entity is None: + continue + ent_info = inspect(entity) + if ent_info is r_info: # left and right are the same, skip + continue + + # by using a dictionary with the selectables as keys this + # de-duplicates those selectables as occurs when the query is + # against a series of columns from the same selectable + if isinstance(ent, _MapperEntity): + potential[ent.selectable] = (entity_index, entity) + else: + potential[ent_info.selectable] = (None, entity) + + all_clauses = list(potential.keys()) + indexes = sql_util.find_left_clause_to_join_from( + all_clauses, r_info.selectable, onclause + ) + + if len(indexes) == 1: + use_entity_index, left = potential[all_clauses[indexes[0]]] + elif len(indexes) > 1: + raise sa_exc.InvalidRequestError( + "Can't determine which FROM clause to join " + "from, there are multiple FROMS which can " + "join to this entity. Please use the .select_from() " + "method to establish an explicit left side, as well as " + "providing an explicit ON clause if not present already " + "to help resolve the ambiguity." + ) + else: + raise sa_exc.InvalidRequestError( + "Don't know how to join to %r. " + "Please use the .select_from() " + "method to establish an explicit left side, as well as " + "providing an explicit ON clause if not present already " + "to help resolve the ambiguity." % (right,) + ) + else: + raise sa_exc.InvalidRequestError( + "No entities to join from; please use " + "select_from() to establish the left " + "entity/selectable of this join" + ) + + return left, replace_from_obj_index, use_entity_index + + def _join_place_explicit_left_side(self, entities_collection, left): + """When join conditions express a left side explicitly, determine + where in our existing list of FROM clauses we should join towards, + or if we need to make a new join, and if so is it from one of our + existing entities. + + """ + + # when we are here, it means join() was called with an indicator + # as to an exact left side, which means a path to a + # Relationship was given, e.g.: + # + # join(RightEntity, LeftEntity.right) + # + # or + # + # join(LeftEntity.right) + # + # as well as string forms: + # + # join(RightEntity, "right") + # + # etc. + # + + replace_from_obj_index = use_entity_index = None + + l_info = inspect(left) + if self.from_clauses: + indexes = sql_util.find_left_clause_that_matches_given( + self.from_clauses, l_info.selectable + ) + + if len(indexes) > 1: + raise sa_exc.InvalidRequestError( + "Can't identify which entity in which to assign the " + "left side of this join. Please use a more specific " + "ON clause." + ) + + # have an index, means the left side is already present in + # an existing FROM in the self._from_obj tuple + if indexes: + replace_from_obj_index = indexes[0] + + # no index, means we need to add a new element to the + # self._from_obj tuple + + # no from element present, so we will have to add to the + # self._from_obj tuple. Determine if this left side matches up + # with existing mapper entities, in which case we want to apply the + # aliasing / adaptation rules present on that entity if any + if ( + replace_from_obj_index is None + and entities_collection + and hasattr(l_info, "mapper") + ): + for idx, ent in enumerate(entities_collection): + # TODO: should we be checking for multiple mapper entities + # matching? 
+ if isinstance(ent, _MapperEntity) and ent.corresponds_to(left): + use_entity_index = idx + break + + return replace_from_obj_index, use_entity_index + + def _join_check_and_adapt_right_side(self, left, right, onclause, prop): + """transform the "right" side of the join as well as the onclause + according to polymorphic mapping translations, aliasing on the query + or on the join, special cases where the right and left side have + overlapping tables. + + """ + + l_info = inspect(left) + r_info = inspect(right) + + overlap = False + + right_mapper = getattr(r_info, "mapper", None) + # if the target is a joined inheritance mapping, + # be more liberal about auto-aliasing. + if right_mapper and ( + right_mapper.with_polymorphic + or isinstance(right_mapper.persist_selectable, expression.Join) + ): + for from_obj in self.from_clauses or [l_info.selectable]: + if sql_util.selectables_overlap( + l_info.selectable, from_obj + ) and sql_util.selectables_overlap( + from_obj, r_info.selectable + ): + overlap = True + break + + if overlap and l_info.selectable is r_info.selectable: + raise sa_exc.InvalidRequestError( + "Can't join table/selectable '%s' to itself" + % l_info.selectable + ) + + right_mapper, right_selectable, right_is_aliased = ( + getattr(r_info, "mapper", None), + r_info.selectable, + getattr(r_info, "is_aliased_class", False), + ) + + if ( + right_mapper + and prop + and not right_mapper.common_parent(prop.mapper) + ): + raise sa_exc.InvalidRequestError( + "Join target %s does not correspond to " + "the right side of join condition %s" % (right, onclause) + ) + + # _join_entities is used as a hint for single-table inheritance + # purposes at the moment + if hasattr(r_info, "mapper"): + self._join_entities += (r_info,) + + need_adapter = False + + # test for joining to an unmapped selectable as the target + if r_info.is_clause_element: + if prop: + right_mapper = prop.mapper + + if right_selectable._is_lateral: + # orm_only is disabled to suit the case where we have to + # adapt an explicit correlate(Entity) - the select() loses + # the ORM-ness in this case right now, ideally it would not + current_adapter = self._get_current_adapter() + if current_adapter is not None: + # TODO: we had orm_only=False here before, removing + # it didn't break things. if we identify the rationale, + # may need to apply "_orm_only" annotation here. + right = current_adapter(right, True) + + elif prop: + # joining to selectable with a mapper property given + # as the ON clause + + if not right_selectable.is_derived_from( + right_mapper.persist_selectable + ): + raise sa_exc.InvalidRequestError( + "Selectable '%s' is not derived from '%s'" + % ( + right_selectable.description, + right_mapper.persist_selectable.description, + ) + ) + + # if the destination selectable is a plain select(), + # turn it into an alias(). + if isinstance(right_selectable, expression.SelectBase): + right_selectable = coercions.expect( + roles.FromClauseRole, right_selectable + ) + need_adapter = True + + # make the right hand side target into an ORM entity + right = AliasedClass(right_mapper, right_selectable) + + util.warn_deprecated( + "An alias is being generated automatically against " + "joined entity %s for raw clauseelement, which is " + "deprecated and will be removed in a later release. " + "Use the aliased() " + "construct explicitly, see the linked example." 
+ % right_mapper,
+ "1.4",
+ code="xaj1",
+ )
+
+ # test for overlap:
+ # orm/inheritance/relationships.py
+ # SelfReferentialM2MTest
+ aliased_entity = right_mapper and not right_is_aliased and overlap
+
+ if not need_adapter and aliased_entity:
+ # there are a few places in the ORM that automatic aliasing
+ # is still desirable, and can't be automatic with a Core
+ # only approach. For illustrations of "overlaps" see
+ # test/orm/inheritance/test_relationships.py. There are also
+ # general overlap cases with many-to-many tables where automatic
+ # aliasing is desirable.
+ right = AliasedClass(right, flat=True)
+ need_adapter = True
+
+ util.warn(
+ "An alias is being generated automatically against "
+ "joined entity %s due to overlapping tables. This is a "
+ "legacy pattern which may be "
+ "deprecated in a later release. Use the "
+ "aliased(<entity>, flat=True) "
+ "construct explicitly, see the linked example." % right_mapper,
+ code="xaj2",
+ )
+
+ if need_adapter:
+ # if need_adapter is True, we are in a deprecated case and
+ # a warning has been emitted.
+ assert right_mapper
+
+ adapter = ORMAdapter(
+ _TraceAdaptRole.DEPRECATED_JOIN_ADAPT_RIGHT_SIDE,
+ inspect(right),
+ equivalents=right_mapper._equivalent_columns,
+ )
+
+ # if an alias() on the right side was generated,
+ # which is intended to wrap the right side in a subquery,
+ # ensure that columns retrieved from this target in the result
+ # set are also adapted.
+ self._mapper_loads_polymorphically_with(right_mapper, adapter)
+ elif (
+ not r_info.is_clause_element
+ and not right_is_aliased
+ and right_mapper._has_aliased_polymorphic_fromclause
+ ):
+ # for the case where the target mapper has a with_polymorphic
+ # set up, ensure an adapter is set up for criteria that works
+ # against this mapper. Previously, this logic used to
+ # use the "create_aliases or aliased_entity" case to generate
+ # an aliased() object, but this creates an alias that isn't
+ # strictly necessary.
+ # see test/orm/test_core_compilation.py + # ::RelNaturalAliasedJoinsTest::test_straight + # and similar + self._mapper_loads_polymorphically_with( + right_mapper, + ORMAdapter( + _TraceAdaptRole.WITH_POLYMORPHIC_ADAPTER_RIGHT_JOIN, + right_mapper, + selectable=right_mapper.selectable, + equivalents=right_mapper._equivalent_columns, + ), + ) + # if the onclause is a ClauseElement, adapt it with any + # adapters that are in place right now + if isinstance(onclause, expression.ClauseElement): + current_adapter = self._get_current_adapter() + if current_adapter: + onclause = current_adapter(onclause, True) + + # if joining on a MapperProperty path, + # track the path to prevent redundant joins + if prop: + self._already_joined_edges += ((left, right, prop.key),) + + return inspect(right), right, onclause + + @property + def _select_args(self): + return { + "limit_clause": self.select_statement._limit_clause, + "offset_clause": self.select_statement._offset_clause, + "distinct": self.distinct, + "distinct_on": self.distinct_on, + "prefixes": self.select_statement._prefixes, + "suffixes": self.select_statement._suffixes, + "group_by": self.group_by or None, + "fetch_clause": self.select_statement._fetch_clause, + "fetch_clause_options": ( + self.select_statement._fetch_clause_options + ), + "independent_ctes": self.select_statement._independent_ctes, + "independent_ctes_opts": ( + self.select_statement._independent_ctes_opts + ), + } + + @property + def _should_nest_selectable(self): + kwargs = self._select_args + return ( + kwargs.get("limit_clause") is not None + or kwargs.get("offset_clause") is not None + or kwargs.get("distinct", False) + or kwargs.get("distinct_on", ()) + or kwargs.get("group_by", False) + ) + + def _get_extra_criteria(self, ext_info): + if ( + "additional_entity_criteria", + ext_info.mapper, + ) in self.global_attributes: + return tuple( + ae._resolve_where_criteria(ext_info) + for ae in self.global_attributes[ + ("additional_entity_criteria", ext_info.mapper) + ] + if (ae.include_aliases or ae.entity is ext_info) + and ae._should_include(self) + ) + else: + return () + + def _adjust_for_extra_criteria(self): + """Apply extra criteria filtering. + + For all distinct single-table-inheritance mappers represented in + the columns clause of this query, as well as the "select from entity", + add criterion to the WHERE + clause of the given QueryContext such that only the appropriate + subtypes are selected from the total results. + + Additionally, add WHERE criteria originating from LoaderCriteriaOptions + associated with the global context. 
+ + """ + + for fromclause in self.from_clauses: + ext_info = fromclause._annotations.get("parententity", None) + + if ( + ext_info + and ( + ext_info.mapper._single_table_criterion is not None + or ("additional_entity_criteria", ext_info.mapper) + in self.global_attributes + ) + and ext_info not in self.extra_criteria_entities + ): + self.extra_criteria_entities[ext_info] = ( + ext_info, + ext_info._adapter if ext_info.is_aliased_class else None, + ) + + search = set(self.extra_criteria_entities.values()) + + for ext_info, adapter in search: + if ext_info in self._join_entities: + continue + + single_crit = ext_info.mapper._single_table_criterion + + if self.compile_options._for_refresh_state: + additional_entity_criteria = [] + else: + additional_entity_criteria = self._get_extra_criteria(ext_info) + + if single_crit is not None: + additional_entity_criteria += (single_crit,) + + current_adapter = self._get_current_adapter() + for crit in additional_entity_criteria: + if adapter: + crit = adapter.traverse(crit) + + if current_adapter: + crit = sql_util._deep_annotate(crit, {"_orm_adapt": True}) + crit = current_adapter(crit, False) + self._where_criteria += (crit,) + + +def _column_descriptions( + query_or_select_stmt: Union[Query, Select, FromStatement], + compile_state: Optional[ORMSelectCompileState] = None, + legacy: bool = False, +) -> List[ORMColumnDescription]: + if compile_state is None: + compile_state = ORMSelectCompileState._create_entities_collection( + query_or_select_stmt, legacy=legacy + ) + ctx = compile_state + d = [ + { + "name": ent._label_name, + "type": ent.type, + "aliased": getattr(insp_ent, "is_aliased_class", False), + "expr": ent.expr, + "entity": ( + getattr(insp_ent, "entity", None) + if ent.entity_zero is not None + and not insp_ent.is_clause_element + else None + ), + } + for ent, insp_ent in [ + (_ent, _ent.entity_zero) for _ent in ctx._entities + ] + ] + return d + + +def _legacy_filter_by_entity_zero( + query_or_augmented_select: Union[Query[Any], Select[Any]] +) -> Optional[_InternalEntityType[Any]]: + self = query_or_augmented_select + if self._setup_joins: + _last_joined_entity = self._last_joined_entity + if _last_joined_entity is not None: + return _last_joined_entity + + if self._from_obj and "parententity" in self._from_obj[0]._annotations: + return self._from_obj[0]._annotations["parententity"] + + return _entity_from_pre_ent_zero(self) + + +def _entity_from_pre_ent_zero( + query_or_augmented_select: Union[Query[Any], Select[Any]] +) -> Optional[_InternalEntityType[Any]]: + self = query_or_augmented_select + if not self._raw_columns: + return None + + ent = self._raw_columns[0] + + if "parententity" in ent._annotations: + return ent._annotations["parententity"] + elif isinstance(ent, ORMColumnsClauseRole): + return ent.entity + elif "bundle" in ent._annotations: + return ent._annotations["bundle"] + else: + return ent + + +def _determine_last_joined_entity( + setup_joins: Tuple[_SetupJoinsElement, ...], + entity_zero: Optional[_InternalEntityType[Any]] = None, +) -> Optional[Union[_InternalEntityType[Any], _JoinTargetElement]]: + if not setup_joins: + return None + + (target, onclause, from_, flags) = setup_joins[-1] + + if isinstance( + target, + attributes.QueryableAttribute, + ): + return target.entity + else: + return target + + +class _QueryEntity: + """represent an entity column returned within a Query result.""" + + __slots__ = () + + supports_single_entity: bool + + _non_hashable_value = False + _null_column_type = False + use_id_for_hash 
= False + + _label_name: Optional[str] + type: Union[Type[Any], TypeEngine[Any]] + expr: Union[_InternalEntityType, ColumnElement[Any]] + entity_zero: Optional[_InternalEntityType] + + def setup_compile_state(self, compile_state: ORMCompileState) -> None: + raise NotImplementedError() + + def setup_dml_returning_compile_state( + self, + compile_state: ORMCompileState, + adapter: Optional[_DMLReturningColFilter], + ) -> None: + raise NotImplementedError() + + def row_processor(self, context, result): + raise NotImplementedError() + + @classmethod + def to_compile_state( + cls, compile_state, entities, entities_collection, is_current_entities + ): + for idx, entity in enumerate(entities): + if entity._is_lambda_element: + if entity._is_sequence: + cls.to_compile_state( + compile_state, + entity._resolved, + entities_collection, + is_current_entities, + ) + continue + else: + entity = entity._resolved + + if entity.is_clause_element: + if entity.is_selectable: + if "parententity" in entity._annotations: + _MapperEntity( + compile_state, + entity, + entities_collection, + is_current_entities, + ) + else: + _ColumnEntity._for_columns( + compile_state, + entity._select_iterable, + entities_collection, + idx, + is_current_entities, + ) + else: + if entity._annotations.get("bundle", False): + _BundleEntity( + compile_state, + entity, + entities_collection, + is_current_entities, + ) + elif entity._is_clause_list: + # this is legacy only - test_composites.py + # test_query_cols_legacy + _ColumnEntity._for_columns( + compile_state, + entity._select_iterable, + entities_collection, + idx, + is_current_entities, + ) + else: + _ColumnEntity._for_columns( + compile_state, + [entity], + entities_collection, + idx, + is_current_entities, + ) + elif entity.is_bundle: + _BundleEntity(compile_state, entity, entities_collection) + + return entities_collection + + +class _MapperEntity(_QueryEntity): + """mapper/class/AliasedClass entity""" + + __slots__ = ( + "expr", + "mapper", + "entity_zero", + "is_aliased_class", + "path", + "_extra_entities", + "_label_name", + "_with_polymorphic_mappers", + "selectable", + "_polymorphic_discriminator", + ) + + expr: _InternalEntityType + mapper: Mapper[Any] + entity_zero: _InternalEntityType + is_aliased_class: bool + path: PathRegistry + _label_name: str + + def __init__( + self, compile_state, entity, entities_collection, is_current_entities + ): + entities_collection.append(self) + if is_current_entities: + if compile_state._primary_entity is None: + compile_state._primary_entity = self + compile_state._has_mapper_entities = True + compile_state._has_orm_entities = True + + entity = entity._annotations["parententity"] + entity._post_inspect + ext_info = self.entity_zero = entity + entity = ext_info.entity + + self.expr = entity + self.mapper = mapper = ext_info.mapper + + self._extra_entities = (self.expr,) + + if ext_info.is_aliased_class: + self._label_name = ext_info.name + else: + self._label_name = mapper.class_.__name__ + + self.is_aliased_class = ext_info.is_aliased_class + self.path = ext_info._path_registry + + self.selectable = ext_info.selectable + self._with_polymorphic_mappers = ext_info.with_polymorphic_mappers + self._polymorphic_discriminator = ext_info.polymorphic_on + + if mapper._should_select_with_poly_adapter: + compile_state._create_with_polymorphic_adapter( + ext_info, self.selectable + ) + + supports_single_entity = True + + _non_hashable_value = True + use_id_for_hash = True + + @property + def type(self): + return self.mapper.class_ + + 
@property + def entity_zero_or_selectable(self): + return self.entity_zero + + def corresponds_to(self, entity): + return _entity_corresponds_to(self.entity_zero, entity) + + def _get_entity_clauses(self, compile_state): + adapter = None + + if not self.is_aliased_class: + if compile_state._polymorphic_adapters: + adapter = compile_state._polymorphic_adapters.get( + self.mapper, None + ) + else: + adapter = self.entity_zero._adapter + + if adapter: + if compile_state._from_obj_alias: + ret = adapter.wrap(compile_state._from_obj_alias) + else: + ret = adapter + else: + ret = compile_state._from_obj_alias + + return ret + + def row_processor(self, context, result): + compile_state = context.compile_state + adapter = self._get_entity_clauses(compile_state) + + if compile_state.compound_eager_adapter and adapter: + adapter = adapter.wrap(compile_state.compound_eager_adapter) + elif not adapter: + adapter = compile_state.compound_eager_adapter + + if compile_state._primary_entity is self: + only_load_props = compile_state.compile_options._only_load_props + refresh_state = context.refresh_state + else: + only_load_props = refresh_state = None + + _instance = loading._instance_processor( + self, + self.mapper, + context, + result, + self.path, + adapter, + only_load_props=only_load_props, + refresh_state=refresh_state, + polymorphic_discriminator=self._polymorphic_discriminator, + ) + + return _instance, self._label_name, self._extra_entities + + def setup_dml_returning_compile_state( + self, + compile_state: ORMCompileState, + adapter: Optional[_DMLReturningColFilter], + ) -> None: + loading._setup_entity_query( + compile_state, + self.mapper, + self, + self.path, + adapter, + compile_state.primary_columns, + with_polymorphic=self._with_polymorphic_mappers, + only_load_props=compile_state.compile_options._only_load_props, + polymorphic_discriminator=self._polymorphic_discriminator, + ) + + def setup_compile_state(self, compile_state): + adapter = self._get_entity_clauses(compile_state) + + single_table_crit = self.mapper._single_table_criterion + if ( + single_table_crit is not None + or ("additional_entity_criteria", self.mapper) + in compile_state.global_attributes + ): + ext_info = self.entity_zero + compile_state.extra_criteria_entities[ext_info] = ( + ext_info, + ext_info._adapter if ext_info.is_aliased_class else None, + ) + + loading._setup_entity_query( + compile_state, + self.mapper, + self, + self.path, + adapter, + compile_state.primary_columns, + with_polymorphic=self._with_polymorphic_mappers, + only_load_props=compile_state.compile_options._only_load_props, + polymorphic_discriminator=self._polymorphic_discriminator, + ) + compile_state._fallback_from_clauses.append(self.selectable) + + +class _BundleEntity(_QueryEntity): + _extra_entities = () + + __slots__ = ( + "bundle", + "expr", + "type", + "_label_name", + "_entities", + "supports_single_entity", + ) + + _entities: List[_QueryEntity] + bundle: Bundle + type: Type[Any] + _label_name: str + supports_single_entity: bool + expr: Bundle + + def __init__( + self, + compile_state, + expr, + entities_collection, + is_current_entities, + setup_entities=True, + parent_bundle=None, + ): + compile_state._has_orm_entities = True + + expr = expr._annotations["bundle"] + if parent_bundle: + parent_bundle._entities.append(self) + else: + entities_collection.append(self) + + if isinstance( + expr, (attributes.QueryableAttribute, interfaces.PropComparator) + ): + bundle = expr.__clause_element__() + else: + bundle = expr + + self.bundle = 
self.expr = bundle + self.type = type(bundle) + self._label_name = bundle.name + self._entities = [] + + if setup_entities: + for expr in bundle.exprs: + if "bundle" in expr._annotations: + _BundleEntity( + compile_state, + expr, + entities_collection, + is_current_entities, + parent_bundle=self, + ) + elif isinstance(expr, Bundle): + _BundleEntity( + compile_state, + expr, + entities_collection, + is_current_entities, + parent_bundle=self, + ) + else: + _ORMColumnEntity._for_columns( + compile_state, + [expr], + entities_collection, + None, + is_current_entities, + parent_bundle=self, + ) + + self.supports_single_entity = self.bundle.single_entity + + @property + def mapper(self): + ezero = self.entity_zero + if ezero is not None: + return ezero.mapper + else: + return None + + @property + def entity_zero(self): + for ent in self._entities: + ezero = ent.entity_zero + if ezero is not None: + return ezero + else: + return None + + def corresponds_to(self, entity): + # TODO: we might be able to implement this but for now + # we are working around it + return False + + @property + def entity_zero_or_selectable(self): + for ent in self._entities: + ezero = ent.entity_zero_or_selectable + if ezero is not None: + return ezero + else: + return None + + def setup_compile_state(self, compile_state): + for ent in self._entities: + ent.setup_compile_state(compile_state) + + def setup_dml_returning_compile_state( + self, + compile_state: ORMCompileState, + adapter: Optional[_DMLReturningColFilter], + ) -> None: + return self.setup_compile_state(compile_state) + + def row_processor(self, context, result): + procs, labels, extra = zip( + *[ent.row_processor(context, result) for ent in self._entities] + ) + + proc = self.bundle.create_row_processor(context.query, procs, labels) + + return proc, self._label_name, self._extra_entities + + +class _ColumnEntity(_QueryEntity): + __slots__ = ( + "_fetch_column", + "_row_processor", + "raw_column_index", + "translate_raw_column", + ) + + @classmethod + def _for_columns( + cls, + compile_state, + columns, + entities_collection, + raw_column_index, + is_current_entities, + parent_bundle=None, + ): + for column in columns: + annotations = column._annotations + if "parententity" in annotations: + _entity = annotations["parententity"] + else: + _entity = sql_util.extract_first_column_annotation( + column, "parententity" + ) + + if _entity: + if "identity_token" in column._annotations: + _IdentityTokenEntity( + compile_state, + column, + entities_collection, + _entity, + raw_column_index, + is_current_entities, + parent_bundle=parent_bundle, + ) + else: + _ORMColumnEntity( + compile_state, + column, + entities_collection, + _entity, + raw_column_index, + is_current_entities, + parent_bundle=parent_bundle, + ) + else: + _RawColumnEntity( + compile_state, + column, + entities_collection, + raw_column_index, + is_current_entities, + parent_bundle=parent_bundle, + ) + + @property + def type(self): + return self.column.type + + @property + def _non_hashable_value(self): + return not self.column.type.hashable + + @property + def _null_column_type(self): + return self.column.type._isnull + + def row_processor(self, context, result): + compile_state = context.compile_state + + # the resulting callable is entirely cacheable so just return + # it if we already made one + if self._row_processor is not None: + getter, label_name, extra_entities = self._row_processor + if self.translate_raw_column: + extra_entities += ( + context.query._raw_columns[self.raw_column_index], + ) + 
+ return getter, label_name, extra_entities + + # retrieve the column that would have been set up in + # setup_compile_state, to avoid doing redundant work + if self._fetch_column is not None: + column = self._fetch_column + else: + # fetch_column will be None when we are doing a from_statement + # and setup_compile_state may not have been called. + column = self.column + + # previously, the RawColumnEntity didn't look for from_obj_alias + # however I can't think of a case where we would be here and + # we'd want to ignore it if this is the from_statement use case. + # it's not really a use case to have raw columns + from_statement + if compile_state._from_obj_alias: + column = compile_state._from_obj_alias.columns[column] + + if column._annotations: + # annotated columns perform more slowly in compiler and + # result due to the __eq__() method, so use deannotated + column = column._deannotate() + + if compile_state.compound_eager_adapter: + column = compile_state.compound_eager_adapter.columns[column] + + getter = result._getter(column) + ret = getter, self._label_name, self._extra_entities + self._row_processor = ret + + if self.translate_raw_column: + extra_entities = self._extra_entities + ( + context.query._raw_columns[self.raw_column_index], + ) + return getter, self._label_name, extra_entities + else: + return ret + + +class _RawColumnEntity(_ColumnEntity): + entity_zero = None + mapper = None + supports_single_entity = False + + __slots__ = ( + "expr", + "column", + "_label_name", + "entity_zero_or_selectable", + "_extra_entities", + ) + + def __init__( + self, + compile_state, + column, + entities_collection, + raw_column_index, + is_current_entities, + parent_bundle=None, + ): + self.expr = column + self.raw_column_index = raw_column_index + self.translate_raw_column = raw_column_index is not None + + if column._is_star: + compile_state.compile_options += {"_is_star": True} + + if not is_current_entities or column._is_text_clause: + self._label_name = None + else: + if parent_bundle: + self._label_name = column._proxy_key + else: + self._label_name = compile_state._label_convention(column) + + if parent_bundle: + parent_bundle._entities.append(self) + else: + entities_collection.append(self) + + self.column = column + self.entity_zero_or_selectable = ( + self.column._from_objects[0] if self.column._from_objects else None + ) + self._extra_entities = (self.expr, self.column) + self._fetch_column = self._row_processor = None + + def corresponds_to(self, entity): + return False + + def setup_dml_returning_compile_state( + self, + compile_state: ORMCompileState, + adapter: Optional[_DMLReturningColFilter], + ) -> None: + return self.setup_compile_state(compile_state) + + def setup_compile_state(self, compile_state): + current_adapter = compile_state._get_current_adapter() + if current_adapter: + column = current_adapter(self.column, False) + if column is None: + return + else: + column = self.column + + if column._annotations: + # annotated columns perform more slowly in compiler and + # result due to the __eq__() method, so use deannotated + column = column._deannotate() + + compile_state.dedupe_columns.add(column) + compile_state.primary_columns.append(column) + self._fetch_column = column + + +class _ORMColumnEntity(_ColumnEntity): + """Column/expression based entity.""" + + supports_single_entity = False + + __slots__ = ( + "expr", + "mapper", + "column", + "_label_name", + "entity_zero_or_selectable", + "entity_zero", + "_extra_entities", + ) + + def __init__( + self, + 
compile_state, + column, + entities_collection, + parententity, + raw_column_index, + is_current_entities, + parent_bundle=None, + ): + annotations = column._annotations + + _entity = parententity + + # an AliasedClass won't have proxy_key in the annotations for + # a column if it was acquired using the class' adapter directly, + # such as using AliasedInsp._adapt_element(). this occurs + # within internal loaders. + + orm_key = annotations.get("proxy_key", None) + proxy_owner = annotations.get("proxy_owner", _entity) + if orm_key: + self.expr = getattr(proxy_owner.entity, orm_key) + self.translate_raw_column = False + else: + # if orm_key is not present, that means this is an ad-hoc + # SQL ColumnElement, like a CASE() or other expression. + # include this column position from the invoked statement + # in the ORM-level ResultSetMetaData on each execute, so that + # it can be targeted by identity after caching + self.expr = column + self.translate_raw_column = raw_column_index is not None + + self.raw_column_index = raw_column_index + + if is_current_entities: + if parent_bundle: + self._label_name = orm_key if orm_key else column._proxy_key + else: + self._label_name = compile_state._label_convention( + column, col_name=orm_key + ) + else: + self._label_name = None + + _entity._post_inspect + self.entity_zero = self.entity_zero_or_selectable = ezero = _entity + self.mapper = mapper = _entity.mapper + + if parent_bundle: + parent_bundle._entities.append(self) + else: + entities_collection.append(self) + + compile_state._has_orm_entities = True + + self.column = column + + self._fetch_column = self._row_processor = None + + self._extra_entities = (self.expr, self.column) + + if mapper._should_select_with_poly_adapter: + compile_state._create_with_polymorphic_adapter( + ezero, ezero.selectable + ) + + def corresponds_to(self, entity): + if _is_aliased_class(entity): + # TODO: polymorphic subclasses ? + return entity is self.entity_zero + else: + return not _is_aliased_class( + self.entity_zero + ) and entity.common_parent(self.entity_zero) + + def setup_dml_returning_compile_state( + self, + compile_state: ORMCompileState, + adapter: Optional[_DMLReturningColFilter], + ) -> None: + + self._fetch_column = column = self.column + if adapter: + column = adapter(column, False) + + if column is not None: + compile_state.dedupe_columns.add(column) + compile_state.primary_columns.append(column) + + def setup_compile_state(self, compile_state): + current_adapter = compile_state._get_current_adapter() + if current_adapter: + column = current_adapter(self.column, False) + if column is None: + assert compile_state.is_dml_returning + self._fetch_column = self.column + return + else: + column = self.column + + ezero = self.entity_zero + + single_table_crit = self.mapper._single_table_criterion + if ( + single_table_crit is not None + or ("additional_entity_criteria", self.mapper) + in compile_state.global_attributes + ): + compile_state.extra_criteria_entities[ezero] = ( + ezero, + ezero._adapter if ezero.is_aliased_class else None, + ) + + if column._annotations and not column._expression_label: + # annotated columns perform more slowly in compiler and + # result due to the __eq__() method, so use deannotated + column = column._deannotate() + + # use entity_zero as the from if we have it. this is necessary + # for polymorphic scenarios where our FROM is based on ORM entity, + # not the FROM of the column. 
but also, don't use it if our column + # doesn't actually have any FROMs that line up, such as when its + # a scalar subquery. + if set(self.column._from_objects).intersection( + ezero.selectable._from_objects + ): + compile_state._fallback_from_clauses.append(ezero.selectable) + + compile_state.dedupe_columns.add(column) + compile_state.primary_columns.append(column) + self._fetch_column = column + + +class _IdentityTokenEntity(_ORMColumnEntity): + translate_raw_column = False + + def setup_compile_state(self, compile_state): + pass + + def row_processor(self, context, result): + def getter(row): + return context.load_options._identity_token + + return getter, self._label_name, self._extra_entities diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/decl_api.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/decl_api.py new file mode 100644 index 0000000000000000000000000000000000000000..60468237ee01bf64ecd624862f3a6712ec41f664 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/decl_api.py @@ -0,0 +1,1917 @@ +# orm/decl_api.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php + +"""Public API functions and helpers for declarative.""" + +from __future__ import annotations + +import itertools +import re +import typing +from typing import Any +from typing import Callable +from typing import ClassVar +from typing import Dict +from typing import FrozenSet +from typing import Generic +from typing import Iterable +from typing import Iterator +from typing import Mapping +from typing import Optional +from typing import overload +from typing import Set +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING +from typing import TypeVar +from typing import Union +import weakref + +from . import attributes +from . import clsregistry +from . import instrumentation +from . import interfaces +from . import mapperlib +from ._orm_constructors import composite +from ._orm_constructors import deferred +from ._orm_constructors import mapped_column +from ._orm_constructors import relationship +from ._orm_constructors import synonym +from .attributes import InstrumentedAttribute +from .base import _inspect_mapped_class +from .base import _is_mapped_class +from .base import Mapped +from .base import ORMDescriptor +from .decl_base import _add_attribute +from .decl_base import _as_declarative +from .decl_base import _ClassScanMapperConfig +from .decl_base import _declarative_constructor +from .decl_base import _DeferredMapperConfig +from .decl_base import _del_attribute +from .decl_base import _mapper +from .descriptor_props import Composite +from .descriptor_props import Synonym +from .descriptor_props import Synonym as _orm_synonym +from .mapper import Mapper +from .properties import MappedColumn +from .relationships import RelationshipProperty +from .state import InstanceState +from .. import exc +from .. import inspection +from .. 
import util +from ..sql import sqltypes +from ..sql.base import _NoArg +from ..sql.elements import SQLCoreOperations +from ..sql.schema import MetaData +from ..sql.selectable import FromClause +from ..util import hybridmethod +from ..util import hybridproperty +from ..util import typing as compat_typing +from ..util import warn_deprecated +from ..util.typing import CallableReference +from ..util.typing import de_optionalize_union_types +from ..util.typing import flatten_newtype +from ..util.typing import is_generic +from ..util.typing import is_literal +from ..util.typing import is_newtype +from ..util.typing import is_pep695 +from ..util.typing import Literal +from ..util.typing import LITERAL_TYPES +from ..util.typing import Self + +if TYPE_CHECKING: + from ._typing import _O + from ._typing import _RegistryType + from .decl_base import _DataclassArguments + from .instrumentation import ClassManager + from .interfaces import MapperProperty + from .state import InstanceState # noqa + from ..sql._typing import _TypeEngineArgument + from ..sql.type_api import _MatchedOnType + +_T = TypeVar("_T", bound=Any) + +_TT = TypeVar("_TT", bound=Any) + +# it's not clear how to have Annotated, Union objects etc. as keys here +# from a typing perspective so just leave it open ended for now +_TypeAnnotationMapType = Mapping[Any, "_TypeEngineArgument[Any]"] +_MutableTypeAnnotationMapType = Dict[Any, "_TypeEngineArgument[Any]"] + +_DeclaredAttrDecorated = Callable[ + ..., Union[Mapped[_T], ORMDescriptor[_T], SQLCoreOperations[_T]] +] + + +def has_inherited_table(cls: Type[_O]) -> bool: + """Given a class, return True if any of the classes it inherits from has a + mapped table, otherwise return False. + + This is used in declarative mixins to build attributes that behave + differently for the base class vs. a subclass in an inheritance + hierarchy. + + .. seealso:: + + :ref:`decl_mixin_inheritance` + + """ + for class_ in cls.__mro__[1:]: + if getattr(class_, "__table__", None) is not None: + return True + return False + + +class _DynamicAttributesType(type): + def __setattr__(cls, key: str, value: Any) -> None: + if "__mapper__" in cls.__dict__: + _add_attribute(cls, key, value) + else: + type.__setattr__(cls, key, value) + + def __delattr__(cls, key: str) -> None: + if "__mapper__" in cls.__dict__: + _del_attribute(cls, key) + else: + type.__delattr__(cls, key) + + +class DeclarativeAttributeIntercept( + _DynamicAttributesType, + # Inspectable is used only by the mypy plugin + inspection.Inspectable[Mapper[Any]], +): + """Metaclass that may be used in conjunction with the + :class:`_orm.DeclarativeBase` class to support addition of class + attributes dynamically. 
+ + """ + + +@compat_typing.dataclass_transform( + field_specifiers=( + MappedColumn, + RelationshipProperty, + Composite, + Synonym, + mapped_column, + relationship, + composite, + synonym, + deferred, + ), +) +class DCTransformDeclarative(DeclarativeAttributeIntercept): + """metaclass that includes @dataclass_transforms""" + + +class DeclarativeMeta(DeclarativeAttributeIntercept): + metadata: MetaData + registry: RegistryType + + def __init__( + cls, classname: Any, bases: Any, dict_: Any, **kw: Any + ) -> None: + # use cls.__dict__, which can be modified by an + # __init_subclass__() method (#7900) + dict_ = cls.__dict__ + + # early-consume registry from the initial declarative base, + # assign privately to not conflict with subclass attributes named + # "registry" + reg = getattr(cls, "_sa_registry", None) + if reg is None: + reg = dict_.get("registry", None) + if not isinstance(reg, registry): + raise exc.InvalidRequestError( + "Declarative base class has no 'registry' attribute, " + "or registry is not a sqlalchemy.orm.registry() object" + ) + else: + cls._sa_registry = reg + + if not cls.__dict__.get("__abstract__", False): + _as_declarative(reg, cls, dict_) + type.__init__(cls, classname, bases, dict_) + + +def synonym_for( + name: str, map_column: bool = False +) -> Callable[[Callable[..., Any]], Synonym[Any]]: + """Decorator that produces an :func:`_orm.synonym` + attribute in conjunction with a Python descriptor. + + The function being decorated is passed to :func:`_orm.synonym` as the + :paramref:`.orm.synonym.descriptor` parameter:: + + class MyClass(Base): + __tablename__ = "my_table" + + id = Column(Integer, primary_key=True) + _job_status = Column("job_status", String(50)) + + @synonym_for("job_status") + @property + def job_status(self): + return "Status: %s" % self._job_status + + The :ref:`hybrid properties ` feature of SQLAlchemy + is typically preferred instead of synonyms, which is a more legacy + feature. + + .. seealso:: + + :ref:`synonyms` - Overview of synonyms + + :func:`_orm.synonym` - the mapper-level function + + :ref:`mapper_hybrids` - The Hybrid Attribute extension provides an + updated approach to augmenting attribute behavior more flexibly than + can be achieved with synonyms. + + """ + + def decorate(fn: Callable[..., Any]) -> Synonym[Any]: + return _orm_synonym(name, map_column=map_column, descriptor=fn) + + return decorate + + +class _declared_attr_common: + def __init__( + self, + fn: Callable[..., Any], + cascading: bool = False, + quiet: bool = False, + ): + # suppport + # @declared_attr + # @classmethod + # def foo(cls) -> Mapped[thing]: + # ... + # which seems to help typing tools interpret the fn as a classmethod + # for situations where needed + if isinstance(fn, classmethod): + fn = fn.__func__ + + self.fget = fn + self._cascading = cascading + self._quiet = quiet + self.__doc__ = fn.__doc__ + + def _collect_return_annotation(self) -> Optional[Type[Any]]: + return util.get_annotations(self.fget).get("return") + + def __get__(self, instance: Optional[object], owner: Any) -> Any: + # the declared_attr needs to make use of a cache that exists + # for the span of the declarative scan_attributes() phase. + # to achieve this we look at the class manager that's configured. 
+ + # note this method should not be called outside of the declarative + # setup phase + + cls = owner + manager = attributes.opt_manager_of_class(cls) + if manager is None: + if not re.match(r"^__.+__$", self.fget.__name__): + # if there is no manager at all, then this class hasn't been + # run through declarative or mapper() at all, emit a warning. + util.warn( + "Unmanaged access of declarative attribute %s from " + "non-mapped class %s" % (self.fget.__name__, cls.__name__) + ) + return self.fget(cls) + elif manager.is_mapped: + # the class is mapped, which means we're outside of the declarative + # scan setup, just run the function. + return self.fget(cls) + + # here, we are inside of the declarative scan. use the registry + # that is tracking the values of these attributes. + declarative_scan = manager.declarative_scan() + + # assert that we are in fact in the declarative scan + assert declarative_scan is not None + + reg = declarative_scan.declared_attr_reg + + if self in reg: + return reg[self] + else: + reg[self] = obj = self.fget(cls) + return obj + + +class _declared_directive(_declared_attr_common, Generic[_T]): + # see mapping_api.rst for docstring + + if typing.TYPE_CHECKING: + + def __init__( + self, + fn: Callable[..., _T], + cascading: bool = False, + ): ... + + def __get__(self, instance: Optional[object], owner: Any) -> _T: ... + + def __set__(self, instance: Any, value: Any) -> None: ... + + def __delete__(self, instance: Any) -> None: ... + + def __call__(self, fn: Callable[..., _TT]) -> _declared_directive[_TT]: + # extensive fooling of mypy underway... + ... + + +class declared_attr(interfaces._MappedAttribute[_T], _declared_attr_common): + """Mark a class-level method as representing the definition of + a mapped property or Declarative directive. + + :class:`_orm.declared_attr` is typically applied as a decorator to a class + level method, turning the attribute into a scalar-like property that can be + invoked from the uninstantiated class. The Declarative mapping process + looks for these :class:`_orm.declared_attr` callables as it scans classes, + and assumes any attribute marked with :class:`_orm.declared_attr` will be a + callable that will produce an object specific to the Declarative mapping or + table configuration. + + :class:`_orm.declared_attr` is usually applicable to + :ref:`mixins `, to define relationships that are to be + applied to different implementors of the class. It may also be used to + define dynamically generated column expressions and other Declarative + attributes. + + Example:: + + class ProvidesUserMixin: + "A mixin that adds a 'user' relationship to classes." + + user_id: Mapped[int] = mapped_column(ForeignKey("user_table.id")) + + @declared_attr + def user(cls) -> Mapped["User"]: + return relationship("User") + + When used with Declarative directives such as ``__tablename__``, the + :meth:`_orm.declared_attr.directive` modifier may be used which indicates + to :pep:`484` typing tools that the given method is not dealing with + :class:`_orm.Mapped` attributes:: + + class CreateTableName: + @declared_attr.directive + def __tablename__(cls) -> str: + return cls.__name__.lower() + + :class:`_orm.declared_attr` can also be applied directly to mapped + classes, to allow for attributes that dynamically configure themselves + on subclasses when using mapped inheritance schemes. 
Below + illustrates :class:`_orm.declared_attr` to create a dynamic scheme + for generating the :paramref:`_orm.Mapper.polymorphic_identity` parameter + for subclasses:: + + class Employee(Base): + __tablename__ = "employee" + + id: Mapped[int] = mapped_column(primary_key=True) + type: Mapped[str] = mapped_column(String(50)) + + @declared_attr.directive + def __mapper_args__(cls) -> Dict[str, Any]: + if cls.__name__ == "Employee": + return { + "polymorphic_on": cls.type, + "polymorphic_identity": "Employee", + } + else: + return {"polymorphic_identity": cls.__name__} + + + class Engineer(Employee): + pass + + :class:`_orm.declared_attr` supports decorating functions that are + explicitly decorated with ``@classmethod``. This is never necessary from a + runtime perspective, however may be needed in order to support :pep:`484` + typing tools that don't otherwise recognize the decorated function as + having class-level behaviors for the ``cls`` parameter:: + + class SomethingMixin: + x: Mapped[int] + y: Mapped[int] + + @declared_attr + @classmethod + def x_plus_y(cls) -> Mapped[int]: + return column_property(cls.x + cls.y) + + .. versionadded:: 2.0 - :class:`_orm.declared_attr` can accommodate a + function decorated with ``@classmethod`` to help with :pep:`484` + integration where needed. + + + .. seealso:: + + :ref:`orm_mixins_toplevel` - Declarative Mixin documentation with + background on use patterns for :class:`_orm.declared_attr`. + + """ # noqa: E501 + + if typing.TYPE_CHECKING: + + def __init__( + self, + fn: _DeclaredAttrDecorated[_T], + cascading: bool = False, + ): ... + + def __set__(self, instance: Any, value: Any) -> None: ... + + def __delete__(self, instance: Any) -> None: ... + + # this is the Mapped[] API where at class descriptor get time we want + # the type checker to see InstrumentedAttribute[_T]. However the + # callable function prior to mapping in fact calls the given + # declarative function that does not return InstrumentedAttribute + @overload + def __get__( + self, instance: None, owner: Any + ) -> InstrumentedAttribute[_T]: ... + + @overload + def __get__(self, instance: object, owner: Any) -> _T: ... + + def __get__( + self, instance: Optional[object], owner: Any + ) -> Union[InstrumentedAttribute[_T], _T]: ... + + @hybridmethod + def _stateful(cls, **kw: Any) -> _stateful_declared_attr[_T]: + return _stateful_declared_attr(**kw) + + @hybridproperty + def directive(cls) -> _declared_directive[Any]: + # see mapping_api.rst for docstring + return _declared_directive # type: ignore + + @hybridproperty + def cascading(cls) -> _stateful_declared_attr[_T]: + # see mapping_api.rst for docstring + return cls._stateful(cascading=True) + + +class _stateful_declared_attr(declared_attr[_T]): + kw: Dict[str, Any] + + def __init__(self, **kw: Any): + self.kw = kw + + @hybridmethod + def _stateful(self, **kw: Any) -> _stateful_declared_attr[_T]: + new_kw = self.kw.copy() + new_kw.update(kw) + return _stateful_declared_attr(**new_kw) + + def __call__(self, fn: _DeclaredAttrDecorated[_T]) -> declared_attr[_T]: + return declared_attr(fn, **self.kw) + + +def declarative_mixin(cls: Type[_T]) -> Type[_T]: + """Mark a class as providing the feature of "declarative mixin". 
+ + E.g.:: + + from sqlalchemy.orm import declared_attr + from sqlalchemy.orm import declarative_mixin + + + @declarative_mixin + class MyMixin: + + @declared_attr + def __tablename__(cls): + return cls.__name__.lower() + + __table_args__ = {"mysql_engine": "InnoDB"} + __mapper_args__ = {"always_refresh": True} + + id = Column(Integer, primary_key=True) + + + class MyModel(MyMixin, Base): + name = Column(String(1000)) + + The :func:`_orm.declarative_mixin` decorator currently does not modify + the given class in any way; it's current purpose is strictly to assist + the :ref:`Mypy plugin ` in being able to identify + SQLAlchemy declarative mixin classes when no other context is present. + + .. versionadded:: 1.4.6 + + .. seealso:: + + :ref:`orm_mixins_toplevel` + + :ref:`mypy_declarative_mixins` - in the + :ref:`Mypy plugin documentation ` + + """ # noqa: E501 + + return cls + + +def _setup_declarative_base(cls: Type[Any]) -> None: + if "metadata" in cls.__dict__: + metadata = cls.__dict__["metadata"] + else: + metadata = None + + if "type_annotation_map" in cls.__dict__: + type_annotation_map = cls.__dict__["type_annotation_map"] + else: + type_annotation_map = None + + reg = cls.__dict__.get("registry", None) + if reg is not None: + if not isinstance(reg, registry): + raise exc.InvalidRequestError( + "Declarative base class has a 'registry' attribute that is " + "not an instance of sqlalchemy.orm.registry()" + ) + elif type_annotation_map is not None: + raise exc.InvalidRequestError( + "Declarative base class has both a 'registry' attribute and a " + "type_annotation_map entry. Per-base type_annotation_maps " + "are not supported. Please apply the type_annotation_map " + "to this registry directly." + ) + + else: + reg = registry( + metadata=metadata, type_annotation_map=type_annotation_map + ) + cls.registry = reg + + cls._sa_registry = reg + + if "metadata" not in cls.__dict__: + cls.metadata = cls.registry.metadata + + if getattr(cls, "__init__", object.__init__) is object.__init__: + cls.__init__ = cls.registry.constructor + + +class MappedAsDataclass(metaclass=DCTransformDeclarative): + """Mixin class to indicate when mapping this class, also convert it to be + a dataclass. + + .. seealso:: + + :ref:`orm_declarative_native_dataclasses` - complete background + on SQLAlchemy native dataclass mapping + + .. 
versionadded:: 2.0 + + """ + + def __init_subclass__( + cls, + init: Union[_NoArg, bool] = _NoArg.NO_ARG, + repr: Union[_NoArg, bool] = _NoArg.NO_ARG, # noqa: A002 + eq: Union[_NoArg, bool] = _NoArg.NO_ARG, + order: Union[_NoArg, bool] = _NoArg.NO_ARG, + unsafe_hash: Union[_NoArg, bool] = _NoArg.NO_ARG, + match_args: Union[_NoArg, bool] = _NoArg.NO_ARG, + kw_only: Union[_NoArg, bool] = _NoArg.NO_ARG, + dataclass_callable: Union[ + _NoArg, Callable[..., Type[Any]] + ] = _NoArg.NO_ARG, + **kw: Any, + ) -> None: + apply_dc_transforms: _DataclassArguments = { + "init": init, + "repr": repr, + "eq": eq, + "order": order, + "unsafe_hash": unsafe_hash, + "match_args": match_args, + "kw_only": kw_only, + "dataclass_callable": dataclass_callable, + } + + current_transforms: _DataclassArguments + + if hasattr(cls, "_sa_apply_dc_transforms"): + current = cls._sa_apply_dc_transforms + + _ClassScanMapperConfig._assert_dc_arguments(current) + + cls._sa_apply_dc_transforms = current_transforms = { # type: ignore # noqa: E501 + k: current.get(k, _NoArg.NO_ARG) if v is _NoArg.NO_ARG else v + for k, v in apply_dc_transforms.items() + } + else: + cls._sa_apply_dc_transforms = current_transforms = ( + apply_dc_transforms + ) + + super().__init_subclass__(**kw) + + if not _is_mapped_class(cls): + new_anno = ( + _ClassScanMapperConfig._update_annotations_for_non_mapped_class + )(cls) + _ClassScanMapperConfig._apply_dataclasses_to_any_class( + current_transforms, cls, new_anno + ) + + +class DeclarativeBase( + # Inspectable is used only by the mypy plugin + inspection.Inspectable[InstanceState[Any]], + metaclass=DeclarativeAttributeIntercept, +): + """Base class used for declarative class definitions. + + The :class:`_orm.DeclarativeBase` allows for the creation of new + declarative bases in such a way that is compatible with type checkers:: + + + from sqlalchemy.orm import DeclarativeBase + + + class Base(DeclarativeBase): + pass + + The above ``Base`` class is now usable as the base for new declarative + mappings. The superclass makes use of the ``__init_subclass__()`` + method to set up new classes and metaclasses aren't used. + + When first used, the :class:`_orm.DeclarativeBase` class instantiates a new + :class:`_orm.registry` to be used with the base, assuming one was not + provided explicitly. The :class:`_orm.DeclarativeBase` class supports + class-level attributes which act as parameters for the construction of this + registry; such as to indicate a specific :class:`_schema.MetaData` + collection as well as a specific value for + :paramref:`_orm.registry.type_annotation_map`:: + + from typing_extensions import Annotated + + from sqlalchemy import BigInteger + from sqlalchemy import MetaData + from sqlalchemy import String + from sqlalchemy.orm import DeclarativeBase + + bigint = Annotated[int, "bigint"] + my_metadata = MetaData() + + + class Base(DeclarativeBase): + metadata = my_metadata + type_annotation_map = { + str: String().with_variant(String(255), "mysql", "mariadb"), + bigint: BigInteger(), + } + + Class-level attributes which may be specified include: + + :param metadata: optional :class:`_schema.MetaData` collection. + If a :class:`_orm.registry` is constructed automatically, this + :class:`_schema.MetaData` collection will be used to construct it. + Otherwise, the local :class:`_schema.MetaData` collection will supercede + that used by an existing :class:`_orm.registry` passed using the + :paramref:`_orm.DeclarativeBase.registry` parameter. 
+ :param type_annotation_map: optional type annotation map that will be + passed to the :class:`_orm.registry` as + :paramref:`_orm.registry.type_annotation_map`. + :param registry: supply a pre-existing :class:`_orm.registry` directly. + + .. versionadded:: 2.0 Added :class:`.DeclarativeBase`, so that declarative + base classes may be constructed in such a way that is also recognized + by :pep:`484` type checkers. As a result, :class:`.DeclarativeBase` + and other subclassing-oriented APIs should be seen as + superseding previous "class returned by a function" APIs, namely + :func:`_orm.declarative_base` and :meth:`_orm.registry.generate_base`, + where the base class returned cannot be recognized by type checkers + without using plugins. + + **__init__ behavior** + + In a plain Python class, the base-most ``__init__()`` method in the class + hierarchy is ``object.__init__()``, which accepts no arguments. However, + when the :class:`_orm.DeclarativeBase` subclass is first declared, the + class is given an ``__init__()`` method that links to the + :paramref:`_orm.registry.constructor` constructor function, if no + ``__init__()`` method is already present; this is the usual declarative + constructor that will assign keyword arguments as attributes on the + instance, assuming those attributes are established at the class level + (i.e. are mapped, or are linked to a descriptor). This constructor is + **never accessed by a mapped class without being called explicitly via + super()**, as mapped classes are themselves given an ``__init__()`` method + directly which calls :paramref:`_orm.registry.constructor`, so in the + default case works independently of what the base-most ``__init__()`` + method does. + + .. versionchanged:: 2.0.1 :class:`_orm.DeclarativeBase` has a default + constructor that links to :paramref:`_orm.registry.constructor` by + default, so that calls to ``super().__init__()`` can access this + constructor. Previously, due to an implementation mistake, this default + constructor was missing, and calling ``super().__init__()`` would invoke + ``object.__init__()``. + + The :class:`_orm.DeclarativeBase` subclass may also declare an explicit + ``__init__()`` method which will replace the use of the + :paramref:`_orm.registry.constructor` function at this level:: + + class Base(DeclarativeBase): + def __init__(self, id=None): + self.id = id + + Mapped classes still will not invoke this constructor implicitly; it + remains only accessible by calling ``super().__init__()``:: + + class MyClass(Base): + def __init__(self, id=None, name=None): + self.name = name + super().__init__(id=id) + + Note that this is a different behavior from what functions like the legacy + :func:`_orm.declarative_base` would do; the base created by those functions + would always install :paramref:`_orm.registry.constructor` for + ``__init__()``. + + + """ + + if typing.TYPE_CHECKING: + + def _sa_inspect_type(self) -> Mapper[Self]: ... + + def _sa_inspect_instance(self) -> InstanceState[Self]: ... + + _sa_registry: ClassVar[_RegistryType] + + registry: ClassVar[_RegistryType] + """Refers to the :class:`_orm.registry` in use where new + :class:`_orm.Mapper` objects will be associated.""" + + metadata: ClassVar[MetaData] + """Refers to the :class:`_schema.MetaData` collection that will be used + for new :class:`_schema.Table` objects. + + .. 
seealso:: + + :ref:`orm_declarative_metadata` + + """ + + __name__: ClassVar[str] + + # this ideally should be Mapper[Self], but mypy as of 1.4.1 does not + # like it, and breaks the declared_attr_one test. Pyright/pylance is + # ok with it. + __mapper__: ClassVar[Mapper[Any]] + """The :class:`_orm.Mapper` object to which a particular class is + mapped. + + May also be acquired using :func:`_sa.inspect`, e.g. + ``inspect(klass)``. + + """ + + __table__: ClassVar[FromClause] + """The :class:`_sql.FromClause` to which a particular subclass is + mapped. + + This is usually an instance of :class:`_schema.Table` but may also + refer to other kinds of :class:`_sql.FromClause` such as + :class:`_sql.Subquery`, depending on how the class is mapped. + + .. seealso:: + + :ref:`orm_declarative_metadata` + + """ + + # pyright/pylance do not consider a classmethod a ClassVar so use Any + # https://github.com/microsoft/pylance-release/issues/3484 + __tablename__: Any + """String name to assign to the generated + :class:`_schema.Table` object, if not specified directly via + :attr:`_orm.DeclarativeBase.__table__`. + + .. seealso:: + + :ref:`orm_declarative_table` + + """ + + __mapper_args__: Any + """Dictionary of arguments which will be passed to the + :class:`_orm.Mapper` constructor. + + .. seealso:: + + :ref:`orm_declarative_mapper_options` + + """ + + __table_args__: Any + """A dictionary or tuple of arguments that will be passed to the + :class:`_schema.Table` constructor. See + :ref:`orm_declarative_table_configuration` + for background on the specific structure of this collection. + + .. seealso:: + + :ref:`orm_declarative_table_configuration` + + """ + + def __init__(self, **kw: Any): ... + + def __init_subclass__(cls, **kw: Any) -> None: + if DeclarativeBase in cls.__bases__: + _check_not_declarative(cls, DeclarativeBase) + _setup_declarative_base(cls) + else: + _as_declarative(cls._sa_registry, cls, cls.__dict__) + super().__init_subclass__(**kw) + + +def _check_not_declarative(cls: Type[Any], base: Type[Any]) -> None: + cls_dict = cls.__dict__ + if ( + "__table__" in cls_dict + and not ( + callable(cls_dict["__table__"]) + or hasattr(cls_dict["__table__"], "__get__") + ) + ) or isinstance(cls_dict.get("__tablename__", None), str): + raise exc.InvalidRequestError( + f"Cannot use {base.__name__!r} directly as a declarative base " + "class. Create a Base by creating a subclass of it." + ) + + +class DeclarativeBaseNoMeta( + # Inspectable is used only by the mypy plugin + inspection.Inspectable[InstanceState[Any]] +): + """Same as :class:`_orm.DeclarativeBase`, but does not use a metaclass + to intercept new attributes. + + The :class:`_orm.DeclarativeBaseNoMeta` base may be used when use of + custom metaclasses is desirable. + + .. versionadded:: 2.0 + + + """ + + _sa_registry: ClassVar[_RegistryType] + + registry: ClassVar[_RegistryType] + """Refers to the :class:`_orm.registry` in use where new + :class:`_orm.Mapper` objects will be associated.""" + + metadata: ClassVar[MetaData] + """Refers to the :class:`_schema.MetaData` collection that will be used + for new :class:`_schema.Table` objects. + + .. seealso:: + + :ref:`orm_declarative_metadata` + + """ + + # this ideally should be Mapper[Self], but mypy as of 1.4.1 does not + # like it, and breaks the declared_attr_one test. Pyright/pylance is + # ok with it. + __mapper__: ClassVar[Mapper[Any]] + """The :class:`_orm.Mapper` object to which a particular class is + mapped. + + May also be acquired using :func:`_sa.inspect`, e.g. 
+ ``inspect(klass)``. + + """ + + __table__: Optional[FromClause] + """The :class:`_sql.FromClause` to which a particular subclass is + mapped. + + This is usually an instance of :class:`_schema.Table` but may also + refer to other kinds of :class:`_sql.FromClause` such as + :class:`_sql.Subquery`, depending on how the class is mapped. + + .. seealso:: + + :ref:`orm_declarative_metadata` + + """ + + if typing.TYPE_CHECKING: + + def _sa_inspect_type(self) -> Mapper[Self]: ... + + def _sa_inspect_instance(self) -> InstanceState[Self]: ... + + __tablename__: Any + """String name to assign to the generated + :class:`_schema.Table` object, if not specified directly via + :attr:`_orm.DeclarativeBase.__table__`. + + .. seealso:: + + :ref:`orm_declarative_table` + + """ + + __mapper_args__: Any + """Dictionary of arguments which will be passed to the + :class:`_orm.Mapper` constructor. + + .. seealso:: + + :ref:`orm_declarative_mapper_options` + + """ + + __table_args__: Any + """A dictionary or tuple of arguments that will be passed to the + :class:`_schema.Table` constructor. See + :ref:`orm_declarative_table_configuration` + for background on the specific structure of this collection. + + .. seealso:: + + :ref:`orm_declarative_table_configuration` + + """ + + def __init__(self, **kw: Any): ... + + def __init_subclass__(cls, **kw: Any) -> None: + if DeclarativeBaseNoMeta in cls.__bases__: + _check_not_declarative(cls, DeclarativeBaseNoMeta) + _setup_declarative_base(cls) + else: + _as_declarative(cls._sa_registry, cls, cls.__dict__) + super().__init_subclass__(**kw) + + +def add_mapped_attribute( + target: Type[_O], key: str, attr: MapperProperty[Any] +) -> None: + """Add a new mapped attribute to an ORM mapped class. + + E.g.:: + + add_mapped_attribute(User, "addresses", relationship(Address)) + + This may be used for ORM mappings that aren't using a declarative + metaclass that intercepts attribute set operations. + + .. versionadded:: 2.0 + + + """ + _add_attribute(target, key, attr) + + +def declarative_base( + *, + metadata: Optional[MetaData] = None, + mapper: Optional[Callable[..., Mapper[Any]]] = None, + cls: Type[Any] = object, + name: str = "Base", + class_registry: Optional[clsregistry._ClsRegistryType] = None, + type_annotation_map: Optional[_TypeAnnotationMapType] = None, + constructor: Callable[..., None] = _declarative_constructor, + metaclass: Type[Any] = DeclarativeMeta, +) -> Any: + r"""Construct a base class for declarative class definitions. + + The new base class will be given a metaclass that produces + appropriate :class:`~sqlalchemy.schema.Table` objects and makes + the appropriate :class:`_orm.Mapper` calls based on the + information provided declaratively in the class and any subclasses + of the class. + + .. versionchanged:: 2.0 Note that the :func:`_orm.declarative_base` + function is superseded by the new :class:`_orm.DeclarativeBase` class, + which generates a new "base" class using subclassing, rather than + return value of a function. This allows an approach that is compatible + with :pep:`484` typing tools. + + The :func:`_orm.declarative_base` function is a shorthand version + of using the :meth:`_orm.registry.generate_base` + method. 
That is, the following:: + + from sqlalchemy.orm import declarative_base + + Base = declarative_base() + + Is equivalent to:: + + from sqlalchemy.orm import registry + + mapper_registry = registry() + Base = mapper_registry.generate_base() + + See the docstring for :class:`_orm.registry` + and :meth:`_orm.registry.generate_base` + for more details. + + .. versionchanged:: 1.4 The :func:`_orm.declarative_base` + function is now a specialization of the more generic + :class:`_orm.registry` class. The function also moves to the + ``sqlalchemy.orm`` package from the ``declarative.ext`` package. + + + :param metadata: + An optional :class:`~sqlalchemy.schema.MetaData` instance. All + :class:`~sqlalchemy.schema.Table` objects implicitly declared by + subclasses of the base will share this MetaData. A MetaData instance + will be created if none is provided. The + :class:`~sqlalchemy.schema.MetaData` instance will be available via the + ``metadata`` attribute of the generated declarative base class. + + :param mapper: + An optional callable, defaults to :class:`_orm.Mapper`. Will + be used to map subclasses to their Tables. + + :param cls: + Defaults to :class:`object`. A type to use as the base for the generated + declarative base class. May be a class or tuple of classes. + + :param name: + Defaults to ``Base``. The display name for the generated + class. Customizing this is not required, but can improve clarity in + tracebacks and debugging. + + :param constructor: + Specify the implementation for the ``__init__`` function on a mapped + class that has no ``__init__`` of its own. Defaults to an + implementation that assigns \**kwargs for declared + fields and relationships to an instance. If ``None`` is supplied, + no __init__ will be provided and construction will fall back to + cls.__init__ by way of the normal Python semantics. + + :param class_registry: optional dictionary that will serve as the + registry of class names-> mapped classes when string names + are used to identify classes inside of :func:`_orm.relationship` + and others. Allows two or more declarative base classes + to share the same registry of class names for simplified + inter-base relationships. + + :param type_annotation_map: optional dictionary of Python types to + SQLAlchemy :class:`_types.TypeEngine` classes or instances. This + is used exclusively by the :class:`_orm.MappedColumn` construct + to produce column types based on annotations within the + :class:`_orm.Mapped` type. + + + .. versionadded:: 2.0 + + .. seealso:: + + :ref:`orm_declarative_mapped_column_type_map` + + :param metaclass: + Defaults to :class:`.DeclarativeMeta`. A metaclass or __metaclass__ + compatible callable to use as the meta type of the generated + declarative base class. + + .. seealso:: + + :class:`_orm.registry` + + """ + + return registry( + metadata=metadata, + class_registry=class_registry, + constructor=constructor, + type_annotation_map=type_annotation_map, + ).generate_base( + mapper=mapper, + cls=cls, + name=name, + metaclass=metaclass, + ) + + +class registry: + """Generalized registry for mapping classes. + + The :class:`_orm.registry` serves as the basis for maintaining a collection + of mappings, and provides configurational hooks used to map classes. + + The three general kinds of mappings supported are Declarative Base, + Declarative Decorator, and Imperative Mapping. 
All of these mapping + styles may be used interchangeably: + + * :meth:`_orm.registry.generate_base` returns a new declarative base + class, and is the underlying implementation of the + :func:`_orm.declarative_base` function. + + * :meth:`_orm.registry.mapped` provides a class decorator that will + apply declarative mapping to a class without the use of a declarative + base class. + + * :meth:`_orm.registry.map_imperatively` will produce a + :class:`_orm.Mapper` for a class without scanning the class for + declarative class attributes. This method suits the use case historically + provided by the ``sqlalchemy.orm.mapper()`` classical mapping function, + which is removed as of SQLAlchemy 2.0. + + .. versionadded:: 1.4 + + .. seealso:: + + :ref:`orm_mapping_classes_toplevel` - overview of class mapping + styles. + + """ + + _class_registry: clsregistry._ClsRegistryType + _managers: weakref.WeakKeyDictionary[ClassManager[Any], Literal[True]] + _non_primary_mappers: weakref.WeakKeyDictionary[Mapper[Any], Literal[True]] + metadata: MetaData + constructor: CallableReference[Callable[..., None]] + type_annotation_map: _MutableTypeAnnotationMapType + _dependents: Set[_RegistryType] + _dependencies: Set[_RegistryType] + _new_mappers: bool + + def __init__( + self, + *, + metadata: Optional[MetaData] = None, + class_registry: Optional[clsregistry._ClsRegistryType] = None, + type_annotation_map: Optional[_TypeAnnotationMapType] = None, + constructor: Callable[..., None] = _declarative_constructor, + ): + r"""Construct a new :class:`_orm.registry` + + :param metadata: + An optional :class:`_schema.MetaData` instance. All + :class:`_schema.Table` objects generated using declarative + table mapping will make use of this :class:`_schema.MetaData` + collection. If this argument is left at its default of ``None``, + a blank :class:`_schema.MetaData` collection is created. + + :param constructor: + Specify the implementation for the ``__init__`` function on a mapped + class that has no ``__init__`` of its own. Defaults to an + implementation that assigns \**kwargs for declared + fields and relationships to an instance. If ``None`` is supplied, + no __init__ will be provided and construction will fall back to + cls.__init__ by way of the normal Python semantics. + + :param class_registry: optional dictionary that will serve as the + registry of class names-> mapped classes when string names + are used to identify classes inside of :func:`_orm.relationship` + and others. Allows two or more declarative base classes + to share the same registry of class names for simplified + inter-base relationships. + + :param type_annotation_map: optional dictionary of Python types to + SQLAlchemy :class:`_types.TypeEngine` classes or instances. + The provided dict will update the default type mapping. This + is used exclusively by the :class:`_orm.MappedColumn` construct + to produce column types based on annotations within the + :class:`_orm.Mapped` type. + + .. versionadded:: 2.0 + + .. 
seealso:: + + :ref:`orm_declarative_mapped_column_type_map` + + + """ + lcl_metadata = metadata or MetaData() + + if class_registry is None: + class_registry = weakref.WeakValueDictionary() + + self._class_registry = class_registry + self._managers = weakref.WeakKeyDictionary() + self._non_primary_mappers = weakref.WeakKeyDictionary() + self.metadata = lcl_metadata + self.constructor = constructor + self.type_annotation_map = {} + if type_annotation_map is not None: + self.update_type_annotation_map(type_annotation_map) + self._dependents = set() + self._dependencies = set() + + self._new_mappers = False + + with mapperlib._CONFIGURE_MUTEX: + mapperlib._mapper_registries[self] = True + + def update_type_annotation_map( + self, + type_annotation_map: _TypeAnnotationMapType, + ) -> None: + """update the :paramref:`_orm.registry.type_annotation_map` with new + values.""" + + self.type_annotation_map.update( + { + de_optionalize_union_types(typ): sqltype + for typ, sqltype in type_annotation_map.items() + } + ) + + def _resolve_type( + self, python_type: _MatchedOnType, _do_fallbacks: bool = True + ) -> Optional[sqltypes.TypeEngine[Any]]: + python_type_type: Type[Any] + search: Iterable[Tuple[_MatchedOnType, Type[Any]]] + + if is_generic(python_type): + if is_literal(python_type): + python_type_type = python_type # type: ignore[assignment] + + search = ( + (python_type, python_type_type), + *((lt, python_type_type) for lt in LITERAL_TYPES), + ) + else: + python_type_type = python_type.__origin__ + search = ((python_type, python_type_type),) + elif isinstance(python_type, type): + python_type_type = python_type + search = ((pt, pt) for pt in python_type_type.__mro__) + else: + python_type_type = python_type # type: ignore[assignment] + search = ((python_type, python_type_type),) + + for pt, flattened in search: + # we search through full __mro__ for types. however... + sql_type = self.type_annotation_map.get(pt) + if sql_type is None: + sql_type = sqltypes._type_map_get(pt) # type: ignore # noqa: E501 + + if sql_type is not None: + sql_type_inst = sqltypes.to_instance(sql_type) + + # ... this additional step will reject most + # type -> supertype matches, such as if we had + # a MyInt(int) subclass. note also we pass NewType() + # here directly; these always have to be in the + # type_annotation_map to be useful + resolved_sql_type = sql_type_inst._resolve_for_python_type( + python_type_type, + pt, + flattened, + ) + if resolved_sql_type is not None: + return resolved_sql_type + + # 2.0 fallbacks + if _do_fallbacks: + python_type_to_check: Any = None + kind = None + if is_pep695(python_type): + # NOTE: assume there aren't type alias types of new types. 
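# Illustrative aside (the ``UserId`` names below are assumed, not part of this
# module): given ``type UserId = int`` (PEP 695) or
# ``UserId = NewType("UserId", int)`` with no ``UserId`` entry in
# ``type_annotation_map``, the fallback below unwraps the alias / NewType down
# to ``int``, resolves against that, and emits the deprecation warning asking
# for an explicit entry such as
# ``registry(type_annotation_map={UserId: Integer()})``.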
+ python_type_to_check = python_type + while is_pep695(python_type_to_check): + python_type_to_check = python_type_to_check.__value__ + python_type_to_check = de_optionalize_union_types( + python_type_to_check + ) + kind = "TypeAliasType" + if is_newtype(python_type): + python_type_to_check = flatten_newtype(python_type) + kind = "NewType" + + if python_type_to_check is not None: + res_after_fallback = self._resolve_type( + python_type_to_check, False + ) + if res_after_fallback is not None: + assert kind is not None + warn_deprecated( + f"Matching the provided {kind} '{python_type}' on " + "its resolved value without matching it in the " + "type_annotation_map is deprecated; add this type to " + "the type_annotation_map to allow it to match " + "explicitly.", + "2.0", + ) + return res_after_fallback + + return None + + @property + def mappers(self) -> FrozenSet[Mapper[Any]]: + """read only collection of all :class:`_orm.Mapper` objects.""" + + return frozenset(manager.mapper for manager in self._managers).union( + self._non_primary_mappers + ) + + def _set_depends_on(self, registry: RegistryType) -> None: + if registry is self: + return + registry._dependents.add(self) + self._dependencies.add(registry) + + def _flag_new_mapper(self, mapper: Mapper[Any]) -> None: + mapper._ready_for_configure = True + if self._new_mappers: + return + + for reg in self._recurse_with_dependents({self}): + reg._new_mappers = True + + @classmethod + def _recurse_with_dependents( + cls, registries: Set[RegistryType] + ) -> Iterator[RegistryType]: + todo = registries + done = set() + while todo: + reg = todo.pop() + done.add(reg) + + # if yielding would remove dependents, make sure we have + # them before + todo.update(reg._dependents.difference(done)) + yield reg + + # if yielding would add dependents, make sure we have them + # after + todo.update(reg._dependents.difference(done)) + + @classmethod + def _recurse_with_dependencies( + cls, registries: Set[RegistryType] + ) -> Iterator[RegistryType]: + todo = registries + done = set() + while todo: + reg = todo.pop() + done.add(reg) + + # if yielding would remove dependencies, make sure we have + # them before + todo.update(reg._dependencies.difference(done)) + + yield reg + + # if yielding would remove dependencies, make sure we have + # them before + todo.update(reg._dependencies.difference(done)) + + def _mappers_to_configure(self) -> Iterator[Mapper[Any]]: + return itertools.chain( + ( + manager.mapper + for manager in list(self._managers) + if manager.is_mapped + and not manager.mapper.configured + and manager.mapper._ready_for_configure + ), + ( + npm + for npm in list(self._non_primary_mappers) + if not npm.configured and npm._ready_for_configure + ), + ) + + def _add_non_primary_mapper(self, np_mapper: Mapper[Any]) -> None: + self._non_primary_mappers[np_mapper] = True + + def _dispose_cls(self, cls: Type[_O]) -> None: + clsregistry.remove_class(cls.__name__, cls, self._class_registry) + + def _add_manager(self, manager: ClassManager[Any]) -> None: + self._managers[manager] = True + if manager.is_mapped: + raise exc.ArgumentError( + "Class '%s' already has a primary mapper defined. " + % manager.class_ + ) + assert manager.registry is None + manager.registry = self + + def configure(self, cascade: bool = False) -> None: + """Configure all as-yet unconfigured mappers in this + :class:`_orm.registry`. 
+ + The configure step is used to reconcile and initialize the + :func:`_orm.relationship` linkages between mapped classes, as well as + to invoke configuration events such as the + :meth:`_orm.MapperEvents.before_configured` and + :meth:`_orm.MapperEvents.after_configured`, which may be used by ORM + extensions or user-defined extension hooks. + + If one or more mappers in this registry contain + :func:`_orm.relationship` constructs that refer to mapped classes in + other registries, this registry is said to be *dependent* on those + registries. In order to configure those dependent registries + automatically, the :paramref:`_orm.registry.configure.cascade` flag + should be set to ``True``. Otherwise, if they are not configured, an + exception will be raised. The rationale behind this behavior is to + allow an application to programmatically invoke configuration of + registries while controlling whether or not the process implicitly + reaches other registries. + + As an alternative to invoking :meth:`_orm.registry.configure`, the ORM + function :func:`_orm.configure_mappers` function may be used to ensure + configuration is complete for all :class:`_orm.registry` objects in + memory. This is generally simpler to use and also predates the usage of + :class:`_orm.registry` objects overall. However, this function will + impact all mappings throughout the running Python process and may be + more memory/time consuming for an application that has many registries + in use for different purposes that may not be needed immediately. + + .. seealso:: + + :func:`_orm.configure_mappers` + + + .. versionadded:: 1.4.0b2 + + """ + mapperlib._configure_registries({self}, cascade=cascade) + + def dispose(self, cascade: bool = False) -> None: + """Dispose of all mappers in this :class:`_orm.registry`. + + After invocation, all the classes that were mapped within this registry + will no longer have class instrumentation associated with them. This + method is the per-:class:`_orm.registry` analogue to the + application-wide :func:`_orm.clear_mappers` function. + + If this registry contains mappers that are dependencies of other + registries, typically via :func:`_orm.relationship` links, then those + registries must be disposed as well. When such registries exist in + relation to this one, their :meth:`_orm.registry.dispose` method will + also be called, if the :paramref:`_orm.registry.dispose.cascade` flag + is set to ``True``; otherwise, an error is raised if those registries + were not already disposed. + + .. versionadded:: 1.4.0b2 + + .. seealso:: + + :func:`_orm.clear_mappers` + + """ + + mapperlib._dispose_registries({self}, cascade=cascade) + + def _dispose_manager_and_mapper(self, manager: ClassManager[Any]) -> None: + if "mapper" in manager.__dict__: + mapper = manager.mapper + + mapper._set_dispose_flags() + + class_ = manager.class_ + self._dispose_cls(class_) + instrumentation._instrumentation_factory.unregister(class_) + + def generate_base( + self, + mapper: Optional[Callable[..., Mapper[Any]]] = None, + cls: Type[Any] = object, + name: str = "Base", + metaclass: Type[Any] = DeclarativeMeta, + ) -> Any: + """Generate a declarative base class. + + Classes that inherit from the returned class object will be + automatically mapped using declarative mapping. 
+ + E.g.:: + + from sqlalchemy.orm import registry + + mapper_registry = registry() + + Base = mapper_registry.generate_base() + + + class MyClass(Base): + __tablename__ = "my_table" + id = Column(Integer, primary_key=True) + + The above dynamically generated class is equivalent to the + non-dynamic example below:: + + from sqlalchemy.orm import registry + from sqlalchemy.orm.decl_api import DeclarativeMeta + + mapper_registry = registry() + + + class Base(metaclass=DeclarativeMeta): + __abstract__ = True + registry = mapper_registry + metadata = mapper_registry.metadata + + __init__ = mapper_registry.constructor + + .. versionchanged:: 2.0 Note that the + :meth:`_orm.registry.generate_base` method is superseded by the new + :class:`_orm.DeclarativeBase` class, which generates a new "base" + class using subclassing, rather than return value of a function. + This allows an approach that is compatible with :pep:`484` typing + tools. + + The :meth:`_orm.registry.generate_base` method provides the + implementation for the :func:`_orm.declarative_base` function, which + creates the :class:`_orm.registry` and base class all at once. + + See the section :ref:`orm_declarative_mapping` for background and + examples. + + :param mapper: + An optional callable, defaults to :class:`_orm.Mapper`. + This function is used to generate new :class:`_orm.Mapper` objects. + + :param cls: + Defaults to :class:`object`. A type to use as the base for the + generated declarative base class. May be a class or tuple of classes. + + :param name: + Defaults to ``Base``. The display name for the generated + class. Customizing this is not required, but can improve clarity in + tracebacks and debugging. + + :param metaclass: + Defaults to :class:`.DeclarativeMeta`. A metaclass or __metaclass__ + compatible callable to use as the meta type of the generated + declarative base class. + + .. seealso:: + + :ref:`orm_declarative_mapping` + + :func:`_orm.declarative_base` + + """ + metadata = self.metadata + + bases = not isinstance(cls, tuple) and (cls,) or cls + + class_dict: Dict[str, Any] = dict(registry=self, metadata=metadata) + if isinstance(cls, type): + class_dict["__doc__"] = cls.__doc__ + + if self.constructor is not None: + class_dict["__init__"] = self.constructor + + class_dict["__abstract__"] = True + if mapper: + class_dict["__mapper_cls__"] = mapper + + if hasattr(cls, "__class_getitem__"): + + def __class_getitem__(cls: Type[_T], key: Any) -> Type[_T]: + # allow generic classes in py3.9+ + return cls + + class_dict["__class_getitem__"] = __class_getitem__ + + return metaclass(name, bases, class_dict) + + @compat_typing.dataclass_transform( + field_specifiers=( + MappedColumn, + RelationshipProperty, + Composite, + Synonym, + mapped_column, + relationship, + composite, + synonym, + deferred, + ), + ) + @overload + def mapped_as_dataclass(self, __cls: Type[_O]) -> Type[_O]: ... + + @overload + def mapped_as_dataclass( + self, + __cls: Literal[None] = ..., + *, + init: Union[_NoArg, bool] = ..., + repr: Union[_NoArg, bool] = ..., # noqa: A002 + eq: Union[_NoArg, bool] = ..., + order: Union[_NoArg, bool] = ..., + unsafe_hash: Union[_NoArg, bool] = ..., + match_args: Union[_NoArg, bool] = ..., + kw_only: Union[_NoArg, bool] = ..., + dataclass_callable: Union[_NoArg, Callable[..., Type[Any]]] = ..., + ) -> Callable[[Type[_O]], Type[_O]]: ... 
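# Illustrative usage sketch (class and column names are assumed, not part of
# this module); both decorator forms accepted by the overloads above:
#
#     reg = registry()
#
#     @reg.mapped_as_dataclass
#     class User:
#         __tablename__ = "user_account"
#
#         id: Mapped[int] = mapped_column(primary_key=True, init=False)
#         name: Mapped[str] = mapped_column(String(30))
#
#     @reg.mapped_as_dataclass(kw_only=True)
#     class Address:
#         __tablename__ = "address"
#
#         id: Mapped[int] = mapped_column(primary_key=True, init=False)
#         email: Mapped[str]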
+ + def mapped_as_dataclass( + self, + __cls: Optional[Type[_O]] = None, + *, + init: Union[_NoArg, bool] = _NoArg.NO_ARG, + repr: Union[_NoArg, bool] = _NoArg.NO_ARG, # noqa: A002 + eq: Union[_NoArg, bool] = _NoArg.NO_ARG, + order: Union[_NoArg, bool] = _NoArg.NO_ARG, + unsafe_hash: Union[_NoArg, bool] = _NoArg.NO_ARG, + match_args: Union[_NoArg, bool] = _NoArg.NO_ARG, + kw_only: Union[_NoArg, bool] = _NoArg.NO_ARG, + dataclass_callable: Union[ + _NoArg, Callable[..., Type[Any]] + ] = _NoArg.NO_ARG, + ) -> Union[Type[_O], Callable[[Type[_O]], Type[_O]]]: + """Class decorator that will apply the Declarative mapping process + to a given class, and additionally convert the class to be a + Python dataclass. + + .. seealso:: + + :ref:`orm_declarative_native_dataclasses` - complete background + on SQLAlchemy native dataclass mapping + + + .. versionadded:: 2.0 + + + """ + + def decorate(cls: Type[_O]) -> Type[_O]: + setattr( + cls, + "_sa_apply_dc_transforms", + { + "init": init, + "repr": repr, + "eq": eq, + "order": order, + "unsafe_hash": unsafe_hash, + "match_args": match_args, + "kw_only": kw_only, + "dataclass_callable": dataclass_callable, + }, + ) + _as_declarative(self, cls, cls.__dict__) + return cls + + if __cls: + return decorate(__cls) + else: + return decorate + + def mapped(self, cls: Type[_O]) -> Type[_O]: + """Class decorator that will apply the Declarative mapping process + to a given class. + + E.g.:: + + from sqlalchemy.orm import registry + + mapper_registry = registry() + + + @mapper_registry.mapped + class Foo: + __tablename__ = "some_table" + + id = Column(Integer, primary_key=True) + name = Column(String) + + See the section :ref:`orm_declarative_mapping` for complete + details and examples. + + :param cls: class to be mapped. + + :return: the class that was passed. + + .. seealso:: + + :ref:`orm_declarative_mapping` + + :meth:`_orm.registry.generate_base` - generates a base class + that will apply Declarative mapping to subclasses automatically + using a Python metaclass. + + .. seealso:: + + :meth:`_orm.registry.mapped_as_dataclass` + + """ + _as_declarative(self, cls, cls.__dict__) + return cls + + def as_declarative_base(self, **kw: Any) -> Callable[[Type[_T]], Type[_T]]: + """ + Class decorator which will invoke + :meth:`_orm.registry.generate_base` + for a given base class. + + E.g.:: + + from sqlalchemy.orm import registry + + mapper_registry = registry() + + + @mapper_registry.as_declarative_base() + class Base: + @declared_attr + def __tablename__(cls): + return cls.__name__.lower() + + id = Column(Integer, primary_key=True) + + + class MyMappedClass(Base): ... + + All keyword arguments passed to + :meth:`_orm.registry.as_declarative_base` are passed + along to :meth:`_orm.registry.generate_base`. + + """ + + def decorate(cls: Type[_T]) -> Type[_T]: + kw["cls"] = cls + kw["name"] = cls.__name__ + return self.generate_base(**kw) # type: ignore + + return decorate + + def map_declaratively(self, cls: Type[_O]) -> Mapper[_O]: + """Map a class declaratively. + + In this form of mapping, the class is scanned for mapping information, + including for columns to be associated with a table, and/or an + actual table object. + + Returns the :class:`_orm.Mapper` object. 
+ + E.g.:: + + from sqlalchemy.orm import registry + + mapper_registry = registry() + + + class Foo: + __tablename__ = "some_table" + + id = Column(Integer, primary_key=True) + name = Column(String) + + + mapper = mapper_registry.map_declaratively(Foo) + + This function is more conveniently invoked indirectly via either the + :meth:`_orm.registry.mapped` class decorator or by subclassing a + declarative metaclass generated from + :meth:`_orm.registry.generate_base`. + + See the section :ref:`orm_declarative_mapping` for complete + details and examples. + + :param cls: class to be mapped. + + :return: a :class:`_orm.Mapper` object. + + .. seealso:: + + :ref:`orm_declarative_mapping` + + :meth:`_orm.registry.mapped` - more common decorator interface + to this function. + + :meth:`_orm.registry.map_imperatively` + + """ + _as_declarative(self, cls, cls.__dict__) + return cls.__mapper__ # type: ignore + + def map_imperatively( + self, + class_: Type[_O], + local_table: Optional[FromClause] = None, + **kw: Any, + ) -> Mapper[_O]: + r"""Map a class imperatively. + + In this form of mapping, the class is not scanned for any mapping + information. Instead, all mapping constructs are passed as + arguments. + + This method is intended to be fully equivalent to the now-removed + SQLAlchemy ``mapper()`` function, except that it's in terms of + a particular registry. + + E.g.:: + + from sqlalchemy.orm import registry + + mapper_registry = registry() + + my_table = Table( + "my_table", + mapper_registry.metadata, + Column("id", Integer, primary_key=True), + ) + + + class MyClass: + pass + + + mapper_registry.map_imperatively(MyClass, my_table) + + See the section :ref:`orm_imperative_mapping` for complete background + and usage examples. + + :param class\_: The class to be mapped. Corresponds to the + :paramref:`_orm.Mapper.class_` parameter. + + :param local_table: the :class:`_schema.Table` or other + :class:`_sql.FromClause` object that is the subject of the mapping. + Corresponds to the + :paramref:`_orm.Mapper.local_table` parameter. + + :param \**kw: all other keyword arguments are passed to the + :class:`_orm.Mapper` constructor directly. + + .. seealso:: + + :ref:`orm_imperative_mapping` + + :ref:`orm_declarative_mapping` + + """ + return _mapper(self, class_, local_table, kw) + + +RegistryType = registry + +if not TYPE_CHECKING: + # allow for runtime type resolution of ``ClassVar[_RegistryType]`` + _RegistryType = registry # noqa + + +def as_declarative(**kw: Any) -> Callable[[Type[_T]], Type[_T]]: + """ + Class decorator which will adapt a given class into a + :func:`_orm.declarative_base`. + + This function makes use of the :meth:`_orm.registry.as_declarative_base` + method, by first creating a :class:`_orm.registry` automatically + and then invoking the decorator. + + E.g.:: + + from sqlalchemy.orm import as_declarative + + + @as_declarative() + class Base: + @declared_attr + def __tablename__(cls): + return cls.__name__.lower() + + id = Column(Integer, primary_key=True) + + + class MyMappedClass(Base): ... + + .. 
seealso:: + + :meth:`_orm.registry.as_declarative_base` + + """ + metadata, class_registry = ( + kw.pop("metadata", None), + kw.pop("class_registry", None), + ) + + return registry( + metadata=metadata, class_registry=class_registry + ).as_declarative_base(**kw) + + +@inspection._inspects( + DeclarativeMeta, DeclarativeBase, DeclarativeAttributeIntercept +) +def _inspect_decl_meta(cls: Type[Any]) -> Optional[Mapper[Any]]: + mp: Optional[Mapper[Any]] = _inspect_mapped_class(cls) + if mp is None: + if _DeferredMapperConfig.has_cls(cls): + _DeferredMapperConfig.raise_unmapped_for_cls(cls) + return mp diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/decl_base.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/decl_base.py new file mode 100644 index 0000000000000000000000000000000000000000..1176b504186f5f1ab66afb5aa488534106b3089e --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/decl_base.py @@ -0,0 +1,2186 @@ +# orm/decl_base.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php + +"""Internal implementation for declarative.""" + +from __future__ import annotations + +import collections +import dataclasses +import re +from typing import Any +from typing import Callable +from typing import cast +from typing import Dict +from typing import Iterable +from typing import List +from typing import Mapping +from typing import NamedTuple +from typing import NoReturn +from typing import Optional +from typing import Sequence +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING +from typing import TypeVar +from typing import Union +import weakref + +from . import attributes +from . import clsregistry +from . import exc as orm_exc +from . import instrumentation +from . import mapperlib +from ._typing import _O +from ._typing import attr_is_internal_proxy +from .attributes import InstrumentedAttribute +from .attributes import QueryableAttribute +from .base import _is_mapped_class +from .base import InspectionAttr +from .descriptor_props import CompositeProperty +from .descriptor_props import SynonymProperty +from .interfaces import _AttributeOptions +from .interfaces import _DCAttributeOptions +from .interfaces import _IntrospectsAnnotations +from .interfaces import _MappedAttribute +from .interfaces import _MapsColumns +from .interfaces import MapperProperty +from .mapper import Mapper +from .properties import ColumnProperty +from .properties import MappedColumn +from .util import _extract_mapped_subtype +from .util import _is_mapped_annotation +from .util import class_mapper +from .util import de_stringify_annotation +from .. import event +from .. import exc +from .. 
import util +from ..sql import expression +from ..sql.base import _NoArg +from ..sql.schema import Column +from ..sql.schema import Table +from ..util import topological +from ..util.typing import _AnnotationScanType +from ..util.typing import get_args +from ..util.typing import is_fwd_ref +from ..util.typing import is_literal +from ..util.typing import Protocol +from ..util.typing import TypedDict + +if TYPE_CHECKING: + from ._typing import _ClassDict + from ._typing import _RegistryType + from .base import Mapped + from .decl_api import declared_attr + from .instrumentation import ClassManager + from ..sql.elements import NamedColumn + from ..sql.schema import MetaData + from ..sql.selectable import FromClause + +_T = TypeVar("_T", bound=Any) + +_MapperKwArgs = Mapping[str, Any] +_TableArgsType = Union[Tuple[Any, ...], Dict[str, Any]] + + +class MappedClassProtocol(Protocol[_O]): + """A protocol representing a SQLAlchemy mapped class. + + The protocol is generic on the type of class, use + ``MappedClassProtocol[Any]`` to allow any mapped class. + """ + + __name__: str + __mapper__: Mapper[_O] + __table__: FromClause + + def __call__(self, **kw: Any) -> _O: ... + + +class _DeclMappedClassProtocol(MappedClassProtocol[_O], Protocol): + "Internal more detailed version of ``MappedClassProtocol``." + metadata: MetaData + __tablename__: str + __mapper_args__: _MapperKwArgs + __table_args__: Optional[_TableArgsType] + + _sa_apply_dc_transforms: Optional[_DataclassArguments] + + def __declare_first__(self) -> None: ... + + def __declare_last__(self) -> None: ... + + +class _DataclassArguments(TypedDict): + init: Union[_NoArg, bool] + repr: Union[_NoArg, bool] + eq: Union[_NoArg, bool] + order: Union[_NoArg, bool] + unsafe_hash: Union[_NoArg, bool] + match_args: Union[_NoArg, bool] + kw_only: Union[_NoArg, bool] + dataclass_callable: Union[_NoArg, Callable[..., Type[Any]]] + + +def _declared_mapping_info( + cls: Type[Any], +) -> Optional[Union[_DeferredMapperConfig, Mapper[Any]]]: + # deferred mapping + if _DeferredMapperConfig.has_cls(cls): + return _DeferredMapperConfig.config_for_cls(cls) + # regular mapping + elif _is_mapped_class(cls): + return class_mapper(cls, configure=False) + else: + return None + + +def _is_supercls_for_inherits(cls: Type[Any]) -> bool: + """return True if this class will be used as a superclass to set in + 'inherits'. + + This includes deferred mapper configs that aren't mapped yet, however does + not include classes with _sa_decl_prepare_nocascade (e.g. + ``AbstractConcreteBase``); these concrete-only classes are not set up as + "inherits" until after mappers are configured using + mapper._set_concrete_base() + + """ + if _DeferredMapperConfig.has_cls(cls): + return not _get_immediate_cls_attr( + cls, "_sa_decl_prepare_nocascade", strict=True + ) + # regular mapping + elif _is_mapped_class(cls): + return True + else: + return False + + +def _resolve_for_abstract_or_classical(cls: Type[Any]) -> Optional[Type[Any]]: + if cls is object: + return None + + sup: Optional[Type[Any]] + + if cls.__dict__.get("__abstract__", False): + for base_ in cls.__bases__: + sup = _resolve_for_abstract_or_classical(base_) + if sup is not None: + return sup + else: + return None + else: + clsmanager = _dive_for_cls_manager(cls) + + if clsmanager: + return clsmanager.class_ + else: + return cls + + +def _get_immediate_cls_attr( + cls: Type[Any], attrname: str, strict: bool = False +) -> Optional[Any]: + """return an attribute of the class that is either present directly + on the class, e.g. 
not on a superclass, or is from a superclass but + this superclass is a non-mapped mixin, that is, not a descendant of + the declarative base and is also not classically mapped. + + This is used to detect attributes that indicate something about + a mapped class independently from any mapped classes that it may + inherit from. + + """ + + # the rules are different for this name than others, + # make sure we've moved it out. transitional + assert attrname != "__abstract__" + + if not issubclass(cls, object): + return None + + if attrname in cls.__dict__: + return getattr(cls, attrname) + + for base in cls.__mro__[1:]: + _is_classical_inherits = _dive_for_cls_manager(base) is not None + + if attrname in base.__dict__ and ( + base is cls + or ( + (base in cls.__bases__ if strict else True) + and not _is_classical_inherits + ) + ): + return getattr(base, attrname) + else: + return None + + +def _dive_for_cls_manager(cls: Type[_O]) -> Optional[ClassManager[_O]]: + # because the class manager registration is pluggable, + # we need to do the search for every class in the hierarchy, + # rather than just a simple "cls._sa_class_manager" + + for base in cls.__mro__: + manager: Optional[ClassManager[_O]] = attributes.opt_manager_of_class( + base + ) + if manager: + return manager + return None + + +def _as_declarative( + registry: _RegistryType, cls: Type[Any], dict_: _ClassDict +) -> Optional[_MapperConfig]: + # declarative scans the class for attributes. no table or mapper + # args passed separately. + return _MapperConfig.setup_mapping(registry, cls, dict_, None, {}) + + +def _mapper( + registry: _RegistryType, + cls: Type[_O], + table: Optional[FromClause], + mapper_kw: _MapperKwArgs, +) -> Mapper[_O]: + _ImperativeMapperConfig(registry, cls, table, mapper_kw) + return cast("MappedClassProtocol[_O]", cls).__mapper__ + + +@util.preload_module("sqlalchemy.orm.decl_api") +def _is_declarative_props(obj: Any) -> bool: + _declared_attr_common = util.preloaded.orm_decl_api._declared_attr_common + + return isinstance(obj, (_declared_attr_common, util.classproperty)) + + +def _check_declared_props_nocascade( + obj: Any, name: str, cls: Type[_O] +) -> bool: + if _is_declarative_props(obj): + if getattr(obj, "_cascading", False): + util.warn( + "@declared_attr.cascading is not supported on the %s " + "attribute on class %s. This attribute invokes for " + "subclasses in any case." 
% (name, cls) + ) + return True + else: + return False + + +class _MapperConfig: + __slots__ = ( + "cls", + "classname", + "properties", + "declared_attr_reg", + "__weakref__", + ) + + cls: Type[Any] + classname: str + properties: util.OrderedDict[ + str, + Union[ + Sequence[NamedColumn[Any]], NamedColumn[Any], MapperProperty[Any] + ], + ] + declared_attr_reg: Dict[declared_attr[Any], Any] + + @classmethod + def setup_mapping( + cls, + registry: _RegistryType, + cls_: Type[_O], + dict_: _ClassDict, + table: Optional[FromClause], + mapper_kw: _MapperKwArgs, + ) -> Optional[_MapperConfig]: + manager = attributes.opt_manager_of_class(cls) + if manager and manager.class_ is cls_: + raise exc.InvalidRequestError( + f"Class {cls!r} already has been instrumented declaratively" + ) + + if cls_.__dict__.get("__abstract__", False): + return None + + defer_map = _get_immediate_cls_attr( + cls_, "_sa_decl_prepare_nocascade", strict=True + ) or hasattr(cls_, "_sa_decl_prepare") + + if defer_map: + return _DeferredMapperConfig( + registry, cls_, dict_, table, mapper_kw + ) + else: + return _ClassScanMapperConfig( + registry, cls_, dict_, table, mapper_kw + ) + + def __init__( + self, + registry: _RegistryType, + cls_: Type[Any], + mapper_kw: _MapperKwArgs, + ): + self.cls = util.assert_arg_type(cls_, type, "cls_") + self.classname = cls_.__name__ + self.properties = util.OrderedDict() + self.declared_attr_reg = {} + + if not mapper_kw.get("non_primary", False): + instrumentation.register_class( + self.cls, + finalize=False, + registry=registry, + declarative_scan=self, + init_method=registry.constructor, + ) + else: + manager = attributes.opt_manager_of_class(self.cls) + if not manager or not manager.is_mapped: + raise exc.InvalidRequestError( + "Class %s has no primary mapper configured. Configure " + "a primary mapper first before setting up a non primary " + "Mapper." % self.cls + ) + + def set_cls_attribute(self, attrname: str, value: _T) -> _T: + manager = instrumentation.manager_of_class(self.cls) + manager.install_member(attrname, value) + return value + + def map(self, mapper_kw: _MapperKwArgs = ...) -> Mapper[Any]: + raise NotImplementedError() + + def _early_mapping(self, mapper_kw: _MapperKwArgs) -> None: + self.map(mapper_kw) + + +class _ImperativeMapperConfig(_MapperConfig): + __slots__ = ("local_table", "inherits") + + def __init__( + self, + registry: _RegistryType, + cls_: Type[_O], + table: Optional[FromClause], + mapper_kw: _MapperKwArgs, + ): + super().__init__(registry, cls_, mapper_kw) + + self.local_table = self.set_cls_attribute("__table__", table) + + with mapperlib._CONFIGURE_MUTEX: + if not mapper_kw.get("non_primary", False): + clsregistry.add_class( + self.classname, self.cls, registry._class_registry + ) + + self._setup_inheritance(mapper_kw) + + self._early_mapping(mapper_kw) + + def map(self, mapper_kw: _MapperKwArgs = util.EMPTY_DICT) -> Mapper[Any]: + mapper_cls = Mapper + + return self.set_cls_attribute( + "__mapper__", + mapper_cls(self.cls, self.local_table, **mapper_kw), + ) + + def _setup_inheritance(self, mapper_kw: _MapperKwArgs) -> None: + cls = self.cls + + inherits = mapper_kw.get("inherits", None) + + if inherits is None: + # since we search for classical mappings now, search for + # multiple mapped bases as well and raise an error. 
+ inherits_search = [] + for base_ in cls.__bases__: + c = _resolve_for_abstract_or_classical(base_) + if c is None: + continue + + if _is_supercls_for_inherits(c) and c not in inherits_search: + inherits_search.append(c) + + if inherits_search: + if len(inherits_search) > 1: + raise exc.InvalidRequestError( + "Class %s has multiple mapped bases: %r" + % (cls, inherits_search) + ) + inherits = inherits_search[0] + elif isinstance(inherits, Mapper): + inherits = inherits.class_ + + self.inherits = inherits + + +class _CollectedAnnotation(NamedTuple): + raw_annotation: _AnnotationScanType + mapped_container: Optional[Type[Mapped[Any]]] + extracted_mapped_annotation: Union[_AnnotationScanType, str] + is_dataclass: bool + attr_value: Any + originating_module: str + originating_class: Type[Any] + + +class _ClassScanMapperConfig(_MapperConfig): + __slots__ = ( + "registry", + "clsdict_view", + "collected_attributes", + "collected_annotations", + "local_table", + "persist_selectable", + "declared_columns", + "column_ordering", + "column_copies", + "table_args", + "tablename", + "mapper_args", + "mapper_args_fn", + "table_fn", + "inherits", + "single", + "allow_dataclass_fields", + "dataclass_setup_arguments", + "is_dataclass_prior_to_mapping", + "allow_unmapped_annotations", + ) + + is_deferred = False + registry: _RegistryType + clsdict_view: _ClassDict + collected_annotations: Dict[str, _CollectedAnnotation] + collected_attributes: Dict[str, Any] + local_table: Optional[FromClause] + persist_selectable: Optional[FromClause] + declared_columns: util.OrderedSet[Column[Any]] + column_ordering: Dict[Column[Any], int] + column_copies: Dict[ + Union[MappedColumn[Any], Column[Any]], + Union[MappedColumn[Any], Column[Any]], + ] + tablename: Optional[str] + mapper_args: Mapping[str, Any] + table_args: Optional[_TableArgsType] + mapper_args_fn: Optional[Callable[[], Dict[str, Any]]] + inherits: Optional[Type[Any]] + single: bool + + is_dataclass_prior_to_mapping: bool + allow_unmapped_annotations: bool + + dataclass_setup_arguments: Optional[_DataclassArguments] + """if the class has SQLAlchemy native dataclass parameters, where + we will turn the class into a dataclass within the declarative mapping + process. + + """ + + allow_dataclass_fields: bool + """if true, look for dataclass-processed Field objects on the target + class as well as superclasses and extract ORM mapping directives from + the "metadata" attribute of each Field. + + if False, dataclass fields can still be used, however they won't be + mapped. + + """ + + def __init__( + self, + registry: _RegistryType, + cls_: Type[_O], + dict_: _ClassDict, + table: Optional[FromClause], + mapper_kw: _MapperKwArgs, + ): + # grab class dict before the instrumentation manager has been added. 
+ # reduces cycles + self.clsdict_view = ( + util.immutabledict(dict_) if dict_ else util.EMPTY_DICT + ) + super().__init__(registry, cls_, mapper_kw) + self.registry = registry + self.persist_selectable = None + + self.collected_attributes = {} + self.collected_annotations = {} + self.declared_columns = util.OrderedSet() + self.column_ordering = {} + self.column_copies = {} + self.single = False + self.dataclass_setup_arguments = dca = getattr( + self.cls, "_sa_apply_dc_transforms", None + ) + + self.allow_unmapped_annotations = getattr( + self.cls, "__allow_unmapped__", False + ) or bool(self.dataclass_setup_arguments) + + self.is_dataclass_prior_to_mapping = cld = dataclasses.is_dataclass( + cls_ + ) + + sdk = _get_immediate_cls_attr(cls_, "__sa_dataclass_metadata_key__") + + # we don't want to consume Field objects from a not-already-dataclass. + # the Field objects won't have their "name" or "type" populated, + # and while it seems like we could just set these on Field as we + # read them, Field is documented as "user read only" and we need to + # stay far away from any off-label use of dataclasses APIs. + if (not cld or dca) and sdk: + raise exc.InvalidRequestError( + "SQLAlchemy mapped dataclasses can't consume mapping " + "information from dataclass.Field() objects if the immediate " + "class is not already a dataclass." + ) + + # if already a dataclass, and __sa_dataclass_metadata_key__ present, + # then also look inside of dataclass.Field() objects yielded by + # dataclasses.get_fields(cls) when scanning for attributes + self.allow_dataclass_fields = bool(sdk and cld) + + self._setup_declared_events() + + self._scan_attributes() + + self._setup_dataclasses_transforms() + + with mapperlib._CONFIGURE_MUTEX: + clsregistry.add_class( + self.classname, self.cls, registry._class_registry + ) + + self._setup_inheriting_mapper(mapper_kw) + + self._extract_mappable_attributes() + + self._extract_declared_columns() + + self._setup_table(table) + + self._setup_inheriting_columns(mapper_kw) + + self._early_mapping(mapper_kw) + + def _setup_declared_events(self) -> None: + if _get_immediate_cls_attr(self.cls, "__declare_last__"): + + @event.listens_for(Mapper, "after_configured") + def after_configured() -> None: + cast( + "_DeclMappedClassProtocol[Any]", self.cls + ).__declare_last__() + + if _get_immediate_cls_attr(self.cls, "__declare_first__"): + + @event.listens_for(Mapper, "before_configured") + def before_configured() -> None: + cast( + "_DeclMappedClassProtocol[Any]", self.cls + ).__declare_first__() + + def _cls_attr_override_checker( + self, cls: Type[_O] + ) -> Callable[[str, Any], bool]: + """Produce a function that checks if a class has overridden an + attribute, taking SQLAlchemy-enabled dataclass fields into account. 
+ + """ + + if self.allow_dataclass_fields: + sa_dataclass_metadata_key = _get_immediate_cls_attr( + cls, "__sa_dataclass_metadata_key__" + ) + else: + sa_dataclass_metadata_key = None + + if not sa_dataclass_metadata_key: + + def attribute_is_overridden(key: str, obj: Any) -> bool: + return getattr(cls, key, obj) is not obj + + else: + all_datacls_fields = { + f.name: f.metadata[sa_dataclass_metadata_key] + for f in util.dataclass_fields(cls) + if sa_dataclass_metadata_key in f.metadata + } + local_datacls_fields = { + f.name: f.metadata[sa_dataclass_metadata_key] + for f in util.local_dataclass_fields(cls) + if sa_dataclass_metadata_key in f.metadata + } + + absent = object() + + def attribute_is_overridden(key: str, obj: Any) -> bool: + if _is_declarative_props(obj): + obj = obj.fget + + # this function likely has some failure modes still if + # someone is doing a deep mixing of the same attribute + # name as plain Python attribute vs. dataclass field. + + ret = local_datacls_fields.get(key, absent) + if _is_declarative_props(ret): + ret = ret.fget + + if ret is obj: + return False + elif ret is not absent: + return True + + all_field = all_datacls_fields.get(key, absent) + + ret = getattr(cls, key, obj) + + if ret is obj: + return False + + # for dataclasses, this could be the + # 'default' of the field. so filter more specifically + # for an already-mapped InstrumentedAttribute + if ret is not absent and isinstance( + ret, InstrumentedAttribute + ): + return True + + if all_field is obj: + return False + elif all_field is not absent: + return True + + # can't find another attribute + return False + + return attribute_is_overridden + + _include_dunders = { + "__table__", + "__mapper_args__", + "__tablename__", + "__table_args__", + } + + _match_exclude_dunders = re.compile(r"^(?:_sa_|__)") + + def _cls_attr_resolver( + self, cls: Type[Any] + ) -> Callable[[], Iterable[Tuple[str, Any, Any, bool]]]: + """produce a function to iterate the "attributes" of a class + which we want to consider for mapping, adjusting for SQLAlchemy fields + embedded in dataclass fields. 
+ + """ + cls_annotations = util.get_annotations(cls) + + cls_vars = vars(cls) + + _include_dunders = self._include_dunders + _match_exclude_dunders = self._match_exclude_dunders + + names = [ + n + for n in util.merge_lists_w_ordering( + list(cls_vars), list(cls_annotations) + ) + if not _match_exclude_dunders.match(n) or n in _include_dunders + ] + + if self.allow_dataclass_fields: + sa_dataclass_metadata_key: Optional[str] = _get_immediate_cls_attr( + cls, "__sa_dataclass_metadata_key__" + ) + else: + sa_dataclass_metadata_key = None + + if not sa_dataclass_metadata_key: + + def local_attributes_for_class() -> ( + Iterable[Tuple[str, Any, Any, bool]] + ): + return ( + ( + name, + cls_vars.get(name), + cls_annotations.get(name), + False, + ) + for name in names + ) + + else: + dataclass_fields = { + field.name: field for field in util.local_dataclass_fields(cls) + } + + fixed_sa_dataclass_metadata_key = sa_dataclass_metadata_key + + def local_attributes_for_class() -> ( + Iterable[Tuple[str, Any, Any, bool]] + ): + for name in names: + field = dataclass_fields.get(name, None) + if field and sa_dataclass_metadata_key in field.metadata: + yield field.name, _as_dc_declaredattr( + field.metadata, fixed_sa_dataclass_metadata_key + ), cls_annotations.get(field.name), True + else: + yield name, cls_vars.get(name), cls_annotations.get( + name + ), False + + return local_attributes_for_class + + def _scan_attributes(self) -> None: + cls = self.cls + + cls_as_Decl = cast("_DeclMappedClassProtocol[Any]", cls) + + clsdict_view = self.clsdict_view + collected_attributes = self.collected_attributes + column_copies = self.column_copies + _include_dunders = self._include_dunders + mapper_args_fn = None + table_args = inherited_table_args = None + table_fn = None + tablename = None + fixed_table = "__table__" in clsdict_view + + attribute_is_overridden = self._cls_attr_override_checker(self.cls) + + bases = [] + + for base in cls.__mro__: + # collect bases and make sure standalone columns are copied + # to be the column they will ultimately be on the class, + # so that declared_attr functions use the right columns. + # need to do this all the way up the hierarchy first + # (see #8190) + + class_mapped = base is not cls and _is_supercls_for_inherits(base) + + local_attributes_for_class = self._cls_attr_resolver(base) + + if not class_mapped and base is not cls: + locally_collected_columns = self._produce_column_copies( + local_attributes_for_class, + attribute_is_overridden, + fixed_table, + base, + ) + else: + locally_collected_columns = {} + + bases.append( + ( + base, + class_mapped, + local_attributes_for_class, + locally_collected_columns, + ) + ) + + for ( + base, + class_mapped, + local_attributes_for_class, + locally_collected_columns, + ) in bases: + # this transfer can also take place as we scan each name + # for finer-grained control of how collected_attributes is + # populated, as this is what impacts column ordering. + # however it's simpler to get it out of the way here. + collected_attributes.update(locally_collected_columns) + + for ( + name, + obj, + annotation, + is_dataclass_field, + ) in local_attributes_for_class(): + if name in _include_dunders: + if name == "__mapper_args__": + check_decl = _check_declared_props_nocascade( + obj, name, cls + ) + if not mapper_args_fn and ( + not class_mapped or check_decl + ): + # don't even invoke __mapper_args__ until + # after we've determined everything about the + # mapped table. 
+ # make a copy of it so a class-level dictionary + # is not overwritten when we update column-based + # arguments. + def _mapper_args_fn() -> Dict[str, Any]: + return dict(cls_as_Decl.__mapper_args__) + + mapper_args_fn = _mapper_args_fn + + elif name == "__tablename__": + check_decl = _check_declared_props_nocascade( + obj, name, cls + ) + if not tablename and (not class_mapped or check_decl): + tablename = cls_as_Decl.__tablename__ + elif name == "__table__": + check_decl = _check_declared_props_nocascade( + obj, name, cls + ) + # if a @declared_attr using "__table__" is detected, + # wrap up a callable to look for "__table__" from + # the final concrete class when we set up a table. + # this was fixed by + # #11509, regression in 2.0 from version 1.4. + if check_decl and not table_fn: + # don't even invoke __table__ until we're ready + def _table_fn() -> FromClause: + return cls_as_Decl.__table__ + + table_fn = _table_fn + + elif name == "__table_args__": + check_decl = _check_declared_props_nocascade( + obj, name, cls + ) + if not table_args and (not class_mapped or check_decl): + table_args = cls_as_Decl.__table_args__ + if not isinstance( + table_args, (tuple, dict, type(None)) + ): + raise exc.ArgumentError( + "__table_args__ value must be a tuple, " + "dict, or None" + ) + if base is not cls: + inherited_table_args = True + else: + # any other dunder names; should not be here + # as we have tested for all four names in + # _include_dunders + assert False + elif class_mapped: + if _is_declarative_props(obj) and not obj._quiet: + util.warn( + "Regular (i.e. not __special__) " + "attribute '%s.%s' uses @declared_attr, " + "but owning class %s is mapped - " + "not applying to subclass %s." + % (base.__name__, name, base, cls) + ) + + continue + elif base is not cls: + # we're a mixin, abstract base, or something that is + # acting like that for now. + + if isinstance(obj, (Column, MappedColumn)): + # already copied columns to the mapped class. + continue + elif isinstance(obj, MapperProperty): + raise exc.InvalidRequestError( + "Mapper properties (i.e. deferred," + "column_property(), relationship(), etc.) must " + "be declared as @declared_attr callables " + "on declarative mixin classes. For dataclass " + "field() objects, use a lambda:" + ) + elif _is_declarative_props(obj): + # tried to get overloads to tell this to + # pylance, no luck + assert obj is not None + + if obj._cascading: + if name in clsdict_view: + # unfortunately, while we can use the user- + # defined attribute here to allow a clean + # override, if there's another + # subclass below then it still tries to use + # this. not sure if there is enough + # information here to add this as a feature + # later on. + util.warn( + "Attribute '%s' on class %s cannot be " + "processed due to " + "@declared_attr.cascading; " + "skipping" % (name, cls) + ) + collected_attributes[name] = column_copies[obj] = ( + ret + ) = obj.__get__(obj, cls) + setattr(cls, name, ret) + else: + if is_dataclass_field: + # access attribute using normal class access + # first, to see if it's been mapped on a + # superclass. note if the dataclasses.field() + # has "default", this value can be anything. + ret = getattr(cls, name, None) + + # so, if it's anything that's not ORM + # mapped, assume we should invoke the + # declared_attr + if not isinstance(ret, InspectionAttr): + ret = obj.fget() + else: + # access attribute using normal class access. 
+ # if the declared attr already took place + # on a superclass that is mapped, then + # this is no longer a declared_attr, it will + # be the InstrumentedAttribute + ret = getattr(cls, name) + + # correct for proxies created from hybrid_property + # or similar. note there is no known case that + # produces nested proxies, so we are only + # looking one level deep right now. + + if ( + isinstance(ret, InspectionAttr) + and attr_is_internal_proxy(ret) + and not isinstance( + ret.original_property, MapperProperty + ) + ): + ret = ret.descriptor + + collected_attributes[name] = column_copies[obj] = ( + ret + ) + + if ( + isinstance(ret, (Column, MapperProperty)) + and ret.doc is None + ): + ret.doc = obj.__doc__ + + self._collect_annotation( + name, + obj._collect_return_annotation(), + base, + True, + obj, + ) + elif _is_mapped_annotation(annotation, cls, base): + # Mapped annotation without any object. + # product_column_copies should have handled this. + # if future support for other MapperProperty, + # then test if this name is already handled and + # otherwise proceed to generate. + if not fixed_table: + assert ( + name in collected_attributes + or attribute_is_overridden(name, None) + ) + continue + else: + # here, the attribute is some other kind of + # property that we assume is not part of the + # declarative mapping. however, check for some + # more common mistakes + self._warn_for_decl_attributes(base, name, obj) + elif is_dataclass_field and ( + name not in clsdict_view or clsdict_view[name] is not obj + ): + # here, we are definitely looking at the target class + # and not a superclass. this is currently a + # dataclass-only path. if the name is only + # a dataclass field and isn't in local cls.__dict__, + # put the object there. + # assert that the dataclass-enabled resolver agrees + # with what we are seeing + + assert not attribute_is_overridden(name, obj) + + if _is_declarative_props(obj): + obj = obj.fget() + + collected_attributes[name] = obj + self._collect_annotation( + name, annotation, base, False, obj + ) + else: + collected_annotation = self._collect_annotation( + name, annotation, base, None, obj + ) + is_mapped = ( + collected_annotation is not None + and collected_annotation.mapped_container is not None + ) + generated_obj = ( + collected_annotation.attr_value + if collected_annotation is not None + else obj + ) + if obj is None and not fixed_table and is_mapped: + collected_attributes[name] = ( + generated_obj + if generated_obj is not None + else MappedColumn() + ) + elif name in clsdict_view: + collected_attributes[name] = obj + # else if the name is not in the cls.__dict__, + # don't collect it as an attribute. + # we will see the annotation only, which is meaningful + # both for mapping and dataclasses setup + + if inherited_table_args and not tablename: + table_args = None + + self.table_args = table_args + self.tablename = tablename + self.mapper_args_fn = mapper_args_fn + self.table_fn = table_fn + + def _setup_dataclasses_transforms(self) -> None: + dataclass_setup_arguments = self.dataclass_setup_arguments + if not dataclass_setup_arguments: + return + + # can't use is_dataclass since it uses hasattr + if "__dataclass_fields__" in self.cls.__dict__: + raise exc.InvalidRequestError( + f"Class {self.cls} is already a dataclass; ensure that " + "base classes / decorator styles of establishing dataclasses " + "are not being mixed. 
" + "This can happen if a class that inherits from " + "'MappedAsDataclass', even indirectly, is been mapped with " + "'@registry.mapped_as_dataclass'" + ) + + # can't create a dataclass if __table__ is already there. This would + # fail an assertion when calling _get_arguments_for_make_dataclass: + # assert False, "Mapped[] received without a mapping declaration" + if "__table__" in self.cls.__dict__: + raise exc.InvalidRequestError( + f"Class {self.cls} already defines a '__table__'. " + "ORM Annotated Dataclasses do not support a pre-existing " + "'__table__' element" + ) + + warn_for_non_dc_attrs = collections.defaultdict(list) + + def _allow_dataclass_field( + key: str, originating_class: Type[Any] + ) -> bool: + if ( + originating_class is not self.cls + and "__dataclass_fields__" not in originating_class.__dict__ + ): + warn_for_non_dc_attrs[originating_class].append(key) + + return True + + manager = instrumentation.manager_of_class(self.cls) + assert manager is not None + + field_list = [ + _AttributeOptions._get_arguments_for_make_dataclass( + key, + anno, + mapped_container, + self.collected_attributes.get(key, _NoArg.NO_ARG), + ) + for key, anno, mapped_container in ( + ( + key, + mapped_anno if mapped_anno else raw_anno, + mapped_container, + ) + for key, ( + raw_anno, + mapped_container, + mapped_anno, + is_dc, + attr_value, + originating_module, + originating_class, + ) in self.collected_annotations.items() + if _allow_dataclass_field(key, originating_class) + and ( + key not in self.collected_attributes + # issue #9226; check for attributes that we've collected + # which are already instrumented, which we would assume + # mean we are in an ORM inheritance mapping and this + # attribute is already mapped on the superclass. Under + # no circumstance should any QueryableAttribute be sent to + # the dataclass() function; anything that's mapped should + # be Field and that's it + or not isinstance( + self.collected_attributes[key], QueryableAttribute + ) + ) + ) + ] + + if warn_for_non_dc_attrs: + for ( + originating_class, + non_dc_attrs, + ) in warn_for_non_dc_attrs.items(): + util.warn_deprecated( + f"When transforming {self.cls} to a dataclass, " + f"attribute(s) " + f"{', '.join(repr(key) for key in non_dc_attrs)} " + f"originates from superclass " + f"{originating_class}, which is not a dataclass. This " + f"usage is deprecated and will raise an error in " + f"SQLAlchemy 2.1. 
When declaring SQLAlchemy Declarative " + f"Dataclasses, ensure that all mixin classes and other " + f"superclasses which include attributes are also a " + f"subclass of MappedAsDataclass.", + "2.0", + code="dcmx", + ) + + annotations = {} + defaults = {} + for item in field_list: + if len(item) == 2: + name, tp = item + elif len(item) == 3: + name, tp, spec = item + defaults[name] = spec + else: + assert False + annotations[name] = tp + + for k, v in defaults.items(): + setattr(self.cls, k, v) + + self._apply_dataclasses_to_any_class( + dataclass_setup_arguments, self.cls, annotations + ) + + @classmethod + def _update_annotations_for_non_mapped_class( + cls, klass: Type[_O] + ) -> Mapping[str, _AnnotationScanType]: + cls_annotations = util.get_annotations(klass) + + new_anno = {} + for name, annotation in cls_annotations.items(): + if _is_mapped_annotation(annotation, klass, klass): + extracted = _extract_mapped_subtype( + annotation, + klass, + klass.__module__, + name, + type(None), + required=False, + is_dataclass_field=False, + expect_mapped=False, + ) + if extracted: + inner, _ = extracted + new_anno[name] = inner + else: + new_anno[name] = annotation + return new_anno + + @classmethod + def _apply_dataclasses_to_any_class( + cls, + dataclass_setup_arguments: _DataclassArguments, + klass: Type[_O], + use_annotations: Mapping[str, _AnnotationScanType], + ) -> None: + cls._assert_dc_arguments(dataclass_setup_arguments) + + dataclass_callable = dataclass_setup_arguments["dataclass_callable"] + if dataclass_callable is _NoArg.NO_ARG: + dataclass_callable = dataclasses.dataclass + + restored: Optional[Any] + + if use_annotations: + # apply constructed annotations that should look "normal" to a + # dataclasses callable, based on the fields present. This + # means remove the Mapped[] container and ensure all Field + # entries have an annotation + restored = getattr(klass, "__annotations__", None) + klass.__annotations__ = cast("Dict[str, Any]", use_annotations) + else: + restored = None + + try: + dataclass_callable( + klass, + **{ + k: v + for k, v in dataclass_setup_arguments.items() + if v is not _NoArg.NO_ARG and k != "dataclass_callable" + }, + ) + except (TypeError, ValueError) as ex: + raise exc.InvalidRequestError( + f"Python dataclasses error encountered when creating " + f"dataclass for {klass.__name__!r}: " + f"{ex!r}. 
Please refer to Python dataclasses " + "documentation for additional information.", + code="dcte", + ) from ex + finally: + # restore original annotations outside of the dataclasses + # process; for mixins and __abstract__ superclasses, SQLAlchemy + # Declarative will need to see the Mapped[] container inside the + # annotations in order to map subclasses + if use_annotations: + if restored is None: + del klass.__annotations__ + else: + klass.__annotations__ = restored + + @classmethod + def _assert_dc_arguments(cls, arguments: _DataclassArguments) -> None: + allowed = { + "init", + "repr", + "order", + "eq", + "unsafe_hash", + "kw_only", + "match_args", + "dataclass_callable", + } + disallowed_args = set(arguments).difference(allowed) + if disallowed_args: + msg = ", ".join(f"{arg!r}" for arg in sorted(disallowed_args)) + raise exc.ArgumentError( + f"Dataclass argument(s) {msg} are not accepted" + ) + + def _collect_annotation( + self, + name: str, + raw_annotation: _AnnotationScanType, + originating_class: Type[Any], + expect_mapped: Optional[bool], + attr_value: Any, + ) -> Optional[_CollectedAnnotation]: + if name in self.collected_annotations: + return self.collected_annotations[name] + + if raw_annotation is None: + return None + + is_dataclass = self.is_dataclass_prior_to_mapping + allow_unmapped = self.allow_unmapped_annotations + + if expect_mapped is None: + is_dataclass_field = isinstance(attr_value, dataclasses.Field) + expect_mapped = ( + not is_dataclass_field + and not allow_unmapped + and ( + attr_value is None + or isinstance(attr_value, _MappedAttribute) + ) + ) + + is_dataclass_field = False + extracted = _extract_mapped_subtype( + raw_annotation, + self.cls, + originating_class.__module__, + name, + type(attr_value), + required=False, + is_dataclass_field=is_dataclass_field, + expect_mapped=expect_mapped and not is_dataclass, + ) + if extracted is None: + # ClassVar can come out here + return None + + extracted_mapped_annotation, mapped_container = extracted + + if attr_value is None and not is_literal(extracted_mapped_annotation): + for elem in get_args(extracted_mapped_annotation): + if is_fwd_ref( + elem, check_generic=True, check_for_plain_string=True + ): + elem = de_stringify_annotation( + self.cls, + elem, + originating_class.__module__, + include_generic=True, + ) + # look in Annotated[...] for an ORM construct, + # such as Annotated[int, mapped_column(primary_key=True)] + if isinstance(elem, _IntrospectsAnnotations): + attr_value = elem.found_in_pep593_annotated() + + self.collected_annotations[name] = ca = _CollectedAnnotation( + raw_annotation, + mapped_container, + extracted_mapped_annotation, + is_dataclass, + attr_value, + originating_class.__module__, + originating_class, + ) + return ca + + def _warn_for_decl_attributes( + self, cls: Type[Any], key: str, c: Any + ) -> None: + if isinstance(c, expression.ColumnElement): + util.warn( + f"Attribute '{key}' on class {cls} appears to " + "be a non-schema SQLAlchemy expression " + "object; this won't be part of the declarative mapping. " + "To map arbitrary expressions, use ``column_property()`` " + "or a similar function such as ``deferred()``, " + "``query_expression()`` etc. 
" + ) + + def _produce_column_copies( + self, + attributes_for_class: Callable[ + [], Iterable[Tuple[str, Any, Any, bool]] + ], + attribute_is_overridden: Callable[[str, Any], bool], + fixed_table: bool, + originating_class: Type[Any], + ) -> Dict[str, Union[Column[Any], MappedColumn[Any]]]: + cls = self.cls + dict_ = self.clsdict_view + locally_collected_attributes = {} + column_copies = self.column_copies + # copy mixin columns to the mapped class + + for name, obj, annotation, is_dataclass in attributes_for_class(): + if ( + not fixed_table + and obj is None + and _is_mapped_annotation(annotation, cls, originating_class) + ): + # obj is None means this is the annotation only path + + if attribute_is_overridden(name, obj): + # perform same "overridden" check as we do for + # Column/MappedColumn, this is how a mixin col is not + # applied to an inherited subclass that does not have + # the mixin. the anno-only path added here for + # #9564 + continue + + collected_annotation = self._collect_annotation( + name, annotation, originating_class, True, obj + ) + obj = ( + collected_annotation.attr_value + if collected_annotation is not None + else obj + ) + if obj is None: + obj = MappedColumn() + + locally_collected_attributes[name] = obj + setattr(cls, name, obj) + + elif isinstance(obj, (Column, MappedColumn)): + if attribute_is_overridden(name, obj): + # if column has been overridden + # (like by the InstrumentedAttribute of the + # superclass), skip. don't collect the annotation + # either (issue #8718) + continue + + collected_annotation = self._collect_annotation( + name, annotation, originating_class, True, obj + ) + obj = ( + collected_annotation.attr_value + if collected_annotation is not None + else obj + ) + + if name not in dict_ and not ( + "__table__" in dict_ + and (getattr(obj, "name", None) or name) + in dict_["__table__"].c + ): + if obj.foreign_keys: + for fk in obj.foreign_keys: + if ( + fk._table_column is not None + and fk._table_column.table is None + ): + raise exc.InvalidRequestError( + "Columns with foreign keys to " + "non-table-bound " + "columns must be declared as " + "@declared_attr callables " + "on declarative mixin classes. " + "For dataclass " + "field() objects, use a lambda:." + ) + + column_copies[obj] = copy_ = obj._copy() + + locally_collected_attributes[name] = copy_ + setattr(cls, name, copy_) + + return locally_collected_attributes + + def _extract_mappable_attributes(self) -> None: + cls = self.cls + collected_attributes = self.collected_attributes + + our_stuff = self.properties + + _include_dunders = self._include_dunders + + late_mapped = _get_immediate_cls_attr( + cls, "_sa_decl_prepare_nocascade", strict=True + ) + + allow_unmapped_annotations = self.allow_unmapped_annotations + expect_annotations_wo_mapped = ( + allow_unmapped_annotations or self.is_dataclass_prior_to_mapping + ) + + look_for_dataclass_things = bool(self.dataclass_setup_arguments) + + for k in list(collected_attributes): + if k in _include_dunders: + continue + + value = collected_attributes[k] + + if _is_declarative_props(value): + # @declared_attr in collected_attributes only occurs here for a + # @declared_attr that's directly on the mapped class; + # for a mixin, these have already been evaluated + if value._cascading: + util.warn( + "Use of @declared_attr.cascading only applies to " + "Declarative 'mixin' and 'abstract' classes. 
" + "Currently, this flag is ignored on mapped class " + "%s" % self.cls + ) + + value = getattr(cls, k) + + elif ( + isinstance(value, QueryableAttribute) + and value.class_ is not cls + and value.key != k + ): + # detect a QueryableAttribute that's already mapped being + # assigned elsewhere in userland, turn into a synonym() + value = SynonymProperty(value.key) + setattr(cls, k, value) + + if ( + isinstance(value, tuple) + and len(value) == 1 + and isinstance(value[0], (Column, _MappedAttribute)) + ): + util.warn( + "Ignoring declarative-like tuple value of attribute " + "'%s': possibly a copy-and-paste error with a comma " + "accidentally placed at the end of the line?" % k + ) + continue + elif look_for_dataclass_things and isinstance( + value, dataclasses.Field + ): + # we collected a dataclass Field; dataclasses would have + # set up the correct state on the class + continue + elif not isinstance(value, (Column, _DCAttributeOptions)): + # using @declared_attr for some object that + # isn't Column/MapperProperty/_DCAttributeOptions; remove + # from the clsdict_view + # and place the evaluated value onto the class. + collected_attributes.pop(k) + self._warn_for_decl_attributes(cls, k, value) + if not late_mapped: + setattr(cls, k, value) + continue + # we expect to see the name 'metadata' in some valid cases; + # however at this point we see it's assigned to something trying + # to be mapped, so raise for that. + # TODO: should "registry" here be also? might be too late + # to change that now (2.0 betas) + elif k in ("metadata",): + raise exc.InvalidRequestError( + f"Attribute name '{k}' is reserved when using the " + "Declarative API." + ) + elif isinstance(value, Column): + _undefer_column_name( + k, self.column_copies.get(value, value) # type: ignore + ) + else: + if isinstance(value, _IntrospectsAnnotations): + ( + annotation, + mapped_container, + extracted_mapped_annotation, + is_dataclass, + attr_value, + originating_module, + originating_class, + ) = self.collected_annotations.get( + k, (None, None, None, False, None, None, None) + ) + + # issue #8692 - don't do any annotation interpretation if + # an annotation were present and a container such as + # Mapped[] etc. were not used. If annotation is None, + # do declarative_scan so that the property can raise + # for required + if ( + mapped_container is not None + or annotation is None + # issue #10516: need to do declarative_scan even with + # a non-Mapped annotation if we are doing + # __allow_unmapped__, for things like col.name + # assignment + or allow_unmapped_annotations + ): + try: + value.declarative_scan( + self, + self.registry, + cls, + originating_module, + k, + mapped_container, + annotation, + extracted_mapped_annotation, + is_dataclass, + ) + except NameError as ne: + raise orm_exc.MappedAnnotationError( + f"Could not resolve all types within mapped " + f'annotation: "{annotation}". Ensure all ' + f"types are written correctly and are " + f"imported within the module in use." + ) from ne + else: + # assert that we were expecting annotations + # without Mapped[] were going to be passed. + # otherwise an error should have been raised + # by util._extract_mapped_subtype before we got here. 
+ assert expect_annotations_wo_mapped + + if isinstance(value, _DCAttributeOptions): + if ( + value._has_dataclass_arguments + and not look_for_dataclass_things + ): + if isinstance(value, MapperProperty): + argnames = [ + "init", + "default_factory", + "repr", + "default", + ] + else: + argnames = ["init", "default_factory", "repr"] + + args = { + a + for a in argnames + if getattr( + value._attribute_options, f"dataclasses_{a}" + ) + is not _NoArg.NO_ARG + } + + raise exc.ArgumentError( + f"Attribute '{k}' on class {cls} includes " + f"dataclasses argument(s): " + f"{', '.join(sorted(repr(a) for a in args))} but " + f"class does not specify " + "SQLAlchemy native dataclass configuration." + ) + + if not isinstance(value, (MapperProperty, _MapsColumns)): + # filter for _DCAttributeOptions objects that aren't + # MapperProperty / mapped_column(). Currently this + # includes AssociationProxy. pop it from the things + # we're going to map and set it up as a descriptor + # on the class. + collected_attributes.pop(k) + + # Assoc Prox (or other descriptor object that may + # use _DCAttributeOptions) is usually here, except if + # 1. we're a + # dataclass, dataclasses would have removed the + # attr here or 2. assoc proxy is coming from a + # superclass, we want it to be direct here so it + # tracks state or 3. assoc prox comes from + # declared_attr, uncommon case + setattr(cls, k, value) + continue + + our_stuff[k] = value + + def _extract_declared_columns(self) -> None: + our_stuff = self.properties + + # extract columns from the class dict + declared_columns = self.declared_columns + column_ordering = self.column_ordering + name_to_prop_key = collections.defaultdict(set) + + for key, c in list(our_stuff.items()): + if isinstance(c, _MapsColumns): + mp_to_assign = c.mapper_property_to_assign + if mp_to_assign: + our_stuff[key] = mp_to_assign + else: + # if no mapper property to assign, this currently means + # this is a MappedColumn that will produce a Column for us + del our_stuff[key] + + for col, sort_order in c.columns_to_assign: + if not isinstance(c, CompositeProperty): + name_to_prop_key[col.name].add(key) + declared_columns.add(col) + + # we would assert this, however we want the below + # warning to take effect instead. See #9630 + # assert col not in column_ordering + + column_ordering[col] = sort_order + + # if this is a MappedColumn and the attribute key we + # have is not what the column has for its key, map the + # Column explicitly under the attribute key name. + # otherwise, Mapper will map it under the column key. + if mp_to_assign is None and key != col.key: + our_stuff[key] = col + elif isinstance(c, Column): + # undefer previously occurred here, and now occurs earlier. + # ensure every column we get here has been named + assert c.name is not None + name_to_prop_key[c.name].add(key) + declared_columns.add(c) + # if the column is the same name as the key, + # remove it from the explicit properties dict. + # the normal rules for assigning column-based properties + # will take over, including precedence of columns + # in multi-column ColumnProperties. + if key == c.key: + del our_stuff[key] + + for name, keys in name_to_prop_key.items(): + if len(keys) > 1: + util.warn( + "On class %r, Column object %r named " + "directly multiple times, " + "only one will be used: %s. 
" + "Consider using orm.synonym instead" + % (self.classname, name, (", ".join(sorted(keys)))) + ) + + def _setup_table(self, table: Optional[FromClause] = None) -> None: + cls = self.cls + cls_as_Decl = cast("MappedClassProtocol[Any]", cls) + + tablename = self.tablename + table_args = self.table_args + clsdict_view = self.clsdict_view + declared_columns = self.declared_columns + column_ordering = self.column_ordering + + manager = attributes.manager_of_class(cls) + + if ( + self.table_fn is None + and "__table__" not in clsdict_view + and table is None + ): + if hasattr(cls, "__table_cls__"): + table_cls = cast( + Type[Table], + util.unbound_method_to_callable(cls.__table_cls__), # type: ignore # noqa: E501 + ) + else: + table_cls = Table + + if tablename is not None: + args: Tuple[Any, ...] = () + table_kw: Dict[str, Any] = {} + + if table_args: + if isinstance(table_args, dict): + table_kw = table_args + elif isinstance(table_args, tuple): + if isinstance(table_args[-1], dict): + args, table_kw = table_args[0:-1], table_args[-1] + else: + args = table_args + + autoload_with = clsdict_view.get("__autoload_with__") + if autoload_with: + table_kw["autoload_with"] = autoload_with + + autoload = clsdict_view.get("__autoload__") + if autoload: + table_kw["autoload"] = True + + sorted_columns = sorted( + declared_columns, + key=lambda c: column_ordering.get(c, 0), + ) + table = self.set_cls_attribute( + "__table__", + table_cls( + tablename, + self._metadata_for_cls(manager), + *sorted_columns, + *args, + **table_kw, + ), + ) + else: + if table is None: + if self.table_fn: + table = self.set_cls_attribute( + "__table__", self.table_fn() + ) + else: + table = cls_as_Decl.__table__ + if declared_columns: + for c in declared_columns: + if not table.c.contains_column(c): + raise exc.ArgumentError( + "Can't add additional column %r when " + "specifying __table__" % c.key + ) + + self.local_table = table + + def _metadata_for_cls(self, manager: ClassManager[Any]) -> MetaData: + meta: Optional[MetaData] = getattr(self.cls, "metadata", None) + if meta is not None: + return meta + else: + return manager.registry.metadata + + def _setup_inheriting_mapper(self, mapper_kw: _MapperKwArgs) -> None: + cls = self.cls + + inherits = mapper_kw.get("inherits", None) + + if inherits is None: + # since we search for classical mappings now, search for + # multiple mapped bases as well and raise an error. + inherits_search = [] + for base_ in cls.__bases__: + c = _resolve_for_abstract_or_classical(base_) + if c is None: + continue + + if _is_supercls_for_inherits(c) and c not in inherits_search: + inherits_search.append(c) + + if inherits_search: + if len(inherits_search) > 1: + raise exc.InvalidRequestError( + "Class %s has multiple mapped bases: %r" + % (cls, inherits_search) + ) + inherits = inherits_search[0] + elif isinstance(inherits, Mapper): + inherits = inherits.class_ + + self.inherits = inherits + + clsdict_view = self.clsdict_view + if "__table__" not in clsdict_view and self.tablename is None: + self.single = True + + def _setup_inheriting_columns(self, mapper_kw: _MapperKwArgs) -> None: + table = self.local_table + cls = self.cls + table_args = self.table_args + declared_columns = self.declared_columns + + if ( + table is None + and self.inherits is None + and not _get_immediate_cls_attr(cls, "__no_table__") + ): + raise exc.InvalidRequestError( + "Class %r does not have a __table__ or __tablename__ " + "specified and does not inherit from an existing " + "table-mapped class." 
% cls + ) + elif self.inherits: + inherited_mapper_or_config = _declared_mapping_info(self.inherits) + assert inherited_mapper_or_config is not None + inherited_table = inherited_mapper_or_config.local_table + inherited_persist_selectable = ( + inherited_mapper_or_config.persist_selectable + ) + + if table is None: + # single table inheritance. + # ensure no table args + if table_args: + raise exc.ArgumentError( + "Can't place __table_args__ on an inherited class " + "with no table." + ) + + # add any columns declared here to the inherited table. + if declared_columns and not isinstance(inherited_table, Table): + raise exc.ArgumentError( + f"Can't declare columns on single-table-inherited " + f"subclass {self.cls}; superclass {self.inherits} " + "is not mapped to a Table" + ) + + for col in declared_columns: + assert inherited_table is not None + if col.name in inherited_table.c: + if inherited_table.c[col.name] is col: + continue + raise exc.ArgumentError( + f"Column '{col}' on class {cls.__name__} " + f"conflicts with existing column " + f"'{inherited_table.c[col.name]}'. If using " + f"Declarative, consider using the " + "use_existing_column parameter of mapped_column() " + "to resolve conflicts." + ) + if col.primary_key: + raise exc.ArgumentError( + "Can't place primary key columns on an inherited " + "class with no table." + ) + + if TYPE_CHECKING: + assert isinstance(inherited_table, Table) + + inherited_table.append_column(col) + if ( + inherited_persist_selectable is not None + and inherited_persist_selectable is not inherited_table + ): + inherited_persist_selectable._refresh_for_new_column( + col + ) + + def _prepare_mapper_arguments(self, mapper_kw: _MapperKwArgs) -> None: + properties = self.properties + + if self.mapper_args_fn: + mapper_args = self.mapper_args_fn() + else: + mapper_args = {} + + if mapper_kw: + mapper_args.update(mapper_kw) + + if "properties" in mapper_args: + properties = dict(properties) + properties.update(mapper_args["properties"]) + + # make sure that column copies are used rather + # than the original columns from any mixins + for k in ("version_id_col", "polymorphic_on"): + if k in mapper_args: + v = mapper_args[k] + mapper_args[k] = self.column_copies.get(v, v) + + if "primary_key" in mapper_args: + mapper_args["primary_key"] = [ + self.column_copies.get(v, v) + for v in util.to_list(mapper_args["primary_key"]) + ] + + if "inherits" in mapper_args: + inherits_arg = mapper_args["inherits"] + if isinstance(inherits_arg, Mapper): + inherits_arg = inherits_arg.class_ + + if inherits_arg is not self.inherits: + raise exc.InvalidRequestError( + "mapper inherits argument given for non-inheriting " + "class %s" % (mapper_args["inherits"]) + ) + + if self.inherits: + mapper_args["inherits"] = self.inherits + + if self.inherits and not mapper_args.get("concrete", False): + # note the superclass is expected to have a Mapper assigned and + # not be a deferred config, as this is called within map() + inherited_mapper = class_mapper(self.inherits, False) + inherited_table = inherited_mapper.local_table + + # single or joined inheritance + # exclude any cols on the inherited table which are + # not mapped on the parent class, to avoid + # mapping columns specific to sibling/nephew classes + if "exclude_properties" not in mapper_args: + mapper_args["exclude_properties"] = exclude_properties = { + c.key + for c in inherited_table.c + if c not in inherited_mapper._columntoproperty + }.union(inherited_mapper.exclude_properties or ()) + 
exclude_properties.difference_update( + [c.key for c in self.declared_columns] + ) + + # look through columns in the current mapper that + # are keyed to a propname different than the colname + # (if names were the same, we'd have popped it out above, + # in which case the mapper makes this combination). + # See if the superclass has a similar column property. + # If so, join them together. + for k, col in list(properties.items()): + if not isinstance(col, expression.ColumnElement): + continue + if k in inherited_mapper._props: + p = inherited_mapper._props[k] + if isinstance(p, ColumnProperty): + # note here we place the subclass column + # first. See [ticket:1892] for background. + properties[k] = [col] + p.columns + result_mapper_args = mapper_args.copy() + result_mapper_args["properties"] = properties + self.mapper_args = result_mapper_args + + def map(self, mapper_kw: _MapperKwArgs = util.EMPTY_DICT) -> Mapper[Any]: + self._prepare_mapper_arguments(mapper_kw) + if hasattr(self.cls, "__mapper_cls__"): + mapper_cls = cast( + "Type[Mapper[Any]]", + util.unbound_method_to_callable( + self.cls.__mapper_cls__ # type: ignore + ), + ) + else: + mapper_cls = Mapper + + return self.set_cls_attribute( + "__mapper__", + mapper_cls(self.cls, self.local_table, **self.mapper_args), + ) + + +@util.preload_module("sqlalchemy.orm.decl_api") +def _as_dc_declaredattr( + field_metadata: Mapping[str, Any], sa_dataclass_metadata_key: str +) -> Any: + # wrap lambdas inside dataclass fields inside an ad-hoc declared_attr. + # we can't write it because field.metadata is immutable :( so we have + # to go through extra trouble to compare these + decl_api = util.preloaded.orm_decl_api + obj = field_metadata[sa_dataclass_metadata_key] + if callable(obj) and not isinstance(obj, decl_api.declared_attr): + return decl_api.declared_attr(obj) + else: + return obj + + +class _DeferredMapperConfig(_ClassScanMapperConfig): + _cls: weakref.ref[Type[Any]] + + is_deferred = True + + _configs: util.OrderedDict[ + weakref.ref[Type[Any]], _DeferredMapperConfig + ] = util.OrderedDict() + + def _early_mapping(self, mapper_kw: _MapperKwArgs) -> None: + pass + + # mypy disallows plain property override of variable + @property # type: ignore + def cls(self) -> Type[Any]: + return self._cls() # type: ignore + + @cls.setter + def cls(self, class_: Type[Any]) -> None: + self._cls = weakref.ref(class_, self._remove_config_cls) + self._configs[self._cls] = self + + @classmethod + def _remove_config_cls(cls, ref: weakref.ref[Type[Any]]) -> None: + cls._configs.pop(ref, None) + + @classmethod + def has_cls(cls, class_: Type[Any]) -> bool: + # 2.6 fails on weakref if class_ is an old style class + return isinstance(class_, type) and weakref.ref(class_) in cls._configs + + @classmethod + def raise_unmapped_for_cls(cls, class_: Type[Any]) -> NoReturn: + if hasattr(class_, "_sa_raise_deferred_config"): + class_._sa_raise_deferred_config() + + raise orm_exc.UnmappedClassError( + class_, + msg=( + f"Class {orm_exc._safe_cls_name(class_)} has a deferred " + "mapping on it. It is not yet usable as a mapped class." 
+ ), + ) + + @classmethod + def config_for_cls(cls, class_: Type[Any]) -> _DeferredMapperConfig: + return cls._configs[weakref.ref(class_)] + + @classmethod + def classes_for_base( + cls, base_cls: Type[Any], sort: bool = True + ) -> List[_DeferredMapperConfig]: + classes_for_base = [ + m + for m, cls_ in [(m, m.cls) for m in cls._configs.values()] + if cls_ is not None and issubclass(cls_, base_cls) + ] + + if not sort: + return classes_for_base + + all_m_by_cls = {m.cls: m for m in classes_for_base} + + tuples: List[Tuple[_DeferredMapperConfig, _DeferredMapperConfig]] = [] + for m_cls in all_m_by_cls: + tuples.extend( + (all_m_by_cls[base_cls], all_m_by_cls[m_cls]) + for base_cls in m_cls.__bases__ + if base_cls in all_m_by_cls + ) + return list(topological.sort(tuples, classes_for_base)) + + def map(self, mapper_kw: _MapperKwArgs = util.EMPTY_DICT) -> Mapper[Any]: + self._configs.pop(self._cls, None) + return super().map(mapper_kw) + + +def _add_attribute( + cls: Type[Any], key: str, value: MapperProperty[Any] +) -> None: + """add an attribute to an existing declarative class. + + This runs through the logic to determine MapperProperty, + adds it to the Mapper, adds a column to the mapped Table, etc. + + """ + + if "__mapper__" in cls.__dict__: + mapped_cls = cast("MappedClassProtocol[Any]", cls) + + def _table_or_raise(mc: MappedClassProtocol[Any]) -> Table: + if isinstance(mc.__table__, Table): + return mc.__table__ + raise exc.InvalidRequestError( + f"Cannot add a new attribute to mapped class {mc.__name__!r} " + "because it's not mapped against a table." + ) + + if isinstance(value, Column): + _undefer_column_name(key, value) + _table_or_raise(mapped_cls).append_column( + value, replace_existing=True + ) + mapped_cls.__mapper__.add_property(key, value) + elif isinstance(value, _MapsColumns): + mp = value.mapper_property_to_assign + for col, _ in value.columns_to_assign: + _undefer_column_name(key, col) + _table_or_raise(mapped_cls).append_column( + col, replace_existing=True + ) + if not mp: + mapped_cls.__mapper__.add_property(key, col) + if mp: + mapped_cls.__mapper__.add_property(key, mp) + elif isinstance(value, MapperProperty): + mapped_cls.__mapper__.add_property(key, value) + elif isinstance(value, QueryableAttribute) and value.key != key: + # detect a QueryableAttribute that's already mapped being + # assigned elsewhere in userland, turn into a synonym() + value = SynonymProperty(value.key) + mapped_cls.__mapper__.add_property(key, value) + else: + type.__setattr__(cls, key, value) + mapped_cls.__mapper__._expire_memoizations() + else: + type.__setattr__(cls, key, value) + + +def _del_attribute(cls: Type[Any], key: str) -> None: + if ( + "__mapper__" in cls.__dict__ + and key in cls.__dict__ + and not cast( + "MappedClassProtocol[Any]", cls + ).__mapper__._dispose_called + ): + value = cls.__dict__[key] + if isinstance( + value, (Column, _MapsColumns, MapperProperty, QueryableAttribute) + ): + raise NotImplementedError( + "Can't un-map individual mapped attributes on a mapped class." + ) + else: + type.__delattr__(cls, key) + cast( + "MappedClassProtocol[Any]", cls + ).__mapper__._expire_memoizations() + else: + type.__delattr__(cls, key) + + +def _declarative_constructor(self: Any, **kwargs: Any) -> None: + """A simple constructor that allows initialization from kwargs. + + Sets attributes on the constructed instance using the names and + values in ``kwargs``. + + Only keys that are present as + attributes of the instance's class are allowed. 
These could be, + for example, any mapped columns or relationships. + """ + cls_ = type(self) + for k in kwargs: + if not hasattr(cls_, k): + raise TypeError( + "%r is an invalid keyword argument for %s" % (k, cls_.__name__) + ) + setattr(self, k, kwargs[k]) + + +_declarative_constructor.__name__ = "__init__" + + +def _undefer_column_name(key: str, column: Column[Any]) -> None: + if column.key is None: + column.key = key + if column.name is None: + column.name = key diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/dependency.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/dependency.py new file mode 100644 index 0000000000000000000000000000000000000000..b055240a353f46c3a719a0b8964084999b9c27f0 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/dependency.py @@ -0,0 +1,1304 @@ +# orm/dependency.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php +# mypy: ignore-errors + + +"""Relationship dependencies. + +""" + +from __future__ import annotations + +from . import attributes +from . import exc +from . import sync +from . import unitofwork +from . import util as mapperutil +from .interfaces import MANYTOMANY +from .interfaces import MANYTOONE +from .interfaces import ONETOMANY +from .. import exc as sa_exc +from .. import sql +from .. import util + + +class DependencyProcessor: + def __init__(self, prop): + self.prop = prop + self.cascade = prop.cascade + self.mapper = prop.mapper + self.parent = prop.parent + self.secondary = prop.secondary + self.direction = prop.direction + self.post_update = prop.post_update + self.passive_deletes = prop.passive_deletes + self.passive_updates = prop.passive_updates + self.enable_typechecks = prop.enable_typechecks + if self.passive_deletes: + self._passive_delete_flag = attributes.PASSIVE_NO_INITIALIZE + else: + self._passive_delete_flag = attributes.PASSIVE_OFF + if self.passive_updates: + self._passive_update_flag = attributes.PASSIVE_NO_INITIALIZE + else: + self._passive_update_flag = attributes.PASSIVE_OFF + + self.sort_key = "%s_%s" % (self.parent._sort_key, prop.key) + self.key = prop.key + if not self.prop.synchronize_pairs: + raise sa_exc.ArgumentError( + "Can't build a DependencyProcessor for relationship %s. " + "No target attributes to populate between parent and " + "child are present" % self.prop + ) + + @classmethod + def from_relationship(cls, prop): + return _direction_to_processor[prop.direction](prop) + + def hasparent(self, state): + """return True if the given object instance has a parent, + according to the ``InstrumentedAttribute`` handled by this + ``DependencyProcessor``. + + """ + return self.parent.class_manager.get_impl(self.key).hasparent(state) + + def per_property_preprocessors(self, uow): + """establish actions and dependencies related to a flush. + + These actions will operate on all relevant states in + the aggregate. 
+ + """ + uow.register_preprocessor(self, True) + + def per_property_flush_actions(self, uow): + after_save = unitofwork.ProcessAll(uow, self, False, True) + before_delete = unitofwork.ProcessAll(uow, self, True, True) + + parent_saves = unitofwork.SaveUpdateAll( + uow, self.parent.primary_base_mapper + ) + child_saves = unitofwork.SaveUpdateAll( + uow, self.mapper.primary_base_mapper + ) + + parent_deletes = unitofwork.DeleteAll( + uow, self.parent.primary_base_mapper + ) + child_deletes = unitofwork.DeleteAll( + uow, self.mapper.primary_base_mapper + ) + + self.per_property_dependencies( + uow, + parent_saves, + child_saves, + parent_deletes, + child_deletes, + after_save, + before_delete, + ) + + def per_state_flush_actions(self, uow, states, isdelete): + """establish actions and dependencies related to a flush. + + These actions will operate on all relevant states + individually. This occurs only if there are cycles + in the 'aggregated' version of events. + + """ + + child_base_mapper = self.mapper.primary_base_mapper + child_saves = unitofwork.SaveUpdateAll(uow, child_base_mapper) + child_deletes = unitofwork.DeleteAll(uow, child_base_mapper) + + # locate and disable the aggregate processors + # for this dependency + + if isdelete: + before_delete = unitofwork.ProcessAll(uow, self, True, True) + before_delete.disabled = True + else: + after_save = unitofwork.ProcessAll(uow, self, False, True) + after_save.disabled = True + + # check if the "child" side is part of the cycle + + if child_saves not in uow.cycles: + # based on the current dependencies we use, the saves/ + # deletes should always be in the 'cycles' collection + # together. if this changes, we will have to break up + # this method a bit more. + assert child_deletes not in uow.cycles + + # child side is not part of the cycle, so we will link per-state + # actions to the aggregate "saves", "deletes" actions + child_actions = [(child_saves, False), (child_deletes, True)] + child_in_cycles = False + else: + child_in_cycles = True + + # check if the "parent" side is part of the cycle + if not isdelete: + parent_saves = unitofwork.SaveUpdateAll( + uow, self.parent.base_mapper + ) + parent_deletes = before_delete = None + if parent_saves in uow.cycles: + parent_in_cycles = True + else: + parent_deletes = unitofwork.DeleteAll(uow, self.parent.base_mapper) + parent_saves = after_save = None + if parent_deletes in uow.cycles: + parent_in_cycles = True + + # now create actions /dependencies for each state. + + for state in states: + # detect if there's anything changed or loaded + # by a preprocessor on this state/attribute. In the + # case of deletes we may try to load missing items here as well. 
+ sum_ = state.manager[self.key].impl.get_all_pending( + state, + state.dict, + ( + self._passive_delete_flag + if isdelete + else attributes.PASSIVE_NO_INITIALIZE + ), + ) + + if not sum_: + continue + + if isdelete: + before_delete = unitofwork.ProcessState(uow, self, True, state) + if parent_in_cycles: + parent_deletes = unitofwork.DeleteState(uow, state) + else: + after_save = unitofwork.ProcessState(uow, self, False, state) + if parent_in_cycles: + parent_saves = unitofwork.SaveUpdateState(uow, state) + + if child_in_cycles: + child_actions = [] + for child_state, child in sum_: + if child_state not in uow.states: + child_action = (None, None) + else: + (deleted, listonly) = uow.states[child_state] + if deleted: + child_action = ( + unitofwork.DeleteState(uow, child_state), + True, + ) + else: + child_action = ( + unitofwork.SaveUpdateState(uow, child_state), + False, + ) + child_actions.append(child_action) + + # establish dependencies between our possibly per-state + # parent action and our possibly per-state child action. + for child_action, childisdelete in child_actions: + self.per_state_dependencies( + uow, + parent_saves, + parent_deletes, + child_action, + after_save, + before_delete, + isdelete, + childisdelete, + ) + + def presort_deletes(self, uowcommit, states): + return False + + def presort_saves(self, uowcommit, states): + return False + + def process_deletes(self, uowcommit, states): + pass + + def process_saves(self, uowcommit, states): + pass + + def prop_has_changes(self, uowcommit, states, isdelete): + if not isdelete or self.passive_deletes: + passive = ( + attributes.PASSIVE_NO_INITIALIZE + | attributes.INCLUDE_PENDING_MUTATIONS + ) + elif self.direction is MANYTOONE: + # here, we were hoping to optimize having to fetch many-to-one + # for history and ignore it, if there's no further cascades + # to take place. however there are too many less common conditions + # that still take place and tests in test_relationships / + # test_cascade etc. will still fail. + passive = attributes.PASSIVE_NO_FETCH_RELATED + else: + passive = ( + attributes.PASSIVE_OFF | attributes.INCLUDE_PENDING_MUTATIONS + ) + + for s in states: + # TODO: add a high speed method + # to InstanceState which returns: attribute + # has a non-None value, or had one + history = uowcommit.get_attribute_history(s, self.key, passive) + if history and not history.empty(): + return True + else: + return ( + states + and not self.prop._is_self_referential + and self.mapper in uowcommit.mappers + ) + + def _verify_canload(self, state): + if self.prop.uselist and state is None: + raise exc.FlushError( + "Can't flush None value found in " + "collection %s" % (self.prop,) + ) + elif state is not None and not self.mapper._canload( + state, allow_subtypes=not self.enable_typechecks + ): + if self.mapper._canload(state, allow_subtypes=True): + raise exc.FlushError( + "Attempting to flush an item of type " + "%(x)s as a member of collection " + '"%(y)s". Expected an object of type ' + "%(z)s or a polymorphic subclass of " + "this type. If %(x)s is a subclass of " + '%(z)s, configure mapper "%(zm)s" to ' + "load this subtype polymorphically, or " + "set enable_typechecks=False to allow " + "any subtype to be accepted for flush. " + % { + "x": state.class_, + "y": self.prop, + "z": self.mapper.class_, + "zm": self.mapper, + } + ) + else: + raise exc.FlushError( + "Attempting to flush an item of type " + "%(x)s as a member of collection " + '"%(y)s". 
Expected an object of type ' + "%(z)s or a polymorphic subclass of " + "this type." + % { + "x": state.class_, + "y": self.prop, + "z": self.mapper.class_, + } + ) + + def _synchronize(self, state, child, associationrow, clearkeys, uowcommit): + raise NotImplementedError() + + def _get_reversed_processed_set(self, uow): + if not self.prop._reverse_property: + return None + + process_key = tuple( + sorted([self.key] + [p.key for p in self.prop._reverse_property]) + ) + return uow.memo(("reverse_key", process_key), set) + + def _post_update(self, state, uowcommit, related, is_m2o_delete=False): + for x in related: + if not is_m2o_delete or x is not None: + uowcommit.register_post_update( + state, [r for l, r in self.prop.synchronize_pairs] + ) + break + + def _pks_changed(self, uowcommit, state): + raise NotImplementedError() + + def __repr__(self): + return "%s(%s)" % (self.__class__.__name__, self.prop) + + +class OneToManyDP(DependencyProcessor): + def per_property_dependencies( + self, + uow, + parent_saves, + child_saves, + parent_deletes, + child_deletes, + after_save, + before_delete, + ): + if self.post_update: + child_post_updates = unitofwork.PostUpdateAll( + uow, self.mapper.primary_base_mapper, False + ) + child_pre_updates = unitofwork.PostUpdateAll( + uow, self.mapper.primary_base_mapper, True + ) + + uow.dependencies.update( + [ + (child_saves, after_save), + (parent_saves, after_save), + (after_save, child_post_updates), + (before_delete, child_pre_updates), + (child_pre_updates, parent_deletes), + (child_pre_updates, child_deletes), + ] + ) + else: + uow.dependencies.update( + [ + (parent_saves, after_save), + (after_save, child_saves), + (after_save, child_deletes), + (child_saves, parent_deletes), + (child_deletes, parent_deletes), + (before_delete, child_saves), + (before_delete, child_deletes), + ] + ) + + def per_state_dependencies( + self, + uow, + save_parent, + delete_parent, + child_action, + after_save, + before_delete, + isdelete, + childisdelete, + ): + if self.post_update: + child_post_updates = unitofwork.PostUpdateAll( + uow, self.mapper.primary_base_mapper, False + ) + child_pre_updates = unitofwork.PostUpdateAll( + uow, self.mapper.primary_base_mapper, True + ) + + # TODO: this whole block is not covered + # by any tests + if not isdelete: + if childisdelete: + uow.dependencies.update( + [ + (child_action, after_save), + (after_save, child_post_updates), + ] + ) + else: + uow.dependencies.update( + [ + (save_parent, after_save), + (child_action, after_save), + (after_save, child_post_updates), + ] + ) + else: + if childisdelete: + uow.dependencies.update( + [ + (before_delete, child_pre_updates), + (child_pre_updates, delete_parent), + ] + ) + else: + uow.dependencies.update( + [ + (before_delete, child_pre_updates), + (child_pre_updates, delete_parent), + ] + ) + elif not isdelete: + uow.dependencies.update( + [ + (save_parent, after_save), + (after_save, child_action), + (save_parent, child_action), + ] + ) + else: + uow.dependencies.update( + [(before_delete, child_action), (child_action, delete_parent)] + ) + + def presort_deletes(self, uowcommit, states): + # head object is being deleted, and we manage its list of + # child objects the child objects have to have their + # foreign key to the parent set to NULL + should_null_fks = ( + not self.cascade.delete and not self.passive_deletes == "all" + ) + + for state in states: + history = uowcommit.get_attribute_history( + state, self.key, self._passive_delete_flag + ) + if history: + for child in 
history.deleted: + if child is not None and self.hasparent(child) is False: + if self.cascade.delete_orphan: + uowcommit.register_object(child, isdelete=True) + else: + uowcommit.register_object(child) + + if should_null_fks: + for child in history.unchanged: + if child is not None: + uowcommit.register_object( + child, operation="delete", prop=self.prop + ) + + def presort_saves(self, uowcommit, states): + children_added = uowcommit.memo(("children_added", self), set) + + should_null_fks = ( + not self.cascade.delete_orphan + and not self.passive_deletes == "all" + ) + + for state in states: + pks_changed = self._pks_changed(uowcommit, state) + + if not pks_changed or self.passive_updates: + passive = ( + attributes.PASSIVE_NO_INITIALIZE + | attributes.INCLUDE_PENDING_MUTATIONS + ) + else: + passive = ( + attributes.PASSIVE_OFF + | attributes.INCLUDE_PENDING_MUTATIONS + ) + + history = uowcommit.get_attribute_history(state, self.key, passive) + if history: + for child in history.added: + if child is not None: + uowcommit.register_object( + child, + cancel_delete=True, + operation="add", + prop=self.prop, + ) + + children_added.update(history.added) + + for child in history.deleted: + if not self.cascade.delete_orphan: + if should_null_fks: + uowcommit.register_object( + child, + isdelete=False, + operation="delete", + prop=self.prop, + ) + elif self.hasparent(child) is False: + uowcommit.register_object( + child, + isdelete=True, + operation="delete", + prop=self.prop, + ) + for c, m, st_, dct_ in self.mapper.cascade_iterator( + "delete", child + ): + uowcommit.register_object(st_, isdelete=True) + + if pks_changed: + if history: + for child in history.unchanged: + if child is not None: + uowcommit.register_object( + child, + False, + self.passive_updates, + operation="pk change", + prop=self.prop, + ) + + def process_deletes(self, uowcommit, states): + # head object is being deleted, and we manage its list of + # child objects the child objects have to have their foreign + # key to the parent set to NULL this phase can be called + # safely for any cascade but is unnecessary if delete cascade + # is on. + + if self.post_update or not self.passive_deletes == "all": + children_added = uowcommit.memo(("children_added", self), set) + + for state in states: + history = uowcommit.get_attribute_history( + state, self.key, self._passive_delete_flag + ) + if history: + for child in history.deleted: + if ( + child is not None + and self.hasparent(child) is False + ): + self._synchronize( + state, child, None, True, uowcommit, False + ) + if self.post_update and child: + self._post_update(child, uowcommit, [state]) + + if self.post_update or not self.cascade.delete: + for child in set(history.unchanged).difference( + children_added + ): + if child is not None: + self._synchronize( + state, child, None, True, uowcommit, False + ) + if self.post_update and child: + self._post_update( + child, uowcommit, [state] + ) + + # technically, we can even remove each child from the + # collection here too. but this would be a somewhat + # inconsistent behavior since it wouldn't happen + # if the old parent wasn't deleted but child was moved. 
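+    # Illustrative sketch (assumed, simplified mapping; not part of the
+    # upstream module): given a plain one-to-many such as
+    #
+    #     class Parent(Base):
+    #         __tablename__ = "parent"
+    #         id = Column(Integer, primary_key=True)
+    #         children = relationship("Child")      # no delete cascade
+    #
+    #     class Child(Base):
+    #         __tablename__ = "child"
+    #         id = Column(Integer, primary_key=True)
+    #         parent_id = Column(ForeignKey("parent.id"))
+    #
+    # deleting a Parent makes process_deletes() above emit UPDATEs that set
+    # Child.parent_id to NULL for the remaining children, whereas with
+    # cascade="all, delete-orphan" the children are deleted rather than
+    # disassociated.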
+ + def process_saves(self, uowcommit, states): + should_null_fks = ( + not self.cascade.delete_orphan + and not self.passive_deletes == "all" + ) + + for state in states: + history = uowcommit.get_attribute_history( + state, self.key, attributes.PASSIVE_NO_INITIALIZE + ) + if history: + for child in history.added: + self._synchronize( + state, child, None, False, uowcommit, False + ) + if child is not None and self.post_update: + self._post_update(child, uowcommit, [state]) + + for child in history.deleted: + if ( + should_null_fks + and not self.cascade.delete_orphan + and not self.hasparent(child) + ): + self._synchronize( + state, child, None, True, uowcommit, False + ) + + if self._pks_changed(uowcommit, state): + for child in history.unchanged: + self._synchronize( + state, child, None, False, uowcommit, True + ) + + def _synchronize( + self, state, child, associationrow, clearkeys, uowcommit, pks_changed + ): + source = state + dest = child + self._verify_canload(child) + if dest is None or ( + not self.post_update and uowcommit.is_deleted(dest) + ): + return + if clearkeys: + sync.clear(dest, self.mapper, self.prop.synchronize_pairs) + else: + sync.populate( + source, + self.parent, + dest, + self.mapper, + self.prop.synchronize_pairs, + uowcommit, + self.passive_updates and pks_changed, + ) + + def _pks_changed(self, uowcommit, state): + return sync.source_modified( + uowcommit, state, self.parent, self.prop.synchronize_pairs + ) + + +class ManyToOneDP(DependencyProcessor): + def __init__(self, prop): + DependencyProcessor.__init__(self, prop) + for mapper in self.mapper.self_and_descendants: + mapper._dependency_processors.append(DetectKeySwitch(prop)) + + def per_property_dependencies( + self, + uow, + parent_saves, + child_saves, + parent_deletes, + child_deletes, + after_save, + before_delete, + ): + if self.post_update: + parent_post_updates = unitofwork.PostUpdateAll( + uow, self.parent.primary_base_mapper, False + ) + parent_pre_updates = unitofwork.PostUpdateAll( + uow, self.parent.primary_base_mapper, True + ) + + uow.dependencies.update( + [ + (child_saves, after_save), + (parent_saves, after_save), + (after_save, parent_post_updates), + (after_save, parent_pre_updates), + (before_delete, parent_pre_updates), + (parent_pre_updates, child_deletes), + (parent_pre_updates, parent_deletes), + ] + ) + else: + uow.dependencies.update( + [ + (child_saves, after_save), + (after_save, parent_saves), + (parent_saves, child_deletes), + (parent_deletes, child_deletes), + ] + ) + + def per_state_dependencies( + self, + uow, + save_parent, + delete_parent, + child_action, + after_save, + before_delete, + isdelete, + childisdelete, + ): + if self.post_update: + if not isdelete: + parent_post_updates = unitofwork.PostUpdateAll( + uow, self.parent.primary_base_mapper, False + ) + if childisdelete: + uow.dependencies.update( + [ + (after_save, parent_post_updates), + (parent_post_updates, child_action), + ] + ) + else: + uow.dependencies.update( + [ + (save_parent, after_save), + (child_action, after_save), + (after_save, parent_post_updates), + ] + ) + else: + parent_pre_updates = unitofwork.PostUpdateAll( + uow, self.parent.primary_base_mapper, True + ) + + uow.dependencies.update( + [ + (before_delete, parent_pre_updates), + (parent_pre_updates, delete_parent), + (parent_pre_updates, child_action), + ] + ) + + elif not isdelete: + if not childisdelete: + uow.dependencies.update( + [(child_action, after_save), (after_save, save_parent)] + ) + else: + 
uow.dependencies.update([(after_save, save_parent)]) + + else: + if childisdelete: + uow.dependencies.update([(delete_parent, child_action)]) + + def presort_deletes(self, uowcommit, states): + if self.cascade.delete or self.cascade.delete_orphan: + for state in states: + history = uowcommit.get_attribute_history( + state, self.key, self._passive_delete_flag + ) + if history: + if self.cascade.delete_orphan: + todelete = history.sum() + else: + todelete = history.non_deleted() + for child in todelete: + if child is None: + continue + uowcommit.register_object( + child, + isdelete=True, + operation="delete", + prop=self.prop, + ) + t = self.mapper.cascade_iterator("delete", child) + for c, m, st_, dct_ in t: + uowcommit.register_object(st_, isdelete=True) + + def presort_saves(self, uowcommit, states): + for state in states: + uowcommit.register_object(state, operation="add", prop=self.prop) + if self.cascade.delete_orphan: + history = uowcommit.get_attribute_history( + state, self.key, self._passive_delete_flag + ) + if history: + for child in history.deleted: + if self.hasparent(child) is False: + uowcommit.register_object( + child, + isdelete=True, + operation="delete", + prop=self.prop, + ) + + t = self.mapper.cascade_iterator("delete", child) + for c, m, st_, dct_ in t: + uowcommit.register_object(st_, isdelete=True) + + def process_deletes(self, uowcommit, states): + if ( + self.post_update + and not self.cascade.delete_orphan + and not self.passive_deletes == "all" + ): + # post_update means we have to update our + # row to not reference the child object + # before we can DELETE the row + for state in states: + self._synchronize(state, None, None, True, uowcommit) + if state and self.post_update: + history = uowcommit.get_attribute_history( + state, self.key, self._passive_delete_flag + ) + if history: + self._post_update( + state, uowcommit, history.sum(), is_m2o_delete=True + ) + + def process_saves(self, uowcommit, states): + for state in states: + history = uowcommit.get_attribute_history( + state, self.key, attributes.PASSIVE_NO_INITIALIZE + ) + if history: + if history.added: + for child in history.added: + self._synchronize( + state, child, None, False, uowcommit, "add" + ) + elif history.deleted: + self._synchronize( + state, None, None, True, uowcommit, "delete" + ) + if self.post_update: + self._post_update(state, uowcommit, history.sum()) + + def _synchronize( + self, + state, + child, + associationrow, + clearkeys, + uowcommit, + operation=None, + ): + if state is None or ( + not self.post_update and uowcommit.is_deleted(state) + ): + return + + if ( + operation is not None + and child is not None + and not uowcommit.session._contains_state(child) + ): + util.warn( + "Object of type %s not in session, %s " + "operation along '%s' won't proceed" + % (mapperutil.state_class_str(child), operation, self.prop) + ) + return + + if clearkeys or child is None: + sync.clear(state, self.parent, self.prop.synchronize_pairs) + else: + self._verify_canload(child) + sync.populate( + child, + self.mapper, + state, + self.parent, + self.prop.synchronize_pairs, + uowcommit, + False, + ) + + +class DetectKeySwitch(DependencyProcessor): + """For many-to-one relationships with no one-to-many backref, + searches for parents through the unit of work when a primary + key has changed and updates them. 
+ + Theoretically, this approach could be expanded to support transparent + deletion of objects referenced via many-to-one as well, although + the current attribute system doesn't do enough bookkeeping for this + to be efficient. + + """ + + def per_property_preprocessors(self, uow): + if self.prop._reverse_property: + if self.passive_updates: + return + else: + if False in ( + prop.passive_updates + for prop in self.prop._reverse_property + ): + return + + uow.register_preprocessor(self, False) + + def per_property_flush_actions(self, uow): + parent_saves = unitofwork.SaveUpdateAll(uow, self.parent.base_mapper) + after_save = unitofwork.ProcessAll(uow, self, False, False) + uow.dependencies.update([(parent_saves, after_save)]) + + def per_state_flush_actions(self, uow, states, isdelete): + pass + + def presort_deletes(self, uowcommit, states): + pass + + def presort_saves(self, uow, states): + if not self.passive_updates: + # for non-passive updates, register in the preprocess stage + # so that mapper save_obj() gets a hold of changes + self._process_key_switches(states, uow) + + def prop_has_changes(self, uow, states, isdelete): + if not isdelete and self.passive_updates: + d = self._key_switchers(uow, states) + return bool(d) + + return False + + def process_deletes(self, uowcommit, states): + assert False + + def process_saves(self, uowcommit, states): + # for passive updates, register objects in the process stage + # so that we avoid ManyToOneDP's registering the object without + # the listonly flag in its own preprocess stage (results in UPDATE) + # statements being emitted + assert self.passive_updates + self._process_key_switches(states, uowcommit) + + def _key_switchers(self, uow, states): + switched, notswitched = uow.memo( + ("pk_switchers", self), lambda: (set(), set()) + ) + + allstates = switched.union(notswitched) + for s in states: + if s not in allstates: + if self._pks_changed(uow, s): + switched.add(s) + else: + notswitched.add(s) + return switched + + def _process_key_switches(self, deplist, uowcommit): + switchers = self._key_switchers(uowcommit, deplist) + if switchers: + # if primary key values have actually changed somewhere, perform + # a linear search through the UOW in search of a parent. 
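+            # (the scan below only looks at states whose class is the parent
+            # class of this relationship, or a subclass of it; everything
+            # else in the identity map is skipped by the issubclass() check.)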
+ for state in uowcommit.session.identity_map.all_states(): + if not issubclass(state.class_, self.parent.class_): + continue + dict_ = state.dict + related = state.get_impl(self.key).get( + state, dict_, passive=self._passive_update_flag + ) + if ( + related is not attributes.PASSIVE_NO_RESULT + and related is not None + ): + if self.prop.uselist: + if not related: + continue + related_obj = related[0] + else: + related_obj = related + related_state = attributes.instance_state(related_obj) + if related_state in switchers: + uowcommit.register_object( + state, False, self.passive_updates + ) + sync.populate( + related_state, + self.mapper, + state, + self.parent, + self.prop.synchronize_pairs, + uowcommit, + self.passive_updates, + ) + + def _pks_changed(self, uowcommit, state): + return bool(state.key) and sync.source_modified( + uowcommit, state, self.mapper, self.prop.synchronize_pairs + ) + + +class ManyToManyDP(DependencyProcessor): + def per_property_dependencies( + self, + uow, + parent_saves, + child_saves, + parent_deletes, + child_deletes, + after_save, + before_delete, + ): + uow.dependencies.update( + [ + (parent_saves, after_save), + (child_saves, after_save), + (after_save, child_deletes), + # a rowswitch on the parent from deleted to saved + # can make this one occur, as the "save" may remove + # an element from the + # "deleted" list before we have a chance to + # process its child rows + (before_delete, parent_saves), + (before_delete, parent_deletes), + (before_delete, child_deletes), + (before_delete, child_saves), + ] + ) + + def per_state_dependencies( + self, + uow, + save_parent, + delete_parent, + child_action, + after_save, + before_delete, + isdelete, + childisdelete, + ): + if not isdelete: + if childisdelete: + uow.dependencies.update( + [(save_parent, after_save), (after_save, child_action)] + ) + else: + uow.dependencies.update( + [(save_parent, after_save), (child_action, after_save)] + ) + else: + uow.dependencies.update( + [(before_delete, child_action), (before_delete, delete_parent)] + ) + + def presort_deletes(self, uowcommit, states): + # TODO: no tests fail if this whole + # thing is removed !!!! + if not self.passive_deletes: + # if no passive deletes, load history on + # the collection, so that prop_has_changes() + # returns True + for state in states: + uowcommit.get_attribute_history( + state, self.key, self._passive_delete_flag + ) + + def presort_saves(self, uowcommit, states): + if not self.passive_updates: + # if no passive updates, load history on + # each collection where parent has changed PK, + # so that prop_has_changes() returns True + for state in states: + if self._pks_changed(uowcommit, state): + uowcommit.get_attribute_history( + state, self.key, attributes.PASSIVE_OFF + ) + + if not self.cascade.delete_orphan: + return + + # check for child items removed from the collection + # if delete_orphan check is turned on. 
+ for state in states: + history = uowcommit.get_attribute_history( + state, self.key, attributes.PASSIVE_NO_INITIALIZE + ) + if history: + for child in history.deleted: + if self.hasparent(child) is False: + uowcommit.register_object( + child, + isdelete=True, + operation="delete", + prop=self.prop, + ) + for c, m, st_, dct_ in self.mapper.cascade_iterator( + "delete", child + ): + uowcommit.register_object(st_, isdelete=True) + + def process_deletes(self, uowcommit, states): + secondary_delete = [] + secondary_insert = [] + secondary_update = [] + + processed = self._get_reversed_processed_set(uowcommit) + tmp = set() + for state in states: + # this history should be cached already, as + # we loaded it in preprocess_deletes + history = uowcommit.get_attribute_history( + state, self.key, self._passive_delete_flag + ) + if history: + for child in history.non_added(): + if child is None or ( + processed is not None and (state, child) in processed + ): + continue + associationrow = {} + if not self._synchronize( + state, + child, + associationrow, + False, + uowcommit, + "delete", + ): + continue + secondary_delete.append(associationrow) + + tmp.update((c, state) for c in history.non_added()) + + if processed is not None: + processed.update(tmp) + + self._run_crud( + uowcommit, secondary_insert, secondary_update, secondary_delete + ) + + def process_saves(self, uowcommit, states): + secondary_delete = [] + secondary_insert = [] + secondary_update = [] + + processed = self._get_reversed_processed_set(uowcommit) + tmp = set() + + for state in states: + need_cascade_pks = not self.passive_updates and self._pks_changed( + uowcommit, state + ) + if need_cascade_pks: + passive = ( + attributes.PASSIVE_OFF + | attributes.INCLUDE_PENDING_MUTATIONS + ) + else: + passive = ( + attributes.PASSIVE_NO_INITIALIZE + | attributes.INCLUDE_PENDING_MUTATIONS + ) + history = uowcommit.get_attribute_history(state, self.key, passive) + if history: + for child in history.added: + if processed is not None and (state, child) in processed: + continue + associationrow = {} + if not self._synchronize( + state, child, associationrow, False, uowcommit, "add" + ): + continue + secondary_insert.append(associationrow) + for child in history.deleted: + if processed is not None and (state, child) in processed: + continue + associationrow = {} + if not self._synchronize( + state, + child, + associationrow, + False, + uowcommit, + "delete", + ): + continue + secondary_delete.append(associationrow) + + tmp.update((c, state) for c in history.added + history.deleted) + + if need_cascade_pks: + for child in history.unchanged: + associationrow = {} + sync.update( + state, + self.parent, + associationrow, + "old_", + self.prop.synchronize_pairs, + ) + sync.update( + child, + self.mapper, + associationrow, + "old_", + self.prop.secondary_synchronize_pairs, + ) + + secondary_update.append(associationrow) + + if processed is not None: + processed.update(tmp) + + self._run_crud( + uowcommit, secondary_insert, secondary_update, secondary_delete + ) + + def _run_crud( + self, uowcommit, secondary_insert, secondary_update, secondary_delete + ): + connection = uowcommit.transaction.connection(self.mapper) + + if secondary_delete: + associationrow = secondary_delete[0] + statement = self.secondary.delete().where( + sql.and_( + *[ + c == sql.bindparam(c.key, type_=c.type) + for c in self.secondary.c + if c.key in associationrow + ] + ) + ) + result = connection.execute(statement, secondary_delete) + + if ( + 
result.supports_sane_multi_rowcount() + ) and result.rowcount != len(secondary_delete): + raise exc.StaleDataError( + "DELETE statement on table '%s' expected to delete " + "%d row(s); Only %d were matched." + % ( + self.secondary.description, + len(secondary_delete), + result.rowcount, + ) + ) + + if secondary_update: + associationrow = secondary_update[0] + statement = self.secondary.update().where( + sql.and_( + *[ + c == sql.bindparam("old_" + c.key, type_=c.type) + for c in self.secondary.c + if c.key in associationrow + ] + ) + ) + result = connection.execute(statement, secondary_update) + + if ( + result.supports_sane_multi_rowcount() + ) and result.rowcount != len(secondary_update): + raise exc.StaleDataError( + "UPDATE statement on table '%s' expected to update " + "%d row(s); Only %d were matched." + % ( + self.secondary.description, + len(secondary_update), + result.rowcount, + ) + ) + + if secondary_insert: + statement = self.secondary.insert() + connection.execute(statement, secondary_insert) + + def _synchronize( + self, state, child, associationrow, clearkeys, uowcommit, operation + ): + # this checks for None if uselist=True + self._verify_canload(child) + + # but if uselist=False we get here. If child is None, + # no association row can be generated, so return. + if child is None: + return False + + if child is not None and not uowcommit.session._contains_state(child): + if not child.deleted: + util.warn( + "Object of type %s not in session, %s " + "operation along '%s' won't proceed" + % (mapperutil.state_class_str(child), operation, self.prop) + ) + return False + + sync.populate_dict( + state, self.parent, associationrow, self.prop.synchronize_pairs + ) + sync.populate_dict( + child, + self.mapper, + associationrow, + self.prop.secondary_synchronize_pairs, + ) + + return True + + def _pks_changed(self, uowcommit, state): + return sync.source_modified( + uowcommit, state, self.parent, self.prop.synchronize_pairs + ) + + +_direction_to_processor = { + ONETOMANY: OneToManyDP, + MANYTOONE: ManyToOneDP, + MANYTOMANY: ManyToManyDP, +} diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/descriptor_props.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/descriptor_props.py new file mode 100644 index 0000000000000000000000000000000000000000..f01cc1788b3b0191819864a519ed42a46bb97b6e --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/descriptor_props.py @@ -0,0 +1,1077 @@ +# orm/descriptor_props.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php + +"""Descriptor properties are more "auxiliary" properties +that exist as configurational elements, but don't participate +as actively in the load/persist ORM loop. + +""" +from __future__ import annotations + +from dataclasses import is_dataclass +import inspect +import itertools +import operator +import typing +from typing import Any +from typing import Callable +from typing import Dict +from typing import List +from typing import NoReturn +from typing import Optional +from typing import Sequence +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING +from typing import TypeVar +from typing import Union +import weakref + +from . import attributes +from . 
import util as orm_util +from .base import _DeclarativeMapped +from .base import LoaderCallableStatus +from .base import Mapped +from .base import PassiveFlag +from .base import SQLORMOperations +from .interfaces import _AttributeOptions +from .interfaces import _IntrospectsAnnotations +from .interfaces import _MapsColumns +from .interfaces import MapperProperty +from .interfaces import PropComparator +from .util import _none_set +from .util import de_stringify_annotation +from .. import event +from .. import exc as sa_exc +from .. import schema +from .. import sql +from .. import util +from ..sql import expression +from ..sql import operators +from ..sql.elements import BindParameter +from ..util.typing import get_args +from ..util.typing import is_fwd_ref +from ..util.typing import is_pep593 + + +if typing.TYPE_CHECKING: + from ._typing import _InstanceDict + from ._typing import _RegistryType + from .attributes import History + from .attributes import InstrumentedAttribute + from .attributes import QueryableAttribute + from .context import ORMCompileState + from .decl_base import _ClassScanMapperConfig + from .mapper import Mapper + from .properties import ColumnProperty + from .properties import MappedColumn + from .state import InstanceState + from ..engine.base import Connection + from ..engine.row import Row + from ..sql._typing import _DMLColumnArgument + from ..sql._typing import _InfoType + from ..sql.elements import ClauseList + from ..sql.elements import ColumnElement + from ..sql.operators import OperatorType + from ..sql.schema import Column + from ..sql.selectable import Select + from ..util.typing import _AnnotationScanType + from ..util.typing import CallableReference + from ..util.typing import DescriptorReference + from ..util.typing import RODescriptorReference + +_T = TypeVar("_T", bound=Any) +_PT = TypeVar("_PT", bound=Any) + + +class DescriptorProperty(MapperProperty[_T]): + """:class:`.MapperProperty` which proxies access to a + user-defined descriptor.""" + + doc: Optional[str] = None + + uses_objects = False + _links_to_entity = False + + descriptor: DescriptorReference[Any] + + def get_history( + self, + state: InstanceState[Any], + dict_: _InstanceDict, + passive: PassiveFlag = PassiveFlag.PASSIVE_OFF, + ) -> History: + raise NotImplementedError() + + def instrument_class(self, mapper: Mapper[Any]) -> None: + prop = self + + class _ProxyImpl(attributes.AttributeImpl): + accepts_scalar_loader = False + load_on_unexpire = True + collection = False + + @property + def uses_objects(self) -> bool: # type: ignore + return prop.uses_objects + + def __init__(self, key: str): + self.key = key + + def get_history( + self, + state: InstanceState[Any], + dict_: _InstanceDict, + passive: PassiveFlag = PassiveFlag.PASSIVE_OFF, + ) -> History: + return prop.get_history(state, dict_, passive) + + if self.descriptor is None: + desc = getattr(mapper.class_, self.key, None) + if mapper._is_userland_descriptor(self.key, desc): + self.descriptor = desc + + if self.descriptor is None: + + def fset(obj: Any, value: Any) -> None: + setattr(obj, self.name, value) + + def fdel(obj: Any) -> None: + delattr(obj, self.name) + + def fget(obj: Any) -> Any: + return getattr(obj, self.name) + + self.descriptor = property(fget=fget, fset=fset, fdel=fdel) + + proxy_attr = attributes.create_proxied_attribute(self.descriptor)( + self.parent.class_, + self.key, + self.descriptor, + lambda: self._comparator_factory(mapper), + doc=self.doc, + original_property=self, + ) + proxy_attr.impl = 
_ProxyImpl(self.key) + mapper.class_manager.instrument_attribute(self.key, proxy_attr) + + +_CompositeAttrType = Union[ + str, + "Column[_T]", + "MappedColumn[_T]", + "InstrumentedAttribute[_T]", + "Mapped[_T]", +] + + +_CC = TypeVar("_CC", bound=Any) + + +_composite_getters: weakref.WeakKeyDictionary[ + Type[Any], Callable[[Any], Tuple[Any, ...]] +] = weakref.WeakKeyDictionary() + + +class CompositeProperty( + _MapsColumns[_CC], _IntrospectsAnnotations, DescriptorProperty[_CC] +): + """Defines a "composite" mapped attribute, representing a collection + of columns as one attribute. + + :class:`.CompositeProperty` is constructed using the :func:`.composite` + function. + + .. seealso:: + + :ref:`mapper_composite` + + """ + + composite_class: Union[Type[_CC], Callable[..., _CC]] + attrs: Tuple[_CompositeAttrType[Any], ...] + + _generated_composite_accessor: CallableReference[ + Optional[Callable[[_CC], Tuple[Any, ...]]] + ] + + comparator_factory: Type[Comparator[_CC]] + + def __init__( + self, + _class_or_attr: Union[ + None, Type[_CC], Callable[..., _CC], _CompositeAttrType[Any] + ] = None, + *attrs: _CompositeAttrType[Any], + attribute_options: Optional[_AttributeOptions] = None, + active_history: bool = False, + deferred: bool = False, + group: Optional[str] = None, + comparator_factory: Optional[Type[Comparator[_CC]]] = None, + info: Optional[_InfoType] = None, + **kwargs: Any, + ): + super().__init__(attribute_options=attribute_options) + + if isinstance(_class_or_attr, (Mapped, str, sql.ColumnElement)): + self.attrs = (_class_or_attr,) + attrs + # will initialize within declarative_scan + self.composite_class = None # type: ignore + else: + self.composite_class = _class_or_attr # type: ignore + self.attrs = attrs + + self.active_history = active_history + self.deferred = deferred + self.group = group + self.comparator_factory = ( + comparator_factory + if comparator_factory is not None + else self.__class__.Comparator + ) + self._generated_composite_accessor = None + if info is not None: + self.info.update(info) + + util.set_creation_order(self) + self._create_descriptor() + self._init_accessor() + + def instrument_class(self, mapper: Mapper[Any]) -> None: + super().instrument_class(mapper) + self._setup_event_handlers() + + def _composite_values_from_instance(self, value: _CC) -> Tuple[Any, ...]: + if self._generated_composite_accessor: + return self._generated_composite_accessor(value) + else: + try: + accessor = value.__composite_values__ + except AttributeError as ae: + raise sa_exc.InvalidRequestError( + f"Composite class {self.composite_class.__name__} is not " + f"a dataclass and does not define a __composite_values__()" + " method; can't get state" + ) from ae + else: + return accessor() # type: ignore + + def do_init(self) -> None: + """Initialization which occurs after the :class:`.Composite` + has been associated with its parent mapper. + + """ + self._setup_arguments_on_columns() + + _COMPOSITE_FGET = object() + + def _create_descriptor(self) -> None: + """Create the Python descriptor that will serve as + the access point on instances of the mapped class. + + """ + + def fget(instance: Any) -> Any: + dict_ = attributes.instance_dict(instance) + state = attributes.instance_state(instance) + + if self.key not in dict_: + # key not present. Iterate through related + # attributes, retrieve their values. This + # ensures they all load. 
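+                # Illustrative note (assumed, typical usage): for a composite
+                # mapped as, e.g., ``composite(Point, "x", "y")``, this reads
+                # instance.x and instance.y; once those column attributes are
+                # loaded, the Point is constructed below and cached under
+                # self.key.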
+ values = [ + getattr(instance, key) for key in self._attribute_keys + ] + + # current expected behavior here is that the composite is + # created on access if the object is persistent or if + # col attributes have non-None. This would be better + # if the composite were created unconditionally, + # but that would be a behavioral change. + if self.key not in dict_ and ( + state.key is not None or not _none_set.issuperset(values) + ): + dict_[self.key] = self.composite_class(*values) + state.manager.dispatch.refresh( + state, self._COMPOSITE_FGET, [self.key] + ) + + return dict_.get(self.key, None) + + def fset(instance: Any, value: Any) -> None: + dict_ = attributes.instance_dict(instance) + state = attributes.instance_state(instance) + attr = state.manager[self.key] + + if attr.dispatch._active_history: + previous = fget(instance) + else: + previous = dict_.get(self.key, LoaderCallableStatus.NO_VALUE) + + for fn in attr.dispatch.set: + value = fn(state, value, previous, attr.impl) + dict_[self.key] = value + if value is None: + for key in self._attribute_keys: + setattr(instance, key, None) + else: + for key, value in zip( + self._attribute_keys, + self._composite_values_from_instance(value), + ): + setattr(instance, key, value) + + def fdel(instance: Any) -> None: + state = attributes.instance_state(instance) + dict_ = attributes.instance_dict(instance) + attr = state.manager[self.key] + + if attr.dispatch._active_history: + previous = fget(instance) + dict_.pop(self.key, None) + else: + previous = dict_.pop(self.key, LoaderCallableStatus.NO_VALUE) + + attr = state.manager[self.key] + attr.dispatch.remove(state, previous, attr.impl) + for key in self._attribute_keys: + setattr(instance, key, None) + + self.descriptor = property(fget, fset, fdel) + + @util.preload_module("sqlalchemy.orm.properties") + def declarative_scan( + self, + decl_scan: _ClassScanMapperConfig, + registry: _RegistryType, + cls: Type[Any], + originating_module: Optional[str], + key: str, + mapped_container: Optional[Type[Mapped[Any]]], + annotation: Optional[_AnnotationScanType], + extracted_mapped_annotation: Optional[_AnnotationScanType], + is_dataclass_field: bool, + ) -> None: + MappedColumn = util.preloaded.orm_properties.MappedColumn + if ( + self.composite_class is None + and extracted_mapped_annotation is None + ): + self._raise_for_required(key, cls) + argument = extracted_mapped_annotation + + if is_pep593(argument): + argument = get_args(argument)[0] + + if argument and self.composite_class is None: + if isinstance(argument, str) or is_fwd_ref( + argument, check_generic=True + ): + if originating_module is None: + str_arg = ( + argument.__forward_arg__ + if hasattr(argument, "__forward_arg__") + else str(argument) + ) + raise sa_exc.ArgumentError( + f"Can't use forward ref {argument} for composite " + f"class argument; set up the type as Mapped[{str_arg}]" + ) + argument = de_stringify_annotation( + cls, argument, originating_module, include_generic=True + ) + + self.composite_class = argument + + if is_dataclass(self.composite_class): + self._setup_for_dataclass(registry, cls, originating_module, key) + else: + for attr in self.attrs: + if ( + isinstance(attr, (MappedColumn, schema.Column)) + and attr.name is None + ): + raise sa_exc.ArgumentError( + "Composite class column arguments must be named " + "unless a dataclass is used" + ) + self._init_accessor() + + def _init_accessor(self) -> None: + if is_dataclass(self.composite_class) and not hasattr( + self.composite_class, "__composite_values__" + ): + 
insp = inspect.signature(self.composite_class) + getter = operator.attrgetter( + *[p.name for p in insp.parameters.values()] + ) + if len(insp.parameters) == 1: + self._generated_composite_accessor = lambda obj: (getter(obj),) + else: + self._generated_composite_accessor = getter + + if ( + self.composite_class is not None + and isinstance(self.composite_class, type) + and self.composite_class not in _composite_getters + ): + if self._generated_composite_accessor is not None: + _composite_getters[self.composite_class] = ( + self._generated_composite_accessor + ) + elif hasattr(self.composite_class, "__composite_values__"): + _composite_getters[self.composite_class] = ( + lambda obj: obj.__composite_values__() + ) + + @util.preload_module("sqlalchemy.orm.properties") + @util.preload_module("sqlalchemy.orm.decl_base") + def _setup_for_dataclass( + self, + registry: _RegistryType, + cls: Type[Any], + originating_module: Optional[str], + key: str, + ) -> None: + MappedColumn = util.preloaded.orm_properties.MappedColumn + + decl_base = util.preloaded.orm_decl_base + + insp = inspect.signature(self.composite_class) + for param, attr in itertools.zip_longest( + insp.parameters.values(), self.attrs + ): + if param is None: + raise sa_exc.ArgumentError( + f"number of composite attributes " + f"{len(self.attrs)} exceeds " + f"that of the number of attributes in class " + f"{self.composite_class.__name__} {len(insp.parameters)}" + ) + if attr is None: + # fill in missing attr spots with empty MappedColumn + attr = MappedColumn() + self.attrs += (attr,) + + if isinstance(attr, MappedColumn): + attr.declarative_scan_for_composite( + registry, + cls, + originating_module, + key, + param.name, + param.annotation, + ) + elif isinstance(attr, schema.Column): + decl_base._undefer_column_name(param.name, attr) + + @util.memoized_property + def _comparable_elements(self) -> Sequence[QueryableAttribute[Any]]: + return [getattr(self.parent.class_, prop.key) for prop in self.props] + + @util.memoized_property + @util.preload_module("orm.properties") + def props(self) -> Sequence[MapperProperty[Any]]: + props = [] + MappedColumn = util.preloaded.orm_properties.MappedColumn + + for attr in self.attrs: + if isinstance(attr, str): + prop = self.parent.get_property(attr, _configure_mappers=False) + elif isinstance(attr, schema.Column): + prop = self.parent._columntoproperty[attr] + elif isinstance(attr, MappedColumn): + prop = self.parent._columntoproperty[attr.column] + elif isinstance(attr, attributes.InstrumentedAttribute): + prop = attr.property + else: + prop = None + + if not isinstance(prop, MapperProperty): + raise sa_exc.ArgumentError( + "Composite expects Column objects or mapped " + f"attributes/attribute names as arguments, got: {attr!r}" + ) + + props.append(prop) + return props + + @util.non_memoized_property + @util.preload_module("orm.properties") + def columns(self) -> Sequence[Column[Any]]: + MappedColumn = util.preloaded.orm_properties.MappedColumn + return [ + a.column if isinstance(a, MappedColumn) else a + for a in self.attrs + if isinstance(a, (schema.Column, MappedColumn)) + ] + + @property + def mapper_property_to_assign(self) -> Optional[MapperProperty[_CC]]: + return self + + @property + def columns_to_assign(self) -> List[Tuple[schema.Column[Any], int]]: + return [(c, 0) for c in self.columns if c.table is None] + + @util.preload_module("orm.properties") + def _setup_arguments_on_columns(self) -> None: + """Propagate configuration arguments made on this composite + to the target columns, 
for those that apply. + + """ + ColumnProperty = util.preloaded.orm_properties.ColumnProperty + + for prop in self.props: + if not isinstance(prop, ColumnProperty): + continue + else: + cprop = prop + + cprop.active_history = self.active_history + if self.deferred: + cprop.deferred = self.deferred + cprop.strategy_key = (("deferred", True), ("instrument", True)) + cprop.group = self.group + + def _setup_event_handlers(self) -> None: + """Establish events that populate/expire the composite attribute.""" + + def load_handler( + state: InstanceState[Any], context: ORMCompileState + ) -> None: + _load_refresh_handler(state, context, None, is_refresh=False) + + def refresh_handler( + state: InstanceState[Any], + context: ORMCompileState, + to_load: Optional[Sequence[str]], + ) -> None: + # note this corresponds to sqlalchemy.ext.mutable load_attrs() + + if not to_load or ( + {self.key}.union(self._attribute_keys) + ).intersection(to_load): + _load_refresh_handler(state, context, to_load, is_refresh=True) + + def _load_refresh_handler( + state: InstanceState[Any], + context: ORMCompileState, + to_load: Optional[Sequence[str]], + is_refresh: bool, + ) -> None: + dict_ = state.dict + + # if context indicates we are coming from the + # fget() handler, this already set the value; skip the + # handler here. (other handlers like mutablecomposite will still + # want to catch it) + # there's an insufficiency here in that the fget() handler + # really should not be using the refresh event and there should + # be some other event that mutablecomposite can subscribe + # towards for this. + + if ( + not is_refresh or context is self._COMPOSITE_FGET + ) and self.key in dict_: + return + + # if column elements aren't loaded, skip. + # __get__() will initiate a load for those + # columns + for k in self._attribute_keys: + if k not in dict_: + return + + dict_[self.key] = self.composite_class( + *[state.dict[key] for key in self._attribute_keys] + ) + + def expire_handler( + state: InstanceState[Any], keys: Optional[Sequence[str]] + ) -> None: + if keys is None or set(self._attribute_keys).intersection(keys): + state.dict.pop(self.key, None) + + def insert_update_handler( + mapper: Mapper[Any], + connection: Connection, + state: InstanceState[Any], + ) -> None: + """After an insert or update, some columns may be expired due + to server side defaults, or re-populated due to client side + defaults. Pop out the composite value here so that it + recreates. 
+ + """ + + state.dict.pop(self.key, None) + + event.listen( + self.parent, "after_insert", insert_update_handler, raw=True + ) + event.listen( + self.parent, "after_update", insert_update_handler, raw=True + ) + event.listen( + self.parent, "load", load_handler, raw=True, propagate=True + ) + event.listen( + self.parent, "refresh", refresh_handler, raw=True, propagate=True + ) + event.listen( + self.parent, "expire", expire_handler, raw=True, propagate=True + ) + + proxy_attr = self.parent.class_manager[self.key] + proxy_attr.impl.dispatch = proxy_attr.dispatch # type: ignore + proxy_attr.impl.dispatch._active_history = self.active_history + + # TODO: need a deserialize hook here + + @util.memoized_property + def _attribute_keys(self) -> Sequence[str]: + return [prop.key for prop in self.props] + + def _populate_composite_bulk_save_mappings_fn( + self, + ) -> Callable[[Dict[str, Any]], None]: + if self._generated_composite_accessor: + get_values = self._generated_composite_accessor + else: + + def get_values(val: Any) -> Tuple[Any]: + return val.__composite_values__() # type: ignore + + attrs = [prop.key for prop in self.props] + + def populate(dest_dict: Dict[str, Any]) -> None: + dest_dict.update( + { + key: val + for key, val in zip( + attrs, get_values(dest_dict.pop(self.key)) + ) + } + ) + + return populate + + def get_history( + self, + state: InstanceState[Any], + dict_: _InstanceDict, + passive: PassiveFlag = PassiveFlag.PASSIVE_OFF, + ) -> History: + """Provided for userland code that uses attributes.get_history().""" + + added: List[Any] = [] + deleted: List[Any] = [] + + has_history = False + for prop in self.props: + key = prop.key + hist = state.manager[key].impl.get_history(state, dict_) + if hist.has_changes(): + has_history = True + + non_deleted = hist.non_deleted() + if non_deleted: + added.extend(non_deleted) + else: + added.append(None) + if hist.deleted: + deleted.extend(hist.deleted) + else: + deleted.append(None) + + if has_history: + return attributes.History( + [self.composite_class(*added)], + (), + [self.composite_class(*deleted)], + ) + else: + return attributes.History((), [self.composite_class(*added)], ()) + + def _comparator_factory( + self, mapper: Mapper[Any] + ) -> Composite.Comparator[_CC]: + return self.comparator_factory(self, mapper) + + class CompositeBundle(orm_util.Bundle[_T]): + def __init__( + self, + property_: Composite[_T], + expr: ClauseList, + ): + self.property = property_ + super().__init__(property_.key, *expr) + + def create_row_processor( + self, + query: Select[Any], + procs: Sequence[Callable[[Row[Any]], Any]], + labels: Sequence[str], + ) -> Callable[[Row[Any]], Any]: + def proc(row: Row[Any]) -> Any: + return self.property.composite_class( + *[proc(row) for proc in procs] + ) + + return proc + + class Comparator(PropComparator[_PT]): + """Produce boolean, comparison, and other operators for + :class:`.Composite` attributes. + + See the example in :ref:`composite_operations` for an overview + of usage , as well as the documentation for :class:`.PropComparator`. + + .. 
seealso:: + + :class:`.PropComparator` + + :class:`.ColumnOperators` + + :ref:`types_operators` + + :attr:`.TypeEngine.comparator_factory` + + """ + + # https://github.com/python/mypy/issues/4266 + __hash__ = None # type: ignore + + prop: RODescriptorReference[Composite[_PT]] + + @util.memoized_property + def clauses(self) -> ClauseList: + return expression.ClauseList( + group=False, *self._comparable_elements + ) + + def __clause_element__(self) -> CompositeProperty.CompositeBundle[_PT]: + return self.expression + + @util.memoized_property + def expression(self) -> CompositeProperty.CompositeBundle[_PT]: + clauses = self.clauses._annotate( + { + "parententity": self._parententity, + "parentmapper": self._parententity, + "proxy_key": self.prop.key, + } + ) + return CompositeProperty.CompositeBundle(self.prop, clauses) + + def _bulk_update_tuples( + self, value: Any + ) -> Sequence[Tuple[_DMLColumnArgument, Any]]: + if isinstance(value, BindParameter): + value = value.value + + values: Sequence[Any] + + if value is None: + values = [None for key in self.prop._attribute_keys] + elif isinstance(self.prop.composite_class, type) and isinstance( + value, self.prop.composite_class + ): + values = self.prop._composite_values_from_instance( + value # type: ignore[arg-type] + ) + else: + raise sa_exc.ArgumentError( + "Can't UPDATE composite attribute %s to %r" + % (self.prop, value) + ) + + return list(zip(self._comparable_elements, values)) + + @util.memoized_property + def _comparable_elements(self) -> Sequence[QueryableAttribute[Any]]: + if self._adapt_to_entity: + return [ + getattr(self._adapt_to_entity.entity, prop.key) + for prop in self.prop._comparable_elements + ] + else: + return self.prop._comparable_elements + + def __eq__(self, other: Any) -> ColumnElement[bool]: # type: ignore[override] # noqa: E501 + return self._compare(operators.eq, other) + + def __ne__(self, other: Any) -> ColumnElement[bool]: # type: ignore[override] # noqa: E501 + return self._compare(operators.ne, other) + + def __lt__(self, other: Any) -> ColumnElement[bool]: + return self._compare(operators.lt, other) + + def __gt__(self, other: Any) -> ColumnElement[bool]: + return self._compare(operators.gt, other) + + def __le__(self, other: Any) -> ColumnElement[bool]: + return self._compare(operators.le, other) + + def __ge__(self, other: Any) -> ColumnElement[bool]: + return self._compare(operators.ge, other) + + # what might be interesting would be if we create + # an instance of the composite class itself with + # the columns as data members, then use "hybrid style" comparison + # to create these comparisons. then your Point.__eq__() method could + # be where comparison behavior is defined for SQL also. Likely + # not a good choice for default behavior though, not clear how it would + # work w/ dataclasses, etc. also no demand for any of this anyway. + def _compare( + self, operator: OperatorType, other: Any + ) -> ColumnElement[bool]: + values: Sequence[Any] + if other is None: + values = [None] * len(self.prop._comparable_elements) + else: + values = self.prop._composite_values_from_instance(other) + comparisons = [ + operator(a, b) + for a, b in zip(self.prop._comparable_elements, values) + ] + if self._adapt_to_entity: + assert self.adapter is not None + comparisons = [self.adapter(x) for x in comparisons] + return sql.and_(*comparisons) + + def __str__(self) -> str: + return str(self.parent.class_.__name__) + "." 
+ self.key + + +class Composite(CompositeProperty[_T], _DeclarativeMapped[_T]): + """Declarative-compatible front-end for the :class:`.CompositeProperty` + class. + + Public constructor is the :func:`_orm.composite` function. + + .. versionchanged:: 2.0 Added :class:`_orm.Composite` as a Declarative + compatible subclass of :class:`_orm.CompositeProperty`. + + .. seealso:: + + :ref:`mapper_composite` + + """ + + inherit_cache = True + """:meta private:""" + + +class ConcreteInheritedProperty(DescriptorProperty[_T]): + """A 'do nothing' :class:`.MapperProperty` that disables + an attribute on a concrete subclass that is only present + on the inherited mapper, not the concrete classes' mapper. + + Cases where this occurs include: + + * When the superclass mapper is mapped against a + "polymorphic union", which includes all attributes from + all subclasses. + * When a relationship() is configured on an inherited mapper, + but not on the subclass mapper. Concrete mappers require + that relationship() is configured explicitly on each + subclass. + + """ + + def _comparator_factory( + self, mapper: Mapper[Any] + ) -> Type[PropComparator[_T]]: + comparator_callable = None + + for m in self.parent.iterate_to_root(): + p = m._props[self.key] + if getattr(p, "comparator_factory", None) is not None: + comparator_callable = p.comparator_factory + break + assert comparator_callable is not None + return comparator_callable(p, mapper) # type: ignore + + def __init__(self) -> None: + super().__init__() + + def warn() -> NoReturn: + raise AttributeError( + "Concrete %s does not implement " + "attribute %r at the instance level. Add " + "this property explicitly to %s." + % (self.parent, self.key, self.parent) + ) + + class NoninheritedConcreteProp: + def __set__(s: Any, obj: Any, value: Any) -> NoReturn: + warn() + + def __delete__(s: Any, obj: Any) -> NoReturn: + warn() + + def __get__(s: Any, obj: Any, owner: Any) -> Any: + if obj is None: + return self.descriptor + warn() + + self.descriptor = NoninheritedConcreteProp() + + +class SynonymProperty(DescriptorProperty[_T]): + """Denote an attribute name as a synonym to a mapped property, + in that the attribute will mirror the value and expression behavior + of another attribute. + + :class:`.Synonym` is constructed using the :func:`_orm.synonym` + function. + + .. 
seealso:: + + :ref:`synonyms` - Overview of synonyms + + """ + + comparator_factory: Optional[Type[PropComparator[_T]]] + + def __init__( + self, + name: str, + map_column: Optional[bool] = None, + descriptor: Optional[Any] = None, + comparator_factory: Optional[Type[PropComparator[_T]]] = None, + attribute_options: Optional[_AttributeOptions] = None, + info: Optional[_InfoType] = None, + doc: Optional[str] = None, + ): + super().__init__(attribute_options=attribute_options) + + self.name = name + self.map_column = map_column + self.descriptor = descriptor + self.comparator_factory = comparator_factory + if doc: + self.doc = doc + elif descriptor and descriptor.__doc__: + self.doc = descriptor.__doc__ + else: + self.doc = None + if info: + self.info.update(info) + + util.set_creation_order(self) + + if not TYPE_CHECKING: + + @property + def uses_objects(self) -> bool: + return getattr(self.parent.class_, self.name).impl.uses_objects + + # TODO: when initialized, check _proxied_object, + # emit a warning if it's not a column-based property + + @util.memoized_property + def _proxied_object( + self, + ) -> Union[MapperProperty[_T], SQLORMOperations[_T]]: + attr = getattr(self.parent.class_, self.name) + if not hasattr(attr, "property") or not isinstance( + attr.property, MapperProperty + ): + # attribute is a non-MapperProperty proxy such as + # hybrid or association proxy + if isinstance(attr, attributes.QueryableAttribute): + return attr.comparator + elif isinstance(attr, SQLORMOperations): + # association proxy comes here + return attr + + raise sa_exc.InvalidRequestError( + """synonym() attribute "%s.%s" only supports """ + """ORM mapped attributes, got %r""" + % (self.parent.class_.__name__, self.name, attr) + ) + return attr.property + + def _comparator_factory(self, mapper: Mapper[Any]) -> SQLORMOperations[_T]: + prop = self._proxied_object + + if isinstance(prop, MapperProperty): + if self.comparator_factory: + comp = self.comparator_factory(prop, mapper) + else: + comp = prop.comparator_factory(prop, mapper) + return comp + else: + return prop + + def get_history( + self, + state: InstanceState[Any], + dict_: _InstanceDict, + passive: PassiveFlag = PassiveFlag.PASSIVE_OFF, + ) -> History: + attr: QueryableAttribute[Any] = getattr(self.parent.class_, self.name) + return attr.impl.get_history(state, dict_, passive=passive) + + @util.preload_module("sqlalchemy.orm.properties") + def set_parent(self, parent: Mapper[Any], init: bool) -> None: + properties = util.preloaded.orm_properties + + if self.map_column: + # implement the 'map_column' option.
+ if self.key not in parent.persist_selectable.c: + raise sa_exc.ArgumentError( + "Can't compile synonym '%s': no column on table " + "'%s' named '%s'" + % ( + self.name, + parent.persist_selectable.description, + self.key, + ) + ) + elif ( + parent.persist_selectable.c[self.key] + in parent._columntoproperty + and parent._columntoproperty[ + parent.persist_selectable.c[self.key] + ].key + == self.name + ): + raise sa_exc.ArgumentError( + "Can't call map_column=True for synonym %r=%r, " + "a ColumnProperty already exists keyed to the name " + "%r for column %r" + % (self.key, self.name, self.name, self.key) + ) + p: ColumnProperty[Any] = properties.ColumnProperty( + parent.persist_selectable.c[self.key] + ) + parent._configure_property(self.name, p, init=init, setparent=True) + p._mapped_by_synonym = self.key + + self.parent = parent + + +class Synonym(SynonymProperty[_T], _DeclarativeMapped[_T]): + """Declarative front-end for the :class:`.SynonymProperty` class. + + Public constructor is the :func:`_orm.synonym` function. + + .. versionchanged:: 2.0 Added :class:`_orm.Synonym` as a Declarative + compatible subclass for :class:`_orm.SynonymProperty` + + .. seealso:: + + :ref:`synonyms` - Overview of synonyms + + """ + + inherit_cache = True + """:meta private:""" diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/dynamic.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/dynamic.py new file mode 100644 index 0000000000000000000000000000000000000000..3c81c396f6e82857afcde03a4be2e2d67fa67d56 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/dynamic.py @@ -0,0 +1,300 @@ +# orm/dynamic.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php + + +"""Dynamic collection API. + +Dynamic collections act like Query() objects for read operations and support +basic add/delete mutation. + +.. legacy:: the "dynamic" loader is a legacy feature, superseded by the + "write_only" loader. + + +""" + +from __future__ import annotations + +from typing import Any +from typing import Iterable +from typing import Iterator +from typing import List +from typing import Optional +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING +from typing import TypeVar +from typing import Union + +from . import attributes +from . import exc as orm_exc +from . import relationships +from . import util as orm_util +from .base import PassiveFlag +from .query import Query +from .session import object_session +from .writeonly import AbstractCollectionWriter +from .writeonly import WriteOnlyAttributeImpl +from .writeonly import WriteOnlyHistory +from .writeonly import WriteOnlyLoader +from .. import util +from ..engine import result + + +if TYPE_CHECKING: + from . 
import QueryableAttribute + from .mapper import Mapper + from .relationships import _RelationshipOrderByArg + from .session import Session + from .state import InstanceState + from .util import AliasedClass + from ..event import _Dispatch + from ..sql.elements import ColumnElement + +_T = TypeVar("_T", bound=Any) + + +class DynamicCollectionHistory(WriteOnlyHistory[_T]): + def __init__( + self, + attr: DynamicAttributeImpl, + state: InstanceState[_T], + passive: PassiveFlag, + apply_to: Optional[DynamicCollectionHistory[_T]] = None, + ) -> None: + if apply_to: + coll = AppenderQuery(attr, state).autoflush(False) + self.unchanged_items = util.OrderedIdentitySet(coll) + self.added_items = apply_to.added_items + self.deleted_items = apply_to.deleted_items + self._reconcile_collection = True + else: + self.deleted_items = util.OrderedIdentitySet() + self.added_items = util.OrderedIdentitySet() + self.unchanged_items = util.OrderedIdentitySet() + self._reconcile_collection = False + + +class DynamicAttributeImpl(WriteOnlyAttributeImpl): + _supports_dynamic_iteration = True + collection_history_cls = DynamicCollectionHistory[Any] + query_class: Type[AppenderMixin[Any]] # type: ignore[assignment] + + def __init__( + self, + class_: Union[Type[Any], AliasedClass[Any]], + key: str, + dispatch: _Dispatch[QueryableAttribute[Any]], + target_mapper: Mapper[_T], + order_by: _RelationshipOrderByArg, + query_class: Optional[Type[AppenderMixin[_T]]] = None, + **kw: Any, + ) -> None: + attributes.AttributeImpl.__init__( + self, class_, key, None, dispatch, **kw + ) + self.target_mapper = target_mapper + if order_by: + self.order_by = tuple(order_by) + if not query_class: + self.query_class = AppenderQuery + elif AppenderMixin in query_class.mro(): + self.query_class = query_class + else: + self.query_class = mixin_user_query(query_class) + + +@relationships.RelationshipProperty.strategy_for(lazy="dynamic") +class DynaLoader(WriteOnlyLoader): + impl_class = DynamicAttributeImpl + + +class AppenderMixin(AbstractCollectionWriter[_T]): + """A mixin that expects to be mixing in a Query class with + AbstractAppender. + + + """ + + query_class: Optional[Type[Query[_T]]] = None + _order_by_clauses: Tuple[ColumnElement[Any], ...] + + def __init__( + self, attr: DynamicAttributeImpl, state: InstanceState[_T] + ) -> None: + Query.__init__( + self, # type: ignore[arg-type] + attr.target_mapper, + None, + ) + super().__init__(attr, state) + + @property + def session(self) -> Optional[Session]: + sess = object_session(self.instance) + if sess is not None and sess.autoflush and self.instance in sess: + sess.flush() + if not orm_util.has_identity(self.instance): + return None + else: + return sess + + @session.setter + def session(self, session: Session) -> None: + self.sess = session + + def _iter(self) -> Union[result.ScalarResult[_T], result.Result[_T]]: + sess = self.session + if sess is None: + state = attributes.instance_state(self.instance) + if state.detached: + util.warn( + "Instance %s is detached, dynamic relationship cannot " + "return a correct result. This warning will become " + "a DetachedInstanceError in a future release." 
+ % (orm_util.state_str(state)) + ) + + return result.IteratorResult( + result.SimpleResultMetaData([self.attr.class_.__name__]), + iter( + self.attr._get_collection_history( + attributes.instance_state(self.instance), + PassiveFlag.PASSIVE_NO_INITIALIZE, + ).added_items + ), + _source_supports_scalars=True, + ).scalars() + else: + return self._generate(sess)._iter() + + if TYPE_CHECKING: + + def __iter__(self) -> Iterator[_T]: ... + + def __getitem__(self, index: Any) -> Union[_T, List[_T]]: + sess = self.session + if sess is None: + return self.attr._get_collection_history( + attributes.instance_state(self.instance), + PassiveFlag.PASSIVE_NO_INITIALIZE, + ).indexed(index) + else: + return self._generate(sess).__getitem__(index) # type: ignore[no-any-return] # noqa: E501 + + def count(self) -> int: + sess = self.session + if sess is None: + return len( + self.attr._get_collection_history( + attributes.instance_state(self.instance), + PassiveFlag.PASSIVE_NO_INITIALIZE, + ).added_items + ) + else: + return self._generate(sess).count() + + def _generate( + self, + sess: Optional[Session] = None, + ) -> Query[_T]: + # note we're returning an entirely new Query class instance + # here without any assignment capabilities; the class of this + # query is determined by the session. + instance = self.instance + if sess is None: + sess = object_session(instance) + if sess is None: + raise orm_exc.DetachedInstanceError( + "Parent instance %s is not bound to a Session, and no " + "contextual session is established; lazy load operation " + "of attribute '%s' cannot proceed" + % (orm_util.instance_str(instance), self.attr.key) + ) + + if self.query_class: + query = self.query_class(self.attr.target_mapper, session=sess) + else: + query = sess.query(self.attr.target_mapper) + + query._where_criteria = self._where_criteria + query._from_obj = self._from_obj + query._order_by_clauses = self._order_by_clauses + + return query + + def add_all(self, iterator: Iterable[_T]) -> None: + """Add an iterable of items to this :class:`_orm.AppenderQuery`. + + The given items will be persisted to the database in terms of + the parent instance's collection on the next flush. + + This method is provided to assist in delivering forwards-compatibility + with the :class:`_orm.WriteOnlyCollection` collection class. + + .. versionadded:: 2.0 + + """ + self._add_all_impl(iterator) + + def add(self, item: _T) -> None: + """Add an item to this :class:`_orm.AppenderQuery`. + + The given item will be persisted to the database in terms of + the parent instance's collection on the next flush. + + This method is provided to assist in delivering forwards-compatibility + with the :class:`_orm.WriteOnlyCollection` collection class. + + .. versionadded:: 2.0 + + """ + self._add_all_impl([item]) + + def extend(self, iterator: Iterable[_T]) -> None: + """Add an iterable of items to this :class:`_orm.AppenderQuery`. + + The given items will be persisted to the database in terms of + the parent instance's collection on the next flush. + + """ + self._add_all_impl(iterator) + + def append(self, item: _T) -> None: + """Append an item to this :class:`_orm.AppenderQuery`. + + The given item will be persisted to the database in terms of + the parent instance's collection on the next flush. + + """ + self._add_all_impl([item]) + + def remove(self, item: _T) -> None: + """Remove an item from this :class:`_orm.AppenderQuery`. + + The given item will be removed from the parent instance's collection on + the next flush. 
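+
+        For illustration only (``parent``, ``Child`` and the ``children``
+        relationship below are hypothetical, not part of this module): a
+        relationship configured with ``lazy="dynamic"`` returns an
+        :class:`.AppenderQuery`, so reads behave like :class:`_orm.Query`
+        while the mutation methods above stage changes for the next flush::
+
+            # children = relationship("Child", lazy="dynamic") on Parent
+            parent.children.append(Child(name="y"))  # pending INSERT at flush
+            parent.children.remove(existing_child)   # pending removal at flush
+            active = parent.children.filter_by(active=True).all()  # Query-style read
+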
+ + """ + self._remove_impl(item) + + +class AppenderQuery(AppenderMixin[_T], Query[_T]): # type: ignore[misc] + """A dynamic query that supports basic collection storage operations. + + Methods on :class:`.AppenderQuery` include all methods of + :class:`_orm.Query`, plus additional methods used for collection + persistence. + + + """ + + +def mixin_user_query(cls: Any) -> type[AppenderMixin[Any]]: + """Return a new class with AppenderQuery functionality layered over.""" + name = "Appender" + cls.__name__ + return type(name, (AppenderMixin, cls), {"query_class": cls}) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/evaluator.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/evaluator.py new file mode 100644 index 0000000000000000000000000000000000000000..57aae5a3c49ec853d4434a8ea3e1296ebca8795d --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/evaluator.py @@ -0,0 +1,379 @@ +# orm/evaluator.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php +# mypy: ignore-errors + +"""Evaluation functions used **INTERNALLY** by ORM DML use cases. + + +This module is **private, for internal use by SQLAlchemy**. + +.. versionchanged:: 2.0.4 renamed ``EvaluatorCompiler`` to + ``_EvaluatorCompiler``. + +""" + + +from __future__ import annotations + +from typing import Type + +from . import exc as orm_exc +from .base import LoaderCallableStatus +from .base import PassiveFlag +from .. import exc +from .. import inspect +from ..sql import and_ +from ..sql import operators +from ..sql.sqltypes import Concatenable +from ..sql.sqltypes import Integer +from ..sql.sqltypes import Numeric +from ..util import warn_deprecated + + +class UnevaluatableError(exc.InvalidRequestError): + pass + + +class _NoObject(operators.ColumnOperators): + def operate(self, *arg, **kw): + return None + + def reverse_operate(self, *arg, **kw): + return None + + +class _ExpiredObject(operators.ColumnOperators): + def operate(self, *arg, **kw): + return self + + def reverse_operate(self, *arg, **kw): + return self + + +_NO_OBJECT = _NoObject() +_EXPIRED_OBJECT = _ExpiredObject() + + +class _EvaluatorCompiler: + def __init__(self, target_cls=None): + self.target_cls = target_cls + + def process(self, clause, *clauses): + if clauses: + clause = and_(clause, *clauses) + + meth = getattr(self, f"visit_{clause.__visit_name__}", None) + if not meth: + raise UnevaluatableError( + f"Cannot evaluate {type(clause).__name__}" + ) + return meth(clause) + + def visit_grouping(self, clause): + return self.process(clause.element) + + def visit_null(self, clause): + return lambda obj: None + + def visit_false(self, clause): + return lambda obj: False + + def visit_true(self, clause): + return lambda obj: True + + def visit_column(self, clause): + try: + parentmapper = clause._annotations["parentmapper"] + except KeyError as ke: + raise UnevaluatableError( + f"Cannot evaluate column: {clause}" + ) from ke + + if self.target_cls and not issubclass( + self.target_cls, parentmapper.class_ + ): + raise UnevaluatableError( + "Can't evaluate criteria against " + f"alternate class {parentmapper.class_}" + ) + + parentmapper._check_configure() + + # we'd like to use "proxy_key" annotation to get the "key", however + # in relationship primaryjoin 
cases proxy_key is sometimes deannotated + # and sometimes apparently not present in the first place (?). + # While I can stop it from being deannotated (though need to see if + # this breaks other things), not sure right now about cases where it's + # not there in the first place. can fix at some later point. + # key = clause._annotations["proxy_key"] + + # for now, use the old way + try: + key = parentmapper._columntoproperty[clause].key + except orm_exc.UnmappedColumnError as err: + raise UnevaluatableError( + f"Cannot evaluate expression: {err}" + ) from err + + # note this used to fall back to a simple `getattr(obj, key)` evaluator + # if impl was None; as of #8656, we ensure mappers are configured + # so that impl is available + impl = parentmapper.class_manager[key].impl + + def get_corresponding_attr(obj): + if obj is None: + return _NO_OBJECT + state = inspect(obj) + dict_ = state.dict + + value = impl.get( + state, dict_, passive=PassiveFlag.PASSIVE_NO_FETCH + ) + if value is LoaderCallableStatus.PASSIVE_NO_RESULT: + return _EXPIRED_OBJECT + return value + + return get_corresponding_attr + + def visit_tuple(self, clause): + return self.visit_clauselist(clause) + + def visit_expression_clauselist(self, clause): + return self.visit_clauselist(clause) + + def visit_clauselist(self, clause): + evaluators = [self.process(clause) for clause in clause.clauses] + + dispatch = ( + f"visit_{clause.operator.__name__.rstrip('_')}_clauselist_op" + ) + meth = getattr(self, dispatch, None) + if meth: + return meth(clause.operator, evaluators, clause) + else: + raise UnevaluatableError( + f"Cannot evaluate clauselist with operator {clause.operator}" + ) + + def visit_binary(self, clause): + eval_left = self.process(clause.left) + eval_right = self.process(clause.right) + + dispatch = f"visit_{clause.operator.__name__.rstrip('_')}_binary_op" + meth = getattr(self, dispatch, None) + if meth: + return meth(clause.operator, eval_left, eval_right, clause) + else: + raise UnevaluatableError( + f"Cannot evaluate {type(clause).__name__} with " + f"operator {clause.operator}" + ) + + def visit_or_clauselist_op(self, operator, evaluators, clause): + def evaluate(obj): + has_null = False + for sub_evaluate in evaluators: + value = sub_evaluate(obj) + if value is _EXPIRED_OBJECT: + return _EXPIRED_OBJECT + elif value: + return True + has_null = has_null or value is None + if has_null: + return None + return False + + return evaluate + + def visit_and_clauselist_op(self, operator, evaluators, clause): + def evaluate(obj): + for sub_evaluate in evaluators: + value = sub_evaluate(obj) + if value is _EXPIRED_OBJECT: + return _EXPIRED_OBJECT + + if not value: + if value is None or value is _NO_OBJECT: + return None + return False + return True + + return evaluate + + def visit_comma_op_clauselist_op(self, operator, evaluators, clause): + def evaluate(obj): + values = [] + for sub_evaluate in evaluators: + value = sub_evaluate(obj) + if value is _EXPIRED_OBJECT: + return _EXPIRED_OBJECT + elif value is None or value is _NO_OBJECT: + return None + values.append(value) + return tuple(values) + + return evaluate + + def visit_custom_op_binary_op( + self, operator, eval_left, eval_right, clause + ): + if operator.python_impl: + return self._straight_evaluate( + operator, eval_left, eval_right, clause + ) + else: + raise UnevaluatableError( + f"Custom operator {operator.opstring!r} can't be evaluated " + "in Python unless it specifies a callable using " + "`.python_impl`." 
+ ) + + def visit_is_binary_op(self, operator, eval_left, eval_right, clause): + def evaluate(obj): + left_val = eval_left(obj) + right_val = eval_right(obj) + if left_val is _EXPIRED_OBJECT or right_val is _EXPIRED_OBJECT: + return _EXPIRED_OBJECT + return left_val == right_val + + return evaluate + + def visit_is_not_binary_op(self, operator, eval_left, eval_right, clause): + def evaluate(obj): + left_val = eval_left(obj) + right_val = eval_right(obj) + if left_val is _EXPIRED_OBJECT or right_val is _EXPIRED_OBJECT: + return _EXPIRED_OBJECT + return left_val != right_val + + return evaluate + + def _straight_evaluate(self, operator, eval_left, eval_right, clause): + def evaluate(obj): + left_val = eval_left(obj) + right_val = eval_right(obj) + if left_val is _EXPIRED_OBJECT or right_val is _EXPIRED_OBJECT: + return _EXPIRED_OBJECT + elif left_val is None or right_val is None: + return None + + return operator(eval_left(obj), eval_right(obj)) + + return evaluate + + def _straight_evaluate_numeric_only( + self, operator, eval_left, eval_right, clause + ): + if clause.left.type._type_affinity not in ( + Numeric, + Integer, + ) or clause.right.type._type_affinity not in (Numeric, Integer): + raise UnevaluatableError( + f'Cannot evaluate math operator "{operator.__name__}" for ' + f"datatypes {clause.left.type}, {clause.right.type}" + ) + + return self._straight_evaluate(operator, eval_left, eval_right, clause) + + visit_add_binary_op = _straight_evaluate_numeric_only + visit_mul_binary_op = _straight_evaluate_numeric_only + visit_sub_binary_op = _straight_evaluate_numeric_only + visit_mod_binary_op = _straight_evaluate_numeric_only + visit_truediv_binary_op = _straight_evaluate_numeric_only + visit_lt_binary_op = _straight_evaluate + visit_le_binary_op = _straight_evaluate + visit_ne_binary_op = _straight_evaluate + visit_gt_binary_op = _straight_evaluate + visit_ge_binary_op = _straight_evaluate + visit_eq_binary_op = _straight_evaluate + + def visit_in_op_binary_op(self, operator, eval_left, eval_right, clause): + return self._straight_evaluate( + lambda a, b: a in b if a is not _NO_OBJECT else None, + eval_left, + eval_right, + clause, + ) + + def visit_not_in_op_binary_op( + self, operator, eval_left, eval_right, clause + ): + return self._straight_evaluate( + lambda a, b: a not in b if a is not _NO_OBJECT else None, + eval_left, + eval_right, + clause, + ) + + def visit_concat_op_binary_op( + self, operator, eval_left, eval_right, clause + ): + + if not issubclass( + clause.left.type._type_affinity, Concatenable + ) or not issubclass(clause.right.type._type_affinity, Concatenable): + raise UnevaluatableError( + f"Cannot evaluate concatenate operator " + f'"{operator.__name__}" for ' + f"datatypes {clause.left.type}, {clause.right.type}" + ) + + return self._straight_evaluate( + lambda a, b: a + b, eval_left, eval_right, clause + ) + + def visit_startswith_op_binary_op( + self, operator, eval_left, eval_right, clause + ): + return self._straight_evaluate( + lambda a, b: a.startswith(b), eval_left, eval_right, clause + ) + + def visit_endswith_op_binary_op( + self, operator, eval_left, eval_right, clause + ): + return self._straight_evaluate( + lambda a, b: a.endswith(b), eval_left, eval_right, clause + ) + + def visit_unary(self, clause): + eval_inner = self.process(clause.element) + if clause.operator is operators.inv: + + def evaluate(obj): + value = eval_inner(obj) + if value is _EXPIRED_OBJECT: + return _EXPIRED_OBJECT + elif value is None: + return None + return not value + + return 
evaluate + raise UnevaluatableError( + f"Cannot evaluate {type(clause).__name__} " + f"with operator {clause.operator}" + ) + + def visit_bindparam(self, clause): + if clause.callable: + val = clause.callable() + else: + val = clause.value + return lambda obj: val + + +def __getattr__(name: str) -> Type[_EvaluatorCompiler]: + if name == "EvaluatorCompiler": + warn_deprecated( + "Direct use of 'EvaluatorCompiler' is not supported, and this " + "name will be removed in a future release. " + "'_EvaluatorCompiler' is for internal use only", + "2.0", + ) + return _EvaluatorCompiler + else: + raise AttributeError(f"module {__name__!r} has no attribute {name!r}") diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/events.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/events.py new file mode 100644 index 0000000000000000000000000000000000000000..f161760e6da04e3eefe2640c6b96cba2da9b2a2e --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/events.py @@ -0,0 +1,3271 @@ +# orm/events.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php + +"""ORM event interfaces. + +""" +from __future__ import annotations + +from typing import Any +from typing import Callable +from typing import Collection +from typing import Dict +from typing import Generic +from typing import Iterable +from typing import Optional +from typing import Sequence +from typing import Set +from typing import Type +from typing import TYPE_CHECKING +from typing import TypeVar +from typing import Union +import weakref + +from . import instrumentation +from . import interfaces +from . import mapperlib +from .attributes import QueryableAttribute +from .base import _mapper_or_none +from .base import NO_KEY +from .instrumentation import ClassManager +from .instrumentation import InstrumentationFactory +from .query import BulkDelete +from .query import BulkUpdate +from .query import Query +from .scoping import scoped_session +from .session import Session +from .session import sessionmaker +from .. import event +from .. import exc +from .. import util +from ..event import EventTarget +from ..event.registry import _ET +from ..util.compat import inspect_getfullargspec + +if TYPE_CHECKING: + from weakref import ReferenceType + + from ._typing import _InstanceDict + from ._typing import _InternalEntityType + from ._typing import _O + from ._typing import _T + from .attributes import Event + from .base import EventConstants + from .session import ORMExecuteState + from .session import SessionTransaction + from .unitofwork import UOWTransaction + from ..engine import Connection + from ..event.base import _Dispatch + from ..event.base import _HasEventsDispatch + from ..event.registry import _EventKey + from ..orm.collections import CollectionAdapter + from ..orm.context import QueryContext + from ..orm.decl_api import DeclarativeAttributeIntercept + from ..orm.decl_api import DeclarativeMeta + from ..orm.mapper import Mapper + from ..orm.state import InstanceState + +_KT = TypeVar("_KT", bound=Any) +_ET2 = TypeVar("_ET2", bound=EventTarget) + + +class InstrumentationEvents(event.Events[InstrumentationFactory]): + """Events related to class instrumentation events. 
+ + The listeners here support being established against + any new style class, that is any object that is a subclass + of 'type'. Events will then be fired off for events + against that class. If the "propagate=True" flag is passed + to event.listen(), the event will fire off for subclasses + of that class as well. + + The Python ``type`` builtin is also accepted as a target, + which when used has the effect of events being emitted + for all classes. + + Note the "propagate" flag here is defaulted to ``True``, + unlike the other class level events where it defaults + to ``False``. This means that new subclasses will also + be the subject of these events, when a listener + is established on a superclass. + + """ + + _target_class_doc = "SomeBaseClass" + _dispatch_target = InstrumentationFactory + + @classmethod + def _accept_with( + cls, + target: Union[ + InstrumentationFactory, + Type[InstrumentationFactory], + ], + identifier: str, + ) -> Optional[ + Union[ + InstrumentationFactory, + Type[InstrumentationFactory], + ] + ]: + if isinstance(target, type): + return _InstrumentationEventsHold(target) # type: ignore [return-value] # noqa: E501 + else: + return None + + @classmethod + def _listen( + cls, event_key: _EventKey[_T], propagate: bool = True, **kw: Any + ) -> None: + target, identifier, fn = ( + event_key.dispatch_target, + event_key.identifier, + event_key._listen_fn, + ) + + def listen(target_cls: type, *arg: Any) -> Optional[Any]: + listen_cls = target() + + # if weakref were collected, however this is not something + # that normally happens. it was occurring during test teardown + # between mapper/registry/instrumentation_manager, however this + # interaction was changed to not rely upon the event system. + if listen_cls is None: + return None + + if propagate and issubclass(target_cls, listen_cls): + return fn(target_cls, *arg) + elif not propagate and target_cls is listen_cls: + return fn(target_cls, *arg) + else: + return None + + def remove(ref: ReferenceType[_T]) -> None: + key = event.registry._EventKey( # type: ignore [type-var] + None, + identifier, + listen, + instrumentation._instrumentation_factory, + ) + getattr( + instrumentation._instrumentation_factory.dispatch, identifier + ).remove(key) + + target = weakref.ref(target.class_, remove) + + event_key.with_dispatch_target( + instrumentation._instrumentation_factory + ).with_wrapper(listen).base_listen(**kw) + + @classmethod + def _clear(cls) -> None: + super()._clear() + instrumentation._instrumentation_factory.dispatch._clear() + + def class_instrument(self, cls: ClassManager[_O]) -> None: + """Called after the given class is instrumented. + + To get at the :class:`.ClassManager`, use + :func:`.manager_of_class`. + + """ + + def class_uninstrument(self, cls: ClassManager[_O]) -> None: + """Called before the given class is uninstrumented. + + To get at the :class:`.ClassManager`, use + :func:`.manager_of_class`. + + """ + + def attribute_instrument( + self, cls: ClassManager[_O], key: _KT, inst: _O + ) -> None: + """Called when an attribute is instrumented.""" + + +class _InstrumentationEventsHold: + """temporary marker object used to transfer from _accept_with() to + _listen() on the InstrumentationEvents class. + + """ + + def __init__(self, class_: type) -> None: + self.class_ = class_ + + dispatch = event.dispatcher(InstrumentationEvents) + + +class InstanceEvents(event.Events[ClassManager[Any]]): + """Define events specific to object lifecycle. 
+ + e.g.:: + + from sqlalchemy import event + + + def my_load_listener(target, context): + print("on load!") + + + event.listen(SomeClass, "load", my_load_listener) + + Available targets include: + + * mapped classes + * unmapped superclasses of mapped or to-be-mapped classes + (using the ``propagate=True`` flag) + * :class:`_orm.Mapper` objects + * the :class:`_orm.Mapper` class itself indicates listening for all + mappers. + + Instance events are closely related to mapper events, but + are more specific to the instance and its instrumentation, + rather than its system of persistence. + + When using :class:`.InstanceEvents`, several modifiers are + available to the :func:`.event.listen` function. + + :param propagate=False: When True, the event listener should + be applied to all inheriting classes as well as the + class which is the target of this listener. + :param raw=False: When True, the "target" argument passed + to applicable event listener functions will be the + instance's :class:`.InstanceState` management + object, rather than the mapped instance itself. + :param restore_load_context=False: Applies to the + :meth:`.InstanceEvents.load` and :meth:`.InstanceEvents.refresh` + events. Restores the loader context of the object when the event + hook is complete, so that ongoing eager load operations continue + to target the object appropriately. A warning is emitted if the + object is moved to a new loader context from within one of these + events if this flag is not set. + + .. versionadded:: 1.3.14 + + + """ + + _target_class_doc = "SomeClass" + + _dispatch_target = ClassManager + + @classmethod + def _new_classmanager_instance( + cls, + class_: Union[DeclarativeAttributeIntercept, DeclarativeMeta, type], + classmanager: ClassManager[_O], + ) -> None: + _InstanceEventsHold.populate(class_, classmanager) + + @classmethod + @util.preload_module("sqlalchemy.orm") + def _accept_with( + cls, + target: Union[ + ClassManager[Any], + Type[ClassManager[Any]], + ], + identifier: str, + ) -> Optional[Union[ClassManager[Any], Type[ClassManager[Any]]]]: + orm = util.preloaded.orm + + if isinstance(target, ClassManager): + return target + elif isinstance(target, mapperlib.Mapper): + return target.class_manager + elif target is orm.mapper: # type: ignore [attr-defined] + util.warn_deprecated( + "The `sqlalchemy.orm.mapper()` symbol is deprecated and " + "will be removed in a future release. 
For the mapper-wide " + "event target, use the 'sqlalchemy.orm.Mapper' class.", + "2.0", + ) + return ClassManager + elif isinstance(target, type): + if issubclass(target, mapperlib.Mapper): + return ClassManager + else: + manager = instrumentation.opt_manager_of_class(target) + if manager: + return manager + else: + return _InstanceEventsHold(target) # type: ignore [return-value] # noqa: E501 + return None + + @classmethod + def _listen( + cls, + event_key: _EventKey[ClassManager[Any]], + raw: bool = False, + propagate: bool = False, + restore_load_context: bool = False, + **kw: Any, + ) -> None: + target, fn = (event_key.dispatch_target, event_key._listen_fn) + + if not raw or restore_load_context: + + def wrap( + state: InstanceState[_O], *arg: Any, **kw: Any + ) -> Optional[Any]: + if not raw: + target: Any = state.obj() + else: + target = state + if restore_load_context: + runid = state.runid + try: + return fn(target, *arg, **kw) + finally: + if restore_load_context: + state.runid = runid + + event_key = event_key.with_wrapper(wrap) + + event_key.base_listen(propagate=propagate, **kw) + + if propagate: + for mgr in target.subclass_managers(True): + event_key.with_dispatch_target(mgr).base_listen(propagate=True) + + @classmethod + def _clear(cls) -> None: + super()._clear() + _InstanceEventsHold._clear() + + def first_init(self, manager: ClassManager[_O], cls: Type[_O]) -> None: + """Called when the first instance of a particular mapping is called. + + This event is called when the ``__init__`` method of a class + is called the first time for that particular class. The event + invokes before ``__init__`` actually proceeds as well as before + the :meth:`.InstanceEvents.init` event is invoked. + + """ + + def init(self, target: _O, args: Any, kwargs: Any) -> None: + """Receive an instance when its constructor is called. + + This method is only called during a userland construction of + an object, in conjunction with the object's constructor, e.g. + its ``__init__`` method. It is not called when an object is + loaded from the database; see the :meth:`.InstanceEvents.load` + event in order to intercept a database load. + + The event is called before the actual ``__init__`` constructor + of the object is called. The ``kwargs`` dictionary may be + modified in-place in order to affect what is passed to + ``__init__``. + + :param target: the mapped instance. If + the event is configured with ``raw=True``, this will + instead be the :class:`.InstanceState` state-management + object associated with the instance. + :param args: positional arguments passed to the ``__init__`` method. + This is passed as a tuple and is currently immutable. + :param kwargs: keyword arguments passed to the ``__init__`` method. + This structure *can* be altered in place. + + .. seealso:: + + :meth:`.InstanceEvents.init_failure` + + :meth:`.InstanceEvents.load` + + """ + + def init_failure(self, target: _O, args: Any, kwargs: Any) -> None: + """Receive an instance when its constructor has been called, + and raised an exception. + + This method is only called during a userland construction of + an object, in conjunction with the object's constructor, e.g. + its ``__init__`` method. It is not called when an object is loaded + from the database. + + The event is invoked after an exception raised by the ``__init__`` + method is caught. After the event + is invoked, the original exception is re-raised outwards, so that + the construction of the object still raises an exception. 
The + actual exception and stack trace raised should be present in + ``sys.exc_info()``. + + :param target: the mapped instance. If + the event is configured with ``raw=True``, this will + instead be the :class:`.InstanceState` state-management + object associated with the instance. + :param args: positional arguments that were passed to the ``__init__`` + method. + :param kwargs: keyword arguments that were passed to the ``__init__`` + method. + + .. seealso:: + + :meth:`.InstanceEvents.init` + + :meth:`.InstanceEvents.load` + + """ + + def _sa_event_merge_wo_load( + self, target: _O, context: QueryContext + ) -> None: + """receive an object instance after it was the subject of a merge() + call, when load=False was passed. + + The target would be the already-loaded object in the Session which + would have had its attributes overwritten by the incoming object. This + overwrite operation does not use attribute events, instead just + populating dict directly. Therefore the purpose of this event is so + that extensions like sqlalchemy.ext.mutable know that object state has + changed and incoming state needs to be set up for "parents" etc. + + This functionality is acceptable to be made public in a later release. + + .. versionadded:: 1.4.41 + + """ + + def load(self, target: _O, context: QueryContext) -> None: + """Receive an object instance after it has been created via + ``__new__``, and after initial attribute population has + occurred. + + This typically occurs when the instance is created based on + incoming result rows, and is only called once for that + instance's lifetime. + + .. warning:: + + During a result-row load, this event is invoked when the + first row received for this instance is processed. When using + eager loading with collection-oriented attributes, the additional + rows that are to be loaded / processed in order to load subsequent + collection items have not occurred yet. This has the effect + both that collections will not be fully loaded, as well as that + if an operation occurs within this event handler that emits + another database load operation for the object, the "loading + context" for the object can change and interfere with the + existing eager loaders still in progress. + + Examples of what can cause the "loading context" to change within + the event handler include, but are not necessarily limited to: + + * accessing deferred attributes that weren't part of the row, + will trigger an "undefer" operation and refresh the object + + * accessing attributes on a joined-inheritance subclass that + weren't part of the row, will trigger a refresh operation. + + As of SQLAlchemy 1.3.14, a warning is emitted when this occurs. The + :paramref:`.InstanceEvents.restore_load_context` option may be + used on the event to prevent this warning; this will ensure that + the existing loading context is maintained for the object after the + event is called:: + + @event.listens_for(SomeClass, "load", restore_load_context=True) + def on_load(instance, context): + instance.some_unloaded_attribute + + .. versionchanged:: 1.3.14 Added + :paramref:`.InstanceEvents.restore_load_context` + and :paramref:`.SessionEvents.restore_load_context` flags which + apply to "on load" events, which will ensure that the loading + context for an object is restored when the event hook is + complete; a warning is emitted if the load context of the object + changes without this flag being set. 
+ + + The :meth:`.InstanceEvents.load` event is also available in a + class-method decorator format called :func:`_orm.reconstructor`. + + :param target: the mapped instance. If + the event is configured with ``raw=True``, this will + instead be the :class:`.InstanceState` state-management + object associated with the instance. + :param context: the :class:`.QueryContext` corresponding to the + current :class:`_query.Query` in progress. This argument may be + ``None`` if the load does not correspond to a :class:`_query.Query`, + such as during :meth:`.Session.merge`. + + .. seealso:: + + :ref:`mapped_class_load_events` + + :meth:`.InstanceEvents.init` + + :meth:`.InstanceEvents.refresh` + + :meth:`.SessionEvents.loaded_as_persistent` + + """ # noqa: E501 + + def refresh( + self, target: _O, context: QueryContext, attrs: Optional[Iterable[str]] + ) -> None: + """Receive an object instance after one or more attributes have + been refreshed from a query. + + Contrast this to the :meth:`.InstanceEvents.load` method, which + is invoked when the object is first loaded from a query. + + .. note:: This event is invoked within the loader process before + eager loaders may have been completed, and the object's state may + not be complete. Additionally, invoking row-level refresh + operations on the object will place the object into a new loader + context, interfering with the existing load context. See the note + on :meth:`.InstanceEvents.load` for background on making use of the + :paramref:`.InstanceEvents.restore_load_context` parameter, in + order to resolve this scenario. + + :param target: the mapped instance. If + the event is configured with ``raw=True``, this will + instead be the :class:`.InstanceState` state-management + object associated with the instance. + :param context: the :class:`.QueryContext` corresponding to the + current :class:`_query.Query` in progress. + :param attrs: sequence of attribute names which + were populated, or None if all column-mapped, non-deferred + attributes were populated. + + .. seealso:: + + :ref:`mapped_class_load_events` + + :meth:`.InstanceEvents.load` + + """ + + def refresh_flush( + self, + target: _O, + flush_context: UOWTransaction, + attrs: Optional[Iterable[str]], + ) -> None: + """Receive an object instance after one or more attributes that + contain a column-level default or onupdate handler have been refreshed + during persistence of the object's state. + + This event is the same as :meth:`.InstanceEvents.refresh` except + it is invoked within the unit of work flush process, and includes + only non-primary-key columns that have column level default or + onupdate handlers, including Python callables as well as server side + defaults and triggers which may be fetched via the RETURNING clause. + + .. note:: + + While the :meth:`.InstanceEvents.refresh_flush` event is triggered + for an object that was INSERTed as well as for an object that was + UPDATEd, the event is geared primarily towards the UPDATE process; + it is mostly an internal artifact that INSERT actions can also + trigger this event, and note that **primary key columns for an + INSERTed row are explicitly omitted** from this event. In order to + intercept the newly INSERTed state of an object, the + :meth:`.SessionEvents.pending_to_persistent` and + :meth:`.MapperEvents.after_insert` are better choices. + + :param target: the mapped instance. 
If + the event is configured with ``raw=True``, this will + instead be the :class:`.InstanceState` state-management + object associated with the instance. + :param flush_context: Internal :class:`.UOWTransaction` object + which handles the details of the flush. + :param attrs: sequence of attribute names which + were populated. + + .. seealso:: + + :ref:`mapped_class_load_events` + + :ref:`orm_server_defaults` + + :ref:`metadata_defaults_toplevel` + + """ + + def expire(self, target: _O, attrs: Optional[Iterable[str]]) -> None: + """Receive an object instance after its attributes or some subset + have been expired. + + 'keys' is a list of attribute names. If None, the entire + state was expired. + + :param target: the mapped instance. If + the event is configured with ``raw=True``, this will + instead be the :class:`.InstanceState` state-management + object associated with the instance. + :param attrs: sequence of attribute + names which were expired, or None if all attributes were + expired. + + """ + + def pickle(self, target: _O, state_dict: _InstanceDict) -> None: + """Receive an object instance when its associated state is + being pickled. + + :param target: the mapped instance. If + the event is configured with ``raw=True``, this will + instead be the :class:`.InstanceState` state-management + object associated with the instance. + :param state_dict: the dictionary returned by + :class:`.InstanceState.__getstate__`, containing the state + to be pickled. + + """ + + def unpickle(self, target: _O, state_dict: _InstanceDict) -> None: + """Receive an object instance after its associated state has + been unpickled. + + :param target: the mapped instance. If + the event is configured with ``raw=True``, this will + instead be the :class:`.InstanceState` state-management + object associated with the instance. + :param state_dict: the dictionary sent to + :class:`.InstanceState.__setstate__`, containing the state + dictionary which was pickled. + + """ + + +class _EventsHold(event.RefCollection[_ET]): + """Hold onto listeners against unmapped, uninstrumented classes. + + Establish _listen() for that class' mapper/instrumentation when + those objects are created for that class. 
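+
+    For illustration only (``Base`` and ``SomeClass`` below are hypothetical):
+    a listener attached to a not-yet-mapped superclass with
+    ``propagate=True`` is held here and applied once the mapper / class
+    manager for a subclass is actually created::
+
+        @event.listens_for(Base, "load", propagate=True)
+        def on_load(target, context): ...
+
+
+        class SomeClass(Base):  # when mapped, the held listener is applied
+            __tablename__ = "some_table"
+            id = mapped_column(Integer, primary_key=True)
+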
+ + """ + + all_holds: weakref.WeakKeyDictionary[Any, Any] + + def __init__( + self, + class_: Union[DeclarativeAttributeIntercept, DeclarativeMeta, type], + ) -> None: + self.class_ = class_ + + @classmethod + def _clear(cls) -> None: + cls.all_holds.clear() + + class HoldEvents(Generic[_ET2]): + _dispatch_target: Optional[Type[_ET2]] = None + + @classmethod + def _listen( + cls, + event_key: _EventKey[_ET2], + raw: bool = False, + propagate: bool = False, + retval: bool = False, + **kw: Any, + ) -> None: + target = event_key.dispatch_target + + if target.class_ in target.all_holds: + collection = target.all_holds[target.class_] + else: + collection = target.all_holds[target.class_] = {} + + event.registry._stored_in_collection(event_key, target) + collection[event_key._key] = ( + event_key, + raw, + propagate, + retval, + kw, + ) + + if propagate: + stack = list(target.class_.__subclasses__()) + while stack: + subclass = stack.pop(0) + stack.extend(subclass.__subclasses__()) + subject = target.resolve(subclass) + if subject is not None: + # we are already going through __subclasses__() + # so leave generic propagate flag False + event_key.with_dispatch_target(subject).listen( + raw=raw, propagate=False, retval=retval, **kw + ) + + def remove(self, event_key: _EventKey[_ET]) -> None: + target = event_key.dispatch_target + + if isinstance(target, _EventsHold): + collection = target.all_holds[target.class_] + del collection[event_key._key] + + @classmethod + def populate( + cls, + class_: Union[DeclarativeAttributeIntercept, DeclarativeMeta, type], + subject: Union[ClassManager[_O], Mapper[_O]], + ) -> None: + for subclass in class_.__mro__: + if subclass in cls.all_holds: + collection = cls.all_holds[subclass] + for ( + event_key, + raw, + propagate, + retval, + kw, + ) in collection.values(): + if propagate or subclass is class_: + # since we can't be sure in what order different + # classes in a hierarchy are triggered with + # populate(), we rely upon _EventsHold for all event + # assignment, instead of using the generic propagate + # flag. + event_key.with_dispatch_target(subject).listen( + raw=raw, propagate=False, retval=retval, **kw + ) + + +class _InstanceEventsHold(_EventsHold[_ET]): + all_holds: weakref.WeakKeyDictionary[Any, Any] = ( + weakref.WeakKeyDictionary() + ) + + def resolve(self, class_: Type[_O]) -> Optional[ClassManager[_O]]: + return instrumentation.opt_manager_of_class(class_) + + class HoldInstanceEvents(_EventsHold.HoldEvents[_ET], InstanceEvents): # type: ignore [misc] # noqa: E501 + pass + + dispatch = event.dispatcher(HoldInstanceEvents) + + +class MapperEvents(event.Events[mapperlib.Mapper[Any]]): + """Define events specific to mappings. + + e.g.:: + + from sqlalchemy import event + + + def my_before_insert_listener(mapper, connection, target): + # execute a stored procedure upon INSERT, + # apply the value to the row to be inserted + target.calculated_value = connection.execute( + text("select my_special_function(%d)" % target.special_number) + ).scalar() + + + # associate the listener function with SomeClass, + # to execute during the "before_insert" hook + event.listen(SomeClass, "before_insert", my_before_insert_listener) + + Available targets include: + + * mapped classes + * unmapped superclasses of mapped or to-be-mapped classes + (using the ``propagate=True`` flag) + * :class:`_orm.Mapper` objects + * the :class:`_orm.Mapper` class itself indicates listening for all + mappers. 
+ + Mapper events provide hooks into critical sections of the + mapper, including those related to object instrumentation, + object loading, and object persistence. In particular, the + persistence methods :meth:`~.MapperEvents.before_insert`, + and :meth:`~.MapperEvents.before_update` are popular + places to augment the state being persisted - however, these + methods operate with several significant restrictions. The + user is encouraged to evaluate the + :meth:`.SessionEvents.before_flush` and + :meth:`.SessionEvents.after_flush` methods as more + flexible and user-friendly hooks in which to apply + additional database state during a flush. + + When using :class:`.MapperEvents`, several modifiers are + available to the :func:`.event.listen` function. + + :param propagate=False: When True, the event listener should + be applied to all inheriting mappers and/or the mappers of + inheriting classes, as well as any + mapper which is the target of this listener. + :param raw=False: When True, the "target" argument passed + to applicable event listener functions will be the + instance's :class:`.InstanceState` management + object, rather than the mapped instance itself. + :param retval=False: when True, the user-defined event function + must have a return value, the purpose of which is either to + control subsequent event propagation, or to otherwise alter + the operation in progress by the mapper. Possible return + values are: + + * ``sqlalchemy.orm.interfaces.EXT_CONTINUE`` - continue event + processing normally. + * ``sqlalchemy.orm.interfaces.EXT_STOP`` - cancel all subsequent + event handlers in the chain. + * other values - the return value specified by specific listeners. + + """ + + _target_class_doc = "SomeClass" + _dispatch_target = mapperlib.Mapper + + @classmethod + def _new_mapper_instance( + cls, + class_: Union[DeclarativeAttributeIntercept, DeclarativeMeta, type], + mapper: Mapper[_O], + ) -> None: + _MapperEventsHold.populate(class_, mapper) + + @classmethod + @util.preload_module("sqlalchemy.orm") + def _accept_with( + cls, + target: Union[mapperlib.Mapper[Any], Type[mapperlib.Mapper[Any]]], + identifier: str, + ) -> Optional[Union[mapperlib.Mapper[Any], Type[mapperlib.Mapper[Any]]]]: + orm = util.preloaded.orm + + if target is orm.mapper: # type: ignore [attr-defined] + util.warn_deprecated( + "The `sqlalchemy.orm.mapper()` symbol is deprecated and " + "will be removed in a future release. For the mapper-wide " + "event target, use the 'sqlalchemy.orm.Mapper' class.", + "2.0", + ) + return mapperlib.Mapper + elif isinstance(target, type): + if issubclass(target, mapperlib.Mapper): + return target + else: + mapper = _mapper_or_none(target) + if mapper is not None: + return mapper + else: + return _MapperEventsHold(target) + else: + return target + + @classmethod + def _listen( + cls, + event_key: _EventKey[_ET], + raw: bool = False, + retval: bool = False, + propagate: bool = False, + **kw: Any, + ) -> None: + target, identifier, fn = ( + event_key.dispatch_target, + event_key.identifier, + event_key._listen_fn, + ) + + if ( + identifier in ("before_configured", "after_configured") + and target is not mapperlib.Mapper + ): + util.warn( + "'before_configured' and 'after_configured' ORM events " + "only invoke with the Mapper class " + "as the target." 
+ ) + + if not raw or not retval: + if not raw: + meth = getattr(cls, identifier) + try: + target_index = ( + inspect_getfullargspec(meth)[0].index("target") - 1 + ) + except ValueError: + target_index = None + + def wrap(*arg: Any, **kw: Any) -> Any: + if not raw and target_index is not None: + arg = list(arg) # type: ignore [assignment] + arg[target_index] = arg[target_index].obj() # type: ignore [index] # noqa: E501 + if not retval: + fn(*arg, **kw) + return interfaces.EXT_CONTINUE + else: + return fn(*arg, **kw) + + event_key = event_key.with_wrapper(wrap) + + if propagate: + for mapper in target.self_and_descendants: + event_key.with_dispatch_target(mapper).base_listen( + propagate=True, **kw + ) + else: + event_key.base_listen(**kw) + + @classmethod + def _clear(cls) -> None: + super()._clear() + _MapperEventsHold._clear() + + def instrument_class(self, mapper: Mapper[_O], class_: Type[_O]) -> None: + r"""Receive a class when the mapper is first constructed, + before instrumentation is applied to the mapped class. + + This event is the earliest phase of mapper construction. + Most attributes of the mapper are not yet initialized. To + receive an event within initial mapper construction where basic + state is available such as the :attr:`_orm.Mapper.attrs` collection, + the :meth:`_orm.MapperEvents.after_mapper_constructed` event may + be a better choice. + + This listener can either be applied to the :class:`_orm.Mapper` + class overall, or to any un-mapped class which serves as a base + for classes that will be mapped (using the ``propagate=True`` flag):: + + Base = declarative_base() + + + @event.listens_for(Base, "instrument_class", propagate=True) + def on_new_class(mapper, cls_): + "..." + + :param mapper: the :class:`_orm.Mapper` which is the target + of this event. + :param class\_: the mapped class. + + .. seealso:: + + :meth:`_orm.MapperEvents.after_mapper_constructed` + + """ + + def after_mapper_constructed( + self, mapper: Mapper[_O], class_: Type[_O] + ) -> None: + """Receive a class and mapper when the :class:`_orm.Mapper` has been + fully constructed. + + This event is called after the initial constructor for + :class:`_orm.Mapper` completes. This occurs after the + :meth:`_orm.MapperEvents.instrument_class` event and after the + :class:`_orm.Mapper` has done an initial pass of its arguments + to generate its collection of :class:`_orm.MapperProperty` objects, + which are accessible via the :meth:`_orm.Mapper.get_property` + method and the :attr:`_orm.Mapper.iterate_properties` attribute. + + This event differs from the + :meth:`_orm.MapperEvents.before_mapper_configured` event in that it + is invoked within the constructor for :class:`_orm.Mapper`, rather + than within the :meth:`_orm.registry.configure` process. Currently, + this event is the only one which is appropriate for handlers that + wish to create additional mapped classes in response to the + construction of this :class:`_orm.Mapper`, which will be part of the + same configure step when :meth:`_orm.registry.configure` next runs. + + .. versionadded:: 2.0.2 + + .. seealso:: + + :ref:`examples_versioning` - an example which illustrates the use + of the :meth:`_orm.MapperEvents.before_mapper_configured` + event to create new mappers to record change-audit histories on + objects. + + """ + + def before_mapper_configured( + self, mapper: Mapper[_O], class_: Type[_O] + ) -> None: + """Called right before a specific mapper is to be configured. 
+ + This event is intended to allow a specific mapper to be skipped during + the configure step, by returning the :attr:`.orm.interfaces.EXT_SKIP` + symbol which indicates to the :func:`.configure_mappers` call that this + particular mapper (or hierarchy of mappers, if ``propagate=True`` is + used) should be skipped in the current configuration run. When one or + more mappers are skipped, the "new mappers" flag will remain set, + meaning the :func:`.configure_mappers` function will continue to be + called when mappers are used, to continue to try to configure all + available mappers. + + In comparison to the other configure-level events, + :meth:`.MapperEvents.before_configured`, + :meth:`.MapperEvents.after_configured`, and + :meth:`.MapperEvents.mapper_configured`, the + :meth:`.MapperEvents.before_mapper_configured` event provides for a + meaningful return value when it is registered with the ``retval=True`` + parameter. + + .. versionadded:: 1.3 + + e.g.:: + + from sqlalchemy.orm import EXT_SKIP + + Base = declarative_base() + + DontConfigureBase = declarative_base() + + + @event.listens_for( + DontConfigureBase, + "before_mapper_configured", + retval=True, + propagate=True, + ) + def dont_configure(mapper, cls): + return EXT_SKIP + + .. seealso:: + + :meth:`.MapperEvents.before_configured` + + :meth:`.MapperEvents.after_configured` + + :meth:`.MapperEvents.mapper_configured` + + """ + + def mapper_configured(self, mapper: Mapper[_O], class_: Type[_O]) -> None: + r"""Called when a specific mapper has completed its own configuration + within the scope of the :func:`.configure_mappers` call. + + The :meth:`.MapperEvents.mapper_configured` event is invoked + for each mapper that is encountered when the + :func:`_orm.configure_mappers` function proceeds through the current + list of not-yet-configured mappers. + :func:`_orm.configure_mappers` is typically invoked + automatically as mappings are first used, as well as each time + new mappers have been made available and new mapper use is + detected. + + When the event is called, the mapper should be in its final + state, but **not including backrefs** that may be invoked from + other mappers; they might still be pending within the + configuration operation. Bidirectional relationships that + are instead configured via the + :paramref:`.orm.relationship.back_populates` argument + *will* be fully available, since this style of relationship does not + rely upon other possibly-not-configured mappers to know that they + exist. + + For an event that is guaranteed to have **all** mappers ready + to go including backrefs that are defined only on other + mappings, use the :meth:`.MapperEvents.after_configured` + event; this event invokes only after all known mappings have been + fully configured. + + The :meth:`.MapperEvents.mapper_configured` event, unlike + :meth:`.MapperEvents.before_configured` or + :meth:`.MapperEvents.after_configured`, + is called for each mapper/class individually, and the mapper is + passed to the event itself. It also is called exactly once for + a particular mapper. The event is therefore useful for + configurational steps that benefit from being invoked just once + on a specific mapper basis, which don't require that "backref" + configurations are necessarily ready yet. + + :param mapper: the :class:`_orm.Mapper` which is the target + of this event. + :param class\_: the mapped class. + + .. 
seealso:: + + :meth:`.MapperEvents.before_configured` + + :meth:`.MapperEvents.after_configured` + + :meth:`.MapperEvents.before_mapper_configured` + + """ + # TODO: need coverage for this event + + def before_configured(self) -> None: + """Called before a series of mappers have been configured. + + The :meth:`.MapperEvents.before_configured` event is invoked + each time the :func:`_orm.configure_mappers` function is + invoked, before the function has done any of its work. + :func:`_orm.configure_mappers` is typically invoked + automatically as mappings are first used, as well as each time + new mappers have been made available and new mapper use is + detected. + + This event can **only** be applied to the :class:`_orm.Mapper` class, + and not to individual mappings or mapped classes. It is only invoked + for all mappings as a whole:: + + from sqlalchemy.orm import Mapper + + + @event.listens_for(Mapper, "before_configured") + def go(): ... + + Contrast this event to :meth:`.MapperEvents.after_configured`, + which is invoked after the series of mappers has been configured, + as well as :meth:`.MapperEvents.before_mapper_configured` + and :meth:`.MapperEvents.mapper_configured`, which are both invoked + on a per-mapper basis. + + Theoretically this event is called once per + application, but is actually called any time new mappers + are to be affected by a :func:`_orm.configure_mappers` + call. If new mappings are constructed after existing ones have + already been used, this event will likely be called again. To ensure + that a particular event is only called once and no further, the + ``once=True`` argument (new in 0.9.4) can be applied:: + + from sqlalchemy.orm import mapper + + + @event.listens_for(mapper, "before_configured", once=True) + def go(): ... + + .. seealso:: + + :meth:`.MapperEvents.before_mapper_configured` + + :meth:`.MapperEvents.mapper_configured` + + :meth:`.MapperEvents.after_configured` + + """ + + def after_configured(self) -> None: + """Called after a series of mappers have been configured. + + The :meth:`.MapperEvents.after_configured` event is invoked + each time the :func:`_orm.configure_mappers` function is + invoked, after the function has completed its work. + :func:`_orm.configure_mappers` is typically invoked + automatically as mappings are first used, as well as each time + new mappers have been made available and new mapper use is + detected. + + Contrast this event to the :meth:`.MapperEvents.mapper_configured` + event, which is called on a per-mapper basis while the configuration + operation proceeds; unlike that event, when this event is invoked, + all cross-configurations (e.g. backrefs) will also have been made + available for any mappers that were pending. + Also contrast to :meth:`.MapperEvents.before_configured`, + which is invoked before the series of mappers has been configured. + + This event can **only** be applied to the :class:`_orm.Mapper` class, + and not to individual mappings or + mapped classes. It is only invoked for all mappings as a whole:: + + from sqlalchemy.orm import Mapper + + + @event.listens_for(Mapper, "after_configured") + def go(): ... + + Theoretically this event is called once per + application, but is actually called any time new mappers + have been affected by a :func:`_orm.configure_mappers` + call. If new mappings are constructed after existing ones have + already been used, this event will likely be called again. 
To ensure + that a particular event is only called once and no further, the + ``once=True`` argument (new in 0.9.4) can be applied:: + + from sqlalchemy.orm import mapper + + + @event.listens_for(mapper, "after_configured", once=True) + def go(): ... + + .. seealso:: + + :meth:`.MapperEvents.before_mapper_configured` + + :meth:`.MapperEvents.mapper_configured` + + :meth:`.MapperEvents.before_configured` + + """ + + def before_insert( + self, mapper: Mapper[_O], connection: Connection, target: _O + ) -> None: + """Receive an object instance before an INSERT statement + is emitted corresponding to that instance. + + .. note:: this event **only** applies to the + :ref:`session flush operation ` + and does **not** apply to the ORM DML operations described at + :ref:`orm_expression_update_delete`. To intercept ORM + DML events, use :meth:`_orm.SessionEvents.do_orm_execute`. + + This event is used to modify local, non-object related + attributes on the instance before an INSERT occurs, as well + as to emit additional SQL statements on the given + connection. + + The event is often called for a batch of objects of the + same class before their INSERT statements are emitted at + once in a later step. In the extremely rare case that + this is not desirable, the :class:`_orm.Mapper` object can be + configured with ``batch=False``, which will cause + batches of instances to be broken up into individual + (and more poorly performing) event->persist->event + steps. + + .. warning:: + + Mapper-level flush events only allow **very limited operations**, + on attributes local to the row being operated upon only, + as well as allowing any SQL to be emitted on the given + :class:`_engine.Connection`. **Please read fully** the notes + at :ref:`session_persistence_mapper` for guidelines on using + these methods; generally, the :meth:`.SessionEvents.before_flush` + method should be preferred for general on-flush changes. + + :param mapper: the :class:`_orm.Mapper` which is the target + of this event. + :param connection: the :class:`_engine.Connection` being used to + emit INSERT statements for this instance. This + provides a handle into the current transaction on the + target database specific to this instance. + :param target: the mapped instance being persisted. If + the event is configured with ``raw=True``, this will + instead be the :class:`.InstanceState` state-management + object associated with the instance. + :return: No return value is supported by this event. + + .. seealso:: + + :ref:`session_persistence_events` + + """ + + def after_insert( + self, mapper: Mapper[_O], connection: Connection, target: _O + ) -> None: + """Receive an object instance after an INSERT statement + is emitted corresponding to that instance. + + .. note:: this event **only** applies to the + :ref:`session flush operation ` + and does **not** apply to the ORM DML operations described at + :ref:`orm_expression_update_delete`. To intercept ORM + DML events, use :meth:`_orm.SessionEvents.do_orm_execute`. + + This event is used to modify in-Python-only + state on the instance after an INSERT occurs, as well + as to emit additional SQL statements on the given + connection. + + The event is often called for a batch of objects of the + same class after their INSERT statements have been + emitted at once in a previous step. 
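+        As an illustrative sketch, additional SQL can be emitted on the
+        same connection and transaction; ``SomeClass`` stands in for any
+        mapped class and ``audit_table`` for a Core table assumed to be
+        defined elsewhere::
+
+            from sqlalchemy import event
+
+
+            @event.listens_for(SomeClass, "after_insert")
+            def receive_after_insert(mapper, connection, target):
+                # runs on the same connection, within the flush's transaction
+                connection.execute(
+                    audit_table.insert().values(note="inserted: %s" % target)
+                )
+
+        Because the event may be invoked for a whole batch of instances of
+        the same class in sequence, a listener should not assume that the
+        given target is the only object being persisted.
+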
In the extremely + rare case that this is not desirable, the + :class:`_orm.Mapper` object can be configured with ``batch=False``, + which will cause batches of instances to be broken up + into individual (and more poorly performing) + event->persist->event steps. + + .. warning:: + + Mapper-level flush events only allow **very limited operations**, + on attributes local to the row being operated upon only, + as well as allowing any SQL to be emitted on the given + :class:`_engine.Connection`. **Please read fully** the notes + at :ref:`session_persistence_mapper` for guidelines on using + these methods; generally, the :meth:`.SessionEvents.before_flush` + method should be preferred for general on-flush changes. + + :param mapper: the :class:`_orm.Mapper` which is the target + of this event. + :param connection: the :class:`_engine.Connection` being used to + emit INSERT statements for this instance. This + provides a handle into the current transaction on the + target database specific to this instance. + :param target: the mapped instance being persisted. If + the event is configured with ``raw=True``, this will + instead be the :class:`.InstanceState` state-management + object associated with the instance. + :return: No return value is supported by this event. + + .. seealso:: + + :ref:`session_persistence_events` + + """ + + def before_update( + self, mapper: Mapper[_O], connection: Connection, target: _O + ) -> None: + """Receive an object instance before an UPDATE statement + is emitted corresponding to that instance. + + .. note:: this event **only** applies to the + :ref:`session flush operation ` + and does **not** apply to the ORM DML operations described at + :ref:`orm_expression_update_delete`. To intercept ORM + DML events, use :meth:`_orm.SessionEvents.do_orm_execute`. + + This event is used to modify local, non-object related + attributes on the instance before an UPDATE occurs, as well + as to emit additional SQL statements on the given + connection. + + This method is called for all instances that are + marked as "dirty", *even those which have no net changes + to their column-based attributes*. An object is marked + as dirty when any of its column-based attributes have a + "set attribute" operation called or when any of its + collections are modified. If, at update time, no + column-based attributes have any net changes, no UPDATE + statement will be issued. This means that an instance + being sent to :meth:`~.MapperEvents.before_update` is + *not* a guarantee that an UPDATE statement will be + issued, although you can affect the outcome here by + modifying attributes so that a net change in value does + exist. + + To detect if the column-based attributes on the object have net + changes, and will therefore generate an UPDATE statement, use + ``object_session(instance).is_modified(instance, + include_collections=False)``. + + The event is often called for a batch of objects of the + same class before their UPDATE statements are emitted at + once in a later step. In the extremely rare case that + this is not desirable, the :class:`_orm.Mapper` can be + configured with ``batch=False``, which will cause + batches of instances to be broken up into individual + (and more poorly performing) event->persist->event + steps. + + .. warning:: + + Mapper-level flush events only allow **very limited operations**, + on attributes local to the row being operated upon only, + as well as allowing any SQL to be emitted on the given + :class:`_engine.Connection`. 
**Please read fully** the notes + at :ref:`session_persistence_mapper` for guidelines on using + these methods; generally, the :meth:`.SessionEvents.before_flush` + method should be preferred for general on-flush changes. + + :param mapper: the :class:`_orm.Mapper` which is the target + of this event. + :param connection: the :class:`_engine.Connection` being used to + emit UPDATE statements for this instance. This + provides a handle into the current transaction on the + target database specific to this instance. + :param target: the mapped instance being persisted. If + the event is configured with ``raw=True``, this will + instead be the :class:`.InstanceState` state-management + object associated with the instance. + :return: No return value is supported by this event. + + .. seealso:: + + :ref:`session_persistence_events` + + """ + + def after_update( + self, mapper: Mapper[_O], connection: Connection, target: _O + ) -> None: + """Receive an object instance after an UPDATE statement + is emitted corresponding to that instance. + + .. note:: this event **only** applies to the + :ref:`session flush operation ` + and does **not** apply to the ORM DML operations described at + :ref:`orm_expression_update_delete`. To intercept ORM + DML events, use :meth:`_orm.SessionEvents.do_orm_execute`. + + This event is used to modify in-Python-only + state on the instance after an UPDATE occurs, as well + as to emit additional SQL statements on the given + connection. + + This method is called for all instances that are + marked as "dirty", *even those which have no net changes + to their column-based attributes*, and for which + no UPDATE statement has proceeded. An object is marked + as dirty when any of its column-based attributes have a + "set attribute" operation called or when any of its + collections are modified. If, at update time, no + column-based attributes have any net changes, no UPDATE + statement will be issued. This means that an instance + being sent to :meth:`~.MapperEvents.after_update` is + *not* a guarantee that an UPDATE statement has been + issued. + + To detect if the column-based attributes on the object have net + changes, and therefore resulted in an UPDATE statement, use + ``object_session(instance).is_modified(instance, + include_collections=False)``. + + The event is often called for a batch of objects of the + same class after their UPDATE statements have been emitted at + once in a previous step. In the extremely rare case that + this is not desirable, the :class:`_orm.Mapper` can be + configured with ``batch=False``, which will cause + batches of instances to be broken up into individual + (and more poorly performing) event->persist->event + steps. + + .. warning:: + + Mapper-level flush events only allow **very limited operations**, + on attributes local to the row being operated upon only, + as well as allowing any SQL to be emitted on the given + :class:`_engine.Connection`. **Please read fully** the notes + at :ref:`session_persistence_mapper` for guidelines on using + these methods; generally, the :meth:`.SessionEvents.before_flush` + method should be preferred for general on-flush changes. + + :param mapper: the :class:`_orm.Mapper` which is the target + of this event. + :param connection: the :class:`_engine.Connection` being used to + emit UPDATE statements for this instance. This + provides a handle into the current transaction on the + target database specific to this instance. + :param target: the mapped instance being persisted. 
If + the event is configured with ``raw=True``, this will + instead be the :class:`.InstanceState` state-management + object associated with the instance. + :return: No return value is supported by this event. + + .. seealso:: + + :ref:`session_persistence_events` + + """ + + def before_delete( + self, mapper: Mapper[_O], connection: Connection, target: _O + ) -> None: + """Receive an object instance before a DELETE statement + is emitted corresponding to that instance. + + .. note:: this event **only** applies to the + :ref:`session flush operation ` + and does **not** apply to the ORM DML operations described at + :ref:`orm_expression_update_delete`. To intercept ORM + DML events, use :meth:`_orm.SessionEvents.do_orm_execute`. + + This event is used to emit additional SQL statements on + the given connection as well as to perform application + specific bookkeeping related to a deletion event. + + The event is often called for a batch of objects of the + same class before their DELETE statements are emitted at + once in a later step. + + .. warning:: + + Mapper-level flush events only allow **very limited operations**, + on attributes local to the row being operated upon only, + as well as allowing any SQL to be emitted on the given + :class:`_engine.Connection`. **Please read fully** the notes + at :ref:`session_persistence_mapper` for guidelines on using + these methods; generally, the :meth:`.SessionEvents.before_flush` + method should be preferred for general on-flush changes. + + :param mapper: the :class:`_orm.Mapper` which is the target + of this event. + :param connection: the :class:`_engine.Connection` being used to + emit DELETE statements for this instance. This + provides a handle into the current transaction on the + target database specific to this instance. + :param target: the mapped instance being deleted. If + the event is configured with ``raw=True``, this will + instead be the :class:`.InstanceState` state-management + object associated with the instance. + :return: No return value is supported by this event. + + .. seealso:: + + :ref:`session_persistence_events` + + """ + + def after_delete( + self, mapper: Mapper[_O], connection: Connection, target: _O + ) -> None: + """Receive an object instance after a DELETE statement + has been emitted corresponding to that instance. + + .. note:: this event **only** applies to the + :ref:`session flush operation ` + and does **not** apply to the ORM DML operations described at + :ref:`orm_expression_update_delete`. To intercept ORM + DML events, use :meth:`_orm.SessionEvents.do_orm_execute`. + + This event is used to emit additional SQL statements on + the given connection as well as to perform application + specific bookkeeping related to a deletion event. + + The event is often called for a batch of objects of the + same class after their DELETE statements have been emitted at + once in a previous step. + + .. warning:: + + Mapper-level flush events only allow **very limited operations**, + on attributes local to the row being operated upon only, + as well as allowing any SQL to be emitted on the given + :class:`_engine.Connection`. **Please read fully** the notes + at :ref:`session_persistence_mapper` for guidelines on using + these methods; generally, the :meth:`.SessionEvents.before_flush` + method should be preferred for general on-flush changes. + + :param mapper: the :class:`_orm.Mapper` which is the target + of this event. 
+ :param connection: the :class:`_engine.Connection` being used to + emit DELETE statements for this instance. This + provides a handle into the current transaction on the + target database specific to this instance. + :param target: the mapped instance being deleted. If + the event is configured with ``raw=True``, this will + instead be the :class:`.InstanceState` state-management + object associated with the instance. + :return: No return value is supported by this event. + + .. seealso:: + + :ref:`session_persistence_events` + + """ + + +class _MapperEventsHold(_EventsHold[_ET]): + all_holds = weakref.WeakKeyDictionary() + + def resolve( + self, class_: Union[Type[_T], _InternalEntityType[_T]] + ) -> Optional[Mapper[_T]]: + return _mapper_or_none(class_) + + class HoldMapperEvents(_EventsHold.HoldEvents[_ET], MapperEvents): # type: ignore [misc] # noqa: E501 + pass + + dispatch = event.dispatcher(HoldMapperEvents) + + +_sessionevents_lifecycle_event_names: Set[str] = set() + + +class SessionEvents(event.Events[Session]): + """Define events specific to :class:`.Session` lifecycle. + + e.g.:: + + from sqlalchemy import event + from sqlalchemy.orm import sessionmaker + + + def my_before_commit(session): + print("before commit!") + + + Session = sessionmaker() + + event.listen(Session, "before_commit", my_before_commit) + + The :func:`~.event.listen` function will accept + :class:`.Session` objects as well as the return result + of :class:`~.sessionmaker()` and :class:`~.scoped_session()`. + + Additionally, it accepts the :class:`.Session` class which + will apply listeners to all :class:`.Session` instances + globally. + + :param raw=False: When True, the "target" argument passed + to applicable event listener functions that work on individual + objects will be the instance's :class:`.InstanceState` management + object, rather than the mapped instance itself. + + .. versionadded:: 1.3.14 + + :param restore_load_context=False: Applies to the + :meth:`.SessionEvents.loaded_as_persistent` event. Restores the loader + context of the object when the event hook is complete, so that ongoing + eager load operations continue to target the object appropriately. A + warning is emitted if the object is moved to a new loader context from + within this event if this flag is not set. + + .. versionadded:: 1.3.14 + + """ + + _target_class_doc = "SomeSessionClassOrObject" + + _dispatch_target = Session + + def _lifecycle_event( # type: ignore [misc] + fn: Callable[[SessionEvents, Session, Any], None] + ) -> Callable[[SessionEvents, Session, Any], None]: + _sessionevents_lifecycle_event_names.add(fn.__name__) + return fn + + @classmethod + def _accept_with( # type: ignore [return] + cls, target: Any, identifier: str + ) -> Union[Session, type]: + if isinstance(target, scoped_session): + target = target.session_factory + if not isinstance(target, sessionmaker) and ( + not isinstance(target, type) or not issubclass(target, Session) + ): + raise exc.ArgumentError( + "Session event listen on a scoped_session " + "requires that its creation callable " + "is associated with the Session class." 
+ ) + + if isinstance(target, sessionmaker): + return target.class_ + elif isinstance(target, type): + if issubclass(target, scoped_session): + return Session + elif issubclass(target, Session): + return target + elif isinstance(target, Session): + return target + elif hasattr(target, "_no_async_engine_events"): + target._no_async_engine_events() + else: + # allows alternate SessionEvents-like-classes to be consulted + return event.Events._accept_with(target, identifier) # type: ignore [return-value] # noqa: E501 + + @classmethod + def _listen( + cls, + event_key: Any, + *, + raw: bool = False, + restore_load_context: bool = False, + **kw: Any, + ) -> None: + is_instance_event = ( + event_key.identifier in _sessionevents_lifecycle_event_names + ) + + if is_instance_event: + if not raw or restore_load_context: + fn = event_key._listen_fn + + def wrap( + session: Session, + state: InstanceState[_O], + *arg: Any, + **kw: Any, + ) -> Optional[Any]: + if not raw: + target = state.obj() + if target is None: + # existing behavior is that if the object is + # garbage collected, no event is emitted + return None + else: + target = state # type: ignore [assignment] + if restore_load_context: + runid = state.runid + try: + return fn(session, target, *arg, **kw) + finally: + if restore_load_context: + state.runid = runid + + event_key = event_key.with_wrapper(wrap) + + event_key.base_listen(**kw) + + def do_orm_execute(self, orm_execute_state: ORMExecuteState) -> None: + """Intercept statement executions that occur on behalf of an + ORM :class:`.Session` object. + + This event is invoked for all top-level SQL statements invoked from the + :meth:`_orm.Session.execute` method, as well as related methods such as + :meth:`_orm.Session.scalars` and :meth:`_orm.Session.scalar`. As of + SQLAlchemy 1.4, all ORM queries that run through the + :meth:`_orm.Session.execute` method as well as related methods + :meth:`_orm.Session.scalars`, :meth:`_orm.Session.scalar` etc. + will participate in this event. + This event hook does **not** apply to the queries that are + emitted internally within the ORM flush process, i.e. the + process described at :ref:`session_flushing`. + + .. note:: The :meth:`_orm.SessionEvents.do_orm_execute` event hook + is triggered **for ORM statement executions only**, meaning those + invoked via the :meth:`_orm.Session.execute` and similar methods on + the :class:`_orm.Session` object. It does **not** trigger for + statements that are invoked by SQLAlchemy Core only, i.e. statements + invoked directly using :meth:`_engine.Connection.execute` or + otherwise originating from an :class:`_engine.Engine` object without + any :class:`_orm.Session` involved. To intercept **all** SQL + executions regardless of whether the Core or ORM APIs are in use, + see the event hooks at :class:`.ConnectionEvents`, such as + :meth:`.ConnectionEvents.before_execute` and + :meth:`.ConnectionEvents.before_cursor_execute`. + + Also, this event hook does **not** apply to queries that are + emitted internally within the ORM flush process, + i.e. the process described at :ref:`session_flushing`; to + intercept steps within the flush process, see the event + hooks described at :ref:`session_persistence_events` as + well as :ref:`session_persistence_mapper`. + + This event is a ``do_`` event, meaning it has the capability to replace + the operation that the :meth:`_orm.Session.execute` method normally + performs. 
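+        As a minimal sketch, the hook can modify the statement or its
+        execution options in place before it is invoked; ``Session`` here
+        stands for whatever :class:`.Session` class or ``sessionmaker`` is
+        in use::
+
+            from sqlalchemy import event
+
+
+            @event.listens_for(Session, "do_orm_execute")
+            def _set_execution_options(orm_execute_state):
+                if orm_execute_state.is_select:
+                    # replace the statement with one carrying an extra
+                    # execution option; the modified statement is what
+                    # will actually be invoked
+                    orm_execute_state.statement = (
+                        orm_execute_state.statement.execution_options(
+                            populate_existing=True
+                        )
+                    )
+
+        Beyond modifying the statement as above, the hook may also take over
+        the execution entirely, for example by returning results obtained
+        via :meth:`.ORMExecuteState.invoke_statement`.
+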
The intended use for this includes sharding and + result-caching schemes which may seek to invoke the same statement + across multiple database connections, returning a result that is + merged from each of them, or which don't invoke the statement at all, + instead returning data from a cache. + + The hook intends to replace the use of the + ``Query._execute_and_instances`` method that could be subclassed prior + to SQLAlchemy 1.4. + + :param orm_execute_state: an instance of :class:`.ORMExecuteState` + which contains all information about the current execution, as well + as helper functions used to derive other commonly required + information. See that object for details. + + .. seealso:: + + :ref:`session_execute_events` - top level documentation on how + to use :meth:`_orm.SessionEvents.do_orm_execute` + + :class:`.ORMExecuteState` - the object passed to the + :meth:`_orm.SessionEvents.do_orm_execute` event which contains + all information about the statement to be invoked. It also + provides an interface to extend the current statement, options, + and parameters as well as an option that allows programmatic + invocation of the statement at any point. + + :ref:`examples_session_orm_events` - includes examples of using + :meth:`_orm.SessionEvents.do_orm_execute` + + :ref:`examples_caching` - an example of how to integrate + Dogpile caching with the ORM :class:`_orm.Session` making use + of the :meth:`_orm.SessionEvents.do_orm_execute` event hook. + + :ref:`examples_sharding` - the Horizontal Sharding example / + extension relies upon the + :meth:`_orm.SessionEvents.do_orm_execute` event hook to invoke a + SQL statement on multiple backends and return a merged result. + + + .. versionadded:: 1.4 + + """ + + def after_transaction_create( + self, session: Session, transaction: SessionTransaction + ) -> None: + """Execute when a new :class:`.SessionTransaction` is created. + + This event differs from :meth:`~.SessionEvents.after_begin` + in that it occurs for each :class:`.SessionTransaction` + overall, as opposed to when transactions are begun + on individual database connections. It is also invoked + for nested transactions and subtransactions, and is always + matched by a corresponding + :meth:`~.SessionEvents.after_transaction_end` event + (assuming normal operation of the :class:`.Session`). + + :param session: the target :class:`.Session`. + :param transaction: the target :class:`.SessionTransaction`. + + To detect if this is the outermost + :class:`.SessionTransaction`, as opposed to a "subtransaction" or a + SAVEPOINT, test that the :attr:`.SessionTransaction.parent` attribute + is ``None``:: + + @event.listens_for(session, "after_transaction_create") + def after_transaction_create(session, transaction): + if transaction.parent is None: + ... # work with top-level transaction + + To detect if the :class:`.SessionTransaction` is a SAVEPOINT, use the + :attr:`.SessionTransaction.nested` attribute:: + + @event.listens_for(session, "after_transaction_create") + def after_transaction_create(session, transaction): + if transaction.nested: + ... # work with SAVEPOINT transaction + + .. seealso:: + + :class:`.SessionTransaction` + + :meth:`~.SessionEvents.after_transaction_end` + + """ + + def after_transaction_end( + self, session: Session, transaction: SessionTransaction + ) -> None: + """Execute when the span of a :class:`.SessionTransaction` ends. 
+
+        This event differs from :meth:`~.SessionEvents.after_commit`
+        in that it corresponds to all :class:`.SessionTransaction`
+        objects in use, including those for nested transactions
+        and subtransactions, and is always matched by a corresponding
+        :meth:`~.SessionEvents.after_transaction_create` event.
+
+        :param session: the target :class:`.Session`.
+        :param transaction: the target :class:`.SessionTransaction`.
+
+        To detect if this is the outermost
+        :class:`.SessionTransaction`, as opposed to a "subtransaction" or a
+        SAVEPOINT, test that the :attr:`.SessionTransaction.parent` attribute
+        is ``None``::
+
+            @event.listens_for(session, "after_transaction_end")
+            def after_transaction_end(session, transaction):
+                if transaction.parent is None:
+                    ...  # work with top-level transaction
+
+        To detect if the :class:`.SessionTransaction` is a SAVEPOINT, use the
+        :attr:`.SessionTransaction.nested` attribute::
+
+            @event.listens_for(session, "after_transaction_end")
+            def after_transaction_end(session, transaction):
+                if transaction.nested:
+                    ...  # work with SAVEPOINT transaction
+
+        .. seealso::
+
+            :class:`.SessionTransaction`
+
+            :meth:`~.SessionEvents.after_transaction_create`
+
+        """
+
+    def before_commit(self, session: Session) -> None:
+        """Execute before commit is called.
+
+        .. note::
+
+            The :meth:`~.SessionEvents.before_commit` hook is *not* per-flush,
+            that is, the :class:`.Session` can emit SQL to the database
+            many times within the scope of a transaction.
+            For interception of these events, use the
+            :meth:`~.SessionEvents.before_flush`,
+            :meth:`~.SessionEvents.after_flush`, or
+            :meth:`~.SessionEvents.after_flush_postexec`
+            events.
+
+        :param session: The target :class:`.Session`.
+
+        .. seealso::
+
+            :meth:`~.SessionEvents.after_commit`
+
+            :meth:`~.SessionEvents.after_begin`
+
+            :meth:`~.SessionEvents.after_transaction_create`
+
+            :meth:`~.SessionEvents.after_transaction_end`
+
+        """
+
+    def after_commit(self, session: Session) -> None:
+        """Execute after a commit has occurred.
+
+        .. note::
+
+            The :meth:`~.SessionEvents.after_commit` hook is *not* per-flush,
+            that is, the :class:`.Session` can emit SQL to the database
+            many times within the scope of a transaction.
+            For interception of these events, use the
+            :meth:`~.SessionEvents.before_flush`,
+            :meth:`~.SessionEvents.after_flush`, or
+            :meth:`~.SessionEvents.after_flush_postexec`
+            events.
+
+        .. note::
+
+            The :class:`.Session` is not in an active transaction
+            when the :meth:`~.SessionEvents.after_commit` event is invoked,
+            and therefore can not emit SQL.  To emit SQL corresponding to
+            every transaction, use the :meth:`~.SessionEvents.before_commit`
+            event.
+
+        :param session: The target :class:`.Session`.
+
+        .. seealso::
+
+            :meth:`~.SessionEvents.before_commit`
+
+            :meth:`~.SessionEvents.after_begin`
+
+            :meth:`~.SessionEvents.after_transaction_create`
+
+            :meth:`~.SessionEvents.after_transaction_end`
+
+        """
+
+    def after_rollback(self, session: Session) -> None:
+        """Execute after a real DBAPI rollback has occurred.
+
+        Note that this event only fires when the *actual* rollback against
+        the database occurs - it does *not* fire each time the
+        :meth:`.Session.rollback` method is called, if the underlying
+        DBAPI transaction has already been rolled back.  In many
+        cases, the :class:`.Session` will not be in
+        an "active" state during this event, as the current
+        transaction is not valid.
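+        A sketch of a listener reacting only to real DBAPI-level rollbacks
+        (``log`` standing in for any application logger)::
+
+            from sqlalchemy import event
+
+
+            @event.listens_for(Session, "after_rollback")
+            def receive_after_rollback(session):
+                # fires only when an actual ROLLBACK was emitted
+                log.warning("database transaction was rolled back")
+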
To acquire a :class:`.Session` + which is active after the outermost rollback has proceeded, + use the :meth:`.SessionEvents.after_soft_rollback` event, checking the + :attr:`.Session.is_active` flag. + + :param session: The target :class:`.Session`. + + """ + + def after_soft_rollback( + self, session: Session, previous_transaction: SessionTransaction + ) -> None: + """Execute after any rollback has occurred, including "soft" + rollbacks that don't actually emit at the DBAPI level. + + This corresponds to both nested and outer rollbacks, i.e. + the innermost rollback that calls the DBAPI's + rollback() method, as well as the enclosing rollback + calls that only pop themselves from the transaction stack. + + The given :class:`.Session` can be used to invoke SQL and + :meth:`.Session.query` operations after an outermost rollback + by first checking the :attr:`.Session.is_active` flag:: + + @event.listens_for(Session, "after_soft_rollback") + def do_something(session, previous_transaction): + if session.is_active: + session.execute(text("select * from some_table")) + + :param session: The target :class:`.Session`. + :param previous_transaction: The :class:`.SessionTransaction` + transactional marker object which was just closed. The current + :class:`.SessionTransaction` for the given :class:`.Session` is + available via the :attr:`.Session.transaction` attribute. + + """ + + def before_flush( + self, + session: Session, + flush_context: UOWTransaction, + instances: Optional[Sequence[_O]], + ) -> None: + """Execute before flush process has started. + + :param session: The target :class:`.Session`. + :param flush_context: Internal :class:`.UOWTransaction` object + which handles the details of the flush. + :param instances: Usually ``None``, this is the collection of + objects which can be passed to the :meth:`.Session.flush` method + (note this usage is deprecated). + + .. seealso:: + + :meth:`~.SessionEvents.after_flush` + + :meth:`~.SessionEvents.after_flush_postexec` + + :ref:`session_persistence_events` + + """ + + def after_flush( + self, session: Session, flush_context: UOWTransaction + ) -> None: + """Execute after flush has completed, but before commit has been + called. + + Note that the session's state is still in pre-flush, i.e. 'new', + 'dirty', and 'deleted' lists still show pre-flush state as well + as the history settings on instance attributes. + + .. warning:: This event runs after the :class:`.Session` has emitted + SQL to modify the database, but **before** it has altered its + internal state to reflect those changes, including that newly + inserted objects are placed into the identity map. ORM operations + emitted within this event such as loads of related items + may produce new identity map entries that will immediately + be replaced, sometimes causing confusing results. SQLAlchemy will + emit a warning for this condition as of version 1.3.9. + + :param session: The target :class:`.Session`. + :param flush_context: Internal :class:`.UOWTransaction` object + which handles the details of the flush. + + .. seealso:: + + :meth:`~.SessionEvents.before_flush` + + :meth:`~.SessionEvents.after_flush_postexec` + + :ref:`session_persistence_events` + + """ + + def after_flush_postexec( + self, session: Session, flush_context: UOWTransaction + ) -> None: + """Execute after flush has completed, and after the post-exec + state occurs. + + This will be when the 'new', 'dirty', and 'deleted' lists are in + their final state. 
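+        For illustration, a sketch that inspects the post-flush state of
+        the :class:`.Session` using its standard collection accessors::
+
+            from sqlalchemy import event
+
+
+            @event.listens_for(Session, "after_flush_postexec")
+            def receive_after_flush_postexec(session, flush_context):
+                # the flush has fully completed; objects that were pending
+                # are now persistent, so these collections reflect the
+                # final post-flush state
+                print(len(session.new), len(session.dirty), len(session.deleted))
+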
An actual commit() may or may not have + occurred, depending on whether or not the flush started its own + transaction or participated in a larger transaction. + + :param session: The target :class:`.Session`. + :param flush_context: Internal :class:`.UOWTransaction` object + which handles the details of the flush. + + + .. seealso:: + + :meth:`~.SessionEvents.before_flush` + + :meth:`~.SessionEvents.after_flush` + + :ref:`session_persistence_events` + + """ + + def after_begin( + self, + session: Session, + transaction: SessionTransaction, + connection: Connection, + ) -> None: + """Execute after a transaction is begun on a connection. + + .. note:: This event is called within the process of the + :class:`_orm.Session` modifying its own internal state. + To invoke SQL operations within this hook, use the + :class:`_engine.Connection` provided to the event; + do not run SQL operations using the :class:`_orm.Session` + directly. + + :param session: The target :class:`.Session`. + :param transaction: The :class:`.SessionTransaction`. + :param connection: The :class:`_engine.Connection` object + which will be used for SQL statements. + + .. seealso:: + + :meth:`~.SessionEvents.before_commit` + + :meth:`~.SessionEvents.after_commit` + + :meth:`~.SessionEvents.after_transaction_create` + + :meth:`~.SessionEvents.after_transaction_end` + + """ + + @_lifecycle_event + def before_attach(self, session: Session, instance: _O) -> None: + """Execute before an instance is attached to a session. + + This is called before an add, delete or merge causes + the object to be part of the session. + + .. seealso:: + + :meth:`~.SessionEvents.after_attach` + + :ref:`session_lifecycle_events` + + """ + + @_lifecycle_event + def after_attach(self, session: Session, instance: _O) -> None: + """Execute after an instance is attached to a session. + + This is called after an add, delete or merge. + + .. note:: + + As of 0.8, this event fires off *after* the item + has been fully associated with the session, which is + different than previous releases. For event + handlers that require the object not yet + be part of session state (such as handlers which + may autoflush while the target object is not + yet complete) consider the + new :meth:`.before_attach` event. + + .. seealso:: + + :meth:`~.SessionEvents.before_attach` + + :ref:`session_lifecycle_events` + + """ + + @event._legacy_signature( + "0.9", + ["session", "query", "query_context", "result"], + lambda update_context: ( + update_context.session, + update_context.query, + None, + update_context.result, + ), + ) + def after_bulk_update(self, update_context: _O) -> None: + """Event for after the legacy :meth:`_orm.Query.update` method + has been called. + + .. legacy:: The :meth:`_orm.SessionEvents.after_bulk_update` method + is a legacy event hook as of SQLAlchemy 2.0. The event + **does not participate** in :term:`2.0 style` invocations + using :func:`_dml.update` documented at + :ref:`orm_queryguide_update_delete_where`. For 2.0 style use, + the :meth:`_orm.SessionEvents.do_orm_execute` hook will intercept + these calls. + + :param update_context: an "update context" object which contains + details about the update, including these attributes: + + * ``session`` - the :class:`.Session` involved + * ``query`` -the :class:`_query.Query` + object that this update operation + was called upon. + * ``values`` The "values" dictionary that was passed to + :meth:`_query.Query.update`. 
+
+            * ``result`` - the :class:`_engine.CursorResult`
+              returned as a result of the
+              bulk UPDATE operation.
+
+        .. versionchanged:: 1.4 the update_context no longer has a
+           ``QueryContext`` object associated with it.
+
+        .. seealso::
+
+            :meth:`.QueryEvents.before_compile_update`
+
+            :meth:`.SessionEvents.after_bulk_delete`
+
+        """
+
+    @event._legacy_signature(
+        "0.9",
+        ["session", "query", "query_context", "result"],
+        lambda delete_context: (
+            delete_context.session,
+            delete_context.query,
+            None,
+            delete_context.result,
+        ),
+    )
+    def after_bulk_delete(self, delete_context: _O) -> None:
+        """Event for after the legacy :meth:`_orm.Query.delete` method
+        has been called.
+
+        .. legacy:: The :meth:`_orm.SessionEvents.after_bulk_delete` method
+           is a legacy event hook as of SQLAlchemy 2.0.   The event
+           **does not participate** in :term:`2.0 style` invocations
+           using :func:`_dml.delete` documented at
+           :ref:`orm_queryguide_update_delete_where`.  For 2.0 style use,
+           the :meth:`_orm.SessionEvents.do_orm_execute` hook will intercept
+           these calls.
+
+        :param delete_context: a "delete context" object which contains
+         details about the delete, including these attributes:
+
+            * ``session`` - the :class:`.Session` involved
+            * ``query`` - the :class:`_query.Query`
+              object that this delete operation
+              was called upon.
+            * ``result`` - the :class:`_engine.CursorResult`
+              returned as a result of the
+              bulk DELETE operation.
+
+        .. versionchanged:: 1.4 the delete_context no longer has a
+           ``QueryContext`` object associated with it.
+
+        .. seealso::
+
+            :meth:`.QueryEvents.before_compile_delete`
+
+            :meth:`.SessionEvents.after_bulk_update`
+
+        """
+
+    @_lifecycle_event
+    def transient_to_pending(self, session: Session, instance: _O) -> None:
+        """Intercept the "transient to pending" transition for a specific
+        object.
+
+        This event is a specialization of the
+        :meth:`.SessionEvents.after_attach` event which is only invoked
+        for this specific transition.  It is invoked typically during the
+        :meth:`.Session.add` call.
+
+        :param session: target :class:`.Session`
+
+        :param instance: the ORM-mapped instance being operated upon.
+
+        .. seealso::
+
+            :ref:`session_lifecycle_events`
+
+        """
+
+    @_lifecycle_event
+    def pending_to_transient(self, session: Session, instance: _O) -> None:
+        """Intercept the "pending to transient" transition for a specific
+        object.
+
+        This less common transition occurs when a pending object that has
+        not been flushed is evicted from the session; this can occur
+        when the :meth:`.Session.rollback` method rolls back the transaction,
+        or when the :meth:`.Session.expunge` method is used.
+
+        :param session: target :class:`.Session`
+
+        :param instance: the ORM-mapped instance being operated upon.
+
+        .. seealso::
+
+            :ref:`session_lifecycle_events`
+
+        """
+
+    @_lifecycle_event
+    def persistent_to_transient(self, session: Session, instance: _O) -> None:
+        """Intercept the "persistent to transient" transition for a specific
+        object.
+
+        This less common transition occurs when a pending object that has
+        been flushed is evicted from the session; this can occur
+        when the :meth:`.Session.rollback` method rolls back the transaction.
+
+        :param session: target :class:`.Session`
+
+        :param instance: the ORM-mapped instance being operated upon.
+
+        .. seealso::
+
+            :ref:`session_lifecycle_events`
+
+        """
+
+    @_lifecycle_event
+    def pending_to_persistent(self, session: Session, instance: _O) -> None:
+        """Intercept the "pending to persistent" transition for a specific
+        object.
+ + This event is invoked within the flush process, and is + similar to scanning the :attr:`.Session.new` collection within + the :meth:`.SessionEvents.after_flush` event. However, in this + case the object has already been moved to the persistent state + when the event is called. + + :param session: target :class:`.Session` + + :param instance: the ORM-mapped instance being operated upon. + + .. seealso:: + + :ref:`session_lifecycle_events` + + """ + + @_lifecycle_event + def detached_to_persistent(self, session: Session, instance: _O) -> None: + """Intercept the "detached to persistent" transition for a specific + object. + + This event is a specialization of the + :meth:`.SessionEvents.after_attach` event which is only invoked + for this specific transition. It is invoked typically during the + :meth:`.Session.add` call, as well as during the + :meth:`.Session.delete` call if the object was not previously + associated with the + :class:`.Session` (note that an object marked as "deleted" remains + in the "persistent" state until the flush proceeds). + + .. note:: + + If the object becomes persistent as part of a call to + :meth:`.Session.delete`, the object is **not** yet marked as + deleted when this event is called. To detect deleted objects, + check the ``deleted`` flag sent to the + :meth:`.SessionEvents.persistent_to_detached` to event after the + flush proceeds, or check the :attr:`.Session.deleted` collection + within the :meth:`.SessionEvents.before_flush` event if deleted + objects need to be intercepted before the flush. + + :param session: target :class:`.Session` + + :param instance: the ORM-mapped instance being operated upon. + + .. seealso:: + + :ref:`session_lifecycle_events` + + """ + + @_lifecycle_event + def loaded_as_persistent(self, session: Session, instance: _O) -> None: + """Intercept the "loaded as persistent" transition for a specific + object. + + This event is invoked within the ORM loading process, and is invoked + very similarly to the :meth:`.InstanceEvents.load` event. However, + the event here is linkable to a :class:`.Session` class or instance, + rather than to a mapper or class hierarchy, and integrates + with the other session lifecycle events smoothly. The object + is guaranteed to be present in the session's identity map when + this event is called. + + .. note:: This event is invoked within the loader process before + eager loaders may have been completed, and the object's state may + not be complete. Additionally, invoking row-level refresh + operations on the object will place the object into a new loader + context, interfering with the existing load context. See the note + on :meth:`.InstanceEvents.load` for background on making use of the + :paramref:`.SessionEvents.restore_load_context` parameter, which + works in the same manner as that of + :paramref:`.InstanceEvents.restore_load_context`, in order to + resolve this scenario. + + :param session: target :class:`.Session` + + :param instance: the ORM-mapped instance being operated upon. + + .. seealso:: + + :ref:`session_lifecycle_events` + + """ + + @_lifecycle_event + def persistent_to_deleted(self, session: Session, instance: _O) -> None: + """Intercept the "persistent to deleted" transition for a specific + object. + + This event is invoked when a persistent object's identity + is deleted from the database within a flush, however the object + still remains associated with the :class:`.Session` until the + transaction completes. 
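+        A sketch of a listener recording objects deleted within the flush
+        (``log`` standing in for any application logger)::
+
+            from sqlalchemy import event
+
+
+            @event.listens_for(Session, "persistent_to_deleted")
+            def receive_persistent_to_deleted(session, instance):
+                # the DELETE has been emitted, but the object remains
+                # associated with the Session until the transaction ends
+                log.info("deleted in flush: %r", instance)
+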
+ + If the transaction is rolled back, the object moves again + to the persistent state, and the + :meth:`.SessionEvents.deleted_to_persistent` event is called. + If the transaction is committed, the object becomes detached, + which will emit the :meth:`.SessionEvents.deleted_to_detached` + event. + + Note that while the :meth:`.Session.delete` method is the primary + public interface to mark an object as deleted, many objects + get deleted due to cascade rules, which are not always determined + until flush time. Therefore, there's no way to catch + every object that will be deleted until the flush has proceeded. + the :meth:`.SessionEvents.persistent_to_deleted` event is therefore + invoked at the end of a flush. + + .. seealso:: + + :ref:`session_lifecycle_events` + + """ + + @_lifecycle_event + def deleted_to_persistent(self, session: Session, instance: _O) -> None: + """Intercept the "deleted to persistent" transition for a specific + object. + + This transition occurs only when an object that's been deleted + successfully in a flush is restored due to a call to + :meth:`.Session.rollback`. The event is not called under + any other circumstances. + + .. seealso:: + + :ref:`session_lifecycle_events` + + """ + + @_lifecycle_event + def deleted_to_detached(self, session: Session, instance: _O) -> None: + """Intercept the "deleted to detached" transition for a specific + object. + + This event is invoked when a deleted object is evicted + from the session. The typical case when this occurs is when + the transaction for a :class:`.Session` in which the object + was deleted is committed; the object moves from the deleted + state to the detached state. + + It is also invoked for objects that were deleted in a flush + when the :meth:`.Session.expunge_all` or :meth:`.Session.close` + events are called, as well as if the object is individually + expunged from its deleted state via :meth:`.Session.expunge`. + + .. seealso:: + + :ref:`session_lifecycle_events` + + """ + + @_lifecycle_event + def persistent_to_detached(self, session: Session, instance: _O) -> None: + """Intercept the "persistent to detached" transition for a specific + object. + + This event is invoked when a persistent object is evicted + from the session. There are many conditions that cause this + to happen, including: + + * using a method such as :meth:`.Session.expunge` + or :meth:`.Session.close` + + * Calling the :meth:`.Session.rollback` method, when the object + was part of an INSERT statement for that session's transaction + + + :param session: target :class:`.Session` + + :param instance: the ORM-mapped instance being operated upon. + + :param deleted: boolean. If True, indicates this object moved + to the detached state because it was marked as deleted and flushed. + + + .. seealso:: + + :ref:`session_lifecycle_events` + + """ + + +class AttributeEvents(event.Events[QueryableAttribute[Any]]): + r"""Define events for object attributes. + + These are typically defined on the class-bound descriptor for the + target class. 
+ + For example, to register a listener that will receive the + :meth:`_orm.AttributeEvents.append` event:: + + from sqlalchemy import event + + + @event.listens_for(MyClass.collection, "append", propagate=True) + def my_append_listener(target, value, initiator): + print("received append event for target: %s" % target) + + Listeners have the option to return a possibly modified version of the + value, when the :paramref:`.AttributeEvents.retval` flag is passed to + :func:`.event.listen` or :func:`.event.listens_for`, such as below, + illustrated using the :meth:`_orm.AttributeEvents.set` event:: + + def validate_phone(target, value, oldvalue, initiator): + "Strip non-numeric characters from a phone number" + + return re.sub(r"\D", "", value) + + + # setup listener on UserContact.phone attribute, instructing + # it to use the return value + listen(UserContact.phone, "set", validate_phone, retval=True) + + A validation function like the above can also raise an exception + such as :exc:`ValueError` to halt the operation. + + The :paramref:`.AttributeEvents.propagate` flag is also important when + applying listeners to mapped classes that also have mapped subclasses, + as when using mapper inheritance patterns:: + + + @event.listens_for(MySuperClass.attr, "set", propagate=True) + def receive_set(target, value, initiator): + print("value set: %s" % target) + + The full list of modifiers available to the :func:`.event.listen` + and :func:`.event.listens_for` functions are below. + + :param active_history=False: When True, indicates that the + "set" event would like to receive the "old" value being + replaced unconditionally, even if this requires firing off + database loads. Note that ``active_history`` can also be + set directly via :func:`.column_property` and + :func:`_orm.relationship`. + + :param propagate=False: When True, the listener function will + be established not just for the class attribute given, but + for attributes of the same name on all current subclasses + of that class, as well as all future subclasses of that + class, using an additional listener that listens for + instrumentation events. + :param raw=False: When True, the "target" argument to the + event will be the :class:`.InstanceState` management + object, rather than the mapped instance itself. + :param retval=False: when True, the user-defined event + listening must return the "value" argument from the + function. This gives the listening function the opportunity + to change the value that is ultimately used for a "set" + or "append" event. 
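+
+    For instance, a sketch (``MyClass.data`` standing in for any mapped
+    column-based attribute) that combines ``active_history=True`` with the
+    :meth:`_orm.AttributeEvents.set` event, so that the previous value is
+    always loaded and passed as ``oldvalue``::
+
+        from sqlalchemy import event
+
+
+        @event.listens_for(MyClass.data, "set", active_history=True)
+        def receive_set(target, value, oldvalue, initiator):
+            print("replacing %r with %r" % (oldvalue, value))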
+ + """ + + _target_class_doc = "SomeClass.some_attribute" + _dispatch_target = QueryableAttribute + + @staticmethod + def _set_dispatch( + cls: Type[_HasEventsDispatch[Any]], dispatch_cls: Type[_Dispatch[Any]] + ) -> _Dispatch[Any]: + dispatch = event.Events._set_dispatch(cls, dispatch_cls) + dispatch_cls._active_history = False + return dispatch + + @classmethod + def _accept_with( + cls, + target: Union[QueryableAttribute[Any], Type[QueryableAttribute[Any]]], + identifier: str, + ) -> Union[QueryableAttribute[Any], Type[QueryableAttribute[Any]]]: + # TODO: coverage + if isinstance(target, interfaces.MapperProperty): + return getattr(target.parent.class_, target.key) + else: + return target + + @classmethod + def _listen( # type: ignore [override] + cls, + event_key: _EventKey[QueryableAttribute[Any]], + active_history: bool = False, + raw: bool = False, + retval: bool = False, + propagate: bool = False, + include_key: bool = False, + ) -> None: + target, fn = event_key.dispatch_target, event_key._listen_fn + + if active_history: + target.dispatch._active_history = True + + if not raw or not retval or not include_key: + + def wrap(target: InstanceState[_O], *arg: Any, **kw: Any) -> Any: + if not raw: + target = target.obj() # type: ignore [assignment] + if not retval: + if arg: + value = arg[0] + else: + value = None + if include_key: + fn(target, *arg, **kw) + else: + fn(target, *arg) + return value + else: + if include_key: + return fn(target, *arg, **kw) + else: + return fn(target, *arg) + + event_key = event_key.with_wrapper(wrap) + + event_key.base_listen(propagate=propagate) + + if propagate: + manager = instrumentation.manager_of_class(target.class_) + + for mgr in manager.subclass_managers(True): # type: ignore [no-untyped-call] # noqa: E501 + event_key.with_dispatch_target(mgr[target.key]).base_listen( + propagate=True + ) + if active_history: + mgr[target.key].dispatch._active_history = True + + def append( + self, + target: _O, + value: _T, + initiator: Event, + *, + key: EventConstants = NO_KEY, + ) -> Optional[_T]: + """Receive a collection append event. + + The append event is invoked for each element as it is appended + to the collection. This occurs for single-item appends as well + as for a "bulk replace" operation. + + :param target: the object instance receiving the event. + If the listener is registered with ``raw=True``, this will + be the :class:`.InstanceState` object. + :param value: the value being appended. If this listener + is registered with ``retval=True``, the listener + function must return this value, or a new value which + replaces it. + :param initiator: An instance of :class:`.attributes.Event` + representing the initiation of the event. May be modified + from its original value by backref handlers in order to control + chained event propagation, as well as be inspected for information + about the source of the event. + :param key: When the event is established using the + :paramref:`.AttributeEvents.include_key` parameter set to + True, this will be the key used in the operation, such as + ``collection[some_key_or_index] = value``. + The parameter is not passed + to the event at all if the the + :paramref:`.AttributeEvents.include_key` + was not used to set up the event; this is to allow backwards + compatibility with existing event handlers that don't include the + ``key`` parameter. + + .. versionadded:: 2.0 + + :return: if the event was registered with ``retval=True``, + the given value, or a new effective value, should be returned. + + .. 
seealso:: + + :class:`.AttributeEvents` - background on listener options such + as propagation to subclasses. + + :meth:`.AttributeEvents.bulk_replace` + + """ + + def append_wo_mutation( + self, + target: _O, + value: _T, + initiator: Event, + *, + key: EventConstants = NO_KEY, + ) -> None: + """Receive a collection append event where the collection was not + actually mutated. + + This event differs from :meth:`_orm.AttributeEvents.append` in that + it is fired off for de-duplicating collections such as sets and + dictionaries, when the object already exists in the target collection. + The event does not have a return value and the identity of the + given object cannot be changed. + + The event is used for cascading objects into a :class:`_orm.Session` + when the collection has already been mutated via a backref event. + + :param target: the object instance receiving the event. + If the listener is registered with ``raw=True``, this will + be the :class:`.InstanceState` object. + :param value: the value that would be appended if the object did not + already exist in the collection. + :param initiator: An instance of :class:`.attributes.Event` + representing the initiation of the event. May be modified + from its original value by backref handlers in order to control + chained event propagation, as well as be inspected for information + about the source of the event. + :param key: When the event is established using the + :paramref:`.AttributeEvents.include_key` parameter set to + True, this will be the key used in the operation, such as + ``collection[some_key_or_index] = value``. + The parameter is not passed + to the event at all if the the + :paramref:`.AttributeEvents.include_key` + was not used to set up the event; this is to allow backwards + compatibility with existing event handlers that don't include the + ``key`` parameter. + + .. versionadded:: 2.0 + + :return: No return value is defined for this event. + + .. versionadded:: 1.4.15 + + """ + + def bulk_replace( + self, + target: _O, + values: Iterable[_T], + initiator: Event, + *, + keys: Optional[Iterable[EventConstants]] = None, + ) -> None: + """Receive a collection 'bulk replace' event. + + This event is invoked for a sequence of values as they are incoming + to a bulk collection set operation, which can be + modified in place before the values are treated as ORM objects. + This is an "early hook" that runs before the bulk replace routine + attempts to reconcile which objects are already present in the + collection and which are being removed by the net replace operation. + + It is typical that this method be combined with use of the + :meth:`.AttributeEvents.append` event. When using both of these + events, note that a bulk replace operation will invoke + the :meth:`.AttributeEvents.append` event for all new items, + even after :meth:`.AttributeEvents.bulk_replace` has been invoked + for the collection as a whole. 
In order to determine if an + :meth:`.AttributeEvents.append` event is part of a bulk replace, + use the symbol :attr:`~.attributes.OP_BULK_REPLACE` to test the + incoming initiator:: + + from sqlalchemy.orm.attributes import OP_BULK_REPLACE + + + @event.listens_for(SomeObject.collection, "bulk_replace") + def process_collection(target, values, initiator): + values[:] = [_make_value(value) for value in values] + + + @event.listens_for(SomeObject.collection, "append", retval=True) + def process_collection(target, value, initiator): + # make sure bulk_replace didn't already do it + if initiator is None or initiator.op is not OP_BULK_REPLACE: + return _make_value(value) + else: + return value + + .. versionadded:: 1.2 + + :param target: the object instance receiving the event. + If the listener is registered with ``raw=True``, this will + be the :class:`.InstanceState` object. + :param value: a sequence (e.g. a list) of the values being set. The + handler can modify this list in place. + :param initiator: An instance of :class:`.attributes.Event` + representing the initiation of the event. + :param keys: When the event is established using the + :paramref:`.AttributeEvents.include_key` parameter set to + True, this will be the sequence of keys used in the operation, + typically only for a dictionary update. The parameter is not passed + to the event at all if the the + :paramref:`.AttributeEvents.include_key` + was not used to set up the event; this is to allow backwards + compatibility with existing event handlers that don't include the + ``key`` parameter. + + .. versionadded:: 2.0 + + .. seealso:: + + :class:`.AttributeEvents` - background on listener options such + as propagation to subclasses. + + + """ + + def remove( + self, + target: _O, + value: _T, + initiator: Event, + *, + key: EventConstants = NO_KEY, + ) -> None: + """Receive a collection remove event. + + :param target: the object instance receiving the event. + If the listener is registered with ``raw=True``, this will + be the :class:`.InstanceState` object. + :param value: the value being removed. + :param initiator: An instance of :class:`.attributes.Event` + representing the initiation of the event. May be modified + from its original value by backref handlers in order to control + chained event propagation. + + :param key: When the event is established using the + :paramref:`.AttributeEvents.include_key` parameter set to + True, this will be the key used in the operation, such as + ``del collection[some_key_or_index]``. The parameter is not passed + to the event at all if the the + :paramref:`.AttributeEvents.include_key` + was not used to set up the event; this is to allow backwards + compatibility with existing event handlers that don't include the + ``key`` parameter. + + .. versionadded:: 2.0 + + :return: No return value is defined for this event. + + + .. seealso:: + + :class:`.AttributeEvents` - background on listener options such + as propagation to subclasses. + + """ + + def set( + self, target: _O, value: _T, oldvalue: _T, initiator: Event + ) -> None: + """Receive a scalar set event. + + :param target: the object instance receiving the event. + If the listener is registered with ``raw=True``, this will + be the :class:`.InstanceState` object. + :param value: the value being set. If this listener + is registered with ``retval=True``, the listener + function must return this value, or a new value which + replaces it. + :param oldvalue: the previous value being replaced. 
This + may also be the symbol ``NEVER_SET`` or ``NO_VALUE``. + If the listener is registered with ``active_history=True``, + the previous value of the attribute will be loaded from + the database if the existing value is currently unloaded + or expired. + :param initiator: An instance of :class:`.attributes.Event` + representing the initiation of the event. May be modified + from its original value by backref handlers in order to control + chained event propagation. + + :return: if the event was registered with ``retval=True``, + the given value, or a new effective value, should be returned. + + .. seealso:: + + :class:`.AttributeEvents` - background on listener options such + as propagation to subclasses. + + """ + + def init_scalar( + self, target: _O, value: _T, dict_: Dict[Any, Any] + ) -> None: + r"""Receive a scalar "init" event. + + This event is invoked when an uninitialized, unpersisted scalar + attribute is accessed, e.g. read:: + + + x = my_object.some_attribute + + The ORM's default behavior when this occurs for an un-initialized + attribute is to return the value ``None``; note this differs from + Python's usual behavior of raising ``AttributeError``. The + event here can be used to customize what value is actually returned, + with the assumption that the event listener would be mirroring + a default generator that is configured on the Core + :class:`_schema.Column` + object as well. + + Since a default generator on a :class:`_schema.Column` + might also produce + a changing value such as a timestamp, the + :meth:`.AttributeEvents.init_scalar` + event handler can also be used to **set** the newly returned value, so + that a Core-level default generation function effectively fires off + only once, but at the moment the attribute is accessed on the + non-persisted object. Normally, no change to the object's state + is made when an uninitialized attribute is accessed (much older + SQLAlchemy versions did in fact change the object's state). + + If a default generator on a column returned a particular constant, + a handler might be used as follows:: + + SOME_CONSTANT = 3.1415926 + + + class MyClass(Base): + # ... + + some_attribute = Column(Numeric, default=SOME_CONSTANT) + + + @event.listens_for( + MyClass.some_attribute, "init_scalar", retval=True, propagate=True + ) + def _init_some_attribute(target, dict_, value): + dict_["some_attribute"] = SOME_CONSTANT + return SOME_CONSTANT + + Above, we initialize the attribute ``MyClass.some_attribute`` to the + value of ``SOME_CONSTANT``. The above code includes the following + features: + + * By setting the value ``SOME_CONSTANT`` in the given ``dict_``, + we indicate that this value is to be persisted to the database. + This supersedes the use of ``SOME_CONSTANT`` in the default generator + for the :class:`_schema.Column`. The ``active_column_defaults.py`` + example given at :ref:`examples_instrumentation` illustrates using + the same approach for a changing default, e.g. a timestamp + generator. In this particular example, it is not strictly + necessary to do this since ``SOME_CONSTANT`` would be part of the + INSERT statement in either case. + + * By establishing the ``retval=True`` flag, the value we return + from the function will be returned by the attribute getter. + Without this flag, the event is assumed to be a passive observer + and the return value of our function is ignored. 
+ + * The ``propagate=True`` flag is significant if the mapped class + includes inheriting subclasses, which would also make use of this + event listener. Without this flag, an inheriting subclass will + not use our event handler. + + In the above example, the attribute set event + :meth:`.AttributeEvents.set` as well as the related validation feature + provided by :obj:`_orm.validates` is **not** invoked when we apply our + value to the given ``dict_``. To have these events to invoke in + response to our newly generated value, apply the value to the given + object as a normal attribute set operation:: + + SOME_CONSTANT = 3.1415926 + + + @event.listens_for( + MyClass.some_attribute, "init_scalar", retval=True, propagate=True + ) + def _init_some_attribute(target, dict_, value): + # will also fire off attribute set events + target.some_attribute = SOME_CONSTANT + return SOME_CONSTANT + + When multiple listeners are set up, the generation of the value + is "chained" from one listener to the next by passing the value + returned by the previous listener that specifies ``retval=True`` + as the ``value`` argument of the next listener. + + :param target: the object instance receiving the event. + If the listener is registered with ``raw=True``, this will + be the :class:`.InstanceState` object. + :param value: the value that is to be returned before this event + listener were invoked. This value begins as the value ``None``, + however will be the return value of the previous event handler + function if multiple listeners are present. + :param dict\_: the attribute dictionary of this mapped object. + This is normally the ``__dict__`` of the object, but in all cases + represents the destination that the attribute system uses to get + at the actual value of this attribute. Placing the value in this + dictionary has the effect that the value will be used in the + INSERT statement generated by the unit of work. + + + .. seealso:: + + :meth:`.AttributeEvents.init_collection` - collection version + of this event + + :class:`.AttributeEvents` - background on listener options such + as propagation to subclasses. + + :ref:`examples_instrumentation` - see the + ``active_column_defaults.py`` example. + + """ # noqa: E501 + + def init_collection( + self, + target: _O, + collection: Type[Collection[Any]], + collection_adapter: CollectionAdapter, + ) -> None: + """Receive a 'collection init' event. + + This event is triggered for a collection-based attribute, when + the initial "empty collection" is first generated for a blank + attribute, as well as for when the collection is replaced with + a new one, such as via a set event. + + E.g., given that ``User.addresses`` is a relationship-based + collection, the event is triggered here:: + + u1 = User() + u1.addresses.append(a1) # <- new collection + + and also during replace operations:: + + u1.addresses = [a2, a3] # <- new collection + + :param target: the object instance receiving the event. + If the listener is registered with ``raw=True``, this will + be the :class:`.InstanceState` object. + :param collection: the new collection. This will always be generated + from what was specified as + :paramref:`_orm.relationship.collection_class`, and will always + be empty. + :param collection_adapter: the :class:`.CollectionAdapter` that will + mediate internal access to the collection. + + .. seealso:: + + :class:`.AttributeEvents` - background on listener options such + as propagation to subclasses. 
+ + :meth:`.AttributeEvents.init_scalar` - "scalar" version of this + event. + + """ + + def dispose_collection( + self, + target: _O, + collection: Collection[Any], + collection_adapter: CollectionAdapter, + ) -> None: + """Receive a 'collection dispose' event. + + This event is triggered for a collection-based attribute when + a collection is replaced, that is:: + + u1.addresses.append(a1) + + u1.addresses = [a2, a3] # <- old collection is disposed + + The old collection received will contain its previous contents. + + .. versionchanged:: 1.2 The collection passed to + :meth:`.AttributeEvents.dispose_collection` will now have its + contents before the dispose intact; previously, the collection + would be empty. + + .. seealso:: + + :class:`.AttributeEvents` - background on listener options such + as propagation to subclasses. + + """ + + def modified(self, target: _O, initiator: Event) -> None: + """Receive a 'modified' event. + + This event is triggered when the :func:`.attributes.flag_modified` + function is used to trigger a modify event on an attribute without + any specific value being set. + + .. versionadded:: 1.2 + + :param target: the object instance receiving the event. + If the listener is registered with ``raw=True``, this will + be the :class:`.InstanceState` object. + + :param initiator: An instance of :class:`.attributes.Event` + representing the initiation of the event. + + .. seealso:: + + :class:`.AttributeEvents` - background on listener options such + as propagation to subclasses. + + """ + + +class QueryEvents(event.Events[Query[Any]]): + """Represent events within the construction of a :class:`_query.Query` + object. + + .. legacy:: The :class:`_orm.QueryEvents` event methods are legacy + as of SQLAlchemy 2.0, and only apply to direct use of the + :class:`_orm.Query` object. They are not used for :term:`2.0 style` + statements. For events to intercept and modify 2.0 style ORM use, + use the :meth:`_orm.SessionEvents.do_orm_execute` hook. + + + The :class:`_orm.QueryEvents` hooks are now superseded by the + :meth:`_orm.SessionEvents.do_orm_execute` event hook. + + """ + + _target_class_doc = "SomeQuery" + _dispatch_target = Query + + def before_compile(self, query: Query[Any]) -> None: + """Receive the :class:`_query.Query` + object before it is composed into a + core :class:`_expression.Select` object. + + .. deprecated:: 1.4 The :meth:`_orm.QueryEvents.before_compile` event + is superseded by the much more capable + :meth:`_orm.SessionEvents.do_orm_execute` hook. In version 1.4, + the :meth:`_orm.QueryEvents.before_compile` event is **no longer + used** for ORM-level attribute loads, such as loads of deferred + or expired attributes as well as relationship loaders. See the + new examples in :ref:`examples_session_orm_events` which + illustrate new ways of intercepting and modifying ORM queries + for the most common purpose of adding arbitrary filter criteria. + + + This event is intended to allow changes to the query given:: + + @event.listens_for(Query, "before_compile", retval=True) + def no_deleted(query): + for desc in query.column_descriptions: + if desc["type"] is User: + entity = desc["entity"] + query = query.filter(entity.deleted == False) + return query + + The event should normally be listened with the ``retval=True`` + parameter set, so that the modified query may be returned. + + The :meth:`.QueryEvents.before_compile` event by default + will disallow "baked" queries from caching a query, if the event + hook returns a new :class:`_query.Query` object. 
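Stepping back to the ``modified`` event documented just above, a hedged sketch of pairing it with :func:`.attributes.flag_modified`; ``Document``, its ``payload`` attribute and ``doc`` are assumed names used only for illustration:

from sqlalchemy import event
from sqlalchemy.orm.attributes import flag_modified


@event.listens_for(Document.payload, "modified")
def _payload_touched(target, initiator):
    # fired by flag_modified(); no value accompanies this event
    print(f"payload on {target!r} flagged as dirty")


doc.payload["status"] = "done"  # in-place change the ORM cannot detect
flag_modified(doc, "payload")   # emits the "modified" event above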
+ This affects both direct + use of the baked query extension as well as its operation within + lazy loaders and eager loaders for relationships. In order to + re-establish the query being cached, apply the event adding the + ``bake_ok`` flag:: + + @event.listens_for(Query, "before_compile", retval=True, bake_ok=True) + def my_event(query): + for desc in query.column_descriptions: + if desc["type"] is User: + entity = desc["entity"] + query = query.filter(entity.deleted == False) + return query + + When ``bake_ok`` is set to True, the event hook will only be invoked + once, and not called for subsequent invocations of a particular query + that is being cached. + + .. versionadded:: 1.3.11 - added the "bake_ok" flag to the + :meth:`.QueryEvents.before_compile` event and disallowed caching via + the "baked" extension from occurring for event handlers that + return a new :class:`_query.Query` object if this flag is not set. + + .. seealso:: + + :meth:`.QueryEvents.before_compile_update` + + :meth:`.QueryEvents.before_compile_delete` + + :ref:`baked_with_before_compile` + + """ # noqa: E501 + + def before_compile_update( + self, query: Query[Any], update_context: BulkUpdate + ) -> None: + """Allow modifications to the :class:`_query.Query` object within + :meth:`_query.Query.update`. + + .. deprecated:: 1.4 The :meth:`_orm.QueryEvents.before_compile_update` + event is superseded by the much more capable + :meth:`_orm.SessionEvents.do_orm_execute` hook. + + Like the :meth:`.QueryEvents.before_compile` event, if the event + is to be used to alter the :class:`_query.Query` object, it should + be configured with ``retval=True``, and the modified + :class:`_query.Query` object returned, as in :: + + @event.listens_for(Query, "before_compile_update", retval=True) + def no_deleted(query, update_context): + for desc in query.column_descriptions: + if desc["type"] is User: + entity = desc["entity"] + query = query.filter(entity.deleted == False) + + update_context.values["timestamp"] = datetime.datetime.now( + datetime.UTC + ) + return query + + The ``.values`` dictionary of the "update context" object can also + be modified in place as illustrated above. + + :param query: a :class:`_query.Query` instance; this is also + the ``.query`` attribute of the given "update context" + object. + + :param update_context: an "update context" object which is + the same kind of object as described in + :paramref:`.QueryEvents.after_bulk_update.update_context`. + The object has a ``.values`` attribute in an UPDATE context which is + the dictionary of parameters passed to :meth:`_query.Query.update`. + This + dictionary can be modified to alter the VALUES clause of the + resulting UPDATE statement. + + .. versionadded:: 1.2.17 + + .. seealso:: + + :meth:`.QueryEvents.before_compile` + + :meth:`.QueryEvents.before_compile_delete` + + + """ # noqa: E501 + + def before_compile_delete( + self, query: Query[Any], delete_context: BulkDelete + ) -> None: + """Allow modifications to the :class:`_query.Query` object within + :meth:`_query.Query.delete`. + + .. deprecated:: 1.4 The :meth:`_orm.QueryEvents.before_compile_delete` + event is superseded by the much more capable + :meth:`_orm.SessionEvents.do_orm_execute` hook. 
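Since the deprecation notes above point to :meth:`_orm.SessionEvents.do_orm_execute`, a hedged sketch of that replacement pattern using :func:`_orm.with_loader_criteria` follows; ``User`` and its ``deleted`` column are assumed for illustration:

from sqlalchemy import event
from sqlalchemy.orm import Session, with_loader_criteria


@event.listens_for(Session, "do_orm_execute")
def _no_deleted(execute_state):
    # applies to top-level SELECTs as well as lazy loads against User
    if execute_state.is_select:
        execute_state.statement = execute_state.statement.options(
            with_loader_criteria(
                User, lambda cls: cls.deleted == False, include_aliases=True
            )
        )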
+ + Like the :meth:`.QueryEvents.before_compile` event, this event + should be configured with ``retval=True``, and the modified + :class:`_query.Query` object returned, as in :: + + @event.listens_for(Query, "before_compile_delete", retval=True) + def no_deleted(query, delete_context): + for desc in query.column_descriptions: + if desc["type"] is User: + entity = desc["entity"] + query = query.filter(entity.deleted == False) + return query + + :param query: a :class:`_query.Query` instance; this is also + the ``.query`` attribute of the given "delete context" + object. + + :param delete_context: a "delete context" object which is + the same kind of object as described in + :paramref:`.QueryEvents.after_bulk_delete.delete_context`. + + .. versionadded:: 1.2.17 + + .. seealso:: + + :meth:`.QueryEvents.before_compile` + + :meth:`.QueryEvents.before_compile_update` + + + """ + + @classmethod + def _listen( + cls, + event_key: _EventKey[_ET], + retval: bool = False, + bake_ok: bool = False, + **kw: Any, + ) -> None: + fn = event_key._listen_fn + + if not retval: + + def wrap(*arg: Any, **kw: Any) -> Any: + if not retval: + query = arg[0] + fn(*arg, **kw) + return query + else: + return fn(*arg, **kw) + + event_key = event_key.with_wrapper(wrap) + else: + # don't assume we can apply an attribute to the callable + def wrap(*arg: Any, **kw: Any) -> Any: + return fn(*arg, **kw) + + event_key = event_key.with_wrapper(wrap) + + wrap._bake_ok = bake_ok # type: ignore [attr-defined] + + event_key.base_listen(**kw) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/exc.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/exc.py new file mode 100644 index 0000000000000000000000000000000000000000..a2f7c9f78a304e6b5eb13a6f1a6f5f93f0175bfa --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/exc.py @@ -0,0 +1,237 @@ +# orm/exc.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php + +"""SQLAlchemy ORM exceptions.""" + +from __future__ import annotations + +from typing import Any +from typing import Optional +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING +from typing import TypeVar + +from .util import _mapper_property_as_plain_name +from .. import exc as sa_exc +from .. import util +from ..exc import MultipleResultsFound # noqa +from ..exc import NoResultFound # noqa + +if TYPE_CHECKING: + from .interfaces import LoaderStrategy + from .interfaces import MapperProperty + from .state import InstanceState + +_T = TypeVar("_T", bound=Any) + +NO_STATE = (AttributeError, KeyError) +"""Exception types that may be raised by instrumentation implementations.""" + + +class StaleDataError(sa_exc.SQLAlchemyError): + """An operation encountered database state that is unaccounted for. + + Conditions which cause this to happen include: + + * A flush may have attempted to update or delete rows + and an unexpected number of rows were matched during + the UPDATE or DELETE statement. Note that when + version_id_col is used, rows in UPDATE or DELETE statements + are also matched against the current known version + identifier. + + * A mapped object with version_id_col was refreshed, + and the version number coming back from the database does + not match that of the object itself. 
+ + * An object is detached from its parent object, however + the object was previously attached to a different parent + identity which was garbage collected, and a decision + cannot be made if the new parent was really the most + recent "parent". + + """ + + +ConcurrentModificationError = StaleDataError + + +class FlushError(sa_exc.SQLAlchemyError): + """An invalid condition was detected during flush().""" + + +class MappedAnnotationError(sa_exc.ArgumentError): + """Raised when ORM annotated declarative cannot interpret the + expression present inside of the :class:`.Mapped` construct. + + .. versionadded:: 2.0.40 + + """ + + +class UnmappedError(sa_exc.InvalidRequestError): + """Base for exceptions that involve expected mappings not present.""" + + +class ObjectDereferencedError(sa_exc.SQLAlchemyError): + """An operation cannot complete due to an object being garbage + collected. + + """ + + +class DetachedInstanceError(sa_exc.SQLAlchemyError): + """An attempt to access unloaded attributes on a + mapped instance that is detached.""" + + code = "bhk3" + + +class UnmappedInstanceError(UnmappedError): + """A mapping operation was requested for an unknown instance.""" + + @util.preload_module("sqlalchemy.orm.base") + def __init__(self, obj: object, msg: Optional[str] = None): + base = util.preloaded.orm_base + + if not msg: + try: + base.class_mapper(type(obj)) + name = _safe_cls_name(type(obj)) + msg = ( + "Class %r is mapped, but this instance lacks " + "instrumentation. This occurs when the instance " + "is created before sqlalchemy.orm.mapper(%s) " + "was called." % (name, name) + ) + except UnmappedClassError: + msg = f"Class '{_safe_cls_name(type(obj))}' is not mapped" + if isinstance(obj, type): + msg += ( + "; was a class (%s) supplied where an instance was " + "required?" % _safe_cls_name(obj) + ) + UnmappedError.__init__(self, msg) + + def __reduce__(self) -> Any: + return self.__class__, (None, self.args[0]) + + +class UnmappedClassError(UnmappedError): + """A mapping operation was requested for an unknown class.""" + + def __init__(self, cls: Type[_T], msg: Optional[str] = None): + if not msg: + msg = _default_unmapped(cls) + UnmappedError.__init__(self, msg) + + def __reduce__(self) -> Any: + return self.__class__, (None, self.args[0]) + + +class ObjectDeletedError(sa_exc.InvalidRequestError): + """A refresh operation failed to retrieve the database + row corresponding to an object's known primary key identity. + + A refresh operation proceeds when an expired attribute is + accessed on an object, or when :meth:`_query.Query.get` is + used to retrieve an object which is, upon retrieval, detected + as expired. A SELECT is emitted for the target row + based on primary key; if no row is returned, this + exception is raised. + + The true meaning of this exception is simply that + no row exists for the primary key identifier associated + with a persistent object. The row may have been + deleted, or in some cases the primary key updated + to a new value, outside of the ORM's management of the target + object. + + """ + + @util.preload_module("sqlalchemy.orm.base") + def __init__(self, state: InstanceState[Any], msg: Optional[str] = None): + base = util.preloaded.orm_base + + if not msg: + msg = ( + "Instance '%s' has been deleted, or its " + "row is otherwise not present."
% base.state_str(state) + ) + + sa_exc.InvalidRequestError.__init__(self, msg) + + def __reduce__(self) -> Any: + return self.__class__, (None, self.args[0]) + + +class UnmappedColumnError(sa_exc.InvalidRequestError): + """Mapping operation was requested on an unknown column.""" + + +class LoaderStrategyException(sa_exc.InvalidRequestError): + """A loader strategy for an attribute does not exist.""" + + def __init__( + self, + applied_to_property_type: Type[Any], + requesting_property: MapperProperty[Any], + applies_to: Optional[Type[MapperProperty[Any]]], + actual_strategy_type: Optional[Type[LoaderStrategy]], + strategy_key: Tuple[Any, ...], + ): + if actual_strategy_type is None: + sa_exc.InvalidRequestError.__init__( + self, + "Can't find strategy %s for %s" + % (strategy_key, requesting_property), + ) + else: + assert applies_to is not None + sa_exc.InvalidRequestError.__init__( + self, + 'Can\'t apply "%s" strategy to property "%s", ' + 'which is a "%s"; this loader strategy is intended ' + 'to be used with a "%s".' + % ( + util.clsname_as_plain_name(actual_strategy_type), + requesting_property, + _mapper_property_as_plain_name(applied_to_property_type), + _mapper_property_as_plain_name(applies_to), + ), + ) + + +def _safe_cls_name(cls: Type[Any]) -> str: + cls_name: Optional[str] + try: + cls_name = ".".join((cls.__module__, cls.__name__)) + except AttributeError: + cls_name = getattr(cls, "__name__", None) + if cls_name is None: + cls_name = repr(cls) + return cls_name + + +@util.preload_module("sqlalchemy.orm.base") +def _default_unmapped(cls: Type[Any]) -> Optional[str]: + base = util.preloaded.orm_base + + try: + mappers = base.manager_of_class(cls).mappers # type: ignore + except ( + UnmappedClassError, + TypeError, + ) + NO_STATE: + mappers = {} + name = _safe_cls_name(cls) + + if not mappers: + return f"Class '{name}' is not mapped" + else: + return None diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/identity.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/identity.py new file mode 100644 index 0000000000000000000000000000000000000000..1808b2d5e59c552686564f8435284c765ff14697 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/identity.py @@ -0,0 +1,302 @@ +# orm/identity.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php + +from __future__ import annotations + +from typing import Any +from typing import cast +from typing import Dict +from typing import Iterable +from typing import Iterator +from typing import List +from typing import NoReturn +from typing import Optional +from typing import Set +from typing import Tuple +from typing import TYPE_CHECKING +from typing import TypeVar +import weakref + +from . import util as orm_util +from .. 
import exc as sa_exc + +if TYPE_CHECKING: + from ._typing import _IdentityKeyType + from .state import InstanceState + + +_T = TypeVar("_T", bound=Any) + +_O = TypeVar("_O", bound=object) + + +class IdentityMap: + _wr: weakref.ref[IdentityMap] + + _dict: Dict[_IdentityKeyType[Any], Any] + _modified: Set[InstanceState[Any]] + + def __init__(self) -> None: + self._dict = {} + self._modified = set() + self._wr = weakref.ref(self) + + def _kill(self) -> None: + self._add_unpresent = _killed # type: ignore + + def all_states(self) -> List[InstanceState[Any]]: + raise NotImplementedError() + + def contains_state(self, state: InstanceState[Any]) -> bool: + raise NotImplementedError() + + def __contains__(self, key: _IdentityKeyType[Any]) -> bool: + raise NotImplementedError() + + def safe_discard(self, state: InstanceState[Any]) -> None: + raise NotImplementedError() + + def __getitem__(self, key: _IdentityKeyType[_O]) -> _O: + raise NotImplementedError() + + def get( + self, key: _IdentityKeyType[_O], default: Optional[_O] = None + ) -> Optional[_O]: + raise NotImplementedError() + + def fast_get_state( + self, key: _IdentityKeyType[_O] + ) -> Optional[InstanceState[_O]]: + raise NotImplementedError() + + def keys(self) -> Iterable[_IdentityKeyType[Any]]: + return self._dict.keys() + + def values(self) -> Iterable[object]: + raise NotImplementedError() + + def replace(self, state: InstanceState[_O]) -> Optional[InstanceState[_O]]: + raise NotImplementedError() + + def add(self, state: InstanceState[Any]) -> bool: + raise NotImplementedError() + + def _fast_discard(self, state: InstanceState[Any]) -> None: + raise NotImplementedError() + + def _add_unpresent( + self, state: InstanceState[Any], key: _IdentityKeyType[Any] + ) -> None: + """optional inlined form of add() which can assume item isn't present + in the map""" + self.add(state) + + def _manage_incoming_state(self, state: InstanceState[Any]) -> None: + state._instance_dict = self._wr + + if state.modified: + self._modified.add(state) + + def _manage_removed_state(self, state: InstanceState[Any]) -> None: + del state._instance_dict + if state.modified: + self._modified.discard(state) + + def _dirty_states(self) -> Set[InstanceState[Any]]: + return self._modified + + def check_modified(self) -> bool: + """return True if any InstanceStates present have been marked + as 'modified'. 
+ + """ + return bool(self._modified) + + def has_key(self, key: _IdentityKeyType[Any]) -> bool: + return key in self + + def __len__(self) -> int: + return len(self._dict) + + +class WeakInstanceDict(IdentityMap): + _dict: Dict[_IdentityKeyType[Any], InstanceState[Any]] + + def __getitem__(self, key: _IdentityKeyType[_O]) -> _O: + state = cast("InstanceState[_O]", self._dict[key]) + o = state.obj() + if o is None: + raise KeyError(key) + return o + + def __contains__(self, key: _IdentityKeyType[Any]) -> bool: + try: + if key in self._dict: + state = self._dict[key] + o = state.obj() + else: + return False + except KeyError: + return False + else: + return o is not None + + def contains_state(self, state: InstanceState[Any]) -> bool: + if state.key in self._dict: + if TYPE_CHECKING: + assert state.key is not None + try: + return self._dict[state.key] is state + except KeyError: + return False + else: + return False + + def replace( + self, state: InstanceState[Any] + ) -> Optional[InstanceState[Any]]: + assert state.key is not None + if state.key in self._dict: + try: + existing = existing_non_none = self._dict[state.key] + except KeyError: + # catch gc removed the key after we just checked for it + existing = None + else: + if existing_non_none is not state: + self._manage_removed_state(existing_non_none) + else: + return None + else: + existing = None + + self._dict[state.key] = state + self._manage_incoming_state(state) + return existing + + def add(self, state: InstanceState[Any]) -> bool: + key = state.key + assert key is not None + # inline of self.__contains__ + if key in self._dict: + try: + existing_state = self._dict[key] + except KeyError: + # catch gc removed the key after we just checked for it + pass + else: + if existing_state is not state: + o = existing_state.obj() + if o is not None: + raise sa_exc.InvalidRequestError( + "Can't attach instance " + "%s; another instance with key %s is already " + "present in this session." 
+ % (orm_util.state_str(state), state.key) + ) + else: + return False + self._dict[key] = state + self._manage_incoming_state(state) + return True + + def _add_unpresent( + self, state: InstanceState[Any], key: _IdentityKeyType[Any] + ) -> None: + # inlined form of add() called by loading.py + self._dict[key] = state + state._instance_dict = self._wr + + def fast_get_state( + self, key: _IdentityKeyType[_O] + ) -> Optional[InstanceState[_O]]: + return self._dict.get(key) + + def get( + self, key: _IdentityKeyType[_O], default: Optional[_O] = None + ) -> Optional[_O]: + if key not in self._dict: + return default + try: + state = cast("InstanceState[_O]", self._dict[key]) + except KeyError: + # catch gc removed the key after we just checked for it + return default + else: + o = state.obj() + if o is None: + return default + return o + + def items(self) -> List[Tuple[_IdentityKeyType[Any], InstanceState[Any]]]: + values = self.all_states() + result = [] + for state in values: + value = state.obj() + key = state.key + assert key is not None + if value is not None: + result.append((key, value)) + return result + + def values(self) -> List[object]: + values = self.all_states() + result = [] + for state in values: + value = state.obj() + if value is not None: + result.append(value) + + return result + + def __iter__(self) -> Iterator[_IdentityKeyType[Any]]: + return iter(self.keys()) + + def all_states(self) -> List[InstanceState[Any]]: + return list(self._dict.values()) + + def _fast_discard(self, state: InstanceState[Any]) -> None: + # used by InstanceState for state being + # GC'ed, inlines _managed_removed_state + key = state.key + assert key is not None + try: + st = self._dict[key] + except KeyError: + # catch gc removed the key after we just checked for it + pass + else: + if st is state: + self._dict.pop(key, None) + + def discard(self, state: InstanceState[Any]) -> None: + self.safe_discard(state) + + def safe_discard(self, state: InstanceState[Any]) -> None: + key = state.key + if key in self._dict: + assert key is not None + try: + st = self._dict[key] + except KeyError: + # catch gc removed the key after we just checked for it + pass + else: + if st is state: + self._dict.pop(key, None) + self._manage_removed_state(state) + + +def _killed(state: InstanceState[Any], key: _IdentityKeyType[Any]) -> NoReturn: + # external function to avoid creating cycles when assigned to + # the IdentityMap + raise sa_exc.InvalidRequestError( + "Object %s cannot be converted to 'persistent' state, as this " + "identity map is no longer valid. Has the owning Session " + "been closed?" % orm_util.state_str(state), + code="lkrp", + ) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/instrumentation.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/instrumentation.py new file mode 100644 index 0000000000000000000000000000000000000000..f87023f18092120cd5d0f3edb0788ad5b5127416 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/instrumentation.py @@ -0,0 +1,754 @@ +# orm/instrumentation.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php +# mypy: allow-untyped-defs, allow-untyped-calls + +"""Defines SQLAlchemy's system of class instrumentation. 
+ +This module is usually not directly visible to user applications, but +defines a large part of the ORM's interactivity. + +instrumentation.py deals with registration of end-user classes +for state tracking. It interacts closely with state.py +and attributes.py which establish per-instance and per-class-attribute +instrumentation, respectively. + +The class instrumentation system can be customized on a per-class +or global basis using the :mod:`sqlalchemy.ext.instrumentation` +module, which provides the means to build and specify +alternate instrumentation forms. + +.. versionchanged: 0.8 + The instrumentation extension system was moved out of the + ORM and into the external :mod:`sqlalchemy.ext.instrumentation` + package. When that package is imported, it installs + itself within sqlalchemy.orm so that its more comprehensive + resolution mechanics take effect. + +""" + + +from __future__ import annotations + +from typing import Any +from typing import Callable +from typing import cast +from typing import Collection +from typing import Dict +from typing import Generic +from typing import Iterable +from typing import List +from typing import Optional +from typing import Set +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING +from typing import TypeVar +from typing import Union +import weakref + +from . import base +from . import collections +from . import exc +from . import interfaces +from . import state +from ._typing import _O +from .attributes import _is_collection_attribute_impl +from .. import util +from ..event import EventTarget +from ..util import HasMemoized +from ..util.typing import Literal +from ..util.typing import Protocol + +if TYPE_CHECKING: + from ._typing import _RegistryType + from .attributes import AttributeImpl + from .attributes import QueryableAttribute + from .collections import _AdaptedCollectionProtocol + from .collections import _CollectionFactoryType + from .decl_base import _MapperConfig + from .events import InstanceEvents + from .mapper import Mapper + from .state import InstanceState + from ..event import dispatcher + +_T = TypeVar("_T", bound=Any) +DEL_ATTR = util.symbol("DEL_ATTR") + + +class _ExpiredAttributeLoaderProto(Protocol): + def __call__( + self, + state: state.InstanceState[Any], + toload: Set[str], + passive: base.PassiveFlag, + ) -> None: ... + + +class _ManagerFactory(Protocol): + def __call__(self, class_: Type[_O]) -> ClassManager[_O]: ... 
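# Illustrative sketch, not part of the vendored source: the lookup helpers
# assigned near the end of this module can be used to observe the
# instrumentation described in the module docstring above (``User`` is a
# hypothetical mapped class):
#
#   from sqlalchemy.orm import instrumentation
#
#   manager = instrumentation.manager_of_class(User)  # the ClassManager
#   assert manager.is_mapped and manager.mapper.class_ is User
#
#   u1 = User()
#   state = instrumentation.instance_state(u1)  # the per-instance state
#   assert state.class_ is User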
+ + +class ClassManager( + HasMemoized, + Dict[str, "QueryableAttribute[Any]"], + Generic[_O], + EventTarget, +): + """Tracks state information at the class level.""" + + dispatch: dispatcher[ClassManager[_O]] + + MANAGER_ATTR = base.DEFAULT_MANAGER_ATTR + STATE_ATTR = base.DEFAULT_STATE_ATTR + + _state_setter = staticmethod(util.attrsetter(STATE_ATTR)) + + expired_attribute_loader: _ExpiredAttributeLoaderProto + "previously known as deferred_scalar_loader" + + init_method: Optional[Callable[..., None]] + original_init: Optional[Callable[..., None]] = None + + factory: Optional[_ManagerFactory] + + declarative_scan: Optional[weakref.ref[_MapperConfig]] = None + + registry: _RegistryType + + if not TYPE_CHECKING: + # starts as None during setup + registry = None + + class_: Type[_O] + + _bases: List[ClassManager[Any]] + + @property + @util.deprecated( + "1.4", + message="The ClassManager.deferred_scalar_loader attribute is now " + "named expired_attribute_loader", + ) + def deferred_scalar_loader(self): + return self.expired_attribute_loader + + @deferred_scalar_loader.setter + @util.deprecated( + "1.4", + message="The ClassManager.deferred_scalar_loader attribute is now " + "named expired_attribute_loader", + ) + def deferred_scalar_loader(self, obj): + self.expired_attribute_loader = obj + + def __init__(self, class_): + self.class_ = class_ + self.info = {} + self.new_init = None + self.local_attrs = {} + self.originals = {} + self._finalized = False + self.factory = None + self.init_method = None + + self._bases = [ + mgr + for mgr in cast( + "List[Optional[ClassManager[Any]]]", + [ + opt_manager_of_class(base) + for base in self.class_.__bases__ + if isinstance(base, type) + ], + ) + if mgr is not None + ] + + for base_ in self._bases: + self.update(base_) + + cast( + "InstanceEvents", self.dispatch._events + )._new_classmanager_instance(class_, self) + + for basecls in class_.__mro__: + mgr = opt_manager_of_class(basecls) + if mgr is not None: + self.dispatch._update(mgr.dispatch) + + self.manage() + + if "__del__" in class_.__dict__: + util.warn( + "__del__() method on class %s will " + "cause unreachable cycles and memory leaks, " + "as SQLAlchemy instrumentation often creates " + "reference cycles. Please remove this method." 
% class_ + ) + + def _update_state( + self, + finalize: bool = False, + mapper: Optional[Mapper[_O]] = None, + registry: Optional[_RegistryType] = None, + declarative_scan: Optional[_MapperConfig] = None, + expired_attribute_loader: Optional[ + _ExpiredAttributeLoaderProto + ] = None, + init_method: Optional[Callable[..., None]] = None, + ) -> None: + if mapper: + self.mapper = mapper # + if registry: + registry._add_manager(self) + if declarative_scan: + self.declarative_scan = weakref.ref(declarative_scan) + if expired_attribute_loader: + self.expired_attribute_loader = expired_attribute_loader + + if init_method: + assert not self._finalized, ( + "class is already instrumented, " + "init_method %s can't be applied" % init_method + ) + self.init_method = init_method + + if not self._finalized: + self.original_init = ( + self.init_method + if self.init_method is not None + and self.class_.__init__ is object.__init__ + else self.class_.__init__ + ) + + if finalize and not self._finalized: + self._finalize() + + def _finalize(self) -> None: + if self._finalized: + return + self._finalized = True + + self._instrument_init() + + _instrumentation_factory.dispatch.class_instrument(self.class_) + + def __hash__(self) -> int: # type: ignore[override] + return id(self) + + def __eq__(self, other: Any) -> bool: + return other is self + + @property + def is_mapped(self) -> bool: + return "mapper" in self.__dict__ + + @HasMemoized.memoized_attribute + def _all_key_set(self): + return frozenset(self) + + @HasMemoized.memoized_attribute + def _collection_impl_keys(self): + return frozenset( + [attr.key for attr in self.values() if attr.impl.collection] + ) + + @HasMemoized.memoized_attribute + def _scalar_loader_impls(self): + return frozenset( + [ + attr.impl + for attr in self.values() + if attr.impl.accepts_scalar_loader + ] + ) + + @HasMemoized.memoized_attribute + def _loader_impls(self): + return frozenset([attr.impl for attr in self.values()]) + + @util.memoized_property + def mapper(self) -> Mapper[_O]: + # raises unless self.mapper has been assigned + raise exc.UnmappedClassError(self.class_) + + def _all_sqla_attributes(self, exclude=None): + """return an iterator of all classbound attributes that are + implement :class:`.InspectionAttr`. + + This includes :class:`.QueryableAttribute` as well as extension + types such as :class:`.hybrid_property` and + :class:`.AssociationProxy`. + + """ + + found: Dict[str, Any] = {} + + # constraints: + # 1. yield keys in cls.__dict__ order + # 2. if a subclass has the same key as a superclass, include that + # key as part of the ordering of the superclass, because an + # overridden key is usually installed by the mapper which is going + # on a different ordering + # 3. don't use getattr() as this fires off descriptors + + for supercls in self.class_.__mro__[0:-1]: + inherits = supercls.__mro__[1] + for key in supercls.__dict__: + found.setdefault(key, supercls) + if key in inherits.__dict__: + continue + val = found[key].__dict__[key] + if ( + isinstance(val, interfaces.InspectionAttr) + and val.is_attribute + ): + yield key, val + + def _get_class_attr_mro(self, key, default=None): + """return an attribute on the class without tripping it.""" + + for supercls in self.class_.__mro__: + if key in supercls.__dict__: + return supercls.__dict__[key] + else: + return default + + def _attr_has_impl(self, key: str) -> bool: + """Return True if the given attribute is fully initialized. + + i.e. has an impl. 
+ """ + + return key in self and self[key].impl is not None + + def _subclass_manager(self, cls: Type[_T]) -> ClassManager[_T]: + """Create a new ClassManager for a subclass of this ClassManager's + class. + + This is called automatically when attributes are instrumented so that + the attributes can be propagated to subclasses against their own + class-local manager, without the need for mappers etc. to have already + pre-configured managers for the full class hierarchy. Mappers + can post-configure the auto-generated ClassManager when needed. + + """ + return register_class(cls, finalize=False) + + def _instrument_init(self): + self.new_init = _generate_init(self.class_, self, self.original_init) + self.install_member("__init__", self.new_init) + + @util.memoized_property + def _state_constructor(self) -> Type[state.InstanceState[_O]]: + self.dispatch.first_init(self, self.class_) + return state.InstanceState + + def manage(self): + """Mark this instance as the manager for its class.""" + + setattr(self.class_, self.MANAGER_ATTR, self) + + @util.hybridmethod + def manager_getter(self): + return _default_manager_getter + + @util.hybridmethod + def state_getter(self): + """Return a (instance) -> InstanceState callable. + + "state getter" callables should raise either KeyError or + AttributeError if no InstanceState could be found for the + instance. + """ + + return _default_state_getter + + @util.hybridmethod + def dict_getter(self): + return _default_dict_getter + + def instrument_attribute( + self, + key: str, + inst: QueryableAttribute[Any], + propagated: bool = False, + ) -> None: + if propagated: + if key in self.local_attrs: + return # don't override local attr with inherited attr + else: + self.local_attrs[key] = inst + self.install_descriptor(key, inst) + self._reset_memoizations() + self[key] = inst + + for cls in self.class_.__subclasses__(): + manager = self._subclass_manager(cls) + manager.instrument_attribute(key, inst, True) + + def subclass_managers(self, recursive): + for cls in self.class_.__subclasses__(): + mgr = opt_manager_of_class(cls) + if mgr is not None and mgr is not self: + yield mgr + if recursive: + yield from mgr.subclass_managers(True) + + def post_configure_attribute(self, key): + _instrumentation_factory.dispatch.attribute_instrument( + self.class_, key, self[key] + ) + + def uninstrument_attribute(self, key, propagated=False): + if key not in self: + return + if propagated: + if key in self.local_attrs: + return # don't get rid of local attr + else: + del self.local_attrs[key] + self.uninstall_descriptor(key) + self._reset_memoizations() + del self[key] + for cls in self.class_.__subclasses__(): + manager = opt_manager_of_class(cls) + if manager: + manager.uninstrument_attribute(key, True) + + def unregister(self) -> None: + """remove all instrumentation established by this ClassManager.""" + + for key in list(self.originals): + self.uninstall_member(key) + + self.mapper = None + self.dispatch = None # type: ignore + self.new_init = None + self.info.clear() + + for key in list(self): + if key in self.local_attrs: + self.uninstrument_attribute(key) + + if self.MANAGER_ATTR in self.class_.__dict__: + delattr(self.class_, self.MANAGER_ATTR) + + def install_descriptor( + self, key: str, inst: QueryableAttribute[Any] + ) -> None: + if key in (self.STATE_ATTR, self.MANAGER_ATTR): + raise KeyError( + "%r: requested attribute name conflicts with " + "instrumentation attribute of the same name." 
% key + ) + setattr(self.class_, key, inst) + + def uninstall_descriptor(self, key: str) -> None: + delattr(self.class_, key) + + def install_member(self, key: str, implementation: Any) -> None: + if key in (self.STATE_ATTR, self.MANAGER_ATTR): + raise KeyError( + "%r: requested attribute name conflicts with " + "instrumentation attribute of the same name." % key + ) + self.originals.setdefault(key, self.class_.__dict__.get(key, DEL_ATTR)) + setattr(self.class_, key, implementation) + + def uninstall_member(self, key: str) -> None: + original = self.originals.pop(key, None) + if original is not DEL_ATTR: + setattr(self.class_, key, original) + else: + delattr(self.class_, key) + + def instrument_collection_class( + self, key: str, collection_class: Type[Collection[Any]] + ) -> _CollectionFactoryType: + return collections.prepare_instrumentation(collection_class) + + def initialize_collection( + self, + key: str, + state: InstanceState[_O], + factory: _CollectionFactoryType, + ) -> Tuple[collections.CollectionAdapter, _AdaptedCollectionProtocol]: + user_data = factory() + impl = self.get_impl(key) + assert _is_collection_attribute_impl(impl) + adapter = collections.CollectionAdapter(impl, state, user_data) + return adapter, user_data + + def is_instrumented(self, key: str, search: bool = False) -> bool: + if search: + return key in self + else: + return key in self.local_attrs + + def get_impl(self, key: str) -> AttributeImpl: + return self[key].impl + + @property + def attributes(self) -> Iterable[Any]: + return iter(self.values()) + + # InstanceState management + + def new_instance(self, state: Optional[InstanceState[_O]] = None) -> _O: + # here, we would prefer _O to be bound to "object" + # so that mypy sees that __new__ is present. currently + # it's bound to Any as there were other problems not having + # it that way but these can be revisited + instance = self.class_.__new__(self.class_) + if state is None: + state = self._state_constructor(instance, self) + self._state_setter(instance, state) + return instance + + def setup_instance( + self, instance: _O, state: Optional[InstanceState[_O]] = None + ) -> None: + if state is None: + state = self._state_constructor(instance, self) + self._state_setter(instance, state) + + def teardown_instance(self, instance: _O) -> None: + delattr(instance, self.STATE_ATTR) + + def _serialize( + self, state: InstanceState[_O], state_dict: Dict[str, Any] + ) -> _SerializeManager: + return _SerializeManager(state, state_dict) + + def _new_state_if_none( + self, instance: _O + ) -> Union[Literal[False], InstanceState[_O]]: + """Install a default InstanceState if none is present. + + A private convenience method used by the __init__ decorator. + + """ + if hasattr(instance, self.STATE_ATTR): + return False + elif self.class_ is not instance.__class__ and self.is_mapped: + # this will create a new ClassManager for the + # subclass, without a mapper. This is likely a + # user error situation but allow the object + # to be constructed, so that it is usable + # in a non-ORM context at least. 
+ return self._subclass_manager( + instance.__class__ + )._new_state_if_none(instance) + else: + state = self._state_constructor(instance, self) + self._state_setter(instance, state) + return state + + def has_state(self, instance: _O) -> bool: + return hasattr(instance, self.STATE_ATTR) + + def has_parent( + self, state: InstanceState[_O], key: str, optimistic: bool = False + ) -> bool: + """TODO""" + return self.get_impl(key).hasparent(state, optimistic=optimistic) + + def __bool__(self) -> bool: + """All ClassManagers are non-zero regardless of attribute state.""" + return True + + def __repr__(self) -> str: + return "<%s of %r at %x>" % ( + self.__class__.__name__, + self.class_, + id(self), + ) + + +class _SerializeManager: + """Provide serialization of a :class:`.ClassManager`. + + The :class:`.InstanceState` uses ``__init__()`` on serialize + and ``__call__()`` on deserialize. + + """ + + def __init__(self, state: state.InstanceState[Any], d: Dict[str, Any]): + self.class_ = state.class_ + manager = state.manager + manager.dispatch.pickle(state, d) + + def __call__(self, state, inst, state_dict): + state.manager = manager = opt_manager_of_class(self.class_) + if manager is None: + raise exc.UnmappedInstanceError( + inst, + "Cannot deserialize object of type %r - " + "no mapper() has " + "been configured for this class within the current " + "Python process!" % self.class_, + ) + elif manager.is_mapped and not manager.mapper.configured: + manager.mapper._check_configure() + + # setup _sa_instance_state ahead of time so that + # unpickle events can access the object normally. + # see [ticket:2362] + if inst is not None: + manager.setup_instance(inst, state) + manager.dispatch.unpickle(state, state_dict) + + +class InstrumentationFactory(EventTarget): + """Factory for new ClassManager instances.""" + + dispatch: dispatcher[InstrumentationFactory] + + def create_manager_for_cls(self, class_: Type[_O]) -> ClassManager[_O]: + assert class_ is not None + assert opt_manager_of_class(class_) is None + + # give a more complicated subclass + # a chance to do what it wants here + manager, factory = self._locate_extended_factory(class_) + + if factory is None: + factory = ClassManager + manager = ClassManager(class_) + else: + assert manager is not None + + self._check_conflicts(class_, factory) + + manager.factory = factory + + return manager + + def _locate_extended_factory( + self, class_: Type[_O] + ) -> Tuple[Optional[ClassManager[_O]], Optional[_ManagerFactory]]: + """Overridden by a subclass to do an extended lookup.""" + return None, None + + def _check_conflicts( + self, class_: Type[_O], factory: Callable[[Type[_O]], ClassManager[_O]] + ) -> None: + """Overridden by a subclass to test for conflicting factories.""" + + def unregister(self, class_: Type[_O]) -> None: + manager = manager_of_class(class_) + manager.unregister() + self.dispatch.class_uninstrument(class_) + + +# this attribute is replaced by sqlalchemy.ext.instrumentation +# when imported. +_instrumentation_factory = InstrumentationFactory() + +# these attributes are replaced by sqlalchemy.ext.instrumentation +# when a non-standard InstrumentationManager class is first +# used to instrument a class. 
+instance_state = _default_state_getter = base.instance_state + +instance_dict = _default_dict_getter = base.instance_dict + +manager_of_class = _default_manager_getter = base.manager_of_class +opt_manager_of_class = _default_opt_manager_getter = base.opt_manager_of_class + + +def register_class( + class_: Type[_O], + finalize: bool = True, + mapper: Optional[Mapper[_O]] = None, + registry: Optional[_RegistryType] = None, + declarative_scan: Optional[_MapperConfig] = None, + expired_attribute_loader: Optional[_ExpiredAttributeLoaderProto] = None, + init_method: Optional[Callable[..., None]] = None, +) -> ClassManager[_O]: + """Register class instrumentation. + + Returns the existing or newly created class manager. + + """ + + manager = opt_manager_of_class(class_) + if manager is None: + manager = _instrumentation_factory.create_manager_for_cls(class_) + manager._update_state( + mapper=mapper, + registry=registry, + declarative_scan=declarative_scan, + expired_attribute_loader=expired_attribute_loader, + init_method=init_method, + finalize=finalize, + ) + + return manager + + +def unregister_class(class_): + """Unregister class instrumentation.""" + + _instrumentation_factory.unregister(class_) + + +def is_instrumented(instance, key): + """Return True if the given attribute on the given instance is + instrumented by the attributes package. + + This function may be used regardless of instrumentation + applied directly to the class, i.e. no descriptors are required. + + """ + return manager_of_class(instance.__class__).is_instrumented( + key, search=True + ) + + +def _generate_init(class_, class_manager, original_init): + """Build an __init__ decorator that triggers ClassManager events.""" + + # TODO: we should use the ClassManager's notion of the + # original '__init__' method, once ClassManager is fixed + # to always reference that. + + if original_init is None: + original_init = class_.__init__ + + # Go through some effort here and don't change the user's __init__ + # calling signature, including the unlikely case that it has + # a return value. + # FIXME: need to juggle local names to avoid constructor argument + # clashes. 
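    # Descriptive note on the template below: the generated wrapper keeps the
    # original __init__ signature intact.  On first construction it attaches
    # an InstanceState via _new_state_if_none(), and
    # InstanceState._initialize_instance() then fires the "init" /
    # "init_failure" events around the call to the original __init__; if a
    # state is already present, the original __init__ is invoked directly.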
+ func_body = """\ +def __init__(%(apply_pos)s): + new_state = class_manager._new_state_if_none(%(self_arg)s) + if new_state: + return new_state._initialize_instance(%(apply_kw)s) + else: + return original_init(%(apply_kw)s) +""" + func_vars = util.format_argspec_init(original_init, grouped=False) + func_text = func_body % func_vars + + func_defaults = getattr(original_init, "__defaults__", None) + func_kw_defaults = getattr(original_init, "__kwdefaults__", None) + + env = locals().copy() + env["__name__"] = __name__ + exec(func_text, env) + __init__ = env["__init__"] + __init__.__doc__ = original_init.__doc__ + __init__._sa_original_init = original_init + + if func_defaults: + __init__.__defaults__ = func_defaults + if func_kw_defaults: + __init__.__kwdefaults__ = func_kw_defaults + + return __init__ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/interfaces.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/interfaces.py new file mode 100644 index 0000000000000000000000000000000000000000..b4462e54593fa3d3a4f5d4de1e1a4c1902087398 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/interfaces.py @@ -0,0 +1,1490 @@ +# orm/interfaces.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php + +""" + +Contains various base classes used throughout the ORM. + +Defines some key base classes prominent within the internals. + +This module and the classes within are mostly private, though some attributes +are exposed when inspecting mappings. + +""" + +from __future__ import annotations + +import collections +import dataclasses +import typing +from typing import Any +from typing import Callable +from typing import cast +from typing import ClassVar +from typing import Dict +from typing import Generic +from typing import Iterator +from typing import List +from typing import NamedTuple +from typing import NoReturn +from typing import Optional +from typing import Sequence +from typing import Set +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING +from typing import TypeVar +from typing import Union + +from . import exc as orm_exc +from . import path_registry +from .base import _MappedAttribute as _MappedAttribute +from .base import EXT_CONTINUE as EXT_CONTINUE # noqa: F401 +from .base import EXT_SKIP as EXT_SKIP # noqa: F401 +from .base import EXT_STOP as EXT_STOP # noqa: F401 +from .base import InspectionAttr as InspectionAttr # noqa: F401 +from .base import InspectionAttrInfo as InspectionAttrInfo +from .base import MANYTOMANY as MANYTOMANY # noqa: F401 +from .base import MANYTOONE as MANYTOONE # noqa: F401 +from .base import NO_KEY as NO_KEY # noqa: F401 +from .base import NO_VALUE as NO_VALUE # noqa: F401 +from .base import NotExtension as NotExtension # noqa: F401 +from .base import ONETOMANY as ONETOMANY # noqa: F401 +from .base import RelationshipDirection as RelationshipDirection # noqa: F401 +from .base import SQLORMOperations +from .. import ColumnElement +from .. import exc as sa_exc +from .. import inspection +from .. 
import util +from ..sql import operators +from ..sql import roles +from ..sql import visitors +from ..sql.base import _NoArg +from ..sql.base import ExecutableOption +from ..sql.cache_key import HasCacheKey +from ..sql.operators import ColumnOperators +from ..sql.schema import Column +from ..sql.type_api import TypeEngine +from ..util import warn_deprecated +from ..util.typing import RODescriptorReference +from ..util.typing import TypedDict + +if typing.TYPE_CHECKING: + from ._typing import _EntityType + from ._typing import _IdentityKeyType + from ._typing import _InstanceDict + from ._typing import _InternalEntityType + from ._typing import _ORMAdapterProto + from .attributes import InstrumentedAttribute + from .base import Mapped + from .context import _MapperEntity + from .context import ORMCompileState + from .context import QueryContext + from .decl_api import RegistryType + from .decl_base import _ClassScanMapperConfig + from .loading import _PopulatorDict + from .mapper import Mapper + from .path_registry import AbstractEntityRegistry + from .query import Query + from .session import Session + from .state import InstanceState + from .strategy_options import _LoadElement + from .util import AliasedInsp + from .util import ORMAdapter + from ..engine.result import Result + from ..sql._typing import _ColumnExpressionArgument + from ..sql._typing import _ColumnsClauseArgument + from ..sql._typing import _DMLColumnArgument + from ..sql._typing import _InfoType + from ..sql.operators import OperatorType + from ..sql.visitors import _TraverseInternalsType + from ..util.typing import _AnnotationScanType + +_StrategyKey = Tuple[Any, ...] + +_T = TypeVar("_T", bound=Any) +_T_co = TypeVar("_T_co", bound=Any, covariant=True) + +_TLS = TypeVar("_TLS", bound="Type[LoaderStrategy]") + + +class ORMStatementRole(roles.StatementRole): + __slots__ = () + _role_name = ( + "Executable SQL or text() construct, including ORM aware objects" + ) + + +class ORMColumnsClauseRole( + roles.ColumnsClauseRole, roles.TypedColumnsClauseRole[_T] +): + __slots__ = () + _role_name = "ORM mapped entity, aliased entity, or Column expression" + + +class ORMEntityColumnsClauseRole(ORMColumnsClauseRole[_T]): + __slots__ = () + _role_name = "ORM mapped or aliased entity" + + +class ORMFromClauseRole(roles.StrictFromClauseRole): + __slots__ = () + _role_name = "ORM mapped entity, aliased entity, or FROM expression" + + +class ORMColumnDescription(TypedDict): + name: str + # TODO: add python_type and sql_type here; combining them + # into "type" is a bad idea + type: Union[Type[Any], TypeEngine[Any]] + aliased: bool + expr: _ColumnsClauseArgument[Any] + entity: Optional[_ColumnsClauseArgument[Any]] + + +class _IntrospectsAnnotations: + __slots__ = () + + @classmethod + def _mapper_property_name(cls) -> str: + return cls.__name__ + + def found_in_pep593_annotated(self) -> Any: + """return a copy of this object to use in declarative when the + object is found inside of an Annotated object.""" + + raise NotImplementedError( + f"Use of the {self._mapper_property_name()!r} " + "construct inside of an Annotated object is not yet supported." 
+ ) + + def declarative_scan( + self, + decl_scan: _ClassScanMapperConfig, + registry: RegistryType, + cls: Type[Any], + originating_module: Optional[str], + key: str, + mapped_container: Optional[Type[Mapped[Any]]], + annotation: Optional[_AnnotationScanType], + extracted_mapped_annotation: Optional[_AnnotationScanType], + is_dataclass_field: bool, + ) -> None: + """Perform class-specific initializaton at early declarative scanning + time. + + .. versionadded:: 2.0 + + """ + + def _raise_for_required(self, key: str, cls: Type[Any]) -> NoReturn: + raise sa_exc.ArgumentError( + f"Python typing annotation is required for attribute " + f'"{cls.__name__}.{key}" when primary argument(s) for ' + f'"{self._mapper_property_name()}" ' + "construct are None or not present" + ) + + +class _AttributeOptions(NamedTuple): + """define Python-local attribute behavior options common to all + :class:`.MapperProperty` objects. + + Currently this includes dataclass-generation arguments. + + .. versionadded:: 2.0 + + """ + + dataclasses_init: Union[_NoArg, bool] + dataclasses_repr: Union[_NoArg, bool] + dataclasses_default: Union[_NoArg, Any] + dataclasses_default_factory: Union[_NoArg, Callable[[], Any]] + dataclasses_compare: Union[_NoArg, bool] + dataclasses_kw_only: Union[_NoArg, bool] + dataclasses_hash: Union[_NoArg, bool, None] + + def _as_dataclass_field(self, key: str) -> Any: + """Return a ``dataclasses.Field`` object given these arguments.""" + + kw: Dict[str, Any] = {} + if self.dataclasses_default_factory is not _NoArg.NO_ARG: + kw["default_factory"] = self.dataclasses_default_factory + if self.dataclasses_default is not _NoArg.NO_ARG: + kw["default"] = self.dataclasses_default + if self.dataclasses_init is not _NoArg.NO_ARG: + kw["init"] = self.dataclasses_init + if self.dataclasses_repr is not _NoArg.NO_ARG: + kw["repr"] = self.dataclasses_repr + if self.dataclasses_compare is not _NoArg.NO_ARG: + kw["compare"] = self.dataclasses_compare + if self.dataclasses_kw_only is not _NoArg.NO_ARG: + kw["kw_only"] = self.dataclasses_kw_only + if self.dataclasses_hash is not _NoArg.NO_ARG: + kw["hash"] = self.dataclasses_hash + + if "default" in kw and callable(kw["default"]): + # callable defaults are ambiguous. deprecate them in favour of + # insert_default or default_factory. #9936 + warn_deprecated( + f"Callable object passed to the ``default`` parameter for " + f"attribute {key!r} in a ORM-mapped Dataclasses context is " + "ambiguous, " + "and this use will raise an error in a future release. " + "If this callable is intended to produce Core level INSERT " + "default values for an underlying ``Column``, use " + "the ``mapped_column.insert_default`` parameter instead. " + "To establish this callable as providing a default value " + "for instances of the dataclass itself, use the " + "``default_factory`` dataclasses parameter.", + "2.0", + ) + + if ( + "init" in kw + and not kw["init"] + and "default" in kw + and not callable(kw["default"]) # ignore callable defaults. 
#9936 + and "default_factory" not in kw # illegal but let dc.field raise + ): + # fix for #9879 + default = kw.pop("default") + kw["default_factory"] = lambda: default + + return dataclasses.field(**kw) + + @classmethod + def _get_arguments_for_make_dataclass( + cls, + key: str, + annotation: _AnnotationScanType, + mapped_container: Optional[Any], + elem: _T, + ) -> Union[ + Tuple[str, _AnnotationScanType], + Tuple[str, _AnnotationScanType, dataclasses.Field[Any]], + ]: + """given attribute key, annotation, and value from a class, return + the argument tuple we would pass to dataclasses.make_dataclass() + for this attribute. + + """ + if isinstance(elem, _DCAttributeOptions): + dc_field = elem._attribute_options._as_dataclass_field(key) + + return (key, annotation, dc_field) + elif elem is not _NoArg.NO_ARG: + # why is typing not erroring on this? + return (key, annotation, elem) + elif mapped_container is not None: + # it's Mapped[], but there's no "element", which means declarative + # did not actually do anything for this field. this shouldn't + # happen. + # previously, this would occur because _scan_attributes would + # skip a field that's on an already mapped superclass, but it + # would still include it in the annotations, leading + # to issue #8718 + + assert False, "Mapped[] received without a mapping declaration" + + else: + # plain dataclass field, not mapped. Is only possible + # if __allow_unmapped__ is set up. I can see this mode causing + # problems... + return (key, annotation) + + +_DEFAULT_ATTRIBUTE_OPTIONS = _AttributeOptions( + _NoArg.NO_ARG, + _NoArg.NO_ARG, + _NoArg.NO_ARG, + _NoArg.NO_ARG, + _NoArg.NO_ARG, + _NoArg.NO_ARG, + _NoArg.NO_ARG, +) + +_DEFAULT_READONLY_ATTRIBUTE_OPTIONS = _AttributeOptions( + False, + _NoArg.NO_ARG, + _NoArg.NO_ARG, + _NoArg.NO_ARG, + _NoArg.NO_ARG, + _NoArg.NO_ARG, + _NoArg.NO_ARG, +) + + +class _DCAttributeOptions: + """mixin for descriptors or configurational objects that include dataclass + field options. + + This includes :class:`.MapperProperty`, :class:`._MapsColumn` within + the ORM, but also includes :class:`.AssociationProxy` within ext. + Can in theory be used for other descriptors that serve a similar role + as association proxy. (*maybe* hybrids, not sure yet.) + + """ + + __slots__ = () + + _attribute_options: _AttributeOptions + """behavioral options for ORM-enabled Python attributes + + .. versionadded:: 2.0 + + """ + + _has_dataclass_arguments: bool + + +class _MapsColumns(_DCAttributeOptions, _MappedAttribute[_T]): + """interface for declarative-capable construct that delivers one or more + Column objects to the declarative process to be part of a Table. + """ + + __slots__ = () + + @property + def mapper_property_to_assign(self) -> Optional[MapperProperty[_T]]: + """return a MapperProperty to be assigned to the declarative mapping""" + raise NotImplementedError() + + @property + def columns_to_assign(self) -> List[Tuple[Column[_T], int]]: + """A list of Column objects that should be declaratively added to the + new Table object. + + """ + raise NotImplementedError() + + +# NOTE: MapperProperty needs to extend _MappedAttribute so that declarative +# typing works, i.e. "Mapped[A] = relationship()". This introduces an +# inconvenience which is that all the MapperProperty objects are treated +# as descriptors by typing tools, which are misled by this as assignment / +# access to a descriptor attribute wants to move through __get__. 
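+# (that is, a typing tool assumes attribute access on these objects is
+# mediated by the descriptor's __get__(), which is not how they behave at
+# runtime)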
+# Therefore, references to MapperProperty as an instance variable, such +# as in PropComparator, may have some special typing workarounds such as the +# use of sqlalchemy.util.typing.DescriptorReference to avoid mis-interpretation +# by typing tools +@inspection._self_inspects +class MapperProperty( + HasCacheKey, + _DCAttributeOptions, + _MappedAttribute[_T], + InspectionAttrInfo, + util.MemoizedSlots, +): + """Represent a particular class attribute mapped by :class:`_orm.Mapper`. + + The most common occurrences of :class:`.MapperProperty` are the + mapped :class:`_schema.Column`, which is represented in a mapping as + an instance of :class:`.ColumnProperty`, + and a reference to another class produced by :func:`_orm.relationship`, + represented in the mapping as an instance of + :class:`.Relationship`. + + """ + + __slots__ = ( + "_configure_started", + "_configure_finished", + "_attribute_options", + "_has_dataclass_arguments", + "parent", + "key", + "info", + "doc", + ) + + _cache_key_traversal: _TraverseInternalsType = [ + ("parent", visitors.ExtendedInternalTraversal.dp_has_cache_key), + ("key", visitors.ExtendedInternalTraversal.dp_string), + ] + + if not TYPE_CHECKING: + cascade = None + + is_property = True + """Part of the InspectionAttr interface; states this object is a + mapper property. + + """ + + comparator: PropComparator[_T] + """The :class:`_orm.PropComparator` instance that implements SQL + expression construction on behalf of this mapped attribute.""" + + key: str + """name of class attribute""" + + parent: Mapper[Any] + """the :class:`.Mapper` managing this property.""" + + _is_relationship = False + + _links_to_entity: bool + """True if this MapperProperty refers to a mapped entity. + + Should only be True for Relationship, False for all others. + + """ + + doc: Optional[str] + """optional documentation string""" + + info: _InfoType + """Info dictionary associated with the object, allowing user-defined + data to be associated with this :class:`.InspectionAttr`. + + The dictionary is generated when first accessed. Alternatively, + it can be specified as a constructor argument to the + :func:`.column_property`, :func:`_orm.relationship`, or :func:`.composite` + functions. + + .. seealso:: + + :attr:`.QueryableAttribute.info` + + :attr:`.SchemaItem.info` + + """ + + def _memoized_attr_info(self) -> _InfoType: + """Info dictionary associated with the object, allowing user-defined + data to be associated with this :class:`.InspectionAttr`. + + The dictionary is generated when first accessed. Alternatively, + it can be specified as a constructor argument to the + :func:`.column_property`, :func:`_orm.relationship`, or + :func:`.composite` + functions. + + .. seealso:: + + :attr:`.QueryableAttribute.info` + + :attr:`.SchemaItem.info` + + """ + return {} + + def setup( + self, + context: ORMCompileState, + query_entity: _MapperEntity, + path: AbstractEntityRegistry, + adapter: Optional[ORMAdapter], + **kwargs: Any, + ) -> None: + """Called by Query for the purposes of constructing a SQL statement. + + Each MapperProperty associated with the target mapper processes the + statement referenced by the query context, adding columns and/or + criterion as appropriate. 
+ + """ + + def create_row_processor( + self, + context: ORMCompileState, + query_entity: _MapperEntity, + path: AbstractEntityRegistry, + mapper: Mapper[Any], + result: Result[Any], + adapter: Optional[ORMAdapter], + populators: _PopulatorDict, + ) -> None: + """Produce row processing functions and append to the given + set of populators lists. + + """ + + def cascade_iterator( + self, + type_: str, + state: InstanceState[Any], + dict_: _InstanceDict, + visited_states: Set[InstanceState[Any]], + halt_on: Optional[Callable[[InstanceState[Any]], bool]] = None, + ) -> Iterator[ + Tuple[object, Mapper[Any], InstanceState[Any], _InstanceDict] + ]: + """Iterate through instances related to the given instance for + a particular 'cascade', starting with this MapperProperty. + + Return an iterator3-tuples (instance, mapper, state). + + Note that the 'cascade' collection on this MapperProperty is + checked first for the given type before cascade_iterator is called. + + This method typically only applies to Relationship. + + """ + + return iter(()) + + def set_parent(self, parent: Mapper[Any], init: bool) -> None: + """Set the parent mapper that references this MapperProperty. + + This method is overridden by some subclasses to perform extra + setup when the mapper is first known. + + """ + self.parent = parent + + def instrument_class(self, mapper: Mapper[Any]) -> None: + """Hook called by the Mapper to the property to initiate + instrumentation of the class attribute managed by this + MapperProperty. + + The MapperProperty here will typically call out to the + attributes module to set up an InstrumentedAttribute. + + This step is the first of two steps to set up an InstrumentedAttribute, + and is called early in the mapper setup process. + + The second step is typically the init_class_attribute step, + called from StrategizedProperty via the post_instrument_class() + hook. This step assigns additional state to the InstrumentedAttribute + (specifically the "impl") which has been determined after the + MapperProperty has determined what kind of persistence + management it needs to do (e.g. scalar, object, collection, etc). + + """ + + def __init__( + self, + attribute_options: Optional[_AttributeOptions] = None, + _assume_readonly_dc_attributes: bool = False, + ) -> None: + self._configure_started = False + self._configure_finished = False + + if _assume_readonly_dc_attributes: + default_attrs = _DEFAULT_READONLY_ATTRIBUTE_OPTIONS + else: + default_attrs = _DEFAULT_ATTRIBUTE_OPTIONS + + if attribute_options and attribute_options != default_attrs: + self._has_dataclass_arguments = True + self._attribute_options = attribute_options + else: + self._has_dataclass_arguments = False + self._attribute_options = default_attrs + + def init(self) -> None: + """Called after all mappers are created to assemble + relationships between mappers and perform other post-mapper-creation + initialization steps. + + + """ + self._configure_started = True + self.do_init() + self._configure_finished = True + + @property + def class_attribute(self) -> InstrumentedAttribute[_T]: + """Return the class-bound descriptor corresponding to this + :class:`.MapperProperty`. + + This is basically a ``getattr()`` call:: + + return getattr(self.parent.class_, self.key) + + I.e. 
if this :class:`.MapperProperty` were named ``addresses``, + and the class to which it is mapped is ``User``, this sequence + is possible:: + + >>> from sqlalchemy import inspect + >>> mapper = inspect(User) + >>> addresses_property = mapper.attrs.addresses + >>> addresses_property.class_attribute is User.addresses + True + >>> User.addresses.property is addresses_property + True + + + """ + + return getattr(self.parent.class_, self.key) # type: ignore + + def do_init(self) -> None: + """Perform subclass-specific initialization post-mapper-creation + steps. + + This is a template method called by the ``MapperProperty`` + object's init() method. + + """ + + def post_instrument_class(self, mapper: Mapper[Any]) -> None: + """Perform instrumentation adjustments that need to occur + after init() has completed. + + The given Mapper is the Mapper invoking the operation, which + may not be the same Mapper as self.parent in an inheritance + scenario; however, Mapper will always at least be a sub-mapper of + self.parent. + + This method is typically used by StrategizedProperty, which delegates + it to LoaderStrategy.init_class_attribute() to perform final setup + on the class-bound InstrumentedAttribute. + + """ + + def merge( + self, + session: Session, + source_state: InstanceState[Any], + source_dict: _InstanceDict, + dest_state: InstanceState[Any], + dest_dict: _InstanceDict, + load: bool, + _recursive: Dict[Any, object], + _resolve_conflict_map: Dict[_IdentityKeyType[Any], object], + ) -> None: + """Merge the attribute represented by this ``MapperProperty`` + from source to destination object. + + """ + + def __repr__(self) -> str: + return "<%s at 0x%x; %s>" % ( + self.__class__.__name__, + id(self), + getattr(self, "key", "no key"), + ) + + +@inspection._self_inspects +class PropComparator(SQLORMOperations[_T_co], Generic[_T_co], ColumnOperators): + r"""Defines SQL operations for ORM mapped attributes. + + SQLAlchemy allows for operators to + be redefined at both the Core and ORM level. :class:`.PropComparator` + is the base class of operator redefinition for ORM-level operations, + including those of :class:`.ColumnProperty`, + :class:`.Relationship`, and :class:`.Composite`. + + User-defined subclasses of :class:`.PropComparator` may be created. The + built-in Python comparison and math operator methods, such as + :meth:`.operators.ColumnOperators.__eq__`, + :meth:`.operators.ColumnOperators.__lt__`, and + :meth:`.operators.ColumnOperators.__add__`, can be overridden to provide + new operator behavior. The custom :class:`.PropComparator` is passed to + the :class:`.MapperProperty` instance via the ``comparator_factory`` + argument. In each case, + the appropriate subclass of :class:`.PropComparator` should be used:: + + # definition of custom PropComparator subclasses + + from sqlalchemy.orm.properties import ( + ColumnProperty, + Composite, + Relationship, + ) + + + class MyColumnComparator(ColumnProperty.Comparator): + def __eq__(self, other): + return self.__clause_element__() == other + + + class MyRelationshipComparator(Relationship.Comparator): + def any(self, expression): + "define the 'any' operation" + # ... 
+ + + class MyCompositeComparator(Composite.Comparator): + def __gt__(self, other): + "redefine the 'greater than' operation" + + return sql.and_( + *[ + a > b + for a, b in zip( + self.__clause_element__().clauses, + other.__composite_values__(), + ) + ] + ) + + + # application of custom PropComparator subclasses + + from sqlalchemy.orm import column_property, relationship, composite + from sqlalchemy import Column, String + + + class SomeMappedClass(Base): + some_column = column_property( + Column("some_column", String), + comparator_factory=MyColumnComparator, + ) + + some_relationship = relationship( + SomeOtherClass, comparator_factory=MyRelationshipComparator + ) + + some_composite = composite( + Column("a", String), + Column("b", String), + comparator_factory=MyCompositeComparator, + ) + + Note that for column-level operator redefinition, it's usually + simpler to define the operators at the Core level, using the + :attr:`.TypeEngine.comparator_factory` attribute. See + :ref:`types_operators` for more detail. + + .. seealso:: + + :class:`.ColumnProperty.Comparator` + + :class:`.Relationship.Comparator` + + :class:`.Composite.Comparator` + + :class:`.ColumnOperators` + + :ref:`types_operators` + + :attr:`.TypeEngine.comparator_factory` + + """ + + __slots__ = "prop", "_parententity", "_adapt_to_entity" + + __visit_name__ = "orm_prop_comparator" + + _parententity: _InternalEntityType[Any] + _adapt_to_entity: Optional[AliasedInsp[Any]] + prop: RODescriptorReference[MapperProperty[_T_co]] + + def __init__( + self, + prop: MapperProperty[_T], + parentmapper: _InternalEntityType[Any], + adapt_to_entity: Optional[AliasedInsp[Any]] = None, + ): + self.prop = prop + self._parententity = adapt_to_entity or parentmapper + self._adapt_to_entity = adapt_to_entity + + @util.non_memoized_property + def property(self) -> MapperProperty[_T_co]: + """Return the :class:`.MapperProperty` associated with this + :class:`.PropComparator`. + + + Return values here will commonly be instances of + :class:`.ColumnProperty` or :class:`.Relationship`. + + + """ + return self.prop + + def __clause_element__(self) -> roles.ColumnsClauseRole: + raise NotImplementedError("%r" % self) + + def _bulk_update_tuples( + self, value: Any + ) -> Sequence[Tuple[_DMLColumnArgument, Any]]: + """Receive a SQL expression that represents a value in the SET + clause of an UPDATE statement. + + Return a tuple that can be passed to a :class:`_expression.Update` + construct. + + """ + + return [(cast("_DMLColumnArgument", self.__clause_element__()), value)] + + def adapt_to_entity( + self, adapt_to_entity: AliasedInsp[Any] + ) -> PropComparator[_T_co]: + """Return a copy of this PropComparator which will use the given + :class:`.AliasedInsp` to produce corresponding expressions. + """ + return self.__class__(self.prop, self._parententity, adapt_to_entity) + + @util.ro_non_memoized_property + def _parentmapper(self) -> Mapper[Any]: + """legacy; this is renamed to _parententity to be + compatible with QueryableAttribute.""" + return self._parententity.mapper + + def _criterion_exists( + self, + criterion: Optional[_ColumnExpressionArgument[bool]] = None, + **kwargs: Any, + ) -> ColumnElement[Any]: + return self.prop.comparator._criterion_exists(criterion, **kwargs) + + @util.ro_non_memoized_property + def adapter(self) -> Optional[_ORMAdapterProto]: + """Produce a callable that adapts column expressions + to suit an aliased version of this comparator. 
+ + """ + if self._adapt_to_entity is None: + return None + else: + return self._adapt_to_entity._orm_adapt_element + + @util.ro_non_memoized_property + def info(self) -> _InfoType: + return self.prop.info + + @staticmethod + def _any_op(a: Any, b: Any, **kwargs: Any) -> Any: + return a.any(b, **kwargs) + + @staticmethod + def _has_op(left: Any, other: Any, **kwargs: Any) -> Any: + return left.has(other, **kwargs) + + @staticmethod + def _of_type_op(a: Any, class_: Any) -> Any: + return a.of_type(class_) + + any_op = cast(operators.OperatorType, _any_op) + has_op = cast(operators.OperatorType, _has_op) + of_type_op = cast(operators.OperatorType, _of_type_op) + + if typing.TYPE_CHECKING: + + def operate( + self, op: OperatorType, *other: Any, **kwargs: Any + ) -> ColumnElement[Any]: ... + + def reverse_operate( + self, op: OperatorType, other: Any, **kwargs: Any + ) -> ColumnElement[Any]: ... + + def of_type(self, class_: _EntityType[Any]) -> PropComparator[_T_co]: + r"""Redefine this object in terms of a polymorphic subclass, + :func:`_orm.with_polymorphic` construct, or :func:`_orm.aliased` + construct. + + Returns a new PropComparator from which further criterion can be + evaluated. + + e.g.:: + + query.join(Company.employees.of_type(Engineer)).filter( + Engineer.name == "foo" + ) + + :param \class_: a class or mapper indicating that criterion will be + against this specific subclass. + + .. seealso:: + + :ref:`orm_queryguide_joining_relationships_aliased` - in the + :ref:`queryguide_toplevel` + + :ref:`inheritance_of_type` + + """ + + return self.operate(PropComparator.of_type_op, class_) # type: ignore + + def and_( + self, *criteria: _ColumnExpressionArgument[bool] + ) -> PropComparator[bool]: + """Add additional criteria to the ON clause that's represented by this + relationship attribute. + + E.g.:: + + + stmt = select(User).join( + User.addresses.and_(Address.email_address != "foo") + ) + + stmt = select(User).options( + joinedload(User.addresses.and_(Address.email_address != "foo")) + ) + + .. versionadded:: 1.4 + + .. seealso:: + + :ref:`orm_queryguide_join_on_augmented` + + :ref:`loader_option_criteria` + + :func:`.with_loader_criteria` + + """ + return self.operate(operators.and_, *criteria) # type: ignore + + def any( + self, + criterion: Optional[_ColumnExpressionArgument[bool]] = None, + **kwargs: Any, + ) -> ColumnElement[bool]: + r"""Return a SQL expression representing true if this element + references a member which meets the given criterion. + + The usual implementation of ``any()`` is + :meth:`.Relationship.Comparator.any`. + + :param criterion: an optional ClauseElement formulated against the + member class' table or attributes. + + :param \**kwargs: key/value pairs corresponding to member class + attribute names which will be compared via equality to the + corresponding values. + + """ + + return self.operate(PropComparator.any_op, criterion, **kwargs) + + def has( + self, + criterion: Optional[_ColumnExpressionArgument[bool]] = None, + **kwargs: Any, + ) -> ColumnElement[bool]: + r"""Return a SQL expression representing true if this element + references a member which meets the given criterion. + + The usual implementation of ``has()`` is + :meth:`.Relationship.Comparator.has`. + + :param criterion: an optional ClauseElement formulated against the + member class' table or attributes. + + :param \**kwargs: key/value pairs corresponding to member class + attribute names which will be compared via equality to the + corresponding values. 
+ + """ + + return self.operate(PropComparator.has_op, criterion, **kwargs) + + +class StrategizedProperty(MapperProperty[_T]): + """A MapperProperty which uses selectable strategies to affect + loading behavior. + + There is a single strategy selected by default. Alternate + strategies can be selected at Query time through the usage of + ``StrategizedOption`` objects via the Query.options() method. + + The mechanics of StrategizedProperty are used for every Query + invocation for every mapped attribute participating in that Query, + to determine first how the attribute will be rendered in SQL + and secondly how the attribute will retrieve a value from a result + row and apply it to a mapped object. The routines here are very + performance-critical. + + """ + + __slots__ = ( + "_strategies", + "strategy", + "_wildcard_token", + "_default_path_loader_key", + "strategy_key", + ) + inherit_cache = True + strategy_wildcard_key: ClassVar[str] + + strategy_key: _StrategyKey + + _strategies: Dict[_StrategyKey, LoaderStrategy] + + def _memoized_attr__wildcard_token(self) -> Tuple[str]: + return ( + f"{self.strategy_wildcard_key}:{path_registry._WILDCARD_TOKEN}", + ) + + def _memoized_attr__default_path_loader_key( + self, + ) -> Tuple[str, Tuple[str]]: + return ( + "loader", + (f"{self.strategy_wildcard_key}:{path_registry._DEFAULT_TOKEN}",), + ) + + def _get_context_loader( + self, context: ORMCompileState, path: AbstractEntityRegistry + ) -> Optional[_LoadElement]: + load: Optional[_LoadElement] = None + + search_path = path[self] + + # search among: exact match, "attr.*", "default" strategy + # if any. + for path_key in ( + search_path._loader_key, + search_path._wildcard_path_loader_key, + search_path._default_path_loader_key, + ): + if path_key in context.attributes: + load = context.attributes[path_key] + break + + # note that if strategy_options.Load is placing non-actionable + # objects in the context like defaultload(), we would + # need to continue the loop here if we got such an + # option as below. 
+ # if load.strategy or load.local_opts: + # break + + return load + + def _get_strategy(self, key: _StrategyKey) -> LoaderStrategy: + try: + return self._strategies[key] + except KeyError: + pass + + # run outside to prevent transfer of exception context + cls = self._strategy_lookup(self, *key) + # this previously was setting self._strategies[cls], that's + # a bad idea; should use strategy key at all times because every + # strategy has multiple keys at this point + self._strategies[key] = strategy = cls(self, key) + return strategy + + def setup( + self, + context: ORMCompileState, + query_entity: _MapperEntity, + path: AbstractEntityRegistry, + adapter: Optional[ORMAdapter], + **kwargs: Any, + ) -> None: + loader = self._get_context_loader(context, path) + if loader and loader.strategy: + strat = self._get_strategy(loader.strategy) + else: + strat = self.strategy + strat.setup_query( + context, query_entity, path, loader, adapter, **kwargs + ) + + def create_row_processor( + self, + context: ORMCompileState, + query_entity: _MapperEntity, + path: AbstractEntityRegistry, + mapper: Mapper[Any], + result: Result[Any], + adapter: Optional[ORMAdapter], + populators: _PopulatorDict, + ) -> None: + loader = self._get_context_loader(context, path) + if loader and loader.strategy: + strat = self._get_strategy(loader.strategy) + else: + strat = self.strategy + strat.create_row_processor( + context, + query_entity, + path, + loader, + mapper, + result, + adapter, + populators, + ) + + def do_init(self) -> None: + self._strategies = {} + self.strategy = self._get_strategy(self.strategy_key) + + def post_instrument_class(self, mapper: Mapper[Any]) -> None: + if ( + not self.parent.non_primary + and not mapper.class_manager._attr_has_impl(self.key) + ): + self.strategy.init_class_attribute(mapper) + + _all_strategies: collections.defaultdict[ + Type[MapperProperty[Any]], Dict[_StrategyKey, Type[LoaderStrategy]] + ] = collections.defaultdict(dict) + + @classmethod + def strategy_for(cls, **kw: Any) -> Callable[[_TLS], _TLS]: + def decorate(dec_cls: _TLS) -> _TLS: + # ensure each subclass of the strategy has its + # own _strategy_keys collection + if "_strategy_keys" not in dec_cls.__dict__: + dec_cls._strategy_keys = [] + key = tuple(sorted(kw.items())) + cls._all_strategies[cls][key] = dec_cls + dec_cls._strategy_keys.append(key) + return dec_cls + + return decorate + + @classmethod + def _strategy_lookup( + cls, requesting_property: MapperProperty[Any], *key: Any + ) -> Type[LoaderStrategy]: + requesting_property.parent._with_polymorphic_mappers + + for prop_cls in cls.__mro__: + if prop_cls in cls._all_strategies: + if TYPE_CHECKING: + assert issubclass(prop_cls, MapperProperty) + strategies = cls._all_strategies[prop_cls] + try: + return strategies[key] + except KeyError: + pass + + for property_type, strats in cls._all_strategies.items(): + if key in strats: + intended_property_type = property_type + actual_strategy = strats[key] + break + else: + intended_property_type = None + actual_strategy = None + + raise orm_exc.LoaderStrategyException( + cls, + requesting_property, + intended_property_type, + actual_strategy, + key, + ) + + +class ORMOption(ExecutableOption): + """Base class for option objects that are passed to ORM queries. + + These options may be consumed by :meth:`.Query.options`, + :meth:`.Select.options`, or in a more general sense by any + :meth:`.Executable.options` method. They are interpreted at + statement compile time or execution time in modern use. 
The + deprecated :class:`.MapperOption` is consumed at ORM query construction + time. + + .. versionadded:: 1.4 + + """ + + __slots__ = () + + _is_legacy_option = False + + propagate_to_loaders = False + """if True, indicate this option should be carried along + to "secondary" SELECT statements that occur for relationship + lazy loaders as well as attribute load / refresh operations. + + """ + + _is_core = False + + _is_user_defined = False + + _is_compile_state = False + + _is_criteria_option = False + + _is_strategy_option = False + + def _adapt_cached_option_to_uncached_option( + self, context: QueryContext, uncached_opt: ORMOption + ) -> ORMOption: + """adapt this option to the "uncached" version of itself in a + loader strategy context. + + given "self" which is an option from a cached query, as well as the + corresponding option from the uncached version of the same query, + return the option we should use in a new query, in the context of a + loader strategy being asked to load related rows on behalf of that + cached query, which is assumed to be building a new query based on + entities passed to us from the cached query. + + Currently this routine chooses between "self" and "uncached" without + manufacturing anything new. If the option is itself a loader strategy + option which has a path, that path needs to match to the entities being + passed to us by the cached query, so the :class:`_orm.Load` subclass + overrides this to return "self". For all other options, we return the + uncached form which may have changing state, such as a + with_loader_criteria() option which will very often have new state. + + This routine could in the future involve + generating a new option based on both inputs if use cases arise, + such as if with_loader_criteria() needed to match up to + ``AliasedClass`` instances given in the parent query. + + However, longer term it might be better to restructure things such that + ``AliasedClass`` entities are always matched up on their cache key, + instead of identity, in things like paths and such, so that this whole + issue of "the uncached option does not match the entities" goes away. + However this would make ``PathRegistry`` more complicated and difficult + to debug as well as potentially less performant in that it would be + hashing enormous cache keys rather than a simple AliasedInsp. UNLESS, + we could get cache keys overall to be reliably hashed into something + like an md5 key. + + .. versionadded:: 1.4.41 + + """ + if uncached_opt is not None: + return uncached_opt + else: + return self + + +class CompileStateOption(HasCacheKey, ORMOption): + """base for :class:`.ORMOption` classes that affect the compilation of + a SQL query and therefore need to be part of the cache key. + + .. note:: :class:`.CompileStateOption` is generally non-public and + should not be used as a base class for user-defined options; instead, + use :class:`.UserDefinedOption`, which is easier to use as it does not + interact with ORM compilation internals or caching. + + :class:`.CompileStateOption` defines an internal attribute + ``_is_compile_state=True`` which has the effect of the ORM compilation + routines for SELECT and other statements will call upon these options when + a SQL string is being compiled. As such, these classes implement + :class:`.HasCacheKey` and need to provide robust ``_cache_key_traversal`` + structures. + + The :class:`.CompileStateOption` class is used to implement the ORM + :class:`.LoaderOption` and :class:`.CriteriaOption` classes. + + .. 
versionadded:: 1.4.28 + + + """ + + __slots__ = () + + _is_compile_state = True + + def process_compile_state(self, compile_state: ORMCompileState) -> None: + """Apply a modification to a given :class:`.ORMCompileState`. + + This method is part of the implementation of a particular + :class:`.CompileStateOption` and is only invoked internally + when an ORM query is compiled. + + """ + + def process_compile_state_replaced_entities( + self, + compile_state: ORMCompileState, + mapper_entities: Sequence[_MapperEntity], + ) -> None: + """Apply a modification to a given :class:`.ORMCompileState`, + given entities that were replaced by with_only_columns() or + with_entities(). + + This method is part of the implementation of a particular + :class:`.CompileStateOption` and is only invoked internally + when an ORM query is compiled. + + .. versionadded:: 1.4.19 + + """ + + +class LoaderOption(CompileStateOption): + """Describe a loader modification to an ORM statement at compilation time. + + .. versionadded:: 1.4 + + """ + + __slots__ = () + + def process_compile_state_replaced_entities( + self, + compile_state: ORMCompileState, + mapper_entities: Sequence[_MapperEntity], + ) -> None: + self.process_compile_state(compile_state) + + +class CriteriaOption(CompileStateOption): + """Describe a WHERE criteria modification to an ORM statement at + compilation time. + + .. versionadded:: 1.4 + + """ + + __slots__ = () + + _is_criteria_option = True + + def get_global_criteria(self, attributes: Dict[str, Any]) -> None: + """update additional entity criteria options in the given + attributes dictionary. + + """ + + +class UserDefinedOption(ORMOption): + """Base class for a user-defined option that can be consumed from the + :meth:`.SessionEvents.do_orm_execute` event hook. + + """ + + __slots__ = ("payload",) + + _is_legacy_option = False + + _is_user_defined = True + + propagate_to_loaders = False + """if True, indicate this option should be carried along + to "secondary" Query objects produced during lazy loads + or refresh operations. + + """ + + def __init__(self, payload: Optional[Any] = None): + self.payload = payload + + +@util.deprecated_cls( + "1.4", + "The :class:`.MapperOption class is deprecated and will be removed " + "in a future release. For " + "modifications to queries on a per-execution basis, use the " + ":class:`.UserDefinedOption` class to establish state within a " + ":class:`.Query` or other Core statement, then use the " + ":meth:`.SessionEvents.before_orm_execute` hook to consume them.", + constructor=None, +) +class MapperOption(ORMOption): + """Describe a modification to a Query""" + + __slots__ = () + + _is_legacy_option = True + + propagate_to_loaders = False + """if True, indicate this option should be carried along + to "secondary" Query objects produced during lazy loads + or refresh operations. + + """ + + def process_query(self, query: Query[Any]) -> None: + """Apply a modification to the given :class:`_query.Query`.""" + + def process_query_conditionally(self, query: Query[Any]) -> None: + """same as process_query(), except that this option may not + apply to the given query. + + This is typically applied during a lazy load or scalar refresh + operation to propagate options stated in the original Query to the + new Query being used for the load. It occurs for those options that + specify propagate_to_loaders=True. + + """ + + self.process_query(query) + + +class LoaderStrategy: + """Describe the loading behavior of a StrategizedProperty object. 
+ + The ``LoaderStrategy`` interacts with the querying process in three + ways: + + * it controls the configuration of the ``InstrumentedAttribute`` + placed on a class to handle the behavior of the attribute. this + may involve setting up class-level callable functions to fire + off a select operation when the attribute is first accessed + (i.e. a lazy load) + + * it processes the ``QueryContext`` at statement construction time, + where it can modify the SQL statement that is being produced. + For example, simple column attributes will add their represented + column to the list of selected columns, a joined eager loader + may establish join clauses to add to the statement. + + * It produces "row processor" functions at result fetching time. + These "row processor" functions populate a particular attribute + on a particular mapped instance. + + """ + + __slots__ = ( + "parent_property", + "is_class_level", + "parent", + "key", + "strategy_key", + "strategy_opts", + ) + + _strategy_keys: ClassVar[List[_StrategyKey]] + + def __init__( + self, parent: MapperProperty[Any], strategy_key: _StrategyKey + ): + self.parent_property = parent + self.is_class_level = False + self.parent = self.parent_property.parent + self.key = self.parent_property.key + self.strategy_key = strategy_key + self.strategy_opts = dict(strategy_key) + + def init_class_attribute(self, mapper: Mapper[Any]) -> None: + pass + + def setup_query( + self, + compile_state: ORMCompileState, + query_entity: _MapperEntity, + path: AbstractEntityRegistry, + loadopt: Optional[_LoadElement], + adapter: Optional[ORMAdapter], + **kwargs: Any, + ) -> None: + """Establish column and other state for a given QueryContext. + + This method fulfills the contract specified by MapperProperty.setup(). + + StrategizedProperty delegates its setup() method + directly to this method. + + """ + + def create_row_processor( + self, + context: ORMCompileState, + query_entity: _MapperEntity, + path: AbstractEntityRegistry, + loadopt: Optional[_LoadElement], + mapper: Mapper[Any], + result: Result[Any], + adapter: Optional[ORMAdapter], + populators: _PopulatorDict, + ) -> None: + """Establish row processing functions for a given QueryContext. + + This method fulfills the contract specified by + MapperProperty.create_row_processor(). + + StrategizedProperty delegates its create_row_processor() method + directly to this method. + + """ + + def __str__(self) -> str: + return str(self.parent_property) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/loading.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/loading.py new file mode 100644 index 0000000000000000000000000000000000000000..679286f546663e032de65e09264863989e919254 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/loading.py @@ -0,0 +1,1682 @@ +# orm/loading.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php +# mypy: ignore-errors + + +"""private module containing functions used to convert database +rows into object instances and associated state. + +the functions here are called primarily by Query, Mapper, +as well as some of the attribute loading strategies. 
+ +""" + +from __future__ import annotations + +from typing import Any +from typing import Dict +from typing import Iterable +from typing import List +from typing import Mapping +from typing import Optional +from typing import Sequence +from typing import Tuple +from typing import TYPE_CHECKING +from typing import TypeVar +from typing import Union + +from . import attributes +from . import exc as orm_exc +from . import path_registry +from .base import _DEFER_FOR_STATE +from .base import _RAISE_FOR_STATE +from .base import _SET_DEFERRED_EXPIRED +from .base import PassiveFlag +from .context import FromStatement +from .context import ORMCompileState +from .context import QueryContext +from .util import _none_set +from .util import state_str +from .. import exc as sa_exc +from .. import util +from ..engine import result_tuple +from ..engine.result import ChunkedIteratorResult +from ..engine.result import FrozenResult +from ..engine.result import SimpleResultMetaData +from ..sql import select +from ..sql import util as sql_util +from ..sql.selectable import ForUpdateArg +from ..sql.selectable import LABEL_STYLE_TABLENAME_PLUS_COL +from ..sql.selectable import SelectState +from ..util import EMPTY_DICT + +if TYPE_CHECKING: + from ._typing import _IdentityKeyType + from .base import LoaderCallableStatus + from .interfaces import ORMOption + from .mapper import Mapper + from .query import Query + from .session import Session + from .state import InstanceState + from ..engine.cursor import CursorResult + from ..engine.interfaces import _ExecuteOptions + from ..engine.result import Result + from ..sql import Select + +_T = TypeVar("_T", bound=Any) +_O = TypeVar("_O", bound=object) +_new_runid = util.counter() + + +_PopulatorDict = Dict[str, List[Tuple[str, Any]]] + + +def instances(cursor: CursorResult[Any], context: QueryContext) -> Result[Any]: + """Return a :class:`.Result` given an ORM query context. + + :param cursor: a :class:`.CursorResult`, generated by a statement + which came from :class:`.ORMCompileState` + + :param context: a :class:`.QueryContext` object + + :return: a :class:`.Result` object representing ORM results + + .. versionchanged:: 1.4 The instances() function now uses + :class:`.Result` objects and has an all new interface. + + """ + + context.runid = _new_runid() + + if context.top_level_context: + is_top_level = False + context.post_load_paths = context.top_level_context.post_load_paths + else: + is_top_level = True + context.post_load_paths = {} + + compile_state = context.compile_state + filtered = compile_state._has_mapper_entities + single_entity = ( + not context.load_options._only_return_tuples + and len(compile_state._entities) == 1 + and compile_state._entities[0].supports_single_entity + ) + + try: + (process, labels, extra) = list( + zip( + *[ + query_entity.row_processor(context, cursor) + for query_entity in context.compile_state._entities + ] + ) + ) + + if context.yield_per and ( + context.loaders_require_buffering + or context.loaders_require_uniquing + ): + raise sa_exc.InvalidRequestError( + "Can't use yield_per with eager loaders that require uniquing " + "or row buffering, e.g. joinedload() against collections " + "or subqueryload(). Consider the selectinload() strategy " + "for better flexibility in loading objects." 
+ ) + + except Exception: + with util.safe_reraise(): + cursor.close() + + def _no_unique(entry): + raise sa_exc.InvalidRequestError( + "Can't use the ORM yield_per feature in conjunction with unique()" + ) + + def _not_hashable(datatype, *, legacy=False, uncertain=False): + if not legacy: + + def go(obj): + if uncertain: + try: + return hash(obj) + except: + pass + + raise sa_exc.InvalidRequestError( + "Can't apply uniqueness to row tuple containing value of " + f"""type {datatype!r}; { + 'the values returned appear to be' + if uncertain + else 'this datatype produces' + } non-hashable values""" + ) + + return go + elif not uncertain: + return id + else: + _use_id = False + + def go(obj): + nonlocal _use_id + + if not _use_id: + try: + return hash(obj) + except: + pass + + # in #10459, we considered using a warning here, however + # as legacy query uses result.unique() in all cases, this + # would lead to too many warning cases. + _use_id = True + + return id(obj) + + return go + + unique_filters = [ + ( + _no_unique + if context.yield_per + else ( + _not_hashable( + ent.column.type, # type: ignore + legacy=context.load_options._legacy_uniquing, + uncertain=ent._null_column_type, + ) + if ( + not ent.use_id_for_hash + and (ent._non_hashable_value or ent._null_column_type) + ) + else id if ent.use_id_for_hash else None + ) + ) + for ent in context.compile_state._entities + ] + + row_metadata = SimpleResultMetaData( + labels, extra, _unique_filters=unique_filters + ) + + def chunks(size): # type: ignore + while True: + yield_per = size + + context.partials = {} + + if yield_per: + fetch = cursor.fetchmany(yield_per) + + if not fetch: + break + else: + fetch = cursor._raw_all_rows() + + if single_entity: + proc = process[0] + rows = [proc(row) for row in fetch] + else: + rows = [ + tuple([proc(row) for proc in process]) for row in fetch + ] + + # if we are the originating load from a query, meaning we + # aren't being called as a result of a nested "post load", + # iterate through all the collected post loaders and fire them + # off. Previously this used to work recursively, however that + # prevented deeply nested structures from being loadable + if is_top_level: + if yield_per: + # if using yield per, memoize the state of the + # collection so that it can be restored + top_level_post_loads = list( + context.post_load_paths.items() + ) + + while context.post_load_paths: + post_loads = list(context.post_load_paths.items()) + context.post_load_paths.clear() + for path, post_load in post_loads: + post_load.invoke(context, path) + + if yield_per: + context.post_load_paths.clear() + context.post_load_paths.update(top_level_post_loads) + + yield rows + + if not yield_per: + break + + if context.execution_options.get("prebuffer_rows", False): + # this is a bit of a hack at the moment. + # I would rather have some option in the result to pre-buffer + # internally. + _prebuffered = list(chunks(None)) + + def chunks(size): + return iter(_prebuffered) + + result = ChunkedIteratorResult( + row_metadata, + chunks, + source_supports_scalars=single_entity, + raw=cursor, + dynamic_yield_per=cursor.context._is_server_side, + ) + + # filtered and single_entity are used to indicate to legacy Query that the + # query has ORM entities, so legacy deduping and scalars should be called + # on the result. + result._attributes = result._attributes.union( + dict(filtered=filtered, is_single_entity=single_entity) + ) + + # multi_row_eager_loaders OTOH is specific to joinedload. 
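+ # joined eager loads against collections produce multiple rows per lead
+ # entity; arm the Result so that fetching rows without first calling
+ # .unique() raises InvalidRequestError via require_unique() below.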
+ if context.compile_state.multi_row_eager_loaders: + + def require_unique(obj): + raise sa_exc.InvalidRequestError( + "The unique() method must be invoked on this Result, " + "as it contains results that include joined eager loads " + "against collections" + ) + + result._unique_filter_state = (None, require_unique) + + if context.yield_per: + result.yield_per(context.yield_per) + + return result + + +@util.preload_module("sqlalchemy.orm.context") +def merge_frozen_result(session, statement, frozen_result, load=True): + """Merge a :class:`_engine.FrozenResult` back into a :class:`_orm.Session`, + returning a new :class:`_engine.Result` object with :term:`persistent` + objects. + + See the section :ref:`do_orm_execute_re_executing` for an example. + + .. seealso:: + + :ref:`do_orm_execute_re_executing` + + :meth:`_engine.Result.freeze` + + :class:`_engine.FrozenResult` + + """ + querycontext = util.preloaded.orm_context + + if load: + # flush current contents if we expect to load data + session._autoflush() + + ctx = querycontext.ORMSelectCompileState._create_entities_collection( + statement, legacy=False + ) + + autoflush = session.autoflush + try: + session.autoflush = False + mapped_entities = [ + i + for i, e in enumerate(ctx._entities) + if isinstance(e, querycontext._MapperEntity) + ] + keys = [ent._label_name for ent in ctx._entities] + + keyed_tuple = result_tuple( + keys, [ent._extra_entities for ent in ctx._entities] + ) + + result = [] + for newrow in frozen_result.rewrite_rows(): + for i in mapped_entities: + if newrow[i] is not None: + newrow[i] = session._merge( + attributes.instance_state(newrow[i]), + attributes.instance_dict(newrow[i]), + load=load, + _recursive={}, + _resolve_conflict_map={}, + ) + + result.append(keyed_tuple(newrow)) + + return frozen_result.with_new_rows(result) + finally: + session.autoflush = autoflush + + +@util.became_legacy_20( + ":func:`_orm.merge_result`", + alternative="The function as well as the method on :class:`_orm.Query` " + "is superseded by the :func:`_orm.merge_frozen_result` function.", +) +@util.preload_module("sqlalchemy.orm.context") +def merge_result( + query: Query[Any], + iterator: Union[FrozenResult, Iterable[Sequence[Any]], Iterable[object]], + load: bool = True, +) -> Union[FrozenResult, Iterable[Any]]: + """Merge a result into the given :class:`.Query` object's Session. + + See :meth:`_orm.Query.merge_result` for top-level documentation on this + function. + + """ + + querycontext = util.preloaded.orm_context + + session = query.session + if load: + # flush current contents if we expect to load data + session._autoflush() + + # TODO: need test coverage and documentation for the FrozenResult + # use case. 
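+ # an incoming FrozenResult is unpacked to its raw data here and
+ # re-wrapped via with_new_rows() at the end; a plain iterable of rows or
+ # instances is consumed directly.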
+ if isinstance(iterator, FrozenResult): + frozen_result = iterator + iterator = iter(frozen_result.data) + else: + frozen_result = None + + ctx = querycontext.ORMSelectCompileState._create_entities_collection( + query, legacy=True + ) + + autoflush = session.autoflush + try: + session.autoflush = False + single_entity = not frozen_result and len(ctx._entities) == 1 + + if single_entity: + if isinstance(ctx._entities[0], querycontext._MapperEntity): + result = [ + session._merge( + attributes.instance_state(instance), + attributes.instance_dict(instance), + load=load, + _recursive={}, + _resolve_conflict_map={}, + ) + for instance in iterator + ] + else: + result = list(iterator) + else: + mapped_entities = [ + i + for i, e in enumerate(ctx._entities) + if isinstance(e, querycontext._MapperEntity) + ] + result = [] + keys = [ent._label_name for ent in ctx._entities] + + keyed_tuple = result_tuple( + keys, [ent._extra_entities for ent in ctx._entities] + ) + + for row in iterator: + newrow = list(row) + for i in mapped_entities: + if newrow[i] is not None: + newrow[i] = session._merge( + attributes.instance_state(newrow[i]), + attributes.instance_dict(newrow[i]), + load=load, + _recursive={}, + _resolve_conflict_map={}, + ) + result.append(keyed_tuple(newrow)) + + if frozen_result: + return frozen_result.with_new_rows(result) + else: + return iter(result) + finally: + session.autoflush = autoflush + + +def get_from_identity( + session: Session, + mapper: Mapper[_O], + key: _IdentityKeyType[_O], + passive: PassiveFlag, +) -> Union[LoaderCallableStatus, Optional[_O]]: + """Look up the given key in the given session's identity map, + check the object for expired state if found. + + """ + instance = session.identity_map.get(key) + if instance is not None: + state = attributes.instance_state(instance) + + if mapper.inherits and not state.mapper.isa(mapper): + return attributes.PASSIVE_CLASS_MISMATCH + + # expired - ensure it still exists + if state.expired: + if not passive & attributes.SQL_OK: + # TODO: no coverage here + return attributes.PASSIVE_NO_RESULT + elif not passive & attributes.RELATED_OBJECT_OK: + # this mode is used within a flush and the instance's + # expired state will be checked soon enough, if necessary. 
+ # also used by immediateloader for a mutually-dependent + # o2m->m2m load, :ticket:`6301` + return instance + try: + state._load_expired(state, passive) + except orm_exc.ObjectDeletedError: + session._remove_newly_deleted([state]) + return None + return instance + else: + return None + + +def load_on_ident( + session: Session, + statement: Union[Select, FromStatement], + key: Optional[_IdentityKeyType], + *, + load_options: Optional[Sequence[ORMOption]] = None, + refresh_state: Optional[InstanceState[Any]] = None, + with_for_update: Optional[ForUpdateArg] = None, + only_load_props: Optional[Iterable[str]] = None, + no_autoflush: bool = False, + bind_arguments: Mapping[str, Any] = util.EMPTY_DICT, + execution_options: _ExecuteOptions = util.EMPTY_DICT, + require_pk_cols: bool = False, + is_user_refresh: bool = False, +): + """Load the given identity key from the database.""" + if key is not None: + ident = key[1] + identity_token = key[2] + else: + ident = identity_token = None + + return load_on_pk_identity( + session, + statement, + ident, + load_options=load_options, + refresh_state=refresh_state, + with_for_update=with_for_update, + only_load_props=only_load_props, + identity_token=identity_token, + no_autoflush=no_autoflush, + bind_arguments=bind_arguments, + execution_options=execution_options, + require_pk_cols=require_pk_cols, + is_user_refresh=is_user_refresh, + ) + + +def load_on_pk_identity( + session: Session, + statement: Union[Select, FromStatement], + primary_key_identity: Optional[Tuple[Any, ...]], + *, + load_options: Optional[Sequence[ORMOption]] = None, + refresh_state: Optional[InstanceState[Any]] = None, + with_for_update: Optional[ForUpdateArg] = None, + only_load_props: Optional[Iterable[str]] = None, + identity_token: Optional[Any] = None, + no_autoflush: bool = False, + bind_arguments: Mapping[str, Any] = util.EMPTY_DICT, + execution_options: _ExecuteOptions = util.EMPTY_DICT, + require_pk_cols: bool = False, + is_user_refresh: bool = False, +): + """Load the given primary key identity from the database.""" + + query = statement + q = query._clone() + + assert not q._is_lambda_element + + if load_options is None: + load_options = QueryContext.default_load_options + + if ( + statement._compile_options + is SelectState.default_select_compile_options + ): + compile_options = ORMCompileState.default_compile_options + else: + compile_options = statement._compile_options + + if primary_key_identity is not None: + mapper = query._propagate_attrs["plugin_subject"] + + (_get_clause, _get_params) = mapper._get_clause + + # None present in ident - turn those comparisons + # into "IS NULL" + if None in primary_key_identity: + nones = { + _get_params[col].key + for col, value in zip(mapper.primary_key, primary_key_identity) + if value is None + } + + _get_clause = sql_util.adapt_criterion_to_null(_get_clause, nones) + + if len(nones) == len(primary_key_identity): + util.warn( + "fully NULL primary key identity cannot load any " + "object. This condition may raise an error in a future " + "release." 
+ ) + + q._where_criteria = ( + sql_util._deep_annotate(_get_clause, {"_orm_adapt": True}), + ) + + params = { + _get_params[primary_key].key: id_val + for id_val, primary_key in zip( + primary_key_identity, mapper.primary_key + ) + } + else: + params = None + + if with_for_update is not None: + version_check = True + q._for_update_arg = with_for_update + elif query._for_update_arg is not None: + version_check = True + q._for_update_arg = query._for_update_arg + else: + version_check = False + + if require_pk_cols and only_load_props: + if not refresh_state: + raise sa_exc.ArgumentError( + "refresh_state is required when require_pk_cols is present" + ) + + refresh_state_prokeys = refresh_state.mapper._primary_key_propkeys + has_changes = { + key + for key in refresh_state_prokeys.difference(only_load_props) + if refresh_state.attrs[key].history.has_changes() + } + if has_changes: + # raise if pending pk changes are present. + # technically, this could be limited to the case where we have + # relationships in the only_load_props collection to be refreshed + # also (and only ones that have a secondary eager loader, at that). + # however, the error is in place across the board so that behavior + # here is easier to predict. The use case it prevents is one + # of mutating PK attrs, leaving them unflushed, + # calling session.refresh(), and expecting those attrs to remain + # still unflushed. It seems likely someone doing all those + # things would be better off having the PK attributes flushed + # to the database before tinkering like that (session.refresh() is + # tinkering). + raise sa_exc.InvalidRequestError( + f"Please flush pending primary key changes on " + "attributes " + f"{has_changes} for mapper {refresh_state.mapper} before " + "proceeding with a refresh" + ) + + # overall, the ORM has no internal flow right now for "dont load the + # primary row of an object at all, but fire off + # selectinload/subqueryload/immediateload for some relationships". + # It would probably be a pretty big effort to add such a flow. So + # here, the case for #8703 is introduced; user asks to refresh some + # relationship attributes only which are + # selectinload/subqueryload/immediateload/ etc. (not joinedload). + # ORM complains there's no columns in the primary row to load. + # So here, we just add the PK cols if that + # case is detected, so that there is a SELECT emitted for the primary + # row. + # + # Let's just state right up front, for this one little case, + # the ORM here is adding a whole extra SELECT just to satisfy + # limitations in the internal flow. This is really not a thing + # SQLAlchemy finds itself doing like, ever, obviously, we are + # constantly working to *remove* SELECTs we don't need. We + # rationalize this for now based on 1. session.refresh() is not + # commonly used 2. session.refresh() with only relationship attrs is + # even less commonly used 3. the SELECT in question is very low + # latency. + # + # to add the flow to not include the SELECT, the quickest way + # might be to just manufacture a single-row result set to send off to + # instances(), but we'd have to weave that into context.py and all + # that. For 2.0.0, we have enough big changes to navigate for now. 
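+ # the loop below widens only_load_props to include the mapper's primary
+ # key prop keys as soon as a requested attribute is a relationship, so
+ # that a SELECT for the primary row is still emitted during the refresh.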
+ # + mp = refresh_state.mapper._props + for p in only_load_props: + if mp[p]._is_relationship: + only_load_props = refresh_state_prokeys.union(only_load_props) + break + + if refresh_state and refresh_state.load_options: + compile_options += {"_current_path": refresh_state.load_path.parent} + q = q.options(*refresh_state.load_options) + + new_compile_options, load_options = _set_get_options( + compile_options, + load_options, + version_check=version_check, + only_load_props=only_load_props, + refresh_state=refresh_state, + identity_token=identity_token, + is_user_refresh=is_user_refresh, + ) + + q._compile_options = new_compile_options + q._order_by = None + + if no_autoflush: + load_options += {"_autoflush": False} + + execution_options = util.EMPTY_DICT.merge_with( + execution_options, {"_sa_orm_load_options": load_options} + ) + result = ( + session.execute( + q, + params=params, + execution_options=execution_options, + bind_arguments=bind_arguments, + ) + .unique() + .scalars() + ) + + try: + return result.one() + except orm_exc.NoResultFound: + return None + + +def _set_get_options( + compile_opt, + load_opt, + populate_existing=None, + version_check=None, + only_load_props=None, + refresh_state=None, + identity_token=None, + is_user_refresh=None, +): + compile_options = {} + load_options = {} + if version_check: + load_options["_version_check"] = version_check + if populate_existing: + load_options["_populate_existing"] = populate_existing + if refresh_state: + load_options["_refresh_state"] = refresh_state + compile_options["_for_refresh_state"] = True + if only_load_props: + compile_options["_only_load_props"] = frozenset(only_load_props) + if identity_token: + load_options["_identity_token"] = identity_token + + if is_user_refresh: + load_options["_is_user_refresh"] = is_user_refresh + if load_options: + load_opt += load_options + if compile_options: + compile_opt += compile_options + + return compile_opt, load_opt + + +def _setup_entity_query( + compile_state, + mapper, + query_entity, + path, + adapter, + column_collection, + with_polymorphic=None, + only_load_props=None, + polymorphic_discriminator=None, + **kw, +): + if with_polymorphic: + poly_properties = mapper._iterate_polymorphic_properties( + with_polymorphic + ) + else: + poly_properties = mapper._polymorphic_properties + + quick_populators = {} + + path.set(compile_state.attributes, "memoized_setups", quick_populators) + + # for the lead entities in the path, e.g. not eager loads, and + # assuming a user-passed aliased class, e.g. not a from_self() or any + # implicit aliasing, don't add columns to the SELECT that aren't + # in the thing that's aliased. + check_for_adapt = adapter and len(path) == 1 and path[-1].is_aliased_class + + for value in poly_properties: + if only_load_props and value.key not in only_load_props: + continue + value.setup( + compile_state, + query_entity, + path, + adapter, + only_load_props=only_load_props, + column_collection=column_collection, + memoized_populators=quick_populators, + check_for_adapt=check_for_adapt, + **kw, + ) + + if ( + polymorphic_discriminator is not None + and polymorphic_discriminator is not mapper.polymorphic_on + ): + if adapter: + pd = adapter.columns[polymorphic_discriminator] + else: + pd = polymorphic_discriminator + column_collection.append(pd) + + +def _warn_for_runid_changed(state): + util.warn( + "Loading context for %s has changed within a load/refresh " + "handler, suggesting a row refresh operation took place. 
If this " + "event handler is expected to be " + "emitting row refresh operations within an existing load or refresh " + "operation, set restore_load_context=True when establishing the " + "listener to ensure the context remains unchanged when the event " + "handler completes." % (state_str(state),) + ) + + +def _instance_processor( + query_entity, + mapper, + context, + result, + path, + adapter, + only_load_props=None, + refresh_state=None, + polymorphic_discriminator=None, + _polymorphic_from=None, +): + """Produce a mapper level row processor callable + which processes rows into mapped instances.""" + + # note that this method, most of which exists in a closure + # called _instance(), resists being broken out, as + # attempts to do so tend to add significant function + # call overhead. _instance() is the most + # performance-critical section in the whole ORM. + + identity_class = mapper._identity_class + compile_state = context.compile_state + + # look for "row getter" functions that have been assigned along + # with the compile state that were cached from a previous load. + # these are operator.itemgetter() objects that each will extract a + # particular column from each row. + + getter_key = ("getters", mapper) + getters = path.get(compile_state.attributes, getter_key, None) + + if getters is None: + # no getters, so go through a list of attributes we are loading for, + # and the ones that are column based will have already put information + # for us in another collection "memoized_setups", which represents the + # output of the LoaderStrategy.setup_query() method. We can just as + # easily call LoaderStrategy.create_row_processor for each, but by + # getting it all at once from setup_query we save another method call + # per attribute. + props = mapper._prop_set + if only_load_props is not None: + props = props.intersection( + mapper._props[k] for k in only_load_props + ) + + quick_populators = path.get( + context.attributes, "memoized_setups", EMPTY_DICT + ) + + todo = [] + cached_populators = { + "new": [], + "quick": [], + "deferred": [], + "expire": [], + "existing": [], + "eager": [], + } + + if refresh_state is None: + # we can also get the "primary key" tuple getter function + pk_cols = mapper.primary_key + + if adapter: + pk_cols = [adapter.columns[c] for c in pk_cols] + primary_key_getter = result._tuple_getter(pk_cols) + else: + primary_key_getter = None + + getters = { + "cached_populators": cached_populators, + "todo": todo, + "primary_key_getter": primary_key_getter, + } + for prop in props: + if prop in quick_populators: + # this is an inlined path just for column-based attributes. + col = quick_populators[prop] + if col is _DEFER_FOR_STATE: + cached_populators["new"].append( + (prop.key, prop._deferred_column_loader) + ) + elif col is _SET_DEFERRED_EXPIRED: + # note that in this path, we are no longer + # searching in the result to see if the column might + # be present in some unexpected way. + cached_populators["expire"].append((prop.key, False)) + elif col is _RAISE_FOR_STATE: + cached_populators["new"].append( + (prop.key, prop._raise_column_loader) + ) + else: + getter = None + if adapter: + # this logic had been removed for all 1.4 releases + # up until 1.4.18; the adapter here is particularly + # the compound eager adapter which isn't accommodated + # in the quick_populators right now. The "fallback" + # logic below instead took over in many more cases + # until issue #6596 was identified. 
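# Illustrative sketch, editorial annotation rather than part of the vendored
# module: the _DEFER_FOR_STATE, _SET_DEFERRED_EXPIRED and _RAISE_FOR_STATE
# markers handled above are the row-processor side of column deferral as
# configured on the public API. The Book model and its deferred "summary"
# column below are hypothetical.
from sqlalchemy import String, select
from sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column, undefer

class Base(DeclarativeBase):
    pass

class Book(Base):
    __tablename__ = "book"
    id: Mapped[int] = mapped_column(primary_key=True)
    # deferred=True: the row processor records a deferred loader rather than
    # reading this column from the result row
    summary: Mapped[str] = mapped_column(String(2000), deferred=True)

# undefer() puts the column back into the SELECT, so it is populated through
# the ordinary "quick" getter path instead
stmt = select(Book).options(undefer(Book.summary))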
+ + # note there is still an issue where this codepath + # produces no "getter" for cases where a joined-inh + # mapping includes a labeled column property, meaning + # KeyError is caught internally and we fall back to + # _getter(col), which works anyway. The adapter + # here for joined inh without any aliasing might not + # be useful. Tests which see this include + # test.orm.inheritance.test_basic -> + # EagerTargetingTest.test_adapt_stringency + # OptimizedLoadTest.test_column_expression_joined + # PolymorphicOnNotLocalTest.test_polymorphic_on_column_prop # noqa: E501 + # + + adapted_col = adapter.columns[col] + if adapted_col is not None: + getter = result._getter(adapted_col, False) + if not getter: + getter = result._getter(col, False) + if getter: + cached_populators["quick"].append((prop.key, getter)) + else: + # fall back to the ColumnProperty itself, which + # will iterate through all of its columns + # to see if one fits + prop.create_row_processor( + context, + query_entity, + path, + mapper, + result, + adapter, + cached_populators, + ) + else: + # loader strategies like subqueryload, selectinload, + # joinedload, basically relationships, these need to interact + # with the context each time to work correctly. + todo.append(prop) + + path.set(compile_state.attributes, getter_key, getters) + + cached_populators = getters["cached_populators"] + + populators = {key: list(value) for key, value in cached_populators.items()} + for prop in getters["todo"]: + prop.create_row_processor( + context, query_entity, path, mapper, result, adapter, populators + ) + + propagated_loader_options = context.propagated_loader_options + load_path = ( + context.compile_state.current_path + path + if context.compile_state.current_path.path + else path + ) + + session_identity_map = context.session.identity_map + + populate_existing = context.populate_existing or mapper.always_refresh + load_evt = bool(mapper.class_manager.dispatch.load) + refresh_evt = bool(mapper.class_manager.dispatch.refresh) + persistent_evt = bool(context.session.dispatch.loaded_as_persistent) + if persistent_evt: + loaded_as_persistent = context.session.dispatch.loaded_as_persistent + instance_state = attributes.instance_state + instance_dict = attributes.instance_dict + session_id = context.session.hash_key + runid = context.runid + identity_token = context.identity_token + + version_check = context.version_check + if version_check: + version_id_col = mapper.version_id_col + if version_id_col is not None: + if adapter: + version_id_col = adapter.columns[version_id_col] + version_id_getter = result._getter(version_id_col) + else: + version_id_getter = None + + if not refresh_state and _polymorphic_from is not None: + key = ("loader", path.path) + + if key in context.attributes and context.attributes[key].strategy == ( + ("selectinload_polymorphic", True), + ): + option_entities = context.attributes[key].local_opts["entities"] + else: + option_entities = None + selectin_load_via = mapper._should_selectin_load( + option_entities, + _polymorphic_from, + ) + + if selectin_load_via and selectin_load_via is not _polymorphic_from: + # only_load_props goes w/ refresh_state only, and in a refresh + # we are a single row query for the exact entity; polymorphic + # loading does not apply + assert only_load_props is None + + if selectin_load_via.is_mapper: + _load_supers = [] + _endmost_mapper = selectin_load_via + while ( + _endmost_mapper + and _endmost_mapper is not _polymorphic_from + ): + _load_supers.append(_endmost_mapper) + 
_endmost_mapper = _endmost_mapper.inherits + else: + _load_supers = [selectin_load_via] + + for _selectinload_entity in _load_supers: + if PostLoad.path_exists( + context, load_path, _selectinload_entity + ): + continue + callable_ = _load_subclass_via_in( + context, + path, + _selectinload_entity, + _polymorphic_from, + option_entities, + ) + PostLoad.callable_for_path( + context, + load_path, + _selectinload_entity.mapper, + _selectinload_entity, + callable_, + _selectinload_entity, + ) + + post_load = PostLoad.for_context(context, load_path, only_load_props) + + if refresh_state: + refresh_identity_key = refresh_state.key + if refresh_identity_key is None: + # super-rare condition; a refresh is being called + # on a non-instance-key instance; this is meant to only + # occur within a flush() + refresh_identity_key = mapper._identity_key_from_state( + refresh_state + ) + else: + refresh_identity_key = None + + primary_key_getter = getters["primary_key_getter"] + + if mapper.allow_partial_pks: + is_not_primary_key = _none_set.issuperset + else: + is_not_primary_key = _none_set.intersection + + def _instance(row): + # determine the state that we'll be populating + if refresh_identity_key: + # fixed state that we're refreshing + state = refresh_state + instance = state.obj() + dict_ = instance_dict(instance) + isnew = state.runid != runid + currentload = True + loaded_instance = False + else: + # look at the row, see if that identity is in the + # session, or we have to create a new one + identitykey = ( + identity_class, + primary_key_getter(row), + identity_token, + ) + + instance = session_identity_map.get(identitykey) + + if instance is not None: + # existing instance + state = instance_state(instance) + dict_ = instance_dict(instance) + + isnew = state.runid != runid + currentload = not isnew + loaded_instance = False + + if version_check and version_id_getter and not currentload: + _validate_version_id( + mapper, state, dict_, row, version_id_getter + ) + + else: + # create a new instance + + # check for non-NULL values in the primary key columns, + # else no entity is returned for the row + if is_not_primary_key(identitykey[1]): + return None + + isnew = True + currentload = True + loaded_instance = True + + instance = mapper.class_manager.new_instance() + + dict_ = instance_dict(instance) + state = instance_state(instance) + state.key = identitykey + state.identity_token = identity_token + + # attach instance to session. + state.session_id = session_id + session_identity_map._add_unpresent(state, identitykey) + + effective_populate_existing = populate_existing + if refresh_state is state: + effective_populate_existing = True + + # populate. this looks at whether this state is new + # for this load or was existing, and whether or not this + # row is the first row with this identity. + if currentload or effective_populate_existing: + # full population routines. Objects here are either + # just created, or we are doing a populate_existing + + # be conservative about setting load_path when populate_existing + # is in effect; want to maintain options from the original + # load. 
see test_expire->test_refresh_maintains_deferred_options + if isnew and ( + propagated_loader_options or not effective_populate_existing + ): + state.load_options = propagated_loader_options + state.load_path = load_path + + _populate_full( + context, + row, + state, + dict_, + isnew, + load_path, + loaded_instance, + effective_populate_existing, + populators, + ) + + if isnew: + # state.runid should be equal to context.runid / runid + # here, however for event checks we are being more conservative + # and checking against existing run id + # assert state.runid == runid + + existing_runid = state.runid + + if loaded_instance: + if load_evt: + state.manager.dispatch.load(state, context) + if state.runid != existing_runid: + _warn_for_runid_changed(state) + if persistent_evt: + loaded_as_persistent(context.session, state) + if state.runid != existing_runid: + _warn_for_runid_changed(state) + elif refresh_evt: + state.manager.dispatch.refresh( + state, context, only_load_props + ) + if state.runid != runid: + _warn_for_runid_changed(state) + + if effective_populate_existing or state.modified: + if refresh_state and only_load_props: + state._commit(dict_, only_load_props) + else: + state._commit_all(dict_, session_identity_map) + + if post_load: + post_load.add_state(state, True) + + else: + # partial population routines, for objects that were already + # in the Session, but a row matches them; apply eager loaders + # on existing objects, etc. + unloaded = state.unloaded + isnew = state not in context.partials + + if not isnew or unloaded or populators["eager"]: + # state is having a partial set of its attributes + # refreshed. Populate those attributes, + # and add to the "context.partials" collection. + + to_load = _populate_partial( + context, + row, + state, + dict_, + isnew, + load_path, + unloaded, + populators, + ) + + if isnew: + if refresh_evt: + existing_runid = state.runid + state.manager.dispatch.refresh(state, context, to_load) + if state.runid != existing_runid: + _warn_for_runid_changed(state) + + state._commit(dict_, to_load) + + if post_load and context.invoke_all_eagers: + post_load.add_state(state, False) + + return instance + + if mapper.polymorphic_map and not _polymorphic_from and not refresh_state: + # if we are doing polymorphic, dispatch to a different _instance() + # method specific to the subclass mapper + def ensure_no_pk(row): + identitykey = ( + identity_class, + primary_key_getter(row), + identity_token, + ) + if not is_not_primary_key(identitykey[1]): + return identitykey + else: + return None + + _instance = _decorate_polymorphic_switch( + _instance, + context, + query_entity, + mapper, + result, + path, + polymorphic_discriminator, + adapter, + ensure_no_pk, + ) + + return _instance + + +def _load_subclass_via_in( + context, path, entity, polymorphic_from, option_entities +): + mapper = entity.mapper + + # TODO: polymorphic_from seems to be a Mapper in all cases. 
+ # this is likely not needed, but as we dont have typing in loading.py + # yet, err on the safe side + polymorphic_from_mapper = polymorphic_from.mapper + not_against_basemost = polymorphic_from_mapper.inherits is not None + + zero_idx = len(mapper.base_mapper.primary_key) == 1 + + if entity.is_aliased_class or not_against_basemost: + q, enable_opt, disable_opt = mapper._subclass_load_via_in( + entity, polymorphic_from + ) + else: + q, enable_opt, disable_opt = mapper._subclass_load_via_in_mapper + + def do_load(context, path, states, load_only, effective_entity): + if not option_entities: + # filter out states for those that would have selectinloaded + # from another loader + # TODO: we are currently ignoring the case where the + # "selectin_polymorphic" option is used, as this is much more + # complex / specific / very uncommon API use + states = [ + (s, v) + for s, v in states + if s.mapper._would_selectin_load_only_from_given_mapper(mapper) + ] + + if not states: + return + + orig_query = context.query + + if path.parent: + enable_opt_lcl = enable_opt._prepend_path(path) + disable_opt_lcl = disable_opt._prepend_path(path) + else: + enable_opt_lcl = enable_opt + disable_opt_lcl = disable_opt + options = ( + (enable_opt_lcl,) + orig_query._with_options + (disable_opt_lcl,) + ) + + q2 = q.options(*options) + + q2._compile_options = context.compile_state.default_compile_options + q2._compile_options += {"_current_path": path.parent} + + if context.populate_existing: + q2 = q2.execution_options(populate_existing=True) + + context.session.execute( + q2, + dict( + primary_keys=[ + state.key[1][0] if zero_idx else state.key[1] + for state, load_attrs in states + ] + ), + ).unique().scalars().all() + + return do_load + + +def _populate_full( + context, + row, + state, + dict_, + isnew, + load_path, + loaded_instance, + populate_existing, + populators, +): + if isnew: + # first time we are seeing a row with this identity. + state.runid = context.runid + + for key, getter in populators["quick"]: + dict_[key] = getter(row) + if populate_existing: + for key, set_callable in populators["expire"]: + dict_.pop(key, None) + if set_callable: + state.expired_attributes.add(key) + else: + for key, set_callable in populators["expire"]: + if set_callable: + state.expired_attributes.add(key) + + for key, populator in populators["new"]: + populator(state, dict_, row) + + elif load_path != state.load_path: + # new load path, e.g. object is present in more than one + # column position in a series of rows + state.load_path = load_path + + # if we have data, and the data isn't in the dict, OK, let's put + # it in. + for key, getter in populators["quick"]: + if key not in dict_: + dict_[key] = getter(row) + + # otherwise treat like an "already seen" row + for key, populator in populators["existing"]: + populator(state, dict_, row) + # TODO: allow "existing" populator to know this is + # a new path for the state: + # populator(state, dict_, row, new_path=True) + + else: + # have already seen rows with this identity in this same path. 
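# Illustrative sketch, editorial annotation rather than part of the vendored
# module: the "existing" populators above run when the same identity shows up
# in more than one result row, which is what joined eager loading of a
# collection produces; on the public API this is why .unique() is applied to
# such results. The User/Address models below are hypothetical.
from typing import List
from sqlalchemy import ForeignKey, String, select
from sqlalchemy.orm import (DeclarativeBase, Mapped, Session, joinedload,
                            mapped_column, relationship)

class Base(DeclarativeBase):
    pass

class User(Base):
    __tablename__ = "user_account"
    id: Mapped[int] = mapped_column(primary_key=True)
    name: Mapped[str] = mapped_column(String(50))
    addresses: Mapped[List["Address"]] = relationship(back_populates="user")

class Address(Base):
    __tablename__ = "address"
    id: Mapped[int] = mapped_column(primary_key=True)
    user_id: Mapped[int] = mapped_column(ForeignKey("user_account.id"))
    email: Mapped[str] = mapped_column(String(100))
    user: Mapped["User"] = relationship(back_populates="addresses")

def users_with_addresses(session: Session) -> List[User]:
    stmt = select(User).options(joinedload(User.addresses))
    # each User row repeats once per joined Address; unique() collapses the
    # duplicate identities while the populators keep filling the collection
    return list(session.execute(stmt).unique().scalars())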
+ for key, populator in populators["existing"]: + populator(state, dict_, row) + + # TODO: same path + # populator(state, dict_, row, new_path=False) + + +def _populate_partial( + context, row, state, dict_, isnew, load_path, unloaded, populators +): + if not isnew: + if unloaded: + # extra pass, see #8166 + for key, getter in populators["quick"]: + if key in unloaded: + dict_[key] = getter(row) + + to_load = context.partials[state] + for key, populator in populators["existing"]: + if key in to_load: + populator(state, dict_, row) + else: + to_load = unloaded + context.partials[state] = to_load + + for key, getter in populators["quick"]: + if key in to_load: + dict_[key] = getter(row) + for key, set_callable in populators["expire"]: + if key in to_load: + dict_.pop(key, None) + if set_callable: + state.expired_attributes.add(key) + for key, populator in populators["new"]: + if key in to_load: + populator(state, dict_, row) + + for key, populator in populators["eager"]: + if key not in unloaded: + populator(state, dict_, row) + + return to_load + + +def _validate_version_id(mapper, state, dict_, row, getter): + if mapper._get_state_attr_by_column( + state, dict_, mapper.version_id_col + ) != getter(row): + raise orm_exc.StaleDataError( + "Instance '%s' has version id '%s' which " + "does not match database-loaded version id '%s'." + % ( + state_str(state), + mapper._get_state_attr_by_column( + state, dict_, mapper.version_id_col + ), + getter(row), + ) + ) + + +def _decorate_polymorphic_switch( + instance_fn, + context, + query_entity, + mapper, + result, + path, + polymorphic_discriminator, + adapter, + ensure_no_pk, +): + if polymorphic_discriminator is not None: + polymorphic_on = polymorphic_discriminator + else: + polymorphic_on = mapper.polymorphic_on + if polymorphic_on is None: + return instance_fn + + if adapter: + polymorphic_on = adapter.columns[polymorphic_on] + + def configure_subclass_mapper(discriminator): + try: + sub_mapper = mapper.polymorphic_map[discriminator] + except KeyError: + raise AssertionError( + "No such polymorphic_identity %r is defined" % discriminator + ) + else: + if sub_mapper is mapper: + return None + elif not sub_mapper.isa(mapper): + return False + + return _instance_processor( + query_entity, + sub_mapper, + context, + result, + path, + adapter, + _polymorphic_from=mapper, + ) + + polymorphic_instances = util.PopulateDict(configure_subclass_mapper) + + getter = result._getter(polymorphic_on) + + def polymorphic_instance(row): + discriminator = getter(row) + if discriminator is not None: + _instance = polymorphic_instances[discriminator] + if _instance: + return _instance(row) + elif _instance is False: + identitykey = ensure_no_pk(row) + + if identitykey: + raise sa_exc.InvalidRequestError( + "Row with identity key %s can't be loaded into an " + "object; the polymorphic discriminator column '%s' " + "refers to %s, which is not a sub-mapper of " + "the requested %s" + % ( + identitykey, + polymorphic_on, + mapper.polymorphic_map[discriminator], + mapper, + ) + ) + else: + return None + else: + return instance_fn(row) + else: + identitykey = ensure_no_pk(row) + + if identitykey: + raise sa_exc.InvalidRequestError( + "Row with identity key %s can't be loaded into an " + "object; the polymorphic discriminator column '%s' is " + "NULL" % (identitykey, polymorphic_on) + ) + else: + return None + + return polymorphic_instance + + +class PostLoad: + """Track loaders and states for "post load" operations.""" + + __slots__ = "loaders", "states", "load_keys" + + 
def __init__(self): + self.loaders = {} + self.states = util.OrderedDict() + self.load_keys = None + + def add_state(self, state, overwrite): + # the states for a polymorphic load here are all shared + # within a single PostLoad object among multiple subtypes. + # Filtering of callables on a per-subclass basis needs to be done at + # the invocation level + self.states[state] = overwrite + + def invoke(self, context, path): + if not self.states: + return + path = path_registry.PathRegistry.coerce(path) + for ( + effective_context, + token, + limit_to_mapper, + loader, + arg, + kw, + ) in self.loaders.values(): + states = [ + (state, overwrite) + for state, overwrite in self.states.items() + if state.manager.mapper.isa(limit_to_mapper) + ] + if states: + loader( + effective_context, path, states, self.load_keys, *arg, **kw + ) + self.states.clear() + + @classmethod + def for_context(cls, context, path, only_load_props): + pl = context.post_load_paths.get(path.path) + if pl is not None and only_load_props: + pl.load_keys = only_load_props + return pl + + @classmethod + def path_exists(self, context, path, key): + return ( + path.path in context.post_load_paths + and key in context.post_load_paths[path.path].loaders + ) + + @classmethod + def callable_for_path( + cls, context, path, limit_to_mapper, token, loader_callable, *arg, **kw + ): + if path.path in context.post_load_paths: + pl = context.post_load_paths[path.path] + else: + pl = context.post_load_paths[path.path] = PostLoad() + pl.loaders[token] = ( + context, + token, + limit_to_mapper, + loader_callable, + arg, + kw, + ) + + +def load_scalar_attributes(mapper, state, attribute_names, passive): + """initiate a column-based attribute refresh operation.""" + + # assert mapper is _state_mapper(state) + session = state.session + if not session: + raise orm_exc.DetachedInstanceError( + "Instance %s is not bound to a Session; " + "attribute refresh operation cannot proceed" % (state_str(state)) + ) + + no_autoflush = bool(passive & attributes.NO_AUTOFLUSH) + + # in the case of inheritance, particularly concrete and abstract + # concrete inheritance, the class manager might have some keys + # of attributes on the superclass that we didn't actually map. + # These could be mapped as "concrete, don't load" or could be completely + # excluded from the mapping and we know nothing about them. Filter them + # here to prevent them from coming through. + if attribute_names: + attribute_names = attribute_names.intersection(mapper.attrs.keys()) + + if mapper.inherits and not mapper.concrete: + # load based on committed attributes in the object, formed into + # a truncated SELECT that only includes relevant tables. does not + # currently use state.key + statement = mapper._optimized_get_statement(state, attribute_names) + if statement is not None: + # undefer() isn't needed here because statement has the + # columns needed already, this implicitly undefers that column + stmt = FromStatement(mapper, statement) + + return load_on_ident( + session, + stmt, + None, + only_load_props=attribute_names, + refresh_state=state, + no_autoflush=no_autoflush, + ) + + # normal load, use state.key as the identity to SELECT + has_key = bool(state.key) + + if has_key: + identity_key = state.key + else: + # this codepath is rare - only valid when inside a flush, and the + # object is becoming persistent but hasn't yet been assigned + # an identity_key. + # check here to ensure we have the attrs we need. 
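# Illustrative sketch, editorial annotation rather than part of the vendored
# module: the surrounding load_scalar_attributes() routine is what runs when
# an expired column attribute is read again. The instance and attribute name
# below are hypothetical.
from sqlalchemy.orm import Session

def reload_expired_name(session: Session, user) -> str:
    session.expire(user, ["name"])  # mark the column attribute as expired
    return user.name                # attribute access triggers the refresh SELECT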
+ pk_attrs = [ + mapper._columntoproperty[col].key for col in mapper.primary_key + ] + if state.expired_attributes.intersection(pk_attrs): + raise sa_exc.InvalidRequestError( + "Instance %s cannot be refreshed - it's not " + " persistent and does not " + "contain a full primary key." % state_str(state) + ) + identity_key = mapper._identity_key_from_state(state) + + if ( + _none_set.issubset(identity_key) and not mapper.allow_partial_pks + ) or _none_set.issuperset(identity_key): + util.warn_limited( + "Instance %s to be refreshed doesn't " + "contain a full primary key - can't be refreshed " + "(and shouldn't be expired, either).", + state_str(state), + ) + return + + result = load_on_ident( + session, + select(mapper).set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL), + identity_key, + refresh_state=state, + only_load_props=attribute_names, + no_autoflush=no_autoflush, + ) + + # if instance is pending, a refresh operation + # may not complete (even if PK attributes are assigned) + if has_key and result is None: + raise orm_exc.ObjectDeletedError(state) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/mapped_collection.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/mapped_collection.py new file mode 100644 index 0000000000000000000000000000000000000000..ca085c4037675e445b30725c2192c5c49cc29b13 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/mapped_collection.py @@ -0,0 +1,557 @@ +# orm/mapped_collection.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php + +from __future__ import annotations + +import operator +from typing import Any +from typing import Callable +from typing import Dict +from typing import Generic +from typing import List +from typing import Optional +from typing import Sequence +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING +from typing import TypeVar +from typing import Union + +from . import base +from .collections import collection +from .collections import collection_adapter +from .. import exc as sa_exc +from .. import util +from ..sql import coercions +from ..sql import expression +from ..sql import roles +from ..util.langhelpers import Missing +from ..util.langhelpers import MissingOr +from ..util.typing import Literal + +if TYPE_CHECKING: + from . import AttributeEventToken + from . import Mapper + from .collections import CollectionAdapter + from ..sql.elements import ColumnElement + +_KT = TypeVar("_KT", bound=Any) +_VT = TypeVar("_VT", bound=Any) + + +class _PlainColumnGetter(Generic[_KT]): + """Plain column getter, stores collection of Column objects + directly. + + Serializes to a :class:`._SerializableColumnGetterV2` + which has more expensive __call__() performance + and some rare caveats. 
+ + """ + + __slots__ = ("cols", "composite") + + def __init__(self, cols: Sequence[ColumnElement[_KT]]) -> None: + self.cols = cols + self.composite = len(cols) > 1 + + def __reduce__( + self, + ) -> Tuple[ + Type[_SerializableColumnGetterV2[_KT]], + Tuple[Sequence[Tuple[Optional[str], Optional[str]]]], + ]: + return _SerializableColumnGetterV2._reduce_from_cols(self.cols) + + def _cols(self, mapper: Mapper[_KT]) -> Sequence[ColumnElement[_KT]]: + return self.cols + + def __call__(self, value: _KT) -> MissingOr[Union[_KT, Tuple[_KT, ...]]]: + state = base.instance_state(value) + m = base._state_mapper(state) + + key: List[_KT] = [ + m._get_state_attr_by_column(state, state.dict, col) + for col in self._cols(m) + ] + if self.composite: + return tuple(key) + else: + obj = key[0] + if obj is None: + return Missing + else: + return obj + + +class _SerializableColumnGetterV2(_PlainColumnGetter[_KT]): + """Updated serializable getter which deals with + multi-table mapped classes. + + Two extremely unusual cases are not supported. + Mappings which have tables across multiple metadata + objects, or which are mapped to non-Table selectables + linked across inheriting mappers may fail to function + here. + + """ + + __slots__ = ("colkeys",) + + def __init__( + self, colkeys: Sequence[Tuple[Optional[str], Optional[str]]] + ) -> None: + self.colkeys = colkeys + self.composite = len(colkeys) > 1 + + def __reduce__( + self, + ) -> Tuple[ + Type[_SerializableColumnGetterV2[_KT]], + Tuple[Sequence[Tuple[Optional[str], Optional[str]]]], + ]: + return self.__class__, (self.colkeys,) + + @classmethod + def _reduce_from_cols(cls, cols: Sequence[ColumnElement[_KT]]) -> Tuple[ + Type[_SerializableColumnGetterV2[_KT]], + Tuple[Sequence[Tuple[Optional[str], Optional[str]]]], + ]: + def _table_key(c: ColumnElement[_KT]) -> Optional[str]: + if not isinstance(c.table, expression.TableClause): + return None + else: + return c.table.key # type: ignore + + colkeys = [(c.key, _table_key(c)) for c in cols] + return _SerializableColumnGetterV2, (colkeys,) + + def _cols(self, mapper: Mapper[_KT]) -> Sequence[ColumnElement[_KT]]: + cols: List[ColumnElement[_KT]] = [] + metadata = getattr(mapper.local_table, "metadata", None) + for ckey, tkey in self.colkeys: + if tkey is None or metadata is None or tkey not in metadata: + cols.append(mapper.local_table.c[ckey]) # type: ignore + else: + cols.append(metadata.tables[tkey].c[ckey]) + return cols + + +def column_keyed_dict( + mapping_spec: Union[Type[_KT], Callable[[_KT], _VT]], + *, + ignore_unpopulated_attribute: bool = False, +) -> Type[KeyFuncDict[_KT, _KT]]: + """A dictionary-based collection type with column-based keying. + + .. versionchanged:: 2.0 Renamed :data:`.column_mapped_collection` to + :class:`.column_keyed_dict`. + + Returns a :class:`.KeyFuncDict` factory which will produce new + dictionary keys based on the value of a particular :class:`.Column`-mapped + attribute on ORM mapped instances to be added to the dictionary. + + .. note:: the value of the target attribute must be assigned with its + value at the time that the object is being added to the + dictionary collection. Additionally, changes to the key attribute + are **not tracked**, which means the key in the dictionary is not + automatically synchronized with the key value on the target object + itself. See :ref:`key_collections_mutations` for further details. + + .. 
seealso:: + + :ref:`orm_dictionary_collection` - background on use + + :param mapping_spec: a :class:`_schema.Column` object that is expected + to be mapped by the target mapper to a particular attribute on the + mapped class, the value of which on a particular instance is to be used + as the key for a new dictionary entry for that instance. + :param ignore_unpopulated_attribute: if True, and the mapped attribute + indicated by the given :class:`_schema.Column` target attribute + on an object is not populated at all, the operation will be silently + skipped. By default, an error is raised. + + .. versionadded:: 2.0 an error is raised by default if the attribute + being used for the dictionary key is determined that it was never + populated with any value. The + :paramref:`_orm.column_keyed_dict.ignore_unpopulated_attribute` + parameter may be set which will instead indicate that this condition + should be ignored, and the append operation silently skipped. + This is in contrast to the behavior of the 1.x series which would + erroneously populate the value in the dictionary with an arbitrary key + value of ``None``. + + + """ + cols = [ + coercions.expect(roles.ColumnArgumentRole, q, argname="mapping_spec") + for q in util.to_list(mapping_spec) + ] + keyfunc = _PlainColumnGetter(cols) + return _mapped_collection_cls( + keyfunc, + ignore_unpopulated_attribute=ignore_unpopulated_attribute, + ) + + +class _AttrGetter: + __slots__ = ("attr_name", "getter") + + def __init__(self, attr_name: str): + self.attr_name = attr_name + self.getter = operator.attrgetter(attr_name) + + def __call__(self, mapped_object: Any) -> Any: + obj = self.getter(mapped_object) + if obj is None: + state = base.instance_state(mapped_object) + mp = state.mapper + if self.attr_name in mp.attrs: + dict_ = state.dict + obj = dict_.get(self.attr_name, base.NO_VALUE) + if obj is None: + return Missing + else: + return Missing + + return obj + + def __reduce__(self) -> Tuple[Type[_AttrGetter], Tuple[str]]: + return _AttrGetter, (self.attr_name,) + + +def attribute_keyed_dict( + attr_name: str, *, ignore_unpopulated_attribute: bool = False +) -> Type[KeyFuncDict[Any, Any]]: + """A dictionary-based collection type with attribute-based keying. + + .. versionchanged:: 2.0 Renamed :data:`.attribute_mapped_collection` to + :func:`.attribute_keyed_dict`. + + Returns a :class:`.KeyFuncDict` factory which will produce new + dictionary keys based on the value of a particular named attribute on + ORM mapped instances to be added to the dictionary. + + .. note:: the value of the target attribute must be assigned with its + value at the time that the object is being added to the + dictionary collection. Additionally, changes to the key attribute + are **not tracked**, which means the key in the dictionary is not + automatically synchronized with the key value on the target object + itself. See :ref:`key_collections_mutations` for further details. + + .. seealso:: + + :ref:`orm_dictionary_collection` - background on use + + :param attr_name: string name of an ORM-mapped attribute + on the mapped class, the value of which on a particular instance + is to be used as the key for a new dictionary entry for that instance. + :param ignore_unpopulated_attribute: if True, and the target attribute + on an object is not populated at all, the operation will be silently + skipped. By default, an error is raised. + + .. 
versionadded:: 2.0 an error is raised by default if the attribute + being used for the dictionary key is determined that it was never + populated with any value. The + :paramref:`_orm.attribute_keyed_dict.ignore_unpopulated_attribute` + parameter may be set which will instead indicate that this condition + should be ignored, and the append operation silently skipped. + This is in contrast to the behavior of the 1.x series which would + erroneously populate the value in the dictionary with an arbitrary key + value of ``None``. + + + """ + + return _mapped_collection_cls( + _AttrGetter(attr_name), + ignore_unpopulated_attribute=ignore_unpopulated_attribute, + ) + + +def keyfunc_mapping( + keyfunc: Callable[[Any], Any], + *, + ignore_unpopulated_attribute: bool = False, +) -> Type[KeyFuncDict[_KT, Any]]: + """A dictionary-based collection type with arbitrary keying. + + .. versionchanged:: 2.0 Renamed :data:`.mapped_collection` to + :func:`.keyfunc_mapping`. + + Returns a :class:`.KeyFuncDict` factory with a keying function + generated from keyfunc, a callable that takes an entity and returns a + key value. + + .. note:: the given keyfunc is called only once at the time that the + target object is being added to the collection. Changes to the + effective value returned by the function are not tracked. + + + .. seealso:: + + :ref:`orm_dictionary_collection` - background on use + + :param keyfunc: a callable that will be passed the ORM-mapped instance + which should then generate a new key to use in the dictionary. + If the value returned is :attr:`.LoaderCallableStatus.NO_VALUE`, an error + is raised. + :param ignore_unpopulated_attribute: if True, and the callable returns + :attr:`.LoaderCallableStatus.NO_VALUE` for a particular instance, the + operation will be silently skipped. By default, an error is raised. + + .. versionadded:: 2.0 an error is raised by default if the callable + being used for the dictionary key returns + :attr:`.LoaderCallableStatus.NO_VALUE`, which in an ORM attribute + context indicates an attribute that was never populated with any value. + The :paramref:`_orm.mapped_collection.ignore_unpopulated_attribute` + parameter may be set which will instead indicate that this condition + should be ignored, and the append operation silently skipped. This is + in contrast to the behavior of the 1.x series which would erroneously + populate the value in the dictionary with an arbitrary key value of + ``None``. + + + """ + return _mapped_collection_cls( + keyfunc, ignore_unpopulated_attribute=ignore_unpopulated_attribute + ) + + +class KeyFuncDict(Dict[_KT, _VT]): + """Base for ORM mapped dictionary classes. + + Extends the ``dict`` type with additional methods needed by SQLAlchemy ORM + collection classes. Use of :class:`_orm.KeyFuncDict` is most directly + by using the :func:`.attribute_keyed_dict` or + :func:`.column_keyed_dict` class factories. + :class:`_orm.KeyFuncDict` may also serve as the base for user-defined + custom dictionary classes. + + .. versionchanged:: 2.0 Renamed :class:`.MappedCollection` to + :class:`.KeyFuncDict`. + + .. seealso:: + + :func:`_orm.attribute_keyed_dict` + + :func:`_orm.column_keyed_dict` + + :ref:`orm_dictionary_collection` + + :ref:`orm_custom_collection` + + + """ + + def __init__( + self, + keyfunc: Callable[[Any], Any], + *dict_args: Any, + ignore_unpopulated_attribute: bool = False, + ) -> None: + """Create a new collection with keying provided by keyfunc. 
+ + keyfunc may be any callable that takes an object and returns an object + for use as a dictionary key. + + The keyfunc will be called every time the ORM needs to add a member by + value-only (such as when loading instances from the database) or + remove a member. The usual cautions about dictionary keying apply- + ``keyfunc(object)`` should return the same output for the life of the + collection. Keying based on mutable properties can result in + unreachable instances "lost" in the collection. + + """ + self.keyfunc = keyfunc + self.ignore_unpopulated_attribute = ignore_unpopulated_attribute + super().__init__(*dict_args) + + @classmethod + def _unreduce( + cls, + keyfunc: Callable[[Any], Any], + values: Dict[_KT, _KT], + adapter: Optional[CollectionAdapter] = None, + ) -> "KeyFuncDict[_KT, _KT]": + mp: KeyFuncDict[_KT, _KT] = KeyFuncDict(keyfunc) + mp.update(values) + # note that the adapter sets itself up onto this collection + # when its `__setstate__` method is called + return mp + + def __reduce__( + self, + ) -> Tuple[ + Callable[[_KT, _KT], KeyFuncDict[_KT, _KT]], + Tuple[Any, Union[Dict[_KT, _KT], Dict[_KT, _KT]], CollectionAdapter], + ]: + return ( + KeyFuncDict._unreduce, + ( + self.keyfunc, + dict(self), + collection_adapter(self), + ), + ) + + @util.preload_module("sqlalchemy.orm.attributes") + def _raise_for_unpopulated( + self, + value: _KT, + initiator: Union[AttributeEventToken, Literal[None, False]] = None, + *, + warn_only: bool, + ) -> None: + mapper = base.instance_state(value).mapper + + attributes = util.preloaded.orm_attributes + + if not isinstance(initiator, attributes.AttributeEventToken): + relationship = "unknown relationship" + elif initiator.key in mapper.attrs: + relationship = f"{mapper.attrs[initiator.key]}" + else: + relationship = initiator.key + + if warn_only: + util.warn( + f"Attribute keyed dictionary value for " + f"attribute '{relationship}' was None; this will raise " + "in a future release. " + f"To skip this assignment entirely, " + f'Set the "ignore_unpopulated_attribute=True" ' + f"parameter on the mapped collection factory." + ) + else: + raise sa_exc.InvalidRequestError( + "In event triggered from population of " + f"attribute '{relationship}' " + "(potentially from a backref), " + f"can't populate value in KeyFuncDict; " + "dictionary key " + f"derived from {base.instance_str(value)} is not " + f"populated. Ensure appropriate state is set up on " + f"the {base.instance_str(value)} object " + f"before assigning to the {relationship} attribute. " + f"To skip this assignment entirely, " + f'Set the "ignore_unpopulated_attribute=True" ' + f"parameter on the mapped collection factory." 
+ ) + + @collection.appender # type: ignore[misc] + @collection.internally_instrumented # type: ignore[misc] + def set( + self, + value: _KT, + _sa_initiator: Union[AttributeEventToken, Literal[None, False]] = None, + ) -> None: + """Add an item by value, consulting the keyfunc for the key.""" + + key = self.keyfunc(value) + + if key is base.NO_VALUE: + if not self.ignore_unpopulated_attribute: + self._raise_for_unpopulated( + value, _sa_initiator, warn_only=False + ) + else: + return + elif key is Missing: + if not self.ignore_unpopulated_attribute: + self._raise_for_unpopulated( + value, _sa_initiator, warn_only=True + ) + key = None + else: + return + + self.__setitem__(key, value, _sa_initiator) # type: ignore[call-arg] + + @collection.remover # type: ignore[misc] + @collection.internally_instrumented # type: ignore[misc] + def remove( + self, + value: _KT, + _sa_initiator: Union[AttributeEventToken, Literal[None, False]] = None, + ) -> None: + """Remove an item by value, consulting the keyfunc for the key.""" + + key = self.keyfunc(value) + + if key is base.NO_VALUE: + if not self.ignore_unpopulated_attribute: + self._raise_for_unpopulated( + value, _sa_initiator, warn_only=False + ) + return + elif key is Missing: + if not self.ignore_unpopulated_attribute: + self._raise_for_unpopulated( + value, _sa_initiator, warn_only=True + ) + key = None + else: + return + + # Let self[key] raise if key is not in this collection + # testlib.pragma exempt:__ne__ + if self[key] != value: + raise sa_exc.InvalidRequestError( + "Can not remove '%s': collection holds '%s' for key '%s'. " + "Possible cause: is the KeyFuncDict key function " + "based on mutable properties or properties that only obtain " + "values after flush?" % (value, self[key], key) + ) + self.__delitem__(key, _sa_initiator) # type: ignore[call-arg] + + +def _mapped_collection_cls( + keyfunc: Callable[[Any], Any], ignore_unpopulated_attribute: bool +) -> Type[KeyFuncDict[_KT, _KT]]: + class _MKeyfuncMapped(KeyFuncDict[_KT, _KT]): + def __init__(self, *dict_args: Any) -> None: + super().__init__( + keyfunc, + *dict_args, + ignore_unpopulated_attribute=ignore_unpopulated_attribute, + ) + + return _MKeyfuncMapped + + +MappedCollection = KeyFuncDict +"""A synonym for :class:`.KeyFuncDict`. + +.. versionchanged:: 2.0 Renamed :class:`.MappedCollection` to + :class:`.KeyFuncDict`. + +""" + +mapped_collection = keyfunc_mapping +"""A synonym for :func:`_orm.keyfunc_mapping`. + +.. versionchanged:: 2.0 Renamed :data:`.mapped_collection` to + :func:`_orm.keyfunc_mapping` + +""" + +attribute_mapped_collection = attribute_keyed_dict +"""A synonym for :func:`_orm.attribute_keyed_dict`. + +.. versionchanged:: 2.0 Renamed :data:`.attribute_mapped_collection` to + :func:`_orm.attribute_keyed_dict` + +""" + +column_mapped_collection = column_keyed_dict +"""A synonym for :func:`_orm.column_keyed_dict. + +.. 
versionchanged:: 2.0 Renamed :func:`.column_mapped_collection` to + :func:`_orm.column_keyed_dict` + +""" diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/mapper.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/mapper.py new file mode 100644 index 0000000000000000000000000000000000000000..5844854f9d03188c4d043efae5b95ec4583aa86b --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/mapper.py @@ -0,0 +1,4431 @@ +# orm/mapper.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php +# mypy: allow-untyped-defs, allow-untyped-calls + +"""Logic to map Python classes to and from selectables. + +Defines the :class:`~sqlalchemy.orm.mapper.Mapper` class, the central +configurational unit which associates a class with a database table. + +This is a semi-private module; the main configurational API of the ORM is +available in :class:`~sqlalchemy.orm.`. + +""" +from __future__ import annotations + +from collections import deque +from functools import reduce +from itertools import chain +import sys +import threading +from typing import Any +from typing import Callable +from typing import cast +from typing import Collection +from typing import Deque +from typing import Dict +from typing import FrozenSet +from typing import Generic +from typing import Iterable +from typing import Iterator +from typing import List +from typing import Mapping +from typing import Optional +from typing import Sequence +from typing import Set +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING +from typing import TypeVar +from typing import Union +import weakref + +from . import attributes +from . import exc as orm_exc +from . import instrumentation +from . import loading +from . import properties +from . import util as orm_util +from ._typing import _O +from .base import _class_to_mapper +from .base import _parse_mapper_argument +from .base import _state_mapper +from .base import PassiveFlag +from .base import state_str +from .interfaces import _MappedAttribute +from .interfaces import EXT_SKIP +from .interfaces import InspectionAttr +from .interfaces import MapperProperty +from .interfaces import ORMEntityColumnsClauseRole +from .interfaces import ORMFromClauseRole +from .interfaces import StrategizedProperty +from .path_registry import PathRegistry +from .. import event +from .. import exc as sa_exc +from .. import inspection +from .. import log +from .. import schema +from .. import sql +from .. 
import util +from ..event import dispatcher +from ..event import EventTarget +from ..sql import base as sql_base +from ..sql import coercions +from ..sql import expression +from ..sql import operators +from ..sql import roles +from ..sql import TableClause +from ..sql import util as sql_util +from ..sql import visitors +from ..sql.cache_key import MemoizedHasCacheKey +from ..sql.elements import KeyedColumnElement +from ..sql.schema import Column +from ..sql.schema import Table +from ..sql.selectable import LABEL_STYLE_TABLENAME_PLUS_COL +from ..util import HasMemoized +from ..util import HasMemoized_ro_memoized_attribute +from ..util.typing import Literal + +if TYPE_CHECKING: + from ._typing import _IdentityKeyType + from ._typing import _InstanceDict + from ._typing import _ORMColumnExprArgument + from ._typing import _RegistryType + from .decl_api import registry + from .dependency import DependencyProcessor + from .descriptor_props import CompositeProperty + from .descriptor_props import SynonymProperty + from .events import MapperEvents + from .instrumentation import ClassManager + from .path_registry import CachingEntityRegistry + from .properties import ColumnProperty + from .relationships import RelationshipProperty + from .state import InstanceState + from .util import ORMAdapter + from ..engine import Row + from ..engine import RowMapping + from ..sql._typing import _ColumnExpressionArgument + from ..sql._typing import _EquivalentColumnMap + from ..sql.base import ReadOnlyColumnCollection + from ..sql.elements import ColumnClause + from ..sql.elements import ColumnElement + from ..sql.selectable import FromClause + from ..util import OrderedSet + + +_T = TypeVar("_T", bound=Any) +_MP = TypeVar("_MP", bound="MapperProperty[Any]") +_Fn = TypeVar("_Fn", bound="Callable[..., Any]") + + +_WithPolymorphicArg = Union[ + Literal["*"], + Tuple[ + Union[Literal["*"], Sequence[Union["Mapper[Any]", Type[Any]]]], + Optional["FromClause"], + ], + Sequence[Union["Mapper[Any]", Type[Any]]], +] + + +_mapper_registries: weakref.WeakKeyDictionary[_RegistryType, bool] = ( + weakref.WeakKeyDictionary() +) + + +def _all_registries() -> Set[registry]: + with _CONFIGURE_MUTEX: + return set(_mapper_registries) + + +def _unconfigured_mappers() -> Iterator[Mapper[Any]]: + for reg in _all_registries(): + yield from reg._mappers_to_configure() + + +_already_compiling = False + + +# a constant returned by _get_attr_by_column to indicate +# this mapper is not handling an attribute for a particular +# column +NO_ATTRIBUTE = util.symbol("NO_ATTRIBUTE") + +# lock used to synchronize the "mapper configure" step +_CONFIGURE_MUTEX = threading.RLock() + + +@inspection._self_inspects +@log.class_logger +class Mapper( + ORMFromClauseRole, + ORMEntityColumnsClauseRole[_O], + MemoizedHasCacheKey, + InspectionAttr, + log.Identified, + inspection.Inspectable["Mapper[_O]"], + EventTarget, + Generic[_O], +): + """Defines an association between a Python class and a database table or + other relational structure, so that ORM operations against the class may + proceed. + + The :class:`_orm.Mapper` object is instantiated using mapping methods + present on the :class:`_orm.registry` object. For information + about instantiating new :class:`_orm.Mapper` objects, see + :ref:`orm_mapping_classes_toplevel`. 
+ + """ + + dispatch: dispatcher[Mapper[_O]] + + _dispose_called = False + _configure_failed: Any = False + _ready_for_configure = False + + @util.deprecated_params( + non_primary=( + "1.3", + "The :paramref:`.mapper.non_primary` parameter is deprecated, " + "and will be removed in a future release. The functionality " + "of non primary mappers is now better suited using the " + ":class:`.AliasedClass` construct, which can also be used " + "as the target of a :func:`_orm.relationship` in 1.3.", + ), + ) + def __init__( + self, + class_: Type[_O], + local_table: Optional[FromClause] = None, + properties: Optional[Mapping[str, MapperProperty[Any]]] = None, + primary_key: Optional[Iterable[_ORMColumnExprArgument[Any]]] = None, + non_primary: bool = False, + inherits: Optional[Union[Mapper[Any], Type[Any]]] = None, + inherit_condition: Optional[_ColumnExpressionArgument[bool]] = None, + inherit_foreign_keys: Optional[ + Sequence[_ORMColumnExprArgument[Any]] + ] = None, + always_refresh: bool = False, + version_id_col: Optional[_ORMColumnExprArgument[Any]] = None, + version_id_generator: Optional[ + Union[Literal[False], Callable[[Any], Any]] + ] = None, + polymorphic_on: Optional[ + Union[_ORMColumnExprArgument[Any], str, MapperProperty[Any]] + ] = None, + _polymorphic_map: Optional[Dict[Any, Mapper[Any]]] = None, + polymorphic_identity: Optional[Any] = None, + concrete: bool = False, + with_polymorphic: Optional[_WithPolymorphicArg] = None, + polymorphic_abstract: bool = False, + polymorphic_load: Optional[Literal["selectin", "inline"]] = None, + allow_partial_pks: bool = True, + batch: bool = True, + column_prefix: Optional[str] = None, + include_properties: Optional[Sequence[str]] = None, + exclude_properties: Optional[Sequence[str]] = None, + passive_updates: bool = True, + passive_deletes: bool = False, + confirm_deleted_rows: bool = True, + eager_defaults: Literal[True, False, "auto"] = "auto", + legacy_is_orphan: bool = False, + _compiled_cache_size: int = 100, + ): + r"""Direct constructor for a new :class:`_orm.Mapper` object. + + The :class:`_orm.Mapper` constructor is not called directly, and + is normally invoked through the + use of the :class:`_orm.registry` object through either the + :ref:`Declarative ` or + :ref:`Imperative ` mapping styles. + + .. versionchanged:: 2.0 The public facing ``mapper()`` function is + removed; for a classical mapping configuration, use the + :meth:`_orm.registry.map_imperatively` method. + + Parameters documented below may be passed to either the + :meth:`_orm.registry.map_imperatively` method, or may be passed in the + ``__mapper_args__`` declarative class attribute described at + :ref:`orm_declarative_mapper_options`. + + :param class\_: The class to be mapped. When using Declarative, + this argument is automatically passed as the declared class + itself. + + :param local_table: The :class:`_schema.Table` or other + :class:`_sql.FromClause` (i.e. selectable) to which the class is + mapped. May be ``None`` if this mapper inherits from another mapper + using single-table inheritance. When using Declarative, this + argument is automatically passed by the extension, based on what is + configured via the :attr:`_orm.DeclarativeBase.__table__` attribute + or via the :class:`_schema.Table` produced as a result of + the :attr:`_orm.DeclarativeBase.__tablename__` attribute being + present. + + :param polymorphic_abstract: Indicates this class will be mapped in a + polymorphic hierarchy, but not directly instantiated. 
The class is + mapped normally, except that it has no requirement for a + :paramref:`_orm.Mapper.polymorphic_identity` within an inheritance + hierarchy. The class however must be part of a polymorphic + inheritance scheme which uses + :paramref:`_orm.Mapper.polymorphic_on` at the base. + + .. versionadded:: 2.0 + + .. seealso:: + + :ref:`orm_inheritance_abstract_poly` + + :param always_refresh: If True, all query operations for this mapped + class will overwrite all data within object instances that already + exist within the session, erasing any in-memory changes with + whatever information was loaded from the database. Usage of this + flag is highly discouraged; as an alternative, see the method + :meth:`_query.Query.populate_existing`. + + :param allow_partial_pks: Defaults to True. Indicates that a + composite primary key with some NULL values should be considered as + possibly existing within the database. This affects whether a + mapper will assign an incoming row to an existing identity, as well + as if :meth:`.Session.merge` will check the database first for a + particular primary key value. A "partial primary key" can occur if + one has mapped to an OUTER JOIN, for example. + + The :paramref:`.orm.Mapper.allow_partial_pks` parameter also + indicates to the ORM relationship lazy loader, when loading a + many-to-one related object, if a composite primary key that has + partial NULL values should result in an attempt to load from the + database, or if a load attempt is not necessary. + + .. versionadded:: 2.0.36 :paramref:`.orm.Mapper.allow_partial_pks` + is consulted by the relationship lazy loader strategy, such that + when set to False, a SELECT for a composite primary key that + has partial NULL values will not be emitted. + + :param batch: Defaults to ``True``, indicating that save operations + of multiple entities can be batched together for efficiency. + Setting to False indicates + that an instance will be fully saved before saving the next + instance. This is used in the extremely rare case that a + :class:`.MapperEvents` listener requires being called + in between individual row persistence operations. + + :param column_prefix: A string which will be prepended + to the mapped attribute name when :class:`_schema.Column` + objects are automatically assigned as attributes to the + mapped class. Does not affect :class:`.Column` objects that + are mapped explicitly in the :paramref:`.Mapper.properties` + dictionary. + + This parameter is typically useful with imperative mappings + that keep the :class:`.Table` object separate. Below, assuming + the ``user_table`` :class:`.Table` object has columns named + ``user_id``, ``user_name``, and ``password``:: + + class User(Base): + __table__ = user_table + __mapper_args__ = {"column_prefix": "_"} + + The above mapping will assign the ``user_id``, ``user_name``, and + ``password`` columns to attributes named ``_user_id``, + ``_user_name``, and ``_password`` on the mapped ``User`` class. + + The :paramref:`.Mapper.column_prefix` parameter is uncommon in + modern use. For dealing with reflected tables, a more flexible + approach to automating a naming scheme is to intercept the + :class:`.Column` objects as they are reflected; see the section + :ref:`mapper_automated_reflection_schemes` for notes on this usage + pattern. + + :param concrete: If True, indicates this mapper should use concrete + table inheritance with its parent mapper. + + See the section :ref:`concrete_inheritance` for an example. 
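    An illustrative sketch (editorial annotation, not part of the vendored
    docstring): the ``concrete`` parameter is normally passed through
    ``__mapper_args__`` in a declarative mapping; the ``Employee`` /
    ``Engineer`` classes below are hypothetical::

        from sqlalchemy import String
        from sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column


        class Base(DeclarativeBase):
            pass


        class Employee(Base):
            __tablename__ = "employee"
            id: Mapped[int] = mapped_column(primary_key=True)
            name: Mapped[str] = mapped_column(String(50))


        class Engineer(Employee):
            # a complete, standalone table; no join to "employee" is used
            __tablename__ = "engineer"
            id: Mapped[int] = mapped_column(primary_key=True)
            name: Mapped[str] = mapped_column(String(50))
            engineer_info: Mapped[str] = mapped_column(String(100))
            __mapper_args__ = {"concrete": True}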
+ + :param confirm_deleted_rows: defaults to True; when a DELETE occurs + of one more rows based on specific primary keys, a warning is + emitted when the number of rows matched does not equal the number + of rows expected. This parameter may be set to False to handle the + case where database ON DELETE CASCADE rules may be deleting some of + those rows automatically. The warning may be changed to an + exception in a future release. + + :param eager_defaults: if True, the ORM will immediately fetch the + value of server-generated default values after an INSERT or UPDATE, + rather than leaving them as expired to be fetched on next access. + This can be used for event schemes where the server-generated values + are needed immediately before the flush completes. + + The fetch of values occurs either by using ``RETURNING`` inline + with the ``INSERT`` or ``UPDATE`` statement, or by adding an + additional ``SELECT`` statement subsequent to the ``INSERT`` or + ``UPDATE``, if the backend does not support ``RETURNING``. + + The use of ``RETURNING`` is extremely performant in particular for + ``INSERT`` statements where SQLAlchemy can take advantage of + :ref:`insertmanyvalues `, whereas the use of + an additional ``SELECT`` is relatively poor performing, adding + additional SQL round trips which would be unnecessary if these new + attributes are not to be accessed in any case. + + For this reason, :paramref:`.Mapper.eager_defaults` defaults to the + string value ``"auto"``, which indicates that server defaults for + INSERT should be fetched using ``RETURNING`` if the backing database + supports it and if the dialect in use supports "insertmanyreturning" + for an INSERT statement. If the backing database does not support + ``RETURNING`` or "insertmanyreturning" is not available, server + defaults will not be fetched. + + .. versionchanged:: 2.0.0rc1 added the "auto" option for + :paramref:`.Mapper.eager_defaults` + + .. seealso:: + + :ref:`orm_server_defaults` + + .. versionchanged:: 2.0.0 RETURNING now works with multiple rows + INSERTed at once using the + :ref:`insertmanyvalues ` feature, which + among other things allows the :paramref:`.Mapper.eager_defaults` + feature to be very performant on supporting backends. + + :param exclude_properties: A list or set of string column names to + be excluded from mapping. + + .. seealso:: + + :ref:`include_exclude_cols` + + :param include_properties: An inclusive list or set of string column + names to map. + + .. seealso:: + + :ref:`include_exclude_cols` + + :param inherits: A mapped class or the corresponding + :class:`_orm.Mapper` + of one indicating a superclass to which this :class:`_orm.Mapper` + should *inherit* from. The mapped class here must be a subclass + of the other mapper's class. When using Declarative, this argument + is passed automatically as a result of the natural class + hierarchy of the declared classes. + + .. seealso:: + + :ref:`inheritance_toplevel` + + :param inherit_condition: For joined table inheritance, a SQL + expression which will + define how the two tables are joined; defaults to a natural join + between the two tables. + + :param inherit_foreign_keys: When ``inherit_condition`` is used and + the columns present are missing a :class:`_schema.ForeignKey` + configuration, this parameter can be used to specify which columns + are "foreign". In most cases can be left as ``None``. + + :param legacy_is_orphan: Boolean, defaults to ``False``. 
+ When ``True``, specifies that "legacy" orphan consideration + is to be applied to objects mapped by this mapper, which means + that a pending (that is, not persistent) object is auto-expunged + from an owning :class:`.Session` only when it is de-associated + from *all* parents that specify a ``delete-orphan`` cascade towards + this mapper. The new default behavior is that the object is + auto-expunged when it is de-associated with *any* of its parents + that specify ``delete-orphan`` cascade. This behavior is more + consistent with that of a persistent object, and allows behavior to + be consistent in more scenarios independently of whether or not an + orphan object has been flushed yet or not. + + See the change note and example at :ref:`legacy_is_orphan_addition` + for more detail on this change. + + :param non_primary: Specify that this :class:`_orm.Mapper` + is in addition + to the "primary" mapper, that is, the one used for persistence. + The :class:`_orm.Mapper` created here may be used for ad-hoc + mapping of the class to an alternate selectable, for loading + only. + + .. seealso:: + + :ref:`relationship_aliased_class` - the new pattern that removes + the need for the :paramref:`_orm.Mapper.non_primary` flag. + + :param passive_deletes: Indicates DELETE behavior of foreign key + columns when a joined-table inheritance entity is being deleted. + Defaults to ``False`` for a base mapper; for an inheriting mapper, + defaults to ``False`` unless the value is set to ``True`` + on the superclass mapper. + + When ``True``, it is assumed that ON DELETE CASCADE is configured + on the foreign key relationships that link this mapper's table + to its superclass table, so that when the unit of work attempts + to delete the entity, it need only emit a DELETE statement for the + superclass table, and not this table. + + When ``False``, a DELETE statement is emitted for this mapper's + table individually. If the primary key attributes local to this + table are unloaded, then a SELECT must be emitted in order to + validate these attributes; note that the primary key columns + of a joined-table subclass are not part of the "primary key" of + the object as a whole. + + Note that a value of ``True`` is **always** forced onto the + subclass mappers; that is, it's not possible for a superclass + to specify passive_deletes without this taking effect for + all subclass mappers. + + .. seealso:: + + :ref:`passive_deletes` - description of similar feature as + used with :func:`_orm.relationship` + + :paramref:`.mapper.passive_updates` - supporting ON UPDATE + CASCADE for joined-table inheritance mappers + + :param passive_updates: Indicates UPDATE behavior of foreign key + columns when a primary key column changes on a joined-table + inheritance mapping. Defaults to ``True``. + + When True, it is assumed that ON UPDATE CASCADE is configured on + the foreign key in the database, and that the database will handle + propagation of an UPDATE from a source column to dependent columns + on joined-table rows. + + When False, it is assumed that the database does not enforce + referential integrity and will not be issuing its own CASCADE + operation for an update. The unit of work process will + emit an UPDATE statement for the dependent columns during a + primary key change. + + .. 
seealso:: + + :ref:`passive_updates` - description of a similar feature as + used with :func:`_orm.relationship` + + :paramref:`.mapper.passive_deletes` - supporting ON DELETE + CASCADE for joined-table inheritance mappers + + :param polymorphic_load: Specifies "polymorphic loading" behavior + for a subclass in an inheritance hierarchy (joined and single + table inheritance only). Valid values are: + + * "'inline'" - specifies this class should be part of + the "with_polymorphic" mappers, e.g. its columns will be included + in a SELECT query against the base. + + * "'selectin'" - specifies that when instances of this class + are loaded, an additional SELECT will be emitted to retrieve + the columns specific to this subclass. The SELECT uses + IN to fetch multiple subclasses at once. + + .. versionadded:: 1.2 + + .. seealso:: + + :ref:`with_polymorphic_mapper_config` + + :ref:`polymorphic_selectin` + + :param polymorphic_on: Specifies the column, attribute, or + SQL expression used to determine the target class for an + incoming row, when inheriting classes are present. + + May be specified as a string attribute name, or as a SQL + expression such as a :class:`_schema.Column` or in a Declarative + mapping a :func:`_orm.mapped_column` object. It is typically + expected that the SQL expression corresponds to a column in the + base-most mapped :class:`.Table`:: + + class Employee(Base): + __tablename__ = "employee" + + id: Mapped[int] = mapped_column(primary_key=True) + discriminator: Mapped[str] = mapped_column(String(50)) + + __mapper_args__ = { + "polymorphic_on": discriminator, + "polymorphic_identity": "employee", + } + + It may also be specified + as a SQL expression, as in this example where we + use the :func:`.case` construct to provide a conditional + approach:: + + class Employee(Base): + __tablename__ = "employee" + + id: Mapped[int] = mapped_column(primary_key=True) + discriminator: Mapped[str] = mapped_column(String(50)) + + __mapper_args__ = { + "polymorphic_on": case( + (discriminator == "EN", "engineer"), + (discriminator == "MA", "manager"), + else_="employee", + ), + "polymorphic_identity": "employee", + } + + It may also refer to any attribute using its string name, + which is of particular use when using annotated column + configurations:: + + class Employee(Base): + __tablename__ = "employee" + + id: Mapped[int] = mapped_column(primary_key=True) + discriminator: Mapped[str] + + __mapper_args__ = { + "polymorphic_on": "discriminator", + "polymorphic_identity": "employee", + } + + When setting ``polymorphic_on`` to reference an + attribute or expression that's not present in the + locally mapped :class:`_schema.Table`, yet the value + of the discriminator should be persisted to the database, + the value of the + discriminator is not automatically set on new + instances; this must be handled by the user, + either through manual means or via event listeners. + A typical approach to establishing such a listener + looks like:: + + from sqlalchemy import event + from sqlalchemy.orm import object_mapper + + + @event.listens_for(Employee, "init", propagate=True) + def set_identity(instance, *arg, **kw): + mapper = object_mapper(instance) + instance.discriminator = mapper.polymorphic_identity + + Where above, we assign the value of ``polymorphic_identity`` + for the mapped class to the ``discriminator`` attribute, + thus persisting the value to the ``discriminator`` column + in the database. + + .. 
warning:: + + Currently, **only one discriminator column may be set**, typically + on the base-most class in the hierarchy. "Cascading" polymorphic + columns are not yet supported. + + .. seealso:: + + :ref:`inheritance_toplevel` + + :param polymorphic_identity: Specifies the value which + identifies this particular class as returned by the column expression + referred to by the :paramref:`_orm.Mapper.polymorphic_on` setting. As + rows are received, the value corresponding to the + :paramref:`_orm.Mapper.polymorphic_on` column expression is compared + to this value, indicating which subclass should be used for the newly + reconstructed object. + + .. seealso:: + + :ref:`inheritance_toplevel` + + :param properties: A dictionary mapping the string names of object + attributes to :class:`.MapperProperty` instances, which define the + persistence behavior of that attribute. Note that + :class:`_schema.Column` + objects present in + the mapped :class:`_schema.Table` are automatically placed into + ``ColumnProperty`` instances upon mapping, unless overridden. + When using Declarative, this argument is passed automatically, + based on all those :class:`.MapperProperty` instances declared + in the declared class body. + + .. seealso:: + + :ref:`orm_mapping_properties` - in the + :ref:`orm_mapping_classes_toplevel` + + :param primary_key: A list of :class:`_schema.Column` + objects, or alternatively string names of attribute names which + refer to :class:`_schema.Column`, which define + the primary key to be used against this mapper's selectable unit. + This is normally simply the primary key of the ``local_table``, but + can be overridden here. + + .. versionchanged:: 2.0.2 :paramref:`_orm.Mapper.primary_key` + arguments may be indicated as string attribute names as well. + + .. seealso:: + + :ref:`mapper_primary_key` - background and example use + + :param version_id_col: A :class:`_schema.Column` + that will be used to keep a running version id of rows + in the table. This is used to detect concurrent updates or + the presence of stale data in a flush. The methodology is to + detect if an UPDATE statement does not match the last known + version id, a + :class:`~sqlalchemy.orm.exc.StaleDataError` exception is + thrown. + By default, the column must be of :class:`.Integer` type, + unless ``version_id_generator`` specifies an alternative version + generator. + + .. seealso:: + + :ref:`mapper_version_counter` - discussion of version counting + and rationale. + + :param version_id_generator: Define how new version ids should + be generated. Defaults to ``None``, which indicates that + a simple integer counting scheme be employed. To provide a custom + versioning scheme, provide a callable function of the form:: + + def generate_version(version): + return next_version + + Alternatively, server-side versioning functions such as triggers, + or programmatic versioning schemes outside of the version id + generator may be used, by specifying the value ``False``. + Please see :ref:`server_side_version_counter` for a discussion + of important points when using this option. + + .. seealso:: + + :ref:`custom_version_counter` + + :ref:`server_side_version_counter` + + + :param with_polymorphic: A tuple in the form ``(<classes>, + <selectable>)`` indicating the default style of "polymorphic" + loading, that is, which tables are queried at once. <classes> is + any single or list of mappers and/or classes indicating the + inherited classes that should be loaded at once. 
The special value + ``'*'`` may be used to indicate all descending classes should be + loaded immediately. The second tuple argument + indicates a selectable that will be used to query for multiple + classes. + + The :paramref:`_orm.Mapper.polymorphic_load` parameter may be + preferable over the use of :paramref:`_orm.Mapper.with_polymorphic` + in modern mappings to indicate a per-subclass technique of + indicating polymorphic loading styles. + + .. seealso:: + + :ref:`with_polymorphic_mapper_config` + + """ + self.class_ = util.assert_arg_type(class_, type, "class_") + self._sort_key = "%s.%s" % ( + self.class_.__module__, + self.class_.__name__, + ) + + self._primary_key_argument = util.to_list(primary_key) + self.non_primary = non_primary + + self.always_refresh = always_refresh + + if isinstance(version_id_col, MapperProperty): + self.version_id_prop = version_id_col + self.version_id_col = None + else: + self.version_id_col = ( + coercions.expect( + roles.ColumnArgumentOrKeyRole, + version_id_col, + argname="version_id_col", + ) + if version_id_col is not None + else None + ) + + if version_id_generator is False: + self.version_id_generator = False + elif version_id_generator is None: + self.version_id_generator = lambda x: (x or 0) + 1 + else: + self.version_id_generator = version_id_generator + + self.concrete = concrete + self.single = False + + if inherits is not None: + self.inherits = _parse_mapper_argument(inherits) + else: + self.inherits = None + + if local_table is not None: + self.local_table = coercions.expect( + roles.StrictFromClauseRole, + local_table, + disable_inspection=True, + argname="local_table", + ) + elif self.inherits: + # note this is a new flow as of 2.0 so that + # .local_table need not be Optional + self.local_table = self.inherits.local_table + self.single = True + else: + raise sa_exc.ArgumentError( + f"Mapper[{self.class_.__name__}(None)] has None for a " + "primary table argument and does not specify 'inherits'" + ) + + if inherit_condition is not None: + self.inherit_condition = coercions.expect( + roles.OnClauseRole, inherit_condition + ) + else: + self.inherit_condition = None + + self.inherit_foreign_keys = inherit_foreign_keys + self._init_properties = dict(properties) if properties else {} + self._delete_orphans = [] + self.batch = batch + self.eager_defaults = eager_defaults + self.column_prefix = column_prefix + + # interim - polymorphic_on is further refined in + # _configure_polymorphic_setter + self.polymorphic_on = ( + coercions.expect( # type: ignore + roles.ColumnArgumentOrKeyRole, + polymorphic_on, + argname="polymorphic_on", + ) + if polymorphic_on is not None + else None + ) + self.polymorphic_abstract = polymorphic_abstract + self._dependency_processors = [] + self.validators = util.EMPTY_DICT + self.passive_updates = passive_updates + self.passive_deletes = passive_deletes + self.legacy_is_orphan = legacy_is_orphan + self._clause_adapter = None + self._requires_row_aliasing = False + self._inherits_equated_pairs = None + self._memoized_values = {} + self._compiled_cache_size = _compiled_cache_size + self._reconstructor = None + self.allow_partial_pks = allow_partial_pks + + if self.inherits and not self.concrete: + self.confirm_deleted_rows = False + else: + self.confirm_deleted_rows = confirm_deleted_rows + + self._set_with_polymorphic(with_polymorphic) + self.polymorphic_load = polymorphic_load + + # our 'polymorphic identity', a string name that when located in a + # result set row indicates this Mapper should be used to construct 
+ # the object instance for that row. + self.polymorphic_identity = polymorphic_identity + + # a dictionary of 'polymorphic identity' names, associating those + # names with Mappers that will be used to construct object instances + # upon a select operation. + if _polymorphic_map is None: + self.polymorphic_map = {} + else: + self.polymorphic_map = _polymorphic_map + + if include_properties is not None: + self.include_properties = util.to_set(include_properties) + else: + self.include_properties = None + if exclude_properties: + self.exclude_properties = util.to_set(exclude_properties) + else: + self.exclude_properties = None + + # prevent this mapper from being constructed + # while a configure_mappers() is occurring (and defer a + # configure_mappers() until construction succeeds) + with _CONFIGURE_MUTEX: + cast("MapperEvents", self.dispatch._events)._new_mapper_instance( + class_, self + ) + self._configure_inheritance() + self._configure_class_instrumentation() + self._configure_properties() + self._configure_polymorphic_setter() + self._configure_pks() + self.registry._flag_new_mapper(self) + self._log("constructed") + self._expire_memoizations() + + self.dispatch.after_mapper_constructed(self, self.class_) + + def _prefer_eager_defaults(self, dialect, table): + if self.eager_defaults == "auto": + if not table.implicit_returning: + return False + + return ( + table in self._server_default_col_keys + and dialect.insert_executemany_returning + ) + else: + return self.eager_defaults + + def _gen_cache_key(self, anon_map, bindparams): + return (self,) + + # ### BEGIN + # ATTRIBUTE DECLARATIONS START HERE + + is_mapper = True + """Part of the inspection API.""" + + represents_outer_join = False + + registry: _RegistryType + + @property + def mapper(self) -> Mapper[_O]: + """Part of the inspection API. + + Returns self. + + """ + return self + + @property + def entity(self): + r"""Part of the inspection API. + + Returns self.class\_. + + """ + return self.class_ + + class_: Type[_O] + """The class to which this :class:`_orm.Mapper` is mapped.""" + + _identity_class: Type[_O] + + _delete_orphans: List[Tuple[str, Type[Any]]] + _dependency_processors: List[DependencyProcessor] + _memoized_values: Dict[Any, Callable[[], Any]] + _inheriting_mappers: util.WeakSequence[Mapper[Any]] + _all_tables: Set[TableClause] + _polymorphic_attr_key: Optional[str] + + _pks_by_table: Dict[FromClause, OrderedSet[ColumnClause[Any]]] + _cols_by_table: Dict[FromClause, OrderedSet[ColumnElement[Any]]] + + _props: util.OrderedDict[str, MapperProperty[Any]] + _init_properties: Dict[str, MapperProperty[Any]] + + _columntoproperty: _ColumnMapping + + _set_polymorphic_identity: Optional[Callable[[InstanceState[_O]], None]] + _validate_polymorphic_identity: Optional[ + Callable[[Mapper[_O], InstanceState[_O], _InstanceDict], None] + ] + + tables: Sequence[TableClause] + """A sequence containing the collection of :class:`_schema.Table` + or :class:`_schema.TableClause` objects which this :class:`_orm.Mapper` + is aware of. + + If the mapper is mapped to a :class:`_expression.Join`, or an + :class:`_expression.Alias` + representing a :class:`_expression.Select`, the individual + :class:`_schema.Table` + objects that comprise the full construct will be represented here. + + This is a *read only* attribute determined during mapper construction. + Behavior is undefined if directly modified. 
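+
+    As a minimal sketch (assuming a mapped ``User`` class), the collection
+    can be examined through the runtime inspection API::
+
+        from sqlalchemy import inspect
+
+        user_mapper = inspect(User)  # returns the Mapper for User
+        print(user_mapper.tables)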
+ + """ + + validators: util.immutabledict[str, Tuple[str, Dict[str, Any]]] + """An immutable dictionary of attributes which have been decorated + using the :func:`_orm.validates` decorator. + + The dictionary contains string attribute names as keys + mapped to the actual validation method. + + """ + + always_refresh: bool + allow_partial_pks: bool + version_id_col: Optional[ColumnElement[Any]] + + with_polymorphic: Optional[ + Tuple[ + Union[Literal["*"], Sequence[Union[Mapper[Any], Type[Any]]]], + Optional[FromClause], + ] + ] + + version_id_generator: Optional[Union[Literal[False], Callable[[Any], Any]]] + + local_table: FromClause + """The immediate :class:`_expression.FromClause` to which this + :class:`_orm.Mapper` refers. + + Typically is an instance of :class:`_schema.Table`, may be any + :class:`.FromClause`. + + The "local" table is the + selectable that the :class:`_orm.Mapper` is directly responsible for + managing from an attribute access and flush perspective. For + non-inheriting mappers, :attr:`.Mapper.local_table` will be the same + as :attr:`.Mapper.persist_selectable`. For inheriting mappers, + :attr:`.Mapper.local_table` refers to the specific portion of + :attr:`.Mapper.persist_selectable` that includes the columns to which + this :class:`.Mapper` is loading/persisting, such as a particular + :class:`.Table` within a join. + + .. seealso:: + + :attr:`_orm.Mapper.persist_selectable`. + + :attr:`_orm.Mapper.selectable`. + + """ + + persist_selectable: FromClause + """The :class:`_expression.FromClause` to which this :class:`_orm.Mapper` + is mapped. + + Typically is an instance of :class:`_schema.Table`, may be any + :class:`.FromClause`. + + The :attr:`_orm.Mapper.persist_selectable` is similar to + :attr:`.Mapper.local_table`, but represents the :class:`.FromClause` that + represents the inheriting class hierarchy overall in an inheritance + scenario. + + :attr.`.Mapper.persist_selectable` is also separate from the + :attr:`.Mapper.selectable` attribute, the latter of which may be an + alternate subquery used for selecting columns. + :attr.`.Mapper.persist_selectable` is oriented towards columns that + will be written on a persist operation. + + .. seealso:: + + :attr:`_orm.Mapper.selectable`. + + :attr:`_orm.Mapper.local_table`. + + """ + + inherits: Optional[Mapper[Any]] + """References the :class:`_orm.Mapper` which this :class:`_orm.Mapper` + inherits from, if any. + + """ + + inherit_condition: Optional[ColumnElement[bool]] + + configured: bool = False + """Represent ``True`` if this :class:`_orm.Mapper` has been configured. + + This is a *read only* attribute determined during mapper construction. + Behavior is undefined if directly modified. + + .. seealso:: + + :func:`.configure_mappers`. + + """ + + concrete: bool + """Represent ``True`` if this :class:`_orm.Mapper` is a concrete + inheritance mapper. + + This is a *read only* attribute determined during mapper construction. + Behavior is undefined if directly modified. + + """ + + primary_key: Tuple[Column[Any], ...] + """An iterable containing the collection of :class:`_schema.Column` + objects + which comprise the 'primary key' of the mapped table, from the + perspective of this :class:`_orm.Mapper`. + + This list is against the selectable in + :attr:`_orm.Mapper.persist_selectable`. + In the case of inheriting mappers, some columns may be managed by a + superclass mapper. 
For example, in the case of a + :class:`_expression.Join`, the + primary key is determined by all of the primary key columns across all + tables referenced by the :class:`_expression.Join`. + + The list is also not necessarily the same as the primary key column + collection associated with the underlying tables; the :class:`_orm.Mapper` + features a ``primary_key`` argument that can override what the + :class:`_orm.Mapper` considers as primary key columns. + + This is a *read only* attribute determined during mapper construction. + Behavior is undefined if directly modified. + + """ + + class_manager: ClassManager[_O] + """The :class:`.ClassManager` which maintains event listeners + and class-bound descriptors for this :class:`_orm.Mapper`. + + This is a *read only* attribute determined during mapper construction. + Behavior is undefined if directly modified. + + """ + + single: bool + """Represent ``True`` if this :class:`_orm.Mapper` is a single table + inheritance mapper. + + :attr:`_orm.Mapper.local_table` will be ``None`` if this flag is set. + + This is a *read only* attribute determined during mapper construction. + Behavior is undefined if directly modified. + + """ + + non_primary: bool + """Represent ``True`` if this :class:`_orm.Mapper` is a "non-primary" + mapper, e.g. a mapper that is used only to select rows but not for + persistence management. + + This is a *read only* attribute determined during mapper construction. + Behavior is undefined if directly modified. + + """ + + polymorphic_on: Optional[KeyedColumnElement[Any]] + """The :class:`_schema.Column` or SQL expression specified as the + ``polymorphic_on`` argument + for this :class:`_orm.Mapper`, within an inheritance scenario. + + This attribute is normally a :class:`_schema.Column` instance but + may also be an expression, such as one derived from + :func:`.cast`. + + This is a *read only* attribute determined during mapper construction. + Behavior is undefined if directly modified. + + """ + + polymorphic_map: Dict[Any, Mapper[Any]] + """A mapping of "polymorphic identity" identifiers mapped to + :class:`_orm.Mapper` instances, within an inheritance scenario. + + The identifiers can be of any type which is comparable to the + type of column represented by :attr:`_orm.Mapper.polymorphic_on`. + + An inheritance chain of mappers will all reference the same + polymorphic map object. The object is used to correlate incoming + result rows to target mappers. + + This is a *read only* attribute determined during mapper construction. + Behavior is undefined if directly modified. + + """ + + polymorphic_identity: Optional[Any] + """Represent an identifier which is matched against the + :attr:`_orm.Mapper.polymorphic_on` column during result row loading. + + Used only with inheritance, this object can be of any type which is + comparable to the type of column represented by + :attr:`_orm.Mapper.polymorphic_on`. + + This is a *read only* attribute determined during mapper construction. + Behavior is undefined if directly modified. + + """ + + base_mapper: Mapper[Any] + """The base-most :class:`_orm.Mapper` in an inheritance chain. + + In a non-inheriting scenario, this attribute will always be this + :class:`_orm.Mapper`. In an inheritance scenario, it references + the :class:`_orm.Mapper` which is parent to all other :class:`_orm.Mapper` + objects in the inheritance chain. + + This is a *read only* attribute determined during mapper construction. + Behavior is undefined if directly modified. 
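+
+    As a minimal sketch (assuming an ``Engineer`` class mapped as a
+    subclass of a mapped ``Person`` base), the attribute can be verified
+    through the runtime inspection API::
+
+        from sqlalchemy import inspect
+
+        # the base-most mapper of the Engineer hierarchy is Person's mapper
+        assert inspect(Engineer).base_mapper is inspect(Person)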
+ + """ + + columns: ReadOnlyColumnCollection[str, Column[Any]] + """A collection of :class:`_schema.Column` or other scalar expression + objects maintained by this :class:`_orm.Mapper`. + + The collection behaves the same as that of the ``c`` attribute on + any :class:`_schema.Table` object, + except that only those columns included in + this mapping are present, and are keyed based on the attribute name + defined in the mapping, not necessarily the ``key`` attribute of the + :class:`_schema.Column` itself. Additionally, scalar expressions mapped + by :func:`.column_property` are also present here. + + This is a *read only* attribute determined during mapper construction. + Behavior is undefined if directly modified. + + """ + + c: ReadOnlyColumnCollection[str, Column[Any]] + """A synonym for :attr:`_orm.Mapper.columns`.""" + + @util.non_memoized_property + @util.deprecated("1.3", "Use .persist_selectable") + def mapped_table(self): + return self.persist_selectable + + @util.memoized_property + def _path_registry(self) -> CachingEntityRegistry: + return PathRegistry.per_mapper(self) + + def _configure_inheritance(self): + """Configure settings related to inheriting and/or inherited mappers + being present.""" + + # a set of all mappers which inherit from this one. + self._inheriting_mappers = util.WeakSequence() + + if self.inherits: + if not issubclass(self.class_, self.inherits.class_): + raise sa_exc.ArgumentError( + "Class '%s' does not inherit from '%s'" + % (self.class_.__name__, self.inherits.class_.__name__) + ) + + self.dispatch._update(self.inherits.dispatch) + + if self.non_primary != self.inherits.non_primary: + np = not self.non_primary and "primary" or "non-primary" + raise sa_exc.ArgumentError( + "Inheritance of %s mapper for class '%s' is " + "only allowed from a %s mapper" + % (np, self.class_.__name__, np) + ) + + if self.single: + self.persist_selectable = self.inherits.persist_selectable + elif self.local_table is not self.inherits.local_table: + if self.concrete: + self.persist_selectable = self.local_table + for mapper in self.iterate_to_root(): + if mapper.polymorphic_on is not None: + mapper._requires_row_aliasing = True + else: + if self.inherit_condition is None: + # figure out inherit condition from our table to the + # immediate table of the inherited mapper, not its + # full table which could pull in other stuff we don't + # want (allows test/inheritance.InheritTest4 to pass) + try: + self.inherit_condition = sql_util.join_condition( + self.inherits.local_table, self.local_table + ) + except sa_exc.NoForeignKeysError as nfe: + assert self.inherits.local_table is not None + assert self.local_table is not None + raise sa_exc.NoForeignKeysError( + "Can't determine the inherit condition " + "between inherited table '%s' and " + "inheriting " + "table '%s'; tables have no " + "foreign key relationships established. " + "Please ensure the inheriting table has " + "a foreign key relationship to the " + "inherited " + "table, or provide an " + "'on clause' using " + "the 'inherit_condition' mapper argument." + % ( + self.inherits.local_table.description, + self.local_table.description, + ) + ) from nfe + except sa_exc.AmbiguousForeignKeysError as afe: + assert self.inherits.local_table is not None + assert self.local_table is not None + raise sa_exc.AmbiguousForeignKeysError( + "Can't determine the inherit condition " + "between inherited table '%s' and " + "inheriting " + "table '%s'; tables have more than one " + "foreign key relationship established. 
" + "Please specify the 'on clause' using " + "the 'inherit_condition' mapper argument." + % ( + self.inherits.local_table.description, + self.local_table.description, + ) + ) from afe + assert self.inherits.persist_selectable is not None + self.persist_selectable = sql.join( + self.inherits.persist_selectable, + self.local_table, + self.inherit_condition, + ) + + fks = util.to_set(self.inherit_foreign_keys) + self._inherits_equated_pairs = sql_util.criterion_as_pairs( + self.persist_selectable.onclause, + consider_as_foreign_keys=fks, + ) + else: + self.persist_selectable = self.local_table + + if self.polymorphic_identity is None: + self._identity_class = self.class_ + + if ( + not self.polymorphic_abstract + and self.inherits.base_mapper.polymorphic_on is not None + ): + util.warn( + f"{self} does not indicate a 'polymorphic_identity', " + "yet is part of an inheritance hierarchy that has a " + f"'polymorphic_on' column of " + f"'{self.inherits.base_mapper.polymorphic_on}'. " + "If this is an intermediary class that should not be " + "instantiated, the class may either be left unmapped, " + "or may include the 'polymorphic_abstract=True' " + "parameter in its Mapper arguments. To leave the " + "class unmapped when using Declarative, set the " + "'__abstract__ = True' attribute on the class." + ) + elif self.concrete: + self._identity_class = self.class_ + else: + self._identity_class = self.inherits._identity_class + + if self.version_id_col is None: + self.version_id_col = self.inherits.version_id_col + self.version_id_generator = self.inherits.version_id_generator + elif ( + self.inherits.version_id_col is not None + and self.version_id_col is not self.inherits.version_id_col + ): + util.warn( + "Inheriting version_id_col '%s' does not match inherited " + "version_id_col '%s' and will not automatically populate " + "the inherited versioning column. " + "version_id_col should only be specified on " + "the base-most mapper that includes versioning." + % ( + self.version_id_col.description, + self.inherits.version_id_col.description, + ) + ) + + self.polymorphic_map = self.inherits.polymorphic_map + self.batch = self.inherits.batch + self.inherits._inheriting_mappers.append(self) + self.base_mapper = self.inherits.base_mapper + self.passive_updates = self.inherits.passive_updates + self.passive_deletes = ( + self.inherits.passive_deletes or self.passive_deletes + ) + self._all_tables = self.inherits._all_tables + + if self.polymorphic_identity is not None: + if self.polymorphic_identity in self.polymorphic_map: + util.warn( + "Reassigning polymorphic association for identity %r " + "from %r to %r: Check for duplicate use of %r as " + "value for polymorphic_identity." 
+ % ( + self.polymorphic_identity, + self.polymorphic_map[self.polymorphic_identity], + self, + self.polymorphic_identity, + ) + ) + self.polymorphic_map[self.polymorphic_identity] = self + + if self.polymorphic_load and self.concrete: + raise sa_exc.ArgumentError( + "polymorphic_load is not currently supported " + "with concrete table inheritance" + ) + if self.polymorphic_load == "inline": + self.inherits._add_with_polymorphic_subclass(self) + elif self.polymorphic_load == "selectin": + pass + elif self.polymorphic_load is not None: + raise sa_exc.ArgumentError( + "unknown argument for polymorphic_load: %r" + % self.polymorphic_load + ) + + else: + self._all_tables = set() + self.base_mapper = self + assert self.local_table is not None + self.persist_selectable = self.local_table + if self.polymorphic_identity is not None: + self.polymorphic_map[self.polymorphic_identity] = self + self._identity_class = self.class_ + + if self.persist_selectable is None: + raise sa_exc.ArgumentError( + "Mapper '%s' does not have a persist_selectable specified." + % self + ) + + def _set_with_polymorphic( + self, with_polymorphic: Optional[_WithPolymorphicArg] + ) -> None: + if with_polymorphic == "*": + self.with_polymorphic = ("*", None) + elif isinstance(with_polymorphic, (tuple, list)): + if isinstance(with_polymorphic[0], (str, tuple, list)): + self.with_polymorphic = cast( + """Tuple[ + Union[ + Literal["*"], + Sequence[Union["Mapper[Any]", Type[Any]]], + ], + Optional["FromClause"], + ]""", + with_polymorphic, + ) + else: + self.with_polymorphic = (with_polymorphic, None) + elif with_polymorphic is not None: + raise sa_exc.ArgumentError( + f"Invalid setting for with_polymorphic: {with_polymorphic!r}" + ) + else: + self.with_polymorphic = None + + if self.with_polymorphic and self.with_polymorphic[1] is not None: + self.with_polymorphic = ( + self.with_polymorphic[0], + coercions.expect( + roles.StrictFromClauseRole, + self.with_polymorphic[1], + allow_select=True, + ), + ) + + if self.configured: + self._expire_memoizations() + + def _add_with_polymorphic_subclass(self, mapper): + subcl = mapper.class_ + if self.with_polymorphic is None: + self._set_with_polymorphic((subcl,)) + elif self.with_polymorphic[0] != "*": + assert isinstance(self.with_polymorphic[0], tuple) + self._set_with_polymorphic( + (self.with_polymorphic[0] + (subcl,), self.with_polymorphic[1]) + ) + + def _set_concrete_base(self, mapper): + """Set the given :class:`_orm.Mapper` as the 'inherits' for this + :class:`_orm.Mapper`, assuming this :class:`_orm.Mapper` is concrete + and does not already have an inherits.""" + + assert self.concrete + assert not self.inherits + assert isinstance(mapper, Mapper) + self.inherits = mapper + self.inherits.polymorphic_map.update(self.polymorphic_map) + self.polymorphic_map = self.inherits.polymorphic_map + for mapper in self.iterate_to_root(): + if mapper.polymorphic_on is not None: + mapper._requires_row_aliasing = True + self.batch = self.inherits.batch + for mp in self.self_and_descendants: + mp.base_mapper = self.inherits.base_mapper + self.inherits._inheriting_mappers.append(self) + self.passive_updates = self.inherits.passive_updates + self._all_tables = self.inherits._all_tables + + for key, prop in mapper._props.items(): + if key not in self._props and not self._should_exclude( + key, key, local=False, column=None + ): + self._adapt_inherited_property(key, prop, False) + + def _set_polymorphic_on(self, polymorphic_on): + self.polymorphic_on = polymorphic_on + 
self._configure_polymorphic_setter(True) + + def _configure_class_instrumentation(self): + """If this mapper is to be a primary mapper (i.e. the + non_primary flag is not set), associate this Mapper with the + given class and entity name. + + Subsequent calls to ``class_mapper()`` for the ``class_`` / ``entity`` + name combination will return this mapper. Also decorate the + `__init__` method on the mapped class to include optional + auto-session attachment logic. + + """ + + # we expect that declarative has applied the class manager + # already and set up a registry. if this is None, + # this raises as of 2.0. + manager = attributes.opt_manager_of_class(self.class_) + + if self.non_primary: + if not manager or not manager.is_mapped: + raise sa_exc.InvalidRequestError( + "Class %s has no primary mapper configured. Configure " + "a primary mapper first before setting up a non primary " + "Mapper." % self.class_ + ) + self.class_manager = manager + + assert manager.registry is not None + self.registry = manager.registry + self._identity_class = manager.mapper._identity_class + manager.registry._add_non_primary_mapper(self) + return + + if manager is None or not manager.registry: + raise sa_exc.InvalidRequestError( + "The _mapper() function and Mapper() constructor may not be " + "invoked directly outside of a declarative registry." + " Please use the sqlalchemy.orm.registry.map_imperatively() " + "function for a classical mapping." + ) + + self.dispatch.instrument_class(self, self.class_) + + # this invokes the class_instrument event and sets up + # the __init__ method. documented behavior is that this must + # occur after the instrument_class event above. + # yes two events with the same two words reversed and different APIs. + # :( + + manager = instrumentation.register_class( + self.class_, + mapper=self, + expired_attribute_loader=util.partial( + loading.load_scalar_attributes, self + ), + # finalize flag means instrument the __init__ method + # and call the class_instrument event + finalize=True, + ) + + self.class_manager = manager + + assert manager.registry is not None + self.registry = manager.registry + + # The remaining members can be added by any mapper, + # e_name None or not. + if manager.mapper is None: + return + + event.listen(manager, "init", _event_on_init, raw=True) + + for key, method in util.iterate_attributes(self.class_): + if key == "__init__" and hasattr(method, "_sa_original_init"): + method = method._sa_original_init + if hasattr(method, "__func__"): + method = method.__func__ + if callable(method): + if hasattr(method, "__sa_reconstructor__"): + self._reconstructor = method + event.listen(manager, "load", _event_on_load, raw=True) + elif hasattr(method, "__sa_validators__"): + validation_opts = method.__sa_validation_opts__ + for name in method.__sa_validators__: + if name in self.validators: + raise sa_exc.InvalidRequestError( + "A validation function for mapped " + "attribute %r on mapper %s already exists." + % (name, self) + ) + self.validators = self.validators.union( + {name: (method, validation_opts)} + ) + + def _set_dispose_flags(self) -> None: + self.configured = True + self._ready_for_configure = True + self._dispose_called = True + + self.__dict__.pop("_configure_failed", None) + + def _str_arg_to_mapped_col(self, argname: str, key: str) -> Column[Any]: + try: + prop = self._props[key] + except KeyError as err: + raise sa_exc.ArgumentError( + f"Can't determine {argname} column '{key}' - " + "no attribute is mapped to this name." 
+ ) from err + try: + expr = prop.expression + except AttributeError as ae: + raise sa_exc.ArgumentError( + f"Can't determine {argname} column '{key}'; " + "property does not refer to a single mapped Column" + ) from ae + if not isinstance(expr, Column): + raise sa_exc.ArgumentError( + f"Can't determine {argname} column '{key}'; " + "property does not refer to a single " + "mapped Column" + ) + return expr + + def _configure_pks(self) -> None: + self.tables = sql_util.find_tables(self.persist_selectable) + + self._all_tables.update(t for t in self.tables) + + self._pks_by_table = {} + self._cols_by_table = {} + + all_cols = util.column_set( + chain(*[col.proxy_set for col in self._columntoproperty]) + ) + + pk_cols = util.column_set(c for c in all_cols if c.primary_key) + + # identify primary key columns which are also mapped by this mapper. + for fc in set(self.tables).union([self.persist_selectable]): + if fc.primary_key and pk_cols.issuperset(fc.primary_key): + # ordering is important since it determines the ordering of + # mapper.primary_key (and therefore query.get()) + self._pks_by_table[fc] = util.ordered_column_set( # type: ignore # noqa: E501 + fc.primary_key + ).intersection( + pk_cols + ) + self._cols_by_table[fc] = util.ordered_column_set(fc.c).intersection( # type: ignore # noqa: E501 + all_cols + ) + + if self._primary_key_argument: + coerced_pk_arg = [ + ( + self._str_arg_to_mapped_col("primary_key", c) + if isinstance(c, str) + else c + ) + for c in ( + coercions.expect( + roles.DDLConstraintColumnRole, + coerce_pk, + argname="primary_key", + ) + for coerce_pk in self._primary_key_argument + ) + ] + else: + coerced_pk_arg = None + + # if explicit PK argument sent, add those columns to the + # primary key mappings + if coerced_pk_arg: + for k in coerced_pk_arg: + if k.table not in self._pks_by_table: + self._pks_by_table[k.table] = util.OrderedSet() + self._pks_by_table[k.table].add(k) + + # otherwise, see that we got a full PK for the mapped table + elif ( + self.persist_selectable not in self._pks_by_table + or len(self._pks_by_table[self.persist_selectable]) == 0 + ): + raise sa_exc.ArgumentError( + "Mapper %s could not assemble any primary " + "key columns for mapped table '%s'" + % (self, self.persist_selectable.description) + ) + elif self.local_table not in self._pks_by_table and isinstance( + self.local_table, schema.Table + ): + util.warn( + "Could not assemble any primary " + "keys for locally mapped table '%s' - " + "no rows will be persisted in this Table." + % self.local_table.description + ) + + if ( + self.inherits + and not self.concrete + and not self._primary_key_argument + ): + # if inheriting, the "primary key" for this mapper is + # that of the inheriting (unless concrete or explicit) + self.primary_key = self.inherits.primary_key + else: + # determine primary key from argument or persist_selectable pks + primary_key: Collection[ColumnElement[Any]] + + if coerced_pk_arg: + primary_key = [ + cc if cc is not None else c + for cc, c in ( + (self.persist_selectable.corresponding_column(c), c) + for c in coerced_pk_arg + ) + ] + else: + # if heuristically determined PKs, reduce to the minimal set + # of columns by eliminating FK->PK pairs for a multi-table + # expression. 
May over-reduce for some kinds of UNIONs + # / CTEs; use explicit PK argument for these special cases + primary_key = sql_util.reduce_columns( + self._pks_by_table[self.persist_selectable], + ignore_nonexistent_tables=True, + ) + + if len(primary_key) == 0: + raise sa_exc.ArgumentError( + "Mapper %s could not assemble any primary " + "key columns for mapped table '%s'" + % (self, self.persist_selectable.description) + ) + + self.primary_key = tuple(primary_key) + self._log("Identified primary key columns: %s", primary_key) + + # determine cols that aren't expressed within our tables; mark these + # as "read only" properties which are refreshed upon INSERT/UPDATE + self._readonly_props = { + self._columntoproperty[col] + for col in self._columntoproperty + if self._columntoproperty[col] not in self._identity_key_props + and ( + not hasattr(col, "table") + or col.table not in self._cols_by_table + ) + } + + def _configure_properties(self) -> None: + self.columns = self.c = sql_base.ColumnCollection() # type: ignore + + # object attribute names mapped to MapperProperty objects + self._props = util.OrderedDict() + + # table columns mapped to MapperProperty + self._columntoproperty = _ColumnMapping(self) + + explicit_col_props_by_column: Dict[ + KeyedColumnElement[Any], Tuple[str, ColumnProperty[Any]] + ] = {} + explicit_col_props_by_key: Dict[str, ColumnProperty[Any]] = {} + + # step 1: go through properties that were explicitly passed + # in the properties dictionary. For Columns that are local, put them + # aside in a separate collection we will reconcile with the Table + # that's given. For other properties, set them up in _props now. + if self._init_properties: + for key, prop_arg in self._init_properties.items(): + if not isinstance(prop_arg, MapperProperty): + possible_col_prop = self._make_prop_from_column( + key, prop_arg + ) + else: + possible_col_prop = prop_arg + + # issue #8705. if the explicit property is actually a + # Column that is local to the local Table, don't set it up + # in ._props yet, integrate it into the order given within + # the Table. + + _map_as_property_now = True + if isinstance(possible_col_prop, properties.ColumnProperty): + for given_col in possible_col_prop.columns: + if self.local_table.c.contains_column(given_col): + _map_as_property_now = False + explicit_col_props_by_key[key] = possible_col_prop + explicit_col_props_by_column[given_col] = ( + key, + possible_col_prop, + ) + + if _map_as_property_now: + self._configure_property( + key, + possible_col_prop, + init=False, + ) + + # step 2: pull properties from the inherited mapper. reconcile + # columns with those which are explicit above. for properties that + # are only in the inheriting mapper, set them up as local props + if self.inherits: + for key, inherited_prop in self.inherits._props.items(): + if self._should_exclude(key, key, local=False, column=None): + continue + + incoming_prop = explicit_col_props_by_key.get(key) + if incoming_prop: + new_prop = self._reconcile_prop_with_incoming_columns( + key, + inherited_prop, + warn_only=False, + incoming_prop=incoming_prop, + ) + explicit_col_props_by_key[key] = new_prop + + for inc_col in incoming_prop.columns: + explicit_col_props_by_column[inc_col] = ( + key, + new_prop, + ) + elif key not in self._props: + self._adapt_inherited_property(key, inherited_prop, False) + + # step 3. Iterate through all columns in the persist selectable. 
+ # this includes not only columns in the local table / fromclause, + # but also those columns in the superclass table if we are joined + # inh or single inh mapper. map these columns as well. additional + # reconciliation against inherited columns occurs here also. + + for column in self.persist_selectable.columns: + if column in explicit_col_props_by_column: + # column was explicitly passed to properties; configure + # it now in the order in which it corresponds to the + # Table / selectable + key, prop = explicit_col_props_by_column[column] + self._configure_property(key, prop, init=False) + continue + + elif column in self._columntoproperty: + continue + + column_key = (self.column_prefix or "") + column.key + if self._should_exclude( + column.key, + column_key, + local=self.local_table.c.contains_column(column), + column=column, + ): + continue + + # adjust the "key" used for this column to that + # of the inheriting mapper + for mapper in self.iterate_to_root(): + if column in mapper._columntoproperty: + column_key = mapper._columntoproperty[column].key + + self._configure_property( + column_key, + column, + init=False, + setparent=True, + ) + + def _configure_polymorphic_setter(self, init=False): + """Configure an attribute on the mapper representing the + 'polymorphic_on' column, if applicable, and not + already generated by _configure_properties (which is typical). + + Also create a setter function which will assign this + attribute to the value of the 'polymorphic_identity' + upon instance construction, also if applicable. This + routine will run when an instance is created. + + """ + setter = False + polymorphic_key: Optional[str] = None + + if self.polymorphic_on is not None: + setter = True + + if isinstance(self.polymorphic_on, str): + # polymorphic_on specified as a string - link + # it to mapped ColumnProperty + try: + self.polymorphic_on = self._props[self.polymorphic_on] + except KeyError as err: + raise sa_exc.ArgumentError( + "Can't determine polymorphic_on " + "value '%s' - no attribute is " + "mapped to this name." % self.polymorphic_on + ) from err + + if self.polymorphic_on in self._columntoproperty: + # polymorphic_on is a column that is already mapped + # to a ColumnProperty + prop = self._columntoproperty[self.polymorphic_on] + elif isinstance(self.polymorphic_on, MapperProperty): + # polymorphic_on is directly a MapperProperty, + # ensure it's a ColumnProperty + if not isinstance( + self.polymorphic_on, properties.ColumnProperty + ): + raise sa_exc.ArgumentError( + "Only direct column-mapped " + "property or SQL expression " + "can be passed for polymorphic_on" + ) + prop = self.polymorphic_on + else: + # polymorphic_on is a Column or SQL expression and + # doesn't appear to be mapped. this means it can be 1. + # only present in the with_polymorphic selectable or + # 2. a totally standalone SQL expression which we'd + # hope is compatible with this mapper's persist_selectable + col = self.persist_selectable.corresponding_column( + self.polymorphic_on + ) + if col is None: + # polymorphic_on doesn't derive from any + # column/expression isn't present in the mapped + # table. we will make a "hidden" ColumnProperty + # for it. Just check that if it's directly a + # schema.Column and we have with_polymorphic, it's + # likely a user error if the schema.Column isn't + # represented somehow in either persist_selectable or + # with_polymorphic. Otherwise as of 0.7.4 we + # just go with it and assume the user wants it + # that way (i.e. 
a CASE statement) + setter = False + instrument = False + col = self.polymorphic_on + if isinstance(col, schema.Column) and ( + self.with_polymorphic is None + or self.with_polymorphic[1] is None + or self.with_polymorphic[1].corresponding_column(col) + is None + ): + raise sa_exc.InvalidRequestError( + "Could not map polymorphic_on column " + "'%s' to the mapped table - polymorphic " + "loads will not function properly" + % col.description + ) + else: + # column/expression that polymorphic_on derives from + # is present in our mapped table + # and is probably mapped, but polymorphic_on itself + # is not. This happens when + # the polymorphic_on is only directly present in the + # with_polymorphic selectable, as when use + # polymorphic_union. + # we'll make a separate ColumnProperty for it. + instrument = True + key = getattr(col, "key", None) + if key: + if self._should_exclude(key, key, False, col): + raise sa_exc.InvalidRequestError( + "Cannot exclude or override the " + "discriminator column %r" % key + ) + else: + self.polymorphic_on = col = col.label("_sa_polymorphic_on") + key = col.key + + prop = properties.ColumnProperty(col, _instrument=instrument) + self._configure_property(key, prop, init=init, setparent=True) + + # the actual polymorphic_on should be the first public-facing + # column in the property + self.polymorphic_on = prop.columns[0] + polymorphic_key = prop.key + else: + # no polymorphic_on was set. + # check inheriting mappers for one. + for mapper in self.iterate_to_root(): + # determine if polymorphic_on of the parent + # should be propagated here. If the col + # is present in our mapped table, or if our mapped + # table is the same as the parent (i.e. single table + # inheritance), we can use it + if mapper.polymorphic_on is not None: + if self.persist_selectable is mapper.persist_selectable: + self.polymorphic_on = mapper.polymorphic_on + else: + self.polymorphic_on = ( + self.persist_selectable + ).corresponding_column(mapper.polymorphic_on) + # we can use the parent mapper's _set_polymorphic_identity + # directly; it ensures the polymorphic_identity of the + # instance's mapper is used so is portable to subclasses. + if self.polymorphic_on is not None: + self._set_polymorphic_identity = ( + mapper._set_polymorphic_identity + ) + self._polymorphic_attr_key = ( + mapper._polymorphic_attr_key + ) + self._validate_polymorphic_identity = ( + mapper._validate_polymorphic_identity + ) + else: + self._set_polymorphic_identity = None + self._polymorphic_attr_key = None + return + + if self.polymorphic_abstract and self.polymorphic_on is None: + raise sa_exc.InvalidRequestError( + "The Mapper.polymorphic_abstract parameter may only be used " + "on a mapper hierarchy which includes the " + "Mapper.polymorphic_on parameter at the base of the hierarchy." + ) + + if setter: + + def _set_polymorphic_identity(state): + dict_ = state.dict + # TODO: what happens if polymorphic_on column attribute name + # does not match .key? 
+ + polymorphic_identity = ( + state.manager.mapper.polymorphic_identity + ) + if ( + polymorphic_identity is None + and state.manager.mapper.polymorphic_abstract + ): + raise sa_exc.InvalidRequestError( + f"Can't instantiate class for {state.manager.mapper}; " + "mapper is marked polymorphic_abstract=True" + ) + + state.get_impl(polymorphic_key).set( + state, + dict_, + polymorphic_identity, + None, + ) + + self._polymorphic_attr_key = polymorphic_key + + def _validate_polymorphic_identity(mapper, state, dict_): + if ( + polymorphic_key in dict_ + and dict_[polymorphic_key] + not in mapper._acceptable_polymorphic_identities + ): + util.warn_limited( + "Flushing object %s with " + "incompatible polymorphic identity %r; the " + "object may not refresh and/or load correctly", + (state_str(state), dict_[polymorphic_key]), + ) + + self._set_polymorphic_identity = _set_polymorphic_identity + self._validate_polymorphic_identity = ( + _validate_polymorphic_identity + ) + else: + self._polymorphic_attr_key = None + self._set_polymorphic_identity = None + + _validate_polymorphic_identity = None + + @HasMemoized.memoized_attribute + def _version_id_prop(self): + if self.version_id_col is not None: + return self._columntoproperty[self.version_id_col] + else: + return None + + @HasMemoized.memoized_attribute + def _acceptable_polymorphic_identities(self): + identities = set() + + stack = deque([self]) + while stack: + item = stack.popleft() + if item.persist_selectable is self.persist_selectable: + identities.add(item.polymorphic_identity) + stack.extend(item._inheriting_mappers) + + return identities + + @HasMemoized.memoized_attribute + def _prop_set(self): + return frozenset(self._props.values()) + + @util.preload_module("sqlalchemy.orm.descriptor_props") + def _adapt_inherited_property(self, key, prop, init): + descriptor_props = util.preloaded.orm_descriptor_props + + if not self.concrete: + self._configure_property(key, prop, init=False, setparent=False) + elif key not in self._props: + # determine if the class implements this attribute; if not, + # or if it is implemented by the attribute that is handling the + # given superclass-mapped property, then we need to report that we + # can't use this at the instance level since we are a concrete + # mapper and we don't map this. don't trip user-defined + # descriptors that might have side effects when invoked. 
+ implementing_attribute = self.class_manager._get_class_attr_mro( + key, prop + ) + if implementing_attribute is prop or ( + isinstance( + implementing_attribute, attributes.InstrumentedAttribute + ) + and implementing_attribute._parententity is prop.parent + ): + self._configure_property( + key, + descriptor_props.ConcreteInheritedProperty(), + init=init, + setparent=True, + ) + + @util.preload_module("sqlalchemy.orm.descriptor_props") + def _configure_property( + self, + key: str, + prop_arg: Union[KeyedColumnElement[Any], MapperProperty[Any]], + *, + init: bool = True, + setparent: bool = True, + warn_for_existing: bool = False, + ) -> MapperProperty[Any]: + descriptor_props = util.preloaded.orm_descriptor_props + self._log( + "_configure_property(%s, %s)", key, prop_arg.__class__.__name__ + ) + + if not isinstance(prop_arg, MapperProperty): + prop: MapperProperty[Any] = self._property_from_column( + key, prop_arg + ) + else: + prop = prop_arg + + if isinstance(prop, properties.ColumnProperty): + col = self.persist_selectable.corresponding_column(prop.columns[0]) + + # if the column is not present in the mapped table, + # test if a column has been added after the fact to the + # parent table (or their parent, etc.) [ticket:1570] + if col is None and self.inherits: + path = [self] + for m in self.inherits.iterate_to_root(): + col = m.local_table.corresponding_column(prop.columns[0]) + if col is not None: + for m2 in path: + m2.persist_selectable._refresh_for_new_column(col) + col = self.persist_selectable.corresponding_column( + prop.columns[0] + ) + break + path.append(m) + + # subquery expression, column not present in the mapped + # selectable. + if col is None: + col = prop.columns[0] + + # column is coming in after _readonly_props was + # initialized; check for 'readonly' + if hasattr(self, "_readonly_props") and ( + not hasattr(col, "table") + or col.table not in self._cols_by_table + ): + self._readonly_props.add(prop) + + else: + # if column is coming in after _cols_by_table was + # initialized, ensure the col is in the right set + if ( + hasattr(self, "_cols_by_table") + and col.table in self._cols_by_table + and col not in self._cols_by_table[col.table] + ): + self._cols_by_table[col.table].add(col) + + # if this properties.ColumnProperty represents the "polymorphic + # discriminator" column, mark it. We'll need this when rendering + # columns in SELECT statements. + if not hasattr(prop, "_is_polymorphic_discriminator"): + prop._is_polymorphic_discriminator = ( + col is self.polymorphic_on + or prop.columns[0] is self.polymorphic_on + ) + + if isinstance(col, expression.Label): + # new in 1.4, get column property against expressions + # to be addressable in subqueries + col.key = col._tq_key_label = key + + self.columns.add(col, key) + + for col in prop.columns: + for proxy_col in col.proxy_set: + self._columntoproperty[proxy_col] = prop + + if getattr(prop, "key", key) != key: + util.warn( + f"ORM mapped property {self.class_.__name__}.{prop.key} being " + "assigned to attribute " + f"{key!r} is already associated with " + f"attribute {prop.key!r}. The attribute will be de-associated " + f"from {prop.key!r}." 
+ ) + + prop.key = key + + if setparent: + prop.set_parent(self, init) + + if key in self._props and getattr( + self._props[key], "_mapped_by_synonym", False + ): + syn = self._props[key]._mapped_by_synonym + raise sa_exc.ArgumentError( + "Can't call map_column=True for synonym %r=%r, " + "a ColumnProperty already exists keyed to the name " + "%r for column %r" % (syn, key, key, syn) + ) + + # replacement cases + + # case one: prop is replacing a prop that we have mapped. this is + # independent of whatever might be in the actual class dictionary + if ( + key in self._props + and not isinstance( + self._props[key], descriptor_props.ConcreteInheritedProperty + ) + and not isinstance(prop, descriptor_props.SynonymProperty) + ): + if warn_for_existing: + util.warn_deprecated( + f"User-placed attribute {self.class_.__name__}.{key} on " + f"{self} is replacing an existing ORM-mapped attribute. " + "Behavior is not fully defined in this case. This " + "use is deprecated and will raise an error in a future " + "release", + "2.0", + ) + oldprop = self._props[key] + self._path_registry.pop(oldprop, None) + + # case two: prop is replacing an attribute on the class of some kind. + # we have to be more careful here since it's normal when using + # Declarative that all the "declared attributes" on the class + # get replaced. + elif ( + warn_for_existing + and self.class_.__dict__.get(key, None) is not None + and not isinstance(prop, descriptor_props.SynonymProperty) + and not isinstance( + self._props.get(key, None), + descriptor_props.ConcreteInheritedProperty, + ) + ): + util.warn_deprecated( + f"User-placed attribute {self.class_.__name__}.{key} on " + f"{self} is replacing an existing class-bound " + "attribute of the same name. " + "Behavior is not fully defined in this case. This " + "use is deprecated and will raise an error in a future " + "release", + "2.0", + ) + + self._props[key] = prop + + if not self.non_primary: + prop.instrument_class(self) + + for mapper in self._inheriting_mappers: + mapper._adapt_inherited_property(key, prop, init) + + if init: + prop.init() + prop.post_instrument_class(self) + + if self.configured: + self._expire_memoizations() + + return prop + + def _make_prop_from_column( + self, + key: str, + column: Union[ + Sequence[KeyedColumnElement[Any]], KeyedColumnElement[Any] + ], + ) -> ColumnProperty[Any]: + columns = util.to_list(column) + mapped_column = [] + for c in columns: + mc = self.persist_selectable.corresponding_column(c) + if mc is None: + mc = self.local_table.corresponding_column(c) + if mc is not None: + # if the column is in the local table but not the + # mapped table, this corresponds to adding a + # column after the fact to the local table. + # [ticket:1523] + self.persist_selectable._refresh_for_new_column(mc) + mc = self.persist_selectable.corresponding_column(c) + if mc is None: + raise sa_exc.ArgumentError( + "When configuring property '%s' on %s, " + "column '%s' is not represented in the mapper's " + "table. Use the `column_property()` function to " + "force this column to be mapped as a read-only " + "attribute." 
% (key, self, c) + ) + mapped_column.append(mc) + return properties.ColumnProperty(*mapped_column) + + def _reconcile_prop_with_incoming_columns( + self, + key: str, + existing_prop: MapperProperty[Any], + warn_only: bool, + incoming_prop: Optional[ColumnProperty[Any]] = None, + single_column: Optional[KeyedColumnElement[Any]] = None, + ) -> ColumnProperty[Any]: + if incoming_prop and ( + self.concrete + or not isinstance(existing_prop, properties.ColumnProperty) + ): + return incoming_prop + + existing_column = existing_prop.columns[0] + + if incoming_prop and existing_column in incoming_prop.columns: + return incoming_prop + + if incoming_prop is None: + assert single_column is not None + incoming_column = single_column + equated_pair_key = (existing_prop.columns[0], incoming_column) + else: + assert single_column is None + incoming_column = incoming_prop.columns[0] + equated_pair_key = (incoming_column, existing_prop.columns[0]) + + if ( + ( + not self._inherits_equated_pairs + or (equated_pair_key not in self._inherits_equated_pairs) + ) + and not existing_column.shares_lineage(incoming_column) + and existing_column is not self.version_id_col + and incoming_column is not self.version_id_col + ): + msg = ( + "Implicitly combining column %s with column " + "%s under attribute '%s'. Please configure one " + "or more attributes for these same-named columns " + "explicitly." + % ( + existing_prop.columns[-1], + incoming_column, + key, + ) + ) + if warn_only: + util.warn(msg) + else: + raise sa_exc.InvalidRequestError(msg) + + # existing properties.ColumnProperty from an inheriting + # mapper. make a copy and append our column to it + # breakpoint() + new_prop = existing_prop.copy() + + new_prop.columns.insert(0, incoming_column) + self._log( + "inserting column to existing list " + "in properties.ColumnProperty %s", + key, + ) + return new_prop # type: ignore + + @util.preload_module("sqlalchemy.orm.descriptor_props") + def _property_from_column( + self, + key: str, + column: KeyedColumnElement[Any], + ) -> ColumnProperty[Any]: + """generate/update a :class:`.ColumnProperty` given a + :class:`_schema.Column` or other SQL expression object.""" + + descriptor_props = util.preloaded.orm_descriptor_props + + prop = self._props.get(key) + + if isinstance(prop, properties.ColumnProperty): + return self._reconcile_prop_with_incoming_columns( + key, + prop, + single_column=column, + warn_only=prop.parent is not self, + ) + elif prop is None or isinstance( + prop, descriptor_props.ConcreteInheritedProperty + ): + return self._make_prop_from_column(key, column) + else: + raise sa_exc.ArgumentError( + "WARNING: when configuring property '%s' on %s, " + "column '%s' conflicts with property '%r'. " + "To resolve this, map the column to the class under a " + "different name in the 'properties' dictionary. Or, " + "to remove all awareness of the column entirely " + "(including its availability as a foreign key), " + "use the 'include_properties' or 'exclude_properties' " + "mapper arguments to control specifically which table " + "columns get mapped." 
% (key, self, column.key, prop) + ) + + @util.langhelpers.tag_method_for_warnings( + "This warning originated from the `configure_mappers()` process, " + "which was invoked automatically in response to a user-initiated " + "operation.", + sa_exc.SAWarning, + ) + def _check_configure(self) -> None: + if self.registry._new_mappers: + _configure_registries({self.registry}, cascade=True) + + def _post_configure_properties(self) -> None: + """Call the ``init()`` method on all ``MapperProperties`` + attached to this mapper. + + This is a deferred configuration step which is intended + to execute once all mappers have been constructed. + + """ + + self._log("_post_configure_properties() started") + l = [(key, prop) for key, prop in self._props.items()] + for key, prop in l: + self._log("initialize prop %s", key) + + if prop.parent is self and not prop._configure_started: + prop.init() + + if prop._configure_finished: + prop.post_instrument_class(self) + + self._log("_post_configure_properties() complete") + self.configured = True + + def add_properties(self, dict_of_properties): + """Add the given dictionary of properties to this mapper, + using `add_property`. + + """ + for key, value in dict_of_properties.items(): + self.add_property(key, value) + + def add_property( + self, key: str, prop: Union[Column[Any], MapperProperty[Any]] + ) -> None: + """Add an individual MapperProperty to this mapper. + + If the mapper has not been configured yet, just adds the + property to the initial properties dictionary sent to the + constructor. If this Mapper has already been configured, then + the given MapperProperty is configured immediately. + + """ + prop = self._configure_property( + key, prop, init=self.configured, warn_for_existing=True + ) + assert isinstance(prop, MapperProperty) + self._init_properties[key] = prop + + def _expire_memoizations(self) -> None: + for mapper in self.iterate_to_root(): + mapper._reset_memoizations() + + @property + def _log_desc(self) -> str: + return ( + "(" + + self.class_.__name__ + + "|" + + ( + self.local_table is not None + and self.local_table.description + or str(self.local_table) + ) + + (self.non_primary and "|non-primary" or "") + + ")" + ) + + def _log(self, msg: str, *args: Any) -> None: + self.logger.info("%s " + msg, *((self._log_desc,) + args)) + + def _log_debug(self, msg: str, *args: Any) -> None: + self.logger.debug("%s " + msg, *((self._log_desc,) + args)) + + def __repr__(self) -> str: + return "" % (id(self), self.class_.__name__) + + def __str__(self) -> str: + return "Mapper[%s%s(%s)]" % ( + self.class_.__name__, + self.non_primary and " (non-primary)" or "", + ( + self.local_table.description + if self.local_table is not None + else self.persist_selectable.description + ), + ) + + def _is_orphan(self, state: InstanceState[_O]) -> bool: + orphan_possible = False + for mapper in self.iterate_to_root(): + for key, cls in mapper._delete_orphans: + orphan_possible = True + + has_parent = attributes.manager_of_class(cls).has_parent( + state, key, optimistic=state.has_identity + ) + + if self.legacy_is_orphan and has_parent: + return False + elif not self.legacy_is_orphan and not has_parent: + return True + + if self.legacy_is_orphan: + return orphan_possible + else: + return False + + def has_property(self, key: str) -> bool: + return key in self._props + + def get_property( + self, key: str, _configure_mappers: bool = False + ) -> MapperProperty[Any]: + """return a MapperProperty associated with the given key.""" + + if _configure_mappers: + 
self._check_configure() + + try: + return self._props[key] + except KeyError as err: + raise sa_exc.InvalidRequestError( + f"Mapper '{self}' has no property '{key}'. If this property " + "was indicated from other mappers or configure events, ensure " + "registry.configure() has been called." + ) from err + + def get_property_by_column( + self, column: ColumnElement[_T] + ) -> MapperProperty[_T]: + """Given a :class:`_schema.Column` object, return the + :class:`.MapperProperty` which maps this column.""" + + return self._columntoproperty[column] + + @property + def iterate_properties(self): + """return an iterator of all MapperProperty objects.""" + + return iter(self._props.values()) + + def _mappers_from_spec( + self, spec: Any, selectable: Optional[FromClause] + ) -> Sequence[Mapper[Any]]: + """given a with_polymorphic() argument, return the set of mappers it + represents. + + Trims the list of mappers to just those represented within the given + selectable, if present. This helps some more legacy-ish mappings. + + """ + if spec == "*": + mappers = list(self.self_and_descendants) + elif spec: + mapper_set = set() + for m in util.to_list(spec): + m = _class_to_mapper(m) + if not m.isa(self): + raise sa_exc.InvalidRequestError( + "%r does not inherit from %r" % (m, self) + ) + + if selectable is None: + mapper_set.update(m.iterate_to_root()) + else: + mapper_set.add(m) + mappers = [m for m in self.self_and_descendants if m in mapper_set] + else: + mappers = [] + + if selectable is not None: + tables = set( + sql_util.find_tables(selectable, include_aliases=True) + ) + mappers = [m for m in mappers if m.local_table in tables] + return mappers + + def _selectable_from_mappers( + self, mappers: Iterable[Mapper[Any]], innerjoin: bool + ) -> FromClause: + """given a list of mappers (assumed to be within this mapper's + inheritance hierarchy), construct an outerjoin amongst those mapper's + mapped tables. + + """ + from_obj = self.persist_selectable + for m in mappers: + if m is self: + continue + if m.concrete: + raise sa_exc.InvalidRequestError( + "'with_polymorphic()' requires 'selectable' argument " + "when concrete-inheriting mappers are used." + ) + elif not m.single: + if innerjoin: + from_obj = from_obj.join( + m.local_table, m.inherit_condition + ) + else: + from_obj = from_obj.outerjoin( + m.local_table, m.inherit_condition + ) + + return from_obj + + @HasMemoized.memoized_attribute + def _version_id_has_server_side_value(self) -> bool: + vid_col = self.version_id_col + + if vid_col is None: + return False + + elif not isinstance(vid_col, Column): + return True + else: + return vid_col.server_default is not None or ( + vid_col.default is not None + and ( + not vid_col.default.is_scalar + and not vid_col.default.is_callable + ) + ) + + @HasMemoized.memoized_attribute + def _single_table_criterion(self): + if self.single and self.inherits and self.polymorphic_on is not None: + return self.polymorphic_on._annotate( + {"parententity": self, "parentmapper": self} + ).in_( + [ + m.polymorphic_identity + for m in self.self_and_descendants + if not m.polymorphic_abstract + ] + ) + else: + return None + + @HasMemoized.memoized_attribute + def _has_aliased_polymorphic_fromclause(self): + """return True if with_polymorphic[1] is an aliased fromclause, + like a subquery. + + As of #8168, polymorphic adaption with ORMAdapter is used only + if this is present. 
+ + """ + return self.with_polymorphic and isinstance( + self.with_polymorphic[1], + expression.AliasedReturnsRows, + ) + + @HasMemoized.memoized_attribute + def _should_select_with_poly_adapter(self): + """determine if _MapperEntity or _ORMColumnEntity will need to use + polymorphic adaption when setting up a SELECT as well as fetching + rows for mapped classes and subclasses against this Mapper. + + moved here from context.py for #8456 to generalize the ruleset + for this condition. + + """ + + # this has been simplified as of #8456. + # rule is: if we have a with_polymorphic or a concrete-style + # polymorphic selectable, *or* if the base mapper has either of those, + # we turn on the adaption thing. if not, we do *no* adaption. + # + # (UPDATE for #8168: the above comment was not accurate, as we were + # still saying "do polymorphic" if we were using an auto-generated + # flattened JOIN for with_polymorphic.) + # + # this splits the behavior among the "regular" joined inheritance + # and single inheritance mappers, vs. the "weird / difficult" + # concrete and joined inh mappings that use a with_polymorphic of + # some kind or polymorphic_union. + # + # note we have some tests in test_polymorphic_rel that query against + # a subclass, then refer to the superclass that has a with_polymorphic + # on it (such as test_join_from_polymorphic_explicit_aliased_three). + # these tests actually adapt the polymorphic selectable (like, the + # UNION or the SELECT subquery with JOIN in it) to be just the simple + # subclass table. Hence even if we are a "plain" inheriting mapper + # but our base has a wpoly on it, we turn on adaption. This is a + # legacy case we should probably disable. + # + # + # UPDATE: simplified way more as of #8168. polymorphic adaption + # is turned off even if with_polymorphic is set, as long as there + # is no user-defined aliased selectable / subquery configured. + # this scales back the use of polymorphic adaption in practice + # to basically no cases except for concrete inheritance with a + # polymorphic base class. + # + return ( + self._has_aliased_polymorphic_fromclause + or self._requires_row_aliasing + or (self.base_mapper._has_aliased_polymorphic_fromclause) + or self.base_mapper._requires_row_aliasing + ) + + @HasMemoized.memoized_attribute + def _with_polymorphic_mappers(self) -> Sequence[Mapper[Any]]: + self._check_configure() + + if not self.with_polymorphic: + return [] + return self._mappers_from_spec(*self.with_polymorphic) + + @HasMemoized.memoized_attribute + def _post_inspect(self): + """This hook is invoked by attribute inspection. + + E.g. when Query calls: + + coercions.expect(roles.ColumnsClauseRole, ent, keep_inspect=True) + + This allows the inspection process run a configure mappers hook. + + """ + self._check_configure() + + @HasMemoized_ro_memoized_attribute + def _with_polymorphic_selectable(self) -> FromClause: + if not self.with_polymorphic: + return self.persist_selectable + + spec, selectable = self.with_polymorphic + if selectable is not None: + return selectable + else: + return self._selectable_from_mappers( + self._mappers_from_spec(spec, selectable), False + ) + + with_polymorphic_mappers = _with_polymorphic_mappers + """The list of :class:`_orm.Mapper` objects included in the + default "polymorphic" query. 
+ + """ + + @HasMemoized_ro_memoized_attribute + def _insert_cols_evaluating_none(self): + return { + table: frozenset( + col for col in columns if col.type.should_evaluate_none + ) + for table, columns in self._cols_by_table.items() + } + + @HasMemoized.memoized_attribute + def _insert_cols_as_none(self): + return { + table: frozenset( + col.key + for col in columns + if not col.primary_key + and not col.server_default + and not col.default + and not col.type.should_evaluate_none + ) + for table, columns in self._cols_by_table.items() + } + + @HasMemoized.memoized_attribute + def _propkey_to_col(self): + return { + table: {self._columntoproperty[col].key: col for col in columns} + for table, columns in self._cols_by_table.items() + } + + @HasMemoized.memoized_attribute + def _pk_keys_by_table(self): + return { + table: frozenset([col.key for col in pks]) + for table, pks in self._pks_by_table.items() + } + + @HasMemoized.memoized_attribute + def _pk_attr_keys_by_table(self): + return { + table: frozenset([self._columntoproperty[col].key for col in pks]) + for table, pks in self._pks_by_table.items() + } + + @HasMemoized.memoized_attribute + def _server_default_cols( + self, + ) -> Mapping[FromClause, FrozenSet[Column[Any]]]: + return { + table: frozenset( + [ + col + for col in cast("Iterable[Column[Any]]", columns) + if col.server_default is not None + or ( + col.default is not None + and col.default.is_clause_element + ) + ] + ) + for table, columns in self._cols_by_table.items() + } + + @HasMemoized.memoized_attribute + def _server_onupdate_default_cols( + self, + ) -> Mapping[FromClause, FrozenSet[Column[Any]]]: + return { + table: frozenset( + [ + col + for col in cast("Iterable[Column[Any]]", columns) + if col.server_onupdate is not None + or ( + col.onupdate is not None + and col.onupdate.is_clause_element + ) + ] + ) + for table, columns in self._cols_by_table.items() + } + + @HasMemoized.memoized_attribute + def _server_default_col_keys(self) -> Mapping[FromClause, FrozenSet[str]]: + return { + table: frozenset(col.key for col in cols if col.key is not None) + for table, cols in self._server_default_cols.items() + } + + @HasMemoized.memoized_attribute + def _server_onupdate_default_col_keys( + self, + ) -> Mapping[FromClause, FrozenSet[str]]: + return { + table: frozenset(col.key for col in cols if col.key is not None) + for table, cols in self._server_onupdate_default_cols.items() + } + + @HasMemoized.memoized_attribute + def _server_default_plus_onupdate_propkeys(self) -> Set[str]: + result: Set[str] = set() + + col_to_property = self._columntoproperty + for table, columns in self._server_default_cols.items(): + result.update( + col_to_property[col].key + for col in columns.intersection(col_to_property) + ) + for table, columns in self._server_onupdate_default_cols.items(): + result.update( + col_to_property[col].key + for col in columns.intersection(col_to_property) + ) + return result + + @HasMemoized.memoized_instancemethod + def __clause_element__(self): + annotations: Dict[str, Any] = { + "entity_namespace": self, + "parententity": self, + "parentmapper": self, + } + if self.persist_selectable is not self.local_table: + # joined table inheritance, with polymorphic selectable, + # etc. 
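+            # the "dml_table" annotation points ORM-enabled INSERT / UPDATE /
+            # DELETE statements at the mapper's local table rather than at
+            # the full persist / polymorphic selectable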
+ annotations["dml_table"] = self.local_table._annotate( + { + "entity_namespace": self, + "parententity": self, + "parentmapper": self, + } + )._set_propagate_attrs( + {"compile_state_plugin": "orm", "plugin_subject": self} + ) + + return self.selectable._annotate(annotations)._set_propagate_attrs( + {"compile_state_plugin": "orm", "plugin_subject": self} + ) + + @util.memoized_property + def select_identity_token(self): + return ( + expression.null() + ._annotate( + { + "entity_namespace": self, + "parententity": self, + "parentmapper": self, + "identity_token": True, + } + ) + ._set_propagate_attrs( + {"compile_state_plugin": "orm", "plugin_subject": self} + ) + ) + + @property + def selectable(self) -> FromClause: + """The :class:`_schema.FromClause` construct this + :class:`_orm.Mapper` selects from by default. + + Normally, this is equivalent to :attr:`.persist_selectable`, unless + the ``with_polymorphic`` feature is in use, in which case the + full "polymorphic" selectable is returned. + + """ + return self._with_polymorphic_selectable + + def _with_polymorphic_args( + self, + spec: Any = None, + selectable: Union[Literal[False, None], FromClause] = False, + innerjoin: bool = False, + ) -> Tuple[Sequence[Mapper[Any]], FromClause]: + if selectable not in (None, False): + selectable = coercions.expect( + roles.StrictFromClauseRole, selectable, allow_select=True + ) + + if self.with_polymorphic: + if not spec: + spec = self.with_polymorphic[0] + if selectable is False: + selectable = self.with_polymorphic[1] + elif selectable is False: + selectable = None + mappers = self._mappers_from_spec(spec, selectable) + if selectable is not None: + return mappers, selectable + else: + return mappers, self._selectable_from_mappers(mappers, innerjoin) + + @HasMemoized.memoized_attribute + def _polymorphic_properties(self): + return list( + self._iterate_polymorphic_properties( + self._with_polymorphic_mappers + ) + ) + + @property + def _all_column_expressions(self): + poly_properties = self._polymorphic_properties + adapter = self._polymorphic_adapter + + return [ + adapter.columns[c] if adapter else c + for prop in poly_properties + if isinstance(prop, properties.ColumnProperty) + and prop._renders_in_subqueries + for c in prop.columns + ] + + def _columns_plus_keys(self, polymorphic_mappers=()): + if polymorphic_mappers: + poly_properties = self._iterate_polymorphic_properties( + polymorphic_mappers + ) + else: + poly_properties = self._polymorphic_properties + + return [ + (prop.key, prop.columns[0]) + for prop in poly_properties + if isinstance(prop, properties.ColumnProperty) + ] + + @HasMemoized.memoized_attribute + def _polymorphic_adapter(self) -> Optional[orm_util.ORMAdapter]: + if self._has_aliased_polymorphic_fromclause: + return orm_util.ORMAdapter( + orm_util._TraceAdaptRole.MAPPER_POLYMORPHIC_ADAPTER, + self, + selectable=self.selectable, + equivalents=self._equivalent_columns, + limit_on_entity=False, + ) + else: + return None + + def _iterate_polymorphic_properties(self, mappers=None): + """Return an iterator of MapperProperty objects which will render into + a SELECT.""" + if mappers is None: + mappers = self._with_polymorphic_mappers + + if not mappers: + for c in self.iterate_properties: + yield c + else: + # in the polymorphic case, filter out discriminator columns + # from other mappers, as these are sometimes dependent on that + # mapper's polymorphic selectable (which we don't want rendered) + for c in util.unique_list( + chain( + *[ + list(mapper.iterate_properties) + for 
mapper in [self] + mappers + ] + ) + ): + if getattr(c, "_is_polymorphic_discriminator", False) and ( + self.polymorphic_on is None + or c.columns[0] is not self.polymorphic_on + ): + continue + yield c + + @HasMemoized.memoized_attribute + def attrs(self) -> util.ReadOnlyProperties[MapperProperty[Any]]: + """A namespace of all :class:`.MapperProperty` objects + associated this mapper. + + This is an object that provides each property based on + its key name. For instance, the mapper for a + ``User`` class which has ``User.name`` attribute would + provide ``mapper.attrs.name``, which would be the + :class:`.ColumnProperty` representing the ``name`` + column. The namespace object can also be iterated, + which would yield each :class:`.MapperProperty`. + + :class:`_orm.Mapper` has several pre-filtered views + of this attribute which limit the types of properties + returned, including :attr:`.synonyms`, :attr:`.column_attrs`, + :attr:`.relationships`, and :attr:`.composites`. + + .. warning:: + + The :attr:`_orm.Mapper.attrs` accessor namespace is an + instance of :class:`.OrderedProperties`. This is + a dictionary-like object which includes a small number of + named methods such as :meth:`.OrderedProperties.items` + and :meth:`.OrderedProperties.values`. When + accessing attributes dynamically, favor using the dict-access + scheme, e.g. ``mapper.attrs[somename]`` over + ``getattr(mapper.attrs, somename)`` to avoid name collisions. + + .. seealso:: + + :attr:`_orm.Mapper.all_orm_descriptors` + + """ + + self._check_configure() + return util.ReadOnlyProperties(self._props) + + @HasMemoized.memoized_attribute + def all_orm_descriptors(self) -> util.ReadOnlyProperties[InspectionAttr]: + """A namespace of all :class:`.InspectionAttr` attributes associated + with the mapped class. + + These attributes are in all cases Python :term:`descriptors` + associated with the mapped class or its superclasses. + + This namespace includes attributes that are mapped to the class + as well as attributes declared by extension modules. + It includes any Python descriptor type that inherits from + :class:`.InspectionAttr`. This includes + :class:`.QueryableAttribute`, as well as extension types such as + :class:`.hybrid_property`, :class:`.hybrid_method` and + :class:`.AssociationProxy`. + + To distinguish between mapped attributes and extension attributes, + the attribute :attr:`.InspectionAttr.extension_type` will refer + to a constant that distinguishes between different extension types. + + The sorting of the attributes is based on the following rules: + + 1. Iterate through the class and its superclasses in order from + subclass to superclass (i.e. iterate through ``cls.__mro__``) + + 2. For each class, yield the attributes in the order in which they + appear in ``__dict__``, with the exception of those in step + 3 below. In Python 3.6 and above this ordering will be the + same as that of the class' construction, with the exception + of attributes that were added after the fact by the application + or the mapper. + + 3. If a certain attribute key is also in the superclass ``__dict__``, + then it's included in the iteration for that class, and not the + class in which it first appeared. + + The above process produces an ordering that is deterministic in terms + of the order in which attributes were assigned to the class. + + .. versionchanged:: 1.3.19 ensured deterministic ordering for + :meth:`_orm.Mapper.all_orm_descriptors`. 
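+
+        For example, for a hypothetical mapped class ``User`` that also
+        defines a :class:`.hybrid_property`, both the mapped attributes and
+        the hybrid appear in this namespace, distinguishable by their
+        :attr:`.InspectionAttr.extension_type`::
+
+            from sqlalchemy import inspect
+
+            for key, attr in inspect(User).all_orm_descriptors.items():
+                print(key, attr.extension_type)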
+ + When dealing with a :class:`.QueryableAttribute`, the + :attr:`.QueryableAttribute.property` attribute refers to the + :class:`.MapperProperty` property, which is what you get when + referring to the collection of mapped properties via + :attr:`_orm.Mapper.attrs`. + + .. warning:: + + The :attr:`_orm.Mapper.all_orm_descriptors` + accessor namespace is an + instance of :class:`.OrderedProperties`. This is + a dictionary-like object which includes a small number of + named methods such as :meth:`.OrderedProperties.items` + and :meth:`.OrderedProperties.values`. When + accessing attributes dynamically, favor using the dict-access + scheme, e.g. ``mapper.all_orm_descriptors[somename]`` over + ``getattr(mapper.all_orm_descriptors, somename)`` to avoid name + collisions. + + .. seealso:: + + :attr:`_orm.Mapper.attrs` + + """ + return util.ReadOnlyProperties( + dict(self.class_manager._all_sqla_attributes()) + ) + + @HasMemoized.memoized_attribute + @util.preload_module("sqlalchemy.orm.descriptor_props") + def _pk_synonyms(self) -> Dict[str, str]: + """return a dictionary of {syn_attribute_name: pk_attr_name} for + all synonyms that refer to primary key columns + + """ + descriptor_props = util.preloaded.orm_descriptor_props + + pk_keys = {prop.key for prop in self._identity_key_props} + + return { + syn.key: syn.name + for k, syn in self._props.items() + if isinstance(syn, descriptor_props.SynonymProperty) + and syn.name in pk_keys + } + + @HasMemoized.memoized_attribute + @util.preload_module("sqlalchemy.orm.descriptor_props") + def synonyms(self) -> util.ReadOnlyProperties[SynonymProperty[Any]]: + """Return a namespace of all :class:`.Synonym` + properties maintained by this :class:`_orm.Mapper`. + + .. seealso:: + + :attr:`_orm.Mapper.attrs` - namespace of all + :class:`.MapperProperty` + objects. + + """ + descriptor_props = util.preloaded.orm_descriptor_props + + return self._filter_properties(descriptor_props.SynonymProperty) + + @property + def entity_namespace(self): + return self.class_ + + @HasMemoized.memoized_attribute + def column_attrs(self) -> util.ReadOnlyProperties[ColumnProperty[Any]]: + """Return a namespace of all :class:`.ColumnProperty` + properties maintained by this :class:`_orm.Mapper`. + + .. seealso:: + + :attr:`_orm.Mapper.attrs` - namespace of all + :class:`.MapperProperty` + objects. + + """ + return self._filter_properties(properties.ColumnProperty) + + @HasMemoized.memoized_attribute + @util.preload_module("sqlalchemy.orm.relationships") + def relationships( + self, + ) -> util.ReadOnlyProperties[RelationshipProperty[Any]]: + """A namespace of all :class:`.Relationship` properties + maintained by this :class:`_orm.Mapper`. + + .. warning:: + + the :attr:`_orm.Mapper.relationships` accessor namespace is an + instance of :class:`.OrderedProperties`. This is + a dictionary-like object which includes a small number of + named methods such as :meth:`.OrderedProperties.items` + and :meth:`.OrderedProperties.values`. When + accessing attributes dynamically, favor using the dict-access + scheme, e.g. ``mapper.relationships[somename]`` over + ``getattr(mapper.relationships, somename)`` to avoid name + collisions. + + .. seealso:: + + :attr:`_orm.Mapper.attrs` - namespace of all + :class:`.MapperProperty` + objects. 
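+
+        For example, for a hypothetical mapped class ``User`` with an
+        ``addresses`` relationship, the corresponding
+        :class:`.Relationship` may be retrieved as::
+
+            from sqlalchemy import inspect
+
+            rel = inspect(User).relationships["addresses"]
+            print(rel.key, rel.mapper.class_)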
+ + """ + return self._filter_properties( + util.preloaded.orm_relationships.RelationshipProperty + ) + + @HasMemoized.memoized_attribute + @util.preload_module("sqlalchemy.orm.descriptor_props") + def composites(self) -> util.ReadOnlyProperties[CompositeProperty[Any]]: + """Return a namespace of all :class:`.Composite` + properties maintained by this :class:`_orm.Mapper`. + + .. seealso:: + + :attr:`_orm.Mapper.attrs` - namespace of all + :class:`.MapperProperty` + objects. + + """ + return self._filter_properties( + util.preloaded.orm_descriptor_props.CompositeProperty + ) + + def _filter_properties( + self, type_: Type[_MP] + ) -> util.ReadOnlyProperties[_MP]: + self._check_configure() + return util.ReadOnlyProperties( + util.OrderedDict( + (k, v) for k, v in self._props.items() if isinstance(v, type_) + ) + ) + + @HasMemoized.memoized_attribute + def _get_clause(self): + """create a "get clause" based on the primary key. this is used + by query.get() and many-to-one lazyloads to load this item + by primary key. + + """ + params = [ + ( + primary_key, + sql.bindparam("pk_%d" % idx, type_=primary_key.type), + ) + for idx, primary_key in enumerate(self.primary_key, 1) + ] + return ( + sql.and_(*[k == v for (k, v) in params]), + util.column_dict(params), + ) + + @HasMemoized.memoized_attribute + def _equivalent_columns(self) -> _EquivalentColumnMap: + """Create a map of all equivalent columns, based on + the determination of column pairs that are equated to + one another based on inherit condition. This is designed + to work with the queries that util.polymorphic_union + comes up with, which often don't include the columns from + the base table directly (including the subclass table columns + only). + + The resulting structure is a dictionary of columns mapped + to lists of equivalent columns, e.g.:: + + {tablea.col1: {tableb.col1, tablec.col1}, tablea.col2: {tabled.col2}} + + """ # noqa: E501 + result: _EquivalentColumnMap = {} + + def visit_binary(binary): + if binary.operator == operators.eq: + if binary.left in result: + result[binary.left].add(binary.right) + else: + result[binary.left] = {binary.right} + if binary.right in result: + result[binary.right].add(binary.left) + else: + result[binary.right] = {binary.left} + + for mapper in self.base_mapper.self_and_descendants: + if mapper.inherit_condition is not None: + visitors.traverse( + mapper.inherit_condition, {}, {"binary": visit_binary} + ) + + return result + + def _is_userland_descriptor(self, assigned_name: str, obj: Any) -> bool: + if isinstance( + obj, + ( + _MappedAttribute, + instrumentation.ClassManager, + expression.ColumnElement, + ), + ): + return False + else: + return assigned_name not in self._dataclass_fields + + @HasMemoized.memoized_attribute + def _dataclass_fields(self): + return [f.name for f in util.dataclass_fields(self.class_)] + + def _should_exclude(self, name, assigned_name, local, column): + """determine whether a particular property should be implicitly + present on the class. + + This occurs when properties are propagated from an inherited class, or + are applied from the columns present in the mapped table. 
+ + """ + + if column is not None and sql_base._never_select_column(column): + return True + + # check for class-bound attributes and/or descriptors, + # either local or from an inherited class + # ignore dataclass field default values + if local: + if self.class_.__dict__.get( + assigned_name, None + ) is not None and self._is_userland_descriptor( + assigned_name, self.class_.__dict__[assigned_name] + ): + return True + else: + attr = self.class_manager._get_class_attr_mro(assigned_name, None) + if attr is not None and self._is_userland_descriptor( + assigned_name, attr + ): + return True + + if ( + self.include_properties is not None + and name not in self.include_properties + and (column is None or column not in self.include_properties) + ): + self._log("not including property %s" % (name)) + return True + + if self.exclude_properties is not None and ( + name in self.exclude_properties + or (column is not None and column in self.exclude_properties) + ): + self._log("excluding property %s" % (name)) + return True + + return False + + def common_parent(self, other: Mapper[Any]) -> bool: + """Return true if the given mapper shares a + common inherited parent as this mapper.""" + + return self.base_mapper is other.base_mapper + + def is_sibling(self, other: Mapper[Any]) -> bool: + """return true if the other mapper is an inheriting sibling to this + one. common parent but different branch + + """ + return ( + self.base_mapper is other.base_mapper + and not self.isa(other) + and not other.isa(self) + ) + + def _canload( + self, state: InstanceState[Any], allow_subtypes: bool + ) -> bool: + s = self.primary_mapper() + if self.polymorphic_on is not None or allow_subtypes: + return _state_mapper(state).isa(s) + else: + return _state_mapper(state) is s + + def isa(self, other: Mapper[Any]) -> bool: + """Return True if the this mapper inherits from the given mapper.""" + + m: Optional[Mapper[Any]] = self + while m and m is not other: + m = m.inherits + return bool(m) + + def iterate_to_root(self) -> Iterator[Mapper[Any]]: + m: Optional[Mapper[Any]] = self + while m: + yield m + m = m.inherits + + @HasMemoized.memoized_attribute + def self_and_descendants(self) -> Sequence[Mapper[Any]]: + """The collection including this mapper and all descendant mappers. + + This includes not just the immediately inheriting mappers but + all their inheriting mappers as well. + + """ + descendants = [] + stack = deque([self]) + while stack: + item = stack.popleft() + descendants.append(item) + stack.extend(item._inheriting_mappers) + return util.WeakSequence(descendants) + + def polymorphic_iterator(self) -> Iterator[Mapper[Any]]: + """Iterate through the collection including this mapper and + all descendant mappers. + + This includes not just the immediately inheriting mappers but + all their inheriting mappers as well. + + To iterate through an entire hierarchy, use + ``mapper.base_mapper.polymorphic_iterator()``. 
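+
+        For example, given a hypothetical inheritance hierarchy
+        ``Employee`` / ``Manager`` / ``Engineer`` mapped with ``Employee``
+        as the base, the hierarchy may be traversed as::
+
+            from sqlalchemy import inspect
+
+            for sub_mapper in inspect(Employee).polymorphic_iterator():
+                print(sub_mapper.class_.__name__)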
+ + """ + return iter(self.self_and_descendants) + + def primary_mapper(self) -> Mapper[Any]: + """Return the primary mapper corresponding to this mapper's class key + (class).""" + + return self.class_manager.mapper + + @property + def primary_base_mapper(self) -> Mapper[Any]: + return self.class_manager.mapper.base_mapper + + def _result_has_identity_key(self, result, adapter=None): + pk_cols: Sequence[ColumnClause[Any]] = self.primary_key + if adapter: + pk_cols = [adapter.columns[c] for c in pk_cols] + rk = result.keys() + for col in pk_cols: + if col not in rk: + return False + else: + return True + + def identity_key_from_row( + self, + row: Union[Row[Any], RowMapping], + identity_token: Optional[Any] = None, + adapter: Optional[ORMAdapter] = None, + ) -> _IdentityKeyType[_O]: + """Return an identity-map key for use in storing/retrieving an + item from the identity map. + + :param row: A :class:`.Row` or :class:`.RowMapping` produced from a + result set that selected from the ORM mapped primary key columns. + + .. versionchanged:: 2.0 + :class:`.Row` or :class:`.RowMapping` are accepted + for the "row" argument + + """ + pk_cols: Sequence[ColumnClause[Any]] = self.primary_key + if adapter: + pk_cols = [adapter.columns[c] for c in pk_cols] + + mapping: RowMapping + if hasattr(row, "_mapping"): + mapping = row._mapping + else: + mapping = row # type: ignore[assignment] + + return ( + self._identity_class, + tuple(mapping[column] for column in pk_cols), + identity_token, + ) + + def identity_key_from_primary_key( + self, + primary_key: Tuple[Any, ...], + identity_token: Optional[Any] = None, + ) -> _IdentityKeyType[_O]: + """Return an identity-map key for use in storing/retrieving an + item from an identity map. + + :param primary_key: A list of values indicating the identifier. + + """ + return ( + self._identity_class, + tuple(primary_key), + identity_token, + ) + + def identity_key_from_instance(self, instance: _O) -> _IdentityKeyType[_O]: + """Return the identity key for the given instance, based on + its primary key attributes. + + If the instance's state is expired, calling this method + will result in a database check to see if the object has been deleted. + If the row no longer exists, + :class:`~sqlalchemy.orm.exc.ObjectDeletedError` is raised. + + This value is typically also found on the instance state under the + attribute name `key`. + + """ + state = attributes.instance_state(instance) + return self._identity_key_from_state(state, PassiveFlag.PASSIVE_OFF) + + def _identity_key_from_state( + self, + state: InstanceState[_O], + passive: PassiveFlag = PassiveFlag.PASSIVE_RETURN_NO_VALUE, + ) -> _IdentityKeyType[_O]: + dict_ = state.dict + manager = state.manager + return ( + self._identity_class, + tuple( + [ + manager[prop.key].impl.get(state, dict_, passive) + for prop in self._identity_key_props + ] + ), + state.identity_token, + ) + + def primary_key_from_instance(self, instance: _O) -> Tuple[Any, ...]: + """Return the list of primary key values for the given + instance. + + If the instance's state is expired, calling this method + will result in a database check to see if the object has been deleted. + If the row no longer exists, + :class:`~sqlalchemy.orm.exc.ObjectDeletedError` is raised. 
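+
+        For example, for a hypothetical persistent ``User`` instance
+        ``some_user`` mapped with a single integer primary key column,
+        the returned tuple would resemble ``(5,)``::
+
+            from sqlalchemy import inspect
+
+            pk_values = inspect(User).primary_key_from_instance(some_user)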
+ + """ + state = attributes.instance_state(instance) + identity_key = self._identity_key_from_state( + state, PassiveFlag.PASSIVE_OFF + ) + return identity_key[1] + + @HasMemoized.memoized_attribute + def _persistent_sortkey_fn(self): + key_fns = [col.type.sort_key_function for col in self.primary_key] + + if set(key_fns).difference([None]): + + def key(state): + return tuple( + key_fn(val) if key_fn is not None else val + for key_fn, val in zip(key_fns, state.key[1]) + ) + + else: + + def key(state): + return state.key[1] + + return key + + @HasMemoized.memoized_attribute + def _identity_key_props(self): + return [self._columntoproperty[col] for col in self.primary_key] + + @HasMemoized.memoized_attribute + def _all_pk_cols(self): + collection: Set[ColumnClause[Any]] = set() + for table in self.tables: + collection.update(self._pks_by_table[table]) + return collection + + @HasMemoized.memoized_attribute + def _should_undefer_in_wildcard(self): + cols: Set[ColumnElement[Any]] = set(self.primary_key) + if self.polymorphic_on is not None: + cols.add(self.polymorphic_on) + return cols + + @HasMemoized.memoized_attribute + def _primary_key_propkeys(self): + return {self._columntoproperty[col].key for col in self._all_pk_cols} + + def _get_state_attr_by_column( + self, + state: InstanceState[_O], + dict_: _InstanceDict, + column: ColumnElement[Any], + passive: PassiveFlag = PassiveFlag.PASSIVE_RETURN_NO_VALUE, + ) -> Any: + prop = self._columntoproperty[column] + return state.manager[prop.key].impl.get(state, dict_, passive=passive) + + def _set_committed_state_attr_by_column(self, state, dict_, column, value): + prop = self._columntoproperty[column] + state.manager[prop.key].impl.set_committed_value(state, dict_, value) + + def _set_state_attr_by_column(self, state, dict_, column, value): + prop = self._columntoproperty[column] + state.manager[prop.key].impl.set(state, dict_, value, None) + + def _get_committed_attr_by_column(self, obj, column): + state = attributes.instance_state(obj) + dict_ = attributes.instance_dict(obj) + return self._get_committed_state_attr_by_column( + state, dict_, column, passive=PassiveFlag.PASSIVE_OFF + ) + + def _get_committed_state_attr_by_column( + self, state, dict_, column, passive=PassiveFlag.PASSIVE_RETURN_NO_VALUE + ): + prop = self._columntoproperty[column] + return state.manager[prop.key].impl.get_committed_value( + state, dict_, passive=passive + ) + + def _optimized_get_statement(self, state, attribute_names): + """assemble a WHERE clause which retrieves a given state by primary + key, using a minimized set of tables. + + Applies to a joined-table inheritance mapper where the + requested attribute names are only present on joined tables, + not the base table. The WHERE clause attempts to include + only those tables to minimize joins. 
+ + """ + props = self._props + + col_attribute_names = set(attribute_names).intersection( + state.mapper.column_attrs.keys() + ) + tables: Set[FromClause] = set( + chain( + *[ + sql_util.find_tables(c, check_columns=True) + for key in col_attribute_names + for c in props[key].columns + ] + ) + ) + + if self.base_mapper.local_table in tables: + return None + + def visit_binary(binary): + leftcol = binary.left + rightcol = binary.right + if leftcol is None or rightcol is None: + return + + if leftcol.table not in tables: + leftval = self._get_committed_state_attr_by_column( + state, + state.dict, + leftcol, + passive=PassiveFlag.PASSIVE_NO_INITIALIZE, + ) + if leftval in orm_util._none_set: + raise _OptGetColumnsNotAvailable() + binary.left = sql.bindparam( + None, leftval, type_=binary.right.type + ) + elif rightcol.table not in tables: + rightval = self._get_committed_state_attr_by_column( + state, + state.dict, + rightcol, + passive=PassiveFlag.PASSIVE_NO_INITIALIZE, + ) + if rightval in orm_util._none_set: + raise _OptGetColumnsNotAvailable() + binary.right = sql.bindparam( + None, rightval, type_=binary.right.type + ) + + allconds: List[ColumnElement[bool]] = [] + + start = False + + # as of #7507, from the lowest base table on upwards, + # we include all intermediary tables. + + for mapper in reversed(list(self.iterate_to_root())): + if mapper.local_table in tables: + start = True + elif not isinstance(mapper.local_table, expression.TableClause): + return None + if start and not mapper.single: + assert mapper.inherits + assert not mapper.concrete + assert mapper.inherit_condition is not None + allconds.append(mapper.inherit_condition) + tables.add(mapper.local_table) + + # only the bottom table needs its criteria to be altered to fit + # the primary key ident - the rest of the tables upwards to the + # descendant-most class should all be present and joined to each + # other. + try: + _traversed = visitors.cloned_traverse( + allconds[0], {}, {"binary": visit_binary} + ) + except _OptGetColumnsNotAvailable: + return None + else: + allconds[0] = _traversed + + cond = sql.and_(*allconds) + + cols = [] + for key in col_attribute_names: + cols.extend(props[key].columns) + return ( + sql.select(*cols) + .where(cond) + .set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL) + ) + + def _iterate_to_target_viawpoly(self, mapper): + if self.isa(mapper): + prev = self + for m in self.iterate_to_root(): + yield m + + if m is not prev and prev not in m._with_polymorphic_mappers: + break + + prev = m + if m is mapper: + break + + @HasMemoized.memoized_attribute + def _would_selectinload_combinations_cache(self): + return {} + + def _would_selectin_load_only_from_given_mapper(self, super_mapper): + """return True if this mapper would "selectin" polymorphic load based + on the given super mapper, and not from a setting from a subclass. + + given:: + + class A: ... + + + class B(A): + __mapper_args__ = {"polymorphic_load": "selectin"} + + + class C(B): ... + + + class D(B): + __mapper_args__ = {"polymorphic_load": "selectin"} + + ``inspect(C)._would_selectin_load_only_from_given_mapper(inspect(B))`` + returns True, because C does selectin loading because of B's setting. + + OTOH, ``inspect(D) + ._would_selectin_load_only_from_given_mapper(inspect(B))`` + returns False, because D does selectin loading because of its own + setting; when we are doing a selectin poly load from B, we want to + filter out D because it would already have its own selectin poly load + set up separately. + + Added as part of #9373. 
+ + """ + cache = self._would_selectinload_combinations_cache + + try: + return cache[super_mapper] + except KeyError: + pass + + # assert that given object is a supermapper, meaning we already + # strong reference it directly or indirectly. this allows us + # to not worry that we are creating new strongrefs to unrelated + # mappers or other objects. + assert self.isa(super_mapper) + + mapper = super_mapper + for m in self._iterate_to_target_viawpoly(mapper): + if m.polymorphic_load == "selectin": + retval = m is super_mapper + break + else: + retval = False + + cache[super_mapper] = retval + return retval + + def _should_selectin_load(self, enabled_via_opt, polymorphic_from): + if not enabled_via_opt: + # common case, takes place for all polymorphic loads + mapper = polymorphic_from + for m in self._iterate_to_target_viawpoly(mapper): + if m.polymorphic_load == "selectin": + return m + else: + # uncommon case, selectin load options were used + enabled_via_opt = set(enabled_via_opt) + enabled_via_opt_mappers = {e.mapper: e for e in enabled_via_opt} + for entity in enabled_via_opt.union([polymorphic_from]): + mapper = entity.mapper + for m in self._iterate_to_target_viawpoly(mapper): + if ( + m.polymorphic_load == "selectin" + or m in enabled_via_opt_mappers + ): + return enabled_via_opt_mappers.get(m, m) + + return None + + @util.preload_module("sqlalchemy.orm.strategy_options") + def _subclass_load_via_in(self, entity, polymorphic_from): + """Assemble a that can load the columns local to + this subclass as a SELECT with IN. + + """ + + strategy_options = util.preloaded.orm_strategy_options + + assert self.inherits + + if self.polymorphic_on is not None: + polymorphic_prop = self._columntoproperty[self.polymorphic_on] + keep_props = set([polymorphic_prop] + self._identity_key_props) + else: + keep_props = set(self._identity_key_props) + + disable_opt = strategy_options.Load(entity) + enable_opt = strategy_options.Load(entity) + + classes_to_include = {self} + m: Optional[Mapper[Any]] = self.inherits + while ( + m is not None + and m is not polymorphic_from + and m.polymorphic_load == "selectin" + ): + classes_to_include.add(m) + m = m.inherits + + for prop in self.column_attrs + self.relationships: + # skip prop keys that are not instrumented on the mapped class. + # this is primarily the "_sa_polymorphic_on" property that gets + # created for an ad-hoc polymorphic_on SQL expression, issue #8704 + if prop.key not in self.class_manager: + continue + + if prop.parent in classes_to_include or prop in keep_props: + # "enable" options, to turn on the properties that we want to + # load by default (subject to options from the query) + if not isinstance(prop, StrategizedProperty): + continue + + enable_opt = enable_opt._set_generic_strategy( + # convert string name to an attribute before passing + # to loader strategy. note this must be in terms + # of given entity, such as AliasedClass, etc. + (getattr(entity.entity_namespace, prop.key),), + dict(prop.strategy_key), + _reconcile_to_other=True, + ) + else: + # "disable" options, to turn off the properties from the + # superclass that we *don't* want to load, applied after + # the options from the query to override them + disable_opt = disable_opt._set_generic_strategy( + # convert string name to an attribute before passing + # to loader strategy. note this must be in terms + # of given entity, such as AliasedClass, etc. 
+ (getattr(entity.entity_namespace, prop.key),), + {"do_nothing": True}, + _reconcile_to_other=False, + ) + + primary_key = [ + sql_util._deep_annotate(pk, {"_orm_adapt": True}) + for pk in self.primary_key + ] + + in_expr: ColumnElement[Any] + + if len(primary_key) > 1: + in_expr = sql.tuple_(*primary_key) + else: + in_expr = primary_key[0] + + if entity.is_aliased_class: + assert entity.mapper is self + + q = sql.select(entity).set_label_style( + LABEL_STYLE_TABLENAME_PLUS_COL + ) + + in_expr = entity._adapter.traverse(in_expr) + primary_key = [entity._adapter.traverse(k) for k in primary_key] + q = q.where( + in_expr.in_(sql.bindparam("primary_keys", expanding=True)) + ).order_by(*primary_key) + else: + q = sql.select(self).set_label_style( + LABEL_STYLE_TABLENAME_PLUS_COL + ) + q = q.where( + in_expr.in_(sql.bindparam("primary_keys", expanding=True)) + ).order_by(*primary_key) + + return q, enable_opt, disable_opt + + @HasMemoized.memoized_attribute + def _subclass_load_via_in_mapper(self): + # the default is loading this mapper against the basemost mapper + return self._subclass_load_via_in(self, self.base_mapper) + + def cascade_iterator( + self, + type_: str, + state: InstanceState[_O], + halt_on: Optional[Callable[[InstanceState[Any]], bool]] = None, + ) -> Iterator[ + Tuple[object, Mapper[Any], InstanceState[Any], _InstanceDict] + ]: + r"""Iterate each element and its mapper in an object graph, + for all relationships that meet the given cascade rule. + + :param type\_: + The name of the cascade rule (i.e. ``"save-update"``, ``"delete"``, + etc.). + + .. note:: the ``"all"`` cascade is not accepted here. For a generic + object traversal function, see :ref:`faq_walk_objects`. + + :param state: + The lead InstanceState. child items will be processed per + the relationships defined for this object's mapper. + + :return: the method yields individual object instances. + + .. seealso:: + + :ref:`unitofwork_cascades` + + :ref:`faq_walk_objects` - illustrates a generic function to + traverse all objects without relying on cascades. 
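+
+        For example, for a hypothetical ``User`` object ``some_user``
+        whose ``addresses`` relationship is configured with
+        ``cascade="all, delete"``, the objects subject to the "delete"
+        cascade may be iterated as::
+
+            from sqlalchemy import inspect
+
+            state = inspect(some_user)
+            it = state.mapper.cascade_iterator("delete", state)
+            for obj, sub_mapper, obj_state, obj_dict in it:
+                print(obj)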
+ + """ + visited_states: Set[InstanceState[Any]] = set() + prp, mpp = object(), object() + + assert state.mapper.isa(self) + + # this is actually a recursive structure, fully typing it seems + # a little too difficult for what it's worth here + visitables: Deque[ + Tuple[ + Deque[Any], + object, + Optional[InstanceState[Any]], + Optional[_InstanceDict], + ] + ] + + visitables = deque( + [(deque(state.mapper._props.values()), prp, state, state.dict)] + ) + + while visitables: + iterator, item_type, parent_state, parent_dict = visitables[-1] + if not iterator: + visitables.pop() + continue + + if item_type is prp: + prop = iterator.popleft() + if not prop.cascade or type_ not in prop.cascade: + continue + assert parent_state is not None + assert parent_dict is not None + queue = deque( + prop.cascade_iterator( + type_, + parent_state, + parent_dict, + visited_states, + halt_on, + ) + ) + if queue: + visitables.append((queue, mpp, None, None)) + elif item_type is mpp: + ( + instance, + instance_mapper, + corresponding_state, + corresponding_dict, + ) = iterator.popleft() + yield ( + instance, + instance_mapper, + corresponding_state, + corresponding_dict, + ) + visitables.append( + ( + deque(instance_mapper._props.values()), + prp, + corresponding_state, + corresponding_dict, + ) + ) + + @HasMemoized.memoized_attribute + def _compiled_cache(self): + return util.LRUCache(self._compiled_cache_size) + + @HasMemoized.memoized_attribute + def _multiple_persistence_tables(self): + return len(self.tables) > 1 + + @HasMemoized.memoized_attribute + def _sorted_tables(self): + table_to_mapper: Dict[TableClause, Mapper[Any]] = {} + + for mapper in self.base_mapper.self_and_descendants: + for t in mapper.tables: + table_to_mapper.setdefault(t, mapper) + + extra_dependencies = [] + for table, mapper in table_to_mapper.items(): + super_ = mapper.inherits + if super_: + extra_dependencies.extend( + [(super_table, table) for super_table in super_.tables] + ) + + def skip(fk): + # attempt to skip dependencies that are not + # significant to the inheritance chain + # for two tables that are related by inheritance. + # while that dependency may be important, it's technically + # not what we mean to sort on here. 
+ parent = table_to_mapper.get(fk.parent.table) + dep = table_to_mapper.get(fk.column.table) + if ( + parent is not None + and dep is not None + and dep is not parent + and dep.inherit_condition is not None + ): + cols = set(sql_util._find_columns(dep.inherit_condition)) + if parent.inherit_condition is not None: + cols = cols.union( + sql_util._find_columns(parent.inherit_condition) + ) + return fk.parent not in cols and fk.column not in cols + else: + return fk.parent not in cols + return False + + sorted_ = sql_util.sort_tables( + table_to_mapper, + skip_fn=skip, + extra_dependencies=extra_dependencies, + ) + + ret = util.OrderedDict() + for t in sorted_: + ret[t] = table_to_mapper[t] + return ret + + def _memo(self, key: Any, callable_: Callable[[], _T]) -> _T: + if key in self._memoized_values: + return cast(_T, self._memoized_values[key]) + else: + self._memoized_values[key] = value = callable_() + return value + + @util.memoized_property + def _table_to_equated(self): + """memoized map of tables to collections of columns to be + synchronized upwards to the base mapper.""" + + result: util.defaultdict[ + Table, + List[ + Tuple[ + Mapper[Any], + List[Tuple[ColumnElement[Any], ColumnElement[Any]]], + ] + ], + ] = util.defaultdict(list) + + def set_union(x, y): + return x.union(y) + + for table in self._sorted_tables: + cols = set(table.c) + + for m in self.iterate_to_root(): + if m._inherits_equated_pairs and cols.intersection( + reduce( + set_union, + [l.proxy_set for l, r in m._inherits_equated_pairs], + ) + ): + result[table].append((m, m._inherits_equated_pairs)) + + return result + + +class _OptGetColumnsNotAvailable(Exception): + pass + + +def configure_mappers() -> None: + """Initialize the inter-mapper relationships of all mappers that + have been constructed thus far across all :class:`_orm.registry` + collections. + + The configure step is used to reconcile and initialize the + :func:`_orm.relationship` linkages between mapped classes, as well as to + invoke configuration events such as the + :meth:`_orm.MapperEvents.before_configured` and + :meth:`_orm.MapperEvents.after_configured`, which may be used by ORM + extensions or user-defined extension hooks. + + Mapper configuration is normally invoked automatically, the first time + mappings from a particular :class:`_orm.registry` are used, as well as + whenever mappings are used and additional not-yet-configured mappers have + been constructed. The automatic configuration process however is local only + to the :class:`_orm.registry` involving the target mapper and any related + :class:`_orm.registry` objects which it may depend on; this is + equivalent to invoking the :meth:`_orm.registry.configure` method + on a particular :class:`_orm.registry`. + + By contrast, the :func:`_orm.configure_mappers` function will invoke the + configuration process on all :class:`_orm.registry` objects that + exist in memory, and may be useful for scenarios where many individual + :class:`_orm.registry` objects that are nonetheless interrelated are + in use. + + .. versionchanged:: 1.4 + + As of SQLAlchemy 1.4.0b2, this function works on a + per-:class:`_orm.registry` basis, locating all :class:`_orm.registry` + objects present and invoking the :meth:`_orm.registry.configure` method + on each. The :meth:`_orm.registry.configure` method may be preferred to + limit the configuration of mappers to those local to a particular + :class:`_orm.registry` and/or declarative base class. 
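+
+    For example, an application that defines mappings across several
+    interrelated registries may force configuration of all of them at
+    once::
+
+        from sqlalchemy.orm import configure_mappers
+
+        configure_mappers()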
+ + Points at which automatic configuration is invoked include when a mapped + class is instantiated into an instance, as well as when ORM queries + are emitted using :meth:`.Session.query` or :meth:`_orm.Session.execute` + with an ORM-enabled statement. + + The mapper configure process, whether invoked by + :func:`_orm.configure_mappers` or from :meth:`_orm.registry.configure`, + provides several event hooks that can be used to augment the mapper + configuration step. These hooks include: + + * :meth:`.MapperEvents.before_configured` - called once before + :func:`.configure_mappers` or :meth:`_orm.registry.configure` does any + work; this can be used to establish additional options, properties, or + related mappings before the operation proceeds. + + * :meth:`.MapperEvents.mapper_configured` - called as each individual + :class:`_orm.Mapper` is configured within the process; will include all + mapper state except for backrefs set up by other mappers that are still + to be configured. + + * :meth:`.MapperEvents.after_configured` - called once after + :func:`.configure_mappers` or :meth:`_orm.registry.configure` is + complete; at this stage, all :class:`_orm.Mapper` objects that fall + within the scope of the configuration operation will be fully configured. + Note that the calling application may still have other mappings that + haven't been produced yet, such as if they are in modules as yet + unimported, and may also have mappings that are still to be configured, + if they are in other :class:`_orm.registry` collections not part of the + current scope of configuration. + + """ + + _configure_registries(_all_registries(), cascade=True) + + +def _configure_registries( + registries: Set[_RegistryType], cascade: bool +) -> None: + for reg in registries: + if reg._new_mappers: + break + else: + return + + with _CONFIGURE_MUTEX: + global _already_compiling + if _already_compiling: + return + _already_compiling = True + try: + # double-check inside mutex + for reg in registries: + if reg._new_mappers: + break + else: + return + + Mapper.dispatch._for_class(Mapper).before_configured() # type: ignore # noqa: E501 + # initialize properties on all mappers + # note that _mapper_registry is unordered, which + # may randomly conceal/reveal issues related to + # the order of mapper compilation + + _do_configure_registries(registries, cascade) + finally: + _already_compiling = False + Mapper.dispatch._for_class(Mapper).after_configured() # type: ignore + + +@util.preload_module("sqlalchemy.orm.decl_api") +def _do_configure_registries( + registries: Set[_RegistryType], cascade: bool +) -> None: + registry = util.preloaded.orm_decl_api.registry + + orig = set(registries) + + for reg in registry._recurse_with_dependencies(registries): + has_skip = False + + for mapper in reg._mappers_to_configure(): + run_configure = None + + for fn in mapper.dispatch.before_mapper_configured: + run_configure = fn(mapper, mapper.class_) + if run_configure is EXT_SKIP: + has_skip = True + break + if run_configure is EXT_SKIP: + continue + + if getattr(mapper, "_configure_failed", False): + e = sa_exc.InvalidRequestError( + "One or more mappers failed to initialize - " + "can't proceed with initialization of other " + "mappers. Triggering mapper: '%s'. 
" + "Original exception was: %s" + % (mapper, mapper._configure_failed) + ) + e._configure_failed = mapper._configure_failed # type: ignore + raise e + + if not mapper.configured: + try: + mapper._post_configure_properties() + mapper._expire_memoizations() + mapper.dispatch.mapper_configured(mapper, mapper.class_) + except Exception: + exc = sys.exc_info()[1] + if not hasattr(exc, "_configure_failed"): + mapper._configure_failed = exc + raise + if not has_skip: + reg._new_mappers = False + + if not cascade and reg._dependencies.difference(orig): + raise sa_exc.InvalidRequestError( + "configure was called with cascade=False but " + "additional registries remain" + ) + + +@util.preload_module("sqlalchemy.orm.decl_api") +def _dispose_registries(registries: Set[_RegistryType], cascade: bool) -> None: + registry = util.preloaded.orm_decl_api.registry + + orig = set(registries) + + for reg in registry._recurse_with_dependents(registries): + if not cascade and reg._dependents.difference(orig): + raise sa_exc.InvalidRequestError( + "Registry has dependent registries that are not disposed; " + "pass cascade=True to clear these also" + ) + + while reg._managers: + try: + manager, _ = reg._managers.popitem() + except KeyError: + # guard against race between while and popitem + pass + else: + reg._dispose_manager_and_mapper(manager) + + reg._non_primary_mappers.clear() + reg._dependents.clear() + for dep in reg._dependencies: + dep._dependents.discard(reg) + reg._dependencies.clear() + # this wasn't done in the 1.3 clear_mappers() and in fact it + # was a bug, as it could cause configure_mappers() to invoke + # the "before_configured" event even though mappers had all been + # disposed. + reg._new_mappers = False + + +def reconstructor(fn: _Fn) -> _Fn: + """Decorate a method as the 'reconstructor' hook. + + Designates a single method as the "reconstructor", an ``__init__``-like + method that will be called by the ORM after the instance has been + loaded from the database or otherwise reconstituted. + + .. tip:: + + The :func:`_orm.reconstructor` decorator makes use of the + :meth:`_orm.InstanceEvents.load` event hook, which can be + used directly. + + The reconstructor will be invoked with no arguments. Scalar + (non-collection) database-mapped attributes of the instance will + be available for use within the function. Eagerly-loaded + collections are generally not yet available and will usually only + contain the first element. ORM state changes made to objects at + this stage will not be recorded for the next flush() operation, so + the activity within a reconstructor should be conservative. + + .. seealso:: + + :meth:`.InstanceEvents.load` + + """ + fn.__sa_reconstructor__ = True # type: ignore[attr-defined] + return fn + + +def validates( + *names: str, include_removes: bool = False, include_backrefs: bool = True +) -> Callable[[_Fn], _Fn]: + r"""Decorate a method as a 'validator' for one or more named properties. + + Designates a method as a validator, a method which receives the + name of the attribute as well as a value to be assigned, or in the + case of a collection, the value to be added to the collection. + The function can then raise validation exceptions to halt the + process from continuing (where Python's built-in ``ValueError`` + and ``AssertionError`` exceptions are reasonable choices), or can + modify or replace the value before proceeding. The function should + otherwise return the given value. 
+ + Note that a validator for a collection **cannot** issue a load of that + collection within the validation routine - this usage raises + an assertion to avoid recursion overflows. This is a reentrant + condition which is not supported. + + :param \*names: list of attribute names to be validated. + :param include_removes: if True, "remove" events will be + sent as well - the validation function must accept an additional + argument "is_remove" which will be a boolean. + + :param include_backrefs: defaults to ``True``; if ``False``, the + validation function will not emit if the originator is an attribute + event related via a backref. This can be used for bi-directional + :func:`.validates` usage where only one validator should emit per + attribute operation. + + .. versionchanged:: 2.0.16 This paramter inadvertently defaulted to + ``False`` for releases 2.0.0 through 2.0.15. Its correct default + of ``True`` is restored in 2.0.16. + + .. seealso:: + + :ref:`simple_validators` - usage examples for :func:`.validates` + + """ + + def wrap(fn: _Fn) -> _Fn: + fn.__sa_validators__ = names # type: ignore[attr-defined] + fn.__sa_validation_opts__ = { # type: ignore[attr-defined] + "include_removes": include_removes, + "include_backrefs": include_backrefs, + } + return fn + + return wrap + + +def _event_on_load(state, ctx): + instrumenting_mapper = state.manager.mapper + + if instrumenting_mapper._reconstructor: + instrumenting_mapper._reconstructor(state.obj()) + + +def _event_on_init(state, args, kwargs): + """Run init_instance hooks. + + This also includes mapper compilation, normally not needed + here but helps with some piecemeal configuration + scenarios (such as in the ORM tutorial). + + """ + + instrumenting_mapper = state.manager.mapper + if instrumenting_mapper: + instrumenting_mapper._check_configure() + if instrumenting_mapper._set_polymorphic_identity: + instrumenting_mapper._set_polymorphic_identity(state) + + +class _ColumnMapping(Dict["ColumnElement[Any]", "MapperProperty[Any]"]): + """Error reporting helper for mapper._columntoproperty.""" + + __slots__ = ("mapper",) + + def __init__(self, mapper): + # TODO: weakref would be a good idea here + self.mapper = mapper + + def __missing__(self, column): + prop = self.mapper._props.get(column) + if prop: + raise orm_exc.UnmappedColumnError( + "Column '%s.%s' is not available, due to " + "conflicting property '%s':%r" + % (column.table.name, column.name, column.key, prop) + ) + raise orm_exc.UnmappedColumnError( + "No column %s is configured on mapper %s..." + % (column, self.mapper) + ) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/path_registry.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/path_registry.py new file mode 100644 index 0000000000000000000000000000000000000000..388e46098d656c4308956d7cf977e4b1f4e25dd0 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/path_registry.py @@ -0,0 +1,811 @@ +# orm/path_registry.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php +"""Path tracking utilities, representing mapper graph traversals. 
+ +""" + +from __future__ import annotations + +from functools import reduce +from itertools import chain +import logging +import operator +from typing import Any +from typing import cast +from typing import Dict +from typing import Iterator +from typing import List +from typing import Optional +from typing import overload +from typing import Sequence +from typing import Tuple +from typing import TYPE_CHECKING +from typing import Union + +from . import base as orm_base +from ._typing import insp_is_mapper_property +from .. import exc +from .. import util +from ..sql import visitors +from ..sql.cache_key import HasCacheKey + +if TYPE_CHECKING: + from ._typing import _InternalEntityType + from .interfaces import StrategizedProperty + from .mapper import Mapper + from .relationships import RelationshipProperty + from .util import AliasedInsp + from ..sql.cache_key import _CacheKeyTraversalType + from ..sql.elements import BindParameter + from ..sql.visitors import anon_map + from ..util.typing import _LiteralStar + from ..util.typing import TypeGuard + + def is_root(path: PathRegistry) -> TypeGuard[RootRegistry]: ... + + def is_entity(path: PathRegistry) -> TypeGuard[AbstractEntityRegistry]: ... + +else: + is_root = operator.attrgetter("is_root") + is_entity = operator.attrgetter("is_entity") + + +_SerializedPath = List[Any] +_StrPathToken = str +_PathElementType = Union[ + _StrPathToken, "_InternalEntityType[Any]", "StrategizedProperty[Any]" +] + +# the representation is in fact +# a tuple with alternating: +# [_InternalEntityType[Any], Union[str, StrategizedProperty[Any]], +# _InternalEntityType[Any], Union[str, StrategizedProperty[Any]], ...] +# this might someday be a tuple of 2-tuples instead, but paths can be +# chopped at odd intervals as well so this is less flexible +_PathRepresentation = Tuple[_PathElementType, ...] + +# NOTE: these names are weird since the array is 0-indexed, +# the "_Odd" entries are at 0, 2, 4, etc +_OddPathRepresentation = Sequence["_InternalEntityType[Any]"] +_EvenPathRepresentation = Sequence[Union["StrategizedProperty[Any]", str]] + + +log = logging.getLogger(__name__) + + +def _unreduce_path(path: _SerializedPath) -> PathRegistry: + return PathRegistry.deserialize(path) + + +_WILDCARD_TOKEN: _LiteralStar = "*" +_DEFAULT_TOKEN = "_sa_default" + + +class PathRegistry(HasCacheKey): + """Represent query load paths and registry functions. + + Basically represents structures like: + + (, "orders", , "items", ) + + These structures are generated by things like + query options (joinedload(), subqueryload(), etc.) and are + used to compose keys stored in the query._attributes dictionary + for various options. + + They are then re-composed at query compile/result row time as + the query is formed and as rows are fetched, where they again + serve to compose keys to look up options in the context.attributes + dictionary, which is copied from query._attributes. + + The path structure has a limited amount of caching, where each + "root" ultimately pulls from a fixed registry associated with + the first mapper, that also contains elements for each of its + property keys. However paths longer than two elements, which + are the exception rather than the rule, are generated on an + as-needed basis. 
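    As a rough internal-API sketch, a two-element path such as the above
    can be spelled as follows, where ``User`` and its ``addresses``
    relationship are hypothetical mapped constructs::

        from sqlalchemy import inspect

        user_path = PathRegistry.per_mapper(inspect(User))
        addresses_path = user_path[User.addresses.property]

        # addresses_path.path is now approximately
        # (<Mapper of User>, <RelationshipProperty User.addresses>)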
+ + """ + + __slots__ = () + + is_token = False + is_root = False + has_entity = False + is_property = False + is_entity = False + + is_unnatural: bool + + path: _PathRepresentation + natural_path: _PathRepresentation + parent: Optional[PathRegistry] + root: RootRegistry + + _cache_key_traversal: _CacheKeyTraversalType = [ + ("path", visitors.ExtendedInternalTraversal.dp_has_cache_key_list) + ] + + def __eq__(self, other: Any) -> bool: + try: + return other is not None and self.path == other._path_for_compare + except AttributeError: + util.warn( + "Comparison of PathRegistry to %r is not supported" + % (type(other)) + ) + return False + + def __ne__(self, other: Any) -> bool: + try: + return other is None or self.path != other._path_for_compare + except AttributeError: + util.warn( + "Comparison of PathRegistry to %r is not supported" + % (type(other)) + ) + return True + + @property + def _path_for_compare(self) -> Optional[_PathRepresentation]: + return self.path + + def odd_element(self, index: int) -> _InternalEntityType[Any]: + return self.path[index] # type: ignore + + def set(self, attributes: Dict[Any, Any], key: Any, value: Any) -> None: + log.debug("set '%s' on path '%s' to '%s'", key, self, value) + attributes[(key, self.natural_path)] = value + + def setdefault( + self, attributes: Dict[Any, Any], key: Any, value: Any + ) -> None: + log.debug("setdefault '%s' on path '%s' to '%s'", key, self, value) + attributes.setdefault((key, self.natural_path), value) + + def get( + self, attributes: Dict[Any, Any], key: Any, value: Optional[Any] = None + ) -> Any: + key = (key, self.natural_path) + if key in attributes: + return attributes[key] + else: + return value + + def __len__(self) -> int: + return len(self.path) + + def __hash__(self) -> int: + return id(self) + + @overload + def __getitem__(self, entity: _StrPathToken) -> TokenRegistry: ... + + @overload + def __getitem__(self, entity: int) -> _PathElementType: ... + + @overload + def __getitem__(self, entity: slice) -> _PathRepresentation: ... + + @overload + def __getitem__( + self, entity: _InternalEntityType[Any] + ) -> AbstractEntityRegistry: ... + + @overload + def __getitem__( + self, entity: StrategizedProperty[Any] + ) -> PropRegistry: ... + + def __getitem__( + self, + entity: Union[ + _StrPathToken, + int, + slice, + _InternalEntityType[Any], + StrategizedProperty[Any], + ], + ) -> Union[ + TokenRegistry, + _PathElementType, + _PathRepresentation, + PropRegistry, + AbstractEntityRegistry, + ]: + raise NotImplementedError() + + # TODO: what are we using this for? 
+ @property + def length(self) -> int: + return len(self.path) + + def pairs( + self, + ) -> Iterator[ + Tuple[_InternalEntityType[Any], Union[str, StrategizedProperty[Any]]] + ]: + odd_path = cast(_OddPathRepresentation, self.path) + even_path = cast(_EvenPathRepresentation, odd_path) + for i in range(0, len(odd_path), 2): + yield odd_path[i], even_path[i + 1] + + def contains_mapper(self, mapper: Mapper[Any]) -> bool: + _m_path = cast(_OddPathRepresentation, self.path) + for path_mapper in [_m_path[i] for i in range(0, len(_m_path), 2)]: + if path_mapper.mapper.isa(mapper): + return True + else: + return False + + def contains(self, attributes: Dict[Any, Any], key: Any) -> bool: + return (key, self.path) in attributes + + def __reduce__(self) -> Any: + return _unreduce_path, (self.serialize(),) + + @classmethod + def _serialize_path(cls, path: _PathRepresentation) -> _SerializedPath: + _m_path = cast(_OddPathRepresentation, path) + _p_path = cast(_EvenPathRepresentation, path) + + return list( + zip( + tuple( + m.class_ if (m.is_mapper or m.is_aliased_class) else str(m) + for m in [_m_path[i] for i in range(0, len(_m_path), 2)] + ), + tuple( + p.key if insp_is_mapper_property(p) else str(p) + for p in [_p_path[i] for i in range(1, len(_p_path), 2)] + ) + + (None,), + ) + ) + + @classmethod + def _deserialize_path(cls, path: _SerializedPath) -> _PathRepresentation: + def _deserialize_mapper_token(mcls: Any) -> Any: + return ( + # note: we likely dont want configure=True here however + # this is maintained at the moment for backwards compatibility + orm_base._inspect_mapped_class(mcls, configure=True) + if mcls not in PathToken._intern + else PathToken._intern[mcls] + ) + + def _deserialize_key_token(mcls: Any, key: Any) -> Any: + if key is None: + return None + elif key in PathToken._intern: + return PathToken._intern[key] + else: + mp = orm_base._inspect_mapped_class(mcls, configure=True) + assert mp is not None + return mp.attrs[key] + + p = tuple( + chain( + *[ + ( + _deserialize_mapper_token(mcls), + _deserialize_key_token(mcls, key), + ) + for mcls, key in path + ] + ) + ) + if p and p[-1] is None: + p = p[0:-1] + return p + + def serialize(self) -> _SerializedPath: + path = self.path + return self._serialize_path(path) + + @classmethod + def deserialize(cls, path: _SerializedPath) -> PathRegistry: + assert path is not None + p = cls._deserialize_path(path) + return cls.coerce(p) + + @overload + @classmethod + def per_mapper(cls, mapper: Mapper[Any]) -> CachingEntityRegistry: ... + + @overload + @classmethod + def per_mapper(cls, mapper: AliasedInsp[Any]) -> SlotsEntityRegistry: ... 
+ + @classmethod + def per_mapper( + cls, mapper: _InternalEntityType[Any] + ) -> AbstractEntityRegistry: + if mapper.is_mapper: + return CachingEntityRegistry(cls.root, mapper) + else: + return SlotsEntityRegistry(cls.root, mapper) + + @classmethod + def coerce(cls, raw: _PathRepresentation) -> PathRegistry: + def _red(prev: PathRegistry, next_: _PathElementType) -> PathRegistry: + return prev[next_] + + # can't quite get mypy to appreciate this one :) + return reduce(_red, raw, cls.root) # type: ignore + + def __add__(self, other: PathRegistry) -> PathRegistry: + def _red(prev: PathRegistry, next_: _PathElementType) -> PathRegistry: + return prev[next_] + + return reduce(_red, other.path, self) + + def __str__(self) -> str: + return f"ORM Path[{' -> '.join(str(elem) for elem in self.path)}]" + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self.path!r})" + + +class CreatesToken(PathRegistry): + __slots__ = () + + is_aliased_class: bool + is_root: bool + + def token(self, token: _StrPathToken) -> TokenRegistry: + if token.endswith(f":{_WILDCARD_TOKEN}"): + return TokenRegistry(self, token) + elif token.endswith(f":{_DEFAULT_TOKEN}"): + return TokenRegistry(self.root, token) + else: + raise exc.ArgumentError(f"invalid token: {token}") + + +class RootRegistry(CreatesToken): + """Root registry, defers to mappers so that + paths are maintained per-root-mapper. + + """ + + __slots__ = () + + inherit_cache = True + + path = natural_path = () + has_entity = False + is_aliased_class = False + is_root = True + is_unnatural = False + + def _getitem( + self, entity: Any + ) -> Union[TokenRegistry, AbstractEntityRegistry]: + if entity in PathToken._intern: + if TYPE_CHECKING: + assert isinstance(entity, _StrPathToken) + return TokenRegistry(self, PathToken._intern[entity]) + else: + try: + return entity._path_registry # type: ignore + except AttributeError: + raise IndexError( + f"invalid argument for RootRegistry.__getitem__: {entity}" + ) + + def _truncate_recursive(self) -> RootRegistry: + return self + + if not TYPE_CHECKING: + __getitem__ = _getitem + + +PathRegistry.root = RootRegistry() + + +class PathToken(orm_base.InspectionAttr, HasCacheKey, str): + """cacheable string token""" + + _intern: Dict[str, PathToken] = {} + + def _gen_cache_key( + self, anon_map: anon_map, bindparams: List[BindParameter[Any]] + ) -> Tuple[Any, ...]: + return (str(self),) + + @property + def _path_for_compare(self) -> Optional[_PathRepresentation]: + return None + + @classmethod + def intern(cls, strvalue: str) -> PathToken: + if strvalue in cls._intern: + return cls._intern[strvalue] + else: + cls._intern[strvalue] = result = PathToken(strvalue) + return result + + +class TokenRegistry(PathRegistry): + __slots__ = ("token", "parent", "path", "natural_path") + + inherit_cache = True + + token: _StrPathToken + parent: CreatesToken + + def __init__(self, parent: CreatesToken, token: _StrPathToken): + token = PathToken.intern(token) + + self.token = token + self.parent = parent + self.path = parent.path + (token,) + self.natural_path = parent.natural_path + (token,) + + has_entity = False + + is_token = True + + def generate_for_superclasses(self) -> Iterator[PathRegistry]: + # NOTE: this method is no longer used. 
consider removal + parent = self.parent + if is_root(parent): + yield self + return + + if TYPE_CHECKING: + assert isinstance(parent, AbstractEntityRegistry) + if not parent.is_aliased_class: + for mp_ent in parent.mapper.iterate_to_root(): + yield TokenRegistry(parent.parent[mp_ent], self.token) + elif ( + parent.is_aliased_class + and cast( + "AliasedInsp[Any]", + parent.entity, + )._is_with_polymorphic + ): + yield self + for ent in cast( + "AliasedInsp[Any]", parent.entity + )._with_polymorphic_entities: + yield TokenRegistry(parent.parent[ent], self.token) + else: + yield self + + def _generate_natural_for_superclasses( + self, + ) -> Iterator[_PathRepresentation]: + parent = self.parent + if is_root(parent): + yield self.natural_path + return + + if TYPE_CHECKING: + assert isinstance(parent, AbstractEntityRegistry) + for mp_ent in parent.mapper.iterate_to_root(): + yield TokenRegistry(parent.parent[mp_ent], self.token).natural_path + if ( + parent.is_aliased_class + and cast( + "AliasedInsp[Any]", + parent.entity, + )._is_with_polymorphic + ): + yield self.natural_path + for ent in cast( + "AliasedInsp[Any]", parent.entity + )._with_polymorphic_entities: + yield ( + TokenRegistry(parent.parent[ent], self.token).natural_path + ) + else: + yield self.natural_path + + def _getitem(self, entity: Any) -> Any: + try: + return self.path[entity] + except TypeError as err: + raise IndexError(f"{entity}") from err + + if not TYPE_CHECKING: + __getitem__ = _getitem + + +class PropRegistry(PathRegistry): + __slots__ = ( + "prop", + "parent", + "path", + "natural_path", + "has_entity", + "entity", + "mapper", + "_wildcard_path_loader_key", + "_default_path_loader_key", + "_loader_key", + "is_unnatural", + ) + inherit_cache = True + is_property = True + + prop: StrategizedProperty[Any] + mapper: Optional[Mapper[Any]] + entity: Optional[_InternalEntityType[Any]] + + def __init__( + self, parent: AbstractEntityRegistry, prop: StrategizedProperty[Any] + ): + + # restate this path in terms of the + # given StrategizedProperty's parent. + insp = cast("_InternalEntityType[Any]", parent[-1]) + natural_parent: AbstractEntityRegistry = parent + + # inherit "is_unnatural" from the parent + self.is_unnatural = parent.parent.is_unnatural or bool( + parent.mapper.inherits + ) + + if not insp.is_aliased_class or insp._use_mapper_path: # type: ignore + parent = natural_parent = parent.parent[prop.parent] + elif ( + insp.is_aliased_class + and insp.with_polymorphic_mappers + and prop.parent in insp.with_polymorphic_mappers + ): + subclass_entity: _InternalEntityType[Any] = parent[-1]._entity_for_mapper(prop.parent) # type: ignore # noqa: E501 + parent = parent.parent[subclass_entity] + + # when building a path where with_polymorphic() is in use, + # special logic to determine the "natural path" when subclass + # entities are used. + # + # here we are trying to distinguish between a path that starts + # on a with_polymorphic entity vs. 
one that starts on a + # normal entity that introduces a with_polymorphic() in the + # middle using of_type(): + # + # # as in test_polymorphic_rel-> + # # test_subqueryload_on_subclass_uses_path_correctly + # wp = with_polymorphic(RegularEntity, "*") + # sess.query(wp).options(someload(wp.SomeSubEntity.foos)) + # + # vs + # + # # as in test_relationship->JoinedloadWPolyOfTypeContinued + # wp = with_polymorphic(SomeFoo, "*") + # sess.query(RegularEntity).options( + # someload(RegularEntity.foos.of_type(wp)) + # .someload(wp.SubFoo.bar) + # ) + # + # in the former case, the Query as it generates a path that we + # want to match will be in terms of the with_polymorphic at the + # beginning. in the latter case, Query will generate simple + # paths that don't know about this with_polymorphic, so we must + # use a separate natural path. + # + # + if parent.parent: + natural_parent = parent.parent[subclass_entity.mapper] + self.is_unnatural = True + else: + natural_parent = parent + elif ( + natural_parent.parent + and insp.is_aliased_class + and prop.parent # this should always be the case here + is not insp.mapper + and insp.mapper.isa(prop.parent) + ): + natural_parent = parent.parent[prop.parent] + + self.prop = prop + self.parent = parent + self.path = parent.path + (prop,) + self.natural_path = natural_parent.natural_path + (prop,) + + self.has_entity = prop._links_to_entity + if prop._is_relationship: + if TYPE_CHECKING: + assert isinstance(prop, RelationshipProperty) + self.entity = prop.entity + self.mapper = prop.mapper + else: + self.entity = None + self.mapper = None + + self._wildcard_path_loader_key = ( + "loader", + parent.natural_path + self.prop._wildcard_token, + ) + self._default_path_loader_key = self.prop._default_path_loader_key + self._loader_key = ("loader", self.natural_path) + + def _truncate_recursive(self) -> PropRegistry: + earliest = None + for i, token in enumerate(reversed(self.path[:-1])): + if token is self.prop: + earliest = i + + if earliest is None: + return self + else: + return self.coerce(self.path[0 : -(earliest + 1)]) # type: ignore + + @property + def entity_path(self) -> AbstractEntityRegistry: + assert self.entity is not None + return self[self.entity] + + def _getitem( + self, entity: Union[int, slice, _InternalEntityType[Any]] + ) -> Union[AbstractEntityRegistry, _PathElementType, _PathRepresentation]: + if isinstance(entity, (int, slice)): + return self.path[entity] + else: + return SlotsEntityRegistry(self, entity) + + if not TYPE_CHECKING: + __getitem__ = _getitem + + +class AbstractEntityRegistry(CreatesToken): + __slots__ = ( + "key", + "parent", + "is_aliased_class", + "path", + "entity", + "natural_path", + ) + + has_entity = True + is_entity = True + + parent: Union[RootRegistry, PropRegistry] + key: _InternalEntityType[Any] + entity: _InternalEntityType[Any] + is_aliased_class: bool + + def __init__( + self, + parent: Union[RootRegistry, PropRegistry], + entity: _InternalEntityType[Any], + ): + self.key = entity + self.parent = parent + self.is_aliased_class = entity.is_aliased_class + self.entity = entity + self.path = parent.path + (entity,) + + # the "natural path" is the path that we get when Query is traversing + # from the lead entities into the various relationships; it corresponds + # to the structure of mappers and relationships. when we are given a + # path that comes from loader options, as of 1.3 it can have ac-hoc + # with_polymorphic() and other AliasedInsp objects inside of it, which + # are usually not present in mappings. 
So here we track both the + # "enhanced" path in self.path and the "natural" path that doesn't + # include those objects so these two traversals can be matched up. + + # the test here for "(self.is_aliased_class or parent.is_unnatural)" + # are to avoid the more expensive conditional logic that follows if we + # know we don't have to do it. This conditional can just as well be + # "if parent.path:", it just is more function calls. + # + # This is basically the only place that the "is_unnatural" flag + # actually changes behavior. + if parent.path and (self.is_aliased_class or parent.is_unnatural): + # this is an infrequent code path used only for loader strategies + # that also make use of of_type(). + if entity.mapper.isa(parent.natural_path[-1].mapper): # type: ignore # noqa: E501 + self.natural_path = parent.natural_path + (entity.mapper,) + else: + self.natural_path = parent.natural_path + ( + parent.natural_path[-1].entity, # type: ignore + ) + # it seems to make sense that since these paths get mixed up + # with statements that are cached or not, we should make + # sure the natural path is cacheable across different occurrences + # of equivalent AliasedClass objects. however, so far this + # does not seem to be needed for whatever reason. + # elif not parent.path and self.is_aliased_class: + # self.natural_path = (self.entity._generate_cache_key()[0], ) + else: + self.natural_path = self.path + + def _truncate_recursive(self) -> AbstractEntityRegistry: + return self.parent._truncate_recursive()[self.entity] + + @property + def root_entity(self) -> _InternalEntityType[Any]: + return self.odd_element(0) + + @property + def entity_path(self) -> PathRegistry: + return self + + @property + def mapper(self) -> Mapper[Any]: + return self.entity.mapper + + def __bool__(self) -> bool: + return True + + def _getitem( + self, entity: Any + ) -> Union[_PathElementType, _PathRepresentation, PathRegistry]: + if isinstance(entity, (int, slice)): + return self.path[entity] + elif entity in PathToken._intern: + return TokenRegistry(self, PathToken._intern[entity]) + else: + return PropRegistry(self, entity) + + if not TYPE_CHECKING: + __getitem__ = _getitem + + +class SlotsEntityRegistry(AbstractEntityRegistry): + # for aliased class, return lightweight, no-cycles created + # version + inherit_cache = True + + +class _ERDict(Dict[Any, Any]): + def __init__(self, registry: CachingEntityRegistry): + self.registry = registry + + def __missing__(self, key: Any) -> PropRegistry: + self[key] = item = PropRegistry(self.registry, key) + + return item + + +class CachingEntityRegistry(AbstractEntityRegistry): + # for long lived mapper, return dict based caching + # version that creates reference cycles + + __slots__ = ("_cache",) + + inherit_cache = True + + def __init__( + self, + parent: Union[RootRegistry, PropRegistry], + entity: _InternalEntityType[Any], + ): + super().__init__(parent, entity) + self._cache = _ERDict(self) + + def pop(self, key: Any, default: Any) -> Any: + return self._cache.pop(key, default) + + def _getitem(self, entity: Any) -> Any: + if isinstance(entity, (int, slice)): + return self.path[entity] + elif isinstance(entity, PathToken): + return TokenRegistry(self, entity) + else: + return self._cache[entity] + + if not TYPE_CHECKING: + __getitem__ = _getitem + + +if TYPE_CHECKING: + + def path_is_entity( + path: PathRegistry, + ) -> TypeGuard[AbstractEntityRegistry]: ... + + def path_is_property(path: PathRegistry) -> TypeGuard[PropRegistry]: ... 
+ +else: + path_is_entity = operator.attrgetter("is_entity") + path_is_property = operator.attrgetter("is_property") diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/persistence.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/persistence.py new file mode 100644 index 0000000000000000000000000000000000000000..cbe8557add9619686d11cf474fa3e3176259595b --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/persistence.py @@ -0,0 +1,1782 @@ +# orm/persistence.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php +# mypy: ignore-errors + + +"""private module containing functions used to emit INSERT, UPDATE +and DELETE statements on behalf of a :class:`_orm.Mapper` and its descending +mappers. + +The functions here are called only by the unit of work functions +in unitofwork.py. + +""" +from __future__ import annotations + +from itertools import chain +from itertools import groupby +from itertools import zip_longest +import operator + +from . import attributes +from . import exc as orm_exc +from . import loading +from . import sync +from .base import state_str +from .. import exc as sa_exc +from .. import future +from .. import sql +from .. import util +from ..engine import cursor as _cursor +from ..sql import operators +from ..sql.elements import BooleanClauseList +from ..sql.selectable import LABEL_STYLE_TABLENAME_PLUS_COL + + +def save_obj(base_mapper, states, uowtransaction, single=False): + """Issue ``INSERT`` and/or ``UPDATE`` statements for a list + of objects. + + This is called within the context of a UOWTransaction during a + flush operation, given a list of states to be flushed. The + base mapper in an inheritance hierarchy handles the inserts/ + updates for all descendant mappers. 
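    These functions are not called by user code directly; they are reached
    through a flush. A rough sketch of the entry point, assuming
    hypothetical declarative ``Base`` and ``User`` classes::

        from sqlalchemy import create_engine
        from sqlalchemy.orm import Session

        engine = create_engine("sqlite://")
        Base.metadata.create_all(engine)

        with Session(engine) as session:
            session.add(User(name="someuser"))
            # flush() runs the unit of work, which in turn calls
            # save_obj() here to emit INSERT/UPDATE statements for the
            # pending objects
            session.flush()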
+ + """ + + # if batch=false, call _save_obj separately for each object + if not single and not base_mapper.batch: + for state in _sort_states(base_mapper, states): + save_obj(base_mapper, [state], uowtransaction, single=True) + return + + states_to_update = [] + states_to_insert = [] + + for ( + state, + dict_, + mapper, + connection, + has_identity, + row_switch, + update_version_id, + ) in _organize_states_for_save(base_mapper, states, uowtransaction): + if has_identity or row_switch: + states_to_update.append( + (state, dict_, mapper, connection, update_version_id) + ) + else: + states_to_insert.append((state, dict_, mapper, connection)) + + for table, mapper in base_mapper._sorted_tables.items(): + if table not in mapper._pks_by_table: + continue + insert = _collect_insert_commands(table, states_to_insert) + + update = _collect_update_commands( + uowtransaction, table, states_to_update + ) + + _emit_update_statements( + base_mapper, + uowtransaction, + mapper, + table, + update, + ) + + _emit_insert_statements( + base_mapper, + uowtransaction, + mapper, + table, + insert, + ) + + _finalize_insert_update_commands( + base_mapper, + uowtransaction, + chain( + ( + (state, state_dict, mapper, connection, False) + for (state, state_dict, mapper, connection) in states_to_insert + ), + ( + (state, state_dict, mapper, connection, True) + for ( + state, + state_dict, + mapper, + connection, + update_version_id, + ) in states_to_update + ), + ), + ) + + +def post_update(base_mapper, states, uowtransaction, post_update_cols): + """Issue UPDATE statements on behalf of a relationship() which + specifies post_update. + + """ + + states_to_update = list( + _organize_states_for_post_update(base_mapper, states, uowtransaction) + ) + + for table, mapper in base_mapper._sorted_tables.items(): + if table not in mapper._pks_by_table: + continue + + update = ( + ( + state, + state_dict, + sub_mapper, + connection, + ( + mapper._get_committed_state_attr_by_column( + state, state_dict, mapper.version_id_col + ) + if mapper.version_id_col is not None + else None + ), + ) + for state, state_dict, sub_mapper, connection in states_to_update + if table in sub_mapper._pks_by_table + ) + + update = _collect_post_update_commands( + base_mapper, uowtransaction, table, update, post_update_cols + ) + + _emit_post_update_statements( + base_mapper, + uowtransaction, + mapper, + table, + update, + ) + + +def delete_obj(base_mapper, states, uowtransaction): + """Issue ``DELETE`` statements for a list of objects. + + This is called within the context of a UOWTransaction during a + flush operation. + + """ + + states_to_delete = list( + _organize_states_for_delete(base_mapper, states, uowtransaction) + ) + + table_to_mapper = base_mapper._sorted_tables + + for table in reversed(list(table_to_mapper.keys())): + mapper = table_to_mapper[table] + if table not in mapper._pks_by_table: + continue + elif mapper.inherits and mapper.passive_deletes: + continue + + delete = _collect_delete_commands( + base_mapper, uowtransaction, table, states_to_delete + ) + + _emit_delete_statements( + base_mapper, + uowtransaction, + mapper, + table, + delete, + ) + + for ( + state, + state_dict, + mapper, + connection, + update_version_id, + ) in states_to_delete: + mapper.dispatch.after_delete(mapper, connection, state) + + +def _organize_states_for_save(base_mapper, states, uowtransaction): + """Make an initial pass across a set of states for INSERT or + UPDATE. 
+ + This includes splitting out into distinct lists for + each, calling before_insert/before_update, obtaining + key information for each state including its dictionary, + mapper, the connection to use for the execution per state, + and the identity flag. + + """ + + for state, dict_, mapper, connection in _connections_for_states( + base_mapper, uowtransaction, states + ): + has_identity = bool(state.key) + + instance_key = state.key or mapper._identity_key_from_state(state) + + row_switch = update_version_id = None + + # call before_XXX extensions + if not has_identity: + mapper.dispatch.before_insert(mapper, connection, state) + else: + mapper.dispatch.before_update(mapper, connection, state) + + if mapper._validate_polymorphic_identity: + mapper._validate_polymorphic_identity(mapper, state, dict_) + + # detect if we have a "pending" instance (i.e. has + # no instance_key attached to it), and another instance + # with the same identity key already exists as persistent. + # convert to an UPDATE if so. + if ( + not has_identity + and instance_key in uowtransaction.session.identity_map + ): + instance = uowtransaction.session.identity_map[instance_key] + existing = attributes.instance_state(instance) + + if not uowtransaction.was_already_deleted(existing): + if not uowtransaction.is_deleted(existing): + util.warn( + "New instance %s with identity key %s conflicts " + "with persistent instance %s" + % (state_str(state), instance_key, state_str(existing)) + ) + else: + base_mapper._log_debug( + "detected row switch for identity %s. " + "will update %s, remove %s from " + "transaction", + instance_key, + state_str(state), + state_str(existing), + ) + + # remove the "delete" flag from the existing element + uowtransaction.remove_state_actions(existing) + row_switch = existing + + if (has_identity or row_switch) and mapper.version_id_col is not None: + update_version_id = mapper._get_committed_state_attr_by_column( + row_switch if row_switch else state, + row_switch.dict if row_switch else dict_, + mapper.version_id_col, + ) + + yield ( + state, + dict_, + mapper, + connection, + has_identity, + row_switch, + update_version_id, + ) + + +def _organize_states_for_post_update(base_mapper, states, uowtransaction): + """Make an initial pass across a set of states for UPDATE + corresponding to post_update. + + This includes obtaining key information for each state + including its dictionary, mapper, the connection to use for + the execution per state. + + """ + return _connections_for_states(base_mapper, uowtransaction, states) + + +def _organize_states_for_delete(base_mapper, states, uowtransaction): + """Make an initial pass across a set of states for DELETE. + + This includes calling out before_delete and obtaining + key information for each state including its dictionary, + mapper, the connection to use for the execution per state. + + """ + for state, dict_, mapper, connection in _connections_for_states( + base_mapper, uowtransaction, states + ): + mapper.dispatch.before_delete(mapper, connection, state) + + if mapper.version_id_col is not None: + update_version_id = mapper._get_committed_state_attr_by_column( + state, dict_, mapper.version_id_col + ) + else: + update_version_id = None + + yield (state, dict_, mapper, connection, update_version_id) + + +def _collect_insert_commands( + table, + states_to_insert, + *, + bulk=False, + return_defaults=False, + render_nulls=False, + include_bulk_keys=(), +): + """Identify sets of values to use in INSERT statements for a + list of states. 
+ + """ + for state, state_dict, mapper, connection in states_to_insert: + if table not in mapper._pks_by_table: + continue + + params = {} + value_params = {} + + propkey_to_col = mapper._propkey_to_col[table] + + eval_none = mapper._insert_cols_evaluating_none[table] + + for propkey in set(propkey_to_col).intersection(state_dict): + value = state_dict[propkey] + col = propkey_to_col[propkey] + if value is None and col not in eval_none and not render_nulls: + continue + elif not bulk and ( + hasattr(value, "__clause_element__") + or isinstance(value, sql.ClauseElement) + ): + value_params[col] = ( + value.__clause_element__() + if hasattr(value, "__clause_element__") + else value + ) + else: + params[col.key] = value + + if not bulk: + # for all the columns that have no default and we don't have + # a value and where "None" is not a special value, add + # explicit None to the INSERT. This is a legacy behavior + # which might be worth removing, as it should not be necessary + # and also produces confusion, given that "missing" and None + # now have distinct meanings + for colkey in ( + mapper._insert_cols_as_none[table] + .difference(params) + .difference([c.key for c in value_params]) + ): + params[colkey] = None + + if not bulk or return_defaults: + # params are in terms of Column key objects, so + # compare to pk_keys_by_table + has_all_pks = mapper._pk_keys_by_table[table].issubset(params) + + if mapper.base_mapper._prefer_eager_defaults( + connection.dialect, table + ): + has_all_defaults = mapper._server_default_col_keys[ + table + ].issubset(params) + else: + has_all_defaults = True + else: + has_all_defaults = has_all_pks = True + + if ( + mapper.version_id_generator is not False + and mapper.version_id_col is not None + and mapper.version_id_col in mapper._cols_by_table[table] + ): + params[mapper.version_id_col.key] = mapper.version_id_generator( + None + ) + + if bulk: + if mapper._set_polymorphic_identity: + params.setdefault( + mapper._polymorphic_attr_key, mapper.polymorphic_identity + ) + + if include_bulk_keys: + params.update((k, state_dict[k]) for k in include_bulk_keys) + + yield ( + state, + state_dict, + params, + mapper, + connection, + value_params, + has_all_pks, + has_all_defaults, + ) + + +def _collect_update_commands( + uowtransaction, + table, + states_to_update, + *, + bulk=False, + use_orm_update_stmt=None, + include_bulk_keys=(), +): + """Identify sets of values to use in UPDATE statements for a + list of states. + + This function works intricately with the history system + to determine exactly what values should be updated + as well as how the row should be matched within an UPDATE + statement. Includes some tricky scenarios where the primary + key of an object might have been changed. 
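    As a sketch of one such scenario: when a primary key attribute itself
    is modified, the UPDATE must locate the row by its old primary key
    value while setting the new one (hypothetical ``User`` mapping; SQL
    shown is approximate)::

        user = session.get(User, 5)
        user.id = 7
        session.flush()

        # emits roughly:
        #   UPDATE user_table SET id=? WHERE user_table.id = ?
        #   parameters: (7, 5)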
+ + """ + + for ( + state, + state_dict, + mapper, + connection, + update_version_id, + ) in states_to_update: + if table not in mapper._pks_by_table: + continue + + pks = mapper._pks_by_table[table] + + if use_orm_update_stmt is not None: + # TODO: ordered values, etc + value_params = use_orm_update_stmt._values + else: + value_params = {} + + propkey_to_col = mapper._propkey_to_col[table] + + if bulk: + # keys here are mapped attribute keys, so + # look at mapper attribute keys for pk + params = { + propkey_to_col[propkey].key: state_dict[propkey] + for propkey in set(propkey_to_col) + .intersection(state_dict) + .difference(mapper._pk_attr_keys_by_table[table]) + } + has_all_defaults = True + else: + params = {} + for propkey in set(propkey_to_col).intersection( + state.committed_state + ): + value = state_dict[propkey] + col = propkey_to_col[propkey] + + if hasattr(value, "__clause_element__") or isinstance( + value, sql.ClauseElement + ): + value_params[col] = ( + value.__clause_element__() + if hasattr(value, "__clause_element__") + else value + ) + # guard against values that generate non-__nonzero__ + # objects for __eq__() + elif ( + state.manager[propkey].impl.is_equal( + value, state.committed_state[propkey] + ) + is not True + ): + params[col.key] = value + + if mapper.base_mapper.eager_defaults is True: + has_all_defaults = ( + mapper._server_onupdate_default_col_keys[table] + ).issubset(params) + else: + has_all_defaults = True + + if ( + update_version_id is not None + and mapper.version_id_col in mapper._cols_by_table[table] + ): + if not bulk and not (params or value_params): + # HACK: check for history in other tables, in case the + # history is only in a different table than the one + # where the version_id_col is. This logic was lost + # from 0.9 -> 1.0.0 and restored in 1.0.6. + for prop in mapper._columntoproperty.values(): + history = state.manager[prop.key].impl.get_history( + state, state_dict, attributes.PASSIVE_NO_INITIALIZE + ) + if history.added: + break + else: + # no net change, break + continue + + col = mapper.version_id_col + no_params = not params and not value_params + params[col._label] = update_version_id + + if ( + bulk or col.key not in params + ) and mapper.version_id_generator is not False: + val = mapper.version_id_generator(update_version_id) + params[col.key] = val + elif mapper.version_id_generator is False and no_params: + # no version id generator, no values set on the table, + # and version id wasn't manually incremented. 
+ # set version id to itself so we get an UPDATE + # statement + params[col.key] = update_version_id + + elif not (params or value_params): + continue + + has_all_pks = True + expect_pk_cascaded = False + if bulk: + # keys here are mapped attribute keys, so + # look at mapper attribute keys for pk + pk_params = { + propkey_to_col[propkey]._label: state_dict.get(propkey) + for propkey in set(propkey_to_col).intersection( + mapper._pk_attr_keys_by_table[table] + ) + } + if util.NONE_SET.intersection(pk_params.values()): + raise sa_exc.InvalidRequestError( + f"No primary key value supplied for column(s) " + f"""{ + ', '.join( + str(c) for c in pks if pk_params[c._label] is None + ) + }; """ + "per-row ORM Bulk UPDATE by Primary Key requires that " + "records contain primary key values", + code="bupq", + ) + + else: + pk_params = {} + for col in pks: + propkey = mapper._columntoproperty[col].key + + history = state.manager[propkey].impl.get_history( + state, state_dict, attributes.PASSIVE_OFF + ) + + if history.added: + if ( + not history.deleted + or ("pk_cascaded", state, col) + in uowtransaction.attributes + ): + expect_pk_cascaded = True + pk_params[col._label] = history.added[0] + params.pop(col.key, None) + else: + # else, use the old value to locate the row + pk_params[col._label] = history.deleted[0] + if col in value_params: + has_all_pks = False + else: + pk_params[col._label] = history.unchanged[0] + if pk_params[col._label] is None: + raise orm_exc.FlushError( + "Can't update table %s using NULL for primary " + "key value on column %s" % (table, col) + ) + + if include_bulk_keys: + params.update((k, state_dict[k]) for k in include_bulk_keys) + + if params or value_params: + params.update(pk_params) + yield ( + state, + state_dict, + params, + mapper, + connection, + value_params, + has_all_defaults, + has_all_pks, + ) + elif expect_pk_cascaded: + # no UPDATE occurs on this table, but we expect that CASCADE rules + # have changed the primary key of the row; propagate this event to + # other columns that expect to have been modified. this normally + # occurs after the UPDATE is emitted however we invoke it here + # explicitly in the absence of our invoking an UPDATE + for m, equated_pairs in mapper._table_to_equated[table]: + sync.populate( + state, + m, + state, + m, + equated_pairs, + uowtransaction, + mapper.passive_updates, + ) + + +def _collect_post_update_commands( + base_mapper, uowtransaction, table, states_to_update, post_update_cols +): + """Identify sets of values to use in UPDATE statements for a + list of states within a post_update operation. 
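    post_update is used for mutually-dependent rows. A minimal sketch of a
    mapping that routes through this code path, assuming a hypothetical
    declarative ``Base``::

        from sqlalchemy import Column, ForeignKey, Integer
        from sqlalchemy.orm import relationship


        class Entry(Base):
            __tablename__ = "entry"

            id = Column(Integer, primary_key=True)
            widget_id = Column(Integer, ForeignKey("widget.id"))


        class Widget(Base):
            __tablename__ = "widget"

            id = Column(Integer, primary_key=True)
            favorite_entry_id = Column(Integer, ForeignKey("entry.id"))

            # emit the FK assignment as a second UPDATE after the INSERTs,
            # breaking the circular dependency between the two tables
            favorite_entry = relationship(
                Entry, foreign_keys=favorite_entry_id, post_update=True
            )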
+ + """ + + for ( + state, + state_dict, + mapper, + connection, + update_version_id, + ) in states_to_update: + # assert table in mapper._pks_by_table + + pks = mapper._pks_by_table[table] + params = {} + hasdata = False + + for col in mapper._cols_by_table[table]: + if col in pks: + params[col._label] = mapper._get_state_attr_by_column( + state, state_dict, col, passive=attributes.PASSIVE_OFF + ) + + elif col in post_update_cols or col.onupdate is not None: + prop = mapper._columntoproperty[col] + history = state.manager[prop.key].impl.get_history( + state, state_dict, attributes.PASSIVE_NO_INITIALIZE + ) + if history.added: + value = history.added[0] + params[col.key] = value + hasdata = True + if hasdata: + if ( + update_version_id is not None + and mapper.version_id_col in mapper._cols_by_table[table] + ): + col = mapper.version_id_col + params[col._label] = update_version_id + + if ( + bool(state.key) + and col.key not in params + and mapper.version_id_generator is not False + ): + val = mapper.version_id_generator(update_version_id) + params[col.key] = val + yield state, state_dict, mapper, connection, params + + +def _collect_delete_commands( + base_mapper, uowtransaction, table, states_to_delete +): + """Identify values to use in DELETE statements for a list of + states to be deleted.""" + + for ( + state, + state_dict, + mapper, + connection, + update_version_id, + ) in states_to_delete: + if table not in mapper._pks_by_table: + continue + + params = {} + for col in mapper._pks_by_table[table]: + params[col.key] = value = ( + mapper._get_committed_state_attr_by_column( + state, state_dict, col + ) + ) + if value is None: + raise orm_exc.FlushError( + "Can't delete from table %s " + "using NULL for primary " + "key value on column %s" % (table, col) + ) + + if ( + update_version_id is not None + and mapper.version_id_col in mapper._cols_by_table[table] + ): + params[mapper.version_id_col.key] = update_version_id + yield params, connection + + +def _emit_update_statements( + base_mapper, + uowtransaction, + mapper, + table, + update, + *, + bookkeeping=True, + use_orm_update_stmt=None, + enable_check_rowcount=True, +): + """Emit UPDATE statements corresponding to value lists collected + by _collect_update_commands().""" + + needs_version_id = ( + mapper.version_id_col is not None + and mapper.version_id_col in mapper._cols_by_table[table] + ) + + execution_options = {"compiled_cache": base_mapper._compiled_cache} + + def update_stmt(existing_stmt=None): + clauses = BooleanClauseList._construct_raw(operators.and_) + + for col in mapper._pks_by_table[table]: + clauses._append_inplace( + col == sql.bindparam(col._label, type_=col.type) + ) + + if needs_version_id: + clauses._append_inplace( + mapper.version_id_col + == sql.bindparam( + mapper.version_id_col._label, + type_=mapper.version_id_col.type, + ) + ) + + if existing_stmt is not None: + stmt = existing_stmt.where(clauses) + else: + stmt = table.update().where(clauses) + return stmt + + if use_orm_update_stmt is not None: + cached_stmt = update_stmt(use_orm_update_stmt) + + else: + cached_stmt = base_mapper._memo(("update", table), update_stmt) + + for ( + (connection, paramkeys, hasvalue, has_all_defaults, has_all_pks), + records, + ) in groupby( + update, + lambda rec: ( + rec[4], # connection + set(rec[2]), # set of parameter keys + bool(rec[5]), # whether or not we have "value" parameters + rec[6], # has_all_defaults + rec[7], # has all pks + ), + ): + rows = 0 + records = list(records) + + statement = cached_stmt + + if 
use_orm_update_stmt is not None: + statement = statement._annotate( + { + "_emit_update_table": table, + "_emit_update_mapper": mapper, + } + ) + + return_defaults = False + + if not has_all_pks: + statement = statement.return_defaults(*mapper._pks_by_table[table]) + return_defaults = True + + if ( + bookkeeping + and not has_all_defaults + and mapper.base_mapper.eager_defaults is True + # change as of #8889 - if RETURNING is not going to be used anyway, + # (applies to MySQL, MariaDB which lack UPDATE RETURNING) ensure + # we can do an executemany UPDATE which is more efficient + and table.implicit_returning + and connection.dialect.update_returning + ): + statement = statement.return_defaults( + *mapper._server_onupdate_default_cols[table] + ) + return_defaults = True + + if mapper._version_id_has_server_side_value: + statement = statement.return_defaults(mapper.version_id_col) + return_defaults = True + + assert_singlerow = connection.dialect.supports_sane_rowcount + + assert_multirow = ( + assert_singlerow + and connection.dialect.supports_sane_multi_rowcount + ) + + # change as of #8889 - if RETURNING is not going to be used anyway, + # (applies to MySQL, MariaDB which lack UPDATE RETURNING) ensure + # we can do an executemany UPDATE which is more efficient + allow_executemany = not return_defaults and not needs_version_id + + if hasvalue: + for ( + state, + state_dict, + params, + mapper, + connection, + value_params, + has_all_defaults, + has_all_pks, + ) in records: + c = connection.execute( + statement.values(value_params), + params, + execution_options=execution_options, + ) + if bookkeeping: + _postfetch( + mapper, + uowtransaction, + table, + state, + state_dict, + c, + c.context.compiled_parameters[0], + value_params, + True, + c.returned_defaults, + ) + rows += c.rowcount + check_rowcount = enable_check_rowcount and assert_singlerow + else: + if not allow_executemany: + check_rowcount = enable_check_rowcount and assert_singlerow + for ( + state, + state_dict, + params, + mapper, + connection, + value_params, + has_all_defaults, + has_all_pks, + ) in records: + c = connection.execute( + statement, params, execution_options=execution_options + ) + + # TODO: why with bookkeeping=False? + if bookkeeping: + _postfetch( + mapper, + uowtransaction, + table, + state, + state_dict, + c, + c.context.compiled_parameters[0], + value_params, + True, + c.returned_defaults, + ) + rows += c.rowcount + else: + multiparams = [rec[2] for rec in records] + + check_rowcount = enable_check_rowcount and ( + assert_multirow + or (assert_singlerow and len(multiparams) == 1) + ) + + c = connection.execute( + statement, multiparams, execution_options=execution_options + ) + + rows += c.rowcount + + for ( + state, + state_dict, + params, + mapper, + connection, + value_params, + has_all_defaults, + has_all_pks, + ) in records: + if bookkeeping: + _postfetch( + mapper, + uowtransaction, + table, + state, + state_dict, + c, + c.context.compiled_parameters[0], + value_params, + True, + ( + c.returned_defaults + if not c.context.executemany + else None + ), + ) + + if check_rowcount: + if rows != len(records): + raise orm_exc.StaleDataError( + "UPDATE statement on table '%s' expected to " + "update %d row(s); %d were matched." + % (table.description, len(records), rows) + ) + + elif needs_version_id: + util.warn( + "Dialect %s does not support updated rowcount " + "- versioning cannot be verified." 
+ % c.dialect.dialect_description + ) + + +def _emit_insert_statements( + base_mapper, + uowtransaction, + mapper, + table, + insert, + *, + bookkeeping=True, + use_orm_insert_stmt=None, + execution_options=None, +): + """Emit INSERT statements corresponding to value lists collected + by _collect_insert_commands().""" + + if use_orm_insert_stmt is not None: + cached_stmt = use_orm_insert_stmt + exec_opt = util.EMPTY_DICT + + # if a user query with RETURNING was passed, we definitely need + # to use RETURNING. + returning_is_required_anyway = bool(use_orm_insert_stmt._returning) + deterministic_results_reqd = ( + returning_is_required_anyway + and use_orm_insert_stmt._sort_by_parameter_order + ) or bookkeeping + else: + returning_is_required_anyway = False + deterministic_results_reqd = bookkeeping + cached_stmt = base_mapper._memo(("insert", table), table.insert) + exec_opt = {"compiled_cache": base_mapper._compiled_cache} + + if execution_options: + execution_options = util.EMPTY_DICT.merge_with( + exec_opt, execution_options + ) + else: + execution_options = exec_opt + + return_result = None + + for ( + (connection, _, hasvalue, has_all_pks, has_all_defaults), + records, + ) in groupby( + insert, + lambda rec: ( + rec[4], # connection + set(rec[2]), # parameter keys + bool(rec[5]), # whether we have "value" parameters + rec[6], + rec[7], + ), + ): + statement = cached_stmt + + if use_orm_insert_stmt is not None: + statement = statement._annotate( + { + "_emit_insert_table": table, + "_emit_insert_mapper": mapper, + } + ) + + if ( + ( + not bookkeeping + or ( + has_all_defaults + or not base_mapper._prefer_eager_defaults( + connection.dialect, table + ) + or not table.implicit_returning + or not connection.dialect.insert_returning + ) + ) + and not returning_is_required_anyway + and has_all_pks + and not hasvalue + ): + # the "we don't need newly generated values back" section. + # here we have all the PKs, all the defaults or we don't want + # to fetch them, or the dialect doesn't support RETURNING at all + # so we have to post-fetch / use lastrowid anyway. 
+ records = list(records) + multiparams = [rec[2] for rec in records] + + result = connection.execute( + statement, multiparams, execution_options=execution_options + ) + if bookkeeping: + for ( + ( + state, + state_dict, + params, + mapper_rec, + conn, + value_params, + has_all_pks, + has_all_defaults, + ), + last_inserted_params, + ) in zip(records, result.context.compiled_parameters): + if state: + _postfetch( + mapper_rec, + uowtransaction, + table, + state, + state_dict, + result, + last_inserted_params, + value_params, + False, + ( + result.returned_defaults + if not result.context.executemany + else None + ), + ) + else: + _postfetch_bulk_save(mapper_rec, state_dict, table) + + else: + # here, we need defaults and/or pk values back or we otherwise + # know that we are using RETURNING in any case + + records = list(records) + + if returning_is_required_anyway or ( + table.implicit_returning and not hasvalue and len(records) > 1 + ): + if ( + deterministic_results_reqd + and connection.dialect.insert_executemany_returning_sort_by_parameter_order # noqa: E501 + ) or ( + not deterministic_results_reqd + and connection.dialect.insert_executemany_returning + ): + do_executemany = True + elif returning_is_required_anyway: + if deterministic_results_reqd: + dt = " with RETURNING and sort by parameter order" + else: + dt = " with RETURNING" + raise sa_exc.InvalidRequestError( + f"Can't use explicit RETURNING for bulk INSERT " + f"operation with " + f"{connection.dialect.dialect_description} backend; " + f"executemany{dt} is not enabled for this dialect." + ) + else: + do_executemany = False + else: + do_executemany = False + + if use_orm_insert_stmt is None: + if ( + not has_all_defaults + and base_mapper._prefer_eager_defaults( + connection.dialect, table + ) + ): + statement = statement.return_defaults( + *mapper._server_default_cols[table], + sort_by_parameter_order=bookkeeping, + ) + + if mapper.version_id_col is not None: + statement = statement.return_defaults( + mapper.version_id_col, + sort_by_parameter_order=bookkeeping, + ) + elif do_executemany: + statement = statement.return_defaults( + *table.primary_key, sort_by_parameter_order=bookkeeping + ) + + if do_executemany: + multiparams = [rec[2] for rec in records] + + result = connection.execute( + statement, multiparams, execution_options=execution_options + ) + + if use_orm_insert_stmt is not None: + if return_result is None: + return_result = result + else: + return_result = return_result.splice_vertically(result) + + if bookkeeping: + for ( + ( + state, + state_dict, + params, + mapper_rec, + conn, + value_params, + has_all_pks, + has_all_defaults, + ), + last_inserted_params, + inserted_primary_key, + returned_defaults, + ) in zip_longest( + records, + result.context.compiled_parameters, + result.inserted_primary_key_rows, + result.returned_defaults_rows or (), + ): + if inserted_primary_key is None: + # this is a real problem and means that we didn't + # get back as many PK rows. we can't continue + # since this indicates PK rows were missing, which + # means we likely mis-populated records starting + # at that point with incorrectly matched PK + # values. + raise orm_exc.FlushError( + "Multi-row INSERT statement for %s did not " + "produce " + "the correct number of INSERTed rows for " + "RETURNING. Ensure there are no triggers or " + "special driver issues preventing INSERT from " + "functioning properly." 
% mapper_rec + ) + + for pk, col in zip( + inserted_primary_key, + mapper._pks_by_table[table], + ): + prop = mapper_rec._columntoproperty[col] + if state_dict.get(prop.key) is None: + state_dict[prop.key] = pk + + if state: + _postfetch( + mapper_rec, + uowtransaction, + table, + state, + state_dict, + result, + last_inserted_params, + value_params, + False, + returned_defaults, + ) + else: + _postfetch_bulk_save(mapper_rec, state_dict, table) + else: + assert not returning_is_required_anyway + + for ( + state, + state_dict, + params, + mapper_rec, + connection, + value_params, + has_all_pks, + has_all_defaults, + ) in records: + if value_params: + result = connection.execute( + statement.values(value_params), + params, + execution_options=execution_options, + ) + else: + result = connection.execute( + statement, + params, + execution_options=execution_options, + ) + + primary_key = result.inserted_primary_key + if primary_key is None: + raise orm_exc.FlushError( + "Single-row INSERT statement for %s " + "did not produce a " + "new primary key result " + "being invoked. Ensure there are no triggers or " + "special driver issues preventing INSERT from " + "functioning properly." % (mapper_rec,) + ) + for pk, col in zip( + primary_key, mapper._pks_by_table[table] + ): + prop = mapper_rec._columntoproperty[col] + if ( + col in value_params + or state_dict.get(prop.key) is None + ): + state_dict[prop.key] = pk + if bookkeeping: + if state: + _postfetch( + mapper_rec, + uowtransaction, + table, + state, + state_dict, + result, + result.context.compiled_parameters[0], + value_params, + False, + ( + result.returned_defaults + if not result.context.executemany + else None + ), + ) + else: + _postfetch_bulk_save(mapper_rec, state_dict, table) + + if use_orm_insert_stmt is not None: + if return_result is None: + return _cursor.null_dml_result() + else: + return return_result + + +def _emit_post_update_statements( + base_mapper, uowtransaction, mapper, table, update +): + """Emit UPDATE statements corresponding to value lists collected + by _collect_post_update_commands().""" + + execution_options = {"compiled_cache": base_mapper._compiled_cache} + + needs_version_id = ( + mapper.version_id_col is not None + and mapper.version_id_col in mapper._cols_by_table[table] + ) + + def update_stmt(): + clauses = BooleanClauseList._construct_raw(operators.and_) + + for col in mapper._pks_by_table[table]: + clauses._append_inplace( + col == sql.bindparam(col._label, type_=col.type) + ) + + if needs_version_id: + clauses._append_inplace( + mapper.version_id_col + == sql.bindparam( + mapper.version_id_col._label, + type_=mapper.version_id_col.type, + ) + ) + + stmt = table.update().where(clauses) + + return stmt + + statement = base_mapper._memo(("post_update", table), update_stmt) + + if mapper._version_id_has_server_side_value: + statement = statement.return_defaults(mapper.version_id_col) + + # execute each UPDATE in the order according to the original + # list of states to guarantee row access order, but + # also group them into common (connection, cols) sets + # to support executemany(). 
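+    # (editorial note, not part of the upstream file) each record produced by
+    # _collect_post_update_commands() is a
+    # (state, state_dict, mapper, connection, params) tuple; grouping on
+    # (connection, parameter-key set) below lets compatible rows share a single
+    # executemany() UPDATE while still preserving the original state order.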
+ for key, records in groupby( + update, + lambda rec: (rec[3], set(rec[4])), # connection # parameter keys + ): + rows = 0 + + records = list(records) + connection = key[0] + + assert_singlerow = connection.dialect.supports_sane_rowcount + assert_multirow = ( + assert_singlerow + and connection.dialect.supports_sane_multi_rowcount + ) + allow_executemany = not needs_version_id or assert_multirow + + if not allow_executemany: + check_rowcount = assert_singlerow + for state, state_dict, mapper_rec, connection, params in records: + c = connection.execute( + statement, params, execution_options=execution_options + ) + + _postfetch_post_update( + mapper_rec, + uowtransaction, + table, + state, + state_dict, + c, + c.context.compiled_parameters[0], + ) + rows += c.rowcount + else: + multiparams = [ + params + for state, state_dict, mapper_rec, conn, params in records + ] + + check_rowcount = assert_multirow or ( + assert_singlerow and len(multiparams) == 1 + ) + + c = connection.execute( + statement, multiparams, execution_options=execution_options + ) + + rows += c.rowcount + for state, state_dict, mapper_rec, connection, params in records: + _postfetch_post_update( + mapper_rec, + uowtransaction, + table, + state, + state_dict, + c, + c.context.compiled_parameters[0], + ) + + if check_rowcount: + if rows != len(records): + raise orm_exc.StaleDataError( + "UPDATE statement on table '%s' expected to " + "update %d row(s); %d were matched." + % (table.description, len(records), rows) + ) + + elif needs_version_id: + util.warn( + "Dialect %s does not support updated rowcount " + "- versioning cannot be verified." + % c.dialect.dialect_description + ) + + +def _emit_delete_statements( + base_mapper, uowtransaction, mapper, table, delete +): + """Emit DELETE statements corresponding to value lists collected + by _collect_delete_commands().""" + + need_version_id = ( + mapper.version_id_col is not None + and mapper.version_id_col in mapper._cols_by_table[table] + ) + + def delete_stmt(): + clauses = BooleanClauseList._construct_raw(operators.and_) + + for col in mapper._pks_by_table[table]: + clauses._append_inplace( + col == sql.bindparam(col.key, type_=col.type) + ) + + if need_version_id: + clauses._append_inplace( + mapper.version_id_col + == sql.bindparam( + mapper.version_id_col.key, type_=mapper.version_id_col.type + ) + ) + + return table.delete().where(clauses) + + statement = base_mapper._memo(("delete", table), delete_stmt) + for connection, recs in groupby(delete, lambda rec: rec[1]): # connection + del_objects = [params for params, connection in recs] + + execution_options = {"compiled_cache": base_mapper._compiled_cache} + expected = len(del_objects) + rows_matched = -1 + only_warn = False + + if ( + need_version_id + and not connection.dialect.supports_sane_multi_rowcount + ): + if connection.dialect.supports_sane_rowcount: + rows_matched = 0 + # execute deletes individually so that versioned + # rows can be verified + for params in del_objects: + c = connection.execute( + statement, params, execution_options=execution_options + ) + rows_matched += c.rowcount + else: + util.warn( + "Dialect %s does not support deleted rowcount " + "- versioning cannot be verified." 
+ % connection.dialect.dialect_description + ) + connection.execute( + statement, del_objects, execution_options=execution_options + ) + else: + c = connection.execute( + statement, del_objects, execution_options=execution_options + ) + + if not need_version_id: + only_warn = True + + rows_matched = c.rowcount + + if ( + base_mapper.confirm_deleted_rows + and rows_matched > -1 + and expected != rows_matched + and ( + connection.dialect.supports_sane_multi_rowcount + or len(del_objects) == 1 + ) + ): + # TODO: why does this "only warn" if versioning is turned off, + # whereas the UPDATE raises? + if only_warn: + util.warn( + "DELETE statement on table '%s' expected to " + "delete %d row(s); %d were matched. Please set " + "confirm_deleted_rows=False within the mapper " + "configuration to prevent this warning." + % (table.description, expected, rows_matched) + ) + else: + raise orm_exc.StaleDataError( + "DELETE statement on table '%s' expected to " + "delete %d row(s); %d were matched. Please set " + "confirm_deleted_rows=False within the mapper " + "configuration to prevent this warning." + % (table.description, expected, rows_matched) + ) + + +def _finalize_insert_update_commands(base_mapper, uowtransaction, states): + """finalize state on states that have been inserted or updated, + including calling after_insert/after_update events. + + """ + for state, state_dict, mapper, connection, has_identity in states: + if mapper._readonly_props: + readonly = state.unmodified_intersection( + [ + p.key + for p in mapper._readonly_props + if ( + p.expire_on_flush + and (not p.deferred or p.key in state.dict) + ) + or ( + not p.expire_on_flush + and not p.deferred + and p.key not in state.dict + ) + ] + ) + if readonly: + state._expire_attributes(state.dict, readonly) + + # if eager_defaults option is enabled, load + # all expired cols. Else if we have a version_id_col, make sure + # it isn't expired. 
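+        # (editorial note, not part of the upstream file) toload_now collects
+        # attribute keys that need an immediate refresh SELECT: server-default /
+        # onupdate columns when base_mapper.eager_defaults is True, plus the
+        # version id attribute when it is generated server-side
+        # (version_id_generator is False) and is currently unloaded.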
+ toload_now = [] + + # this is specifically to emit a second SELECT for eager_defaults, + # so only if it's set to True, not "auto" + if base_mapper.eager_defaults is True: + toload_now.extend( + state._unloaded_non_object.intersection( + mapper._server_default_plus_onupdate_propkeys + ) + ) + + if ( + mapper.version_id_col is not None + and mapper.version_id_generator is False + ): + if mapper._version_id_prop.key in state.unloaded: + toload_now.extend([mapper._version_id_prop.key]) + + if toload_now: + state.key = base_mapper._identity_key_from_state(state) + stmt = future.select(mapper).set_label_style( + LABEL_STYLE_TABLENAME_PLUS_COL + ) + loading.load_on_ident( + uowtransaction.session, + stmt, + state.key, + refresh_state=state, + only_load_props=toload_now, + ) + + # call after_XXX extensions + if not has_identity: + mapper.dispatch.after_insert(mapper, connection, state) + else: + mapper.dispatch.after_update(mapper, connection, state) + + if ( + mapper.version_id_generator is False + and mapper.version_id_col is not None + ): + if state_dict[mapper._version_id_prop.key] is None: + raise orm_exc.FlushError( + "Instance does not contain a non-NULL version value" + ) + + +def _postfetch_post_update( + mapper, uowtransaction, table, state, dict_, result, params +): + needs_version_id = ( + mapper.version_id_col is not None + and mapper.version_id_col in mapper._cols_by_table[table] + ) + + if not uowtransaction.is_deleted(state): + # post updating after a regular INSERT or UPDATE, do a full postfetch + prefetch_cols = result.context.compiled.prefetch + postfetch_cols = result.context.compiled.postfetch + elif needs_version_id: + # post updating before a DELETE with a version_id_col, need to + # postfetch just version_id_col + prefetch_cols = postfetch_cols = () + else: + # post updating before a DELETE without a version_id_col, + # don't need to postfetch + return + + if needs_version_id: + prefetch_cols = list(prefetch_cols) + [mapper.version_id_col] + + refresh_flush = bool(mapper.class_manager.dispatch.refresh_flush) + if refresh_flush: + load_evt_attrs = [] + + for c in prefetch_cols: + if c.key in params and c in mapper._columntoproperty: + dict_[mapper._columntoproperty[c].key] = params[c.key] + if refresh_flush: + load_evt_attrs.append(mapper._columntoproperty[c].key) + + if refresh_flush and load_evt_attrs: + mapper.class_manager.dispatch.refresh_flush( + state, uowtransaction, load_evt_attrs + ) + + if postfetch_cols: + state._expire_attributes( + state.dict, + [ + mapper._columntoproperty[c].key + for c in postfetch_cols + if c in mapper._columntoproperty + ], + ) + + +def _postfetch( + mapper, + uowtransaction, + table, + state, + dict_, + result, + params, + value_params, + isupdate, + returned_defaults, +): + """Expire attributes in need of newly persisted database state, + after an INSERT or UPDATE statement has proceeded for that + state.""" + + prefetch_cols = result.context.compiled.prefetch + postfetch_cols = result.context.compiled.postfetch + returning_cols = result.context.compiled.effective_returning + + if ( + mapper.version_id_col is not None + and mapper.version_id_col in mapper._cols_by_table[table] + ): + prefetch_cols = list(prefetch_cols) + [mapper.version_id_col] + + refresh_flush = bool(mapper.class_manager.dispatch.refresh_flush) + if refresh_flush: + load_evt_attrs = [] + + if returning_cols: + row = returned_defaults + if row is not None: + for row_value, col in zip(row, returning_cols): + # pk cols returned from insert are handled + # distinctly, 
don't step on the values here + if col.primary_key and result.context.isinsert: + continue + + # note that columns can be in the "return defaults" that are + # not mapped to this mapper, typically because they are + # "excluded", which can be specified directly or also occurs + # when using declarative w/ single table inheritance + prop = mapper._columntoproperty.get(col) + if prop: + dict_[prop.key] = row_value + if refresh_flush: + load_evt_attrs.append(prop.key) + + for c in prefetch_cols: + if c.key in params and c in mapper._columntoproperty: + pkey = mapper._columntoproperty[c].key + + # set prefetched value in dict and also pop from committed_state, + # since this is new database state that replaces whatever might + # have previously been fetched (see #10800). this is essentially a + # shorthand version of set_committed_value(), which could also be + # used here directly (with more overhead) + dict_[pkey] = params[c.key] + state.committed_state.pop(pkey, None) + + if refresh_flush: + load_evt_attrs.append(pkey) + + if refresh_flush and load_evt_attrs: + mapper.class_manager.dispatch.refresh_flush( + state, uowtransaction, load_evt_attrs + ) + + if isupdate and value_params: + # explicitly suit the use case specified by + # [ticket:3801], PK SQL expressions for UPDATE on non-RETURNING + # database which are set to themselves in order to do a version bump. + postfetch_cols.extend( + [ + col + for col in value_params + if col.primary_key and col not in returning_cols + ] + ) + + if postfetch_cols: + state._expire_attributes( + state.dict, + [ + mapper._columntoproperty[c].key + for c in postfetch_cols + if c in mapper._columntoproperty + ], + ) + + # synchronize newly inserted ids from one table to the next + # TODO: this still goes a little too often. would be nice to + # have definitive list of "columns that changed" here + for m, equated_pairs in mapper._table_to_equated[table]: + sync.populate( + state, + m, + state, + m, + equated_pairs, + uowtransaction, + mapper.passive_updates, + ) + + +def _postfetch_bulk_save(mapper, dict_, table): + for m, equated_pairs in mapper._table_to_equated[table]: + sync.bulk_populate_inherit_keys(dict_, m, equated_pairs) + + +def _connections_for_states(base_mapper, uowtransaction, states): + """Return an iterator of (state, state.dict, mapper, connection). + + The states are sorted according to _sort_states, then paired + with the connection they should be using for the given + unit of work transaction. 
+ + """ + # if session has a connection callable, + # organize individual states with the connection + # to use for update + if uowtransaction.session.connection_callable: + connection_callable = uowtransaction.session.connection_callable + else: + connection = uowtransaction.transaction.connection(base_mapper) + connection_callable = None + + for state in _sort_states(base_mapper, states): + if connection_callable: + connection = connection_callable(base_mapper, state.obj()) + + mapper = state.manager.mapper + + yield state, state.dict, mapper, connection + + +def _sort_states(mapper, states): + pending = set(states) + persistent = {s for s in pending if s.key is not None} + pending.difference_update(persistent) + + try: + persistent_sorted = sorted( + persistent, key=mapper._persistent_sortkey_fn + ) + except TypeError as err: + raise sa_exc.InvalidRequestError( + "Could not sort objects by primary key; primary key " + "values must be sortable in Python (was: %s)" % err + ) from err + return ( + sorted(pending, key=operator.attrgetter("insert_order")) + + persistent_sorted + ) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/properties.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/properties.py new file mode 100644 index 0000000000000000000000000000000000000000..75ad5b1ca0eb908a4555dd59001f46ed23fb9a6b --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/properties.py @@ -0,0 +1,884 @@ +# orm/properties.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php + +"""MapperProperty implementations. + +This is a private module which defines the behavior of individual ORM- +mapped attributes. + +""" + +from __future__ import annotations + +from typing import Any +from typing import cast +from typing import Dict +from typing import List +from typing import Optional +from typing import Sequence +from typing import Set +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING +from typing import TypeVar +from typing import Union + +from . import attributes +from . import exc as orm_exc +from . import strategy_options +from .base import _DeclarativeMapped +from .base import class_mapper +from .descriptor_props import CompositeProperty +from .descriptor_props import ConcreteInheritedProperty +from .descriptor_props import SynonymProperty +from .interfaces import _AttributeOptions +from .interfaces import _DEFAULT_ATTRIBUTE_OPTIONS +from .interfaces import _IntrospectsAnnotations +from .interfaces import _MapsColumns +from .interfaces import MapperProperty +from .interfaces import PropComparator +from .interfaces import StrategizedProperty +from .relationships import RelationshipProperty +from .util import de_stringify_annotation +from .. import exc as sa_exc +from .. import ForeignKey +from .. import log +from .. 
import util +from ..sql import coercions +from ..sql import roles +from ..sql.base import _NoArg +from ..sql.schema import Column +from ..sql.schema import SchemaConst +from ..sql.type_api import TypeEngine +from ..util.typing import de_optionalize_union_types +from ..util.typing import get_args +from ..util.typing import includes_none +from ..util.typing import is_a_type +from ..util.typing import is_fwd_ref +from ..util.typing import is_pep593 +from ..util.typing import is_pep695 +from ..util.typing import Self + +if TYPE_CHECKING: + from ._typing import _IdentityKeyType + from ._typing import _InstanceDict + from ._typing import _ORMColumnExprArgument + from ._typing import _RegistryType + from .base import Mapped + from .decl_base import _ClassScanMapperConfig + from .mapper import Mapper + from .session import Session + from .state import _InstallLoaderCallableProto + from .state import InstanceState + from ..sql._typing import _InfoType + from ..sql.elements import ColumnElement + from ..sql.elements import NamedColumn + from ..sql.operators import OperatorType + from ..util.typing import _AnnotationScanType + from ..util.typing import RODescriptorReference + +_T = TypeVar("_T", bound=Any) +_PT = TypeVar("_PT", bound=Any) +_NC = TypeVar("_NC", bound="NamedColumn[Any]") + +__all__ = [ + "ColumnProperty", + "CompositeProperty", + "ConcreteInheritedProperty", + "RelationshipProperty", + "SynonymProperty", +] + + +@log.class_logger +class ColumnProperty( + _MapsColumns[_T], + StrategizedProperty[_T], + _IntrospectsAnnotations, + log.Identified, +): + """Describes an object attribute that corresponds to a table column + or other column expression. + + Public constructor is the :func:`_orm.column_property` function. + + """ + + strategy_wildcard_key = strategy_options._COLUMN_TOKEN + inherit_cache = True + """:meta private:""" + + _links_to_entity = False + + columns: List[NamedColumn[Any]] + + _is_polymorphic_discriminator: bool + + _mapped_by_synonym: Optional[str] + + comparator_factory: Type[PropComparator[_T]] + + __slots__ = ( + "columns", + "group", + "deferred", + "instrument", + "comparator_factory", + "active_history", + "expire_on_flush", + "_creation_order", + "_is_polymorphic_discriminator", + "_mapped_by_synonym", + "_deferred_column_loader", + "_raise_column_loader", + "_renders_in_subqueries", + "raiseload", + ) + + def __init__( + self, + column: _ORMColumnExprArgument[_T], + *additional_columns: _ORMColumnExprArgument[Any], + attribute_options: Optional[_AttributeOptions] = None, + group: Optional[str] = None, + deferred: bool = False, + raiseload: bool = False, + comparator_factory: Optional[Type[PropComparator[_T]]] = None, + active_history: bool = False, + expire_on_flush: bool = True, + info: Optional[_InfoType] = None, + doc: Optional[str] = None, + _instrument: bool = True, + _assume_readonly_dc_attributes: bool = False, + ): + super().__init__( + attribute_options=attribute_options, + _assume_readonly_dc_attributes=_assume_readonly_dc_attributes, + ) + columns = (column,) + additional_columns + self.columns = [ + coercions.expect(roles.LabeledColumnExprRole, c) for c in columns + ] + self.group = group + self.deferred = deferred + self.raiseload = raiseload + self.instrument = _instrument + self.comparator_factory = ( + comparator_factory + if comparator_factory is not None + else self.__class__.Comparator + ) + self.active_history = active_history + self.expire_on_flush = expire_on_flush + + if info is not None: + self.info.update(info) + + if doc is not None: + 
self.doc = doc + else: + for col in reversed(self.columns): + doc = getattr(col, "doc", None) + if doc is not None: + self.doc = doc + break + else: + self.doc = None + + util.set_creation_order(self) + + self.strategy_key = ( + ("deferred", self.deferred), + ("instrument", self.instrument), + ) + if self.raiseload: + self.strategy_key += (("raiseload", True),) + + def declarative_scan( + self, + decl_scan: _ClassScanMapperConfig, + registry: _RegistryType, + cls: Type[Any], + originating_module: Optional[str], + key: str, + mapped_container: Optional[Type[Mapped[Any]]], + annotation: Optional[_AnnotationScanType], + extracted_mapped_annotation: Optional[_AnnotationScanType], + is_dataclass_field: bool, + ) -> None: + column = self.columns[0] + if column.key is None: + column.key = key + if column.name is None: + column.name = key + + @property + def mapper_property_to_assign(self) -> Optional[MapperProperty[_T]]: + return self + + @property + def columns_to_assign(self) -> List[Tuple[Column[Any], int]]: + # mypy doesn't care about the isinstance here + return [ + (c, 0) # type: ignore + for c in self.columns + if isinstance(c, Column) and c.table is None + ] + + def _memoized_attr__renders_in_subqueries(self) -> bool: + if ("query_expression", True) in self.strategy_key: + return self.strategy._have_default_expression # type: ignore + + return ("deferred", True) not in self.strategy_key or ( + self not in self.parent._readonly_props # type: ignore + ) + + @util.preload_module("sqlalchemy.orm.state", "sqlalchemy.orm.strategies") + def _memoized_attr__deferred_column_loader( + self, + ) -> _InstallLoaderCallableProto[Any]: + state = util.preloaded.orm_state + strategies = util.preloaded.orm_strategies + return state.InstanceState._instance_level_callable_processor( + self.parent.class_manager, + strategies.LoadDeferredColumns(self.key), + self.key, + ) + + @util.preload_module("sqlalchemy.orm.state", "sqlalchemy.orm.strategies") + def _memoized_attr__raise_column_loader( + self, + ) -> _InstallLoaderCallableProto[Any]: + state = util.preloaded.orm_state + strategies = util.preloaded.orm_strategies + return state.InstanceState._instance_level_callable_processor( + self.parent.class_manager, + strategies.LoadDeferredColumns(self.key, True), + self.key, + ) + + def __clause_element__(self) -> roles.ColumnsClauseRole: + """Allow the ColumnProperty to work in expression before it is turned + into an instrumented attribute. + """ + + return self.expression + + @property + def expression(self) -> roles.ColumnsClauseRole: + """Return the primary column or expression for this ColumnProperty. + + E.g.:: + + + class File(Base): + # ... + + name = Column(String(64)) + extension = Column(String(8)) + filename = column_property(name + "." + extension) + path = column_property("C:/" + filename.expression) + + .. seealso:: + + :ref:`mapper_column_property_sql_expressions_composed` + + """ + return self.columns[0] + + def instrument_class(self, mapper: Mapper[Any]) -> None: + if not self.instrument: + return + + attributes.register_descriptor( + mapper.class_, + self.key, + comparator=self.comparator_factory(self, mapper), + parententity=mapper, + doc=self.doc, + ) + + def do_init(self) -> None: + super().do_init() + + if len(self.columns) > 1 and set(self.parent.primary_key).issuperset( + self.columns + ): + util.warn( + ( + "On mapper %s, primary key column '%s' is being combined " + "with distinct primary key column '%s' in attribute '%s'. 
" + "Use explicit properties to give each column its own " + "mapped attribute name." + ) + % (self.parent, self.columns[1], self.columns[0], self.key) + ) + + def copy(self) -> ColumnProperty[_T]: + return ColumnProperty( + *self.columns, + deferred=self.deferred, + group=self.group, + active_history=self.active_history, + ) + + def merge( + self, + session: Session, + source_state: InstanceState[Any], + source_dict: _InstanceDict, + dest_state: InstanceState[Any], + dest_dict: _InstanceDict, + load: bool, + _recursive: Dict[Any, object], + _resolve_conflict_map: Dict[_IdentityKeyType[Any], object], + ) -> None: + if not self.instrument: + return + elif self.key in source_dict: + value = source_dict[self.key] + + if not load: + dest_dict[self.key] = value + else: + impl = dest_state.get_impl(self.key) + impl.set(dest_state, dest_dict, value, None) + elif dest_state.has_identity and self.key not in dest_dict: + dest_state._expire_attributes( + dest_dict, [self.key], no_loader=True + ) + + class Comparator(util.MemoizedSlots, PropComparator[_PT]): + """Produce boolean, comparison, and other operators for + :class:`.ColumnProperty` attributes. + + See the documentation for :class:`.PropComparator` for a brief + overview. + + .. seealso:: + + :class:`.PropComparator` + + :class:`.ColumnOperators` + + :ref:`types_operators` + + :attr:`.TypeEngine.comparator_factory` + + """ + + if not TYPE_CHECKING: + # prevent pylance from being clever about slots + __slots__ = "__clause_element__", "info", "expressions" + + prop: RODescriptorReference[ColumnProperty[_PT]] + + expressions: Sequence[NamedColumn[Any]] + """The full sequence of columns referenced by this + attribute, adjusted for any aliasing in progress. + + .. versionadded:: 1.3.17 + + .. seealso:: + + :ref:`maptojoin` - usage example + """ + + def _orm_annotate_column(self, column: _NC) -> _NC: + """annotate and possibly adapt a column to be returned + as the mapped-attribute exposed version of the column. + + The column in this context needs to act as much like the + column in an ORM mapped context as possible, so includes + annotations to give hints to various ORM functions as to + the source entity of this column. It also adapts it + to the mapper's with_polymorphic selectable if one is + present. + + """ + + pe = self._parententity + annotations: Dict[str, Any] = { + "entity_namespace": pe, + "parententity": pe, + "parentmapper": pe, + "proxy_key": self.prop.key, + } + + col = column + + # for a mapper with polymorphic_on and an adapter, return + # the column against the polymorphic selectable. + # see also orm.util._orm_downgrade_polymorphic_columns + # for the reverse operation. + if self._parentmapper._polymorphic_adapter: + mapper_local_col = col + col = self._parentmapper._polymorphic_adapter.traverse(col) + + # this is a clue to the ORM Query etc. that this column + # was adapted to the mapper's polymorphic_adapter. the + # ORM uses this hint to know which column its adapting. + annotations["adapt_column"] = mapper_local_col + + return col._annotate(annotations)._set_propagate_attrs( + {"compile_state_plugin": "orm", "plugin_subject": pe} + ) + + if TYPE_CHECKING: + + def __clause_element__(self) -> NamedColumn[_PT]: ... 
+ + def _memoized_method___clause_element__( + self, + ) -> NamedColumn[_PT]: + if self.adapter: + return self.adapter(self.prop.columns[0], self.prop.key) + else: + return self._orm_annotate_column(self.prop.columns[0]) + + def _memoized_attr_info(self) -> _InfoType: + """The .info dictionary for this attribute.""" + + ce = self.__clause_element__() + try: + return ce.info # type: ignore + except AttributeError: + return self.prop.info + + def _memoized_attr_expressions(self) -> Sequence[NamedColumn[Any]]: + """The full sequence of columns referenced by this + attribute, adjusted for any aliasing in progress. + + .. versionadded:: 1.3.17 + + """ + if self.adapter: + return [ + self.adapter(col, self.prop.key) + for col in self.prop.columns + ] + else: + return [ + self._orm_annotate_column(col) for col in self.prop.columns + ] + + def _fallback_getattr(self, key: str) -> Any: + """proxy attribute access down to the mapped column. + + this allows user-defined comparison methods to be accessed. + """ + return getattr(self.__clause_element__(), key) + + def operate( + self, op: OperatorType, *other: Any, **kwargs: Any + ) -> ColumnElement[Any]: + return op(self.__clause_element__(), *other, **kwargs) # type: ignore[no-any-return] # noqa: E501 + + def reverse_operate( + self, op: OperatorType, other: Any, **kwargs: Any + ) -> ColumnElement[Any]: + col = self.__clause_element__() + return op(col._bind_param(op, other), col, **kwargs) # type: ignore[no-any-return] # noqa: E501 + + def __str__(self) -> str: + if not self.parent or not self.key: + return object.__repr__(self) + return str(self.parent.class_.__name__) + "." + self.key + + +class MappedSQLExpression(ColumnProperty[_T], _DeclarativeMapped[_T]): + """Declarative front-end for the :class:`.ColumnProperty` class. + + Public constructor is the :func:`_orm.column_property` function. + + .. versionchanged:: 2.0 Added :class:`_orm.MappedSQLExpression` as + a Declarative compatible subclass for :class:`_orm.ColumnProperty`. + + .. seealso:: + + :class:`.MappedColumn` + + """ + + inherit_cache = True + """:meta private:""" + + +class MappedColumn( + _IntrospectsAnnotations, + _MapsColumns[_T], + _DeclarativeMapped[_T], +): + """Maps a single :class:`_schema.Column` on a class. + + :class:`_orm.MappedColumn` is a specialization of the + :class:`_orm.ColumnProperty` class and is oriented towards declarative + configuration. + + To construct :class:`_orm.MappedColumn` objects, use the + :func:`_orm.mapped_column` constructor function. + + .. 
versionadded:: 2.0 + + + """ + + __slots__ = ( + "column", + "_creation_order", + "_sort_order", + "foreign_keys", + "_has_nullable", + "_has_insert_default", + "deferred", + "deferred_group", + "deferred_raiseload", + "active_history", + "_attribute_options", + "_has_dataclass_arguments", + "_use_existing_column", + ) + + deferred: Union[_NoArg, bool] + deferred_raiseload: bool + deferred_group: Optional[str] + + column: Column[_T] + foreign_keys: Optional[Set[ForeignKey]] + _attribute_options: _AttributeOptions + + def __init__(self, *arg: Any, **kw: Any): + self._attribute_options = attr_opts = kw.pop( + "attribute_options", _DEFAULT_ATTRIBUTE_OPTIONS + ) + + self._use_existing_column = kw.pop("use_existing_column", False) + + self._has_dataclass_arguments = ( + attr_opts is not None + and attr_opts != _DEFAULT_ATTRIBUTE_OPTIONS + and any( + attr_opts[i] is not _NoArg.NO_ARG + for i, attr in enumerate(attr_opts._fields) + if attr != "dataclasses_default" + ) + ) + + insert_default = kw.pop("insert_default", _NoArg.NO_ARG) + self._has_insert_default = insert_default is not _NoArg.NO_ARG + + if self._has_insert_default: + kw["default"] = insert_default + elif attr_opts.dataclasses_default is not _NoArg.NO_ARG: + kw["default"] = attr_opts.dataclasses_default + + self.deferred_group = kw.pop("deferred_group", None) + self.deferred_raiseload = kw.pop("deferred_raiseload", None) + self.deferred = kw.pop("deferred", _NoArg.NO_ARG) + self.active_history = kw.pop("active_history", False) + + self._sort_order = kw.pop("sort_order", _NoArg.NO_ARG) + self.column = cast("Column[_T]", Column(*arg, **kw)) + self.foreign_keys = self.column.foreign_keys + self._has_nullable = "nullable" in kw and kw.get("nullable") not in ( + None, + SchemaConst.NULL_UNSPECIFIED, + ) + util.set_creation_order(self) + + def _copy(self, **kw: Any) -> Self: + new = self.__class__.__new__(self.__class__) + new.column = self.column._copy(**kw) + new.deferred = self.deferred + new.deferred_group = self.deferred_group + new.deferred_raiseload = self.deferred_raiseload + new.foreign_keys = new.column.foreign_keys + new.active_history = self.active_history + new._has_nullable = self._has_nullable + new._attribute_options = self._attribute_options + new._has_insert_default = self._has_insert_default + new._has_dataclass_arguments = self._has_dataclass_arguments + new._use_existing_column = self._use_existing_column + new._sort_order = self._sort_order + util.set_creation_order(new) + return new + + @property + def name(self) -> str: + return self.column.name + + @property + def mapper_property_to_assign(self) -> Optional[MapperProperty[_T]]: + effective_deferred = self.deferred + if effective_deferred is _NoArg.NO_ARG: + effective_deferred = bool( + self.deferred_group or self.deferred_raiseload + ) + + if effective_deferred or self.active_history: + return ColumnProperty( + self.column, + deferred=effective_deferred, + group=self.deferred_group, + raiseload=self.deferred_raiseload, + attribute_options=self._attribute_options, + active_history=self.active_history, + ) + else: + return None + + @property + def columns_to_assign(self) -> List[Tuple[Column[Any], int]]: + return [ + ( + self.column, + ( + self._sort_order + if self._sort_order is not _NoArg.NO_ARG + else 0 + ), + ) + ] + + def __clause_element__(self) -> Column[_T]: + return self.column + + def operate( + self, op: OperatorType, *other: Any, **kwargs: Any + ) -> ColumnElement[Any]: + return op(self.__clause_element__(), *other, **kwargs) # type: ignore[no-any-return] 
# noqa: E501 + + def reverse_operate( + self, op: OperatorType, other: Any, **kwargs: Any + ) -> ColumnElement[Any]: + col = self.__clause_element__() + return op(col._bind_param(op, other), col, **kwargs) # type: ignore[no-any-return] # noqa: E501 + + def found_in_pep593_annotated(self) -> Any: + # return a blank mapped_column(). This mapped_column()'s + # Column will be merged into it in _init_column_for_annotation(). + return MappedColumn() + + def declarative_scan( + self, + decl_scan: _ClassScanMapperConfig, + registry: _RegistryType, + cls: Type[Any], + originating_module: Optional[str], + key: str, + mapped_container: Optional[Type[Mapped[Any]]], + annotation: Optional[_AnnotationScanType], + extracted_mapped_annotation: Optional[_AnnotationScanType], + is_dataclass_field: bool, + ) -> None: + column = self.column + + if ( + self._use_existing_column + and decl_scan.inherits + and decl_scan.single + ): + if decl_scan.is_deferred: + raise sa_exc.ArgumentError( + "Can't use use_existing_column with deferred mappers" + ) + supercls_mapper = class_mapper(decl_scan.inherits, False) + + colname = column.name if column.name is not None else key + column = self.column = supercls_mapper.local_table.c.get( # type: ignore[assignment] # noqa: E501 + colname, column + ) + + if column.key is None: + column.key = key + if column.name is None: + column.name = key + + sqltype = column.type + + if extracted_mapped_annotation is None: + if sqltype._isnull and not self.column.foreign_keys: + self._raise_for_required(key, cls) + else: + return + + self._init_column_for_annotation( + cls, + registry, + extracted_mapped_annotation, + originating_module, + ) + + @util.preload_module("sqlalchemy.orm.decl_base") + def declarative_scan_for_composite( + self, + registry: _RegistryType, + cls: Type[Any], + originating_module: Optional[str], + key: str, + param_name: str, + param_annotation: _AnnotationScanType, + ) -> None: + decl_base = util.preloaded.orm_decl_base + decl_base._undefer_column_name(param_name, self.column) + self._init_column_for_annotation( + cls, registry, param_annotation, originating_module + ) + + def _init_column_for_annotation( + self, + cls: Type[Any], + registry: _RegistryType, + argument: _AnnotationScanType, + originating_module: Optional[str], + ) -> None: + sqltype = self.column.type + + if is_fwd_ref( + argument, check_generic=True, check_for_plain_string=True + ): + assert originating_module is not None + argument = de_stringify_annotation( + cls, argument, originating_module, include_generic=True + ) + + nullable = includes_none(argument) + + if not self._has_nullable: + self.column.nullable = nullable + + our_type = de_optionalize_union_types(argument) + + find_mapped_in: Tuple[Any, ...] 
= () + our_type_is_pep593 = False + raw_pep_593_type = None + + if is_pep593(our_type): + our_type_is_pep593 = True + + pep_593_components = get_args(our_type) + raw_pep_593_type = pep_593_components[0] + if nullable: + raw_pep_593_type = de_optionalize_union_types(raw_pep_593_type) + find_mapped_in = pep_593_components[1:] + elif is_pep695(argument) and is_pep593(argument.__value__): + # do not support nested annotation inside unions ets + find_mapped_in = get_args(argument.__value__)[1:] + + use_args_from: Optional[MappedColumn[Any]] + for elem in find_mapped_in: + if isinstance(elem, MappedColumn): + use_args_from = elem + break + else: + use_args_from = None + + if use_args_from is not None: + if ( + not self._has_insert_default + and use_args_from.column.default is not None + ): + self.column.default = None + + use_args_from.column._merge(self.column) + sqltype = self.column.type + + if ( + use_args_from.deferred is not _NoArg.NO_ARG + and self.deferred is _NoArg.NO_ARG + ): + self.deferred = use_args_from.deferred + + if ( + use_args_from.deferred_group is not None + and self.deferred_group is None + ): + self.deferred_group = use_args_from.deferred_group + + if ( + use_args_from.deferred_raiseload is not None + and self.deferred_raiseload is None + ): + self.deferred_raiseload = use_args_from.deferred_raiseload + + if ( + use_args_from._use_existing_column + and not self._use_existing_column + ): + self._use_existing_column = True + + if use_args_from.active_history: + self.active_history = use_args_from.active_history + + if ( + use_args_from._sort_order is not None + and self._sort_order is _NoArg.NO_ARG + ): + self._sort_order = use_args_from._sort_order + + if ( + use_args_from.column.key is not None + or use_args_from.column.name is not None + ): + util.warn_deprecated( + "Can't use the 'key' or 'name' arguments in " + "Annotated with mapped_column(); this will be ignored", + "2.0.22", + ) + + if use_args_from._has_dataclass_arguments: + for idx, arg in enumerate( + use_args_from._attribute_options._fields + ): + if ( + use_args_from._attribute_options[idx] + is not _NoArg.NO_ARG + ): + arg = arg.replace("dataclasses_", "") + util.warn_deprecated( + f"Argument '{arg}' is a dataclass argument and " + "cannot be specified within a mapped_column() " + "bundled inside of an Annotated object", + "2.0.22", + ) + + if sqltype._isnull and not self.column.foreign_keys: + checks: List[Any] + if our_type_is_pep593: + checks = [our_type, raw_pep_593_type] + else: + checks = [our_type] + + for check_type in checks: + new_sqltype = registry._resolve_type(check_type) + if new_sqltype is not None: + break + else: + if isinstance(our_type, TypeEngine) or ( + isinstance(our_type, type) + and issubclass(our_type, TypeEngine) + ): + raise orm_exc.MappedAnnotationError( + f"The type provided inside the {self.column.key!r} " + "attribute Mapped annotation is the SQLAlchemy type " + f"{our_type}. Expected a Python type instead" + ) + elif is_a_type(our_type): + raise orm_exc.MappedAnnotationError( + "Could not locate SQLAlchemy Core type for Python " + f"type {our_type} inside the {self.column.key!r} " + "attribute Mapped annotation" + ) + else: + raise orm_exc.MappedAnnotationError( + f"The object provided inside the {self.column.key!r} " + "attribute Mapped annotation is not a Python type, " + f"it's the object {our_type!r}. Expected a Python " + "type." 
+ ) + + self.column._set_type(new_sqltype) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/query.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/query.py new file mode 100644 index 0000000000000000000000000000000000000000..3489c15fd6f56e236e20cbf1b03c691062a4f9d1 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/query.py @@ -0,0 +1,3453 @@ +# orm/query.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php + +"""The Query class and support. + +Defines the :class:`_query.Query` class, the central +construct used by the ORM to construct database queries. + +The :class:`_query.Query` class should not be confused with the +:class:`_expression.Select` class, which defines database +SELECT operations at the SQL (non-ORM) level. ``Query`` differs from +``Select`` in that it returns ORM-mapped objects and interacts with an +ORM session, whereas the ``Select`` construct interacts directly with the +database to return iterable result sets. + +""" +from __future__ import annotations + +import collections.abc as collections_abc +import operator +from typing import Any +from typing import Callable +from typing import cast +from typing import Dict +from typing import Generic +from typing import Iterable +from typing import Iterator +from typing import List +from typing import Mapping +from typing import Optional +from typing import overload +from typing import Sequence +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING +from typing import TypeVar +from typing import Union + +from . import attributes +from . import interfaces +from . import loading +from . import util as orm_util +from ._typing import _O +from .base import _assertions +from .context import _column_descriptions +from .context import _determine_last_joined_entity +from .context import _legacy_filter_by_entity_zero +from .context import FromStatement +from .context import ORMCompileState +from .context import QueryContext +from .interfaces import ORMColumnDescription +from .interfaces import ORMColumnsClauseRole +from .util import AliasedClass +from .util import object_mapper +from .util import with_parent +from .. import exc as sa_exc +from .. import inspect +from .. import inspection +from .. import log +from .. import sql +from .. 
import util +from ..engine import Result +from ..engine import Row +from ..event import dispatcher +from ..event import EventTarget +from ..sql import coercions +from ..sql import expression +from ..sql import roles +from ..sql import Select +from ..sql import util as sql_util +from ..sql import visitors +from ..sql._typing import _FromClauseArgument +from ..sql._typing import _TP +from ..sql.annotation import SupportsCloneAnnotations +from ..sql.base import _entity_namespace_key +from ..sql.base import _generative +from ..sql.base import _NoArg +from ..sql.base import Executable +from ..sql.base import Generative +from ..sql.elements import BooleanClauseList +from ..sql.expression import Exists +from ..sql.selectable import _MemoizedSelectEntities +from ..sql.selectable import _SelectFromElements +from ..sql.selectable import ForUpdateArg +from ..sql.selectable import HasHints +from ..sql.selectable import HasPrefixes +from ..sql.selectable import HasSuffixes +from ..sql.selectable import LABEL_STYLE_TABLENAME_PLUS_COL +from ..sql.selectable import SelectLabelStyle +from ..util.typing import Literal +from ..util.typing import Self + + +if TYPE_CHECKING: + from ._typing import _EntityType + from ._typing import _ExternalEntityType + from ._typing import _InternalEntityType + from ._typing import SynchronizeSessionArgument + from .mapper import Mapper + from .path_registry import PathRegistry + from .session import _PKIdentityArgument + from .session import Session + from .state import InstanceState + from ..engine.cursor import CursorResult + from ..engine.interfaces import _ImmutableExecuteOptions + from ..engine.interfaces import CompiledCacheType + from ..engine.interfaces import IsolationLevel + from ..engine.interfaces import SchemaTranslateMapType + from ..engine.result import FrozenResult + from ..engine.result import ScalarResult + from ..sql._typing import _ColumnExpressionArgument + from ..sql._typing import _ColumnExpressionOrStrLabelArgument + from ..sql._typing import _ColumnsClauseArgument + from ..sql._typing import _DMLColumnArgument + from ..sql._typing import _JoinTargetArgument + from ..sql._typing import _LimitOffsetType + from ..sql._typing import _MAYBE_ENTITY + from ..sql._typing import _no_kw + from ..sql._typing import _NOT_ENTITY + from ..sql._typing import _OnClauseArgument + from ..sql._typing import _PropagateAttrsType + from ..sql._typing import _T0 + from ..sql._typing import _T1 + from ..sql._typing import _T2 + from ..sql._typing import _T3 + from ..sql._typing import _T4 + from ..sql._typing import _T5 + from ..sql._typing import _T6 + from ..sql._typing import _T7 + from ..sql._typing import _TypedColumnClauseArgument as _TCCA + from ..sql.base import CacheableOptions + from ..sql.base import ExecutableOption + from ..sql.dml import UpdateBase + from ..sql.elements import ColumnElement + from ..sql.elements import Label + from ..sql.selectable import _ForUpdateOfArgument + from ..sql.selectable import _JoinTargetElement + from ..sql.selectable import _SetupJoinsElement + from ..sql.selectable import Alias + from ..sql.selectable import CTE + from ..sql.selectable import ExecutableReturnsRows + from ..sql.selectable import FromClause + from ..sql.selectable import ScalarSelect + from ..sql.selectable import Subquery + + +__all__ = ["Query", "QueryContext"] + +_T = TypeVar("_T", bound=Any) + + +@inspection._self_inspects +@log.class_logger +class Query( + _SelectFromElements, + SupportsCloneAnnotations, + HasPrefixes, + HasSuffixes, + HasHints, + 
EventTarget, + log.Identified, + Generative, + Executable, + Generic[_T], +): + """ORM-level SQL construction object. + + .. legacy:: The ORM :class:`.Query` object is a legacy construct + as of SQLAlchemy 2.0. See the notes at the top of + :ref:`query_api_toplevel` for an overview, including links to migration + documentation. + + :class:`_query.Query` objects are normally initially generated using the + :meth:`~.Session.query` method of :class:`.Session`, and in + less common cases by instantiating the :class:`_query.Query` directly and + associating with a :class:`.Session` using the + :meth:`_query.Query.with_session` + method. + + """ + + # elements that are in Core and can be cached in the same way + _where_criteria: Tuple[ColumnElement[Any], ...] = () + _having_criteria: Tuple[ColumnElement[Any], ...] = () + + _order_by_clauses: Tuple[ColumnElement[Any], ...] = () + _group_by_clauses: Tuple[ColumnElement[Any], ...] = () + _limit_clause: Optional[ColumnElement[Any]] = None + _offset_clause: Optional[ColumnElement[Any]] = None + + _distinct: bool = False + _distinct_on: Tuple[ColumnElement[Any], ...] = () + + _for_update_arg: Optional[ForUpdateArg] = None + _correlate: Tuple[FromClause, ...] = () + _auto_correlate: bool = True + _from_obj: Tuple[FromClause, ...] = () + _setup_joins: Tuple[_SetupJoinsElement, ...] = () + + _label_style: SelectLabelStyle = SelectLabelStyle.LABEL_STYLE_LEGACY_ORM + + _memoized_select_entities = () + + _compile_options: Union[Type[CacheableOptions], CacheableOptions] = ( + ORMCompileState.default_compile_options + ) + + _with_options: Tuple[ExecutableOption, ...] + load_options = QueryContext.default_load_options + { + "_legacy_uniquing": True + } + + _params: util.immutabledict[str, Any] = util.EMPTY_DICT + + # local Query builder state, not needed for + # compilation or execution + _enable_assertions = True + + _statement: Optional[ExecutableReturnsRows] = None + + session: Session + + dispatch: dispatcher[Query[_T]] + + # mirrors that of ClauseElement, used to propagate the "orm" + # plugin as well as the "subject" of the plugin, e.g. the mapper + # we are querying against. + @util.memoized_property + def _propagate_attrs(self) -> _PropagateAttrsType: + return util.EMPTY_DICT + + def __init__( + self, + entities: Union[ + _ColumnsClauseArgument[Any], Sequence[_ColumnsClauseArgument[Any]] + ], + session: Optional[Session] = None, + ): + """Construct a :class:`_query.Query` directly. + + E.g.:: + + q = Query([User, Address], session=some_session) + + The above is equivalent to:: + + q = some_session.query(User, Address) + + :param entities: a sequence of entities and/or SQL expressions. + + :param session: a :class:`.Session` with which the + :class:`_query.Query` + will be associated. Optional; a :class:`_query.Query` + can be associated + with a :class:`.Session` generatively via the + :meth:`_query.Query.with_session` method as well. + + .. seealso:: + + :meth:`.Session.query` + + :meth:`_query.Query.with_session` + + """ + + # session is usually present. There's one case in subqueryloader + # where it stores a Query without a Session and also there are tests + # for the query(Entity).with_session(session) API which is likely in + # some old recipes, however these are legacy as select() can now be + # used. 
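+        # (editorial sketch, not part of the upstream file) both spellings end
+        # up here, e.g.
+        #     session.query(User)                      # session is set
+        #     Query(User).with_session(some_session)   # session starts as None
+        # so self.session may legitimately be None at this point.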
+ self.session = session # type: ignore + self._set_entities(entities) + + def _set_propagate_attrs(self, values: Mapping[str, Any]) -> Self: + self._propagate_attrs = util.immutabledict(values) + return self + + def _set_entities( + self, + entities: Union[ + _ColumnsClauseArgument[Any], Iterable[_ColumnsClauseArgument[Any]] + ], + ) -> None: + self._raw_columns = [ + coercions.expect( + roles.ColumnsClauseRole, + ent, + apply_propagate_attrs=self, + post_inspect=True, + ) + for ent in util.to_list(entities) + ] + + def tuples(self: Query[_O]) -> Query[Tuple[_O]]: + """return a tuple-typed form of this :class:`.Query`. + + This method invokes the :meth:`.Query.only_return_tuples` + method with a value of ``True``, which by itself ensures that this + :class:`.Query` will always return :class:`.Row` objects, even + if the query is made against a single entity. It then also + at the typing level will return a "typed" query, if possible, + that will type result rows as ``Tuple`` objects with typed + elements. + + This method can be compared to the :meth:`.Result.tuples` method, + which returns "self", but from a typing perspective returns an object + that will yield typed ``Tuple`` objects for results. Typing + takes effect only if this :class:`.Query` object is a typed + query object already. + + .. versionadded:: 2.0 + + .. seealso:: + + :meth:`.Result.tuples` - v2 equivalent method. + + """ + return self.only_return_tuples(True) # type: ignore + + def _entity_from_pre_ent_zero(self) -> Optional[_InternalEntityType[Any]]: + if not self._raw_columns: + return None + + ent = self._raw_columns[0] + + if "parententity" in ent._annotations: + return ent._annotations["parententity"] # type: ignore + elif "bundle" in ent._annotations: + return ent._annotations["bundle"] # type: ignore + else: + # label, other SQL expression + for element in visitors.iterate(ent): + if "parententity" in element._annotations: + return element._annotations["parententity"] # type: ignore # noqa: E501 + else: + return None + + def _only_full_mapper_zero(self, methname: str) -> Mapper[Any]: + if ( + len(self._raw_columns) != 1 + or "parententity" not in self._raw_columns[0]._annotations + or not self._raw_columns[0].is_selectable + ): + raise sa_exc.InvalidRequestError( + "%s() can only be used against " + "a single mapped class." 
% methname + ) + + return self._raw_columns[0]._annotations["parententity"] # type: ignore # noqa: E501 + + def _set_select_from( + self, obj: Iterable[_FromClauseArgument], set_base_alias: bool + ) -> None: + fa = [ + coercions.expect( + roles.StrictFromClauseRole, + elem, + allow_select=True, + apply_propagate_attrs=self, + ) + for elem in obj + ] + + self._compile_options += {"_set_base_alias": set_base_alias} + self._from_obj = tuple(fa) + + @_generative + def _set_lazyload_from(self, state: InstanceState[Any]) -> Self: + self.load_options += {"_lazy_loaded_from": state} + return self + + def _get_condition(self) -> None: + """used by legacy BakedQuery""" + self._no_criterion_condition("get", order_by=False, distinct=False) + + def _get_existing_condition(self) -> None: + self._no_criterion_assertion("get", order_by=False, distinct=False) + + def _no_criterion_assertion( + self, meth: str, order_by: bool = True, distinct: bool = True + ) -> None: + if not self._enable_assertions: + return + if ( + self._where_criteria + or self._statement is not None + or self._from_obj + or self._setup_joins + or self._limit_clause is not None + or self._offset_clause is not None + or self._group_by_clauses + or (order_by and self._order_by_clauses) + or (distinct and self._distinct) + ): + raise sa_exc.InvalidRequestError( + "Query.%s() being called on a " + "Query with existing criterion. " % meth + ) + + def _no_criterion_condition( + self, meth: str, order_by: bool = True, distinct: bool = True + ) -> None: + self._no_criterion_assertion(meth, order_by, distinct) + + self._from_obj = self._setup_joins = () + if self._statement is not None: + self._compile_options += {"_statement": None} + self._where_criteria = () + self._distinct = False + + self._order_by_clauses = self._group_by_clauses = () + + def _no_clauseelement_condition(self, meth: str) -> None: + if not self._enable_assertions: + return + if self._order_by_clauses: + raise sa_exc.InvalidRequestError( + "Query.%s() being called on a " + "Query with existing criterion. " % meth + ) + self._no_criterion_condition(meth) + + def _no_statement_condition(self, meth: str) -> None: + if not self._enable_assertions: + return + if self._statement is not None: + raise sa_exc.InvalidRequestError( + ( + "Query.%s() being called on a Query with an existing full " + "statement - can't apply criterion." + ) + % meth + ) + + def _no_limit_offset(self, meth: str) -> None: + if not self._enable_assertions: + return + if self._limit_clause is not None or self._offset_clause is not None: + raise sa_exc.InvalidRequestError( + "Query.%s() being called on a Query which already has LIMIT " + "or OFFSET applied. Call %s() before limit() or offset() " + "are applied." 
% (meth, meth) + ) + + @property + def _has_row_limiting_clause(self) -> bool: + return ( + self._limit_clause is not None or self._offset_clause is not None + ) + + def _get_options( + self, + populate_existing: Optional[bool] = None, + version_check: Optional[bool] = None, + only_load_props: Optional[Sequence[str]] = None, + refresh_state: Optional[InstanceState[Any]] = None, + identity_token: Optional[Any] = None, + ) -> Self: + load_options: Dict[str, Any] = {} + compile_options: Dict[str, Any] = {} + + if version_check: + load_options["_version_check"] = version_check + if populate_existing: + load_options["_populate_existing"] = populate_existing + if refresh_state: + load_options["_refresh_state"] = refresh_state + compile_options["_for_refresh_state"] = True + if only_load_props: + compile_options["_only_load_props"] = frozenset(only_load_props) + if identity_token: + load_options["_identity_token"] = identity_token + + if load_options: + self.load_options += load_options + if compile_options: + self._compile_options += compile_options + + return self + + def _clone(self, **kw: Any) -> Self: + return self._generate() + + def _get_select_statement_only(self) -> Select[_T]: + if self._statement is not None: + raise sa_exc.InvalidRequestError( + "Can't call this method on a Query that uses from_statement()" + ) + return cast("Select[_T]", self.statement) + + @property + def statement(self) -> Union[Select[_T], FromStatement[_T], UpdateBase]: + """The full SELECT statement represented by this Query. + + The statement by default will not have disambiguating labels + applied to the construct unless with_labels(True) is called + first. + + """ + + # .statement can return the direct future.Select() construct here, as + # long as we are not using subsequent adaption features that + # are made against raw entities, e.g. from_self(), with_polymorphic(), + # select_entity_from(). If these features are being used, then + # the Select() we return will not have the correct .selected_columns + # collection and will not embed in subsequent queries correctly. + # We could find a way to make this collection "correct", however + # this would not be too different from doing the full compile as + # we are doing in any case, the Select() would still not have the + # proper state for other attributes like whereclause, order_by, + # and these features are all deprecated in any case. + # + # for these reasons, Query is not a Select, it remains an ORM + # object for which __clause_element__() must be called in order for + # it to provide a real expression object. + # + # from there, it starts to look much like Query itself won't be + # passed into the execute process and won't generate its own cache + # key; this will all occur in terms of the ORM-enabled Select. + stmt: Union[Select[_T], FromStatement[_T], UpdateBase] + + if not self._compile_options._set_base_alias: + # if we don't have legacy top level aliasing features in use + # then convert to a future select() directly + stmt = self._statement_20(for_statement=True) + else: + stmt = self._compile_state(for_statement=True).statement + + if self._params: + stmt = stmt.params(self._params) + + return stmt + + def _final_statement(self, legacy_query_style: bool = True) -> Select[Any]: + """Return the 'final' SELECT statement for this :class:`.Query`. + + This is used by the testing suite only and is fairly inefficient. 
+ + This is the Core-only select() that will be rendered by a complete + compilation of this query, and is what .statement used to return + in 1.3. + + + """ + + q = self._clone() + + return q._compile_state( + use_legacy_query_style=legacy_query_style + ).statement # type: ignore + + def _statement_20( + self, for_statement: bool = False, use_legacy_query_style: bool = True + ) -> Union[Select[_T], FromStatement[_T]]: + # TODO: this event needs to be deprecated, as it currently applies + # only to ORM query and occurs at this spot that is now more + # or less an artificial spot + if self.dispatch.before_compile: + for fn in self.dispatch.before_compile: + new_query = fn(self) + if new_query is not None and new_query is not self: + self = new_query + if not fn._bake_ok: # type: ignore + self._compile_options += {"_bake_ok": False} + + compile_options = self._compile_options + compile_options += { + "_for_statement": for_statement, + "_use_legacy_query_style": use_legacy_query_style, + } + + stmt: Union[Select[_T], FromStatement[_T]] + + if self._statement is not None: + stmt = FromStatement(self._raw_columns, self._statement) + stmt.__dict__.update( + _with_options=self._with_options, + _with_context_options=self._with_context_options, + _compile_options=compile_options, + _execution_options=self._execution_options, + _propagate_attrs=self._propagate_attrs, + ) + else: + # Query / select() internal attributes are 99% cross-compatible + stmt = Select._create_raw_select(**self.__dict__) + stmt.__dict__.update( + _label_style=self._label_style, + _compile_options=compile_options, + _propagate_attrs=self._propagate_attrs, + ) + stmt.__dict__.pop("session", None) + + # ensure the ORM context is used to compile the statement, even + # if it has no ORM entities. This is so ORM-only things like + # _legacy_joins are picked up that wouldn't be picked up by the + # Core statement context + if "compile_state_plugin" not in stmt._propagate_attrs: + stmt._propagate_attrs = stmt._propagate_attrs.union( + {"compile_state_plugin": "orm", "plugin_subject": None} + ) + + return stmt + + def subquery( + self, + name: Optional[str] = None, + with_labels: bool = False, + reduce_columns: bool = False, + ) -> Subquery: + """Return the full SELECT statement represented by + this :class:`_query.Query`, embedded within an + :class:`_expression.Alias`. + + Eager JOIN generation within the query is disabled. + + .. seealso:: + + :meth:`_sql.Select.subquery` - v2 comparable method. + + :param name: string name to be assigned as the alias; + this is passed through to :meth:`_expression.FromClause.alias`. + If ``None``, a name will be deterministically generated + at compile time. + + :param with_labels: if True, :meth:`.with_labels` will be called + on the :class:`_query.Query` first to apply table-qualified labels + to all columns. + + :param reduce_columns: if True, + :meth:`_expression.Select.reduce_columns` will + be called on the resulting :func:`_expression.select` construct, + to remove same-named columns where one also refers to the other + via foreign key or WHERE clause equivalence. 
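+
+        As a brief sketch (the ``User`` / ``Address`` mappings, the
+        relationship between them, and the ``session`` are assumed for
+        illustration and are not defined in this module)::
+
+            from sqlalchemy import func
+
+            # per-user address counts as an anonymous subquery
+            subq = (
+                session.query(
+                    Address.user_id, func.count(Address.id).label("n")
+                )
+                .group_by(Address.user_id)
+                .subquery()
+            )
+
+            # join the subquery back to the parent entity
+            q = session.query(User, subq.c.n).join(
+                subq, User.id == subq.c.user_id
+            )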
+ + """ + q = self.enable_eagerloads(False) + if with_labels: + q = q.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL) + + stmt = q._get_select_statement_only() + + if TYPE_CHECKING: + assert isinstance(stmt, Select) + + if reduce_columns: + stmt = stmt.reduce_columns() + return stmt.subquery(name=name) + + def cte( + self, + name: Optional[str] = None, + recursive: bool = False, + nesting: bool = False, + ) -> CTE: + r"""Return the full SELECT statement represented by this + :class:`_query.Query` represented as a common table expression (CTE). + + Parameters and usage are the same as those of the + :meth:`_expression.SelectBase.cte` method; see that method for + further details. + + Here is the `PostgreSQL WITH + RECURSIVE example + `_. + Note that, in this example, the ``included_parts`` cte and the + ``incl_alias`` alias of it are Core selectables, which + means the columns are accessed via the ``.c.`` attribute. The + ``parts_alias`` object is an :func:`_orm.aliased` instance of the + ``Part`` entity, so column-mapped attributes are available + directly:: + + from sqlalchemy.orm import aliased + + + class Part(Base): + __tablename__ = "part" + part = Column(String, primary_key=True) + sub_part = Column(String, primary_key=True) + quantity = Column(Integer) + + + included_parts = ( + session.query(Part.sub_part, Part.part, Part.quantity) + .filter(Part.part == "our part") + .cte(name="included_parts", recursive=True) + ) + + incl_alias = aliased(included_parts, name="pr") + parts_alias = aliased(Part, name="p") + included_parts = included_parts.union_all( + session.query( + parts_alias.sub_part, parts_alias.part, parts_alias.quantity + ).filter(parts_alias.part == incl_alias.c.sub_part) + ) + + q = session.query( + included_parts.c.sub_part, + func.sum(included_parts.c.quantity).label("total_quantity"), + ).group_by(included_parts.c.sub_part) + + .. seealso:: + + :meth:`_sql.Select.cte` - v2 equivalent method. + + """ # noqa: E501 + return ( + self.enable_eagerloads(False) + ._get_select_statement_only() + .cte(name=name, recursive=recursive, nesting=nesting) + ) + + def label(self, name: Optional[str]) -> Label[Any]: + """Return the full SELECT statement represented by this + :class:`_query.Query`, converted + to a scalar subquery with a label of the given name. + + .. seealso:: + + :meth:`_sql.Select.label` - v2 comparable method. + + """ + + return ( + self.enable_eagerloads(False) + ._get_select_statement_only() + .label(name) + ) + + @overload + def as_scalar( # type: ignore[overload-overlap] + self: Query[Tuple[_MAYBE_ENTITY]], + ) -> ScalarSelect[_MAYBE_ENTITY]: ... + + @overload + def as_scalar( + self: Query[Tuple[_NOT_ENTITY]], + ) -> ScalarSelect[_NOT_ENTITY]: ... + + @overload + def as_scalar(self) -> ScalarSelect[Any]: ... + + @util.deprecated( + "1.4", + "The :meth:`_query.Query.as_scalar` method is deprecated and will be " + "removed in a future release. Please refer to " + ":meth:`_query.Query.scalar_subquery`.", + ) + def as_scalar(self) -> ScalarSelect[Any]: + """Return the full SELECT statement represented by this + :class:`_query.Query`, converted to a scalar subquery. + + """ + return self.scalar_subquery() + + @overload + def scalar_subquery( + self: Query[Tuple[_MAYBE_ENTITY]], + ) -> ScalarSelect[Any]: ... + + @overload + def scalar_subquery( + self: Query[Tuple[_NOT_ENTITY]], + ) -> ScalarSelect[_NOT_ENTITY]: ... + + @overload + def scalar_subquery(self) -> ScalarSelect[Any]: ... 
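+
+    # Illustrative sketch only -- ``User``, ``Address``, ``func`` and
+    # ``session`` are assumed names, not defined in this module.  A scalar
+    # subquery is typically embedded as a column expression of an enclosing
+    # query, relying on auto-correlation, e.g.:
+    #
+    #     address_count = (
+    #         session.query(func.count(Address.id))
+    #         .filter(Address.user_id == User.id)
+    #         .scalar_subquery()
+    #     )
+    #     session.query(User.name, address_count).all()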
+ + def scalar_subquery(self) -> ScalarSelect[Any]: + """Return the full SELECT statement represented by this + :class:`_query.Query`, converted to a scalar subquery. + + Analogous to + :meth:`sqlalchemy.sql.expression.SelectBase.scalar_subquery`. + + .. versionchanged:: 1.4 The :meth:`_query.Query.scalar_subquery` + method replaces the :meth:`_query.Query.as_scalar` method. + + .. seealso:: + + :meth:`_sql.Select.scalar_subquery` - v2 comparable method. + + """ + + return ( + self.enable_eagerloads(False) + ._get_select_statement_only() + .scalar_subquery() + ) + + @property + def selectable(self) -> Union[Select[_T], FromStatement[_T], UpdateBase]: + """Return the :class:`_expression.Select` object emitted by this + :class:`_query.Query`. + + Used for :func:`_sa.inspect` compatibility, this is equivalent to:: + + query.enable_eagerloads(False).with_labels().statement + + """ + return self.__clause_element__() + + def __clause_element__( + self, + ) -> Union[Select[_T], FromStatement[_T], UpdateBase]: + return ( + self._with_compile_options( + _enable_eagerloads=False, _render_for_subquery=True + ) + .set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL) + .statement + ) + + @overload + def only_return_tuples( + self: Query[_O], value: Literal[True] + ) -> RowReturningQuery[Tuple[_O]]: ... + + @overload + def only_return_tuples( + self: Query[_O], value: Literal[False] + ) -> Query[_O]: ... + + @_generative + def only_return_tuples(self, value: bool) -> Query[Any]: + """When set to True, the query results will always be a + :class:`.Row` object. + + This can change a query that normally returns a single entity + as a scalar to return a :class:`.Row` result in all cases. + + .. seealso:: + + :meth:`.Query.tuples` - returns tuples, but also at the typing + level will type results as ``Tuple``. + + :meth:`_query.Query.is_single_entity` + + :meth:`_engine.Result.tuples` - v2 comparable method. + + """ + self.load_options += dict(_only_return_tuples=value) + return self + + @property + def is_single_entity(self) -> bool: + """Indicates if this :class:`_query.Query` + returns tuples or single entities. + + Returns True if this query returns a single entity for each instance + in its result list, and False if this query returns a tuple of entities + for each result. + + .. versionadded:: 1.3.11 + + .. seealso:: + + :meth:`_query.Query.only_return_tuples` + + """ + return ( + not self.load_options._only_return_tuples + and len(self._raw_columns) == 1 + and "parententity" in self._raw_columns[0]._annotations + and isinstance( + self._raw_columns[0]._annotations["parententity"], + ORMColumnsClauseRole, + ) + ) + + @_generative + def enable_eagerloads(self, value: bool) -> Self: + """Control whether or not eager joins and subqueries are + rendered. + + When set to False, the returned Query will not render + eager joins regardless of :func:`~sqlalchemy.orm.joinedload`, + :func:`~sqlalchemy.orm.subqueryload` options + or mapper-level ``lazy='joined'``/``lazy='subquery'`` + configurations. + + This is used primarily when nesting the Query's + statement into a subquery or other + selectable, or when using :meth:`_query.Query.yield_per`. 
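+
+        A minimal sketch, assuming an ``Order`` mapping whose collections
+        are configured with ``lazy="joined"`` (names illustrative only)::
+
+            # stream plain Order rows without rendering the eager joins
+            for order in (
+                session.query(Order).enable_eagerloads(False).yield_per(100)
+            ):
+                print(order.id)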
+ + """ + self._compile_options += {"_enable_eagerloads": value} + return self + + @_generative + def _with_compile_options(self, **opt: Any) -> Self: + self._compile_options += opt + return self + + @util.became_legacy_20( + ":meth:`_orm.Query.with_labels` and :meth:`_orm.Query.apply_labels`", + alternative="Use set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL) " + "instead.", + ) + def with_labels(self) -> Self: + return self.set_label_style( + SelectLabelStyle.LABEL_STYLE_TABLENAME_PLUS_COL + ) + + apply_labels = with_labels + + @property + def get_label_style(self) -> SelectLabelStyle: + """ + Retrieve the current label style. + + .. versionadded:: 1.4 + + .. seealso:: + + :meth:`_sql.Select.get_label_style` - v2 equivalent method. + + """ + return self._label_style + + def set_label_style(self, style: SelectLabelStyle) -> Self: + """Apply column labels to the return value of Query.statement. + + Indicates that this Query's `statement` accessor should return + a SELECT statement that applies labels to all columns in the + form _; this is commonly used to + disambiguate columns from multiple tables which have the same + name. + + When the `Query` actually issues SQL to load rows, it always + uses column labeling. + + .. note:: The :meth:`_query.Query.set_label_style` method *only* applies + the output of :attr:`_query.Query.statement`, and *not* to any of + the result-row invoking systems of :class:`_query.Query` itself, + e.g. + :meth:`_query.Query.first`, :meth:`_query.Query.all`, etc. + To execute + a query using :meth:`_query.Query.set_label_style`, invoke the + :attr:`_query.Query.statement` using :meth:`.Session.execute`:: + + result = session.execute( + query.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL).statement + ) + + .. versionadded:: 1.4 + + + .. seealso:: + + :meth:`_sql.Select.set_label_style` - v2 equivalent method. + + """ # noqa + if self._label_style is not style: + self = self._generate() + self._label_style = style + return self + + @_generative + def enable_assertions(self, value: bool) -> Self: + """Control whether assertions are generated. + + When set to False, the returned Query will + not assert its state before certain operations, + including that LIMIT/OFFSET has not been applied + when filter() is called, no criterion exists + when get() is called, and no "from_statement()" + exists when filter()/order_by()/group_by() etc. + is called. This more permissive mode is used by + custom Query subclasses to specify criterion or + other modifiers outside of the usual usage patterns. + + Care should be taken to ensure that the usage + pattern is even possible. A statement applied + by from_statement() will override any criterion + set by filter() or order_by(), for example. + + """ + self._enable_assertions = value + return self + + @property + def whereclause(self) -> Optional[ColumnElement[bool]]: + """A readonly attribute which returns the current WHERE criterion for + this Query. + + This returned value is a SQL expression construct, or ``None`` if no + criterion has been established. + + .. seealso:: + + :attr:`_sql.Select.whereclause` - v2 equivalent property. + + """ + return BooleanClauseList._construct_for_whereclause( + self._where_criteria + ) + + @_generative + def _with_current_path(self, path: PathRegistry) -> Self: + """indicate that this query applies to objects loaded + within a certain path. 
+ + Used by deferred loaders (see strategies.py) which transfer + query options from an originating query to a newly generated + query intended for the deferred load. + + """ + self._compile_options += {"_current_path": path} + return self + + @_generative + def yield_per(self, count: int) -> Self: + r"""Yield only ``count`` rows at a time. + + The purpose of this method is when fetching very large result sets + (> 10K rows), to batch results in sub-collections and yield them + out partially, so that the Python interpreter doesn't need to declare + very large areas of memory which is both time consuming and leads + to excessive memory use. The performance from fetching hundreds of + thousands of rows can often double when a suitable yield-per setting + (e.g. approximately 1000) is used, even with DBAPIs that buffer + rows (which are most). + + As of SQLAlchemy 1.4, the :meth:`_orm.Query.yield_per` method is + equivalent to using the ``yield_per`` execution option at the ORM + level. See the section :ref:`orm_queryguide_yield_per` for further + background on this option. + + .. seealso:: + + :ref:`orm_queryguide_yield_per` + + """ + self.load_options += {"_yield_per": count} + return self + + @util.became_legacy_20( + ":meth:`_orm.Query.get`", + alternative="The method is now available as :meth:`_orm.Session.get`", + ) + def get(self, ident: _PKIdentityArgument) -> Optional[Any]: + """Return an instance based on the given primary key identifier, + or ``None`` if not found. + + E.g.:: + + my_user = session.query(User).get(5) + + some_object = session.query(VersionedFoo).get((5, 10)) + + some_object = session.query(VersionedFoo).get({"id": 5, "version_id": 10}) + + :meth:`_query.Query.get` is special in that it provides direct + access to the identity map of the owning :class:`.Session`. + If the given primary key identifier is present + in the local identity map, the object is returned + directly from this collection and no SQL is emitted, + unless the object has been marked fully expired. + If not present, + a SELECT is performed in order to locate the object. + + :meth:`_query.Query.get` also will perform a check if + the object is present in the identity map and + marked as expired - a SELECT + is emitted to refresh the object as well as to + ensure that the row is still present. + If not, :class:`~sqlalchemy.orm.exc.ObjectDeletedError` is raised. + + :meth:`_query.Query.get` is only used to return a single + mapped instance, not multiple instances or + individual column constructs, and strictly + on a single primary key value. The originating + :class:`_query.Query` must be constructed in this way, + i.e. against a single mapped entity, + with no additional filtering criterion. Loading + options via :meth:`_query.Query.options` may be applied + however, and will be used if the object is not + yet locally present. + + :param ident: A scalar, tuple, or dictionary representing the + primary key. For a composite (e.g. multiple column) primary key, + a tuple or dictionary should be passed. + + For a single-column primary key, the scalar calling form is typically + the most expedient. If the primary key of a row is the value "5", + the call looks like:: + + my_object = query.get(5) + + The tuple form contains primary key values typically in + the order in which they correspond to the mapped + :class:`_schema.Table` + object's primary key columns, or if the + :paramref:`_orm.Mapper.primary_key` configuration parameter were + used, in + the order used for that parameter. 
For example, if the primary key + of a row is represented by the integer + digits "5, 10" the call would look like:: + + my_object = query.get((5, 10)) + + The dictionary form should include as keys the mapped attribute names + corresponding to each element of the primary key. If the mapped class + has the attributes ``id``, ``version_id`` as the attributes which + store the object's primary key value, the call would look like:: + + my_object = query.get({"id": 5, "version_id": 10}) + + .. versionadded:: 1.3 the :meth:`_query.Query.get` + method now optionally + accepts a dictionary of attribute names to values in order to + indicate a primary key identifier. + + + :return: The object instance, or ``None``. + + """ # noqa: E501 + self._no_criterion_assertion("get", order_by=False, distinct=False) + + # we still implement _get_impl() so that baked query can override + # it + return self._get_impl(ident, loading.load_on_pk_identity) + + def _get_impl( + self, + primary_key_identity: _PKIdentityArgument, + db_load_fn: Callable[..., Any], + identity_token: Optional[Any] = None, + ) -> Optional[Any]: + mapper = self._only_full_mapper_zero("get") + return self.session._get_impl( + mapper, + primary_key_identity, + db_load_fn, + populate_existing=self.load_options._populate_existing, + with_for_update=self._for_update_arg, + options=self._with_options, + identity_token=identity_token, + execution_options=self._execution_options, + ) + + @property + def lazy_loaded_from(self) -> Optional[InstanceState[Any]]: + """An :class:`.InstanceState` that is using this :class:`_query.Query` + for a lazy load operation. + + .. deprecated:: 1.4 This attribute should be viewed via the + :attr:`.ORMExecuteState.lazy_loaded_from` attribute, within + the context of the :meth:`.SessionEvents.do_orm_execute` + event. + + .. seealso:: + + :attr:`.ORMExecuteState.lazy_loaded_from` + + """ + return self.load_options._lazy_loaded_from # type: ignore + + @property + def _current_path(self) -> PathRegistry: + return self._compile_options._current_path # type: ignore + + @_generative + def correlate( + self, + *fromclauses: Union[Literal[None, False], _FromClauseArgument], + ) -> Self: + """Return a :class:`.Query` construct which will correlate the given + FROM clauses to that of an enclosing :class:`.Query` or + :func:`~.expression.select`. + + The method here accepts mapped classes, :func:`.aliased` constructs, + and :class:`_orm.Mapper` constructs as arguments, which are resolved + into expression constructs, in addition to appropriate expression + constructs. + + The correlation arguments are ultimately passed to + :meth:`_expression.Select.correlate` + after coercion to expression constructs. + + The correlation arguments take effect in such cases + as when :meth:`_query.Query.from_self` is used, or when + a subquery as returned by :meth:`_query.Query.subquery` is + embedded in another :func:`_expression.select` construct. + + .. seealso:: + + :meth:`_sql.Select.correlate` - v2 equivalent method. + + """ + + self._auto_correlate = False + if fromclauses and fromclauses[0] in {None, False}: + self._correlate = () + else: + self._correlate = self._correlate + tuple( + coercions.expect(roles.FromClauseRole, f) for f in fromclauses + ) + return self + + @_generative + def autoflush(self, setting: bool) -> Self: + """Return a Query with a specific 'autoflush' setting. + + As of SQLAlchemy 1.4, the :meth:`_orm.Query.autoflush` method + is equivalent to using the ``autoflush`` execution option at the + ORM level. 
See the section :ref:`orm_queryguide_autoflush` for + further background on this option. + + """ + self.load_options += {"_autoflush": setting} + return self + + @_generative + def populate_existing(self) -> Self: + """Return a :class:`_query.Query` + that will expire and refresh all instances + as they are loaded, or reused from the current :class:`.Session`. + + As of SQLAlchemy 1.4, the :meth:`_orm.Query.populate_existing` method + is equivalent to using the ``populate_existing`` execution option at + the ORM level. See the section :ref:`orm_queryguide_populate_existing` + for further background on this option. + + """ + self.load_options += {"_populate_existing": True} + return self + + @_generative + def _with_invoke_all_eagers(self, value: bool) -> Self: + """Set the 'invoke all eagers' flag which causes joined- and + subquery loaders to traverse into already-loaded related objects + and collections. + + Default is that of :attr:`_query.Query._invoke_all_eagers`. + + """ + self.load_options += {"_invoke_all_eagers": value} + return self + + @util.became_legacy_20( + ":meth:`_orm.Query.with_parent`", + alternative="Use the :func:`_orm.with_parent` standalone construct.", + ) + @util.preload_module("sqlalchemy.orm.relationships") + def with_parent( + self, + instance: object, + property: Optional[ # noqa: A002 + attributes.QueryableAttribute[Any] + ] = None, + from_entity: Optional[_ExternalEntityType[Any]] = None, + ) -> Self: + """Add filtering criterion that relates the given instance + to a child object or collection, using its attribute state + as well as an established :func:`_orm.relationship()` + configuration. + + The method uses the :func:`.with_parent` function to generate + the clause, the result of which is passed to + :meth:`_query.Query.filter`. + + Parameters are the same as :func:`.with_parent`, with the exception + that the given property can be None, in which case a search is + performed against this :class:`_query.Query` object's target mapper. + + :param instance: + An instance which has some :func:`_orm.relationship`. + + :param property: + Class bound attribute which indicates + what relationship from the instance should be used to reconcile the + parent/child relationship. + + :param from_entity: + Entity in which to consider as the left side. This defaults to the + "zero" entity of the :class:`_query.Query` itself. + + """ + relationships = util.preloaded.orm_relationships + + if from_entity: + entity_zero = inspect(from_entity) + else: + entity_zero = _legacy_filter_by_entity_zero(self) + if property is None: + # TODO: deprecate, property has to be supplied + mapper = object_mapper(instance) + + for prop in mapper.iterate_properties: + if ( + isinstance(prop, relationships.RelationshipProperty) + and prop.mapper is entity_zero.mapper # type: ignore + ): + property = prop # type: ignore # noqa: A001 + break + else: + raise sa_exc.InvalidRequestError( + "Could not locate a property which relates instances " + "of class '%s' to instances of class '%s'" + % ( + entity_zero.mapper.class_.__name__, # type: ignore + instance.__class__.__name__, + ) + ) + + return self.filter( + with_parent( + instance, + property, # type: ignore + entity_zero.entity, # type: ignore + ) + ) + + @_generative + def add_entity( + self, + entity: _EntityType[Any], + alias: Optional[Union[Alias, Subquery]] = None, + ) -> Query[Any]: + """add a mapped entity to the list of result columns + to be returned. + + .. seealso:: + + :meth:`_sql.Select.add_columns` - v2 comparable method. 
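+
+        A short sketch (``User`` and ``Address`` are assumed, illustrative
+        mappings)::
+
+            # each result row is a (User, Address) tuple
+            q = (
+                session.query(User)
+                .add_entity(Address)
+                .join(Address, User.id == Address.user_id)
+            )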
+ """ + + if alias is not None: + # TODO: deprecate + entity = AliasedClass(entity, alias) + + self._raw_columns = list(self._raw_columns) + + self._raw_columns.append( + coercions.expect( + roles.ColumnsClauseRole, entity, apply_propagate_attrs=self + ) + ) + return self + + @_generative + def with_session(self, session: Session) -> Self: + """Return a :class:`_query.Query` that will use the given + :class:`.Session`. + + While the :class:`_query.Query` + object is normally instantiated using the + :meth:`.Session.query` method, it is legal to build the + :class:`_query.Query` + directly without necessarily using a :class:`.Session`. Such a + :class:`_query.Query` object, or any :class:`_query.Query` + already associated + with a different :class:`.Session`, can produce a new + :class:`_query.Query` + object associated with a target session using this method:: + + from sqlalchemy.orm import Query + + query = Query([MyClass]).filter(MyClass.id == 5) + + result = query.with_session(my_session).one() + + """ + + self.session = session + return self + + def _legacy_from_self( + self, *entities: _ColumnsClauseArgument[Any] + ) -> Self: + # used for query.count() as well as for the same + # function in BakedQuery, as well as some old tests in test_baked.py. + + fromclause = ( + self.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL) + .correlate(None) + .subquery() + ._anonymous_fromclause() + ) + + q = self._from_selectable(fromclause) + + if entities: + q._set_entities(entities) + return q + + @_generative + def _set_enable_single_crit(self, val: bool) -> Self: + self._compile_options += {"_enable_single_crit": val} + return self + + @_generative + def _from_selectable( + self, fromclause: FromClause, set_entity_from: bool = True + ) -> Self: + for attr in ( + "_where_criteria", + "_order_by_clauses", + "_group_by_clauses", + "_limit_clause", + "_offset_clause", + "_last_joined_entity", + "_setup_joins", + "_memoized_select_entities", + "_distinct", + "_distinct_on", + "_having_criteria", + "_prefixes", + "_suffixes", + ): + self.__dict__.pop(attr, None) + self._set_select_from([fromclause], set_entity_from) + self._compile_options += { + "_enable_single_crit": False, + } + + return self + + @util.deprecated( + "1.4", + ":meth:`_query.Query.values` " + "is deprecated and will be removed in a " + "future release. Please use :meth:`_query.Query.with_entities`", + ) + def values(self, *columns: _ColumnsClauseArgument[Any]) -> Iterable[Any]: + """Return an iterator yielding result tuples corresponding + to the given list of columns + + """ + return self._values_no_warn(*columns) + + _values = values + + def _values_no_warn( + self, *columns: _ColumnsClauseArgument[Any] + ) -> Iterable[Any]: + if not columns: + return iter(()) + q = self._clone().enable_eagerloads(False) + q._set_entities(columns) + if not q.load_options._yield_per: + q.load_options += {"_yield_per": 10} + return iter(q) + + @util.deprecated( + "1.4", + ":meth:`_query.Query.value` " + "is deprecated and will be removed in a " + "future release. Please use :meth:`_query.Query.with_entities` " + "in combination with :meth:`_query.Query.scalar`", + ) + def value(self, column: _ColumnExpressionArgument[Any]) -> Any: + """Return a scalar result corresponding to the given + column expression. + + """ + try: + return next(self._values_no_warn(column))[0] # type: ignore + except StopIteration: + return None + + @overload + def with_entities(self, _entity: _EntityType[_O]) -> Query[_O]: ... 
+ + @overload + def with_entities( + self, + _colexpr: roles.TypedColumnsClauseRole[_T], + ) -> RowReturningQuery[Tuple[_T]]: ... + + # START OVERLOADED FUNCTIONS self.with_entities RowReturningQuery 2-8 + + # code within this block is **programmatically, + # statically generated** by tools/generate_tuple_map_overloads.py + + @overload + def with_entities( + self, __ent0: _TCCA[_T0], __ent1: _TCCA[_T1] + ) -> RowReturningQuery[Tuple[_T0, _T1]]: ... + + @overload + def with_entities( + self, __ent0: _TCCA[_T0], __ent1: _TCCA[_T1], __ent2: _TCCA[_T2] + ) -> RowReturningQuery[Tuple[_T0, _T1, _T2]]: ... + + @overload + def with_entities( + self, + __ent0: _TCCA[_T0], + __ent1: _TCCA[_T1], + __ent2: _TCCA[_T2], + __ent3: _TCCA[_T3], + ) -> RowReturningQuery[Tuple[_T0, _T1, _T2, _T3]]: ... + + @overload + def with_entities( + self, + __ent0: _TCCA[_T0], + __ent1: _TCCA[_T1], + __ent2: _TCCA[_T2], + __ent3: _TCCA[_T3], + __ent4: _TCCA[_T4], + ) -> RowReturningQuery[Tuple[_T0, _T1, _T2, _T3, _T4]]: ... + + @overload + def with_entities( + self, + __ent0: _TCCA[_T0], + __ent1: _TCCA[_T1], + __ent2: _TCCA[_T2], + __ent3: _TCCA[_T3], + __ent4: _TCCA[_T4], + __ent5: _TCCA[_T5], + ) -> RowReturningQuery[Tuple[_T0, _T1, _T2, _T3, _T4, _T5]]: ... + + @overload + def with_entities( + self, + __ent0: _TCCA[_T0], + __ent1: _TCCA[_T1], + __ent2: _TCCA[_T2], + __ent3: _TCCA[_T3], + __ent4: _TCCA[_T4], + __ent5: _TCCA[_T5], + __ent6: _TCCA[_T6], + ) -> RowReturningQuery[Tuple[_T0, _T1, _T2, _T3, _T4, _T5, _T6]]: ... + + @overload + def with_entities( + self, + __ent0: _TCCA[_T0], + __ent1: _TCCA[_T1], + __ent2: _TCCA[_T2], + __ent3: _TCCA[_T3], + __ent4: _TCCA[_T4], + __ent5: _TCCA[_T5], + __ent6: _TCCA[_T6], + __ent7: _TCCA[_T7], + ) -> RowReturningQuery[Tuple[_T0, _T1, _T2, _T3, _T4, _T5, _T6, _T7]]: ... + + # END OVERLOADED FUNCTIONS self.with_entities + + @overload + def with_entities( + self, *entities: _ColumnsClauseArgument[Any] + ) -> Query[Any]: ... + + @_generative + def with_entities( + self, *entities: _ColumnsClauseArgument[Any], **__kw: Any + ) -> Query[Any]: + r"""Return a new :class:`_query.Query` + replacing the SELECT list with the + given entities. + + e.g.:: + + # Users, filtered on some arbitrary criterion + # and then ordered by related email address + q = ( + session.query(User) + .join(User.address) + .filter(User.name.like("%ed%")) + .order_by(Address.email) + ) + + # given *only* User.id==5, Address.email, and 'q', what + # would the *next* User in the result be ? + subq = ( + q.with_entities(Address.email) + .order_by(None) + .filter(User.id == 5) + .subquery() + ) + q = q.join((subq, subq.c.email < Address.email)).limit(1) + + .. seealso:: + + :meth:`_sql.Select.with_only_columns` - v2 comparable method. + """ + if __kw: + raise _no_kw() + + # Query has all the same fields as Select for this operation + # this could in theory be based on a protocol but not sure if it's + # worth it + _MemoizedSelectEntities._generate_for_statement(self) # type: ignore + self._set_entities(entities) + return self + + @_generative + def add_columns( + self, *column: _ColumnExpressionArgument[Any] + ) -> Query[Any]: + """Add one or more column expressions to the list + of result columns to be returned. + + .. seealso:: + + :meth:`_sql.Select.add_columns` - v2 comparable method. 
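+
+        A brief sketch (the ``User`` mapping and ``session`` are assumed,
+        illustrative only)::
+
+            from sqlalchemy import func
+
+            # each row is a (User, lowercased name) tuple
+            q = session.query(User).add_columns(func.lower(User.name))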
+ """ + + self._raw_columns = list(self._raw_columns) + + self._raw_columns.extend( + coercions.expect( + roles.ColumnsClauseRole, + c, + apply_propagate_attrs=self, + post_inspect=True, + ) + for c in column + ) + return self + + @util.deprecated( + "1.4", + ":meth:`_query.Query.add_column` " + "is deprecated and will be removed in a " + "future release. Please use :meth:`_query.Query.add_columns`", + ) + def add_column(self, column: _ColumnExpressionArgument[Any]) -> Query[Any]: + """Add a column expression to the list of result columns to be + returned. + + """ + return self.add_columns(column) + + @_generative + def options(self, *args: ExecutableOption) -> Self: + """Return a new :class:`_query.Query` object, + applying the given list of + mapper options. + + Most supplied options regard changing how column- and + relationship-mapped attributes are loaded. + + .. seealso:: + + :ref:`loading_columns` + + :ref:`relationship_loader_options` + + """ + + opts = tuple(util.flatten_iterator(args)) + if self._compile_options._current_path: + # opting for lower method overhead for the checks + for opt in opts: + if not opt._is_core and opt._is_legacy_option: # type: ignore + opt.process_query_conditionally(self) # type: ignore + else: + for opt in opts: + if not opt._is_core and opt._is_legacy_option: # type: ignore + opt.process_query(self) # type: ignore + + self._with_options += opts + return self + + def with_transformation( + self, fn: Callable[[Query[Any]], Query[Any]] + ) -> Query[Any]: + """Return a new :class:`_query.Query` object transformed by + the given function. + + E.g.:: + + def filter_something(criterion): + def transform(q): + return q.filter(criterion) + + return transform + + + q = q.with_transformation(filter_something(x == 5)) + + This allows ad-hoc recipes to be created for :class:`_query.Query` + objects. + + """ + return fn(self) + + def get_execution_options(self) -> _ImmutableExecuteOptions: + """Get the non-SQL options which will take effect during execution. + + .. versionadded:: 1.3 + + .. seealso:: + + :meth:`_query.Query.execution_options` + + :meth:`_sql.Select.get_execution_options` - v2 comparable method. + + """ + return self._execution_options + + @overload + def execution_options( + self, + *, + compiled_cache: Optional[CompiledCacheType] = ..., + logging_token: str = ..., + isolation_level: IsolationLevel = ..., + no_parameters: bool = False, + stream_results: bool = False, + max_row_buffer: int = ..., + yield_per: int = ..., + insertmanyvalues_page_size: int = ..., + schema_translate_map: Optional[SchemaTranslateMapType] = ..., + populate_existing: bool = False, + autoflush: bool = False, + preserve_rowcount: bool = False, + **opt: Any, + ) -> Self: ... + + @overload + def execution_options(self, **opt: Any) -> Self: ... + + @_generative + def execution_options(self, **kwargs: Any) -> Self: + """Set non-SQL options which take effect during execution. + + Options allowed here include all of those accepted by + :meth:`_engine.Connection.execution_options`, as well as a series + of ORM specific options: + + ``populate_existing=True`` - equivalent to using + :meth:`_orm.Query.populate_existing` + + ``autoflush=True|False`` - equivalent to using + :meth:`_orm.Query.autoflush` + + ``yield_per=`` - equivalent to using + :meth:`_orm.Query.yield_per` + + Note that the ``stream_results`` execution option is enabled + automatically if the :meth:`~sqlalchemy.orm.query.Query.yield_per()` + method or execution option is used. + + .. 
versionadded:: 1.4 - added ORM options to + :meth:`_orm.Query.execution_options` + + The execution options may also be specified on a per execution basis + when using :term:`2.0 style` queries via the + :paramref:`_orm.Session.execution_options` parameter. + + .. warning:: The + :paramref:`_engine.Connection.execution_options.stream_results` + parameter should not be used at the level of individual ORM + statement executions, as the :class:`_orm.Session` will not track + objects from different schema translate maps within a single + session. For multiple schema translate maps within the scope of a + single :class:`_orm.Session`, see :ref:`examples_sharding`. + + + .. seealso:: + + :ref:`engine_stream_results` + + :meth:`_query.Query.get_execution_options` + + :meth:`_sql.Select.execution_options` - v2 equivalent method. + + """ + self._execution_options = self._execution_options.union(kwargs) + return self + + @_generative + def with_for_update( + self, + *, + nowait: bool = False, + read: bool = False, + of: Optional[_ForUpdateOfArgument] = None, + skip_locked: bool = False, + key_share: bool = False, + ) -> Self: + """return a new :class:`_query.Query` + with the specified options for the + ``FOR UPDATE`` clause. + + The behavior of this method is identical to that of + :meth:`_expression.GenerativeSelect.with_for_update`. + When called with no arguments, + the resulting ``SELECT`` statement will have a ``FOR UPDATE`` clause + appended. When additional arguments are specified, backend-specific + options such as ``FOR UPDATE NOWAIT`` or ``LOCK IN SHARE MODE`` + can take effect. + + E.g.:: + + q = ( + sess.query(User) + .populate_existing() + .with_for_update(nowait=True, of=User) + ) + + The above query on a PostgreSQL backend will render like: + + .. sourcecode:: sql + + SELECT users.id AS users_id FROM users FOR UPDATE OF users NOWAIT + + .. warning:: + + Using ``with_for_update`` in the context of eager loading + relationships is not officially supported or recommended by + SQLAlchemy and may not work with certain queries on various + database backends. When ``with_for_update`` is successfully used + with a query that involves :func:`_orm.joinedload`, SQLAlchemy will + attempt to emit SQL that locks all involved tables. + + .. note:: It is generally a good idea to combine the use of the + :meth:`_orm.Query.populate_existing` method when using the + :meth:`_orm.Query.with_for_update` method. The purpose of + :meth:`_orm.Query.populate_existing` is to force all the data read + from the SELECT to be populated into the ORM objects returned, + even if these objects are already in the :term:`identity map`. + + .. seealso:: + + :meth:`_expression.GenerativeSelect.with_for_update` + - Core level method with + full argument and behavioral description. + + :meth:`_orm.Query.populate_existing` - overwrites attributes of + objects already loaded in the identity map. + + """ # noqa: E501 + + self._for_update_arg = ForUpdateArg( + read=read, + nowait=nowait, + of=of, + skip_locked=skip_locked, + key_share=key_share, + ) + return self + + @_generative + def params( + self, __params: Optional[Dict[str, Any]] = None, **kw: Any + ) -> Self: + r"""Add values for bind parameters which may have been + specified in filter(). + + Parameters may be specified using \**kwargs, or optionally a single + dictionary as the first positional argument. The reason for both is + that \**kwargs is convenient, however some parameter dictionaries + contain unicode keys in which case \**kwargs cannot be used. 
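+
+        A minimal sketch, assuming a ``User`` mapping (illustrative only)::
+
+            from sqlalchemy import bindparam
+
+            # the bind parameter is declared in filter() and supplied later
+            q = (
+                session.query(User)
+                .filter(User.name == bindparam("uname"))
+                .params(uname="ed")
+            )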
+ + """ + if __params: + kw.update(__params) + self._params = self._params.union(kw) + return self + + def where(self, *criterion: _ColumnExpressionArgument[bool]) -> Self: + """A synonym for :meth:`.Query.filter`. + + .. versionadded:: 1.4 + + .. seealso:: + + :meth:`_sql.Select.where` - v2 equivalent method. + + """ + return self.filter(*criterion) + + @_generative + @_assertions(_no_statement_condition, _no_limit_offset) + def filter(self, *criterion: _ColumnExpressionArgument[bool]) -> Self: + r"""Apply the given filtering criterion to a copy + of this :class:`_query.Query`, using SQL expressions. + + e.g.:: + + session.query(MyClass).filter(MyClass.name == "some name") + + Multiple criteria may be specified as comma separated; the effect + is that they will be joined together using the :func:`.and_` + function:: + + session.query(MyClass).filter(MyClass.name == "some name", MyClass.id > 5) + + The criterion is any SQL expression object applicable to the + WHERE clause of a select. String expressions are coerced + into SQL expression constructs via the :func:`_expression.text` + construct. + + .. seealso:: + + :meth:`_query.Query.filter_by` - filter on keyword expressions. + + :meth:`_sql.Select.where` - v2 equivalent method. + + """ # noqa: E501 + for crit in list(criterion): + crit = coercions.expect( + roles.WhereHavingRole, crit, apply_propagate_attrs=self + ) + + self._where_criteria += (crit,) + return self + + @util.memoized_property + def _last_joined_entity( + self, + ) -> Optional[Union[_InternalEntityType[Any], _JoinTargetElement]]: + if self._setup_joins: + return _determine_last_joined_entity( + self._setup_joins, + ) + else: + return None + + def _filter_by_zero(self) -> Any: + """for the filter_by() method, return the target entity for which + we will attempt to derive an expression from based on string name. + + """ + + if self._setup_joins: + _last_joined_entity = self._last_joined_entity + if _last_joined_entity is not None: + return _last_joined_entity + + # discussion related to #7239 + # special check determines if we should try to derive attributes + # for filter_by() from the "from object", i.e., if the user + # called query.select_from(some selectable).filter_by(some_attr=value). + # We don't want to do that in the case that methods like + # from_self(), select_entity_from(), or a set op like union() were + # called; while these methods also place a + # selectable in the _from_obj collection, they also set up + # the _set_base_alias boolean which turns on the whole "adapt the + # entity to this selectable" thing, meaning the query still continues + # to construct itself in terms of the lead entity that was passed + # to query(), e.g. query(User).from_self() is still in terms of User, + # and not the subquery that from_self() created. This feature of + # "implicitly adapt all occurrences of entity X to some arbitrary + # subquery" is the main thing I am trying to do away with in 2.0 as + # users should now used aliased() for that, but I can't entirely get + # rid of it due to query.union() and other set ops relying upon it. + # + # compare this to the base Select()._filter_by_zero() which can + # just return self._from_obj[0] if present, because there is no + # "_set_base_alias" feature. + # + # IOW, this conditional essentially detects if + # "select_from(some_selectable)" has been called, as opposed to + # "select_entity_from()", "from_self()" + # or "union() / some_set_op()". 
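+        #
+        # Hypothetical illustration (entity and table names are assumed,
+        # not part of this module):
+        #
+        #   query(User).select_from(user_table).filter_by(name="ed")
+        #       -> _set_base_alias remains False, so the branch below
+        #          derives "name" from user_table.
+        #
+        #   query(User).union(other_query).filter_by(name="ed")
+        #       -> _from_selectable() sets _set_base_alias, so we fall
+        #          through and derive "name" from the lead User entity.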
+ if self._from_obj and not self._compile_options._set_base_alias: + return self._from_obj[0] + + return self._raw_columns[0] + + def filter_by(self, **kwargs: Any) -> Self: + r"""Apply the given filtering criterion to a copy + of this :class:`_query.Query`, using keyword expressions. + + e.g.:: + + session.query(MyClass).filter_by(name="some name") + + Multiple criteria may be specified as comma separated; the effect + is that they will be joined together using the :func:`.and_` + function:: + + session.query(MyClass).filter_by(name="some name", id=5) + + The keyword expressions are extracted from the primary + entity of the query, or the last entity that was the + target of a call to :meth:`_query.Query.join`. + + .. seealso:: + + :meth:`_query.Query.filter` - filter on SQL expressions. + + :meth:`_sql.Select.filter_by` - v2 comparable method. + + """ + from_entity = self._filter_by_zero() + + clauses = [ + _entity_namespace_key(from_entity, key) == value + for key, value in kwargs.items() + ] + return self.filter(*clauses) + + @_generative + def order_by( + self, + __first: Union[ + Literal[None, False, _NoArg.NO_ARG], + _ColumnExpressionOrStrLabelArgument[Any], + ] = _NoArg.NO_ARG, + *clauses: _ColumnExpressionOrStrLabelArgument[Any], + ) -> Self: + """Apply one or more ORDER BY criteria to the query and return + the newly resulting :class:`_query.Query`. + + e.g.:: + + q = session.query(Entity).order_by(Entity.id, Entity.name) + + Calling this method multiple times is equivalent to calling it once + with all the clauses concatenated. All existing ORDER BY criteria may + be cancelled by passing ``None`` by itself. New ORDER BY criteria may + then be added by invoking :meth:`_orm.Query.order_by` again, e.g.:: + + # will erase all ORDER BY and ORDER BY new_col alone + q = q.order_by(None).order_by(new_col) + + .. seealso:: + + These sections describe ORDER BY in terms of :term:`2.0 style` + invocation but apply to :class:`_orm.Query` as well: + + :ref:`tutorial_order_by` - in the :ref:`unified_tutorial` + + :ref:`tutorial_order_by_label` - in the :ref:`unified_tutorial` + + :meth:`_sql.Select.order_by` - v2 equivalent method. + + """ + + for assertion in (self._no_statement_condition, self._no_limit_offset): + assertion("order_by") + + if not clauses and (__first is None or __first is False): + self._order_by_clauses = () + elif __first is not _NoArg.NO_ARG: + criterion = tuple( + coercions.expect(roles.OrderByRole, clause) + for clause in (__first,) + clauses + ) + self._order_by_clauses += criterion + + return self + + @_generative + def group_by( + self, + __first: Union[ + Literal[None, False, _NoArg.NO_ARG], + _ColumnExpressionOrStrLabelArgument[Any], + ] = _NoArg.NO_ARG, + *clauses: _ColumnExpressionOrStrLabelArgument[Any], + ) -> Self: + """Apply one or more GROUP BY criterion to the query and return + the newly resulting :class:`_query.Query`. + + All existing GROUP BY settings can be suppressed by + passing ``None`` - this will suppress any GROUP BY configured + on mappers as well. + + .. seealso:: + + These sections describe GROUP BY in terms of :term:`2.0 style` + invocation but apply to :class:`_orm.Query` as well: + + :ref:`tutorial_group_by_w_aggregates` - in the + :ref:`unified_tutorial` + + :ref:`tutorial_order_by_label` - in the :ref:`unified_tutorial` + + :meth:`_sql.Select.group_by` - v2 equivalent method. 
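+
+        A minimal sketch, assuming ``User`` / ``Address`` mappings with a
+        ``User.addresses`` relationship (illustrative only)::
+
+            from sqlalchemy import func
+
+            # one row per user id with the count of related addresses
+            q = (
+                session.query(User.id, func.count(Address.id))
+                .join(User.addresses)
+                .group_by(User.id)
+            )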
+ + """ + + for assertion in (self._no_statement_condition, self._no_limit_offset): + assertion("group_by") + + if not clauses and (__first is None or __first is False): + self._group_by_clauses = () + elif __first is not _NoArg.NO_ARG: + criterion = tuple( + coercions.expect(roles.GroupByRole, clause) + for clause in (__first,) + clauses + ) + self._group_by_clauses += criterion + return self + + @_generative + @_assertions(_no_statement_condition, _no_limit_offset) + def having(self, *having: _ColumnExpressionArgument[bool]) -> Self: + r"""Apply a HAVING criterion to the query and return the + newly resulting :class:`_query.Query`. + + :meth:`_query.Query.having` is used in conjunction with + :meth:`_query.Query.group_by`. + + HAVING criterion makes it possible to use filters on aggregate + functions like COUNT, SUM, AVG, MAX, and MIN, eg.:: + + q = ( + session.query(User.id) + .join(User.addresses) + .group_by(User.id) + .having(func.count(Address.id) > 2) + ) + + .. seealso:: + + :meth:`_sql.Select.having` - v2 equivalent method. + + """ + + for criterion in having: + having_criteria = coercions.expect( + roles.WhereHavingRole, criterion + ) + self._having_criteria += (having_criteria,) + return self + + def _set_op(self, expr_fn: Any, *q: Query[Any]) -> Self: + list_of_queries = (self,) + q + return self._from_selectable(expr_fn(*(list_of_queries)).subquery()) + + def union(self, *q: Query[Any]) -> Self: + """Produce a UNION of this Query against one or more queries. + + e.g.:: + + q1 = sess.query(SomeClass).filter(SomeClass.foo == "bar") + q2 = sess.query(SomeClass).filter(SomeClass.bar == "foo") + + q3 = q1.union(q2) + + The method accepts multiple Query objects so as to control + the level of nesting. A series of ``union()`` calls such as:: + + x.union(y).union(z).all() + + will nest on each ``union()``, and produces: + + .. sourcecode:: sql + + SELECT * FROM (SELECT * FROM (SELECT * FROM X UNION + SELECT * FROM y) UNION SELECT * FROM Z) + + Whereas:: + + x.union(y, z).all() + + produces: + + .. sourcecode:: sql + + SELECT * FROM (SELECT * FROM X UNION SELECT * FROM y UNION + SELECT * FROM Z) + + Note that many database backends do not allow ORDER BY to + be rendered on a query called within UNION, EXCEPT, etc. + To disable all ORDER BY clauses including those configured + on mappers, issue ``query.order_by(None)`` - the resulting + :class:`_query.Query` object will not render ORDER BY within + its SELECT statement. + + .. seealso:: + + :meth:`_sql.Select.union` - v2 equivalent method. + + """ + return self._set_op(expression.union, *q) + + def union_all(self, *q: Query[Any]) -> Self: + """Produce a UNION ALL of this Query against one or more queries. + + Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See + that method for usage examples. + + .. seealso:: + + :meth:`_sql.Select.union_all` - v2 equivalent method. + + """ + return self._set_op(expression.union_all, *q) + + def intersect(self, *q: Query[Any]) -> Self: + """Produce an INTERSECT of this Query against one or more queries. + + Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See + that method for usage examples. + + .. seealso:: + + :meth:`_sql.Select.intersect` - v2 equivalent method. + + """ + return self._set_op(expression.intersect, *q) + + def intersect_all(self, *q: Query[Any]) -> Self: + """Produce an INTERSECT ALL of this Query against one or more queries. + + Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See + that method for usage examples. + + .. 
seealso:: + + :meth:`_sql.Select.intersect_all` - v2 equivalent method. + + """ + return self._set_op(expression.intersect_all, *q) + + def except_(self, *q: Query[Any]) -> Self: + """Produce an EXCEPT of this Query against one or more queries. + + Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See + that method for usage examples. + + .. seealso:: + + :meth:`_sql.Select.except_` - v2 equivalent method. + + """ + return self._set_op(expression.except_, *q) + + def except_all(self, *q: Query[Any]) -> Self: + """Produce an EXCEPT ALL of this Query against one or more queries. + + Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See + that method for usage examples. + + .. seealso:: + + :meth:`_sql.Select.except_all` - v2 equivalent method. + + """ + return self._set_op(expression.except_all, *q) + + @_generative + @_assertions(_no_statement_condition, _no_limit_offset) + def join( + self, + target: _JoinTargetArgument, + onclause: Optional[_OnClauseArgument] = None, + *, + isouter: bool = False, + full: bool = False, + ) -> Self: + r"""Create a SQL JOIN against this :class:`_query.Query` + object's criterion + and apply generatively, returning the newly resulting + :class:`_query.Query`. + + **Simple Relationship Joins** + + Consider a mapping between two classes ``User`` and ``Address``, + with a relationship ``User.addresses`` representing a collection + of ``Address`` objects associated with each ``User``. The most + common usage of :meth:`_query.Query.join` + is to create a JOIN along this + relationship, using the ``User.addresses`` attribute as an indicator + for how this should occur:: + + q = session.query(User).join(User.addresses) + + Where above, the call to :meth:`_query.Query.join` along + ``User.addresses`` will result in SQL approximately equivalent to: + + .. sourcecode:: sql + + SELECT user.id, user.name + FROM user JOIN address ON user.id = address.user_id + + In the above example we refer to ``User.addresses`` as passed to + :meth:`_query.Query.join` as the "on clause", that is, it indicates + how the "ON" portion of the JOIN should be constructed. + + To construct a chain of joins, multiple :meth:`_query.Query.join` + calls may be used. The relationship-bound attribute implies both + the left and right side of the join at once:: + + q = ( + session.query(User) + .join(User.orders) + .join(Order.items) + .join(Item.keywords) + ) + + .. note:: as seen in the above example, **the order in which each + call to the join() method occurs is important**. Query would not, + for example, know how to join correctly if we were to specify + ``User``, then ``Item``, then ``Order``, in our chain of joins; in + such a case, depending on the arguments passed, it may raise an + error that it doesn't know how to join, or it may produce invalid + SQL in which case the database will raise an error. In correct + practice, the + :meth:`_query.Query.join` method is invoked in such a way that lines + up with how we would want the JOIN clauses in SQL to be + rendered, and each call should represent a clear link from what + precedes it. + + **Joins to a Target Entity or Selectable** + + A second form of :meth:`_query.Query.join` allows any mapped entity or + core selectable construct as a target. 
In this usage, + :meth:`_query.Query.join` will attempt to create a JOIN along the + natural foreign key relationship between two entities:: + + q = session.query(User).join(Address) + + In the above calling form, :meth:`_query.Query.join` is called upon to + create the "on clause" automatically for us. This calling form will + ultimately raise an error if either there are no foreign keys between + the two entities, or if there are multiple foreign key linkages between + the target entity and the entity or entities already present on the + left side such that creating a join requires more information. Note + that when indicating a join to a target without any ON clause, ORM + configured relationships are not taken into account. + + **Joins to a Target with an ON Clause** + + The third calling form allows both the target entity as well + as the ON clause to be passed explicitly. A example that includes + a SQL expression as the ON clause is as follows:: + + q = session.query(User).join(Address, User.id == Address.user_id) + + The above form may also use a relationship-bound attribute as the + ON clause as well:: + + q = session.query(User).join(Address, User.addresses) + + The above syntax can be useful for the case where we wish + to join to an alias of a particular target entity. If we wanted + to join to ``Address`` twice, it could be achieved using two + aliases set up using the :func:`~sqlalchemy.orm.aliased` function:: + + a1 = aliased(Address) + a2 = aliased(Address) + + q = ( + session.query(User) + .join(a1, User.addresses) + .join(a2, User.addresses) + .filter(a1.email_address == "ed@foo.com") + .filter(a2.email_address == "ed@bar.com") + ) + + The relationship-bound calling form can also specify a target entity + using the :meth:`_orm.PropComparator.of_type` method; a query + equivalent to the one above would be:: + + a1 = aliased(Address) + a2 = aliased(Address) + + q = ( + session.query(User) + .join(User.addresses.of_type(a1)) + .join(User.addresses.of_type(a2)) + .filter(a1.email_address == "ed@foo.com") + .filter(a2.email_address == "ed@bar.com") + ) + + **Augmenting Built-in ON Clauses** + + As a substitute for providing a full custom ON condition for an + existing relationship, the :meth:`_orm.PropComparator.and_` function + may be applied to a relationship attribute to augment additional + criteria into the ON clause; the additional criteria will be combined + with the default criteria using AND:: + + q = session.query(User).join( + User.addresses.and_(Address.email_address != "foo@bar.com") + ) + + .. versionadded:: 1.4 + + **Joining to Tables and Subqueries** + + + The target of a join may also be any table or SELECT statement, + which may be related to a target entity or not. 
Use the + appropriate ``.subquery()`` method in order to make a subquery + out of a query:: + + subq = ( + session.query(Address) + .filter(Address.email_address == "ed@foo.com") + .subquery() + ) + + + q = session.query(User).join(subq, User.id == subq.c.user_id) + + Joining to a subquery in terms of a specific relationship and/or + target entity may be achieved by linking the subquery to the + entity using :func:`_orm.aliased`:: + + subq = ( + session.query(Address) + .filter(Address.email_address == "ed@foo.com") + .subquery() + ) + + address_subq = aliased(Address, subq) + + q = session.query(User).join(User.addresses.of_type(address_subq)) + + **Controlling what to Join From** + + In cases where the left side of the current state of + :class:`_query.Query` is not in line with what we want to join from, + the :meth:`_query.Query.select_from` method may be used:: + + q = ( + session.query(Address) + .select_from(User) + .join(User.addresses) + .filter(User.name == "ed") + ) + + Which will produce SQL similar to: + + .. sourcecode:: sql + + SELECT address.* FROM user + JOIN address ON user.id=address.user_id + WHERE user.name = :name_1 + + .. seealso:: + + :meth:`_sql.Select.join` - v2 equivalent method. + + :param \*props: Incoming arguments for :meth:`_query.Query.join`, + the props collection in modern use should be considered to be a one + or two argument form, either as a single "target" entity or ORM + attribute-bound relationship, or as a target entity plus an "on + clause" which may be a SQL expression or ORM attribute-bound + relationship. + + :param isouter=False: If True, the join used will be a left outer join, + just as if the :meth:`_query.Query.outerjoin` method were called. + + :param full=False: render FULL OUTER JOIN; implies ``isouter``. + + """ + + join_target = coercions.expect( + roles.JoinTargetRole, + target, + apply_propagate_attrs=self, + legacy=True, + ) + if onclause is not None: + onclause_element = coercions.expect( + roles.OnClauseRole, onclause, legacy=True + ) + else: + onclause_element = None + + self._setup_joins += ( + ( + join_target, + onclause_element, + None, + { + "isouter": isouter, + "full": full, + }, + ), + ) + + self.__dict__.pop("_last_joined_entity", None) + return self + + def outerjoin( + self, + target: _JoinTargetArgument, + onclause: Optional[_OnClauseArgument] = None, + *, + full: bool = False, + ) -> Self: + """Create a left outer join against this ``Query`` object's criterion + and apply generatively, returning the newly resulting ``Query``. + + Usage is the same as the ``join()`` method. + + .. seealso:: + + :meth:`_sql.Select.outerjoin` - v2 equivalent method. + + """ + return self.join(target, onclause=onclause, isouter=True, full=full) + + @_generative + @_assertions(_no_statement_condition) + def reset_joinpoint(self) -> Self: + """Return a new :class:`.Query`, where the "join point" has + been reset back to the base FROM entities of the query. + + This method is usually used in conjunction with the + ``aliased=True`` feature of the :meth:`~.Query.join` + method. See the example in :meth:`~.Query.join` for how + this is used. + + """ + self._last_joined_entity = None + + return self + + @_generative + @_assertions(_no_clauseelement_condition) + def select_from(self, *from_obj: _FromClauseArgument) -> Self: + r"""Set the FROM clause of this :class:`.Query` explicitly. 
+ + :meth:`.Query.select_from` is often used in conjunction with + :meth:`.Query.join` in order to control which entity is selected + from on the "left" side of the join. + + The entity or selectable object here effectively replaces the + "left edge" of any calls to :meth:`~.Query.join`, when no + joinpoint is otherwise established - usually, the default "join + point" is the leftmost entity in the :class:`~.Query` object's + list of entities to be selected. + + A typical example:: + + q = ( + session.query(Address) + .select_from(User) + .join(User.addresses) + .filter(User.name == "ed") + ) + + Which produces SQL equivalent to: + + .. sourcecode:: sql + + SELECT address.* FROM user + JOIN address ON user.id=address.user_id + WHERE user.name = :name_1 + + :param \*from_obj: collection of one or more entities to apply + to the FROM clause. Entities can be mapped classes, + :class:`.AliasedClass` objects, :class:`.Mapper` objects + as well as core :class:`.FromClause` elements like subqueries. + + .. seealso:: + + :meth:`~.Query.join` + + :meth:`.Query.select_entity_from` + + :meth:`_sql.Select.select_from` - v2 equivalent method. + + """ + + self._set_select_from(from_obj, False) + return self + + def __getitem__(self, item: Any) -> Any: + return orm_util._getitem( + self, + item, + ) + + @_generative + @_assertions(_no_statement_condition) + def slice( + self, + start: int, + stop: int, + ) -> Self: + """Computes the "slice" of the :class:`_query.Query` represented by + the given indices and returns the resulting :class:`_query.Query`. + + The start and stop indices behave like the argument to Python's + built-in :func:`range` function. This method provides an + alternative to using ``LIMIT``/``OFFSET`` to get a slice of the + query. + + For example, :: + + session.query(User).order_by(User.id).slice(1, 3) + + renders as + + .. sourcecode:: sql + + SELECT users.id AS users_id, + users.name AS users_name + FROM users ORDER BY users.id + LIMIT ? OFFSET ? + (2, 1) + + .. seealso:: + + :meth:`_query.Query.limit` + + :meth:`_query.Query.offset` + + :meth:`_sql.Select.slice` - v2 equivalent method. + + """ + + self._limit_clause, self._offset_clause = sql_util._make_slice( + self._limit_clause, self._offset_clause, start, stop + ) + return self + + @_generative + @_assertions(_no_statement_condition) + def limit(self, limit: _LimitOffsetType) -> Self: + """Apply a ``LIMIT`` to the query and return the newly resulting + ``Query``. + + .. seealso:: + + :meth:`_sql.Select.limit` - v2 equivalent method. + + """ + self._limit_clause = sql_util._offset_or_limit_clause(limit) + return self + + @_generative + @_assertions(_no_statement_condition) + def offset(self, offset: _LimitOffsetType) -> Self: + """Apply an ``OFFSET`` to the query and return the newly resulting + ``Query``. + + .. seealso:: + + :meth:`_sql.Select.offset` - v2 equivalent method. + """ + self._offset_clause = sql_util._offset_or_limit_clause(offset) + return self + + @_generative + @_assertions(_no_statement_condition) + def distinct(self, *expr: _ColumnExpressionArgument[Any]) -> Self: + r"""Apply a ``DISTINCT`` to the query and return the newly resulting + ``Query``. + + + .. note:: + + The ORM-level :meth:`.distinct` call includes logic that will + automatically add columns from the ORDER BY of the query to the + columns clause of the SELECT statement, to satisfy the common need + of the database backend that ORDER BY columns be part of the SELECT + list when DISTINCT is used. 
These columns *are not* added to the + list of columns actually fetched by the :class:`_query.Query`, + however, + so would not affect results. The columns are passed through when + using the :attr:`_query.Query.statement` accessor, however. + + .. deprecated:: 2.0 This logic is deprecated and will be removed + in SQLAlchemy 2.0. See :ref:`migration_20_query_distinct` + for a description of this use case in 2.0. + + .. seealso:: + + :meth:`_sql.Select.distinct` - v2 equivalent method. + + :param \*expr: optional column expressions. When present, + the PostgreSQL dialect will render a ``DISTINCT ON ()`` + construct. + + .. deprecated:: 1.4 Using \*expr in other dialects is deprecated + and will raise :class:`_exc.CompileError` in a future version. + + """ + if expr: + self._distinct = True + self._distinct_on = self._distinct_on + tuple( + coercions.expect(roles.ByOfRole, e) for e in expr + ) + else: + self._distinct = True + return self + + def all(self) -> List[_T]: + """Return the results represented by this :class:`_query.Query` + as a list. + + This results in an execution of the underlying SQL statement. + + .. warning:: The :class:`_query.Query` object, + when asked to return either + a sequence or iterator that consists of full ORM-mapped entities, + will **deduplicate entries based on primary key**. See the FAQ for + more details. + + .. seealso:: + + :ref:`faq_query_deduplicating` + + .. seealso:: + + :meth:`_engine.Result.all` - v2 comparable method. + + :meth:`_engine.Result.scalars` - v2 comparable method. + """ + return self._iter().all() # type: ignore + + @_generative + @_assertions(_no_clauseelement_condition) + def from_statement(self, statement: ExecutableReturnsRows) -> Self: + """Execute the given SELECT statement and return results. + + This method bypasses all internal statement compilation, and the + statement is executed without modification. + + The statement is typically either a :func:`_expression.text` + or :func:`_expression.select` construct, and should return the set + of columns + appropriate to the entity class represented by this + :class:`_query.Query`. + + .. seealso:: + + :meth:`_sql.Select.from_statement` - v2 comparable method. + + """ + statement = coercions.expect( + roles.SelectStatementRole, statement, apply_propagate_attrs=self + ) + self._statement = statement + return self + + def first(self) -> Optional[_T]: + """Return the first result of this ``Query`` or + None if the result doesn't contain any row. + + first() applies a limit of one within the generated SQL, so that + only one primary entity row is generated on the server side + (note this may consist of multiple result rows if join-loaded + collections are present). + + Calling :meth:`_query.Query.first` + results in an execution of the underlying + query. + + .. seealso:: + + :meth:`_query.Query.one` + + :meth:`_query.Query.one_or_none` + + :meth:`_engine.Result.first` - v2 comparable method. + + :meth:`_engine.Result.scalars` - v2 comparable method. + + """ + # replicates limit(1) behavior + if self._statement is not None: + return self._iter().first() # type: ignore + else: + return self.limit(1)._iter().first() # type: ignore + + def one_or_none(self) -> Optional[_T]: + """Return at most one result or raise an exception. + + Returns ``None`` if the query selects + no rows. 
Raises ``sqlalchemy.orm.exc.MultipleResultsFound`` + if multiple object identities are returned, or if multiple + rows are returned for a query that returns only scalar values + as opposed to full identity-mapped entities. + + Calling :meth:`_query.Query.one_or_none` + results in an execution of the + underlying query. + + .. seealso:: + + :meth:`_query.Query.first` + + :meth:`_query.Query.one` + + :meth:`_engine.Result.one_or_none` - v2 comparable method. + + :meth:`_engine.Result.scalar_one_or_none` - v2 comparable method. + + """ + return self._iter().one_or_none() # type: ignore + + def one(self) -> _T: + """Return exactly one result or raise an exception. + + Raises :class:`_exc.NoResultFound` if the query selects no rows. + Raises :class:`_exc.MultipleResultsFound` if multiple object identities + are returned, or if multiple rows are returned for a query that returns + only scalar values as opposed to full identity-mapped entities. + + Calling :meth:`.one` results in an execution of the underlying query. + + .. seealso:: + + :meth:`_query.Query.first` + + :meth:`_query.Query.one_or_none` + + :meth:`_engine.Result.one` - v2 comparable method. + + :meth:`_engine.Result.scalar_one` - v2 comparable method. + + """ + return self._iter().one() # type: ignore + + def scalar(self) -> Any: + """Return the first element of the first result or None + if no rows present. If multiple rows are returned, + raises :class:`_exc.MultipleResultsFound`. + + >>> session.query(Item).scalar() + + >>> session.query(Item.id).scalar() + 1 + >>> session.query(Item.id).filter(Item.id < 0).scalar() + None + >>> session.query(Item.id, Item.name).scalar() + 1 + >>> session.query(func.count(Parent.id)).scalar() + 20 + + This results in an execution of the underlying query. + + .. seealso:: + + :meth:`_engine.Result.scalar` - v2 comparable method. + + """ + # TODO: not sure why we can't use result.scalar() here + try: + ret = self.one() + if not isinstance(ret, collections_abc.Sequence): + return ret + return ret[0] + except sa_exc.NoResultFound: + return None + + def __iter__(self) -> Iterator[_T]: + result = self._iter() + try: + yield from result # type: ignore + except GeneratorExit: + # issue #8710 - direct iteration is not re-usable after + # an iterable block is broken, so close the result + result._soft_close() + raise + + def _iter(self) -> Union[ScalarResult[_T], Result[_T]]: + # new style execution. + params = self._params + + statement = self._statement_20() + result: Union[ScalarResult[_T], Result[_T]] = self.session.execute( + statement, + params, + execution_options={"_sa_orm_load_options": self.load_options}, + ) + + # legacy: automatically set scalars, unique + if result._attributes.get("is_single_entity", False): + result = cast("Result[_T]", result).scalars() + + if ( + result._attributes.get("filtered", False) + and not self.load_options._yield_per + ): + result = result.unique() + + return result + + def __str__(self) -> str: + statement = self._statement_20() + + try: + bind = ( + self._get_bind_args(statement, self.session.get_bind) + if self.session + else None + ) + except sa_exc.UnboundExecutionError: + bind = None + + return str(statement.compile(bind)) + + def _get_bind_args(self, statement: Any, fn: Any, **kw: Any) -> Any: + return fn(clause=statement, **kw) + + @property + def column_descriptions(self) -> List[ORMColumnDescription]: + """Return metadata about the columns which would be + returned by this :class:`_query.Query`. 
+ + Format is a list of dictionaries:: + + user_alias = aliased(User, name="user2") + q = sess.query(User, User.id, user_alias) + + # this expression: + q.column_descriptions + + # would return: + [ + { + "name": "User", + "type": User, + "aliased": False, + "expr": User, + "entity": User, + }, + { + "name": "id", + "type": Integer(), + "aliased": False, + "expr": User.id, + "entity": User, + }, + { + "name": "user2", + "type": User, + "aliased": True, + "expr": user_alias, + "entity": user_alias, + }, + ] + + .. seealso:: + + This API is available using :term:`2.0 style` queries as well, + documented at: + + * :ref:`queryguide_inspection` + + * :attr:`.Select.column_descriptions` + + """ + + return _column_descriptions(self, legacy=True) + + @util.deprecated( + "2.0", + "The :meth:`_orm.Query.instances` method is deprecated and will " + "be removed in a future release. " + "Use the Select.from_statement() method or aliased() construct in " + "conjunction with Session.execute() instead.", + ) + def instances( + self, + result_proxy: CursorResult[Any], + context: Optional[QueryContext] = None, + ) -> Any: + """Return an ORM result given a :class:`_engine.CursorResult` and + :class:`.QueryContext`. + + """ + if context is None: + util.warn_deprecated( + "Using the Query.instances() method without a context " + "is deprecated and will be disallowed in a future release. " + "Please make use of :meth:`_query.Query.from_statement` " + "for linking ORM results to arbitrary select constructs.", + version="1.4", + ) + compile_state = self._compile_state(for_statement=False) + + context = QueryContext( + compile_state, + compile_state.statement, + compile_state.statement, + self._params, + self.session, + self.load_options, + ) + + result = loading.instances(result_proxy, context) + + # legacy: automatically set scalars, unique + if result._attributes.get("is_single_entity", False): + result = result.scalars() # type: ignore + + if result._attributes.get("filtered", False): + result = result.unique() + + # TODO: isn't this supposed to be a list? + return result + + @util.became_legacy_20( + ":meth:`_orm.Query.merge_result`", + alternative="The method is superseded by the " + ":func:`_orm.merge_frozen_result` function.", + enable_warnings=False, # warnings occur via loading.merge_result + ) + def merge_result( + self, + iterator: Union[ + FrozenResult[Any], Iterable[Sequence[Any]], Iterable[object] + ], + load: bool = True, + ) -> Union[FrozenResult[Any], Iterable[Any]]: + """Merge a result into this :class:`_query.Query` object's Session. + + Given an iterator returned by a :class:`_query.Query` + of the same structure + as this one, return an identical iterator of results, with all mapped + instances merged into the session using :meth:`.Session.merge`. This + is an optimized method which will merge all mapped instances, + preserving the structure of the result rows and unmapped columns with + less method overhead than that of calling :meth:`.Session.merge` + explicitly for each value. + + The structure of the results is determined based on the column list of + this :class:`_query.Query` - if these do not correspond, + unchecked errors + will occur. + + The 'load' argument is the same as that of :meth:`.Session.merge`. + + For an example of how :meth:`_query.Query.merge_result` is used, see + the source code for the example :ref:`examples_caching`, where + :meth:`_query.Query.merge_result` is used to efficiently restore state + from a cache back into a target :class:`.Session`. 
+ + """ + + return loading.merge_result(self, iterator, load) + + def exists(self) -> Exists: + """A convenience method that turns a query into an EXISTS subquery + of the form EXISTS (SELECT 1 FROM ... WHERE ...). + + e.g.:: + + q = session.query(User).filter(User.name == "fred") + session.query(q.exists()) + + Producing SQL similar to: + + .. sourcecode:: sql + + SELECT EXISTS ( + SELECT 1 FROM users WHERE users.name = :name_1 + ) AS anon_1 + + The EXISTS construct is usually used in the WHERE clause:: + + session.query(User.id).filter(q.exists()).scalar() + + Note that some databases such as SQL Server don't allow an + EXISTS expression to be present in the columns clause of a + SELECT. To select a simple boolean value based on the exists + as a WHERE, use :func:`.literal`:: + + from sqlalchemy import literal + + session.query(literal(True)).filter(q.exists()).scalar() + + .. seealso:: + + :meth:`_sql.Select.exists` - v2 comparable method. + + """ + + # .add_columns() for the case that we are a query().select_from(X), + # so that ".statement" can be produced (#2995) but also without + # omitting the FROM clause from a query(X) (#2818); + # .with_only_columns() after we have a core select() so that + # we get just "SELECT 1" without any entities. + + inner = ( + self.enable_eagerloads(False) + .add_columns(sql.literal_column("1")) + .set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL) + ._get_select_statement_only() + .with_only_columns(1) + ) + + ezero = self._entity_from_pre_ent_zero() + if ezero is not None: + inner = inner.select_from(ezero) + + return sql.exists(inner) + + def count(self) -> int: + r"""Return a count of rows this the SQL formed by this :class:`Query` + would return. + + This generates the SQL for this Query as follows: + + .. sourcecode:: sql + + SELECT count(1) AS count_1 FROM ( + SELECT + ) AS anon_1 + + The above SQL returns a single row, which is the aggregate value + of the count function; the :meth:`_query.Query.count` + method then returns + that single integer value. + + .. warning:: + + It is important to note that the value returned by + count() is **not the same as the number of ORM objects that this + Query would return from a method such as the .all() method**. + The :class:`_query.Query` object, + when asked to return full entities, + will **deduplicate entries based on primary key**, meaning if the + same primary key value would appear in the results more than once, + only one object of that primary key would be present. This does + not apply to a query that is against individual columns. + + .. seealso:: + + :ref:`faq_query_deduplicating` + + For fine grained control over specific columns to count, to skip the + usage of a subquery or otherwise control of the FROM clause, or to use + other aggregate functions, use :attr:`~sqlalchemy.sql.expression.func` + expressions in conjunction with :meth:`~.Session.query`, i.e.:: + + from sqlalchemy import func + + # count User records, without + # using a subquery. + session.query(func.count(User.id)) + + # return count of user "id" grouped + # by "name" + session.query(func.count(User.id)).group_by(User.name) + + from sqlalchemy import distinct + + # count distinct "name" values + session.query(func.count(distinct(User.name))) + + .. 
seealso:: + + :ref:`migration_20_query_usage` + + """ + col = sql.func.count(sql.literal_column("*")) + return ( # type: ignore + self._legacy_from_self(col).enable_eagerloads(False).scalar() + ) + + def delete( + self, + synchronize_session: SynchronizeSessionArgument = "auto", + delete_args: Optional[Dict[Any, Any]] = None, + ) -> int: + r"""Perform a DELETE with an arbitrary WHERE clause. + + Deletes rows matched by this query from the database. + + E.g.:: + + sess.query(User).filter(User.age == 25).delete(synchronize_session=False) + + sess.query(User).filter(User.age == 25).delete( + synchronize_session="evaluate" + ) + + .. warning:: + + See the section :ref:`orm_expression_update_delete` for important + caveats and warnings, including limitations when using bulk UPDATE + and DELETE with mapper inheritance configurations. + + :param synchronize_session: chooses the strategy to update the + attributes on objects in the session. See the section + :ref:`orm_expression_update_delete` for a discussion of these + strategies. + + :param delete_args: Optional dictionary, if present will be passed + to the underlying :func:`_expression.delete` construct as the ``**kw`` + for the object. May be used to pass dialect-specific arguments such + as ``mysql_limit``. + + .. versionadded:: 2.0.37 + + :return: the count of rows matched as returned by the database's + "row count" feature. + + .. seealso:: + + :ref:`orm_expression_update_delete` + + """ # noqa: E501 + + bulk_del = BulkDelete(self, delete_args) + if self.dispatch.before_compile_delete: + for fn in self.dispatch.before_compile_delete: + new_query = fn(bulk_del.query, bulk_del) + if new_query is not None: + bulk_del.query = new_query + + self = bulk_del.query + + delete_ = sql.delete(*self._raw_columns) # type: ignore + + if delete_args: + delete_ = delete_.with_dialect_options(**delete_args) + + delete_._where_criteria = self._where_criteria + result: CursorResult[Any] = self.session.execute( + delete_, + self._params, + execution_options=self._execution_options.union( + {"synchronize_session": synchronize_session} + ), + ) + bulk_del.result = result # type: ignore + self.session.dispatch.after_bulk_delete(bulk_del) + result.close() + + return result.rowcount + + def update( + self, + values: Dict[_DMLColumnArgument, Any], + synchronize_session: SynchronizeSessionArgument = "auto", + update_args: Optional[Dict[Any, Any]] = None, + ) -> int: + r"""Perform an UPDATE with an arbitrary WHERE clause. + + Updates rows matched by this query in the database. + + E.g.:: + + sess.query(User).filter(User.age == 25).update( + {User.age: User.age - 10}, synchronize_session=False + ) + + sess.query(User).filter(User.age == 25).update( + {"age": User.age - 10}, synchronize_session="evaluate" + ) + + .. warning:: + + See the section :ref:`orm_expression_update_delete` for important + caveats and warnings, including limitations when using arbitrary + UPDATE and DELETE with mapper inheritance configurations. + + :param values: a dictionary with attributes names, or alternatively + mapped attributes or SQL expressions, as keys, and literal + values or sql expressions as values. If :ref:`parameter-ordered + mode ` is desired, the values can + be passed as a list of 2-tuples; this requires that the + :paramref:`~sqlalchemy.sql.expression.update.preserve_parameter_order` + flag is passed to the :paramref:`.Query.update.update_args` dictionary + as well. + + :param synchronize_session: chooses the strategy to update the + attributes on objects in the session. 
See the section + :ref:`orm_expression_update_delete` for a discussion of these + strategies. + + :param update_args: Optional dictionary, if present will be passed + to the underlying :func:`_expression.update` construct as the ``**kw`` + for the object. May be used to pass dialect-specific arguments such + as ``mysql_limit``, as well as other special arguments such as + :paramref:`~sqlalchemy.sql.expression.update.preserve_parameter_order`. + + :return: the count of rows matched as returned by the database's + "row count" feature. + + + .. seealso:: + + :ref:`orm_expression_update_delete` + + """ + + update_args = update_args or {} + + bulk_ud = BulkUpdate(self, values, update_args) + + if self.dispatch.before_compile_update: + for fn in self.dispatch.before_compile_update: + new_query = fn(bulk_ud.query, bulk_ud) + if new_query is not None: + bulk_ud.query = new_query + self = bulk_ud.query + + upd = sql.update(*self._raw_columns) # type: ignore + + ppo = update_args.pop("preserve_parameter_order", False) + if ppo: + upd = upd.ordered_values(*values) # type: ignore + else: + upd = upd.values(values) + if update_args: + upd = upd.with_dialect_options(**update_args) + + upd._where_criteria = self._where_criteria + result: CursorResult[Any] = self.session.execute( + upd, + self._params, + execution_options=self._execution_options.union( + {"synchronize_session": synchronize_session} + ), + ) + bulk_ud.result = result # type: ignore + self.session.dispatch.after_bulk_update(bulk_ud) + result.close() + return result.rowcount + + def _compile_state( + self, for_statement: bool = False, **kw: Any + ) -> ORMCompileState: + """Create an out-of-compiler ORMCompileState object. + + The ORMCompileState object is normally created directly as a result + of the SQLCompiler.process() method being handed a Select() + or FromStatement() object that uses the "orm" plugin. This method + provides a means of creating this ORMCompileState object directly + without using the compiler. + + This method is used only for deprecated cases, which include + the .from_self() method for a Query that has multiple levels + of .from_self() in use, as well as the instances() method. It is + also used within the test suite to generate ORMCompileState objects + for test purposes. + + """ + + stmt = self._statement_20(for_statement=for_statement, **kw) + assert for_statement == stmt._compile_options._for_statement + + # this chooses between ORMFromStatementCompileState and + # ORMSelectCompileState. We could also base this on + # query._statement is not None as we have the ORM Query here + # however this is the more general path. 
+ compile_state_cls = cast( + ORMCompileState, + ORMCompileState._get_plugin_class_for_plugin(stmt, "orm"), + ) + + return compile_state_cls._create_orm_context( + stmt, toplevel=True, compiler=None + ) + + def _compile_context(self, for_statement: bool = False) -> QueryContext: + compile_state = self._compile_state(for_statement=for_statement) + context = QueryContext( + compile_state, + compile_state.statement, + compile_state.statement, + self._params, + self.session, + self.load_options, + ) + + return context + + +class AliasOption(interfaces.LoaderOption): + inherit_cache = False + + @util.deprecated( + "1.4", + "The :class:`.AliasOption` object is not necessary " + "for entities to be matched up to a query that is established " + "via :meth:`.Query.from_statement` and now does nothing.", + ) + def __init__(self, alias: Union[Alias, Subquery]): + r"""Return a :class:`.MapperOption` that will indicate to the + :class:`_query.Query` + that the main table has been aliased. + + """ + + def process_compile_state(self, compile_state: ORMCompileState) -> None: + pass + + +class BulkUD: + """State used for the orm.Query version of update() / delete(). + + This object is now specific to Query only. + + """ + + def __init__(self, query: Query[Any]): + self.query = query.enable_eagerloads(False) + self._validate_query_state() + self.mapper = self.query._entity_from_pre_ent_zero() + + def _validate_query_state(self) -> None: + for attr, methname, notset, op in ( + ("_limit_clause", "limit()", None, operator.is_), + ("_offset_clause", "offset()", None, operator.is_), + ("_order_by_clauses", "order_by()", (), operator.eq), + ("_group_by_clauses", "group_by()", (), operator.eq), + ("_distinct", "distinct()", False, operator.is_), + ( + "_from_obj", + "join(), outerjoin(), select_from(), or from_self()", + (), + operator.eq, + ), + ( + "_setup_joins", + "join(), outerjoin(), select_from(), or from_self()", + (), + operator.eq, + ), + ): + if not op(getattr(self.query, attr), notset): + raise sa_exc.InvalidRequestError( + "Can't call Query.update() or Query.delete() " + "when %s has been called" % (methname,) + ) + + @property + def session(self) -> Session: + return self.query.session + + +class BulkUpdate(BulkUD): + """BulkUD which handles UPDATEs.""" + + def __init__( + self, + query: Query[Any], + values: Dict[_DMLColumnArgument, Any], + update_kwargs: Optional[Dict[Any, Any]], + ): + super().__init__(query) + self.values = values + self.update_kwargs = update_kwargs + + +class BulkDelete(BulkUD): + """BulkUD which handles DELETEs.""" + + def __init__( + self, + query: Query[Any], + delete_kwargs: Optional[Dict[Any, Any]], + ): + super().__init__(query) + self.delete_kwargs = delete_kwargs + + +class RowReturningQuery(Query[Row[_TP]]): + if TYPE_CHECKING: + + def tuples(self) -> Query[_TP]: # type: ignore + ... 
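The vendored query.py above documents the legacy 1.x-style Query API (join(), filter(), slice(), count(), exists(), one_or_none(), scalar()). As a quick orientation before the relationships.py diff, here is a minimal, self-contained sketch that exercises those calls against an in-memory SQLite database; the User/Address models and the engine URL are illustrative assumptions and are not part of the vendored code.

# Illustrative sketch only: models and engine are assumptions, not vendored code.
from sqlalchemy import Column, ForeignKey, Integer, String, create_engine, func
from sqlalchemy.orm import Session, declarative_base, relationship

Base = declarative_base()


class User(Base):
    __tablename__ = "users"
    id = Column(Integer, primary_key=True)
    name = Column(String, nullable=False)
    addresses = relationship("Address", back_populates="user")


class Address(Base):
    __tablename__ = "addresses"
    id = Column(Integer, primary_key=True)
    email = Column(String, nullable=False)
    user_id = Column(Integer, ForeignKey("users.id"))
    user = relationship("User", back_populates="addresses")


engine = create_engine("sqlite://")
Base.metadata.create_all(engine)

with Session(engine) as session:
    session.add(User(name="ed", addresses=[Address(email="ed@foo.com")]))
    session.commit()

    # relationship-bound join plus filter, as described in Query.join()
    q = (
        session.query(User)
        .join(User.addresses)
        .filter(Address.email == "ed@foo.com")
    )
    print(q.one_or_none())  # the single matching User, or None

    # LIMIT/OFFSET via slice(), and a scalar aggregate via func.count()
    print(q.slice(0, 5).all())
    print(session.query(func.count(User.id)).scalar())

    # EXISTS subquery, as described in Query.exists()
    print(session.query(q.exists()).scalar())

These legacy patterns remain functional in SQLAlchemy 2.x but map onto the select()-based constructs referenced throughout the docstrings above (the ":meth:`_sql.Select.*` - v2 equivalent method" cross-references).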
diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/relationships.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/relationships.py new file mode 100644 index 0000000000000000000000000000000000000000..eae00338f10db62b2d76b0a47ad1de2be4537812 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/relationships.py @@ -0,0 +1,3509 @@ +# orm/relationships.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php + +"""Heuristics related to join conditions as used in +:func:`_orm.relationship`. + +Provides the :class:`.JoinCondition` object, which encapsulates +SQL annotation and aliasing behavior focused on the `primaryjoin` +and `secondaryjoin` aspects of :func:`_orm.relationship`. + +""" +from __future__ import annotations + +import collections +from collections import abc +import dataclasses +import inspect as _py_inspect +import itertools +import re +import typing +from typing import Any +from typing import Callable +from typing import cast +from typing import Collection +from typing import Dict +from typing import FrozenSet +from typing import Generic +from typing import Iterable +from typing import Iterator +from typing import List +from typing import NamedTuple +from typing import NoReturn +from typing import Optional +from typing import Sequence +from typing import Set +from typing import Tuple +from typing import Type +from typing import TypeVar +from typing import Union +import weakref + +from . import attributes +from . import strategy_options +from ._typing import insp_is_aliased_class +from ._typing import is_has_collection_adapter +from .base import _DeclarativeMapped +from .base import _is_mapped_class +from .base import class_mapper +from .base import DynamicMapped +from .base import LoaderCallableStatus +from .base import PassiveFlag +from .base import state_str +from .base import WriteOnlyMapped +from .interfaces import _AttributeOptions +from .interfaces import _IntrospectsAnnotations +from .interfaces import MANYTOMANY +from .interfaces import MANYTOONE +from .interfaces import ONETOMANY +from .interfaces import PropComparator +from .interfaces import RelationshipDirection +from .interfaces import StrategizedProperty +from .util import _orm_annotate +from .util import _orm_deannotate +from .util import CascadeOptions +from .. import exc as sa_exc +from .. import Exists +from .. import log +from .. import schema +from .. import sql +from .. 
import util +from ..inspection import inspect +from ..sql import coercions +from ..sql import expression +from ..sql import operators +from ..sql import roles +from ..sql import visitors +from ..sql._typing import _ColumnExpressionArgument +from ..sql._typing import _HasClauseElement +from ..sql.annotation import _safe_annotate +from ..sql.elements import ColumnClause +from ..sql.elements import ColumnElement +from ..sql.util import _deep_annotate +from ..sql.util import _deep_deannotate +from ..sql.util import _shallow_annotate +from ..sql.util import adapt_criterion_to_null +from ..sql.util import ClauseAdapter +from ..sql.util import join_condition +from ..sql.util import selectables_overlap +from ..sql.util import visit_binary_product +from ..util.typing import de_optionalize_union_types +from ..util.typing import Literal +from ..util.typing import resolve_name_to_real_class_name + +if typing.TYPE_CHECKING: + from ._typing import _EntityType + from ._typing import _ExternalEntityType + from ._typing import _IdentityKeyType + from ._typing import _InstanceDict + from ._typing import _InternalEntityType + from ._typing import _O + from ._typing import _RegistryType + from .base import Mapped + from .clsregistry import _class_resolver + from .clsregistry import _ModNS + from .decl_base import _ClassScanMapperConfig + from .dependency import DependencyProcessor + from .mapper import Mapper + from .query import Query + from .session import Session + from .state import InstanceState + from .strategies import LazyLoader + from .util import AliasedClass + from .util import AliasedInsp + from ..sql._typing import _CoreAdapterProto + from ..sql._typing import _EquivalentColumnMap + from ..sql._typing import _InfoType + from ..sql.annotation import _AnnotationDict + from ..sql.annotation import SupportsAnnotations + from ..sql.elements import BinaryExpression + from ..sql.elements import BindParameter + from ..sql.elements import ClauseElement + from ..sql.schema import Table + from ..sql.selectable import FromClause + from ..util.typing import _AnnotationScanType + from ..util.typing import RODescriptorReference + +_T = TypeVar("_T", bound=Any) +_T1 = TypeVar("_T1", bound=Any) +_T2 = TypeVar("_T2", bound=Any) + +_PT = TypeVar("_PT", bound=Any) + +_PT2 = TypeVar("_PT2", bound=Any) + + +_RelationshipArgumentType = Union[ + str, + Type[_T], + Callable[[], Type[_T]], + "Mapper[_T]", + "AliasedClass[_T]", + Callable[[], "Mapper[_T]"], + Callable[[], "AliasedClass[_T]"], +] + +_LazyLoadArgumentType = Literal[ + "select", + "joined", + "selectin", + "subquery", + "raise", + "raise_on_sql", + "noload", + "immediate", + "write_only", + "dynamic", + True, + False, + None, +] + + +_RelationshipJoinConditionArgument = Union[ + str, _ColumnExpressionArgument[bool] +] +_RelationshipSecondaryArgument = Union[ + "FromClause", str, Callable[[], "FromClause"] +] +_ORMOrderByArgument = Union[ + Literal[False], + str, + _ColumnExpressionArgument[Any], + Callable[[], _ColumnExpressionArgument[Any]], + Callable[[], Iterable[_ColumnExpressionArgument[Any]]], + Iterable[Union[str, _ColumnExpressionArgument[Any]]], +] +ORMBackrefArgument = Union[str, Tuple[str, Dict[str, Any]]] + +_ORMColCollectionElement = Union[ + ColumnClause[Any], + _HasClauseElement[Any], + roles.DMLColumnRole, + "Mapped[Any]", +] +_ORMColCollectionArgument = Union[ + str, + Sequence[_ORMColCollectionElement], + Callable[[], Sequence[_ORMColCollectionElement]], + Callable[[], _ORMColCollectionElement], + _ORMColCollectionElement, +] + + +_CEA = 
TypeVar("_CEA", bound=_ColumnExpressionArgument[Any]) + +_CE = TypeVar("_CE", bound="ColumnElement[Any]") + + +_ColumnPairIterable = Iterable[Tuple[ColumnElement[Any], ColumnElement[Any]]] + +_ColumnPairs = Sequence[Tuple[ColumnElement[Any], ColumnElement[Any]]] + +_MutableColumnPairs = List[Tuple[ColumnElement[Any], ColumnElement[Any]]] + + +def remote(expr: _CEA) -> _CEA: + """Annotate a portion of a primaryjoin expression + with a 'remote' annotation. + + See the section :ref:`relationship_custom_foreign` for a + description of use. + + .. seealso:: + + :ref:`relationship_custom_foreign` + + :func:`.foreign` + + """ + return _annotate_columns( # type: ignore + coercions.expect(roles.ColumnArgumentRole, expr), {"remote": True} + ) + + +def foreign(expr: _CEA) -> _CEA: + """Annotate a portion of a primaryjoin expression + with a 'foreign' annotation. + + See the section :ref:`relationship_custom_foreign` for a + description of use. + + .. seealso:: + + :ref:`relationship_custom_foreign` + + :func:`.remote` + + """ + + return _annotate_columns( # type: ignore + coercions.expect(roles.ColumnArgumentRole, expr), {"foreign": True} + ) + + +@dataclasses.dataclass +class _RelationshipArg(Generic[_T1, _T2]): + """stores a user-defined parameter value that must be resolved and + parsed later at mapper configuration time. + + """ + + __slots__ = "name", "argument", "resolved" + name: str + argument: _T1 + resolved: Optional[_T2] + + def _is_populated(self) -> bool: + return self.argument is not None + + def _resolve_against_registry( + self, clsregistry_resolver: Callable[[str, bool], _class_resolver] + ) -> None: + attr_value = self.argument + + if isinstance(attr_value, str): + self.resolved = clsregistry_resolver( + attr_value, self.name == "secondary" + )() + elif callable(attr_value) and not _is_mapped_class(attr_value): + self.resolved = attr_value() + else: + self.resolved = attr_value + + +_RelationshipOrderByArg = Union[Literal[False], Tuple[ColumnElement[Any], ...]] + + +class _RelationshipArgs(NamedTuple): + """stores user-passed parameters that are resolved at mapper configuration + time. + + """ + + secondary: _RelationshipArg[ + Optional[_RelationshipSecondaryArgument], + Optional[FromClause], + ] + primaryjoin: _RelationshipArg[ + Optional[_RelationshipJoinConditionArgument], + Optional[ColumnElement[Any]], + ] + secondaryjoin: _RelationshipArg[ + Optional[_RelationshipJoinConditionArgument], + Optional[ColumnElement[Any]], + ] + order_by: _RelationshipArg[_ORMOrderByArgument, _RelationshipOrderByArg] + foreign_keys: _RelationshipArg[ + Optional[_ORMColCollectionArgument], Set[ColumnElement[Any]] + ] + remote_side: _RelationshipArg[ + Optional[_ORMColCollectionArgument], Set[ColumnElement[Any]] + ] + + +@log.class_logger +class RelationshipProperty( + _IntrospectsAnnotations, StrategizedProperty[_T], log.Identified +): + """Describes an object property that holds a single item or list + of items that correspond to a related database table. + + Public constructor is the :func:`_orm.relationship` function. + + .. 
seealso:: + + :ref:`relationship_config_toplevel` + + """ + + strategy_wildcard_key = strategy_options._RELATIONSHIP_TOKEN + inherit_cache = True + """:meta private:""" + + _links_to_entity = True + _is_relationship = True + + _overlaps: Sequence[str] + + _lazy_strategy: LazyLoader + + _persistence_only = dict( + passive_deletes=False, + passive_updates=True, + enable_typechecks=True, + active_history=False, + cascade_backrefs=False, + ) + + _dependency_processor: Optional[DependencyProcessor] = None + + primaryjoin: ColumnElement[bool] + secondaryjoin: Optional[ColumnElement[bool]] + secondary: Optional[FromClause] + _join_condition: JoinCondition + order_by: _RelationshipOrderByArg + + _user_defined_foreign_keys: Set[ColumnElement[Any]] + _calculated_foreign_keys: Set[ColumnElement[Any]] + + remote_side: Set[ColumnElement[Any]] + local_columns: Set[ColumnElement[Any]] + + synchronize_pairs: _ColumnPairs + secondary_synchronize_pairs: Optional[_ColumnPairs] + + local_remote_pairs: Optional[_ColumnPairs] + + direction: RelationshipDirection + + _init_args: _RelationshipArgs + + def __init__( + self, + argument: Optional[_RelationshipArgumentType[_T]] = None, + secondary: Optional[_RelationshipSecondaryArgument] = None, + *, + uselist: Optional[bool] = None, + collection_class: Optional[ + Union[Type[Collection[Any]], Callable[[], Collection[Any]]] + ] = None, + primaryjoin: Optional[_RelationshipJoinConditionArgument] = None, + secondaryjoin: Optional[_RelationshipJoinConditionArgument] = None, + back_populates: Optional[str] = None, + order_by: _ORMOrderByArgument = False, + backref: Optional[ORMBackrefArgument] = None, + overlaps: Optional[str] = None, + post_update: bool = False, + cascade: str = "save-update, merge", + viewonly: bool = False, + attribute_options: Optional[_AttributeOptions] = None, + lazy: _LazyLoadArgumentType = "select", + passive_deletes: Union[Literal["all"], bool] = False, + passive_updates: bool = True, + active_history: bool = False, + enable_typechecks: bool = True, + foreign_keys: Optional[_ORMColCollectionArgument] = None, + remote_side: Optional[_ORMColCollectionArgument] = None, + join_depth: Optional[int] = None, + comparator_factory: Optional[ + Type[RelationshipProperty.Comparator[Any]] + ] = None, + single_parent: bool = False, + innerjoin: bool = False, + distinct_target_key: Optional[bool] = None, + load_on_pending: bool = False, + query_class: Optional[Type[Query[Any]]] = None, + info: Optional[_InfoType] = None, + omit_join: Literal[None, False] = None, + sync_backref: Optional[bool] = None, + doc: Optional[str] = None, + bake_queries: Literal[True] = True, + cascade_backrefs: Literal[False] = False, + _local_remote_pairs: Optional[_ColumnPairs] = None, + _legacy_inactive_history_style: bool = False, + ): + super().__init__(attribute_options=attribute_options) + + self.uselist = uselist + self.argument = argument + + self._init_args = _RelationshipArgs( + _RelationshipArg("secondary", secondary, None), + _RelationshipArg("primaryjoin", primaryjoin, None), + _RelationshipArg("secondaryjoin", secondaryjoin, None), + _RelationshipArg("order_by", order_by, None), + _RelationshipArg("foreign_keys", foreign_keys, None), + _RelationshipArg("remote_side", remote_side, None), + ) + + self.post_update = post_update + self.viewonly = viewonly + if viewonly: + self._warn_for_persistence_only_flags( + passive_deletes=passive_deletes, + passive_updates=passive_updates, + enable_typechecks=enable_typechecks, + active_history=active_history, + 
cascade_backrefs=cascade_backrefs, + ) + if viewonly and sync_backref: + raise sa_exc.ArgumentError( + "sync_backref and viewonly cannot both be True" + ) + self.sync_backref = sync_backref + self.lazy = lazy + self.single_parent = single_parent + self.collection_class = collection_class + self.passive_deletes = passive_deletes + + if cascade_backrefs: + raise sa_exc.ArgumentError( + "The 'cascade_backrefs' parameter passed to " + "relationship() may only be set to False." + ) + + self.passive_updates = passive_updates + self.enable_typechecks = enable_typechecks + self.query_class = query_class + self.innerjoin = innerjoin + self.distinct_target_key = distinct_target_key + self.doc = doc + self.active_history = active_history + self._legacy_inactive_history_style = _legacy_inactive_history_style + + self.join_depth = join_depth + if omit_join: + util.warn( + "setting omit_join to True is not supported; selectin " + "loading of this relationship may not work correctly if this " + "flag is set explicitly. omit_join optimization is " + "automatically detected for conditions under which it is " + "supported." + ) + + self.omit_join = omit_join + self.local_remote_pairs = _local_remote_pairs + self.load_on_pending = load_on_pending + self.comparator_factory = ( + comparator_factory or RelationshipProperty.Comparator + ) + util.set_creation_order(self) + + if info is not None: + self.info.update(info) + + self.strategy_key = (("lazy", self.lazy),) + + self._reverse_property: Set[RelationshipProperty[Any]] = set() + + if overlaps: + self._overlaps = set(re.split(r"\s*,\s*", overlaps)) # type: ignore # noqa: E501 + else: + self._overlaps = () + + # mypy ignoring the @property setter + self.cascade = cascade # type: ignore + + self.back_populates = back_populates + + if self.back_populates: + if backref: + raise sa_exc.ArgumentError( + "backref and back_populates keyword arguments " + "are mutually exclusive" + ) + self.backref = None + else: + self.backref = backref + + def _warn_for_persistence_only_flags(self, **kw: Any) -> None: + for k, v in kw.items(): + if v != self._persistence_only[k]: + # we are warning here rather than warn deprecated as this is a + # configuration mistake, and Python shows regular warnings more + # aggressively than deprecation warnings by default. Unlike the + # case of setting viewonly with cascade, the settings being + # warned about here are not actively doing the wrong thing + # against viewonly=True, so it is not as urgent to have these + # raise an error. + util.warn( + "Setting %s on relationship() while also " + "setting viewonly=True does not make sense, as a " + "viewonly=True relationship does not perform persistence " + "operations. This configuration may raise an error " + "in a future release." % (k,) + ) + + def instrument_class(self, mapper: Mapper[Any]) -> None: + attributes.register_descriptor( + mapper.class_, + self.key, + comparator=self.comparator_factory(self, mapper), + parententity=mapper, + doc=self.doc, + ) + + class Comparator(util.MemoizedSlots, PropComparator[_PT]): + """Produce boolean, comparison, and other operators for + :class:`.RelationshipProperty` attributes. + + See the documentation for :class:`.PropComparator` for a brief + overview of ORM level operator definition. + + .. 
seealso:: + + :class:`.PropComparator` + + :class:`.ColumnProperty.Comparator` + + :class:`.ColumnOperators` + + :ref:`types_operators` + + :attr:`.TypeEngine.comparator_factory` + + """ + + __slots__ = ( + "entity", + "mapper", + "property", + "_of_type", + "_extra_criteria", + ) + + prop: RODescriptorReference[RelationshipProperty[_PT]] + _of_type: Optional[_EntityType[_PT]] + + def __init__( + self, + prop: RelationshipProperty[_PT], + parentmapper: _InternalEntityType[Any], + adapt_to_entity: Optional[AliasedInsp[Any]] = None, + of_type: Optional[_EntityType[_PT]] = None, + extra_criteria: Tuple[ColumnElement[bool], ...] = (), + ): + """Construction of :class:`.RelationshipProperty.Comparator` + is internal to the ORM's attribute mechanics. + + """ + self.prop = prop + self._parententity = parentmapper + self._adapt_to_entity = adapt_to_entity + if of_type: + self._of_type = of_type + else: + self._of_type = None + self._extra_criteria = extra_criteria + + def adapt_to_entity( + self, adapt_to_entity: AliasedInsp[Any] + ) -> RelationshipProperty.Comparator[Any]: + return self.__class__( + self.prop, + self._parententity, + adapt_to_entity=adapt_to_entity, + of_type=self._of_type, + ) + + entity: _InternalEntityType[_PT] + """The target entity referred to by this + :class:`.RelationshipProperty.Comparator`. + + This is either a :class:`_orm.Mapper` or :class:`.AliasedInsp` + object. + + This is the "target" or "remote" side of the + :func:`_orm.relationship`. + + """ + + mapper: Mapper[_PT] + """The target :class:`_orm.Mapper` referred to by this + :class:`.RelationshipProperty.Comparator`. + + This is the "target" or "remote" side of the + :func:`_orm.relationship`. + + """ + + def _memoized_attr_entity(self) -> _InternalEntityType[_PT]: + if self._of_type: + return inspect(self._of_type) # type: ignore + else: + return self.prop.entity + + def _memoized_attr_mapper(self) -> Mapper[_PT]: + return self.entity.mapper + + def _source_selectable(self) -> FromClause: + if self._adapt_to_entity: + return self._adapt_to_entity.selectable + else: + return self.property.parent._with_polymorphic_selectable + + def __clause_element__(self) -> ColumnElement[bool]: + adapt_from = self._source_selectable() + if self._of_type: + of_type_entity = inspect(self._of_type) + else: + of_type_entity = None + + ( + pj, + sj, + source, + dest, + secondary, + target_adapter, + ) = self.prop._create_joins( + source_selectable=adapt_from, + source_polymorphic=True, + of_type_entity=of_type_entity, + alias_secondary=True, + extra_criteria=self._extra_criteria, + ) + if sj is not None: + return pj & sj + else: + return pj + + def of_type(self, class_: _EntityType[Any]) -> PropComparator[_PT]: + r"""Redefine this object in terms of a polymorphic subclass. + + See :meth:`.PropComparator.of_type` for an example. + + + """ + return RelationshipProperty.Comparator( + self.prop, + self._parententity, + adapt_to_entity=self._adapt_to_entity, + of_type=class_, + extra_criteria=self._extra_criteria, + ) + + def and_( + self, *criteria: _ColumnExpressionArgument[bool] + ) -> PropComparator[Any]: + """Add AND criteria. + + See :meth:`.PropComparator.and_` for an example. + + .. 
versionadded:: 1.4 + + """ + exprs = tuple( + coercions.expect(roles.WhereHavingRole, clause) + for clause in util.coerce_generator_arg(criteria) + ) + + return RelationshipProperty.Comparator( + self.prop, + self._parententity, + adapt_to_entity=self._adapt_to_entity, + of_type=self._of_type, + extra_criteria=self._extra_criteria + exprs, + ) + + def in_(self, other: Any) -> NoReturn: + """Produce an IN clause - this is not implemented + for :func:`_orm.relationship`-based attributes at this time. + + """ + raise NotImplementedError( + "in_() not yet supported for " + "relationships. For a simple " + "many-to-one, use in_() against " + "the set of foreign key values." + ) + + # https://github.com/python/mypy/issues/4266 + __hash__ = None # type: ignore + + def __eq__(self, other: Any) -> ColumnElement[bool]: # type: ignore[override] # noqa: E501 + """Implement the ``==`` operator. + + In a many-to-one context, such as: + + .. sourcecode:: text + + MyClass.some_prop == + + this will typically produce a + clause such as: + + .. sourcecode:: text + + mytable.related_id == + + Where ```` is the primary key of the given + object. + + The ``==`` operator provides partial functionality for non- + many-to-one comparisons: + + * Comparisons against collections are not supported. + Use :meth:`~.Relationship.Comparator.contains`. + * Compared to a scalar one-to-many, will produce a + clause that compares the target columns in the parent to + the given target. + * Compared to a scalar many-to-many, an alias + of the association table will be rendered as + well, forming a natural join that is part of the + main body of the query. This will not work for + queries that go beyond simple AND conjunctions of + comparisons, such as those which use OR. Use + explicit joins, outerjoins, or + :meth:`~.Relationship.Comparator.has` for + more comprehensive non-many-to-one scalar + membership tests. + * Comparisons against ``None`` given in a one-to-many + or many-to-many context produce a NOT EXISTS clause. + + """ + if other is None or isinstance(other, expression.Null): + if self.property.direction in [ONETOMANY, MANYTOMANY]: + return ~self._criterion_exists() + else: + return _orm_annotate( + self.property._optimized_compare( + None, adapt_source=self.adapter + ) + ) + elif self.property.uselist: + raise sa_exc.InvalidRequestError( + "Can't compare a collection to an object or collection; " + "use contains() to test for membership." 
+ ) + else: + return _orm_annotate( + self.property._optimized_compare( + other, adapt_source=self.adapter + ) + ) + + def _criterion_exists( + self, + criterion: Optional[_ColumnExpressionArgument[bool]] = None, + **kwargs: Any, + ) -> Exists: + where_criteria = ( + coercions.expect(roles.WhereHavingRole, criterion) + if criterion is not None + else None + ) + + if getattr(self, "_of_type", None): + info: Optional[_InternalEntityType[Any]] = inspect( + self._of_type + ) + assert info is not None + target_mapper, to_selectable, is_aliased_class = ( + info.mapper, + info.selectable, + info.is_aliased_class, + ) + if self.property._is_self_referential and not is_aliased_class: + to_selectable = to_selectable._anonymous_fromclause() + + single_crit = target_mapper._single_table_criterion + if single_crit is not None: + if where_criteria is not None: + where_criteria = single_crit & where_criteria + else: + where_criteria = single_crit + else: + is_aliased_class = False + to_selectable = None + + if self.adapter: + source_selectable = self._source_selectable() + else: + source_selectable = None + + ( + pj, + sj, + source, + dest, + secondary, + target_adapter, + ) = self.property._create_joins( + dest_selectable=to_selectable, + source_selectable=source_selectable, + ) + + for k in kwargs: + crit = getattr(self.property.mapper.class_, k) == kwargs[k] + if where_criteria is None: + where_criteria = crit + else: + where_criteria = where_criteria & crit + + # annotate the *local* side of the join condition, in the case + # of pj + sj this is the full primaryjoin, in the case of just + # pj its the local side of the primaryjoin. + if sj is not None: + j = _orm_annotate(pj) & sj + else: + j = _orm_annotate(pj, exclude=self.property.remote_side) + + if ( + where_criteria is not None + and target_adapter + and not is_aliased_class + ): + # limit this adapter to annotated only? + where_criteria = target_adapter.traverse(where_criteria) + + # only have the "joined left side" of what we + # return be subject to Query adaption. The right + # side of it is used for an exists() subquery and + # should not correlate or otherwise reach out + # to anything in the enclosing query. + if where_criteria is not None: + where_criteria = where_criteria._annotate( + {"no_replacement_traverse": True} + ) + + crit = j & sql.True_._ifnone(where_criteria) + + if secondary is not None: + ex = ( + sql.exists(1) + .where(crit) + .select_from(dest, secondary) + .correlate_except(dest, secondary) + ) + else: + ex = ( + sql.exists(1) + .where(crit) + .select_from(dest) + .correlate_except(dest) + ) + return ex + + def any( + self, + criterion: Optional[_ColumnExpressionArgument[bool]] = None, + **kwargs: Any, + ) -> ColumnElement[bool]: + """Produce an expression that tests a collection against + particular criterion, using EXISTS. + + An expression like:: + + session.query(MyClass).filter( + MyClass.somereference.any(SomeRelated.x == 2) + ) + + Will produce a query like: + + .. sourcecode:: sql + + SELECT * FROM my_table WHERE + EXISTS (SELECT 1 FROM related WHERE related.my_id=my_table.id + AND related.x=2) + + Because :meth:`~.Relationship.Comparator.any` uses + a correlated subquery, its performance is not nearly as + good when compared against large target tables as that of + using a join. + + :meth:`~.Relationship.Comparator.any` is particularly + useful for testing for empty collections:: + + session.query(MyClass).filter(~MyClass.somereference.any()) + + will produce: + + .. 
sourcecode:: sql + + SELECT * FROM my_table WHERE + NOT (EXISTS (SELECT 1 FROM related WHERE + related.my_id=my_table.id)) + + :meth:`~.Relationship.Comparator.any` is only + valid for collections, i.e. a :func:`_orm.relationship` + that has ``uselist=True``. For scalar references, + use :meth:`~.Relationship.Comparator.has`. + + """ + if not self.property.uselist: + raise sa_exc.InvalidRequestError( + "'any()' not implemented for scalar " + "attributes. Use has()." + ) + + return self._criterion_exists(criterion, **kwargs) + + def has( + self, + criterion: Optional[_ColumnExpressionArgument[bool]] = None, + **kwargs: Any, + ) -> ColumnElement[bool]: + """Produce an expression that tests a scalar reference against + particular criterion, using EXISTS. + + An expression like:: + + session.query(MyClass).filter( + MyClass.somereference.has(SomeRelated.x == 2) + ) + + Will produce a query like: + + .. sourcecode:: sql + + SELECT * FROM my_table WHERE + EXISTS (SELECT 1 FROM related WHERE + related.id==my_table.related_id AND related.x=2) + + Because :meth:`~.Relationship.Comparator.has` uses + a correlated subquery, its performance is not nearly as + good when compared against large target tables as that of + using a join. + + :meth:`~.Relationship.Comparator.has` is only + valid for scalar references, i.e. a :func:`_orm.relationship` + that has ``uselist=False``. For collection references, + use :meth:`~.Relationship.Comparator.any`. + + """ + if self.property.uselist: + raise sa_exc.InvalidRequestError( + "'has()' not implemented for collections. Use any()." + ) + return self._criterion_exists(criterion, **kwargs) + + def contains( + self, other: _ColumnExpressionArgument[Any], **kwargs: Any + ) -> ColumnElement[bool]: + """Return a simple expression that tests a collection for + containment of a particular item. + + :meth:`~.Relationship.Comparator.contains` is + only valid for a collection, i.e. a + :func:`_orm.relationship` that implements + one-to-many or many-to-many with ``uselist=True``. + + When used in a simple one-to-many context, an + expression like:: + + MyClass.contains(other) + + Produces a clause like: + + .. sourcecode:: sql + + mytable.id == + + Where ```` is the value of the foreign key + attribute on ``other`` which refers to the primary + key of its parent object. From this it follows that + :meth:`~.Relationship.Comparator.contains` is + very useful when used with simple one-to-many + operations. + + For many-to-many operations, the behavior of + :meth:`~.Relationship.Comparator.contains` + has more caveats. The association table will be + rendered in the statement, producing an "implicit" + join, that is, includes multiple tables in the FROM + clause which are equated in the WHERE clause:: + + query(MyClass).filter(MyClass.contains(other)) + + Produces a query like: + + .. sourcecode:: sql + + SELECT * FROM my_table, my_association_table AS + my_association_table_1 WHERE + my_table.id = my_association_table_1.parent_id + AND my_association_table_1.child_id = + + Where ```` would be the primary key of + ``other``. From the above, it is clear that + :meth:`~.Relationship.Comparator.contains` + will **not** work with many-to-many collections when + used in queries that move beyond simple AND + conjunctions, such as multiple + :meth:`~.Relationship.Comparator.contains` + expressions joined by OR. In such cases subqueries or + explicit "outer joins" will need to be used instead. 
+ See :meth:`~.Relationship.Comparator.any` for + a less-performant alternative using EXISTS, or refer + to :meth:`_query.Query.outerjoin` + as well as :ref:`orm_queryguide_joins` + for more details on constructing outer joins. + + kwargs may be ignored by this operator but are required for API + conformance. + """ + if not self.prop.uselist: + raise sa_exc.InvalidRequestError( + "'contains' not implemented for scalar " + "attributes. Use ==" + ) + + clause = self.prop._optimized_compare( + other, adapt_source=self.adapter + ) + + if self.prop.secondaryjoin is not None: + clause.negation_clause = self.__negated_contains_or_equals( + other + ) + + return clause + + def __negated_contains_or_equals( + self, other: Any + ) -> ColumnElement[bool]: + if self.prop.direction == MANYTOONE: + state = attributes.instance_state(other) + + def state_bindparam( + local_col: ColumnElement[Any], + state: InstanceState[Any], + remote_col: ColumnElement[Any], + ) -> BindParameter[Any]: + dict_ = state.dict + return sql.bindparam( + local_col.key, + type_=local_col.type, + unique=True, + callable_=self.prop._get_attr_w_warn_on_none( + self.prop.mapper, state, dict_, remote_col + ), + ) + + def adapt(col: _CE) -> _CE: + if self.adapter: + return self.adapter(col) + else: + return col + + if self.property._use_get: + return sql.and_( + *[ + sql.or_( + adapt(x) + != state_bindparam(adapt(x), state, y), + adapt(x) == None, + ) + for (x, y) in self.property.local_remote_pairs + ] + ) + + criterion = sql.and_( + *[ + x == y + for (x, y) in zip( + self.property.mapper.primary_key, + self.property.mapper.primary_key_from_instance(other), + ) + ] + ) + + return ~self._criterion_exists(criterion) + + def __ne__(self, other: Any) -> ColumnElement[bool]: # type: ignore[override] # noqa: E501 + """Implement the ``!=`` operator. + + In a many-to-one context, such as: + + .. sourcecode:: text + + MyClass.some_prop != + + This will typically produce a clause such as: + + .. sourcecode:: sql + + mytable.related_id != + + Where ```` is the primary key of the + given object. + + The ``!=`` operator provides partial functionality for non- + many-to-one comparisons: + + * Comparisons against collections are not supported. + Use + :meth:`~.Relationship.Comparator.contains` + in conjunction with :func:`_expression.not_`. + * Compared to a scalar one-to-many, will produce a + clause that compares the target columns in the parent to + the given target. + * Compared to a scalar many-to-many, an alias + of the association table will be rendered as + well, forming a natural join that is part of the + main body of the query. This will not work for + queries that go beyond simple AND conjunctions of + comparisons, such as those which use OR. Use + explicit joins, outerjoins, or + :meth:`~.Relationship.Comparator.has` in + conjunction with :func:`_expression.not_` for + more comprehensive non-many-to-one scalar + membership tests. + * Comparisons against ``None`` given in a one-to-many + or many-to-many context produce an EXISTS clause. + + """ + if other is None or isinstance(other, expression.Null): + if self.property.direction == MANYTOONE: + return _orm_annotate( + ~self.property._optimized_compare( + None, adapt_source=self.adapter + ) + ) + + else: + return self._criterion_exists() + elif self.property.uselist: + raise sa_exc.InvalidRequestError( + "Can't compare a collection" + " to an object or collection; use " + "contains() to test for membership." 
+ ) + else: + return _orm_annotate(self.__negated_contains_or_equals(other)) + + def _memoized_attr_property(self) -> RelationshipProperty[_PT]: + self.prop.parent._check_configure() + return self.prop + + def _with_parent( + self, + instance: object, + alias_secondary: bool = True, + from_entity: Optional[_EntityType[Any]] = None, + ) -> ColumnElement[bool]: + assert instance is not None + adapt_source: Optional[_CoreAdapterProto] = None + if from_entity is not None: + insp: Optional[_InternalEntityType[Any]] = inspect(from_entity) + assert insp is not None + if insp_is_aliased_class(insp): + adapt_source = insp._adapter.adapt_clause + return self._optimized_compare( + instance, + value_is_parent=True, + adapt_source=adapt_source, + alias_secondary=alias_secondary, + ) + + def _optimized_compare( + self, + state: Any, + value_is_parent: bool = False, + adapt_source: Optional[_CoreAdapterProto] = None, + alias_secondary: bool = True, + ) -> ColumnElement[bool]: + if state is not None: + try: + state = inspect(state) + except sa_exc.NoInspectionAvailable: + state = None + + if state is None or not getattr(state, "is_instance", False): + raise sa_exc.ArgumentError( + "Mapped instance expected for relationship " + "comparison to object. Classes, queries and other " + "SQL elements are not accepted in this context; for " + "comparison with a subquery, " + "use %s.has(**criteria)." % self + ) + reverse_direction = not value_is_parent + + if state is None: + return self._lazy_none_clause( + reverse_direction, adapt_source=adapt_source + ) + + if not reverse_direction: + criterion, bind_to_col = ( + self._lazy_strategy._lazywhere, + self._lazy_strategy._bind_to_col, + ) + else: + criterion, bind_to_col = ( + self._lazy_strategy._rev_lazywhere, + self._lazy_strategy._rev_bind_to_col, + ) + + if reverse_direction: + mapper = self.mapper + else: + mapper = self.parent + + dict_ = attributes.instance_dict(state.obj()) + + def visit_bindparam(bindparam: BindParameter[Any]) -> None: + if bindparam._identifying_key in bind_to_col: + bindparam.callable = self._get_attr_w_warn_on_none( + mapper, + state, + dict_, + bind_to_col[bindparam._identifying_key], + ) + + if self.secondary is not None and alias_secondary: + criterion = ClauseAdapter( + self.secondary._anonymous_fromclause() + ).traverse(criterion) + + criterion = visitors.cloned_traverse( + criterion, {}, {"bindparam": visit_bindparam} + ) + + if adapt_source: + criterion = adapt_source(criterion) + return criterion + + def _get_attr_w_warn_on_none( + self, + mapper: Mapper[Any], + state: InstanceState[Any], + dict_: _InstanceDict, + column: ColumnElement[Any], + ) -> Callable[[], Any]: + """Create the callable that is used in a many-to-one expression. + + E.g.:: + + u1 = s.query(User).get(5) + + expr = Address.user == u1 + + Above, the SQL should be "address.user_id = 5". The callable + returned by this method produces the value "5" based on the identity + of ``u1``. 
+ + """ + + # in this callable, we're trying to thread the needle through + # a wide variety of scenarios, including: + # + # * the object hasn't been flushed yet and there's no value for + # the attribute as of yet + # + # * the object hasn't been flushed yet but it has a user-defined + # value + # + # * the object has a value but it's expired and not locally present + # + # * the object has a value but it's expired and not locally present, + # and the object is also detached + # + # * The object hadn't been flushed yet, there was no value, but + # later, the object has been expired and detached, and *now* + # they're trying to evaluate it + # + # * the object had a value, but it was changed to a new value, and + # then expired + # + # * the object had a value, but it was changed to a new value, and + # then expired, then the object was detached + # + # * the object has a user-set value, but it's None and we don't do + # the comparison correctly for that so warn + # + + prop = mapper.get_property_by_column(column) + + # by invoking this method, InstanceState will track the last known + # value for this key each time the attribute is to be expired. + # this feature was added explicitly for use in this method. + state._track_last_known_value(prop.key) + + lkv_fixed = state._last_known_values + + def _go() -> Any: + assert lkv_fixed is not None + last_known = to_return = lkv_fixed[prop.key] + existing_is_available = ( + last_known is not LoaderCallableStatus.NO_VALUE + ) + + # we support that the value may have changed. so here we + # try to get the most recent value including re-fetching. + # only if we can't get a value now due to detachment do we return + # the last known value + current_value = mapper._get_state_attr_by_column( + state, + dict_, + column, + passive=( + PassiveFlag.PASSIVE_OFF + if state.persistent + else PassiveFlag.PASSIVE_NO_FETCH ^ PassiveFlag.INIT_OK + ), + ) + + if current_value is LoaderCallableStatus.NEVER_SET: + if not existing_is_available: + raise sa_exc.InvalidRequestError( + "Can't resolve value for column %s on object " + "%s; no value has been set for this column" + % (column, state_str(state)) + ) + elif current_value is LoaderCallableStatus.PASSIVE_NO_RESULT: + if not existing_is_available: + raise sa_exc.InvalidRequestError( + "Can't resolve value for column %s on object " + "%s; the object is detached and the value was " + "expired" % (column, state_str(state)) + ) + else: + to_return = current_value + if to_return is None: + util.warn( + "Got None for value of column %s; this is unsupported " + "for a relationship comparison and will not " + "currently produce an IS comparison " + "(but may in a future release)" % column + ) + return to_return + + return _go + + def _lazy_none_clause( + self, + reverse_direction: bool = False, + adapt_source: Optional[_CoreAdapterProto] = None, + ) -> ColumnElement[bool]: + if not reverse_direction: + criterion, bind_to_col = ( + self._lazy_strategy._lazywhere, + self._lazy_strategy._bind_to_col, + ) + else: + criterion, bind_to_col = ( + self._lazy_strategy._rev_lazywhere, + self._lazy_strategy._rev_bind_to_col, + ) + + criterion = adapt_criterion_to_null(criterion, bind_to_col) + + if adapt_source: + criterion = adapt_source(criterion) + return criterion + + def __str__(self) -> str: + return str(self.parent.class_.__name__) + "." 
+ self.key + + def merge( + self, + session: Session, + source_state: InstanceState[Any], + source_dict: _InstanceDict, + dest_state: InstanceState[Any], + dest_dict: _InstanceDict, + load: bool, + _recursive: Dict[Any, object], + _resolve_conflict_map: Dict[_IdentityKeyType[Any], object], + ) -> None: + if load: + for r in self._reverse_property: + if (source_state, r) in _recursive: + return + + if "merge" not in self._cascade: + return + + if self.key not in source_dict: + return + + if self.uselist: + impl = source_state.get_impl(self.key) + + assert is_has_collection_adapter(impl) + instances_iterable = impl.get_collection(source_state, source_dict) + + # if this is a CollectionAttributeImpl, then empty should + # be False, otherwise "self.key in source_dict" should not be + # True + assert not instances_iterable.empty if impl.collection else True + + if load: + # for a full merge, pre-load the destination collection, + # so that individual _merge of each item pulls from identity + # map for those already present. + # also assumes CollectionAttributeImpl behavior of loading + # "old" list in any case + dest_state.get_impl(self.key).get( + dest_state, dest_dict, passive=PassiveFlag.PASSIVE_MERGE + ) + + dest_list = [] + for current in instances_iterable: + current_state = attributes.instance_state(current) + current_dict = attributes.instance_dict(current) + _recursive[(current_state, self)] = True + obj = session._merge( + current_state, + current_dict, + load=load, + _recursive=_recursive, + _resolve_conflict_map=_resolve_conflict_map, + ) + if obj is not None: + dest_list.append(obj) + + if not load: + coll = attributes.init_state_collection( + dest_state, dest_dict, self.key + ) + for c in dest_list: + coll.append_without_event(c) + else: + dest_impl = dest_state.get_impl(self.key) + assert is_has_collection_adapter(dest_impl) + dest_impl.set( + dest_state, + dest_dict, + dest_list, + _adapt=False, + passive=PassiveFlag.PASSIVE_MERGE, + ) + else: + current = source_dict[self.key] + if current is not None: + current_state = attributes.instance_state(current) + current_dict = attributes.instance_dict(current) + _recursive[(current_state, self)] = True + obj = session._merge( + current_state, + current_dict, + load=load, + _recursive=_recursive, + _resolve_conflict_map=_resolve_conflict_map, + ) + else: + obj = None + + if not load: + dest_dict[self.key] = obj + else: + dest_state.get_impl(self.key).set( + dest_state, dest_dict, obj, None + ) + + def _value_as_iterable( + self, + state: InstanceState[_O], + dict_: _InstanceDict, + key: str, + passive: PassiveFlag = PassiveFlag.PASSIVE_OFF, + ) -> Sequence[Tuple[InstanceState[_O], _O]]: + """Return a list of tuples (state, obj) for the given + key. 
+ + returns an empty list if the value is None/empty/PASSIVE_NO_RESULT + """ + + impl = state.manager[key].impl + x = impl.get(state, dict_, passive=passive) + if x is LoaderCallableStatus.PASSIVE_NO_RESULT or x is None: + return [] + elif is_has_collection_adapter(impl): + return [ + (attributes.instance_state(o), o) + for o in impl.get_collection(state, dict_, x, passive=passive) + ] + else: + return [(attributes.instance_state(x), x)] + + def cascade_iterator( + self, + type_: str, + state: InstanceState[Any], + dict_: _InstanceDict, + visited_states: Set[InstanceState[Any]], + halt_on: Optional[Callable[[InstanceState[Any]], bool]] = None, + ) -> Iterator[Tuple[Any, Mapper[Any], InstanceState[Any], _InstanceDict]]: + # assert type_ in self._cascade + + # only actively lazy load on the 'delete' cascade + if type_ != "delete" or self.passive_deletes: + passive = PassiveFlag.PASSIVE_NO_INITIALIZE + else: + passive = PassiveFlag.PASSIVE_OFF | PassiveFlag.NO_RAISE + + if type_ == "save-update": + tuples = state.manager[self.key].impl.get_all_pending(state, dict_) + else: + tuples = self._value_as_iterable( + state, dict_, self.key, passive=passive + ) + + skip_pending = ( + type_ == "refresh-expire" and "delete-orphan" not in self._cascade + ) + + for instance_state, c in tuples: + if instance_state in visited_states: + continue + + if c is None: + # would like to emit a warning here, but + # would not be consistent with collection.append(None) + # current behavior of silently skipping. + # see [ticket:2229] + continue + + assert instance_state is not None + instance_dict = attributes.instance_dict(c) + + if halt_on and halt_on(instance_state): + continue + + if skip_pending and not instance_state.key: + continue + + instance_mapper = instance_state.manager.mapper + + if not instance_mapper.isa(self.mapper.class_manager.mapper): + raise AssertionError( + "Attribute '%s' on class '%s' " + "doesn't handle objects " + "of type '%s'" + % (self.key, self.parent.class_, c.__class__) + ) + + visited_states.add(instance_state) + + yield c, instance_mapper, instance_state, instance_dict + + @property + def _effective_sync_backref(self) -> bool: + if self.viewonly: + return False + else: + return self.sync_backref is not False + + @staticmethod + def _check_sync_backref( + rel_a: RelationshipProperty[Any], rel_b: RelationshipProperty[Any] + ) -> None: + if rel_a.viewonly and rel_b.sync_backref: + raise sa_exc.InvalidRequestError( + "Relationship %s cannot specify sync_backref=True since %s " + "includes viewonly=True." % (rel_b, rel_a) + ) + if ( + rel_a.viewonly + and not rel_b.viewonly + and rel_b.sync_backref is not False + ): + rel_b.sync_backref = False + + def _add_reverse_property(self, key: str) -> None: + other = self.mapper.get_property(key, _configure_mappers=False) + if not isinstance(other, RelationshipProperty): + raise sa_exc.InvalidRequestError( + "back_populates on relationship '%s' refers to attribute '%s' " + "that is not a relationship. The back_populates parameter " + "should refer to the name of a relationship on the target " + "class." % (self, other) + ) + # viewonly and sync_backref cases + # 1. self.viewonly==True and other.sync_backref==True -> error + # 2. self.viewonly==True and other.viewonly==False and + # other.sync_backref==None -> warn sync_backref=False, set to False + self._check_sync_backref(self, other) + # 3. other.viewonly==True and self.sync_backref==True -> error + # 4. 
other.viewonly==True and self.viewonly==False and + # self.sync_backref==None -> warn sync_backref=False, set to False + self._check_sync_backref(other, self) + + self._reverse_property.add(other) + other._reverse_property.add(self) + + other._setup_entity() + + if not other.mapper.common_parent(self.parent): + raise sa_exc.ArgumentError( + "reverse_property %r on " + "relationship %s references relationship %s, which " + "does not reference mapper %s" + % (key, self, other, self.parent) + ) + + if ( + other._configure_started + and self.direction in (ONETOMANY, MANYTOONE) + and self.direction == other.direction + ): + raise sa_exc.ArgumentError( + "%s and back-reference %s are " + "both of the same direction %r. Did you mean to " + "set remote_side on the many-to-one side ?" + % (other, self, self.direction) + ) + + @util.memoized_property + def entity(self) -> _InternalEntityType[_T]: + """Return the target mapped entity, which is an inspect() of the + class or aliased class that is referenced by this + :class:`.RelationshipProperty`. + + """ + self.parent._check_configure() + return self.entity + + @util.memoized_property + def mapper(self) -> Mapper[_T]: + """Return the targeted :class:`_orm.Mapper` for this + :class:`.RelationshipProperty`. + + """ + return self.entity.mapper + + def do_init(self) -> None: + self._check_conflicts() + self._process_dependent_arguments() + self._setup_entity() + self._setup_registry_dependencies() + self._setup_join_conditions() + self._check_cascade_settings(self._cascade) + self._post_init() + self._generate_backref() + self._join_condition._warn_for_conflicting_sync_targets() + super().do_init() + self._lazy_strategy = cast( + "LazyLoader", self._get_strategy((("lazy", "select"),)) + ) + + def _setup_registry_dependencies(self) -> None: + self.parent.mapper.registry._set_depends_on( + self.entity.mapper.registry + ) + + def _process_dependent_arguments(self) -> None: + """Convert incoming configuration arguments to their + proper form. + + Callables are resolved, ORM annotations removed. + + """ + + # accept callables for other attributes which may require + # deferred initialization. This technique is used + # by declarative "string configs" and some recipes. + init_args = self._init_args + + for attr in ( + "order_by", + "primaryjoin", + "secondaryjoin", + "secondary", + "foreign_keys", + "remote_side", + ): + rel_arg = getattr(init_args, attr) + + rel_arg._resolve_against_registry(self._clsregistry_resolvers[1]) + + # remove "annotations" which are present if mapped class + # descriptors are used to create the join expression. + for attr in "primaryjoin", "secondaryjoin": + rel_arg = getattr(init_args, attr) + val = rel_arg.resolved + if val is not None: + rel_arg.resolved = _orm_deannotate( + coercions.expect( + roles.ColumnArgumentRole, val, argname=attr + ) + ) + + secondary = init_args.secondary.resolved + if secondary is not None and _is_mapped_class(secondary): + raise sa_exc.ArgumentError( + "secondary argument %s passed to to relationship() %s must " + "be a Table object or other FROM clause; can't send a mapped " + "class directly as rows in 'secondary' are persisted " + "independently of a class that is mapped " + "to that same table." % (secondary, self) + ) + + # ensure expressions in self.order_by, foreign_keys, + # remote_side are all columns, not strings. 
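+        # editorial illustration, not upstream code: after the block below,
+        # a string-configured argument such as
+        #     relationship("Address", order_by="Address.id")
+        # (already resolved against the registry above) is coerced into
+        # ColumnElement objects, e.g. self.order_by becomes a tuple like
+        # (addresses.c.id,), never a plain string.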
+ if ( + init_args.order_by.resolved is not False + and init_args.order_by.resolved is not None + ): + self.order_by = tuple( + coercions.expect( + roles.ColumnArgumentRole, x, argname="order_by" + ) + for x in util.to_list(init_args.order_by.resolved) + ) + else: + self.order_by = False + + self._user_defined_foreign_keys = util.column_set( + coercions.expect( + roles.ColumnArgumentRole, x, argname="foreign_keys" + ) + for x in util.to_column_set(init_args.foreign_keys.resolved) + ) + + self.remote_side = util.column_set( + coercions.expect( + roles.ColumnArgumentRole, x, argname="remote_side" + ) + for x in util.to_column_set(init_args.remote_side.resolved) + ) + + def declarative_scan( + self, + decl_scan: _ClassScanMapperConfig, + registry: _RegistryType, + cls: Type[Any], + originating_module: Optional[str], + key: str, + mapped_container: Optional[Type[Mapped[Any]]], + annotation: Optional[_AnnotationScanType], + extracted_mapped_annotation: Optional[_AnnotationScanType], + is_dataclass_field: bool, + ) -> None: + if extracted_mapped_annotation is None: + if self.argument is None: + self._raise_for_required(key, cls) + else: + return + + argument = extracted_mapped_annotation + assert originating_module is not None + + if mapped_container is not None: + is_write_only = issubclass(mapped_container, WriteOnlyMapped) + is_dynamic = issubclass(mapped_container, DynamicMapped) + if is_write_only: + self.lazy = "write_only" + self.strategy_key = (("lazy", self.lazy),) + elif is_dynamic: + self.lazy = "dynamic" + self.strategy_key = (("lazy", self.lazy),) + else: + is_write_only = is_dynamic = False + + argument = de_optionalize_union_types(argument) + + if hasattr(argument, "__origin__"): + arg_origin = argument.__origin__ + if isinstance(arg_origin, type) and issubclass( + arg_origin, abc.Collection + ): + if self.collection_class is None: + if _py_inspect.isabstract(arg_origin): + raise sa_exc.ArgumentError( + f"Collection annotation type {arg_origin} cannot " + "be instantiated; please provide an explicit " + "'collection_class' parameter " + "(e.g. list, set, etc.) to the " + "relationship() function to accompany this " + "annotation" + ) + + self.collection_class = arg_origin + + elif not is_write_only and not is_dynamic: + self.uselist = False + + if argument.__args__: # type: ignore + if isinstance(arg_origin, type) and issubclass( + arg_origin, typing.Mapping + ): + type_arg = argument.__args__[-1] # type: ignore + else: + type_arg = argument.__args__[0] # type: ignore + if hasattr(type_arg, "__forward_arg__"): + str_argument = type_arg.__forward_arg__ + + argument = resolve_name_to_real_class_name( + str_argument, originating_module + ) + else: + argument = type_arg + else: + raise sa_exc.ArgumentError( + f"Generic alias {argument} requires an argument" + ) + elif hasattr(argument, "__forward_arg__"): + argument = argument.__forward_arg__ + + argument = resolve_name_to_real_class_name( + argument, originating_module + ) + + if ( + self.collection_class is None + and not is_write_only + and not is_dynamic + ): + self.uselist = False + + # ticket #8759 + # if a lead argument was given to relationship(), like + # `relationship("B")`, use that, don't replace it with class we + # found in the annotation. The declarative_scan() method call here is + # still useful, as we continue to derive collection type and do + # checking of the annotation in any case. 
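+        # editorial illustration, not upstream code: with a declaration like
+        #     addresses: Mapped[List["Address"]] = relationship("Address")
+        # the lead argument "Address" is kept as self.argument per the
+        # comment above; the annotation is still scanned only so that the
+        # List[] container drives uselist / collection_class handling.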
+ if self.argument is None: + self.argument = cast("_RelationshipArgumentType[_T]", argument) + + @util.preload_module("sqlalchemy.orm.mapper") + def _setup_entity(self, __argument: Any = None) -> None: + if "entity" in self.__dict__: + return + + mapperlib = util.preloaded.orm_mapper + + if __argument: + argument = __argument + else: + argument = self.argument + + resolved_argument: _ExternalEntityType[Any] + + if isinstance(argument, str): + # we might want to cleanup clsregistry API to make this + # more straightforward + resolved_argument = cast( + "_ExternalEntityType[Any]", + self._clsregistry_resolve_name(argument)(), + ) + elif callable(argument) and not isinstance( + argument, (type, mapperlib.Mapper) + ): + resolved_argument = argument() + else: + resolved_argument = argument + + entity: _InternalEntityType[Any] + + if isinstance(resolved_argument, type): + entity = class_mapper(resolved_argument, configure=False) + else: + try: + entity = inspect(resolved_argument) + except sa_exc.NoInspectionAvailable: + entity = None # type: ignore + + if not hasattr(entity, "mapper"): + raise sa_exc.ArgumentError( + "relationship '%s' expects " + "a class or a mapper argument (received: %s)" + % (self.key, type(resolved_argument)) + ) + + self.entity = entity + self.target = self.entity.persist_selectable + + def _setup_join_conditions(self) -> None: + self._join_condition = jc = JoinCondition( + parent_persist_selectable=self.parent.persist_selectable, + child_persist_selectable=self.entity.persist_selectable, + parent_local_selectable=self.parent.local_table, + child_local_selectable=self.entity.local_table, + primaryjoin=self._init_args.primaryjoin.resolved, + secondary=self._init_args.secondary.resolved, + secondaryjoin=self._init_args.secondaryjoin.resolved, + parent_equivalents=self.parent._equivalent_columns, + child_equivalents=self.mapper._equivalent_columns, + consider_as_foreign_keys=self._user_defined_foreign_keys, + local_remote_pairs=self.local_remote_pairs, + remote_side=self.remote_side, + self_referential=self._is_self_referential, + prop=self, + support_sync=not self.viewonly, + can_be_synced_fn=self._columns_are_mapped, + ) + self.primaryjoin = jc.primaryjoin + self.secondaryjoin = jc.secondaryjoin + self.secondary = jc.secondary + self.direction = jc.direction + self.local_remote_pairs = jc.local_remote_pairs + self.remote_side = jc.remote_columns + self.local_columns = jc.local_columns + self.synchronize_pairs = jc.synchronize_pairs + self._calculated_foreign_keys = jc.foreign_key_columns + self.secondary_synchronize_pairs = jc.secondary_synchronize_pairs + + @property + def _clsregistry_resolve_arg( + self, + ) -> Callable[[str, bool], _class_resolver]: + return self._clsregistry_resolvers[1] + + @property + def _clsregistry_resolve_name( + self, + ) -> Callable[[str], Callable[[], Union[Type[Any], Table, _ModNS]]]: + return self._clsregistry_resolvers[0] + + @util.memoized_property + @util.preload_module("sqlalchemy.orm.clsregistry") + def _clsregistry_resolvers( + self, + ) -> Tuple[ + Callable[[str], Callable[[], Union[Type[Any], Table, _ModNS]]], + Callable[[str, bool], _class_resolver], + ]: + _resolver = util.preloaded.orm_clsregistry._resolver + + return _resolver(self.parent.class_, self) + + def _check_conflicts(self) -> None: + """Test that this relationship is legal, warn about + inheritance conflicts.""" + if self.parent.non_primary and not class_mapper( + self.parent.class_, configure=False + ).has_property(self.key): + raise sa_exc.ArgumentError( + 
"Attempting to assign a new " + "relationship '%s' to a non-primary mapper on " + "class '%s'. New relationships can only be added " + "to the primary mapper, i.e. the very first mapper " + "created for class '%s' " + % ( + self.key, + self.parent.class_.__name__, + self.parent.class_.__name__, + ) + ) + + @property + def cascade(self) -> CascadeOptions: + """Return the current cascade setting for this + :class:`.RelationshipProperty`. + """ + return self._cascade + + @cascade.setter + def cascade(self, cascade: Union[str, CascadeOptions]) -> None: + self._set_cascade(cascade) + + def _set_cascade(self, cascade_arg: Union[str, CascadeOptions]) -> None: + cascade = CascadeOptions(cascade_arg) + + if self.viewonly: + cascade = CascadeOptions( + cascade.intersection(CascadeOptions._viewonly_cascades) + ) + + if "mapper" in self.__dict__: + self._check_cascade_settings(cascade) + self._cascade = cascade + + if self._dependency_processor: + self._dependency_processor.cascade = cascade + + def _check_cascade_settings(self, cascade: CascadeOptions) -> None: + if ( + cascade.delete_orphan + and not self.single_parent + and (self.direction is MANYTOMANY or self.direction is MANYTOONE) + ): + raise sa_exc.ArgumentError( + "For %(direction)s relationship %(rel)s, delete-orphan " + "cascade is normally " + 'configured only on the "one" side of a one-to-many ' + "relationship, " + 'and not on the "many" side of a many-to-one or many-to-many ' + "relationship. " + "To force this relationship to allow a particular " + '"%(relatedcls)s" object to be referenced by only ' + 'a single "%(clsname)s" object at a time via the ' + "%(rel)s relationship, which " + "would allow " + "delete-orphan cascade to take place in this direction, set " + "the single_parent=True flag." + % { + "rel": self, + "direction": ( + "many-to-one" + if self.direction is MANYTOONE + else "many-to-many" + ), + "clsname": self.parent.class_.__name__, + "relatedcls": self.mapper.class_.__name__, + }, + code="bbf0", + ) + + if self.passive_deletes == "all" and ( + "delete" in cascade or "delete-orphan" in cascade + ): + raise sa_exc.ArgumentError( + "On %s, can't set passive_deletes='all' in conjunction " + "with 'delete' or 'delete-orphan' cascade" % self + ) + + if cascade.delete_orphan: + self.mapper.primary_mapper()._delete_orphans.append( + (self.key, self.parent.class_) + ) + + def _persists_for(self, mapper: Mapper[Any]) -> bool: + """Return True if this property will persist values on behalf + of the given mapper. + + """ + + return ( + self.key in mapper.relationships + and mapper.relationships[self.key] is self + ) + + def _columns_are_mapped(self, *cols: ColumnElement[Any]) -> bool: + """Return True if all columns in the given collection are + mapped by the tables referenced by this :class:`.RelationshipProperty`. 
+ + """ + + secondary = self._init_args.secondary.resolved + for c in cols: + if secondary is not None and secondary.c.contains_column(c): + continue + if not self.parent.persist_selectable.c.contains_column( + c + ) and not self.target.c.contains_column(c): + return False + return True + + def _generate_backref(self) -> None: + """Interpret the 'backref' instruction to create a + :func:`_orm.relationship` complementary to this one.""" + + if self.parent.non_primary: + return + if self.backref is not None and not self.back_populates: + kwargs: Dict[str, Any] + if isinstance(self.backref, str): + backref_key, kwargs = self.backref, {} + else: + backref_key, kwargs = self.backref + mapper = self.mapper.primary_mapper() + + if not mapper.concrete: + check = set(mapper.iterate_to_root()).union( + mapper.self_and_descendants + ) + for m in check: + if m.has_property(backref_key) and not m.concrete: + raise sa_exc.ArgumentError( + "Error creating backref " + "'%s' on relationship '%s': property of that " + "name exists on mapper '%s'" + % (backref_key, self, m) + ) + + # determine primaryjoin/secondaryjoin for the + # backref. Use the one we had, so that + # a custom join doesn't have to be specified in + # both directions. + if self.secondary is not None: + # for many to many, just switch primaryjoin/ + # secondaryjoin. use the annotated + # pj/sj on the _join_condition. + pj = kwargs.pop( + "primaryjoin", + self._join_condition.secondaryjoin_minus_local, + ) + sj = kwargs.pop( + "secondaryjoin", + self._join_condition.primaryjoin_minus_local, + ) + else: + pj = kwargs.pop( + "primaryjoin", + self._join_condition.primaryjoin_reverse_remote, + ) + sj = kwargs.pop("secondaryjoin", None) + if sj: + raise sa_exc.InvalidRequestError( + "Can't assign 'secondaryjoin' on a backref " + "against a non-secondary relationship." + ) + + foreign_keys = kwargs.pop( + "foreign_keys", self._user_defined_foreign_keys + ) + parent = self.parent.primary_mapper() + kwargs.setdefault("viewonly", self.viewonly) + kwargs.setdefault("post_update", self.post_update) + kwargs.setdefault("passive_updates", self.passive_updates) + kwargs.setdefault("sync_backref", self.sync_backref) + self.back_populates = backref_key + relationship = RelationshipProperty( + parent, + self.secondary, + primaryjoin=pj, + secondaryjoin=sj, + foreign_keys=foreign_keys, + back_populates=self.key, + **kwargs, + ) + mapper._configure_property( + backref_key, relationship, warn_for_existing=True + ) + + if self.back_populates: + self._add_reverse_property(self.back_populates) + + @util.preload_module("sqlalchemy.orm.dependency") + def _post_init(self) -> None: + dependency = util.preloaded.orm_dependency + + if self.uselist is None: + self.uselist = self.direction is not MANYTOONE + if not self.viewonly: + self._dependency_processor = ( # type: ignore + dependency.DependencyProcessor.from_relationship + )(self) + + @util.memoized_property + def _use_get(self) -> bool: + """memoize the 'use_get' attribute of this RelationshipLoader's + lazyloader.""" + + strategy = self._lazy_strategy + return strategy.use_get + + @util.memoized_property + def _is_self_referential(self) -> bool: + return self.mapper.common_parent(self.parent) + + def _create_joins( + self, + source_polymorphic: bool = False, + source_selectable: Optional[FromClause] = None, + dest_selectable: Optional[FromClause] = None, + of_type_entity: Optional[_InternalEntityType[Any]] = None, + alias_secondary: bool = False, + extra_criteria: Tuple[ColumnElement[bool], ...] 
= (), + ) -> Tuple[ + ColumnElement[bool], + Optional[ColumnElement[bool]], + FromClause, + FromClause, + Optional[FromClause], + Optional[ClauseAdapter], + ]: + aliased = False + + if alias_secondary and self.secondary is not None: + aliased = True + + if source_selectable is None: + if source_polymorphic and self.parent.with_polymorphic: + source_selectable = self.parent._with_polymorphic_selectable + + if of_type_entity: + dest_mapper = of_type_entity.mapper + if dest_selectable is None: + dest_selectable = of_type_entity.selectable + aliased = True + else: + dest_mapper = self.mapper + + if dest_selectable is None: + dest_selectable = self.entity.selectable + if self.mapper.with_polymorphic: + aliased = True + + if self._is_self_referential and source_selectable is None: + dest_selectable = dest_selectable._anonymous_fromclause() + aliased = True + elif ( + dest_selectable is not self.mapper._with_polymorphic_selectable + or self.mapper.with_polymorphic + ): + aliased = True + + single_crit = dest_mapper._single_table_criterion + aliased = aliased or ( + source_selectable is not None + and ( + source_selectable + is not self.parent._with_polymorphic_selectable + or source_selectable._is_subquery + ) + ) + + ( + primaryjoin, + secondaryjoin, + secondary, + target_adapter, + dest_selectable, + ) = self._join_condition.join_targets( + source_selectable, + dest_selectable, + aliased, + single_crit, + extra_criteria, + ) + if source_selectable is None: + source_selectable = self.parent.local_table + if dest_selectable is None: + dest_selectable = self.entity.local_table + return ( + primaryjoin, + secondaryjoin, + source_selectable, + dest_selectable, + secondary, + target_adapter, + ) + + +def _annotate_columns(element: _CE, annotations: _AnnotationDict) -> _CE: + def clone(elem: _CE) -> _CE: + if isinstance(elem, expression.ColumnClause): + elem = elem._annotate(annotations.copy()) # type: ignore + elem._copy_internals(clone=clone) + return elem + + if element is not None: + element = clone(element) + clone = None # type: ignore # remove gc cycles + return element + + +class JoinCondition: + primaryjoin_initial: Optional[ColumnElement[bool]] + primaryjoin: ColumnElement[bool] + secondaryjoin: Optional[ColumnElement[bool]] + secondary: Optional[FromClause] + prop: RelationshipProperty[Any] + + synchronize_pairs: _ColumnPairs + secondary_synchronize_pairs: _ColumnPairs + direction: RelationshipDirection + + parent_persist_selectable: FromClause + child_persist_selectable: FromClause + parent_local_selectable: FromClause + child_local_selectable: FromClause + + _local_remote_pairs: Optional[_ColumnPairs] + + def __init__( + self, + parent_persist_selectable: FromClause, + child_persist_selectable: FromClause, + parent_local_selectable: FromClause, + child_local_selectable: FromClause, + *, + primaryjoin: Optional[ColumnElement[bool]] = None, + secondary: Optional[FromClause] = None, + secondaryjoin: Optional[ColumnElement[bool]] = None, + parent_equivalents: Optional[_EquivalentColumnMap] = None, + child_equivalents: Optional[_EquivalentColumnMap] = None, + consider_as_foreign_keys: Any = None, + local_remote_pairs: Optional[_ColumnPairs] = None, + remote_side: Any = None, + self_referential: Any = False, + prop: RelationshipProperty[Any], + support_sync: bool = True, + can_be_synced_fn: Callable[..., bool] = lambda *c: True, + ): + self.parent_persist_selectable = parent_persist_selectable + self.parent_local_selectable = parent_local_selectable + self.child_persist_selectable = 
child_persist_selectable + self.child_local_selectable = child_local_selectable + self.parent_equivalents = parent_equivalents + self.child_equivalents = child_equivalents + self.primaryjoin_initial = primaryjoin + self.secondaryjoin = secondaryjoin + self.secondary = secondary + self.consider_as_foreign_keys = consider_as_foreign_keys + self._local_remote_pairs = local_remote_pairs + self._remote_side = remote_side + self.prop = prop + self.self_referential = self_referential + self.support_sync = support_sync + self.can_be_synced_fn = can_be_synced_fn + + self._determine_joins() + assert self.primaryjoin is not None + + self._sanitize_joins() + self._annotate_fks() + self._annotate_remote() + self._annotate_local() + self._annotate_parentmapper() + self._setup_pairs() + self._check_foreign_cols(self.primaryjoin, True) + if self.secondaryjoin is not None: + self._check_foreign_cols(self.secondaryjoin, False) + self._determine_direction() + self._check_remote_side() + self._log_joins() + + def _log_joins(self) -> None: + log = self.prop.logger + log.info("%s setup primary join %s", self.prop, self.primaryjoin) + log.info("%s setup secondary join %s", self.prop, self.secondaryjoin) + log.info( + "%s synchronize pairs [%s]", + self.prop, + ",".join( + "(%s => %s)" % (l, r) for (l, r) in self.synchronize_pairs + ), + ) + log.info( + "%s secondary synchronize pairs [%s]", + self.prop, + ",".join( + "(%s => %s)" % (l, r) + for (l, r) in self.secondary_synchronize_pairs or [] + ), + ) + log.info( + "%s local/remote pairs [%s]", + self.prop, + ",".join( + "(%s / %s)" % (l, r) for (l, r) in self.local_remote_pairs + ), + ) + log.info( + "%s remote columns [%s]", + self.prop, + ",".join("%s" % col for col in self.remote_columns), + ) + log.info( + "%s local columns [%s]", + self.prop, + ",".join("%s" % col for col in self.local_columns), + ) + log.info("%s relationship direction %s", self.prop, self.direction) + + def _sanitize_joins(self) -> None: + """remove the parententity annotation from our join conditions which + can leak in here based on some declarative patterns and maybe others. + + "parentmapper" is relied upon both by the ORM evaluator as well as + the use case in _join_fixture_inh_selfref_w_entity + that relies upon it being present, see :ticket:`3364`. + + """ + + self.primaryjoin = _deep_deannotate( + self.primaryjoin, values=("parententity", "proxy_key") + ) + if self.secondaryjoin is not None: + self.secondaryjoin = _deep_deannotate( + self.secondaryjoin, values=("parententity", "proxy_key") + ) + + def _determine_joins(self) -> None: + """Determine the 'primaryjoin' and 'secondaryjoin' attributes, + if not passed to the constructor already. + + This is based on analysis of the foreign key relationships + between the parent and target mapped selectables. + + """ + if self.secondaryjoin is not None and self.secondary is None: + raise sa_exc.ArgumentError( + "Property %s specified with secondary " + "join condition but " + "no secondary argument" % self.prop + ) + + # find a join between the given mapper's mapped table and + # the given table. will try the mapper's local table first + # for more specificity, then if not found will try the more + # general mapped table, which in the case of inheritance is + # a join. 
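+        # editorial illustration, not upstream code: for a parent table
+        #     user(id)
+        # and a child table
+        #     address(id, user_id REFERENCES user.id),
+        # join_condition() below derives the primaryjoin
+        #     user.id = address.user_id
+        # when no explicit primaryjoin / secondary was configured.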
+ try: + consider_as_foreign_keys = self.consider_as_foreign_keys or None + if self.secondary is not None: + if self.secondaryjoin is None: + self.secondaryjoin = join_condition( + self.child_persist_selectable, + self.secondary, + a_subset=self.child_local_selectable, + consider_as_foreign_keys=consider_as_foreign_keys, + ) + if self.primaryjoin_initial is None: + self.primaryjoin = join_condition( + self.parent_persist_selectable, + self.secondary, + a_subset=self.parent_local_selectable, + consider_as_foreign_keys=consider_as_foreign_keys, + ) + else: + self.primaryjoin = self.primaryjoin_initial + else: + if self.primaryjoin_initial is None: + self.primaryjoin = join_condition( + self.parent_persist_selectable, + self.child_persist_selectable, + a_subset=self.parent_local_selectable, + consider_as_foreign_keys=consider_as_foreign_keys, + ) + else: + self.primaryjoin = self.primaryjoin_initial + except sa_exc.NoForeignKeysError as nfe: + if self.secondary is not None: + raise sa_exc.NoForeignKeysError( + "Could not determine join " + "condition between parent/child tables on " + "relationship %s - there are no foreign keys " + "linking these tables via secondary table '%s'. " + "Ensure that referencing columns are associated " + "with a ForeignKey or ForeignKeyConstraint, or " + "specify 'primaryjoin' and 'secondaryjoin' " + "expressions." % (self.prop, self.secondary) + ) from nfe + else: + raise sa_exc.NoForeignKeysError( + "Could not determine join " + "condition between parent/child tables on " + "relationship %s - there are no foreign keys " + "linking these tables. " + "Ensure that referencing columns are associated " + "with a ForeignKey or ForeignKeyConstraint, or " + "specify a 'primaryjoin' expression." % self.prop + ) from nfe + except sa_exc.AmbiguousForeignKeysError as afe: + if self.secondary is not None: + raise sa_exc.AmbiguousForeignKeysError( + "Could not determine join " + "condition between parent/child tables on " + "relationship %s - there are multiple foreign key " + "paths linking the tables via secondary table '%s'. " + "Specify the 'foreign_keys' " + "argument, providing a list of those columns which " + "should be counted as containing a foreign key " + "reference from the secondary table to each of the " + "parent and child tables." % (self.prop, self.secondary) + ) from afe + else: + raise sa_exc.AmbiguousForeignKeysError( + "Could not determine join " + "condition between parent/child tables on " + "relationship %s - there are multiple foreign key " + "paths linking the tables. Specify the " + "'foreign_keys' argument, providing a list of those " + "columns which should be counted as containing a " + "foreign key reference to the parent table." % self.prop + ) from afe + + @property + def primaryjoin_minus_local(self) -> ColumnElement[bool]: + return _deep_deannotate(self.primaryjoin, values=("local", "remote")) + + @property + def secondaryjoin_minus_local(self) -> ColumnElement[bool]: + assert self.secondaryjoin is not None + return _deep_deannotate(self.secondaryjoin, values=("local", "remote")) + + @util.memoized_property + def primaryjoin_reverse_remote(self) -> ColumnElement[bool]: + """Return the primaryjoin condition suitable for the + "reverse" direction. + + If the primaryjoin was delivered here with pre-existing + "remote" annotations, the local/remote annotations + are reversed. Otherwise, the local/remote annotations + are removed. 
+ + """ + if self._has_remote_annotations: + + def replace(element: _CE, **kw: Any) -> Optional[_CE]: + if "remote" in element._annotations: + v = dict(element._annotations) + del v["remote"] + v["local"] = True + return element._with_annotations(v) + elif "local" in element._annotations: + v = dict(element._annotations) + del v["local"] + v["remote"] = True + return element._with_annotations(v) + + return None + + return visitors.replacement_traverse(self.primaryjoin, {}, replace) + else: + if self._has_foreign_annotations: + # TODO: coverage + return _deep_deannotate( + self.primaryjoin, values=("local", "remote") + ) + else: + return _deep_deannotate(self.primaryjoin) + + def _has_annotation(self, clause: ClauseElement, annotation: str) -> bool: + for col in visitors.iterate(clause, {}): + if annotation in col._annotations: + return True + else: + return False + + @util.memoized_property + def _has_foreign_annotations(self) -> bool: + return self._has_annotation(self.primaryjoin, "foreign") + + @util.memoized_property + def _has_remote_annotations(self) -> bool: + return self._has_annotation(self.primaryjoin, "remote") + + def _annotate_fks(self) -> None: + """Annotate the primaryjoin and secondaryjoin + structures with 'foreign' annotations marking columns + considered as foreign. + + """ + if self._has_foreign_annotations: + return + + if self.consider_as_foreign_keys: + self._annotate_from_fk_list() + else: + self._annotate_present_fks() + + def _annotate_from_fk_list(self) -> None: + def check_fk(element: _CE, **kw: Any) -> Optional[_CE]: + if element in self.consider_as_foreign_keys: + return element._annotate({"foreign": True}) + return None + + self.primaryjoin = visitors.replacement_traverse( + self.primaryjoin, {}, check_fk + ) + if self.secondaryjoin is not None: + self.secondaryjoin = visitors.replacement_traverse( + self.secondaryjoin, {}, check_fk + ) + + def _annotate_present_fks(self) -> None: + if self.secondary is not None: + secondarycols = util.column_set(self.secondary.c) + else: + secondarycols = set() + + def is_foreign( + a: ColumnElement[Any], b: ColumnElement[Any] + ) -> Optional[ColumnElement[Any]]: + if isinstance(a, schema.Column) and isinstance(b, schema.Column): + if a.references(b): + return a + elif b.references(a): + return b + + if secondarycols: + if a in secondarycols and b not in secondarycols: + return a + elif b in secondarycols and a not in secondarycols: + return b + + return None + + def visit_binary(binary: BinaryExpression[Any]) -> None: + if not isinstance( + binary.left, sql.ColumnElement + ) or not isinstance(binary.right, sql.ColumnElement): + return + + if ( + "foreign" not in binary.left._annotations + and "foreign" not in binary.right._annotations + ): + col = is_foreign(binary.left, binary.right) + if col is not None: + if col.compare(binary.left): + binary.left = binary.left._annotate({"foreign": True}) + elif col.compare(binary.right): + binary.right = binary.right._annotate( + {"foreign": True} + ) + + self.primaryjoin = visitors.cloned_traverse( + self.primaryjoin, {}, {"binary": visit_binary} + ) + if self.secondaryjoin is not None: + self.secondaryjoin = visitors.cloned_traverse( + self.secondaryjoin, {}, {"binary": visit_binary} + ) + + def _refers_to_parent_table(self) -> bool: + """Return True if the join condition contains column + comparisons where both columns are in both tables. 
+ + """ + pt = self.parent_persist_selectable + mt = self.child_persist_selectable + result = False + + def visit_binary(binary: BinaryExpression[Any]) -> None: + nonlocal result + c, f = binary.left, binary.right + if ( + isinstance(c, expression.ColumnClause) + and isinstance(f, expression.ColumnClause) + and pt.is_derived_from(c.table) + and pt.is_derived_from(f.table) + and mt.is_derived_from(c.table) + and mt.is_derived_from(f.table) + ): + result = True + + visitors.traverse(self.primaryjoin, {}, {"binary": visit_binary}) + return result + + def _tables_overlap(self) -> bool: + """Return True if parent/child tables have some overlap.""" + + return selectables_overlap( + self.parent_persist_selectable, self.child_persist_selectable + ) + + def _annotate_remote(self) -> None: + """Annotate the primaryjoin and secondaryjoin + structures with 'remote' annotations marking columns + considered as part of the 'remote' side. + + """ + if self._has_remote_annotations: + return + + if self.secondary is not None: + self._annotate_remote_secondary() + elif self._local_remote_pairs or self._remote_side: + self._annotate_remote_from_args() + elif self._refers_to_parent_table(): + self._annotate_selfref( + lambda col: "foreign" in col._annotations, False + ) + elif self._tables_overlap(): + self._annotate_remote_with_overlap() + else: + self._annotate_remote_distinct_selectables() + + def _annotate_remote_secondary(self) -> None: + """annotate 'remote' in primaryjoin, secondaryjoin + when 'secondary' is present. + + """ + + assert self.secondary is not None + fixed_secondary = self.secondary + + def repl(element: _CE, **kw: Any) -> Optional[_CE]: + if fixed_secondary.c.contains_column(element): + return element._annotate({"remote": True}) + return None + + self.primaryjoin = visitors.replacement_traverse( + self.primaryjoin, {}, repl + ) + + assert self.secondaryjoin is not None + self.secondaryjoin = visitors.replacement_traverse( + self.secondaryjoin, {}, repl + ) + + def _annotate_selfref( + self, fn: Callable[[ColumnElement[Any]], bool], remote_side_given: bool + ) -> None: + """annotate 'remote' in primaryjoin, secondaryjoin + when the relationship is detected as self-referential. + + """ + + def visit_binary(binary: BinaryExpression[Any]) -> None: + equated = binary.left.compare(binary.right) + if isinstance(binary.left, expression.ColumnClause) and isinstance( + binary.right, expression.ColumnClause + ): + # assume one to many - FKs are "remote" + if fn(binary.left): + binary.left = binary.left._annotate({"remote": True}) + if fn(binary.right) and not equated: + binary.right = binary.right._annotate({"remote": True}) + elif not remote_side_given: + self._warn_non_column_elements() + + self.primaryjoin = visitors.cloned_traverse( + self.primaryjoin, {}, {"binary": visit_binary} + ) + + def _annotate_remote_from_args(self) -> None: + """annotate 'remote' in primaryjoin, secondaryjoin + when the 'remote_side' or '_local_remote_pairs' + arguments are used. + + """ + if self._local_remote_pairs: + if self._remote_side: + raise sa_exc.ArgumentError( + "remote_side argument is redundant " + "against more detailed _local_remote_side " + "argument." 
+ ) + + remote_side = [r for (l, r) in self._local_remote_pairs] + else: + remote_side = self._remote_side + + if self._refers_to_parent_table(): + self._annotate_selfref(lambda col: col in remote_side, True) + else: + + def repl(element: _CE, **kw: Any) -> Optional[_CE]: + # use set() to avoid generating ``__eq__()`` expressions + # against each element + if element in set(remote_side): + return element._annotate({"remote": True}) + return None + + self.primaryjoin = visitors.replacement_traverse( + self.primaryjoin, {}, repl + ) + + def _annotate_remote_with_overlap(self) -> None: + """annotate 'remote' in primaryjoin, secondaryjoin + when the parent/child tables have some set of + tables in common, though is not a fully self-referential + relationship. + + """ + + def visit_binary(binary: BinaryExpression[Any]) -> None: + binary.left, binary.right = proc_left_right( + binary.left, binary.right + ) + binary.right, binary.left = proc_left_right( + binary.right, binary.left + ) + + check_entities = ( + self.prop is not None and self.prop.mapper is not self.prop.parent + ) + + def proc_left_right( + left: ColumnElement[Any], right: ColumnElement[Any] + ) -> Tuple[ColumnElement[Any], ColumnElement[Any]]: + if isinstance(left, expression.ColumnClause) and isinstance( + right, expression.ColumnClause + ): + if self.child_persist_selectable.c.contains_column( + right + ) and self.parent_persist_selectable.c.contains_column(left): + right = right._annotate({"remote": True}) + elif ( + check_entities + and right._annotations.get("parentmapper") is self.prop.mapper + ): + right = right._annotate({"remote": True}) + elif ( + check_entities + and left._annotations.get("parentmapper") is self.prop.mapper + ): + left = left._annotate({"remote": True}) + else: + self._warn_non_column_elements() + + return left, right + + self.primaryjoin = visitors.cloned_traverse( + self.primaryjoin, {}, {"binary": visit_binary} + ) + + def _annotate_remote_distinct_selectables(self) -> None: + """annotate 'remote' in primaryjoin, secondaryjoin + when the parent/child tables are entirely + separate. + + """ + + def repl(element: _CE, **kw: Any) -> Optional[_CE]: + if self.child_persist_selectable.c.contains_column(element) and ( + not self.parent_local_selectable.c.contains_column(element) + or self.child_local_selectable.c.contains_column(element) + ): + return element._annotate({"remote": True}) + return None + + self.primaryjoin = visitors.replacement_traverse( + self.primaryjoin, {}, repl + ) + + def _warn_non_column_elements(self) -> None: + util.warn( + "Non-simple column elements in primary " + "join condition for property %s - consider using " + "remote() annotations to mark the remote side." % self.prop + ) + + def _annotate_local(self) -> None: + """Annotate the primaryjoin and secondaryjoin + structures with 'local' annotations. + + This annotates all column elements found + simultaneously in the parent table + and the join condition that don't have a + 'remote' annotation set up from + _annotate_remote() or user-defined. 
+ + """ + if self._has_annotation(self.primaryjoin, "local"): + return + + if self._local_remote_pairs: + local_side = util.column_set( + [l for (l, r) in self._local_remote_pairs] + ) + else: + local_side = util.column_set(self.parent_persist_selectable.c) + + def locals_(element: _CE, **kw: Any) -> Optional[_CE]: + if "remote" not in element._annotations and element in local_side: + return element._annotate({"local": True}) + return None + + self.primaryjoin = visitors.replacement_traverse( + self.primaryjoin, {}, locals_ + ) + + def _annotate_parentmapper(self) -> None: + def parentmappers_(element: _CE, **kw: Any) -> Optional[_CE]: + if "remote" in element._annotations: + return element._annotate({"parentmapper": self.prop.mapper}) + elif "local" in element._annotations: + return element._annotate({"parentmapper": self.prop.parent}) + return None + + self.primaryjoin = visitors.replacement_traverse( + self.primaryjoin, {}, parentmappers_ + ) + + def _check_remote_side(self) -> None: + if not self.local_remote_pairs: + raise sa_exc.ArgumentError( + "Relationship %s could " + "not determine any unambiguous local/remote column " + "pairs based on join condition and remote_side " + "arguments. " + "Consider using the remote() annotation to " + "accurately mark those elements of the join " + "condition that are on the remote side of " + "the relationship." % (self.prop,) + ) + else: + not_target = util.column_set( + self.parent_persist_selectable.c + ).difference(self.child_persist_selectable.c) + + for _, rmt in self.local_remote_pairs: + if rmt in not_target: + util.warn( + "Expression %s is marked as 'remote', but these " + "column(s) are local to the local side. The " + "remote() annotation is needed only for a " + "self-referential relationship where both sides " + "of the relationship refer to the same tables." + % (rmt,) + ) + + def _check_foreign_cols( + self, join_condition: ColumnElement[bool], primary: bool + ) -> None: + """Check the foreign key columns collected and emit error + messages.""" + foreign_cols = self._gather_columns_with_annotation( + join_condition, "foreign" + ) + + has_foreign = bool(foreign_cols) + + if primary: + can_sync = bool(self.synchronize_pairs) + else: + can_sync = bool(self.secondary_synchronize_pairs) + + if ( + self.support_sync + and can_sync + or (not self.support_sync and has_foreign) + ): + return + + # from here below is just determining the best error message + # to report. Check for a join condition using any operator + # (not just ==), perhaps they need to turn on "viewonly=True". + if self.support_sync and has_foreign and not can_sync: + err = ( + "Could not locate any simple equality expressions " + "involving locally mapped foreign key columns for " + "%s join condition " + "'%s' on relationship %s." + % ( + primary and "primary" or "secondary", + join_condition, + self.prop, + ) + ) + err += ( + " Ensure that referencing columns are associated " + "with a ForeignKey or ForeignKeyConstraint, or are " + "annotated in the join condition with the foreign() " + "annotation. To allow comparison operators other than " + "'==', the relationship can be marked as viewonly=True." + ) + + raise sa_exc.ArgumentError(err) + else: + err = ( + "Could not locate any relevant foreign key columns " + "for %s join condition '%s' on relationship %s." 
+ % ( + primary and "primary" or "secondary", + join_condition, + self.prop, + ) + ) + err += ( + " Ensure that referencing columns are associated " + "with a ForeignKey or ForeignKeyConstraint, or are " + "annotated in the join condition with the foreign() " + "annotation." + ) + raise sa_exc.ArgumentError(err) + + def _determine_direction(self) -> None: + """Determine if this relationship is one to many, many to one, + many to many. + + """ + if self.secondaryjoin is not None: + self.direction = MANYTOMANY + else: + parentcols = util.column_set(self.parent_persist_selectable.c) + targetcols = util.column_set(self.child_persist_selectable.c) + + # fk collection which suggests ONETOMANY. + onetomany_fk = targetcols.intersection(self.foreign_key_columns) + + # fk collection which suggests MANYTOONE. + + manytoone_fk = parentcols.intersection(self.foreign_key_columns) + + if onetomany_fk and manytoone_fk: + # fks on both sides. test for overlap of local/remote + # with foreign key. + # we will gather columns directly from their annotations + # without deannotating, so that we can distinguish on a column + # that refers to itself. + + # 1. columns that are both remote and FK suggest + # onetomany. + onetomany_local = self._gather_columns_with_annotation( + self.primaryjoin, "remote", "foreign" + ) + + # 2. columns that are FK but are not remote (e.g. local) + # suggest manytoone. + manytoone_local = { + c + for c in self._gather_columns_with_annotation( + self.primaryjoin, "foreign" + ) + if "remote" not in c._annotations + } + + # 3. if both collections are present, remove columns that + # refer to themselves. This is for the case of + # and_(Me.id == Me.remote_id, Me.version == Me.version) + if onetomany_local and manytoone_local: + self_equated = self.remote_columns.intersection( + self.local_columns + ) + onetomany_local = onetomany_local.difference(self_equated) + manytoone_local = manytoone_local.difference(self_equated) + + # at this point, if only one or the other collection is + # present, we know the direction, otherwise it's still + # ambiguous. + + if onetomany_local and not manytoone_local: + self.direction = ONETOMANY + elif manytoone_local and not onetomany_local: + self.direction = MANYTOONE + else: + raise sa_exc.ArgumentError( + "Can't determine relationship" + " direction for relationship '%s' - foreign " + "key columns within the join condition are present " + "in both the parent and the child's mapped tables. " + "Ensure that only those columns referring " + "to a parent column are marked as foreign, " + "either via the foreign() annotation or " + "via the foreign_keys argument." % self.prop + ) + elif onetomany_fk: + self.direction = ONETOMANY + elif manytoone_fk: + self.direction = MANYTOONE + else: + raise sa_exc.ArgumentError( + "Can't determine relationship " + "direction for relationship '%s' - foreign " + "key columns are present in neither the parent " + "nor the child's mapped tables" % self.prop + ) + + def _deannotate_pairs( + self, collection: _ColumnPairIterable + ) -> _MutableColumnPairs: + """provide deannotation for the various lists of + pairs, so that using them in hashes doesn't incur + high-overhead __eq__() comparisons against + original columns mapped. 
+ + """ + return [(x._deannotate(), y._deannotate()) for x, y in collection] + + def _setup_pairs(self) -> None: + sync_pairs: _MutableColumnPairs = [] + lrp: util.OrderedSet[Tuple[ColumnElement[Any], ColumnElement[Any]]] = ( + util.OrderedSet([]) + ) + secondary_sync_pairs: _MutableColumnPairs = [] + + def go( + joincond: ColumnElement[bool], + collection: _MutableColumnPairs, + ) -> None: + def visit_binary( + binary: BinaryExpression[Any], + left: ColumnElement[Any], + right: ColumnElement[Any], + ) -> None: + if ( + "remote" in right._annotations + and "remote" not in left._annotations + and self.can_be_synced_fn(left) + ): + lrp.add((left, right)) + elif ( + "remote" in left._annotations + and "remote" not in right._annotations + and self.can_be_synced_fn(right) + ): + lrp.add((right, left)) + if binary.operator is operators.eq and self.can_be_synced_fn( + left, right + ): + if "foreign" in right._annotations: + collection.append((left, right)) + elif "foreign" in left._annotations: + collection.append((right, left)) + + visit_binary_product(visit_binary, joincond) + + for joincond, collection in [ + (self.primaryjoin, sync_pairs), + (self.secondaryjoin, secondary_sync_pairs), + ]: + if joincond is None: + continue + go(joincond, collection) + + self.local_remote_pairs = self._deannotate_pairs(lrp) + self.synchronize_pairs = self._deannotate_pairs(sync_pairs) + self.secondary_synchronize_pairs = self._deannotate_pairs( + secondary_sync_pairs + ) + + _track_overlapping_sync_targets: weakref.WeakKeyDictionary[ + ColumnElement[Any], + weakref.WeakKeyDictionary[ + RelationshipProperty[Any], ColumnElement[Any] + ], + ] = weakref.WeakKeyDictionary() + + def _warn_for_conflicting_sync_targets(self) -> None: + if not self.support_sync: + return + + # we would like to detect if we are synchronizing any column + # pairs in conflict with another relationship that wishes to sync + # an entirely different column to the same target. This is a + # very rare edge case so we will try to minimize the memory/overhead + # impact of this check + for from_, to_ in [ + (from_, to_) for (from_, to_) in self.synchronize_pairs + ] + [ + (from_, to_) for (from_, to_) in self.secondary_synchronize_pairs + ]: + # save ourselves a ton of memory and overhead by only + # considering columns that are subject to a overlapping + # FK constraints at the core level. This condition can arise + # if multiple relationships overlap foreign() directly, but + # we're going to assume it's typically a ForeignKeyConstraint- + # level configuration that benefits from this warning. + + if to_ not in self._track_overlapping_sync_targets: + self._track_overlapping_sync_targets[to_] = ( + weakref.WeakKeyDictionary({self.prop: from_}) + ) + else: + other_props = [] + prop_to_from = self._track_overlapping_sync_targets[to_] + + for pr, fr_ in prop_to_from.items(): + if ( + not pr.mapper._dispose_called + and pr not in self.prop._reverse_property + and pr.key not in self.prop._overlaps + and self.prop.key not in pr._overlaps + # note: the "__*" symbol is used internally by + # SQLAlchemy as a general means of suppressing the + # overlaps warning for some extension cases, however + # this is not currently + # a publicly supported symbol and may change at + # any time. 
+ and "__*" not in self.prop._overlaps + and "__*" not in pr._overlaps + and not self.prop.parent.is_sibling(pr.parent) + and not self.prop.mapper.is_sibling(pr.mapper) + and not self.prop.parent.is_sibling(pr.mapper) + and not self.prop.mapper.is_sibling(pr.parent) + and ( + self.prop.key != pr.key + or not self.prop.parent.common_parent(pr.parent) + ) + ): + other_props.append((pr, fr_)) + + if other_props: + util.warn( + "relationship '%s' will copy column %s to column %s, " + "which conflicts with relationship(s): %s. " + "If this is not the intention, consider if these " + "relationships should be linked with " + "back_populates, or if viewonly=True should be " + "applied to one or more if they are read-only. " + "For the less common case that foreign key " + "constraints are partially overlapping, the " + "orm.foreign() " + "annotation can be used to isolate the columns that " + "should be written towards. To silence this " + "warning, add the parameter 'overlaps=\"%s\"' to the " + "'%s' relationship." + % ( + self.prop, + from_, + to_, + ", ".join( + sorted( + "'%s' (copies %s to %s)" % (pr, fr_, to_) + for (pr, fr_) in other_props + ) + ), + ",".join(sorted(pr.key for pr, fr in other_props)), + self.prop, + ), + code="qzyx", + ) + self._track_overlapping_sync_targets[to_][self.prop] = from_ + + @util.memoized_property + def remote_columns(self) -> Set[ColumnElement[Any]]: + return self._gather_join_annotations("remote") + + @util.memoized_property + def local_columns(self) -> Set[ColumnElement[Any]]: + return self._gather_join_annotations("local") + + @util.memoized_property + def foreign_key_columns(self) -> Set[ColumnElement[Any]]: + return self._gather_join_annotations("foreign") + + def _gather_join_annotations( + self, annotation: str + ) -> Set[ColumnElement[Any]]: + s = set( + self._gather_columns_with_annotation(self.primaryjoin, annotation) + ) + if self.secondaryjoin is not None: + s.update( + self._gather_columns_with_annotation( + self.secondaryjoin, annotation + ) + ) + return {x._deannotate() for x in s} + + def _gather_columns_with_annotation( + self, clause: ColumnElement[Any], *annotation: Iterable[str] + ) -> Set[ColumnElement[Any]]: + annotation_set = set(annotation) + return { + cast(ColumnElement[Any], col) + for col in visitors.iterate(clause, {}) + if annotation_set.issubset(col._annotations) + } + + @util.memoized_property + def _secondary_lineage_set(self) -> FrozenSet[ColumnElement[Any]]: + if self.secondary is not None: + return frozenset( + itertools.chain(*[c.proxy_set for c in self.secondary.c]) + ) + else: + return util.EMPTY_SET + + def join_targets( + self, + source_selectable: Optional[FromClause], + dest_selectable: FromClause, + aliased: bool, + single_crit: Optional[ColumnElement[bool]] = None, + extra_criteria: Tuple[ColumnElement[bool], ...] = (), + ) -> Tuple[ + ColumnElement[bool], + Optional[ColumnElement[bool]], + Optional[FromClause], + Optional[ClauseAdapter], + FromClause, + ]: + """Given a source and destination selectable, create a + join between them. + + This takes into account aliasing the join clause + to reference the appropriate corresponding columns + in the target objects, as well as the extra child + criterion, equivalent column sets, etc. + + """ + # place a barrier on the destination such that + # replacement traversals won't ever dig into it. + # its internal structure remains fixed + # regardless of context. 
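+        # note: visitors.replacement_traverse() leaves any element carrying
+        # the "no_replacement_traverse" annotation unchanged, which is what
+        # keeps the aliasing adapters built below from rewriting the
+        # destination selectable's internal columns.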
+ dest_selectable = _shallow_annotate( + dest_selectable, {"no_replacement_traverse": True} + ) + + primaryjoin, secondaryjoin, secondary = ( + self.primaryjoin, + self.secondaryjoin, + self.secondary, + ) + + # adjust the join condition for single table inheritance, + # in the case that the join is to a subclass + # this is analogous to the + # "_adjust_for_single_table_inheritance()" method in Query. + + if single_crit is not None: + if secondaryjoin is not None: + secondaryjoin = secondaryjoin & single_crit + else: + primaryjoin = primaryjoin & single_crit + + if extra_criteria: + + def mark_exclude_cols( + elem: SupportsAnnotations, annotations: _AnnotationDict + ) -> SupportsAnnotations: + """note unrelated columns in the "extra criteria" as either + should be adapted or not adapted, even though they are not + part of our "local" or "remote" side. + + see #9779 for this case, as well as #11010 for a follow up + + """ + + parentmapper_for_element = elem._annotations.get( + "parentmapper", None + ) + + if ( + parentmapper_for_element is not self.prop.parent + and parentmapper_for_element is not self.prop.mapper + and elem not in self._secondary_lineage_set + ): + return _safe_annotate(elem, annotations) + else: + return elem + + extra_criteria = tuple( + _deep_annotate( + elem, + {"should_not_adapt": True}, + annotate_callable=mark_exclude_cols, + ) + for elem in extra_criteria + ) + + if secondaryjoin is not None: + secondaryjoin = secondaryjoin & sql.and_(*extra_criteria) + else: + primaryjoin = primaryjoin & sql.and_(*extra_criteria) + + if aliased: + if secondary is not None: + secondary = secondary._anonymous_fromclause(flat=True) + primary_aliasizer = ClauseAdapter( + secondary, + exclude_fn=_local_col_exclude, + ) + secondary_aliasizer = ClauseAdapter( + dest_selectable, equivalents=self.child_equivalents + ).chain(primary_aliasizer) + if source_selectable is not None: + primary_aliasizer = ClauseAdapter( + secondary, + exclude_fn=_local_col_exclude, + ).chain( + ClauseAdapter( + source_selectable, + equivalents=self.parent_equivalents, + ) + ) + + secondaryjoin = secondary_aliasizer.traverse(secondaryjoin) + else: + primary_aliasizer = ClauseAdapter( + dest_selectable, + exclude_fn=_local_col_exclude, + equivalents=self.child_equivalents, + ) + if source_selectable is not None: + primary_aliasizer.chain( + ClauseAdapter( + source_selectable, + exclude_fn=_remote_col_exclude, + equivalents=self.parent_equivalents, + ) + ) + secondary_aliasizer = None + + primaryjoin = primary_aliasizer.traverse(primaryjoin) + target_adapter = secondary_aliasizer or primary_aliasizer + target_adapter.exclude_fn = None + else: + target_adapter = None + return ( + primaryjoin, + secondaryjoin, + secondary, + target_adapter, + dest_selectable, + ) + + def create_lazy_clause(self, reverse_direction: bool = False) -> Tuple[ + ColumnElement[bool], + Dict[str, ColumnElement[Any]], + Dict[ColumnElement[Any], ColumnElement[Any]], + ]: + binds: Dict[ColumnElement[Any], BindParameter[Any]] = {} + equated_columns: Dict[ColumnElement[Any], ColumnElement[Any]] = {} + + has_secondary = self.secondaryjoin is not None + + if has_secondary: + lookup = collections.defaultdict(list) + for l, r in self.local_remote_pairs: + lookup[l].append((l, r)) + equated_columns[r] = l + elif not reverse_direction: + for l, r in self.local_remote_pairs: + equated_columns[r] = l + else: + for l, r in self.local_remote_pairs: + equated_columns[l] = r + + def col_to_bind( + element: ColumnElement[Any], **kw: Any + ) -> 
Optional[BindParameter[Any]]: + if ( + (not reverse_direction and "local" in element._annotations) + or reverse_direction + and ( + (has_secondary and element in lookup) + or (not has_secondary and "remote" in element._annotations) + ) + ): + if element not in binds: + binds[element] = sql.bindparam( + None, None, type_=element.type, unique=True + ) + return binds[element] + return None + + lazywhere = self.primaryjoin + if self.secondaryjoin is None or not reverse_direction: + lazywhere = visitors.replacement_traverse( + lazywhere, {}, col_to_bind + ) + + if self.secondaryjoin is not None: + secondaryjoin = self.secondaryjoin + if reverse_direction: + secondaryjoin = visitors.replacement_traverse( + secondaryjoin, {}, col_to_bind + ) + lazywhere = sql.and_(lazywhere, secondaryjoin) + + bind_to_col = {binds[col].key: col for col in binds} + + return lazywhere, bind_to_col, equated_columns + + +class _ColInAnnotations: + """Serializable object that tests for names in c._annotations. + + TODO: does this need to be serializable anymore? can we find what the + use case was for that? + + """ + + __slots__ = ("names",) + + def __init__(self, *names: str): + self.names = frozenset(names) + + def __call__(self, c: ClauseElement) -> bool: + return bool(self.names.intersection(c._annotations)) + + +_local_col_exclude = _ColInAnnotations("local", "should_not_adapt") +_remote_col_exclude = _ColInAnnotations("remote", "should_not_adapt") + + +class Relationship( + RelationshipProperty[_T], + _DeclarativeMapped[_T], +): + """Describes an object property that holds a single item or list + of items that correspond to a related database table. + + Public constructor is the :func:`_orm.relationship` function. + + .. seealso:: + + :ref:`relationship_config_toplevel` + + .. versionchanged:: 2.0 Added :class:`_orm.Relationship` as a Declarative + compatible subclass for :class:`_orm.RelationshipProperty`. 
+ + """ + + inherit_cache = True + """:meta private:""" + + +class _RelationshipDeclared( # type: ignore[misc] + Relationship[_T], + WriteOnlyMapped[_T], # not compatible with Mapped[_T] + DynamicMapped[_T], # not compatible with Mapped[_T] +): + """Relationship subclass used implicitly for declarative mapping.""" + + inherit_cache = True + """:meta private:""" + + @classmethod + def _mapper_property_name(cls) -> str: + return "Relationship" diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/scoping.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/scoping.py new file mode 100644 index 0000000000000000000000000000000000000000..df5a6534dce87d9c78d283cdff56803cfc6ee86c --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/scoping.py @@ -0,0 +1,2162 @@ +# orm/scoping.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php + +from __future__ import annotations + +from typing import Any +from typing import Callable +from typing import Dict +from typing import Generic +from typing import Iterable +from typing import Iterator +from typing import Optional +from typing import overload +from typing import Sequence +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING +from typing import TypeVar +from typing import Union + +from .session import _S +from .session import Session +from .. import exc as sa_exc +from .. import util +from ..util import create_proxy_methods +from ..util import ScopedRegistry +from ..util import ThreadLocalRegistry +from ..util import warn +from ..util import warn_deprecated +from ..util.typing import Protocol + +if TYPE_CHECKING: + from ._typing import _EntityType + from ._typing import _IdentityKeyType + from ._typing import OrmExecuteOptionsParameter + from .identity import IdentityMap + from .interfaces import ORMOption + from .mapper import Mapper + from .query import Query + from .query import RowReturningQuery + from .session import _BindArguments + from .session import _EntityBindKey + from .session import _PKIdentityArgument + from .session import _SessionBind + from .session import sessionmaker + from .session import SessionTransaction + from ..engine import Connection + from ..engine import CursorResult + from ..engine import Engine + from ..engine import Result + from ..engine import Row + from ..engine import RowMapping + from ..engine.interfaces import _CoreAnyExecuteParams + from ..engine.interfaces import _CoreSingleExecuteParams + from ..engine.interfaces import CoreExecuteOptionsParameter + from ..engine.result import ScalarResult + from ..sql._typing import _ColumnsClauseArgument + from ..sql._typing import _T0 + from ..sql._typing import _T1 + from ..sql._typing import _T2 + from ..sql._typing import _T3 + from ..sql._typing import _T4 + from ..sql._typing import _T5 + from ..sql._typing import _T6 + from ..sql._typing import _T7 + from ..sql._typing import _TypedColumnClauseArgument as _TCCA + from ..sql.base import Executable + from ..sql.dml import UpdateBase + from ..sql.elements import ClauseElement + from ..sql.roles import TypedColumnsClauseRole + from ..sql.selectable import ForUpdateParameter + from ..sql.selectable import TypedReturnsRows + +_T = TypeVar("_T", bound=Any) + + +class 
QueryPropertyDescriptor(Protocol): + """Describes the type applied to a class-level + :meth:`_orm.scoped_session.query_property` attribute. + + .. versionadded:: 2.0.5 + + """ + + def __get__(self, instance: Any, owner: Type[_T]) -> Query[_T]: ... + + +_O = TypeVar("_O", bound=object) + +__all__ = ["scoped_session"] + + +@create_proxy_methods( + Session, + ":class:`_orm.Session`", + ":class:`_orm.scoping.scoped_session`", + classmethods=["close_all", "object_session", "identity_key"], + methods=[ + "__contains__", + "__iter__", + "add", + "add_all", + "begin", + "begin_nested", + "close", + "reset", + "commit", + "connection", + "delete", + "execute", + "expire", + "expire_all", + "expunge", + "expunge_all", + "flush", + "get", + "get_one", + "get_bind", + "is_modified", + "bulk_save_objects", + "bulk_insert_mappings", + "bulk_update_mappings", + "merge", + "query", + "refresh", + "rollback", + "scalar", + "scalars", + ], + attributes=[ + "bind", + "dirty", + "deleted", + "new", + "identity_map", + "is_active", + "autoflush", + "no_autoflush", + "info", + ], +) +class scoped_session(Generic[_S]): + """Provides scoped management of :class:`.Session` objects. + + See :ref:`unitofwork_contextual` for a tutorial. + + .. note:: + + When using :ref:`asyncio_toplevel`, the async-compatible + :class:`_asyncio.async_scoped_session` class should be + used in place of :class:`.scoped_session`. + + """ + + _support_async: bool = False + + session_factory: sessionmaker[_S] + """The `session_factory` provided to `__init__` is stored in this + attribute and may be accessed at a later time. This can be useful when + a new non-scoped :class:`.Session` is needed.""" + + registry: ScopedRegistry[_S] + + def __init__( + self, + session_factory: sessionmaker[_S], + scopefunc: Optional[Callable[[], Any]] = None, + ): + """Construct a new :class:`.scoped_session`. + + :param session_factory: a factory to create new :class:`.Session` + instances. This is usually, but not necessarily, an instance + of :class:`.sessionmaker`. + :param scopefunc: optional function which defines + the current scope. If not passed, the :class:`.scoped_session` + object assumes "thread-local" scope, and will use + a Python ``threading.local()`` in order to maintain the current + :class:`.Session`. If passed, the function should return + a hashable token; this token will be used as the key in a + dictionary in order to store and retrieve the current + :class:`.Session`. + + """ + self.session_factory = session_factory + + if scopefunc: + self.registry = ScopedRegistry(session_factory, scopefunc) + else: + self.registry = ThreadLocalRegistry(session_factory) + + @property + def _proxied(self) -> _S: + return self.registry() + + def __call__(self, **kw: Any) -> _S: + r"""Return the current :class:`.Session`, creating it + using the :attr:`.scoped_session.session_factory` if not present. + + :param \**kw: Keyword arguments will be passed to the + :attr:`.scoped_session.session_factory` callable, if an existing + :class:`.Session` is not present. If the :class:`.Session` is present + and keyword arguments have been passed, + :exc:`~sqlalchemy.exc.InvalidRequestError` is raised. + + """ + if kw: + if self.registry.has(): + raise sa_exc.InvalidRequestError( + "Scoped session is already present; " + "no new arguments may be specified." 
+ ) + else: + sess = self.session_factory(**kw) + self.registry.set(sess) + else: + sess = self.registry() + if not self._support_async and sess._is_asyncio: + warn_deprecated( + "Using `scoped_session` with asyncio is deprecated and " + "will raise an error in a future version. " + "Please use `async_scoped_session` instead.", + "1.4.23", + ) + return sess + + def configure(self, **kwargs: Any) -> None: + """reconfigure the :class:`.sessionmaker` used by this + :class:`.scoped_session`. + + See :meth:`.sessionmaker.configure`. + + """ + + if self.registry.has(): + warn( + "At least one scoped session is already present. " + " configure() can not affect sessions that have " + "already been created." + ) + + self.session_factory.configure(**kwargs) + + def remove(self) -> None: + """Dispose of the current :class:`.Session`, if present. + + This will first call :meth:`.Session.close` method + on the current :class:`.Session`, which releases any existing + transactional/connection resources still being held; transactions + specifically are rolled back. The :class:`.Session` is then + discarded. Upon next usage within the same scope, + the :class:`.scoped_session` will produce a new + :class:`.Session` object. + + """ + + if self.registry.has(): + self.registry().close() + self.registry.clear() + + def query_property( + self, query_cls: Optional[Type[Query[_T]]] = None + ) -> QueryPropertyDescriptor: + """return a class property which produces a legacy + :class:`_query.Query` object against the class and the current + :class:`.Session` when called. + + .. legacy:: The :meth:`_orm.scoped_session.query_property` accessor + is specific to the legacy :class:`.Query` object and is not + considered to be part of :term:`2.0-style` ORM use. + + e.g.:: + + from sqlalchemy.orm import QueryPropertyDescriptor + from sqlalchemy.orm import scoped_session + from sqlalchemy.orm import sessionmaker + + Session = scoped_session(sessionmaker()) + + + class MyClass: + query: QueryPropertyDescriptor = Session.query_property() + + + # after mappers are defined + result = MyClass.query.filter(MyClass.name == "foo").all() + + Produces instances of the session's configured query class by + default. To override and use a custom implementation, provide + a ``query_cls`` callable. The callable will be invoked with + the class's mapper as a positional argument and a session + keyword argument. + + There is no limit to the number of query properties placed on + a class. + + """ + + class query: + def __get__(s, instance: Any, owner: Type[_O]) -> Query[_O]: + if query_cls: + # custom query class + return query_cls(owner, session=self.registry()) # type: ignore # noqa: E501 + else: + # session's configured query class + return self.registry().query(owner) + + return query() + + # START PROXY METHODS scoped_session + + # code within this block is **programmatically, + # statically generated** by tools/generate_proxy_methods.py + + def __contains__(self, instance: object) -> bool: + r"""Return True if the instance is associated with this session. + + .. container:: class_bases + + Proxied for the :class:`_orm.Session` class on + behalf of the :class:`_orm.scoping.scoped_session` class. + + The instance may be pending or persistent within the Session for a + result of True. + + + """ # noqa: E501 + + return self._proxied.__contains__(instance) + + def __iter__(self) -> Iterator[object]: + r"""Iterate over all pending or persistent instances within this + Session. + + .. 
container:: class_bases + + Proxied for the :class:`_orm.Session` class on + behalf of the :class:`_orm.scoping.scoped_session` class. + + + """ # noqa: E501 + + return self._proxied.__iter__() + + def add(self, instance: object, _warn: bool = True) -> None: + r"""Place an object into this :class:`_orm.Session`. + + .. container:: class_bases + + Proxied for the :class:`_orm.Session` class on + behalf of the :class:`_orm.scoping.scoped_session` class. + + Objects that are in the :term:`transient` state when passed to the + :meth:`_orm.Session.add` method will move to the + :term:`pending` state, until the next flush, at which point they + will move to the :term:`persistent` state. + + Objects that are in the :term:`detached` state when passed to the + :meth:`_orm.Session.add` method will move to the :term:`persistent` + state directly. + + If the transaction used by the :class:`_orm.Session` is rolled back, + objects which were transient when they were passed to + :meth:`_orm.Session.add` will be moved back to the + :term:`transient` state, and will no longer be present within this + :class:`_orm.Session`. + + .. seealso:: + + :meth:`_orm.Session.add_all` + + :ref:`session_adding` - at :ref:`session_basics` + + + """ # noqa: E501 + + return self._proxied.add(instance, _warn=_warn) + + def add_all(self, instances: Iterable[object]) -> None: + r"""Add the given collection of instances to this :class:`_orm.Session`. + + .. container:: class_bases + + Proxied for the :class:`_orm.Session` class on + behalf of the :class:`_orm.scoping.scoped_session` class. + + See the documentation for :meth:`_orm.Session.add` for a general + behavioral description. + + .. seealso:: + + :meth:`_orm.Session.add` + + :ref:`session_adding` - at :ref:`session_basics` + + + """ # noqa: E501 + + return self._proxied.add_all(instances) + + def begin(self, nested: bool = False) -> SessionTransaction: + r"""Begin a transaction, or nested transaction, + on this :class:`.Session`, if one is not already begun. + + .. container:: class_bases + + Proxied for the :class:`_orm.Session` class on + behalf of the :class:`_orm.scoping.scoped_session` class. + + The :class:`_orm.Session` object features **autobegin** behavior, + so that normally it is not necessary to call the + :meth:`_orm.Session.begin` + method explicitly. However, it may be used in order to control + the scope of when the transactional state is begun. + + When used to begin the outermost transaction, an error is raised + if this :class:`.Session` is already inside of a transaction. + + :param nested: if True, begins a SAVEPOINT transaction and is + equivalent to calling :meth:`~.Session.begin_nested`. For + documentation on SAVEPOINT transactions, please see + :ref:`session_begin_nested`. + + :return: the :class:`.SessionTransaction` object. Note that + :class:`.SessionTransaction` + acts as a Python context manager, allowing :meth:`.Session.begin` + to be used in a "with" block. See :ref:`session_explicit_begin` for + an example. + + .. seealso:: + + :ref:`session_autobegin` + + :ref:`unitofwork_transaction` + + :meth:`.Session.begin_nested` + + + + """ # noqa: E501 + + return self._proxied.begin(nested=nested) + + def begin_nested(self) -> SessionTransaction: + r"""Begin a "nested" transaction on this Session, e.g. SAVEPOINT. + + .. container:: class_bases + + Proxied for the :class:`_orm.Session` class on + behalf of the :class:`_orm.scoping.scoped_session` class. 
+ + The target database(s) and associated drivers must support SQL + SAVEPOINT for this method to function correctly. + + For documentation on SAVEPOINT + transactions, please see :ref:`session_begin_nested`. + + :return: the :class:`.SessionTransaction` object. Note that + :class:`.SessionTransaction` acts as a context manager, allowing + :meth:`.Session.begin_nested` to be used in a "with" block. + See :ref:`session_begin_nested` for a usage example. + + .. seealso:: + + :ref:`session_begin_nested` + + :ref:`pysqlite_serializable` - special workarounds required + with the SQLite driver in order for SAVEPOINT to work + correctly. For asyncio use cases, see the section + :ref:`aiosqlite_serializable`. + + + """ # noqa: E501 + + return self._proxied.begin_nested() + + def close(self) -> None: + r"""Close out the transactional resources and ORM objects used by this + :class:`_orm.Session`. + + .. container:: class_bases + + Proxied for the :class:`_orm.Session` class on + behalf of the :class:`_orm.scoping.scoped_session` class. + + This expunges all ORM objects associated with this + :class:`_orm.Session`, ends any transaction in progress and + :term:`releases` any :class:`_engine.Connection` objects which this + :class:`_orm.Session` itself has checked out from associated + :class:`_engine.Engine` objects. The operation then leaves the + :class:`_orm.Session` in a state which it may be used again. + + .. tip:: + + In the default running mode the :meth:`_orm.Session.close` + method **does not prevent the Session from being used again**. + The :class:`_orm.Session` itself does not actually have a + distinct "closed" state; it merely means + the :class:`_orm.Session` will release all database connections + and ORM objects. + + Setting the parameter :paramref:`_orm.Session.close_resets_only` + to ``False`` will instead make the ``close`` final, meaning that + any further action on the session will be forbidden. + + .. versionchanged:: 1.4 The :meth:`.Session.close` method does not + immediately create a new :class:`.SessionTransaction` object; + instead, the new :class:`.SessionTransaction` is created only if + the :class:`.Session` is used again for a database operation. + + .. seealso:: + + :ref:`session_closing` - detail on the semantics of + :meth:`_orm.Session.close` and :meth:`_orm.Session.reset`. + + :meth:`_orm.Session.reset` - a similar method that behaves like + ``close()`` with the parameter + :paramref:`_orm.Session.close_resets_only` set to ``True``. + + + """ # noqa: E501 + + return self._proxied.close() + + def reset(self) -> None: + r"""Close out the transactional resources and ORM objects used by this + :class:`_orm.Session`, resetting the session to its initial state. + + .. container:: class_bases + + Proxied for the :class:`_orm.Session` class on + behalf of the :class:`_orm.scoping.scoped_session` class. + + This method provides for same "reset-only" behavior that the + :meth:`_orm.Session.close` method has provided historically, where the + state of the :class:`_orm.Session` is reset as though the object were + brand new, and ready to be used again. + This method may then be useful for :class:`_orm.Session` objects + which set :paramref:`_orm.Session.close_resets_only` to ``False``, + so that "reset only" behavior is still available. + + .. versionadded:: 2.0.22 + + .. seealso:: + + :ref:`session_closing` - detail on the semantics of + :meth:`_orm.Session.close` and :meth:`_orm.Session.reset`. 
+ + :meth:`_orm.Session.close` - a similar method will additionally + prevent re-use of the Session when the parameter + :paramref:`_orm.Session.close_resets_only` is set to ``False``. + + """ # noqa: E501 + + return self._proxied.reset() + + def commit(self) -> None: + r"""Flush pending changes and commit the current transaction. + + .. container:: class_bases + + Proxied for the :class:`_orm.Session` class on + behalf of the :class:`_orm.scoping.scoped_session` class. + + When the COMMIT operation is complete, all objects are fully + :term:`expired`, erasing their internal contents, which will be + automatically re-loaded when the objects are next accessed. In the + interim, these objects are in an expired state and will not function if + they are :term:`detached` from the :class:`.Session`. Additionally, + this re-load operation is not supported when using asyncio-oriented + APIs. The :paramref:`.Session.expire_on_commit` parameter may be used + to disable this behavior. + + When there is no transaction in place for the :class:`.Session`, + indicating that no operations were invoked on this :class:`.Session` + since the previous call to :meth:`.Session.commit`, the method will + begin and commit an internal-only "logical" transaction, that does not + normally affect the database unless pending flush changes were + detected, but will still invoke event handlers and object expiration + rules. + + The outermost database transaction is committed unconditionally, + automatically releasing any SAVEPOINTs in effect. + + .. seealso:: + + :ref:`session_committing` + + :ref:`unitofwork_transaction` + + :ref:`asyncio_orm_avoid_lazyloads` + + + """ # noqa: E501 + + return self._proxied.commit() + + def connection( + self, + bind_arguments: Optional[_BindArguments] = None, + execution_options: Optional[CoreExecuteOptionsParameter] = None, + ) -> Connection: + r"""Return a :class:`_engine.Connection` object corresponding to this + :class:`.Session` object's transactional state. + + .. container:: class_bases + + Proxied for the :class:`_orm.Session` class on + behalf of the :class:`_orm.scoping.scoped_session` class. + + Either the :class:`_engine.Connection` corresponding to the current + transaction is returned, or if no transaction is in progress, a new + one is begun and the :class:`_engine.Connection` + returned (note that no + transactional state is established with the DBAPI until the first + SQL statement is emitted). + + Ambiguity in multi-bind or unbound :class:`.Session` objects can be + resolved through any of the optional keyword arguments. This + ultimately makes usage of the :meth:`.get_bind` method for resolution. + + :param bind_arguments: dictionary of bind arguments. May include + "mapper", "bind", "clause", other custom arguments that are passed + to :meth:`.Session.get_bind`. + + :param execution_options: a dictionary of execution options that will + be passed to :meth:`_engine.Connection.execution_options`, **when the + connection is first procured only**. If the connection is already + present within the :class:`.Session`, a warning is emitted and + the arguments are ignored. + + .. seealso:: + + :ref:`session_transaction_isolation` + + + """ # noqa: E501 + + return self._proxied.connection( + bind_arguments=bind_arguments, execution_options=execution_options + ) + + def delete(self, instance: object) -> None: + r"""Mark an instance as deleted. + + .. 
container:: class_bases + + Proxied for the :class:`_orm.Session` class on + behalf of the :class:`_orm.scoping.scoped_session` class. + + The object is assumed to be either :term:`persistent` or + :term:`detached` when passed; after the method is called, the + object will remain in the :term:`persistent` state until the next + flush proceeds. During this time, the object will also be a member + of the :attr:`_orm.Session.deleted` collection. + + When the next flush proceeds, the object will move to the + :term:`deleted` state, indicating a ``DELETE`` statement was emitted + for its row within the current transaction. When the transaction + is successfully committed, + the deleted object is moved to the :term:`detached` state and is + no longer present within this :class:`_orm.Session`. + + .. seealso:: + + :ref:`session_deleting` - at :ref:`session_basics` + + + """ # noqa: E501 + + return self._proxied.delete(instance) + + @overload + def execute( + self, + statement: TypedReturnsRows[_T], + params: Optional[_CoreAnyExecuteParams] = None, + *, + execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT, + bind_arguments: Optional[_BindArguments] = None, + _parent_execute_state: Optional[Any] = None, + _add_event: Optional[Any] = None, + ) -> Result[_T]: ... + + @overload + def execute( + self, + statement: UpdateBase, + params: Optional[_CoreAnyExecuteParams] = None, + *, + execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT, + bind_arguments: Optional[_BindArguments] = None, + _parent_execute_state: Optional[Any] = None, + _add_event: Optional[Any] = None, + ) -> CursorResult[Any]: ... + + @overload + def execute( + self, + statement: Executable, + params: Optional[_CoreAnyExecuteParams] = None, + *, + execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT, + bind_arguments: Optional[_BindArguments] = None, + _parent_execute_state: Optional[Any] = None, + _add_event: Optional[Any] = None, + ) -> Result[Any]: ... + + def execute( + self, + statement: Executable, + params: Optional[_CoreAnyExecuteParams] = None, + *, + execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT, + bind_arguments: Optional[_BindArguments] = None, + _parent_execute_state: Optional[Any] = None, + _add_event: Optional[Any] = None, + ) -> Result[Any]: + r"""Execute a SQL expression construct. + + .. container:: class_bases + + Proxied for the :class:`_orm.Session` class on + behalf of the :class:`_orm.scoping.scoped_session` class. + + Returns a :class:`_engine.Result` object representing + results of the statement execution. + + E.g.:: + + from sqlalchemy import select + + result = session.execute(select(User).where(User.id == 5)) + + The API contract of :meth:`_orm.Session.execute` is similar to that + of :meth:`_engine.Connection.execute`, the :term:`2.0 style` version + of :class:`_engine.Connection`. + + .. versionchanged:: 1.4 the :meth:`_orm.Session.execute` method is + now the primary point of ORM statement execution when using + :term:`2.0 style` ORM usage. + + :param statement: + An executable statement (i.e. an :class:`.Executable` expression + such as :func:`_expression.select`). + + :param params: + Optional dictionary, or list of dictionaries, containing + bound parameter values. If a single dictionary, single-row + execution occurs; if a list of dictionaries, an + "executemany" will be invoked. The keys in each dictionary + must correspond to parameter names present in the statement. 
+ + :param execution_options: optional dictionary of execution options, + which will be associated with the statement execution. This + dictionary can provide a subset of the options that are accepted + by :meth:`_engine.Connection.execution_options`, and may also + provide additional options understood only in an ORM context. + + .. seealso:: + + :ref:`orm_queryguide_execution_options` - ORM-specific execution + options + + :param bind_arguments: dictionary of additional arguments to determine + the bind. May include "mapper", "bind", or other custom arguments. + Contents of this dictionary are passed to the + :meth:`.Session.get_bind` method. + + :return: a :class:`_engine.Result` object. + + + + """ # noqa: E501 + + return self._proxied.execute( + statement, + params=params, + execution_options=execution_options, + bind_arguments=bind_arguments, + _parent_execute_state=_parent_execute_state, + _add_event=_add_event, + ) + + def expire( + self, instance: object, attribute_names: Optional[Iterable[str]] = None + ) -> None: + r"""Expire the attributes on an instance. + + .. container:: class_bases + + Proxied for the :class:`_orm.Session` class on + behalf of the :class:`_orm.scoping.scoped_session` class. + + Marks the attributes of an instance as out of date. When an expired + attribute is next accessed, a query will be issued to the + :class:`.Session` object's current transactional context in order to + load all expired attributes for the given instance. Note that + a highly isolated transaction will return the same values as were + previously read in that same transaction, regardless of changes + in database state outside of that transaction. + + To expire all objects in the :class:`.Session` simultaneously, + use :meth:`Session.expire_all`. + + The :class:`.Session` object's default behavior is to + expire all state whenever the :meth:`Session.rollback` + or :meth:`Session.commit` methods are called, so that new + state can be loaded for the new transaction. For this reason, + calling :meth:`Session.expire` only makes sense for the specific + case that a non-ORM SQL statement was emitted in the current + transaction. + + :param instance: The instance to be refreshed. + :param attribute_names: optional list of string attribute names + indicating a subset of attributes to be expired. + + .. seealso:: + + :ref:`session_expire` - introductory material + + :meth:`.Session.expire` + + :meth:`.Session.refresh` + + :meth:`_orm.Query.populate_existing` + + + """ # noqa: E501 + + return self._proxied.expire(instance, attribute_names=attribute_names) + + def expire_all(self) -> None: + r"""Expires all persistent instances within this Session. + + .. container:: class_bases + + Proxied for the :class:`_orm.Session` class on + behalf of the :class:`_orm.scoping.scoped_session` class. + + When any attributes on a persistent instance is next accessed, + a query will be issued using the + :class:`.Session` object's current transactional context in order to + load all expired attributes for the given instance. Note that + a highly isolated transaction will return the same values as were + previously read in that same transaction, regardless of changes + in database state outside of that transaction. + + To expire individual objects and individual attributes + on those objects, use :meth:`Session.expire`. 
+ + The :class:`.Session` object's default behavior is to + expire all state whenever the :meth:`Session.rollback` + or :meth:`Session.commit` methods are called, so that new + state can be loaded for the new transaction. For this reason, + calling :meth:`Session.expire_all` is not usually needed, + assuming the transaction is isolated. + + .. seealso:: + + :ref:`session_expire` - introductory material + + :meth:`.Session.expire` + + :meth:`.Session.refresh` + + :meth:`_orm.Query.populate_existing` + + + """ # noqa: E501 + + return self._proxied.expire_all() + + def expunge(self, instance: object) -> None: + r"""Remove the `instance` from this ``Session``. + + .. container:: class_bases + + Proxied for the :class:`_orm.Session` class on + behalf of the :class:`_orm.scoping.scoped_session` class. + + This will free all internal references to the instance. Cascading + will be applied according to the *expunge* cascade rule. + + + """ # noqa: E501 + + return self._proxied.expunge(instance) + + def expunge_all(self) -> None: + r"""Remove all object instances from this ``Session``. + + .. container:: class_bases + + Proxied for the :class:`_orm.Session` class on + behalf of the :class:`_orm.scoping.scoped_session` class. + + This is equivalent to calling ``expunge(obj)`` on all objects in this + ``Session``. + + + """ # noqa: E501 + + return self._proxied.expunge_all() + + def flush(self, objects: Optional[Sequence[Any]] = None) -> None: + r"""Flush all the object changes to the database. + + .. container:: class_bases + + Proxied for the :class:`_orm.Session` class on + behalf of the :class:`_orm.scoping.scoped_session` class. + + Writes out all pending object creations, deletions and modifications + to the database as INSERTs, DELETEs, UPDATEs, etc. Operations are + automatically ordered by the Session's unit of work dependency + solver. + + Database operations will be issued in the current transactional + context and do not affect the state of the transaction, unless an + error occurs, in which case the entire transaction is rolled back. + You may flush() as often as you like within a transaction to move + changes from Python to the database's transaction buffer. + + :param objects: Optional; restricts the flush operation to operate + only on elements that are in the given collection. + + This feature is for an extremely narrow set of use cases where + particular objects may need to be operated upon before the + full flush() occurs. It is not intended for general use. + + + """ # noqa: E501 + + return self._proxied.flush(objects=objects) + + def get( + self, + entity: _EntityBindKey[_O], + ident: _PKIdentityArgument, + *, + options: Optional[Sequence[ORMOption]] = None, + populate_existing: bool = False, + with_for_update: ForUpdateParameter = None, + identity_token: Optional[Any] = None, + execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT, + bind_arguments: Optional[_BindArguments] = None, + ) -> Optional[_O]: + r"""Return an instance based on the given primary key identifier, + or ``None`` if not found. + + .. container:: class_bases + + Proxied for the :class:`_orm.Session` class on + behalf of the :class:`_orm.scoping.scoped_session` class. + + E.g.:: + + my_user = session.get(User, 5) + + some_object = session.get(VersionedFoo, (5, 10)) + + some_object = session.get(VersionedFoo, {"id": 5, "version_id": 10}) + + .. versionadded:: 1.4 Added :meth:`_orm.Session.get`, which is moved + from the now legacy :meth:`_orm.Query.get` method. 
+ + :meth:`_orm.Session.get` is special in that it provides direct + access to the identity map of the :class:`.Session`. + If the given primary key identifier is present + in the local identity map, the object is returned + directly from this collection and no SQL is emitted, + unless the object has been marked fully expired. + If not present, + a SELECT is performed in order to locate the object. + + :meth:`_orm.Session.get` also will perform a check if + the object is present in the identity map and + marked as expired - a SELECT + is emitted to refresh the object as well as to + ensure that the row is still present. + If not, :class:`~sqlalchemy.orm.exc.ObjectDeletedError` is raised. + + :param entity: a mapped class or :class:`.Mapper` indicating the + type of entity to be loaded. + + :param ident: A scalar, tuple, or dictionary representing the + primary key. For a composite (e.g. multiple column) primary key, + a tuple or dictionary should be passed. + + For a single-column primary key, the scalar calling form is typically + the most expedient. If the primary key of a row is the value "5", + the call looks like:: + + my_object = session.get(SomeClass, 5) + + The tuple form contains primary key values typically in + the order in which they correspond to the mapped + :class:`_schema.Table` + object's primary key columns, or if the + :paramref:`_orm.Mapper.primary_key` configuration parameter were + used, in + the order used for that parameter. For example, if the primary key + of a row is represented by the integer + digits "5, 10" the call would look like:: + + my_object = session.get(SomeClass, (5, 10)) + + The dictionary form should include as keys the mapped attribute names + corresponding to each element of the primary key. If the mapped class + has the attributes ``id``, ``version_id`` as the attributes which + store the object's primary key value, the call would look like:: + + my_object = session.get(SomeClass, {"id": 5, "version_id": 10}) + + :param options: optional sequence of loader options which will be + applied to the query, if one is emitted. + + :param populate_existing: causes the method to unconditionally emit + a SQL query and refresh the object with the newly loaded data, + regardless of whether or not the object is already present. + + :param with_for_update: optional boolean ``True`` indicating FOR UPDATE + should be used, or may be a dictionary containing flags to + indicate a more specific set of FOR UPDATE flags for the SELECT; + flags should match the parameters of + :meth:`_query.Query.with_for_update`. + Supersedes the :paramref:`.Session.refresh.lockmode` parameter. + + :param execution_options: optional dictionary of execution options, + which will be associated with the query execution if one is emitted. + This dictionary can provide a subset of the options that are + accepted by :meth:`_engine.Connection.execution_options`, and may + also provide additional options understood only in an ORM context. + + .. versionadded:: 1.4.29 + + .. seealso:: + + :ref:`orm_queryguide_execution_options` - ORM-specific execution + options + + :param bind_arguments: dictionary of additional arguments to determine + the bind. May include "mapper", "bind", or other custom arguments. + Contents of this dictionary are passed to the + :meth:`.Session.get_bind` method. + + .. versionadded: 2.0.0rc1 + + :return: The object instance, or ``None``. 
+ + + """ # noqa: E501 + + return self._proxied.get( + entity, + ident, + options=options, + populate_existing=populate_existing, + with_for_update=with_for_update, + identity_token=identity_token, + execution_options=execution_options, + bind_arguments=bind_arguments, + ) + + def get_one( + self, + entity: _EntityBindKey[_O], + ident: _PKIdentityArgument, + *, + options: Optional[Sequence[ORMOption]] = None, + populate_existing: bool = False, + with_for_update: ForUpdateParameter = None, + identity_token: Optional[Any] = None, + execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT, + bind_arguments: Optional[_BindArguments] = None, + ) -> _O: + r"""Return exactly one instance based on the given primary key + identifier, or raise an exception if not found. + + .. container:: class_bases + + Proxied for the :class:`_orm.Session` class on + behalf of the :class:`_orm.scoping.scoped_session` class. + + Raises :class:`_exc.NoResultFound` if the query selects no rows. + + For a detailed documentation of the arguments see the + method :meth:`.Session.get`. + + .. versionadded:: 2.0.22 + + :return: The object instance. + + .. seealso:: + + :meth:`.Session.get` - equivalent method that instead + returns ``None`` if no row was found with the provided primary + key + + + """ # noqa: E501 + + return self._proxied.get_one( + entity, + ident, + options=options, + populate_existing=populate_existing, + with_for_update=with_for_update, + identity_token=identity_token, + execution_options=execution_options, + bind_arguments=bind_arguments, + ) + + def get_bind( + self, + mapper: Optional[_EntityBindKey[_O]] = None, + *, + clause: Optional[ClauseElement] = None, + bind: Optional[_SessionBind] = None, + _sa_skip_events: Optional[bool] = None, + _sa_skip_for_implicit_returning: bool = False, + **kw: Any, + ) -> Union[Engine, Connection]: + r"""Return a "bind" to which this :class:`.Session` is bound. + + .. container:: class_bases + + Proxied for the :class:`_orm.Session` class on + behalf of the :class:`_orm.scoping.scoped_session` class. + + The "bind" is usually an instance of :class:`_engine.Engine`, + except in the case where the :class:`.Session` has been + explicitly bound directly to a :class:`_engine.Connection`. + + For a multiply-bound or unbound :class:`.Session`, the + ``mapper`` or ``clause`` arguments are used to determine the + appropriate bind to return. + + Note that the "mapper" argument is usually present + when :meth:`.Session.get_bind` is called via an ORM + operation such as a :meth:`.Session.query`, each + individual INSERT/UPDATE/DELETE operation within a + :meth:`.Session.flush`, call, etc. + + The order of resolution is: + + 1. if mapper given and :paramref:`.Session.binds` is present, + locate a bind based first on the mapper in use, then + on the mapped class in use, then on any base classes that are + present in the ``__mro__`` of the mapped class, from more specific + superclasses to more general. + 2. if clause given and ``Session.binds`` is present, + locate a bind based on :class:`_schema.Table` objects + found in the given clause present in ``Session.binds``. + 3. if ``Session.binds`` is present, return that. + 4. if clause given, attempt to return a bind + linked to the :class:`_schema.MetaData` ultimately + associated with the clause. + 5. if mapper given, attempt to return a bind + linked to the :class:`_schema.MetaData` ultimately + associated with the :class:`_schema.Table` or other + selectable to which the mapper is mapped. + 6. 
No bind can be found, :exc:`~sqlalchemy.exc.UnboundExecutionError` + is raised. + + Note that the :meth:`.Session.get_bind` method can be overridden on + a user-defined subclass of :class:`.Session` to provide any kind + of bind resolution scheme. See the example at + :ref:`session_custom_partitioning`. + + :param mapper: + Optional mapped class or corresponding :class:`_orm.Mapper` instance. + The bind can be derived from a :class:`_orm.Mapper` first by + consulting the "binds" map associated with this :class:`.Session`, + and secondly by consulting the :class:`_schema.MetaData` associated + with the :class:`_schema.Table` to which the :class:`_orm.Mapper` is + mapped for a bind. + + :param clause: + A :class:`_expression.ClauseElement` (i.e. + :func:`_expression.select`, + :func:`_expression.text`, + etc.). If the ``mapper`` argument is not present or could not + produce a bind, the given expression construct will be searched + for a bound element, typically a :class:`_schema.Table` + associated with + bound :class:`_schema.MetaData`. + + .. seealso:: + + :ref:`session_partitioning` + + :paramref:`.Session.binds` + + :meth:`.Session.bind_mapper` + + :meth:`.Session.bind_table` + + + """ # noqa: E501 + + return self._proxied.get_bind( + mapper=mapper, + clause=clause, + bind=bind, + _sa_skip_events=_sa_skip_events, + _sa_skip_for_implicit_returning=_sa_skip_for_implicit_returning, + **kw, + ) + + def is_modified( + self, instance: object, include_collections: bool = True + ) -> bool: + r"""Return ``True`` if the given instance has locally + modified attributes. + + .. container:: class_bases + + Proxied for the :class:`_orm.Session` class on + behalf of the :class:`_orm.scoping.scoped_session` class. + + This method retrieves the history for each instrumented + attribute on the instance and performs a comparison of the current + value to its previously flushed or committed value, if any. + + It is in effect a more expensive and accurate + version of checking for the given instance in the + :attr:`.Session.dirty` collection; a full test for + each attribute's net "dirty" status is performed. + + E.g.:: + + return session.is_modified(someobject) + + A few caveats to this method apply: + + * Instances present in the :attr:`.Session.dirty` collection may + report ``False`` when tested with this method. This is because + the object may have received change events via attribute mutation, + thus placing it in :attr:`.Session.dirty`, but ultimately the state + is the same as that loaded from the database, resulting in no net + change here. + * Scalar attributes may not have recorded the previously set + value when a new value was applied, if the attribute was not loaded, + or was expired, at the time the new value was received - in these + cases, the attribute is assumed to have a change, even if there is + ultimately no net change against its database value. SQLAlchemy in + most cases does not need the "old" value when a set event occurs, so + it skips the expense of a SQL call if the old value isn't present, + based on the assumption that an UPDATE of the scalar value is + usually needed, and in those few cases where it isn't, is less + expensive on average than issuing a defensive SELECT. + + The "old" value is fetched unconditionally upon set only if the + attribute container has the ``active_history`` flag set to ``True``. + This flag is set typically for primary key attributes and scalar + object references that are not a simple many-to-one. 
To set this + flag for any arbitrary mapped column, use the ``active_history`` + argument with :func:`.column_property`. + + :param instance: mapped instance to be tested for pending changes. + :param include_collections: Indicates if multivalued collections + should be included in the operation. Setting this to ``False`` is a + way to detect only local-column based properties (i.e. scalar columns + or many-to-one foreign keys) that would result in an UPDATE for this + instance upon flush. + + + """ # noqa: E501 + + return self._proxied.is_modified( + instance, include_collections=include_collections + ) + + def bulk_save_objects( + self, + objects: Iterable[object], + return_defaults: bool = False, + update_changed_only: bool = True, + preserve_order: bool = True, + ) -> None: + r"""Perform a bulk save of the given list of objects. + + .. container:: class_bases + + Proxied for the :class:`_orm.Session` class on + behalf of the :class:`_orm.scoping.scoped_session` class. + + .. legacy:: + + This method is a legacy feature as of the 2.0 series of + SQLAlchemy. For modern bulk INSERT and UPDATE, see + the sections :ref:`orm_queryguide_bulk_insert` and + :ref:`orm_queryguide_bulk_update`. + + For general INSERT and UPDATE of existing ORM mapped objects, + prefer standard :term:`unit of work` data management patterns, + introduced in the :ref:`unified_tutorial` at + :ref:`tutorial_orm_data_manipulation`. SQLAlchemy 2.0 + now uses :ref:`engine_insertmanyvalues` with modern dialects + which solves previous issues of bulk INSERT slowness. + + :param objects: a sequence of mapped object instances. The mapped + objects are persisted as is, and are **not** associated with the + :class:`.Session` afterwards. + + For each object, whether the object is sent as an INSERT or an + UPDATE is dependent on the same rules used by the :class:`.Session` + in traditional operation; if the object has the + :attr:`.InstanceState.key` + attribute set, then the object is assumed to be "detached" and + will result in an UPDATE. Otherwise, an INSERT is used. + + In the case of an UPDATE, statements are grouped based on which + attributes have changed, and are thus to be the subject of each + SET clause. If ``update_changed_only`` is False, then all + attributes present within each object are applied to the UPDATE + statement, which may help in allowing the statements to be grouped + together into a larger executemany(), and will also reduce the + overhead of checking history on attributes. + + :param return_defaults: when True, rows that are missing values which + generate defaults, namely integer primary key defaults and sequences, + will be inserted **one at a time**, so that the primary key value + is available. In particular this will allow joined-inheritance + and other multi-table mappings to insert correctly without the need + to provide primary key values ahead of time; however, + :paramref:`.Session.bulk_save_objects.return_defaults` **greatly + reduces the performance gains** of the method overall. It is strongly + advised to please use the standard :meth:`_orm.Session.add_all` + approach. + + :param update_changed_only: when True, UPDATE statements are rendered + based on those attributes in each state that have logged changes. + When False, all attributes present are rendered into the SET clause + with the exception of primary key attributes. + + :param preserve_order: when True, the order of inserts and updates + matches exactly the order in which the objects are given. 
When + False, common types of objects are grouped into inserts + and updates, to allow for more batching opportunities. + + .. seealso:: + + :doc:`queryguide/dml` + + :meth:`.Session.bulk_insert_mappings` + + :meth:`.Session.bulk_update_mappings` + + + """ # noqa: E501 + + return self._proxied.bulk_save_objects( + objects, + return_defaults=return_defaults, + update_changed_only=update_changed_only, + preserve_order=preserve_order, + ) + + def bulk_insert_mappings( + self, + mapper: Mapper[Any], + mappings: Iterable[Dict[str, Any]], + return_defaults: bool = False, + render_nulls: bool = False, + ) -> None: + r"""Perform a bulk insert of the given list of mapping dictionaries. + + .. container:: class_bases + + Proxied for the :class:`_orm.Session` class on + behalf of the :class:`_orm.scoping.scoped_session` class. + + .. legacy:: + + This method is a legacy feature as of the 2.0 series of + SQLAlchemy. For modern bulk INSERT and UPDATE, see + the sections :ref:`orm_queryguide_bulk_insert` and + :ref:`orm_queryguide_bulk_update`. The 2.0 API shares + implementation details with this method and adds new features + as well. + + :param mapper: a mapped class, or the actual :class:`_orm.Mapper` + object, + representing the single kind of object represented within the mapping + list. + + :param mappings: a sequence of dictionaries, each one containing the + state of the mapped row to be inserted, in terms of the attribute + names on the mapped class. If the mapping refers to multiple tables, + such as a joined-inheritance mapping, each dictionary must contain all + keys to be populated into all tables. + + :param return_defaults: when True, the INSERT process will be altered + to ensure that newly generated primary key values will be fetched. + The rationale for this parameter is typically to enable + :ref:`Joined Table Inheritance ` mappings to + be bulk inserted. + + .. note:: for backends that don't support RETURNING, the + :paramref:`_orm.Session.bulk_insert_mappings.return_defaults` + parameter can significantly decrease performance as INSERT + statements can no longer be batched. See + :ref:`engine_insertmanyvalues` + for background on which backends are affected. + + :param render_nulls: When True, a value of ``None`` will result + in a NULL value being included in the INSERT statement, rather + than the column being omitted from the INSERT. This allows all + the rows being INSERTed to have the identical set of columns which + allows the full set of rows to be batched to the DBAPI. Normally, + each column-set that contains a different combination of NULL values + than the previous row must omit a different series of columns from + the rendered INSERT statement, which means it must be emitted as a + separate statement. By passing this flag, the full set of rows + are guaranteed to be batchable into one batch; the cost however is + that server-side defaults which are invoked by an omitted column will + be skipped, so care must be taken to ensure that these are not + necessary. + + .. warning:: + + When this flag is set, **server side default SQL values will + not be invoked** for those columns that are inserted as NULL; + the NULL value will be sent explicitly. Care must be taken + to ensure that no server-side default functions need to be + invoked for the operation as a whole. + + .. 
seealso:: + + :doc:`queryguide/dml` + + :meth:`.Session.bulk_save_objects` + + :meth:`.Session.bulk_update_mappings` + + + """ # noqa: E501 + + return self._proxied.bulk_insert_mappings( + mapper, + mappings, + return_defaults=return_defaults, + render_nulls=render_nulls, + ) + + def bulk_update_mappings( + self, mapper: Mapper[Any], mappings: Iterable[Dict[str, Any]] + ) -> None: + r"""Perform a bulk update of the given list of mapping dictionaries. + + .. container:: class_bases + + Proxied for the :class:`_orm.Session` class on + behalf of the :class:`_orm.scoping.scoped_session` class. + + .. legacy:: + + This method is a legacy feature as of the 2.0 series of + SQLAlchemy. For modern bulk INSERT and UPDATE, see + the sections :ref:`orm_queryguide_bulk_insert` and + :ref:`orm_queryguide_bulk_update`. The 2.0 API shares + implementation details with this method and adds new features + as well. + + :param mapper: a mapped class, or the actual :class:`_orm.Mapper` + object, + representing the single kind of object represented within the mapping + list. + + :param mappings: a sequence of dictionaries, each one containing the + state of the mapped row to be updated, in terms of the attribute names + on the mapped class. If the mapping refers to multiple tables, such + as a joined-inheritance mapping, each dictionary may contain keys + corresponding to all tables. All those keys which are present and + are not part of the primary key are applied to the SET clause of the + UPDATE statement; the primary key values, which are required, are + applied to the WHERE clause. + + + .. seealso:: + + :doc:`queryguide/dml` + + :meth:`.Session.bulk_insert_mappings` + + :meth:`.Session.bulk_save_objects` + + + """ # noqa: E501 + + return self._proxied.bulk_update_mappings(mapper, mappings) + + def merge( + self, + instance: _O, + *, + load: bool = True, + options: Optional[Sequence[ORMOption]] = None, + ) -> _O: + r"""Copy the state of a given instance into a corresponding instance + within this :class:`.Session`. + + .. container:: class_bases + + Proxied for the :class:`_orm.Session` class on + behalf of the :class:`_orm.scoping.scoped_session` class. + + :meth:`.Session.merge` examines the primary key attributes of the + source instance, and attempts to reconcile it with an instance of the + same primary key in the session. If not found locally, it attempts + to load the object from the database based on primary key, and if + none can be located, creates a new instance. The state of each + attribute on the source instance is then copied to the target + instance. The resulting target instance is then returned by the + method; the original source instance is left unmodified, and + un-associated with the :class:`.Session` if not already. + + This operation cascades to associated instances if the association is + mapped with ``cascade="merge"``. + + See :ref:`unitofwork_merging` for a detailed discussion of merging. + + :param instance: Instance to be merged. + :param load: Boolean, when False, :meth:`.merge` switches into + a "high performance" mode which causes it to forego emitting history + events as well as all database access. This flag is used for + cases such as transferring graphs of objects into a :class:`.Session` + from a second level cache, or to transfer just-loaded objects + into the :class:`.Session` owned by a worker thread or process + without re-querying the database. 
+ + The ``load=False`` use case adds the caveat that the given + object has to be in a "clean" state, that is, has no pending changes + to be flushed - even if the incoming object is detached from any + :class:`.Session`. This is so that when + the merge operation populates local attributes and + cascades to related objects and + collections, the values can be "stamped" onto the + target object as is, without generating any history or attribute + events, and without the need to reconcile the incoming data with + any existing related objects or collections that might not + be loaded. The resulting objects from ``load=False`` are always + produced as "clean", so it is only appropriate that the given objects + should be "clean" as well, else this suggests a mis-use of the + method. + :param options: optional sequence of loader options which will be + applied to the :meth:`_orm.Session.get` method when the merge + operation loads the existing version of the object from the database. + + .. versionadded:: 1.4.24 + + + .. seealso:: + + :func:`.make_transient_to_detached` - provides for an alternative + means of "merging" a single object into the :class:`.Session` + + + """ # noqa: E501 + + return self._proxied.merge(instance, load=load, options=options) + + @overload + def query(self, _entity: _EntityType[_O]) -> Query[_O]: ... + + @overload + def query( + self, _colexpr: TypedColumnsClauseRole[_T] + ) -> RowReturningQuery[Tuple[_T]]: ... + + # START OVERLOADED FUNCTIONS self.query RowReturningQuery 2-8 + + # code within this block is **programmatically, + # statically generated** by tools/generate_tuple_map_overloads.py + + @overload + def query( + self, __ent0: _TCCA[_T0], __ent1: _TCCA[_T1] + ) -> RowReturningQuery[Tuple[_T0, _T1]]: ... + + @overload + def query( + self, __ent0: _TCCA[_T0], __ent1: _TCCA[_T1], __ent2: _TCCA[_T2] + ) -> RowReturningQuery[Tuple[_T0, _T1, _T2]]: ... + + @overload + def query( + self, + __ent0: _TCCA[_T0], + __ent1: _TCCA[_T1], + __ent2: _TCCA[_T2], + __ent3: _TCCA[_T3], + ) -> RowReturningQuery[Tuple[_T0, _T1, _T2, _T3]]: ... + + @overload + def query( + self, + __ent0: _TCCA[_T0], + __ent1: _TCCA[_T1], + __ent2: _TCCA[_T2], + __ent3: _TCCA[_T3], + __ent4: _TCCA[_T4], + ) -> RowReturningQuery[Tuple[_T0, _T1, _T2, _T3, _T4]]: ... + + @overload + def query( + self, + __ent0: _TCCA[_T0], + __ent1: _TCCA[_T1], + __ent2: _TCCA[_T2], + __ent3: _TCCA[_T3], + __ent4: _TCCA[_T4], + __ent5: _TCCA[_T5], + ) -> RowReturningQuery[Tuple[_T0, _T1, _T2, _T3, _T4, _T5]]: ... + + @overload + def query( + self, + __ent0: _TCCA[_T0], + __ent1: _TCCA[_T1], + __ent2: _TCCA[_T2], + __ent3: _TCCA[_T3], + __ent4: _TCCA[_T4], + __ent5: _TCCA[_T5], + __ent6: _TCCA[_T6], + ) -> RowReturningQuery[Tuple[_T0, _T1, _T2, _T3, _T4, _T5, _T6]]: ... + + @overload + def query( + self, + __ent0: _TCCA[_T0], + __ent1: _TCCA[_T1], + __ent2: _TCCA[_T2], + __ent3: _TCCA[_T3], + __ent4: _TCCA[_T4], + __ent5: _TCCA[_T5], + __ent6: _TCCA[_T6], + __ent7: _TCCA[_T7], + ) -> RowReturningQuery[Tuple[_T0, _T1, _T2, _T3, _T4, _T5, _T6, _T7]]: ... + + # END OVERLOADED FUNCTIONS self.query + + @overload + def query( + self, *entities: _ColumnsClauseArgument[Any], **kwargs: Any + ) -> Query[Any]: ... + + def query( + self, *entities: _ColumnsClauseArgument[Any], **kwargs: Any + ) -> Query[Any]: + r"""Return a new :class:`_query.Query` object corresponding to this + :class:`_orm.Session`. + + .. 
container:: class_bases + + Proxied for the :class:`_orm.Session` class on + behalf of the :class:`_orm.scoping.scoped_session` class. + + Note that the :class:`_query.Query` object is legacy as of + SQLAlchemy 2.0; the :func:`_sql.select` construct is now used + to construct ORM queries. + + .. seealso:: + + :ref:`unified_tutorial` + + :ref:`queryguide_toplevel` + + :ref:`query_api_toplevel` - legacy API doc + + + """ # noqa: E501 + + return self._proxied.query(*entities, **kwargs) + + def refresh( + self, + instance: object, + attribute_names: Optional[Iterable[str]] = None, + with_for_update: ForUpdateParameter = None, + ) -> None: + r"""Expire and refresh attributes on the given instance. + + .. container:: class_bases + + Proxied for the :class:`_orm.Session` class on + behalf of the :class:`_orm.scoping.scoped_session` class. + + The selected attributes will first be expired as they would when using + :meth:`_orm.Session.expire`; then a SELECT statement will be issued to + the database to refresh column-oriented attributes with the current + value available in the current transaction. + + :func:`_orm.relationship` oriented attributes will also be immediately + loaded if they were already eagerly loaded on the object, using the + same eager loading strategy that they were loaded with originally. + + .. versionadded:: 1.4 - the :meth:`_orm.Session.refresh` method + can also refresh eagerly loaded attributes. + + :func:`_orm.relationship` oriented attributes that would normally + load using the ``select`` (or "lazy") loader strategy will also + load **if they are named explicitly in the attribute_names + collection**, emitting a SELECT statement for the attribute using the + ``immediate`` loader strategy. If lazy-loaded relationships are not + named in :paramref:`_orm.Session.refresh.attribute_names`, then + they remain as "lazy loaded" attributes and are not implicitly + refreshed. + + .. versionchanged:: 2.0.4 The :meth:`_orm.Session.refresh` method + will now refresh lazy-loaded :func:`_orm.relationship` oriented + attributes for those which are named explicitly in the + :paramref:`_orm.Session.refresh.attribute_names` collection. + + .. tip:: + + While the :meth:`_orm.Session.refresh` method is capable of + refreshing both column and relationship oriented attributes, its + primary focus is on refreshing of local column-oriented attributes + on a single instance. For more open ended "refresh" functionality, + including the ability to refresh the attributes on many objects at + once while having explicit control over relationship loader + strategies, use the + :ref:`populate existing ` feature + instead. + + Note that a highly isolated transaction will return the same values as + were previously read in that same transaction, regardless of changes + in database state outside of that transaction. Refreshing + attributes usually only makes sense at the start of a transaction + where database rows have not yet been accessed. + + :param attribute_names: optional. An iterable collection of + string attribute names indicating a subset of attributes to + be refreshed. + + :param with_for_update: optional boolean ``True`` indicating FOR UPDATE + should be used, or may be a dictionary containing flags to + indicate a more specific set of FOR UPDATE flags for the SELECT; + flags should match the parameters of + :meth:`_query.Query.with_for_update`. + Supersedes the :paramref:`.Session.refresh.lockmode` parameter. + + .. 
seealso:: + + :ref:`session_expire` - introductory material + + :meth:`.Session.expire` + + :meth:`.Session.expire_all` + + :ref:`orm_queryguide_populate_existing` - allows any ORM query + to refresh objects as they would be loaded normally. + + + """ # noqa: E501 + + return self._proxied.refresh( + instance, + attribute_names=attribute_names, + with_for_update=with_for_update, + ) + + def rollback(self) -> None: + r"""Rollback the current transaction in progress. + + .. container:: class_bases + + Proxied for the :class:`_orm.Session` class on + behalf of the :class:`_orm.scoping.scoped_session` class. + + If no transaction is in progress, this method is a pass-through. + + The method always rolls back + the topmost database transaction, discarding any nested + transactions that may be in progress. + + .. seealso:: + + :ref:`session_rollback` + + :ref:`unitofwork_transaction` + + + """ # noqa: E501 + + return self._proxied.rollback() + + @overload + def scalar( + self, + statement: TypedReturnsRows[Tuple[_T]], + params: Optional[_CoreSingleExecuteParams] = None, + *, + execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT, + bind_arguments: Optional[_BindArguments] = None, + **kw: Any, + ) -> Optional[_T]: ... + + @overload + def scalar( + self, + statement: Executable, + params: Optional[_CoreSingleExecuteParams] = None, + *, + execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT, + bind_arguments: Optional[_BindArguments] = None, + **kw: Any, + ) -> Any: ... + + def scalar( + self, + statement: Executable, + params: Optional[_CoreSingleExecuteParams] = None, + *, + execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT, + bind_arguments: Optional[_BindArguments] = None, + **kw: Any, + ) -> Any: + r"""Execute a statement and return a scalar result. + + .. container:: class_bases + + Proxied for the :class:`_orm.Session` class on + behalf of the :class:`_orm.scoping.scoped_session` class. + + Usage and parameters are the same as that of + :meth:`_orm.Session.execute`; the return result is a scalar Python + value. + + + """ # noqa: E501 + + return self._proxied.scalar( + statement, + params=params, + execution_options=execution_options, + bind_arguments=bind_arguments, + **kw, + ) + + @overload + def scalars( + self, + statement: TypedReturnsRows[Tuple[_T]], + params: Optional[_CoreAnyExecuteParams] = None, + *, + execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT, + bind_arguments: Optional[_BindArguments] = None, + **kw: Any, + ) -> ScalarResult[_T]: ... + + @overload + def scalars( + self, + statement: Executable, + params: Optional[_CoreAnyExecuteParams] = None, + *, + execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT, + bind_arguments: Optional[_BindArguments] = None, + **kw: Any, + ) -> ScalarResult[Any]: ... + + def scalars( + self, + statement: Executable, + params: Optional[_CoreAnyExecuteParams] = None, + *, + execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT, + bind_arguments: Optional[_BindArguments] = None, + **kw: Any, + ) -> ScalarResult[Any]: + r"""Execute a statement and return the results as scalars. + + .. container:: class_bases + + Proxied for the :class:`_orm.Session` class on + behalf of the :class:`_orm.scoping.scoped_session` class. + + Usage and parameters are the same as that of + :meth:`_orm.Session.execute`; the return result is a + :class:`_result.ScalarResult` filtering object which + will return single elements rather than :class:`_row.Row` objects. 
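+
+        E.g., a minimal sketch of typical usage (a hypothetical ``User``
+        mapped class and an existing ``session`` are assumed)::
+
+            from sqlalchemy import select
+
+            # each element returned is a User instance, not a Row
+            users = session.scalars(
+                select(User).where(User.name == "ed")
+            ).all()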
+ + :return: a :class:`_result.ScalarResult` object + + .. versionadded:: 1.4.24 Added :meth:`_orm.Session.scalars` + + .. versionadded:: 1.4.26 Added :meth:`_orm.scoped_session.scalars` + + .. seealso:: + + :ref:`orm_queryguide_select_orm_entities` - contrasts the behavior + of :meth:`_orm.Session.execute` to :meth:`_orm.Session.scalars` + + + """ # noqa: E501 + + return self._proxied.scalars( + statement, + params=params, + execution_options=execution_options, + bind_arguments=bind_arguments, + **kw, + ) + + @property + def bind(self) -> Optional[Union[Engine, Connection]]: + r"""Proxy for the :attr:`_orm.Session.bind` attribute + on behalf of the :class:`_orm.scoping.scoped_session` class. + + """ # noqa: E501 + + return self._proxied.bind + + @bind.setter + def bind(self, attr: Optional[Union[Engine, Connection]]) -> None: + self._proxied.bind = attr + + @property + def dirty(self) -> Any: + r"""The set of all persistent instances considered dirty. + + .. container:: class_bases + + Proxied for the :class:`_orm.Session` class + on behalf of the :class:`_orm.scoping.scoped_session` class. + + E.g.:: + + some_mapped_object in session.dirty + + Instances are considered dirty when they were modified but not + deleted. + + Note that this 'dirty' calculation is 'optimistic'; most + attribute-setting or collection modification operations will + mark an instance as 'dirty' and place it in this set, even if + there is no net change to the attribute's value. At flush + time, the value of each attribute is compared to its + previously saved value, and if there's no net change, no SQL + operation will occur (this is a more expensive operation so + it's only done at flush time). + + To check if an instance has actionable net changes to its + attributes, use the :meth:`.Session.is_modified` method. + + + """ # noqa: E501 + + return self._proxied.dirty + + @property + def deleted(self) -> Any: + r"""The set of all instances marked as 'deleted' within this ``Session`` + + .. container:: class_bases + + Proxied for the :class:`_orm.Session` class + on behalf of the :class:`_orm.scoping.scoped_session` class. + + """ # noqa: E501 + + return self._proxied.deleted + + @property + def new(self) -> Any: + r"""The set of all instances marked as 'new' within this ``Session``. + + .. container:: class_bases + + Proxied for the :class:`_orm.Session` class + on behalf of the :class:`_orm.scoping.scoped_session` class. + + """ # noqa: E501 + + return self._proxied.new + + @property + def identity_map(self) -> IdentityMap: + r"""Proxy for the :attr:`_orm.Session.identity_map` attribute + on behalf of the :class:`_orm.scoping.scoped_session` class. + + """ # noqa: E501 + + return self._proxied.identity_map + + @identity_map.setter + def identity_map(self, attr: IdentityMap) -> None: + self._proxied.identity_map = attr + + @property + def is_active(self) -> Any: + r"""True if this :class:`.Session` not in "partial rollback" state. + + .. container:: class_bases + + Proxied for the :class:`_orm.Session` class + on behalf of the :class:`_orm.scoping.scoped_session` class. + + .. versionchanged:: 1.4 The :class:`_orm.Session` no longer begins + a new transaction immediately, so this attribute will be False + when the :class:`_orm.Session` is first instantiated. + + "partial rollback" state typically indicates that the flush process + of the :class:`_orm.Session` has failed, and that the + :meth:`_orm.Session.rollback` method must be emitted in order to + fully roll back the transaction. 
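+
+        For illustration, a minimal sketch of recovering from the "partial
+        rollback" state (an existing ``session`` whose flush fails is
+        assumed)::
+
+            try:
+                session.flush()
+            except Exception:
+                # the failed flush leaves the session inactive until the
+                # transaction is fully rolled back
+                if not session.is_active:
+                    session.rollback()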
+ + If this :class:`_orm.Session` is not in a transaction at all, the + :class:`_orm.Session` will autobegin when it is first used, so in this + case :attr:`_orm.Session.is_active` will return True. + + Otherwise, if this :class:`_orm.Session` is within a transaction, + and that transaction has not been rolled back internally, the + :attr:`_orm.Session.is_active` will also return True. + + .. seealso:: + + :ref:`faq_session_rollback` + + :meth:`_orm.Session.in_transaction` + + + """ # noqa: E501 + + return self._proxied.is_active + + @property + def autoflush(self) -> bool: + r"""Proxy for the :attr:`_orm.Session.autoflush` attribute + on behalf of the :class:`_orm.scoping.scoped_session` class. + + """ # noqa: E501 + + return self._proxied.autoflush + + @autoflush.setter + def autoflush(self, attr: bool) -> None: + self._proxied.autoflush = attr + + @property + def no_autoflush(self) -> Any: + r"""Return a context manager that disables autoflush. + + .. container:: class_bases + + Proxied for the :class:`_orm.Session` class + on behalf of the :class:`_orm.scoping.scoped_session` class. + + e.g.:: + + with session.no_autoflush: + + some_object = SomeClass() + session.add(some_object) + # won't autoflush + some_object.related_thing = session.query(SomeRelated).first() + + Operations that proceed within the ``with:`` block + will not be subject to flushes occurring upon query + access. This is useful when initializing a series + of objects which involve existing database queries, + where the uncompleted object should not yet be flushed. + + + """ # noqa: E501 + + return self._proxied.no_autoflush + + @property + def info(self) -> Any: + r"""A user-modifiable dictionary. + + .. container:: class_bases + + Proxied for the :class:`_orm.Session` class + on behalf of the :class:`_orm.scoping.scoped_session` class. + + The initial value of this dictionary can be populated using the + ``info`` argument to the :class:`.Session` constructor or + :class:`.sessionmaker` constructor or factory methods. The dictionary + here is always local to this :class:`.Session` and can be modified + independently of all other :class:`.Session` objects. + + + """ # noqa: E501 + + return self._proxied.info + + @classmethod + def close_all(cls) -> None: + r"""Close *all* sessions in memory. + + .. container:: class_bases + + Proxied for the :class:`_orm.Session` class on + behalf of the :class:`_orm.scoping.scoped_session` class. + + .. deprecated:: 1.3 The :meth:`.Session.close_all` method is deprecated and will be removed in a future release. Please refer to :func:`.session.close_all_sessions`. + + """ # noqa: E501 + + return Session.close_all() + + @classmethod + def object_session(cls, instance: object) -> Optional[Session]: + r"""Return the :class:`.Session` to which an object belongs. + + .. container:: class_bases + + Proxied for the :class:`_orm.Session` class on + behalf of the :class:`_orm.scoping.scoped_session` class. + + This is an alias of :func:`.object_session`. + + + """ # noqa: E501 + + return Session.object_session(instance) + + @classmethod + def identity_key( + cls, + class_: Optional[Type[Any]] = None, + ident: Union[Any, Tuple[Any, ...]] = None, + *, + instance: Optional[Any] = None, + row: Optional[Union[Row[Any], RowMapping]] = None, + identity_token: Optional[Any] = None, + ) -> _IdentityKeyType[Any]: + r"""Return an identity key. + + .. container:: class_bases + + Proxied for the :class:`_orm.Session` class on + behalf of the :class:`_orm.scoping.scoped_session` class. 
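+
+        For illustration, a minimal sketch (a hypothetical ``User`` mapped
+        class is assumed)::
+
+            # identity key for the User row with primary key 5
+            key = Session.identity_key(User, (5,))
+
+            # or derive the key from an already-loaded instance
+            key = Session.identity_key(instance=some_user)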
+ + This is an alias of :func:`.util.identity_key`. + + + """ # noqa: E501 + + return Session.identity_key( + class_=class_, + ident=ident, + instance=instance, + row=row, + identity_token=identity_token, + ) + + # END PROXY METHODS scoped_session + + +ScopedSession = scoped_session +"""Old name for backwards compatibility.""" diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/session.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/session.py new file mode 100644 index 0000000000000000000000000000000000000000..ca7b2c2b59fdcfb0a7137817839d477ec646c1bd --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/session.py @@ -0,0 +1,5294 @@ +# orm/session.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php + +"""Provides the Session class and related utilities.""" + +from __future__ import annotations + +import contextlib +from enum import Enum +import itertools +import sys +import typing +from typing import Any +from typing import Callable +from typing import cast +from typing import Dict +from typing import Generic +from typing import Iterable +from typing import Iterator +from typing import List +from typing import NoReturn +from typing import Optional +from typing import overload +from typing import Sequence +from typing import Set +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING +from typing import TypeVar +from typing import Union +import weakref + +from . import attributes +from . import bulk_persistence +from . import context +from . import descriptor_props +from . import exc +from . import identity +from . import loading +from . import query +from . import state as statelib +from ._typing import _O +from ._typing import insp_is_mapper +from ._typing import is_composite_class +from ._typing import is_orm_option +from ._typing import is_user_defined_option +from .base import _class_to_mapper +from .base import _none_set +from .base import _state_mapper +from .base import instance_str +from .base import LoaderCallableStatus +from .base import object_mapper +from .base import object_state +from .base import PassiveFlag +from .base import state_str +from .context import FromStatement +from .context import ORMCompileState +from .identity import IdentityMap +from .query import Query +from .state import InstanceState +from .state_changes import _StateChange +from .state_changes import _StateChangeState +from .state_changes import _StateChangeStates +from .unitofwork import UOWTransaction +from .. import engine +from .. import exc as sa_exc +from .. import sql +from .. 
import util +from ..engine import Connection +from ..engine import Engine +from ..engine.util import TransactionalContext +from ..event import dispatcher +from ..event import EventTarget +from ..inspection import inspect +from ..inspection import Inspectable +from ..sql import coercions +from ..sql import dml +from ..sql import roles +from ..sql import Select +from ..sql import TableClause +from ..sql import visitors +from ..sql.base import _NoArg +from ..sql.base import CompileState +from ..sql.schema import Table +from ..sql.selectable import ForUpdateArg +from ..sql.selectable import LABEL_STYLE_TABLENAME_PLUS_COL +from ..util import IdentitySet +from ..util.typing import Literal +from ..util.typing import Protocol + +if typing.TYPE_CHECKING: + from ._typing import _EntityType + from ._typing import _IdentityKeyType + from ._typing import _InstanceDict + from ._typing import OrmExecuteOptionsParameter + from .interfaces import ORMOption + from .interfaces import UserDefinedOption + from .mapper import Mapper + from .path_registry import PathRegistry + from .query import RowReturningQuery + from ..engine import CursorResult + from ..engine import Result + from ..engine import Row + from ..engine import RowMapping + from ..engine.base import Transaction + from ..engine.base import TwoPhaseTransaction + from ..engine.interfaces import _CoreAnyExecuteParams + from ..engine.interfaces import _CoreSingleExecuteParams + from ..engine.interfaces import _ExecuteOptions + from ..engine.interfaces import CoreExecuteOptionsParameter + from ..engine.result import ScalarResult + from ..event import _InstanceLevelDispatch + from ..sql._typing import _ColumnsClauseArgument + from ..sql._typing import _InfoType + from ..sql._typing import _T0 + from ..sql._typing import _T1 + from ..sql._typing import _T2 + from ..sql._typing import _T3 + from ..sql._typing import _T4 + from ..sql._typing import _T5 + from ..sql._typing import _T6 + from ..sql._typing import _T7 + from ..sql._typing import _TypedColumnClauseArgument as _TCCA + from ..sql.base import Executable + from ..sql.base import ExecutableOption + from ..sql.dml import UpdateBase + from ..sql.elements import ClauseElement + from ..sql.roles import TypedColumnsClauseRole + from ..sql.selectable import ForUpdateParameter + from ..sql.selectable import TypedReturnsRows + +_T = TypeVar("_T", bound=Any) + +__all__ = [ + "Session", + "SessionTransaction", + "sessionmaker", + "ORMExecuteState", + "close_all_sessions", + "make_transient", + "make_transient_to_detached", + "object_session", +] + +_sessions: weakref.WeakValueDictionary[int, Session] = ( + weakref.WeakValueDictionary() +) +"""Weak-referencing dictionary of :class:`.Session` objects. +""" + +statelib._sessions = _sessions + +_PKIdentityArgument = Union[Any, Tuple[Any, ...]] + +_BindArguments = Dict[str, Any] + +_EntityBindKey = Union[Type[_O], "Mapper[_O]"] +_SessionBindKey = Union[Type[Any], "Mapper[Any]", "TableClause", str] +_SessionBind = Union["Engine", "Connection"] + +JoinTransactionMode = Literal[ + "conditional_savepoint", + "rollback_only", + "control_fully", + "create_savepoint", +] + + +class _ConnectionCallableProto(Protocol): + """a callable that returns a :class:`.Connection` given an instance. + + This callable, when present on a :class:`.Session`, is called only from the + ORM's persistence mechanism (i.e. the unit of work flush process) to allow + for connection-per-instance schemes (i.e. horizontal sharding) to be used + as persistence time. 
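+
+    For illustration only, a minimal sketch of a conforming callable (the
+    ``engines`` mapping and the ``shard_key_for()`` helper are hypothetical)::
+
+        def connection_for_instance(mapper=None, instance=None, **kw):
+            # choose an Engine based on the instance being persisted and
+            # hand back a Connection from it
+            engine = engines[shard_key_for(instance)]
+            return engine.connect()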
+ + This callable is not present on a plain :class:`.Session`, however + is established when using the horizontal sharding extension. + + """ + + def __call__( + self, + mapper: Optional[Mapper[Any]] = None, + instance: Optional[object] = None, + **kw: Any, + ) -> Connection: ... + + +def _state_session(state: InstanceState[Any]) -> Optional[Session]: + """Given an :class:`.InstanceState`, return the :class:`.Session` + associated, if any. + """ + return state.session + + +class _SessionClassMethods: + """Class-level methods for :class:`.Session`, :class:`.sessionmaker`.""" + + @classmethod + @util.deprecated( + "1.3", + "The :meth:`.Session.close_all` method is deprecated and will be " + "removed in a future release. Please refer to " + ":func:`.session.close_all_sessions`.", + ) + def close_all(cls) -> None: + """Close *all* sessions in memory.""" + + close_all_sessions() + + @classmethod + @util.preload_module("sqlalchemy.orm.util") + def identity_key( + cls, + class_: Optional[Type[Any]] = None, + ident: Union[Any, Tuple[Any, ...]] = None, + *, + instance: Optional[Any] = None, + row: Optional[Union[Row[Any], RowMapping]] = None, + identity_token: Optional[Any] = None, + ) -> _IdentityKeyType[Any]: + """Return an identity key. + + This is an alias of :func:`.util.identity_key`. + + """ + return util.preloaded.orm_util.identity_key( + class_, + ident, + instance=instance, + row=row, + identity_token=identity_token, + ) + + @classmethod + def object_session(cls, instance: object) -> Optional[Session]: + """Return the :class:`.Session` to which an object belongs. + + This is an alias of :func:`.object_session`. + + """ + + return object_session(instance) + + +class SessionTransactionState(_StateChangeState): + ACTIVE = 1 + PREPARED = 2 + COMMITTED = 3 + DEACTIVE = 4 + CLOSED = 5 + PROVISIONING_CONNECTION = 6 + + +# backwards compatibility +ACTIVE, PREPARED, COMMITTED, DEACTIVE, CLOSED, PROVISIONING_CONNECTION = tuple( + SessionTransactionState +) + + +class ORMExecuteState(util.MemoizedSlots): + """Represents a call to the :meth:`_orm.Session.execute` method, as passed + to the :meth:`.SessionEvents.do_orm_execute` event hook. + + .. versionadded:: 1.4 + + .. seealso:: + + :ref:`session_execute_events` - top level documentation on how + to use :meth:`_orm.SessionEvents.do_orm_execute` + + """ + + __slots__ = ( + "session", + "statement", + "parameters", + "execution_options", + "local_execution_options", + "bind_arguments", + "identity_token", + "_compile_state_cls", + "_starting_event_idx", + "_events_todo", + "_update_execution_options", + ) + + session: Session + """The :class:`_orm.Session` in use.""" + + statement: Executable + """The SQL statement being invoked. + + For an ORM selection as would + be retrieved from :class:`_orm.Query`, this is an instance of + :class:`_sql.select` that was generated from the ORM query. + """ + + parameters: Optional[_CoreAnyExecuteParams] + """Dictionary of parameters that was passed to + :meth:`_orm.Session.execute`.""" + + execution_options: _ExecuteOptions + """The complete dictionary of current execution options. + + This is a merge of the statement level options with the + locally passed execution options. + + .. seealso:: + + :attr:`_orm.ORMExecuteState.local_execution_options` + + :meth:`_sql.Executable.execution_options` + + :ref:`orm_queryguide_execution_options` + + """ + + local_execution_options: _ExecuteOptions + """Dictionary view of the execution options passed to the + :meth:`.Session.execute` method. 
+ + This does not include options that may be associated with the statement + being invoked. + + .. seealso:: + + :attr:`_orm.ORMExecuteState.execution_options` + + """ + + bind_arguments: _BindArguments + """The dictionary passed as the + :paramref:`_orm.Session.execute.bind_arguments` dictionary. + + This dictionary may be used by extensions to :class:`_orm.Session` to pass + arguments that will assist in determining amongst a set of database + connections which one should be used to invoke this statement. + + """ + + _compile_state_cls: Optional[Type[ORMCompileState]] + _starting_event_idx: int + _events_todo: List[Any] + _update_execution_options: Optional[_ExecuteOptions] + + def __init__( + self, + session: Session, + statement: Executable, + parameters: Optional[_CoreAnyExecuteParams], + execution_options: _ExecuteOptions, + bind_arguments: _BindArguments, + compile_state_cls: Optional[Type[ORMCompileState]], + events_todo: List[_InstanceLevelDispatch[Session]], + ): + """Construct a new :class:`_orm.ORMExecuteState`. + + this object is constructed internally. + + """ + self.session = session + self.statement = statement + self.parameters = parameters + self.local_execution_options = execution_options + self.execution_options = statement._execution_options.union( + execution_options + ) + self.bind_arguments = bind_arguments + self._compile_state_cls = compile_state_cls + self._events_todo = list(events_todo) + + def _remaining_events(self) -> List[_InstanceLevelDispatch[Session]]: + return self._events_todo[self._starting_event_idx + 1 :] + + def invoke_statement( + self, + statement: Optional[Executable] = None, + params: Optional[_CoreAnyExecuteParams] = None, + execution_options: Optional[OrmExecuteOptionsParameter] = None, + bind_arguments: Optional[_BindArguments] = None, + ) -> Result[Any]: + """Execute the statement represented by this + :class:`.ORMExecuteState`, without re-invoking events that have + already proceeded. + + This method essentially performs a re-entrant execution of the current + statement for which the :meth:`.SessionEvents.do_orm_execute` event is + being currently invoked. The use case for this is for event handlers + that want to override how the ultimate + :class:`_engine.Result` object is returned, such as for schemes that + retrieve results from an offline cache or which concatenate results + from multiple executions. + + When the :class:`_engine.Result` object is returned by the actual + handler function within :meth:`_orm.SessionEvents.do_orm_execute` and + is propagated to the calling + :meth:`_orm.Session.execute` method, the remainder of the + :meth:`_orm.Session.execute` method is preempted and the + :class:`_engine.Result` object is returned to the caller of + :meth:`_orm.Session.execute` immediately. + + :param statement: optional statement to be invoked, in place of the + statement currently represented by :attr:`.ORMExecuteState.statement`. + + :param params: optional dictionary of parameters or list of parameters + which will be merged into the existing + :attr:`.ORMExecuteState.parameters` of this :class:`.ORMExecuteState`. + + .. versionchanged:: 2.0 a list of parameter dictionaries is accepted + for executemany executions. + + :param execution_options: optional dictionary of execution options + will be merged into the existing + :attr:`.ORMExecuteState.execution_options` of this + :class:`.ORMExecuteState`. 
+ + :param bind_arguments: optional dictionary of bind_arguments + which will be merged amongst the current + :attr:`.ORMExecuteState.bind_arguments` + of this :class:`.ORMExecuteState`. + + :return: a :class:`_engine.Result` object with ORM-level results. + + .. seealso:: + + :ref:`do_orm_execute_re_executing` - background and examples on the + appropriate usage of :meth:`_orm.ORMExecuteState.invoke_statement`. + + + """ + + if statement is None: + statement = self.statement + + _bind_arguments = dict(self.bind_arguments) + if bind_arguments: + _bind_arguments.update(bind_arguments) + _bind_arguments["_sa_skip_events"] = True + + _params: Optional[_CoreAnyExecuteParams] + if params: + if self.is_executemany: + _params = [] + exec_many_parameters = cast( + "List[Dict[str, Any]]", self.parameters + ) + for _existing_params, _new_params in itertools.zip_longest( + exec_many_parameters, + cast("List[Dict[str, Any]]", params), + ): + if _existing_params is None or _new_params is None: + raise sa_exc.InvalidRequestError( + f"Can't apply executemany parameters to " + f"statement; number of parameter sets passed to " + f"Session.execute() ({len(exec_many_parameters)}) " + f"does not match number of parameter sets given " + f"to ORMExecuteState.invoke_statement() " + f"({len(params)})" + ) + _existing_params = dict(_existing_params) + _existing_params.update(_new_params) + _params.append(_existing_params) + else: + _params = dict(cast("Dict[str, Any]", self.parameters)) + _params.update(cast("Dict[str, Any]", params)) + else: + _params = self.parameters + + _execution_options = self.local_execution_options + if execution_options: + _execution_options = _execution_options.union(execution_options) + + return self.session._execute_internal( + statement, + _params, + execution_options=_execution_options, + bind_arguments=_bind_arguments, + _parent_execute_state=self, + ) + + @property + def bind_mapper(self) -> Optional[Mapper[Any]]: + """Return the :class:`_orm.Mapper` that is the primary "bind" mapper. + + For an :class:`_orm.ORMExecuteState` object invoking an ORM + statement, that is, the :attr:`_orm.ORMExecuteState.is_orm_statement` + attribute is ``True``, this attribute will return the + :class:`_orm.Mapper` that is considered to be the "primary" mapper + of the statement. The term "bind mapper" refers to the fact that + a :class:`_orm.Session` object may be "bound" to multiple + :class:`_engine.Engine` objects keyed to mapped classes, and the + "bind mapper" determines which of those :class:`_engine.Engine` objects + would be selected. + + For a statement that is invoked against a single mapped class, + :attr:`_orm.ORMExecuteState.bind_mapper` is intended to be a reliable + way of getting this mapper. + + .. versionadded:: 1.4.0b2 + + .. seealso:: + + :attr:`_orm.ORMExecuteState.all_mappers` + + + """ + mp: Optional[Mapper[Any]] = self.bind_arguments.get("mapper", None) + return mp + + @property + def all_mappers(self) -> Sequence[Mapper[Any]]: + """Return a sequence of all :class:`_orm.Mapper` objects that are + involved at the top level of this statement. + + By "top level" we mean those :class:`_orm.Mapper` objects that would + be represented in the result set rows for a :func:`_sql.select` + query, or for a :func:`_dml.update` or :func:`_dml.delete` query, + the mapper that is the main subject of the UPDATE or DELETE. + + .. versionadded:: 1.4.0b2 + + .. 
seealso:: + + :attr:`_orm.ORMExecuteState.bind_mapper` + + + + """ + if not self.is_orm_statement: + return [] + elif isinstance(self.statement, (Select, FromStatement)): + result = [] + seen = set() + for d in self.statement.column_descriptions: + ent = d["entity"] + if ent: + insp = inspect(ent, raiseerr=False) + if insp and insp.mapper and insp.mapper not in seen: + seen.add(insp.mapper) + result.append(insp.mapper) + return result + elif self.statement.is_dml and self.bind_mapper: + return [self.bind_mapper] + else: + return [] + + @property + def is_orm_statement(self) -> bool: + """return True if the operation is an ORM statement. + + This indicates that the select(), insert(), update(), or delete() + being invoked contains ORM entities as subjects. For a statement + that does not have ORM entities and instead refers only to + :class:`.Table` metadata, it is invoked as a Core SQL statement + and no ORM-level automation takes place. + + """ + return self._compile_state_cls is not None + + @property + def is_executemany(self) -> bool: + """return True if the parameters are a multi-element list of + dictionaries with more than one dictionary. + + .. versionadded:: 2.0 + + """ + return isinstance(self.parameters, list) + + @property + def is_select(self) -> bool: + """return True if this is a SELECT operation. + + .. versionchanged:: 2.0.30 - the attribute is also True for a + :meth:`_sql.Select.from_statement` construct that is itself against + a :class:`_sql.Select` construct, such as + ``select(Entity).from_statement(select(..))`` + + """ + return self.statement.is_select + + @property + def is_from_statement(self) -> bool: + """return True if this operation is a + :meth:`_sql.Select.from_statement` operation. + + This is independent from :attr:`_orm.ORMExecuteState.is_select`, as a + ``select().from_statement()`` construct can be used with + INSERT/UPDATE/DELETE RETURNING types of statements as well. + :attr:`_orm.ORMExecuteState.is_select` will only be set if the + :meth:`_sql.Select.from_statement` is itself against a + :class:`_sql.Select` construct. + + .. versionadded:: 2.0.30 + + """ + return self.statement.is_from_statement + + @property + def is_insert(self) -> bool: + """return True if this is an INSERT operation. + + .. versionchanged:: 2.0.30 - the attribute is also True for a + :meth:`_sql.Select.from_statement` construct that is itself against + a :class:`_sql.Insert` construct, such as + ``select(Entity).from_statement(insert(..))`` + + """ + return self.statement.is_dml and self.statement.is_insert + + @property + def is_update(self) -> bool: + """return True if this is an UPDATE operation. + + .. versionchanged:: 2.0.30 - the attribute is also True for a + :meth:`_sql.Select.from_statement` construct that is itself against + a :class:`_sql.Update` construct, such as + ``select(Entity).from_statement(update(..))`` + + """ + return self.statement.is_dml and self.statement.is_update + + @property + def is_delete(self) -> bool: + """return True if this is a DELETE operation. + + .. 
versionchanged:: 2.0.30 - the attribute is also True for a + :meth:`_sql.Select.from_statement` construct that is itself against + a :class:`_sql.Delete` construct, such as + ``select(Entity).from_statement(delete(..))`` + + """ + return self.statement.is_dml and self.statement.is_delete + + @property + def _is_crud(self) -> bool: + return isinstance(self.statement, (dml.Update, dml.Delete)) + + def update_execution_options(self, **opts: Any) -> None: + """Update the local execution options with new values.""" + self.local_execution_options = self.local_execution_options.union(opts) + + def _orm_compile_options( + self, + ) -> Optional[ + Union[ + context.ORMCompileState.default_compile_options, + Type[context.ORMCompileState.default_compile_options], + ] + ]: + if not self.is_select: + return None + try: + opts = self.statement._compile_options + except AttributeError: + return None + + if opts is not None and opts.isinstance( + context.ORMCompileState.default_compile_options + ): + return opts # type: ignore + else: + return None + + @property + def lazy_loaded_from(self) -> Optional[InstanceState[Any]]: + """An :class:`.InstanceState` that is using this statement execution + for a lazy load operation. + + The primary rationale for this attribute is to support the horizontal + sharding extension, where it is available within specific query + execution time hooks created by this extension. To that end, the + attribute is only intended to be meaningful at **query execution + time**, and importantly not any time prior to that, including query + compilation time. + + """ + return self.load_options._lazy_loaded_from + + @property + def loader_strategy_path(self) -> Optional[PathRegistry]: + """Return the :class:`.PathRegistry` for the current load path. + + This object represents the "path" in a query along relationships + when a particular object or collection is being loaded. + + """ + opts = self._orm_compile_options() + if opts is not None: + return opts._current_path + else: + return None + + @property + def is_column_load(self) -> bool: + """Return True if the operation is refreshing column-oriented + attributes on an existing ORM object. + + This occurs during operations such as :meth:`_orm.Session.refresh`, + as well as when an attribute deferred by :func:`_orm.defer` is + being loaded, or an attribute that was expired either directly + by :meth:`_orm.Session.expire` or via a commit operation is being + loaded. + + Handlers will very likely not want to add any options to queries + when such an operation is occurring as the query should be a straight + primary key fetch which should not have any additional WHERE criteria, + and loader options travelling with the instance + will have already been added to the query. + + .. versionadded:: 1.4.0b2 + + .. seealso:: + + :attr:`_orm.ORMExecuteState.is_relationship_load` + + """ + opts = self._orm_compile_options() + return opts is not None and opts._for_refresh_state + + @property + def is_relationship_load(self) -> bool: + """Return True if this load is loading objects on behalf of a + relationship. + + This means, the loader in effect is either a LazyLoader, + SelectInLoader, SubqueryLoader, or similar, and the entire + SELECT statement being emitted is on behalf of a relationship + load. + + Handlers will very likely not want to add any options to queries + when such an operation is occurring, as loader options are already + capable of being propagated to relationship loaders and should + be already present. + + .. 
seealso:: + + :attr:`_orm.ORMExecuteState.is_column_load` + + """ + opts = self._orm_compile_options() + if opts is None: + return False + path = self.loader_strategy_path + return path is not None and not path.is_root + + @property + def load_options( + self, + ) -> Union[ + context.QueryContext.default_load_options, + Type[context.QueryContext.default_load_options], + ]: + """Return the load_options that will be used for this execution.""" + + if not self.is_select: + raise sa_exc.InvalidRequestError( + "This ORM execution is not against a SELECT statement " + "so there are no load options." + ) + + lo: Union[ + context.QueryContext.default_load_options, + Type[context.QueryContext.default_load_options], + ] = self.execution_options.get( + "_sa_orm_load_options", context.QueryContext.default_load_options + ) + return lo + + @property + def update_delete_options( + self, + ) -> Union[ + bulk_persistence.BulkUDCompileState.default_update_options, + Type[bulk_persistence.BulkUDCompileState.default_update_options], + ]: + """Return the update_delete_options that will be used for this + execution.""" + + if not self._is_crud: + raise sa_exc.InvalidRequestError( + "This ORM execution is not against an UPDATE or DELETE " + "statement so there are no update options." + ) + uo: Union[ + bulk_persistence.BulkUDCompileState.default_update_options, + Type[bulk_persistence.BulkUDCompileState.default_update_options], + ] = self.execution_options.get( + "_sa_orm_update_options", + bulk_persistence.BulkUDCompileState.default_update_options, + ) + return uo + + @property + def _non_compile_orm_options(self) -> Sequence[ORMOption]: + return [ + opt + for opt in self.statement._with_options + if is_orm_option(opt) and not opt._is_compile_state + ] + + @property + def user_defined_options(self) -> Sequence[UserDefinedOption]: + """The sequence of :class:`.UserDefinedOptions` that have been + associated with the statement being invoked. + + """ + return [ + opt + for opt in self.statement._with_options + if is_user_defined_option(opt) + ] + + +class SessionTransactionOrigin(Enum): + """indicates the origin of a :class:`.SessionTransaction`. + + This enumeration is present on the + :attr:`.SessionTransaction.origin` attribute of any + :class:`.SessionTransaction` object. + + .. versionadded:: 2.0 + + """ + + AUTOBEGIN = 0 + """transaction were started by autobegin""" + + BEGIN = 1 + """transaction were started by calling :meth:`_orm.Session.begin`""" + + BEGIN_NESTED = 2 + """tranaction were started by :meth:`_orm.Session.begin_nested`""" + + SUBTRANSACTION = 3 + """transaction is an internal "subtransaction" """ + + +class SessionTransaction(_StateChange, TransactionalContext): + """A :class:`.Session`-level transaction. + + :class:`.SessionTransaction` is produced from the + :meth:`_orm.Session.begin` + and :meth:`_orm.Session.begin_nested` methods. It's largely an internal + object that in modern use provides a context manager for session + transactions. + + Documentation on interacting with :class:`_orm.SessionTransaction` is + at: :ref:`unitofwork_transaction`. + + + .. versionchanged:: 1.4 The scoping and API methods to work with the + :class:`_orm.SessionTransaction` object directly have been simplified. + + .. 
seealso:: + + :ref:`unitofwork_transaction` + + :meth:`.Session.begin` + + :meth:`.Session.begin_nested` + + :meth:`.Session.rollback` + + :meth:`.Session.commit` + + :meth:`.Session.in_transaction` + + :meth:`.Session.in_nested_transaction` + + :meth:`.Session.get_transaction` + + :meth:`.Session.get_nested_transaction` + + + """ + + _rollback_exception: Optional[BaseException] = None + + _connections: Dict[ + Union[Engine, Connection], Tuple[Connection, Transaction, bool, bool] + ] + session: Session + _parent: Optional[SessionTransaction] + + _state: SessionTransactionState + + _new: weakref.WeakKeyDictionary[InstanceState[Any], object] + _deleted: weakref.WeakKeyDictionary[InstanceState[Any], object] + _dirty: weakref.WeakKeyDictionary[InstanceState[Any], object] + _key_switches: weakref.WeakKeyDictionary[ + InstanceState[Any], Tuple[Any, Any] + ] + + origin: SessionTransactionOrigin + """Origin of this :class:`_orm.SessionTransaction`. + + Refers to a :class:`.SessionTransactionOrigin` instance which is an + enumeration indicating the source event that led to constructing + this :class:`_orm.SessionTransaction`. + + .. versionadded:: 2.0 + + """ + + nested: bool = False + """Indicates if this is a nested, or SAVEPOINT, transaction. + + When :attr:`.SessionTransaction.nested` is True, it is expected + that :attr:`.SessionTransaction.parent` will be present as well, + linking to the enclosing :class:`.SessionTransaction`. + + .. seealso:: + + :attr:`.SessionTransaction.origin` + + """ + + def __init__( + self, + session: Session, + origin: SessionTransactionOrigin, + parent: Optional[SessionTransaction] = None, + ): + TransactionalContext._trans_ctx_check(session) + + self.session = session + self._connections = {} + self._parent = parent + self.nested = nested = origin is SessionTransactionOrigin.BEGIN_NESTED + self.origin = origin + + if session._close_state is _SessionCloseState.CLOSED: + raise sa_exc.InvalidRequestError( + "This Session has been permanently closed and is unable " + "to handle any more transaction requests." + ) + + if nested: + if not parent: + raise sa_exc.InvalidRequestError( + "Can't start a SAVEPOINT transaction when no existing " + "transaction is in progress" + ) + + self._previous_nested_transaction = session._nested_transaction + elif origin is SessionTransactionOrigin.SUBTRANSACTION: + assert parent is not None + else: + assert parent is None + + self._state = SessionTransactionState.ACTIVE + + self._take_snapshot() + + # make sure transaction is assigned before we call the + # dispatch + self.session._transaction = self + + self.session.dispatch.after_transaction_create(self.session, self) + + def _raise_for_prerequisite_state( + self, operation_name: str, state: _StateChangeState + ) -> NoReturn: + if state is SessionTransactionState.DEACTIVE: + if self._rollback_exception: + raise sa_exc.PendingRollbackError( + "This Session's transaction has been rolled back " + "due to a previous exception during flush." + " To begin a new transaction with this Session, " + "first issue Session.rollback()." + f" Original exception was: {self._rollback_exception}", + code="7s2a", + ) + else: + raise sa_exc.InvalidRequestError( + "This session is in 'inactive' state, due to the " + "SQL transaction being rolled back; no further SQL " + "can be emitted within this transaction." 
+ ) + elif state is SessionTransactionState.CLOSED: + raise sa_exc.ResourceClosedError("This transaction is closed") + elif state is SessionTransactionState.PROVISIONING_CONNECTION: + raise sa_exc.InvalidRequestError( + "This session is provisioning a new connection; concurrent " + "operations are not permitted", + code="isce", + ) + else: + raise sa_exc.InvalidRequestError( + f"This session is in '{state.name.lower()}' state; no " + "further SQL can be emitted within this transaction." + ) + + @property + def parent(self) -> Optional[SessionTransaction]: + """The parent :class:`.SessionTransaction` of this + :class:`.SessionTransaction`. + + If this attribute is ``None``, indicates this + :class:`.SessionTransaction` is at the top of the stack, and + corresponds to a real "COMMIT"/"ROLLBACK" + block. If non-``None``, then this is either a "subtransaction" + (an internal marker object used by the flush process) or a + "nested" / SAVEPOINT transaction. If the + :attr:`.SessionTransaction.nested` attribute is ``True``, then + this is a SAVEPOINT, and if ``False``, indicates this a subtransaction. + + """ + return self._parent + + @property + def is_active(self) -> bool: + return ( + self.session is not None + and self._state is SessionTransactionState.ACTIVE + ) + + @property + def _is_transaction_boundary(self) -> bool: + return self.nested or not self._parent + + @_StateChange.declare_states( + (SessionTransactionState.ACTIVE,), _StateChangeStates.NO_CHANGE + ) + def connection( + self, + bindkey: Optional[Mapper[Any]], + execution_options: Optional[_ExecuteOptions] = None, + **kwargs: Any, + ) -> Connection: + bind = self.session.get_bind(bindkey, **kwargs) + return self._connection_for_bind(bind, execution_options) + + @_StateChange.declare_states( + (SessionTransactionState.ACTIVE,), _StateChangeStates.NO_CHANGE + ) + def _begin(self, nested: bool = False) -> SessionTransaction: + return SessionTransaction( + self.session, + ( + SessionTransactionOrigin.BEGIN_NESTED + if nested + else SessionTransactionOrigin.SUBTRANSACTION + ), + self, + ) + + def _iterate_self_and_parents( + self, upto: Optional[SessionTransaction] = None + ) -> Iterable[SessionTransaction]: + current = self + result: Tuple[SessionTransaction, ...] = () + while current: + result += (current,) + if current._parent is upto: + break + elif current._parent is None: + raise sa_exc.InvalidRequestError( + "Transaction %s is not on the active transaction list" + % (upto) + ) + else: + current = current._parent + + return result + + def _take_snapshot(self) -> None: + if not self._is_transaction_boundary: + parent = self._parent + assert parent is not None + self._new = parent._new + self._deleted = parent._deleted + self._dirty = parent._dirty + self._key_switches = parent._key_switches + return + + is_begin = self.origin in ( + SessionTransactionOrigin.BEGIN, + SessionTransactionOrigin.AUTOBEGIN, + ) + if not is_begin and not self.session._flushing: + self.session.flush() + + self._new = weakref.WeakKeyDictionary() + self._deleted = weakref.WeakKeyDictionary() + self._dirty = weakref.WeakKeyDictionary() + self._key_switches = weakref.WeakKeyDictionary() + + def _restore_snapshot(self, dirty_only: bool = False) -> None: + """Restore the restoration state taken before a transaction began. + + Corresponds to a rollback. 
+ + """ + assert self._is_transaction_boundary + + to_expunge = set(self._new).union(self.session._new) + self.session._expunge_states(to_expunge, to_transient=True) + + for s, (oldkey, newkey) in self._key_switches.items(): + # we probably can do this conditionally based on + # if we expunged or not, but safe_discard does that anyway + self.session.identity_map.safe_discard(s) + + # restore the old key + s.key = oldkey + + # now restore the object, but only if we didn't expunge + if s not in to_expunge: + self.session.identity_map.replace(s) + + for s in set(self._deleted).union(self.session._deleted): + self.session._update_impl(s, revert_deletion=True) + + assert not self.session._deleted + + for s in self.session.identity_map.all_states(): + if not dirty_only or s.modified or s in self._dirty: + s._expire(s.dict, self.session.identity_map._modified) + + def _remove_snapshot(self) -> None: + """Remove the restoration state taken before a transaction began. + + Corresponds to a commit. + + """ + assert self._is_transaction_boundary + + if not self.nested and self.session.expire_on_commit: + for s in self.session.identity_map.all_states(): + s._expire(s.dict, self.session.identity_map._modified) + + statelib.InstanceState._detach_states( + list(self._deleted), self.session + ) + self._deleted.clear() + elif self.nested: + parent = self._parent + assert parent is not None + parent._new.update(self._new) + parent._dirty.update(self._dirty) + parent._deleted.update(self._deleted) + parent._key_switches.update(self._key_switches) + + @_StateChange.declare_states( + (SessionTransactionState.ACTIVE,), _StateChangeStates.NO_CHANGE + ) + def _connection_for_bind( + self, + bind: _SessionBind, + execution_options: Optional[CoreExecuteOptionsParameter], + ) -> Connection: + if bind in self._connections: + if execution_options: + util.warn( + "Connection is already established for the " + "given bind; execution_options ignored" + ) + return self._connections[bind][0] + + self._state = SessionTransactionState.PROVISIONING_CONNECTION + + local_connect = False + should_commit = True + + try: + if self._parent: + conn = self._parent._connection_for_bind( + bind, execution_options + ) + if not self.nested: + return conn + else: + if isinstance(bind, engine.Connection): + conn = bind + if conn.engine in self._connections: + raise sa_exc.InvalidRequestError( + "Session already has a Connection associated " + "for the given Connection's Engine" + ) + else: + conn = bind.connect() + local_connect = True + + try: + if execution_options: + conn = conn.execution_options(**execution_options) + + transaction: Transaction + if self.session.twophase and self._parent is None: + # TODO: shouldn't we only be here if not + # conn.in_transaction() ? + # if twophase is set and conn.in_transaction(), validate + # that it is in fact twophase. + transaction = conn.begin_twophase() + elif self.nested: + transaction = conn.begin_nested() + elif conn.in_transaction(): + join_transaction_mode = self.session.join_transaction_mode + + if join_transaction_mode == "conditional_savepoint": + if conn.in_nested_transaction(): + join_transaction_mode = "create_savepoint" + else: + join_transaction_mode = "rollback_only" + + if local_connect: + util.warn( + "The engine provided as bind produced a " + "connection that is already in a transaction. " + "This is usually caused by a core event, " + "such as 'engine_connect', that has left a " + "transaction open. 
The effective join " + "transaction mode used by this session is " + f"{join_transaction_mode!r}. To silence this " + "warning, do not leave transactions open" + ) + if join_transaction_mode in ( + "control_fully", + "rollback_only", + ): + if conn.in_nested_transaction(): + transaction = ( + conn._get_required_nested_transaction() + ) + else: + transaction = conn._get_required_transaction() + if join_transaction_mode == "rollback_only": + should_commit = False + elif join_transaction_mode == "create_savepoint": + transaction = conn.begin_nested() + else: + assert False, join_transaction_mode + else: + transaction = conn.begin() + except: + # connection will not not be associated with this Session; + # close it immediately so that it isn't closed under GC + if local_connect: + conn.close() + raise + else: + bind_is_connection = isinstance(bind, engine.Connection) + + self._connections[conn] = self._connections[conn.engine] = ( + conn, + transaction, + should_commit, + not bind_is_connection, + ) + self.session.dispatch.after_begin(self.session, self, conn) + return conn + finally: + self._state = SessionTransactionState.ACTIVE + + def prepare(self) -> None: + if self._parent is not None or not self.session.twophase: + raise sa_exc.InvalidRequestError( + "'twophase' mode not enabled, or not root transaction; " + "can't prepare." + ) + self._prepare_impl() + + @_StateChange.declare_states( + (SessionTransactionState.ACTIVE,), SessionTransactionState.PREPARED + ) + def _prepare_impl(self) -> None: + if self._parent is None or self.nested: + self.session.dispatch.before_commit(self.session) + + stx = self.session._transaction + assert stx is not None + if stx is not self: + for subtransaction in stx._iterate_self_and_parents(upto=self): + subtransaction.commit() + + if not self.session._flushing: + for _flush_guard in range(100): + if self.session._is_clean(): + break + self.session.flush() + else: + raise exc.FlushError( + "Over 100 subsequent flushes have occurred within " + "session.commit() - is an after_flush() hook " + "creating new objects?" 
+ ) + + if self._parent is None and self.session.twophase: + try: + for t in set(self._connections.values()): + cast("TwoPhaseTransaction", t[1]).prepare() + except: + with util.safe_reraise(): + self.rollback() + + self._state = SessionTransactionState.PREPARED + + @_StateChange.declare_states( + (SessionTransactionState.ACTIVE, SessionTransactionState.PREPARED), + SessionTransactionState.CLOSED, + ) + def commit(self, _to_root: bool = False) -> None: + if self._state is not SessionTransactionState.PREPARED: + with self._expect_state(SessionTransactionState.PREPARED): + self._prepare_impl() + + if self._parent is None or self.nested: + for conn, trans, should_commit, autoclose in set( + self._connections.values() + ): + if should_commit: + trans.commit() + + self._state = SessionTransactionState.COMMITTED + self.session.dispatch.after_commit(self.session) + + self._remove_snapshot() + + with self._expect_state(SessionTransactionState.CLOSED): + self.close() + + if _to_root and self._parent: + self._parent.commit(_to_root=True) + + @_StateChange.declare_states( + ( + SessionTransactionState.ACTIVE, + SessionTransactionState.DEACTIVE, + SessionTransactionState.PREPARED, + ), + SessionTransactionState.CLOSED, + ) + def rollback( + self, _capture_exception: bool = False, _to_root: bool = False + ) -> None: + stx = self.session._transaction + assert stx is not None + if stx is not self: + for subtransaction in stx._iterate_self_and_parents(upto=self): + subtransaction.close() + + boundary = self + rollback_err = None + if self._state in ( + SessionTransactionState.ACTIVE, + SessionTransactionState.PREPARED, + ): + for transaction in self._iterate_self_and_parents(): + if transaction._parent is None or transaction.nested: + try: + for t in set(transaction._connections.values()): + t[1].rollback() + + transaction._state = SessionTransactionState.DEACTIVE + self.session.dispatch.after_rollback(self.session) + except: + rollback_err = sys.exc_info() + finally: + transaction._state = SessionTransactionState.DEACTIVE + transaction._restore_snapshot( + dirty_only=transaction.nested + ) + boundary = transaction + break + else: + transaction._state = SessionTransactionState.DEACTIVE + + sess = self.session + + if not rollback_err and not sess._is_clean(): + # if items were added, deleted, or mutated + # here, we need to re-restore the snapshot + util.warn( + "Session's state has been changed on " + "a non-active transaction - this state " + "will be discarded." 
+ ) + boundary._restore_snapshot(dirty_only=boundary.nested) + + with self._expect_state(SessionTransactionState.CLOSED): + self.close() + + if self._parent and _capture_exception: + self._parent._rollback_exception = sys.exc_info()[1] + + if rollback_err and rollback_err[1]: + raise rollback_err[1].with_traceback(rollback_err[2]) + + sess.dispatch.after_soft_rollback(sess, self) + + if _to_root and self._parent: + self._parent.rollback(_to_root=True) + + @_StateChange.declare_states( + _StateChangeStates.ANY, SessionTransactionState.CLOSED + ) + def close(self, invalidate: bool = False) -> None: + if self.nested: + self.session._nested_transaction = ( + self._previous_nested_transaction + ) + + self.session._transaction = self._parent + + for connection, transaction, should_commit, autoclose in set( + self._connections.values() + ): + if invalidate and self._parent is None: + connection.invalidate() + if should_commit and transaction.is_active: + transaction.close() + if autoclose and self._parent is None: + connection.close() + + self._state = SessionTransactionState.CLOSED + sess = self.session + + # TODO: these two None sets were historically after the + # event hook below, and in 2.0 I changed it this way for some reason, + # and I remember there being a reason, but not what it was. + # Why do we need to get rid of them at all? test_memusage::CycleTest + # passes with these commented out. + # self.session = None # type: ignore + # self._connections = None # type: ignore + + sess.dispatch.after_transaction_end(sess, self) + + def _get_subject(self) -> Session: + return self.session + + def _transaction_is_active(self) -> bool: + return self._state is SessionTransactionState.ACTIVE + + def _transaction_is_closed(self) -> bool: + return self._state is SessionTransactionState.CLOSED + + def _rollback_can_be_called(self) -> bool: + return self._state not in (COMMITTED, CLOSED) + + +class _SessionCloseState(Enum): + ACTIVE = 1 + CLOSED = 2 + CLOSE_IS_RESET = 3 + + +class Session(_SessionClassMethods, EventTarget): + """Manages persistence operations for ORM-mapped objects. + + The :class:`_orm.Session` is **not safe for use in concurrent threads.**. + See :ref:`session_faq_threadsafe` for background. + + The Session's usage paradigm is described at :doc:`/orm/session`. + + + """ + + _is_asyncio = False + + dispatch: dispatcher[Session] + + identity_map: IdentityMap + """A mapping of object identities to objects themselves. + + Iterating through ``Session.identity_map.values()`` provides + access to the full set of persistent objects (i.e., those + that have row identity) currently in the session. + + .. seealso:: + + :func:`.identity_key` - helper function to produce the keys used + in this dictionary. 
+ + """ + + _new: Dict[InstanceState[Any], Any] + _deleted: Dict[InstanceState[Any], Any] + bind: Optional[Union[Engine, Connection]] + __binds: Dict[_SessionBindKey, _SessionBind] + _flushing: bool + _warn_on_events: bool + _transaction: Optional[SessionTransaction] + _nested_transaction: Optional[SessionTransaction] + hash_key: int + autoflush: bool + expire_on_commit: bool + enable_baked_queries: bool + twophase: bool + join_transaction_mode: JoinTransactionMode + _query_cls: Type[Query[Any]] + _close_state: _SessionCloseState + + def __init__( + self, + bind: Optional[_SessionBind] = None, + *, + autoflush: bool = True, + future: Literal[True] = True, + expire_on_commit: bool = True, + autobegin: bool = True, + twophase: bool = False, + binds: Optional[Dict[_SessionBindKey, _SessionBind]] = None, + enable_baked_queries: bool = True, + info: Optional[_InfoType] = None, + query_cls: Optional[Type[Query[Any]]] = None, + autocommit: Literal[False] = False, + join_transaction_mode: JoinTransactionMode = "conditional_savepoint", + close_resets_only: Union[bool, _NoArg] = _NoArg.NO_ARG, + ): + r"""Construct a new :class:`_orm.Session`. + + See also the :class:`.sessionmaker` function which is used to + generate a :class:`.Session`-producing callable with a given + set of arguments. + + :param autoflush: When ``True``, all query operations will issue a + :meth:`~.Session.flush` call to this ``Session`` before proceeding. + This is a convenience feature so that :meth:`~.Session.flush` need + not be called repeatedly in order for database queries to retrieve + results. + + .. seealso:: + + :ref:`session_flushing` - additional background on autoflush + + :param autobegin: Automatically start transactions (i.e. equivalent to + invoking :meth:`_orm.Session.begin`) when database access is + requested by an operation. Defaults to ``True``. Set to + ``False`` to prevent a :class:`_orm.Session` from implicitly + beginning transactions after construction, as well as after any of + the :meth:`_orm.Session.rollback`, :meth:`_orm.Session.commit`, + or :meth:`_orm.Session.close` methods are called. + + .. versionadded:: 2.0 + + .. seealso:: + + :ref:`session_autobegin_disable` + + :param bind: An optional :class:`_engine.Engine` or + :class:`_engine.Connection` to + which this ``Session`` should be bound. When specified, all SQL + operations performed by this session will execute via this + connectable. + + :param binds: A dictionary which may specify any number of + :class:`_engine.Engine` or :class:`_engine.Connection` + objects as the source of + connectivity for SQL operations on a per-entity basis. The keys + of the dictionary consist of any series of mapped classes, + arbitrary Python classes that are bases for mapped classes, + :class:`_schema.Table` objects and :class:`_orm.Mapper` objects. + The + values of the dictionary are then instances of + :class:`_engine.Engine` + or less commonly :class:`_engine.Connection` objects. + Operations which + proceed relative to a particular mapped class will consult this + dictionary for the closest matching entity in order to determine + which :class:`_engine.Engine` should be used for a particular SQL + operation. The complete heuristics for resolution are + described at :meth:`.Session.get_bind`. 
Usage looks like:: + + Session = sessionmaker( + binds={ + SomeMappedClass: create_engine("postgresql+psycopg2://engine1"), + SomeDeclarativeBase: create_engine( + "postgresql+psycopg2://engine2" + ), + some_mapper: create_engine("postgresql+psycopg2://engine3"), + some_table: create_engine("postgresql+psycopg2://engine4"), + } + ) + + .. seealso:: + + :ref:`session_partitioning` + + :meth:`.Session.bind_mapper` + + :meth:`.Session.bind_table` + + :meth:`.Session.get_bind` + + + :param \class_: Specify an alternate class other than + ``sqlalchemy.orm.session.Session`` which should be used by the + returned class. This is the only argument that is local to the + :class:`.sessionmaker` function, and is not sent directly to the + constructor for ``Session``. + + :param enable_baked_queries: legacy; defaults to ``True``. + A parameter consumed + by the :mod:`sqlalchemy.ext.baked` extension to determine if + "baked queries" should be cached, as is the normal operation + of this extension. When set to ``False``, caching as used by + this particular extension is disabled. + + .. versionchanged:: 1.4 The ``sqlalchemy.ext.baked`` extension is + legacy and is not used by any of SQLAlchemy's internals. This + flag therefore only affects applications that are making explicit + use of this extension within their own code. + + :param expire_on_commit: Defaults to ``True``. When ``True``, all + instances will be fully expired after each :meth:`~.commit`, + so that all attribute/object access subsequent to a completed + transaction will load from the most recent database state. + + .. seealso:: + + :ref:`session_committing` + + :param future: Deprecated; this flag is always True. + + .. seealso:: + + :ref:`migration_20_toplevel` + + :param info: optional dictionary of arbitrary data to be associated + with this :class:`.Session`. Is available via the + :attr:`.Session.info` attribute. Note the dictionary is copied at + construction time so that modifications to the per- + :class:`.Session` dictionary will be local to that + :class:`.Session`. + + :param query_cls: Class which should be used to create new Query + objects, as returned by the :meth:`~.Session.query` method. + Defaults to :class:`_query.Query`. + + :param twophase: When ``True``, all transactions will be started as + a "two phase" transaction, i.e. using the "two phase" semantics + of the database in use along with an XID. During a + :meth:`~.commit`, after :meth:`~.flush` has been issued for all + attached databases, the :meth:`~.TwoPhaseTransaction.prepare` + method on each database's :class:`.TwoPhaseTransaction` will be + called. This allows each database to roll back the entire + transaction, before each transaction is committed. + + :param autocommit: the "autocommit" keyword is present for backwards + compatibility but must remain at its default value of ``False``. + + :param join_transaction_mode: Describes the transactional behavior to + take when a given bind is a :class:`_engine.Connection` that + has already begun a transaction outside the scope of this + :class:`_orm.Session`; in other words the + :meth:`_engine.Connection.in_transaction()` method returns True. + + The following behaviors only take effect when the :class:`_orm.Session` + **actually makes use of the connection given**; that is, a method + such as :meth:`_orm.Session.execute`, :meth:`_orm.Session.connection`, + etc. are actually invoked: + + * ``"conditional_savepoint"`` - this is the default. 
if the given + :class:`_engine.Connection` is begun within a transaction but + does not have a SAVEPOINT, then ``"rollback_only"`` is used. + If the :class:`_engine.Connection` is additionally within + a SAVEPOINT, in other words + :meth:`_engine.Connection.in_nested_transaction()` method returns + True, then ``"create_savepoint"`` is used. + + ``"conditional_savepoint"`` behavior attempts to make use of + savepoints in order to keep the state of the existing transaction + unchanged, but only if there is already a savepoint in progress; + otherwise, it is not assumed that the backend in use has adequate + support for SAVEPOINT, as availability of this feature varies. + ``"conditional_savepoint"`` also seeks to establish approximate + backwards compatibility with previous :class:`_orm.Session` + behavior, for applications that are not setting a specific mode. It + is recommended that one of the explicit settings be used. + + * ``"create_savepoint"`` - the :class:`_orm.Session` will use + :meth:`_engine.Connection.begin_nested()` in all cases to create + its own transaction. This transaction by its nature rides + "on top" of any existing transaction that's opened on the given + :class:`_engine.Connection`; if the underlying database and + the driver in use has full, non-broken support for SAVEPOINT, the + external transaction will remain unaffected throughout the + lifespan of the :class:`_orm.Session`. + + The ``"create_savepoint"`` mode is the most useful for integrating + a :class:`_orm.Session` into a test suite where an externally + initiated transaction should remain unaffected; however, it relies + on proper SAVEPOINT support from the underlying driver and + database. + + .. tip:: When using SQLite, the SQLite driver included through + Python 3.11 does not handle SAVEPOINTs correctly in all cases + without workarounds. See the sections + :ref:`pysqlite_serializable` and :ref:`aiosqlite_serializable` + for details on current workarounds. + + * ``"control_fully"`` - the :class:`_orm.Session` will take + control of the given transaction as its own; + :meth:`_orm.Session.commit` will call ``.commit()`` on the + transaction, :meth:`_orm.Session.rollback` will call + ``.rollback()`` on the transaction, :meth:`_orm.Session.close` will + call ``.rollback`` on the transaction. + + .. tip:: This mode of use is equivalent to how SQLAlchemy 1.4 would + handle a :class:`_engine.Connection` given with an existing + SAVEPOINT (i.e. :meth:`_engine.Connection.begin_nested`); the + :class:`_orm.Session` would take full control of the existing + SAVEPOINT. + + * ``"rollback_only"`` - the :class:`_orm.Session` will take control + of the given transaction for ``.rollback()`` calls only; + ``.commit()`` calls will not be propagated to the given + transaction. ``.close()`` calls will have no effect on the + given transaction. + + .. tip:: This mode of use is equivalent to how SQLAlchemy 1.4 would + handle a :class:`_engine.Connection` given with an existing + regular database transaction (i.e. + :meth:`_engine.Connection.begin`); the :class:`_orm.Session` + would propagate :meth:`_orm.Session.rollback` calls to the + underlying transaction, but not :meth:`_orm.Session.commit` or + :meth:`_orm.Session.close` calls. + + .. versionadded:: 2.0.0rc1 + + :param close_resets_only: Defaults to ``True``. Determines if + the session should reset itself after calling ``.close()`` + or should pass in a no longer usable state, disabling re-use. + + .. versionadded:: 2.0.22 added flag ``close_resets_only``. 
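# Illustrative sketch, not part of the library source: the external-transaction
# pattern that join_transaction_mode targets.  The Connection and its outer
# transaction are begun outside the Session; "create_savepoint" keeps that
# outer transaction intact so the caller can roll everything back at the end.
# ``engine`` is assumed to be defined elsewhere.
from sqlalchemy.orm import Session

with engine.connect() as connection:
    outer = connection.begin()            # transaction owned by the caller
    session = Session(
        bind=connection,
        join_transaction_mode="create_savepoint",
    )
    try:
        # ... use the session; its commit() only releases SAVEPOINTs ...
        session.commit()
    finally:
        session.close()
        outer.rollback()                  # outer transaction discards all work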
+ A future SQLAlchemy version may change the default value of + this flag to ``False``. + + .. seealso:: + + :ref:`session_closing` - Detail on the semantics of + :meth:`_orm.Session.close` and :meth:`_orm.Session.reset`. + + """ # noqa + + # considering allowing the "autocommit" keyword to still be accepted + # as long as it's False, so that external test suites, oslo.db etc + # continue to function as the argument appears to be passed in lots + # of cases including in our own test suite + if autocommit: + raise sa_exc.ArgumentError( + "autocommit=True is no longer supported" + ) + self.identity_map = identity.WeakInstanceDict() + + if not future: + raise sa_exc.ArgumentError( + "The 'future' parameter passed to " + "Session() may only be set to True." + ) + + self._new = {} # InstanceState->object, strong refs object + self._deleted = {} # same + self.bind = bind + self.__binds = {} + self._flushing = False + self._warn_on_events = False + self._transaction = None + self._nested_transaction = None + self.hash_key = _new_sessionid() + self.autobegin = autobegin + self.autoflush = autoflush + self.expire_on_commit = expire_on_commit + self.enable_baked_queries = enable_baked_queries + + # the idea is that at some point NO_ARG will warn that in the future + # the default will switch to close_resets_only=False. + if close_resets_only in (True, _NoArg.NO_ARG): + self._close_state = _SessionCloseState.CLOSE_IS_RESET + else: + self._close_state = _SessionCloseState.ACTIVE + if ( + join_transaction_mode + and join_transaction_mode + not in JoinTransactionMode.__args__ # type: ignore + ): + raise sa_exc.ArgumentError( + f"invalid selection for join_transaction_mode: " + f'"{join_transaction_mode}"' + ) + self.join_transaction_mode = join_transaction_mode + + self.twophase = twophase + self._query_cls = query_cls if query_cls else query.Query + if info: + self.info.update(info) + + if binds is not None: + for key, bind in binds.items(): + self._add_bind(key, bind) + + _sessions[self.hash_key] = self + + # used by sqlalchemy.engine.util.TransactionalContext + _trans_context_manager: Optional[TransactionalContext] = None + + connection_callable: Optional[_ConnectionCallableProto] = None + + def __enter__(self: _S) -> _S: + return self + + def __exit__(self, type_: Any, value: Any, traceback: Any) -> None: + self.close() + + @contextlib.contextmanager + def _maker_context_manager(self: _S) -> Iterator[_S]: + with self: + with self.begin(): + yield self + + def in_transaction(self) -> bool: + """Return True if this :class:`_orm.Session` has begun a transaction. + + .. versionadded:: 1.4 + + .. seealso:: + + :attr:`_orm.Session.is_active` + + + """ + return self._transaction is not None + + def in_nested_transaction(self) -> bool: + """Return True if this :class:`_orm.Session` has begun a nested + transaction, e.g. SAVEPOINT. + + .. versionadded:: 1.4 + + """ + return self._nested_transaction is not None + + def get_transaction(self) -> Optional[SessionTransaction]: + """Return the current root transaction in progress, if any. + + .. versionadded:: 1.4 + + """ + trans = self._transaction + while trans is not None and trans._parent is not None: + trans = trans._parent + return trans + + def get_nested_transaction(self) -> Optional[SessionTransaction]: + """Return the current nested transaction in progress, if any. + + .. versionadded:: 1.4 + + """ + + return self._nested_transaction + + @util.memoized_property + def info(self) -> _InfoType: + """A user-modifiable dictionary. 
+ + The initial value of this dictionary can be populated using the + ``info`` argument to the :class:`.Session` constructor or + :class:`.sessionmaker` constructor or factory methods. The dictionary + here is always local to this :class:`.Session` and can be modified + independently of all other :class:`.Session` objects. + + """ + return {} + + def _autobegin_t(self, begin: bool = False) -> SessionTransaction: + if self._transaction is None: + if not begin and not self.autobegin: + raise sa_exc.InvalidRequestError( + "Autobegin is disabled on this Session; please call " + "session.begin() to start a new transaction" + ) + trans = SessionTransaction( + self, + ( + SessionTransactionOrigin.BEGIN + if begin + else SessionTransactionOrigin.AUTOBEGIN + ), + ) + assert self._transaction is trans + return trans + + return self._transaction + + def begin(self, nested: bool = False) -> SessionTransaction: + """Begin a transaction, or nested transaction, + on this :class:`.Session`, if one is not already begun. + + The :class:`_orm.Session` object features **autobegin** behavior, + so that normally it is not necessary to call the + :meth:`_orm.Session.begin` + method explicitly. However, it may be used in order to control + the scope of when the transactional state is begun. + + When used to begin the outermost transaction, an error is raised + if this :class:`.Session` is already inside of a transaction. + + :param nested: if True, begins a SAVEPOINT transaction and is + equivalent to calling :meth:`~.Session.begin_nested`. For + documentation on SAVEPOINT transactions, please see + :ref:`session_begin_nested`. + + :return: the :class:`.SessionTransaction` object. Note that + :class:`.SessionTransaction` + acts as a Python context manager, allowing :meth:`.Session.begin` + to be used in a "with" block. See :ref:`session_explicit_begin` for + an example. + + .. seealso:: + + :ref:`session_autobegin` + + :ref:`unitofwork_transaction` + + :meth:`.Session.begin_nested` + + + """ + + trans = self._transaction + if trans is None: + trans = self._autobegin_t(begin=True) + + if not nested: + return trans + + assert trans is not None + + if nested: + trans = trans._begin(nested=nested) + assert self._transaction is trans + self._nested_transaction = trans + else: + raise sa_exc.InvalidRequestError( + "A transaction is already begun on this Session." + ) + + return trans # needed for __enter__/__exit__ hook + + def begin_nested(self) -> SessionTransaction: + """Begin a "nested" transaction on this Session, e.g. SAVEPOINT. + + The target database(s) and associated drivers must support SQL + SAVEPOINT for this method to function correctly. + + For documentation on SAVEPOINT + transactions, please see :ref:`session_begin_nested`. + + :return: the :class:`.SessionTransaction` object. Note that + :class:`.SessionTransaction` acts as a context manager, allowing + :meth:`.Session.begin_nested` to be used in a "with" block. + See :ref:`session_begin_nested` for a usage example. + + .. seealso:: + + :ref:`session_begin_nested` + + :ref:`pysqlite_serializable` - special workarounds required + with the SQLite driver in order for SAVEPOINT to work + correctly. For asyncio use cases, see the section + :ref:`aiosqlite_serializable`. + + """ + return self.begin(nested=True) + + def rollback(self) -> None: + """Rollback the current transaction in progress. + + If no transaction is in progress, this method is a pass-through. 
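# Illustrative sketch, not part of the library source: explicit use of the
# begin()/commit()/rollback() methods documented above.  Because of autobegin
# the ``with session.begin():`` block is optional, but it makes the transaction
# scope explicit and rolls back automatically on error.  ``engine`` and the
# mapped class ``User`` are assumed to be defined elsewhere.
from sqlalchemy.orm import Session

with Session(engine) as session:
    with session.begin():                 # commits on success, rolls back on exception
        session.add(User(name="someuser"))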
+ + The method always rolls back + the topmost database transaction, discarding any nested + transactions that may be in progress. + + .. seealso:: + + :ref:`session_rollback` + + :ref:`unitofwork_transaction` + + """ + if self._transaction is None: + pass + else: + self._transaction.rollback(_to_root=True) + + def commit(self) -> None: + """Flush pending changes and commit the current transaction. + + When the COMMIT operation is complete, all objects are fully + :term:`expired`, erasing their internal contents, which will be + automatically re-loaded when the objects are next accessed. In the + interim, these objects are in an expired state and will not function if + they are :term:`detached` from the :class:`.Session`. Additionally, + this re-load operation is not supported when using asyncio-oriented + APIs. The :paramref:`.Session.expire_on_commit` parameter may be used + to disable this behavior. + + When there is no transaction in place for the :class:`.Session`, + indicating that no operations were invoked on this :class:`.Session` + since the previous call to :meth:`.Session.commit`, the method will + begin and commit an internal-only "logical" transaction, that does not + normally affect the database unless pending flush changes were + detected, but will still invoke event handlers and object expiration + rules. + + The outermost database transaction is committed unconditionally, + automatically releasing any SAVEPOINTs in effect. + + .. seealso:: + + :ref:`session_committing` + + :ref:`unitofwork_transaction` + + :ref:`asyncio_orm_avoid_lazyloads` + + """ + trans = self._transaction + if trans is None: + trans = self._autobegin_t() + + trans.commit(_to_root=True) + + def prepare(self) -> None: + """Prepare the current transaction in progress for two phase commit. + + If no transaction is in progress, this method raises an + :exc:`~sqlalchemy.exc.InvalidRequestError`. + + Only root transactions of two phase sessions can be prepared. If the + current transaction is not such, an + :exc:`~sqlalchemy.exc.InvalidRequestError` is raised. + + """ + trans = self._transaction + if trans is None: + trans = self._autobegin_t() + + trans.prepare() + + def connection( + self, + bind_arguments: Optional[_BindArguments] = None, + execution_options: Optional[CoreExecuteOptionsParameter] = None, + ) -> Connection: + r"""Return a :class:`_engine.Connection` object corresponding to this + :class:`.Session` object's transactional state. + + Either the :class:`_engine.Connection` corresponding to the current + transaction is returned, or if no transaction is in progress, a new + one is begun and the :class:`_engine.Connection` + returned (note that no + transactional state is established with the DBAPI until the first + SQL statement is emitted). + + Ambiguity in multi-bind or unbound :class:`.Session` objects can be + resolved through any of the optional keyword arguments. This + ultimately makes usage of the :meth:`.get_bind` method for resolution. + + :param bind_arguments: dictionary of bind arguments. May include + "mapper", "bind", "clause", other custom arguments that are passed + to :meth:`.Session.get_bind`. + + :param execution_options: a dictionary of execution options that will + be passed to :meth:`_engine.Connection.execution_options`, **when the + connection is first procured only**. If the connection is already + present within the :class:`.Session`, a warning is emitted and + the arguments are ignored. + + .. 
seealso:: + + :ref:`session_transaction_isolation` + + """ + + if bind_arguments: + bind = bind_arguments.pop("bind", None) + + if bind is None: + bind = self.get_bind(**bind_arguments) + else: + bind = self.get_bind() + + return self._connection_for_bind( + bind, + execution_options=execution_options, + ) + + def _connection_for_bind( + self, + engine: _SessionBind, + execution_options: Optional[CoreExecuteOptionsParameter] = None, + **kw: Any, + ) -> Connection: + TransactionalContext._trans_ctx_check(self) + + trans = self._transaction + if trans is None: + trans = self._autobegin_t() + return trans._connection_for_bind(engine, execution_options) + + @overload + def _execute_internal( + self, + statement: Executable, + params: Optional[_CoreSingleExecuteParams] = None, + *, + execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT, + bind_arguments: Optional[_BindArguments] = None, + _parent_execute_state: Optional[Any] = None, + _add_event: Optional[Any] = None, + _scalar_result: Literal[True] = ..., + ) -> Any: ... + + @overload + def _execute_internal( + self, + statement: Executable, + params: Optional[_CoreAnyExecuteParams] = None, + *, + execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT, + bind_arguments: Optional[_BindArguments] = None, + _parent_execute_state: Optional[Any] = None, + _add_event: Optional[Any] = None, + _scalar_result: bool = ..., + ) -> Result[Any]: ... + + def _execute_internal( + self, + statement: Executable, + params: Optional[_CoreAnyExecuteParams] = None, + *, + execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT, + bind_arguments: Optional[_BindArguments] = None, + _parent_execute_state: Optional[Any] = None, + _add_event: Optional[Any] = None, + _scalar_result: bool = False, + ) -> Any: + statement = coercions.expect(roles.StatementRole, statement) + + if not bind_arguments: + bind_arguments = {} + else: + bind_arguments = dict(bind_arguments) + + if ( + statement._propagate_attrs.get("compile_state_plugin", None) + == "orm" + ): + compile_state_cls = CompileState._get_plugin_class_for_plugin( + statement, "orm" + ) + if TYPE_CHECKING: + assert isinstance( + compile_state_cls, context.AbstractORMCompileState + ) + else: + compile_state_cls = None + bind_arguments.setdefault("clause", statement) + + execution_options = util.coerce_to_immutabledict(execution_options) + + if _parent_execute_state: + events_todo = _parent_execute_state._remaining_events() + else: + events_todo = self.dispatch.do_orm_execute + if _add_event: + events_todo = list(events_todo) + [_add_event] + + if events_todo: + if compile_state_cls is not None: + # for event handlers, do the orm_pre_session_exec + # pass ahead of the event handlers, so that things like + # .load_options, .update_delete_options etc. are populated. + # is_pre_event=True allows the hook to hold off on things + # it doesn't want to do twice, including autoflush as well + # as "pre fetch" for DML, etc. 
+ ( + statement, + execution_options, + ) = compile_state_cls.orm_pre_session_exec( + self, + statement, + params, + execution_options, + bind_arguments, + True, + ) + + orm_exec_state = ORMExecuteState( + self, + statement, + params, + execution_options, + bind_arguments, + compile_state_cls, + events_todo, + ) + for idx, fn in enumerate(events_todo): + orm_exec_state._starting_event_idx = idx + fn_result: Optional[Result[Any]] = fn(orm_exec_state) + if fn_result: + if _scalar_result: + return fn_result.scalar() + else: + return fn_result + + statement = orm_exec_state.statement + execution_options = orm_exec_state.local_execution_options + + if compile_state_cls is not None: + # now run orm_pre_session_exec() "for real". if there were + # event hooks, this will re-run the steps that interpret + # new execution_options into load_options / update_delete_options, + # which we assume the event hook might have updated. + # autoflush will also be invoked in this step if enabled. + ( + statement, + execution_options, + ) = compile_state_cls.orm_pre_session_exec( + self, + statement, + params, + execution_options, + bind_arguments, + False, + ) + + bind = self.get_bind(**bind_arguments) + + conn = self._connection_for_bind(bind) + + if _scalar_result and not compile_state_cls: + if TYPE_CHECKING: + params = cast(_CoreSingleExecuteParams, params) + return conn.scalar( + statement, params or {}, execution_options=execution_options + ) + + if compile_state_cls: + result: Result[Any] = compile_state_cls.orm_execute_statement( + self, + statement, + params or {}, + execution_options, + bind_arguments, + conn, + ) + else: + result = conn.execute( + statement, params or {}, execution_options=execution_options + ) + + if _scalar_result: + return result.scalar() + else: + return result + + @overload + def execute( + self, + statement: TypedReturnsRows[_T], + params: Optional[_CoreAnyExecuteParams] = None, + *, + execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT, + bind_arguments: Optional[_BindArguments] = None, + _parent_execute_state: Optional[Any] = None, + _add_event: Optional[Any] = None, + ) -> Result[_T]: ... + + @overload + def execute( + self, + statement: UpdateBase, + params: Optional[_CoreAnyExecuteParams] = None, + *, + execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT, + bind_arguments: Optional[_BindArguments] = None, + _parent_execute_state: Optional[Any] = None, + _add_event: Optional[Any] = None, + ) -> CursorResult[Any]: ... + + @overload + def execute( + self, + statement: Executable, + params: Optional[_CoreAnyExecuteParams] = None, + *, + execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT, + bind_arguments: Optional[_BindArguments] = None, + _parent_execute_state: Optional[Any] = None, + _add_event: Optional[Any] = None, + ) -> Result[Any]: ... + + def execute( + self, + statement: Executable, + params: Optional[_CoreAnyExecuteParams] = None, + *, + execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT, + bind_arguments: Optional[_BindArguments] = None, + _parent_execute_state: Optional[Any] = None, + _add_event: Optional[Any] = None, + ) -> Result[Any]: + r"""Execute a SQL expression construct. + + Returns a :class:`_engine.Result` object representing + results of the statement execution. 
+ + E.g.:: + + from sqlalchemy import select + + result = session.execute(select(User).where(User.id == 5)) + + The API contract of :meth:`_orm.Session.execute` is similar to that + of :meth:`_engine.Connection.execute`, the :term:`2.0 style` version + of :class:`_engine.Connection`. + + .. versionchanged:: 1.4 the :meth:`_orm.Session.execute` method is + now the primary point of ORM statement execution when using + :term:`2.0 style` ORM usage. + + :param statement: + An executable statement (i.e. an :class:`.Executable` expression + such as :func:`_expression.select`). + + :param params: + Optional dictionary, or list of dictionaries, containing + bound parameter values. If a single dictionary, single-row + execution occurs; if a list of dictionaries, an + "executemany" will be invoked. The keys in each dictionary + must correspond to parameter names present in the statement. + + :param execution_options: optional dictionary of execution options, + which will be associated with the statement execution. This + dictionary can provide a subset of the options that are accepted + by :meth:`_engine.Connection.execution_options`, and may also + provide additional options understood only in an ORM context. + + .. seealso:: + + :ref:`orm_queryguide_execution_options` - ORM-specific execution + options + + :param bind_arguments: dictionary of additional arguments to determine + the bind. May include "mapper", "bind", or other custom arguments. + Contents of this dictionary are passed to the + :meth:`.Session.get_bind` method. + + :return: a :class:`_engine.Result` object. + + + """ + return self._execute_internal( + statement, + params, + execution_options=execution_options, + bind_arguments=bind_arguments, + _parent_execute_state=_parent_execute_state, + _add_event=_add_event, + ) + + @overload + def scalar( + self, + statement: TypedReturnsRows[Tuple[_T]], + params: Optional[_CoreSingleExecuteParams] = None, + *, + execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT, + bind_arguments: Optional[_BindArguments] = None, + **kw: Any, + ) -> Optional[_T]: ... + + @overload + def scalar( + self, + statement: Executable, + params: Optional[_CoreSingleExecuteParams] = None, + *, + execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT, + bind_arguments: Optional[_BindArguments] = None, + **kw: Any, + ) -> Any: ... + + def scalar( + self, + statement: Executable, + params: Optional[_CoreSingleExecuteParams] = None, + *, + execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT, + bind_arguments: Optional[_BindArguments] = None, + **kw: Any, + ) -> Any: + """Execute a statement and return a scalar result. + + Usage and parameters are the same as that of + :meth:`_orm.Session.execute`; the return result is a scalar Python + value. + + """ + + return self._execute_internal( + statement, + params, + execution_options=execution_options, + bind_arguments=bind_arguments, + _scalar_result=True, + **kw, + ) + + @overload + def scalars( + self, + statement: TypedReturnsRows[Tuple[_T]], + params: Optional[_CoreAnyExecuteParams] = None, + *, + execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT, + bind_arguments: Optional[_BindArguments] = None, + **kw: Any, + ) -> ScalarResult[_T]: ... + + @overload + def scalars( + self, + statement: Executable, + params: Optional[_CoreAnyExecuteParams] = None, + *, + execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT, + bind_arguments: Optional[_BindArguments] = None, + **kw: Any, + ) -> ScalarResult[Any]: ... 
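# Illustrative sketch, not part of the library source: 2.0-style execution
# through the methods defined here.  execute() returns Row objects, scalars()
# unwraps single ORM entities, scalar() returns one value.  ``session`` and
# the mapped class ``User`` are assumed to be defined elsewhere.
from sqlalchemy import func, select

rows = session.execute(select(User.id, User.name)).all()                  # Row tuples
users = session.scalars(select(User).where(User.name.like("a%"))).all()   # User instances
count = session.scalar(select(func.count()).select_from(User))            # single value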
+ + def scalars( + self, + statement: Executable, + params: Optional[_CoreAnyExecuteParams] = None, + *, + execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT, + bind_arguments: Optional[_BindArguments] = None, + **kw: Any, + ) -> ScalarResult[Any]: + """Execute a statement and return the results as scalars. + + Usage and parameters are the same as that of + :meth:`_orm.Session.execute`; the return result is a + :class:`_result.ScalarResult` filtering object which + will return single elements rather than :class:`_row.Row` objects. + + :return: a :class:`_result.ScalarResult` object + + .. versionadded:: 1.4.24 Added :meth:`_orm.Session.scalars` + + .. versionadded:: 1.4.26 Added :meth:`_orm.scoped_session.scalars` + + .. seealso:: + + :ref:`orm_queryguide_select_orm_entities` - contrasts the behavior + of :meth:`_orm.Session.execute` to :meth:`_orm.Session.scalars` + + """ + + return self._execute_internal( + statement, + params=params, + execution_options=execution_options, + bind_arguments=bind_arguments, + _scalar_result=False, # mypy appreciates this + **kw, + ).scalars() + + def close(self) -> None: + """Close out the transactional resources and ORM objects used by this + :class:`_orm.Session`. + + This expunges all ORM objects associated with this + :class:`_orm.Session`, ends any transaction in progress and + :term:`releases` any :class:`_engine.Connection` objects which this + :class:`_orm.Session` itself has checked out from associated + :class:`_engine.Engine` objects. The operation then leaves the + :class:`_orm.Session` in a state which it may be used again. + + .. tip:: + + In the default running mode the :meth:`_orm.Session.close` + method **does not prevent the Session from being used again**. + The :class:`_orm.Session` itself does not actually have a + distinct "closed" state; it merely means + the :class:`_orm.Session` will release all database connections + and ORM objects. + + Setting the parameter :paramref:`_orm.Session.close_resets_only` + to ``False`` will instead make the ``close`` final, meaning that + any further action on the session will be forbidden. + + .. versionchanged:: 1.4 The :meth:`.Session.close` method does not + immediately create a new :class:`.SessionTransaction` object; + instead, the new :class:`.SessionTransaction` is created only if + the :class:`.Session` is used again for a database operation. + + .. seealso:: + + :ref:`session_closing` - detail on the semantics of + :meth:`_orm.Session.close` and :meth:`_orm.Session.reset`. + + :meth:`_orm.Session.reset` - a similar method that behaves like + ``close()`` with the parameter + :paramref:`_orm.Session.close_resets_only` set to ``True``. + + """ + self._close_impl(invalidate=False) + + def reset(self) -> None: + """Close out the transactional resources and ORM objects used by this + :class:`_orm.Session`, resetting the session to its initial state. + + This method provides for same "reset-only" behavior that the + :meth:`_orm.Session.close` method has provided historically, where the + state of the :class:`_orm.Session` is reset as though the object were + brand new, and ready to be used again. + This method may then be useful for :class:`_orm.Session` objects + which set :paramref:`_orm.Session.close_resets_only` to ``False``, + so that "reset only" behavior is still available. + + .. versionadded:: 2.0.22 + + .. seealso:: + + :ref:`session_closing` - detail on the semantics of + :meth:`_orm.Session.close` and :meth:`_orm.Session.reset`. 
+ + :meth:`_orm.Session.close` - a similar method will additionally + prevent re-use of the Session when the parameter + :paramref:`_orm.Session.close_resets_only` is set to ``False``. + """ + self._close_impl(invalidate=False, is_reset=True) + + def invalidate(self) -> None: + """Close this Session, using connection invalidation. + + This is a variant of :meth:`.Session.close` that will additionally + ensure that the :meth:`_engine.Connection.invalidate` + method will be called on each :class:`_engine.Connection` object + that is currently in use for a transaction (typically there is only + one connection unless the :class:`_orm.Session` is used with + multiple engines). + + This can be called when the database is known to be in a state where + the connections are no longer safe to be used. + + Below illustrates a scenario when using `gevent + `_, which can produce ``Timeout`` exceptions + that may mean the underlying connection should be discarded:: + + import gevent + + try: + sess = Session() + sess.add(User()) + sess.commit() + except gevent.Timeout: + sess.invalidate() + raise + except: + sess.rollback() + raise + + The method additionally does everything that :meth:`_orm.Session.close` + does, including that all ORM objects are expunged. + + """ + self._close_impl(invalidate=True) + + def _close_impl(self, invalidate: bool, is_reset: bool = False) -> None: + if not is_reset and self._close_state is _SessionCloseState.ACTIVE: + self._close_state = _SessionCloseState.CLOSED + self.expunge_all() + if self._transaction is not None: + for transaction in self._transaction._iterate_self_and_parents(): + transaction.close(invalidate) + + def expunge_all(self) -> None: + """Remove all object instances from this ``Session``. + + This is equivalent to calling ``expunge(obj)`` on all objects in this + ``Session``. + + """ + + all_states = self.identity_map.all_states() + list(self._new) + self.identity_map._kill() + self.identity_map = identity.WeakInstanceDict() + self._new = {} + self._deleted = {} + + statelib.InstanceState._detach_states(all_states, self) + + def _add_bind(self, key: _SessionBindKey, bind: _SessionBind) -> None: + try: + insp = inspect(key) + except sa_exc.NoInspectionAvailable as err: + if not isinstance(key, type): + raise sa_exc.ArgumentError( + "Not an acceptable bind target: %s" % key + ) from err + else: + self.__binds[key] = bind + else: + if TYPE_CHECKING: + assert isinstance(insp, Inspectable) + + if isinstance(insp, TableClause): + self.__binds[insp] = bind + elif insp_is_mapper(insp): + self.__binds[insp.class_] = bind + for _selectable in insp._all_tables: + self.__binds[_selectable] = bind + else: + raise sa_exc.ArgumentError( + "Not an acceptable bind target: %s" % key + ) + + def bind_mapper( + self, mapper: _EntityBindKey[_O], bind: _SessionBind + ) -> None: + """Associate a :class:`_orm.Mapper` or arbitrary Python class with a + "bind", e.g. an :class:`_engine.Engine` or + :class:`_engine.Connection`. + + The given entity is added to a lookup used by the + :meth:`.Session.get_bind` method. + + :param mapper: a :class:`_orm.Mapper` object, + or an instance of a mapped + class, or any Python class that is the base of a set of mapped + classes. + + :param bind: an :class:`_engine.Engine` or :class:`_engine.Connection` + object. + + .. 
seealso:: + + :ref:`session_partitioning` + + :paramref:`.Session.binds` + + :meth:`.Session.bind_table` + + + """ + self._add_bind(mapper, bind) + + def bind_table(self, table: TableClause, bind: _SessionBind) -> None: + """Associate a :class:`_schema.Table` with a "bind", e.g. an + :class:`_engine.Engine` + or :class:`_engine.Connection`. + + The given :class:`_schema.Table` is added to a lookup used by the + :meth:`.Session.get_bind` method. + + :param table: a :class:`_schema.Table` object, + which is typically the target + of an ORM mapping, or is present within a selectable that is + mapped. + + :param bind: an :class:`_engine.Engine` or :class:`_engine.Connection` + object. + + .. seealso:: + + :ref:`session_partitioning` + + :paramref:`.Session.binds` + + :meth:`.Session.bind_mapper` + + + """ + self._add_bind(table, bind) + + def get_bind( + self, + mapper: Optional[_EntityBindKey[_O]] = None, + *, + clause: Optional[ClauseElement] = None, + bind: Optional[_SessionBind] = None, + _sa_skip_events: Optional[bool] = None, + _sa_skip_for_implicit_returning: bool = False, + **kw: Any, + ) -> Union[Engine, Connection]: + """Return a "bind" to which this :class:`.Session` is bound. + + The "bind" is usually an instance of :class:`_engine.Engine`, + except in the case where the :class:`.Session` has been + explicitly bound directly to a :class:`_engine.Connection`. + + For a multiply-bound or unbound :class:`.Session`, the + ``mapper`` or ``clause`` arguments are used to determine the + appropriate bind to return. + + Note that the "mapper" argument is usually present + when :meth:`.Session.get_bind` is called via an ORM + operation such as a :meth:`.Session.query`, each + individual INSERT/UPDATE/DELETE operation within a + :meth:`.Session.flush`, call, etc. + + The order of resolution is: + + 1. if mapper given and :paramref:`.Session.binds` is present, + locate a bind based first on the mapper in use, then + on the mapped class in use, then on any base classes that are + present in the ``__mro__`` of the mapped class, from more specific + superclasses to more general. + 2. if clause given and ``Session.binds`` is present, + locate a bind based on :class:`_schema.Table` objects + found in the given clause present in ``Session.binds``. + 3. if ``Session.binds`` is present, return that. + 4. if clause given, attempt to return a bind + linked to the :class:`_schema.MetaData` ultimately + associated with the clause. + 5. if mapper given, attempt to return a bind + linked to the :class:`_schema.MetaData` ultimately + associated with the :class:`_schema.Table` or other + selectable to which the mapper is mapped. + 6. No bind can be found, :exc:`~sqlalchemy.exc.UnboundExecutionError` + is raised. + + Note that the :meth:`.Session.get_bind` method can be overridden on + a user-defined subclass of :class:`.Session` to provide any kind + of bind resolution scheme. See the example at + :ref:`session_custom_partitioning`. + + :param mapper: + Optional mapped class or corresponding :class:`_orm.Mapper` instance. + The bind can be derived from a :class:`_orm.Mapper` first by + consulting the "binds" map associated with this :class:`.Session`, + and secondly by consulting the :class:`_schema.MetaData` associated + with the :class:`_schema.Table` to which the :class:`_orm.Mapper` is + mapped for a bind. + + :param clause: + A :class:`_expression.ClauseElement` (i.e. + :func:`_expression.select`, + :func:`_expression.text`, + etc.). 
If the ``mapper`` argument is not present or could not + produce a bind, the given expression construct will be searched + for a bound element, typically a :class:`_schema.Table` + associated with + bound :class:`_schema.MetaData`. + + .. seealso:: + + :ref:`session_partitioning` + + :paramref:`.Session.binds` + + :meth:`.Session.bind_mapper` + + :meth:`.Session.bind_table` + + """ + + # this function is documented as a subclassing hook, so we have + # to call this method even if the return is simple + if bind: + return bind + elif not self.__binds and self.bind: + # simplest and most common case, we have a bind and no + # per-mapper/table binds, we're done + return self.bind + + # we don't have self.bind and either have self.__binds + # or we don't have self.__binds (which is legacy). Look at the + # mapper and the clause + if mapper is None and clause is None: + if self.bind: + return self.bind + else: + raise sa_exc.UnboundExecutionError( + "This session is not bound to a single Engine or " + "Connection, and no context was provided to locate " + "a binding." + ) + + # look more closely at the mapper. + if mapper is not None: + try: + inspected_mapper = inspect(mapper) + except sa_exc.NoInspectionAvailable as err: + if isinstance(mapper, type): + raise exc.UnmappedClassError(mapper) from err + else: + raise + else: + inspected_mapper = None + + # match up the mapper or clause in the __binds + if self.__binds: + # matching mappers and selectables to entries in the + # binds dictionary; supported use case. + if inspected_mapper: + for cls in inspected_mapper.class_.__mro__: + if cls in self.__binds: + return self.__binds[cls] + if clause is None: + clause = inspected_mapper.persist_selectable + + if clause is not None: + plugin_subject = clause._propagate_attrs.get( + "plugin_subject", None + ) + + if plugin_subject is not None: + for cls in plugin_subject.mapper.class_.__mro__: + if cls in self.__binds: + return self.__binds[cls] + + for obj in visitors.iterate(clause): + if obj in self.__binds: + if TYPE_CHECKING: + assert isinstance(obj, Table) + return self.__binds[obj] + + # none of the __binds matched, but we have a fallback bind. + # return that + if self.bind: + return self.bind + + context = [] + if inspected_mapper is not None: + context.append(f"mapper {inspected_mapper}") + if clause is not None: + context.append("SQL expression") + + raise sa_exc.UnboundExecutionError( + f"Could not locate a bind configured on " + f'{", ".join(context)} or this Session.' + ) + + @overload + def query(self, _entity: _EntityType[_O]) -> Query[_O]: ... + + @overload + def query( + self, _colexpr: TypedColumnsClauseRole[_T] + ) -> RowReturningQuery[Tuple[_T]]: ... + + # START OVERLOADED FUNCTIONS self.query RowReturningQuery 2-8 + + # code within this block is **programmatically, + # statically generated** by tools/generate_tuple_map_overloads.py + + @overload + def query( + self, __ent0: _TCCA[_T0], __ent1: _TCCA[_T1] + ) -> RowReturningQuery[Tuple[_T0, _T1]]: ... + + @overload + def query( + self, __ent0: _TCCA[_T0], __ent1: _TCCA[_T1], __ent2: _TCCA[_T2] + ) -> RowReturningQuery[Tuple[_T0, _T1, _T2]]: ... + + @overload + def query( + self, + __ent0: _TCCA[_T0], + __ent1: _TCCA[_T1], + __ent2: _TCCA[_T2], + __ent3: _TCCA[_T3], + ) -> RowReturningQuery[Tuple[_T0, _T1, _T2, _T3]]: ... + + @overload + def query( + self, + __ent0: _TCCA[_T0], + __ent1: _TCCA[_T1], + __ent2: _TCCA[_T2], + __ent3: _TCCA[_T3], + __ent4: _TCCA[_T4], + ) -> RowReturningQuery[Tuple[_T0, _T1, _T2, _T3, _T4]]: ... 
+ + @overload + def query( + self, + __ent0: _TCCA[_T0], + __ent1: _TCCA[_T1], + __ent2: _TCCA[_T2], + __ent3: _TCCA[_T3], + __ent4: _TCCA[_T4], + __ent5: _TCCA[_T5], + ) -> RowReturningQuery[Tuple[_T0, _T1, _T2, _T3, _T4, _T5]]: ... + + @overload + def query( + self, + __ent0: _TCCA[_T0], + __ent1: _TCCA[_T1], + __ent2: _TCCA[_T2], + __ent3: _TCCA[_T3], + __ent4: _TCCA[_T4], + __ent5: _TCCA[_T5], + __ent6: _TCCA[_T6], + ) -> RowReturningQuery[Tuple[_T0, _T1, _T2, _T3, _T4, _T5, _T6]]: ... + + @overload + def query( + self, + __ent0: _TCCA[_T0], + __ent1: _TCCA[_T1], + __ent2: _TCCA[_T2], + __ent3: _TCCA[_T3], + __ent4: _TCCA[_T4], + __ent5: _TCCA[_T5], + __ent6: _TCCA[_T6], + __ent7: _TCCA[_T7], + ) -> RowReturningQuery[Tuple[_T0, _T1, _T2, _T3, _T4, _T5, _T6, _T7]]: ... + + # END OVERLOADED FUNCTIONS self.query + + @overload + def query( + self, *entities: _ColumnsClauseArgument[Any], **kwargs: Any + ) -> Query[Any]: ... + + def query( + self, *entities: _ColumnsClauseArgument[Any], **kwargs: Any + ) -> Query[Any]: + """Return a new :class:`_query.Query` object corresponding to this + :class:`_orm.Session`. + + Note that the :class:`_query.Query` object is legacy as of + SQLAlchemy 2.0; the :func:`_sql.select` construct is now used + to construct ORM queries. + + .. seealso:: + + :ref:`unified_tutorial` + + :ref:`queryguide_toplevel` + + :ref:`query_api_toplevel` - legacy API doc + + """ + + return self._query_cls(entities, self, **kwargs) + + def _identity_lookup( + self, + mapper: Mapper[_O], + primary_key_identity: Union[Any, Tuple[Any, ...]], + identity_token: Any = None, + passive: PassiveFlag = PassiveFlag.PASSIVE_OFF, + lazy_loaded_from: Optional[InstanceState[Any]] = None, + execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT, + bind_arguments: Optional[_BindArguments] = None, + ) -> Union[Optional[_O], LoaderCallableStatus]: + """Locate an object in the identity map. + + Given a primary key identity, constructs an identity key and then + looks in the session's identity map. If present, the object may + be run through unexpiration rules (e.g. load unloaded attributes, + check if was deleted). + + e.g.:: + + obj = session._identity_lookup(inspect(SomeClass), (1,)) + + :param mapper: mapper in use + :param primary_key_identity: the primary key we are searching for, as + a tuple. + :param identity_token: identity token that should be used to create + the identity key. Used as is, however overriding subclasses can + repurpose this in order to interpret the value in a special way, + such as if None then look among multiple target tokens. + :param passive: passive load flag passed to + :func:`.loading.get_from_identity`, which impacts the behavior if + the object is found; the object may be validated and/or unexpired + if the flag allows for SQL to be emitted. + :param lazy_loaded_from: an :class:`.InstanceState` that is + specifically asking for this identity as a related identity. Used + for sharding schemes where there is a correspondence between an object + and a related object being lazy-loaded (or otherwise + relationship-loaded). + + :return: None if the object is not found in the identity map, *or* + if the object was unexpired and found to have been deleted. + if passive flags disallow SQL and the object is expired, returns + PASSIVE_NO_RESULT. In all other cases the instance is returned. + + .. 
versionchanged:: 1.4.0 - the :meth:`.Session._identity_lookup` + method was moved from :class:`_query.Query` to + :class:`.Session`, to avoid having to instantiate the + :class:`_query.Query` object. + + + """ + + key = mapper.identity_key_from_primary_key( + primary_key_identity, identity_token=identity_token + ) + + # work around: https://github.com/python/typing/discussions/1143 + return_value = loading.get_from_identity(self, mapper, key, passive) + return return_value + + @util.non_memoized_property + @contextlib.contextmanager + def no_autoflush(self) -> Iterator[Session]: + """Return a context manager that disables autoflush. + + e.g.:: + + with session.no_autoflush: + + some_object = SomeClass() + session.add(some_object) + # won't autoflush + some_object.related_thing = session.query(SomeRelated).first() + + Operations that proceed within the ``with:`` block + will not be subject to flushes occurring upon query + access. This is useful when initializing a series + of objects which involve existing database queries, + where the uncompleted object should not yet be flushed. + + """ + autoflush = self.autoflush + self.autoflush = False + try: + yield self + finally: + self.autoflush = autoflush + + @util.langhelpers.tag_method_for_warnings( + "This warning originated from the Session 'autoflush' process, " + "which was invoked automatically in response to a user-initiated " + "operation. Consider using ``no_autoflush`` context manager if this " + "warning happended while initializing objects.", + sa_exc.SAWarning, + ) + def _autoflush(self) -> None: + if self.autoflush and not self._flushing: + try: + self.flush() + except sa_exc.StatementError as e: + # note we are reraising StatementError as opposed to + # raising FlushError with "chaining" to remain compatible + # with code that catches StatementError, IntegrityError, + # etc. + e.add_detail( + "raised as a result of Query-invoked autoflush; " + "consider using a session.no_autoflush block if this " + "flush is occurring prematurely" + ) + raise e.with_traceback(sys.exc_info()[2]) + + def refresh( + self, + instance: object, + attribute_names: Optional[Iterable[str]] = None, + with_for_update: ForUpdateParameter = None, + ) -> None: + """Expire and refresh attributes on the given instance. + + The selected attributes will first be expired as they would when using + :meth:`_orm.Session.expire`; then a SELECT statement will be issued to + the database to refresh column-oriented attributes with the current + value available in the current transaction. + + :func:`_orm.relationship` oriented attributes will also be immediately + loaded if they were already eagerly loaded on the object, using the + same eager loading strategy that they were loaded with originally. + + .. versionadded:: 1.4 - the :meth:`_orm.Session.refresh` method + can also refresh eagerly loaded attributes. + + :func:`_orm.relationship` oriented attributes that would normally + load using the ``select`` (or "lazy") loader strategy will also + load **if they are named explicitly in the attribute_names + collection**, emitting a SELECT statement for the attribute using the + ``immediate`` loader strategy. If lazy-loaded relationships are not + named in :paramref:`_orm.Session.refresh.attribute_names`, then + they remain as "lazy loaded" attributes and are not implicitly + refreshed. + + .. 
versionchanged:: 2.0.4 The :meth:`_orm.Session.refresh` method + will now refresh lazy-loaded :func:`_orm.relationship` oriented + attributes for those which are named explicitly in the + :paramref:`_orm.Session.refresh.attribute_names` collection. + + .. tip:: + + While the :meth:`_orm.Session.refresh` method is capable of + refreshing both column and relationship oriented attributes, its + primary focus is on refreshing of local column-oriented attributes + on a single instance. For more open ended "refresh" functionality, + including the ability to refresh the attributes on many objects at + once while having explicit control over relationship loader + strategies, use the + :ref:`populate existing ` feature + instead. + + Note that a highly isolated transaction will return the same values as + were previously read in that same transaction, regardless of changes + in database state outside of that transaction. Refreshing + attributes usually only makes sense at the start of a transaction + where database rows have not yet been accessed. + + :param attribute_names: optional. An iterable collection of + string attribute names indicating a subset of attributes to + be refreshed. + + :param with_for_update: optional boolean ``True`` indicating FOR UPDATE + should be used, or may be a dictionary containing flags to + indicate a more specific set of FOR UPDATE flags for the SELECT; + flags should match the parameters of + :meth:`_query.Query.with_for_update`. + Supersedes the :paramref:`.Session.refresh.lockmode` parameter. + + .. seealso:: + + :ref:`session_expire` - introductory material + + :meth:`.Session.expire` + + :meth:`.Session.expire_all` + + :ref:`orm_queryguide_populate_existing` - allows any ORM query + to refresh objects as they would be loaded normally. + + """ + try: + state = attributes.instance_state(instance) + except exc.NO_STATE as err: + raise exc.UnmappedInstanceError(instance) from err + + self._expire_state(state, attribute_names) + + # this autoflush previously used to occur as a secondary effect + # of the load_on_ident below. Meaning we'd organize the SELECT + # based on current DB pks, then flush, then if pks changed in that + # flush, crash. this was unticketed but discovered as part of + # #8703. So here, autoflush up front, dont autoflush inside + # load_on_ident. + self._autoflush() + + if with_for_update == {}: + raise sa_exc.ArgumentError( + "with_for_update should be the boolean value " + "True, or a dictionary with options. " + "A blank dictionary is ambiguous." + ) + + with_for_update = ForUpdateArg._from_argument(with_for_update) + + stmt: Select[Any] = sql.select(object_mapper(instance)) + if ( + loading.load_on_ident( + self, + stmt, + state.key, + refresh_state=state, + with_for_update=with_for_update, + only_load_props=attribute_names, + require_pk_cols=True, + # technically unnecessary as we just did autoflush + # above, however removes the additional unnecessary + # call to _autoflush() + no_autoflush=True, + is_user_refresh=True, + ) + is None + ): + raise sa_exc.InvalidRequestError( + "Could not refresh instance '%s'" % instance_str(instance) + ) + + def expire_all(self) -> None: + """Expires all persistent instances within this Session. + + When any attributes on a persistent instance is next accessed, + a query will be issued using the + :class:`.Session` object's current transactional context in order to + load all expired attributes for the given instance. 
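        For example, forcing every persistent instance to reload on next
        access (a minimal sketch; ``session`` is assumed to be an open
        :class:`.Session` and ``some_user`` an instance it already holds)::

            session.expire_all()

            # the next attribute access emits a new SELECT in the current
            # transaction and repopulates the instance
            print(some_user.name)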
Note that + a highly isolated transaction will return the same values as were + previously read in that same transaction, regardless of changes + in database state outside of that transaction. + + To expire individual objects and individual attributes + on those objects, use :meth:`Session.expire`. + + The :class:`.Session` object's default behavior is to + expire all state whenever the :meth:`Session.rollback` + or :meth:`Session.commit` methods are called, so that new + state can be loaded for the new transaction. For this reason, + calling :meth:`Session.expire_all` is not usually needed, + assuming the transaction is isolated. + + .. seealso:: + + :ref:`session_expire` - introductory material + + :meth:`.Session.expire` + + :meth:`.Session.refresh` + + :meth:`_orm.Query.populate_existing` + + """ + for state in self.identity_map.all_states(): + state._expire(state.dict, self.identity_map._modified) + + def expire( + self, instance: object, attribute_names: Optional[Iterable[str]] = None + ) -> None: + """Expire the attributes on an instance. + + Marks the attributes of an instance as out of date. When an expired + attribute is next accessed, a query will be issued to the + :class:`.Session` object's current transactional context in order to + load all expired attributes for the given instance. Note that + a highly isolated transaction will return the same values as were + previously read in that same transaction, regardless of changes + in database state outside of that transaction. + + To expire all objects in the :class:`.Session` simultaneously, + use :meth:`Session.expire_all`. + + The :class:`.Session` object's default behavior is to + expire all state whenever the :meth:`Session.rollback` + or :meth:`Session.commit` methods are called, so that new + state can be loaded for the new transaction. For this reason, + calling :meth:`Session.expire` only makes sense for the specific + case that a non-ORM SQL statement was emitted in the current + transaction. + + :param instance: The instance to be refreshed. + :param attribute_names: optional list of string attribute names + indicating a subset of attributes to be expired. + + .. seealso:: + + :ref:`session_expire` - introductory material + + :meth:`.Session.expire` + + :meth:`.Session.refresh` + + :meth:`_orm.Query.populate_existing` + + """ + try: + state = attributes.instance_state(instance) + except exc.NO_STATE as err: + raise exc.UnmappedInstanceError(instance) from err + self._expire_state(state, attribute_names) + + def _expire_state( + self, + state: InstanceState[Any], + attribute_names: Optional[Iterable[str]], + ) -> None: + self._validate_persistent(state) + if attribute_names: + state._expire_attributes(state.dict, attribute_names) + else: + # pre-fetch the full cascade since the expire is going to + # remove associations + cascaded = list( + state.manager.mapper.cascade_iterator("refresh-expire", state) + ) + self._conditional_expire(state) + for o, m, st_, dct_ in cascaded: + self._conditional_expire(st_) + + def _conditional_expire( + self, state: InstanceState[Any], autoflush: Optional[bool] = None + ) -> None: + """Expire a state if persistent, else expunge if pending""" + + if state.key: + state._expire(state.dict, self.identity_map._modified) + elif state in self._new: + self._new.pop(state) + state._detach(self) + + def expunge(self, instance: object) -> None: + """Remove the `instance` from this ``Session``. + + This will free all internal references to the instance. 
Cascading + will be applied according to the *expunge* cascade rule. + + """ + try: + state = attributes.instance_state(instance) + except exc.NO_STATE as err: + raise exc.UnmappedInstanceError(instance) from err + if state.session_id is not self.hash_key: + raise sa_exc.InvalidRequestError( + "Instance %s is not present in this Session" % state_str(state) + ) + + cascaded = list( + state.manager.mapper.cascade_iterator("expunge", state) + ) + self._expunge_states([state] + [st_ for o, m, st_, dct_ in cascaded]) + + def _expunge_states( + self, states: Iterable[InstanceState[Any]], to_transient: bool = False + ) -> None: + for state in states: + if state in self._new: + self._new.pop(state) + elif self.identity_map.contains_state(state): + self.identity_map.safe_discard(state) + self._deleted.pop(state, None) + elif self._transaction: + # state is "detached" from being deleted, but still present + # in the transaction snapshot + self._transaction._deleted.pop(state, None) + statelib.InstanceState._detach_states( + states, self, to_transient=to_transient + ) + + def _register_persistent(self, states: Set[InstanceState[Any]]) -> None: + """Register all persistent objects from a flush. + + This is used both for pending objects moving to the persistent + state as well as already persistent objects. + + """ + + pending_to_persistent = self.dispatch.pending_to_persistent or None + for state in states: + mapper = _state_mapper(state) + + # prevent against last minute dereferences of the object + obj = state.obj() + if obj is not None: + instance_key = mapper._identity_key_from_state(state) + + if ( + _none_set.intersection(instance_key[1]) + and not mapper.allow_partial_pks + or _none_set.issuperset(instance_key[1]) + ): + raise exc.FlushError( + "Instance %s has a NULL identity key. If this is an " + "auto-generated value, check that the database table " + "allows generation of new primary key values, and " + "that the mapped Column object is configured to " + "expect these generated values. Ensure also that " + "this flush() is not occurring at an inappropriate " + "time, such as within a load() event." + % state_str(state) + ) + + if state.key is None: + state.key = instance_key + elif state.key != instance_key: + # primary key switch. use safe_discard() in case another + # state has already replaced this one in the identity + # map (see test/orm/test_naturalpks.py ReversePKsTest) + self.identity_map.safe_discard(state) + trans = self._transaction + assert trans is not None + if state in trans._key_switches: + orig_key = trans._key_switches[state][0] + else: + orig_key = state.key + trans._key_switches[state] = ( + orig_key, + instance_key, + ) + state.key = instance_key + + # there can be an existing state in the identity map + # that is replaced when the primary keys of two instances + # are swapped; see test/orm/test_naturalpks.py -> test_reverse + old = self.identity_map.replace(state) + if ( + old is not None + and mapper._identity_key_from_state(old) == instance_key + and old.obj() is not None + ): + util.warn( + "Identity map already had an identity for %s, " + "replacing it with newly flushed object. Are there " + "load operations occurring inside of an event handler " + "within the flush?" 
% (instance_key,) + ) + state._orphaned_outside_of_session = False + + statelib.InstanceState._commit_all_states( + ((state, state.dict) for state in states), self.identity_map + ) + + self._register_altered(states) + + if pending_to_persistent is not None: + for state in states.intersection(self._new): + pending_to_persistent(self, state) + + # remove from new last, might be the last strong ref + for state in set(states).intersection(self._new): + self._new.pop(state) + + def _register_altered(self, states: Iterable[InstanceState[Any]]) -> None: + if self._transaction: + for state in states: + if state in self._new: + self._transaction._new[state] = True + else: + self._transaction._dirty[state] = True + + def _remove_newly_deleted( + self, states: Iterable[InstanceState[Any]] + ) -> None: + persistent_to_deleted = self.dispatch.persistent_to_deleted or None + for state in states: + if self._transaction: + self._transaction._deleted[state] = True + + if persistent_to_deleted is not None: + # get a strong reference before we pop out of + # self._deleted + obj = state.obj() # noqa + + self.identity_map.safe_discard(state) + self._deleted.pop(state, None) + state._deleted = True + # can't call state._detach() here, because this state + # is still in the transaction snapshot and needs to be + # tracked as part of that + if persistent_to_deleted is not None: + persistent_to_deleted(self, state) + + def add(self, instance: object, _warn: bool = True) -> None: + """Place an object into this :class:`_orm.Session`. + + Objects that are in the :term:`transient` state when passed to the + :meth:`_orm.Session.add` method will move to the + :term:`pending` state, until the next flush, at which point they + will move to the :term:`persistent` state. + + Objects that are in the :term:`detached` state when passed to the + :meth:`_orm.Session.add` method will move to the :term:`persistent` + state directly. + + If the transaction used by the :class:`_orm.Session` is rolled back, + objects which were transient when they were passed to + :meth:`_orm.Session.add` will be moved back to the + :term:`transient` state, and will no longer be present within this + :class:`_orm.Session`. + + .. seealso:: + + :meth:`_orm.Session.add_all` + + :ref:`session_adding` - at :ref:`session_basics` + + """ + if _warn and self._warn_on_events: + self._flush_warning("Session.add()") + + try: + state = attributes.instance_state(instance) + except exc.NO_STATE as err: + raise exc.UnmappedInstanceError(instance) from err + + self._save_or_update_state(state) + + def add_all(self, instances: Iterable[object]) -> None: + """Add the given collection of instances to this :class:`_orm.Session`. + + See the documentation for :meth:`_orm.Session.add` for a general + behavioral description. + + .. seealso:: + + :meth:`_orm.Session.add` + + :ref:`session_adding` - at :ref:`session_basics` + + """ + + if self._warn_on_events: + self._flush_warning("Session.add_all()") + + for instance in instances: + self.add(instance, _warn=False) + + def _save_or_update_state(self, state: InstanceState[Any]) -> None: + state._orphaned_outside_of_session = False + self._save_or_update_impl(state) + + mapper = _state_mapper(state) + for o, m, st_, dct_ in mapper.cascade_iterator( + "save-update", state, halt_on=self._contains_state + ): + self._save_or_update_impl(st_) + + def delete(self, instance: object) -> None: + """Mark an instance as deleted. 
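        A minimal sketch (``some_user`` is assumed to be a persistent
        instance already loaded by this :class:`.Session`)::

            session.delete(some_user)

            # the DELETE is emitted at the next flush; committing makes it
            # permanent and detaches the instance from the Session
            session.commit()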
+ + The object is assumed to be either :term:`persistent` or + :term:`detached` when passed; after the method is called, the + object will remain in the :term:`persistent` state until the next + flush proceeds. During this time, the object will also be a member + of the :attr:`_orm.Session.deleted` collection. + + When the next flush proceeds, the object will move to the + :term:`deleted` state, indicating a ``DELETE`` statement was emitted + for its row within the current transaction. When the transaction + is successfully committed, + the deleted object is moved to the :term:`detached` state and is + no longer present within this :class:`_orm.Session`. + + .. seealso:: + + :ref:`session_deleting` - at :ref:`session_basics` + + """ + if self._warn_on_events: + self._flush_warning("Session.delete()") + + try: + state = attributes.instance_state(instance) + except exc.NO_STATE as err: + raise exc.UnmappedInstanceError(instance) from err + + self._delete_impl(state, instance, head=True) + + def _delete_impl( + self, state: InstanceState[Any], obj: object, head: bool + ) -> None: + if state.key is None: + if head: + raise sa_exc.InvalidRequestError( + "Instance '%s' is not persisted" % state_str(state) + ) + else: + return + + to_attach = self._before_attach(state, obj) + + if state in self._deleted: + return + + self.identity_map.add(state) + + if to_attach: + self._after_attach(state, obj) + + if head: + # grab the cascades before adding the item to the deleted list + # so that autoflush does not delete the item + # the strong reference to the instance itself is significant here + cascade_states = list( + state.manager.mapper.cascade_iterator("delete", state) + ) + else: + cascade_states = None + + self._deleted[state] = obj + + if head: + if TYPE_CHECKING: + assert cascade_states is not None + for o, m, st_, dct_ in cascade_states: + self._delete_impl(st_, o, False) + + def get( + self, + entity: _EntityBindKey[_O], + ident: _PKIdentityArgument, + *, + options: Optional[Sequence[ORMOption]] = None, + populate_existing: bool = False, + with_for_update: ForUpdateParameter = None, + identity_token: Optional[Any] = None, + execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT, + bind_arguments: Optional[_BindArguments] = None, + ) -> Optional[_O]: + """Return an instance based on the given primary key identifier, + or ``None`` if not found. + + E.g.:: + + my_user = session.get(User, 5) + + some_object = session.get(VersionedFoo, (5, 10)) + + some_object = session.get(VersionedFoo, {"id": 5, "version_id": 10}) + + .. versionadded:: 1.4 Added :meth:`_orm.Session.get`, which is moved + from the now legacy :meth:`_orm.Query.get` method. + + :meth:`_orm.Session.get` is special in that it provides direct + access to the identity map of the :class:`.Session`. + If the given primary key identifier is present + in the local identity map, the object is returned + directly from this collection and no SQL is emitted, + unless the object has been marked fully expired. + If not present, + a SELECT is performed in order to locate the object. + + :meth:`_orm.Session.get` also will perform a check if + the object is present in the identity map and + marked as expired - a SELECT + is emitted to refresh the object as well as to + ensure that the row is still present. + If not, :class:`~sqlalchemy.orm.exc.ObjectDeletedError` is raised. + + :param entity: a mapped class or :class:`.Mapper` indicating the + type of entity to be loaded. 
+ + :param ident: A scalar, tuple, or dictionary representing the + primary key. For a composite (e.g. multiple column) primary key, + a tuple or dictionary should be passed. + + For a single-column primary key, the scalar calling form is typically + the most expedient. If the primary key of a row is the value "5", + the call looks like:: + + my_object = session.get(SomeClass, 5) + + The tuple form contains primary key values typically in + the order in which they correspond to the mapped + :class:`_schema.Table` + object's primary key columns, or if the + :paramref:`_orm.Mapper.primary_key` configuration parameter were + used, in + the order used for that parameter. For example, if the primary key + of a row is represented by the integer + digits "5, 10" the call would look like:: + + my_object = session.get(SomeClass, (5, 10)) + + The dictionary form should include as keys the mapped attribute names + corresponding to each element of the primary key. If the mapped class + has the attributes ``id``, ``version_id`` as the attributes which + store the object's primary key value, the call would look like:: + + my_object = session.get(SomeClass, {"id": 5, "version_id": 10}) + + :param options: optional sequence of loader options which will be + applied to the query, if one is emitted. + + :param populate_existing: causes the method to unconditionally emit + a SQL query and refresh the object with the newly loaded data, + regardless of whether or not the object is already present. + + :param with_for_update: optional boolean ``True`` indicating FOR UPDATE + should be used, or may be a dictionary containing flags to + indicate a more specific set of FOR UPDATE flags for the SELECT; + flags should match the parameters of + :meth:`_query.Query.with_for_update`. + Supersedes the :paramref:`.Session.refresh.lockmode` parameter. + + :param execution_options: optional dictionary of execution options, + which will be associated with the query execution if one is emitted. + This dictionary can provide a subset of the options that are + accepted by :meth:`_engine.Connection.execution_options`, and may + also provide additional options understood only in an ORM context. + + .. versionadded:: 1.4.29 + + .. seealso:: + + :ref:`orm_queryguide_execution_options` - ORM-specific execution + options + + :param bind_arguments: dictionary of additional arguments to determine + the bind. May include "mapper", "bind", or other custom arguments. + Contents of this dictionary are passed to the + :meth:`.Session.get_bind` method. + + .. versionadded: 2.0.0rc1 + + :return: The object instance, or ``None``. + + """ # noqa: E501 + return self._get_impl( + entity, + ident, + loading.load_on_pk_identity, + options=options, + populate_existing=populate_existing, + with_for_update=with_for_update, + identity_token=identity_token, + execution_options=execution_options, + bind_arguments=bind_arguments, + ) + + def get_one( + self, + entity: _EntityBindKey[_O], + ident: _PKIdentityArgument, + *, + options: Optional[Sequence[ORMOption]] = None, + populate_existing: bool = False, + with_for_update: ForUpdateParameter = None, + identity_token: Optional[Any] = None, + execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT, + bind_arguments: Optional[_BindArguments] = None, + ) -> _O: + """Return exactly one instance based on the given primary key + identifier, or raise an exception if not found. + + Raises :class:`_exc.NoResultFound` if the query selects no rows. 
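        E.g., a brief sketch (``User`` is assumed to be a mapped class with
        a single-column integer primary key)::

            from sqlalchemy.exc import NoResultFound

            try:
                user = session.get_one(User, 5)
            except NoResultFound:
                ...  # no row with primary key 5 exists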
+ + For a detailed documentation of the arguments see the + method :meth:`.Session.get`. + + .. versionadded:: 2.0.22 + + :return: The object instance. + + .. seealso:: + + :meth:`.Session.get` - equivalent method that instead + returns ``None`` if no row was found with the provided primary + key + + """ + + instance = self.get( + entity, + ident, + options=options, + populate_existing=populate_existing, + with_for_update=with_for_update, + identity_token=identity_token, + execution_options=execution_options, + bind_arguments=bind_arguments, + ) + + if instance is None: + raise sa_exc.NoResultFound( + "No row was found when one was required" + ) + + return instance + + def _get_impl( + self, + entity: _EntityBindKey[_O], + primary_key_identity: _PKIdentityArgument, + db_load_fn: Callable[..., _O], + *, + options: Optional[Sequence[ExecutableOption]] = None, + populate_existing: bool = False, + with_for_update: ForUpdateParameter = None, + identity_token: Optional[Any] = None, + execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT, + bind_arguments: Optional[_BindArguments] = None, + ) -> Optional[_O]: + # convert composite types to individual args + if ( + is_composite_class(primary_key_identity) + and type(primary_key_identity) + in descriptor_props._composite_getters + ): + getter = descriptor_props._composite_getters[ + type(primary_key_identity) + ] + primary_key_identity = getter(primary_key_identity) + + mapper: Optional[Mapper[_O]] = inspect(entity) + + if mapper is None or not mapper.is_mapper: + raise sa_exc.ArgumentError( + "Expected mapped class or mapper, got: %r" % entity + ) + + is_dict = isinstance(primary_key_identity, dict) + if not is_dict: + primary_key_identity = util.to_list( + primary_key_identity, default=[None] + ) + + if len(primary_key_identity) != len(mapper.primary_key): + raise sa_exc.InvalidRequestError( + "Incorrect number of values in identifier to formulate " + "primary key for session.get(); primary key columns " + "are %s" % ",".join("'%s'" % c for c in mapper.primary_key) + ) + + if is_dict: + pk_synonyms = mapper._pk_synonyms + + if pk_synonyms: + correct_keys = set(pk_synonyms).intersection( + primary_key_identity + ) + + if correct_keys: + primary_key_identity = dict(primary_key_identity) + for k in correct_keys: + primary_key_identity[pk_synonyms[k]] = ( + primary_key_identity[k] + ) + + try: + primary_key_identity = list( + primary_key_identity[prop.key] + for prop in mapper._identity_key_props + ) + + except KeyError as err: + raise sa_exc.InvalidRequestError( + "Incorrect names of values in identifier to formulate " + "primary key for session.get(); primary key attribute " + "names are %s (synonym names are also accepted)" + % ",".join( + "'%s'" % prop.key + for prop in mapper._identity_key_props + ) + ) from err + + if ( + not populate_existing + and not mapper.always_refresh + and with_for_update is None + ): + instance = self._identity_lookup( + mapper, + primary_key_identity, + identity_token=identity_token, + execution_options=execution_options, + bind_arguments=bind_arguments, + ) + + if instance is not None: + # reject calls for id in identity map but class + # mismatch. 
+ if not isinstance(instance, mapper.class_): + return None + return instance + + # TODO: this was being tested before, but this is not possible + assert instance is not LoaderCallableStatus.PASSIVE_CLASS_MISMATCH + + # set_label_style() not strictly necessary, however this will ensure + # that tablename_colname style is used which at the moment is + # asserted in a lot of unit tests :) + + load_options = context.QueryContext.default_load_options + + if populate_existing: + load_options += {"_populate_existing": populate_existing} + statement = sql.select(mapper).set_label_style( + LABEL_STYLE_TABLENAME_PLUS_COL + ) + if with_for_update is not None: + statement._for_update_arg = ForUpdateArg._from_argument( + with_for_update + ) + + if options: + statement = statement.options(*options) + return db_load_fn( + self, + statement, + primary_key_identity, + load_options=load_options, + identity_token=identity_token, + execution_options=execution_options, + bind_arguments=bind_arguments, + ) + + def merge( + self, + instance: _O, + *, + load: bool = True, + options: Optional[Sequence[ORMOption]] = None, + ) -> _O: + """Copy the state of a given instance into a corresponding instance + within this :class:`.Session`. + + :meth:`.Session.merge` examines the primary key attributes of the + source instance, and attempts to reconcile it with an instance of the + same primary key in the session. If not found locally, it attempts + to load the object from the database based on primary key, and if + none can be located, creates a new instance. The state of each + attribute on the source instance is then copied to the target + instance. The resulting target instance is then returned by the + method; the original source instance is left unmodified, and + un-associated with the :class:`.Session` if not already. + + This operation cascades to associated instances if the association is + mapped with ``cascade="merge"``. + + See :ref:`unitofwork_merging` for a detailed discussion of merging. + + :param instance: Instance to be merged. + :param load: Boolean, when False, :meth:`.merge` switches into + a "high performance" mode which causes it to forego emitting history + events as well as all database access. This flag is used for + cases such as transferring graphs of objects into a :class:`.Session` + from a second level cache, or to transfer just-loaded objects + into the :class:`.Session` owned by a worker thread or process + without re-querying the database. + + The ``load=False`` use case adds the caveat that the given + object has to be in a "clean" state, that is, has no pending changes + to be flushed - even if the incoming object is detached from any + :class:`.Session`. This is so that when + the merge operation populates local attributes and + cascades to related objects and + collections, the values can be "stamped" onto the + target object as is, without generating any history or attribute + events, and without the need to reconcile the incoming data with + any existing related objects or collections that might not + be loaded. The resulting objects from ``load=False`` are always + produced as "clean", so it is only appropriate that the given objects + should be "clean" as well, else this suggests a mis-use of the + method. + :param options: optional sequence of loader options which will be + applied to the :meth:`_orm.Session.get` method when the merge + operation loads the existing version of the object from the database. + + .. versionadded:: 1.4.24 + + + .. 
seealso:: + + :func:`.make_transient_to_detached` - provides for an alternative + means of "merging" a single object into the :class:`.Session` + + """ + + if self._warn_on_events: + self._flush_warning("Session.merge()") + + _recursive: Dict[InstanceState[Any], object] = {} + _resolve_conflict_map: Dict[_IdentityKeyType[Any], object] = {} + + if load: + # flush current contents if we expect to load data + self._autoflush() + + object_mapper(instance) # verify mapped + autoflush = self.autoflush + try: + self.autoflush = False + return self._merge( + attributes.instance_state(instance), + attributes.instance_dict(instance), + load=load, + options=options, + _recursive=_recursive, + _resolve_conflict_map=_resolve_conflict_map, + ) + finally: + self.autoflush = autoflush + + def _merge( + self, + state: InstanceState[_O], + state_dict: _InstanceDict, + *, + options: Optional[Sequence[ORMOption]] = None, + load: bool, + _recursive: Dict[Any, object], + _resolve_conflict_map: Dict[_IdentityKeyType[Any], object], + ) -> _O: + mapper: Mapper[_O] = _state_mapper(state) + if state in _recursive: + return cast(_O, _recursive[state]) + + new_instance = False + key = state.key + + merged: Optional[_O] + + if key is None: + if state in self._new: + util.warn( + "Instance %s is already pending in this Session yet is " + "being merged again; this is probably not what you want " + "to do" % state_str(state) + ) + + if not load: + raise sa_exc.InvalidRequestError( + "merge() with load=False option does not support " + "objects transient (i.e. unpersisted) objects. flush() " + "all changes on mapped instances before merging with " + "load=False." + ) + key = mapper._identity_key_from_state(state) + key_is_persistent = LoaderCallableStatus.NEVER_SET not in key[ + 1 + ] and ( + not _none_set.intersection(key[1]) + or ( + mapper.allow_partial_pks + and not _none_set.issuperset(key[1]) + ) + ) + else: + key_is_persistent = True + + merged = self.identity_map.get(key) + + if merged is None: + if key_is_persistent and key in _resolve_conflict_map: + merged = cast(_O, _resolve_conflict_map[key]) + + elif not load: + if state.modified: + raise sa_exc.InvalidRequestError( + "merge() with load=False option does not support " + "objects marked as 'dirty'. flush() all changes on " + "mapped instances before merging with load=False." + ) + merged = mapper.class_manager.new_instance() + merged_state = attributes.instance_state(merged) + merged_state.key = key + self._update_impl(merged_state) + new_instance = True + + elif key_is_persistent: + merged = self.get( + mapper.class_, + key[1], + identity_token=key[2], + options=options, + ) + + if merged is None: + merged = mapper.class_manager.new_instance() + merged_state = attributes.instance_state(merged) + merged_dict = attributes.instance_dict(merged) + new_instance = True + self._save_or_update_state(merged_state) + else: + merged_state = attributes.instance_state(merged) + merged_dict = attributes.instance_dict(merged) + + _recursive[state] = merged + _resolve_conflict_map[key] = merged + + # check that we didn't just pull the exact same + # state out. 
+ if state is not merged_state: + # version check if applicable + if mapper.version_id_col is not None: + existing_version = mapper._get_state_attr_by_column( + state, + state_dict, + mapper.version_id_col, + passive=PassiveFlag.PASSIVE_NO_INITIALIZE, + ) + + merged_version = mapper._get_state_attr_by_column( + merged_state, + merged_dict, + mapper.version_id_col, + passive=PassiveFlag.PASSIVE_NO_INITIALIZE, + ) + + if ( + existing_version + is not LoaderCallableStatus.PASSIVE_NO_RESULT + and merged_version + is not LoaderCallableStatus.PASSIVE_NO_RESULT + and existing_version != merged_version + ): + raise exc.StaleDataError( + "Version id '%s' on merged state %s " + "does not match existing version '%s'. " + "Leave the version attribute unset when " + "merging to update the most recent version." + % ( + existing_version, + state_str(merged_state), + merged_version, + ) + ) + + merged_state.load_path = state.load_path + merged_state.load_options = state.load_options + + # since we are copying load_options, we need to copy + # the callables_ that would have been generated by those + # load_options. + # assumes that the callables we put in state.callables_ + # are not instance-specific (which they should not be) + merged_state._copy_callables(state) + + for prop in mapper.iterate_properties: + prop.merge( + self, + state, + state_dict, + merged_state, + merged_dict, + load, + _recursive, + _resolve_conflict_map, + ) + + if not load: + # remove any history + merged_state._commit_all(merged_dict, self.identity_map) + merged_state.manager.dispatch._sa_event_merge_wo_load( + merged_state, None + ) + + if new_instance: + merged_state.manager.dispatch.load(merged_state, None) + + return merged + + def _validate_persistent(self, state: InstanceState[Any]) -> None: + if not self.identity_map.contains_state(state): + raise sa_exc.InvalidRequestError( + "Instance '%s' is not persistent within this Session" + % state_str(state) + ) + + def _save_impl(self, state: InstanceState[Any]) -> None: + if state.key is not None: + raise sa_exc.InvalidRequestError( + "Object '%s' already has an identity - " + "it can't be registered as pending" % state_str(state) + ) + + obj = state.obj() + to_attach = self._before_attach(state, obj) + if state not in self._new: + self._new[state] = obj + state.insert_order = len(self._new) + if to_attach: + self._after_attach(state, obj) + + def _update_impl( + self, state: InstanceState[Any], revert_deletion: bool = False + ) -> None: + if state.key is None: + raise sa_exc.InvalidRequestError( + "Instance '%s' is not persisted" % state_str(state) + ) + + if state._deleted: + if revert_deletion: + if not state._attached: + return + del state._deleted + else: + raise sa_exc.InvalidRequestError( + "Instance '%s' has been deleted. " + "Use the make_transient() " + "function to send this object back " + "to the transient state." 
% state_str(state) + ) + + obj = state.obj() + + # check for late gc + if obj is None: + return + + to_attach = self._before_attach(state, obj) + + self._deleted.pop(state, None) + if revert_deletion: + self.identity_map.replace(state) + else: + self.identity_map.add(state) + + if to_attach: + self._after_attach(state, obj) + elif revert_deletion: + self.dispatch.deleted_to_persistent(self, state) + + def _save_or_update_impl(self, state: InstanceState[Any]) -> None: + if state.key is None: + self._save_impl(state) + else: + self._update_impl(state) + + def enable_relationship_loading(self, obj: object) -> None: + """Associate an object with this :class:`.Session` for related + object loading. + + .. warning:: + + :meth:`.enable_relationship_loading` exists to serve special + use cases and is not recommended for general use. + + Accesses of attributes mapped with :func:`_orm.relationship` + will attempt to load a value from the database using this + :class:`.Session` as the source of connectivity. The values + will be loaded based on foreign key and primary key values + present on this object - if not present, then those relationships + will be unavailable. + + The object will be attached to this session, but will + **not** participate in any persistence operations; its state + for almost all purposes will remain either "transient" or + "detached", except for the case of relationship loading. + + Also note that backrefs will often not work as expected. + Altering a relationship-bound attribute on the target object + may not fire off a backref event, if the effective value + is what was already loaded from a foreign-key-holding value. + + The :meth:`.Session.enable_relationship_loading` method is + similar to the ``load_on_pending`` flag on :func:`_orm.relationship`. + Unlike that flag, :meth:`.Session.enable_relationship_loading` allows + an object to remain transient while still being able to load + related items. + + To make a transient object associated with a :class:`.Session` + via :meth:`.Session.enable_relationship_loading` pending, add + it to the :class:`.Session` using :meth:`.Session.add` normally. + If the object instead represents an existing identity in the database, + it should be merged using :meth:`.Session.merge`. + + :meth:`.Session.enable_relationship_loading` does not improve + behavior when the ORM is used normally - object references should be + constructed at the object level, not at the foreign key level, so + that they are present in an ordinary way before flush() + proceeds. This method is not intended for general use. + + .. seealso:: + + :paramref:`_orm.relationship.load_on_pending` - this flag + allows per-relationship loading of many-to-ones on items that + are pending. + + :func:`.make_transient_to_detached` - allows for an object to + be added to a :class:`.Session` without SQL emitted, which then + will unexpire attributes on access. 
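        A short illustrative sketch (``Order`` is assumed to be a mapped
        class with a many-to-one ``customer`` relationship resolved from a
        ``customer_id`` foreign key attribute)::

            order = Order(customer_id=5)  # transient; not added to the Session

            session.enable_relationship_loading(order)

            # the lazy load below uses this Session for connectivity, based
            # on the foreign key value already present on the transient object
            print(order.customer)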
+ + """ + try: + state = attributes.instance_state(obj) + except exc.NO_STATE as err: + raise exc.UnmappedInstanceError(obj) from err + + to_attach = self._before_attach(state, obj) + state._load_pending = True + if to_attach: + self._after_attach(state, obj) + + def _before_attach(self, state: InstanceState[Any], obj: object) -> bool: + self._autobegin_t() + + if state.session_id == self.hash_key: + return False + + if state.session_id and state.session_id in _sessions: + raise sa_exc.InvalidRequestError( + "Object '%s' is already attached to session '%s' " + "(this is '%s')" + % (state_str(state), state.session_id, self.hash_key) + ) + + self.dispatch.before_attach(self, state) + + return True + + def _after_attach(self, state: InstanceState[Any], obj: object) -> None: + state.session_id = self.hash_key + if state.modified and state._strong_obj is None: + state._strong_obj = obj + self.dispatch.after_attach(self, state) + + if state.key: + self.dispatch.detached_to_persistent(self, state) + else: + self.dispatch.transient_to_pending(self, state) + + def __contains__(self, instance: object) -> bool: + """Return True if the instance is associated with this session. + + The instance may be pending or persistent within the Session for a + result of True. + + """ + try: + state = attributes.instance_state(instance) + except exc.NO_STATE as err: + raise exc.UnmappedInstanceError(instance) from err + return self._contains_state(state) + + def __iter__(self) -> Iterator[object]: + """Iterate over all pending or persistent instances within this + Session. + + """ + return iter( + list(self._new.values()) + list(self.identity_map.values()) + ) + + def _contains_state(self, state: InstanceState[Any]) -> bool: + return state in self._new or self.identity_map.contains_state(state) + + def flush(self, objects: Optional[Sequence[Any]] = None) -> None: + """Flush all the object changes to the database. + + Writes out all pending object creations, deletions and modifications + to the database as INSERTs, DELETEs, UPDATEs, etc. Operations are + automatically ordered by the Session's unit of work dependency + solver. + + Database operations will be issued in the current transactional + context and do not affect the state of the transaction, unless an + error occurs, in which case the entire transaction is rolled back. + You may flush() as often as you like within a transaction to move + changes from Python to the database's transaction buffer. + + :param objects: Optional; restricts the flush operation to operate + only on elements that are in the given collection. + + This feature is for an extremely narrow set of use cases where + particular objects may need to be operated upon before the + full flush() occurs. It is not intended for general use. + + """ + + if self._flushing: + raise sa_exc.InvalidRequestError("Session is already flushing") + + if self._is_clean(): + return + try: + self._flushing = True + self._flush(objects) + finally: + self._flushing = False + + def _flush_warning(self, method: Any) -> None: + util.warn( + "Usage of the '%s' operation is not currently supported " + "within the execution stage of the flush process. " + "Results may not be consistent. Consider using alternative " + "event listeners or connection-level operations instead." 
% method + ) + + def _is_clean(self) -> bool: + return ( + not self.identity_map.check_modified() + and not self._deleted + and not self._new + ) + + def _flush(self, objects: Optional[Sequence[object]] = None) -> None: + dirty = self._dirty_states + if not dirty and not self._deleted and not self._new: + self.identity_map._modified.clear() + return + + flush_context = UOWTransaction(self) + + if self.dispatch.before_flush: + self.dispatch.before_flush(self, flush_context, objects) + # re-establish "dirty states" in case the listeners + # added + dirty = self._dirty_states + + deleted = set(self._deleted) + new = set(self._new) + + dirty = set(dirty).difference(deleted) + + # create the set of all objects we want to operate upon + if objects: + # specific list passed in + objset = set() + for o in objects: + try: + state = attributes.instance_state(o) + + except exc.NO_STATE as err: + raise exc.UnmappedInstanceError(o) from err + objset.add(state) + else: + objset = None + + # store objects whose fate has been decided + processed = set() + + # put all saves/updates into the flush context. detect top-level + # orphans and throw them into deleted. + if objset: + proc = new.union(dirty).intersection(objset).difference(deleted) + else: + proc = new.union(dirty).difference(deleted) + + for state in proc: + is_orphan = _state_mapper(state)._is_orphan(state) + + is_persistent_orphan = is_orphan and state.has_identity + + if ( + is_orphan + and not is_persistent_orphan + and state._orphaned_outside_of_session + ): + self._expunge_states([state]) + else: + _reg = flush_context.register_object( + state, isdelete=is_persistent_orphan + ) + assert _reg, "Failed to add object to the flush context!" + processed.add(state) + + # put all remaining deletes into the flush context. + if objset: + proc = deleted.intersection(objset).difference(processed) + else: + proc = deleted.difference(processed) + for state in proc: + _reg = flush_context.register_object(state, isdelete=True) + assert _reg, "Failed to add object to the flush context!" + + if not flush_context.has_work: + return + + flush_context.transaction = transaction = self._autobegin_t()._begin() + try: + self._warn_on_events = True + try: + flush_context.execute() + finally: + self._warn_on_events = False + + self.dispatch.after_flush(self, flush_context) + + flush_context.finalize_flush_changes() + + if not objects and self.identity_map._modified: + len_ = len(self.identity_map._modified) + + statelib.InstanceState._commit_all_states( + [ + (state, state.dict) + for state in self.identity_map._modified + ], + instance_dict=self.identity_map, + ) + util.warn( + "Attribute history events accumulated on %d " + "previously clean instances " + "within inner-flush event handlers have been " + "reset, and will not result in database updates. " + "Consider using set_committed_value() within " + "inner-flush event handlers to avoid this warning." % len_ + ) + + # useful assertions: + # if not objects: + # assert not self.identity_map._modified + # else: + # assert self.identity_map._modified == \ + # self.identity_map._modified.difference(objects) + + self.dispatch.after_flush_postexec(self, flush_context) + + transaction.commit() + + except: + with util.safe_reraise(): + transaction.rollback(_capture_exception=True) + + def bulk_save_objects( + self, + objects: Iterable[object], + return_defaults: bool = False, + update_changed_only: bool = True, + preserve_order: bool = True, + ) -> None: + """Perform a bulk save of the given list of objects. + + .. 
legacy:: + + This method is a legacy feature as of the 2.0 series of + SQLAlchemy. For modern bulk INSERT and UPDATE, see + the sections :ref:`orm_queryguide_bulk_insert` and + :ref:`orm_queryguide_bulk_update`. + + For general INSERT and UPDATE of existing ORM mapped objects, + prefer standard :term:`unit of work` data management patterns, + introduced in the :ref:`unified_tutorial` at + :ref:`tutorial_orm_data_manipulation`. SQLAlchemy 2.0 + now uses :ref:`engine_insertmanyvalues` with modern dialects + which solves previous issues of bulk INSERT slowness. + + :param objects: a sequence of mapped object instances. The mapped + objects are persisted as is, and are **not** associated with the + :class:`.Session` afterwards. + + For each object, whether the object is sent as an INSERT or an + UPDATE is dependent on the same rules used by the :class:`.Session` + in traditional operation; if the object has the + :attr:`.InstanceState.key` + attribute set, then the object is assumed to be "detached" and + will result in an UPDATE. Otherwise, an INSERT is used. + + In the case of an UPDATE, statements are grouped based on which + attributes have changed, and are thus to be the subject of each + SET clause. If ``update_changed_only`` is False, then all + attributes present within each object are applied to the UPDATE + statement, which may help in allowing the statements to be grouped + together into a larger executemany(), and will also reduce the + overhead of checking history on attributes. + + :param return_defaults: when True, rows that are missing values which + generate defaults, namely integer primary key defaults and sequences, + will be inserted **one at a time**, so that the primary key value + is available. In particular this will allow joined-inheritance + and other multi-table mappings to insert correctly without the need + to provide primary key values ahead of time; however, + :paramref:`.Session.bulk_save_objects.return_defaults` **greatly + reduces the performance gains** of the method overall. It is strongly + advised to please use the standard :meth:`_orm.Session.add_all` + approach. + + :param update_changed_only: when True, UPDATE statements are rendered + based on those attributes in each state that have logged changes. + When False, all attributes present are rendered into the SET clause + with the exception of primary key attributes. + + :param preserve_order: when True, the order of inserts and updates + matches exactly the order in which the objects are given. When + False, common types of objects are grouped into inserts + and updates, to allow for more batching opportunities. + + .. seealso:: + + :doc:`queryguide/dml` + + :meth:`.Session.bulk_insert_mappings` + + :meth:`.Session.bulk_update_mappings` + + """ + + obj_states: Iterable[InstanceState[Any]] + + obj_states = (attributes.instance_state(obj) for obj in objects) + + if not preserve_order: + # the purpose of this sort is just so that common mappers + # and persistence states are grouped together, so that groupby + # will return a single group for a particular type of mapper. + # it's not trying to be deterministic beyond that. 
+ obj_states = sorted( + obj_states, + key=lambda state: (id(state.mapper), state.key is not None), + ) + + def grouping_key( + state: InstanceState[_O], + ) -> Tuple[Mapper[_O], bool]: + return (state.mapper, state.key is not None) + + for (mapper, isupdate), states in itertools.groupby( + obj_states, grouping_key + ): + self._bulk_save_mappings( + mapper, + states, + isupdate=isupdate, + isstates=True, + return_defaults=return_defaults, + update_changed_only=update_changed_only, + render_nulls=False, + ) + + def bulk_insert_mappings( + self, + mapper: Mapper[Any], + mappings: Iterable[Dict[str, Any]], + return_defaults: bool = False, + render_nulls: bool = False, + ) -> None: + """Perform a bulk insert of the given list of mapping dictionaries. + + .. legacy:: + + This method is a legacy feature as of the 2.0 series of + SQLAlchemy. For modern bulk INSERT and UPDATE, see + the sections :ref:`orm_queryguide_bulk_insert` and + :ref:`orm_queryguide_bulk_update`. The 2.0 API shares + implementation details with this method and adds new features + as well. + + :param mapper: a mapped class, or the actual :class:`_orm.Mapper` + object, + representing the single kind of object represented within the mapping + list. + + :param mappings: a sequence of dictionaries, each one containing the + state of the mapped row to be inserted, in terms of the attribute + names on the mapped class. If the mapping refers to multiple tables, + such as a joined-inheritance mapping, each dictionary must contain all + keys to be populated into all tables. + + :param return_defaults: when True, the INSERT process will be altered + to ensure that newly generated primary key values will be fetched. + The rationale for this parameter is typically to enable + :ref:`Joined Table Inheritance ` mappings to + be bulk inserted. + + .. note:: for backends that don't support RETURNING, the + :paramref:`_orm.Session.bulk_insert_mappings.return_defaults` + parameter can significantly decrease performance as INSERT + statements can no longer be batched. See + :ref:`engine_insertmanyvalues` + for background on which backends are affected. + + :param render_nulls: When True, a value of ``None`` will result + in a NULL value being included in the INSERT statement, rather + than the column being omitted from the INSERT. This allows all + the rows being INSERTed to have the identical set of columns which + allows the full set of rows to be batched to the DBAPI. Normally, + each column-set that contains a different combination of NULL values + than the previous row must omit a different series of columns from + the rendered INSERT statement, which means it must be emitted as a + separate statement. By passing this flag, the full set of rows + are guaranteed to be batchable into one batch; the cost however is + that server-side defaults which are invoked by an omitted column will + be skipped, so care must be taken to ensure that these are not + necessary. + + .. warning:: + + When this flag is set, **server side default SQL values will + not be invoked** for those columns that are inserted as NULL; + the NULL value will be sent explicitly. Care must be taken + to ensure that no server-side default functions need to be + invoked for the operation as a whole. + + .. 
seealso:: + + :doc:`queryguide/dml` + + :meth:`.Session.bulk_save_objects` + + :meth:`.Session.bulk_update_mappings` + + """ + self._bulk_save_mappings( + mapper, + mappings, + isupdate=False, + isstates=False, + return_defaults=return_defaults, + update_changed_only=False, + render_nulls=render_nulls, + ) + + def bulk_update_mappings( + self, mapper: Mapper[Any], mappings: Iterable[Dict[str, Any]] + ) -> None: + """Perform a bulk update of the given list of mapping dictionaries. + + .. legacy:: + + This method is a legacy feature as of the 2.0 series of + SQLAlchemy. For modern bulk INSERT and UPDATE, see + the sections :ref:`orm_queryguide_bulk_insert` and + :ref:`orm_queryguide_bulk_update`. The 2.0 API shares + implementation details with this method and adds new features + as well. + + :param mapper: a mapped class, or the actual :class:`_orm.Mapper` + object, + representing the single kind of object represented within the mapping + list. + + :param mappings: a sequence of dictionaries, each one containing the + state of the mapped row to be updated, in terms of the attribute names + on the mapped class. If the mapping refers to multiple tables, such + as a joined-inheritance mapping, each dictionary may contain keys + corresponding to all tables. All those keys which are present and + are not part of the primary key are applied to the SET clause of the + UPDATE statement; the primary key values, which are required, are + applied to the WHERE clause. + + + .. seealso:: + + :doc:`queryguide/dml` + + :meth:`.Session.bulk_insert_mappings` + + :meth:`.Session.bulk_save_objects` + + """ + self._bulk_save_mappings( + mapper, + mappings, + isupdate=True, + isstates=False, + return_defaults=False, + update_changed_only=False, + render_nulls=False, + ) + + def _bulk_save_mappings( + self, + mapper: Mapper[_O], + mappings: Union[Iterable[InstanceState[_O]], Iterable[Dict[str, Any]]], + *, + isupdate: bool, + isstates: bool, + return_defaults: bool, + update_changed_only: bool, + render_nulls: bool, + ) -> None: + mapper = _class_to_mapper(mapper) + self._flushing = True + + transaction = self._autobegin_t()._begin() + try: + if isupdate: + bulk_persistence._bulk_update( + mapper, + mappings, + transaction, + isstates=isstates, + update_changed_only=update_changed_only, + ) + else: + bulk_persistence._bulk_insert( + mapper, + mappings, + transaction, + isstates=isstates, + return_defaults=return_defaults, + render_nulls=render_nulls, + ) + transaction.commit() + + except: + with util.safe_reraise(): + transaction.rollback(_capture_exception=True) + finally: + self._flushing = False + + def is_modified( + self, instance: object, include_collections: bool = True + ) -> bool: + r"""Return ``True`` if the given instance has locally + modified attributes. + + This method retrieves the history for each instrumented + attribute on the instance and performs a comparison of the current + value to its previously flushed or committed value, if any. + + It is in effect a more expensive and accurate + version of checking for the given instance in the + :attr:`.Session.dirty` collection; a full test for + each attribute's net "dirty" status is performed. + + E.g.:: + + return session.is_modified(someobject) + + A few caveats to this method apply: + + * Instances present in the :attr:`.Session.dirty` collection may + report ``False`` when tested with this method. 
This is because + the object may have received change events via attribute mutation, + thus placing it in :attr:`.Session.dirty`, but ultimately the state + is the same as that loaded from the database, resulting in no net + change here. + * Scalar attributes may not have recorded the previously set + value when a new value was applied, if the attribute was not loaded, + or was expired, at the time the new value was received - in these + cases, the attribute is assumed to have a change, even if there is + ultimately no net change against its database value. SQLAlchemy in + most cases does not need the "old" value when a set event occurs, so + it skips the expense of a SQL call if the old value isn't present, + based on the assumption that an UPDATE of the scalar value is + usually needed, and in those few cases where it isn't, is less + expensive on average than issuing a defensive SELECT. + + The "old" value is fetched unconditionally upon set only if the + attribute container has the ``active_history`` flag set to ``True``. + This flag is set typically for primary key attributes and scalar + object references that are not a simple many-to-one. To set this + flag for any arbitrary mapped column, use the ``active_history`` + argument with :func:`.column_property`. + + :param instance: mapped instance to be tested for pending changes. + :param include_collections: Indicates if multivalued collections + should be included in the operation. Setting this to ``False`` is a + way to detect only local-column based properties (i.e. scalar columns + or many-to-one foreign keys) that would result in an UPDATE for this + instance upon flush. + + """ + state = object_state(instance) + + if not state.modified: + return False + + dict_ = state.dict + + for attr in state.manager.attributes: + if ( + not include_collections + and hasattr(attr.impl, "get_collection") + ) or not hasattr(attr.impl, "get_history"): + continue + + (added, unchanged, deleted) = attr.impl.get_history( + state, dict_, passive=PassiveFlag.NO_CHANGE + ) + + if added or deleted: + return True + else: + return False + + @property + def is_active(self) -> bool: + """True if this :class:`.Session` not in "partial rollback" state. + + .. versionchanged:: 1.4 The :class:`_orm.Session` no longer begins + a new transaction immediately, so this attribute will be False + when the :class:`_orm.Session` is first instantiated. + + "partial rollback" state typically indicates that the flush process + of the :class:`_orm.Session` has failed, and that the + :meth:`_orm.Session.rollback` method must be emitted in order to + fully roll back the transaction. + + If this :class:`_orm.Session` is not in a transaction at all, the + :class:`_orm.Session` will autobegin when it is first used, so in this + case :attr:`_orm.Session.is_active` will return True. + + Otherwise, if this :class:`_orm.Session` is within a transaction, + and that transaction has not been rolled back internally, the + :attr:`_orm.Session.is_active` will also return True. + + .. seealso:: + + :ref:`faq_session_rollback` + + :meth:`_orm.Session.in_transaction` + + """ + return self._transaction is None or self._transaction.is_active + + @property + def _dirty_states(self) -> Iterable[InstanceState[Any]]: + """The set of all persistent states considered dirty. + + This method returns all states that were modified including + those that were possibly deleted. 
+ + """ + return self.identity_map._dirty_states() + + @property + def dirty(self) -> IdentitySet: + """The set of all persistent instances considered dirty. + + E.g.:: + + some_mapped_object in session.dirty + + Instances are considered dirty when they were modified but not + deleted. + + Note that this 'dirty' calculation is 'optimistic'; most + attribute-setting or collection modification operations will + mark an instance as 'dirty' and place it in this set, even if + there is no net change to the attribute's value. At flush + time, the value of each attribute is compared to its + previously saved value, and if there's no net change, no SQL + operation will occur (this is a more expensive operation so + it's only done at flush time). + + To check if an instance has actionable net changes to its + attributes, use the :meth:`.Session.is_modified` method. + + """ + return IdentitySet( + [ + state.obj() + for state in self._dirty_states + if state not in self._deleted + ] + ) + + @property + def deleted(self) -> IdentitySet: + "The set of all instances marked as 'deleted' within this ``Session``" + + return util.IdentitySet(list(self._deleted.values())) + + @property + def new(self) -> IdentitySet: + "The set of all instances marked as 'new' within this ``Session``." + + return util.IdentitySet(list(self._new.values())) + + +_S = TypeVar("_S", bound="Session") + + +class sessionmaker(_SessionClassMethods, Generic[_S]): + """A configurable :class:`.Session` factory. + + The :class:`.sessionmaker` factory generates new + :class:`.Session` objects when called, creating them given + the configurational arguments established here. + + e.g.:: + + from sqlalchemy import create_engine + from sqlalchemy.orm import sessionmaker + + # an Engine, which the Session will use for connection + # resources + engine = create_engine("postgresql+psycopg2://scott:tiger@localhost/") + + Session = sessionmaker(engine) + + with Session() as session: + session.add(some_object) + session.add(some_other_object) + session.commit() + + Context manager use is optional; otherwise, the returned + :class:`_orm.Session` object may be closed explicitly via the + :meth:`_orm.Session.close` method. Using a + ``try:/finally:`` block is optional, however will ensure that the close + takes place even if there are database errors:: + + session = Session() + try: + session.add(some_object) + session.add(some_other_object) + session.commit() + finally: + session.close() + + :class:`.sessionmaker` acts as a factory for :class:`_orm.Session` + objects in the same way as an :class:`_engine.Engine` acts as a factory + for :class:`_engine.Connection` objects. In this way it also includes + a :meth:`_orm.sessionmaker.begin` method, that provides a context + manager which both begins and commits a transaction, as well as closes + out the :class:`_orm.Session` when complete, rolling back the transaction + if any errors occur:: + + Session = sessionmaker(engine) + + with Session.begin() as session: + session.add(some_object) + session.add(some_other_object) + # commits transaction, closes session + + .. versionadded:: 1.4 + + When calling upon :class:`_orm.sessionmaker` to construct a + :class:`_orm.Session`, keyword arguments may also be passed to the + method; these arguments will override that of the globally configured + parameters. 
Below we use a :class:`_orm.sessionmaker` bound to a certain + :class:`_engine.Engine` to produce a :class:`_orm.Session` that is instead + bound to a specific :class:`_engine.Connection` procured from that engine:: + + Session = sessionmaker(engine) + + # bind an individual session to a connection + + with engine.connect() as connection: + with Session(bind=connection) as session: + ... # work with session + + The class also includes a method :meth:`_orm.sessionmaker.configure`, which + can be used to specify additional keyword arguments to the factory, which + will take effect for subsequent :class:`.Session` objects generated. This + is usually used to associate one or more :class:`_engine.Engine` objects + with an existing + :class:`.sessionmaker` factory before it is first used:: + + # application starts, sessionmaker does not have + # an engine bound yet + Session = sessionmaker() + + # ... later, when an engine URL is read from a configuration + # file or other events allow the engine to be created + engine = create_engine("sqlite:///foo.db") + Session.configure(bind=engine) + + sess = Session() + # work with session + + .. seealso:: + + :ref:`session_getting` - introductory text on creating + sessions using :class:`.sessionmaker`. + + """ + + class_: Type[_S] + + @overload + def __init__( + self, + bind: Optional[_SessionBind] = ..., + *, + class_: Type[_S], + autoflush: bool = ..., + expire_on_commit: bool = ..., + info: Optional[_InfoType] = ..., + **kw: Any, + ): ... + + @overload + def __init__( + self: "sessionmaker[Session]", + bind: Optional[_SessionBind] = ..., + *, + autoflush: bool = ..., + expire_on_commit: bool = ..., + info: Optional[_InfoType] = ..., + **kw: Any, + ): ... + + def __init__( + self, + bind: Optional[_SessionBind] = None, + *, + class_: Type[_S] = Session, # type: ignore + autoflush: bool = True, + expire_on_commit: bool = True, + info: Optional[_InfoType] = None, + **kw: Any, + ): + r"""Construct a new :class:`.sessionmaker`. + + All arguments here except for ``class_`` correspond to arguments + accepted by :class:`.Session` directly. See the + :meth:`.Session.__init__` docstring for more details on parameters. + + :param bind: a :class:`_engine.Engine` or other :class:`.Connectable` + with + which newly created :class:`.Session` objects will be associated. + :param class\_: class to use in order to create new :class:`.Session` + objects. Defaults to :class:`.Session`. + :param autoflush: The autoflush setting to use with newly created + :class:`.Session` objects. + + .. seealso:: + + :ref:`session_flushing` - additional background on autoflush + + :param expire_on_commit=True: the + :paramref:`_orm.Session.expire_on_commit` setting to use + with newly created :class:`.Session` objects. + + :param info: optional dictionary of information that will be available + via :attr:`.Session.info`. Note this dictionary is *updated*, not + replaced, when the ``info`` parameter is specified to the specific + :class:`.Session` construction operation. + + :param \**kw: all other keyword arguments are passed to the + constructor of newly created :class:`.Session` objects. + + """ + kw["bind"] = bind + kw["autoflush"] = autoflush + kw["expire_on_commit"] = expire_on_commit + if info is not None: + kw["info"] = info + self.kw = kw + # make our own subclass of the given class, so that + # events can be associated with it specifically. 
+ self.class_ = type(class_.__name__, (class_,), {}) + + def begin(self) -> contextlib.AbstractContextManager[_S]: + """Produce a context manager that both provides a new + :class:`_orm.Session` as well as a transaction that commits. + + + e.g.:: + + Session = sessionmaker(some_engine) + + with Session.begin() as session: + session.add(some_object) + + # commits transaction, closes session + + .. versionadded:: 1.4 + + + """ + + session = self() + return session._maker_context_manager() + + def __call__(self, **local_kw: Any) -> _S: + """Produce a new :class:`.Session` object using the configuration + established in this :class:`.sessionmaker`. + + In Python, the ``__call__`` method is invoked on an object when + it is "called" in the same way as a function:: + + Session = sessionmaker(some_engine) + session = Session() # invokes sessionmaker.__call__() + + """ + for k, v in self.kw.items(): + if k == "info" and "info" in local_kw: + d = v.copy() + d.update(local_kw["info"]) + local_kw["info"] = d + else: + local_kw.setdefault(k, v) + return self.class_(**local_kw) + + def configure(self, **new_kw: Any) -> None: + """(Re)configure the arguments for this sessionmaker. + + e.g.:: + + Session = sessionmaker() + + Session.configure(bind=create_engine("sqlite://")) + """ + self.kw.update(new_kw) + + def __repr__(self) -> str: + return "%s(class_=%r, %s)" % ( + self.__class__.__name__, + self.class_.__name__, + ", ".join("%s=%r" % (k, v) for k, v in self.kw.items()), + ) + + +def close_all_sessions() -> None: + """Close all sessions in memory. + + This function consults a global registry of all :class:`.Session` objects + and calls :meth:`.Session.close` on them, which resets them to a clean + state. + + This function is not for general use but may be useful for test suites + within the teardown scheme. + + .. versionadded:: 1.3 + + """ + + for sess in _sessions.values(): + sess.close() + + +def make_transient(instance: object) -> None: + """Alter the state of the given instance so that it is :term:`transient`. + + .. note:: + + :func:`.make_transient` is a special-case function for + advanced use cases only. + + The given mapped instance is assumed to be in the :term:`persistent` or + :term:`detached` state. The function will remove its association with any + :class:`.Session` as well as its :attr:`.InstanceState.identity`. The + effect is that the object will behave as though it were newly constructed, + except retaining any attribute / collection values that were loaded at the + time of the call. The :attr:`.InstanceState.deleted` flag is also reset + if this object had been deleted as a result of using + :meth:`.Session.delete`. + + .. warning:: + + :func:`.make_transient` does **not** "unexpire" or otherwise eagerly + load ORM-mapped attributes that are not currently loaded at the time + the function is called. This includes attributes which: + + * were expired via :meth:`.Session.expire` + + * were expired as the natural effect of committing a session + transaction, e.g. :meth:`.Session.commit` + + * are normally :term:`lazy loaded` but are not currently loaded + + * are "deferred" (see :ref:`orm_queryguide_column_deferral`) and are + not yet loaded + + * were not present in the query which loaded this object, such as that + which is common in joined table inheritance and other scenarios. 
+ + After :func:`.make_transient` is called, unloaded attributes such + as those above will normally resolve to the value ``None`` when + accessed, or an empty collection for a collection-oriented attribute. + As the object is transient and un-associated with any database + identity, it will no longer retrieve these values. + + .. seealso:: + + :func:`.make_transient_to_detached` + + """ + state = attributes.instance_state(instance) + s = _state_session(state) + if s: + s._expunge_states([state]) + + # remove expired state + state.expired_attributes.clear() + + # remove deferred callables + if state.callables: + del state.callables + + if state.key: + del state.key + if state._deleted: + del state._deleted + + +def make_transient_to_detached(instance: object) -> None: + """Make the given transient instance :term:`detached`. + + .. note:: + + :func:`.make_transient_to_detached` is a special-case function for + advanced use cases only. + + All attribute history on the given instance + will be reset as though the instance were freshly loaded + from a query. Missing attributes will be marked as expired. + The primary key attributes of the object, which are required, will be made + into the "key" of the instance. + + The object can then be added to a session, or merged + possibly with the load=False flag, at which point it will look + as if it were loaded that way, without emitting SQL. + + This is a special use case function that differs from a normal + call to :meth:`.Session.merge` in that a given persistent state + can be manufactured without any SQL calls. + + .. seealso:: + + :func:`.make_transient` + + :meth:`.Session.enable_relationship_loading` + + """ + state = attributes.instance_state(instance) + if state.session_id or state.key: + raise sa_exc.InvalidRequestError("Given object must be transient") + state.key = state.mapper._identity_key_from_state(state) + if state._deleted: + del state._deleted + state._commit_all(state.dict) + state._expire_attributes(state.dict, state.unloaded) + + +def object_session(instance: object) -> Optional[Session]: + """Return the :class:`.Session` to which the given instance belongs. + + This is essentially the same as the :attr:`.InstanceState.session` + accessor. See that attribute for details. + + """ + + try: + state = attributes.instance_state(instance) + except exc.NO_STATE as err: + raise exc.UnmappedInstanceError(instance) from err + else: + return _state_session(state) + + +_new_sessionid = util.counter() diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/state.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/state.py new file mode 100644 index 0000000000000000000000000000000000000000..d4bbf92099312ba929c23ca6233133bb8367db88 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/state.py @@ -0,0 +1,1143 @@ +# orm/state.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php + +"""Defines instrumentation of instances. + +This module is usually not directly visible to user applications, but +defines a large part of the ORM's interactivity. 
+ +""" + +from __future__ import annotations + +from typing import Any +from typing import Callable +from typing import Dict +from typing import Generic +from typing import Iterable +from typing import Optional +from typing import Set +from typing import Tuple +from typing import TYPE_CHECKING +from typing import Union +import weakref + +from . import base +from . import exc as orm_exc +from . import interfaces +from ._typing import _O +from ._typing import is_collection_impl +from .base import ATTR_WAS_SET +from .base import INIT_OK +from .base import LoaderCallableStatus +from .base import NEVER_SET +from .base import NO_VALUE +from .base import PASSIVE_NO_INITIALIZE +from .base import PASSIVE_NO_RESULT +from .base import PASSIVE_OFF +from .base import SQL_OK +from .path_registry import PathRegistry +from .. import exc as sa_exc +from .. import inspection +from .. import util +from ..util.typing import Literal +from ..util.typing import Protocol + +if TYPE_CHECKING: + from ._typing import _IdentityKeyType + from ._typing import _InstanceDict + from ._typing import _LoaderCallable + from .attributes import AttributeImpl + from .attributes import History + from .base import PassiveFlag + from .collections import _AdaptedCollectionProtocol + from .identity import IdentityMap + from .instrumentation import ClassManager + from .interfaces import ORMOption + from .mapper import Mapper + from .session import Session + from ..engine import Row + from ..ext.asyncio.session import async_session as _async_provider + from ..ext.asyncio.session import AsyncSession + +if TYPE_CHECKING: + _sessions: weakref.WeakValueDictionary[int, Session] +else: + # late-populated by session.py + _sessions = None + + +if not TYPE_CHECKING: + # optionally late-provided by sqlalchemy.ext.asyncio.session + + _async_provider = None # noqa + + +class _InstanceDictProto(Protocol): + def __call__(self) -> Optional[IdentityMap]: ... + + +class _InstallLoaderCallableProto(Protocol[_O]): + """used at result loading time to install a _LoaderCallable callable + upon a specific InstanceState, which will be used to populate an + attribute when that attribute is accessed. + + Concrete examples are per-instance deferred column loaders and + relationship lazy loaders. + + """ + + def __call__( + self, state: InstanceState[_O], dict_: _InstanceDict, row: Row[Any] + ) -> None: ... + + +@inspection._self_inspects +class InstanceState(interfaces.InspectionAttrInfo, Generic[_O]): + """Tracks state information at the instance level. + + The :class:`.InstanceState` is a key object used by the + SQLAlchemy ORM in order to track the state of an object; + it is created the moment an object is instantiated, typically + as a result of :term:`instrumentation` which SQLAlchemy applies + to the ``__init__()`` method of the class. + + :class:`.InstanceState` is also a semi-public object, + available for runtime inspection as to the state of a + mapped instance, including information such as its current + status within a particular :class:`.Session` and details + about data on individual attributes. The public API + in order to acquire a :class:`.InstanceState` object + is to use the :func:`_sa.inspect` system:: + + >>> from sqlalchemy import inspect + >>> insp = inspect(some_mapped_object) + >>> insp.attrs.nickname.history + History(added=['new nickname'], unchanged=(), deleted=['nickname']) + + .. 
seealso:: + + :ref:`orm_mapper_inspection_instancestate` + + """ + + __slots__ = ( + "__dict__", + "__weakref__", + "class_", + "manager", + "obj", + "committed_state", + "expired_attributes", + ) + + manager: ClassManager[_O] + session_id: Optional[int] = None + key: Optional[_IdentityKeyType[_O]] = None + runid: Optional[int] = None + load_options: Tuple[ORMOption, ...] = () + load_path: PathRegistry = PathRegistry.root + insert_order: Optional[int] = None + _strong_obj: Optional[object] = None + obj: weakref.ref[_O] + + committed_state: Dict[str, Any] + + modified: bool = False + """When ``True`` the object was modified.""" + expired: bool = False + """When ``True`` the object is :term:`expired`. + + .. seealso:: + + :ref:`session_expire` + """ + _deleted: bool = False + _load_pending: bool = False + _orphaned_outside_of_session: bool = False + is_instance: bool = True + identity_token: object = None + _last_known_values: Optional[Dict[str, Any]] = None + + _instance_dict: _InstanceDictProto + """A weak reference, or in the default case a plain callable, that + returns a reference to the current :class:`.IdentityMap`, if any. + + """ + if not TYPE_CHECKING: + + def _instance_dict(self): + """default 'weak reference' for _instance_dict""" + return None + + expired_attributes: Set[str] + """The set of keys which are 'expired' to be loaded by + the manager's deferred scalar loader, assuming no pending + changes. + + See also the ``unmodified`` collection which is intersected + against this set when a refresh operation occurs. + """ + + callables: Dict[str, Callable[[InstanceState[_O], PassiveFlag], Any]] + """A namespace where a per-state loader callable can be associated. + + In SQLAlchemy 1.0, this is only used for lazy loaders / deferred + loaders that were set up via query option. + + Previously, callables was used also to indicate expired attributes + by storing a link to the InstanceState itself in this dictionary. + This role is now handled by the expired_attributes set. + + """ + + if not TYPE_CHECKING: + callables = util.EMPTY_DICT + + def __init__(self, obj: _O, manager: ClassManager[_O]): + self.class_ = obj.__class__ + self.manager = manager + self.obj = weakref.ref(obj, self._cleanup) + self.committed_state = {} + self.expired_attributes = set() + + @util.memoized_property + def attrs(self) -> util.ReadOnlyProperties[AttributeState]: + """Return a namespace representing each attribute on + the mapped object, including its current value + and history. + + The returned object is an instance of :class:`.AttributeState`. + This object allows inspection of the current data + within an attribute as well as attribute history + since the last flush. + + """ + return util.ReadOnlyProperties( + {key: AttributeState(self, key) for key in self.manager} + ) + + @property + def transient(self) -> bool: + """Return ``True`` if the object is :term:`transient`. + + .. seealso:: + + :ref:`session_object_states` + + """ + return self.key is None and not self._attached + + @property + def pending(self) -> bool: + """Return ``True`` if the object is :term:`pending`. + + .. seealso:: + + :ref:`session_object_states` + + """ + return self.key is None and self._attached + + @property + def deleted(self) -> bool: + """Return ``True`` if the object is :term:`deleted`. 
+ + An object that is in the deleted state is guaranteed to + not be within the :attr:`.Session.identity_map` of its parent + :class:`.Session`; however if the session's transaction is rolled + back, the object will be restored to the persistent state and + the identity map. + + .. note:: + + The :attr:`.InstanceState.deleted` attribute refers to a specific + state of the object that occurs between the "persistent" and + "detached" states; once the object is :term:`detached`, the + :attr:`.InstanceState.deleted` attribute **no longer returns + True**; in order to detect that a state was deleted, regardless + of whether or not the object is associated with a + :class:`.Session`, use the :attr:`.InstanceState.was_deleted` + accessor. + + .. versionadded: 1.1 + + .. seealso:: + + :ref:`session_object_states` + + """ + return self.key is not None and self._attached and self._deleted + + @property + def was_deleted(self) -> bool: + """Return True if this object is or was previously in the + "deleted" state and has not been reverted to persistent. + + This flag returns True once the object was deleted in flush. + When the object is expunged from the session either explicitly + or via transaction commit and enters the "detached" state, + this flag will continue to report True. + + .. seealso:: + + :attr:`.InstanceState.deleted` - refers to the "deleted" state + + :func:`.orm.util.was_deleted` - standalone function + + :ref:`session_object_states` + + """ + return self._deleted + + @property + def persistent(self) -> bool: + """Return ``True`` if the object is :term:`persistent`. + + An object that is in the persistent state is guaranteed to + be within the :attr:`.Session.identity_map` of its parent + :class:`.Session`. + + .. seealso:: + + :ref:`session_object_states` + + """ + return self.key is not None and self._attached and not self._deleted + + @property + def detached(self) -> bool: + """Return ``True`` if the object is :term:`detached`. + + .. seealso:: + + :ref:`session_object_states` + + """ + return self.key is not None and not self._attached + + @util.non_memoized_property + @util.preload_module("sqlalchemy.orm.session") + def _attached(self) -> bool: + return ( + self.session_id is not None + and self.session_id in util.preloaded.orm_session._sessions + ) + + def _track_last_known_value(self, key: str) -> None: + """Track the last known value of a particular key after expiration + operations. + + .. versionadded:: 1.3 + + """ + + lkv = self._last_known_values + if lkv is None: + self._last_known_values = lkv = {} + if key not in lkv: + lkv[key] = NO_VALUE + + @property + def session(self) -> Optional[Session]: + """Return the owning :class:`.Session` for this instance, + or ``None`` if none available. + + Note that the result here can in some cases be *different* + from that of ``obj in session``; an object that's been deleted + will report as not ``in session``, however if the transaction is + still in progress, this attribute will still refer to that session. + Only when the transaction is completed does the object become + fully detached under normal circumstances. + + .. seealso:: + + :attr:`_orm.InstanceState.async_session` + + """ + if self.session_id: + try: + return _sessions[self.session_id] + except KeyError: + pass + return None + + @property + def async_session(self) -> Optional[AsyncSession]: + """Return the owning :class:`_asyncio.AsyncSession` for this instance, + or ``None`` if none available. 
+ + This attribute is only non-None when the :mod:`sqlalchemy.ext.asyncio` + API is in use for this ORM object. The returned + :class:`_asyncio.AsyncSession` object will be a proxy for the + :class:`_orm.Session` object that would be returned from the + :attr:`_orm.InstanceState.session` attribute for this + :class:`_orm.InstanceState`. + + .. versionadded:: 1.4.18 + + .. seealso:: + + :ref:`asyncio_toplevel` + + """ + if _async_provider is None: + return None + + sess = self.session + if sess is not None: + return _async_provider(sess) + else: + return None + + @property + def object(self) -> Optional[_O]: + """Return the mapped object represented by this + :class:`.InstanceState`. + + Returns None if the object has been garbage collected + + """ + return self.obj() + + @property + def identity(self) -> Optional[Tuple[Any, ...]]: + """Return the mapped identity of the mapped object. + This is the primary key identity as persisted by the ORM + which can always be passed directly to + :meth:`_query.Query.get`. + + Returns ``None`` if the object has no primary key identity. + + .. note:: + An object which is :term:`transient` or :term:`pending` + does **not** have a mapped identity until it is flushed, + even if its attributes include primary key values. + + """ + if self.key is None: + return None + else: + return self.key[1] + + @property + def identity_key(self) -> Optional[_IdentityKeyType[_O]]: + """Return the identity key for the mapped object. + + This is the key used to locate the object within + the :attr:`.Session.identity_map` mapping. It contains + the identity as returned by :attr:`.identity` within it. + + + """ + return self.key + + @util.memoized_property + def parents(self) -> Dict[int, Union[Literal[False], InstanceState[Any]]]: + return {} + + @util.memoized_property + def _pending_mutations(self) -> Dict[str, PendingCollection]: + return {} + + @util.memoized_property + def _empty_collections(self) -> Dict[str, _AdaptedCollectionProtocol]: + return {} + + @util.memoized_property + def mapper(self) -> Mapper[_O]: + """Return the :class:`_orm.Mapper` used for this mapped object.""" + return self.manager.mapper + + @property + def has_identity(self) -> bool: + """Return ``True`` if this object has an identity key. + + This should always have the same value as the + expression ``state.persistent`` or ``state.detached``. 
+ + """ + return bool(self.key) + + @classmethod + def _detach_states( + self, + states: Iterable[InstanceState[_O]], + session: Session, + to_transient: bool = False, + ) -> None: + persistent_to_detached = ( + session.dispatch.persistent_to_detached or None + ) + deleted_to_detached = session.dispatch.deleted_to_detached or None + pending_to_transient = session.dispatch.pending_to_transient or None + persistent_to_transient = ( + session.dispatch.persistent_to_transient or None + ) + + for state in states: + deleted = state._deleted + pending = state.key is None + persistent = not pending and not deleted + + state.session_id = None + + if to_transient and state.key: + del state.key + if persistent: + if to_transient: + if persistent_to_transient is not None: + persistent_to_transient(session, state) + elif persistent_to_detached is not None: + persistent_to_detached(session, state) + elif deleted and deleted_to_detached is not None: + deleted_to_detached(session, state) + elif pending and pending_to_transient is not None: + pending_to_transient(session, state) + + state._strong_obj = None + + def _detach(self, session: Optional[Session] = None) -> None: + if session: + InstanceState._detach_states([self], session) + else: + self.session_id = self._strong_obj = None + + def _dispose(self) -> None: + # used by the test suite, apparently + self._detach() + + def _cleanup(self, ref: weakref.ref[_O]) -> None: + """Weakref callback cleanup. + + This callable cleans out the state when it is being garbage + collected. + + this _cleanup **assumes** that there are no strong refs to us! + Will not work otherwise! + + """ + + # Python builtins become undefined during interpreter shutdown. + # Guard against exceptions during this phase, as the method cannot + # proceed in any case if builtins have been undefined. + if dict is None: + return + + instance_dict = self._instance_dict() + if instance_dict is not None: + instance_dict._fast_discard(self) + del self._instance_dict + + # we can't possibly be in instance_dict._modified + # b.c. this is weakref cleanup only, that set + # is strong referencing! + # assert self not in instance_dict._modified + + self.session_id = self._strong_obj = None + + @property + def dict(self) -> _InstanceDict: + """Return the instance dict used by the object. + + Under normal circumstances, this is always synonymous + with the ``__dict__`` attribute of the mapped object, + unless an alternative instrumentation system has been + configured. + + In the case that the actual object has been garbage + collected, this accessor returns a blank dictionary. 
+ + """ + o = self.obj() + if o is not None: + return base.instance_dict(o) + else: + return {} + + def _initialize_instance(*mixed: Any, **kwargs: Any) -> None: + self, instance, args = mixed[0], mixed[1], mixed[2:] # noqa + manager = self.manager + + manager.dispatch.init(self, args, kwargs) + + try: + manager.original_init(*mixed[1:], **kwargs) + except: + with util.safe_reraise(): + manager.dispatch.init_failure(self, args, kwargs) + + def get_history(self, key: str, passive: PassiveFlag) -> History: + return self.manager[key].impl.get_history(self, self.dict, passive) + + def get_impl(self, key: str) -> AttributeImpl: + return self.manager[key].impl + + def _get_pending_mutation(self, key: str) -> PendingCollection: + if key not in self._pending_mutations: + self._pending_mutations[key] = PendingCollection() + return self._pending_mutations[key] + + def __getstate__(self) -> Dict[str, Any]: + state_dict: Dict[str, Any] = { + "instance": self.obj(), + "class_": self.class_, + "committed_state": self.committed_state, + "expired_attributes": self.expired_attributes, + } + state_dict.update( + (k, self.__dict__[k]) + for k in ( + "_pending_mutations", + "modified", + "expired", + "callables", + "key", + "parents", + "load_options", + "class_", + "expired_attributes", + "info", + ) + if k in self.__dict__ + ) + if self.load_path: + state_dict["load_path"] = self.load_path.serialize() + + state_dict["manager"] = self.manager._serialize(self, state_dict) + + return state_dict + + def __setstate__(self, state_dict: Dict[str, Any]) -> None: + inst = state_dict["instance"] + if inst is not None: + self.obj = weakref.ref(inst, self._cleanup) + self.class_ = inst.__class__ + else: + self.obj = lambda: None # type: ignore + self.class_ = state_dict["class_"] + + self.committed_state = state_dict.get("committed_state", {}) + self._pending_mutations = state_dict.get("_pending_mutations", {}) + self.parents = state_dict.get("parents", {}) + self.modified = state_dict.get("modified", False) + self.expired = state_dict.get("expired", False) + if "info" in state_dict: + self.info.update(state_dict["info"]) + if "callables" in state_dict: + self.callables = state_dict["callables"] + + self.expired_attributes = state_dict["expired_attributes"] + else: + if "expired_attributes" in state_dict: + self.expired_attributes = state_dict["expired_attributes"] + else: + self.expired_attributes = set() + + self.__dict__.update( + [ + (k, state_dict[k]) + for k in ("key", "load_options") + if k in state_dict + ] + ) + if self.key: + self.identity_token = self.key[2] + + if "load_path" in state_dict: + self.load_path = PathRegistry.deserialize(state_dict["load_path"]) + + state_dict["manager"](self, inst, state_dict) + + def _reset(self, dict_: _InstanceDict, key: str) -> None: + """Remove the given attribute and any + callables associated with it.""" + + old = dict_.pop(key, None) + manager_impl = self.manager[key].impl + if old is not None and is_collection_impl(manager_impl): + manager_impl._invalidate_collection(old) + self.expired_attributes.discard(key) + if self.callables: + self.callables.pop(key, None) + + def _copy_callables(self, from_: InstanceState[Any]) -> None: + if "callables" in from_.__dict__: + self.callables = dict(from_.callables) + + @classmethod + def _instance_level_callable_processor( + cls, manager: ClassManager[_O], fn: _LoaderCallable, key: Any + ) -> _InstallLoaderCallableProto[_O]: + impl = manager[key].impl + if is_collection_impl(impl): + fixed_impl = impl + + def _set_callable( + 
state: InstanceState[_O], dict_: _InstanceDict, row: Row[Any] + ) -> None: + if "callables" not in state.__dict__: + state.callables = {} + old = dict_.pop(key, None) + if old is not None: + fixed_impl._invalidate_collection(old) + state.callables[key] = fn + + else: + + def _set_callable( + state: InstanceState[_O], dict_: _InstanceDict, row: Row[Any] + ) -> None: + if "callables" not in state.__dict__: + state.callables = {} + state.callables[key] = fn + + return _set_callable + + def _expire( + self, dict_: _InstanceDict, modified_set: Set[InstanceState[Any]] + ) -> None: + self.expired = True + if self.modified: + modified_set.discard(self) + self.committed_state.clear() + self.modified = False + + self._strong_obj = None + + if "_pending_mutations" in self.__dict__: + del self.__dict__["_pending_mutations"] + + if "parents" in self.__dict__: + del self.__dict__["parents"] + + self.expired_attributes.update( + [impl.key for impl in self.manager._loader_impls] + ) + + if self.callables: + # the per state loader callables we can remove here are + # LoadDeferredColumns, which undefers a column at the instance + # level that is mapped with deferred, and LoadLazyAttribute, + # which lazy loads a relationship at the instance level that + # is mapped with "noload" or perhaps "immediateload". + # Before 1.4, only column-based + # attributes could be considered to be "expired", so here they + # were the only ones "unexpired", which means to make them deferred + # again. For the moment, as of 1.4 we also apply the same + # treatment relationships now, that is, an instance level lazy + # loader is reset in the same way as a column loader. + for k in self.expired_attributes.intersection(self.callables): + del self.callables[k] + + for k in self.manager._collection_impl_keys.intersection(dict_): + collection = dict_.pop(k) + collection._sa_adapter.invalidated = True + + if self._last_known_values: + self._last_known_values.update( + {k: dict_[k] for k in self._last_known_values if k in dict_} + ) + + for key in self.manager._all_key_set.intersection(dict_): + del dict_[key] + + self.manager.dispatch.expire(self, None) + + def _expire_attributes( + self, + dict_: _InstanceDict, + attribute_names: Iterable[str], + no_loader: bool = False, + ) -> None: + pending = self.__dict__.get("_pending_mutations", None) + + callables = self.callables + + for key in attribute_names: + impl = self.manager[key].impl + if impl.accepts_scalar_loader: + if no_loader and (impl.callable_ or key in callables): + continue + + self.expired_attributes.add(key) + if callables and key in callables: + del callables[key] + old = dict_.pop(key, NO_VALUE) + if is_collection_impl(impl) and old is not NO_VALUE: + impl._invalidate_collection(old) + + lkv = self._last_known_values + if lkv is not None and key in lkv and old is not NO_VALUE: + lkv[key] = old + + self.committed_state.pop(key, None) + if pending: + pending.pop(key, None) + + self.manager.dispatch.expire(self, attribute_names) + + def _load_expired( + self, state: InstanceState[_O], passive: PassiveFlag + ) -> LoaderCallableStatus: + """__call__ allows the InstanceState to act as a deferred + callable for loading expired attributes, which is also + serializable (picklable). 
+ + """ + + if not passive & SQL_OK: + return PASSIVE_NO_RESULT + + toload = self.expired_attributes.intersection(self.unmodified) + toload = toload.difference( + attr + for attr in toload + if not self.manager[attr].impl.load_on_unexpire + ) + + self.manager.expired_attribute_loader(self, toload, passive) + + # if the loader failed, or this + # instance state didn't have an identity, + # the attributes still might be in the callables + # dict. ensure they are removed. + self.expired_attributes.clear() + + return ATTR_WAS_SET + + @property + def unmodified(self) -> Set[str]: + """Return the set of keys which have no uncommitted changes""" + + return set(self.manager).difference(self.committed_state) + + def unmodified_intersection(self, keys: Iterable[str]) -> Set[str]: + """Return self.unmodified.intersection(keys).""" + + return ( + set(keys) + .intersection(self.manager) + .difference(self.committed_state) + ) + + @property + def unloaded(self) -> Set[str]: + """Return the set of keys which do not have a loaded value. + + This includes expired attributes and any other attribute that was never + populated or modified. + + """ + return ( + set(self.manager) + .difference(self.committed_state) + .difference(self.dict) + ) + + @property + @util.deprecated( + "2.0", + "The :attr:`.InstanceState.unloaded_expirable` attribute is " + "deprecated. Please use :attr:`.InstanceState.unloaded`.", + ) + def unloaded_expirable(self) -> Set[str]: + """Synonymous with :attr:`.InstanceState.unloaded`. + + This attribute was added as an implementation-specific detail at some + point and should be considered to be private. + + """ + return self.unloaded + + @property + def _unloaded_non_object(self) -> Set[str]: + return self.unloaded.intersection( + attr + for attr in self.manager + if self.manager[attr].impl.accepts_scalar_loader + ) + + def _modified_event( + self, + dict_: _InstanceDict, + attr: Optional[AttributeImpl], + previous: Any, + collection: bool = False, + is_userland: bool = False, + ) -> None: + if attr: + if not attr.send_modified_events: + return + if is_userland and attr.key not in dict_: + raise sa_exc.InvalidRequestError( + "Can't flag attribute '%s' modified; it's not present in " + "the object state" % attr.key + ) + if attr.key not in self.committed_state or is_userland: + if collection: + if TYPE_CHECKING: + assert is_collection_impl(attr) + if previous is NEVER_SET: + if attr.key in dict_: + previous = dict_[attr.key] + + if previous not in (None, NO_VALUE, NEVER_SET): + previous = attr.copy(previous) + self.committed_state[attr.key] = previous + + lkv = self._last_known_values + if lkv is not None and attr.key in lkv: + lkv[attr.key] = NO_VALUE + + # assert self._strong_obj is None or self.modified + + if (self.session_id and self._strong_obj is None) or not self.modified: + self.modified = True + instance_dict = self._instance_dict() + if instance_dict: + has_modified = bool(instance_dict._modified) + instance_dict._modified.add(self) + else: + has_modified = False + + # only create _strong_obj link if attached + # to a session + + inst = self.obj() + if self.session_id: + self._strong_obj = inst + + # if identity map already had modified objects, + # assume autobegin already occurred, else check + # for autobegin + if not has_modified: + # inline of autobegin, to ensure session transaction + # snapshot is established + try: + session = _sessions[self.session_id] + except KeyError: + pass + else: + if session._transaction is None: + session._autobegin_t() + + if inst is None and 
attr: + raise orm_exc.ObjectDereferencedError( + "Can't emit change event for attribute '%s' - " + "parent object of type %s has been garbage " + "collected." + % (self.manager[attr.key], base.state_class_str(self)) + ) + + def _commit(self, dict_: _InstanceDict, keys: Iterable[str]) -> None: + """Commit attributes. + + This is used by a partial-attribute load operation to mark committed + those attributes which were refreshed from the database. + + Attributes marked as "expired" can potentially remain "expired" after + this step if a value was not populated in state.dict. + + """ + for key in keys: + self.committed_state.pop(key, None) + + self.expired = False + + self.expired_attributes.difference_update( + set(keys).intersection(dict_) + ) + + # the per-keys commit removes object-level callables, + # while that of commit_all does not. it's not clear + # if this behavior has a clear rationale, however tests do + # ensure this is what it does. + if self.callables: + for key in ( + set(self.callables).intersection(keys).intersection(dict_) + ): + del self.callables[key] + + def _commit_all( + self, dict_: _InstanceDict, instance_dict: Optional[IdentityMap] = None + ) -> None: + """commit all attributes unconditionally. + + This is used after a flush() or a full load/refresh + to remove all pending state from the instance. + + - all attributes are marked as "committed" + - the "strong dirty reference" is removed + - the "modified" flag is set to False + - any "expired" markers for scalar attributes loaded are removed. + - lazy load callables for objects / collections *stay* + + Attributes marked as "expired" can potentially remain + "expired" after this step if a value was not populated in state.dict. + + """ + self._commit_all_states([(self, dict_)], instance_dict) + + @classmethod + def _commit_all_states( + self, + iter_: Iterable[Tuple[InstanceState[Any], _InstanceDict]], + instance_dict: Optional[IdentityMap] = None, + ) -> None: + """Mass / highly inlined version of commit_all().""" + + for state, dict_ in iter_: + state_dict = state.__dict__ + + state.committed_state.clear() + + if "_pending_mutations" in state_dict: + del state_dict["_pending_mutations"] + + state.expired_attributes.difference_update(dict_) + + if instance_dict and state.modified: + instance_dict._modified.discard(state) + + state.modified = state.expired = False + state._strong_obj = None + + +class AttributeState: + """Provide an inspection interface corresponding + to a particular attribute on a particular mapped object. + + The :class:`.AttributeState` object is accessed + via the :attr:`.InstanceState.attrs` collection + of a particular :class:`.InstanceState`:: + + from sqlalchemy import inspect + + insp = inspect(some_mapped_object) + attr_state = insp.attrs.some_attribute + + """ + + __slots__ = ("state", "key") + + state: InstanceState[Any] + key: str + + def __init__(self, state: InstanceState[Any], key: str): + self.state = state + self.key = key + + @property + def loaded_value(self) -> Any: + """The current value of this attribute as loaded from the database. + + If the value has not been loaded, or is otherwise not present + in the object's dictionary, returns NO_VALUE. + + """ + return self.state.dict.get(self.key, NO_VALUE) + + @property + def value(self) -> Any: + """Return the value of this attribute. + + This operation is equivalent to accessing the object's + attribute directly or via ``getattr()``, and will fire + off any pending loader callables if needed. 
+ + """ + return self.state.manager[self.key].__get__( + self.state.obj(), self.state.class_ + ) + + @property + def history(self) -> History: + """Return the current **pre-flush** change history for + this attribute, via the :class:`.History` interface. + + This method will **not** emit loader callables if the value of the + attribute is unloaded. + + .. note:: + + The attribute history system tracks changes on a **per flush + basis**. Each time the :class:`.Session` is flushed, the history + of each attribute is reset to empty. The :class:`.Session` by + default autoflushes each time a :class:`_query.Query` is invoked. + For + options on how to control this, see :ref:`session_flushing`. + + + .. seealso:: + + :meth:`.AttributeState.load_history` - retrieve history + using loader callables if the value is not locally present. + + :func:`.attributes.get_history` - underlying function + + """ + return self.state.get_history(self.key, PASSIVE_NO_INITIALIZE) + + def load_history(self) -> History: + """Return the current **pre-flush** change history for + this attribute, via the :class:`.History` interface. + + This method **will** emit loader callables if the value of the + attribute is unloaded. + + .. note:: + + The attribute history system tracks changes on a **per flush + basis**. Each time the :class:`.Session` is flushed, the history + of each attribute is reset to empty. The :class:`.Session` by + default autoflushes each time a :class:`_query.Query` is invoked. + For + options on how to control this, see :ref:`session_flushing`. + + .. seealso:: + + :attr:`.AttributeState.history` + + :func:`.attributes.get_history` - underlying function + + """ + return self.state.get_history(self.key, PASSIVE_OFF ^ INIT_OK) + + +class PendingCollection: + """A writable placeholder for an unloaded collection. + + Stores items appended to and removed from a collection that has not yet + been loaded. When the collection is loaded, the changes stored in + PendingCollection are applied to it to produce the final result. + + """ + + __slots__ = ("deleted_items", "added_items") + + deleted_items: util.IdentitySet + added_items: util.OrderedIdentitySet + + def __init__(self) -> None: + self.deleted_items = util.IdentitySet() + self.added_items = util.OrderedIdentitySet() + + def merge_with_history(self, history: History) -> History: + return history._merge(self.added_items, self.deleted_items) + + def append(self, value: Any) -> None: + if value in self.deleted_items: + self.deleted_items.remove(value) + else: + self.added_items.add(value) + + def remove(self, value: Any) -> None: + if value in self.added_items: + self.added_items.remove(value) + else: + self.deleted_items.add(value) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/state_changes.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/state_changes.py new file mode 100644 index 0000000000000000000000000000000000000000..10e417e85d19ea02f3df4fad73c84bb9a3cc7736 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/state_changes.py @@ -0,0 +1,198 @@ +# orm/state_changes.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php + +"""State tracking utilities used by :class:`_orm.Session`. 
+ +""" + +from __future__ import annotations + +import contextlib +from enum import Enum +from typing import Any +from typing import Callable +from typing import cast +from typing import Iterator +from typing import NoReturn +from typing import Optional +from typing import Tuple +from typing import TypeVar +from typing import Union + +from .. import exc as sa_exc +from .. import util +from ..util.typing import Literal + +_F = TypeVar("_F", bound=Callable[..., Any]) + + +class _StateChangeState(Enum): + pass + + +class _StateChangeStates(_StateChangeState): + ANY = 1 + NO_CHANGE = 2 + CHANGE_IN_PROGRESS = 3 + + +class _StateChange: + """Supplies state assertion decorators. + + The current use case is for the :class:`_orm.SessionTransaction` class. The + :class:`_StateChange` class itself is agnostic of the + :class:`_orm.SessionTransaction` class so could in theory be generalized + for other systems as well. + + """ + + _next_state: _StateChangeState = _StateChangeStates.ANY + _state: _StateChangeState = _StateChangeStates.NO_CHANGE + _current_fn: Optional[Callable[..., Any]] = None + + def _raise_for_prerequisite_state( + self, operation_name: str, state: _StateChangeState + ) -> NoReturn: + raise sa_exc.IllegalStateChangeError( + f"Can't run operation '{operation_name}()' when Session " + f"is in state {state!r}", + code="isce", + ) + + @classmethod + def declare_states( + cls, + prerequisite_states: Union[ + Literal[_StateChangeStates.ANY], Tuple[_StateChangeState, ...] + ], + moves_to: _StateChangeState, + ) -> Callable[[_F], _F]: + """Method decorator declaring valid states. + + :param prerequisite_states: sequence of acceptable prerequisite + states. Can be the single constant _State.ANY to indicate no + prerequisite state + + :param moves_to: the expected state at the end of the method, assuming + no exceptions raised. Can be the constant _State.NO_CHANGE to + indicate state should not change at the end of the method. 
+ + """ + assert prerequisite_states, "no prequisite states sent" + has_prerequisite_states = ( + prerequisite_states is not _StateChangeStates.ANY + ) + + prerequisite_state_collection = cast( + "Tuple[_StateChangeState, ...]", prerequisite_states + ) + expect_state_change = moves_to is not _StateChangeStates.NO_CHANGE + + @util.decorator + def _go(fn: _F, self: Any, *arg: Any, **kw: Any) -> Any: + current_state = self._state + + if ( + has_prerequisite_states + and current_state not in prerequisite_state_collection + ): + self._raise_for_prerequisite_state(fn.__name__, current_state) + + next_state = self._next_state + existing_fn = self._current_fn + expect_state = moves_to if expect_state_change else current_state + + if ( + # destination states are restricted + next_state is not _StateChangeStates.ANY + # method seeks to change state + and expect_state_change + # destination state incorrect + and next_state is not expect_state + ): + if existing_fn and next_state in ( + _StateChangeStates.NO_CHANGE, + _StateChangeStates.CHANGE_IN_PROGRESS, + ): + raise sa_exc.IllegalStateChangeError( + f"Method '{fn.__name__}()' can't be called here; " + f"method '{existing_fn.__name__}()' is already " + f"in progress and this would cause an unexpected " + f"state change to {moves_to!r}", + code="isce", + ) + else: + raise sa_exc.IllegalStateChangeError( + f"Cant run operation '{fn.__name__}()' here; " + f"will move to state {moves_to!r} where we are " + f"expecting {next_state!r}", + code="isce", + ) + + self._current_fn = fn + self._next_state = _StateChangeStates.CHANGE_IN_PROGRESS + try: + ret_value = fn(self, *arg, **kw) + except: + raise + else: + if self._state is expect_state: + return ret_value + + if self._state is current_state: + raise sa_exc.IllegalStateChangeError( + f"Method '{fn.__name__}()' failed to " + "change state " + f"to {moves_to!r} as expected", + code="isce", + ) + elif existing_fn: + raise sa_exc.IllegalStateChangeError( + f"While method '{existing_fn.__name__}()' was " + "running, " + f"method '{fn.__name__}()' caused an " + "unexpected " + f"state change to {self._state!r}", + code="isce", + ) + else: + raise sa_exc.IllegalStateChangeError( + f"Method '{fn.__name__}()' caused an unexpected " + f"state change to {self._state!r}", + code="isce", + ) + + finally: + self._next_state = next_state + self._current_fn = existing_fn + + return _go + + @contextlib.contextmanager + def _expect_state(self, expected: _StateChangeState) -> Iterator[Any]: + """called within a method that changes states. + + method must also use the ``@declare_states()`` decorator. 
+ + """ + assert self._next_state is _StateChangeStates.CHANGE_IN_PROGRESS, ( + "Unexpected call to _expect_state outside of " + "state-changing method" + ) + + self._next_state = expected + try: + yield + except: + raise + else: + if self._state is not expected: + raise sa_exc.IllegalStateChangeError( + f"Unexpected state change to {self._state!r}", code="isce" + ) + finally: + self._next_state = _StateChangeStates.CHANGE_IN_PROGRESS diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/strategies.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/strategies.py new file mode 100644 index 0000000000000000000000000000000000000000..d9eaa2b388e3ac58edfefade944d200e966737e3 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/strategies.py @@ -0,0 +1,3470 @@ +# orm/strategies.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php +# mypy: ignore-errors + + +"""sqlalchemy.orm.interfaces.LoaderStrategy + implementations, and related MapperOptions.""" + +from __future__ import annotations + +import collections +import itertools +from typing import Any +from typing import Dict +from typing import Optional +from typing import Tuple +from typing import TYPE_CHECKING +from typing import Union + +from . import attributes +from . import exc as orm_exc +from . import interfaces +from . import loading +from . import path_registry +from . import properties +from . import query +from . import relationships +from . import unitofwork +from . import util as orm_util +from .base import _DEFER_FOR_STATE +from .base import _RAISE_FOR_STATE +from .base import _SET_DEFERRED_EXPIRED +from .base import ATTR_WAS_SET +from .base import LoaderCallableStatus +from .base import PASSIVE_OFF +from .base import PassiveFlag +from .context import _column_descriptions +from .context import ORMCompileState +from .context import ORMSelectCompileState +from .context import QueryContext +from .interfaces import LoaderStrategy +from .interfaces import StrategizedProperty +from .session import _state_session +from .state import InstanceState +from .strategy_options import Load +from .util import _none_only_set +from .util import AliasedClass +from .. import event +from .. import exc as sa_exc +from .. import inspect +from .. import log +from .. import sql +from .. 
import util +from ..sql import util as sql_util +from ..sql import visitors +from ..sql.selectable import LABEL_STYLE_TABLENAME_PLUS_COL +from ..sql.selectable import Select +from ..util.typing import Literal + +if TYPE_CHECKING: + from .mapper import Mapper + from .relationships import RelationshipProperty + from ..sql.elements import ColumnElement + + +def _register_attribute( + prop, + mapper, + useobject, + compare_function=None, + typecallable=None, + callable_=None, + proxy_property=None, + active_history=False, + impl_class=None, + **kw, +): + listen_hooks = [] + + uselist = useobject and prop.uselist + + if useobject and prop.single_parent: + listen_hooks.append(single_parent_validator) + + if prop.key in prop.parent.validators: + fn, opts = prop.parent.validators[prop.key] + listen_hooks.append( + lambda desc, prop: orm_util._validator_events( + desc, prop.key, fn, **opts + ) + ) + + if useobject: + listen_hooks.append(unitofwork.track_cascade_events) + + # need to assemble backref listeners + # after the singleparentvalidator, mapper validator + if useobject: + backref = prop.back_populates + if backref and prop._effective_sync_backref: + listen_hooks.append( + lambda desc, prop: attributes.backref_listeners( + desc, backref, uselist + ) + ) + + # a single MapperProperty is shared down a class inheritance + # hierarchy, so we set up attribute instrumentation and backref event + # for each mapper down the hierarchy. + + # typically, "mapper" is the same as prop.parent, due to the way + # the configure_mappers() process runs, however this is not strongly + # enforced, and in the case of a second configure_mappers() run the + # mapper here might not be prop.parent; also, a subclass mapper may + # be called here before a superclass mapper. That is, can't depend + # on mappers not already being set up so we have to check each one. + + for m in mapper.self_and_descendants: + if prop is m._props.get( + prop.key + ) and not m.class_manager._attr_has_impl(prop.key): + desc = attributes.register_attribute_impl( + m.class_, + prop.key, + parent_token=prop, + uselist=uselist, + compare_function=compare_function, + useobject=useobject, + trackparent=useobject + and ( + prop.single_parent + or prop.direction is interfaces.ONETOMANY + ), + typecallable=typecallable, + callable_=callable_, + active_history=active_history, + impl_class=impl_class, + send_modified_events=not useobject or not prop.viewonly, + doc=prop.doc, + **kw, + ) + + for hook in listen_hooks: + hook(desc, prop) + + +@properties.ColumnProperty.strategy_for(instrument=False, deferred=False) +class UninstrumentedColumnLoader(LoaderStrategy): + """Represent a non-instrumented MapperProperty. + + The polymorphic_on argument of mapper() often results in this, + if the argument is against the with_polymorphic selectable. 
+ + """ + + __slots__ = ("columns",) + + def __init__(self, parent, strategy_key): + super().__init__(parent, strategy_key) + self.columns = self.parent_property.columns + + def setup_query( + self, + compile_state, + query_entity, + path, + loadopt, + adapter, + column_collection=None, + **kwargs, + ): + for c in self.columns: + if adapter: + c = adapter.columns[c] + compile_state._append_dedupe_col_collection(c, column_collection) + + def create_row_processor( + self, + context, + query_entity, + path, + loadopt, + mapper, + result, + adapter, + populators, + ): + pass + + +@log.class_logger +@properties.ColumnProperty.strategy_for(instrument=True, deferred=False) +class ColumnLoader(LoaderStrategy): + """Provide loading behavior for a :class:`.ColumnProperty`.""" + + __slots__ = "columns", "is_composite" + + def __init__(self, parent, strategy_key): + super().__init__(parent, strategy_key) + self.columns = self.parent_property.columns + self.is_composite = hasattr(self.parent_property, "composite_class") + + def setup_query( + self, + compile_state, + query_entity, + path, + loadopt, + adapter, + column_collection, + memoized_populators, + check_for_adapt=False, + **kwargs, + ): + for c in self.columns: + if adapter: + if check_for_adapt: + c = adapter.adapt_check_present(c) + if c is None: + return + else: + c = adapter.columns[c] + + compile_state._append_dedupe_col_collection(c, column_collection) + + fetch = self.columns[0] + if adapter: + fetch = adapter.columns[fetch] + if fetch is None: + # None happens here only for dml bulk_persistence cases + # when context.DMLReturningColFilter is used + return + + memoized_populators[self.parent_property] = fetch + + def init_class_attribute(self, mapper): + self.is_class_level = True + coltype = self.columns[0].type + # TODO: check all columns ? check for foreign key as well? + active_history = ( + self.parent_property.active_history + or self.columns[0].primary_key + or ( + mapper.version_id_col is not None + and mapper._columntoproperty.get(mapper.version_id_col, None) + is self.parent_property + ) + ) + + _register_attribute( + self.parent_property, + mapper, + useobject=False, + compare_function=coltype.compare_values, + active_history=active_history, + ) + + def create_row_processor( + self, + context, + query_entity, + path, + loadopt, + mapper, + result, + adapter, + populators, + ): + # look through list of columns represented here + # to see which, if any, is present in the row. + + for col in self.columns: + if adapter: + col = adapter.columns[col] + getter = result._getter(col, False) + if getter: + populators["quick"].append((self.key, getter)) + break + else: + populators["expire"].append((self.key, True)) + + +@log.class_logger +@properties.ColumnProperty.strategy_for(query_expression=True) +class ExpressionColumnLoader(ColumnLoader): + def __init__(self, parent, strategy_key): + super().__init__(parent, strategy_key) + + # compare to the "default" expression that is mapped in + # the column. If it's sql.null, we don't need to render + # unless an expr is passed in the options. 
+ null = sql.null().label(None) + self._have_default_expression = any( + not c.compare(null) for c in self.parent_property.columns + ) + + def setup_query( + self, + compile_state, + query_entity, + path, + loadopt, + adapter, + column_collection, + memoized_populators, + **kwargs, + ): + columns = None + if loadopt and loadopt._extra_criteria: + columns = loadopt._extra_criteria + + elif self._have_default_expression: + columns = self.parent_property.columns + + if columns is None: + return + + for c in columns: + if adapter: + c = adapter.columns[c] + compile_state._append_dedupe_col_collection(c, column_collection) + + fetch = columns[0] + if adapter: + fetch = adapter.columns[fetch] + if fetch is None: + # None is not expected to be the result of any + # adapter implementation here, however there may be theoretical + # usages of returning() with context.DMLReturningColFilter + return + + memoized_populators[self.parent_property] = fetch + + def create_row_processor( + self, + context, + query_entity, + path, + loadopt, + mapper, + result, + adapter, + populators, + ): + # look through list of columns represented here + # to see which, if any, is present in the row. + if loadopt and loadopt._extra_criteria: + columns = loadopt._extra_criteria + + for col in columns: + if adapter: + col = adapter.columns[col] + getter = result._getter(col, False) + if getter: + populators["quick"].append((self.key, getter)) + break + else: + populators["expire"].append((self.key, True)) + + def init_class_attribute(self, mapper): + self.is_class_level = True + + _register_attribute( + self.parent_property, + mapper, + useobject=False, + compare_function=self.columns[0].type.compare_values, + accepts_scalar_loader=False, + ) + + +@log.class_logger +@properties.ColumnProperty.strategy_for(deferred=True, instrument=True) +@properties.ColumnProperty.strategy_for( + deferred=True, instrument=True, raiseload=True +) +@properties.ColumnProperty.strategy_for(do_nothing=True) +class DeferredColumnLoader(LoaderStrategy): + """Provide loading behavior for a deferred :class:`.ColumnProperty`.""" + + __slots__ = "columns", "group", "raiseload" + + def __init__(self, parent, strategy_key): + super().__init__(parent, strategy_key) + if hasattr(self.parent_property, "composite_class"): + raise NotImplementedError( + "Deferred loading for composite types not implemented yet" + ) + self.raiseload = self.strategy_opts.get("raiseload", False) + self.columns = self.parent_property.columns + self.group = self.parent_property.group + + def create_row_processor( + self, + context, + query_entity, + path, + loadopt, + mapper, + result, + adapter, + populators, + ): + # for a DeferredColumnLoader, this method is only used during a + # "row processor only" query; see test_deferred.py -> + # tests with "rowproc_only" in their name. As of the 1.0 series, + # loading._instance_processor doesn't use a "row processing" function + # to populate columns, instead it uses data in the "populators" + # dictionary. Normally, the DeferredColumnLoader.setup_query() + # sets up that data in the "memoized_populators" dictionary + # and "create_row_processor()" here is never invoked. 
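Editor's note: for orientation, the deferred-column behavior implemented by DeferredColumnLoader is driven from the mapping and query side roughly as sketched below; the Book model is hypothetical, the options shown (deferred(), undefer(), undefer_group()) are the public SQLAlchemy ORM APIs this strategy serves.

from sqlalchemy import Column, Integer, String, Text, select
from sqlalchemy.orm import declarative_base, deferred, undefer, undefer_group

Base = declarative_base()

class Book(Base):
    __tablename__ = "book"
    id = Column(Integer, primary_key=True)
    title = Column(String)
    # not loaded with the row; first access emits a second SELECT
    summary = deferred(Column(Text), group="content")

# opt back in for a particular query
stmt = select(Book).options(undefer(Book.summary))
# or undefer everything in the named group
stmt = select(Book).options(undefer_group("content"))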
+ + if ( + context.refresh_state + and context.query._compile_options._only_load_props + and self.key in context.query._compile_options._only_load_props + ): + self.parent_property._get_strategy( + (("deferred", False), ("instrument", True)) + ).create_row_processor( + context, + query_entity, + path, + loadopt, + mapper, + result, + adapter, + populators, + ) + + elif not self.is_class_level: + if self.raiseload: + set_deferred_for_local_state = ( + self.parent_property._raise_column_loader + ) + else: + set_deferred_for_local_state = ( + self.parent_property._deferred_column_loader + ) + populators["new"].append((self.key, set_deferred_for_local_state)) + else: + populators["expire"].append((self.key, False)) + + def init_class_attribute(self, mapper): + self.is_class_level = True + + _register_attribute( + self.parent_property, + mapper, + useobject=False, + compare_function=self.columns[0].type.compare_values, + callable_=self._load_for_state, + load_on_unexpire=False, + ) + + def setup_query( + self, + compile_state, + query_entity, + path, + loadopt, + adapter, + column_collection, + memoized_populators, + only_load_props=None, + **kw, + ): + if ( + ( + compile_state.compile_options._render_for_subquery + and self.parent_property._renders_in_subqueries + ) + or ( + loadopt + and set(self.columns).intersection( + self.parent._should_undefer_in_wildcard + ) + ) + or ( + loadopt + and self.group + and loadopt.local_opts.get( + "undefer_group_%s" % self.group, False + ) + ) + or (only_load_props and self.key in only_load_props) + ): + self.parent_property._get_strategy( + (("deferred", False), ("instrument", True)) + ).setup_query( + compile_state, + query_entity, + path, + loadopt, + adapter, + column_collection, + memoized_populators, + **kw, + ) + elif self.is_class_level: + memoized_populators[self.parent_property] = _SET_DEFERRED_EXPIRED + elif not self.raiseload: + memoized_populators[self.parent_property] = _DEFER_FOR_STATE + else: + memoized_populators[self.parent_property] = _RAISE_FOR_STATE + + def _load_for_state(self, state, passive): + if not state.key: + return LoaderCallableStatus.ATTR_EMPTY + + if not passive & PassiveFlag.SQL_OK: + return LoaderCallableStatus.PASSIVE_NO_RESULT + + localparent = state.manager.mapper + + if self.group: + toload = [ + p.key + for p in localparent.iterate_properties + if isinstance(p, StrategizedProperty) + and isinstance(p.strategy, DeferredColumnLoader) + and p.group == self.group + ] + else: + toload = [self.key] + + # narrow the keys down to just those which have no history + group = [k for k in toload if k in state.unmodified] + + session = _state_session(state) + if session is None: + raise orm_exc.DetachedInstanceError( + "Parent instance %s is not bound to a Session; " + "deferred load operation of attribute '%s' cannot proceed" + % (orm_util.state_str(state), self.key) + ) + + if self.raiseload: + self._invoke_raise_load(state, passive, "raise") + + loading.load_scalar_attributes( + state.mapper, state, set(group), PASSIVE_OFF + ) + + return LoaderCallableStatus.ATTR_WAS_SET + + def _invoke_raise_load(self, state, passive, lazy): + raise sa_exc.InvalidRequestError( + "'%s' is not available due to raiseload=True" % (self,) + ) + + +class LoadDeferredColumns: + """serializable loader object used by DeferredColumnLoader""" + + def __init__(self, key: str, raiseload: bool = False): + self.key = key + self.raiseload = raiseload + + def __call__(self, state, passive=attributes.PASSIVE_OFF): + key = self.key + + localparent = 
state.manager.mapper + prop = localparent._props[key] + if self.raiseload: + strategy_key = ( + ("deferred", True), + ("instrument", True), + ("raiseload", True), + ) + else: + strategy_key = (("deferred", True), ("instrument", True)) + strategy = prop._get_strategy(strategy_key) + return strategy._load_for_state(state, passive) + + +class AbstractRelationshipLoader(LoaderStrategy): + """LoaderStratgies which deal with related objects.""" + + __slots__ = "mapper", "target", "uselist", "entity" + + def __init__(self, parent, strategy_key): + super().__init__(parent, strategy_key) + self.mapper = self.parent_property.mapper + self.entity = self.parent_property.entity + self.target = self.parent_property.target + self.uselist = self.parent_property.uselist + + def _immediateload_create_row_processor( + self, + context, + query_entity, + path, + loadopt, + mapper, + result, + adapter, + populators, + ): + return self.parent_property._get_strategy( + (("lazy", "immediate"),) + ).create_row_processor( + context, + query_entity, + path, + loadopt, + mapper, + result, + adapter, + populators, + ) + + +@log.class_logger +@relationships.RelationshipProperty.strategy_for(do_nothing=True) +class DoNothingLoader(LoaderStrategy): + """Relationship loader that makes no change to the object's state. + + Compared to NoLoader, this loader does not initialize the + collection/attribute to empty/none; the usual default LazyLoader will + take effect. + + """ + + +@log.class_logger +@relationships.RelationshipProperty.strategy_for(lazy="noload") +@relationships.RelationshipProperty.strategy_for(lazy=None) +class NoLoader(AbstractRelationshipLoader): + """Provide loading behavior for a :class:`.Relationship` + with "lazy=None". + + """ + + __slots__ = () + + def init_class_attribute(self, mapper): + self.is_class_level = True + + _register_attribute( + self.parent_property, + mapper, + useobject=True, + typecallable=self.parent_property.collection_class, + ) + + def create_row_processor( + self, + context, + query_entity, + path, + loadopt, + mapper, + result, + adapter, + populators, + ): + def invoke_no_load(state, dict_, row): + if self.uselist: + attributes.init_state_collection(state, dict_, self.key) + else: + dict_[self.key] = None + + populators["new"].append((self.key, invoke_no_load)) + + +@log.class_logger +@relationships.RelationshipProperty.strategy_for(lazy=True) +@relationships.RelationshipProperty.strategy_for(lazy="select") +@relationships.RelationshipProperty.strategy_for(lazy="raise") +@relationships.RelationshipProperty.strategy_for(lazy="raise_on_sql") +@relationships.RelationshipProperty.strategy_for(lazy="baked_select") +class LazyLoader( + AbstractRelationshipLoader, util.MemoizedSlots, log.Identified +): + """Provide loading behavior for a :class:`.Relationship` + with "lazy=True", that is loads when first accessed. + + """ + + __slots__ = ( + "_lazywhere", + "_rev_lazywhere", + "_lazyload_reverse_option", + "_order_by", + "use_get", + "is_aliased_class", + "_bind_to_col", + "_equated_columns", + "_rev_bind_to_col", + "_rev_equated_columns", + "_simple_lazy_clause", + "_raise_always", + "_raise_on_sql", + ) + + _lazywhere: ColumnElement[bool] + _bind_to_col: Dict[str, ColumnElement[Any]] + _rev_lazywhere: ColumnElement[bool] + _rev_bind_to_col: Dict[str, ColumnElement[Any]] + + parent_property: RelationshipProperty[Any] + + def __init__( + self, parent: RelationshipProperty[Any], strategy_key: Tuple[Any, ...] 
+ ): + super().__init__(parent, strategy_key) + self._raise_always = self.strategy_opts["lazy"] == "raise" + self._raise_on_sql = self.strategy_opts["lazy"] == "raise_on_sql" + + self.is_aliased_class = inspect(self.entity).is_aliased_class + + join_condition = self.parent_property._join_condition + ( + self._lazywhere, + self._bind_to_col, + self._equated_columns, + ) = join_condition.create_lazy_clause() + + ( + self._rev_lazywhere, + self._rev_bind_to_col, + self._rev_equated_columns, + ) = join_condition.create_lazy_clause(reverse_direction=True) + + if self.parent_property.order_by: + self._order_by = [ + sql_util._deep_annotate(elem, {"_orm_adapt": True}) + for elem in util.to_list(self.parent_property.order_by) + ] + else: + self._order_by = None + + self.logger.info("%s lazy loading clause %s", self, self._lazywhere) + + # determine if our "lazywhere" clause is the same as the mapper's + # get() clause. then we can just use mapper.get() + # + # TODO: the "not self.uselist" can be taken out entirely; a m2o + # load that populates for a list (very unusual, but is possible with + # the API) can still set for "None" and the attribute system will + # populate as an empty list. + self.use_get = ( + not self.is_aliased_class + and not self.uselist + and self.entity._get_clause[0].compare( + self._lazywhere, + use_proxies=True, + compare_keys=False, + equivalents=self.mapper._equivalent_columns, + ) + ) + + if self.use_get: + for col in list(self._equated_columns): + if col in self.mapper._equivalent_columns: + for c in self.mapper._equivalent_columns[col]: + self._equated_columns[c] = self._equated_columns[col] + + self.logger.info( + "%s will use Session.get() to optimize instance loads", self + ) + + def init_class_attribute(self, mapper): + self.is_class_level = True + + _legacy_inactive_history_style = ( + self.parent_property._legacy_inactive_history_style + ) + + if self.parent_property.active_history: + active_history = True + _deferred_history = False + + elif ( + self.parent_property.direction is not interfaces.MANYTOONE + or not self.use_get + ): + if _legacy_inactive_history_style: + active_history = True + _deferred_history = False + else: + active_history = False + _deferred_history = True + else: + active_history = _deferred_history = False + + _register_attribute( + self.parent_property, + mapper, + useobject=True, + callable_=self._load_for_state, + typecallable=self.parent_property.collection_class, + active_history=active_history, + _deferred_history=_deferred_history, + ) + + def _memoized_attr__simple_lazy_clause(self): + lazywhere = sql_util._deep_annotate( + self._lazywhere, {"_orm_adapt": True} + ) + + criterion, bind_to_col = (lazywhere, self._bind_to_col) + + params = [] + + def visit_bindparam(bindparam): + bindparam.unique = False + + visitors.traverse(criterion, {}, {"bindparam": visit_bindparam}) + + def visit_bindparam(bindparam): + if bindparam._identifying_key in bind_to_col: + params.append( + ( + bindparam.key, + bind_to_col[bindparam._identifying_key], + None, + ) + ) + elif bindparam.callable is None: + params.append((bindparam.key, None, bindparam.value)) + + criterion = visitors.cloned_traverse( + criterion, {}, {"bindparam": visit_bindparam} + ) + + return criterion, params + + def _generate_lazy_clause(self, state, passive): + criterion, param_keys = self._simple_lazy_clause + + if state is None: + return sql_util.adapt_criterion_to_null( + criterion, [key for key, ident, value in param_keys] + ) + + mapper = self.parent_property.parent + + o = 
state.obj() # strong ref + dict_ = attributes.instance_dict(o) + + if passive & PassiveFlag.INIT_OK: + passive ^= PassiveFlag.INIT_OK + + params = {} + for key, ident, value in param_keys: + if ident is not None: + if passive and passive & PassiveFlag.LOAD_AGAINST_COMMITTED: + value = mapper._get_committed_state_attr_by_column( + state, dict_, ident, passive + ) + else: + value = mapper._get_state_attr_by_column( + state, dict_, ident, passive + ) + + params[key] = value + + return criterion, params + + def _invoke_raise_load(self, state, passive, lazy): + raise sa_exc.InvalidRequestError( + "'%s' is not available due to lazy='%s'" % (self, lazy) + ) + + def _load_for_state( + self, + state, + passive, + loadopt=None, + extra_criteria=(), + extra_options=(), + alternate_effective_path=None, + execution_options=util.EMPTY_DICT, + ): + if not state.key and ( + ( + not self.parent_property.load_on_pending + and not state._load_pending + ) + or not state.session_id + ): + return LoaderCallableStatus.ATTR_EMPTY + + pending = not state.key + primary_key_identity = None + + use_get = self.use_get and (not loadopt or not loadopt._extra_criteria) + + if (not passive & PassiveFlag.SQL_OK and not use_get) or ( + not passive & attributes.NON_PERSISTENT_OK and pending + ): + return LoaderCallableStatus.PASSIVE_NO_RESULT + + if ( + # we were given lazy="raise" + self._raise_always + # the no_raise history-related flag was not passed + and not passive & PassiveFlag.NO_RAISE + and ( + # if we are use_get and related_object_ok is disabled, + # which means we are at most looking in the identity map + # for history purposes or otherwise returning + # PASSIVE_NO_RESULT, don't raise. This is also a + # history-related flag + not use_get + or passive & PassiveFlag.RELATED_OBJECT_OK + ) + ): + self._invoke_raise_load(state, passive, "raise") + + session = _state_session(state) + if not session: + if passive & PassiveFlag.NO_RAISE: + return LoaderCallableStatus.PASSIVE_NO_RESULT + + raise orm_exc.DetachedInstanceError( + "Parent instance %s is not bound to a Session; " + "lazy load operation of attribute '%s' cannot proceed" + % (orm_util.state_str(state), self.key) + ) + + # if we have a simple primary key load, check the + # identity map without generating a Query at all + if use_get: + primary_key_identity = self._get_ident_for_use_get( + session, state, passive + ) + if LoaderCallableStatus.PASSIVE_NO_RESULT in primary_key_identity: + return LoaderCallableStatus.PASSIVE_NO_RESULT + elif LoaderCallableStatus.NEVER_SET in primary_key_identity: + return LoaderCallableStatus.NEVER_SET + + # test for None alone in primary_key_identity based on + # allow_partial_pks preference. PASSIVE_NO_RESULT and NEVER_SET + # have already been tested above + if not self.mapper.allow_partial_pks: + if _none_only_set.intersection(primary_key_identity): + return None + else: + if _none_only_set.issuperset(primary_key_identity): + return None + + if ( + self.key in state.dict + and not passive & PassiveFlag.DEFERRED_HISTORY_LOAD + ): + return LoaderCallableStatus.ATTR_WAS_SET + + # look for this identity in the identity map. Delegate to the + # Query class in use, as it may have special rules for how it + # does this, including how it decides what the correct + # identity_token would be for this identity. 
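Editor's note: the "use_get" branch above is what lets a simple many-to-one lazy load be satisfied from the identity map without emitting SQL. From user code the relevant knobs are the relationship's lazy= setting and the raiseload() option, which correspond to the lazy="select" / "raise" / "raise_on_sql" strategy keys registered on LazyLoader above. A sketch with hypothetical Parent/Child models:

from sqlalchemy import Column, ForeignKey, Integer, select
from sqlalchemy.orm import declarative_base, relationship, raiseload

Base = declarative_base()

class Parent(Base):
    __tablename__ = "parent"
    id = Column(Integer, primary_key=True)
    children = relationship("Child", back_populates="parent")

class Child(Base):
    __tablename__ = "child"
    id = Column(Integer, primary_key=True)
    parent_id = Column(Integer, ForeignKey("parent.id"))
    # lazy="select" is the default; lazy="raise" / "raise_on_sql" set the
    # _raise_always / _raise_on_sql flags seen in LazyLoader above
    parent = relationship(Parent, back_populates="children", lazy="select")

# raise only if the lazy load would actually have to emit SQL
# (identity-map hits are still allowed)
stmt = select(Child).options(raiseload(Child.parent, sql_only=True))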
+ + instance = session._identity_lookup( + self.entity, + primary_key_identity, + passive=passive, + lazy_loaded_from=state, + ) + + if instance is not None: + if instance is LoaderCallableStatus.PASSIVE_CLASS_MISMATCH: + return None + else: + return instance + elif ( + not passive & PassiveFlag.SQL_OK + or not passive & PassiveFlag.RELATED_OBJECT_OK + ): + return LoaderCallableStatus.PASSIVE_NO_RESULT + + return self._emit_lazyload( + session, + state, + primary_key_identity, + passive, + loadopt, + extra_criteria, + extra_options, + alternate_effective_path, + execution_options, + ) + + def _get_ident_for_use_get(self, session, state, passive): + instance_mapper = state.manager.mapper + + if passive & PassiveFlag.LOAD_AGAINST_COMMITTED: + get_attr = instance_mapper._get_committed_state_attr_by_column + else: + get_attr = instance_mapper._get_state_attr_by_column + + dict_ = state.dict + + return [ + get_attr(state, dict_, self._equated_columns[pk], passive=passive) + for pk in self.mapper.primary_key + ] + + @util.preload_module("sqlalchemy.orm.strategy_options") + def _emit_lazyload( + self, + session, + state, + primary_key_identity, + passive, + loadopt, + extra_criteria, + extra_options, + alternate_effective_path, + execution_options, + ): + strategy_options = util.preloaded.orm_strategy_options + + clauseelement = self.entity.__clause_element__() + stmt = Select._create_raw_select( + _raw_columns=[clauseelement], + _propagate_attrs=clauseelement._propagate_attrs, + _label_style=LABEL_STYLE_TABLENAME_PLUS_COL, + _compile_options=ORMCompileState.default_compile_options, + ) + load_options = QueryContext.default_load_options + + load_options += { + "_invoke_all_eagers": False, + "_lazy_loaded_from": state, + } + + if self.parent_property.secondary is not None: + stmt = stmt.select_from( + self.mapper, self.parent_property.secondary + ) + + pending = not state.key + + # don't autoflush on pending + if pending or passive & attributes.NO_AUTOFLUSH: + stmt._execution_options = util.immutabledict({"autoflush": False}) + + use_get = self.use_get + + if state.load_options or (loadopt and loadopt._extra_criteria): + if alternate_effective_path is None: + effective_path = state.load_path[self.parent_property] + else: + effective_path = alternate_effective_path[self.parent_property] + + opts = state.load_options + + if loadopt and loadopt._extra_criteria: + use_get = False + opts += ( + orm_util.LoaderCriteriaOption(self.entity, extra_criteria), + ) + + stmt._with_options = opts + elif alternate_effective_path is None: + # this path is used if there are not already any options + # in the query, but an event may want to add them + effective_path = state.mapper._path_registry[self.parent_property] + else: + # added by immediateloader + effective_path = alternate_effective_path[self.parent_property] + + if extra_options: + stmt._with_options += extra_options + + stmt._compile_options += {"_current_path": effective_path} + + if use_get: + if self._raise_on_sql and not passive & PassiveFlag.NO_RAISE: + self._invoke_raise_load(state, passive, "raise_on_sql") + + return loading.load_on_pk_identity( + session, + stmt, + primary_key_identity, + load_options=load_options, + execution_options=execution_options, + ) + + if self._order_by: + stmt._order_by_clauses = self._order_by + + def _lazyload_reverse(compile_context): + for rev in self.parent_property._reverse_property: + # reverse props that are MANYTOONE are loading *this* + # object from get(), so don't need to eager out to those. 
+ if ( + rev.direction is interfaces.MANYTOONE + and rev._use_get + and not isinstance(rev.strategy, LazyLoader) + ): + strategy_options.Load._construct_for_existing_path( + compile_context.compile_options._current_path[ + rev.parent + ] + ).lazyload(rev).process_compile_state(compile_context) + + stmt._with_context_options += ( + (_lazyload_reverse, self.parent_property), + ) + + lazy_clause, params = self._generate_lazy_clause(state, passive) + + if execution_options: + execution_options = util.EMPTY_DICT.merge_with( + execution_options, + { + "_sa_orm_load_options": load_options, + }, + ) + else: + execution_options = { + "_sa_orm_load_options": load_options, + } + + if ( + self.key in state.dict + and not passive & PassiveFlag.DEFERRED_HISTORY_LOAD + ): + return LoaderCallableStatus.ATTR_WAS_SET + + if pending: + if util.has_intersection(orm_util._none_set, params.values()): + return None + + elif util.has_intersection(orm_util._never_set, params.values()): + return None + + if self._raise_on_sql and not passive & PassiveFlag.NO_RAISE: + self._invoke_raise_load(state, passive, "raise_on_sql") + + stmt._where_criteria = (lazy_clause,) + + result = session.execute( + stmt, params, execution_options=execution_options + ) + + result = result.unique().scalars().all() + + if self.uselist: + return result + else: + l = len(result) + if l: + if l > 1: + util.warn( + "Multiple rows returned with " + "uselist=False for lazily-loaded attribute '%s' " + % self.parent_property + ) + + return result[0] + else: + return None + + def create_row_processor( + self, + context, + query_entity, + path, + loadopt, + mapper, + result, + adapter, + populators, + ): + key = self.key + + if ( + context.load_options._is_user_refresh + and context.query._compile_options._only_load_props + and self.key in context.query._compile_options._only_load_props + ): + return self._immediateload_create_row_processor( + context, + query_entity, + path, + loadopt, + mapper, + result, + adapter, + populators, + ) + + if not self.is_class_level or (loadopt and loadopt._extra_criteria): + # we are not the primary manager for this attribute + # on this class - set up a + # per-instance lazyloader, which will override the + # class-level behavior. + # this currently only happens when using a + # "lazyload" option on a "no load" + # attribute - "eager" attributes always have a + # class-level lazyloader installed. + set_lazy_callable = ( + InstanceState._instance_level_callable_processor + )( + mapper.class_manager, + LoadLazyAttribute( + key, + self, + loadopt, + ( + loadopt._generate_extra_criteria(context) + if loadopt._extra_criteria + else None + ), + ), + key, + ) + + populators["new"].append((self.key, set_lazy_callable)) + elif context.populate_existing or mapper.always_refresh: + + def reset_for_lazy_callable(state, dict_, row): + # we are the primary manager for this attribute on + # this class - reset its + # per-instance attribute state, so that the class-level + # lazy loader is + # executed when next referenced on this instance. + # this is needed in + # populate_existing() types of scenarios to reset + # any existing state. + state._reset(dict_, key) + + populators["new"].append((self.key, reset_for_lazy_callable)) + + +class LoadLazyAttribute: + """semi-serializable loader object used by LazyLoader + + Historically, this object would be carried along with instances that + needed to run lazyloaders, so it had to be serializable to support + cached instances. 
+ + this is no longer a general requirement, and the case where this object + is used is exactly the case where we can't really serialize easily, + which is when extra criteria in the loader option is present. + + We can't reliably serialize that as it refers to mapped entities and + AliasedClass objects that are local to the current process, which would + need to be matched up on deserialize e.g. the sqlalchemy.ext.serializer + approach. + + """ + + def __init__(self, key, initiating_strategy, loadopt, extra_criteria): + self.key = key + self.strategy_key = initiating_strategy.strategy_key + self.loadopt = loadopt + self.extra_criteria = extra_criteria + + def __getstate__(self): + if self.extra_criteria is not None: + util.warn( + "Can't reliably serialize a lazyload() option that " + "contains additional criteria; please use eager loading " + "for this case" + ) + return { + "key": self.key, + "strategy_key": self.strategy_key, + "loadopt": self.loadopt, + "extra_criteria": (), + } + + def __call__(self, state, passive=attributes.PASSIVE_OFF): + key = self.key + instance_mapper = state.manager.mapper + prop = instance_mapper._props[key] + strategy = prop._strategies[self.strategy_key] + + return strategy._load_for_state( + state, + passive, + loadopt=self.loadopt, + extra_criteria=self.extra_criteria, + ) + + +class PostLoader(AbstractRelationshipLoader): + """A relationship loader that emits a second SELECT statement.""" + + __slots__ = () + + def _setup_for_recursion(self, context, path, loadopt, join_depth=None): + effective_path = ( + context.compile_state.current_path or orm_util.PathRegistry.root + ) + path + + top_level_context = context._get_top_level_context() + execution_options = util.immutabledict( + {"sa_top_level_orm_context": top_level_context} + ) + + if loadopt: + recursion_depth = loadopt.local_opts.get("recursion_depth", None) + unlimited_recursion = recursion_depth == -1 + else: + recursion_depth = None + unlimited_recursion = False + + if recursion_depth is not None: + if not self.parent_property._is_self_referential: + raise sa_exc.InvalidRequestError( + f"recursion_depth option on relationship " + f"{self.parent_property} not valid for " + "non-self-referential relationship" + ) + recursion_depth = context.execution_options.get( + f"_recursion_depth_{id(self)}", recursion_depth + ) + + if not unlimited_recursion and recursion_depth < 0: + return ( + effective_path, + False, + execution_options, + recursion_depth, + ) + + if not unlimited_recursion: + execution_options = execution_options.union( + { + f"_recursion_depth_{id(self)}": recursion_depth - 1, + } + ) + + if loading.PostLoad.path_exists( + context, effective_path, self.parent_property + ): + return effective_path, False, execution_options, recursion_depth + + path_w_prop = path[self.parent_property] + effective_path_w_prop = effective_path[self.parent_property] + + if not path_w_prop.contains(context.attributes, "loader"): + if join_depth: + if effective_path_w_prop.length / 2 > join_depth: + return ( + effective_path, + False, + execution_options, + recursion_depth, + ) + elif effective_path_w_prop.contains_mapper(self.mapper): + return ( + effective_path, + False, + execution_options, + recursion_depth, + ) + + return effective_path, True, execution_options, recursion_depth + + +@relationships.RelationshipProperty.strategy_for(lazy="immediate") +class ImmediateLoader(PostLoader): + __slots__ = ("join_depth",) + + def __init__(self, parent, strategy_key): + super().__init__(parent, strategy_key) + 
self.join_depth = self.parent_property.join_depth + + def init_class_attribute(self, mapper): + self.parent_property._get_strategy( + (("lazy", "select"),) + ).init_class_attribute(mapper) + + def create_row_processor( + self, + context, + query_entity, + path, + loadopt, + mapper, + result, + adapter, + populators, + ): + if not context.compile_state.compile_options._enable_eagerloads: + return + + ( + effective_path, + run_loader, + execution_options, + recursion_depth, + ) = self._setup_for_recursion(context, path, loadopt, self.join_depth) + + if not run_loader: + # this will not emit SQL and will only emit for a many-to-one + # "use get" load. the "_RELATED" part means it may return + # instance even if its expired, since this is a mutually-recursive + # load operation. + flags = attributes.PASSIVE_NO_FETCH_RELATED | PassiveFlag.NO_RAISE + else: + flags = attributes.PASSIVE_OFF | PassiveFlag.NO_RAISE + + loading.PostLoad.callable_for_path( + context, + effective_path, + self.parent, + self.parent_property, + self._load_for_path, + loadopt, + flags, + recursion_depth, + execution_options, + ) + + def _load_for_path( + self, + context, + path, + states, + load_only, + loadopt, + flags, + recursion_depth, + execution_options, + ): + if recursion_depth: + new_opt = Load(loadopt.path.entity) + new_opt.context = ( + loadopt, + loadopt._recurse(), + ) + alternate_effective_path = path._truncate_recursive() + extra_options = (new_opt,) + else: + alternate_effective_path = path + extra_options = () + + key = self.key + lazyloader = self.parent_property._get_strategy((("lazy", "select"),)) + for state, overwrite in states: + dict_ = state.dict + + if overwrite or key not in dict_: + value = lazyloader._load_for_state( + state, + flags, + extra_options=extra_options, + alternate_effective_path=alternate_effective_path, + execution_options=execution_options, + ) + if value not in ( + ATTR_WAS_SET, + LoaderCallableStatus.PASSIVE_NO_RESULT, + ): + state.get_impl(key).set_committed_value( + state, dict_, value + ) + + +@log.class_logger +@relationships.RelationshipProperty.strategy_for(lazy="subquery") +class SubqueryLoader(PostLoader): + __slots__ = ("join_depth",) + + def __init__(self, parent, strategy_key): + super().__init__(parent, strategy_key) + self.join_depth = self.parent_property.join_depth + + def init_class_attribute(self, mapper): + self.parent_property._get_strategy( + (("lazy", "select"),) + ).init_class_attribute(mapper) + + def _get_leftmost( + self, + orig_query_entity_index, + subq_path, + current_compile_state, + is_root, + ): + given_subq_path = subq_path + subq_path = subq_path.path + subq_mapper = orm_util._class_to_mapper(subq_path[0]) + + # determine attributes of the leftmost mapper + if ( + self.parent.isa(subq_mapper) + and self.parent_property is subq_path[1] + ): + leftmost_mapper, leftmost_prop = self.parent, self.parent_property + else: + leftmost_mapper, leftmost_prop = subq_mapper, subq_path[1] + + if is_root: + # the subq_path is also coming from cached state, so when we start + # building up this path, it has to also be converted to be in terms + # of the current state. 
this is for the specific case of the entity + # is an AliasedClass against a subquery that's not otherwise going + # to adapt + new_subq_path = current_compile_state._entities[ + orig_query_entity_index + ].entity_zero._path_registry[leftmost_prop] + additional = len(subq_path) - len(new_subq_path) + if additional: + new_subq_path += path_registry.PathRegistry.coerce( + subq_path[-additional:] + ) + else: + new_subq_path = given_subq_path + + leftmost_cols = leftmost_prop.local_columns + + leftmost_attr = [ + getattr( + new_subq_path.path[0].entity, + leftmost_mapper._columntoproperty[c].key, + ) + for c in leftmost_cols + ] + + return leftmost_mapper, leftmost_attr, leftmost_prop, new_subq_path + + def _generate_from_original_query( + self, + orig_compile_state, + orig_query, + leftmost_mapper, + leftmost_attr, + leftmost_relationship, + orig_entity, + ): + # reformat the original query + # to look only for significant columns + q = orig_query._clone().correlate(None) + + # LEGACY: make a Query back from the select() !! + # This suits at least two legacy cases: + # 1. applications which expect before_compile() to be called + # below when we run .subquery() on this query (Keystone) + # 2. applications which are doing subqueryload with complex + # from_self() queries, as query.subquery() / .statement + # has to do the full compile context for multiply-nested + # from_self() (Neutron) - see test_subqload_from_self + # for demo. + q2 = query.Query.__new__(query.Query) + q2.__dict__.update(q.__dict__) + q = q2 + + # set the query's "FROM" list explicitly to what the + # FROM list would be in any case, as we will be limiting + # the columns in the SELECT list which may no longer include + # all entities mentioned in things like WHERE, JOIN, etc. + if not q._from_obj: + q._enable_assertions = False + q.select_from.non_generative( + q, + *{ + ent["entity"] + for ent in _column_descriptions( + orig_query, compile_state=orig_compile_state + ) + if ent["entity"] is not None + }, + ) + + # select from the identity columns of the outer (specifically, these + # are the 'local_cols' of the property). This will remove other + # columns from the query that might suggest the right entity which is + # why we do set select_from above. The attributes we have are + # coerced and adapted using the original query's adapter, which is + # needed only for the case of adapting a subclass column to + # that of a polymorphic selectable, e.g. we have + # Engineer.primary_language and the entity is Person. All other + # adaptations, e.g. from_self, select_entity_from(), will occur + # within the new query when it compiles, as the compile_state we are + # using here is only a partial one. If the subqueryload is from a + # with_polymorphic() or other aliased() object, left_attr will already + # be the correct attributes so no adaptation is needed. 
+ target_cols = orig_compile_state._adapt_col_list( + [ + sql.coercions.expect(sql.roles.ColumnsClauseRole, o) + for o in leftmost_attr + ], + orig_compile_state._get_current_adapter(), + ) + q._raw_columns = target_cols + + distinct_target_key = leftmost_relationship.distinct_target_key + + if distinct_target_key is True: + q._distinct = True + elif distinct_target_key is None: + # if target_cols refer to a non-primary key or only + # part of a composite primary key, set the q as distinct + for t in {c.table for c in target_cols}: + if not set(target_cols).issuperset(t.primary_key): + q._distinct = True + break + + # don't need ORDER BY if no limit/offset + if not q._has_row_limiting_clause: + q._order_by_clauses = () + + if q._distinct is True and q._order_by_clauses: + # the logic to automatically add the order by columns to the query + # when distinct is True is deprecated in the query + to_add = sql_util.expand_column_list_from_order_by( + target_cols, q._order_by_clauses + ) + if to_add: + q._set_entities(target_cols + to_add) + + # the original query now becomes a subquery + # which we'll join onto. + # LEGACY: as "q" is a Query, the before_compile() event is invoked + # here. + embed_q = q.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL).subquery() + left_alias = orm_util.AliasedClass( + leftmost_mapper, embed_q, use_mapper_path=True + ) + return left_alias + + def _prep_for_joins(self, left_alias, subq_path): + # figure out what's being joined. a.k.a. the fun part + to_join = [] + pairs = list(subq_path.pairs()) + + for i, (mapper, prop) in enumerate(pairs): + if i > 0: + # look at the previous mapper in the chain - + # if it is as or more specific than this prop's + # mapper, use that instead. + # note we have an assumption here that + # the non-first element is always going to be a mapper, + # not an AliasedClass + + prev_mapper = pairs[i - 1][1].mapper + to_append = prev_mapper if prev_mapper.isa(mapper) else mapper + else: + to_append = mapper + + to_join.append((to_append, prop.key)) + + # determine the immediate parent class we are joining from, + # which needs to be aliased. + + if len(to_join) < 2: + # in the case of a one level eager load, this is the + # leftmost "left_alias". 
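Editor's note: all of this subquery construction is triggered by the subqueryload() loader option. A sketch of how it is requested, re-using the hypothetical Parent/Child mapping from the earlier note; selectinload() is shown alongside it as the commonly recommended alternative that also emits a second SELECT per relationship.

from sqlalchemy import select
from sqlalchemy.orm import subqueryload, selectinload

# eager load Parent.children via a joined subquery of the original statement
stmt = select(Parent).options(subqueryload(Parent.children))

# selectinload() loads the same collections with a simpler IN-based SELECT
# and generally caches better than the subquery approach
stmt = select(Parent).options(selectinload(Parent.children))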
+ parent_alias = left_alias + else: + info = inspect(to_join[-1][0]) + if info.is_aliased_class: + parent_alias = info.entity + else: + # alias a plain mapper as we may be + # joining multiple times + parent_alias = orm_util.AliasedClass( + info.entity, use_mapper_path=True + ) + + local_cols = self.parent_property.local_columns + + local_attr = [ + getattr(parent_alias, self.parent._columntoproperty[c].key) + for c in local_cols + ] + return to_join, local_attr, parent_alias + + def _apply_joins( + self, q, to_join, left_alias, parent_alias, effective_entity + ): + ltj = len(to_join) + if ltj == 1: + to_join = [ + getattr(left_alias, to_join[0][1]).of_type(effective_entity) + ] + elif ltj == 2: + to_join = [ + getattr(left_alias, to_join[0][1]).of_type(parent_alias), + getattr(parent_alias, to_join[-1][1]).of_type( + effective_entity + ), + ] + elif ltj > 2: + middle = [ + ( + ( + orm_util.AliasedClass(item[0]) + if not inspect(item[0]).is_aliased_class + else item[0].entity + ), + item[1], + ) + for item in to_join[1:-1] + ] + inner = [] + + while middle: + item = middle.pop(0) + attr = getattr(item[0], item[1]) + if middle: + attr = attr.of_type(middle[0][0]) + else: + attr = attr.of_type(parent_alias) + + inner.append(attr) + + to_join = ( + [getattr(left_alias, to_join[0][1]).of_type(inner[0].parent)] + + inner + + [ + getattr(parent_alias, to_join[-1][1]).of_type( + effective_entity + ) + ] + ) + + for attr in to_join: + q = q.join(attr) + + return q + + def _setup_options( + self, + context, + q, + subq_path, + rewritten_path, + orig_query, + effective_entity, + loadopt, + ): + # note that because the subqueryload object + # does not re-use the cached query, instead always making + # use of the current invoked query, while we have two queries + # here (orig and context.query), they are both non-cached + # queries and we can transfer the options as is without + # adjusting for new criteria. Some work on #6881 / #6889 + # brought this into question. + new_options = orig_query._with_options + + if loadopt and loadopt._extra_criteria: + new_options += ( + orm_util.LoaderCriteriaOption( + self.entity, + loadopt._generate_extra_criteria(context), + ), + ) + + # propagate loader options etc. to the new query. + # these will fire relative to subq_path. + q = q._with_current_path(rewritten_path) + q = q.options(*new_options) + + return q + + def _setup_outermost_orderby(self, q): + if self.parent_property.order_by: + + def _setup_outermost_orderby(compile_context): + compile_context.eager_order_by += tuple( + util.to_list(self.parent_property.order_by) + ) + + q = q._add_context_option( + _setup_outermost_orderby, self.parent_property + ) + + return q + + class _SubqCollections: + """Given a :class:`_query.Query` used to emit the "subquery load", + provide a load interface that executes the query at the + first moment a value is needed. 
+ + """ + + __slots__ = ( + "session", + "execution_options", + "load_options", + "params", + "subq", + "_data", + ) + + def __init__(self, context, subq): + # avoid creating a cycle by storing context + # even though that's preferable + self.session = context.session + self.execution_options = context.execution_options + self.load_options = context.load_options + self.params = context.params or {} + self.subq = subq + self._data = None + + def get(self, key, default): + if self._data is None: + self._load() + return self._data.get(key, default) + + def _load(self): + self._data = collections.defaultdict(list) + + q = self.subq + assert q.session is None + + q = q.with_session(self.session) + + if self.load_options._populate_existing: + q = q.populate_existing() + # to work with baked query, the parameters may have been + # updated since this query was created, so take these into account + + rows = list(q.params(self.params)) + for k, v in itertools.groupby(rows, lambda x: x[1:]): + self._data[k].extend(vv[0] for vv in v) + + def loader(self, state, dict_, row): + if self._data is None: + self._load() + + def _setup_query_from_rowproc( + self, + context, + query_entity, + path, + entity, + loadopt, + adapter, + ): + compile_state = context.compile_state + if ( + not compile_state.compile_options._enable_eagerloads + or compile_state.compile_options._for_refresh_state + ): + return + + orig_query_entity_index = compile_state._entities.index(query_entity) + context.loaders_require_buffering = True + + path = path[self.parent_property] + + # build up a path indicating the path from the leftmost + # entity to the thing we're subquery loading. + with_poly_entity = path.get( + compile_state.attributes, "path_with_polymorphic", None + ) + if with_poly_entity is not None: + effective_entity = with_poly_entity + else: + effective_entity = self.entity + + subq_path, rewritten_path = context.query._execution_options.get( + ("subquery_paths", None), + (orm_util.PathRegistry.root, orm_util.PathRegistry.root), + ) + is_root = subq_path is orm_util.PathRegistry.root + subq_path = subq_path + path + rewritten_path = rewritten_path + path + + # use the current query being invoked, not the compile state + # one. this is so that we get the current parameters. however, + # it means we can't use the existing compile state, we have to make + # a new one. other approaches include possibly using the + # compiled query but swapping the params, seems only marginally + # less time spent but more complicated + orig_query = context.query._execution_options.get( + ("orig_query", SubqueryLoader), context.query + ) + + # make a new compile_state for the query that's probably cached, but + # we're sort of undoing a bit of that caching :( + compile_state_cls = ORMCompileState._get_plugin_class_for_plugin( + orig_query, "orm" + ) + + if orig_query._is_lambda_element: + if context.load_options._lazy_loaded_from is None: + util.warn( + 'subqueryloader for "%s" must invoke lambda callable ' + "at %r in " + "order to produce a new query, decreasing the efficiency " + "of caching for this statement. Consider using " + "selectinload() for more effective full-lambda caching" + % (self, orig_query) + ) + orig_query = orig_query._resolved + + # this is the more "quick" version, however it's not clear how + # much of this we need. in particular I can't get a test to + # fail if the "set_base_alias" is missing and not sure why that is. 
+ orig_compile_state = compile_state_cls._create_entities_collection( + orig_query, legacy=False + ) + + ( + leftmost_mapper, + leftmost_attr, + leftmost_relationship, + rewritten_path, + ) = self._get_leftmost( + orig_query_entity_index, + rewritten_path, + orig_compile_state, + is_root, + ) + + # generate a new Query from the original, then + # produce a subquery from it. + left_alias = self._generate_from_original_query( + orig_compile_state, + orig_query, + leftmost_mapper, + leftmost_attr, + leftmost_relationship, + entity, + ) + + # generate another Query that will join the + # left alias to the target relationships. + # basically doing a longhand + # "from_self()". (from_self() itself not quite industrial + # strength enough for all contingencies...but very close) + + q = query.Query(effective_entity) + + q._execution_options = context.query._execution_options.merge_with( + context.execution_options, + { + ("orig_query", SubqueryLoader): orig_query, + ("subquery_paths", None): (subq_path, rewritten_path), + }, + ) + + q = q._set_enable_single_crit(False) + to_join, local_attr, parent_alias = self._prep_for_joins( + left_alias, subq_path + ) + + q = q.add_columns(*local_attr) + q = self._apply_joins( + q, to_join, left_alias, parent_alias, effective_entity + ) + + q = self._setup_options( + context, + q, + subq_path, + rewritten_path, + orig_query, + effective_entity, + loadopt, + ) + q = self._setup_outermost_orderby(q) + + return q + + def create_row_processor( + self, + context, + query_entity, + path, + loadopt, + mapper, + result, + adapter, + populators, + ): + if ( + loadopt + and context.compile_state.statement is not None + and context.compile_state.statement.is_dml + ): + util.warn_deprecated( + "The subqueryload loader option is not compatible with DML " + "statements such as INSERT, UPDATE. Only SELECT may be used." + "This warning will become an exception in a future release.", + "2.0", + ) + + if context.refresh_state: + return self._immediateload_create_row_processor( + context, + query_entity, + path, + loadopt, + mapper, + result, + adapter, + populators, + ) + + _, run_loader, _, _ = self._setup_for_recursion( + context, path, loadopt, self.join_depth + ) + if not run_loader: + return + + if not isinstance(context.compile_state, ORMSelectCompileState): + # issue 7505 - subqueryload() in 1.3 and previous would silently + # degrade for from_statement() without warning. this behavior + # is restored here + return + + if not self.parent.class_manager[self.key].impl.supports_population: + raise sa_exc.InvalidRequestError( + "'%s' does not support object " + "population - eager loading cannot be applied." % self + ) + + # a little dance here as the "path" is still something that only + # semi-tracks the exact series of things we are loading, still not + # telling us about with_polymorphic() and stuff like that when it's at + # the root.. the initial MapperEntity is more accurate for this case. 
+ if len(path) == 1: + if not orm_util._entity_isa(query_entity.entity_zero, self.parent): + return + elif not orm_util._entity_isa(path[-1], self.parent): + return + + subq = self._setup_query_from_rowproc( + context, + query_entity, + path, + path[-1], + loadopt, + adapter, + ) + + if subq is None: + return + + assert subq.session is None + + path = path[self.parent_property] + + local_cols = self.parent_property.local_columns + + # cache the loaded collections in the context + # so that inheriting mappers don't re-load when they + # call upon create_row_processor again + collections = path.get(context.attributes, "collections") + if collections is None: + collections = self._SubqCollections(context, subq) + path.set(context.attributes, "collections", collections) + + if adapter: + local_cols = [adapter.columns[c] for c in local_cols] + + if self.uselist: + self._create_collection_loader( + context, result, collections, local_cols, populators + ) + else: + self._create_scalar_loader( + context, result, collections, local_cols, populators + ) + + def _create_collection_loader( + self, context, result, collections, local_cols, populators + ): + tuple_getter = result._tuple_getter(local_cols) + + def load_collection_from_subq(state, dict_, row): + collection = collections.get(tuple_getter(row), ()) + state.get_impl(self.key).set_committed_value( + state, dict_, collection + ) + + def load_collection_from_subq_existing_row(state, dict_, row): + if self.key not in dict_: + load_collection_from_subq(state, dict_, row) + + populators["new"].append((self.key, load_collection_from_subq)) + populators["existing"].append( + (self.key, load_collection_from_subq_existing_row) + ) + + if context.invoke_all_eagers: + populators["eager"].append((self.key, collections.loader)) + + def _create_scalar_loader( + self, context, result, collections, local_cols, populators + ): + tuple_getter = result._tuple_getter(local_cols) + + def load_scalar_from_subq(state, dict_, row): + collection = collections.get(tuple_getter(row), (None,)) + if len(collection) > 1: + util.warn( + "Multiple rows returned with " + "uselist=False for eagerly-loaded attribute '%s' " % self + ) + + scalar = collection[0] + state.get_impl(self.key).set_committed_value(state, dict_, scalar) + + def load_scalar_from_subq_existing_row(state, dict_, row): + if self.key not in dict_: + load_scalar_from_subq(state, dict_, row) + + populators["new"].append((self.key, load_scalar_from_subq)) + populators["existing"].append( + (self.key, load_scalar_from_subq_existing_row) + ) + if context.invoke_all_eagers: + populators["eager"].append((self.key, collections.loader)) + + +@log.class_logger +@relationships.RelationshipProperty.strategy_for(lazy="joined") +@relationships.RelationshipProperty.strategy_for(lazy=False) +class JoinedLoader(AbstractRelationshipLoader): + """Provide loading behavior for a :class:`.Relationship` + using joined eager loading. 
+ + """ + + __slots__ = "join_depth" + + def __init__(self, parent, strategy_key): + super().__init__(parent, strategy_key) + self.join_depth = self.parent_property.join_depth + + def init_class_attribute(self, mapper): + self.parent_property._get_strategy( + (("lazy", "select"),) + ).init_class_attribute(mapper) + + def setup_query( + self, + compile_state, + query_entity, + path, + loadopt, + adapter, + column_collection=None, + parentmapper=None, + chained_from_outerjoin=False, + **kwargs, + ): + """Add a left outer join to the statement that's being constructed.""" + + if not compile_state.compile_options._enable_eagerloads: + return + elif ( + loadopt + and compile_state.statement is not None + and compile_state.statement.is_dml + ): + util.warn_deprecated( + "The joinedload loader option is not compatible with DML " + "statements such as INSERT, UPDATE. Only SELECT may be used." + "This warning will become an exception in a future release.", + "2.0", + ) + elif self.uselist: + compile_state.multi_row_eager_loaders = True + + path = path[self.parent_property] + + user_defined_adapter = ( + self._init_user_defined_eager_proc( + loadopt, compile_state, compile_state.attributes + ) + if loadopt + else False + ) + + if user_defined_adapter is not False: + # setup an adapter but dont create any JOIN, assume it's already + # in the query + ( + clauses, + adapter, + add_to_collection, + ) = self._setup_query_on_user_defined_adapter( + compile_state, + query_entity, + path, + adapter, + user_defined_adapter, + ) + + # don't do "wrap" for multi-row, we want to wrap + # limited/distinct SELECT, + # because we want to put the JOIN on the outside. + + else: + # if not via query option, check for + # a cycle + if not path.contains(compile_state.attributes, "loader"): + if self.join_depth: + if path.length / 2 > self.join_depth: + return + elif path.contains_mapper(self.mapper): + return + + # add the JOIN and create an adapter + ( + clauses, + adapter, + add_to_collection, + chained_from_outerjoin, + ) = self._generate_row_adapter( + compile_state, + query_entity, + path, + loadopt, + adapter, + column_collection, + parentmapper, + chained_from_outerjoin, + ) + + # for multi-row, we want to wrap limited/distinct SELECT, + # because we want to put the JOIN on the outside. + compile_state.eager_adding_joins = True + + with_poly_entity = path.get( + compile_state.attributes, "path_with_polymorphic", None + ) + if with_poly_entity is not None: + with_polymorphic = inspect( + with_poly_entity + ).with_polymorphic_mappers + else: + with_polymorphic = None + + path = path[self.entity] + + loading._setup_entity_query( + compile_state, + self.mapper, + query_entity, + path, + clauses, + add_to_collection, + with_polymorphic=with_polymorphic, + parentmapper=self.mapper, + chained_from_outerjoin=chained_from_outerjoin, + ) + + has_nones = util.NONE_SET.intersection(compile_state.secondary_columns) + + if has_nones: + if with_poly_entity is not None: + raise sa_exc.InvalidRequestError( + "Detected unaliased columns when generating joined " + "load. Make sure to use aliased=True or flat=True " + "when using joined loading with with_polymorphic()." 
+ ) + else: + compile_state.secondary_columns = [ + c for c in compile_state.secondary_columns if c is not None + ] + + def _init_user_defined_eager_proc( + self, loadopt, compile_state, target_attributes + ): + # check if the opt applies at all + if "eager_from_alias" not in loadopt.local_opts: + # nope + return False + + path = loadopt.path.parent + + # the option applies. check if the "user_defined_eager_row_processor" + # has been built up. + adapter = path.get( + compile_state.attributes, "user_defined_eager_row_processor", False + ) + if adapter is not False: + # just return it + return adapter + + # otherwise figure it out. + alias = loadopt.local_opts["eager_from_alias"] + root_mapper, prop = path[-2:] + + if alias is not None: + if isinstance(alias, str): + alias = prop.target.alias(alias) + adapter = orm_util.ORMAdapter( + orm_util._TraceAdaptRole.JOINEDLOAD_USER_DEFINED_ALIAS, + prop.mapper, + selectable=alias, + equivalents=prop.mapper._equivalent_columns, + limit_on_entity=False, + ) + else: + if path.contains( + compile_state.attributes, "path_with_polymorphic" + ): + with_poly_entity = path.get( + compile_state.attributes, "path_with_polymorphic" + ) + adapter = orm_util.ORMAdapter( + orm_util._TraceAdaptRole.JOINEDLOAD_PATH_WITH_POLYMORPHIC, + with_poly_entity, + equivalents=prop.mapper._equivalent_columns, + ) + else: + adapter = compile_state._polymorphic_adapters.get( + prop.mapper, None + ) + path.set( + target_attributes, + "user_defined_eager_row_processor", + adapter, + ) + + return adapter + + def _setup_query_on_user_defined_adapter( + self, context, entity, path, adapter, user_defined_adapter + ): + # apply some more wrapping to the "user defined adapter" + # if we are setting up the query for SQL render. + adapter = entity._get_entity_clauses(context) + + if adapter and user_defined_adapter: + user_defined_adapter = user_defined_adapter.wrap(adapter) + path.set( + context.attributes, + "user_defined_eager_row_processor", + user_defined_adapter, + ) + elif adapter: + user_defined_adapter = adapter + path.set( + context.attributes, + "user_defined_eager_row_processor", + user_defined_adapter, + ) + + add_to_collection = context.primary_columns + return user_defined_adapter, adapter, add_to_collection + + def _generate_row_adapter( + self, + compile_state, + entity, + path, + loadopt, + adapter, + column_collection, + parentmapper, + chained_from_outerjoin, + ): + with_poly_entity = path.get( + compile_state.attributes, "path_with_polymorphic", None + ) + if with_poly_entity: + to_adapt = with_poly_entity + else: + insp = inspect(self.entity) + if insp.is_aliased_class: + alt_selectable = insp.selectable + else: + alt_selectable = None + + to_adapt = orm_util.AliasedClass( + self.mapper, + alias=( + alt_selectable._anonymous_fromclause(flat=True) + if alt_selectable is not None + else None + ), + flat=True, + use_mapper_path=True, + ) + + to_adapt_insp = inspect(to_adapt) + + clauses = to_adapt_insp._memo( + ("joinedloader_ormadapter", self), + orm_util.ORMAdapter, + orm_util._TraceAdaptRole.JOINEDLOAD_MEMOIZED_ADAPTER, + to_adapt_insp, + equivalents=self.mapper._equivalent_columns, + adapt_required=True, + allow_label_resolve=False, + anonymize_labels=True, + ) + + assert clauses.is_aliased_class + + innerjoin = ( + loadopt.local_opts.get("innerjoin", self.parent_property.innerjoin) + if loadopt is not None + else self.parent_property.innerjoin + ) + + if not innerjoin: + # if this is an outer join, all non-nested eager joins from + # this path must also be outer 
joins + chained_from_outerjoin = True + + compile_state.create_eager_joins.append( + ( + self._create_eager_join, + entity, + path, + adapter, + parentmapper, + clauses, + innerjoin, + chained_from_outerjoin, + loadopt._extra_criteria if loadopt else (), + ) + ) + + add_to_collection = compile_state.secondary_columns + path.set(compile_state.attributes, "eager_row_processor", clauses) + + return clauses, adapter, add_to_collection, chained_from_outerjoin + + def _create_eager_join( + self, + compile_state, + query_entity, + path, + adapter, + parentmapper, + clauses, + innerjoin, + chained_from_outerjoin, + extra_criteria, + ): + if parentmapper is None: + localparent = query_entity.mapper + else: + localparent = parentmapper + + # whether or not the Query will wrap the selectable in a subquery, + # and then attach eager load joins to that (i.e., in the case of + # LIMIT/OFFSET etc.) + should_nest_selectable = ( + compile_state.multi_row_eager_loaders + and compile_state._should_nest_selectable + ) + + query_entity_key = None + + if ( + query_entity not in compile_state.eager_joins + and not should_nest_selectable + and compile_state.from_clauses + ): + indexes = sql_util.find_left_clause_that_matches_given( + compile_state.from_clauses, query_entity.selectable + ) + + if len(indexes) > 1: + # for the eager load case, I can't reproduce this right + # now. For query.join() I can. + raise sa_exc.InvalidRequestError( + "Can't identify which query entity in which to joined " + "eager load from. Please use an exact match when " + "specifying the join path." + ) + + if indexes: + clause = compile_state.from_clauses[indexes[0]] + # join to an existing FROM clause on the query. + # key it to its list index in the eager_joins dict. + # Query._compile_context will adapt as needed and + # append to the FROM clause of the select(). + query_entity_key, default_towrap = indexes[0], clause + + if query_entity_key is None: + query_entity_key, default_towrap = ( + query_entity, + query_entity.selectable, + ) + + towrap = compile_state.eager_joins.setdefault( + query_entity_key, default_towrap + ) + + if adapter: + if getattr(adapter, "is_aliased_class", False): + # joining from an adapted entity. The adapted entity + # might be a "with_polymorphic", so resolve that to our + # specific mapper's entity before looking for our attribute + # name on it. + efm = adapter.aliased_insp._entity_for_mapper( + localparent + if localparent.isa(self.parent) + else self.parent + ) + + # look for our attribute on the adapted entity, else fall back + # to our straight property + onclause = getattr(efm.entity, self.key, self.parent_property) + else: + onclause = getattr( + orm_util.AliasedClass( + self.parent, adapter.selectable, use_mapper_path=True + ), + self.key, + self.parent_property, + ) + + else: + onclause = self.parent_property + + assert clauses.is_aliased_class + + attach_on_outside = ( + not chained_from_outerjoin + or not innerjoin + or innerjoin == "unnested" + or query_entity.entity_zero.represents_outer_join + ) + + extra_join_criteria = extra_criteria + additional_entity_criteria = compile_state.global_attributes.get( + ("additional_entity_criteria", self.mapper), () + ) + if additional_entity_criteria: + extra_join_criteria += tuple( + ae._resolve_where_criteria(self.mapper) + for ae in additional_entity_criteria + if ae.propagate_to_loaders + ) + + if attach_on_outside: + # this is the "classic" eager join case. 
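Editor's note: from the query side, the "attach on the outside" LEFT OUTER JOIN versus the nested INNER JOIN splice handled below corresponds to the innerjoin parameter of joinedload(). A sketch against the same hypothetical mapping as above:

from sqlalchemy import select
from sqlalchemy.orm import joinedload

# default: LEFT OUTER JOIN attached to the outside of the statement
stmt = select(Child).options(joinedload(Child.parent))

# innerjoin=True permits an INNER JOIN when the relationship cannot be NULL;
# innerjoin="unnested" keeps the join on the outside even when chained
# from an OUTER JOIN further up the path
stmt = select(Child).options(joinedload(Child.parent, innerjoin=True))

# note: when joinedload() targets a collection, rows must be deduplicated,
# e.g. session.scalars(stmt).unique().all()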
+ eagerjoin = orm_util._ORMJoin( + towrap, + clauses.aliased_insp, + onclause, + isouter=not innerjoin + or query_entity.entity_zero.represents_outer_join + or (chained_from_outerjoin and isinstance(towrap, sql.Join)), + _left_memo=self.parent, + _right_memo=path[self.mapper], + _extra_criteria=extra_join_criteria, + ) + else: + # all other cases are innerjoin=='nested' approach + eagerjoin = self._splice_nested_inner_join( + path, path[-2], towrap, clauses, onclause, extra_join_criteria + ) + + compile_state.eager_joins[query_entity_key] = eagerjoin + + # send a hint to the Query as to where it may "splice" this join + eagerjoin.stop_on = query_entity.selectable + + if not parentmapper: + # for parentclause that is the non-eager end of the join, + # ensure all the parent cols in the primaryjoin are actually + # in the + # columns clause (i.e. are not deferred), so that aliasing applied + # by the Query propagates those columns outward. + # This has the effect + # of "undefering" those columns. + for col in sql_util._find_columns( + self.parent_property.primaryjoin + ): + if localparent.persist_selectable.c.contains_column(col): + if adapter: + col = adapter.columns[col] + compile_state._append_dedupe_col_collection( + col, compile_state.primary_columns + ) + + if self.parent_property.order_by: + compile_state.eager_order_by += tuple( + (eagerjoin._target_adapter.copy_and_process)( + util.to_list(self.parent_property.order_by) + ) + ) + + def _splice_nested_inner_join( + self, + path, + entity_we_want_to_splice_onto, + join_obj, + clauses, + onclause, + extra_criteria, + entity_inside_join_structure: Union[ + Mapper, None, Literal[False] + ] = False, + detected_existing_path: Optional[path_registry.PathRegistry] = None, + ): + # recursive fn to splice a nested join into an existing one. + # entity_inside_join_structure=False means this is the outermost call, + # and it should return a value. entity_inside_join_structure= + # indicates we've descended into a join and are looking at a FROM + # clause representing this mapper; if this is not + # entity_we_want_to_splice_onto then return None to end the recursive + # branch + + assert entity_we_want_to_splice_onto is path[-2] + + if entity_inside_join_structure is False: + assert isinstance(join_obj, orm_util._ORMJoin) + + if isinstance(join_obj, sql.selectable.FromGrouping): + # FromGrouping - continue descending into the structure + return self._splice_nested_inner_join( + path, + entity_we_want_to_splice_onto, + join_obj.element, + clauses, + onclause, + extra_criteria, + entity_inside_join_structure, + ) + elif isinstance(join_obj, orm_util._ORMJoin): + # _ORMJoin - continue descending into the structure + + join_right_path = join_obj._right_memo + + # see if right side of join is viable + target_join = self._splice_nested_inner_join( + path, + entity_we_want_to_splice_onto, + join_obj.right, + clauses, + onclause, + extra_criteria, + entity_inside_join_structure=( + join_right_path[-1].mapper + if join_right_path is not None + else None + ), + ) + + if target_join is not None: + # for a right splice, attempt to flatten out + # a JOIN b JOIN c JOIN .. 
to avoid needless + # parenthesis nesting + if not join_obj.isouter and not target_join.isouter: + eagerjoin = join_obj._splice_into_center(target_join) + else: + eagerjoin = orm_util._ORMJoin( + join_obj.left, + target_join, + join_obj.onclause, + isouter=join_obj.isouter, + _left_memo=join_obj._left_memo, + ) + + eagerjoin._target_adapter = target_join._target_adapter + return eagerjoin + + else: + # see if left side of join is viable + target_join = self._splice_nested_inner_join( + path, + entity_we_want_to_splice_onto, + join_obj.left, + clauses, + onclause, + extra_criteria, + entity_inside_join_structure=join_obj._left_memo, + detected_existing_path=join_right_path, + ) + + if target_join is not None: + eagerjoin = orm_util._ORMJoin( + target_join, + join_obj.right, + join_obj.onclause, + isouter=join_obj.isouter, + _right_memo=join_obj._right_memo, + ) + eagerjoin._target_adapter = target_join._target_adapter + return eagerjoin + + # neither side viable, return None, or fail if this was the top + # most call + if entity_inside_join_structure is False: + assert ( + False + ), "assertion failed attempting to produce joined eager loads" + return None + + # reached an endpoint (e.g. a table that's mapped, or an alias of that + # table). determine if we can use this endpoint to splice onto + + # is this the entity we want to splice onto in the first place? + if not entity_we_want_to_splice_onto.isa(entity_inside_join_structure): + return None + + # path check. if we know the path how this join endpoint got here, + # lets look at our path we are satisfying and see if we're in the + # wrong place. This is specifically for when our entity may + # appear more than once in the path, issue #11449 + # updated in issue #11965. + if detected_existing_path and len(detected_existing_path) > 2: + # this assertion is currently based on how this call is made, + # where given a join_obj, the call will have these parameters as + # entity_inside_join_structure=join_obj._left_memo + # and entity_inside_join_structure=join_obj._right_memo.mapper + assert detected_existing_path[-3] is entity_inside_join_structure + + # from that, see if the path we are targeting matches the + # "existing" path of this join all the way up to the midpoint + # of this join object (e.g. the relationship). + # if not, then this is not our target + # + # a test condition where this test is false looks like: + # + # desired splice: Node->kind->Kind + # path of desired splice: NodeGroup->nodes->Node->kind + # path we've located: NodeGroup->nodes->Node->common_node->Node + # + # above, because we want to splice kind->Kind onto + # NodeGroup->nodes->Node, this is not our path because it actually + # goes more steps than we want into self-referential + # ->common_node->Node + # + # a test condition where this test is true looks like: + # + # desired splice: B->c2s->C2 + # path of desired splice: A->bs->B->c2s + # path we've located: A->bs->B->c1s->C1 + # + # above, we want to splice c2s->C2 onto B, and the located path + # shows that the join ends with B->c1s->C1. so we will + # add another join onto that, which would create a "branch" that + # we might represent in a pseudopath as: + # + # B->c1s->C1 + # ->c2s->C2 + # + # i.e. 
A JOIN B ON JOIN C1 ON + # JOIN C2 ON + # + + if detected_existing_path[0:-2] != path.path[0:-1]: + return None + + return orm_util._ORMJoin( + join_obj, + clauses.aliased_insp, + onclause, + isouter=False, + _left_memo=entity_inside_join_structure, + _right_memo=path[path[-1].mapper], + _extra_criteria=extra_criteria, + ) + + def _create_eager_adapter(self, context, result, adapter, path, loadopt): + compile_state = context.compile_state + + user_defined_adapter = ( + self._init_user_defined_eager_proc( + loadopt, compile_state, context.attributes + ) + if loadopt + else False + ) + + if user_defined_adapter is not False: + decorator = user_defined_adapter + # user defined eagerloads are part of the "primary" + # portion of the load. + # the adapters applied to the Query should be honored. + if compile_state.compound_eager_adapter and decorator: + decorator = decorator.wrap( + compile_state.compound_eager_adapter + ) + elif compile_state.compound_eager_adapter: + decorator = compile_state.compound_eager_adapter + else: + decorator = path.get( + compile_state.attributes, "eager_row_processor" + ) + if decorator is None: + return False + + if self.mapper._result_has_identity_key(result, decorator): + return decorator + else: + # no identity key - don't return a row + # processor, will cause a degrade to lazy + return False + + def create_row_processor( + self, + context, + query_entity, + path, + loadopt, + mapper, + result, + adapter, + populators, + ): + + if not context.compile_state.compile_options._enable_eagerloads: + return + + if not self.parent.class_manager[self.key].impl.supports_population: + raise sa_exc.InvalidRequestError( + "'%s' does not support object " + "population - eager loading cannot be applied." % self + ) + + if self.uselist: + context.loaders_require_uniquing = True + + our_path = path[self.parent_property] + + eager_adapter = self._create_eager_adapter( + context, result, adapter, our_path, loadopt + ) + + if eager_adapter is not False: + key = self.key + + _instance = loading._instance_processor( + query_entity, + self.mapper, + context, + result, + our_path[self.entity], + eager_adapter, + ) + + if not self.uselist: + self._create_scalar_loader(context, key, _instance, populators) + else: + self._create_collection_loader( + context, key, _instance, populators + ) + else: + self.parent_property._get_strategy( + (("lazy", "select"),) + ).create_row_processor( + context, + query_entity, + path, + loadopt, + mapper, + result, + adapter, + populators, + ) + + def _create_collection_loader(self, context, key, _instance, populators): + def load_collection_from_joined_new_row(state, dict_, row): + # note this must unconditionally clear out any existing collection. + # an existing collection would be present only in the case of + # populate_existing(). 
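            # Illustrative sketch (hypothetical ``User.addresses``
            # relationship): the populate_existing() case referred to above
            # arises from statements such as
            #
            #   session.scalars(
            #       select(User)
            #       .options(joinedload(User.addresses))
            #       .execution_options(populate_existing=True)
            #   )
            #
            # where already-loaded User objects are refreshed in place and
            # their collections must be rebuilt from the new rows.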
+ collection = attributes.init_state_collection(state, dict_, key) + result_list = util.UniqueAppender( + collection, "append_without_event" + ) + context.attributes[(state, key)] = result_list + inst = _instance(row) + if inst is not None: + result_list.append(inst) + + def load_collection_from_joined_existing_row(state, dict_, row): + if (state, key) in context.attributes: + result_list = context.attributes[(state, key)] + else: + # appender_key can be absent from context.attributes + # with isnew=False when self-referential eager loading + # is used; the same instance may be present in two + # distinct sets of result columns + collection = attributes.init_state_collection( + state, dict_, key + ) + result_list = util.UniqueAppender( + collection, "append_without_event" + ) + context.attributes[(state, key)] = result_list + inst = _instance(row) + if inst is not None: + result_list.append(inst) + + def load_collection_from_joined_exec(state, dict_, row): + _instance(row) + + populators["new"].append( + (self.key, load_collection_from_joined_new_row) + ) + populators["existing"].append( + (self.key, load_collection_from_joined_existing_row) + ) + if context.invoke_all_eagers: + populators["eager"].append( + (self.key, load_collection_from_joined_exec) + ) + + def _create_scalar_loader(self, context, key, _instance, populators): + def load_scalar_from_joined_new_row(state, dict_, row): + # set a scalar object instance directly on the parent + # object, bypassing InstrumentedAttribute event handlers. + dict_[key] = _instance(row) + + def load_scalar_from_joined_existing_row(state, dict_, row): + # call _instance on the row, even though the object has + # been created, so that we further descend into properties + existing = _instance(row) + + # conflicting value already loaded, this shouldn't happen + if key in dict_: + if existing is not dict_[key]: + util.warn( + "Multiple rows returned with " + "uselist=False for eagerly-loaded attribute '%s' " + % self + ) + else: + # this case is when one row has multiple loads of the + # same entity (e.g. via aliasing), one has an attribute + # that the other doesn't. 
+ dict_[key] = existing + + def load_scalar_from_joined_exec(state, dict_, row): + _instance(row) + + populators["new"].append((self.key, load_scalar_from_joined_new_row)) + populators["existing"].append( + (self.key, load_scalar_from_joined_existing_row) + ) + if context.invoke_all_eagers: + populators["eager"].append( + (self.key, load_scalar_from_joined_exec) + ) + + +@log.class_logger +@relationships.RelationshipProperty.strategy_for(lazy="selectin") +class SelectInLoader(PostLoader, util.MemoizedSlots): + __slots__ = ( + "join_depth", + "omit_join", + "_parent_alias", + "_query_info", + "_fallback_query_info", + ) + + query_info = collections.namedtuple( + "queryinfo", + [ + "load_only_child", + "load_with_join", + "in_expr", + "pk_cols", + "zero_idx", + "child_lookup_cols", + ], + ) + + _chunksize = 500 + + def __init__(self, parent, strategy_key): + super().__init__(parent, strategy_key) + self.join_depth = self.parent_property.join_depth + is_m2o = self.parent_property.direction is interfaces.MANYTOONE + + if self.parent_property.omit_join is not None: + self.omit_join = self.parent_property.omit_join + else: + lazyloader = self.parent_property._get_strategy( + (("lazy", "select"),) + ) + if is_m2o: + self.omit_join = lazyloader.use_get + else: + self.omit_join = self.parent._get_clause[0].compare( + lazyloader._rev_lazywhere, + use_proxies=True, + compare_keys=False, + equivalents=self.parent._equivalent_columns, + ) + + if self.omit_join: + if is_m2o: + self._query_info = self._init_for_omit_join_m2o() + self._fallback_query_info = self._init_for_join() + else: + self._query_info = self._init_for_omit_join() + else: + self._query_info = self._init_for_join() + + def _init_for_omit_join(self): + pk_to_fk = dict( + self.parent_property._join_condition.local_remote_pairs + ) + pk_to_fk.update( + (equiv, pk_to_fk[k]) + for k in list(pk_to_fk) + for equiv in self.parent._equivalent_columns.get(k, ()) + ) + + pk_cols = fk_cols = [ + pk_to_fk[col] for col in self.parent.primary_key if col in pk_to_fk + ] + if len(fk_cols) > 1: + in_expr = sql.tuple_(*fk_cols) + zero_idx = False + else: + in_expr = fk_cols[0] + zero_idx = True + + return self.query_info(False, False, in_expr, pk_cols, zero_idx, None) + + def _init_for_omit_join_m2o(self): + pk_cols = self.mapper.primary_key + if len(pk_cols) > 1: + in_expr = sql.tuple_(*pk_cols) + zero_idx = False + else: + in_expr = pk_cols[0] + zero_idx = True + + lazyloader = self.parent_property._get_strategy((("lazy", "select"),)) + lookup_cols = [lazyloader._equated_columns[pk] for pk in pk_cols] + + return self.query_info( + True, False, in_expr, pk_cols, zero_idx, lookup_cols + ) + + def _init_for_join(self): + self._parent_alias = AliasedClass(self.parent.class_) + pa_insp = inspect(self._parent_alias) + pk_cols = [ + pa_insp._adapt_element(col) for col in self.parent.primary_key + ] + if len(pk_cols) > 1: + in_expr = sql.tuple_(*pk_cols) + zero_idx = False + else: + in_expr = pk_cols[0] + zero_idx = True + return self.query_info(False, True, in_expr, pk_cols, zero_idx, None) + + def init_class_attribute(self, mapper): + self.parent_property._get_strategy( + (("lazy", "select"),) + ).init_class_attribute(mapper) + + def create_row_processor( + self, + context, + query_entity, + path, + loadopt, + mapper, + result, + adapter, + populators, + ): + if context.refresh_state: + return self._immediateload_create_row_processor( + context, + query_entity, + path, + loadopt, + mapper, + result, + adapter, + populators, + ) + + ( + effective_path, + 
run_loader, + execution_options, + recursion_depth, + ) = self._setup_for_recursion( + context, path, loadopt, join_depth=self.join_depth + ) + + if not run_loader: + return + + if not context.compile_state.compile_options._enable_eagerloads: + return + + if not self.parent.class_manager[self.key].impl.supports_population: + raise sa_exc.InvalidRequestError( + "'%s' does not support object " + "population - eager loading cannot be applied." % self + ) + + # a little dance here as the "path" is still something that only + # semi-tracks the exact series of things we are loading, still not + # telling us about with_polymorphic() and stuff like that when it's at + # the root.. the initial MapperEntity is more accurate for this case. + if len(path) == 1: + if not orm_util._entity_isa(query_entity.entity_zero, self.parent): + return + elif not orm_util._entity_isa(path[-1], self.parent): + return + + selectin_path = effective_path + + path_w_prop = path[self.parent_property] + + # build up a path indicating the path from the leftmost + # entity to the thing we're subquery loading. + with_poly_entity = path_w_prop.get( + context.attributes, "path_with_polymorphic", None + ) + if with_poly_entity is not None: + effective_entity = inspect(with_poly_entity) + else: + effective_entity = self.entity + + loading.PostLoad.callable_for_path( + context, + selectin_path, + self.parent, + self.parent_property, + self._load_for_path, + effective_entity, + loadopt, + recursion_depth, + execution_options, + ) + + def _load_for_path( + self, + context, + path, + states, + load_only, + effective_entity, + loadopt, + recursion_depth, + execution_options, + ): + if load_only and self.key not in load_only: + return + + query_info = self._query_info + + if query_info.load_only_child: + our_states = collections.defaultdict(list) + none_states = [] + + mapper = self.parent + + for state, overwrite in states: + state_dict = state.dict + related_ident = tuple( + mapper._get_state_attr_by_column( + state, + state_dict, + lk, + passive=attributes.PASSIVE_NO_FETCH, + ) + for lk in query_info.child_lookup_cols + ) + # if the loaded parent objects do not have the foreign key + # to the related item loaded, then degrade into the joined + # version of selectinload + if LoaderCallableStatus.PASSIVE_NO_RESULT in related_ident: + query_info = self._fallback_query_info + break + + # organize states into lists keyed to particular foreign + # key values. + if None not in related_ident: + our_states[related_ident].append( + (state, state_dict, overwrite) + ) + else: + # For FK values that have None, add them to a + # separate collection that will be populated separately + none_states.append((state, state_dict, overwrite)) + + # note the above conditional may have changed query_info + if not query_info.load_only_child: + our_states = [ + (state.key[1], state, state.dict, overwrite) + for state, overwrite in states + ] + + pk_cols = query_info.pk_cols + in_expr = query_info.in_expr + + if not query_info.load_with_join: + # in "omit join" mode, the primary key column and the + # "in" expression are in terms of the related entity. So + # if the related entity is polymorphic or otherwise aliased, + # we need to adapt our "pk_cols" and "in_expr" to that + # entity. in non-"omit join" mode, these are against the + # parent entity and do not need adaption. 
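            # Illustrative sketch (hypothetical parent class A with a
            # relationship A.bs to related class B): the two modes emit
            # queries of roughly these shapes:
            #
            #   omit-join:  SELECT b.a_id, b.* FROM b
            #               WHERE b.a_id IN (:primary_keys)
            #   with join:  SELECT a_1.id, b.* FROM a AS a_1
            #               JOIN b ON a_1.id = b.a_id
            #               WHERE a_1.id IN (:primary_keys)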
+ if effective_entity.is_aliased_class: + pk_cols = [ + effective_entity._adapt_element(col) for col in pk_cols + ] + in_expr = effective_entity._adapt_element(in_expr) + + bundle_ent = orm_util.Bundle("pk", *pk_cols) + bundle_sql = bundle_ent.__clause_element__() + + entity_sql = effective_entity.__clause_element__() + q = Select._create_raw_select( + _raw_columns=[bundle_sql, entity_sql], + _label_style=LABEL_STYLE_TABLENAME_PLUS_COL, + _compile_options=ORMCompileState.default_compile_options, + _propagate_attrs={ + "compile_state_plugin": "orm", + "plugin_subject": effective_entity, + }, + ) + + if not query_info.load_with_join: + # the Bundle we have in the "omit_join" case is against raw, non + # annotated columns, so to ensure the Query knows its primary + # entity, we add it explicitly. If we made the Bundle against + # annotated columns, we hit a performance issue in this specific + # case, which is detailed in issue #4347. + q = q.select_from(effective_entity) + else: + # in the non-omit_join case, the Bundle is against the annotated/ + # mapped column of the parent entity, but the #4347 issue does not + # occur in this case. + q = q.select_from(self._parent_alias).join( + getattr(self._parent_alias, self.parent_property.key).of_type( + effective_entity + ) + ) + + q = q.filter(in_expr.in_(sql.bindparam("primary_keys"))) + + # a test which exercises what these comments talk about is + # test_selectin_relations.py -> test_twolevel_selectin_w_polymorphic + # + # effective_entity above is given to us in terms of the cached + # statement, namely this one: + orig_query = context.compile_state.select_statement + + # the actual statement that was requested is this one: + # context_query = context.user_passed_query + # + # that's not the cached one, however. So while it is of the identical + # structure, if it has entities like AliasedInsp, which we get from + # aliased() or with_polymorphic(), the AliasedInsp will likely be a + # different object identity each time, and will not match up + # hashing-wise to the corresponding AliasedInsp that's in the + # cached query, meaning it won't match on paths and loader lookups + # and loaders like this one will be skipped if it is used in options. + # + # as it turns out, standard loader options like selectinload(), + # lazyload() that have a path need + # to come from the cached query so that the AliasedInsp etc. objects + # that are in the query line up with the object that's in the path + # of the strategy object. however other options like + # with_loader_criteria() that doesn't have a path (has a fixed entity) + # and needs to have access to the latest closure state in order to + # be correct, we need to use the uncached one. + # + # as of #8399 we let the loader option itself figure out what it + # wants to do given cached and uncached version of itself. + + effective_path = path[self.parent_property] + + if orig_query is context.user_passed_query: + new_options = orig_query._with_options + else: + cached_options = orig_query._with_options + uncached_options = context.user_passed_query._with_options + + # propagate compile state options from the original query, + # updating their "extra_criteria" as necessary. 
+ # note this will create a different cache key than + # "orig" options if extra_criteria is present, because the copy + # of extra_criteria will have different boundparam than that of + # the QueryableAttribute in the path + new_options = [ + orig_opt._adapt_cached_option_to_uncached_option( + context, uncached_opt + ) + for orig_opt, uncached_opt in zip( + cached_options, uncached_options + ) + ] + + if loadopt and loadopt._extra_criteria: + new_options += ( + orm_util.LoaderCriteriaOption( + effective_entity, + loadopt._generate_extra_criteria(context), + ), + ) + + if recursion_depth is not None: + effective_path = effective_path._truncate_recursive() + + q = q.options(*new_options) + + q = q._update_compile_options({"_current_path": effective_path}) + if context.populate_existing: + q = q.execution_options(populate_existing=True) + + if self.parent_property.order_by: + if not query_info.load_with_join: + eager_order_by = self.parent_property.order_by + if effective_entity.is_aliased_class: + eager_order_by = [ + effective_entity._adapt_element(elem) + for elem in eager_order_by + ] + q = q.order_by(*eager_order_by) + else: + + def _setup_outermost_orderby(compile_context): + compile_context.eager_order_by += tuple( + util.to_list(self.parent_property.order_by) + ) + + q = q._add_context_option( + _setup_outermost_orderby, self.parent_property + ) + + if query_info.load_only_child: + self._load_via_child( + our_states, + none_states, + query_info, + q, + context, + execution_options, + ) + else: + self._load_via_parent( + our_states, query_info, q, context, execution_options + ) + + def _load_via_child( + self, + our_states, + none_states, + query_info, + q, + context, + execution_options, + ): + uselist = self.uselist + + # this sort is really for the benefit of the unit tests + our_keys = sorted(our_states) + while our_keys: + chunk = our_keys[0 : self._chunksize] + our_keys = our_keys[self._chunksize :] + data = { + k: v + for k, v in context.session.execute( + q, + params={ + "primary_keys": [ + key[0] if query_info.zero_idx else key + for key in chunk + ] + }, + execution_options=execution_options, + ).unique() + } + + for key in chunk: + # for a real foreign key and no concurrent changes to the + # DB while running this method, "key" is always present in + # data. However, for primaryjoins without real foreign keys + # a non-None primaryjoin condition may still refer to no + # related object. 
+ related_obj = data.get(key, None) + for state, dict_, overwrite in our_states[key]: + if not overwrite and self.key in dict_: + continue + + state.get_impl(self.key).set_committed_value( + state, + dict_, + related_obj if not uselist else [related_obj], + ) + # populate none states with empty value / collection + for state, dict_, overwrite in none_states: + if not overwrite and self.key in dict_: + continue + + # note it's OK if this is a uselist=True attribute, the empty + # collection will be populated + state.get_impl(self.key).set_committed_value(state, dict_, None) + + def _load_via_parent( + self, our_states, query_info, q, context, execution_options + ): + uselist = self.uselist + _empty_result = () if uselist else None + + while our_states: + chunk = our_states[0 : self._chunksize] + our_states = our_states[self._chunksize :] + + primary_keys = [ + key[0] if query_info.zero_idx else key + for key, state, state_dict, overwrite in chunk + ] + + data = collections.defaultdict(list) + for k, v in itertools.groupby( + context.session.execute( + q, + params={"primary_keys": primary_keys}, + execution_options=execution_options, + ).unique(), + lambda x: x[0], + ): + data[k].extend(vv[1] for vv in v) + + for key, state, state_dict, overwrite in chunk: + if not overwrite and self.key in state_dict: + continue + + collection = data.get(key, _empty_result) + + if not uselist and collection: + if len(collection) > 1: + util.warn( + "Multiple rows returned with " + "uselist=False for eagerly-loaded " + "attribute '%s' " % self + ) + state.get_impl(self.key).set_committed_value( + state, state_dict, collection[0] + ) + else: + # note that empty tuple set on uselist=False sets the + # value to None + state.get_impl(self.key).set_committed_value( + state, state_dict, collection + ) + + +def single_parent_validator(desc, prop): + def _do_check(state, value, oldvalue, initiator): + if value is not None and initiator.key == prop.key: + hasparent = initiator.hasparent(attributes.instance_state(value)) + if hasparent and oldvalue is not value: + raise sa_exc.InvalidRequestError( + "Instance %s is already associated with an instance " + "of %s via its %s attribute, and is only allowed a " + "single parent." 
+ % (orm_util.instance_str(value), state.class_, prop), + code="bbf1", + ) + return value + + def append(state, value, initiator): + return _do_check(state, value, None, initiator) + + def set_(state, value, oldvalue, initiator): + return _do_check(state, value, oldvalue, initiator) + + event.listen( + desc, "append", append, raw=True, retval=True, active_history=True + ) + event.listen(desc, "set", set_, raw=True, retval=True, active_history=True) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/strategy_options.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/strategy_options.py new file mode 100644 index 0000000000000000000000000000000000000000..f2e6948a7ba11f612d50f495d5e1873842b800bd --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/strategy_options.py @@ -0,0 +1,2549 @@ +# orm/strategy_options.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php +# mypy: allow-untyped-defs, allow-untyped-calls + +""" + +""" + +from __future__ import annotations + +import typing +from typing import Any +from typing import Callable +from typing import cast +from typing import Dict +from typing import Iterable +from typing import Optional +from typing import overload +from typing import Sequence +from typing import Tuple +from typing import Type +from typing import TypeVar +from typing import Union + +from . import util as orm_util +from ._typing import insp_is_aliased_class +from ._typing import insp_is_attribute +from ._typing import insp_is_mapper +from ._typing import insp_is_mapper_property +from .attributes import QueryableAttribute +from .base import InspectionAttr +from .interfaces import LoaderOption +from .path_registry import _DEFAULT_TOKEN +from .path_registry import _StrPathToken +from .path_registry import _WILDCARD_TOKEN +from .path_registry import AbstractEntityRegistry +from .path_registry import path_is_property +from .path_registry import PathRegistry +from .path_registry import TokenRegistry +from .util import _orm_full_deannotate +from .util import AliasedInsp +from .. import exc as sa_exc +from .. import inspect +from .. 
import util +from ..sql import and_ +from ..sql import cache_key +from ..sql import coercions +from ..sql import roles +from ..sql import traversals +from ..sql import visitors +from ..sql.base import _generative +from ..util.typing import Final +from ..util.typing import Literal +from ..util.typing import Self + +_RELATIONSHIP_TOKEN: Final[Literal["relationship"]] = "relationship" +_COLUMN_TOKEN: Final[Literal["column"]] = "column" + +_FN = TypeVar("_FN", bound="Callable[..., Any]") + +if typing.TYPE_CHECKING: + from ._typing import _EntityType + from ._typing import _InternalEntityType + from .context import _MapperEntity + from .context import ORMCompileState + from .context import QueryContext + from .interfaces import _StrategyKey + from .interfaces import MapperProperty + from .interfaces import ORMOption + from .mapper import Mapper + from .path_registry import _PathRepresentation + from ..sql._typing import _ColumnExpressionArgument + from ..sql._typing import _FromClauseArgument + from ..sql.cache_key import _CacheKeyTraversalType + from ..sql.cache_key import CacheKey + + +_AttrType = Union[Literal["*"], "QueryableAttribute[Any]"] + +_WildcardKeyType = Literal["relationship", "column"] +_StrategySpec = Dict[str, Any] +_OptsType = Dict[str, Any] +_AttrGroupType = Tuple[_AttrType, ...] + + +class _AbstractLoad(traversals.GenerativeOnTraversal, LoaderOption): + __slots__ = ("propagate_to_loaders",) + + _is_strategy_option = True + propagate_to_loaders: bool + + def contains_eager( + self, + attr: _AttrType, + alias: Optional[_FromClauseArgument] = None, + _is_chain: bool = False, + _propagate_to_loaders: bool = False, + ) -> Self: + r"""Indicate that the given attribute should be eagerly loaded from + columns stated manually in the query. + + This function is part of the :class:`_orm.Load` interface and supports + both method-chained and standalone operation. + + The option is used in conjunction with an explicit join that loads + the desired rows, i.e.:: + + sess.query(Order).join(Order.user).options(contains_eager(Order.user)) + + The above query would join from the ``Order`` entity to its related + ``User`` entity, and the returned ``Order`` objects would have the + ``Order.user`` attribute pre-populated. + + It may also be used for customizing the entries in an eagerly loaded + collection; queries will normally want to use the + :ref:`orm_queryguide_populate_existing` execution option assuming the + primary collection of parent objects may already have been loaded:: + + sess.query(User).join(User.addresses).filter( + Address.email_address.like("%@aol.com") + ).options(contains_eager(User.addresses)).populate_existing() + + See the section :ref:`contains_eager` for complete usage details. + + .. seealso:: + + :ref:`loading_toplevel` + + :ref:`contains_eager` + + """ + if alias is not None: + if not isinstance(alias, str): + coerced_alias = coercions.expect(roles.FromClauseRole, alias) + else: + util.warn_deprecated( + "Passing a string name for the 'alias' argument to " + "'contains_eager()` is deprecated, and will not work in a " + "future release. 
Please use a sqlalchemy.alias() or " + "sqlalchemy.orm.aliased() construct.", + version="1.4", + ) + coerced_alias = alias + + elif getattr(attr, "_of_type", None): + assert isinstance(attr, QueryableAttribute) + ot: Optional[_InternalEntityType[Any]] = inspect(attr._of_type) + assert ot is not None + coerced_alias = ot.selectable + else: + coerced_alias = None + + cloned = self._set_relationship_strategy( + attr, + {"lazy": "joined"}, + propagate_to_loaders=_propagate_to_loaders, + opts={"eager_from_alias": coerced_alias}, + _reconcile_to_other=True if _is_chain else None, + ) + return cloned + + def load_only(self, *attrs: _AttrType, raiseload: bool = False) -> Self: + r"""Indicate that for a particular entity, only the given list + of column-based attribute names should be loaded; all others will be + deferred. + + This function is part of the :class:`_orm.Load` interface and supports + both method-chained and standalone operation. + + Example - given a class ``User``, load only the ``name`` and + ``fullname`` attributes:: + + session.query(User).options(load_only(User.name, User.fullname)) + + Example - given a relationship ``User.addresses -> Address``, specify + subquery loading for the ``User.addresses`` collection, but on each + ``Address`` object load only the ``email_address`` attribute:: + + session.query(User).options( + subqueryload(User.addresses).load_only(Address.email_address) + ) + + For a statement that has multiple entities, + the lead entity can be + specifically referred to using the :class:`_orm.Load` constructor:: + + stmt = ( + select(User, Address) + .join(User.addresses) + .options( + Load(User).load_only(User.name, User.fullname), + Load(Address).load_only(Address.email_address), + ) + ) + + When used together with the + :ref:`populate_existing ` + execution option only the attributes listed will be refreshed. + + :param \*attrs: Attributes to be loaded, all others will be deferred. + + :param raiseload: raise :class:`.InvalidRequestError` rather than + lazy loading a value when a deferred attribute is accessed. Used + to prevent unwanted SQL from being emitted. + + .. versionadded:: 2.0 + + .. seealso:: + + :ref:`orm_queryguide_column_deferral` - in the + :ref:`queryguide_toplevel` + + :param \*attrs: Attributes to be loaded, all others will be deferred. + + :param raiseload: raise :class:`.InvalidRequestError` rather than + lazy loading a value when a deferred attribute is accessed. Used + to prevent unwanted SQL from being emitted. + + .. versionadded:: 2.0 + + """ + cloned = self._set_column_strategy( + attrs, + {"deferred": False, "instrument": True}, + ) + + wildcard_strategy = {"deferred": True, "instrument": True} + if raiseload: + wildcard_strategy["raiseload"] = True + + cloned = cloned._set_column_strategy( + ("*",), + wildcard_strategy, + ) + return cloned + + def joinedload( + self, + attr: _AttrType, + innerjoin: Optional[bool] = None, + ) -> Self: + """Indicate that the given attribute should be loaded using joined + eager loading. + + This function is part of the :class:`_orm.Load` interface and supports + both method-chained and standalone operation. 
+ + examples:: + + # joined-load the "orders" collection on "User" + select(User).options(joinedload(User.orders)) + + # joined-load Order.items and then Item.keywords + select(Order).options(joinedload(Order.items).joinedload(Item.keywords)) + + # lazily load Order.items, but when Items are loaded, + # joined-load the keywords collection + select(Order).options(lazyload(Order.items).joinedload(Item.keywords)) + + :param innerjoin: if ``True``, indicates that the joined eager load + should use an inner join instead of the default of left outer join:: + + select(Order).options(joinedload(Order.user, innerjoin=True)) + + In order to chain multiple eager joins together where some may be + OUTER and others INNER, right-nested joins are used to link them:: + + select(A).options( + joinedload(A.bs, innerjoin=False).joinedload(B.cs, innerjoin=True) + ) + + The above query, linking A.bs via "outer" join and B.cs via "inner" + join would render the joins as "a LEFT OUTER JOIN (b JOIN c)". When + using older versions of SQLite (< 3.7.16), this form of JOIN is + translated to use full subqueries as this syntax is otherwise not + directly supported. + + The ``innerjoin`` flag can also be stated with the term ``"unnested"``. + This indicates that an INNER JOIN should be used, *unless* the join + is linked to a LEFT OUTER JOIN to the left, in which case it + will render as LEFT OUTER JOIN. For example, supposing ``A.bs`` + is an outerjoin:: + + select(A).options(joinedload(A.bs).joinedload(B.cs, innerjoin="unnested")) + + The above join will render as "a LEFT OUTER JOIN b LEFT OUTER JOIN c", + rather than as "a LEFT OUTER JOIN (b JOIN c)". + + .. note:: The "unnested" flag does **not** affect the JOIN rendered + from a many-to-many association table, e.g. a table configured as + :paramref:`_orm.relationship.secondary`, to the target table; for + correctness of results, these joins are always INNER and are + therefore right-nested if linked to an OUTER join. + + .. note:: + + The joins produced by :func:`_orm.joinedload` are **anonymously + aliased**. The criteria by which the join proceeds cannot be + modified, nor can the ORM-enabled :class:`_sql.Select` or legacy + :class:`_query.Query` refer to these joins in any way, including + ordering. See :ref:`zen_of_eager_loading` for further detail. + + To produce a specific SQL JOIN which is explicitly available, use + :meth:`_sql.Select.join` and :meth:`_query.Query.join`. To combine + explicit JOINs with eager loading of collections, use + :func:`_orm.contains_eager`; see :ref:`contains_eager`. + + .. seealso:: + + :ref:`loading_toplevel` + + :ref:`joined_eager_loading` + + """ # noqa: E501 + loader = self._set_relationship_strategy( + attr, + {"lazy": "joined"}, + opts=( + {"innerjoin": innerjoin} + if innerjoin is not None + else util.EMPTY_DICT + ), + ) + return loader + + def subqueryload(self, attr: _AttrType) -> Self: + """Indicate that the given attribute should be loaded using + subquery eager loading. + + This function is part of the :class:`_orm.Load` interface and supports + both method-chained and standalone operation. 
+ + examples:: + + # subquery-load the "orders" collection on "User" + select(User).options(subqueryload(User.orders)) + + # subquery-load Order.items and then Item.keywords + select(Order).options( + subqueryload(Order.items).subqueryload(Item.keywords) + ) + + # lazily load Order.items, but when Items are loaded, + # subquery-load the keywords collection + select(Order).options(lazyload(Order.items).subqueryload(Item.keywords)) + + .. seealso:: + + :ref:`loading_toplevel` + + :ref:`subquery_eager_loading` + + """ + return self._set_relationship_strategy(attr, {"lazy": "subquery"}) + + def selectinload( + self, + attr: _AttrType, + recursion_depth: Optional[int] = None, + ) -> Self: + """Indicate that the given attribute should be loaded using + SELECT IN eager loading. + + This function is part of the :class:`_orm.Load` interface and supports + both method-chained and standalone operation. + + examples:: + + # selectin-load the "orders" collection on "User" + select(User).options(selectinload(User.orders)) + + # selectin-load Order.items and then Item.keywords + select(Order).options( + selectinload(Order.items).selectinload(Item.keywords) + ) + + # lazily load Order.items, but when Items are loaded, + # selectin-load the keywords collection + select(Order).options(lazyload(Order.items).selectinload(Item.keywords)) + + :param recursion_depth: optional int; when set to a positive integer + in conjunction with a self-referential relationship, + indicates "selectin" loading will continue that many levels deep + automatically until no items are found. + + .. note:: The :paramref:`_orm.selectinload.recursion_depth` option + currently supports only self-referential relationships. There + is not yet an option to automatically traverse recursive structures + with more than one relationship involved. + + Additionally, the :paramref:`_orm.selectinload.recursion_depth` + parameter is new and experimental and should be treated as "alpha" + status for the 2.0 series. + + .. versionadded:: 2.0 added + :paramref:`_orm.selectinload.recursion_depth` + + + .. seealso:: + + :ref:`loading_toplevel` + + :ref:`selectin_eager_loading` + + """ + return self._set_relationship_strategy( + attr, + {"lazy": "selectin"}, + opts={"recursion_depth": recursion_depth}, + ) + + def lazyload(self, attr: _AttrType) -> Self: + """Indicate that the given attribute should be loaded using "lazy" + loading. + + This function is part of the :class:`_orm.Load` interface and supports + both method-chained and standalone operation. + + .. seealso:: + + :ref:`loading_toplevel` + + :ref:`lazy_loading` + + """ + return self._set_relationship_strategy(attr, {"lazy": "select"}) + + def immediateload( + self, + attr: _AttrType, + recursion_depth: Optional[int] = None, + ) -> Self: + """Indicate that the given attribute should be loaded using + an immediate load with a per-attribute SELECT statement. + + The load is achieved using the "lazyloader" strategy and does not + fire off any additional eager loaders. + + The :func:`.immediateload` option is superseded in general + by the :func:`.selectinload` option, which performs the same task + more efficiently by emitting a SELECT for all loaded objects. + + This function is part of the :class:`_orm.Load` interface and supports + both method-chained and standalone operation. 
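        For example, an illustrative sketch assuming a hypothetical
        ``User.addresses`` relationship; each ``User`` loaded by the
        statement below emits one additional SELECT for its ``addresses``
        collection at load time::

            select(User).options(immediateload(User.addresses))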
+ + :param recursion_depth: optional int; when set to a positive integer + in conjunction with a self-referential relationship, + indicates "selectin" loading will continue that many levels deep + automatically until no items are found. + + .. note:: The :paramref:`_orm.immediateload.recursion_depth` option + currently supports only self-referential relationships. There + is not yet an option to automatically traverse recursive structures + with more than one relationship involved. + + .. warning:: This parameter is new and experimental and should be + treated as "alpha" status + + .. versionadded:: 2.0 added + :paramref:`_orm.immediateload.recursion_depth` + + + .. seealso:: + + :ref:`loading_toplevel` + + :ref:`selectin_eager_loading` + + """ + loader = self._set_relationship_strategy( + attr, + {"lazy": "immediate"}, + opts={"recursion_depth": recursion_depth}, + ) + return loader + + def noload(self, attr: _AttrType) -> Self: + """Indicate that the given relationship attribute should remain + unloaded. + + The relationship attribute will return ``None`` when accessed without + producing any loading effect. + + This function is part of the :class:`_orm.Load` interface and supports + both method-chained and standalone operation. + + :func:`_orm.noload` applies to :func:`_orm.relationship` attributes + only. + + .. legacy:: The :func:`_orm.noload` option is **legacy**. As it + forces collections to be empty, which invariably leads to + non-intuitive and difficult to predict results. There are no + legitimate uses for this option in modern SQLAlchemy. + + .. seealso:: + + :ref:`loading_toplevel` + + """ + + return self._set_relationship_strategy(attr, {"lazy": "noload"}) + + def raiseload(self, attr: _AttrType, sql_only: bool = False) -> Self: + """Indicate that the given attribute should raise an error if accessed. + + A relationship attribute configured with :func:`_orm.raiseload` will + raise an :exc:`~sqlalchemy.exc.InvalidRequestError` upon access. The + typical way this is useful is when an application is attempting to + ensure that all relationship attributes that are accessed in a + particular context would have been already loaded via eager loading. + Instead of having to read through SQL logs to ensure lazy loads aren't + occurring, this strategy will cause them to raise immediately. + + :func:`_orm.raiseload` applies to :func:`_orm.relationship` attributes + only. In order to apply raise-on-SQL behavior to a column-based + attribute, use the :paramref:`.orm.defer.raiseload` parameter on the + :func:`.defer` loader option. + + :param sql_only: if True, raise only if the lazy load would emit SQL, + but not if it is only checking the identity map, or determining that + the related value should just be None due to missing keys. When False, + the strategy will raise for all varieties of relationship loading. + + This function is part of the :class:`_orm.Load` interface and supports + both method-chained and standalone operation. + + .. seealso:: + + :ref:`loading_toplevel` + + :ref:`prevent_lazy_with_raiseload` + + :ref:`orm_queryguide_deferred_raiseload` + + """ + + return self._set_relationship_strategy( + attr, {"lazy": "raise_on_sql" if sql_only else "raise"} + ) + + def defaultload(self, attr: _AttrType) -> Self: + """Indicate an attribute should load using its predefined loader style. 
+ + The behavior of this loading option is to not change the current + loading style of the attribute, meaning that the previously configured + one is used or, if no previous style was selected, the default + loading will be used. + + This method is used to link to other loader options further into + a chain of attributes without altering the loader style of the links + along the chain. For example, to set joined eager loading for an + element of an element:: + + session.query(MyClass).options( + defaultload(MyClass.someattribute).joinedload( + MyOtherClass.someotherattribute + ) + ) + + :func:`.defaultload` is also useful for setting column-level options on + a related class, namely that of :func:`.defer` and :func:`.undefer`:: + + session.scalars( + select(MyClass).options( + defaultload(MyClass.someattribute) + .defer("some_column") + .undefer("some_other_column") + ) + ) + + .. seealso:: + + :ref:`orm_queryguide_relationship_sub_options` + + :meth:`_orm.Load.options` + + """ + return self._set_relationship_strategy(attr, None) + + def defer(self, key: _AttrType, raiseload: bool = False) -> Self: + r"""Indicate that the given column-oriented attribute should be + deferred, e.g. not loaded until accessed. + + This function is part of the :class:`_orm.Load` interface and supports + both method-chained and standalone operation. + + e.g.:: + + from sqlalchemy.orm import defer + + session.query(MyClass).options( + defer(MyClass.attribute_one), defer(MyClass.attribute_two) + ) + + To specify a deferred load of an attribute on a related class, + the path can be specified one token at a time, specifying the loading + style for each link along the chain. To leave the loading style + for a link unchanged, use :func:`_orm.defaultload`:: + + session.query(MyClass).options( + defaultload(MyClass.someattr).defer(RelatedClass.some_column) + ) + + Multiple deferral options related to a relationship can be bundled + at once using :meth:`_orm.Load.options`:: + + + select(MyClass).options( + defaultload(MyClass.someattr).options( + defer(RelatedClass.some_column), + defer(RelatedClass.some_other_column), + defer(RelatedClass.another_column), + ) + ) + + :param key: Attribute to be deferred. + + :param raiseload: raise :class:`.InvalidRequestError` rather than + lazy loading a value when the deferred attribute is accessed. Used + to prevent unwanted SQL from being emitted. + + .. versionadded:: 1.4 + + .. seealso:: + + :ref:`orm_queryguide_column_deferral` - in the + :ref:`queryguide_toplevel` + + :func:`_orm.load_only` + + :func:`_orm.undefer` + + """ + strategy = {"deferred": True, "instrument": True} + if raiseload: + strategy["raiseload"] = True + return self._set_column_strategy((key,), strategy) + + def undefer(self, key: _AttrType) -> Self: + r"""Indicate that the given column-oriented attribute should be + undeferred, e.g. specified within the SELECT statement of the entity + as a whole. + + The column being undeferred is typically set up on the mapping as a + :func:`.deferred` attribute. + + This function is part of the :class:`_orm.Load` interface and supports + both method-chained and standalone operation. 
+ + Examples:: + + # undefer two columns + session.query(MyClass).options( + undefer(MyClass.col1), undefer(MyClass.col2) + ) + + # undefer all columns specific to a single class using Load + * + session.query(MyClass, MyOtherClass).options(Load(MyClass).undefer("*")) + + # undefer a column on a related object + select(MyClass).options(defaultload(MyClass.items).undefer(MyClass.text)) + + :param key: Attribute to be undeferred. + + .. seealso:: + + :ref:`orm_queryguide_column_deferral` - in the + :ref:`queryguide_toplevel` + + :func:`_orm.defer` + + :func:`_orm.undefer_group` + + """ # noqa: E501 + return self._set_column_strategy( + (key,), {"deferred": False, "instrument": True} + ) + + def undefer_group(self, name: str) -> Self: + """Indicate that columns within the given deferred group name should be + undeferred. + + The columns being undeferred are set up on the mapping as + :func:`.deferred` attributes and include a "group" name. + + E.g:: + + session.query(MyClass).options(undefer_group("large_attrs")) + + To undefer a group of attributes on a related entity, the path can be + spelled out using relationship loader options, such as + :func:`_orm.defaultload`:: + + select(MyClass).options( + defaultload("someattr").undefer_group("large_attrs") + ) + + .. seealso:: + + :ref:`orm_queryguide_column_deferral` - in the + :ref:`queryguide_toplevel` + + :func:`_orm.defer` + + :func:`_orm.undefer` + + """ + return self._set_column_strategy( + (_WILDCARD_TOKEN,), None, {f"undefer_group_{name}": True} + ) + + def with_expression( + self, + key: _AttrType, + expression: _ColumnExpressionArgument[Any], + ) -> Self: + r"""Apply an ad-hoc SQL expression to a "deferred expression" + attribute. + + This option is used in conjunction with the + :func:`_orm.query_expression` mapper-level construct that indicates an + attribute which should be the target of an ad-hoc SQL expression. + + E.g.:: + + stmt = select(SomeClass).options( + with_expression(SomeClass.x_y_expr, SomeClass.x + SomeClass.y) + ) + + .. versionadded:: 1.2 + + :param key: Attribute to be populated + + :param expr: SQL expression to be applied to the attribute. + + .. seealso:: + + :ref:`orm_queryguide_with_expression` - background and usage + examples + + """ + + expression = _orm_full_deannotate( + coercions.expect(roles.LabeledColumnExprRole, expression) + ) + + return self._set_column_strategy( + (key,), {"query_expression": True}, extra_criteria=(expression,) + ) + + def selectin_polymorphic(self, classes: Iterable[Type[Any]]) -> Self: + """Indicate an eager load should take place for all attributes + specific to a subclass. + + This uses an additional SELECT with IN against all matched primary + key values, and is the per-query analogue to the ``"selectin"`` + setting on the :paramref:`.mapper.polymorphic_load` parameter. + + .. versionadded:: 1.2 + + .. seealso:: + + :ref:`polymorphic_selectin` + + """ + self = self._set_class_strategy( + {"selectinload_polymorphic": True}, + opts={ + "entities": tuple( + sorted((inspect(cls) for cls in classes), key=id) + ) + }, + ) + return self + + @overload + def _coerce_strat(self, strategy: _StrategySpec) -> _StrategyKey: ... + + @overload + def _coerce_strat(self, strategy: Literal[None]) -> None: ... 
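    # The implementation below coerces a strategy dict such as
    # {"lazy": "joined"} into the hashable, order-independent key
    # (("lazy", "joined"),) that loader strategies are registered under;
    # a strategy of None passes through unchanged.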
+ + def _coerce_strat( + self, strategy: Optional[_StrategySpec] + ) -> Optional[_StrategyKey]: + if strategy is not None: + strategy_key = tuple(sorted(strategy.items())) + else: + strategy_key = None + return strategy_key + + @_generative + def _set_relationship_strategy( + self, + attr: _AttrType, + strategy: Optional[_StrategySpec], + propagate_to_loaders: bool = True, + opts: Optional[_OptsType] = None, + _reconcile_to_other: Optional[bool] = None, + ) -> Self: + strategy_key = self._coerce_strat(strategy) + + self._clone_for_bind_strategy( + (attr,), + strategy_key, + _RELATIONSHIP_TOKEN, + opts=opts, + propagate_to_loaders=propagate_to_loaders, + reconcile_to_other=_reconcile_to_other, + ) + return self + + @_generative + def _set_column_strategy( + self, + attrs: Tuple[_AttrType, ...], + strategy: Optional[_StrategySpec], + opts: Optional[_OptsType] = None, + extra_criteria: Optional[Tuple[Any, ...]] = None, + ) -> Self: + strategy_key = self._coerce_strat(strategy) + + self._clone_for_bind_strategy( + attrs, + strategy_key, + _COLUMN_TOKEN, + opts=opts, + attr_group=attrs, + extra_criteria=extra_criteria, + ) + return self + + @_generative + def _set_generic_strategy( + self, + attrs: Tuple[_AttrType, ...], + strategy: _StrategySpec, + _reconcile_to_other: Optional[bool] = None, + ) -> Self: + strategy_key = self._coerce_strat(strategy) + self._clone_for_bind_strategy( + attrs, + strategy_key, + None, + propagate_to_loaders=True, + reconcile_to_other=_reconcile_to_other, + ) + return self + + @_generative + def _set_class_strategy( + self, strategy: _StrategySpec, opts: _OptsType + ) -> Self: + strategy_key = self._coerce_strat(strategy) + + self._clone_for_bind_strategy(None, strategy_key, None, opts=opts) + return self + + def _apply_to_parent(self, parent: Load) -> None: + """apply this :class:`_orm._AbstractLoad` object as a sub-option o + a :class:`_orm.Load` object. + + Implementation is provided by subclasses. + + """ + raise NotImplementedError() + + def options(self, *opts: _AbstractLoad) -> Self: + r"""Apply a series of options as sub-options to this + :class:`_orm._AbstractLoad` object. + + Implementation is provided by subclasses. 
+ + """ + raise NotImplementedError() + + def _clone_for_bind_strategy( + self, + attrs: Optional[Tuple[_AttrType, ...]], + strategy: Optional[_StrategyKey], + wildcard_key: Optional[_WildcardKeyType], + opts: Optional[_OptsType] = None, + attr_group: Optional[_AttrGroupType] = None, + propagate_to_loaders: bool = True, + reconcile_to_other: Optional[bool] = None, + extra_criteria: Optional[Tuple[Any, ...]] = None, + ) -> Self: + raise NotImplementedError() + + def process_compile_state_replaced_entities( + self, + compile_state: ORMCompileState, + mapper_entities: Sequence[_MapperEntity], + ) -> None: + if not compile_state.compile_options._enable_eagerloads: + return + + # process is being run here so that the options given are validated + # against what the lead entities were, as well as to accommodate + # for the entities having been replaced with equivalents + self._process( + compile_state, + mapper_entities, + not bool(compile_state.current_path), + ) + + def process_compile_state(self, compile_state: ORMCompileState) -> None: + if not compile_state.compile_options._enable_eagerloads: + return + + self._process( + compile_state, + compile_state._lead_mapper_entities, + not bool(compile_state.current_path) + and not compile_state.compile_options._for_refresh_state, + ) + + def _process( + self, + compile_state: ORMCompileState, + mapper_entities: Sequence[_MapperEntity], + raiseerr: bool, + ) -> None: + """implemented by subclasses""" + raise NotImplementedError() + + @classmethod + def _chop_path( + cls, + to_chop: _PathRepresentation, + path: PathRegistry, + debug: bool = False, + ) -> Optional[_PathRepresentation]: + i = -1 + + for i, (c_token, p_token) in enumerate( + zip(to_chop, path.natural_path) + ): + if isinstance(c_token, str): + if i == 0 and ( + c_token.endswith(f":{_DEFAULT_TOKEN}") + or c_token.endswith(f":{_WILDCARD_TOKEN}") + ): + return to_chop + elif ( + c_token != f"{_RELATIONSHIP_TOKEN}:{_WILDCARD_TOKEN}" + and c_token != p_token.key # type: ignore + ): + return None + + if c_token is p_token: + continue + elif ( + isinstance(c_token, InspectionAttr) + and insp_is_mapper(c_token) + and insp_is_mapper(p_token) + and c_token.isa(p_token) + ): + continue + + else: + return None + return to_chop[i + 1 :] + + +class Load(_AbstractLoad): + """Represents loader options which modify the state of a + ORM-enabled :class:`_sql.Select` or a legacy :class:`_query.Query` in + order to affect how various mapped attributes are loaded. + + The :class:`_orm.Load` object is in most cases used implicitly behind the + scenes when one makes use of a query option like :func:`_orm.joinedload`, + :func:`_orm.defer`, or similar. It typically is not instantiated directly + except for in some very specific cases. + + .. seealso:: + + :ref:`orm_queryguide_relationship_per_entity_wildcard` - illustrates an + example where direct use of :class:`_orm.Load` may be useful + + """ + + __slots__ = ( + "path", + "context", + "additional_source_entities", + ) + + _traverse_internals = [ + ("path", visitors.ExtendedInternalTraversal.dp_has_cache_key), + ( + "context", + visitors.InternalTraversal.dp_has_cache_key_list, + ), + ("propagate_to_loaders", visitors.InternalTraversal.dp_boolean), + ( + "additional_source_entities", + visitors.InternalTraversal.dp_has_cache_key_list, + ), + ] + _cache_key_traversal = None + + path: PathRegistry + context: Tuple[_LoadElement, ...] + additional_source_entities: Tuple[_InternalEntityType[Any], ...] 
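    # Illustrative sketch (hypothetical User / Address mappings): Load is
    # normally created implicitly by options such as joinedload() or
    # defer(); constructing it directly is mainly useful to scope an option
    # to a single entity of a multi-entity statement, e.g. a per-entity
    # wildcard:
    #
    #   stmt = select(User, Address).options(Load(User).lazyload("*"))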
+ + def __init__(self, entity: _EntityType[Any]): + insp = cast("Union[Mapper[Any], AliasedInsp[Any]]", inspect(entity)) + insp._post_inspect + + self.path = insp._path_registry + self.context = () + self.propagate_to_loaders = False + self.additional_source_entities = () + + def __str__(self) -> str: + return f"Load({self.path[0]})" + + @classmethod + def _construct_for_existing_path( + cls, path: AbstractEntityRegistry + ) -> Load: + load = cls.__new__(cls) + load.path = path + load.context = () + load.propagate_to_loaders = False + load.additional_source_entities = () + return load + + def _adapt_cached_option_to_uncached_option( + self, context: QueryContext, uncached_opt: ORMOption + ) -> ORMOption: + if uncached_opt is self: + return self + return self._adjust_for_extra_criteria(context) + + def _prepend_path(self, path: PathRegistry) -> Load: + cloned = self._clone() + cloned.context = tuple( + element._prepend_path(path) for element in self.context + ) + return cloned + + def _adjust_for_extra_criteria(self, context: QueryContext) -> Load: + """Apply the current bound parameters in a QueryContext to all + occurrences "extra_criteria" stored within this ``Load`` object, + returning a new instance of this ``Load`` object. + + """ + + # avoid generating cache keys for the queries if we don't + # actually have any extra_criteria options, which is the + # common case + for value in self.context: + if value._extra_criteria: + break + else: + return self + + replacement_cache_key = context.user_passed_query._generate_cache_key() + + if replacement_cache_key is None: + return self + + orig_query = context.compile_state.select_statement + orig_cache_key = orig_query._generate_cache_key() + assert orig_cache_key is not None + + def process( + opt: _LoadElement, + replacement_cache_key: CacheKey, + orig_cache_key: CacheKey, + ) -> _LoadElement: + cloned_opt = opt._clone() + + cloned_opt._extra_criteria = tuple( + replacement_cache_key._apply_params_to_element( + orig_cache_key, crit + ) + for crit in cloned_opt._extra_criteria + ) + + return cloned_opt + + cloned = self._clone() + cloned.context = tuple( + ( + process(value, replacement_cache_key, orig_cache_key) + if value._extra_criteria + else value + ) + for value in self.context + ) + return cloned + + def _reconcile_query_entities_with_us(self, mapper_entities, raiseerr): + """called at process time to allow adjustment of the root + entity inside of _LoadElement objects. + + """ + path = self.path + + for ent in mapper_entities: + ezero = ent.entity_zero + if ezero and orm_util._entity_corresponds_to( + # technically this can be a token also, but this is + # safe to pass to _entity_corresponds_to() + ezero, + cast("_InternalEntityType[Any]", path[0]), + ): + return ezero + + return None + + def _process( + self, + compile_state: ORMCompileState, + mapper_entities: Sequence[_MapperEntity], + raiseerr: bool, + ) -> None: + reconciled_lead_entity = self._reconcile_query_entities_with_us( + mapper_entities, raiseerr + ) + + # if the context has a current path, this is a lazy load + has_current_path = bool(compile_state.compile_options._current_path) + + for loader in self.context: + # issue #11292 + # historically, propagate_to_loaders was only considered at + # object loading time, whether or not to carry along options + # onto an object's loaded state where it would be used by lazyload. 
+ # however, the defaultload() option needs to propagate in case + # its sub-options propagate_to_loaders, but its sub-options + # that dont propagate should not be applied for lazy loaders. + # so we check again + if has_current_path and not loader.propagate_to_loaders: + continue + loader.process_compile_state( + self, + compile_state, + mapper_entities, + reconciled_lead_entity, + raiseerr, + ) + + def _apply_to_parent(self, parent: Load) -> None: + """apply this :class:`_orm.Load` object as a sub-option of another + :class:`_orm.Load` object. + + This method is used by the :meth:`_orm.Load.options` method. + + """ + cloned = self._generate() + + assert cloned.propagate_to_loaders == self.propagate_to_loaders + + if not any( + orm_util._entity_corresponds_to_use_path_impl( + elem, cloned.path.odd_element(0) + ) + for elem in (parent.path.odd_element(-1),) + + parent.additional_source_entities + ): + if len(cloned.path) > 1: + attrname = cloned.path[1] + parent_entity = cloned.path[0] + else: + attrname = cloned.path[0] + parent_entity = cloned.path[0] + _raise_for_does_not_link(parent.path, attrname, parent_entity) + + cloned.path = PathRegistry.coerce(parent.path[0:-1] + cloned.path[:]) + + if self.context: + cloned.context = tuple( + value._prepend_path_from(parent) for value in self.context + ) + + if cloned.context: + parent.context += cloned.context + parent.additional_source_entities += ( + cloned.additional_source_entities + ) + + @_generative + def options(self, *opts: _AbstractLoad) -> Self: + r"""Apply a series of options as sub-options to this + :class:`_orm.Load` + object. + + E.g.:: + + query = session.query(Author) + query = query.options( + joinedload(Author.book).options( + load_only(Book.summary, Book.excerpt), + joinedload(Book.citations).options(joinedload(Citation.author)), + ) + ) + + :param \*opts: A series of loader option objects (ultimately + :class:`_orm.Load` objects) which should be applied to the path + specified by this :class:`_orm.Load` object. + + .. versionadded:: 1.3.6 + + .. seealso:: + + :func:`.defaultload` + + :ref:`orm_queryguide_relationship_sub_options` + + """ + for opt in opts: + try: + opt._apply_to_parent(self) + except AttributeError as ae: + if not isinstance(opt, _AbstractLoad): + raise sa_exc.ArgumentError( + f"Loader option {opt} is not compatible with the " + "Load.options() method." 
+ ) from ae + else: + raise + return self + + def _clone_for_bind_strategy( + self, + attrs: Optional[Tuple[_AttrType, ...]], + strategy: Optional[_StrategyKey], + wildcard_key: Optional[_WildcardKeyType], + opts: Optional[_OptsType] = None, + attr_group: Optional[_AttrGroupType] = None, + propagate_to_loaders: bool = True, + reconcile_to_other: Optional[bool] = None, + extra_criteria: Optional[Tuple[Any, ...]] = None, + ) -> Self: + # for individual strategy that needs to propagate, set the whole + # Load container to also propagate, so that it shows up in + # InstanceState.load_options + if propagate_to_loaders: + self.propagate_to_loaders = True + + if self.path.is_token: + raise sa_exc.ArgumentError( + "Wildcard token cannot be followed by another entity" + ) + + elif path_is_property(self.path): + # re-use the lookup which will raise a nicely formatted + # LoaderStrategyException + if strategy: + self.path.prop._strategy_lookup(self.path.prop, strategy[0]) + else: + raise sa_exc.ArgumentError( + f"Mapped attribute '{self.path.prop}' does not " + "refer to a mapped entity" + ) + + if attrs is None: + load_element = _ClassStrategyLoad.create( + self.path, + None, + strategy, + wildcard_key, + opts, + propagate_to_loaders, + attr_group=attr_group, + reconcile_to_other=reconcile_to_other, + extra_criteria=extra_criteria, + ) + if load_element: + self.context += (load_element,) + assert opts is not None + self.additional_source_entities += cast( + "Tuple[_InternalEntityType[Any]]", opts["entities"] + ) + + else: + for attr in attrs: + if isinstance(attr, str): + load_element = _TokenStrategyLoad.create( + self.path, + attr, + strategy, + wildcard_key, + opts, + propagate_to_loaders, + attr_group=attr_group, + reconcile_to_other=reconcile_to_other, + extra_criteria=extra_criteria, + ) + else: + load_element = _AttributeStrategyLoad.create( + self.path, + attr, + strategy, + wildcard_key, + opts, + propagate_to_loaders, + attr_group=attr_group, + reconcile_to_other=reconcile_to_other, + extra_criteria=extra_criteria, + ) + + if load_element: + # for relationship options, update self.path on this Load + # object with the latest path. + if wildcard_key is _RELATIONSHIP_TOKEN: + self.path = load_element.path + self.context += (load_element,) + + # this seems to be effective for selectinloader, + # giving the extra match to one more level deep. 
+ # but does not work for immediateloader, which still + # must add additional options at load time + if load_element.local_opts.get("recursion_depth", False): + r1 = load_element._recurse() + self.context += (r1,) + + return self + + def __getstate__(self): + d = self._shallow_to_dict() + d["path"] = self.path.serialize() + return d + + def __setstate__(self, state): + state["path"] = PathRegistry.deserialize(state["path"]) + self._shallow_from_dict(state) + + +class _WildcardLoad(_AbstractLoad): + """represent a standalone '*' load operation""" + + __slots__ = ("strategy", "path", "local_opts") + + _traverse_internals = [ + ("strategy", visitors.ExtendedInternalTraversal.dp_plain_obj), + ("path", visitors.ExtendedInternalTraversal.dp_plain_obj), + ( + "local_opts", + visitors.ExtendedInternalTraversal.dp_string_multi_dict, + ), + ] + cache_key_traversal: _CacheKeyTraversalType = None + + strategy: Optional[Tuple[Any, ...]] + local_opts: _OptsType + path: Union[Tuple[()], Tuple[str]] + propagate_to_loaders = False + + def __init__(self) -> None: + self.path = () + self.strategy = None + self.local_opts = util.EMPTY_DICT + + def _clone_for_bind_strategy( + self, + attrs, + strategy, + wildcard_key, + opts=None, + attr_group=None, + propagate_to_loaders=True, + reconcile_to_other=None, + extra_criteria=None, + ): + assert attrs is not None + attr = attrs[0] + assert ( + wildcard_key + and isinstance(attr, str) + and attr in (_WILDCARD_TOKEN, _DEFAULT_TOKEN) + ) + + attr = f"{wildcard_key}:{attr}" + + self.strategy = strategy + self.path = (attr,) + if opts: + self.local_opts = util.immutabledict(opts) + + assert extra_criteria is None + + def options(self, *opts: _AbstractLoad) -> Self: + raise NotImplementedError("Star option does not support sub-options") + + def _apply_to_parent(self, parent: Load) -> None: + """apply this :class:`_orm._WildcardLoad` object as a sub-option of + a :class:`_orm.Load` object. + + This method is used by the :meth:`_orm.Load.options` method. Note + that :class:`_orm.WildcardLoad` itself can't have sub-options, but + it may be used as the sub-option of a :class:`_orm.Load` object. 
+ + """ + assert self.path + attr = self.path[0] + if attr.endswith(_DEFAULT_TOKEN): + attr = f"{attr.split(':')[0]}:{_WILDCARD_TOKEN}" + + effective_path = cast(AbstractEntityRegistry, parent.path).token(attr) + + assert effective_path.is_token + + loader = _TokenStrategyLoad.create( + effective_path, + None, + self.strategy, + None, + self.local_opts, + self.propagate_to_loaders, + ) + + parent.context += (loader,) + + def _process(self, compile_state, mapper_entities, raiseerr): + is_refresh = compile_state.compile_options._for_refresh_state + + if is_refresh and not self.propagate_to_loaders: + return + + entities = [ent.entity_zero for ent in mapper_entities] + current_path = compile_state.current_path + + start_path: _PathRepresentation = self.path + + if current_path: + # TODO: no cases in test suite where we actually get + # None back here + new_path = self._chop_path(start_path, current_path) + if new_path is None: + return + + # chop_path does not actually "chop" a wildcard token path, + # just returns it + assert new_path == start_path + + # start_path is a single-token tuple + assert start_path and len(start_path) == 1 + + token = start_path[0] + assert isinstance(token, str) + entity = self._find_entity_basestring(entities, token, raiseerr) + + if not entity: + return + + path_element = entity + + # transfer our entity-less state into a Load() object + # with a real entity path. Start with the lead entity + # we just located, then go through the rest of our path + # tokens and populate into the Load(). + + assert isinstance(token, str) + loader = _TokenStrategyLoad.create( + path_element._path_registry, + token, + self.strategy, + None, + self.local_opts, + self.propagate_to_loaders, + raiseerr=raiseerr, + ) + if not loader: + return + + assert loader.path.is_token + + # don't pass a reconciled lead entity here + loader.process_compile_state( + self, compile_state, mapper_entities, None, raiseerr + ) + + return loader + + def _find_entity_basestring( + self, + entities: Iterable[_InternalEntityType[Any]], + token: str, + raiseerr: bool, + ) -> Optional[_InternalEntityType[Any]]: + if token.endswith(f":{_WILDCARD_TOKEN}"): + if len(list(entities)) != 1: + if raiseerr: + raise sa_exc.ArgumentError( + "Can't apply wildcard ('*') or load_only() " + f"loader option to multiple entities " + f"{', '.join(str(ent) for ent in entities)}. Specify " + "loader options for each entity individually, such as " + f"""{ + ", ".join( + f"Load({ent}).some_option('*')" + for ent in entities + ) + }.""" + ) + elif token.endswith(_DEFAULT_TOKEN): + raiseerr = False + + for ent in entities: + # return only the first _MapperEntity when searching + # based on string prop name. Ideally object + # attributes are used to specify more exactly. + return ent + else: + if raiseerr: + raise sa_exc.ArgumentError( + "Query has only expression-based entities - " + f'can\'t find property named "{token}".' + ) + else: + return None + + def __getstate__(self) -> Dict[str, Any]: + d = self._shallow_to_dict() + return d + + def __setstate__(self, state: Dict[str, Any]) -> None: + self._shallow_from_dict(state) + + +class _LoadElement( + cache_key.HasCacheKey, traversals.HasShallowCopy, visitors.Traversible +): + """represents strategy information to select for a LoaderStrategy + and pass options to it. + + :class:`._LoadElement` objects provide the inner datastructure + stored by a :class:`_orm.Load` object and are also the object passed + to methods like :meth:`.LoaderStrategy.setup_query`. + + .. 
versionadded:: 2.0 + + """ + + __slots__ = ( + "path", + "strategy", + "propagate_to_loaders", + "local_opts", + "_extra_criteria", + "_reconcile_to_other", + ) + __visit_name__ = "load_element" + + _traverse_internals = [ + ("path", visitors.ExtendedInternalTraversal.dp_has_cache_key), + ("strategy", visitors.ExtendedInternalTraversal.dp_plain_obj), + ( + "local_opts", + visitors.ExtendedInternalTraversal.dp_string_multi_dict, + ), + ("_extra_criteria", visitors.InternalTraversal.dp_clauseelement_list), + ("propagate_to_loaders", visitors.InternalTraversal.dp_plain_obj), + ("_reconcile_to_other", visitors.InternalTraversal.dp_plain_obj), + ] + _cache_key_traversal = None + + _extra_criteria: Tuple[Any, ...] + + _reconcile_to_other: Optional[bool] + strategy: Optional[_StrategyKey] + path: PathRegistry + propagate_to_loaders: bool + + local_opts: util.immutabledict[str, Any] + + is_token_strategy: bool + is_class_strategy: bool + + def __hash__(self) -> int: + return id(self) + + def __eq__(self, other): + return traversals.compare(self, other) + + @property + def is_opts_only(self) -> bool: + return bool(self.local_opts and self.strategy is None) + + def _clone(self, **kw: Any) -> _LoadElement: + cls = self.__class__ + s = cls.__new__(cls) + + self._shallow_copy_to(s) + return s + + def _update_opts(self, **kw: Any) -> _LoadElement: + new = self._clone() + new.local_opts = new.local_opts.union(kw) + return new + + def __getstate__(self) -> Dict[str, Any]: + d = self._shallow_to_dict() + d["path"] = self.path.serialize() + return d + + def __setstate__(self, state: Dict[str, Any]) -> None: + state["path"] = PathRegistry.deserialize(state["path"]) + self._shallow_from_dict(state) + + def _raise_for_no_match(self, parent_loader, mapper_entities): + path = parent_loader.path + + found_entities = False + for ent in mapper_entities: + ezero = ent.entity_zero + if ezero: + found_entities = True + break + + if not found_entities: + raise sa_exc.ArgumentError( + "Query has only expression-based entities; " + f"attribute loader options for {path[0]} can't " + "be applied here." + ) + else: + raise sa_exc.ArgumentError( + f"Mapped class {path[0]} does not apply to any of the " + f"root entities in this query, e.g. " + f"""{ + ", ".join( + str(x.entity_zero) + for x in mapper_entities if x.entity_zero + )}. Please """ + "specify the full path " + "from one of the root entities to the target " + "attribute. " + ) + + def _adjust_effective_path_for_current_path( + self, effective_path: PathRegistry, current_path: PathRegistry + ) -> Optional[PathRegistry]: + """receives the 'current_path' entry from an :class:`.ORMCompileState` + instance, which is set during lazy loads and secondary loader strategy + loads, and adjusts the given path to be relative to the + current_path. + + E.g. given a loader path and current path: + + .. sourcecode:: text + + lp: User -> orders -> Order -> items -> Item -> keywords -> Keyword + + cp: User -> orders -> Order -> items + + The adjusted path would be: + + .. 
sourcecode:: text + + Item -> keywords -> Keyword + + + """ + chopped_start_path = Load._chop_path( + effective_path.natural_path, current_path + ) + if not chopped_start_path: + return None + + tokens_removed_from_start_path = len(effective_path) - len( + chopped_start_path + ) + + loader_lead_path_element = self.path[tokens_removed_from_start_path] + + effective_path = PathRegistry.coerce( + (loader_lead_path_element,) + chopped_start_path[1:] + ) + + return effective_path + + def _init_path( + self, path, attr, wildcard_key, attr_group, raiseerr, extra_criteria + ): + """Apply ORM attributes and/or wildcard to an existing path, producing + a new path. + + This method is used within the :meth:`.create` method to initialize + a :class:`._LoadElement` object. + + """ + raise NotImplementedError() + + def _prepare_for_compile_state( + self, + parent_loader, + compile_state, + mapper_entities, + reconciled_lead_entity, + raiseerr, + ): + """implemented by subclasses.""" + raise NotImplementedError() + + def process_compile_state( + self, + parent_loader, + compile_state, + mapper_entities, + reconciled_lead_entity, + raiseerr, + ): + """populate ORMCompileState.attributes with loader state for this + _LoadElement. + + """ + keys = self._prepare_for_compile_state( + parent_loader, + compile_state, + mapper_entities, + reconciled_lead_entity, + raiseerr, + ) + for key in keys: + if key in compile_state.attributes: + compile_state.attributes[key] = _LoadElement._reconcile( + self, compile_state.attributes[key] + ) + else: + compile_state.attributes[key] = self + + @classmethod + def create( + cls, + path: PathRegistry, + attr: Union[_AttrType, _StrPathToken, None], + strategy: Optional[_StrategyKey], + wildcard_key: Optional[_WildcardKeyType], + local_opts: Optional[_OptsType], + propagate_to_loaders: bool, + raiseerr: bool = True, + attr_group: Optional[_AttrGroupType] = None, + reconcile_to_other: Optional[bool] = None, + extra_criteria: Optional[Tuple[Any, ...]] = None, + ) -> _LoadElement: + """Create a new :class:`._LoadElement` object.""" + + opt = cls.__new__(cls) + opt.path = path + opt.strategy = strategy + opt.propagate_to_loaders = propagate_to_loaders + opt.local_opts = ( + util.immutabledict(local_opts) if local_opts else util.EMPTY_DICT + ) + opt._extra_criteria = () + + if reconcile_to_other is not None: + opt._reconcile_to_other = reconcile_to_other + elif strategy is None and not local_opts: + opt._reconcile_to_other = True + else: + opt._reconcile_to_other = None + + path = opt._init_path( + path, attr, wildcard_key, attr_group, raiseerr, extra_criteria + ) + + if not path: + return None # type: ignore + + assert opt.is_token_strategy == path.is_token + + opt.path = path + return opt + + def __init__(self) -> None: + raise NotImplementedError() + + def _recurse(self) -> _LoadElement: + cloned = self._clone() + cloned.path = PathRegistry.coerce(self.path[:] + self.path[-2:]) + + return cloned + + def _prepend_path_from(self, parent: Load) -> _LoadElement: + """adjust the path of this :class:`._LoadElement` to be + a subpath of that of the given parent :class:`_orm.Load` object's + path. + + This is used by the :meth:`_orm.Load._apply_to_parent` method, + which is in turn part of the :meth:`_orm.Load.options` method. 
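+
+ Raises :class:`sqlalchemy.exc.ArgumentError` if this element's lead
+ entity does not link from the parent path or from one of the
+ parent's additional source entities.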
+ + """ + + if not any( + orm_util._entity_corresponds_to_use_path_impl( + elem, + self.path.odd_element(0), + ) + for elem in (parent.path.odd_element(-1),) + + parent.additional_source_entities + ): + raise sa_exc.ArgumentError( + f'Attribute "{self.path[1]}" does not link ' + f'from element "{parent.path[-1]}".' + ) + + return self._prepend_path(parent.path) + + def _prepend_path(self, path: PathRegistry) -> _LoadElement: + cloned = self._clone() + + assert cloned.strategy == self.strategy + assert cloned.local_opts == self.local_opts + assert cloned.is_class_strategy == self.is_class_strategy + + cloned.path = PathRegistry.coerce(path[0:-1] + cloned.path[:]) + + return cloned + + @staticmethod + def _reconcile( + replacement: _LoadElement, existing: _LoadElement + ) -> _LoadElement: + """define behavior for when two Load objects are to be put into + the context.attributes under the same key. + + :param replacement: ``_LoadElement`` that seeks to replace the + existing one + + :param existing: ``_LoadElement`` that is already present. + + """ + # mapper inheritance loading requires fine-grained "block other + # options" / "allow these options to be overridden" behaviors + # see test_poly_loading.py + + if replacement._reconcile_to_other: + return existing + elif replacement._reconcile_to_other is False: + return replacement + elif existing._reconcile_to_other: + return replacement + elif existing._reconcile_to_other is False: + return existing + + if existing is replacement: + return replacement + elif ( + existing.strategy == replacement.strategy + and existing.local_opts == replacement.local_opts + ): + return replacement + elif replacement.is_opts_only: + existing = existing._clone() + existing.local_opts = existing.local_opts.union( + replacement.local_opts + ) + existing._extra_criteria += replacement._extra_criteria + return existing + elif existing.is_opts_only: + replacement = replacement._clone() + replacement.local_opts = replacement.local_opts.union( + existing.local_opts + ) + replacement._extra_criteria += existing._extra_criteria + return replacement + elif replacement.path.is_token: + # use 'last one wins' logic for wildcard options. this is also + # kind of inconsistent vs. options that are specific paths which + # will raise as below + return replacement + + raise sa_exc.InvalidRequestError( + f"Loader strategies for {replacement.path} conflict" + ) + + +class _AttributeStrategyLoad(_LoadElement): + """Loader strategies against specific relationship or column paths. 
+ + e.g.:: + + joinedload(User.addresses) + defer(Order.name) + selectinload(User.orders).lazyload(Order.items) + + """ + + __slots__ = ("_of_type", "_path_with_polymorphic_path") + + __visit_name__ = "attribute_strategy_load_element" + + _traverse_internals = _LoadElement._traverse_internals + [ + ("_of_type", visitors.ExtendedInternalTraversal.dp_multi), + ( + "_path_with_polymorphic_path", + visitors.ExtendedInternalTraversal.dp_has_cache_key, + ), + ] + + _of_type: Union[Mapper[Any], AliasedInsp[Any], None] + _path_with_polymorphic_path: Optional[PathRegistry] + + is_class_strategy = False + is_token_strategy = False + + def _init_path( + self, path, attr, wildcard_key, attr_group, raiseerr, extra_criteria + ): + assert attr is not None + self._of_type = None + self._path_with_polymorphic_path = None + insp, _, prop = _parse_attr_argument(attr) + + if insp.is_property: + # direct property can be sent from internal strategy logic + # that sets up specific loaders, such as + # emit_lazyload->_lazyload_reverse + # prop = found_property = attr + prop = attr + path = path[prop] + + if path.has_entity: + path = path.entity_path + return path + + elif not insp.is_attribute: + # should not reach here; + assert False + + # here we assume we have user-passed InstrumentedAttribute + if not orm_util._entity_corresponds_to_use_path_impl( + path[-1], attr.parent + ): + if raiseerr: + if attr_group and attr is not attr_group[0]: + raise sa_exc.ArgumentError( + "Can't apply wildcard ('*') or load_only() " + "loader option to multiple entities in the " + "same option. Use separate options per entity." + ) + else: + _raise_for_does_not_link(path, str(attr), attr.parent) + else: + return None + + # note the essential logic of this attribute was very different in + # 1.4, where there were caching failures in e.g. + # test_relationship_criteria.py::RelationshipCriteriaTest:: + # test_selectinload_nested_criteria[True] if an existing + # "_extra_criteria" on a Load object were replaced with that coming + # from an attribute. This appears to have been an artifact of how + # _UnboundLoad / Load interacted together, which was opaque and + # poorly defined. + if extra_criteria: + assert not attr._extra_criteria + self._extra_criteria = extra_criteria + else: + self._extra_criteria = attr._extra_criteria + + if getattr(attr, "_of_type", None): + ac = attr._of_type + ext_info = inspect(ac) + self._of_type = ext_info + + self._path_with_polymorphic_path = path.entity_path[prop] + + path = path[prop][ext_info] + + else: + path = path[prop] + + if path.has_entity: + path = path.entity_path + + return path + + def _generate_extra_criteria(self, context): + """Apply the current bound parameters in a QueryContext to the + immediate "extra_criteria" stored with this Load object. + + Load objects are typically pulled from the cached version of + the statement from a QueryContext. The statement currently being + executed will have new values (and keys) for bound parameters in the + extra criteria which need to be applied by loader strategies when + they handle this criteria for a result set. 
+ + """ + + assert ( + self._extra_criteria + ), "this should only be called if _extra_criteria is present" + + orig_query = context.compile_state.select_statement + current_query = context.query + + # NOTE: while it seems like we should not do the "apply" operation + # here if orig_query is current_query, skipping it in the "optimized" + # case causes the query to be different from a cache key perspective, + # because we are creating a copy of the criteria which is no longer + # the same identity of the _extra_criteria in the loader option + # itself. cache key logic produces a different key for + # (A, copy_of_A) vs. (A, A), because in the latter case it shortens + # the second part of the key to just indicate on identity. + + # if orig_query is current_query: + # not cached yet. just do the and_() + # return and_(*self._extra_criteria) + + k1 = orig_query._generate_cache_key() + k2 = current_query._generate_cache_key() + + return k2._apply_params_to_element(k1, and_(*self._extra_criteria)) + + def _set_of_type_info(self, context, current_path): + assert self._path_with_polymorphic_path + + pwpi = self._of_type + assert pwpi + if not pwpi.is_aliased_class: + pwpi = inspect( + orm_util.AliasedInsp._with_polymorphic_factory( + pwpi.mapper.base_mapper, + (pwpi.mapper,), + aliased=True, + _use_mapper_path=True, + ) + ) + start_path = self._path_with_polymorphic_path + if current_path: + new_path = self._adjust_effective_path_for_current_path( + start_path, current_path + ) + if new_path is None: + return + start_path = new_path + + key = ("path_with_polymorphic", start_path.natural_path) + if key in context: + existing_aliased_insp = context[key] + this_aliased_insp = pwpi + new_aliased_insp = existing_aliased_insp._merge_with( + this_aliased_insp + ) + context[key] = new_aliased_insp + else: + context[key] = pwpi + + def _prepare_for_compile_state( + self, + parent_loader, + compile_state, + mapper_entities, + reconciled_lead_entity, + raiseerr, + ): + # _AttributeStrategyLoad + + current_path = compile_state.current_path + is_refresh = compile_state.compile_options._for_refresh_state + assert not self.path.is_token + + if is_refresh and not self.propagate_to_loaders: + return [] + + if self._of_type: + # apply additional with_polymorphic alias that may have been + # generated. this has to happen even if this is a defaultload + self._set_of_type_info(compile_state.attributes, current_path) + + # omit setting loader attributes for a "defaultload" type of option + if not self.strategy and not self.local_opts: + return [] + + if raiseerr and not reconciled_lead_entity: + self._raise_for_no_match(parent_loader, mapper_entities) + + if self.path.has_entity: + effective_path = self.path.parent + else: + effective_path = self.path + + if current_path: + assert effective_path is not None + effective_path = self._adjust_effective_path_for_current_path( + effective_path, current_path + ) + if effective_path is None: + return [] + + return [("loader", cast(PathRegistry, effective_path).natural_path)] + + def __getstate__(self): + d = super().__getstate__() + + # can't pickle this. 
See + # test_pickled.py -> test_lazyload_extra_criteria_not_supported + # where we should be emitting a warning for the usual case where this + # would be non-None + d["_extra_criteria"] = () + + if self._path_with_polymorphic_path: + d["_path_with_polymorphic_path"] = ( + self._path_with_polymorphic_path.serialize() + ) + + if self._of_type: + if self._of_type.is_aliased_class: + d["_of_type"] = None + elif self._of_type.is_mapper: + d["_of_type"] = self._of_type.class_ + else: + assert False, "unexpected object for _of_type" + + return d + + def __setstate__(self, state): + super().__setstate__(state) + + if state.get("_path_with_polymorphic_path", None): + self._path_with_polymorphic_path = PathRegistry.deserialize( + state["_path_with_polymorphic_path"] + ) + else: + self._path_with_polymorphic_path = None + + if state.get("_of_type", None): + self._of_type = inspect(state["_of_type"]) + else: + self._of_type = None + + +class _TokenStrategyLoad(_LoadElement): + """Loader strategies against wildcard attributes + + e.g.:: + + raiseload("*") + Load(User).lazyload("*") + defer("*") + load_only(User.name, User.email) # will create a defer('*') + joinedload(User.addresses).raiseload("*") + + """ + + __visit_name__ = "token_strategy_load_element" + + inherit_cache = True + is_class_strategy = False + is_token_strategy = True + + def _init_path( + self, path, attr, wildcard_key, attr_group, raiseerr, extra_criteria + ): + # assert isinstance(attr, str) or attr is None + if attr is not None: + default_token = attr.endswith(_DEFAULT_TOKEN) + if attr.endswith(_WILDCARD_TOKEN) or default_token: + if wildcard_key: + attr = f"{wildcard_key}:{attr}" + + path = path.token(attr) + return path + else: + raise sa_exc.ArgumentError( + "Strings are not accepted for attribute names in loader " + "options; please use class-bound attributes directly." + ) + return path + + def _prepare_for_compile_state( + self, + parent_loader, + compile_state, + mapper_entities, + reconciled_lead_entity, + raiseerr, + ): + # _TokenStrategyLoad + + current_path = compile_state.current_path + is_refresh = compile_state.compile_options._for_refresh_state + + assert self.path.is_token + + if is_refresh and not self.propagate_to_loaders: + return [] + + # omit setting attributes for a "defaultload" type of option + if not self.strategy and not self.local_opts: + return [] + + effective_path = self.path + if reconciled_lead_entity: + effective_path = PathRegistry.coerce( + (reconciled_lead_entity,) + effective_path.path[1:] + ) + + if current_path: + new_effective_path = self._adjust_effective_path_for_current_path( + effective_path, current_path + ) + if new_effective_path is None: + return [] + effective_path = new_effective_path + + # for a wildcard token, expand out the path we set + # to encompass everything from the query entity on + # forward. not clear if this is necessary when current_path + # is set. 
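+ # each natural path produced by _generate_natural_for_superclasses()
+ # below becomes its own "loader" key in compile_state.attributes, so
+ # the wildcard option can also be located under the entity's
+ # superclass mappers.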
+ + return [ + ("loader", natural_path) + for natural_path in ( + cast( + TokenRegistry, effective_path + )._generate_natural_for_superclasses() + ) + ] + + +class _ClassStrategyLoad(_LoadElement): + """Loader strategies that deals with a class as a target, not + an attribute path + + e.g.:: + + q = s.query(Person).options( + selectin_polymorphic(Person, [Engineer, Manager]) + ) + + """ + + inherit_cache = True + is_class_strategy = True + is_token_strategy = False + + __visit_name__ = "class_strategy_load_element" + + def _init_path( + self, path, attr, wildcard_key, attr_group, raiseerr, extra_criteria + ): + return path + + def _prepare_for_compile_state( + self, + parent_loader, + compile_state, + mapper_entities, + reconciled_lead_entity, + raiseerr, + ): + # _ClassStrategyLoad + + current_path = compile_state.current_path + is_refresh = compile_state.compile_options._for_refresh_state + + if is_refresh and not self.propagate_to_loaders: + return [] + + # omit setting attributes for a "defaultload" type of option + if not self.strategy and not self.local_opts: + return [] + + effective_path = self.path + + if current_path: + new_effective_path = self._adjust_effective_path_for_current_path( + effective_path, current_path + ) + if new_effective_path is None: + return [] + effective_path = new_effective_path + + return [("loader", effective_path.natural_path)] + + +def _generate_from_keys( + meth: Callable[..., _AbstractLoad], + keys: Tuple[_AttrType, ...], + chained: bool, + kw: Any, +) -> _AbstractLoad: + lead_element: Optional[_AbstractLoad] = None + + attr: Any + for is_default, _keys in (True, keys[0:-1]), (False, keys[-1:]): + for attr in _keys: + if isinstance(attr, str): + if attr.startswith("." + _WILDCARD_TOKEN): + util.warn_deprecated( + "The undocumented `.{WILDCARD}` format is " + "deprecated " + "and will be removed in a future version as " + "it is " + "believed to be unused. " + "If you have been using this functionality, " + "please " + "comment on Issue #4390 on the SQLAlchemy project " + "tracker.", + version="1.4", + ) + attr = attr[1:] + + if attr == _WILDCARD_TOKEN: + if is_default: + raise sa_exc.ArgumentError( + "Wildcard token cannot be followed by " + "another entity", + ) + + if lead_element is None: + lead_element = _WildcardLoad() + + lead_element = meth(lead_element, _DEFAULT_TOKEN, **kw) + + else: + raise sa_exc.ArgumentError( + "Strings are not accepted for attribute names in " + "loader options; please use class-bound " + "attributes directly.", + ) + else: + if lead_element is None: + _, lead_entity, _ = _parse_attr_argument(attr) + lead_element = Load(lead_entity) + + if is_default: + if not chained: + lead_element = lead_element.defaultload(attr) + else: + lead_element = meth( + lead_element, attr, _is_chain=True, **kw + ) + else: + lead_element = meth(lead_element, attr, **kw) + + assert lead_element + return lead_element + + +def _parse_attr_argument( + attr: _AttrType, +) -> Tuple[InspectionAttr, _InternalEntityType[Any], MapperProperty[Any]]: + """parse an attribute or wildcard argument to produce an + :class:`._AbstractLoad` instance. + + This is used by the standalone loader strategy functions like + ``joinedload()``, ``defer()``, etc. to produce :class:`_orm.Load` or + :class:`._WildcardLoad` objects. 
+ + """ + try: + # TODO: need to figure out this None thing being returned by + # inspect(), it should not have None as an option in most cases + # if at all + insp: InspectionAttr = inspect(attr) # type: ignore + except sa_exc.NoInspectionAvailable as err: + raise sa_exc.ArgumentError( + "expected ORM mapped attribute for loader strategy argument" + ) from err + + lead_entity: _InternalEntityType[Any] + + if insp_is_mapper_property(insp): + lead_entity = insp.parent + prop = insp + elif insp_is_attribute(insp): + lead_entity = insp.parent + prop = insp.prop + else: + raise sa_exc.ArgumentError( + "expected ORM mapped attribute for loader strategy argument" + ) + + return insp, lead_entity, prop + + +def loader_unbound_fn(fn: _FN) -> _FN: + """decorator that applies docstrings between standalone loader functions + and the loader methods on :class:`._AbstractLoad`. + + """ + bound_fn = getattr(_AbstractLoad, fn.__name__) + fn_doc = bound_fn.__doc__ + bound_fn.__doc__ = f"""Produce a new :class:`_orm.Load` object with the +:func:`_orm.{fn.__name__}` option applied. + +See :func:`_orm.{fn.__name__}` for usage examples. + +""" + + fn.__doc__ = fn_doc + return fn + + +# standalone functions follow. docstrings are filled in +# by the ``@loader_unbound_fn`` decorator. + + +@loader_unbound_fn +def contains_eager(*keys: _AttrType, **kw: Any) -> _AbstractLoad: + return _generate_from_keys(Load.contains_eager, keys, True, kw) + + +@loader_unbound_fn +def load_only(*attrs: _AttrType, raiseload: bool = False) -> _AbstractLoad: + # TODO: attrs against different classes. we likely have to + # add some extra state to Load of some kind + _, lead_element, _ = _parse_attr_argument(attrs[0]) + return Load(lead_element).load_only(*attrs, raiseload=raiseload) + + +@loader_unbound_fn +def joinedload(*keys: _AttrType, **kw: Any) -> _AbstractLoad: + return _generate_from_keys(Load.joinedload, keys, False, kw) + + +@loader_unbound_fn +def subqueryload(*keys: _AttrType) -> _AbstractLoad: + return _generate_from_keys(Load.subqueryload, keys, False, {}) + + +@loader_unbound_fn +def selectinload( + *keys: _AttrType, recursion_depth: Optional[int] = None +) -> _AbstractLoad: + return _generate_from_keys( + Load.selectinload, keys, False, {"recursion_depth": recursion_depth} + ) + + +@loader_unbound_fn +def lazyload(*keys: _AttrType) -> _AbstractLoad: + return _generate_from_keys(Load.lazyload, keys, False, {}) + + +@loader_unbound_fn +def immediateload( + *keys: _AttrType, recursion_depth: Optional[int] = None +) -> _AbstractLoad: + return _generate_from_keys( + Load.immediateload, keys, False, {"recursion_depth": recursion_depth} + ) + + +@loader_unbound_fn +def noload(*keys: _AttrType) -> _AbstractLoad: + return _generate_from_keys(Load.noload, keys, False, {}) + + +@loader_unbound_fn +def raiseload(*keys: _AttrType, **kw: Any) -> _AbstractLoad: + return _generate_from_keys(Load.raiseload, keys, False, kw) + + +@loader_unbound_fn +def defaultload(*keys: _AttrType) -> _AbstractLoad: + return _generate_from_keys(Load.defaultload, keys, False, {}) + + +@loader_unbound_fn +def defer( + key: _AttrType, *addl_attrs: _AttrType, raiseload: bool = False +) -> _AbstractLoad: + if addl_attrs: + util.warn_deprecated( + "The *addl_attrs on orm.defer is deprecated. 
Please use " + "method chaining in conjunction with defaultload() to " + "indicate a path.", + version="1.3", + ) + + if raiseload: + kw = {"raiseload": raiseload} + else: + kw = {} + + return _generate_from_keys(Load.defer, (key,) + addl_attrs, False, kw) + + +@loader_unbound_fn +def undefer(key: _AttrType, *addl_attrs: _AttrType) -> _AbstractLoad: + if addl_attrs: + util.warn_deprecated( + "The *addl_attrs on orm.undefer is deprecated. Please use " + "method chaining in conjunction with defaultload() to " + "indicate a path.", + version="1.3", + ) + return _generate_from_keys(Load.undefer, (key,) + addl_attrs, False, {}) + + +@loader_unbound_fn +def undefer_group(name: str) -> _AbstractLoad: + element = _WildcardLoad() + return element.undefer_group(name) + + +@loader_unbound_fn +def with_expression( + key: _AttrType, expression: _ColumnExpressionArgument[Any] +) -> _AbstractLoad: + return _generate_from_keys( + Load.with_expression, (key,), False, {"expression": expression} + ) + + +@loader_unbound_fn +def selectin_polymorphic( + base_cls: _EntityType[Any], classes: Iterable[Type[Any]] +) -> _AbstractLoad: + ul = Load(base_cls) + return ul.selectin_polymorphic(classes) + + +def _raise_for_does_not_link(path, attrname, parent_entity): + if len(path) > 1: + path_is_of_type = path[-1].entity is not path[-2].mapper.class_ + if insp_is_aliased_class(parent_entity): + parent_entity_str = str(parent_entity) + else: + parent_entity_str = parent_entity.class_.__name__ + + raise sa_exc.ArgumentError( + f'ORM mapped entity or attribute "{attrname}" does not ' + f'link from relationship "{path[-2]}%s".%s' + % ( + f".of_type({path[-1]})" if path_is_of_type else "", + ( + " Did you mean to use " + f'"{path[-2]}' + f'.of_type({parent_entity_str})" or "loadopt.options(' + f"selectin_polymorphic({path[-2].mapper.class_.__name__}, " + f'[{parent_entity_str}]), ...)" ?' + if not path_is_of_type + and not path[-1].is_aliased_class + and orm_util._entity_corresponds_to( + path.entity, inspect(parent_entity).mapper + ) + else "" + ), + ) + ) + else: + raise sa_exc.ArgumentError( + f'ORM mapped attribute "{attrname}" does not ' + f'link mapped class "{path[-1]}"' + ) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/sync.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/sync.py new file mode 100644 index 0000000000000000000000000000000000000000..8f85a41a2c068b0779da9fc7bcd193b3d1ecd9a6 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/sync.py @@ -0,0 +1,164 @@ +# orm/sync.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php +# mypy: allow-untyped-defs, allow-untyped-calls + + +"""private module containing functions used for copying data +between instances based on join conditions. + +""" + +from __future__ import annotations + +from . import exc +from . 
import util as orm_util +from .base import PassiveFlag + + +def populate( + source, + source_mapper, + dest, + dest_mapper, + synchronize_pairs, + uowcommit, + flag_cascaded_pks, +): + source_dict = source.dict + dest_dict = dest.dict + + for l, r in synchronize_pairs: + try: + # inline of source_mapper._get_state_attr_by_column + prop = source_mapper._columntoproperty[l] + value = source.manager[prop.key].impl.get( + source, source_dict, PassiveFlag.PASSIVE_OFF + ) + except exc.UnmappedColumnError as err: + _raise_col_to_prop(False, source_mapper, l, dest_mapper, r, err) + + try: + # inline of dest_mapper._set_state_attr_by_column + prop = dest_mapper._columntoproperty[r] + dest.manager[prop.key].impl.set(dest, dest_dict, value, None) + except exc.UnmappedColumnError as err: + _raise_col_to_prop(True, source_mapper, l, dest_mapper, r, err) + + # technically the "r.primary_key" check isn't + # needed here, but we check for this condition to limit + # how often this logic is invoked for memory/performance + # reasons, since we only need this info for a primary key + # destination. + if ( + flag_cascaded_pks + and l.primary_key + and r.primary_key + and r.references(l) + ): + uowcommit.attributes[("pk_cascaded", dest, r)] = True + + +def bulk_populate_inherit_keys(source_dict, source_mapper, synchronize_pairs): + # a simplified version of populate() used by bulk insert mode + for l, r in synchronize_pairs: + try: + prop = source_mapper._columntoproperty[l] + value = source_dict[prop.key] + except exc.UnmappedColumnError as err: + _raise_col_to_prop(False, source_mapper, l, source_mapper, r, err) + + try: + prop = source_mapper._columntoproperty[r] + source_dict[prop.key] = value + except exc.UnmappedColumnError as err: + _raise_col_to_prop(True, source_mapper, l, source_mapper, r, err) + + +def clear(dest, dest_mapper, synchronize_pairs): + for l, r in synchronize_pairs: + if ( + r.primary_key + and dest_mapper._get_state_attr_by_column(dest, dest.dict, r) + not in orm_util._none_set + ): + raise AssertionError( + f"Dependency rule on column '{l}' " + "tried to blank-out primary key " + f"column '{r}' on instance '{orm_util.state_str(dest)}'" + ) + try: + dest_mapper._set_state_attr_by_column(dest, dest.dict, r, None) + except exc.UnmappedColumnError as err: + _raise_col_to_prop(True, None, l, dest_mapper, r, err) + + +def update(source, source_mapper, dest, old_prefix, synchronize_pairs): + for l, r in synchronize_pairs: + try: + oldvalue = source_mapper._get_committed_attr_by_column( + source.obj(), l + ) + value = source_mapper._get_state_attr_by_column( + source, source.dict, l, passive=PassiveFlag.PASSIVE_OFF + ) + except exc.UnmappedColumnError as err: + _raise_col_to_prop(False, source_mapper, l, None, r, err) + dest[r.key] = value + dest[old_prefix + r.key] = oldvalue + + +def populate_dict(source, source_mapper, dict_, synchronize_pairs): + for l, r in synchronize_pairs: + try: + value = source_mapper._get_state_attr_by_column( + source, source.dict, l, passive=PassiveFlag.PASSIVE_OFF + ) + except exc.UnmappedColumnError as err: + _raise_col_to_prop(False, source_mapper, l, None, r, err) + + dict_[r.key] = value + + +def source_modified(uowcommit, source, source_mapper, synchronize_pairs): + """return true if the source object has changes from an old to a + new value on the given synchronize pairs + + """ + for l, r in synchronize_pairs: + try: + prop = source_mapper._columntoproperty[l] + except exc.UnmappedColumnError as err: + _raise_col_to_prop(False, source_mapper, l, None, r, 
err) + history = uowcommit.get_attribute_history( + source, prop.key, PassiveFlag.PASSIVE_NO_INITIALIZE + ) + if bool(history.deleted): + return True + else: + return False + + +def _raise_col_to_prop( + isdest, source_mapper, source_column, dest_mapper, dest_column, err +): + if isdest: + raise exc.UnmappedColumnError( + "Can't execute sync rule for " + "destination column '%s'; mapper '%s' does not map " + "this column. Try using an explicit `foreign_keys` " + "collection which does not include this column (or use " + "a viewonly=True relation)." % (dest_column, dest_mapper) + ) from err + else: + raise exc.UnmappedColumnError( + "Can't execute sync rule for " + "source column '%s'; mapper '%s' does not map this " + "column. Try using an explicit `foreign_keys` " + "collection which does not include destination column " + "'%s' (or use a viewonly=True relation)." + % (source_column, source_mapper, dest_column) + ) from err diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/unitofwork.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/unitofwork.py new file mode 100644 index 0000000000000000000000000000000000000000..80897f29262fc243a28f8edf2bf8a2d7d434c9e8 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/unitofwork.py @@ -0,0 +1,796 @@ +# orm/unitofwork.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php +# mypy: ignore-errors + + +"""The internals for the unit of work system. + +The session's flush() process passes objects to a contextual object +here, which assembles flush tasks based on mappers and their properties, +organizes them in order of dependency, and executes. + +""" + +from __future__ import annotations + +from typing import Any +from typing import Dict +from typing import Optional +from typing import Set +from typing import TYPE_CHECKING + +from . import attributes +from . import exc as orm_exc +from . import util as orm_util +from .. import event +from .. import util +from ..util import topological + + +if TYPE_CHECKING: + from .dependency import DependencyProcessor + from .interfaces import MapperProperty + from .mapper import Mapper + from .session import Session + from .session import SessionTransaction + from .state import InstanceState + + +def track_cascade_events(descriptor, prop): + """Establish event listeners on object attributes which handle + cascade-on-set/append. 
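+
+ Listeners are attached to the attribute's "append" (and
+ "append_wo_mutation"), "remove" and "set" events; they implement the
+ "save-update" cascade by adding newly associated objects to the
+ parent's :class:`.Session`, and expunge pending objects that become
+ "delete-orphan" as a result of the mutation.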
+ + """ + key = prop.key + + def append(state, item, initiator, **kw): + # process "save_update" cascade rules for when + # an instance is appended to the list of another instance + + if item is None: + return + + sess = state.session + if sess: + if sess._warn_on_events: + sess._flush_warning("collection append") + + prop = state.manager.mapper._props[key] + item_state = attributes.instance_state(item) + + if ( + prop._cascade.save_update + and (key == initiator.key) + and not sess._contains_state(item_state) + ): + sess._save_or_update_state(item_state) + return item + + def remove(state, item, initiator, **kw): + if item is None: + return + + sess = state.session + + prop = state.manager.mapper._props[key] + + if sess and sess._warn_on_events: + sess._flush_warning( + "collection remove" + if prop.uselist + else "related attribute delete" + ) + + if ( + item is not None + and item is not attributes.NEVER_SET + and item is not attributes.PASSIVE_NO_RESULT + and prop._cascade.delete_orphan + ): + # expunge pending orphans + item_state = attributes.instance_state(item) + + if prop.mapper._is_orphan(item_state): + if sess and item_state in sess._new: + sess.expunge(item) + else: + # the related item may or may not itself be in a + # Session, however the parent for which we are catching + # the event is not in a session, so memoize this on the + # item + item_state._orphaned_outside_of_session = True + + def set_(state, newvalue, oldvalue, initiator, **kw): + # process "save_update" cascade rules for when an instance + # is attached to another instance + if oldvalue is newvalue: + return newvalue + + sess = state.session + if sess: + if sess._warn_on_events: + sess._flush_warning("related attribute set") + + prop = state.manager.mapper._props[key] + if newvalue is not None: + newvalue_state = attributes.instance_state(newvalue) + if ( + prop._cascade.save_update + and (key == initiator.key) + and not sess._contains_state(newvalue_state) + ): + sess._save_or_update_state(newvalue_state) + + if ( + oldvalue is not None + and oldvalue is not attributes.NEVER_SET + and oldvalue is not attributes.PASSIVE_NO_RESULT + and prop._cascade.delete_orphan + ): + # possible to reach here with attributes.NEVER_SET ? + oldvalue_state = attributes.instance_state(oldvalue) + + if oldvalue_state in sess._new and prop.mapper._is_orphan( + oldvalue_state + ): + sess.expunge(oldvalue) + return newvalue + + event.listen( + descriptor, "append_wo_mutation", append, raw=True, include_key=True + ) + event.listen( + descriptor, "append", append, raw=True, retval=True, include_key=True + ) + event.listen( + descriptor, "remove", remove, raw=True, retval=True, include_key=True + ) + event.listen( + descriptor, "set", set_, raw=True, retval=True, include_key=True + ) + + +class UOWTransaction: + session: Session + transaction: SessionTransaction + attributes: Dict[str, Any] + deps: util.defaultdict[Mapper[Any], Set[DependencyProcessor]] + mappers: util.defaultdict[Mapper[Any], Set[InstanceState[Any]]] + + def __init__(self, session: Session): + self.session = session + + # dictionary used by external actors to + # store arbitrary state information. + self.attributes = {} + + # dictionary of mappers to sets of + # DependencyProcessors, which are also + # set to be part of the sorted flush actions, + # which have that mapper as a parent. + self.deps = util.defaultdict(set) + + # dictionary of mappers to sets of InstanceState + # items pending for flush which have that mapper + # as a parent. 
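+ # (populated by register_object() as each state is registered)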
+ self.mappers = util.defaultdict(set) + + # a dictionary of Preprocess objects, which gather + # additional states impacted by the flush + # and determine if a flush action is needed + self.presort_actions = {} + + # dictionary of PostSortRec objects, each + # one issues work during the flush within + # a certain ordering. + self.postsort_actions = {} + + # a set of 2-tuples, each containing two + # PostSortRec objects where the second + # is dependent on the first being executed + # first + self.dependencies = set() + + # dictionary of InstanceState-> (isdelete, listonly) + # tuples, indicating if this state is to be deleted + # or insert/updated, or just refreshed + self.states = {} + + # tracks InstanceStates which will be receiving + # a "post update" call. Keys are mappers, + # values are a set of states and a set of the + # columns which should be included in the update. + self.post_update_states = util.defaultdict(lambda: (set(), set())) + + @property + def has_work(self): + return bool(self.states) + + def was_already_deleted(self, state): + """Return ``True`` if the given state is expired and was deleted + previously. + """ + if state.expired: + try: + state._load_expired(state, attributes.PASSIVE_OFF) + except orm_exc.ObjectDeletedError: + self.session._remove_newly_deleted([state]) + return True + return False + + def is_deleted(self, state): + """Return ``True`` if the given state is marked as deleted + within this uowtransaction.""" + + return state in self.states and self.states[state][0] + + def memo(self, key, callable_): + if key in self.attributes: + return self.attributes[key] + else: + self.attributes[key] = ret = callable_() + return ret + + def remove_state_actions(self, state): + """Remove pending actions for a state from the uowtransaction.""" + + isdelete = self.states[state][0] + + self.states[state] = (isdelete, True) + + def get_attribute_history( + self, state, key, passive=attributes.PASSIVE_NO_INITIALIZE + ): + """Facade to attributes.get_state_history(), including + caching of results.""" + + hashkey = ("history", state, key) + + # cache the objects, not the states; the strong reference here + # prevents newly loaded objects from being dereferenced during the + # flush process + + if hashkey in self.attributes: + history, state_history, cached_passive = self.attributes[hashkey] + # if the cached lookup was "passive" and now + # we want non-passive, do a non-passive lookup and re-cache + + if ( + not cached_passive & attributes.SQL_OK + and passive & attributes.SQL_OK + ): + impl = state.manager[key].impl + history = impl.get_history( + state, + state.dict, + attributes.PASSIVE_OFF + | attributes.LOAD_AGAINST_COMMITTED + | attributes.NO_RAISE, + ) + if history and impl.uses_objects: + state_history = history.as_state() + else: + state_history = history + self.attributes[hashkey] = (history, state_history, passive) + else: + impl = state.manager[key].impl + # TODO: store the history as (state, object) tuples + # so we don't have to keep converting here + history = impl.get_history( + state, + state.dict, + passive + | attributes.LOAD_AGAINST_COMMITTED + | attributes.NO_RAISE, + ) + if history and impl.uses_objects: + state_history = history.as_state() + else: + state_history = history + self.attributes[hashkey] = (history, state_history, passive) + + return state_history + + def has_dep(self, processor): + return (processor, True) in self.presort_actions + + def register_preprocessor(self, processor, fromparent): + key = (processor, fromparent) + if key not 
in self.presort_actions: + self.presort_actions[key] = Preprocess(processor, fromparent) + + def register_object( + self, + state: InstanceState[Any], + isdelete: bool = False, + listonly: bool = False, + cancel_delete: bool = False, + operation: Optional[str] = None, + prop: Optional[MapperProperty] = None, + ) -> bool: + if not self.session._contains_state(state): + # this condition is normal when objects are registered + # as part of a relationship cascade operation. it should + # not occur for the top-level register from Session.flush(). + if not state.deleted and operation is not None: + util.warn( + "Object of type %s not in session, %s operation " + "along '%s' will not proceed" + % (orm_util.state_class_str(state), operation, prop) + ) + return False + + if state not in self.states: + mapper = state.manager.mapper + + if mapper not in self.mappers: + self._per_mapper_flush_actions(mapper) + + self.mappers[mapper].add(state) + self.states[state] = (isdelete, listonly) + else: + if not listonly and (isdelete or cancel_delete): + self.states[state] = (isdelete, False) + return True + + def register_post_update(self, state, post_update_cols): + mapper = state.manager.mapper.base_mapper + states, cols = self.post_update_states[mapper] + states.add(state) + cols.update(post_update_cols) + + def _per_mapper_flush_actions(self, mapper): + saves = SaveUpdateAll(self, mapper.base_mapper) + deletes = DeleteAll(self, mapper.base_mapper) + self.dependencies.add((saves, deletes)) + + for dep in mapper._dependency_processors: + dep.per_property_preprocessors(self) + + for prop in mapper.relationships: + if prop.viewonly: + continue + dep = prop._dependency_processor + dep.per_property_preprocessors(self) + + @util.memoized_property + def _mapper_for_dep(self): + """return a dynamic mapping of (Mapper, DependencyProcessor) to + True or False, indicating if the DependencyProcessor operates + on objects of that Mapper. + + The result is stored in the dictionary persistently once + calculated. + + """ + return util.PopulateDict( + lambda tup: tup[0]._props.get(tup[1].key) is tup[1].prop + ) + + def filter_states_for_dep(self, dep, states): + """Filter the given list of InstanceStates to those relevant to the + given DependencyProcessor. + + """ + mapper_for_dep = self._mapper_for_dep + return [s for s in states if mapper_for_dep[(s.manager.mapper, dep)]] + + def states_for_mapper_hierarchy(self, mapper, isdelete, listonly): + checktup = (isdelete, listonly) + for mapper in mapper.base_mapper.self_and_descendants: + for state in self.mappers[mapper]: + if self.states[state] == checktup: + yield state + + def _generate_actions(self): + """Generate the full, unsorted collection of PostSortRecs as + well as dependency pairs for this UOWTransaction. + + """ + # execute presort_actions, until all states + # have been processed. a presort_action might + # add new states to the uow. + while True: + ret = False + for action in list(self.presort_actions.values()): + if action.execute(self): + ret = True + if not ret: + break + + # see if the graph of mapper dependencies has cycles. + self.cycles = cycles = topological.find_cycles( + self.dependencies, list(self.postsort_actions.values()) + ) + + if cycles: + # if yes, break the per-mapper actions into + # per-state actions + convert = { + rec: set(rec.per_state_flush_actions(self)) for rec in cycles + } + + # rewrite the existing dependencies to point to + # the per-state actions for those per-mapper actions + # that were broken up. 
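+ # an edge is dropped outright if either endpoint is missing or
+ # disabled, or if both endpoints are inside the cycle set; otherwise
+ # the endpoint that is part of a cycle is replaced by each of its
+ # per-state actions.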
+ for edge in list(self.dependencies): + if ( + None in edge + or edge[0].disabled + or edge[1].disabled + or cycles.issuperset(edge) + ): + self.dependencies.remove(edge) + elif edge[0] in cycles: + self.dependencies.remove(edge) + for dep in convert[edge[0]]: + self.dependencies.add((dep, edge[1])) + elif edge[1] in cycles: + self.dependencies.remove(edge) + for dep in convert[edge[1]]: + self.dependencies.add((edge[0], dep)) + + return { + a for a in self.postsort_actions.values() if not a.disabled + }.difference(cycles) + + def execute(self) -> None: + postsort_actions = self._generate_actions() + + postsort_actions = sorted( + postsort_actions, + key=lambda item: item.sort_key, + ) + # sort = topological.sort(self.dependencies, postsort_actions) + # print "--------------" + # print "\ndependencies:", self.dependencies + # print "\ncycles:", self.cycles + # print "\nsort:", list(sort) + # print "\nCOUNT OF POSTSORT ACTIONS", len(postsort_actions) + + # execute + if self.cycles: + for subset in topological.sort_as_subsets( + self.dependencies, postsort_actions + ): + set_ = set(subset) + while set_: + n = set_.pop() + n.execute_aggregate(self, set_) + else: + for rec in topological.sort(self.dependencies, postsort_actions): + rec.execute(self) + + def finalize_flush_changes(self) -> None: + """Mark processed objects as clean / deleted after a successful + flush(). + + This method is called within the flush() method after the + execute() method has succeeded and the transaction has been committed. + + """ + if not self.states: + return + + states = set(self.states) + isdel = { + s for (s, (isdelete, listonly)) in self.states.items() if isdelete + } + other = states.difference(isdel) + if isdel: + self.session._remove_newly_deleted(isdel) + if other: + self.session._register_persistent(other) + + +class IterateMappersMixin: + __slots__ = () + + def _mappers(self, uow): + if self.fromparent: + return iter( + m + for m in self.dependency_processor.parent.self_and_descendants + if uow._mapper_for_dep[(m, self.dependency_processor)] + ) + else: + return self.dependency_processor.mapper.self_and_descendants + + +class Preprocess(IterateMappersMixin): + __slots__ = ( + "dependency_processor", + "fromparent", + "processed", + "setup_flush_actions", + ) + + def __init__(self, dependency_processor, fromparent): + self.dependency_processor = dependency_processor + self.fromparent = fromparent + self.processed = set() + self.setup_flush_actions = False + + def execute(self, uow): + delete_states = set() + save_states = set() + + for mapper in self._mappers(uow): + for state in uow.mappers[mapper].difference(self.processed): + (isdelete, listonly) = uow.states[state] + if not listonly: + if isdelete: + delete_states.add(state) + else: + save_states.add(state) + + if delete_states: + self.dependency_processor.presort_deletes(uow, delete_states) + self.processed.update(delete_states) + if save_states: + self.dependency_processor.presort_saves(uow, save_states) + self.processed.update(save_states) + + if delete_states or save_states: + if not self.setup_flush_actions and ( + self.dependency_processor.prop_has_changes( + uow, delete_states, True + ) + or self.dependency_processor.prop_has_changes( + uow, save_states, False + ) + ): + self.dependency_processor.per_property_flush_actions(uow) + self.setup_flush_actions = True + return True + else: + return False + + +class PostSortRec: + __slots__ = ("disabled",) + + def __new__(cls, uow, *args): + key = (cls,) + args + if key in uow.postsort_actions: + 
return uow.postsort_actions[key] + else: + uow.postsort_actions[key] = ret = object.__new__(cls) + ret.disabled = False + return ret + + def execute_aggregate(self, uow, recs): + self.execute(uow) + + +class ProcessAll(IterateMappersMixin, PostSortRec): + __slots__ = "dependency_processor", "isdelete", "fromparent", "sort_key" + + def __init__(self, uow, dependency_processor, isdelete, fromparent): + self.dependency_processor = dependency_processor + self.sort_key = ( + "ProcessAll", + self.dependency_processor.sort_key, + isdelete, + ) + self.isdelete = isdelete + self.fromparent = fromparent + uow.deps[dependency_processor.parent.base_mapper].add( + dependency_processor + ) + + def execute(self, uow): + states = self._elements(uow) + if self.isdelete: + self.dependency_processor.process_deletes(uow, states) + else: + self.dependency_processor.process_saves(uow, states) + + def per_state_flush_actions(self, uow): + # this is handled by SaveUpdateAll and DeleteAll, + # since a ProcessAll should unconditionally be pulled + # into per-state if either the parent/child mappers + # are part of a cycle + return iter([]) + + def __repr__(self): + return "%s(%s, isdelete=%s)" % ( + self.__class__.__name__, + self.dependency_processor, + self.isdelete, + ) + + def _elements(self, uow): + for mapper in self._mappers(uow): + for state in uow.mappers[mapper]: + (isdelete, listonly) = uow.states[state] + if isdelete == self.isdelete and not listonly: + yield state + + +class PostUpdateAll(PostSortRec): + __slots__ = "mapper", "isdelete", "sort_key" + + def __init__(self, uow, mapper, isdelete): + self.mapper = mapper + self.isdelete = isdelete + self.sort_key = ("PostUpdateAll", mapper._sort_key, isdelete) + + @util.preload_module("sqlalchemy.orm.persistence") + def execute(self, uow): + persistence = util.preloaded.orm_persistence + states, cols = uow.post_update_states[self.mapper] + states = [s for s in states if uow.states[s][0] == self.isdelete] + + persistence.post_update(self.mapper, states, uow, cols) + + +class SaveUpdateAll(PostSortRec): + __slots__ = ("mapper", "sort_key") + + def __init__(self, uow, mapper): + self.mapper = mapper + self.sort_key = ("SaveUpdateAll", mapper._sort_key) + assert mapper is mapper.base_mapper + + @util.preload_module("sqlalchemy.orm.persistence") + def execute(self, uow): + util.preloaded.orm_persistence.save_obj( + self.mapper, + uow.states_for_mapper_hierarchy(self.mapper, False, False), + uow, + ) + + def per_state_flush_actions(self, uow): + states = list( + uow.states_for_mapper_hierarchy(self.mapper, False, False) + ) + base_mapper = self.mapper.base_mapper + delete_all = DeleteAll(uow, base_mapper) + for state in states: + # keep saves before deletes - + # this ensures 'row switch' operations work + action = SaveUpdateState(uow, state) + uow.dependencies.add((action, delete_all)) + yield action + + for dep in uow.deps[self.mapper]: + states_for_prop = uow.filter_states_for_dep(dep, states) + dep.per_state_flush_actions(uow, states_for_prop, False) + + def __repr__(self): + return "%s(%s)" % (self.__class__.__name__, self.mapper) + + +class DeleteAll(PostSortRec): + __slots__ = ("mapper", "sort_key") + + def __init__(self, uow, mapper): + self.mapper = mapper + self.sort_key = ("DeleteAll", mapper._sort_key) + assert mapper is mapper.base_mapper + + @util.preload_module("sqlalchemy.orm.persistence") + def execute(self, uow): + util.preloaded.orm_persistence.delete_obj( + self.mapper, + uow.states_for_mapper_hierarchy(self.mapper, True, False), + uow, + ) + 
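# UOWTransaction.execute() above orders the collected PostSortRec actions by
# topologically sorting them against the recorded (before, after) dependency
# pairs.  A minimal sketch of that ordering idea in plain Python (Kahn's
# algorithm); this is an illustration only, not SQLAlchemy's internal
# topological module, and the action names are hypothetical.
from collections import defaultdict, deque


def toposort(pairs, items):
    """Order ``items`` so every (before, after) pair in ``pairs`` is respected."""
    successors = defaultdict(set)
    indegree = {item: 0 for item in items}
    for before, after in pairs:
        if after not in successors[before]:
            successors[before].add(after)
            indegree[after] += 1
    ready = deque(item for item, deg in indegree.items() if deg == 0)
    ordered = []
    while ready:
        node = ready.popleft()
        ordered.append(node)
        for nxt in successors[node]:
            indegree[nxt] -= 1
            if indegree[nxt] == 0:
                ready.append(nxt)
    if len(ordered) != len(items):
        # mirrors the "cycles" branch above, where per-mapper actions are
        # broken into per-state actions instead of failing outright
        raise ValueError("cycle detected among flush actions")
    return ordered


# e.g. parent rows are saved before the relationship is processed, which in
# turn happens before dependent child rows are saved
print(toposort(
    {("save_parent", "process_rel"), ("process_rel", "save_child")},
    ["save_child", "process_rel", "save_parent"],
))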
+ def per_state_flush_actions(self, uow): + states = list( + uow.states_for_mapper_hierarchy(self.mapper, True, False) + ) + base_mapper = self.mapper.base_mapper + save_all = SaveUpdateAll(uow, base_mapper) + for state in states: + # keep saves before deletes - + # this ensures 'row switch' operations work + action = DeleteState(uow, state) + uow.dependencies.add((save_all, action)) + yield action + + for dep in uow.deps[self.mapper]: + states_for_prop = uow.filter_states_for_dep(dep, states) + dep.per_state_flush_actions(uow, states_for_prop, True) + + def __repr__(self): + return "%s(%s)" % (self.__class__.__name__, self.mapper) + + +class ProcessState(PostSortRec): + __slots__ = "dependency_processor", "isdelete", "state", "sort_key" + + def __init__(self, uow, dependency_processor, isdelete, state): + self.dependency_processor = dependency_processor + self.sort_key = ("ProcessState", dependency_processor.sort_key) + self.isdelete = isdelete + self.state = state + + def execute_aggregate(self, uow, recs): + cls_ = self.__class__ + dependency_processor = self.dependency_processor + isdelete = self.isdelete + our_recs = [ + r + for r in recs + if r.__class__ is cls_ + and r.dependency_processor is dependency_processor + and r.isdelete is isdelete + ] + recs.difference_update(our_recs) + states = [self.state] + [r.state for r in our_recs] + if isdelete: + dependency_processor.process_deletes(uow, states) + else: + dependency_processor.process_saves(uow, states) + + def __repr__(self): + return "%s(%s, %s, delete=%s)" % ( + self.__class__.__name__, + self.dependency_processor, + orm_util.state_str(self.state), + self.isdelete, + ) + + +class SaveUpdateState(PostSortRec): + __slots__ = "state", "mapper", "sort_key" + + def __init__(self, uow, state): + self.state = state + self.mapper = state.mapper.base_mapper + self.sort_key = ("ProcessState", self.mapper._sort_key) + + @util.preload_module("sqlalchemy.orm.persistence") + def execute_aggregate(self, uow, recs): + persistence = util.preloaded.orm_persistence + cls_ = self.__class__ + mapper = self.mapper + our_recs = [ + r for r in recs if r.__class__ is cls_ and r.mapper is mapper + ] + recs.difference_update(our_recs) + persistence.save_obj( + mapper, [self.state] + [r.state for r in our_recs], uow + ) + + def __repr__(self): + return "%s(%s)" % ( + self.__class__.__name__, + orm_util.state_str(self.state), + ) + + +class DeleteState(PostSortRec): + __slots__ = "state", "mapper", "sort_key" + + def __init__(self, uow, state): + self.state = state + self.mapper = state.mapper.base_mapper + self.sort_key = ("DeleteState", self.mapper._sort_key) + + @util.preload_module("sqlalchemy.orm.persistence") + def execute_aggregate(self, uow, recs): + persistence = util.preloaded.orm_persistence + cls_ = self.__class__ + mapper = self.mapper + our_recs = [ + r for r in recs if r.__class__ is cls_ and r.mapper is mapper + ] + recs.difference_update(our_recs) + states = [self.state] + [r.state for r in our_recs] + persistence.delete_obj( + mapper, [s for s in states if uow.states[s][0]], uow + ) + + def __repr__(self): + return "%s(%s)" % ( + self.__class__.__name__, + orm_util.state_str(self.state), + ) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/util.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/util.py new file mode 100644 index 0000000000000000000000000000000000000000..874c4f53b154d4a3a79c7265018eb23b3f0103d0 --- /dev/null +++ 
b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/util.py @@ -0,0 +1,2403 @@ +# orm/util.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php +# mypy: allow-untyped-defs, allow-untyped-calls + +from __future__ import annotations + +import enum +import functools +import re +import types +import typing +from typing import AbstractSet +from typing import Any +from typing import Callable +from typing import cast +from typing import Dict +from typing import FrozenSet +from typing import Generic +from typing import Iterable +from typing import Iterator +from typing import List +from typing import Match +from typing import Optional +from typing import Sequence +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING +from typing import TypeVar +from typing import Union +import weakref + +from . import attributes # noqa +from . import exc +from . import exc as orm_exc +from ._typing import _O +from ._typing import insp_is_aliased_class +from ._typing import insp_is_mapper +from ._typing import prop_is_relationship +from .base import _class_to_mapper as _class_to_mapper +from .base import _MappedAnnotationBase +from .base import _never_set as _never_set # noqa: F401 +from .base import _none_only_set as _none_only_set # noqa: F401 +from .base import _none_set as _none_set # noqa: F401 +from .base import attribute_str as attribute_str # noqa: F401 +from .base import class_mapper as class_mapper +from .base import DynamicMapped +from .base import InspectionAttr as InspectionAttr +from .base import instance_str as instance_str # noqa: F401 +from .base import Mapped +from .base import object_mapper as object_mapper +from .base import object_state as object_state # noqa: F401 +from .base import opt_manager_of_class +from .base import ORMDescriptor +from .base import state_attribute_str as state_attribute_str # noqa: F401 +from .base import state_class_str as state_class_str # noqa: F401 +from .base import state_str as state_str # noqa: F401 +from .base import WriteOnlyMapped +from .interfaces import CriteriaOption +from .interfaces import MapperProperty as MapperProperty +from .interfaces import ORMColumnsClauseRole +from .interfaces import ORMEntityColumnsClauseRole +from .interfaces import ORMFromClauseRole +from .path_registry import PathRegistry as PathRegistry +from .. import event +from .. import exc as sa_exc +from .. import inspection +from .. import sql +from .. 
import util +from ..engine.result import result_tuple +from ..sql import coercions +from ..sql import expression +from ..sql import lambdas +from ..sql import roles +from ..sql import util as sql_util +from ..sql import visitors +from ..sql._typing import is_selectable +from ..sql.annotation import SupportsCloneAnnotations +from ..sql.base import ColumnCollection +from ..sql.cache_key import HasCacheKey +from ..sql.cache_key import MemoizedHasCacheKey +from ..sql.elements import ColumnElement +from ..sql.elements import KeyedColumnElement +from ..sql.selectable import FromClause +from ..util.langhelpers import MemoizedSlots +from ..util.typing import de_stringify_annotation as _de_stringify_annotation +from ..util.typing import eval_name_only as _eval_name_only +from ..util.typing import fixup_container_fwd_refs +from ..util.typing import get_origin +from ..util.typing import is_origin_of_cls +from ..util.typing import Literal +from ..util.typing import Protocol + +if typing.TYPE_CHECKING: + from ._typing import _EntityType + from ._typing import _IdentityKeyType + from ._typing import _InternalEntityType + from ._typing import _ORMCOLEXPR + from .context import _MapperEntity + from .context import ORMCompileState + from .mapper import Mapper + from .path_registry import AbstractEntityRegistry + from .query import Query + from .relationships import RelationshipProperty + from ..engine import Row + from ..engine import RowMapping + from ..sql._typing import _CE + from ..sql._typing import _ColumnExpressionArgument + from ..sql._typing import _EquivalentColumnMap + from ..sql._typing import _FromClauseArgument + from ..sql._typing import _OnClauseArgument + from ..sql._typing import _PropagateAttrsType + from ..sql.annotation import _SA + from ..sql.base import ReadOnlyColumnCollection + from ..sql.elements import BindParameter + from ..sql.selectable import _ColumnsClauseElement + from ..sql.selectable import Select + from ..sql.selectable import Selectable + from ..sql.visitors import anon_map + from ..util.typing import _AnnotationScanType + +_T = TypeVar("_T", bound=Any) + +all_cascades = frozenset( + ( + "delete", + "delete-orphan", + "all", + "merge", + "expunge", + "save-update", + "refresh-expire", + "none", + ) +) + +_de_stringify_partial = functools.partial( + functools.partial, + locals_=util.immutabledict( + { + "Mapped": Mapped, + "WriteOnlyMapped": WriteOnlyMapped, + "DynamicMapped": DynamicMapped, + } + ), +) + +# partial is practically useless as we have to write out the whole +# function and maintain the signature anyway + + +class _DeStringifyAnnotation(Protocol): + def __call__( + self, + cls: Type[Any], + annotation: _AnnotationScanType, + originating_module: str, + *, + str_cleanup_fn: Optional[Callable[[str, str], str]] = None, + include_generic: bool = False, + ) -> Type[Any]: ... + + +de_stringify_annotation = cast( + _DeStringifyAnnotation, _de_stringify_partial(_de_stringify_annotation) +) + + +class _EvalNameOnly(Protocol): + def __call__(self, name: str, module_name: str) -> Any: ... 
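# The code just above pre-binds a fixed ``locals_`` argument with
# functools.partial and then cast()s the partial to a Protocol so static type
# checkers still see the full call signature (the nearby comment notes that a
# bare partial loses it).  A minimal sketch of that pattern with hypothetical
# names, assuming typing.Protocol is available (Python 3.8+):
import functools
from typing import Protocol, cast


def render(template: str, *, context: dict) -> str:
    # the "real" callable whose keyword argument we want to pre-bind
    return template.format(**context)


class _Render(Protocol):
    # the signature callers should still see after ``context`` is bound
    def __call__(self, template: str) -> str: ...


# functools.partial erases the signature for type checkers; casting the
# partial to the Protocol restores a precise, checkable call signature.
render_greeting = cast(_Render, functools.partial(render, context={"name": "world"}))

print(render_greeting("hello {name}"))  # -> hello world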
+ + +eval_name_only = cast(_EvalNameOnly, _de_stringify_partial(_eval_name_only)) + + +class CascadeOptions(FrozenSet[str]): + """Keeps track of the options sent to + :paramref:`.relationship.cascade`""" + + _add_w_all_cascades = all_cascades.difference( + ["all", "none", "delete-orphan"] + ) + _allowed_cascades = all_cascades + + _viewonly_cascades = ["expunge", "all", "none", "refresh-expire", "merge"] + + __slots__ = ( + "save_update", + "delete", + "refresh_expire", + "merge", + "expunge", + "delete_orphan", + ) + + save_update: bool + delete: bool + refresh_expire: bool + merge: bool + expunge: bool + delete_orphan: bool + + def __new__( + cls, value_list: Optional[Union[Iterable[str], str]] + ) -> CascadeOptions: + if isinstance(value_list, str) or value_list is None: + return cls.from_string(value_list) # type: ignore + values = set(value_list) + if values.difference(cls._allowed_cascades): + raise sa_exc.ArgumentError( + "Invalid cascade option(s): %s" + % ", ".join( + [ + repr(x) + for x in sorted( + values.difference(cls._allowed_cascades) + ) + ] + ) + ) + + if "all" in values: + values.update(cls._add_w_all_cascades) + if "none" in values: + values.clear() + values.discard("all") + + self = super().__new__(cls, values) + self.save_update = "save-update" in values + self.delete = "delete" in values + self.refresh_expire = "refresh-expire" in values + self.merge = "merge" in values + self.expunge = "expunge" in values + self.delete_orphan = "delete-orphan" in values + + if self.delete_orphan and not self.delete: + util.warn("The 'delete-orphan' cascade option requires 'delete'.") + return self + + def __repr__(self): + return "CascadeOptions(%r)" % (",".join([x for x in sorted(self)])) + + @classmethod + def from_string(cls, arg): + values = [c for c in re.split(r"\s*,\s*", arg or "") if c] + return cls(values) + + +def _validator_events(desc, key, validator, include_removes, include_backrefs): + """Runs a validation method on an attribute value to be set or + appended. 
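# The ``_validator_events`` helper below wires a validator callable into the
# "append" / "set" / "remove" attribute events.  In application code this is
# normally reached through the public ``@validates`` decorator; a minimal
# sketch in SQLAlchemy 2.0 declarative style (class and column names are
# illustrative assumptions):
from sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column, validates


class Base(DeclarativeBase):
    pass


class User(Base):
    __tablename__ = "user_account"

    id: Mapped[int] = mapped_column(primary_key=True)
    email: Mapped[str]

    @validates("email")
    def _check_email(self, key, value):
        # invoked on each assignment; returning the value lets it through
        if "@" not in value:
            raise ValueError(f"invalid email: {value!r}")
        return value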
+ """ + + if not include_backrefs: + + def detect_is_backref(state, initiator): + impl = state.manager[key].impl + return initiator.impl is not impl + + if include_removes: + + def append(state, value, initiator): + if initiator.op is not attributes.OP_BULK_REPLACE and ( + include_backrefs or not detect_is_backref(state, initiator) + ): + return validator(state.obj(), key, value, False) + else: + return value + + def bulk_set(state, values, initiator): + if include_backrefs or not detect_is_backref(state, initiator): + obj = state.obj() + values[:] = [ + validator(obj, key, value, False) for value in values + ] + + def set_(state, value, oldvalue, initiator): + if include_backrefs or not detect_is_backref(state, initiator): + return validator(state.obj(), key, value, False) + else: + return value + + def remove(state, value, initiator): + if include_backrefs or not detect_is_backref(state, initiator): + validator(state.obj(), key, value, True) + + else: + + def append(state, value, initiator): + if initiator.op is not attributes.OP_BULK_REPLACE and ( + include_backrefs or not detect_is_backref(state, initiator) + ): + return validator(state.obj(), key, value) + else: + return value + + def bulk_set(state, values, initiator): + if include_backrefs or not detect_is_backref(state, initiator): + obj = state.obj() + values[:] = [validator(obj, key, value) for value in values] + + def set_(state, value, oldvalue, initiator): + if include_backrefs or not detect_is_backref(state, initiator): + return validator(state.obj(), key, value) + else: + return value + + event.listen(desc, "append", append, raw=True, retval=True) + event.listen(desc, "bulk_replace", bulk_set, raw=True) + event.listen(desc, "set", set_, raw=True, retval=True) + if include_removes: + event.listen(desc, "remove", remove, raw=True, retval=True) + + +def polymorphic_union( + table_map, typecolname, aliasname="p_union", cast_nulls=True +): + """Create a ``UNION`` statement used by a polymorphic mapper. + + See :ref:`concrete_inheritance` for an example of how + this is used. + + :param table_map: mapping of polymorphic identities to + :class:`_schema.Table` objects. + :param typecolname: string name of a "discriminator" column, which will be + derived from the query, producing the polymorphic identity for + each row. If ``None``, no polymorphic discriminator is generated. + :param aliasname: name of the :func:`~sqlalchemy.sql.expression.alias()` + construct generated. + :param cast_nulls: if True, non-existent columns, which are represented + as labeled NULLs, will be passed into CAST. This is a legacy behavior + that is problematic on some backends such as Oracle - in which case it + can be set to False. 
+ + """ + + colnames: util.OrderedSet[str] = util.OrderedSet() + colnamemaps = {} + types = {} + for key in table_map: + table = table_map[key] + + table = coercions.expect( + roles.StrictFromClauseRole, table, allow_select=True + ) + table_map[key] = table + + m = {} + for c in table.c: + if c.key == typecolname: + raise sa_exc.InvalidRequestError( + "Polymorphic union can't use '%s' as the discriminator " + "column due to mapped column %r; please apply the " + "'typecolname' " + "argument; this is available on " + "ConcreteBase as '_concrete_discriminator_name'" + % (typecolname, c) + ) + colnames.add(c.key) + m[c.key] = c + types[c.key] = c.type + colnamemaps[table] = m + + def col(name, table): + try: + return colnamemaps[table][name] + except KeyError: + if cast_nulls: + return sql.cast(sql.null(), types[name]).label(name) + else: + return sql.type_coerce(sql.null(), types[name]).label(name) + + result = [] + for type_, table in table_map.items(): + if typecolname is not None: + result.append( + sql.select( + *( + [col(name, table) for name in colnames] + + [ + sql.literal_column( + sql_util._quote_ddl_expr(type_) + ).label(typecolname) + ] + ) + ).select_from(table) + ) + else: + result.append( + sql.select( + *[col(name, table) for name in colnames] + ).select_from(table) + ) + return sql.union_all(*result).alias(aliasname) + + +def identity_key( + class_: Optional[Type[_T]] = None, + ident: Union[Any, Tuple[Any, ...]] = None, + *, + instance: Optional[_T] = None, + row: Optional[Union[Row[Any], RowMapping]] = None, + identity_token: Optional[Any] = None, +) -> _IdentityKeyType[_T]: + r"""Generate "identity key" tuples, as are used as keys in the + :attr:`.Session.identity_map` dictionary. + + This function has several call styles: + + * ``identity_key(class, ident, identity_token=token)`` + + This form receives a mapped class and a primary key scalar or + tuple as an argument. + + E.g.:: + + >>> identity_key(MyClass, (1, 2)) + (, (1, 2), None) + + :param class: mapped class (must be a positional argument) + :param ident: primary key, may be a scalar or tuple argument. + :param identity_token: optional identity token + + .. versionadded:: 1.2 added identity_token + + + * ``identity_key(instance=instance)`` + + This form will produce the identity key for a given instance. The + instance need not be persistent, only that its primary key attributes + are populated (else the key will contain ``None`` for those missing + values). + + E.g.:: + + >>> instance = MyClass(1, 2) + >>> identity_key(instance=instance) + (, (1, 2), None) + + In this form, the given instance is ultimately run though + :meth:`_orm.Mapper.identity_key_from_instance`, which will have the + effect of performing a database check for the corresponding row + if the object is expired. + + :param instance: object instance (must be given as a keyword arg) + + * ``identity_key(class, row=row, identity_token=token)`` + + This form is similar to the class/tuple form, except is passed a + database result row as a :class:`.Row` or :class:`.RowMapping` object. + + E.g.:: + + >>> row = engine.execute(text("select * from table where a=1 and b=2")).first() + >>> identity_key(MyClass, row=row) + (, (1, 2), None) + + :param class: mapped class (must be a positional argument) + :param row: :class:`.Row` row returned by a :class:`_engine.CursorResult` + (must be given as a keyword arg) + :param identity_token: optional identity token + + .. 
versionadded:: 1.2 added identity_token + + """ # noqa: E501 + if class_ is not None: + mapper = class_mapper(class_) + if row is None: + if ident is None: + raise sa_exc.ArgumentError("ident or row is required") + return mapper.identity_key_from_primary_key( + tuple(util.to_list(ident)), identity_token=identity_token + ) + else: + return mapper.identity_key_from_row( + row, identity_token=identity_token + ) + elif instance is not None: + mapper = object_mapper(instance) + return mapper.identity_key_from_instance(instance) + else: + raise sa_exc.ArgumentError("class or instance is required") + + +class _TraceAdaptRole(enum.Enum): + """Enumeration of all the use cases for ORMAdapter. + + ORMAdapter remains one of the most complicated aspects of the ORM, as it is + used for in-place adaption of column expressions to be applied to a SELECT, + replacing :class:`.Table` and other objects that are mapped to classes with + aliases of those tables in the case of joined eager loading, or in the case + of polymorphic loading as used with concrete mappings or other custom "with + polymorphic" parameters, with whole user-defined subqueries. The + enumerations provide an overview of all the use cases used by ORMAdapter, a + layer of formality as to the introduction of new ORMAdapter use cases (of + which none are anticipated), as well as a means to trace the origins of a + particular ORMAdapter within runtime debugging. + + SQLAlchemy 2.0 has greatly scaled back ORM features which relied heavily on + open-ended statement adaption, including the ``Query.with_polymorphic()`` + method and the ``Query.select_from_entity()`` methods, favoring + user-explicit aliasing schemes using the ``aliased()`` and + ``with_polymorphic()`` standalone constructs; these still use adaption, + however the adaption is applied in a narrower scope. + + """ + + # aliased() use that is used to adapt individual attributes at query + # construction time + ALIASED_INSP = enum.auto() + + # joinedload cases; typically adapt an ON clause of a relationship + # join + JOINEDLOAD_USER_DEFINED_ALIAS = enum.auto() + JOINEDLOAD_PATH_WITH_POLYMORPHIC = enum.auto() + JOINEDLOAD_MEMOIZED_ADAPTER = enum.auto() + + # polymorphic cases - these are complex ones that replace FROM + # clauses, replacing tables with subqueries + MAPPER_POLYMORPHIC_ADAPTER = enum.auto() + WITH_POLYMORPHIC_ADAPTER = enum.auto() + WITH_POLYMORPHIC_ADAPTER_RIGHT_JOIN = enum.auto() + DEPRECATED_JOIN_ADAPT_RIGHT_SIDE = enum.auto() + + # the from_statement() case, used only to adapt individual attributes + # from a given statement to local ORM attributes at result fetching + # time. assigned to ORMCompileState._from_obj_alias + ADAPT_FROM_STATEMENT = enum.auto() + + # the joinedload for queries that have LIMIT/OFFSET/DISTINCT case; + # the query is placed inside of a subquery with the LIMIT/OFFSET/etc., + # joinedloads are then placed on the outside. + # assigned to ORMCompileState.compound_eager_adapter + COMPOUND_EAGER_STATEMENT = enum.auto() + + # the legacy Query._set_select_from() case. + # this is needed for Query's set operations (i.e. UNION, etc. ) + # as well as "legacy from_self()", which while removed from 2.0 as + # public API, is used for the Query.count() method. 
this one + # still does full statement traversal + # assigned to ORMCompileState._from_obj_alias + LEGACY_SELECT_FROM_ALIAS = enum.auto() + + +class ORMStatementAdapter(sql_util.ColumnAdapter): + """ColumnAdapter which includes a role attribute.""" + + __slots__ = ("role",) + + def __init__( + self, + role: _TraceAdaptRole, + selectable: Selectable, + *, + equivalents: Optional[_EquivalentColumnMap] = None, + adapt_required: bool = False, + allow_label_resolve: bool = True, + anonymize_labels: bool = False, + adapt_on_names: bool = False, + adapt_from_selectables: Optional[AbstractSet[FromClause]] = None, + ): + self.role = role + super().__init__( + selectable, + equivalents=equivalents, + adapt_required=adapt_required, + allow_label_resolve=allow_label_resolve, + anonymize_labels=anonymize_labels, + adapt_on_names=adapt_on_names, + adapt_from_selectables=adapt_from_selectables, + ) + + +class ORMAdapter(sql_util.ColumnAdapter): + """ColumnAdapter subclass which excludes adaptation of entities from + non-matching mappers. + + """ + + __slots__ = ("role", "mapper", "is_aliased_class", "aliased_insp") + + is_aliased_class: bool + aliased_insp: Optional[AliasedInsp[Any]] + + def __init__( + self, + role: _TraceAdaptRole, + entity: _InternalEntityType[Any], + *, + equivalents: Optional[_EquivalentColumnMap] = None, + adapt_required: bool = False, + allow_label_resolve: bool = True, + anonymize_labels: bool = False, + selectable: Optional[Selectable] = None, + limit_on_entity: bool = True, + adapt_on_names: bool = False, + adapt_from_selectables: Optional[AbstractSet[FromClause]] = None, + ): + self.role = role + self.mapper = entity.mapper + if selectable is None: + selectable = entity.selectable + if insp_is_aliased_class(entity): + self.is_aliased_class = True + self.aliased_insp = entity + else: + self.is_aliased_class = False + self.aliased_insp = None + + super().__init__( + selectable, + equivalents, + adapt_required=adapt_required, + allow_label_resolve=allow_label_resolve, + anonymize_labels=anonymize_labels, + include_fn=self._include_fn if limit_on_entity else None, + adapt_on_names=adapt_on_names, + adapt_from_selectables=adapt_from_selectables, + ) + + def _include_fn(self, elem): + entity = elem._annotations.get("parentmapper", None) + + return not entity or entity.isa(self.mapper) or self.mapper.isa(entity) + + +class AliasedClass( + inspection.Inspectable["AliasedInsp[_O]"], ORMColumnsClauseRole[_O] +): + r"""Represents an "aliased" form of a mapped class for usage with Query. + + The ORM equivalent of a :func:`~sqlalchemy.sql.expression.alias` + construct, this object mimics the mapped class using a + ``__getattr__`` scheme and maintains a reference to a + real :class:`~sqlalchemy.sql.expression.Alias` object. + + A primary purpose of :class:`.AliasedClass` is to serve as an alternate + within a SQL statement generated by the ORM, such that an existing + mapped entity can be used in multiple contexts. A simple example:: + + # find all pairs of users with the same name + user_alias = aliased(User) + session.query(User, user_alias).join( + (user_alias, User.id > user_alias.id) + ).filter(User.name == user_alias.name) + + :class:`.AliasedClass` is also capable of mapping an existing mapped + class to an entirely new selectable, provided this selectable is column- + compatible with the existing mapped selectable, and it can also be + configured in a mapping as the target of a :func:`_orm.relationship`. + See the links below for examples. 
+ + The :class:`.AliasedClass` object is constructed typically using the + :func:`_orm.aliased` function. It also is produced with additional + configuration when using the :func:`_orm.with_polymorphic` function. + + The resulting object is an instance of :class:`.AliasedClass`. + This object implements an attribute scheme which produces the + same attribute and method interface as the original mapped + class, allowing :class:`.AliasedClass` to be compatible + with any attribute technique which works on the original class, + including hybrid attributes (see :ref:`hybrids_toplevel`). + + The :class:`.AliasedClass` can be inspected for its underlying + :class:`_orm.Mapper`, aliased selectable, and other information + using :func:`_sa.inspect`:: + + from sqlalchemy import inspect + + my_alias = aliased(MyClass) + insp = inspect(my_alias) + + The resulting inspection object is an instance of :class:`.AliasedInsp`. + + + .. seealso:: + + :func:`.aliased` + + :func:`.with_polymorphic` + + :ref:`relationship_aliased_class` + + :ref:`relationship_to_window_function` + + + """ + + __name__: str + + def __init__( + self, + mapped_class_or_ac: _EntityType[_O], + alias: Optional[FromClause] = None, + name: Optional[str] = None, + flat: bool = False, + adapt_on_names: bool = False, + with_polymorphic_mappers: Optional[Sequence[Mapper[Any]]] = None, + with_polymorphic_discriminator: Optional[ColumnElement[Any]] = None, + base_alias: Optional[AliasedInsp[Any]] = None, + use_mapper_path: bool = False, + represents_outer_join: bool = False, + ): + insp = cast( + "_InternalEntityType[_O]", inspection.inspect(mapped_class_or_ac) + ) + mapper = insp.mapper + + nest_adapters = False + + if alias is None: + if insp.is_aliased_class and insp.selectable._is_subquery: + alias = insp.selectable.alias() + else: + alias = ( + mapper._with_polymorphic_selectable._anonymous_fromclause( + name=name, + flat=flat, + ) + ) + elif insp.is_aliased_class: + nest_adapters = True + + assert alias is not None + self._aliased_insp = AliasedInsp( + self, + insp, + alias, + name, + ( + with_polymorphic_mappers + if with_polymorphic_mappers + else mapper.with_polymorphic_mappers + ), + ( + with_polymorphic_discriminator + if with_polymorphic_discriminator is not None + else mapper.polymorphic_on + ), + base_alias, + use_mapper_path, + adapt_on_names, + represents_outer_join, + nest_adapters, + ) + + self.__name__ = f"aliased({mapper.class_.__name__})" + + @classmethod + def _reconstitute_from_aliased_insp( + cls, aliased_insp: AliasedInsp[_O] + ) -> AliasedClass[_O]: + obj = cls.__new__(cls) + obj.__name__ = f"aliased({aliased_insp.mapper.class_.__name__})" + obj._aliased_insp = aliased_insp + + if aliased_insp._is_with_polymorphic: + for sub_aliased_insp in aliased_insp._with_polymorphic_entities: + if sub_aliased_insp is not aliased_insp: + ent = AliasedClass._reconstitute_from_aliased_insp( + sub_aliased_insp + ) + setattr(obj, sub_aliased_insp.class_.__name__, ent) + + return obj + + def __getattr__(self, key: str) -> Any: + try: + _aliased_insp = self.__dict__["_aliased_insp"] + except KeyError: + raise AttributeError() + else: + target = _aliased_insp._target + # maintain all getattr mechanics + attr = getattr(target, key) + + # attribute is a method, that will be invoked against a + # "self"; so just return a new method with the same function and + # new self + if hasattr(attr, "__call__") and hasattr(attr, "__self__"): + return types.MethodType(attr.__func__, self) + + # attribute is a descriptor, that will be invoked 
against a + # "self"; so invoke the descriptor against this self + if hasattr(attr, "__get__"): + attr = attr.__get__(None, self) + + # attributes within the QueryableAttribute system will want this + # to be invoked so the object can be adapted + if hasattr(attr, "adapt_to_entity"): + attr = attr.adapt_to_entity(_aliased_insp) + setattr(self, key, attr) + + return attr + + def _get_from_serialized( + self, key: str, mapped_class: _O, aliased_insp: AliasedInsp[_O] + ) -> Any: + # this method is only used in terms of the + # sqlalchemy.ext.serializer extension + attr = getattr(mapped_class, key) + if hasattr(attr, "__call__") and hasattr(attr, "__self__"): + return types.MethodType(attr.__func__, self) + + # attribute is a descriptor, that will be invoked against a + # "self"; so invoke the descriptor against this self + if hasattr(attr, "__get__"): + attr = attr.__get__(None, self) + + # attributes within the QueryableAttribute system will want this + # to be invoked so the object can be adapted + if hasattr(attr, "adapt_to_entity"): + aliased_insp._weak_entity = weakref.ref(self) + attr = attr.adapt_to_entity(aliased_insp) + setattr(self, key, attr) + + return attr + + def __repr__(self) -> str: + return "" % ( + id(self), + self._aliased_insp._target.__name__, + ) + + def __str__(self) -> str: + return str(self._aliased_insp) + + +@inspection._self_inspects +class AliasedInsp( + ORMEntityColumnsClauseRole[_O], + ORMFromClauseRole, + HasCacheKey, + InspectionAttr, + MemoizedSlots, + inspection.Inspectable["AliasedInsp[_O]"], + Generic[_O], +): + """Provide an inspection interface for an + :class:`.AliasedClass` object. + + The :class:`.AliasedInsp` object is returned + given an :class:`.AliasedClass` using the + :func:`_sa.inspect` function:: + + from sqlalchemy import inspect + from sqlalchemy.orm import aliased + + my_alias = aliased(MyMappedClass) + insp = inspect(my_alias) + + Attributes on :class:`.AliasedInsp` + include: + + * ``entity`` - the :class:`.AliasedClass` represented. + * ``mapper`` - the :class:`_orm.Mapper` mapping the underlying class. + * ``selectable`` - the :class:`_expression.Alias` + construct which ultimately + represents an aliased :class:`_schema.Table` or + :class:`_expression.Select` + construct. + * ``name`` - the name of the alias. Also is used as the attribute + name when returned in a result tuple from :class:`_query.Query`. + * ``with_polymorphic_mappers`` - collection of :class:`_orm.Mapper` + objects + indicating all those mappers expressed in the select construct + for the :class:`.AliasedClass`. + * ``polymorphic_on`` - an alternate column or SQL expression which + will be used as the "discriminator" for a polymorphic load. + + .. 
seealso:: + + :ref:`inspection_toplevel` + + """ + + __slots__ = ( + "__weakref__", + "_weak_entity", + "mapper", + "selectable", + "name", + "_adapt_on_names", + "with_polymorphic_mappers", + "polymorphic_on", + "_use_mapper_path", + "_base_alias", + "represents_outer_join", + "persist_selectable", + "local_table", + "_is_with_polymorphic", + "_with_polymorphic_entities", + "_adapter", + "_target", + "__clause_element__", + "_memoized_values", + "_all_column_expressions", + "_nest_adapters", + ) + + _cache_key_traversal = [ + ("name", visitors.ExtendedInternalTraversal.dp_string), + ("_adapt_on_names", visitors.ExtendedInternalTraversal.dp_boolean), + ("_use_mapper_path", visitors.ExtendedInternalTraversal.dp_boolean), + ("_target", visitors.ExtendedInternalTraversal.dp_inspectable), + ("selectable", visitors.ExtendedInternalTraversal.dp_clauseelement), + ( + "with_polymorphic_mappers", + visitors.InternalTraversal.dp_has_cache_key_list, + ), + ("polymorphic_on", visitors.InternalTraversal.dp_clauseelement), + ] + + mapper: Mapper[_O] + selectable: FromClause + _adapter: ORMAdapter + with_polymorphic_mappers: Sequence[Mapper[Any]] + _with_polymorphic_entities: Sequence[AliasedInsp[Any]] + + _weak_entity: weakref.ref[AliasedClass[_O]] + """the AliasedClass that refers to this AliasedInsp""" + + _target: Union[Type[_O], AliasedClass[_O]] + """the thing referenced by the AliasedClass/AliasedInsp. + + In the vast majority of cases, this is the mapped class. However + it may also be another AliasedClass (alias of alias). + + """ + + def __init__( + self, + entity: AliasedClass[_O], + inspected: _InternalEntityType[_O], + selectable: FromClause, + name: Optional[str], + with_polymorphic_mappers: Optional[Sequence[Mapper[Any]]], + polymorphic_on: Optional[ColumnElement[Any]], + _base_alias: Optional[AliasedInsp[Any]], + _use_mapper_path: bool, + adapt_on_names: bool, + represents_outer_join: bool, + nest_adapters: bool, + ): + mapped_class_or_ac = inspected.entity + mapper = inspected.mapper + + self._weak_entity = weakref.ref(entity) + self.mapper = mapper + self.selectable = self.persist_selectable = self.local_table = ( + selectable + ) + self.name = name + self.polymorphic_on = polymorphic_on + self._base_alias = weakref.ref(_base_alias or self) + self._use_mapper_path = _use_mapper_path + self.represents_outer_join = represents_outer_join + self._nest_adapters = nest_adapters + + if with_polymorphic_mappers: + self._is_with_polymorphic = True + self.with_polymorphic_mappers = with_polymorphic_mappers + self._with_polymorphic_entities = [] + for poly in self.with_polymorphic_mappers: + if poly is not mapper: + ent = AliasedClass( + poly.class_, + selectable, + base_alias=self, + adapt_on_names=adapt_on_names, + use_mapper_path=_use_mapper_path, + ) + + setattr(self.entity, poly.class_.__name__, ent) + self._with_polymorphic_entities.append(ent._aliased_insp) + + else: + self._is_with_polymorphic = False + self.with_polymorphic_mappers = [mapper] + + self._adapter = ORMAdapter( + _TraceAdaptRole.ALIASED_INSP, + mapper, + selectable=selectable, + equivalents=mapper._equivalent_columns, + adapt_on_names=adapt_on_names, + anonymize_labels=True, + # make sure the adapter doesn't try to grab other tables that + # are not even the thing we are mapping, such as embedded + # selectables in subqueries or CTEs. 
See issue #6060 + adapt_from_selectables={ + m.selectable + for m in self.with_polymorphic_mappers + if not adapt_on_names + }, + limit_on_entity=False, + ) + + if nest_adapters: + # supports "aliased class of aliased class" use case + assert isinstance(inspected, AliasedInsp) + self._adapter = inspected._adapter.wrap(self._adapter) + + self._adapt_on_names = adapt_on_names + self._target = mapped_class_or_ac + + @classmethod + def _alias_factory( + cls, + element: Union[_EntityType[_O], FromClause], + alias: Optional[FromClause] = None, + name: Optional[str] = None, + flat: bool = False, + adapt_on_names: bool = False, + ) -> Union[AliasedClass[_O], FromClause]: + if isinstance(element, FromClause): + if adapt_on_names: + raise sa_exc.ArgumentError( + "adapt_on_names only applies to ORM elements" + ) + if name: + return element.alias(name=name, flat=flat) + else: + return coercions.expect( + roles.AnonymizedFromClauseRole, element, flat=flat + ) + else: + return AliasedClass( + element, + alias=alias, + flat=flat, + name=name, + adapt_on_names=adapt_on_names, + ) + + @classmethod + def _with_polymorphic_factory( + cls, + base: Union[Type[_O], Mapper[_O]], + classes: Union[Literal["*"], Iterable[_EntityType[Any]]], + selectable: Union[Literal[False, None], FromClause] = False, + flat: bool = False, + polymorphic_on: Optional[ColumnElement[Any]] = None, + aliased: bool = False, + innerjoin: bool = False, + adapt_on_names: bool = False, + name: Optional[str] = None, + _use_mapper_path: bool = False, + ) -> AliasedClass[_O]: + primary_mapper = _class_to_mapper(base) + + if selectable not in (None, False) and flat: + raise sa_exc.ArgumentError( + "the 'flat' and 'selectable' arguments cannot be passed " + "simultaneously to with_polymorphic()" + ) + + mappers, selectable = primary_mapper._with_polymorphic_args( + classes, selectable, innerjoin=innerjoin + ) + if aliased or flat: + assert selectable is not None + selectable = selectable._anonymous_fromclause(flat=flat) + + return AliasedClass( + base, + selectable, + name=name, + with_polymorphic_mappers=mappers, + adapt_on_names=adapt_on_names, + with_polymorphic_discriminator=polymorphic_on, + use_mapper_path=_use_mapper_path, + represents_outer_join=not innerjoin, + ) + + @property + def entity(self) -> AliasedClass[_O]: + # to eliminate reference cycles, the AliasedClass is held weakly. + # this produces some situations where the AliasedClass gets lost, + # particularly when one is created internally and only the AliasedInsp + # is passed around. + # to work around this case, we just generate a new one when we need + # it, as it is a simple class with very little initial state on it. 
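# The ``entity`` accessor below holds its AliasedClass through a weak
# reference and simply rebuilds it when the referent has been garbage
# collected, as the comment above describes.  A generic sketch of that
# pattern in plain Python (names hypothetical):
import weakref


class Handle:
    """Hold a target weakly; rebuild it on demand if it was collected."""

    def __init__(self, factory):
        self._factory = factory
        self._weak_target = weakref.ref(factory())

    @property
    def target(self):
        obj = self._weak_target()
        if obj is None:
            # the weakly-held object was collected; recreate it and
            # re-point the weak reference at the fresh instance
            obj = self._factory()
            self._weak_target = weakref.ref(obj)
        return obj


class Thing:
    pass


handle = Handle(Thing)
print(handle.target)  # always yields a live Thing, regenerating as needed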
+ ent = self._weak_entity() + if ent is None: + ent = AliasedClass._reconstitute_from_aliased_insp(self) + self._weak_entity = weakref.ref(ent) + return ent + + is_aliased_class = True + "always returns True" + + def _memoized_method___clause_element__(self) -> FromClause: + return self.selectable._annotate( + { + "parentmapper": self.mapper, + "parententity": self, + "entity_namespace": self, + } + )._set_propagate_attrs( + {"compile_state_plugin": "orm", "plugin_subject": self} + ) + + @property + def entity_namespace(self) -> AliasedClass[_O]: + return self.entity + + @property + def class_(self) -> Type[_O]: + """Return the mapped class ultimately represented by this + :class:`.AliasedInsp`.""" + return self.mapper.class_ + + @property + def _path_registry(self) -> AbstractEntityRegistry: + if self._use_mapper_path: + return self.mapper._path_registry + else: + return PathRegistry.per_mapper(self) + + def __getstate__(self) -> Dict[str, Any]: + return { + "entity": self.entity, + "mapper": self.mapper, + "alias": self.selectable, + "name": self.name, + "adapt_on_names": self._adapt_on_names, + "with_polymorphic_mappers": self.with_polymorphic_mappers, + "with_polymorphic_discriminator": self.polymorphic_on, + "base_alias": self._base_alias(), + "use_mapper_path": self._use_mapper_path, + "represents_outer_join": self.represents_outer_join, + "nest_adapters": self._nest_adapters, + } + + def __setstate__(self, state: Dict[str, Any]) -> None: + self.__init__( # type: ignore + state["entity"], + state["mapper"], + state["alias"], + state["name"], + state["with_polymorphic_mappers"], + state["with_polymorphic_discriminator"], + state["base_alias"], + state["use_mapper_path"], + state["adapt_on_names"], + state["represents_outer_join"], + state["nest_adapters"], + ) + + def _merge_with(self, other: AliasedInsp[_O]) -> AliasedInsp[_O]: + # assert self._is_with_polymorphic + # assert other._is_with_polymorphic + + primary_mapper = other.mapper + + assert self.mapper is primary_mapper + + our_classes = util.to_set( + mp.class_ for mp in self.with_polymorphic_mappers + ) + new_classes = {mp.class_ for mp in other.with_polymorphic_mappers} + if our_classes == new_classes: + return other + else: + classes = our_classes.union(new_classes) + + mappers, selectable = primary_mapper._with_polymorphic_args( + classes, None, innerjoin=not other.represents_outer_join + ) + selectable = selectable._anonymous_fromclause(flat=True) + return AliasedClass( + primary_mapper, + selectable, + with_polymorphic_mappers=mappers, + with_polymorphic_discriminator=other.polymorphic_on, + use_mapper_path=other._use_mapper_path, + represents_outer_join=other.represents_outer_join, + )._aliased_insp + + def _adapt_element( + self, expr: _ORMCOLEXPR, key: Optional[str] = None + ) -> _ORMCOLEXPR: + assert isinstance(expr, ColumnElement) + d: Dict[str, Any] = { + "parententity": self, + "parentmapper": self.mapper, + } + if key: + d["proxy_key"] = key + + # IMO mypy should see this one also as returning the same type + # we put into it, but it's not + return ( + self._adapter.traverse(expr) + ._annotate(d) + ._set_propagate_attrs( + {"compile_state_plugin": "orm", "plugin_subject": self} + ) + ) + + if TYPE_CHECKING: + # establish compatibility with the _ORMAdapterProto protocol, + # which in turn is compatible with _CoreAdapterProto. + + def _orm_adapt_element( + self, + obj: _CE, + key: Optional[str] = None, + ) -> _CE: ... 
+ + else: + _orm_adapt_element = _adapt_element + + def _entity_for_mapper(self, mapper): + self_poly = self.with_polymorphic_mappers + if mapper in self_poly: + if mapper is self.mapper: + return self + else: + return getattr( + self.entity, mapper.class_.__name__ + )._aliased_insp + elif mapper.isa(self.mapper): + return self + else: + assert False, "mapper %s doesn't correspond to %s" % (mapper, self) + + def _memoized_attr__get_clause(self): + onclause, replacemap = self.mapper._get_clause + return ( + self._adapter.traverse(onclause), + { + self._adapter.traverse(col): param + for col, param in replacemap.items() + }, + ) + + def _memoized_attr__memoized_values(self): + return {} + + def _memoized_attr__all_column_expressions(self): + if self._is_with_polymorphic: + cols_plus_keys = self.mapper._columns_plus_keys( + [ent.mapper for ent in self._with_polymorphic_entities] + ) + else: + cols_plus_keys = self.mapper._columns_plus_keys() + + cols_plus_keys = [ + (key, self._adapt_element(col)) for key, col in cols_plus_keys + ] + + return ColumnCollection(cols_plus_keys) + + def _memo(self, key, callable_, *args, **kw): + if key in self._memoized_values: + return self._memoized_values[key] + else: + self._memoized_values[key] = value = callable_(*args, **kw) + return value + + def __repr__(self): + if self.with_polymorphic_mappers: + with_poly = "(%s)" % ", ".join( + mp.class_.__name__ for mp in self.with_polymorphic_mappers + ) + else: + with_poly = "" + return "" % ( + id(self), + self.class_.__name__, + with_poly, + ) + + def __str__(self): + if self._is_with_polymorphic: + return "with_polymorphic(%s, [%s])" % ( + self._target.__name__, + ", ".join( + mp.class_.__name__ + for mp in self.with_polymorphic_mappers + if mp is not self.mapper + ), + ) + else: + return "aliased(%s)" % (self._target.__name__,) + + +class _WrapUserEntity: + """A wrapper used within the loader_criteria lambda caller so that + we can bypass declared_attr descriptors on unmapped mixins, which + normally emit a warning for such use. + + might also be useful for other per-lambda instrumentations should + the need arise. + + """ + + __slots__ = ("subject",) + + def __init__(self, subject): + self.subject = subject + + @util.preload_module("sqlalchemy.orm.decl_api") + def __getattribute__(self, name): + decl_api = util.preloaded.orm.decl_api + + subject = object.__getattribute__(self, "subject") + if name in subject.__dict__ and isinstance( + subject.__dict__[name], decl_api.declared_attr + ): + return subject.__dict__[name].fget(subject) + else: + return getattr(subject, name) + + +class LoaderCriteriaOption(CriteriaOption): + """Add additional WHERE criteria to the load for all occurrences of + a particular entity. + + :class:`_orm.LoaderCriteriaOption` is invoked using the + :func:`_orm.with_loader_criteria` function; see that function for + details. + + .. 
versionadded:: 1.4 + + """ + + __slots__ = ( + "root_entity", + "entity", + "deferred_where_criteria", + "where_criteria", + "_where_crit_orig", + "include_aliases", + "propagate_to_loaders", + ) + + _traverse_internals = [ + ("root_entity", visitors.ExtendedInternalTraversal.dp_plain_obj), + ("entity", visitors.ExtendedInternalTraversal.dp_has_cache_key), + ("where_criteria", visitors.InternalTraversal.dp_clauseelement), + ("include_aliases", visitors.InternalTraversal.dp_boolean), + ("propagate_to_loaders", visitors.InternalTraversal.dp_boolean), + ] + + root_entity: Optional[Type[Any]] + entity: Optional[_InternalEntityType[Any]] + where_criteria: Union[ColumnElement[bool], lambdas.DeferredLambdaElement] + deferred_where_criteria: bool + include_aliases: bool + propagate_to_loaders: bool + + _where_crit_orig: Any + + def __init__( + self, + entity_or_base: _EntityType[Any], + where_criteria: Union[ + _ColumnExpressionArgument[bool], + Callable[[Any], _ColumnExpressionArgument[bool]], + ], + loader_only: bool = False, + include_aliases: bool = False, + propagate_to_loaders: bool = True, + track_closure_variables: bool = True, + ): + entity = cast( + "_InternalEntityType[Any]", + inspection.inspect(entity_or_base, False), + ) + if entity is None: + self.root_entity = cast("Type[Any]", entity_or_base) + self.entity = None + else: + self.root_entity = None + self.entity = entity + + self._where_crit_orig = where_criteria + if callable(where_criteria): + if self.root_entity is not None: + wrap_entity = self.root_entity + else: + assert entity is not None + wrap_entity = entity.entity + + self.deferred_where_criteria = True + self.where_criteria = lambdas.DeferredLambdaElement( + where_criteria, + roles.WhereHavingRole, + lambda_args=(_WrapUserEntity(wrap_entity),), + opts=lambdas.LambdaOptions( + track_closure_variables=track_closure_variables + ), + ) + else: + self.deferred_where_criteria = False + self.where_criteria = coercions.expect( + roles.WhereHavingRole, where_criteria + ) + + self.include_aliases = include_aliases + self.propagate_to_loaders = propagate_to_loaders + + @classmethod + def _unreduce( + cls, entity, where_criteria, include_aliases, propagate_to_loaders + ): + return LoaderCriteriaOption( + entity, + where_criteria, + include_aliases=include_aliases, + propagate_to_loaders=propagate_to_loaders, + ) + + def __reduce__(self): + return ( + LoaderCriteriaOption._unreduce, + ( + self.entity.class_ if self.entity else self.root_entity, + self._where_crit_orig, + self.include_aliases, + self.propagate_to_loaders, + ), + ) + + def _all_mappers(self) -> Iterator[Mapper[Any]]: + if self.entity: + yield from self.entity.mapper.self_and_descendants + else: + assert self.root_entity + stack = list(self.root_entity.__subclasses__()) + while stack: + subclass = stack.pop(0) + ent = cast( + "_InternalEntityType[Any]", + inspection.inspect(subclass, raiseerr=False), + ) + if ent: + yield from ent.mapper.self_and_descendants + else: + stack.extend(subclass.__subclasses__()) + + def _should_include(self, compile_state: ORMCompileState) -> bool: + if ( + compile_state.select_statement._annotations.get( + "for_loader_criteria", None + ) + is self + ): + return False + return True + + def _resolve_where_criteria( + self, ext_info: _InternalEntityType[Any] + ) -> ColumnElement[bool]: + if self.deferred_where_criteria: + crit = cast( + "ColumnElement[bool]", + self.where_criteria._resolve_with_args(ext_info.entity), + ) + else: + crit = self.where_criteria # type: ignore + assert 
isinstance(crit, ColumnElement) + return sql_util._deep_annotate( + crit, + {"for_loader_criteria": self}, + detect_subquery_cols=True, + ind_cols_on_fromclause=True, + ) + + def process_compile_state_replaced_entities( + self, + compile_state: ORMCompileState, + mapper_entities: Iterable[_MapperEntity], + ) -> None: + self.process_compile_state(compile_state) + + def process_compile_state(self, compile_state: ORMCompileState) -> None: + """Apply a modification to a given :class:`.CompileState`.""" + + # if options to limit the criteria to immediate query only, + # use compile_state.attributes instead + + self.get_global_criteria(compile_state.global_attributes) + + def get_global_criteria(self, attributes: Dict[Any, Any]) -> None: + for mp in self._all_mappers(): + load_criteria = attributes.setdefault( + ("additional_entity_criteria", mp), [] + ) + + load_criteria.append(self) + + +inspection._inspects(AliasedClass)(lambda target: target._aliased_insp) + + +@inspection._inspects(type) +def _inspect_mc( + class_: Type[_O], +) -> Optional[Mapper[_O]]: + try: + class_manager = opt_manager_of_class(class_) + if class_manager is None or not class_manager.is_mapped: + return None + mapper = class_manager.mapper + except exc.NO_STATE: + return None + else: + return mapper + + +GenericAlias = type(List[Any]) + + +@inspection._inspects(GenericAlias) +def _inspect_generic_alias( + class_: Type[_O], +) -> Optional[Mapper[_O]]: + origin = cast("Type[_O]", get_origin(class_)) + return _inspect_mc(origin) + + +@inspection._self_inspects +class Bundle( + ORMColumnsClauseRole[_T], + SupportsCloneAnnotations, + MemoizedHasCacheKey, + inspection.Inspectable["Bundle[_T]"], + InspectionAttr, +): + """A grouping of SQL expressions that are returned by a :class:`.Query` + under one namespace. + + The :class:`.Bundle` essentially allows nesting of the tuple-based + results returned by a column-oriented :class:`_query.Query` object. + It also + is extensible via simple subclassing, where the primary capability + to override is that of how the set of expressions should be returned, + allowing post-processing as well as custom return types, without + involving ORM identity-mapped classes. + + .. seealso:: + + :ref:`bundles` + + + """ + + single_entity = False + """If True, queries for a single Bundle will be returned as a single + entity, rather than an element within a keyed tuple.""" + + is_clause_element = False + + is_mapper = False + + is_aliased_class = False + + is_bundle = True + + _propagate_attrs: _PropagateAttrsType = util.immutabledict() + + proxy_set = util.EMPTY_SET # type: ignore + + exprs: List[_ColumnsClauseElement] + + def __init__( + self, name: str, *exprs: _ColumnExpressionArgument[Any], **kw: Any + ): + r"""Construct a new :class:`.Bundle`. + + e.g.:: + + bn = Bundle("mybundle", MyClass.x, MyClass.y) + + for row in session.query(bn).filter(bn.c.x == 5).filter(bn.c.y == 4): + print(row.mybundle.x, row.mybundle.y) + + :param name: name of the bundle. + :param \*exprs: columns or SQL expressions comprising the bundle. + :param single_entity=False: if True, rows for this :class:`.Bundle` + can be returned as a "single entity" outside of any enclosing tuple + in the same manner as a mapped entity. 
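# ``LoaderCriteriaOption`` above is the object produced by the public
# ``with_loader_criteria()`` function.  A minimal usage sketch (the mapped
# class and its ``deleted`` column are assumptions) that attaches extra WHERE
# criteria to every occurrence of one entity in a statement:
from sqlalchemy import select
from sqlalchemy.orm import (
    DeclarativeBase,
    Mapped,
    mapped_column,
    with_loader_criteria,
)


class Base(DeclarativeBase):
    pass


class Widget(Base):
    __tablename__ = "widget"

    id: Mapped[int] = mapped_column(primary_key=True)
    deleted: Mapped[bool] = mapped_column(default=False)


# include_aliases=True also applies the criteria to aliased(Widget) entities
stmt = select(Widget).options(
    with_loader_criteria(Widget, Widget.deleted == False, include_aliases=True)
)
print(stmt)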
+ + """ # noqa: E501 + self.name = self._label = name + coerced_exprs = [ + coercions.expect( + roles.ColumnsClauseRole, expr, apply_propagate_attrs=self + ) + for expr in exprs + ] + self.exprs = coerced_exprs + + self.c = self.columns = ColumnCollection( + (getattr(col, "key", col._label), col) + for col in [e._annotations.get("bundle", e) for e in coerced_exprs] + ).as_readonly() + self.single_entity = kw.pop("single_entity", self.single_entity) + + def _gen_cache_key( + self, anon_map: anon_map, bindparams: List[BindParameter[Any]] + ) -> Tuple[Any, ...]: + return (self.__class__, self.name, self.single_entity) + tuple( + [expr._gen_cache_key(anon_map, bindparams) for expr in self.exprs] + ) + + @property + def mapper(self) -> Optional[Mapper[Any]]: + mp: Optional[Mapper[Any]] = self.exprs[0]._annotations.get( + "parentmapper", None + ) + return mp + + @property + def entity(self) -> Optional[_InternalEntityType[Any]]: + ie: Optional[_InternalEntityType[Any]] = self.exprs[ + 0 + ]._annotations.get("parententity", None) + return ie + + @property + def entity_namespace( + self, + ) -> ReadOnlyColumnCollection[str, KeyedColumnElement[Any]]: + return self.c + + columns: ReadOnlyColumnCollection[str, KeyedColumnElement[Any]] + + """A namespace of SQL expressions referred to by this :class:`.Bundle`. + + e.g.:: + + bn = Bundle("mybundle", MyClass.x, MyClass.y) + + q = sess.query(bn).filter(bn.c.x == 5) + + Nesting of bundles is also supported:: + + b1 = Bundle( + "b1", + Bundle("b2", MyClass.a, MyClass.b), + Bundle("b3", MyClass.x, MyClass.y), + ) + + q = sess.query(b1).filter(b1.c.b2.c.a == 5).filter(b1.c.b3.c.y == 9) + + .. seealso:: + + :attr:`.Bundle.c` + + """ # noqa: E501 + + c: ReadOnlyColumnCollection[str, KeyedColumnElement[Any]] + """An alias for :attr:`.Bundle.columns`.""" + + def _clone(self, **kw): + cloned = self.__class__.__new__(self.__class__) + cloned.__dict__.update(self.__dict__) + return cloned + + def __clause_element__(self): + # ensure existing entity_namespace remains + annotations = {"bundle": self, "entity_namespace": self} + annotations.update(self._annotations) + + plugin_subject = self.exprs[0]._propagate_attrs.get( + "plugin_subject", self.entity + ) + return ( + expression.ClauseList( + _literal_as_text_role=roles.ColumnsClauseRole, + group=False, + *[e._annotations.get("bundle", e) for e in self.exprs], + ) + ._annotate(annotations) + ._set_propagate_attrs( + # the Bundle *must* use the orm plugin no matter what. the + # subject can be None but it's much better if it's not. + { + "compile_state_plugin": "orm", + "plugin_subject": plugin_subject, + } + ) + ) + + @property + def clauses(self): + return self.__clause_element__().clauses + + def label(self, name): + """Provide a copy of this :class:`.Bundle` passing a new label.""" + + cloned = self._clone() + cloned.name = name + return cloned + + def create_row_processor( + self, + query: Select[Any], + procs: Sequence[Callable[[Row[Any]], Any]], + labels: Sequence[str], + ) -> Callable[[Row[Any]], Any]: + """Produce the "row processing" function for this :class:`.Bundle`. + + May be overridden by subclasses to provide custom behaviors when + results are fetched. The method is passed the statement object and a + set of "row processor" functions at query execution time; these + processor functions when given a result row will return the individual + attribute value, which can then be adapted into any kind of return data + structure. 
+ + The example below illustrates replacing the usual :class:`.Row` + return structure with a straight Python dictionary:: + + from sqlalchemy.orm import Bundle + + + class DictBundle(Bundle): + def create_row_processor(self, query, procs, labels): + "Override create_row_processor to return values as dictionaries" + + def proc(row): + return dict(zip(labels, (proc(row) for proc in procs))) + + return proc + + A result from the above :class:`_orm.Bundle` will return dictionary + values:: + + bn = DictBundle("mybundle", MyClass.data1, MyClass.data2) + for row in session.execute(select(bn)).where(bn.c.data1 == "d1"): + print(row.mybundle["data1"], row.mybundle["data2"]) + + """ # noqa: E501 + keyed_tuple = result_tuple(labels, [() for l in labels]) + + def proc(row: Row[Any]) -> Any: + return keyed_tuple([proc(row) for proc in procs]) + + return proc + + +def _orm_annotate(element: _SA, exclude: Optional[Any] = None) -> _SA: + """Deep copy the given ClauseElement, annotating each element with the + "_orm_adapt" flag. + + Elements within the exclude collection will be cloned but not annotated. + + """ + return sql_util._deep_annotate(element, {"_orm_adapt": True}, exclude) + + +def _orm_deannotate(element: _SA) -> _SA: + """Remove annotations that link a column to a particular mapping. + + Note this doesn't affect "remote" and "foreign" annotations + passed by the :func:`_orm.foreign` and :func:`_orm.remote` + annotators. + + """ + + return sql_util._deep_deannotate( + element, values=("_orm_adapt", "parententity") + ) + + +def _orm_full_deannotate(element: _SA) -> _SA: + return sql_util._deep_deannotate(element) + + +class _ORMJoin(expression.Join): + """Extend Join to support ORM constructs as input.""" + + __visit_name__ = expression.Join.__visit_name__ + + inherit_cache = True + + def __init__( + self, + left: _FromClauseArgument, + right: _FromClauseArgument, + onclause: Optional[_OnClauseArgument] = None, + isouter: bool = False, + full: bool = False, + _left_memo: Optional[Any] = None, + _right_memo: Optional[Any] = None, + _extra_criteria: Tuple[ColumnElement[bool], ...] 
= (), + ): + left_info = cast( + "Union[FromClause, _InternalEntityType[Any]]", + inspection.inspect(left), + ) + + right_info = cast( + "Union[FromClause, _InternalEntityType[Any]]", + inspection.inspect(right), + ) + adapt_to = right_info.selectable + + # used by joined eager loader + self._left_memo = _left_memo + self._right_memo = _right_memo + + if isinstance(onclause, attributes.QueryableAttribute): + if TYPE_CHECKING: + assert isinstance( + onclause.comparator, RelationshipProperty.Comparator + ) + on_selectable = onclause.comparator._source_selectable() + prop = onclause.property + _extra_criteria += onclause._extra_criteria + elif isinstance(onclause, MapperProperty): + # used internally by joined eager loader...possibly not ideal + prop = onclause + on_selectable = prop.parent.selectable + else: + prop = None + on_selectable = None + + left_selectable = left_info.selectable + if prop: + adapt_from: Optional[FromClause] + if sql_util.clause_is_present(on_selectable, left_selectable): + adapt_from = on_selectable + else: + assert isinstance(left_selectable, FromClause) + adapt_from = left_selectable + + ( + pj, + sj, + source, + dest, + secondary, + target_adapter, + ) = prop._create_joins( + source_selectable=adapt_from, + dest_selectable=adapt_to, + source_polymorphic=True, + of_type_entity=right_info, + alias_secondary=True, + extra_criteria=_extra_criteria, + ) + + if sj is not None: + if isouter: + # note this is an inner join from secondary->right + right = sql.join(secondary, right, sj) + onclause = pj + else: + left = sql.join(left, secondary, pj, isouter) + onclause = sj + else: + onclause = pj + + self._target_adapter = target_adapter + + # we don't use the normal coercions logic for _ORMJoin + # (probably should), so do some gymnastics to get the entity. + # logic here is for #8721, which was a major bug in 1.4 + # for almost two years, not reported/fixed until 1.4.43 (!) + if is_selectable(left_info): + parententity = left_selectable._annotations.get( + "parententity", None + ) + elif insp_is_mapper(left_info) or insp_is_aliased_class(left_info): + parententity = left_info + else: + parententity = None + + if parententity is not None: + self._annotations = self._annotations.union( + {"parententity": parententity} + ) + + augment_onclause = bool(_extra_criteria) and not prop + expression.Join.__init__(self, left, right, onclause, isouter, full) + + assert self.onclause is not None + + if augment_onclause: + self.onclause &= sql.and_(*_extra_criteria) + + if ( + not prop + and getattr(right_info, "mapper", None) + and right_info.mapper.single # type: ignore + ): + right_info = cast("_InternalEntityType[Any]", right_info) + # if single inheritance target and we are using a manual + # or implicit ON clause, augment it the same way we'd augment the + # WHERE. + single_crit = right_info.mapper._single_table_criterion + if single_crit is not None: + if insp_is_aliased_class(right_info): + single_crit = right_info._adapter.traverse(single_crit) + self.onclause = self.onclause & single_crit + + def _splice_into_center(self, other): + """Splice a join into the center. 
+ + Given join(a, b) and join(b, c), return join(a, b).join(c) + + """ + leftmost = other + while isinstance(leftmost, sql.Join): + leftmost = leftmost.left + + assert self.right is leftmost + + left = _ORMJoin( + self.left, + other.left, + self.onclause, + isouter=self.isouter, + _left_memo=self._left_memo, + _right_memo=other._left_memo._path_registry, + ) + + return _ORMJoin( + left, + other.right, + other.onclause, + isouter=other.isouter, + _right_memo=other._right_memo, + ) + + def join( + self, + right: _FromClauseArgument, + onclause: Optional[_OnClauseArgument] = None, + isouter: bool = False, + full: bool = False, + ) -> _ORMJoin: + return _ORMJoin(self, right, onclause, full=full, isouter=isouter) + + def outerjoin( + self, + right: _FromClauseArgument, + onclause: Optional[_OnClauseArgument] = None, + full: bool = False, + ) -> _ORMJoin: + return _ORMJoin(self, right, onclause, isouter=True, full=full) + + +def with_parent( + instance: object, + prop: attributes.QueryableAttribute[Any], + from_entity: Optional[_EntityType[Any]] = None, +) -> ColumnElement[bool]: + """Create filtering criterion that relates this query's primary entity + to the given related instance, using established + :func:`_orm.relationship()` + configuration. + + E.g.:: + + stmt = select(Address).where(with_parent(some_user, User.addresses)) + + The SQL rendered is the same as that rendered when a lazy loader + would fire off from the given parent on that attribute, meaning + that the appropriate state is taken from the parent object in + Python without the need to render joins to the parent table + in the rendered statement. + + The given property may also make use of :meth:`_orm.PropComparator.of_type` + to indicate the left side of the criteria:: + + + a1 = aliased(Address) + a2 = aliased(Address) + stmt = select(a1, a2).where(with_parent(u1, User.addresses.of_type(a2))) + + The above use is equivalent to using the + :func:`_orm.with_parent.from_entity` argument:: + + a1 = aliased(Address) + a2 = aliased(Address) + stmt = select(a1, a2).where( + with_parent(u1, User.addresses, from_entity=a2) + ) + + :param instance: + An instance which has some :func:`_orm.relationship`. + + :param property: + Class-bound attribute, which indicates + what relationship from the instance should be used to reconcile the + parent/child relationship. + + :param from_entity: + Entity in which to consider as the left side. This defaults to the + "zero" entity of the :class:`_query.Query` itself. + + .. versionadded:: 1.2 + + """ # noqa: E501 + prop_t: RelationshipProperty[Any] + + if isinstance(prop, str): + raise sa_exc.ArgumentError( + "with_parent() accepts class-bound mapped attributes, not strings" + ) + elif isinstance(prop, attributes.QueryableAttribute): + if prop._of_type: + from_entity = prop._of_type + mapper_property = prop.property + if mapper_property is None or not prop_is_relationship( + mapper_property + ): + raise sa_exc.ArgumentError( + f"Expected relationship property for with_parent(), " + f"got {mapper_property}" + ) + prop_t = mapper_property + else: + prop_t = prop + + return prop_t._with_parent(instance, from_entity=from_entity) + + +def has_identity(object_: object) -> bool: + """Return True if the given object has a database + identity. + + This typically corresponds to the object being + in either the persistent or detached state. + + .. 
seealso:: + + :func:`.was_deleted` + + """ + state = attributes.instance_state(object_) + return state.has_identity + + +def was_deleted(object_: object) -> bool: + """Return True if the given object was deleted + within a session flush. + + This is regardless of whether or not the object is + persistent or detached. + + .. seealso:: + + :attr:`.InstanceState.was_deleted` + + """ + + state = attributes.instance_state(object_) + return state.was_deleted + + +def _entity_corresponds_to( + given: _InternalEntityType[Any], entity: _InternalEntityType[Any] +) -> bool: + """determine if 'given' corresponds to 'entity', in terms + of an entity passed to Query that would match the same entity + being referred to elsewhere in the query. + + """ + if insp_is_aliased_class(entity): + if insp_is_aliased_class(given): + if entity._base_alias() is given._base_alias(): + return True + return False + elif insp_is_aliased_class(given): + if given._use_mapper_path: + return entity in given.with_polymorphic_mappers + else: + return entity is given + + assert insp_is_mapper(given) + return entity.common_parent(given) + + +def _entity_corresponds_to_use_path_impl( + given: _InternalEntityType[Any], entity: _InternalEntityType[Any] +) -> bool: + """determine if 'given' corresponds to 'entity', in terms + of a path of loader options where a mapped attribute is taken to + be a member of a parent entity. + + e.g.:: + + someoption(A).someoption(A.b) # -> fn(A, A) -> True + someoption(A).someoption(C.d) # -> fn(A, C) -> False + + a1 = aliased(A) + someoption(a1).someoption(A.b) # -> fn(a1, A) -> False + someoption(a1).someoption(a1.b) # -> fn(a1, a1) -> True + + wp = with_polymorphic(A, [A1, A2]) + someoption(wp).someoption(A1.foo) # -> fn(wp, A1) -> False + someoption(wp).someoption(wp.A1.foo) # -> fn(wp, wp.A1) -> True + + """ + if insp_is_aliased_class(given): + return ( + insp_is_aliased_class(entity) + and not entity._use_mapper_path + and (given is entity or entity in given._with_polymorphic_entities) + ) + elif not insp_is_aliased_class(entity): + return given.isa(entity.mapper) + else: + return ( + entity._use_mapper_path + and given in entity.with_polymorphic_mappers + ) + + +def _entity_isa(given: _InternalEntityType[Any], mapper: Mapper[Any]) -> bool: + """determine if 'given' "is a" mapper, in terms of the given + would load rows of type 'mapper'. + + """ + if given.is_aliased_class: + return mapper in given.with_polymorphic_mappers or given.mapper.isa( + mapper + ) + elif given.with_polymorphic_mappers: + return mapper in given.with_polymorphic_mappers or given.isa(mapper) + else: + return given.isa(mapper) + + +def _getitem(iterable_query: Query[Any], item: Any) -> Any: + """calculate __getitem__ in terms of an iterable query object + that also has a slice() method. 
+ + """ + + def _no_negative_indexes(): + raise IndexError( + "negative indexes are not accepted by SQL " + "index / slice operators" + ) + + if isinstance(item, slice): + start, stop, step = util.decode_slice(item) + + if ( + isinstance(stop, int) + and isinstance(start, int) + and stop - start <= 0 + ): + return [] + + elif (isinstance(start, int) and start < 0) or ( + isinstance(stop, int) and stop < 0 + ): + _no_negative_indexes() + + res = iterable_query.slice(start, stop) + if step is not None: + return list(res)[None : None : item.step] + else: + return list(res) + else: + if item == -1: + _no_negative_indexes() + else: + return list(iterable_query[item : item + 1])[0] + + +def _is_mapped_annotation( + raw_annotation: _AnnotationScanType, + cls: Type[Any], + originating_cls: Type[Any], +) -> bool: + try: + annotated = de_stringify_annotation( + cls, raw_annotation, originating_cls.__module__ + ) + except NameError: + # in most cases, at least within our own tests, we can raise + # here, which is more accurate as it prevents us from returning + # false negatives. However, in the real world, try to avoid getting + # involved with end-user annotations that have nothing to do with us. + # see issue #8888 where we bypass using this function in the case + # that we want to detect an unresolvable Mapped[] type. + return False + else: + return is_origin_of_cls(annotated, _MappedAnnotationBase) + + +class _CleanupError(Exception): + pass + + +def _cleanup_mapped_str_annotation( + annotation: str, originating_module: str +) -> str: + # fix up an annotation that comes in as the form: + # 'Mapped[List[Address]]' so that it instead looks like: + # 'Mapped[List["Address"]]' , which will allow us to get + # "Address" as a string + + # additionally, resolve symbols for these names since this is where + # we'd have to do it + + inner: Optional[Match[str]] + + mm = re.match(r"^([^ \|]+?)\[(.+)\]$", annotation) + + if not mm: + return annotation + + # ticket #8759. Resolve the Mapped name to a real symbol. + # originally this just checked the name. + try: + obj = eval_name_only(mm.group(1), originating_module) + except NameError as ne: + raise _CleanupError( + f'For annotation "{annotation}", could not resolve ' + f'container type "{mm.group(1)}". ' + "Please ensure this type is imported at the module level " + "outside of TYPE_CHECKING blocks" + ) from ne + + if obj is typing.ClassVar: + real_symbol = "ClassVar" + else: + try: + if issubclass(obj, _MappedAnnotationBase): + real_symbol = obj.__name__ + else: + return annotation + except TypeError: + # avoid isinstance(obj, type) check, just catch TypeError + return annotation + + # note: if one of the codepaths above didn't define real_symbol and + # then didn't return, real_symbol raises UnboundLocalError + # which is actually a NameError, and the calling routines don't + # notice this since they are catching NameError anyway. Just in case + # this is being modified in the future, something to be aware of. 
+ + stack = [] + inner = mm + while True: + stack.append(real_symbol if mm is inner else inner.group(1)) + g2 = inner.group(2) + inner = re.match(r"^([^ \|]+?)\[(.+)\]$", g2) + if inner is None: + stack.append(g2) + break + + # stacks we want to rewrite, that is, quote the last entry which + # we think is a relationship class name: + # + # ['Mapped', 'List', 'Address'] + # ['Mapped', 'A'] + # + # stacks we dont want to rewrite, which are generally MappedColumn + # use cases: + # + # ['Mapped', "'Optional[Dict[str, str]]'"] + # ['Mapped', 'dict[str, str] | None'] + + if ( + # avoid already quoted symbols such as + # ['Mapped', "'Optional[Dict[str, str]]'"] + not re.match(r"""^["'].*["']$""", stack[-1]) + # avoid further generics like Dict[] such as + # ['Mapped', 'dict[str, str] | None'], + # ['Mapped', 'list[int] | list[str]'], + # ['Mapped', 'Union[list[int], list[str]]'], + and not re.search(r"[\[\]]", stack[-1]) + ): + stripchars = "\"' " + stack[-1] = ", ".join( + f'"{elem.strip(stripchars)}"' for elem in stack[-1].split(",") + ) + + annotation = "[".join(stack) + ("]" * (len(stack) - 1)) + + return annotation + + +def _extract_mapped_subtype( + raw_annotation: Optional[_AnnotationScanType], + cls: type, + originating_module: str, + key: str, + attr_cls: Type[Any], + required: bool, + is_dataclass_field: bool, + expect_mapped: bool = True, + raiseerr: bool = True, +) -> Optional[Tuple[Union[_AnnotationScanType, str], Optional[type]]]: + """given an annotation, figure out if it's ``Mapped[something]`` and if + so, return the ``something`` part. + + Includes error raise scenarios and other options. + + """ + + if raw_annotation is None: + if required: + raise orm_exc.MappedAnnotationError( + f"Python typing annotation is required for attribute " + f'"{cls.__name__}.{key}" when primary argument(s) for ' + f'"{attr_cls.__name__}" construct are None or not present' + ) + return None + + try: + # destringify the "outside" of the annotation. note we are not + # adding include_generic so it will *not* dig into generic contents, + # which will remain as ForwardRef or plain str under future annotations + # mode. The full destringify happens later when mapped_column goes + # to do a full lookup in the registry type_annotations_map. + annotated = de_stringify_annotation( + cls, + raw_annotation, + originating_module, + str_cleanup_fn=_cleanup_mapped_str_annotation, + ) + except _CleanupError as ce: + raise orm_exc.MappedAnnotationError( + f"Could not interpret annotation {raw_annotation}. " + "Check that it uses names that are correctly imported at the " + "module level. See chained stack trace for more hints." + ) from ce + except NameError as ne: + if raiseerr and "Mapped[" in raw_annotation: # type: ignore + raise orm_exc.MappedAnnotationError( + f"Could not interpret annotation {raw_annotation}. " + "Check that it uses names that are correctly imported at the " + "module level. See chained stack trace for more hints." 
+ ) from ne + + annotated = raw_annotation # type: ignore + + if is_dataclass_field: + return annotated, None + else: + if not hasattr(annotated, "__origin__") or not is_origin_of_cls( + annotated, _MappedAnnotationBase + ): + if expect_mapped: + if not raiseerr: + return None + + origin = getattr(annotated, "__origin__", None) + if origin is typing.ClassVar: + return None + + # check for other kind of ORM descriptor like AssociationProxy, + # don't raise for that (issue #9957) + elif isinstance(origin, type) and issubclass( + origin, ORMDescriptor + ): + return None + + raise orm_exc.MappedAnnotationError( + f'Type annotation for "{cls.__name__}.{key}" ' + "can't be correctly interpreted for " + "Annotated Declarative Table form. ORM annotations " + "should normally make use of the ``Mapped[]`` generic " + "type, or other ORM-compatible generic type, as a " + "container for the actual type, which indicates the " + "intent that the attribute is mapped. " + "Class variables that are not intended to be mapped " + "by the ORM should use ClassVar[]. " + "To allow Annotated Declarative to disregard legacy " + "annotations which don't use Mapped[] to pass, set " + '"__allow_unmapped__ = True" on the class or a ' + "superclass this class.", + code="zlpr", + ) + + else: + return annotated, None + + if len(annotated.__args__) != 1: + raise orm_exc.MappedAnnotationError( + "Expected sub-type for Mapped[] annotation" + ) + + return ( + # fix dict/list/set args to be ForwardRef, see #11814 + fixup_container_fwd_refs(annotated.__args__[0]), + annotated.__origin__, + ) + + +def _mapper_property_as_plain_name(prop: Type[Any]) -> str: + if hasattr(prop, "_mapper_property_name"): + name = prop._mapper_property_name() + else: + name = None + return util.clsname_as_plain_name(prop, name) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/writeonly.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/writeonly.py new file mode 100644 index 0000000000000000000000000000000000000000..ac034a09e0a879090b9a09941c6ace4c852a2148 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/orm/writeonly.py @@ -0,0 +1,678 @@ +# orm/writeonly.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php + +"""Write-only collection API. + +This is an alternate mapped attribute style that only supports single-item +collection mutation operations. To read the collection, a select() +object must be executed each time. + +.. versionadded:: 2.0 + + +""" + +from __future__ import annotations + +from typing import Any +from typing import Collection +from typing import Dict +from typing import Generic +from typing import Iterable +from typing import Iterator +from typing import List +from typing import NoReturn +from typing import Optional +from typing import overload +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING +from typing import TypeVar +from typing import Union + +from sqlalchemy.sql import bindparam +from . import attributes +from . import interfaces +from . import relationships +from . import strategies +from .base import NEVER_SET +from .base import object_mapper +from .base import PassiveFlag +from .base import RelationshipDirection +from .. import exc +from .. import inspect +from .. 
import log +from .. import util +from ..sql import delete +from ..sql import insert +from ..sql import select +from ..sql import update +from ..sql.dml import Delete +from ..sql.dml import Insert +from ..sql.dml import Update +from ..util.typing import Literal + +if TYPE_CHECKING: + from . import QueryableAttribute + from ._typing import _InstanceDict + from .attributes import AttributeEventToken + from .base import LoaderCallableStatus + from .collections import _AdaptedCollectionProtocol + from .collections import CollectionAdapter + from .mapper import Mapper + from .relationships import _RelationshipOrderByArg + from .state import InstanceState + from .util import AliasedClass + from ..event import _Dispatch + from ..sql.selectable import FromClause + from ..sql.selectable import Select + +_T = TypeVar("_T", bound=Any) + + +class WriteOnlyHistory(Generic[_T]): + """Overrides AttributeHistory to receive append/remove events directly.""" + + unchanged_items: util.OrderedIdentitySet + added_items: util.OrderedIdentitySet + deleted_items: util.OrderedIdentitySet + _reconcile_collection: bool + + def __init__( + self, + attr: WriteOnlyAttributeImpl, + state: InstanceState[_T], + passive: PassiveFlag, + apply_to: Optional[WriteOnlyHistory[_T]] = None, + ) -> None: + if apply_to: + if passive & PassiveFlag.SQL_OK: + raise exc.InvalidRequestError( + f"Attribute {attr} can't load the existing state from the " + "database for this operation; full iteration is not " + "permitted. If this is a delete operation, configure " + f"passive_deletes=True on the {attr} relationship in " + "order to resolve this error." + ) + + self.unchanged_items = apply_to.unchanged_items + self.added_items = apply_to.added_items + self.deleted_items = apply_to.deleted_items + self._reconcile_collection = apply_to._reconcile_collection + else: + self.deleted_items = util.OrderedIdentitySet() + self.added_items = util.OrderedIdentitySet() + self.unchanged_items = util.OrderedIdentitySet() + self._reconcile_collection = False + + @property + def added_plus_unchanged(self) -> List[_T]: + return list(self.added_items.union(self.unchanged_items)) + + @property + def all_items(self) -> List[_T]: + return list( + self.added_items.union(self.unchanged_items).union( + self.deleted_items + ) + ) + + def as_history(self) -> attributes.History: + if self._reconcile_collection: + added = self.added_items.difference(self.unchanged_items) + deleted = self.deleted_items.intersection(self.unchanged_items) + unchanged = self.unchanged_items.difference(deleted) + else: + added, unchanged, deleted = ( + self.added_items, + self.unchanged_items, + self.deleted_items, + ) + return attributes.History(list(added), list(unchanged), list(deleted)) + + def indexed(self, index: Union[int, slice]) -> Union[List[_T], _T]: + return list(self.added_items)[index] + + def add_added(self, value: _T) -> None: + self.added_items.add(value) + + def add_removed(self, value: _T) -> None: + if value in self.added_items: + self.added_items.remove(value) + else: + self.deleted_items.add(value) + + +class WriteOnlyAttributeImpl( + attributes.HasCollectionAdapter, attributes.AttributeImpl +): + uses_objects: bool = True + default_accepts_scalar_loader: bool = False + supports_population: bool = False + _supports_dynamic_iteration: bool = False + collection: bool = False + dynamic: bool = True + order_by: _RelationshipOrderByArg = () + collection_history_cls: Type[WriteOnlyHistory[Any]] = WriteOnlyHistory + + query_class: Type[WriteOnlyCollection[Any]] + + def 
__init__( + self, + class_: Union[Type[Any], AliasedClass[Any]], + key: str, + dispatch: _Dispatch[QueryableAttribute[Any]], + target_mapper: Mapper[_T], + order_by: _RelationshipOrderByArg, + **kw: Any, + ): + super().__init__(class_, key, None, dispatch, **kw) + self.target_mapper = target_mapper + self.query_class = WriteOnlyCollection + if order_by: + self.order_by = tuple(order_by) + + def get( + self, + state: InstanceState[Any], + dict_: _InstanceDict, + passive: PassiveFlag = PassiveFlag.PASSIVE_OFF, + ) -> Union[util.OrderedIdentitySet, WriteOnlyCollection[Any]]: + if not passive & PassiveFlag.SQL_OK: + return self._get_collection_history( + state, PassiveFlag.PASSIVE_NO_INITIALIZE + ).added_items + else: + return self.query_class(self, state) + + @overload + def get_collection( + self, + state: InstanceState[Any], + dict_: _InstanceDict, + user_data: Literal[None] = ..., + passive: Literal[PassiveFlag.PASSIVE_OFF] = ..., + ) -> CollectionAdapter: ... + + @overload + def get_collection( + self, + state: InstanceState[Any], + dict_: _InstanceDict, + user_data: _AdaptedCollectionProtocol = ..., + passive: PassiveFlag = ..., + ) -> CollectionAdapter: ... + + @overload + def get_collection( + self, + state: InstanceState[Any], + dict_: _InstanceDict, + user_data: Optional[_AdaptedCollectionProtocol] = ..., + passive: PassiveFlag = ..., + ) -> Union[ + Literal[LoaderCallableStatus.PASSIVE_NO_RESULT], CollectionAdapter + ]: ... + + def get_collection( + self, + state: InstanceState[Any], + dict_: _InstanceDict, + user_data: Optional[_AdaptedCollectionProtocol] = None, + passive: PassiveFlag = PassiveFlag.PASSIVE_OFF, + ) -> Union[ + Literal[LoaderCallableStatus.PASSIVE_NO_RESULT], CollectionAdapter + ]: + data: Collection[Any] + if not passive & PassiveFlag.SQL_OK: + data = self._get_collection_history(state, passive).added_items + else: + history = self._get_collection_history(state, passive) + data = history.added_plus_unchanged + return DynamicCollectionAdapter(data) # type: ignore[return-value] + + @util.memoized_property + def _append_token( # type:ignore[override] + self, + ) -> attributes.AttributeEventToken: + return attributes.AttributeEventToken(self, attributes.OP_APPEND) + + @util.memoized_property + def _remove_token( # type:ignore[override] + self, + ) -> attributes.AttributeEventToken: + return attributes.AttributeEventToken(self, attributes.OP_REMOVE) + + def fire_append_event( + self, + state: InstanceState[Any], + dict_: _InstanceDict, + value: Any, + initiator: Optional[AttributeEventToken], + collection_history: Optional[WriteOnlyHistory[Any]] = None, + ) -> None: + if collection_history is None: + collection_history = self._modified_event(state, dict_) + + collection_history.add_added(value) + + for fn in self.dispatch.append: + value = fn(state, value, initiator or self._append_token) + + if self.trackparent and value is not None: + self.sethasparent(attributes.instance_state(value), state, True) + + def fire_remove_event( + self, + state: InstanceState[Any], + dict_: _InstanceDict, + value: Any, + initiator: Optional[AttributeEventToken], + collection_history: Optional[WriteOnlyHistory[Any]] = None, + ) -> None: + if collection_history is None: + collection_history = self._modified_event(state, dict_) + + collection_history.add_removed(value) + + if self.trackparent and value is not None: + self.sethasparent(attributes.instance_state(value), state, False) + + for fn in self.dispatch.remove: + fn(state, value, initiator or self._remove_token) + + def 
_modified_event( + self, state: InstanceState[Any], dict_: _InstanceDict + ) -> WriteOnlyHistory[Any]: + if self.key not in state.committed_state: + state.committed_state[self.key] = self.collection_history_cls( + self, state, PassiveFlag.PASSIVE_NO_FETCH + ) + + state._modified_event(dict_, self, NEVER_SET) + + # this is a hack to allow the entities.ComparableEntity fixture + # to work + dict_[self.key] = True + return state.committed_state[self.key] # type: ignore[no-any-return] + + def set( + self, + state: InstanceState[Any], + dict_: _InstanceDict, + value: Any, + initiator: Optional[AttributeEventToken] = None, + passive: PassiveFlag = PassiveFlag.PASSIVE_OFF, + check_old: Any = None, + pop: bool = False, + _adapt: bool = True, + ) -> None: + if initiator and initiator.parent_token is self.parent_token: + return + + if pop and value is None: + return + + iterable = value + new_values = list(iterable) + if state.has_identity: + if not self._supports_dynamic_iteration: + raise exc.InvalidRequestError( + f'Collection "{self}" does not support implicit ' + "iteration; collection replacement operations " + "can't be used" + ) + old_collection = util.IdentitySet( + self.get(state, dict_, passive=passive) + ) + + collection_history = self._modified_event(state, dict_) + if not state.has_identity: + old_collection = collection_history.added_items + else: + old_collection = old_collection.union( + collection_history.added_items + ) + + constants = old_collection.intersection(new_values) + additions = util.IdentitySet(new_values).difference(constants) + removals = old_collection.difference(constants) + + for member in new_values: + if member in additions: + self.fire_append_event( + state, + dict_, + member, + None, + collection_history=collection_history, + ) + + for member in removals: + self.fire_remove_event( + state, + dict_, + member, + None, + collection_history=collection_history, + ) + + def delete(self, *args: Any, **kwargs: Any) -> NoReturn: + raise NotImplementedError() + + def set_committed_value( + self, state: InstanceState[Any], dict_: _InstanceDict, value: Any + ) -> NoReturn: + raise NotImplementedError( + "Dynamic attributes don't support collection population." 
+ ) + + def get_history( + self, + state: InstanceState[Any], + dict_: _InstanceDict, + passive: PassiveFlag = PassiveFlag.PASSIVE_NO_FETCH, + ) -> attributes.History: + c = self._get_collection_history(state, passive) + return c.as_history() + + def get_all_pending( + self, + state: InstanceState[Any], + dict_: _InstanceDict, + passive: PassiveFlag = PassiveFlag.PASSIVE_NO_INITIALIZE, + ) -> List[Tuple[InstanceState[Any], Any]]: + c = self._get_collection_history(state, passive) + return [(attributes.instance_state(x), x) for x in c.all_items] + + def _get_collection_history( + self, state: InstanceState[Any], passive: PassiveFlag + ) -> WriteOnlyHistory[Any]: + c: WriteOnlyHistory[Any] + if self.key in state.committed_state: + c = state.committed_state[self.key] + else: + c = self.collection_history_cls( + self, state, PassiveFlag.PASSIVE_NO_FETCH + ) + + if state.has_identity and (passive & PassiveFlag.INIT_OK): + return self.collection_history_cls( + self, state, passive, apply_to=c + ) + else: + return c + + def append( + self, + state: InstanceState[Any], + dict_: _InstanceDict, + value: Any, + initiator: Optional[AttributeEventToken], + passive: PassiveFlag = PassiveFlag.PASSIVE_NO_FETCH, + ) -> None: + if initiator is not self: + self.fire_append_event(state, dict_, value, initiator) + + def remove( + self, + state: InstanceState[Any], + dict_: _InstanceDict, + value: Any, + initiator: Optional[AttributeEventToken], + passive: PassiveFlag = PassiveFlag.PASSIVE_NO_FETCH, + ) -> None: + if initiator is not self: + self.fire_remove_event(state, dict_, value, initiator) + + def pop( + self, + state: InstanceState[Any], + dict_: _InstanceDict, + value: Any, + initiator: Optional[AttributeEventToken], + passive: PassiveFlag = PassiveFlag.PASSIVE_NO_FETCH, + ) -> None: + self.remove(state, dict_, value, initiator, passive=passive) + + +@log.class_logger +@relationships.RelationshipProperty.strategy_for(lazy="write_only") +class WriteOnlyLoader(strategies.AbstractRelationshipLoader, log.Identified): + impl_class = WriteOnlyAttributeImpl + + def init_class_attribute(self, mapper: Mapper[Any]) -> None: + self.is_class_level = True + if not self.uselist or self.parent_property.direction not in ( + interfaces.ONETOMANY, + interfaces.MANYTOMANY, + ): + raise exc.InvalidRequestError( + "On relationship %s, 'dynamic' loaders cannot be used with " + "many-to-one/one-to-one relationships and/or " + "uselist=False." % self.parent_property + ) + + strategies._register_attribute( # type: ignore[no-untyped-call] + self.parent_property, + mapper, + useobject=True, + impl_class=self.impl_class, + target_mapper=self.parent_property.mapper, + order_by=self.parent_property.order_by, + query_class=self.parent_property.query_class, + ) + + +class DynamicCollectionAdapter: + """simplified CollectionAdapter for internal API consistency""" + + data: Collection[Any] + + def __init__(self, data: Collection[Any]): + self.data = data + + def __iter__(self) -> Iterator[Any]: + return iter(self.data) + + def _reset_empty(self) -> None: + pass + + def __len__(self) -> int: + return len(self.data) + + def __bool__(self) -> bool: + return True + + +class AbstractCollectionWriter(Generic[_T]): + """Virtual collection which includes append/remove methods that synchronize + into the attribute event system. + + """ + + if not TYPE_CHECKING: + __slots__ = () + + instance: _T + _from_obj: Tuple[FromClause, ...] 
+ + def __init__(self, attr: WriteOnlyAttributeImpl, state: InstanceState[_T]): + instance = state.obj() + if TYPE_CHECKING: + assert instance + self.instance = instance + self.attr = attr + + mapper = object_mapper(instance) + prop = mapper._props[self.attr.key] + + if prop.secondary is not None: + # this is a hack right now. The Query only knows how to + # make subsequent joins() without a given left-hand side + # from self._from_obj[0]. We need to ensure prop.secondary + # is in the FROM. So we purposely put the mapper selectable + # in _from_obj[0] to ensure a user-defined join() later on + # doesn't fail, and secondary is then in _from_obj[1]. + + # note also, we are using the official ORM-annotated selectable + # from __clause_element__(), see #7868 + self._from_obj = (prop.mapper.__clause_element__(), prop.secondary) + else: + self._from_obj = () + + self._where_criteria = ( + prop._with_parent(instance, alias_secondary=False), + ) + + if self.attr.order_by: + self._order_by_clauses = self.attr.order_by + else: + self._order_by_clauses = () + + def _add_all_impl(self, iterator: Iterable[_T]) -> None: + for item in iterator: + self.attr.append( + attributes.instance_state(self.instance), + attributes.instance_dict(self.instance), + item, + None, + ) + + def _remove_impl(self, item: _T) -> None: + self.attr.remove( + attributes.instance_state(self.instance), + attributes.instance_dict(self.instance), + item, + None, + ) + + +class WriteOnlyCollection(AbstractCollectionWriter[_T]): + """Write-only collection which can synchronize changes into the + attribute event system. + + The :class:`.WriteOnlyCollection` is used in a mapping by + using the ``"write_only"`` lazy loading strategy with + :func:`_orm.relationship`. For background on this configuration, + see :ref:`write_only_relationship`. + + .. versionadded:: 2.0 + + .. seealso:: + + :ref:`write_only_relationship` + + """ + + __slots__ = ( + "instance", + "attr", + "_where_criteria", + "_from_obj", + "_order_by_clauses", + ) + + def __iter__(self) -> NoReturn: + raise TypeError( + "WriteOnly collections don't support iteration in-place; " + "to query for collection items, use the select() method to " + "produce a SQL statement and execute it with session.scalars()." + ) + + def select(self) -> Select[Tuple[_T]]: + """Produce a :class:`_sql.Select` construct that represents the + rows within this instance-local :class:`_orm.WriteOnlyCollection`. + + """ + stmt = select(self.attr.target_mapper).where(*self._where_criteria) + if self._from_obj: + stmt = stmt.select_from(*self._from_obj) + if self._order_by_clauses: + stmt = stmt.order_by(*self._order_by_clauses) + return stmt + + def insert(self) -> Insert: + """For one-to-many collections, produce a :class:`_dml.Insert` which + will insert new rows in terms of this this instance-local + :class:`_orm.WriteOnlyCollection`. + + This construct is only supported for a :class:`_orm.Relationship` + that does **not** include the :paramref:`_orm.relationship.secondary` + parameter. For relationships that refer to a many-to-many table, + use ordinary bulk insert techniques to produce new objects, then + use :meth:`_orm.AbstractCollectionWriter.add_all` to associate them + with the collection. 
+ + + """ + + state = inspect(self.instance) + mapper = state.mapper + prop = mapper._props[self.attr.key] + + if prop.direction is not RelationshipDirection.ONETOMANY: + raise exc.InvalidRequestError( + "Write only bulk INSERT only supported for one-to-many " + "collections; for many-to-many, use a separate bulk " + "INSERT along with add_all()." + ) + + dict_: Dict[str, Any] = {} + + for l, r in prop.synchronize_pairs: + fn = prop._get_attr_w_warn_on_none( + mapper, + state, + state.dict, + l, + ) + + dict_[r.key] = bindparam(None, callable_=fn) + + return insert(self.attr.target_mapper).values(**dict_) + + def update(self) -> Update: + """Produce a :class:`_dml.Update` which will refer to rows in terms + of this instance-local :class:`_orm.WriteOnlyCollection`. + + """ + return update(self.attr.target_mapper).where(*self._where_criteria) + + def delete(self) -> Delete: + """Produce a :class:`_dml.Delete` which will refer to rows in terms + of this instance-local :class:`_orm.WriteOnlyCollection`. + + """ + return delete(self.attr.target_mapper).where(*self._where_criteria) + + def add_all(self, iterator: Iterable[_T]) -> None: + """Add an iterable of items to this :class:`_orm.WriteOnlyCollection`. + + The given items will be persisted to the database in terms of + the parent instance's collection on the next flush. + + """ + self._add_all_impl(iterator) + + def add(self, item: _T) -> None: + """Add an item to this :class:`_orm.WriteOnlyCollection`. + + The given item will be persisted to the database in terms of + the parent instance's collection on the next flush. + + """ + self._add_all_impl([item]) + + def remove(self, item: _T) -> None: + """Remove an item from this :class:`_orm.WriteOnlyCollection`. + + The given item will be removed from the parent instance's collection on + the next flush. 
+ + """ + self._remove_impl(item) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/sql/__init__.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/sql/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..188f709d7e4ab598f0aee07625c72ac1891f4384 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/sql/__init__.py @@ -0,0 +1,145 @@ +# sql/__init__.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php +from typing import Any +from typing import TYPE_CHECKING + +from ._typing import ColumnExpressionArgument as ColumnExpressionArgument +from ._typing import NotNullable as NotNullable +from ._typing import Nullable as Nullable +from .base import Executable as Executable +from .compiler import COLLECT_CARTESIAN_PRODUCTS as COLLECT_CARTESIAN_PRODUCTS +from .compiler import FROM_LINTING as FROM_LINTING +from .compiler import NO_LINTING as NO_LINTING +from .compiler import WARN_LINTING as WARN_LINTING +from .ddl import BaseDDLElement as BaseDDLElement +from .ddl import DDL as DDL +from .ddl import DDLElement as DDLElement +from .ddl import ExecutableDDLElement as ExecutableDDLElement +from .expression import Alias as Alias +from .expression import alias as alias +from .expression import all_ as all_ +from .expression import and_ as and_ +from .expression import any_ as any_ +from .expression import asc as asc +from .expression import between as between +from .expression import bindparam as bindparam +from .expression import case as case +from .expression import cast as cast +from .expression import ClauseElement as ClauseElement +from .expression import collate as collate +from .expression import column as column +from .expression import ColumnCollection as ColumnCollection +from .expression import ColumnElement as ColumnElement +from .expression import CompoundSelect as CompoundSelect +from .expression import cte as cte +from .expression import Delete as Delete +from .expression import delete as delete +from .expression import desc as desc +from .expression import distinct as distinct +from .expression import except_ as except_ +from .expression import except_all as except_all +from .expression import exists as exists +from .expression import extract as extract +from .expression import false as false +from .expression import False_ as False_ +from .expression import FromClause as FromClause +from .expression import func as func +from .expression import funcfilter as funcfilter +from .expression import Insert as Insert +from .expression import insert as insert +from .expression import intersect as intersect +from .expression import intersect_all as intersect_all +from .expression import Join as Join +from .expression import join as join +from .expression import label as label +from .expression import LABEL_STYLE_DEFAULT as LABEL_STYLE_DEFAULT +from .expression import ( + LABEL_STYLE_DISAMBIGUATE_ONLY as LABEL_STYLE_DISAMBIGUATE_ONLY, +) +from .expression import LABEL_STYLE_NONE as LABEL_STYLE_NONE +from .expression import ( + LABEL_STYLE_TABLENAME_PLUS_COL as LABEL_STYLE_TABLENAME_PLUS_COL, +) +from .expression import lambda_stmt as lambda_stmt +from .expression import LambdaElement as LambdaElement +from .expression import lateral as lateral +from .expression import 
literal as literal +from .expression import literal_column as literal_column +from .expression import modifier as modifier +from .expression import not_ as not_ +from .expression import null as null +from .expression import nulls_first as nulls_first +from .expression import nulls_last as nulls_last +from .expression import nullsfirst as nullsfirst +from .expression import nullslast as nullslast +from .expression import or_ as or_ +from .expression import outerjoin as outerjoin +from .expression import outparam as outparam +from .expression import over as over +from .expression import quoted_name as quoted_name +from .expression import Select as Select +from .expression import select as select +from .expression import Selectable as Selectable +from .expression import SelectLabelStyle as SelectLabelStyle +from .expression import SQLColumnExpression as SQLColumnExpression +from .expression import StatementLambdaElement as StatementLambdaElement +from .expression import Subquery as Subquery +from .expression import table as table +from .expression import TableClause as TableClause +from .expression import TableSample as TableSample +from .expression import tablesample as tablesample +from .expression import text as text +from .expression import true as true +from .expression import True_ as True_ +from .expression import try_cast as try_cast +from .expression import tuple_ as tuple_ +from .expression import type_coerce as type_coerce +from .expression import union as union +from .expression import union_all as union_all +from .expression import Update as Update +from .expression import update as update +from .expression import Values as Values +from .expression import values as values +from .expression import within_group as within_group +from .visitors import ClauseVisitor as ClauseVisitor + + +def __go(lcls: Any) -> None: + from .. import util as _sa_util + + from . import base + from . import coercions + from . import elements + from . import lambdas + from . import selectable + from . import schema + from . import traversals + from . 
import type_api + + if not TYPE_CHECKING: + base.coercions = elements.coercions = coercions + base.elements = elements + base.type_api = type_api + coercions.elements = elements + coercions.lambdas = lambdas + coercions.schema = schema + coercions.selectable = selectable + + from .annotation import _prepare_annotations + from .annotation import Annotated + from .elements import AnnotatedColumnElement + from .elements import ClauseList + from .selectable import AnnotatedFromClause + + _prepare_annotations(ColumnElement, AnnotatedColumnElement) + _prepare_annotations(FromClause, AnnotatedFromClause) + _prepare_annotations(ClauseList, Annotated) + + _sa_util.preloaded.import_prefix("sqlalchemy.sql") + + +__go(locals()) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/sql/_dml_constructors.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/sql/_dml_constructors.py new file mode 100644 index 0000000000000000000000000000000000000000..0a6f60115f19e87c4b37f1667fc513987cb26374 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/sql/_dml_constructors.py @@ -0,0 +1,132 @@ +# sql/_dml_constructors.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php + +from __future__ import annotations + +from typing import TYPE_CHECKING + +from .dml import Delete +from .dml import Insert +from .dml import Update + +if TYPE_CHECKING: + from ._typing import _DMLTableArgument + + +def insert(table: _DMLTableArgument) -> Insert: + """Construct an :class:`_expression.Insert` object. + + E.g.:: + + from sqlalchemy import insert + + stmt = insert(user_table).values(name="username", fullname="Full Username") + + Similar functionality is available via the + :meth:`_expression.TableClause.insert` method on + :class:`_schema.Table`. + + .. seealso:: + + :ref:`tutorial_core_insert` - in the :ref:`unified_tutorial` + + + :param table: :class:`_expression.TableClause` + which is the subject of the + insert. + + :param values: collection of values to be inserted; see + :meth:`_expression.Insert.values` + for a description of allowed formats here. + Can be omitted entirely; a :class:`_expression.Insert` construct + will also dynamically render the VALUES clause at execution time + based on the parameters passed to :meth:`_engine.Connection.execute`. + + :param inline: if True, no attempt will be made to retrieve the + SQL-generated default values to be provided within the statement; + in particular, + this allows SQL expressions to be rendered 'inline' within the + statement without the need to pre-execute them beforehand; for + backends that support "returning", this turns off the "implicit + returning" feature for the statement. + + If both :paramref:`_expression.insert.values` and compile-time bind + parameters are present, the compile-time bind parameters override the + information specified within :paramref:`_expression.insert.values` on a + per-key basis. + + The keys within :paramref:`_expression.Insert.values` can be either + :class:`~sqlalchemy.schema.Column` objects or their string + identifiers. Each key may reference one of: + + * a literal data value (i.e. string, number, etc.); + * a Column object; + * a SELECT statement. 
+ + If a ``SELECT`` statement is specified which references this + ``INSERT`` statement's table, the statement will be correlated + against the ``INSERT`` statement. + + .. seealso:: + + :ref:`tutorial_core_insert` - in the :ref:`unified_tutorial` + + """ # noqa: E501 + return Insert(table) + + +def update(table: _DMLTableArgument) -> Update: + r"""Construct an :class:`_expression.Update` object. + + E.g.:: + + from sqlalchemy import update + + stmt = ( + update(user_table).where(user_table.c.id == 5).values(name="user #5") + ) + + Similar functionality is available via the + :meth:`_expression.TableClause.update` method on + :class:`_schema.Table`. + + :param table: A :class:`_schema.Table` + object representing the database + table to be updated. + + + .. seealso:: + + :ref:`tutorial_core_update_delete` - in the :ref:`unified_tutorial` + + + """ # noqa: E501 + return Update(table) + + +def delete(table: _DMLTableArgument) -> Delete: + r"""Construct :class:`_expression.Delete` object. + + E.g.:: + + from sqlalchemy import delete + + stmt = delete(user_table).where(user_table.c.id == 5) + + Similar functionality is available via the + :meth:`_expression.TableClause.delete` method on + :class:`_schema.Table`. + + :param table: The table to delete rows from. + + .. seealso:: + + :ref:`tutorial_core_update_delete` - in the :ref:`unified_tutorial` + + + """ + return Delete(table) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/sql/_elements_constructors.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/sql/_elements_constructors.py new file mode 100644 index 0000000000000000000000000000000000000000..3359998f3d84c114f0d3bf96c9ae89fb4ad8844d --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/sql/_elements_constructors.py @@ -0,0 +1,1872 @@ +# sql/_elements_constructors.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php + +from __future__ import annotations + +import typing +from typing import Any +from typing import Callable +from typing import Mapping +from typing import Optional +from typing import overload +from typing import Sequence +from typing import Tuple as typing_Tuple +from typing import TYPE_CHECKING +from typing import TypeVar +from typing import Union + +from . import coercions +from . 
import roles +from .base import _NoArg +from .coercions import _document_text_coercion +from .elements import BindParameter +from .elements import BooleanClauseList +from .elements import Case +from .elements import Cast +from .elements import CollationClause +from .elements import CollectionAggregate +from .elements import ColumnClause +from .elements import ColumnElement +from .elements import Extract +from .elements import False_ +from .elements import FunctionFilter +from .elements import Label +from .elements import Null +from .elements import Over +from .elements import TextClause +from .elements import True_ +from .elements import TryCast +from .elements import Tuple +from .elements import TypeCoerce +from .elements import UnaryExpression +from .elements import WithinGroup +from .functions import FunctionElement +from ..util.typing import Literal + +if typing.TYPE_CHECKING: + from ._typing import _ByArgument + from ._typing import _ColumnExpressionArgument + from ._typing import _ColumnExpressionOrLiteralArgument + from ._typing import _ColumnExpressionOrStrLabelArgument + from ._typing import _TypeEngineArgument + from .elements import BinaryExpression + from .selectable import FromClause + from .type_api import TypeEngine + +_T = TypeVar("_T") + + +def all_(expr: _ColumnExpressionArgument[_T]) -> CollectionAggregate[bool]: + """Produce an ALL expression. + + For dialects such as that of PostgreSQL, this operator applies + to usage of the :class:`_types.ARRAY` datatype, for that of + MySQL, it may apply to a subquery. e.g.:: + + # renders on PostgreSQL: + # '5 = ALL (somearray)' + expr = 5 == all_(mytable.c.somearray) + + # renders on MySQL: + # '5 = ALL (SELECT value FROM table)' + expr = 5 == all_(select(table.c.value)) + + Comparison to NULL may work using ``None``:: + + None == all_(mytable.c.somearray) + + The any_() / all_() operators also feature a special "operand flipping" + behavior such that if any_() / all_() are used on the left side of a + comparison using a standalone operator such as ``==``, ``!=``, etc. + (not including operator methods such as + :meth:`_sql.ColumnOperators.is_`) the rendered expression is flipped:: + + # would render '5 = ALL (column)` + all_(mytable.c.column) == 5 + + Or with ``None``, which note will not perform + the usual step of rendering "IS" as is normally the case for NULL:: + + # would render 'NULL = ALL(somearray)' + all_(mytable.c.somearray) == None + + .. versionchanged:: 1.4.26 repaired the use of any_() / all_() + comparing to NULL on the right side to be flipped to the left. + + The column-level :meth:`_sql.ColumnElement.all_` method (not to be + confused with :class:`_types.ARRAY` level + :meth:`_types.ARRAY.Comparator.all`) is shorthand for + ``all_(col)``:: + + 5 == mytable.c.somearray.all_() + + .. seealso:: + + :meth:`_sql.ColumnOperators.all_` + + :func:`_expression.any_` + + """ + return CollectionAggregate._create_all(expr) + + +def and_( # type: ignore[empty-body] + initial_clause: Union[Literal[True], _ColumnExpressionArgument[bool]], + *clauses: _ColumnExpressionArgument[bool], +) -> ColumnElement[bool]: + r"""Produce a conjunction of expressions joined by ``AND``. 
+ + E.g.:: + + from sqlalchemy import and_ + + stmt = select(users_table).where( + and_(users_table.c.name == "wendy", users_table.c.enrolled == True) + ) + + The :func:`.and_` conjunction is also available using the + Python ``&`` operator (though note that compound expressions + need to be parenthesized in order to function with Python + operator precedence behavior):: + + stmt = select(users_table).where( + (users_table.c.name == "wendy") & (users_table.c.enrolled == True) + ) + + The :func:`.and_` operation is also implicit in some cases; + the :meth:`_expression.Select.where` + method for example can be invoked multiple + times against a statement, which will have the effect of each + clause being combined using :func:`.and_`:: + + stmt = ( + select(users_table) + .where(users_table.c.name == "wendy") + .where(users_table.c.enrolled == True) + ) + + The :func:`.and_` construct must be given at least one positional + argument in order to be valid; a :func:`.and_` construct with no + arguments is ambiguous. To produce an "empty" or dynamically + generated :func:`.and_` expression, from a given list of expressions, + a "default" element of :func:`_sql.true` (or just ``True``) should be + specified:: + + from sqlalchemy import true + + criteria = and_(true(), *expressions) + + The above expression will compile to SQL as the expression ``true`` + or ``1 = 1``, depending on backend, if no other expressions are + present. If expressions are present, then the :func:`_sql.true` value is + ignored as it does not affect the outcome of an AND expression that + has other elements. + + .. deprecated:: 1.4 The :func:`.and_` element now requires that at + least one argument is passed; creating the :func:`.and_` construct + with no arguments is deprecated, and will emit a deprecation warning + while continuing to produce a blank SQL string. + + .. seealso:: + + :func:`.or_` + + """ + ... + + +if not TYPE_CHECKING: + # handle deprecated case which allows zero-arguments + def and_(*clauses): # noqa: F811 + r"""Produce a conjunction of expressions joined by ``AND``. + + E.g.:: + + from sqlalchemy import and_ + + stmt = select(users_table).where( + and_(users_table.c.name == "wendy", users_table.c.enrolled == True) + ) + + The :func:`.and_` conjunction is also available using the + Python ``&`` operator (though note that compound expressions + need to be parenthesized in order to function with Python + operator precedence behavior):: + + stmt = select(users_table).where( + (users_table.c.name == "wendy") & (users_table.c.enrolled == True) + ) + + The :func:`.and_` operation is also implicit in some cases; + the :meth:`_expression.Select.where` + method for example can be invoked multiple + times against a statement, which will have the effect of each + clause being combined using :func:`.and_`:: + + stmt = ( + select(users_table) + .where(users_table.c.name == "wendy") + .where(users_table.c.enrolled == True) + ) + + The :func:`.and_` construct must be given at least one positional + argument in order to be valid; a :func:`.and_` construct with no + arguments is ambiguous. To produce an "empty" or dynamically + generated :func:`.and_` expression, from a given list of expressions, + a "default" element of :func:`_sql.true` (or just ``True``) should be + specified:: + + from sqlalchemy import true + + criteria = and_(true(), *expressions) + + The above expression will compile to SQL as the expression ``true`` + or ``1 = 1``, depending on backend, if no other expressions are + present. 
If expressions are present, then the :func:`_sql.true` value + is ignored as it does not affect the outcome of an AND expression that + has other elements. + + .. deprecated:: 1.4 The :func:`.and_` element now requires that at + least one argument is passed; creating the :func:`.and_` construct + with no arguments is deprecated, and will emit a deprecation warning + while continuing to produce a blank SQL string. + + .. seealso:: + + :func:`.or_` + + """ # noqa: E501 + return BooleanClauseList.and_(*clauses) + + +def any_(expr: _ColumnExpressionArgument[_T]) -> CollectionAggregate[bool]: + """Produce an ANY expression. + + For dialects such as that of PostgreSQL, this operator applies + to usage of the :class:`_types.ARRAY` datatype, for that of + MySQL, it may apply to a subquery. e.g.:: + + # renders on PostgreSQL: + # '5 = ANY (somearray)' + expr = 5 == any_(mytable.c.somearray) + + # renders on MySQL: + # '5 = ANY (SELECT value FROM table)' + expr = 5 == any_(select(table.c.value)) + + Comparison to NULL may work using ``None`` or :func:`_sql.null`:: + + None == any_(mytable.c.somearray) + + The any_() / all_() operators also feature a special "operand flipping" + behavior such that if any_() / all_() are used on the left side of a + comparison using a standalone operator such as ``==``, ``!=``, etc. + (not including operator methods such as + :meth:`_sql.ColumnOperators.is_`) the rendered expression is flipped:: + + # would render '5 = ANY (column)` + any_(mytable.c.column) == 5 + + Or with ``None``, which note will not perform + the usual step of rendering "IS" as is normally the case for NULL:: + + # would render 'NULL = ANY(somearray)' + any_(mytable.c.somearray) == None + + .. versionchanged:: 1.4.26 repaired the use of any_() / all_() + comparing to NULL on the right side to be flipped to the left. + + The column-level :meth:`_sql.ColumnElement.any_` method (not to be + confused with :class:`_types.ARRAY` level + :meth:`_types.ARRAY.Comparator.any`) is shorthand for + ``any_(col)``:: + + 5 = mytable.c.somearray.any_() + + .. seealso:: + + :meth:`_sql.ColumnOperators.any_` + + :func:`_expression.all_` + + """ + return CollectionAggregate._create_any(expr) + + +def asc( + column: _ColumnExpressionOrStrLabelArgument[_T], +) -> UnaryExpression[_T]: + """Produce an ascending ``ORDER BY`` clause element. + + e.g.:: + + from sqlalchemy import asc + + stmt = select(users_table).order_by(asc(users_table.c.name)) + + will produce SQL as: + + .. sourcecode:: sql + + SELECT id, name FROM user ORDER BY name ASC + + The :func:`.asc` function is a standalone version of the + :meth:`_expression.ColumnElement.asc` + method available on all SQL expressions, + e.g.:: + + + stmt = select(users_table).order_by(users_table.c.name.asc()) + + :param column: A :class:`_expression.ColumnElement` (e.g. + scalar SQL expression) + with which to apply the :func:`.asc` operation. + + .. seealso:: + + :func:`.desc` + + :func:`.nulls_first` + + :func:`.nulls_last` + + :meth:`_expression.Select.order_by` + + """ + return UnaryExpression._create_asc(column) + + +def collate( + expression: _ColumnExpressionArgument[str], collation: str +) -> BinaryExpression[str]: + """Return the clause ``expression COLLATE collation``. + + e.g.:: + + collate(mycolumn, "utf8_bin") + + produces: + + .. sourcecode:: sql + + mycolumn COLLATE utf8_bin + + The collation expression is also quoted if it is a case sensitive + identifier, e.g. contains uppercase characters. + + .. 
versionchanged:: 1.2 quoting is automatically applied to COLLATE + expressions if they are case sensitive. + + """ + return CollationClause._create_collation_expression(expression, collation) + + +def between( + expr: _ColumnExpressionOrLiteralArgument[_T], + lower_bound: Any, + upper_bound: Any, + symmetric: bool = False, +) -> BinaryExpression[bool]: + """Produce a ``BETWEEN`` predicate clause. + + E.g.:: + + from sqlalchemy import between + + stmt = select(users_table).where(between(users_table.c.id, 5, 7)) + + Would produce SQL resembling: + + .. sourcecode:: sql + + SELECT id, name FROM user WHERE id BETWEEN :id_1 AND :id_2 + + The :func:`.between` function is a standalone version of the + :meth:`_expression.ColumnElement.between` method available on all + SQL expressions, as in:: + + stmt = select(users_table).where(users_table.c.id.between(5, 7)) + + All arguments passed to :func:`.between`, including the left side + column expression, are coerced from Python scalar values if a + the value is not a :class:`_expression.ColumnElement` subclass. + For example, + three fixed values can be compared as in:: + + print(between(5, 3, 7)) + + Which would produce:: + + :param_1 BETWEEN :param_2 AND :param_3 + + :param expr: a column expression, typically a + :class:`_expression.ColumnElement` + instance or alternatively a Python scalar expression to be coerced + into a column expression, serving as the left side of the ``BETWEEN`` + expression. + + :param lower_bound: a column or Python scalar expression serving as the + lower bound of the right side of the ``BETWEEN`` expression. + + :param upper_bound: a column or Python scalar expression serving as the + upper bound of the right side of the ``BETWEEN`` expression. + + :param symmetric: if True, will render " BETWEEN SYMMETRIC ". Note + that not all databases support this syntax. + + .. seealso:: + + :meth:`_expression.ColumnElement.between` + + """ + col_expr = coercions.expect(roles.ExpressionElementRole, expr) + return col_expr.between(lower_bound, upper_bound, symmetric=symmetric) + + +def outparam( + key: str, type_: Optional[TypeEngine[_T]] = None +) -> BindParameter[_T]: + """Create an 'OUT' parameter for usage in functions (stored procedures), + for databases which support them. + + The ``outparam`` can be used like a regular function parameter. + The "output" value will be available from the + :class:`~sqlalchemy.engine.CursorResult` object via its ``out_parameters`` + attribute, which returns a dictionary containing the values. + + """ + return BindParameter(key, None, type_=type_, unique=False, isoutparam=True) + + +@overload +def not_(clause: BinaryExpression[_T]) -> BinaryExpression[_T]: ... + + +@overload +def not_(clause: _ColumnExpressionArgument[_T]) -> ColumnElement[_T]: ... + + +def not_(clause: _ColumnExpressionArgument[_T]) -> ColumnElement[_T]: + """Return a negation of the given clause, i.e. ``NOT(clause)``. + + The ``~`` operator is also overloaded on all + :class:`_expression.ColumnElement` subclasses to produce the + same result. 
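As an aside on the negation behavior just described, a minimal sketch (not part of the library source; it assumes the ``users_table`` construct and its ``enrolled`` column from the earlier examples) of the two equivalent spellings::

    from sqlalchemy import not_, select

    # negation via the standalone function
    stmt = select(users_table).where(not_(users_table.c.enrolled))

    # the same negation via the overloaded ~ operator
    stmt = select(users_table).where(~users_table.c.enrolled)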
+ + """ + + return coercions.expect(roles.ExpressionElementRole, clause).__invert__() + + +def bindparam( + key: Optional[str], + value: Any = _NoArg.NO_ARG, + type_: Optional[_TypeEngineArgument[_T]] = None, + unique: bool = False, + required: Union[bool, Literal[_NoArg.NO_ARG]] = _NoArg.NO_ARG, + quote: Optional[bool] = None, + callable_: Optional[Callable[[], Any]] = None, + expanding: bool = False, + isoutparam: bool = False, + literal_execute: bool = False, +) -> BindParameter[_T]: + r"""Produce a "bound expression". + + The return value is an instance of :class:`.BindParameter`; this + is a :class:`_expression.ColumnElement` + subclass which represents a so-called + "placeholder" value in a SQL expression, the value of which is + supplied at the point at which the statement in executed against a + database connection. + + In SQLAlchemy, the :func:`.bindparam` construct has + the ability to carry along the actual value that will be ultimately + used at expression time. In this way, it serves not just as + a "placeholder" for eventual population, but also as a means of + representing so-called "unsafe" values which should not be rendered + directly in a SQL statement, but rather should be passed along + to the :term:`DBAPI` as values which need to be correctly escaped + and potentially handled for type-safety. + + When using :func:`.bindparam` explicitly, the use case is typically + one of traditional deferment of parameters; the :func:`.bindparam` + construct accepts a name which can then be referred to at execution + time:: + + from sqlalchemy import bindparam + + stmt = select(users_table).where( + users_table.c.name == bindparam("username") + ) + + The above statement, when rendered, will produce SQL similar to: + + .. sourcecode:: sql + + SELECT id, name FROM user WHERE name = :username + + In order to populate the value of ``:username`` above, the value + would typically be applied at execution time to a method + like :meth:`_engine.Connection.execute`:: + + result = connection.execute(stmt, {"username": "wendy"}) + + Explicit use of :func:`.bindparam` is also common when producing + UPDATE or DELETE statements that are to be invoked multiple times, + where the WHERE criterion of the statement is to change on each + invocation, such as:: + + stmt = ( + users_table.update() + .where(user_table.c.name == bindparam("username")) + .values(fullname=bindparam("fullname")) + ) + + connection.execute( + stmt, + [ + {"username": "wendy", "fullname": "Wendy Smith"}, + {"username": "jack", "fullname": "Jack Jones"}, + ], + ) + + SQLAlchemy's Core expression system makes wide use of + :func:`.bindparam` in an implicit sense. It is typical that Python + literal values passed to virtually all SQL expression functions are + coerced into fixed :func:`.bindparam` constructs. For example, given + a comparison operation such as:: + + expr = users_table.c.name == "Wendy" + + The above expression will produce a :class:`.BinaryExpression` + construct, where the left side is the :class:`_schema.Column` object + representing the ``name`` column, and the right side is a + :class:`.BindParameter` representing the literal value:: + + print(repr(expr.right)) + BindParameter("%(4327771088 name)s", "Wendy", type_=String()) + + The expression above will render SQL such as: + + .. sourcecode:: sql + + user.name = :name_1 + + Where the ``:name_1`` parameter name is an anonymous name. 
The + actual string ``Wendy`` is not in the rendered string, but is carried + along where it is later used within statement execution. If we + invoke a statement like the following:: + + stmt = select(users_table).where(users_table.c.name == "Wendy") + result = connection.execute(stmt) + + We would see SQL logging output as: + + .. sourcecode:: sql + + SELECT "user".id, "user".name + FROM "user" + WHERE "user".name = %(name_1)s + {'name_1': 'Wendy'} + + Above, we see that ``Wendy`` is passed as a parameter to the database, + while the placeholder ``:name_1`` is rendered in the appropriate form + for the target database, in this case the PostgreSQL database. + + Similarly, :func:`.bindparam` is invoked automatically when working + with :term:`CRUD` statements as far as the "VALUES" portion is + concerned. The :func:`_expression.insert` construct produces an + ``INSERT`` expression which will, at statement execution time, generate + bound placeholders based on the arguments passed, as in:: + + stmt = users_table.insert() + result = connection.execute(stmt, {"name": "Wendy"}) + + The above will produce SQL output as: + + .. sourcecode:: sql + + INSERT INTO "user" (name) VALUES (%(name)s) + {'name': 'Wendy'} + + The :class:`_expression.Insert` construct, at + compilation/execution time, rendered a single :func:`.bindparam` + mirroring the column name ``name`` as a result of the single ``name`` + parameter we passed to the :meth:`_engine.Connection.execute` method. + + :param key: + the key (e.g. the name) for this bind param. + Will be used in the generated + SQL statement for dialects that use named parameters. This + value may be modified when part of a compilation operation, + if other :class:`BindParameter` objects exist with the same + key, or if its length is too long and truncation is + required. + + If omitted, an "anonymous" name is generated for the bound parameter; + when given a value to bind, the end result is equivalent to calling upon + the :func:`.literal` function with a value to bind, particularly + if the :paramref:`.bindparam.unique` parameter is also provided. + + :param value: + Initial value for this bind param. Will be used at statement + execution time as the value for this parameter passed to the + DBAPI, if no other value is indicated to the statement execution + method for this particular parameter name. Defaults to ``None``. + + :param callable\_: + A callable function that takes the place of "value". The function + will be called at statement execution time to determine the + ultimate value. Used for scenarios where the actual bind + value cannot be determined at the point at which the clause + construct is created, but embedded bind values are still desirable. + + :param type\_: + A :class:`.TypeEngine` class or instance representing an optional + datatype for this :func:`.bindparam`. If not passed, a type + may be determined automatically for the bind, based on the given + value; for example, trivial Python types such as ``str``, + ``int``, ``bool`` + may result in the :class:`.String`, :class:`.Integer` or + :class:`.Boolean` types being automatically selected. + + The type of a :func:`.bindparam` is significant especially in that + the type will apply pre-processing to the value before it is + passed to the database. 
For example, a :func:`.bindparam` which + refers to a datetime value, and is specified as holding the + :class:`.DateTime` type, may apply conversion needed to the + value (such as stringification on SQLite) before passing the value + to the database. + + :param unique: + if True, the key name of this :class:`.BindParameter` will be + modified if another :class:`.BindParameter` of the same name + already has been located within the containing + expression. This flag is used generally by the internals + when producing so-called "anonymous" bound expressions, it + isn't generally applicable to explicitly-named :func:`.bindparam` + constructs. + + :param required: + If ``True``, a value is required at execution time. If not passed, + it defaults to ``True`` if neither :paramref:`.bindparam.value` + or :paramref:`.bindparam.callable` were passed. If either of these + parameters are present, then :paramref:`.bindparam.required` + defaults to ``False``. + + :param quote: + True if this parameter name requires quoting and is not + currently known as a SQLAlchemy reserved word; this currently + only applies to the Oracle Database backends, where bound names must + sometimes be quoted. + + :param isoutparam: + if True, the parameter should be treated like a stored procedure + "OUT" parameter. This applies to backends such as Oracle Database which + support OUT parameters. + + :param expanding: + if True, this parameter will be treated as an "expanding" parameter + at execution time; the parameter value is expected to be a sequence, + rather than a scalar value, and the string SQL statement will + be transformed on a per-execution basis to accommodate the sequence + with a variable number of parameter slots passed to the DBAPI. + This is to allow statement caching to be used in conjunction with + an IN clause. + + .. seealso:: + + :meth:`.ColumnOperators.in_` + + :ref:`baked_in` - with baked queries + + .. note:: The "expanding" feature does not support "executemany"- + style parameter sets. + + .. versionadded:: 1.2 + + .. versionchanged:: 1.3 the "expanding" bound parameter feature now + supports empty lists. + + :param literal_execute: + if True, the bound parameter will be rendered in the compile phase + with a special "POSTCOMPILE" token, and the SQLAlchemy compiler will + render the final value of the parameter into the SQL statement at + statement execution time, omitting the value from the parameter + dictionary / list passed to DBAPI ``cursor.execute()``. This + produces a similar effect as that of using the ``literal_binds``, + compilation flag, however takes place as the statement is sent to + the DBAPI ``cursor.execute()`` method, rather than when the statement + is compiled. The primary use of this + capability is for rendering LIMIT / OFFSET clauses for database + drivers that can't accommodate for bound parameters in these + contexts, while allowing SQL constructs to be cacheable at the + compilation level. + + .. versionadded:: 1.4 Added "post compile" bound parameters + + .. seealso:: + + :ref:`change_4808`. + + .. seealso:: + + :ref:`tutorial_sending_parameters` - in the + :ref:`unified_tutorial` + + + """ + return BindParameter( + key, + value, + type_, + unique, + required, + quote, + callable_, + expanding, + isoutparam, + literal_execute, + ) + + +def case( + *whens: Union[ + typing_Tuple[_ColumnExpressionArgument[bool], Any], Mapping[Any, Any] + ], + value: Optional[Any] = None, + else_: Optional[Any] = None, +) -> Case[Any]: + r"""Produce a ``CASE`` expression. 
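Referring back to the :paramref:`.bindparam.expanding` parameter described above, a minimal usage sketch (an illustration only; ``users_table`` and ``connection`` are assumed from the earlier examples) pairs it with :meth:`.ColumnOperators.in_` so the list of values is supplied at execution time::

    from sqlalchemy import bindparam, select

    # the IN list is bound as a single "expanding" parameter
    stmt = select(users_table).where(
        users_table.c.id.in_(bindparam("ids", expanding=True))
    )
    result = connection.execute(stmt, {"ids": [1, 2, 3]})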
+ + The ``CASE`` construct in SQL is a conditional object that + acts somewhat analogously to an "if/then" construct in other + languages. It returns an instance of :class:`.Case`. + + :func:`.case` in its usual form is passed a series of "when" + constructs, that is, a list of conditions and results as tuples:: + + from sqlalchemy import case + + stmt = select(users_table).where( + case( + (users_table.c.name == "wendy", "W"), + (users_table.c.name == "jack", "J"), + else_="E", + ) + ) + + The above statement will produce SQL resembling: + + .. sourcecode:: sql + + SELECT id, name FROM user + WHERE CASE + WHEN (name = :name_1) THEN :param_1 + WHEN (name = :name_2) THEN :param_2 + ELSE :param_3 + END + + When simple equality expressions of several values against a single + parent column are needed, :func:`.case` also has a "shorthand" format + used via the + :paramref:`.case.value` parameter, which is passed a column + expression to be compared. In this form, the :paramref:`.case.whens` + parameter is passed as a dictionary containing expressions to be + compared against keyed to result expressions. The statement below is + equivalent to the preceding statement:: + + stmt = select(users_table).where( + case({"wendy": "W", "jack": "J"}, value=users_table.c.name, else_="E") + ) + + The values which are accepted as result values in + :paramref:`.case.whens` as well as with :paramref:`.case.else_` are + coerced from Python literals into :func:`.bindparam` constructs. + SQL expressions, e.g. :class:`_expression.ColumnElement` constructs, + are accepted + as well. To coerce a literal string expression into a constant + expression rendered inline, use the :func:`_expression.literal_column` + construct, + as in:: + + from sqlalchemy import case, literal_column + + case( + (orderline.c.qty > 100, literal_column("'greaterthan100'")), + (orderline.c.qty > 10, literal_column("'greaterthan10'")), + else_=literal_column("'lessthan10'"), + ) + + The above will render the given constants without using bound + parameters for the result values (but still for the comparison + values), as in: + + .. sourcecode:: sql + + CASE + WHEN (orderline.qty > :qty_1) THEN 'greaterthan100' + WHEN (orderline.qty > :qty_2) THEN 'greaterthan10' + ELSE 'lessthan10' + END + + :param \*whens: The criteria to be compared against, + :paramref:`.case.whens` accepts two different forms, based on + whether or not :paramref:`.case.value` is used. + + .. versionchanged:: 1.4 the :func:`_sql.case` + function now accepts the series of WHEN conditions positionally + + In the first form, it accepts multiple 2-tuples passed as positional + arguments; each 2-tuple consists of ``(, )``, + where the SQL expression is a boolean expression and "value" is a + resulting value, e.g.:: + + case( + (users_table.c.name == "wendy", "W"), + (users_table.c.name == "jack", "J"), + ) + + In the second form, it accepts a Python dictionary of comparison + values mapped to a resulting value; this form requires + :paramref:`.case.value` to be present, and values will be compared + using the ``==`` operator, e.g.:: + + case({"wendy": "W", "jack": "J"}, value=users_table.c.name) + + :param value: An optional SQL expression which will be used as a + fixed "comparison point" for candidate values within a dictionary + passed to :paramref:`.case.whens`. + + :param else\_: An optional SQL expression which will be the evaluated + result of the ``CASE`` construct if all expressions within + :paramref:`.case.whens` evaluate to false. 
When omitted, most + databases will produce a result of NULL if none of the "when" + expressions evaluate to true. + + + """ # noqa: E501 + return Case(*whens, value=value, else_=else_) + + +def cast( + expression: _ColumnExpressionOrLiteralArgument[Any], + type_: _TypeEngineArgument[_T], +) -> Cast[_T]: + r"""Produce a ``CAST`` expression. + + :func:`.cast` returns an instance of :class:`.Cast`. + + E.g.:: + + from sqlalchemy import cast, Numeric + + stmt = select(cast(product_table.c.unit_price, Numeric(10, 4))) + + The above statement will produce SQL resembling: + + .. sourcecode:: sql + + SELECT CAST(unit_price AS NUMERIC(10, 4)) FROM product + + The :func:`.cast` function performs two distinct functions when + used. The first is that it renders the ``CAST`` expression within + the resulting SQL string. The second is that it associates the given + type (e.g. :class:`.TypeEngine` class or instance) with the column + expression on the Python side, which means the expression will take + on the expression operator behavior associated with that type, + as well as the bound-value handling and result-row-handling behavior + of the type. + + An alternative to :func:`.cast` is the :func:`.type_coerce` function. + This function performs the second task of associating an expression + with a specific type, but does not render the ``CAST`` expression + in SQL. + + :param expression: A SQL expression, such as a + :class:`_expression.ColumnElement` + expression or a Python string which will be coerced into a bound + literal value. + + :param type\_: A :class:`.TypeEngine` class or instance indicating + the type to which the ``CAST`` should apply. + + .. seealso:: + + :ref:`tutorial_casts` + + :func:`.try_cast` - an alternative to CAST that results in + NULLs when the cast fails, instead of raising an error. + Only supported by some dialects. + + :func:`.type_coerce` - an alternative to CAST that coerces the type + on the Python side only, which is often sufficient to generate the + correct SQL and data coercion. + + + """ + return Cast(expression, type_) + + +def try_cast( + expression: _ColumnExpressionOrLiteralArgument[Any], + type_: _TypeEngineArgument[_T], +) -> TryCast[_T]: + """Produce a ``TRY_CAST`` expression for backends which support it; + this is a ``CAST`` which returns NULL for un-castable conversions. + + In SQLAlchemy, this construct is supported **only** by the SQL Server + dialect, and will raise a :class:`.CompileError` if used on other + included backends. However, third party backends may also support + this construct. + + .. tip:: As :func:`_sql.try_cast` originates from the SQL Server dialect, + it's importable both from ``sqlalchemy.`` as well as from + ``sqlalchemy.dialects.mssql``. + + :func:`_sql.try_cast` returns an instance of :class:`.TryCast` and + generally behaves similarly to the :class:`.Cast` construct; + at the SQL level, the difference between ``CAST`` and ``TRY_CAST`` + is that ``TRY_CAST`` returns NULL for an un-castable expression, + such as attempting to cast a string ``"hi"`` to an integer value. + + E.g.:: + + from sqlalchemy import select, try_cast, Numeric + + stmt = select(try_cast(product_table.c.unit_price, Numeric(10, 4))) + + The above would render on Microsoft SQL Server as: + + .. sourcecode:: sql + + SELECT TRY_CAST (product_table.unit_price AS NUMERIC(10, 4)) + FROM product_table + + .. 
versionadded:: 2.0.14 :func:`.try_cast` has been + generalized from the SQL Server dialect into a general use + construct that may be supported by additional dialects. + + """ + return TryCast(expression, type_) + + +def column( + text: str, + type_: Optional[_TypeEngineArgument[_T]] = None, + is_literal: bool = False, + _selectable: Optional[FromClause] = None, +) -> ColumnClause[_T]: + """Produce a :class:`.ColumnClause` object. + + The :class:`.ColumnClause` is a lightweight analogue to the + :class:`_schema.Column` class. The :func:`_expression.column` + function can + be invoked with just a name alone, as in:: + + from sqlalchemy import column + + id, name = column("id"), column("name") + stmt = select(id, name).select_from("user") + + The above statement would produce SQL like: + + .. sourcecode:: sql + + SELECT id, name FROM user + + Once constructed, :func:`_expression.column` + may be used like any other SQL + expression element such as within :func:`_expression.select` + constructs:: + + from sqlalchemy.sql import column + + id, name = column("id"), column("name") + stmt = select(id, name).select_from("user") + + The text handled by :func:`_expression.column` + is assumed to be handled + like the name of a database column; if the string contains mixed case, + special characters, or matches a known reserved word on the target + backend, the column expression will render using the quoting + behavior determined by the backend. To produce a textual SQL + expression that is rendered exactly without any quoting, + use :func:`_expression.literal_column` instead, + or pass ``True`` as the + value of :paramref:`_expression.column.is_literal`. Additionally, + full SQL + statements are best handled using the :func:`_expression.text` + construct. + + :func:`_expression.column` can be used in a table-like + fashion by combining it with the :func:`.table` function + (which is the lightweight analogue to :class:`_schema.Table` + ) to produce + a working table construct with minimal boilerplate:: + + from sqlalchemy import table, column, select + + user = table( + "user", + column("id"), + column("name"), + column("description"), + ) + + stmt = select(user.c.description).where(user.c.name == "wendy") + + A :func:`_expression.column` / :func:`.table` + construct like that illustrated + above can be created in an + ad-hoc fashion and is not associated with any + :class:`_schema.MetaData`, DDL, or events, unlike its + :class:`_schema.Table` counterpart. + + :param text: the text of the element. + + :param type: :class:`_types.TypeEngine` object which can associate + this :class:`.ColumnClause` with a type. + + :param is_literal: if True, the :class:`.ColumnClause` is assumed to + be an exact expression that will be delivered to the output with no + quoting rules applied regardless of case sensitive settings. the + :func:`_expression.literal_column()` function essentially invokes + :func:`_expression.column` while passing ``is_literal=True``. + + .. seealso:: + + :class:`_schema.Column` + + :func:`_expression.literal_column` + + :func:`.table` + + :func:`_expression.text` + + :ref:`tutorial_select_arbitrary_text` + + """ + return ColumnClause(text, type_, is_literal, _selectable) + + +def desc( + column: _ColumnExpressionOrStrLabelArgument[_T], +) -> UnaryExpression[_T]: + """Produce a descending ``ORDER BY`` clause element. + + e.g.:: + + from sqlalchemy import desc + + stmt = select(users_table).order_by(desc(users_table.c.name)) + + will produce SQL as: + + .. 
sourcecode:: sql + + SELECT id, name FROM user ORDER BY name DESC + + The :func:`.desc` function is a standalone version of the + :meth:`_expression.ColumnElement.desc` + method available on all SQL expressions, + e.g.:: + + + stmt = select(users_table).order_by(users_table.c.name.desc()) + + :param column: A :class:`_expression.ColumnElement` (e.g. + scalar SQL expression) + with which to apply the :func:`.desc` operation. + + .. seealso:: + + :func:`.asc` + + :func:`.nulls_first` + + :func:`.nulls_last` + + :meth:`_expression.Select.order_by` + + """ + return UnaryExpression._create_desc(column) + + +def distinct(expr: _ColumnExpressionArgument[_T]) -> UnaryExpression[_T]: + """Produce an column-expression-level unary ``DISTINCT`` clause. + + This applies the ``DISTINCT`` keyword to an **individual column + expression** (e.g. not the whole statement), and renders **specifically + in that column position**; this is used for containment within + an aggregate function, as in:: + + from sqlalchemy import distinct, func + + stmt = select(users_table.c.id, func.count(distinct(users_table.c.name))) + + The above would produce an statement resembling: + + .. sourcecode:: sql + + SELECT user.id, count(DISTINCT user.name) FROM user + + .. tip:: The :func:`_sql.distinct` function does **not** apply DISTINCT + to the full SELECT statement, instead applying a DISTINCT modifier + to **individual column expressions**. For general ``SELECT DISTINCT`` + support, use the + :meth:`_sql.Select.distinct` method on :class:`_sql.Select`. + + The :func:`.distinct` function is also available as a column-level + method, e.g. :meth:`_expression.ColumnElement.distinct`, as in:: + + stmt = select(func.count(users_table.c.name.distinct())) + + The :func:`.distinct` operator is different from the + :meth:`_expression.Select.distinct` method of + :class:`_expression.Select`, + which produces a ``SELECT`` statement + with ``DISTINCT`` applied to the result set as a whole, + e.g. a ``SELECT DISTINCT`` expression. See that method for further + information. + + .. seealso:: + + :meth:`_expression.ColumnElement.distinct` + + :meth:`_expression.Select.distinct` + + :data:`.func` + + """ # noqa: E501 + return UnaryExpression._create_distinct(expr) + + +def bitwise_not(expr: _ColumnExpressionArgument[_T]) -> UnaryExpression[_T]: + """Produce a unary bitwise NOT clause, typically via the ``~`` operator. + + Not to be confused with boolean negation :func:`_sql.not_`. + + .. versionadded:: 2.0.2 + + .. seealso:: + + :ref:`operators_bitwise` + + + """ + + return UnaryExpression._create_bitwise_not(expr) + + +def extract(field: str, expr: _ColumnExpressionArgument[Any]) -> Extract: + """Return a :class:`.Extract` construct. + + This is typically available as :func:`.extract` + as well as ``func.extract`` from the + :data:`.func` namespace. + + :param field: The field to extract. + + .. warning:: This field is used as a literal SQL string. + **DO NOT PASS UNTRUSTED INPUT TO THIS STRING**. + + :param expr: A column or Python scalar expression serving as the + right side of the ``EXTRACT`` expression. + + E.g.:: + + from sqlalchemy import extract + from sqlalchemy import table, column + + logged_table = table( + "user", + column("id"), + column("date_created"), + ) + + stmt = select(logged_table.c.id).where( + extract("YEAR", logged_table.c.date_created) == 2021 + ) + + In the above example, the statement is used to select ids from the + database where the ``YEAR`` component matches a specific value. 
+ + Similarly, one can also select an extracted component:: + + stmt = select(extract("YEAR", logged_table.c.date_created)).where( + logged_table.c.id == 1 + ) + + The implementation of ``EXTRACT`` may vary across database backends. + Users are reminded to consult their database documentation. + """ + return Extract(field, expr) + + +def false() -> False_: + """Return a :class:`.False_` construct. + + E.g.: + + .. sourcecode:: pycon+sql + + >>> from sqlalchemy import false + >>> print(select(t.c.x).where(false())) + {printsql}SELECT x FROM t WHERE false + + A backend which does not support true/false constants will render as + an expression against 1 or 0: + + .. sourcecode:: pycon+sql + + >>> print(select(t.c.x).where(false())) + {printsql}SELECT x FROM t WHERE 0 = 1 + + The :func:`.true` and :func:`.false` constants also feature + "short circuit" operation within an :func:`.and_` or :func:`.or_` + conjunction: + + .. sourcecode:: pycon+sql + + >>> print(select(t.c.x).where(or_(t.c.x > 5, true()))) + {printsql}SELECT x FROM t WHERE true{stop} + + >>> print(select(t.c.x).where(and_(t.c.x > 5, false()))) + {printsql}SELECT x FROM t WHERE false{stop} + + .. seealso:: + + :func:`.true` + + """ + + return False_._instance() + + +def funcfilter( + func: FunctionElement[_T], *criterion: _ColumnExpressionArgument[bool] +) -> FunctionFilter[_T]: + """Produce a :class:`.FunctionFilter` object against a function. + + Used against aggregate and window functions, + for database backends that support the "FILTER" clause. + + E.g.:: + + from sqlalchemy import funcfilter + + funcfilter(func.count(1), MyClass.name == "some name") + + Would produce "COUNT(1) FILTER (WHERE myclass.name = 'some name')". + + This function is also available from the :data:`~.expression.func` + construct itself via the :meth:`.FunctionElement.filter` method. + + .. seealso:: + + :ref:`tutorial_functions_within_group` - in the + :ref:`unified_tutorial` + + :meth:`.FunctionElement.filter` + + """ + return FunctionFilter(func, *criterion) + + +def label( + name: str, + element: _ColumnExpressionArgument[_T], + type_: Optional[_TypeEngineArgument[_T]] = None, +) -> Label[_T]: + """Return a :class:`Label` object for the + given :class:`_expression.ColumnElement`. + + A label changes the name of an element in the columns clause of a + ``SELECT`` statement, typically via the ``AS`` SQL keyword. + + This functionality is more conveniently available via the + :meth:`_expression.ColumnElement.label` method on + :class:`_expression.ColumnElement`. + + :param name: label name + + :param obj: a :class:`_expression.ColumnElement`. + + """ + return Label(name, element, type_) + + +def null() -> Null: + """Return a constant :class:`.Null` construct.""" + + return Null._instance() + + +def nulls_first(column: _ColumnExpressionArgument[_T]) -> UnaryExpression[_T]: + """Produce the ``NULLS FIRST`` modifier for an ``ORDER BY`` expression. + + :func:`.nulls_first` is intended to modify the expression produced + by :func:`.asc` or :func:`.desc`, and indicates how NULL values + should be handled when they are encountered during ordering:: + + + from sqlalchemy import desc, nulls_first + + stmt = select(users_table).order_by(nulls_first(desc(users_table.c.name))) + + The SQL expression from the above would resemble: + + .. 
sourcecode:: sql + + SELECT id, name FROM user ORDER BY name DESC NULLS FIRST + + Like :func:`.asc` and :func:`.desc`, :func:`.nulls_first` is typically + invoked from the column expression itself using + :meth:`_expression.ColumnElement.nulls_first`, + rather than as its standalone + function version, as in:: + + stmt = select(users_table).order_by( + users_table.c.name.desc().nulls_first() + ) + + .. versionchanged:: 1.4 :func:`.nulls_first` is renamed from + :func:`.nullsfirst` in previous releases. + The previous name remains available for backwards compatibility. + + .. seealso:: + + :func:`.asc` + + :func:`.desc` + + :func:`.nulls_last` + + :meth:`_expression.Select.order_by` + + """ # noqa: E501 + return UnaryExpression._create_nulls_first(column) + + +def nulls_last(column: _ColumnExpressionArgument[_T]) -> UnaryExpression[_T]: + """Produce the ``NULLS LAST`` modifier for an ``ORDER BY`` expression. + + :func:`.nulls_last` is intended to modify the expression produced + by :func:`.asc` or :func:`.desc`, and indicates how NULL values + should be handled when they are encountered during ordering:: + + + from sqlalchemy import desc, nulls_last + + stmt = select(users_table).order_by(nulls_last(desc(users_table.c.name))) + + The SQL expression from the above would resemble: + + .. sourcecode:: sql + + SELECT id, name FROM user ORDER BY name DESC NULLS LAST + + Like :func:`.asc` and :func:`.desc`, :func:`.nulls_last` is typically + invoked from the column expression itself using + :meth:`_expression.ColumnElement.nulls_last`, + rather than as its standalone + function version, as in:: + + stmt = select(users_table).order_by(users_table.c.name.desc().nulls_last()) + + .. versionchanged:: 1.4 :func:`.nulls_last` is renamed from + :func:`.nullslast` in previous releases. + The previous name remains available for backwards compatibility. + + .. seealso:: + + :func:`.asc` + + :func:`.desc` + + :func:`.nulls_first` + + :meth:`_expression.Select.order_by` + + """ # noqa: E501 + return UnaryExpression._create_nulls_last(column) + + +def or_( # type: ignore[empty-body] + initial_clause: Union[Literal[False], _ColumnExpressionArgument[bool]], + *clauses: _ColumnExpressionArgument[bool], +) -> ColumnElement[bool]: + """Produce a conjunction of expressions joined by ``OR``. + + E.g.:: + + from sqlalchemy import or_ + + stmt = select(users_table).where( + or_(users_table.c.name == "wendy", users_table.c.name == "jack") + ) + + The :func:`.or_` conjunction is also available using the + Python ``|`` operator (though note that compound expressions + need to be parenthesized in order to function with Python + operator precedence behavior):: + + stmt = select(users_table).where( + (users_table.c.name == "wendy") | (users_table.c.name == "jack") + ) + + The :func:`.or_` construct must be given at least one positional + argument in order to be valid; a :func:`.or_` construct with no + arguments is ambiguous. To produce an "empty" or dynamically + generated :func:`.or_` expression, from a given list of expressions, + a "default" element of :func:`_sql.false` (or just ``False``) should be + specified:: + + from sqlalchemy import false + + or_criteria = or_(false(), *expressions) + + The above expression will compile to SQL as the expression ``false`` + or ``0 = 1``, depending on backend, if no other expressions are + present. If expressions are present, then the :func:`_sql.false` value is + ignored as it does not affect the outcome of an OR expression which + has other elements. + + .. 
deprecated:: 1.4 The :func:`.or_` element now requires that at + least one argument is passed; creating the :func:`.or_` construct + with no arguments is deprecated, and will emit a deprecation warning + while continuing to produce a blank SQL string. + + .. seealso:: + + :func:`.and_` + + """ + ... + + +if not TYPE_CHECKING: + # handle deprecated case which allows zero-arguments + def or_(*clauses): # noqa: F811 + """Produce a conjunction of expressions joined by ``OR``. + + E.g.:: + + from sqlalchemy import or_ + + stmt = select(users_table).where( + or_(users_table.c.name == "wendy", users_table.c.name == "jack") + ) + + The :func:`.or_` conjunction is also available using the + Python ``|`` operator (though note that compound expressions + need to be parenthesized in order to function with Python + operator precedence behavior):: + + stmt = select(users_table).where( + (users_table.c.name == "wendy") | (users_table.c.name == "jack") + ) + + The :func:`.or_` construct must be given at least one positional + argument in order to be valid; a :func:`.or_` construct with no + arguments is ambiguous. To produce an "empty" or dynamically + generated :func:`.or_` expression, from a given list of expressions, + a "default" element of :func:`_sql.false` (or just ``False``) should be + specified:: + + from sqlalchemy import false + + or_criteria = or_(false(), *expressions) + + The above expression will compile to SQL as the expression ``false`` + or ``0 = 1``, depending on backend, if no other expressions are + present. If expressions are present, then the :func:`_sql.false` value + is ignored as it does not affect the outcome of an OR expression which + has other elements. + + .. deprecated:: 1.4 The :func:`.or_` element now requires that at + least one argument is passed; creating the :func:`.or_` construct + with no arguments is deprecated, and will emit a deprecation warning + while continuing to produce a blank SQL string. + + .. seealso:: + + :func:`.and_` + + """ # noqa: E501 + return BooleanClauseList.or_(*clauses) + + +def over( + element: FunctionElement[_T], + partition_by: Optional[_ByArgument] = None, + order_by: Optional[_ByArgument] = None, + range_: Optional[typing_Tuple[Optional[int], Optional[int]]] = None, + rows: Optional[typing_Tuple[Optional[int], Optional[int]]] = None, + groups: Optional[typing_Tuple[Optional[int], Optional[int]]] = None, +) -> Over[_T]: + r"""Produce an :class:`.Over` object against a function. + + Used against aggregate or so-called "window" functions, + for database backends that support window functions. + + :func:`_expression.over` is usually called using + the :meth:`.FunctionElement.over` method, e.g.:: + + func.row_number().over(order_by=mytable.c.some_column) + + Would produce: + + .. sourcecode:: sql + + ROW_NUMBER() OVER(ORDER BY some_column) + + Ranges are also possible using the :paramref:`.expression.over.range_`, + :paramref:`.expression.over.rows`, and :paramref:`.expression.over.groups` + parameters. These + mutually-exclusive parameters each accept a 2-tuple, which contains + a combination of integers and None:: + + func.row_number().over(order_by=my_table.c.some_column, range_=(None, 0)) + + The above would produce: + + .. 
sourcecode:: sql + + ROW_NUMBER() OVER(ORDER BY some_column + RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) + + A value of ``None`` indicates "unbounded", a + value of zero indicates "current row", and negative / positive + integers indicate "preceding" and "following": + + * RANGE BETWEEN 5 PRECEDING AND 10 FOLLOWING:: + + func.row_number().over(order_by="x", range_=(-5, 10)) + + * ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW:: + + func.row_number().over(order_by="x", rows=(None, 0)) + + * RANGE BETWEEN 2 PRECEDING AND UNBOUNDED FOLLOWING:: + + func.row_number().over(order_by="x", range_=(-2, None)) + + * RANGE BETWEEN 1 FOLLOWING AND 3 FOLLOWING:: + + func.row_number().over(order_by="x", range_=(1, 3)) + + * GROUPS BETWEEN 1 FOLLOWING AND 3 FOLLOWING:: + + func.row_number().over(order_by="x", groups=(1, 3)) + + :param element: a :class:`.FunctionElement`, :class:`.WithinGroup`, + or other compatible construct. + :param partition_by: a column element or string, or a list + of such, that will be used as the PARTITION BY clause + of the OVER construct. + :param order_by: a column element or string, or a list + of such, that will be used as the ORDER BY clause + of the OVER construct. + :param range\_: optional range clause for the window. This is a + tuple value which can contain integer values or ``None``, + and will render a RANGE BETWEEN PRECEDING / FOLLOWING clause. + :param rows: optional rows clause for the window. This is a tuple + value which can contain integer values or None, and will render + a ROWS BETWEEN PRECEDING / FOLLOWING clause. + :param groups: optional groups clause for the window. This is a + tuple value which can contain integer values or ``None``, + and will render a GROUPS BETWEEN PRECEDING / FOLLOWING clause. + + .. versionadded:: 2.0.40 + + This function is also available from the :data:`~.expression.func` + construct itself via the :meth:`.FunctionElement.over` method. + + .. seealso:: + + :ref:`tutorial_window_functions` - in the :ref:`unified_tutorial` + + :data:`.expression.func` + + :func:`_expression.within_group` + + """ # noqa: E501 + return Over(element, partition_by, order_by, range_, rows, groups) + + +@_document_text_coercion("text", ":func:`.text`", ":paramref:`.text.text`") +def text(text: str) -> TextClause: + r"""Construct a new :class:`_expression.TextClause` clause, + representing + a textual SQL string directly. + + E.g.:: + + from sqlalchemy import text + + t = text("SELECT * FROM users") + result = connection.execute(t) + + The advantages :func:`_expression.text` + provides over a plain string are + backend-neutral support for bind parameters, per-statement + execution options, as well as + bind parameter and result-column typing behavior, allowing + SQLAlchemy type constructs to play a role when executing + a statement that is specified literally. The construct can also + be provided with a ``.c`` collection of column elements, allowing + it to be embedded in other SQL expression constructs as a subquery. + + Bind parameters are specified by name, using the format ``:name``. 
+ E.g.:: + + t = text("SELECT * FROM users WHERE id=:user_id") + result = connection.execute(t, {"user_id": 12}) + + For SQL statements where a colon is required verbatim, as within + an inline string, use a backslash to escape:: + + t = text(r"SELECT * FROM users WHERE name='\:username'") + + The :class:`_expression.TextClause` + construct includes methods which can + provide information about the bound parameters as well as the column + values which would be returned from the textual statement, assuming + it's an executable SELECT type of statement. The + :meth:`_expression.TextClause.bindparams` + method is used to provide bound + parameter detail, and :meth:`_expression.TextClause.columns` + method allows + specification of return columns including names and types:: + + t = ( + text("SELECT * FROM users WHERE id=:user_id") + .bindparams(user_id=7) + .columns(id=Integer, name=String) + ) + + for id, name in connection.execute(t): + print(id, name) + + The :func:`_expression.text` construct is used in cases when + a literal string SQL fragment is specified as part of a larger query, + such as for the WHERE clause of a SELECT statement:: + + s = select(users.c.id, users.c.name).where(text("id=:user_id")) + result = connection.execute(s, {"user_id": 12}) + + :func:`_expression.text` is also used for the construction + of a full, standalone statement using plain text. + As such, SQLAlchemy refers + to it as an :class:`.Executable` object and may be used + like any other statement passed to an ``.execute()`` method. + + :param text: + the text of the SQL statement to be created. Use ``:`` + to specify bind parameters; they will be compiled to their + engine-specific format. + + .. seealso:: + + :ref:`tutorial_select_arbitrary_text` + + """ + return TextClause(text) + + +def true() -> True_: + """Return a constant :class:`.True_` construct. + + E.g.: + + .. sourcecode:: pycon+sql + + >>> from sqlalchemy import true + >>> print(select(t.c.x).where(true())) + {printsql}SELECT x FROM t WHERE true + + A backend which does not support true/false constants will render as + an expression against 1 or 0: + + .. sourcecode:: pycon+sql + + >>> print(select(t.c.x).where(true())) + {printsql}SELECT x FROM t WHERE 1 = 1 + + The :func:`.true` and :func:`.false` constants also feature + "short circuit" operation within an :func:`.and_` or :func:`.or_` + conjunction: + + .. sourcecode:: pycon+sql + + >>> print(select(t.c.x).where(or_(t.c.x > 5, true()))) + {printsql}SELECT x FROM t WHERE true{stop} + + >>> print(select(t.c.x).where(and_(t.c.x > 5, false()))) + {printsql}SELECT x FROM t WHERE false{stop} + + .. seealso:: + + :func:`.false` + + """ + + return True_._instance() + + +def tuple_( + *clauses: _ColumnExpressionArgument[Any], + types: Optional[Sequence[_TypeEngineArgument[Any]]] = None, +) -> Tuple: + """Return a :class:`.Tuple`. + + Main usage is to produce a composite IN construct using + :meth:`.ColumnOperators.in_` :: + + from sqlalchemy import tuple_ + + tuple_(table.c.col1, table.c.col2).in_([(1, 2), (5, 12), (10, 19)]) + + .. versionchanged:: 1.3.6 Added support for SQLite IN tuples. + + .. warning:: + + The composite IN construct is not supported by all backends, and is + currently known to work on PostgreSQL, MySQL, and SQLite. + Unsupported backends will raise a subclass of + :class:`~sqlalchemy.exc.DBAPIError` when such an expression is + invoked. 
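As an illustrative sketch only (``table`` and ``connection`` stand in for an application's own objects), the composite IN shown above is typically embedded in a full statement and executed as usual::

    from sqlalchemy import select, tuple_

    stmt = select(table).where(
        tuple_(table.c.col1, table.c.col2).in_([(1, 2), (5, 12), (10, 19)])
    )
    result = connection.execute(stmt)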
+ + """ + return Tuple(*clauses, types=types) + + +def type_coerce( + expression: _ColumnExpressionOrLiteralArgument[Any], + type_: _TypeEngineArgument[_T], +) -> TypeCoerce[_T]: + r"""Associate a SQL expression with a particular type, without rendering + ``CAST``. + + E.g.:: + + from sqlalchemy import type_coerce + + stmt = select(type_coerce(log_table.date_string, StringDateTime())) + + The above construct will produce a :class:`.TypeCoerce` object, which + does not modify the rendering in any way on the SQL side, with the + possible exception of a generated label if used in a columns clause + context: + + .. sourcecode:: sql + + SELECT date_string AS date_string FROM log + + When result rows are fetched, the ``StringDateTime`` type processor + will be applied to result rows on behalf of the ``date_string`` column. + + .. note:: the :func:`.type_coerce` construct does not render any + SQL syntax of its own, including that it does not imply + parenthesization. Please use :meth:`.TypeCoerce.self_group` + if explicit parenthesization is required. + + In order to provide a named label for the expression, use + :meth:`_expression.ColumnElement.label`:: + + stmt = select( + type_coerce(log_table.date_string, StringDateTime()).label("date") + ) + + A type that features bound-value handling will also have that behavior + take effect when literal values or :func:`.bindparam` constructs are + passed to :func:`.type_coerce` as targets. + For example, if a type implements the + :meth:`.TypeEngine.bind_expression` + method or :meth:`.TypeEngine.bind_processor` method or equivalent, + these functions will take effect at statement compilation/execution + time when a literal value is passed, as in:: + + # bound-value handling of MyStringType will be applied to the + # literal value "some string" + stmt = select(type_coerce("some string", MyStringType)) + + When using :func:`.type_coerce` with composed expressions, note that + **parenthesis are not applied**. If :func:`.type_coerce` is being + used in an operator context where the parenthesis normally present from + CAST are necessary, use the :meth:`.TypeCoerce.self_group` method: + + .. sourcecode:: pycon+sql + + >>> some_integer = column("someint", Integer) + >>> some_string = column("somestr", String) + >>> expr = type_coerce(some_integer + 5, String) + some_string + >>> print(expr) + {printsql}someint + :someint_1 || somestr{stop} + >>> expr = type_coerce(some_integer + 5, String).self_group() + some_string + >>> print(expr) + {printsql}(someint + :someint_1) || somestr{stop} + + :param expression: A SQL expression, such as a + :class:`_expression.ColumnElement` + expression or a Python string which will be coerced into a bound + literal value. + + :param type\_: A :class:`.TypeEngine` class or instance indicating + the type to which the expression is coerced. + + .. seealso:: + + :ref:`tutorial_casts` + + :func:`.cast` + + """ # noqa + return TypeCoerce(expression, type_) + + +def within_group( + element: FunctionElement[_T], *order_by: _ColumnExpressionArgument[Any] +) -> WithinGroup[_T]: + r"""Produce a :class:`.WithinGroup` object against a function. + + Used against so-called "ordered set aggregate" and "hypothetical + set aggregate" functions, including :class:`.percentile_cont`, + :class:`.rank`, :class:`.dense_rank`, etc. 
+ + :func:`_expression.within_group` is usually called using + the :meth:`.FunctionElement.within_group` method, e.g.:: + + from sqlalchemy import within_group + + stmt = select( + department.c.id, + func.percentile_cont(0.5).within_group(department.c.salary.desc()), + ) + + The above statement would produce SQL similar to + ``SELECT department.id, percentile_cont(0.5) + WITHIN GROUP (ORDER BY department.salary DESC)``. + + :param element: a :class:`.FunctionElement` construct, typically + generated by :data:`~.expression.func`. + :param \*order_by: one or more column elements that will be used + as the ORDER BY clause of the WITHIN GROUP construct. + + .. seealso:: + + :ref:`tutorial_functions_within_group` - in the + :ref:`unified_tutorial` + + :data:`.expression.func` + + :func:`_expression.over` + + """ + return WithinGroup(element, *order_by) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/sql/_orm_types.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/sql/_orm_types.py new file mode 100644 index 0000000000000000000000000000000000000000..c37d805ef3fb52ec2735d5431e9613eb30c94220 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/sql/_orm_types.py @@ -0,0 +1,20 @@ +# sql/_orm_types.py +# Copyright (C) 2022-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php + +"""ORM types that need to present specifically for **documentation only** of +the Executable.execution_options() method, which includes options that +are meaningful to the ORM. + +""" + + +from __future__ import annotations + +from ..util.typing import Literal + +SynchronizeSessionArgument = Literal[False, "auto", "evaluate", "fetch"] +DMLStrategyArgument = Literal["bulk", "raw", "orm", "auto"] diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/sql/_py_util.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/sql/_py_util.py new file mode 100644 index 0000000000000000000000000000000000000000..9e1a084a3f5a8e1a3a4d784417d6efbce43cd2a7 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/sql/_py_util.py @@ -0,0 +1,75 @@ +# sql/_py_util.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php + +from __future__ import annotations + +import typing +from typing import Any +from typing import Dict +from typing import Tuple +from typing import Union + +from ..util.typing import Literal + +if typing.TYPE_CHECKING: + from .cache_key import CacheConst + + +class prefix_anon_map(Dict[str, str]): + """A map that creates new keys for missing key access. + + Considers keys of the form " " to produce + new symbols "_", where "index" is an incrementing integer + corresponding to . + + Inlines the approach taken by :class:`sqlalchemy.util.PopulateDict` which + is otherwise usually used for this type of operation. 
+ + """ + + def __missing__(self, key: str) -> str: + (ident, derived) = key.split(" ", 1) + anonymous_counter = self.get(derived, 1) + self[derived] = anonymous_counter + 1 # type: ignore + value = f"{derived}_{anonymous_counter}" + self[key] = value + return value + + +class cache_anon_map( + Dict[Union[int, "Literal[CacheConst.NO_CACHE]"], Union[Literal[True], str]] +): + """A map that creates new keys for missing key access. + + Produces an incrementing sequence given a series of unique keys. + + This is similar to the compiler prefix_anon_map class although simpler. + + Inlines the approach taken by :class:`sqlalchemy.util.PopulateDict` which + is otherwise usually used for this type of operation. + + """ + + _index = 0 + + def get_anon(self, object_: Any) -> Tuple[str, bool]: + idself = id(object_) + if idself in self: + s_val = self[idself] + assert s_val is not True + return s_val, True + else: + # inline of __missing__ + self[idself] = id_ = str(self._index) + self._index += 1 + + return id_, False + + def __missing__(self, key: int) -> str: + self[key] = val = str(self._index) + self._index += 1 + return val diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/sql/_selectable_constructors.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/sql/_selectable_constructors.py new file mode 100644 index 0000000000000000000000000000000000000000..ae83efa5d7901032e0c8a24cb5d4f8409d489e34 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/sql/_selectable_constructors.py @@ -0,0 +1,715 @@ +# sql/_selectable_constructors.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php + +from __future__ import annotations + +from typing import Any +from typing import Optional +from typing import overload +from typing import Tuple +from typing import TYPE_CHECKING +from typing import Union + +from . import coercions +from . 
import roles +from ._typing import _ColumnsClauseArgument +from ._typing import _no_kw +from .elements import ColumnClause +from .selectable import Alias +from .selectable import CompoundSelect +from .selectable import Exists +from .selectable import FromClause +from .selectable import Join +from .selectable import Lateral +from .selectable import LateralFromClause +from .selectable import NamedFromClause +from .selectable import Select +from .selectable import TableClause +from .selectable import TableSample +from .selectable import Values + +if TYPE_CHECKING: + from ._typing import _FromClauseArgument + from ._typing import _OnClauseArgument + from ._typing import _SelectStatementForCompoundArgument + from ._typing import _T0 + from ._typing import _T1 + from ._typing import _T2 + from ._typing import _T3 + from ._typing import _T4 + from ._typing import _T5 + from ._typing import _T6 + from ._typing import _T7 + from ._typing import _T8 + from ._typing import _T9 + from ._typing import _TP + from ._typing import _TypedColumnClauseArgument as _TCCA + from .functions import Function + from .selectable import CTE + from .selectable import HasCTE + from .selectable import ScalarSelect + from .selectable import SelectBase + + +def alias( + selectable: FromClause, name: Optional[str] = None, flat: bool = False +) -> NamedFromClause: + """Return a named alias of the given :class:`.FromClause`. + + For :class:`.Table` and :class:`.Join` objects, the return type is the + :class:`_expression.Alias` object. Other kinds of :class:`.NamedFromClause` + objects may be returned for other kinds of :class:`.FromClause` objects. + + The named alias represents any :class:`_expression.FromClause` with an + alternate name assigned within SQL, typically using the ``AS`` clause when + generated, e.g. ``SELECT * FROM table AS aliasname``. + + Equivalent functionality is available via the + :meth:`_expression.FromClause.alias` + method available on all :class:`_expression.FromClause` objects. + + :param selectable: any :class:`_expression.FromClause` subclass, + such as a table, select statement, etc. + + :param name: string name to be assigned as the alias. + If ``None``, a name will be deterministically generated at compile + time. Deterministic means the name is guaranteed to be unique against + other constructs used in the same statement, and will also be the same + name for each successive compilation of the same statement object. + + :param flat: Will be passed through to if the given selectable + is an instance of :class:`_expression.Join` - see + :meth:`_expression.Join.alias` for details. + + """ + return Alias._factory(selectable, name=name, flat=flat) + + +def cte( + selectable: HasCTE, name: Optional[str] = None, recursive: bool = False +) -> CTE: + r"""Return a new :class:`_expression.CTE`, + or Common Table Expression instance. + + Please see :meth:`_expression.HasCTE.cte` for detail on CTE usage. + + """ + return coercions.expect(roles.HasCTERole, selectable).cte( + name=name, recursive=recursive + ) + + +# TODO: mypy requires the _TypedSelectable overloads in all compound select +# constructors since _SelectStatementForCompoundArgument includes +# untyped args that make it return CompoundSelect[Unpack[tuple[Never, ...]]] +# pyright does not have this issue +_TypedSelectable = Union["Select[_TP]", "CompoundSelect[_TP]"] + + +@overload +def except_( + *selects: _TypedSelectable[_TP], +) -> CompoundSelect[_TP]: ... 
+ + +@overload +def except_( + *selects: _SelectStatementForCompoundArgument[_TP], +) -> CompoundSelect[_TP]: ... + + +def except_( + *selects: _SelectStatementForCompoundArgument[_TP], +) -> CompoundSelect[_TP]: + r"""Return an ``EXCEPT`` of multiple selectables. + + The returned object is an instance of + :class:`_expression.CompoundSelect`. + + :param \*selects: + a list of :class:`_expression.Select` instances. + + """ + return CompoundSelect._create_except(*selects) + + +@overload +def except_all( + *selects: _TypedSelectable[_TP], +) -> CompoundSelect[_TP]: ... + + +@overload +def except_all( + *selects: _SelectStatementForCompoundArgument[_TP], +) -> CompoundSelect[_TP]: ... + + +def except_all( + *selects: _SelectStatementForCompoundArgument[_TP], +) -> CompoundSelect[_TP]: + r"""Return an ``EXCEPT ALL`` of multiple selectables. + + The returned object is an instance of + :class:`_expression.CompoundSelect`. + + :param \*selects: + a list of :class:`_expression.Select` instances. + + """ + return CompoundSelect._create_except_all(*selects) + + +def exists( + __argument: Optional[ + Union[_ColumnsClauseArgument[Any], SelectBase, ScalarSelect[Any]] + ] = None, +) -> Exists: + """Construct a new :class:`_expression.Exists` construct. + + The :func:`_sql.exists` can be invoked by itself to produce an + :class:`_sql.Exists` construct, which will accept simple WHERE + criteria:: + + exists_criteria = exists().where(table1.c.col1 == table2.c.col2) + + However, for greater flexibility in constructing the SELECT, an + existing :class:`_sql.Select` construct may be converted to an + :class:`_sql.Exists`, most conveniently by making use of the + :meth:`_sql.SelectBase.exists` method:: + + exists_criteria = ( + select(table2.c.col2).where(table1.c.col1 == table2.c.col2).exists() + ) + + The EXISTS criteria is then used inside of an enclosing SELECT:: + + stmt = select(table1.c.col1).where(exists_criteria) + + The above statement will then be of the form: + + .. sourcecode:: sql + + SELECT col1 FROM table1 WHERE EXISTS + (SELECT table2.col2 FROM table2 WHERE table2.col2 = table1.col1) + + .. seealso:: + + :ref:`tutorial_exists` - in the :term:`2.0 style` tutorial. + + :meth:`_sql.SelectBase.exists` - method to transform a ``SELECT`` to an + ``EXISTS`` clause. + + """ # noqa: E501 + + return Exists(__argument) + + +@overload +def intersect( + *selects: _TypedSelectable[_TP], +) -> CompoundSelect[_TP]: ... + + +@overload +def intersect( + *selects: _SelectStatementForCompoundArgument[_TP], +) -> CompoundSelect[_TP]: ... + + +def intersect( + *selects: _SelectStatementForCompoundArgument[_TP], +) -> CompoundSelect[_TP]: + r"""Return an ``INTERSECT`` of multiple selectables. + + The returned object is an instance of + :class:`_expression.CompoundSelect`. + + :param \*selects: + a list of :class:`_expression.Select` instances. + + """ + return CompoundSelect._create_intersect(*selects) + + +@overload +def intersect_all( + *selects: _TypedSelectable[_TP], +) -> CompoundSelect[_TP]: ... + + +@overload +def intersect_all( + *selects: _SelectStatementForCompoundArgument[_TP], +) -> CompoundSelect[_TP]: ... + + +def intersect_all( + *selects: _SelectStatementForCompoundArgument[_TP], +) -> CompoundSelect[_TP]: + r"""Return an ``INTERSECT ALL`` of multiple selectables. + + The returned object is an instance of + :class:`_expression.CompoundSelect`. + + :param \*selects: + a list of :class:`_expression.Select` instances. 
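To make the compound constructors above concrete, a hedged sketch (``user_table`` and ``address_table`` are assumed, as in the :func:`.join` example that follows) combining two single-column SELECTs::

    from sqlalchemy import except_, select

    # ids present in user_table but never referenced from address_table
    stmt = except_(
        select(user_table.c.id),
        select(address_table.c.user_id),
    )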
+ + + """ + return CompoundSelect._create_intersect_all(*selects) + + +def join( + left: _FromClauseArgument, + right: _FromClauseArgument, + onclause: Optional[_OnClauseArgument] = None, + isouter: bool = False, + full: bool = False, +) -> Join: + """Produce a :class:`_expression.Join` object, given two + :class:`_expression.FromClause` + expressions. + + E.g.:: + + j = join( + user_table, address_table, user_table.c.id == address_table.c.user_id + ) + stmt = select(user_table).select_from(j) + + would emit SQL along the lines of: + + .. sourcecode:: sql + + SELECT user.id, user.name FROM user + JOIN address ON user.id = address.user_id + + Similar functionality is available given any + :class:`_expression.FromClause` object (e.g. such as a + :class:`_schema.Table`) using + the :meth:`_expression.FromClause.join` method. + + :param left: The left side of the join. + + :param right: the right side of the join; this is any + :class:`_expression.FromClause` object such as a + :class:`_schema.Table` object, and + may also be a selectable-compatible object such as an ORM-mapped + class. + + :param onclause: a SQL expression representing the ON clause of the + join. If left at ``None``, :meth:`_expression.FromClause.join` + will attempt to + join the two tables based on a foreign key relationship. + + :param isouter: if True, render a LEFT OUTER JOIN, instead of JOIN. + + :param full: if True, render a FULL OUTER JOIN, instead of JOIN. + + .. seealso:: + + :meth:`_expression.FromClause.join` - method form, + based on a given left side. + + :class:`_expression.Join` - the type of object produced. + + """ # noqa: E501 + + return Join(left, right, onclause, isouter, full) + + +def lateral( + selectable: Union[SelectBase, _FromClauseArgument], + name: Optional[str] = None, +) -> LateralFromClause: + """Return a :class:`_expression.Lateral` object. + + :class:`_expression.Lateral` is an :class:`_expression.Alias` + subclass that represents + a subquery with the LATERAL keyword applied to it. + + The special behavior of a LATERAL subquery is that it appears in the + FROM clause of an enclosing SELECT, but may correlate to other + FROM clauses of that SELECT. It is a special case of subquery + only supported by a small number of backends, currently more recent + PostgreSQL versions. + + .. seealso:: + + :ref:`tutorial_lateral_correlation` - overview of usage. + + """ + return Lateral._factory(selectable, name=name) + + +def outerjoin( + left: _FromClauseArgument, + right: _FromClauseArgument, + onclause: Optional[_OnClauseArgument] = None, + full: bool = False, +) -> Join: + """Return an ``OUTER JOIN`` clause element. + + The returned object is an instance of :class:`_expression.Join`. + + Similar functionality is also available via the + :meth:`_expression.FromClause.outerjoin` method on any + :class:`_expression.FromClause`. + + :param left: The left side of the join. + + :param right: The right side of the join. + + :param onclause: Optional criterion for the ``ON`` clause, is + derived from foreign key relationships established between + left and right otherwise. + + To chain joins together, use the :meth:`_expression.FromClause.join` + or + :meth:`_expression.FromClause.outerjoin` methods on the resulting + :class:`_expression.Join` object. 
+ + """ + return Join(left, right, onclause, isouter=True, full=full) + + +# START OVERLOADED FUNCTIONS select Select 1-10 + +# code within this block is **programmatically, +# statically generated** by tools/generate_tuple_map_overloads.py + + +@overload +def select(__ent0: _TCCA[_T0]) -> Select[Tuple[_T0]]: ... + + +@overload +def select( + __ent0: _TCCA[_T0], __ent1: _TCCA[_T1] +) -> Select[Tuple[_T0, _T1]]: ... + + +@overload +def select( + __ent0: _TCCA[_T0], __ent1: _TCCA[_T1], __ent2: _TCCA[_T2] +) -> Select[Tuple[_T0, _T1, _T2]]: ... + + +@overload +def select( + __ent0: _TCCA[_T0], + __ent1: _TCCA[_T1], + __ent2: _TCCA[_T2], + __ent3: _TCCA[_T3], +) -> Select[Tuple[_T0, _T1, _T2, _T3]]: ... + + +@overload +def select( + __ent0: _TCCA[_T0], + __ent1: _TCCA[_T1], + __ent2: _TCCA[_T2], + __ent3: _TCCA[_T3], + __ent4: _TCCA[_T4], +) -> Select[Tuple[_T0, _T1, _T2, _T3, _T4]]: ... + + +@overload +def select( + __ent0: _TCCA[_T0], + __ent1: _TCCA[_T1], + __ent2: _TCCA[_T2], + __ent3: _TCCA[_T3], + __ent4: _TCCA[_T4], + __ent5: _TCCA[_T5], +) -> Select[Tuple[_T0, _T1, _T2, _T3, _T4, _T5]]: ... + + +@overload +def select( + __ent0: _TCCA[_T0], + __ent1: _TCCA[_T1], + __ent2: _TCCA[_T2], + __ent3: _TCCA[_T3], + __ent4: _TCCA[_T4], + __ent5: _TCCA[_T5], + __ent6: _TCCA[_T6], +) -> Select[Tuple[_T0, _T1, _T2, _T3, _T4, _T5, _T6]]: ... + + +@overload +def select( + __ent0: _TCCA[_T0], + __ent1: _TCCA[_T1], + __ent2: _TCCA[_T2], + __ent3: _TCCA[_T3], + __ent4: _TCCA[_T4], + __ent5: _TCCA[_T5], + __ent6: _TCCA[_T6], + __ent7: _TCCA[_T7], +) -> Select[Tuple[_T0, _T1, _T2, _T3, _T4, _T5, _T6, _T7]]: ... + + +@overload +def select( + __ent0: _TCCA[_T0], + __ent1: _TCCA[_T1], + __ent2: _TCCA[_T2], + __ent3: _TCCA[_T3], + __ent4: _TCCA[_T4], + __ent5: _TCCA[_T5], + __ent6: _TCCA[_T6], + __ent7: _TCCA[_T7], + __ent8: _TCCA[_T8], +) -> Select[Tuple[_T0, _T1, _T2, _T3, _T4, _T5, _T6, _T7, _T8]]: ... + + +@overload +def select( + __ent0: _TCCA[_T0], + __ent1: _TCCA[_T1], + __ent2: _TCCA[_T2], + __ent3: _TCCA[_T3], + __ent4: _TCCA[_T4], + __ent5: _TCCA[_T5], + __ent6: _TCCA[_T6], + __ent7: _TCCA[_T7], + __ent8: _TCCA[_T8], + __ent9: _TCCA[_T9], +) -> Select[Tuple[_T0, _T1, _T2, _T3, _T4, _T5, _T6, _T7, _T8, _T9]]: ... + + +# END OVERLOADED FUNCTIONS select + + +@overload +def select( + *entities: _ColumnsClauseArgument[Any], **__kw: Any +) -> Select[Any]: ... + + +def select(*entities: _ColumnsClauseArgument[Any], **__kw: Any) -> Select[Any]: + r"""Construct a new :class:`_expression.Select`. + + + .. versionadded:: 1.4 - The :func:`_sql.select` function now accepts + column arguments positionally. The top-level :func:`_sql.select` + function will automatically use the 1.x or 2.x style API based on + the incoming arguments; using :func:`_sql.select` from the + ``sqlalchemy.future`` module will enforce that only the 2.x style + constructor is used. + + Similar functionality is also available via the + :meth:`_expression.FromClause.select` method on any + :class:`_expression.FromClause`. + + .. seealso:: + + :ref:`tutorial_selecting_data` - in the :ref:`unified_tutorial` + + :param \*entities: + Entities to SELECT from. For Core usage, this is typically a series + of :class:`_expression.ColumnElement` and / or + :class:`_expression.FromClause` + objects which will form the columns clause of the resulting + statement. 
For those objects that are instances of + :class:`_expression.FromClause` (typically :class:`_schema.Table` + or :class:`_expression.Alias` + objects), the :attr:`_expression.FromClause.c` + collection is extracted + to form a collection of :class:`_expression.ColumnElement` objects. + + This parameter will also accept :class:`_expression.TextClause` + constructs as + given, as well as ORM-mapped classes. + + """ + # the keyword args are a necessary element in order for the typing + # to work out w/ the varargs vs. having named "keyword" arguments that + # aren't always present. + if __kw: + raise _no_kw() + return Select(*entities) + + +def table(name: str, *columns: ColumnClause[Any], **kw: Any) -> TableClause: + """Produce a new :class:`_expression.TableClause`. + + The object returned is an instance of + :class:`_expression.TableClause`, which + represents the "syntactical" portion of the schema-level + :class:`_schema.Table` object. + It may be used to construct lightweight table constructs. + + :param name: Name of the table. + + :param columns: A collection of :func:`_expression.column` constructs. + + :param schema: The schema name for this table. + + .. versionadded:: 1.3.18 :func:`_expression.table` can now + accept a ``schema`` argument. + """ + + return TableClause(name, *columns, **kw) + + +def tablesample( + selectable: _FromClauseArgument, + sampling: Union[float, Function[Any]], + name: Optional[str] = None, + seed: Optional[roles.ExpressionElementRole[Any]] = None, +) -> TableSample: + """Return a :class:`_expression.TableSample` object. + + :class:`_expression.TableSample` is an :class:`_expression.Alias` + subclass that represents + a table with the TABLESAMPLE clause applied to it. + :func:`_expression.tablesample` + is also available from the :class:`_expression.FromClause` + class via the + :meth:`_expression.FromClause.tablesample` method. + + The TABLESAMPLE clause allows selecting a randomly selected approximate + percentage of rows from a table. It supports multiple sampling methods, + most commonly BERNOULLI and SYSTEM. + + e.g.:: + + from sqlalchemy import func + + selectable = people.tablesample( + func.bernoulli(1), name="alias", seed=func.random() + ) + stmt = select(selectable.c.people_id) + + Assuming ``people`` with a column ``people_id``, the above + statement would render as: + + .. sourcecode:: sql + + SELECT alias.people_id FROM + people AS alias TABLESAMPLE bernoulli(:bernoulli_1) + REPEATABLE (random()) + + :param sampling: a ``float`` percentage between 0 and 100 or + :class:`_functions.Function`. + + :param name: optional alias name + + :param seed: any real-valued SQL expression. When specified, the + REPEATABLE sub-clause is also rendered. + + """ + return TableSample._factory(selectable, sampling, name=name, seed=seed) + + +@overload +def union( + *selects: _TypedSelectable[_TP], +) -> CompoundSelect[_TP]: ... + + +@overload +def union( + *selects: _SelectStatementForCompoundArgument[_TP], +) -> CompoundSelect[_TP]: ... + + +def union( + *selects: _SelectStatementForCompoundArgument[_TP], +) -> CompoundSelect[_TP]: + r"""Return a ``UNION`` of multiple selectables. + + The returned object is an instance of + :class:`_expression.CompoundSelect`. + + A similar :func:`union()` method is available on all + :class:`_expression.FromClause` subclasses. + + :param \*selects: + a list of :class:`_expression.Select` instances. + + :param \**kwargs: + available keyword arguments are the same as those of + :func:`select`. 
+ + """ + return CompoundSelect._create_union(*selects) + + +@overload +def union_all( + *selects: _TypedSelectable[_TP], +) -> CompoundSelect[_TP]: ... + + +@overload +def union_all( + *selects: _SelectStatementForCompoundArgument[_TP], +) -> CompoundSelect[_TP]: ... + + +def union_all( + *selects: _SelectStatementForCompoundArgument[_TP], +) -> CompoundSelect[_TP]: + r"""Return a ``UNION ALL`` of multiple selectables. + + The returned object is an instance of + :class:`_expression.CompoundSelect`. + + A similar :func:`union_all()` method is available on all + :class:`_expression.FromClause` subclasses. + + :param \*selects: + a list of :class:`_expression.Select` instances. + + """ + return CompoundSelect._create_union_all(*selects) + + +def values( + *columns: ColumnClause[Any], + name: Optional[str] = None, + literal_binds: bool = False, +) -> Values: + r"""Construct a :class:`_expression.Values` construct. + + The column expressions and the actual data for + :class:`_expression.Values` are given in two separate steps. The + constructor receives the column expressions typically as + :func:`_expression.column` constructs, + and the data is then passed via the + :meth:`_expression.Values.data` method as a list, + which can be called multiple + times to add more data, e.g.:: + + from sqlalchemy import column + from sqlalchemy import values + from sqlalchemy import Integer + from sqlalchemy import String + + value_expr = values( + column("id", Integer), + column("name", String), + name="my_values", + ).data([(1, "name1"), (2, "name2"), (3, "name3")]) + + :param \*columns: column expressions, typically composed using + :func:`_expression.column` objects. + + :param name: the name for this VALUES construct. If omitted, the + VALUES construct will be unnamed in a SQL expression. Different + backends may have different requirements here. + + :param literal_binds: Defaults to False. Whether or not to render + the data values inline in the SQL output, rather than using bound + parameters. + + """ + return Values(*columns, literal_binds=literal_binds, name=name) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/sql/_typing.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/sql/_typing.py new file mode 100644 index 0000000000000000000000000000000000000000..8fe86f63748ce3f7e11be9f7ffaa60d4604e4206 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/sql/_typing.py @@ -0,0 +1,468 @@ +# sql/_typing.py +# Copyright (C) 2022-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php + +from __future__ import annotations + +import operator +from typing import Any +from typing import Callable +from typing import Dict +from typing import Generic +from typing import Iterable +from typing import Mapping +from typing import NoReturn +from typing import Optional +from typing import overload +from typing import Set +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING +from typing import TypeVar +from typing import Union + +from . import roles +from .. import exc +from .. 
import util +from ..inspection import Inspectable +from ..util.typing import Literal +from ..util.typing import Protocol +from ..util.typing import TypeAlias + +if TYPE_CHECKING: + from datetime import date + from datetime import datetime + from datetime import time + from datetime import timedelta + from decimal import Decimal + from uuid import UUID + + from .base import Executable + from .compiler import Compiled + from .compiler import DDLCompiler + from .compiler import SQLCompiler + from .dml import UpdateBase + from .dml import ValuesBase + from .elements import ClauseElement + from .elements import ColumnElement + from .elements import KeyedColumnElement + from .elements import quoted_name + from .elements import SQLCoreOperations + from .elements import TextClause + from .lambdas import LambdaElement + from .roles import FromClauseRole + from .schema import Column + from .selectable import Alias + from .selectable import CompoundSelect + from .selectable import CTE + from .selectable import FromClause + from .selectable import Join + from .selectable import NamedFromClause + from .selectable import ReturnsRows + from .selectable import Select + from .selectable import Selectable + from .selectable import SelectBase + from .selectable import Subquery + from .selectable import TableClause + from .sqltypes import TableValueType + from .sqltypes import TupleType + from .type_api import TypeEngine + from ..engine import Connection + from ..engine import Dialect + from ..engine import Engine + from ..engine.mock import MockConnection + from ..util.typing import TypeGuard + +_T = TypeVar("_T", bound=Any) +_T_co = TypeVar("_T_co", bound=Any, covariant=True) + + +_CE = TypeVar("_CE", bound="ColumnElement[Any]") + +_CLE = TypeVar("_CLE", bound="ClauseElement") + + +class _HasClauseElement(Protocol, Generic[_T_co]): + """indicates a class that has a __clause_element__() method""" + + def __clause_element__(self) -> roles.ExpressionElementRole[_T_co]: ... + + +class _CoreAdapterProto(Protocol): + """protocol for the ClauseAdapter/ColumnAdapter.traverse() method.""" + + def __call__(self, obj: _CE) -> _CE: ... + + +class _HasDialect(Protocol): + """protocol for Engine/Connection-like objects that have dialect + attribute. + """ + + @property + def dialect(self) -> Dialect: ... + + +# match column types that are not ORM entities +_NOT_ENTITY = TypeVar( + "_NOT_ENTITY", + int, + str, + bool, + "datetime", + "date", + "time", + "timedelta", + "UUID", + float, + "Decimal", +) + +_StarOrOne = Literal["*", 1] + +_MAYBE_ENTITY = TypeVar( + "_MAYBE_ENTITY", + roles.ColumnsClauseRole, + _StarOrOne, + Type[Any], + Inspectable[_HasClauseElement[Any]], + _HasClauseElement[Any], +) + + +# convention: +# XYZArgument - something that the end user is passing to a public API method +# XYZElement - the internal representation that we use for the thing. +# the coercions system is responsible for converting from XYZArgument to +# XYZElement. + +_TextCoercedExpressionArgument = Union[ + str, + "TextClause", + "ColumnElement[_T]", + _HasClauseElement[_T], + roles.ExpressionElementRole[_T], +] + +_ColumnsClauseArgument = Union[ + roles.TypedColumnsClauseRole[_T], + roles.ColumnsClauseRole, + "SQLCoreOperations[_T]", + _StarOrOne, + Type[_T], + Inspectable[_HasClauseElement[_T]], + _HasClauseElement[_T], +] +"""open-ended SELECT columns clause argument. + +Includes column expressions, tables, ORM mapped entities, a few literal values. 
+ +This type is used for lists of columns / entities to be returned in result +sets; select(...), insert().returning(...), etc. + + +""" + +_TypedColumnClauseArgument = Union[ + roles.TypedColumnsClauseRole[_T], + "SQLCoreOperations[_T]", + Type[_T], +] + +_TP = TypeVar("_TP", bound=Tuple[Any, ...]) + +_T0 = TypeVar("_T0", bound=Any) +_T1 = TypeVar("_T1", bound=Any) +_T2 = TypeVar("_T2", bound=Any) +_T3 = TypeVar("_T3", bound=Any) +_T4 = TypeVar("_T4", bound=Any) +_T5 = TypeVar("_T5", bound=Any) +_T6 = TypeVar("_T6", bound=Any) +_T7 = TypeVar("_T7", bound=Any) +_T8 = TypeVar("_T8", bound=Any) +_T9 = TypeVar("_T9", bound=Any) + + +_ColumnExpressionArgument = Union[ + "ColumnElement[_T]", + _HasClauseElement[_T], + "SQLCoreOperations[_T]", + roles.ExpressionElementRole[_T], + roles.TypedColumnsClauseRole[_T], + Callable[[], "ColumnElement[_T]"], + "LambdaElement", +] +"See docs in public alias ColumnExpressionArgument." + +ColumnExpressionArgument: TypeAlias = _ColumnExpressionArgument[_T] +"""Narrower "column expression" argument. + +This type is used for all the other "column" kinds of expressions that +typically represent a single SQL column expression, not a set of columns the +way a table or ORM entity does. + +This includes ColumnElement, or ORM-mapped attributes that will have a +``__clause_element__()`` method, it also has the ExpressionElementRole +overall which brings in the TextClause object also. + +.. versionadded:: 2.0.13 + +""" + +_ColumnExpressionOrLiteralArgument = Union[Any, _ColumnExpressionArgument[_T]] + +_ColumnExpressionOrStrLabelArgument = Union[str, _ColumnExpressionArgument[_T]] + +_ByArgument = Union[ + Iterable[_ColumnExpressionOrStrLabelArgument[Any]], + _ColumnExpressionOrStrLabelArgument[Any], +] +"""Used for keyword-based ``order_by`` and ``partition_by`` parameters.""" + + +_InfoType = Dict[Any, Any] +"""the .info dictionary accepted and used throughout Core /ORM""" + +_FromClauseArgument = Union[ + roles.FromClauseRole, + Type[Any], + Inspectable[_HasClauseElement[Any]], + _HasClauseElement[Any], +] +"""A FROM clause, like we would send to select().select_from(). + +Also accommodates ORM entities and related constructs. + +""" + +_JoinTargetArgument = Union[_FromClauseArgument, roles.JoinTargetRole] +"""target for join() builds on _FromClauseArgument to include additional +join target roles such as those which come from the ORM. + +""" + +_OnClauseArgument = Union[_ColumnExpressionArgument[Any], roles.OnClauseRole] +"""target for an ON clause, includes additional roles such as those which +come from the ORM. + +""" + +_SelectStatementForCompoundArgument = Union[ + "Select[_TP]", + "CompoundSelect[_TP]", + roles.CompoundElementRole, +] +"""SELECT statement acceptable by ``union()`` and other SQL set operations""" + +_DMLColumnArgument = Union[ + str, + _HasClauseElement[Any], + roles.DMLColumnRole, + "SQLCoreOperations[Any]", +] +"""A DML column expression. This is a "key" inside of insert().values(), +update().values(), and related. + +These are usually strings or SQL table columns. + +There's also edge cases like JSON expression assignment, which we would want +the DMLColumnRole to be able to accommodate. + +""" + +_DMLKey = TypeVar("_DMLKey", bound=_DMLColumnArgument) +_DMLColumnKeyMapping = Mapping[_DMLKey, Any] + + +_DDLColumnArgument = Union[str, "Column[Any]", roles.DDLConstraintColumnRole] +"""DDL column. + +used for :class:`.PrimaryKeyConstraint`, :class:`.UniqueConstraint`, etc. 
+ +""" + +_DMLTableArgument = Union[ + "TableClause", + "Join", + "Alias", + "CTE", + Type[Any], + Inspectable[_HasClauseElement[Any]], + _HasClauseElement[Any], +] + +_PropagateAttrsType = util.immutabledict[str, Any] + +_TypeEngineArgument = Union[Type["TypeEngine[_T]"], "TypeEngine[_T]"] + +_EquivalentColumnMap = Dict["ColumnElement[Any]", Set["ColumnElement[Any]"]] + +_LimitOffsetType = Union[int, _ColumnExpressionArgument[int], None] + +_AutoIncrementType = Union[bool, Literal["auto", "ignore_fk"]] + +_CreateDropBind = Union["Engine", "Connection", "MockConnection"] + +if TYPE_CHECKING: + + def is_sql_compiler(c: Compiled) -> TypeGuard[SQLCompiler]: ... + + def is_ddl_compiler(c: Compiled) -> TypeGuard[DDLCompiler]: ... + + def is_named_from_clause( + t: FromClauseRole, + ) -> TypeGuard[NamedFromClause]: ... + + def is_column_element( + c: ClauseElement, + ) -> TypeGuard[ColumnElement[Any]]: ... + + def is_keyed_column_element( + c: ClauseElement, + ) -> TypeGuard[KeyedColumnElement[Any]]: ... + + def is_text_clause(c: ClauseElement) -> TypeGuard[TextClause]: ... + + def is_from_clause(c: ClauseElement) -> TypeGuard[FromClause]: ... + + def is_tuple_type(t: TypeEngine[Any]) -> TypeGuard[TupleType]: ... + + def is_table_value_type( + t: TypeEngine[Any], + ) -> TypeGuard[TableValueType]: ... + + def is_selectable(t: Any) -> TypeGuard[Selectable]: ... + + def is_select_base( + t: Union[Executable, ReturnsRows] + ) -> TypeGuard[SelectBase]: ... + + def is_select_statement( + t: Union[Executable, ReturnsRows] + ) -> TypeGuard[Select[Any]]: ... + + def is_table(t: FromClause) -> TypeGuard[TableClause]: ... + + def is_subquery(t: FromClause) -> TypeGuard[Subquery]: ... + + def is_dml(c: ClauseElement) -> TypeGuard[UpdateBase]: ... + +else: + is_sql_compiler = operator.attrgetter("is_sql") + is_ddl_compiler = operator.attrgetter("is_ddl") + is_named_from_clause = operator.attrgetter("named_with_column") + is_column_element = operator.attrgetter("_is_column_element") + is_keyed_column_element = operator.attrgetter("_is_keyed_column_element") + is_text_clause = operator.attrgetter("_is_text_clause") + is_from_clause = operator.attrgetter("_is_from_clause") + is_tuple_type = operator.attrgetter("_is_tuple_type") + is_table_value_type = operator.attrgetter("_is_table_value") + is_selectable = operator.attrgetter("is_selectable") + is_select_base = operator.attrgetter("_is_select_base") + is_select_statement = operator.attrgetter("_is_select_statement") + is_table = operator.attrgetter("_is_table") + is_subquery = operator.attrgetter("_is_subquery") + is_dml = operator.attrgetter("is_dml") + + +def has_schema_attr(t: FromClauseRole) -> TypeGuard[TableClause]: + return hasattr(t, "schema") + + +def is_quoted_name(s: str) -> TypeGuard[quoted_name]: + return hasattr(s, "quote") + + +def is_has_clause_element(s: object) -> TypeGuard[_HasClauseElement[Any]]: + return hasattr(s, "__clause_element__") + + +def is_insert_update(c: ClauseElement) -> TypeGuard[ValuesBase]: + return c.is_dml and (c.is_insert or c.is_update) # type: ignore + + +def _no_kw() -> exc.ArgumentError: + return exc.ArgumentError( + "Additional keyword arguments are not accepted by this " + "function/method. The presence of **kw is for pep-484 typing purposes" + ) + + +def _unexpected_kw(methname: str, kw: Dict[str, Any]) -> NoReturn: + k = list(kw)[0] + raise TypeError(f"{methname} got an unexpected keyword argument '{k}'") + + +@overload +def Nullable( + val: "SQLCoreOperations[_T]", +) -> "SQLCoreOperations[Optional[_T]]": ... 
+ + +@overload +def Nullable( + val: roles.ExpressionElementRole[_T], +) -> roles.ExpressionElementRole[Optional[_T]]: ... + + +@overload +def Nullable(val: Type[_T]) -> Type[Optional[_T]]: ... + + +def Nullable( + val: _TypedColumnClauseArgument[_T], +) -> _TypedColumnClauseArgument[Optional[_T]]: + """Types a column or ORM class as nullable. + + This can be used in select and other contexts to express that the value of + a column can be null, for example due to an outer join:: + + stmt1 = select(A, Nullable(B)).outerjoin(A.bs) + stmt2 = select(A.data, Nullable(B.data)).outerjoin(A.bs) + + At runtime this method returns the input unchanged. + + .. versionadded:: 2.0.20 + """ + return val + + +@overload +def NotNullable( + val: "SQLCoreOperations[Optional[_T]]", +) -> "SQLCoreOperations[_T]": ... + + +@overload +def NotNullable( + val: roles.ExpressionElementRole[Optional[_T]], +) -> roles.ExpressionElementRole[_T]: ... + + +@overload +def NotNullable(val: Type[Optional[_T]]) -> Type[_T]: ... + + +@overload +def NotNullable(val: Optional[Type[_T]]) -> Type[_T]: ... + + +def NotNullable( + val: Union[_TypedColumnClauseArgument[Optional[_T]], Optional[Type[_T]]], +) -> _TypedColumnClauseArgument[_T]: + """Types a column or ORM class as not nullable. + + This can be used in select and other contexts to express that the value of + a column cannot be null, for example due to a where condition on a + nullable column:: + + stmt = select(NotNullable(A.value)).where(A.value.is_not(None)) + + At runtime this method returns the input unchanged. + + .. versionadded:: 2.0.20 + """ + return val # type: ignore diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/sql/annotation.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/sql/annotation.py new file mode 100644 index 0000000000000000000000000000000000000000..bf445ff330db14ea144d7d060eec2252fe553b1f --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/sql/annotation.py @@ -0,0 +1,585 @@ +# sql/annotation.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php + +"""The :class:`.Annotated` class and related routines; creates hash-equivalent +copies of SQL constructs which contain context-specific markers and +associations. + +Note that the :class:`.Annotated` concept as implemented in this module is not +related in any way to the pep-593 concept of "Annotated". + + +""" + +from __future__ import annotations + +import typing +from typing import Any +from typing import Callable +from typing import cast +from typing import Dict +from typing import FrozenSet +from typing import Mapping +from typing import Optional +from typing import overload +from typing import Sequence +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING +from typing import TypeVar + +from . import operators +from .cache_key import HasCacheKey +from .visitors import anon_map +from .visitors import ExternallyTraversible +from .visitors import InternalTraversal +from .. 
import util +from ..util.typing import Literal +from ..util.typing import Self + +if TYPE_CHECKING: + from .base import _EntityNamespace + from .visitors import _TraverseInternalsType + +_AnnotationDict = Mapping[str, Any] + +EMPTY_ANNOTATIONS: util.immutabledict[str, Any] = util.EMPTY_DICT + + +class SupportsAnnotations(ExternallyTraversible): + __slots__ = () + + _annotations: util.immutabledict[str, Any] = EMPTY_ANNOTATIONS + + proxy_set: util.generic_fn_descriptor[FrozenSet[Any]] + + _is_immutable: bool + + def _annotate(self, values: _AnnotationDict) -> Self: + raise NotImplementedError() + + @overload + def _deannotate( + self, + values: Literal[None] = ..., + clone: bool = ..., + ) -> Self: ... + + @overload + def _deannotate( + self, + values: Sequence[str] = ..., + clone: bool = ..., + ) -> SupportsAnnotations: ... + + def _deannotate( + self, + values: Optional[Sequence[str]] = None, + clone: bool = False, + ) -> SupportsAnnotations: + raise NotImplementedError() + + @util.memoized_property + def _annotations_cache_key(self) -> Tuple[Any, ...]: + anon_map_ = anon_map() + + return self._gen_annotations_cache_key(anon_map_) + + def _gen_annotations_cache_key( + self, anon_map: anon_map + ) -> Tuple[Any, ...]: + return ( + "_annotations", + tuple( + ( + key, + ( + value._gen_cache_key(anon_map, []) + if isinstance(value, HasCacheKey) + else value + ), + ) + for key, value in [ + (key, self._annotations[key]) + for key in sorted(self._annotations) + ] + ), + ) + + +class SupportsWrappingAnnotations(SupportsAnnotations): + __slots__ = () + + _constructor: Callable[..., SupportsWrappingAnnotations] + + if TYPE_CHECKING: + + @util.ro_non_memoized_property + def entity_namespace(self) -> _EntityNamespace: ... + + def _annotate(self, values: _AnnotationDict) -> Self: + """return a copy of this ClauseElement with annotations + updated by the given dictionary. + + """ + return Annotated._as_annotated_instance(self, values) # type: ignore + + def _with_annotations(self, values: _AnnotationDict) -> Self: + """return a copy of this ClauseElement with annotations + replaced by the given dictionary. + + """ + return Annotated._as_annotated_instance(self, values) # type: ignore + + @overload + def _deannotate( + self, + values: Literal[None] = ..., + clone: bool = ..., + ) -> Self: ... + + @overload + def _deannotate( + self, + values: Sequence[str] = ..., + clone: bool = ..., + ) -> SupportsAnnotations: ... + + def _deannotate( + self, + values: Optional[Sequence[str]] = None, + clone: bool = False, + ) -> SupportsAnnotations: + """return a copy of this :class:`_expression.ClauseElement` + with annotations + removed. + + :param values: optional tuple of individual values + to remove. + + """ + if clone: + s = self._clone() + return s + else: + return self + + +class SupportsCloneAnnotations(SupportsWrappingAnnotations): + # SupportsCloneAnnotations extends from SupportsWrappingAnnotations + # to support the structure of having the base ClauseElement + # be a subclass of SupportsWrappingAnnotations. Any ClauseElement + # subclass that wants to extend from SupportsCloneAnnotations + # will inherently also be subclassing SupportsWrappingAnnotations, so + # make that specific here. 
+ + if not typing.TYPE_CHECKING: + __slots__ = () + + _clone_annotations_traverse_internals: _TraverseInternalsType = [ + ("_annotations", InternalTraversal.dp_annotations_key) + ] + + def _annotate(self, values: _AnnotationDict) -> Self: + """return a copy of this ClauseElement with annotations + updated by the given dictionary. + + """ + new = self._clone() + new._annotations = new._annotations.union(values) + new.__dict__.pop("_annotations_cache_key", None) + new.__dict__.pop("_generate_cache_key", None) + return new + + def _with_annotations(self, values: _AnnotationDict) -> Self: + """return a copy of this ClauseElement with annotations + replaced by the given dictionary. + + """ + new = self._clone() + new._annotations = util.immutabledict(values) + new.__dict__.pop("_annotations_cache_key", None) + new.__dict__.pop("_generate_cache_key", None) + return new + + @overload + def _deannotate( + self, + values: Literal[None] = ..., + clone: bool = ..., + ) -> Self: ... + + @overload + def _deannotate( + self, + values: Sequence[str] = ..., + clone: bool = ..., + ) -> SupportsAnnotations: ... + + def _deannotate( + self, + values: Optional[Sequence[str]] = None, + clone: bool = False, + ) -> SupportsAnnotations: + """return a copy of this :class:`_expression.ClauseElement` + with annotations + removed. + + :param values: optional tuple of individual values + to remove. + + """ + if clone or self._annotations: + # clone is used when we are also copying + # the expression for a deep deannotation + new = self._clone() + new._annotations = util.immutabledict() + new.__dict__.pop("_annotations_cache_key", None) + return new + else: + return self + + +class Annotated(SupportsAnnotations): + """clones a SupportsAnnotations and applies an 'annotations' dictionary. + + Unlike regular clones, this clone also mimics __hash__() and + __eq__() of the original element so that it takes its place + in hashed collections. + + A reference to the original element is maintained, for the important + reason of keeping its hash value current. When GC'ed, the + hash value may be reused, causing conflicts. + + .. note:: The rationale for Annotated producing a brand new class, + rather than placing the functionality directly within ClauseElement, + is **performance**. The __hash__() method is absent on plain + ClauseElement which leads to significantly reduced function call + overhead, as the use of sets and dictionaries against ClauseElement + objects is prevalent, but most are not "annotated". 
+ + """ + + _is_column_operators = False + + @classmethod + def _as_annotated_instance( + cls, element: SupportsWrappingAnnotations, values: _AnnotationDict + ) -> Annotated: + try: + cls = annotated_classes[element.__class__] + except KeyError: + cls = _new_annotation_type(element.__class__, cls) + return cls(element, values) + + _annotations: util.immutabledict[str, Any] + __element: SupportsWrappingAnnotations + _hash: int + + def __new__(cls: Type[Self], *args: Any) -> Self: + return object.__new__(cls) + + def __init__( + self, element: SupportsWrappingAnnotations, values: _AnnotationDict + ): + self.__dict__ = element.__dict__.copy() + self.__dict__.pop("_annotations_cache_key", None) + self.__dict__.pop("_generate_cache_key", None) + self.__element = element + self._annotations = util.immutabledict(values) + self._hash = hash(element) + + def _annotate(self, values: _AnnotationDict) -> Self: + _values = self._annotations.union(values) + new = self._with_annotations(_values) + return new + + def _with_annotations(self, values: _AnnotationDict) -> Self: + clone = self.__class__.__new__(self.__class__) + clone.__dict__ = self.__dict__.copy() + clone.__dict__.pop("_annotations_cache_key", None) + clone.__dict__.pop("_generate_cache_key", None) + clone._annotations = util.immutabledict(values) + return clone + + @overload + def _deannotate( + self, + values: Literal[None] = ..., + clone: bool = ..., + ) -> Self: ... + + @overload + def _deannotate( + self, + values: Sequence[str] = ..., + clone: bool = ..., + ) -> Annotated: ... + + def _deannotate( + self, + values: Optional[Sequence[str]] = None, + clone: bool = True, + ) -> SupportsAnnotations: + if values is None: + return self.__element + else: + return self._with_annotations( + util.immutabledict( + { + key: value + for key, value in self._annotations.items() + if key not in values + } + ) + ) + + if not typing.TYPE_CHECKING: + # manually proxy some methods that need extra attention + def _compiler_dispatch(self, visitor: Any, **kw: Any) -> Any: + return self.__element.__class__._compiler_dispatch( + self, visitor, **kw + ) + + @property + def _constructor(self): + return self.__element._constructor + + def _clone(self, **kw: Any) -> Self: + clone = self.__element._clone(**kw) + if clone is self.__element: + # detect immutable, don't change anything + return self + else: + # update the clone with any changes that have occurred + # to this object's __dict__. + clone.__dict__.update(self.__dict__) + return self.__class__(clone, self._annotations) + + def __reduce__(self) -> Tuple[Type[Annotated], Tuple[Any, ...]]: + return self.__class__, (self.__element, self._annotations) + + def __hash__(self) -> int: + return self._hash + + def __eq__(self, other: Any) -> bool: + if self._is_column_operators: + return self.__element.__class__.__eq__(self, other) + else: + return hash(other) == hash(self) + + @util.ro_non_memoized_property + def entity_namespace(self) -> _EntityNamespace: + if "entity_namespace" in self._annotations: + return cast( + SupportsWrappingAnnotations, + self._annotations["entity_namespace"], + ).entity_namespace + else: + return self.__element.entity_namespace + + +# hard-generate Annotated subclasses. this technique +# is used instead of on-the-fly types (i.e. type.__new__()) +# so that the resulting objects are pickleable; additionally, other +# decisions can be made up front about the type of object being annotated +# just once per class rather than per-instance. 
+annotated_classes: Dict[Type[SupportsWrappingAnnotations], Type[Annotated]] = ( + {} +) + +_SA = TypeVar("_SA", bound="SupportsAnnotations") + + +def _safe_annotate(to_annotate: _SA, annotations: _AnnotationDict) -> _SA: + try: + _annotate = to_annotate._annotate + except AttributeError: + # skip objects that don't actually have an `_annotate` + # attribute, namely QueryableAttribute inside of a join + # condition + return to_annotate + else: + return _annotate(annotations) + + +def _deep_annotate( + element: _SA, + annotations: _AnnotationDict, + exclude: Optional[Sequence[SupportsAnnotations]] = None, + *, + detect_subquery_cols: bool = False, + ind_cols_on_fromclause: bool = False, + annotate_callable: Optional[ + Callable[[SupportsAnnotations, _AnnotationDict], SupportsAnnotations] + ] = None, +) -> _SA: + """Deep copy the given ClauseElement, annotating each element + with the given annotations dictionary. + + Elements within the exclude collection will be cloned but not annotated. + + """ + + # annotated objects hack the __hash__() method so if we want to + # uniquely process them we have to use id() + + cloned_ids: Dict[int, SupportsAnnotations] = {} + + def clone(elem: SupportsAnnotations, **kw: Any) -> SupportsAnnotations: + # ind_cols_on_fromclause means make sure an AnnotatedFromClause + # has its own .c collection independent of that which its proxying. + # this is used specifically by orm.LoaderCriteriaOption to break + # a reference cycle that it's otherwise prone to building, + # see test_relationship_criteria-> + # test_loader_criteria_subquery_w_same_entity. logic here was + # changed for #8796 and made explicit; previously it occurred + # by accident + + kw["detect_subquery_cols"] = detect_subquery_cols + id_ = id(elem) + + if id_ in cloned_ids: + return cloned_ids[id_] + + if ( + exclude + and hasattr(elem, "proxy_set") + and elem.proxy_set.intersection(exclude) + ): + newelem = elem._clone(clone=clone, **kw) + elif annotations != elem._annotations: + if detect_subquery_cols and elem._is_immutable: + to_annotate = elem._clone(clone=clone, **kw) + else: + to_annotate = elem + if annotate_callable: + newelem = annotate_callable(to_annotate, annotations) + else: + newelem = _safe_annotate(to_annotate, annotations) + else: + newelem = elem + + newelem._copy_internals( + clone=clone, ind_cols_on_fromclause=ind_cols_on_fromclause + ) + + cloned_ids[id_] = newelem + return newelem + + if element is not None: + element = cast(_SA, clone(element)) + clone = None # type: ignore # remove gc cycles + return element + + +@overload +def _deep_deannotate( + element: Literal[None], values: Optional[Sequence[str]] = None +) -> Literal[None]: ... + + +@overload +def _deep_deannotate( + element: _SA, values: Optional[Sequence[str]] = None +) -> _SA: ... 
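A brief sketch of the ``Annotated`` behavior described above, using private APIs purely for illustration (the annotation key and value are arbitrary, and the exact generated class name may vary)::

    from sqlalchemy import column

    c = column("x")

    # _annotate() returns an instance of a generated Annotated* subclass
    # that copies the element's __dict__ and mimics its hash
    ac = c._annotate({"demo_key": "demo_value"})
    print(type(ac).__name__)            # a dynamically generated Annotated* class
    print(ac._annotations["demo_key"])  # demo_value
    print(hash(ac) == hash(c))          # True - the original's hash is reused
    print(ac._deannotate() is c)        # True - full deannotation returns the original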
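Returning to the ``Nullable()`` / ``NotNullable()`` helpers defined in ``sql/_typing.py`` above: they only adjust the static type seen by type checkers, as in this sketch (the ORM mapping is illustrative, and the top-level import location is assumed to follow the 2.0.20+ layout)::

    from typing import Optional

    from sqlalchemy import Integer, NotNullable, Nullable, select
    from sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column


    class Base(DeclarativeBase):
        pass


    class A(Base):
        __tablename__ = "a"
        id: Mapped[int] = mapped_column(primary_key=True)
        value: Mapped[Optional[int]] = mapped_column(Integer)


    # NotNullable() narrows Optional[int] to int for the type checker;
    # at runtime both helpers return their argument unchanged
    stmt = select(NotNullable(A.value)).where(A.value.is_not(None))
    assert NotNullable(A.value) is A.value
    assert Nullable(A.value) is A.value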
+ + +def _deep_deannotate( + element: Optional[_SA], values: Optional[Sequence[str]] = None +) -> Optional[_SA]: + """Deep copy the given element, removing annotations.""" + + cloned: Dict[Any, SupportsAnnotations] = {} + + def clone(elem: SupportsAnnotations, **kw: Any) -> SupportsAnnotations: + key: Any + if values: + key = id(elem) + else: + key = elem + + if key not in cloned: + newelem = elem._deannotate(values=values, clone=True) + newelem._copy_internals(clone=clone) + cloned[key] = newelem + return newelem + else: + return cloned[key] + + if element is not None: + element = cast(_SA, clone(element)) + clone = None # type: ignore # remove gc cycles + return element + + +def _shallow_annotate(element: _SA, annotations: _AnnotationDict) -> _SA: + """Annotate the given ClauseElement and copy its internals so that + internal objects refer to the new annotated object. + + Basically used to apply a "don't traverse" annotation to a + selectable, without digging throughout the whole + structure wasting time. + """ + element = element._annotate(annotations) + element._copy_internals() + return element + + +def _new_annotation_type( + cls: Type[SupportsWrappingAnnotations], base_cls: Type[Annotated] +) -> Type[Annotated]: + """Generates a new class that subclasses Annotated and proxies a given + element type. + + """ + if issubclass(cls, Annotated): + return cls + elif cls in annotated_classes: + return annotated_classes[cls] + + for super_ in cls.__mro__: + # check if an Annotated subclass more specific than + # the given base_cls is already registered, such + # as AnnotatedColumnElement. + if super_ in annotated_classes: + base_cls = annotated_classes[super_] + break + + annotated_classes[cls] = anno_cls = cast( + Type[Annotated], + type("Annotated%s" % cls.__name__, (base_cls, cls), {}), + ) + globals()["Annotated%s" % cls.__name__] = anno_cls + + if "_traverse_internals" in cls.__dict__: + anno_cls._traverse_internals = list(cls._traverse_internals) + [ + ("_annotations", InternalTraversal.dp_annotations_key) + ] + elif cls.__dict__.get("inherit_cache", False): + anno_cls._traverse_internals = list(cls._traverse_internals) + [ + ("_annotations", InternalTraversal.dp_annotations_key) + ] + + # some classes include this even if they have traverse_internals + # e.g. BindParameter, add it if present. 
+ if cls.__dict__.get("inherit_cache", False): + anno_cls.inherit_cache = True # type: ignore + elif "inherit_cache" in cls.__dict__: + anno_cls.inherit_cache = cls.__dict__["inherit_cache"] # type: ignore + + anno_cls._is_column_operators = issubclass(cls, operators.ColumnOperators) + + return anno_cls + + +def _prepare_annotations( + target_hierarchy: Type[SupportsWrappingAnnotations], + base_cls: Type[Annotated], +) -> None: + for cls in util.walk_subclasses(target_hierarchy): + _new_annotation_type(cls, base_cls) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/sql/base.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/sql/base.py new file mode 100644 index 0000000000000000000000000000000000000000..102fddd9447dfb24a947c105fd0bf232e14186d3 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/sql/base.py @@ -0,0 +1,2197 @@ +# sql/base.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php +# mypy: allow-untyped-defs, allow-untyped-calls + +"""Foundational utilities common to many sql modules. + +""" + + +from __future__ import annotations + +import collections +from enum import Enum +import itertools +from itertools import zip_longest +import operator +import re +from typing import Any +from typing import Callable +from typing import cast +from typing import Dict +from typing import FrozenSet +from typing import Generic +from typing import Iterable +from typing import Iterator +from typing import List +from typing import Mapping +from typing import MutableMapping +from typing import NamedTuple +from typing import NoReturn +from typing import Optional +from typing import overload +from typing import Sequence +from typing import Set +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING +from typing import TypeVar +from typing import Union + +from . import roles +from . import visitors +from .cache_key import HasCacheKey # noqa +from .cache_key import MemoizedHasCacheKey # noqa +from .traversals import HasCopyInternals # noqa +from .visitors import ClauseVisitor +from .visitors import ExtendedInternalTraversal +from .visitors import ExternallyTraversible +from .visitors import InternalTraversal +from .. import event +from .. import exc +from .. import util +from ..util import HasMemoized as HasMemoized +from ..util import hybridmethod +from ..util import typing as compat_typing +from ..util.typing import Protocol +from ..util.typing import Self +from ..util.typing import TypeGuard + +if TYPE_CHECKING: + from . import coercions + from . import elements + from . 
import type_api + from ._orm_types import DMLStrategyArgument + from ._orm_types import SynchronizeSessionArgument + from ._typing import _CLE + from .compiler import SQLCompiler + from .elements import BindParameter + from .elements import ClauseList + from .elements import ColumnClause # noqa + from .elements import ColumnElement + from .elements import NamedColumn + from .elements import SQLCoreOperations + from .elements import TextClause + from .schema import Column + from .schema import DefaultGenerator + from .selectable import _JoinTargetElement + from .selectable import _SelectIterable + from .selectable import FromClause + from ..engine import Connection + from ..engine import CursorResult + from ..engine.interfaces import _CoreMultiExecuteParams + from ..engine.interfaces import _ExecuteOptions + from ..engine.interfaces import _ImmutableExecuteOptions + from ..engine.interfaces import CacheStats + from ..engine.interfaces import Compiled + from ..engine.interfaces import CompiledCacheType + from ..engine.interfaces import CoreExecuteOptionsParameter + from ..engine.interfaces import Dialect + from ..engine.interfaces import IsolationLevel + from ..engine.interfaces import SchemaTranslateMapType + from ..event import dispatcher + +if not TYPE_CHECKING: + coercions = None # noqa + elements = None # noqa + type_api = None # noqa + + +class _NoArg(Enum): + NO_ARG = 0 + + def __repr__(self): + return f"_NoArg.{self.name}" + + +NO_ARG = _NoArg.NO_ARG + + +class _NoneName(Enum): + NONE_NAME = 0 + """indicate a 'deferred' name that was ultimately the value None.""" + + +_NONE_NAME = _NoneName.NONE_NAME + +_T = TypeVar("_T", bound=Any) + +_Fn = TypeVar("_Fn", bound=Callable[..., Any]) + +_AmbiguousTableNameMap = MutableMapping[str, str] + + +class _DefaultDescriptionTuple(NamedTuple): + arg: Any + is_scalar: Optional[bool] + is_callable: Optional[bool] + is_sentinel: Optional[bool] + + @classmethod + def _from_column_default( + cls, default: Optional[DefaultGenerator] + ) -> _DefaultDescriptionTuple: + return ( + _DefaultDescriptionTuple( + default.arg, # type: ignore + default.is_scalar, + default.is_callable, + default.is_sentinel, + ) + if default + and ( + default.has_arg + or (not default.for_update and default.is_sentinel) + ) + else _DefaultDescriptionTuple(None, None, None, None) + ) + + +_never_select_column = operator.attrgetter("_omit_from_statements") + + +class _EntityNamespace(Protocol): + def __getattr__(self, key: str) -> SQLCoreOperations[Any]: ... + + +class _HasEntityNamespace(Protocol): + @util.ro_non_memoized_property + def entity_namespace(self) -> _EntityNamespace: ... + + +def _is_has_entity_namespace(element: Any) -> TypeGuard[_HasEntityNamespace]: + return hasattr(element, "entity_namespace") + + +# Remove when https://github.com/python/mypy/issues/14640 will be fixed +_Self = TypeVar("_Self", bound=Any) + + +class Immutable: + """mark a ClauseElement as 'immutable' when expressions are cloned. + + "immutable" objects refers to the "mutability" of an object in the + context of SQL DQL and DML generation. Such as, in DQL, one can + compose a SELECT or subquery of varied forms, but one cannot modify + the structure of a specific table or column within DQL. + :class:`.Immutable` is mostly intended to follow this concept, and as + such the primary "immutable" objects are :class:`.ColumnClause`, + :class:`.Column`, :class:`.TableClause`, :class:`.Table`. 
+ + """ + + __slots__ = () + + _is_immutable = True + + def unique_params(self, *optionaldict, **kwargs): + raise NotImplementedError("Immutable objects do not support copying") + + def params(self, *optionaldict, **kwargs): + raise NotImplementedError("Immutable objects do not support copying") + + def _clone(self: _Self, **kw: Any) -> _Self: + return self + + def _copy_internals( + self, *, omit_attrs: Iterable[str] = (), **kw: Any + ) -> None: + pass + + +class SingletonConstant(Immutable): + """Represent SQL constants like NULL, TRUE, FALSE""" + + _is_singleton_constant = True + + _singleton: SingletonConstant + + def __new__(cls: _T, *arg: Any, **kw: Any) -> _T: + return cast(_T, cls._singleton) + + @util.non_memoized_property + def proxy_set(self) -> FrozenSet[ColumnElement[Any]]: + raise NotImplementedError() + + @classmethod + def _create_singleton(cls): + obj = object.__new__(cls) + obj.__init__() # type: ignore + + # for a long time this was an empty frozenset, meaning + # a SingletonConstant would never be a "corresponding column" in + # a statement. This referred to #6259. However, in #7154 we see + # that we do in fact need "correspondence" to work when matching cols + # in result sets, so the non-correspondence was moved to a more + # specific level when we are actually adapting expressions for SQL + # render only. + obj.proxy_set = frozenset([obj]) + cls._singleton = obj + + +def _from_objects( + *elements: Union[ + ColumnElement[Any], FromClause, TextClause, _JoinTargetElement + ] +) -> Iterator[FromClause]: + return itertools.chain.from_iterable( + [element._from_objects for element in elements] + ) + + +def _select_iterables( + elements: Iterable[roles.ColumnsClauseRole], +) -> _SelectIterable: + """expand tables into individual columns in the + given list of column expressions. + + """ + return itertools.chain.from_iterable( + [c._select_iterable for c in elements] + ) + + +_SelfGenerativeType = TypeVar("_SelfGenerativeType", bound="_GenerativeType") + + +class _GenerativeType(compat_typing.Protocol): + def _generate(self) -> Self: ... + + +def _generative(fn: _Fn) -> _Fn: + """non-caching _generative() decorator. + + This is basically the legacy decorator that copies the object and + runs a method on the new copy. 
+ + """ + + @util.decorator + def _generative( + fn: _Fn, self: _SelfGenerativeType, *args: Any, **kw: Any + ) -> _SelfGenerativeType: + """Mark a method as generative.""" + + self = self._generate() + x = fn(self, *args, **kw) + assert x is self, "generative methods must return self" + return self + + decorated = _generative(fn) + decorated.non_generative = fn # type: ignore + return decorated + + +def _exclusive_against(*names: str, **kw: Any) -> Callable[[_Fn], _Fn]: + msgs = kw.pop("msgs", {}) + + defaults = kw.pop("defaults", {}) + + getters = [ + (name, operator.attrgetter(name), defaults.get(name, None)) + for name in names + ] + + @util.decorator + def check(fn, *args, **kw): + # make pylance happy by not including "self" in the argument + # list + self = args[0] + args = args[1:] + for name, getter, default_ in getters: + if getter(self) is not default_: + msg = msgs.get( + name, + "Method %s() has already been invoked on this %s construct" + % (fn.__name__, self.__class__), + ) + raise exc.InvalidRequestError(msg) + return fn(self, *args, **kw) + + return check + + +def _clone(element, **kw): + return element._clone(**kw) + + +def _expand_cloned( + elements: Iterable[_CLE], +) -> Iterable[_CLE]: + """expand the given set of ClauseElements to be the set of all 'cloned' + predecessors. + + """ + # TODO: cython candidate + return itertools.chain(*[x._cloned_set for x in elements]) + + +def _de_clone( + elements: Iterable[_CLE], +) -> Iterable[_CLE]: + for x in elements: + while x._is_clone_of is not None: + x = x._is_clone_of + yield x + + +def _cloned_intersection(a: Iterable[_CLE], b: Iterable[_CLE]) -> Set[_CLE]: + """return the intersection of sets a and b, counting + any overlap between 'cloned' predecessors. + + The returned set is in terms of the entities present within 'a'. + + """ + all_overlap = set(_expand_cloned(a)).intersection(_expand_cloned(b)) + return {elem for elem in a if all_overlap.intersection(elem._cloned_set)} + + +def _cloned_difference(a: Iterable[_CLE], b: Iterable[_CLE]) -> Set[_CLE]: + all_overlap = set(_expand_cloned(a)).intersection(_expand_cloned(b)) + return { + elem for elem in a if not all_overlap.intersection(elem._cloned_set) + } + + +class _DialectArgView(MutableMapping[str, Any]): + """A dictionary view of dialect-level arguments in the form + _. 
+ + """ + + __slots__ = ("obj",) + + def __init__(self, obj): + self.obj = obj + + def _key(self, key): + try: + dialect, value_key = key.split("_", 1) + except ValueError as err: + raise KeyError(key) from err + else: + return dialect, value_key + + def __getitem__(self, key): + dialect, value_key = self._key(key) + + try: + opt = self.obj.dialect_options[dialect] + except exc.NoSuchModuleError as err: + raise KeyError(key) from err + else: + return opt[value_key] + + def __setitem__(self, key, value): + try: + dialect, value_key = self._key(key) + except KeyError as err: + raise exc.ArgumentError( + "Keys must be of the form _" + ) from err + else: + self.obj.dialect_options[dialect][value_key] = value + + def __delitem__(self, key): + dialect, value_key = self._key(key) + del self.obj.dialect_options[dialect][value_key] + + def __len__(self): + return sum( + len(args._non_defaults) + for args in self.obj.dialect_options.values() + ) + + def __iter__(self): + return ( + "%s_%s" % (dialect_name, value_name) + for dialect_name in self.obj.dialect_options + for value_name in self.obj.dialect_options[ + dialect_name + ]._non_defaults + ) + + +class _DialectArgDict(MutableMapping[str, Any]): + """A dictionary view of dialect-level arguments for a specific + dialect. + + Maintains a separate collection of user-specified arguments + and dialect-specified default arguments. + + """ + + def __init__(self): + self._non_defaults = {} + self._defaults = {} + + def __len__(self): + return len(set(self._non_defaults).union(self._defaults)) + + def __iter__(self): + return iter(set(self._non_defaults).union(self._defaults)) + + def __getitem__(self, key): + if key in self._non_defaults: + return self._non_defaults[key] + else: + return self._defaults[key] + + def __setitem__(self, key, value): + self._non_defaults[key] = value + + def __delitem__(self, key): + del self._non_defaults[key] + + +@util.preload_module("sqlalchemy.dialects") +def _kw_reg_for_dialect(dialect_name): + dialect_cls = util.preloaded.dialects.registry.load(dialect_name) + if dialect_cls.construct_arguments is None: + return None + return dict(dialect_cls.construct_arguments) + + +class DialectKWArgs: + """Establish the ability for a class to have dialect-specific arguments + with defaults and constructor validation. + + The :class:`.DialectKWArgs` interacts with the + :attr:`.DefaultDialect.construct_arguments` present on a dialect. + + .. seealso:: + + :attr:`.DefaultDialect.construct_arguments` + + """ + + __slots__ = () + + _dialect_kwargs_traverse_internals = [ + ("dialect_options", InternalTraversal.dp_dialect_options) + ] + + @classmethod + def argument_for(cls, dialect_name, argument_name, default): + """Add a new kind of dialect-specific keyword argument for this class. + + E.g.:: + + Index.argument_for("mydialect", "length", None) + + some_index = Index("a", "b", mydialect_length=5) + + The :meth:`.DialectKWArgs.argument_for` method is a per-argument + way adding extra arguments to the + :attr:`.DefaultDialect.construct_arguments` dictionary. This + dictionary provides a list of argument names accepted by various + schema-level constructs on behalf of a dialect. + + New dialects should typically specify this dictionary all at once as a + data member of the dialect class. The use case for ad-hoc addition of + argument names is typically for end-user code that is also using + a custom compilation scheme which consumes the additional arguments. + + :param dialect_name: name of a dialect. 
The dialect must be + locatable, else a :class:`.NoSuchModuleError` is raised. The + dialect must also include an existing + :attr:`.DefaultDialect.construct_arguments` collection, indicating + that it participates in the keyword-argument validation and default + system, else :class:`.ArgumentError` is raised. If the dialect does + not include this collection, then any keyword argument can be + specified on behalf of this dialect already. All dialects packaged + within SQLAlchemy include this collection, however for third party + dialects, support may vary. + + :param argument_name: name of the parameter. + + :param default: default value of the parameter. + + """ + + construct_arg_dictionary = DialectKWArgs._kw_registry[dialect_name] + if construct_arg_dictionary is None: + raise exc.ArgumentError( + "Dialect '%s' does have keyword-argument " + "validation and defaults enabled configured" % dialect_name + ) + if cls not in construct_arg_dictionary: + construct_arg_dictionary[cls] = {} + construct_arg_dictionary[cls][argument_name] = default + + @property + def dialect_kwargs(self): + """A collection of keyword arguments specified as dialect-specific + options to this construct. + + The arguments are present here in their original ``_`` + format. Only arguments that were actually passed are included; + unlike the :attr:`.DialectKWArgs.dialect_options` collection, which + contains all options known by this dialect including defaults. + + The collection is also writable; keys are accepted of the + form ``_`` where the value will be assembled + into the list of options. + + .. seealso:: + + :attr:`.DialectKWArgs.dialect_options` - nested dictionary form + + """ + return _DialectArgView(self) + + @property + def kwargs(self): + """A synonym for :attr:`.DialectKWArgs.dialect_kwargs`.""" + return self.dialect_kwargs + + _kw_registry = util.PopulateDict(_kw_reg_for_dialect) + + @classmethod + def _kw_reg_for_dialect_cls(cls, dialect_name): + construct_arg_dictionary = DialectKWArgs._kw_registry[dialect_name] + d = _DialectArgDict() + + if construct_arg_dictionary is None: + d._defaults.update({"*": None}) + else: + for cls in reversed(cls.__mro__): + if cls in construct_arg_dictionary: + d._defaults.update(construct_arg_dictionary[cls]) + return d + + @util.memoized_property + def dialect_options(self): + """A collection of keyword arguments specified as dialect-specific + options to this construct. + + This is a two-level nested registry, keyed to ```` + and ````. For example, the ``postgresql_where`` + argument would be locatable as:: + + arg = my_object.dialect_options["postgresql"]["where"] + + .. versionadded:: 0.9.2 + + .. 
seealso:: + + :attr:`.DialectKWArgs.dialect_kwargs` - flat dictionary form + + """ + + return util.PopulateDict(self._kw_reg_for_dialect_cls) + + def _validate_dialect_kwargs(self, kwargs: Dict[str, Any]) -> None: + # validate remaining kwargs that they all specify DB prefixes + + if not kwargs: + return + + for k in kwargs: + m = re.match("^(.+?)_(.+)$", k) + if not m: + raise TypeError( + "Additional arguments should be " + "named _, got '%s'" % k + ) + dialect_name, arg_name = m.group(1, 2) + + try: + construct_arg_dictionary = self.dialect_options[dialect_name] + except exc.NoSuchModuleError: + util.warn( + "Can't validate argument %r; can't " + "locate any SQLAlchemy dialect named %r" + % (k, dialect_name) + ) + self.dialect_options[dialect_name] = d = _DialectArgDict() + d._defaults.update({"*": None}) + d._non_defaults[arg_name] = kwargs[k] + else: + if ( + "*" not in construct_arg_dictionary + and arg_name not in construct_arg_dictionary + ): + raise exc.ArgumentError( + "Argument %r is not accepted by " + "dialect %r on behalf of %r" + % (k, dialect_name, self.__class__) + ) + else: + construct_arg_dictionary[arg_name] = kwargs[k] + + +class CompileState: + """Produces additional object state necessary for a statement to be + compiled. + + the :class:`.CompileState` class is at the base of classes that assemble + state for a particular statement object that is then used by the + compiler. This process is essentially an extension of the process that + the SQLCompiler.visit_XYZ() method takes, however there is an emphasis + on converting raw user intent into more organized structures rather than + producing string output. The top-level :class:`.CompileState` for the + statement being executed is also accessible when the execution context + works with invoking the statement and collecting results. + + The production of :class:`.CompileState` is specific to the compiler, such + as within the :meth:`.SQLCompiler.visit_insert`, + :meth:`.SQLCompiler.visit_select` etc. methods. These methods are also + responsible for associating the :class:`.CompileState` with the + :class:`.SQLCompiler` itself, if the statement is the "toplevel" statement, + i.e. the outermost SQL statement that's actually being executed. + There can be other :class:`.CompileState` objects that are not the + toplevel, such as when a SELECT subquery or CTE-nested + INSERT/UPDATE/DELETE is generated. + + .. versionadded:: 1.4 + + """ + + __slots__ = ("statement", "_ambiguous_table_name_map") + + plugins: Dict[Tuple[str, str], Type[CompileState]] = {} + + _ambiguous_table_name_map: Optional[_AmbiguousTableNameMap] + + @classmethod + def create_for_statement( + cls, statement: Executable, compiler: SQLCompiler, **kw: Any + ) -> CompileState: + # factory construction. 
+ + if statement._propagate_attrs: + plugin_name = statement._propagate_attrs.get( + "compile_state_plugin", "default" + ) + klass = cls.plugins.get( + (plugin_name, statement._effective_plugin_target), None + ) + if klass is None: + klass = cls.plugins[ + ("default", statement._effective_plugin_target) + ] + + else: + klass = cls.plugins[ + ("default", statement._effective_plugin_target) + ] + + if klass is cls: + return cls(statement, compiler, **kw) + else: + return klass.create_for_statement(statement, compiler, **kw) + + def __init__(self, statement, compiler, **kw): + self.statement = statement + + @classmethod + def get_plugin_class( + cls, statement: Executable + ) -> Optional[Type[CompileState]]: + plugin_name = statement._propagate_attrs.get( + "compile_state_plugin", None + ) + + if plugin_name: + key = (plugin_name, statement._effective_plugin_target) + if key in cls.plugins: + return cls.plugins[key] + + # there's no case where we call upon get_plugin_class() and want + # to get None back, there should always be a default. return that + # if there was no plugin-specific class (e.g. Insert with "orm" + # plugin) + try: + return cls.plugins[("default", statement._effective_plugin_target)] + except KeyError: + return None + + @classmethod + def _get_plugin_class_for_plugin( + cls, statement: Executable, plugin_name: str + ) -> Optional[Type[CompileState]]: + try: + return cls.plugins[ + (plugin_name, statement._effective_plugin_target) + ] + except KeyError: + return None + + @classmethod + def plugin_for( + cls, plugin_name: str, visit_name: str + ) -> Callable[[_Fn], _Fn]: + def decorate(cls_to_decorate): + cls.plugins[(plugin_name, visit_name)] = cls_to_decorate + return cls_to_decorate + + return decorate + + +class Generative(HasMemoized): + """Provide a method-chaining pattern in conjunction with the + @_generative decorator.""" + + def _generate(self) -> Self: + skip = self._memoized_keys + cls = self.__class__ + s = cls.__new__(cls) + if skip: + # ensure this iteration remains atomic + s.__dict__ = { + k: v for k, v in self.__dict__.copy().items() if k not in skip + } + else: + s.__dict__ = self.__dict__.copy() + return s + + +class InPlaceGenerative(HasMemoized): + """Provide a method-chaining pattern in conjunction with the + @_generative decorator that mutates in place.""" + + __slots__ = () + + def _generate(self): + skip = self._memoized_keys + # note __dict__ needs to be in __slots__ if this is used + for k in skip: + self.__dict__.pop(k, None) + return self + + +class HasCompileState(Generative): + """A class that has a :class:`.CompileState` associated with it.""" + + _compile_state_plugin: Optional[Type[CompileState]] = None + + _attributes: util.immutabledict[str, Any] = util.EMPTY_DICT + + _compile_state_factory = CompileState.create_for_statement + + +class _MetaOptions(type): + """metaclass for the Options class. + + This metaclass is actually necessary despite the availability of the + ``__init_subclass__()`` hook as this type also provides custom class-level + behavior for the ``__add__()`` method. + + """ + + _cache_attrs: Tuple[str, ...] + + def __add__(self, other): + o1 = self() + + if set(other).difference(self._cache_attrs): + raise TypeError( + "dictionary contains attributes not covered by " + "Options class %s: %r" + % (self, set(other).difference(self._cache_attrs)) + ) + + o1.__dict__.update(other) + return o1 + + if TYPE_CHECKING: + + def __getattr__(self, key: str) -> Any: ... + + def __setattr__(self, key: str, value: Any) -> None: ... 
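Editorial aside (not part of the vendored file): the dialect-scoped keyword handling described above, with its flat and nested views, can be exercised on any DialectKWArgs subclass such as Index. A minimal sketch, assuming only a stock SQLAlchemy install; the table and index names are illustrative::

    from sqlalchemy import Column, Index, Integer, MetaData, Table

    metadata = MetaData()
    t = Table("t", metadata, Column("x", Integer))

    # "postgresql_using" is validated against the postgresql dialect's
    # construct_arguments collection when the Index is constructed
    ix = Index("ix_t_x", t.c.x, postgresql_using="btree")

    # flat view: only the arguments that were actually passed
    print(dict(ix.dialect_kwargs))                    # {'postgresql_using': 'btree'}

    # nested view, keyed by dialect name and then argument name
    print(ix.dialect_options["postgresql"]["using"])  # 'btree'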
+ + def __delattr__(self, key: str) -> None: ... + + +class Options(metaclass=_MetaOptions): + """A cacheable option dictionary with defaults.""" + + __slots__ = () + + _cache_attrs: Tuple[str, ...] + + def __init_subclass__(cls) -> None: + dict_ = cls.__dict__ + cls._cache_attrs = tuple( + sorted( + d + for d in dict_ + if not d.startswith("__") + and d not in ("_cache_key_traversal",) + ) + ) + super().__init_subclass__() + + def __init__(self, **kw): + self.__dict__.update(kw) + + def __add__(self, other): + o1 = self.__class__.__new__(self.__class__) + o1.__dict__.update(self.__dict__) + + if set(other).difference(self._cache_attrs): + raise TypeError( + "dictionary contains attributes not covered by " + "Options class %s: %r" + % (self, set(other).difference(self._cache_attrs)) + ) + + o1.__dict__.update(other) + return o1 + + def __eq__(self, other): + # TODO: very inefficient. This is used only in test suites + # right now. + for a, b in zip_longest(self._cache_attrs, other._cache_attrs): + if getattr(self, a) != getattr(other, b): + return False + return True + + def __repr__(self): + # TODO: fairly inefficient, used only in debugging right now. + + return "%s(%s)" % ( + self.__class__.__name__, + ", ".join( + "%s=%r" % (k, self.__dict__[k]) + for k in self._cache_attrs + if k in self.__dict__ + ), + ) + + @classmethod + def isinstance(cls, klass: Type[Any]) -> bool: + return issubclass(cls, klass) + + @hybridmethod + def add_to_element(self, name, value): + return self + {name: getattr(self, name) + value} + + @hybridmethod + def _state_dict_inst(self) -> Mapping[str, Any]: + return self.__dict__ + + _state_dict_const: util.immutabledict[str, Any] = util.EMPTY_DICT + + @_state_dict_inst.classlevel + def _state_dict(cls) -> Mapping[str, Any]: + return cls._state_dict_const + + @classmethod + def safe_merge(cls, other): + d = other._state_dict() + + # only support a merge with another object of our class + # and which does not have attrs that we don't. otherwise + # we risk having state that might not be part of our cache + # key strategy + + if ( + cls is not other.__class__ + and other._cache_attrs + and set(other._cache_attrs).difference(cls._cache_attrs) + ): + raise TypeError( + "other element %r is not empty, is not of type %s, " + "and contains attributes not covered here %r" + % ( + other, + cls, + set(other._cache_attrs).difference(cls._cache_attrs), + ) + ) + return cls + d + + @classmethod + def from_execution_options( + cls, key, attrs, exec_options, statement_exec_options + ): + """process Options argument in terms of execution options. 
+ + + e.g.:: + + ( + load_options, + execution_options, + ) = QueryContext.default_load_options.from_execution_options( + "_sa_orm_load_options", + {"populate_existing", "autoflush", "yield_per"}, + execution_options, + statement._execution_options, + ) + + get back the Options and refresh "_sa_orm_load_options" in the + exec options dict w/ the Options as well + + """ + + # common case is that no options we are looking for are + # in either dictionary, so cancel for that first + check_argnames = attrs.intersection( + set(exec_options).union(statement_exec_options) + ) + + existing_options = exec_options.get(key, cls) + + if check_argnames: + result = {} + for argname in check_argnames: + local = "_" + argname + if argname in exec_options: + result[local] = exec_options[argname] + elif argname in statement_exec_options: + result[local] = statement_exec_options[argname] + + new_options = existing_options + result + exec_options = util.immutabledict().merge_with( + exec_options, {key: new_options} + ) + return new_options, exec_options + + else: + return existing_options, exec_options + + if TYPE_CHECKING: + + def __getattr__(self, key: str) -> Any: ... + + def __setattr__(self, key: str, value: Any) -> None: ... + + def __delattr__(self, key: str) -> None: ... + + +class CacheableOptions(Options, HasCacheKey): + __slots__ = () + + @hybridmethod + def _gen_cache_key_inst(self, anon_map, bindparams): + return HasCacheKey._gen_cache_key(self, anon_map, bindparams) + + @_gen_cache_key_inst.classlevel + def _gen_cache_key(cls, anon_map, bindparams): + return (cls, ()) + + @hybridmethod + def _generate_cache_key(self): + return HasCacheKey._generate_cache_key_for_object(self) + + +class ExecutableOption(HasCopyInternals): + __slots__ = () + + _annotations = util.EMPTY_DICT + + __visit_name__ = "executable_option" + + _is_has_cache_key = False + + _is_core = True + + def _clone(self, **kw): + """Create a shallow copy of this ExecutableOption.""" + c = self.__class__.__new__(self.__class__) + c.__dict__ = dict(self.__dict__) # type: ignore + return c + + +class Executable(roles.StatementRole): + """Mark a :class:`_expression.ClauseElement` as supporting execution. + + :class:`.Executable` is a superclass for all "statement" types + of objects, including :func:`select`, :func:`delete`, :func:`update`, + :func:`insert`, :func:`text`. + + """ + + supports_execution: bool = True + _execution_options: _ImmutableExecuteOptions = util.EMPTY_DICT + _is_default_generator = False + _with_options: Tuple[ExecutableOption, ...] = () + _with_context_options: Tuple[ + Tuple[Callable[[CompileState], None], Any], ... + ] = () + _compile_options: Optional[Union[Type[CacheableOptions], CacheableOptions]] + + _executable_traverse_internals = [ + ("_with_options", InternalTraversal.dp_executable_options), + ( + "_with_context_options", + ExtendedInternalTraversal.dp_with_context_options, + ), + ("_propagate_attrs", ExtendedInternalTraversal.dp_propagate_attrs), + ] + + is_select = False + is_from_statement = False + is_update = False + is_insert = False + is_text = False + is_delete = False + is_dml = False + + if TYPE_CHECKING: + __visit_name__: str + + def _compile_w_cache( + self, + dialect: Dialect, + *, + compiled_cache: Optional[CompiledCacheType], + column_keys: List[str], + for_executemany: bool = False, + schema_translate_map: Optional[SchemaTranslateMapType] = None, + **kw: Any, + ) -> Tuple[ + Compiled, Optional[Sequence[BindParameter[Any]]], CacheStats + ]: ... 
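Editorial aside (not part of the vendored file): a sketch of the Options merging behavior described above, using a hypothetical subclass ``_MyOptions`` (an invented name). Class-level attributes become the permitted ``_cache_attrs``, and only those keys may be merged in; Options is an internal SQLAlchemy construct, so this is purely illustrative::

    from sqlalchemy.sql.base import Options


    class _MyOptions(Options):  # hypothetical subclass, for illustration only
        flag_one = False
        flag_two = None


    opts = _MyOptions()

    # __add__ accepts a plain dict whose keys must all be declared attributes
    merged = opts + {"flag_one": True}
    print(merged.flag_one, merged.flag_two)  # True None

    # undeclared keys are rejected, since they would fall outside the
    # cache key strategy
    try:
        opts + {"unknown": 1}
    except TypeError as err:
        print(err)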
+ + def _execute_on_connection( + self, + connection: Connection, + distilled_params: _CoreMultiExecuteParams, + execution_options: CoreExecuteOptionsParameter, + ) -> CursorResult[Any]: ... + + def _execute_on_scalar( + self, + connection: Connection, + distilled_params: _CoreMultiExecuteParams, + execution_options: CoreExecuteOptionsParameter, + ) -> Any: ... + + @util.ro_non_memoized_property + def _all_selected_columns(self): + raise NotImplementedError() + + @property + def _effective_plugin_target(self) -> str: + return self.__visit_name__ + + @_generative + def options(self, *options: ExecutableOption) -> Self: + """Apply options to this statement. + + In the general sense, options are any kind of Python object + that can be interpreted by the SQL compiler for the statement. + These options can be consumed by specific dialects or specific kinds + of compilers. + + The most commonly known kind of option are the ORM level options + that apply "eager load" and other loading behaviors to an ORM + query. However, options can theoretically be used for many other + purposes. + + For background on specific kinds of options for specific kinds of + statements, refer to the documentation for those option objects. + + .. versionchanged:: 1.4 - added :meth:`.Executable.options` to + Core statement objects towards the goal of allowing unified + Core / ORM querying capabilities. + + .. seealso:: + + :ref:`loading_columns` - refers to options specific to the usage + of ORM queries + + :ref:`relationship_loader_options` - refers to options specific + to the usage of ORM queries + + """ + self._with_options += tuple( + coercions.expect(roles.ExecutableOptionRole, opt) + for opt in options + ) + return self + + @_generative + def _set_compile_options(self, compile_options: CacheableOptions) -> Self: + """Assign the compile options to a new value. + + :param compile_options: appropriate CacheableOptions structure + + """ + + self._compile_options = compile_options + return self + + @_generative + def _update_compile_options(self, options: CacheableOptions) -> Self: + """update the _compile_options with new keys.""" + + assert self._compile_options is not None + self._compile_options += options + return self + + @_generative + def _add_context_option( + self, + callable_: Callable[[CompileState], None], + cache_args: Any, + ) -> Self: + """Add a context option to this statement. + + These are callable functions that will + be given the CompileState object upon compilation. + + A second argument cache_args is required, which will be combined with + the ``__code__`` identity of the function itself in order to produce a + cache key. + + """ + self._with_context_options += ((callable_, cache_args),) + return self + + @overload + def execution_options( + self, + *, + compiled_cache: Optional[CompiledCacheType] = ..., + logging_token: str = ..., + isolation_level: IsolationLevel = ..., + no_parameters: bool = False, + stream_results: bool = False, + max_row_buffer: int = ..., + yield_per: int = ..., + insertmanyvalues_page_size: int = ..., + schema_translate_map: Optional[SchemaTranslateMapType] = ..., + populate_existing: bool = False, + autoflush: bool = False, + synchronize_session: SynchronizeSessionArgument = ..., + dml_strategy: DMLStrategyArgument = ..., + render_nulls: bool = ..., + is_delete_using: bool = ..., + is_update_from: bool = ..., + preserve_rowcount: bool = False, + **opt: Any, + ) -> Self: ... + + @overload + def execution_options(self, **opt: Any) -> Self: ... 
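Editorial aside (not part of the vendored file): a short sketch of the generative pattern these methods follow; ``execution_options()`` itself is documented in full in the docstring that follows. Applying a statement-level option returns a copy and leaves the original statement unchanged::

    from sqlalchemy import column, select, table

    t = table("t", column("x"))
    stmt = select(t)

    # generative: a copy carrying the new option is returned
    stmt2 = stmt.execution_options(stream_results=True)

    print(stmt is stmt2)                        # False
    print(dict(stmt.get_execution_options()))   # {}
    print(dict(stmt2.get_execution_options()))  # {'stream_results': True}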
+ + @_generative + def execution_options(self, **kw: Any) -> Self: + """Set non-SQL options for the statement which take effect during + execution. + + Execution options can be set at many scopes, including per-statement, + per-connection, or per execution, using methods such as + :meth:`_engine.Connection.execution_options` and parameters which + accept a dictionary of options such as + :paramref:`_engine.Connection.execute.execution_options` and + :paramref:`_orm.Session.execute.execution_options`. + + The primary characteristic of an execution option, as opposed to + other kinds of options such as ORM loader options, is that + **execution options never affect the compiled SQL of a query, only + things that affect how the SQL statement itself is invoked or how + results are fetched**. That is, execution options are not part of + what's accommodated by SQL compilation nor are they considered part of + the cached state of a statement. + + The :meth:`_sql.Executable.execution_options` method is + :term:`generative`, as + is the case for the method as applied to the :class:`_engine.Engine` + and :class:`_orm.Query` objects, which means when the method is called, + a copy of the object is returned, which applies the given parameters to + that new copy, but leaves the original unchanged:: + + statement = select(table.c.x, table.c.y) + new_statement = statement.execution_options(my_option=True) + + An exception to this behavior is the :class:`_engine.Connection` + object, where the :meth:`_engine.Connection.execution_options` method + is explicitly **not** generative. + + The kinds of options that may be passed to + :meth:`_sql.Executable.execution_options` and other related methods and + parameter dictionaries include parameters that are explicitly consumed + by SQLAlchemy Core or ORM, as well as arbitrary keyword arguments not + defined by SQLAlchemy, which means the methods and/or parameter + dictionaries may be used for user-defined parameters that interact with + custom code, which may access the parameters using methods such as + :meth:`_sql.Executable.get_execution_options` and + :meth:`_engine.Connection.get_execution_options`, or within selected + event hooks using a dedicated ``execution_options`` event parameter + such as + :paramref:`_events.ConnectionEvents.before_execute.execution_options` + or :attr:`_orm.ORMExecuteState.execution_options`, e.g.:: + + from sqlalchemy import event + + + @event.listens_for(some_engine, "before_execute") + def _process_opt(conn, statement, multiparams, params, execution_options): + "run a SQL function before invoking a statement" + + if execution_options.get("do_special_thing", False): + conn.exec_driver_sql("run_special_function()") + + Within the scope of options that are explicitly recognized by + SQLAlchemy, most apply to specific classes of objects and not others. + The most common execution options include: + + * :paramref:`_engine.Connection.execution_options.isolation_level` - + sets the isolation level for a connection or a class of connections + via an :class:`_engine.Engine`. This option is accepted only + by :class:`_engine.Connection` or :class:`_engine.Engine`. 
+ + * :paramref:`_engine.Connection.execution_options.stream_results` - + indicates results should be fetched using a server side cursor; + this option is accepted by :class:`_engine.Connection`, by the + :paramref:`_engine.Connection.execute.execution_options` parameter + on :meth:`_engine.Connection.execute`, and additionally by + :meth:`_sql.Executable.execution_options` on a SQL statement object, + as well as by ORM constructs like :meth:`_orm.Session.execute`. + + * :paramref:`_engine.Connection.execution_options.compiled_cache` - + indicates a dictionary that will serve as the + :ref:`SQL compilation cache ` + for a :class:`_engine.Connection` or :class:`_engine.Engine`, as + well as for ORM methods like :meth:`_orm.Session.execute`. + Can be passed as ``None`` to disable caching for statements. + This option is not accepted by + :meth:`_sql.Executable.execution_options` as it is inadvisable to + carry along a compilation cache within a statement object. + + * :paramref:`_engine.Connection.execution_options.schema_translate_map` + - a mapping of schema names used by the + :ref:`Schema Translate Map ` feature, accepted + by :class:`_engine.Connection`, :class:`_engine.Engine`, + :class:`_sql.Executable`, as well as by ORM constructs + like :meth:`_orm.Session.execute`. + + .. seealso:: + + :meth:`_engine.Connection.execution_options` + + :paramref:`_engine.Connection.execute.execution_options` + + :paramref:`_orm.Session.execute.execution_options` + + :ref:`orm_queryguide_execution_options` - documentation on all + ORM-specific execution options + + """ # noqa: E501 + if "isolation_level" in kw: + raise exc.ArgumentError( + "'isolation_level' execution option may only be specified " + "on Connection.execution_options(), or " + "per-engine using the isolation_level " + "argument to create_engine()." + ) + if "compiled_cache" in kw: + raise exc.ArgumentError( + "'compiled_cache' execution option may only be specified " + "on Connection.execution_options(), not per statement." + ) + self._execution_options = self._execution_options.union(kw) + return self + + def get_execution_options(self) -> _ExecuteOptions: + """Get the non-SQL options which will take effect during execution. + + .. versionadded:: 1.3 + + .. seealso:: + + :meth:`.Executable.execution_options` + """ + return self._execution_options + + +class SchemaEventTarget(event.EventTarget): + """Base class for elements that are the targets of :class:`.DDLEvents` + events. + + This includes :class:`.SchemaItem` as well as :class:`.SchemaType`. + + """ + + dispatch: dispatcher[SchemaEventTarget] + + def _set_parent(self, parent: SchemaEventTarget, **kw: Any) -> None: + """Associate with this SchemaEvent's parent object.""" + + def _set_parent_with_dispatch( + self, parent: SchemaEventTarget, **kw: Any + ) -> None: + self.dispatch.before_parent_attach(self, parent) + self._set_parent(parent, **kw) + self.dispatch.after_parent_attach(self, parent) + + +class SchemaVisitable(SchemaEventTarget, visitors.Visitable): + """Base class for elements that are targets of a :class:`.SchemaVisitor`. + + .. versionadded:: 2.0.41 + + """ + + +class SchemaVisitor(ClauseVisitor): + """Define the visiting for ``SchemaItem`` and more + generally ``SchemaVisitable`` objects. 
+ + """ + + __traverse_options__ = {"schema_visitor": True} + + +class _SentinelDefaultCharacterization(Enum): + NONE = "none" + UNKNOWN = "unknown" + CLIENTSIDE = "clientside" + SENTINEL_DEFAULT = "sentinel_default" + SERVERSIDE = "serverside" + IDENTITY = "identity" + SEQUENCE = "sequence" + + +class _SentinelColumnCharacterization(NamedTuple): + columns: Optional[Sequence[Column[Any]]] = None + is_explicit: bool = False + is_autoinc: bool = False + default_characterization: _SentinelDefaultCharacterization = ( + _SentinelDefaultCharacterization.NONE + ) + + +_COLKEY = TypeVar("_COLKEY", Union[None, str], str) + +_COL_co = TypeVar("_COL_co", bound="ColumnElement[Any]", covariant=True) +_COL = TypeVar("_COL", bound="ColumnElement[Any]") + + +class _ColumnMetrics(Generic[_COL_co]): + __slots__ = ("column",) + + column: _COL_co + + def __init__( + self, collection: ColumnCollection[Any, _COL_co], col: _COL_co + ): + self.column = col + + # proxy_index being non-empty means it was initialized. + # so we need to update it + pi = collection._proxy_index + if pi: + for eps_col in col._expanded_proxy_set: + pi[eps_col].add(self) + + def get_expanded_proxy_set(self): + return self.column._expanded_proxy_set + + def dispose(self, collection): + pi = collection._proxy_index + if not pi: + return + for col in self.column._expanded_proxy_set: + colset = pi.get(col, None) + if colset: + colset.discard(self) + if colset is not None and not colset: + del pi[col] + + def embedded( + self, + target_set: Union[ + Set[ColumnElement[Any]], FrozenSet[ColumnElement[Any]] + ], + ) -> bool: + expanded_proxy_set = self.column._expanded_proxy_set + for t in target_set.difference(expanded_proxy_set): + if not expanded_proxy_set.intersection(_expand_cloned([t])): + return False + return True + + +class ColumnCollection(Generic[_COLKEY, _COL_co]): + """Collection of :class:`_expression.ColumnElement` instances, + typically for + :class:`_sql.FromClause` objects. + + The :class:`_sql.ColumnCollection` object is most commonly available + as the :attr:`_schema.Table.c` or :attr:`_schema.Table.columns` collection + on the :class:`_schema.Table` object, introduced at + :ref:`metadata_tables_and_columns`. + + The :class:`_expression.ColumnCollection` has both mapping- and sequence- + like behaviors. A :class:`_expression.ColumnCollection` usually stores + :class:`_schema.Column` objects, which are then accessible both via mapping + style access as well as attribute access style. + + To access :class:`_schema.Column` objects using ordinary attribute-style + access, specify the name like any other object attribute, such as below + a column named ``employee_name`` is accessed:: + + >>> employee_table.c.employee_name + + To access columns that have names with special characters or spaces, + index-style access is used, such as below which illustrates a column named + ``employee ' payment`` is accessed:: + + >>> employee_table.c["employee ' payment"] + + As the :class:`_sql.ColumnCollection` object provides a Python dictionary + interface, common dictionary method names like + :meth:`_sql.ColumnCollection.keys`, :meth:`_sql.ColumnCollection.values`, + and :meth:`_sql.ColumnCollection.items` are available, which means that + database columns that are keyed under these names also need to use indexed + access:: + + >>> employee_table.c["values"] + + + The name for which a :class:`_schema.Column` would be present is normally + that of the :paramref:`_schema.Column.key` parameter. 
In some contexts, + such as a :class:`_sql.Select` object that uses a label style set + using the :meth:`_sql.Select.set_label_style` method, a column of a certain + key may instead be represented under a particular label name such + as ``tablename_columnname``:: + + >>> from sqlalchemy import select, column, table + >>> from sqlalchemy import LABEL_STYLE_TABLENAME_PLUS_COL + >>> t = table("t", column("c")) + >>> stmt = select(t).set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL) + >>> subq = stmt.subquery() + >>> subq.c.t_c + + + :class:`.ColumnCollection` also indexes the columns in order and allows + them to be accessible by their integer position:: + + >>> cc[0] + Column('x', Integer(), table=None) + >>> cc[1] + Column('y', Integer(), table=None) + + .. versionadded:: 1.4 :class:`_expression.ColumnCollection` + allows integer-based + index access to the collection. + + Iterating the collection yields the column expressions in order:: + + >>> list(cc) + [Column('x', Integer(), table=None), + Column('y', Integer(), table=None)] + + The base :class:`_expression.ColumnCollection` object can store + duplicates, which can + mean either two columns with the same key, in which case the column + returned by key access is **arbitrary**:: + + >>> x1, x2 = Column("x", Integer), Column("x", Integer) + >>> cc = ColumnCollection(columns=[(x1.name, x1), (x2.name, x2)]) + >>> list(cc) + [Column('x', Integer(), table=None), + Column('x', Integer(), table=None)] + >>> cc["x"] is x1 + False + >>> cc["x"] is x2 + True + + Or it can also mean the same column multiple times. These cases are + supported as :class:`_expression.ColumnCollection` + is used to represent the columns in + a SELECT statement which may include duplicates. + + A special subclass :class:`.DedupeColumnCollection` exists which instead + maintains SQLAlchemy's older behavior of not allowing duplicates; this + collection is used for schema level objects like :class:`_schema.Table` + and + :class:`.PrimaryKeyConstraint` where this deduping is helpful. The + :class:`.DedupeColumnCollection` class also has additional mutation methods + as the schema constructs have more use cases that require removal and + replacement of columns. + + .. versionchanged:: 1.4 :class:`_expression.ColumnCollection` + now stores duplicate + column keys as well as the same column in multiple positions. The + :class:`.DedupeColumnCollection` class is added to maintain the + former behavior in those cases where deduplication as well as + additional replace/remove operations are needed. 
+ + + """ + + __slots__ = "_collection", "_index", "_colset", "_proxy_index" + + _collection: List[Tuple[_COLKEY, _COL_co, _ColumnMetrics[_COL_co]]] + _index: Dict[Union[None, str, int], Tuple[_COLKEY, _COL_co]] + _proxy_index: Dict[ColumnElement[Any], Set[_ColumnMetrics[_COL_co]]] + _colset: Set[_COL_co] + + def __init__( + self, columns: Optional[Iterable[Tuple[_COLKEY, _COL_co]]] = None + ): + object.__setattr__(self, "_colset", set()) + object.__setattr__(self, "_index", {}) + object.__setattr__( + self, "_proxy_index", collections.defaultdict(util.OrderedSet) + ) + object.__setattr__(self, "_collection", []) + if columns: + self._initial_populate(columns) + + @util.preload_module("sqlalchemy.sql.elements") + def __clause_element__(self) -> ClauseList: + elements = util.preloaded.sql_elements + + return elements.ClauseList( + _literal_as_text_role=roles.ColumnsClauseRole, + group=False, + *self._all_columns, + ) + + def _initial_populate( + self, iter_: Iterable[Tuple[_COLKEY, _COL_co]] + ) -> None: + self._populate_separate_keys(iter_) + + @property + def _all_columns(self) -> List[_COL_co]: + return [col for (_, col, _) in self._collection] + + def keys(self) -> List[_COLKEY]: + """Return a sequence of string key names for all columns in this + collection.""" + return [k for (k, _, _) in self._collection] + + def values(self) -> List[_COL_co]: + """Return a sequence of :class:`_sql.ColumnClause` or + :class:`_schema.Column` objects for all columns in this + collection.""" + return [col for (_, col, _) in self._collection] + + def items(self) -> List[Tuple[_COLKEY, _COL_co]]: + """Return a sequence of (key, column) tuples for all columns in this + collection each consisting of a string key name and a + :class:`_sql.ColumnClause` or + :class:`_schema.Column` object. + """ + + return [(k, col) for (k, col, _) in self._collection] + + def __bool__(self) -> bool: + return bool(self._collection) + + def __len__(self) -> int: + return len(self._collection) + + def __iter__(self) -> Iterator[_COL_co]: + # turn to a list first to maintain over a course of changes + return iter([col for _, col, _ in self._collection]) + + @overload + def __getitem__(self, key: Union[str, int]) -> _COL_co: ... + + @overload + def __getitem__( + self, key: Tuple[Union[str, int], ...] + ) -> ReadOnlyColumnCollection[_COLKEY, _COL_co]: ... + + @overload + def __getitem__( + self, key: slice + ) -> ReadOnlyColumnCollection[_COLKEY, _COL_co]: ... 
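Editorial aside (not part of the vendored file): a brief sketch of the access styles the ColumnCollection docstring above describes, using the ``Table.c`` collection; the table and column names are illustrative::

    from sqlalchemy import Column, Integer, MetaData, Table

    metadata = MetaData()
    employee = Table(
        "employee",
        metadata,
        Column("employee_name", Integer),
        Column("values", Integer),  # collides with the .values() method
    )

    print(employee.c.employee_name)  # attribute-style access
    print(employee.c["values"])      # index-style access is required here
    print(employee.c.keys())         # ['employee_name', 'values']
    print(employee.c[0] is employee.c.employee_name)  # True: positional access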
+ + def __getitem__( + self, key: Union[str, int, slice, Tuple[Union[str, int], ...]] + ) -> Union[ReadOnlyColumnCollection[_COLKEY, _COL_co], _COL_co]: + try: + if isinstance(key, (tuple, slice)): + if isinstance(key, slice): + cols = ( + (sub_key, col) + for (sub_key, col, _) in self._collection[key] + ) + else: + cols = (self._index[sub_key] for sub_key in key) + + return ColumnCollection(cols).as_readonly() + else: + return self._index[key][1] + except KeyError as err: + if isinstance(err.args[0], int): + raise IndexError(err.args[0]) from err + else: + raise + + def __getattr__(self, key: str) -> _COL_co: + try: + return self._index[key][1] + except KeyError as err: + raise AttributeError(key) from err + + def __contains__(self, key: str) -> bool: + if key not in self._index: + if not isinstance(key, str): + raise exc.ArgumentError( + "__contains__ requires a string argument" + ) + return False + else: + return True + + def compare(self, other: ColumnCollection[Any, Any]) -> bool: + """Compare this :class:`_expression.ColumnCollection` to another + based on the names of the keys""" + + for l, r in zip_longest(self, other): + if l is not r: + return False + else: + return True + + def __eq__(self, other: Any) -> bool: + return self.compare(other) + + @overload + def get(self, key: str, default: None = None) -> Optional[_COL_co]: ... + + @overload + def get(self, key: str, default: _COL) -> Union[_COL_co, _COL]: ... + + def get( + self, key: str, default: Optional[_COL] = None + ) -> Optional[Union[_COL_co, _COL]]: + """Get a :class:`_sql.ColumnClause` or :class:`_schema.Column` object + based on a string key name from this + :class:`_expression.ColumnCollection`.""" + + if key in self._index: + return self._index[key][1] + else: + return default + + def __str__(self) -> str: + return "%s(%s)" % ( + self.__class__.__name__, + ", ".join(str(c) for c in self), + ) + + def __setitem__(self, key: str, value: Any) -> NoReturn: + raise NotImplementedError() + + def __delitem__(self, key: str) -> NoReturn: + raise NotImplementedError() + + def __setattr__(self, key: str, obj: Any) -> NoReturn: + raise NotImplementedError() + + def clear(self) -> NoReturn: + """Dictionary clear() is not implemented for + :class:`_sql.ColumnCollection`.""" + raise NotImplementedError() + + def remove(self, column: Any) -> None: + raise NotImplementedError() + + def update(self, iter_: Any) -> NoReturn: + """Dictionary update() is not implemented for + :class:`_sql.ColumnCollection`.""" + raise NotImplementedError() + + # https://github.com/python/mypy/issues/4266 + __hash__ = None # type: ignore + + def _populate_separate_keys( + self, iter_: Iterable[Tuple[_COLKEY, _COL_co]] + ) -> None: + """populate from an iterator of (key, column)""" + + self._collection[:] = collection = [ + (k, c, _ColumnMetrics(self, c)) for k, c in iter_ + ] + self._colset.update(c._deannotate() for _, c, _ in collection) + self._index.update( + {idx: (k, c) for idx, (k, c, _) in enumerate(collection)} + ) + self._index.update({k: (k, col) for k, col, _ in reversed(collection)}) + + def add( + self, column: ColumnElement[Any], key: Optional[_COLKEY] = None + ) -> None: + """Add a column to this :class:`_sql.ColumnCollection`. + + .. note:: + + This method is **not normally used by user-facing code**, as the + :class:`_sql.ColumnCollection` is usually part of an existing + object such as a :class:`_schema.Table`. 
To add a + :class:`_schema.Column` to an existing :class:`_schema.Table` + object, use the :meth:`_schema.Table.append_column` method. + + """ + colkey: _COLKEY + + if key is None: + colkey = column.key # type: ignore + else: + colkey = key + + l = len(self._collection) + + # don't really know how this part is supposed to work w/ the + # covariant thing + + _column = cast(_COL_co, column) + + self._collection.append( + (colkey, _column, _ColumnMetrics(self, _column)) + ) + self._colset.add(_column._deannotate()) + self._index[l] = (colkey, _column) + if colkey not in self._index: + self._index[colkey] = (colkey, _column) + + def __getstate__(self) -> Dict[str, Any]: + return { + "_collection": [(k, c) for k, c, _ in self._collection], + "_index": self._index, + } + + def __setstate__(self, state: Dict[str, Any]) -> None: + object.__setattr__(self, "_index", state["_index"]) + object.__setattr__( + self, "_proxy_index", collections.defaultdict(util.OrderedSet) + ) + object.__setattr__( + self, + "_collection", + [ + (k, c, _ColumnMetrics(self, c)) + for (k, c) in state["_collection"] + ], + ) + object.__setattr__( + self, "_colset", {col for k, col, _ in self._collection} + ) + + def contains_column(self, col: ColumnElement[Any]) -> bool: + """Checks if a column object exists in this collection""" + if col not in self._colset: + if isinstance(col, str): + raise exc.ArgumentError( + "contains_column cannot be used with string arguments. " + "Use ``col_name in table.c`` instead." + ) + return False + else: + return True + + def as_readonly(self) -> ReadOnlyColumnCollection[_COLKEY, _COL_co]: + """Return a "read only" form of this + :class:`_sql.ColumnCollection`.""" + + return ReadOnlyColumnCollection(self) + + def _init_proxy_index(self): + """populate the "proxy index", if empty. + + proxy index is added in 2.0 to provide more efficient operation + for the corresponding_column() method. + + For reasons of both time to construct new .c collections as well as + memory conservation for large numbers of large .c collections, the + proxy_index is only filled if corresponding_column() is called. once + filled it stays that way, and new _ColumnMetrics objects created after + that point will populate it with new data. Note this case would be + unusual, if not nonexistent, as it means a .c collection is being + mutated after corresponding_column() were used, however it is tested in + test/base/test_utils.py. + + """ + pi = self._proxy_index + if pi: + return + + for _, _, metrics in self._collection: + eps = metrics.column._expanded_proxy_set + + for eps_col in eps: + pi[eps_col].add(metrics) + + def corresponding_column( + self, column: _COL, require_embedded: bool = False + ) -> Optional[Union[_COL, _COL_co]]: + """Given a :class:`_expression.ColumnElement`, return the exported + :class:`_expression.ColumnElement` object from this + :class:`_expression.ColumnCollection` + which corresponds to that original :class:`_expression.ColumnElement` + via a common + ancestor column. + + :param column: the target :class:`_expression.ColumnElement` + to be matched. + + :param require_embedded: only return corresponding columns for + the given :class:`_expression.ColumnElement`, if the given + :class:`_expression.ColumnElement` + is actually present within a sub-element + of this :class:`_expression.Selectable`. + Normally the column will match if + it merely shares a common ancestor with one of the exported + columns of this :class:`_expression.Selectable`. + + .. 
seealso:: + + :meth:`_expression.Selectable.corresponding_column` + - invokes this method + against the collection returned by + :attr:`_expression.Selectable.exported_columns`. + + .. versionchanged:: 1.4 the implementation for ``corresponding_column`` + was moved onto the :class:`_expression.ColumnCollection` itself. + + """ + # TODO: cython candidate + + # don't dig around if the column is locally present + if column in self._colset: + return column + + selected_intersection, selected_metrics = None, None + target_set = column.proxy_set + + pi = self._proxy_index + if not pi: + self._init_proxy_index() + + for current_metrics in ( + mm for ts in target_set if ts in pi for mm in pi[ts] + ): + if not require_embedded or current_metrics.embedded(target_set): + if selected_metrics is None: + # no corresponding column yet, pick this one. + selected_metrics = current_metrics + continue + + current_intersection = target_set.intersection( + current_metrics.column._expanded_proxy_set + ) + if selected_intersection is None: + selected_intersection = target_set.intersection( + selected_metrics.column._expanded_proxy_set + ) + + if len(current_intersection) > len(selected_intersection): + # 'current' has a larger field of correspondence than + # 'selected'. i.e. selectable.c.a1_x->a1.c.x->table.c.x + # matches a1.c.x->table.c.x better than + # selectable.c.x->table.c.x does. + + selected_metrics = current_metrics + selected_intersection = current_intersection + elif current_intersection == selected_intersection: + # they have the same field of correspondence. see + # which proxy_set has fewer columns in it, which + # indicates a closer relationship with the root + # column. Also take into account the "weight" + # attribute which CompoundSelect() uses to give + # higher precedence to columns based on vertical + # position in the compound statement, and discard + # columns that have no reference to the target + # column (also occurs with CompoundSelect) + + selected_col_distance = sum( + [ + sc._annotations.get("weight", 1) + for sc in ( + selected_metrics.column._uncached_proxy_list() + ) + if sc.shares_lineage(column) + ], + ) + current_col_distance = sum( + [ + sc._annotations.get("weight", 1) + for sc in ( + current_metrics.column._uncached_proxy_list() + ) + if sc.shares_lineage(column) + ], + ) + if current_col_distance < selected_col_distance: + selected_metrics = current_metrics + selected_intersection = current_intersection + + return selected_metrics.column if selected_metrics else None + + +_NAMEDCOL = TypeVar("_NAMEDCOL", bound="NamedColumn[Any]") + + +class DedupeColumnCollection(ColumnCollection[str, _NAMEDCOL]): + """A :class:`_expression.ColumnCollection` + that maintains deduplicating behavior. + + This is useful by schema level objects such as :class:`_schema.Table` and + :class:`.PrimaryKeyConstraint`. The collection includes more + sophisticated mutator methods as well to suit schema objects which + require mutable column collections. + + .. 
versionadded:: 1.4 + + """ + + def add( # type: ignore[override] + self, column: _NAMEDCOL, key: Optional[str] = None + ) -> None: + if key is not None and column.key != key: + raise exc.ArgumentError( + "DedupeColumnCollection requires columns be under " + "the same key as their .key" + ) + key = column.key + + if key is None: + raise exc.ArgumentError( + "Can't add unnamed column to column collection" + ) + + if key in self._index: + existing = self._index[key][1] + + if existing is column: + return + + self.replace(column) + + # pop out memoized proxy_set as this + # operation may very well be occurring + # in a _make_proxy operation + util.memoized_property.reset(column, "proxy_set") + else: + self._append_new_column(key, column) + + def _append_new_column(self, key: str, named_column: _NAMEDCOL) -> None: + l = len(self._collection) + self._collection.append( + (key, named_column, _ColumnMetrics(self, named_column)) + ) + self._colset.add(named_column._deannotate()) + self._index[l] = (key, named_column) + self._index[key] = (key, named_column) + + def _populate_separate_keys( + self, iter_: Iterable[Tuple[str, _NAMEDCOL]] + ) -> None: + """populate from an iterator of (key, column)""" + cols = list(iter_) + + replace_col = [] + for k, col in cols: + if col.key != k: + raise exc.ArgumentError( + "DedupeColumnCollection requires columns be under " + "the same key as their .key" + ) + if col.name in self._index and col.key != col.name: + replace_col.append(col) + elif col.key in self._index: + replace_col.append(col) + else: + self._index[k] = (k, col) + self._collection.append((k, col, _ColumnMetrics(self, col))) + self._colset.update(c._deannotate() for (k, c, _) in self._collection) + + self._index.update( + (idx, (k, c)) for idx, (k, c, _) in enumerate(self._collection) + ) + for col in replace_col: + self.replace(col) + + def extend(self, iter_: Iterable[_NAMEDCOL]) -> None: + self._populate_separate_keys((col.key, col) for col in iter_) + + def remove(self, column: _NAMEDCOL) -> None: + if column not in self._colset: + raise ValueError( + "Can't remove column %r; column is not in this collection" + % column + ) + del self._index[column.key] + self._colset.remove(column) + self._collection[:] = [ + (k, c, metrics) + for (k, c, metrics) in self._collection + if c is not column + ] + for metrics in self._proxy_index.get(column, ()): + metrics.dispose(self) + + self._index.update( + {idx: (k, col) for idx, (k, col, _) in enumerate(self._collection)} + ) + # delete higher index + del self._index[len(self._collection)] + + def replace( + self, + column: _NAMEDCOL, + extra_remove: Optional[Iterable[_NAMEDCOL]] = None, + ) -> None: + """add the given column to this collection, removing unaliased + versions of this column as well as existing columns with the + same key. + + e.g.:: + + t = Table("sometable", metadata, Column("col1", Integer)) + t.columns.replace(Column("col1", Integer, key="columnone")) + + will remove the original 'col1' from the collection, and add + the new column under the name 'columnname'. + + Used by schema.Column to override columns during table reflection. 
+ + """ + + if extra_remove: + remove_col = set(extra_remove) + else: + remove_col = set() + # remove up to two columns based on matches of name as well as key + if column.name in self._index and column.key != column.name: + other = self._index[column.name][1] + if other.name == other.key: + remove_col.add(other) + + if column.key in self._index: + remove_col.add(self._index[column.key][1]) + + if not remove_col: + self._append_new_column(column.key, column) + return + new_cols: List[Tuple[str, _NAMEDCOL, _ColumnMetrics[_NAMEDCOL]]] = [] + replaced = False + for k, col, metrics in self._collection: + if col in remove_col: + if not replaced: + replaced = True + new_cols.append( + (column.key, column, _ColumnMetrics(self, column)) + ) + else: + new_cols.append((k, col, metrics)) + + if remove_col: + self._colset.difference_update(remove_col) + + for rc in remove_col: + for metrics in self._proxy_index.get(rc, ()): + metrics.dispose(self) + + if not replaced: + new_cols.append((column.key, column, _ColumnMetrics(self, column))) + + self._colset.add(column._deannotate()) + self._collection[:] = new_cols + + self._index.clear() + + self._index.update( + {idx: (k, col) for idx, (k, col, _) in enumerate(self._collection)} + ) + self._index.update({k: (k, col) for (k, col, _) in self._collection}) + + +class ReadOnlyColumnCollection( + util.ReadOnlyContainer, ColumnCollection[_COLKEY, _COL_co] +): + __slots__ = ("_parent",) + + def __init__(self, collection): + object.__setattr__(self, "_parent", collection) + object.__setattr__(self, "_colset", collection._colset) + object.__setattr__(self, "_index", collection._index) + object.__setattr__(self, "_collection", collection._collection) + object.__setattr__(self, "_proxy_index", collection._proxy_index) + + def __getstate__(self): + return {"_parent": self._parent} + + def __setstate__(self, state): + parent = state["_parent"] + self.__init__(parent) # type: ignore + + def add(self, column: Any, key: Any = ...) -> Any: + self._readonly() + + def extend(self, elements: Any) -> NoReturn: + self._readonly() + + def remove(self, item: Any) -> NoReturn: + self._readonly() + + +class ColumnSet(util.OrderedSet["ColumnClause[Any]"]): + def contains_column(self, col): + return col in self + + def extend(self, cols): + for col in cols: + self.add(col) + + def __eq__(self, other): + l = [] + for c in other: + for local in self: + if c.shares_lineage(local): + l.append(c == local) + return elements.and_(*l) + + def __hash__(self): # type: ignore[override] + return hash(tuple(x for x in self)) + + +def _entity_namespace( + entity: Union[_HasEntityNamespace, ExternallyTraversible] +) -> _EntityNamespace: + """Return the nearest .entity_namespace for the given entity. + + If not immediately available, does an iterate to find a sub-element + that has one, if any. + + """ + try: + return cast(_HasEntityNamespace, entity).entity_namespace + except AttributeError: + for elem in visitors.iterate(cast(ExternallyTraversible, entity)): + if _is_has_entity_namespace(elem): + return elem.entity_namespace + else: + raise + + +def _entity_namespace_key( + entity: Union[_HasEntityNamespace, ExternallyTraversible], + key: str, + default: Union[SQLCoreOperations[Any], _NoArg] = NO_ARG, +) -> SQLCoreOperations[Any]: + """Return an entry from an entity_namespace. + + + Raises :class:`_exc.InvalidRequestError` rather than attribute error + on not found. 
+ + """ + + try: + ns = _entity_namespace(entity) + if default is not NO_ARG: + return getattr(ns, key, default) + else: + return getattr(ns, key) # type: ignore + except AttributeError as err: + raise exc.InvalidRequestError( + 'Entity namespace for "%s" has no property "%s"' % (entity, key) + ) from err diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/sql/cache_key.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/sql/cache_key.py new file mode 100644 index 0000000000000000000000000000000000000000..cec0450aa61bc72b6151e8c1a90a4021e8097f77 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/sql/cache_key.py @@ -0,0 +1,1057 @@ +# sql/cache_key.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php + +from __future__ import annotations + +import enum +from itertools import zip_longest +import typing +from typing import Any +from typing import Callable +from typing import Dict +from typing import Iterable +from typing import Iterator +from typing import List +from typing import MutableMapping +from typing import NamedTuple +from typing import Optional +from typing import Sequence +from typing import Tuple +from typing import Union + +from .visitors import anon_map +from .visitors import HasTraversalDispatch +from .visitors import HasTraverseInternals +from .visitors import InternalTraversal +from .visitors import prefix_anon_map +from .. import util +from ..inspection import inspect +from ..util import HasMemoized +from ..util.typing import Literal +from ..util.typing import Protocol + +if typing.TYPE_CHECKING: + from .elements import BindParameter + from .elements import ClauseElement + from .elements import ColumnElement + from .visitors import _TraverseInternalsType + from ..engine.interfaces import _CoreSingleExecuteParams + + +class _CacheKeyTraversalDispatchType(Protocol): + def __call__( + s, self: HasCacheKey, visitor: _CacheKeyTraversal + ) -> _CacheKeyTraversalDispatchTypeReturn: ... + + +class CacheConst(enum.Enum): + NO_CACHE = 0 + + +NO_CACHE = CacheConst.NO_CACHE + + +_CacheKeyTraversalType = Union[ + "_TraverseInternalsType", Literal[CacheConst.NO_CACHE], Literal[None] +] + + +class CacheTraverseTarget(enum.Enum): + CACHE_IN_PLACE = 0 + CALL_GEN_CACHE_KEY = 1 + STATIC_CACHE_KEY = 2 + PROPAGATE_ATTRS = 3 + ANON_NAME = 4 + + +( + CACHE_IN_PLACE, + CALL_GEN_CACHE_KEY, + STATIC_CACHE_KEY, + PROPAGATE_ATTRS, + ANON_NAME, +) = tuple(CacheTraverseTarget) + +_CacheKeyTraversalDispatchTypeReturn = Sequence[ + Tuple[ + str, + Any, + Union[ + Callable[..., Tuple[Any, ...]], + CacheTraverseTarget, + InternalTraversal, + ], + ] +] + + +class HasCacheKey: + """Mixin for objects which can produce a cache key. + + This class is usually in a hierarchy that starts with the + :class:`.HasTraverseInternals` base, but this is optional. Currently, + the class should be able to work on its own without including + :class:`.HasTraverseInternals`. + + .. seealso:: + + :class:`.CacheKey` + + :ref:`sql_caching` + + """ + + __slots__ = () + + _cache_key_traversal: _CacheKeyTraversalType = NO_CACHE + + _is_has_cache_key = True + + _hierarchy_supports_caching = True + """private attribute which may be set to False to prevent the + inherit_cache warning from being emitted for a hierarchy of subclasses. 
+ + Currently applies to the :class:`.ExecutableDDLElement` hierarchy which + does not implement caching. + + """ + + inherit_cache: Optional[bool] = None + """Indicate if this :class:`.HasCacheKey` instance should make use of the + cache key generation scheme used by its immediate superclass. + + The attribute defaults to ``None``, which indicates that a construct has + not yet taken into account whether or not its appropriate for it to + participate in caching; this is functionally equivalent to setting the + value to ``False``, except that a warning is also emitted. + + This flag can be set to ``True`` on a particular class, if the SQL that + corresponds to the object does not change based on attributes which + are local to this class, and not its superclass. + + .. seealso:: + + :ref:`compilerext_caching` - General guideslines for setting the + :attr:`.HasCacheKey.inherit_cache` attribute for third-party or user + defined SQL constructs. + + """ + + __slots__ = () + + _generated_cache_key_traversal: Any + + @classmethod + def _generate_cache_attrs( + cls, + ) -> Union[_CacheKeyTraversalDispatchType, Literal[CacheConst.NO_CACHE]]: + """generate cache key dispatcher for a new class. + + This sets the _generated_cache_key_traversal attribute once called + so should only be called once per class. + + """ + inherit_cache = cls.__dict__.get("inherit_cache", None) + inherit = bool(inherit_cache) + + if inherit: + _cache_key_traversal = getattr(cls, "_cache_key_traversal", None) + if _cache_key_traversal is None: + try: + assert issubclass(cls, HasTraverseInternals) + _cache_key_traversal = cls._traverse_internals + except AttributeError: + cls._generated_cache_key_traversal = NO_CACHE + return NO_CACHE + + assert _cache_key_traversal is not NO_CACHE, ( + f"class {cls} has _cache_key_traversal=NO_CACHE, " + "which conflicts with inherit_cache=True" + ) + + # TODO: wouldn't we instead get this from our superclass? + # also, our superclass may not have this yet, but in any case, + # we'd generate for the superclass that has it. this is a little + # more complicated, so for the moment this is a little less + # efficient on startup but simpler. + return _cache_key_traversal_visitor.generate_dispatch( + cls, + _cache_key_traversal, + "_generated_cache_key_traversal", + ) + else: + _cache_key_traversal = cls.__dict__.get( + "_cache_key_traversal", None + ) + if _cache_key_traversal is None: + _cache_key_traversal = cls.__dict__.get( + "_traverse_internals", None + ) + if _cache_key_traversal is None: + cls._generated_cache_key_traversal = NO_CACHE + if ( + inherit_cache is None + and cls._hierarchy_supports_caching + ): + util.warn( + "Class %s will not make use of SQL compilation " + "caching as it does not set the 'inherit_cache' " + "attribute to ``True``. This can have " + "significant performance implications including " + "some performance degradations in comparison to " + "prior SQLAlchemy versions. Set this attribute " + "to True if this object can make use of the cache " + "key generated by the superclass. Alternatively, " + "this attribute may be set to False which will " + "disable this warning." % (cls.__name__), + code="cprf", + ) + return NO_CACHE + + return _cache_key_traversal_visitor.generate_dispatch( + cls, + _cache_key_traversal, + "_generated_cache_key_traversal", + ) + + @util.preload_module("sqlalchemy.sql.elements") + def _gen_cache_key( + self, anon_map: anon_map, bindparams: List[BindParameter[Any]] + ) -> Optional[Tuple[Any, ...]]: + """return an optional cache key. 
+ + The cache key is a tuple which can contain any series of + objects that are hashable and also identifies + this object uniquely within the presence of a larger SQL expression + or statement, for the purposes of caching the resulting query. + + The cache key should be based on the SQL compiled structure that would + ultimately be produced. That is, two structures that are composed in + exactly the same way should produce the same cache key; any difference + in the structures that would affect the SQL string or the type handlers + should result in a different cache key. + + If a structure cannot produce a useful cache key, the NO_CACHE + symbol should be added to the anon_map and the method should + return None. + + """ + + cls = self.__class__ + + id_, found = anon_map.get_anon(self) + if found: + return (id_, cls) + + dispatcher: Union[ + Literal[CacheConst.NO_CACHE], + _CacheKeyTraversalDispatchType, + ] + + try: + dispatcher = cls.__dict__["_generated_cache_key_traversal"] + except KeyError: + # traversals.py -> _preconfigure_traversals() + # may be used to run these ahead of time, but + # is not enabled right now. + # this block will generate any remaining dispatchers. + dispatcher = cls._generate_cache_attrs() + + if dispatcher is NO_CACHE: + anon_map[NO_CACHE] = True + return None + + result: Tuple[Any, ...] = (id_, cls) + + # inline of _cache_key_traversal_visitor.run_generated_dispatch() + + for attrname, obj, meth in dispatcher( + self, _cache_key_traversal_visitor + ): + if obj is not None: + # TODO: see if C code can help here as Python lacks an + # efficient switch construct + + if meth is STATIC_CACHE_KEY: + sck = obj._static_cache_key + if sck is NO_CACHE: + anon_map[NO_CACHE] = True + return None + result += (attrname, sck) + elif meth is ANON_NAME: + elements = util.preloaded.sql_elements + if isinstance(obj, elements._anonymous_label): + obj = obj.apply_map(anon_map) # type: ignore + result += (attrname, obj) + elif meth is CALL_GEN_CACHE_KEY: + result += ( + attrname, + obj._gen_cache_key(anon_map, bindparams), + ) + + # remaining cache functions are against + # Python tuples, dicts, lists, etc. so we can skip + # if they are empty + elif obj: + if meth is CACHE_IN_PLACE: + result += (attrname, obj) + elif meth is PROPAGATE_ATTRS: + result += ( + attrname, + obj["compile_state_plugin"], + ( + obj["plugin_subject"]._gen_cache_key( + anon_map, bindparams + ) + if obj["plugin_subject"] + else None + ), + ) + elif meth is InternalTraversal.dp_annotations_key: + # obj is here is the _annotations dict. Table uses + # a memoized version of it. however in other cases, + # we generate it given anon_map as we may be from a + # Join, Aliased, etc. + # see #8790 + + if self._gen_static_annotations_cache_key: # type: ignore # noqa: E501 + result += self._annotations_cache_key # type: ignore # noqa: E501 + else: + result += self._gen_annotations_cache_key(anon_map) # type: ignore # noqa: E501 + + elif ( + meth is InternalTraversal.dp_clauseelement_list + or meth is InternalTraversal.dp_clauseelement_tuple + or meth + is InternalTraversal.dp_memoized_select_entities + ): + result += ( + attrname, + tuple( + [ + elem._gen_cache_key(anon_map, bindparams) + for elem in obj + ] + ), + ) + else: + result += meth( # type: ignore + attrname, obj, self, anon_map, bindparams + ) + return result + + def _generate_cache_key(self) -> Optional[CacheKey]: + """return a cache key. 
+ + The cache key is a tuple which can contain any series of + objects that are hashable and also identifies + this object uniquely within the presence of a larger SQL expression + or statement, for the purposes of caching the resulting query. + + The cache key should be based on the SQL compiled structure that would + ultimately be produced. That is, two structures that are composed in + exactly the same way should produce the same cache key; any difference + in the structures that would affect the SQL string or the type handlers + should result in a different cache key. + + The cache key returned by this method is an instance of + :class:`.CacheKey`, which consists of a tuple representing the + cache key, as well as a list of :class:`.BindParameter` objects + which are extracted from the expression. While two expressions + that produce identical cache key tuples will themselves generate + identical SQL strings, the list of :class:`.BindParameter` objects + indicates the bound values which may have different values in + each one; these bound parameters must be consulted in order to + execute the statement with the correct parameters. + + a :class:`_expression.ClauseElement` structure that does not implement + a :meth:`._gen_cache_key` method and does not implement a + :attr:`.traverse_internals` attribute will not be cacheable; when + such an element is embedded into a larger structure, this method + will return None, indicating no cache key is available. + + """ + + bindparams: List[BindParameter[Any]] = [] + + _anon_map = anon_map() + key = self._gen_cache_key(_anon_map, bindparams) + if NO_CACHE in _anon_map: + return None + else: + assert key is not None + return CacheKey(key, bindparams) + + @classmethod + def _generate_cache_key_for_object( + cls, obj: HasCacheKey + ) -> Optional[CacheKey]: + bindparams: List[BindParameter[Any]] = [] + + _anon_map = anon_map() + key = obj._gen_cache_key(_anon_map, bindparams) + if NO_CACHE in _anon_map: + return None + else: + assert key is not None + return CacheKey(key, bindparams) + + +class HasCacheKeyTraverse(HasTraverseInternals, HasCacheKey): + pass + + +class MemoizedHasCacheKey(HasCacheKey, HasMemoized): + __slots__ = () + + @HasMemoized.memoized_instancemethod + def _generate_cache_key(self) -> Optional[CacheKey]: + return HasCacheKey._generate_cache_key(self) + + +class SlotsMemoizedHasCacheKey(HasCacheKey, util.MemoizedSlots): + __slots__ = () + + def _memoized_method__generate_cache_key(self) -> Optional[CacheKey]: + return HasCacheKey._generate_cache_key(self) + + +class CacheKey(NamedTuple): + """The key used to identify a SQL statement construct in the + SQL compilation cache. + + .. seealso:: + + :ref:`sql_caching` + + """ + + key: Tuple[Any, ...] + bindparams: Sequence[BindParameter[Any]] + + # can't set __hash__ attribute because it interferes + # with namedtuple + # can't use "if not TYPE_CHECKING" because mypy rejects it + # inside of a NamedTuple + def __hash__(self) -> Optional[int]: # type: ignore + """CacheKey itself is not hashable - hash the .key portion""" + return None + + def to_offline_string( + self, + statement_cache: MutableMapping[Any, str], + statement: ClauseElement, + parameters: _CoreSingleExecuteParams, + ) -> str: + """Generate an "offline string" form of this :class:`.CacheKey` + + The "offline string" is basically the string SQL for the + statement plus a repr of the bound parameter values in series. 
+ Whereas the :class:`.CacheKey` object is dependent on in-memory + identities in order to work as a cache key, the "offline" version + is suitable for a cache that will work for other processes as well. + + The given ``statement_cache`` is a dictionary-like object where the + string form of the statement itself will be cached. This dictionary + should be in a longer lived scope in order to reduce the time spent + stringifying statements. + + + """ + if self.key not in statement_cache: + statement_cache[self.key] = sql_str = str(statement) + else: + sql_str = statement_cache[self.key] + + if not self.bindparams: + param_tuple = tuple(parameters[key] for key in sorted(parameters)) + else: + param_tuple = tuple( + parameters.get(bindparam.key, bindparam.value) + for bindparam in self.bindparams + ) + + return repr((sql_str, param_tuple)) + + def __eq__(self, other: Any) -> bool: + return bool(self.key == other.key) + + def __ne__(self, other: Any) -> bool: + return not (self.key == other.key) + + @classmethod + def _diff_tuples(cls, left: CacheKey, right: CacheKey) -> str: + ck1 = CacheKey(left, []) + ck2 = CacheKey(right, []) + return ck1._diff(ck2) + + def _whats_different(self, other: CacheKey) -> Iterator[str]: + k1 = self.key + k2 = other.key + + stack: List[int] = [] + pickup_index = 0 + while True: + s1, s2 = k1, k2 + for idx in stack: + s1 = s1[idx] + s2 = s2[idx] + + for idx, (e1, e2) in enumerate(zip_longest(s1, s2)): + if idx < pickup_index: + continue + if e1 != e2: + if isinstance(e1, tuple) and isinstance(e2, tuple): + stack.append(idx) + break + else: + yield "key%s[%d]: %s != %s" % ( + "".join("[%d]" % id_ for id_ in stack), + idx, + e1, + e2, + ) + else: + stack.pop(-1) + break + + def _diff(self, other: CacheKey) -> str: + return ", ".join(self._whats_different(other)) + + def __str__(self) -> str: + stack: List[Union[Tuple[Any, ...], HasCacheKey]] = [self.key] + + output = [] + sentinel = object() + indent = -1 + while stack: + elem = stack.pop(0) + if elem is sentinel: + output.append((" " * (indent * 2)) + "),") + indent -= 1 + elif isinstance(elem, tuple): + if not elem: + output.append((" " * ((indent + 1) * 2)) + "()") + else: + indent += 1 + stack = list(elem) + [sentinel] + stack + output.append((" " * (indent * 2)) + "(") + else: + if isinstance(elem, HasCacheKey): + repr_ = "<%s object at %s>" % ( + type(elem).__name__, + hex(id(elem)), + ) + else: + repr_ = repr(elem) + output.append((" " * (indent * 2)) + " " + repr_ + ", ") + + return "CacheKey(key=%s)" % ("\n".join(output),) + + def _generate_param_dict(self) -> Dict[str, Any]: + """used for testing""" + + _anon_map = prefix_anon_map() + return {b.key % _anon_map: b.effective_value for b in self.bindparams} + + @util.preload_module("sqlalchemy.sql.elements") + def _apply_params_to_element( + self, original_cache_key: CacheKey, target_element: ColumnElement[Any] + ) -> ColumnElement[Any]: + if target_element._is_immutable or original_cache_key is self: + return target_element + + elements = util.preloaded.sql_elements + return elements._OverrideBinds( + target_element, self.bindparams, original_cache_key.bindparams + ) + + +def _ad_hoc_cache_key_from_args( + tokens: Tuple[Any, ...], + traverse_args: Iterable[Tuple[str, InternalTraversal]], + args: Iterable[Any], +) -> Tuple[Any, ...]: + """a quick cache key generator used by reflection.flexi_cache.""" + bindparams: List[BindParameter[Any]] = [] + + _anon_map = anon_map() + + tup = tokens + + for (attrname, sym), arg in zip(traverse_args, args): + key = sym.name 
+ visit_key = key.replace("dp_", "visit_") + + if arg is None: + tup += (attrname, None) + continue + + meth = getattr(_cache_key_traversal_visitor, visit_key) + if meth is CACHE_IN_PLACE: + tup += (attrname, arg) + elif meth in ( + CALL_GEN_CACHE_KEY, + STATIC_CACHE_KEY, + ANON_NAME, + PROPAGATE_ATTRS, + ): + raise NotImplementedError( + f"Haven't implemented symbol {meth} for ad-hoc key from args" + ) + else: + tup += meth(attrname, arg, None, _anon_map, bindparams) + return tup + + +class _CacheKeyTraversal(HasTraversalDispatch): + # very common elements are inlined into the main _get_cache_key() method + # to produce a dramatic savings in Python function call overhead + + visit_has_cache_key = visit_clauseelement = CALL_GEN_CACHE_KEY + visit_clauseelement_list = InternalTraversal.dp_clauseelement_list + visit_annotations_key = InternalTraversal.dp_annotations_key + visit_clauseelement_tuple = InternalTraversal.dp_clauseelement_tuple + visit_memoized_select_entities = ( + InternalTraversal.dp_memoized_select_entities + ) + + visit_string = visit_boolean = visit_operator = visit_plain_obj = ( + CACHE_IN_PLACE + ) + visit_statement_hint_list = CACHE_IN_PLACE + visit_type = STATIC_CACHE_KEY + visit_anon_name = ANON_NAME + + visit_propagate_attrs = PROPAGATE_ATTRS + + def visit_with_context_options( + self, + attrname: str, + obj: Any, + parent: Any, + anon_map: anon_map, + bindparams: List[BindParameter[Any]], + ) -> Tuple[Any, ...]: + return tuple((fn.__code__, c_key) for fn, c_key in obj) + + def visit_inspectable( + self, + attrname: str, + obj: Any, + parent: Any, + anon_map: anon_map, + bindparams: List[BindParameter[Any]], + ) -> Tuple[Any, ...]: + return (attrname, inspect(obj)._gen_cache_key(anon_map, bindparams)) + + def visit_string_list( + self, + attrname: str, + obj: Any, + parent: Any, + anon_map: anon_map, + bindparams: List[BindParameter[Any]], + ) -> Tuple[Any, ...]: + return tuple(obj) + + def visit_multi( + self, + attrname: str, + obj: Any, + parent: Any, + anon_map: anon_map, + bindparams: List[BindParameter[Any]], + ) -> Tuple[Any, ...]: + return ( + attrname, + ( + obj._gen_cache_key(anon_map, bindparams) + if isinstance(obj, HasCacheKey) + else obj + ), + ) + + def visit_multi_list( + self, + attrname: str, + obj: Any, + parent: Any, + anon_map: anon_map, + bindparams: List[BindParameter[Any]], + ) -> Tuple[Any, ...]: + return ( + attrname, + tuple( + ( + elem._gen_cache_key(anon_map, bindparams) + if isinstance(elem, HasCacheKey) + else elem + ) + for elem in obj + ), + ) + + def visit_has_cache_key_tuples( + self, + attrname: str, + obj: Any, + parent: Any, + anon_map: anon_map, + bindparams: List[BindParameter[Any]], + ) -> Tuple[Any, ...]: + if not obj: + return () + return ( + attrname, + tuple( + tuple( + elem._gen_cache_key(anon_map, bindparams) + for elem in tup_elem + ) + for tup_elem in obj + ), + ) + + def visit_has_cache_key_list( + self, + attrname: str, + obj: Any, + parent: Any, + anon_map: anon_map, + bindparams: List[BindParameter[Any]], + ) -> Tuple[Any, ...]: + if not obj: + return () + return ( + attrname, + tuple(elem._gen_cache_key(anon_map, bindparams) for elem in obj), + ) + + def visit_executable_options( + self, + attrname: str, + obj: Any, + parent: Any, + anon_map: anon_map, + bindparams: List[BindParameter[Any]], + ) -> Tuple[Any, ...]: + if not obj: + return () + return ( + attrname, + tuple( + elem._gen_cache_key(anon_map, bindparams) + for elem in obj + if elem._is_has_cache_key + ), + ) + + def visit_inspectable_list( + self, + 
attrname: str, + obj: Any, + parent: Any, + anon_map: anon_map, + bindparams: List[BindParameter[Any]], + ) -> Tuple[Any, ...]: + return self.visit_has_cache_key_list( + attrname, [inspect(o) for o in obj], parent, anon_map, bindparams + ) + + def visit_clauseelement_tuples( + self, + attrname: str, + obj: Any, + parent: Any, + anon_map: anon_map, + bindparams: List[BindParameter[Any]], + ) -> Tuple[Any, ...]: + return self.visit_has_cache_key_tuples( + attrname, obj, parent, anon_map, bindparams + ) + + def visit_fromclause_ordered_set( + self, + attrname: str, + obj: Any, + parent: Any, + anon_map: anon_map, + bindparams: List[BindParameter[Any]], + ) -> Tuple[Any, ...]: + if not obj: + return () + return ( + attrname, + tuple([elem._gen_cache_key(anon_map, bindparams) for elem in obj]), + ) + + def visit_clauseelement_unordered_set( + self, + attrname: str, + obj: Any, + parent: Any, + anon_map: anon_map, + bindparams: List[BindParameter[Any]], + ) -> Tuple[Any, ...]: + if not obj: + return () + cache_keys = [ + elem._gen_cache_key(anon_map, bindparams) for elem in obj + ] + return ( + attrname, + tuple( + sorted(cache_keys) + ), # cache keys all start with (id_, class) + ) + + def visit_named_ddl_element( + self, + attrname: str, + obj: Any, + parent: Any, + anon_map: anon_map, + bindparams: List[BindParameter[Any]], + ) -> Tuple[Any, ...]: + return (attrname, obj.name) + + def visit_prefix_sequence( + self, + attrname: str, + obj: Any, + parent: Any, + anon_map: anon_map, + bindparams: List[BindParameter[Any]], + ) -> Tuple[Any, ...]: + if not obj: + return () + + return ( + attrname, + tuple( + [ + (clause._gen_cache_key(anon_map, bindparams), strval) + for clause, strval in obj + ] + ), + ) + + def visit_setup_join_tuple( + self, + attrname: str, + obj: Any, + parent: Any, + anon_map: anon_map, + bindparams: List[BindParameter[Any]], + ) -> Tuple[Any, ...]: + return tuple( + ( + target._gen_cache_key(anon_map, bindparams), + ( + onclause._gen_cache_key(anon_map, bindparams) + if onclause is not None + else None + ), + ( + from_._gen_cache_key(anon_map, bindparams) + if from_ is not None + else None + ), + tuple([(key, flags[key]) for key in sorted(flags)]), + ) + for (target, onclause, from_, flags) in obj + ) + + def visit_table_hint_list( + self, + attrname: str, + obj: Any, + parent: Any, + anon_map: anon_map, + bindparams: List[BindParameter[Any]], + ) -> Tuple[Any, ...]: + if not obj: + return () + + return ( + attrname, + tuple( + [ + ( + clause._gen_cache_key(anon_map, bindparams), + dialect_name, + text, + ) + for (clause, dialect_name), text in obj.items() + ] + ), + ) + + def visit_plain_dict( + self, + attrname: str, + obj: Any, + parent: Any, + anon_map: anon_map, + bindparams: List[BindParameter[Any]], + ) -> Tuple[Any, ...]: + return (attrname, tuple([(key, obj[key]) for key in sorted(obj)])) + + def visit_dialect_options( + self, + attrname: str, + obj: Any, + parent: Any, + anon_map: anon_map, + bindparams: List[BindParameter[Any]], + ) -> Tuple[Any, ...]: + return ( + attrname, + tuple( + ( + dialect_name, + tuple( + [ + (key, obj[dialect_name][key]) + for key in sorted(obj[dialect_name]) + ] + ), + ) + for dialect_name in sorted(obj) + ), + ) + + def visit_string_clauseelement_dict( + self, + attrname: str, + obj: Any, + parent: Any, + anon_map: anon_map, + bindparams: List[BindParameter[Any]], + ) -> Tuple[Any, ...]: + return ( + attrname, + tuple( + (key, obj[key]._gen_cache_key(anon_map, bindparams)) + for key in sorted(obj) + ), + ) + + def 
visit_string_multi_dict( + self, + attrname: str, + obj: Any, + parent: Any, + anon_map: anon_map, + bindparams: List[BindParameter[Any]], + ) -> Tuple[Any, ...]: + return ( + attrname, + tuple( + ( + key, + ( + value._gen_cache_key(anon_map, bindparams) + if isinstance(value, HasCacheKey) + else value + ), + ) + for key, value in [(key, obj[key]) for key in sorted(obj)] + ), + ) + + def visit_fromclause_canonical_column_collection( + self, + attrname: str, + obj: Any, + parent: Any, + anon_map: anon_map, + bindparams: List[BindParameter[Any]], + ) -> Tuple[Any, ...]: + # inlining into the internals of ColumnCollection + return ( + attrname, + tuple( + col._gen_cache_key(anon_map, bindparams) + for k, col, _ in obj._collection + ), + ) + + def visit_unknown_structure( + self, + attrname: str, + obj: Any, + parent: Any, + anon_map: anon_map, + bindparams: List[BindParameter[Any]], + ) -> Tuple[Any, ...]: + anon_map[NO_CACHE] = True + return () + + def visit_dml_ordered_values( + self, + attrname: str, + obj: Any, + parent: Any, + anon_map: anon_map, + bindparams: List[BindParameter[Any]], + ) -> Tuple[Any, ...]: + return ( + attrname, + tuple( + ( + ( + key._gen_cache_key(anon_map, bindparams) + if hasattr(key, "__clause_element__") + else key + ), + value._gen_cache_key(anon_map, bindparams), + ) + for key, value in obj + ), + ) + + def visit_dml_values( + self, + attrname: str, + obj: Any, + parent: Any, + anon_map: anon_map, + bindparams: List[BindParameter[Any]], + ) -> Tuple[Any, ...]: + # in py37 we can assume two dictionaries created in the same + # insert ordering will retain that sorting + return ( + attrname, + tuple( + ( + ( + k._gen_cache_key(anon_map, bindparams) + if hasattr(k, "__clause_element__") + else k + ), + obj[k]._gen_cache_key(anon_map, bindparams), + ) + for k in obj + ), + ) + + def visit_dml_multi_values( + self, + attrname: str, + obj: Any, + parent: Any, + anon_map: anon_map, + bindparams: List[BindParameter[Any]], + ) -> Tuple[Any, ...]: + # multivalues are simply not cacheable right now + anon_map[NO_CACHE] = True + return () + + +_cache_key_traversal_visitor = _CacheKeyTraversal() diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/sql/coercions.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/sql/coercions.py new file mode 100644 index 0000000000000000000000000000000000000000..e174833fbdc6d1f41f3a6886e5d2f49c79051764 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/sql/coercions.py @@ -0,0 +1,1403 @@ +# sql/coercions.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php +# mypy: allow-untyped-defs, allow-untyped-calls + +from __future__ import annotations + +import collections.abc as collections_abc +import numbers +import re +import typing +from typing import Any +from typing import Callable +from typing import cast +from typing import Dict +from typing import Iterable +from typing import Iterator +from typing import List +from typing import NoReturn +from typing import Optional +from typing import overload +from typing import Sequence +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING +from typing import TypeVar +from typing import Union + +from . import roles +from . 
import visitors +from ._typing import is_from_clause +from .base import ExecutableOption +from .base import Options +from .cache_key import HasCacheKey +from .visitors import Visitable +from .. import exc +from .. import inspection +from .. import util +from ..util.typing import Literal + +if typing.TYPE_CHECKING: + # elements lambdas schema selectable are set by __init__ + from . import elements + from . import lambdas + from . import schema + from . import selectable + from ._typing import _ColumnExpressionArgument + from ._typing import _ColumnsClauseArgument + from ._typing import _DDLColumnArgument + from ._typing import _DMLTableArgument + from ._typing import _FromClauseArgument + from .dml import _DMLTableElement + from .elements import BindParameter + from .elements import ClauseElement + from .elements import ColumnClause + from .elements import ColumnElement + from .elements import NamedColumn + from .elements import SQLCoreOperations + from .elements import TextClause + from .schema import Column + from .selectable import _ColumnsClauseElement + from .selectable import _JoinTargetProtocol + from .selectable import FromClause + from .selectable import HasCTE + from .selectable import SelectBase + from .selectable import Subquery + from .visitors import _TraverseCallableType + +_SR = TypeVar("_SR", bound=roles.SQLRole) +_F = TypeVar("_F", bound=Callable[..., Any]) +_StringOnlyR = TypeVar("_StringOnlyR", bound=roles.StringRole) +_T = TypeVar("_T", bound=Any) + + +def _is_literal(element: Any) -> bool: + """Return whether or not the element is a "literal" in the context + of a SQL expression construct. + + """ + + return not isinstance( + element, + (Visitable, schema.SchemaEventTarget), + ) and not hasattr(element, "__clause_element__") + + +def _deep_is_literal(element): + """Return whether or not the element is a "literal" in the context + of a SQL expression construct. + + does a deeper more esoteric check than _is_literal. is used + for lambda elements that have to distinguish values that would + be bound vs. not without any context. + + """ + + if isinstance(element, collections_abc.Sequence) and not isinstance( + element, str + ): + for elem in element: + if not _deep_is_literal(elem): + return False + else: + return True + + return ( + not isinstance( + element, + ( + Visitable, + schema.SchemaEventTarget, + HasCacheKey, + Options, + util.langhelpers.symbol, + ), + ) + and not hasattr(element, "__clause_element__") + and ( + not isinstance(element, type) + or not issubclass(element, HasCacheKey) + ) + ) + + +def _document_text_coercion( + paramname: str, meth_rst: str, param_rst: str +) -> Callable[[_F], _F]: + return util.add_parameter_text( + paramname, + ( + ".. warning:: " + "The %s argument to %s can be passed as a Python string argument, " + "which will be treated " + "as **trusted SQL text** and rendered as given. **DO NOT PASS " + "UNTRUSTED INPUT TO THIS PARAMETER**." + ) + % (param_rst, meth_rst), + ) + + +def _expression_collection_was_a_list( + attrname: str, + fnname: str, + args: Union[Sequence[_T], Sequence[Sequence[_T]]], +) -> Sequence[_T]: + if args and isinstance(args[0], (list, set, dict)) and len(args) == 1: + if isinstance(args[0], list): + raise exc.ArgumentError( + f'The "{attrname}" argument to {fnname}(), when ' + "referring to a sequence " + "of items, is now passed as a series of positional " + "elements, rather than as a list. 
" + ) + return cast("Sequence[_T]", args[0]) + + return cast("Sequence[_T]", args) + + +@overload +def expect( + role: Type[roles.TruncatedLabelRole], + element: Any, + **kw: Any, +) -> str: ... + + +@overload +def expect( + role: Type[roles.DMLColumnRole], + element: Any, + *, + as_key: Literal[True] = ..., + **kw: Any, +) -> str: ... + + +@overload +def expect( + role: Type[roles.LiteralValueRole], + element: Any, + **kw: Any, +) -> BindParameter[Any]: ... + + +@overload +def expect( + role: Type[roles.DDLReferredColumnRole], + element: Any, + **kw: Any, +) -> Union[Column[Any], str]: ... + + +@overload +def expect( + role: Type[roles.DDLConstraintColumnRole], + element: Any, + **kw: Any, +) -> Union[Column[Any], str]: ... + + +@overload +def expect( + role: Type[roles.StatementOptionRole], + element: Any, + **kw: Any, +) -> Union[ColumnElement[Any], TextClause]: ... + + +@overload +def expect( + role: Type[roles.LabeledColumnExprRole[Any]], + element: _ColumnExpressionArgument[_T], + **kw: Any, +) -> NamedColumn[_T]: ... + + +@overload +def expect( + role: Union[ + Type[roles.ExpressionElementRole[Any]], + Type[roles.LimitOffsetRole], + Type[roles.WhereHavingRole], + ], + element: _ColumnExpressionArgument[_T], + **kw: Any, +) -> ColumnElement[_T]: ... + + +@overload +def expect( + role: Union[ + Type[roles.ExpressionElementRole[Any]], + Type[roles.LimitOffsetRole], + Type[roles.WhereHavingRole], + Type[roles.OnClauseRole], + Type[roles.ColumnArgumentRole], + ], + element: Any, + **kw: Any, +) -> ColumnElement[Any]: ... + + +@overload +def expect( + role: Type[roles.DMLTableRole], + element: _DMLTableArgument, + **kw: Any, +) -> _DMLTableElement: ... + + +@overload +def expect( + role: Type[roles.HasCTERole], + element: HasCTE, + **kw: Any, +) -> HasCTE: ... + + +@overload +def expect( + role: Type[roles.SelectStatementRole], + element: SelectBase, + **kw: Any, +) -> SelectBase: ... + + +@overload +def expect( + role: Type[roles.FromClauseRole], + element: _FromClauseArgument, + **kw: Any, +) -> FromClause: ... + + +@overload +def expect( + role: Type[roles.FromClauseRole], + element: SelectBase, + *, + explicit_subquery: Literal[True] = ..., + **kw: Any, +) -> Subquery: ... + + +@overload +def expect( + role: Type[roles.ColumnsClauseRole], + element: _ColumnsClauseArgument[Any], + **kw: Any, +) -> _ColumnsClauseElement: ... + + +@overload +def expect( + role: Type[roles.JoinTargetRole], + element: _JoinTargetProtocol, + **kw: Any, +) -> _JoinTargetProtocol: ... + + +# catchall for not-yet-implemented overloads +@overload +def expect( + role: Type[_SR], + element: Any, + **kw: Any, +) -> Any: ... + + +def expect( + role: Type[_SR], + element: Any, + *, + apply_propagate_attrs: Optional[ClauseElement] = None, + argname: Optional[str] = None, + post_inspect: bool = False, + disable_inspection: bool = False, + **kw: Any, +) -> Any: + if ( + role.allows_lambda + # note callable() will not invoke a __getattr__() method, whereas + # hasattr(obj, "__call__") will. by keeping the callable() check here + # we prevent most needless calls to hasattr() and therefore + # __getattr__(), which is present on ColumnElement. 
+ and callable(element) + and hasattr(element, "__code__") + ): + return lambdas.LambdaElement( + element, + role, + lambdas.LambdaOptions(**kw), + apply_propagate_attrs=apply_propagate_attrs, + ) + + # major case is that we are given a ClauseElement already, skip more + # elaborate logic up front if possible + impl = _impl_lookup[role] + + original_element = element + + if not isinstance( + element, + ( + elements.CompilerElement, + schema.SchemaItem, + schema.FetchedValue, + lambdas.PyWrapper, + ), + ): + resolved = None + + if impl._resolve_literal_only: + resolved = impl._literal_coercion(element, **kw) + else: + original_element = element + + is_clause_element = False + + # this is a special performance optimization for ORM + # joins used by JoinTargetImpl that we don't go through the + # work of creating __clause_element__() when we only need the + # original QueryableAttribute, as the former will do clause + # adaption and all that which is just thrown away here. + if ( + impl._skip_clauseelement_for_target_match + and isinstance(element, role) + and hasattr(element, "__clause_element__") + ): + is_clause_element = True + else: + while hasattr(element, "__clause_element__"): + is_clause_element = True + + if not getattr(element, "is_clause_element", False): + element = element.__clause_element__() + else: + break + + if not is_clause_element: + if impl._use_inspection and not disable_inspection: + insp = inspection.inspect(element, raiseerr=False) + if insp is not None: + if post_inspect: + insp._post_inspect + try: + resolved = insp.__clause_element__() + except AttributeError: + impl._raise_for_expected(original_element, argname) + + if resolved is None: + resolved = impl._literal_coercion( + element, argname=argname, **kw + ) + else: + resolved = element + elif isinstance(element, lambdas.PyWrapper): + resolved = element._sa__py_wrapper_literal(**kw) + else: + resolved = element + + if apply_propagate_attrs is not None: + if typing.TYPE_CHECKING: + assert isinstance(resolved, (SQLCoreOperations, ClauseElement)) + + if not apply_propagate_attrs._propagate_attrs and getattr( + resolved, "_propagate_attrs", None + ): + apply_propagate_attrs._propagate_attrs = resolved._propagate_attrs + + if impl._role_class in resolved.__class__.__mro__: + if impl._post_coercion: + resolved = impl._post_coercion( + resolved, + argname=argname, + original_element=original_element, + **kw, + ) + return resolved + else: + return impl._implicit_coercions( + original_element, resolved, argname=argname, **kw + ) + + +def expect_as_key( + role: Type[roles.DMLColumnRole], element: Any, **kw: Any +) -> str: + kw.pop("as_key", None) + return expect(role, element, as_key=True, **kw) + + +def expect_col_expression_collection( + role: Type[roles.DDLConstraintColumnRole], + expressions: Iterable[_DDLColumnArgument], +) -> Iterator[ + Tuple[ + Union[str, Column[Any]], + Optional[ColumnClause[Any]], + Optional[str], + Optional[Union[Column[Any], str]], + ] +]: + for expr in expressions: + strname = None + column = None + + resolved: Union[Column[Any], str] = expect(role, expr) + if isinstance(resolved, str): + assert isinstance(expr, str) + strname = resolved = expr + else: + cols: List[Column[Any]] = [] + col_append: _TraverseCallableType[Column[Any]] = cols.append + visitors.traverse(resolved, {}, {"column": col_append}) + if cols: + column = cols[0] + add_element = column if column is not None else strname + + yield resolved, column, strname, add_element + + +class RoleImpl: + __slots__ = ("_role_class", 
"name", "_use_inspection") + + def _literal_coercion(self, element, **kw): + raise NotImplementedError() + + _post_coercion: Any = None + _resolve_literal_only = False + _skip_clauseelement_for_target_match = False + + def __init__(self, role_class): + self._role_class = role_class + self.name = role_class._role_name + self._use_inspection = issubclass(role_class, roles.UsesInspection) + + def _implicit_coercions( + self, + element: Any, + resolved: Any, + argname: Optional[str] = None, + **kw: Any, + ) -> Any: + self._raise_for_expected(element, argname, resolved) + + def _raise_for_expected( + self, + element: Any, + argname: Optional[str] = None, + resolved: Optional[Any] = None, + *, + advice: Optional[str] = None, + code: Optional[str] = None, + err: Optional[Exception] = None, + **kw: Any, + ) -> NoReturn: + if resolved is not None and resolved is not element: + got = "%r object resolved from %r object" % (resolved, element) + else: + got = repr(element) + + if argname: + msg = "%s expected for argument %r; got %s." % ( + self.name, + argname, + got, + ) + else: + msg = "%s expected, got %s." % (self.name, got) + + if advice: + msg += " " + advice + + raise exc.ArgumentError(msg, code=code) from err + + +class _Deannotate: + __slots__ = () + + def _post_coercion(self, resolved, **kw): + from .util import _deep_deannotate + + return _deep_deannotate(resolved) + + +class _StringOnly: + __slots__ = () + + _resolve_literal_only = True + + +class _ReturnsStringKey(RoleImpl): + __slots__ = () + + def _implicit_coercions(self, element, resolved, argname=None, **kw): + if isinstance(element, str): + return element + else: + self._raise_for_expected(element, argname, resolved) + + def _literal_coercion(self, element, **kw): + return element + + +class _ColumnCoercions(RoleImpl): + __slots__ = () + + def _warn_for_scalar_subquery_coercion(self): + util.warn( + "implicitly coercing SELECT object to scalar subquery; " + "please use the .scalar_subquery() method to produce a scalar " + "subquery.", + ) + + def _implicit_coercions(self, element, resolved, argname=None, **kw): + original_element = element + if not getattr(resolved, "is_clause_element", False): + self._raise_for_expected(original_element, argname, resolved) + elif resolved._is_select_base: + self._warn_for_scalar_subquery_coercion() + return resolved.scalar_subquery() + elif resolved._is_from_clause and isinstance( + resolved, selectable.Subquery + ): + self._warn_for_scalar_subquery_coercion() + return resolved.element.scalar_subquery() + elif self._role_class.allows_lambda and resolved._is_lambda_element: + return resolved + else: + self._raise_for_expected(original_element, argname, resolved) + + +def _no_text_coercion( + element: Any, + argname: Optional[str] = None, + exc_cls: Type[exc.SQLAlchemyError] = exc.ArgumentError, + extra: Optional[str] = None, + err: Optional[Exception] = None, +) -> NoReturn: + raise exc_cls( + "%(extra)sTextual SQL expression %(expr)r %(argname)sshould be " + "explicitly declared as text(%(expr)r)" + % { + "expr": util.ellipses_string(element), + "argname": "for argument %s" % (argname,) if argname else "", + "extra": "%s " % extra if extra else "", + } + ) from err + + +class _NoTextCoercion(RoleImpl): + __slots__ = () + + def _literal_coercion(self, element, *, argname=None, **kw): + if isinstance(element, str) and issubclass( + elements.TextClause, self._role_class + ): + _no_text_coercion(element, argname) + else: + self._raise_for_expected(element, argname) + + +class _CoerceLiterals(RoleImpl): 
+ __slots__ = () + _coerce_consts = False + _coerce_star = False + _coerce_numerics = False + + def _text_coercion(self, element, argname=None): + return _no_text_coercion(element, argname) + + def _literal_coercion(self, element, *, argname=None, **kw): + if isinstance(element, str): + if self._coerce_star and element == "*": + return elements.ColumnClause("*", is_literal=True) + else: + return self._text_coercion(element, argname, **kw) + + if self._coerce_consts: + if element is None: + return elements.Null() + elif element is False: + return elements.False_() + elif element is True: + return elements.True_() + + if self._coerce_numerics and isinstance(element, (numbers.Number)): + return elements.ColumnClause(str(element), is_literal=True) + + self._raise_for_expected(element, argname) + + +class LiteralValueImpl(RoleImpl): + _resolve_literal_only = True + + def _implicit_coercions( + self, + element, + resolved, + argname=None, + *, + type_=None, + literal_execute=False, + **kw, + ): + if not _is_literal(resolved): + self._raise_for_expected( + element, resolved=resolved, argname=argname, **kw + ) + + return elements.BindParameter( + None, + element, + type_=type_, + unique=True, + literal_execute=literal_execute, + ) + + def _literal_coercion(self, element, **kw): + return element + + +class _SelectIsNotFrom(RoleImpl): + __slots__ = () + + def _raise_for_expected( + self, + element: Any, + argname: Optional[str] = None, + resolved: Optional[Any] = None, + *, + advice: Optional[str] = None, + code: Optional[str] = None, + err: Optional[Exception] = None, + **kw: Any, + ) -> NoReturn: + if ( + not advice + and isinstance(element, roles.SelectStatementRole) + or isinstance(resolved, roles.SelectStatementRole) + ): + advice = ( + "To create a " + "FROM clause from a %s object, use the .subquery() method." 
+ % (resolved.__class__ if resolved is not None else element,) + ) + code = "89ve" + else: + code = None + + super()._raise_for_expected( + element, + argname=argname, + resolved=resolved, + advice=advice, + code=code, + err=err, + **kw, + ) + # never reached + assert False + + +class HasCacheKeyImpl(RoleImpl): + __slots__ = () + + def _implicit_coercions( + self, + element: Any, + resolved: Any, + argname: Optional[str] = None, + **kw: Any, + ) -> Any: + if isinstance(element, HasCacheKey): + return element + else: + self._raise_for_expected(element, argname, resolved) + + def _literal_coercion(self, element, **kw): + return element + + +class ExecutableOptionImpl(RoleImpl): + __slots__ = () + + def _implicit_coercions( + self, + element: Any, + resolved: Any, + argname: Optional[str] = None, + **kw: Any, + ) -> Any: + if isinstance(element, ExecutableOption): + return element + else: + self._raise_for_expected(element, argname, resolved) + + def _literal_coercion(self, element, **kw): + return element + + +class ExpressionElementImpl(_ColumnCoercions, RoleImpl): + __slots__ = () + + def _literal_coercion( + self, element, *, name=None, type_=None, is_crud=False, **kw + ): + if ( + element is None + and not is_crud + and (type_ is None or not type_.should_evaluate_none) + ): + # TODO: there's no test coverage now for the + # "should_evaluate_none" part of this, as outside of "crud" this + # codepath is not normally used except in some special cases + return elements.Null() + else: + try: + return elements.BindParameter( + name, element, type_, unique=True, _is_crud=is_crud + ) + except exc.ArgumentError as err: + self._raise_for_expected(element, err=err) + + def _raise_for_expected(self, element, argname=None, resolved=None, **kw): + # select uses implicit coercion with warning instead of raising + if isinstance(element, selectable.Values): + advice = ( + "To create a column expression from a VALUES clause, " + "use the .scalar_values() method." + ) + elif isinstance(element, roles.AnonymizedFromClauseRole): + advice = ( + "To create a column expression from a FROM clause row " + "as a whole, use the .table_valued() method." 
+ ) + else: + advice = None + + return super()._raise_for_expected( + element, argname=argname, resolved=resolved, advice=advice, **kw + ) + + +class BinaryElementImpl(ExpressionElementImpl, RoleImpl): + __slots__ = () + + def _literal_coercion( # type: ignore[override] + self, + element, + *, + expr, + operator, + bindparam_type=None, + argname=None, + **kw, + ): + try: + return expr._bind_param(operator, element, type_=bindparam_type) + except exc.ArgumentError as err: + self._raise_for_expected(element, err=err) + + def _post_coercion(self, resolved, *, expr, bindparam_type=None, **kw): + if resolved.type._isnull and not expr.type._isnull: + resolved = resolved._with_binary_element_type( + bindparam_type if bindparam_type is not None else expr.type + ) + return resolved + + +class InElementImpl(RoleImpl): + __slots__ = () + + def _implicit_coercions( + self, + element: Any, + resolved: Any, + argname: Optional[str] = None, + **kw: Any, + ) -> Any: + if resolved._is_from_clause: + if ( + isinstance(resolved, selectable.Alias) + and resolved.element._is_select_base + ): + self._warn_for_implicit_coercion(resolved) + return self._post_coercion(resolved.element, **kw) + else: + self._warn_for_implicit_coercion(resolved) + return self._post_coercion(resolved.select(), **kw) + else: + self._raise_for_expected(element, argname, resolved) + + def _warn_for_implicit_coercion(self, elem): + util.warn( + "Coercing %s object into a select() for use in IN(); " + "please pass a select() construct explicitly" + % (elem.__class__.__name__) + ) + + @util.preload_module("sqlalchemy.sql.elements") + def _literal_coercion(self, element, *, expr, operator, **kw): + if util.is_non_string_iterable(element): + non_literal_expressions: Dict[ + Optional[_ColumnExpressionArgument[Any]], + _ColumnExpressionArgument[Any], + ] = {} + element = list(element) + for o in element: + if not _is_literal(o): + if not isinstance( + o, util.preloaded.sql_elements.ColumnElement + ) and not hasattr(o, "__clause_element__"): + self._raise_for_expected(element, **kw) + + else: + non_literal_expressions[o] = o + + if non_literal_expressions: + return elements.ClauseList( + *[ + ( + non_literal_expressions[o] + if o in non_literal_expressions + else expr._bind_param(operator, o) + ) + for o in element + ] + ) + else: + return expr._bind_param(operator, element, expanding=True) + + else: + self._raise_for_expected(element, **kw) + + def _post_coercion(self, element, *, expr, operator, **kw): + if element._is_select_base: + # for IN, we are doing scalar_subquery() coercion without + # a warning + return element.scalar_subquery() + elif isinstance(element, elements.ClauseList): + assert not len(element.clauses) == 0 + return element.self_group(against=operator) + + elif isinstance(element, elements.BindParameter): + element = element._clone(maintain_key=True) + element.expanding = True + element.expand_op = operator + + return element + elif isinstance(element, selectable.Values): + return element.scalar_values() + else: + return element + + +class OnClauseImpl(_ColumnCoercions, RoleImpl): + __slots__ = () + + _coerce_consts = True + + def _literal_coercion(self, element, **kw): + self._raise_for_expected(element) + + def _post_coercion(self, resolved, *, original_element=None, **kw): + # this is a hack right now as we want to use coercion on an + # ORM InstrumentedAttribute, but we want to return the object + # itself if it is one, not its clause element. 
+ # ORM context _join and _legacy_join() would need to be improved + # to look for annotations in a clause element form. + if isinstance(original_element, roles.JoinTargetRole): + return original_element + return resolved + + +class WhereHavingImpl(_CoerceLiterals, _ColumnCoercions, RoleImpl): + __slots__ = () + + _coerce_consts = True + + def _text_coercion(self, element, argname=None): + return _no_text_coercion(element, argname) + + +class StatementOptionImpl(_CoerceLiterals, RoleImpl): + __slots__ = () + + _coerce_consts = True + + def _text_coercion(self, element, argname=None): + return elements.TextClause(element) + + +class ColumnArgumentImpl(_NoTextCoercion, RoleImpl): + __slots__ = () + + +class ColumnArgumentOrKeyImpl(_ReturnsStringKey, RoleImpl): + __slots__ = () + + +class StrAsPlainColumnImpl(_CoerceLiterals, RoleImpl): + __slots__ = () + + def _text_coercion(self, element, argname=None): + return elements.ColumnClause(element) + + +class ByOfImpl(_CoerceLiterals, _ColumnCoercions, RoleImpl, roles.ByOfRole): + __slots__ = () + + _coerce_consts = True + + def _text_coercion(self, element, argname=None): + return elements._textual_label_reference(element) + + +class OrderByImpl(ByOfImpl, RoleImpl): + __slots__ = () + + def _post_coercion(self, resolved, **kw): + if ( + isinstance(resolved, self._role_class) + and resolved._order_by_label_element is not None + ): + return elements._label_reference(resolved) + else: + return resolved + + +class GroupByImpl(ByOfImpl, RoleImpl): + __slots__ = () + + def _implicit_coercions( + self, + element: Any, + resolved: Any, + argname: Optional[str] = None, + **kw: Any, + ) -> Any: + if is_from_clause(resolved): + return elements.ClauseList(*resolved.c) + else: + return resolved + + +class DMLColumnImpl(_ReturnsStringKey, RoleImpl): + __slots__ = () + + def _post_coercion(self, element, *, as_key=False, **kw): + if as_key: + return element.key + else: + return element + + +class ConstExprImpl(RoleImpl): + __slots__ = () + + def _literal_coercion(self, element, *, argname=None, **kw): + if element is None: + return elements.Null() + elif element is False: + return elements.False_() + elif element is True: + return elements.True_() + else: + self._raise_for_expected(element, argname) + + +class TruncatedLabelImpl(_StringOnly, RoleImpl): + __slots__ = () + + def _implicit_coercions( + self, + element: Any, + resolved: Any, + argname: Optional[str] = None, + **kw: Any, + ) -> Any: + if isinstance(element, str): + return resolved + else: + self._raise_for_expected(element, argname, resolved) + + def _literal_coercion(self, element, **kw): + """coerce the given value to :class:`._truncated_label`. + + Existing :class:`._truncated_label` and + :class:`._anonymous_label` objects are passed + unchanged. + """ + + if isinstance(element, elements._truncated_label): + return element + else: + return elements._truncated_label(element) + + +class DDLExpressionImpl(_Deannotate, _CoerceLiterals, RoleImpl): + __slots__ = () + + _coerce_consts = True + + def _text_coercion(self, element, argname=None): + # see #5754 for why we can't easily deprecate this coercion. + # essentially expressions like postgresql_where would have to be + # text() as they come back from reflection and we don't want to + # have text() elements wired into the inspection dictionaries. 
+ return elements.TextClause(element) + + +class DDLConstraintColumnImpl(_Deannotate, _ReturnsStringKey, RoleImpl): + __slots__ = () + + +class DDLReferredColumnImpl(DDLConstraintColumnImpl): + __slots__ = () + + +class LimitOffsetImpl(RoleImpl): + __slots__ = () + + def _implicit_coercions( + self, + element: Any, + resolved: Any, + argname: Optional[str] = None, + **kw: Any, + ) -> Any: + if resolved is None: + return None + else: + self._raise_for_expected(element, argname, resolved) + + def _literal_coercion( # type: ignore[override] + self, element, *, name, type_, **kw + ): + if element is None: + return None + else: + value = util.asint(element) + return selectable._OffsetLimitParam( + name, value, type_=type_, unique=True + ) + + +class LabeledColumnExprImpl(ExpressionElementImpl): + __slots__ = () + + def _implicit_coercions( + self, + element: Any, + resolved: Any, + argname: Optional[str] = None, + **kw: Any, + ) -> Any: + if isinstance(resolved, roles.ExpressionElementRole): + return resolved.label(None) + else: + new = super()._implicit_coercions( + element, resolved, argname=argname, **kw + ) + if isinstance(new, roles.ExpressionElementRole): + return new.label(None) + else: + self._raise_for_expected(element, argname, resolved) + + +class ColumnsClauseImpl(_SelectIsNotFrom, _CoerceLiterals, RoleImpl): + __slots__ = () + + _coerce_consts = True + _coerce_numerics = True + _coerce_star = True + + _guess_straight_column = re.compile(r"^\w\S*$", re.I) + + def _raise_for_expected( + self, element, argname=None, resolved=None, *, advice=None, **kw + ): + if not advice and isinstance(element, list): + advice = ( + f"Did you mean to say select(" + f"{', '.join(repr(e) for e in element)})?" + ) + + return super()._raise_for_expected( + element, argname=argname, resolved=resolved, advice=advice, **kw + ) + + def _text_coercion(self, element, argname=None): + element = str(element) + + guess_is_literal = not self._guess_straight_column.match(element) + raise exc.ArgumentError( + "Textual column expression %(column)r %(argname)sshould be " + "explicitly declared with text(%(column)r), " + "or use %(literal_column)s(%(column)r) " + "for more specificity" + % { + "column": util.ellipses_string(element), + "argname": "for argument %s" % (argname,) if argname else "", + "literal_column": ( + "literal_column" if guess_is_literal else "column" + ), + } + ) + + +class ReturnsRowsImpl(RoleImpl): + __slots__ = () + + +class StatementImpl(_CoerceLiterals, RoleImpl): + __slots__ = () + + def _post_coercion( + self, resolved, *, original_element, argname=None, **kw + ): + if resolved is not original_element and not isinstance( + original_element, str + ): + # use same method as Connection uses; this will later raise + # ObjectNotExecutableError + try: + original_element._execute_on_connection + except AttributeError: + util.warn_deprecated( + "Object %r should not be used directly in a SQL statement " + "context, such as passing to methods such as " + "session.execute(). This usage will be disallowed in a " + "future release. " + "Please use Core select() / update() / delete() etc. " + "with Session.execute() and other statement execution " + "methods." 
% original_element, + "1.4", + ) + + return resolved + + def _implicit_coercions( + self, + element: Any, + resolved: Any, + argname: Optional[str] = None, + **kw: Any, + ) -> Any: + if resolved._is_lambda_element: + return resolved + else: + return super()._implicit_coercions( + element, resolved, argname=argname, **kw + ) + + +class SelectStatementImpl(_NoTextCoercion, RoleImpl): + __slots__ = () + + def _implicit_coercions( + self, + element: Any, + resolved: Any, + argname: Optional[str] = None, + **kw: Any, + ) -> Any: + if resolved._is_text_clause: + return resolved.columns() + else: + self._raise_for_expected(element, argname, resolved) + + +class HasCTEImpl(ReturnsRowsImpl): + __slots__ = () + + +class IsCTEImpl(RoleImpl): + __slots__ = () + + +class JoinTargetImpl(RoleImpl): + __slots__ = () + + _skip_clauseelement_for_target_match = True + + def _literal_coercion(self, element, *, argname=None, **kw): + self._raise_for_expected(element, argname) + + def _implicit_coercions( + self, + element: Any, + resolved: Any, + argname: Optional[str] = None, + *, + legacy: bool = False, + **kw: Any, + ) -> Any: + if isinstance(element, roles.JoinTargetRole): + # note that this codepath no longer occurs as of + # #6550, unless JoinTargetImpl._skip_clauseelement_for_target_match + # were set to False. + return element + elif legacy and resolved._is_select_base: + util.warn_deprecated( + "Implicit coercion of SELECT and textual SELECT " + "constructs into FROM clauses is deprecated; please call " + ".subquery() on any Core select or ORM Query object in " + "order to produce a subquery object.", + version="1.4", + ) + # TODO: doing _implicit_subquery here causes tests to fail, + # how was this working before? probably that ORM + # join logic treated it as a select and subquery would happen + # in _ORMJoin->Join + return resolved + else: + self._raise_for_expected(element, argname, resolved) + + +class FromClauseImpl(_SelectIsNotFrom, _NoTextCoercion, RoleImpl): + __slots__ = () + + def _implicit_coercions( + self, + element: Any, + resolved: Any, + argname: Optional[str] = None, + *, + explicit_subquery: bool = False, + allow_select: bool = True, + **kw: Any, + ) -> Any: + if resolved._is_select_base: + if explicit_subquery: + return resolved.subquery() + elif allow_select: + util.warn_deprecated( + "Implicit coercion of SELECT and textual SELECT " + "constructs into FROM clauses is deprecated; please call " + ".subquery() on any Core select or ORM Query object in " + "order to produce a subquery object.", + version="1.4", + ) + return resolved._implicit_subquery + elif resolved._is_text_clause: + return resolved + else: + self._raise_for_expected(element, argname, resolved) + + def _post_coercion(self, element, *, deannotate=False, **kw): + if deannotate: + return element._deannotate() + else: + return element + + +class StrictFromClauseImpl(FromClauseImpl): + __slots__ = () + + def _implicit_coercions( + self, + element: Any, + resolved: Any, + argname: Optional[str] = None, + *, + allow_select: bool = False, + **kw: Any, + ) -> Any: + if resolved._is_select_base and allow_select: + util.warn_deprecated( + "Implicit coercion of SELECT and textual SELECT constructs " + "into FROM clauses is deprecated; please call .subquery() " + "on any Core select or ORM Query object in order to produce a " + "subquery object.", + version="1.4", + ) + return resolved._implicit_subquery + else: + self._raise_for_expected(element, argname, resolved) + + +class AnonymizedFromClauseImpl(StrictFromClauseImpl): + 
__slots__ = () + + def _post_coercion(self, element, *, flat=False, name=None, **kw): + assert name is None + + return element._anonymous_fromclause(flat=flat) + + +class DMLTableImpl(_SelectIsNotFrom, _NoTextCoercion, RoleImpl): + __slots__ = () + + def _post_coercion(self, element, **kw): + if "dml_table" in element._annotations: + return element._annotations["dml_table"] + else: + return element + + +class DMLSelectImpl(_NoTextCoercion, RoleImpl): + __slots__ = () + + def _implicit_coercions( + self, + element: Any, + resolved: Any, + argname: Optional[str] = None, + **kw: Any, + ) -> Any: + if resolved._is_from_clause: + if ( + isinstance(resolved, selectable.Alias) + and resolved.element._is_select_base + ): + return resolved.element + else: + return resolved.select() + else: + self._raise_for_expected(element, argname, resolved) + + +class CompoundElementImpl(_NoTextCoercion, RoleImpl): + __slots__ = () + + def _raise_for_expected(self, element, argname=None, resolved=None, **kw): + if isinstance(element, roles.FromClauseRole): + if element._is_subquery: + advice = ( + "Use the plain select() object without " + "calling .subquery() or .alias()." + ) + else: + advice = ( + "To SELECT from any FROM clause, use the .select() method." + ) + else: + advice = None + return super()._raise_for_expected( + element, argname=argname, resolved=resolved, advice=advice, **kw + ) + + +_impl_lookup = {} + + +for name in dir(roles): + cls = getattr(roles, name) + if name.endswith("Role"): + name = name.replace("Role", "Impl") + if name in globals(): + impl = globals()[name](cls) + _impl_lookup[cls] = impl + +if not TYPE_CHECKING: + ee_impl = _impl_lookup[roles.ExpressionElementRole] + + for py_type in (int, bool, str, float): + _impl_lookup[roles.ExpressionElementRole[py_type]] = ee_impl diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/sql/compiler.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/sql/compiler.py new file mode 100644 index 0000000000000000000000000000000000000000..f171256d4a1b5b0971e463079410df07790bc82c --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/sql/compiler.py @@ -0,0 +1,7946 @@ +# sql/compiler.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php +# mypy: allow-untyped-defs, allow-untyped-calls + +"""Base SQL and DDL compiler implementations. + +Classes provided include: + +:class:`.compiler.SQLCompiler` - renders SQL +strings + +:class:`.compiler.DDLCompiler` - renders DDL +(data definition language) strings + +:class:`.compiler.GenericTypeCompiler` - renders +type specification strings. + +To generate user-defined SQL strings, see +:doc:`/ext/compiler`. 
+ +""" +from __future__ import annotations + +import collections +import collections.abc as collections_abc +import contextlib +from enum import IntEnum +import functools +import itertools +import operator +import re +from time import perf_counter +import typing +from typing import Any +from typing import Callable +from typing import cast +from typing import ClassVar +from typing import Dict +from typing import FrozenSet +from typing import Iterable +from typing import Iterator +from typing import List +from typing import Mapping +from typing import MutableMapping +from typing import NamedTuple +from typing import NoReturn +from typing import Optional +from typing import Pattern +from typing import Sequence +from typing import Set +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING +from typing import Union + +from . import base +from . import coercions +from . import crud +from . import elements +from . import functions +from . import operators +from . import roles +from . import schema +from . import selectable +from . import sqltypes +from . import util as sql_util +from ._typing import is_column_element +from ._typing import is_dml +from .base import _de_clone +from .base import _from_objects +from .base import _NONE_NAME +from .base import _SentinelDefaultCharacterization +from .base import NO_ARG +from .elements import quoted_name +from .sqltypes import TupleType +from .visitors import prefix_anon_map +from .. import exc +from .. import util +from ..util import FastIntFlag +from ..util.typing import Literal +from ..util.typing import Protocol +from ..util.typing import Self +from ..util.typing import TypedDict + +if typing.TYPE_CHECKING: + from .annotation import _AnnotationDict + from .base import _AmbiguousTableNameMap + from .base import CompileState + from .base import Executable + from .cache_key import CacheKey + from .ddl import ExecutableDDLElement + from .dml import Insert + from .dml import Update + from .dml import UpdateBase + from .dml import UpdateDMLState + from .dml import ValuesBase + from .elements import _truncated_label + from .elements import BinaryExpression + from .elements import BindParameter + from .elements import ClauseElement + from .elements import ColumnClause + from .elements import ColumnElement + from .elements import False_ + from .elements import Label + from .elements import Null + from .elements import True_ + from .functions import Function + from .schema import Column + from .schema import Constraint + from .schema import ForeignKeyConstraint + from .schema import Index + from .schema import PrimaryKeyConstraint + from .schema import Table + from .schema import UniqueConstraint + from .selectable import _ColumnsClauseElement + from .selectable import AliasedReturnsRows + from .selectable import CompoundSelectState + from .selectable import CTE + from .selectable import FromClause + from .selectable import NamedFromClause + from .selectable import ReturnsRows + from .selectable import Select + from .selectable import SelectState + from .type_api import _BindProcessorType + from .type_api import TypeDecorator + from .type_api import TypeEngine + from .type_api import UserDefinedType + from .visitors import Visitable + from ..engine.cursor import CursorResultMetaData + from ..engine.interfaces import _CoreSingleExecuteParams + from ..engine.interfaces import _DBAPIAnyExecuteParams + from ..engine.interfaces import _DBAPIMultiExecuteParams + from ..engine.interfaces import _DBAPISingleExecuteParams + from 
..engine.interfaces import _ExecuteOptions + from ..engine.interfaces import _GenericSetInputSizesType + from ..engine.interfaces import _MutableCoreSingleExecuteParams + from ..engine.interfaces import Dialect + from ..engine.interfaces import SchemaTranslateMapType + + +_FromHintsType = Dict["FromClause", str] + +RESERVED_WORDS = { + "all", + "analyse", + "analyze", + "and", + "any", + "array", + "as", + "asc", + "asymmetric", + "authorization", + "between", + "binary", + "both", + "case", + "cast", + "check", + "collate", + "column", + "constraint", + "create", + "cross", + "current_date", + "current_role", + "current_time", + "current_timestamp", + "current_user", + "default", + "deferrable", + "desc", + "distinct", + "do", + "else", + "end", + "except", + "false", + "for", + "foreign", + "freeze", + "from", + "full", + "grant", + "group", + "having", + "ilike", + "in", + "initially", + "inner", + "intersect", + "into", + "is", + "isnull", + "join", + "leading", + "left", + "like", + "limit", + "localtime", + "localtimestamp", + "natural", + "new", + "not", + "notnull", + "null", + "off", + "offset", + "old", + "on", + "only", + "or", + "order", + "outer", + "overlaps", + "placing", + "primary", + "references", + "right", + "select", + "session_user", + "set", + "similar", + "some", + "symmetric", + "table", + "then", + "to", + "trailing", + "true", + "union", + "unique", + "user", + "using", + "verbose", + "when", + "where", +} + +LEGAL_CHARACTERS = re.compile(r"^[A-Z0-9_$]+$", re.I) +LEGAL_CHARACTERS_PLUS_SPACE = re.compile(r"^[A-Z0-9_ $]+$", re.I) +ILLEGAL_INITIAL_CHARACTERS = {str(x) for x in range(0, 10)}.union(["$"]) + +FK_ON_DELETE = re.compile( + r"^(?:RESTRICT|CASCADE|SET NULL|NO ACTION|SET DEFAULT)$", re.I +) +FK_ON_UPDATE = re.compile( + r"^(?:RESTRICT|CASCADE|SET NULL|NO ACTION|SET DEFAULT)$", re.I +) +FK_INITIALLY = re.compile(r"^(?:DEFERRED|IMMEDIATE)$", re.I) +BIND_PARAMS = re.compile(r"(? 
", + operators.ge: " >= ", + operators.eq: " = ", + operators.is_distinct_from: " IS DISTINCT FROM ", + operators.is_not_distinct_from: " IS NOT DISTINCT FROM ", + operators.concat_op: " || ", + operators.match_op: " MATCH ", + operators.not_match_op: " NOT MATCH ", + operators.in_op: " IN ", + operators.not_in_op: " NOT IN ", + operators.comma_op: ", ", + operators.from_: " FROM ", + operators.as_: " AS ", + operators.is_: " IS ", + operators.is_not: " IS NOT ", + operators.collate: " COLLATE ", + # unary + operators.exists: "EXISTS ", + operators.distinct_op: "DISTINCT ", + operators.inv: "NOT ", + operators.any_op: "ANY ", + operators.all_op: "ALL ", + # modifiers + operators.desc_op: " DESC", + operators.asc_op: " ASC", + operators.nulls_first_op: " NULLS FIRST", + operators.nulls_last_op: " NULLS LAST", + # bitwise + operators.bitwise_xor_op: " ^ ", + operators.bitwise_or_op: " | ", + operators.bitwise_and_op: " & ", + operators.bitwise_not_op: "~", + operators.bitwise_lshift_op: " << ", + operators.bitwise_rshift_op: " >> ", +} + +FUNCTIONS: Dict[Type[Function[Any]], str] = { + functions.coalesce: "coalesce", + functions.current_date: "CURRENT_DATE", + functions.current_time: "CURRENT_TIME", + functions.current_timestamp: "CURRENT_TIMESTAMP", + functions.current_user: "CURRENT_USER", + functions.localtime: "LOCALTIME", + functions.localtimestamp: "LOCALTIMESTAMP", + functions.random: "random", + functions.sysdate: "sysdate", + functions.session_user: "SESSION_USER", + functions.user: "USER", + functions.cube: "CUBE", + functions.rollup: "ROLLUP", + functions.grouping_sets: "GROUPING SETS", +} + + +EXTRACT_MAP = { + "month": "month", + "day": "day", + "year": "year", + "second": "second", + "hour": "hour", + "doy": "doy", + "minute": "minute", + "quarter": "quarter", + "dow": "dow", + "week": "week", + "epoch": "epoch", + "milliseconds": "milliseconds", + "microseconds": "microseconds", + "timezone_hour": "timezone_hour", + "timezone_minute": "timezone_minute", +} + +COMPOUND_KEYWORDS = { + selectable._CompoundSelectKeyword.UNION: "UNION", + selectable._CompoundSelectKeyword.UNION_ALL: "UNION ALL", + selectable._CompoundSelectKeyword.EXCEPT: "EXCEPT", + selectable._CompoundSelectKeyword.EXCEPT_ALL: "EXCEPT ALL", + selectable._CompoundSelectKeyword.INTERSECT: "INTERSECT", + selectable._CompoundSelectKeyword.INTERSECT_ALL: "INTERSECT ALL", +} + + +class ResultColumnsEntry(NamedTuple): + """Tracks a column expression that is expected to be represented + in the result rows for this statement. + + This normally refers to the columns clause of a SELECT statement + but may also refer to a RETURNING clause, as well as for dialect-specific + emulations. + + """ + + keyname: str + """string name that's expected in cursor.description""" + + name: str + """column name, may be labeled""" + + objects: Tuple[Any, ...] + """sequence of objects that should be able to locate this column + in a RowMapping. This is typically string names and aliases + as well as Column objects. + + """ + + type: TypeEngine[Any] + """Datatype to be associated with this column. This is where + the "result processing" logic directly links the compiled statement + to the rows that come back from the cursor. + + """ + + +class _ResultMapAppender(Protocol): + def __call__( + self, + keyname: str, + name: str, + objects: Sequence[Any], + type_: TypeEngine[Any], + ) -> None: ... + + +# integer indexes into ResultColumnsEntry used by cursor.py. 
+# some profiling showed integer access faster than named tuple +RM_RENDERED_NAME: Literal[0] = 0 +RM_NAME: Literal[1] = 1 +RM_OBJECTS: Literal[2] = 2 +RM_TYPE: Literal[3] = 3 + + +class _BaseCompilerStackEntry(TypedDict): + asfrom_froms: Set[FromClause] + correlate_froms: Set[FromClause] + selectable: ReturnsRows + + +class _CompilerStackEntry(_BaseCompilerStackEntry, total=False): + compile_state: CompileState + need_result_map_for_nested: bool + need_result_map_for_compound: bool + select_0: ReturnsRows + insert_from_select: Select[Any] + + +class ExpandedState(NamedTuple): + """represents state to use when producing "expanded" and + "post compile" bound parameters for a statement. + + "expanded" parameters are parameters that are generated at + statement execution time to suit a number of parameters passed, the most + prominent example being the individual elements inside of an IN expression. + + "post compile" parameters are parameters where the SQL literal value + will be rendered into the SQL statement at execution time, rather than + being passed as separate parameters to the driver. + + To create an :class:`.ExpandedState` instance, use the + :meth:`.SQLCompiler.construct_expanded_state` method on any + :class:`.SQLCompiler` instance. + + """ + + statement: str + """String SQL statement with parameters fully expanded""" + + parameters: _CoreSingleExecuteParams + """Parameter dictionary with parameters fully expanded. + + For a statement that uses named parameters, this dictionary will map + exactly to the names in the statement. For a statement that uses + positional parameters, the :attr:`.ExpandedState.positional_parameters` + will yield a tuple with the positional parameter set. + + """ + + processors: Mapping[str, _BindProcessorType[Any]] + """mapping of bound value processors""" + + positiontup: Optional[Sequence[str]] + """Sequence of string names indicating the order of positional + parameters""" + + parameter_expansion: Mapping[str, List[str]] + """Mapping representing the intermediary link from original parameter + name to list of "expanded" parameter names, for those parameters that + were expanded.""" + + @property + def positional_parameters(self) -> Tuple[Any, ...]: + """Tuple of positional parameters, for statements that were compiled + using a positional paramstyle. + + """ + if self.positiontup is None: + raise exc.InvalidRequestError( + "statement does not use a positional paramstyle" + ) + return tuple(self.parameters[key] for key in self.positiontup) + + @property + def additional_parameters(self) -> _CoreSingleExecuteParams: + """synonym for :attr:`.ExpandedState.parameters`.""" + return self.parameters + + +class _InsertManyValues(NamedTuple): + """represents state to use for executing an "insertmanyvalues" statement. + + The primary consumers of this object are the + :meth:`.SQLCompiler._deliver_insertmanyvalues_batches` and + :meth:`.DefaultDialect._deliver_insertmanyvalues_batches` methods. + + .. versionadded:: 2.0 + + """ + + is_default_expr: bool + """if True, the statement is of the form + ``INSERT INTO TABLE DEFAULT VALUES``, and can't be rewritten as a "batch" + + """ + + single_values_expr: str + """The rendered "values" clause of the INSERT statement. + + This is typically the parenthesized section e.g. "(?, ?, ?)" or similar. + The insertmanyvalues logic uses this string as a search and replace + target. + + """ + + insert_crud_params: List[crud._CrudParamElementStr] + """List of Column / bind names etc. 
used while rewriting the statement""" + + num_positional_params_counted: int + """the number of bound parameters in a single-row statement. + + This count may be larger or smaller than the actual number of columns + targeted in the INSERT, as it accommodates for SQL expressions + in the values list that may have zero or more parameters embedded + within them. + + This count is part of what's used to organize rewritten parameter lists + when batching. + + """ + + sort_by_parameter_order: bool = False + """if the deterministic_returnined_order parameter were used on the + insert. + + All of the attributes following this will only be used if this is True. + + """ + + includes_upsert_behaviors: bool = False + """if True, we have to accommodate for upsert behaviors. + + This will in some cases downgrade "insertmanyvalues" that requests + deterministic ordering. + + """ + + sentinel_columns: Optional[Sequence[Column[Any]]] = None + """List of sentinel columns that were located. + + This list is only here if the INSERT asked for + sort_by_parameter_order=True, + and dialect-appropriate sentinel columns were located. + + .. versionadded:: 2.0.10 + + """ + + num_sentinel_columns: int = 0 + """how many sentinel columns are in the above list, if any. + + This is the same as + ``len(sentinel_columns) if sentinel_columns is not None else 0`` + + """ + + sentinel_param_keys: Optional[Sequence[str]] = None + """parameter str keys in each param dictionary / tuple + that would link to the client side "sentinel" values for that row, which + we can use to match up parameter sets to result rows. + + This is only present if sentinel_columns is present and the INSERT + statement actually refers to client side values for these sentinel + columns. + + .. versionadded:: 2.0.10 + + .. versionchanged:: 2.0.29 - the sequence is now string dictionary keys + only, used against the "compiled parameteters" collection before + the parameters were converted by bound parameter processors + + """ + + implicit_sentinel: bool = False + """if True, we have exactly one sentinel column and it uses a server side + value, currently has to generate an incrementing integer value. + + The dialect in question would have asserted that it supports receiving + these values back and sorting on that value as a means of guaranteeing + correlation with the incoming parameter list. + + .. versionadded:: 2.0.10 + + """ + + embed_values_counter: bool = False + """Whether to embed an incrementing integer counter in each parameter + set within the VALUES clause as parameters are batched over. + + This is only used for a specific INSERT..SELECT..VALUES..RETURNING syntax + where a subquery is used to produce value tuples. Current support + includes PostgreSQL, Microsoft SQL Server. + + .. versionadded:: 2.0.10 + + """ + + +class _InsertManyValuesBatch(NamedTuple): + """represents an individual batch SQL statement for insertmanyvalues. + + This is passed through the + :meth:`.SQLCompiler._deliver_insertmanyvalues_batches` and + :meth:`.DefaultDialect._deliver_insertmanyvalues_batches` methods out + to the :class:`.Connection` within the + :meth:`.Connection._exec_insertmany_context` method. + + .. 
versionadded:: 2.0.10 + + """ + + replaced_statement: str + replaced_parameters: _DBAPIAnyExecuteParams + processed_setinputsizes: Optional[_GenericSetInputSizesType] + batch: Sequence[_DBAPISingleExecuteParams] + sentinel_values: Sequence[Tuple[Any, ...]] + current_batch_size: int + batchnum: int + total_batches: int + rows_sorted: bool + is_downgraded: bool + + +class InsertmanyvaluesSentinelOpts(FastIntFlag): + """bitflag enum indicating styles of PK defaults + which can work as implicit sentinel columns + + """ + + NOT_SUPPORTED = 1 + AUTOINCREMENT = 2 + IDENTITY = 4 + SEQUENCE = 8 + + ANY_AUTOINCREMENT = AUTOINCREMENT | IDENTITY | SEQUENCE + _SUPPORTED_OR_NOT = NOT_SUPPORTED | ANY_AUTOINCREMENT + + USE_INSERT_FROM_SELECT = 16 + RENDER_SELECT_COL_CASTS = 64 + + +class CompilerState(IntEnum): + COMPILING = 0 + """statement is present, compilation phase in progress""" + + STRING_APPLIED = 1 + """statement is present, string form of the statement has been applied. + + Additional processors by subclasses may still be pending. + + """ + + NO_STATEMENT = 2 + """compiler does not have a statement to compile, is used + for method access""" + + +class Linting(IntEnum): + """represent preferences for the 'SQL linting' feature. + + this feature currently includes support for flagging cartesian products + in SQL statements. + + """ + + NO_LINTING = 0 + "Disable all linting." + + COLLECT_CARTESIAN_PRODUCTS = 1 + """Collect data on FROMs and cartesian products and gather into + 'self.from_linter'""" + + WARN_LINTING = 2 + "Emit warnings for linters that find problems" + + FROM_LINTING = COLLECT_CARTESIAN_PRODUCTS | WARN_LINTING + """Warn for cartesian products; combines COLLECT_CARTESIAN_PRODUCTS + and WARN_LINTING""" + + +NO_LINTING, COLLECT_CARTESIAN_PRODUCTS, WARN_LINTING, FROM_LINTING = tuple( + Linting +) + + +class FromLinter(collections.namedtuple("FromLinter", ["froms", "edges"])): + """represents current state for the "cartesian product" detection + feature.""" + + def lint(self, start=None): + froms = self.froms + if not froms: + return None, None + + edges = set(self.edges) + the_rest = set(froms) + + if start is not None: + start_with = start + the_rest.remove(start_with) + else: + start_with = the_rest.pop() + + stack = collections.deque([start_with]) + + while stack and the_rest: + node = stack.popleft() + the_rest.discard(node) + + # comparison of nodes in edges here is based on hash equality, as + # there are "annotated" elements that match the non-annotated ones. + # to remove the need for in-python hash() calls, use native + # containment routines (e.g. "node in edge", "edge.index(node)") + to_remove = {edge for edge in edges if node in edge} + + # appendleft the node in each edge that is not + # the one that matched. + stack.extendleft(edge[not edge.index(node)] for edge in to_remove) + edges.difference_update(to_remove) + + # FROMS left over? boom + if the_rest: + return the_rest, start_with + else: + return None, None + + def warn(self, stmt_type="SELECT"): + the_rest, start_with = self.lint() + + # FROMS left over? boom + if the_rest: + froms = the_rest + if froms: + template = ( + "{stmt_type} statement has a cartesian product between " + "FROM element(s) {froms} and " + 'FROM element "{start}". Apply join condition(s) ' + "between each element to resolve." 
+ ) + froms_str = ", ".join( + f'"{self.froms[from_]}"' for from_ in froms + ) + message = template.format( + stmt_type=stmt_type, + froms=froms_str, + start=self.froms[start_with], + ) + + util.warn(message) + + +class Compiled: + """Represent a compiled SQL or DDL expression. + + The ``__str__`` method of the ``Compiled`` object should produce + the actual text of the statement. ``Compiled`` objects are + specific to their underlying database dialect, and also may + or may not be specific to the columns referenced within a + particular set of bind parameters. In no case should the + ``Compiled`` object be dependent on the actual values of those + bind parameters, even though it may reference those values as + defaults. + """ + + statement: Optional[ClauseElement] = None + "The statement to compile." + string: str = "" + "The string representation of the ``statement``" + + state: CompilerState + """description of the compiler's state""" + + is_sql = False + is_ddl = False + + _cached_metadata: Optional[CursorResultMetaData] = None + + _result_columns: Optional[List[ResultColumnsEntry]] = None + + schema_translate_map: Optional[SchemaTranslateMapType] = None + + execution_options: _ExecuteOptions = util.EMPTY_DICT + """ + Execution options propagated from the statement. In some cases, + sub-elements of the statement can modify these. + """ + + preparer: IdentifierPreparer + + _annotations: _AnnotationDict = util.EMPTY_DICT + + compile_state: Optional[CompileState] = None + """Optional :class:`.CompileState` object that maintains additional + state used by the compiler. + + Major executable objects such as :class:`_expression.Insert`, + :class:`_expression.Update`, :class:`_expression.Delete`, + :class:`_expression.Select` will generate this + state when compiled in order to calculate additional information about the + object. For the top level object that is to be executed, the state can be + stored here where it can also have applicability towards result set + processing. + + .. versionadded:: 1.4 + + """ + + dml_compile_state: Optional[CompileState] = None + """Optional :class:`.CompileState` assigned at the same point that + .isinsert, .isupdate, or .isdelete is assigned. + + This will normally be the same object as .compile_state, with the + exception of cases like the :class:`.ORMFromStatementCompileState` + object. + + .. versionadded:: 1.4.40 + + """ + + cache_key: Optional[CacheKey] = None + """The :class:`.CacheKey` that was generated ahead of creating this + :class:`.Compiled` object. + + This is used for routines that need access to the original + :class:`.CacheKey` instance generated when the :class:`.Compiled` + instance was first cached, typically in order to reconcile + the original list of :class:`.BindParameter` objects with a + per-statement list that's generated on each call. + + """ + + _gen_time: float + """Generation time of this :class:`.Compiled`, used for reporting + cache stats.""" + + def __init__( + self, + dialect: Dialect, + statement: Optional[ClauseElement], + schema_translate_map: Optional[SchemaTranslateMapType] = None, + render_schema_translate: bool = False, + compile_kwargs: Mapping[str, Any] = util.immutabledict(), + ): + """Construct a new :class:`.Compiled` object. + + :param dialect: :class:`.Dialect` to compile against. + + :param statement: :class:`_expression.ClauseElement` to be compiled. + + :param schema_translate_map: dictionary of schema names to be + translated when forming the resultant SQL + + .. 
seealso:: + + :ref:`schema_translating` + + :param compile_kwargs: additional kwargs that will be + passed to the initial call to :meth:`.Compiled.process`. + + + """ + self.dialect = dialect + self.preparer = self.dialect.identifier_preparer + if schema_translate_map: + self.schema_translate_map = schema_translate_map + self.preparer = self.preparer._with_schema_translate( + schema_translate_map + ) + + if statement is not None: + self.state = CompilerState.COMPILING + self.statement = statement + self.can_execute = statement.supports_execution + self._annotations = statement._annotations + if self.can_execute: + if TYPE_CHECKING: + assert isinstance(statement, Executable) + self.execution_options = statement._execution_options + self.string = self.process(self.statement, **compile_kwargs) + + if render_schema_translate: + assert schema_translate_map is not None + self.string = self.preparer._render_schema_translates( + self.string, schema_translate_map + ) + + self.state = CompilerState.STRING_APPLIED + else: + self.state = CompilerState.NO_STATEMENT + + self._gen_time = perf_counter() + + def __init_subclass__(cls) -> None: + cls._init_compiler_cls() + return super().__init_subclass__() + + @classmethod + def _init_compiler_cls(cls): + pass + + def _execute_on_connection( + self, connection, distilled_params, execution_options + ): + if self.can_execute: + return connection._execute_compiled( + self, distilled_params, execution_options + ) + else: + raise exc.ObjectNotExecutableError(self.statement) + + def visit_unsupported_compilation(self, element, err, **kw): + raise exc.UnsupportedCompilationError(self, type(element)) from err + + @property + def sql_compiler(self) -> SQLCompiler: + """Return a Compiled that is capable of processing SQL expressions. + + If this compiler is one, it would likely just return 'self'. + + """ + + raise NotImplementedError() + + def process(self, obj: Visitable, **kwargs: Any) -> str: + return obj._compiler_dispatch(self, **kwargs) + + def __str__(self) -> str: + """Return the string text of the generated SQL or DDL.""" + + if self.state is CompilerState.STRING_APPLIED: + return self.string + else: + return "" + + def construct_params( + self, + params: Optional[_CoreSingleExecuteParams] = None, + extracted_parameters: Optional[Sequence[BindParameter[Any]]] = None, + escape_names: bool = True, + ) -> Optional[_MutableCoreSingleExecuteParams]: + """Return the bind params for this compiled object. + + :param params: a dict of string/object pairs whose values will + override bind values compiled in to the + statement. 
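Illustrative sketch (editorial aside, not part of the vendored file) of how construct_params() behaves on a concrete SQLCompiler: it returns the bind values compiled into the statement, with any caller-supplied dict taking precedence.

from sqlalchemy import bindparam, column, select

stmt = select(column("x")).where(column("x") == bindparam("val", 5))
compiled = stmt.compile()
print(compiled.construct_params())            # {'val': 5}
print(compiled.construct_params({"val": 9}))  # caller-supplied value wins: {'val': 9}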
+ """ + + raise NotImplementedError() + + @property + def params(self): + """Return the bind params for this compiled object.""" + return self.construct_params() + + +class TypeCompiler(util.EnsureKWArg): + """Produces DDL specification for TypeEngine objects.""" + + ensure_kwarg = r"visit_\w+" + + def __init__(self, dialect: Dialect): + self.dialect = dialect + + def process(self, type_: TypeEngine[Any], **kw: Any) -> str: + if ( + type_._variant_mapping + and self.dialect.name in type_._variant_mapping + ): + type_ = type_._variant_mapping[self.dialect.name] + return type_._compiler_dispatch(self, **kw) + + def visit_unsupported_compilation( + self, element: Any, err: Exception, **kw: Any + ) -> NoReturn: + raise exc.UnsupportedCompilationError(self, element) from err + + +# this was a Visitable, but to allow accurate detection of +# column elements this is actually a column element +class _CompileLabel( + roles.BinaryElementRole[Any], elements.CompilerColumnElement +): + """lightweight label object which acts as an expression.Label.""" + + __visit_name__ = "label" + __slots__ = "element", "name", "_alt_names" + + def __init__(self, col, name, alt_names=()): + self.element = col + self.name = name + self._alt_names = (col,) + alt_names + + @property + def proxy_set(self): + return self.element.proxy_set + + @property + def type(self): + return self.element.type + + def self_group(self, **kw): + return self + + +class ilike_case_insensitive( + roles.BinaryElementRole[Any], elements.CompilerColumnElement +): + """produce a wrapping element for a case-insensitive portion of + an ILIKE construct. + + The construct usually renders the ``lower()`` function, but on + PostgreSQL will pass silently with the assumption that "ILIKE" + is being used. + + .. versionadded:: 2.0 + + """ + + __visit_name__ = "ilike_case_insensitive_operand" + __slots__ = "element", "comparator" + + def __init__(self, element): + self.element = element + self.comparator = element.comparator + + @property + def proxy_set(self): + return self.element.proxy_set + + @property + def type(self): + return self.element.type + + def self_group(self, **kw): + return self + + def _with_binary_element_type(self, type_): + return ilike_case_insensitive( + self.element._with_binary_element_type(type_) + ) + + +class SQLCompiler(Compiled): + """Default implementation of :class:`.Compiled`. + + Compiles :class:`_expression.ClauseElement` objects into SQL strings. + + """ + + extract_map = EXTRACT_MAP + + bindname_escape_characters: ClassVar[Mapping[str, str]] = ( + util.immutabledict( + { + "%": "P", + "(": "A", + ")": "Z", + ":": "C", + ".": "_", + "[": "_", + "]": "_", + " ": "_", + } + ) + ) + """A mapping (e.g. dict or similar) containing a lookup of + characters keyed to replacement characters which will be applied to all + 'bind names' used in SQL statements as a form of 'escaping'; the given + characters are replaced entirely with the 'replacement' character when + rendered in the SQL statement, and a similar translation is performed + on the incoming names used in parameter dictionaries passed to methods + like :meth:`_engine.Connection.execute`. + + This allows bound parameter names used in :func:`_sql.bindparam` and + other constructs to have any arbitrary characters present without any + concern for characters that aren't allowed at all on the target database. 
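Illustrative sketch (editorial aside, not part of the vendored file): the escape mapping documented above lets bound-parameter keys contain characters such as spaces while still producing a legal placeholder name in the rendered SQL.

from sqlalchemy import bindparam, column, select

stmt = select(column("x")).where(column("x") == bindparam("two words", 1))
compiled = stmt.compile()
print(compiled.string)  # placeholder rendered with the space escaped (" " -> "_")
print(compiled.params)  # parameter dictionary keyed by the escaped name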
+ + Third party dialects can establish their own dictionary here to replace the + default mapping, which will ensure that the particular characters in the + mapping will never appear in a bound parameter name. + + The dictionary is evaluated at **class creation time**, so cannot be + modified at runtime; it must be present on the class when the class + is first declared. + + Note that for dialects that have additional bound parameter rules such + as additional restrictions on leading characters, the + :meth:`_sql.SQLCompiler.bindparam_string` method may need to be augmented. + See the cx_Oracle compiler for an example of this. + + .. versionadded:: 2.0.0rc1 + + """ + + _bind_translate_re: ClassVar[Pattern[str]] + _bind_translate_chars: ClassVar[Mapping[str, str]] + + is_sql = True + + compound_keywords = COMPOUND_KEYWORDS + + isdelete: bool = False + isinsert: bool = False + isupdate: bool = False + """class-level defaults which can be set at the instance + level to define if this Compiled instance represents + INSERT/UPDATE/DELETE + """ + + postfetch: Optional[List[Column[Any]]] + """list of columns that can be post-fetched after INSERT or UPDATE to + receive server-updated values""" + + insert_prefetch: Sequence[Column[Any]] = () + """list of columns for which default values should be evaluated before + an INSERT takes place""" + + update_prefetch: Sequence[Column[Any]] = () + """list of columns for which onupdate default values should be evaluated + before an UPDATE takes place""" + + implicit_returning: Optional[Sequence[ColumnElement[Any]]] = None + """list of "implicit" returning columns for a toplevel INSERT or UPDATE + statement, used to receive newly generated values of columns. + + .. versionadded:: 2.0 ``implicit_returning`` replaces the previous + ``returning`` collection, which was not a generalized RETURNING + collection and instead was in fact specific to the "implicit returning" + feature. + + """ + + isplaintext: bool = False + + binds: Dict[str, BindParameter[Any]] + """a dictionary of bind parameter keys to BindParameter instances.""" + + bind_names: Dict[BindParameter[Any], str] + """a dictionary of BindParameter instances to "compiled" names + that are actually present in the generated SQL""" + + stack: List[_CompilerStackEntry] + """major statements such as SELECT, INSERT, UPDATE, DELETE are + tracked in this stack using an entry format.""" + + returning_precedes_values: bool = False + """set to True classwide to generate RETURNING + clauses before the VALUES or WHERE clause (i.e. MSSQL) + """ + + render_table_with_column_in_update_from: bool = False + """set to True classwide to indicate the SET clause + in a multi-table UPDATE statement should qualify + columns with the table name (i.e. MySQL only) + """ + + ansi_bind_rules: bool = False + """SQL 92 doesn't allow bind parameters to be used + in the columns clause of a SELECT, nor does it allow + ambiguous expressions like "? = ?". A compiler + subclass can set this flag to False if the target + driver/DB enforces this + """ + + bindtemplate: str + """template to render bound parameters based on paramstyle.""" + + compilation_bindtemplate: str + """template used by compiler to render parameters before positional + paramstyle application""" + + _numeric_binds_identifier_char: str + """Character that's used to as the identifier of a numerical bind param. + For example if this char is set to ``$``, numerical binds will be rendered + in the form ``$1, $2, $3``. 
+ """ + + _result_columns: List[ResultColumnsEntry] + """relates label names in the final SQL to a tuple of local + column/label name, ColumnElement object (if any) and + TypeEngine. CursorResult uses this for type processing and + column targeting""" + + _textual_ordered_columns: bool = False + """tell the result object that the column names as rendered are important, + but they are also "ordered" vs. what is in the compiled object here. + + As of 1.4.42 this condition is only present when the statement is a + TextualSelect, e.g. text("....").columns(...), where it is required + that the columns are considered positionally and not by name. + + """ + + _ad_hoc_textual: bool = False + """tell the result that we encountered text() or '*' constructs in the + middle of the result columns, but we also have compiled columns, so + if the number of columns in cursor.description does not match how many + expressions we have, that means we can't rely on positional at all and + should match on name. + + """ + + _ordered_columns: bool = True + """ + if False, means we can't be sure the list of entries + in _result_columns is actually the rendered order. Usually + True unless using an unordered TextualSelect. + """ + + _loose_column_name_matching: bool = False + """tell the result object that the SQL statement is textual, wants to match + up to Column objects, and may be using the ._tq_label in the SELECT rather + than the base name. + + """ + + _numeric_binds: bool = False + """ + True if paramstyle is "numeric". This paramstyle is trickier than + all the others. + + """ + + _render_postcompile: bool = False + """ + whether to render out POSTCOMPILE params during the compile phase. + + This attribute is used only for end-user invocation of stmt.compile(); + it's never used for actual statement execution, where instead the + dialect internals access and render the internal postcompile structure + directly. + + """ + + _post_compile_expanded_state: Optional[ExpandedState] = None + """When render_postcompile is used, the ``ExpandedState`` used to create + the "expanded" SQL is assigned here, and then used by the ``.params`` + accessor and ``.construct_params()`` methods for their return values. + + .. versionadded:: 2.0.0rc1 + + """ + + _pre_expanded_string: Optional[str] = None + """Stores the original string SQL before 'post_compile' is applied, + for cases where 'post_compile' were used. + + """ + + _pre_expanded_positiontup: Optional[List[str]] = None + + _insertmanyvalues: Optional[_InsertManyValues] = None + + _insert_crud_params: Optional[crud._CrudParamSequence] = None + + literal_execute_params: FrozenSet[BindParameter[Any]] = frozenset() + """bindparameter objects that are rendered as literal values at statement + execution time. + + """ + + post_compile_params: FrozenSet[BindParameter[Any]] = frozenset() + """bindparameter objects that are rendered as bound parameter placeholders + at statement execution time. + + """ + + escaped_bind_names: util.immutabledict[str, str] = util.EMPTY_DICT + """Late escaping of bound parameter names that has to be converted + to the original name when looking in the parameter dictionary. + + """ + + has_out_parameters = False + """if True, there are bindparam() objects that have the isoutparam + flag set.""" + + postfetch_lastrowid = False + """if True, and this in insert, use cursor.lastrowid to populate + result.inserted_primary_key. 
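Illustrative sketch (editorial aside, not part of the vendored file): the implicit_returning / postfetch collections above track how newly generated values come back from INSERT and UPDATE; an explicit RETURNING clause compiles as shown below on a dialect that supports it.

from sqlalchemy import Column, Integer, MetaData, Table, insert
from sqlalchemy.dialects import postgresql

t = Table("t", MetaData(), Column("id", Integer, primary_key=True), Column("x", Integer))
# expected output, roughly:
# INSERT INTO t (id, x) VALUES (%(id)s, %(x)s) RETURNING t.id
print(insert(t).returning(t.c.id).compile(dialect=postgresql.dialect()))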
""" + + _cache_key_bind_match: Optional[ + Tuple[ + Dict[ + BindParameter[Any], + List[BindParameter[Any]], + ], + Dict[ + str, + BindParameter[Any], + ], + ] + ] = None + """a mapping that will relate the BindParameter object we compile + to those that are part of the extracted collection of parameters + in the cache key, if we were given a cache key. + + """ + + positiontup: Optional[List[str]] = None + """for a compiled construct that uses a positional paramstyle, will be + a sequence of strings, indicating the names of bound parameters in order. + + This is used in order to render bound parameters in their correct order, + and is combined with the :attr:`_sql.Compiled.params` dictionary to + render parameters. + + This sequence always contains the unescaped name of the parameters. + + .. seealso:: + + :ref:`faq_sql_expression_string` - includes a usage example for + debugging use cases. + + """ + _values_bindparam: Optional[List[str]] = None + + _visited_bindparam: Optional[List[str]] = None + + inline: bool = False + + ctes: Optional[MutableMapping[CTE, str]] + + # Detect same CTE references - Dict[(level, name), cte] + # Level is required for supporting nesting + ctes_by_level_name: Dict[Tuple[int, str], CTE] + + # To retrieve key/level in ctes_by_level_name - + # Dict[cte_reference, (level, cte_name, cte_opts)] + level_name_by_cte: Dict[CTE, Tuple[int, str, selectable._CTEOpts]] + + ctes_recursive: bool + + _post_compile_pattern = re.compile(r"__\[POSTCOMPILE_(\S+?)(~~.+?~~)?\]") + _pyformat_pattern = re.compile(r"%\(([^)]+?)\)s") + _positional_pattern = re.compile( + f"{_pyformat_pattern.pattern}|{_post_compile_pattern.pattern}" + ) + + @classmethod + def _init_compiler_cls(cls): + cls._init_bind_translate() + + @classmethod + def _init_bind_translate(cls): + reg = re.escape("".join(cls.bindname_escape_characters)) + cls._bind_translate_re = re.compile(f"[{reg}]") + cls._bind_translate_chars = cls.bindname_escape_characters + + def __init__( + self, + dialect: Dialect, + statement: Optional[ClauseElement], + cache_key: Optional[CacheKey] = None, + column_keys: Optional[Sequence[str]] = None, + for_executemany: bool = False, + linting: Linting = NO_LINTING, + _supporting_against: Optional[SQLCompiler] = None, + **kwargs: Any, + ): + """Construct a new :class:`.SQLCompiler` object. + + :param dialect: :class:`.Dialect` to be used + + :param statement: :class:`_expression.ClauseElement` to be compiled + + :param column_keys: a list of column names to be compiled into an + INSERT or UPDATE statement. + + :param for_executemany: whether INSERT / UPDATE statements should + expect that they are to be invoked in an "executemany" style, + which may impact how the statement will be expected to return the + values of defaults and autoincrement / sequences and similar. + Depending on the backend and driver in use, support for retrieving + these values may be disabled which means SQL expressions may + be rendered inline, RETURNING may not be rendered, etc. + + :param kwargs: additional keyword arguments to be consumed by the + superclass. 
+ + """ + self.column_keys = column_keys + + self.cache_key = cache_key + + if cache_key: + cksm = {b.key: b for b in cache_key[1]} + ckbm = {b: [b] for b in cache_key[1]} + self._cache_key_bind_match = (ckbm, cksm) + + # compile INSERT/UPDATE defaults/sequences to expect executemany + # style execution, which may mean no pre-execute of defaults, + # or no RETURNING + self.for_executemany = for_executemany + + self.linting = linting + + # a dictionary of bind parameter keys to BindParameter + # instances. + self.binds = {} + + # a dictionary of BindParameter instances to "compiled" names + # that are actually present in the generated SQL + self.bind_names = util.column_dict() + + # stack which keeps track of nested SELECT statements + self.stack = [] + + self._result_columns = [] + + # true if the paramstyle is positional + self.positional = dialect.positional + if self.positional: + self._numeric_binds = nb = dialect.paramstyle.startswith("numeric") + if nb: + self._numeric_binds_identifier_char = ( + "$" if dialect.paramstyle == "numeric_dollar" else ":" + ) + + self.compilation_bindtemplate = _pyformat_template + else: + self.compilation_bindtemplate = BIND_TEMPLATES[dialect.paramstyle] + + self.ctes = None + + self.label_length = ( + dialect.label_length or dialect.max_identifier_length + ) + + # a map which tracks "anonymous" identifiers that are created on + # the fly here + self.anon_map = prefix_anon_map() + + # a map which tracks "truncated" names based on + # dialect.label_length or dialect.max_identifier_length + self.truncated_names: Dict[Tuple[str, str], str] = {} + self._truncated_counters: Dict[str, int] = {} + + Compiled.__init__(self, dialect, statement, **kwargs) + + if self.isinsert or self.isupdate or self.isdelete: + if TYPE_CHECKING: + assert isinstance(statement, UpdateBase) + + if self.isinsert or self.isupdate: + if TYPE_CHECKING: + assert isinstance(statement, ValuesBase) + if statement._inline: + self.inline = True + elif self.for_executemany and ( + not self.isinsert + or ( + self.dialect.insert_executemany_returning + and statement._return_defaults + ) + ): + self.inline = True + + self.bindtemplate = BIND_TEMPLATES[dialect.paramstyle] + + if _supporting_against: + self.__dict__.update( + { + k: v + for k, v in _supporting_against.__dict__.items() + if k + not in { + "state", + "dialect", + "preparer", + "positional", + "_numeric_binds", + "compilation_bindtemplate", + "bindtemplate", + } + } + ) + + if self.state is CompilerState.STRING_APPLIED: + if self.positional: + if self._numeric_binds: + self._process_numeric() + else: + self._process_positional() + + if self._render_postcompile: + parameters = self.construct_params( + escape_names=False, + _no_postcompile=True, + ) + + self._process_parameters_for_postcompile( + parameters, _populate_self=True + ) + + @property + def insert_single_values_expr(self) -> Optional[str]: + """When an INSERT is compiled with a single set of parameters inside + a VALUES expression, the string is assigned here, where it can be + used for insert batching schemes to rewrite the VALUES expression. + + .. versionadded:: 1.3.8 + + .. versionchanged:: 2.0 This collection is no longer used by + SQLAlchemy's built-in dialects, in favor of the currently + internal ``_insertmanyvalues`` collection that is used only by + :class:`.SQLCompiler`. 
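Illustrative sketch (editorial aside, not part of the vendored file): for a single-row INSERT, the parenthesised VALUES portion described above is the string that the insertmanyvalues machinery later searches for and repeats when batching.

from sqlalchemy import Column, Integer, MetaData, Table, insert

t = Table("t", MetaData(), Column("id", Integer, primary_key=True), Column("x", Integer))
print(insert(t))  # INSERT INTO t (id, x) VALUES (:id, :x)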
+ + """ + if self._insertmanyvalues is None: + return None + else: + return self._insertmanyvalues.single_values_expr + + @util.ro_memoized_property + def effective_returning(self) -> Optional[Sequence[ColumnElement[Any]]]: + """The effective "returning" columns for INSERT, UPDATE or DELETE. + + This is either the so-called "implicit returning" columns which are + calculated by the compiler on the fly, or those present based on what's + present in ``self.statement._returning`` (expanded into individual + columns using the ``._all_selected_columns`` attribute) i.e. those set + explicitly using the :meth:`.UpdateBase.returning` method. + + .. versionadded:: 2.0 + + """ + if self.implicit_returning: + return self.implicit_returning + elif self.statement is not None and is_dml(self.statement): + return [ + c + for c in self.statement._all_selected_columns + if is_column_element(c) + ] + + else: + return None + + @property + def returning(self): + """backwards compatibility; returns the + effective_returning collection. + + """ + return self.effective_returning + + @property + def current_executable(self): + """Return the current 'executable' that is being compiled. + + This is currently the :class:`_sql.Select`, :class:`_sql.Insert`, + :class:`_sql.Update`, :class:`_sql.Delete`, + :class:`_sql.CompoundSelect` object that is being compiled. + Specifically it's assigned to the ``self.stack`` list of elements. + + When a statement like the above is being compiled, it normally + is also assigned to the ``.statement`` attribute of the + :class:`_sql.Compiler` object. However, all SQL constructs are + ultimately nestable, and this attribute should never be consulted + by a ``visit_`` method, as it is not guaranteed to be assigned + nor guaranteed to correspond to the current statement being compiled. + + .. versionadded:: 1.3.21 + + For compatibility with previous versions, use the following + recipe:: + + statement = getattr(self, "current_executable", False) + if statement is False: + statement = self.stack[-1]["selectable"] + + For versions 1.4 and above, ensure only .current_executable + is used; the format of "self.stack" may change. + + + """ + try: + return self.stack[-1]["selectable"] + except IndexError as ie: + raise IndexError("Compiler does not have a stack entry") from ie + + @property + def prefetch(self): + return list(self.insert_prefetch) + list(self.update_prefetch) + + @util.memoized_property + def _global_attributes(self) -> Dict[Any, Any]: + return {} + + @util.memoized_instancemethod + def _init_cte_state(self) -> MutableMapping[CTE, str]: + """Initialize collections related to CTEs only if + a CTE is located, to save on the overhead of + these collections otherwise. 
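Illustrative sketch (editorial aside, not part of the vendored file): the CTE collections initialised here only come into play when a statement actually contains a CTE, for example:

from sqlalchemy import column, select, table

t = table("t", column("x"))
base = select(t.c.x).where(t.c.x > 0).cte("base")
# renders a WITH clause: WITH base AS (SELECT ...) SELECT base.x FROM base
print(select(base.c.x))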
+ + """ + # collect CTEs to tack on top of a SELECT + # To store the query to print - Dict[cte, text_query] + ctes: MutableMapping[CTE, str] = util.OrderedDict() + self.ctes = ctes + + # Detect same CTE references - Dict[(level, name), cte] + # Level is required for supporting nesting + self.ctes_by_level_name = {} + + # To retrieve key/level in ctes_by_level_name - + # Dict[cte_reference, (level, cte_name, cte_opts)] + self.level_name_by_cte = {} + + self.ctes_recursive = False + + return ctes + + @contextlib.contextmanager + def _nested_result(self): + """special API to support the use case of 'nested result sets'""" + result_columns, ordered_columns = ( + self._result_columns, + self._ordered_columns, + ) + self._result_columns, self._ordered_columns = [], False + + try: + if self.stack: + entry = self.stack[-1] + entry["need_result_map_for_nested"] = True + else: + entry = None + yield self._result_columns, self._ordered_columns + finally: + if entry: + entry.pop("need_result_map_for_nested") + self._result_columns, self._ordered_columns = ( + result_columns, + ordered_columns, + ) + + def _process_positional(self): + assert not self.positiontup + assert self.state is CompilerState.STRING_APPLIED + assert not self._numeric_binds + + if self.dialect.paramstyle == "format": + placeholder = "%s" + else: + assert self.dialect.paramstyle == "qmark" + placeholder = "?" + + positions = [] + + def find_position(m: re.Match[str]) -> str: + normal_bind = m.group(1) + if normal_bind: + positions.append(normal_bind) + return placeholder + else: + # this a post-compile bind + positions.append(m.group(2)) + return m.group(0) + + self.string = re.sub( + self._positional_pattern, find_position, self.string + ) + + if self.escaped_bind_names: + reverse_escape = {v: k for k, v in self.escaped_bind_names.items()} + assert len(self.escaped_bind_names) == len(reverse_escape) + self.positiontup = [ + reverse_escape.get(name, name) for name in positions + ] + else: + self.positiontup = positions + + if self._insertmanyvalues: + positions = [] + + single_values_expr = re.sub( + self._positional_pattern, + find_position, + self._insertmanyvalues.single_values_expr, + ) + insert_crud_params = [ + ( + v[0], + v[1], + re.sub(self._positional_pattern, find_position, v[2]), + v[3], + ) + for v in self._insertmanyvalues.insert_crud_params + ] + + self._insertmanyvalues = self._insertmanyvalues._replace( + single_values_expr=single_values_expr, + insert_crud_params=insert_crud_params, + ) + + def _process_numeric(self): + assert self._numeric_binds + assert self.state is CompilerState.STRING_APPLIED + + num = 1 + param_pos: Dict[str, str] = {} + order: Iterable[str] + if self._insertmanyvalues and self._values_bindparam is not None: + # bindparams that are not in values are always placed first. + # this avoids the need of changing them when using executemany + # values () () + order = itertools.chain( + ( + name + for name in self.bind_names.values() + if name not in self._values_bindparam + ), + self.bind_names.values(), + ) + else: + order = self.bind_names.values() + + for bind_name in order: + if bind_name in param_pos: + continue + bind = self.binds[bind_name] + if ( + bind in self.post_compile_params + or bind in self.literal_execute_params + ): + # set to None to just mark the in positiontup, it will not + # be replaced below. 
+ param_pos[bind_name] = None # type: ignore + else: + ph = f"{self._numeric_binds_identifier_char}{num}" + num += 1 + param_pos[bind_name] = ph + + self.next_numeric_pos = num + + self.positiontup = list(param_pos) + if self.escaped_bind_names: + len_before = len(param_pos) + param_pos = { + self.escaped_bind_names.get(name, name): pos + for name, pos in param_pos.items() + } + assert len(param_pos) == len_before + + # Can't use format here since % chars are not escaped. + self.string = self._pyformat_pattern.sub( + lambda m: param_pos[m.group(1)], self.string + ) + + if self._insertmanyvalues: + single_values_expr = ( + # format is ok here since single_values_expr includes only + # place-holders + self._insertmanyvalues.single_values_expr + % param_pos + ) + insert_crud_params = [ + (v[0], v[1], "%s", v[3]) + for v in self._insertmanyvalues.insert_crud_params + ] + + self._insertmanyvalues = self._insertmanyvalues._replace( + # This has the numbers (:1, :2) + single_values_expr=single_values_expr, + # The single binds are instead %s so they can be formatted + insert_crud_params=insert_crud_params, + ) + + @util.memoized_property + def _bind_processors( + self, + ) -> MutableMapping[ + str, Union[_BindProcessorType[Any], Sequence[_BindProcessorType[Any]]] + ]: + # mypy is not able to see the two value types as the above Union, + # it just sees "object". don't know how to resolve + return { + key: value # type: ignore + for key, value in ( + ( + self.bind_names[bindparam], + ( + bindparam.type._cached_bind_processor(self.dialect) + if not bindparam.type._is_tuple_type + else tuple( + elem_type._cached_bind_processor(self.dialect) + for elem_type in cast( + TupleType, bindparam.type + ).types + ) + ), + ) + for bindparam in self.bind_names + ) + if value is not None + } + + def is_subquery(self): + return len(self.stack) > 1 + + @property + def sql_compiler(self) -> Self: + return self + + def construct_expanded_state( + self, + params: Optional[_CoreSingleExecuteParams] = None, + escape_names: bool = True, + ) -> ExpandedState: + """Return a new :class:`.ExpandedState` for a given parameter set. + + For queries that use "expanding" or other late-rendered parameters, + this method will provide for both the finalized SQL string as well + as the parameters that would be used for a particular parameter set. + + .. versionadded:: 2.0.0rc1 + + """ + parameters = self.construct_params( + params, + escape_names=escape_names, + _no_postcompile=True, + ) + return self._process_parameters_for_postcompile( + parameters, + ) + + def construct_params( + self, + params: Optional[_CoreSingleExecuteParams] = None, + extracted_parameters: Optional[Sequence[BindParameter[Any]]] = None, + escape_names: bool = True, + _group_number: Optional[int] = None, + _check: bool = True, + _no_postcompile: bool = False, + ) -> _MutableCoreSingleExecuteParams: + """return a dictionary of bind parameter keys and values""" + + if self._render_postcompile and not _no_postcompile: + assert self._post_compile_expanded_state is not None + if not params: + return dict(self._post_compile_expanded_state.parameters) + else: + raise exc.InvalidRequestError( + "can't construct new parameters when render_postcompile " + "is used; the statement is hard-linked to the original " + "parameters. Use construct_expanded_state to generate a " + "new statement and parameters." 
+ ) + + has_escaped_names = escape_names and bool(self.escaped_bind_names) + + if extracted_parameters: + # related the bound parameters collected in the original cache key + # to those collected in the incoming cache key. They will not have + # matching names but they will line up positionally in the same + # way. The parameters present in self.bind_names may be clones of + # these original cache key params in the case of DML but the .key + # will be guaranteed to match. + if self.cache_key is None: + raise exc.CompileError( + "This compiled object has no original cache key; " + "can't pass extracted_parameters to construct_params" + ) + else: + orig_extracted = self.cache_key[1] + + ckbm_tuple = self._cache_key_bind_match + assert ckbm_tuple is not None + ckbm, _ = ckbm_tuple + resolved_extracted = { + bind: extracted + for b, extracted in zip(orig_extracted, extracted_parameters) + for bind in ckbm[b] + } + else: + resolved_extracted = None + + if params: + pd = {} + for bindparam, name in self.bind_names.items(): + escaped_name = ( + self.escaped_bind_names.get(name, name) + if has_escaped_names + else name + ) + + if bindparam.key in params: + pd[escaped_name] = params[bindparam.key] + elif name in params: + pd[escaped_name] = params[name] + + elif _check and bindparam.required: + if _group_number: + raise exc.InvalidRequestError( + "A value is required for bind parameter %r, " + "in parameter group %d" + % (bindparam.key, _group_number), + code="cd3x", + ) + else: + raise exc.InvalidRequestError( + "A value is required for bind parameter %r" + % bindparam.key, + code="cd3x", + ) + else: + if resolved_extracted: + value_param = resolved_extracted.get( + bindparam, bindparam + ) + else: + value_param = bindparam + + if bindparam.callable: + pd[escaped_name] = value_param.effective_value + else: + pd[escaped_name] = value_param.value + return pd + else: + pd = {} + for bindparam, name in self.bind_names.items(): + escaped_name = ( + self.escaped_bind_names.get(name, name) + if has_escaped_names + else name + ) + + if _check and bindparam.required: + if _group_number: + raise exc.InvalidRequestError( + "A value is required for bind parameter %r, " + "in parameter group %d" + % (bindparam.key, _group_number), + code="cd3x", + ) + else: + raise exc.InvalidRequestError( + "A value is required for bind parameter %r" + % bindparam.key, + code="cd3x", + ) + + if resolved_extracted: + value_param = resolved_extracted.get(bindparam, bindparam) + else: + value_param = bindparam + + if bindparam.callable: + pd[escaped_name] = value_param.effective_value + else: + pd[escaped_name] = value_param.value + + return pd + + @util.memoized_instancemethod + def _get_set_input_sizes_lookup(self): + dialect = self.dialect + + include_types = dialect.include_set_input_sizes + exclude_types = dialect.exclude_set_input_sizes + + dbapi = dialect.dbapi + + def lookup_type(typ): + dbtype = typ._unwrapped_dialect_impl(dialect).get_dbapi_type(dbapi) + + if ( + dbtype is not None + and (exclude_types is None or dbtype not in exclude_types) + and (include_types is None or dbtype in include_types) + ): + return dbtype + else: + return None + + inputsizes = {} + + literal_execute_params = self.literal_execute_params + + for bindparam in self.bind_names: + if bindparam in literal_execute_params: + continue + + if bindparam.type._is_tuple_type: + inputsizes[bindparam] = [ + lookup_type(typ) + for typ in cast(TupleType, bindparam.type).types + ] + else: + inputsizes[bindparam] = lookup_type(bindparam.type) + + return 
inputsizes + + @property + def params(self): + """Return the bind param dictionary embedded into this + compiled object, for those values that are present. + + .. seealso:: + + :ref:`faq_sql_expression_string` - includes a usage example for + debugging use cases. + + """ + return self.construct_params(_check=False) + + def _process_parameters_for_postcompile( + self, + parameters: _MutableCoreSingleExecuteParams, + _populate_self: bool = False, + ) -> ExpandedState: + """handle special post compile parameters. + + These include: + + * "expanding" parameters -typically IN tuples that are rendered + on a per-parameter basis for an otherwise fixed SQL statement string. + + * literal_binds compiled with the literal_execute flag. Used for + things like SQL Server "TOP N" where the driver does not accommodate + N as a bound parameter. + + """ + + expanded_parameters = {} + new_positiontup: Optional[List[str]] + + pre_expanded_string = self._pre_expanded_string + if pre_expanded_string is None: + pre_expanded_string = self.string + + if self.positional: + new_positiontup = [] + + pre_expanded_positiontup = self._pre_expanded_positiontup + if pre_expanded_positiontup is None: + pre_expanded_positiontup = self.positiontup + + else: + new_positiontup = pre_expanded_positiontup = None + + processors = self._bind_processors + single_processors = cast( + "Mapping[str, _BindProcessorType[Any]]", processors + ) + tuple_processors = cast( + "Mapping[str, Sequence[_BindProcessorType[Any]]]", processors + ) + + new_processors: Dict[str, _BindProcessorType[Any]] = {} + + replacement_expressions: Dict[str, Any] = {} + to_update_sets: Dict[str, Any] = {} + + # notes: + # *unescaped* parameter names in: + # self.bind_names, self.binds, self._bind_processors, self.positiontup + # + # *escaped* parameter names in: + # construct_params(), replacement_expressions + + numeric_positiontup: Optional[List[str]] = None + + if self.positional and pre_expanded_positiontup is not None: + names: Iterable[str] = pre_expanded_positiontup + if self._numeric_binds: + numeric_positiontup = [] + else: + names = self.bind_names.values() + + ebn = self.escaped_bind_names + for name in names: + escaped_name = ebn.get(name, name) if ebn else name + parameter = self.binds[name] + + if parameter in self.literal_execute_params: + if escaped_name not in replacement_expressions: + replacement_expressions[escaped_name] = ( + self.render_literal_bindparam( + parameter, + render_literal_value=parameters.pop(escaped_name), + ) + ) + continue + + if parameter in self.post_compile_params: + if escaped_name in replacement_expressions: + to_update = to_update_sets[escaped_name] + values = None + else: + # we are removing the parameter from parameters + # because it is a list value, which is not expected by + # TypeEngine objects that would otherwise be asked to + # process it. the single name is being replaced with + # individual numbered parameters for each value in the + # param. + # + # note we are also inserting *escaped* parameter names + # into the given dictionary. default dialect will + # use these param names directly as they will not be + # in the escaped_bind_names dictionary. 
+ values = parameters.pop(name) + + leep_res = self._literal_execute_expanding_parameter( + escaped_name, parameter, values + ) + (to_update, replacement_expr) = leep_res + + to_update_sets[escaped_name] = to_update + replacement_expressions[escaped_name] = replacement_expr + + if not parameter.literal_execute: + parameters.update(to_update) + if parameter.type._is_tuple_type: + assert values is not None + new_processors.update( + ( + "%s_%s_%s" % (name, i, j), + tuple_processors[name][j - 1], + ) + for i, tuple_element in enumerate(values, 1) + for j, _ in enumerate(tuple_element, 1) + if name in tuple_processors + and tuple_processors[name][j - 1] is not None + ) + else: + new_processors.update( + (key, single_processors[name]) + for key, _ in to_update + if name in single_processors + ) + if numeric_positiontup is not None: + numeric_positiontup.extend( + name for name, _ in to_update + ) + elif new_positiontup is not None: + # to_update has escaped names, but that's ok since + # these are new names, that aren't in the + # escaped_bind_names dict. + new_positiontup.extend(name for name, _ in to_update) + expanded_parameters[name] = [ + expand_key for expand_key, _ in to_update + ] + elif new_positiontup is not None: + new_positiontup.append(name) + + def process_expanding(m): + key = m.group(1) + expr = replacement_expressions[key] + + # if POSTCOMPILE included a bind_expression, render that + # around each element + if m.group(2): + tok = m.group(2).split("~~") + be_left, be_right = tok[1], tok[3] + expr = ", ".join( + "%s%s%s" % (be_left, exp, be_right) + for exp in expr.split(", ") + ) + return expr + + statement = re.sub( + self._post_compile_pattern, process_expanding, pre_expanded_string + ) + + if numeric_positiontup is not None: + assert new_positiontup is not None + param_pos = { + key: f"{self._numeric_binds_identifier_char}{num}" + for num, key in enumerate( + numeric_positiontup, self.next_numeric_pos + ) + } + # Can't use format here since % chars are not escaped. + statement = self._pyformat_pattern.sub( + lambda m: param_pos[m.group(1)], statement + ) + new_positiontup.extend(numeric_positiontup) + + expanded_state = ExpandedState( + statement, + parameters, + new_processors, + new_positiontup, + expanded_parameters, + ) + + if _populate_self: + # this is for the "render_postcompile" flag, which is not + # otherwise used internally and is for end-user debugging and + # special use cases. 
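Illustrative sketch (editorial aside, not part of the vendored file): the render_postcompile path referenced in the comment above can be exercised from user code to see an expanding IN parameter rendered as individual placeholders.

from sqlalchemy import column, select

stmt = select(column("x")).where(column("x").in_([1, 2, 3]))
compiled = stmt.compile(compile_kwargs={"render_postcompile": True})
print(compiled.string)  # IN list expanded to individual placeholders, e.g. (:x_1_1, :x_1_2, :x_1_3)
print(compiled.params)  # {'x_1_1': 1, 'x_1_2': 2, 'x_1_3': 3}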
+ self._pre_expanded_string = pre_expanded_string + self._pre_expanded_positiontup = pre_expanded_positiontup + self.string = expanded_state.statement + self.positiontup = ( + list(expanded_state.positiontup or ()) + if self.positional + else None + ) + self._post_compile_expanded_state = expanded_state + + return expanded_state + + @util.preload_module("sqlalchemy.engine.cursor") + def _create_result_map(self): + """utility method used for unit tests only.""" + cursor = util.preloaded.engine_cursor + return cursor.CursorResultMetaData._create_description_match_map( + self._result_columns + ) + + # assigned by crud.py for insert/update statements + _get_bind_name_for_col: _BindNameForColProtocol + + @util.memoized_property + def _within_exec_param_key_getter(self) -> Callable[[Any], str]: + getter = self._get_bind_name_for_col + return getter + + @util.memoized_property + @util.preload_module("sqlalchemy.engine.result") + def _inserted_primary_key_from_lastrowid_getter(self): + result = util.preloaded.engine_result + + param_key_getter = self._within_exec_param_key_getter + + assert self.compile_state is not None + statement = self.compile_state.statement + + if TYPE_CHECKING: + assert isinstance(statement, Insert) + + table = statement.table + + getters = [ + (operator.methodcaller("get", param_key_getter(col), None), col) + for col in table.primary_key + ] + + autoinc_getter = None + autoinc_col = table._autoincrement_column + if autoinc_col is not None: + # apply type post processors to the lastrowid + lastrowid_processor = autoinc_col.type._cached_result_processor( + self.dialect, None + ) + autoinc_key = param_key_getter(autoinc_col) + + # if a bind value is present for the autoincrement column + # in the parameters, we need to do the logic dictated by + # #7998; honor a non-None user-passed parameter over lastrowid. + # previously in the 1.4 series we weren't fetching lastrowid + # at all if the key were present in the parameters + if autoinc_key in self.binds: + + def _autoinc_getter(lastrowid, parameters): + param_value = parameters.get(autoinc_key, lastrowid) + if param_value is not None: + # they supplied non-None parameter, use that. + # SQLite at least is observed to return the wrong + # cursor.lastrowid for INSERT..ON CONFLICT so it + # can't be used in all cases + return param_value + else: + # use lastrowid + return lastrowid + + # work around mypy https://github.com/python/mypy/issues/14027 + autoinc_getter = _autoinc_getter + + else: + lastrowid_processor = None + + row_fn = result.result_tuple([col.key for col in table.primary_key]) + + def get(lastrowid, parameters): + """given cursor.lastrowid value and the parameters used for INSERT, + return a "row" that represents the primary key, either by + using the "lastrowid" or by extracting values from the parameters + that were sent along with the INSERT. 
+ + """ + if lastrowid_processor is not None: + lastrowid = lastrowid_processor(lastrowid) + + if lastrowid is None: + return row_fn(getter(parameters) for getter, col in getters) + else: + return row_fn( + ( + ( + autoinc_getter(lastrowid, parameters) + if autoinc_getter is not None + else lastrowid + ) + if col is autoinc_col + else getter(parameters) + ) + for getter, col in getters + ) + + return get + + @util.memoized_property + @util.preload_module("sqlalchemy.engine.result") + def _inserted_primary_key_from_returning_getter(self): + if typing.TYPE_CHECKING: + from ..engine import result + else: + result = util.preloaded.engine_result + + assert self.compile_state is not None + statement = self.compile_state.statement + + if TYPE_CHECKING: + assert isinstance(statement, Insert) + + param_key_getter = self._within_exec_param_key_getter + table = statement.table + + returning = self.implicit_returning + assert returning is not None + ret = {col: idx for idx, col in enumerate(returning)} + + getters = cast( + "List[Tuple[Callable[[Any], Any], bool]]", + [ + ( + (operator.itemgetter(ret[col]), True) + if col in ret + else ( + operator.methodcaller( + "get", param_key_getter(col), None + ), + False, + ) + ) + for col in table.primary_key + ], + ) + + row_fn = result.result_tuple([col.key for col in table.primary_key]) + + def get(row, parameters): + return row_fn( + getter(row) if use_row else getter(parameters) + for getter, use_row in getters + ) + + return get + + def default_from(self) -> str: + """Called when a SELECT statement has no froms, and no FROM clause is + to be appended. + + Gives Oracle Database a chance to tack on a ``FROM DUAL`` to the string + output. + + """ + return "" + + def visit_override_binds(self, override_binds, **kw): + """SQL compile the nested element of an _OverrideBinds with + bindparams swapped out. + + The _OverrideBinds is not normally expected to be compiled; it + is meant to be used when an already cached statement is to be used, + the compilation was already performed, and only the bound params should + be swapped in at execution time. + + However, there are test cases that exercise this object, and + additionally the ORM subquery loader is known to feed in expressions + which include this construct into new queries (discovered in #11173), + so it has to do the right thing at compile time as well. + + """ + + # get SQL text first + sqltext = override_binds.element._compiler_dispatch(self, **kw) + + # for a test compile that is not for caching, change binds after the + # fact. note that we don't try to + # swap the bindparam as we compile, because our element may be + # elsewhere in the statement already (e.g. a subquery or perhaps a + # CTE) and was already visited / compiled. See + # test_relationship_criteria.py -> + # test_selectinload_local_criteria_subquery + for k in override_binds.translate: + if k not in self.binds: + continue + bp = self.binds[k] + + # so this would work, just change the value of bp in place. + # but we don't want to mutate things outside.
+ # bp.value = override_binds.translate[bp.key] + # continue + + # instead, need to replace bp with new_bp or otherwise accommodate + # in all internal collections + new_bp = bp._with_value( + override_binds.translate[bp.key], + maintain_key=True, + required=False, + ) + + name = self.bind_names[bp] + self.binds[k] = self.binds[name] = new_bp + self.bind_names[new_bp] = name + self.bind_names.pop(bp, None) + + if bp in self.post_compile_params: + self.post_compile_params |= {new_bp} + if bp in self.literal_execute_params: + self.literal_execute_params |= {new_bp} + + ckbm_tuple = self._cache_key_bind_match + if ckbm_tuple: + ckbm, cksm = ckbm_tuple + for bp in bp._cloned_set: + if bp.key in cksm: + cb = cksm[bp.key] + ckbm[cb].append(new_bp) + + return sqltext + + def visit_grouping(self, grouping, asfrom=False, **kwargs): + return "(" + grouping.element._compiler_dispatch(self, **kwargs) + ")" + + def visit_select_statement_grouping(self, grouping, **kwargs): + return "(" + grouping.element._compiler_dispatch(self, **kwargs) + ")" + + def visit_label_reference( + self, element, within_columns_clause=False, **kwargs + ): + if self.stack and self.dialect.supports_simple_order_by_label: + try: + compile_state = cast( + "Union[SelectState, CompoundSelectState]", + self.stack[-1]["compile_state"], + ) + except KeyError as ke: + raise exc.CompileError( + "Can't resolve label reference for ORDER BY / " + "GROUP BY / DISTINCT etc." + ) from ke + + ( + with_cols, + only_froms, + only_cols, + ) = compile_state._label_resolve_dict + if within_columns_clause: + resolve_dict = only_froms + else: + resolve_dict = only_cols + + # this can be None in the case that a _label_reference() + # were subject to a replacement operation, in which case + # the replacement of the Label element may have changed + # to something else like a ColumnClause expression. + order_by_elem = element.element._order_by_label_element + + if ( + order_by_elem is not None + and order_by_elem.name in resolve_dict + and order_by_elem.shares_lineage( + resolve_dict[order_by_elem.name] + ) + ): + kwargs["render_label_as_label"] = ( + element.element._order_by_label_element + ) + return self.process( + element.element, + within_columns_clause=within_columns_clause, + **kwargs, + ) + + def visit_textual_label_reference( + self, element, within_columns_clause=False, **kwargs + ): + if not self.stack: + # compiling the element outside of the context of a SELECT + return self.process(element._text_clause) + + try: + compile_state = cast( + "Union[SelectState, CompoundSelectState]", + self.stack[-1]["compile_state"], + ) + except KeyError as ke: + coercions._no_text_coercion( + element.element, + extra=( + "Can't resolve label reference for ORDER BY / " + "GROUP BY / DISTINCT etc." + ), + exc_cls=exc.CompileError, + err=ke, + ) + + with_cols, only_froms, only_cols = compile_state._label_resolve_dict + try: + if within_columns_clause: + col = only_froms[element.element] + else: + col = with_cols[element.element] + except KeyError as err: + coercions._no_text_coercion( + element.element, + extra=( + "Can't resolve label reference for ORDER BY / " + "GROUP BY / DISTINCT etc." 
+ ), + exc_cls=exc.CompileError, + err=err, + ) + else: + kwargs["render_label_as_label"] = col + return self.process( + col, within_columns_clause=within_columns_clause, **kwargs + ) + + def visit_label( + self, + label, + add_to_result_map=None, + within_label_clause=False, + within_columns_clause=False, + render_label_as_label=None, + result_map_targets=(), + **kw, + ): + # only render labels within the columns clause + # or ORDER BY clause of a select. dialect-specific compilers + # can modify this behavior. + render_label_with_as = ( + within_columns_clause and not within_label_clause + ) + render_label_only = render_label_as_label is label + + if render_label_only or render_label_with_as: + if isinstance(label.name, elements._truncated_label): + labelname = self._truncated_identifier("colident", label.name) + else: + labelname = label.name + + if render_label_with_as: + if add_to_result_map is not None: + add_to_result_map( + labelname, + label.name, + (label, labelname) + label._alt_names + result_map_targets, + label.type, + ) + return ( + label.element._compiler_dispatch( + self, + within_columns_clause=True, + within_label_clause=True, + **kw, + ) + + OPERATORS[operators.as_] + + self.preparer.format_label(label, labelname) + ) + elif render_label_only: + return self.preparer.format_label(label, labelname) + else: + return label.element._compiler_dispatch( + self, within_columns_clause=False, **kw + ) + + def _fallback_column_name(self, column): + raise exc.CompileError( + "Cannot compile Column object until its 'name' is assigned." + ) + + def visit_lambda_element(self, element, **kw): + sql_element = element._resolved + return self.process(sql_element, **kw) + + def visit_column( + self, + column: ColumnClause[Any], + add_to_result_map: Optional[_ResultMapAppender] = None, + include_table: bool = True, + result_map_targets: Tuple[Any, ...] = (), + ambiguous_table_name_map: Optional[_AmbiguousTableNameMap] = None, + **kwargs: Any, + ) -> str: + name = orig_name = column.name + if name is None: + name = self._fallback_column_name(column) + + is_literal = column.is_literal + if not is_literal and isinstance(name, elements._truncated_label): + name = self._truncated_identifier("colident", name) + + if add_to_result_map is not None: + targets = (column, name, column.key) + result_map_targets + if column._tq_label: + targets += (column._tq_label,) + + add_to_result_map(name, orig_name, targets, column.type) + + if is_literal: + # note we are not currently accommodating for + # literal_column(quoted_name('ident', True)) here + name = self.escape_literal_column(name) + else: + name = self.preparer.quote(name) + table = column.table + if table is None or not include_table or not table.named_with_column: + return name + else: + effective_schema = self.preparer.schema_for_object(table) + + if effective_schema: + schema_prefix = ( + self.preparer.quote_schema(effective_schema) + "." + ) + else: + schema_prefix = "" + + if TYPE_CHECKING: + assert isinstance(table, NamedFromClause) + tablename = table.name + + if ( + not effective_schema + and ambiguous_table_name_map + and tablename in ambiguous_table_name_map + ): + tablename = ambiguous_table_name_map[tablename] + + if isinstance(tablename, elements._truncated_label): + tablename = self._truncated_identifier("alias", tablename) + + return schema_prefix + self.preparer.quote(tablename) + "." 
+ name + + def visit_collation(self, element, **kw): + return self.preparer.format_collation(element.collation) + + def visit_fromclause(self, fromclause, **kwargs): + return fromclause.name + + def visit_index(self, index, **kwargs): + return index.name + + def visit_typeclause(self, typeclause, **kw): + kw["type_expression"] = typeclause + kw["identifier_preparer"] = self.preparer + return self.dialect.type_compiler_instance.process( + typeclause.type, **kw + ) + + def post_process_text(self, text): + if self.preparer._double_percents: + text = text.replace("%", "%%") + return text + + def escape_literal_column(self, text): + if self.preparer._double_percents: + text = text.replace("%", "%%") + return text + + def visit_textclause(self, textclause, add_to_result_map=None, **kw): + def do_bindparam(m): + name = m.group(1) + if name in textclause._bindparams: + return self.process(textclause._bindparams[name], **kw) + else: + return self.bindparam_string(name, **kw) + + if not self.stack: + self.isplaintext = True + + if add_to_result_map: + # text() object is present in the columns clause of a + # select(). Add a no-name entry to the result map so that + # row[text()] produces a result + add_to_result_map(None, None, (textclause,), sqltypes.NULLTYPE) + + # un-escape any \:params + return BIND_PARAMS_ESC.sub( + lambda m: m.group(1), + BIND_PARAMS.sub( + do_bindparam, self.post_process_text(textclause.text) + ), + ) + + def visit_textual_select( + self, taf, compound_index=None, asfrom=False, **kw + ): + toplevel = not self.stack + entry = self._default_stack_entry if toplevel else self.stack[-1] + + new_entry: _CompilerStackEntry = { + "correlate_froms": set(), + "asfrom_froms": set(), + "selectable": taf, + } + self.stack.append(new_entry) + + if taf._independent_ctes: + self._dispatch_independent_ctes(taf, kw) + + populate_result_map = ( + toplevel + or ( + compound_index == 0 + and entry.get("need_result_map_for_compound", False) + ) + or entry.get("need_result_map_for_nested", False) + ) + + if populate_result_map: + self._ordered_columns = self._textual_ordered_columns = ( + taf.positional + ) + + # enable looser result column matching when the SQL text links to + # Column objects by name only + self._loose_column_name_matching = not taf.positional and bool( + taf.column_args + ) + + for c in taf.column_args: + self.process( + c, + within_columns_clause=True, + add_to_result_map=self._add_to_result_map, + ) + + text = self.process(taf.element, **kw) + if self.ctes: + nesting_level = len(self.stack) if not toplevel else None + text = self._render_cte_clause(nesting_level=nesting_level) + text + + self.stack.pop(-1) + + return text + + def visit_null(self, expr: Null, **kw: Any) -> str: + return "NULL" + + def visit_true(self, expr: True_, **kw: Any) -> str: + if self.dialect.supports_native_boolean: + return "true" + else: + return "1" + + def visit_false(self, expr: False_, **kw: Any) -> str: + if self.dialect.supports_native_boolean: + return "false" + else: + return "0" + + def _generate_delimited_list(self, elements, separator, **kw): + return separator.join( + s + for s in (c._compiler_dispatch(self, **kw) for c in elements) + if s + ) + + def _generate_delimited_and_list(self, clauses, **kw): + lcc, clauses = elements.BooleanClauseList._process_clauses_for_boolean( + operators.and_, + elements.True_._singleton, + elements.False_._singleton, + clauses, + ) + if lcc == 1: + return clauses[0]._compiler_dispatch(self, **kw) + else: + separator = OPERATORS[operators.and_] + return 
separator.join( + s + for s in (c._compiler_dispatch(self, **kw) for c in clauses) + if s + ) + + def visit_tuple(self, clauselist, **kw): + return "(%s)" % self.visit_clauselist(clauselist, **kw) + + def visit_clauselist(self, clauselist, **kw): + sep = clauselist.operator + if sep is None: + sep = " " + else: + sep = OPERATORS[clauselist.operator] + + return self._generate_delimited_list(clauselist.clauses, sep, **kw) + + def visit_expression_clauselist(self, clauselist, **kw): + operator_ = clauselist.operator + + disp = self._get_operator_dispatch( + operator_, "expression_clauselist", None + ) + if disp: + return disp(clauselist, operator_, **kw) + + try: + opstring = OPERATORS[operator_] + except KeyError as err: + raise exc.UnsupportedCompilationError(self, operator_) from err + else: + kw["_in_operator_expression"] = True + return self._generate_delimited_list( + clauselist.clauses, opstring, **kw + ) + + def visit_case(self, clause, **kwargs): + x = "CASE " + if clause.value is not None: + x += clause.value._compiler_dispatch(self, **kwargs) + " " + for cond, result in clause.whens: + x += ( + "WHEN " + + cond._compiler_dispatch(self, **kwargs) + + " THEN " + + result._compiler_dispatch(self, **kwargs) + + " " + ) + if clause.else_ is not None: + x += ( + "ELSE " + clause.else_._compiler_dispatch(self, **kwargs) + " " + ) + x += "END" + return x + + def visit_type_coerce(self, type_coerce, **kw): + return type_coerce.typed_expression._compiler_dispatch(self, **kw) + + def visit_cast(self, cast, **kwargs): + type_clause = cast.typeclause._compiler_dispatch(self, **kwargs) + match = re.match("(.*)( COLLATE .*)", type_clause) + return "CAST(%s AS %s)%s" % ( + cast.clause._compiler_dispatch(self, **kwargs), + match.group(1) if match else type_clause, + match.group(2) if match else "", + ) + + def _format_frame_clause(self, range_, **kw): + return "%s AND %s" % ( + ( + "UNBOUNDED PRECEDING" + if range_[0] is elements.RANGE_UNBOUNDED + else ( + "CURRENT ROW" + if range_[0] is elements.RANGE_CURRENT + else ( + "%s PRECEDING" + % ( + self.process( + elements.literal(abs(range_[0])), **kw + ), + ) + if range_[0] < 0 + else "%s FOLLOWING" + % (self.process(elements.literal(range_[0]), **kw),) + ) + ) + ), + ( + "UNBOUNDED FOLLOWING" + if range_[1] is elements.RANGE_UNBOUNDED + else ( + "CURRENT ROW" + if range_[1] is elements.RANGE_CURRENT + else ( + "%s PRECEDING" + % ( + self.process( + elements.literal(abs(range_[1])), **kw + ), + ) + if range_[1] < 0 + else "%s FOLLOWING" + % (self.process(elements.literal(range_[1]), **kw),) + ) + ) + ), + ) + + def visit_over(self, over, **kwargs): + text = over.element._compiler_dispatch(self, **kwargs) + if over.range_ is not None: + range_ = "RANGE BETWEEN %s" % self._format_frame_clause( + over.range_, **kwargs + ) + elif over.rows is not None: + range_ = "ROWS BETWEEN %s" % self._format_frame_clause( + over.rows, **kwargs + ) + elif over.groups is not None: + range_ = "GROUPS BETWEEN %s" % self._format_frame_clause( + over.groups, **kwargs + ) + else: + range_ = None + + return "%s OVER (%s)" % ( + text, + " ".join( + [ + "%s BY %s" + % (word, clause._compiler_dispatch(self, **kwargs)) + for word, clause in ( + ("PARTITION", over.partition_by), + ("ORDER", over.order_by), + ) + if clause is not None and len(clause) + ] + + ([range_] if range_ else []) + ), + ) + + def visit_withingroup(self, withingroup, **kwargs): + return "%s WITHIN GROUP (ORDER BY %s)" % ( + withingroup.element._compiler_dispatch(self, **kwargs), + 
withingroup.order_by._compiler_dispatch(self, **kwargs), + ) + + def visit_funcfilter(self, funcfilter, **kwargs): + return "%s FILTER (WHERE %s)" % ( + funcfilter.func._compiler_dispatch(self, **kwargs), + funcfilter.criterion._compiler_dispatch(self, **kwargs), + ) + + def visit_extract(self, extract, **kwargs): + field = self.extract_map.get(extract.field, extract.field) + return "EXTRACT(%s FROM %s)" % ( + field, + extract.expr._compiler_dispatch(self, **kwargs), + ) + + def visit_scalar_function_column(self, element, **kw): + compiled_fn = self.visit_function(element.fn, **kw) + compiled_col = self.visit_column(element, **kw) + return "(%s).%s" % (compiled_fn, compiled_col) + + def visit_function( + self, + func: Function[Any], + add_to_result_map: Optional[_ResultMapAppender] = None, + **kwargs: Any, + ) -> str: + if add_to_result_map is not None: + add_to_result_map(func.name, func.name, (func.name,), func.type) + + disp = getattr(self, "visit_%s_func" % func.name.lower(), None) + + text: str + + if disp: + text = disp(func, **kwargs) + else: + name = FUNCTIONS.get(func._deannotate().__class__, None) + if name: + if func._has_args: + name += "%(expr)s" + else: + name = func.name + name = ( + self.preparer.quote(name) + if self.preparer._requires_quotes_illegal_chars(name) + or isinstance(name, elements.quoted_name) + else name + ) + name = name + "%(expr)s" + text = ".".join( + [ + ( + self.preparer.quote(tok) + if self.preparer._requires_quotes_illegal_chars(tok) + or isinstance(name, elements.quoted_name) + else tok + ) + for tok in func.packagenames + ] + + [name] + ) % {"expr": self.function_argspec(func, **kwargs)} + + if func._with_ordinality: + text += " WITH ORDINALITY" + return text + + def visit_next_value_func(self, next_value, **kw): + return self.visit_sequence(next_value.sequence) + + def visit_sequence(self, sequence, **kw): + raise NotImplementedError( + "Dialect '%s' does not support sequence increments." 
+ % self.dialect.name + ) + + def function_argspec(self, func: Function[Any], **kwargs: Any) -> str: + return func.clause_expr._compiler_dispatch(self, **kwargs) + + def visit_compound_select( + self, cs, asfrom=False, compound_index=None, **kwargs + ): + toplevel = not self.stack + + compile_state = cs._compile_state_factory(cs, self, **kwargs) + + if toplevel and not self.compile_state: + self.compile_state = compile_state + + compound_stmt = compile_state.statement + + entry = self._default_stack_entry if toplevel else self.stack[-1] + need_result_map = toplevel or ( + not compound_index + and entry.get("need_result_map_for_compound", False) + ) + + # indicates there is already a CompoundSelect in play + if compound_index == 0: + entry["select_0"] = cs + + self.stack.append( + { + "correlate_froms": entry["correlate_froms"], + "asfrom_froms": entry["asfrom_froms"], + "selectable": cs, + "compile_state": compile_state, + "need_result_map_for_compound": need_result_map, + } + ) + + if compound_stmt._independent_ctes: + self._dispatch_independent_ctes(compound_stmt, kwargs) + + keyword = self.compound_keywords[cs.keyword] + + text = (" " + keyword + " ").join( + ( + c._compiler_dispatch( + self, asfrom=asfrom, compound_index=i, **kwargs + ) + for i, c in enumerate(cs.selects) + ) + ) + + kwargs["include_table"] = False + text += self.group_by_clause(cs, **dict(asfrom=asfrom, **kwargs)) + text += self.order_by_clause(cs, **kwargs) + if cs._has_row_limiting_clause: + text += self._row_limit_clause(cs, **kwargs) + + if self.ctes: + nesting_level = len(self.stack) if not toplevel else None + text = ( + self._render_cte_clause( + nesting_level=nesting_level, + include_following_stack=True, + ) + + text + ) + + self.stack.pop(-1) + return text + + def _row_limit_clause(self, cs, **kwargs): + if cs._fetch_clause is not None: + return self.fetch_clause(cs, **kwargs) + else: + return self.limit_clause(cs, **kwargs) + + def _get_operator_dispatch(self, operator_, qualifier1, qualifier2): + attrname = "visit_%s_%s%s" % ( + operator_.__name__, + qualifier1, + "_" + qualifier2 if qualifier2 else "", + ) + return getattr(self, attrname, None) + + def visit_unary( + self, unary, add_to_result_map=None, result_map_targets=(), **kw + ): + if add_to_result_map is not None: + result_map_targets += (unary,) + kw["add_to_result_map"] = add_to_result_map + kw["result_map_targets"] = result_map_targets + + if unary.operator: + if unary.modifier: + raise exc.CompileError( + "Unary expression does not support operator " + "and modifier simultaneously" + ) + disp = self._get_operator_dispatch( + unary.operator, "unary", "operator" + ) + if disp: + return disp(unary, unary.operator, **kw) + else: + return self._generate_generic_unary_operator( + unary, OPERATORS[unary.operator], **kw + ) + elif unary.modifier: + disp = self._get_operator_dispatch( + unary.modifier, "unary", "modifier" + ) + if disp: + return disp(unary, unary.modifier, **kw) + else: + return self._generate_generic_unary_modifier( + unary, OPERATORS[unary.modifier], **kw + ) + else: + raise exc.CompileError( + "Unary expression has no operator or modifier" + ) + + def visit_truediv_binary(self, binary, operator, **kw): + if self.dialect.div_is_floordiv: + return ( + self.process(binary.left, **kw) + + " / " + # TODO: would need a fast cast again here, + # unless we want to use an implicit cast like "+ 0.0" + + self.process( + elements.Cast( + binary.right, + ( + binary.right.type + if binary.right.type._type_affinity + is sqltypes.Numeric + else 
sqltypes.Numeric() + ), + ), + **kw, + ) + ) + else: + return ( + self.process(binary.left, **kw) + + " / " + + self.process(binary.right, **kw) + ) + + def visit_floordiv_binary(self, binary, operator, **kw): + if ( + self.dialect.div_is_floordiv + and binary.right.type._type_affinity is sqltypes.Integer + ): + return ( + self.process(binary.left, **kw) + + " / " + + self.process(binary.right, **kw) + ) + else: + return "FLOOR(%s)" % ( + self.process(binary.left, **kw) + + " / " + + self.process(binary.right, **kw) + ) + + def visit_is_true_unary_operator(self, element, operator, **kw): + if ( + element._is_implicitly_boolean + or self.dialect.supports_native_boolean + ): + return self.process(element.element, **kw) + else: + return "%s = 1" % self.process(element.element, **kw) + + def visit_is_false_unary_operator(self, element, operator, **kw): + if ( + element._is_implicitly_boolean + or self.dialect.supports_native_boolean + ): + return "NOT %s" % self.process(element.element, **kw) + else: + return "%s = 0" % self.process(element.element, **kw) + + def visit_not_match_op_binary(self, binary, operator, **kw): + return "NOT %s" % self.visit_binary( + binary, override_operator=operators.match_op + ) + + def visit_not_in_op_binary(self, binary, operator, **kw): + # The brackets are required in the NOT IN operation because the empty + # case is handled using the form "(col NOT IN (null) OR 1 = 1)". + # The presence of the OR makes the brackets required. + return "(%s)" % self._generate_generic_binary( + binary, OPERATORS[operator], **kw + ) + + def visit_empty_set_op_expr(self, type_, expand_op, **kw): + if expand_op is operators.not_in_op: + if len(type_) > 1: + return "(%s)) OR (1 = 1" % ( + ", ".join("NULL" for element in type_) + ) + else: + return "NULL) OR (1 = 1" + elif expand_op is operators.in_op: + if len(type_) > 1: + return "(%s)) AND (1 != 1" % ( + ", ".join("NULL" for element in type_) + ) + else: + return "NULL) AND (1 != 1" + else: + return self.visit_empty_set_expr(type_) + + def visit_empty_set_expr(self, element_types, **kw): + raise NotImplementedError( + "Dialect '%s' does not support empty set expression." + % self.dialect.name + ) + + def _literal_execute_expanding_parameter_literal_binds( + self, parameter, values, bind_expression_template=None + ): + typ_dialect_impl = parameter.type._unwrapped_dialect_impl(self.dialect) + + if not values: + # empty IN expression. note we don't need to use + # bind_expression_template here because there are no + # expressions to render. 
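+ # illustrative rendering: for a plain single-column type,
+ # visit_empty_set_op_expr() above yields fragments such as
+ # "NULL) AND (1 != 1" for IN and "NULL) OR (1 = 1" for NOT IN, which the
+ # surrounding parenthesization turns into e.g. "(NULL) AND (1 != 1)".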
+ + if typ_dialect_impl._is_tuple_type: + replacement_expression = ( + "VALUES " if self.dialect.tuple_in_values else "" + ) + self.visit_empty_set_op_expr( + parameter.type.types, parameter.expand_op + ) + + else: + replacement_expression = self.visit_empty_set_op_expr( + [parameter.type], parameter.expand_op + ) + + elif typ_dialect_impl._is_tuple_type or ( + typ_dialect_impl._isnull + and isinstance(values[0], collections_abc.Sequence) + and not isinstance(values[0], (str, bytes)) + ): + if typ_dialect_impl._has_bind_expression: + raise NotImplementedError( + "bind_expression() on TupleType not supported with " + "literal_binds" + ) + + replacement_expression = ( + "VALUES " if self.dialect.tuple_in_values else "" + ) + ", ".join( + "(%s)" + % ( + ", ".join( + self.render_literal_value(value, param_type) + for value, param_type in zip( + tuple_element, parameter.type.types + ) + ) + ) + for i, tuple_element in enumerate(values) + ) + else: + if bind_expression_template: + post_compile_pattern = self._post_compile_pattern + m = post_compile_pattern.search(bind_expression_template) + assert m and m.group( + 2 + ), "unexpected format for expanding parameter" + + tok = m.group(2).split("~~") + be_left, be_right = tok[1], tok[3] + replacement_expression = ", ".join( + "%s%s%s" + % ( + be_left, + self.render_literal_value(value, parameter.type), + be_right, + ) + for value in values + ) + else: + replacement_expression = ", ".join( + self.render_literal_value(value, parameter.type) + for value in values + ) + + return (), replacement_expression + + def _literal_execute_expanding_parameter(self, name, parameter, values): + if parameter.literal_execute: + return self._literal_execute_expanding_parameter_literal_binds( + parameter, values + ) + + dialect = self.dialect + typ_dialect_impl = parameter.type._unwrapped_dialect_impl(dialect) + + if self._numeric_binds: + bind_template = self.compilation_bindtemplate + else: + bind_template = self.bindtemplate + + if ( + self.dialect._bind_typing_render_casts + and typ_dialect_impl.render_bind_cast + ): + + def _render_bindtemplate(name): + return self.render_bind_cast( + parameter.type, + typ_dialect_impl, + bind_template % {"name": name}, + ) + + else: + + def _render_bindtemplate(name): + return bind_template % {"name": name} + + if not values: + to_update = [] + if typ_dialect_impl._is_tuple_type: + replacement_expression = self.visit_empty_set_op_expr( + parameter.type.types, parameter.expand_op + ) + else: + replacement_expression = self.visit_empty_set_op_expr( + [parameter.type], parameter.expand_op + ) + + elif typ_dialect_impl._is_tuple_type or ( + typ_dialect_impl._isnull + and isinstance(values[0], collections_abc.Sequence) + and not isinstance(values[0], (str, bytes)) + ): + assert not typ_dialect_impl._is_array + to_update = [ + ("%s_%s_%s" % (name, i, j), value) + for i, tuple_element in enumerate(values, 1) + for j, value in enumerate(tuple_element, 1) + ] + + replacement_expression = ( + "VALUES " if dialect.tuple_in_values else "" + ) + ", ".join( + "(%s)" + % ( + ", ".join( + _render_bindtemplate( + to_update[i * len(tuple_element) + j][0] + ) + for j, value in enumerate(tuple_element) + ) + ) + for i, tuple_element in enumerate(values) + ) + else: + to_update = [ + ("%s_%s" % (name, i), value) + for i, value in enumerate(values, 1) + ] + replacement_expression = ", ".join( + _render_bindtemplate(key) for key, value in to_update + ) + + return to_update, replacement_expression + + def visit_binary( + self, + binary, + 
override_operator=None, + eager_grouping=False, + from_linter=None, + lateral_from_linter=None, + **kw, + ): + if from_linter and operators.is_comparison(binary.operator): + if lateral_from_linter is not None: + enclosing_lateral = kw["enclosing_lateral"] + lateral_from_linter.edges.update( + itertools.product( + _de_clone( + binary.left._from_objects + [enclosing_lateral] + ), + _de_clone( + binary.right._from_objects + [enclosing_lateral] + ), + ) + ) + else: + from_linter.edges.update( + itertools.product( + _de_clone(binary.left._from_objects), + _de_clone(binary.right._from_objects), + ) + ) + + # don't allow "? = ?" to render + if ( + self.ansi_bind_rules + and isinstance(binary.left, elements.BindParameter) + and isinstance(binary.right, elements.BindParameter) + ): + kw["literal_execute"] = True + + operator_ = override_operator or binary.operator + disp = self._get_operator_dispatch(operator_, "binary", None) + if disp: + return disp(binary, operator_, **kw) + else: + try: + opstring = OPERATORS[operator_] + except KeyError as err: + raise exc.UnsupportedCompilationError(self, operator_) from err + else: + return self._generate_generic_binary( + binary, + opstring, + from_linter=from_linter, + lateral_from_linter=lateral_from_linter, + **kw, + ) + + def visit_function_as_comparison_op_binary(self, element, operator, **kw): + return self.process(element.sql_function, **kw) + + def visit_mod_binary(self, binary, operator, **kw): + if self.preparer._double_percents: + return ( + self.process(binary.left, **kw) + + " %% " + + self.process(binary.right, **kw) + ) + else: + return ( + self.process(binary.left, **kw) + + " % " + + self.process(binary.right, **kw) + ) + + def visit_custom_op_binary(self, element, operator, **kw): + kw["eager_grouping"] = operator.eager_grouping + return self._generate_generic_binary( + element, + " " + self.escape_literal_column(operator.opstring) + " ", + **kw, + ) + + def visit_custom_op_unary_operator(self, element, operator, **kw): + return self._generate_generic_unary_operator( + element, self.escape_literal_column(operator.opstring) + " ", **kw + ) + + def visit_custom_op_unary_modifier(self, element, operator, **kw): + return self._generate_generic_unary_modifier( + element, " " + self.escape_literal_column(operator.opstring), **kw + ) + + def _generate_generic_binary( + self, + binary: BinaryExpression[Any], + opstring: str, + eager_grouping: bool = False, + **kw: Any, + ) -> str: + _in_operator_expression = kw.get("_in_operator_expression", False) + + kw["_in_operator_expression"] = True + kw["_binary_op"] = binary.operator + text = ( + binary.left._compiler_dispatch( + self, eager_grouping=eager_grouping, **kw + ) + + opstring + + binary.right._compiler_dispatch( + self, eager_grouping=eager_grouping, **kw + ) + ) + + if _in_operator_expression and eager_grouping: + text = "(%s)" % text + return text + + def _generate_generic_unary_operator(self, unary, opstring, **kw): + return opstring + unary.element._compiler_dispatch(self, **kw) + + def _generate_generic_unary_modifier(self, unary, opstring, **kw): + return unary.element._compiler_dispatch(self, **kw) + opstring + + @util.memoized_property + def _like_percent_literal(self): + return elements.literal_column("'%'", type_=sqltypes.STRINGTYPE) + + def visit_ilike_case_insensitive_operand(self, element, **kw): + return f"lower({element.element._compiler_dispatch(self, **kw)})" + + def visit_contains_op_binary(self, binary, operator, **kw): + binary = binary._clone() + percent = 
self._like_percent_literal + binary.right = percent.concat(binary.right).concat(percent) + return self.visit_like_op_binary(binary, operator, **kw) + + def visit_not_contains_op_binary(self, binary, operator, **kw): + binary = binary._clone() + percent = self._like_percent_literal + binary.right = percent.concat(binary.right).concat(percent) + return self.visit_not_like_op_binary(binary, operator, **kw) + + def visit_icontains_op_binary(self, binary, operator, **kw): + binary = binary._clone() + percent = self._like_percent_literal + binary.left = ilike_case_insensitive(binary.left) + binary.right = percent.concat( + ilike_case_insensitive(binary.right) + ).concat(percent) + return self.visit_ilike_op_binary(binary, operator, **kw) + + def visit_not_icontains_op_binary(self, binary, operator, **kw): + binary = binary._clone() + percent = self._like_percent_literal + binary.left = ilike_case_insensitive(binary.left) + binary.right = percent.concat( + ilike_case_insensitive(binary.right) + ).concat(percent) + return self.visit_not_ilike_op_binary(binary, operator, **kw) + + def visit_startswith_op_binary(self, binary, operator, **kw): + binary = binary._clone() + percent = self._like_percent_literal + binary.right = percent._rconcat(binary.right) + return self.visit_like_op_binary(binary, operator, **kw) + + def visit_not_startswith_op_binary(self, binary, operator, **kw): + binary = binary._clone() + percent = self._like_percent_literal + binary.right = percent._rconcat(binary.right) + return self.visit_not_like_op_binary(binary, operator, **kw) + + def visit_istartswith_op_binary(self, binary, operator, **kw): + binary = binary._clone() + percent = self._like_percent_literal + binary.left = ilike_case_insensitive(binary.left) + binary.right = percent._rconcat(ilike_case_insensitive(binary.right)) + return self.visit_ilike_op_binary(binary, operator, **kw) + + def visit_not_istartswith_op_binary(self, binary, operator, **kw): + binary = binary._clone() + percent = self._like_percent_literal + binary.left = ilike_case_insensitive(binary.left) + binary.right = percent._rconcat(ilike_case_insensitive(binary.right)) + return self.visit_not_ilike_op_binary(binary, operator, **kw) + + def visit_endswith_op_binary(self, binary, operator, **kw): + binary = binary._clone() + percent = self._like_percent_literal + binary.right = percent.concat(binary.right) + return self.visit_like_op_binary(binary, operator, **kw) + + def visit_not_endswith_op_binary(self, binary, operator, **kw): + binary = binary._clone() + percent = self._like_percent_literal + binary.right = percent.concat(binary.right) + return self.visit_not_like_op_binary(binary, operator, **kw) + + def visit_iendswith_op_binary(self, binary, operator, **kw): + binary = binary._clone() + percent = self._like_percent_literal + binary.left = ilike_case_insensitive(binary.left) + binary.right = percent.concat(ilike_case_insensitive(binary.right)) + return self.visit_ilike_op_binary(binary, operator, **kw) + + def visit_not_iendswith_op_binary(self, binary, operator, **kw): + binary = binary._clone() + percent = self._like_percent_literal + binary.left = ilike_case_insensitive(binary.left) + binary.right = percent.concat(ilike_case_insensitive(binary.right)) + return self.visit_not_ilike_op_binary(binary, operator, **kw) + + def visit_like_op_binary(self, binary, operator, **kw): + escape = binary.modifiers.get("escape", None) + + return "%s LIKE %s" % ( + binary.left._compiler_dispatch(self, **kw), + binary.right._compiler_dispatch(self, **kw), 
+ ) + ( + " ESCAPE " + self.render_literal_value(escape, sqltypes.STRINGTYPE) + if escape is not None + else "" + ) + + def visit_not_like_op_binary(self, binary, operator, **kw): + escape = binary.modifiers.get("escape", None) + return "%s NOT LIKE %s" % ( + binary.left._compiler_dispatch(self, **kw), + binary.right._compiler_dispatch(self, **kw), + ) + ( + " ESCAPE " + self.render_literal_value(escape, sqltypes.STRINGTYPE) + if escape is not None + else "" + ) + + def visit_ilike_op_binary(self, binary, operator, **kw): + if operator is operators.ilike_op: + binary = binary._clone() + binary.left = ilike_case_insensitive(binary.left) + binary.right = ilike_case_insensitive(binary.right) + # else we assume ilower() has been applied + + return self.visit_like_op_binary(binary, operator, **kw) + + def visit_not_ilike_op_binary(self, binary, operator, **kw): + if operator is operators.not_ilike_op: + binary = binary._clone() + binary.left = ilike_case_insensitive(binary.left) + binary.right = ilike_case_insensitive(binary.right) + # else we assume ilower() has been applied + + return self.visit_not_like_op_binary(binary, operator, **kw) + + def visit_between_op_binary(self, binary, operator, **kw): + symmetric = binary.modifiers.get("symmetric", False) + return self._generate_generic_binary( + binary, " BETWEEN SYMMETRIC " if symmetric else " BETWEEN ", **kw + ) + + def visit_not_between_op_binary(self, binary, operator, **kw): + symmetric = binary.modifiers.get("symmetric", False) + return self._generate_generic_binary( + binary, + " NOT BETWEEN SYMMETRIC " if symmetric else " NOT BETWEEN ", + **kw, + ) + + def visit_regexp_match_op_binary( + self, binary: BinaryExpression[Any], operator: Any, **kw: Any + ) -> str: + raise exc.CompileError( + "%s dialect does not support regular expressions" + % self.dialect.name + ) + + def visit_not_regexp_match_op_binary( + self, binary: BinaryExpression[Any], operator: Any, **kw: Any + ) -> str: + raise exc.CompileError( + "%s dialect does not support regular expressions" + % self.dialect.name + ) + + def visit_regexp_replace_op_binary( + self, binary: BinaryExpression[Any], operator: Any, **kw: Any + ) -> str: + raise exc.CompileError( + "%s dialect does not support regular expression replacements" + % self.dialect.name + ) + + def visit_bindparam( + self, + bindparam, + within_columns_clause=False, + literal_binds=False, + skip_bind_expression=False, + literal_execute=False, + render_postcompile=False, + **kwargs, + ): + + if not skip_bind_expression: + impl = bindparam.type.dialect_impl(self.dialect) + if impl._has_bind_expression: + bind_expression = impl.bind_expression(bindparam) + wrapped = self.process( + bind_expression, + skip_bind_expression=True, + within_columns_clause=within_columns_clause, + literal_binds=literal_binds and not bindparam.expanding, + literal_execute=literal_execute, + render_postcompile=render_postcompile, + **kwargs, + ) + if bindparam.expanding: + # for postcompile w/ expanding, move the "wrapped" part + # of this into the inside + + m = re.match( + r"^(.*)\(__\[POSTCOMPILE_(\S+?)\]\)(.*)$", wrapped + ) + assert m, "unexpected format for expanding parameter" + wrapped = "(__[POSTCOMPILE_%s~~%s~~REPL~~%s~~])" % ( + m.group(2), + m.group(1), + m.group(3), + ) + + if literal_binds: + ret = self.render_literal_bindparam( + bindparam, + within_columns_clause=True, + bind_expression_template=wrapped, + **kwargs, + ) + return "(%s)" % ret + + return wrapped + + if not literal_binds: + literal_execute = ( + literal_execute + or 
bindparam.literal_execute + or (within_columns_clause and self.ansi_bind_rules) + ) + post_compile = literal_execute or bindparam.expanding + else: + post_compile = False + + if literal_binds: + ret = self.render_literal_bindparam( + bindparam, within_columns_clause=True, **kwargs + ) + if bindparam.expanding: + ret = "(%s)" % ret + return ret + + name = self._truncate_bindparam(bindparam) + + if name in self.binds: + existing = self.binds[name] + if existing is not bindparam: + if ( + (existing.unique or bindparam.unique) + and not existing.proxy_set.intersection( + bindparam.proxy_set + ) + and not existing._cloned_set.intersection( + bindparam._cloned_set + ) + ): + raise exc.CompileError( + "Bind parameter '%s' conflicts with " + "unique bind parameter of the same name" % name + ) + elif existing.expanding != bindparam.expanding: + raise exc.CompileError( + "Can't reuse bound parameter name '%s' in both " + "'expanding' (e.g. within an IN expression) and " + "non-expanding contexts. If this parameter is to " + "receive a list/array value, set 'expanding=True' on " + "it for expressions that aren't IN, otherwise use " + "a different parameter name." % (name,) + ) + elif existing._is_crud or bindparam._is_crud: + if existing._is_crud and bindparam._is_crud: + # TODO: this condition is not well understood. + # see tests in test/sql/test_update.py + raise exc.CompileError( + "Encountered unsupported case when compiling an " + "INSERT or UPDATE statement. If this is a " + "multi-table " + "UPDATE statement, please provide string-named " + "arguments to the " + "values() method with distinct names; support for " + "multi-table UPDATE statements that " + "target multiple tables for UPDATE is very " + "limited", + ) + else: + raise exc.CompileError( + f"bindparam() name '{bindparam.key}' is reserved " + "for automatic usage in the VALUES or SET " + "clause of this " + "insert/update statement. Please use a " + "name other than column name when using " + "bindparam() " + "with insert() or update() (for example, " + f"'b_{bindparam.key}')." + ) + + self.binds[bindparam.key] = self.binds[name] = bindparam + + # if we are given a cache key that we're going to match against, + # relate the bindparam here to one that is most likely present + # in the "extracted params" portion of the cache key. this is used + # to set up a positional mapping that is used to determine the + # correct parameters for a subsequent use of this compiled with + # a different set of parameter values. here, we accommodate for + # parameters that may have been cloned both before and after the cache + # key was generated.
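+ # in the block below, cksm maps a bindparam key to the bindparam held
+ # in the cache key's extracted parameters, and ckbm maps that cache-key
+ # bindparam to the bindparam objects encountered by this compile; this
+ # is how the positional mapping described above gets assembled.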
+ ckbm_tuple = self._cache_key_bind_match + + if ckbm_tuple: + ckbm, cksm = ckbm_tuple + for bp in bindparam._cloned_set: + if bp.key in cksm: + cb = cksm[bp.key] + ckbm[cb].append(bindparam) + + if bindparam.isoutparam: + self.has_out_parameters = True + + if post_compile: + if render_postcompile: + self._render_postcompile = True + + if literal_execute: + self.literal_execute_params |= {bindparam} + else: + self.post_compile_params |= {bindparam} + + ret = self.bindparam_string( + name, + post_compile=post_compile, + expanding=bindparam.expanding, + bindparam_type=bindparam.type, + **kwargs, + ) + + if bindparam.expanding: + ret = "(%s)" % ret + + return ret + + def render_bind_cast(self, type_, dbapi_type, sqltext): + raise NotImplementedError() + + def render_literal_bindparam( + self, + bindparam, + render_literal_value=NO_ARG, + bind_expression_template=None, + **kw, + ): + if render_literal_value is not NO_ARG: + value = render_literal_value + else: + if bindparam.value is None and bindparam.callable is None: + op = kw.get("_binary_op", None) + if op and op not in (operators.is_, operators.is_not): + util.warn_limited( + "Bound parameter '%s' rendering literal NULL in a SQL " + "expression; comparisons to NULL should not use " + "operators outside of 'is' or 'is not'", + (bindparam.key,), + ) + return self.process(sqltypes.NULLTYPE, **kw) + value = bindparam.effective_value + + if bindparam.expanding: + leep = self._literal_execute_expanding_parameter_literal_binds + to_update, replacement_expr = leep( + bindparam, + value, + bind_expression_template=bind_expression_template, + ) + return replacement_expr + else: + return self.render_literal_value(value, bindparam.type) + + def render_literal_value( + self, value: Any, type_: sqltypes.TypeEngine[Any] + ) -> str: + """Render the value of a bind parameter as a quoted literal. + + This is used for statement sections that do not accept bind parameters + on the target driver/database. + + This should be implemented by subclasses using the quoting services + of the DBAPI. + + """ + + if value is None and not type_.should_evaluate_none: + # issue #10535 - handle NULL in the compiler without placing + # this onto each type, except for "evaluate None" types + # (e.g. JSON) + return self.process(elements.Null._instance()) + + processor = type_._cached_literal_processor(self.dialect) + if processor: + try: + return processor(value) + except Exception as e: + raise exc.CompileError( + f"Could not render literal value " + f'"{sql_util._repr_single_value(value)}" ' + f"with datatype " + f"{type_}; see parent stack trace for " + "more detail." 
+ ) from e + + else: + raise exc.CompileError( + f"No literal value renderer is available for literal value " + f'"{sql_util._repr_single_value(value)}" ' + f"with datatype {type_}" + ) + + def _truncate_bindparam(self, bindparam): + if bindparam in self.bind_names: + return self.bind_names[bindparam] + + bind_name = bindparam.key + if isinstance(bind_name, elements._truncated_label): + bind_name = self._truncated_identifier("bindparam", bind_name) + + # add to bind_names for translation + self.bind_names[bindparam] = bind_name + + return bind_name + + def _truncated_identifier( + self, ident_class: str, name: _truncated_label + ) -> str: + if (ident_class, name) in self.truncated_names: + return self.truncated_names[(ident_class, name)] + + anonname = name.apply_map(self.anon_map) + + if len(anonname) > self.label_length - 6: + counter = self._truncated_counters.get(ident_class, 1) + truncname = ( + anonname[0 : max(self.label_length - 6, 0)] + + "_" + + hex(counter)[2:] + ) + self._truncated_counters[ident_class] = counter + 1 + else: + truncname = anonname + self.truncated_names[(ident_class, name)] = truncname + return truncname + + def _anonymize(self, name: str) -> str: + return name % self.anon_map + + def bindparam_string( + self, + name: str, + post_compile: bool = False, + expanding: bool = False, + escaped_from: Optional[str] = None, + bindparam_type: Optional[TypeEngine[Any]] = None, + accumulate_bind_names: Optional[Set[str]] = None, + visited_bindparam: Optional[List[str]] = None, + **kw: Any, + ) -> str: + # TODO: accumulate_bind_names is passed by crud.py to gather + # names on a per-value basis, visited_bindparam is passed by + # visit_insert() to collect all parameters in the statement. + # see if this gathering can be simplified somehow + if accumulate_bind_names is not None: + accumulate_bind_names.add(name) + if visited_bindparam is not None: + visited_bindparam.append(name) + + if not escaped_from: + if self._bind_translate_re.search(name): + # not quite the translate use case as we want to + # also get a quick boolean if we even found + # unusual characters in the name + new_name = self._bind_translate_re.sub( + lambda m: self._bind_translate_chars[m.group(0)], + name, + ) + escaped_from = name + name = new_name + + if escaped_from: + self.escaped_bind_names = self.escaped_bind_names.union( + {escaped_from: name} + ) + if post_compile: + ret = "__[POSTCOMPILE_%s]" % name + if expanding: + # for expanding, bound parameters or literal values will be + # rendered per item + return ret + + # otherwise, for non-expanding "literal execute", apply + # bind casts as determined by the datatype + if bindparam_type is not None: + type_impl = bindparam_type._unwrapped_dialect_impl( + self.dialect + ) + if type_impl.render_literal_cast: + ret = self.render_bind_cast(bindparam_type, type_impl, ret) + return ret + elif self.state is CompilerState.COMPILING: + ret = self.compilation_bindtemplate % {"name": name} + else: + ret = self.bindtemplate % {"name": name} + + if ( + bindparam_type is not None + and self.dialect._bind_typing_render_casts + ): + type_impl = bindparam_type._unwrapped_dialect_impl(self.dialect) + if type_impl.render_bind_cast: + ret = self.render_bind_cast(bindparam_type, type_impl, ret) + + return ret + + def _dispatch_independent_ctes(self, stmt, kw): + local_kw = kw.copy() + local_kw.pop("cte_opts", None) + for cte, opt in zip( + stmt._independent_ctes, stmt._independent_ctes_opts + ): + cte._compiler_dispatch(self, cte_opts=opt, **local_kw) + + def visit_cte( 
+ self, + cte: CTE, + asfrom: bool = False, + ashint: bool = False, + fromhints: Optional[_FromHintsType] = None, + visiting_cte: Optional[CTE] = None, + from_linter: Optional[FromLinter] = None, + cte_opts: selectable._CTEOpts = selectable._CTEOpts(False), + **kwargs: Any, + ) -> Optional[str]: + self_ctes = self._init_cte_state() + assert self_ctes is self.ctes + + kwargs["visiting_cte"] = cte + + cte_name = cte.name + + if isinstance(cte_name, elements._truncated_label): + cte_name = self._truncated_identifier("alias", cte_name) + + is_new_cte = True + embedded_in_current_named_cte = False + + _reference_cte = cte._get_reference_cte() + + nesting = cte.nesting or cte_opts.nesting + + # check for CTE already encountered + if _reference_cte in self.level_name_by_cte: + cte_level, _, existing_cte_opts = self.level_name_by_cte[ + _reference_cte + ] + assert _ == cte_name + + cte_level_name = (cte_level, cte_name) + existing_cte = self.ctes_by_level_name[cte_level_name] + + # check if we are receiving it here with a specific + # "nest_here" location; if so, move it to this location + + if cte_opts.nesting: + if existing_cte_opts.nesting: + raise exc.CompileError( + "CTE is stated as 'nest_here' in " + "more than one location" + ) + + old_level_name = (cte_level, cte_name) + cte_level = len(self.stack) if nesting else 1 + cte_level_name = new_level_name = (cte_level, cte_name) + + del self.ctes_by_level_name[old_level_name] + self.ctes_by_level_name[new_level_name] = existing_cte + self.level_name_by_cte[_reference_cte] = new_level_name + ( + cte_opts, + ) + + else: + cte_level = len(self.stack) if nesting else 1 + cte_level_name = (cte_level, cte_name) + + if cte_level_name in self.ctes_by_level_name: + existing_cte = self.ctes_by_level_name[cte_level_name] + else: + existing_cte = None + + if existing_cte is not None: + embedded_in_current_named_cte = visiting_cte is existing_cte + + # we've generated a same-named CTE that we are enclosed in, + # or this is the same CTE. just return the name. + if cte is existing_cte._restates or cte is existing_cte: + is_new_cte = False + elif existing_cte is cte._restates: + # we've generated a same-named CTE that is + # enclosed in us - we take precedence, so + # discard the text for the "inner". + del self_ctes[existing_cte] + + existing_cte_reference_cte = existing_cte._get_reference_cte() + + assert existing_cte_reference_cte is _reference_cte + assert existing_cte_reference_cte is existing_cte + + del self.level_name_by_cte[existing_cte_reference_cte] + else: + if ( + # if the two CTEs have the same hash, which we expect + # here means that one/both is an annotated of the other + (hash(cte) == hash(existing_cte)) + # or... + or ( + ( + # if they are clones, i.e. 
they came from the ORM + # or some other visit method + cte._is_clone_of is not None + or existing_cte._is_clone_of is not None + ) + # and are deep-copy identical + and cte.compare(existing_cte) + ) + ): + # then consider these two CTEs the same + is_new_cte = False + else: + # otherwise these are two CTEs that either will render + # differently, or were indicated separately by the user, + # with the same name + raise exc.CompileError( + "Multiple, unrelated CTEs found with " + "the same name: %r" % cte_name + ) + + if not asfrom and not is_new_cte: + return None + + if cte._cte_alias is not None: + pre_alias_cte = cte._cte_alias + cte_pre_alias_name = cte._cte_alias.name + if isinstance(cte_pre_alias_name, elements._truncated_label): + cte_pre_alias_name = self._truncated_identifier( + "alias", cte_pre_alias_name + ) + else: + pre_alias_cte = cte + cte_pre_alias_name = None + + if is_new_cte: + self.ctes_by_level_name[cte_level_name] = cte + self.level_name_by_cte[_reference_cte] = cte_level_name + ( + cte_opts, + ) + + if pre_alias_cte not in self.ctes: + self.visit_cte(pre_alias_cte, **kwargs) + + if not cte_pre_alias_name and cte not in self_ctes: + if cte.recursive: + self.ctes_recursive = True + text = self.preparer.format_alias(cte, cte_name) + if cte.recursive: + col_source = cte.element + + # TODO: can we get at the .columns_plus_names collection + # that is already (or will be?) generated for the SELECT + # rather than calling twice? + recur_cols = [ + # TODO: proxy_name is not technically safe, + # see test_cte-> + # test_with_recursive_no_name_currently_buggy. not + # clear what should be done with such a case + fallback_label_name or proxy_name + for ( + _, + proxy_name, + fallback_label_name, + c, + repeated, + ) in (col_source._generate_columns_plus_names(True)) + if not repeated + ] + + text += "(%s)" % ( + ", ".join( + self.preparer.format_label_name( + ident, anon_map=self.anon_map + ) + for ident in recur_cols + ) + ) + + assert kwargs.get("subquery", False) is False + + if not self.stack: + # toplevel, this is a stringify of the + # cte directly. just compile the inner + # the way alias() does. 
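+ # illustrative case: str(select(...).cte("x")) with no enclosing
+ # statement lands here and simply compiles the inner SELECT, mirroring
+ # how a bare alias() stringifies.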
+ return cte.element._compiler_dispatch( + self, asfrom=asfrom, **kwargs + ) + else: + prefixes = self._generate_prefixes( + cte, cte._prefixes, **kwargs + ) + inner = cte.element._compiler_dispatch( + self, asfrom=True, **kwargs + ) + + text += " AS %s\n(%s)" % (prefixes, inner) + + if cte._suffixes: + text += " " + self._generate_prefixes( + cte, cte._suffixes, **kwargs + ) + + self_ctes[cte] = text + + if asfrom: + if from_linter: + from_linter.froms[cte._de_clone()] = cte_name + + if not is_new_cte and embedded_in_current_named_cte: + return self.preparer.format_alias(cte, cte_name) + + if cte_pre_alias_name: + text = self.preparer.format_alias(cte, cte_pre_alias_name) + if self.preparer._requires_quotes(cte_name): + cte_name = self.preparer.quote(cte_name) + text += self.get_render_as_alias_suffix(cte_name) + return text + else: + return self.preparer.format_alias(cte, cte_name) + + return None + + def visit_table_valued_alias(self, element, **kw): + if element.joins_implicitly: + kw["from_linter"] = None + if element._is_lateral: + return self.visit_lateral(element, **kw) + else: + return self.visit_alias(element, **kw) + + def visit_table_valued_column(self, element, **kw): + return self.visit_column(element, **kw) + + def visit_alias( + self, + alias, + asfrom=False, + ashint=False, + iscrud=False, + fromhints=None, + subquery=False, + lateral=False, + enclosing_alias=None, + from_linter=None, + **kwargs, + ): + if lateral: + if "enclosing_lateral" not in kwargs: + # if lateral is set and enclosing_lateral is not + # present, we assume we are being called directly + # from visit_lateral() and we need to set enclosing_lateral. + assert alias._is_lateral + kwargs["enclosing_lateral"] = alias + + # for lateral objects, we track a second from_linter that is... + # lateral! to the level above us. 
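+ # assuming from_linter here is the cartesian-product linter, this lets
+ # comparisons rendered inside the LATERAL subquery record edges against
+ # the enclosing SELECT's FROM elements, so the correlation is not
+ # flagged as a cartesian product.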
+ if ( + from_linter + and "lateral_from_linter" not in kwargs + and "enclosing_lateral" in kwargs + ): + kwargs["lateral_from_linter"] = from_linter + + if enclosing_alias is not None and enclosing_alias.element is alias: + inner = alias.element._compiler_dispatch( + self, + asfrom=asfrom, + ashint=ashint, + iscrud=iscrud, + fromhints=fromhints, + lateral=lateral, + enclosing_alias=alias, + **kwargs, + ) + if subquery and (asfrom or lateral): + inner = "(%s)" % (inner,) + return inner + else: + kwargs["enclosing_alias"] = alias + + if asfrom or ashint: + if isinstance(alias.name, elements._truncated_label): + alias_name = self._truncated_identifier("alias", alias.name) + else: + alias_name = alias.name + + if ashint: + return self.preparer.format_alias(alias, alias_name) + elif asfrom: + if from_linter: + from_linter.froms[alias._de_clone()] = alias_name + + inner = alias.element._compiler_dispatch( + self, asfrom=True, lateral=lateral, **kwargs + ) + if subquery: + inner = "(%s)" % (inner,) + + ret = inner + self.get_render_as_alias_suffix( + self.preparer.format_alias(alias, alias_name) + ) + + if alias._supports_derived_columns and alias._render_derived: + ret += "(%s)" % ( + ", ".join( + "%s%s" + % ( + self.preparer.quote(col.name), + ( + " %s" + % self.dialect.type_compiler_instance.process( + col.type, **kwargs + ) + if alias._render_derived_w_types + else "" + ), + ) + for col in alias.c + ) + ) + + if fromhints and alias in fromhints: + ret = self.format_from_hint_text( + ret, alias, fromhints[alias], iscrud + ) + + return ret + else: + # note we cancel the "subquery" flag here as well + return alias.element._compiler_dispatch( + self, lateral=lateral, **kwargs + ) + + def visit_subquery(self, subquery, **kw): + kw["subquery"] = True + return self.visit_alias(subquery, **kw) + + def visit_lateral(self, lateral_, **kw): + kw["lateral"] = True + return "LATERAL %s" % self.visit_alias(lateral_, **kw) + + def visit_tablesample(self, tablesample, asfrom=False, **kw): + text = "%s TABLESAMPLE %s" % ( + self.visit_alias(tablesample, asfrom=True, **kw), + tablesample._get_method()._compiler_dispatch(self, **kw), + ) + + if tablesample.seed is not None: + text += " REPEATABLE (%s)" % ( + tablesample.seed._compiler_dispatch(self, **kw) + ) + + return text + + def _render_values(self, element, **kw): + kw.setdefault("literal_binds", element.literal_binds) + tuples = ", ".join( + self.process( + elements.Tuple( + types=element._column_types, *elem + ).self_group(), + **kw, + ) + for chunk in element._data + for elem in chunk + ) + return f"VALUES {tuples}" + + def visit_values(self, element, asfrom=False, from_linter=None, **kw): + v = self._render_values(element, **kw) + + if element._unnamed: + name = None + elif isinstance(element.name, elements._truncated_label): + name = self._truncated_identifier("values", element.name) + else: + name = element.name + + if element._is_lateral: + lateral = "LATERAL " + else: + lateral = "" + + if asfrom: + if from_linter: + from_linter.froms[element._de_clone()] = ( + name if name is not None else "(unnamed VALUES element)" + ) + + if name: + kw["include_table"] = False + v = "%s(%s)%s (%s)" % ( + lateral, + v, + self.get_render_as_alias_suffix(self.preparer.quote(name)), + ( + ", ".join( + c._compiler_dispatch(self, **kw) + for c in element.columns + ) + ), + ) + else: + v = "%s(%s)" % (lateral, v) + return v + + def visit_scalar_values(self, element, **kw): + return f"({self._render_values(element, **kw)})" + + def get_render_as_alias_suffix(self, 
alias_name_text): + return " AS " + alias_name_text + + def _add_to_result_map( + self, + keyname: str, + name: str, + objects: Tuple[Any, ...], + type_: TypeEngine[Any], + ) -> None: + + # note objects must be non-empty for cursor.py to handle the + # collection properly + assert objects + + if keyname is None or keyname == "*": + self._ordered_columns = False + self._ad_hoc_textual = True + if type_._is_tuple_type: + raise exc.CompileError( + "Most backends don't support SELECTing " + "from a tuple() object. If this is an ORM query, " + "consider using the Bundle object." + ) + self._result_columns.append( + ResultColumnsEntry(keyname, name, objects, type_) + ) + + def _label_returning_column( + self, stmt, column, populate_result_map, column_clause_args=None, **kw + ): + """Render a column with necessary labels inside of a RETURNING clause. + + This method is provided for individual dialects in place of calling + the _label_select_column method directly, so that the two use cases + of RETURNING vs. SELECT can be disambiguated going forward. + + .. versionadded:: 1.4.21 + + """ + return self._label_select_column( + None, + column, + populate_result_map, + False, + {} if column_clause_args is None else column_clause_args, + **kw, + ) + + def _label_select_column( + self, + select, + column, + populate_result_map, + asfrom, + column_clause_args, + name=None, + proxy_name=None, + fallback_label_name=None, + within_columns_clause=True, + column_is_repeated=False, + need_column_expressions=False, + include_table=True, + ): + """produce labeled columns present in a select().""" + impl = column.type.dialect_impl(self.dialect) + + if impl._has_column_expression and ( + need_column_expressions or populate_result_map + ): + col_expr = impl.column_expression(column) + else: + col_expr = column + + if populate_result_map: + # pass an "add_to_result_map" callable into the compilation + # of embedded columns. this collects information about the + # column as it will be fetched in the result and is coordinated + # with cursor.description when the query is executed. + add_to_result_map = self._add_to_result_map + + # if the SELECT statement told us this column is a repeat, + # wrap the callable with one that prevents the addition of the + # targets + if column_is_repeated: + _add_to_result_map = add_to_result_map + + def add_to_result_map(keyname, name, objects, type_): + _add_to_result_map(keyname, name, (keyname,), type_) + + # if we redefined col_expr for type expressions, wrap the + # callable with one that adds the original column to the targets + elif col_expr is not column: + _add_to_result_map = add_to_result_map + + def add_to_result_map(keyname, name, objects, type_): + _add_to_result_map( + keyname, name, (column,) + objects, type_ + ) + + else: + add_to_result_map = None + + # this method is used by some of the dialects for RETURNING, + # which has different inputs. _label_returning_column was added + # as the better target for this now however for 1.4 we will keep + # _label_select_column directly compatible with this use case. 
+ # these assertions right now set up the current expected inputs + assert within_columns_clause, ( + "_label_select_column is only relevant within " + "the columns clause of a SELECT or RETURNING" + ) + if isinstance(column, elements.Label): + if col_expr is not column: + result_expr = _CompileLabel( + col_expr, column.name, alt_names=(column.element,) + ) + else: + result_expr = col_expr + + elif name: + # here, _columns_plus_names has determined there's an explicit + # label name we need to use. this is the default for + # tablenames_plus_columnnames as well as when columns are being + # deduplicated on name + + assert ( + proxy_name is not None + ), "proxy_name is required if 'name' is passed" + + result_expr = _CompileLabel( + col_expr, + name, + alt_names=( + proxy_name, + # this is a hack to allow legacy result column lookups + # to work as they did before; this goes away in 2.0. + # TODO: this only seems to be tested indirectly + # via test/orm/test_deprecations.py. should be a + # resultset test for this + column._tq_label, + ), + ) + else: + # determine here whether this column should be rendered in + # a labelled context or not, as we were given no required label + # name from the caller. Here we apply heuristics based on the kind + # of SQL expression involved. + + if col_expr is not column: + # type-specific expression wrapping the given column, + # so we render a label + render_with_label = True + elif isinstance(column, elements.ColumnClause): + # table-bound column, we render its name as a label if we are + # inside of a subquery only + render_with_label = ( + asfrom + and not column.is_literal + and column.table is not None + ) + elif isinstance(column, elements.TextClause): + render_with_label = False + elif isinstance(column, elements.UnaryExpression): + render_with_label = column.wraps_column_expression or asfrom + elif ( + # general class of expressions that don't have a SQL-column + # addressible name. includes scalar selects, bind parameters, + # SQL functions, others + not isinstance(column, elements.NamedColumn) + # deeper check that indicates there's no natural "name" to + # this element, which accommodates for custom SQL constructs + # that might have a ".name" attribute (but aren't SQL + # functions) but are not implementing this more recently added + # base class. in theory the "NamedColumn" check should be + # enough, however here we seek to maintain legacy behaviors + # as well. + and column._non_anon_label is None + ): + render_with_label = True + else: + render_with_label = False + + if render_with_label: + if not fallback_label_name: + # used by the RETURNING case right now. 
we generate it + # here as 3rd party dialects may be referring to + # _label_select_column method directly instead of the + # just-added _label_returning_column method + assert not column_is_repeated + fallback_label_name = column._anon_name_label + + fallback_label_name = ( + elements._truncated_label(fallback_label_name) + if not isinstance( + fallback_label_name, elements._truncated_label + ) + else fallback_label_name + ) + + result_expr = _CompileLabel( + col_expr, fallback_label_name, alt_names=(proxy_name,) + ) + else: + result_expr = col_expr + + column_clause_args.update( + within_columns_clause=within_columns_clause, + add_to_result_map=add_to_result_map, + include_table=include_table, + ) + return result_expr._compiler_dispatch(self, **column_clause_args) + + def format_from_hint_text(self, sqltext, table, hint, iscrud): + hinttext = self.get_from_hint_text(table, hint) + if hinttext: + sqltext += " " + hinttext + return sqltext + + def get_select_hint_text(self, byfroms): + return None + + def get_from_hint_text( + self, table: FromClause, text: Optional[str] + ) -> Optional[str]: + return None + + def get_crud_hint_text(self, table, text): + return None + + def get_statement_hint_text(self, hint_texts): + return " ".join(hint_texts) + + _default_stack_entry: _CompilerStackEntry + + if not typing.TYPE_CHECKING: + _default_stack_entry = util.immutabledict( + [("correlate_froms", frozenset()), ("asfrom_froms", frozenset())] + ) + + def _display_froms_for_select( + self, select_stmt, asfrom, lateral=False, **kw + ): + # utility method to help external dialects + # get the correct from list for a select. + # specifically the oracle dialect needs this feature + # right now. + toplevel = not self.stack + entry = self._default_stack_entry if toplevel else self.stack[-1] + + compile_state = select_stmt._compile_state_factory(select_stmt, self) + + correlate_froms = entry["correlate_froms"] + asfrom_froms = entry["asfrom_froms"] + + if asfrom and not lateral: + froms = compile_state._get_display_froms( + explicit_correlate_froms=correlate_froms.difference( + asfrom_froms + ), + implicit_correlate_froms=(), + ) + else: + froms = compile_state._get_display_froms( + explicit_correlate_froms=correlate_froms, + implicit_correlate_froms=asfrom_froms, + ) + return froms + + translate_select_structure: Any = None + """if not ``None``, should be a callable which accepts ``(select_stmt, + **kw)`` and returns a select object. this is used for structural changes + mostly to accommodate for LIMIT/OFFSET schemes + + """ + + def visit_select( + self, + select_stmt, + asfrom=False, + insert_into=False, + fromhints=None, + compound_index=None, + select_wraps_for=None, + lateral=False, + from_linter=None, + **kwargs, + ): + assert select_wraps_for is None, ( + "SQLAlchemy 1.4 requires use of " + "the translate_select_structure hook for structural " + "translations of SELECT objects" + ) + + # initial setup of SELECT. the compile_state_factory may now + # be creating a totally different SELECT from the one that was + # passed in. for ORM use this will convert from an ORM-state + # SELECT to a regular "Core" SELECT. other composed operations + # such as computation of joins will be performed. 
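+        # Illustrative sketch, added editorially and not part of the original
+        # source (the table and column names are hypothetical): the end
+        # result of visit_select() can be observed by stringifying a Core
+        # select() construct, e.g.
+        #
+        #     from sqlalchemy import column, select, table
+        #
+        #     users = table("users", column("id"), column("name"))
+        #     stmt = select(users.c.id).where(users.c.name == "spongebob")
+        #     print(stmt)
+        #     # SELECT users.id FROM users WHERE users.name = :name_1
+        #     # (whitespace differs; the real output is split across lines)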
+ + kwargs["within_columns_clause"] = False + + compile_state = select_stmt._compile_state_factory( + select_stmt, self, **kwargs + ) + kwargs["ambiguous_table_name_map"] = ( + compile_state._ambiguous_table_name_map + ) + + select_stmt = compile_state.statement + + toplevel = not self.stack + + if toplevel and not self.compile_state: + self.compile_state = compile_state + + is_embedded_select = compound_index is not None or insert_into + + # translate step for Oracle, SQL Server which often need to + # restructure the SELECT to allow for LIMIT/OFFSET and possibly + # other conditions + if self.translate_select_structure: + new_select_stmt = self.translate_select_structure( + select_stmt, asfrom=asfrom, **kwargs + ) + + # if SELECT was restructured, maintain a link to the originals + # and assemble a new compile state + if new_select_stmt is not select_stmt: + compile_state_wraps_for = compile_state + select_wraps_for = select_stmt + select_stmt = new_select_stmt + + compile_state = select_stmt._compile_state_factory( + select_stmt, self, **kwargs + ) + select_stmt = compile_state.statement + + entry = self._default_stack_entry if toplevel else self.stack[-1] + + populate_result_map = need_column_expressions = ( + toplevel + or entry.get("need_result_map_for_compound", False) + or entry.get("need_result_map_for_nested", False) + ) + + # indicates there is a CompoundSelect in play and we are not the + # first select + if compound_index: + populate_result_map = False + + # this was first proposed as part of #3372; however, it is not + # reached in current tests and could possibly be an assertion + # instead. + if not populate_result_map and "add_to_result_map" in kwargs: + del kwargs["add_to_result_map"] + + froms = self._setup_select_stack( + select_stmt, compile_state, entry, asfrom, lateral, compound_index + ) + + column_clause_args = kwargs.copy() + column_clause_args.update( + {"within_label_clause": False, "within_columns_clause": False} + ) + + text = "SELECT " # we're off to a good start ! + + if select_stmt._hints: + hint_text, byfrom = self._setup_select_hints(select_stmt) + if hint_text: + text += hint_text + " " + else: + byfrom = None + + if select_stmt._independent_ctes: + self._dispatch_independent_ctes(select_stmt, kwargs) + + if select_stmt._prefixes: + text += self._generate_prefixes( + select_stmt, select_stmt._prefixes, **kwargs + ) + + text += self.get_select_precolumns(select_stmt, **kwargs) + # the actual list of columns to print in the SELECT column list. 
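+        # (Editorial note, not in the original source; the names are
+        # hypothetical: for select(users.c.id, users.c.name).subquery(),
+        # each entry built below is the rendered column expression, given
+        # an "AS ..." label only when _label_select_column decides one is
+        # required, e.g. inside a subquery or when a type-level expression
+        # wraps the column.)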
+ inner_columns = [ + c + for c in [ + self._label_select_column( + select_stmt, + column, + populate_result_map, + asfrom, + column_clause_args, + name=name, + proxy_name=proxy_name, + fallback_label_name=fallback_label_name, + column_is_repeated=repeated, + need_column_expressions=need_column_expressions, + ) + for ( + name, + proxy_name, + fallback_label_name, + column, + repeated, + ) in compile_state.columns_plus_names + ] + if c is not None + ] + + if populate_result_map and select_wraps_for is not None: + # if this select was generated from translate_select, + # rewrite the targeted columns in the result map + + translate = dict( + zip( + [ + name + for ( + key, + proxy_name, + fallback_label_name, + name, + repeated, + ) in compile_state.columns_plus_names + ], + [ + name + for ( + key, + proxy_name, + fallback_label_name, + name, + repeated, + ) in compile_state_wraps_for.columns_plus_names + ], + ) + ) + + self._result_columns = [ + ResultColumnsEntry( + key, name, tuple(translate.get(o, o) for o in obj), type_ + ) + for key, name, obj, type_ in self._result_columns + ] + + text = self._compose_select_body( + text, + select_stmt, + compile_state, + inner_columns, + froms, + byfrom, + toplevel, + kwargs, + ) + + if select_stmt._statement_hints: + per_dialect = [ + ht + for (dialect_name, ht) in select_stmt._statement_hints + if dialect_name in ("*", self.dialect.name) + ] + if per_dialect: + text += " " + self.get_statement_hint_text(per_dialect) + + # In compound query, CTEs are shared at the compound level + if self.ctes and (not is_embedded_select or toplevel): + nesting_level = len(self.stack) if not toplevel else None + text = self._render_cte_clause(nesting_level=nesting_level) + text + + if select_stmt._suffixes: + text += " " + self._generate_prefixes( + select_stmt, select_stmt._suffixes, **kwargs + ) + + self.stack.pop(-1) + + return text + + def _setup_select_hints( + self, select: Select[Any] + ) -> Tuple[str, _FromHintsType]: + byfrom = { + from_: hinttext + % {"name": from_._compiler_dispatch(self, ashint=True)} + for (from_, dialect), hinttext in select._hints.items() + if dialect in ("*", self.dialect.name) + } + hint_text = self.get_select_hint_text(byfrom) + return hint_text, byfrom + + def _setup_select_stack( + self, select, compile_state, entry, asfrom, lateral, compound_index + ): + correlate_froms = entry["correlate_froms"] + asfrom_froms = entry["asfrom_froms"] + + if compound_index == 0: + entry["select_0"] = select + elif compound_index: + select_0 = entry["select_0"] + numcols = len(select_0._all_selected_columns) + + if len(compile_state.columns_plus_names) != numcols: + raise exc.CompileError( + "All selectables passed to " + "CompoundSelect must have identical numbers of " + "columns; select #%d has %d columns, select " + "#%d has %d" + % ( + 1, + numcols, + compound_index + 1, + len(select._all_selected_columns), + ) + ) + + if asfrom and not lateral: + froms = compile_state._get_display_froms( + explicit_correlate_froms=correlate_froms.difference( + asfrom_froms + ), + implicit_correlate_froms=(), + ) + else: + froms = compile_state._get_display_froms( + explicit_correlate_froms=correlate_froms, + implicit_correlate_froms=asfrom_froms, + ) + + new_correlate_froms = set(_from_objects(*froms)) + all_correlate_froms = new_correlate_froms.union(correlate_froms) + + new_entry: _CompilerStackEntry = { + "asfrom_froms": new_correlate_froms, + "correlate_froms": all_correlate_froms, + "selectable": select, + "compile_state": compile_state, + } + 
self.stack.append(new_entry) + + return froms + + def _compose_select_body( + self, + text, + select, + compile_state, + inner_columns, + froms, + byfrom, + toplevel, + kwargs, + ): + text += ", ".join(inner_columns) + + if self.linting & COLLECT_CARTESIAN_PRODUCTS: + from_linter = FromLinter({}, set()) + warn_linting = self.linting & WARN_LINTING + if toplevel: + self.from_linter = from_linter + else: + from_linter = None + warn_linting = False + + # adjust the whitespace for no inner columns, part of #9440, + # so that a no-col SELECT comes out as "SELECT WHERE..." or + # "SELECT FROM ...". + # while it would be better to have built the SELECT starting string + # without trailing whitespace first, then add whitespace only if inner + # cols were present, this breaks compatibility with various custom + # compilation schemes that are currently being tested. + if not inner_columns: + text = text.rstrip() + + if froms: + text += " \nFROM " + + if select._hints: + text += ", ".join( + [ + f._compiler_dispatch( + self, + asfrom=True, + fromhints=byfrom, + from_linter=from_linter, + **kwargs, + ) + for f in froms + ] + ) + else: + text += ", ".join( + [ + f._compiler_dispatch( + self, + asfrom=True, + from_linter=from_linter, + **kwargs, + ) + for f in froms + ] + ) + else: + text += self.default_from() + + if select._where_criteria: + t = self._generate_delimited_and_list( + select._where_criteria, from_linter=from_linter, **kwargs + ) + if t: + text += " \nWHERE " + t + + if warn_linting: + assert from_linter is not None + from_linter.warn() + + if select._group_by_clauses: + text += self.group_by_clause(select, **kwargs) + + if select._having_criteria: + t = self._generate_delimited_and_list( + select._having_criteria, **kwargs + ) + if t: + text += " \nHAVING " + t + + if select._order_by_clauses: + text += self.order_by_clause(select, **kwargs) + + if select._has_row_limiting_clause: + text += self._row_limit_clause(select, **kwargs) + + if select._for_update_arg is not None: + text += self.for_update_clause(select, **kwargs) + + return text + + def _generate_prefixes(self, stmt, prefixes, **kw): + clause = " ".join( + prefix._compiler_dispatch(self, **kw) + for prefix, dialect_name in prefixes + if dialect_name in (None, "*") or dialect_name == self.dialect.name + ) + if clause: + clause += " " + return clause + + def _render_cte_clause( + self, + nesting_level=None, + include_following_stack=False, + ): + """ + include_following_stack + Also render the nesting CTEs on the next stack. Useful for + SQL structures like UNION or INSERT that can wrap SELECT + statements containing nesting CTEs. 
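+
+        Illustrative aside, added editorially and not taken from the
+        library's own documentation: a CTE created with
+        ``select(...).cte("tally", nesting=True)`` is rendered by this
+        method at the level of its enclosing statement rather than being
+        hoisted into the outermost WITH clause.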
+ """ + if not self.ctes: + return "" + + ctes: MutableMapping[CTE, str] + + if nesting_level and nesting_level > 1: + ctes = util.OrderedDict() + for cte in list(self.ctes.keys()): + cte_level, cte_name, cte_opts = self.level_name_by_cte[ + cte._get_reference_cte() + ] + nesting = cte.nesting or cte_opts.nesting + is_rendered_level = cte_level == nesting_level or ( + include_following_stack and cte_level == nesting_level + 1 + ) + if not (nesting and is_rendered_level): + continue + + ctes[cte] = self.ctes[cte] + + else: + ctes = self.ctes + + if not ctes: + return "" + ctes_recursive = any([cte.recursive for cte in ctes]) + + cte_text = self.get_cte_preamble(ctes_recursive) + " " + cte_text += ", \n".join([txt for txt in ctes.values()]) + cte_text += "\n " + + if nesting_level and nesting_level > 1: + for cte in list(ctes.keys()): + cte_level, cte_name, cte_opts = self.level_name_by_cte[ + cte._get_reference_cte() + ] + del self.ctes[cte] + del self.ctes_by_level_name[(cte_level, cte_name)] + del self.level_name_by_cte[cte._get_reference_cte()] + + return cte_text + + def get_cte_preamble(self, recursive): + if recursive: + return "WITH RECURSIVE" + else: + return "WITH" + + def get_select_precolumns(self, select: Select[Any], **kw: Any) -> str: + """Called when building a ``SELECT`` statement, position is just + before column list. + + """ + if select._distinct_on: + util.warn_deprecated( + "DISTINCT ON is currently supported only by the PostgreSQL " + "dialect. Use of DISTINCT ON for other backends is currently " + "silently ignored, however this usage is deprecated, and will " + "raise CompileError in a future release for all backends " + "that do not support this syntax.", + version="1.4", + ) + return "DISTINCT " if select._distinct else "" + + def group_by_clause(self, select, **kw): + """allow dialects to customize how GROUP BY is rendered.""" + + group_by = self._generate_delimited_list( + select._group_by_clauses, OPERATORS[operators.comma_op], **kw + ) + if group_by: + return " GROUP BY " + group_by + else: + return "" + + def order_by_clause(self, select, **kw): + """allow dialects to customize how ORDER BY is rendered.""" + + order_by = self._generate_delimited_list( + select._order_by_clauses, OPERATORS[operators.comma_op], **kw + ) + + if order_by: + return " ORDER BY " + order_by + else: + return "" + + def for_update_clause(self, select, **kw): + return " FOR UPDATE" + + def returning_clause( + self, + stmt: UpdateBase, + returning_cols: Sequence[_ColumnsClauseElement], + *, + populate_result_map: bool, + **kw: Any, + ) -> str: + columns = [ + self._label_returning_column( + stmt, + column, + populate_result_map, + fallback_label_name=fallback_label_name, + column_is_repeated=repeated, + name=name, + proxy_name=proxy_name, + **kw, + ) + for ( + name, + proxy_name, + fallback_label_name, + column, + repeated, + ) in stmt._generate_columns_plus_names( + True, cols=base._select_iterables(returning_cols) + ) + ] + + return "RETURNING " + ", ".join(columns) + + def limit_clause(self, select, **kw): + text = "" + if select._limit_clause is not None: + text += "\n LIMIT " + self.process(select._limit_clause, **kw) + if select._offset_clause is not None: + if select._limit_clause is None: + text += "\n LIMIT -1" + text += " OFFSET " + self.process(select._offset_clause, **kw) + return text + + def fetch_clause( + self, + select, + fetch_clause=None, + require_offset=False, + use_literal_execute_for_simple_int=False, + **kw, + ): + if fetch_clause is None: + fetch_clause = 
select._fetch_clause + fetch_clause_options = select._fetch_clause_options + else: + fetch_clause_options = {"percent": False, "with_ties": False} + + text = "" + + if select._offset_clause is not None: + offset_clause = select._offset_clause + if ( + use_literal_execute_for_simple_int + and select._simple_int_clause(offset_clause) + ): + offset_clause = offset_clause.render_literal_execute() + offset_str = self.process(offset_clause, **kw) + text += "\n OFFSET %s ROWS" % offset_str + elif require_offset: + text += "\n OFFSET 0 ROWS" + + if fetch_clause is not None: + if ( + use_literal_execute_for_simple_int + and select._simple_int_clause(fetch_clause) + ): + fetch_clause = fetch_clause.render_literal_execute() + text += "\n FETCH FIRST %s%s ROWS %s" % ( + self.process(fetch_clause, **kw), + " PERCENT" if fetch_clause_options["percent"] else "", + "WITH TIES" if fetch_clause_options["with_ties"] else "ONLY", + ) + return text + + def visit_table( + self, + table, + asfrom=False, + iscrud=False, + ashint=False, + fromhints=None, + use_schema=True, + from_linter=None, + ambiguous_table_name_map=None, + enclosing_alias=None, + **kwargs, + ): + if from_linter: + from_linter.froms[table] = table.fullname + + if asfrom or ashint: + effective_schema = self.preparer.schema_for_object(table) + + if use_schema and effective_schema: + ret = ( + self.preparer.quote_schema(effective_schema) + + "." + + self.preparer.quote(table.name) + ) + else: + ret = self.preparer.quote(table.name) + + if ( + ( + enclosing_alias is None + or enclosing_alias.element is not table + ) + and not effective_schema + and ambiguous_table_name_map + and table.name in ambiguous_table_name_map + ): + anon_name = self._truncated_identifier( + "alias", ambiguous_table_name_map[table.name] + ) + + ret = ret + self.get_render_as_alias_suffix( + self.preparer.format_alias(None, anon_name) + ) + + if fromhints and table in fromhints: + ret = self.format_from_hint_text( + ret, table, fromhints[table], iscrud + ) + return ret + else: + return "" + + def visit_join(self, join, asfrom=False, from_linter=None, **kwargs): + if from_linter: + from_linter.edges.update( + itertools.product( + _de_clone(join.left._from_objects), + _de_clone(join.right._from_objects), + ) + ) + + if join.full: + join_type = " FULL OUTER JOIN " + elif join.isouter: + join_type = " LEFT OUTER JOIN " + else: + join_type = " JOIN " + return ( + join.left._compiler_dispatch( + self, asfrom=True, from_linter=from_linter, **kwargs + ) + + join_type + + join.right._compiler_dispatch( + self, asfrom=True, from_linter=from_linter, **kwargs + ) + + " ON " + # TODO: likely need asfrom=True here? + + join.onclause._compiler_dispatch( + self, from_linter=from_linter, **kwargs + ) + ) + + def _setup_crud_hints(self, stmt, table_text): + dialect_hints = { + table: hint_text + for (table, dialect), hint_text in stmt._hints.items() + if dialect in ("*", self.dialect.name) + } + if stmt.table in dialect_hints: + table_text = self.format_from_hint_text( + table_text, stmt.table, dialect_hints[stmt.table], True + ) + return dialect_hints, table_text + + # within the realm of "insertmanyvalues sentinel columns", + # these lookups match different kinds of Column() configurations + # to specific backend capabilities. 
they are broken into two + # lookups, one for autoincrement columns and the other for non + # autoincrement columns + _sentinel_col_non_autoinc_lookup = util.immutabledict( + { + _SentinelDefaultCharacterization.CLIENTSIDE: ( + InsertmanyvaluesSentinelOpts._SUPPORTED_OR_NOT + ), + _SentinelDefaultCharacterization.SENTINEL_DEFAULT: ( + InsertmanyvaluesSentinelOpts._SUPPORTED_OR_NOT + ), + _SentinelDefaultCharacterization.NONE: ( + InsertmanyvaluesSentinelOpts._SUPPORTED_OR_NOT + ), + _SentinelDefaultCharacterization.IDENTITY: ( + InsertmanyvaluesSentinelOpts.IDENTITY + ), + _SentinelDefaultCharacterization.SEQUENCE: ( + InsertmanyvaluesSentinelOpts.SEQUENCE + ), + } + ) + _sentinel_col_autoinc_lookup = _sentinel_col_non_autoinc_lookup.union( + { + _SentinelDefaultCharacterization.NONE: ( + InsertmanyvaluesSentinelOpts.AUTOINCREMENT + ), + } + ) + + def _get_sentinel_column_for_table( + self, table: Table + ) -> Optional[Sequence[Column[Any]]]: + """given a :class:`.Table`, return a usable sentinel column or + columns for this dialect if any. + + Return None if no sentinel columns could be identified, or raise an + error if a column was marked as a sentinel explicitly but isn't + compatible with this dialect. + + """ + + sentinel_opts = self.dialect.insertmanyvalues_implicit_sentinel + sentinel_characteristics = table._sentinel_column_characteristics + + sent_cols = sentinel_characteristics.columns + + if sent_cols is None: + return None + + if sentinel_characteristics.is_autoinc: + bitmask = self._sentinel_col_autoinc_lookup.get( + sentinel_characteristics.default_characterization, 0 + ) + else: + bitmask = self._sentinel_col_non_autoinc_lookup.get( + sentinel_characteristics.default_characterization, 0 + ) + + if sentinel_opts & bitmask: + return sent_cols + + if sentinel_characteristics.is_explicit: + # a column was explicitly marked as insert_sentinel=True, + # however it is not compatible with this dialect. they should + # not indicate this column as a sentinel if they need to include + # this dialect. + + # TODO: do we want non-primary key explicit sentinel cols + # that can gracefully degrade for some backends? + # insert_sentinel="degrade" perhaps. not for the initial release. + # I am hoping people are generally not dealing with this sentinel + # business at all. + + # if is_explicit is True, there will be only one sentinel column. + + raise exc.InvalidRequestError( + f"Column {sent_cols[0]} can't be explicitly " + "marked as a sentinel column when using the " + f"{self.dialect.name} dialect, as the " + "particular type of default generation on this column is " + "not currently compatible with this dialect's specific " + f"INSERT..RETURNING syntax which can receive the " + "server-generated value in " + "a deterministic way. To remove this error, remove " + "insert_sentinel=True from primary key autoincrement " + "columns; these columns are automatically used as " + "sentinels for supported dialects in any case." 
+ ) + + return None + + def _deliver_insertmanyvalues_batches( + self, + statement: str, + parameters: _DBAPIMultiExecuteParams, + compiled_parameters: List[_MutableCoreSingleExecuteParams], + generic_setinputsizes: Optional[_GenericSetInputSizesType], + batch_size: int, + sort_by_parameter_order: bool, + schema_translate_map: Optional[SchemaTranslateMapType], + ) -> Iterator[_InsertManyValuesBatch]: + imv = self._insertmanyvalues + assert imv is not None + + if not imv.sentinel_param_keys: + _sentinel_from_params = None + else: + _sentinel_from_params = operator.itemgetter( + *imv.sentinel_param_keys + ) + + lenparams = len(parameters) + if imv.is_default_expr and not self.dialect.supports_default_metavalue: + # backend doesn't support + # INSERT INTO table (pk_col) VALUES (DEFAULT), (DEFAULT), ... + # at the moment this is basically SQL Server due to + # not being able to use DEFAULT for identity column + # just yield out that many single statements! still + # faster than a whole connection.execute() call ;) + # + # note we still are taking advantage of the fact that we know + # we are using RETURNING. The generalized approach of fetching + # cursor.lastrowid etc. still goes through the more heavyweight + # "ExecutionContext per statement" system as it isn't usable + # as a generic "RETURNING" approach + use_row_at_a_time = True + downgraded = False + elif not self.dialect.supports_multivalues_insert or ( + sort_by_parameter_order + and self._result_columns + and (imv.sentinel_columns is None or imv.includes_upsert_behaviors) + ): + # deterministic order was requested and the compiler could + # not organize sentinel columns for this dialect/statement. + # use row at a time + use_row_at_a_time = True + downgraded = True + else: + use_row_at_a_time = False + downgraded = False + + if use_row_at_a_time: + for batchnum, (param, compiled_param) in enumerate( + cast( + "Sequence[Tuple[_DBAPISingleExecuteParams, _MutableCoreSingleExecuteParams]]", # noqa: E501 + zip(parameters, compiled_parameters), + ), + 1, + ): + yield _InsertManyValuesBatch( + statement, + param, + generic_setinputsizes, + [param], + ( + [_sentinel_from_params(compiled_param)] + if _sentinel_from_params + else [] + ), + 1, + batchnum, + lenparams, + sort_by_parameter_order, + downgraded, + ) + return + + if schema_translate_map: + rst = functools.partial( + self.preparer._render_schema_translates, + schema_translate_map=schema_translate_map, + ) + else: + rst = None + + imv_single_values_expr = imv.single_values_expr + if rst: + imv_single_values_expr = rst(imv_single_values_expr) + + executemany_values = f"({imv_single_values_expr})" + statement = statement.replace(executemany_values, "__EXECMANY_TOKEN__") + + # Use optional insertmanyvalues_max_parameters + # to further shrink the batch size so that there are no more than + # insertmanyvalues_max_parameters params. + # Currently used by SQL Server, which limits statements to 2100 bound + # parameters (actually 2099). 
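+        # (Worked example added editorially, not part of the original source;
+        # the figures are illustrative only: with max_params=2100, three
+        # bound parameters per VALUES row, and four parameters used elsewhere
+        # in the statement, the computation below caps the batch at
+        # (2100 - 4) // 3 == 698 rows per INSERT statement.)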
+ max_params = self.dialect.insertmanyvalues_max_parameters + if max_params: + total_num_of_params = len(self.bind_names) + num_params_per_batch = len(imv.insert_crud_params) + num_params_outside_of_batch = ( + total_num_of_params - num_params_per_batch + ) + batch_size = min( + batch_size, + ( + (max_params - num_params_outside_of_batch) + // num_params_per_batch + ), + ) + + batches = cast("List[Sequence[Any]]", list(parameters)) + compiled_batches = cast( + "List[Sequence[Any]]", list(compiled_parameters) + ) + + processed_setinputsizes: Optional[_GenericSetInputSizesType] = None + batchnum = 1 + total_batches = lenparams // batch_size + ( + 1 if lenparams % batch_size else 0 + ) + + insert_crud_params = imv.insert_crud_params + assert insert_crud_params is not None + + if rst: + insert_crud_params = [ + (col, key, rst(expr), st) + for col, key, expr, st in insert_crud_params + ] + + escaped_bind_names: Mapping[str, str] + expand_pos_lower_index = expand_pos_upper_index = 0 + + if not self.positional: + if self.escaped_bind_names: + escaped_bind_names = self.escaped_bind_names + else: + escaped_bind_names = {} + + all_keys = set(parameters[0]) + + def apply_placeholders(keys, formatted): + for key in keys: + key = escaped_bind_names.get(key, key) + formatted = formatted.replace( + self.bindtemplate % {"name": key}, + self.bindtemplate + % {"name": f"{key}__EXECMANY_INDEX__"}, + ) + return formatted + + if imv.embed_values_counter: + imv_values_counter = ", _IMV_VALUES_COUNTER" + else: + imv_values_counter = "" + formatted_values_clause = f"""({', '.join( + apply_placeholders(bind_keys, formatted) + for _, _, formatted, bind_keys in insert_crud_params + )}{imv_values_counter})""" + + keys_to_replace = all_keys.intersection( + escaped_bind_names.get(key, key) + for _, _, _, bind_keys in insert_crud_params + for key in bind_keys + ) + base_parameters = { + key: parameters[0][key] + for key in all_keys.difference(keys_to_replace) + } + executemany_values_w_comma = "" + else: + formatted_values_clause = "" + keys_to_replace = set() + base_parameters = {} + + if imv.embed_values_counter: + executemany_values_w_comma = ( + f"({imv_single_values_expr}, _IMV_VALUES_COUNTER), " + ) + else: + executemany_values_w_comma = f"({imv_single_values_expr}), " + + all_names_we_will_expand: Set[str] = set() + for elem in imv.insert_crud_params: + all_names_we_will_expand.update(elem[3]) + + # get the start and end position in a particular list + # of parameters where we will be doing the "expanding". 
+ # statements can have params on either side or both sides, + # given RETURNING and CTEs + if all_names_we_will_expand: + positiontup = self.positiontup + assert positiontup is not None + + all_expand_positions = { + idx + for idx, name in enumerate(positiontup) + if name in all_names_we_will_expand + } + expand_pos_lower_index = min(all_expand_positions) + expand_pos_upper_index = max(all_expand_positions) + 1 + assert ( + len(all_expand_positions) + == expand_pos_upper_index - expand_pos_lower_index + ) + + if self._numeric_binds: + escaped = re.escape(self._numeric_binds_identifier_char) + executemany_values_w_comma = re.sub( + rf"{escaped}\d+", "%s", executemany_values_w_comma + ) + + while batches: + batch = batches[0:batch_size] + compiled_batch = compiled_batches[0:batch_size] + + batches[0:batch_size] = [] + compiled_batches[0:batch_size] = [] + + if batches: + current_batch_size = batch_size + else: + current_batch_size = len(batch) + + if generic_setinputsizes: + # if setinputsizes is present, expand this collection to + # suit the batch length as well + # currently this will be mssql+pyodbc for internal dialects + processed_setinputsizes = [ + (new_key, len_, typ) + for new_key, len_, typ in ( + (f"{key}_{index}", len_, typ) + for index in range(current_batch_size) + for key, len_, typ in generic_setinputsizes + ) + ] + + replaced_parameters: Any + if self.positional: + num_ins_params = imv.num_positional_params_counted + + batch_iterator: Iterable[Sequence[Any]] + extra_params_left: Sequence[Any] + extra_params_right: Sequence[Any] + + if num_ins_params == len(batch[0]): + extra_params_left = extra_params_right = () + batch_iterator = batch + else: + extra_params_left = batch[0][:expand_pos_lower_index] + extra_params_right = batch[0][expand_pos_upper_index:] + batch_iterator = ( + b[expand_pos_lower_index:expand_pos_upper_index] + for b in batch + ) + + if imv.embed_values_counter: + expanded_values_string = ( + "".join( + executemany_values_w_comma.replace( + "_IMV_VALUES_COUNTER", str(i) + ) + for i, _ in enumerate(batch) + ) + )[:-2] + else: + expanded_values_string = ( + (executemany_values_w_comma * current_batch_size) + )[:-2] + + if self._numeric_binds and num_ins_params > 0: + # numeric will always number the parameters inside of + # VALUES (and thus order self.positiontup) to be higher + # than non-VALUES parameters, no matter where in the + # statement those non-VALUES parameters appear (this is + # ensured in _process_numeric by numbering first all + # params that are not in _values_bindparam) + # therefore all extra params are always + # on the left side and numbered lower than the VALUES + # parameters + assert not extra_params_right + + start = expand_pos_lower_index + 1 + end = num_ins_params * (current_batch_size) + start + + # need to format here, since statement may contain + # unescaped %, while values_string contains just (%s, %s) + positions = tuple( + f"{self._numeric_binds_identifier_char}{i}" + for i in range(start, end) + ) + expanded_values_string = expanded_values_string % positions + + replaced_statement = statement.replace( + "__EXECMANY_TOKEN__", expanded_values_string + ) + + replaced_parameters = tuple( + itertools.chain.from_iterable(batch_iterator) + ) + + replaced_parameters = ( + extra_params_left + + replaced_parameters + + extra_params_right + ) + + else: + replaced_values_clauses = [] + replaced_parameters = base_parameters.copy() + + for i, param in enumerate(batch): + fmv = formatted_values_clause.replace( + "EXECMANY_INDEX__", str(i) 
+ ) + if imv.embed_values_counter: + fmv = fmv.replace("_IMV_VALUES_COUNTER", str(i)) + + replaced_values_clauses.append(fmv) + replaced_parameters.update( + {f"{key}__{i}": param[key] for key in keys_to_replace} + ) + + replaced_statement = statement.replace( + "__EXECMANY_TOKEN__", + ", ".join(replaced_values_clauses), + ) + + yield _InsertManyValuesBatch( + replaced_statement, + replaced_parameters, + processed_setinputsizes, + batch, + ( + [_sentinel_from_params(cb) for cb in compiled_batch] + if _sentinel_from_params + else [] + ), + current_batch_size, + batchnum, + total_batches, + sort_by_parameter_order, + False, + ) + batchnum += 1 + + def visit_insert( + self, insert_stmt, visited_bindparam=None, visiting_cte=None, **kw + ): + compile_state = insert_stmt._compile_state_factory( + insert_stmt, self, **kw + ) + insert_stmt = compile_state.statement + + if visiting_cte is not None: + kw["visiting_cte"] = visiting_cte + toplevel = False + else: + toplevel = not self.stack + + if toplevel: + self.isinsert = True + if not self.dml_compile_state: + self.dml_compile_state = compile_state + if not self.compile_state: + self.compile_state = compile_state + + self.stack.append( + { + "correlate_froms": set(), + "asfrom_froms": set(), + "selectable": insert_stmt, + } + ) + + counted_bindparam = 0 + + # reset any incoming "visited_bindparam" collection + visited_bindparam = None + + # for positional, insertmanyvalues needs to know how many + # bound parameters are in the VALUES sequence; there's no simple + # rule because default expressions etc. can have zero or more + # params inside them. After multiple attempts to figure this out, + # this very simplistic "count after" works and is + # likely the least amount of callcounts, though looks clumsy + if self.positional and visiting_cte is None: + # if we are inside a CTE, don't count parameters + # here since they wont be for insertmanyvalues. keep + # visited_bindparam at None so no counting happens. + # see #9173 + visited_bindparam = [] + + crud_params_struct = crud._get_crud_params( + self, + insert_stmt, + compile_state, + toplevel, + visited_bindparam=visited_bindparam, + **kw, + ) + + if self.positional and visited_bindparam is not None: + counted_bindparam = len(visited_bindparam) + if self._numeric_binds: + if self._values_bindparam is not None: + self._values_bindparam += visited_bindparam + else: + self._values_bindparam = visited_bindparam + + crud_params_single = crud_params_struct.single_params + + if ( + not crud_params_single + and not self.dialect.supports_default_values + and not self.dialect.supports_default_metavalue + and not self.dialect.supports_empty_insert + ): + raise exc.CompileError( + "The '%s' dialect with current database " + "version settings does not support empty " + "inserts." % self.dialect.name + ) + + if compile_state._has_multi_parameters: + if not self.dialect.supports_multivalues_insert: + raise exc.CompileError( + "The '%s' dialect with current database " + "version settings does not support " + "in-place multirow inserts." % self.dialect.name + ) + elif ( + self.implicit_returning or insert_stmt._returning + ) and insert_stmt._sort_by_parameter_order: + raise exc.CompileError( + "RETURNING cannot be determinstically sorted when " + "using an INSERT which includes multi-row values()." 
+ ) + crud_params_single = crud_params_struct.single_params + else: + crud_params_single = crud_params_struct.single_params + + preparer = self.preparer + supports_default_values = self.dialect.supports_default_values + + text = "INSERT " + + if insert_stmt._prefixes: + text += self._generate_prefixes( + insert_stmt, insert_stmt._prefixes, **kw + ) + + text += "INTO " + table_text = preparer.format_table(insert_stmt.table) + + if insert_stmt._hints: + _, table_text = self._setup_crud_hints(insert_stmt, table_text) + + if insert_stmt._independent_ctes: + self._dispatch_independent_ctes(insert_stmt, kw) + + text += table_text + + if crud_params_single or not supports_default_values: + text += " (%s)" % ", ".join( + [expr for _, expr, _, _ in crud_params_single] + ) + + # look for insertmanyvalues attributes that would have been configured + # by crud.py as it scanned through the columns to be part of the + # INSERT + use_insertmanyvalues = crud_params_struct.use_insertmanyvalues + named_sentinel_params: Optional[Sequence[str]] = None + add_sentinel_cols = None + implicit_sentinel = False + + returning_cols = self.implicit_returning or insert_stmt._returning + if returning_cols: + add_sentinel_cols = crud_params_struct.use_sentinel_columns + if add_sentinel_cols is not None: + assert use_insertmanyvalues + + # search for the sentinel column explicitly present + # in the INSERT columns list, and additionally check that + # this column has a bound parameter name set up that's in the + # parameter list. If both of these cases are present, it means + # we will have a client side value for the sentinel in each + # parameter set. + + _params_by_col = { + col: param_names + for col, _, _, param_names in crud_params_single + } + named_sentinel_params = [] + for _add_sentinel_col in add_sentinel_cols: + if _add_sentinel_col not in _params_by_col: + named_sentinel_params = None + break + param_name = self._within_exec_param_key_getter( + _add_sentinel_col + ) + if param_name not in _params_by_col[_add_sentinel_col]: + named_sentinel_params = None + break + named_sentinel_params.append(param_name) + + if named_sentinel_params is None: + # if we are not going to have a client side value for + # the sentinel in the parameter set, that means it's + # an autoincrement, an IDENTITY, or a server-side SQL + # expression like nextval('seqname'). So this is + # an "implicit" sentinel; we will look for it in + # RETURNING + # only, and then sort on it. For this case on PG, + # SQL Server we have to use a special INSERT form + # that guarantees the server side function lines up with + # the entries in the VALUES. + if ( + self.dialect.insertmanyvalues_implicit_sentinel + & InsertmanyvaluesSentinelOpts.ANY_AUTOINCREMENT + ): + implicit_sentinel = True + else: + # here, we are not using a sentinel at all + # and we are likely the SQLite dialect. + # The first add_sentinel_col that we have should not + # be marked as "insert_sentinel=True". if it was, + # an error should have been raised in + # _get_sentinel_column_for_table. + assert not add_sentinel_cols[0]._insert_sentinel, ( + "sentinel selection rules should have prevented " + "us from getting here for this dialect" + ) + + # always put the sentinel columns last. even if they are + # in the returning list already, they will be there twice + # then. 
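+                # (Editorial note, not in the original source; the column
+                # names are hypothetical: with returning(t.c.id, t.c.name)
+                # and "id" acting as the sentinel column, the rendered clause
+                # becomes "RETURNING id, name, id"; the trailing copy exists
+                # only so returned rows can be matched back to their
+                # parameter sets.)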
+ returning_cols = list(returning_cols) + list(add_sentinel_cols) + + returning_clause = self.returning_clause( + insert_stmt, + returning_cols, + populate_result_map=toplevel, + ) + + if self.returning_precedes_values: + text += " " + returning_clause + + else: + returning_clause = None + + if insert_stmt.select is not None: + # placed here by crud.py + select_text = self.process( + self.stack[-1]["insert_from_select"], insert_into=True, **kw + ) + + if self.ctes and self.dialect.cte_follows_insert: + nesting_level = len(self.stack) if not toplevel else None + text += " %s%s" % ( + self._render_cte_clause( + nesting_level=nesting_level, + include_following_stack=True, + ), + select_text, + ) + else: + text += " %s" % select_text + elif not crud_params_single and supports_default_values: + text += " DEFAULT VALUES" + if use_insertmanyvalues: + self._insertmanyvalues = _InsertManyValues( + True, + self.dialect.default_metavalue_token, + cast( + "List[crud._CrudParamElementStr]", crud_params_single + ), + counted_bindparam, + sort_by_parameter_order=( + insert_stmt._sort_by_parameter_order + ), + includes_upsert_behaviors=( + insert_stmt._post_values_clause is not None + ), + sentinel_columns=add_sentinel_cols, + num_sentinel_columns=( + len(add_sentinel_cols) if add_sentinel_cols else 0 + ), + implicit_sentinel=implicit_sentinel, + ) + elif compile_state._has_multi_parameters: + text += " VALUES %s" % ( + ", ".join( + "(%s)" + % (", ".join(value for _, _, value, _ in crud_param_set)) + for crud_param_set in crud_params_struct.all_multi_params + ), + ) + else: + insert_single_values_expr = ", ".join( + [ + value + for _, _, value, _ in cast( + "List[crud._CrudParamElementStr]", + crud_params_single, + ) + ] + ) + + if use_insertmanyvalues: + if ( + implicit_sentinel + and ( + self.dialect.insertmanyvalues_implicit_sentinel + & InsertmanyvaluesSentinelOpts.USE_INSERT_FROM_SELECT + ) + # this is checking if we have + # INSERT INTO table (id) VALUES (DEFAULT). + and not (crud_params_struct.is_default_metavalue_only) + ): + # if we have a sentinel column that is server generated, + # then for selected backends render the VALUES list as a + # subquery. This is the orderable form supported by + # PostgreSQL and SQL Server. + embed_sentinel_value = True + + render_bind_casts = ( + self.dialect.insertmanyvalues_implicit_sentinel + & InsertmanyvaluesSentinelOpts.RENDER_SELECT_COL_CASTS + ) + + colnames = ", ".join( + f"p{i}" for i, _ in enumerate(crud_params_single) + ) + + if render_bind_casts: + # render casts for the SELECT list. For PG, we are + # already rendering bind casts in the parameter list, + # selectively for the more "tricky" types like ARRAY. + # however, even for the "easy" types, if the parameter + # is NULL for every entry, PG gives up and says + # "it must be TEXT", which fails for other easy types + # like ints. So we cast on this side too. 
+ colnames_w_cast = ", ".join( + self.render_bind_cast( + col.type, + col.type._unwrapped_dialect_impl(self.dialect), + f"p{i}", + ) + for i, (col, *_) in enumerate(crud_params_single) + ) + else: + colnames_w_cast = colnames + + text += ( + f" SELECT {colnames_w_cast} FROM " + f"(VALUES ({insert_single_values_expr})) " + f"AS imp_sen({colnames}, sen_counter) " + "ORDER BY sen_counter" + ) + else: + # otherwise, if no sentinel or backend doesn't support + # orderable subquery form, use a plain VALUES list + embed_sentinel_value = False + text += f" VALUES ({insert_single_values_expr})" + + self._insertmanyvalues = _InsertManyValues( + is_default_expr=False, + single_values_expr=insert_single_values_expr, + insert_crud_params=cast( + "List[crud._CrudParamElementStr]", + crud_params_single, + ), + num_positional_params_counted=counted_bindparam, + sort_by_parameter_order=( + insert_stmt._sort_by_parameter_order + ), + includes_upsert_behaviors=( + insert_stmt._post_values_clause is not None + ), + sentinel_columns=add_sentinel_cols, + num_sentinel_columns=( + len(add_sentinel_cols) if add_sentinel_cols else 0 + ), + sentinel_param_keys=named_sentinel_params, + implicit_sentinel=implicit_sentinel, + embed_values_counter=embed_sentinel_value, + ) + + else: + text += f" VALUES ({insert_single_values_expr})" + + if insert_stmt._post_values_clause is not None: + post_values_clause = self.process( + insert_stmt._post_values_clause, **kw + ) + if post_values_clause: + text += " " + post_values_clause + + if returning_clause and not self.returning_precedes_values: + text += " " + returning_clause + + if self.ctes and not self.dialect.cte_follows_insert: + nesting_level = len(self.stack) if not toplevel else None + text = ( + self._render_cte_clause( + nesting_level=nesting_level, + include_following_stack=True, + ) + + text + ) + + self.stack.pop(-1) + + return text + + def update_limit_clause(self, update_stmt): + """Provide a hook for MySQL to add LIMIT to the UPDATE""" + return None + + def delete_limit_clause(self, delete_stmt): + """Provide a hook for MySQL to add LIMIT to the DELETE""" + return None + + def update_tables_clause(self, update_stmt, from_table, extra_froms, **kw): + """Provide a hook to override the initial table clause + in an UPDATE statement. + + MySQL overrides this. + + """ + kw["asfrom"] = True + return from_table._compiler_dispatch(self, iscrud=True, **kw) + + def update_from_clause( + self, update_stmt, from_table, extra_froms, from_hints, **kw + ): + """Provide a hook to override the generation of an + UPDATE..FROM clause. + + MySQL and MSSQL override this. 
+ + """ + raise NotImplementedError( + "This backend does not support multiple-table " + "criteria within UPDATE" + ) + + def visit_update( + self, + update_stmt: Update, + visiting_cte: Optional[CTE] = None, + **kw: Any, + ) -> str: + compile_state = update_stmt._compile_state_factory( # type: ignore[call-arg] # noqa: E501 + update_stmt, self, **kw # type: ignore[arg-type] + ) + if TYPE_CHECKING: + assert isinstance(compile_state, UpdateDMLState) + update_stmt = compile_state.statement # type: ignore[assignment] + + if visiting_cte is not None: + kw["visiting_cte"] = visiting_cte + toplevel = False + else: + toplevel = not self.stack + + if toplevel: + self.isupdate = True + if not self.dml_compile_state: + self.dml_compile_state = compile_state + if not self.compile_state: + self.compile_state = compile_state + + if self.linting & COLLECT_CARTESIAN_PRODUCTS: + from_linter = FromLinter({}, set()) + warn_linting = self.linting & WARN_LINTING + if toplevel: + self.from_linter = from_linter + else: + from_linter = None + warn_linting = False + + extra_froms = compile_state._extra_froms + is_multitable = bool(extra_froms) + + if is_multitable: + # main table might be a JOIN + main_froms = set(_from_objects(update_stmt.table)) + render_extra_froms = [ + f for f in extra_froms if f not in main_froms + ] + correlate_froms = main_froms.union(extra_froms) + else: + render_extra_froms = [] + correlate_froms = {update_stmt.table} + + self.stack.append( + { + "correlate_froms": correlate_froms, + "asfrom_froms": correlate_froms, + "selectable": update_stmt, + } + ) + + text = "UPDATE " + + if update_stmt._prefixes: + text += self._generate_prefixes( + update_stmt, update_stmt._prefixes, **kw + ) + + table_text = self.update_tables_clause( + update_stmt, + update_stmt.table, + render_extra_froms, + from_linter=from_linter, + **kw, + ) + crud_params_struct = crud._get_crud_params( + self, update_stmt, compile_state, toplevel, **kw + ) + crud_params = crud_params_struct.single_params + + if update_stmt._hints: + dialect_hints, table_text = self._setup_crud_hints( + update_stmt, table_text + ) + else: + dialect_hints = None + + if update_stmt._independent_ctes: + self._dispatch_independent_ctes(update_stmt, kw) + + text += table_text + + text += " SET " + text += ", ".join( + expr + "=" + value + for _, expr, value, _ in cast( + "List[Tuple[Any, str, str, Any]]", crud_params + ) + ) + + if self.implicit_returning or update_stmt._returning: + if self.returning_precedes_values: + text += " " + self.returning_clause( + update_stmt, + self.implicit_returning or update_stmt._returning, + populate_result_map=toplevel, + ) + + if extra_froms: + extra_from_text = self.update_from_clause( + update_stmt, + update_stmt.table, + render_extra_froms, + dialect_hints, + from_linter=from_linter, + **kw, + ) + if extra_from_text: + text += " " + extra_from_text + + if update_stmt._where_criteria: + t = self._generate_delimited_and_list( + update_stmt._where_criteria, from_linter=from_linter, **kw + ) + if t: + text += " WHERE " + t + + limit_clause = self.update_limit_clause(update_stmt) + if limit_clause: + text += " " + limit_clause + + if ( + self.implicit_returning or update_stmt._returning + ) and not self.returning_precedes_values: + text += " " + self.returning_clause( + update_stmt, + self.implicit_returning or update_stmt._returning, + populate_result_map=toplevel, + ) + + if self.ctes: + nesting_level = len(self.stack) if not toplevel else None + text = self._render_cte_clause(nesting_level=nesting_level) + 
text + + if warn_linting: + assert from_linter is not None + from_linter.warn(stmt_type="UPDATE") + + self.stack.pop(-1) + + return text + + def delete_extra_from_clause( + self, delete_stmt, from_table, extra_froms, from_hints, **kw + ): + """Provide a hook to override the generation of an + DELETE..FROM clause. + + This can be used to implement DELETE..USING for example. + + MySQL and MSSQL override this. + + """ + raise NotImplementedError( + "This backend does not support multiple-table " + "criteria within DELETE" + ) + + def delete_table_clause(self, delete_stmt, from_table, extra_froms, **kw): + return from_table._compiler_dispatch( + self, asfrom=True, iscrud=True, **kw + ) + + def visit_delete(self, delete_stmt, visiting_cte=None, **kw): + compile_state = delete_stmt._compile_state_factory( + delete_stmt, self, **kw + ) + delete_stmt = compile_state.statement + + if visiting_cte is not None: + kw["visiting_cte"] = visiting_cte + toplevel = False + else: + toplevel = not self.stack + + if toplevel: + self.isdelete = True + if not self.dml_compile_state: + self.dml_compile_state = compile_state + if not self.compile_state: + self.compile_state = compile_state + + if self.linting & COLLECT_CARTESIAN_PRODUCTS: + from_linter = FromLinter({}, set()) + warn_linting = self.linting & WARN_LINTING + if toplevel: + self.from_linter = from_linter + else: + from_linter = None + warn_linting = False + + extra_froms = compile_state._extra_froms + + correlate_froms = {delete_stmt.table}.union(extra_froms) + self.stack.append( + { + "correlate_froms": correlate_froms, + "asfrom_froms": correlate_froms, + "selectable": delete_stmt, + } + ) + + text = "DELETE " + + if delete_stmt._prefixes: + text += self._generate_prefixes( + delete_stmt, delete_stmt._prefixes, **kw + ) + + text += "FROM " + + try: + table_text = self.delete_table_clause( + delete_stmt, + delete_stmt.table, + extra_froms, + from_linter=from_linter, + ) + except TypeError: + # anticipate 3rd party dialects that don't include **kw + # TODO: remove in 2.1 + table_text = self.delete_table_clause( + delete_stmt, delete_stmt.table, extra_froms + ) + if from_linter: + _ = self.process(delete_stmt.table, from_linter=from_linter) + + crud._get_crud_params(self, delete_stmt, compile_state, toplevel, **kw) + + if delete_stmt._hints: + dialect_hints, table_text = self._setup_crud_hints( + delete_stmt, table_text + ) + else: + dialect_hints = None + + if delete_stmt._independent_ctes: + self._dispatch_independent_ctes(delete_stmt, kw) + + text += table_text + + if ( + self.implicit_returning or delete_stmt._returning + ) and self.returning_precedes_values: + text += " " + self.returning_clause( + delete_stmt, + self.implicit_returning or delete_stmt._returning, + populate_result_map=toplevel, + ) + + if extra_froms: + extra_from_text = self.delete_extra_from_clause( + delete_stmt, + delete_stmt.table, + extra_froms, + dialect_hints, + from_linter=from_linter, + **kw, + ) + if extra_from_text: + text += " " + extra_from_text + + if delete_stmt._where_criteria: + t = self._generate_delimited_and_list( + delete_stmt._where_criteria, from_linter=from_linter, **kw + ) + if t: + text += " WHERE " + t + + limit_clause = self.delete_limit_clause(delete_stmt) + if limit_clause: + text += " " + limit_clause + + if ( + self.implicit_returning or delete_stmt._returning + ) and not self.returning_precedes_values: + text += " " + self.returning_clause( + delete_stmt, + self.implicit_returning or delete_stmt._returning, + populate_result_map=toplevel, + ) + + 
if self.ctes:
+            nesting_level = len(self.stack) if not toplevel else None
+            text = self._render_cte_clause(nesting_level=nesting_level) + text
+
+        if warn_linting:
+            assert from_linter is not None
+            from_linter.warn(stmt_type="DELETE")
+
+        self.stack.pop(-1)
+
+        return text
+
+    def visit_savepoint(self, savepoint_stmt, **kw):
+        return "SAVEPOINT %s" % self.preparer.format_savepoint(savepoint_stmt)
+
+    def visit_rollback_to_savepoint(self, savepoint_stmt, **kw):
+        return "ROLLBACK TO SAVEPOINT %s" % self.preparer.format_savepoint(
+            savepoint_stmt
+        )
+
+    def visit_release_savepoint(self, savepoint_stmt, **kw):
+        return "RELEASE SAVEPOINT %s" % self.preparer.format_savepoint(
+            savepoint_stmt
+        )
+
+
+class StrSQLCompiler(SQLCompiler):
+    """A :class:`.SQLCompiler` subclass which allows a small selection
+    of non-standard SQL features to render into a string value.
+
+    The :class:`.StrSQLCompiler` is invoked whenever a Core expression
+    element is directly stringified without calling upon the
+    :meth:`_expression.ClauseElement.compile` method.
+    It can render a limited set
+    of non-standard SQL constructs to assist in basic stringification,
+    however for more substantial custom or dialect-specific SQL constructs,
+    it will be necessary to make use of
+    :meth:`_expression.ClauseElement.compile`
+    directly.
+
+    .. seealso::
+
+        :ref:`faq_sql_expression_string`
+
+    """
+
+    def _fallback_column_name(self, column):
+        return "<name unknown>"
+
+    @util.preload_module("sqlalchemy.engine.url")
+    def visit_unsupported_compilation(self, element, err, **kw):
+        if element.stringify_dialect != "default":
+            url = util.preloaded.engine_url
+            dialect = url.URL.create(element.stringify_dialect).get_dialect()()
+
+            compiler = dialect.statement_compiler(
+                dialect, None, _supporting_against=self
+            )
+            if not isinstance(compiler, StrSQLCompiler):
+                return compiler.process(element, **kw)
+
+        return super().visit_unsupported_compilation(element, err)
+
+    def visit_getitem_binary(self, binary, operator, **kw):
+        return "%s[%s]" % (
+            self.process(binary.left, **kw),
+            self.process(binary.right, **kw),
+        )
+
+    def visit_json_getitem_op_binary(self, binary, operator, **kw):
+        return self.visit_getitem_binary(binary, operator, **kw)
+
+    def visit_json_path_getitem_op_binary(self, binary, operator, **kw):
+        return self.visit_getitem_binary(binary, operator, **kw)
+
+    def visit_sequence(self, sequence, **kw):
+        return (
+            f"<next sequence value: {self.preparer.format_sequence(sequence)}>"
+        )
+
+    def returning_clause(
+        self,
+        stmt: UpdateBase,
+        returning_cols: Sequence[_ColumnsClauseElement],
+        *,
+        populate_result_map: bool,
+        **kw: Any,
+    ) -> str:
+        columns = [
+            self._label_select_column(None, c, True, False, {})
+            for c in base._select_iterables(returning_cols)
+        ]
+        return "RETURNING " + ", ".join(columns)
+
+    def update_from_clause(
+        self, update_stmt, from_table, extra_froms, from_hints, **kw
+    ):
+        kw["asfrom"] = True
+        return "FROM " + ", ".join(
+            t._compiler_dispatch(self, fromhints=from_hints, **kw)
+            for t in extra_froms
+        )
+
+    def delete_extra_from_clause(
+        self, delete_stmt, from_table, extra_froms, from_hints, **kw
+    ):
+        kw["asfrom"] = True
+        return ", " + ", ".join(
+            t._compiler_dispatch(self, fromhints=from_hints, **kw)
+            for t in extra_froms
+        )
+
+    def visit_empty_set_expr(self, element_types, **kw):
+        return "SELECT 1 WHERE 1!=1"
+
+    def get_from_hint_text(self, table, text):
+        return "[%s]" % text
+
+    def visit_regexp_match_op_binary(self, binary, operator, **kw):
+        return self._generate_generic_binary(binary, " <regexp> ", **kw)
+
+    def visit_not_regexp_match_op_binary(self, binary, operator, **kw):
+        return self._generate_generic_binary(binary, " <not regexp> ", **kw)
+
+    def visit_regexp_replace_op_binary(self, binary, operator, **kw):
+        return "<regexp replace>(%s, %s)" % (
+            binary.left._compiler_dispatch(self, **kw),
+            binary.right._compiler_dispatch(self, **kw),
+        )
+
+    def visit_try_cast(self, cast, **kwargs):
+        return "TRY_CAST(%s AS %s)" % (
+            cast.clause._compiler_dispatch(self, **kwargs),
+            cast.typeclause._compiler_dispatch(self, **kwargs),
+        )
+
+
+class DDLCompiler(Compiled):
+    is_ddl = True
+
+    if TYPE_CHECKING:
+
+        def __init__(
+            self,
+            dialect: Dialect,
+            statement: ExecutableDDLElement,
+            schema_translate_map: Optional[SchemaTranslateMapType] = ...,
+            render_schema_translate: bool = ...,
+            compile_kwargs: Mapping[str, Any] = ...,
+        ): ...
+
+    @util.ro_memoized_property
+    def sql_compiler(self) -> SQLCompiler:
+        return self.dialect.statement_compiler(
+            self.dialect, None, schema_translate_map=self.schema_translate_map
+        )
+
+    @util.memoized_property
+    def type_compiler(self):
+        return self.dialect.type_compiler_instance
+
+    def construct_params(
+        self,
+        params: Optional[_CoreSingleExecuteParams] = None,
+        extracted_parameters: Optional[Sequence[BindParameter[Any]]] = None,
+        escape_names: bool = True,
+    ) -> Optional[_MutableCoreSingleExecuteParams]:
+        return None
+
+    def visit_ddl(self, ddl, **kwargs):
+        # table events can substitute table and schema name
+        context = ddl.context
+        if isinstance(ddl.target, schema.Table):
+            context = context.copy()
+
+            preparer = self.preparer
+            path = preparer.format_table_seq(ddl.target)
+            if len(path) == 1:
+                table, sch = path[0], ""
+            else:
+                table, sch = path[-1], path[0]
+
+            context.setdefault("table", table)
+            context.setdefault("schema", sch)
+            context.setdefault("fullname", preparer.format_table(ddl.target))
+
+        return self.sql_compiler.post_process_text(ddl.statement % context)
+
+    def visit_create_schema(self, create, **kw):
+        text = "CREATE SCHEMA "
+        if create.if_not_exists:
+            text += "IF NOT EXISTS "
+        return text + self.preparer.format_schema(create.element)
+
+    def visit_drop_schema(self, drop, **kw):
+        text = "DROP SCHEMA "
+        if drop.if_exists:
+            text += "IF EXISTS "
+        text += self.preparer.format_schema(drop.element)
+        if drop.cascade:
+            text += " CASCADE"
+        return text
+
+    def visit_create_table(self, create, **kw):
+        table = create.element
+        preparer = self.preparer
+
+        text = "\nCREATE "
+        if table._prefixes:
+            text += " ".join(table._prefixes) + " "
+
+        text += "TABLE "
+        if create.if_not_exists:
+            text += "IF NOT EXISTS "
+
+        text += preparer.format_table(table) + " "
+
+        create_table_suffix = self.create_table_suffix(table)
+        if create_table_suffix:
+            text += create_table_suffix + " "
+
+        text += "("
+
+        separator = "\n"
+
+        # if only one primary key, specify it along with the column
+        first_pk = False
+        for create_column in create.columns:
+            column = create_column.element
+            try:
+                processed = self.process(
+                    create_column, first_pk=column.primary_key and not first_pk
+                )
+                if processed is not None:
+                    text += separator
+                    separator = ", \n"
+                    text += "\t" + processed
+                if column.primary_key:
+                    first_pk = True
+            except exc.CompileError as ce:
+                raise exc.CompileError(
+                    "(in table '%s', column '%s'): %s"
+                    % (table.description, column.name, ce.args[0])
+                ) from ce
+
+        const = self.create_table_constraints(
+            table,
+            _include_foreign_key_constraints=create.include_foreign_key_constraints,  # noqa
+        )
+        if const:
+            text += separator + "\t" + const
+
+        text += "\n)%s\n\n" %
self.post_create_table(table) + return text + + def visit_create_column(self, create, first_pk=False, **kw): + column = create.element + + if column.system: + return None + + text = self.get_column_specification(column, first_pk=first_pk) + const = " ".join( + self.process(constraint) for constraint in column.constraints + ) + if const: + text += " " + const + + return text + + def create_table_constraints( + self, table, _include_foreign_key_constraints=None, **kw + ): + # On some DB order is significant: visit PK first, then the + # other constraints (engine.ReflectionTest.testbasic failed on FB2) + constraints = [] + if table.primary_key: + constraints.append(table.primary_key) + + all_fkcs = table.foreign_key_constraints + if _include_foreign_key_constraints is not None: + omit_fkcs = all_fkcs.difference(_include_foreign_key_constraints) + else: + omit_fkcs = set() + + constraints.extend( + [ + c + for c in table._sorted_constraints + if c is not table.primary_key and c not in omit_fkcs + ] + ) + + return ", \n\t".join( + p + for p in ( + self.process(constraint) + for constraint in constraints + if (constraint._should_create_for_compiler(self)) + and ( + not self.dialect.supports_alter + or not getattr(constraint, "use_alter", False) + ) + ) + if p is not None + ) + + def visit_drop_table(self, drop, **kw): + text = "\nDROP TABLE " + if drop.if_exists: + text += "IF EXISTS " + return text + self.preparer.format_table(drop.element) + + def visit_drop_view(self, drop, **kw): + return "\nDROP VIEW " + self.preparer.format_table(drop.element) + + def _verify_index_table(self, index: Index) -> None: + if index.table is None: + raise exc.CompileError( + "Index '%s' is not associated with any table." % index.name + ) + + def visit_create_index( + self, create, include_schema=False, include_table_schema=True, **kw + ): + index = create.element + self._verify_index_table(index) + preparer = self.preparer + text = "CREATE " + if index.unique: + text += "UNIQUE " + if index.name is None: + raise exc.CompileError( + "CREATE INDEX requires that the index have a name" + ) + + text += "INDEX " + if create.if_not_exists: + text += "IF NOT EXISTS " + + text += "%s ON %s (%s)" % ( + self._prepared_index_name(index, include_schema=include_schema), + preparer.format_table( + index.table, use_schema=include_table_schema + ), + ", ".join( + self.sql_compiler.process( + expr, include_table=False, literal_binds=True + ) + for expr in index.expressions + ), + ) + return text + + def visit_drop_index(self, drop, **kw): + index = drop.element + + if index.name is None: + raise exc.CompileError( + "DROP INDEX requires that the index have a name" + ) + text = "\nDROP INDEX " + if drop.if_exists: + text += "IF EXISTS " + + return text + self._prepared_index_name(index, include_schema=True) + + def _prepared_index_name( + self, index: Index, include_schema: bool = False + ) -> str: + if index.table is not None: + effective_schema = self.preparer.schema_for_object(index.table) + else: + effective_schema = None + if include_schema and effective_schema: + schema_name = self.preparer.quote_schema(effective_schema) + else: + schema_name = None + + index_name = self.preparer.format_index(index) + + if schema_name: + index_name = schema_name + "." 
+ index_name + return index_name + + def visit_add_constraint(self, create, **kw): + return "ALTER TABLE %s ADD %s" % ( + self.preparer.format_table(create.element.table), + self.process(create.element), + ) + + def visit_set_table_comment(self, create, **kw): + return "COMMENT ON TABLE %s IS %s" % ( + self.preparer.format_table(create.element), + self.sql_compiler.render_literal_value( + create.element.comment, sqltypes.String() + ), + ) + + def visit_drop_table_comment(self, drop, **kw): + return "COMMENT ON TABLE %s IS NULL" % self.preparer.format_table( + drop.element + ) + + def visit_set_column_comment(self, create, **kw): + return "COMMENT ON COLUMN %s IS %s" % ( + self.preparer.format_column( + create.element, use_table=True, use_schema=True + ), + self.sql_compiler.render_literal_value( + create.element.comment, sqltypes.String() + ), + ) + + def visit_drop_column_comment(self, drop, **kw): + return "COMMENT ON COLUMN %s IS NULL" % self.preparer.format_column( + drop.element, use_table=True + ) + + def visit_set_constraint_comment(self, create, **kw): + raise exc.UnsupportedCompilationError(self, type(create)) + + def visit_drop_constraint_comment(self, drop, **kw): + raise exc.UnsupportedCompilationError(self, type(drop)) + + def get_identity_options(self, identity_options): + text = [] + if identity_options.increment is not None: + text.append("INCREMENT BY %d" % identity_options.increment) + if identity_options.start is not None: + text.append("START WITH %d" % identity_options.start) + if identity_options.minvalue is not None: + text.append("MINVALUE %d" % identity_options.minvalue) + if identity_options.maxvalue is not None: + text.append("MAXVALUE %d" % identity_options.maxvalue) + if identity_options.nominvalue is not None: + text.append("NO MINVALUE") + if identity_options.nomaxvalue is not None: + text.append("NO MAXVALUE") + if identity_options.cache is not None: + text.append("CACHE %d" % identity_options.cache) + if identity_options.cycle is not None: + text.append("CYCLE" if identity_options.cycle else "NO CYCLE") + return " ".join(text) + + def visit_create_sequence(self, create, prefix=None, **kw): + text = "CREATE SEQUENCE " + if create.if_not_exists: + text += "IF NOT EXISTS " + text += self.preparer.format_sequence(create.element) + + if prefix: + text += prefix + options = self.get_identity_options(create.element) + if options: + text += " " + options + return text + + def visit_drop_sequence(self, drop, **kw): + text = "DROP SEQUENCE " + if drop.if_exists: + text += "IF EXISTS " + return text + self.preparer.format_sequence(drop.element) + + def visit_drop_constraint(self, drop, **kw): + constraint = drop.element + if constraint.name is not None: + formatted_name = self.preparer.format_constraint(constraint) + else: + formatted_name = None + + if formatted_name is None: + raise exc.CompileError( + "Can't emit DROP CONSTRAINT for constraint %r; " + "it has no name" % drop.element + ) + return "ALTER TABLE %s DROP CONSTRAINT %s%s%s" % ( + self.preparer.format_table(drop.element.table), + "IF EXISTS " if drop.if_exists else "", + formatted_name, + " CASCADE" if drop.cascade else "", + ) + + def get_column_specification(self, column, **kwargs): + colspec = ( + self.preparer.format_column(column) + + " " + + self.dialect.type_compiler_instance.process( + column.type, type_expression=column + ) + ) + default = self.get_column_default_string(column) + if default is not None: + colspec += " DEFAULT " + default + + if column.computed is not None: + colspec += " " + 
self.process(column.computed) + + if ( + column.identity is not None + and self.dialect.supports_identity_columns + ): + colspec += " " + self.process(column.identity) + + if not column.nullable and ( + not column.identity or not self.dialect.supports_identity_columns + ): + colspec += " NOT NULL" + return colspec + + def create_table_suffix(self, table): + return "" + + def post_create_table(self, table): + return "" + + def get_column_default_string(self, column: Column[Any]) -> Optional[str]: + if isinstance(column.server_default, schema.DefaultClause): + return self.render_default_string(column.server_default.arg) + else: + return None + + def render_default_string(self, default: Union[Visitable, str]) -> str: + if isinstance(default, str): + return self.sql_compiler.render_literal_value( + default, sqltypes.STRINGTYPE + ) + else: + return self.sql_compiler.process(default, literal_binds=True) + + def visit_table_or_column_check_constraint(self, constraint, **kw): + if constraint.is_column_level: + return self.visit_column_check_constraint(constraint) + else: + return self.visit_check_constraint(constraint) + + def visit_check_constraint(self, constraint, **kw): + text = "" + if constraint.name is not None: + formatted_name = self.preparer.format_constraint(constraint) + if formatted_name is not None: + text += "CONSTRAINT %s " % formatted_name + text += "CHECK (%s)" % self.sql_compiler.process( + constraint.sqltext, include_table=False, literal_binds=True + ) + text += self.define_constraint_deferrability(constraint) + return text + + def visit_column_check_constraint(self, constraint, **kw): + text = "" + if constraint.name is not None: + formatted_name = self.preparer.format_constraint(constraint) + if formatted_name is not None: + text += "CONSTRAINT %s " % formatted_name + text += "CHECK (%s)" % self.sql_compiler.process( + constraint.sqltext, include_table=False, literal_binds=True + ) + text += self.define_constraint_deferrability(constraint) + return text + + def visit_primary_key_constraint( + self, constraint: PrimaryKeyConstraint, **kw: Any + ) -> str: + if len(constraint) == 0: + return "" + text = "" + if constraint.name is not None: + formatted_name = self.preparer.format_constraint(constraint) + if formatted_name is not None: + text += "CONSTRAINT %s " % formatted_name + text += "PRIMARY KEY " + text += "(%s)" % ", ".join( + self.preparer.quote(c.name) + for c in ( + constraint.columns_autoinc_first + if constraint._implicit_generated + else constraint.columns + ) + ) + text += self.define_constraint_deferrability(constraint) + return text + + def visit_foreign_key_constraint(self, constraint, **kw): + preparer = self.preparer + text = "" + if constraint.name is not None: + formatted_name = self.preparer.format_constraint(constraint) + if formatted_name is not None: + text += "CONSTRAINT %s " % formatted_name + remote_table = list(constraint.elements)[0].column.table + text += "FOREIGN KEY(%s) REFERENCES %s (%s)" % ( + ", ".join( + preparer.quote(f.parent.name) for f in constraint.elements + ), + self.define_constraint_remote_table( + constraint, remote_table, preparer + ), + ", ".join( + preparer.quote(f.column.name) for f in constraint.elements + ), + ) + text += self.define_constraint_match(constraint) + text += self.define_constraint_cascades(constraint) + text += self.define_constraint_deferrability(constraint) + return text + + def define_constraint_remote_table(self, constraint, table, preparer): + """Format the remote table clause of a CREATE CONSTRAINT 
clause.""" + + return preparer.format_table(table) + + def visit_unique_constraint( + self, constraint: UniqueConstraint, **kw: Any + ) -> str: + if len(constraint) == 0: + return "" + text = "" + if constraint.name is not None: + formatted_name = self.preparer.format_constraint(constraint) + if formatted_name is not None: + text += "CONSTRAINT %s " % formatted_name + text += "UNIQUE %s(%s)" % ( + self.define_unique_constraint_distinct(constraint, **kw), + ", ".join(self.preparer.quote(c.name) for c in constraint), + ) + text += self.define_constraint_deferrability(constraint) + return text + + def define_unique_constraint_distinct( + self, constraint: UniqueConstraint, **kw: Any + ) -> str: + return "" + + def define_constraint_cascades( + self, constraint: ForeignKeyConstraint + ) -> str: + text = "" + if constraint.ondelete is not None: + text += self.define_constraint_ondelete_cascade(constraint) + + if constraint.onupdate is not None: + text += self.define_constraint_onupdate_cascade(constraint) + return text + + def define_constraint_ondelete_cascade( + self, constraint: ForeignKeyConstraint + ) -> str: + return " ON DELETE %s" % self.preparer.validate_sql_phrase( + constraint.ondelete, FK_ON_DELETE + ) + + def define_constraint_onupdate_cascade( + self, constraint: ForeignKeyConstraint + ) -> str: + return " ON UPDATE %s" % self.preparer.validate_sql_phrase( + constraint.onupdate, FK_ON_UPDATE + ) + + def define_constraint_deferrability(self, constraint: Constraint) -> str: + text = "" + if constraint.deferrable is not None: + if constraint.deferrable: + text += " DEFERRABLE" + else: + text += " NOT DEFERRABLE" + if constraint.initially is not None: + text += " INITIALLY %s" % self.preparer.validate_sql_phrase( + constraint.initially, FK_INITIALLY + ) + return text + + def define_constraint_match(self, constraint): + text = "" + if constraint.match is not None: + text += " MATCH %s" % constraint.match + return text + + def visit_computed_column(self, generated, **kw): + text = "GENERATED ALWAYS AS (%s)" % self.sql_compiler.process( + generated.sqltext, include_table=False, literal_binds=True + ) + if generated.persisted is True: + text += " STORED" + elif generated.persisted is False: + text += " VIRTUAL" + return text + + def visit_identity_column(self, identity, **kw): + text = "GENERATED %s AS IDENTITY" % ( + "ALWAYS" if identity.always else "BY DEFAULT", + ) + options = self.get_identity_options(identity) + if options: + text += " (%s)" % options + return text + + +class GenericTypeCompiler(TypeCompiler): + def visit_FLOAT(self, type_: sqltypes.Float[Any], **kw: Any) -> str: + return "FLOAT" + + def visit_DOUBLE(self, type_: sqltypes.Double[Any], **kw: Any) -> str: + return "DOUBLE" + + def visit_DOUBLE_PRECISION( + self, type_: sqltypes.DOUBLE_PRECISION[Any], **kw: Any + ) -> str: + return "DOUBLE PRECISION" + + def visit_REAL(self, type_: sqltypes.REAL[Any], **kw: Any) -> str: + return "REAL" + + def visit_NUMERIC(self, type_: sqltypes.Numeric[Any], **kw: Any) -> str: + if type_.precision is None: + return "NUMERIC" + elif type_.scale is None: + return "NUMERIC(%(precision)s)" % {"precision": type_.precision} + else: + return "NUMERIC(%(precision)s, %(scale)s)" % { + "precision": type_.precision, + "scale": type_.scale, + } + + def visit_DECIMAL(self, type_: sqltypes.DECIMAL[Any], **kw: Any) -> str: + if type_.precision is None: + return "DECIMAL" + elif type_.scale is None: + return "DECIMAL(%(precision)s)" % {"precision": type_.precision} + else: + return 
"DECIMAL(%(precision)s, %(scale)s)" % { + "precision": type_.precision, + "scale": type_.scale, + } + + def visit_INTEGER(self, type_: sqltypes.Integer, **kw: Any) -> str: + return "INTEGER" + + def visit_SMALLINT(self, type_: sqltypes.SmallInteger, **kw: Any) -> str: + return "SMALLINT" + + def visit_BIGINT(self, type_: sqltypes.BigInteger, **kw: Any) -> str: + return "BIGINT" + + def visit_TIMESTAMP(self, type_: sqltypes.TIMESTAMP, **kw: Any) -> str: + return "TIMESTAMP" + + def visit_DATETIME(self, type_: sqltypes.DateTime, **kw: Any) -> str: + return "DATETIME" + + def visit_DATE(self, type_: sqltypes.Date, **kw: Any) -> str: + return "DATE" + + def visit_TIME(self, type_: sqltypes.Time, **kw: Any) -> str: + return "TIME" + + def visit_CLOB(self, type_: sqltypes.CLOB, **kw: Any) -> str: + return "CLOB" + + def visit_NCLOB(self, type_: sqltypes.Text, **kw: Any) -> str: + return "NCLOB" + + def _render_string_type( + self, name: str, length: Optional[int], collation: Optional[str] + ) -> str: + text = name + if length: + text += f"({length})" + if collation: + text += f' COLLATE "{collation}"' + return text + + def visit_CHAR(self, type_: sqltypes.CHAR, **kw: Any) -> str: + return self._render_string_type("CHAR", type_.length, type_.collation) + + def visit_NCHAR(self, type_: sqltypes.NCHAR, **kw: Any) -> str: + return self._render_string_type("NCHAR", type_.length, type_.collation) + + def visit_VARCHAR(self, type_: sqltypes.String, **kw: Any) -> str: + return self._render_string_type( + "VARCHAR", type_.length, type_.collation + ) + + def visit_NVARCHAR(self, type_: sqltypes.NVARCHAR, **kw: Any) -> str: + return self._render_string_type( + "NVARCHAR", type_.length, type_.collation + ) + + def visit_TEXT(self, type_: sqltypes.Text, **kw: Any) -> str: + return self._render_string_type("TEXT", type_.length, type_.collation) + + def visit_UUID(self, type_: sqltypes.Uuid[Any], **kw: Any) -> str: + return "UUID" + + def visit_BLOB(self, type_: sqltypes.LargeBinary, **kw: Any) -> str: + return "BLOB" + + def visit_BINARY(self, type_: sqltypes.BINARY, **kw: Any) -> str: + return "BINARY" + (type_.length and "(%d)" % type_.length or "") + + def visit_VARBINARY(self, type_: sqltypes.VARBINARY, **kw: Any) -> str: + return "VARBINARY" + (type_.length and "(%d)" % type_.length or "") + + def visit_BOOLEAN(self, type_: sqltypes.Boolean, **kw: Any) -> str: + return "BOOLEAN" + + def visit_uuid(self, type_: sqltypes.Uuid[Any], **kw: Any) -> str: + if not type_.native_uuid or not self.dialect.supports_native_uuid: + return self._render_string_type("CHAR", length=32, collation=None) + else: + return self.visit_UUID(type_, **kw) + + def visit_large_binary( + self, type_: sqltypes.LargeBinary, **kw: Any + ) -> str: + return self.visit_BLOB(type_, **kw) + + def visit_boolean(self, type_: sqltypes.Boolean, **kw: Any) -> str: + return self.visit_BOOLEAN(type_, **kw) + + def visit_time(self, type_: sqltypes.Time, **kw: Any) -> str: + return self.visit_TIME(type_, **kw) + + def visit_datetime(self, type_: sqltypes.DateTime, **kw: Any) -> str: + return self.visit_DATETIME(type_, **kw) + + def visit_date(self, type_: sqltypes.Date, **kw: Any) -> str: + return self.visit_DATE(type_, **kw) + + def visit_big_integer(self, type_: sqltypes.BigInteger, **kw: Any) -> str: + return self.visit_BIGINT(type_, **kw) + + def visit_small_integer( + self, type_: sqltypes.SmallInteger, **kw: Any + ) -> str: + return self.visit_SMALLINT(type_, **kw) + + def visit_integer(self, type_: sqltypes.Integer, **kw: Any) -> str: + return 
self.visit_INTEGER(type_, **kw) + + def visit_real(self, type_: sqltypes.REAL[Any], **kw: Any) -> str: + return self.visit_REAL(type_, **kw) + + def visit_float(self, type_: sqltypes.Float[Any], **kw: Any) -> str: + return self.visit_FLOAT(type_, **kw) + + def visit_double(self, type_: sqltypes.Double[Any], **kw: Any) -> str: + return self.visit_DOUBLE(type_, **kw) + + def visit_numeric(self, type_: sqltypes.Numeric[Any], **kw: Any) -> str: + return self.visit_NUMERIC(type_, **kw) + + def visit_string(self, type_: sqltypes.String, **kw: Any) -> str: + return self.visit_VARCHAR(type_, **kw) + + def visit_unicode(self, type_: sqltypes.Unicode, **kw: Any) -> str: + return self.visit_VARCHAR(type_, **kw) + + def visit_text(self, type_: sqltypes.Text, **kw: Any) -> str: + return self.visit_TEXT(type_, **kw) + + def visit_unicode_text( + self, type_: sqltypes.UnicodeText, **kw: Any + ) -> str: + return self.visit_TEXT(type_, **kw) + + def visit_enum(self, type_: sqltypes.Enum, **kw: Any) -> str: + return self.visit_VARCHAR(type_, **kw) + + def visit_null(self, type_, **kw): + raise exc.CompileError( + "Can't generate DDL for %r; " + "did you forget to specify a " + "type on this Column?" % type_ + ) + + def visit_type_decorator( + self, type_: TypeDecorator[Any], **kw: Any + ) -> str: + return self.process(type_.type_engine(self.dialect), **kw) + + def visit_user_defined( + self, type_: UserDefinedType[Any], **kw: Any + ) -> str: + return type_.get_col_spec(**kw) + + +class StrSQLTypeCompiler(GenericTypeCompiler): + def process(self, type_, **kw): + try: + _compiler_dispatch = type_._compiler_dispatch + except AttributeError: + return self._visit_unknown(type_, **kw) + else: + return _compiler_dispatch(self, **kw) + + def __getattr__(self, key): + if key.startswith("visit_"): + return self._visit_unknown + else: + raise AttributeError(key) + + def _visit_unknown(self, type_, **kw): + if type_.__class__.__name__ == type_.__class__.__name__.upper(): + return type_.__class__.__name__ + else: + return repr(type_) + + def visit_null(self, type_, **kw): + return "NULL" + + def visit_user_defined(self, type_, **kw): + try: + get_col_spec = type_.get_col_spec + except AttributeError: + return repr(type_) + else: + return get_col_spec(**kw) + + +class _SchemaForObjectCallable(Protocol): + def __call__(self, __obj: Any) -> str: ... + + +class _BindNameForColProtocol(Protocol): + def __call__(self, col: ColumnClause[Any]) -> str: ... + + +class IdentifierPreparer: + """Handle quoting and case-folding of identifiers based on options.""" + + reserved_words = RESERVED_WORDS + + legal_characters = LEGAL_CHARACTERS + + illegal_initial_characters = ILLEGAL_INITIAL_CHARACTERS + + initial_quote: str + + final_quote: str + + _strings: MutableMapping[str, str] + + schema_for_object: _SchemaForObjectCallable = operator.attrgetter("schema") + """Return the .schema attribute for an object. + + For the default IdentifierPreparer, the schema for an object is always + the value of the ".schema" attribute. if the preparer is replaced + with one that has a non-empty schema_translate_map, the value of the + ".schema" attribute is rendered a symbol that will be converted to a + real schema name from the mapping post-compile. 
+ + """ + + _includes_none_schema_translate: bool = False + + def __init__( + self, + dialect: Dialect, + initial_quote: str = '"', + final_quote: Optional[str] = None, + escape_quote: str = '"', + quote_case_sensitive_collations: bool = True, + omit_schema: bool = False, + ): + """Construct a new ``IdentifierPreparer`` object. + + initial_quote + Character that begins a delimited identifier. + + final_quote + Character that ends a delimited identifier. Defaults to + `initial_quote`. + + omit_schema + Prevent prepending schema name. Useful for databases that do + not support schemae. + """ + + self.dialect = dialect + self.initial_quote = initial_quote + self.final_quote = final_quote or self.initial_quote + self.escape_quote = escape_quote + self.escape_to_quote = self.escape_quote * 2 + self.omit_schema = omit_schema + self.quote_case_sensitive_collations = quote_case_sensitive_collations + self._strings = {} + self._double_percents = self.dialect.paramstyle in ( + "format", + "pyformat", + ) + + def _with_schema_translate(self, schema_translate_map): + prep = self.__class__.__new__(self.__class__) + prep.__dict__.update(self.__dict__) + + includes_none = None in schema_translate_map + + def symbol_getter(obj): + name = obj.schema + if obj._use_schema_map and (name is not None or includes_none): + if name is not None and ("[" in name or "]" in name): + raise exc.CompileError( + "Square bracket characters ([]) not supported " + "in schema translate name '%s'" % name + ) + return quoted_name( + "__[SCHEMA_%s]" % (name or "_none"), quote=False + ) + else: + return obj.schema + + prep.schema_for_object = symbol_getter + prep._includes_none_schema_translate = includes_none + return prep + + def _render_schema_translates( + self, statement: str, schema_translate_map: SchemaTranslateMapType + ) -> str: + d = schema_translate_map + if None in d: + if not self._includes_none_schema_translate: + raise exc.InvalidRequestError( + "schema translate map which previously did not have " + "`None` present as a key now has `None` present; compiled " + "statement may lack adequate placeholders. Please use " + "consistent keys in successive " + "schema_translate_map dictionaries." + ) + + d["_none"] = d[None] # type: ignore[index] + + def replace(m): + name = m.group(2) + if name in d: + effective_schema = d[name] + else: + if name in (None, "_none"): + raise exc.InvalidRequestError( + "schema translate map which previously had `None` " + "present as a key now no longer has it present; don't " + "know how to apply schema for compiled statement. " + "Please use consistent keys in successive " + "schema_translate_map dictionaries." + ) + effective_schema = name + + if not effective_schema: + effective_schema = self.dialect.default_schema_name + if not effective_schema: + # TODO: no coverage here + raise exc.CompileError( + "Dialect has no default schema name; can't " + "use None as dynamic schema target." + ) + return self.quote_schema(effective_schema) + + return re.sub(r"(__\[SCHEMA_([^\]]+)\])", replace, statement) + + def _escape_identifier(self, value: str) -> str: + """Escape an identifier. + + Subclasses should override this to provide database-dependent + escaping behavior. + """ + + value = value.replace(self.escape_quote, self.escape_to_quote) + if self._double_percents: + value = value.replace("%", "%%") + return value + + def _unescape_identifier(self, value: str) -> str: + """Canonicalize an escaped identifier. 
+ + Subclasses should override this to provide database-dependent + unescaping behavior that reverses _escape_identifier. + """ + + return value.replace(self.escape_to_quote, self.escape_quote) + + def validate_sql_phrase(self, element, reg): + """keyword sequence filter. + + a filter for elements that are intended to represent keyword sequences, + such as "INITIALLY", "INITIALLY DEFERRED", etc. no special characters + should be present. + + .. versionadded:: 1.3 + + """ + + if element is not None and not reg.match(element): + raise exc.CompileError( + "Unexpected SQL phrase: %r (matching against %r)" + % (element, reg.pattern) + ) + return element + + def quote_identifier(self, value: str) -> str: + """Quote an identifier. + + Subclasses should override this to provide database-dependent + quoting behavior. + """ + + return ( + self.initial_quote + + self._escape_identifier(value) + + self.final_quote + ) + + def _requires_quotes(self, value: str) -> bool: + """Return True if the given identifier requires quoting.""" + lc_value = value.lower() + return ( + lc_value in self.reserved_words + or value[0] in self.illegal_initial_characters + or not self.legal_characters.match(str(value)) + or (lc_value != value) + ) + + def _requires_quotes_illegal_chars(self, value): + """Return True if the given identifier requires quoting, but + not taking case convention into account.""" + return not self.legal_characters.match(str(value)) + + def quote_schema(self, schema: str, force: Any = None) -> str: + """Conditionally quote a schema name. + + + The name is quoted if it is a reserved word, contains quote-necessary + characters, or is an instance of :class:`.quoted_name` which includes + ``quote`` set to ``True``. + + Subclasses can override this to provide database-dependent + quoting behavior for schema names. + + :param schema: string schema name + :param force: unused + + .. deprecated:: 0.9 + + The :paramref:`.IdentifierPreparer.quote_schema.force` + parameter is deprecated and will be removed in a future + release. This flag has no effect on the behavior of the + :meth:`.IdentifierPreparer.quote` method; please refer to + :class:`.quoted_name`. + + """ + if force is not None: + # not using the util.deprecated_params() decorator in this + # case because of the additional function call overhead on this + # very performance-critical spot. + util.warn_deprecated( + "The IdentifierPreparer.quote_schema.force parameter is " + "deprecated and will be removed in a future release. This " + "flag has no effect on the behavior of the " + "IdentifierPreparer.quote method; please refer to " + "quoted_name().", + # deprecated 0.9. warning from 1.3 + version="0.9", + ) + + return self.quote(schema) + + def quote(self, ident: str, force: Any = None) -> str: + """Conditionally quote an identifier. + + The identifier is quoted if it is a reserved word, contains + quote-necessary characters, or is an instance of + :class:`.quoted_name` which includes ``quote`` set to ``True``. + + Subclasses can override this to provide database-dependent + quoting behavior for identifier names. + + :param ident: string identifier + :param force: unused + + .. deprecated:: 0.9 + + The :paramref:`.IdentifierPreparer.quote.force` + parameter is deprecated and will be removed in a future + release. This flag has no effect on the behavior of the + :meth:`.IdentifierPreparer.quote` method; please refer to + :class:`.quoted_name`. 
+ + """ + if force is not None: + # not using the util.deprecated_params() decorator in this + # case because of the additional function call overhead on this + # very performance-critical spot. + util.warn_deprecated( + "The IdentifierPreparer.quote.force parameter is " + "deprecated and will be removed in a future release. This " + "flag has no effect on the behavior of the " + "IdentifierPreparer.quote method; please refer to " + "quoted_name().", + # deprecated 0.9. warning from 1.3 + version="0.9", + ) + + force = getattr(ident, "quote", None) + + if force is None: + if ident in self._strings: + return self._strings[ident] + else: + if self._requires_quotes(ident): + self._strings[ident] = self.quote_identifier(ident) + else: + self._strings[ident] = ident + return self._strings[ident] + elif force: + return self.quote_identifier(ident) + else: + return ident + + def format_collation(self, collation_name): + if self.quote_case_sensitive_collations: + return self.quote(collation_name) + else: + return collation_name + + def format_sequence( + self, sequence: schema.Sequence, use_schema: bool = True + ) -> str: + name = self.quote(sequence.name) + + effective_schema = self.schema_for_object(sequence) + + if ( + not self.omit_schema + and use_schema + and effective_schema is not None + ): + name = self.quote_schema(effective_schema) + "." + name + return name + + def format_label( + self, label: Label[Any], name: Optional[str] = None + ) -> str: + return self.quote(name or label.name) + + def format_alias( + self, alias: Optional[AliasedReturnsRows], name: Optional[str] = None + ) -> str: + if name is None: + assert alias is not None + return self.quote(alias.name) + else: + return self.quote(name) + + def format_savepoint(self, savepoint, name=None): + # Running the savepoint name through quoting is unnecessary + # for all known dialects. This is here to support potential + # third party use cases + ident = name or savepoint.ident + if self._requires_quotes(ident): + ident = self.quote_identifier(ident) + return ident + + @util.preload_module("sqlalchemy.sql.naming") + def format_constraint( + self, constraint: Union[Constraint, Index], _alembic_quote: bool = True + ) -> Optional[str]: + naming = util.preloaded.sql_naming + + if constraint.name is _NONE_NAME: + name = naming._constraint_name_for_table( + constraint, constraint.table + ) + + if name is None: + return None + else: + name = constraint.name + + assert name is not None + if constraint.__visit_name__ == "index": + return self.truncate_and_render_index_name( + name, _alembic_quote=_alembic_quote + ) + else: + return self.truncate_and_render_constraint_name( + name, _alembic_quote=_alembic_quote + ) + + def truncate_and_render_index_name( + self, name: str, _alembic_quote: bool = True + ) -> str: + # calculate these at format time so that ad-hoc changes + # to dialect.max_identifier_length etc. can be reflected + # as IdentifierPreparer is long lived + max_ = ( + self.dialect.max_index_name_length + or self.dialect.max_identifier_length + ) + return self._truncate_and_render_maxlen_name( + name, max_, _alembic_quote + ) + + def truncate_and_render_constraint_name( + self, name: str, _alembic_quote: bool = True + ) -> str: + # calculate these at format time so that ad-hoc changes + # to dialect.max_identifier_length etc. 
can be reflected + # as IdentifierPreparer is long lived + max_ = ( + self.dialect.max_constraint_name_length + or self.dialect.max_identifier_length + ) + return self._truncate_and_render_maxlen_name( + name, max_, _alembic_quote + ) + + def _truncate_and_render_maxlen_name( + self, name: str, max_: int, _alembic_quote: bool + ) -> str: + if isinstance(name, elements._truncated_label): + if len(name) > max_: + name = name[0 : max_ - 8] + "_" + util.md5_hex(name)[-4:] + else: + self.dialect.validate_identifier(name) + + if not _alembic_quote: + return name + else: + return self.quote(name) + + def format_index(self, index: Index) -> str: + name = self.format_constraint(index) + assert name is not None + return name + + def format_table( + self, + table: FromClause, + use_schema: bool = True, + name: Optional[str] = None, + ) -> str: + """Prepare a quoted table and schema name.""" + if name is None: + if TYPE_CHECKING: + assert isinstance(table, NamedFromClause) + name = table.name + + result = self.quote(name) + + effective_schema = self.schema_for_object(table) + + if not self.omit_schema and use_schema and effective_schema: + result = self.quote_schema(effective_schema) + "." + result + return result + + def format_schema(self, name): + """Prepare a quoted schema name.""" + + return self.quote(name) + + def format_label_name( + self, + name, + anon_map=None, + ): + """Prepare a quoted column name.""" + + if anon_map is not None and isinstance( + name, elements._truncated_label + ): + name = name.apply_map(anon_map) + + return self.quote(name) + + def format_column( + self, + column: ColumnElement[Any], + use_table: bool = False, + name: Optional[str] = None, + table_name: Optional[str] = None, + use_schema: bool = False, + anon_map: Optional[Mapping[str, Any]] = None, + ) -> str: + """Prepare a quoted column name.""" + + if name is None: + name = column.name + assert name is not None + + if anon_map is not None and isinstance( + name, elements._truncated_label + ): + name = name.apply_map(anon_map) + + if not getattr(column, "is_literal", False): + if use_table: + return ( + self.format_table( + column.table, use_schema=use_schema, name=table_name + ) + + "." + + self.quote(name) + ) + else: + return self.quote(name) + else: + # literal textual elements get stuck into ColumnClause a lot, + # which shouldn't get quoted + + if use_table: + return ( + self.format_table( + column.table, use_schema=use_schema, name=table_name + ) + + "." + + name + ) + else: + return name + + def format_table_seq(self, table, use_schema=True): + """Format table name and schema as a tuple.""" + + # Dialects with more levels in their fully qualified references + # ('database', 'owner', etc.) could override this and return + # a longer sequence. 
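A hedged sketch (not part of the library source) of the conditional-quoting rules that `_requires_quotes()` / `quote()` above implement, observed through the default dialect's preparer; the identifier strings are arbitrary examples.

from sqlalchemy.engine import default

preparer = default.DefaultDialect().identifier_preparer

print(preparer.quote("account"))     # account      (legal characters, not reserved)
print(preparer.quote("select"))      # "select"     (reserved word)
print(preparer.quote("MixedCase"))   # "MixedCase"  (case-sensitive name)
print(preparer.quote("has space"))   # "has space"  (illegal characters)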
+ + effective_schema = self.schema_for_object(table) + + if not self.omit_schema and use_schema and effective_schema: + return ( + self.quote_schema(effective_schema), + self.format_table(table, use_schema=False), + ) + else: + return (self.format_table(table, use_schema=False),) + + @util.memoized_property + def _r_identifiers(self): + initial, final, escaped_final = ( + re.escape(s) + for s in ( + self.initial_quote, + self.final_quote, + self._escape_identifier(self.final_quote), + ) + ) + r = re.compile( + r"(?:" + r"(?:%(initial)s((?:%(escaped)s|[^%(final)s])+)%(final)s" + r"|([^\.]+))(?=\.|$))+" + % {"initial": initial, "final": final, "escaped": escaped_final} + ) + return r + + def unformat_identifiers(self, identifiers: str) -> Sequence[str]: + """Unpack 'schema.table.column'-like strings into components.""" + + r = self._r_identifiers + return [ + self._unescape_identifier(i) + for i in [a or b for a, b in r.findall(identifiers)] + ] diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/sql/crud.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/sql/crud.py new file mode 100644 index 0000000000000000000000000000000000000000..4a592ff7b975873e281a68fb06d51422f13295a6 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/sql/crud.py @@ -0,0 +1,1678 @@ +# sql/crud.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php +# mypy: allow-untyped-defs, allow-untyped-calls + +"""Functions used by compiler.py to determine the parameters rendered +within INSERT and UPDATE statements. + +""" +from __future__ import annotations + +import functools +import operator +from typing import Any +from typing import Callable +from typing import cast +from typing import Dict +from typing import Iterable +from typing import List +from typing import MutableMapping +from typing import NamedTuple +from typing import Optional +from typing import overload +from typing import Sequence +from typing import Set +from typing import Tuple +from typing import TYPE_CHECKING +from typing import Union + +from . import coercions +from . import dml +from . import elements +from . import roles +from .base import _DefaultDescriptionTuple +from .dml import isinsert as _compile_state_isinsert +from .elements import ColumnClause +from .schema import default_is_clause_element +from .schema import default_is_sequence +from .selectable import Select +from .selectable import TableClause +from .. import exc +from .. import util +from ..util.typing import Literal + +if TYPE_CHECKING: + from .compiler import _BindNameForColProtocol + from .compiler import SQLCompiler + from .dml import _DMLColumnElement + from .dml import DMLState + from .dml import ValuesBase + from .elements import ColumnElement + from .elements import KeyedColumnElement + from .schema import _SQLExprDefault + from .schema import Column + +REQUIRED = util.symbol( + "REQUIRED", + """ +Placeholder for the value within a :class:`.BindParameter` +which is required to be present when the statement is passed +to :meth:`_engine.Connection.execute`. + +This symbol is typically used when a :func:`_expression.insert` +or :func:`_expression.update` statement is compiled without parameter +values present. 
+ +""", +) + + +def _as_dml_column(c: ColumnElement[Any]) -> ColumnClause[Any]: + if not isinstance(c, ColumnClause): + raise exc.CompileError( + f"Can't create DML statement against column expression {c!r}" + ) + return c + + +_CrudParamElement = Tuple[ + "ColumnElement[Any]", + str, # column name + Optional[ + Union[str, "_SQLExprDefault"] + ], # bound parameter string or SQL expression to apply + Iterable[str], +] +_CrudParamElementStr = Tuple[ + "KeyedColumnElement[Any]", + str, # column name + str, # bound parameter string + Iterable[str], +] +_CrudParamElementSQLExpr = Tuple[ + "ColumnClause[Any]", + str, + "_SQLExprDefault", # SQL expression to apply + Iterable[str], +] + +_CrudParamSequence = List[_CrudParamElement] + + +class _CrudParams(NamedTuple): + single_params: _CrudParamSequence + all_multi_params: List[Sequence[_CrudParamElementStr]] + is_default_metavalue_only: bool = False + use_insertmanyvalues: bool = False + use_sentinel_columns: Optional[Sequence[Column[Any]]] = None + + +def _get_crud_params( + compiler: SQLCompiler, + stmt: ValuesBase, + compile_state: DMLState, + toplevel: bool, + **kw: Any, +) -> _CrudParams: + """create a set of tuples representing column/string pairs for use + in an INSERT or UPDATE statement. + + Also generates the Compiled object's postfetch, prefetch, and + returning column collections, used for default handling and ultimately + populating the CursorResult's prefetch_cols() and postfetch_cols() + collections. + + """ + + # note: the _get_crud_params() system was written with the notion in mind + # that INSERT, UPDATE, DELETE are always the top level statement and + # that there is only one of them. With the addition of CTEs that can + # make use of DML, this assumption is no longer accurate; the DML + # statement is not necessarily the top-level "row returning" thing + # and it is also theoretically possible (fortunately nobody has asked yet) + # to have a single statement with multiple DMLs inside of it via CTEs. + + # the current _get_crud_params() design doesn't accommodate these cases + # right now. It "just works" for a CTE that has a single DML inside of + # it, and for a CTE with multiple DML, it's not clear what would happen. + + # overall, the "compiler.XYZ" collections here would need to be in a + # per-DML structure of some kind, and DefaultDialect would need to + # navigate these collections on a per-statement basis, with additional + # emphasis on the "toplevel returning data" statement. However we + # still need to run through _get_crud_params() for all DML as we have + # Python / SQL generated column defaults that need to be rendered. + + # if there is user need for this kind of thing, it's likely a post 2.0 + # kind of change as it would require deep changes to DefaultDialect + # as well as here. + + compiler.postfetch = [] + compiler.insert_prefetch = [] + compiler.update_prefetch = [] + compiler.implicit_returning = [] + + visiting_cte = kw.get("visiting_cte", None) + if visiting_cte is not None: + # for insert -> CTE -> insert, don't populate an incoming + # _crud_accumulate_bind_names collection; the INSERT we process here + # will not be inline within the VALUES of the enclosing INSERT as the + # CTE is placed on the outside. 
See issue #9173 + kw.pop("accumulate_bind_names", None) + assert ( + "accumulate_bind_names" not in kw + ), "Don't know how to handle insert within insert without a CTE" + + # getters - these are normally just column.key, + # but in the case of mysql multi-table update, the rules for + # .key must conditionally take tablename into account + ( + _column_as_key, + _getattr_col_key, + _col_bind_name, + ) = _key_getters_for_crud_column(compiler, stmt, compile_state) + + compiler._get_bind_name_for_col = _col_bind_name + + if stmt._returning and stmt._return_defaults: + raise exc.CompileError( + "Can't compile statement that includes returning() and " + "return_defaults() simultaneously" + ) + + if compile_state.isdelete: + _setup_delete_return_defaults( + compiler, + stmt, + compile_state, + (), + _getattr_col_key, + _column_as_key, + _col_bind_name, + (), + (), + toplevel, + kw, + ) + return _CrudParams([], []) + + # no parameters in the statement, no parameters in the + # compiled params - return binds for all columns + if compiler.column_keys is None and compile_state._no_parameters: + return _CrudParams( + [ + ( + c, + compiler.preparer.format_column(c), + _create_bind_param(compiler, c, None, required=True), + (c.key,), + ) + for c in stmt.table.columns + if not c._omit_from_statements + ], + [], + ) + + stmt_parameter_tuples: Optional[ + List[Tuple[Union[str, ColumnClause[Any]], Any]] + ] + spd: Optional[MutableMapping[_DMLColumnElement, Any]] + + if ( + _compile_state_isinsert(compile_state) + and compile_state._has_multi_parameters + ): + mp = compile_state._multi_parameters + assert mp is not None + spd = mp[0] + stmt_parameter_tuples = list(spd.items()) + spd_str_key = {_column_as_key(key) for key in spd} + elif compile_state._ordered_values: + spd = compile_state._dict_parameters + stmt_parameter_tuples = compile_state._ordered_values + assert spd is not None + spd_str_key = {_column_as_key(key) for key in spd} + elif compile_state._dict_parameters: + spd = compile_state._dict_parameters + stmt_parameter_tuples = list(spd.items()) + spd_str_key = {_column_as_key(key) for key in spd} + else: + stmt_parameter_tuples = spd_str_key = None + + # if we have statement parameters - set defaults in the + # compiled params + if compiler.column_keys is None: + parameters = {} + elif stmt_parameter_tuples: + assert spd_str_key is not None + parameters = { + _column_as_key(key): REQUIRED + for key in compiler.column_keys + if key not in spd_str_key + } + else: + parameters = { + _column_as_key(key): REQUIRED for key in compiler.column_keys + } + + # create a list of column assignment clauses as tuples + values: List[_CrudParamElement] = [] + + if stmt_parameter_tuples is not None: + _get_stmt_parameter_tuples_params( + compiler, + compile_state, + parameters, + stmt_parameter_tuples, + _column_as_key, + values, + kw, + ) + + check_columns: Dict[str, ColumnClause[Any]] = {} + + # special logic that only occurs for multi-table UPDATE + # statements + if dml.isupdate(compile_state) and compile_state.is_multitable: + _get_update_multitable_params( + compiler, + stmt, + compile_state, + stmt_parameter_tuples, + check_columns, + _col_bind_name, + _getattr_col_key, + values, + kw, + ) + + if _compile_state_isinsert(compile_state) and stmt._select_names: + # is an insert from select, is not a multiparams + + assert not compile_state._has_multi_parameters + + _scan_insert_from_select_cols( + compiler, + stmt, + compile_state, + parameters, + _getattr_col_key, + _column_as_key, + _col_bind_name, + 
check_columns, + values, + toplevel, + kw, + ) + use_insertmanyvalues = False + use_sentinel_columns = None + else: + use_insertmanyvalues, use_sentinel_columns = _scan_cols( + compiler, + stmt, + compile_state, + parameters, + _getattr_col_key, + _column_as_key, + _col_bind_name, + check_columns, + values, + toplevel, + kw, + ) + + if parameters and stmt_parameter_tuples: + check = ( + set(parameters) + .intersection(_column_as_key(k) for k, v in stmt_parameter_tuples) + .difference(check_columns) + ) + if check: + raise exc.CompileError( + "Unconsumed column names: %s" + % (", ".join("%s" % (c,) for c in check)) + ) + + is_default_metavalue_only = False + + if ( + _compile_state_isinsert(compile_state) + and compile_state._has_multi_parameters + ): + # is a multiparams, is not an insert from a select + assert not stmt._select_names + multi_extended_values = _extend_values_for_multiparams( + compiler, + stmt, + compile_state, + cast( + "Sequence[_CrudParamElementStr]", + values, + ), + cast("Callable[..., str]", _column_as_key), + kw, + ) + return _CrudParams(values, multi_extended_values) + elif ( + not values + and compiler.for_executemany + and compiler.dialect.supports_default_metavalue + ): + # convert an "INSERT DEFAULT VALUES" + # into INSERT (firstcol) VALUES (DEFAULT) which can be turned + # into an in-place multi values. This supports + # insert_executemany_returning mode :) + values = [ + ( + _as_dml_column(stmt.table.columns[0]), + compiler.preparer.format_column(stmt.table.columns[0]), + compiler.dialect.default_metavalue_token, + (), + ) + ] + is_default_metavalue_only = True + + return _CrudParams( + values, + [], + is_default_metavalue_only=is_default_metavalue_only, + use_insertmanyvalues=use_insertmanyvalues, + use_sentinel_columns=use_sentinel_columns, + ) + + +@overload +def _create_bind_param( + compiler: SQLCompiler, + col: ColumnElement[Any], + value: Any, + process: Literal[True] = ..., + required: bool = False, + name: Optional[str] = None, + force_anonymous: bool = False, + **kw: Any, +) -> str: ... + + +@overload +def _create_bind_param( + compiler: SQLCompiler, + col: ColumnElement[Any], + value: Any, + **kw: Any, +) -> str: ... + + +def _create_bind_param( + compiler: SQLCompiler, + col: ColumnElement[Any], + value: Any, + process: bool = True, + required: bool = False, + name: Optional[str] = None, + force_anonymous: bool = False, + **kw: Any, +) -> Union[str, elements.BindParameter[Any]]: + if force_anonymous: + name = None + elif name is None: + name = col.key + + bindparam = elements.BindParameter( + name, value, type_=col.type, required=required + ) + bindparam._is_crud = True + if process: + return bindparam._compiler_dispatch(compiler, **kw) + else: + return bindparam + + +def _handle_values_anonymous_param(compiler, col, value, name, **kw): + # the insert() and update() constructs as of 1.4 will now produce anonymous + # bindparam() objects in the values() collections up front when given plain + # literal values. This is so that cache key behaviors, which need to + # produce bound parameters in deterministic order without invoking any + # compilation here, can be applied to these constructs when they include + # values() (but not yet multi-values, which are not included in caching + # right now). 
+ # + # in order to produce the desired "crud" style name for these parameters, + # which will also be targetable in engine/default.py through the usual + # conventions, apply our desired name to these unique parameters by + # populating the compiler truncated names cache with the desired name, + # rather than having + # compiler.visit_bindparam()->compiler._truncated_identifier make up a + # name. Saves on call counts also. + + # for INSERT/UPDATE that's a CTE, we don't need names to match to + # external parameters and these would also conflict in the case where + # multiple insert/update are combined together using CTEs + is_cte = "visiting_cte" in kw + + if ( + not is_cte + and value.unique + and isinstance(value.key, elements._truncated_label) + ): + compiler.truncated_names[("bindparam", value.key)] = name + + if value.type._isnull: + # either unique parameter, or other bound parameters that were + # passed in directly + # set type to that of the column unconditionally + value = value._with_binary_element_type(col.type) + + return value._compiler_dispatch(compiler, **kw) + + +def _key_getters_for_crud_column( + compiler: SQLCompiler, stmt: ValuesBase, compile_state: DMLState +) -> Tuple[ + Callable[[Union[str, ColumnClause[Any]]], Union[str, Tuple[str, str]]], + Callable[[ColumnClause[Any]], Union[str, Tuple[str, str]]], + _BindNameForColProtocol, +]: + if dml.isupdate(compile_state) and compile_state._extra_froms: + # when extra tables are present, refer to the columns + # in those extra tables as table-qualified, including in + # dictionaries and when rendering bind param names. + # the "main" table of the statement remains unqualified, + # allowing the most compatibility with a non-multi-table + # statement. + _et = set(compile_state._extra_froms) + + c_key_role = functools.partial( + coercions.expect_as_key, roles.DMLColumnRole + ) + + def _column_as_key( + key: Union[ColumnClause[Any], str], + ) -> Union[str, Tuple[str, str]]: + str_key = c_key_role(key) + if hasattr(key, "table") and key.table in _et: + return (key.table.name, str_key) # type: ignore + else: + return str_key + + def _getattr_col_key( + col: ColumnClause[Any], + ) -> Union[str, Tuple[str, str]]: + if col.table in _et: + return (col.table.name, col.key) # type: ignore + else: + return col.key + + def _col_bind_name(col: ColumnClause[Any]) -> str: + if col.table in _et: + if TYPE_CHECKING: + assert isinstance(col.table, TableClause) + return "%s_%s" % (col.table.name, col.key) + else: + return col.key + + else: + _column_as_key = functools.partial( + coercions.expect_as_key, roles.DMLColumnRole + ) + _getattr_col_key = _col_bind_name = operator.attrgetter("key") # type: ignore # noqa: E501 + + return _column_as_key, _getattr_col_key, _col_bind_name + + +def _scan_insert_from_select_cols( + compiler, + stmt, + compile_state, + parameters, + _getattr_col_key, + _column_as_key, + _col_bind_name, + check_columns, + values, + toplevel, + kw, +): + cols = [stmt.table.c[_column_as_key(name)] for name in stmt._select_names] + + assert compiler.stack[-1]["selectable"] is stmt + + compiler.stack[-1]["insert_from_select"] = stmt.select + + add_select_cols: List[_CrudParamElementSQLExpr] = [] + if stmt.include_insert_from_select_defaults: + col_set = set(cols) + for col in stmt.table.columns: + # omit columns that were not in the SELECT statement. + # this will omit columns marked as omit_from_statements naturally, + # as long as that col was not explicit in the SELECT. 
+ # if an omit_from_statements col has a "default" on it, then + # we need to include it, as these defaults should still fire off. + # but, if it has that default and it's the "sentinel" default, + # we don't do sentinel default operations for insert_from_select + # here so we again omit it. + if ( + col not in col_set + and col.default + and not col.default.is_sentinel + ): + cols.append(col) + + for c in cols: + col_key = _getattr_col_key(c) + if col_key in parameters and col_key not in check_columns: + parameters.pop(col_key) + values.append((c, compiler.preparer.format_column(c), None, ())) + else: + _append_param_insert_select_hasdefault( + compiler, stmt, c, add_select_cols, kw + ) + + if add_select_cols: + values.extend(add_select_cols) + ins_from_select = compiler.stack[-1]["insert_from_select"] + if not isinstance(ins_from_select, Select): + raise exc.CompileError( + f"Can't extend statement for INSERT..FROM SELECT to include " + f"additional default-holding column(s) " + f"""{ + ', '.join(repr(key) for _, key, _, _ in add_select_cols) + }. Convert the selectable to a subquery() first, or pass """ + "include_defaults=False to Insert.from_select() to skip these " + "columns." + ) + ins_from_select = ins_from_select._generate() + # copy raw_columns + ins_from_select._raw_columns = list(ins_from_select._raw_columns) + [ + expr for _, _, expr, _ in add_select_cols + ] + compiler.stack[-1]["insert_from_select"] = ins_from_select + + +def _scan_cols( + compiler, + stmt, + compile_state, + parameters, + _getattr_col_key, + _column_as_key, + _col_bind_name, + check_columns, + values, + toplevel, + kw, +): + ( + need_pks, + implicit_returning, + implicit_return_defaults, + postfetch_lastrowid, + use_insertmanyvalues, + use_sentinel_columns, + ) = _get_returning_modifiers(compiler, stmt, compile_state, toplevel) + + assert compile_state.isupdate or compile_state.isinsert + + if compile_state._parameter_ordering: + parameter_ordering = [ + _column_as_key(key) for key in compile_state._parameter_ordering + ] + ordered_keys = set(parameter_ordering) + cols = [ + stmt.table.c[key] + for key in parameter_ordering + if isinstance(key, str) and key in stmt.table.c + ] + [c for c in stmt.table.c if c.key not in ordered_keys] + + else: + cols = stmt.table.columns + + isinsert = _compile_state_isinsert(compile_state) + if isinsert and not compile_state._has_multi_parameters: + # new rules for #7998. fetch lastrowid or implicit returning + # for autoincrement column even if parameter is NULL, for DBs that + # override NULL param for primary key (sqlite, mysql/mariadb) + autoincrement_col = stmt.table._autoincrement_column + insert_null_pk_still_autoincrements = ( + compiler.dialect.insert_null_pk_still_autoincrements + ) + else: + autoincrement_col = insert_null_pk_still_autoincrements = None + + if stmt._supplemental_returning: + supplemental_returning = set(stmt._supplemental_returning) + else: + supplemental_returning = set() + + compiler_implicit_returning = compiler.implicit_returning + + # TODO - see TODO(return_defaults_columns) below + # cols_in_params = set() + + for c in cols: + # scan through every column in the target table + + col_key = _getattr_col_key(c) + + if col_key in parameters and col_key not in check_columns: + # parameter is present for the column. use that. 
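A small sketch (assumptions: the `account` table and its columns are invented for illustration) of the observable effect of this per-column scan: columns with explicit parameters get their bind values, a column with a Python-side default is still included so the default can fire, and an autoincrement primary key is omitted and fetched separately.

from sqlalchemy import Column, Integer, MetaData, String, Table, insert

metadata = MetaData()
account = Table(
    "account",
    metadata,
    Column("id", Integer, primary_key=True),         # autoincrement pk
    Column("name", String(50)),
    Column("status", String(20), default="active"),  # Python-side default
)

stmt = insert(account).values(name="sandy")
# "name" comes from the explicit parameter, "status" is kept because its
# default must be evaluated, and "id" is left to lastrowid / RETURNING:
print(stmt)  # roughly: INSERT INTO account (name, status) VALUES (:name, :status)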
+ + _append_param_parameter( + compiler, + stmt, + compile_state, + c, + col_key, + parameters, + _col_bind_name, + implicit_returning, + implicit_return_defaults, + postfetch_lastrowid, + values, + autoincrement_col, + insert_null_pk_still_autoincrements, + kw, + ) + + # TODO - see TODO(return_defaults_columns) below + # cols_in_params.add(c) + + elif isinsert: + # no parameter is present and it's an insert. + + if c.primary_key and need_pks: + # it's a primary key column, it will need to be generated by a + # default generator of some kind, and the statement expects + # inserted_primary_key to be available. + + if implicit_returning: + # we can use RETURNING, find out how to invoke this + # column and get the value where RETURNING is an option. + # we can inline server-side functions in this case. + + _append_param_insert_pk_returning( + compiler, stmt, c, values, kw + ) + else: + # otherwise, find out how to invoke this column + # and get its value where RETURNING is not an option. + # if we have to invoke a server-side function, we need + # to pre-execute it. or if this is a straight + # autoincrement column and the dialect supports it + # we can use cursor.lastrowid. + + _append_param_insert_pk_no_returning( + compiler, stmt, c, values, kw + ) + + elif c.default is not None: + # column has a default, but it's not a pk column, or it is but + # we don't need to get the pk back. + if not c.default.is_sentinel or ( + use_sentinel_columns is not None + ): + _append_param_insert_hasdefault( + compiler, stmt, c, implicit_return_defaults, values, kw + ) + + elif c.server_default is not None: + # column has a DDL-level default, and is either not a pk + # column or we don't need the pk. + if implicit_return_defaults and c in implicit_return_defaults: + compiler_implicit_returning.append(c) + elif not c.primary_key: + compiler.postfetch.append(c) + + elif implicit_return_defaults and c in implicit_return_defaults: + compiler_implicit_returning.append(c) + + elif ( + c.primary_key + and c is not stmt.table._autoincrement_column + and not c.nullable + ): + _warn_pk_with_no_anticipated_value(c) + + elif compile_state.isupdate: + # no parameter is present and it's an insert. + + _append_param_update( + compiler, + compile_state, + stmt, + c, + implicit_return_defaults, + values, + kw, + ) + + # adding supplemental cols to implicit_returning in table + # order so that order is maintained between multiple INSERT + # statements which may have different parameters included, but all + # have the same RETURNING clause + if ( + c in supplemental_returning + and c not in compiler_implicit_returning + ): + compiler_implicit_returning.append(c) + + if supplemental_returning: + # we should have gotten every col into implicit_returning, + # however supplemental returning can also have SQL functions etc. + # in it + remaining_supplemental = supplemental_returning.difference( + compiler_implicit_returning + ) + compiler_implicit_returning.extend( + c + for c in stmt._supplemental_returning + if c in remaining_supplemental + ) + + # TODO(return_defaults_columns): there can still be more columns in + # _return_defaults_columns in the case that they are from something like an + # aliased of the table. we can add them here, however this breaks other ORM + # things. so this is for another day. 
see + # test/orm/dml/test_update_delete_where.py -> test_update_from_alias + + # if stmt._return_defaults_columns: + # compiler_implicit_returning.extend( + # set(stmt._return_defaults_columns) + # .difference(compiler_implicit_returning) + # .difference(cols_in_params) + # ) + + return (use_insertmanyvalues, use_sentinel_columns) + + +def _setup_delete_return_defaults( + compiler, + stmt, + compile_state, + parameters, + _getattr_col_key, + _column_as_key, + _col_bind_name, + check_columns, + values, + toplevel, + kw, +): + (_, _, implicit_return_defaults, *_) = _get_returning_modifiers( + compiler, stmt, compile_state, toplevel + ) + + if not implicit_return_defaults: + return + + if stmt._return_defaults_columns: + compiler.implicit_returning.extend(implicit_return_defaults) + + if stmt._supplemental_returning: + ir_set = set(compiler.implicit_returning) + compiler.implicit_returning.extend( + c for c in stmt._supplemental_returning if c not in ir_set + ) + + +def _append_param_parameter( + compiler, + stmt, + compile_state, + c, + col_key, + parameters, + _col_bind_name, + implicit_returning, + implicit_return_defaults, + postfetch_lastrowid, + values, + autoincrement_col, + insert_null_pk_still_autoincrements, + kw, +): + value = parameters.pop(col_key) + + has_visiting_cte = kw.get("visiting_cte") is not None + col_value = compiler.preparer.format_column( + c, use_table=compile_state.include_table_with_column_exprs + ) + + accumulated_bind_names: Set[str] = set() + + if coercions._is_literal(value): + if ( + insert_null_pk_still_autoincrements + and c.primary_key + and c is autoincrement_col + ): + # support use case for #7998, fetch autoincrement cols + # even if value was given. + + if postfetch_lastrowid: + compiler.postfetch_lastrowid = True + elif implicit_returning: + compiler.implicit_returning.append(c) + + value = _create_bind_param( + compiler, + c, + value, + required=value is REQUIRED, + name=( + _col_bind_name(c) + if not _compile_state_isinsert(compile_state) + or not compile_state._has_multi_parameters + else "%s_m0" % _col_bind_name(c) + ), + accumulate_bind_names=accumulated_bind_names, + force_anonymous=has_visiting_cte, + **kw, + ) + elif value._is_bind_parameter: + if ( + insert_null_pk_still_autoincrements + and value.value is None + and c.primary_key + and c is autoincrement_col + ): + # support use case for #7998, fetch autoincrement cols + # even if value was given + if implicit_returning: + compiler.implicit_returning.append(c) + elif compiler.dialect.postfetch_lastrowid: + compiler.postfetch_lastrowid = True + + value = _handle_values_anonymous_param( + compiler, + c, + value, + name=( + _col_bind_name(c) + if not _compile_state_isinsert(compile_state) + or not compile_state._has_multi_parameters + else "%s_m0" % _col_bind_name(c) + ), + accumulate_bind_names=accumulated_bind_names, + **kw, + ) + else: + # value is a SQL expression + value = compiler.process( + value.self_group(), + accumulate_bind_names=accumulated_bind_names, + **kw, + ) + + if compile_state.isupdate: + if implicit_return_defaults and c in implicit_return_defaults: + compiler.implicit_returning.append(c) + + else: + compiler.postfetch.append(c) + else: + if c.primary_key: + if implicit_returning: + compiler.implicit_returning.append(c) + elif compiler.dialect.postfetch_lastrowid: + compiler.postfetch_lastrowid = True + + elif implicit_return_defaults and (c in implicit_return_defaults): + compiler.implicit_returning.append(c) + + else: + # postfetch specifically means, "we can SELECT the 
row we just + # inserted by primary key to get back the server generated + # defaults". so by definition this can't be used to get the + # primary key value back, because we need to have it ahead of + # time. + + compiler.postfetch.append(c) + + values.append((c, col_value, value, accumulated_bind_names)) + + +def _append_param_insert_pk_returning(compiler, stmt, c, values, kw): + """Create a primary key expression in the INSERT statement where + we want to populate result.inserted_primary_key and RETURNING + is available. + + """ + if c.default is not None: + if c.default.is_sequence: + if compiler.dialect.supports_sequences and ( + not c.default.optional + or not compiler.dialect.sequences_optional + ): + accumulated_bind_names: Set[str] = set() + values.append( + ( + c, + compiler.preparer.format_column(c), + compiler.process( + c.default, + accumulate_bind_names=accumulated_bind_names, + **kw, + ), + accumulated_bind_names, + ) + ) + compiler.implicit_returning.append(c) + elif c.default.is_clause_element: + accumulated_bind_names = set() + values.append( + ( + c, + compiler.preparer.format_column(c), + compiler.process( + c.default.arg.self_group(), + accumulate_bind_names=accumulated_bind_names, + **kw, + ), + accumulated_bind_names, + ) + ) + compiler.implicit_returning.append(c) + else: + # client side default. OK we can't use RETURNING, need to + # do a "prefetch", which in fact fetches the default value + # on the Python side + values.append( + ( + c, + compiler.preparer.format_column(c), + _create_insert_prefetch_bind_param(compiler, c, **kw), + (c.key,), + ) + ) + elif c is stmt.table._autoincrement_column or c.server_default is not None: + compiler.implicit_returning.append(c) + elif not c.nullable: + # no .default, no .server_default, not autoincrement, we have + # no indication this primary key column will have any value + _warn_pk_with_no_anticipated_value(c) + + +def _append_param_insert_pk_no_returning(compiler, stmt, c, values, kw): + """Create a primary key expression in the INSERT statement where + we want to populate result.inserted_primary_key and we cannot use + RETURNING. + + Depending on the kind of default here we may create a bound parameter + in the INSERT statement and pre-execute a default generation function, + or we may use cursor.lastrowid if supported by the dialect. + + + """ + + if ( + # column has a Python-side default + c.default is not None + and ( + # and it either is not a sequence, or it is and we support + # sequences and want to invoke it + not c.default.is_sequence + or ( + compiler.dialect.supports_sequences + and ( + not c.default.optional + or not compiler.dialect.sequences_optional + ) + ) + ) + ) or ( + # column is the "autoincrement column" + c is stmt.table._autoincrement_column + and ( + # dialect can't use cursor.lastrowid + not compiler.dialect.postfetch_lastrowid + and ( + # column has a Sequence and we support those + ( + c.default is not None + and c.default.is_sequence + and compiler.dialect.supports_sequences + ) + or + # column has no default on it, but dialect can run the + # "autoincrement" mechanism explicitly, e.g. 
PostgreSQL + # SERIAL we know the sequence name + ( + c.default is None + and compiler.dialect.preexecute_autoincrement_sequences + ) + ) + ) + ): + # do a pre-execute of the default + values.append( + ( + c, + compiler.preparer.format_column(c), + _create_insert_prefetch_bind_param(compiler, c, **kw), + (c.key,), + ) + ) + elif ( + c.default is None + and c.server_default is None + and not c.nullable + and c is not stmt.table._autoincrement_column + ): + # no .default, no .server_default, not autoincrement, we have + # no indication this primary key column will have any value + _warn_pk_with_no_anticipated_value(c) + elif compiler.dialect.postfetch_lastrowid: + # finally, where it seems like there will be a generated primary key + # value and we haven't set up any other way to fetch it, and the + # dialect supports cursor.lastrowid, switch on the lastrowid flag so + # that the DefaultExecutionContext calls upon cursor.lastrowid + compiler.postfetch_lastrowid = True + + +def _append_param_insert_hasdefault( + compiler, stmt, c, implicit_return_defaults, values, kw +): + if c.default.is_sequence: + if compiler.dialect.supports_sequences and ( + not c.default.optional or not compiler.dialect.sequences_optional + ): + accumulated_bind_names: Set[str] = set() + values.append( + ( + c, + compiler.preparer.format_column(c), + compiler.process( + c.default, + accumulate_bind_names=accumulated_bind_names, + **kw, + ), + accumulated_bind_names, + ) + ) + if implicit_return_defaults and c in implicit_return_defaults: + compiler.implicit_returning.append(c) + elif not c.primary_key: + compiler.postfetch.append(c) + elif c.default.is_clause_element: + accumulated_bind_names = set() + values.append( + ( + c, + compiler.preparer.format_column(c), + compiler.process( + c.default.arg.self_group(), + accumulate_bind_names=accumulated_bind_names, + **kw, + ), + accumulated_bind_names, + ) + ) + + if implicit_return_defaults and c in implicit_return_defaults: + compiler.implicit_returning.append(c) + elif not c.primary_key: + # don't add primary key column to postfetch + compiler.postfetch.append(c) + else: + values.append( + ( + c, + compiler.preparer.format_column(c), + _create_insert_prefetch_bind_param(compiler, c, **kw), + (c.key,), + ) + ) + + +def _append_param_insert_select_hasdefault( + compiler: SQLCompiler, + stmt: ValuesBase, + c: ColumnClause[Any], + values: List[_CrudParamElementSQLExpr], + kw: Dict[str, Any], +) -> None: + if default_is_sequence(c.default): + if compiler.dialect.supports_sequences and ( + not c.default.optional or not compiler.dialect.sequences_optional + ): + values.append( + ( + c, + compiler.preparer.format_column(c), + c.default.next_value(), + (), + ) + ) + elif default_is_clause_element(c.default): + values.append( + ( + c, + compiler.preparer.format_column(c), + c.default.arg.self_group(), + (), + ) + ) + else: + values.append( + ( + c, + compiler.preparer.format_column(c), + _create_insert_prefetch_bind_param( + compiler, c, process=False, **kw + ), + (c.key,), + ) + ) + + +def _append_param_update( + compiler, compile_state, stmt, c, implicit_return_defaults, values, kw +): + include_table = compile_state.include_table_with_column_exprs + if c.onupdate is not None and not c.onupdate.is_sequence: + if c.onupdate.is_clause_element: + values.append( + ( + c, + compiler.preparer.format_column( + c, + use_table=include_table, + ), + compiler.process(c.onupdate.arg.self_group(), **kw), + (), + ) + ) + if implicit_return_defaults and c in implicit_return_defaults: + 
compiler.implicit_returning.append(c) + else: + compiler.postfetch.append(c) + else: + values.append( + ( + c, + compiler.preparer.format_column( + c, + use_table=include_table, + ), + _create_update_prefetch_bind_param(compiler, c, **kw), + (c.key,), + ) + ) + elif c.server_onupdate is not None: + if implicit_return_defaults and c in implicit_return_defaults: + compiler.implicit_returning.append(c) + else: + compiler.postfetch.append(c) + elif ( + implicit_return_defaults + and (stmt._return_defaults_columns or not stmt._return_defaults) + and c in implicit_return_defaults + ): + compiler.implicit_returning.append(c) + + +@overload +def _create_insert_prefetch_bind_param( + compiler: SQLCompiler, + c: ColumnElement[Any], + process: Literal[True] = ..., + **kw: Any, +) -> str: ... + + +@overload +def _create_insert_prefetch_bind_param( + compiler: SQLCompiler, + c: ColumnElement[Any], + process: Literal[False], + **kw: Any, +) -> elements.BindParameter[Any]: ... + + +def _create_insert_prefetch_bind_param( + compiler: SQLCompiler, + c: ColumnElement[Any], + process: bool = True, + name: Optional[str] = None, + **kw: Any, +) -> Union[elements.BindParameter[Any], str]: + param = _create_bind_param( + compiler, c, None, process=process, name=name, **kw + ) + compiler.insert_prefetch.append(c) # type: ignore + return param + + +@overload +def _create_update_prefetch_bind_param( + compiler: SQLCompiler, + c: ColumnElement[Any], + process: Literal[True] = ..., + **kw: Any, +) -> str: ... + + +@overload +def _create_update_prefetch_bind_param( + compiler: SQLCompiler, + c: ColumnElement[Any], + process: Literal[False], + **kw: Any, +) -> elements.BindParameter[Any]: ... + + +def _create_update_prefetch_bind_param( + compiler: SQLCompiler, + c: ColumnElement[Any], + process: bool = True, + name: Optional[str] = None, + **kw: Any, +) -> Union[elements.BindParameter[Any], str]: + param = _create_bind_param( + compiler, c, None, process=process, name=name, **kw + ) + compiler.update_prefetch.append(c) # type: ignore + return param + + +class _multiparam_column(elements.ColumnElement[Any]): + _is_multiparam_column = True + + def __init__(self, original, index): + self.index = index + self.key = "%s_m%d" % (original.key, index + 1) + self.original = original + self.default = original.default + self.type = original.type + + def compare(self, other, **kw): + raise NotImplementedError() + + def _copy_internals(self, **kw): + raise NotImplementedError() + + def __eq__(self, other): + return ( + isinstance(other, _multiparam_column) + and other.key == self.key + and other.original == self.original + ) + + @util.memoized_property + def _default_description_tuple(self) -> _DefaultDescriptionTuple: + """used by default.py -> _process_execute_defaults()""" + + return _DefaultDescriptionTuple._from_column_default(self.default) + + @util.memoized_property + def _onupdate_description_tuple(self) -> _DefaultDescriptionTuple: + """used by default.py -> _process_execute_defaults()""" + + return _DefaultDescriptionTuple._from_column_default(self.onupdate) + + +def _process_multiparam_default_bind( + compiler: SQLCompiler, + stmt: ValuesBase, + c: KeyedColumnElement[Any], + index: int, + kw: Dict[str, Any], +) -> str: + if not c.default: + raise exc.CompileError( + "INSERT value for column %s is explicitly rendered as a bound" + "parameter in the VALUES clause; " + "a Python-side value or SQL expression is required" % c + ) + elif default_is_clause_element(c.default): + return 
compiler.process(c.default.arg.self_group(), **kw) + elif c.default.is_sequence: + # these conditions would have been established + # by append_param_insert_(?:hasdefault|pk_returning|pk_no_returning) + # in order for us to be here, so these don't need to be + # checked + # assert compiler.dialect.supports_sequences and ( + # not c.default.optional + # or not compiler.dialect.sequences_optional + # ) + return compiler.process(c.default, **kw) + else: + col = _multiparam_column(c, index) + assert isinstance(stmt, dml.Insert) + return _create_insert_prefetch_bind_param( + compiler, col, process=True, **kw + ) + + +def _get_update_multitable_params( + compiler, + stmt, + compile_state, + stmt_parameter_tuples, + check_columns, + _col_bind_name, + _getattr_col_key, + values, + kw, +): + normalized_params = { + coercions.expect(roles.DMLColumnRole, c): param + for c, param in stmt_parameter_tuples or () + } + + include_table = compile_state.include_table_with_column_exprs + + affected_tables = set() + for t in compile_state._extra_froms: + for c in t.c: + if c in normalized_params: + affected_tables.add(t) + check_columns[_getattr_col_key(c)] = c + value = normalized_params[c] + + col_value = compiler.process(c, include_table=include_table) + if coercions._is_literal(value): + value = _create_bind_param( + compiler, + c, + value, + required=value is REQUIRED, + name=_col_bind_name(c), + **kw, # TODO: no test coverage for literal binds here + ) + accumulated_bind_names: Iterable[str] = (c.key,) + elif value._is_bind_parameter: + cbn = _col_bind_name(c) + value = _handle_values_anonymous_param( + compiler, c, value, name=cbn, **kw + ) + accumulated_bind_names = (cbn,) + else: + compiler.postfetch.append(c) + value = compiler.process(value.self_group(), **kw) + accumulated_bind_names = () + values.append((c, col_value, value, accumulated_bind_names)) + # determine tables which are actually to be updated - process onupdate + # and server_onupdate for these + for t in affected_tables: + for c in t.c: + if c in normalized_params: + continue + elif c.onupdate is not None and not c.onupdate.is_sequence: + if c.onupdate.is_clause_element: + values.append( + ( + c, + compiler.process(c, include_table=include_table), + compiler.process( + c.onupdate.arg.self_group(), **kw + ), + (), + ) + ) + compiler.postfetch.append(c) + else: + values.append( + ( + c, + compiler.process(c, include_table=include_table), + _create_update_prefetch_bind_param( + compiler, c, name=_col_bind_name(c), **kw + ), + (c.key,), + ) + ) + elif c.server_onupdate is not None: + compiler.postfetch.append(c) + + +def _extend_values_for_multiparams( + compiler: SQLCompiler, + stmt: ValuesBase, + compile_state: DMLState, + initial_values: Sequence[_CrudParamElementStr], + _column_as_key: Callable[..., str], + kw: Dict[str, Any], +) -> List[Sequence[_CrudParamElementStr]]: + values_0 = initial_values + values = [initial_values] + + has_visiting_cte = kw.get("visiting_cte") is not None + mp = compile_state._multi_parameters + assert mp is not None + for i, row in enumerate(mp[1:]): + extension: List[_CrudParamElementStr] = [] + + row = {_column_as_key(key): v for key, v in row.items()} + + for col, col_expr, param, accumulated_names in values_0: + if col.key in row: + key = col.key + + if coercions._is_literal(row[key]): + new_param = _create_bind_param( + compiler, + col, + row[key], + name=("%s_m%d" % (col.key, i + 1)), + force_anonymous=has_visiting_cte, + **kw, + ) + else: + new_param = compiler.process(row[key].self_group(), **kw) + 
else: + new_param = _process_multiparam_default_bind( + compiler, stmt, col, i, kw + ) + + extension.append((col, col_expr, new_param, accumulated_names)) + + values.append(extension) + + return values + + +def _get_stmt_parameter_tuples_params( + compiler, + compile_state, + parameters, + stmt_parameter_tuples, + _column_as_key, + values, + kw, +): + for k, v in stmt_parameter_tuples: + colkey = _column_as_key(k) + if colkey is not None: + parameters.setdefault(colkey, v) + else: + # a non-Column expression on the left side; + # add it to values() in an "as-is" state, + # coercing right side to bound param + + # note one of the main use cases for this is array slice + # updates on PostgreSQL, as the left side is also an expression. + + col_expr = compiler.process( + k, include_table=compile_state.include_table_with_column_exprs + ) + + if coercions._is_literal(v): + v = compiler.process( + elements.BindParameter(None, v, type_=k.type), **kw + ) + else: + if v._is_bind_parameter and v.type._isnull: + # either unique parameter, or other bound parameters that + # were passed in directly + # set type to that of the column unconditionally + v = v._with_binary_element_type(k.type) + + v = compiler.process(v.self_group(), **kw) + + # TODO: not sure if accumulated_bind_names applies here + values.append((k, col_expr, v, ())) + + +def _get_returning_modifiers(compiler, stmt, compile_state, toplevel): + """determines RETURNING strategy, if any, for the statement. + + This is where it's determined what we need to fetch from the + INSERT or UPDATE statement after it's invoked. + + """ + + dialect = compiler.dialect + + need_pks = ( + toplevel + and _compile_state_isinsert(compile_state) + and not stmt._inline + and ( + not compiler.for_executemany + or (dialect.insert_executemany_returning and stmt._return_defaults) + ) + and not stmt._returning + # and (not stmt._returning or stmt._return_defaults) + and not compile_state._has_multi_parameters + ) + + # check if we have access to simple cursor.lastrowid. we can use that + # after the INSERT if that's all we need. + postfetch_lastrowid = ( + need_pks + and dialect.postfetch_lastrowid + and stmt.table._autoincrement_column is not None + ) + + # see if we want to add RETURNING to an INSERT in order to get + # primary key columns back. This would be instead of postfetch_lastrowid + # if that's set. + implicit_returning = ( + # statement itself can veto it + need_pks + # the dialect can veto it if it just doesnt support RETURNING + # with INSERT + and dialect.insert_returning + # user-defined implicit_returning on Table can veto it + and compile_state._primary_table.implicit_returning + # the compile_state can veto it (SQlite uses this to disable + # RETURNING for an ON CONFLICT insert, as SQLite does not return + # for rows that were updated, which is wrong) + and compile_state._supports_implicit_returning + and ( + # since we support MariaDB and SQLite which also support lastrowid, + # decide if we should use lastrowid or RETURNING. for insert + # that didnt call return_defaults() and has just one set of + # parameters, we can use lastrowid. this is more "traditional" + # and a lot of weird use cases are supported by it. 
+ # SQLite lastrowid times 3x faster than returning, + # Mariadb lastrowid 2x faster than returning + (not postfetch_lastrowid or dialect.favor_returning_over_lastrowid) + or compile_state._has_multi_parameters + or stmt._return_defaults + ) + ) + if implicit_returning: + postfetch_lastrowid = False + + if _compile_state_isinsert(compile_state): + should_implicit_return_defaults = ( + implicit_returning and stmt._return_defaults + ) + explicit_returning = ( + should_implicit_return_defaults + or stmt._returning + or stmt._supplemental_returning + ) + use_insertmanyvalues = ( + toplevel + and compiler.for_executemany + and dialect.use_insertmanyvalues + and ( + explicit_returning or dialect.use_insertmanyvalues_wo_returning + ) + ) + + use_sentinel_columns = None + if ( + use_insertmanyvalues + and explicit_returning + and stmt._sort_by_parameter_order + ): + use_sentinel_columns = compiler._get_sentinel_column_for_table( + stmt.table + ) + + elif compile_state.isupdate: + should_implicit_return_defaults = ( + stmt._return_defaults + and compile_state._primary_table.implicit_returning + and compile_state._supports_implicit_returning + and dialect.update_returning + ) + use_insertmanyvalues = False + use_sentinel_columns = None + elif compile_state.isdelete: + should_implicit_return_defaults = ( + stmt._return_defaults + and compile_state._primary_table.implicit_returning + and compile_state._supports_implicit_returning + and dialect.delete_returning + ) + use_insertmanyvalues = False + use_sentinel_columns = None + else: + should_implicit_return_defaults = False # pragma: no cover + use_insertmanyvalues = False + use_sentinel_columns = None + + if should_implicit_return_defaults: + if not stmt._return_defaults_columns: + # TODO: this is weird. See #9685 where we have to + # take an extra step to prevent this from happening. why + # would this ever be *all* columns? but if we set to blank, then + # that seems to break things also in the ORM. So we should + # try to clean this up and figure out what return_defaults + # needs to do w/ the ORM etc. here + implicit_return_defaults = set(stmt.table.c) + else: + implicit_return_defaults = set(stmt._return_defaults_columns) + else: + implicit_return_defaults = None + + return ( + need_pks, + implicit_returning or should_implicit_return_defaults, + implicit_return_defaults, + postfetch_lastrowid, + use_insertmanyvalues, + use_sentinel_columns, + ) + + +def _warn_pk_with_no_anticipated_value(c): + msg = ( + "Column '%s.%s' is marked as a member of the " + "primary key for table '%s', " + "but has no Python-side or server-side default generator indicated, " + "nor does it indicate 'autoincrement=True' or 'nullable=True', " + "and no explicit value is passed. " + "Primary key columns typically may not store NULL." + % (c.table.fullname, c.name, c.table.fullname) + ) + if len(c.table.primary_key) > 1: + msg += ( + " Note that as of SQLAlchemy 1.1, 'autoincrement=True' must be " + "indicated explicitly for composite (e.g. multicolumn) primary " + "keys if AUTO_INCREMENT/SERIAL/IDENTITY " + "behavior is expected for one of the columns in the primary key. " + "CREATE TABLE statements are impacted by this change as well on " + "most backends." 
+ ) + util.warn(msg) diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/sql/ddl.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/sql/ddl.py new file mode 100644 index 0000000000000000000000000000000000000000..69287d6f21519e733b8c2ebce09b43ce457cbb39 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/sql/ddl.py @@ -0,0 +1,1442 @@ +# sql/ddl.py +# Copyright (C) 2009-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php +# mypy: allow-untyped-defs, allow-untyped-calls + +""" +Provides the hierarchy of DDL-defining schema items as well as routines +to invoke them for a create/drop call. + +""" +from __future__ import annotations + +import contextlib +import typing +from typing import Any +from typing import Callable +from typing import Generic +from typing import Iterable +from typing import List +from typing import Optional +from typing import Sequence as typing_Sequence +from typing import Tuple +from typing import TypeVar +from typing import Union + +from . import roles +from .base import _generative +from .base import Executable +from .base import SchemaVisitor +from .elements import ClauseElement +from .. import exc +from .. import util +from ..util import topological +from ..util.typing import Protocol +from ..util.typing import Self + +if typing.TYPE_CHECKING: + from .compiler import Compiled + from .compiler import DDLCompiler + from .elements import BindParameter + from .schema import Column + from .schema import Constraint + from .schema import ForeignKeyConstraint + from .schema import Index + from .schema import SchemaItem + from .schema import Sequence as Sequence # noqa: F401 + from .schema import Table + from .selectable import TableClause + from ..engine.base import Connection + from ..engine.interfaces import CacheStats + from ..engine.interfaces import CompiledCacheType + from ..engine.interfaces import Dialect + from ..engine.interfaces import SchemaTranslateMapType + +_SI = TypeVar("_SI", bound=Union["SchemaItem", str]) + + +class BaseDDLElement(ClauseElement): + """The root of DDL constructs, including those that are sub-elements + within the "create table" and other processes. + + .. versionadded:: 2.0 + + """ + + _hierarchy_supports_caching = False + """disable cache warnings for all _DDLCompiles subclasses. """ + + def _compiler(self, dialect, **kw): + """Return a compiler appropriate for this ClauseElement, given a + Dialect.""" + + return dialect.ddl_compiler(dialect, self, **kw) + + def _compile_w_cache( + self, + dialect: Dialect, + *, + compiled_cache: Optional[CompiledCacheType], + column_keys: List[str], + for_executemany: bool = False, + schema_translate_map: Optional[SchemaTranslateMapType] = None, + **kw: Any, + ) -> Tuple[ + Compiled, Optional[typing_Sequence[BindParameter[Any]]], CacheStats + ]: + raise NotImplementedError() + + +class DDLIfCallable(Protocol): + def __call__( + self, + ddl: BaseDDLElement, + target: Union[SchemaItem, str], + bind: Optional[Connection], + tables: Optional[List[Table]] = None, + state: Optional[Any] = None, + *, + dialect: Dialect, + compiler: Optional[DDLCompiler] = ..., + checkfirst: bool, + ) -> bool: ... 
+ + +class DDLIf(typing.NamedTuple): + dialect: Optional[str] + callable_: Optional[DDLIfCallable] + state: Optional[Any] + + def _should_execute( + self, + ddl: BaseDDLElement, + target: Union[SchemaItem, str], + bind: Optional[Connection], + compiler: Optional[DDLCompiler] = None, + **kw: Any, + ) -> bool: + if bind is not None: + dialect = bind.dialect + elif compiler is not None: + dialect = compiler.dialect + else: + assert False, "compiler or dialect is required" + + if isinstance(self.dialect, str): + if self.dialect != dialect.name: + return False + elif isinstance(self.dialect, (tuple, list, set)): + if dialect.name not in self.dialect: + return False + if self.callable_ is not None and not self.callable_( + ddl, + target, + bind, + state=self.state, + dialect=dialect, + compiler=compiler, + **kw, + ): + return False + + return True + + +class ExecutableDDLElement(roles.DDLRole, Executable, BaseDDLElement): + """Base class for standalone executable DDL expression constructs. + + This class is the base for the general purpose :class:`.DDL` class, + as well as the various create/drop clause constructs such as + :class:`.CreateTable`, :class:`.DropTable`, :class:`.AddConstraint`, + etc. + + .. versionchanged:: 2.0 :class:`.ExecutableDDLElement` is renamed from + :class:`.DDLElement`, which still exists for backwards compatibility. + + :class:`.ExecutableDDLElement` integrates closely with SQLAlchemy events, + introduced in :ref:`event_toplevel`. An instance of one is + itself an event receiving callable:: + + event.listen( + users, + "after_create", + AddConstraint(constraint).execute_if(dialect="postgresql"), + ) + + .. seealso:: + + :class:`.DDL` + + :class:`.DDLEvents` + + :ref:`event_toplevel` + + :ref:`schema_ddl_sequences` + + """ + + _ddl_if: Optional[DDLIf] = None + target: Union[SchemaItem, str, None] = None + + def _execute_on_connection( + self, connection, distilled_params, execution_options + ): + return connection._execute_ddl( + self, distilled_params, execution_options + ) + + @_generative + def against(self, target: SchemaItem) -> Self: + """Return a copy of this :class:`_schema.ExecutableDDLElement` which + will include the given target. + + This essentially applies the given item to the ``.target`` attribute of + the returned :class:`_schema.ExecutableDDLElement` object. This target + is then usable by event handlers and compilation routines in order to + provide services such as tokenization of a DDL string in terms of a + particular :class:`_schema.Table`. + + When a :class:`_schema.ExecutableDDLElement` object is established as + an event handler for the :meth:`_events.DDLEvents.before_create` or + :meth:`_events.DDLEvents.after_create` events, and the event then + occurs for a given target such as a :class:`_schema.Constraint` or + :class:`_schema.Table`, that target is established with a copy of the + :class:`_schema.ExecutableDDLElement` object using this method, which + then proceeds to the :meth:`_schema.ExecutableDDLElement.execute` + method in order to invoke the actual DDL instruction. + + :param target: a :class:`_schema.SchemaItem` that will be the subject + of a DDL operation. + + :return: a copy of this :class:`_schema.ExecutableDDLElement` with the + ``.target`` attribute assigned to the given + :class:`_schema.SchemaItem`. + + .. seealso:: + + :class:`_schema.DDL` - uses tokenization against the "target" when + processing the DDL string. 
+ + """ + self.target = target + return self + + @_generative + def execute_if( + self, + dialect: Optional[str] = None, + callable_: Optional[DDLIfCallable] = None, + state: Optional[Any] = None, + ) -> Self: + r"""Return a callable that will execute this + :class:`_ddl.ExecutableDDLElement` conditionally within an event + handler. + + Used to provide a wrapper for event listening:: + + event.listen( + metadata, + "before_create", + DDL("my_ddl").execute_if(dialect="postgresql"), + ) + + :param dialect: May be a string or tuple of strings. + If a string, it will be compared to the name of the + executing database dialect:: + + DDL("something").execute_if(dialect="postgresql") + + If a tuple, specifies multiple dialect names:: + + DDL("something").execute_if(dialect=("postgresql", "mysql")) + + :param callable\_: A callable, which will be invoked with + three positional arguments as well as optional keyword + arguments: + + :ddl: + This DDL element. + + :target: + The :class:`_schema.Table` or :class:`_schema.MetaData` + object which is the + target of this event. May be None if the DDL is executed + explicitly. + + :bind: + The :class:`_engine.Connection` being used for DDL execution. + May be None if this construct is being created inline within + a table, in which case ``compiler`` will be present. + + :tables: + Optional keyword argument - a list of Table objects which are to + be created/ dropped within a MetaData.create_all() or drop_all() + method call. + + :dialect: keyword argument, but always present - the + :class:`.Dialect` involved in the operation. + + :compiler: keyword argument. Will be ``None`` for an engine + level DDL invocation, but will refer to a :class:`.DDLCompiler` + if this DDL element is being created inline within a table. + + :state: + Optional keyword argument - will be the ``state`` argument + passed to this function. + + :checkfirst: + Keyword argument, will be True if the 'checkfirst' flag was + set during the call to ``create()``, ``create_all()``, + ``drop()``, ``drop_all()``. + + If the callable returns a True value, the DDL statement will be + executed. + + :param state: any value which will be passed to the callable\_ + as the ``state`` keyword argument. + + .. seealso:: + + :meth:`.SchemaItem.ddl_if` + + :class:`.DDLEvents` + + :ref:`event_toplevel` + + """ + self._ddl_if = DDLIf(dialect, callable_, state) + return self + + def _should_execute(self, target, bind, **kw): + if self._ddl_if is None: + return True + else: + return self._ddl_if._should_execute(self, target, bind, **kw) + + def _invoke_with(self, bind): + if self._should_execute(self.target, bind): + return bind.execute(self) + + def __call__(self, target, bind, **kw): + """Execute the DDL as a ddl_listener.""" + + self.against(target)._invoke_with(bind) + + def _generate(self): + s = self.__class__.__new__(self.__class__) + s.__dict__ = self.__dict__.copy() + return s + + +DDLElement = ExecutableDDLElement +""":class:`.DDLElement` is renamed to :class:`.ExecutableDDLElement`.""" + + +class DDL(ExecutableDDLElement): + """A literal DDL statement. + + Specifies literal SQL DDL to be executed by the database. DDL objects + function as DDL event listeners, and can be subscribed to those events + listed in :class:`.DDLEvents`, using either :class:`_schema.Table` or + :class:`_schema.MetaData` objects as targets. + Basic templating support allows + a single DDL instance to handle repetitive tasks for multiple tables. 
+ + Examples:: + + from sqlalchemy import event, DDL + + tbl = Table("users", metadata, Column("uid", Integer)) + event.listen(tbl, "before_create", DDL("DROP TRIGGER users_trigger")) + + spow = DDL("ALTER TABLE %(table)s SET secretpowers TRUE") + event.listen(tbl, "after_create", spow.execute_if(dialect="somedb")) + + drop_spow = DDL("ALTER TABLE users SET secretpowers FALSE") + connection.execute(drop_spow) + + When operating on Table events, the following ``statement`` + string substitutions are available: + + .. sourcecode:: text + + %(table)s - the Table name, with any required quoting applied + %(schema)s - the schema name, with any required quoting applied + %(fullname)s - the Table name including schema, quoted if needed + + The DDL's "context", if any, will be combined with the standard + substitutions noted above. Keys present in the context will override + the standard substitutions. + + """ + + __visit_name__ = "ddl" + + def __init__(self, statement, context=None): + """Create a DDL statement. + + :param statement: + A string or unicode string to be executed. Statements will be + processed with Python's string formatting operator using + a fixed set of string substitutions, as well as additional + substitutions provided by the optional :paramref:`.DDL.context` + parameter. + + A literal '%' in a statement must be escaped as '%%'. + + SQL bind parameters are not available in DDL statements. + + :param context: + Optional dictionary, defaults to None. These values will be + available for use in string substitutions on the DDL statement. + + .. seealso:: + + :class:`.DDLEvents` + + :ref:`event_toplevel` + + """ + + if not isinstance(statement, str): + raise exc.ArgumentError( + "Expected a string or unicode SQL statement, got '%r'" + % statement + ) + + self.statement = statement + self.context = context or {} + + def __repr__(self): + parts = [repr(self.statement)] + if self.context: + parts.append(f"context={self.context}") + + return "<%s@%s; %s>" % ( + type(self).__name__, + id(self), + ", ".join(parts), + ) + + +class _CreateDropBase(ExecutableDDLElement, Generic[_SI]): + """Base class for DDL constructs that represent CREATE and DROP or + equivalents. + + The common theme of _CreateDropBase is a single + ``element`` attribute which refers to the element + to be created or dropped. + + """ + + def __init__(self, element: _SI) -> None: + self.element = self.target = element + self._ddl_if = getattr(element, "_ddl_if", None) + + @property + def stringify_dialect(self): + assert not isinstance(self.element, str) + return self.element.create_drop_stringify_dialect + + def _create_rule_disable(self, compiler): + """Allow disable of _create_rule using a callable. + + Pass to _create_rule using + util.portable_instancemethod(self._create_rule_disable) + to retain serializability. + + """ + return False + + +class _CreateBase(_CreateDropBase[_SI]): + def __init__(self, element: _SI, if_not_exists: bool = False) -> None: + super().__init__(element) + self.if_not_exists = if_not_exists + + +class _DropBase(_CreateDropBase[_SI]): + def __init__(self, element: _SI, if_exists: bool = False) -> None: + super().__init__(element) + self.if_exists = if_exists + + +class CreateSchema(_CreateBase[str]): + """Represent a CREATE SCHEMA statement. + + The argument here is the string name of the schema. 
+ + """ + + __visit_name__ = "create_schema" + + stringify_dialect = "default" + + def __init__( + self, + name: str, + if_not_exists: bool = False, + ) -> None: + """Create a new :class:`.CreateSchema` construct.""" + + super().__init__(element=name, if_not_exists=if_not_exists) + + +class DropSchema(_DropBase[str]): + """Represent a DROP SCHEMA statement. + + The argument here is the string name of the schema. + + """ + + __visit_name__ = "drop_schema" + + stringify_dialect = "default" + + def __init__( + self, + name: str, + cascade: bool = False, + if_exists: bool = False, + ) -> None: + """Create a new :class:`.DropSchema` construct.""" + + super().__init__(element=name, if_exists=if_exists) + self.cascade = cascade + + +class CreateTable(_CreateBase["Table"]): + """Represent a CREATE TABLE statement.""" + + __visit_name__ = "create_table" + + def __init__( + self, + element: Table, + include_foreign_key_constraints: Optional[ + typing_Sequence[ForeignKeyConstraint] + ] = None, + if_not_exists: bool = False, + ) -> None: + """Create a :class:`.CreateTable` construct. + + :param element: a :class:`_schema.Table` that's the subject + of the CREATE + :param on: See the description for 'on' in :class:`.DDL`. + :param include_foreign_key_constraints: optional sequence of + :class:`_schema.ForeignKeyConstraint` objects that will be included + inline within the CREATE construct; if omitted, all foreign key + constraints that do not specify use_alter=True are included. + + :param if_not_exists: if True, an IF NOT EXISTS operator will be + applied to the construct. + + .. versionadded:: 1.4.0b2 + + """ + super().__init__(element, if_not_exists=if_not_exists) + self.columns = [CreateColumn(column) for column in element.columns] + self.include_foreign_key_constraints = include_foreign_key_constraints + + +class _DropView(_DropBase["Table"]): + """Semi-public 'DROP VIEW' construct. + + Used by the test suite for dialect-agnostic drops of views. + This object will eventually be part of a public "view" API. + + """ + + __visit_name__ = "drop_view" + + +class CreateConstraint(BaseDDLElement): + element: Constraint + + def __init__(self, element: Constraint) -> None: + self.element = element + + +class CreateColumn(BaseDDLElement): + """Represent a :class:`_schema.Column` + as rendered in a CREATE TABLE statement, + via the :class:`.CreateTable` construct. + + This is provided to support custom column DDL within the generation + of CREATE TABLE statements, by using the + compiler extension documented in :ref:`sqlalchemy.ext.compiler_toplevel` + to extend :class:`.CreateColumn`. 
+ + Typical integration is to examine the incoming :class:`_schema.Column` + object, and to redirect compilation if a particular flag or condition + is found:: + + from sqlalchemy import schema + from sqlalchemy.ext.compiler import compiles + + + @compiles(schema.CreateColumn) + def compile(element, compiler, **kw): + column = element.element + + if "special" not in column.info: + return compiler.visit_create_column(element, **kw) + + text = "%s SPECIAL DIRECTIVE %s" % ( + column.name, + compiler.type_compiler.process(column.type), + ) + default = compiler.get_column_default_string(column) + if default is not None: + text += " DEFAULT " + default + + if not column.nullable: + text += " NOT NULL" + + if column.constraints: + text += " ".join( + compiler.process(const) for const in column.constraints + ) + return text + + The above construct can be applied to a :class:`_schema.Table` + as follows:: + + from sqlalchemy import Table, Metadata, Column, Integer, String + from sqlalchemy import schema + + metadata = MetaData() + + table = Table( + "mytable", + MetaData(), + Column("x", Integer, info={"special": True}, primary_key=True), + Column("y", String(50)), + Column("z", String(20), info={"special": True}), + ) + + metadata.create_all(conn) + + Above, the directives we've added to the :attr:`_schema.Column.info` + collection + will be detected by our custom compilation scheme: + + .. sourcecode:: sql + + CREATE TABLE mytable ( + x SPECIAL DIRECTIVE INTEGER NOT NULL, + y VARCHAR(50), + z SPECIAL DIRECTIVE VARCHAR(20), + PRIMARY KEY (x) + ) + + The :class:`.CreateColumn` construct can also be used to skip certain + columns when producing a ``CREATE TABLE``. This is accomplished by + creating a compilation rule that conditionally returns ``None``. + This is essentially how to produce the same effect as using the + ``system=True`` argument on :class:`_schema.Column`, which marks a column + as an implicitly-present "system" column. + + For example, suppose we wish to produce a :class:`_schema.Table` + which skips + rendering of the PostgreSQL ``xmin`` column against the PostgreSQL + backend, but on other backends does render it, in anticipation of a + triggered rule. A conditional compilation rule could skip this name only + on PostgreSQL:: + + from sqlalchemy.schema import CreateColumn + + + @compiles(CreateColumn, "postgresql") + def skip_xmin(element, compiler, **kw): + if element.element.name == "xmin": + return None + else: + return compiler.visit_create_column(element, **kw) + + + my_table = Table( + "mytable", + metadata, + Column("id", Integer, primary_key=True), + Column("xmin", Integer), + ) + + Above, a :class:`.CreateTable` construct will generate a ``CREATE TABLE`` + which only includes the ``id`` column in the string; the ``xmin`` column + will be omitted, but only against the PostgreSQL backend. + + """ + + __visit_name__ = "create_column" + + element: Column[Any] + + def __init__(self, element: Column[Any]) -> None: + self.element = element + + +class DropTable(_DropBase["Table"]): + """Represent a DROP TABLE statement.""" + + __visit_name__ = "drop_table" + + def __init__(self, element: Table, if_exists: bool = False) -> None: + """Create a :class:`.DropTable` construct. + + :param element: a :class:`_schema.Table` that's the subject + of the DROP. + :param on: See the description for 'on' in :class:`.DDL`. + :param if_exists: if True, an IF EXISTS operator will be applied to the + construct. + + .. 
versionadded:: 1.4.0b2 + + """ + super().__init__(element, if_exists=if_exists) + + +class CreateSequence(_CreateBase["Sequence"]): + """Represent a CREATE SEQUENCE statement.""" + + __visit_name__ = "create_sequence" + + +class DropSequence(_DropBase["Sequence"]): + """Represent a DROP SEQUENCE statement.""" + + __visit_name__ = "drop_sequence" + + +class CreateIndex(_CreateBase["Index"]): + """Represent a CREATE INDEX statement.""" + + __visit_name__ = "create_index" + + def __init__(self, element: Index, if_not_exists: bool = False) -> None: + """Create a :class:`.Createindex` construct. + + :param element: a :class:`_schema.Index` that's the subject + of the CREATE. + :param if_not_exists: if True, an IF NOT EXISTS operator will be + applied to the construct. + + .. versionadded:: 1.4.0b2 + + """ + super().__init__(element, if_not_exists=if_not_exists) + + +class DropIndex(_DropBase["Index"]): + """Represent a DROP INDEX statement.""" + + __visit_name__ = "drop_index" + + def __init__(self, element: Index, if_exists: bool = False) -> None: + """Create a :class:`.DropIndex` construct. + + :param element: a :class:`_schema.Index` that's the subject + of the DROP. + :param if_exists: if True, an IF EXISTS operator will be applied to the + construct. + + .. versionadded:: 1.4.0b2 + + """ + super().__init__(element, if_exists=if_exists) + + +class AddConstraint(_CreateBase["Constraint"]): + """Represent an ALTER TABLE ADD CONSTRAINT statement.""" + + __visit_name__ = "add_constraint" + + def __init__( + self, + element: Constraint, + *, + isolate_from_table: bool = True, + ) -> None: + """Construct a new :class:`.AddConstraint` construct. + + :param element: a :class:`.Constraint` object + + :param isolate_from_table: optional boolean, defaults to True. Has + the effect of the incoming constraint being isolated from being + included in a CREATE TABLE sequence when associated with a + :class:`.Table`. + + .. versionadded:: 2.0.39 - added + :paramref:`.AddConstraint.isolate_from_table`, defaulting + to True. Previously, the behavior of this parameter was implicitly + turned on in all cases. + + """ + super().__init__(element) + + if isolate_from_table: + element._create_rule = util.portable_instancemethod( + self._create_rule_disable + ) + + +class DropConstraint(_DropBase["Constraint"]): + """Represent an ALTER TABLE DROP CONSTRAINT statement.""" + + __visit_name__ = "drop_constraint" + + def __init__( + self, + element: Constraint, + *, + cascade: bool = False, + if_exists: bool = False, + isolate_from_table: bool = True, + **kw: Any, + ) -> None: + """Construct a new :class:`.DropConstraint` construct. + + :param element: a :class:`.Constraint` object + :param cascade: optional boolean, indicates backend-specific + "CASCADE CONSTRAINT" directive should be rendered if available + :param if_exists: optional boolean, indicates backend-specific + "IF EXISTS" directive should be rendered if available + :param isolate_from_table: optional boolean, defaults to True. Has + the effect of the incoming constraint being isolated from being + included in a CREATE TABLE sequence when associated with a + :class:`.Table`. + + .. versionadded:: 2.0.39 - added + :paramref:`.DropConstraint.isolate_from_table`, defaulting + to True. Previously, the behavior of this parameter was implicitly + turned on in all cases. 
+ + """ + self.cascade = cascade + super().__init__(element, if_exists=if_exists, **kw) + + if isolate_from_table: + element._create_rule = util.portable_instancemethod( + self._create_rule_disable + ) + + +class SetTableComment(_CreateDropBase["Table"]): + """Represent a COMMENT ON TABLE IS statement.""" + + __visit_name__ = "set_table_comment" + + +class DropTableComment(_CreateDropBase["Table"]): + """Represent a COMMENT ON TABLE '' statement. + + Note this varies a lot across database backends. + + """ + + __visit_name__ = "drop_table_comment" + + +class SetColumnComment(_CreateDropBase["Column[Any]"]): + """Represent a COMMENT ON COLUMN IS statement.""" + + __visit_name__ = "set_column_comment" + + +class DropColumnComment(_CreateDropBase["Column[Any]"]): + """Represent a COMMENT ON COLUMN IS NULL statement.""" + + __visit_name__ = "drop_column_comment" + + +class SetConstraintComment(_CreateDropBase["Constraint"]): + """Represent a COMMENT ON CONSTRAINT IS statement.""" + + __visit_name__ = "set_constraint_comment" + + +class DropConstraintComment(_CreateDropBase["Constraint"]): + """Represent a COMMENT ON CONSTRAINT IS NULL statement.""" + + __visit_name__ = "drop_constraint_comment" + + +class InvokeDDLBase(SchemaVisitor): + def __init__(self, connection, **kw): + self.connection = connection + assert not kw, f"Unexpected keywords: {kw.keys()}" + + @contextlib.contextmanager + def with_ddl_events(self, target, **kw): + """helper context manager that will apply appropriate DDL events + to a CREATE or DROP operation.""" + + raise NotImplementedError() + + +class InvokeCreateDDLBase(InvokeDDLBase): + @contextlib.contextmanager + def with_ddl_events(self, target, **kw): + """helper context manager that will apply appropriate DDL events + to a CREATE or DROP operation.""" + + target.dispatch.before_create( + target, self.connection, _ddl_runner=self, **kw + ) + yield + target.dispatch.after_create( + target, self.connection, _ddl_runner=self, **kw + ) + + +class InvokeDropDDLBase(InvokeDDLBase): + @contextlib.contextmanager + def with_ddl_events(self, target, **kw): + """helper context manager that will apply appropriate DDL events + to a CREATE or DROP operation.""" + + target.dispatch.before_drop( + target, self.connection, _ddl_runner=self, **kw + ) + yield + target.dispatch.after_drop( + target, self.connection, _ddl_runner=self, **kw + ) + + +class SchemaGenerator(InvokeCreateDDLBase): + def __init__( + self, dialect, connection, checkfirst=False, tables=None, **kwargs + ): + super().__init__(connection, **kwargs) + self.checkfirst = checkfirst + self.tables = tables + self.preparer = dialect.identifier_preparer + self.dialect = dialect + self.memo = {} + + def _can_create_table(self, table): + self.dialect.validate_identifier(table.name) + effective_schema = self.connection.schema_for_object(table) + if effective_schema: + self.dialect.validate_identifier(effective_schema) + return not self.checkfirst or not self.dialect.has_table( + self.connection, table.name, schema=effective_schema + ) + + def _can_create_index(self, index): + effective_schema = self.connection.schema_for_object(index.table) + if effective_schema: + self.dialect.validate_identifier(effective_schema) + return not self.checkfirst or not self.dialect.has_index( + self.connection, + index.table.name, + index.name, + schema=effective_schema, + ) + + def _can_create_sequence(self, sequence): + effective_schema = self.connection.schema_for_object(sequence) + + return self.dialect.supports_sequences and ( + (not 
self.dialect.sequences_optional or not sequence.optional) + and ( + not self.checkfirst + or not self.dialect.has_sequence( + self.connection, sequence.name, schema=effective_schema + ) + ) + ) + + def visit_metadata(self, metadata): + if self.tables is not None: + tables = self.tables + else: + tables = list(metadata.tables.values()) + + collection = sort_tables_and_constraints( + [t for t in tables if self._can_create_table(t)] + ) + + seq_coll = [ + s + for s in metadata._sequences.values() + if s.column is None and self._can_create_sequence(s) + ] + + event_collection = [t for (t, fks) in collection if t is not None] + + with self.with_ddl_events( + metadata, + tables=event_collection, + checkfirst=self.checkfirst, + ): + for seq in seq_coll: + self.traverse_single(seq, create_ok=True) + + for table, fkcs in collection: + if table is not None: + self.traverse_single( + table, + create_ok=True, + include_foreign_key_constraints=fkcs, + _is_metadata_operation=True, + ) + else: + for fkc in fkcs: + self.traverse_single(fkc) + + def visit_table( + self, + table, + create_ok=False, + include_foreign_key_constraints=None, + _is_metadata_operation=False, + ): + if not create_ok and not self._can_create_table(table): + return + + with self.with_ddl_events( + table, + checkfirst=self.checkfirst, + _is_metadata_operation=_is_metadata_operation, + ): + for column in table.columns: + if column.default is not None: + self.traverse_single(column.default) + + if not self.dialect.supports_alter: + # e.g., don't omit any foreign key constraints + include_foreign_key_constraints = None + + CreateTable( + table, + include_foreign_key_constraints=( + include_foreign_key_constraints + ), + )._invoke_with(self.connection) + + if hasattr(table, "indexes"): + for index in table.indexes: + self.traverse_single(index, create_ok=True) + + if ( + self.dialect.supports_comments + and not self.dialect.inline_comments + ): + if table.comment is not None: + SetTableComment(table)._invoke_with(self.connection) + + for column in table.columns: + if column.comment is not None: + SetColumnComment(column)._invoke_with(self.connection) + + if self.dialect.supports_constraint_comments: + for constraint in table.constraints: + if constraint.comment is not None: + self.connection.execute( + SetConstraintComment(constraint) + ) + + def visit_foreign_key_constraint(self, constraint): + if not self.dialect.supports_alter: + return + + with self.with_ddl_events(constraint): + AddConstraint(constraint)._invoke_with(self.connection) + + def visit_sequence(self, sequence, create_ok=False): + if not create_ok and not self._can_create_sequence(sequence): + return + with self.with_ddl_events(sequence): + CreateSequence(sequence)._invoke_with(self.connection) + + def visit_index(self, index, create_ok=False): + if not create_ok and not self._can_create_index(index): + return + with self.with_ddl_events(index): + CreateIndex(index)._invoke_with(self.connection) + + +class SchemaDropper(InvokeDropDDLBase): + def __init__( + self, dialect, connection, checkfirst=False, tables=None, **kwargs + ): + super().__init__(connection, **kwargs) + self.checkfirst = checkfirst + self.tables = tables + self.preparer = dialect.identifier_preparer + self.dialect = dialect + self.memo = {} + + def visit_metadata(self, metadata): + if self.tables is not None: + tables = self.tables + else: + tables = list(metadata.tables.values()) + + try: + unsorted_tables = [t for t in tables if self._can_drop_table(t)] + collection = list( + reversed( + 
sort_tables_and_constraints( + unsorted_tables, + filter_fn=lambda constraint: ( + False + if not self.dialect.supports_alter + or constraint.name is None + else None + ), + ) + ) + ) + except exc.CircularDependencyError as err2: + if not self.dialect.supports_alter: + util.warn( + "Can't sort tables for DROP; an " + "unresolvable foreign key " + "dependency exists between tables: %s; and backend does " + "not support ALTER. To restore at least a partial sort, " + "apply use_alter=True to ForeignKey and " + "ForeignKeyConstraint " + "objects involved in the cycle to mark these as known " + "cycles that will be ignored." + % (", ".join(sorted([t.fullname for t in err2.cycles]))) + ) + collection = [(t, ()) for t in unsorted_tables] + else: + raise exc.CircularDependencyError( + err2.args[0], + err2.cycles, + err2.edges, + msg="Can't sort tables for DROP; an " + "unresolvable foreign key " + "dependency exists between tables: %s. Please ensure " + "that the ForeignKey and ForeignKeyConstraint objects " + "involved in the cycle have " + "names so that they can be dropped using " + "DROP CONSTRAINT." + % (", ".join(sorted([t.fullname for t in err2.cycles]))), + ) from err2 + + seq_coll = [ + s + for s in metadata._sequences.values() + if self._can_drop_sequence(s) + ] + + event_collection = [t for (t, fks) in collection if t is not None] + + with self.with_ddl_events( + metadata, + tables=event_collection, + checkfirst=self.checkfirst, + ): + for table, fkcs in collection: + if table is not None: + self.traverse_single( + table, + drop_ok=True, + _is_metadata_operation=True, + _ignore_sequences=seq_coll, + ) + else: + for fkc in fkcs: + self.traverse_single(fkc) + + for seq in seq_coll: + self.traverse_single(seq, drop_ok=seq.column is None) + + def _can_drop_table(self, table): + self.dialect.validate_identifier(table.name) + effective_schema = self.connection.schema_for_object(table) + if effective_schema: + self.dialect.validate_identifier(effective_schema) + return not self.checkfirst or self.dialect.has_table( + self.connection, table.name, schema=effective_schema + ) + + def _can_drop_index(self, index): + effective_schema = self.connection.schema_for_object(index.table) + if effective_schema: + self.dialect.validate_identifier(effective_schema) + return not self.checkfirst or self.dialect.has_index( + self.connection, + index.table.name, + index.name, + schema=effective_schema, + ) + + def _can_drop_sequence(self, sequence): + effective_schema = self.connection.schema_for_object(sequence) + return self.dialect.supports_sequences and ( + (not self.dialect.sequences_optional or not sequence.optional) + and ( + not self.checkfirst + or self.dialect.has_sequence( + self.connection, sequence.name, schema=effective_schema + ) + ) + ) + + def visit_index(self, index, drop_ok=False): + if not drop_ok and not self._can_drop_index(index): + return + + with self.with_ddl_events(index): + DropIndex(index)(index, self.connection) + + def visit_table( + self, + table, + drop_ok=False, + _is_metadata_operation=False, + _ignore_sequences=(), + ): + if not drop_ok and not self._can_drop_table(table): + return + + with self.with_ddl_events( + table, + checkfirst=self.checkfirst, + _is_metadata_operation=_is_metadata_operation, + ): + DropTable(table)._invoke_with(self.connection) + + # traverse client side defaults which may refer to server-side + # sequences. 
noting that some of these client side defaults may + # also be set up as server side defaults + # (see https://docs.sqlalchemy.org/en/ + # latest/core/defaults.html + # #associating-a-sequence-as-the-server-side- + # default), so have to be dropped after the table is dropped. + for column in table.columns: + if ( + column.default is not None + and column.default not in _ignore_sequences + ): + self.traverse_single(column.default) + + def visit_foreign_key_constraint(self, constraint): + if not self.dialect.supports_alter: + return + with self.with_ddl_events(constraint): + DropConstraint(constraint)._invoke_with(self.connection) + + def visit_sequence(self, sequence, drop_ok=False): + if not drop_ok and not self._can_drop_sequence(sequence): + return + with self.with_ddl_events(sequence): + DropSequence(sequence)._invoke_with(self.connection) + + +def sort_tables( + tables: Iterable[TableClause], + skip_fn: Optional[Callable[[ForeignKeyConstraint], bool]] = None, + extra_dependencies: Optional[ + typing_Sequence[Tuple[TableClause, TableClause]] + ] = None, +) -> List[Table]: + """Sort a collection of :class:`_schema.Table` objects based on + dependency. + + This is a dependency-ordered sort which will emit :class:`_schema.Table` + objects such that they will follow their dependent :class:`_schema.Table` + objects. + Tables are dependent on another based on the presence of + :class:`_schema.ForeignKeyConstraint` + objects as well as explicit dependencies + added by :meth:`_schema.Table.add_is_dependent_on`. + + .. warning:: + + The :func:`._schema.sort_tables` function cannot by itself + accommodate automatic resolution of dependency cycles between + tables, which are usually caused by mutually dependent foreign key + constraints. When these cycles are detected, the foreign keys + of these tables are omitted from consideration in the sort. + A warning is emitted when this condition occurs, which will be an + exception raise in a future release. Tables which are not part + of the cycle will still be returned in dependency order. + + To resolve these cycles, the + :paramref:`_schema.ForeignKeyConstraint.use_alter` parameter may be + applied to those constraints which create a cycle. Alternatively, + the :func:`_schema.sort_tables_and_constraints` function will + automatically return foreign key constraints in a separate + collection when cycles are detected so that they may be applied + to a schema separately. + + .. versionchanged:: 1.3.17 - a warning is emitted when + :func:`_schema.sort_tables` cannot perform a proper sort due to + cyclical dependencies. This will be an exception in a future + release. Additionally, the sort will continue to return + other tables not involved in the cycle in dependency order + which was not the case previously. + + :param tables: a sequence of :class:`_schema.Table` objects. + + :param skip_fn: optional callable which will be passed a + :class:`_schema.ForeignKeyConstraint` object; if it returns True, this + constraint will not be considered as a dependency. Note this is + **different** from the same parameter in + :func:`.sort_tables_and_constraints`, which is + instead passed the owning :class:`_schema.ForeignKeyConstraint` object. + + :param extra_dependencies: a sequence of 2-tuples of tables which will + also be considered as dependent on each other. + + .. 
seealso:: + + :func:`.sort_tables_and_constraints` + + :attr:`_schema.MetaData.sorted_tables` - uses this function to sort + + + """ + + if skip_fn is not None: + fixed_skip_fn = skip_fn + + def _skip_fn(fkc): + for fk in fkc.elements: + if fixed_skip_fn(fk): + return True + else: + return None + + else: + _skip_fn = None # type: ignore + + return [ + t + for (t, fkcs) in sort_tables_and_constraints( + tables, + filter_fn=_skip_fn, + extra_dependencies=extra_dependencies, + _warn_for_cycles=True, + ) + if t is not None + ] + + +def sort_tables_and_constraints( + tables, filter_fn=None, extra_dependencies=None, _warn_for_cycles=False +): + """Sort a collection of :class:`_schema.Table` / + :class:`_schema.ForeignKeyConstraint` + objects. + + This is a dependency-ordered sort which will emit tuples of + ``(Table, [ForeignKeyConstraint, ...])`` such that each + :class:`_schema.Table` follows its dependent :class:`_schema.Table` + objects. + Remaining :class:`_schema.ForeignKeyConstraint` + objects that are separate due to + dependency rules not satisfied by the sort are emitted afterwards + as ``(None, [ForeignKeyConstraint ...])``. + + Tables are dependent on another based on the presence of + :class:`_schema.ForeignKeyConstraint` objects, explicit dependencies + added by :meth:`_schema.Table.add_is_dependent_on`, + as well as dependencies + stated here using the :paramref:`~.sort_tables_and_constraints.skip_fn` + and/or :paramref:`~.sort_tables_and_constraints.extra_dependencies` + parameters. + + :param tables: a sequence of :class:`_schema.Table` objects. + + :param filter_fn: optional callable which will be passed a + :class:`_schema.ForeignKeyConstraint` object, + and returns a value based on + whether this constraint should definitely be included or excluded as + an inline constraint, or neither. If it returns False, the constraint + will definitely be included as a dependency that cannot be subject + to ALTER; if True, it will **only** be included as an ALTER result at + the end. Returning None means the constraint is included in the + table-based result unless it is detected as part of a dependency cycle. + + :param extra_dependencies: a sequence of 2-tuples of tables which will + also be considered as dependent on each other. + + .. seealso:: + + :func:`.sort_tables` + + + """ + + fixed_dependencies = set() + mutable_dependencies = set() + + if extra_dependencies is not None: + fixed_dependencies.update(extra_dependencies) + + remaining_fkcs = set() + for table in tables: + for fkc in table.foreign_key_constraints: + if fkc.use_alter is True: + remaining_fkcs.add(fkc) + continue + + if filter_fn: + filtered = filter_fn(fkc) + + if filtered is True: + remaining_fkcs.add(fkc) + continue + + dependent_on = fkc.referred_table + if dependent_on is not table: + mutable_dependencies.add((dependent_on, table)) + + fixed_dependencies.update( + (parent, table) for parent in table._extra_dependencies + ) + + try: + candidate_sort = list( + topological.sort( + fixed_dependencies.union(mutable_dependencies), + tables, + ) + ) + except exc.CircularDependencyError as err: + if _warn_for_cycles: + util.warn( + "Cannot correctly sort tables; there are unresolvable cycles " + 'between tables "%s", which is usually caused by mutually ' + "dependent foreign key constraints. Foreign key constraints " + "involving these tables will not be considered; this warning " + "may raise an error in a future release." 
+ % (", ".join(sorted(t.fullname for t in err.cycles)),) + ) + for edge in err.edges: + if edge in mutable_dependencies: + table = edge[1] + if table not in err.cycles: + continue + can_remove = [ + fkc + for fkc in table.foreign_key_constraints + if filter_fn is None or filter_fn(fkc) is not False + ] + remaining_fkcs.update(can_remove) + for fkc in can_remove: + dependent_on = fkc.referred_table + if dependent_on is not table: + mutable_dependencies.discard((dependent_on, table)) + candidate_sort = list( + topological.sort( + fixed_dependencies.union(mutable_dependencies), + tables, + ) + ) + + return [ + (table, table.foreign_key_constraints.difference(remaining_fkcs)) + for table in candidate_sort + ] + [(None, list(remaining_fkcs))] diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/sql/default_comparator.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/sql/default_comparator.py new file mode 100644 index 0000000000000000000000000000000000000000..7fa5dafe9ce4875e58fe79fedfc34cc34e3157fe --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/sql/default_comparator.py @@ -0,0 +1,552 @@ +# sql/default_comparator.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php + +"""Default implementation of SQL comparison operations. +""" + +from __future__ import annotations + +import typing +from typing import Any +from typing import Callable +from typing import Dict +from typing import NoReturn +from typing import Optional +from typing import Tuple +from typing import Type +from typing import Union + +from . import coercions +from . import operators +from . import roles +from . import type_api +from .elements import and_ +from .elements import BinaryExpression +from .elements import ClauseElement +from .elements import CollationClause +from .elements import CollectionAggregate +from .elements import ExpressionClauseList +from .elements import False_ +from .elements import Null +from .elements import OperatorExpression +from .elements import or_ +from .elements import True_ +from .elements import UnaryExpression +from .operators import OperatorType +from .. import exc +from .. import util + +_T = typing.TypeVar("_T", bound=Any) + +if typing.TYPE_CHECKING: + from .elements import ColumnElement + from .operators import custom_op + from .type_api import TypeEngine + + +def _boolean_compare( + expr: ColumnElement[Any], + op: OperatorType, + obj: Any, + *, + negate_op: Optional[OperatorType] = None, + reverse: bool = False, + _python_is_types: Tuple[Type[Any], ...] = (type(None), bool), + result_type: Optional[TypeEngine[bool]] = None, + **kwargs: Any, +) -> OperatorExpression[bool]: + if result_type is None: + result_type = type_api.BOOLEANTYPE + + if isinstance(obj, _python_is_types + (Null, True_, False_)): + # allow x ==/!= True/False to be treated as a literal. 
+ # this comes out to "== / != true/false" or "1/0" if those + # constants aren't supported and works on all platforms + if op in (operators.eq, operators.ne) and isinstance( + obj, (bool, True_, False_) + ): + return OperatorExpression._construct_for_op( + expr, + coercions.expect(roles.ConstExprRole, obj), + op, + type_=result_type, + negate=negate_op, + modifiers=kwargs, + ) + elif op in ( + operators.is_distinct_from, + operators.is_not_distinct_from, + ): + return OperatorExpression._construct_for_op( + expr, + coercions.expect(roles.ConstExprRole, obj), + op, + type_=result_type, + negate=negate_op, + modifiers=kwargs, + ) + elif expr._is_collection_aggregate: + obj = coercions.expect( + roles.ConstExprRole, element=obj, operator=op, expr=expr + ) + else: + # all other None uses IS, IS NOT + if op in (operators.eq, operators.is_): + return OperatorExpression._construct_for_op( + expr, + coercions.expect(roles.ConstExprRole, obj), + operators.is_, + negate=operators.is_not, + type_=result_type, + ) + elif op in (operators.ne, operators.is_not): + return OperatorExpression._construct_for_op( + expr, + coercions.expect(roles.ConstExprRole, obj), + operators.is_not, + negate=operators.is_, + type_=result_type, + ) + else: + raise exc.ArgumentError( + "Only '=', '!=', 'is_()', 'is_not()', " + "'is_distinct_from()', 'is_not_distinct_from()' " + "operators can be used with None/True/False" + ) + else: + obj = coercions.expect( + roles.BinaryElementRole, element=obj, operator=op, expr=expr + ) + + if reverse: + return OperatorExpression._construct_for_op( + obj, + expr, + op, + type_=result_type, + negate=negate_op, + modifiers=kwargs, + ) + else: + return OperatorExpression._construct_for_op( + expr, + obj, + op, + type_=result_type, + negate=negate_op, + modifiers=kwargs, + ) + + +def _custom_op_operate( + expr: ColumnElement[Any], + op: custom_op[Any], + obj: Any, + reverse: bool = False, + result_type: Optional[TypeEngine[Any]] = None, + **kw: Any, +) -> ColumnElement[Any]: + if result_type is None: + if op.return_type: + result_type = op.return_type + elif op.is_comparison: + result_type = type_api.BOOLEANTYPE + + return _binary_operate( + expr, op, obj, reverse=reverse, result_type=result_type, **kw + ) + + +def _binary_operate( + expr: ColumnElement[Any], + op: OperatorType, + obj: roles.BinaryElementRole[Any], + *, + reverse: bool = False, + result_type: Optional[TypeEngine[_T]] = None, + **kw: Any, +) -> OperatorExpression[_T]: + coerced_obj = coercions.expect( + roles.BinaryElementRole, obj, expr=expr, operator=op + ) + + if reverse: + left, right = coerced_obj, expr + else: + left, right = expr, coerced_obj + + if result_type is None: + op, result_type = left.comparator._adapt_expression( + op, right.comparator + ) + + return OperatorExpression._construct_for_op( + left, right, op, type_=result_type, modifiers=kw + ) + + +def _conjunction_operate( + expr: ColumnElement[Any], op: OperatorType, other: Any, **kw: Any +) -> ColumnElement[Any]: + if op is operators.and_: + return and_(expr, other) + elif op is operators.or_: + return or_(expr, other) + else: + raise NotImplementedError() + + +def _scalar( + expr: ColumnElement[Any], + op: OperatorType, + fn: Callable[[ColumnElement[Any]], ColumnElement[Any]], + **kw: Any, +) -> ColumnElement[Any]: + return fn(expr) + + +def _in_impl( + expr: ColumnElement[Any], + op: OperatorType, + seq_or_selectable: ClauseElement, + negate_op: OperatorType, + **kw: Any, +) -> ColumnElement[Any]: + seq_or_selectable = coercions.expect( + 
roles.InElementRole, seq_or_selectable, expr=expr, operator=op + ) + if "in_ops" in seq_or_selectable._annotations: + op, negate_op = seq_or_selectable._annotations["in_ops"] + + return _boolean_compare( + expr, op, seq_or_selectable, negate_op=negate_op, **kw + ) + + +def _getitem_impl( + expr: ColumnElement[Any], op: OperatorType, other: Any, **kw: Any +) -> ColumnElement[Any]: + if ( + isinstance(expr.type, type_api.INDEXABLE) + or isinstance(expr.type, type_api.TypeDecorator) + and isinstance(expr.type.impl_instance, type_api.INDEXABLE) + ): + other = coercions.expect( + roles.BinaryElementRole, other, expr=expr, operator=op + ) + return _binary_operate(expr, op, other, **kw) + else: + _unsupported_impl(expr, op, other, **kw) + + +def _unsupported_impl( + expr: ColumnElement[Any], op: OperatorType, *arg: Any, **kw: Any +) -> NoReturn: + raise NotImplementedError( + "Operator '%s' is not supported on this expression" % op.__name__ + ) + + +def _inv_impl( + expr: ColumnElement[Any], op: OperatorType, **kw: Any +) -> ColumnElement[Any]: + """See :meth:`.ColumnOperators.__inv__`.""" + + # undocumented element currently used by the ORM for + # relationship.contains() + if hasattr(expr, "negation_clause"): + return expr.negation_clause + else: + return expr._negate() + + +def _neg_impl( + expr: ColumnElement[Any], op: OperatorType, **kw: Any +) -> ColumnElement[Any]: + """See :meth:`.ColumnOperators.__neg__`.""" + return UnaryExpression(expr, operator=operators.neg, type_=expr.type) + + +def _bitwise_not_impl( + expr: ColumnElement[Any], op: OperatorType, **kw: Any +) -> ColumnElement[Any]: + """See :meth:`.ColumnOperators.bitwise_not`.""" + + return UnaryExpression( + expr, operator=operators.bitwise_not_op, type_=expr.type + ) + + +def _match_impl( + expr: ColumnElement[Any], op: OperatorType, other: Any, **kw: Any +) -> ColumnElement[Any]: + """See :meth:`.ColumnOperators.match`.""" + + return _boolean_compare( + expr, + operators.match_op, + coercions.expect( + roles.BinaryElementRole, + other, + expr=expr, + operator=operators.match_op, + ), + result_type=type_api.MATCHTYPE, + negate_op=( + operators.not_match_op + if op is operators.match_op + else operators.match_op + ), + **kw, + ) + + +def _distinct_impl( + expr: ColumnElement[Any], op: OperatorType, **kw: Any +) -> ColumnElement[Any]: + """See :meth:`.ColumnOperators.distinct`.""" + return UnaryExpression( + expr, operator=operators.distinct_op, type_=expr.type + ) + + +def _between_impl( + expr: ColumnElement[Any], + op: OperatorType, + cleft: Any, + cright: Any, + **kw: Any, +) -> ColumnElement[Any]: + """See :meth:`.ColumnOperators.between`.""" + return BinaryExpression( + expr, + ExpressionClauseList._construct_for_list( + operators.and_, + type_api.NULLTYPE, + coercions.expect( + roles.BinaryElementRole, + cleft, + expr=expr, + operator=operators.and_, + ), + coercions.expect( + roles.BinaryElementRole, + cright, + expr=expr, + operator=operators.and_, + ), + group=False, + ), + op, + negate=( + operators.not_between_op + if op is operators.between_op + else operators.between_op + ), + modifiers=kw, + ) + + +def _collate_impl( + expr: ColumnElement[str], op: OperatorType, collation: str, **kw: Any +) -> ColumnElement[str]: + return CollationClause._create_collation_expression(expr, collation) + + +def _regexp_match_impl( + expr: ColumnElement[str], + op: OperatorType, + pattern: Any, + flags: Optional[str], + **kw: Any, +) -> ColumnElement[Any]: + return BinaryExpression( + expr, + coercions.expect( + roles.BinaryElementRole, 
+ pattern, + expr=expr, + operator=operators.comma_op, + ), + op, + negate=operators.not_regexp_match_op, + modifiers={"flags": flags}, + ) + + +def _regexp_replace_impl( + expr: ColumnElement[Any], + op: OperatorType, + pattern: Any, + replacement: Any, + flags: Optional[str], + **kw: Any, +) -> ColumnElement[Any]: + return BinaryExpression( + expr, + ExpressionClauseList._construct_for_list( + operators.comma_op, + type_api.NULLTYPE, + coercions.expect( + roles.BinaryElementRole, + pattern, + expr=expr, + operator=operators.comma_op, + ), + coercions.expect( + roles.BinaryElementRole, + replacement, + expr=expr, + operator=operators.comma_op, + ), + group=False, + ), + op, + modifiers={"flags": flags}, + ) + + +# a mapping of operators with the method they use, along with +# additional keyword arguments to be passed +operator_lookup: Dict[ + str, + Tuple[ + Callable[..., ColumnElement[Any]], + util.immutabledict[ + str, Union[OperatorType, Callable[..., ColumnElement[Any]]] + ], + ], +] = { + "and_": (_conjunction_operate, util.EMPTY_DICT), + "or_": (_conjunction_operate, util.EMPTY_DICT), + "inv": (_inv_impl, util.EMPTY_DICT), + "add": (_binary_operate, util.EMPTY_DICT), + "mul": (_binary_operate, util.EMPTY_DICT), + "sub": (_binary_operate, util.EMPTY_DICT), + "div": (_binary_operate, util.EMPTY_DICT), + "mod": (_binary_operate, util.EMPTY_DICT), + "bitwise_xor_op": (_binary_operate, util.EMPTY_DICT), + "bitwise_or_op": (_binary_operate, util.EMPTY_DICT), + "bitwise_and_op": (_binary_operate, util.EMPTY_DICT), + "bitwise_not_op": (_bitwise_not_impl, util.EMPTY_DICT), + "bitwise_lshift_op": (_binary_operate, util.EMPTY_DICT), + "bitwise_rshift_op": (_binary_operate, util.EMPTY_DICT), + "truediv": (_binary_operate, util.EMPTY_DICT), + "floordiv": (_binary_operate, util.EMPTY_DICT), + "custom_op": (_custom_op_operate, util.EMPTY_DICT), + "json_path_getitem_op": (_binary_operate, util.EMPTY_DICT), + "json_getitem_op": (_binary_operate, util.EMPTY_DICT), + "concat_op": (_binary_operate, util.EMPTY_DICT), + "any_op": ( + _scalar, + util.immutabledict({"fn": CollectionAggregate._create_any}), + ), + "all_op": ( + _scalar, + util.immutabledict({"fn": CollectionAggregate._create_all}), + ), + "lt": (_boolean_compare, util.immutabledict({"negate_op": operators.ge})), + "le": (_boolean_compare, util.immutabledict({"negate_op": operators.gt})), + "ne": (_boolean_compare, util.immutabledict({"negate_op": operators.eq})), + "gt": (_boolean_compare, util.immutabledict({"negate_op": operators.le})), + "ge": (_boolean_compare, util.immutabledict({"negate_op": operators.lt})), + "eq": (_boolean_compare, util.immutabledict({"negate_op": operators.ne})), + "is_distinct_from": ( + _boolean_compare, + util.immutabledict({"negate_op": operators.is_not_distinct_from}), + ), + "is_not_distinct_from": ( + _boolean_compare, + util.immutabledict({"negate_op": operators.is_distinct_from}), + ), + "like_op": ( + _boolean_compare, + util.immutabledict({"negate_op": operators.not_like_op}), + ), + "ilike_op": ( + _boolean_compare, + util.immutabledict({"negate_op": operators.not_ilike_op}), + ), + "not_like_op": ( + _boolean_compare, + util.immutabledict({"negate_op": operators.like_op}), + ), + "not_ilike_op": ( + _boolean_compare, + util.immutabledict({"negate_op": operators.ilike_op}), + ), + "contains_op": ( + _boolean_compare, + util.immutabledict({"negate_op": operators.not_contains_op}), + ), + "icontains_op": ( + _boolean_compare, + util.immutabledict({"negate_op": operators.not_icontains_op}), + ), + 
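+ # Added illustrative note (annotation, not upstream code): each entry in
+ # this mapping pairs an operator name with its implementation function and
+ # fixed keyword arguments, so for example somecolumn.like("x%") dispatches
+ # through _boolean_compare with negate_op=operators.not_like_op, which is
+ # what allows the expression to be negated into NOT LIKE.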
"startswith_op": ( + _boolean_compare, + util.immutabledict({"negate_op": operators.not_startswith_op}), + ), + "istartswith_op": ( + _boolean_compare, + util.immutabledict({"negate_op": operators.not_istartswith_op}), + ), + "endswith_op": ( + _boolean_compare, + util.immutabledict({"negate_op": operators.not_endswith_op}), + ), + "iendswith_op": ( + _boolean_compare, + util.immutabledict({"negate_op": operators.not_iendswith_op}), + ), + "desc_op": ( + _scalar, + util.immutabledict({"fn": UnaryExpression._create_desc}), + ), + "asc_op": ( + _scalar, + util.immutabledict({"fn": UnaryExpression._create_asc}), + ), + "nulls_first_op": ( + _scalar, + util.immutabledict({"fn": UnaryExpression._create_nulls_first}), + ), + "nulls_last_op": ( + _scalar, + util.immutabledict({"fn": UnaryExpression._create_nulls_last}), + ), + "in_op": ( + _in_impl, + util.immutabledict({"negate_op": operators.not_in_op}), + ), + "not_in_op": ( + _in_impl, + util.immutabledict({"negate_op": operators.in_op}), + ), + "is_": ( + _boolean_compare, + util.immutabledict({"negate_op": operators.is_}), + ), + "is_not": ( + _boolean_compare, + util.immutabledict({"negate_op": operators.is_not}), + ), + "collate": (_collate_impl, util.EMPTY_DICT), + "match_op": (_match_impl, util.EMPTY_DICT), + "not_match_op": (_match_impl, util.EMPTY_DICT), + "distinct_op": (_distinct_impl, util.EMPTY_DICT), + "between_op": (_between_impl, util.EMPTY_DICT), + "not_between_op": (_between_impl, util.EMPTY_DICT), + "neg": (_neg_impl, util.EMPTY_DICT), + "getitem": (_getitem_impl, util.EMPTY_DICT), + "lshift": (_unsupported_impl, util.EMPTY_DICT), + "rshift": (_unsupported_impl, util.EMPTY_DICT), + "contains": (_unsupported_impl, util.EMPTY_DICT), + "regexp_match_op": (_regexp_match_impl, util.EMPTY_DICT), + "not_regexp_match_op": (_regexp_match_impl, util.EMPTY_DICT), + "regexp_replace_op": (_regexp_replace_impl, util.EMPTY_DICT), +} diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/sql/dml.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/sql/dml.py new file mode 100644 index 0000000000000000000000000000000000000000..f5071146be2464e2def98f73f4372c8a35ce2999 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/sql/dml.py @@ -0,0 +1,1837 @@ +# sql/dml.py +# Copyright (C) 2009-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php +""" +Provide :class:`_expression.Insert`, :class:`_expression.Update` and +:class:`_expression.Delete`. + +""" +from __future__ import annotations + +import collections.abc as collections_abc +import operator +from typing import Any +from typing import cast +from typing import Dict +from typing import Iterable +from typing import List +from typing import MutableMapping +from typing import NoReturn +from typing import Optional +from typing import overload +from typing import Sequence +from typing import Set +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING +from typing import TypeVar +from typing import Union + +from . import coercions +from . import roles +from . 
import util as sql_util +from ._typing import _TP +from ._typing import _unexpected_kw +from ._typing import is_column_element +from ._typing import is_named_from_clause +from .base import _entity_namespace_key +from .base import _exclusive_against +from .base import _from_objects +from .base import _generative +from .base import _select_iterables +from .base import ColumnCollection +from .base import ColumnSet +from .base import CompileState +from .base import DialectKWArgs +from .base import Executable +from .base import Generative +from .base import HasCompileState +from .elements import BooleanClauseList +from .elements import ClauseElement +from .elements import ColumnClause +from .elements import ColumnElement +from .elements import Null +from .selectable import Alias +from .selectable import ExecutableReturnsRows +from .selectable import FromClause +from .selectable import HasCTE +from .selectable import HasPrefixes +from .selectable import Join +from .selectable import SelectLabelStyle +from .selectable import TableClause +from .selectable import TypedReturnsRows +from .sqltypes import NullType +from .visitors import InternalTraversal +from .. import exc +from .. import util +from ..util.typing import Self +from ..util.typing import TypeGuard + +if TYPE_CHECKING: + from ._typing import _ColumnExpressionArgument + from ._typing import _ColumnsClauseArgument + from ._typing import _DMLColumnArgument + from ._typing import _DMLColumnKeyMapping + from ._typing import _DMLTableArgument + from ._typing import _T0 # noqa + from ._typing import _T1 # noqa + from ._typing import _T2 # noqa + from ._typing import _T3 # noqa + from ._typing import _T4 # noqa + from ._typing import _T5 # noqa + from ._typing import _T6 # noqa + from ._typing import _T7 # noqa + from ._typing import _TypedColumnClauseArgument as _TCCA # noqa + from .base import ReadOnlyColumnCollection + from .compiler import SQLCompiler + from .elements import KeyedColumnElement + from .selectable import _ColumnsClauseElement + from .selectable import _SelectIterable + from .selectable import Select + from .selectable import Selectable + + def isupdate(dml: DMLState) -> TypeGuard[UpdateDMLState]: ... + + def isdelete(dml: DMLState) -> TypeGuard[DeleteDMLState]: ... + + def isinsert(dml: DMLState) -> TypeGuard[InsertDMLState]: ... 
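+ # Added illustrative note (annotation, not upstream code): outside of type
+ # checking, the stubs above are replaced by the plain attribute getters in
+ # the else branch below, so isinsert(dml_state) is simply
+ # dml_state.isinsert; the stubs only exist to narrow types for checkers.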
+ +else: + isupdate = operator.attrgetter("isupdate") + isdelete = operator.attrgetter("isdelete") + isinsert = operator.attrgetter("isinsert") + + +_T = TypeVar("_T", bound=Any) + +_DMLColumnElement = Union[str, ColumnClause[Any]] +_DMLTableElement = Union[TableClause, Alias, Join] + + +class DMLState(CompileState): + _no_parameters = True + _dict_parameters: Optional[MutableMapping[_DMLColumnElement, Any]] = None + _multi_parameters: Optional[ + List[MutableMapping[_DMLColumnElement, Any]] + ] = None + _ordered_values: Optional[List[Tuple[_DMLColumnElement, Any]]] = None + _parameter_ordering: Optional[List[_DMLColumnElement]] = None + _primary_table: FromClause + _supports_implicit_returning = True + + isupdate = False + isdelete = False + isinsert = False + + statement: UpdateBase + + def __init__( + self, statement: UpdateBase, compiler: SQLCompiler, **kw: Any + ): + raise NotImplementedError() + + @classmethod + def get_entity_description(cls, statement: UpdateBase) -> Dict[str, Any]: + return { + "name": ( + statement.table.name + if is_named_from_clause(statement.table) + else None + ), + "table": statement.table, + } + + @classmethod + def get_returning_column_descriptions( + cls, statement: UpdateBase + ) -> List[Dict[str, Any]]: + return [ + { + "name": c.key, + "type": c.type, + "expr": c, + } + for c in statement._all_selected_columns + ] + + @property + def dml_table(self) -> _DMLTableElement: + return self.statement.table + + if TYPE_CHECKING: + + @classmethod + def get_plugin_class(cls, statement: Executable) -> Type[DMLState]: ... + + @classmethod + def _get_multi_crud_kv_pairs( + cls, + statement: UpdateBase, + multi_kv_iterator: Iterable[Dict[_DMLColumnArgument, Any]], + ) -> List[Dict[_DMLColumnElement, Any]]: + return [ + { + coercions.expect(roles.DMLColumnRole, k): v + for k, v in mapping.items() + } + for mapping in multi_kv_iterator + ] + + @classmethod + def _get_crud_kv_pairs( + cls, + statement: UpdateBase, + kv_iterator: Iterable[Tuple[_DMLColumnArgument, Any]], + needs_to_be_cacheable: bool, + ) -> List[Tuple[_DMLColumnElement, Any]]: + return [ + ( + coercions.expect(roles.DMLColumnRole, k), + ( + v + if not needs_to_be_cacheable + else coercions.expect( + roles.ExpressionElementRole, + v, + type_=NullType(), + is_crud=True, + ) + ), + ) + for k, v in kv_iterator + ] + + def _make_extra_froms( + self, statement: DMLWhereBase + ) -> Tuple[FromClause, List[FromClause]]: + froms: List[FromClause] = [] + + all_tables = list(sql_util.tables_from_leftmost(statement.table)) + primary_table = all_tables[0] + seen = {primary_table} + + consider = statement._where_criteria + if self._dict_parameters: + consider += tuple(self._dict_parameters.values()) + + for crit in consider: + for item in _from_objects(crit): + if not seen.intersection(item._cloned_set): + froms.append(item) + seen.update(item._cloned_set) + + froms.extend(all_tables[1:]) + return primary_table, froms + + def _process_values(self, statement: ValuesBase) -> None: + if self._no_parameters: + self._dict_parameters = statement._values + self._no_parameters = False + + def _process_select_values(self, statement: ValuesBase) -> None: + assert statement._select_names is not None + parameters: MutableMapping[_DMLColumnElement, Any] = { + name: Null() for name in statement._select_names + } + + if self._no_parameters: + self._no_parameters = False + self._dict_parameters = parameters + else: + # this condition normally not reachable as the Insert + # does not allow this construction to occur + assert False, 
"This statement already has parameters" + + def _no_multi_values_supported(self, statement: ValuesBase) -> NoReturn: + raise exc.InvalidRequestError( + "%s construct does not support " + "multiple parameter sets." % statement.__visit_name__.upper() + ) + + def _cant_mix_formats_error(self) -> NoReturn: + raise exc.InvalidRequestError( + "Can't mix single and multiple VALUES " + "formats in one INSERT statement; one style appends to a " + "list while the other replaces values, so the intent is " + "ambiguous." + ) + + +@CompileState.plugin_for("default", "insert") +class InsertDMLState(DMLState): + isinsert = True + + include_table_with_column_exprs = False + + _has_multi_parameters = False + + def __init__( + self, + statement: Insert, + compiler: SQLCompiler, + disable_implicit_returning: bool = False, + **kw: Any, + ): + self.statement = statement + self._primary_table = statement.table + + if disable_implicit_returning: + self._supports_implicit_returning = False + + self.isinsert = True + if statement._select_names: + self._process_select_values(statement) + if statement._values is not None: + self._process_values(statement) + if statement._multi_values: + self._process_multi_values(statement) + + @util.memoized_property + def _insert_col_keys(self) -> List[str]: + # this is also done in crud.py -> _key_getters_for_crud_column + return [ + coercions.expect(roles.DMLColumnRole, col, as_key=True) + for col in self._dict_parameters or () + ] + + def _process_values(self, statement: ValuesBase) -> None: + if self._no_parameters: + self._has_multi_parameters = False + self._dict_parameters = statement._values + self._no_parameters = False + elif self._has_multi_parameters: + self._cant_mix_formats_error() + + def _process_multi_values(self, statement: ValuesBase) -> None: + for parameters in statement._multi_values: + multi_parameters: List[MutableMapping[_DMLColumnElement, Any]] = [ + ( + { + c.key: value + for c, value in zip(statement.table.c, parameter_set) + } + if isinstance(parameter_set, collections_abc.Sequence) + else parameter_set + ) + for parameter_set in parameters + ] + + if self._no_parameters: + self._no_parameters = False + self._has_multi_parameters = True + self._multi_parameters = multi_parameters + self._dict_parameters = self._multi_parameters[0] + elif not self._has_multi_parameters: + self._cant_mix_formats_error() + else: + assert self._multi_parameters + self._multi_parameters.extend(multi_parameters) + + +@CompileState.plugin_for("default", "update") +class UpdateDMLState(DMLState): + isupdate = True + + include_table_with_column_exprs = False + + def __init__(self, statement: Update, compiler: SQLCompiler, **kw: Any): + self.statement = statement + + self.isupdate = True + if statement._ordered_values is not None: + self._process_ordered_values(statement) + elif statement._values is not None: + self._process_values(statement) + elif statement._multi_values: + self._no_multi_values_supported(statement) + t, ef = self._make_extra_froms(statement) + self._primary_table = t + self._extra_froms = ef + + self.is_multitable = mt = ef + self.include_table_with_column_exprs = bool( + mt and compiler.render_table_with_column_in_update_from + ) + + def _process_ordered_values(self, statement: ValuesBase) -> None: + parameters = statement._ordered_values + + if self._no_parameters: + self._no_parameters = False + assert parameters is not None + self._dict_parameters = dict(parameters) + self._ordered_values = parameters + self._parameter_ordering = [key for key, value in 
parameters] + else: + raise exc.InvalidRequestError( + "Can only invoke ordered_values() once, and not mixed " + "with any other values() call" + ) + + +@CompileState.plugin_for("default", "delete") +class DeleteDMLState(DMLState): + isdelete = True + + def __init__(self, statement: Delete, compiler: SQLCompiler, **kw: Any): + self.statement = statement + + self.isdelete = True + t, ef = self._make_extra_froms(statement) + self._primary_table = t + self._extra_froms = ef + self.is_multitable = ef + + +class UpdateBase( + roles.DMLRole, + HasCTE, + HasCompileState, + DialectKWArgs, + HasPrefixes, + Generative, + ExecutableReturnsRows, + ClauseElement, +): + """Form the base for ``INSERT``, ``UPDATE``, and ``DELETE`` statements.""" + + __visit_name__ = "update_base" + + _hints: util.immutabledict[Tuple[_DMLTableElement, str], str] = ( + util.EMPTY_DICT + ) + named_with_column = False + + _label_style: SelectLabelStyle = ( + SelectLabelStyle.LABEL_STYLE_DISAMBIGUATE_ONLY + ) + table: _DMLTableElement + + _return_defaults = False + _return_defaults_columns: Optional[Tuple[_ColumnsClauseElement, ...]] = ( + None + ) + _supplemental_returning: Optional[Tuple[_ColumnsClauseElement, ...]] = None + _returning: Tuple[_ColumnsClauseElement, ...] = () + + is_dml = True + + def _generate_fromclause_column_proxies( + self, + fromclause: FromClause, + columns: ColumnCollection[str, KeyedColumnElement[Any]], + primary_key: ColumnSet, + foreign_keys: Set[KeyedColumnElement[Any]], + ) -> None: + columns._populate_separate_keys( + col._make_proxy( + fromclause, primary_key=primary_key, foreign_keys=foreign_keys + ) + for col in self._all_selected_columns + if is_column_element(col) + ) + + def params(self, *arg: Any, **kw: Any) -> NoReturn: + """Set the parameters for the statement. + + This method raises ``NotImplementedError`` on the base class, + and is overridden by :class:`.ValuesBase` to provide the + SET/VALUES clause of UPDATE and INSERT. + + """ + raise NotImplementedError( + "params() is not supported for INSERT/UPDATE/DELETE statements." + " To set the values for an INSERT or UPDATE statement, use" + " stmt.values(**parameters)." + ) + + @_generative + def with_dialect_options(self, **opt: Any) -> Self: + """Add dialect options to this INSERT/UPDATE/DELETE object. + + e.g.:: + + upd = table.update().dialect_options(mysql_limit=10) + + .. versionadded: 1.4 - this method supersedes the dialect options + associated with the constructor. + + + """ + self._validate_dialect_kwargs(opt) + return self + + @_generative + def return_defaults( + self, + *cols: _DMLColumnArgument, + supplemental_cols: Optional[Iterable[_DMLColumnArgument]] = None, + sort_by_parameter_order: bool = False, + ) -> Self: + """Make use of a :term:`RETURNING` clause for the purpose + of fetching server-side expressions and defaults, for supporting + backends only. + + .. deepalchemy:: + + The :meth:`.UpdateBase.return_defaults` method is used by the ORM + for its internal work in fetching newly generated primary key + and server default values, in particular to provide the underyling + implementation of the :paramref:`_orm.Mapper.eager_defaults` + ORM feature as well as to allow RETURNING support with bulk + ORM inserts. Its behavior is fairly idiosyncratic + and is not really intended for general use. End users should + stick with using :meth:`.UpdateBase.returning` in order to + add RETURNING clauses to their INSERT, UPDATE and DELETE + statements. 
+ + Normally, a single row INSERT statement will automatically populate the + :attr:`.CursorResult.inserted_primary_key` attribute when executed, + which stores the primary key of the row that was just inserted in the + form of a :class:`.Row` object with column names as named tuple keys + (and the :attr:`.Row._mapping` view fully populated as well). The + dialect in use chooses the strategy to use in order to populate this + data; if it was generated using server-side defaults and / or SQL + expressions, dialect-specific approaches such as ``cursor.lastrowid`` + or ``RETURNING`` are typically used to acquire the new primary key + value. + + However, when the statement is modified by calling + :meth:`.UpdateBase.return_defaults` before executing the statement, + additional behaviors take place **only** for backends that support + RETURNING and for :class:`.Table` objects that maintain the + :paramref:`.Table.implicit_returning` parameter at its default value of + ``True``. In these cases, when the :class:`.CursorResult` is returned + from the statement's execution, not only will + :attr:`.CursorResult.inserted_primary_key` be populated as always, the + :attr:`.CursorResult.returned_defaults` attribute will also be + populated with a :class:`.Row` named-tuple representing the full range + of server generated + values from that single row, including values for any columns that + specify :paramref:`_schema.Column.server_default` or which make use of + :paramref:`_schema.Column.default` using a SQL expression. + + When invoking INSERT statements with multiple rows using + :ref:`insertmanyvalues `, the + :meth:`.UpdateBase.return_defaults` modifier will have the effect of + the :attr:`_engine.CursorResult.inserted_primary_key_rows` and + :attr:`_engine.CursorResult.returned_defaults_rows` attributes being + fully populated with lists of :class:`.Row` objects representing newly + inserted primary key values as well as newly inserted server generated + values for each row inserted. The + :attr:`.CursorResult.inserted_primary_key` and + :attr:`.CursorResult.returned_defaults` attributes will also continue + to be populated with the first row of these two collections. + + If the backend does not support RETURNING or the :class:`.Table` in use + has disabled :paramref:`.Table.implicit_returning`, then no RETURNING + clause is added and no additional data is fetched, however the + INSERT, UPDATE or DELETE statement proceeds normally. + + E.g.:: + + stmt = table.insert().values(data="newdata").return_defaults() + + result = connection.execute(stmt) + + server_created_at = result.returned_defaults["created_at"] + + When used against an UPDATE statement + :meth:`.UpdateBase.return_defaults` instead looks for columns that + include :paramref:`_schema.Column.onupdate` or + :paramref:`_schema.Column.server_onupdate` parameters assigned, when + constructing the columns that will be included in the RETURNING clause + by default if explicit columns were not specified. When used against a + DELETE statement, no columns are included in RETURNING by default, they + instead must be specified explicitly as there are no columns that + normally change values when a DELETE statement proceeds. + + .. versionadded:: 2.0 :meth:`.UpdateBase.return_defaults` is supported + for DELETE statements also and has been moved from + :class:`.ValuesBase` to :class:`.UpdateBase`. 
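+
+ As a rough, non-authoritative sketch (assuming a hypothetical
+ ``user_table`` with a server-generated ``created_at`` column), combining
+ this method with ``supplemental_cols`` might look like::
+
+ stmt = (
+ user_table.insert()
+ .values(name="newname")
+ .return_defaults(supplemental_cols=[user_table.c.name])
+ )
+ result = connection.execute(stmt)
+ created_at = result.returned_defaults["created_at"]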
+ + The :meth:`.UpdateBase.return_defaults` method is mutually exclusive + against the :meth:`.UpdateBase.returning` method and errors will be + raised during the SQL compilation process if both are used at the same + time on one statement. The RETURNING clause of the INSERT, UPDATE or + DELETE statement is therefore controlled by only one of these methods + at a time. + + The :meth:`.UpdateBase.return_defaults` method differs from + :meth:`.UpdateBase.returning` in these ways: + + 1. :meth:`.UpdateBase.return_defaults` method causes the + :attr:`.CursorResult.returned_defaults` collection to be populated + with the first row from the RETURNING result. This attribute is not + populated when using :meth:`.UpdateBase.returning`. + + 2. :meth:`.UpdateBase.return_defaults` is compatible with existing + logic used to fetch auto-generated primary key values that are then + populated into the :attr:`.CursorResult.inserted_primary_key` + attribute. By contrast, using :meth:`.UpdateBase.returning` will + have the effect of the :attr:`.CursorResult.inserted_primary_key` + attribute being left unpopulated. + + 3. :meth:`.UpdateBase.return_defaults` can be called against any + backend. Backends that don't support RETURNING will skip the usage + of the feature, rather than raising an exception, *unless* + ``supplemental_cols`` is passed. The return value + of :attr:`_engine.CursorResult.returned_defaults` will be ``None`` + for backends that don't support RETURNING or for which the target + :class:`.Table` sets :paramref:`.Table.implicit_returning` to + ``False``. + + 4. An INSERT statement invoked with executemany() is supported if the + backend database driver supports the + :ref:`insertmanyvalues ` + feature which is now supported by most SQLAlchemy-included backends. + When executemany is used, the + :attr:`_engine.CursorResult.returned_defaults_rows` and + :attr:`_engine.CursorResult.inserted_primary_key_rows` accessors + will return the inserted defaults and primary keys. + + .. versionadded:: 1.4 Added + :attr:`_engine.CursorResult.returned_defaults_rows` and + :attr:`_engine.CursorResult.inserted_primary_key_rows` accessors. + In version 2.0, the underlying implementation which fetches and + populates the data for these attributes was generalized to be + supported by most backends, whereas in 1.4 they were only + supported by the ``psycopg2`` driver. + + + :param cols: optional list of column key names or + :class:`_schema.Column` that acts as a filter for those columns that + will be fetched. + :param supplemental_cols: optional list of RETURNING expressions, + in the same form as one would pass to the + :meth:`.UpdateBase.returning` method. When present, the additional + columns will be included in the RETURNING clause, and the + :class:`.CursorResult` object will be "rewound" when returned, so + that methods like :meth:`.CursorResult.all` will return new rows + mostly as though the statement used :meth:`.UpdateBase.returning` + directly. However, unlike when using :meth:`.UpdateBase.returning` + directly, the **order of the columns is undefined**, so can only be + targeted using names or :attr:`.Row._mapping` keys; they cannot + reliably be targeted positionally. + + .. versionadded:: 2.0 + + :param sort_by_parameter_order: for a batch INSERT that is being + executed against multiple parameter sets, organize the results of + RETURNING so that the returned rows correspond to the order of + parameter sets passed in. 
This applies only to an :term:`executemany` + execution for supporting dialects and typically makes use of the + :term:`insertmanyvalues` feature. + + .. versionadded:: 2.0.10 + + .. seealso:: + + :ref:`engine_insertmanyvalues_returning_order` - background on + sorting of RETURNING rows for bulk INSERT + + .. seealso:: + + :meth:`.UpdateBase.returning` + + :attr:`_engine.CursorResult.returned_defaults` + + :attr:`_engine.CursorResult.returned_defaults_rows` + + :attr:`_engine.CursorResult.inserted_primary_key` + + :attr:`_engine.CursorResult.inserted_primary_key_rows` + + """ + + if self._return_defaults: + # note _return_defaults_columns = () means return all columns, + # so if we have been here before, only update collection if there + # are columns in the collection + if self._return_defaults_columns and cols: + self._return_defaults_columns = tuple( + util.OrderedSet(self._return_defaults_columns).union( + coercions.expect(roles.ColumnsClauseRole, c) + for c in cols + ) + ) + else: + # set for all columns + self._return_defaults_columns = () + else: + self._return_defaults_columns = tuple( + coercions.expect(roles.ColumnsClauseRole, c) for c in cols + ) + self._return_defaults = True + if sort_by_parameter_order: + if not self.is_insert: + raise exc.ArgumentError( + "The 'sort_by_parameter_order' argument to " + "return_defaults() only applies to INSERT statements" + ) + self._sort_by_parameter_order = True + if supplemental_cols: + # uniquifying while also maintaining order (the maintain of order + # is for test suites but also for vertical splicing + supplemental_col_tup = ( + coercions.expect(roles.ColumnsClauseRole, c) + for c in supplemental_cols + ) + + if self._supplemental_returning is None: + self._supplemental_returning = tuple( + util.unique_list(supplemental_col_tup) + ) + else: + self._supplemental_returning = tuple( + util.unique_list( + self._supplemental_returning + + tuple(supplemental_col_tup) + ) + ) + + return self + + def is_derived_from(self, fromclause: Optional[FromClause]) -> bool: + """Return ``True`` if this :class:`.ReturnsRows` is + 'derived' from the given :class:`.FromClause`. + + Since these are DMLs, we dont want such statements ever being adapted + so we return False for derives. + + """ + return False + + @_generative + def returning( + self, + *cols: _ColumnsClauseArgument[Any], + sort_by_parameter_order: bool = False, + **__kw: Any, + ) -> UpdateBase: + r"""Add a :term:`RETURNING` or equivalent clause to this statement. + + e.g.: + + .. sourcecode:: pycon+sql + + >>> stmt = ( + ... table.update() + ... .where(table.c.data == "value") + ... .values(status="X") + ... .returning(table.c.server_flag, table.c.updated_timestamp) + ... ) + >>> print(stmt) + {printsql}UPDATE some_table SET status=:status + WHERE some_table.data = :data_1 + RETURNING some_table.server_flag, some_table.updated_timestamp + + The method may be invoked multiple times to add new entries to the + list of expressions to be returned. + + .. versionadded:: 1.4.0b2 The method may be invoked multiple times to + add new entries to the list of expressions to be returned. + + The given collection of column expressions should be derived from the + table that is the target of the INSERT, UPDATE, or DELETE. While + :class:`_schema.Column` objects are typical, the elements can also be + expressions: + + .. sourcecode:: pycon+sql + + >>> stmt = table.insert().returning( + ... (table.c.first_name + " " + table.c.last_name).label("fullname") + ... 
) + >>> print(stmt) + {printsql}INSERT INTO some_table (first_name, last_name) + VALUES (:first_name, :last_name) + RETURNING some_table.first_name || :first_name_1 || some_table.last_name AS fullname + + Upon compilation, a RETURNING clause, or database equivalent, + will be rendered within the statement. For INSERT and UPDATE, + the values are the newly inserted/updated values. For DELETE, + the values are those of the rows which were deleted. + + Upon execution, the values of the columns to be returned are made + available via the result set and can be iterated using + :meth:`_engine.CursorResult.fetchone` and similar. + For DBAPIs which do not + natively support returning values (i.e. cx_oracle), SQLAlchemy will + approximate this behavior at the result level so that a reasonable + amount of behavioral neutrality is provided. + + Note that not all databases/DBAPIs + support RETURNING. For those backends with no support, + an exception is raised upon compilation and/or execution. + For those who do support it, the functionality across backends + varies greatly, including restrictions on executemany() + and other statements which return multiple rows. Please + read the documentation notes for the database in use in + order to determine the availability of RETURNING. + + :param \*cols: series of columns, SQL expressions, or whole tables + entities to be returned. + :param sort_by_parameter_order: for a batch INSERT that is being + executed against multiple parameter sets, organize the results of + RETURNING so that the returned rows correspond to the order of + parameter sets passed in. This applies only to an :term:`executemany` + execution for supporting dialects and typically makes use of the + :term:`insertmanyvalues` feature. + + .. versionadded:: 2.0.10 + + .. seealso:: + + :ref:`engine_insertmanyvalues_returning_order` - background on + sorting of RETURNING rows for bulk INSERT (Core level discussion) + + :ref:`orm_queryguide_bulk_insert_returning_ordered` - example of + use with :ref:`orm_queryguide_bulk_insert` (ORM level discussion) + + .. seealso:: + + :meth:`.UpdateBase.return_defaults` - an alternative method tailored + towards efficient fetching of server-side defaults and triggers + for single-row INSERTs or UPDATEs. + + :ref:`tutorial_insert_returning` - in the :ref:`unified_tutorial` + + """ # noqa: E501 + if __kw: + raise _unexpected_kw("UpdateBase.returning()", __kw) + if self._return_defaults: + raise exc.InvalidRequestError( + "return_defaults() is already configured on this statement" + ) + self._returning += tuple( + coercions.expect(roles.ColumnsClauseRole, c) for c in cols + ) + if sort_by_parameter_order: + if not self.is_insert: + raise exc.ArgumentError( + "The 'sort_by_parameter_order' argument to returning() " + "only applies to INSERT statements" + ) + self._sort_by_parameter_order = True + return self + + def corresponding_column( + self, column: KeyedColumnElement[Any], require_embedded: bool = False + ) -> Optional[ColumnElement[Any]]: + return self.exported_columns.corresponding_column( + column, require_embedded=require_embedded + ) + + @util.ro_memoized_property + def _all_selected_columns(self) -> _SelectIterable: + return [c for c in _select_iterables(self._returning)] + + @util.ro_memoized_property + def exported_columns( + self, + ) -> ReadOnlyColumnCollection[Optional[str], ColumnElement[Any]]: + """Return the RETURNING columns as a column collection for this + statement. + + .. 
versionadded:: 1.4 + + """ + return ColumnCollection( + (c.key, c) + for c in self._all_selected_columns + if is_column_element(c) + ).as_readonly() + + @_generative + def with_hint( + self, + text: str, + selectable: Optional[_DMLTableArgument] = None, + dialect_name: str = "*", + ) -> Self: + """Add a table hint for a single table to this + INSERT/UPDATE/DELETE statement. + + .. note:: + + :meth:`.UpdateBase.with_hint` currently applies only to + Microsoft SQL Server. For MySQL INSERT/UPDATE/DELETE hints, use + :meth:`.UpdateBase.prefix_with`. + + The text of the hint is rendered in the appropriate + location for the database backend in use, relative + to the :class:`_schema.Table` that is the subject of this + statement, or optionally to that of the given + :class:`_schema.Table` passed as the ``selectable`` argument. + + The ``dialect_name`` option will limit the rendering of a particular + hint to a particular backend. Such as, to add a hint + that only takes effect for SQL Server:: + + mytable.insert().with_hint("WITH (PAGLOCK)", dialect_name="mssql") + + :param text: Text of the hint. + :param selectable: optional :class:`_schema.Table` that specifies + an element of the FROM clause within an UPDATE or DELETE + to be the subject of the hint - applies only to certain backends. + :param dialect_name: defaults to ``*``, if specified as the name + of a particular dialect, will apply these hints only when + that dialect is in use. + """ + if selectable is None: + selectable = self.table + else: + selectable = coercions.expect(roles.DMLTableRole, selectable) + self._hints = self._hints.union({(selectable, dialect_name): text}) + return self + + @property + def entity_description(self) -> Dict[str, Any]: + """Return a :term:`plugin-enabled` description of the table and/or + entity which this DML construct is operating against. + + This attribute is generally useful when using the ORM, as an + extended structure which includes information about mapped + entities is returned. The section :ref:`queryguide_inspection` + contains more background. + + For a Core statement, the structure returned by this accessor + is derived from the :attr:`.UpdateBase.table` attribute, and + refers to the :class:`.Table` being inserted, updated, or deleted:: + + >>> stmt = insert(user_table) + >>> stmt.entity_description + { + "name": "user_table", + "table": Table("user_table", ...) + } + + .. versionadded:: 1.4.33 + + .. seealso:: + + :attr:`.UpdateBase.returning_column_descriptions` + + :attr:`.Select.column_descriptions` - entity information for + a :func:`.select` construct + + :ref:`queryguide_inspection` - ORM background + + """ + meth = DMLState.get_plugin_class(self).get_entity_description + return meth(self) + + @property + def returning_column_descriptions(self) -> List[Dict[str, Any]]: + """Return a :term:`plugin-enabled` description of the columns + which this DML construct is RETURNING against, in other words + the expressions established as part of :meth:`.UpdateBase.returning`. + + This attribute is generally useful when using the ORM, as an + extended structure which includes information about mapped + entities is returned. The section :ref:`queryguide_inspection` + contains more background. 
+ + For a Core statement, the structure returned by this accessor is + derived from the same objects that are returned by the + :attr:`.UpdateBase.exported_columns` accessor:: + + >>> stmt = insert(user_table).returning(user_table.c.id, user_table.c.name) + >>> stmt.entity_description + [ + { + "name": "id", + "type": Integer, + "expr": Column("id", Integer(), table=, ...) + }, + { + "name": "name", + "type": String(), + "expr": Column("name", String(), table=, ...) + }, + ] + + .. versionadded:: 1.4.33 + + .. seealso:: + + :attr:`.UpdateBase.entity_description` + + :attr:`.Select.column_descriptions` - entity information for + a :func:`.select` construct + + :ref:`queryguide_inspection` - ORM background + + """ # noqa: E501 + meth = DMLState.get_plugin_class( + self + ).get_returning_column_descriptions + return meth(self) + + +class ValuesBase(UpdateBase): + """Supplies support for :meth:`.ValuesBase.values` to + INSERT and UPDATE constructs.""" + + __visit_name__ = "values_base" + + _supports_multi_parameters = False + + select: Optional[Select[Any]] = None + """SELECT statement for INSERT .. FROM SELECT""" + + _post_values_clause: Optional[ClauseElement] = None + """used by extensions to Insert etc. to add additional syntacitcal + constructs, e.g. ON CONFLICT etc.""" + + _values: Optional[util.immutabledict[_DMLColumnElement, Any]] = None + _multi_values: Tuple[ + Union[ + Sequence[Dict[_DMLColumnElement, Any]], + Sequence[Sequence[Any]], + ], + ..., + ] = () + + _ordered_values: Optional[List[Tuple[_DMLColumnElement, Any]]] = None + + _select_names: Optional[List[str]] = None + _inline: bool = False + + def __init__(self, table: _DMLTableArgument): + self.table = coercions.expect( + roles.DMLTableRole, table, apply_propagate_attrs=self + ) + + @_generative + @_exclusive_against( + "_select_names", + "_ordered_values", + msgs={ + "_select_names": "This construct already inserts from a SELECT", + "_ordered_values": "This statement already has ordered " + "values present", + }, + ) + def values( + self, + *args: Union[ + _DMLColumnKeyMapping[Any], + Sequence[Any], + ], + **kwargs: Any, + ) -> Self: + r"""Specify a fixed VALUES clause for an INSERT statement, or the SET + clause for an UPDATE. + + Note that the :class:`_expression.Insert` and + :class:`_expression.Update` + constructs support + per-execution time formatting of the VALUES and/or SET clauses, + based on the arguments passed to :meth:`_engine.Connection.execute`. + However, the :meth:`.ValuesBase.values` method can be used to "fix" a + particular set of parameters into the statement. + + Multiple calls to :meth:`.ValuesBase.values` will produce a new + construct, each one with the parameter list modified to include + the new parameters sent. In the typical case of a single + dictionary of parameters, the newly passed keys will replace + the same keys in the previous construct. In the case of a list-based + "multiple values" construct, each new list of values is extended + onto the existing list of values. + + :param \**kwargs: key value pairs representing the string key + of a :class:`_schema.Column` + mapped to the value to be rendered into the + VALUES or SET clause:: + + users.insert().values(name="some name") + + users.update().where(users.c.id == 5).values(name="some name") + + :param \*args: As an alternative to passing key/value parameters, + a dictionary, tuple, or list of dictionaries or tuples can be passed + as a single positional argument in order to form the VALUES or + SET clause of the statement. 
The forms that are accepted vary + based on whether this is an :class:`_expression.Insert` or an + :class:`_expression.Update` construct. + + For either an :class:`_expression.Insert` or + :class:`_expression.Update` + construct, a single dictionary can be passed, which works the same as + that of the kwargs form:: + + users.insert().values({"name": "some name"}) + + users.update().values({"name": "some new name"}) + + Also for either form but more typically for the + :class:`_expression.Insert` construct, a tuple that contains an + entry for every column in the table is also accepted:: + + users.insert().values((5, "some name")) + + The :class:`_expression.Insert` construct also supports being + passed a list of dictionaries or full-table-tuples, which on the + server will render the less common SQL syntax of "multiple values" - + this syntax is supported on backends such as SQLite, PostgreSQL, + MySQL, but not necessarily others:: + + users.insert().values( + [ + {"name": "some name"}, + {"name": "some other name"}, + {"name": "yet another name"}, + ] + ) + + The above form would render a multiple VALUES statement similar to: + + .. sourcecode:: sql + + INSERT INTO users (name) VALUES + (:name_1), + (:name_2), + (:name_3) + + It is essential to note that **passing multiple values is + NOT the same as using traditional executemany() form**. The above + syntax is a **special** syntax not typically used. To emit an + INSERT statement against multiple rows, the normal method is + to pass a multiple values list to the + :meth:`_engine.Connection.execute` + method, which is supported by all database backends and is generally + more efficient for a very large number of parameters. + + .. seealso:: + + :ref:`tutorial_multiple_parameters` - an introduction to + the traditional Core method of multiple parameter set + invocation for INSERTs and other statements. + + The UPDATE construct also supports rendering the SET parameters + in a specific order. For this feature refer to the + :meth:`_expression.Update.ordered_values` method. + + .. seealso:: + + :meth:`_expression.Update.ordered_values` + + + """ + if args: + # positional case. this is currently expensive. we don't + # yet have positional-only args so we have to check the length. + # then we need to check multiparams vs. single dictionary. + # since the parameter format is needed in order to determine + # a cache key, we need to determine this up front. + arg = args[0] + + if kwargs: + raise exc.ArgumentError( + "Can't pass positional and kwargs to values() " + "simultaneously" + ) + elif len(args) > 1: + raise exc.ArgumentError( + "Only a single dictionary/tuple or list of " + "dictionaries/tuples is accepted positionally." + ) + + elif isinstance(arg, collections_abc.Sequence): + if arg and isinstance(arg[0], dict): + multi_kv_generator = DMLState.get_plugin_class( + self + )._get_multi_crud_kv_pairs + self._multi_values += (multi_kv_generator(self, arg),) + return self + + if arg and isinstance(arg[0], (list, tuple)): + self._multi_values += (arg,) + return self + + if TYPE_CHECKING: + # crud.py raises during compilation if this is not the + # case + assert isinstance(self, Insert) + + # tuple values + arg = {c.key: value for c, value in zip(self.table.c, arg)} + + else: + # kwarg path. this is the most common path for non-multi-params + # so this is fairly quick. 
+ arg = cast("Dict[_DMLColumnArgument, Any]", kwargs) + if args: + raise exc.ArgumentError( + "Only a single dictionary/tuple or list of " + "dictionaries/tuples is accepted positionally." + ) + + # for top level values(), convert literals to anonymous bound + # parameters at statement construction time, so that these values can + # participate in the cache key process like any other ClauseElement. + # crud.py now intercepts bound parameters with unique=True from here + # and ensures they get the "crud"-style name when rendered. + + kv_generator = DMLState.get_plugin_class(self)._get_crud_kv_pairs + coerced_arg = dict(kv_generator(self, arg.items(), True)) + if self._values: + self._values = self._values.union(coerced_arg) + else: + self._values = util.immutabledict(coerced_arg) + return self + + +class Insert(ValuesBase): + """Represent an INSERT construct. + + The :class:`_expression.Insert` object is created using the + :func:`_expression.insert()` function. + + """ + + __visit_name__ = "insert" + + _supports_multi_parameters = True + + select = None + include_insert_from_select_defaults = False + + _sort_by_parameter_order: bool = False + + is_insert = True + + table: TableClause + + _traverse_internals = ( + [ + ("table", InternalTraversal.dp_clauseelement), + ("_inline", InternalTraversal.dp_boolean), + ("_select_names", InternalTraversal.dp_string_list), + ("_values", InternalTraversal.dp_dml_values), + ("_multi_values", InternalTraversal.dp_dml_multi_values), + ("select", InternalTraversal.dp_clauseelement), + ("_post_values_clause", InternalTraversal.dp_clauseelement), + ("_returning", InternalTraversal.dp_clauseelement_tuple), + ("_hints", InternalTraversal.dp_table_hint_list), + ("_return_defaults", InternalTraversal.dp_boolean), + ( + "_return_defaults_columns", + InternalTraversal.dp_clauseelement_tuple, + ), + ("_sort_by_parameter_order", InternalTraversal.dp_boolean), + ] + + HasPrefixes._has_prefixes_traverse_internals + + DialectKWArgs._dialect_kwargs_traverse_internals + + Executable._executable_traverse_internals + + HasCTE._has_ctes_traverse_internals + ) + + def __init__(self, table: _DMLTableArgument): + super().__init__(table) + + @_generative + def inline(self) -> Self: + """Make this :class:`_expression.Insert` construct "inline" . + + When set, no attempt will be made to retrieve the + SQL-generated default values to be provided within the statement; + in particular, + this allows SQL expressions to be rendered 'inline' within the + statement without the need to pre-execute them beforehand; for + backends that support "returning", this turns off the "implicit + returning" feature for the statement. + + + .. versionchanged:: 1.4 the :paramref:`_expression.Insert.inline` + parameter + is now superseded by the :meth:`_expression.Insert.inline` method. + + """ + self._inline = True + return self + + @_generative + def from_select( + self, + names: Sequence[_DMLColumnArgument], + select: Selectable, + include_defaults: bool = True, + ) -> Self: + """Return a new :class:`_expression.Insert` construct which represents + an ``INSERT...FROM SELECT`` statement. + + e.g.:: + + sel = select(table1.c.a, table1.c.b).where(table1.c.c > 5) + ins = table2.insert().from_select(["a", "b"], sel) + + :param names: a sequence of string column names or + :class:`_schema.Column` + objects representing the target columns. 
+ :param select: a :func:`_expression.select` construct, + :class:`_expression.FromClause` + or other construct which resolves into a + :class:`_expression.FromClause`, + such as an ORM :class:`_query.Query` object, etc. The order of + columns returned from this FROM clause should correspond to the + order of columns sent as the ``names`` parameter; while this + is not checked before passing along to the database, the database + would normally raise an exception if these column lists don't + correspond. + :param include_defaults: if True, non-server default values and + SQL expressions as specified on :class:`_schema.Column` objects + (as documented in :ref:`metadata_defaults_toplevel`) not + otherwise specified in the list of names will be rendered + into the INSERT and SELECT statements, so that these values are also + included in the data to be inserted. + + .. note:: A Python-side default that uses a Python callable function + will only be invoked **once** for the whole statement, and **not + per row**. + + """ + + if self._values: + raise exc.InvalidRequestError( + "This construct already inserts value expressions" + ) + + self._select_names = [ + coercions.expect(roles.DMLColumnRole, name, as_key=True) + for name in names + ] + self._inline = True + self.include_insert_from_select_defaults = include_defaults + self.select = coercions.expect(roles.DMLSelectRole, select) + return self + + if TYPE_CHECKING: + # START OVERLOADED FUNCTIONS self.returning ReturningInsert 1-8 ", *, sort_by_parameter_order: bool = False" # noqa: E501 + + # code within this block is **programmatically, + # statically generated** by tools/generate_tuple_map_overloads.py + + @overload + def returning( + self, __ent0: _TCCA[_T0], *, sort_by_parameter_order: bool = False + ) -> ReturningInsert[Tuple[_T0]]: ... + + @overload + def returning( + self, + __ent0: _TCCA[_T0], + __ent1: _TCCA[_T1], + *, + sort_by_parameter_order: bool = False, + ) -> ReturningInsert[Tuple[_T0, _T1]]: ... + + @overload + def returning( + self, + __ent0: _TCCA[_T0], + __ent1: _TCCA[_T1], + __ent2: _TCCA[_T2], + *, + sort_by_parameter_order: bool = False, + ) -> ReturningInsert[Tuple[_T0, _T1, _T2]]: ... + + @overload + def returning( + self, + __ent0: _TCCA[_T0], + __ent1: _TCCA[_T1], + __ent2: _TCCA[_T2], + __ent3: _TCCA[_T3], + *, + sort_by_parameter_order: bool = False, + ) -> ReturningInsert[Tuple[_T0, _T1, _T2, _T3]]: ... + + @overload + def returning( + self, + __ent0: _TCCA[_T0], + __ent1: _TCCA[_T1], + __ent2: _TCCA[_T2], + __ent3: _TCCA[_T3], + __ent4: _TCCA[_T4], + *, + sort_by_parameter_order: bool = False, + ) -> ReturningInsert[Tuple[_T0, _T1, _T2, _T3, _T4]]: ... + + @overload + def returning( + self, + __ent0: _TCCA[_T0], + __ent1: _TCCA[_T1], + __ent2: _TCCA[_T2], + __ent3: _TCCA[_T3], + __ent4: _TCCA[_T4], + __ent5: _TCCA[_T5], + *, + sort_by_parameter_order: bool = False, + ) -> ReturningInsert[Tuple[_T0, _T1, _T2, _T3, _T4, _T5]]: ... + + @overload + def returning( + self, + __ent0: _TCCA[_T0], + __ent1: _TCCA[_T1], + __ent2: _TCCA[_T2], + __ent3: _TCCA[_T3], + __ent4: _TCCA[_T4], + __ent5: _TCCA[_T5], + __ent6: _TCCA[_T6], + *, + sort_by_parameter_order: bool = False, + ) -> ReturningInsert[Tuple[_T0, _T1, _T2, _T3, _T4, _T5, _T6]]: ... 
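+
+        # Illustrative note, not part of the generated overloads above: with a
+        # hypothetical ``user_table`` whose ``id`` column is Integer and whose
+        # ``name`` column is String, a call such as
+        #
+        #     insert(user_table).returning(
+        #         user_table.c.id,
+        #         user_table.c.name,
+        #         sort_by_parameter_order=True,
+        #     )
+        #
+        # matches the two-element overload and is typed roughly as
+        # ``ReturningInsert[Tuple[int, str]]``.  ``sort_by_parameter_order`` is
+        # keyword-only and, per ``UpdateBase.returning()``, only has an effect
+        # for executemany-style INSERTs.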
+ + @overload + def returning( + self, + __ent0: _TCCA[_T0], + __ent1: _TCCA[_T1], + __ent2: _TCCA[_T2], + __ent3: _TCCA[_T3], + __ent4: _TCCA[_T4], + __ent5: _TCCA[_T5], + __ent6: _TCCA[_T6], + __ent7: _TCCA[_T7], + *, + sort_by_parameter_order: bool = False, + ) -> ReturningInsert[ + Tuple[_T0, _T1, _T2, _T3, _T4, _T5, _T6, _T7] + ]: ... + + # END OVERLOADED FUNCTIONS self.returning + + @overload + def returning( + self, + *cols: _ColumnsClauseArgument[Any], + sort_by_parameter_order: bool = False, + **__kw: Any, + ) -> ReturningInsert[Any]: ... + + def returning( + self, + *cols: _ColumnsClauseArgument[Any], + sort_by_parameter_order: bool = False, + **__kw: Any, + ) -> ReturningInsert[Any]: ... + + +class ReturningInsert(Insert, TypedReturnsRows[_TP]): + """Typing-only class that establishes a generic type form of + :class:`.Insert` which tracks returned column types. + + This datatype is delivered when calling the + :meth:`.Insert.returning` method. + + .. versionadded:: 2.0 + + """ + + +class DMLWhereBase: + table: _DMLTableElement + _where_criteria: Tuple[ColumnElement[Any], ...] = () + + @_generative + def where(self, *whereclause: _ColumnExpressionArgument[bool]) -> Self: + """Return a new construct with the given expression(s) added to + its WHERE clause, joined to the existing clause via AND, if any. + + Both :meth:`_dml.Update.where` and :meth:`_dml.Delete.where` + support multiple-table forms, including database-specific + ``UPDATE...FROM`` as well as ``DELETE..USING``. For backends that + don't have multiple-table support, a backend agnostic approach + to using multiple tables is to make use of correlated subqueries. + See the linked tutorial sections below for examples. + + .. seealso:: + + :ref:`tutorial_correlated_updates` + + :ref:`tutorial_update_from` + + :ref:`tutorial_multi_table_deletes` + + """ + + for criterion in whereclause: + where_criteria: ColumnElement[Any] = coercions.expect( + roles.WhereHavingRole, criterion, apply_propagate_attrs=self + ) + self._where_criteria += (where_criteria,) + return self + + def filter(self, *criteria: roles.ExpressionElementRole[Any]) -> Self: + """A synonym for the :meth:`_dml.DMLWhereBase.where` method. + + .. versionadded:: 1.4 + + """ + + return self.where(*criteria) + + def _filter_by_zero(self) -> _DMLTableElement: + return self.table + + def filter_by(self, **kwargs: Any) -> Self: + r"""apply the given filtering criterion as a WHERE clause + to this select. + + """ + from_entity = self._filter_by_zero() + + clauses = [ + _entity_namespace_key(from_entity, key) == value + for key, value in kwargs.items() + ] + return self.filter(*clauses) + + @property + def whereclause(self) -> Optional[ColumnElement[Any]]: + """Return the completed WHERE clause for this :class:`.DMLWhereBase` + statement. + + This assembles the current collection of WHERE criteria + into a single :class:`_expression.BooleanClauseList` construct. + + + .. versionadded:: 1.4 + + """ + + return BooleanClauseList._construct_for_whereclause( + self._where_criteria + ) + + +class Update(DMLWhereBase, ValuesBase): + """Represent an Update construct. + + The :class:`_expression.Update` object is created using the + :func:`_expression.update()` function. 
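+
+    An illustrative sketch, assuming a hypothetical ``user_table``::
+
+        stmt = update(user_table).where(user_table.c.id == 5).values(name="ed")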
+ + """ + + __visit_name__ = "update" + + is_update = True + + _traverse_internals = ( + [ + ("table", InternalTraversal.dp_clauseelement), + ("_where_criteria", InternalTraversal.dp_clauseelement_tuple), + ("_inline", InternalTraversal.dp_boolean), + ("_ordered_values", InternalTraversal.dp_dml_ordered_values), + ("_values", InternalTraversal.dp_dml_values), + ("_returning", InternalTraversal.dp_clauseelement_tuple), + ("_hints", InternalTraversal.dp_table_hint_list), + ("_return_defaults", InternalTraversal.dp_boolean), + ( + "_return_defaults_columns", + InternalTraversal.dp_clauseelement_tuple, + ), + ] + + HasPrefixes._has_prefixes_traverse_internals + + DialectKWArgs._dialect_kwargs_traverse_internals + + Executable._executable_traverse_internals + + HasCTE._has_ctes_traverse_internals + ) + + def __init__(self, table: _DMLTableArgument): + super().__init__(table) + + @_generative + def ordered_values(self, *args: Tuple[_DMLColumnArgument, Any]) -> Self: + """Specify the VALUES clause of this UPDATE statement with an explicit + parameter ordering that will be maintained in the SET clause of the + resulting UPDATE statement. + + E.g.:: + + stmt = table.update().ordered_values(("name", "ed"), ("ident", "foo")) + + .. seealso:: + + :ref:`tutorial_parameter_ordered_updates` - full example of the + :meth:`_expression.Update.ordered_values` method. + + .. versionchanged:: 1.4 The :meth:`_expression.Update.ordered_values` + method + supersedes the + :paramref:`_expression.update.preserve_parameter_order` + parameter, which will be removed in SQLAlchemy 2.0. + + """ # noqa: E501 + if self._values: + raise exc.ArgumentError( + "This statement already has values present" + ) + elif self._ordered_values: + raise exc.ArgumentError( + "This statement already has ordered values present" + ) + + kv_generator = DMLState.get_plugin_class(self)._get_crud_kv_pairs + self._ordered_values = kv_generator(self, args, True) + return self + + @_generative + def inline(self) -> Self: + """Make this :class:`_expression.Update` construct "inline" . + + When set, SQL defaults present on :class:`_schema.Column` + objects via the + ``default`` keyword will be compiled 'inline' into the statement and + not pre-executed. This means that their values will not be available + in the dictionary returned from + :meth:`_engine.CursorResult.last_updated_params`. + + .. versionchanged:: 1.4 the :paramref:`_expression.update.inline` + parameter + is now superseded by the :meth:`_expression.Update.inline` method. + + """ + self._inline = True + return self + + if TYPE_CHECKING: + # START OVERLOADED FUNCTIONS self.returning ReturningUpdate 1-8 + + # code within this block is **programmatically, + # statically generated** by tools/generate_tuple_map_overloads.py + + @overload + def returning( + self, __ent0: _TCCA[_T0] + ) -> ReturningUpdate[Tuple[_T0]]: ... + + @overload + def returning( + self, __ent0: _TCCA[_T0], __ent1: _TCCA[_T1] + ) -> ReturningUpdate[Tuple[_T0, _T1]]: ... + + @overload + def returning( + self, __ent0: _TCCA[_T0], __ent1: _TCCA[_T1], __ent2: _TCCA[_T2] + ) -> ReturningUpdate[Tuple[_T0, _T1, _T2]]: ... + + @overload + def returning( + self, + __ent0: _TCCA[_T0], + __ent1: _TCCA[_T1], + __ent2: _TCCA[_T2], + __ent3: _TCCA[_T3], + ) -> ReturningUpdate[Tuple[_T0, _T1, _T2, _T3]]: ... + + @overload + def returning( + self, + __ent0: _TCCA[_T0], + __ent1: _TCCA[_T1], + __ent2: _TCCA[_T2], + __ent3: _TCCA[_T3], + __ent4: _TCCA[_T4], + ) -> ReturningUpdate[Tuple[_T0, _T1, _T2, _T3, _T4]]: ... 
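+
+        # Illustrative note (hypothetical ``account`` table): the overloads
+        # above type an expression such as
+        #
+        #     update(account).values(balance=0).returning(account.c.id)
+        #
+        # roughly as ``ReturningUpdate[Tuple[int]]`` when ``id`` is an Integer
+        # column.  Ordering of the SET clause itself is controlled separately
+        # via ``Update.ordered_values()``.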
+ + @overload + def returning( + self, + __ent0: _TCCA[_T0], + __ent1: _TCCA[_T1], + __ent2: _TCCA[_T2], + __ent3: _TCCA[_T3], + __ent4: _TCCA[_T4], + __ent5: _TCCA[_T5], + ) -> ReturningUpdate[Tuple[_T0, _T1, _T2, _T3, _T4, _T5]]: ... + + @overload + def returning( + self, + __ent0: _TCCA[_T0], + __ent1: _TCCA[_T1], + __ent2: _TCCA[_T2], + __ent3: _TCCA[_T3], + __ent4: _TCCA[_T4], + __ent5: _TCCA[_T5], + __ent6: _TCCA[_T6], + ) -> ReturningUpdate[Tuple[_T0, _T1, _T2, _T3, _T4, _T5, _T6]]: ... + + @overload + def returning( + self, + __ent0: _TCCA[_T0], + __ent1: _TCCA[_T1], + __ent2: _TCCA[_T2], + __ent3: _TCCA[_T3], + __ent4: _TCCA[_T4], + __ent5: _TCCA[_T5], + __ent6: _TCCA[_T6], + __ent7: _TCCA[_T7], + ) -> ReturningUpdate[ + Tuple[_T0, _T1, _T2, _T3, _T4, _T5, _T6, _T7] + ]: ... + + # END OVERLOADED FUNCTIONS self.returning + + @overload + def returning( + self, *cols: _ColumnsClauseArgument[Any], **__kw: Any + ) -> ReturningUpdate[Any]: ... + + def returning( + self, *cols: _ColumnsClauseArgument[Any], **__kw: Any + ) -> ReturningUpdate[Any]: ... + + +class ReturningUpdate(Update, TypedReturnsRows[_TP]): + """Typing-only class that establishes a generic type form of + :class:`.Update` which tracks returned column types. + + This datatype is delivered when calling the + :meth:`.Update.returning` method. + + .. versionadded:: 2.0 + + """ + + +class Delete(DMLWhereBase, UpdateBase): + """Represent a DELETE construct. + + The :class:`_expression.Delete` object is created using the + :func:`_expression.delete()` function. + + """ + + __visit_name__ = "delete" + + is_delete = True + + _traverse_internals = ( + [ + ("table", InternalTraversal.dp_clauseelement), + ("_where_criteria", InternalTraversal.dp_clauseelement_tuple), + ("_returning", InternalTraversal.dp_clauseelement_tuple), + ("_hints", InternalTraversal.dp_table_hint_list), + ] + + HasPrefixes._has_prefixes_traverse_internals + + DialectKWArgs._dialect_kwargs_traverse_internals + + Executable._executable_traverse_internals + + HasCTE._has_ctes_traverse_internals + ) + + def __init__(self, table: _DMLTableArgument): + self.table = coercions.expect( + roles.DMLTableRole, table, apply_propagate_attrs=self + ) + + if TYPE_CHECKING: + # START OVERLOADED FUNCTIONS self.returning ReturningDelete 1-8 + + # code within this block is **programmatically, + # statically generated** by tools/generate_tuple_map_overloads.py + + @overload + def returning( + self, __ent0: _TCCA[_T0] + ) -> ReturningDelete[Tuple[_T0]]: ... + + @overload + def returning( + self, __ent0: _TCCA[_T0], __ent1: _TCCA[_T1] + ) -> ReturningDelete[Tuple[_T0, _T1]]: ... + + @overload + def returning( + self, __ent0: _TCCA[_T0], __ent1: _TCCA[_T1], __ent2: _TCCA[_T2] + ) -> ReturningDelete[Tuple[_T0, _T1, _T2]]: ... + + @overload + def returning( + self, + __ent0: _TCCA[_T0], + __ent1: _TCCA[_T1], + __ent2: _TCCA[_T2], + __ent3: _TCCA[_T3], + ) -> ReturningDelete[Tuple[_T0, _T1, _T2, _T3]]: ... + + @overload + def returning( + self, + __ent0: _TCCA[_T0], + __ent1: _TCCA[_T1], + __ent2: _TCCA[_T2], + __ent3: _TCCA[_T3], + __ent4: _TCCA[_T4], + ) -> ReturningDelete[Tuple[_T0, _T1, _T2, _T3, _T4]]: ... + + @overload + def returning( + self, + __ent0: _TCCA[_T0], + __ent1: _TCCA[_T1], + __ent2: _TCCA[_T2], + __ent3: _TCCA[_T3], + __ent4: _TCCA[_T4], + __ent5: _TCCA[_T5], + ) -> ReturningDelete[Tuple[_T0, _T1, _T2, _T3, _T4, _T5]]: ... 
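+
+        # Illustrative note (hypothetical ``user_table``): per
+        # ``DMLWhereBase.where()``, repeated ``.where()`` calls are joined
+        # with AND, so
+        #
+        #     delete(user_table)
+        #     .where(user_table.c.name == "ed")
+        #     .where(user_table.c.active == False)
+        #     .returning(user_table.c.id)
+        #
+        # deletes rows matching both criteria and is typed roughly as
+        # ``ReturningDelete[Tuple[int]]`` for an Integer ``id`` column.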
+ + @overload + def returning( + self, + __ent0: _TCCA[_T0], + __ent1: _TCCA[_T1], + __ent2: _TCCA[_T2], + __ent3: _TCCA[_T3], + __ent4: _TCCA[_T4], + __ent5: _TCCA[_T5], + __ent6: _TCCA[_T6], + ) -> ReturningDelete[Tuple[_T0, _T1, _T2, _T3, _T4, _T5, _T6]]: ... + + @overload + def returning( + self, + __ent0: _TCCA[_T0], + __ent1: _TCCA[_T1], + __ent2: _TCCA[_T2], + __ent3: _TCCA[_T3], + __ent4: _TCCA[_T4], + __ent5: _TCCA[_T5], + __ent6: _TCCA[_T6], + __ent7: _TCCA[_T7], + ) -> ReturningDelete[ + Tuple[_T0, _T1, _T2, _T3, _T4, _T5, _T6, _T7] + ]: ... + + # END OVERLOADED FUNCTIONS self.returning + + @overload + def returning( + self, *cols: _ColumnsClauseArgument[Any], **__kw: Any + ) -> ReturningDelete[Any]: ... + + def returning( + self, *cols: _ColumnsClauseArgument[Any], **__kw: Any + ) -> ReturningDelete[Any]: ... + + +class ReturningDelete(Update, TypedReturnsRows[_TP]): + """Typing-only class that establishes a generic type form of + :class:`.Delete` which tracks returned column types. + + This datatype is delivered when calling the + :meth:`.Delete.returning` method. + + .. versionadded:: 2.0 + + """ diff --git a/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/sql/elements.py b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/sql/elements.py new file mode 100644 index 0000000000000000000000000000000000000000..88cb2529d8810509f516608d7b7fbb6f0a0e6e76 --- /dev/null +++ b/Scripts_RSCM_sim_growth_n_climate_to_Yield/.venv/lib/python3.10/site-packages/sqlalchemy/sql/elements.py @@ -0,0 +1,5544 @@ +# sql/elements.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php +# mypy: allow-untyped-defs, allow-untyped-calls + +"""Core SQL expression elements, including :class:`_expression.ClauseElement`, +:class:`_expression.ColumnElement`, and derived classes. + +""" + +from __future__ import annotations + +from decimal import Decimal +from enum import Enum +import itertools +import operator +import re +import typing +from typing import AbstractSet +from typing import Any +from typing import Callable +from typing import cast +from typing import Dict +from typing import FrozenSet +from typing import Generic +from typing import Iterable +from typing import Iterator +from typing import List +from typing import Mapping +from typing import Optional +from typing import overload +from typing import Sequence +from typing import Set +from typing import Tuple as typing_Tuple +from typing import Type +from typing import TYPE_CHECKING +from typing import TypeVar +from typing import Union + +from . import coercions +from . import operators +from . import roles +from . import traversals +from . 
import type_api +from ._typing import has_schema_attr +from ._typing import is_named_from_clause +from ._typing import is_quoted_name +from ._typing import is_tuple_type +from .annotation import Annotated +from .annotation import SupportsWrappingAnnotations +from .base import _clone +from .base import _expand_cloned +from .base import _generative +from .base import _NoArg +from .base import Executable +from .base import Generative +from .base import HasMemoized +from .base import Immutable +from .base import NO_ARG +from .base import SingletonConstant +from .cache_key import MemoizedHasCacheKey +from .cache_key import NO_CACHE +from .coercions import _document_text_coercion # noqa +from .operators import ColumnOperators +from .traversals import HasCopyInternals +from .visitors import cloned_traverse +from .visitors import ExternallyTraversible +from .visitors import InternalTraversal +from .visitors import traverse +from .visitors import Visitable +from .. import exc +from .. import inspection +from .. import util +from ..util import HasMemoized_ro_memoized_attribute +from ..util import TypingOnly +from ..util.typing import Literal +from ..util.typing import ParamSpec +from ..util.typing import Self + +if typing.TYPE_CHECKING: + from ._typing import _ByArgument + from ._typing import _ColumnExpressionArgument + from ._typing import _ColumnExpressionOrStrLabelArgument + from ._typing import _HasDialect + from ._typing import _InfoType + from ._typing import _PropagateAttrsType + from ._typing import _TypeEngineArgument + from .base import ColumnSet + from .cache_key import _CacheKeyTraversalType + from .cache_key import CacheKey + from .compiler import Compiled + from .compiler import SQLCompiler + from .functions import FunctionElement + from .operators import OperatorType + from .schema import Column + from .schema import DefaultGenerator + from .schema import FetchedValue + from .schema import ForeignKey + from .selectable import _SelectIterable + from .selectable import FromClause + from .selectable import NamedFromClause + from .selectable import TextualSelect + from .sqltypes import TupleType + from .type_api import TypeEngine + from .visitors import _CloneCallableType + from .visitors import _TraverseInternalsType + from .visitors import anon_map + from ..engine import Connection + from ..engine import Dialect + from ..engine.interfaces import _CoreMultiExecuteParams + from ..engine.interfaces import CacheStats + from ..engine.interfaces import CompiledCacheType + from ..engine.interfaces import CoreExecuteOptionsParameter + from ..engine.interfaces import SchemaTranslateMapType + from ..engine.result import Result + +_NUMERIC = Union[float, Decimal] +_NUMBER = Union[float, int, Decimal] + +_T = TypeVar("_T", bound="Any") +_T_co = TypeVar("_T_co", bound=Any, covariant=True) +_OPT = TypeVar("_OPT", bound="Any") +_NT = TypeVar("_NT", bound="_NUMERIC") + +_NMT = TypeVar("_NMT", bound="_NUMBER") + + +@overload +def literal( + value: Any, + type_: _TypeEngineArgument[_T], + literal_execute: bool = False, +) -> BindParameter[_T]: ... + + +@overload +def literal( + value: _T, + type_: None = None, + literal_execute: bool = False, +) -> BindParameter[_T]: ... + + +@overload +def literal( + value: Any, + type_: Optional[_TypeEngineArgument[Any]] = None, + literal_execute: bool = False, +) -> BindParameter[Any]: ... 
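+
+# Illustrative sketch of how the ``literal()`` overloads above are intended to
+# resolve (values here are arbitrary examples):
+#
+#     literal(5)                   ->  BindParameter[int]   (type inferred)
+#     literal("5", Integer())      ->  BindParameter[int]   (explicit type_)
+#     literal(value, maybe_type)   ->  BindParameter[Any]   (fallback)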
+ + +def literal( + value: Any, + type_: Optional[_TypeEngineArgument[Any]] = None, + literal_execute: bool = False, +) -> BindParameter[Any]: + r"""Return a literal clause, bound to a bind parameter. + + Literal clauses are created automatically when non- + :class:`_expression.ClauseElement` objects (such as strings, ints, dates, + etc.) are + used in a comparison operation with a :class:`_expression.ColumnElement` + subclass, + such as a :class:`~sqlalchemy.schema.Column` object. Use this function + to force the generation of a literal clause, which will be created as a + :class:`BindParameter` with a bound value. + + :param value: the value to be bound. Can be any Python object supported by + the underlying DB-API, or is translatable via the given type argument. + + :param type\_: an optional :class:`~sqlalchemy.types.TypeEngine` which will + provide bind-parameter translation for this literal. + + :param literal_execute: optional bool, when True, the SQL engine will + attempt to render the bound value directly in the SQL statement at + execution time rather than providing as a parameter value. + + .. versionadded:: 2.0 + + """ + return coercions.expect( + roles.LiteralValueRole, + value, + type_=type_, + literal_execute=literal_execute, + ) + + +def literal_column( + text: str, type_: Optional[_TypeEngineArgument[_T]] = None +) -> ColumnClause[_T]: + r"""Produce a :class:`.ColumnClause` object that has the + :paramref:`_expression.column.is_literal` flag set to True. + + :func:`_expression.literal_column` is similar to + :func:`_expression.column`, except that + it is more often used as a "standalone" column expression that renders + exactly as stated; while :func:`_expression.column` + stores a string name that + will be assumed to be part of a table and may be quoted as such, + :func:`_expression.literal_column` can be that, + or any other arbitrary column-oriented + expression. + + :param text: the text of the expression; can be any SQL expression. + Quoting rules will not be applied. To specify a column-name expression + which should be subject to quoting rules, use the :func:`column` + function. + + :param type\_: an optional :class:`~sqlalchemy.types.TypeEngine` + object which will + provide result-set translation and additional expression semantics for + this column. If left as ``None`` the type will be :class:`.NullType`. + + .. seealso:: + + :func:`_expression.column` + + :func:`_expression.text` + + :ref:`tutorial_select_arbitrary_text` + + """ + return ColumnClause(text, type_=type_, is_literal=True) + + +class CompilerElement(Visitable): + """base class for SQL elements that can be compiled to produce a + SQL string. + + .. versionadded:: 2.0 + + """ + + __slots__ = () + __visit_name__ = "compiler_element" + + supports_execution = False + + stringify_dialect = "default" + + @util.preload_module("sqlalchemy.engine.default") + @util.preload_module("sqlalchemy.engine.url") + def compile( + self, + bind: Optional[_HasDialect] = None, + dialect: Optional[Dialect] = None, + **kw: Any, + ) -> Compiled: + """Compile this SQL expression. + + The return value is a :class:`~.Compiled` object. + Calling ``str()`` or ``unicode()`` on the returned value will yield a + string representation of the result. The + :class:`~.Compiled` object also can return a + dictionary of bind parameter names and values + using the ``params`` accessor. + + :param bind: An :class:`.Connection` or :class:`.Engine` which + can provide a :class:`.Dialect` in order to generate a + :class:`.Compiled` object. 
If the ``bind`` and + ``dialect`` parameters are both omitted, a default SQL compiler + is used. + + :param column_keys: Used for INSERT and UPDATE statements, a list of + column names which should be present in the VALUES clause of the + compiled statement. If ``None``, all columns from the target table + object are rendered. + + :param dialect: A :class:`.Dialect` instance which can generate + a :class:`.Compiled` object. This argument takes precedence over + the ``bind`` argument. + + :param compile_kwargs: optional dictionary of additional parameters + that will be passed through to the compiler within all "visit" + methods. This allows any custom flag to be passed through to + a custom compilation construct, for example. It is also used + for the case of passing the ``literal_binds`` flag through:: + + from sqlalchemy.sql import table, column, select + + t = table("t", column("x")) + + s = select(t).where(t.c.x == 5) + + print(s.compile(compile_kwargs={"literal_binds": True})) + + .. seealso:: + + :ref:`faq_sql_expression_string` + + """ + + if dialect is None: + if bind: + dialect = bind.dialect + elif self.stringify_dialect == "default": + dialect = self._default_dialect() + else: + url = util.preloaded.engine_url + dialect = url.URL.create( + self.stringify_dialect + ).get_dialect()() + + return self._compiler(dialect, **kw) + + def _default_dialect(self): + default = util.preloaded.engine_default + return default.StrCompileDialect() + + def _compiler(self, dialect: Dialect, **kw: Any) -> Compiled: + """Return a compiler appropriate for this ClauseElement, given a + Dialect.""" + + if TYPE_CHECKING: + assert isinstance(self, ClauseElement) + return dialect.statement_compiler(dialect, self, **kw) + + def __str__(self) -> str: + return str(self.compile()) + + +@inspection._self_inspects +class ClauseElement( + SupportsWrappingAnnotations, + MemoizedHasCacheKey, + HasCopyInternals, + ExternallyTraversible, + CompilerElement, +): + """Base class for elements of a programmatically constructed SQL + expression. + + """ + + __visit_name__ = "clause" + + if TYPE_CHECKING: + + @util.memoized_property + def _propagate_attrs(self) -> _PropagateAttrsType: + """like annotations, however these propagate outwards liberally + as SQL constructs are built, and are set up at construction time. + + """ + ... + + else: + _propagate_attrs = util.EMPTY_DICT + + @util.ro_memoized_property + def description(self) -> Optional[str]: + return None + + _is_clone_of: Optional[Self] = None + + is_clause_element = True + is_selectable = False + is_dml = False + _is_column_element = False + _is_keyed_column_element = False + _is_table = False + _gen_static_annotations_cache_key = False + _is_textual = False + _is_from_clause = False + _is_returns_rows = False + _is_text_clause = False + _is_from_container = False + _is_select_container = False + _is_select_base = False + _is_select_statement = False + _is_bind_parameter = False + _is_clause_list = False + _is_lambda_element = False + _is_singleton_constant = False + _is_immutable = False + _is_star = False + + @property + def _order_by_label_element(self) -> Optional[Label[Any]]: + return None + + _cache_key_traversal: _CacheKeyTraversalType = None + + negation_clause: ColumnElement[bool] + + if typing.TYPE_CHECKING: + + def get_children( + self, *, omit_attrs: typing_Tuple[str, ...] = ..., **kw: Any + ) -> Iterable[ClauseElement]: ... 
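+
+    # Illustrative sketch tied to ``CompilerElement.compile()`` above (table
+    # and column names are hypothetical):
+    #
+    #     t = table("t", column("x"))
+    #     stmt = select(t).where(t.c.x == 5)
+    #     str(stmt)                                   # default string compile
+    #     stmt.compile(dialect=postgresql.dialect())  # dialect-specific SQL
+    #     stmt.compile(compile_kwargs={"literal_binds": True})  # render 5 inline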
+ + @util.ro_non_memoized_property + def _from_objects(self) -> List[FromClause]: + return [] + + def _set_propagate_attrs(self, values: Mapping[str, Any]) -> Self: + # usually, self._propagate_attrs is empty here. one case where it's + # not is a subquery against ORM select, that is then pulled as a + # property of an aliased class. should all be good + + # assert not self._propagate_attrs + + self._propagate_attrs = util.immutabledict(values) + return self + + def _default_compiler(self) -> SQLCompiler: + dialect = self._default_dialect() + return dialect.statement_compiler(dialect, self) # type: ignore + + def _clone(self, **kw: Any) -> Self: + """Create a shallow copy of this ClauseElement. + + This method may be used by a generative API. Its also used as + part of the "deep" copy afforded by a traversal that combines + the _copy_internals() method. + + """ + + skip = self._memoized_keys + c = self.__class__.__new__(self.__class__) + + if skip: + # ensure this iteration remains atomic + c.__dict__ = { + k: v for k, v in self.__dict__.copy().items() if k not in skip + } + else: + c.__dict__ = self.__dict__.copy() + + # this is a marker that helps to "equate" clauses to each other + # when a Select returns its list of FROM clauses. the cloning + # process leaves around a lot of remnants of the previous clause + # typically in the form of column expressions still attached to the + # old table. + cc = self._is_clone_of + c._is_clone_of = cc if cc is not None else self + return c + + def _negate_in_binary(self, negated_op, original_op): + """a hook to allow the right side of a binary expression to respond + to a negation of the binary expression. + + Used for the special case of expanding bind parameter with IN. + + """ + return self + + def _with_binary_element_type(self, type_): + """in the context of binary expression, convert the type of this + object to the one given. + + applies only to :class:`_expression.ColumnElement` classes. + + """ + return self + + @property + def _constructor(self): + """return the 'constructor' for this ClauseElement. + + This is for the purposes for creating a new object of + this type. Usually, its just the element's __class__. + However, the "Annotated" version of the object overrides + to return the class of its proxied element. + + """ + return self.__class__ + + @HasMemoized.memoized_attribute + def _cloned_set(self): + """Return the set consisting all cloned ancestors of this + ClauseElement. + + Includes this ClauseElement. This accessor tends to be used for + FromClause objects to identify 'equivalent' FROM clauses, regardless + of transformative operations. + + """ + s = util.column_set() + f: Optional[ClauseElement] = self + + # note this creates a cycle, asserted in test_memusage. however, + # turning this into a plain @property adds tends of thousands of method + # calls to Core / ORM performance tests, so the small overhead + # introduced by the relatively small amount of short term cycles + # produced here is preferable + while f is not None: + s.add(f) + f = f._is_clone_of + return s + + def _de_clone(self): + while self._is_clone_of is not None: + self = self._is_clone_of + return self + + @property + def entity_namespace(self): + raise AttributeError( + "This SQL expression has no entity namespace " + "with which to filter from." 
+ ) + + def __getstate__(self): + d = self.__dict__.copy() + d.pop("_is_clone_of", None) + d.pop("_generate_cache_key", None) + return d + + def _execute_on_connection( + self, + connection: Connection, + distilled_params: _CoreMultiExecuteParams, + execution_options: CoreExecuteOptionsParameter, + ) -> Result[Any]: + if self.supports_execution: + if TYPE_CHECKING: + assert isinstance(self, Executable) + return connection._execute_clauseelement( + self, distilled_params, execution_options + ) + else: + raise exc.ObjectNotExecutableError(self) + + def _execute_on_scalar( + self, + connection: Connection, + distilled_params: _CoreMultiExecuteParams, + execution_options: CoreExecuteOptionsParameter, + ) -> Any: + """an additional hook for subclasses to provide a different + implementation for connection.scalar() vs. connection.execute(). + + .. versionadded:: 2.0 + + """ + return self._execute_on_connection( + connection, distilled_params, execution_options + ).scalar() + + def _get_embedded_bindparams(self) -> Sequence[BindParameter[Any]]: + """Return the list of :class:`.BindParameter` objects embedded in the + object. + + This accomplishes the same purpose as ``visitors.traverse()`` or + similar would provide, however by making use of the cache key + it takes advantage of memoization of the key to result in fewer + net method calls, assuming the statement is also going to be + executed. + + """ + + key = self._generate_cache_key() + if key is None: + bindparams: List[BindParameter[Any]] = [] + + traverse(self, {}, {"bindparam": bindparams.append}) + return bindparams + + else: + return key.bindparams + + def unique_params( + self, + __optionaldict: Optional[Dict[str, Any]] = None, + **kwargs: Any, + ) -> Self: + """Return a copy with :func:`_expression.bindparam` elements + replaced. + + Same functionality as :meth:`_expression.ClauseElement.params`, + except adds `unique=True` + to affected bind parameters so that multiple statements can be + used. + + """ + return self._replace_params(True, __optionaldict, kwargs) + + def params( + self, + __optionaldict: Optional[Mapping[str, Any]] = None, + **kwargs: Any, + ) -> Self: + """Return a copy with :func:`_expression.bindparam` elements + replaced. + + Returns a copy of this ClauseElement with + :func:`_expression.bindparam` + elements replaced with values taken from the given dictionary:: + + >>> clause = column("x") + bindparam("foo") + >>> print(clause.compile().params) + {'foo':None} + >>> print(clause.params({"foo": 7}).compile().params) + {'foo':7} + + """ + return self._replace_params(False, __optionaldict, kwargs) + + def _replace_params( + self, + unique: bool, + optionaldict: Optional[Mapping[str, Any]], + kwargs: Dict[str, Any], + ) -> Self: + if optionaldict: + kwargs.update(optionaldict) + + def visit_bindparam(bind: BindParameter[Any]) -> None: + if bind.key in kwargs: + bind.value = kwargs[bind.key] + bind.required = False + if unique: + bind._convert_to_unique() + + return cloned_traverse( + self, + {"maintain_key": True, "detect_subquery_cols": True}, + {"bindparam": visit_bindparam}, + ) + + def compare(self, other: ClauseElement, **kw: Any) -> bool: + r"""Compare this :class:`_expression.ClauseElement` to + the given :class:`_expression.ClauseElement`. + + Subclasses should override the default behavior, which is a + straight identity comparison. + + \**kw are arguments consumed by subclass ``compare()`` methods and + may be used to modify the criteria for comparison + (see :class:`_expression.ColumnElement`). 
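+
+        For example (hypothetical columns), ``(column("x") == 5).compare(column("x") == 5)``
+        would be expected to return ``True``, while comparing against
+        ``column("y") == 5`` would not, since the structural comparison
+        extends to the column names and bound values involved.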
+ + """ + return traversals.compare(self, other, **kw) + + def self_group( + self, against: Optional[OperatorType] = None + ) -> ClauseElement: + """Apply a 'grouping' to this :class:`_expression.ClauseElement`. + + This method is overridden by subclasses to return a "grouping" + construct, i.e. parenthesis. In particular it's used by "binary" + expressions to provide a grouping around themselves when placed into a + larger expression, as well as by :func:`_expression.select` + constructs when placed into the FROM clause of another + :func:`_expression.select`. (Note that subqueries should be + normally created using the :meth:`_expression.Select.alias` method, + as many + platforms require nested SELECT statements to be named). + + As expressions are composed together, the application of + :meth:`self_group` is automatic - end-user code should never + need to use this method directly. Note that SQLAlchemy's + clause constructs take operator precedence into account - + so parenthesis might not be needed, for example, in + an expression like ``x OR (y AND z)`` - AND takes precedence + over OR. + + The base :meth:`self_group` method of + :class:`_expression.ClauseElement` + just returns self. + """ + return self + + def _ungroup(self) -> ClauseElement: + """Return this :class:`_expression.ClauseElement` + without any groupings. + """ + + return self + + def _compile_w_cache( + self, + dialect: Dialect, + *, + compiled_cache: Optional[CompiledCacheType], + column_keys: List[str], + for_executemany: bool = False, + schema_translate_map: Optional[SchemaTranslateMapType] = None, + **kw: Any, + ) -> typing_Tuple[ + Compiled, Optional[Sequence[BindParameter[Any]]], CacheStats + ]: + elem_cache_key: Optional[CacheKey] + + if compiled_cache is not None and dialect._supports_statement_cache: + elem_cache_key = self._generate_cache_key() + else: + elem_cache_key = None + + if elem_cache_key is not None: + if TYPE_CHECKING: + assert compiled_cache is not None + + cache_key, extracted_params = elem_cache_key + key = ( + dialect, + cache_key, + tuple(column_keys), + bool(schema_translate_map), + for_executemany, + ) + compiled_sql = compiled_cache.get(key) + + if compiled_sql is None: + cache_hit = dialect.CACHE_MISS + compiled_sql = self._compiler( + dialect, + cache_key=elem_cache_key, + column_keys=column_keys, + for_executemany=for_executemany, + schema_translate_map=schema_translate_map, + **kw, + ) + compiled_cache[key] = compiled_sql + else: + cache_hit = dialect.CACHE_HIT + else: + extracted_params = None + compiled_sql = self._compiler( + dialect, + cache_key=elem_cache_key, + column_keys=column_keys, + for_executemany=for_executemany, + schema_translate_map=schema_translate_map, + **kw, + ) + + if not dialect._supports_statement_cache: + cache_hit = dialect.NO_DIALECT_SUPPORT + elif compiled_cache is None: + cache_hit = dialect.CACHING_DISABLED + else: + cache_hit = dialect.NO_CACHE_KEY + + return compiled_sql, extracted_params, cache_hit + + def __invert__(self): + # undocumented element currently used by the ORM for + # relationship.contains() + if hasattr(self, "negation_clause"): + return self.negation_clause + else: + return self._negate() + + def _negate(self) -> ClauseElement: + grouped = self.self_group(against=operators.inv) + assert isinstance(grouped, ColumnElement) + return UnaryExpression(grouped, operator=operators.inv) + + def __bool__(self): + raise TypeError("Boolean value of this clause is not defined") + + def __repr__(self): + friendly = self.description + if friendly is 
None: + return object.__repr__(self) + else: + return "<%s.%s at 0x%x; %s>" % ( + self.__module__, + self.__class__.__name__, + id(self), + friendly, + ) + + +class DQLDMLClauseElement(ClauseElement): + """represents a :class:`.ClauseElement` that compiles to a DQL or DML + expression, not DDL. + + .. versionadded:: 2.0 + + """ + + if typing.TYPE_CHECKING: + + def _compiler(self, dialect: Dialect, **kw: Any) -> SQLCompiler: + """Return a compiler appropriate for this ClauseElement, given a + Dialect.""" + ... + + def compile( # noqa: A001 + self, + bind: Optional[_HasDialect] = None, + dialect: Optional[Dialect] = None, + **kw: Any, + ) -> SQLCompiler: ... + + +class CompilerColumnElement( + roles.DMLColumnRole, + roles.DDLConstraintColumnRole, + roles.ColumnsClauseRole, + CompilerElement, +): + """A compiler-only column element used for ad-hoc string compilations. + + .. versionadded:: 2.0 + + """ + + __slots__ = () + + _propagate_attrs = util.EMPTY_DICT + _is_collection_aggregate = False + + +# SQLCoreOperations should be suiting the ExpressionElementRole +# and ColumnsClauseRole. however the MRO issues become too elaborate +# at the moment. +class SQLCoreOperations(Generic[_T_co], ColumnOperators, TypingOnly): + __slots__ = () + + # annotations for comparison methods + # these are from operators->Operators / ColumnOperators, + # redefined with the specific types returned by ColumnElement hierarchies + if typing.TYPE_CHECKING: + + @util.non_memoized_property + def _propagate_attrs(self) -> _PropagateAttrsType: ... + + def operate( + self, op: OperatorType, *other: Any, **kwargs: Any + ) -> ColumnElement[Any]: ... + + def reverse_operate( + self, op: OperatorType, other: Any, **kwargs: Any + ) -> ColumnElement[Any]: ... + + @overload + def op( + self, + opstring: str, + precedence: int = ..., + is_comparison: bool = ..., + *, + return_type: _TypeEngineArgument[_OPT], + python_impl: Optional[Callable[..., Any]] = None, + ) -> Callable[[Any], BinaryExpression[_OPT]]: ... + + @overload + def op( + self, + opstring: str, + precedence: int = ..., + is_comparison: bool = ..., + return_type: Optional[_TypeEngineArgument[Any]] = ..., + python_impl: Optional[Callable[..., Any]] = ..., + ) -> Callable[[Any], BinaryExpression[Any]]: ... + + def op( + self, + opstring: str, + precedence: int = 0, + is_comparison: bool = False, + return_type: Optional[_TypeEngineArgument[Any]] = None, + python_impl: Optional[Callable[..., Any]] = None, + ) -> Callable[[Any], BinaryExpression[Any]]: ... + + def bool_op( + self, + opstring: str, + precedence: int = 0, + python_impl: Optional[Callable[..., Any]] = None, + ) -> Callable[[Any], BinaryExpression[bool]]: ... + + def __and__(self, other: Any) -> BooleanClauseList: ... + + def __or__(self, other: Any) -> BooleanClauseList: ... + + def __invert__(self) -> ColumnElement[_T_co]: ... + + def __lt__(self, other: Any) -> ColumnElement[bool]: ... + + def __le__(self, other: Any) -> ColumnElement[bool]: ... + + # declare also that this class has an hash method otherwise + # it may be assumed to be None by type checkers since the + # object defines __eq__ and python sets it to None in that case: + # https://docs.python.org/3/reference/datamodel.html#object.__hash__ + def __hash__(self) -> int: ... + + def __eq__(self, other: Any) -> ColumnElement[bool]: # type: ignore[override] # noqa: E501 + ... + + def __ne__(self, other: Any) -> ColumnElement[bool]: # type: ignore[override] # noqa: E501 + ... + + def is_distinct_from(self, other: Any) -> ColumnElement[bool]: ... 
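+
+        # Illustrative note (hypothetical columns): these typing-only stubs
+        # mirror the runtime operators inherited from ``ColumnOperators``;
+        # e.g. ``column("a").is_distinct_from(column("b"))`` produces a
+        # ``ColumnElement[bool]`` that renders as "a IS DISTINCT FROM b" on
+        # backends supporting that syntax.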
+ + def is_not_distinct_from(self, other: Any) -> ColumnElement[bool]: ... + + def __gt__(self, other: Any) -> ColumnElement[bool]: ... + + def __ge__(self, other: Any) -> ColumnElement[bool]: ... + + def __neg__(self) -> UnaryExpression[_T_co]: ... + + def __contains__(self, other: Any) -> ColumnElement[bool]: ... + + def __getitem__(self, index: Any) -> ColumnElement[Any]: ... + + @overload + def __lshift__(self: _SQO[int], other: Any) -> ColumnElement[int]: ... + + @overload + def __lshift__(self, other: Any) -> ColumnElement[Any]: ... + + def __lshift__(self, other: Any) -> ColumnElement[Any]: ... + + @overload + def __rshift__(self: _SQO[int], other: Any) -> ColumnElement[int]: ... + + @overload + def __rshift__(self, other: Any) -> ColumnElement[Any]: ... + + def __rshift__(self, other: Any) -> ColumnElement[Any]: ... + + @overload + def concat(self: _SQO[str], other: Any) -> ColumnElement[str]: ... + + @overload + def concat(self, other: Any) -> ColumnElement[Any]: ... + + def concat(self, other: Any) -> ColumnElement[Any]: ... + + def like( + self, other: Any, escape: Optional[str] = None + ) -> BinaryExpression[bool]: ... + + def ilike( + self, other: Any, escape: Optional[str] = None + ) -> BinaryExpression[bool]: ... + + def bitwise_xor(self, other: Any) -> BinaryExpression[Any]: ... + + def bitwise_or(self, other: Any) -> BinaryExpression[Any]: ... + + def bitwise_and(self, other: Any) -> BinaryExpression[Any]: ... + + def bitwise_not(self) -> UnaryExpression[_T_co]: ... + + def bitwise_lshift(self, other: Any) -> BinaryExpression[Any]: ... + + def bitwise_rshift(self, other: Any) -> BinaryExpression[Any]: ... + + def in_( + self, + other: Union[ + Iterable[Any], BindParameter[Any], roles.InElementRole + ], + ) -> BinaryExpression[bool]: ... + + def not_in( + self, + other: Union[ + Iterable[Any], BindParameter[Any], roles.InElementRole + ], + ) -> BinaryExpression[bool]: ... + + def notin_( + self, + other: Union[ + Iterable[Any], BindParameter[Any], roles.InElementRole + ], + ) -> BinaryExpression[bool]: ... + + def not_like( + self, other: Any, escape: Optional[str] = None + ) -> BinaryExpression[bool]: ... + + def notlike( + self, other: Any, escape: Optional[str] = None + ) -> BinaryExpression[bool]: ... + + def not_ilike( + self, other: Any, escape: Optional[str] = None + ) -> BinaryExpression[bool]: ... + + def notilike( + self, other: Any, escape: Optional[str] = None + ) -> BinaryExpression[bool]: ... + + def is_(self, other: Any) -> BinaryExpression[bool]: ... + + def is_not(self, other: Any) -> BinaryExpression[bool]: ... + + def isnot(self, other: Any) -> BinaryExpression[bool]: ... + + def startswith( + self, + other: Any, + escape: Optional[str] = None, + autoescape: bool = False, + ) -> ColumnElement[bool]: ... + + def istartswith( + self, + other: Any, + escape: Optional[str] = None, + autoescape: bool = False, + ) -> ColumnElement[bool]: ... + + def endswith( + self, + other: Any, + escape: Optional[str] = None, + autoescape: bool = False, + ) -> ColumnElement[bool]: ... + + def iendswith( + self, + other: Any, + escape: Optional[str] = None, + autoescape: bool = False, + ) -> ColumnElement[bool]: ... + + def contains(self, other: Any, **kw: Any) -> ColumnElement[bool]: ... + + def icontains(self, other: Any, **kw: Any) -> ColumnElement[bool]: ... + + def match(self, other: Any, **kwargs: Any) -> ColumnElement[bool]: ... + + def regexp_match( + self, pattern: Any, flags: Optional[str] = None + ) -> ColumnElement[bool]: ... 
+ + def regexp_replace( + self, pattern: Any, replacement: Any, flags: Optional[str] = None + ) -> ColumnElement[str]: ... + + def desc(self) -> UnaryExpression[_T_co]: ... + + def asc(self) -> UnaryExpression[_T_co]: ... + + def nulls_first(self) -> UnaryExpression[_T_co]: ... + + def nullsfirst(self) -> UnaryExpression[_T_co]: ... + + def nulls_last(self) -> UnaryExpression[_T_co]: ... + + def nullslast(self) -> UnaryExpression[_T_co]: ... + + def collate(self, collation: str) -> CollationClause: ... + + def between( + self, cleft: Any, cright: Any, symmetric: bool = False + ) -> BinaryExpression[bool]: ... + + def distinct(self: _SQO[_T_co]) -> UnaryExpression[_T_co]: ... + + def any_(self) -> CollectionAggregate[Any]: ... + + def all_(self) -> CollectionAggregate[Any]: ... + + # numeric overloads. These need more tweaking + # in particular they all need to have a variant for Optiona[_T] + # because Optional only applies to the data side, not the expression + # side + + @overload + def __add__( + self: _SQO[_NMT], + other: Any, + ) -> ColumnElement[_NMT]: ... + + @overload + def __add__( + self: _SQO[str], + other: Any, + ) -> ColumnElement[str]: ... + + @overload + def __add__(self, other: Any) -> ColumnElement[Any]: ... + + def __add__(self, other: Any) -> ColumnElement[Any]: ... + + @overload + def __radd__(self: _SQO[_NMT], other: Any) -> ColumnElement[_NMT]: ... + + @overload + def __radd__(self: _SQO[str], other: Any) -> ColumnElement[str]: ... + + def __radd__(self, other: Any) -> ColumnElement[Any]: ... + + @overload + def __sub__( + self: _SQO[_NMT], + other: Any, + ) -> ColumnElement[_NMT]: ... + + @overload + def __sub__(self, other: Any) -> ColumnElement[Any]: ... + + def __sub__(self, other: Any) -> ColumnElement[Any]: ... + + @overload + def __rsub__( + self: _SQO[_NMT], + other: Any, + ) -> ColumnElement[_NMT]: ... + + @overload + def __rsub__(self, other: Any) -> ColumnElement[Any]: ... + + def __rsub__(self, other: Any) -> ColumnElement[Any]: ... + + @overload + def __mul__( + self: _SQO[_NMT], + other: Any, + ) -> ColumnElement[_NMT]: ... + + @overload + def __mul__(self, other: Any) -> ColumnElement[Any]: ... + + def __mul__(self, other: Any) -> ColumnElement[Any]: ... + + @overload + def __rmul__( + self: _SQO[_NMT], + other: Any, + ) -> ColumnElement[_NMT]: ... + + @overload + def __rmul__(self, other: Any) -> ColumnElement[Any]: ... + + def __rmul__(self, other: Any) -> ColumnElement[Any]: ... + + @overload + def __mod__(self: _SQO[_NMT], other: Any) -> ColumnElement[_NMT]: ... + + @overload + def __mod__(self, other: Any) -> ColumnElement[Any]: ... + + def __mod__(self, other: Any) -> ColumnElement[Any]: ... + + @overload + def __rmod__(self: _SQO[_NMT], other: Any) -> ColumnElement[_NMT]: ... + + @overload + def __rmod__(self, other: Any) -> ColumnElement[Any]: ... + + def __rmod__(self, other: Any) -> ColumnElement[Any]: ... + + @overload + def __truediv__( + self: _SQO[int], other: Any + ) -> ColumnElement[_NUMERIC]: ... + + @overload + def __truediv__(self: _SQO[_NT], other: Any) -> ColumnElement[_NT]: ... + + @overload + def __truediv__(self, other: Any) -> ColumnElement[Any]: ... + + def __truediv__(self, other: Any) -> ColumnElement[Any]: ... + + @overload + def __rtruediv__( + self: _SQO[_NMT], other: Any + ) -> ColumnElement[_NUMERIC]: ... + + @overload + def __rtruediv__(self, other: Any) -> ColumnElement[Any]: ... + + def __rtruediv__(self, other: Any) -> ColumnElement[Any]: ... 
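+
+        # Illustrative note on the division overloads above (hypothetical
+        # Integer column ``c``): ``c / 2`` is typed as
+        # ``ColumnElement[Union[float, Decimal]]`` via ``_NUMERIC``, matching
+        # SQLAlchemy 2.0's treatment of true division of integers as a
+        # numeric result, while floor division (``__floordiv__`` just below)
+        # preserves the operand's numeric type.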
+ + @overload + def __floordiv__( + self: _SQO[_NMT], other: Any + ) -> ColumnElement[_NMT]: ... + + @overload + def __floordiv__(self, other: Any) -> ColumnElement[Any]: ... + + def __floordiv__(self, other: Any) -> ColumnElement[Any]: ... + + @overload + def __rfloordiv__( + self: _SQO[_NMT], other: Any + ) -> ColumnElement[_NMT]: ... + + @overload + def __rfloordiv__(self, other: Any) -> ColumnElement[Any]: ... + + def __rfloordiv__(self, other: Any) -> ColumnElement[Any]: ... + + +class SQLColumnExpression( + SQLCoreOperations[_T_co], roles.ExpressionElementRole[_T_co], TypingOnly +): + """A type that may be used to indicate any SQL column element or object + that acts in place of one. + + :class:`.SQLColumnExpression` is a base of + :class:`.ColumnElement`, as well as within the bases of ORM elements + such as :class:`.InstrumentedAttribute`, and may be used in :pep:`484` + typing to indicate arguments or return values that should behave + as column expressions. + + .. versionadded:: 2.0.0b4 + + + """ + + __slots__ = () + + +_SQO = SQLCoreOperations + + +class ColumnElement( + roles.ColumnArgumentOrKeyRole, + roles.StatementOptionRole, + roles.WhereHavingRole, + roles.BinaryElementRole[_T], + roles.OrderByRole, + roles.ColumnsClauseRole, + roles.LimitOffsetRole, + roles.DMLColumnRole, + roles.DDLConstraintColumnRole, + roles.DDLExpressionRole, + SQLColumnExpression[_T], + DQLDMLClauseElement, +): + """Represent a column-oriented SQL expression suitable for usage in the + "columns" clause, WHERE clause etc. of a statement. + + While the most familiar kind of :class:`_expression.ColumnElement` is the + :class:`_schema.Column` object, :class:`_expression.ColumnElement` + serves as the basis + for any unit that may be present in a SQL expression, including + the expressions themselves, SQL functions, bound parameters, + literal expressions, keywords such as ``NULL``, etc. + :class:`_expression.ColumnElement` + is the ultimate base class for all such elements. + + A wide variety of SQLAlchemy Core functions work at the SQL expression + level, and are intended to accept instances of + :class:`_expression.ColumnElement` as + arguments. These functions will typically document that they accept a + "SQL expression" as an argument. What this means in terms of SQLAlchemy + usually refers to an input which is either already in the form of a + :class:`_expression.ColumnElement` object, + or a value which can be **coerced** into + one. The coercion rules followed by most, but not all, SQLAlchemy Core + functions with regards to SQL expressions are as follows: + + * a literal Python value, such as a string, integer or floating + point value, boolean, datetime, ``Decimal`` object, or virtually + any other Python object, will be coerced into a "literal bound + value". This generally means that a :func:`.bindparam` will be + produced featuring the given value embedded into the construct; the + resulting :class:`.BindParameter` object is an instance of + :class:`_expression.ColumnElement`. + The Python value will ultimately be sent + to the DBAPI at execution time as a parameterized argument to the + ``execute()`` or ``executemany()`` methods, after SQLAlchemy + type-specific converters (e.g. those provided by any associated + :class:`.TypeEngine` objects) are applied to the value. + + * any special object value, typically ORM-level constructs, which + feature an accessor called ``__clause_element__()``. 
The Core
+      expression system looks for this method when an object of otherwise
+      unknown type is passed to a function that is looking to coerce the
+      argument into a :class:`_expression.ColumnElement` and sometimes a
+      :class:`_expression.SelectBase` expression.
+      It is used within the ORM to
+      convert from ORM-specific objects like mapped classes and
+      mapped attributes into Core expression objects.
+
+    * The Python ``None`` value is typically interpreted as ``NULL``,
+      which in SQLAlchemy Core produces an instance of :func:`.null`.
+
+    A :class:`_expression.ColumnElement` provides the ability to generate new
+    :class:`_expression.ColumnElement`
+    objects using Python expressions. This means that Python operators
+    such as ``==``, ``!=`` and ``<`` are overloaded to mimic SQL operations,
+    and allow the instantiation of further :class:`_expression.ColumnElement`
+    instances
+    which are composed from other, more fundamental
+    :class:`_expression.ColumnElement`
+    objects. For example, two :class:`.ColumnClause` objects can be added
+    together with the addition operator ``+`` to produce
+    a :class:`.BinaryExpression`.
+    Both :class:`.ColumnClause` and :class:`.BinaryExpression` are subclasses
+    of :class:`_expression.ColumnElement`:
+
+    .. sourcecode:: pycon+sql
+
+        >>> from sqlalchemy.sql import column
+        >>> column("a") + column("b")
+        <sqlalchemy.sql.elements.BinaryExpression object at 0x...>
+        >>> print(column("a") + column("b"))
+        {printsql}a + b
+
+    .. seealso::
+
+        :class:`_schema.Column`
+
+        :func:`_expression.column`
+
+    """
+
+    __visit_name__ = "column_element"
+
+    primary_key: bool = False
+    _is_clone_of: Optional[ColumnElement[_T]]
+    _is_column_element = True
+    _insert_sentinel: bool = False
+    _omit_from_statements = False
+    _is_collection_aggregate = False
+
+    foreign_keys: AbstractSet[ForeignKey] = frozenset()
+
+    @util.memoized_property
+    def _proxies(self) -> List[ColumnElement[Any]]:
+        return []
+
+    @util.non_memoized_property
+    def _tq_label(self) -> Optional[str]:
+        """The named label that can be used to target
+        this column in a result set in a "table qualified" context.
+
+        This label is almost always the label used when
+        rendering ``<expr> AS <label>`` in a SELECT statement; the
+        exception is typically columns that don't have
+        any parent table and are named the same as what the label would be
+        in any case.
+
+        """
+
+    _allow_label_resolve = True
+    """A flag that can be flipped to prevent a column from being resolvable
+    by string label name.
+
+    The joined eager loader strategy in the ORM uses this, for example.
+
+    """
+
+    _is_implicitly_boolean = False
+
+    _alt_names: Sequence[str] = ()
+
+    @overload
+    def self_group(self, against: None = None) -> ColumnElement[_T]: ...
+
+    @overload
+    def self_group(
+        self, against: Optional[OperatorType] = None
+    ) -> ColumnElement[Any]: ...
+
+    def self_group(
+        self, against: Optional[OperatorType] = None
+    ) -> ColumnElement[Any]:
+        if (
+            against in (operators.and_, operators.or_, operators._asbool)
+            and self.type._type_affinity is type_api.BOOLEANTYPE._type_affinity
+        ):
+            return AsBoolean(self, operators.is_true, operators.is_false)
+        elif against in (operators.any_op, operators.all_op):
+            return Grouping(self)
+        else:
+            return self
+
+    @overload
+    def _negate(self: ColumnElement[bool]) -> ColumnElement[bool]: ...
+
+    @overload
+    def _negate(self: ColumnElement[_T]) -> ColumnElement[_T]: ...
+ + def _negate(self) -> ColumnElement[Any]: + if self.type._type_affinity is type_api.BOOLEANTYPE._type_affinity: + return AsBoolean(self, operators.is_false, operators.is_true) + else: + grouped = self.self_group(against=operators.inv) + assert isinstance(grouped, ColumnElement) + return UnaryExpression( + grouped, operator=operators.inv, wraps_column_expression=True + ) + + type: TypeEngine[_T] + + if not TYPE_CHECKING: + + @util.memoized_property + def type(self) -> TypeEngine[_T]: # noqa: A001 + # used for delayed setup of + # type_api + return type_api.NULLTYPE + + @HasMemoized.memoized_attribute + def comparator(self) -> TypeEngine.Comparator[_T]: + try: + comparator_factory = self.type.comparator_factory + except AttributeError as err: + raise TypeError( + "Object %r associated with '.type' attribute " + "is not a TypeEngine class or object" % self.type + ) from err + else: + return comparator_factory(self) + + def __setstate__(self, state): + self.__dict__.update(state) + + def __getattr__(self, key: str) -> Any: + try: + return getattr(self.comparator, key) + except AttributeError as err: + raise AttributeError( + "Neither %r object nor %r object has an attribute %r" + % ( + type(self).__name__, + type(self.comparator).__name__, + key, + ) + ) from err + + def operate( + self, + op: operators.OperatorType, + *other: Any, + **kwargs: Any, + ) -> ColumnElement[Any]: + return op(self.comparator, *other, **kwargs) # type: ignore[no-any-return] # noqa: E501 + + def reverse_operate( + self, op: operators.OperatorType, other: Any, **kwargs: Any + ) -> ColumnElement[Any]: + return op(other, self.comparator, **kwargs) # type: ignore[no-any-return] # noqa: E501 + + def _bind_param( + self, + operator: operators.OperatorType, + obj: Any, + type_: Optional[TypeEngine[_T]] = None, + expanding: bool = False, + ) -> BindParameter[_T]: + return BindParameter( + None, + obj, + _compared_to_operator=operator, + type_=type_, + _compared_to_type=self.type, + unique=True, + expanding=expanding, + ) + + @property + def expression(self) -> ColumnElement[Any]: + """Return a column expression. + + Part of the inspection interface; returns self. + + """ + return self + + @property + def _select_iterable(self) -> _SelectIterable: + return (self,) + + @util.memoized_property + def base_columns(self) -> FrozenSet[ColumnElement[Any]]: + return frozenset(c for c in self.proxy_set if not c._proxies) + + @util.memoized_property + def proxy_set(self) -> FrozenSet[ColumnElement[Any]]: + """set of all columns we are proxying + + as of 2.0 this is explicitly deannotated columns. previously it was + effectively deannotated columns but wasn't enforced. annotated + columns should basically not go into sets if at all possible because + their hashing behavior is very non-performant. + + """ + return frozenset([self._deannotate()]).union( + itertools.chain(*[c.proxy_set for c in self._proxies]) + ) + + @util.memoized_property + def _expanded_proxy_set(self) -> FrozenSet[ColumnElement[Any]]: + return frozenset(_expand_cloned(self.proxy_set)) + + def _uncached_proxy_list(self) -> List[ColumnElement[Any]]: + """An 'uncached' version of proxy set. + + This list includes annotated columns which perform very poorly in + set operations. 
+ + """ + + return [self] + list( + itertools.chain(*[c._uncached_proxy_list() for c in self._proxies]) + ) + + def shares_lineage(self, othercolumn: ColumnElement[Any]) -> bool: + """Return True if the given :class:`_expression.ColumnElement` + has a common ancestor to this :class:`_expression.ColumnElement`.""" + + return bool(self.proxy_set.intersection(othercolumn.proxy_set)) + + def _compare_name_for_result(self, other: ColumnElement[Any]) -> bool: + """Return True if the given column element compares to this one + when targeting within a result row.""" + + return ( + hasattr(other, "name") + and hasattr(self, "name") + and other.name == self.name + ) + + @HasMemoized.memoized_attribute + def _proxy_key(self) -> Optional[str]: + if self._annotations and "proxy_key" in self._annotations: + return cast(str, self._annotations["proxy_key"]) + + name = self.key + if not name: + # there's a bit of a seeming contradiction which is that the + # "_non_anon_label" of a column can in fact be an + # "_anonymous_label"; this is when it's on a column that is + # proxying for an anonymous expression in a subquery. + name = self._non_anon_label + + if isinstance(name, _anonymous_label): + return None + else: + return name + + @HasMemoized.memoized_attribute + def _expression_label(self) -> Optional[str]: + """a suggested label to use in the case that the column has no name, + which should be used if possible as the explicit 'AS